Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/Kconfig13
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_pad.c36
-rw-r--r--drivers/acpi/acpica/acevents.h8
-rw-r--r--drivers/acpi/acpica/acglobal.h9
-rw-r--r--drivers/acpi/acpica/achware.h5
-rw-r--r--drivers/acpi/acpica/aclocal.h8
-rw-r--r--drivers/acpi/acpica/acnamesp.h7
-rw-r--r--drivers/acpi/acpica/acobject.h16
-rw-r--r--drivers/acpi/acpica/acpredef.h13
-rw-r--r--drivers/acpi/acpica/acstruct.h32
-rw-r--r--drivers/acpi/acpica/dsinit.c14
-rw-r--r--drivers/acpi/acpica/dsmethod.c20
-rw-r--r--drivers/acpi/acpica/dsmthdat.c16
-rw-r--r--drivers/acpi/acpica/dsobject.c16
-rw-r--r--drivers/acpi/acpica/dsopcode.c6
-rw-r--r--drivers/acpi/acpica/dsutils.c4
-rw-r--r--drivers/acpi/acpica/evevent.c5
-rw-r--r--drivers/acpi/acpica/evgpe.c142
-rw-r--r--drivers/acpi/acpica/evgpeblk.c70
-rw-r--r--drivers/acpi/acpica/evgpeinit.c236
-rw-r--r--drivers/acpi/acpica/evrgnini.c10
-rw-r--r--drivers/acpi/acpica/evxface.c77
-rw-r--r--drivers/acpi/acpica/evxfevnt.c233
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exfldio.c4
-rw-r--r--drivers/acpi/acpica/exprep.c23
-rw-r--r--drivers/acpi/acpica/exregion.c6
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c35
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c9
-rw-r--r--drivers/acpi/acpica/nsalloc.c94
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c16
-rw-r--r--drivers/acpi/acpica/nsnames.c4
-rw-r--r--drivers/acpi/acpica/nsparse.c4
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c15
-rw-r--r--drivers/acpi/acpica/nsutils.c113
-rw-r--r--drivers/acpi/acpica/nswalk.c15
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c5
-rw-r--r--drivers/acpi/acpica/utxface.c8
-rw-r--r--drivers/acpi/apei/Kconfig9
-rw-r--r--drivers/acpi/apei/Makefile1
-rw-r--r--drivers/acpi/apei/apei-base.c25
-rw-r--r--drivers/acpi/apei/einj.c4
-rw-r--r--drivers/acpi/apei/erst-dbg.c211
-rw-r--r--drivers/acpi/apei/erst.c30
-rw-r--r--drivers/acpi/apei/ghes.c172
-rw-r--r--drivers/acpi/apei/hest.c79
-rw-r--r--drivers/acpi/atomicio.c2
-rw-r--r--drivers/acpi/battery.c1
-rw-r--r--drivers/acpi/blacklist.c18
-rw-r--r--drivers/acpi/bus.c22
-rw-r--r--drivers/acpi/button.c6
-rw-r--r--drivers/acpi/debug.c406
-rw-r--r--drivers/acpi/debugfs.c93
-rw-r--r--drivers/acpi/ec.c39
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/glue.c3
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/osl.c67
-rw-r--r--drivers/acpi/pci_root.c88
-rw-r--r--drivers/acpi/power.c129
-rw-r--r--drivers/acpi/proc.c70
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_driver.c87
-rw-r--r--drivers/acpi/processor_idle.c106
-rw-r--r--drivers/acpi/processor_perflib.c4
-rw-r--r--drivers/acpi/processor_thermal.c83
-rw-r--r--drivers/acpi/processor_throttling.c2
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/sleep.c97
-rw-r--r--drivers/acpi/sleep.h5
-rw-r--r--drivers/acpi/sysfs.c (renamed from drivers/acpi/system.c)480
-rw-r--r--drivers/acpi/thermal.c86
-rw-r--r--drivers/acpi/video.c141
-rw-r--r--drivers/acpi/video_detect.c4
-rw-r--r--drivers/acpi/wakeup.c66
-rw-r--r--drivers/ata/Kconfig30
-rw-r--r--drivers/ata/Makefile5
-rw-r--r--drivers/ata/ahci.c20
-rw-r--r--drivers/ata/ahci.h13
-rw-r--r--drivers/ata/ahci_platform.c32
-rw-r--r--drivers/ata/ata_generic.c4
-rw-r--r--drivers/ata/ata_piix.c12
-rw-r--r--drivers/ata/libahci.c34
-rw-r--r--drivers/ata/libata-acpi.c6
-rw-r--r--drivers/ata/libata-core.c206
-rw-r--r--drivers/ata/libata-eh.c9
-rw-r--r--drivers/ata/libata-scsi.c14
-rw-r--r--drivers/ata/libata-sff.c54
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_artop.c3
-rw-r--r--drivers/ata/pata_cmd64x.c6
-rw-r--r--drivers/ata/pata_legacy.c15
-rw-r--r--drivers/ata/pata_mpc52xx.c8
-rw-r--r--drivers/ata/pata_of_platform.c4
-rw-r--r--drivers/ata/pata_pcmcia.c38
-rw-r--r--drivers/ata/pata_pxa.c411
-rw-r--r--drivers/ata/pata_samsung_cf.c683
-rw-r--r--drivers/ata/pata_scc.c3
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/pata_winbond.c282
-rw-r--r--drivers/ata/sata_dwc_460ex.c1756
-rw-r--r--drivers/ata/sata_fsl.c28
-rw-r--r--drivers/ata/sata_mv.c93
-rw-r--r--drivers/ata/sata_nv.c32
-rw-r--r--drivers/atm/firestream.c4
-rw-r--r--drivers/atm/fore200e.c34
-rw-r--r--drivers/atm/horizon.c6
-rw-r--r--drivers/atm/idt77252.c6
-rw-r--r--drivers/atm/iphase.c8
-rw-r--r--drivers/atm/iphase.h2
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/dd.c4
-rw-r--r--drivers/base/dma-coherent.c2
-rw-r--r--drivers/base/firmware_class.c262
-rw-r--r--drivers/base/node.c46
-rw-r--r--drivers/base/platform.c123
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/DAC960.c13
-rw-r--r--drivers/block/amiflop.c29
-rw-r--r--drivers/block/aoe/aoeblk.c6
-rw-r--r--drivers/block/ataflop.c32
-rw-r--r--drivers/block/brd.c9
-rw-r--r--drivers/block/cciss.c2176
-rw-r--r--drivers/block/cciss.h135
-rw-r--r--drivers/block/cciss_cmd.h36
-rw-r--r--drivers/block/cciss_scsi.c670
-rw-r--r--drivers/block/cpqarray.c78
-rw-r--r--drivers/block/drbd/drbd_actlog.c8
-rw-r--r--drivers/block/drbd/drbd_int.h16
-rw-r--r--drivers/block/drbd/drbd_main.c102
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/drbd/drbd_proc.c19
-rw-r--r--drivers/block/drbd/drbd_receiver.c135
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c15
-rw-r--r--drivers/block/floppy.c182
-rw-r--r--drivers/block/hd.c2
-rw-r--r--drivers/block/loop.c9
-rw-r--r--drivers/block/mg_disk.c7
-rw-r--r--drivers/block/nbd.c7
-rw-r--r--drivers/block/osdblk.c15
-rw-r--r--drivers/block/paride/pcd.c21
-rw-r--r--drivers/block/paride/pd.c11
-rw-r--r--drivers/block/paride/pf.c26
-rw-r--r--drivers/block/pktcdvd.c22
-rw-r--r--drivers/block/ps3disk.c25
-rw-r--r--drivers/block/swim.c20
-rw-r--r--drivers/block/swim3.c32
-rw-r--r--drivers/block/ub.c35
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/viodasd.c21
-rw-r--r--drivers/block/virtio_blk.c150
-rw-r--r--drivers/block/xd.c19
-rw-r--r--drivers/block/xen-blkfront.c433
-rw-r--r--drivers/block/xsysace.c13
-rw-r--r--drivers/block/z2ram.c13
-rw-r--r--drivers/bluetooth/bluecard_cs.c32
-rw-r--r--drivers/bluetooth/bt3c_cs.c27
-rw-r--r--drivers/bluetooth/btmrvl_main.c4
-rw-r--r--drivers/bluetooth/btsdio.c8
-rw-r--r--drivers/bluetooth/btuart_cs.c31
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/bluetooth/dtl1_cs.c35
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/cdrom/cdrom.c46
-rw-r--r--drivers/cdrom/gdrom.c48
-rw-r--r--drivers/cdrom/viocd.c106
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/efficeon-agp.c22
-rw-r--r--drivers/char/agp/intel-agp.c68
-rw-r--r--drivers/char/agp/intel-agp.h29
-rw-r--r--drivers/char/agp/intel-gtt.c139
-rw-r--r--drivers/char/amiserial.c25
-rw-r--r--drivers/char/briq_panel.c6
-rw-r--r--drivers/char/bsr.c1
-rw-r--r--drivers/char/cyclades.c22
-rw-r--r--drivers/char/epca.c4
-rw-r--r--drivers/char/hangcheck-timer.c2
-rw-r--r--drivers/char/hvc_console.c14
-rw-r--r--drivers/char/hvc_iucv.c9
-rw-r--r--drivers/char/hvc_tile.c67
-rw-r--r--drivers/char/hvsi.c6
-rw-r--r--drivers/char/hw_random/n2-drv.c10
-rw-r--r--drivers/char/hw_random/n2rng.h2
-rw-r--r--drivers/char/hw_random/pasemi-rng.c4
-rw-r--r--drivers/char/ip2/ip2main.c8
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c78
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c42
-rw-r--r--drivers/char/isicom.c13
-rw-r--r--drivers/char/istallion.c68
-rw-r--r--drivers/char/keyboard.c10
-rw-r--r--drivers/char/mem.c5
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/mxser.c2
-rw-r--r--drivers/char/n_gsm.c3
-rw-r--r--drivers/char/n_hdlc.c16
-rw-r--r--drivers/char/n_r3964.c10
-rw-r--r--drivers/char/n_tty.c17
-rw-r--r--drivers/char/nozomi.c7
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c30
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c37
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c47
-rw-r--r--drivers/char/pcmcia/ipwireless/main.h1
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.h1
-rw-r--r--drivers/char/pcmcia/synclink_cs.c29
-rw-r--r--drivers/char/pty.c51
-rw-r--r--drivers/char/riscom8.c14
-rw-r--r--drivers/char/rocket.c29
-rw-r--r--drivers/char/rtc.c2
-rw-r--r--drivers/char/selection.c13
-rw-r--r--drivers/char/serial167.c8
-rw-r--r--drivers/char/specialix.c13
-rw-r--r--drivers/char/stallion.c20
-rw-r--r--drivers/char/sx.c12
-rw-r--r--drivers/char/synclink.c21
-rw-r--r--drivers/char/synclink_gt.c96
-rw-r--r--drivers/char/synclinkmp.c43
-rw-r--r--drivers/char/sysrq.c53
-rw-r--r--drivers/char/tty_io.c236
-rw-r--r--drivers/char/tty_ioctl.c18
-rw-r--r--drivers/char/tty_ldisc.c43
-rw-r--r--drivers/char/tty_mutex.c47
-rw-r--r--drivers/char/tty_port.c4
-rw-r--r--drivers/char/vc_screen.c4
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/char/vt.c133
-rw-r--r--drivers/char/vt_ioctl.c33
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c5
-rw-r--r--drivers/clocksource/acpi_pm.c9
-rw-r--r--drivers/cpuidle/cpuidle.c31
-rw-r--r--drivers/cpuidle/governors/menu.c25
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c4
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c21
-rw-r--r--drivers/crypto/n2_core.c36
-rw-r--r--drivers/crypto/talitos.c6
-rw-r--r--drivers/dca/dca-core.c85
-rw-r--r--drivers/dma/Kconfig22
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c169
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/intel_mid_dma.c1143
-rw-r--r--drivers/dma/intel_mid_dma_regs.h260
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/dma_v2.c24
-rw-r--r--drivers/dma/ioat/dma_v3.c5
-rw-r--r--drivers/dma/mpc512x_dma.c4
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/pch_dma.c957
-rw-r--r--drivers/dma/ppc4xx/adma.c8
-rw-r--r--drivers/dma/shdma.c11
-rw-r--r--drivers/dma/ste_dma40.c860
-rw-r--r--drivers/dma/ste_dma40_ll.c40
-rw-r--r--drivers/dma/ste_dma40_ll.h15
-rw-r--r--drivers/dma/timb_dma.c8
-rw-r--r--drivers/edac/amd64_edac.c10
-rw-r--r--drivers/edac/edac_mc.c3
-rw-r--r--drivers/edac/edac_mce_amd.c17
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c1
-rw-r--r--drivers/edac/mpc85xx_edac.c28
-rw-r--r--drivers/edac/ppc4xx_edac.c12
-rw-r--r--drivers/firewire/Kconfig24
-rw-r--r--drivers/firewire/Makefile1
-rw-r--r--drivers/firewire/core-card.c218
-rw-r--r--drivers/firewire/core-cdev.c409
-rw-r--r--drivers/firewire/core-device.c11
-rw-r--r--drivers/firewire/core-iso.c34
-rw-r--r--drivers/firewire/core-topology.c22
-rw-r--r--drivers/firewire/core-transaction.c319
-rw-r--r--drivers/firewire/core.h24
-rw-r--r--drivers/firewire/net.c45
-rw-r--r--drivers/firewire/nosy-user.h25
-rw-r--r--drivers/firewire/nosy.c721
-rw-r--r--drivers/firewire/nosy.h237
-rw-r--r--drivers/firewire/ohci.c712
-rw-r--r--drivers/firewire/ohci.h1
-rw-r--r--drivers/firewire/sbp2.c36
-rw-r--r--drivers/firmware/Kconfig10
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/dcdbas.c5
-rw-r--r--drivers/firmware/dmi-id.c4
-rw-r--r--drivers/firmware/dmi_scan.c25
-rw-r--r--drivers/firmware/edd.c20
-rw-r--r--drivers/firmware/iscsi_ibft.c6
-rw-r--r--drivers/gpio/Kconfig18
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpiolib.c151
-rw-r--r--drivers/gpio/max730x.c22
-rw-r--r--drivers/gpio/pcf857x.c9
-rw-r--r--drivers/gpio/stmpe-gpio.c399
-rw-r--r--drivers/gpio/sx150x.c661
-rw-r--r--drivers/gpio/wm831x-gpio.c32
-rw-r--r--drivers/gpio/xilinx_gpio.c15
-rw-r--r--drivers/gpu/drm/Kconfig11
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/ati_pcigart.c2
-rw-r--r--drivers/gpu/drm/drm_buffer.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c48
-rw-r--r--drivers/gpu/drm/drm_crtc.c36
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c152
-rw-r--r--drivers/gpu/drm/drm_drv.c68
-rw-r--r--drivers/gpu/drm/drm_edid.c844
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h380
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c7
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c81
-rw-r--r--drivers/gpu/drm/drm_fops.c34
-rw-r--r--drivers/gpu/drm/drm_gem.c52
-rw-r--r--drivers/gpu/drm/drm_global.c (renamed from drivers/gpu/drm/ttm/ttm_global.c)30
-rw-r--r--drivers/gpu/drm/drm_info.c25
-rw-r--r--drivers/gpu/drm/drm_ioctl.c141
-rw-r--r--drivers/gpu/drm/drm_irq.c26
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c363
-rw-r--r--drivers/gpu/drm/drm_modes.c5
-rw-r--r--drivers/gpu/drm/drm_pci.c147
-rw-r--r--drivers/gpu/drm/drm_platform.c127
-rw-r--r--drivers/gpu/drm/drm_stub.c92
-rw-r--r--drivers/gpu/drm/drm_sysfs.c5
-rw-r--r--drivers/gpu/drm/drm_trace.h66
-rw-r--r--drivers/gpu/drm/drm_trace_points.c4
-rw-r--r--drivers/gpu/drm/drm_vm.c44
-rw-r--r--drivers/gpu/drm/i2c/Makefile3
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c23
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c5
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h3
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c462
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c127
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h65
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c139
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c2
-rw-r--r--drivers/gpu/drm/i830/i830_drv.h49
-rw-r--r--drivers/gpu/drm/i830/i830_irq.c10
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo.h7
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c53
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c153
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c125
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h101
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c622
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c271
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c201
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h79
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c138
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c59
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1301
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c732
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h46
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c139
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c14
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c88
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c416
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c115
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c111
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c111
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h13
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c2158
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h50
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c181
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c103
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c4
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h187
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c9
-rw-r--r--drivers/gpu/drm/mga/mga_state.c73
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c4
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig11
-rw-r--r--drivers/gpu/drm/nouveau/Makefile12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c910
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c421
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c128
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h190
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c72
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c160
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c83
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c341
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c105
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h109
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c340
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c110
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c90
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c133
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c (renamed from drivers/gpu/drm/nouveau/nv17_gpio.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c175
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c69
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c100
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c95
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c60
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c76
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c43
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c424
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c126
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c35
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c86
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c72
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c105
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c235
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h22
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c52
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h122
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c4
-rw-r--r--drivers/gpu/drm/r128/r128_state.c156
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/atom.c9
-rw-r--r--drivers/gpu/drm/radeon/atom.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c308
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c97
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h5
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h5
-rw-r--r--drivers/gpu/drm/radeon/r100.c79
-rw-r--r--drivers/gpu/drm/radeon/r100d.h2
-rw-r--r--drivers/gpu/drm/radeon/r300.c46
-rw-r--r--drivers/gpu/drm/radeon/r300d.h2
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h5
-rw-r--r--drivers/gpu/drm/radeon/r520.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c66
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c22
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c1115
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h24
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c270
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c6
-rw-r--r--drivers/gpu/drm/radeon/r600d.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon.h63
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c326
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c139
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c590
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c205
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c130
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c238
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c91
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c138
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c135
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c20
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r300 13
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r420 14
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs600 13
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv515 14
-rw-r--r--drivers/gpu/drm/radeon/rs400.c10
-rw-r--r--drivers/gpu/drm/radeon/rs600.c16
-rw-r--r--drivers/gpu/drm/radeon/rs690.c44
-rw-r--r--drivers/gpu/drm/radeon/rv515.c25
-rw-r--r--drivers/gpu/drm/radeon/rv770.c88
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h6
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c32
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c26
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c8
-rw-r--r--drivers/gpu/drm/via/via_dma.c148
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c71
-rw-r--r--drivers/gpu/drm/via/via_dmablit.h8
-rw-r--r--drivers/gpu/drm/via/via_drv.h22
-rw-r--r--drivers/gpu/drm/via/via_irq.c13
-rw-r--r--drivers/gpu/drm/via/via_map.c4
-rw-r--r--drivers/gpu/drm/via/via_mm.c7
-rw-r--r--drivers/gpu/drm/via/via_verifier.c47
-rw-r--r--drivers/gpu/drm/via/via_verifier.h4
-rw-r--r--drivers/gpu/drm/via/via_video.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c181
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c20
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-core.c5
-rw-r--r--drivers/hid/hid-egalax.c9
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-mosart.c1
-rw-r--r--drivers/hid/hid-picolcd.c4
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/hid-wacom.c49
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c13
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig59
-rw-r--r--drivers/hwmon/Makefile5
-rw-r--r--drivers/hwmon/adm1031.c43
-rw-r--r--drivers/hwmon/ads7871.c38
-rw-r--r--drivers/hwmon/ams/ams.h2
-rw-r--r--drivers/hwmon/asc7621.c9
-rw-r--r--drivers/hwmon/coretemp.c71
-rw-r--r--drivers/hwmon/emc1403.c34
-rw-r--r--drivers/hwmon/emc2103.c740
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c6
-rw-r--r--drivers/hwmon/hdaps.c1
-rw-r--r--drivers/hwmon/hp_accel.c2
-rw-r--r--drivers/hwmon/it87.c46
-rw-r--r--drivers/hwmon/jc42.c593
-rw-r--r--drivers/hwmon/jz4740-hwmon.c230
-rw-r--r--drivers/hwmon/k8temp.c38
-rw-r--r--drivers/hwmon/lis3lv02d.c4
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c4
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c4
-rw-r--r--drivers/hwmon/lm75.c39
-rw-r--r--drivers/hwmon/lm75.h1
-rw-r--r--drivers/hwmon/lm95241.c21
-rw-r--r--drivers/hwmon/ltc4245.c177
-rw-r--r--drivers/hwmon/mc13783-adc.c17
-rw-r--r--drivers/hwmon/pc87360.c31
-rw-r--r--drivers/hwmon/pc87427.c862
-rw-r--r--drivers/hwmon/pkgtemp.c455
-rw-r--r--drivers/hwmon/smm665.c743
-rw-r--r--drivers/hwmon/ultra45_env.c8
-rw-r--r--drivers/hwmon/via-cputemp.c2
-rw-r--r--drivers/hwmon/w83627ehf.c98
-rw-r--r--drivers/i2c/Kconfig13
-rw-r--r--drivers/i2c/Makefile3
-rw-r--r--drivers/i2c/busses/Kconfig24
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c14
-rw-r--r--drivers/i2c/busses/i2c-davinci.c316
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c10
-rw-r--r--drivers/i2c/busses/i2c-mpc.c75
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c709
-rw-r--r--drivers/i2c/busses/i2c-octeon.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c121
-rw-r--r--drivers/i2c/i2c-core.c167
-rw-r--r--drivers/i2c/i2c-dev.c66
-rw-r--r--drivers/i2c/i2c-mux.c165
-rw-r--r--drivers/i2c/muxes/Kconfig18
-rw-r--r--drivers/i2c/muxes/Makefile8
-rw-r--r--drivers/i2c/muxes/pca954x.c301
-rw-r--r--drivers/ide/ide-atapi.c17
-rw-r--r--drivers/ide/ide-cd.c112
-rw-r--r--drivers/ide/ide-cd_ioctl.c2
-rw-r--r--drivers/ide/ide-cs.c39
-rw-r--r--drivers/ide/ide-disk.c18
-rw-r--r--drivers/ide/ide-disk_ioctl.c9
-rw-r--r--drivers/ide/ide-eh.c5
-rw-r--r--drivers/ide/ide-floppy.c27
-rw-r--r--drivers/ide/ide-floppy_ioctl.c12
-rw-r--r--drivers/ide/ide-gd.c19
-rw-r--r--drivers/ide/ide-io.c8
-rw-r--r--drivers/ide/ide-pm.c8
-rw-r--r--drivers/ide/ide-probe.c12
-rw-r--r--drivers/ide/ide-tape.c22
-rw-r--r--drivers/ide/ide-taskfile.c10
-rw-r--r--drivers/ide/ide.c20
-rw-r--r--drivers/ide/tx4938ide.c2
-rw-r--r--drivers/ide/tx4939ide.c2
-rw-r--r--drivers/ide/via82cxxx.c2
-rw-r--r--drivers/idle/Kconfig3
-rw-r--r--[-rwxr-xr-x]drivers/idle/intel_idle.c81
-rw-r--r--drivers/ieee1394/dv1394.c18
-rw-r--r--drivers/ieee1394/eth1394.c19
-rw-r--r--drivers/ieee1394/ohci1394.c2
-rw-r--r--drivers/ieee1394/raw1394.c7
-rw-r--r--drivers/ieee1394/sbp2.c11
-rw-r--r--drivers/ieee1394/video1394.c17
-rw-r--r--drivers/infiniband/core/cm.c10
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c91
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c240
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h34
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h10
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c2
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c32
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.c11
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c37
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c32
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c37
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h4
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c19
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c25
-rw-r--r--drivers/infiniband/hw/qib/qib.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h16
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c203
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c47
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c21
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c118
-rw-r--r--drivers/input/evdev.c152
-rw-r--r--drivers/input/input.c57
-rw-r--r--drivers/input/joydev.c31
-rw-r--r--drivers/input/joystick/a3d.c3
-rw-r--r--drivers/input/joystick/adi.c2
-rw-r--r--drivers/input/joystick/amijoy.c4
-rw-r--r--drivers/input/joystick/gf2k.c20
-rw-r--r--drivers/input/joystick/interact.c14
-rw-r--r--drivers/input/joystick/sidewinder.c18
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/gpio_keys.c22
-rw-r--r--drivers/input/keyboard/hil_kbd.c21
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c386
-rw-r--r--drivers/input/misc/adxl34x.c1
-rw-r--r--drivers/input/misc/ati_remote2.c26
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c3
-rw-r--r--drivers/input/misc/sparcspkr.c22
-rw-r--r--drivers/input/misc/uinput.c31
-rw-r--r--drivers/input/mouse/appletouch.c6
-rw-r--r--drivers/input/mouse/bcm5974.c12
-rw-r--r--drivers/input/mouse/elantech.c31
-rw-r--r--drivers/input/mouse/elantech.h7
-rw-r--r--drivers/input/mouse/pc110pad.c4
-rw-r--r--drivers/input/mouse/psmouse-base.c14
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/mousedev.c44
-rw-r--r--drivers/input/serio/i8042-io.h5
-rw-r--r--drivers/input/serio/i8042-sparcio.h21
-rw-r--r--drivers/input/serio/i8042.c27
-rw-r--r--drivers/input/serio/xilinx_ps2.c4
-rw-r--r--drivers/input/tablet/aiptek.c15
-rw-r--r--drivers/input/tablet/wacom_wac.c8
-rw-r--r--drivers/input/touchscreen/Kconfig10
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c6
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c397
-rw-r--r--drivers/input/xen-kbdfront.c2
-rw-r--r--drivers/isdn/capi/capidrv.c17
-rw-r--r--drivers/isdn/divert/isdn_divert.c6
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c400
-rw-r--r--drivers/isdn/gigaset/common.c26
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/i4l.c2
-rw-r--r--drivers/isdn/gigaset/isocdata.c8
-rw-r--r--drivers/isdn/hardware/avm/Kconfig3
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c25
-rw-r--r--drivers/isdn/hardware/avm/c4.c1
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c1
-rw-r--r--drivers/isdn/hardware/eicon/debug.c2
-rw-r--r--drivers/isdn/hardware/eicon/debuglib.h2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c5
-rw-r--r--drivers/isdn/hisax/avma1_cs.c29
-rw-r--r--drivers/isdn/hisax/elsa_cs.c32
-rw-r--r--drivers/isdn/hisax/hfc_sx.c13
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c83
-rw-r--r--drivers/isdn/hisax/teles_cs.c30
-rw-r--r--drivers/isdn/i4l/isdn_tty.c15
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/stack.c7
-rw-r--r--drivers/isdn/pcbit/edss1.c2
-rw-r--r--drivers/isdn/pcbit/edss1.h2
-rw-r--r--drivers/isdn/sc/interrupt.c18
-rw-r--r--drivers/leds/leds-bd2802.c4
-rw-r--r--drivers/leds/leds-gpio.c4
-rw-r--r--drivers/leds/leds-ns2.c9
-rw-r--r--drivers/macintosh/macio_sysfs.c11
-rw-r--r--drivers/macintosh/smu.c6
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c6
-rw-r--r--drivers/macintosh/therm_windtunnel.c10
-rw-r--r--drivers/macintosh/via-pmu.c51
-rw-r--r--drivers/md/.gitignore4
-rw-r--r--drivers/md/Kconfig18
-rw-r--r--drivers/md/Makefile77
-rw-r--r--drivers/md/bitmap.c505
-rw-r--r--drivers/md/bitmap.h6
-rw-r--r--drivers/md/dm-crypt.c342
-rw-r--r--drivers/md/dm-delay.c6
-rw-r--r--drivers/md/dm-exception-store.c4
-rw-r--r--drivers/md/dm-exception-store.h3
-rw-r--r--drivers/md/dm-io.c12
-rw-r--r--drivers/md/dm-ioctl.c207
-rw-r--r--drivers/md/dm-kcopyd.c2
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-mpath.c11
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-snap-persistent.c6
-rw-r--r--drivers/md/dm-snap.c62
-rw-r--r--drivers/md/dm-stripe.c89
-rw-r--r--drivers/md/dm-table.c99
-rw-r--r--drivers/md/dm-target.c5
-rw-r--r--drivers/md/dm-zero.c5
-rw-r--r--drivers/md/dm.c374
-rw-r--r--drivers/md/dm.h14
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c367
-rw-r--r--drivers/md/md.h61
-rw-r--r--drivers/md/mktables.c132
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c37
-rw-r--r--drivers/md/raid10.c43
-rw-r--r--drivers/md/raid5.c183
-rw-r--r--drivers/md/raid5.h9
-rw-r--r--drivers/md/raid6algos.c154
-rw-r--r--drivers/md/raid6altivec.uc130
-rw-r--r--drivers/md/raid6int.uc117
-rw-r--r--drivers/md/raid6mmx.c142
-rw-r--r--drivers/md/raid6recov.c132
-rw-r--r--drivers/md/raid6sse1.c162
-rw-r--r--drivers/md/raid6sse2.c262
-rw-r--r--drivers/md/raid6test/Makefile75
-rw-r--r--drivers/md/raid6test/test.c124
-rw-r--r--drivers/md/raid6x86.h61
-rw-r--r--drivers/md/unroll.awk20
-rw-r--r--drivers/media/IR/Kconfig43
-rw-r--r--drivers/media/IR/Makefile2
-rw-r--r--drivers/media/IR/ene_ir.c1023
-rw-r--r--drivers/media/IR/ene_ir.h235
-rw-r--r--drivers/media/IR/imon.c20
-rw-r--r--drivers/media/IR/ir-core-priv.h13
-rw-r--r--drivers/media/IR/ir-jvc-decoder.c14
-rw-r--r--drivers/media/IR/ir-keytable.c13
-rw-r--r--drivers/media/IR/ir-lirc-codec.c124
-rw-r--r--drivers/media/IR/ir-nec-decoder.c25
-rw-r--r--drivers/media/IR/ir-raw-event.c159
-rw-r--r--drivers/media/IR/ir-sysfs.c2
-rw-r--r--drivers/media/IR/keymaps/Makefile2
-rw-r--r--drivers/media/IR/keymaps/rc-empty.c44
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-streamzap.c81
-rw-r--r--drivers/media/IR/keymaps/rc-rc6-mce.c2
-rw-r--r--drivers/media/IR/mceusb.c21
-rw-r--r--drivers/media/IR/rc-map.c23
-rw-r--r--drivers/media/IR/streamzap.c741
-rw-r--r--drivers/media/common/tuners/Kconfig2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c10
-rw-r--r--drivers/media/dvb/dm1105/Kconfig2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig2
-rw-r--r--drivers/media/dvb/firewire/firedtv-fw.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig2
-rw-r--r--drivers/media/dvb/mantis/Kconfig2
-rw-r--r--drivers/media/dvb/siano/Kconfig2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c6
-rw-r--r--drivers/media/dvb/ttpci/Kconfig2
-rw-r--r--drivers/media/video/Kconfig16
-rw-r--r--drivers/media/video/Makefile3
-rw-r--r--drivers/media/video/bt8xx/Kconfig2
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/video/cs53l32a.c107
-rw-r--r--drivers/media/video/cx18/Kconfig2
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c3
-rw-r--r--drivers/media/video/cx231xx/Kconfig2
-rw-r--r--drivers/media/video/cx2341x.c747
-rw-r--r--drivers/media/video/cx23885/Kconfig2
-rw-r--r--drivers/media/video/cx23885/Makefile5
-rw-r--r--drivers/media/video/cx23885/cx23885-av.c35
-rw-r--r--drivers/media/video/cx23885/cx23885-av.h27
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c114
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c124
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c42
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c72
-rw-r--r--drivers/media/video/cx23885/cx23885-ir.c24
-rw-r--r--drivers/media/video/cx23885/cx23885-reg.h1
-rw-r--r--drivers/media/video/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c23
-rw-r--r--drivers/media/video/cx23885/cx23885.h9
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c142
-rw-r--r--drivers/media/video/cx25840/Makefile2
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c144
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c540
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h52
-rw-r--r--drivers/media/video/cx25840/cx25840-ir.c1279
-rw-r--r--drivers/media/video/cx88/Kconfig2
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c19
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c2
-rw-r--r--drivers/media/video/fsl-viu.c8
-rw-r--r--drivers/media/video/gspca/gspca.c21
-rw-r--r--drivers/media/video/gspca/sonixj.c10
-rw-r--r--drivers/media/video/gspca/sq930x.c347
-rw-r--r--drivers/media/video/gspca/t613.c4
-rw-r--r--drivers/media/video/gspca/vc032x.c360
-rw-r--r--drivers/media/video/gspca/zc3xx.c1715
-rw-r--r--drivers/media/video/ivtv/Kconfig2
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.c276
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.h6
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c52
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h12
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c23
-rw-r--r--drivers/media/video/ivtv/ivtv-firmware.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.c77
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c16
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c31
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c15
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c24
-rw-r--r--drivers/media/video/msp3400-driver.c248
-rw-r--r--drivers/media/video/msp3400-driver.h18
-rw-r--r--drivers/media/video/msp3400-kthreads.c16
-rw-r--r--drivers/media/video/mt9m111.c40
-rw-r--r--drivers/media/video/mx2_camera.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.c14
-rw-r--r--drivers/media/video/s5p-fimc/Makefile3
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c1586
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h471
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c527
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h293
-rw-r--r--drivers/media/video/saa7115.c183
-rw-r--r--drivers/media/video/saa7134/Kconfig2
-rw-r--r--drivers/media/video/saa717x.c323
-rw-r--r--drivers/media/video/soc_camera.c9
-rw-r--r--drivers/media/video/tlg2300/Kconfig2
-rw-r--r--drivers/media/video/tvp7002.c10
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c12
-rw-r--r--drivers/media/video/uvc/uvc_driver.c9
-rw-r--r--drivers/media/video/uvc/uvc_queue.c13
-rw-r--r--drivers/media/video/uvc/uvc_video.c19
-rw-r--r--drivers/media/video/uvc/uvcvideo.h5
-rw-r--r--drivers/media/video/v4l2-common.c482
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c7
-rw-r--r--drivers/media/video/v4l2-ctrls.c1852
-rw-r--r--drivers/media/video/v4l2-dev.c60
-rw-r--r--drivers/media/video/v4l2-device.c7
-rw-r--r--drivers/media/video/v4l2-ioctl.c46
-rw-r--r--drivers/media/video/wm8739.c179
-rw-r--r--drivers/media/video/wm8775.c79
-rw-r--r--drivers/memstick/core/mspro_block.c18
-rw-r--r--drivers/message/fusion/mptbase.c247
-rw-r--r--drivers/message/fusion/mptbase.h25
-rw-r--r--drivers/message/i2o/exec-osm.c8
-rw-r--r--drivers/message/i2o/i2o_block.c33
-rw-r--r--drivers/message/i2o/i2o_config.c18
-rw-r--r--drivers/message/i2o/i2o_scsi.c3
-rw-r--r--drivers/mfd/88pm860x-core.c84
-rw-r--r--drivers/mfd/Kconfig75
-rw-r--r--drivers/mfd/Makefile5
-rw-r--r--drivers/mfd/ab3100-otp.c16
-rw-r--r--drivers/mfd/ab3550-core.c23
-rw-r--r--drivers/mfd/ab8500-core.c4
-rw-r--r--drivers/mfd/ab8500-spi.c7
-rw-r--r--drivers/mfd/abx500-core.c2
-rw-r--r--drivers/mfd/davinci_voicecodec.c6
-rw-r--r--drivers/mfd/janz-cmodio.c1
-rw-r--r--drivers/mfd/jz4740-adc.c394
-rw-r--r--drivers/mfd/max8925-core.c40
-rw-r--r--drivers/mfd/max8998.c158
-rw-r--r--drivers/mfd/mc13783-core.c30
-rw-r--r--drivers/mfd/menelaus.c75
-rw-r--r--drivers/mfd/mfd-core.c4
-rw-r--r--drivers/mfd/stmpe.c985
-rw-r--r--drivers/mfd/stmpe.h183
-rw-r--r--drivers/mfd/t7l66xb.c3
-rw-r--r--drivers/mfd/tc6387xb.c16
-rw-r--r--drivers/mfd/tc6393xb.c4
-rw-r--r--drivers/mfd/tps6507x.c4
-rw-r--r--drivers/mfd/tps6586x.c375
-rw-r--r--drivers/mfd/twl6030-pwm.c163
-rw-r--r--drivers/mfd/ucb1400_core.c2
-rw-r--r--drivers/mfd/wm831x-core.c18
-rw-r--r--drivers/mfd/wm831x-irq.c9
-rw-r--r--drivers/mfd/wm8350-core.c6
-rw-r--r--drivers/mfd/wm8994-core.c8
-rw-r--r--drivers/misc/Kconfig33
-rw-r--r--drivers/misc/Makefile5
-rw-r--r--drivers/misc/bh1780gli.c273
-rw-r--r--drivers/misc/bmp085.c482
-rw-r--r--drivers/misc/cs5535-mfgpt.c11
-rw-r--r--drivers/misc/hmc6352.c166
-rw-r--r--drivers/misc/hpilo.c17
-rw-r--r--drivers/misc/hpilo.h8
-rw-r--r--drivers/misc/lkdtm.c4
-rw-r--r--drivers/misc/vmw_balloon.c (renamed from drivers/misc/vmware_balloon.c)0
-rw-r--r--drivers/mmc/card/block.c87
-rw-r--r--drivers/mmc/card/mmc_test.c811
-rw-r--r--drivers/mmc/card/queue.c21
-rw-r--r--drivers/mmc/core/bus.c9
-rw-r--r--drivers/mmc/core/core.c441
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/mmc.c75
-rw-r--r--drivers/mmc/core/sd.c331
-rw-r--r--drivers/mmc/core/sd.h17
-rw-r--r--drivers/mmc/core/sd_ops.c48
-rw-r--r--drivers/mmc/core/sd_ops.h1
-rw-r--r--drivers/mmc/core/sdio.c209
-rw-r--r--drivers/mmc/host/Kconfig29
-rw-r--r--drivers/mmc/host/Makefile8
-rw-r--r--drivers/mmc/host/at91_mci.c1
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c1029
-rw-r--r--drivers/mmc/host/mmc_spi.c68
-rw-r--r--drivers/mmc/host/msm_sdcc.c62
-rw-r--r--drivers/mmc/host/msm_sdcc.h6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c50
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c97
-rw-r--r--drivers/mmc/host/sdhci-of-core.c12
-rw-r--r--drivers/mmc/host/sdhci-pci.c49
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c17
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h18
-rw-r--r--drivers/mmc/host/sdhci-s3c.c131
-rw-r--r--drivers/mmc/host/sdhci.c56
-rw-r--r--drivers/mmc/host/sdhci.h12
-rw-r--r--drivers/mmc/host/sdricoh_cs.c1
-rw-r--r--drivers/mmc/host/tmio_mmc.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mtd/Kconfig12
-rw-r--r--drivers/mtd/afs.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c31
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c17
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/chips/cfi_probe.c4
-rw-r--r--drivers/mtd/chips/cfi_util.c1
-rw-r--r--drivers/mtd/chips/chipreg.c1
-rw-r--r--drivers/mtd/chips/map_absent.c1
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c1
-rw-r--r--drivers/mtd/cmdlinepart.c17
-rw-r--r--drivers/mtd/devices/docecc.c1
-rw-r--r--drivers/mtd/devices/docprobe.c1
-rw-r--r--drivers/mtd/devices/m25p80.c57
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c12
-rw-r--r--drivers/mtd/devices/mtdram.c1
-rw-r--r--drivers/mtd/devices/pmc551.c1
-rw-r--r--drivers/mtd/devices/sst25l.c2
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/inftlcore.c6
-rw-r--r--drivers/mtd/inftlmount.c5
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c20
-rw-r--r--drivers/mtd/maps/Kconfig8
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/ixp4xx.c35
-rw-r--r--drivers/mtd/maps/pcmciamtd.c15
-rw-r--r--drivers/mtd/maps/physmap.c14
-rw-r--r--drivers/mtd/maps/physmap_of.c15
-rw-r--r--drivers/mtd/maps/redwood.c174
-rw-r--r--drivers/mtd/maps/sun_uflash.c10
-rw-r--r--drivers/mtd/mtd_blkdevs.c41
-rw-r--r--drivers/mtd/mtdblock.c19
-rw-r--r--drivers/mtd/mtdblock_ro.c19
-rw-r--r--drivers/mtd/mtdchar.c59
-rw-r--r--drivers/mtd/mtdconcat.c38
-rw-r--r--drivers/mtd/mtdcore.c21
-rw-r--r--drivers/mtd/mtdoops.c2
-rw-r--r--drivers/mtd/mtdpart.c31
-rw-r--r--drivers/mtd/mtdsuper.c2
-rw-r--r--drivers/mtd/nand/Kconfig39
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c124
-rw-r--r--drivers/mtd/nand/davinci_nand.c17
-rw-r--r--drivers/mtd/nand/denali.c1597
-rw-r--r--drivers/mtd/nand/denali.h148
-rw-r--r--drivers/mtd/nand/diskonchip.c6
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c4
-rw-r--r--drivers/mtd/nand/fsl_upm.c4
-rw-r--r--drivers/mtd/nand/jz4740_nand.c516
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/mxc_nand.c627
-rw-r--r--drivers/mtd/nand/nand_base.c115
-rw-r--r--drivers/mtd/nand/nand_bbt.c103
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c14
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/omap2.c220
-rw-r--r--drivers/mtd/nand/pasemi_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c7
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c8
-rw-r--r--drivers/mtd/nand/r852.c6
-rw-r--r--drivers/mtd/nand/rtc_from4.c1
-rw-r--r--drivers/mtd/nand/s3c2410.c15
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c4
-rw-r--r--drivers/mtd/nftlcore.c25
-rw-r--r--drivers/mtd/nftlmount.c3
-rw-r--r--drivers/mtd/ofpart.c4
-rw-r--r--drivers/mtd/onenand/Kconfig4
-rw-r--r--drivers/mtd/onenand/onenand_base.c49
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c1
-rw-r--r--drivers/mtd/onenand/samsung.c37
-rw-r--r--drivers/mtd/redboot.c18
-rw-r--r--drivers/mtd/rfd_ftl.c2
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c9
-rw-r--r--drivers/mtd/ubi/Kconfig.debug2
-rw-r--r--drivers/mtd/ubi/cdev.c12
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/3c503.c8
-rw-r--r--drivers/net/3c515.c6
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/3c59x.c49
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/acenic.c2
-rw-r--r--drivers/net/amd8111e.c18
-rw-r--r--drivers/net/amd8111e.h1
-rw-r--r--drivers/net/appletalk/ipddp.c10
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arm/am79c961a.c35
-rw-r--r--drivers/net/arm/am79c961a.h1
-rw-r--r--drivers/net/arm/ep93xx_eth.c39
-rw-r--r--drivers/net/arm/ether1.c34
-rw-r--r--drivers/net/arm/ether1.h1
-rw-r--r--drivers/net/arm/ether3.c33
-rw-r--r--drivers/net/arm/ether3.h1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c2
-rw-r--r--drivers/net/atarilance.c24
-rw-r--r--drivers/net/atl1c/atl1c.h1
-rw-r--r--drivers/net/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/atlx/atl1.c19
-rw-r--r--drivers/net/atlx/atl2.c10
-rw-r--r--drivers/net/atp.c2
-rw-r--r--drivers/net/au1000_eth.c344
-rw-r--r--drivers/net/au1000_eth.h42
-rw-r--r--drivers/net/b44.c11
-rw-r--r--drivers/net/bcm63xx_enet.c62
-rw-r--r--drivers/net/bcm63xx_enet.h1
-rw-r--r--drivers/net/benet/be.h95
-rw-r--r--drivers/net/benet/be_cmds.c52
-rw-r--r--drivers/net/benet/be_cmds.h34
-rw-r--r--drivers/net/benet/be_ethtool.c176
-rw-r--r--drivers/net/benet/be_hw.h7
-rw-r--r--drivers/net/benet/be_main.c658
-rw-r--r--drivers/net/bfin_mac.c10
-rw-r--r--drivers/net/bmac.c9
-rw-r--r--drivers/net/bna/Makefile11
-rw-r--r--drivers/net/bna/bfa_cee.c291
-rw-r--r--drivers/net/bna/bfa_cee.h64
-rw-r--r--drivers/net/bna/bfa_defs.h243
-rw-r--r--drivers/net/bna/bfa_defs_cna.h223
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h244
-rw-r--r--drivers/net/bna/bfa_defs_status.h216
-rw-r--r--drivers/net/bna/bfa_ioc.c1732
-rw-r--r--drivers/net/bna/bfa_ioc.h300
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c392
-rw-r--r--drivers/net/bna/bfa_sm.h88
-rw-r--r--drivers/net/bna/bfa_wc.h69
-rw-r--r--drivers/net/bna/bfi.h392
-rw-r--r--drivers/net/bna/bfi_cna.h199
-rw-r--r--drivers/net/bna/bfi_ctreg.h637
-rw-r--r--drivers/net/bna/bfi_ll.h438
-rw-r--r--drivers/net/bna/bna.h550
-rw-r--r--drivers/net/bna/bna_ctrl.c3261
-rw-r--r--drivers/net/bna/bna_hw.h1490
-rw-r--r--drivers/net/bna/bna_txrx.c4172
-rw-r--r--drivers/net/bna/bna_types.h1128
-rw-r--r--drivers/net/bna/bnad.c3264
-rw-r--r--drivers/net/bna/bnad.h332
-rw-r--r--drivers/net/bna/bnad_ethtool.c1277
-rw-r--r--drivers/net/bna/cna.h81
-rw-r--r--drivers/net/bna/cna_fwimg.c64
-rw-r--r--drivers/net/bnx2.c154
-rw-r--r--drivers/net/bnx2.h17
-rw-r--r--drivers/net/bnx2x/bnx2x.h705
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c949
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h593
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h35
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c399
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h819
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_file_hdr.h1
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h1778
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h44
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h366
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c8871
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h242
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c5960
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h932
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c305
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h8
-rw-r--r--drivers/net/bonding/bond_3ad.c5
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_main.c153
-rw-r--r--drivers/net/bonding/bond_sysfs.c44
-rw-r--r--drivers/net/bonding/bonding.h3
-rw-r--r--drivers/net/bsd_comp.c2
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/caif/caif_spi_slave.c4
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c26
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c4
-rw-r--r--drivers/net/cassini.c6
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/chelsio/vsc7326.c2
-rw-r--r--drivers/net/cnic.c955
-rw-r--r--drivers/net/cnic.h118
-rw-r--r--drivers/net/cnic_defs.h456
-rw-r--r--drivers/net/cnic_if.h23
-rw-r--r--drivers/net/cpmac.c39
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c31
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/regs.h4
-rw-r--r--drivers/net/cxgb3/sge.c2
-rw-r--r--drivers/net/cxgb3/t3_hw.c7
-rw-r--r--drivers/net/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c101
-rw-r--r--drivers/net/cxgb4/sge.c19
-rw-r--r--drivers/net/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h5
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c5
-rw-r--r--drivers/net/cxgb4vf/sge.c3
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/declance.c2
-rw-r--r--drivers/net/defxx.c66
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/dummy.c58
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_main.c241
-rw-r--r--drivers/net/e1000e/82571.c37
-rw-r--r--drivers/net/e1000e/defines.h6
-rw-r--r--drivers/net/e1000e/e1000.h29
-rw-r--r--drivers/net/e1000e/es2lan.c1
-rw-r--r--drivers/net/e1000e/ethtool.c4
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c199
-rw-r--r--drivers/net/e1000e/lib.c10
-rw-r--r--drivers/net/e1000e/netdev.c190
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/eepro.c8
-rw-r--r--drivers/net/ehea/ehea.h9
-rw-r--r--drivers/net/ehea/ehea_main.c99
-rw-r--r--drivers/net/enic/enic.h3
-rw-r--r--drivers/net/enic/enic_main.c25
-rw-r--r--drivers/net/enic/enic_res.c17
-rw-r--r--drivers/net/enic/enic_res.h2
-rw-r--r--drivers/net/enic/vnic_dev.c139
-rw-r--r--drivers/net/enic/vnic_dev.h19
-rw-r--r--drivers/net/enic/vnic_devcmd.h12
-rw-r--r--drivers/net/enic/vnic_enet.h2
-rw-r--r--drivers/net/enic/vnic_intr.c5
-rw-r--r--drivers/net/enic/vnic_resource.h13
-rw-r--r--drivers/net/enic/vnic_rq.c8
-rw-r--r--drivers/net/enic/vnic_rq.h6
-rw-r--r--drivers/net/enic/vnic_rss.h45
-rw-r--r--drivers/net/enic/vnic_vic.c7
-rw-r--r--drivers/net/enic/vnic_wq.c8
-rw-r--r--drivers/net/enic/vnic_wq.h4
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/eth16i.c16
-rw-r--r--drivers/net/ethoc.c6
-rw-r--r--drivers/net/fealnx.c4
-rw-r--r--drivers/net/fec.c3
-rw-r--r--drivers/net/fec_mpc52xx.c14
-rw-r--r--drivers/net/fec_mpc52xx_phy.c4
-rw-r--r--drivers/net/forcedeth.c8
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c7
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/fsl_pq_mdio.c9
-rw-r--r--drivers/net/gianfar.c32
-rw-r--r--drivers/net/gianfar.h2
-rw-r--r--drivers/net/gianfar_ethtool.c4
-rw-r--r--drivers/net/greth.c12
-rw-r--r--drivers/net/greth.h2
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/scc.c3
-rw-r--r--drivers/net/hp.c8
-rw-r--r--drivers/net/hp100.c6
-rw-r--r--drivers/net/hydra.c13
-rw-r--r--drivers/net/ibm_newemac/core.c20
-rw-r--r--drivers/net/ibm_newemac/core.h18
-rw-r--r--drivers/net/ibm_newemac/debug.c2
-rw-r--r--drivers/net/ibm_newemac/mal.c4
-rw-r--r--drivers/net/ibm_newemac/mal.h2
-rw-r--r--drivers/net/ibm_newemac/rgmii.c18
-rw-r--r--drivers/net/ibm_newemac/rgmii.h16
-rw-r--r--drivers/net/ibm_newemac/tah.c14
-rw-r--r--drivers/net/ibm_newemac/tah.h12
-rw-r--r--drivers/net/ibm_newemac/zmii.c18
-rw-r--r--drivers/net/ibm_newemac/zmii.h16
-rw-r--r--drivers/net/ibmlana.c2
-rw-r--r--drivers/net/ibmveth.c985
-rw-r--r--drivers/net/ibmveth.h59
-rw-r--r--drivers/net/igb/e1000_82575.c18
-rw-r--r--drivers/net/igb/e1000_defines.h31
-rw-r--r--drivers/net/igb/e1000_hw.h2
-rw-r--r--drivers/net/igb/e1000_phy.c206
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_main.c47
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c4
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.c6
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/irda-usb.c10
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/sh_irda.c6
-rw-r--r--drivers/net/irda/sir_dev.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c2
-rw-r--r--drivers/net/irda/via-ircc.c3
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c32
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c2
-rw-r--r--drivers/net/ixgb/ixgb_hw.c14
-rw-r--r--drivers/net/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ixgbe/ixgbe.h32
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c234
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c50
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c219
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h18
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c67
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h15
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c69
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h18
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c387
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c7
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1799
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ixgbevf/ethtool.c153
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h1
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c32
-rw-r--r--drivers/net/ixgbevf/mbx.c2
-rw-r--r--drivers/net/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ixgbevf/vf.c2
-rw-r--r--drivers/net/ixgbevf/vf.h2
-rw-r--r--drivers/net/jme.c100
-rw-r--r--drivers/net/jme.h3
-rw-r--r--drivers/net/ks8851.c39
-rw-r--r--drivers/net/ll_temac_main.c17
-rw-r--r--drivers/net/ll_temac_mdio.c1
-rw-r--r--drivers/net/loopback.c28
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/mac8390.c48
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c99
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mlx4/Makefile2
-rw-r--r--drivers/net/mlx4/alloc.c17
-rw-r--r--drivers/net/mlx4/en_ethtool.c173
-rw-r--r--drivers/net/mlx4/en_main.c24
-rw-r--r--drivers/net/mlx4/en_netdev.c31
-rw-r--r--drivers/net/mlx4/en_port.c32
-rw-r--r--drivers/net/mlx4/en_port.h14
-rw-r--r--drivers/net/mlx4/en_rx.c104
-rw-r--r--drivers/net/mlx4/en_selftest.c179
-rw-r--r--drivers/net/mlx4/en_tx.c20
-rw-r--r--drivers/net/mlx4/eq.c44
-rw-r--r--drivers/net/mlx4/fw.c15
-rw-r--r--drivers/net/mlx4/fw.h6
-rw-r--r--drivers/net/mlx4/main.c6
-rw-r--r--drivers/net/mlx4/mlx4_en.h39
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c103
-rw-r--r--drivers/net/myri_sbus.c12
-rw-r--r--drivers/net/myri_sbus.h2
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_init.c11
-rw-r--r--drivers/net/netxen/netxen_nic_main.c13
-rw-r--r--drivers/net/niu.c80
-rw-r--r--drivers/net/niu.h4
-rw-r--r--drivers/net/ns83820.c53
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pasemi_mac_ethtool.c16
-rw-r--r--drivers/net/pch_gbe/Makefile4
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h661
-rw-r--r--drivers/net/pch_gbe/pch_gbe_api.c245
-rw-r--r--drivers/net/pch_gbe/pch_gbe_api.h36
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c584
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c2473
-rw-r--r--drivers/net/pch_gbe/pch_gbe_param.c499
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.c274
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.h37
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/3c574_cs.c102
-rw-r--r--drivers/net/pcmcia/3c589_cs.c31
-rw-r--r--drivers/net/pcmcia/axnet_cs.c235
-rw-r--r--drivers/net/pcmcia/com20020_cs.c54
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c108
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c55
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c88
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c303
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c176
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c164
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio-gpio.c4
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c2
-rw-r--r--drivers/net/phy/smsc.c2
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/vitesse.c2
-rw-r--r--drivers/net/plip.c9
-rw-r--r--drivers/net/ppp_generic.c55
-rw-r--r--drivers/net/pppoe.c2
-rw-r--r--drivers/net/pppox.c4
-rw-r--r--drivers/net/pptp.c726
-rw-r--r--drivers/net/ps3_gelic_net.c4
-rw-r--r--drivers/net/ps3_gelic_wireless.c6
-rw-r--r--drivers/net/pxa168_eth.c1664
-rw-r--r--drivers/net/qla3xxx.c4
-rw-r--r--drivers/net/qlcnic/qlcnic.h179
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c411
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c153
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h47
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c131
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c339
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1383
-rw-r--r--drivers/net/qlge/qlge_main.c38
-rw-r--r--drivers/net/r6040.c70
-rw-r--r--drivers/net/r8169.c104
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c37
-rw-r--r--drivers/net/s2io.h9
-rw-r--r--drivers/net/sb1250-mac.c2
-rw-r--r--drivers/net/sc92031.c11
-rw-r--r--drivers/net/sfc/Makefile7
-rw-r--r--drivers/net/sfc/efx.c337
-rw-r--r--drivers/net/sfc/efx.h36
-rw-r--r--drivers/net/sfc/ethtool.c171
-rw-r--r--drivers/net/sfc/falcon.c136
-rw-r--r--drivers/net/sfc/falcon_boards.c203
-rw-r--r--drivers/net/sfc/falcon_gmac.c230
-rw-r--r--drivers/net/sfc/filter.c454
-rw-r--r--drivers/net/sfc/filter.h189
-rw-r--r--drivers/net/sfc/mac.h1
-rw-r--r--drivers/net/sfc/mdio_10g.c30
-rw-r--r--drivers/net/sfc/net_driver.h112
-rw-r--r--drivers/net/sfc/nic.c197
-rw-r--r--drivers/net/sfc/phy.h18
-rw-r--r--drivers/net/sfc/regs.h14
-rw-r--r--drivers/net/sfc/rx.c73
-rw-r--r--drivers/net/sfc/selftest.c7
-rw-r--r--drivers/net/sfc/siena.c4
-rw-r--r--drivers/net/sfc/tenxpress.c424
-rw-r--r--drivers/net/sfc/tx.c78
-rw-r--r--drivers/net/sfc/txc43128_phy.c560
-rw-r--r--drivers/net/sfc/workarounds.h9
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sh_eth.c6
-rw-r--r--drivers/net/sis900.c8
-rw-r--r--drivers/net/skfp/cfm.c10
-rw-r--r--drivers/net/skfp/drvfbi.c16
-rw-r--r--drivers/net/skfp/ess.c46
-rw-r--r--drivers/net/skfp/fplustm.c24
-rw-r--r--drivers/net/skfp/hwmtm.c30
-rw-r--r--drivers/net/skfp/hwt.c4
-rw-r--r--drivers/net/skfp/pcmplc.c22
-rw-r--r--drivers/net/skfp/pmf.c62
-rw-r--r--drivers/net/skfp/queue.c2
-rw-r--r--drivers/net/skfp/skfddi.c116
-rw-r--r--drivers/net/skfp/smt.c78
-rw-r--r--drivers/net/skfp/smtdef.c4
-rw-r--r--drivers/net/skfp/smtinit.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c23
-rw-r--r--drivers/net/sky2.c3
-rw-r--r--drivers/net/slip.c93
-rw-r--r--drivers/net/slip.h9
-rw-r--r--drivers/net/smc91x.h37
-rw-r--r--drivers/net/smsc911x.c3
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/starfire.c10
-rw-r--r--drivers/net/stmmac/Kconfig5
-rw-r--r--drivers/net/stmmac/common.h55
-rw-r--r--drivers/net/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c34
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c18
-rw-r--r--drivers/net/stmmac/dwmac100_core.c29
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c18
-rw-r--r--drivers/net/stmmac/dwmac_dma.h16
-rw-r--r--drivers/net/stmmac/dwmac_lib.c22
-rw-r--r--drivers/net/stmmac/enh_desc.c4
-rw-r--r--drivers/net/stmmac/norm_desc.c19
-rw-r--r--drivers/net/stmmac/stmmac.h9
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c37
-rw-r--r--drivers/net/stmmac/stmmac_main.c225
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c26
-rw-r--r--drivers/net/sun3lance.c4
-rw-r--r--drivers/net/sunbmac.c26
-rw-r--r--drivers/net/sunbmac.h4
-rw-r--r--drivers/net/sundance.c181
-rw-r--r--drivers/net/sungem.c211
-rw-r--r--drivers/net/sungem_phy.c5
-rw-r--r--drivers/net/sunhme.c42
-rw-r--r--drivers/net/sunhme.h2
-rw-r--r--drivers/net/sunlance.c28
-rw-r--r--drivers/net/sunqe.c28
-rw-r--r--drivers/net/sunqe.h4
-rw-r--r--drivers/net/sunvnet.c50
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.c34
-rw-r--r--drivers/net/tehuti.h1
-rw-r--r--drivers/net/tg3.c547
-rw-r--r--drivers/net/tg3.h34
-rw-r--r--drivers/net/tlan.c10
-rw-r--r--drivers/net/tlan.h8
-rw-r--r--drivers/net/tokenring/proteon.c2
-rw-r--r--drivers/net/tokenring/smctr.c500
-rw-r--r--drivers/net/tokenring/tms380tr.c64
-rw-r--r--drivers/net/tokenring/tmspci.c10
-rw-r--r--drivers/net/tsi108_eth.c2
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/de2104x.c46
-rw-r--r--drivers/net/tulip/de4x5.c57
-rw-r--r--drivers/net/tulip/dmfe.c2
-rw-r--r--drivers/net/tulip/interrupt.c77
-rw-r--r--drivers/net/tulip/tulip.h3
-rw-r--r--drivers/net/tulip/tulip_core.c10
-rw-r--r--drivers/net/tulip/uli526x.c4
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c15
-rw-r--r--drivers/net/typhoon.c50
-rw-r--r--drivers/net/ucc_geth.c8
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cx82310_eth.c346
-rw-r--r--drivers/net/usb/hso.c11
-rw-r--r--drivers/net/usb/ipheth.c12
-rw-r--r--drivers/net/usb/kaweth.c9
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/usbnet.c22
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/via-velocity.c4
-rw-r--r--drivers/net/via-velocity.h11
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vxge/vxge-main.c34
-rw-r--r--drivers/net/vxge/vxge-main.h1
-rw-r--r--drivers/net/wan/c101.c2
-rw-r--r--drivers/net/wan/cycx_drv.c14
-rw-r--r--drivers/net/wan/cycx_main.c6
-rw-r--r--drivers/net/wan/dlci.c42
-rw-r--r--drivers/net/wan/farsync.c15
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/ixp4xx_hss.c2
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/n2.c6
-rw-r--r--drivers/net/wan/pc300_drv.c20
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/sdla.c108
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wan/z85230.c4
-rw-r--r--drivers/net/wd.c8
-rw-r--r--drivers/net/wimax/i2400m/control.c18
-rw-r--r--drivers/net/wimax/i2400m/driver.c2
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h1
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h9
-rw-r--r--drivers/net/wimax/i2400m/rx.c28
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c2
-rw-r--r--drivers/net/wireless/airo.c5
-rw-r--r--drivers/net/wireless/airo_cs.c74
-rw-r--r--drivers/net/wireless/atmel_cs.c25
-rw-r--r--drivers/net/wireless/b43/pcmcia.c13
-rw-r--r--drivers/net/wireless/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c136
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c6
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c12
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/if_cs.c16
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c10
-rw-r--r--drivers/net/wireless/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c3
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c30
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c62
-rw-r--r--drivers/net/wireless/ray_cs.c55
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_scan.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c33
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/net/xilinx_emaclite.c23
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/of/Kconfig40
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/address.c595
-rw-r--r--drivers/of/base.c76
-rw-r--r--drivers/of/device.c93
-rw-r--r--drivers/of/fdt.c26
-rw-r--r--drivers/of/gpio.c93
-rw-r--r--drivers/of/irq.c349
-rw-r--r--drivers/of/of_i2c.c50
-rw-r--r--drivers/of/of_mdio.c1
-rw-r--r--drivers/of/of_spi.c11
-rw-r--r--drivers/of/platform.c384
-rw-r--r--drivers/oprofile/buffer_sync.c27
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--drivers/oprofile/event_buffer.c3
-rw-r--r--drivers/parport/parport_cs.c23
-rw-r--r--drivers/parport/parport_serial.c1
-rw-r--r--drivers/parport/parport_sunbpp.c10
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/bus.c4
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c6
-rw-r--r--drivers/pci/hotplug/fakephp.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h16
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c4
-rw-r--r--drivers/pci/hotplug/pciehp_core.c4
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c19
-rw-r--r--drivers/pci/intel-iommu.c149
-rw-r--r--drivers/pci/intr_remapping.c28
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/msi.c29
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-driver.c32
-rw-r--r--drivers/pci/pci-label.c143
-rw-r--r--drivers/pci/pci-sysfs.c75
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pci/pci.h16
-rw-r--r--drivers/pci/pcie/Kconfig20
-rw-r--r--drivers/pci/pcie/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c36
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c31
-rw-r--r--drivers/pci/pcie/aspm.c16
-rw-r--r--drivers/pci/pcie/pme.c (renamed from drivers/pci/pcie/pme/pcie_pme.c)66
-rw-r--r--drivers/pci/pcie/pme/Makefile8
-rw-r--r--drivers/pci/pcie/pme/pcie_pme.h28
-rw-r--r--drivers/pci/pcie/pme/pcie_pme_acpi.c54
-rw-r--r--drivers/pci/pcie/portdrv.h22
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c77
-rw-r--r--drivers/pci/pcie/portdrv_core.c53
-rw-r--r--drivers/pci/pcie/portdrv_pci.c38
-rw-r--r--drivers/pci/probe.c10
-rw-r--r--drivers/pci/proc.c6
-rw-r--r--drivers/pci/quirks.c68
-rw-r--r--drivers/pci/search.c2
-rw-r--r--drivers/pci/setup-bus.c12
-rw-r--r--drivers/pci/setup-irq.c3
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pcmcia/Kconfig6
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/au1000_generic.h1
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c2
-rw-r--r--drivers/pcmcia/cistpl.c11
-rw-r--r--drivers/pcmcia/cs.c79
-rw-r--r--drivers/pcmcia/cs_internal.h62
-rw-r--r--drivers/pcmcia/db1xxx_ss.c1
-rw-r--r--drivers/pcmcia/ds.c184
-rw-r--r--drivers/pcmcia/electra_cf.c6
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/m32r_cfc.c1
-rw-r--r--drivers/pcmcia/m32r_pcc.c1
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c5
-rw-r--r--drivers/pcmcia/pcmcia_cis.c1
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c1077
-rw-r--r--drivers/pcmcia/pcmcia_resource.c380
-rw-r--r--drivers/pcmcia/pd6729.c3
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c158
-rw-r--r--drivers/pcmcia/pxa2xx_base.c1
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c8
-rw-r--r--drivers/pcmcia/rsrc_mgr.c6
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c15
-rw-r--r--drivers/pcmcia/sa1100_generic.c1
-rw-r--r--drivers/pcmcia/soc_common.h1
-rw-r--r--drivers/pcmcia/socket_sysfs.c1
-rw-r--r--drivers/pcmcia/tcic.c1
-rw-r--r--drivers/pcmcia/xxs1500_ss.c1
-rw-r--r--drivers/pcmcia/yenta_socket.c1
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus_acpi.c6
-rw-r--r--drivers/platform/x86/compal-laptop.c9
-rw-r--r--drivers/platform/x86/dell-laptop.c7
-rw-r--r--drivers/platform/x86/hp-wmi.c64
-rw-r--r--drivers/platform/x86/ideapad_acpi.c306
-rw-r--r--drivers/platform/x86/intel_ips.c15
-rw-r--r--drivers/platform/x86/intel_rar_register.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c172
-rw-r--r--drivers/power/Kconfig24
-rw-r--r--drivers/power/Makefile3
-rw-r--r--drivers/power/apm_power.c1
-rw-r--r--drivers/power/intel_mid_battery.c799
-rw-r--r--drivers/power/jz4740-battery.c445
-rw-r--r--drivers/power/olpc_battery.c3
-rw-r--r--drivers/power/s3c_adc_battery.c431
-rw-r--r--drivers/power/wm97xx_battery.c16
-rw-r--r--drivers/regulator/88pm8607.c4
-rw-r--r--drivers/regulator/Kconfig34
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/ab3100.c5
-rw-r--r--drivers/regulator/ab8500.c426
-rw-r--r--drivers/regulator/ad5398.c288
-rw-r--r--drivers/regulator/core.c7
-rw-r--r--drivers/regulator/isl6271a-regulator.c236
-rw-r--r--drivers/regulator/lp3971.c10
-rw-r--r--drivers/regulator/max1586.c22
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8660.c10
-rw-r--r--drivers/regulator/max8998.c637
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps6507x-regulator.c7
-rw-r--r--drivers/regulator/tps6586x-regulator.c396
-rw-r--r--drivers/regulator/wm831x-ldo.c7
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/regulator/wm8994-regulator.c5
-rw-r--r--drivers/rtc/Kconfig51
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-bfin.c15
-rw-r--r--drivers/rtc/rtc-cmos.c6
-rw-r--r--drivers/rtc/rtc-ds3232.c326
-rw-r--r--drivers/rtc/rtc-fm3130.c181
-rw-r--r--drivers/rtc/rtc-imxdi.c519
-rw-r--r--drivers/rtc/rtc-isl12022.c327
-rw-r--r--drivers/rtc/rtc-jz4740.c345
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-m48t59.c5
-rw-r--r--drivers/rtc/rtc-m48t86.c2
-rw-r--r--drivers/rtc/rtc-max6900.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c4
-rw-r--r--drivers/rtc/rtc-mxc.c6
-rw-r--r--drivers/rtc/rtc-nuc900.c64
-rw-r--r--drivers/rtc/rtc-pcf8563.c8
-rw-r--r--drivers/rtc/rtc-pl031.c3
-rw-r--r--drivers/rtc/rtc-pxa.c42
-rw-r--r--drivers/rtc/rtc-rp5c01.c89
-rw-r--r--drivers/rtc/rtc-s3c.c57
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/dasd_devmap.c44
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c94
-rw-r--r--drivers/s390/block/dasd_eckd.h7
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/dasd_int.h8
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/char/ctrlchar.c4
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/tape_block.c11
-rw-r--r--drivers/s390/cio/ccwreq.c16
-rw-r--r--drivers/s390/cio/chsc.c48
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/device.c47
-rw-r--r--drivers/s390/cio/device_pgid.c3
-rw-r--r--drivers/s390/cio/io_sch.h10
-rw-r--r--drivers/s390/cio/qdio.h29
-rw-r--r--drivers/s390/cio/qdio_debug.c33
-rw-r--r--drivers/s390/cio/qdio_main.c138
-rw-r--r--drivers/s390/cio/qdio_setup.c1
-rw-r--r--drivers/s390/cio/qdio_thinint.c66
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/claw.c118
-rw-r--r--drivers/s390/net/claw.h4
-rw-r--r--drivers/s390/net/ctcm_fsms.c60
-rw-r--r--drivers/s390/net/ctcm_main.c84
-rw-r--r--drivers/s390/net/ctcm_main.h4
-rw-r--r--drivers/s390/net/ctcm_mpc.c66
-rw-r--r--drivers/s390/net/ctcm_sysfs.c20
-rw-r--r--drivers/s390/net/qeth_core.h17
-rw-r--r--drivers/s390/net/qeth_core_main.c26
-rw-r--r--drivers/s390/net/qeth_l2_main.c175
-rw-r--r--drivers/s390/net/qeth_l3_main.c212
-rw-r--r--drivers/s390/net/smsgiucv_app.c7
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c6
-rw-r--r--drivers/sbus/char/bbc_envctrl.c6
-rw-r--r--drivers/sbus/char/bbc_i2c.c28
-rw-r--r--drivers/sbus/char/bbc_i2c.h10
-rw-r--r--drivers/sbus/char/display7seg.c16
-rw-r--r--drivers/sbus/char/envctrl.c10
-rw-r--r--drivers/sbus/char/flash.c23
-rw-r--r--drivers/sbus/char/openprom.c15
-rw-r--r--drivers/sbus/char/uctrl.c17
-rw-r--r--drivers/scsi/53c700.c3
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c4
-rw-r--r--drivers/scsi/aacraid/rx.c5
-rw-r--r--drivers/scsi/aha1542.c25
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1
-rw-r--r--drivers/scsi/be2iscsi/Kconfig1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h147
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c79
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c311
-rw-r--r--drivers/scsi/be2iscsi/be_main.h7
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c73
-rw-r--r--drivers/scsi/bfa/bfa_fcport.c2
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c4
-rw-r--r--drivers/scsi/bfa/include/protocol/fcp.h4
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h2
-rw-r--r--drivers/scsi/bnx2i/Kconfig3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/ch.c89
-rw-r--r--drivers/scsi/constants.c6
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/g_NCR5380.c47
-rw-r--r--drivers/scsi/g_NCR5380.h6
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c380
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h6
-rw-r--r--drivers/scsi/initio.c1
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c (renamed from drivers/firmware/iscsi_boot_sysfs.c)0
-rw-r--r--drivers/scsi/libfc/fc_fcp.c4
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c70
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c76
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h32
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c132
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/osd/osd_initiator.c8
-rw-r--r--drivers/scsi/osst.c3
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c17
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c15
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c61
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c33
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c31
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c34
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h36
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h20
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c48
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c315
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/qlogicpti.c22
-rw-r--r--drivers/scsi/qlogicpti.h2
-rw-r--r--drivers/scsi/scsi_error.c23
-rw-r--r--drivers/scsi/scsi_lib.c20
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_tgt_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c12
-rw-r--r--drivers/scsi/sd.c144
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/sg.c11
-rw-r--r--drivers/scsi/sr.c25
-rw-r--r--drivers/scsi/sun3_NCR5380.c2
-rw-r--r--drivers/scsi/sun3_scsi.c2
-rw-r--r--drivers/scsi/sun3_scsi_vme.c2
-rw-r--r--drivers/scsi/sun_esp.c50
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c10
-rw-r--r--drivers/serial/21285.c10
-rw-r--r--drivers/serial/68328serial.c55
-rw-r--r--drivers/serial/68360serial.c4
-rw-r--r--drivers/serial/8250.c59
-rw-r--r--drivers/serial/8250_early.c57
-rw-r--r--drivers/serial/8250_pci.c13
-rw-r--r--drivers/serial/Kconfig56
-rw-r--r--drivers/serial/Makefile4
-rw-r--r--drivers/serial/altera_uart.c2
-rw-r--r--drivers/serial/amba-pl010.c9
-rw-r--r--drivers/serial/apbuart.c2
-rw-r--r--drivers/serial/atmel_serial.c11
-rw-r--r--drivers/serial/bfin_5xx.c7
-rw-r--r--drivers/serial/bfin_sport_uart.c2
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c4
-rw-r--r--drivers/serial/crisv10.c23
-rw-r--r--drivers/serial/imx.c10
-rw-r--r--drivers/serial/ioc3_serial.c9
-rw-r--r--drivers/serial/ioc4_serial.c9
-rw-r--r--drivers/serial/kgdboc.c18
-rw-r--r--drivers/serial/max3100.c7
-rw-r--r--drivers/serial/max3107-aava.c344
-rw-r--r--drivers/serial/max3107.c1197
-rw-r--r--drivers/serial/max3107.h441
-rw-r--r--drivers/serial/mcf.c31
-rw-r--r--drivers/serial/mfd.c1500
-rw-r--r--drivers/serial/mpc52xx_uart.c154
-rw-r--r--drivers/serial/mrst_max3110.c845
-rw-r--r--drivers/serial/mrst_max3110.h59
-rw-r--r--drivers/serial/nwpserial.c2
-rw-r--r--drivers/serial/of_serial.c9
-rw-r--r--drivers/serial/s5pv210.c8
-rw-r--r--drivers/serial/samsung.c9
-rw-r--r--drivers/serial/serial_core.c288
-rw-r--r--drivers/serial/serial_cs.c116
-rw-r--r--drivers/serial/sh-sci.c42
-rw-r--r--drivers/serial/sh-sci.h29
-rw-r--r--drivers/serial/sn_console.c2
-rw-r--r--drivers/serial/suncore.c15
-rw-r--r--drivers/serial/sunhv.c12
-rw-r--r--drivers/serial/sunsab.c14
-rw-r--r--drivers/serial/sunsu.c12
-rw-r--r--drivers/serial/sunzilog.c22
-rw-r--r--drivers/serial/timbuart.c6
-rw-r--r--drivers/serial/uartlite.c5
-rw-r--r--drivers/serial/ucc_uart.c4
-rw-r--r--drivers/sh/Makefile5
-rw-r--r--drivers/sh/clk-cpg.c58
-rw-r--r--drivers/spi/amba-pl022.c22
-rw-r--r--drivers/spi/coldfire_qspi.c1
-rw-r--r--drivers/spi/dw_spi.c24
-rw-r--r--drivers/spi/mpc512x_psc_spi.c18
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c15
-rw-r--r--drivers/spi/mpc52xx_spi.c7
-rw-r--r--drivers/spi/omap_spi_100k.c23
-rw-r--r--drivers/spi/spi.c243
-rw-r--r--drivers/spi/spi_bitbang.c9
-rw-r--r--drivers/spi/spi_bitbang_txrx.h16
-rw-r--r--drivers/spi/spi_butterfly.c2
-rw-r--r--drivers/spi/spi_gpio.c109
-rw-r--r--drivers/spi/spi_lm70llp.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c18
-rw-r--r--drivers/spi/spi_ppc4xx.c8
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c8
-rw-r--r--drivers/spi/spi_s3c64xx.c37
-rw-r--r--drivers/spi/spi_sh_sci.c8
-rw-r--r--drivers/spi/xilinx_spi.c3
-rw-r--r--drivers/spi/xilinx_spi_of.c10
-rw-r--r--drivers/ssb/main.c1
-rw-r--r--drivers/ssb/pcmcia.c15
-rw-r--r--drivers/ssb/scan.c1
-rw-r--r--drivers/staging/Kconfig16
-rw-r--r--drivers/staging/Makefile9
-rw-r--r--drivers/staging/adis16255/adis16255.c4
-rw-r--r--drivers/staging/batman-adv/CHANGELOG12
-rw-r--r--drivers/staging/batman-adv/Kconfig2
-rw-r--r--drivers/staging/batman-adv/Makefile4
-rw-r--r--drivers/staging/batman-adv/README2
-rw-r--r--drivers/staging/batman-adv/TODO9
-rw-r--r--drivers/staging/batman-adv/aggregation.c9
-rw-r--r--drivers/staging/batman-adv/aggregation.h5
-rw-r--r--drivers/staging/batman-adv/bat_debugfs.c341
-rw-r--r--drivers/staging/batman-adv/bat_debugfs.h33
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c302
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.h13
-rw-r--r--drivers/staging/batman-adv/bitarray.c17
-rw-r--r--drivers/staging/batman-adv/bitarray.h13
-rw-r--r--drivers/staging/batman-adv/device.c354
-rw-r--r--drivers/staging/batman-adv/hard-interface.c87
-rw-r--r--drivers/staging/batman-adv/hard-interface.h5
-rw-r--r--drivers/staging/batman-adv/hash.c2
-rw-r--r--drivers/staging/batman-adv/hash.h12
-rw-r--r--drivers/staging/batman-adv/icmp_socket.c338
-rw-r--r--drivers/staging/batman-adv/icmp_socket.h (renamed from drivers/staging/batman-adv/device.h)24
-rw-r--r--drivers/staging/batman-adv/main.c69
-rw-r--r--drivers/staging/batman-adv/main.h82
-rw-r--r--drivers/staging/batman-adv/originator.c170
-rw-r--r--drivers/staging/batman-adv/originator.h10
-rw-r--r--drivers/staging/batman-adv/packet.h32
-rw-r--r--drivers/staging/batman-adv/ring_buffer.h5
-rw-r--r--drivers/staging/batman-adv/routing.c422
-rw-r--r--drivers/staging/batman-adv/routing.h13
-rw-r--r--drivers/staging/batman-adv/send.c73
-rw-r--r--drivers/staging/batman-adv/send.h7
-rw-r--r--drivers/staging/batman-adv/soft-interface.c165
-rw-r--r--drivers/staging/batman-adv/soft-interface.h13
-rw-r--r--drivers/staging/batman-adv/sysfs-class-net-batman-adv14
-rw-r--r--drivers/staging/batman-adv/sysfs-class-net-mesh33
-rw-r--r--drivers/staging/batman-adv/translation-table.c171
-rw-r--r--drivers/staging/batman-adv/translation-table.h14
-rw-r--r--drivers/staging/batman-adv/types.h44
-rw-r--r--drivers/staging/batman-adv/vis.c147
-rw-r--r--drivers/staging/batman-adv/vis.h13
-rw-r--r--drivers/staging/comedi/TODO1
-rw-r--r--drivers/staging/comedi/comedi_fops.c11
-rw-r--r--drivers/staging/comedi/comedidev.h58
-rw-r--r--drivers/staging/comedi/drivers/8255.c17
-rw-r--r--drivers/staging/comedi/drivers/acl7225b.c17
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c38
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c12
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c8
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2016.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3001.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c4
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c48
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c51
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7296.c51
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7432.c51
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c51
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c213
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c42
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c17
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c89
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c42
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c355
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c17
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c17
-rw-r--r--drivers/staging/comedi/drivers/am9513.h22
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c57
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c57
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c57
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c44
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c50
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c17
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c63
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c48
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c48
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c48
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidio.c46
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c50
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c46
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c37
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c17
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c17
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c49
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c44
-rw-r--r--drivers/staging/comedi/drivers/das08.c71
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c50
-rw-r--r--drivers/staging/comedi/drivers/das16.c17
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c17
-rw-r--r--drivers/staging/comedi/drivers/das1800.c17
-rw-r--r--drivers/staging/comedi/drivers/das6402.c17
-rw-r--r--drivers/staging/comedi/drivers/das800.c17
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c99
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c17
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c17
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c17
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c17
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c17
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c17
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c42
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c6
-rw-r--r--drivers/staging/comedi/drivers/fl512.c17
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c72
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c17
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.h6
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c17
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c69
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c48
-rw-r--r--drivers/staging/comedi/drivers/me4000.c464
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c48
-rw-r--r--drivers/staging/comedi/drivers/mite.c10
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c17
-rw-r--r--drivers/staging/comedi/drivers/mpc8260cpm.c13
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c17
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c40
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c17
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c17
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c13
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c17
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c75
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c76
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c121
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c73
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c54
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c31
-rw-r--r--drivers/staging/comedi/drivers/pcl724.c17
-rw-r--r--drivers/staging/comedi/drivers/pcl725.c17
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c17
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c17
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c493
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c240
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c17
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c19
-rw-r--r--drivers/staging/comedi/drivers/pcm3730.c17
-rw-r--r--drivers/staging/comedi/drivers/pcm_common.c4
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c17
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c29
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c34
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c17
-rw-r--r--drivers/staging/comedi/drivers/poc.c17
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c103
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c42
-rw-r--r--drivers/staging/comedi/drivers/rti800.c17
-rw-r--r--drivers/staging/comedi/drivers/rti802.c17
-rw-r--r--drivers/staging/comedi/drivers/s526.c17
-rw-r--r--drivers/staging/comedi/drivers/s626.c163
-rw-r--r--drivers/staging/comedi/drivers/s626.h9
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c115
-rw-r--r--drivers/staging/comedi/drivers/skel.c109
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c17
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c26
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c4
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c2
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c4
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c22
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/staging/cxt1e1/functions.c12
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c6
-rw-r--r--drivers/staging/cxt1e1/linux.c163
-rw-r--r--drivers/staging/cxt1e1/musycc.c204
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c4
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h4
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c158
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h11
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x.c45
-rw-r--r--drivers/staging/dream/pmem.c46
-rw-r--r--drivers/staging/dt3155/Kconfig4
-rw-r--r--drivers/staging/dt3155/Makefile6
-rw-r--r--drivers/staging/dt3155/TODO10
-rw-r--r--drivers/staging/dt3155/allocator.README98
-rw-r--r--drivers/staging/dt3155/allocator.c294
-rw-r--r--drivers/staging/dt3155/allocator.h28
-rw-r--r--drivers/staging/dt3155/dt3155.h161
-rw-r--r--drivers/staging/dt3155/dt3155.sysvinit60
-rw-r--r--drivers/staging/dt3155/dt3155_drv.c1099
-rw-r--r--drivers/staging/dt3155/dt3155_drv.h45
-rw-r--r--drivers/staging/dt3155/dt3155_io.c165
-rw-r--r--drivers/staging/dt3155/dt3155_io.h358
-rw-r--r--drivers/staging/dt3155/dt3155_isr.c509
-rw-r--r--drivers/staging/dt3155/dt3155_isr.h77
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c31
-rw-r--r--drivers/staging/easycap/Kconfig17
-rw-r--r--drivers/staging/easycap/Makefile13
-rw-r--r--drivers/staging/easycap/README130
-rw-r--r--drivers/staging/easycap/easycap.h638
-rw-r--r--drivers/staging/easycap/easycap_debug.h27
-rw-r--r--drivers/staging/easycap/easycap_ioctl.c2695
-rw-r--r--drivers/staging/easycap/easycap_ioctl.h28
-rw-r--r--drivers/staging/easycap/easycap_low.c1041
-rw-r--r--drivers/staging/easycap/easycap_main.c4354
-rw-r--r--drivers/staging/easycap/easycap_settings.c489
-rw-r--r--drivers/staging/easycap/easycap_sound.c1046
-rw-r--r--drivers/staging/easycap/easycap_sound.h28
-rw-r--r--drivers/staging/easycap/easycap_standard.h27
-rw-r--r--drivers/staging/easycap/easycap_testcard.c392
-rw-r--r--drivers/staging/et131x/et1310_phy.c8
-rw-r--r--drivers/staging/hv/Kconfig2
-rw-r--r--drivers/staging/hv/Makefile2
-rw-r--r--drivers/staging/hv/blkvsc.c12
-rw-r--r--drivers/staging/hv/blkvsc_drv.c39
-rw-r--r--drivers/staging/hv/channel.c54
-rw-r--r--drivers/staging/hv/channel_mgmt.c43
-rw-r--r--drivers/staging/hv/channel_mgmt.h8
-rw-r--r--drivers/staging/hv/connection.c23
-rw-r--r--drivers/staging/hv/hv.c29
-rw-r--r--drivers/staging/hv/hv_timesource.c101
-rw-r--r--drivers/staging/hv/hv_utils.c12
-rw-r--r--drivers/staging/hv/logging.h17
-rw-r--r--drivers/staging/hv/netvsc.c65
-rw-r--r--drivers/staging/hv/netvsc_drv.c54
-rw-r--r--drivers/staging/hv/ring_buffer.c74
-rw-r--r--drivers/staging/hv/ring_buffer.h35
-rw-r--r--drivers/staging/hv/rndis_filter.c86
-rw-r--r--drivers/staging/hv/storvsc.c36
-rw-r--r--drivers/staging/hv/storvsc_api.h4
-rw-r--r--drivers/staging/hv/storvsc_drv.c60
-rw-r--r--drivers/staging/hv/vmbus.c18
-rw-r--r--drivers/staging/hv/vmbus_drv.c99
-rw-r--r--drivers/staging/iio/Documentation/overview.txt2
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/Makefile4
-rw-r--r--drivers/staging/iio/TODO2
-rw-r--r--drivers/staging/iio/accel/Kconfig50
-rw-r--r--drivers/staging/iio/accel/Makefile3
-rw-r--r--drivers/staging/iio/accel/adis16209.h33
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c15
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c94
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c10
-rw-r--r--drivers/staging/iio/accel/adis16220.h1
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c20
-rw-r--r--drivers/staging/iio/accel/adis16240.h27
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c17
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c91
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c10
-rw-r--r--drivers/staging/iio/accel/kxsd9.c6
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h35
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c144
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c210
-rw-r--r--drivers/staging/iio/accel/sca3000.h14
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c14
-rw-r--r--drivers/staging/iio/adc/Makefile2
-rw-r--r--drivers/staging/iio/adc/adc.h3
-rw-r--r--drivers/staging/iio/adc/max1363.h69
-rw-r--r--drivers/staging/iio/adc/max1363_core.c861
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c60
-rw-r--r--drivers/staging/iio/chrdev.h3
-rw-r--r--drivers/staging/iio/gyro/Makefile2
-rw-r--r--drivers/staging/iio/gyro/adis16260.h25
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c12
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c92
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c10
-rw-r--r--drivers/staging/iio/iio.h51
-rw-r--r--drivers/staging/iio/imu/Kconfig18
-rw-r--r--drivers/staging/iio/imu/Makefile3
-rw-r--r--drivers/staging/iio/imu/adis16300.h28
-rw-r--r--drivers/staging/iio/imu/adis16300_core.c160
-rw-r--r--drivers/staging/iio/imu/adis16300_ring.c142
-rw-r--r--drivers/staging/iio/imu/adis16300_trigger.c10
-rw-r--r--drivers/staging/iio/imu/adis16350.h40
-rw-r--r--drivers/staging/iio/imu/adis16350_core.c17
-rw-r--r--drivers/staging/iio/imu/adis16350_ring.c91
-rw-r--r--drivers/staging/iio/imu/adis16350_trigger.c10
-rw-r--r--drivers/staging/iio/imu/adis16400.h45
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c101
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c137
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c10
-rw-r--r--drivers/staging/iio/industrialio-core.c22
-rw-r--r--drivers/staging/iio/industrialio-ring.c4
-rw-r--r--drivers/staging/iio/industrialio-trigger.c51
-rw-r--r--drivers/staging/iio/light/Kconfig1
-rw-r--r--drivers/staging/iio/light/light.h7
-rw-r--r--drivers/staging/iio/light/tsl2563.c389
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig15
-rw-r--r--drivers/staging/iio/magnetometer/Makefile5
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c624
-rw-r--r--drivers/staging/iio/ring_generic.h23
-rw-r--r--drivers/staging/iio/ring_sw.c69
-rw-r--r--drivers/staging/iio/ring_sw.h15
-rw-r--r--drivers/staging/iio/sysfs.h14
-rw-r--r--drivers/staging/iio/trigger.h24
-rw-r--r--drivers/staging/iio/trigger/Makefile3
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c12
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c15
-rw-r--r--drivers/staging/line6/Kconfig1
-rw-r--r--drivers/staging/line6/driver.c68
-rw-r--r--drivers/staging/lirc/Kconfig29
-rw-r--r--drivers/staging/lirc/Makefile2
-rw-r--r--drivers/staging/lirc/lirc_ene0100.c646
-rw-r--r--drivers/staging/lirc/lirc_it87.c9
-rw-r--r--drivers/staging/lirc/lirc_parallel.c4
-rw-r--r--drivers/staging/lirc/lirc_streamzap.c821
-rw-r--r--drivers/staging/memrar/TODO2
-rw-r--r--drivers/staging/memrar/memrar-abi4
-rw-r--r--drivers/staging/memrar/memrar_handler.c37
-rw-r--r--drivers/staging/msm/Kconfig10
-rw-r--r--drivers/staging/msm/Makefile5
-rw-r--r--drivers/staging/msm/lcdc_grapefruit.c60
-rw-r--r--drivers/staging/msm/lcdc_st1_wxga.c54
-rw-r--r--drivers/staging/msm/lcdc_wxga.c56
-rw-r--r--drivers/staging/msm/mddi_toshiba_wvga.c63
-rw-r--r--drivers/staging/msm/mddihost.h2
-rw-r--r--drivers/staging/msm/mdp4_debugfs.c10
-rw-r--r--drivers/staging/msm/mdp4_overlay.c8
-rw-r--r--drivers/staging/msm/msm_fb_def.h4
-rw-r--r--drivers/staging/msm/staging-devices.c13
-rw-r--r--drivers/staging/octeon/Kconfig2
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.c6
-rw-r--r--drivers/staging/octeon/cvmx-fau.h2
-rw-r--r--drivers/staging/octeon/ethernet-spi.c2
-rw-r--r--drivers/staging/otus/80211core/ctxrx.c2
-rw-r--r--drivers/staging/otus/TODO16
-rw-r--r--drivers/staging/otus/apdbg.c24
-rw-r--r--drivers/staging/otus/hal/hpani.c16
-rw-r--r--drivers/staging/otus/hal/hpmain.c28
-rw-r--r--drivers/staging/otus/hal/hpreg.c17
-rw-r--r--drivers/staging/otus/ioctl.c55
-rw-r--r--drivers/staging/otus/wrap_sec.c2
-rw-r--r--drivers/staging/otus/wrap_usb.c5
-rw-r--r--drivers/staging/otus/wwrap.c968
-rw-r--r--drivers/staging/otus/zdusb.c251
-rw-r--r--drivers/staging/panel/panel.c1071
-rw-r--r--drivers/staging/pohmelfs/inode.c24
-rw-r--r--drivers/staging/pohmelfs/path_entry.c8
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c24
-rw-r--r--drivers/staging/quickstart/Kconfig10
-rw-r--r--drivers/staging/quickstart/Makefile1
-rw-r--r--drivers/staging/quickstart/quickstart.c474
-rw-r--r--drivers/staging/ramzswap/Kconfig21
-rw-r--r--drivers/staging/ramzswap/Makefile3
-rw-r--r--drivers/staging/ramzswap/ramzswap.txt51
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.c837
-rw-r--r--drivers/staging/rt2860/ap.h6
-rw-r--r--drivers/staging/rt2860/chlist.h50
-rw-r--r--drivers/staging/rt2860/common/cmm_wpa.c4
-rw-r--r--drivers/staging/rt2860/common/rtmp_timer.c4
-rw-r--r--drivers/staging/rt2860/mlme.h21
-rw-r--r--drivers/staging/rt2860/rt_linux.c2
-rw-r--r--drivers/staging/rt2860/rtmp.h4
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c50
-rw-r--r--drivers/staging/rt3070/md4.h42
-rw-r--r--drivers/staging/rtl8187se/Kconfig1
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c6
-rw-r--r--drivers/staging/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/ieee80211/dot11d.c18
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h97
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c20
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h8
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c48
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c198
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c96
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c68
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c16
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c185
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c73
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c9
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c132
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c17
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.c6
-rw-r--r--drivers/staging/rtl8192e/r8192E.h4
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c75
-rw-r--r--drivers/staging/rtl8192e/r8192E_dm.c22
-rw-r--r--drivers/staging/rtl8192e/r8192E_wx.c157
-rw-r--r--drivers/staging/rtl8192e/r819xE_phy.c2
-rw-r--r--drivers/staging/rtl8192su/Kconfig1
-rw-r--r--drivers/staging/rtl8192su/TODO7
-rw-r--r--drivers/staging/rtl8192su/ieee80211/dot11d.c28
-rw-r--r--drivers/staging/rtl8192su/ieee80211/dot11d.h25
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h32
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c5
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c7
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c31
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c26
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h43
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c290
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c11
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c17
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_BA.h30
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c50
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_HT.h42
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c147
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h88
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TS.h23
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c41
-rw-r--r--drivers/staging/rtl8192su/r8192SU_HWImg.c37
-rw-r--r--drivers/staging/rtl8192su/r8192SU_HWImg.h22
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.c9
-rw-r--r--drivers/staging/rtl8192su/r8192S_Efuse.c510
-rw-r--r--drivers/staging/rtl8192su/r8192S_Efuse.h70
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.c26
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.h100
-rw-r--r--drivers/staging/rtl8192su/r8192S_hw.h152
-rw-r--r--drivers/staging/rtl8192su/r8192S_phy.c659
-rw-r--r--drivers/staging/rtl8192su/r8192U.h309
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c175
-rw-r--r--drivers/staging/rtl8192su/r8192U_dm.c29
-rw-r--r--drivers/staging/rtl8192su/r8192U_wx.c128
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.c44
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.h1
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/dot11d.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211.h2595
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c73
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c2
-rw-r--r--drivers/staging/sep/Kconfig10
-rw-r--r--drivers/staging/sep/Makefile2
-rw-r--r--drivers/staging/sep/TODO8
-rw-r--r--drivers/staging/sep/sep_dev.h110
-rw-r--r--drivers/staging/sep/sep_driver.c2742
-rw-r--r--drivers/staging/sep/sep_driver_api.h425
-rw-r--r--drivers/staging/sep/sep_driver_config.h225
-rw-r--r--drivers/staging/sep/sep_driver_hw_defs.h232
-rw-r--r--drivers/staging/slicoss/slic.h12
-rw-r--r--drivers/staging/slicoss/slicoss.c3952
-rw-r--r--drivers/staging/sm7xx/smtcfb.c2
-rw-r--r--drivers/staging/solo6x10/Kconfig7
-rw-r--r--drivers/staging/solo6x10/Makefile6
-rw-r--r--drivers/staging/solo6x10/TODO28
-rw-r--r--drivers/staging/solo6x10/solo6010-core.c282
-rw-r--r--drivers/staging/solo6x10/solo6010-disp.c271
-rw-r--r--drivers/staging/solo6x10/solo6010-enc.c229
-rw-r--r--drivers/staging/solo6x10/solo6010-g723.c398
-rw-r--r--drivers/staging/solo6x10/solo6010-gpio.c103
-rw-r--r--drivers/staging/solo6x10/solo6010-i2c.c331
-rw-r--r--drivers/staging/solo6x10/solo6010-jpeg.h105
-rw-r--r--drivers/staging/solo6x10/solo6010-offsets.h78
-rw-r--r--drivers/staging/solo6x10/solo6010-osd-font.h154
-rw-r--r--drivers/staging/solo6x10/solo6010-p2m.c208
-rw-r--r--drivers/staging/solo6x10/solo6010-registers.h657
-rw-r--r--drivers/staging/solo6x10/solo6010-tw28.c823
-rw-r--r--drivers/staging/solo6x10/solo6010-tw28.h65
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2-enc.c1564
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2.c859
-rw-r--r--drivers/staging/solo6x10/solo6010.h317
-rw-r--r--drivers/staging/spectra/Kconfig41
-rw-r--r--drivers/staging/spectra/Makefile11
-rw-r--r--drivers/staging/spectra/README29
-rw-r--r--drivers/staging/spectra/ffsdefs.h58
-rw-r--r--drivers/staging/spectra/ffsport.c831
-rw-r--r--drivers/staging/spectra/ffsport.h84
-rw-r--r--drivers/staging/spectra/flash.c4315
-rw-r--r--drivers/staging/spectra/flash.h198
-rw-r--r--drivers/staging/spectra/lld.c339
-rw-r--r--drivers/staging/spectra/lld.h111
-rw-r--r--drivers/staging/spectra/lld_cdma.c910
-rw-r--r--drivers/staging/spectra/lld_cdma.h123
-rw-r--r--drivers/staging/spectra/lld_emu.c780
-rw-r--r--drivers/staging/spectra/lld_emu.h51
-rw-r--r--drivers/staging/spectra/lld_mtd.c687
-rw-r--r--drivers/staging/spectra/lld_mtd.h51
-rw-r--r--drivers/staging/spectra/lld_nand.c2601
-rw-r--r--drivers/staging/spectra/lld_nand.h131
-rw-r--r--drivers/staging/spectra/nand_regs.h619
-rw-r--r--drivers/staging/spectra/spectraswconfig.h82
-rw-r--r--drivers/staging/ti-st/Kconfig2
-rw-r--r--drivers/staging/ti-st/TODO13
-rw-r--r--drivers/staging/ti-st/bt_drv.c29
-rw-r--r--drivers/staging/ti-st/st.h65
-rw-r--r--drivers/staging/ti-st/st_core.c209
-rw-r--r--drivers/staging/ti-st/st_core.h76
-rw-r--r--drivers/staging/ti-st/st_kim.c365
-rw-r--r--drivers/staging/ti-st/st_kim.h94
-rw-r--r--drivers/staging/ti-st/st_ll.c8
-rw-r--r--drivers/staging/ti-st/st_ll.h9
-rw-r--r--drivers/staging/ti-st/sysfs-uim12
-rw-r--r--drivers/staging/tidspbridge/Documentation/CONTRIBUTORS45
-rw-r--r--drivers/staging/tidspbridge/Documentation/README70
-rw-r--r--drivers/staging/tidspbridge/Documentation/error-codes157
-rw-r--r--drivers/staging/tidspbridge/Kconfig90
-rw-r--r--drivers/staging/tidspbridge/Makefile34
-rw-r--r--drivers/staging/tidspbridge/TODO18
-rw-r--r--drivers/staging/tidspbridge/core/_cmm.h45
-rw-r--r--drivers/staging/tidspbridge/core/_deh.h35
-rw-r--r--drivers/staging/tidspbridge/core/_msg_sm.h142
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h371
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap_pwr.h85
-rw-r--r--drivers/staging/tidspbridge/core/chnl_sm.c1014
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c422
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c2333
-rw-r--r--drivers/staging/tidspbridge/core/msg_sm.c673
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c1802
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c550
-rw-r--r--drivers/staging/tidspbridge/core/tiomap_io.c455
-rw-r--r--drivers/staging/tidspbridge/core/tiomap_io.h104
-rw-r--r--drivers/staging/tidspbridge/core/ue_deh.c273
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c150
-rw-r--r--drivers/staging/tidspbridge/dynload/cload.c1953
-rw-r--r--drivers/staging/tidspbridge/dynload/dload_internal.h344
-rw-r--r--drivers/staging/tidspbridge/dynload/doff.h354
-rw-r--r--drivers/staging/tidspbridge/dynload/getsection.c407
-rw-r--r--drivers/staging/tidspbridge/dynload/header.h49
-rw-r--r--drivers/staging/tidspbridge/dynload/module_list.h159
-rw-r--r--drivers/staging/tidspbridge/dynload/params.h226
-rw-r--r--drivers/staging/tidspbridge/dynload/reloc.c484
-rw-r--r--drivers/staging/tidspbridge/dynload/reloc_table.h102
-rw-r--r--drivers/staging/tidspbridge/dynload/reloc_table_c6000.c257
-rw-r--r--drivers/staging/tidspbridge/dynload/tramp.c1143
-rw-r--r--drivers/staging/tidspbridge/dynload/tramp_table_c6000.c164
-rw-r--r--drivers/staging/tidspbridge/gen/gb.c167
-rw-r--r--drivers/staging/tidspbridge/gen/gh.c215
-rw-r--r--drivers/staging/tidspbridge/gen/gs.c89
-rw-r--r--drivers/staging/tidspbridge/gen/uuidutil.c113
-rw-r--r--drivers/staging/tidspbridge/hw/EasiGlobal.h41
-rw-r--r--drivers/staging/tidspbridge/hw/MMUAccInt.h76
-rw-r--r--drivers/staging/tidspbridge/hw/MMURegAcM.h225
-rw-r--r--drivers/staging/tidspbridge/hw/hw_defs.h58
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.c562
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.h163
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h181
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/brddefs.h39
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfg.h222
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h81
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnl.h130
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnldefs.h66
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h98
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/clk.h101
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmm.h386
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h105
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cod.h369
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbc.h46
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbdcd.h358
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h78
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbdefs.h514
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbldefs.h141
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbll.h62
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dblldefs.h496
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dehdefs.h32
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dev.h702
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/devdefs.h26
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/disp.h204
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dispdefs.h35
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dmm.h75
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/drv.h521
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/drvdefs.h25
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h475
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspapi.h167
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspchnl.h72
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspdefs.h1054
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspdeh.h43
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspdrv.h62
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspio.h41
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspioctl.h73
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspmsg.h56
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h492
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gb.h79
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/getsection.h108
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gh.h34
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gs.h59
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h88
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/io.h114
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/io_sm.h298
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/iodefs.h36
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/ldr.h29
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/list.h225
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h184
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/memdefs.h30
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/mgr.h205
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h45
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/msg.h86
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/msgdefs.h29
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/nldr.h57
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h293
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/node.h583
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/nodedefs.h28
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/nodepriv.h182
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/ntfy.h217
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/proc.h621
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/procpriv.h25
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/pwr.h107
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h33
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h52
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/rmm.h181
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/rms_sh.h95
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/rmstypes.h24
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/services.h50
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/strm.h404
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/strmdefs.h46
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/sync.h109
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/utildefs.h39
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/uuidutil.h62
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/wdt.h79
-rw-r--r--drivers/staging/tidspbridge/pmgr/chnl.c163
-rw-r--r--drivers/staging/tidspbridge/pmgr/chnlobj.h46
-rw-r--r--drivers/staging/tidspbridge/pmgr/cmm.c1154
-rw-r--r--drivers/staging/tidspbridge/pmgr/cod.c652
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c1585
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c1151
-rw-r--r--drivers/staging/tidspbridge/pmgr/dmm.c533
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c1906
-rw-r--r--drivers/staging/tidspbridge/pmgr/io.c142
-rw-r--r--drivers/staging/tidspbridge/pmgr/ioobj.h38
-rw-r--r--drivers/staging/tidspbridge/pmgr/msg.c129
-rw-r--r--drivers/staging/tidspbridge/pmgr/msgobj.h38
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c1512
-rw-r--r--drivers/staging/tidspbridge/rmgr/disp.c752
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c929
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c656
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.h28
-rw-r--r--drivers/staging/tidspbridge/rmgr/dspdrv.c142
-rw-r--r--drivers/staging/tidspbridge/rmgr/mgr.c375
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c1974
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c3234
-rw-r--r--drivers/staging/tidspbridge/rmgr/proc.c1936
-rw-r--r--drivers/staging/tidspbridge/rmgr/pwr.c176
-rw-r--r--drivers/staging/tidspbridge/rmgr/rmm.c537
-rw-r--r--drivers/staging/tidspbridge/rmgr/strm.c853
-rw-r--r--drivers/staging/tidspbridge/services/cfg.c253
-rw-r--r--drivers/staging/tidspbridge/services/ntfy.c31
-rw-r--r--drivers/staging/tidspbridge/services/services.c70
-rw-r--r--drivers/staging/tidspbridge/services/sync.c104
-rw-r--r--drivers/staging/usbip/stub.h17
-rw-r--r--drivers/staging/usbip/stub_dev.c101
-rw-r--r--drivers/staging/usbip/stub_main.c65
-rw-r--r--drivers/staging/usbip/stub_rx.c101
-rw-r--r--drivers/staging/usbip/usbip_common.h2
-rw-r--r--drivers/staging/usbip/vhci_hcd.c6
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c12
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c10
-rw-r--r--drivers/staging/vme/devices/vme_user.c99
-rw-r--r--drivers/staging/vt6655/80211hdr.h118
-rw-r--r--drivers/staging/vt6655/80211mgr.c118
-rw-r--r--drivers/staging/vt6655/80211mgr.h336
-rw-r--r--drivers/staging/vt6655/IEEE11h.c68
-rw-r--r--drivers/staging/vt6655/IEEE11h.h2
-rw-r--r--drivers/staging/vt6655/Makefile1
-rw-r--r--drivers/staging/vt6655/TODO3
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c112
-rw-r--r--drivers/staging/vt6655/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6655/baseband.c196
-rw-r--r--drivers/staging/vt6655/baseband.h52
-rw-r--r--drivers/staging/vt6655/bssdb.c351
-rw-r--r--drivers/staging/vt6655/bssdb.h246
-rw-r--r--drivers/staging/vt6655/card.c1369
-rw-r--r--drivers/staging/vt6655/card.h158
-rw-r--r--drivers/staging/vt6655/channel.c835
-rw-r--r--drivers/staging/vt6655/channel.h58
-rw-r--r--drivers/staging/vt6655/country.h15
-rw-r--r--drivers/staging/vt6655/datarate.c90
-rw-r--r--drivers/staging/vt6655/datarate.h24
-rw-r--r--drivers/staging/vt6655/desc.h352
-rw-r--r--drivers/staging/vt6655/device.h526
-rw-r--r--drivers/staging/vt6655/device_cfg.h8
-rw-r--r--drivers/staging/vt6655/device_main.c566
-rw-r--r--drivers/staging/vt6655/dpc.c516
-rw-r--r--drivers/staging/vt6655/dpc.h2
-rw-r--r--drivers/staging/vt6655/hostap.c84
-rw-r--r--drivers/staging/vt6655/iocmd.h347
-rw-r--r--drivers/staging/vt6655/ioctl.c128
-rw-r--r--drivers/staging/vt6655/ioctl.h6
-rw-r--r--drivers/staging/vt6655/iwctl.c152
-rw-r--r--drivers/staging/vt6655/key.c288
-rw-r--r--drivers/staging/vt6655/key.h114
-rw-r--r--drivers/staging/vt6655/mac.c363
-rw-r--r--drivers/staging/vt6655/mac.h203
-rw-r--r--drivers/staging/vt6655/mib.c110
-rw-r--r--drivers/staging/vt6655/mib.h429
-rw-r--r--drivers/staging/vt6655/michael.c46
-rw-r--r--drivers/staging/vt6655/michael.h6
-rw-r--r--drivers/staging/vt6655/power.c74
-rw-r--r--drivers/staging/vt6655/power.h12
-rw-r--r--drivers/staging/vt6655/rc4.c40
-rw-r--r--drivers/staging/vt6655/rc4.h12
-rw-r--r--drivers/staging/vt6655/rf.c196
-rw-r--r--drivers/staging/vt6655/rf.h20
-rw-r--r--drivers/staging/vt6655/rxtx.c1104
-rw-r--r--drivers/staging/vt6655/rxtx.h62
-rw-r--r--drivers/staging/vt6655/srom.c82
-rw-r--r--drivers/staging/vt6655/srom.h80
-rw-r--r--drivers/staging/vt6655/tcrc.c12
-rw-r--r--drivers/staging/vt6655/tcrc.h6
-rw-r--r--drivers/staging/vt6655/tether.c20
-rw-r--r--drivers/staging/vt6655/tether.h60
-rw-r--r--drivers/staging/vt6655/tkip.c14
-rw-r--r--drivers/staging/vt6655/tkip.h10
-rw-r--r--drivers/staging/vt6655/tmacro.h12
-rw-r--r--drivers/staging/vt6655/ttype.h57
-rw-r--r--drivers/staging/vt6655/upc.h22
-rw-r--r--drivers/staging/vt6655/vntwifi.c174
-rw-r--r--drivers/staging/vt6655/vntwifi.h82
-rw-r--r--drivers/staging/vt6655/wcmd.c169
-rw-r--r--drivers/staging/vt6655/wcmd.h18
-rw-r--r--drivers/staging/vt6655/wctl.c62
-rw-r--r--drivers/staging/vt6655/wctl.h9
-rw-r--r--drivers/staging/vt6655/wmgr.c807
-rw-r--r--drivers/staging/vt6655/wmgr.h192
-rw-r--r--drivers/staging/vt6655/wpa.c62
-rw-r--r--drivers/staging/vt6655/wpa.h8
-rw-r--r--drivers/staging/vt6655/wpa2.c70
-rw-r--r--drivers/staging/vt6655/wpa2.h8
-rw-r--r--drivers/staging/vt6655/wpactl.c91
-rw-r--r--drivers/staging/vt6655/wpactl.h4
-rw-r--r--drivers/staging/vt6655/wroute.c44
-rw-r--r--drivers/staging/vt6655/wroute.h2
-rw-r--r--drivers/staging/vt6656/80211mgr.c516
-rw-r--r--drivers/staging/vt6656/80211mgr.h33
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c4
-rw-r--r--drivers/staging/vt6656/baseband.c6
-rw-r--r--drivers/staging/vt6656/baseband.h19
-rw-r--r--drivers/staging/vt6656/bssdb.c117
-rw-r--r--drivers/staging/vt6656/bssdb.h12
-rw-r--r--drivers/staging/vt6656/card.c7
-rw-r--r--drivers/staging/vt6656/channel.c45
-rw-r--r--drivers/staging/vt6656/channel.h12
-rw-r--r--drivers/staging/vt6656/control.c2
-rw-r--r--drivers/staging/vt6656/control.h14
-rw-r--r--drivers/staging/vt6656/datarate.c69
-rw-r--r--drivers/staging/vt6656/desc.h79
-rw-r--r--drivers/staging/vt6656/device.h52
-rw-r--r--drivers/staging/vt6656/device_cfg.h16
-rw-r--r--drivers/staging/vt6656/dpc.c48
-rw-r--r--drivers/staging/vt6656/firmware.c2
-rw-r--r--drivers/staging/vt6656/int.c7
-rw-r--r--drivers/staging/vt6656/int.h8
-rw-r--r--drivers/staging/vt6656/iocmd.h16
-rw-r--r--drivers/staging/vt6656/ioctl.c12
-rw-r--r--drivers/staging/vt6656/iowpa.h47
-rw-r--r--drivers/staging/vt6656/iwctl.c131
-rw-r--r--drivers/staging/vt6656/iwctl.h4
-rw-r--r--drivers/staging/vt6656/key.c34
-rw-r--r--drivers/staging/vt6656/mac.c4
-rw-r--r--drivers/staging/vt6656/mac.h8
-rw-r--r--drivers/staging/vt6656/main_usb.c321
-rw-r--r--drivers/staging/vt6656/mib.c25
-rw-r--r--drivers/staging/vt6656/mib.h25
-rw-r--r--drivers/staging/vt6656/michael.c4
-rw-r--r--drivers/staging/vt6656/michael.h6
-rw-r--r--drivers/staging/vt6656/power.c10
-rw-r--r--drivers/staging/vt6656/power.h2
-rw-r--r--drivers/staging/vt6656/rf.h19
-rw-r--r--drivers/staging/vt6656/rndis.h2
-rw-r--r--drivers/staging/vt6656/rxtx.c79
-rw-r--r--drivers/staging/vt6656/rxtx.h6
-rw-r--r--drivers/staging/vt6656/tether.h41
-rw-r--r--drivers/staging/vt6656/tkip.c6
-rw-r--r--drivers/staging/vt6656/ttype.h31
-rw-r--r--drivers/staging/vt6656/usbpipe.c99
-rw-r--r--drivers/staging/vt6656/usbpipe.h33
-rw-r--r--drivers/staging/vt6656/wcmd.c41
-rw-r--r--drivers/staging/vt6656/wcmd.h3
-rw-r--r--drivers/staging/vt6656/wctl.c24
-rw-r--r--drivers/staging/vt6656/wmgr.c75
-rw-r--r--drivers/staging/vt6656/wmgr.h11
-rw-r--r--drivers/staging/vt6656/wpa.c6
-rw-r--r--drivers/staging/vt6656/wpa2.h18
-rw-r--r--drivers/staging/vt6656/wpactl.c12
-rw-r--r--drivers/staging/winbond/mac_structures.h144
-rw-r--r--drivers/staging/winbond/phy_calibration.c834
-rw-r--r--drivers/staging/winbond/reg.c36
-rw-r--r--drivers/staging/winbond/wbusb.c2
-rw-r--r--drivers/staging/wlags49_h2/hcf.c2
-rw-r--r--drivers/staging/wlags49_h2/mdd.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c500
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h1
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c16
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.h8
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c37
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h2
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/Makefile1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c763
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c34
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c70
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h42
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h24
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h4
-rw-r--r--drivers/staging/wlan-ng/p80211meta.h16
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h62
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h24
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c77
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h9
-rw-r--r--drivers/staging/wlan-ng/p80211req.c18
-rw-r--r--drivers/staging/wlan-ng/p80211wext.c1690
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c118
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c24
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c76
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c91
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c2
-rw-r--r--drivers/staging/xgifb/XGI.h10
-rw-r--r--drivers/staging/xgifb/XGI_accel.c307
-rw-r--r--drivers/staging/xgifb/XGI_accel.h14
-rw-r--r--drivers/staging/xgifb/XGI_main.h182
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c255
-rw-r--r--drivers/staging/xgifb/XGIfb.h31
-rw-r--r--drivers/staging/xgifb/osdef.h153
-rw-r--r--drivers/staging/xgifb/vb_def.h4
-rw-r--r--drivers/staging/xgifb/vb_ext.c510
-rw-r--r--drivers/staging/xgifb/vb_ext.h26
-rw-r--r--drivers/staging/xgifb/vb_init.c895
-rw-r--r--drivers/staging/xgifb/vb_init.h4
-rw-r--r--drivers/staging/xgifb/vb_setmode.c2060
-rw-r--r--drivers/staging/xgifb/vb_setmode.h66
-rw-r--r--drivers/staging/xgifb/vb_struct.h951
-rw-r--r--drivers/staging/xgifb/vb_table.h633
-rw-r--r--drivers/staging/xgifb/vb_util.c137
-rw-r--r--drivers/staging/xgifb/vb_util.h20
-rw-r--r--drivers/staging/xgifb/vgatypes.h271
-rw-r--r--drivers/staging/zram/Kconfig29
-rw-r--r--drivers/staging/zram/Makefile3
-rw-r--r--drivers/staging/zram/xvmalloc.c (renamed from drivers/staging/ramzswap/xvmalloc.c)0
-rw-r--r--drivers/staging/zram/xvmalloc.h (renamed from drivers/staging/ramzswap/xvmalloc.h)0
-rw-r--r--drivers/staging/zram/xvmalloc_int.h (renamed from drivers/staging/ramzswap/xvmalloc_int.h)0
-rw-r--r--drivers/staging/zram/zram.txt62
-rw-r--r--drivers/staging/zram/zram_drv.c806
-rw-r--r--drivers/staging/zram/zram_drv.h (renamed from drivers/staging/ramzswap/ramzswap_drv.h)71
-rw-r--r--drivers/staging/zram/zram_ioctl.h (renamed from drivers/staging/ramzswap/ramzswap_ioctl.h)21
-rw-r--r--drivers/telephony/ixj_pcmcia.c20
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_pdrv_genirq.c1
-rw-r--r--drivers/uio/uio_sercos3.c2
-rw-r--r--drivers/usb/Kconfig3
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/cxacru.c53
-rw-r--r--drivers/usb/atm/speedtch.c10
-rw-r--r--drivers/usb/atm/ueagle-atm.c7
-rw-r--r--drivers/usb/atm/usbatm.c23
-rw-r--r--drivers/usb/atm/usbatm.h22
-rw-r--r--drivers/usb/atm/xusbatm.c10
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c4
-rw-r--r--drivers/usb/class/cdc-acm.c35
-rw-r--r--drivers/usb/class/usblp.c371
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/devio.c7
-rw-r--r--drivers/usb/core/driver.c11
-rw-r--r--drivers/usb/core/endpoint.c9
-rw-r--r--drivers/usb/core/file.c35
-rw-r--r--drivers/usb/core/generic.c4
-rw-r--r--drivers/usb/core/hcd-pci.c202
-rw-r--r--drivers/usb/core/hcd.c79
-rw-r--r--drivers/usb/core/hub.c13
-rw-r--r--drivers/usb/core/inode.c4
-rw-r--r--drivers/usb/core/message.c23
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/urb.c50
-rw-r--r--drivers/usb/core/usb.c6
-rw-r--r--drivers/usb/gadget/Kconfig52
-rw-r--r--drivers/usb/gadget/Makefile3
-rw-r--r--drivers/usb/gadget/audio.c4
-rw-r--r--drivers/usb/gadget/cdc2.c4
-rw-r--r--drivers/usb/gadget/composite.c73
-rw-r--r--drivers/usb/gadget/dbgp.c434
-rw-r--r--drivers/usb/gadget/dummy_hcd.c6
-rw-r--r--drivers/usb/gadget/ether.c6
-rw-r--r--drivers/usb/gadget/f_audio.c6
-rw-r--r--drivers/usb/gadget/f_fs.c38
-rw-r--r--drivers/usb/gadget/f_hid.c6
-rw-r--r--drivers/usb/gadget/f_loopback.c4
-rw-r--r--drivers/usb/gadget/f_mass_storage.c125
-rw-r--r--drivers/usb/gadget/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/file_storage.c104
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c11
-rw-r--r--drivers/usb/gadget/g_ffs.c176
-rw-r--r--drivers/usb/gadget/gmidi.c4
-rw-r--r--drivers/usb/gadget/hid.c4
-rw-r--r--drivers/usb/gadget/inode.c16
-rw-r--r--drivers/usb/gadget/langwell_udc.c6
-rw-r--r--drivers/usb/gadget/m66592-udc.c1
-rw-r--r--drivers/usb/gadget/mass_storage.c24
-rw-r--r--drivers/usb/gadget/multi.c262
-rw-r--r--drivers/usb/gadget/printer.c9
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/rndis.c12
-rw-r--r--drivers/usb/gadget/rndis.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c151
-rw-r--r--drivers/usb/gadget/serial.c4
-rw-r--r--drivers/usb/gadget/storage_common.c105
-rw-r--r--drivers/usb/gadget/u_ether.c15
-rw-r--r--drivers/usb/gadget/u_serial.c1
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/gadget/webcam.c4
-rw-r--r--drivers/usb/gadget/zero.c2
-rw-r--r--drivers/usb/host/Kconfig11
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ehci-dbg.c196
-rw-r--r--drivers/usb/host/ehci-fsl.c3
-rw-r--r--drivers/usb/host/ehci-hcd.c49
-rw-r--r--drivers/usb/host/ehci-hub.c25
-rw-r--r--drivers/usb/host/ehci-lpm.c83
-rw-r--r--drivers/usb/host/ehci-omap.c36
-rw-r--r--drivers/usb/host/ehci-pci.c31
-rw-r--r--drivers/usb/host/ehci-ppc-of.c18
-rw-r--r--drivers/usb/host/ehci-q.c3
-rw-r--r--drivers/usb/host/ehci-sched.c182
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c12
-rw-r--r--drivers/usb/host/ehci.h18
-rw-r--r--drivers/usb/host/fhci-hcd.c4
-rw-r--r--drivers/usb/host/hwa-hc.c4
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/isp1362.h24
-rw-r--r--drivers/usb/host/isp1760-hcd.c5
-rw-r--r--drivers/usb/host/isp1760-if.c4
-rw-r--r--drivers/usb/host/ohci-dbg.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c13
-rw-r--r--drivers/usb/host/ohci-hub.c23
-rw-r--r--drivers/usb/host/ohci-jz4740.c276
-rw-r--r--drivers/usb/host/ohci-pci.c2
-rw-r--r--drivers/usb/host/ohci-pnx4008.c2
-rw-r--r--drivers/usb/host/ohci-ppc-of.c6
-rw-r--r--drivers/usb/host/ohci-ssb.c52
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c7
-rw-r--r--drivers/usb/host/sl811-hcd.c5
-rw-r--r--drivers/usb/host/sl811_cs.c24
-rw-r--r--drivers/usb/host/uhci-debug.c23
-rw-r--r--drivers/usb/host/uhci-hcd.c87
-rw-r--r--drivers/usb/host/uhci-hcd.h7
-rw-r--r--drivers/usb/host/uhci-hub.c6
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/usb/host/whci/hcd.c2
-rw-r--r--drivers/usb/host/whci/qset.c2
-rw-r--r--drivers/usb/host/xhci-mem.c101
-rw-r--r--drivers/usb/host/xhci-pci.c9
-rw-r--r--drivers/usb/host/xhci-ring.c1338
-rw-r--r--drivers/usb/host/xhci.c344
-rw-r--r--drivers/usb/host/xhci.h30
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/misc/ftdi-elan.c4
-rw-r--r--drivers/usb/misc/iowarrior.c27
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/rio500.c15
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c10
-rw-r--r--drivers/usb/misc/usblcd.c24
-rw-r--r--drivers/usb/misc/usbtest.c14
-rw-r--r--drivers/usb/mon/mon_bin.c24
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/musb_core.c7
-rw-r--r--drivers/usb/musb/musb_debugfs.c37
-rw-r--r--drivers/usb/musb/musb_gadget.c75
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c12
-rw-r--r--drivers/usb/musb/musb_host.c6
-rw-r--r--drivers/usb/musb/musb_virthub.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c5
-rw-r--r--drivers/usb/musb/omap2430.c6
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/otg/twl4030-usb.c74
-rw-r--r--drivers/usb/otg/ulpi.c134
-rw-r--r--drivers/usb/serial/Kconfig9
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/cp210x.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h31
-rw-r--r--drivers/usb/serial/generic.c26
-rw-r--r--drivers/usb/serial/io_ti.c8
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c54
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c35
-rw-r--r--drivers/usb/serial/navman.c1
-rw-r--r--drivers/usb/serial/option.c143
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/ssu100.c774
-rw-r--r--drivers/usb/serial/usb-serial.c55
-rw-r--r--drivers/usb/storage/freecom.c23
-rw-r--r--drivers/usb/storage/isd200.c3
-rw-r--r--drivers/usb/storage/usb.c4
-rw-r--r--drivers/usb/usb-skeleton.c8
-rw-r--r--drivers/uwb/address.c5
-rw-r--r--drivers/uwb/wlp/wss-lc.c7
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c141
-rw-r--r--drivers/vhost/vhost.h18
-rw-r--r--drivers/video/Kconfig29
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/amba-clcd.c10
-rw-r--r--drivers/video/bw2.c8
-rw-r--r--drivers/video/cg14.c10
-rw-r--r--drivers/video/cg3.c8
-rw-r--r--drivers/video/cg6.c10
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/bitblit.c2
-rw-r--r--drivers/video/console/fbcon.c37
-rw-r--r--drivers/video/console/fbcon.h1
-rw-r--r--drivers/video/console/fbcon_ccw.c2
-rw-r--r--drivers/video/console/fbcon_cw.c2
-rw-r--r--drivers/video/console/fbcon_ud.c2
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/efifb.c111
-rw-r--r--drivers/video/fbmem.c6
-rw-r--r--drivers/video/ffb.c8
-rw-r--r--drivers/video/fsl-diu-fb.c145
-rw-r--r--drivers/video/fsl-diu-fb.h223
-rw-r--r--drivers/video/igafb.c5
-rw-r--r--drivers/video/imxfb.c19
-rw-r--r--drivers/video/jz4740_fb.c847
-rw-r--r--drivers/video/leo.c10
-rw-r--r--drivers/video/matrox/i2c-matroxfb.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.h4
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c4
-rw-r--r--drivers/video/msm/mddi.c4
-rw-r--r--drivers/video/msm/mdp.c1
-rw-r--r--drivers/video/offb.c3
-rw-r--r--drivers/video/omap/lcd_apollon.c3
-rw-r--r--drivers/video/omap2/displays/panel-taal.c567
-rw-r--r--drivers/video/omap2/displays/panel-toppoly-tdo35s.c8
-rw-r--r--drivers/video/omap2/dss/dispc.c16
-rw-r--r--drivers/video/omap2/dss/display.c4
-rw-r--r--drivers/video/omap2/dss/dsi.c463
-rw-r--r--drivers/video/omap2/dss/dss.c6
-rw-r--r--drivers/video/omap2/dss/dss.h11
-rw-r--r--drivers/video/omap2/dss/manager.c204
-rw-r--r--drivers/video/omap2/dss/overlay.c2
-rw-r--r--drivers/video/omap2/dss/rfbi.c2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c188
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c229
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c70
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h29
-rw-r--r--drivers/video/p9100.c8
-rw-r--r--drivers/video/platinumfb.c4
-rw-r--r--drivers/video/pxa168fb.c10
-rw-r--r--drivers/video/s3c-fb.c811
-rw-r--r--drivers/video/sh_mipi_dsi.c505
-rw-r--r--drivers/video/sh_mobile_hdmi.c1028
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c196
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/sunxvr1000.c8
-rw-r--r--drivers/video/tcx.c10
-rw-r--r--drivers/video/tdfxfb.c4
-rw-r--r--drivers/video/uvesafb.c7
-rw-r--r--drivers/video/via/chip.h1
-rw-r--r--drivers/video/via/hw.c587
-rw-r--r--drivers/video/via/hw.h14
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/video/via/ioctl.h3
-rw-r--r--drivers/video/via/lcd.c117
-rw-r--r--drivers/video/via/lcd.h5
-rw-r--r--drivers/video/via/share.h309
-rw-r--r--drivers/video/via/via-core.c22
-rw-r--r--drivers/video/via/via-gpio.c2
-rw-r--r--drivers/video/via/viafbdev.c284
-rw-r--r--drivers/video/vt8623fb.c2
-rw-r--r--drivers/video/w100fb.c4
-rw-r--r--drivers/video/xen-fbfront.c2
-rw-r--r--drivers/video/xilinxfb.c6
-rw-r--r--drivers/watchdog/Kconfig61
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/cpwd.c10
-rw-r--r--drivers/watchdog/f71808e_wdt.c768
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c310
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c4
-rw-r--r--drivers/watchdog/octeon-wdt-main.c745
-rw-r--r--drivers/watchdog/octeon-wdt-nmi.S64
-rw-r--r--drivers/watchdog/riowd.c8
-rw-r--r--drivers/watchdog/s3c2410_wdt.c17
-rw-r--r--drivers/watchdog/sb_wdog.c12
-rw-r--r--drivers/watchdog/sch311x_wdt.c4
-rw-r--r--drivers/watchdog/sp805_wdt.c387
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/watchdog/wdt_pci.c15
-rw-r--r--drivers/xen/Kconfig14
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c15
-rw-r--r--drivers/xen/events.c117
-rw-r--r--drivers/xen/grant-table.c77
-rw-r--r--drivers/xen/manage.c48
-rw-r--r--drivers/xen/platform-pci.c207
-rw-r--r--drivers/xen/swiotlb-xen.c515
-rw-r--r--drivers/xen/xenbus/xenbus_client.c90
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c51
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c57
-rw-r--r--drivers/xen/xenfs/super.c4
-rw-r--r--drivers/xen/xenfs/xenbus.c3
-rw-r--r--drivers/zorro/proc.c17
3053 files changed, 254991 insertions, 93981 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 91874e04855..a2aea53a75e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI) += spi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
-obj-$(CONFIG_FIREWIRE) += firewire/
+obj-y += firewire/
obj-y += ieee1394/
obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
@@ -101,7 +101,9 @@ obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-$(CONFIG_ARCH_SHMOBILE) += sh/
-obj-$(CONFIG_GENERIC_TIME) += clocksource/
+ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
+obj-y += clocksource/
+endif
obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_DCA) += dca/
obj-$(CONFIG_HID) += hid/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 08e0140920e..88681aca88c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -54,17 +54,10 @@ config ACPI_PROCFS
they have been replaced by functions in /sys.
The deprecated files (and their replacements) include:
- /proc/acpi/sleep (/sys/power/state)
- /proc/acpi/info (/sys/module/acpi/parameters/acpica_version)
- /proc/acpi/dsdt (/sys/firmware/acpi/tables/DSDT)
- /proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
- /proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
- /proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
- /proc/acpi/processor/*/power (/sys/devices/system/cpu/*/cpuidle/*)
- /proc/acpi/processor/*/performance (/sys/devices/system/cpu/*/
- cpufreq/*)
/proc/acpi/processor/*/throttling (/sys/class/thermal/
cooling_device*/*)
+ /proc/acpi/video/*/brightness (/sys/class/backlight/)
+ /proc/acpi/thermal_zone/*/* (/sys/class/thermal/)
This option has no effect on /proc/acpi/ files
and functions which do not yet exist in /sys.
@@ -112,7 +105,7 @@ config ACPI_EC_DEBUGFS
Be aware that using this interface can confuse your Embedded
Controller in a way that a normal reboot is not enough. You then
- have to power of your system, and remove the laptop battery for
+ have to power off your system, and remove the laptop battery for
some seconds.
An Embedded Controller typically is available on laptops and reads
sensor values like battery state and temperature.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 833b582d176..3d031d02e54 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,8 +37,9 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
acpi-y += power.o
-acpi-y += system.o event.o
-acpi-$(CONFIG_ACPI_DEBUG) += debug.o
+acpi-y += event.o
+acpi-y += sysfs.o
+acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
ifdef CONFIG_ACPI_VIDEO
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 446aced33af..6b115f6c431 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -77,7 +77,7 @@ static void power_saving_mwait_init(void)
power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
(highest_subcstate - 1);
-#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
+#if defined(CONFIG_X86)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
device_remove_file(&device->dev, &dev_attr_rrtime);
}
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query firmware how many CPUs should be idle
+ * return -1 on failure
+ */
+static int acpi_pad_pur(acpi_handle handle)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package;
- int rev, num, ret = -EINVAL;
+ int num = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
- return -EINVAL;
+ return num;
if (!buffer.length || !buffer.pointer)
- return -EINVAL;
+ return num;
package = buffer.pointer;
- if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
- goto out;
- rev = package->package.elements[0].integer.value;
- num = package->package.elements[1].integer.value;
- if (rev != 1 || num < 0)
- goto out;
- *num_cpus = num;
- ret = 0;
-out:
+
+ if (package->type == ACPI_TYPE_PACKAGE &&
+ package->package.count == 2 &&
+ package->package.elements[0].integer.value == 1) /* rev 1 */
+
+ num = package->package.elements[1].integer.value;
+
kfree(buffer.pointer);
- return ret;
+ return num;
}
/* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
- if (acpi_pad_pur(handle, &num_cpus)) {
+ num_cpus = acpi_pad_pur(handle);
+ if (num_cpus < 0) {
mutex_unlock(&isolated_cpus_lock);
return;
}
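
The reworked acpi_pad_pur() above now returns the requested idle-CPU count directly, or -1 on any failure, after checking that _PUR produced a two-element package whose first element (the revision) equals 1. A minimal standalone sketch of that validate-then-extract pattern, using stand-in types rather than the real union acpi_object:

#include <stdio.h>

/* Stand-in for union acpi_object: a package of two integers. */
struct pur_package {
	int type;		/* expected: TYPE_PACKAGE */
	int count;		/* expected: 2 */
	long long element[2];	/* [0] = revision, [1] = number of CPUs to idle */
};

#define TYPE_PACKAGE 4

/* Return the number of CPUs firmware wants idled, or -1 on any failure. */
static int parse_pur(const struct pur_package *pkg)
{
	int num = -1;

	if (pkg && pkg->type == TYPE_PACKAGE && pkg->count == 2 &&
	    pkg->element[0] == 1)	/* revision must be 1 */
		num = (int)pkg->element[1];

	return num;
}

int main(void)
{
	struct pur_package good = { TYPE_PACKAGE, 2, { 1, 3 } };
	struct pur_package bad  = { TYPE_PACKAGE, 2, { 2, 3 } };	/* wrong revision */

	printf("%d %d\n", parse_pur(&good), parse_pur(&bad));	/* prints: 3 -1 */
	return 0;
}
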
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c3f43daa8be..36867cd70ea 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -78,7 +78,13 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
acpi_status
-acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 899d68afc3c..1d192142c69 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -100,13 +100,6 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
/*
- * Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
- * RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
- * be enabled just before going to sleep.
- */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
-
-/*
* Optionally use default values for the ACPI register widths. Set this to
* TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
*/
@@ -115,7 +108,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
/*
* Optionally enable output from the AML Debug Object.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+u32 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
/*
* Optionally copy the entire DSDT to local memory (instead of simply
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 32391588e16..120b3af5659 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -90,16 +90,13 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
/*
* hwgpe - GPE support
*/
-u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
+u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info);
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
acpi_status
-acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 147a7e6bd38..7dad9160f20 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -184,8 +184,9 @@ struct acpi_namespace_node {
u8 flags; /* Miscellaneous flags */
acpi_owner_id owner_id; /* Node creator */
union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
+ struct acpi_namespace_node *parent; /* Parent node */
struct acpi_namespace_node *child; /* First child */
- struct acpi_namespace_node *peer; /* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
+ struct acpi_namespace_node *peer; /* First peer */
/*
* The following fields are used by the ASL compiler and disassembler only
@@ -199,7 +200,7 @@ struct acpi_namespace_node {
/* Namespace Node flags */
-#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
+#define ANOBJ_RESERVED 0x01 /* Available for use */
#define ANOBJ_TEMPORARY 0x02 /* Node is create by a method and is temporary */
#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
@@ -411,6 +412,7 @@ struct acpi_handler_info {
acpi_event_handler address; /* Address of handler, if any */
void *context; /* Context to be passed to handler */
struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
+ u8 orig_flags; /* Original misc info about this GPE */
};
union acpi_gpe_dispatch_info {
@@ -428,7 +430,6 @@ struct acpi_gpe_event_info {
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
u8 runtime_count; /* References to a run GPE */
- u8 wakeup_count; /* References to a wake GPE */
};
/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -853,6 +854,7 @@ struct acpi_bit_register_info {
ACPI_BITMASK_POWER_BUTTON_STATUS | \
ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
ACPI_BITMASK_RT_CLOCK_STATUS | \
+ ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
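
The aclocal.h change above gives each namespace node an explicit parent pointer and makes the peer field a plain NULL-terminated sibling link, retiring the old ANOBJ_END_OF_PEER_LIST convention where the last peer pointed back at the parent. A self-contained sketch of the new linkage, with illustrative names in place of struct acpi_namespace_node:

#include <stdio.h>

struct node {
	const char  *name;
	struct node *parent;	/* new: explicit parent link */
	struct node *child;	/* first child */
	struct node *peer;	/* next sibling, NULL at the end of the list */
};

/* Walking the children no longer needs an end-of-list flag check. */
static void walk_children(const struct node *dev)
{
	for (const struct node *n = dev->child; n; n = n->peer)
		printf("%s (parent %s)\n", n->name, n->parent->name);
}

int main(void)
{
	struct node root = { "\\",   NULL,  NULL, NULL };
	struct node a    = { "_SB",  &root, NULL, NULL };
	struct node b    = { "_GPE", &root, NULL, NULL };

	root.child = &a;
	a.peer = &b;		/* b.peer stays NULL: end of the peer list */

	walk_children(&root);
	return 0;
}
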
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 258159cfcdf..9f60ff00220 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -369,11 +369,4 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle);
void acpi_ns_terminate(void);
-struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
- *node);
-
-struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
- acpi_namespace_node
- *node);
-
#endif /* __ACNAMESP_H__ */
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index cde18ea8265..54857fa87aa 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -91,14 +91,14 @@
/* Values for Flag byte above */
-#define AOPOBJ_AML_CONSTANT 0x01
-#define AOPOBJ_STATIC_POINTER 0x02
-#define AOPOBJ_DATA_VALID 0x04
-#define AOPOBJ_OBJECT_INITIALIZED 0x08
-#define AOPOBJ_SETUP_COMPLETE 0x10
-#define AOPOBJ_SINGLE_DATUM 0x20
-#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
-#define AOPOBJ_MODULE_LEVEL 0x80
+#define AOPOBJ_AML_CONSTANT 0x01 /* Integer is an AML constant */
+#define AOPOBJ_STATIC_POINTER 0x02 /* Data is part of an ACPI table, don't delete */
+#define AOPOBJ_DATA_VALID 0x04 /* Object is initialized and data is valid */
+#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */
+#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */
+#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */
+#define AOPOBJ_MODULE_LEVEL 0x40 /* Method is actually module-level code */
+#define AOPOBJ_MODIFIED_NAMESPACE 0x80 /* Method modified the namespace */
/******************************************************************************
*
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 97116082cb6..10998d369ad 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -503,15 +503,16 @@ static const union acpi_predefined_info predefined_names[] =
{{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
- {{{0,0,0,0}, 0,0}} /* Table terminator */
-};
+ /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
-#if 0
- /* Not implemented */
+ {{"_WDG", 0, ACPI_RTYPE_BUFFER}},
+ {{"_WED", 1,
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}},
- {{"_WDG", 0, ACPI_RTYPE_BUFFER}}, /* MS Extension */
- {{"_WED", 1, ACPI_RTYPE_PACKAGE}}, /* MS Extension */
+ {{{0, 0, 0, 0}, 0, 0}} /* Table terminator */
+};
+#if 0
/* This is an internally implemented control method, no need to check */
{{"_OSI", 1, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 161bc0e3d70..6e5dd97949f 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -127,22 +127,22 @@ struct acpi_walk_state {
acpi_parse_upwards ascending_callback;
};
-/* Info used by acpi_ps_init_objects */
+/* Info used by acpi_ns_initialize_objects and acpi_ds_initialize_objects */
struct acpi_init_walk_info {
- u16 method_count;
- u16 device_count;
- u16 op_region_count;
- u16 field_count;
- u16 buffer_count;
- u16 package_count;
- u16 op_region_init;
- u16 field_init;
- u16 buffer_init;
- u16 package_init;
- u16 object_count;
- acpi_owner_id owner_id;
u32 table_index;
+ u32 object_count;
+ u32 method_count;
+ u32 device_count;
+ u32 op_region_count;
+ u32 field_count;
+ u32 buffer_count;
+ u32 package_count;
+ u32 op_region_init;
+ u32 field_init;
+ u32 buffer_init;
+ u32 package_init;
+ acpi_owner_id owner_id;
};
struct acpi_get_devices_info {
@@ -201,11 +201,11 @@ struct acpi_evaluate_info {
/* Info used by acpi_ns_initialize_devices */
struct acpi_device_walk_info {
- u16 device_count;
- u16 num_STA;
- u16 num_INI;
struct acpi_table_desc *table_desc;
struct acpi_evaluate_info *evaluate_info;
+ u32 device_count;
+ u32 num_STA;
+ u32 num_INI;
};
/* TBD: [Restructure] Merge with struct above */
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index abe140318a7..cc4a38c5755 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -171,12 +171,12 @@ acpi_ds_initialize_objects(u32 table_index,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
- info.method_count = 0;
- info.op_region_count = 0;
- info.object_count = 0;
- info.device_count = 0;
- info.table_index = table_index;
+ /* Set all init info to zero */
+
+ ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+
info.owner_id = owner_id;
+ info.table_index = table_index;
/* Walk entire namespace from the supplied root */
@@ -204,13 +204,13 @@ acpi_ds_initialize_objects(u32 table_index,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
+ "\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n",
table->signature, owner_id, info.object_count,
info.device_count, info.method_count,
info.op_region_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%hd Methods, %hd Regions\n", info.method_count,
+ "%u Methods, %u Regions\n", info.method_count,
info.op_region_count));
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 2a9a561c2f0..64750ee96e2 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -584,8 +584,22 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* want make the objects permanent.
*/
if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
- acpi_ns_delete_namespace_by_owner(method_desc->method.
- owner_id);
+
+ /* Delete any direct children of (created by) this method */
+
+ acpi_ns_delete_namespace_subtree(walk_state->
+ method_node);
+
+ /*
+ * Delete any objects that were created by this method
+ * elsewhere in the namespace (if any were created).
+ */
+ if (method_desc->method.
+ flags & AOPOBJ_MODIFIED_NAMESPACE) {
+ acpi_ns_delete_namespace_by_owner(method_desc->
+ method.
+ owner_id);
+ }
}
}
@@ -605,7 +619,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "*** Completed execution of one thread, %d threads remaining\n",
+ "*** Completed execution of one thread, %u threads remaining\n",
method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */
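
The dsmethod.c change above always deletes the namespace subtree directly under the finished method, but runs the whole-namespace owner-ID sweep only when AOPOBJ_MODIFIED_NAMESPACE says the method created nodes elsewhere. A toy sketch of that flag-guarded teardown, with stand-in helpers rather than ACPICA calls:

#include <stdio.h>

#define FLAG_MODIFIED_NAMESPACE 0x80	/* mirrors AOPOBJ_MODIFIED_NAMESPACE */

struct method_obj {
	unsigned char flags;
	unsigned owner_id;
};

static void delete_subtree(const char *method_node)
{
	printf("delete subtree under %s\n", method_node);
}

static void delete_by_owner(unsigned owner_id)
{
	printf("full namespace sweep for owner %u\n", owner_id);
}

static void terminate_method(const struct method_obj *m, const char *method_node)
{
	/* Direct children created under the method are always removed. */
	delete_subtree(method_node);

	/* The expensive owner-ID sweep runs only if the method created
	 * nodes elsewhere in the namespace. */
	if (m->flags & FLAG_MODIFIED_NAMESPACE)
		delete_by_owner(m->owner_id);
}

int main(void)
{
	struct method_obj plain = { 0, 7 };
	struct method_obj messy = { FLAG_MODIFIED_NAMESPACE, 8 };

	terminate_method(&plain, "\\_SB.M1");	/* subtree only */
	terminate_method(&messy, "\\_SB.M2");	/* subtree + owner sweep */
	return 0;
}
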
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index f3d52f59250..8095306fcd8 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -102,8 +102,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
walk_state->arguments[i].name.integer |= (i << 24);
walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].type = ACPI_TYPE_ANY;
- walk_state->arguments[i].flags =
- ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
+ walk_state->arguments[i].flags = ANOBJ_METHOD_ARG;
}
/* Init the method locals */
@@ -116,8 +115,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
walk_state->local_variables[i].descriptor_type =
ACPI_DESC_TYPE_NAMED;
walk_state->local_variables[i].type = ACPI_TYPE_ANY;
- walk_state->local_variables[i].flags =
- ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
+ walk_state->local_variables[i].flags = ANOBJ_METHOD_LOCAL;
}
return_VOID;
@@ -146,7 +144,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) {
if (walk_state->local_variables[index].object) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%d=%p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%u=%p\n",
index,
walk_state->local_variables[index].
object));
@@ -162,7 +160,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) {
if (walk_state->arguments[index].object) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%d=%p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%u=%p\n",
index,
walk_state->arguments[index].object));
@@ -226,7 +224,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%d args passed to method\n", index));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
}
@@ -323,7 +321,7 @@ acpi_ds_method_data_set_value(u8 type,
ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
+ "NewObj %p Type %2.2X, Refs=%u [%s]\n", object,
type, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
@@ -543,7 +541,7 @@ acpi_ds_store_object_to_local(u8 type,
union acpi_operand_object *new_obj_desc;
ACPI_FUNCTION_TRACE(ds_store_object_to_local);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%u Obj=%p\n",
type, index, obj_desc));
/* Parameter validation */
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 3607adcaf08..8e85f54a8e0 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -81,6 +81,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
{
union acpi_operand_object *obj_desc;
acpi_status status;
+ acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
@@ -172,7 +173,20 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
- switch (op->common.node->type) {
+ /*
+ * Special handling for Alias objects. We need to setup the type
+ * and the Op->Common.Node to point to the Alias target. Note,
+ * Alias has at most one level of indirection internally.
+ */
+ type = op->common.node->type;
+ if (type == ACPI_TYPE_LOCAL_ALIAS) {
+ type = obj_desc->common.type;
+ op->common.node =
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ op->common.node->object);
+ }
+
+ switch (type) {
/*
* For these types, we need the actual node, not the subobject.
* However, the subobject did not get an extra reference count above.
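
The dsobject.c hunk resolves Alias nodes up front: the effective type and node are taken from the alias target, and aliases are at most one level deep internally. A small sketch of that single-level dereference using made-up types:

#include <stdio.h>

enum node_type { TYPE_INTEGER, TYPE_LOCAL_ALIAS };

struct node {
	enum node_type type;
	void *object;		/* for an alias: pointer to the target node */
	const char *name;
};

/* Resolve at most one level of aliasing, as the hunk above does. */
static struct node *resolve_alias(struct node *n)
{
	if (n->type == TYPE_LOCAL_ALIAS)
		return (struct node *)n->object;
	return n;
}

int main(void)
{
	struct node target = { TYPE_INTEGER, NULL, "TGT0" };
	struct node alias  = { TYPE_LOCAL_ALIAS, &target, "ALS0" };
	struct node *real  = resolve_alias(&alias);

	printf("%s resolves to %s (type %d)\n", alias.name, real->name, real->type);
	return 0;
}
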
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 53a7e416f33..7c0e7422717 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -213,7 +213,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
/* Execute the AML code for the term_arg arguments */
- status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+ status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
@@ -257,7 +257,7 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
/* Execute the AML code for the term_arg arguments */
- status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+ status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
@@ -394,7 +394,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
/* Execute the argument AML */
- status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+ status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 306c62ab2e8..15135c25aa9 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -746,7 +746,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
index--;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Arg #%d (%p) done, Arg1=%p\n", index, arg,
+ "Arg #%u (%p) done, Arg1=%p\n", index, arg,
first_arg));
}
@@ -760,7 +760,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
*/
acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
- ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
+ ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index f5795915a2e..303618889da 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -102,9 +102,8 @@ acpi_status acpi_ev_initialize_events(void)
* RETURN: Status
*
* DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
- * (0 and 1). This causes the _PRW methods to be run, so the HW
- * must be fully initialized at this point, including global lock
- * support.
+ * (0 and 1). The HW must be fully initialized at this point,
+ * including global lock support.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 7c2c336006a..f226eac314d 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -54,51 +54,159 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
/*******************************************************************************
*
- * FUNCTION: acpi_ev_update_gpe_enable_masks
+ * FUNCTION: acpi_ev_update_gpe_enable_mask
*
* PARAMETERS: gpe_event_info - GPE to update
*
* RETURN: Status
*
- * DESCRIPTION: Updates GPE register enable masks based upon whether there are
- * references (either wake or run) to this GPE
+ * DESCRIPTION: Updates GPE register enable mask based upon whether there are
+ * runtime references to this GPE
*
******************************************************************************/
acpi_status
-acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
+acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
u32 register_bit;
- ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
+ ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
- register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
- /* Clear the wake/run bits up front */
+ /* Clear the run bit up front */
- ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
- /* Set the mask bits only if there are references to this GPE */
+ /* Set the mask bit only if there are references to this GPE */
if (gpe_event_info->runtime_count) {
- ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
}
- if (gpe_event_info->wakeup_count) {
- ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_enable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to enable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear the given GPE from stale events and enable it.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
+
+ /*
+ * We will only allow a GPE to be enabled if it has either an
+ * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+ * first time it fires.
+ */
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ return_ACPI_STATUS(AE_NO_HANDLER);
}
- return_ACPI_STATUS(AE_OK);
+ /* Clear the GPE (of stale events) */
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Enable the requested GPE */
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+
+ return_ACPI_STATUS(status);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_raw_enable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to enable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ * hardware-enabled.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ acpi_status status = AE_OK;
+
+ if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
+ return_ACPI_STATUS(AE_LIMIT);
+ }
+
+ gpe_event_info->runtime_count++;
+ if (gpe_event_info->runtime_count == 1) {
+ status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_ev_enable_gpe(gpe_event_info);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ gpe_event_info->runtime_count--;
+ }
+ }
+
+ return_ACPI_STATUS(status);
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_raw_disable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to disable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ * removed, the GPE is hardware-disabled.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ acpi_status status = AE_OK;
+
+ if (!gpe_event_info->runtime_count) {
+ return_ACPI_STATUS(AE_LIMIT);
+ }
+
+ gpe_event_info->runtime_count--;
+ if (!gpe_event_info->runtime_count) {
+ status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_hw_low_set_gpe(gpe_event_info,
+ ACPI_GPE_DISABLE);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ gpe_event_info->runtime_count++;
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
/*******************************************************************************
*
@@ -417,8 +525,12 @@ static void acpi_ev_asynch_enable_gpe(void *context)
}
}
- /* Enable this GPE */
- (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
+ /*
+ * Enable this GPE, conditionally. This means that the GPE will only be
+ * physically enabled if the enable_for_run bit is set in the event_info
+ */
+ (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
+
return_VOID;
}
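
The heart of the evgpe.c rework is reference-counted runtime enabling: acpi_raw_enable_gpe() touches the hardware only on the 0 -> 1 transition, acpi_raw_disable_gpe() only on 1 -> 0, and both roll the counter back if the hardware access fails. A minimal standalone sketch of that pattern; struct event, hw_enable() and hw_disable() are illustrative stand-ins, not ACPICA interfaces:

#include <stdio.h>
#include <limits.h>

struct event {
	unsigned char runtime_count;	/* mirrors gpe_event_info->runtime_count */
	int hw_enabled;			/* stands in for the enable register bit */
};

static int hw_enable(struct event *ev)  { ev->hw_enabled = 1; return 0; }
static int hw_disable(struct event *ev) { ev->hw_enabled = 0; return 0; }

/* Add a reference; enable the hardware only on the 0 -> 1 transition. */
static int event_ref(struct event *ev)
{
	int err;

	if (ev->runtime_count == UCHAR_MAX)
		return -1;			/* AE_LIMIT in the real code */

	if (++ev->runtime_count == 1) {
		err = hw_enable(ev);
		if (err) {
			ev->runtime_count--;	/* roll back on failure */
			return err;
		}
	}
	return 0;
}

/* Drop a reference; disable the hardware only on the 1 -> 0 transition. */
static int event_unref(struct event *ev)
{
	int err;

	if (!ev->runtime_count)
		return -1;			/* unbalanced disable */

	if (--ev->runtime_count == 0) {
		err = hw_disable(ev);
		if (err) {
			ev->runtime_count++;	/* roll back on failure */
			return err;
		}
	}
	return 0;
}

int main(void)
{
	struct event ev = { 0, 0 };

	event_ref(&ev);		/* enables the hardware */
	event_ref(&ev);		/* second user: counter only */
	event_unref(&ev);	/* still enabled */
	event_unref(&ev);	/* last user gone: hardware disabled */

	printf("count=%u hw=%d\n", ev.runtime_count, ev.hw_enabled);
	return 0;
}

The rollback on failure keeps the reference count consistent with the hardware state even when a register access goes wrong, which is what lets callers simply pair every enable with a disable.
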
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 341a38ce8aa..85445fb5844 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -439,8 +439,6 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_gpe_walk_info walk_info;
- u32 wake_gpe_count;
u32 gpe_enabled_count;
u32 gpe_index;
u32 gpe_number;
@@ -456,37 +454,9 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
}
/*
- * Runtime option: Should wake GPEs be enabled at runtime? The default
- * is no, they should only be enabled just as the machine goes to sleep.
+ * Enable all GPEs that have a corresponding method. Any other GPEs
+ * within this block must be enabled via the acpi_enable_gpe interface.
*/
- if (acpi_gbl_leave_wake_gpes_disabled) {
- /*
- * Differentiate runtime vs wake GPEs, via the _PRW control methods.
- * Each GPE that has one or more _PRWs that reference it is by
- * definition a wake GPE and will not be enabled while the machine
- * is running.
- */
- walk_info.gpe_block = gpe_block;
- walk_info.gpe_device = gpe_device;
- walk_info.execute_by_owner_id = FALSE;
-
- status =
- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
- acpi_ev_match_prw_and_gpe, NULL,
- &walk_info, NULL);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "While executing _PRW methods"));
- }
- }
-
- /*
- * Enable all GPEs that have a corresponding method and are not
- * capable of generating wakeups. Any other GPEs within this block
- * must be enabled via the acpi_enable_gpe interface.
- */
- wake_gpe_count = 0;
gpe_enabled_count = 0;
if (gpe_device == acpi_gbl_fadt_gpe_device) {
@@ -502,35 +472,21 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_event_info = &gpe_block->event_info[gpe_index];
gpe_number = gpe_index + gpe_block->block_base_number;
- /*
- * If the GPE has already been enabled for runtime
- * signaling, make sure it remains enabled, but do not
- * increment its reference counter.
- */
- if (gpe_event_info->runtime_count) {
- acpi_set_gpe(gpe_device, gpe_number,
- ACPI_GPE_ENABLE);
- gpe_enabled_count++;
- continue;
- }
-
- if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
- wake_gpe_count++;
- if (acpi_gbl_leave_wake_gpes_disabled) {
- continue;
- }
- }
-
/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
continue;
}
- /* Enable this GPE */
+ /*
+ * If the GPE has already been enabled for runtime
+ * signaling, make sure it remains enabled, but do not
+ * increment its reference counter.
+ */
+ status = gpe_event_info->runtime_count ?
+ acpi_ev_enable_gpe(gpe_event_info) :
+ acpi_enable_gpe(gpe_device, gpe_number);
- status = acpi_enable_gpe(gpe_device, gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
@@ -542,10 +498,10 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
}
}
- if (gpe_enabled_count || wake_gpe_count) {
+ if (gpe_enabled_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
- gpe_enabled_count, wake_gpe_count));
+ "Enabled %u GPEs in this block\n",
+ gpe_enabled_count));
}
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 3f6c2d26410..3084c5de1bb 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -211,9 +211,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
* result of a Load() or load_table() operation. If new GPE
* methods have been installed, register the new methods and
- * enable and runtime GPEs that are associated with them. Also,
- * run any newly loaded _PRW methods in order to discover any
- * new CAN_WAKE GPEs.
+ * enable and runtime GPEs that are associated with them.
*
******************************************************************************/
@@ -223,49 +221,12 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_walk_info walk_info;
acpi_status status = AE_OK;
- u32 new_wake_gpe_count = 0;
-
- /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
-
- walk_info.owner_id = table_owner_id;
- walk_info.execute_by_owner_id = TRUE;
- walk_info.count = 0;
-
- if (acpi_gbl_leave_wake_gpes_disabled) {
- /*
- * 1) Run any newly-loaded _PRW methods to find any GPEs that
- * can now be marked as CAN_WAKE GPEs. Note: We must run the
- * _PRW methods before we process the _Lxx/_Exx methods because
- * we will enable all runtime GPEs associated with the new
- * _Lxx/_Exx methods at the time we process those methods.
- *
- * Unlock interpreter so that we can run the _PRW methods.
- */
- walk_info.gpe_block = NULL;
- walk_info.gpe_device = NULL;
-
- acpi_ex_exit_interpreter();
-
- status =
- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_match_prw_and_gpe, NULL,
- &walk_info, NULL);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "While executing _PRW methods"));
- }
-
- acpi_ex_enter_interpreter();
- new_wake_gpe_count = walk_info.count;
- }
/*
* 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
*
- * Any GPEs that correspond to new _Lxx/_Exx methods and are not
- * marked as CAN_WAKE are immediately enabled.
+ * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
+ * enabled.
*
* Examine the namespace underneath each gpe_device within the
* gpe_block lists.
@@ -275,6 +236,8 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
return;
}
+ walk_info.owner_id = table_owner_id;
+ walk_info.execute_by_owner_id = TRUE;
walk_info.count = 0;
walk_info.enable_this_gpe = TRUE;
@@ -307,10 +270,8 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
gpe_xrupt_info = gpe_xrupt_info->next;
}
- if (walk_info.count || new_wake_gpe_count) {
- ACPI_INFO((AE_INFO,
- "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
- walk_info.count, new_wake_gpe_count));
+ if (walk_info.count) {
+ ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
}
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -386,9 +347,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
/*
* 3) Edge/Level determination is based on the 2nd character
* of the method name
- *
- * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
- * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
*/
switch (name[1]) {
case 'L':
@@ -471,24 +429,18 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
*/
if (walk_info->enable_this_gpe) {
- /* Ignore GPEs that can wake the system */
+ walk_info->count++;
+ gpe_device = walk_info->gpe_device;
- if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
- !acpi_gbl_leave_wake_gpes_disabled) {
- walk_info->count++;
- gpe_device = walk_info->gpe_device;
-
- if (gpe_device == acpi_gbl_fadt_gpe_device) {
- gpe_device = NULL;
- }
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
+ gpe_device = NULL;
+ }
- status = acpi_enable_gpe(gpe_device, gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not enable GPE 0x%02X",
- gpe_number));
- }
+ status = acpi_enable_gpe(gpe_device, gpe_number);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+ gpe_number));
}
}
@@ -497,157 +449,3 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
name, gpe_number));
return_ACPI_STATUS(AE_OK);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
- * not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * Device. Run the _PRW method. If present, extract the GPE
- * number and mark the GPE as a CAN_WAKE GPE. Allows a
- * per-owner_id execution if execute_by_owner_id is TRUE in the
- * walk_info parameter block.
- *
- * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
- * owner.
- * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
- * we only execute _PRWs that refer to the input gpe_device.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *context, void **return_value)
-{
- struct acpi_gpe_walk_info *walk_info =
- ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
- struct acpi_namespace_node *gpe_device;
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_namespace_node *target_gpe_device;
- struct acpi_namespace_node *prw_node;
- struct acpi_gpe_event_info *gpe_event_info;
- union acpi_operand_object *pkg_desc;
- union acpi_operand_object *obj_desc;
- u32 gpe_number;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
- /* Check for a _PRW method under this device */
-
- status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
- ACPI_NS_NO_UPSEARCH, &prw_node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Check if requested owner_id matches this owner_id */
-
- if ((walk_info->execute_by_owner_id) &&
- (prw_node->owner_id != walk_info->owner_id)) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Execute the _PRW */
-
- status = acpi_ut_evaluate_object(prw_node, NULL,
- ACPI_BTYPE_PACKAGE, &pkg_desc);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* The returned _PRW package must have at least two elements */
-
- if (pkg_desc->package.count < 2) {
- goto cleanup;
- }
-
- /* Extract pointers from the input context */
-
- gpe_device = walk_info->gpe_device;
- gpe_block = walk_info->gpe_block;
-
- /*
- * The _PRW object must return a package, we are only interested
- * in the first element
- */
- obj_desc = pkg_desc->package.elements[0];
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Use FADT-defined GPE device (from definition of _PRW) */
-
- target_gpe_device = NULL;
- if (gpe_device) {
- target_gpe_device = acpi_gbl_fadt_gpe_device;
- }
-
- /* Integer is the GPE number in the FADT described GPE blocks */
-
- gpe_number = (u32)obj_desc->integer.value;
- } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
- /* Package contains a GPE reference and GPE number within a GPE block */
-
- if ((obj_desc->package.count < 2) ||
- ((obj_desc->package.elements[0])->common.type !=
- ACPI_TYPE_LOCAL_REFERENCE) ||
- ((obj_desc->package.elements[1])->common.type !=
- ACPI_TYPE_INTEGER)) {
- goto cleanup;
- }
-
- /* Get GPE block reference and decode */
-
- target_gpe_device =
- obj_desc->package.elements[0]->reference.node;
- gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
- } else {
- /* Unknown type, just ignore it */
-
- goto cleanup;
- }
-
- /* Get the gpe_event_info for this GPE */
-
- if (gpe_device) {
- /*
- * Is this GPE within this block?
- *
- * TRUE if and only if these conditions are true:
- * 1) The GPE devices match.
- * 2) The GPE index(number) is within the range of the Gpe Block
- * associated with the GPE device.
- */
- if (gpe_device != target_gpe_device) {
- goto cleanup;
- }
-
- gpe_event_info =
- acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
- } else {
- /* gpe_device is NULL, just match the target_device and gpe_number */
-
- gpe_event_info =
- acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
- }
-
- if (gpe_event_info) {
- if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
-
- /* This GPE can wake the system */
-
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- walk_info->count++;
- }
- }
-
- cleanup:
- acpi_ut_remove_reference(pkg_desc);
- return_ACPI_STATUS(AE_OK);
-}
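For context: after this change acpi_enable_gpe() and acpi_disable_gpe() are pure reference-counting interfaces for runtime GPEs and no longer take a gpe_type argument. A minimal sketch of a post-patch call site, assuming a hypothetical driver and an illustrative GPE number (0x13 in the FADT GPE block):

	/* Hypothetical driver code -- shows only the two-argument API */
	acpi_status status;

	status = acpi_enable_gpe(NULL, 0x13);	/* NULL selects the FADT GPE block */
	if (ACPI_FAILURE(status))
		pr_err("Cannot enable GPE 0x13: %s\n",
		       acpi_format_exception(status));

	/* ... use the device ... */

	(void)acpi_disable_gpe(NULL, 0x13);	/* drop the runtime reference */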
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 2e3b0334072..f40d271bf56 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -199,7 +199,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
return_ACPI_STATUS(status);
}
- parent_node = acpi_ns_get_parent_node(region_obj->region.node);
+ parent_node = region_obj->region.node->parent;
/*
* Get the _SEG and _BBN values from the device upon which the handler
@@ -248,7 +248,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
break;
}
- pci_root_node = acpi_ns_get_parent_node(pci_root_node);
+ pci_root_node = pci_root_node->parent;
}
/* PCI root bridge not found, use namespace root node */
@@ -280,7 +280,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/
pci_device_node = region_obj->region.node;
while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
- pci_device_node = acpi_ns_get_parent_node(pci_device_node);
+ pci_device_node = pci_device_node->parent;
}
if (!pci_device_node) {
@@ -521,7 +521,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
- node = acpi_ns_get_parent_node(region_obj->region.node);
+ node = region_obj->region.node->parent;
space_id = region_obj->region.space_id;
/* Setup defaults */
@@ -654,7 +654,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* This node does not have the handler we need; Pop up one level */
- node = acpi_ns_get_parent_node(node);
+ node = node->parent;
}
/* If we get here, there is no handler for this region */
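The node->parent substitutions above rely on every namespace node now carrying an explicit back pointer (the root's parent is NULL). A minimal sketch of the resulting upward-walk idiom, using a hypothetical helper:

	/* Hypothetical helper: find the closest ancestor of a given type */
	static struct acpi_namespace_node *
	acpi_ns_find_ancestor(struct acpi_namespace_node *node, acpi_object_type type)
	{
		while (node && node->type != type) {
			node = node->parent;	/* NULL at and above the root */
		}
		return node;
	}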
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 4a531cdf794..14e48add32f 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -691,12 +691,22 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
+ /* Allocate memory for the handler object */
+
+ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+ if (!handler) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (!gpe_event_info) {
status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
+ goto free_and_exit;
}
/* Make sure that there isn't a handler there already */
@@ -704,24 +714,30 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
+ goto free_and_exit;
}
/* Allocate and init handler object */
- handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
- if (!handler) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
-
handler->address = address;
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
+ handler->orig_flags = gpe_event_info->flags &
+ (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
+
+ /*
+ * If the GPE is associated with a method and it cannot wake up the
+ * system from sleep states, it was enabled automatically during
+ * initialization, so it has to be disabled now to avoid spurious
+ * execution of the handler.
+ */
+
+ if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
+ && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+ (void)acpi_raw_disable_gpe(gpe_event_info);
/* Install the handler */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
gpe_event_info->dispatch.handler = handler;
/* Setup up dispatch flags to indicate handler (vs. method) */
@@ -735,6 +751,11 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
+
+free_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ ACPI_FREE(handler);
+ goto unlock_and_exit;
}
ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
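The reordering above follows the usual rule that memory must be allocated before the GPE spinlock is taken and freed only after it is dropped. A generic sketch of the pattern, not ACPICA code; my_handler, my_lock and my_slot are hypothetical names:

	/* Illustrative pattern only: allocate first, then take the lock */
	struct my_handler *obj;
	unsigned long flags;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	spin_lock_irqsave(&my_lock, flags);
	if (my_slot) {				/* a handler is already installed */
		spin_unlock_irqrestore(&my_lock, flags);
		kfree(obj);			/* free outside the critical section */
		return -EEXIST;
	}
	my_slot = obj;				/* install while holding the lock */
	spin_unlock_irqrestore(&my_lock, flags);
	return 0;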
@@ -770,11 +791,17 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ /* Make sure all deferred tasks are completed */
+
+ acpi_os_wait_events_complete(NULL);
+
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -798,34 +825,34 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
goto unlock_and_exit;
}
- /* Make sure all deferred tasks are completed */
-
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- acpi_os_wait_events_complete(NULL);
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
/* Remove the handler */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
handler = gpe_event_info->dispatch.handler;
/* Restore Method node (if any), set dispatch flags */
gpe_event_info->dispatch.method_node = handler->method_node;
- gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */
- if (handler->method_node) {
- gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ gpe_event_info->flags &=
+ ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
+ gpe_event_info->flags |= handler->orig_flags;
+
+ /*
+ * If the GPE was previously associated with a method and it cannot wake
+ * up the system from sleep states, it should be enabled at this point
+ * to restore the post-initialization configuration.
+ */
+
+ if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
+ && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+ (void)acpi_raw_enable_gpe(gpe_event_info);
/* Now we can free the handler object */
ACPI_FREE(handler);
- unlock_and_exit:
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
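Taken together, the install and remove paths above save and later restore the GPE's original dispatch configuration. Condensed from the patch, using the same field names:

	/* Install: remember how the GPE was dispatched before the handler */
	handler->orig_flags = gpe_event_info->flags &
		(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);

	/* Remove: clear both fields, then put the saved bits back */
	gpe_event_info->flags &=
		~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= handler->orig_flags;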
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 18b3f1468b7..304825528d4 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -213,101 +213,71 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
- * FUNCTION: acpi_clear_and_enable_gpe
- *
- * PARAMETERS: gpe_event_info - GPE to enable
- *
- * RETURN: Status
- *
- * DESCRIPTION: Clear the given GPE from stale events and enable it.
- *
- ******************************************************************************/
-static acpi_status
-acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
-{
- acpi_status status;
-
- /*
- * We will only allow a GPE to be enabled if it has either an
- * associated method (_Lxx/_Exx) or a handler. Otherwise, the
- * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
- * first time it fires.
- */
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
- return_ACPI_STATUS(AE_NO_HANDLER);
- }
-
- /* Clear the GPE (of stale events) */
- status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Enable the requested GPE */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_set_gpe
+ * FUNCTION: acpi_gpe_wakeup
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
+ * Action - Enable or Disable
*
* RETURN: Status
*
- * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
- * the reference count mechanism used in the acpi_enable_gpe and
- * acpi_disable_gpe interfaces -- and should be used with care.
- *
- * Note: Typically used to disable a runtime GPE for short period of time,
- * then re-enable it, without disturbing the existing reference counts. This
- * is useful, for example, in the Embedded Controller (EC) driver.
+ * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
*
******************************************************************************/
-acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
+acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
+ acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- acpi_status status;
+ struct acpi_gpe_register_info *gpe_register_info;
acpi_cpu_flags flags;
+ u32 register_bit;
- ACPI_FUNCTION_TRACE(acpi_set_gpe);
+ ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
+ if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ register_bit =
+ acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
/* Perform the action */
switch (action) {
case ACPI_GPE_ENABLE:
- status = acpi_clear_and_enable_gpe(gpe_event_info);
+ ACPI_SET_BIT(gpe_register_info->enable_for_wake,
+ (u8)register_bit);
break;
case ACPI_GPE_DISABLE:
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+ (u8)register_bit);
break;
default:
+ ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
status = AE_BAD_PARAMETER;
break;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_set_gpe)
+ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
/*******************************************************************************
*
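As a worked example of the mask update in acpi_gpe_wakeup(), assume a GPE register whose base_gpe_number is 0x10 and a request for GPE 0x13 (the numbers are illustrative):

	u32 register_bit = 1 << (0x13 - 0x10);	/* acpi_hw_get_gpe_register_bit() => 0x08 */

	ACPI_SET_BIT(gpe_register_info->enable_for_wake, (u8)0x08);	/* ACPI_GPE_ENABLE  */
	ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, (u8)0x08);	/* ACPI_GPE_DISABLE */

Only the enable_for_wake mask is touched here; as the removed comment below notes, wake GPEs are hardware-enabled just before sleeping.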
@@ -315,84 +285,30 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
- * or both
*
* RETURN: Status
*
* DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
- * hardware-enabled (for runtime GPEs), or the GPE register mask
- * is updated (for wake GPEs).
+ * hardware-enabled.
*
******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
{
- acpi_status status = AE_OK;
+ acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- /* Parameter validation */
-
- if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
- if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
- status = AE_LIMIT; /* Too many references */
- goto unlock_and_exit;
- }
-
- gpe_event_info->runtime_count++;
- if (gpe_event_info->runtime_count == 1) {
- status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_SUCCESS(status)) {
- status = acpi_clear_and_enable_gpe(gpe_event_info);
- }
-
- if (ACPI_FAILURE(status)) {
- gpe_event_info->runtime_count--;
- goto unlock_and_exit;
- }
- }
- }
-
- if (gpe_type & ACPI_GPE_TYPE_WAKE) {
- /* The GPE must have the ability to wake the system */
-
- if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
- status = AE_TYPE;
- goto unlock_and_exit;
- }
-
- if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
- status = AE_LIMIT; /* Too many references */
- goto unlock_and_exit;
- }
-
- /*
- * Update the enable mask on the first wakeup reference. Wake GPEs
- * are only hardware-enabled just before sleeping.
- */
- gpe_event_info->wakeup_count++;
- if (gpe_event_info->wakeup_count == 1) {
- status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- }
+ if (gpe_event_info) {
+ status = acpi_raw_enable_gpe(gpe_event_info);
}
-unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -404,8 +320,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
- * or both
*
* RETURN: Status
*
@@ -414,20 +328,52 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
* the GPE mask bit disabled (for wake GPEs)
*
******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
{
- acpi_status status = AE_OK;
+ acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- /* Parameter validation */
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
- if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (gpe_event_info) {
+ status = acpi_raw_disable_gpe(gpe_event_info);
}
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_gpe_can_wake
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE
+ * has a corresponding method and is currently enabled, disable it
+ * (GPEs with corresponding methods are enabled unconditionally
+ * during initialization, but GPEs that can wake up are expected
+ * to be initially disabled).
+ *
+ ******************************************************************************/
+acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
+
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -438,51 +384,20 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type
goto unlock_and_exit;
}
- /* Hardware-disable a runtime GPE on removal of the last reference */
-
- if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
- if (!gpe_event_info->runtime_count) {
- status = AE_LIMIT; /* There are no references to remove */
- goto unlock_and_exit;
- }
-
- gpe_event_info->runtime_count--;
- if (!gpe_event_info->runtime_count) {
- status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_SUCCESS(status)) {
- status = acpi_hw_low_set_gpe(gpe_event_info,
- ACPI_GPE_DISABLE);
- }
-
- if (ACPI_FAILURE(status)) {
- gpe_event_info->runtime_count++;
- goto unlock_and_exit;
- }
- }
+ if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
+ goto unlock_and_exit;
}
- /*
- * Update masks for wake GPE on removal of the last reference.
- * No need to hardware-disable wake GPEs here, they are not currently
- * enabled.
- */
- if (gpe_type & ACPI_GPE_TYPE_WAKE) {
- if (!gpe_event_info->wakeup_count) {
- status = AE_LIMIT; /* There are no references to remove */
- goto unlock_and_exit;
- }
-
- gpe_event_info->wakeup_count--;
- if (!gpe_event_info->wakeup_count) {
- status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- }
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ if (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) {
+ (void)acpi_raw_disable_gpe(gpe_event_info);
}
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
/*******************************************************************************
*
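A minimal sketch of how the three exported interfaces are meant to be combined for a wake-capable device GPE, as suggested by the descriptions above (the GPE number is illustrative, error handling omitted):

	(void)acpi_gpe_can_wake(NULL, 0x13);			/* mark as wake-capable     */
	(void)acpi_enable_gpe(NULL, 0x13);			/* take a runtime reference */
	(void)acpi_gpe_wakeup(NULL, 0x13, ACPI_GPE_ENABLE);	/* set the wakeup mask      */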
@@ -800,7 +715,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
- /* Run the _PRW methods and enable the runtime GPEs in the new block */
+ /* Enable the runtime GPEs in the new block */
status = acpi_ev_initialize_gpe_block(node, gpe_block);
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 008621c5ad8..18832205b63 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -120,7 +120,7 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- /* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
+ /* Update GPEs for any new _Lxx/_Exx methods. Ignore errors */
status = acpi_tb_get_owner_id(table_index, &owner_id);
if (ACPI_SUCCESS(status)) {
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index d39d438ba1e..f067bbb0d96 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -742,7 +742,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "**** Start operand dump for opcode [%s], %d operands\n",
+ "**** Start operand dump for opcode [%s], %u operands\n",
opcode_name, num_operands));
if (num_operands == 0) {
@@ -812,7 +812,7 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
acpi_ex_out_pointer("Attached Object",
acpi_ns_get_attached_object(node));
- acpi_ex_out_pointer("Parent", acpi_ns_get_parent_node(node));
+ acpi_ex_out_pointer("Parent", node->parent);
acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
acpi_ex_dump_node);
@@ -945,7 +945,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
case ACPI_TYPE_PACKAGE:
- acpi_os_printf("[Package] Contains %d Elements:\n",
+ acpi_os_printf("[Package] Contains %u Elements:\n",
obj_desc->package.count);
for (i = 0; i < obj_desc->package.count; i++) {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index a6dc26f0b3b..047217303a4 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -534,13 +534,13 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
if (ACPI_SUCCESS(status)) {
if (read_write == ACPI_READ) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Value Read %8.8X%8.8X, Width %d\n",
+ "Value Read %8.8X%8.8X, Width %u\n",
ACPI_FORMAT_UINT64(*value),
obj_desc->common_field.
access_byte_width));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Value Written %8.8X%8.8X, Width %d\n",
+ "Value Written %8.8X%8.8X, Width %u\n",
ACPI_FORMAT_UINT64(*value),
obj_desc->common_field.
access_byte_width));
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 25059dace0a..98a331d2249 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -108,11 +108,11 @@ acpi_ex_generate_access(u32 field_bit_offset,
field_byte_length = field_byte_end_offset - field_byte_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Bit length %d, Bit offset %d\n",
+ "Bit length %u, Bit offset %u\n",
field_bit_length, field_bit_offset));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Byte Length %d, Byte Offset %d, End Offset %d\n",
+ "Byte Length %u, Byte Offset %u, End Offset %u\n",
field_byte_length, field_byte_offset,
field_byte_end_offset));
@@ -147,11 +147,11 @@ acpi_ex_generate_access(u32 field_bit_offset,
accesses = field_end_offset - field_start_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "AccessWidth %d end is within region\n",
+ "AccessWidth %u end is within region\n",
access_byte_width));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Field Start %d, Field End %d -- requires %d accesses\n",
+ "Field Start %u, Field End %u -- requires %u accesses\n",
field_start_offset, field_end_offset,
accesses));
@@ -159,7 +159,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
if (accesses <= 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Entire field can be accessed with one operation of size %d\n",
+ "Entire field can be accessed with one operation of size %u\n",
access_byte_width));
return_VALUE(access_byte_width);
}
@@ -174,7 +174,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
}
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "AccessWidth %d end is NOT within region\n",
+ "AccessWidth %u end is NOT within region\n",
access_byte_width));
if (access_byte_width == 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@@ -190,7 +190,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
* previous access
*/
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Backing off to previous optimal access width of %d\n",
+ "Backing off to previous optimal access width of %u\n",
minimum_access_width));
return_VALUE(minimum_access_width);
}
@@ -385,15 +385,6 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
(field_bit_position -
ACPI_MUL_8(obj_desc->common_field.base_byte_offset));
- /*
- * Does the entire field fit within a single field access element? (datum)
- * (i.e., without crossing a datum boundary)
- */
- if ((obj_desc->common_field.start_field_bit_offset +
- field_bit_length) <= (u16) access_bit_width) {
- obj_desc->common.flags |= AOPOBJ_SINGLE_DATUM;
- }
-
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 531000fc77d..8819d2ac5ae 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -194,7 +194,7 @@ acpi_ex_system_memory_space_handler(u32 function,
((u64) address - (u64) mem_info->mapped_physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
+ "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
bit_width, function,
ACPI_FORMAT_NATIVE_UINT(address)));
@@ -297,7 +297,7 @@ acpi_ex_system_io_space_handler(u32 function,
ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
+ "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
bit_width, function,
ACPI_FORMAT_NATIVE_UINT(address)));
@@ -373,7 +373,7 @@ acpi_ex_pci_config_space_handler(u32 function,
pci_register = (u16) (u32) address;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
+ "Pci-Config %u (%u) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
function, bit_width, pci_id->segment, pci_id->bus,
pci_id->device, pci_id->function, pci_register));
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 74c24d517f8..4093522eed4 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
*
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
- * fatal system error. Used in conjuction with
+ * fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 3450309c278..14750db2a1b 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -57,7 +57,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/******************************************************************************
*
- * FUNCTION: acpi_hw_gpe_register_bit
+ * FUNCTION: acpi_hw_get_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE
* gpe_register_info - Info block for the GPE register
@@ -69,7 +69,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
******************************************************************************/
-u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
+u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info)
{
return (u32)1 << (gpe_event_info->gpe_number -
@@ -115,7 +115,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
/* Set or clear just the bit that corresponds to this GPE */
- register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
switch (action) {
case ACPI_GPE_COND_ENABLE:
@@ -143,31 +143,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
/******************************************************************************
*
- * FUNCTION: acpi_hw_write_gpe_enable_reg
- *
- * PARAMETERS: gpe_event_info - Info block for the GPE to be enabled
- *
- * RETURN: Status
- *
- * DESCRIPTION: Write a GPE enable register. Note: The bit for this GPE must
- * already be cleared or set in the parent register
- * enable_for_run mask.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
-{
- acpi_status status;
-
- ACPI_FUNCTION_ENTRY();
-
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
- return (status);
-}
-
-/******************************************************************************
- *
* FUNCTION: acpi_hw_clear_gpe
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be cleared
@@ -193,7 +168,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
return (AE_NOT_EXIST);
}
- register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
/*
@@ -241,7 +216,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* Get the register bitmask for this GPE */
- register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
/* GPE currently enabled? (enabled for runtime?) */
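With acpi_hw_write_gpe_enable_reg() removed, a caller that needs the old behavior presumably uses the low-level helper directly, e.g.:

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);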
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 36eb803dd9d..3796811276a 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -307,7 +307,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Entering sleep state [S%d]\n", sleep_state));
+ "Entering sleep state [S%u]\n", sleep_state));
/* Clear the SLP_EN and SLP_TYP fields */
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 3a2814676ac..0cd925be5fc 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -338,8 +338,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
*/
while (!acpi_ns_opens_scope(prefix_node->type) &&
prefix_node->type != ACPI_TYPE_ANY) {
- prefix_node =
- acpi_ns_get_parent_node(prefix_node);
+ prefix_node = prefix_node->parent;
}
}
}
@@ -419,7 +418,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Backup to the parent node */
num_carats++;
- this_node = acpi_ns_get_parent_node(this_node);
+ this_node = this_node->parent;
if (!this_node) {
/* Current scope has no parent scope */
@@ -433,7 +432,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "Search scope is [%4.4s], path has %d carat(s)\n",
+ "Search scope is [%4.4s], path has %u carat(s)\n",
acpi_ut_get_node_name
(this_node), num_carats));
}
@@ -495,7 +494,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
path++;
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "Multi Pathname (%d Segments, Flags=%X)\n",
+ "Multi Pathname (%u Segments, Flags=%X)\n",
num_segments, flags));
break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 982269c1fa4..1e5ff803d9a 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -159,7 +159,7 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node);
- parent_node = acpi_ns_get_parent_node(node);
+ parent_node = node->parent;
prev_node = NULL;
next_node = parent_node->child;
@@ -168,29 +168,20 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
while (next_node != node) {
prev_node = next_node;
- next_node = prev_node->peer;
+ next_node = next_node->peer;
}
if (prev_node) {
/* Node is not first child, unlink it */
- prev_node->peer = next_node->peer;
- if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
- prev_node->flags |= ANOBJ_END_OF_PEER_LIST;
- }
+ prev_node->peer = node->peer;
} else {
- /* Node is first child (has no previous peer) */
-
- if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
-
- /* No peers at all */
-
- parent_node->child = NULL;
- } else { /* Link peer list to parent */
-
- parent_node->child = next_node->peer;
- }
+ /*
+ * Node is first child (has no previous peer).
+ * Link peer list to parent
+ */
+ parent_node->child = node->peer;
}
/* Delete the node and any attached objects */
@@ -228,33 +219,42 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
ACPI_FUNCTION_TRACE(ns_install_node);
- /*
- * Get the owner ID from the Walk state. The owner ID is used to track
- * table deletion and deletion of objects created by methods.
- */
if (walk_state) {
+ /*
+ * Get the owner ID from the Walk state. The owner ID is used to
+ * track table deletion and deletion of objects created by methods.
+ */
owner_id = walk_state->owner_id;
+
+ if ((walk_state->method_desc) &&
+ (parent_node != walk_state->method_node)) {
+ /*
+ * A method is creating a new node that is not a child of the
+ * method (it is non-local). Mark the executing method as having
+ * modified the namespace. This is used for cleanup when the
+ * method exits.
+ */
+ walk_state->method_desc->method.flags |=
+ AOPOBJ_MODIFIED_NAMESPACE;
+ }
}
/* Link the new entry into the parent and existing children */
+ node->peer = NULL;
+ node->parent = parent_node;
child_node = parent_node->child;
+
if (!child_node) {
parent_node->child = node;
- node->flags |= ANOBJ_END_OF_PEER_LIST;
- node->peer = parent_node;
} else {
- while (!(child_node->flags & ANOBJ_END_OF_PEER_LIST)) {
+ /* Add node to the end of the peer list */
+
+ while (child_node->peer) {
child_node = child_node->peer;
}
child_node->peer = node;
-
- /* Clear end-of-list flag */
-
- child_node->flags &= ~ANOBJ_END_OF_PEER_LIST;
- node->flags |= ANOBJ_END_OF_PEER_LIST;
- node->peer = parent_node;
}
/* Init the new entry */
@@ -288,9 +288,8 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
{
- struct acpi_namespace_node *child_node;
struct acpi_namespace_node *next_node;
- u8 flags;
+ struct acpi_namespace_node *node_to_delete;
ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
@@ -298,37 +297,26 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
return_VOID;
}
- /* If no children, all done! */
-
- child_node = parent_node->child;
- if (!child_node) {
- return_VOID;
- }
-
/* Deallocate all children at this level */
- do {
-
- /* Get the things we need */
-
- next_node = child_node->peer;
- flags = child_node->flags;
+ next_node = parent_node->child;
+ while (next_node) {
/* Grandchildren should have all been deleted already */
- if (child_node->child) {
+ if (next_node->child) {
ACPI_ERROR((AE_INFO, "Found a grandchild! P=%p C=%p",
- parent_node, child_node));
+ parent_node, next_node));
}
/*
* Delete this child node and move on to the next child in the list.
* No need to unlink the node since we are deleting the entire branch.
*/
- acpi_ns_delete_node(child_node);
- child_node = next_node;
-
- } while (!(flags & ANOBJ_END_OF_PEER_LIST));
+ node_to_delete = next_node;
+ next_node = next_node->peer;
+ acpi_ns_delete_node(node_to_delete);
+ }
/* Clear the parent's child pointer */
@@ -405,7 +393,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
/* Move up the tree to the grandparent */
- parent_node = acpi_ns_get_parent_node(parent_node);
+ parent_node = parent_node->parent;
}
}
@@ -510,7 +498,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
/* Move up the tree to the grandparent */
- parent_node = acpi_ns_get_parent_node(parent_node);
+ parent_node = parent_node->parent;
}
}
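With every node now carrying parent and peer pointers, the list surgery above reduces to a standard singly-linked unlink. A compact sketch using the same field names:

	/* Unlink 'node' from its parent's NULL-terminated child list */
	if (parent_node->child == node) {
		parent_node->child = node->peer;	/* node was the first child */
	} else {
		struct acpi_namespace_node *prev = parent_node->child;

		while (prev->peer != node)
			prev = prev->peer;
		prev->peer = node->peer;
	}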
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 2110cc2360f..a54dc39e304 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -441,7 +441,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
return (AE_OK);
}
- acpi_os_printf("(R%d)", obj_desc->common.reference_count);
+ acpi_os_printf("(R%u)", obj_desc->common.reference_count);
switch (type) {
case ACPI_TYPE_METHOD:
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 4e5272c313e..660a2728908 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -103,8 +103,8 @@ acpi_status acpi_ns_initialize_objects(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nInitialized %hd/%hd Regions %hd/%hd Fields %hd/%hd "
- "Buffers %hd/%hd Packages (%hd nodes)\n",
+ "\nInitialized %u/%u Regions %u/%u Fields %u/%u "
+ "Buffers %u/%u Packages (%u nodes)\n",
info.op_region_init, info.op_region_count,
info.field_init, info.field_count,
info.buffer_init, info.buffer_count,
@@ -112,9 +112,9 @@ acpi_status acpi_ns_initialize_objects(void)
info.object_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%hd Control Methods found\n", info.method_count));
+ "%u Control Methods found\n", info.method_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%hd Op Regions found\n", info.op_region_count));
+ "%u Op Regions found\n", info.op_region_count));
return_ACPI_STATUS(AE_OK);
}
@@ -208,8 +208,8 @@ acpi_status acpi_ns_initialize_devices(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nExecuted %hd _INI methods requiring %hd _STA executions "
- "(examined %hd objects)\n",
+ "\nExecuted %u _INI methods requiring %u _STA executions "
+ "(examined %u objects)\n",
info.num_INI, info.num_STA, info.device_count));
return_ACPI_STATUS(status);
@@ -410,7 +410,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
* The only _INI methods that we care about are those that are
* present under Device, Processor, and Thermal objects.
*/
- parent_node = acpi_ns_get_parent_node(node);
+ parent_node = node->parent;
switch (parent_node->type) {
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_PROCESSOR:
@@ -420,7 +420,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
while (parent_node) {
parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
- parent_node = acpi_ns_get_parent_node(parent_node);
+ parent_node = parent_node->parent;
}
break;
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 7dea0031605..d3104af57e1 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -93,7 +93,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
/* Put the name into the buffer */
ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
- parent_node = acpi_ns_get_parent_node(parent_node);
+ parent_node = parent_node->parent;
/* Prefix name with the path separator */
@@ -198,7 +198,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
return 0;
}
size += ACPI_PATH_SEGMENT_LENGTH;
- next_node = acpi_ns_get_parent_node(next_node);
+ next_node = next_node->parent;
}
if (!size) {
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 27cda52c76b..5808c89e9fa 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -136,8 +136,8 @@ acpi_ns_one_complete_parse(u32 pass_number,
/* Parse the AML */
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
- (unsigned)pass_number));
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %u parse\n",
+ pass_number));
status = acpi_ps_parse_aml(walk_state);
cleanup:
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d4be37751be..d1c13669266 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -556,7 +556,7 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
/* Need an Integer - create a zero-value integer */
- new_object = acpi_ut_create_integer_object(0);
+ new_object = acpi_ut_create_integer_object((u64)0);
} else if (expected_btypes & ACPI_RTYPE_STRING) {
/* Need a String - create a NULL string */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 61bd0f6755d..4009498fbab 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -112,6 +112,13 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
* _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
* _PSS: Sort the list descending by Power
* _TSS: Sort the list descending by Power
+ *
+ * Names that must be packages, but cannot be sorted:
+ *
+ * _BCL: Values are tied to the Package index where they appear, and cannot
+ * be moved or sorted. These index values are used for _BQC and _BCM.
+ * However, we can fix the case where a buffer is returned, by converting
+ * it to a Package of integers.
*/
static const struct acpi_repair_info acpi_ns_repairable_names[] = {
{"_ALR", acpi_ns_repair_ALR},
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index a8e42b5e946..41102a84272 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -152,17 +152,6 @@ acpi_ns_search_one_scope(u32 target_name,
return_ACPI_STATUS(AE_OK);
}
- /*
- * The last entry in the list points back to the parent,
- * so a flag is used to indicate the end-of-list
- */
- if (node->flags & ANOBJ_END_OF_PEER_LIST) {
-
- /* Searched entire list, we are done */
-
- break;
- }
-
/* Didn't match name, move on to the next peer object */
node = node->peer;
@@ -217,7 +206,7 @@ acpi_ns_search_parent_tree(u32 target_name,
ACPI_FUNCTION_TRACE(ns_search_parent_tree);
- parent_node = acpi_ns_get_parent_node(node);
+ parent_node = node->parent;
/*
* If there is no parent (i.e., we are at the root) or type is "local",
@@ -261,7 +250,7 @@ acpi_ns_search_parent_tree(u32 target_name,
/* Not found here, go up another level (until we reach the root) */
- parent_node = acpi_ns_get_parent_node(parent_node);
+ parent_node = parent_node->parent;
}
/* Not found in parent tree */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index bab559712da..e1add3491b0 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -847,116 +847,3 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_get_parent_node
- *
- * PARAMETERS: Node - Current table entry
- *
- * RETURN: Parent entry of the given entry
- *
- * DESCRIPTION: Obtain the parent entry for a given entry in the namespace.
- *
- ******************************************************************************/
-
-struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
- *node)
-{
- ACPI_FUNCTION_ENTRY();
-
- if (!node) {
- return (NULL);
- }
-
- /*
- * Walk to the end of this peer list. The last entry is marked with a flag
- * and the peer pointer is really a pointer back to the parent. This saves
- * putting a parent back pointer in each and every named object!
- */
- while (!(node->flags & ANOBJ_END_OF_PEER_LIST)) {
- node = node->peer;
- }
-
- return (node->peer);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_get_next_valid_node
- *
- * PARAMETERS: Node - Current table entry
- *
- * RETURN: Next valid Node in the linked node list. NULL if no more valid
- * nodes.
- *
- * DESCRIPTION: Find the next valid node within a name table.
- * Useful for implementing NULL-end-of-list loops.
- *
- ******************************************************************************/
-
-struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
- acpi_namespace_node
- *node)
-{
-
- /* If we are at the end of this peer list, return NULL */
-
- if (node->flags & ANOBJ_END_OF_PEER_LIST) {
- return NULL;
- }
-
- /* Otherwise just return the next peer */
-
- return (node->peer);
-}
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_find_parent_name
- *
- * PARAMETERS: *child_node - Named Obj whose name is to be found
- *
- * RETURN: The ACPI name
- *
- * DESCRIPTION: Search for the given obj in its parent scope and return the
- * name segment, or "????" if the parent name can't be found
- * (which "should not happen").
- *
- ******************************************************************************/
-
-acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
-{
- struct acpi_namespace_node *parent_node;
-
- ACPI_FUNCTION_TRACE(ns_find_parent_name);
-
- if (child_node) {
-
- /* Valid entry. Get the parent Node */
-
- parent_node = acpi_ns_get_parent_node(child_node);
- if (parent_node) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Parent of %p [%4.4s] is %p [%4.4s]\n",
- child_node,
- acpi_ut_get_node_name(child_node),
- parent_node,
- acpi_ut_get_node_name(parent_node)));
-
- if (parent_node->name.integer) {
- return_VALUE((acpi_name) parent_node->name.
- integer);
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Unable to find parent of %p (%4.4s)\n",
- child_node,
- acpi_ut_get_node_name(child_node)));
- }
-
- return_VALUE(ACPI_UNKNOWN_NAME);
-}
-#endif
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 00e79fb2602..2cd5be8fe10 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -79,15 +79,6 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
return parent_node->child;
}
- /*
- * Get the next node.
- *
- * If we are at the end of this peer list, return NULL
- */
- if (child_node->flags & ANOBJ_END_OF_PEER_LIST) {
- return NULL;
- }
-
/* Otherwise just return the next peer */
return child_node->peer;
@@ -146,9 +137,9 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
return (next_node);
}
- /* Otherwise, move on to the next node */
+ /* Otherwise, move on to the next peer node */
- next_node = acpi_ns_get_next_valid_node(next_node);
+ next_node = next_node->peer;
}
/* Not found */
@@ -355,7 +346,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
*/
level--;
child_node = parent_node;
- parent_node = acpi_ns_get_parent_node(parent_node);
+ parent_node = parent_node->parent;
node_previously_visited = TRUE;
}
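Since the ANOBJ_END_OF_PEER_LIST flag is gone, iterating the children of a node is now a plain NULL-terminated walk:

	struct acpi_namespace_node *child;

	for (child = parent_node->child; child; child = child->peer) {
		/* visit child */
	}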
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index eafef24ea44..a1f04e9b803 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -190,7 +190,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Get the parent entry */
- parent_node = acpi_ns_get_parent_node(node);
+ parent_node = node->parent;
*ret_handle = ACPI_CAST_PTR(acpi_handle, parent_node);
/* Return exception if parent is null */
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 22cfcfbd9ff..491191e6cf6 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
/*
* 16-, 32-, and 64-bit cases must use the move macros that perform
- * endian conversion and/or accomodate hardware that cannot perform
+ * endian conversion and/or accommodate hardware that cannot perform
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 66116750a0f..0558747579e 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -813,10 +813,10 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
+ acpi_gbl_root_node_struct.parent = NULL;
acpi_gbl_root_node_struct.child = NULL;
acpi_gbl_root_node_struct.peer = NULL;
acpi_gbl_root_node_struct.object = NULL;
- acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
#ifdef ACPI_DEBUG_OUTPUT
acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 058b3df4827..f5cca3a1300 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -279,13 +279,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
- acpi_thread_id this_thread_id;
-
ACPI_FUNCTION_NAME(ut_release_mutex);
- this_thread_id = acpi_os_get_thread_id();
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
- ACPI_CAST_PTR(void, this_thread_id),
+ ACPI_CAST_PTR(void, acpi_os_get_thread_id()),
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index db9d8ca5798..7f8cefcb2b3 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -293,12 +293,8 @@ acpi_status acpi_initialize_objects(u32 flags)
* Complete the GPE initialization for the GPE blocks defined in the FADT
* (GPE block 0 and 1).
*
- * Note1: This is where the _PRW methods are executed for the GPEs. These
- * methods can only be executed after the SCI and Global Lock handlers are
- * installed and initialized.
- *
- * Note2: Currently, there seems to be no need to run the _REG methods
- * before execution of the _PRW methods and enabling of the GPEs.
+ * NOTE: Currently, there seems to be no need to run the _REG methods
+ * before enabling the GPEs.
*/
if (!(flags & ACPI_NO_EVENT_INIT)) {
status = acpi_ev_install_fadt_gpes();
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f8c668f27b5..fca34ccfd29 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -28,3 +28,12 @@ config ACPI_APEI_EINJ
EINJ provides a hardware error injection mechanism, it is
mainly used for debugging and testing the other parts of
APEI and some other RAS features.
+
+config ACPI_APEI_ERST_DEBUG
+ tristate "APEI Error Record Serialization Table (ERST) Debug Support"
+ depends on ACPI_APEI
+ help
+ ERST is a way provided by APEI to save and retrieve hardware
+ error information to and from a persistent store. Enable this
+ if you want to debug and test the ERST kernel support
+ and firmware implementation.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index b13b03a1778..d1d1bc0a4ee 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 216e1e948ff..4a904a4bf05 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
int apei_resources_request(struct apei_resources *resources,
const char *desc)
{
- struct apei_res *res, *res_bak;
+ struct apei_res *res, *res_bak = NULL;
struct resource *r;
+ int rc;
- apei_resources_sub(resources, &apei_resources_all);
+ rc = apei_resources_sub(resources, &apei_resources_all);
+ if (rc)
+ return rc;
+ rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
@@ -475,28 +479,33 @@ int apei_resources_request(struct apei_resources *resources,
}
}
- apei_resources_merge(&apei_resources_all, resources);
+ rc = apei_resources_merge(&apei_resources_all, resources);
+ if (rc) {
+ pr_err(APEI_PFX "Fail to merge resources!\n");
+ goto err_unmap_ioport;
+ }
return 0;
err_unmap_ioport:
list_for_each_entry(res, &resources->ioport, list) {
if (res == res_bak)
break;
- release_mem_region(res->start, res->end - res->start);
+ release_region(res->start, res->end - res->start);
}
res_bak = NULL;
err_unmap_iomem:
list_for_each_entry(res, &resources->iomem, list) {
if (res == res_bak)
break;
- release_region(res->start, res->end - res->start);
+ release_mem_region(res->start, res->end - res->start);
}
- return -EINVAL;
+ return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
void apei_resources_release(struct apei_resources *resources)
{
+ int rc;
struct apei_res *res;
list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
list_for_each_entry(res, &resources->ioport, list)
release_region(res->start, res->end - res->start);
- apei_resources_sub(&apei_resources_all, resources);
+ rc = apei_resources_sub(&apei_resources_all, resources);
+ if (rc)
+ pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);
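Because apei_resources_request() now also propagates failures from the subtract and merge steps, callers only need to check its return value; on failure the function releases whatever it had already requested. A hedged sketch of a call site (the resource list and descriptor string are illustrative):

	int rc;

	rc = apei_resources_request(&einj_resources, "APEI EINJ");
	if (rc)
		return rc;	/* nothing to release, the request rolled itself back */

	/* ... use the registers ... */

	apei_resources_release(&einj_resources);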
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 465c885938e..cf29df69380 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
static int einj_check_table(struct acpi_table_einj *einj_tab)
{
- if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ if ((einj_tab->header_length !=
+ (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+ && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
return -EINVAL;
if (einj_tab->header.length < sizeof(struct acpi_table_einj))
return -EINVAL;
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
new file mode 100644
index 00000000000..da1228a9a54
--- /dev/null
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -0,0 +1,211 @@
+/*
+ * APEI Error Record Serialization Table debug support
+ *
+ * ERST is a way provided by APEI to save and retrieve hardware error
+ * information to and from a persistent store. This file provide the
+ * debugging/testing support for ERST kernel support and firmware
+ * implementation.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <acpi/apei.h>
+#include <linux/miscdevice.h>
+
+#include "apei-internal.h"
+
+#define ERST_DBG_PFX "ERST DBG: "
+
+#define ERST_DBG_RECORD_LEN_MAX 4096
+
+static void *erst_dbg_buf;
+static unsigned int erst_dbg_buf_len;
+
+/* Prevent erst_dbg_read/write from being invoked concurrently */
+static DEFINE_MUTEX(erst_dbg_mutex);
+
+static int erst_dbg_open(struct inode *inode, struct file *file)
+{
+ if (erst_disable)
+ return -ENODEV;
+
+ return nonseekable_open(inode, file);
+}
+
+static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int rc;
+ u64 record_id;
+ u32 record_count;
+
+ switch (cmd) {
+ case APEI_ERST_CLEAR_RECORD:
+ rc = copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id));
+ if (rc)
+ return -EFAULT;
+ return erst_clear(record_id);
+ case APEI_ERST_GET_RECORD_COUNT:
+ rc = erst_get_record_count();
+ if (rc < 0)
+ return rc;
+ record_count = rc;
+ rc = put_user(record_count, (u32 __user *)arg);
+ if (rc)
+ return rc;
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ int rc;
+ ssize_t len = 0;
+ u64 id;
+
+ if (*off != 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&erst_dbg_mutex) != 0)
+ return -EINTR;
+
+retry_next:
+ rc = erst_get_next_record_id(&id);
+ if (rc)
+ goto out;
+ /* no more records */
+ if (id == APEI_ERST_INVALID_RECORD_ID)
+ goto out;
+retry:
+ rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len);
+ /* The record may be cleared by others, try read next record */
+ if (rc == -ENOENT)
+ goto retry_next;
+ if (rc < 0)
+ goto out;
+ if (len > ERST_DBG_RECORD_LEN_MAX) {
+ pr_warning(ERST_DBG_PFX
+ "Record (ID: 0x%llx) length is too long: %zd\n",
+ id, len);
+ rc = -EIO;
+ goto out;
+ }
+ if (len > erst_dbg_buf_len) {
+ void *p;
+ rc = -ENOMEM;
+ p = kmalloc(len, GFP_KERNEL);
+ if (!p)
+ goto out;
+ kfree(erst_dbg_buf);
+ erst_dbg_buf = p;
+ erst_dbg_buf_len = len;
+ goto retry;
+ }
+
+ rc = -EINVAL;
+ if (len > usize)
+ goto out;
+
+ rc = -EFAULT;
+ if (copy_to_user(ubuf, erst_dbg_buf, len))
+ goto out;
+ rc = 0;
+out:
+ mutex_unlock(&erst_dbg_mutex);
+ return rc ? rc : len;
+}
+
+static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ int rc;
+ struct cper_record_header *rcd;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (usize > ERST_DBG_RECORD_LEN_MAX) {
+ pr_err(ERST_DBG_PFX "Too long record to be written\n");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&erst_dbg_mutex))
+ return -EINTR;
+ if (usize > erst_dbg_buf_len) {
+ void *p;
+ rc = -ENOMEM;
+ p = kmalloc(usize, GFP_KERNEL);
+ if (!p)
+ goto out;
+ kfree(erst_dbg_buf);
+ erst_dbg_buf = p;
+ erst_dbg_buf_len = usize;
+ }
+ rc = copy_from_user(erst_dbg_buf, ubuf, usize);
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rcd = erst_dbg_buf;
+ rc = -EINVAL;
+ if (rcd->record_length != usize)
+ goto out;
+
+ rc = erst_write(erst_dbg_buf);
+
+out:
+ mutex_unlock(&erst_dbg_mutex);
+ return rc < 0 ? rc : usize;
+}
+
+static const struct file_operations erst_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = erst_dbg_open,
+ .read = erst_dbg_read,
+ .write = erst_dbg_write,
+ .unlocked_ioctl = erst_dbg_ioctl,
+};
+
+static struct miscdevice erst_dbg_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "erst_dbg",
+ .fops = &erst_dbg_ops,
+};
+
+static __init int erst_dbg_init(void)
+{
+ return misc_register(&erst_dbg_dev);
+}
+
+static __exit void erst_dbg_exit(void)
+{
+ misc_deregister(&erst_dbg_dev);
+ kfree(erst_dbg_buf);
+}
+
+module_init(erst_dbg_init);
+module_exit(erst_dbg_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error Record Serialization Table debug support");
+MODULE_LICENSE("GPL");
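
For illustration, a minimal userspace sketch of how the new /dev/erst_dbg device could be exercised, assuming the APEI_ERST_GET_RECORD_COUNT ioctl definition from apei-internal.h is made visible to the program (it is kernel-internal in this patch); the device path follows the misc device registered above, and the buffer size mirrors ERST_DBG_RECORD_LEN_MAX:

/* Sketch, not part of the patch: query the record count, then pull one
 * CPER record from /dev/erst_dbg (each read() returns one whole record,
 * see erst_dbg_read() above). Assumes the APEI_ERST_* ioctl macro is
 * available to userspace. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define ERST_DBG_PATH "/dev/erst_dbg"	/* misc device registered above */
#define RECORD_LEN_MAX 4096		/* mirrors ERST_DBG_RECORD_LEN_MAX */

int main(void)
{
	unsigned int count;
	char buf[RECORD_LEN_MAX];
	ssize_t len;
	int fd = open(ERST_DBG_PATH, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* APEI_ERST_GET_RECORD_COUNT is defined in apei-internal.h */
	if (ioctl(fd, APEI_ERST_GET_RECORD_COUNT, &count) == 0)
		printf("%u error record(s) in the persistent store\n", count);
	len = read(fd, buf, sizeof(buf));
	if (len > 0)
		printf("read a %zd byte record\n", len);
	close(fd);
	return 0;
}
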
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 864dd46c346..1211c03149e 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -2,7 +2,7 @@
* APEI Error Record Serialization Table support
*
* ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
*
* For more information about ERST, please refer to ACPI Specification
* version 4.0, section 17.4.
@@ -33,6 +33,7 @@
#include <linux/uaccess.h>
#include <linux/cper.h>
#include <linux/nmi.h>
+#include <linux/hardirq.h>
#include <acpi/apei.h>
#include "apei-internal.h"
@@ -265,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
{
int rc;
u64 offset;
+ void *src, *dst;
+
+ /* ioremap does not work in interrupt context */
+ if (in_interrupt()) {
+		pr_warning(ERST_PFX
+			   "MOVE_DATA cannot be used in interrupt context\n");
+ return -EBUSY;
+ }
rc = __apei_exec_read_register(entry, &offset);
if (rc)
return rc;
- memmove((void *)ctx->dst_base + offset,
- (void *)ctx->src_base + offset,
- ctx->var2);
+
+ src = ioremap(ctx->src_base + offset, ctx->var2);
+ if (!src)
+ return -ENOMEM;
+	dst = ioremap(ctx->dst_base + offset, ctx->var2);
+	if (!dst) {
+		iounmap(src);
+		return -ENOMEM;
+	}
+
+ memmove(dst, src, ctx->var2);
+
+ iounmap(src);
+ iounmap(dst);
return 0;
}
@@ -749,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
static int erst_check_table(struct acpi_table_erst *erst_tab)
{
- if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ if ((erst_tab->header_length !=
+ (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+	    && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
return -EINVAL;
if (erst_tab->header.length < sizeof(struct acpi_table_erst))
return -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index fd0cc016a09..0d505e59214 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -41,6 +41,8 @@
#include <linux/interrupt.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
@@ -87,6 +89,7 @@ struct ghes {
* used for that.
*/
static LIST_HEAD(ghes_sci);
+static DEFINE_MUTEX(ghes_list_mutex);
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
@@ -132,26 +135,26 @@ static void ghes_fini(struct ghes *ghes)
}
enum {
- GHES_SER_NO = 0x0,
- GHES_SER_CORRECTED = 0x1,
- GHES_SER_RECOVERABLE = 0x2,
- GHES_SER_PANIC = 0x3,
+ GHES_SEV_NO = 0x0,
+ GHES_SEV_CORRECTED = 0x1,
+ GHES_SEV_RECOVERABLE = 0x2,
+ GHES_SEV_PANIC = 0x3,
};
static inline int ghes_severity(int severity)
{
switch (severity) {
- case CPER_SER_INFORMATIONAL:
- return GHES_SER_NO;
- case CPER_SER_CORRECTED:
- return GHES_SER_CORRECTED;
- case CPER_SER_RECOVERABLE:
- return GHES_SER_RECOVERABLE;
- case CPER_SER_FATAL:
- return GHES_SER_PANIC;
+ case CPER_SEV_INFORMATIONAL:
+ return GHES_SEV_NO;
+ case CPER_SEV_CORRECTED:
+ return GHES_SEV_CORRECTED;
+ case CPER_SEV_RECOVERABLE:
+ return GHES_SEV_RECOVERABLE;
+ case CPER_SEV_FATAL:
+ return GHES_SEV_PANIC;
default:
 		/* Unknown, go panic */
- return GHES_SER_PANIC;
+ return GHES_SEV_PANIC;
}
}
@@ -237,16 +240,16 @@ static void ghes_clear_estatus(struct ghes *ghes)
static void ghes_do_proc(struct ghes *ghes)
{
- int ser, processed = 0;
+ int sev, processed = 0;
struct acpi_hest_generic_data *gdata;
- ser = ghes_severity(ghes->estatus->error_severity);
+ sev = ghes_severity(ghes->estatus->error_severity);
apei_estatus_for_each_section(ghes->estatus, gdata) {
#ifdef CONFIG_X86_MCE
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
apei_mce_report_mem_error(
- ser == GHES_SER_CORRECTED,
+ sev == GHES_SEV_CORRECTED,
(struct cper_sec_mem_err *)(gdata+1));
processed = 1;
}
@@ -293,18 +296,15 @@ static struct notifier_block ghes_notifier_sci = {
.notifier_call = ghes_notify_sci,
};
-static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
+static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
- int rc = 0;
+ int rc = -EINVAL;
- if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
- return 0;
-
- generic = (struct acpi_hest_generic *)hest_hdr;
+ generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
if (!generic->enabled)
- return 0;
+ return -ENODEV;
if (generic->error_block_length <
sizeof(struct acpi_hest_generic_status)) {
@@ -327,62 +327,91 @@ static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
ghes = NULL;
goto err;
}
- switch (generic->notify.type) {
- case ACPI_HEST_NOTIFY_POLLED:
- pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via POLL is not supported!\n",
- generic->header.source_id);
- break;
- case ACPI_HEST_NOTIFY_EXTERNAL:
- case ACPI_HEST_NOTIFY_LOCAL:
- pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via IRQ is not supported!\n",
- generic->header.source_id);
- break;
- case ACPI_HEST_NOTIFY_SCI:
+ if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) {
+ mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_sci))
register_acpi_hed_notifier(&ghes_notifier_sci);
list_add_rcu(&ghes->list, &ghes_sci);
- break;
- case ACPI_HEST_NOTIFY_NMI:
- pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via NMI is not supported!\n",
- generic->header.source_id);
- break;
- default:
- pr_warning(FW_WARN GHES_PFX
- "Unknown notification type: %u for generic hardware error source: %d\n",
- generic->notify.type, generic->header.source_id);
- break;
+ mutex_unlock(&ghes_list_mutex);
+ } else {
+		const char *notify = NULL;
+
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ notify = "POLL";
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_LOCAL:
+ notify = "IRQ";
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ notify = "NMI";
+ break;
+ }
+ if (notify) {
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via %s is not supported!\n",
+ generic->header.source_id, notify);
+ } else {
+ pr_warning(FW_WARN GHES_PFX
+"Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ }
+ rc = -ENODEV;
+ goto err;
}
+ platform_set_drvdata(ghes_dev, ghes);
return 0;
err:
- if (ghes)
+ if (ghes) {
ghes_fini(ghes);
+ kfree(ghes);
+ }
return rc;
}
-static void ghes_cleanup(void)
+static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
- struct ghes *ghes, *nghes;
+ struct ghes *ghes;
+ struct acpi_hest_generic *generic;
- if (!list_empty(&ghes_sci))
- unregister_acpi_hed_notifier(&ghes_notifier_sci);
+ ghes = platform_get_drvdata(ghes_dev);
+ generic = ghes->generic;
+
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_SCI:
+ mutex_lock(&ghes_list_mutex);
+ list_del_rcu(&ghes->list);
+ if (list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+ mutex_unlock(&ghes_list_mutex);
+ break;
+ default:
+ BUG();
+ break;
+ }
synchronize_rcu();
+ ghes_fini(ghes);
+ kfree(ghes);
- list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
- list_del(&ghes->list);
- ghes_fini(ghes);
- kfree(ghes);
- }
+ platform_set_drvdata(ghes_dev, NULL);
+
+ return 0;
}
+static struct platform_driver ghes_platform_driver = {
+ .driver = {
+ .name = "GHES",
+ .owner = THIS_MODULE,
+ },
+ .probe = ghes_probe,
+ .remove = ghes_remove,
+};
+
static int __init ghes_init(void)
{
- int rc;
-
if (acpi_disabled)
return -ENODEV;
@@ -391,32 +420,12 @@ static int __init ghes_init(void)
return -EINVAL;
}
- rc = apei_hest_parse(hest_ghes_parse, NULL);
- if (rc) {
- pr_err(GHES_PFX
- "Error during parsing HEST generic hardware error sources.\n");
- goto err_cleanup;
- }
-
- if (list_empty(&ghes_sci)) {
- pr_info(GHES_PFX
- "No functional generic hardware error sources.\n");
- rc = -ENODEV;
- goto err_cleanup;
- }
-
- pr_info(GHES_PFX
- "Generic Hardware Error Source support is initialized.\n");
-
- return 0;
-err_cleanup:
- ghes_cleanup();
- return rc;
+ return platform_driver_register(&ghes_platform_driver);
}
static void __exit ghes_exit(void)
{
- ghes_cleanup();
+ platform_driver_unregister(&ghes_platform_driver);
}
module_init(ghes_init);
@@ -425,3 +434,4 @@ module_exit(ghes_exit);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:GHES");
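
The probe/remove pair above is matched by name against the "GHES" platform devices that hest.c creates later in this patch. A stripped-down sketch of that name-based pairing, with the "example" name and the pointer-sized payload as illustrative stand-ins (not part of the patch):

/* Illustrative only: the platform bus binds device and driver by name,
 * which is the mechanism the GHES rework above relies on. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* platform_data carries a pointer-sized payload, as in hest_parse_ghes() */
	void *payload = *(void **)pdev->dev.platform_data;

	dev_info(&pdev->dev, "bound, payload %p\n", payload);
	platform_set_drvdata(pdev, payload);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",	/* must match platform_device_alloc("example", id) */
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	/* the matching device would be created with platform_device_alloc("example", 0) */
	return platform_driver_register(&example_driver);
}
module_init(example_init);
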
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index e7f40d362cb..1a3508a7fe0 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -34,6 +34,7 @@
#include <linux/kdebug.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <acpi/apei.h>
#include "apei-internal.h"
@@ -47,11 +48,6 @@ EXPORT_SYMBOL_GPL(hest_disable);
static struct acpi_table_hest *hest_tab;
-static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
-{
- return 0;
-}
-
static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
[ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
[ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
@@ -125,6 +121,72 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
}
EXPORT_SYMBOL_GPL(apei_hest_parse);
+struct ghes_arr {
+ struct platform_device **ghes_devs;
+ unsigned int count;
+};
+
+static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
+{
+ int *count = data;
+
+ if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
+ (*count)++;
+ return 0;
+}
+
+static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct platform_device *ghes_dev;
+ struct ghes_arr *ghes_arr = data;
+ int rc;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+ return 0;
+
+ if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
+ return 0;
+ ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
+ if (!ghes_dev)
+ return -ENOMEM;
+
+ rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+ if (rc)
+ goto err;
+
+ rc = platform_device_add(ghes_dev);
+ if (rc)
+ goto err;
+ ghes_arr->ghes_devs[ghes_arr->count++] = ghes_dev;
+
+ return 0;
+err:
+ platform_device_put(ghes_dev);
+ return rc;
+}
+
+static int hest_ghes_dev_register(unsigned int ghes_count)
+{
+ int rc, i;
+ struct ghes_arr ghes_arr;
+
+ ghes_arr.count = 0;
+ ghes_arr.ghes_devs = kmalloc(sizeof(void *) * ghes_count, GFP_KERNEL);
+ if (!ghes_arr.ghes_devs)
+ return -ENOMEM;
+
+ rc = apei_hest_parse(hest_parse_ghes, &ghes_arr);
+ if (rc)
+ goto err;
+out:
+ kfree(ghes_arr.ghes_devs);
+ return rc;
+err:
+ for (i = 0; i < ghes_arr.count; i++)
+ platform_device_unregister(ghes_arr.ghes_devs[i]);
+ goto out;
+}
+
static int __init setup_hest_disable(char *str)
{
hest_disable = 1;
@@ -137,6 +199,7 @@ static int __init hest_init(void)
{
acpi_status status;
int rc = -ENODEV;
+ unsigned int ghes_count = 0;
if (acpi_disabled)
goto err;
@@ -158,7 +221,11 @@ static int __init hest_init(void)
goto err;
}
- rc = apei_hest_parse(hest_void_parse, NULL);
+ rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
+ if (rc)
+ goto err;
+
+ rc = hest_ghes_dev_register(ghes_count);
if (rc)
goto err;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 8f8bd736d4f..542e5390389 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
list_add_tail_rcu(&map->list, &acpi_iomaps);
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- return vaddr + (paddr - pg_off);
+ return map->vaddr + (paddr - map->paddr);
err_unmap:
iounmap(vaddr);
return NULL;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dc58402b0a1..98417201e9c 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 2bb28b9d91c..f7619600270 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
{
printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
acpi_osi_setup("!Windows 2006");
+ acpi_osi_setup("!Windows 2006 SP1");
+ acpi_osi_setup("!Windows 2006 SP2");
return 0;
}
static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba Satellite L355",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+ },
+ },
+ {
.callback = dmi_disable_osi_win7,
.ident = "ASUS K50IJ",
.matches = {
@@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba P305D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c1d23cd7165..310e3b9749c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
static int set_power_nocheck(const struct dmi_system_id *id)
{
printk(KERN_NOTICE PREFIX "%s detected - "
- "disable power check in power transistion\n", id->ident);
+ "disable power check in power transition\n", id->ident);
acpi_power_nocheck = 1;
return 0;
}
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
- * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+	 * Invoke DSDT corruption work-around on all Toshiba Satellite models.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
.callback = set_copy_dsdt,
- .ident = "TOSHIBA Satellite A505",
+ .ident = "TOSHIBA Satellite",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
- },
- },
- {
- .callback = set_copy_dsdt,
- .ident = "TOSHIBA Satellite L505D",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
},
},
{}
@@ -1027,15 +1019,15 @@ static int __init acpi_init(void)
/*
* If the laptop falls into the DMI check table, the power state check
- * will be disabled in the course of device power transistion.
+ * will be disabled in the course of device power transition.
*/
dmi_check_system(power_nocheck_dmi_table);
acpi_scan_init();
acpi_ec_init();
acpi_power_init();
- acpi_system_init();
- acpi_debug_init();
+ acpi_sysfs_init();
+ acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
return result;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 7d857dabdde..1575a9b51f1 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -424,8 +424,7 @@ static int acpi_button_add(struct acpi_device *device)
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
- device->wakeup.gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
+ device->wakeup.gpe_number);
device->wakeup.run_wake_count++;
device->wakeup.state.enabled = 1;
}
@@ -448,8 +447,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
if (device->wakeup.flags.valid) {
acpi_disable_gpe(device->wakeup.gpe_device,
- device->wakeup.gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
+ device->wakeup.gpe_number);
device->wakeup.run_wake_count--;
device->wakeup.state.enabled = 0;
}
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
deleted file mode 100644
index 146135e7a6a..00000000000
--- a/drivers/acpi/debug.c
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * debug.c - ACPI debug interface to userspace.
- */
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/moduleparam.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#include <acpi/acpi_drivers.h>
-
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("debug");
-
-struct acpi_dlayer {
- const char *name;
- unsigned long value;
-};
-struct acpi_dlevel {
- const char *name;
- unsigned long value;
-};
-#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
-
-static const struct acpi_dlayer acpi_debug_layers[] = {
- ACPI_DEBUG_INIT(ACPI_UTILITIES),
- ACPI_DEBUG_INIT(ACPI_HARDWARE),
- ACPI_DEBUG_INIT(ACPI_EVENTS),
- ACPI_DEBUG_INIT(ACPI_TABLES),
- ACPI_DEBUG_INIT(ACPI_NAMESPACE),
- ACPI_DEBUG_INIT(ACPI_PARSER),
- ACPI_DEBUG_INIT(ACPI_DISPATCHER),
- ACPI_DEBUG_INIT(ACPI_EXECUTER),
- ACPI_DEBUG_INIT(ACPI_RESOURCES),
- ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
- ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
- ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
- ACPI_DEBUG_INIT(ACPI_COMPILER),
- ACPI_DEBUG_INIT(ACPI_TOOLS),
-
- ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
-};
-
-static const struct acpi_dlevel acpi_debug_levels[] = {
- ACPI_DEBUG_INIT(ACPI_LV_INIT),
- ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
- ACPI_DEBUG_INIT(ACPI_LV_INFO),
-
- ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
- ACPI_DEBUG_INIT(ACPI_LV_PARSE),
- ACPI_DEBUG_INIT(ACPI_LV_LOAD),
- ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
- ACPI_DEBUG_INIT(ACPI_LV_EXEC),
- ACPI_DEBUG_INIT(ACPI_LV_NAMES),
- ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
- ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
- ACPI_DEBUG_INIT(ACPI_LV_TABLES),
- ACPI_DEBUG_INIT(ACPI_LV_VALUES),
- ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
- ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
- ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
- ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
-
- ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
- ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
- ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
-
- ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
- ACPI_DEBUG_INIT(ACPI_LV_THREADS),
- ACPI_DEBUG_INIT(ACPI_LV_IO),
- ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
-
- ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
- ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
- ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
- ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
-};
-
-/* --------------------------------------------------------------------------
- FS Interface (/sys)
- -------------------------------------------------------------------------- */
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
- int result = 0;
- int i;
-
- result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
-
- for(i = 0; i <ARRAY_SIZE(acpi_debug_layers); i++) {
- result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
- acpi_debug_layers[i].name,
- acpi_debug_layers[i].value,
- (acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' ');
- }
- result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
- ACPI_ALL_DRIVERS,
- (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
- ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
- ACPI_ALL_DRIVERS) == 0 ? ' ' : '-');
- result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer);
-
- return result;
-}
-
-static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
- int result = 0;
- int i;
-
- result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
-
- for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
- result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
- acpi_debug_levels[i].name,
- acpi_debug_levels[i].value,
- (acpi_dbg_level & acpi_debug_levels[i].
- value) ? '*' : ' ');
- }
- result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
- acpi_dbg_level);
-
- return result;
-}
-
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
-
-static char trace_method_name[6];
-module_param_string(trace_method_name, trace_method_name, 6, 0644);
-static unsigned int trace_debug_layer;
-module_param(trace_debug_layer, uint, 0644);
-static unsigned int trace_debug_level;
-module_param(trace_debug_level, uint, 0644);
-
-static int param_set_trace_state(const char *val, struct kernel_param *kp)
-{
- int result = 0;
-
- if (!strncmp(val, "enable", strlen("enable") - 1)) {
- result = acpi_debug_trace(trace_method_name, trace_debug_level,
- trace_debug_layer, 0);
- if (result)
- result = -EBUSY;
- goto exit;
- }
-
- if (!strncmp(val, "disable", strlen("disable") - 1)) {
- int name = 0;
- result = acpi_debug_trace((char *)&name, trace_debug_level,
- trace_debug_layer, 0);
- if (result)
- result = -EBUSY;
- goto exit;
- }
-
- if (!strncmp(val, "1", 1)) {
- result = acpi_debug_trace(trace_method_name, trace_debug_level,
- trace_debug_layer, 1);
- if (result)
- result = -EBUSY;
- goto exit;
- }
-
- result = -EINVAL;
-exit:
- return result;
-}
-
-static int param_get_trace_state(char *buffer, struct kernel_param *kp)
-{
- if (!acpi_gbl_trace_method_name)
- return sprintf(buffer, "disable");
- else {
- if (acpi_gbl_trace_flags & 1)
- return sprintf(buffer, "1");
- else
- return sprintf(buffer, "enable");
- }
- return 0;
-}
-
-module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
- NULL, 0644);
-
-/* --------------------------------------------------------------------------
- DebugFS Interface
- -------------------------------------------------------------------------- */
-
-static ssize_t cm_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- static char *buf;
- static int uncopied_bytes;
- struct acpi_table_header table;
- acpi_status status;
-
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
- return -EINVAL;
- if (copy_from_user(&table, user_buf,
- sizeof(struct acpi_table_header)))
- return -EFAULT;
- uncopied_bytes = table.length;
- buf = kzalloc(uncopied_bytes, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- if (uncopied_bytes < count) {
- kfree(buf);
- return -EINVAL;
- }
-
- if (copy_from_user(buf + (*ppos), user_buf, count)) {
- kfree(buf);
- return -EFAULT;
- }
-
- uncopied_bytes -= count;
- *ppos += count;
-
- if (!uncopied_bytes) {
- status = acpi_install_method(buf);
- kfree(buf);
- if (ACPI_FAILURE(status))
- return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
-
- return count;
-}
-
-static const struct file_operations cm_fops = {
- .write = cm_write,
-};
-
-static int acpi_debugfs_init(void)
-{
- struct dentry *acpi_dir, *cm_dentry;
-
- acpi_dir = debugfs_create_dir("acpi", NULL);
- if (!acpi_dir)
- goto err;
-
- cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
- acpi_dir, NULL, &cm_fops);
- if (!cm_dentry)
- goto err;
-
- return 0;
-
-err:
- if (acpi_dir)
- debugfs_remove(acpi_dir);
- return -EINVAL;
-}
-
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
-#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
-
-static int acpi_system_debug_proc_show(struct seq_file *m, void *v)
-{
- unsigned int i;
-
- seq_printf(m, "%-25s\tHex SET\n", "Description");
-
- switch ((unsigned long)m->private) {
- case 0:
- for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
- seq_printf(m, "%-25s\t0x%08lX [%c]\n",
- acpi_debug_layers[i].name,
- acpi_debug_layers[i].value,
- (acpi_dbg_layer & acpi_debug_layers[i].
- value) ? '*' : ' ');
- }
- seq_printf(m, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
- ACPI_ALL_DRIVERS,
- (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
- ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
- ACPI_ALL_DRIVERS) ==
- 0 ? ' ' : '-');
- seq_printf(m,
- "--\ndebug_layer = 0x%08X (* = enabled, - = partial)\n",
- acpi_dbg_layer);
- break;
- case 1:
- for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
- seq_printf(m, "%-25s\t0x%08lX [%c]\n",
- acpi_debug_levels[i].name,
- acpi_debug_levels[i].value,
- (acpi_dbg_level & acpi_debug_levels[i].
- value) ? '*' : ' ');
- }
- seq_printf(m, "--\ndebug_level = 0x%08X (* = enabled)\n",
- acpi_dbg_level);
- break;
- }
- return 0;
-}
-
-static int acpi_system_debug_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_debug_proc_show, PDE(inode)->data);
-}
-
-static ssize_t acpi_system_debug_proc_write(struct file *file,
- const char __user * buffer,
- size_t count, loff_t *pos)
-{
- char debug_string[12] = { '\0' };
-
-
- if (count > sizeof(debug_string) - 1)
- return -EINVAL;
-
- if (copy_from_user(debug_string, buffer, count))
- return -EFAULT;
-
- debug_string[count] = '\0';
-
- switch ((unsigned long)PDE(file->f_path.dentry->d_inode)->data) {
- case 0:
- acpi_dbg_layer = simple_strtoul(debug_string, NULL, 0);
- break;
- case 1:
- acpi_dbg_level = simple_strtoul(debug_string, NULL, 0);
- break;
- default:
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations acpi_system_debug_proc_fops = {
- .owner = THIS_MODULE,
- .open = acpi_system_debug_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = acpi_system_debug_proc_write,
-};
-#endif
-
-int __init acpi_procfs_init(void)
-{
-#ifdef CONFIG_ACPI_PROCFS
- struct proc_dir_entry *entry;
- int error = 0;
- char *name;
-
- /* 'debug_layer' [R/W] */
- name = ACPI_SYSTEM_FILE_DEBUG_LAYER;
- entry = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_debug_proc_fops,
- (void *)0);
- if (!entry)
- goto Error;
-
- /* 'debug_level' [R/W] */
- name = ACPI_SYSTEM_FILE_DEBUG_LEVEL;
- entry = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_debug_proc_fops,
- (void *)1);
- if (!entry)
- goto Error;
-
- Done:
- return error;
-
- Error:
- remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LEVEL, acpi_root_dir);
- remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LAYER, acpi_root_dir);
- error = -ENODEV;
- goto Done;
-#else
- return 0;
-#endif
-}
-
-int __init acpi_debug_init(void)
-{
- acpi_debugfs_init();
- acpi_procfs_init();
- return 0;
-}
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
new file mode 100644
index 00000000000..7de27d49c4b
--- /dev/null
+++ b/drivers/acpi/debugfs.c
@@ -0,0 +1,93 @@
+/*
+ * debugfs.c - ACPI debugfs interface to userspace.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <acpi/acpi_drivers.h>
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("debugfs");
+
+
+/* /sys/modules/acpi/parameters/aml_debug_output */
+
+module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
+ bool, 0644);
+MODULE_PARM_DESC(aml_debug_output,
+ "To enable/disable the ACPI Debug Object output.");
+
+/* /sys/kernel/debug/acpi/custom_method */
+
+static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static int uncopied_bytes;
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = table.length;
+ buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (uncopied_bytes < count) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+};
+
+int __init acpi_debugfs_init(void)
+{
+ struct dentry *acpi_dir, *cm_dentry;
+
+ acpi_dir = debugfs_create_dir("acpi", NULL);
+ if (!acpi_dir)
+ goto err;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
+ acpi_dir, NULL, &cm_fops);
+ if (!cm_dentry)
+ goto err;
+
+ return 0;
+
+err:
+ if (acpi_dir)
+ debugfs_remove(acpi_dir);
+ return -EINVAL;
+}
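
To show how the custom_method node above is consumed, here is a hedged userspace sketch that pushes a compiled AML method (for example, iasl output) into /sys/kernel/debug/acpi/custom_method; the whole image is written out, which satisfies cm_write()'s requirement that the first write contain at least the table header and that the total match the header's length field:

/* Sketch, not part of the patch: feed a compiled AML table to the
 * custom_method debugfs file created above. */
#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *in, *out;
	char buf[4096];
	size_t n;

	if (argc != 2) {
		fprintf(stderr, "usage: %s method.aml\n", argv[0]);
		return 1;
	}
	in = fopen(argv[1], "rb");
	out = fopen("/sys/kernel/debug/acpi/custom_method", "wb");
	if (!in || !out) {
		perror("fopen");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0) {
		if (fwrite(buf, 1, n, out) != n) {
			perror("fwrite");
			return 1;
		}
	}
	fclose(in);
	return fclose(out) ? 1 : 0;
}
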
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 1fa0aafebe2..f31291ba94d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -303,11 +303,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
pr_debug(PREFIX "transaction start\n");
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- /*
- * It has to be disabled at the hardware level regardless of the
- * GPE reference counting, so that it doesn't trigger.
- */
- acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+ /* It has to be disabled, so that it doesn't trigger. */
+ acpi_disable_gpe(NULL, ec->gpe);
}
status = acpi_ec_transaction_unlocked(ec, t);
@@ -316,12 +313,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
ec_check_sci_sync(ec, acpi_ec_read_status(ec));
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
msleep(1);
- /*
- * It is safe to enable the GPE outside of the transaction. Use
- * acpi_set_gpe() for that, since we used it to disable the GPE
- * above.
- */
- acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+ /* It is safe to enable the GPE outside of the transaction. */
+ acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
pr_info(PREFIX "GPE storm detected, "
"transactions will use polling mode\n");
@@ -746,7 +739,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
if (ACPI_FAILURE(status))
return -ENODEV;
- acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_enable_gpe(NULL, ec->gpe);
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
@@ -763,7 +756,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
} else {
acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler);
- acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_disable_gpe(NULL, ec->gpe);
return -ENODEV;
}
}
@@ -774,7 +767,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
static void ec_remove_handlers(struct acpi_ec *ec)
{
- acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err(PREFIX "failed to remove space handler\n");
@@ -1018,22 +1011,6 @@ error:
return -ENODEV;
}
-static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
-{
- struct acpi_ec *ec = acpi_driver_data(device);
- /* Stop using the GPE, but keep it reference counted. */
- acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
- return 0;
-}
-
-static int acpi_ec_resume(struct acpi_device *device)
-{
- struct acpi_ec *ec = acpi_driver_data(device);
- /* Enable the GPE again, but don't reference count it once more. */
- acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
- return 0;
-}
-
static struct acpi_driver acpi_ec_driver = {
.name = "ec",
.class = ACPI_EC_CLASS,
@@ -1041,8 +1018,6 @@ static struct acpi_driver acpi_ec_driver = {
.ops = {
.add = acpi_ec_add,
.remove = acpi_ec_remove,
- .suspend = acpi_ec_suspend,
- .resume = acpi_ec_resume,
},
};
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8a3b840c0bb..d94d2953c97 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
acpi_bus_unregister_driver(&acpi_fan_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
return;
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 4af6301601e..78b0164c35b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -100,7 +100,8 @@ do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
status = acpi_get_object_info(handle, &info);
if (ACPI_SUCCESS(status)) {
- if (info->address == find->address)
+ if ((info->address == find->address)
+ && (info->valid & ACPI_VALID_ADR))
find->handle = handle;
kfree(info);
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 8ae27264a00..a212bfeddf8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -27,12 +27,12 @@
int init_acpi_device_notify(void);
int acpi_scan_init(void);
-int acpi_system_init(void);
+int acpi_sysfs_init(void);
-#ifdef CONFIG_ACPI_DEBUG
-int acpi_debug_init(void);
+#ifdef CONFIG_DEBUG_FS
+int acpi_debugfs_init(void);
#else
-static inline int acpi_debug_init(void) { return 0; }
+static inline int acpi_debugfs_init(void) { return 0; }
#endif
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index b0337d31460..5718566e00f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -255,12 +255,10 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
- struct acpi_table_srat *srat;
-
if (!table)
return -EINVAL;
- srat = (struct acpi_table_srat *)table;
+ /* Real work done in acpi_table_parse_srat below. */
return 0;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 78418ce4fc7..65b25a303b8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -141,15 +141,14 @@ static struct osi_linux {
static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
{
- struct resource *res;
-
if (!addr->address || !length)
return;
+ /* Resources are never freed */
if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- res = request_region(addr->address, length, desc);
+ request_region(addr->address, length, desc);
else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- res = request_mem_region(addr->address, length, desc);
+ request_mem_region(addr->address, length, desc);
}
static int __init acpi_reserve_resources(void)
@@ -191,36 +190,11 @@ acpi_status __init acpi_os_initialize(void)
return AE_OK;
}
-static void bind_to_cpu0(struct work_struct *work)
-{
- set_cpus_allowed_ptr(current, cpumask_of(0));
- kfree(work);
-}
-
-static void bind_workqueue(struct workqueue_struct *wq)
-{
- struct work_struct *work;
-
- work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
- INIT_WORK(work, bind_to_cpu0);
- queue_work(wq, work);
-}
-
acpi_status acpi_os_initialize1(void)
{
- /*
- * On some machines, a software-initiated SMI causes corruption unless
- * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
- * typically it's done in GPE-related methods that are run via
- * workqueues, so we can avoid the known corruption cases by binding
- * the workqueues to CPU 0.
- */
- kacpid_wq = create_singlethread_workqueue("kacpid");
- bind_workqueue(kacpid_wq);
- kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
- bind_workqueue(kacpi_notify_wq);
- kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
- bind_workqueue(kacpi_hotplug_wq);
+ kacpid_wq = create_workqueue("kacpid");
+ kacpi_notify_wq = create_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
@@ -766,7 +740,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
else
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- ret = queue_work(queue, &dpc->work);
+ /*
+ * On some machines, a software-initiated SMI causes corruption unless
+ * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
+ * typically it's done in GPE-related methods that are run via
+ * workqueues, so we can avoid the known corruption cases by always
+ * queueing on CPU 0.
+ */
+ ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
printk(KERN_ERR PREFIX
@@ -1064,26 +1045,6 @@ static int __init acpi_serialize_setup(char *str)
__setup("acpi_serialize", acpi_serialize_setup);
-/*
- * Wake and Run-Time GPES are expected to be separate.
- * We disable wake-GPEs at run-time to prevent spurious
- * interrupts.
- *
- * However, if a system exists that shares Wake and
- * Run-time events on the same GPE this flag is available
- * to tell Linux to keep the wake-time GPEs enabled at run-time.
- */
-static int __init acpi_wake_gpes_always_on_setup(char *str)
-{
- printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
-
- acpi_gbl_leave_wake_gpes_disabled = FALSE;
-
- return 1;
-}
-
-__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
-
/* Check of resource interference between native drivers and ACPI
* OperationRegions (SystemIO and System Memory only).
* IO ports and memory declared in ACPI might be used by the ACPI subsystem
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 4eac59393ed..3ba8d1f44a7 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -225,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
return status;
}
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+ u32 support,
+ u32 *control)
{
acpi_status status;
- u32 support_set, result, capbuf[3];
+ u32 result, capbuf[3];
+
+ support &= OSC_PCI_SUPPORT_MASKS;
+ support |= root->osc_support_set;
- /* do _OSC query for all possible controls */
- support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = support_set;
- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_SUPPORT_TYPE] = support;
+ if (control) {
+ *control &= OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ } else {
+ /* Run _OSC query for all possible controls. */
+ capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ }
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
- root->osc_support_set = support_set;
- root->osc_control_qry = result;
- root->osc_queried = 1;
+ root->osc_support_set = support;
+ if (control)
+ *control = result;
}
return status;
}
@@ -254,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
if (ACPI_FAILURE(status))
return status;
mutex_lock(&osc_lock);
- status = acpi_pci_query_osc(root, flags);
+ status = acpi_pci_query_osc(root, flags, NULL);
mutex_unlock(&osc_lock);
return status;
}
@@ -364,55 +373,70 @@ out:
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
/**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits whose control is essential to the caller.
*
- * Attempt to take control from Firmware on requested control bits.
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req. If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
+ *
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success. On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
**/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
+ struct acpi_pci_root *root;
acpi_status status;
- u32 control_req, result, capbuf[3];
+ u32 ctrl, capbuf[3];
acpi_handle tmp;
- struct acpi_pci_root *root;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
+ if (!mask)
+ return AE_BAD_PARAMETER;
- control_req = (flags & OSC_PCI_CONTROL_MASKS);
- if (!control_req)
+ ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+ if ((ctrl & req) != req)
return AE_TYPE;
root = acpi_pci_find_root(handle);
if (!root)
return AE_NOT_EXIST;
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+
mutex_lock(&osc_lock);
+
+ *mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
- if ((root->osc_control_set & control_req) == control_req)
+ if ((root->osc_control_set & ctrl) == ctrl)
goto out;
- /* Need to query controls first before requesting them */
- if (!root->osc_queried) {
- status = acpi_pci_query_osc(root, root->osc_support_set);
+	/* Need to check the available control bits before requesting them. */
+ while (*mask) {
+ status = acpi_pci_query_osc(root, root->osc_support_set, mask);
if (ACPI_FAILURE(status))
goto out;
+ if (ctrl == *mask)
+ break;
+ ctrl = *mask;
}
- if ((root->osc_control_qry & control_req) != control_req) {
- printk(KERN_DEBUG
- "Firmware did not grant requested _OSC control\n");
+
+ if ((ctrl & req) != req) {
status = AE_SUPPORT;
goto out;
}
capbuf[OSC_QUERY_TYPE] = 0;
capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
- capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
- status = acpi_pci_run_osc(handle, capbuf, &result);
+ capbuf[OSC_CONTROL_TYPE] = ctrl;
+ status = acpi_pci_run_osc(handle, capbuf, mask);
if (ACPI_SUCCESS(status))
- root->osc_control_set = result;
+ root->osc_control_set = *mask;
out:
mutex_unlock(&osc_lock);
return status;
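
A sketch of a caller under the new calling convention: request several native PCIe controls while treating one of them as mandatory. The OSC_PCI_EXPRESS_* constant names and the pci-acpi.h declaration are assumptions based on the headers of this era, not part of this hunk:

/* Sketch only: @mask is in/out, @req is the subset that must be granted. */
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>

static int example_request_osc_control(acpi_handle handle)
{
	acpi_status status;
	u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
		    OSC_PCI_EXPRESS_AER_CONTROL |
		    OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL;

	/* on success, flags holds every control bit the firmware granted */
	status = acpi_pci_osc_control_set(handle, &flags,
					  OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
	if (ACPI_FAILURE(status))
		return -ENODEV;	/* the required bit was not granted */

	pr_info("granted _OSC control bits: %#x\n", flags);
	return 0;
}
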
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index f74d3b31e5c..844c155aeb0 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -40,8 +40,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "sleep.h"
@@ -64,7 +62,6 @@ module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
static int acpi_power_add(struct acpi_device *device);
static int acpi_power_remove(struct acpi_device *device, int type);
static int acpi_power_resume(struct acpi_device *device);
-static int acpi_power_open_fs(struct inode *inode, struct file *file);
static const struct acpi_device_id power_device_ids[] = {
{ACPI_POWER_HID, 0},
@@ -99,14 +96,6 @@ struct acpi_power_resource {
static struct list_head acpi_power_resource_list;
-static const struct file_operations acpi_power_fops = {
- .owner = THIS_MODULE,
- .open = acpi_power_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
/* --------------------------------------------------------------------------
Power Resource Management
-------------------------------------------------------------------------- */
@@ -255,7 +244,6 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
struct list_head *node, *next;
struct acpi_power_reference *ref;
-
result = acpi_power_get_context(handle, &resource);
if (result)
return result;
@@ -542,102 +530,6 @@ int acpi_power_transition(struct acpi_device *device, int state)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_power_dir;
-
-static int acpi_power_seq_show(struct seq_file *seq, void *offset)
-{
- int count = 0;
- int result = 0, state;
- struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
- struct acpi_power_reference *ref;
-
-
- resource = seq->private;
-
- if (!resource)
- goto end;
-
- result = acpi_power_get_state(resource->device->handle, &state);
- if (result)
- goto end;
-
- seq_puts(seq, "state: ");
- switch (state) {
- case ACPI_POWER_RESOURCE_STATE_ON:
- seq_puts(seq, "on\n");
- break;
- case ACPI_POWER_RESOURCE_STATE_OFF:
- seq_puts(seq, "off\n");
- break;
- default:
- seq_puts(seq, "unknown\n");
- break;
- }
-
- mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- ref = container_of(node, struct acpi_power_reference, node);
- count++;
- }
- mutex_unlock(&resource->resource_lock);
-
- seq_printf(seq, "system level: S%d\n"
- "order: %d\n"
- "reference count: %d\n",
- resource->system_level,
- resource->order, count);
-
- end:
- return 0;
-}
-
-static int acpi_power_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_power_seq_show, PDE(inode)->data);
-}
-
-static int acpi_power_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
-
- if (!device)
- return -EINVAL;
-
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_power_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'status' [R] */
- entry = proc_create_data(ACPI_POWER_FILE_STATUS,
- S_IRUGO, acpi_device_dir(device),
- &acpi_power_fops, acpi_driver_data(device));
- if (!entry)
- return -EIO;
- return 0;
-}
-
-static int acpi_power_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_POWER_FILE_STATUS,
- acpi_device_dir(device));
- remove_proc_entry(acpi_device_bid(device), acpi_power_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -690,10 +582,6 @@ static int acpi_power_add(struct acpi_device *device)
break;
}
- result = acpi_power_add_fs(device);
- if (result)
- goto end;
-
printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
acpi_device_bid(device), state ? "on" : "off");
@@ -715,8 +603,6 @@ static int acpi_power_remove(struct acpi_device *device, int type)
resource = acpi_driver_data(device);
- acpi_power_remove_fs(device);
-
mutex_lock(&resource->resource_lock);
list_for_each_safe(node, next, &resource->reference) {
struct acpi_power_reference *ref = container_of(node, struct acpi_power_reference, node);
@@ -760,19 +646,6 @@ static int acpi_power_resume(struct acpi_device *device)
int __init acpi_power_init(void)
{
- int result = 0;
-
INIT_LIST_HEAD(&acpi_power_resource_list);
-
- acpi_power_dir = proc_mkdir(ACPI_POWER_CLASS, acpi_root_dir);
- if (!acpi_power_dir)
- return -ENODEV;
-
- result = acpi_bus_register_driver(&acpi_power_driver);
- if (result < 0) {
- remove_proc_entry(ACPI_POWER_CLASS, acpi_root_dir);
- return -ENODEV;
- }
-
- return 0;
+ return acpi_bus_register_driver(&acpi_power_driver);
}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 1ac678d2c51..afad67769db 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -17,64 +17,11 @@
/*
* this file provides support for:
- * /proc/acpi/sleep
* /proc/acpi/alarm
* /proc/acpi/wakeup
*/
ACPI_MODULE_NAME("sleep")
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
-{
- int i;
-
- for (i = 0; i <= ACPI_STATE_S5; i++) {
- if (sleep_states[i]) {
- seq_printf(seq, "S%d ", i);
- }
- }
-
- seq_puts(seq, "\n");
-
- return 0;
-}
-
-static int acpi_system_sleep_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_sleep_seq_show, PDE(inode)->data);
-}
-
-static ssize_t
-acpi_system_write_sleep(struct file *file,
- const char __user * buffer, size_t count, loff_t * ppos)
-{
- char str[12];
- u32 state = 0;
- int error = 0;
-
- if (count > sizeof(str) - 1)
- goto Done;
- memset(str, 0, sizeof(str));
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- /* Check for S4 bios request */
- if (!strcmp(str, "4b")) {
- error = acpi_suspend(4);
- goto Done;
- }
- state = simple_strtoul(str, NULL, 0);
-#ifdef CONFIG_HIBERNATION
- if (state == 4) {
- error = hibernate();
- goto Done;
- }
-#endif
- error = acpi_suspend(state);
- Done:
- return error ? error : count;
-}
-#endif /* CONFIG_ACPI_PROCFS */
#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
@@ -463,17 +410,6 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
.release = single_release,
};
-#ifdef CONFIG_ACPI_PROCFS
-static const struct file_operations acpi_system_sleep_fops = {
- .owner = THIS_MODULE,
- .open = acpi_system_sleep_open_fs,
- .read = seq_read,
- .write = acpi_system_write_sleep,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS */
-
#ifdef HAVE_ACPI_LEGACY_ALARM
static const struct file_operations acpi_system_alarm_fops = {
.owner = THIS_MODULE,
@@ -495,12 +431,6 @@ static u32 rtc_handler(void *context)
int __init acpi_sleep_proc_init(void)
{
-#ifdef CONFIG_ACPI_PROCFS
- /* 'sleep' [R/W] */
- proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_sleep_fops);
-#endif /* CONFIG_ACPI_PROCFS */
-
#ifdef HAVE_ACPI_LEGACY_ALARM
/* 'alarm' [R/W] */
proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e9699aaed10..b618f888d66 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
{
- set_no_mwait, "IFL91 board", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
- DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
- {
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 38ea0cc6dc4..347eb21b235 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -83,9 +83,6 @@ MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
-#endif
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -113,15 +110,6 @@ static struct acpi_driver acpi_processor_driver = {
#define INSTALL_NOTIFY_HANDLER 1
#define UNINSTALL_NOTIFY_HANDLER 2
-#ifdef CONFIG_ACPI_PROCFS
-static const struct file_operations acpi_processor_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_processor_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);
@@ -256,44 +244,8 @@ static int acpi_processor_errata(struct acpi_processor *pr)
return result;
}
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_processor_dir = NULL;
-static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_processor *pr = seq->private;
-
-
- if (!pr)
- goto end;
-
- seq_printf(seq, "processor id: %d\n"
- "acpi id: %d\n"
- "bus mastering control: %s\n"
- "power management: %s\n"
- "throttling control: %s\n"
- "limit interface: %s\n",
- pr->id,
- pr->acpi_id,
- pr->flags.bm_control ? "yes" : "no",
- pr->flags.power ? "yes" : "no",
- pr->flags.throttling ? "yes" : "no",
- pr->flags.limit ? "yes" : "no");
-
- end:
- return 0;
-}
-
-static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_processor_info_seq_show,
- PDE(inode)->data);
-}
-
static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *entry = NULL;
@@ -306,14 +258,6 @@ static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
return -ENODEV;
}
- /* 'info' [R] */
- entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
- S_IRUGO, acpi_device_dir(device),
- &acpi_processor_info_fops,
- acpi_driver_data(device));
- if (!entry)
- return -EIO;
-
/* 'throttling' [R/W] */
entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
S_IFREG | S_IRUGO | S_IWUSR,
@@ -322,43 +266,20 @@ static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
acpi_driver_data(device));
if (!entry)
return -EIO;
-
- /* 'limit' [R/W] */
- entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_processor_limit_fops,
- acpi_driver_data(device));
- if (!entry)
- return -EIO;
return 0;
}
static int acpi_processor_remove_fs(struct acpi_device *device)
{
if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
- acpi_device_dir(device));
remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
acpi_device_dir(device));
- remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
- acpi_device_dir(device));
remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
acpi_device_dir(device) = NULL;
}
return 0;
}
-#else
-static inline int acpi_processor_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_processor_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif
/* --------------------------------------------------------------------------
Driver Interface
@@ -921,17 +842,15 @@ static int __init acpi_processor_init(void)
memset(&errata, 0, sizeof(errata));
-#ifdef CONFIG_ACPI_PROCFS
acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
if (!acpi_processor_dir)
return -ENOMEM;
-#endif
if (!cpuidle_register_driver(&acpi_idle_driver)) {
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
} else {
- printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+ printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
cpuidle_get_driver()->name);
}
@@ -952,9 +871,7 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
-#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
return result;
}
@@ -974,9 +891,7 @@ static void __exit acpi_processor_exit(void)
cpuidle_unregister_driver(&acpi_idle_driver);
-#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
return;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e9a8026d39f..f4428e82b35 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -33,8 +33,6 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
@@ -82,13 +80,6 @@ module_param(bm_check_disable, uint, 0000);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
-#ifdef CONFIG_ACPI_PROCFS
-static u64 us_to_pm_timer_ticks(s64 t)
-{
- return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
-}
-#endif
-
/*
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
* For now disable this. Probably a bug somewhere else.
@@ -164,7 +155,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (boot_cpu_has(X86_FEATURE_AMDC1E))
+ if (c1e_detected)
type = ACPI_STATE_C1;
/*
@@ -264,7 +255,7 @@ int acpi_processor_resume(struct acpi_device * device)
return 0;
}
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
@@ -689,78 +680,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
return 0;
}
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_processor *pr = seq->private;
- unsigned int i;
-
-
- if (!pr)
- goto end;
-
- seq_printf(seq, "active state: C%zd\n"
- "max_cstate: C%d\n"
- "maximum allowed latency: %d usec\n",
- pr->power.state ? pr->power.state - pr->power.states : 0,
- max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
-
- seq_puts(seq, "states:\n");
-
- for (i = 1; i <= pr->power.count; i++) {
- seq_printf(seq, " %cC%d: ",
- (&pr->power.states[i] ==
- pr->power.state ? '*' : ' '), i);
-
- if (!pr->power.states[i].valid) {
- seq_puts(seq, "<not supported>\n");
- continue;
- }
-
- switch (pr->power.states[i].type) {
- case ACPI_STATE_C1:
- seq_printf(seq, "type[C1] ");
- break;
- case ACPI_STATE_C2:
- seq_printf(seq, "type[C2] ");
- break;
- case ACPI_STATE_C3:
- seq_printf(seq, "type[C3] ");
- break;
- default:
- seq_printf(seq, "type[--] ");
- break;
- }
-
- seq_puts(seq, "promotion[--] ");
-
- seq_puts(seq, "demotion[--] ");
-
- seq_printf(seq, "latency[%03d] usage[%08d] duration[%020Lu]\n",
- pr->power.states[i].latency,
- pr->power.states[i].usage,
- us_to_pm_timer_ticks(pr->power.states[i].time));
- }
-
- end:
- return 0;
-}
-
-static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_processor_power_seq_show,
- PDE(inode)->data);
-}
-
-static const struct file_operations acpi_processor_power_fops = {
- .owner = THIS_MODULE,
- .open = acpi_processor_power_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
/**
* acpi_idle_bm_check - checks if bus master activity was detected
*/
@@ -803,13 +722,12 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else if (cx->entry_method == ACPI_CSTATE_HALT) {
acpi_safe_halt();
} else {
- int unused;
/* IO port based C-state */
inb(cx->address);
/* Dummy wait op - must do something useless after P_LVL2 read
because chipsets cannot guarantee that STPCLK# signal
gets asserted in time to freeze execution properly. */
- unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ inl(acpi_gbl_FADT.xpm_timer_block.address);
}
start_critical_timings();
}
@@ -1172,9 +1090,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
{
acpi_status status = 0;
static int first_run;
-#ifdef CONFIG_ACPI_PROCFS
- struct proc_dir_entry *entry = NULL;
-#endif
if (boot_option_idle_override)
return 0;
@@ -1223,15 +1138,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
if (cpuidle_register_device(&pr->power.dev))
return -EIO;
}
-#ifdef CONFIG_ACPI_PROCFS
- /* 'power' [R] */
- entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
- S_IRUGO, acpi_device_dir(device),
- &acpi_processor_power_fops,
- acpi_driver_data(device));
- if (!entry)
- return -EIO;
-#endif
return 0;
}
@@ -1244,11 +1150,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
cpuidle_unregister_device(&pr->power.dev);
pr->flags.power_setup_done = 0;
-#ifdef CONFIG_ACPI_PROCFS
- if (acpi_device_dir(device))
- remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
- acpi_device_dir(device));
-#endif
-
return 0;
}
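The dummy PM-timer read kept above is deliberate even though its result is discarded, which is why this hunk can drop the 'unused' variable rather than the read itself. A stripped-down sketch of the same I/O-port C-state entry sequence (the helper name is illustrative; cstate_port stands in for cx->address):

#include <linux/acpi.h>
#include <asm/io.h>

/* Illustrative only: enter an I/O-port based C-state the way the driver does. */
static inline void demo_io_cstate_entry(u32 cstate_port)
{
        /* reading the P_LVLx port requests the C-state transition */
        inb(cstate_port);
        /*
         * Dummy PM-timer read: some chipsets need a second bus cycle before
         * STPCLK# is guaranteed to be asserted, so the value is thrown away.
         */
        inl(acpi_gbl_FADT.xpm_timer_block.address);
}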
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ba1bd263d90..3a73a93596e 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
if (!try_module_get(calling_module))
return -EINVAL;
- /* is_done is set to negative if an error occured,
- * and to postitive if _no_ error occured, but SMM
+ /* is_done is set to negative if an error occurred,
+ * and to positive if _no_ error occurred, but SMM
* was already notified. This avoids double notification
* which might lead to unexpected results...
*/
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 6deafb4aa0d..953b25fb986 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,8 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/sysdev.h>
#include <asm/uaccess.h>
@@ -438,84 +436,3 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
.get_cur_state = processor_get_cur_state,
.set_cur_state = processor_set_cur_state,
};
-
-/* /proc interface */
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_processor *pr = seq->private;
-
- if (!pr)
- goto end;
-
- if (!pr->flags.limit) {
- seq_puts(seq, "<not supported>\n");
- goto end;
- }
-
- seq_printf(seq, "active limit: P%d:T%d\n"
- "user limit: P%d:T%d\n"
- "thermal limit: P%d:T%d\n",
- pr->limit.state.px, pr->limit.state.tx,
- pr->limit.user.px, pr->limit.user.tx,
- pr->limit.thermal.px, pr->limit.thermal.tx);
-
- end:
- return 0;
-}
-
-static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_processor_limit_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t acpi_processor_write_limit(struct file * file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int result = 0;
- struct seq_file *m = file->private_data;
- struct acpi_processor *pr = m->private;
- char limit_string[25] = { '\0' };
- int px = 0;
- int tx = 0;
-
-
- if (!pr || (count > sizeof(limit_string) - 1)) {
- return -EINVAL;
- }
-
- if (copy_from_user(limit_string, buffer, count)) {
- return -EFAULT;
- }
-
- limit_string[count] = '\0';
-
- if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
- printk(KERN_ERR PREFIX "Invalid data format\n");
- return -EINVAL;
- }
-
- if (pr->flags.throttling) {
- if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
- printk(KERN_ERR PREFIX "Invalid tx\n");
- return -EINVAL;
- }
- pr->limit.user.tx = tx;
- }
-
- result = acpi_processor_apply_limit(pr);
-
- return count;
-}
-
-const struct file_operations acpi_processor_limit_fops = {
- .owner = THIS_MODULE,
- .open = acpi_processor_limit_open_fs,
- .read = seq_read,
- .write = acpi_processor_write_limit,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 9ade1a5b32e..730863855ed 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1215,7 +1215,6 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
}
/* proc interface */
-#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
void *offset)
{
@@ -1323,4 +1322,3 @@ const struct file_operations acpi_processor_throttling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7f2e051ed4f..b23825ecfa3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -740,6 +740,8 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
device->wakeup.resources.handles[i] = element->reference.handle;
}
+ acpi_gpe_can_wake(device->wakeup.gpe_device, device->wakeup.gpe_number);
+
return AE_OK;
}
@@ -764,8 +766,9 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
return;
}
- status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
- &event_status);
+ status = acpi_get_gpe_status(device->wakeup.gpe_device,
+ device->wakeup.gpe_number,
+ &event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2862c781b37..4754ff6e70e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -70,10 +70,10 @@ static int acpi_sleep_prepare(u32 acpi_state)
}
ACPI_FLUSH_CPU_CACHE();
- acpi_enable_wakeup_device_prep(acpi_state);
#endif
printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
acpi_state);
+ acpi_enable_wakeup_devices(acpi_state);
acpi_enter_sleep_state_prep(acpi_state);
return 0;
}
@@ -119,6 +119,16 @@ static int acpi_pm_freeze(void)
}
/**
+ * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
+ */
+static int acpi_pm_pre_suspend(void)
+{
+ acpi_pm_freeze();
+ suspend_nvs_save();
+ return 0;
+}
+
+/**
* __acpi_pm_prepare - Prepare the platform to enter the target state.
*
* If necessary, set the firmware waking vector and do arch-specific
@@ -127,11 +137,9 @@ static int acpi_pm_freeze(void)
static int __acpi_pm_prepare(void)
{
int error = acpi_sleep_prepare(acpi_target_sleep_state);
-
- suspend_nvs_save();
-
if (error)
acpi_target_sleep_state = ACPI_STATE_S0;
+
return error;
}
@@ -142,9 +150,8 @@ static int __acpi_pm_prepare(void)
static int acpi_pm_prepare(void)
{
int error = __acpi_pm_prepare();
-
if (!error)
- acpi_pm_freeze();
+ acpi_pm_pre_suspend();
return error;
}
@@ -159,7 +166,6 @@ static void acpi_pm_finish(void)
{
u32 acpi_state = acpi_target_sleep_state;
- suspend_nvs_free();
acpi_ec_unblock_transactions();
if (acpi_state == ACPI_STATE_S0)
@@ -167,7 +173,7 @@ static void acpi_pm_finish(void)
printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
acpi_state);
- acpi_disable_wakeup_device(acpi_state);
+ acpi_disable_wakeup_devices(acpi_state);
acpi_leave_sleep_state(acpi_state);
/* reset firmware waking vector */
@@ -181,6 +187,7 @@ static void acpi_pm_finish(void)
*/
static void acpi_pm_end(void)
{
+ suspend_nvs_free();
/*
* This is necessary in case acpi_pm_finish() is not called during a
* failing transition to a sleep state.
@@ -251,7 +258,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
}
local_irq_save(flags);
- acpi_enable_wakeup_device(acpi_state);
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
@@ -297,11 +303,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
-static void acpi_suspend_finish(void)
-{
- acpi_pm_finish();
-}
-
static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
u32 acpi_state;
@@ -323,7 +324,7 @@ static struct platform_suspend_ops acpi_suspend_ops = {
.begin = acpi_suspend_begin,
.prepare_late = acpi_pm_prepare,
.enter = acpi_suspend_enter,
- .wake = acpi_suspend_finish,
+ .wake = acpi_pm_finish,
.end = acpi_pm_end,
};
@@ -336,9 +337,9 @@ static struct platform_suspend_ops acpi_suspend_ops = {
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
int error = acpi_suspend_begin(pm_state);
-
if (!error)
error = __acpi_pm_prepare();
+
return error;
}
@@ -349,9 +350,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
- .prepare_late = acpi_pm_freeze,
+ .prepare_late = acpi_pm_pre_suspend,
.enter = acpi_suspend_enter,
- .wake = acpi_suspend_finish,
+ .wake = acpi_pm_finish,
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
@@ -362,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+ acpi_nvs_nosave();
+ return 0;
+}
+
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -396,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR11M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Everex StepNote Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
@@ -423,16 +446,6 @@ static int acpi_hibernation_begin(void)
return error;
}
-static int acpi_hibernation_pre_snapshot(void)
-{
- int error = acpi_pm_prepare();
-
- if (!error)
- suspend_nvs_save();
-
- return error;
-}
-
static int acpi_hibernation_enter(void)
{
acpi_status status = AE_OK;
@@ -441,7 +454,6 @@ static int acpi_hibernation_enter(void)
ACPI_FLUSH_CPU_CACHE();
local_irq_save(flags);
- acpi_enable_wakeup_device(ACPI_STATE_S4);
/* This shouldn't return. If it returns, we have a problem */
status = acpi_enter_sleep_state(ACPI_STATE_S4);
/* Reprogram control registers and execute _BFS */
@@ -481,7 +493,7 @@ static void acpi_pm_thaw(void)
static struct platform_hibernation_ops acpi_hibernation_ops = {
.begin = acpi_hibernation_begin,
.end = acpi_pm_end,
- .pre_snapshot = acpi_hibernation_pre_snapshot,
+ .pre_snapshot = acpi_pm_prepare,
.finish = acpi_pm_finish,
.prepare = acpi_pm_prepare,
.enter = acpi_hibernation_enter,
@@ -517,13 +529,6 @@ static int acpi_hibernation_begin_old(void)
return error;
}
-static int acpi_hibernation_pre_snapshot_old(void)
-{
- acpi_pm_freeze();
- suspend_nvs_save();
- return 0;
-}
-
/*
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
@@ -531,7 +536,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
static struct platform_hibernation_ops acpi_hibernation_ops_old = {
.begin = acpi_hibernation_begin_old,
.end = acpi_pm_end,
- .pre_snapshot = acpi_hibernation_pre_snapshot_old,
+ .pre_snapshot = acpi_pm_pre_suspend,
.prepare = acpi_pm_freeze,
.finish = acpi_pm_finish,
.enter = acpi_hibernation_enter,
@@ -663,18 +668,9 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return -ENODEV;
}
- if (enable) {
- error = acpi_enable_wakeup_device_power(adev,
- acpi_target_sleep_state);
- if (!error)
- acpi_enable_gpe(adev->wakeup.gpe_device,
- adev->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE);
- } else {
- acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE);
- error = acpi_disable_wakeup_device_power(adev);
- }
+ error = enable ?
+ acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
+ acpi_disable_wakeup_device_power(adev);
if (!error)
dev_info(dev, "wake-up capability %s by ACPI\n",
enable ? "enabled" : "disabled");
@@ -695,7 +691,6 @@ static void acpi_power_off(void)
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk(KERN_DEBUG "%s called\n", __func__);
local_irq_disable();
- acpi_enable_wakeup_device(ACPI_STATE_S5);
acpi_enter_sleep_state(ACPI_STATE_S5);
}
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 25b8bd14928..d8821805c3b 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -2,9 +2,8 @@
extern u8 sleep_states[];
extern int acpi_suspend(u32 state);
-extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
-extern void acpi_enable_wakeup_device(u8 sleep_state);
-extern void acpi_disable_wakeup_device(u8 sleep_state);
+extern void acpi_enable_wakeup_devices(u8 sleep_state);
+extern void acpi_disable_wakeup_devices(u8 sleep_state);
extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
diff --git a/drivers/acpi/system.c b/drivers/acpi/sysfs.c
index f8db50a0941..f8588f81048 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/sysfs.c
@@ -1,51 +1,226 @@
/*
- * acpi_system.c - ACPI System Driver ($Revision: 63 $)
- *
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * sysfs.c - ACPI sysfs interface to userspace.
*/
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/string.h>
-#include <asm/uaccess.h>
-
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
#include <acpi/acpi_drivers.h>
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("system");
-
-#define ACPI_SYSTEM_CLASS "system"
-#define ACPI_SYSTEM_DEVICE_NAME "System"
+ACPI_MODULE_NAME("sysfs");
-u32 acpi_irq_handled;
-u32 acpi_irq_not_handled;
+#define PREFIX "ACPI: "
+#ifdef CONFIG_ACPI_DEBUG
/*
- * Make ACPICA version work as module param
+ * ACPI debug sysfs I/F, including:
+ * /sys/module/acpi/parameters/debug_layer
+ * /sys/module/acpi/parameters/debug_level
+ * /sys/module/acpi/parameters/trace_method_name
+ * /sys/module/acpi/parameters/trace_state
+ * /sys/module/acpi/parameters/trace_debug_layer
+ * /sys/module/acpi/parameters/trace_debug_level
*/
+
+struct acpi_dlayer {
+ const char *name;
+ unsigned long value;
+};
+struct acpi_dlevel {
+ const char *name;
+ unsigned long value;
+};
+#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
+
+static const struct acpi_dlayer acpi_debug_layers[] = {
+ ACPI_DEBUG_INIT(ACPI_UTILITIES),
+ ACPI_DEBUG_INIT(ACPI_HARDWARE),
+ ACPI_DEBUG_INIT(ACPI_EVENTS),
+ ACPI_DEBUG_INIT(ACPI_TABLES),
+ ACPI_DEBUG_INIT(ACPI_NAMESPACE),
+ ACPI_DEBUG_INIT(ACPI_PARSER),
+ ACPI_DEBUG_INIT(ACPI_DISPATCHER),
+ ACPI_DEBUG_INIT(ACPI_EXECUTER),
+ ACPI_DEBUG_INIT(ACPI_RESOURCES),
+ ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
+ ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
+ ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
+ ACPI_DEBUG_INIT(ACPI_COMPILER),
+ ACPI_DEBUG_INIT(ACPI_TOOLS),
+
+ ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
+ ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
+};
+
+static const struct acpi_dlevel acpi_debug_levels[] = {
+ ACPI_DEBUG_INIT(ACPI_LV_INIT),
+ ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
+ ACPI_DEBUG_INIT(ACPI_LV_INFO),
+
+ ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
+ ACPI_DEBUG_INIT(ACPI_LV_PARSE),
+ ACPI_DEBUG_INIT(ACPI_LV_LOAD),
+ ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
+ ACPI_DEBUG_INIT(ACPI_LV_EXEC),
+ ACPI_DEBUG_INIT(ACPI_LV_NAMES),
+ ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
+ ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
+ ACPI_DEBUG_INIT(ACPI_LV_TABLES),
+ ACPI_DEBUG_INIT(ACPI_LV_VALUES),
+ ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
+ ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
+ ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
+ ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
+
+ ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
+ ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
+ ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
+
+ ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
+ ACPI_DEBUG_INIT(ACPI_LV_THREADS),
+ ACPI_DEBUG_INIT(ACPI_LV_IO),
+ ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
+
+ ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
+ ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
+ ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
+ ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
+};
+
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
+{
+ int result = 0;
+ int i;
+
+ result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
+
+ for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
+ result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
+ acpi_debug_layers[i].name,
+ acpi_debug_layers[i].value,
+ (acpi_dbg_layer & acpi_debug_layers[i].value)
+ ? '*' : ' ');
+ }
+ result +=
+ sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
+ ACPI_ALL_DRIVERS,
+ (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
+ ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
+ == 0 ? ' ' : '-');
+ result +=
+ sprintf(buffer + result,
+ "--\ndebug_layer = 0x%08X ( * = enabled)\n",
+ acpi_dbg_layer);
+
+ return result;
+}
+
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
+{
+ int result = 0;
+ int i;
+
+ result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
+
+ for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
+ result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
+ acpi_debug_levels[i].name,
+ acpi_debug_levels[i].value,
+ (acpi_dbg_level & acpi_debug_levels[i].value)
+ ? '*' : ' ');
+ }
+ result +=
+ sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
+ acpi_dbg_level);
+
+ return result;
+}
+
+static struct kernel_param_ops param_ops_debug_layer = {
+ .set = param_set_uint,
+ .get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops param_ops_debug_level = {
+ .set = param_set_uint,
+ .get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
+
+static char trace_method_name[6];
+module_param_string(trace_method_name, trace_method_name, 6, 0644);
+static unsigned int trace_debug_layer;
+module_param(trace_debug_layer, uint, 0644);
+static unsigned int trace_debug_level;
+module_param(trace_debug_level, uint, 0644);
+
+static int param_set_trace_state(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "1", 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 1);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ result = -EINVAL;
+exit:
+ return result;
+}
+
+static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+{
+ if (!acpi_gbl_trace_method_name)
+ return sprintf(buffer, "disable");
+ else {
+ if (acpi_gbl_trace_flags & 1)
+ return sprintf(buffer, "1");
+ else
+ return sprintf(buffer, "enable");
+ }
+ return 0;
+}
+
+module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
+ NULL, 0644);
+#endif /* CONFIG_ACPI_DEBUG */
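Once the ACPI core exposes these parameters, the masks can be inspected from user space like any other module parameter. A small sketch, assuming the usual sysfs layout so the file sits at /sys/module/acpi/parameters/debug_layer:

/* Userspace sketch: dump the ACPI debug_layer table produced by the
 * param_get_debug_layer() handler above. */
#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/module/acpi/parameters/debug_layer", "r");

        if (!f) {
                perror("debug_layer");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);	/* one "name  mask  [*]" row per layer */
        fclose(f);
        return 0;
}

Writing a mask back (as root) goes through param_set_uint, so a 0x-prefixed hex value should be accepted; the per-layer and per-level values are the ones the read-out above lists.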
+
+/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
int result;
@@ -57,9 +232,12 @@ static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
-/* --------------------------------------------------------------------------
- FS Interface (/sys)
- -------------------------------------------------------------------------- */
+/*
+ * ACPI table sysfs I/F:
+ * /sys/firmware/acpi/tables/
+ * /sys/firmware/acpi/tables/dynamic/
+ */
+
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *dynamic_tables_kobj;
@@ -86,14 +264,12 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
else
memcpy(name, "\0\0\0\0", 4);
- status =
- acpi_get_table(name, table_attr->instance,
- &table_header);
+ status = acpi_get_table(name, table_attr->instance, &table_header);
if (ACPI_FAILURE(status))
return -ENODEV;
return memory_read_from_buffer(buf, count, &offset,
- table_header, table_header->length);
+ table_header, table_header->length);
}
static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
@@ -105,7 +281,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
sysfs_attr_init(&table_attr->attr.attr);
if (table_header->signature[0] != '\0')
memcpy(table_attr->name, table_header->signature,
- ACPI_NAME_SIZE);
+ ACPI_NAME_SIZE);
else
memcpy(table_attr->name, "NULL", 4);
@@ -117,8 +293,8 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
table_attr->instance++;
if (table_attr->instance > 1 || (table_attr->instance == 1 &&
- !acpi_get_table
- (table_header->signature, 2, &header)))
+ !acpi_get_table
+ (table_header->signature, 2, &header)))
sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
table_attr->instance);
@@ -138,18 +314,17 @@ acpi_sysfs_table_handler(u32 event, void *table, void *context)
switch (event) {
case ACPI_TABLE_EVENT_LOAD:
table_attr =
- kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
+ kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
if (!table_attr)
return AE_NO_MEMORY;
acpi_table_attr_init(table_attr, table);
if (sysfs_create_bin_file(dynamic_tables_kobj,
- &table_attr->attr)) {
+ &table_attr->attr)) {
kfree(table_attr);
return AE_ERROR;
} else
- list_add_tail(&table_attr->node,
- &acpi_table_attr_list);
+ list_add_tail(&table_attr->node, &acpi_table_attr_list);
break;
case ACPI_TABLE_EVENT_UNLOAD:
/*
@@ -164,7 +339,7 @@ acpi_sysfs_table_handler(u32 event, void *table, void *context)
return AE_OK;
}
-static int acpi_system_sysfs_init(void)
+static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
struct acpi_table_header *table_header = NULL;
@@ -213,14 +388,17 @@ err:
}
/*
- * Detailed ACPI IRQ counters in /sys/firmware/acpi/interrupts/
- * See Documentation/ABI/testing/sysfs-firmware-acpi
+ * Detailed ACPI IRQ counters:
+ * /sys/firmware/acpi/interrupts/
*/
+u32 acpi_irq_handled;
+u32 acpi_irq_not_handled;
+
#define COUNT_GPE 0
-#define COUNT_SCI 1 /* acpi_irq_handled */
-#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
-#define COUNT_ERROR 3 /* other */
+#define COUNT_SCI 1 /* acpi_irq_handled */
+#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
+#define COUNT_ERROR 3 /* other */
#define NUM_COUNTERS_EXTRA 4
struct event_counter {
@@ -237,6 +415,7 @@ static u32 acpi_gpe_count;
static struct attribute_group interrupt_stats_attr_group = {
.name = "interrupts",
};
+
static struct kobj_attribute *counter_attrs;
static void delete_gpe_attr_array(void)
@@ -269,8 +448,8 @@ void acpi_os_gpe_count(u32 gpe_number)
if (gpe_number < num_gpes)
all_counters[gpe_number].count++;
else
- all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
- count++;
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
+ COUNT_ERROR].count++;
return;
}
@@ -283,13 +462,14 @@ void acpi_os_fixed_event_count(u32 event_number)
if (event_number < ACPI_NUM_FIXED_EVENTS)
all_counters[num_gpes + event_number].count++;
else
- all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
- count++;
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
+ COUNT_ERROR].count++;
return;
}
-static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
+static int get_status(u32 index, acpi_event_status *status,
+ acpi_handle *handle)
{
int result = 0;
@@ -300,7 +480,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
- "Invalid GPE 0x%x\n", index));
+ "Invalid GPE 0x%x\n", index));
goto end;
}
result = acpi_get_gpe_status(*handle, index, status);
@@ -312,7 +492,7 @@ end:
}
static ssize_t counter_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
int index = attr - counter_attrs;
int size;
@@ -321,12 +501,11 @@ static ssize_t counter_show(struct kobject *kobj,
int result = 0;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
- acpi_irq_handled;
+ acpi_irq_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
- acpi_irq_not_handled;
+ acpi_irq_not_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
- acpi_gpe_count;
-
+ acpi_gpe_count;
size = sprintf(buf, "%8d", all_counters[index].count);
/* "gpe_all" or "sci" */
@@ -338,13 +517,13 @@ static ssize_t counter_show(struct kobject *kobj,
goto end;
if (!(status & ACPI_EVENT_FLAG_HANDLE))
- size += sprintf(buf + size, " invalid");
+ size += sprintf(buf + size, " invalid");
else if (status & ACPI_EVENT_FLAG_ENABLED)
- size += sprintf(buf + size, " enabled");
+ size += sprintf(buf + size, " enabled");
else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
- size += sprintf(buf + size, " wake_enabled");
+ size += sprintf(buf + size, " wake_enabled");
else
- size += sprintf(buf + size, " disabled");
+ size += sprintf(buf + size, " disabled");
end:
size += sprintf(buf + size, "\n");
@@ -357,7 +536,8 @@ end:
* enable/disable/clear a gpe/fixed event in user space.
*/
static ssize_t counter_set(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t size)
+ struct kobj_attribute *attr, const char *buf,
+ size_t size)
{
int index = attr - counter_attrs;
acpi_event_status status;
@@ -381,34 +561,32 @@ static ssize_t counter_set(struct kobject *kobj,
if (!(status & ACPI_EVENT_FLAG_HANDLE)) {
printk(KERN_WARNING PREFIX
- "Can not change Invalid GPE/Fixed Event status\n");
+ "Can not change Invalid GPE/Fixed Event status\n");
return -EINVAL;
}
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
- (status & ACPI_EVENT_FLAG_ENABLED))
- result = acpi_disable_gpe(handle, index,
- ACPI_GPE_TYPE_RUNTIME);
+ (status & ACPI_EVENT_FLAG_ENABLED))
+ result = acpi_disable_gpe(handle, index);
else if (!strcmp(buf, "enable\n") &&
- !(status & ACPI_EVENT_FLAG_ENABLED))
- result = acpi_enable_gpe(handle, index,
- ACPI_GPE_TYPE_RUNTIME);
+ !(status & ACPI_EVENT_FLAG_ENABLED))
+ result = acpi_enable_gpe(handle, index);
else if (!strcmp(buf, "clear\n") &&
- (status & ACPI_EVENT_FLAG_SET))
+ (status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
int event = index - num_gpes;
if (!strcmp(buf, "disable\n") &&
- (status & ACPI_EVENT_FLAG_ENABLED))
+ (status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_disable_event(event, ACPI_NOT_ISR);
else if (!strcmp(buf, "enable\n") &&
- !(status & ACPI_EVENT_FLAG_ENABLED))
+ !(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_enable_event(event, ACPI_NOT_ISR);
else if (!strcmp(buf, "clear\n") &&
- (status & ACPI_EVENT_FLAG_SET))
+ (status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_event(event);
else
all_counters[index].count = strtoul(buf, NULL, 0);
@@ -432,17 +610,17 @@ void acpi_irq_stats_init(void)
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (all_attrs == NULL)
return;
all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
- GFP_KERNEL);
+ GFP_KERNEL);
if (all_counters == NULL)
goto fail;
counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
- GFP_KERNEL);
+ GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;
@@ -505,135 +683,11 @@ static void __exit interrupt_stats_exit(void)
return;
}
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-#define ACPI_SYSTEM_FILE_INFO "info"
-#define ACPI_SYSTEM_FILE_EVENT "event"
-#define ACPI_SYSTEM_FILE_DSDT "dsdt"
-#define ACPI_SYSTEM_FILE_FADT "fadt"
-
-static int acpi_system_read_info(struct seq_file *seq, void *offset)
-{
-
- seq_printf(seq, "version: %x\n", ACPI_CA_VERSION);
- return 0;
-}
-
-static int acpi_system_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_read_info, PDE(inode)->data);
-}
-
-static const struct file_operations acpi_system_info_ops = {
- .owner = THIS_MODULE,
- .open = acpi_system_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
- loff_t *);
-
-static const struct file_operations acpi_system_dsdt_ops = {
- .owner = THIS_MODULE,
- .read = acpi_system_read_dsdt,
-};
-
-static ssize_t
-acpi_system_read_dsdt(struct file *file,
- char __user * buffer, size_t count, loff_t * ppos)
-{
- acpi_status status = AE_OK;
- struct acpi_table_header *dsdt = NULL;
- ssize_t res;
-
- status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- res = simple_read_from_buffer(buffer, count, ppos, dsdt, dsdt->length);
-
- return res;
-}
-
-static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t,
- loff_t *);
-
-static const struct file_operations acpi_system_fadt_ops = {
- .owner = THIS_MODULE,
- .read = acpi_system_read_fadt,
-};
-
-static ssize_t
-acpi_system_read_fadt(struct file *file,
- char __user * buffer, size_t count, loff_t * ppos)
-{
- acpi_status status = AE_OK;
- struct acpi_table_header *fadt = NULL;
- ssize_t res;
-
- status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- res = simple_read_from_buffer(buffer, count, ppos, fadt, fadt->length);
-
- return res;
-}
-
-static int acpi_system_procfs_init(void)
-{
- struct proc_dir_entry *entry;
- int error = 0;
-
- /* 'info' [R] */
- entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir,
- &acpi_system_info_ops);
- if (!entry)
- goto Error;
-
- /* 'dsdt' [R] */
- entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir,
- &acpi_system_dsdt_ops);
- if (!entry)
- goto Error;
-
- /* 'fadt' [R] */
- entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir,
- &acpi_system_fadt_ops);
- if (!entry)
- goto Error;
-
- Done:
- return error;
-
- Error:
- remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir);
- remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir);
- remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir);
-
- error = -EFAULT;
- goto Done;
-}
-#else
-static int acpi_system_procfs_init(void)
-{
- return 0;
-}
-#endif
-
-int __init acpi_system_init(void)
+int __init acpi_sysfs_init(void)
{
int result;
- result = acpi_system_procfs_init();
- if (result)
- return result;
-
- result = acpi_system_sysfs_init();
+ result = acpi_tables_sysfs_init();
return result;
}
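The tables exported under /sys/firmware/acpi/tables/ are raw binary blobs, so a user-space consumer is a straight read loop. A minimal sketch (DSDT is only the default path here; any table file, including those under dynamic/, is read the same way):

/* Userspace sketch: copy a raw ACPI table exported by the sysfs I/F above
 * to stdout. Run as root, e.g. ./dumptable /sys/firmware/acpi/tables/DSDT */
#include <stdio.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/sys/firmware/acpi/tables/DSDT";
        char buf[4096];
        size_t n;
        FILE *f = fopen(path, "rb");

        if (!f) {
                perror(path);
                return 1;
        }
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                fwrite(buf, 1, n, stdout);
        fclose(f);
        return 0;
}

Feeding the dumped file to an AML disassembler such as iasl is typically the next step.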
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index efad1f33aeb..2f8f17131d9 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -37,10 +37,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
+
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
+
#include <linux/jiffies.h>
#include <linux/kmod.h>
-#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/device.h>
#include <asm/uaccess.h>
@@ -102,16 +106,6 @@ static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device, int type);
static int acpi_thermal_resume(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_cooling_mode(struct file *,
- const char __user *, size_t,
- loff_t *);
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
- size_t, loff_t *);
static const struct acpi_device_id thermal_device_ids[] = {
{ACPI_THERMAL_HID, 0},
@@ -201,6 +195,18 @@ struct acpi_thermal {
struct mutex lock;
};
+#ifdef CONFIG_ACPI_PROCFS
+static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
+static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
+static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
+static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
+static ssize_t acpi_thermal_write_cooling_mode(struct file *,
+ const char __user *, size_t,
+ loff_t *);
+static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
+static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
+ size_t, loff_t *);
+
static const struct file_operations acpi_thermal_state_fops = {
.owner = THIS_MODULE,
.open = acpi_thermal_state_open_fs,
@@ -242,6 +248,7 @@ static const struct file_operations acpi_thermal_polling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif /* CONFIG_ACPI_PROCFS*/
/* --------------------------------------------------------------------------
Thermal Zone Management
@@ -287,26 +294,6 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
return 0;
}
-static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
-{
-
- if (!tz)
- return -EINVAL;
-
- tz->polling_frequency = seconds * 10; /* Convert value to deci-seconds */
-
- tz->thermal_zone->polling_delay = seconds * 1000;
-
- if (tz->tz_enabled)
- thermal_zone_device_update(tz->thermal_zone);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Polling frequency set to %lu seconds\n",
- tz->polling_frequency/10));
-
- return 0;
-}
-
static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
{
acpi_status status = AE_OK;
@@ -973,7 +960,7 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
-
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_thermal_dir;
static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset)
@@ -1187,6 +1174,26 @@ static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file)
PDE(inode)->data);
}
+static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
+{
+ if (!tz)
+ return -EINVAL;
+
+ /* Convert value to deci-seconds */
+ tz->polling_frequency = seconds * 10;
+
+ tz->thermal_zone->polling_delay = seconds * 1000;
+
+ if (tz->tz_enabled)
+ thermal_zone_device_update(tz->thermal_zone);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Polling frequency set to %lu seconds\n",
+ tz->polling_frequency/10));
+
+ return 0;
+}
+
static ssize_t
acpi_thermal_write_polling(struct file *file,
const char __user * buffer,
@@ -1295,7 +1302,13 @@ static int acpi_thermal_remove_fs(struct acpi_device *device)
return 0;
}
-
+#else
+static inline int acpi_thermal_add_fs(struct acpi_device *device) { return 0; }
+static inline int acpi_thermal_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI_PROCFS */
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1566,13 +1579,18 @@ static int __init acpi_thermal_init(void)
printk(KERN_NOTICE "ACPI: thermal control disabled\n");
return -ENODEV;
}
+
+#ifdef CONFIG_ACPI_PROCFS
acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir);
if (!acpi_thermal_dir)
return -ENODEV;
+#endif
result = acpi_bus_register_driver(&acpi_thermal_driver);
if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
+#endif
return -ENODEV;
}
@@ -1584,7 +1602,9 @@ static void __exit acpi_thermal_exit(void)
acpi_bus_unregister_driver(&acpi_thermal_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
+#endif
return;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9865d46f49a..67dec0c675a 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -152,7 +152,9 @@ struct acpi_video_bus {
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
+#ifdef CONFIG_ACPI_PROCFS
struct proc_dir_entry *dir;
+#endif
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
@@ -208,6 +210,7 @@ struct acpi_video_device {
struct output_device *output_dev;
};
+#ifdef CONFIG_ACPI_PROCFS
/* bus */
static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
static const struct file_operations acpi_video_bus_info_fops = {
@@ -307,6 +310,7 @@ static const struct file_operations acpi_video_device_EDID_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif /* CONFIG_ACPI_PROCFS */
static const char device_decode[][30] = {
"motherboard VGA device",
@@ -450,16 +454,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = {
/* device */
static int
-acpi_video_device_query(struct acpi_video_device *device, unsigned long long *state)
-{
- int status;
-
- status = acpi_evaluate_integer(device->dev->handle, "_DGS", NULL, state);
-
- return status;
-}
-
-static int
acpi_video_device_get_state(struct acpi_video_device *device,
unsigned long long *state)
{
@@ -698,46 +692,6 @@ acpi_video_device_EDID(struct acpi_video_device *device,
/* bus */
-static int
-acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
-{
- int status;
- unsigned long long tmp;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
-
-
- arg0.integer.value = option;
-
- status = acpi_evaluate_integer(video->device->handle, "_SPD", &args, &tmp);
- if (ACPI_SUCCESS(status))
- status = tmp ? (-EINVAL) : (AE_OK);
-
- return status;
-}
-
-static int
-acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
-{
- int status;
-
- status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
-
- return status;
-}
-
-static int
-acpi_video_bus_POST_options(struct acpi_video_bus *video,
- unsigned long long *options)
-{
- int status;
-
- status = acpi_evaluate_integer(video->device->handle, "_VPO", NULL, options);
- *options &= 3;
-
- return status;
-}
-
/*
* Arg:
* video : video bus device pointer
@@ -1159,6 +1113,7 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_video_dir;
@@ -1198,6 +1153,18 @@ acpi_video_device_info_open_fs(struct inode *inode, struct file *file)
PDE(inode)->data);
}
+static int
+acpi_video_device_query(struct acpi_video_device *device,
+ unsigned long long *state)
+{
+ int status;
+
+ status = acpi_evaluate_integer(device->dev->handle, "_DGS",
+ NULL, state);
+
+ return status;
+}
+
static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
{
int status;
@@ -1492,6 +1459,19 @@ static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
return single_open(file, acpi_video_bus_ROM_seq_show, PDE(inode)->data);
}
+static int
+acpi_video_bus_POST_options(struct acpi_video_bus *video,
+ unsigned long long *options)
+{
+ int status;
+
+ status = acpi_evaluate_integer(video->device->handle, "_VPO",
+ NULL, options);
+ *options &= 3;
+
+ return status;
+}
+
static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_video_bus *video = seq->private;
@@ -1530,6 +1510,16 @@ acpi_video_bus_POST_info_open_fs(struct inode *inode, struct file *file)
PDE(inode)->data);
}
+static int
+acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
+{
+ int status;
+
+ status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
+
+ return status;
+}
+
static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_video_bus *video = seq->private;
@@ -1572,6 +1562,25 @@ static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file)
return single_open(file, acpi_video_bus_DOS_seq_show, PDE(inode)->data);
}
+static int
+acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
+{
+ int status;
+ unsigned long long tmp;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+
+ arg0.integer.value = option;
+
+ status = acpi_evaluate_integer(video->device->handle, "_SPD",
+ &args, &tmp);
+ if (ACPI_SUCCESS(status))
+ status = tmp ? (-EINVAL) : (AE_OK);
+
+ return status;
+}
+
static ssize_t
acpi_video_bus_write_POST(struct file *file,
const char __user * buffer,
@@ -1722,6 +1731,24 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device)
return 0;
}
+#else
+static inline int acpi_video_device_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_video_device_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_video_bus_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_video_bus_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI_PROCFS */
/* --------------------------------------------------------------------------
Driver Interface
@@ -2140,7 +2167,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
status = acpi_video_bus_get_one_device(dev, video);
if (ACPI_FAILURE(status)) {
printk(KERN_WARNING PREFIX
- "Cant attach device");
+ "Cant attach device\n");
continue;
}
}
@@ -2150,19 +2177,19 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
{
acpi_status status;
- struct acpi_video_bus *video;
-
if (!device || !device->video)
return -ENOENT;
- video = device->video;
-
acpi_video_device_remove_fs(device->dev);
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_WARNING PREFIX
+ "Cant remove video notify handler\n");
+ }
if (device->backlight) {
sysfs_remove_link(&device->backlight->dev.kobj, "device");
backlight_device_unregister(device->backlight);
@@ -2557,9 +2584,11 @@ int acpi_video_register(void)
return 0;
}
+#ifdef CONFIG_ACPI_PROCFS
acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
if (!acpi_video_dir)
return -ENODEV;
+#endif
result = acpi_bus_register_driver(&acpi_video_bus);
if (result < 0) {
@@ -2588,7 +2617,9 @@ void acpi_video_unregister(void)
}
acpi_bus_unregister_driver(&acpi_video_bus);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+#endif
register_count = 0;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c5fef01b3c9..b8367612659 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
- printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
- "control misses _BQC function\n");
+ printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
+ "cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 388747a7ef4..f62a50c3ed3 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -21,67 +21,40 @@
ACPI_MODULE_NAME("wakeup_devices")
/**
- * acpi_enable_wakeup_device_prep - Prepare wake-up devices.
+ * acpi_enable_wakeup_devices - Enable wake-up device GPEs.
* @sleep_state: ACPI system sleep state.
*
- * Enable all wake-up devices' power, unless the requested system sleep state is
- * too deep.
+ * Enable wakeup device power of devices with the state.enable flag set and set
+ * the wakeup enable mask bits in the GPE registers that correspond to wakeup
+ * devices.
*/
-void acpi_enable_wakeup_device_prep(u8 sleep_state)
+void acpi_enable_wakeup_devices(u8 sleep_state)
{
struct list_head *node, *next;
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev = container_of(node,
- struct acpi_device,
- wakeup_list);
-
- if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
- || (sleep_state > (u32) dev->wakeup.sleep_state))
- continue;
-
- acpi_enable_wakeup_device_power(dev, sleep_state);
- }
-}
-
-/**
- * acpi_enable_wakeup_device - Enable wake-up device GPEs.
- * @sleep_state: ACPI system sleep state.
- *
- * Enable all wake-up devices' GPEs, with the assumption that
- * acpi_disable_all_gpes() was executed before, so we don't need to disable any
- * GPEs here.
- */
-void acpi_enable_wakeup_device(u8 sleep_state)
-{
- struct list_head *node, *next;
-
- /*
- * Caution: this routine must be invoked when interrupt is disabled
- * Refer ACPI2.0: P212
- */
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
- if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
+ if (!dev->wakeup.flags.valid
+ || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
|| sleep_state > (u32) dev->wakeup.sleep_state)
continue;
+ if (dev->wakeup.state.enabled)
+ acpi_enable_wakeup_device_power(dev, sleep_state);
+
/* The wake-up power should have been enabled already. */
- acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE);
+ acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+ ACPI_GPE_ENABLE);
}
}
/**
- * acpi_disable_wakeup_device - Disable devices' wakeup capability.
+ * acpi_disable_wakeup_devices - Disable devices' wakeup capability.
* @sleep_state: ACPI system sleep state.
- *
- * This function only affects devices with wakeup.state.enabled set, which means
- * that it reverses the changes made by acpi_enable_wakeup_device_prep().
*/
-void acpi_disable_wakeup_device(u8 sleep_state)
+void acpi_disable_wakeup_devices(u8 sleep_state)
{
struct list_head *node, *next;
@@ -89,13 +62,16 @@ void acpi_disable_wakeup_device(u8 sleep_state)
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
- if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
+ if (!dev->wakeup.flags.valid
+ || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
|| (sleep_state > (u32) dev->wakeup.sleep_state))
continue;
- acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
- ACPI_GPE_TYPE_WAKE);
- acpi_disable_wakeup_device_power(dev);
+ acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+ ACPI_GPE_DISABLE);
+
+ if (dev->wakeup.state.enabled)
+ acpi_disable_wakeup_device_power(dev);
}
}
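Both rewritten loops above use the same node-plus-container_of traversal over acpi_wakeup_device_list. A compact sketch of that idiom with a private list, in case the pattern is unfamiliar (the struct and function names are illustrative):

#include <linux/list.h>
#include <linux/kernel.h>

struct demo_wakeup_dev {
        struct list_head wakeup_list;	/* linked into demo_wakeup_list */
        bool enabled;
};

static LIST_HEAD(demo_wakeup_list);

/* Walk the list the way acpi_enable_wakeup_devices() does: the _safe variant
 * tolerates entries being unlinked during the walk, and container_of()
 * recovers the enclosing structure from the embedded list node. */
static void demo_walk(void)
{
        struct list_head *node, *next;

        list_for_each_safe(node, next, &demo_wakeup_list) {
                struct demo_wakeup_dev *dev =
                        container_of(node, struct demo_wakeup_dev, wakeup_list);

                if (!dev->enabled)
                        continue;
                /* ... arm the device's wake GPE here ... */
        }
}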
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index aa85a98d3a4..11ec911016c 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -187,6 +187,15 @@ config ATA_PIIX
If unsure, say N.
+config SATA_DWC
+ tristate "DesignWare Cores SATA support"
+ depends on 460EX
+ help
+ This option enables support for the on-chip SATA controller of the
+ AppliedMicro processor 460EX.
+
+ If unsure, say N.
+
config SATA_MV
tristate "Marvell SATA support"
help
@@ -642,6 +651,17 @@ config PATA_VIA
If unsure, say N.
+config PATA_PXA
+ tristate "PXA DMA-capable PATA support"
+ depends on ARCH_PXA
+ help
+ This option enables support for hard drives attached to the PXA CPU's bus.
+
+ NOTE: This driver uses the PXA DMA controller. If your hardware is not
+ capable of MWDMA, use pata_platform instead.
+
+ If unsure, say N.
+
config PATA_WINBOND
tristate "Winbond SL82C105 PATA support"
depends on PCI
@@ -796,9 +816,19 @@ config PATA_RZ1000
If unsure, say N.
+config PATA_SAMSUNG_CF
+ tristate "Samsung SoC PATA support"
+ depends on SAMSUNG_DEV_IDE
+ help
+ This option enables basic support for Samsung's S3C/S5P board
+ PATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
depends on ISA && EXPERIMENTAL
+ select PATA_LEGACY
help
Support for the Winbond W83759A controller on Vesa Local Bus
systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 7ef89d73df6..d5df04a395c 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
@@ -87,7 +88,9 @@ obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
+obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
+
+obj-$(CONFIG_PATA_PXA) += pata_pxa.o
# Should be last but two libata driver
obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f2522534ae6..99d0e5a5114 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
board_ahci,
board_ahci_ign_iferr,
board_ahci_nosntf,
+ board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_mcp65,
@@ -89,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
+static struct scsi_host_template ahci_sht = {
+ AHCI_SHT("ahci"),
+};
+
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_vt8251_hardreset,
@@ -132,6 +137,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs] =
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_mcp65] =
{
@@ -244,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -362,6 +378,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+ { PCI_DEVICE(0x1b4b, 0x9123),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
@@ -1042,7 +1060,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VPRINTK("ENTER\n");
- WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+ WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 7113c572447..e5fdeebf9ef 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -209,6 +209,7 @@ enum {
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+ AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
/* ap->flags bits */
@@ -297,7 +298,17 @@ struct ahci_host_priv {
extern int ahci_ignore_sss;
-extern struct scsi_host_template ahci_sht;
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+#define AHCI_SHT(drv_name) \
+ ATA_NCQ_SHT(drv_name), \
+ .can_queue = AHCI_MAX_CMDS - 1, \
+ .sg_tablesize = AHCI_MAX_SG, \
+ .dma_boundary = AHCI_DMA_BOUNDARY, \
+ .shost_attrs = ahci_shost_attrs, \
+ .sdev_attrs = ahci_sdev_attrs
+
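With the template macro in place, each glue driver spells out its own scsi_host_template instead of sharing the previously exported ahci_sht. A sketch of what a hypothetical additional user would add (the driver name "ahci_demo" is illustrative):

#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include "ahci.h"

/* per-driver host template built from the shared AHCI_SHT() initializer */
static struct scsi_host_template ahci_demo_sht = {
        AHCI_SHT("ahci_demo"),
        /* driver-specific overrides, if any, would follow here */
};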
extern struct ata_port_operations ahci_ops;
void ahci_save_initial_config(struct device *dev,
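With ahci_sht no longer exported, every AHCI glue driver now builds its own scsi_host_template from the shared fields via AHCI_SHT(), exactly as ahci.c and ahci_platform.c do elsewhere in this patch. A minimal sketch for a hypothetical extra glue driver (the "my_ahci" name is illustrative and assumes #include "ahci.h"):

static struct scsi_host_template my_ahci_sht = {
	AHCI_SHT("my_ahci"),	/* ATA_NCQ_SHT plus the shared queue/sg limits and sysfs attrs */
	/* driver-specific overrides, if any, go here */
};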
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 5e11b160f24..84b643270e7 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT("ahci_platform"),
+};
+
static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -54,19 +58,13 @@ static int __init ahci_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdata && pdata->init) {
- rc = pdata->init(dev);
- if (rc)
- return rc;
- }
-
if (pdata && pdata->ata_port_info)
pi = *pdata->ata_port_info;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv) {
- rc = -ENOMEM;
- goto err0;
+ dev_err(dev, "can't alloc ahci_host_priv\n");
+ return -ENOMEM;
}
hpriv->flags |= (unsigned long)pi.private_data;
@@ -74,8 +72,19 @@ static int __init ahci_probe(struct platform_device *pdev)
hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
if (!hpriv->mmio) {
dev_err(dev, "can't map %pR\n", mem);
- rc = -ENOMEM;
- goto err0;
+ return -ENOMEM;
+ }
+
+ /*
+ * Some platforms might need to prepare for mmio region access,
+ * which could be done in the following init call. So, the mmio
+ * region shouldn't be accessed before init (if provided) has
+ * returned successfully.
+ */
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev, hpriv->mmio);
+ if (rc)
+ return rc;
}
ahci_save_initial_config(dev, hpriv,
@@ -140,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
ahci_print_info(host, "platform");
rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
- &ahci_sht);
+ &ahci_platform_sht);
if (rc)
goto err0;
@@ -166,7 +175,6 @@ static int __devexit ahci_remove(struct platform_device *pdev)
}
static struct platform_driver ahci_driver = {
- .probe = ahci_probe,
.remove = __devexit_p(ahci_remove),
.driver = {
.name = "ahci",
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 7107a6929de..cc5f7726bde 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -54,7 +54,6 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
const struct pci_device_id *id = ap->host->private_data;
int dma_enabled = 0;
struct ata_device *dev;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (id->driver_data & ATA_GEN_FORCE_DMA) {
dma_enabled = 0xff;
@@ -63,9 +62,6 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
- if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
- dma_enabled = 0xFF;
-
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 7409f98d2ae..d712675d0a9 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -158,6 +158,7 @@ struct piix_map_db {
struct piix_host_priv {
const int *map;
u32 saved_iocfg;
+ spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */
void __iomem *sidpr;
};
@@ -301,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PBG) */
+ { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PBG) */
+ { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
@@ -951,12 +956,15 @@ static int piix_sidpr_scr_read(struct ata_link *link,
unsigned int reg, u32 *val)
{
struct piix_host_priv *hpriv = link->ap->host->private_data;
+ unsigned long flags;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
+ spin_lock_irqsave(&hpriv->sidpr_lock, flags);
piix_sidpr_sel(link, reg);
*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
+ spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
return 0;
}
@@ -964,12 +972,15 @@ static int piix_sidpr_scr_write(struct ata_link *link,
unsigned int reg, u32 val)
{
struct piix_host_priv *hpriv = link->ap->host->private_data;
+ unsigned long flags;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
+ spin_lock_irqsave(&hpriv->sidpr_lock, flags);
piix_sidpr_sel(link, reg);
iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
+ spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
return 0;
}
@@ -1566,6 +1577,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
+ spin_lock_init(&hpriv->sidpr_lock);
/* Save IOCFG, this will be used for cable detection, quirk
* detection and restoration on detach. This is necessary
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 81e772a94d5..8eea309ea21 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
-static struct device_attribute *ahci_shost_attrs[] = {
+struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_em_buffer,
NULL
};
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
-static struct device_attribute *ahci_sdev_attrs[] = {
+struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
NULL
};
-
-struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT("ahci"),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-EXPORT_SYMBOL_GPL(ahci_sht);
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
@@ -430,6 +422,12 @@ void ahci_save_initial_config(struct device *dev,
cap &= ~HOST_CAP_SNTF;
}
+ if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do FBS, turning on CAP_FBS\n");
+ cap |= HOST_CAP_FBS;
+ }
+
if (force_port_map && port_map != force_port_map) {
dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, force_port_map);
@@ -1320,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
/* issue the first D2H Register FIS */
msecs = 0;
now = jiffies;
- if (time_after(now, deadline))
+ if (time_after(deadline, now))
msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
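The one-line time_after() fix above flips the argument order so the remaining time is computed only while the deadline still lies in the future; the old test fired only after the deadline had passed, and the subtraction then underflowed. A small sketch of the corrected pattern (msecs_until() is a hypothetical helper, not part of libata):

/* time_after(a, b) is true when a is later than b (wrap-safe). */
static unsigned long msecs_until(unsigned long deadline)
{
	unsigned long now = jiffies;

	return time_after(deadline, now) ? jiffies_to_msecs(deadline - now) : 0;
}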
@@ -2036,9 +2034,15 @@ static int ahci_port_start(struct ata_port *ap)
u32 cmd = readl(port_mmio + PORT_CMD);
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
- else
+ else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+ dev_printk(KERN_INFO, dev,
+ "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
+ pp->fbs_supported = true;
+ } else
dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
+ "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 7b5eea7e01d..8b5ea399a4f 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -145,12 +145,6 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
struct ata_eh_info *ehi = &ap->link.eh_info;
int wait = 0;
unsigned long flags;
- acpi_handle handle;
-
- if (dev)
- handle = dev->acpi_handle;
- else
- handle = ap->acpi_handle;
spin_lock_irqsave(ap->lock, flags);
/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ddf8e486278..932eaee5024 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -98,8 +98,6 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
-struct workqueue_struct *ata_aux_wq;
-
struct ata_force_param {
const char *name;
unsigned int cbl;
@@ -4167,15 +4165,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
{ "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
{ "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
+ { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
{ "CRD-84", NULL, ATA_HORKAGE_NODMA },
{ "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
{ "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
{ "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
{ "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
+ { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
{ "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
{ "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
{ "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
@@ -4211,70 +4207,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
/* Seagate NCQ + FLUSH CACHE firmware bug */
- { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ |
+ { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ |
+ { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ |
+ { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
-
- { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
-
- { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
- { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ |
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
/* Blacklist entries taken from Silicon Image 3124/3132
@@ -4303,12 +4245,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Devices which get the IVB wrong */
{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
/* Maybe we should just blacklist TSSTcorp... */
- { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, },
- { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, },
- { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
- { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
- { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
- { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
/* Devices that do not need bridging limits applied */
{ "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
@@ -4326,29 +4263,73 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ }
};
-static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
+/**
+ * glob_match - match a text string against a glob-style pattern
+ * @text: the string to be examined
+ * @pattern: the glob-style pattern to be matched against
+ *
+ * Either/both of text and pattern can be empty strings.
+ *
+ * Match text against a glob-style pattern, with wildcards and simple sets:
+ *
+ * ? matches any single character.
+ * * matches any run of characters.
+ * [xyz] matches a single character from the set: x, y, or z.
+ * [a-d] matches a single character from the range: a, b, c, or d.
+ * [a-d0-9] matches a single character from either range.
+ *
+ * The special characters ?, [, -, or * can be matched using a set, e.g. [*]
+ * Behaviour with malformed patterns is undefined, though generally reasonable.
+ *
+ * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
+ *
+ * This function uses one level of recursion per '*' in pattern.
+ * Since it calls _nothing_ else, and has _no_ explicit local variables,
+ * this will not cause stack problems for any reasonable use here.
+ *
+ * RETURNS:
+ * 0 on match, 1 otherwise.
+ */
+static int glob_match (const char *text, const char *pattern)
{
- const char *p;
- int len;
-
- /*
- * check for trailing wildcard: *\0
- */
- p = strchr(patt, wildchar);
- if (p && ((*(p + 1)) == 0))
- len = p - patt;
- else {
- len = strlen(name);
- if (!len) {
- if (!*patt)
- return 0;
- return -1;
+ do {
+ /* Match single character or a '?' wildcard */
+ if (*text == *pattern || *pattern == '?') {
+ if (!*pattern++)
+ return 0; /* End of both strings: match */
+ } else {
+ /* Match single char against a '[' bracketed ']' pattern set */
+ if (!*text || *pattern != '[')
+ break; /* Not a pattern set */
+ while (*++pattern && *pattern != ']' && *text != *pattern) {
+ if (*pattern == '-' && *(pattern - 1) != '[')
+ if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
+ ++pattern;
+ break;
+ }
+ }
+ if (!*pattern || *pattern == ']')
+ return 1; /* No match */
+ while (*pattern && *pattern++ != ']');
+ }
+ } while (*++text && *pattern);
+
+ /* Match any run of chars against a '*' wildcard */
+ if (*pattern == '*') {
+ if (!*++pattern)
+ return 0; /* Match: avoid recursion at end of pattern */
+ /* Loop to handle additional pattern chars after the wildcard */
+ while (*text) {
+ if (glob_match(text, pattern) == 0)
+ return 0; /* Remainder matched */
+ ++text; /* Absorb (match) this char and try again */
}
}
-
- return strncmp(patt, name, len);
+ if (!*text && !*pattern)
+ return 0; /* End of both strings: match */
+ return 1; /* No match */
}
-
+
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -4359,10 +4340,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
+ if (!glob_match(model_num, ad->model_num)) {
if (ad->model_rev == NULL)
return ad->horkage;
- if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
+ if (!glob_match(model_rev, ad->model_rev))
return ad->horkage;
}
ad++;
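The collapsed blacklist entries above depend on the glob_match() semantics documented in its kernel-doc. A brief illustration of the expected results (not part of the patch; glob_match() is static to libata-core.c, so this is only a sketch):

static void glob_match_examples(void)
{
	int m;

	m = glob_match("SD17", "SD1[5-9]");		/* 0: '7' falls in the 5-9 range */
	m = glob_match("SD14", "SD1[5-9]");		/* 1: '4' lies outside the range */
	m = glob_match("CRD-8480B", "CRD-848[02]B");	/* 0: the set matches '0' */
	m = glob_match("TSSTcorp CDDVDW SH-S202J",
		       "TSSTcorp CDDVDW SH-S202[HJN]");	/* 0: the set matches 'J' */
	m = glob_match("MTRON MSP-SATA7025",
		       "MTRON MSP-SATA*");		/* 0: '*' absorbs the trailing characters */
	(void)m;
}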
@@ -5130,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
qc->flags |= ATA_QCFLAG_ACTIVE;
ap->qc_active |= 1 << qc->tag;
- /* We guarantee to LLDs that they will have at least one
+ /*
+ * We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+ if (WARN_ON_ONCE(ata_is_data(prot) &&
+ (!qc->sg || !qc->n_elem || !qc->nbytes)))
+ goto sys_err;
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
(ap->flags & ATA_FLAG_PIO_DMA)))
if (ata_sg_setup(qc))
- goto sg_err;
+ goto sys_err;
/* if device is sleeping, schedule reset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5155,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
goto err;
return;
-sg_err:
+sys_err:
qc->err_mask |= AC_ERR_SYSTEM;
err:
ata_qc_complete(qc);
@@ -5434,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
*/
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
+ unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
/*
@@ -5442,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
*/
ata_lpm_enable(host);
- rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+ /*
+ * On some hardware, the device fails to respond after being spun
+ * down for suspend. As the device won't be used before being
+ * resumed, we don't need to touch it. Ask EH to skip
+ * the usual stuff and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+ if (mesg.event == PM_EVENT_SUSPEND)
+ ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+ rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
if (rc == 0)
host->dev->power.power_state = mesg;
return rc;
@@ -5611,6 +5607,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
+ mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
@@ -6549,29 +6546,20 @@ static int __init ata_init(void)
ata_parse_force_param();
- ata_aux_wq = create_singlethread_workqueue("ata_aux");
- if (!ata_aux_wq)
- goto fail;
-
rc = ata_sff_init();
- if (rc)
- goto fail;
+ if (rc) {
+ kfree(ata_force_tbl);
+ return rc;
+ }
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
-
-fail:
- kfree(ata_force_tbl);
- if (ata_aux_wq)
- destroy_workqueue(ata_aux_wq);
- return rc;
}
static void __exit ata_exit(void)
{
ata_sff_exit();
kfree(ata_force_tbl);
- destroy_workqueue(ata_aux_wq);
}
subsys_initcall(ata_init);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f77a67303f8..e48302eae55 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -727,7 +727,7 @@ void ata_scsi_error(struct Scsi_Host *host)
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
- queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
+ schedule_delayed_work(&ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -2214,6 +2214,7 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_SMART, "SMART" },
{ ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
{ ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
+ { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
{ ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
{ ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
{ ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
@@ -2944,7 +2945,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
ehc->i.flags |= ATA_EHI_SETMODE;
/* schedule the scsi_rescan_device() here */
- queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
+ schedule_work(&(ap->scsi_rescan_task));
} else if (dev->class == ATA_DEV_UNKNOWN &&
ehc->tries[dev->devno] &&
ata_class_enabled(ehc->classes[dev->devno])) {
@@ -3234,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
if (link->flags & ATA_LFLAG_DISABLED)
return 1;
+ /* skip if explicitly requested */
+ if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+ return 1;
+
/* thaw frozen port and recover failed devices */
if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
return 0;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a54273d2c3c..a89172c100f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1111,10 +1111,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
*/
static int atapi_drain_needed(struct request *rq)
{
- if (likely(!blk_pc_request(rq)))
+ if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
return 0;
- if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+ if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -3435,7 +3435,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
" switching to async\n");
}
- queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+ queue_delayed_work(system_long_wq, &ap->hotplug_task,
round_jiffies_relative(HZ));
}
@@ -3582,6 +3582,7 @@ void ata_scsi_hotplug(struct work_struct *work)
}
DPRINTK("ENTER\n");
+ mutex_lock(&ap->scsi_scan_mutex);
/* Unplug detached devices. We cannot use link iterator here
* because PMP links have to be scanned even if PMP is
@@ -3595,6 +3596,7 @@ void ata_scsi_hotplug(struct work_struct *work)
/* scan for new ones */
ata_scsi_scan_host(ap, 0);
+ mutex_unlock(&ap->scsi_scan_mutex);
DPRINTK("EXIT\n");
}
@@ -3673,9 +3675,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
* @work: Pointer to ATA port to perform scsi_rescan_device()
*
* After ATA pass thru (SAT) commands are executed successfully,
- * libata need to propagate the changes to SCSI layer. This
- * function must be executed from ata_aux_wq such that sdev
- * attach/detach don't race with rescan.
+ * libata needs to propagate the changes to the SCSI layer.
*
* LOCKING:
* Kernel thread context (may sleep).
@@ -3688,6 +3688,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_device *dev;
unsigned long flags;
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, EDGE) {
@@ -3707,6 +3708,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
}
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
}
/**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index efa4a18cfb9..e30c537cce3 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
+
+ ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq)
{
- struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
unsigned long flags = 0;
int poll_next;
@@ -1298,8 +1302,14 @@ fsm_start:
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
+ struct ata_port *ap = link->ap;
+
+ WARN_ON((ap->sff_pio_task_link != NULL) &&
+ (ap->sff_pio_task_link != link));
+ ap->sff_pio_task_link = link;
+
/* may fail if ata_sff_flush_pio_task() in progress */
queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, sff_pio_task.work);
+ struct ata_link *link = ap->sff_pio_task_link;
struct ata_queued_cmd *qc;
u8 status;
int poll_next;
+ BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (!qc)
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
return;
+ }
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
return;
}
}
+ /*
+ * hsm_move() may trigger another command to be processed;
+ * clean the link beforehand.
+ */
+ ap->sff_pio_task_link = NULL;
/* move the HSM */
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
@@ -1376,6 +1395,7 @@ fsm_start:
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
/* always send first data block using the
* ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
/* if polling, ata_sff_pio_task() handles the
* rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
default:
@@ -2734,10 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
-
- /* see ata_dma_blacklisted() */
- BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
- qc->tf.protocol == ATAPI_PROT_DMA);
+ struct ata_link *link = qc->dev->link;
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
@@ -2766,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
/* send cdb by polling if no cdb interrupt */
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
default:
@@ -3318,14 +3335,7 @@ void ata_sff_port_init(struct ata_port *ap)
int __init ata_sff_init(void)
{
- /*
- * FIXME: In UP case, there is only one workqueue thread and if you
- * have more than one PIO device, latency is bloody awful, with
- * occasional multi-second "hiccups" as one PIO device waits for
- * another. It's an ugly wart that users DO occasionally complain
- * about; luckily most users have at most one PIO polled device.
- */
- ata_sff_wq = create_workqueue("ata_sff");
+ ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
if (!ata_sff_wq)
return -ENOMEM;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 4b84ed60324..9ce1ecc63e3 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -54,7 +54,6 @@ enum {
};
extern unsigned int ata_print_id;
-extern struct workqueue_struct *ata_aux_wq;
extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f8c88..2215632e4b3 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Odd numbered device ids are the units with enable bits (the -R cards) */
- if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+ if ((pdev->device & 1) &&
+ !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 9f5da1c7454..905ff76d3cb 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
if (pair) {
struct ata_timing tp;
-
ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
- if (pair->dma_mode) {
- ata_timing_compute(pair, pair->dma_mode,
- &tp, T, 0);
- ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
- }
}
}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7e1ea..eaf194138f2 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
* Specific support is included for the ht6560a/ht6560b/opti82c611a/
* opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
*
+ * Support for the Winbond 83759A when operating in advanced mode.
+ * Multichip mode is not currently supported.
+ *
* Use the autospeed and pio_mask options with:
* Appian ADI/2 aka CLPD7220 or AIC25VL01.
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
static int qdi; /* Set to probe QDI controllers */
-static int winbond; /* Set to probe Winbond controllers,
- give I/O port if non standard */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
+#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
+static int winbond = 1; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
+#else
+static int winbond; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
+#endif
+
/**
* legacy_probe_add - Add interface to probe list
* @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_winbond");
module_param(probe_all, int, 0);
module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
module_param(opti82c611a, int, 0);
module_param(opti82c46x, int, 0);
module_param(qdi, int, 0);
+module_param(winbond, int, 0);
module_param(pio_mask, int, 0);
module_param(iordy_mask, int, 0);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index f087ab55b1d..8cc536e49a0 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -680,7 +680,7 @@ mpc52xx_ata_remove_one(struct device *dev)
/* ======================================================================== */
static int __devinit
-mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
+mpc52xx_ata_probe(struct platform_device *op, const struct of_device_id *match)
{
unsigned int ipb_freq;
struct resource res_mem;
@@ -821,7 +821,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
}
static int
-mpc52xx_ata_remove(struct of_device *op)
+mpc52xx_ata_remove(struct platform_device *op)
{
struct mpc52xx_ata_priv *priv;
int task_irq;
@@ -848,7 +848,7 @@ mpc52xx_ata_remove(struct of_device *op)
#ifdef CONFIG_PM
static int
-mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
+mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = dev_get_drvdata(&op->dev);
@@ -856,7 +856,7 @@ mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
}
static int
-mpc52xx_ata_resume(struct of_device *op)
+mpc52xx_ata_resume(struct platform_device *op)
{
struct ata_host *host = dev_get_drvdata(&op->dev);
struct mpc52xx_ata_priv *priv = host->private_data;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 5a1b82c08be..480e043ce6b 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,7 +14,7 @@
#include <linux/of_platform.h>
#include <linux/ata_platform.h>
-static int __devinit pata_of_platform_probe(struct of_device *ofdev,
+static int __devinit pata_of_platform_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
int ret;
@@ -78,7 +78,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev,
reg_shift, pio_mask);
}
-static int __devexit pata_of_platform_remove(struct of_device *ofdev)
+static int __devexit pata_of_platform_remove(struct platform_device *ofdev)
{
return __pata_platform_remove(&ofdev->dev);
}
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 118c28e8aba..e944aa0c551 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -34,7 +34,6 @@
#include <linux/ata.h>
#include <linux/libata.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -201,23 +200,25 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev,
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- pdev->io.BasePort1 = io->win[0].base;
- pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- if (!(io->flags & CISTPL_IO_16BIT))
- pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ pdev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ pdev->resource[0]->start = io->win[0].base;
+ if (!(io->flags & CISTPL_IO_16BIT)) {
+ pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ }
if (io->nwin == 2) {
- pdev->io.NumPorts1 = 8;
- pdev->io.BasePort2 = io->win[1].base;
- pdev->io.NumPorts2 = (stk->is_kme) ? 2 : 1;
- if (pcmcia_request_io(pdev, &pdev->io) != 0)
+ pdev->resource[0]->end = 8;
+ pdev->resource[1]->start = io->win[1].base;
+ pdev->resource[1]->end = (stk->is_kme) ? 2 : 1;
+ if (pcmcia_request_io(pdev) != 0)
return -ENODEV;
- stk->ctl_base = pdev->io.BasePort2;
+ stk->ctl_base = pdev->resource[1]->start;
} else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
- pdev->io.NumPorts1 = io->win[0].len;
- pdev->io.NumPorts2 = 0;
- if (pcmcia_request_io(pdev, &pdev->io) != 0)
+ pdev->resource[0]->end = io->win[0].len;
+ pdev->resource[1]->end = 0;
+ if (pcmcia_request_io(pdev) != 0)
return -ENODEV;
- stk->ctl_base = pdev->io.BasePort1 + 0x0e;
+ stk->ctl_base = pdev->resource[0]->start + 0x0e;
} else
return -ENODEV;
/* If we've got this far, we're done */
@@ -246,9 +247,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
struct ata_port_operations *ops = &pcmcia_port_ops;
/* Set up attributes in order to probe card and get resources */
- pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- pdev->io.IOAddrLines = 3;
+ pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
pdev->conf.Attributes = CONF_ENABLE_IRQ;
pdev->conf.IntType = INT_MEMORY_AND_IO;
@@ -271,7 +271,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk))
goto failed; /* No suitable config found */
}
- io_base = pdev->io.BasePort1;
+ io_base = pdev->resource[0]->start;
ctl_base = stk->ctl_base;
if (!pdev->irq)
goto failed;
@@ -294,7 +294,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
/* FIXME: Could be more ports at base + 0x10 but we only deal with
one right now */
- if (pdev->io.NumPorts1 >= 0x20)
+ if (resource_size(pdev->resource[0]) >= 0x20)
n_ports = 2;
if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
new file mode 100644
index 00000000000..1898c6ed4b4
--- /dev/null
+++ b/drivers/ata/pata_pxa.c
@@ -0,0 +1,411 @@
+/*
+ * Generic PXA PATA driver
+ *
+ * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_host.h>
+
+#include <mach/pxa2xx-regs.h>
+#include <mach/pata_pxa.h>
+#include <mach/dma.h>
+
+#define DRV_NAME "pata_pxa"
+#define DRV_VERSION "0.1"
+
+struct pata_pxa_data {
+ uint32_t dma_channel;
+ struct pxa_dma_desc *dma_desc;
+ dma_addr_t dma_desc_addr;
+ uint32_t dma_desc_id;
+
+ /* DMA IO physical address */
+ uint32_t dma_io_addr;
+ /* PXA DREQ<0:2> pin selector */
+ uint32_t dma_dreq;
+ /* DMA DCSR register value */
+ uint32_t dma_dcsr;
+
+ struct completion dma_done;
+};
+
+/*
+ * Set up the DMA descriptors. Each descriptor is capped at 4k of data;
+ * longer transfers are split into multiple chained descriptors.
+ */
+static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
+{
+ struct pata_pxa_data *pd = qc->ap->private_data;
+
+ uint32_t cpu_len, seg_len;
+ dma_addr_t cpu_addr;
+
+ cpu_addr = sg_dma_address(sg);
+ cpu_len = sg_dma_len(sg);
+
+ do {
+ seg_len = (cpu_len > 0x1000) ? 0x1000 : cpu_len;
+
+ pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr +
+ ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc));
+
+ pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 |
+ DCMD_WIDTH2 | (DCMD_LENGTH & seg_len);
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr;
+ pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr;
+ pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR |
+ DCMD_FLOWTRG;
+ } else {
+ pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr;
+ pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr;
+ pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR |
+ DCMD_FLOWSRC;
+ }
+
+ cpu_len -= seg_len;
+ cpu_addr += seg_len;
+ pd->dma_desc_id++;
+
+ } while (cpu_len);
+
+ /* Should not happen */
+ if (seg_len & 0x1f)
+ DALGN |= (1 << pd->dma_dreq);
+}
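A brief illustration of the 4k split described above: a single 10240-byte scatterlist segment becomes three chained descriptors of 4096, 4096 and 2048 bytes, and pxa_qc_prep() then flags only the last descriptor of the whole chain with DDADR_STOP and DCMD_ENDIRQEN. The helper below is hypothetical, not part of the driver:

static unsigned int pxa_example_num_descs(unsigned int seg_len)
{
	return (seg_len + 0xfff) >> 12;		/* ceil(seg_len / 4096) */
}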
+
+/*
+ * Prepare taskfile for submission.
+ */
+static void pxa_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct pata_pxa_data *pd = qc->ap->private_data;
+ int si = 0;
+ struct scatterlist *sg;
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ pd->dma_desc_id = 0;
+
+ DCSR(pd->dma_channel) = 0;
+ DALGN &= ~(1 << pd->dma_dreq);
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si)
+ pxa_load_dmac(sg, qc);
+
+ pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP;
+
+ /* Fire IRQ only at the end of last block */
+ pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN;
+
+ DDADR(pd->dma_channel) = pd->dma_desc_addr;
+ DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel;
+
+}
+
+/*
+ * Configure the DMA controller, load the DMA descriptors, but don't start the
+ * DMA controller yet. Only issue the ATA command.
+ */
+static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
+}
+
+/*
+ * Execute the DMA transfer.
+ */
+static void pxa_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct pata_pxa_data *pd = qc->ap->private_data;
+ init_completion(&pd->dma_done);
+ DCSR(pd->dma_channel) = DCSR_RUN;
+}
+
+/*
+ * Wait until the DMA transfer completes, then stop the DMA controller.
+ */
+static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct pata_pxa_data *pd = qc->ap->private_data;
+
+ if ((DCSR(pd->dma_channel) & DCSR_RUN) &&
+ !wait_for_completion_timeout(&pd->dma_done, HZ))
+ dev_err(qc->ap->dev, "Timeout waiting for DMA completion!");
+
+ DCSR(pd->dma_channel) = 0;
+}
+
+/*
+ * Read DMA status. bmdma_stop() takes care of properly finishing the
+ * DMA transfer, so we always have a DMA-complete interrupt here.
+ */
+static unsigned char pxa_bmdma_status(struct ata_port *ap)
+{
+ struct pata_pxa_data *pd = ap->private_data;
+ unsigned char ret = ATA_DMA_INTR;
+
+ if (pd->dma_dcsr & DCSR_BUSERR)
+ ret |= ATA_DMA_ERR;
+
+ return ret;
+}
+
+/*
+ * No IRQ register present so we do nothing.
+ */
+static void pxa_irq_clear(struct ata_port *ap)
+{
+}
+
+/*
+ * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
+ * unclear why ATAPI has DMA issues.
+ */
+static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct scsi_host_template pxa_ata_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pxa_ata_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+
+ .bmdma_setup = pxa_bmdma_setup,
+ .bmdma_start = pxa_bmdma_start,
+ .bmdma_stop = pxa_bmdma_stop,
+ .bmdma_status = pxa_bmdma_status,
+
+ .check_atapi_dma = pxa_check_atapi_dma,
+
+ .sff_irq_clear = pxa_irq_clear,
+
+ .qc_prep = pxa_qc_prep,
+};
+
+/*
+ * DMA interrupt handler.
+ */
+static void pxa_ata_dma_irq(int dma, void *port)
+{
+ struct ata_port *ap = port;
+ struct pata_pxa_data *pd = ap->private_data;
+
+ pd->dma_dcsr = DCSR(dma);
+ DCSR(dma) = pd->dma_dcsr;
+
+ if (pd->dma_dcsr & DCSR_STOPSTATE)
+ complete(&pd->dma_done);
+}
+
+static int __devinit pxa_ata_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct pata_pxa_data *data;
+ struct resource *cmd_res;
+ struct resource *ctl_res;
+ struct resource *dma_res;
+ struct resource *irq_res;
+ struct pata_pxa_pdata *pdata = pdev->dev.platform_data;
+ int ret = 0;
+
+ /*
+ * Resource validation, four resources are needed:
+ * - CMD port base address
+ * - CTL port base address
+ * - DMA port base address
+ * - IRQ pin
+ */
+ if (pdev->num_resources != 4) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * CMD port base address
+ */
+ cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(cmd_res == NULL))
+ return -EINVAL;
+
+ /*
+ * CTL port base address
+ */
+ ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (unlikely(ctl_res == NULL))
+ return -EINVAL;
+
+ /*
+ * DMA port base address
+ */
+ dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (unlikely(dma_res == NULL))
+ return -EINVAL;
+
+ /*
+ * IRQ pin
+ */
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(irq_res == NULL))
+ return -EINVAL;
+
+ /*
+ * Allocate the host
+ */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+ ap->ops = &pxa_ata_port_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->mwdma_mask = ATA_MWDMA2;
+ ap->flags = ATA_FLAG_MMIO;
+
+ ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
+ resource_size(cmd_res));
+ ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
+ resource_size(ctl_res));
+ ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
+ resource_size(dma_res));
+
+ /*
+ * Adjust register offsets
+ */
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+ ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_DATA << pdata->reg_shift);
+ ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_ERR << pdata->reg_shift);
+ ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_FEATURE << pdata->reg_shift);
+ ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_NSECT << pdata->reg_shift);
+ ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_LBAL << pdata->reg_shift);
+ ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_LBAM << pdata->reg_shift);
+ ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_LBAH << pdata->reg_shift);
+ ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_DEVICE << pdata->reg_shift);
+ ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_STATUS << pdata->reg_shift);
+ ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
+ (ATA_REG_CMD << pdata->reg_shift);
+
+ /*
+ * Allocate and load driver's internal data structure
+ */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ap->private_data = data;
+ data->dma_dreq = pdata->dma_dreq;
+ data->dma_io_addr = dma_res->start;
+
+ /*
+ * Allocate space for the DMA descriptors
+ */
+ data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &data->dma_desc_addr, GFP_KERNEL);
+ if (!data->dma_desc)
+ return -EINVAL;
+
+ /*
+ * Request the DMA channel
+ */
+ data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW,
+ pxa_ata_dma_irq, ap);
+ if (data->dma_channel < 0)
+ return -EBUSY;
+
+ /*
+ * Stop and clear the DMA channel
+ */
+ DCSR(data->dma_channel) = 0;
+
+ /*
+ * Activate the ATA host
+ */
+ ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
+ pdata->irq_flags, &pxa_ata_sht);
+ if (ret)
+ pxa_free_dma(data->dma_channel);
+
+ return ret;
+}
+
+static int __devexit pxa_ata_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct pata_pxa_data *data = host->ports[0]->private_data;
+
+ pxa_free_dma(data->dma_channel);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
+static struct platform_driver pxa_ata_driver = {
+ .probe = pxa_ata_probe,
+ .remove = __devexit_p(pxa_ata_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pxa_ata_init(void)
+{
+ return platform_driver_register(&pxa_ata_driver);
+}
+
+static void __exit pxa_ata_exit(void)
+{
+ platform_driver_unregister(&pxa_ata_driver);
+}
+
+module_init(pxa_ata_init);
+module_exit(pxa_ata_exit);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
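For reference, a hypothetical board registration sketch showing the four resources pxa_ata_probe() validates, in the order it fetches them, plus the pata_pxa_pdata fields it reads; the addresses, IRQ number and DREQ line are illustrative only:

static struct resource my_pata_pxa_resources[] = {
	{ .start = 0x30000000, .end = 0x300000ff, .flags = IORESOURCE_MEM },	/* CMD block */
	{ .start = 0x30000400, .end = 0x300004ff, .flags = IORESOURCE_MEM },	/* CTL block */
	{ .start = 0x30000200, .end = 0x30000203, .flags = IORESOURCE_DMA },	/* DMA data register */
	{ .start = 64,         .end = 64,         .flags = IORESOURCE_IRQ },	/* IRQ */
};

static struct pata_pxa_pdata my_pata_pxa_pdata = {
	.reg_shift = 2,				/* registers spaced on 32-bit boundaries */
	.dma_dreq  = 1,				/* PXA DREQ line wired to the CF socket */
	.irq_flags = IRQF_TRIGGER_RISING,
};

static struct platform_device my_pata_pxa_device = {
	.name          = "pata_pxa",
	.id            = -1,
	.resource      = my_pata_pxa_resources,
	.num_resources = ARRAY_SIZE(my_pata_pxa_resources),
	.dev = {
		.platform_data = &my_pata_pxa_pdata,
	},
};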
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
new file mode 100644
index 00000000000..6f9cfb24b75
--- /dev/null
+++ b/drivers/ata/pata_samsung_cf.c
@@ -0,0 +1,683 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * PATA driver for Samsung SoCs.
+ * Supports CF Interface in True IDE mode. Currently only PIO mode has been
+ * implemented; UDMA support has to be added.
+ *
+ * Based on:
+ * PATA driver for AT91SAM9260 Static Memory Controller
+ * PATA driver for Toshiba SCC controller
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <plat/ata.h>
+#include <plat/regs-ata.h>
+
+#define DRV_NAME "pata_samsung_cf"
+#define DRV_VERSION "0.1"
+
+enum s3c_cpu_type {
+ TYPE_S3C64XX,
+ TYPE_S5PC100,
+ TYPE_S5PV210,
+};
+
+/*
+ * struct s3c_ide_info - S3C PATA instance.
+ * @clk: The clock resource for this controller.
+ * @ide_addr: The area mapped for the hardware registers.
+ * @sfr_addr: The area mapped for the special function registers.
+ * @irq: The IRQ number we are using.
+ * @cpu_type: The exact type of this controller.
+ * @fifo_status_reg: The ATA_FIFO_STATUS register offset.
+ */
+struct s3c_ide_info {
+ struct clk *clk;
+ void __iomem *ide_addr;
+ void __iomem *sfr_addr;
+ unsigned int irq;
+ enum s3c_cpu_type cpu_type;
+ unsigned int fifo_status_reg;
+};
+
+static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
+{
+ u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG);
+ reg = mode ? (reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP);
+ writel(reg, s3c_ide_regbase + S3C_ATA_CFG);
+}
+
+static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
+{
+ /* Select true-ide as the internal operating mode */
+ writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
+ s3c_ide_sfrbase + S3C_CFATA_MUX);
+}
+
+static unsigned long
+pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
+{
+ int t1 = ata->setup;
+ int t2 = ata->act8b;
+ int t2i = ata->rec8b;
+ ulong piotime;
+
+ piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf);
+
+ return piotime;
+}
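The packing above is easiest to see with concrete numbers; a brief worked example (the cycle counts are illustrative, not taken from the patch):

/*
 * With setup t1 = 2, 8-bit active t2 = 4 and 8-bit recovery t2i = 6:
 *	piotime = (6 << 12) | (4 << 4) | 2 = 0x6042
 * pata_s3c_set_piomode() then writes this value to S3C_ATA_PIO_TIME.
 */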
+
+static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct s3c_ide_info *info = ap->host->private_data;
+ struct ata_timing timing;
+ int cycle_time;
+ ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
+ ulong piotime;
+
+ /* Enables IORDY if mode requires it */
+ if (ata_pio_need_iordy(adev))
+ ata_cfg |= S3C_ATA_CFG_IORDYEN;
+ else
+ ata_cfg &= ~S3C_ATA_CFG_IORDYEN;
+
+ cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));
+
+ ata_timing_compute(adev, adev->pio_mode, &timing,
+ cycle_time * 1000, 0);
+
+ piotime = pata_s3c_setup_timing(info, &timing);
+
+ writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
+ writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
+}
+
+/*
+ * Waits until the IDE controller is able to perform the next read/write
+ * operation to the disk. Needed for 64XX series boards only.
+ */
+static int wait_for_host_ready(struct s3c_ide_info *info)
+{
+ ulong timeout;
+ void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;
+
+ /* wait for maximum of 20 msec */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ if ((readl(fifo_reg) >> 28) == 0)
+ return 0;
+ }
+ return -EBUSY;
+}
+
+/*
+ * Writes to one of the task file registers.
+ */
+static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
+{
+ struct s3c_ide_info *info = host->private_data;
+
+ wait_for_host_ready(info);
+ writeb(addr, reg);
+}
+
+/*
+ * Reads from one of the task file registers.
+ */
+static u8 ata_inb(struct ata_host *host, void __iomem *reg)
+{
+ struct s3c_ide_info *info = host->private_data;
+ u8 temp;
+
+ wait_for_host_ready(info);
+ (void) readb(reg);
+ wait_for_host_ready(info);
+ temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
+ return temp;
+}
+
+/*
+ * pata_s3c_tf_load - send taskfile registers to host controller
+ */
+static void pata_s3c_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
+ ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
+ ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
+ ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
+ ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
+ }
+
+ if (is_addr) {
+ ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
+ ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
+ ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
+ ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
+ ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ ata_outb(ap->host, tf->device, ioaddr->device_addr);
+
+ ata_wait_idle(ap);
+}
+
+/*
+ * pata_s3c_tf_read - input device's ATA taskfile shadow registers
+ */
+static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->feature = ata_inb(ap->host, ioaddr->error_addr);
+ tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+ tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+ tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
+ tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
+ tf->device = ata_inb(ap->host, ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
+ tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+ tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+ tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
+ tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
+ ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+/*
+ * pata_s3c_exec_command - issue ATA command to host controller
+ */
+static void pata_s3c_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
+ ata_sff_pause(ap);
+}
+
+/*
+ * pata_s3c_check_status - Read device status register
+ */
+static u8 pata_s3c_check_status(struct ata_port *ap)
+{
+ return ata_inb(ap->host, ap->ioaddr.status_addr);
+}
+
+/*
+ * pata_s3c_check_altstatus - Read alternate device status register
+ */
+static u8 pata_s3c_check_altstatus(struct ata_port *ap)
+{
+ return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
+}
+
+/*
+ * pata_s3c_data_xfer - Transfer data by PIO
+ */
+unsigned int pata_s3c_data_xfer(struct ata_device *dev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct s3c_ide_info *info = ap->host->private_data;
+ void __iomem *data_addr = ap->ioaddr.data_addr;
+ unsigned int words = buflen >> 1, i;
+ u16 *data_ptr = (u16 *)buf;
+
+ /* Requires the same wait as in ata_inb/ata_outb */
+ if (rw == READ)
+ for (i = 0; i < words; i++, data_ptr++) {
+ wait_for_host_ready(info);
+ (void) readw(data_addr);
+ wait_for_host_ready(info);
+ *data_ptr = readw(info->ide_addr
+ + S3C_ATA_PIO_RDATA);
+ }
+ else
+ for (i = 0; i < words; i++, data_ptr++) {
+ wait_for_host_ready(info);
+ writew(*data_ptr, data_addr);
+ }
+
+ if (buflen & 0x01)
+ dev_err(ap->dev, "unexpected trailing data\n");
+
+ return words << 1;
+}
+
+/*
+ * pata_s3c_dev_select - Select device on ATA bus
+ */
+static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
+{
+ u8 tmp = ATA_DEVICE_OBS;
+
+ if (device != 0)
+ tmp |= ATA_DEV1;
+
+ ata_outb(ap->host, tmp, ap->ioaddr.device_addr);
+ ata_sff_pause(ap);
+}
+
+/*
+ * pata_s3c_devchk - PATA device presence detection
+ */
+static unsigned int pata_s3c_devchk(struct ata_port *ap,
+ unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ pata_s3c_dev_select(ap, device);
+
+ ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
+ ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
+
+ ata_outb(ap->host, 0xaa, ioaddr->nsect_addr);
+ ata_outb(ap->host, 0x55, ioaddr->lbal_addr);
+
+ ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
+ ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
+
+ nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+ lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+
+ if ((nsect == 0x55) && (lbal == 0xaa))
+ return 1; /* we found a device */
+
+ return 0; /* nothing found */
+}
+
+/*
+ * pata_s3c_wait_after_reset - wait for devices to become ready after reset
+ */
+static int pata_s3c_wait_after_reset(struct ata_link *link,
+ unsigned long deadline)
+{
+ int rc;
+
+ msleep(ATA_WAIT_AFTER_RESET);
+
+ /* always check readiness of the master device */
+ rc = ata_sff_wait_ready(link, deadline);
+ /* -ENODEV means the odd clown forgot the D7 pulldown resistor
+ * and TF status is 0xff, bail out on it too.
+ */
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * pata_s3c_bus_softreset - PATA device software reset
+ */
+static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
+ unsigned long deadline)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ /* software reset. causes dev0 to be selected */
+ ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
+ udelay(20);
+ ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20);
+ ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = ap->ctl;
+
+ return pata_s3c_wait_after_reset(&ap->link, deadline);
+}
+
+/*
+ * pata_s3c_softreset - reset host port via ATA SRST
+ */
+static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0 is present */
+ if (pata_s3c_devchk(ap, 0))
+ devmask |= (1 << 0);
+
+ /* select device 0 again */
+ pata_s3c_dev_select(ap, 0);
+
+ /* issue bus reset */
+ rc = pata_s3c_bus_softreset(ap, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && rc != -ENODEV) {
+ ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&ap->link.device[0],
+ devmask & (1 << 0), &err);
+
+ return 0;
+}
+
+/*
+ * pata_s3c_set_devctl - Write device control register
+ */
+static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
+}
+
+static struct scsi_host_template pata_s3c_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_s3c_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_check_status = pata_s3c_check_status,
+ .sff_check_altstatus = pata_s3c_check_altstatus,
+ .sff_tf_load = pata_s3c_tf_load,
+ .sff_tf_read = pata_s3c_tf_read,
+ .sff_data_xfer = pata_s3c_data_xfer,
+ .sff_exec_command = pata_s3c_exec_command,
+ .sff_dev_select = pata_s3c_dev_select,
+ .sff_set_devctl = pata_s3c_set_devctl,
+ .softreset = pata_s3c_softreset,
+ .set_piomode = pata_s3c_set_piomode,
+};
+
+static struct ata_port_operations pata_s5p_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .set_piomode = pata_s3c_set_piomode,
+};
+
+static void pata_s3c_enable(void *s3c_ide_regbase, bool state)
+{
+ u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
+ temp = state ? (temp | 1) : (temp & ~1);
+ writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
+}
+
+static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct s3c_ide_info *info = host->private_data;
+ u32 reg;
+
+ reg = readl(info->ide_addr + S3C_ATA_IRQ);
+ writel(reg, info->ide_addr + S3C_ATA_IRQ);
+
+ return ata_sff_interrupt(irq, dev_instance);
+}
+
+static void pata_s3c_hwinit(struct s3c_ide_info *info,
+ struct s3c_ide_platdata *pdata)
+{
+ switch (info->cpu_type) {
+ case TYPE_S3C64XX:
+ /* Configure as big endian */
+ pata_s3c_cfg_mode(info->sfr_addr);
+ pata_s3c_set_endian(info->ide_addr, 1);
+ pata_s3c_enable(info->ide_addr, true);
+ msleep(100);
+
+ /* Remove IRQ Status */
+ writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
+ writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
+ break;
+
+ case TYPE_S5PC100:
+ pata_s3c_cfg_mode(info->sfr_addr);
+ /* FALLTHROUGH */
+
+ case TYPE_S5PV210:
+ /* Configure as little endian */
+ pata_s3c_set_endian(info->ide_addr, 0);
+ pata_s3c_enable(info->ide_addr, true);
+ msleep(100);
+
+ /* Remove IRQ Status */
+ writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
+ writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static int __init pata_s3c_probe(struct platform_device *pdev)
+{
+ struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct s3c_ide_info *info;
+ struct resource *res;
+ struct ata_port *ap;
+ struct ata_host *host;
+ enum s3c_cpu_type cpu_type;
+ int ret;
+
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "failed to allocate memory for device data\n");
+ return -ENOMEM;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "failed to get mem resource\n");
+ return -EINVAL;
+ }
+
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res), DRV_NAME)) {
+ dev_err(dev, "error requesting register region\n");
+ return -EBUSY;
+ }
+
+ info->ide_addr = devm_ioremap(dev, res->start, resource_size(res));
+ if (!info->ide_addr) {
+ dev_err(dev, "failed to map IO base address\n");
+ return -ENOMEM;
+ }
+
+ info->clk = clk_get(&pdev->dev, "cfcon");
+ if (IS_ERR(info->clk)) {
+ dev_err(dev, "failed to get access to cf controller clock\n");
+ ret = PTR_ERR(info->clk);
+ info->clk = NULL;
+ return ret;
+ }
+
+ clk_enable(info->clk);
+
+ /* init ata host */
+ host = ata_host_alloc(dev, 1);
+ if (!host) {
+ dev_err(dev, "failed to allocate ide host\n");
+ ret = -ENOMEM;
+ goto stop_clk;
+ }
+
+ ap = host->ports[0];
+ ap->flags |= ATA_FLAG_MMIO;
+ ap->pio_mask = ATA_PIO4;
+
+ if (cpu_type == TYPE_S3C64XX) {
+ ap->ops = &pata_s3c_port_ops;
+ info->sfr_addr = info->ide_addr + 0x1800;
+ info->ide_addr += 0x1900;
+ info->fifo_status_reg = 0x94;
+ } else if (cpu_type == TYPE_S5PC100) {
+ ap->ops = &pata_s5p_port_ops;
+ info->sfr_addr = info->ide_addr + 0x1800;
+ info->ide_addr += 0x1900;
+ info->fifo_status_reg = 0x84;
+ } else {
+ ap->ops = &pata_s5p_port_ops;
+ info->fifo_status_reg = 0x84;
+ }
+
+ info->cpu_type = cpu_type;
+
+ if (info->irq <= 0) {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ info->irq = 0;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
+
+ ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD;
+ ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
+ ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
+ ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
+ ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
+ ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
+ ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
+ ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
+ ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
+ ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD;
+ ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD;
+ ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD;
+ ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD;
+
+ ata_port_desc(ap, "mmio cmd 0x%llx ",
+ (unsigned long long)res->start);
+
+ host->private_data = info;
+
+ if (pdata && pdata->setup_gpio)
+ pdata->setup_gpio();
+
+ /* Set endianness and enable the interface */
+ pata_s3c_hwinit(info, pdata);
+
+ platform_set_drvdata(pdev, host);
+
+ return ata_host_activate(host, info->irq,
+ info->irq ? pata_s3c_irq : NULL,
+ 0, &pata_s3c_sht);
+
+stop_clk:
+ clk_disable(info->clk);
+ clk_put(info->clk);
+ return ret;
+}
+
+static int __exit pata_s3c_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct s3c_ide_info *info = host->private_data;
+
+ ata_host_detach(host);
+
+ clk_disable(info->clk);
+ clk_put(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pata_s3c_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
+
+ return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int pata_s3c_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
+ struct s3c_ide_info *info = host->private_data;
+
+ pata_s3c_hwinit(info, pdata);
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pata_s3c_pm_ops = {
+ .suspend = pata_s3c_suspend,
+ .resume = pata_s3c_resume,
+};
+#endif
+
+/* driver device registration */
+static struct platform_device_id pata_s3c_driver_ids[] = {
+ {
+ .name = "s3c64xx-pata",
+ .driver_data = TYPE_S3C64XX,
+ }, {
+ .name = "s5pc100-pata",
+ .driver_data = TYPE_S5PC100,
+ }, {
+ .name = "s5pv210-pata",
+ .driver_data = TYPE_S5PV210,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids);
+
+static struct platform_driver pata_s3c_driver = {
+ .remove = __exit_p(pata_s3c_remove),
+ .id_table = pata_s3c_driver_ids,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pata_s3c_pm_ops,
+#endif
+ },
+};
+
+static int __init pata_s3c_init(void)
+{
+ return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
+}
+
+static void __exit pata_s3c_exit(void)
+{
+ platform_driver_unregister(&pata_s3c_driver);
+}
+
+module_init(pata_s3c_init);
+module_exit(pata_s3c_exit);
+
+MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
+MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index d9db3f8d60e..fe36966f7e3 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -168,8 +168,7 @@ static const unsigned long JCACTSELtbl[2][7] = {
};
static const struct pci_device_id scc_pci_tbl[] = {
- {PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
{ } /* terminate list */
};
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e659885de1..ac8d7d97e40 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
tf->lbam,
tf->lbah);
}
+
+ ata_wait_idle(ap);
}
static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b6f67..00000000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * pata_winbond.c - Winbond VLB ATA controllers
- * (C) 2006 Red Hat
- *
- * Support for the Winbond 83759A when operating in advanced mode.
- * Multichip mode is not currently supported.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.3"
-
-#define NR_HOST 4 /* Two winbond controllers, two channels each */
-
-struct winbond_data {
- unsigned long config;
- struct platform_device *platform_dev;
-};
-
-static struct ata_host *winbond_host[NR_HOST];
-static struct winbond_data winbond_data[NR_HOST];
-static int nr_winbond_host;
-
-#ifdef MODULE
-static int probe_winbond = 1;
-#else
-static int probe_winbond;
-#endif
-
-static DEFINE_SPINLOCK(winbond_lock);
-
-static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-{
- unsigned long flags;
- spin_lock_irqsave(&winbond_lock, flags);
- outb(reg, port + 0x01);
- outb(val, port + 0x02);
- spin_unlock_irqrestore(&winbond_lock, flags);
-}
-
-static u8 winbond_readcfg(unsigned long port, u8 reg)
-{
- u8 val;
-
- unsigned long flags;
- spin_lock_irqsave(&winbond_lock, flags);
- outb(reg, port + 0x01);
- val = inb(port + 0x02);
- spin_unlock_irqrestore(&winbond_lock, flags);
-
- return val;
-}
-
-static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct winbond_data *winbond = ap->host->private_data;
- int active, recovery;
- u8 reg;
- int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
-
- reg = winbond_readcfg(winbond->config, 0x81);
-
- /* Get the timing data in cycles */
- if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
- ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
- else
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
- recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
- timing = (active << 4) | recovery;
- winbond_writecfg(winbond->config, timing, reg);
-
- /* Load the setup timing */
-
- reg = 0x35;
- if (adev->class != ATA_DEV_ATA)
- reg |= 0x08; /* FIFO off */
- if (!ata_pio_need_iordy(adev))
- reg |= 0x02; /* IORDY off */
- reg |= (clamp_val(t.setup, 0, 3) << 6);
- winbond_writecfg(winbond->config, timing + 1, reg);
-}
-
-
-static unsigned int winbond_data_xfer(struct ata_device *dev,
- unsigned char *buf, unsigned int buflen, int rw)
-{
- struct ata_port *ap = dev->link->ap;
- int slop = buflen & 3;
-
- if (ata_id_has_dword_io(dev->id)) {
- if (rw == READ)
- ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- else
- iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
- if (unlikely(slop)) {
- __le32 pad;
- if (rw == READ) {
- pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- memcpy(buf + buflen - slop, &pad, slop);
- } else {
- memcpy(&pad, buf + buflen - slop, slop);
- iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- }
- buflen += 4 - slop;
- }
- } else
- buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
- return buflen;
-}
-
-static struct scsi_host_template winbond_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations winbond_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_data_xfer = winbond_data_xfer,
- .cable_detect = ata_cable_40wire,
- .set_piomode = winbond_set_piomode,
-};
-
-/**
- * winbond_init_one - attach a winbond interface
- * @type: Type to display
- * @io: I/O port start
- * @irq: interrupt line
- * @fast: True if on a > 33Mhz VLB
- *
- * Register a VLB bus IDE interface. Such interfaces are PIO and we
- * assume do not support IRQ sharing.
- */
-
-static __init int winbond_init_one(unsigned long port)
-{
- struct platform_device *pdev;
- u8 reg;
- int i, rc;
-
- reg = winbond_readcfg(port, 0x81);
- reg |= 0x80; /* jumpered mode off */
- winbond_writecfg(port, 0x81, reg);
- reg = winbond_readcfg(port, 0x83);
- reg |= 0xF0; /* local control */
- winbond_writecfg(port, 0x83, reg);
- reg = winbond_readcfg(port, 0x85);
- reg |= 0xF0; /* programmable timing */
- winbond_writecfg(port, 0x85, reg);
-
- reg = winbond_readcfg(port, 0x81);
-
- if (!(reg & 0x03)) /* Disabled */
- return -ENODEV;
-
- for (i = 0; i < 2 ; i ++) {
- unsigned long cmd_port = 0x1F0 - (0x80 * i);
- unsigned long ctl_port = cmd_port + 0x206;
- struct ata_host *host;
- struct ata_port *ap;
- void __iomem *cmd_addr, *ctl_addr;
-
- if (!(reg & (1 << i)))
- continue;
-
- pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- rc = -ENOMEM;
- host = ata_host_alloc(&pdev->dev, 1);
- if (!host)
- goto err_unregister;
- ap = host->ports[0];
-
- rc = -ENOMEM;
- cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
- ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
- if (!cmd_addr || !ctl_addr)
- goto err_unregister;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
-
- ap->ops = &winbond_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
- ap->ioaddr.cmd_addr = cmd_addr;
- ap->ioaddr.altstatus_addr = ctl_addr;
- ap->ioaddr.ctl_addr = ctl_addr;
- ata_sff_std_ports(&ap->ioaddr);
-
- /* hook in a private data structure per channel */
- host->private_data = &winbond_data[nr_winbond_host];
- winbond_data[nr_winbond_host].config = port;
- winbond_data[nr_winbond_host].platform_dev = pdev;
-
- /* activate */
- rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
- &winbond_sht);
- if (rc)
- goto err_unregister;
-
- winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
- }
-
- return 0;
-
- err_unregister:
- platform_device_unregister(pdev);
- return rc;
-}
-
-/**
- * winbond_init - attach winbond interfaces
- *
- * Attach winbond IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int winbond_init(void)
-{
- static const unsigned long config[2] = { 0x130, 0x1B0 };
-
- int ct = 0;
- int i;
-
- if (probe_winbond == 0)
- return -ENODEV;
-
- /*
- * Check both base addresses
- */
-
- for (i = 0; i < 2; i++) {
- if (probe_winbond & (1<<i)) {
- int ret = 0;
- unsigned long port = config[i];
-
- if (request_region(port, 2, "pata_winbond")) {
- ret = winbond_init_one(port);
- if (ret <= 0)
- release_region(port, 2);
- else ct+= ret;
- }
- }
- }
- if (ct != 0)
- return 0;
- return -ENODEV;
-}
-
-static __exit void winbond_exit(void)
-{
- int i;
-
- for (i = 0; i < nr_winbond_host; i++) {
- ata_host_detach(winbond_host[i]);
- release_region(winbond_data[i].config, 2);
- platform_device_unregister(winbond_data[i].platform_dev);
- }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(winbond_init);
-module_exit(winbond_exit);
-
-module_param(probe_winbond, int, 0);
-
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
new file mode 100644
index 00000000000..6cf57c5c2b5
--- /dev/null
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -0,0 +1,1756 @@
+/*
+ * drivers/ata/sata_dwc_460ex.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
+ * Copyright 2008 DENX Software Engineering
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ * Copyright 2006 Applied Micro Circuits Corporation
+ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifdef CONFIG_SATA_DWC_DEBUG
+#define DEBUG
+#endif
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+#define VERBOSE_DEBUG
+#define DEBUG_NCQ
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/slab.h>
+#include "libata.h"
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+#define DRV_NAME "sata-dwc"
+#define DRV_VERSION "1.0"
+
+/* SATA DMA driver Globals */
+#define DMA_NUM_CHANS 1
+#define DMA_NUM_CHAN_REGS 8
+
+/* SATA DMA Register definitions */
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
+
+struct dmareg {
+ u32 low; /* Low bits 0-31 */
+ u32 high; /* High bits 32-63 */
+};
+
+/* DMA Per Channel registers */
+struct dma_chan_regs {
+ struct dmareg sar; /* Source Address */
+ struct dmareg dar; /* Destination address */
+ struct dmareg llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+ struct dmareg sstat; /* Source Status not implemented in core */
+ struct dmareg dstat; /* Destination Status not implemented in core*/
+ struct dmareg sstatar; /* Source Status Address not impl in core */
+ struct dmareg dstatar; /* Destination Status Address not impl in core */
+ struct dmareg cfg; /* Config */
+ struct dmareg sgr; /* Source Gather */
+ struct dmareg dsr; /* Destination Scatter */
+};
+
+/* Generic Interrupt Registers */
+struct dma_interrupt_regs {
+ struct dmareg tfr; /* Transfer Interrupt */
+ struct dmareg block; /* Block Interrupt */
+ struct dmareg srctran; /* Source Transfer Interrupt */
+ struct dmareg dsttran; /* Dest Transfer Interrupt */
+ struct dmareg error; /* Error */
+};
+
+struct ahb_dma_regs {
+ struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
+ struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
+ struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
+ struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
+ struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
+ struct dmareg statusInt; /* Interrupt combined*/
+ struct dmareg rq_srcreg; /* Src Trans Req */
+ struct dmareg rq_dstreg; /* Dst Trans Req */
+ struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/
+ struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/
+ struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/
+ struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/
+ struct dmareg dma_cfg; /* DMA Config */
+ struct dmareg dma_chan_en; /* DMA Channel Enable*/
+ struct dmareg dma_id; /* DMA ID */
+ struct dmareg dma_test; /* DMA Test */
+ struct dmareg res1; /* reserved */
+ struct dmareg res2; /* reserved */
+ /*
+ * DMA Comp Params
+ * Param 6 = dma_param[0], Param 5 = dma_param[1],
+ * Param 4 = dma_param[2] ...
+ */
+ struct dmareg dma_params[6];
+};
+
+/* Data structure for linked list item */
+struct lli {
+ u32 sar; /* Source Address */
+ u32 dar; /* Destination address */
+ u32 llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+ struct dmareg dstat; /* Destination Status */
+};
+
+enum {
+ SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)),
+ SATA_DWC_DMAC_LLI_NUM = 256,
+ SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
+ SATA_DWC_DMAC_LLI_NUM),
+ SATA_DWC_DMAC_TWIDTH_BYTES = 4,
+ SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
+ SATA_DWC_DMAC_TWIDTH_BYTES),
+};
+
+/* DMA Register Operation Bits */
+enum {
+ DMA_EN = 0x00000001, /* Enable AHB DMA */
+ DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */
+ DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */
+};
+
+#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
+#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
+ /* Enable channel */
+#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
+ ((0x000000001 << (ch)) << 8))
+ /* Disable channel */
+#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
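+/*
+ * Illustrative example (added comment): DMA_ENABLE_CHAN(0) evaluates to
+ * 0x00000101 and DMA_DISABLE_CHAN(0) to 0x00000100. The low byte holds the
+ * channel enable bit; the matching bit in the next byte is assumed to act
+ * as the write-enable for it, as on the DW AHB DMAC channel enable register.
+ */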
+ /* Transfer Type & Flow Controller */
+#define DMA_CTL_TTFC(type) (((type) & 0x7) << 20)
+#define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */
+#define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */
+ /* Src Burst Transaction Length */
+#define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14)
+ /* Dst Burst Transaction Length */
+#define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11)
+ /* Source Transfer Width */
+#define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4)
+ /* Destination Transfer Width */
+#define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1)
+
+/* Assign HW handshaking interface (x) to destination / source peripheral */
+#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
+#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
+#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
+
+/*
+ * This define is used to set block chaining disabled in the control low
+ * register. It is already in little endian format so it can be &'d directly.
+ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
+ */
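+/*
+ * Worked example (added for clarity): DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN
+ * is 0x18000000, so its complement is 0xE7FFFFFF in CPU byte order; on a
+ * big-endian host such as the 460EX, cpu_to_le32(0xE7FFFFFF) yields the
+ * 0xFFFFFFE7 constant defined below.
+ */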
+enum {
+ DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
+ DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */
+ DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */
+ DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */
+ DMA_CTL_SINC_DEC = 0x00000200,
+ DMA_CTL_SINC_NOCHANGE = 0x00000400,
+ DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */
+ DMA_CTL_DINC_DEC = 0x00000080,
+ DMA_CTL_DINC_NOCHANGE = 0x00000100,
+ DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */
+
+/* Channel Configuration Register high bits */
+ DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */
+ DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */
+
+/* Channel Configuration Register low bits */
+ DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */
+ DMA_CFG_RELD_SRC = 0x40000000,
+ DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */
+ DMA_CFG_HS_SELDST = 0x00000400,
+ DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */
+
+/* Channel Linked List Pointer Register */
+ DMA_LLP_AHBMASTER1 = 0, /* List Master Select */
+ DMA_LLP_AHBMASTER2 = 1,
+
+ SATA_DWC_MAX_PORTS = 1,
+
+ SATA_DWC_SCR_OFFSET = 0x24,
+ SATA_DWC_REG_OFFSET = 0x64,
+};
+
+/* DWC SATA Registers */
+struct sata_dwc_regs {
+ u32 fptagr; /* 1st party DMA tag */
+ u32 fpbor; /* 1st party DMA buffer offset */
+ u32 fptcr; /* 1st party DMA Xfr count */
+ u32 dmacr; /* DMA Control */
+ u32 dbtsr; /* DMA Burst Transac size */
+ u32 intpr; /* Interrupt Pending */
+ u32 intmr; /* Interrupt Mask */
+ u32 errmr; /* Error Mask */
+ u32 llcr; /* Link Layer Control */
+ u32 phycr; /* PHY Control */
+ u32 physr; /* PHY Status */
+ u32 rxbistpd; /* Recvd BIST pattern def register */
+ u32 rxbistpd1; /* Recvd BIST data dword1 */
+ u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
+ u32 txbistpd; /* Trans BIST pattern def register */
+ u32 txbistpd1; /* Trans BIST data dword1 */
+ u32 txbistpd2; /* Trans BIST data dword2 */
+ u32 bistcr; /* BIST Control Register */
+ u32 bistfctr; /* BIST FIS Count Register */
+ u32 bistsr; /* BIST Status Register */
+ u32 bistdecr; /* BIST Dword Error count register */
+ u32 res[15]; /* Reserved locations */
+ u32 testr; /* Test Register */
+ u32 versionr; /* Version Register */
+ u32 idr; /* ID Register */
+ u32 unimpl[192]; /* Unimplemented */
+ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
+};
+
+enum {
+ SCR_SCONTROL_DET_ENABLE = 0x00000001,
+ SCR_SSTATUS_DET_PRESENT = 0x00000001,
+ SCR_SERROR_DIAG_X = 0x04000000,
+/* DWC SATA Register Operations */
+ SATA_DWC_TXFIFO_DEPTH = 0x01FF,
+ SATA_DWC_RXFIFO_DEPTH = 0x01FF,
+ SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
+ SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
+ SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
+ SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
+ SATA_DWC_INTPR_DMAT = 0x00000001,
+ SATA_DWC_INTPR_NEWFP = 0x00000002,
+ SATA_DWC_INTPR_PMABRT = 0x00000004,
+ SATA_DWC_INTPR_ERR = 0x00000008,
+ SATA_DWC_INTPR_NEWBIST = 0x00000010,
+ SATA_DWC_INTPR_IPF = 0x10000000,
+ SATA_DWC_INTMR_DMATM = 0x00000001,
+ SATA_DWC_INTMR_NEWFPM = 0x00000002,
+ SATA_DWC_INTMR_PMABRTM = 0x00000004,
+ SATA_DWC_INTMR_ERRM = 0x00000008,
+ SATA_DWC_INTMR_NEWBISTM = 0x00000010,
+ SATA_DWC_LLCR_SCRAMEN = 0x00000001,
+ SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
+ SATA_DWC_LLCR_RPDEN = 0x00000004,
+/* These are all the error bits; zeros are reserved fields. */
+ SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
+};
+
+#define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
+#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
+#define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
+ << 16)
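+/*
+ * Example (added, illustrative only): for the default 64-byte burst,
+ * SATA_DWC_DBTSR_MWR(64) is 16 and SATA_DWC_DBTSR_MRD(64) is 16 << 16,
+ * i.e. the MWR value (in 32-bit words) goes in the low halfword of DBTSR
+ * and the MRD value in the high halfword.
+ */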
+struct sata_dwc_device {
+ struct device *dev; /* generic device struct */
+ struct ata_probe_ent *pe; /* ptr to probe-ent */
+ struct ata_host *host;
+ u8 *reg_base;
+ struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
+ int irq_dma;
+};
+
+#define SATA_DWC_QCMD_MAX 32
+
+struct sata_dwc_device_port {
+ struct sata_dwc_device *hsdev;
+ int cmd_issued[SATA_DWC_QCMD_MAX];
+ struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */
+ dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
+ u32 dma_chan[SATA_DWC_QCMD_MAX];
+ int dma_pending[SATA_DWC_QCMD_MAX];
+};
+
+/*
+ * Commonly used DWC SATA driver Macros
+ */
+#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
+ (host)->private_data)
+#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
+ (ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
+ (ap)->private_data)
+#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
+ (qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
+ (p)->hsdev)
+
+enum {
+ SATA_DWC_CMD_ISSUED_NOT = 0,
+ SATA_DWC_CMD_ISSUED_PEND = 1,
+ SATA_DWC_CMD_ISSUED_EXEC = 2,
+ SATA_DWC_CMD_ISSUED_NODATA = 3,
+
+ SATA_DWC_DMA_PENDING_NONE = 0,
+ SATA_DWC_DMA_PENDING_TX = 1,
+ SATA_DWC_DMA_PENDING_RX = 2,
+};
+
+struct sata_dwc_host_priv {
+ void __iomem *scr_addr_sstatus;
+ u32 sata_dwc_sactive_issued;
+ u32 sata_dwc_sactive_queued;
+ u32 dma_interrupt_count;
+ struct ahb_dma_regs *sata_dma_regs;
+ struct device *dwc_dev;
+};
+struct sata_dwc_host_priv host_pvt;
+/*
+ * Prototypes
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
+static void sata_dwc_port_stop(struct ata_port *ap);
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
+static void dma_dwc_exit(struct sata_dwc_device *hsdev);
+static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *addr, int dir);
+static void dma_dwc_xfer_start(int dma_ch);
+
+static void sata_dwc_tf_dump(struct ata_taskfile *tf)
+{
+ dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
+ "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\
+ (tf->protocol), tf->flags, tf->device);
+ dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
+ "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah);
+ dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x "
+ "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
+ tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
+ tf->hob_lbah);
+}
+
+/*
+ * Function: get_burst_length_encode
+ * arguments: datalength: length in bytes of data
+ * returns value to be programmed in register corresponding to data length
+ * This value is effectively the log(base 2) of the length
+ */
+static int get_burst_length_encode(int datalength)
+{
+ int items = datalength >> 2; /* div by 4 to get lword count */
+
+ if (items >= 64)
+ return 5;
+
+ if (items >= 32)
+ return 4;
+
+ if (items >= 16)
+ return 3;
+
+ if (items >= 8)
+ return 2;
+
+ if (items >= 4)
+ return 1;
+
+ return 0;
+}
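+/*
+ * Worked example (added): the default AHB_DMA_BRST_DFLT of 64 bytes is
+ * 16 32-bit items, so get_burst_length_encode(64) returns 3, which is
+ * assumed to be the DW DMAC SRC/DST_MSIZE encoding for a 16-item burst.
+ */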
+
+static void clear_chan_interrupts(int c)
+{
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
+ DMA_CHANNEL(c));
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
+ DMA_CHANNEL(c));
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
+ DMA_CHANNEL(c));
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
+ DMA_CHANNEL(c));
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
+ DMA_CHANNEL(c));
+}
+
+/*
+ * Function: dma_request_channel
+ * arguments: None
+ * returns channel number if available else -1
+ * This function assigns the next available DMA channel from the list to the
+ * requester
+ */
+static int dma_request_channel(void)
+{
+ int i;
+
+ for (i = 0; i < DMA_NUM_CHANS; i++) {
+ if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &\
+ DMA_CHANNEL(i)))
+ return i;
+ }
+ dev_err(host_pvt.dwc_dev, "%s NO channel chan_en: 0x%08x\n", __func__,
+ in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)));
+ return -1;
+}
+
+/*
+ * Function: dma_dwc_interrupt
+ * arguments: irq, hsdev_instance
+ * returns: irqreturn_t - IRQ_HANDLED after servicing the DMA interrupts
+ * Interrupt Handler for DW AHB SATA DMA
+ */
+static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
+{
+ int chan;
+ u32 tfr_reg, err_reg;
+ unsigned long flags;
+ struct sata_dwc_device *hsdev =
+ (struct sata_dwc_device *)hsdev_instance;
+ struct ata_host *host = (struct ata_host *)hsdev->host;
+ struct ata_port *ap;
+ struct sata_dwc_device_port *hsdevp;
+ u8 tag = 0;
+ unsigned int port = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+ tag = ap->link.active_tag;
+
+ tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\
+ .low));
+ err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\
+ .low));
+
+ dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
+ tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
+
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Check for end-of-transfer interrupt. */
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ /*
+ * Each DMA command produces 2 interrupts. Only
+ * complete the command after both interrupts have been
+ * seen. (See sata_dwc_isr())
+ */
+ host_pvt.dma_interrupt_count++;
+ sata_dwc_clear_dmacr(hsdevp, tag);
+
+ if (hsdevp->dma_pending[tag] ==
+ SATA_DWC_DMA_PENDING_NONE) {
+ dev_err(ap->dev, "DMA not pending eot=0x%08x "
+ "err=0x%08x tag=0x%02x pending=%d\n",
+ tfr_reg, err_reg, tag,
+ hsdevp->dma_pending[tag]);
+ }
+
+ if ((host_pvt.dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+
+ /* Clear the interrupt */
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
+ .tfr.low),
+ DMA_CHANNEL(chan));
+ }
+
+ /* Check for error interrupt. */
+ if (err_reg & DMA_CHANNEL(chan)) {
+ /* TODO Need error handler ! */
+ dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
+ err_reg);
+
+ /* Clear the interrupt. */
+ out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
+ .error.low),
+ DMA_CHANNEL(chan));
+ }
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Function: dma_request_interrupts
+ * arguments: hsdev
+ * returns status
+ * This function registers ISR for a particular DMA channel interrupt
+ */
+static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
+{
+ int retval = 0;
+ int chan;
+
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Unmask error interrupt */
+ out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
+ DMA_ENABLE_CHAN(chan));
+
+ /* Unmask end-of-transfer interrupt */
+ out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
+ DMA_ENABLE_CHAN(chan));
+ }
+
+ retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
+ if (retval) {
+ dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
+ __func__, irq);
+ return -ENODEV;
+ }
+
+ /* Mark this interrupt as requested */
+ hsdev->irq_dma = irq;
+ return 0;
+}
+
+/*
+ * Function: map_sg_to_lli
+ * The Synopsys driver has a comment proposing that better performance
+ * is possible by only enabling interrupts on the last item in the linked list.
+ * However, it seems that could be a problem if an error happened on one of the
+ * first items. The transfer would halt, but no error interrupt would occur.
+ * Currently this function sets interrupts enabled for each linked list item:
+ * DMA_CTL_INT_EN.
+ */
+static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *dmadr_addr, int dir)
+{
+ int i, idx = 0;
+ int fis_len = 0;
+ dma_addr_t next_llp;
+ int bl;
+
+ dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
+ " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
+ (u32)dmadr_addr);
+
+ bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
+
+ for (i = 0; i < num_elems; i++, sg++) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
+ "=%d\n", __func__, i, addr, sg_len);
+
+ while (sg_len) {
+ if (idx >= SATA_DWC_DMAC_LLI_NUM) {
+ /* The LLI table is not large enough. */
+ dev_err(host_pvt.dwc_dev, "LLI table overrun "
+ "(idx=%d)\n", idx);
+ break;
+ }
+ len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
+ SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
+
+ offset = addr & 0xffff;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ /*
+ * Make sure a LLI block is not created that will span
+ * 8K max FIS boundary. If the block spans such a FIS
+ * boundary, there is a chance that a DMA burst will
+ * cross that boundary -- this results in an error in
+ * the host controller.
+ */
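+ /*
+ * Worked example (added): if 6000 bytes of the current FIS
+ * have already been mapped and the next chunk is 4096 bytes,
+ * only 8192 - 6000 = 2192 bytes go into this LLI entry and
+ * the remainder is mapped on the next loop iteration.
+ */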
+ if (fis_len + len > 8192) {
+ dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
+ "%d(0x%x) len=%d(0x%x)\n", fis_len,
+ fis_len, len, len);
+ len = 8192 - fis_len;
+ fis_len = 0;
+ } else {
+ fis_len += len;
+ }
+ if (fis_len == 8192)
+ fis_len = 0;
+
+ /*
+ * Set DMA addresses and lower half of control register
+ * based on direction.
+ */
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(1) |
+ DMA_CTL_DMS(0) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+
+ dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
+ "0x%08x val: 0x%08x\n", __func__,
+ len, DMA_CTL_BLK_TS(len / 4));
+
+ /* Program the LLI CTL high register */
+ lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
+ (len / 4));
+
+ /* Program the next pointer. The next pointer must be
+ * the physical address, not the virtual address.
+ */
+ next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
+ lli)));
+
+ /* The last 2 bits encode the list master select. */
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
+
+ lli[idx].llp = cpu_to_le32(next_llp);
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /*
+ * The last next ptr has to be zero and the last control low register
+ * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
+ * and destination enable) set back to 0 (disabled.) This is what tells
+ * the core that this is the last item in the linked list.
+ */
+ if (idx) {
+ lli[idx-1].llp = 0x00000000;
+ lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
+
+ /* Flush cache to memory */
+ dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
+ DMA_BIDIRECTIONAL);
+ }
+
+ return idx;
+}
+
+/*
+ * Function: dma_dwc_xfer_start
+ * arguments: Channel number
+ * Return : None
+ * Enables the DMA channel
+ */
+static void dma_dwc_xfer_start(int dma_ch)
+{
+ /* Enable the DMA channel */
+ out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
+ in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
+ DMA_ENABLE_CHAN(dma_ch));
+}
+
+static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *addr, int dir)
+{
+ int dma_ch;
+ int num_lli;
+ /* Acquire DMA channel */
+ dma_ch = dma_request_channel();
+ if (dma_ch == -1) {
+ dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ /* Convert SG list to linked list of items (LLIs) for AHB DMA */
+ num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
+
+ dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
+ " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems,
+ lli, (u32)dma_lli, addr, num_lli);
+
+ clear_chan_interrupts(dma_ch);
+
+ /* Program the CFG register. */
+ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
+ DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
+ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
+
+ /* Program the address of the linked list */
+ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
+ DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
+
+ /* Program the CTL register with src enable / dst enable */
+ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
+ DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
+ return 0;
+}
+
+/*
+ * Function: dma_dwc_exit
+ * arguments: hsdev
+ * returns: none
+ * This function exits the SATA DMA driver
+ */
+static void dma_dwc_exit(struct sata_dwc_device *hsdev)
+{
+ dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
+ if (host_pvt.sata_dma_regs)
+ iounmap(host_pvt.sata_dma_regs);
+
+ if (hsdev->irq_dma)
+ free_irq(hsdev->irq_dma, hsdev);
+}
+
+/*
+ * Function: dma_dwc_init
+ * arguments: hsdev
+ * returns status
+ * This function initializes the SATA DMA driver
+ */
+static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
+{
+ int err;
+
+ err = dma_request_interrupts(hsdev, irq);
+ if (err) {
+ dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
+ " %d\n", __func__, err);
+ goto error_out;
+ }
+
+ /* Enable DMA */
+ out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
+
+ dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
+ dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\
+ sata_dma_regs);
+
+ return 0;
+
+error_out:
+ dma_dwc_exit(hsdev);
+
+ return err;
+}
+
+static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+ if (scr > SCR_NOTIFICATION) {
+ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+ __func__, scr);
+ return -EINVAL;
+ }
+
+ *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
+ dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
+ __func__, link->ap->print_id, scr, *val);
+
+ return 0;
+}
+
+static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+ dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
+ __func__, link->ap->print_id, scr, val);
+ if (scr > SCR_NOTIFICATION) {
+ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+ __func__, scr);
+ return -EINVAL;
+ }
+ out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
+
+ return 0;
+}
+
+static u32 core_scr_read(unsigned int scr)
+{
+ return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\
+ (scr * 4));
+}
+
+static void core_scr_write(unsigned int scr, u32 val)
+{
+ out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4),
+ val);
+}
+
+static void clear_serror(void)
+{
+ u32 val;
+ val = core_scr_read(SCR_ERROR);
+ core_scr_write(SCR_ERROR, val);
+
+}
+
+static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
+{
+ out_le32(&hsdev->sata_dwc_regs->intpr,
+ in_le32(&hsdev->sata_dwc_regs->intpr));
+}
+
+static u32 qcmd_tag_to_mask(u8 tag)
+{
+ return 0x00000001 << (tag & 0x1f);
+}
+
+/* See ahci.c */
+static void sata_dwc_error_intr(struct ata_port *ap,
+ struct sata_dwc_device *hsdev, uint intpr)
+{
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ unsigned int err_mask = 0, action = 0;
+ struct ata_queued_cmd *qc;
+ u32 serror;
+ u8 status, tag;
+ u32 err_reg;
+
+ ata_ehi_clear_desc(ehi);
+
+ serror = core_scr_read(SCR_ERROR);
+ status = ap->ops->sff_check_status(ap);
+
+ err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\
+ low));
+ tag = ap->link.active_tag;
+
+ dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
+ "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
+ __func__, serror, intpr, status, host_pvt.dma_interrupt_count,
+ hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
+
+ /* Clear error register and interrupt bit */
+ clear_serror();
+ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
+
+ /* This is the only error happening now. TODO check for exact error */
+
+ err_mask |= AC_ERR_HOST_BUS;
+ action |= ATA_EH_RESET;
+
+ /* Pass this on to EH */
+ ehi->serror |= serror;
+ ehi->action |= action;
+
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ ata_port_abort(ap);
+}
+
+/*
+ * Function : sata_dwc_isr
+ * arguments : irq, void *dev_instance
+ * Return value : irqreturn_t - status of IRQ
+ * This interrupt handler is called via the registered port ops function:
+ * .irq_handler = sata_dwc_isr
+ */
+static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
+{
+ struct ata_host *host = (struct ata_host *)dev_instance;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
+ struct ata_port *ap;
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ u8 status, tag;
+ int handled, num_processed, port = 0;
+ uint intpr, sactive, sactive2, tag_mask;
+ struct sata_dwc_device_port *hsdevp;
+ host_pvt.sata_dwc_sactive_issued = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Read the interrupt register */
+ intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
+ ap->link.active_tag);
+
+ /* Check for error interrupt */
+ if (intpr & SATA_DWC_INTPR_ERR) {
+ sata_dwc_error_intr(ap, hsdev, intpr);
+ handled = 1;
+ goto DONE;
+ }
+
+ /* Check for DMA SETUP FIS (FP DMA) interrupt */
+ if (intpr & SATA_DWC_INTPR_NEWFP) {
+ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
+
+ tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+ dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
+ dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
+
+ host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
+
+ qc = ata_qc_from_tag(ap, tag);
+ /*
+ * Start FP DMA for NCQ command. At this point the tag is the
+ * active tag. It is the tag that matches the command about to
+ * be completed.
+ */
+ qc->ap->link.active_tag = tag;
+ sata_dwc_bmdma_start_by_tag(qc, tag);
+
+ handled = 1;
+ goto DONE;
+ }
+ sactive = core_scr_read(SCR_ACTIVE);
+ tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
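+ /*
+ * Added comment: tag_mask ends up holding the tags that were issued
+ * but are no longer set in SActive, i.e. the commands that have
+ * completed. For example, with sactive_issued = 0x06 (tags 1 and 2)
+ * and sactive = 0x04 (tag 2 still active), (0x06 | 0x04) ^ 0x04 is
+ * 0x02, marking tag 1 as completed.
+ */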
+
+ /* If no sactive issued and tag_mask is zero then this is not NCQ */
+ if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
+ if (ap->link.active_tag == ATA_TAG_POISON)
+ tag = 0;
+ else
+ tag = ap->link.active_tag;
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* DEV interrupt w/ no active qc? */
+ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+ dev_err(ap->dev, "%s interrupt with no active qc "
+ "qc=%p\n", __func__, qc);
+ ap->ops->sff_check_status(ap);
+ handled = 1;
+ goto DONE;
+ }
+ status = ap->ops->sff_check_status(ap);
+
+ qc->ap->link.active_tag = tag;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+ if (status & ATA_ERR) {
+ dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
+ sata_dwc_qc_complete(ap, qc, 1);
+ handled = 1;
+ goto DONE;
+ }
+
+ dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
+ __func__, ata_get_cmd_descript(qc->tf.protocol));
+DRVSTILLBUSY:
+ if (ata_is_dma(qc->tf.protocol)) {
+ /*
+ * Each DMA transaction produces 2 interrupts. The DMAC
+ * transfer complete interrupt and the SATA controller
+ * operation done interrupt. The command should be
+ * completed only after both interrupts are seen.
+ */
+ host_pvt.dma_interrupt_count++;
+ if (hsdevp->dma_pending[tag] == \
+ SATA_DWC_DMA_PENDING_NONE) {
+ dev_err(ap->dev, "%s: DMA not pending "
+ "intpr=0x%08x status=0x%08x pending"
+ "=%d\n", __func__, intpr, status,
+ hsdevp->dma_pending[tag]);
+ }
+
+ if ((host_pvt.dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+ } else if (ata_is_pio(qc->tf.protocol)) {
+ ata_sff_hsm_move(ap, qc, status, 0);
+ handled = 1;
+ goto DONE;
+ } else {
+ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ goto DRVSTILLBUSY;
+ }
+
+ handled = 1;
+ goto DONE;
+ }
+
+ /*
+ * This is a NCQ command. At this point we need to figure out for which
+ * tags we have gotten a completion interrupt. One interrupt may serve
+ * as completion for more than one operation when commands are queued
+ * (NCQ). We need to process each completed command.
+ */
+
+ /* process completed commands */
+ sactive = core_scr_read(SCR_ACTIVE);
+ tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
+
+ if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
+ tag_mask > 1) {
+ dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x"
+ "tag_mask=0x%08x\n", __func__, sactive,
+ host_pvt.sata_dwc_sactive_issued, tag_mask);
+ }
+
+ if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
+ (host_pvt.sata_dwc_sactive_issued)) {
+ dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x "
+ "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask"
+ "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued,
+ tag_mask);
+ }
+
+ /* read just to clear ... not bad if currently still busy */
+ status = ap->ops->sff_check_status(ap);
+ dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
+
+ tag = 0;
+ num_processed = 0;
+ while (tag_mask) {
+ num_processed++;
+ while (!(tag_mask & 0x00000001)) {
+ tag++;
+ tag_mask <<= 1;
+ }
+
+ tag_mask &= (~0x00000001);
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* To be picked up by completion functions */
+ qc->ap->link.active_tag = tag;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+ /* Let libata/scsi layers handle error */
+ if (status & ATA_ERR) {
+ dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
+ status);
+ sata_dwc_qc_complete(ap, qc, 1);
+ handled = 1;
+ goto DONE;
+ }
+
+ /* Process completed command */
+ dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
+ ata_get_cmd_descript(qc->tf.protocol));
+ if (ata_is_dma(qc->tf.protocol)) {
+ host_pvt.dma_interrupt_count++;
+ if (hsdevp->dma_pending[tag] == \
+ SATA_DWC_DMA_PENDING_NONE)
+ dev_warn(ap->dev, "%s: DMA not pending?\n",
+ __func__);
+ if ((host_pvt.dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+ } else {
+ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ goto STILLBUSY;
+ }
+ continue;
+
+STILLBUSY:
+ ap->stats.idle_irq++;
+ dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
+ ap->print_id);
+ } /* while tag_mask */
+
+ /*
+ * Check to see if any commands completed while we were processing our
+ * initial set of completed commands (read status clears interrupts,
+ * so we might miss a completed command interrupt if one came in while
+ * we were processing --we read status as part of processing a completed
+ * command).
+ */
+ sactive2 = core_scr_read(SCR_ACTIVE);
+ if (sactive2 != sactive) {
+ dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2"
+ "=0x%x\n", sactive, sactive2);
+ }
+ handled = 1;
+
+DONE:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+
+ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
+ out_le32(&(hsdev->sata_dwc_regs->dmacr),
+ SATA_DWC_DMACR_RX_CLEAR(
+ in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+ } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
+ out_le32(&(hsdev->sata_dwc_regs->dmacr),
+ SATA_DWC_DMACR_TX_CLEAR(
+ in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+ } else {
+ /*
+ * This should not happen, it indicates the driver is out of
+ * sync. If it does happen, clear dmacr anyway.
+ */
+ dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and"
+ "TX DMA not pending tag=0x%02x pending=%d"
+ " dmacr: 0x%08x\n", __func__, tag,
+ hsdevp->dma_pending[tag],
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ out_le32(&(hsdev->sata_dwc_regs->dmacr),
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
+ }
+}
+
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+{
+ struct ata_queued_cmd *qc;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ u8 tag = 0;
+
+ tag = ap->link.active_tag;
+ qc = ata_qc_from_tag(ap, tag);
+ if (!qc) {
+ dev_err(ap->dev, "failed to get qc");
+ return;
+ }
+
+#ifdef DEBUG_NCQ
+ if (tag > 0) {
+ dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
+ "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
+ ata_get_cmd_descript(qc->dma_dir),
+ ata_get_cmd_descript(qc->tf.protocol),
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ }
+#endif
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
+ dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
+ "pending dmacr: 0x%08x\n", __func__,
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ }
+
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+ sata_dwc_qc_complete(ap, qc, check_status);
+ ap->link.active_tag = ATA_TAG_POISON;
+ } else {
+ sata_dwc_qc_complete(ap, qc, check_status);
+ }
+}
+
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status)
+{
+ u8 status = 0;
+ u32 mask = 0x0;
+ u8 tag = qc->tag;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ host_pvt.sata_dwc_sactive_queued = 0;
+ dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
+
+ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
+ dev_err(ap->dev, "TX DMA PENDING\n");
+ else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
+ dev_err(ap->dev, "RX DMA PENDING\n");
+ dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:"
+ " protocol=%d\n", qc->tf.command, status, ap->print_id,
+ qc->tf.protocol);
+
+ /* clear active bit */
+ mask = (~(qcmd_tag_to_mask(tag)));
+ host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
+ & mask;
+ host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
+ & mask;
+ ata_qc_complete(qc);
+ return 0;
+}
+
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
+{
+ /* Enable selective interrupts by setting the interrupt maskregister*/
+ out_le32(&hsdev->sata_dwc_regs->intmr,
+ SATA_DWC_INTMR_ERRM |
+ SATA_DWC_INTMR_NEWFPM |
+ SATA_DWC_INTMR_PMABRTM |
+ SATA_DWC_INTMR_DMATM);
+ /*
+ * Unmask the error bits that should trigger an error interrupt by
+ * setting the error mask register.
+ */
+ out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
+
+ dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
+ __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
+ in_le32(&hsdev->sata_dwc_regs->errmr));
+}
+
+static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
+{
+ port->cmd_addr = (void *)base + 0x00;
+ port->data_addr = (void *)base + 0x00;
+
+ port->error_addr = (void *)base + 0x04;
+ port->feature_addr = (void *)base + 0x04;
+
+ port->nsect_addr = (void *)base + 0x08;
+
+ port->lbal_addr = (void *)base + 0x0c;
+ port->lbam_addr = (void *)base + 0x10;
+ port->lbah_addr = (void *)base + 0x14;
+
+ port->device_addr = (void *)base + 0x18;
+ port->command_addr = (void *)base + 0x1c;
+ port->status_addr = (void *)base + 0x1c;
+
+ port->altstatus_addr = (void *)base + 0x20;
+ port->ctl_addr = (void *)base + 0x20;
+}
+
+/*
+ * Function : sata_dwc_port_start
+ * arguments : struct ata_port *ap
+ * Return value : returns 0 if success, error code otherwise
+ * This function allocates the scatter gather LLI table for AHB DMA
+ */
+static int sata_dwc_port_start(struct ata_port *ap)
+{
+ int err = 0;
+ struct sata_dwc_device *hsdev;
+ struct sata_dwc_device_port *hsdevp = NULL;
+ struct device *pdev;
+ int i;
+
+ hsdev = HSDEV_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
+
+ hsdev->host = ap->host;
+ pdev = ap->host->dev;
+ if (!pdev) {
+ dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
+ err = -ENODEV;
+ goto CLEANUP;
+ }
+
+ /* Allocate Port Struct */
+ hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
+ if (!hsdevp) {
+ dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
+ err = -ENOMEM;
+ goto CLEANUP;
+ }
+ hsdevp->hsdev = hsdev;
+
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+ hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+
+ ap->bmdma_prd = 0; /* set these so libata doesn't use them */
+ ap->bmdma_prd_dma = 0;
+
+ /*
+ * DMA - Assign scatter gather LLI table. We can't use the libata
+ * version since its PRD is IDE PCI specific.
+ */
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+ hsdevp->llit[i] = dma_alloc_coherent(pdev,
+ SATA_DWC_DMAC_LLI_TBL_SZ,
+ &(hsdevp->llit_dma[i]),
+ GFP_ATOMIC);
+ if (!hsdevp->llit[i]) {
+ dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto CLEANUP;
+ }
+ }
+
+ if (ap->port_no == 0) {
+ dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+ __func__);
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+ dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
+ __func__);
+ out_le32(&hsdev->sata_dwc_regs->dbtsr,
+ (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+ }
+
+ /* Clear any error bits before libata starts issuing commands */
+ clear_serror();
+ ap->private_data = hsdevp;
+
+CLEANUP:
+ if (err) {
+ sata_dwc_port_stop(ap);
+ dev_dbg(ap->dev, "%s: fail\n", __func__);
+ } else {
+ dev_dbg(ap->dev, "%s: done\n", __func__);
+ }
+
+ return err;
+}
+
+static void sata_dwc_port_stop(struct ata_port *ap)
+{
+ int i;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
+
+ if (hsdevp && hsdev) {
+ /* deallocate LLI table */
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+ dma_free_coherent(ap->host->dev,
+ SATA_DWC_DMAC_LLI_TBL_SZ,
+ hsdevp->llit[i], hsdevp->llit_dma[i]);
+ }
+
+ kfree(hsdevp);
+ }
+ ap->private_data = NULL;
+}
+
+/*
+ * Function : sata_dwc_exec_command_by_tag
+ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
+ * Return value : None
+ * This function keeps track of individual command tag ids and calls
+ * ata_sff_exec_command in libata
+ */
+static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
+ struct ata_taskfile *tf,
+ u8 tag, u32 cmd_issued)
+{
+ unsigned long flags;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
+ ata_get_cmd_descript(tf), tag);
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ hsdevp->cmd_issued[tag] = cmd_issued;
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+ /*
+ * Clear SError before executing a new command.
+ * sata_dwc_scr_write and read can not be used here. Clearing the PM
+ * managed SError register for the disk needs to be done before the
+ * task file is loaded.
+ */
+ clear_serror();
+ ata_sff_exec_command(ap, tf);
+}
+
+static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
+ SATA_DWC_CMD_ISSUED_PEND);
+}
+
+static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ u8 tag = qc->tag;
+
+ if (ata_is_ncq(qc->tf.protocol)) {
+ dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+ __func__, qc->ap->link.sactive, tag);
+ } else {
+ tag = 0;
+ }
+ sata_dwc_bmdma_setup_by_tag(qc, tag);
+}
+
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ int start_dma;
+ u32 reg, dma_chan;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
+ struct ata_port *ap = qc->ap;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ int dir = qc->dma_dir;
+ dma_chan = hsdevp->dma_chan[tag];
+
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
+ start_dma = 1;
+ if (dir == DMA_TO_DEVICE)
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
+ else
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
+ } else {
+ dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
+ "(tag=%d) DMA NOT started\n", __func__,
+ hsdevp->cmd_issued[tag], tag);
+ start_dma = 0;
+ }
+
+ dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
+ "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
+ ata_get_cmd_descript(qc->dma_dir), start_dma);
+ sata_dwc_tf_dump(&(qc->tf));
+
+ if (start_dma) {
+ reg = core_scr_read(SCR_ERROR);
+ if (reg & SATA_DWC_SERROR_ERR_BITS) {
+ dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
+ __func__, reg);
+ }
+
+ if (dir == DMA_TO_DEVICE)
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXCHEN);
+ else
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_RXCHEN);
+
+ /* Enable AHB DMA transfer on the specified channel */
+ dma_dwc_xfer_start(dma_chan);
+ }
+}
+
+static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
+{
+ u8 tag = qc->tag;
+
+ if (ata_is_ncq(qc->tf.protocol)) {
+ dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+ __func__, qc->ap->link.sactive, tag);
+ } else {
+ tag = 0;
+ }
+ dev_dbg(qc->ap->dev, "%s\n", __func__);
+ sata_dwc_bmdma_start_by_tag(qc, tag);
+}
+
+/*
+ * Function : sata_dwc_qc_prep_by_tag
+ * arguments : ata_queued_cmd *qc, u8 tag
+ * Return value : None
+ * qc_prep for a particular queued command based on tag
+ */
+static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ struct scatterlist *sg = qc->sg;
+ struct ata_port *ap = qc->ap;
+ int dma_chan;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
+ __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir),
+ qc->n_elem);
+
+ dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
+ hsdevp->llit_dma[tag],
+ (void *__iomem)(&hsdev->sata_dwc_regs->dmadr),
+ qc->dma_dir);
+ if (dma_chan < 0) {
+ dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
+ __func__, dma_chan);
+ return;
+ }
+ hsdevp->dma_chan[tag] = dma_chan;
+}
+
+static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
+{
+ u32 sactive;
+ u8 tag = qc->tag;
+ struct ata_port *ap = qc->ap;
+
+#ifdef DEBUG_NCQ
+ if (qc->tag > 0 || ap->link.sactive > 1)
+ dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
+ "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
+ __func__, ap->print_id, qc->tf.command,
+ ata_get_cmd_descript(&qc->tf),
+ qc->tag, ata_get_cmd_descript(qc->tf.protocol),
+ ap->link.active_tag, ap->link.sactive);
+#endif
+
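+ /* Non-NCQ commands are always tracked with tag 0 */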
+ if (!ata_is_ncq(qc->tf.protocol))
+ tag = 0;
+ sata_dwc_qc_prep_by_tag(qc, tag);
+
+ if (ata_is_ncq(qc->tf.protocol)) {
+ sactive = core_scr_read(SCR_ACTIVE);
+ sactive |= (0x00000001 << tag);
+ core_scr_write(SCR_ACTIVE, sactive);
+
+ dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
+ "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
+ sactive);
+
+ ap->ops->sff_tf_load(ap, &qc->tf);
+ sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+ SATA_DWC_CMD_ISSUED_PEND);
+ } else {
+ ata_sff_qc_issue(qc);
+ }
+ return 0;
+}
+
+/*
+ * Function : sata_dwc_qc_prep
+ * arguments : ata_queued_cmd *qc
+ * Return value : None
+ * qc_prep for a particular queued command
+ */
+
+static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
+{
+ if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+ return;
+
+#ifdef DEBUG_NCQ
+ if (qc->tag > 0)
+ dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
+ __func__, qc->tag, qc->ap->link.active_tag);
+
+ return;
+#endif
+}
+
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+ ap->link.flags |= ATA_LFLAG_NO_HRST;
+ ata_sff_error_handler(ap);
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_dwc_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ /*
+ * test-only: Currently this driver doesn't handle NCQ
+ * correctly. We enable NCQ but set the queue depth to a
+ * max of 1. This will get fixed in a future release.
+ */
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations sata_dwc_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .error_handler = sata_dwc_error_handler,
+
+ .qc_prep = sata_dwc_qc_prep,
+ .qc_issue = sata_dwc_qc_issue,
+
+ .scr_read = sata_dwc_scr_read,
+ .scr_write = sata_dwc_scr_write,
+
+ .port_start = sata_dwc_port_start,
+ .port_stop = sata_dwc_port_stop,
+
+ .bmdma_setup = sata_dwc_bmdma_setup,
+ .bmdma_start = sata_dwc_bmdma_start,
+};
+
+static const struct ata_port_info sata_dwc_port_info[] = {
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+ .pio_mask = 0x1f, /* pio 0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sata_dwc_ops,
+ },
+};
+
+static int sata_dwc_probe(struct platform_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct sata_dwc_device *hsdev;
+ u32 idr, versionr;
+ char *ver = (char *)&versionr;
+ u8 *base = NULL;
+ int err = 0;
+ int irq, rc;
+ struct ata_host *host;
+ struct ata_port_info pi = sata_dwc_port_info[0];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+
+ /* Allocate DWC SATA device */
+ hsdev = kmalloc(sizeof(*hsdev), GFP_KERNEL);
+ if (hsdev == NULL) {
+ dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
+ err = -ENOMEM;
+ goto error_out;
+ }
+ memset(hsdev, 0, sizeof(*hsdev));
+
+ /* Ioremap SATA registers */
+ base = of_iomap(ofdev->dev.of_node, 0);
+ if (!base) {
+ dev_err(&ofdev->dev, "ioremap failed for SATA register"
+ " address\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+ hsdev->reg_base = base;
+ dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+ /* Synopsys DWC SATA specific Registers */
+ hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+
+ /* Allocate and fill host */
+ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+ if (!host) {
+ dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
+ err = -ENOMEM;
+ goto error_out;
+ }
+
+ host->private_data = hsdev;
+
+ /* Setup port */
+ host->ports[0]->ioaddr.cmd_addr = base;
+ host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+ host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
+ sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+
+ /* Read the ID and Version Registers */
+ idr = in_le32(&hsdev->sata_dwc_regs->idr);
+ versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+ dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+ idr, ver[0], ver[1], ver[2]);
+
+ /* Get SATA DMA interrupt number */
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no SATA DMA irq\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /* Get physical SATA DMA register base address */
+ host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
+ if (!(host_pvt.sata_dma_regs)) {
+ dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
+ " address\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /* Save dev for later use in dev_xxx() routines */
+ host_pvt.dwc_dev = &ofdev->dev;
+
+ /* Initialize AHB DMAC */
+ dma_dwc_init(hsdev, irq);
+
+ /* Enable SATA Interrupts */
+ sata_dwc_enable_interrupts(hsdev);
+
+ /* Get SATA interrupt number */
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no SATA DMA irq\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /*
+ * Now, register with libATA core, this will also initiate the
+ * device discovery process, invoking our port_start() handler &
+ * error_handler() to execute a dummy Softreset EH session
+ */
+ rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+
+ if (rc != 0)
+ dev_err(&ofdev->dev, "failed to activate host");
+
+ dev_set_drvdata(&ofdev->dev, host);
+ return 0;
+
+error_out:
+ /* Free SATA DMA resources */
+ dma_dwc_exit(hsdev);
+
+ if (base)
+ iounmap(base);
+ return err;
+}
+
+static int sata_dwc_remove(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_dwc_device *hsdev = host->private_data;
+
+ ata_host_detach(host);
+ dev_set_drvdata(dev, NULL);
+
+ /* Free SATA DMA resources */
+ dma_dwc_exit(hsdev);
+
+ iounmap(hsdev->reg_base);
+ kfree(hsdev);
+ dev_dbg(&ofdev->dev, "done\n");
+ return 0;
+}
+
+static const struct of_device_id sata_dwc_match[] = {
+ { .compatible = "amcc,sata-460ex", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sata_dwc_match);
+
+static struct of_platform_driver sata_dwc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sata_dwc_match,
+ },
+ .probe = sata_dwc_probe,
+ .remove = sata_dwc_remove,
+};
+
+static int __init sata_dwc_init(void)
+{
+ return of_register_platform_driver(&sata_dwc_driver);
+}
+
+static void __exit sata_dwc_exit(void)
+{
+ of_unregister_platform_driver(&sata_dwc_driver);
+}
+
+module_init(sata_dwc_init);
+module_exit(sata_dwc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
+MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 61c89b54ea2..7325f77480d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1096,7 +1096,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- u32 hstatus, qc_active = 0;
+ u32 hstatus, done_mask = 0;
struct ata_queued_cmd *qc;
u32 SError;
@@ -1116,28 +1116,28 @@ static void sata_fsl_host_intr(struct ata_port *ap)
}
/* Read command completed register */
- qc_active = ioread32(hcr_base + CC);
+ done_mask = ioread32(hcr_base + CC);
VPRINTK("Status of all queues :\n");
- VPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
- qc_active,
+ VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
+ done_mask,
ioread32(hcr_base + CA),
ioread32(hcr_base + CE),
ioread32(hcr_base + CQ),
ap->qc_active);
- if (qc_active & ap->qc_active) {
+ if (done_mask & ap->qc_active) {
int i;
/* clear CC bit, this will also complete the interrupt */
- iowrite32(qc_active, hcr_base + CC);
+ iowrite32(done_mask, hcr_base + CC);
DPRINTK("Status of all queues :\n");
- DPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
- qc_active, ioread32(hcr_base + CA),
+ DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
+ done_mask, ioread32(hcr_base + CA),
ioread32(hcr_base + CE));
for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
- if (qc_active & (1 << i)) {
+ if (done_mask & (1 << i)) {
qc = ata_qc_from_tag(ap, i);
if (qc) {
ata_qc_complete(qc);
@@ -1164,7 +1164,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
/* Spurious Interrupt!! */
DPRINTK("spurious interrupt!!, CC = 0x%x\n",
ioread32(hcr_base + CC));
- iowrite32(qc_active, hcr_base + CC);
+ iowrite32(done_mask, hcr_base + CC);
return;
}
}
@@ -1296,7 +1296,7 @@ static const struct ata_port_info sata_fsl_port_info[] = {
},
};
-static int sata_fsl_probe(struct of_device *ofdev,
+static int sata_fsl_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
int retval = -ENXIO;
@@ -1370,7 +1370,7 @@ error_exit_with_cleanup:
return retval;
}
-static int sata_fsl_remove(struct of_device *ofdev)
+static int sata_fsl_remove(struct platform_device *ofdev)
{
struct ata_host *host = dev_get_drvdata(&ofdev->dev);
struct sata_fsl_host_priv *host_priv = host->private_data;
@@ -1387,13 +1387,13 @@ static int sata_fsl_remove(struct of_device *ofdev)
}
#ifdef CONFIG_PM
-static int sata_fsl_suspend(struct of_device *op, pm_message_t state)
+static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = dev_get_drvdata(&op->dev);
return ata_host_suspend(host, state);
}
-static int sata_fsl_resume(struct of_device *op)
+static int sata_fsl_resume(struct platform_device *op)
{
struct ata_host *host = dev_get_drvdata(&op->dev);
struct sata_fsl_host_priv *host_priv = host->private_data;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a476cd99b95..a9fd9709c26 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
{
- struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
- cmd &= ~ATA_DMA_START;
- writelfl(cmd, port_mmio + BMDMA_CMD);
+ if (cmd & ATA_DMA_START) {
+ cmd &= ~ATA_DMA_START;
+ writelfl(cmd, port_mmio + BMDMA_CMD);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+ }
+}
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ mv_bmdma_stop_ap(qc->ap);
}
/**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
- else
+ else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+ else {
+ /*
+ * Just because DMA_ACTIVE is 0 (DMA completed),
+ * this does _not_ mean the device is "done".
+ * So we should not yet be signalling ATA_DMA_INTR
+ * in some cases. Eg. DSM/TRIM, and perhaps others.
+ */
+ mv_bmdma_stop_ap(ap);
+ if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+ status = 0;
+ else
+ status = ATA_DMA_INTR;
+ }
return status;
}
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
+ if (tf->command == ATA_CMD_DSM)
+ return;
+ /* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
+ if (tf->command == ATA_CMD_DSM)
+ return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2260,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
return 0;
}
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
+ if (qc->tf.command == ATA_CMD_DSM) {
+ if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
+ return AC_ERR_OTHER;
+ break; /* use bmdma for this */
+ }
+ /* fall thru */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
@@ -2716,34 +2746,35 @@ static void mv_err_intr(struct ata_port *ap)
static void mv_process_crpb_response(struct ata_port *ap,
struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
+ u8 ata_status;
+ u16 edma_status = le16_to_cpu(response->flags);
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
- if (qc) {
- u8 ata_status;
- u16 edma_status = le16_to_cpu(response->flags);
- /*
- * edma_status from a response queue entry:
- * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
- * MSB is saved ATA status from command completion.
- */
- if (!ncq_enabled) {
- u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
- if (err_cause) {
- /*
- * Error will be seen/handled by mv_err_intr().
- * So do nothing at all here.
- */
- return;
- }
- }
- ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
- if (!ac_err_mask(ata_status))
- ata_qc_complete(qc);
- /* else: leave it for mv_err_intr() */
- } else {
+ if (unlikely(!qc)) {
ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
__func__, tag);
+ return;
+ }
+
+ /*
+ * edma_status from a response queue entry:
+ * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
+ * MSB is saved ATA status from command completion.
+ */
+ if (!ncq_enabled) {
+ u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
+ if (err_cause) {
+ /*
+ * Error will be seen/handled by
+ * mv_err_intr(). So do nothing at all here.
+ */
+ return;
+ }
}
+ ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
+ if (!ac_err_mask(ata_status))
+ ata_qc_complete(qc);
+ /* else: leave it for mv_err_intr() */
}
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 21161136cad..cb89ef8d99d 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1018,7 +1018,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
NV_ADMA_STAT_CPBERR |
NV_ADMA_STAT_CMD_COMPLETE)) {
u32 check_commands = notifier_clears[i];
- int pos, error = 0;
+ int pos, rc;
if (status & NV_ADMA_STAT_CPBERR) {
/* check all active commands */
@@ -1030,10 +1030,12 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
}
/* check CPBs for completed commands */
- while ((pos = ffs(check_commands)) && !error) {
+ while ((pos = ffs(check_commands))) {
pos--;
- error = nv_adma_check_cpb(ap, pos,
+ rc = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos));
+ if (unlikely(rc))
+ check_commands = 0;
check_commands &= ~(1 << pos);
}
}
@@ -2129,7 +2131,6 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
struct nv_swncq_port_priv *pp = ap->private_data;
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 sactive;
- int nr_done = 0;
u32 done_mask;
int i;
u8 host_stat;
@@ -2170,22 +2171,21 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
pp->dhfis_bits &= ~(1 << i);
pp->dmafis_bits &= ~(1 << i);
pp->sdbfis_bits |= (1 << i);
- nr_done++;
}
}
if (!ap->qc_active) {
DPRINTK("over\n");
nv_swncq_pp_reinit(ap);
- return nr_done;
+ return 0;
}
if (pp->qc_active & pp->dhfis_bits)
- return nr_done;
+ return 0;
if ((pp->ncq_flags & ncq_saw_backout) ||
(pp->qc_active ^ pp->dhfis_bits))
- /* if the controller cann't get a device to host register FIS,
+ /* if the controller can't get a device to host register FIS,
* The driver needs to reissue the new command.
*/
lack_dhfis = 1;
@@ -2202,7 +2202,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
if (lack_dhfis) {
qc = ata_qc_from_tag(ap, pp->last_issue_tag);
nv_swncq_issue_atacmd(ap, qc);
- return nr_done;
+ return 0;
}
if (pp->defer_queue.defer_bits) {
@@ -2212,7 +2212,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
nv_swncq_issue_atacmd(ap, qc);
}
- return nr_done;
+ return 0;
}
static inline u32 nv_swncq_tag(struct ata_port *ap)
@@ -2224,7 +2224,7 @@ static inline u32 nv_swncq_tag(struct ata_port *ap)
return (tag & 0x1f);
}
-static int nv_swncq_dmafis(struct ata_port *ap)
+static void nv_swncq_dmafis(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
unsigned int rw;
@@ -2239,7 +2239,7 @@ static int nv_swncq_dmafis(struct ata_port *ap)
qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc))
- return 0;
+ return;
rw = qc->tf.flags & ATA_TFLAG_WRITE;
@@ -2254,8 +2254,6 @@ static int nv_swncq_dmafis(struct ata_port *ap)
dmactl |= ATA_DMA_WR;
iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- return 1;
}
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
@@ -2265,7 +2263,6 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 serror;
u8 ata_stat;
- int rc = 0;
ata_stat = ap->ops->sff_check_status(ap);
nv_swncq_irq_clear(ap, fis);
@@ -2310,8 +2307,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
ap->print_id, pp->qc_active, pp->dhfis_bits,
pp->dmafis_bits, readl(pp->sactive_block));
- rc = nv_swncq_sdbfis(ap);
- if (rc < 0)
+ if (nv_swncq_sdbfis(ap) < 0)
goto irq_error;
}
@@ -2348,7 +2344,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
*/
pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
pp->ncq_flags |= ncq_saw_dmas;
- rc = nv_swncq_dmafis(ap);
+ nv_swncq_dmafis(ap);
}
irq_exit:
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 8717809787f..5d86bb803e9 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -444,8 +444,8 @@ static inline void fs_kfree_skb (struct sk_buff * skb)
#define ROUND_NEAREST 3
/********** make rate (not quite as much fun as Horizon) **********/
-static unsigned int make_rate (unsigned int rate, int r,
- u16 * bits, unsigned int * actual)
+static int make_rate(unsigned int rate, int r,
+ u16 *bits, unsigned int *actual)
{
unsigned char exp = -1; /* hush gcc */
unsigned int man = -1; /* hush gcc */
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index da8f176c051..c8fc69c85a0 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -674,7 +674,7 @@ static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
u32 dma_addr;
dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
@@ -687,7 +687,7 @@ static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int
static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
dma_addr, size, direction);
@@ -697,7 +697,7 @@ static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int
static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
@@ -706,7 +706,7 @@ static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_add
static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
@@ -719,7 +719,7 @@ static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_
static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
int size, int nbr, int alignment)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
chunk->alloc_size = chunk->align_size = size * nbr;
@@ -738,7 +738,7 @@ static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk
/* free a DVMA consistent chunk of memory */
static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
dma_free_coherent(&op->dev, chunk->alloc_size,
chunk->alloc_addr, chunk->dma_addr);
@@ -770,7 +770,7 @@ static void fore200e_sba_reset(struct fore200e *fore200e)
static int __init fore200e_sba_map(struct fore200e *fore200e)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
unsigned int bursts;
/* gain access to the SBA specific registers */
@@ -800,7 +800,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
@@ -816,7 +816,7 @@ static int __init fore200e_sba_configure(struct fore200e *fore200e)
static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
const u8 *prop;
int len;
@@ -840,7 +840,7 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
{
- struct of_device *op = fore200e->bus_dev;
+ struct platform_device *op = fore200e->bus_dev;
const struct linux_prom_registers *regs;
regs = of_get_property(op->dev.of_node, "reg", NULL);
@@ -2513,7 +2513,7 @@ fore200e_load_and_start_fw(struct fore200e* fore200e)
device = &((struct pci_dev *) fore200e->bus_dev)->dev;
#ifdef CONFIG_SBUS
else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
- device = &((struct of_device *) fore200e->bus_dev)->dev;
+ device = &((struct platform_device *) fore200e->bus_dev)->dev;
#endif
else
return err;
@@ -2643,7 +2643,7 @@ fore200e_init(struct fore200e* fore200e)
}
#ifdef CONFIG_SBUS
-static int __devinit fore200e_sba_probe(struct of_device *op,
+static int __devinit fore200e_sba_probe(struct platform_device *op,
const struct of_device_id *match)
{
const struct fore200e_bus *bus = match->data;
@@ -2657,7 +2657,7 @@ static int __devinit fore200e_sba_probe(struct of_device *op,
fore200e->bus = bus;
fore200e->bus_dev = op;
- fore200e->irq = op->irqs[0];
+ fore200e->irq = op->archdata.irqs[0];
fore200e->phys_base = op->resource[0].start;
sprintf(fore200e->name, "%s-%d", bus->model_name, index);
@@ -2675,7 +2675,7 @@ static int __devinit fore200e_sba_probe(struct of_device *op,
return 0;
}
-static int __devexit fore200e_sba_remove(struct of_device *op)
+static int __devexit fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
@@ -2795,7 +2795,7 @@ static int __init fore200e_module_init(void)
printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
#ifdef CONFIG_SBUS
- err = of_register_driver(&fore200e_sba_driver, &of_bus_type);
+ err = of_register_platform_driver(&fore200e_sba_driver);
if (err)
return err;
#endif
@@ -2806,7 +2806,7 @@ static int __init fore200e_module_init(void)
#ifdef CONFIG_SBUS
if (err)
- of_unregister_driver(&fore200e_sba_driver);
+ of_unregister_platform_driver(&fore200e_sba_driver);
#endif
return err;
@@ -2818,7 +2818,7 @@ static void __exit fore200e_module_cleanup(void)
pci_unregister_driver(&fore200e_pca_driver);
#endif
#ifdef CONFIG_SBUS
- of_unregister_driver(&fore200e_sba_driver);
+ of_unregister_platform_driver(&fore200e_sba_driver);
#endif
}
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 54720baa736..a95790452a6 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1645,10 +1645,8 @@ static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
unsigned short d = 0;
char * s = skb->data;
if (*s++ == 'D') {
- for (i = 0; i < 4; ++i) {
- d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10));
- ++s;
- }
+ for (i = 0; i < 4; ++i)
+ d = (d << 4) | hex_to_bin(*s++);
PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
}
}
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1679cbf0c58..bce57328ddd 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3152,7 +3152,7 @@ deinit_card(struct idt77252_dev *card)
}
-static int __devinit
+static void __devinit
init_sram(struct idt77252_dev *card)
{
int i;
@@ -3298,7 +3298,6 @@ init_sram(struct idt77252_dev *card)
SAR_REG_RXFD);
IPRINTK("%s: SRAM initialization complete.\n", card->name);
- return 0;
}
static int __devinit
@@ -3410,8 +3409,7 @@ init_card(struct atm_dev *dev)
writel(readl(SAR_REG_CFG) | conf, SAR_REG_CFG);
- if (init_sram(card) < 0)
- return -1;
+ init_sram(card);
/********************************************************************/
/* A L L O C R A M A N D S E T V A R I O U S T H I N G S */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ee9ddeb5341..9309d4724e1 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -220,7 +220,7 @@ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
dev->ffL.tcq_rd += 2;
if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
- dev->ffL.tcq_rd = dev->ffL.tcq_st;
+ dev->ffL.tcq_rd = dev->ffL.tcq_st;
if (dev->ffL.tcq_rd == dev->host_tcq_wr)
return 0xFFFF;
desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
{
struct atm_dev *dev;
IADEV *iadev;
- unsigned long flags;
int ret;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
ia_dev[iadev_count] = iadev;
_ia_dev[iadev_count] = dev;
iadev_count++;
- spin_lock_init(&iadev->misc_lock);
- /* First fixes first. I don't want to think about this now. */
- spin_lock_irqsave(&iadev->misc_lock, flags);
if (ia_init(dev) || ia_start(dev)) {
IF_INIT(printk("IA register failed!\n");)
iadev_count--;
ia_dev[iadev_count] = NULL;
_ia_dev[iadev_count] = NULL;
- spin_unlock_irqrestore(&iadev->misc_lock, flags);
ret = -EINVAL;
goto err_out_deregister_dev;
}
- spin_unlock_irqrestore(&iadev->misc_lock, flags);
IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
iadev->next_board = ia_boards;
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index b2cd20f549c..077735e0e04 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
struct dle_q rx_dle_q;
struct free_desc_q *rx_free_desc_qhead;
struct sk_buff_head rx_dma_q;
- spinlock_t rx_lock, misc_lock;
+ spinlock_t rx_lock;
struct atm_vcc **rx_open; /* list of all open VCs */
u16 num_rx_desc, rx_buf_sz, rxing;
u32 rx_pkt_ram, rx_tmp_cnt;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f916ddf6393..f46138ab38b 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
struct sk_buff *skb;
+ unsigned int len;
spin_lock(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
if(skb == NULL)
return sprintf(buf, "No data.\n");
- memcpy(buf, skb->data, skb->len);
- dev_dbg(&card->dev->dev, "len: %d\n", skb->len);
+ len = skb->len;
+ memcpy(buf, skb->data, len);
+ dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
- return skb->len;
+ return len;
}
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 12eec3f633b..eb1b7fa20dc 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -945,8 +945,8 @@ bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&bus->p->subsys);
- kfree(bus->p);
out:
+ kfree(bus->p);
bus->p = NULL;
return retval;
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f8e72724dd4..d1b2c9adc27 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1599,7 +1599,7 @@ EXPORT_SYMBOL_GPL(device_destroy);
* on the same device to ensure that new_name is valid and
* won't conflict with other devices.
*/
-int device_rename(struct device *dev, char *new_name)
+int device_rename(struct device *dev, const char *new_name)
{
char *old_class_name = NULL;
char *new_class_name = NULL;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 503c2620bbc..da57ee9d63f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -51,6 +51,10 @@ static int driver_sysfs_add(struct device *dev)
{
int ret;
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_BIND_DRIVER, dev);
+
ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
kobject_name(&dev->kobj));
if (ret == 0) {
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index d4d8ce53886..f369e279598 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -8,7 +8,7 @@
struct dma_coherent_mem {
void *virt_base;
- u32 device_base;
+ dma_addr_t device_base;
int size;
int flags;
unsigned long *bitmap;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 3f093b0dd21..40af43ebd92 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -87,29 +87,32 @@ static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
struct completion completion;
- struct bin_attribute attr_data;
struct firmware *fw;
unsigned long status;
struct page **pages;
int nr_pages;
int page_array_size;
struct timer_list timeout;
+ struct device dev;
bool nowait;
char fw_id[];
};
-static void
-fw_load_abort(struct firmware_priv *fw_priv)
+static struct firmware_priv *to_firmware_priv(struct device *dev)
+{
+ return container_of(dev, struct firmware_priv, dev);
+}
+
+static void fw_load_abort(struct firmware_priv *fw_priv)
{
set_bit(FW_STATUS_ABORT, &fw_priv->status);
wmb();
complete(&fw_priv->completion);
}
-static ssize_t
-firmware_timeout_show(struct class *class,
- struct class_attribute *attr,
- char *buf)
+static ssize_t firmware_timeout_show(struct class *class,
+ struct class_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", loading_timeout);
}
@@ -127,14 +130,14 @@ firmware_timeout_show(struct class *class,
*
* Note: zero means 'wait forever'.
**/
-static ssize_t
-firmware_timeout_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t count)
+static ssize_t firmware_timeout_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
{
loading_timeout = simple_strtol(buf, NULL, 10);
if (loading_timeout < 0)
loading_timeout = 0;
+
return count;
}
@@ -146,21 +149,20 @@ static struct class_attribute firmware_class_attrs[] = {
static void fw_dev_release(struct device *dev)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
int i;
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
kfree(fw_priv);
- kfree(dev);
module_put(THIS_MODULE);
}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id))
return -ENOMEM;
@@ -182,8 +184,9 @@ static struct class firmware_class = {
static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status);
+
return sprintf(buf, "%d\n", loading);
}
@@ -219,7 +222,7 @@ static ssize_t firmware_loading_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
int loading = simple_strtol(buf, NULL, 10);
int i;
@@ -277,13 +280,12 @@ static ssize_t firmware_loading_store(struct device *dev,
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-static ssize_t
-firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer, loff_t offset,
- size_t count)
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
struct firmware *fw;
ssize_t ret_count;
@@ -322,8 +324,7 @@ out:
return ret_count;
}
-static int
-fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
+static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -373,13 +374,12 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
* Data written to the 'data' attribute will be later handed to
* the driver as a firmware image.
**/
-static ssize_t
-firmware_data_write(struct file* filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
- loff_t offset, size_t count)
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
struct firmware *fw;
ssize_t retval;
@@ -420,116 +420,103 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data_tmpl = {
- .attr = {.name = "data", .mode = 0644},
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
.size = 0,
.read = firmware_data_read,
.write = firmware_data_write,
};
-static void
-firmware_class_timeout(u_long data)
+static void firmware_class_timeout(u_long data)
{
struct firmware_priv *fw_priv = (struct firmware_priv *) data;
+
fw_load_abort(fw_priv);
}
-static int fw_register_device(struct device **dev_p, const char *fw_name,
- struct device *device)
+static struct firmware_priv *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, bool uevent, bool nowait)
{
- int retval;
- struct firmware_priv *fw_priv =
- kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
- struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
-
- *dev_p = NULL;
+ struct firmware_priv *fw_priv;
+ struct device *f_dev;
+ int error;
- if (!fw_priv || !f_dev) {
+ fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
+ if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
- retval = -ENOMEM;
- goto error_kfree;
+ error = -ENOMEM;
+ goto err_out;
}
+ fw_priv->fw = firmware;
+ fw_priv->nowait = nowait;
strcpy(fw_priv->fw_id, fw_name);
init_completion(&fw_priv->completion);
- fw_priv->attr_data = firmware_attr_data_tmpl;
- fw_priv->timeout.function = firmware_class_timeout;
- fw_priv->timeout.data = (u_long) fw_priv;
- init_timer(&fw_priv->timeout);
+ setup_timer(&fw_priv->timeout,
+ firmware_class_timeout, (u_long) fw_priv);
+ f_dev = &fw_priv->dev;
+
+ device_initialize(f_dev);
dev_set_name(f_dev, "%s", dev_name(device));
f_dev->parent = device;
f_dev->class = &firmware_class;
- dev_set_drvdata(f_dev, fw_priv);
- dev_set_uevent_suppress(f_dev, 1);
- retval = device_register(f_dev);
- if (retval) {
- dev_err(device, "%s: device_register failed\n", __func__);
- put_device(f_dev);
- return retval;
- }
- *dev_p = f_dev;
- return 0;
-
-error_kfree:
- kfree(f_dev);
- kfree(fw_priv);
- return retval;
-}
-
-static int fw_setup_device(struct firmware *fw, struct device **dev_p,
- const char *fw_name, struct device *device,
- int uevent, bool nowait)
-{
- struct device *f_dev;
- struct firmware_priv *fw_priv;
- int retval;
- *dev_p = NULL;
- retval = fw_register_device(&f_dev, fw_name, device);
- if (retval)
- goto out;
+ dev_set_uevent_suppress(f_dev, true);
/* Need to pin this module until class device is destroyed */
__module_get(THIS_MODULE);
- fw_priv = dev_get_drvdata(f_dev);
-
- fw_priv->nowait = nowait;
+ error = device_add(f_dev);
+ if (error) {
+ dev_err(device, "%s: device_register failed\n", __func__);
+ goto err_put_dev;
+ }
- fw_priv->fw = fw;
- sysfs_bin_attr_init(&fw_priv->attr_data);
- retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
- if (retval) {
+ error = device_create_bin_file(f_dev, &firmware_attr_data);
+ if (error) {
dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
- goto error_unreg;
+ goto err_del_dev;
}
- retval = device_create_file(f_dev, &dev_attr_loading);
- if (retval) {
+ error = device_create_file(f_dev, &dev_attr_loading);
+ if (error) {
dev_err(device, "%s: device_create_file failed\n", __func__);
- goto error_unreg;
+ goto err_del_bin_attr;
}
if (uevent)
- dev_set_uevent_suppress(f_dev, 0);
- *dev_p = f_dev;
- goto out;
+ dev_set_uevent_suppress(f_dev, false);
+
+ return fw_priv;
+
+err_del_bin_attr:
+ device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+err_out:
+ return ERR_PTR(error);
+}
+
+static void fw_destroy_instance(struct firmware_priv *fw_priv)
+{
+ struct device *f_dev = &fw_priv->dev;
-error_unreg:
+ device_remove_file(f_dev, &dev_attr_loading);
+ device_remove_bin_file(f_dev, &firmware_attr_data);
device_unregister(f_dev);
-out:
- return retval;
}
-static int
-_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, int uevent, bool nowait)
+static int _request_firmware(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ bool uevent, bool nowait)
{
- struct device *f_dev;
struct firmware_priv *fw_priv;
struct firmware *firmware;
- int retval;
+ int retval = 0;
if (!firmware_p)
return -EINVAL;
@@ -550,41 +537,40 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (uevent)
dev_dbg(device, "firmware: requesting %s\n", name);
- retval = fw_setup_device(firmware, &f_dev, name, device,
- uevent, nowait);
- if (retval)
- goto error_kfree_fw;
-
- fw_priv = dev_get_drvdata(f_dev);
+ fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ if (IS_ERR(fw_priv)) {
+ retval = PTR_ERR(fw_priv);
+ goto out;
+ }
if (uevent) {
- if (loading_timeout > 0) {
- fw_priv->timeout.expires = jiffies + loading_timeout * HZ;
- add_timer(&fw_priv->timeout);
- }
+ if (loading_timeout > 0)
+ mod_timer(&fw_priv->timeout,
+ round_jiffies_up(jiffies +
+ loading_timeout * HZ));
- kobject_uevent(&f_dev->kobj, KOBJ_ADD);
- wait_for_completion(&fw_priv->completion);
- set_bit(FW_STATUS_DONE, &fw_priv->status);
- del_timer_sync(&fw_priv->timeout);
- } else
- wait_for_completion(&fw_priv->completion);
+ kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
+ }
+
+ wait_for_completion(&fw_priv->completion);
+
+ set_bit(FW_STATUS_DONE, &fw_priv->status);
+ del_timer_sync(&fw_priv->timeout);
mutex_lock(&fw_lock);
- if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) {
+ if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status))
retval = -ENOENT;
- release_firmware(fw_priv->fw);
- *firmware_p = NULL;
- }
fw_priv->fw = NULL;
mutex_unlock(&fw_lock);
- device_unregister(f_dev);
- goto out;
-error_kfree_fw:
- kfree(firmware);
- *firmware_p = NULL;
+ fw_destroy_instance(fw_priv);
+
out:
+ if (retval) {
+ release_firmware(firmware);
+ *firmware_p = NULL;
+ }
+
return retval;
}
@@ -635,23 +621,24 @@ struct firmware_work {
int uevent;
};
-static int
-request_firmware_work_func(void *arg)
+static int request_firmware_work_func(void *arg)
{
struct firmware_work *fw_work = arg;
const struct firmware *fw;
int ret;
+
if (!arg) {
WARN_ON(1);
return 0;
}
- ret = _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
+ ret = _request_firmware(&fw, fw_work->name, fw_work->device,
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
+
return ret;
}
@@ -679,34 +666,33 @@ request_firmware_nowait(
void (*cont)(const struct firmware *fw, void *context))
{
struct task_struct *task;
- struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work),
- gfp);
+ struct firmware_work *fw_work;
+ fw_work = kzalloc(sizeof (struct firmware_work), gfp);
if (!fw_work)
return -ENOMEM;
+
+ fw_work->module = module;
+ fw_work->name = name;
+ fw_work->device = device;
+ fw_work->context = context;
+ fw_work->cont = cont;
+ fw_work->uevent = uevent;
+
if (!try_module_get(module)) {
kfree(fw_work);
return -EFAULT;
}
- *fw_work = (struct firmware_work) {
- .module = module,
- .name = name,
- .device = device,
- .context = context,
- .cont = cont,
- .uevent = uevent,
- };
-
task = kthread_run(request_firmware_work_func, fw_work,
"firmware/%s", name);
-
if (IS_ERR(task)) {
fw_work->cont(NULL, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
return PTR_ERR(task);
}
+
return 0;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 2bdd8a94ec9..2872e86837b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -66,8 +66,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
struct sysinfo i;
si_meminfo_node(&i, nid);
-
- n = sprintf(buf, "\n"
+ n = sprintf(buf,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
@@ -78,13 +77,33 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d Active(file): %8lu kB\n"
"Node %d Inactive(file): %8lu kB\n"
"Node %d Unevictable: %8lu kB\n"
- "Node %d Mlocked: %8lu kB\n"
+ "Node %d Mlocked: %8lu kB\n",
+ nid, K(i.totalram),
+ nid, K(i.freeram),
+ nid, K(i.totalram - i.freeram),
+ nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
+ node_page_state(nid, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
+ node_page_state(nid, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
+ nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
+ nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(nid, NR_UNEVICTABLE)),
+ nid, K(node_page_state(nid, NR_MLOCK)));
+
#ifdef CONFIG_HIGHMEM
+ n += sprintf(buf + n,
"Node %d HighTotal: %8lu kB\n"
"Node %d HighFree: %8lu kB\n"
"Node %d LowTotal: %8lu kB\n"
- "Node %d LowFree: %8lu kB\n"
+ "Node %d LowFree: %8lu kB\n",
+ nid, K(i.totalhigh),
+ nid, K(i.freehigh),
+ nid, K(i.totalram - i.totalhigh),
+ nid, K(i.freeram - i.freehigh));
#endif
+ n += sprintf(buf + n,
"Node %d Dirty: %8lu kB\n"
"Node %d Writeback: %8lu kB\n"
"Node %d FilePages: %8lu kB\n"
@@ -99,25 +118,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d Slab: %8lu kB\n"
"Node %d SReclaimable: %8lu kB\n"
"Node %d SUnreclaim: %8lu kB\n",
- nid, K(i.totalram),
- nid, K(i.freeram),
- nid, K(i.totalram - i.freeram),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
- node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
- node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_UNEVICTABLE)),
- nid, K(node_page_state(nid, NR_MLOCK)),
-#ifdef CONFIG_HIGHMEM
- nid, K(i.totalhigh),
- nid, K(i.freehigh),
- nid, K(i.totalram - i.totalhigh),
- nid, K(i.freeram - i.freehigh),
-#endif
nid, K(node_page_state(nid, NR_FILE_DIRTY)),
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4d99c8bdfed..c6c933f5810 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -191,13 +192,13 @@ int platform_device_add_resources(struct platform_device *pdev,
{
struct resource *r;
- r = kmalloc(sizeof(struct resource) * num, GFP_KERNEL);
+ r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
if (r) {
- memcpy(r, res, sizeof(struct resource) * num);
pdev->resource = r;
pdev->num_resources = num;
+ return 0;
}
- return r ? 0 : -ENOMEM;
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);
@@ -344,108 +345,56 @@ void platform_device_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
- * platform_device_register_simple - add a platform-level device and its resources
- * @name: base name of the device we're adding
- * @id: instance id
- * @res: set of resources that needs to be allocated for the device
- * @num: number of resources
+ * platform_device_register_resndata - add a platform-level device with
+ * resources and platform-specific data
*
- * This function creates a simple platform device that requires minimal
- * resource and memory management. Canned release function freeing memory
- * allocated for the device allows drivers using such devices to be
- * unloaded without waiting for the last reference to the device to be
- * dropped.
- *
- * This interface is primarily intended for use with legacy drivers which
- * probe hardware directly. Because such drivers create sysfs device nodes
- * themselves, rather than letting system infrastructure handle such device
- * enumeration tasks, they don't fully conform to the Linux driver model.
- * In particular, when such drivers are built as modules, they can't be
- * "hotplugged".
- *
- * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
- */
-struct platform_device *platform_device_register_simple(const char *name,
- int id,
- const struct resource *res,
- unsigned int num)
-{
- struct platform_device *pdev;
- int retval;
-
- pdev = platform_device_alloc(name, id);
- if (!pdev) {
- retval = -ENOMEM;
- goto error;
- }
-
- if (num) {
- retval = platform_device_add_resources(pdev, res, num);
- if (retval)
- goto error;
- }
-
- retval = platform_device_add(pdev);
- if (retval)
- goto error;
-
- return pdev;
-
-error:
- platform_device_put(pdev);
- return ERR_PTR(retval);
-}
-EXPORT_SYMBOL_GPL(platform_device_register_simple);
-
-/**
- * platform_device_register_data - add a platform-level device with platform-specific data
* @parent: parent device for the device we're adding
* @name: base name of the device we're adding
* @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
* @data: platform specific data for this platform device
* @size: size of platform specific data
*
- * This function creates a simple platform device that requires minimal
- * resource and memory management. Canned release function freeing memory
- * allocated for the device allows drivers using such devices to be
- * unloaded without waiting for the last reference to the device to be
- * dropped.
- *
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device *platform_device_register_data(
+struct platform_device *__init_or_module platform_device_register_resndata(
struct device *parent,
const char *name, int id,
+ const struct resource *res, unsigned int num,
const void *data, size_t size)
{
+ int ret = -ENOMEM;
struct platform_device *pdev;
- int retval;
pdev = platform_device_alloc(name, id);
- if (!pdev) {
- retval = -ENOMEM;
- goto error;
- }
+ if (!pdev)
+ goto err;
pdev->dev.parent = parent;
- if (size) {
- retval = platform_device_add_data(pdev, data, size);
- if (retval)
- goto error;
+ if (res) {
+ ret = platform_device_add_resources(pdev, res, num);
+ if (ret)
+ goto err;
}
- retval = platform_device_add(pdev);
- if (retval)
- goto error;
+ if (data) {
+ ret = platform_device_add_data(pdev, data, size);
+ if (ret)
+ goto err;
+ }
- return pdev;
+ ret = platform_device_add(pdev);
+ if (ret) {
+err:
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
-error:
- platform_device_put(pdev);
- return ERR_PTR(retval);
+ return pdev;
}
-EXPORT_SYMBOL_GPL(platform_device_register_data);
+EXPORT_SYMBOL_GPL(platform_device_register_resndata);
static int platform_drv_probe(struct device *_dev)
{
@@ -635,6 +584,12 @@ static struct device_attribute platform_dev_attrs[] = {
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct platform_device *pdev = to_platform_device(dev);
+ int rc;
+
+ /* Some devices have extra OF data and an OF-style MODALIAS */
+ rc = of_device_uevent(dev,env);
+ if (rc != -ENODEV)
+ return rc;
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
(pdev->id_entry) ? pdev->id_entry->name : pdev->name);
@@ -673,7 +628,11 @@ static int platform_match(struct device *dev, struct device_driver *drv)
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
- /* match against the id table first */
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49ff13..276d5a701dc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
{
dev->power.status = DPM_ON;
init_completion(&dev->power.completion);
+ complete_all(&dev->power.completion);
dev->power.wakeup_count = 0;
pm_runtime_init(dev);
}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index c5f22bb0a48..4e2c367fec1 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -79,23 +79,28 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode)
struct gendisk *disk = bdev->bd_disk;
DAC960_Controller_T *p = disk->queue->queuedata;
int drive_nr = (long)disk->private_data;
+ int ret = -ENXIO;
+ lock_kernel();
if (p->FirmwareType == DAC960_V1_Controller) {
if (p->V1.LogicalDriveInformation[drive_nr].
LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
- return -ENXIO;
+ goto out;
} else {
DAC960_V2_LogicalDeviceInfo_T *i =
p->V2.LogicalDeviceInformation[drive_nr];
if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
- return -ENXIO;
+ goto out;
}
check_disk_change(bdev);
if (!get_capacity(p->disks[drive_nr]))
- return -ENXIO;
- return 0;
+ goto out;
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
}
static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 832798aa14f..76f114f0bba 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -60,6 +60,7 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <linux/amifdreg.h>
#include <linux/amifd.h>
#include <linux/buffer_head.h>
@@ -1423,7 +1424,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
@@ -1500,6 +1501,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
}
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long param)
+{
+ int ret;
+
+ lock_kernel();
+ ret = fd_locked_ioctl(bdev, mode, cmd, param);
+ unlock_kernel();
+
+ return ret;
+}
+
static void fd_probe(int dev)
{
unsigned long code;
@@ -1542,10 +1555,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
int old_dev;
unsigned long flags;
+ lock_kernel();
old_dev = fd_device[drive];
- if (fd_ref[drive] && old_dev != system)
+ if (fd_ref[drive] && old_dev != system) {
+ unlock_kernel();
return -EBUSY;
+ }
if (mode & (FMODE_READ|FMODE_WRITE)) {
check_disk_change(bdev);
@@ -1558,8 +1574,10 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
fd_deselect (drive);
rel_fdc();
- if (wrprot)
+ if (wrprot) {
+ unlock_kernel();
return -EROFS;
+ }
}
}
@@ -1576,6 +1594,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
unit[drive].type->name, data_types[system].name);
+ unlock_kernel();
return 0;
}
@@ -1584,6 +1603,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
+ lock_kernel();
if (unit[drive].dirty == 1) {
del_timer (flush_track_timer + drive);
non_int_flush_track (drive);
@@ -1597,6 +1617,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
/* the mod_use counter is handled this way */
floppy_off (drive | 0x40000000);
#endif
+ unlock_kernel();
return 0;
}
@@ -1638,7 +1659,7 @@ static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
- .locked_ioctl = fd_ioctl,
+ .ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.media_changed = amiga_floppy_change,
};
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 035cefe4045..a946929735a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
+#include <linux/smp_lock.h>
#include "aoe.h"
static struct kmem_cache *buf_pool_cache;
@@ -124,13 +125,16 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
struct aoedev *d = bdev->bd_disk->private_data;
ulong flags;
+ lock_kernel();
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_UP) {
d->nopen++;
spin_unlock_irqrestore(&d->lock, flags);
+ unlock_kernel();
return 0;
}
spin_unlock_irqrestore(&d->lock, flags);
+ unlock_kernel();
return -ENODEV;
}
@@ -173,7 +177,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
BUG();
bio_endio(bio, -ENXIO);
return 0;
- } else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+ } else if (bio->bi_rw & REQ_HARDBARRIER) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
} else if (bio->bi_io_vec == NULL) {
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index e35cf59cbfd..aceb9647652 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -67,6 +67,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <asm/atafd.h>
#include <asm/atafdreg.h>
@@ -359,7 +360,7 @@ static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
static void redo_fd_request( void);
-static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
cmd, unsigned long param);
static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
@@ -1480,7 +1481,7 @@ void do_fd_request(struct request_queue * q)
atari_enable_irq( IRQ_MFP_FDC );
}
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct gendisk *disk = bdev->bd_disk;
@@ -1665,6 +1666,17 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
}
}
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = fd_locked_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
/* Initialize the 'unit' variable for drive 'drive' */
@@ -1838,24 +1850,36 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = floppy_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct atari_floppy_struct *p = disk->private_data;
+ lock_kernel();
if (p->ref < 0)
p->ref = 0;
else if (!p->ref--) {
printk(KERN_ERR "floppy_release with fd_ref == 0");
p->ref = 0;
}
+ unlock_kernel();
return 0;
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
- .open = floppy_open,
+ .open = floppy_unlocked_open,
.release = floppy_release,
- .locked_ioctl = fd_ioctl,
+ .ioctl = fd_ioctl,
.media_changed = check_floppy_change,
.revalidate_disk= floppy_revalidate,
};
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f1bf79d9bc0..1c7f63792ff 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -15,6 +15,7 @@
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
+#include <linux/smp_lock.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
#include <linux/slab.h>
@@ -340,7 +341,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
get_capacity(bdev->bd_disk))
goto out;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
discard_from_brd(brd, sector, bio->bi_size);
goto out;
@@ -401,6 +402,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
* ram device BLKFLSBUF has special semantics, we want to actually
* release and destroy the ramdisk data.
*/
+ lock_kernel();
mutex_lock(&bdev->bd_mutex);
error = -EBUSY;
if (bdev->bd_openers <= 1) {
@@ -417,13 +419,14 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
error = 0;
}
mutex_unlock(&bdev->bd_mutex);
+ unlock_kernel();
return error;
}
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
- .locked_ioctl = brd_ioctl,
+ .ioctl = brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
.direct_access = brd_direct_access,
#endif
@@ -479,7 +482,7 @@ static struct brd_device *brd_alloc(int i)
if (!brd->brd_queue)
goto out_free_dev;
blk_queue_make_request(brd->brd_queue, brd_make_request);
- blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
+ blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e1e7143ca1e..5e4fadcdece 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -56,16 +56,14 @@
#include <linux/kthread.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
- " SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
- " Smart Array G2 Series SAS/SATA Controllers");
-MODULE_VERSION("3.6.20");
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static int cciss_allow_hpsa;
@@ -107,6 +105,11 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
{0,}
};
@@ -146,6 +149,11 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324A103C, "Smart Array P712m", &SA5_access},
{0x324B103C, "Smart Array P711m", &SA5_access},
+ {0x3250103C, "Smart Array", &SA5_access},
+ {0x3251103C, "Smart Array", &SA5_access},
+ {0x3252103C, "Smart Array", &SA5_access},
+ {0x3253103C, "Smart Array", &SA5_access},
+ {0x3254103C, "Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -167,9 +175,13 @@ static DEFINE_MUTEX(scan_mutex);
static LIST_HEAD(scan_q);
static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intr(int irq, void *dev_id);
+static irqreturn_t do_cciss_intx(int irq, void *dev_id);
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static int cciss_release(struct gendisk *disk, fmode_t mode);
+static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -179,25 +191,23 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
static int deregister_disk(ctlr_info_t *h, int drv_index,
int clear_all, int via_ioctl);
-static void cciss_read_capacity(int ctlr, int logvol,
+static void cciss_read_capacity(ctlr_info_t *h, int logvol,
sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
sector_t total_size,
unsigned int block_size, InquiryData_struct *inq_buff,
drive_info_struct *drv);
-static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
- __u32);
+static void __devinit cciss_interrupt_mode(ctlr_info_t *);
static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
int cmd_type);
static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
int attempt_retry);
static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
-static void fail_all_cmds(unsigned long ctlr);
static int add_to_scan_list(struct ctlr_info *h);
static int scan_thread(void *data);
static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
@@ -205,11 +215,23 @@ static void cciss_hba_release(struct device *dev);
static void cciss_device_release(struct device *dev);
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
+static inline u32 next_command(ctlr_info_t *h);
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+ void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
+
+
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
+ int *bucket_map);
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
#ifdef CONFIG_PROC_FS
-static void cciss_procinit(int i);
+static void cciss_procinit(ctlr_info_t *h);
#else
-static void cciss_procinit(int i)
+static void cciss_procinit(ctlr_info_t *h)
{
}
#endif /* CONFIG_PROC_FS */
@@ -221,9 +243,9 @@ static int cciss_compat_ioctl(struct block_device *, fmode_t,
static const struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
- .open = cciss_open,
+ .open = cciss_unlocked_open,
.release = cciss_release,
- .locked_ioctl = cciss_ioctl,
+ .ioctl = do_ioctl,
.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
@@ -231,6 +253,16 @@ static const struct block_device_operations cciss_fops = {
.revalidate_disk = cciss_revalidate,
};
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
+{
+ if (likely(h->transMethod == CFGTBL_Trans_Performant))
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
/*
* Enqueuing and dequeuing functions for cmdlists.
*/
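set_performant_mode() above packs two pieces of routing information into the low bits of the command's bus address: bit 0 selects the pull model and bits 3-1 carry the block fetch register number looked up in blockFetchTable[]. A small worked example; the helper name and the value 3 are illustrative only:

#include <linux/types.h>

/* With h->blockFetchTable[c->Header.SGList] == 3, the expression
 * 1 | (3 << 1) is 0x07, so the low nibble of busaddr becomes 0b0111:
 * pull model enabled, block fetch register 3. */
static u32 example_performant_tag(u32 busaddr, u8 fetch_reg)
{
	return busaddr | 1 | (fetch_reg << 1);
}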
@@ -257,6 +289,20 @@ static inline void removeQ(CommandList_struct *c)
hlist_del_init(&c->list);
}
+static void enqueue_cmd_and_start_io(ctlr_info_t *h,
+ CommandList_struct *c)
+{
+ unsigned long flags;
+ set_performant_mode(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ if (h->Qdepth > h->maxQsinceinit)
+ h->maxQsinceinit = h->Qdepth;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
int nr_cmds)
{
@@ -366,32 +412,31 @@ static void cciss_seq_show_header(struct seq_file *seq)
h->product_name,
(unsigned long)h->board_id,
h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
- h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+ h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
h->num_luns,
h->Qdepth, h->commands_outstanding,
h->maxQsinceinit, h->max_outstanding, h->maxSG);
#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_seq_tape_report(seq, h->ctlr);
+ cciss_seq_tape_report(seq, h);
#endif /* CONFIG_CISS_SCSI_TAPE */
}
static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
ctlr_info_t *h = seq->private;
- unsigned ctlr = h->ctlr;
unsigned long flags;
/* prevent displaying bogus info during configuration
* or deconfiguration of a logical volume
*/
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return ERR_PTR(-EBUSY);
}
h->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (*pos == 0)
cciss_seq_show_header(seq);
@@ -499,7 +544,7 @@ cciss_proc_write(struct file *file, const char __user *buf,
struct seq_file *seq = file->private_data;
ctlr_info_t *h = seq->private;
- err = cciss_engage_scsi(h->ctlr);
+ err = cciss_engage_scsi(h);
if (err == 0)
err = length;
} else
@@ -522,7 +567,7 @@ static const struct file_operations cciss_proc_fops = {
.write = cciss_proc_write,
};
-static void __devinit cciss_procinit(int i)
+static void __devinit cciss_procinit(ctlr_info_t *h)
{
struct proc_dir_entry *pde;
@@ -530,9 +575,9 @@ static void __devinit cciss_procinit(int i)
proc_cciss = proc_mkdir("driver/cciss", NULL);
if (!proc_cciss)
return;
- pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+ pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
S_IROTH, proc_cciss,
- &cciss_proc_fops, hba[i]);
+ &cciss_proc_fops, h);
}
#endif /* CONFIG_PROC_FS */
@@ -565,12 +610,12 @@ static ssize_t dev_show_unique_id(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(sn, drv->serial_no, sizeof(sn));
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -595,12 +640,12 @@ static ssize_t dev_show_vendor(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -619,12 +664,12 @@ static ssize_t dev_show_model(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(model, drv->model, MODEL_LEN + 1);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -643,12 +688,12 @@ static ssize_t dev_show_rev(struct device *dev,
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring)
ret = -EBUSY;
else
memcpy(rev, drv->rev, REV_LEN + 1);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (ret)
return ret;
@@ -665,17 +710,17 @@ static ssize_t cciss_show_lunid(struct device *dev,
unsigned long flags;
unsigned char lunid[8];
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
if (!drv->heads) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -ENOTTY;
}
memcpy(lunid, drv->LunID, sizeof(lunid));
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
lunid[0], lunid[1], lunid[2], lunid[3],
lunid[4], lunid[5], lunid[6], lunid[7]);
@@ -690,13 +735,13 @@ static ssize_t cciss_show_raid_level(struct device *dev,
int raid;
unsigned long flags;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
raid = drv->raid_level;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (raid < 0 || raid > RAID_UNKNOWN)
raid = RAID_UNKNOWN;
@@ -713,13 +758,13 @@ static ssize_t cciss_show_usage_count(struct device *dev,
unsigned long flags;
int count;
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
count = drv->usage_count;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 20, "%d\n", count);
}
static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
@@ -864,60 +909,70 @@ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use. For operations that can wait for kmalloc
- * to possible sleep, this routine can be called with get_from_pool set to 0.
- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
+ * which ones are free or in use.
*/
-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
+static CommandList_struct *cmd_alloc(ctlr_info_t *h)
{
CommandList_struct *c;
int i;
u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
- if (!get_from_pool) {
- c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
- sizeof(CommandList_struct), &cmd_dma_handle);
- if (c == NULL)
+ do {
+ i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+ if (i == h->nr_cmds)
return NULL;
- memset(c, 0, sizeof(CommandList_struct));
+ } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ c = h->cmd_pool + i;
+ memset(c, 0, sizeof(CommandList_struct));
+ cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
+ c->err_info = h->errinfo_pool + i;
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ err_dma_handle = h->errinfo_pool_dhandle
+ + i * sizeof(ErrorInfo_struct);
+ h->nr_allocs++;
- c->cmdindex = -1;
+ c->cmdindex = i;
- c->err_info = (ErrorInfo_struct *)
- pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
- &err_dma_handle);
+ INIT_HLIST_NODE(&c->list);
+ c->busaddr = (__u32) cmd_dma_handle;
+ temp64.val = (__u64) err_dma_handle;
+ c->ErrDesc.Addr.lower = temp64.val32.lower;
+ c->ErrDesc.Addr.upper = temp64.val32.upper;
+ c->ErrDesc.Len = sizeof(ErrorInfo_struct);
- if (c->err_info == NULL) {
- pci_free_consistent(h->pdev,
- sizeof(CommandList_struct), c, cmd_dma_handle);
- return NULL;
- }
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- } else { /* get it out of the controllers pool */
-
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
- if (i == h->nr_cmds)
- return NULL;
- } while (test_and_set_bit
- (i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
-#endif
- c = h->cmd_pool + i;
- memset(c, 0, sizeof(CommandList_struct));
- cmd_dma_handle = h->cmd_pool_dhandle
- + i * sizeof(CommandList_struct);
- c->err_info = h->errinfo_pool + i;
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- err_dma_handle = h->errinfo_pool_dhandle
- + i * sizeof(ErrorInfo_struct);
- h->nr_allocs++;
+ c->ctlr = h->ctlr;
+ return c;
+}
- c->cmdindex = i;
+/* allocate a command using pci_alloc_consistent, used for ioctls,
+ * etc., not for the main i/o path.
+ */
+static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
+{
+ CommandList_struct *c;
+ u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
+ sizeof(CommandList_struct), &cmd_dma_handle);
+ if (c == NULL)
+ return NULL;
+ memset(c, 0, sizeof(CommandList_struct));
+
+ c->cmdindex = -1;
+
+ c->err_info = (ErrorInfo_struct *)
+ pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+ &err_dma_handle);
+
+ if (c->err_info == NULL) {
+ pci_free_consistent(h->pdev,
+ sizeof(CommandList_struct), c, cmd_dma_handle);
+ return NULL;
}
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
INIT_HLIST_NODE(&c->list);
c->busaddr = (__u32) cmd_dma_handle;
@@ -930,27 +985,26 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
return c;
}
-/*
- * Frees a command block that was previously allocated with cmd_alloc().
- */
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
{
int i;
+
+ i = c - h->cmd_pool;
+ clear_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ h->nr_frees++;
+}
+
+static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
+{
u64bit temp64;
- if (!got_from_pool) {
- temp64.val32.lower = c->ErrDesc.Addr.lower;
- temp64.val32.upper = c->ErrDesc.Addr.upper;
- pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
- c->err_info, (dma_addr_t) temp64.val);
- pci_free_consistent(h->pdev, sizeof(CommandList_struct),
- c, (dma_addr_t) c->busaddr);
- } else {
- i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
- h->nr_frees++;
- }
+ temp64.val32.lower = c->ErrDesc.Addr.lower;
+ temp64.val32.upper = c->ErrDesc.Addr.upper;
+ pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+ c->err_info, (dma_addr_t) temp64.val);
+ pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+ c, (dma_addr_t) c->busaddr);
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
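Note the allocator split introduced above: the old cmd_alloc(h, get_from_pool)/cmd_free(h, c, got_from_pool) pair becomes two dedicated pairs, cmd_alloc()/cmd_free() for pool-backed commands on the I/O path and cmd_special_alloc()/cmd_special_free() for ioctl-style callers. A sketch of how a caller pairs them after this change; example_send_special_cmd is an invented name and the elided steps are only indicated in the comment:

static int example_send_special_cmd(ctlr_info_t *h)
{
	CommandList_struct *c;

	c = cmd_special_alloc(h);	/* pci_alloc_consistent-backed */
	if (!c)
		return -ENOMEM;

	/* ... fill_cmd(h, c, ...), enqueue_cmd_and_start_io(h, c),
	 * wait_for_completion(), error handling ... */

	cmd_special_free(h, c);		/* never cmd_free() for this command */
	return 0;
}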
@@ -968,13 +1022,10 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
*/
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
- ctlr_info_t *host = get_host(bdev->bd_disk);
+ ctlr_info_t *h = get_host(bdev->bd_disk);
drive_info_struct *drv = get_drv(bdev->bd_disk);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
-#endif /* CCISS_DEBUG */
-
+ dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
if (drv->busy_configuring)
return -EBUSY;
/*
@@ -1000,29 +1051,39 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
return -EPERM;
}
drv->usage_count++;
- host->usage_count++;
+ h->usage_count++;
return 0;
}
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = cciss_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Close. Sync first.
*/
static int cciss_release(struct gendisk *disk, fmode_t mode)
{
- ctlr_info_t *host = get_host(disk);
- drive_info_struct *drv = get_drv(disk);
-
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
-#endif /* CCISS_DEBUG */
+ ctlr_info_t *h;
+ drive_info_struct *drv;
+ lock_kernel();
+ h = get_host(disk);
+ drv = get_drv(disk);
+ dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
drv->usage_count--;
- host->usage_count--;
+ h->usage_count--;
+ unlock_kernel();
return 0;
}
-#ifdef CONFIG_COMPAT
-
static int do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
@@ -1033,6 +1094,8 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+#ifdef CONFIG_COMPAT
+
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
@@ -1163,11 +1226,11 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
+static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
- (void)check_for_unit_attention(host, c);
+ (void)check_for_unit_attention(h, c);
}
/*
* ioctl
@@ -1176,15 +1239,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
- ctlr_info_t *host = get_host(disk);
+ ctlr_info_t *h = get_host(disk);
drive_info_struct *drv = get_drv(disk);
- int ctlr = host->ctlr;
void __user *argp = (void __user *)arg;
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
-#endif /* CCISS_DEBUG */
-
+ dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
+ cmd, arg);
switch (cmd) {
case CCISS_GETPCIINFO:
{
@@ -1192,10 +1252,10 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- pciinfo.domain = pci_domain_nr(host->pdev->bus);
- pciinfo.bus = host->pdev->bus->number;
- pciinfo.dev_fn = host->pdev->devfn;
- pciinfo.board_id = host->board_id;
+ pciinfo.domain = pci_domain_nr(h->pdev->bus);
+ pciinfo.bus = h->pdev->bus->number;
+ pciinfo.dev_fn = h->pdev->devfn;
+ pciinfo.board_id = h->board_id;
if (copy_to_user
(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
return -EFAULT;
@@ -1207,9 +1267,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
intinfo.delay =
- readl(&host->cfgtable->HostWrite.CoalIntDelay);
+ readl(&h->cfgtable->HostWrite.CoalIntDelay);
intinfo.count =
- readl(&host->cfgtable->HostWrite.CoalIntCount);
+ readl(&h->cfgtable->HostWrite.CoalIntCount);
if (copy_to_user
(argp, &intinfo, sizeof(cciss_coalint_struct)))
return -EFAULT;
@@ -1229,26 +1289,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
(&intinfo, argp, sizeof(cciss_coalint_struct)))
return -EFAULT;
if ((intinfo.delay == 0) && (intinfo.count == 0))
- {
-// printk("cciss_ioctl: delay and count cannot be 0\n");
return -EINVAL;
- }
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
/* Update the field, and then ring the doorbell */
writel(intinfo.delay,
- &(host->cfgtable->HostWrite.CoalIntDelay));
+ &(h->cfgtable->HostWrite.CoalIntDelay));
writel(intinfo.count,
- &(host->cfgtable->HostWrite.CoalIntCount));
- writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+ &(h->cfgtable->HostWrite.CoalIntCount));
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(host->vaddr + SA5_DOORBELL)
+ if (!(readl(h->vaddr + SA5_DOORBELL)
& CFGTBL_ChangeReq))
break;
/* delay and try again */
udelay(1000);
}
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (i >= MAX_IOCTL_CONFIG_WAIT)
return -EAGAIN;
return 0;
@@ -1262,7 +1319,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
return -EINVAL;
for (i = 0; i < 16; i++)
NodeName[i] =
- readb(&host->cfgtable->ServerName[i]);
+ readb(&h->cfgtable->ServerName[i]);
if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
return -EFAULT;
return 0;
@@ -1282,23 +1339,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
(NodeName, argp, sizeof(NodeName_type)))
return -EFAULT;
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
/* Update the field, and then ring the doorbell */
for (i = 0; i < 16; i++)
writeb(NodeName[i],
- &host->cfgtable->ServerName[i]);
+ &h->cfgtable->ServerName[i]);
- writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(host->vaddr + SA5_DOORBELL)
+ if (!(readl(h->vaddr + SA5_DOORBELL)
& CFGTBL_ChangeReq))
break;
/* delay and try again */
udelay(1000);
}
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
if (i >= MAX_IOCTL_CONFIG_WAIT)
return -EAGAIN;
return 0;
@@ -1310,7 +1367,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- heartbeat = readl(&host->cfgtable->HeartBeat);
+ heartbeat = readl(&h->cfgtable->HeartBeat);
if (copy_to_user
(argp, &heartbeat, sizeof(Heartbeat_type)))
return -EFAULT;
@@ -1322,7 +1379,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- BusTypes = readl(&host->cfgtable->BusTypes);
+ BusTypes = readl(&h->cfgtable->BusTypes);
if (copy_to_user
(argp, &BusTypes, sizeof(BusTypes_type)))
return -EFAULT;
@@ -1334,7 +1391,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (!arg)
return -EINVAL;
- memcpy(firmware, host->firm_ver, 4);
+ memcpy(firmware, h->firm_ver, 4);
if (copy_to_user
(argp, firmware, sizeof(FirmwareVer_type)))
@@ -1357,7 +1414,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_DEREGDISK:
case CCISS_REGNEWD:
case CCISS_REVALIDVOLS:
- return rebuild_lun_table(host, 0, 1);
+ return rebuild_lun_table(h, 0, 1);
case CCISS_GETLUNINFO:{
LogvolInfo_struct luninfo;
@@ -1377,7 +1434,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
CommandList_struct *c;
char *buff = NULL;
u64bit temp64;
- unsigned long flags;
DECLARE_COMPLETION_ONSTACK(wait);
if (!arg)
@@ -1413,7 +1469,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
} else {
memset(buff, 0, iocommand.buf_size);
}
- if ((c = cmd_alloc(host, 0)) == NULL) {
+ c = cmd_special_alloc(h);
+ if (!c) {
kfree(buff);
return -ENOMEM;
}
@@ -1439,7 +1496,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
- temp64.val = pci_map_single(host->pdev, buff,
+ temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size,
PCI_DMA_BIDIRECTIONAL);
c->SG[0].Addr.lower = temp64.val32.lower;
@@ -1449,30 +1506,24 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
}
c->waiting = &wait;
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- addQ(&host->reqQ, c);
- host->Qdepth++;
- start_io(host);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
/* unlock the buffers from DMA */
temp64.val32.lower = c->SG[0].Addr.lower;
temp64.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
+ pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
iocommand.buf_size,
PCI_DMA_BIDIRECTIONAL);
- check_ioctl_unit_attention(host, c);
+ check_ioctl_unit_attention(h, c);
/* Copy the error information out */
iocommand.error_info = *(c->err_info);
if (copy_to_user
(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
kfree(buff);
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
return -EFAULT;
}
@@ -1481,12 +1532,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_to_user
(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
return -EFAULT;
}
}
kfree(buff);
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
return 0;
}
case CCISS_BIG_PASSTHRU:{
@@ -1495,7 +1546,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned char **buff = NULL;
int *buff_size = NULL;
u64bit temp64;
- unsigned long flags;
BYTE sg_used = 0;
int status = 0;
int i;
@@ -1569,7 +1619,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
data_ptr += sz;
sg_used++;
}
- if ((c = cmd_alloc(host, 0)) == NULL) {
+ c = cmd_special_alloc(h);
+ if (!c) {
status = -ENOMEM;
goto cleanup1;
}
@@ -1590,7 +1641,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
if (ioc->buf_size > 0) {
for (i = 0; i < sg_used; i++) {
temp64.val =
- pci_map_single(host->pdev, buff[i],
+ pci_map_single(h->pdev, buff[i],
buff_size[i],
PCI_DMA_BIDIRECTIONAL);
c->SG[i].Addr.lower =
@@ -1602,26 +1653,21 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
}
}
c->waiting = &wait;
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- addQ(&host->reqQ, c);
- host->Qdepth++;
- start_io(host);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
/* unlock the buffers from DMA */
for (i = 0; i < sg_used; i++) {
temp64.val32.lower = c->SG[i].Addr.lower;
temp64.val32.upper = c->SG[i].Addr.upper;
- pci_unmap_single(host->pdev,
+ pci_unmap_single(h->pdev,
(dma_addr_t) temp64.val, buff_size[i],
PCI_DMA_BIDIRECTIONAL);
}
- check_ioctl_unit_attention(host, c);
+ check_ioctl_unit_attention(h, c);
/* Copy the error information out */
ioc->error_info = *(c->err_info);
if (copy_to_user(argp, ioc, sizeof(*ioc))) {
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
@@ -1631,14 +1677,14 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
for (i = 0; i < sg_used; i++) {
if (copy_to_user
(ptr, buff[i], buff_size[i])) {
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
ptr += buff_size[i];
}
}
- cmd_free(host, c, 0);
+ cmd_special_free(h, c);
status = 0;
cleanup1:
if (buff) {
@@ -1726,26 +1772,26 @@ static void cciss_check_queues(ctlr_info_t *h)
static void cciss_softirq_done(struct request *rq)
{
- CommandList_struct *cmd = rq->completion_data;
- ctlr_info_t *h = hba[cmd->ctlr];
- SGDescriptor_struct *curr_sg = cmd->SG;
- unsigned long flags;
+ CommandList_struct *c = rq->completion_data;
+ ctlr_info_t *h = hba[c->ctlr];
+ SGDescriptor_struct *curr_sg = c->SG;
u64bit temp64;
+ unsigned long flags;
int i, ddir;
int sg_index = 0;
- if (cmd->Request.Type.Direction == XFER_READ)
+ if (c->Request.Type.Direction == XFER_READ)
ddir = PCI_DMA_FROMDEVICE;
else
ddir = PCI_DMA_TODEVICE;
/* command did not need to be retried */
/* unmap the DMA mapping for all the scatter gather elements */
- for (i = 0; i < cmd->Header.SGList; i++) {
+ for (i = 0; i < c->Header.SGList; i++) {
if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
- cciss_unmap_sg_chain_block(h, cmd);
+ cciss_unmap_sg_chain_block(h, c);
/* Point to the next block */
- curr_sg = h->cmd_sg_list[cmd->cmdindex];
+ curr_sg = h->cmd_sg_list[c->cmdindex];
sg_index = 0;
}
temp64.val32.lower = curr_sg[sg_index].Addr.lower;
@@ -1755,18 +1801,16 @@ static void cciss_softirq_done(struct request *rq)
++sg_index;
}
-#ifdef CCISS_DEBUG
- printk("Done with %p\n", rq);
-#endif /* CCISS_DEBUG */
+ dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
/* set the residual count for pc requests */
- if (blk_pc_request(rq))
- rq->resid_len = cmd->err_info->ResidualCnt;
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ rq->resid_len = c->err_info->ResidualCnt;
blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
spin_lock_irqsave(&h->lock, flags);
- cmd_free(h, cmd, 1);
+ cmd_free(h, c);
cciss_check_queues(h);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -1782,7 +1826,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
* via the inquiry page 0. Model, vendor, and rev are set to empty strings if
* they cannot be read.
*/
-static void cciss_get_device_descr(int ctlr, int logvol,
+static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
char *vendor, char *model, char *rev)
{
int rc;
@@ -1797,8 +1841,8 @@ static void cciss_get_device_descr(int ctlr, int logvol,
if (!inq_buf)
return;
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf), 0,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
scsi3addr, TYPE_CMD);
if (rc == IO_OK) {
memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
@@ -1818,7 +1862,7 @@ static void cciss_get_device_descr(int ctlr, int logvol,
* number cannot be had, for whatever reason, 16 bytes of 0xff
* are returned instead.
*/
-static void cciss_get_serial_no(int ctlr, int logvol,
+static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
unsigned char *serial_no, int buflen)
{
#define PAGE_83_INQ_BYTES 64
@@ -1833,8 +1877,8 @@ static void cciss_get_serial_no(int ctlr, int logvol,
if (!buf)
return;
memset(serial_no, 0, buflen);
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
if (rc == IO_OK)
memcpy(serial_no, &buf[8], buflen);
@@ -1900,10 +1944,9 @@ init_queue_failure:
* is also the controller node. Any changes to disk 0 will show up on
* the next reboot.
*/
-static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
- int via_ioctl)
+static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
+ int first_time, int via_ioctl)
{
- ctlr_info_t *h = hba[ctlr];
struct gendisk *disk;
InquiryData_struct *inq_buff = NULL;
unsigned int block_size;
@@ -1920,16 +1963,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
/* testing to see if 16-byte CDBs are already being used */
if (h->cciss_read == CCISS_READ_16) {
- cciss_read_capacity_16(h->ctlr, drv_index,
+ cciss_read_capacity_16(h, drv_index,
&total_size, &block_size);
} else {
- cciss_read_capacity(ctlr, drv_index, &total_size, &block_size);
+ cciss_read_capacity(h, drv_index, &total_size, &block_size);
/* if read_capacity returns all F's this volume is >2TB */
/* in size so we switch to 16-byte CDB's for all */
/* read/write ops */
if (total_size == 0xFFFFFFFFULL) {
- cciss_read_capacity_16(ctlr, drv_index,
+ cciss_read_capacity_16(h, drv_index,
&total_size, &block_size);
h->cciss_read = CCISS_READ_16;
h->cciss_write = CCISS_WRITE_16;
@@ -1939,14 +1982,14 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
}
}
- cciss_geometry_inquiry(ctlr, drv_index, total_size, block_size,
+ cciss_geometry_inquiry(h, drv_index, total_size, block_size,
inq_buff, drvinfo);
drvinfo->block_size = block_size;
drvinfo->nr_blocks = total_size + 1;
- cciss_get_device_descr(ctlr, drv_index, drvinfo->vendor,
+ cciss_get_device_descr(h, drv_index, drvinfo->vendor,
drvinfo->model, drvinfo->rev);
- cciss_get_serial_no(ctlr, drv_index, drvinfo->serial_no,
+ cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
sizeof(drvinfo->serial_no));
/* Save the lunid in case we deregister the disk, below. */
memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
@@ -1971,10 +2014,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
* (unless it's the first disk (for the controller node).
*/
if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
- printk(KERN_WARNING "disk %d has changed.\n", drv_index);
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
+ spin_lock_irqsave(&h->lock, flags);
h->drv[drv_index]->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
/* deregister_disk sets h->drv[drv_index]->queue = NULL
* which keeps the interrupt handler from starting
@@ -2024,8 +2067,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
if (cciss_add_disk(h, disk, drv_index) != 0) {
cciss_free_gendisk(h, drv_index);
cciss_free_drive_info(h, drv_index);
- printk(KERN_WARNING "cciss:%d could not update "
- "disk %d\n", h->ctlr, drv_index);
+ dev_warn(&h->pdev->dev, "could not update disk %d\n",
+ drv_index);
--h->num_luns;
}
}
@@ -2035,7 +2078,7 @@ freeret:
kfree(drvinfo);
return;
mem_msg:
- printk(KERN_ERR "cciss: out of memory\n");
+ dev_err(&h->pdev->dev, "out of memory\n");
goto freeret;
}
@@ -2127,9 +2170,9 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
h->gendisk[drv_index] =
alloc_disk(1 << NWD_SHIFT);
if (!h->gendisk[drv_index]) {
- printk(KERN_ERR "cciss%d: could not "
- "allocate a new disk %d\n",
- h->ctlr, drv_index);
+ dev_err(&h->pdev->dev,
+ "could not allocate a new disk %d\n",
+ drv_index);
goto err_free_drive_info;
}
}
@@ -2180,8 +2223,7 @@ static void cciss_add_controller_node(ctlr_info_t *h)
cciss_free_gendisk(h, drv_index);
cciss_free_drive_info(h, drv_index);
error:
- printk(KERN_WARNING "cciss%d: could not "
- "add disk 0.\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "could not add disk 0.\n");
return;
}
@@ -2196,7 +2238,6 @@ error:
static int rebuild_lun_table(ctlr_info_t *h, int first_time,
int via_ioctl)
{
- int ctlr = h->ctlr;
int num_luns;
ReportLunData_struct *ld_buff = NULL;
int return_code;
@@ -2211,27 +2252,27 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
return -EPERM;
/* Set busy_configuring flag for this operation */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return -EBUSY;
}
h->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
if (ld_buff == NULL)
goto mem_msg;
- return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
+ return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
sizeof(ReportLunData_struct),
0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK)
listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
else { /* reading number of logical volumes failed */
- printk(KERN_WARNING "cciss: report logical volume"
- " command failed\n");
+ dev_warn(&h->pdev->dev,
+ "report logical volume command failed\n");
listlength = 0;
goto freeret;
}
@@ -2239,7 +2280,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
num_luns = listlength / 8; /* 8 bytes per entry */
if (num_luns > CISS_MAX_LUN) {
num_luns = CISS_MAX_LUN;
- printk(KERN_WARNING "cciss: more luns configured"
+ dev_warn(&h->pdev->dev, "more luns configured"
" on controller than can be handled by"
" this driver.\n");
}
@@ -2270,9 +2311,9 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
}
if (!drv_found) {
/* Deregister it from the OS, it's gone. */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
h->drv[i]->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
return_code = deregister_disk(h, i, 1, via_ioctl);
if (h->drv[i] != NULL)
h->drv[i]->busy_configuring = 0;
@@ -2311,8 +2352,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
if (drv_index == -1)
goto freeret;
}
- cciss_update_drive_info(ctlr, drv_index, first_time,
- via_ioctl);
+ cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
} /* end for */
freeret:
@@ -2324,7 +2364,7 @@ freeret:
*/
return -1;
mem_msg:
- printk(KERN_ERR "cciss: out of memory\n");
+ dev_err(&h->pdev->dev, "out of memory\n");
h->busy_configuring = 0;
goto freeret;
}
@@ -2444,11 +2484,10 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
return 0;
}
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
size_t size, __u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
- ctlr_info_t *h = hba[ctlr];
u64bit buff_dma_handle;
int status = IO_OK;
@@ -2532,8 +2571,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
c->Request.Timeout = 0;
break;
default:
- printk(KERN_WARNING
- "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
+ dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd);
return IO_ERROR;
}
} else if (cmd_type == TYPE_MSG) {
@@ -2565,13 +2603,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
c->Request.CDB[0] = cmd;
break;
default:
- printk(KERN_WARNING
- "cciss%d: unknown message type %d\n", ctlr, cmd);
+ dev_warn(&h->pdev->dev,
+ "unknown message type %d\n", cmd);
return IO_ERROR;
}
} else {
- printk(KERN_WARNING
- "cciss%d: unknown command type %d\n", ctlr, cmd_type);
+ dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
return IO_ERROR;
}
/* Fill in the scatter gather information */
@@ -2599,15 +2636,14 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
default:
if (check_for_unit_attention(h, c))
return IO_NEEDS_RETRY;
- printk(KERN_WARNING "cciss%d: cmd 0x%02x "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x "
"check condition, sense key = 0x%02x\n",
- h->ctlr, c->Request.CDB[0],
- c->err_info->SenseInfo[2]);
+ c->Request.CDB[0], c->err_info->SenseInfo[2]);
}
break;
default:
- printk(KERN_WARNING "cciss%d: cmd 0x%02x"
- "scsi status = 0x%02x\n", h->ctlr,
+ dev_warn(&h->pdev->dev, "cmd 0x%02x"
+ "scsi status = 0x%02x\n",
c->Request.CDB[0], c->err_info->ScsiStatus);
break;
}
@@ -2630,43 +2666,42 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
/* expected for inquiry and report lun commands */
break;
case CMD_INVALID:
- printk(KERN_WARNING "cciss: cmd 0x%02x is "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x is "
"reported invalid\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd 0x%02x has "
- "protocol error \n", c->Request.CDB[0]);
+ dev_warn(&h->pdev->dev, "cmd 0x%02x has "
+ "protocol error\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x had "
" hardware error\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x had "
"connection lost\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd 0x%02x was "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x was "
"aborted\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd 0x%02x reports "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
"abort failed\n", c->Request.CDB[0]);
return_status = IO_ERROR;
break;
case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING
- "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
+ dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
c->Request.CDB[0]);
return_status = IO_NEEDS_RETRY;
break;
default:
- printk(KERN_WARNING "cciss: cmd 0x%02x returned "
+ dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
"unknown status %x\n", c->Request.CDB[0],
c->err_info->CommandStatus);
return_status = IO_ERROR;
@@ -2679,17 +2714,11 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
{
DECLARE_COMPLETION_ONSTACK(wait);
u64bit buff_dma_handle;
- unsigned long flags;
int return_status = IO_OK;
resend_cmd2:
c->waiting = &wait;
- /* Put the request on the tail of the queue and send it */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
@@ -2700,7 +2729,7 @@ resend_cmd2:
if (return_status == IO_NEEDS_RETRY &&
c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
+ dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
c->Request.CDB[0]);
c->retry_count++;
/* erase the old error information */
@@ -2719,27 +2748,26 @@ command_done:
return return_status;
}
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
int cmd_type)
{
- ctlr_info_t *h = hba[ctlr];
CommandList_struct *c;
int return_status;
- c = cmd_alloc(h, 0);
+ c = cmd_special_alloc(h);
if (!c)
return -ENOMEM;
- return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ return_status = fill_cmd(h, c, cmd, buff, size, page_code,
scsi3addr, cmd_type);
if (return_status == IO_OK)
return_status = sendcmd_withirq_core(h, c, 1);
- cmd_free(h, c, 0);
+ cmd_special_free(h, c);
return return_status;
}
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
sector_t total_size,
unsigned int block_size,
InquiryData_struct *inq_buff,
@@ -2750,13 +2778,13 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
unsigned char scsi3addr[8];
memset(inq_buff, 0, sizeof(InquiryData_struct));
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
if (inq_buff->data_byte[8] == 0xFF) {
- printk(KERN_WARNING
- "cciss: reading geometry failed, volume "
+ dev_warn(&h->pdev->dev,
+ "reading geometry failed, volume "
"does not support reading geometry\n");
drv->heads = 255;
drv->sectors = 32; /* Sectors per track */
@@ -2780,12 +2808,12 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
drv->cylinders = real_size;
}
} else { /* Get geometry failed */
- printk(KERN_WARNING "cciss: reading geometry failed\n");
+ dev_warn(&h->pdev->dev, "reading geometry failed\n");
}
}
static void
-cciss_read_capacity(int ctlr, int logvol, sector_t *total_size,
+cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
unsigned int *block_size)
{
ReadCapdata_struct *buf;
@@ -2794,25 +2822,25 @@ cciss_read_capacity(int ctlr, int logvol, sector_t *total_size,
buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
if (!buf) {
- printk(KERN_WARNING "cciss: out of memory\n");
+ dev_warn(&h->pdev->dev, "out of memory\n");
return;
}
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf,
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
} else { /* read capacity command failed */
- printk(KERN_WARNING "cciss: read capacity failed\n");
+ dev_warn(&h->pdev->dev, "read capacity failed\n");
*total_size = 0;
*block_size = BLOCK_SIZE;
}
kfree(buf);
}
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
sector_t *total_size, unsigned int *block_size)
{
ReadCapdata_struct_16 *buf;
@@ -2821,23 +2849,23 @@ static void cciss_read_capacity_16(int ctlr, int logvol,
buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
if (!buf) {
- printk(KERN_WARNING "cciss: out of memory\n");
+ dev_warn(&h->pdev->dev, "out of memory\n");
return;
}
- log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
- return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
- ctlr, buf, sizeof(ReadCapdata_struct_16),
+ log_unit_to_scsi3addr(h, scsi3addr, logvol);
+ return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
+ buf, sizeof(ReadCapdata_struct_16),
0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
} else { /* read capacity command failed */
- printk(KERN_WARNING "cciss: read capacity failed\n");
+ dev_warn(&h->pdev->dev, "read capacity failed\n");
*total_size = 0;
*block_size = BLOCK_SIZE;
}
- printk(KERN_INFO " blocks= %llu block_size= %d\n",
+ dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n",
(unsigned long long)*total_size+1, *block_size);
kfree(buf);
}
@@ -2865,17 +2893,17 @@ static int cciss_revalidate(struct gendisk *disk)
inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
if (inq_buff == NULL) {
- printk(KERN_WARNING "cciss: out of memory\n");
+ dev_warn(&h->pdev->dev, "out of memory\n");
return 1;
}
if (h->cciss_read == CCISS_READ_10) {
- cciss_read_capacity(h->ctlr, logvol,
+ cciss_read_capacity(h, logvol,
&total_size, &block_size);
} else {
- cciss_read_capacity_16(h->ctlr, logvol,
+ cciss_read_capacity_16(h, logvol,
&total_size, &block_size);
}
- cciss_geometry_inquiry(h->ctlr, logvol, total_size, block_size,
+ cciss_geometry_inquiry(h, logvol, total_size, block_size,
inq_buff, drv);
blk_queue_logical_block_size(drv->queue, drv->block_size);
@@ -2909,7 +2937,7 @@ static void start_io(ctlr_info_t *h)
c = hlist_entry(h->reqQ.first, CommandList_struct, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
- printk(KERN_WARNING "cciss: fifo full\n");
+ dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
@@ -2925,7 +2953,7 @@ static void start_io(ctlr_info_t *h)
}
}
-/* Assumes that CCISS_LOCK(h->ctlr) is held. */
+/* Assumes that h->lock is held. */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
@@ -2966,7 +2994,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
driver_byte = DRIVER_OK;
msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
- if (blk_pc_request(cmd->rq))
+ if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
host_byte = DID_PASSTHROUGH;
else
host_byte = DID_OK;
@@ -2975,8 +3003,8 @@ static inline int evaluate_target_status(ctlr_info_t *h,
host_byte, driver_byte);
if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
- if (!blk_pc_request(cmd->rq))
- printk(KERN_WARNING "cciss: cmd %p "
+ if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+ dev_warn(&h->pdev->dev, "cmd %p "
"has SCSI Status 0x%x\n",
cmd, cmd->err_info->ScsiStatus);
return error_value;
@@ -2985,17 +3013,19 @@ static inline int evaluate_target_status(ctlr_info_t *h,
/* check the sense key */
sense_key = 0xf & cmd->err_info->SenseInfo[2];
/* no status or recovered error */
- if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
+ if (((sense_key == 0x0) || (sense_key == 0x1)) &&
+ (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
error_value = 0;
if (check_for_unit_attention(h, cmd)) {
- *retry_cmd = !blk_pc_request(cmd->rq);
+ *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
return 0;
}
- if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
+ /* Not SG_IO or similar? */
+ if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
if (error_value != 0)
- printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
+ dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
" sense key = 0x%x\n", cmd, sense_key);
return error_value;
}
@@ -3035,90 +3065,97 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
break;
case CMD_DATA_UNDERRUN:
- if (blk_fs_request(cmd->rq)) {
- printk(KERN_WARNING "cciss: cmd %p has"
+ if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+ dev_warn(&h->pdev->dev, "cmd %p has"
" completed with data underrun "
"reported\n", cmd);
cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
- if (blk_fs_request(cmd->rq))
- printk(KERN_WARNING "cciss: cmd %p has"
+ if (cmd->rq->cmd_type == REQ_TYPE_FS)
+ dev_warn(&h->pdev->dev, "cciss: cmd %p has"
" completed with data overrun "
"reported\n", cmd);
break;
case CMD_INVALID:
- printk(KERN_WARNING "cciss: cmd %p is "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p is "
"reported invalid\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd %p has "
- "protocol error \n", cmd);
+ dev_warn(&h->pdev->dev, "cciss: cmd %p has "
+ "protocol error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd %p had "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p had "
" hardware error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd %p had "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p had "
"connection lost\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd %p was "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p was "
"aborted\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd %p reports "
+ dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
"abort failed\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING "cciss%d: unsolicited "
+ dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
"abort %p\n", h->ctlr, cmd);
if (cmd->retry_count < MAX_CMD_RETRIES) {
retry_cmd = 1;
- printk(KERN_WARNING
- "cciss%d: retrying %p\n", h->ctlr, cmd);
+ dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
cmd->retry_count++;
} else
- printk(KERN_WARNING
- "cciss%d: %p retried too "
- "many times\n", h->ctlr, cmd);
+ dev_warn(&h->pdev->dev,
+ "%p retried too many times\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_TIMEOUT:
- printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
+ dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
break;
default:
- printk(KERN_WARNING "cciss: cmd %p returned "
+ dev_warn(&h->pdev->dev, "cmd %p returned "
"unknown status %x\n", cmd,
cmd->err_info->CommandStatus);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+ (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ DID_PASSTHROUGH : DID_ERROR);
}
after_error_processing:
@@ -3132,6 +3169,34 @@ after_error_processing:
blk_complete_request(cmd->rq);
}
+static inline u32 cciss_tag_contains_index(u32 tag)
+{
+#define DIRECT_LOOKUP_BIT 0x10
+ return tag & DIRECT_LOOKUP_BIT;
+}
+
+static inline u32 cciss_tag_to_index(u32 tag)
+{
+#define DIRECT_LOOKUP_SHIFT 5
+ return tag >> DIRECT_LOOKUP_SHIFT;
+}
+
+static inline u32 cciss_tag_discard_error_bits(u32 tag)
+{
+#define CCISS_ERROR_BITS 0x03
+ return tag & ~CCISS_ERROR_BITS;
+}
+
+static inline void cciss_mark_tag_indexed(u32 *tag)
+{
+ *tag |= DIRECT_LOOKUP_BIT;
+}
+
+static inline void cciss_set_tag_index(u32 *tag, u32 index)
+{
+ *tag |= (index << DIRECT_LOOKUP_SHIFT);
+}
+
/*
* Get a request and submit it to the controller.
*/
@@ -3163,7 +3228,8 @@ static void do_cciss_request(struct request_queue *q)
BUG_ON(creq->nr_phys_segments > h->maxsgentries);
- if ((c = cmd_alloc(h, 1)) == NULL)
+ c = cmd_alloc(h);
+ if (!c)
goto full;
blk_start_request(creq);
@@ -3180,8 +3246,8 @@ static void do_cciss_request(struct request_queue *q)
/* got command from pool, so use the command block index instead */
/* for direct lookups. */
/* The first 2 bits are reserved for controller error reporting. */
- c->Header.Tag.lower = (c->cmdindex << 3);
- c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
+ cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
+ cciss_mark_tag_indexed(&c->Header.Tag.lower);
memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */
c->Request.Type.Type = TYPE_CMD; /* It is a command. */
@@ -3192,11 +3258,8 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[0] =
(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
start_blk = blk_rq_pos(creq);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+ dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
(int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
-#endif /* CCISS_DEBUG */
-
sg_init_table(tmp_sg, h->maxsgentries);
seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -3236,17 +3299,18 @@ static void do_cciss_request(struct request_queue *q)
if (seg > h->maxSG)
h->maxSG = seg;
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments "
+ dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
"chained[%d]\n",
blk_rq_sectors(creq), seg, chained);
-#endif /* CCISS_DEBUG */
- c->Header.SGList = c->Header.SGTotal = seg + chained;
- if (seg > h->max_cmd_sgentries)
+ c->Header.SGTotal = seg + chained;
+ if (seg <= h->max_cmd_sgentries)
+ c->Header.SGList = c->Header.SGTotal;
+ else
c->Header.SGList = h->max_cmd_sgentries;
+ set_performant_mode(h, c);
- if (likely(blk_fs_request(creq))) {
+ if (likely(creq->cmd_type == REQ_TYPE_FS)) {
if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@@ -3276,11 +3340,12 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
- } else if (blk_pc_request(creq)) {
+ } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
c->Request.CDBLen = creq->cmd_len;
memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
} else {
- printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
+ dev_warn(&h->pdev->dev, "bad request type %d\n",
+ creq->cmd_type);
BUG();
}
@@ -3313,72 +3378,131 @@ static inline int interrupt_pending(ctlr_info_t *h)
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
- return (((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0)));
+ return ((h->access.intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
}
-static irqreturn_t do_cciss_intr(int irq, void *dev_id)
+static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
+ u32 raw_tag)
{
- ctlr_info_t *h = dev_id;
+ if (unlikely(tag_index >= h->nr_cmds)) {
+ dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
+ u32 raw_tag)
+{
+ removeQ(c);
+ if (likely(c->cmd_type == CMD_RWREQ))
+ complete_command(h, c, 0);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+#ifdef CONFIG_CISS_SCSI_TAPE
+ else if (c->cmd_type == CMD_SCSI)
+ complete_scsi_command(c, 0, raw_tag);
+#endif
+}
+
+static inline u32 next_command(ctlr_info_t *h)
+{
+ u32 a;
+
+ if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+ return h->access.command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+ return a;
+}
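
The reply queue consumed by next_command() above is a ring buffer whose entries carry a phase bit in bit 0: the consumer only accepts an entry whose low bit matches reply_pool_wraparound, and it flips that expectation each time the head wraps past the end of the pool. Below is a minimal stand-alone sketch of the same consume-and-wrap logic; the driver names (reply_pool, reply_pool_wraparound, FIFO_EMPTY) are kept, while the ring size and the tags posted in main() are made-up illustration values.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE	8
#define FIFO_EMPTY	0xffffffffUL

static uint64_t reply_pool[RING_SIZE];		/* zeroed, as the driver requires */
static uint64_t *reply_pool_head = reply_pool;
static unsigned int reply_pool_wraparound = 1;	/* spec: init to 1 */

/* Pop one completed tag, or FIFO_EMPTY if the producer has not advanced. */
static uint64_t ring_next_command(void)
{
	uint64_t a = FIFO_EMPTY;

	if ((*reply_pool_head & 1) == reply_pool_wraparound) {
		a = *reply_pool_head;
		reply_pool_head++;
	}
	/* Wrapped past the last slot: restart and expect the other phase bit. */
	if (reply_pool_head == reply_pool + RING_SIZE) {
		reply_pool_head = reply_pool;
		reply_pool_wraparound ^= 1;
	}
	return a;
}

int main(void)
{
	int i;

	/* A pretend controller posts two completions; bit 0 carries the phase. */
	reply_pool[0] = (0x20 << 1) | 1;
	reply_pool[1] = (0x21 << 1) | 1;

	for (i = 0; i < 4; i++) {
		uint64_t tag = ring_next_command();
		if (tag == FIFO_EMPTY)
			printf("reply queue empty\n");
		else
			printf("completed tag 0x%llx\n", (unsigned long long)tag);
	}
	return 0;
}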
+
+/* process completion of an indexed ("direct lookup") command */
+static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
+{
+ u32 tag_index;
CommandList_struct *c;
+
+ tag_index = cciss_tag_to_index(raw_tag);
+ if (bad_tag(h, tag_index, raw_tag))
+ return next_command(h);
+ c = h->cmd_pool + tag_index;
+ finish_cmd(h, c, raw_tag);
+ return next_command(h);
+}
+
+/* process completion of a non-indexed command */
+static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
+{
+ u32 tag;
+ CommandList_struct *c = NULL;
+ struct hlist_node *tmp;
+ __u32 busaddr_masked, tag_masked;
+
+ tag = cciss_tag_discard_error_bits(raw_tag);
+ hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
+ tag_masked = cciss_tag_discard_error_bits(tag);
+ if (busaddr_masked == tag_masked) {
+ finish_cmd(h, c, raw_tag);
+ return next_command(h);
+ }
+ }
+ bad_tag(h, h->nr_cmds + 1, raw_tag);
+ return next_command(h);
+}
+
+static irqreturn_t do_cciss_intx(int irq, void *dev_id)
+{
+ ctlr_info_t *h = dev_id;
unsigned long flags;
- __u32 a, a1, a2;
+ u32 raw_tag;
if (interrupt_not_for_us(h))
return IRQ_NONE;
- /*
- * If there are completed commands in the completion queue,
- * we had better do something about it.
- */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
while (interrupt_pending(h)) {
- while ((a = get_next_completion(h)) != FIFO_EMPTY) {
- a1 = a;
- if ((a & 0x04)) {
- a2 = (a >> 3);
- if (a2 >= h->nr_cmds) {
- printk(KERN_WARNING
- "cciss: controller cciss%d failed, stopping.\n",
- h->ctlr);
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
- fail_all_cmds(h->ctlr);
- return IRQ_HANDLED;
- }
-
- c = h->cmd_pool + a2;
- a = c->busaddr;
-
- } else {
- struct hlist_node *tmp;
-
- a &= ~3;
- c = NULL;
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
- if (c->busaddr == a)
- break;
- }
- }
- /*
- * If we've found the command, take it off the
- * completion Q and free it
- */
- if (c && c->busaddr == a) {
- removeQ(c);
- if (c->cmd_type == CMD_RWREQ) {
- complete_command(h, c, 0);
- } else if (c->cmd_type == CMD_IOCTL_PEND) {
- complete(c->waiting);
- }
-# ifdef CONFIG_CISS_SCSI_TAPE
- else if (c->cmd_type == CMD_SCSI)
- complete_scsi_command(c, 0, a1);
-# endif
- continue;
- }
+ raw_tag = get_next_completion(h);
+ while (raw_tag != FIFO_EMPTY) {
+ if (cciss_tag_contains_index(raw_tag))
+ raw_tag = process_indexed_cmd(h, raw_tag);
+ else
+ raw_tag = process_nonindexed_cmd(h, raw_tag);
}
}
+ spin_unlock_irqrestore(&h->lock, flags);
+ return IRQ_HANDLED;
+}
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
+ * check the interrupt pending register because it is not set.
+ */
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
+{
+ ctlr_info_t *h = dev_id;
+ unsigned long flags;
+ u32 raw_tag;
+
+ spin_lock_irqsave(&h->lock, flags);
+ raw_tag = get_next_completion(h);
+ while (raw_tag != FIFO_EMPTY) {
+ if (cciss_tag_contains_index(raw_tag))
+ raw_tag = process_indexed_cmd(h, raw_tag);
+ else
+ raw_tag = process_nonindexed_cmd(h, raw_tag);
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
@@ -3510,18 +3634,17 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
switch (c->err_info->SenseInfo[12]) {
case STATE_CHANGED:
- printk(KERN_WARNING "cciss%d: a state change "
- "detected, command retried\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "a state change "
+ "detected, command retried\n");
return 1;
break;
case LUN_FAILED:
- printk(KERN_WARNING "cciss%d: LUN failure "
- "detected, action required\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "LUN failure "
+ "detected, action required\n");
return 1;
break;
case REPORT_LUNS_CHANGED:
- printk(KERN_WARNING "cciss%d: report LUN data "
- "changed\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "report LUN data changed\n");
/*
* Here, we could call add_to_scan_list and wake up the scan thread,
* except that it's quite likely that we will get more than one
@@ -3541,19 +3664,18 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
return 1;
break;
case POWER_OR_RESET:
- printk(KERN_WARNING "cciss%d: a power on "
- "or device reset detected\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "a power on or device reset detected\n");
return 1;
break;
case UNIT_ATTENTION_CLEARED:
- printk(KERN_WARNING "cciss%d: unit attention "
- "cleared by another initiator\n", h->ctlr);
+ dev_warn(&h->pdev->dev,
+ "unit attention cleared by another initiator\n");
return 1;
break;
default:
- printk(KERN_WARNING "cciss%d: unknown "
- "unit attention detected\n", h->ctlr);
- return 1;
+ dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
+ return 1;
}
}
@@ -3562,39 +3684,41 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
* the io functions.
* This is for debug only.
*/
-#ifdef CCISS_DEBUG
-static void print_cfg_table(CfgTable_struct *tb)
+static void print_cfg_table(ctlr_info_t *h)
{
int i;
char temp_name[17];
+ CfgTable_struct *tb = h->cfgtable;
- printk("Controller Configuration information\n");
- printk("------------------------------------\n");
+ dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
+ dev_dbg(&h->pdev->dev, "------------------------------------\n");
for (i = 0; i < 4; i++)
temp_name[i] = readb(&(tb->Signature[i]));
temp_name[4] = '\0';
- printk(" Signature = %s\n", temp_name);
- printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
- printk(" Transport methods supported = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name);
+ dev_dbg(&h->pdev->dev, " Spec Number = %d\n",
+ readl(&(tb->SpecValence)));
+ dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n",
readl(&(tb->TransportSupport)));
- printk(" Transport methods active = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n",
readl(&(tb->TransportActive)));
- printk(" Requested transport Method = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n",
readl(&(tb->HostWrite.TransportRequest)));
- printk(" Coalesce Interrupt Delay = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n",
readl(&(tb->HostWrite.CoalIntDelay)));
- printk(" Coalesce Interrupt Count = 0x%x\n",
+ dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
readl(&(tb->HostWrite.CoalIntCount)));
- printk(" Max outstanding commands = 0x%d\n",
+ dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n",
readl(&(tb->CmdsOutMax)));
- printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+ dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
+ readl(&(tb->BusTypes)));
for (i = 0; i < 16; i++)
temp_name[i] = readb(&(tb->ServerName[i]));
temp_name[16] = '\0';
- printk(" Server Name = %s\n", temp_name);
- printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
+ dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name);
+ dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n",
+ readl(&(tb->HeartBeat)));
}
-#endif /* CCISS_DEBUG */
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
@@ -3618,7 +3742,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
offset += 8;
break;
default: /* reserved in PCI 2.2 */
- printk(KERN_WARNING
+ dev_warn(&pdev->dev,
"Base address is invalid\n");
return -1;
break;
@@ -3630,12 +3754,182 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
return -1;
}
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers. The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands. This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes. The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void calc_bucket_map(int bucket[], int num_buckets,
+ int nsgs, int *bucket_map)
+{
+ int i, j, b, size;
+
+ /* even a command with 0 SGs requires 4 blocks */
+#define MINIMUM_TRANSFER_BLOCKS 4
+#define NUM_BUCKETS 8
+ /* Note, bucket_map must have nsgs+1 entries. */
+ for (i = 0; i <= nsgs; i++) {
+ /* Compute size of a command with i SG entries */
+ size = i + MINIMUM_TRANSFER_BLOCKS;
+ b = num_buckets; /* Assume the biggest bucket */
+ /* Find the bucket that is just big enough */
+ for (j = 0; j < 8; j++) {
+ if (bucket[j] >= size) {
+ b = j;
+ break;
+ }
+ }
+ /* for a command with i SG entries, use bucket b. */
+ bucket_map[i] = b;
+ }
+}
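
In other words, bucket_map[i] records the smallest of the eight advertised fetch sizes that still covers a command carrying i scatter-gather entries. The stand-alone sketch below runs the same mapping against the bft[] sizes programmed later in cciss_enter_performant_mode(); the MAXSGENTRIES value of 32 is assumed here purely for illustration.

#include <stdio.h>

#define MINIMUM_TRANSFER_BLOCKS	4
#define MAXSGENTRIES		32	/* assumed here for illustration */

/* Same mapping as calc_bucket_map() above: for each possible SG count,
 * pick the first bucket whose size (in 16-byte blocks) is large enough. */
static void calc_bucket_map(int bucket[], int num_buckets, int nsgs,
			    int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets;		/* assume the biggest bucket */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 };
	int map[MAXSGENTRIES + 1];
	int i;

	calc_bucket_map(bft, 8, MAXSGENTRIES, map);
	/* e.g. 0 SG entries -> bucket 0 (5 blocks), 6 SG -> bucket 3 (10 blocks) */
	for (i = 0; i <= MAXSGENTRIES; i += 8)
		printf("%2d SG entries -> bucket %d (%d x 16 bytes)\n",
		       i, map[i], bft[map[i]]);
	return 0;
}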
+
+static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
+{
+ int i;
+
+ /* under certain very rare conditions, this can take a while.
+ * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+ * as we enter this code.) */
+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ break;
+ msleep(10);
+ }
+}
+
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+{
+ /* This is a bit complicated. There are 8 registers on
+ * the controller which we write to to tell it 8 different
+ * sizes of commands which there may be. It's a way of
+ * reducing the DMA done to fetch each command. Encoded into
+ * each command's tag are 3 bits which communicate to the controller
+ * which of the eight sizes that command fits within. The size of
+ * each command depends on how many scatter gather entries there are.
+ * Each SG entry requires 16 bytes. The eight registers are programmed
+ * with the number of 16-byte blocks a command of that size requires.
+ * The smallest command possible requires 5 such 16 byte blocks.
+ * The largest command possible requires MAXSGENTRIES + 4 16-byte
+ * blocks. Note, this only extends to the SG entries contained
+ * within the command block, and does not extend to chained blocks
+ * of SG elements. bft[] contains the eight values we write to
+ * the registers. They are not evenly distributed, but have more
+ * sizes for small commands, and fewer sizes for larger commands.
+ */
+ __u32 trans_offset;
+ int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
+ /*
+ * 5 = 1 s/g entry or 4k
+ * 6 = 2 s/g entry or 8k
+ * 8 = 4 s/g entry or 16k
+ * 10 = 6 s/g entry or 24k
+ */
+ unsigned long register_value;
+ BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+
+ h->reply_pool_wraparound = 1; /* spec: init to 1 */
+
+ /* Controller spec: zero out this buffer. */
+ memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
+ h->reply_pool_head = h->reply_pool;
+
+ trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+ calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
+ h->blockFetchTable);
+ writel(bft[0], &h->transtable->BlockFetch0);
+ writel(bft[1], &h->transtable->BlockFetch1);
+ writel(bft[2], &h->transtable->BlockFetch2);
+ writel(bft[3], &h->transtable->BlockFetch3);
+ writel(bft[4], &h->transtable->BlockFetch4);
+ writel(bft[5], &h->transtable->BlockFetch5);
+ writel(bft[6], &h->transtable->BlockFetch6);
+ writel(bft[7], &h->transtable->BlockFetch7);
+
+ /* size of controller ring buffer */
+ writel(h->max_commands, &h->transtable->RepQSize);
+ writel(1, &h->transtable->RepQCount);
+ writel(0, &h->transtable->RepQCtrAddrLow32);
+ writel(0, &h->transtable->RepQCtrAddrHigh32);
+ writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
+ writel(0, &h->transtable->RepQAddr0High32);
+ writel(CFGTBL_Trans_Performant,
+ &(h->cfgtable->HostWrite.TransportRequest));
+
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ cciss_wait_for_mode_change_ack(h);
+ register_value = readl(&(h->cfgtable->TransportActive));
+ if (!(register_value & CFGTBL_Trans_Performant))
+ dev_warn(&h->pdev->dev, "cciss: unable to get board into"
+ " performant mode\n");
+}
+
+static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+{
+ __u32 trans_support;
+
+ dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
+ /* Attempt to put controller into performant mode if supported */
+ /* Does board support performant mode? */
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & PERFORMANT_MODE))
+ return;
+
+ dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
+ /* Performant mode demands commands on a 32 byte boundary
+ * pci_alloc_consistent aligns on page boundaries already.
+ * Just need to check if divisible by 32
+ */
+ if ((sizeof(CommandList_struct) % 32) != 0) {
+ dev_warn(&h->pdev->dev, "%s %d %s\n",
+ "cciss info: command size[",
+ (int)sizeof(CommandList_struct),
+ "] not divisible by 32, no performant mode..\n");
+ return;
+ }
+
+ /* Performant mode ring buffer and supporting data structures */
+ h->reply_pool = (__u64 *)pci_alloc_consistent(
+ h->pdev, h->max_commands * sizeof(__u64),
+ &(h->reply_pool_dhandle));
+
+ /* Need a block fetch table for performant mode */
+ h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
+ sizeof(__u32)), GFP_KERNEL);
+
+ if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
+ goto clean_up;
+
+ cciss_enter_performant_mode(h);
+
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+
+ return;
+clean_up:
+ kfree(h->blockFetchTable);
+ if (h->reply_pool)
+ pci_free_consistent(h->pdev,
+ h->max_commands * sizeof(__u64),
+ h->reply_pool,
+ h->reply_pool_dhandle);
+ return;
+
+} /* cciss_put_controller_into_performant_mode */
+
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use IO-APIC mode.
*/
-static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
- struct pci_dev *pdev, __u32 board_id)
+static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
{
#ifdef CONFIG_PCI_MSI
int err;
@@ -3644,268 +3938,283 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
};
/* Some boards advertise MSI but don't really support it */
- if ((board_id == 0x40700E11) ||
- (board_id == 0x40800E11) ||
- (board_id == 0x40820E11) || (board_id == 0x40830E11))
+ if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
+ (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
goto default_int_mode;
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
- err = pci_enable_msix(pdev, cciss_msix_entries, 4);
+ if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
+ err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
if (!err) {
- c->intr[0] = cciss_msix_entries[0].vector;
- c->intr[1] = cciss_msix_entries[1].vector;
- c->intr[2] = cciss_msix_entries[2].vector;
- c->intr[3] = cciss_msix_entries[3].vector;
- c->msix_vector = 1;
+ h->intr[0] = cciss_msix_entries[0].vector;
+ h->intr[1] = cciss_msix_entries[1].vector;
+ h->intr[2] = cciss_msix_entries[2].vector;
+ h->intr[3] = cciss_msix_entries[3].vector;
+ h->msix_vector = 1;
return;
}
if (err > 0) {
- printk(KERN_WARNING "cciss: only %d MSI-X vectors "
- "available\n", err);
+ dev_warn(&h->pdev->dev,
+ "only %d MSI-X vectors available\n", err);
goto default_int_mode;
} else {
- printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
- err);
+ dev_warn(&h->pdev->dev,
+ "MSI-X init failed %d\n", err);
goto default_int_mode;
}
}
- if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
- if (!pci_enable_msi(pdev)) {
- c->msi_vector = 1;
- } else {
- printk(KERN_WARNING "cciss: MSI init failed\n");
- }
+ if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
+ if (!pci_enable_msi(h->pdev))
+ h->msi_vector = 1;
+ else
+ dev_warn(&h->pdev->dev, "MSI init failed\n");
}
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- c->intr[SIMPLE_MODE_INT] = pdev->irq;
+ h->intr[PERF_MODE_INT] = h->pdev->irq;
return;
}
-static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
- ushort subsystem_vendor_id, subsystem_device_id, command;
- __u32 board_id, scratchpad = 0;
- __u64 cfg_offset;
- __u32 cfg_base_addr;
- __u64 cfg_base_addr_index;
- int i, prod_index, err;
+ int i;
+ u32 subsystem_vendor_id, subsystem_device_id;
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
- board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
- subsystem_vendor_id);
+ *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
+ subsystem_vendor_id;
for (i = 0; i < ARRAY_SIZE(products); i++) {
/* Stand aside for hpsa driver on request */
if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
return -ENODEV;
- if (board_id == products[i].board_id)
- break;
- }
- prod_index = i;
- if (prod_index == ARRAY_SIZE(products)) {
- dev_warn(&pdev->dev,
- "unrecognized board ID: 0x%08lx, ignoring.\n",
- (unsigned long) board_id);
- return -ENODEV;
+ if (*board_id == products[i].board_id)
+ return i;
}
+ dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
+ *board_id);
+ return -ENODEV;
+}
- /* check to see if controller has been disabled */
- /* BEFORE trying to enable it */
- (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
- if (!(command & 0x02)) {
- printk(KERN_WARNING
- "cciss: controller appears to be disabled\n");
- return -ENODEV;
- }
+static inline bool cciss_board_disabled(ctlr_info_t *h)
+{
+ u16 command;
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
- return err;
- }
+ (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
+ return ((command & PCI_COMMAND_MEMORY) == 0);
+}
- err = pci_request_regions(pdev, "cciss");
- if (err) {
- printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
- "aborting\n");
- return err;
- }
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
+{
+ int i;
-#ifdef CCISS_DEBUG
- printk("command = %x\n", command);
- printk("irq = %x\n", pdev->irq);
- printk("board_id = %x\n", board_id);
-#endif /* CCISS_DEBUG */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ /* addressing mode bits already removed */
+ *memory_bar = pci_resource_start(pdev, i);
+ dev_dbg(&pdev->dev, "memory BAR = %lx\n",
+ *memory_bar);
+ return 0;
+ }
+ dev_warn(&pdev->dev, "no memory BAR found\n");
+ return -ENODEV;
+}
-/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
- * else we use the IO-APIC interrupt assigned to us by system ROM.
- */
- cciss_interrupt_mode(c, pdev, board_id);
+static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+{
+ int i;
+ u32 scratchpad;
- /* find the memory BAR */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
- break;
- }
- if (i == DEVICE_COUNT_RESOURCE) {
- printk(KERN_WARNING "cciss: No memory BAR found\n");
- err = -ENODEV;
- goto err_out_free_res;
+ for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
+ scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (scratchpad == CCISS_FIRMWARE_READY)
+ return 0;
+ msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
}
+ dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ return -ENODEV;
+}
- c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
- * already removed
- */
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+ void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
+{
+ *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
+ *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
+ *cfg_base_addr &= (u32) 0x0000ffff;
+ *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
+ if (*cfg_base_addr_index == -1) {
+ dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
+ "*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
+ return -ENODEV;
+ }
+ return 0;
+}
-#ifdef CCISS_DEBUG
- printk("address 0 = %lx\n", c->paddr);
-#endif /* CCISS_DEBUG */
- c->vaddr = remap_pci_mem(c->paddr, 0x250);
+static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
+{
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ u32 trans_offset;
+ int rc;
- /* Wait for the board to become ready. (PCI hotplug needs this.)
- * We poll for up to 120 secs, once per 100ms. */
- for (i = 0; i < 1200; i++) {
- scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == CCISS_FIRMWARE_READY)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */
- }
- if (scratchpad != CCISS_FIRMWARE_READY) {
- printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
- err = -ENODEV;
- goto err_out_free_res;
- }
+ rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ if (rc)
+ return rc;
+ h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
+ if (!h->cfgtable)
+ return -ENOMEM;
+ /* Find performant mode table. */
+ trans_offset = readl(&h->cfgtable->TransMethodOffset);
+ h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index)+cfg_offset+trans_offset,
+ sizeof(*h->transtable));
+ if (!h->transtable)
+ return -ENOMEM;
+ return 0;
+}
- /* get the address index number */
- cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
- cfg_base_addr &= (__u32) 0x0000ffff;
-#ifdef CCISS_DEBUG
- printk("cfg base address = %x\n", cfg_base_addr);
-#endif /* CCISS_DEBUG */
- cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
-#ifdef CCISS_DEBUG
- printk("cfg base address index = %llx\n",
- (unsigned long long)cfg_base_addr_index);
-#endif /* CCISS_DEBUG */
- if (cfg_base_addr_index == -1) {
- printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
- err = -ENODEV;
- goto err_out_free_res;
+static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
+{
+ h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ if (h->max_commands < 16) {
+ dev_warn(&h->pdev->dev, "Controller reports "
+ "max supported commands of %d, an obvious lie. "
+ "Using 16. Ensure that firmware is up to date.\n",
+ h->max_commands);
+ h->max_commands = 16;
}
+}
- cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
-#ifdef CCISS_DEBUG
- printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
-#endif /* CCISS_DEBUG */
- c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
- cfg_base_addr_index) +
- cfg_offset, sizeof(CfgTable_struct));
- c->board_id = board_id;
-
-#ifdef CCISS_DEBUG
- print_cfg_table(c->cfgtable);
-#endif /* CCISS_DEBUG */
-
- /* Some controllers support Zero Memory Raid (ZMR).
- * When configured in ZMR mode the number of supported
- * commands drops to 64. So instead of just setting an
- * arbitrary value we make the driver a little smarter.
- * We read the config table to tell us how many commands
- * are supported on the controller then subtract 4 to
- * leave a little room for ioctl calls.
- */
- c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- c->maxsgentries = readl(&(c->cfgtable->MaxSGElements));
-
+/* Interrogate the hardware for some limits:
+ * max commands, max SG elements without chaining, and with chaining,
+ * SG chain block size, etc.
+ */
+static void __devinit cciss_find_board_params(ctlr_info_t *h)
+{
+ cciss_get_max_perf_mode_cmds(h);
+ h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
+ h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
/*
- * Limit native command to 32 s/g elements to save dma'able memory.
+ * Limit in-command s/g elements to 32 to save dma'able memory.
* However spec says if 0, use 31
*/
-
- c->max_cmd_sgentries = 31;
- if (c->maxsgentries > 512) {
- c->max_cmd_sgentries = 32;
- c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1;
- c->maxsgentries -= 1; /* account for chain pointer */
+ h->max_cmd_sgentries = 31;
+ if (h->maxsgentries > 512) {
+ h->max_cmd_sgentries = 32;
+ h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
+ h->maxsgentries--; /* save one for chain pointer */
} else {
- c->maxsgentries = 31; /* Default to traditional value */
- c->chainsize = 0; /* traditional */
+ h->maxsgentries = 31; /* default to traditional values */
+ h->chainsize = 0;
}
+}
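
To make the sizing above concrete: with a hypothetical MaxSGElements of 1024 read from the config table, the driver settles on 32 in-command SG slots, a chain block of 993 entries, and 1023 usable entries per request (one slot is spent on the chain pointer); a controller reporting 512 or fewer falls back to the traditional 31/0 values. A tiny sketch of that arithmetic:

#include <stdio.h>

/* Mirrors the sizing logic in cciss_find_board_params() above,
 * fed a hypothetical MaxSGElements value. */
int main(void)
{
	unsigned int maxsgentries = 1024;	/* hypothetical config-table value */
	unsigned int max_cmd_sgentries = 31;
	unsigned int chainsize = 0;

	if (maxsgentries > 512) {
		max_cmd_sgentries = 32;
		chainsize = maxsgentries - max_cmd_sgentries + 1;
		maxsgentries--;		/* one entry becomes the chain pointer */
	} else {
		maxsgentries = 31;	/* traditional values */
		chainsize = 0;
	}
	printf("in-command SG slots %u, chain block %u, usable per request %u\n",
	       max_cmd_sgentries, chainsize, maxsgentries);
	return 0;
}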
- c->product_name = products[prod_index].product_name;
- c->access = *(products[prod_index].access);
- c->nr_cmds = c->max_commands - 4;
- if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
- (readb(&c->cfgtable->Signature[1]) != 'I') ||
- (readb(&c->cfgtable->Signature[2]) != 'S') ||
- (readb(&c->cfgtable->Signature[3]) != 'S')) {
- printk("Does not appear to be a valid CISS config table\n");
- err = -ENODEV;
- goto err_out_free_res;
+static inline bool CISS_signature_present(ctlr_info_t *h)
+{
+ if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+ (readb(&h->cfgtable->Signature[1]) != 'I') ||
+ (readb(&h->cfgtable->Signature[2]) != 'S') ||
+ (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
+ return false;
}
+ return true;
+}
+
+/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
+{
#ifdef CONFIG_X86
- {
- /* Need to enable prefetch in the SCSI core for 6400 in x86 */
- __u32 prefetch;
- prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
- prefetch |= 0x100;
- writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
- }
+ u32 prefetch;
+
+ prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+ prefetch |= 0x100;
+ writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
+}
- /* Disabling DMA prefetch and refetch for the P600.
- * An ASIC bug may result in accesses to invalid memory addresses.
- * We've disabled prefetch for some time now. Testing with XEN
- * kernels revealed a bug in the refetch if dom0 resides on a P600.
- */
- if(board_id == 0x3225103C) {
- __u32 dma_prefetch;
- __u32 dma_refetch;
- dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
- dma_prefetch |= 0x8000;
- writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
- pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
- dma_refetch |= 0x1;
- pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
+/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
+ * in a prefetch beyond physical memory.
+ */
+static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
+{
+ u32 dma_prefetch;
+ u32 dma_refetch;
+
+ if (h->board_id != 0x3225103C)
+ return;
+ dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ dma_prefetch |= 0x8000;
+ writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+ pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
+ dma_refetch |= 0x1;
+ pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
+}
+
+static int __devinit cciss_pci_init(ctlr_info_t *h)
+{
+ int prod_index, err;
+
+ prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
+ if (prod_index < 0)
+ return -ENODEV;
+ h->product_name = products[prod_index].product_name;
+ h->access = *(products[prod_index].access);
+
+ if (cciss_board_disabled(h)) {
+ dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+ return -ENODEV;
+ }
+ err = pci_enable_device(h->pdev);
+ if (err) {
+ dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
+ return err;
}
-#ifdef CCISS_DEBUG
- printk("Trying to put board into Simple mode\n");
-#endif /* CCISS_DEBUG */
- c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- /* Update the field, and then ring the doorbell */
- writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
- writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+ err = pci_request_regions(h->pdev, "cciss");
+ if (err) {
+ dev_warn(&h->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
+ return err;
+ }
- /* under certain very rare conditions, this can take awhile.
- * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
- * as we enter this code.) */
- for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- /* delay and try again */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(1));
+ dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
+ dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
+
+/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
+ * else we use the IO-APIC interrupt assigned to us by system ROM.
+ */
+ cciss_interrupt_mode(h);
+ err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
+ if (err)
+ goto err_out_free_res;
+ h->vaddr = remap_pci_mem(h->paddr, 0x250);
+ if (!h->vaddr) {
+ err = -ENOMEM;
+ goto err_out_free_res;
}
+ err = cciss_wait_for_board_ready(h);
+ if (err)
+ goto err_out_free_res;
+ err = cciss_find_cfgtables(h);
+ if (err)
+ goto err_out_free_res;
+ print_cfg_table(h);
+ cciss_find_board_params(h);
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "I counter got to %d %x\n", i,
- readl(c->vaddr + SA5_DOORBELL));
-#endif /* CCISS_DEBUG */
-#ifdef CCISS_DEBUG
- print_cfg_table(c->cfgtable);
-#endif /* CCISS_DEBUG */
-
- if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- printk(KERN_WARNING "cciss: unable to get board into"
- " simple mode\n");
+ if (!CISS_signature_present(h)) {
err = -ENODEV;
goto err_out_free_res;
}
+ cciss_enable_scsi_prefetch(h);
+ cciss_p600_dma_prefetch_quirk(h);
+ cciss_put_controller_into_performant_mode(h);
return 0;
err_out_free_res:
@@ -3913,42 +4222,47 @@ err_out_free_res:
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
- pci_release_regions(pdev);
+ if (h->transtable)
+ iounmap(h->transtable);
+ if (h->cfgtable)
+ iounmap(h->cfgtable);
+ if (h->vaddr)
+ iounmap(h->vaddr);
+ pci_release_regions(h->pdev);
return err;
}
/* Function to find the first free pointer into our hba[] array
* Returns -1 if no free entries are left.
*/
-static int alloc_cciss_hba(void)
+static int alloc_cciss_hba(struct pci_dev *pdev)
{
int i;
for (i = 0; i < MAX_CTLR; i++) {
if (!hba[i]) {
- ctlr_info_t *p;
+ ctlr_info_t *h;
- p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if (!p)
+ h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+ if (!h)
goto Enomem;
- hba[i] = p;
+ hba[i] = h;
return i;
}
}
- printk(KERN_WARNING "cciss: This driver supports a maximum"
+ dev_warn(&pdev->dev, "This driver supports a maximum"
" of %d controllers.\n", MAX_CTLR);
return -1;
Enomem:
- printk(KERN_ERR "cciss: out of memory.\n");
+ dev_warn(&pdev->dev, "out of memory.\n");
return -1;
}
-static void free_hba(int n)
+static void free_hba(ctlr_info_t *h)
{
- ctlr_info_t *h = hba[n];
int i;
- hba[n] = NULL;
+ hba[h->ctlr] = NULL;
for (i = 0; i < h->highest_lun + 1; i++)
if (h->gendisk[i] != NULL)
put_disk(h->gendisk[i]);
@@ -4028,7 +4342,8 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
/* we leak the DMA buffer here ... no choice since the controller could
still complete the command. */
if (i == 10) {
- printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
+ dev_err(&pdev->dev,
+ "controller message %02x:%02x timed out\n",
opcode, type);
return -ETIMEDOUT;
}
@@ -4036,12 +4351,12 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
if (tag & 2) {
- printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
+ dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
opcode, type);
return -EIO;
}
- printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
+ dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
opcode, type);
return 0;
}
@@ -4062,7 +4377,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
if (pos) {
pci_read_config_word(pdev, msi_control_reg(pos), &control);
if (control & PCI_MSI_FLAGS_ENABLE) {
- printk(KERN_INFO "cciss: resetting MSI\n");
+ dev_info(&pdev->dev, "resetting MSI\n");
pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
}
}
@@ -4071,7 +4386,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
if (pos) {
pci_read_config_word(pdev, msi_control_reg(pos), &control);
if (control & PCI_MSIX_FLAGS_ENABLE) {
- printk(KERN_INFO "cciss: resetting MSI-X\n");
+ dev_info(&pdev->dev, "resetting MSI-X\n");
pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
}
}
@@ -4079,68 +4394,150 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
return 0;
}
-/* This does a hard reset of the controller using PCI power management
- * states. */
-static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
+static int cciss_controller_hard_reset(struct pci_dev *pdev,
+ void * __iomem vaddr, bool use_doorbell)
{
- u16 pmcsr, saved_config_space[32];
- int i, pos;
+ u16 pmcsr;
+ int pos;
- printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
+ if (use_doorbell) {
+ /* For everything after the P600, the PCI power state method
+ * of resetting the controller doesn't work, so we have this
+ * other way using the doorbell register.
+ */
+ dev_info(&pdev->dev, "using doorbell to reset controller\n");
+ writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
+ msleep(1000);
+ } else { /* Try to do it the PCI power state way */
+
+ /* Quoting from the Open CISS Specification: "The Power
+ * Management Control/Status Register (CSR) controls the power
+ * state of the device. The normal operating state is D0,
+ * CSR=00h. The software off state is D3, CSR=03h. To reset
+ * the controller, place the interface device in D3 then to D0,
+ * this causes a secondary PCI reset which will reset the
+ * controller." */
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pos == 0) {
+ dev_err(&pdev->dev,
+ "cciss_controller_hard_reset: "
+ "PCI PM not supported\n");
+ return -ENODEV;
+ }
+ dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+ /* enter the D3hot power management state */
+ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D3hot;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
- /* This is very nearly the same thing as
+ msleep(500);
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, PCI_D3hot);
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
+ /* enter the D0 power management state */
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D0;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
- but we can't use these nice canned kernel routines on
- kexec, because they also check the MSI/MSI-X state in PCI
- configuration space and do the wrong thing when it is
- set/cleared. Also, the pci_save/restore_state functions
- violate the ordering requirements for restoring the
- configuration space from the CCISS document (see the
- comment below). So we roll our own .... */
+ msleep(500);
+ }
+ return 0;
+}
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+/* This does a hard reset of the controller using PCI power management
+ * states or using the doorbell register. */
+static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+{
+ u16 saved_config_space[32];
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ void __iomem *vaddr;
+ unsigned long paddr;
+ u32 misc_fw_support, active_transport;
+ int rc, i;
+ CfgTable_struct __iomem *cfgtable;
+ bool use_doorbell;
+ u32 board_id;
+
+ /* For controllers as old as the P600, this is very nearly
+ * the same thing as
+ *
+ * pci_save_state(pci_dev);
+ * pci_set_power_state(pci_dev, PCI_D3hot);
+ * pci_set_power_state(pci_dev, PCI_D0);
+ * pci_restore_state(pci_dev);
+ *
+ * but we can't use these nice canned kernel routines on
+ * kexec, because they also check the MSI/MSI-X state in PCI
+ * configuration space and do the wrong thing when it is
+ * set/cleared. Also, the pci_save/restore_state functions
+ * violate the ordering requirements for restoring the
+ * configuration space from the CCISS document (see the
+ * comment below). So we roll our own ....
+ *
+ * For controllers newer than the P600, the pci power state
+ * method of resetting doesn't work so we have another way
+ * using the doorbell register.
+ */
- pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pos == 0) {
- printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
+ /* Exclude 640x boards. These are two pci devices in one slot
+ * which share a battery backed cache module. One controls the
+ * cache, the other accesses the cache through the one that controls
+ * it. If we reset the one controlling the cache, the other will
+ * likely not be happy. Just forbid resetting this conjoined mess.
+ */
+ cciss_lookup_board_id(pdev, &board_id);
+ if (board_id == 0x409C0E11 || board_id == 0x409D0E11) {
+ dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
+ "due to shared cache module.");
return -ENODEV;
}
- /* Quoting from the Open CISS Specification: "The Power
- * Management Control/Status Register (CSR) controls the power
- * state of the device. The normal operating state is D0,
- * CSR=00h. The software off state is D3, CSR=03h. To reset
- * the controller, place the interface device in D3 then to
- * D0, this causes a secondary PCI reset which will reset the
- * controller." */
+ for (i = 0; i < 32; i++)
+ pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+
+ /* find the first memory BAR, so we can find the cfg table */
+ rc = cciss_pci_find_memory_BAR(pdev, &paddr);
+ if (rc)
+ return rc;
+ vaddr = remap_pci_mem(paddr, 0x250);
+ if (!vaddr)
+ return -ENOMEM;
- /* enter the D3hot power management state */
- pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D3hot;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ /* find cfgtable in order to check if reset via doorbell is supported */
+ rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ if (rc)
+ goto unmap_vaddr;
+ cfgtable = remap_pci_mem(pci_resource_start(pdev,
+ cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
+ if (!cfgtable) {
+ rc = -ENOMEM;
+ goto unmap_vaddr;
+ }
- schedule_timeout_uninterruptible(HZ >> 1);
+ /* If reset via doorbell register is supported, use that. */
+ misc_fw_support = readl(&cfgtable->misc_fw_support);
+ use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- /* enter the D0 power management state */
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D0;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ /* The doorbell reset seems to cause lockups on some Smart
+ * Arrays (e.g. P410, P410i, maybe others). Until this is
+ * fixed or at least isolated, avoid the doorbell reset.
+ */
+ use_doorbell = 0;
- schedule_timeout_uninterruptible(HZ >> 1);
+ rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
+ if (rc)
+ goto unmap_cfgtable;
/* Restore the PCI configuration space. The Open CISS
* Specification says, "Restore the PCI Configuration
* Registers, offsets 00h through 60h. It is important to
* restore the command register, 16-bits at offset 04h,
* last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i. */
+ * 16-bits at offset 06h." Note that the offset is 2*i.
+ */
for (i = 0; i < 32; i++) {
if (i == 2 || i == 3)
continue;
@@ -4149,6 +4546,63 @@ static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
wmb();
pci_write_config_word(pdev, 4, saved_config_space[2]);
+ /* Some devices (notably the HP Smart Array 5i Controller)
+ need a little pause here */
+ msleep(CCISS_POST_RESET_PAUSE_MSECS);
+
+ /* Controller should be in simple mode at this point. If it's not,
+ * It means we're on one of those controllers which doesn't support
+ * the doorbell reset method and on which the PCI power management reset
+ * method doesn't work (P800, for example.)
+ * In those cases, don't try to proceed, as it generally doesn't work.
+ */
+ active_transport = readl(&cfgtable->TransportActive);
+ if (active_transport & PERFORMANT_MODE) {
+ dev_warn(&pdev->dev, "Unable to successfully reset controller,"
+ " Ignoring controller.\n");
+ rc = -ENODEV;
+ }
+
+unmap_cfgtable:
+ iounmap(cfgtable);
+
+unmap_vaddr:
+ iounmap(vaddr);
+ return rc;
+}
+
+static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
+{
+ int rc, i;
+
+ if (!reset_devices)
+ return 0;
+
+ /* Reset the controller with a PCI power-cycle or via doorbell */
+ rc = cciss_kdump_hard_reset_controller(pdev);
+
+ /* -ENOTSUPP here means we cannot reset the controller
+ * but it's already (and still) up and running in
+ * "performant mode". Or, it might be 640x, which can't reset
+ * due to concerns about shared bbwc between 6402/6404 pair.
+ */
+ if (rc == -ENOTSUPP)
+ return 0; /* just try to do the kdump anyhow. */
+ if (rc)
+ return -ENODEV;
+ if (cciss_reset_msi(pdev))
+ return -ENODEV;
+
+ /* Now try to get the controller to respond to a no-op */
+ for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
+ if (cciss_noop(pdev) == 0)
+ break;
+ else
+ dev_warn(&pdev->dev, "no-op failed%s\n",
+ (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
+ "; re-trying" : ""));
+ msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
+ }
return 0;
}
@@ -4166,46 +4620,31 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
int rc;
int dac, return_code;
InquiryData_struct *inq_buff;
+ ctlr_info_t *h;
- if (reset_devices) {
- /* Reset the controller with a PCI power-cycle */
- if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
- return -ENODEV;
-
- /* Now try to get the controller to respond to a no-op. Some
- devices (notably the HP Smart Array 5i Controller) need
- up to 30 seconds to respond. */
- for (i=0; i<30; i++) {
- if (cciss_noop(pdev) == 0)
- break;
-
- schedule_timeout_uninterruptible(HZ);
- }
- if (i == 30) {
- printk(KERN_ERR "cciss: controller seems dead\n");
- return -EBUSY;
- }
- }
-
- i = alloc_cciss_hba();
+ rc = cciss_init_reset_devices(pdev);
+ if (rc)
+ return rc;
+ i = alloc_cciss_hba(pdev);
if (i < 0)
return -1;
- hba[i]->busy_initializing = 1;
- INIT_HLIST_HEAD(&hba[i]->cmpQ);
- INIT_HLIST_HEAD(&hba[i]->reqQ);
- mutex_init(&hba[i]->busy_shutting_down);
+ h = hba[i];
+ h->pdev = pdev;
+ h->busy_initializing = 1;
+ INIT_HLIST_HEAD(&h->cmpQ);
+ INIT_HLIST_HEAD(&h->reqQ);
+ mutex_init(&h->busy_shutting_down);
- if (cciss_pci_init(hba[i], pdev) != 0)
+ if (cciss_pci_init(h) != 0)
goto clean_no_release_regions;
- sprintf(hba[i]->devname, "cciss%d", i);
- hba[i]->ctlr = i;
- hba[i]->pdev = pdev;
+ sprintf(h->devname, "cciss%d", i);
+ h->ctlr = i;
- init_completion(&hba[i]->scan_wait);
+ init_completion(&h->scan_wait);
- if (cciss_create_hba_sysfs_entry(hba[i]))
+ if (cciss_create_hba_sysfs_entry(h))
goto clean0;
/* configure PCI DMA stuff */
@@ -4214,7 +4653,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
dac = 0;
else {
- printk(KERN_ERR "cciss: no suitable DMA available\n");
+ dev_err(&h->pdev->dev, "no suitable DMA available\n");
goto clean1;
}
@@ -4224,151 +4663,164 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
* 8 controller support.
*/
if (i < MAX_CTLR_ORIG)
- hba[i]->major = COMPAQ_CISS_MAJOR + i;
- rc = register_blkdev(hba[i]->major, hba[i]->devname);
+ h->major = COMPAQ_CISS_MAJOR + i;
+ rc = register_blkdev(h->major, h->devname);
if (rc == -EBUSY || rc == -EINVAL) {
- printk(KERN_ERR
- "cciss: Unable to get major number %d for %s "
- "on hba %d\n", hba[i]->major, hba[i]->devname, i);
+ dev_err(&h->pdev->dev,
+ "Unable to get major number %d for %s "
+ "on hba %d\n", h->major, h->devname, i);
goto clean1;
} else {
if (i >= MAX_CTLR_ORIG)
- hba[i]->major = rc;
+ h->major = rc;
}
/* make sure the board interrupts are off */
- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
- if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
- IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
- printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
- hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
- goto clean2;
+ h->access.set_intr_mask(h, CCISS_INTR_OFF);
+ if (h->msi_vector || h->msix_vector) {
+ if (request_irq(h->intr[PERF_MODE_INT],
+ do_cciss_msix_intr,
+ IRQF_DISABLED, h->devname, h)) {
+ dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
+ h->intr[PERF_MODE_INT], h->devname);
+ goto clean2;
+ }
+ } else {
+ if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx,
+ IRQF_DISABLED, h->devname, h)) {
+ dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
+ h->intr[PERF_MODE_INT], h->devname);
+ goto clean2;
+ }
}
- printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
- hba[i]->devname, pdev->device, pci_name(pdev),
- hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+ dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+ h->devname, pdev->device, pci_name(pdev),
+ h->intr[PERF_MODE_INT], dac ? "" : " not");
- hba[i]->cmd_pool_bits =
- kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+ h->cmd_pool_bits =
+ kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
* sizeof(unsigned long), GFP_KERNEL);
- hba[i]->cmd_pool = (CommandList_struct *)
- pci_alloc_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(CommandList_struct),
- &(hba[i]->cmd_pool_dhandle));
- hba[i]->errinfo_pool = (ErrorInfo_struct *)
- pci_alloc_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
- &(hba[i]->errinfo_pool_dhandle));
- if ((hba[i]->cmd_pool_bits == NULL)
- || (hba[i]->cmd_pool == NULL)
- || (hba[i]->errinfo_pool == NULL)) {
- printk(KERN_ERR "cciss: out of memory");
+ h->cmd_pool = (CommandList_struct *)
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(CommandList_struct),
+ &(h->cmd_pool_dhandle));
+ h->errinfo_pool = (ErrorInfo_struct *)
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(ErrorInfo_struct),
+ &(h->errinfo_pool_dhandle));
+ if ((h->cmd_pool_bits == NULL)
+ || (h->cmd_pool == NULL)
+ || (h->errinfo_pool == NULL)) {
+ dev_err(&h->pdev->dev, "out of memory");
goto clean4;
}
/* Need space for temp scatter list */
- hba[i]->scatter_list = kmalloc(hba[i]->max_commands *
+ h->scatter_list = kmalloc(h->max_commands *
sizeof(struct scatterlist *),
GFP_KERNEL);
- for (k = 0; k < hba[i]->nr_cmds; k++) {
- hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
- hba[i]->maxsgentries,
+ if (!h->scatter_list)
+ goto clean4;
+
+ for (k = 0; k < h->nr_cmds; k++) {
+ h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
+ h->maxsgentries,
GFP_KERNEL);
- if (hba[i]->scatter_list[k] == NULL) {
- printk(KERN_ERR "cciss%d: could not allocate "
- "s/g lists\n", i);
+ if (h->scatter_list[k] == NULL) {
+ dev_err(&h->pdev->dev,
+ "could not allocate s/g lists\n");
goto clean4;
}
}
- hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i],
- hba[i]->chainsize, hba[i]->nr_cmds);
- if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0)
+ h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
+ h->chainsize, h->nr_cmds);
+ if (!h->cmd_sg_list && h->chainsize > 0)
goto clean4;
- spin_lock_init(&hba[i]->lock);
+ spin_lock_init(&h->lock);
/* Initialize the pdev driver private data.
- have it point to hba[i]. */
- pci_set_drvdata(pdev, hba[i]);
+ have it point to h. */
+ pci_set_drvdata(pdev, h);
/* command and error info recs zeroed out before
they are used */
- memset(hba[i]->cmd_pool_bits, 0,
- DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+ memset(h->cmd_pool_bits, 0,
+ DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
* sizeof(unsigned long));
- hba[i]->num_luns = 0;
- hba[i]->highest_lun = -1;
+ h->num_luns = 0;
+ h->highest_lun = -1;
for (j = 0; j < CISS_MAX_LUN; j++) {
- hba[i]->drv[j] = NULL;
- hba[i]->gendisk[j] = NULL;
+ h->drv[j] = NULL;
+ h->gendisk[j] = NULL;
}
- cciss_scsi_setup(i);
+ cciss_scsi_setup(h);
/* Turn the interrupts on so we can service requests */
- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
+ h->access.set_intr_mask(h, CCISS_INTR_ON);
/* Get the firmware version */
inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
if (inq_buff == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
+ dev_err(&h->pdev->dev, "out of memory\n");
goto clean4;
}
- return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
+ return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
- hba[i]->firm_ver[0] = inq_buff->data_byte[32];
- hba[i]->firm_ver[1] = inq_buff->data_byte[33];
- hba[i]->firm_ver[2] = inq_buff->data_byte[34];
- hba[i]->firm_ver[3] = inq_buff->data_byte[35];
+ h->firm_ver[0] = inq_buff->data_byte[32];
+ h->firm_ver[1] = inq_buff->data_byte[33];
+ h->firm_ver[2] = inq_buff->data_byte[34];
+ h->firm_ver[3] = inq_buff->data_byte[35];
} else { /* send command failed */
- printk(KERN_WARNING "cciss: unable to determine firmware"
+ dev_warn(&h->pdev->dev, "unable to determine firmware"
" version of controller\n");
}
kfree(inq_buff);
- cciss_procinit(i);
+ cciss_procinit(h);
- hba[i]->cciss_max_sectors = 8192;
+ h->cciss_max_sectors = 8192;
- rebuild_lun_table(hba[i], 1, 0);
- hba[i]->busy_initializing = 0;
+ rebuild_lun_table(h, 1, 0);
+ h->busy_initializing = 0;
return 1;
clean4:
- kfree(hba[i]->cmd_pool_bits);
+ kfree(h->cmd_pool_bits);
/* Free up sg elements */
- for (k = 0; k < hba[i]->nr_cmds; k++)
- kfree(hba[i]->scatter_list[k]);
- kfree(hba[i]->scatter_list);
- cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
- if (hba[i]->cmd_pool)
- pci_free_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(CommandList_struct),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
- if (hba[i]->errinfo_pool)
- pci_free_consistent(hba[i]->pdev,
- hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
- hba[i]->errinfo_pool,
- hba[i]->errinfo_pool_dhandle);
- free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
+ for (k--; k >= 0; k--)
+ kfree(h->scatter_list[k]);
+ kfree(h->scatter_list);
+ cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
+ if (h->cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(CommandList_struct),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->errinfo_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(ErrorInfo_struct),
+ h->errinfo_pool,
+ h->errinfo_pool_dhandle);
+ free_irq(h->intr[PERF_MODE_INT], h);
clean2:
- unregister_blkdev(hba[i]->major, hba[i]->devname);
+ unregister_blkdev(h->major, h->devname);
clean1:
- cciss_destroy_hba_sysfs_entry(hba[i]);
+ cciss_destroy_hba_sysfs_entry(h);
clean0:
pci_release_regions(pdev);
clean_no_release_regions:
- hba[i]->busy_initializing = 0;
+ h->busy_initializing = 0;
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_set_drvdata(pdev, NULL);
- free_hba(i);
+ free_hba(h);
return -1;
}
@@ -4381,55 +4833,51 @@ static void cciss_shutdown(struct pci_dev *pdev)
h = pci_get_drvdata(pdev);
flush_buf = kzalloc(4, GFP_KERNEL);
if (!flush_buf) {
- printk(KERN_WARNING
- "cciss:%d cache not flushed, out of memory.\n",
- h->ctlr);
+ dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
return;
}
/* write all data in the battery backed cache to disk */
memset(flush_buf, 0, 4);
- return_code = sendcmd_withirq(CCISS_CACHE_FLUSH, h->ctlr, flush_buf,
+ return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
4, 0, CTLR_LUNID, TYPE_CMD);
kfree(flush_buf);
if (return_code != IO_OK)
- printk(KERN_WARNING "cciss%d: Error flushing cache\n",
- h->ctlr);
+ dev_warn(&h->pdev->dev, "Error flushing cache\n");
h->access.set_intr_mask(h, CCISS_INTR_OFF);
- free_irq(h->intr[2], h);
+ free_irq(h->intr[PERF_MODE_INT], h);
}
static void __devexit cciss_remove_one(struct pci_dev *pdev)
{
- ctlr_info_t *tmp_ptr;
+ ctlr_info_t *h;
int i, j;
if (pci_get_drvdata(pdev) == NULL) {
- printk(KERN_ERR "cciss: Unable to remove device \n");
+ dev_err(&pdev->dev, "Unable to remove device\n");
return;
}
- tmp_ptr = pci_get_drvdata(pdev);
- i = tmp_ptr->ctlr;
+ h = pci_get_drvdata(pdev);
+ i = h->ctlr;
if (hba[i] == NULL) {
- printk(KERN_ERR "cciss: device appears to "
- "already be removed \n");
+ dev_err(&pdev->dev, "device appears to already be removed\n");
return;
}
- mutex_lock(&hba[i]->busy_shutting_down);
+ mutex_lock(&h->busy_shutting_down);
- remove_from_scan_list(hba[i]);
- remove_proc_entry(hba[i]->devname, proc_cciss);
- unregister_blkdev(hba[i]->major, hba[i]->devname);
+ remove_from_scan_list(h);
+ remove_proc_entry(h->devname, proc_cciss);
+ unregister_blkdev(h->major, h->devname);
/* remove it from the disk list */
for (j = 0; j < CISS_MAX_LUN; j++) {
- struct gendisk *disk = hba[i]->gendisk[j];
+ struct gendisk *disk = h->gendisk[j];
if (disk) {
struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP) {
- cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
+ cciss_destroy_ld_sysfs_entry(h, j, 1);
del_gendisk(disk);
}
if (q)
@@ -4438,39 +4886,41 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
+ cciss_unregister_scsi(h); /* unhook from SCSI subsystem */
#endif
cciss_shutdown(pdev);
#ifdef CONFIG_PCI_MSI
- if (hba[i]->msix_vector)
- pci_disable_msix(hba[i]->pdev);
- else if (hba[i]->msi_vector)
- pci_disable_msi(hba[i]->pdev);
+ if (h->msix_vector)
+ pci_disable_msix(h->pdev);
+ else if (h->msi_vector)
+ pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
- iounmap(hba[i]->vaddr);
+ iounmap(h->transtable);
+ iounmap(h->cfgtable);
+ iounmap(h->vaddr);
- pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
- pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
- hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
- kfree(hba[i]->cmd_pool_bits);
+ pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct),
+ h->errinfo_pool, h->errinfo_pool_dhandle);
+ kfree(h->cmd_pool_bits);
/* Free up sg elements */
- for (j = 0; j < hba[i]->nr_cmds; j++)
- kfree(hba[i]->scatter_list[j]);
- kfree(hba[i]->scatter_list);
- cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
+ for (j = 0; j < h->nr_cmds; j++)
+ kfree(h->scatter_list[j]);
+ kfree(h->scatter_list);
+ cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- cciss_destroy_hba_sysfs_entry(hba[i]);
- mutex_unlock(&hba[i]->busy_shutting_down);
- free_hba(i);
+ cciss_destroy_hba_sysfs_entry(h);
+ mutex_unlock(&h->busy_shutting_down);
+ free_hba(h);
}
static struct pci_driver cciss_pci_driver = {
@@ -4495,7 +4945,6 @@ static int __init cciss_init(void)
* array of them, the size must be a multiple of 8 bytes.
*/
BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
-
printk(KERN_INFO DRIVER_NAME "\n");
err = bus_register(&cciss_bus_type);
@@ -4532,8 +4981,8 @@ static void __exit cciss_cleanup(void)
/* double check that all controller entrys have been removed */
for (i = 0; i < MAX_CTLR; i++) {
if (hba[i] != NULL) {
- printk(KERN_WARNING "cciss: had to remove"
- " controller %d\n", i);
+ dev_warn(&hba[i]->pdev->dev,
+ "had to remove controller\n");
cciss_remove_one(hba[i]->pdev);
}
}
@@ -4542,46 +4991,5 @@ static void __exit cciss_cleanup(void)
bus_unregister(&cciss_bus_type);
}
-static void fail_all_cmds(unsigned long ctlr)
-{
- /* If we get here, the board is apparently dead. */
- ctlr_info_t *h = hba[ctlr];
- CommandList_struct *c;
- unsigned long flags;
-
- printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
- h->alive = 0; /* the controller apparently died... */
-
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-
- pci_disable_device(h->pdev); /* Make sure it is really dead. */
-
- /* move everything off the request queue onto the completed queue */
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, CommandList_struct, list);
- removeQ(c);
- h->Qdepth--;
- addQ(&h->cmpQ, c);
- }
-
- /* Now, fail everything on the completed queue with a HW error */
- while (!hlist_empty(&h->cmpQ)) {
- c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
- removeQ(c);
- if (c->cmd_type != CMD_MSG_STALE)
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- if (c->cmd_type == CMD_RWREQ) {
- complete_command(h, c, 0);
- } else if (c->cmd_type == CMD_IOCTL_PEND)
- complete(c->waiting);
-#ifdef CONFIG_CISS_SCSI_TAPE
- else if (c->cmd_type == CMD_SCSI)
- complete_scsi_command(c, 0, 0);
-#endif
- }
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- return;
-}
-
module_init(cciss_init);
module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index c5d411174db..ae340ffc8f8 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -25,7 +25,7 @@ struct access_method {
void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
unsigned long (*fifo_full)(ctlr_info_t *h);
- unsigned long (*intr_pending)(ctlr_info_t *h);
+ bool (*intr_pending)(ctlr_info_t *h);
unsigned long (*command_completed)(ctlr_info_t *h);
};
typedef struct _drive_info_struct
@@ -85,8 +85,8 @@ struct ctlr_info
int max_cmd_sgentries;
SGDescriptor_struct **cmd_sg_list;
-# define DOORBELL_INT 0
-# define PERF_MODE_INT 1
+# define PERF_MODE_INT 0
+# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
unsigned int intr[4];
@@ -137,10 +137,27 @@ struct ctlr_info
struct list_head scan_list;
struct completion scan_wait;
struct device dev;
+ /*
+ * Performant mode tables.
+ */
+ u32 trans_support;
+ u32 trans_offset;
+ struct TransTable_struct *transtable;
+ unsigned long transMethod;
+
+ /*
+ * Performant mode completion buffer
+ */
+ u64 *reply_pool;
+ dma_addr_t reply_pool_dhandle;
+ u64 *reply_pool_head;
+ size_t reply_pool_size;
+ unsigned char reply_pool_wraparound;
+ u32 *blockFetchTable;
};
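The new reply_pool fields form a host-memory completion ring for performant mode. A minimal sketch of how they might be initialized (not part of this hunk; assumes 32-bit DMA and that the cycle toggle starts at 1, matching the check in SA5_performant_completed later in this patch):

	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
					     &h->reply_pool_dhandle);
	memset(h->reply_pool, 0, h->reply_pool_size);	/* all cycle bits clear */
	h->reply_pool_head = h->reply_pool;
	h->reply_pool_wraparound = 1;	/* assumed initial toggle value */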
-/* Defining the diffent access_menthods */
-/*
+/* Defining the different access_methods
+ *
* Memory mapped FIFO interface (SMART 53xx cards)
*/
#define SA5_DOORBELL 0x20
@@ -159,19 +176,47 @@ struct ctlr_info
#define SA5B_INTR_PENDING 0x04
#define FIFO_EMPTY 0xffffffff
#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
+/* Perf. mode flags */
+#define SA5_PERF_INTR_PENDING 0x04
+#define SA5_PERF_INTR_OFF 0x05
+#define SA5_OUTDB_STATUS_PERF_BIT 0x01
+#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
+#define SA5_OUTDB_CLEAR 0xA0
+#define SA5_OUTDB_STATUS 0x9C
+
#define CISS_ERROR_BIT 0x02
#define CCISS_INTR_ON 1
#define CCISS_INTR_OFF 0
+
+
+/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
+ * between polls of the board to see if it is ready, in
+ * milliseconds.  CCISS_BOARD_READY_ITERATIONS is derived from
+ * the above.
+ */
+#define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define CCISS_BOARD_READY_ITERATIONS \
+ ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
+ CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_POST_RESET_PAUSE_MSECS (3000)
+#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
+#define CCISS_POST_RESET_NOOP_RETRIES (12)
+
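These constants describe a poll-until-ready loop: 120 seconds at 100 ms per poll, i.e. 1200 iterations. The wait routine itself is not in this hunk; a sketch under the assumption that readiness is read from a scratchpad register (the offset name below is hypothetical):

	static int cciss_wait_for_board_ready(ctlr_info_t *h)
	{
		int i;
		u32 scratchpad;

		for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
			scratchpad = readl(h->vaddr + SCRATCHPAD_OFFSET); /* hypothetical offset */
			if (scratchpad == CCISS_FIRMWARE_READY)
				return 0;
			msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
		}
		return -ENODEV;	/* board never became ready */
	}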
/*
Send the command to the hardware
*/
static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
{
#ifdef CCISS_DEBUG
- printk("Sending %x - down to controller\n", c->busaddr );
-#endif /* CCISS_DEBUG */
+ printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
+ h->ctlr, c->busaddr);
+#endif /* CCISS_DEBUG */
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
h->commands_outstanding++;
if ( h->commands_outstanding > h->max_outstanding)
@@ -214,6 +259,20 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
}
}
+
+/* Performant mode intr_mask */
+static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+ if (val) { /* turn on interrupts */
+ h->interrupts_enabled = 1;
+ writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ } else {
+ h->interrupts_enabled = 0;
+ writel(SA5_PERF_INTR_OFF,
+ h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ }
+}
+
/*
* Returns true if fifo is full.
*
@@ -250,10 +309,44 @@ static unsigned long SA5_completed(ctlr_info_t *h)
return ( register_value);
}
+
+/* Performant mode command completed */
+static unsigned long SA5_performant_completed(ctlr_info_t *h)
+{
+ unsigned long register_value = FIFO_EMPTY;
+
+ /* flush the controller write of the reply queue by reading
+ * outbound doorbell status register.
+ */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ /* msi auto clears the interrupt pending bit. */
+ if (!(h->msi_vector || h->msix_vector)) {
+ writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
+ /* Do a read in order to flush the write to the controller
+ * (as per spec.)
+ */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ }
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ register_value = *(h->reply_pool_head);
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ register_value = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+
+ return register_value;
+}
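SA5_performant_completed pops one tag per call and reports an empty ring with FIFO_EMPTY, so a consumer (the interrupt handler lives outside this hunk) would drain it roughly as below; finish_cmd() is a hypothetical placeholder for the per-command completion work:

	unsigned long tag;

	while ((tag = h->access.command_completed(h)) != FIFO_EMPTY)
		finish_cmd(h, tag);	/* hypothetical: look up and complete the command */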
/*
* Returns true if an interrupt is pending..
*/
-static unsigned long SA5_intr_pending(ctlr_info_t *h)
+static bool SA5_intr_pending(ctlr_info_t *h)
{
unsigned long register_value =
readl(h->vaddr + SA5_INTR_STATUS);
@@ -268,7 +361,7 @@ static unsigned long SA5_intr_pending(ctlr_info_t *h)
/*
* Returns true if an interrupt is pending..
*/
-static unsigned long SA5B_intr_pending(ctlr_info_t *h)
+static bool SA5B_intr_pending(ctlr_info_t *h)
{
unsigned long register_value =
readl(h->vaddr + SA5_INTR_STATUS);
@@ -280,6 +373,20 @@ static unsigned long SA5B_intr_pending(ctlr_info_t *h)
return 0 ;
}
+static bool SA5_performant_intr_pending(ctlr_info_t *h)
+{
+ unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+ if (!register_value)
+ return false;
+
+ if (h->msi_vector || h->msix_vector)
+ return true;
+
+ /* Read outbound doorbell to flush */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+}
static struct access_method SA5_access = {
SA5_submit_command,
@@ -297,6 +404,14 @@ static struct access_method SA5B_access = {
SA5_completed,
};
+static struct access_method SA5_performant_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
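The new table plugs the performant-mode callbacks into the existing access_method indirection, so call sites such as h->access.set_intr_mask() are unchanged. A hedged sketch of how a controller reporting performant support might be wired up during init (the detection code is not in this hunk):

	if (h->trans_support & CFGTBL_Trans_Performant)
		h->access = SA5_performant_access;
	else
		h->access = SA5_access;

	/* callers are unaffected by which table was chosen: */
	h->access.set_intr_mask(h, CCISS_INTR_ON);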
struct board_type {
__u32 board_id;
char *product_name;
@@ -304,6 +419,4 @@ struct board_type {
int nr_cmds; /* Max cmds this kind of ctlr can handle. */
};
-#define CCISS_LOCK(i) (&hba[i]->lock)
-
#endif /* CCISS_H */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index e624ff959cb..eb060f1b00b 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -52,8 +52,10 @@
/* Configuration Table */
#define CFGTBL_ChangeReq 0x00000001l
#define CFGTBL_AccCmds 0x00000001l
+#define DOORBELL_CTLR_RESET 0x00000004l
#define CFGTBL_Trans_Simple 0x00000002l
+#define CFGTBL_Trans_Performant 0x00000004l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -173,12 +175,15 @@ typedef struct _SGDescriptor_struct {
* PAD_64 can be adjusted independently as needed for 32-bit
* and 64-bits systems.
*/
-#define COMMANDLIST_ALIGNMENT (8)
+#define COMMANDLIST_ALIGNMENT (32)
#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
+#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_SHIFT 5
+
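Raising COMMANDLIST_ALIGNMENT to 32 keeps the low five bits of each command's bus address clear, which is what allows a tag to carry flag bits below DIRECT_LOOKUP_SHIFT. An illustrative encoding only; the real helpers are not part of this hunk and the names below are invented:

	static inline u32 cciss_make_tag(u32 cmd_index)
	{
		return (cmd_index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
	}

	static inline u32 cciss_tag_to_index(u32 tag)
	{
		return tag >> DIRECT_LOOKUP_SHIFT;
	}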
typedef struct _CommandList_struct {
CommandListHeader_struct Header;
RequestBlock_struct Request;
@@ -195,7 +200,7 @@ typedef struct _CommandList_struct {
struct completion *waiting;
int retry_count;
void * scsi_cmd;
- char pad[PADSIZE];
+ char pad[PADSIZE];
} CommandList_struct;
/* Configuration Table Structure */
@@ -209,12 +214,15 @@ typedef struct _HostWrite_struct {
typedef struct _CfgTable_struct {
BYTE Signature[4];
DWORD SpecValence;
+#define SIMPLE_MODE 0x02
+#define PERFORMANT_MODE 0x04
+#define MEMQ_MODE 0x08
DWORD TransportSupport;
DWORD TransportActive;
HostWrite_struct HostWrite;
DWORD CmdsOutMax;
DWORD BusTypes;
- DWORD Reserved;
+ DWORD TransMethodOffset;
BYTE ServerName[16];
DWORD HeartBeat;
DWORD SCSI_Prefetch;
@@ -222,6 +230,28 @@ typedef struct _CfgTable_struct {
DWORD MaxLogicalUnits;
DWORD MaxPhysicalDrives;
DWORD MaxPhysicalDrivesPerLogicalUnit;
+ DWORD MaxPerformantModeCommands;
+ u8 reserved[0x78 - 0x58];
+ u32 misc_fw_support; /* offset 0x78 */
+#define MISC_FW_DOORBELL_RESET (0x02)
} CfgTable_struct;
+
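DOORBELL_CTLR_RESET together with misc_fw_support/MISC_FW_DOORBELL_RESET suggests a firmware-gated controller reset path. The reset routine is not in this hunk, but under that assumption it would look roughly like:

	if (readl(&h->cfgtable->misc_fw_support) & MISC_FW_DOORBELL_RESET) {
		writel(DOORBELL_CTLR_RESET, h->vaddr + SA5_DOORBELL);
		msleep(CCISS_POST_RESET_PAUSE_MSECS);
	}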
+struct TransTable_struct {
+ u32 BlockFetch0;
+ u32 BlockFetch1;
+ u32 BlockFetch2;
+ u32 BlockFetch3;
+ u32 BlockFetch4;
+ u32 BlockFetch5;
+ u32 BlockFetch6;
+ u32 BlockFetch7;
+ u32 RepQSize;
+ u32 RepQCount;
+ u32 RepQCtrAddrLow32;
+ u32 RepQCtrAddrHigh32;
+ u32 RepQAddr0Low32;
+ u32 RepQAddr0High32;
+};
+
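A minimal sketch of how the host might describe its single reply queue to the controller through this table when switching to performant mode (the actual transition code, including the config-table handshake, is outside this hunk; 32-bit DMA assumed):

	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel((u32) h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	/* then request CFGTBL_Trans_Performant via the config table and
	 * ring SA5_DOORBELL with CFGTBL_ChangeReq -- details omitted. */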
#pragma pack()
#endif /* CCISS_CMD_H */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 72dae92f3ca..575495f3c4b 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -44,13 +44,15 @@
#define CCISS_ABORT_MSG 0x00
#define CCISS_RESET_MSG 0x01
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
size_t size,
__u8 page_code, unsigned char *scsi3addr,
int cmd_type);
-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
+static CommandList_struct *cmd_alloc(ctlr_info_t *h);
+static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
+static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
static int cciss_scsi_proc_info(
struct Scsi_Host *sh,
@@ -93,8 +95,8 @@ static struct scsi_host_template cciss_driver_template = {
#pragma pack(1)
-#define SCSI_PAD_32 0
-#define SCSI_PAD_64 0
+#define SCSI_PAD_32 8
+#define SCSI_PAD_64 8
struct cciss_scsi_cmd_stack_elem_t {
CommandList_struct cmd;
@@ -127,16 +129,16 @@ struct cciss_scsi_adapter_data_t {
spinlock_t lock; // to protect ccissscsi[ctlr];
};
-#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
- &hba[ctlr]->scsi_ctlr->lock, flags);
-#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
- &hba[ctlr]->scsi_ctlr->lock, flags);
+#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
+ &h->scsi_ctlr->lock, flags);
+#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
+ &h->scsi_ctlr->lock, flags);
static CommandList_struct *
scsi_cmd_alloc(ctlr_info_t *h)
{
/* assume only one process in here at a time, locking done by caller. */
- /* use CCISS_LOCK(ctlr) */
+ /* use h->lock */
/* might be better to rewrite how we allocate scsi commands in a way that */
/* needs no locking at all. */
@@ -177,10 +179,10 @@ scsi_cmd_alloc(ctlr_info_t *h)
}
static void
-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
+scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
{
/* assume only one process in here at a time, locking done by caller. */
- /* use CCISS_LOCK(ctlr) */
+ /* use h->lock */
/* drop the free memory chunk on top of the stack. */
struct cciss_scsi_adapter_data_t *sa;
@@ -190,22 +192,23 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
stk = &sa->cmd_stack;
stk->top++;
if (stk->top >= CMD_STACK_SIZE) {
- printk("cciss: scsi_cmd_free called too many times.\n");
+ dev_err(&h->pdev->dev,
+ "scsi_cmd_free called too many times.\n");
BUG();
}
- stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
+ stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
}
static int
-scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
+scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
{
int i;
struct cciss_scsi_cmd_stack_t *stk;
size_t size;
- sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr],
- hba[ctlr]->chainsize, CMD_STACK_SIZE);
- if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0)
+ sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
+ h->chainsize, CMD_STACK_SIZE);
+ if (!sa->cmd_sg_list && h->chainsize > 0)
return -ENOMEM;
stk = &sa->cmd_stack;
@@ -215,7 +218,7 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
- pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
+ pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
if (stk->pool == NULL) {
cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
@@ -234,23 +237,22 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
}
static void
-scsi_cmd_stack_free(int ctlr)
+scsi_cmd_stack_free(ctlr_info_t *h)
{
struct cciss_scsi_adapter_data_t *sa;
struct cciss_scsi_cmd_stack_t *stk;
size_t size;
- sa = hba[ctlr]->scsi_ctlr;
+ sa = h->scsi_ctlr;
stk = &sa->cmd_stack;
if (stk->top != CMD_STACK_SIZE-1) {
- printk( "cciss: %d scsi commands are still outstanding.\n",
+ dev_warn(&h->pdev->dev,
+ "bug: %d scsi commands are still outstanding.\n",
CMD_STACK_SIZE - stk->top);
- // BUG();
- printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk);
}
size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
- pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
+ pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
stk->pool = NULL;
cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
}
@@ -342,20 +344,20 @@ print_cmd(CommandList_struct *cp)
#endif
static int
-find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
+find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
{
/* finds an unused bus, target, lun for a new device */
- /* assumes hba[ctlr]->scsi_ctlr->lock is held */
+ /* assumes h->scsi_ctlr->lock is held */
int i, found=0;
unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
target_taken[SELF_SCSI_ID] = 1;
- for (i=0;i<ccissscsi[ctlr].ndevices;i++)
- target_taken[ccissscsi[ctlr].dev[i].target] = 1;
+ for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
+ target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
- for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
+ for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
if (!target_taken[i]) {
*bus = 0; *target=i; *lun = 0; found=1;
break;
@@ -369,19 +371,19 @@ struct scsi2map {
};
static int
-cciss_scsi_add_entry(int ctlr, int hostno,
+cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
struct cciss_scsi_dev_t *device,
struct scsi2map *added, int *nadded)
{
- /* assumes hba[ctlr]->scsi_ctlr->lock is held */
- int n = ccissscsi[ctlr].ndevices;
+ /* assumes h->scsi_ctlr->lock is held */
+ int n = ccissscsi[h->ctlr].ndevices;
struct cciss_scsi_dev_t *sd;
int i, bus, target, lun;
unsigned char addr1[8], addr2[8];
if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
- printk("cciss%d: Too many devices, "
- "some will be inaccessible.\n", ctlr);
+ dev_warn(&h->pdev->dev, "Too many devices, "
+ "some will be inaccessible.\n");
return -1;
}
@@ -397,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
memcpy(addr1, device->scsi3addr, 8);
addr1[4] = 0;
for (i = 0; i < n; i++) {
- sd = &ccissscsi[ctlr].dev[i];
+ sd = &ccissscsi[h->ctlr].dev[i];
memcpy(addr2, sd->scsi3addr, 8);
addr2[4] = 0;
/* differ only in byte 4? */
@@ -410,9 +412,9 @@ cciss_scsi_add_entry(int ctlr, int hostno,
}
}
- sd = &ccissscsi[ctlr].dev[n];
+ sd = &ccissscsi[h->ctlr].dev[n];
if (lun == 0) {
- if (find_bus_target_lun(ctlr,
+ if (find_bus_target_lun(h,
&sd->bus, &sd->target, &sd->lun) != 0)
return -1;
} else {
@@ -431,37 +433,37 @@ cciss_scsi_add_entry(int ctlr, int hostno,
memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
sd->devtype = device->devtype;
- ccissscsi[ctlr].ndevices++;
+ ccissscsi[h->ctlr].ndevices++;
/* initially, (before registering with scsi layer) we don't
know our hostno and we don't want to print anything first
time anyway (the scsi layer's inquiries will show that info) */
if (hostno != -1)
- printk("cciss%d: %s device c%db%dt%dl%d added.\n",
- ctlr, scsi_device_type(sd->devtype), hostno,
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
+ scsi_device_type(sd->devtype), hostno,
sd->bus, sd->target, sd->lun);
return 0;
}
static void
-cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
+cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
struct scsi2map *removed, int *nremoved)
{
- /* assumes hba[ctlr]->scsi_ctlr->lock is held */
+ /* assumes h->scsi_ctlr->lock is held */
int i;
struct cciss_scsi_dev_t sd;
if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
- sd = ccissscsi[ctlr].dev[entry];
+ sd = ccissscsi[h->ctlr].dev[entry];
removed[*nremoved].bus = sd.bus;
removed[*nremoved].target = sd.target;
removed[*nremoved].lun = sd.lun;
(*nremoved)++;
- for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
- ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
- ccissscsi[ctlr].ndevices--;
- printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
- ctlr, scsi_device_type(sd.devtype), hostno,
+ for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
+ ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
+ ccissscsi[h->ctlr].ndevices--;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
+ scsi_device_type(sd.devtype), hostno,
sd.bus, sd.target, sd.lun);
}
@@ -476,24 +478,24 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
(a)[1] == (b)[1] && \
(a)[0] == (b)[0])
-static void fixup_botched_add(int ctlr, char *scsi3addr)
+static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
{
/* called when scsi_add_device fails in order to re-adjust */
/* ccissscsi[] to match the mid layer's view. */
unsigned long flags;
int i, j;
- CPQ_TAPE_LOCK(ctlr, flags);
- for (i = 0; i < ccissscsi[ctlr].ndevices; i++) {
+ CPQ_TAPE_LOCK(h, flags);
+ for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
if (memcmp(scsi3addr,
- ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) {
- for (j = i; j < ccissscsi[ctlr].ndevices-1; j++)
- ccissscsi[ctlr].dev[j] =
- ccissscsi[ctlr].dev[j+1];
- ccissscsi[ctlr].ndevices--;
+ ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
+ for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
+ ccissscsi[h->ctlr].dev[j] =
+ ccissscsi[h->ctlr].dev[j+1];
+ ccissscsi[h->ctlr].ndevices--;
break;
}
}
- CPQ_TAPE_UNLOCK(ctlr, flags);
+ CPQ_TAPE_UNLOCK(h, flags);
}
static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
@@ -513,7 +515,7 @@ static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
}
static int
-adjust_cciss_scsi_table(int ctlr, int hostno,
+adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
struct cciss_scsi_dev_t sd[], int nsds)
{
/* sd contains scsi3 addresses and devtypes, but
@@ -534,15 +536,15 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
GFP_KERNEL);
if (!added || !removed) {
- printk(KERN_WARNING "cciss%d: Out of memory in "
- "adjust_cciss_scsi_table\n", ctlr);
+ dev_warn(&h->pdev->dev,
+ "Out of memory in adjust_cciss_scsi_table\n");
goto free_and_out;
}
- CPQ_TAPE_LOCK(ctlr, flags);
+ CPQ_TAPE_LOCK(h, flags);
if (hostno != -1) /* if it's not the first time... */
- sh = hba[ctlr]->scsi_ctlr->scsi_host;
+ sh = h->scsi_ctlr->scsi_host;
/* find any devices in ccissscsi[] that are not in
sd[] and remove them from ccissscsi[] */
@@ -550,8 +552,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
i = 0;
nremoved = 0;
nadded = 0;
- while(i<ccissscsi[ctlr].ndevices) {
- csd = &ccissscsi[ctlr].dev[i];
+ while (i < ccissscsi[h->ctlr].ndevices) {
+ csd = &ccissscsi[h->ctlr].dev[i];
found=0;
for (j=0;j<nsds;j++) {
if (SCSI3ADDR_EQ(sd[j].scsi3addr,
@@ -566,20 +568,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
if (found == 0) { /* device no longer present. */
changes++;
- /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
- ctlr, scsi_device_type(csd->devtype), hostno,
- csd->bus, csd->target, csd->lun); */
- cciss_scsi_remove_entry(ctlr, hostno, i,
+ cciss_scsi_remove_entry(h, hostno, i,
removed, &nremoved);
/* remove ^^^, hence i not incremented */
} else if (found == 1) { /* device is different in some way */
changes++;
- printk("cciss%d: device c%db%dt%dl%d has changed.\n",
- ctlr, hostno, csd->bus, csd->target, csd->lun);
- cciss_scsi_remove_entry(ctlr, hostno, i,
+ dev_info(&h->pdev->dev,
+ "device c%db%dt%dl%d has changed.\n",
+ hostno, csd->bus, csd->target, csd->lun);
+ cciss_scsi_remove_entry(h, hostno, i,
removed, &nremoved);
/* remove ^^^, hence i not incremented */
- if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
+ if (cciss_scsi_add_entry(h, hostno, &sd[j],
added, &nadded) != 0)
/* we just removed one, so add can't fail. */
BUG();
@@ -601,8 +601,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
for (i=0;i<nsds;i++) {
found=0;
- for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
- csd = &ccissscsi[ctlr].dev[j];
+ for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
+ csd = &ccissscsi[h->ctlr].dev[j];
if (SCSI3ADDR_EQ(sd[i].scsi3addr,
csd->scsi3addr)) {
if (device_is_the_same(&sd[i], csd))
@@ -614,18 +614,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
}
if (!found) {
changes++;
- if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
+ if (cciss_scsi_add_entry(h, hostno, &sd[i],
added, &nadded) != 0)
break;
} else if (found == 1) {
/* should never happen... */
changes++;
- printk(KERN_WARNING "cciss%d: device "
- "unexpectedly changed\n", ctlr);
+ dev_warn(&h->pdev->dev,
+ "device unexpectedly changed\n");
/* but if it does happen, we just ignore that device */
}
}
- CPQ_TAPE_UNLOCK(ctlr, flags);
+ CPQ_TAPE_UNLOCK(h, flags);
/* Don't notify scsi mid layer of any changes the first time through */
/* (or if there are no changes) scsi_scan_host will do it later the */
@@ -645,9 +645,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
/* We don't expect to get here. */
/* future cmds to this device will get selection */
/* timeout as if the device was gone. */
- printk(KERN_WARNING "cciss%d: didn't find "
+ dev_warn(&h->pdev->dev, "didn't find "
"c%db%dt%dl%d\n for removal.",
- ctlr, hostno, removed[i].bus,
+ hostno, removed[i].bus,
removed[i].target, removed[i].lun);
}
}
@@ -659,13 +659,12 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
added[i].target, added[i].lun);
if (rc == 0)
continue;
- printk(KERN_WARNING "cciss%d: scsi_add_device "
+ dev_warn(&h->pdev->dev, "scsi_add_device "
"c%db%dt%dl%d failed, device not added.\n",
- ctlr, hostno,
- added[i].bus, added[i].target, added[i].lun);
+ hostno, added[i].bus, added[i].target, added[i].lun);
/* now we have to remove it from ccissscsi, */
/* since it didn't get added to scsi mid layer */
- fixup_botched_add(ctlr, added[i].scsi3addr);
+ fixup_botched_add(h, added[i].scsi3addr);
}
free_and_out:
@@ -675,33 +674,33 @@ free_and_out:
}
static int
-lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
+lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
{
int i;
struct cciss_scsi_dev_t *sd;
unsigned long flags;
- CPQ_TAPE_LOCK(ctlr, flags);
- for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
- sd = &ccissscsi[ctlr].dev[i];
+ CPQ_TAPE_LOCK(h, flags);
+ for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+ sd = &ccissscsi[h->ctlr].dev[i];
if (sd->bus == bus &&
sd->target == target &&
sd->lun == lun) {
memcpy(scsi3addr, &sd->scsi3addr[0], 8);
- CPQ_TAPE_UNLOCK(ctlr, flags);
+ CPQ_TAPE_UNLOCK(h, flags);
return 0;
}
}
- CPQ_TAPE_UNLOCK(ctlr, flags);
+ CPQ_TAPE_UNLOCK(h, flags);
return -1;
}
static void
-cciss_scsi_setup(int cntl_num)
+cciss_scsi_setup(ctlr_info_t *h)
{
struct cciss_scsi_adapter_data_t * shba;
- ccissscsi[cntl_num].ndevices = 0;
+ ccissscsi[h->ctlr].ndevices = 0;
shba = (struct cciss_scsi_adapter_data_t *)
kmalloc(sizeof(*shba), GFP_KERNEL);
if (shba == NULL)
@@ -709,35 +708,35 @@ cciss_scsi_setup(int cntl_num)
shba->scsi_host = NULL;
spin_lock_init(&shba->lock);
shba->registered = 0;
- if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
+ if (scsi_cmd_stack_setup(h, shba) != 0) {
kfree(shba);
shba = NULL;
}
- hba[cntl_num]->scsi_ctlr = shba;
+ h->scsi_ctlr = shba;
return;
}
-static void
-complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+static void complete_scsi_command(CommandList_struct *c, int timeout,
+ __u32 tag)
{
struct scsi_cmnd *cmd;
- ctlr_info_t *ctlr;
+ ctlr_info_t *h;
ErrorInfo_struct *ei;
- ei = cp->err_info;
+ ei = c->err_info;
/* First, see if it was a message rather than a command */
- if (cp->Request.Type.Type == TYPE_MSG) {
- cp->cmd_type = CMD_MSG_DONE;
+ if (c->Request.Type.Type == TYPE_MSG) {
+ c->cmd_type = CMD_MSG_DONE;
return;
}
- cmd = (struct scsi_cmnd *) cp->scsi_cmd;
- ctlr = hba[cp->ctlr];
+ cmd = (struct scsi_cmnd *) c->scsi_cmd;
+ h = hba[c->ctlr];
scsi_dma_unmap(cmd);
- if (cp->Header.SGTotal > ctlr->max_cmd_sgentries)
- cciss_unmap_sg_chain_block(ctlr, cp);
+ if (c->Header.SGTotal > h->max_cmd_sgentries)
+ cciss_unmap_sg_chain_block(h, c);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
@@ -764,9 +763,8 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
{
#if 0
printk(KERN_WARNING "cciss: cmd %p "
- "has SCSI Status = %x\n",
- cp,
- ei->ScsiStatus);
+ "has SCSI Status = %x\n",
+ c, ei->ScsiStatus);
#endif
cmd->result |= (ei->ScsiStatus << 1);
}
@@ -786,13 +784,13 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
break;
case CMD_DATA_OVERRUN:
- printk(KERN_WARNING "cciss: cp %p has"
+ dev_warn(&h->pdev->dev, "%p has"
" completed with data overrun "
- "reported\n", cp);
+ "reported\n", c);
break;
case CMD_INVALID: {
- /* print_bytes(cp, sizeof(*cp), 1, 0);
- print_cmd(cp); */
+ /* print_bytes(c, sizeof(*c), 1, 0);
+ print_cmd(c); */
/* We get CMD_INVALID if you address a non-existent tape drive instead
of a selection timeout (no response). You will see this if you yank
out a tape drive, then try to access it. This is kind of a shame
@@ -802,54 +800,50 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
}
break;
case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cp %p has "
- "protocol error \n", cp);
+ dev_warn(&h->pdev->dev,
+ "%p has protocol error\n", c);
break;
case CMD_HARDWARE_ERR:
cmd->result = DID_ERROR << 16;
- printk(KERN_WARNING "cciss: cp %p had "
- " hardware error\n", cp);
+ dev_warn(&h->pdev->dev,
+ "%p had hardware error\n", c);
break;
case CMD_CONNECTION_LOST:
cmd->result = DID_ERROR << 16;
- printk(KERN_WARNING "cciss: cp %p had "
- "connection lost\n", cp);
+ dev_warn(&h->pdev->dev,
+ "%p had connection lost\n", c);
break;
case CMD_ABORTED:
cmd->result = DID_ABORT << 16;
- printk(KERN_WARNING "cciss: cp %p was "
- "aborted\n", cp);
+ dev_warn(&h->pdev->dev, "%p was aborted\n", c);
break;
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
- printk(KERN_WARNING "cciss: cp %p reports "
- "abort failed\n", cp);
+ dev_warn(&h->pdev->dev,
+ "%p reports abort failed\n", c);
break;
case CMD_UNSOLICITED_ABORT:
cmd->result = DID_ABORT << 16;
- printk(KERN_WARNING "cciss: cp %p aborted "
- "do to an unsolicited abort\n", cp);
+ dev_warn(&h->pdev->dev, "%p aborted do to an "
+ "unsolicited abort\n", c);
break;
case CMD_TIMEOUT:
cmd->result = DID_TIME_OUT << 16;
- printk(KERN_WARNING "cciss: cp %p timedout\n",
- cp);
+ dev_warn(&h->pdev->dev, "%p timedout\n", c);
break;
default:
cmd->result = DID_ERROR << 16;
- printk(KERN_WARNING "cciss: cp %p returned "
- "unknown status %x\n", cp,
+ dev_warn(&h->pdev->dev,
+ "%p returned unknown status %x\n", c,
ei->CommandStatus);
}
}
- // printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel,
- // cmd->target, cmd->lun);
cmd->scsi_done(cmd);
- scsi_cmd_free(ctlr, cp);
+ scsi_cmd_free(h, c);
}
static int
-cciss_scsi_detect(int ctlr)
+cciss_scsi_detect(ctlr_info_t *h)
{
struct Scsi_Host *sh;
int error;
@@ -860,15 +854,15 @@ cciss_scsi_detect(int ctlr)
sh->io_port = 0; // good enough? FIXME,
sh->n_io_port = 0; // I don't think we use these two...
sh->this_id = SELF_SCSI_ID;
- sh->sg_tablesize = hba[ctlr]->maxsgentries;
+ sh->sg_tablesize = h->maxsgentries;
sh->max_cmd_len = MAX_COMMAND_SIZE;
((struct cciss_scsi_adapter_data_t *)
- hba[ctlr]->scsi_ctlr)->scsi_host = sh;
- sh->hostdata[0] = (unsigned long) hba[ctlr];
- sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
+ h->scsi_ctlr)->scsi_host = sh;
+ sh->hostdata[0] = (unsigned long) h;
+ sh->irq = h->intr[SIMPLE_MODE_INT];
sh->unique_id = sh->irq;
- error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
+ error = scsi_add_host(sh, &h->pdev->dev);
if (error)
goto fail_host_put;
scsi_scan_host(sh);
@@ -882,20 +876,20 @@ cciss_scsi_detect(int ctlr)
static void
cciss_unmap_one(struct pci_dev *pdev,
- CommandList_struct *cp,
+ CommandList_struct *c,
size_t buflen,
int data_direction)
{
u64bit addr64;
- addr64.val32.lower = cp->SG[0].Addr.lower;
- addr64.val32.upper = cp->SG[0].Addr.upper;
+ addr64.val32.lower = c->SG[0].Addr.lower;
+ addr64.val32.upper = c->SG[0].Addr.upper;
pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
}
static void
cciss_map_one(struct pci_dev *pdev,
- CommandList_struct *cp,
+ CommandList_struct *c,
unsigned char *buf,
size_t buflen,
int data_direction)
@@ -903,164 +897,149 @@ cciss_map_one(struct pci_dev *pdev,
__u64 addr64;
addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
- cp->SG[0].Addr.lower =
+ c->SG[0].Addr.lower =
(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Addr.upper =
+ c->SG[0].Addr.upper =
(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Len = buflen;
- cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+ c->SG[0].Len = buflen;
+ c->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
+ c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
}
static int
-cciss_scsi_do_simple_cmd(ctlr_info_t *c,
- CommandList_struct *cp,
+cciss_scsi_do_simple_cmd(ctlr_info_t *h,
+ CommandList_struct *c,
unsigned char *scsi3addr,
unsigned char *cdb,
unsigned char cdblen,
unsigned char *buf, int bufsize,
int direction)
{
- unsigned long flags;
DECLARE_COMPLETION_ONSTACK(wait);
- cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl
- cp->scsi_cmd = NULL;
- cp->Header.ReplyQueue = 0; // unused in simple mode
- memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
- cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag
+ c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
+ c->scsi_cmd = NULL;
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
+ c->Header.Tag.lower = c->busaddr; /* use the cmd's bus address as its tag */
// Fill in the request block...
/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
- memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
- memcpy(cp->Request.CDB, cdb, cdblen);
- cp->Request.Timeout = 0;
- cp->Request.CDBLen = cdblen;
- cp->Request.Type.Type = TYPE_CMD;
- cp->Request.Type.Attribute = ATTR_SIMPLE;
- cp->Request.Type.Direction = direction;
+ memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+ memcpy(c->Request.CDB, cdb, cdblen);
+ c->Request.Timeout = 0;
+ c->Request.CDBLen = cdblen;
+ c->Request.Type.Type = TYPE_CMD;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = direction;
/* Fill in the SG list and do dma mapping */
- cciss_map_one(c->pdev, cp, (unsigned char *) buf,
+ cciss_map_one(h->pdev, c, (unsigned char *) buf,
bufsize, DMA_FROM_DEVICE);
- cp->waiting = &wait;
-
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
- addQ(&c->reqQ, cp);
- c->Qdepth++;
- start_io(c);
- spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
-
+ c->waiting = &wait;
+ enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
/* undo the dma mapping */
- cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
+ cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
return(0);
}
static void
-cciss_scsi_interpret_error(CommandList_struct *cp)
+cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
{
ErrorInfo_struct *ei;
- ei = cp->err_info;
+ ei = c->err_info;
switch(ei->CommandStatus)
{
case CMD_TARGET_STATUS:
- printk(KERN_WARNING "cciss: cmd %p has "
- "completed with errors\n", cp);
- printk(KERN_WARNING "cciss: cmd %p "
- "has SCSI Status = %x\n",
- cp,
- ei->ScsiStatus);
+ dev_warn(&h->pdev->dev,
+ "cmd %p has completed with errors\n", c);
+ dev_warn(&h->pdev->dev,
+ "cmd %p has SCSI Status = %x\n",
+ c, ei->ScsiStatus);
if (ei->ScsiStatus == 0)
- printk(KERN_WARNING
- "cciss:SCSI status is abnormally zero. "
+ dev_warn(&h->pdev->dev,
+ "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
"reported incorrectly due to a known "
"firmware bug, circa July, 2001.)\n");
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- printk("UNDERRUN\n");
+ dev_info(&h->pdev->dev, "UNDERRUN\n");
break;
case CMD_DATA_OVERRUN:
- printk(KERN_WARNING "cciss: cp %p has"
+ dev_warn(&h->pdev->dev, "%p has"
" completed with data overrun "
- "reported\n", cp);
+ "reported\n", c);
break;
case CMD_INVALID: {
/* controller unfortunately reports SCSI passthru's */
/* to non-existent targets as invalid commands. */
- printk(KERN_WARNING "cciss: cp %p is "
- "reported invalid (probably means "
- "target device no longer present)\n",
- cp);
- /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
- print_cmd(cp); */
+ dev_warn(&h->pdev->dev,
+ "%p is reported invalid (probably means "
+ "target device no longer present)\n", c);
+ /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
+ print_cmd(c); */
}
break;
case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cp %p has "
- "protocol error \n", cp);
+ dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
break;
case CMD_HARDWARE_ERR:
/* cmd->result = DID_ERROR << 16; */
- printk(KERN_WARNING "cciss: cp %p had "
- " hardware error\n", cp);
+ dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
break;
case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cp %p had "
- "connection lost\n", cp);
+ dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
break;
case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cp %p was "
- "aborted\n", cp);
+ dev_warn(&h->pdev->dev, "%p was aborted\n", c);
break;
case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cp %p reports "
- "abort failed\n", cp);
+ dev_warn(&h->pdev->dev,
+ "%p reports abort failed\n", c);
break;
case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING "cciss: cp %p aborted "
- "do to an unsolicited abort\n", cp);
+ dev_warn(&h->pdev->dev,
+ "%p aborted do to an unsolicited abort\n", c);
break;
case CMD_TIMEOUT:
- printk(KERN_WARNING "cciss: cp %p timedout\n",
- cp);
+ dev_warn(&h->pdev->dev, "%p timedout\n", c);
break;
default:
- printk(KERN_WARNING "cciss: cp %p returned "
- "unknown status %x\n", cp,
- ei->CommandStatus);
+ dev_warn(&h->pdev->dev,
+ "%p returned unknown status %x\n",
+ c, ei->CommandStatus);
}
}
static int
-cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
+cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
unsigned char page, unsigned char *buf,
unsigned char bufsize)
{
int rc;
- CommandList_struct *cp;
+ CommandList_struct *c;
char cdb[6];
ErrorInfo_struct *ei;
unsigned long flags;
- spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
- cp = scsi_cmd_alloc(c);
- spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
+ c = scsi_cmd_alloc(h);
+ spin_unlock_irqrestore(&h->lock, flags);
- if (cp == NULL) { /* trouble... */
+ if (c == NULL) { /* trouble... */
printk("cmd_alloc returned NULL!\n");
return -1;
}
- ei = cp->err_info;
+ ei = c->err_info;
cdb[0] = CISS_INQUIRY;
cdb[1] = (page != 0);
@@ -1068,24 +1047,24 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
cdb[3] = 0;
cdb[4] = bufsize;
cdb[5] = 0;
- rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
+ rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
6, buf, bufsize, XFER_READ);
if (rc != 0) return rc; /* something went wrong */
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
- cciss_scsi_interpret_error(cp);
+ cciss_scsi_interpret_error(h, c);
rc = -1;
}
- spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
- scsi_cmd_free(c, cp);
- spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
+ scsi_cmd_free(h, c);
+ spin_unlock_irqrestore(&h->lock, flags);
return rc;
}
/* Get the device id from inquiry page 0x83 */
-static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
unsigned char *device_id, int buflen)
{
int rc;
@@ -1096,7 +1075,7 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -1;
- rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
+ rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
kfree(buf);
@@ -1104,20 +1083,20 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
}
static int
-cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
+cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
ReportLunData_struct *buf, int bufsize)
{
int rc;
- CommandList_struct *cp;
+ CommandList_struct *c;
unsigned char cdb[12];
unsigned char scsi3addr[8];
ErrorInfo_struct *ei;
unsigned long flags;
- spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
- cp = scsi_cmd_alloc(c);
- spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
- if (cp == NULL) { /* trouble... */
+ spin_lock_irqsave(&h->lock, flags);
+ c = scsi_cmd_alloc(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (c == NULL) { /* trouble... */
printk("cmd_alloc returned NULL!\n");
return -1;
}
@@ -1136,27 +1115,27 @@ cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
cdb[10] = 0;
cdb[11] = 0;
- rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr,
+ rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
cdb, 12,
(unsigned char *) buf,
bufsize, XFER_READ);
if (rc != 0) return rc; /* something went wrong */
- ei = cp->err_info;
+ ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
- cciss_scsi_interpret_error(cp);
+ cciss_scsi_interpret_error(h, c);
rc = -1;
}
- spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
- scsi_cmd_free(c, cp);
- spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
+ scsi_cmd_free(h, c);
+ spin_unlock_irqrestore(&h->lock, flags);
return rc;
}
static void
-cciss_update_non_disk_devices(int cntl_num, int hostno)
+cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
{
/* the idea here is we could get notified from /proc
that some devices have changed, so we do a report
@@ -1189,7 +1168,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
ReportLunData_struct *ld_buff;
unsigned char *inq_buff;
unsigned char scsi3addr[8];
- ctlr_info_t *c;
__u32 num_luns=0;
unsigned char *ch;
struct cciss_scsi_dev_t *currentsd, *this_device;
@@ -1197,7 +1175,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
int i;
- c = (ctlr_info_t *) hba[cntl_num];
ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
currentsd = kzalloc(sizeof(*currentsd) *
@@ -1207,7 +1184,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
goto out;
}
this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
- if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
+ if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
ch = &ld_buff->LUNListLength[0];
num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
if (num_luns > CISS_MAX_PHYS_LUN) {
@@ -1231,7 +1208,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
- if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
+ if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
/* Inquiry failed (msg printed already) */
continue; /* so we will skip this device. */
@@ -1249,7 +1226,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
sizeof(this_device->revision));
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
- cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
+ cciss_scsi_get_device_id(h, scsi3addr,
this_device->device_id, sizeof(this_device->device_id));
switch (this_device->devtype)
@@ -1276,7 +1253,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
case 0x08: /* medium changer */
if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
printk(KERN_INFO "cciss%d: %s ignored, "
- "too many devices.\n", cntl_num,
+ "too many devices.\n", h->ctlr,
scsi_device_type(this_device->devtype));
break;
}
@@ -1288,7 +1265,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
}
}
- adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
+ adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
out:
kfree(inq_buff);
kfree(ld_buff);
@@ -1307,12 +1284,12 @@ is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c
}
static int
-cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
+cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
{
int arg_len;
if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
- cciss_update_non_disk_devices(ctlr, hostno);
+ cciss_update_non_disk_devices(h, hostno);
else
return -EINVAL;
return length;
@@ -1329,20 +1306,16 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
{
int buflen, datalen;
- ctlr_info_t *ci;
+ ctlr_info_t *h;
int i;
- int cntl_num;
-
- ci = (ctlr_info_t *) sh->hostdata[0];
- if (ci == NULL) /* This really shouldn't ever happen. */
+ h = (ctlr_info_t *) sh->hostdata[0];
+ if (h == NULL) /* This really shouldn't ever happen. */
return -EINVAL;
- cntl_num = ci->ctlr; /* Get our index into the hba[] array */
-
if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */
buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
- cntl_num, sh->host_no);
+ h->ctlr, sh->host_no);
/* this information is needed by apps to know which cciss
device corresponds to which scsi host number without
@@ -1352,8 +1325,9 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
this info is for an app to be able to use to know how to
get them back in sync. */
- for (i=0;i<ccissscsi[cntl_num].ndevices;i++) {
- struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i];
+ for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+ struct cciss_scsi_dev_t *sd =
+ &ccissscsi[h->ctlr].dev[i];
buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
sh->host_no, sd->bus, sd->target, sd->lun,
@@ -1371,15 +1345,15 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
*start = buffer + offset;
return(datalen);
} else /* User is writing to /proc/scsi/cciss*?/?* ... */
- return cciss_scsi_user_command(cntl_num, sh->host_no,
+ return cciss_scsi_user_command(h, sh->host_no,
buffer, length);
}
/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
dma mapping and fills in the scatter gather entries of the
- cciss command, cp. */
+ cciss command, c. */
-static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
+static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
struct scsi_cmnd *cmd)
{
unsigned int len;
@@ -1393,7 +1367,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
chained = 0;
sg_index = 0;
- curr_sg = cp->SG;
+ curr_sg = c->SG;
request_nsgs = scsi_dma_map(cmd);
if (request_nsgs) {
scsi_for_each_sg(cmd, sg, request_nsgs, i) {
@@ -1401,7 +1375,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
!chained && request_nsgs - i > 1) {
chained = 1;
sg_index = 0;
- curr_sg = sa->cmd_sg_list[cp->cmdindex];
+ curr_sg = sa->cmd_sg_list[c->cmdindex];
}
addr64 = (__u64) sg_dma_address(sg);
len = sg_dma_len(sg);
@@ -1414,19 +1388,19 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
++sg_index;
}
if (chained)
- cciss_map_sg_chain_block(h, cp,
- sa->cmd_sg_list[cp->cmdindex],
+ cciss_map_sg_chain_block(h, c,
+ sa->cmd_sg_list[c->cmdindex],
(request_nsgs - (h->max_cmd_sgentries - 1)) *
sizeof(SGDescriptor_struct));
}
/* track how many SG entries we are using */
if (request_nsgs > h->maxSG)
h->maxSG = request_nsgs;
- cp->Header.SGTotal = (__u8) request_nsgs + chained;
+ c->Header.SGTotal = (__u8) request_nsgs + chained;
if (request_nsgs > h->max_cmd_sgentries)
- cp->Header.SGList = h->max_cmd_sgentries;
+ c->Header.SGList = h->max_cmd_sgentries;
else
- cp->Header.SGList = cp->Header.SGTotal;
+ c->Header.SGList = c->Header.SGTotal;
return;
}
@@ -1434,18 +1408,17 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
static int
cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
{
- ctlr_info_t *c;
- int ctlr, rc;
+ ctlr_info_t *h;
+ int rc;
unsigned char scsi3addr[8];
- CommandList_struct *cp;
+ CommandList_struct *c;
unsigned long flags;
// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
// We violate cmd->host privacy here. (Is there another way?)
- c = (ctlr_info_t *) cmd->device->host->hostdata[0];
- ctlr = c->ctlr;
+ h = (ctlr_info_t *) cmd->device->host->hostdata[0];
- rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id,
+ rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
cmd->device->lun, scsi3addr);
if (rc != 0) {
/* the scsi nexus does not match any that we presented... */
@@ -1457,19 +1430,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
return 0;
}
- /* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n",
- cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/
- // printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel,
- // cmd->target, cmd->lun);
-
/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
see what the device thinks of it. */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- cp = scsi_cmd_alloc(c);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- if (cp == NULL) { /* trouble... */
- printk("scsi_cmd_alloc returned NULL!\n");
+ spin_lock_irqsave(&h->lock, flags);
+ c = scsi_cmd_alloc(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
/* FIXME: next 3 lines are -> BAD! <- */
cmd->result = DID_NO_CONNECT << 16;
done(cmd);
@@ -1480,35 +1448,41 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
cmd->scsi_done = done; // save this for use by completion code
- // save cp in case we have to abort it
- cmd->host_scribble = (unsigned char *) cp;
+ /* save c in case we have to abort it */
+ cmd->host_scribble = (unsigned char *) c;
- cp->cmd_type = CMD_SCSI;
- cp->scsi_cmd = cmd;
- cp->Header.ReplyQueue = 0; // unused in simple mode
- memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
- cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+ c->Header.Tag.lower = c->busaddr; /* use the cmd's bus address as its tag */
// Fill in the request block...
- cp->Request.Timeout = 0;
- memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
- BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB));
- cp->Request.CDBLen = cmd->cmd_len;
- memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
- cp->Request.Type.Type = TYPE_CMD;
- cp->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Timeout = 0;
+ memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+ BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
+ c->Request.CDBLen = cmd->cmd_len;
+ memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
+ c->Request.Type.Type = TYPE_CMD;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
switch(cmd->sc_data_direction)
{
- case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
- case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
- case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
+ case DMA_TO_DEVICE:
+ c->Request.Type.Direction = XFER_WRITE;
+ break;
+ case DMA_FROM_DEVICE:
+ c->Request.Type.Direction = XFER_READ;
+ break;
+ case DMA_NONE:
+ c->Request.Type.Direction = XFER_NONE;
+ break;
case DMA_BIDIRECTIONAL:
// This can happen if a buggy application does a scsi passthru
// and sets both inlen and outlen to non-zero. ( see
// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
- cp->Request.Type.Direction = XFER_RSVD;
+ c->Request.Type.Direction = XFER_RSVD;
// This is technically wrong, and cciss controllers should
// reject it with CMD_INVALID, which is the most correct
// response, but non-fibre backends appear to let it
@@ -1519,27 +1493,18 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
break;
default:
- printk("cciss: unknown data direction: %d\n",
+ dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
cmd->sc_data_direction);
BUG();
break;
}
- cciss_scatter_gather(c, cp, cmd);
-
- /* Put the request on the tail of the request queue */
-
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- addQ(&c->reqQ, cp);
- c->Qdepth++;
- start_io(c);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
+ cciss_scatter_gather(h, c, cmd);
+ enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
}
-static void
-cciss_unregister_scsi(int ctlr)
+static void cciss_unregister_scsi(ctlr_info_t *h)
{
struct cciss_scsi_adapter_data_t *sa;
struct cciss_scsi_cmd_stack_t *stk;
@@ -1547,59 +1512,58 @@ cciss_unregister_scsi(int ctlr)
/* we are being forcibly unloaded, and may not refuse. */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- sa = hba[ctlr]->scsi_ctlr;
+ spin_lock_irqsave(&h->lock, flags);
+ sa = h->scsi_ctlr;
stk = &sa->cmd_stack;
/* if we weren't ever actually registered, don't unregister */
if (sa->registered) {
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(&h->lock, flags);
scsi_remove_host(sa->scsi_host);
scsi_host_put(sa->scsi_host);
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(&h->lock, flags);
}
/* set scsi_host to NULL so our detect routine will
find us on register */
sa->scsi_host = NULL;
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- scsi_cmd_stack_free(ctlr);
+ spin_unlock_irqrestore(&h->lock, flags);
+ scsi_cmd_stack_free(h);
kfree(sa);
}
-static int
-cciss_engage_scsi(int ctlr)
+static int cciss_engage_scsi(ctlr_info_t *h)
{
struct cciss_scsi_adapter_data_t *sa;
struct cciss_scsi_cmd_stack_t *stk;
unsigned long flags;
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- sa = hba[ctlr]->scsi_ctlr;
+ spin_lock_irqsave(&h->lock, flags);
+ sa = h->scsi_ctlr;
stk = &sa->cmd_stack;
if (sa->registered) {
- printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
+ spin_unlock_irqrestore(&h->lock, flags);
return -ENXIO;
}
sa->registered = 1;
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- cciss_update_non_disk_devices(ctlr, -1);
- cciss_scsi_detect(ctlr);
+ spin_unlock_irqrestore(&h->lock, flags);
+ cciss_update_non_disk_devices(h, -1);
+ cciss_scsi_detect(h);
return 0;
}
static void
-cciss_seq_tape_report(struct seq_file *seq, int ctlr)
+cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
{
unsigned long flags;
- CPQ_TAPE_LOCK(ctlr, flags);
+ CPQ_TAPE_LOCK(h, flags);
seq_printf(seq,
"Sequential access devices: %d\n\n",
- ccissscsi[ctlr].ndevices);
- CPQ_TAPE_UNLOCK(ctlr, flags);
+ ccissscsi[h->ctlr].ndevices);
+ CPQ_TAPE_UNLOCK(h, flags);
}
static int wait_for_device_to_become_ready(ctlr_info_t *h,
@@ -1610,10 +1574,10 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
int waittime = HZ;
CommandList_struct *c;
- c = cmd_alloc(h, 1);
+ c = cmd_alloc(h);
if (!c) {
- printk(KERN_WARNING "cciss%d: out of memory in "
- "wait_for_device_to_become_ready.\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "wait_for_device_to_become_ready.\n");
return IO_ERROR;
}
@@ -1631,7 +1595,7 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
waittime = waittime * 2;
/* Send the Test Unit Ready */
- rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
+ rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
lunaddr, TYPE_CMD);
if (rc == 0)
rc = sendcmd_withirq_core(h, c, 0);
@@ -1657,18 +1621,18 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
}
}
retry_tur:
- printk(KERN_WARNING "cciss%d: Waiting %d secs "
+ dev_warn(&h->pdev->dev, "Waiting %d secs "
"for device to become ready.\n",
- h->ctlr, waittime / HZ);
+ waittime / HZ);
rc = 1; /* device not ready. */
}
if (rc)
- printk("cciss%d: giving up on device.\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "giving up on device.\n");
else
- printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
+ dev_warn(&h->pdev->dev, "device is ready.\n");
- cmd_free(h, c, 1);
+ cmd_free(h, c);
return rc;
}
@@ -1688,26 +1652,24 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
int rc;
CommandList_struct *cmd_in_trouble;
unsigned char lunaddr[8];
- ctlr_info_t *c;
- int ctlr;
+ ctlr_info_t *h;
/* find the controller to which the command to be aborted was sent */
- c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
- if (c == NULL) /* paranoia */
+ h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
+ if (h == NULL) /* paranoia */
return FAILED;
- ctlr = c->ctlr;
- printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
+ dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
/* find the command that's giving us trouble */
cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_in_trouble == NULL) /* paranoia */
return FAILED;
memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
/* send a reset to the SCSI LUN which the command was sent to */
- rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
+ rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
TYPE_MSG);
- if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0)
+ if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
return SUCCESS;
- printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
+ dev_warn(&h->pdev->dev, "resetting device failed.\n");
return FAILED;
}
@@ -1716,22 +1678,20 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
int rc;
CommandList_struct *cmd_to_abort;
unsigned char lunaddr[8];
- ctlr_info_t *c;
- int ctlr;
+ ctlr_info_t *h;
/* find the controller to which the command to be aborted was sent */
- c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
- if (c == NULL) /* paranoia */
+ h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
+ if (h == NULL) /* paranoia */
return FAILED;
- ctlr = c->ctlr;
- printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr);
+ dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
/* find the command to be aborted */
cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_to_abort == NULL) /* paranoia */
return FAILED;
memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
- rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
+ rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
0, 0, lunaddr, TYPE_MSG);
if (rc == 0)
return SUCCESS;
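The cciss hunks above replace the integer controller index and printk("cciss%d: ...") logging with a ctlr_info_t *h handle and dev_warn(&h->pdev->dev, ...). A minimal sketch of that handle-plus-dev_warn pattern, using hypothetical names (my_ctlr, my_do_reset) rather than real cciss code:

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

struct my_ctlr {
	struct pci_dev *pdev;	/* supplies a struct device for logging */
	spinlock_t lock;	/* per-controller lock, as in the hunks above */
};

static int my_do_reset(struct my_ctlr *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	/* ... controller-specific reset work ... */
	spin_unlock_irqrestore(&h->lock, flags);

	/* device-prefixed logging: no controller index to carry around */
	dev_warn(&h->pdev->dev, "resetting device\n");
	return 0;
}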
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index abb4ec6690f..d53b0291c44 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -35,6 +35,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
+#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
@@ -157,7 +158,7 @@ static int sendcmd(
unsigned int blkcnt,
unsigned int log_unit );
-static int ida_open(struct block_device *bdev, fmode_t mode);
+static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
static int ida_release(struct gendisk *disk, fmode_t mode);
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -195,9 +196,9 @@ static inline ctlr_info_t *get_host(struct gendisk *disk)
static const struct block_device_operations ida_fops = {
.owner = THIS_MODULE,
- .open = ida_open,
+ .open = ida_unlocked_open,
.release = ida_release,
- .locked_ioctl = ida_ioctl,
+ .ioctl = ida_ioctl,
.getgeo = ida_getgeo,
.revalidate_disk= ida_revalidate,
};
@@ -840,13 +841,29 @@ static int ida_open(struct block_device *bdev, fmode_t mode)
return 0;
}
+static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ida_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Close. Sync first.
*/
static int ida_release(struct gendisk *disk, fmode_t mode)
{
- ctlr_info_t *host = get_host(disk);
+ ctlr_info_t *host;
+
+ lock_kernel();
+ host = get_host(disk);
host->usage_count--;
+ unlock_kernel();
+
return 0;
}
@@ -1128,7 +1145,7 @@ static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* ida_ioctl does some miscellaneous stuff like reporting drive geometry,
* setting readahead and submitting commands from userspace to the controller.
*/
-static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
drv_info_t *drv = get_drv(bdev->bd_disk);
ctlr_info_t *host = get_host(bdev->bd_disk);
@@ -1162,7 +1179,8 @@ out_passthru:
return error;
case IDAGETCTLRSIG:
if (!arg) return -EINVAL;
- put_user(host->ctlr_sig, (int __user *)arg);
+ if (put_user(host->ctlr_sig, (int __user *)arg))
+ return -EFAULT;
return 0;
case IDAREVALIDATEVOLS:
if (MINOR(bdev->bd_dev) != 0)
@@ -1170,7 +1188,8 @@ out_passthru:
return revalidate_allvol(host);
case IDADRIVERVERSION:
if (!arg) return -EINVAL;
- put_user(DRIVER_VERSION, (unsigned long __user *)arg);
+ if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
+ return -EFAULT;
return 0;
case IDAGETPCIINFO:
{
@@ -1192,6 +1211,19 @@ out_passthru:
}
}
+
+static int ida_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long param)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ida_locked_ioctl(bdev, mode, cmd, param);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* ida_ctlr_ioctl is for passing commands to the controller from userspace.
* The command block (io) has already been copied to kernel space for us,
@@ -1225,17 +1257,11 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
/* Pre submit processing */
switch(io->cmd) {
case PASSTHRU_A:
- p = kmalloc(io->sg[0].size, GFP_KERNEL);
- if (!p)
- {
- error = -ENOMEM;
- cmd_free(h, c, 0);
- return(error);
- }
- if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
- kfree(p);
- cmd_free(h, c, 0);
- return -EFAULT;
+ p = memdup_user(io->sg[0].addr, io->sg[0].size);
+ if (IS_ERR(p)) {
+ error = PTR_ERR(p);
+ cmd_free(h, c, 0);
+ return error;
}
c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
sizeof(ida_ioctl_t),
@@ -1266,18 +1292,12 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
case DIAG_PASS_THRU:
case COLLECT_BUFFER:
case WRITE_FLASH_ROM:
- p = kmalloc(io->sg[0].size, GFP_KERNEL);
- if (!p)
- {
- error = -ENOMEM;
- cmd_free(h, c, 0);
- return(error);
+ p = memdup_user(io->sg[0].addr, io->sg[0].size);
+ if (IS_ERR(p)) {
+ error = PTR_ERR(p);
+ cmd_free(h, c, 0);
+ return error;
}
- if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
- kfree(p);
- cmd_free(h, c, 0);
- return -EFAULT;
- }
c->req.sg[0].size = io->sg[0].size;
c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
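The cpqarray hunks above fold kmalloc() plus copy_from_user() into memdup_user(), and start checking put_user() so faults surface as -EFAULT. A short sketch of the memdup_user() idiom, with an illustrative helper name; not cpqarray code:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int copy_in_buffer(void __user *uaddr, size_t len, void **out)
{
	void *p = memdup_user(uaddr, len);	/* kmalloc + copy in one step */

	if (IS_ERR(p))
		return PTR_ERR(p);	/* -EFAULT or -ENOMEM */
	*out = p;			/* caller kfree()s when done */
	return 0;
}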
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index df018990c42..9400845d602 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
- rw |= (1 << BIO_RW_BARRIER);
- rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+ rw |= REQ_HARDBARRIER;
+ rw |= REQ_UNPLUG | REQ_SYNC;
retry:
bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
/* check for unsupported barrier op.
* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0 */
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+ if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
/* Try again with no barrier */
dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
set_bit(MD_NO_BARRIER, &mdev->flags);
- rw &= ~(1 << BIO_RW_BARRIER);
+ rw &= ~REQ_HARDBARRIER;
bio_put(bio);
goto retry;
}
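The drbd hunks here and below are part of the BIO_RW_*/REQ_* flag unification: per-bio bit numbers give way to the shared REQ_* masks, so flags are ORed in directly. A sketch under that assumption (REQ_UNPLUG and REQ_HARDBARRIER belong to this kernel generation and were removed later); the function names are illustrative:

#include <linux/bio.h>
#include <linux/blk_types.h>
#include <linux/types.h>

static void mark_sync_write(struct bio *bio)
{
	/* old style: bio->bi_rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); */
	bio->bi_rw |= REQ_SYNC | REQ_UNPLUG;
}

static bool is_barrier(struct bio *bio)
{
	/* old style: bio_rw_flagged(bio, BIO_RW_BARRIER) */
	return (bio->bi_rw & REQ_HARDBARRIER) != 0;
}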
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 485ed8c7d62..352441b0f92 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -550,12 +550,6 @@ struct p_delay_probe {
u32 offset; /* usecs the probe got sent after the reference time point */
} __packed;
-struct delay_probe {
- struct list_head list;
- unsigned int seq_num;
- struct timeval time;
-};
-
/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
@@ -942,11 +936,9 @@ struct drbd_conf {
unsigned int ko_count;
struct drbd_work resync_work,
unplug_work,
- md_sync_work,
- delay_probe_work;
+ md_sync_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
- struct timer_list delay_probe_timer;
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
@@ -1062,12 +1054,6 @@ struct drbd_conf {
u64 ed_uuid; /* UUID of the exposed data */
struct mutex state_mutex;
char congestion_reason; /* Why we where congested... */
- struct list_head delay_probes; /* protected by peer_seq_lock */
- int data_delay; /* Delay of packets on the data-sock behind meta-sock */
- unsigned int delay_seq; /* To generate sequence numbers of delay probes */
- struct timeval dps_time; /* delay-probes-start-time */
- unsigned int dp_volume_last; /* send_cnt of last delay probe */
- int c_sync_rate; /* current resync rate after delay_probe magic */
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 7258c95e895..fa650dd85b9 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2184,43 +2184,6 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
return ok;
}
-static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
-{
- struct p_delay_probe dp;
- int offset, ok = 0;
- struct timeval now;
-
- mutex_lock(&ds->mutex);
- if (likely(ds->socket)) {
- do_gettimeofday(&now);
- offset = now.tv_usec - mdev->dps_time.tv_usec +
- (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
- dp.seq_num = cpu_to_be32(mdev->delay_seq);
- dp.offset = cpu_to_be32(offset);
-
- ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
- (struct p_header *)&dp, sizeof(dp), 0);
- }
- mutex_unlock(&ds->mutex);
-
- return ok;
-}
-
-static int drbd_send_delay_probes(struct drbd_conf *mdev)
-{
- int ok;
-
- mdev->delay_seq++;
- do_gettimeofday(&mdev->dps_time);
- ok = drbd_send_delay_probe(mdev, &mdev->meta);
- ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
-
- mdev->dp_volume_last = mdev->send_cnt;
- mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
-
- return ok;
-}
-
/* called on sndtimeo
* returns FALSE if we should retry,
* TRUE if we think connection is dead
@@ -2369,31 +2332,6 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
return 1;
}
-static void consider_delay_probes(struct drbd_conf *mdev)
-{
- if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
- return;
-
- if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
- drbd_send_delay_probes(mdev);
-}
-
-static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
- drbd_send_delay_probes(mdev);
-
- return 1;
-}
-
-static void delay_probe_timer_fn(unsigned long data)
-{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
-
- if (list_empty(&mdev->delay_probe_work.list))
- drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
-}
-
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
@@ -2425,15 +2363,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
/* NOTE: no need to check if barriers supported here as we would
* not pass the test in make_request_common in that case
*/
- if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
+ if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
/* dp_flags |= DP_HARDBARRIER; */
}
- if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
+ if (req->master_bio->bi_rw & REQ_SYNC)
dp_flags |= DP_RW_SYNC;
/* for now handle SYNCIO and UNPLUG
* as if they still were one and the same flag */
- if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
+ if (req->master_bio->bi_rw & REQ_UNPLUG)
dp_flags |= DP_RW_SYNC;
if (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T)
@@ -2457,9 +2395,6 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
drbd_put_data_sock(mdev);
- if (ok)
- consider_delay_probes(mdev);
-
return ok;
}
@@ -2506,9 +2441,6 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
drbd_put_data_sock(mdev);
- if (ok)
- consider_delay_probes(mdev);
-
return ok;
}
@@ -2604,6 +2536,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
unsigned long flags;
int rv = 0;
+ lock_kernel();
spin_lock_irqsave(&mdev->req_lock, flags);
/* to have a stable mdev->state.role
* and no race with updating open_cnt */
@@ -2618,6 +2551,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (!rv)
mdev->open_cnt++;
spin_unlock_irqrestore(&mdev->req_lock, flags);
+ unlock_kernel();
return rv;
}
@@ -2625,7 +2559,9 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
static int drbd_release(struct gendisk *gd, fmode_t mode)
{
struct drbd_conf *mdev = gd->private_data;
+ lock_kernel();
mdev->open_cnt--;
+ unlock_kernel();
return 0;
}
@@ -2660,9 +2596,20 @@ static void drbd_unplug_fn(struct request_queue *q)
static void drbd_set_defaults(struct drbd_conf *mdev)
{
- mdev->sync_conf.after = DRBD_AFTER_DEF;
- mdev->sync_conf.rate = DRBD_RATE_DEF;
- mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
+ /* This way we get a compile error when sync_conf grows,
+ and we forgot to initialize it here */
+ mdev->sync_conf = (struct syncer_conf) {
+ /* .rate = */ DRBD_RATE_DEF,
+ /* .after = */ DRBD_AFTER_DEF,
+ /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
+ /* .verify_alg = */ {}, 0,
+ /* .cpu_mask = */ {}, 0,
+ /* .csums_alg = */ {}, 0,
+ /* .use_rle = */ 0
+ };
+
+ /* Have to use that way, because the layout differs between
+ big endian and little endian */
mdev->state = (union drbd_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
@@ -2721,24 +2668,17 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->md_sync_work.list);
INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
- INIT_LIST_HEAD(&mdev->delay_probes);
- INIT_LIST_HEAD(&mdev->delay_probe_work.list);
mdev->resync_work.cb = w_resync_inactive;
mdev->unplug_work.cb = w_send_write_hint;
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
- mdev->delay_probe_work.cb = w_delay_probes;
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
- init_timer(&mdev->delay_probe_timer);
mdev->resync_timer.function = resync_timer_fn;
mdev->resync_timer.data = (unsigned long) mdev;
mdev->md_sync_timer.function = md_sync_timer_fn;
mdev->md_sync_timer.data = (unsigned long) mdev;
- mdev->delay_probe_timer.function = delay_probe_timer_fn;
- mdev->delay_probe_timer.data = (unsigned long) mdev;
-
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 2151f18b21d..73131c5ae33 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1557,10 +1557,6 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
sc.rate = DRBD_RATE_DEF;
sc.after = DRBD_AFTER_DEF;
sc.al_extents = DRBD_AL_EXTENTS_DEF;
- sc.dp_volume = DRBD_DP_VOLUME_DEF;
- sc.dp_interval = DRBD_DP_INTERVAL_DEF;
- sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
- sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
} else
memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index d0f1767ea4c..be3374b6846 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -73,21 +73,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
/* if more than 1 GB display in MB */
if (mdev->rs_total > 0x100000L)
- seq_printf(seq, "(%lu/%lu)M",
+ seq_printf(seq, "(%lu/%lu)M\n\t",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
else
- seq_printf(seq, "(%lu/%lu)K",
+ seq_printf(seq, "(%lu/%lu)K\n\t",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(mdev->rs_total));
- if (mdev->state.conn == C_SYNC_TARGET)
- seq_printf(seq, " queue_delay: %d.%d ms\n\t",
- mdev->data_delay / 1000,
- (mdev->data_delay % 1000) / 100);
- else if (mdev->state.conn == C_SYNC_SOURCE)
- seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);
-
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
* the * 100 / 100 trick are important. We do a +1 to be
@@ -135,14 +128,6 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
else
seq_printf(seq, " (%ld)", dbdt);
- if (mdev->state.conn == C_SYNC_TARGET) {
- if (mdev->c_sync_rate > 1000)
- seq_printf(seq, " want: %d,%03d",
- mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
- else
- seq_printf(seq, " want: %d", mdev->c_sync_rate);
- }
-
seq_printf(seq, " K/sec\n");
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ec1711f7c5c..081522d3c74 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1180,7 +1180,7 @@ next_bio:
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
/* we special case some flags in the multi-bio case, see below
- * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+ * (REQ_UNPLUG, REQ_HARDBARRIER) */
bio->bi_rw = rw;
bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
bios = bios->bi_next;
bio->bi_next = NULL;
- /* strip off BIO_RW_UNPLUG unless it is the last bio */
+ /* strip off REQ_UNPLUG unless it is the last bio */
if (bios)
- bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+ bio->bi_rw &= ~REQ_UNPLUG;
drbd_generic_make_request(mdev, fault_type, bio);
- /* strip off BIO_RW_BARRIER,
+ /* strip off REQ_HARDBARRIER,
* unless it is the first or last bio */
if (bios && bios->bi_next)
- bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ bios->bi_rw &= ~REQ_HARDBARRIER;
} while (bios);
maybe_kick_lo(mdev);
return 0;
@@ -1233,7 +1233,7 @@ fail:
}
/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
* @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
(and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
so that we can finish that epoch in drbd_may_finish_epoch().
That is necessary if we already have a long chain of Epochs, before
- we realize that BIO_RW_BARRIER is actually not supported */
+ we realize that REQ_HARDBARRIER is actually not supported */
/* As long as the -ENOTSUPP on the barrier is reported immediately
that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
if (epoch == e->epoch) {
set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
- rw |= (1<<BIO_RW_BARRIER);
+ rw |= REQ_HARDBARRIER;
e->flags |= EE_IS_BARRIER;
} else {
if (atomic_read(&epoch->epoch_size) > 1 ||
!test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
- rw |= (1<<BIO_RW_BARRIER);
+ rw |= REQ_HARDBARRIER;
e->flags |= EE_IS_BARRIER;
}
}
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
dp_flags = be32_to_cpu(p->dp_flags);
if (dp_flags & DP_HARDBARRIER) {
dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
- /* rw |= (1<<BIO_RW_BARRIER); */
+ /* rw |= REQ_HARDBARRIER; */
}
if (dp_flags & DP_RW_SYNC)
- rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
if (dp_flags & DP_MAY_SET_IN_SYNC)
e->flags |= EE_MAY_SET_IN_SYNC;
@@ -3555,14 +3555,15 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
return ok;
}
-static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
+static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
{
/* TODO zero copy sink :) */
static char sink[128];
int size, want, r;
- dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
- h->command, h->length);
+ if (!silent)
+ dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
+ h->command, h->length);
size = h->length;
while (size > 0) {
@@ -3574,101 +3575,25 @@ static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
return size == 0;
}
-static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
-{
- if (mdev->state.disk >= D_INCONSISTENT)
- drbd_kick_lo(mdev);
-
- /* Make sure we've acked all the TCP data associated
- * with the data requests being unplugged */
- drbd_tcp_quickack(mdev->data.socket);
-
- return TRUE;
-}
-
-static void timeval_sub_us(struct timeval* tv, unsigned int us)
+static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
{
- tv->tv_sec -= us / 1000000;
- us = us % 1000000;
- if (tv->tv_usec > us) {
- tv->tv_usec += 1000000;
- tv->tv_sec--;
- }
- tv->tv_usec -= us;
+ return receive_skip_(mdev, h, 0);
}
-static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
{
- struct delay_probe *dp;
- struct list_head *le;
- struct timeval now;
- int seq_num;
- int offset;
- int data_delay;
-
- seq_num = be32_to_cpu(p->seq_num);
- offset = be32_to_cpu(p->offset);
-
- spin_lock(&mdev->peer_seq_lock);
- if (!list_empty(&mdev->delay_probes)) {
- if (from == USE_DATA_SOCKET)
- le = mdev->delay_probes.next;
- else
- le = mdev->delay_probes.prev;
-
- dp = list_entry(le, struct delay_probe, list);
-
- if (dp->seq_num == seq_num) {
- list_del(le);
- spin_unlock(&mdev->peer_seq_lock);
- do_gettimeofday(&now);
- timeval_sub_us(&now, offset);
- data_delay =
- now.tv_usec - dp->time.tv_usec +
- (now.tv_sec - dp->time.tv_sec) * 1000000;
-
- if (data_delay > 0)
- mdev->data_delay = data_delay;
-
- kfree(dp);
- return;
- }
-
- if (dp->seq_num > seq_num) {
- spin_unlock(&mdev->peer_seq_lock);
- dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
- return; /* Do not alloca a struct delay_probe.... */
- }
- }
- spin_unlock(&mdev->peer_seq_lock);
-
- dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
- if (!dp) {
- dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
- return;
- }
-
- dp->seq_num = seq_num;
- do_gettimeofday(&dp->time);
- timeval_sub_us(&dp->time, offset);
-
- spin_lock(&mdev->peer_seq_lock);
- if (from == USE_DATA_SOCKET)
- list_add(&dp->list, &mdev->delay_probes);
- else
- list_add_tail(&dp->list, &mdev->delay_probes);
- spin_unlock(&mdev->peer_seq_lock);
+ return receive_skip_(mdev, h, 1);
}
-static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
{
- struct p_delay_probe *p = (struct p_delay_probe *)h;
+ if (mdev->state.disk >= D_INCONSISTENT)
+ drbd_kick_lo(mdev);
- ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
- if (drbd_recv(mdev, h->payload, h->length) != h->length)
- return FALSE;
+ /* Make sure we've acked all the TCP data associated
+ * with the data requests being unplugged */
+ drbd_tcp_quickack(mdev->data.socket);
- got_delay_probe(mdev, USE_DATA_SOCKET, p);
return TRUE;
}
@@ -3695,7 +3620,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
[P_OV_REQUEST] = receive_DataRequest,
[P_OV_REPLY] = receive_DataRequest,
[P_CSUM_RS_REQUEST] = receive_DataRequest,
- [P_DELAY_PROBE] = receive_delay_probe,
+ [P_DELAY_PROBE] = receive_skip_silent,
/* anything missing from this table is in
* the asender_tbl, see get_asender_cmd */
[P_MAX_CMD] = NULL,
@@ -4472,11 +4397,9 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
-static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h)
{
- struct p_delay_probe *p = (struct p_delay_probe *)h;
-
- got_delay_probe(mdev, USE_META_SOCKET, p);
+ /* IGNORE */
return TRUE;
}
@@ -4504,7 +4427,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
- [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_delay_probe_m },
+ [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m },
[P_MAX_CMD] = { 0, NULL },
};
if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
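The receive_skip()/receive_skip_silent() change above is the thin-wrapper idiom: one worker takes a flag, and two trivial entry points select the behaviour so each dispatch table keeps a distinct handler. A rough sketch with made-up names:

#include <linux/kernel.h>

static int drain_packet(void *ctx, int len, int silent)
{
	if (!silent)
		printk(KERN_WARNING "skipping unknown packet, l: %d\n", len);
	/* ... read and discard len bytes from ctx ... */
	return 0;
}

static int drain_packet_verbose(void *ctx, int len)
{
	return drain_packet(ctx, len, 0);
}

static int drain_packet_silent(void *ctx, int len)
{
	return drain_packet(ctx, len, 1);
}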
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 654f1ef5cbb..f761d98a4e9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
* because of those XXX, this is not yet enabled,
* i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
*/
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
bio_endio(bio, -EOPNOTSUPP);
return 0;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b623ceee2a4..ca4a16cea2d 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -424,18 +424,6 @@ void resync_timer_fn(unsigned long data)
drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
-static int calc_resync_rate(struct drbd_conf *mdev)
-{
- int d = mdev->data_delay / 1000; /* us -> ms */
- int td = mdev->sync_conf.throttle_th * 100; /* 0.1s -> ms */
- int hd = mdev->sync_conf.hold_off_th * 100; /* 0.1s -> ms */
- int cr = mdev->sync_conf.rate;
-
- return d <= td ? cr :
- d >= hd ? 0 :
- cr + (cr * (td - d) / (hd - td));
-}
-
int w_make_resync_request(struct drbd_conf *mdev,
struct drbd_work *w, int cancel)
{
@@ -473,8 +461,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
max_segment_size = mdev->agreed_pro_version < 94 ?
queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
- mdev->c_sync_rate = calc_resync_rate(mdev);
- number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
+ number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE / 1024) * HZ);
pe = atomic_read(&mdev->rs_pending_cnt);
mutex_lock(&mdev->data.mutex);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 90c4038702d..cf04c1b234e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -178,6 +178,7 @@ static int print_unex = 1;
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bio.h>
+#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/fcntl.h>
@@ -514,8 +515,6 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
-#define NO_SIGNAL (!interruptible || !signal_pending(current))
-
/* Errors during formatting are counted here. */
static int format_errors;
@@ -539,7 +538,7 @@ static int max_buffer_sectors;
static int *errors;
typedef void (*done_f)(int);
-static struct cont_t {
+static const struct cont_t {
void (*interrupt)(void);
/* this is called after the interrupt of the
* main command */
@@ -578,7 +577,7 @@ static void reset_fdc(void);
#define NEED_1_RECAL -2
#define NEED_2_RECAL -3
-static int usage_count;
+static atomic_t usage_count = ATOMIC_INIT(0);
/* buffer related variables */
static int buffer_track = -1;
@@ -858,36 +857,15 @@ static void set_fdc(int drive)
}
/* locks the driver */
-static int _lock_fdc(int drive, bool interruptible, int line)
+static int lock_fdc(int drive, bool interruptible)
{
- if (!usage_count) {
- pr_err("Trying to lock fdc while usage count=0 at line %d\n",
- line);
+ if (WARN(atomic_read(&usage_count) == 0,
+ "Trying to lock fdc while usage count=0\n"))
return -1;
- }
-
- if (test_and_set_bit(0, &fdc_busy)) {
- DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(&fdc_wait, &wait);
-
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (!test_and_set_bit(0, &fdc_busy))
- break;
- schedule();
-
- if (!NO_SIGNAL) {
- remove_wait_queue(&fdc_wait, &wait);
- return -EINTR;
- }
- }
+ if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
+ return -EINTR;
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&fdc_wait, &wait);
- flush_scheduled_work();
- }
command_status = FD_COMMAND_NONE;
__reschedule_timeout(drive, "lock fdc");
@@ -895,11 +873,8 @@ static int _lock_fdc(int drive, bool interruptible, int line)
return 0;
}
-#define lock_fdc(drive, interruptible) \
- _lock_fdc(drive, interruptible, __LINE__)
-
/* unlocks the driver */
-static inline void unlock_fdc(void)
+static void unlock_fdc(void)
{
unsigned long flags;
@@ -1224,7 +1199,7 @@ static int need_more_output(void)
/* Set perpendicular mode as required, based on data rate, if supported.
* 82077 Now tested. 1Mbps data rate only possible with 82077-1.
*/
-static inline void perpendicular_mode(void)
+static void perpendicular_mode(void)
{
unsigned char perp_mode;
@@ -1995,14 +1970,14 @@ static void do_wakeup(void)
wake_up(&command_done);
}
-static struct cont_t wakeup_cont = {
+static const struct cont_t wakeup_cont = {
.interrupt = empty,
.redo = do_wakeup,
.error = empty,
.done = (done_f)empty
};
-static struct cont_t intr_cont = {
+static const struct cont_t intr_cont = {
.interrupt = empty,
.redo = process_fd_request,
.error = empty,
@@ -2015,25 +1990,10 @@ static int wait_til_done(void (*handler)(void), bool interruptible)
schedule_bh(handler);
- if (command_status < 2 && NO_SIGNAL) {
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&command_done, &wait);
- for (;;) {
- set_current_state(interruptible ?
- TASK_INTERRUPTIBLE :
- TASK_UNINTERRUPTIBLE);
-
- if (command_status >= 2 || !NO_SIGNAL)
- break;
-
- is_alive(__func__, "");
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&command_done, &wait);
- }
+ if (interruptible)
+ wait_event_interruptible(command_done, command_status >= 2);
+ else
+ wait_event(command_done, command_status >= 2);
if (command_status < 2) {
cancel_activity();
@@ -2223,7 +2183,7 @@ static void redo_format(void)
debugt(__func__, "queue format request");
}
-static struct cont_t format_cont = {
+static const struct cont_t format_cont = {
.interrupt = format_interrupt,
.redo = redo_format,
.error = bad_flp_intr,
@@ -2583,10 +2543,8 @@ static int make_raw_rw_request(void)
int tracksize;
int ssize;
- if (max_buffer_sectors == 0) {
- pr_info("VFS: Block I/O scheduled on unopened device\n");
+ if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
return 0;
- }
set_fdc((long)current_req->rq_disk->private_data);
@@ -2921,7 +2879,7 @@ do_request:
return;
}
-static struct cont_t rw_cont = {
+static const struct cont_t rw_cont = {
.interrupt = rw_interrupt,
.redo = redo_fd_request,
.error = bad_flp_intr,
@@ -2936,19 +2894,16 @@ static void process_fd_request(void)
static void do_fd_request(struct request_queue *q)
{
- if (max_buffer_sectors == 0) {
- pr_info("VFS: %s called on non-open device\n", __func__);
+ if (WARN(max_buffer_sectors == 0,
+ "VFS: %s called on non-open device\n", __func__))
return;
- }
- if (usage_count == 0) {
- pr_info("warning: usage count=0, current_req=%p exiting\n",
- current_req);
- pr_info("sect=%ld type=%x flags=%x\n",
- (long)blk_rq_pos(current_req), current_req->cmd_type,
- current_req->cmd_flags);
+ if (WARN(atomic_read(&usage_count) == 0,
+ "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
+ current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
+ current_req->cmd_flags))
return;
- }
+
if (test_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
@@ -2960,7 +2915,7 @@ static void do_fd_request(struct request_queue *q)
is_alive(__func__, "");
}
-static struct cont_t poll_cont = {
+static const struct cont_t poll_cont = {
.interrupt = success_and_wakeup,
.redo = floppy_ready,
.error = generic_failure,
@@ -2991,7 +2946,7 @@ static void reset_intr(void)
pr_info("weird, reset interrupt called\n");
}
-static struct cont_t reset_cont = {
+static const struct cont_t reset_cont = {
.interrupt = reset_intr,
.redo = success_and_wakeup,
.error = generic_failure,
@@ -3033,7 +2988,7 @@ static inline int fd_copyin(void __user *param, void *address,
return copy_from_user(address, param, size) ? -EFAULT : 0;
}
-static inline const char *drive_name(int type, int drive)
+static const char *drive_name(int type, int drive)
{
struct floppy_struct *floppy;
@@ -3096,14 +3051,14 @@ static void raw_cmd_done(int flag)
generic_done(flag);
}
-static struct cont_t raw_cmd_cont = {
+static const struct cont_t raw_cmd_cont = {
.interrupt = success_and_wakeup,
.redo = floppy_start,
.error = generic_failure,
.done = raw_cmd_done
};
-static inline int raw_cmd_copyout(int cmd, void __user *param,
+static int raw_cmd_copyout(int cmd, void __user *param,
struct floppy_raw_cmd *ptr)
{
int ret;
@@ -3148,7 +3103,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
}
}
-static inline int raw_cmd_copyin(int cmd, void __user *param,
+static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
struct floppy_raw_cmd *ptr;
@@ -3266,7 +3221,7 @@ static int invalidate_drive(struct block_device *bdev)
return 0;
}
-static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+static int set_geometry(unsigned int cmd, struct floppy_struct *g,
int drive, int type, struct block_device *bdev)
{
int cnt;
@@ -3337,7 +3292,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
}
/* handle obsolete ioctl's */
-static int ioctl_table[] = {
+static unsigned int ioctl_table[] = {
FDCLRPRM,
FDSETPRM,
FDDEFPRM,
@@ -3365,7 +3320,7 @@ static int ioctl_table[] = {
FDTWADDLE
};
-static inline int normalize_ioctl(int *cmd, int *size)
+static int normalize_ioctl(unsigned int *cmd, int *size)
{
int i;
@@ -3417,7 +3372,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
@@ -3593,6 +3548,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return 0;
}
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long param)
+{
+ int ret;
+
+ lock_kernel();
+ ret = fd_locked_ioctl(bdev, mode, cmd, param);
+ unlock_kernel();
+
+ return ret;
+}
+
static void __init config_types(void)
{
bool has_drive = false;
@@ -3649,6 +3616,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
int drive = (long)disk->private_data;
+ lock_kernel();
mutex_lock(&open_lock);
if (UDRS->fd_ref < 0)
UDRS->fd_ref = 0;
@@ -3659,6 +3627,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
mutex_unlock(&open_lock);
+ unlock_kernel();
return 0;
}
@@ -3676,6 +3645,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
int res = -EBUSY;
char *tmp;
+ lock_kernel();
mutex_lock(&open_lock);
old_dev = UDRS->fd_device;
if (opened_bdev[drive] && opened_bdev[drive] != bdev)
@@ -3752,6 +3722,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
goto out;
}
mutex_unlock(&open_lock);
+ unlock_kernel();
return 0;
out:
if (UDRS->fd_ref < 0)
@@ -3762,6 +3733,7 @@ out:
opened_bdev[drive] = NULL;
out2:
mutex_unlock(&open_lock);
+ unlock_kernel();
return res;
}
@@ -3829,6 +3801,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = 0;
+	bio.bi_flags = (1 << BIO_QUIET);
init_completion(&complete);
bio.bi_private = &complete;
bio.bi_end_io = floppy_rb0_complete;
@@ -3857,10 +3830,10 @@ static int floppy_revalidate(struct gendisk *disk)
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
test_bit(drive, &fake_change) || NO_GEOM) {
- if (usage_count == 0) {
- pr_info("VFS: revalidate called on non-open device.\n");
+ if (WARN(atomic_read(&usage_count) == 0,
+ "VFS: revalidate called on non-open device.\n"))
return -EFAULT;
- }
+
lock_fdc(drive, false);
cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags));
@@ -3893,7 +3866,7 @@ static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
- .locked_ioctl = fd_ioctl,
+ .ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.media_changed = check_floppy_change,
.revalidate_disk = floppy_revalidate,
@@ -4126,7 +4099,7 @@ static ssize_t floppy_cmos_show(struct device *dev,
return sprintf(buf, "%X\n", UDP->cmos);
}
-DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
+static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
static void floppy_device_release(struct device *dev)
{
@@ -4175,6 +4148,9 @@ static int __init floppy_init(void)
int i, unit, drive;
int err, dr;
+ set_debugt();
+ interruptjiffies = resultjiffies = jiffies;
+
#if defined(CONFIG_PPC)
if (check_legacy_ioport(FDC1))
return -ENODEV;
@@ -4353,7 +4329,7 @@ out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
out_flush_work:
flush_scheduled_work();
- if (usage_count)
+ if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
@@ -4370,8 +4346,6 @@ out_put_disk:
return err;
}
-static DEFINE_SPINLOCK(floppy_usage_lock);
-
static const struct io_region {
int offset;
int size;
@@ -4417,14 +4391,8 @@ static void floppy_release_regions(int fdc)
static int floppy_grab_irq_and_dma(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&floppy_usage_lock, flags);
- if (usage_count++) {
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+ if (atomic_inc_return(&usage_count) > 1)
return 0;
- }
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
/*
* We might have scheduled a free_irq(), wait it to
@@ -4435,9 +4403,7 @@ static int floppy_grab_irq_and_dma(void)
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
FLOPPY_IRQ);
- spin_lock_irqsave(&floppy_usage_lock, flags);
- usage_count--;
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+ atomic_dec(&usage_count);
return -1;
}
if (fd_request_dma()) {
@@ -4447,9 +4413,7 @@ static int floppy_grab_irq_and_dma(void)
use_virtual_dma = can_use_virtual_dma = 1;
if (!(can_use_virtual_dma & 1)) {
fd_free_irq();
- spin_lock_irqsave(&floppy_usage_lock, flags);
- usage_count--;
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+ atomic_dec(&usage_count);
return -1;
}
}
@@ -4484,9 +4448,7 @@ cleanup:
fd_free_dma();
while (--fdc >= 0)
floppy_release_regions(fdc);
- spin_lock_irqsave(&floppy_usage_lock, flags);
- usage_count--;
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+ atomic_dec(&usage_count);
return -1;
}
@@ -4498,14 +4460,10 @@ static void floppy_release_irq_and_dma(void)
#endif
long tmpsize;
unsigned long tmpaddr;
- unsigned long flags;
- spin_lock_irqsave(&floppy_usage_lock, flags);
- if (--usage_count) {
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+ if (!atomic_dec_and_test(&usage_count))
return;
- }
- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+
if (irqdma_allocated) {
fd_disable_dma();
fd_free_dma();
@@ -4598,7 +4556,7 @@ static void __exit floppy_module_exit(void)
del_timer_sync(&fd_timer);
blk_cleanup_queue(floppy_queue);
- if (usage_count)
+ if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
/* eject disk, if any */
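Two floppy conversions above are worth a sketch: the hand-rolled add_wait_queue()/schedule() loops become wait_event_interruptible()/wait_event(), and the spinlock-protected int usage_count becomes an atomic_t. The names below are stand-ins, not floppy code:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(busy_wait);
static unsigned long busy_flag;
static atomic_t users = ATOMIC_INIT(0);

static int grab_device(void)
{
	/* sleeps until bit 0 is acquired; nonzero means a signal arrived */
	if (wait_event_interruptible(busy_wait,
				     !test_and_set_bit(0, &busy_flag)))
		return -EINTR;
	return 0;
}

static int get_ref(void)
{
	return atomic_inc_return(&users) > 1;	/* already set up? */
}

static void put_ref(void)
{
	if (!atomic_dec_and_test(&users))
		return;
	/* last user gone: release IRQ/DMA here */
}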
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 81c78b3ce2d..30ec6b37424 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -627,7 +627,7 @@ repeat:
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, req->buffer);
#endif
- if (blk_fs_request(req)) {
+ if (req->cmd_type == REQ_TYPE_FS) {
switch (rq_data_dir(req)) {
case READ:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
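hd.c (and mg_disk, nbd, ps3disk below) switches from the removed blk_fs_request() helper to testing cmd_type directly. A one-line sketch, assuming the REQ_TYPE_FS enum of kernels from this era:

#include <linux/blkdev.h>

static bool is_fs_request(struct request *rq)
{
	/* old style: blk_fs_request(rq) */
	return rq->cmd_type == REQ_TYPE_FS;
}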
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6120922f459..91797bbbe70 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -67,6 +67,7 @@
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
+#include <linux/smp_lock.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
@@ -476,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
- bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
+ bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
struct file *file = lo->lo_backing_file;
if (barrier) {
@@ -831,7 +832,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_queue->unplug_fn = loop_unplug;
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
@@ -1408,9 +1409,11 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo = bdev->bd_disk->private_data;
+ lock_kernel();
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
+ unlock_kernel();
return 0;
}
@@ -1420,6 +1423,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data;
int err;
+ lock_kernel();
mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt)
@@ -1444,6 +1448,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
+	unlock_kernel();
return 0;
}
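The loop.c open/release changes above, like the cpqarray, floppy, paride and pktcdvd ones, push the big kernel lock down into the driver: block_device_operations loses ->locked_ioctl, so each driver wraps its old handler and takes the BKL itself. A sketch with hypothetical names; lock_kernel() comes from <linux/smp_lock.h> and exists only in kernels of this vintage:

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>

static int my_locked_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* the old, BKL-assuming handler body */
}

static int my_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = my_locked_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();

	return ret;
}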
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 28db925dbda..76fa3deaee8 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -670,7 +670,7 @@ static void mg_request_poll(struct request_queue *q)
break;
}
- if (unlikely(!blk_fs_request(host->req))) {
+ if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
mg_end_request_cur(host, -EIO);
continue;
}
@@ -756,7 +756,7 @@ static void mg_request(struct request_queue *q)
continue;
}
- if (unlikely(!blk_fs_request(req))) {
+ if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
mg_end_request_cur(host, -EIO);
continue;
}
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
- elevator_exit(host->breq->elevator);
- err = elevator_init(host->breq, "noop");
+ err = elevator_change(host->breq, "noop");
if (err) {
printk(KERN_ERR "%s:%d (elevator_init) fail\n",
__func__, __LINE__);
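mg_disk above swaps the elevator_exit()+elevator_init() pair for elevator_change(), which this series introduces for drivers that want a specific I/O scheduler. A minimal sketch:

#include <linux/blkdev.h>
#include <linux/elevator.h>

static int use_noop_scheduler(struct request_queue *q)
{
	/* old style: elevator_exit(q->elevator); elevator_init(q, "noop"); */
	return elevator_change(q, "noop");
}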
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 16c3c8613cd..0daa422aa28 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
+#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -448,7 +449,7 @@ static void nbd_clear_que(struct nbd_device *lo)
static void nbd_handle_req(struct nbd_device *lo, struct request *req)
{
- if (!blk_fs_request(req))
+ if (req->cmd_type != REQ_TYPE_FS)
goto error_out;
nbd_cmd(req) = NBD_CMD_READ;
@@ -716,9 +717,11 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+ lock_kernel();
mutex_lock(&lo->tx_lock);
error = __nbd_ioctl(bdev, lo, cmd, arg);
mutex_unlock(&lo->tx_lock);
+ unlock_kernel();
return error;
}
@@ -726,7 +729,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
static const struct block_device_operations nbd_fops =
{
.owner = THIS_MODULE,
- .locked_ioctl = nbd_ioctl,
+ .ioctl = nbd_ioctl,
};
/*
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 6cd8b705b11..2284b4f05c6 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -310,7 +310,8 @@ static void osdblk_rq_fn(struct request_queue *q)
break;
/* filter out block requests we don't understand */
- if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) {
+ if (rq->cmd_type != REQ_TYPE_FS &&
+ !(rq->cmd_flags & REQ_HARDBARRIER)) {
blk_end_request_all(rq, 0);
continue;
}
@@ -322,7 +323,7 @@ static void osdblk_rq_fn(struct request_queue *q)
* driver-specific, etc.
*/
- do_flush = (rq->special == (void *) 0xdeadbeefUL);
+ do_flush = rq->cmd_flags & REQ_FLUSH;
do_write = (rq_data_dir(rq) == WRITE);
if (!do_flush) { /* osd_flush does not use a bio */
@@ -379,14 +380,6 @@ static void osdblk_rq_fn(struct request_queue *q)
}
}
-static void osdblk_prepare_flush(struct request_queue *q, struct request *rq)
-{
- /* add driver-specific marker, to indicate that this request
- * is a flush command
- */
- rq->special = (void *) 0xdeadbeefUL;
-}
-
static void osdblk_free_disk(struct osdblk_device *osdev)
{
struct gendisk *disk = osdev->disk;
@@ -446,7 +439,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
blk_queue_prep_rq(q, blk_queue_start_tag);
- blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush);
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
disk->queue = q;
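osdblk above (and ps3disk below) stops tagging flush requests through a prepare_flush callback and a driver-private rq->special marker; the block layer now sets REQ_FLUSH in cmd_flags and the driver simply tests for it. A sketch of that test:

#include <linux/blkdev.h>

static bool request_is_flush(struct request *rq)
{
	/* old style: rq->special == (void *) 0xdeadbeefUL */
	return (rq->cmd_flags & REQ_FLUSH) != 0;
}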
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 71acf4e5335..76f8565e1e8 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -138,6 +138,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
static DEFINE_SPINLOCK(pcd_lock);
@@ -224,13 +225,21 @@ static char *pcd_buf; /* buffer for request in progress */
static int pcd_block_open(struct block_device *bdev, fmode_t mode)
{
struct pcd_unit *cd = bdev->bd_disk->private_data;
- return cdrom_open(&cd->info, bdev, mode);
+ int ret;
+
+ lock_kernel();
+ ret = cdrom_open(&cd->info, bdev, mode);
+ unlock_kernel();
+
+ return ret;
}
static int pcd_block_release(struct gendisk *disk, fmode_t mode)
{
struct pcd_unit *cd = disk->private_data;
+ lock_kernel();
cdrom_release(&cd->info, mode);
+ unlock_kernel();
return 0;
}
@@ -238,7 +247,13 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
struct pcd_unit *cd = bdev->bd_disk->private_data;
- return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
+ int ret;
+
+ lock_kernel();
+ ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
}
static int pcd_block_media_changed(struct gendisk *disk)
@@ -251,7 +266,7 @@ static const struct block_device_operations pcd_bdops = {
.owner = THIS_MODULE,
.open = pcd_block_open,
.release = pcd_block_release,
- .locked_ioctl = pcd_block_ioctl,
+ .ioctl = pcd_block_ioctl,
.media_changed = pcd_block_media_changed,
};
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c1e5cd029b2..985f0d4f1d1 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -153,6 +153,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <linux/workqueue.h>
@@ -439,7 +440,7 @@ static char *pd_buf; /* buffer for request in progress */
static enum action do_pd_io_start(void)
{
- if (blk_special_request(pd_req)) {
+ if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
phase = pd_special;
return pd_special();
}
@@ -735,12 +736,14 @@ static int pd_open(struct block_device *bdev, fmode_t mode)
{
struct pd_unit *disk = bdev->bd_disk->private_data;
+ lock_kernel();
disk->access++;
if (disk->removable) {
pd_special_command(disk, pd_media_check);
pd_special_command(disk, pd_door_lock);
}
+ unlock_kernel();
return 0;
}
@@ -768,8 +771,10 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd) {
case CDROMEJECT:
+ lock_kernel();
if (disk->access == 1)
pd_special_command(disk, pd_eject);
+ unlock_kernel();
return 0;
default:
return -EINVAL;
@@ -780,8 +785,10 @@ static int pd_release(struct gendisk *p, fmode_t mode)
{
struct pd_unit *disk = p->private_data;
+ lock_kernel();
if (!--disk->access && disk->removable)
pd_special_command(disk, pd_door_unlock);
+ unlock_kernel();
return 0;
}
@@ -812,7 +819,7 @@ static const struct block_device_operations pd_fops = {
.owner = THIS_MODULE,
.open = pd_open,
.release = pd_release,
- .locked_ioctl = pd_ioctl,
+ .ioctl = pd_ioctl,
.getgeo = pd_getgeo,
.media_changed = pd_check_media,
.revalidate_disk= pd_revalidate
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index c059aab3006..4457b494882 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -152,6 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
static DEFINE_SPINLOCK(pf_spin_lock);
@@ -266,7 +267,7 @@ static const struct block_device_operations pf_fops = {
.owner = THIS_MODULE,
.open = pf_open,
.release = pf_release,
- .locked_ioctl = pf_ioctl,
+ .ioctl = pf_ioctl,
.getgeo = pf_getgeo,
.media_changed = pf_check_media,
};
@@ -299,20 +300,26 @@ static void __init pf_init_units(void)
static int pf_open(struct block_device *bdev, fmode_t mode)
{
struct pf_unit *pf = bdev->bd_disk->private_data;
+ int ret;
+ lock_kernel();
pf_identify(pf);
+ ret = -ENODEV;
if (pf->media_status == PF_NM)
- return -ENODEV;
+ goto out;
+ ret = -EROFS;
if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
- return -EROFS;
+ goto out;
+ ret = 0;
pf->access++;
if (pf->removable)
pf_lock(pf, 1);
-
- return 0;
+out:
+ unlock_kernel();
+ return ret;
}
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -342,7 +349,10 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u
if (pf->access != 1)
return -EBUSY;
+ lock_kernel();
pf_eject(pf);
+ unlock_kernel();
+
return 0;
}
@@ -350,14 +360,18 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
{
struct pf_unit *pf = disk->private_data;
- if (pf->access <= 0)
+ lock_kernel();
+ if (pf->access <= 0) {
+ unlock_kernel();
return -EINVAL;
+ }
pf->access--;
if (!pf->access && pf->removable)
pf_lock(pf, 0);
+ unlock_kernel();
return 0;
}
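pf_open() above is reshaped into a single-exit error path: set the error code, jump to one label, and unlock in exactly one place. A generic sketch of that shape, with made-up parameters:

#include <linux/errno.h>
#include <linux/smp_lock.h>

static int my_open(int writable, int medium_present, int read_only)
{
	int ret;

	lock_kernel();
	ret = -ENODEV;
	if (!medium_present)
		goto out;
	ret = -EROFS;
	if (read_only && writable)
		goto out;
	ret = 0;
out:
	unlock_kernel();
	return ret;
}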
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 8a549db2aa7..37a2bb59507 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -57,6 +57,7 @@
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
+#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
@@ -1221,7 +1222,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
pkt->bio->bi_flags = 1 << BIO_UPTODATE;
pkt->bio->bi_idx = 0;
- BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+ BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
@@ -2368,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_shrink_pktlist(pd);
}
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
@@ -2382,6 +2383,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
VPRINTK(DRIVER_NAME": entering open\n");
+ lock_kernel();
mutex_lock(&ctl_mutex);
pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
if (!pd) {
@@ -2409,6 +2411,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
}
mutex_unlock(&ctl_mutex);
+ unlock_kernel();
return 0;
out_dec:
@@ -2416,6 +2419,7 @@ out_dec:
out:
VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
mutex_unlock(&ctl_mutex);
+ unlock_kernel();
return ret;
}
@@ -2424,6 +2428,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
struct pktcdvd_device *pd = disk->private_data;
int ret = 0;
+ lock_kernel();
mutex_lock(&ctl_mutex);
pd->refcnt--;
BUG_ON(pd->refcnt < 0);
@@ -2432,6 +2437,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
pkt_release_dev(pd, flush);
}
mutex_unlock(&ctl_mutex);
+ unlock_kernel();
return ret;
}
@@ -2762,10 +2768,12 @@ out_mem:
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
+ int ret;
VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ lock_kernel();
switch (cmd) {
case CDROMEJECT:
/*
@@ -2783,14 +2791,16 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
- return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
+ ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
+ break;
default:
VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
- return -ENOTTY;
+ ret = -ENOTTY;
}
+ unlock_kernel();
- return 0;
+ return ret;
}
static int pkt_media_changed(struct gendisk *disk)
@@ -2812,7 +2822,7 @@ static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
.release = pkt_close,
- .locked_ioctl = pkt_ioctl,
+ .ioctl = pkt_ioctl,
.media_changed = pkt_media_changed,
};
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 3b419e3fffa..e9da874d041 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -196,13 +196,12 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (blk_fs_request(req)) {
- if (ps3disk_submit_request_sg(dev, req))
- break;
- } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
- req->cmd[0] == REQ_LB_OP_FLUSH) {
+ if (req->cmd_flags & REQ_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
+ } else if (req->cmd_type == REQ_TYPE_FS) {
+ if (ps3disk_submit_request_sg(dev, req))
+ break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO);
@@ -257,8 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
- if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
- req->cmd[0] == REQ_LB_OP_FLUSH) {
+ if (req->cmd_flags & REQ_FLUSH) {
read = 0;
op = "flush";
} else {
@@ -398,16 +396,6 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
return 0;
}
-static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
-{
- struct ps3_storage_device *dev = q->queuedata;
-
- dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
- req->cmd_type = REQ_TYPE_LINUX_BLOCK;
- req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -480,8 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
- blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
- ps3disk_prepare_flush);
+ blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
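With the REQ_TYPE_LINUX_BLOCK/REQ_LB_OP_FLUSH plumbing gone, ps3disk no longer installs a prepare_flush callback; the block layer marks cache flushes with REQ_FLUSH in cmd_flags, and blk_queue_ordered() is called with just the queue and the ordering mode. A condensed sketch of the resulting request-function dispatch, where foo_submit_flush()/foo_submit_rw() stand in for the driver-specific submit paths (returning non-zero to mean "controller busy, retry later") and are not from the patch:

#include <linux/blkdev.h>

static void foo_do_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q))) {
		if (req->cmd_flags & REQ_FLUSH) {
			/* cache flush: no data transfer, just order write-back */
			if (foo_submit_flush(req))
				break;
		} else if (req->cmd_type == REQ_TYPE_FS) {
			/* ordinary filesystem read/write */
			if (foo_submit_rw(req))
				break;
		} else {
			blk_dump_rq_flags(req, "foo bad request");
			__blk_end_request_all(req, -EIO);
		}
	}
}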
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index e463657569f..2e46815876d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -20,6 +20,7 @@
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -661,11 +662,23 @@ out:
return err;
}
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = floppy_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
+ lock_kernel();
if (fs->ref_count < 0)
fs->ref_count = 0;
else if (fs->ref_count > 0)
@@ -673,6 +686,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
if (fs->ref_count == 0)
swim_motor(base, OFF);
+ unlock_kernel();
return 0;
}
@@ -690,7 +704,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
case FDEJECT:
if (fs->ref_count != 1)
return -EBUSY;
+ lock_kernel();
err = floppy_eject(fs);
+ unlock_kernel();
return err;
case FDGETPRM:
@@ -751,9 +767,9 @@ static int floppy_revalidate(struct gendisk *disk)
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
- .open = floppy_open,
+ .open = floppy_unlocked_open,
.release = floppy_release,
- .locked_ioctl = floppy_ioctl,
+ .ioctl = floppy_ioctl,
.getgeo = floppy_getgeo,
.media_changed = floppy_check_change,
.revalidate_disk = floppy_revalidate,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ed6fb91123a..cc6a3864822 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -25,6 +25,7 @@
#include <linux/ioctl.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/io.h>
@@ -839,7 +840,7 @@ static int fd_eject(struct floppy_state *fs)
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
-static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -867,6 +868,18 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
return -ENOTTY;
}
+static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long param)
+{
+ int ret;
+
+ lock_kernel();
+ ret = floppy_locked_ioctl(bdev, mode, cmd, param);
+ unlock_kernel();
+
+ return ret;
+}
+
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -936,15 +949,28 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = floppy_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+ lock_kernel();
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, 0xff);
swim3_select(fs, RELAX);
}
+ unlock_kernel();
return 0;
}
@@ -995,9 +1021,9 @@ static int floppy_revalidate(struct gendisk *disk)
}
static const struct block_device_operations floppy_fops = {
- .open = floppy_open,
+ .open = floppy_unlocked_open,
.release = floppy_release,
- .locked_ioctl = floppy_ioctl,
+ .ioctl = floppy_ioctl,
.media_changed = floppy_check_change,
.revalidate_disk= floppy_revalidate,
};
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0536b5b29ad..c48e1487858 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -28,6 +28,7 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
@@ -648,7 +649,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
return 0;
}
- if (lun->changed && !blk_pc_request(rq)) {
+ if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
blk_start_request(rq);
ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
@@ -684,7 +685,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
}
urq->nsg = n_elem;
- if (blk_pc_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ub_cmd_build_packet(sc, lun, cmd, urq);
} else {
ub_cmd_build_block(sc, lun, cmd, urq);
@@ -781,7 +782,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
rq = urq->rq;
if (cmd->error == 0) {
- if (blk_pc_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
if (cmd->act_len >= rq->resid_len)
rq->resid_len = 0;
else
@@ -795,7 +796,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
}
}
} else {
- if (blk_pc_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
rq->sense_len = UB_SENSE_SIZE;
@@ -1710,6 +1711,18 @@ err_open:
return rc;
}
+static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ub_bd_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
+
/*
*/
static int ub_bd_release(struct gendisk *disk, fmode_t mode)
@@ -1717,7 +1730,10 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
struct ub_lun *lun = disk->private_data;
struct ub_dev *sc = lun->udev;
+ lock_kernel();
ub_put(sc);
+ unlock_kernel();
+
return 0;
}
@@ -1729,8 +1745,13 @@ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
{
struct gendisk *disk = bdev->bd_disk;
void __user *usermem = (void __user *) arg;
+ int ret;
+
+ lock_kernel();
+ ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+ unlock_kernel();
- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+ return ret;
}
/*
@@ -1792,9 +1813,9 @@ static int ub_bd_media_changed(struct gendisk *disk)
static const struct block_device_operations ub_bd_fops = {
.owner = THIS_MODULE,
- .open = ub_bd_open,
+ .open = ub_bd_unlocked_open,
.release = ub_bd_release,
- .locked_ioctl = ub_bd_ioctl,
+ .ioctl = ub_bd_ioctl,
.media_changed = ub_bd_media_changed,
.revalidate_disk = ub_bd_revalidate,
};
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 2f9470ff8f7..8be57151f5d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
le32_to_cpu(desc->local_addr)>>9,
le32_to_cpu(desc->transfer_size));
dump_dmastat(card, control);
- } else if (test_bit(BIO_RW, &bio->bi_rw) &&
+ } else if ((bio->bi_rw & REQ_WRITE) &&
le32_to_cpu(desc->local_addr) >> 9 ==
card->init_size) {
card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 788d93882ab..f651e51a331 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -41,6 +41,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/smp_lock.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -175,6 +176,18 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
return 0;
}
+static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = viodasd_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
+
/*
* External release entry point.
*/
@@ -183,6 +196,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
struct viodasd_device *d = disk->private_data;
HvLpEvent_Rc hvrc;
+ lock_kernel();
/* Send the event to OS/400. We DON'T expect a response */
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
@@ -195,6 +209,9 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
0, 0, 0);
if (hvrc != 0)
pr_warning("HV close call failed %d\n", (int)hvrc);
+
+ unlock_kernel();
+
return 0;
}
@@ -219,7 +236,7 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
*/
static const struct block_device_operations viodasd_fops = {
.owner = THIS_MODULE,
- .open = viodasd_open,
+ .open = viodasd_unlocked_open,
.release = viodasd_release,
.getgeo = viodasd_getgeo,
};
@@ -361,7 +378,7 @@ static void do_viodasd_request(struct request_queue *q)
if (req == NULL)
return;
/* check that request contains a valid command */
- if (!blk_fs_request(req)) {
+ if (req->cmd_type != REQ_TYPE_FS) {
viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 258bc2ae288..2aafafca2b1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -2,6 +2,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
@@ -65,13 +66,18 @@ static void blk_done(struct virtqueue *vq)
break;
}
- if (blk_pc_request(vbr->req)) {
+ switch (vbr->req->cmd_type) {
+ case REQ_TYPE_BLOCK_PC:
vbr->req->resid_len = vbr->in_hdr.residual;
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
- }
- if (blk_special_request(vbr->req))
+ break;
+ case REQ_TYPE_SPECIAL:
vbr->req->errors = (error != 0);
+ break;
+ default:
+ break;
+ }
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
@@ -94,36 +100,35 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
return false;
vbr->req = req;
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
- vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = blk_rq_pos(vbr->req);
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- break;
- case REQ_TYPE_BLOCK_PC:
- vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- break;
- case REQ_TYPE_SPECIAL:
- vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+
+ if (req->cmd_flags & REQ_FLUSH) {
+ vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- break;
- case REQ_TYPE_LINUX_BLOCK:
- if (req->cmd[0] == REQ_LB_OP_FLUSH) {
- vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+ } else {
+ switch (req->cmd_type) {
+ case REQ_TYPE_FS:
+ vbr->out_hdr.type = 0;
+ vbr->out_hdr.sector = blk_rq_pos(vbr->req);
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
+ case REQ_TYPE_BLOCK_PC:
+ vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
+ case REQ_TYPE_SPECIAL:
+ vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
break;
+ default:
+ /* We don't put anything else in the queue. */
+ BUG();
}
- /*FALLTHRU*/
- default:
- /* We don't put anything else in the queue. */
- BUG();
}
- if (blk_barrier_rq(vbr->req))
+ if (vbr->req->cmd_flags & REQ_HARDBARRIER)
vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
@@ -134,12 +139,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
* block, and before the normal inhdr we put the sense data and the
* inhdr with additional status information before the normal inhdr.
*/
- if (blk_pc_request(vbr->req))
+ if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
- if (blk_pc_request(vbr->req)) {
+ if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
sizeof(vbr->in_hdr));
@@ -190,12 +195,6 @@ static void do_virtblk_request(struct request_queue *q)
virtqueue_kick(vblk->vq);
}
-static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
-{
- req->cmd_type = REQ_TYPE_LINUX_BLOCK;
- req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -219,22 +218,12 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
}
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
- if (cmd == 0x56424944) { /* 'VBID' */
- void __user *usr_data = (void __user *)data;
- char id_str[VIRTIO_BLK_ID_BYTES];
- int err;
-
- err = virtblk_get_id(disk, id_str);
- if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
- err = -EFAULT;
- return err;
- }
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
@@ -245,6 +234,18 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
(void __user *)data);
}
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long param)
+{
+ int ret;
+
+ lock_kernel();
+ ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
+ unlock_kernel();
+
+ return ret;
+}
+
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
@@ -271,7 +272,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
}
static const struct block_device_operations virtblk_fops = {
- .locked_ioctl = virtblk_ioctl,
+ .ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
};
@@ -281,6 +282,27 @@ static int index_to_minor(int index)
return index << PART_BITS;
}
+static ssize_t virtblk_serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int err;
+
+ /* sysfs gives us a PAGE_SIZE buffer */
+ BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
+
+ buf[VIRTIO_BLK_ID_BYTES] = '\0';
+ err = virtblk_get_id(disk, buf);
+ if (!err)
+ return strlen(buf);
+
+ if (err == -EIO) /* Unsupported? Make it empty. */
+ return 0;
+
+ return err;
+}
+DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -366,12 +388,31 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->disk->driverfs_dev = &vdev->dev;
index++;
- /* If barriers are supported, tell block layer that queue is ordered */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
- blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
- virtblk_prepare_flush);
- else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
- blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
+ /*
+ * If the FLUSH feature is supported we do have support for
+ * flushing a volatile write cache on the host. Use that
+ * to implement write barrier support.
+ */
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
+ } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
+ /*
+ * If the BARRIER feature is supported, the host expects us
+ * to order requests by tags. This implies there is no
+ * volatile write cache on the host, and that the host
+ * never re-orders outstanding I/O. This feature is not
+ * useful for real-life scenarios and is deprecated.
+ */
+ blk_queue_ordered(q, QUEUE_ORDERED_TAG);
+ } else {
+ /*
+ * If the FLUSH feature is not supported we must assume that
+ * the host does not perform any kind of volatile write
+ * caching. We still need to drain the queue to provide
+ * proper barrier semantics.
+ */
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
+ }
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -445,8 +486,15 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
add_disk(vblk->disk);
+ err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
+ if (err)
+ goto out_del_disk;
+
return 0;
+out_del_disk:
+ del_gendisk(vblk->disk);
+ blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
put_disk(vblk->disk);
out_mempool:
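The comment block added to virtblk_probe() explains how the negotiated features map onto a queue ordering mode. Distilled into a helper (a sketch only; the patch keeps this logic inline in the probe path, and foo_pick_ordered() is a made-up name):

#include <linux/blkdev.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_blk.h>

static unsigned int foo_pick_ordered(struct virtio_device *vdev)
{
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
		/* host has a volatile write cache we can flush on demand */
		return QUEUE_ORDERED_DRAIN_FLUSH;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
		/* deprecated: host orders by tag, no volatile write cache */
		return QUEUE_ORDERED_TAG;

	/* no flush support: assume no write cache, draining is enough */
	return QUEUE_ORDERED_DRAIN;
}

With that helper the probe path would reduce to blk_queue_ordered(q, foo_pick_ordered(vdev)).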
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 18a80ff57ce..d5a3cd75056 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -46,6 +46,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/blkpg.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -133,7 +134,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static const struct block_device_operations xd_fops = {
.owner = THIS_MODULE,
- .locked_ioctl = xd_ioctl,
+ .ioctl = xd_ioctl,
.getgeo = xd_getgeo,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
@@ -322,7 +323,7 @@ static void do_xd_request (struct request_queue * q)
int res = -EIO;
int retry;
- if (!blk_fs_request(req))
+ if (req->cmd_type != REQ_TYPE_FS)
goto done;
if (block + count > get_capacity(req->rq_disk))
goto done;
@@ -347,7 +348,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
/* xd_ioctl: handle device ioctl's */
-static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
+static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
{
switch (cmd) {
case HDIO_SET_DMA:
@@ -375,6 +376,18 @@ static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long a
}
}
+static int xd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long param)
+{
+ int ret;
+
+ lock_kernel();
+ ret = xd_locked_ioctl(bdev, mode, cmd, param);
+ unlock_kernel();
+
+ return ret;
+}
+
/* xd_readwrite: handle a read/write request */
static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
{
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 82ed403147c..ab735a605cf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -41,6 +41,7 @@
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/scatterlist.h>
#include <xen/xen.h>
@@ -48,6 +49,7 @@
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
+#include <xen/platform_pci.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
@@ -78,6 +80,7 @@ static const struct block_device_operations xlvbd_block_fops;
*/
struct blkfront_info
{
+ struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
int vdevice;
@@ -94,16 +97,14 @@ struct blkfront_info
unsigned long shadow_free;
int feature_barrier;
int is_ready;
-
- /**
- * The number of people holding this device open. We won't allow a
- * hot-unplug unless this is 0.
- */
- int users;
};
static DEFINE_SPINLOCK(blkif_io_lock);
+static unsigned int nr_minors;
+static unsigned long *minors;
+static DEFINE_SPINLOCK(minor_lock);
+
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF 0
@@ -138,6 +139,55 @@ static void add_id_to_freelist(struct blkfront_info *info,
info->shadow_free = id;
}
+static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
+{
+ unsigned int end = minor + nr;
+ int rc;
+
+ if (end > nr_minors) {
+ unsigned long *bitmap, *old;
+
+ bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
+ GFP_KERNEL);
+ if (bitmap == NULL)
+ return -ENOMEM;
+
+ spin_lock(&minor_lock);
+ if (end > nr_minors) {
+ old = minors;
+ memcpy(bitmap, minors,
+ BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
+ minors = bitmap;
+ nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
+ } else
+ old = bitmap;
+ spin_unlock(&minor_lock);
+ kfree(old);
+ }
+
+ spin_lock(&minor_lock);
+ if (find_next_bit(minors, end, minor) >= end) {
+ for (; minor < end; ++minor)
+ __set_bit(minor, minors);
+ rc = 0;
+ } else
+ rc = -EBUSY;
+ spin_unlock(&minor_lock);
+
+ return rc;
+}
+
+static void xlbd_release_minors(unsigned int minor, unsigned int nr)
+{
+ unsigned int end = minor + nr;
+
+ BUG_ON(end > nr_minors);
+ spin_lock(&minor_lock);
+ for (; minor < end; ++minor)
+ __clear_bit(minor, minors);
+ spin_unlock(&minor_lock);
+}
+
static void blkif_restart_queue_callback(void *arg)
{
struct blkfront_info *info = (struct blkfront_info *)arg;
@@ -238,7 +288,7 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (blk_barrier_rq(req))
+ if (req->cmd_flags & REQ_HARDBARRIER)
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -309,7 +359,7 @@ static void do_blkif_request(struct request_queue *rq)
blk_start_request(req);
- if (!blk_fs_request(req)) {
+ if (req->cmd_type != REQ_TYPE_FS) {
__blk_end_request_all(req, -EIO);
continue;
}
@@ -371,17 +421,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static int xlvbd_barrier(struct blkfront_info *info)
{
int err;
+ const char *barrier;
- err = blk_queue_ordered(info->rq,
- info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
- NULL);
+ switch (info->feature_barrier) {
+ case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break;
+ case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break;
+ case QUEUE_ORDERED_NONE: barrier = "disabled"; break;
+ default: return -EINVAL;
+ }
+
+ err = blk_queue_ordered(info->rq, info->feature_barrier);
if (err)
return err;
printk(KERN_INFO "blkfront: %s: barriers %s\n",
- info->gd->disk_name,
- info->feature_barrier ? "enabled" : "disabled");
+ info->gd->disk_name, barrier);
return 0;
}
@@ -417,9 +472,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
+ err = xlbd_reserve_minors(minor, nr_minors);
+ if (err)
+ goto out;
+ err = -ENODEV;
+
gd = alloc_disk(nr_minors);
if (gd == NULL)
- goto out;
+ goto release;
offset = minor / nr_parts;
@@ -450,14 +510,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (xlvbd_init_blk_queue(gd, sector_size)) {
del_gendisk(gd);
- goto out;
+ goto release;
}
info->rq = gd->queue;
info->gd = gd;
- if (info->feature_barrier)
- xlvbd_barrier(info);
+ xlvbd_barrier(info);
if (vdisk_info & VDISK_READONLY)
set_disk_ro(gd, 1);
@@ -470,10 +529,45 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
return 0;
+ release:
+ xlbd_release_minors(minor, nr_minors);
out:
return err;
}
+static void xlvbd_release_gendisk(struct blkfront_info *info)
+{
+ unsigned int minor, nr_minors;
+ unsigned long flags;
+
+ if (info->rq == NULL)
+ return;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ /* No more blkif_request(). */
+ blk_stop_queue(info->rq);
+
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ del_gendisk(info->gd);
+
+ minor = info->gd->first_minor;
+ nr_minors = info->gd->minors;
+ xlbd_release_minors(minor, nr_minors);
+
+ blk_cleanup_queue(info->rq);
+ info->rq = NULL;
+
+ put_disk(info->gd);
+ info->gd = NULL;
+}
+
static void kick_pending_request_queues(struct blkfront_info *info)
{
if (!RING_FULL(&info->ring)) {
@@ -568,7 +662,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
info->gd->disk_name);
error = -EOPNOTSUPP;
- info->feature_barrier = 0;
+ info->feature_barrier = QUEUE_ORDERED_NONE;
xlvbd_barrier(info);
}
/* fall through */
@@ -651,7 +745,7 @@ fail:
/* Common code used when first setting up, and when resuming. */
-static int talk_to_backend(struct xenbus_device *dev,
+static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
{
const char *message = NULL;
@@ -711,7 +805,6 @@ again:
return err;
}
-
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -737,12 +830,42 @@ static int blkfront_probe(struct xenbus_device *dev,
}
}
+ if (xen_hvm_domain()) {
+ char *type;
+ int len;
+ /* no unplug has been done: do not hook devices != xen vbds */
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
+ int major;
+
+ if (!VDEV_IS_EXTENDED(vdevice))
+ major = BLKIF_MAJOR(vdevice);
+ else
+ major = XENVBD_MAJOR;
+
+ if (major != XENVBD_MAJOR) {
+ printk(KERN_INFO
+ "%s: HVM does not support vbd %d as xen block device\n",
+ __FUNCTION__, vdevice);
+ return -ENODEV;
+ }
+ }
+ /* do not create a PV cdrom device if we are an HVM guest */
+ type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
+ if (IS_ERR(type))
+ return -ENODEV;
+ if (strncmp(type, "cdrom", 5) == 0) {
+ kfree(type);
+ return -ENODEV;
+ }
+ kfree(type);
+ }
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
return -ENOMEM;
}
+ mutex_init(&info->mutex);
info->xbdev = dev;
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
@@ -756,7 +879,7 @@ static int blkfront_probe(struct xenbus_device *dev,
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
- err = talk_to_backend(dev, info);
+ err = talk_to_blkback(dev, info);
if (err) {
kfree(info);
dev_set_drvdata(&dev->dev, NULL);
@@ -851,13 +974,50 @@ static int blkfront_resume(struct xenbus_device *dev)
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
- err = talk_to_backend(dev, info);
+ err = talk_to_blkback(dev, info);
if (info->connected == BLKIF_STATE_SUSPENDED && !err)
err = blkif_recover(info);
return err;
}
+static void
+blkfront_closing(struct blkfront_info *info)
+{
+ struct xenbus_device *xbdev = info->xbdev;
+ struct block_device *bdev = NULL;
+
+ mutex_lock(&info->mutex);
+
+ if (xbdev->state == XenbusStateClosing) {
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ if (info->gd)
+ bdev = bdget_disk(info->gd, 0);
+
+ mutex_unlock(&info->mutex);
+
+ if (!bdev) {
+ xenbus_frontend_closed(xbdev);
+ return;
+ }
+
+ mutex_lock(&bdev->bd_mutex);
+
+ if (bdev->bd_openers) {
+ xenbus_dev_error(xbdev, -EBUSY,
+ "Device in use; refusing to close");
+ xenbus_switch_state(xbdev, XenbusStateClosing);
+ } else {
+ xlvbd_release_gendisk(info);
+ xenbus_frontend_closed(xbdev);
+ }
+
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
+}
/*
* Invoked when the backend is finally 'ready' (and has told produced
@@ -869,11 +1029,31 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long sector_size;
unsigned int binfo;
int err;
-
- if ((info->connected == BLKIF_STATE_CONNECTED) ||
- (info->connected == BLKIF_STATE_SUSPENDED) )
+ int barrier;
+
+ switch (info->connected) {
+ case BLKIF_STATE_CONNECTED:
+ /*
+ * Potentially, the back-end may be signalling
+ * a capacity change; update the capacity.
+ */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "sectors", "%Lu", &sectors);
+ if (XENBUS_EXIST_ERR(err))
+ return;
+ printk(KERN_INFO "Setting capacity to %Lu\n",
+ sectors);
+ set_capacity(info->gd, sectors);
+ revalidate_disk(info->gd);
+
+ /* fall through */
+ case BLKIF_STATE_SUSPENDED:
return;
+ default:
+ break;
+ }
+
dev_dbg(&info->xbdev->dev, "%s:%s.\n",
__func__, info->xbdev->otherend);
@@ -890,10 +1070,26 @@ static void blkfront_connect(struct blkfront_info *info)
}
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%lu", &info->feature_barrier,
+ "feature-barrier", "%lu", &barrier,
NULL);
+
+ /*
+ * If there's no "feature-barrier" defined, then it means
+ * we're dealing with a very old backend which writes
+ * synchronously; draining will do what needs to get done.
+ *
+ * If there are barriers, then we can do full queued writes
+ * with tagged barriers.
+ *
+ * If barriers are not supported, then there's not much we can
+ * do, so just set ordering to NONE.
+ */
if (err)
- info->feature_barrier = 0;
+ info->feature_barrier = QUEUE_ORDERED_DRAIN;
+ else if (barrier)
+ info->feature_barrier = QUEUE_ORDERED_TAG;
+ else
+ info->feature_barrier = QUEUE_ORDERED_NONE;
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
@@ -916,52 +1112,14 @@ static void blkfront_connect(struct blkfront_info *info)
}
/**
- * Handle the change of state of the backend to Closing. We must delete our
- * device-layer structures now, to ensure that writes are flushed through to
- * the backend. Once is this done, we can switch to Closed in
- * acknowledgement.
- */
-static void blkfront_closing(struct xenbus_device *dev)
-{
- struct blkfront_info *info = dev_get_drvdata(&dev->dev);
- unsigned long flags;
-
- dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
-
- if (info->rq == NULL)
- goto out;
-
- spin_lock_irqsave(&blkif_io_lock, flags);
-
- /* No more blkif_request(). */
- blk_stop_queue(info->rq);
-
- /* No more gnttab callback work. */
- gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
-
- /* Flush gnttab callback work. Must be done with no locks held. */
- flush_scheduled_work();
-
- blk_cleanup_queue(info->rq);
- info->rq = NULL;
-
- del_gendisk(info->gd);
-
- out:
- xenbus_frontend_closed(dev);
-}
-
-/**
* Callback received when the backend's state changes.
*/
-static void backend_changed(struct xenbus_device *dev,
+static void blkback_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
- struct block_device *bd;
- dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+ dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
switch (backend_state) {
case XenbusStateInitialising:
@@ -976,35 +1134,56 @@ static void backend_changed(struct xenbus_device *dev,
break;
case XenbusStateClosing:
- if (info->gd == NULL) {
- xenbus_frontend_closed(dev);
- break;
- }
- bd = bdget_disk(info->gd, 0);
- if (bd == NULL)
- xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
-
- mutex_lock(&bd->bd_mutex);
- if (info->users > 0)
- xenbus_dev_error(dev, -EBUSY,
- "Device in use; refusing to close");
- else
- blkfront_closing(dev);
- mutex_unlock(&bd->bd_mutex);
- bdput(bd);
+ blkfront_closing(info);
break;
}
}
-static int blkfront_remove(struct xenbus_device *dev)
+static int blkfront_remove(struct xenbus_device *xbdev)
{
- struct blkfront_info *info = dev_get_drvdata(&dev->dev);
+ struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
+ struct block_device *bdev = NULL;
+ struct gendisk *disk;
- dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+ dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
blkif_free(info, 0);
- kfree(info);
+ mutex_lock(&info->mutex);
+
+ disk = info->gd;
+ if (disk)
+ bdev = bdget_disk(disk, 0);
+
+ info->xbdev = NULL;
+ mutex_unlock(&info->mutex);
+
+ if (!bdev) {
+ kfree(info);
+ return 0;
+ }
+
+ /*
+ * The xbdev was removed before we reached the Closed
+ * state. See if it's safe to remove the disk. If the bdev
+ * isn't closed yet, we let release take care of it.
+ */
+
+ mutex_lock(&bdev->bd_mutex);
+ info = disk->private_data;
+
+ dev_warn(disk_to_dev(disk),
+ "%s was hot-unplugged, %d stale handles\n",
+ xbdev->nodename, bdev->bd_openers);
+
+ if (info && !bdev->bd_openers) {
+ xlvbd_release_gendisk(info);
+ disk->private_data = NULL;
+ kfree(info);
+ }
+
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
return 0;
}
@@ -1013,30 +1192,78 @@ static int blkfront_is_ready(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
- return info->is_ready;
+ return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
- struct blkfront_info *info = bdev->bd_disk->private_data;
- info->users++;
- return 0;
+ struct gendisk *disk = bdev->bd_disk;
+ struct blkfront_info *info;
+ int err = 0;
+
+ lock_kernel();
+
+ info = disk->private_data;
+ if (!info) {
+ /* xbdev gone */
+ err = -ERESTARTSYS;
+ goto out;
+ }
+
+ mutex_lock(&info->mutex);
+
+ if (!info->gd)
+ /* xbdev is closed */
+ err = -ERESTARTSYS;
+
+ mutex_unlock(&info->mutex);
+
+out:
+ unlock_kernel();
+ return err;
}
static int blkif_release(struct gendisk *disk, fmode_t mode)
{
struct blkfront_info *info = disk->private_data;
- info->users--;
- if (info->users == 0) {
- /* Check whether we have been instructed to close. We will
- have ignored this request initially, as the device was
- still mounted. */
- struct xenbus_device *dev = info->xbdev;
- enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
-
- if (state == XenbusStateClosing && info->is_ready)
- blkfront_closing(dev);
+ struct block_device *bdev;
+ struct xenbus_device *xbdev;
+
+ lock_kernel();
+
+ bdev = bdget_disk(disk, 0);
+ bdput(bdev);
+
+ if (bdev->bd_openers)
+ goto out;
+
+ /*
+ * Check if we have been instructed to close. We will have
+ * deferred this request, because the bdev was still open.
+ */
+
+ mutex_lock(&info->mutex);
+ xbdev = info->xbdev;
+
+ if (xbdev && xbdev->state == XenbusStateClosing) {
+ /* pending switch to state closed */
+ dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
+ xlvbd_release_gendisk(info);
+ xenbus_frontend_closed(info->xbdev);
+ }
+
+ mutex_unlock(&info->mutex);
+
+ if (!xbdev) {
+ /* sudden device removal */
+ dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
+ xlvbd_release_gendisk(info);
+ disk->private_data = NULL;
+ kfree(info);
}
+
+out:
+ unlock_kernel();
return 0;
}
@@ -1046,7 +1273,7 @@ static const struct block_device_operations xlvbd_block_fops =
.open = blkif_open,
.release = blkif_release,
.getgeo = blkif_getgeo,
- .locked_ioctl = blkif_ioctl,
+ .ioctl = blkif_ioctl,
};
@@ -1062,7 +1289,7 @@ static struct xenbus_driver blkfront = {
.probe = blkfront_probe,
.remove = blkfront_remove,
.resume = blkfront_resume,
- .otherend_changed = backend_changed,
+ .otherend_changed = blkback_changed,
.is_ready = blkfront_is_ready,
};
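xlbd_reserve_minors()/xlbd_release_minors() above replace the old users count for a different job: a dynamically grown bitmap guarantees that two vbds are never bound to overlapping minor ranges. Stripped of the locking and reallocation that the patch needs for a growable bitmap, the core operation is a find-then-set over a fixed bitmap (illustrative sketch, not the patch code):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define FOO_MAX_MINORS 1024

static DECLARE_BITMAP(foo_minors, FOO_MAX_MINORS);

/* Reserve [minor, minor + nr); fail if any minor in the range is taken. */
static int foo_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	if (end > FOO_MAX_MINORS)
		return -EINVAL;
	if (find_next_bit(foo_minors, end, minor) < end)
		return -EBUSY;		/* something in the range is in use */
	bitmap_set(foo_minors, minor, nr);
	return 0;
}

static void foo_release_minors(unsigned int minor, unsigned int nr)
{
	bitmap_clear(foo_minors, minor, nr);
}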
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index a7b83c0a7eb..057413bb16e 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -89,10 +89,12 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/platform_device.h>
#if defined(CONFIG_OF)
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
@@ -465,7 +467,7 @@ struct request *ace_get_next_request(struct request_queue * q)
struct request *req;
while ((req = blk_peek_request(q)) != NULL) {
- if (blk_fs_request(req))
+ if (req->cmd_type == REQ_TYPE_FS)
break;
blk_start_request(req);
__blk_end_request_all(req, -EIO);
@@ -901,11 +903,14 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
+ lock_kernel();
spin_lock_irqsave(&ace->lock, flags);
ace->users++;
spin_unlock_irqrestore(&ace->lock, flags);
check_disk_change(bdev);
+ unlock_kernel();
+
return 0;
}
@@ -917,6 +922,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
+ lock_kernel();
spin_lock_irqsave(&ace->lock, flags);
ace->users--;
if (ace->users == 0) {
@@ -924,6 +930,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
}
spin_unlock_irqrestore(&ace->lock, flags);
+ unlock_kernel();
return 0;
}
@@ -1188,7 +1195,7 @@ static struct platform_driver ace_platform_driver = {
#if defined(CONFIG_OF)
static int __devinit
-ace_of_probe(struct of_device *op, const struct of_device_id *match)
+ace_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct resource res;
resource_size_t physaddr;
@@ -1220,7 +1227,7 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width);
}
-static int __devexit ace_of_remove(struct of_device *op)
+static int __devexit ace_of_remove(struct platform_device *op)
{
ace_free(&op->dev);
return 0;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 9114654b54d..d75b2bb601a 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
+#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <asm/setup.h>
@@ -153,6 +154,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
device = MINOR(bdev->bd_dev);
+ lock_kernel();
if ( current_device != -1 && current_device != device )
{
rc = -EBUSY;
@@ -294,20 +296,25 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
set_capacity(z2ram_gendisk, z2ram_size >> 9);
}
+ unlock_kernel();
return 0;
err_out_kfree:
kfree(z2ram_map);
err_out:
+ unlock_kernel();
return rc;
}
static int
z2_release(struct gendisk *disk, fmode_t mode)
{
- if ( current_device == -1 )
- return 0;
-
+ lock_kernel();
+ if ( current_device == -1 ) {
+ unlock_kernel();
+ return 0;
+ }
+ unlock_kernel();
/*
* FIXME: unmap memory
*/
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6d34f405a2f..d52e90a5a61 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -39,7 +39,6 @@
#include <linux/skbuff.h>
#include <linux/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -160,7 +159,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
static void bluecard_activity_led_timeout(u_long arg)
{
bluecard_info_t *info = (bluecard_info_t *)arg;
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
return;
@@ -177,7 +176,7 @@ static void bluecard_activity_led_timeout(u_long arg)
static void bluecard_enable_activity_led(bluecard_info_t *info)
{
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
return;
@@ -233,7 +232,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->io.BasePort1;
+ register unsigned int iobase = info->p_dev->resource[0]->start;
register unsigned int offset;
register unsigned char command;
register unsigned long ready_bit;
@@ -380,7 +379,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
return;
}
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
if (test_bit(XMIT_SENDING_READY, &(info->tx_state)))
bluecard_enable_activity_led(info);
@@ -509,7 +508,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst)
if (!test_bit(CARD_READY, &(info->hw_state)))
return IRQ_HANDLED;
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
spin_lock(&(info->lock));
@@ -623,7 +622,7 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -643,7 +642,7 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
@@ -710,7 +709,7 @@ static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned l
static int bluecard_open(bluecard_info_t *info)
{
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev;
unsigned char id;
@@ -829,7 +828,7 @@ static int bluecard_open(bluecard_info_t *info)
static int bluecard_close(bluecard_info_t *info)
{
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev = info->hdev;
if (!hdev)
@@ -866,9 +865,6 @@ static int bluecard_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 8;
-
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -891,12 +887,14 @@ static int bluecard_config(struct pcmcia_device *link)
int i, n;
link->conf.ConfigIndex = 0x20;
- link->io.NumPorts1 = 64;
- link->io.IOAddrLines = 6;
+
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 64;
+ link->io_lines = 6;
for (n = 0; n < 0x400; n += 0x40) {
- link->io.BasePort1 = n ^ 0x300;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = n ^ 0x300;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
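The bluecard conversion above (and the bt3c, btuart and dtl1 ones that follow) tracks the pcmcia core's switch from the per-device io_req_t fields (io.BasePort1, io.NumPorts1, io.IOAddrLines) to generic struct resource slots in p_dev->resource[] plus p_dev->io_lines, with pcmcia_request_io() now taking only the device. A hedged sketch of a probe/config pair after the conversion (foo_* names are illustrative):

#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int foo_config(struct pcmcia_device *link)
{
	link->io_lines = 6;			/* number of decoded address lines */
	link->resource[0]->start = 0x300;	/* requested base port */

	return pcmcia_request_io(link);		/* no io_req_t argument anymore */
}

static int foo_probe(struct pcmcia_device *link)
{
	/* describe the I/O window through resource[0] instead of link->io */
	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
	link->resource[0]->end = 8;		/* window length in ports */

	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType = INT_MEMORY_AND_IO;

	return foo_config(link);
}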
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 21e05fdc912..7ab8f29d5e0 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -45,7 +45,6 @@
#include <linux/device.h>
#include <linux/firmware.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -189,7 +188,7 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
return;
do {
- register unsigned int iobase = info->p_dev->io.BasePort1;
+ register unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
@@ -227,7 +226,7 @@ static void bt3c_receive(bt3c_info_t *info)
return;
}
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
avail = bt3c_read(iobase, 0x7006);
//printk("bt3c_cs: receiving %d bytes\n", avail);
@@ -348,7 +347,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
/* our irq handler is shared */
return IRQ_NONE;
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
spin_lock(&(info->lock));
@@ -481,7 +480,7 @@ static int bt3c_load_firmware(bt3c_info_t *info, const unsigned char *firmware,
unsigned int iobase, size, addr, fcs, tmp;
int i, err = 0;
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
/* Reset */
bt3c_io_write(iobase, 0x8040, 0x0404);
@@ -658,8 +657,8 @@ static int bt3c_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 8;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -684,14 +683,14 @@ static int bt3c_check_config(struct pcmcia_device *p_dev,
{
unsigned long try = (unsigned long) priv_data;
+ p_dev->io_lines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
+
if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
p_dev->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) &&
(cf->io.win[0].base != 0)) {
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.IOAddrLines = (try == 0) ? 16 :
- cf->io.flags & CISTPL_IO_LINES_MASK;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -708,9 +707,9 @@ static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev,
if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
for (j = 0; j < 5; j++) {
- p_dev->io.BasePort1 = base[j];
- p_dev->io.IOAddrLines = base[j] ? 16 : 3;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = base[j];
+ p_dev->io_lines = base[j] ? 16 : 3;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 0d32ec82e9b..548d1d9e4dd 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -117,8 +117,8 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
(event->data[2] == MODULE_ALREADY_UP)) ?
"Bring-up succeed" : "Bring-up failed");
- if (event->length > 3)
- priv->btmrvl_dev.dev_type = event->data[3];
+ if (event->length > 3 && event->data[3])
+ priv->btmrvl_dev.dev_type = HCI_AMP;
else
priv->btmrvl_dev.dev_type = HCI_BREDR;
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 76e5127884f..792e32d29a1 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -46,6 +46,9 @@ static const struct sdio_device_id btsdio_table[] = {
/* Generic Bluetooth Type-B SDIO device */
{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
+ /* Generic Bluetooth AMP controller */
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
+
{ } /* Terminating entry */
};
@@ -329,6 +332,11 @@ static int btsdio_probe(struct sdio_func *func,
hdev->bus = HCI_SDIO;
hdev->driver_data = data;
+ if (id->class == SDIO_CLASS_BT_AMP)
+ hdev->dev_type = HCI_AMP;
+ else
+ hdev->dev_type = HCI_BREDR;
+
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &func->dev);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 4ed7288f99d..1c4f5e863b0 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -41,7 +41,6 @@
#include <asm/system.h>
#include <asm/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -143,7 +142,7 @@ static void btuart_write_wakeup(btuart_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->io.BasePort1;
+ register unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
@@ -184,7 +183,7 @@ static void btuart_receive(btuart_info_t *info)
return;
}
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
do {
info->hdev->stat.byte_rx++;
@@ -298,7 +297,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
/* our irq handler is shared */
return IRQ_NONE;
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
spin_lock(&(info->lock));
@@ -355,7 +354,7 @@ static void btuart_change_speed(btuart_info_t *info, unsigned int speed)
return;
}
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
spin_lock_irqsave(&(info->lock), flags);
@@ -479,7 +478,7 @@ static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned lon
static int btuart_open(btuart_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev;
spin_lock_init(&(info->lock));
@@ -549,7 +548,7 @@ static int btuart_open(btuart_info_t *info)
static int btuart_close(btuart_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev = info->hdev;
if (!hdev)
@@ -587,8 +586,8 @@ static int btuart_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 8;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -613,14 +612,14 @@ static int btuart_check_config(struct pcmcia_device *p_dev,
{
int *try = priv_data;
+ p_dev->io_lines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
+
if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
p_dev->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) &&
(cf->io.win[0].base != 0)) {
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.IOAddrLines = (*try == 0) ? 16 :
- cf->io.flags & CISTPL_IO_LINES_MASK;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -637,9 +636,9 @@ static int btuart_check_config_notpicky(struct pcmcia_device *p_dev,
if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
for (j = 0; j < 5; j++) {
- p_dev->io.BasePort1 = base[j];
- p_dev->io.IOAddrLines = base[j] ? 16 : 3;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = base[j];
+ p_dev->io_lines = base[j] ? 16 : 3;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d22ce3cc611..d120a5c1c09 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -59,9 +59,15 @@ static struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ /* Apple MacBookPro 7,1 */
+ { USB_DEVICE(0x05ac, 0x8213) },
+
/* Apple iMac11,1 */
{ USB_DEVICE(0x05ac, 0x8215) },
+ /* Apple MacBookPro6,2 */
+ { USB_DEVICE(0x05ac, 0x8218) },
+
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index cbe9e44a42e..db7c8db695f 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -41,7 +41,6 @@
#include <asm/system.h>
#include <asm/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -150,7 +149,7 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->io.BasePort1;
+ register unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
@@ -215,7 +214,7 @@ static void dtl1_receive(dtl1_info_t *info)
return;
}
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
do {
info->hdev->stat.byte_rx++;
@@ -302,7 +301,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
/* our irq handler is shared */
return IRQ_NONE;
- iobase = info->p_dev->io.BasePort1;
+ iobase = info->p_dev->resource[0]->start;
spin_lock(&(info->lock));
@@ -462,7 +461,7 @@ static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long
static int dtl1_open(dtl1_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev;
spin_lock_init(&(info->lock));
@@ -509,7 +508,8 @@ static int dtl1_open(dtl1_info_t *info)
outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);
- info->ri_latch = inb(info->p_dev->io.BasePort1 + UART_MSR) & UART_MSR_RI;
+ info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR)
+ & UART_MSR_RI;
/* Turn on interrupts */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
@@ -534,7 +534,7 @@ static int dtl1_open(dtl1_info_t *info)
static int dtl1_close(dtl1_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->p_dev->io.BasePort1;
+ unsigned int iobase = info->p_dev->resource[0]->start;
struct hci_dev *hdev = info->hdev;
if (!hdev)
@@ -572,8 +572,8 @@ static int dtl1_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 8;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -597,14 +597,13 @@ static int dtl1_confcheck(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- if ((cf->io.nwin == 1) && (cf->io.win[0].len > 8)) {
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.NumPorts1 = cf->io.win[0].len; /*yo */
- p_dev->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
- return 0;
- }
- return -ENODEV;
+ if ((cf->io.nwin != 1) || (cf->io.win[0].len <= 8))
+ return -ENODEV;
+
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->resource[0]->end = cf->io.win[0].len; /*yo */
+ p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ return pcmcia_request_io(p_dev);
}
static int dtl1_config(struct pcmcia_device *link)
@@ -613,7 +612,7 @@ static int dtl1_config(struct pcmcia_device *link)
int i;
/* Look for a generic full-sized window */
- link->io.NumPorts1 = 8;
+ link->resource[0]->end = 8;
if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
goto failed;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 998833d93c1..74cb6f3e86c 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -101,7 +101,7 @@ static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
break;
case HCI_SCODATA_PKT:
- hdev->stat.cmd_tx++;
+ hdev->stat.sco_tx++;
break;
}
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index e3749d0ba68..af13c62dc47 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -242,6 +242,8 @@
-------------------------------------------------------------------------*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define REVISION "Revision: 3.20"
#define VERSION "Id: cdrom.c 3.20 2003/12/17"
@@ -314,11 +316,17 @@ static const char *mrw_format_status[] = {
static const char *mrw_address_space[] = { "DMA", "GAA" };
#if (ERRLOGMASK!=CD_NOTHING)
-#define cdinfo(type, fmt, args...) \
- if ((ERRLOGMASK & type) || debug==1 ) \
- printk(KERN_INFO "cdrom: " fmt, ## args)
+#define cdinfo(type, fmt, args...) \
+do { \
+ if ((ERRLOGMASK & type) || debug == 1) \
+ pr_info(fmt, ##args); \
+} while (0)
#else
-#define cdinfo(type, fmt, args...)
+#define cdinfo(type, fmt, args...) \
+do { \
+ if (0 && (ERRLOGMASK & type) || debug == 1) \
+ pr_info(fmt, ##args); \
+} while (0)
#endif
/* These are used to simplify getting data in from and back to user land */
@@ -395,7 +403,7 @@ int register_cdrom(struct cdrom_device_info *cdi)
if (cdo->open == NULL || cdo->release == NULL)
return -EINVAL;
if (!banner_printed) {
- printk(KERN_INFO "Uniform CD-ROM driver " REVISION "\n");
+ pr_info("Uniform CD-ROM driver " REVISION "\n");
banner_printed = 1;
cdrom_sysctl_register();
}
@@ -546,7 +554,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
unsigned char buffer[12];
int ret;
- printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
+ pr_info("%sstarting format\n", cont ? "Re" : "");
/*
* FmtData bit set (bit 4), format type is 1
@@ -576,7 +584,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
ret = cdi->ops->generic_packet(cdi, &cgc);
if (ret)
- printk(KERN_INFO "cdrom: bgformat failed\n");
+ pr_info("bgformat failed\n");
return ret;
}
@@ -622,8 +630,7 @@ static int cdrom_mrw_exit(struct cdrom_device_info *cdi)
ret = 0;
if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) {
- printk(KERN_INFO "cdrom: issuing MRW back ground "
- "format suspend\n");
+ pr_info("issuing MRW background format suspend\n");
ret = cdrom_mrw_bgformat_susp(cdi, 0);
}
@@ -658,7 +665,8 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
if ((ret = cdrom_mode_select(cdi, &cgc)))
return ret;
- printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
+ pr_info("%s: mrw address space %s selected\n",
+ cdi->name, mrw_address_space[space]);
return 0;
}
@@ -762,7 +770,7 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
* always reset to DMA lba space on open
*/
if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) {
- printk(KERN_ERR "cdrom: failed setting lba address space\n");
+ pr_err("failed setting lba address space\n");
return 1;
}
@@ -781,8 +789,7 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
* 3 - MRW formatting complete
*/
ret = 0;
- printk(KERN_INFO "cdrom open: mrw_status '%s'\n",
- mrw_format_status[di.mrw_status]);
+ pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]);
if (!di.mrw_status)
ret = 1;
else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE &&
@@ -932,8 +939,7 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
return;
}
- printk(KERN_INFO "cdrom: %s: dirty DVD+RW media, \"finalizing\"\n",
- cdi->name);
+ pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name);
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_FLUSH_CACHE;
@@ -2176,7 +2182,7 @@ retry:
* frame dma, so drop to single frame dma if we need to
*/
if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) {
- printk("cdrom: dropping to single frame dma\n");
+ pr_info("dropping to single frame dma\n");
cdi->cdda_method = CDDA_BPC_SINGLE;
goto retry;
}
@@ -2189,7 +2195,7 @@ retry:
if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b)
return ret;
- printk("cdrom: dropping to old style cdda (sense=%x)\n", cdi->last_sense);
+ pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense);
cdi->cdda_method = CDDA_OLD;
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
}
@@ -3401,7 +3407,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
"\t%d", CDROM_CAN(val) != 0);
break;
default:
- printk(KERN_INFO "cdrom: invalid option%d\n", option);
+ pr_info("invalid option%d\n", option);
return 1;
}
if (!ret)
@@ -3491,7 +3497,7 @@ doit:
mutex_unlock(&cdrom_mutex);
return proc_dostring(ctl, write, buffer, lenp, ppos);
done:
- printk(KERN_INFO "cdrom: info buffer too small\n");
+ pr_info("info buffer too small\n");
goto doit;
}
@@ -3665,7 +3671,7 @@ static int __init cdrom_init(void)
static void __exit cdrom_exit(void)
{
- printk(KERN_INFO "Uniform CD-ROM driver unloaded\n");
+ pr_info("Uniform CD-ROM driver unloaded\n");
cdrom_sysctl_unregister();
}
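
The cdrom.c hunks above combine two idioms: a pr_fmt() prefix that is pasted onto every format string, and a do { ... } while (0) wrapper that keeps the conditional cdinfo() macro safe inside if/else. The minimal user-space sketch below (not the kernel macros themselves; it uses the GNU/kernel-style ##__VA_ARGS__ extension) shows both:

    #include <stdio.h>

    #define pr_fmt(fmt) "cdrom: " fmt
    #define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    static int debug = 1;
    #define LOGMASK 0x1
    #define CD_OPEN 0x1

    /* Without do/while(0), "if (x) cdinfo(...); else ..." would mis-parse. */
    #define cdinfo(type, fmt, ...)                          \
    do {                                                    \
            if ((LOGMASK & (type)) || debug == 1)           \
                    pr_info(fmt, ##__VA_ARGS__);            \
    } while (0)

    int main(void)
    {
            int opened = 1;

            if (opened)
                    cdinfo(CD_OPEN, "drive %s opened\n", "sr0");
            else
                    pr_info("nothing to do\n");
            return 0;
    }
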
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 03c71f7698c..261107d1457 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
@@ -32,6 +34,7 @@
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/smp_lock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
@@ -339,8 +342,7 @@ static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
tocuse = 0;
err = gdrom_readtoc_cmd(gd.toc, 0);
if (err) {
- printk(KERN_INFO "GDROM: Could not get CD "
- "table of contents\n");
+ pr_info("Could not get CD table of contents\n");
return -ENXIO;
}
}
@@ -357,8 +359,7 @@ static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
} while (track >= fentry);
if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
- printk(KERN_INFO "GDROM: No data on the last "
- "session of the CD\n");
+ pr_info("No data on the last session of the CD\n");
gdrom_getsense(NULL);
return -ENXIO;
}
@@ -451,14 +452,14 @@ static int gdrom_getsense(short *bufstring)
goto cleanup_sense;
insw(GDROM_DATA_REG, &sense, sense_command->buflen/2);
if (sense[1] & 40) {
- printk(KERN_INFO "GDROM: Drive not ready - command aborted\n");
+ pr_info("Drive not ready - command aborted\n");
goto cleanup_sense;
}
sense_key = sense[1] & 0x0F;
if (sense_key < ARRAY_SIZE(sense_texts))
- printk(KERN_INFO "GDROM: %s\n", sense_texts[sense_key].text);
+ pr_info("%s\n", sense_texts[sense_key].text);
else
- printk(KERN_ERR "GDROM: Unknown sense key: %d\n", sense_key);
+ pr_err("Unknown sense key: %d\n", sense_key);
if (bufstring) /* return addional sense data */
memcpy(bufstring, &sense[4], 2);
if (sense_key < 2)
@@ -492,12 +493,18 @@ static struct cdrom_device_ops gdrom_ops = {
static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
{
- return cdrom_open(gd.cd_info, bdev, mode);
+ int ret;
+ lock_kernel();
+ ret = cdrom_open(gd.cd_info, bdev, mode);
+ unlock_kernel();
+ return ret;
}
static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
{
+ lock_kernel();
cdrom_release(gd.cd_info, mode);
+ unlock_kernel();
return 0;
}
@@ -509,7 +516,13 @@ static int gdrom_bdops_mediachanged(struct gendisk *disk)
static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+ int ret;
+
+ lock_kernel();
+ ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
}
static const struct block_device_operations gdrom_bdops = {
@@ -517,7 +530,7 @@ static const struct block_device_operations gdrom_bdops = {
.open = gdrom_bdops_open,
.release = gdrom_bdops_release,
.media_changed = gdrom_bdops_mediachanged,
- .locked_ioctl = gdrom_bdops_ioctl,
+ .ioctl = gdrom_bdops_ioctl,
};
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
@@ -643,14 +656,13 @@ static void gdrom_request(struct request_queue *rq)
struct request *req;
while ((req = blk_fetch_request(rq)) != NULL) {
- if (!blk_fs_request(req)) {
- printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
+ if (req->cmd_type != REQ_TYPE_FS) {
+ printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
__blk_end_request_all(req, -EIO);
continue;
}
if (rq_data_dir(req) != READ) {
- printk(KERN_NOTICE "GDROM: Read only device -");
- printk(" write request ignored\n");
+ pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO);
continue;
}
@@ -685,7 +697,7 @@ static int __devinit gdrom_outputversion(void)
firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
if (!firmw_ver)
goto free_manuf_name;
- printk(KERN_INFO "GDROM: %s from %s with firmware %s\n",
+ pr_info("%s from %s with firmware %s\n",
model_name, manuf_name, firmw_ver);
err = 0;
kfree(firmw_ver);
@@ -757,7 +769,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
int err;
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
- printk(KERN_WARNING "GDROM: ATA Probe for GDROM failed.\n");
+ pr_warning("ATA Probe for GDROM failed\n");
return -ENODEV;
}
/* Print out firmware ID */
@@ -767,7 +779,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
if (gdrom_major <= 0)
return gdrom_major;
- printk(KERN_INFO "GDROM: Registered with major number %d\n",
+ pr_info("Registered with major number %d\n",
gdrom_major);
/* Specify basic properties of drive */
gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
@@ -818,7 +830,7 @@ probe_fail_no_disk:
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
gdrom_major = 0;
probe_fail_no_mem:
- printk(KERN_WARNING "GDROM: Probe failed - error is 0x%X\n", err);
+ pr_warning("Probe failed - error is 0x%X\n", err);
return err;
}
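
The gdrom.c and viocd.c hunks follow the same big-kernel-lock pushdown pattern: the block layer's implicit locking (the old .locked_ioctl entry point) goes away, so each driver takes and drops the BKL around the cdrom calls itself. A kernel-style sketch of that wrapper follows; it is not compilable on its own, the "my_*" names and my_cd_info are placeholders, while lock_kernel()/unlock_kernel(), cdrom_ioctl() and block_device_operations are the interfaces the hunks already use.

    #include <linux/smp_lock.h>       /* lock_kernel()/unlock_kernel() */

    /* my_cd_info: the driver's struct cdrom_device_info *, as gd.cd_info above. */
    static int my_bdops_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned cmd, unsigned long arg)
    {
            int ret;

            lock_kernel();            /* what .locked_ioctl used to do for us */
            ret = cdrom_ioctl(my_cd_info, bdev, mode, cmd, arg);
            unlock_kernel();

            return ret;
    }

    static const struct block_device_operations my_bdops = {
            .owner = THIS_MODULE,
            .ioctl = my_bdops_ioctl,  /* replaces the old .locked_ioctl slot */
    };
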
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 451cd7071b1..56bf9f44700 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -31,6 +31,8 @@
* the OS/400 partition.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
@@ -40,6 +42,7 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
@@ -53,9 +56,6 @@
#define VIOCD_VERS "1.06"
-#define VIOCD_KERN_WARNING KERN_WARNING "viocd: "
-#define VIOCD_KERN_INFO KERN_INFO "viocd: "
-
/*
* Should probably make this a module parameter....sigh
*/
@@ -154,13 +154,21 @@ static const struct file_operations proc_viocd_operations = {
static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
{
struct disk_info *di = bdev->bd_disk->private_data;
- return cdrom_open(&di->viocd_info, bdev, mode);
+ int ret;
+
+ lock_kernel();
+ ret = cdrom_open(&di->viocd_info, bdev, mode);
+ unlock_kernel();
+
+ return ret;
}
static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
{
struct disk_info *di = disk->private_data;
+ lock_kernel();
cdrom_release(&di->viocd_info, mode);
+ unlock_kernel();
return 0;
}
@@ -168,7 +176,13 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
struct disk_info *di = bdev->bd_disk->private_data;
- return cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
+ int ret;
+
+ lock_kernel();
+ ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
}
static int viocd_blk_media_changed(struct gendisk *disk)
@@ -181,7 +195,7 @@ static const struct block_device_operations viocd_fops = {
.owner = THIS_MODULE,
.open = viocd_blk_open,
.release = viocd_blk_release,
- .locked_ioctl = viocd_blk_ioctl,
+ .ioctl = viocd_blk_ioctl,
.media_changed = viocd_blk_media_changed,
};
@@ -202,9 +216,8 @@ static int viocd_open(struct cdrom_device_info *cdi, int purpose)
(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
0, 0, 0);
if (hvrc != 0) {
- printk(VIOCD_KERN_WARNING
- "bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
+ pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+ (int)hvrc);
return -EIO;
}
@@ -213,8 +226,8 @@ static int viocd_open(struct cdrom_device_info *cdi, int purpose)
if (we.rc) {
const struct vio_error_entry *err =
vio_lookup_rc(viocd_err_table, we.sub_result);
- printk(VIOCD_KERN_WARNING "bad rc %d:0x%04X on open: %s\n",
- we.rc, we.sub_result, err->msg);
+ pr_warning("bad rc %d:0x%04X on open: %s\n",
+ we.rc, we.sub_result, err->msg);
return -err->errno;
}
@@ -234,9 +247,8 @@ static void viocd_release(struct cdrom_device_info *cdi)
viopath_targetinst(viopath_hostLp), 0,
VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
if (hvrc != 0)
- printk(VIOCD_KERN_WARNING
- "bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
+ pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+ (int)hvrc);
}
/* Send a read or write request to OS/400 */
@@ -262,13 +274,12 @@ static int send_request(struct request *req)
sg_init_table(&sg, 1);
if (blk_rq_map_sg(req->q, req, &sg) == 0) {
- printk(VIOCD_KERN_WARNING
- "error setting up scatter/gather list\n");
+ pr_warning("error setting up scatter/gather list\n");
return -1;
}
if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
- printk(VIOCD_KERN_WARNING "error allocating sg tce\n");
+ pr_warning("error allocating sg tce\n");
return -1;
}
dmaaddr = sg_dma_address(&sg);
@@ -284,7 +295,7 @@ static int send_request(struct request *req)
((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
(u64)blk_rq_pos(req) * 512, len, 0);
if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
+ pr_warning("hv error on op %d\n", (int)hvrc);
return -1;
}
@@ -298,11 +309,10 @@ static void do_viocd_request(struct request_queue *q)
struct request *req;
while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
- if (!blk_fs_request(req))
+ if (req->cmd_type != REQ_TYPE_FS)
__blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
- printk(VIOCD_KERN_WARNING
- "unable to send message to OS/400!");
+ pr_warning("unable to send message to OS/400!\n");
__blk_end_request_all(req, -EIO);
} else
rwreq++;
@@ -327,8 +337,8 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
0, 0, 0);
if (hvrc != 0) {
- printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
+ pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+ (int)hvrc);
return -EIO;
}
@@ -338,9 +348,8 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
if (we.rc) {
const struct vio_error_entry *err =
vio_lookup_rc(viocd_err_table, we.sub_result);
- printk(VIOCD_KERN_WARNING
- "bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
- we.rc, we.sub_result, err->msg);
+ pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
+ we.rc, we.sub_result, err->msg);
return 0;
}
@@ -367,8 +376,8 @@ static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
(u64)&we, VIOVERSION << 16,
(device_no << 48) | (flags << 32), 0, 0, 0);
if (hvrc != 0) {
- printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
+ pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+ (int)hvrc);
return -EIO;
}
@@ -455,8 +464,7 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
return;
/* First, we should NEVER get an int here...only acks */
if (hvlpevent_is_int(event)) {
- printk(VIOCD_KERN_WARNING
- "Yikes! got an int in viocd event handler!\n");
+ pr_warning("Yikes! got an int in viocd event handler!\n");
if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
@@ -510,10 +518,9 @@ return_complete:
const struct vio_error_entry *err =
vio_lookup_rc(viocd_err_table,
bevent->sub_result);
- printk(VIOCD_KERN_WARNING "request %p failed "
- "with rc %d:0x%04X: %s\n",
- req, event->xRc,
- bevent->sub_result, err->msg);
+ pr_warning("request %p failed with rc %d:0x%04X: %s\n",
+ req, event->xRc,
+ bevent->sub_result, err->msg);
__blk_end_request_all(req, -EIO);
} else
__blk_end_request_all(req, 0);
@@ -524,9 +531,8 @@ return_complete:
break;
default:
- printk(VIOCD_KERN_WARNING
- "message with invalid subtype %0x04X!\n",
- event->xSubtype & VIOMINOR_SUBTYPE_MASK);
+ pr_warning("message with invalid subtype %0x04X!\n",
+ event->xSubtype & VIOMINOR_SUBTYPE_MASK);
if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
@@ -593,23 +599,19 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);
if (register_cdrom(c) != 0) {
- printk(VIOCD_KERN_WARNING "Cannot register viocd CD-ROM %s!\n",
- c->name);
+ pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
goto out;
}
- printk(VIOCD_KERN_INFO "cd %s is iSeries resource %10.10s "
- "type %4.4s, model %3.3s\n",
- c->name, d->rsrcname, d->type, d->model);
+ pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
+ c->name, d->rsrcname, d->type, d->model);
q = blk_init_queue(do_viocd_request, &viocd_reqlock);
if (q == NULL) {
- printk(VIOCD_KERN_WARNING "Cannot allocate queue for %s!\n",
- c->name);
+ pr_warning("Cannot allocate queue for %s!\n", c->name);
goto out_unregister_cdrom;
}
gendisk = alloc_disk(1);
if (gendisk == NULL) {
- printk(VIOCD_KERN_WARNING "Cannot create gendisk for %s!\n",
- c->name);
+ pr_warning("Cannot create gendisk for %s!\n", c->name);
goto out_cleanup_queue;
}
gendisk->major = VIOCD_MAJOR;
@@ -682,21 +684,19 @@ static int __init viocd_init(void)
return -ENODEV;
}
- printk(VIOCD_KERN_INFO "vers " VIOCD_VERS ", hosting partition %d\n",
- viopath_hostLp);
+ pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);
if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
- printk(VIOCD_KERN_WARNING "Unable to get major %d for %s\n",
- VIOCD_MAJOR, VIOCD_DEVICE);
+ pr_warning("Unable to get major %d for %s\n",
+ VIOCD_MAJOR, VIOCD_DEVICE);
return -EIO;
}
ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
MAX_CD_REQ + 2);
if (ret) {
- printk(VIOCD_KERN_WARNING
- "error opening path to host partition %d\n",
- viopath_hostLp);
+ pr_warning("error opening path to host partition %d\n",
+ viopath_hostLp);
goto out_unregister;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7cfcc629a7f..3d44ec724c1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1002,7 +1002,7 @@ config SCx200_GPIO
config PC8736x_GPIO
tristate "NatSemi PC8736x GPIO Support"
- depends on X86
+ depends on X86_32
default SCx200_GPIO # mostly N
select NSC_GPIO # needed for support routines
help
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 88d6eac6975..dc964166060 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,6 +9,7 @@ FONTMAPFILE = cp437.uni
obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
+obj-y += tty_mutex.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-y += misc.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
+obj-$(CONFIG_HVC_TILE) += hvc_tile.o
obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index aa109cbe0e6..d607f53d8af 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -372,6 +372,17 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
bridge->capndx = cap_ptr;
/*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - hamish@zot.org
@@ -385,17 +396,6 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
}
}
- /*
- * If the device has not been properly setup, the following will catch
- * the problem and should stop the system from crashing.
- * 20030610 - hamish@zot.org
- */
- if (pci_enable_device(pdev)) {
- printk(KERN_ERR PFX "Unable to Enable PCI device\n");
- agp_put_bridge(bridge);
- return -ENODEV;
- }
-
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
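
The efficeon-agp.c hunk above only reorders the probe: pci_enable_device() is hoisted ahead of the "BIOS forgot the GART range" BAR fixup, so the device is awake and decoding before its resources are inspected or repaired. A kernel-style ordering sketch (not the real probe function; fixup_gart_bar() and my_probe() are placeholders, pci_enable_device() is the real call):

    static int my_probe(struct pci_dev *pdev)
    {
            if (pci_enable_device(pdev))    /* 1. wake the device up first */
                    return -ENODEV;

            fixup_gart_bar(pdev);           /* 2. only then repair a missing BAR */

            return 0;
    }
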
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d836a71bf06..cd18493c952 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
+#include <linux/intel-gtt.h>
#include "intel-gtt.c"
@@ -805,6 +806,8 @@ static const struct intel_driver_description {
"G45/G43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+ "B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
@@ -815,17 +818,28 @@ static const struct intel_driver_description {
"HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
- "Sandybridge", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
- "Sandybridge", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
{ 0, 0, NULL, NULL, NULL }
};
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
- int i;
+ int i, mask;
+
bridge->driver = NULL;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -845,14 +859,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
- if (bridge->driver->mask_memory == intel_i965_mask_memory) {
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask 36bit failed!\n");
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(36));
- }
+ if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+ mask = 40;
+ else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+ mask = 36;
+ else
+ mask = 32;
+
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n", mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
return 1;
}
@@ -908,6 +927,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
/*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "can't enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - hamish@zot.org
@@ -921,17 +951,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
}
- /*
- * If the device has not been properly setup, the following will catch
- * the problem and should stop the system from crashing.
- * 20030610 - hamish@zot.org
- */
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "can't enable PCI device\n");
- agp_put_bridge(bridge);
- return -ENODEV;
- }
-
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
@@ -1036,6 +1055,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
{ }
};
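
Worked example of the DMA-mask selection added above: DMA_BIT_MASK(n) is just the lowest n bits set, and the probe now picks 40 bits for gen6 (Sandybridge) parts, 36 for i965-class parts and 32 otherwise. The standalone sketch below reproduces only that arithmetic; the enum and pick_mask_bits() are illustrative, the macro matches the kernel's definition.

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    enum gfx_gen { GEN_OTHER, GEN_I965, GEN_6 };

    static unsigned int pick_mask_bits(enum gfx_gen gen)
    {
            if (gen == GEN_6)
                    return 40;
            else if (gen == GEN_I965)
                    return 36;
            else
                    return 32;
    }

    int main(void)
    {
            enum gfx_gen gens[] = { GEN_OTHER, GEN_I965, GEN_6 };
            for (unsigned i = 0; i < 3; i++) {
                    unsigned bits = pick_mask_bits(gens[i]);
                    printf("%u-bit mask = 0x%016llx\n", bits,
                           (unsigned long long)DMA_BIT_MASK(bits));
            }
            return 0;
    }
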
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 2547465d465..d09b1ab7e8a 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,6 +1,8 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
/* Intel registers */
#define INTEL_APSIZE 0xb4
@@ -60,6 +62,12 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
+/* GT PTE cache control fields */
+#define GEN6_PTE_UNCACHED 0x00000002
+#define GEN6_PTE_LLC 0x00000004
+#define GEN6_PTE_LLC_MLC 0x00000006
+#define GEN6_PTE_GFDT 0x00000008
+
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
#define I810_GFX_MEM_WIN_32M 0x00010000
@@ -178,6 +186,8 @@
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
@@ -194,10 +204,16 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -224,7 +240,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -237,3 +254,5 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
IS_SNB)
+
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index a7547150a70..75e0a349788 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -25,6 +25,10 @@
#define USE_PCI_DMA_API 1
#endif
+/* Max amount of stolen space, anything above will be returned to Linux */
+int intel_max_stolen = 32 * 1024 * 1024;
+EXPORT_SYMBOL(intel_max_stolen);
+
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
{64, 16384, 4},
@@ -45,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
.type = INTEL_AGP_CACHED_MEMORY}
};
+#define INTEL_AGP_UNCACHED_MEMORY 0
+#define INTEL_AGP_CACHED_MEMORY_LLC 1
+#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
+
+static struct gatt_mask intel_gen6_masks[] =
+{
+ {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+ .type = INTEL_AGP_UNCACHED_MEMORY },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+};
+
static struct _intel_private {
struct pci_dev *pcidev; /* device one */
u8 __iomem *registers;
@@ -104,7 +128,7 @@ static int intel_agp_map_memory(struct agp_memory *mem)
DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
- return -ENOMEM;
+ goto err;
mem->sg_list = sg = st.sgl;
@@ -113,11 +137,14 @@ static int intel_agp_map_memory(struct agp_memory *mem)
mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
mem->page_count, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!mem->num_sg)) {
- intel_agp_free_sglist(mem);
- return -ENOMEM;
- }
+ if (unlikely(!mem->num_sg))
+ goto err;
+
return 0;
+
+err:
+ sg_free_table(&st);
+ return -ENOMEM;
}
static void intel_agp_unmap_memory(struct agp_memory *mem)
@@ -171,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
off_t pg_start, int mask_type)
{
int i, j;
- u32 cache_bits = 0;
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
- {
- cache_bits = I830_PTE_SYSTEM_CACHED;
- }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -310,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
return 0;
}
+static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+
+ if (type_mask == AGP_USER_UNCACHED_MEMORY)
+ return INTEL_AGP_UNCACHED_MEMORY;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+ INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+ else /* set 'normal'/'cached' to LLC by default */
+ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+ INTEL_AGP_CACHED_MEMORY_LLC;
+}
+
+
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -581,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
gtt_entries = 0;
break;
}
- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ } else if (IS_SNB) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -710,7 +746,12 @@ static void intel_i830_init_gtt_entries(void)
break;
}
}
- if (gtt_entries > 0) {
+ if (!local && gtt_entries > intel_max_stolen) {
+ dev_info(&agp_bridge->dev->dev,
+ "detected %dK stolen memory, trimming to %dK\n",
+ gtt_entries / KB(1), intel_max_stolen / KB(1));
+ gtt_entries = intel_max_stolen / KB(4);
+ } else if (gtt_entries > 0) {
dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
gtt_entries / KB(1), local ? "local" : "stolen");
gtt_entries /= KB(4);
@@ -797,6 +838,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
/* we have to call this as early as possible after the MMIO base address is known */
intel_i830_init_gtt_entries();
+ if (intel_private.gtt_entries == 0) {
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
agp_bridge->gatt_table = NULL;
@@ -1052,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
intel_i915_setup_chipset_flush();
}
- if (intel_private.ifp_resource.start) {
+ if (intel_private.ifp_resource.start)
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
- }
+ if (!intel_private.i9xx_flush_page)
+ dev_err(&intel_private.pcidev->dev,
+ "can't ioremap flush page - no chipset flushing\n");
}
static int intel_i9xx_configure(void)
@@ -1147,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
mask_type != INTEL_AGP_CACHED_MEMORY)
goto out_err;
@@ -1282,6 +1327,11 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
/* we have to call this as early as possible after the MMIO base address is known */
intel_i830_init_gtt_entries();
+ if (intel_private.gtt_entries == 0) {
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
agp_bridge->gatt_table = NULL;
@@ -1309,6 +1359,16 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
return addr | bridge->driver->masks[type].mask;
}
+static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
u16 snb_gmch_ctl;
@@ -1328,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
break;
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
*gtt_offset = MB(2);
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1390,6 +1451,11 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
/* we have to call this as early as possible after the MMIO base address is known */
intel_i830_init_gtt_entries();
+ if (intel_private.gtt_entries == 0) {
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
agp_bridge->gatt_table = NULL;
@@ -1517,6 +1583,39 @@ static const struct agp_bridge_driver intel_i965_driver = {
#endif
};
+static const struct agp_bridge_driver intel_gen6_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i9xx_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_gen6_mask_memory,
+ .masks = intel_gen6_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i965_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
static const struct agp_bridge_driver intel_g33_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
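
Worked example of intel_gen6_mask_memory() above: on gen6 the GTT entry keeps the low 32 address bits in place and stores physical address bits 39:32 in PTE bits 11:4, which is exactly what "(addr >> 28) & 0xff0" produces for a page-aligned address. The standalone sketch below uses the I810_PTE_VALID and GEN6_PTE_UNCACHED values from the hunks; the 0x1234567000 address is illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    #define I810_PTE_VALID      0x00000001
    #define GEN6_PTE_UNCACHED   0x00000002

    static uint32_t gen6_pte(uint64_t addr, uint32_t mask)
    {
            addr |= (addr >> 28) & 0xff0;   /* bits 39:32 -> PTE bits 11:4 */
            return (uint32_t)addr | mask;
    }

    int main(void)
    {
            uint64_t addr = 0x1234567000ULL;        /* 40-bit, 4K-aligned */
            uint32_t pte = gen6_pte(addr, I810_PTE_VALID | GEN6_PTE_UNCACHED);

            /* Expect 0x34567123: low 32 bits, 0x12 in bits 11:4, flags 0x3. */
            printf("pte = 0x%08x\n", pte);
            return 0;
    }
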
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 4f8d60c25a9..a11c8c9ca3d 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1072,7 +1072,7 @@ static int get_serial_info(struct async_struct * info,
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
- lock_kernel();
+ tty_lock();
tmp.type = state->type;
tmp.line = state->line;
tmp.port = state->port;
@@ -1083,7 +1083,7 @@ static int get_serial_info(struct async_struct * info,
tmp.close_delay = state->close_delay;
tmp.closing_wait = state->closing_wait;
tmp.custom_divisor = state->custom_divisor;
- unlock_kernel();
+ tty_unlock();
if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
return -EFAULT;
return 0;
@@ -1100,14 +1100,14 @@ static int set_serial_info(struct async_struct * info,
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
- lock_kernel();
+ tty_lock();
state = info->state;
old_state = *state;
change_irq = new_serial.irq != state->irq;
change_port = (new_serial.port != state->port);
if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) {
- unlock_kernel();
+ tty_unlock();
return -EINVAL;
}
@@ -1127,7 +1127,7 @@ static int set_serial_info(struct async_struct * info,
}
if (new_serial.baud_base < 9600) {
- unlock_kernel();
+ tty_unlock();
return -EINVAL;
}
@@ -1163,7 +1163,7 @@ check_and_exit:
}
} else
retval = startup(info);
- unlock_kernel();
+ tty_unlock();
return retval;
}
@@ -1528,6 +1528,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct async_struct * info = tty->driver_data;
unsigned long orig_jiffies, char_time;
+ int tty_was_locked = tty_locked();
int lsr;
if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
@@ -1538,7 +1539,12 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
orig_jiffies = jiffies;
- lock_kernel();
+ /*
+ * tty_wait_until_sent is called from lots of places,
+ * with or without the BTM.
+ */
+ if (!tty_was_locked)
+ tty_lock();
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
@@ -1579,7 +1585,8 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- unlock_kernel();
+ if (!tty_was_locked)
+ tty_unlock();
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
@@ -1703,7 +1710,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
printk("block_til_ready blocking: ttys%d, count = %d\n",
info->line, state->count);
#endif
+ tty_unlock();
schedule();
+ tty_lock();
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&info->open_wait, &wait);
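
The comment added in rs_wait_until_sent() above explains the shape of the change: the function can be entered with or without the tty mutex (the old BTM), so it only takes and drops the lock itself when the caller did not already hold it, and block_til_ready() now drops the lock around schedule() so other tty work can run while an opener sleeps. A kernel-style sketch of the conditional-lock shape (placeholder body and name, not compilable on its own; tty_lock()/tty_unlock()/tty_locked() are the calls the hunk introduces):

    static void wait_until_sent_sketch(struct tty_struct *tty, int timeout)
    {
            int was_locked = tty_locked();  /* caller already holds the mutex? */

            if (!was_locked)
                    tty_lock();

            /* ... poll the UART until the FIFO drains or timeout expires ... */

            if (!was_locked)
                    tty_unlock();
    }
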
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index 555cd93c2ee..d5fa113afe3 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -67,15 +67,15 @@ static void set_led(char state)
static int briq_panel_open(struct inode *ino, struct file *filep)
{
- lock_kernel();
+ tty_lock();
/* enforce single access, vfd_is_open is protected by BKL */
if (vfd_is_open) {
- unlock_kernel();
+ tty_unlock();
return -EBUSY;
}
vfd_is_open = 1;
- unlock_kernel();
+ tty_unlock();
return 0;
}
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 89d871ef8c2..91917133ae0 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 9824b416290..27aad942233 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -65,7 +65,6 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
@@ -1608,7 +1607,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
* If the port is the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
- wait_event_interruptible(info->port.close_wait,
+ wait_event_interruptible_tty(info->port.close_wait,
!(info->port.flags & ASYNC_CLOSING));
return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
}
@@ -1655,7 +1654,6 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
return; /* Just in case.... */
orig_jiffies = jiffies;
- lock_kernel();
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
@@ -1702,7 +1700,6 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
}
/* Run one more char cycle */
msleep_interruptible(jiffies_to_msecs(char_time * 5));
- unlock_kernel();
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies);
#endif
@@ -1959,7 +1956,6 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
int char_count;
__u32 tx_put, tx_get, tx_bufsize;
- lock_kernel();
tx_get = readl(&buf_ctrl->tx_get);
tx_put = readl(&buf_ctrl->tx_put);
tx_bufsize = readl(&buf_ctrl->tx_bufsize);
@@ -1971,7 +1967,6 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
info->line, info->xmit_cnt + char_count);
#endif
- unlock_kernel();
return info->xmit_cnt + char_count;
}
#endif /* Z_EXT_CHARS_IN_BUFFER */
@@ -2359,17 +2354,22 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
struct serial_struct __user *new_info)
{
struct serial_struct new_serial;
+ int ret;
if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
return -EFAULT;
+ mutex_lock(&info->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
if (new_serial.close_delay != info->port.close_delay ||
new_serial.baud_base != info->baud ||
(new_serial.flags & ASYNC_FLAGS &
~ASYNC_USR_MASK) !=
(info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK))
+ {
+ mutex_unlock(&info->port.mutex);
return -EPERM;
+ }
info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK);
info->baud = new_serial.baud_base;
@@ -2392,10 +2392,12 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
check_and_exit:
if (info->port.flags & ASYNC_INITIALIZED) {
cy_set_line_char(info, tty);
- return 0;
+ ret = 0;
} else {
- return cy_startup(info, tty);
+ ret = cy_startup(info, tty);
}
+ mutex_unlock(&info->port.mutex);
+ return ret;
} /* set_serial_info */
/*
@@ -2438,7 +2440,6 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
card = info->card;
- lock_kernel();
if (!cy_is_Z(card)) {
unsigned long flags;
int channel = info->line - card->first_line;
@@ -2478,7 +2479,6 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
}
end:
- unlock_kernel();
return result;
} /* cy_tiomget */
@@ -2696,7 +2696,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n",
info->line, cmd, arg);
#endif
- lock_kernel();
switch (cmd) {
case CYGETMON:
@@ -2817,7 +2816,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
default:
ret_val = -ENOIOCTLCMD;
}
- unlock_kernel();
#ifdef CY_DEBUG_OTHER
printk(KERN_DEBUG "cyc:cy_ioctl done\n");
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 6f5ffe1320f..d9df46aa0fb 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -36,7 +36,7 @@
#include <linux/ctype.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/smp_lock.h>
+#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
@@ -2105,7 +2105,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file *file,
break;
case DIGI_SETAW:
case DIGI_SETAF:
- lock_kernel();
if (cmd == DIGI_SETAW) {
/* Setup an event to indicate when the transmit
buffer empties */
@@ -2118,7 +2117,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file *file,
if (tty->ldisc->ops->flush_buffer)
tty->ldisc->ops->flush_buffer(tty);
}
- unlock_kernel();
/* Fall Thru */
case DIGI_SETA:
if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index e0249722d25..f953c96efc8 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
if (hangcheck_dump_tasks) {
printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
- handle_sysrq('t', NULL);
+ handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 35cca4c7fb1..3afd62e856e 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -194,7 +194,7 @@ static int __init hvc_console_setup(struct console *co, char *options)
return 0;
}
-static struct console hvc_con_driver = {
+static struct console hvc_console = {
.name = "hvc",
.write = hvc_console_print,
.device = hvc_console_device,
@@ -220,7 +220,7 @@ static struct console hvc_con_driver = {
*/
static int __init hvc_console_init(void)
{
- register_console(&hvc_con_driver);
+ register_console(&hvc_console);
return 0;
}
console_initcall(hvc_console_init);
@@ -276,8 +276,8 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
* now (setup won't fail at this point). It's ok to just
* call register again if previously .setup failed.
*/
- if (index == hvc_con_driver.index)
- register_console(&hvc_con_driver);
+ if (index == hvc_console.index)
+ register_console(&hvc_console);
return 0;
}
@@ -641,7 +641,7 @@ int hvc_poll(struct hvc_struct *hp)
}
for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
- if (hp->index == hvc_con_driver.index) {
+ if (hp->index == hvc_console.index) {
/* Handle the SysRq Hack */
/* XXX should support a sequence */
if (buf[i] == '\x0f') { /* ^O */
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
- handle_sysrq(buf[i], tty);
+ handle_sysrq(buf[i]);
sysrq_pressed = 0;
continue;
}
@@ -909,7 +909,7 @@ static void __exit hvc_exit(void)
tty_unregister_driver(hvc_driver);
/* return tty_struct instances allocated in hvc_init(). */
put_tty_driver(hvc_driver);
- unregister_console(&hvc_con_driver);
+ unregister_console(&hvc_console);
}
}
module_exit(hvc_exit);
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 5a80ad68ef2..7b01bc609de 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1149,7 +1149,7 @@ out_err:
* Note: If it is called early in the boot process, @val is stored and
* parsed later in hvc_iucv_init().
*/
-static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
+static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
int rc;
@@ -1176,7 +1176,7 @@ static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
* The function stores the filter as a comma-separated list of z/VM user IDs
* in @buffer. Typically, sysfs routines call this function for attr show.
*/
-static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
+static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
int rc;
size_t index, len;
@@ -1203,6 +1203,11 @@ static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
#define param_check_vmidfilter(name, p) __param_check(name, p, void)
+static struct kernel_param_ops param_ops_vmidfilter = {
+ .set = param_set_vmidfilter,
+ .get = param_get_vmidfilter,
+};
+
/**
* hvc_iucv_init() - z/VM IUCV HVC device driver initialization
*/
diff --git a/drivers/char/hvc_tile.c b/drivers/char/hvc_tile.c
new file mode 100644
index 00000000000..c4efb55cbc0
--- /dev/null
+++ b/drivers/char/hvc_tile.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Tilera TILE Processor hypervisor console
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <hv/hypervisor.h>
+
+#include "hvc_console.h"
+
+static int hvc_tile_put_chars(uint32_t vt, const char *buf, int count)
+{
+ return hv_console_write((HV_VirtAddr)buf, count);
+}
+
+static int hvc_tile_get_chars(uint32_t vt, char *buf, int count)
+{
+ int i, c;
+
+ for (i = 0; i < count; ++i) {
+ c = hv_console_read_if_ready();
+ if (c < 0)
+ break;
+ buf[i] = c;
+ }
+
+ return i;
+}
+
+static const struct hv_ops hvc_tile_get_put_ops = {
+ .get_chars = hvc_tile_get_chars,
+ .put_chars = hvc_tile_put_chars,
+};
+
+static int __init hvc_tile_console_init(void)
+{
+ extern void disable_early_printk(void);
+ hvc_instantiate(0, 0, &hvc_tile_get_put_ops);
+ add_preferred_console("hvc", 0, NULL);
+ disable_early_printk();
+ return 0;
+}
+console_initcall(hvc_tile_console_init);
+
+static int __init hvc_tile_init(void)
+{
+ hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
+ return 0;
+}
+device_initcall(hvc_tile_init);
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index d4b14ff1c4c..a2bc885ce60 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
hp->sysrq = 1;
continue;
} else if (hp->sysrq) {
- handle_sysrq(c, hp->tty);
+ handle_sysrq(c);
hp->sysrq = 0;
continue;
}
@@ -1255,7 +1255,7 @@ static int __init hvsi_console_setup(struct console *console, char *options)
return 0;
}
-static struct console hvsi_con_driver = {
+static struct console hvsi_console = {
.name = "hvsi",
.write = hvsi_console_print,
.device = hvsi_console_device,
@@ -1308,7 +1308,7 @@ static int __init hvsi_console_init(void)
}
if (hvsi_count)
- register_console(&hvsi_con_driver);
+ register_console(&hvsi_console);
return 0;
}
console_initcall(hvsi_console_init);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 101d5f23554..a3f5e381e74 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
static int n2rng_data_read(struct hwrng *rng, u32 *data)
{
- struct n2rng *np = rng->priv;
+ struct n2rng *np = (struct n2rng *) rng->priv;
unsigned long ra = __pa(&np->test_data);
int len;
@@ -619,7 +619,7 @@ static void __devinit n2rng_driver_version(void)
pr_info("%s", version);
}
-static int __devinit n2rng_probe(struct of_device *op,
+static int __devinit n2rng_probe(struct platform_device *op,
const struct of_device_id *match)
{
int victoria_falls = (match->data != NULL);
@@ -714,7 +714,7 @@ out:
return err;
}
-static int __devexit n2rng_remove(struct of_device *op)
+static int __devexit n2rng_remove(struct platform_device *op)
{
struct n2rng *np = dev_get_drvdata(&op->dev);
@@ -762,12 +762,12 @@ static struct of_platform_driver n2rng_driver = {
static int __init n2rng_init(void)
{
- return of_register_driver(&n2rng_driver, &of_bus_type);
+ return of_register_platform_driver(&n2rng_driver);
}
static void __exit n2rng_exit(void)
{
- of_unregister_driver(&n2rng_driver);
+ of_unregister_platform_driver(&n2rng_driver);
}
module_init(n2rng_init);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index a2b81e7bfc1..4bea07f3097 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -65,7 +65,7 @@ struct n2rng_unit {
};
struct n2rng {
- struct of_device *op;
+ struct platform_device *op;
unsigned long flags;
#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 261ba8f22b8..a31c830ca8c 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -94,7 +94,7 @@ static struct hwrng pasemi_rng = {
.data_read = pasemi_rng_data_read,
};
-static int __devinit rng_probe(struct of_device *ofdev,
+static int __devinit rng_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
void __iomem *rng_regs;
@@ -123,7 +123,7 @@ static int __devinit rng_probe(struct of_device *ofdev,
return err;
}
-static int __devexit rng_remove(struct of_device *dev)
+static int __devexit rng_remove(struct platform_device *dev)
{
void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 911e1da6def..d4b71e8d0d2 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1486,7 +1486,9 @@ ip2_open( PTTY tty, struct file *pFile )
if ( tty_hung_up_p(pFile) || ( pCh->flags & ASYNC_CLOSING )) {
if ( pCh->flags & ASYNC_CLOSING ) {
+ tty_unlock();
schedule();
+ tty_lock();
}
if ( tty_hung_up_p(pFile) ) {
set_current_state( TASK_RUNNING );
@@ -1548,7 +1550,9 @@ ip2_open( PTTY tty, struct file *pFile )
rc = (( pCh->flags & ASYNC_HUP_NOTIFY ) ? -EAGAIN : -ERESTARTSYS);
break;
}
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state( TASK_RUNNING );
remove_wait_queue(&pCh->open_wait, &wait);
@@ -1646,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
/* disable DSS reporting */
i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
+ if (tty->termios->c_cflag & HUPCL) {
i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2926,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
if ( pCh )
{
rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+ if (rc)
+ rc = -EFAULT;
} else {
rc = -ENODEV;
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 094bdc355b1..7bd7c45b53e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@ static int num_force_kipmid;
#ifdef CONFIG_PCI
static int pci_registered;
#endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
#ifdef CONFIG_PPC_OF
static int of_registered;
#endif
@@ -1804,9 +1807,12 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- if (!add_smi(info))
+ if (!add_smi(info)) {
if (try_smi_init(info))
cleanup_one_si(info);
+ } else {
+ kfree(info);
+ }
} else {
/* remove */
struct smi_info *e, *tmp_e;
@@ -1890,9 +1896,12 @@ static __devinit void hardcode_find_bmc(void)
info->irq_setup = std_irq_setup;
info->slave_addr = slave_addrs[i];
- if (!add_smi(info))
+ if (!add_smi(info)) {
if (try_smi_init(info))
cleanup_one_si(info);
+ } else {
+ kfree(info);
+ }
}
}
@@ -1965,8 +1974,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
/*
* Defined at
- * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
- * Docs/TechPapers/IA64/hpspmi.pdf
+ * http://h21007.www2.hp.com/portal/download/files
+ * /unprot/hpspmi.pdf
*/
struct SPMITable {
s8 Signature[4];
@@ -2013,18 +2022,12 @@ struct SPMITable {
static __devinit int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
- u8 addr_space;
if (spmi->IPMIlegacy != 1) {
printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
return -ENODEV;
}
- if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- addr_space = IPMI_MEM_ADDR_SPACE;
- else
- addr_space = IPMI_IO_ADDR_SPACE;
-
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
@@ -2088,7 +2091,13 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
}
info->io.addr_data = spmi->addr.address;
- add_smi(info);
+ pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ if (add_smi(info))
+ kfree(info);
return 0;
}
@@ -2120,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
{
struct acpi_device *acpi_dev;
struct smi_info *info;
- struct resource *res;
+ struct resource *res, *res_second;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@@ -2176,6 +2185,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
+ res_second = pnp_get_resource(dev,
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
+ IORESOURCE_IO : IORESOURCE_MEM,
+ 1);
+ if (res_second) {
+ if (res_second->start > info->io.addr_data)
+ info->io.regspacing = res_second->start - info->io.addr_data;
+ }
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
@@ -2196,7 +2213,10 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
res, info->io.regsize, info->io.regspacing,
info->irq);
- return add_smi(info);
+ if (add_smi(info))
+ goto err_free;
+
+ return 0;
err_free:
kfree(info);
@@ -2354,7 +2374,13 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
if (info->irq)
info->irq_setup = std_irq_setup;
- add_smi(info);
+ pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ if (add_smi(info))
+ kfree(info);
}
static void __devinit dmi_find_bmc(void)
@@ -2460,7 +2486,10 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
&pdev->resource[0], info->io.regsize, info->io.regspacing,
info->irq);
- return add_smi(info);
+ if (add_smi(info))
+ kfree(info);
+
+ return 0;
}
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2502,7 +2531,7 @@ static struct pci_driver ipmi_pci_driver = {
#ifdef CONFIG_PPC_OF
-static int __devinit ipmi_of_probe(struct of_device *dev,
+static int __devinit ipmi_of_probe(struct platform_device *dev,
const struct of_device_id *match)
{
struct smi_info *info;
@@ -2573,10 +2602,15 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
- return add_smi(info);
+ if (add_smi(info)) {
+ kfree(info);
+ return -EBUSY;
+ }
+
+ return 0;
}
-static int __devexit ipmi_of_remove(struct of_device *dev)
+static int __devexit ipmi_of_remove(struct platform_device *dev)
{
cleanup_one_si(dev_get_drvdata(&dev->dev));
return 0;
@@ -3006,6 +3040,8 @@ static __devinit void default_find_bmc(void)
info->io.addr_data);
} else
cleanup_one_si(info);
+ } else {
+ kfree(info);
}
}
}
@@ -3033,7 +3069,7 @@ static int add_smi(struct smi_info *new_smi)
si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_CONT PFX "duplicate interface\n");
+ printk(KERN_CONT " duplicate interface\n");
rv = -EBUSY;
goto out_err;
}
@@ -3326,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = 1;
#endif
#ifdef CONFIG_DMI
@@ -3493,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_ACPI
- pnp_unregister_driver(&ipmi_pnp_driver);
+ if (pnp_registered)
+ pnp_unregister_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PPC_OF
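
The ipmi_si hunks above all enforce one ownership rule: add_smi() does not free the smi_info it is handed, so every caller that allocated one must kfree() it itself when registration fails. A compile-only sketch of that rule with made-up names (my_info, my_register, my_probe_one); it is not the driver's actual code.

#include <linux/slab.h>
#include <linux/errno.h>

struct my_info { int dummy; };			/* stand-in for struct smi_info */

static int my_register(struct my_info *info)	/* stand-in for add_smi() */
{
	return 0;				/* nonzero would mean "not taken over" */
}

static int my_probe_one(void)
{
	struct my_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	/* ... fill in addressing details ... */
	if (my_register(info)) {
		kfree(info);			/* registration did not consume it */
		return -EBUSY;
	}
	return 0;				/* ownership passed on success */
}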
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 82bcdb262a3..654d566ca57 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -196,7 +196,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf);
*/
static int start_now;
-static int set_param_int(const char *val, struct kernel_param *kp)
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
{
char *endp;
int l;
@@ -215,10 +215,11 @@ static int set_param_int(const char *val, struct kernel_param *kp)
return rv;
}
-static int get_param_int(char *buffer, struct kernel_param *kp)
-{
- return sprintf(buffer, "%i", *((int *)kp->arg));
-}
+static struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+};
+#define param_check_timeout param_check_int
typedef int (*action_fn)(const char *intval, char *outval);
@@ -227,7 +228,7 @@ static int preaction_op(const char *inval, char *outval);
static int preop_op(const char *inval, char *outval);
static void check_parms(void);
-static int set_param_str(const char *val, struct kernel_param *kp)
+static int set_param_str(const char *val, const struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv = 0;
@@ -251,7 +252,7 @@ static int set_param_str(const char *val, struct kernel_param *kp)
return rv;
}
-static int get_param_str(char *buffer, struct kernel_param *kp)
+static int get_param_str(char *buffer, const struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv;
@@ -263,7 +264,7 @@ static int get_param_str(char *buffer, struct kernel_param *kp)
}
-static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
@@ -276,27 +277,38 @@ static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
return 0;
}
-module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
- &ifnum_to_use, 0644);
+static struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
"timer. Setting to -1 defaults to the first registered "
"interface");
-module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
+module_param(timeout, timeout, 0644);
MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
-module_param_call(pretimeout, set_param_int, get_param_int, &pretimeout, 0644);
+module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
-module_param_call(action, set_param_str, get_param_str, action_op, 0644);
+module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
"reset, none, power_cycle, power_off.");
-module_param_call(preaction, set_param_str, get_param_str, preaction_op, 0644);
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
"pre_none, pre_smi, pre_nmi, pre_int.");
-module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
"preop_none, preop_panic, preop_give_data.");
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 98310e1aae3..c27e9d21fea 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -124,7 +124,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/serial.h>
-#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -872,7 +871,6 @@ static struct tty_port *isicom_find_port(struct tty_struct *tty)
static int isicom_open(struct tty_struct *tty, struct file *filp)
{
struct isi_port *port;
- struct isi_board *card;
struct tty_port *tport;
tport = isicom_find_port(tty);
@@ -1118,8 +1116,7 @@ static int isicom_set_serial_info(struct tty_struct *tty,
if (copy_from_user(&newinfo, info, sizeof(newinfo)))
return -EFAULT;
- lock_kernel();
-
+ mutex_lock(&port->port.mutex);
reconfig_port = ((port->port.flags & ASYNC_SPD_MASK) !=
(newinfo.flags & ASYNC_SPD_MASK));
@@ -1128,7 +1125,7 @@ static int isicom_set_serial_info(struct tty_struct *tty,
(newinfo.closing_wait != port->port.closing_wait) ||
((newinfo.flags & ~ASYNC_USR_MASK) !=
(port->port.flags & ~ASYNC_USR_MASK))) {
- unlock_kernel();
+ mutex_unlock(&port->port.mutex);
return -EPERM;
}
port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
@@ -1145,7 +1142,7 @@ static int isicom_set_serial_info(struct tty_struct *tty,
isicom_config_port(tty);
spin_unlock_irqrestore(&port->card->card_lock, flags);
}
- unlock_kernel();
+ mutex_unlock(&port->port.mutex);
return 0;
}
@@ -1154,7 +1151,7 @@ static int isicom_get_serial_info(struct isi_port *port,
{
struct serial_struct out_info;
- lock_kernel();
+ mutex_lock(&port->port.mutex);
memset(&out_info, 0, sizeof(out_info));
/* out_info.type = ? */
out_info.line = port - isi_ports;
@@ -1164,7 +1161,7 @@ static int isicom_get_serial_info(struct isi_port *port,
/* out_info.baud_base = ? */
out_info.close_delay = port->port.close_delay;
out_info.closing_wait = port->port.closing_wait;
- unlock_kernel();
+ mutex_unlock(&port->port.mutex);
if (copy_to_user(info, &out_info, sizeof(out_info)))
return -EFAULT;
return 0;
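
The isicom hunks above (and the riscom8, specialix, stallion and rocket ones later in this diff) replace lock_kernel()/unlock_kernel() around TIOCGSERIAL/TIOCSSERIAL handling with the per-port mutex embedded in struct tty_port. A minimal sketch of that locking; my_ser_port and my_get_close_delay() are invented names, not driver code.

#include <linux/tty.h>
#include <linux/mutex.h>

struct my_ser_port {			/* illustrative wrapper around tty_port */
	struct tty_port port;
	int close_delay;
};

static int my_get_close_delay(struct my_ser_port *p)
{
	int v;

	mutex_lock(&p->port.mutex);	/* serialises against set_serial_info */
	v = p->close_delay;
	mutex_unlock(&p->port.mutex);
	return v;
}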
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 4e395c956a0..be28391adb7 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -203,9 +203,9 @@ static int stli_shared;
* the board has been detected, and whether it is actually running a slave
* or not.
*/
-#define BST_FOUND 0x1
-#define BST_STARTED 0x2
-#define BST_PROBED 0x4
+#define BST_FOUND 0
+#define BST_STARTED 1
+#define BST_PROBED 2
/*
* Define the set of port state flags. These are marked for internal
@@ -816,7 +816,7 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
brdp = stli_brds[brdnr];
if (brdp == NULL)
return -ENODEV;
- if ((brdp->state & BST_STARTED) == 0)
+ if (!test_bit(BST_STARTED, &brdp->state))
return -ENODEV;
portnr = MINOR2PORT(minordev);
if (portnr > brdp->nrports)
@@ -954,7 +954,7 @@ static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned l
* order of opens and closes may not be preserved across shared
* memory, so we must wait until it is complete.
*/
- wait_event_interruptible(portp->raw_wait,
+ wait_event_interruptible_tty(portp->raw_wait,
!test_bit(ST_CLOSING, &portp->state));
if (signal_pending(current)) {
return -ERESTARTSYS;
@@ -989,7 +989,7 @@ static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned l
set_bit(ST_OPENING, &portp->state);
spin_unlock_irqrestore(&brd_lock, flags);
- wait_event_interruptible(portp->raw_wait,
+ wait_event_interruptible_tty(portp->raw_wait,
!test_bit(ST_OPENING, &portp->state));
if (signal_pending(current))
rc = -ERESTARTSYS;
@@ -1020,7 +1020,7 @@ static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned
* occurs on this port.
*/
if (wait) {
- wait_event_interruptible(portp->raw_wait,
+ wait_event_interruptible_tty(portp->raw_wait,
!test_bit(ST_CLOSING, &portp->state));
if (signal_pending(current)) {
return -ERESTARTSYS;
@@ -1052,7 +1052,7 @@ static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned
* to come back.
*/
rc = 0;
- wait_event_interruptible(portp->raw_wait,
+ wait_event_interruptible_tty(portp->raw_wait,
!test_bit(ST_CLOSING, &portp->state));
if (signal_pending(current))
rc = -ERESTARTSYS;
@@ -1073,6 +1073,10 @@ static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned
static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
{
+ /*
+ * no need for wait_event_tty because clearing ST_CMDING cannot block
+ * on BTM
+ */
wait_event_interruptible(portp->raw_wait,
!test_bit(ST_CMDING, &portp->state));
if (signal_pending(current))
@@ -1846,7 +1850,7 @@ static void stli_portinfo(struct seq_file *m, struct stlibrd *brdp, struct stlip
rc = stli_portcmdstats(NULL, portp);
uart = "UNKNOWN";
- if (brdp->state & BST_STARTED) {
+ if (test_bit(BST_STARTED, &brdp->state)) {
switch (stli_comstats.hwid) {
case 0: uart = "2681"; break;
case 1: uart = "SC26198"; break;
@@ -1855,7 +1859,7 @@ static void stli_portinfo(struct seq_file *m, struct stlibrd *brdp, struct stlip
}
seq_printf(m, "%d: uart:%s ", portnr, uart);
- if ((brdp->state & BST_STARTED) && (rc >= 0)) {
+ if (test_bit(BST_STARTED, &brdp->state) && rc >= 0) {
char sep;
seq_printf(m, "tx:%d rx:%d", (int) stli_comstats.txtotal,
@@ -2355,7 +2359,7 @@ static void stli_poll(unsigned long arg)
brdp = stli_brds[brdnr];
if (brdp == NULL)
continue;
- if ((brdp->state & BST_STARTED) == 0)
+ if (!test_bit(BST_STARTED, &brdp->state))
continue;
spin_lock(&brd_lock);
@@ -3140,7 +3144,7 @@ static int stli_initecp(struct stlibrd *brdp)
}
- brdp->state |= BST_FOUND;
+ set_bit(BST_FOUND, &brdp->state);
return 0;
err_unmap:
iounmap(brdp->membase);
@@ -3297,7 +3301,7 @@ static int stli_initonb(struct stlibrd *brdp)
brdp->panels[0] = brdp->nrports;
- brdp->state |= BST_FOUND;
+ set_bit(BST_FOUND, &brdp->state);
return 0;
err_unmap:
iounmap(brdp->membase);
@@ -3407,7 +3411,7 @@ stli_donestartup:
spin_unlock_irqrestore(&brd_lock, flags);
if (rc == 0)
- brdp->state |= BST_STARTED;
+ set_bit(BST_STARTED, &brdp->state);
if (! stli_timeron) {
stli_timeron++;
@@ -3710,7 +3714,7 @@ static int __devinit stli_pciprobe(struct pci_dev *pdev,
if (retval)
goto err_null;
- brdp->state |= BST_PROBED;
+ set_bit(BST_PROBED, &brdp->state);
pci_set_drvdata(pdev, brdp);
EBRDENABLE(brdp);
@@ -3841,7 +3845,7 @@ static int __init stli_initbrds(void)
brdp = stli_brds[i];
if (brdp == NULL)
continue;
- if (brdp->state & BST_FOUND) {
+ if (test_bit(BST_FOUND, &brdp->state)) {
EBRDENABLE(brdp);
brdp->enable = NULL;
brdp->disable = NULL;
@@ -4011,6 +4015,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
return -ENODEV;
memset(&stli_brdstats, 0, sizeof(combrd_t));
+
stli_brdstats.brd = brdp->brdnr;
stli_brdstats.type = brdp->brdtype;
stli_brdstats.hwid = 0;
@@ -4076,10 +4081,13 @@ static int stli_portcmdstats(struct tty_struct *tty, struct stliport *portp)
if (brdp == NULL)
return -ENODEV;
- if (brdp->state & BST_STARTED) {
+ mutex_lock(&portp->port.mutex);
+ if (test_bit(BST_STARTED, &brdp->state)) {
if ((rc = stli_cmdwait(brdp, portp, A_GETSTATS,
- &stli_cdkstats, sizeof(asystats_t), 1)) < 0)
+ &stli_cdkstats, sizeof(asystats_t), 1)) < 0) {
+ mutex_unlock(&portp->port.mutex);
return rc;
+ }
} else {
memset(&stli_cdkstats, 0, sizeof(asystats_t));
}
@@ -4124,6 +4132,7 @@ static int stli_portcmdstats(struct tty_struct *tty, struct stliport *portp)
stli_comstats.modem = stli_cdkstats.dcdcnt;
stli_comstats.hwid = stli_cdkstats.hwid;
stli_comstats.signals = stli_mktiocm(stli_cdkstats.signals);
+ mutex_unlock(&portp->port.mutex);
return 0;
}
@@ -4186,15 +4195,20 @@ static int stli_clrportstats(struct stliport *portp, comstats_t __user *cp)
if (!brdp)
return -ENODEV;
- if (brdp->state & BST_STARTED) {
- if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, NULL, 0, 0)) < 0)
+ mutex_lock(&portp->port.mutex);
+
+ if (test_bit(BST_STARTED, &brdp->state)) {
+ if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, NULL, 0, 0)) < 0) {
+ mutex_unlock(&portp->port.mutex);
return rc;
+ }
}
memset(&stli_comstats, 0, sizeof(comstats_t));
stli_comstats.brd = portp->brdnr;
stli_comstats.panel = portp->panelnr;
stli_comstats.port = portp->portnr;
+ mutex_unlock(&portp->port.mutex);
if (copy_to_user(cp, &stli_comstats, sizeof(comstats_t)))
return -EFAULT;
@@ -4266,8 +4280,6 @@ static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
done = 0;
rc = 0;
- lock_kernel();
-
switch (cmd) {
case COM_GETPORTSTATS:
rc = stli_getportstats(NULL, NULL, argp);
@@ -4290,8 +4302,6 @@ static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
done++;
break;
}
- unlock_kernel();
-
if (done)
return rc;
@@ -4308,8 +4318,6 @@ static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (brdp->state == 0)
return -ENODEV;
- lock_kernel();
-
switch (cmd) {
case STL_BINTR:
EBRDINTR(brdp);
@@ -4318,10 +4326,10 @@ static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
rc = stli_startbrd(brdp);
break;
case STL_BSTOP:
- brdp->state &= ~BST_STARTED;
+ clear_bit(BST_STARTED, &brdp->state);
break;
case STL_BRESET:
- brdp->state &= ~BST_STARTED;
+ clear_bit(BST_STARTED, &brdp->state);
EBRDRESET(brdp);
if (stli_shared == 0) {
if (brdp->reenable != NULL)
@@ -4332,7 +4340,6 @@ static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
rc = -ENOIOCTLCMD;
break;
}
- unlock_kernel();
return rc;
}
@@ -4378,7 +4385,8 @@ static void istallion_cleanup_isa(void)
unsigned int j;
for (j = 0; (j < stli_nrbrds); j++) {
- if ((brdp = stli_brds[j]) == NULL || (brdp->state & BST_PROBED))
+ if ((brdp = stli_brds[j]) == NULL ||
+ test_bit(BST_PROBED, &brdp->state))
continue;
stli_cleanup_ports(brdp);
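
The istallion conversion above swaps the BST_* bitmask values for bit numbers so the board state can be updated with atomic bitops instead of unlocked read-modify-write on brdp->state. A small sketch of the idiom with invented MY_* names; it is not istallion code.

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_FOUND	0		/* bit numbers, not masks */
#define MY_STARTED	1

struct my_board {
	unsigned long state;
};

static void my_start(struct my_board *brd)
{
	set_bit(MY_STARTED, &brd->state);	/* atomic RMW on the word */
}

static bool my_is_started(struct my_board *brd)
{
	return test_bit(MY_STARTED, &brd->state);
}

static void my_stop(struct my_board *brd)
{
	clear_bit(MY_STARTED, &brd->state);
}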
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 25be2102a60..a7ca75212bf 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -299,7 +299,7 @@ int kbd_rate(struct kbd_repeat *rep)
*/
static void put_queue(struct vc_data *vc, int ch)
{
- struct tty_struct *tty = vc->vc_tty;
+ struct tty_struct *tty = vc->port.tty;
if (tty) {
tty_insert_flip_char(tty, ch, 0);
@@ -309,7 +309,7 @@ static void put_queue(struct vc_data *vc, int ch)
static void puts_queue(struct vc_data *vc, char *cp)
{
- struct tty_struct *tty = vc->vc_tty;
+ struct tty_struct *tty = vc->port.tty;
if (!tty)
return;
@@ -485,7 +485,7 @@ static void fn_show_ptregs(struct vc_data *vc)
static void fn_hold(struct vc_data *vc)
{
- struct tty_struct *tty = vc->vc_tty;
+ struct tty_struct *tty = vc->port.tty;
if (rep || !tty)
return;
@@ -563,7 +563,7 @@ static void fn_inc_console(struct vc_data *vc)
static void fn_send_intr(struct vc_data *vc)
{
- struct tty_struct *tty = vc->vc_tty;
+ struct tty_struct *tty = vc->port.tty;
if (!tty)
return;
@@ -1162,7 +1162,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
int rc;
- tty = vc->vc_tty;
+ tty = vc->port.tty;
if (tty && (!tty->driver_data)) {
/* No driver data? Strange. Okay we fix it then. */
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f54dab8acdc..1f528fad351 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
/*
* capabilities for /dev/zero
* - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
*/
static struct backing_dev_info zero_bdi = {
.name = "char/mem",
- .capabilities = BDI_CAP_MAP_COPY,
+ .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct file_operations full_fops = {
@@ -916,7 +917,7 @@ static int __init chr_dev_init(void)
NULL, devlist[minor].name);
}
- return 0;
+ return tty_init();
}
fs_initcall(chr_dev_init);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index cd650ca8c67..abdafd48898 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -242,7 +242,7 @@ int misc_deregister(struct miscdevice *misc)
{
int i = DYNAMIC_MINORS - misc->minor - 1;
- if (list_empty(&misc->list))
+ if (WARN_ON(list_empty(&misc->list)))
return -EINVAL;
mutex_lock(&misc_mtx);
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index d2692d443f7..3fc89da856a 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -2193,7 +2193,7 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
port->icount.tx += (cnt - port->xmit_cnt);
- if (port->xmit_cnt < WAKEUP_CHARS && tty)
+ if (port->xmit_cnt < WAKEUP_CHARS)
tty_wakeup(tty);
if (port->xmit_cnt <= 0) {
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
index e4089c432f1..04ef3ef0a42 100644
--- a/drivers/char/n_gsm.c
+++ b/drivers/char/n_gsm.c
@@ -43,7 +43,6 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
-#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/string.h>
@@ -920,7 +919,7 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
else
len = gsm_dlci_data_output_framed(gsm, dlci);
if (len < 0)
- return;
+ break;
/* DLCI empty - try the next */
if (len == 0)
i++;
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index c68118efad8..47d32281032 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -598,18 +598,18 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
return -EFAULT;
}
- lock_kernel();
+ tty_lock();
for (;;) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
- unlock_kernel();
+ tty_unlock();
return -EIO;
}
n_hdlc = tty2n_hdlc (tty);
if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
tty != n_hdlc->tty) {
- unlock_kernel();
+ tty_unlock();
return 0;
}
@@ -619,13 +619,13 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
/* no data */
if (file->f_flags & O_NONBLOCK) {
- unlock_kernel();
+ tty_unlock();
return -EAGAIN;
}
interruptible_sleep_on (&tty->read_wait);
if (signal_pending(current)) {
- unlock_kernel();
+ tty_unlock();
return -EINTR;
}
}
@@ -648,7 +648,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
kfree(rbuf);
else
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
- unlock_kernel();
+ tty_unlock();
return ret;
} /* end of n_hdlc_tty_read() */
@@ -691,7 +691,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
count = maxframe;
}
- lock_kernel();
+ tty_lock();
add_wait_queue(&tty->write_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
@@ -731,7 +731,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
n_hdlc_send_frames(n_hdlc,tty);
}
- unlock_kernel();
+ tty_unlock();
return error;
} /* end of n_hdlc_tty_write() */
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index c1d8b54c816..a98290d7a2c 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1067,7 +1067,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
TRACE_L("read()");
- lock_kernel();
+ tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
@@ -1079,7 +1079,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
goto unlock;
}
/* block until there is a message: */
- wait_event_interruptible(pInfo->read_wait,
+ wait_event_interruptible_tty(pInfo->read_wait,
(pMsg = remove_msg(pInfo, pClient)));
}
@@ -1109,7 +1109,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
}
ret = -EPERM;
unlock:
- unlock_kernel();
+ tty_unlock();
return ret;
}
@@ -1158,7 +1158,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
pHeader->locks = 0;
pHeader->owner = NULL;
- lock_kernel();
+ tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
@@ -1177,7 +1177,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
add_tx_queue(pInfo, pHeader);
trigger_transmit(pInfo);
- unlock_kernel();
+ tty_unlock();
return 0;
}
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index bdae8327143..428f4fe0b5f 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1102,6 +1102,11 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
if (I_IUCLC(tty) && L_IEXTEN(tty))
c = tolower(c);
+ if (L_EXTPROC(tty)) {
+ put_tty_queue(c, tty);
+ return;
+ }
+
if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty) &&
c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty)) {
@@ -1409,7 +1414,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
n_tty_set_room(tty);
- if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) {
+ if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
+ L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1585,7 +1591,7 @@ static int n_tty_open(struct tty_struct *tty)
static inline int input_available_p(struct tty_struct *tty, int amt)
{
tty_flush_to_ldisc(tty);
- if (tty->icanon) {
+ if (tty->icanon && !L_EXTPROC(tty)) {
if (tty->canon_data)
return 1;
} else if (tty->read_cnt >= (amt ? amt : 1))
@@ -1632,6 +1638,11 @@ static int copy_from_read_buf(struct tty_struct *tty,
spin_lock_irqsave(&tty->read_lock, flags);
tty->read_tail = (tty->read_tail + n) & (N_TTY_BUF_SIZE-1);
tty->read_cnt -= n;
+ /* Turn single EOF into zero-length read */
+ if (L_EXTPROC(tty) && tty->icanon && n == 1) {
+ if (!tty->read_cnt && (*b)[n-1] == EOF_CHAR(tty))
+ n--;
+ }
spin_unlock_irqrestore(&tty->read_lock, flags);
*b += n;
*nr -= n;
@@ -1812,7 +1823,7 @@ do_it_again:
nr--;
}
- if (tty->icanon) {
+ if (tty->icanon && !L_EXTPROC(tty)) {
/* N.B. avoid overrun if nr == 0 */
while (nr && tty->read_cnt) {
int eol;
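
The n_tty hunks above wire up the EXTPROC local flag: with it set, canonical-mode editing is bypassed and characters are queued straight to the reader, and a lone EOF is turned into a zero-length read. A userspace sketch, not part of the patch, of how a program might request the flag; whether your libc headers define EXTPROC varies, so the fallback value below is an assumption copied from the kernel's asm-generic termbits.

#include <termios.h>

#ifndef EXTPROC
#define EXTPROC	0200000		/* assumption: value from asm-generic/termbits.h */
#endif

int enable_extproc(int fd)
{
	struct termios t;

	if (tcgetattr(fd, &t) < 0)
		return -1;
	t.c_lflag |= EXTPROC;	/* let the ldisc pass characters through */
	return tcsetattr(fd, TCSANOW, &t);
}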
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index a6638003f53..817169cbb24 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1611,6 +1611,8 @@ static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
ret = tty_init_termios(tty);
if (ret == 0) {
tty_driver_kref_get(driver);
+ tty->count++;
+ tty->driver_data = port;
driver->ttys[tty->index] = tty;
}
return ret;
@@ -1639,7 +1641,7 @@ static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
static int ntty_open(struct tty_struct *tty, struct file *filp)
{
- struct port *port = get_port_by_tty(tty);
+ struct port *port = tty->driver_data;
return tty_port_open(&port->port, tty, filp);
}
@@ -1741,8 +1743,7 @@ static int ntty_write_room(struct tty_struct *tty)
if (dc) {
mutex_lock(&port->tty_sem);
if (port->port.count)
- room = port->fifo_ul.size -
- kfifo_len(&port->fifo_ul);
+ room = kfifo_avail(&port->fifo_ul);
mutex_unlock(&port->tty_sem);
}
return room;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index e7956acf2ad..ec73d9f6d9e 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -34,7 +34,6 @@
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -422,7 +421,7 @@ static struct card_fixup card_fixups[] = {
static void set_cardparameter(struct cm4000_dev *dev)
{
int i;
- unsigned int iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
u_int8_t stopbits = 0x02; /* ISO default */
DEBUGP(3, dev, "-> set_cardparameter\n");
@@ -455,7 +454,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
unsigned short num_bytes_read;
unsigned char pts_reply[4];
ssize_t rc;
- unsigned int iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
rc = 0;
@@ -664,7 +663,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
static void monitor_card(unsigned long p)
{
struct cm4000_dev *dev = (struct cm4000_dev *) p;
- unsigned int iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
struct ptsreq ptsreq;
int i, atrc;
@@ -925,7 +924,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
- unsigned int iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
ssize_t rc;
int i, j, k;
@@ -1048,7 +1047,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
- unsigned int iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
unsigned char tmp;
unsigned char infolen;
@@ -1401,7 +1400,7 @@ static void stop_monitor(struct cm4000_dev *dev)
static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
- unsigned int iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
struct inode *inode = filp->f_path.dentry->d_inode;
struct pcmcia_device *link;
int size;
@@ -1752,17 +1751,12 @@ static int cm4000_config_check(struct pcmcia_device *p_dev,
if (!cfg->io.nwin)
return -ENODEV;
- /* Get the IOaddr */
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(cfg->io.flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(cfg->io.flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
-
- return pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
+ p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags);
+ p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK;
+
+ return pcmcia_request_io(p_dev);
}
static int cm4000_config(struct pcmcia_device * link, int devno)
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index c0775c844e0..815cde1d057 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -110,7 +109,7 @@ static inline unsigned char xinb(unsigned short port)
static void cm4040_do_poll(unsigned long dummy)
{
struct reader_dev *dev = (struct reader_dev *) dummy;
- unsigned int obs = xinb(dev->p_dev->io.BasePort1
+ unsigned int obs = xinb(dev->p_dev->resource[0]->start
+ REG_OFFSET_BUFFER_STATUS);
if ((obs & BSR_BULK_IN_FULL)) {
@@ -141,7 +140,7 @@ static void cm4040_stop_poll(struct reader_dev *dev)
static int wait_for_bulk_out_ready(struct reader_dev *dev)
{
int i, rc;
- int iobase = dev->p_dev->io.BasePort1;
+ int iobase = dev->p_dev->resource[0]->start;
for (i = 0; i < POLL_LOOP_COUNT; i++) {
if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
@@ -171,7 +170,7 @@ static int wait_for_bulk_out_ready(struct reader_dev *dev)
/* Write to Sync Control Register */
static int write_sync_reg(unsigned char val, struct reader_dev *dev)
{
- int iobase = dev->p_dev->io.BasePort1;
+ int iobase = dev->p_dev->resource[0]->start;
int rc;
rc = wait_for_bulk_out_ready(dev);
@@ -189,7 +188,7 @@ static int write_sync_reg(unsigned char val, struct reader_dev *dev)
static int wait_for_bulk_in_ready(struct reader_dev *dev)
{
int i, rc;
- int iobase = dev->p_dev->io.BasePort1;
+ int iobase = dev->p_dev->resource[0]->start;
for (i = 0; i < POLL_LOOP_COUNT; i++) {
if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
@@ -219,7 +218,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
struct reader_dev *dev = filp->private_data;
- int iobase = dev->p_dev->io.BasePort1;
+ int iobase = dev->p_dev->resource[0]->start;
size_t bytes_to_read;
unsigned long i;
size_t min_bytes_to_read;
@@ -321,7 +320,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct reader_dev *dev = filp->private_data;
- int iobase = dev->p_dev->io.BasePort1;
+ int iobase = dev->p_dev->resource[0]->start;
ssize_t rc;
int i;
unsigned int bytes_to_write;
@@ -528,16 +527,12 @@ static int cm4040_config_check(struct pcmcia_device *p_dev,
return -ENODEV;
/* Get the IOaddr */
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(cfg->io.flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(cfg->io.flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
-
- rc = pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
+ p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags);
+ p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK;
+ rc = pcmcia_request_io(p_dev);
+
dev_printk(KERN_INFO, &p_dev->dev,
"pcmcia_request_io returned 0x%x\n", rc);
return rc;
@@ -549,10 +544,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
struct reader_dev *dev;
int fail_rc;
- link->io.BasePort2 = 0;
- link->io.NumPorts2 = 0;
- link->io.Attributes2 = 0;
-
if (pcmcia_loop_config(link, cm4040_config_check, NULL))
goto cs_release;
@@ -568,8 +559,8 @@ static int reader_config(struct pcmcia_device *link, int devno)
dev = link->priv;
- DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
- link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
+ DEBUGP(2, dev, "device " DEVICE_NAME "%d at %pR\n", devno,
+ link->resource[0]);
DEBUGP(2, dev, "<- reader_config (succ)\n");
return 0;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 63c32e3f23b..67bdb05798b 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -84,23 +84,22 @@ static int ipwireless_probe(struct pcmcia_device *p_dev,
{
struct ipw_dev *ipw = priv_data;
struct resource *io_resource;
- memreq_t memreq_attr_memory;
- memreq_t memreq_common_memory;
int ret;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
- p_dev->io.IOAddrLines = 16;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
/* 0x40 causes it to generate level mode interrupts. */
/* 0x04 enables IREQ pin. */
p_dev->conf.ConfigIndex = cfg->index | 0x44;
- ret = pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->io_lines = 16;
+ ret = pcmcia_request_io(p_dev);
if (ret)
return ret;
- io_resource = request_region(p_dev->io.BasePort1, p_dev->io.NumPorts1,
+ io_resource = request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
IPWIRELESS_PCCARD_NAME);
if (cfg->mem.nwin == 0)
@@ -120,11 +119,8 @@ static int ipwireless_probe(struct pcmcia_device *p_dev,
if (ret != 0)
goto exit1;
- memreq_common_memory.CardOffset = cfg->mem.win[0].card_addr;
- memreq_common_memory.Page = 0;
-
ret = pcmcia_map_mem_page(p_dev, ipw->handle_common_memory,
- &memreq_common_memory);
+ cfg->mem.win[0].card_addr);
if (ret != 0)
goto exit2;
@@ -149,12 +145,7 @@ static int ipwireless_probe(struct pcmcia_device *p_dev,
if (ret != 0)
goto exit2;
- memreq_attr_memory.CardOffset = 0;
- memreq_attr_memory.Page = 0;
-
- ret = pcmcia_map_mem_page(p_dev, ipw->handle_attr_memory,
- &memreq_attr_memory);
-
+ ret = pcmcia_map_mem_page(p_dev, ipw->handle_attr_memory, 0);
if (ret != 0)
goto exit3;
@@ -166,15 +157,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev,
return 0;
exit3:
- pcmcia_release_window(p_dev, ipw->handle_attr_memory);
exit2:
if (ipw->common_memory) {
release_mem_region(ipw->request_common_memory.Base,
ipw->request_common_memory.Size);
iounmap(ipw->common_memory);
- pcmcia_release_window(p_dev, ipw->handle_common_memory);
- } else
- pcmcia_release_window(p_dev, ipw->handle_common_memory);
+ }
exit1:
release_resource(io_resource);
pcmcia_disable_device(p_dev);
@@ -197,7 +185,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
- ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
+ ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start,
ipw->attr_memory, ipw->common_memory,
ipw->is_v2_card, signalled_reboot_callback,
ipw);
@@ -209,10 +197,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
ipw->is_v2_card ? "V2/V3" : "V1");
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
- ": I/O ports 0x%04x-0x%04x, irq %d\n",
- (unsigned int) link->io.BasePort1,
- (unsigned int) (link->io.BasePort1 +
- link->io.NumPorts1 - 1),
+ ": I/O ports %pR, irq %d\n", link->resource[0],
(unsigned int) link->irq);
if (ipw->attr_memory && ipw->common_memory)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
@@ -250,13 +235,12 @@ exit:
release_mem_region(ipw->request_attr_memory.Base,
ipw->request_attr_memory.Size);
iounmap(ipw->attr_memory);
- pcmcia_release_window(link, ipw->handle_attr_memory);
+
}
if (ipw->common_memory) {
release_mem_region(ipw->request_common_memory.Base,
ipw->request_common_memory.Size);
iounmap(ipw->common_memory);
- pcmcia_release_window(link, ipw->handle_common_memory);
}
pcmcia_disable_device(link);
return -1;
@@ -274,11 +258,6 @@ static void release_ipwireless(struct ipw_dev *ipw)
ipw->request_attr_memory.Size);
iounmap(ipw->attr_memory);
}
- if (ipw->common_memory)
- pcmcia_release_window(ipw->link, ipw->handle_common_memory);
- if (ipw->attr_memory)
- pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
-
pcmcia_disable_device(ipw->link);
}
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
index 96d0ef31b17..c207be87b59 100644
--- a/drivers/char/pcmcia/ipwireless/main.h
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/types.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
index 4da6c201f72..3e163d4cab1 100644
--- a/drivers/char/pcmcia/ipwireless/tty.h
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -21,7 +21,6 @@
#include <linux/types.h>
#include <linux/sched.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 308903ec8bf..9ecd6bef5d3 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -70,7 +70,6 @@
#include <linux/workqueue.h>
#include <linux/hdlc.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -572,18 +571,15 @@ static int mgslpc_ioprobe(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- if (cfg->io.nwin > 0) {
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(cfg->io.flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(cfg->io.flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
- return pcmcia_request_io(p_dev, &p_dev->io);
- }
- return -ENODEV;
+ if (!cfg->io.nwin)
+ return -ENODEV;
+
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
+ p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags);
+ p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK;
+
+ return pcmcia_request_io(p_dev);
}
static int mgslpc_config(struct pcmcia_device *link)
@@ -610,16 +606,15 @@ static int mgslpc_config(struct pcmcia_device *link)
if (ret)
goto failed;
- info->io_base = link->io.BasePort1;
+ info->io_base = link->resource[0]->start;
info->irq_level = link->irq;
dev_info(&link->dev, "index 0x%02x:",
link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
+ if (link->resource[0])
+ printk(", io %pR", link->resource[0]);
printk("\n");
return 0;
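
cm4000_cs, cm4040_cs, ipwireless and synclink_cs above are all converted to the same 2.6.36 pcmcia idiom: describe the I/O window through p_dev->resource[0] plus p_dev->io_lines and call the single-argument pcmcia_request_io(). A condensed sketch of such a pcmcia_loop_config() callback; my_cfg_check is an invented name and the field usage mirrors the hunks above rather than any one driver.

#include <linux/errno.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int my_cfg_check(struct pcmcia_device *p_dev,
			cistpl_cftable_entry_t *cfg,
			cistpl_cftable_entry_t *dflt,
			unsigned int vcc, void *priv_data)
{
	if (!cfg->io.nwin)
		return -ENODEV;

	p_dev->resource[0]->start = cfg->io.win[0].base;
	p_dev->resource[0]->end   = cfg->io.win[0].len;
	p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags);
	p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK;

	return pcmcia_request_io(p_dev);	/* single-argument form */
}

A driver's config routine would hand this to pcmcia_loop_config(link, my_cfg_check, NULL), as the converted drivers above do.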
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d83a43130df..c350d01716b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -62,7 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
if (tty->driver == ptm_driver)
devpts_pty_kill(tty->link);
#endif
+ tty_unlock();
tty_vhangup(tty->link);
+ tty_lock();
}
}
@@ -171,6 +173,23 @@ static int pty_set_lock(struct tty_struct *tty, int __user *arg)
return 0;
}
+/* Send a signal to the slave */
+static int pty_signal(struct tty_struct *tty, int sig)
+{
+ unsigned long flags;
+ struct pid *pgrp;
+
+ if (tty->link) {
+ spin_lock_irqsave(&tty->link->ctrl_lock, flags);
+ pgrp = get_pid(tty->link->pgrp);
+ spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
+
+ kill_pgrp(pgrp, sig, 1);
+ put_pid(pgrp);
+ }
+ return 0;
+}
+
static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
@@ -321,6 +340,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
switch (cmd) {
case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */
return pty_set_lock(tty, (int __user *) arg);
+ case TIOCSIG: /* Send signal to other side of pty */
+ return pty_signal(tty, (int) arg);
}
return -ENOIOCTLCMD;
}
@@ -476,6 +497,8 @@ static int pty_unix98_ioctl(struct tty_struct *tty, struct file *file,
return pty_set_lock(tty, (int __user *)arg);
case TIOCGPTN: /* Get PT Number */
return put_user(tty->index, (unsigned int __user *)arg);
+ case TIOCSIG: /* Send signal to other side of pty */
+ return pty_signal(tty, (int) arg);
}
return -ENOIOCTLCMD;
@@ -626,7 +649,7 @@ static const struct tty_operations pty_unix98_ops = {
* allocated_ptys_lock handles the list of free pty numbers
*/
-static int __ptmx_open(struct inode *inode, struct file *filp)
+static int ptmx_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty;
int retval;
@@ -635,11 +658,14 @@ static int __ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
/* find a device that is not in use. */
+ tty_lock();
index = devpts_new_index(inode);
+ tty_unlock();
if (index < 0)
return index;
mutex_lock(&tty_mutex);
+ tty_lock();
tty = tty_init_dev(ptm_driver, index, 1);
mutex_unlock(&tty_mutex);
@@ -649,34 +675,29 @@ static int __ptmx_open(struct inode *inode, struct file *filp)
}
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
+
+ tty_add_file(tty, filp);
retval = devpts_pty_new(inode, tty->link);
if (retval)
goto out1;
retval = ptm_driver->ops->open(tty, filp);
- if (!retval)
- return 0;
+ if (retval)
+ goto out2;
out1:
+ tty_unlock();
+ return retval;
+out2:
+ tty_unlock();
tty_release(inode, filp);
return retval;
out:
devpts_kill_index(inode, index);
+ tty_unlock();
return retval;
}
-static int ptmx_open(struct inode *inode, struct file *filp)
-{
- int ret;
-
- lock_kernel();
- ret = __ptmx_open(inode, filp);
- unlock_kernel();
- return ret;
-}
-
static struct file_operations ptmx_fops;
static void __init unix98_pty_init(void)
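
The pty hunk above adds a TIOCSIG ioctl on the master side so a controlling program (a telnetd-style LINEMODE server, say) can deliver a signal to the slave's foreground process group without writing control characters. A userspace sketch, not part of the patch; the fallback ioctl number is an assumption copied from the kernel's asm-generic ioctls and the helper name is invented.

#include <sys/ioctl.h>
#include <signal.h>

#ifndef TIOCSIG
#define TIOCSIG	_IOW('T', 0x36, int)	/* assumption: asm-generic/ioctls.h value */
#endif

int send_sigint_to_slave(int master_fd)
{
	/* the kernel signals the slave's foreground process group */
	return ioctl(master_fd, TIOCSIG, SIGINT);
}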
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index b02332a5412..af4de1fe844 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -47,7 +47,6 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/tty_flip.h>
-#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
@@ -1184,6 +1183,7 @@ static int rc_set_serial_info(struct tty_struct *tty, struct riscom_port *port,
if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
return -EFAULT;
+ mutex_lock(&port->port.mutex);
change_speed = ((port->port.flags & ASYNC_SPD_MASK) !=
(tmp.flags & ASYNC_SPD_MASK));
@@ -1191,8 +1191,10 @@ static int rc_set_serial_info(struct tty_struct *tty, struct riscom_port *port,
if ((tmp.close_delay != port->port.close_delay) ||
(tmp.closing_wait != port->port.closing_wait) ||
((tmp.flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK)))
+ (port->port.flags & ~ASYNC_USR_MASK))) {
+ mutex_unlock(&port->port.mutex);
return -EPERM;
+ }
port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
(tmp.flags & ASYNC_USR_MASK));
} else {
@@ -1208,6 +1210,7 @@ static int rc_set_serial_info(struct tty_struct *tty, struct riscom_port *port,
rc_change_speed(tty, bp, port);
spin_unlock_irqrestore(&riscom_lock, flags);
}
+ mutex_unlock(&port->port.mutex);
return 0;
}
@@ -1220,12 +1223,15 @@ static int rc_get_serial_info(struct riscom_port *port,
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_CIRRUS;
tmp.line = port - rc_port;
+
+ mutex_lock(&port->port.mutex);
tmp.port = bp->base;
tmp.irq = bp->irq;
tmp.flags = port->port.flags;
tmp.baud_base = (RC_OSCFREQ + CD180_TPC/2) / CD180_TPC;
tmp.close_delay = port->port.close_delay * HZ/100;
tmp.closing_wait = port->port.closing_wait * HZ/100;
+ mutex_unlock(&port->port.mutex);
tmp.xmit_fifo_size = CD180_NFIFO;
return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
@@ -1242,14 +1248,10 @@ static int rc_ioctl(struct tty_struct *tty, struct file *filp,
switch (cmd) {
case TIOCGSERIAL:
- lock_kernel();
retval = rc_get_serial_info(port, argp);
- unlock_kernel();
break;
case TIOCSSERIAL:
- lock_kernel();
retval = rc_set_serial_info(tty, port, argp);
- unlock_kernel();
break;
default:
retval = -ENOIOCTLCMD;
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 0e29a23ec4c..7c79d243acc 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -73,7 +73,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
-#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
@@ -1017,6 +1016,7 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
if (tty_port_close_start(port, tty, filp) == 0)
return;
+ mutex_lock(&port->mutex);
cp = &info->channel;
/*
* Before we drop DTR, make sure the UART transmitter
@@ -1060,9 +1060,13 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
info->xmit_buf = NULL;
}
}
+ spin_lock_irq(&port->lock);
info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE);
tty->closing = 0;
+ spin_unlock_irq(&port->lock);
+ mutex_unlock(&port->mutex);
tty_port_tty_set(port, NULL);
+
wake_up_interruptible(&port->close_wait);
complete_all(&info->close_wait);
atomic_dec(&rp_num_ports_open);
@@ -1210,11 +1214,13 @@ static int get_config(struct r_port *info, struct rocket_config __user *retinfo)
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
+ mutex_lock(&info->port.mutex);
tmp.line = info->line;
tmp.flags = info->flags;
tmp.close_delay = info->port.close_delay;
tmp.closing_wait = info->port.closing_wait;
tmp.port = rcktpt_io_addr[(info->line >> 5) & 3];
+ mutex_unlock(&info->port.mutex);
if (copy_to_user(retinfo, &tmp, sizeof (*retinfo)))
return -EFAULT;
@@ -1229,12 +1235,16 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
if (copy_from_user(&new_serial, new_info, sizeof (new_serial)))
return -EFAULT;
+ mutex_lock(&info->port.mutex);
if (!capable(CAP_SYS_ADMIN))
{
- if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK))
+ if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK)) {
+ mutex_unlock(&info->port.mutex);
return -EPERM;
+ }
info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
configure_r_port(tty, info, NULL);
+ mutex_unlock(&info->port.mutex);
return 0;
}
@@ -1250,6 +1260,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
tty->alt_speed = 230400;
if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
tty->alt_speed = 460800;
+ mutex_unlock(&info->port.mutex);
configure_r_port(tty, info, NULL);
return 0;
@@ -1325,8 +1336,6 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file,
if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl"))
return -ENXIO;
- lock_kernel();
-
switch (cmd) {
case RCKP_GET_STRUCT:
if (copy_to_user(argp, info, sizeof (struct r_port)))
@@ -1350,7 +1359,6 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file,
default:
ret = -ENOIOCTLCMD;
}
- unlock_kernel();
return ret;
}
@@ -1471,7 +1479,6 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
jiffies);
printk(KERN_INFO "cps=%d...\n", info->cps);
#endif
- lock_kernel();
while (1) {
txcnt = sGetTxCnt(cp);
if (!txcnt) {
@@ -1499,7 +1506,6 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- unlock_kernel();
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies);
#endif
@@ -1512,6 +1518,7 @@ static void rp_hangup(struct tty_struct *tty)
{
CHANNEL_t *cp;
struct r_port *info = tty->driver_data;
+ unsigned long flags;
if (rocket_paranoia_check(info, "rp_hangup"))
return;
@@ -1520,11 +1527,15 @@ static void rp_hangup(struct tty_struct *tty)
printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line);
#endif
rp_flush_buffer(tty);
- if (info->port.flags & ASYNC_CLOSING)
+ spin_lock_irqsave(&info->port.lock, flags);
+ if (info->port.flags & ASYNC_CLOSING) {
+ spin_unlock_irqrestore(&info->port.lock, flags);
return;
+ }
if (info->port.count)
atomic_dec(&rp_num_ports_open);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+ spin_unlock_irqrestore(&info->port.lock, flags);
tty_port_hangup(&info->port);
@@ -1535,7 +1546,7 @@ static void rp_hangup(struct tty_struct *tty)
sDisCTSFlowCtl(cp);
sDisTxSoftFlowCtl(cp);
sClrTxXOFF(cp);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ clear_bit(ASYNCB_INITIALIZED, &info->port.flags);
wake_up_interruptible(&info->port.open_wait);
}
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 95acb8c880f..dfa8b3062fd 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -961,7 +961,7 @@ static int __init rtc_init(void)
#endif
#ifdef CONFIG_SPARC32
struct device_node *ebus_dp;
- struct of_device *op;
+ struct platform_device *op;
#else
void *r;
#ifdef RTC_IRQ
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index f97b9e84806..ebae344ce91 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -26,6 +26,7 @@
#include <linux/selection.h>
#include <linux/tiocl.h>
#include <linux/console.h>
+#include <linux/smp_lock.h>
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define isspace(c) ((c) == ' ')
@@ -312,12 +313,20 @@ int paste_selection(struct tty_struct *tty)
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
+ /* always called with BTM from vt_ioctl */
+ WARN_ON(!tty_locked());
+
acquire_console_sem();
poke_blanked_console();
release_console_sem();
- ld = tty_ldisc_ref_wait(tty);
-
+ ld = tty_ldisc_ref(tty);
+ if (!ld) {
+ tty_unlock();
+ ld = tty_ldisc_ref_wait(tty);
+ tty_lock();
+ }
+
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index ecbe479c7d6..f646725bd56 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -1505,7 +1505,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */
#endif
- lock_kernel();
+ tty_lock();
switch (cmd) {
case CYGETMON:
@@ -1561,7 +1561,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
default:
ret_val = -ENOIOCTLCMD;
}
- unlock_kernel();
+ tty_unlock();
#ifdef SERIAL_DEBUG_OTHER
printk("cy_ioctl done\n");
@@ -1786,7 +1786,9 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
tty->name, info->count);
/**/
#endif
- schedule();
+ tty_unlock();
+ schedule();
+ tty_lock();
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&info->open_wait, &wait);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 2c24fcdc722..9f8495b4fc8 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -1365,7 +1365,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
retval = -ERESTARTSYS;
break;
}
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
@@ -1863,8 +1865,7 @@ static int sx_set_serial_info(struct specialix_port *port,
return -EFAULT;
}
- lock_kernel();
-
+ mutex_lock(&port->port.mutex);
change_speed = ((port->port.flags & ASYNC_SPD_MASK) !=
(tmp.flags & ASYNC_SPD_MASK));
change_speed |= (tmp.custom_divisor != port->custom_divisor);
@@ -1875,7 +1876,7 @@ static int sx_set_serial_info(struct specialix_port *port,
((tmp.flags & ~ASYNC_USR_MASK) !=
(port->port.flags & ~ASYNC_USR_MASK))) {
func_exit();
- unlock_kernel();
+ mutex_unlock(&port->port.mutex);
return -EPERM;
}
port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
@@ -1892,7 +1893,7 @@ static int sx_set_serial_info(struct specialix_port *port,
sx_change_speed(bp, port);
func_exit();
- unlock_kernel();
+ mutex_unlock(&port->port.mutex);
return 0;
}
@@ -1906,7 +1907,7 @@ static int sx_get_serial_info(struct specialix_port *port,
func_enter();
memset(&tmp, 0, sizeof(tmp));
- lock_kernel();
+ mutex_lock(&port->port.mutex);
tmp.type = PORT_CIRRUS;
tmp.line = port - sx_port;
tmp.port = bp->base;
@@ -1917,7 +1918,7 @@ static int sx_get_serial_info(struct specialix_port *port,
tmp.closing_wait = port->port.closing_wait * HZ/100;
tmp.custom_divisor = port->custom_divisor;
tmp.xmit_fifo_size = CD186x_NFIFO;
- unlock_kernel();
+ mutex_unlock(&port->port.mutex);
if (copy_to_user(retinfo, &tmp, sizeof(tmp))) {
func_exit();
return -EFAULT;
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 6049fd73192..f2167f8e5aa 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -807,7 +807,6 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
timeout = HZ;
tend = jiffies + timeout;
- lock_kernel();
while (stl_datastate(portp)) {
if (signal_pending(current))
break;
@@ -815,7 +814,6 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
if (time_after_eq(jiffies, tend))
break;
}
- unlock_kernel();
}
/*****************************************************************************/
@@ -1029,6 +1027,8 @@ static int stl_getserial(struct stlport *portp, struct serial_struct __user *sp)
pr_debug("stl_getserial(portp=%p,sp=%p)\n", portp, sp);
memset(&sio, 0, sizeof(struct serial_struct));
+
+ mutex_lock(&portp->port.mutex);
sio.line = portp->portnr;
sio.port = portp->ioaddr;
sio.flags = portp->port.flags;
@@ -1048,6 +1048,7 @@ static int stl_getserial(struct stlport *portp, struct serial_struct __user *sp)
brdp = stl_brds[portp->brdnr];
if (brdp != NULL)
sio.irq = brdp->irq;
+ mutex_unlock(&portp->port.mutex);
return copy_to_user(sp, &sio, sizeof(struct serial_struct)) ? -EFAULT : 0;
}
@@ -1069,12 +1070,15 @@ static int stl_setserial(struct tty_struct *tty, struct serial_struct __user *sp
if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
return -EFAULT;
+ mutex_lock(&portp->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
if ((sio.baud_base != portp->baud_base) ||
(sio.close_delay != portp->close_delay) ||
((sio.flags & ~ASYNC_USR_MASK) !=
- (portp->port.flags & ~ASYNC_USR_MASK)))
+ (portp->port.flags & ~ASYNC_USR_MASK))) {
+ mutex_unlock(&portp->port.mutex);
return -EPERM;
+ }
}
portp->port.flags = (portp->port.flags & ~ASYNC_USR_MASK) |
@@ -1083,6 +1087,7 @@ static int stl_setserial(struct tty_struct *tty, struct serial_struct __user *sp
portp->close_delay = sio.close_delay;
portp->closing_wait = sio.closing_wait;
portp->custom_divisor = sio.custom_divisor;
+ mutex_unlock(&portp->port.mutex);
stl_setport(portp, tty->termios);
return 0;
}
@@ -1147,8 +1152,6 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd
rc = 0;
- lock_kernel();
-
switch (cmd) {
case TIOCGSERIAL:
rc = stl_getserial(portp, argp);
@@ -1173,7 +1176,6 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd
rc = -ENOIOCTLCMD;
break;
}
- unlock_kernel();
return rc;
}
@@ -2327,6 +2329,7 @@ static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comst
return -ENODEV;
}
+ mutex_lock(&portp->port.mutex);
portp->stats.state = portp->istate;
portp->stats.flags = portp->port.flags;
portp->stats.hwid = portp->hwid;
@@ -2358,6 +2361,7 @@ static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comst
(STL_TXBUFSIZE - (tail - head));
portp->stats.signals = (unsigned long) stl_getsignals(portp);
+ mutex_unlock(&portp->port.mutex);
return copy_to_user(cp, &portp->stats,
sizeof(comstats_t)) ? -EFAULT : 0;
@@ -2382,10 +2386,12 @@ static int stl_clrportstats(struct stlport *portp, comstats_t __user *cp)
return -ENODEV;
}
+ mutex_lock(&portp->port.mutex);
memset(&portp->stats, 0, sizeof(comstats_t));
portp->stats.brd = portp->brdnr;
portp->stats.panel = portp->panelnr;
portp->stats.port = portp->portnr;
+ mutex_unlock(&portp->port.mutex);
return copy_to_user(cp, &portp->stats,
sizeof(comstats_t)) ? -EFAULT : 0;
}
@@ -2451,7 +2457,6 @@ static long stl_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
return -ENODEV;
rc = 0;
- lock_kernel();
switch (cmd) {
case COM_GETPORTSTATS:
rc = stl_getportstats(NULL, NULL, argp);
@@ -2472,7 +2477,6 @@ static long stl_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
rc = -ENOIOCTLCMD;
break;
}
- unlock_kernel();
return rc;
}
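
The stallion.c hunks above replace the removed lock_kernel()/unlock_kernel() pairs with the per-port mutex embedded in struct tty_port. A minimal sketch of the resulting pattern, with a hypothetical demo_port structure standing in for struct stlport (illustrative only, not part of the patch):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/serial.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/uaccess.h>

struct demo_port {                      /* hypothetical stand-in for struct stlport */
        struct tty_port port;
        unsigned int portnr;
        unsigned int ioaddr;
        int baud_base;
        int close_delay;
};

static int demo_get_serial(struct demo_port *dp, struct serial_struct __user *sp)
{
        struct serial_struct sio;

        memset(&sio, 0, sizeof(sio));

        mutex_lock(&dp->port.mutex);    /* serialize against a concurrent TIOCSSERIAL */
        sio.line = dp->portnr;
        sio.port = dp->ioaddr;
        sio.flags = dp->port.flags;
        sio.baud_base = dp->baud_base;
        sio.close_delay = dp->close_delay;
        mutex_unlock(&dp->port.mutex);

        /* copy outside the lock; only the local snapshot is touched here */
        return copy_to_user(sp, &sio, sizeof(sio)) ? -EFAULT : 0;
}
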
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index a81ec4fcf6f..5b24db4ff7f 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -1699,7 +1699,7 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- lock_kernel();
+ tty_lock();
sx_dprintk(SX_DEBUG_FIRMWARE, "IOCTL %x: %lx\n", cmd, arg);
@@ -1848,7 +1848,7 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
break;
}
out:
- unlock_kernel();
+ tty_unlock();
func_exit();
return rc;
}
@@ -1859,7 +1859,7 @@ static int sx_break(struct tty_struct *tty, int flag)
int rv;
func_enter();
- lock_kernel();
+ tty_lock();
if (flag)
rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK);
@@ -1868,7 +1868,7 @@ static int sx_break(struct tty_struct *tty, int flag)
if (rv != 1)
printk(KERN_ERR "sx: couldn't send break (%x).\n",
read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat)));
- unlock_kernel();
+ tty_unlock();
func_exit();
return 0;
}
@@ -1909,7 +1909,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
/* func_enter2(); */
rc = 0;
- lock_kernel();
+ tty_lock();
switch (cmd) {
case TIOCGSERIAL:
rc = gs_getserial(&port->gs, argp);
@@ -1921,7 +1921,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
rc = -ENOIOCTLCMD;
break;
}
- unlock_kernel();
+ tty_unlock();
/* func_exit(); */
return rc;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 0658fc54822..a2a58004e18 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -81,7 +81,6 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
@@ -2436,7 +2435,9 @@ static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *
if (!user_icount) {
memset(&info->icount, 0, sizeof(info->icount));
} else {
+ mutex_lock(&info->port.mutex);
COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+ mutex_unlock(&info->port.mutex);
if (err)
return -EFAULT;
}
@@ -2461,7 +2462,9 @@ static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_p
printk("%s(%d):mgsl_get_params(%s)\n",
__FILE__,__LINE__, info->device_name);
+ mutex_lock(&info->port.mutex);
COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
+ mutex_unlock(&info->port.mutex);
if (err) {
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
@@ -2501,11 +2504,13 @@ static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_pa
return -EFAULT;
}
+ mutex_lock(&info->port.mutex);
spin_lock_irqsave(&info->irq_spinlock,flags);
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
spin_unlock_irqrestore(&info->irq_spinlock,flags);
mgsl_change_params(info);
+ mutex_unlock(&info->port.mutex);
return 0;
@@ -2935,7 +2940,6 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
struct mgsl_struct * info = tty->driver_data;
- int ret;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
@@ -2950,10 +2954,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
return -EIO;
}
- lock_kernel();
- ret = mgsl_ioctl_common(info, cmd, arg);
- unlock_kernel();
- return ret;
+ return mgsl_ioctl_common(info, cmd, arg);
}
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
@@ -3109,12 +3110,14 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
if (tty_port_close_start(&info->port, tty, filp) == 0)
goto cleanup;
-
+
+ mutex_lock(&info->port.mutex);
if (info->port.flags & ASYNC_INITIALIZED)
mgsl_wait_until_sent(tty, info->timeout);
mgsl_flush_buffer(tty);
tty_ldisc_flush(tty);
shutdown(info);
+ mutex_unlock(&info->port.mutex);
tty_port_close_end(&info->port, tty);
info->port.tty = NULL;
@@ -3162,7 +3165,6 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: use tight timings here to satisfy the NIST-PCTS.
*/
- lock_kernel();
if ( info->params.data_rate ) {
char_time = info->timeout/(32 * 5);
if (!char_time)
@@ -3192,7 +3194,6 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
}
- unlock_kernel();
exit:
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3348,7 +3349,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
printk("%s(%d):block_til_ready blocking on %s count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
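
The block_til_ready() change above is the recurring pattern of this series: the big tty mutex, unlike the BKL, is not released automatically when the task sleeps, so it has to be dropped by hand around schedule(). A hedged sketch of such an open-wait loop, built only on the generic tty_port helpers:

#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/wait.h>

static int demo_block_til_ready(struct tty_port *port)
{
        DECLARE_WAITQUEUE(wait, current);
        int retval = 0;

        add_wait_queue(&port->open_wait, &wait);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (tty_port_carrier_raised(port))
                        break;
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                tty_unlock();           /* give up the BTM while sleeping ... */
                schedule();
                tty_lock();             /* ... and retake it before rechecking state */
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&port->open_wait, &wait);
        return retval;
}
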
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 334cf5c8c8b..e63b830c86c 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -40,8 +40,8 @@
#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
-//#define DBGTBUF(info) dump_tbufs(info)
-//#define DBGRBUF(info) dump_rbufs(info)
+/*#define DBGTBUF(info) dump_tbufs(info)*/
+/*#define DBGRBUF(info) dump_rbufs(info)*/
#include <linux/module.h>
@@ -62,7 +62,6 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
@@ -676,12 +675,14 @@ static int open(struct tty_struct *tty, struct file *filp)
goto cleanup;
}
+ mutex_lock(&info->port.mutex);
info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
retval = -EBUSY;
spin_unlock_irqrestore(&info->netlock, flags);
+ mutex_unlock(&info->port.mutex);
goto cleanup;
}
info->port.count++;
@@ -690,10 +691,12 @@ static int open(struct tty_struct *tty, struct file *filp)
if (info->port.count == 1) {
/* 1st open on this device, init hardware */
retval = startup(info);
- if (retval < 0)
+ if (retval < 0) {
+ mutex_unlock(&info->port.mutex);
goto cleanup;
+ }
}
-
+ mutex_unlock(&info->port.mutex);
retval = block_til_ready(tty, filp, info);
if (retval) {
DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
@@ -725,12 +728,14 @@ static void close(struct tty_struct *tty, struct file *filp)
if (tty_port_close_start(&info->port, tty, filp) == 0)
goto cleanup;
+ mutex_lock(&info->port.mutex);
if (info->port.flags & ASYNC_INITIALIZED)
wait_until_sent(tty, info->timeout);
flush_buffer(tty);
tty_ldisc_flush(tty);
shutdown(info);
+ mutex_unlock(&info->port.mutex);
tty_port_close_end(&info->port, tty);
info->port.tty = NULL;
@@ -741,17 +746,23 @@ cleanup:
static void hangup(struct tty_struct *tty)
{
struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
if (sanity_check(info, tty->name, "hangup"))
return;
DBGINFO(("%s hangup\n", info->device_name));
flush_buffer(tty);
+
+ mutex_lock(&info->port.mutex);
shutdown(info);
+ spin_lock_irqsave(&info->port.lock, flags);
info->port.count = 0;
info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
info->port.tty = NULL;
+ spin_unlock_irqrestore(&info->port.lock, flags);
+ mutex_unlock(&info->port.mutex);
wake_up_interruptible(&info->port.open_wait);
}
@@ -901,8 +912,6 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
* Note: use tight timings here to satisfy the NIST-PCTS.
*/
- lock_kernel();
-
if (info->params.data_rate) {
char_time = info->timeout/(32 * 5);
if (!char_time)
@@ -920,8 +929,6 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
- unlock_kernel();
-
exit:
DBGINFO(("%s wait_until_sent exit\n", info->device_name));
}
@@ -1041,8 +1048,37 @@ static int ioctl(struct tty_struct *tty, struct file *file,
return -EIO;
}
- lock_kernel();
-
+ switch (cmd) {
+ case MGSL_IOCWAITEVENT:
+ return wait_mgsl_event(info, argp);
+ case TIOCMIWAIT:
+ return modem_input_wait(info,(int)arg);
+ case TIOCGICOUNT:
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->lock,flags);
+ p_cuser = argp;
+ if (put_user(cnow.cts, &p_cuser->cts) ||
+ put_user(cnow.dsr, &p_cuser->dsr) ||
+ put_user(cnow.rng, &p_cuser->rng) ||
+ put_user(cnow.dcd, &p_cuser->dcd) ||
+ put_user(cnow.rx, &p_cuser->rx) ||
+ put_user(cnow.tx, &p_cuser->tx) ||
+ put_user(cnow.frame, &p_cuser->frame) ||
+ put_user(cnow.overrun, &p_cuser->overrun) ||
+ put_user(cnow.parity, &p_cuser->parity) ||
+ put_user(cnow.brk, &p_cuser->brk) ||
+ put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
+ return -EFAULT;
+ return 0;
+ case MGSL_IOCSGPIO:
+ return set_gpio(info, argp);
+ case MGSL_IOCGGPIO:
+ return get_gpio(info, argp);
+ case MGSL_IOCWAITGPIO:
+ return wait_gpio(info, argp);
+ }
+ mutex_lock(&info->port.mutex);
switch (cmd) {
case MGSL_IOCGPARAMS:
ret = get_params(info, argp);
@@ -1068,50 +1104,16 @@ static int ioctl(struct tty_struct *tty, struct file *file,
case MGSL_IOCGSTATS:
ret = get_stats(info, argp);
break;
- case MGSL_IOCWAITEVENT:
- ret = wait_mgsl_event(info, argp);
- break;
- case TIOCMIWAIT:
- ret = modem_input_wait(info,(int)arg);
- break;
case MGSL_IOCGIF:
ret = get_interface(info, argp);
break;
case MGSL_IOCSIF:
ret = set_interface(info,(int)arg);
break;
- case MGSL_IOCSGPIO:
- ret = set_gpio(info, argp);
- break;
- case MGSL_IOCGGPIO:
- ret = get_gpio(info, argp);
- break;
- case MGSL_IOCWAITGPIO:
- ret = wait_gpio(info, argp);
- break;
- case TIOCGICOUNT:
- spin_lock_irqsave(&info->lock,flags);
- cnow = info->icount;
- spin_unlock_irqrestore(&info->lock,flags);
- p_cuser = argp;
- if (put_user(cnow.cts, &p_cuser->cts) ||
- put_user(cnow.dsr, &p_cuser->dsr) ||
- put_user(cnow.rng, &p_cuser->rng) ||
- put_user(cnow.dcd, &p_cuser->dcd) ||
- put_user(cnow.rx, &p_cuser->rx) ||
- put_user(cnow.tx, &p_cuser->tx) ||
- put_user(cnow.frame, &p_cuser->frame) ||
- put_user(cnow.overrun, &p_cuser->overrun) ||
- put_user(cnow.parity, &p_cuser->parity) ||
- put_user(cnow.brk, &p_cuser->brk) ||
- put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
- ret = -EFAULT;
- ret = 0;
- break;
default:
ret = -ENOIOCTLCMD;
}
- unlock_kernel();
+ mutex_unlock(&info->port.mutex);
return ret;
}
@@ -3244,7 +3246,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 2b18adc4ee1..e56caf7d82a 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -52,7 +52,6 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
@@ -813,13 +812,15 @@ static void close(struct tty_struct *tty, struct file *filp)
if (tty_port_close_start(&info->port, tty, filp) == 0)
goto cleanup;
-
+
+ mutex_lock(&info->port.mutex);
if (info->port.flags & ASYNC_INITIALIZED)
wait_until_sent(tty, info->timeout);
flush_buffer(tty);
tty_ldisc_flush(tty);
shutdown(info);
+ mutex_unlock(&info->port.mutex);
tty_port_close_end(&info->port, tty);
info->port.tty = NULL;
@@ -835,6 +836,7 @@ cleanup:
static void hangup(struct tty_struct *tty)
{
SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s hangup()\n",
@@ -843,12 +845,16 @@ static void hangup(struct tty_struct *tty)
if (sanity_check(info, tty->name, "hangup"))
return;
+ mutex_lock(&info->port.mutex);
flush_buffer(tty);
shutdown(info);
+ spin_lock_irqsave(&info->port.lock, flags);
info->port.count = 0;
info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
info->port.tty = NULL;
+ spin_unlock_irqrestore(&info->port.lock, flags);
+ mutex_unlock(&info->port.mutex);
wake_up_interruptible(&info->port.open_wait);
}
@@ -1062,9 +1068,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
if (sanity_check(info, tty->name, "wait_until_sent"))
return;
- lock_kernel();
-
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!test_bit(ASYNCB_INITIALIZED, &info->port.flags))
goto exit;
orig_jiffies = jiffies;
@@ -1094,8 +1098,10 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
} else {
- //TODO: determine if there is something similar to USC16C32
- // TXSTATUS_ALL_SENT status
+ /*
+ * TODO: determine if there is something similar to USC16C32
+ * TXSTATUS_ALL_SENT status
+ */
while ( info->tx_active && info->tx_enabled) {
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
@@ -1106,7 +1112,6 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
}
exit:
- unlock_kernel();
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s wait_until_sent() exit\n",
__FILE__,__LINE__, info->device_name );
@@ -1122,7 +1127,6 @@ static int write_room(struct tty_struct *tty)
if (sanity_check(info, tty->name, "write_room"))
return 0;
- lock_kernel();
if (info->params.mode == MGSL_MODE_HDLC) {
ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
} else {
@@ -1130,7 +1134,6 @@ static int write_room(struct tty_struct *tty)
if (ret < 0)
ret = 0;
}
- unlock_kernel();
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s write_room()=%d\n",
@@ -1251,7 +1254,7 @@ static void tx_release(struct tty_struct *tty)
*
* Return Value: 0 if success, otherwise error code
*/
-static int do_ioctl(struct tty_struct *tty, struct file *file,
+static int ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
SLMP_INFO *info = tty->driver_data;
@@ -1341,16 +1344,6 @@ static int do_ioctl(struct tty_struct *tty, struct file *file,
return 0;
}
-static int ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = do_ioctl(tty, file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
/*
* /proc fs routines....
*/
@@ -2883,7 +2876,9 @@ static int get_stats(SLMP_INFO * info, struct mgsl_icount __user *user_icount)
if (!user_icount) {
memset(&info->icount, 0, sizeof(info->icount));
} else {
+ mutex_lock(&info->port.mutex);
COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+ mutex_unlock(&info->port.mutex);
if (err)
return -EFAULT;
}
@@ -2898,7 +2893,9 @@ static int get_params(SLMP_INFO * info, MGSL_PARAMS __user *user_params)
printk("%s(%d):%s get_params()\n",
__FILE__,__LINE__, info->device_name);
+ mutex_lock(&info->port.mutex);
COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
+ mutex_unlock(&info->port.mutex);
if (err) {
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):%s get_params() user buffer copy failed\n",
@@ -2926,11 +2923,13 @@ static int set_params(SLMP_INFO * info, MGSL_PARAMS __user *new_params)
return -EFAULT;
}
+ mutex_lock(&info->port.mutex);
spin_lock_irqsave(&info->lock,flags);
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
spin_unlock_irqrestore(&info->lock,flags);
change_params(info);
+ mutex_unlock(&info->port.mutex);
return 0;
}
@@ -3366,7 +3365,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
printk("%s(%d):%s block_til_ready() count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
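
The hangup() changes in synclink_gt.c and synclinkmp.c introduce two levels of protection: the port mutex serializes against open/close, while the port.lock spinlock guards the count/flags/tty fields that may also be read from interrupt context. A sketch of that split (the demo_* names and the shutdown callback are placeholders, not driver API):

#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>

static void demo_hangup(struct tty_port *port,
                        void (*hw_shutdown)(struct tty_port *))
{
        unsigned long flags;

        mutex_lock(&port->mutex);       /* keep open()/close() out */
        hw_shutdown(port);

        spin_lock_irqsave(&port->lock, flags);
        port->count = 0;
        port->flags &= ~ASYNC_NORMAL_ACTIVE;
        port->tty = NULL;
        spin_unlock_irqrestore(&port->lock, flags);

        mutex_unlock(&port->mutex);

        wake_up_interruptible(&port->open_wait);
}
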
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 878ac0c2cc6..ef31bb81e84 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/fs.h>
-#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
-static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
+static void sysrq_handle_loglevel(int key)
{
int i;
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
};
#ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key, struct tty_struct *tty)
+static void sysrq_handle_SAK(int key)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
#endif
#ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key, struct tty_struct *tty)
+static void sysrq_handle_unraw(int key)
{
struct kbd_struct *kbd = &kbd_table[fg_console];
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
-static void sysrq_handle_crash(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key)
{
char *killer = NULL;
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_reboot(int key, struct tty_struct *tty)
+static void sysrq_handle_reboot(int key)
{
lockdep_off();
local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
.enable_mask = SYSRQ_ENABLE_BOOT,
};
-static void sysrq_handle_sync(int key, struct tty_struct *tty)
+static void sysrq_handle_sync(int key)
{
emergency_sync();
}
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
.enable_mask = SYSRQ_ENABLE_SYNC,
};
-static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
+static void sysrq_handle_show_timers(int key)
{
sysrq_timer_list_show();
}
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
};
-static void sysrq_handle_mountro(int key, struct tty_struct *tty)
+static void sysrq_handle_mountro(int key)
{
emergency_remount();
}
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
};
#ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
+static void sysrq_handle_showlocks(int key)
{
debug_show_all_locks();
}
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
-static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+static void sysrq_handle_showallcpus(int key)
{
/*
* Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
};
#endif
-static void sysrq_handle_showregs(int key, struct tty_struct *tty)
+static void sysrq_handle_showregs(int key)
{
struct pt_regs *regs = get_irq_regs();
if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate(int key)
{
show_state();
}
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate_blocked(int key)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
#ifdef CONFIG_TRACING
#include <linux/ftrace.h>
-static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+static void sysrq_ftrace_dump(int key)
{
ftrace_dump(DUMP_ALL);
}
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_showmem(int key, struct tty_struct *tty)
+static void sysrq_handle_showmem(int key)
{
show_mem();
}
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
}
}
-static void sysrq_handle_term(int key, struct tty_struct *tty)
+static void sysrq_handle_term(int key)
{
send_sig_all(SIGTERM);
console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
static DECLARE_WORK(moom_work, moom_callback);
-static void sysrq_handle_moom(int key, struct tty_struct *tty)
+static void sysrq_handle_moom(int key)
{
schedule_work(&moom_work);
}
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
};
#ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key, struct tty_struct *tty)
+static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
}
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
};
#endif
-static void sysrq_handle_kill(int key, struct tty_struct *tty)
+static void sysrq_handle_kill(int key)
{
send_sig_all(SIGKILL);
console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-static void sysrq_handle_unrt(int key, struct tty_struct *tty)
+static void sysrq_handle_unrt(int key)
{
normalize_rt_tasks();
}
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, bool check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
printk("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
- op_p->handler(key, tty);
+ op_p->handler(key);
} else {
printk("This sysrq operation is disabled.\n");
}
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-void handle_sysrq(int key, struct tty_struct *tty)
+void handle_sysrq(int key)
{
if (sysrq_on())
- __handle_sysrq(key, tty, 1);
+ __handle_sysrq(key, true);
}
EXPORT_SYMBOL(handle_sysrq);
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
default:
if (sysrq_down && value && value != 2)
- __handle_sysrq(sysrq_xlate[code], NULL, 1);
+ __handle_sysrq(sysrq_xlate[code], true);
break;
}
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
if (get_user(c, buf))
return -EFAULT;
- __handle_sysrq(c, NULL, 0);
+ __handle_sysrq(c, false);
}
return count;
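
With the tty argument gone from the sysrq path, a handler now receives only the key that triggered it. A hypothetical module registering such a handler (the 'x' binding and all demo_* names are made up for illustration):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysrq.h>

static void demo_sysrq_handler(int key)
{
        printk(KERN_INFO "demo sysrq: got key %d\n", key);
}

static struct sysrq_key_op demo_sysrq_op = {
        .handler        = demo_sysrq_handler,
        .help_msg       = "demo(x)",
        .action_msg     = "Demo sysrq handler",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
};

static int __init demo_sysrq_init(void)
{
        return register_sysrq_key('x', &demo_sysrq_op);
}

static void __exit demo_sysrq_exit(void)
{
        unregister_sysrq_key('x', &demo_sysrq_op);
}

module_init(demo_sysrq_init);
module_exit(demo_sysrq_exit);
MODULE_LICENSE("GPL");
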
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index d71f0fc34b4..613c852ee0f 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
EXPORT_SYMBOL(tty_mutex);
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -149,6 +152,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
#else
#define tty_compat_ioctl NULL
#endif
+static int __tty_fasync(int fd, struct file *filp, int on);
static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
@@ -184,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
kfree(tty);
}
+static inline struct tty_struct *file_tty(struct file *file)
+{
+ return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+ struct tty_file_private *priv;
+
+ /* XXX: must implement proper error handling in callers */
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
+
+ priv->tty = tty;
+ priv->file = file;
+ file->private_data = priv;
+
+ spin_lock(&tty_files_lock);
+ list_add(&priv->list, &tty->tty_files);
+ spin_unlock(&tty_files_lock);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ spin_lock(&tty_files_lock);
+ list_del(&priv->list);
+ spin_unlock(&tty_files_lock);
+ file->private_data = NULL;
+ kfree(priv);
+}
+
+
#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
/**
@@ -234,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
struct list_head *p;
int count = 0;
- file_list_lock();
+ spin_lock(&tty_files_lock);
list_for_each(p, &tty->tty_files) {
count++;
}
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
@@ -316,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
if (*stp == '\0')
stp = NULL;
- if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+ if (tty_line >= 0 && tty_line < p->num && p->ops &&
p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
@@ -470,7 +509,7 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * do_tty_hangup - actual handler for hangup events
+ * __tty_hangup - actual handler for hangup events
* @work: tty device
*
* This can be called by the "eventd" kernel thread. That is process
@@ -483,7 +522,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
* remains intact.
*
* Locking:
- * BKL
+ * BTM
* redirect lock for undoing redirection
* file list lock for manipulating list of ttys
* tty_ldisc_lock from called functions
@@ -491,13 +530,12 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
* tasklist_lock to walk task list for hangup event
* ->siglock to protect ->signal/->sighand
*/
-static void do_tty_hangup(struct work_struct *work)
+void __tty_hangup(struct tty_struct *tty)
{
- struct tty_struct *tty =
- container_of(work, struct tty_struct, hangup_work);
struct file *cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
+ struct tty_file_private *priv;
int closecount = 0, n;
unsigned long flags;
int refs = 0;
@@ -507,28 +545,32 @@ static void do_tty_hangup(struct work_struct *work)
spin_lock(&redirect_lock);
- if (redirect && redirect->private_data == tty) {
+ if (redirect && file_tty(redirect) == tty) {
f = redirect;
redirect = NULL;
}
spin_unlock(&redirect_lock);
- /* inuse_filps is protected by the single kernel lock */
- lock_kernel();
- check_tty_count(tty, "do_tty_hangup");
+ tty_lock();
+
+ /* inuse_filps is protected by the single tty lock,
+ this really needs to change if we want to flush the
+ workqueue with the lock held */
+ check_tty_count(tty, "tty_hangup");
- file_list_lock();
+ spin_lock(&tty_files_lock);
/* This breaks for file handles being sent over AF_UNIX sockets ? */
- list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+ list_for_each_entry(priv, &tty->tty_files, list) {
+ filp = priv->file;
if (filp->f_op->write == redirected_tty_write)
cons_filp = filp;
if (filp->f_op->write != tty_write)
continue;
closecount++;
- tty_fasync(-1, filp, 0); /* can't block */
+ __tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
tty_ldisc_hangup(tty);
@@ -594,11 +636,21 @@ static void do_tty_hangup(struct work_struct *work)
*/
set_bit(TTY_HUPPED, &tty->flags);
tty_ldisc_enable(tty);
- unlock_kernel();
+
+ tty_unlock();
+
if (f)
fput(f);
}
+static void do_tty_hangup(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
+
+ __tty_hangup(tty);
+}
+
/**
* tty_hangup - trigger a hangup event
* @tty: tty to hangup
@@ -634,11 +686,12 @@ void tty_vhangup(struct tty_struct *tty)
printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
#endif
- do_tty_hangup(&tty->hangup_work);
+ __tty_hangup(tty);
}
EXPORT_SYMBOL(tty_vhangup);
+
/**
* tty_vhangup_self - process vhangup for own ctty
*
@@ -696,7 +749,8 @@ static void session_clear_tty(struct pid *session)
* exiting; it is 0 if called by the ioctl TIOCNOTTY.
*
* Locking:
- * BKL is taken for hysterical raisins
+ * BTM is taken for hysterical raisins, and held when
+ * called from no_tty().
* tty_mutex is taken to protect tty
* ->siglock is taken to protect ->signal/->sighand
* tasklist_lock is taken to walk process list for sessions
@@ -714,10 +768,10 @@ void disassociate_ctty(int on_exit)
tty = get_current_tty();
if (tty) {
tty_pgrp = get_pid(tty->pgrp);
- lock_kernel();
- if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
- tty_vhangup(tty);
- unlock_kernel();
+ if (on_exit) {
+ if (tty->driver->type != TTY_DRIVER_TYPE_PTY)
+ tty_vhangup(tty);
+ }
tty_kref_put(tty);
} else if (on_exit) {
struct pid *old_pgrp;
@@ -774,9 +828,9 @@ void disassociate_ctty(int on_exit)
void no_tty(void)
{
struct task_struct *tsk = current;
- lock_kernel();
+ tty_lock();
disassociate_ctty(0);
- unlock_kernel();
+ tty_unlock();
proc_clear_tty(tsk);
}
@@ -875,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int i;
- struct tty_struct *tty;
- struct inode *inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
- tty = (struct tty_struct *)file->private_data;
- inode = file->f_path.dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1013,19 +1065,19 @@ out:
* We don't put it into the syslog queue right now maybe in the future if
* really needed.
*
- * We must still hold the BKL and test the CLOSING flag for the moment.
+ * We must still hold the BTM and test the CLOSING flag for the moment.
*/
void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
mutex_lock(&tty->atomic_write_lock);
- lock_kernel();
+ tty_lock();
if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
- unlock_kernel();
+ tty_unlock();
tty->ops->write(tty, msg, strlen(msg));
} else
- unlock_kernel();
+ tty_unlock();
tty_write_unlock(tty);
}
return;
@@ -1051,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
static ssize_t tty_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct tty_struct *tty;
struct inode *inode = file->f_path.dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
ssize_t ret;
- struct tty_ldisc *ld;
- tty = (struct tty_struct *)file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (!tty || !tty->ops->write ||
@@ -1208,18 +1259,14 @@ static int tty_driver_install_tty(struct tty_driver *driver,
int ret;
if (driver->ops->install) {
- lock_kernel();
ret = driver->ops->install(driver, tty);
- unlock_kernel();
return ret;
}
if (tty_init_termios(tty) == 0) {
- lock_kernel();
tty_driver_kref_get(driver);
tty->count++;
driver->ttys[idx] = tty;
- unlock_kernel();
return 0;
}
return -ENOMEM;
@@ -1312,14 +1359,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
struct tty_struct *tty;
int retval;
- lock_kernel();
/* Check if pty master is being opened multiple times */
if (driver->subtype == PTY_TYPE_MASTER &&
(driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) {
- unlock_kernel();
return ERR_PTR(-EIO);
}
- unlock_kernel();
/*
* First time open is complex, especially for PTY devices.
@@ -1363,9 +1407,7 @@ release_mem_out:
if (printk_ratelimit())
printk(KERN_INFO "tty_init_dev: ldisc open failed, "
"clearing slot %d\n", idx);
- lock_kernel();
release_tty(tty, idx);
- unlock_kernel();
return ERR_PTR(retval);
}
@@ -1419,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
tty_driver_kref_put(driver);
module_put(driver->owner);
- file_list_lock();
+ spin_lock(&tty_files_lock);
list_del_init(&tty->tty_files);
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
put_pid(tty->pgrp);
put_pid(tty->session);
@@ -1502,20 +1544,20 @@ static void release_tty(struct tty_struct *tty, int idx)
int tty_release(struct inode *inode, struct file *filp)
{
- struct tty_struct *tty, *o_tty;
+ struct tty_struct *tty = file_tty(filp);
+ struct tty_struct *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
int devpts;
int idx;
char buf[64];
- tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, inode, "tty_release_dev"))
return 0;
- lock_kernel();
+ tty_lock();
check_tty_count(tty, "tty_release_dev");
- tty_fasync(-1, filp, 0);
+ __tty_fasync(-1, filp, 0);
idx = tty->index;
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -1527,18 +1569,18 @@ int tty_release(struct inode *inode, struct file *filp)
if (idx < 0 || idx >= tty->driver->num) {
printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
"free (%s)\n", tty->name);
- unlock_kernel();
+ tty_unlock();
return 0;
}
if (!devpts) {
if (tty != tty->driver->ttys[idx]) {
- unlock_kernel();
+ tty_unlock();
printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
"for (%s)\n", idx, tty->name);
return 0;
}
if (tty->termios != tty->driver->termios[idx]) {
- unlock_kernel();
+ tty_unlock();
printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
"for (%s)\n",
idx, tty->name);
@@ -1556,21 +1598,21 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->driver->other &&
!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
if (o_tty != tty->driver->other->ttys[idx]) {
- unlock_kernel();
+ tty_unlock();
printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
"not o_tty for (%s)\n",
idx, tty->name);
return 0 ;
}
if (o_tty->termios != tty->driver->other->termios[idx]) {
- unlock_kernel();
+ tty_unlock();
printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
"not o_termios for (%s)\n",
idx, tty->name);
return 0;
}
if (o_tty->link != tty) {
- unlock_kernel();
+ tty_unlock();
printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
return 0;
}
@@ -1579,7 +1621,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->ops->close)
tty->ops->close(tty, filp);
- unlock_kernel();
+ tty_unlock();
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1602,7 +1644,7 @@ int tty_release(struct inode *inode, struct file *filp)
opens on /dev/tty */
mutex_lock(&tty_mutex);
- lock_kernel();
+ tty_lock();
tty_closing = tty->count <= 1;
o_tty_closing = o_tty &&
(o_tty->count <= (pty_master ? 1 : 0));
@@ -1633,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
"active!\n", tty_name(tty, buf));
- unlock_kernel();
+ tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
}
@@ -1666,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
* - do_tty_hangup no longer sees this file descriptor as
* something that needs to be handled for hangups.
*/
- file_kill(filp);
- filp->private_data = NULL;
+ tty_del_file(filp);
/*
* Perform some housekeeping before deciding whether to return.
@@ -1698,7 +1739,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* check whether both sides are closing ... */
if (!tty_closing || (o_tty && !o_tty_closing)) {
- unlock_kernel();
+ tty_unlock();
return 0;
}
@@ -1718,7 +1759,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* Make this pty number available for reallocation */
if (devpts)
devpts_kill_index(inode, idx);
- unlock_kernel();
+ tty_unlock();
return 0;
}
@@ -1760,12 +1801,12 @@ retry_open:
retval = 0;
mutex_lock(&tty_mutex);
- lock_kernel();
+ tty_lock();
if (device == MKDEV(TTYAUX_MAJOR, 0)) {
tty = get_current_tty();
if (!tty) {
- unlock_kernel();
+ tty_unlock();
mutex_unlock(&tty_mutex);
return -ENXIO;
}
@@ -1797,14 +1838,14 @@ retry_open:
goto got_driver;
}
}
- unlock_kernel();
+ tty_unlock();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
driver = get_tty_driver(device, &index);
if (!driver) {
- unlock_kernel();
+ tty_unlock();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
@@ -1814,7 +1855,7 @@ got_driver:
tty = tty_driver_lookup_tty(driver, inode, index);
if (IS_ERR(tty)) {
- unlock_kernel();
+ tty_unlock();
mutex_unlock(&tty_mutex);
return PTR_ERR(tty);
}
@@ -1830,12 +1871,12 @@ got_driver:
mutex_unlock(&tty_mutex);
tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
- unlock_kernel();
+ tty_unlock();
return PTR_ERR(tty);
}
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
+ tty_add_file(tty, filp);
+
check_tty_count(tty, "tty_open");
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1860,29 +1901,29 @@ got_driver:
printk(KERN_DEBUG "error %d in opening %s...", retval,
tty->name);
#endif
+ tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
- if (retval != -ERESTARTSYS) {
- unlock_kernel();
+ if (retval != -ERESTARTSYS)
return retval;
- }
- if (signal_pending(current)) {
- unlock_kernel();
+
+ if (signal_pending(current))
return retval;
- }
+
schedule();
/*
* Need to reset f_op in case a hangup happened.
*/
+ tty_lock();
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
- unlock_kernel();
+ tty_unlock();
goto retry_open;
}
- unlock_kernel();
+ tty_unlock();
mutex_lock(&tty_mutex);
- lock_kernel();
+ tty_lock();
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
@@ -1890,7 +1931,7 @@ got_driver:
tty->session == NULL)
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
- unlock_kernel();
+ tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
}
@@ -1911,11 +1952,10 @@ got_driver:
static unsigned int tty_poll(struct file *filp, poll_table *wait)
{
- struct tty_struct *tty;
+ struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
int ret = 0;
- tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
return 0;
@@ -1926,14 +1966,12 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
return ret;
}
-static int tty_fasync(int fd, struct file *filp, int on)
+static int __tty_fasync(int fd, struct file *filp, int on)
{
- struct tty_struct *tty;
+ struct tty_struct *tty = file_tty(filp);
unsigned long flags;
int retval = 0;
- lock_kernel();
- tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
goto out;
@@ -1966,7 +2004,15 @@ static int tty_fasync(int fd, struct file *filp, int on)
}
retval = 0;
out:
- unlock_kernel();
+ return retval;
+}
+
+static int tty_fasync(int fd, struct file *filp, int on)
+{
+ int retval;
+ tty_lock();
+ retval = __tty_fasync(fd, filp, on);
+ tty_unlock();
return retval;
}
@@ -2479,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
*/
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct tty_struct *tty, *real_tty;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_struct *real_tty;
void __user *p = (void __user *)arg;
int retval;
struct tty_ldisc *ld;
struct inode *inode = file->f_dentry->d_inode;
- tty = (struct tty_struct *)file->private_data;
if (tty_paranoia_check(tty, inode, "tty_ioctl"))
return -EINVAL;
@@ -2607,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
- struct tty_struct *tty = file->private_data;
+ struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
int retval = -ENOIOCTLCMD;
@@ -2699,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
if (!filp)
continue;
if (filp->f_op->read == tty_read &&
- filp->private_data == tty) {
+ file_tty(filp) == tty) {
printk(KERN_NOTICE "SAK: killed process %d"
" (%s): fd#%d opened to the tty\n",
task_pid_nr(p), p->comm, i);
@@ -3128,7 +3174,7 @@ static struct cdev tty_cdev, console_cdev;
* Ok, now we can initialize the rest of the tty devices and can count
* on memory allocations, interrupts etc..
*/
-static int __init tty_init(void)
+int __init tty_init(void)
{
cdev_init(&tty_cdev, &tty_fops);
if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
@@ -3149,4 +3195,4 @@ static int __init tty_init(void)
#endif
return 0;
}
-module_init(tty_init);
+
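
tty_add_file()/tty_del_file() above replace the VFS-global file list (file_move/file_kill under file_list_lock) with a small per-open record linked into the tty's own list under tty_files_lock. The same bookkeeping pattern, reduced to a generic character device (all demo_* names are illustrative, not kernel API):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_dev {
        struct list_head files;         /* all opens of this device */
        spinlock_t files_lock;          /* protects the list above; init at device setup */
};

struct demo_file_private {
        struct demo_dev *dev;
        struct file *file;
        struct list_head list;
};

static int demo_add_file(struct demo_dev *dev, struct file *file)
{
        struct demo_file_private *priv;

        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;
        priv->file = file;
        file->private_data = priv;

        spin_lock(&dev->files_lock);
        list_add(&priv->list, &dev->files);
        spin_unlock(&dev->files_lock);
        return 0;
}

static void demo_del_file(struct file *file)
{
        struct demo_file_private *priv = file->private_data;

        spin_lock(&priv->dev->files_lock);
        list_del(&priv->list);
        spin_unlock(&priv->dev->files_lock);
        file->private_data = NULL;
        kfree(priv);
}
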
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 6bd5f8866c7..0c188997145 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -517,19 +517,25 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
/* See if packet mode change of state. */
if (tty->link && tty->link->packet) {
+ int extproc = (old_termios.c_lflag & EXTPROC) |
+ (tty->termios->c_lflag & EXTPROC);
int old_flow = ((old_termios.c_iflag & IXON) &&
(old_termios.c_cc[VSTOP] == '\023') &&
(old_termios.c_cc[VSTART] == '\021'));
int new_flow = (I_IXON(tty) &&
STOP_CHAR(tty) == '\023' &&
START_CHAR(tty) == '\021');
- if (old_flow != new_flow) {
+ if ((old_flow != new_flow) || extproc) {
spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
- if (new_flow)
- tty->ctrl_status |= TIOCPKT_DOSTOP;
- else
- tty->ctrl_status |= TIOCPKT_NOSTOP;
+ if (old_flow != new_flow) {
+ tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
+ if (new_flow)
+ tty->ctrl_status |= TIOCPKT_DOSTOP;
+ else
+ tty->ctrl_status |= TIOCPKT_NOSTOP;
+ }
+ if (extproc)
+ tty->ctrl_status |= TIOCPKT_IOCTL;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
wake_up_interruptible(&tty->link->read_wait);
}
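
The change_termios() hunk above makes packet mode report termios changes via the TIOCPKT_IOCTL bit when EXTPROC is in use. On the consuming side this shows up in the status byte that prefixes every read from a packet-mode pty master; a userspace sketch, assuming master_fd was already opened and switched to packet mode with the TIOCPKT ioctl:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void demo_read_packet(int master_fd)
{
        unsigned char buf[256];
        ssize_t n = read(master_fd, buf, sizeof(buf));

        if (n <= 0)
                return;

        if (buf[0] == TIOCPKT_DATA)
                fwrite(buf + 1, 1, n - 1, stdout);      /* ordinary slave output */
        else if (buf[0] & TIOCPKT_IOCTL)
                fprintf(stderr, "slave termios changed (EXTPROC)\n");
}
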
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 500e740ec5e..412f9775d19 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -440,6 +440,8 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
*
* A helper opening method. Also a convenient debugging and check
* point.
+ *
+ * Locking: always called with BTM already held.
*/
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
@@ -447,10 +449,9 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
if (ld->ops->open) {
int ret;
- /* BKL here locks verus a hangup event */
- lock_kernel();
+ /* BTM here locks versus a hangup event */
+ WARN_ON(!tty_locked());
ret = ld->ops->open(tty);
- unlock_kernel();
return ret;
}
return 0;
@@ -553,7 +554,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
- lock_kernel();
+ tty_lock();
/*
* We need to look at the tty locking here for pty/tty pairs
* when both sides try to change in parallel.
@@ -567,12 +568,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
*/
if (tty->ldisc->ops->num == ldisc) {
- unlock_kernel();
+ tty_unlock();
tty_ldisc_put(new_ldisc);
return 0;
}
- unlock_kernel();
+ tty_unlock();
/*
* Problem: What do we do if this blocks ?
* We could deadlock here
@@ -580,6 +581,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty_wait_until_sent(tty, 0);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/*
@@ -589,13 +591,13 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
mutex_unlock(&tty->ldisc_mutex);
+ tty_unlock();
wait_event(tty_ldisc_wait,
test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
}
- lock_kernel();
-
set_bit(TTY_LDISC_CHANGING, &tty->flags);
/*
@@ -607,7 +609,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
o_ldisc = tty->ldisc;
- unlock_kernel();
+ tty_unlock();
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
@@ -632,15 +634,15 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
flush_scheduled_work();
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
- lock_kernel();
if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
mutex_unlock(&tty->ldisc_mutex);
tty_ldisc_put(new_ldisc);
- unlock_kernel();
+ tty_unlock();
return -EIO;
}
@@ -682,7 +684,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (o_work)
schedule_delayed_work(&o_tty->buf.work, 1);
mutex_unlock(&tty->ldisc_mutex);
- unlock_kernel();
+ tty_unlock();
return retval;
}
@@ -780,7 +782,20 @@ void tty_ldisc_hangup(struct tty_struct *tty)
* Avoid racing set_ldisc or tty_ldisc_release
*/
mutex_lock(&tty->ldisc_mutex);
- tty_ldisc_halt(tty);
+
+ /*
+ * this is like tty_ldisc_halt, but we need to give up
+ * the BTM before calling cancel_delayed_work_sync,
+ * which may need to wait for another function taking the BTM
+ */
+ clear_bit(TTY_LDISC, &tty->flags);
+ tty_unlock();
+ cancel_delayed_work_sync(&tty->buf.work);
+ mutex_unlock(&tty->ldisc_mutex);
+
+ tty_lock();
+ mutex_lock(&tty->ldisc_mutex);
+
/* At this point we have a closed ldisc and we want to
reopen it. We could defer this to the next open but
it means auditing a lot of other paths so this is
@@ -851,8 +866,10 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
+ tty_unlock();
tty_ldisc_halt(tty);
flush_scheduled_work();
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/*
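
The tty_ldisc_hangup() rework above exists to break a lock/workqueue deadlock: cancel_delayed_work_sync() may have to wait for a running work item, and the flush work itself can take the BTM. The general rule it illustrates, reduced to a self-contained sketch with hypothetical demo_* names:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo {
        struct mutex lock;
        struct delayed_work work;
};

static void demo_work_fn(struct work_struct *w)
{
        struct demo *d = container_of(to_delayed_work(w), struct demo, work);

        mutex_lock(&d->lock);           /* the work needs the same lock ... */
        /* ... push buffered data, etc. ... */
        mutex_unlock(&d->lock);
}

static void demo_stop(struct demo *d)
{
        mutex_lock(&d->lock);
        /* mark the device as stopping under the lock */
        mutex_unlock(&d->lock);

        /* ... so the synchronous cancel must run with the lock dropped */
        cancel_delayed_work_sync(&d->work);
}
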
diff --git a/drivers/char/tty_mutex.c b/drivers/char/tty_mutex.c
new file mode 100644
index 00000000000..133697540c7
--- /dev/null
+++ b/drivers/char/tty_mutex.c
@@ -0,0 +1,47 @@
+/*
+ * drivers/char/tty_lock.c
+ */
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
+struct task_struct *__big_tty_mutex_owner;
+EXPORT_SYMBOL_GPL(__big_tty_mutex_owner);
+
+/*
+ * Getting the big tty mutex.
+ */
+void __lockfunc tty_lock(void)
+{
+ struct task_struct *task = current;
+
+ WARN_ON(__big_tty_mutex_owner == task);
+
+ mutex_lock(&big_tty_mutex);
+ __big_tty_mutex_owner = task;
+}
+EXPORT_SYMBOL(tty_lock);
+
+void __lockfunc tty_unlock(void)
+{
+ struct task_struct *task = current;
+
+ WARN_ON(__big_tty_mutex_owner != task);
+ __big_tty_mutex_owner = NULL;
+
+ mutex_unlock(&big_tty_mutex);
+}
+EXPORT_SYMBOL(tty_unlock);
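
A usage sketch for the new helpers (not from the patch): the big tty mutex is taken once around a non-reentrant section and released explicitly; because it is not recursive and is not dropped on sleep, nothing called below the lock may try to take it again. demo_do_ioctl() is a placeholder for a driver's real worker:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/tty.h>

static int demo_do_ioctl(struct tty_struct *tty, unsigned int cmd,
                         unsigned long arg)
{
        return -ENOIOCTLCMD;            /* placeholder; a real driver does the work here */
}

static int demo_ioctl(struct tty_struct *tty, struct file *file,
                      unsigned int cmd, unsigned long arg)
{
        int rc;

        tty_lock();                     /* would deadlock if the caller already held it */
        rc = demo_do_ioctl(tty, cmd, arg);
        tty_unlock();
        return rc;
}
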
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index a3bd1d0b66c..33d37d230f8 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -231,7 +231,7 @@ int tty_port_block_til_ready(struct tty_port *port,
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
- wait_event_interruptible(port->close_wait,
+ wait_event_interruptible_tty(port->close_wait,
!(port->flags & ASYNC_CLOSING));
if (port->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
@@ -294,7 +294,9 @@ int tty_port_block_til_ready(struct tty_port *port,
retval = -ERESTARTSYS;
break;
}
+ tty_unlock();
schedule();
+ tty_lock();
}
finish_wait(&port->open_wait, &wait);
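
tty_port_block_til_ready() above uses wait_event_interruptible_tty(), a variant introduced by this series that drops the big tty mutex while sleeping so that the close/hangup path, which needs the BTM, can run and satisfy the condition. A minimal sketch of a caller:

#include <linux/serial.h>
#include <linux/tty.h>

static int demo_wait_until_not_closing(struct tty_port *port)
{
        /* sleeps with the BTM released; returns -ERESTARTSYS if interrupted */
        return wait_event_interruptible_tty(port->close_wait,
                                            !(port->flags & ASYNC_CLOSING));
}
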
diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c
index c1791a63d99..bcce46c96b8 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/char/vc_screen.c
@@ -463,10 +463,10 @@ vcs_open(struct inode *inode, struct file *filp)
unsigned int currcons = iminor(inode) & 127;
int ret = 0;
- lock_kernel();
+ tty_lock();
if(currcons && !vc_cons_allocated(currcons-1))
ret = -ENXIO;
- unlock_kernel();
+ tty_unlock();
return ret;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a9826bd2..c810481a5bc 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
ssize_t ret;
bool nonblock;
+ /* Userspace could be out to fool us */
+ if (!count)
+ return 0;
+
port = filp->private_data;
nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &port->waitqueue, wait);
ret = 0;
- if (port->inbuf)
+ if (!will_read_block(port))
ret |= POLLIN | POLLRDNORM;
if (!will_write_block(port))
ret |= POLLOUT;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 7cdb6ee569c..281aada7b4a 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -104,6 +104,8 @@
#include <linux/io.h>
#include <asm/system.h>
#include <linux/uaccess.h>
+#include <linux/kdb.h>
+#include <linux/ctype.h>
#define MAX_NR_CON_DRIVER 16
@@ -187,10 +189,16 @@ static DECLARE_WORK(console_work, console_callback);
* fg_console is the current virtual console,
* last_console is the last used one,
* want_console is the console we want to switch to,
+ * saved_* variants are for save/restore around kernel debugger enter/leave
*/
int fg_console;
int last_console;
int want_console = -1;
+static int saved_fg_console;
+static int saved_last_console;
+static int saved_want_console;
+static int saved_vc_mode;
+static int saved_console_blanked;
/*
* For each existing display, we have a pointer to console currently visible
@@ -280,8 +288,12 @@ static inline unsigned short *screenpos(struct vc_data *vc, int offset, int view
return p;
}
+/* Called from the keyboard irq path.. */
static inline void scrolldelta(int lines)
{
+ /* FIXME */
+ /* scrolldelta needs some kind of consistency lock, but the BKL was
+ and still is not protecting versus the scheduled back end */
scrollback_delta += lines;
schedule_console_callback();
}
@@ -698,7 +710,10 @@ void redraw_screen(struct vc_data *vc, int is_switch)
update_attr(vc);
clear_buffer_attributes(vc);
}
- if (update && vc->vc_mode != KD_GRAPHICS)
+
+ /* Forcibly update if we're panicing */
+ if ((update && vc->vc_mode != KD_GRAPHICS) ||
+ vt_force_oops_output(vc))
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
set_cursor(vc);
@@ -736,6 +751,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
+ vc->vc_panic_force_write = false;
vc->vc_sw->con_init(vc, init);
if (!vc->vc_complement_mask)
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
@@ -768,6 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!vc)
return -ENOMEM;
vc_cons[currcons].d = vc;
+ tty_port_init(&vc->port);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
if (!*vc->vc_uni_pagedir_loc)
@@ -831,9 +848,10 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
unsigned int cols, unsigned int lines)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
+ unsigned long end;
unsigned int old_cols, old_rows, old_row_size, old_screen_size;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
- unsigned int end, user;
+ unsigned int user;
unsigned short *newscreen;
WARN_CONSOLE_UNLOCKED();
@@ -888,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* bottom of buffer
*/
old_origin += (old_rows - new_rows) * old_row_size;
- end = vc->vc_scr_end;
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
- end = old_origin + (old_row_size * new_rows);
}
- } else
- /*
- * Cursor near the top, copy contents from the top of buffer
- */
- end = (old_rows > new_rows) ? old_origin +
- (old_row_size * new_rows) :
- vc->vc_scr_end;
+ }
+
+ end = old_origin + old_row_size * min(old_rows, new_rows);
update_attr(vc);
@@ -956,12 +968,12 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* Resize a virtual console as seen from the console end of things. We
* use the common vc_do_resize methods to update the structures. The
* caller must hold the console sem to protect console internals and
- * vc->vc_tty
+ * vc->port.tty
*/
int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
{
- return vc_do_resize(vc->vc_tty, vc, cols, rows);
+ return vc_do_resize(vc->port.tty, vc, cols, rows);
}
/**
@@ -1789,8 +1801,8 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_state = ESnormal;
return;
case ESpalette:
- if ( (c>='0'&&c<='9') || (c>='A'&&c<='F') || (c>='a'&&c<='f') ) {
- vc->vc_par[vc->vc_npar++] = (c > '9' ? (c & 0xDF) - 'A' + 10 : c - '0');
+ if (isxdigit(c)) {
+ vc->vc_par[vc->vc_npar++] = hex_to_bin(c);
if (vc->vc_npar == 7) {
int i = vc->vc_par[0] * 3, j = 1;
vc->vc_palette[i] = 16 * vc->vc_par[j++];
@@ -2498,7 +2510,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
goto quit;
}
- if (vc->vc_mode != KD_TEXT)
+ if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
goto quit;
/* undraw cursor first */
@@ -2604,8 +2616,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
return -EFAULT;
ret = 0;
- lock_kernel();
-
switch (type)
{
case TIOCL_SETSEL:
@@ -2680,7 +2690,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
ret = -EINVAL;
break;
}
- unlock_kernel();
return ret;
}
@@ -2793,12 +2802,12 @@ static int con_open(struct tty_struct *tty, struct file *filp)
struct vc_data *vc = vc_cons[currcons].d;
/* Still being freed */
- if (vc->vc_tty) {
+ if (vc->port.tty) {
release_console_sem();
return -ERESTARTSYS;
}
tty->driver_data = vc;
- vc->vc_tty = tty;
+ vc->port.tty = tty;
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
@@ -2826,7 +2835,7 @@ static void con_shutdown(struct tty_struct *tty)
struct vc_data *vc = tty->driver_data;
BUG_ON(vc == NULL);
acquire_console_sem();
- vc->vc_tty = NULL;
+ vc->port.tty = NULL;
release_console_sem();
tty_shutdown(tty);
}
@@ -2908,6 +2917,7 @@ static int __init con_init(void)
for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
+ tty_port_init(&vc->port);
visual_init(vc, currcons, 1);
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, vc->vc_rows, vc->vc_cols,
@@ -3414,6 +3424,80 @@ int con_is_bound(const struct consw *csw)
EXPORT_SYMBOL(con_is_bound);
/**
+ * con_debug_enter - prepare the console for the kernel debugger
+ * @sw: console driver
+ *
+ * Called when the console is taken over by the kernel debugger, this
+ * function needs to save the current console state, then put the console
+ * into a state suitable for the kernel debugger.
+ *
+ * RETURNS:
+ * Zero on success, nonzero if a failure occurred when trying to prepare
+ * the console for the debugger.
+ */
+int con_debug_enter(struct vc_data *vc)
+{
+ int ret = 0;
+
+ saved_fg_console = fg_console;
+ saved_last_console = last_console;
+ saved_want_console = want_console;
+ saved_vc_mode = vc->vc_mode;
+ saved_console_blanked = console_blanked;
+ vc->vc_mode = KD_TEXT;
+ console_blanked = 0;
+ if (vc->vc_sw->con_debug_enter)
+ ret = vc->vc_sw->con_debug_enter(vc);
+#ifdef CONFIG_KGDB_KDB
+ /* Set the initial LINES variable if it is not already set */
+ if (vc->vc_rows < 999) {
+ int linecount;
+ char lns[4];
+ const char *setargs[3] = {
+ "set",
+ "LINES",
+ lns,
+ };
+ if (kdbgetintenv(setargs[0], &linecount)) {
+ snprintf(lns, 4, "%i", vc->vc_rows);
+ kdb_set(2, setargs);
+ }
+ }
+#endif /* CONFIG_KGDB_KDB */
+ return ret;
+}
+EXPORT_SYMBOL_GPL(con_debug_enter);
+
+/**
+ * con_debug_leave - restore console state
+ * @sw: console driver
+ *
+ * Restore the console state to what it was before the kernel debugger
+ * was invoked.
+ *
+ * RETURNS:
+ * Zero on success, nonzero if a failure occurred when trying to restore
+ * the console.
+ */
+int con_debug_leave(void)
+{
+ struct vc_data *vc;
+ int ret = 0;
+
+ fg_console = saved_fg_console;
+ last_console = saved_last_console;
+ want_console = saved_want_console;
+ console_blanked = saved_console_blanked;
+ vc_cons[fg_console].d->vc_mode = saved_vc_mode;
+
+ vc = vc_cons[fg_console].d;
+ if (vc->vc_sw->con_debug_leave)
+ ret = vc->vc_sw->con_debug_leave(vc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(con_debug_leave);
+
+/**
* register_con_driver - register console driver to console layer
* @csw: console driver
* @first: the first console to take over, minimum value is 0
@@ -3703,7 +3787,8 @@ void do_unblank_screen(int leaving_gfx)
return;
}
vc = vc_cons[fg_console].d;
- if (vc->vc_mode != KD_TEXT)
+ /* Try to unblank in oops case too */
+ if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
return; /* but leave console_blanked != 0 */
if (blankinterval) {
@@ -3712,7 +3797,7 @@ void do_unblank_screen(int leaving_gfx)
}
console_blanked = 0;
- if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
+ if (vc->vc_sw->con_blank(vc, 0, leaving_gfx) || vt_force_oops_output(vc))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(vc);
if (console_blank_hook)
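
con_debug_enter()/con_debug_leave() above give console drivers a chance to save and restore their own state around a kernel-debugger session, via two optional consw operations. A hedged sketch of a driver wiring them up (demo_* names are placeholders; the usual con_startup/con_init/... members are omitted):

#include <linux/console.h>
#include <linux/module.h>

static int demo_con_debug_enter(struct vc_data *vc)
{
        /* e.g. force the hardware out of an accelerated or graphics mode */
        return 0;
}

static int demo_con_debug_leave(struct vc_data *vc)
{
        /* undo whatever demo_con_debug_enter() changed */
        return 0;
}

static const struct consw demo_con = {
        .owner           = THIS_MODULE,
        /* ... the usual con_startup/con_init/... operations go here ... */
        .con_debug_enter = demo_con_debug_enter,
        .con_debug_leave = demo_con_debug_leave,
};
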
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index cb19dbc5213..38df8c19e74 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -133,7 +133,7 @@ static void vt_event_wait(struct vt_event_wait *vw)
list_add(&vw->list, &vt_events);
spin_unlock_irqrestore(&vt_event_lock, flags);
/* Wait for it to pass */
- wait_event_interruptible(vt_event_waitqueue, vw->done);
+ wait_event_interruptible_tty(vt_event_waitqueue, vw->done);
/* Dequeue it */
spin_lock_irqsave(&vt_event_lock, flags);
list_del(&vw->list);
@@ -509,7 +509,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
console = vc->vc_num;
- lock_kernel();
+ tty_lock();
if (!vc_cons_allocated(console)) { /* impossible? */
ret = -ENOIOCTLCMD;
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
case KIOCSOUND:
if (!perm)
goto eperm;
- /* FIXME: This is an old broken API but we need to keep it
- supported and somehow separate the historic advertised
- tick rate from any real one */
+ /*
+ * The use of PIT_TICK_RATE is historic, it used to be
+ * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+ * and 2.6.36, which was a minor but unfortunate ABI
+ * change.
+ */
if (arg)
- arg = CLOCK_TICK_RATE / arg;
+ arg = PIT_TICK_RATE / arg;
kd_mksound(arg, 0);
break;
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
*/
ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
count = ticks ? (arg & 0xffff) : 0;
- /* FIXME: This is an old broken API but we need to keep it
- supported and somehow separate the historic advertised
- tick rate from any real one */
if (count)
- count = CLOCK_TICK_RATE / count;
+ count = PIT_TICK_RATE / count;
kd_mksound(count, ticks);
break;
}
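As a worked example of the conversion above (a sketch, not part of the patch): PIT_TICK_RATE is the fixed 1193182 Hz i8253 base clock, so a requested 440 Hz tone becomes a divisor of 1193182 / 440 = 2711, independent of the kernel's real CLOCK_TICK_RATE configuration.

#define EXAMPLE_PIT_TICK_RATE	1193182UL	/* i8253 base clock, fixed */

static unsigned long kiocsound_divisor(unsigned long freq_hz)
{
	/* e.g. 440 Hz -> 1193182 / 440 = 2711 */
	return freq_hz ? EXAMPLE_PIT_TICK_RATE / freq_hz : 0;
}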
@@ -1336,7 +1336,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -ENOIOCTLCMD;
}
out:
- unlock_kernel();
+ tty_unlock();
return ret;
eperm:
ret = -EPERM;
@@ -1369,7 +1369,7 @@ void vc_SAK(struct work_struct *work)
acquire_console_sem();
vc = vc_con->d;
if (vc) {
- tty = vc->vc_tty;
+ tty = vc->port.tty;
/*
* SAK should also work in all raw modes and reset
* them properly.
@@ -1503,7 +1503,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
console = vc->vc_num;
- lock_kernel();
+ tty_lock();
if (!vc_cons_allocated(console)) { /* impossible? */
ret = -ENOIOCTLCMD;
@@ -1571,11 +1571,11 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
goto fallback;
}
out:
- unlock_kernel();
+ tty_unlock();
return ret;
fallback:
- unlock_kernel();
+ tty_unlock();
return vt_ioctl(tty, file, cmd, arg);
}
@@ -1761,10 +1761,13 @@ int vt_move_to_console(unsigned int vt, int alloc)
return -EIO;
}
release_console_sem();
+ tty_lock();
if (vt_waitactive(vt + 1)) {
pr_debug("Suspend: Can't switch VCs.");
+ tty_unlock();
return -EINTR;
}
+ tty_unlock();
return prev;
}
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index ed8a9cec2a0..b663d573aad 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -94,6 +94,7 @@
#ifdef CONFIG_OF
/* For open firmware. */
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
@@ -761,7 +762,7 @@ static struct platform_driver hwicap_platform_driver = {
#if defined(CONFIG_OF)
static int __devinit
-hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
+hwicap_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct resource res;
const unsigned int *id;
@@ -798,7 +799,7 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
regs);
}
-static int __devexit hwicap_of_remove(struct of_device *op)
+static int __devexit hwicap_of_remove(struct platform_device *op)
{
return hwicap_remove(&op->dev);
}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 72a633a6ec9..cfb0f527841 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -68,10 +68,7 @@ static struct clocksource clocksource_acpi_pm = {
.rating = 200,
.read = acpi_pm_read,
.mask = (cycle_t)ACPI_PM_MASK,
- .mult = 0, /*to be calculated*/
- .shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
-
};
@@ -190,9 +187,6 @@ static int __init init_acpi_pm_clocksource(void)
if (!pmtmr_ioport)
return -ENODEV;
- clocksource_acpi_pm.mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC,
- clocksource_acpi_pm.shift);
-
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j);
@@ -220,7 +214,8 @@ static int __init init_acpi_pm_clocksource(void)
if (verify_pmtmr_rate() != 0)
return -ENODEV;
- return clocksource_register(&clocksource_acpi_pm);
+ return clocksource_register_hz(&clocksource_acpi_pm,
+ PMTMR_TICKS_PER_SEC);
}
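For reference (a rough sketch, not part of the patch): clocksource_register_hz() now derives mult/shift internally from the PM timer's fixed 3.579545 MHz rate, which is approximately the factor the removed lines computed by hand:

static u32 example_acpi_pm_mult(void)
{
	/* roughly what the deleted mult/shift setup produced */
	return clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
}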
/* We use fs_initcall because we want the PCI fixups to have run
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index dbefe15bd58..a5071084337 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -74,6 +74,17 @@ static void cpuidle_idle_call(void)
*/
hrtimer_peek_ahead_timers();
#endif
+
+ /*
+ * Call the device's prepare function before calling the
+ * governor's select function. ->prepare gives the device's
+ * cpuidle driver a chance to update any dynamic information
+ * of its cpuidle states for the current idle period, e.g.
+ * state availability, latencies, residencies, etc.
+ */
+ if (dev->prepare)
+ dev->prepare(dev);
+
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
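An illustrative sketch (not part of the patch; the int return type of the hook and the condition are assumptions) of how a cpuidle driver could use ->prepare() to veto a state for the coming idle period, relying on the CPUIDLE_FLAG_IGNORE check added to the menu governor further below:

static bool example_deep_state_blocked;	/* hypothetical condition */

static int example_prepare(struct cpuidle_device *dev)
{
	int last = dev->state_count - 1;

	if (example_deep_state_blocked)
		dev->states[last].flags |= CPUIDLE_FLAG_IGNORE;
	else
		dev->states[last].flags &= ~CPUIDLE_FLAG_IGNORE;
	return 0;
}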
@@ -282,6 +293,26 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
poll_idle_init(dev);
+ /*
+ * cpuidle driver should set the dev->power_specified bit
+ * before registering the device if the driver provides
+ * power_usage numbers.
+ *
+ * For those devices whose ->power_specified is not set,
+ * we fill in power_usage with decreasing values as the
+ * cpuidle code has an implicit assumption that state Cn
+ * uses less power than C(n-1).
+ *
+ * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
+ * a power value of -1. So we use -2, -3, etc., for the other
+ * c-states.
+ */
+ if (!dev->power_specified) {
+ int i;
+ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
+ dev->states[i].power_usage = -1 - i;
+ }
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 1b128702d30..f508690eb95 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -80,7 +80,7 @@
* Limiting Performance Impact
* ---------------------------
* C states, especially those with large exit latencies, can have a real
- * noticable impact on workloads, which is not acceptable for most sysadmins,
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
* and in addition, less performance has a power price of its own.
*
* As a general rule of thumb, menu assumes that the following heuristic
@@ -234,6 +234,7 @@ static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ unsigned int power_usage = -1;
int i;
int multiplier;
@@ -278,19 +279,27 @@ static int menu_select(struct cpuidle_device *dev)
if (data->expected_us > 5)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
-
- /* find the deepest idle state that satisfies our constraints */
+ /*
+ * Find the idle state with the lowest power while satisfying
+ * our constraints.
+ */
for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
+ if (s->flags & CPUIDLE_FLAG_IGNORE)
+ continue;
if (s->target_residency > data->predicted_us)
- break;
+ continue;
if (s->exit_latency > latency_req)
- break;
+ continue;
if (s->exit_latency * multiplier > data->predicted_us)
- break;
- data->exit_us = s->exit_latency;
- data->last_state_idx = i;
+ continue;
+
+ if (s->power_usage < power_usage) {
+ power_usage = s->power_usage;
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
+ }
}
return data->last_state_idx;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fbf94cf496f..ea0b3863ad0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -77,7 +77,7 @@ config ZCRYPT
config ZCRYPT_MONOLITHIC
bool "Monolithic zcrypt module"
- depends on ZCRYPT="m"
+ depends on ZCRYPT
help
Select this option if you want to have a single module z90crypt,
that contains all parts of the crypto device driver (ap bus,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 983530ba04a..2b1baee525b 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1150,7 +1150,7 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
/**
* Module Initialization Routine
*/
-static int __init crypto4xx_probe(struct of_device *ofdev,
+static int __init crypto4xx_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
int rc;
@@ -1258,7 +1258,7 @@ err_alloc_dev:
return rc;
}
-static int __exit crypto4xx_remove(struct of_device *ofdev)
+static int __exit crypto4xx_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index da9cbe3b9fc..bac0bdeb4b5 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -104,7 +104,7 @@ struct crypto4xx_device {
struct crypto4xx_core_device {
struct device *device;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct crypto4xx_device *dev;
u32 int_status;
u32 irq;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f17ddf37a1e..0d662213c06 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -97,8 +97,13 @@
struct buffer_desc {
u32 phys_next;
+#ifdef __ARMEB__
u16 buf_len;
u16 pkt_len;
+#else
+ u16 pkt_len;
+ u16 buf_len;
+#endif
u32 phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
@@ -106,17 +111,30 @@ struct buffer_desc {
};
struct crypt_ctl {
+#ifdef __ARMEB__
u8 mode; /* NPE_OP_* operation mode */
u8 init_len;
u16 reserved;
+#else
+ u16 reserved;
+ u8 init_len;
+ u8 mode; /* NPE_OP_* operation mode */
+#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
u32 icv_rev_aes; /* icv or rev aes */
u32 src_buf;
u32 dst_buf;
+#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
u16 crypt_offs; /* Cryption start offset */
u16 crypt_len; /* Cryption data length */
+#else
+ u16 auth_len; /* Authentication data length */
+ u16 auth_offs; /* Authentication start offset */
+ u16 crypt_len; /* Cryption data length */
+ u16 crypt_offs; /* Cryption start offset */
+#endif
u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
u32 crypto_ctx; /* NPE Crypto Param structure address */
@@ -652,6 +670,9 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
/* write cfg word to cryptinfo */
cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
+#ifndef __ARMEB__
+ cfgword ^= 0xAA000000; /* change the "byte swap" flags */
+#endif
*(u32*)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index b99c38f23d6..88ee01510ec 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1552,7 +1552,7 @@ static void __exit n2_unregister_algs(void)
/* To map CWQ queues to interrupt sources, the hypervisor API provides
* a devino. This isn't very useful to us because all of the
- * interrupts listed in the of_device node have been translated to
+ * interrupts listed in the device_node have been translated to
* Linux virtual IRQ cookie numbers.
*
* So we have to back-translate, going through the 'intr' and 'ino'
@@ -1560,7 +1560,7 @@ static void __exit n2_unregister_algs(void)
* 'interrupts' property entries, in order to figure out which
* devino goes to which already-translated IRQ.
*/
-static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
unsigned long dev_ino)
{
const unsigned int *dev_intrs;
@@ -1580,7 +1580,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
if (!dev_intrs)
return -ENODEV;
- for (i = 0; i < dev->num_irqs; i++) {
+ for (i = 0; i < dev->archdata.num_irqs; i++) {
if (dev_intrs[i] == intr)
return i;
}
@@ -1588,7 +1588,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
return -ENODEV;
}
-static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
const char *irq_name, struct spu_queue *p,
irq_handler_t handler)
{
@@ -1603,7 +1603,7 @@ static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
if (index < 0)
return index;
- p->irq = dev->irqs[index];
+ p->irq = dev->archdata.irqs[index];
sprintf(p->irq_name, "%s-%d", irq_name, index);
@@ -1736,7 +1736,7 @@ static void spu_list_destroy(struct list_head *list)
* gathering cpu membership information.
*/
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct of_device *dev,
+ struct platform_device *dev,
u64 node, struct spu_queue *p,
struct spu_queue **table)
{
@@ -1763,7 +1763,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct of_device *dev, struct mdesc_handle *mdesc,
+ struct platform_device *dev, struct mdesc_handle *mdesc,
u64 node, const char *iname, unsigned long q_type,
irq_handler_t handler, struct spu_queue **table)
{
@@ -1794,7 +1794,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
return spu_map_ino(dev, ip, iname, p, handler);
}
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
struct spu_mdesc_info *ip, struct list_head *list,
const char *exec_name, unsigned long q_type,
irq_handler_t handler, struct spu_queue **table)
@@ -1855,7 +1855,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
}
static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct of_device *dev,
+ struct platform_device *dev,
struct spu_mdesc_info *ip,
const char *node_name)
{
@@ -2004,7 +2004,7 @@ static void __devinit n2_spu_driver_version(void)
pr_info("%s", version);
}
-static int __devinit n2_crypto_probe(struct of_device *dev,
+static int __devinit n2_crypto_probe(struct platform_device *dev,
const struct of_device_id *match)
{
struct mdesc_handle *mdesc;
@@ -2081,7 +2081,7 @@ out_free_n2cp:
return err;
}
-static int __devexit n2_crypto_remove(struct of_device *dev)
+static int __devexit n2_crypto_remove(struct platform_device *dev)
{
struct n2_crypto *np = dev_get_drvdata(&dev->dev);
@@ -2116,7 +2116,7 @@ static void free_ncp(struct n2_mau *mp)
kfree(mp);
}
-static int __devinit n2_mau_probe(struct of_device *dev,
+static int __devinit n2_mau_probe(struct platform_device *dev,
const struct of_device_id *match)
{
struct mdesc_handle *mdesc;
@@ -2184,7 +2184,7 @@ out_free_ncp:
return err;
}
-static int __devexit n2_mau_remove(struct of_device *dev)
+static int __devexit n2_mau_remove(struct platform_device *dev)
{
struct n2_mau *mp = dev_get_drvdata(&dev->dev);
@@ -2247,20 +2247,20 @@ static struct of_platform_driver n2_mau_driver = {
static int __init n2_init(void)
{
- int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+ int err = of_register_platform_driver(&n2_crypto_driver);
if (!err) {
- err = of_register_driver(&n2_mau_driver, &of_bus_type);
+ err = of_register_platform_driver(&n2_mau_driver);
if (err)
- of_unregister_driver(&n2_crypto_driver);
+ of_unregister_platform_driver(&n2_crypto_driver);
}
return err;
}
static void __exit n2_exit(void)
{
- of_unregister_driver(&n2_mau_driver);
- of_unregister_driver(&n2_crypto_driver);
+ of_unregister_platform_driver(&n2_mau_driver);
+ of_unregister_platform_driver(&n2_crypto_driver);
}
module_init(n2_init);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 97f4af1d8a6..4bcd825b573 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -118,7 +118,7 @@ struct talitos_channel {
struct talitos_private {
struct device *dev;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
void __iomem *reg;
int irq;
@@ -2308,7 +2308,7 @@ static int hw_supports(struct device *dev, __be32 desc_hdr_template)
return ret;
}
-static int talitos_remove(struct of_device *ofdev)
+static int talitos_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct talitos_private *priv = dev_get_drvdata(dev);
@@ -2401,7 +2401,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return t_alg;
}
-static int talitos_probe(struct of_device *ofdev,
+static int talitos_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device *dev = &ofdev->dev;
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 8661c84a105..b98c67664ae 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
static LIST_HEAD(dca_domains);
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
kfree(domain);
}
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+ struct dca_provider *dca, *_dca;
+ struct list_head unregistered_providers;
+ struct dca_domain *domain;
+ unsigned long flags;
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
+
+ INIT_LIST_HEAD(&unregistered_providers);
+
+ spin_lock_irqsave(&dca_lock, flags);
+
+ if (list_empty(&dca_domains)) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
+ /* at this point only one domain in the list is expected */
+ domain = list_first_entry(&dca_domains, struct dca_domain, node);
+ if (!domain)
+ return;
+
+ list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+ list_del(&dca->node);
+ list_add(&dca->node, &unregistered_providers);
+ }
+
+ dca_free_domain(domain);
+
+ spin_unlock_irqrestore(&dca_lock, flags);
+
+ list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+ dca_sysfs_remove_provider(dca);
+ list_del(&dca->node);
+ }
+}
+
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
domain = dca_find_domain(rc);
if (!domain) {
- domain = dca_allocate_domain(rc);
- if (domain)
- list_add(&domain->node, &dca_domains);
+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+ dca_providers_blocked = 1;
+ } else {
+ domain = dca_allocate_domain(rc);
+ if (domain)
+ list_add(&domain->node, &dca_domains);
+ }
}
return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
}
EXPORT_SYMBOL_GPL(free_dca_provider);
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
/**
* register_dca_provider - register a dca provider
* @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
unsigned long flags;
struct dca_domain *domain;
+ spin_lock_irqsave(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&dca_lock, flags);
+
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
spin_lock_irqsave(&dca_lock, flags);
domain = dca_get_domain(dev);
if (!domain) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ dca_sysfs_remove_provider(dca);
+ unregister_dca_providers();
+ } else {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ }
return -ENODEV;
}
list_add(&dca->node, &domain->dca_providers);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9e01e96fee9..9520cf02edc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,6 +33,19 @@ if DMADEVICES
comment "DMA Devices"
+config INTEL_MID_DMAC
+ tristate "Intel MID DMA support for Peripheral DMA controllers"
+ depends on PCI && X86
+ select DMA_ENGINE
+ default n
+ help
+ Enable support for the Intel(R) MID DMA engine present
+ in Intel MID chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
config ASYNC_TX_DISABLE_CHANNEL_SWITCH
bool
@@ -128,7 +141,7 @@ config TXX9_DMAC
config SH_DMAE
tristate "Renesas SuperH DMAC support"
- depends on SUPERH && SH_DMA
+ depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
depends on !SH_DMA_API
select DMA_ENGINE
help
@@ -175,6 +188,13 @@ config PL330_DMA
You need to provide platform specific settings via
platform_data for a dma-pl330 device.
+config PCH_DMA
+ tristate "Topcliff PCH DMA support"
+ depends on PCI && X86
+ select DMA_ENGINE
+ help
+ Enable support for the Topcliff PCH DMA engine.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0fe5ebbfda5..72bd70384d8 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,6 +7,7 @@ endif
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
@@ -23,3 +24,4 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e88076022a7..a0f3e6a06e0 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -790,12 +790,12 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_splice_init(&atchan->queue, &list);
list_splice_init(&atchan->active_list, &list);
- spin_unlock_bh(&atchan->lock);
-
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
+ spin_unlock_bh(&atchan->lock);
+
return 0;
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a724e6be1b4..557e2272e5b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -72,6 +72,9 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
+ u32 runtime_addr;
+ u32 runtime_ctrl;
+
struct coh901318_base *base;
};
@@ -190,6 +193,9 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
static inline dma_addr_t
cohc_dev_addr(struct coh901318_chan *cohc)
{
+ /* Runtime supplied address will take precedence */
+ if (cohc->runtime_addr)
+ return cohc->runtime_addr;
return cohc->base->platform->chan_conf[cohc->id].dev_addr;
}
@@ -1055,6 +1061,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
params = cohc_chan_param(cohc);
config = params->config;
+ /*
+ * Add runtime-specific control on top, make
+ * sure the bits you set per peripheral channel are
+ * cleared in the default config from the platform.
+ */
+ ctrl_chained |= cohc->runtime_ctrl;
+ ctrl_last |= cohc->runtime_ctrl;
+ ctrl |= cohc->runtime_ctrl;
if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
@@ -1113,6 +1127,12 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
+ /*
+ * Set the default ctrl for the channel to the one from the lli,
+ * things may have changed due to odd buffer alignment etc.
+ */
+ coh901318_set_ctrl(cohc, lli->control);
+
COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
@@ -1175,6 +1195,146 @@ coh901318_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
+/*
+ * Here we wrap in the runtime dma control interface
+ */
+struct burst_table {
+ int burst_8bit;
+ int burst_16bit;
+ int burst_32bit;
+ u32 reg;
+};
+
+static const struct burst_table burst_sizes[] = {
+ {
+ .burst_8bit = 64,
+ .burst_16bit = 32,
+ .burst_32bit = 16,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES,
+ },
+ {
+ .burst_8bit = 48,
+ .burst_16bit = 24,
+ .burst_32bit = 12,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES,
+ },
+ {
+ .burst_8bit = 32,
+ .burst_16bit = 16,
+ .burst_32bit = 8,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES,
+ },
+ {
+ .burst_8bit = 16,
+ .burst_16bit = 8,
+ .burst_32bit = 4,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES,
+ },
+ {
+ .burst_8bit = 8,
+ .burst_16bit = 4,
+ .burst_32bit = 2,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES,
+ },
+ {
+ .burst_8bit = 4,
+ .burst_16bit = 2,
+ .burst_32bit = 1,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES,
+ },
+ {
+ .burst_8bit = 2,
+ .burst_16bit = 1,
+ .burst_32bit = 0,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES,
+ },
+ {
+ .burst_8bit = 1,
+ .burst_16bit = 0,
+ .burst_32bit = 0,
+ .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE,
+ },
+};
+
+static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ u32 runtime_ctrl = 0;
+ int i = 0;
+
+ /* We only support mem to per or per to mem transfers */
+ if (config->direction == DMA_FROM_DEVICE) {
+ addr = config->src_addr;
+ addr_width = config->src_addr_width;
+ maxburst = config->src_maxburst;
+ } else if (config->direction == DMA_TO_DEVICE) {
+ addr = config->dst_addr;
+ addr_width = config->dst_addr_width;
+ maxburst = config->dst_maxburst;
+ } else {
+ dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
+ return;
+ }
+
+ dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
+ addr_width);
+ switch (addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ runtime_ctrl |=
+ COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
+ COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
+
+ while (i < ARRAY_SIZE(burst_sizes)) {
+ if (burst_sizes[i].burst_8bit <= maxburst)
+ break;
+ i++;
+ }
+
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ runtime_ctrl |=
+ COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
+ COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
+
+ while (i < ARRAY_SIZE(burst_sizes)) {
+ if (burst_sizes[i].burst_16bit <= maxburst)
+ break;
+ i++;
+ }
+
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ /* Direction doesn't matter here, it's 32/32 bits */
+ runtime_ctrl |=
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
+
+ while (i < ARRAY_SIZE(burst_sizes)) {
+ if (burst_sizes[i].burst_32bit <= maxburst)
+ break;
+ i++;
+ }
+
+ break;
+ default:
+ dev_err(COHC_2_DEV(cohc),
+ "bad runtimeconfig: alien address width\n");
+ return;
+ }
+
+ runtime_ctrl |= burst_sizes[i].reg;
+ dev_dbg(COHC_2_DEV(cohc),
+ "selected burst size %d bytes for address width %d bytes, maxburst %d\n",
+ burst_sizes[i].burst_8bit, addr_width, maxburst);
+
+ cohc->runtime_addr = addr;
+ cohc->runtime_ctrl = runtime_ctrl;
+}
+
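A usage sketch (not part of this patch; the client-side names are hypothetical) of how a peripheral driver hands its runtime parameters to the channel through the generic DMA_SLAVE_CONFIG command, which ends up in coh901318_dma_set_runtimeconfig() above:

static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};

	/* routed to coh901318_control(), cmd == DMA_SLAVE_CONFIG */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}

With a 32-bit bus width and a maxburst of 16, the burst table above resolves this to the 64-byte burst setting.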
static int
coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
@@ -1184,6 +1344,14 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
+ if (cmd == DMA_SLAVE_CONFIG) {
+ struct dma_slave_config *config =
+ (struct dma_slave_config *) arg;
+
+ coh901318_dma_set_runtimeconfig(chan, config);
+ return 0;
+ }
+
if (cmd == DMA_PAUSE) {
coh901318_pause(chan);
return 0;
@@ -1240,6 +1408,7 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return 0;
}
+
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 68d58c414cf..5589358b684 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -540,7 +540,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
struct dmatest_chan *dtc;
struct dma_device *dma_dev = chan->device;
unsigned int thread_count = 0;
- unsigned int cnt;
+ int cnt;
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) {
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f0fd6db6063..cea08bed9cf 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1297,7 +1297,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan)
kfree(chan);
}
-static int __devinit fsldma_of_probe(struct of_device *op,
+static int __devinit fsldma_of_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct fsldma_device *fdev;
@@ -1382,7 +1382,7 @@ out_return:
return err;
}
-static int fsldma_of_remove(struct of_device *op)
+static int fsldma_of_remove(struct platform_device *op)
{
struct fsldma_device *fdev;
unsigned int i;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
new file mode 100644
index 00000000000..c2591e8d9b6
--- /dev/null
+++ b/drivers/dma/intel_mid_dma.c
@@ -0,0 +1,1143 @@
+/*
+ * intel_mid_dma.c - Intel Langwell DMA Drivers
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * The driver design is based on dw_dmac driver
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/intel_mid_dma.h>
+
+#define MAX_CHAN 4 /*max ch across controllers*/
+#include "intel_mid_dma_regs.h"
+
+#define INTEL_MID_DMAC1_ID 0x0814
+#define INTEL_MID_DMAC2_ID 0x0813
+#define INTEL_MID_GP_DMAC2_ID 0x0827
+#define INTEL_MFLD_DMAC1_ID 0x0830
+#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
+#define LNW_PERIPHRAL_MASK_SIZE 0x10
+#define LNW_PERIPHRAL_STATUS 0x0
+#define LNW_PERIPHRAL_MASK 0x8
+
+struct intel_mid_dma_probe_info {
+ u8 max_chan;
+ u8 ch_base;
+ u16 block_size;
+ u32 pimr_mask;
+};
+
+#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
+ ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
+ .max_chan = (_max_chan), \
+ .ch_base = (_ch_base), \
+ .block_size = (_block_size), \
+ .pimr_mask = (_pimr_mask), \
+ })
+
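An illustrative sketch (table name and values are hypothetical, not taken from this patch): INFO() packs a per-device intel_mid_dma_probe_info into the driver_data field of a pci_device_id entry, which intel_mid_dma_probe() later reads back via id->driver_data:

static const struct pci_device_id example_ids[] = {
	/* hypothetical: 4 channels, base 0, 4095-item blocks, no PIMR mask */
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(4, 0, 4095, 0x0) },
	{ },
};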
+/*****************************************************************************
+Utility Functions*/
+/**
+ * get_ch_index - convert status to channel
+ * @status: status mask
+ * @base: dma ch base value
+ *
+ * Modify the status mask and return the channel index needing
+ * attention (or -1 if neither)
+ */
+static int get_ch_index(int *status, unsigned int base)
+{
+ int i;
+ for (i = 0; i < MAX_CHAN; i++) {
+ if (*status & (1 << (i + base))) {
+ *status = *status & ~(1 << (i + base));
+ pr_debug("MDMA: index %d New status %x\n", i, *status);
+ return i;
+ }
+ }
+ return -1;
+}
+
+/**
+ * get_block_ts - calculates dma transaction length
+ * @len: dma transfer length
+ * @tx_width: dma transfer src width
+ * @block_size: dma controller max block size
+ *
+ * Based on the src width, calculate the DMA transaction length in data items;
+ * return the data item count, or 0xFFFF if it exceeds the max block length
+ */
+static int get_block_ts(int len, int tx_width, int block_size)
+{
+ int byte_width = 0, block_ts = 0;
+
+ switch (tx_width) {
+ case LNW_DMA_WIDTH_8BIT:
+ byte_width = 1;
+ break;
+ case LNW_DMA_WIDTH_16BIT:
+ byte_width = 2;
+ break;
+ case LNW_DMA_WIDTH_32BIT:
+ default:
+ byte_width = 4;
+ break;
+ }
+
+ block_ts = len/byte_width;
+ if (block_ts > block_size)
+ block_ts = 0xFFFF;
+ return block_ts;
+}
+
+/*****************************************************************************
+DMAC1 interrupt Functions*/
+
+/**
+ * dmac1_mask_periphral_intr - mask the peripheral interrupt
+ * @midc: dma channel for which masking is required
+ *
+ * Masks the DMA peripheral interrupt; this is valid for DMAC1 family
+ * controllers only. The controller must have its peripheral mask
+ * registers already mapped.
+ */
+static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
+{
+ u32 pimr;
+ struct middma_device *mid = to_middma_device(midc->chan.device);
+
+ if (mid->pimr_mask) {
+ pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+ pimr |= mid->pimr_mask;
+ writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+ }
+ return;
+}
+
+/**
+ * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
+ * @midc: dma channel for which unmasking is required
+ *
+ * Unmasks the DMA peripheral interrupt; this is valid for DMAC1 family
+ * controllers only. The controller must have its peripheral mask
+ * registers already mapped.
+ */
+static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
+{
+ u32 pimr;
+ struct middma_device *mid = to_middma_device(midc->chan.device);
+
+ if (mid->pimr_mask) {
+ pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+ pimr &= ~mid->pimr_mask;
+ writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+ }
+ return;
+}
+
+/**
+ * enable_dma_interrupt - enable the peripheral interrupt
+ * @midc: dma channel for which the interrupt is to be enabled
+ *
+ * Enables the DMA peripheral interrupt; this is valid for DMAC1 family
+ * controllers only. The controller must have its peripheral mask
+ * registers already mapped.
+ */
+static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
+{
+ dmac1_unmask_periphral_intr(midc);
+
+ /*en ch interrupts*/
+ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+ return;
+}
+
+/**
+ * disable_dma_interrupt - disable the peripheral interrupt
+ * @midc: dma channel for which the interrupt is to be disabled
+ *
+ * Disables the DMA peripheral interrupt; this is valid for DMAC1 family
+ * controllers only. The controller must have its peripheral mask
+ * registers already mapped.
+ */
+static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
+{
+ /*Check LPE PISR, make sure fwd is disabled*/
+ dmac1_mask_periphral_intr(midc);
+ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
+ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+ return;
+}
+
+/*****************************************************************************
+DMA channel helper Functions*/
+/**
+ * midc_desc_get - get a descriptor
+ * @midc: dma channel for which descriptor is required
+ *
+ * Obtain a descriptor for the channel. Returns NULL if none are free.
+ * Once the descriptor is returned it is private until put on another
+ * list or freed
+ */
+static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
+{
+ struct intel_mid_dma_desc *desc, *_desc;
+ struct intel_mid_dma_desc *ret = NULL;
+
+ spin_lock_bh(&midc->lock);
+ list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del(&desc->desc_node);
+ ret = desc;
+ break;
+ }
+ }
+ spin_unlock_bh(&midc->lock);
+ return ret;
+}
+
+/**
+ * midc_desc_put - put a descriptor
+ * @midc: dma channel to which the descriptor belongs
+ * @desc: descriptor to put
+ *
+ * Return a descriptor obtained from midc_desc_get() back to the free pool
+ */
+static void midc_desc_put(struct intel_mid_dma_chan *midc,
+ struct intel_mid_dma_desc *desc)
+{
+ if (desc) {
+ spin_lock_bh(&midc->lock);
+ list_add_tail(&desc->desc_node, &midc->free_list);
+ spin_unlock_bh(&midc->lock);
+ }
+}
+/**
+ * midc_dostart - begin a DMA transaction
+ * @midc: channel for which txn is to be started
+ * @first: first descriptor of series
+ *
+ * Load a transaction into the engine. This must be called with midc->lock
+ * held and bh disabled.
+ */
+static void midc_dostart(struct intel_mid_dma_chan *midc,
+ struct intel_mid_dma_desc *first)
+{
+ struct middma_device *mid = to_middma_device(midc->chan.device);
+
+ /* the channel must be idle at this point */
+ if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
+ /*error*/
+ pr_err("ERR_MDMA: channel is busy in start\n");
+ /* The tasklet will hopefully advance the queue... */
+ return;
+ }
+
+ /*write registers and en*/
+ iowrite32(first->sar, midc->ch_regs + SAR);
+ iowrite32(first->dar, midc->ch_regs + DAR);
+ iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
+ iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
+ iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
+ iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
+ pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
+ (int)first->sar, (int)first->dar, first->cfg_hi,
+ first->cfg_lo, first->ctl_hi, first->ctl_lo);
+
+ iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+ first->status = DMA_IN_PROGRESS;
+}
+
+/**
+ * midc_descriptor_complete - process completed descriptor
+ * @midc: channel owning the descriptor
+ * @desc: the descriptor itself
+ *
+ * Process a completed descriptor and perform any callbacks upon
+ * the completion. The completion handling drops the lock during the
+ * callbacks but must be called with the lock held.
+ */
+static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
+ struct intel_mid_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ dma_async_tx_callback callback_txd = NULL;
+ void *param_txd = NULL;
+
+ midc->completed = txd->cookie;
+ callback_txd = txd->callback;
+ param_txd = txd->callback_param;
+
+ list_move(&desc->desc_node, &midc->free_list);
+
+ spin_unlock_bh(&midc->lock);
+ if (callback_txd) {
+ pr_debug("MDMA: TXD callback set ... calling\n");
+ callback_txd(param_txd);
+ spin_lock_bh(&midc->lock);
+ return;
+ }
+ spin_lock_bh(&midc->lock);
+
+}
+/**
+ * midc_scan_descriptors - check the descriptors in the channel and
+ * mark them completed when the tx is complete
+ * @mid: device
+ * @midc: channel to scan
+ *
+ * Walk the descriptor chain for the device and process any entries
+ * that are complete.
+ */
+static void midc_scan_descriptors(struct middma_device *mid,
+ struct intel_mid_dma_chan *midc)
+{
+ struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
+
+ /*tx is complete*/
+ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+ if (desc->status == DMA_IN_PROGRESS) {
+ desc->status = DMA_SUCCESS;
+ midc_descriptor_complete(midc, desc);
+ }
+ }
+ return;
+}
+
+/*****************************************************************************
+DMA engine callback Functions*/
+/**
+ * intel_mid_dma_tx_submit - callback to submit DMA transaction
+ * @tx: dma engine descriptor
+ *
+ * Submit the DMA transaction for this descriptor; start the channel if idle
+ */
+static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&midc->lock);
+ cookie = midc->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ midc->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+
+ if (list_empty(&midc->active_list)) {
+ midc_dostart(midc, desc);
+ list_add_tail(&desc->desc_node, &midc->active_list);
+ } else {
+ list_add_tail(&desc->desc_node, &midc->queue);
+ }
+ spin_unlock_bh(&midc->lock);
+
+ return cookie;
+}
+
+/**
+ * intel_mid_dma_issue_pending - callback to issue pending txn
+ * @chan: chan whose pending transactions need to be checked and submitted
+ *
+ * Call for scan to issue pending descriptors
+ */
+static void intel_mid_dma_issue_pending(struct dma_chan *chan)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+
+ spin_lock_bh(&midc->lock);
+ if (!list_empty(&midc->queue))
+ midc_scan_descriptors(to_middma_device(chan->device), midc);
+ spin_unlock_bh(&midc->lock);
+}
+
+/**
+ * intel_mid_dma_tx_status - Return status of txn
+ * @chan: chan whose status needs to be checked
+ * @cookie: cookie for txn
+ * @txstate: DMA txn state
+ *
+ * Return status of DMA txn
+ */
+static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = midc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret != DMA_SUCCESS) {
+ midc_scan_descriptors(to_middma_device(chan->device), midc);
+
+ last_complete = midc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ }
+
+ if (txstate) {
+ txstate->last = last_complete;
+ txstate->used = last_used;
+ txstate->residue = 0;
+ }
+ return ret;
+}
+
+/**
+ * intel_mid_dma_device_control - DMA device control
+ * @chan: chan for DMA control
+ * @cmd: control cmd
+ * @arg: cmd arg value
+ *
+ * Perform DMA control command
+ */
+static int intel_mid_dma_device_control(struct dma_chan *chan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ struct middma_device *mid = to_middma_device(chan->device);
+ struct intel_mid_dma_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_bh(&midc->lock);
+ if (midc->in_use == false) {
+ spin_unlock_bh(&midc->lock);
+ return 0;
+ }
+ list_splice_init(&midc->free_list, &list);
+ midc->descs_allocated = 0;
+ midc->slave = NULL;
+
+ /* Disable interrupts */
+ disable_dma_interrupt(midc);
+
+ spin_unlock_bh(&midc->lock);
+ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+ pr_debug("MDMA: freeing descriptor %p\n", desc);
+ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ }
+ return 0;
+}
+
+/**
+ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ *
+ * Do DMA sg txn: NOT supported now
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ /*not supported now*/
+ return NULL;
+}
+
+/**
+ * intel_mid_dma_prep_memcpy - Prep memcpy txn
+ * @chan: chan for DMA transfer
+ * @dest: destination address
+ * @src: source address
+ * @len: DMA transfer len
+ * @flags: DMA flags
+ *
+ * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only;
+ * the peripheral txn details should be filled in the slave structure properly.
+ * Returns the descriptor for this txn
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct intel_mid_dma_chan *midc;
+ struct intel_mid_dma_desc *desc = NULL;
+ struct intel_mid_dma_slave *mids;
+ union intel_mid_dma_ctl_lo ctl_lo;
+ union intel_mid_dma_ctl_hi ctl_hi;
+ union intel_mid_dma_cfg_lo cfg_lo;
+ union intel_mid_dma_cfg_hi cfg_hi;
+ enum intel_mid_dma_width width = 0;
+
+ pr_debug("MDMA: Prep for memcpy\n");
+ WARN_ON(!chan);
+ if (!len)
+ return NULL;
+
+ mids = chan->private;
+ WARN_ON(!mids);
+
+ midc = to_intel_mid_dma_chan(chan);
+ WARN_ON(!midc);
+
+ pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
+ midc->dma->pci_id, midc->ch_id, len);
+ pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
+ mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
+
+ /*calculate CFG_LO*/
+ if (mids->hs_mode == LNW_DMA_SW_HS) {
+ cfg_lo.cfg_lo = 0;
+ cfg_lo.cfgx.hs_sel_dst = 1;
+ cfg_lo.cfgx.hs_sel_src = 1;
+ } else if (mids->hs_mode == LNW_DMA_HW_HS)
+ cfg_lo.cfg_lo = 0x00000;
+
+ /*calculate CFG_HI*/
+ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+ /*SW HS only*/
+ cfg_hi.cfg_hi = 0;
+ } else {
+ cfg_hi.cfg_hi = 0;
+ if (midc->dma->pimr_mask) {
+ cfg_hi.cfgx.protctl = 0x0; /*default value*/
+ cfg_hi.cfgx.fifo_mode = 1;
+ if (mids->dirn == DMA_TO_DEVICE) {
+ cfg_hi.cfgx.src_per = 0;
+ if (mids->device_instance == 0)
+ cfg_hi.cfgx.dst_per = 3;
+ if (mids->device_instance == 1)
+ cfg_hi.cfgx.dst_per = 1;
+ } else if (mids->dirn == DMA_FROM_DEVICE) {
+ if (mids->device_instance == 0)
+ cfg_hi.cfgx.src_per = 2;
+ if (mids->device_instance == 1)
+ cfg_hi.cfgx.src_per = 0;
+ cfg_hi.cfgx.dst_per = 0;
+ }
+ } else {
+ cfg_hi.cfgx.protctl = 0x1; /*default value*/
+ cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
+ midc->ch_id - midc->dma->chan_base;
+ }
+ }
+
+ /*calculate CTL_HI*/
+ ctl_hi.ctlx.reser = 0;
+ width = mids->src_width;
+
+ ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
+ pr_debug("MDMA:calc len %d for block size %d\n",
+ ctl_hi.ctlx.block_ts, midc->dma->block_size);
+ /*calculate CTL_LO*/
+ ctl_lo.ctl_lo = 0;
+ ctl_lo.ctlx.int_en = 1;
+ ctl_lo.ctlx.dst_tr_width = mids->dst_width;
+ ctl_lo.ctlx.src_tr_width = mids->src_width;
+ ctl_lo.ctlx.dst_msize = mids->src_msize;
+ ctl_lo.ctlx.src_msize = mids->dst_msize;
+
+ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+ ctl_lo.ctlx.tt_fc = 0;
+ ctl_lo.ctlx.sinc = 0;
+ ctl_lo.ctlx.dinc = 0;
+ } else {
+ if (mids->dirn == DMA_TO_DEVICE) {
+ ctl_lo.ctlx.sinc = 0;
+ ctl_lo.ctlx.dinc = 2;
+ ctl_lo.ctlx.tt_fc = 1;
+ } else if (mids->dirn == DMA_FROM_DEVICE) {
+ ctl_lo.ctlx.sinc = 2;
+ ctl_lo.ctlx.dinc = 0;
+ ctl_lo.ctlx.tt_fc = 2;
+ }
+ }
+
+ pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
+ ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+
+ enable_dma_interrupt(midc);
+
+ desc = midc_desc_get(midc);
+ if (desc == NULL)
+ goto err_desc_get;
+ desc->sar = src;
+ desc->dar = dest;
+ desc->len = len;
+ desc->cfg_hi = cfg_hi.cfg_hi;
+ desc->cfg_lo = cfg_lo.cfg_lo;
+ desc->ctl_lo = ctl_lo.ctl_lo;
+ desc->ctl_hi = ctl_hi.ctl_hi;
+ desc->width = width;
+ desc->dirn = mids->dirn;
+ return &desc->txd;
+
+err_desc_get:
+ pr_err("ERR_MDMA: Failed to get desc\n");
+ midc_desc_put(midc, desc);
+ return NULL;
+}
+
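A client-side sketch (not part of the patch; field values are hypothetical and msize/handshake details are left at their defaults) of how the slave parameters mentioned above are supplied through chan->private before preparing a transfer:

static struct intel_mid_dma_slave example_slave = {
	.cfg_mode	= LNW_DMA_MEM_TO_MEM,
	.hs_mode	= LNW_DMA_SW_HS,
	.src_width	= LNW_DMA_WIDTH_32BIT,
	.dst_width	= LNW_DMA_WIDTH_32BIT,
};

static struct dma_async_tx_descriptor *example_prep(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	chan->private = &example_slave;	/* consulted by prep_memcpy above */
	return chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
}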
+/**
+ * intel_mid_dma_free_chan_resources - Frees dma resources
+ * @chan: chan requiring attention
+ *
+ * Frees the allocated resources on this DMA chan
+ */
+static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ struct middma_device *mid = to_middma_device(chan->device);
+ struct intel_mid_dma_desc *desc, *_desc;
+
+ if (true == midc->in_use) {
+ /*trying to free ch in use!!!!!*/
+ pr_err("ERR_MDMA: trying to free ch in use\n");
+ }
+
+ spin_lock_bh(&midc->lock);
+ midc->descs_allocated = 0;
+ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+ list_del(&desc->desc_node);
+ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ }
+ list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
+ list_del(&desc->desc_node);
+ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ }
+ list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
+ list_del(&desc->desc_node);
+ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ }
+ spin_unlock_bh(&midc->lock);
+ midc->in_use = false;
+ /* Disable CH interrupts */
+ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
+ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
+}
+
+/**
+ * intel_mid_dma_alloc_chan_resources - Allocate dma resources
+ * @chan: chan requiring attention
+ *
+ * Allocates DMA resources on this chan
+ * Returns the number of descriptors allocated
+ */
+static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ struct middma_device *mid = to_middma_device(chan->device);
+ struct intel_mid_dma_desc *desc;
+ dma_addr_t phys;
+ int i = 0;
+
+
+ /* ASSERT: channel is idle */
+ if (test_ch_en(mid->dma_base, midc->ch_id)) {
+ /*ch is not idle*/
+ pr_err("ERR_MDMA: ch not idle\n");
+ return -EIO;
+ }
+ midc->completed = chan->cookie = 1;
+
+ spin_lock_bh(&midc->lock);
+ while (midc->descs_allocated < DESCS_PER_CHANNEL) {
+ spin_unlock_bh(&midc->lock);
+ desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
+ if (!desc) {
+ pr_err("ERR_MDMA: desc failed\n");
+ return -ENOMEM;
+ /*check*/
+ }
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = intel_mid_dma_tx_submit;
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.phys = phys;
+ spin_lock_bh(&midc->lock);
+ i = ++midc->descs_allocated;
+ list_add_tail(&desc->desc_node, &midc->free_list);
+ }
+ spin_unlock_bh(&midc->lock);
+ midc->in_use = false;
+ pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
+ return i;
+}
+
+/**
+ * midc_handle_error - Handle DMA txn error
+ * @mid: controller where the error occurred
+ * @midc: chan where the error occurred
+ *
+ * Scan the descriptor for error
+ */
+static void midc_handle_error(struct middma_device *mid,
+ struct intel_mid_dma_chan *midc)
+{
+ midc_scan_descriptors(mid, midc);
+}
+
+/**
+ * dma_tasklet - DMA interrupt tasklet
+ * @data: tasklet arg (the controller structure)
+ *
+ * Scan the controller for interrupts for completion/error
+ * Clear the interrupt and call for handling completion/error
+ */
+static void dma_tasklet(unsigned long data)
+{
+ struct middma_device *mid = NULL;
+ struct intel_mid_dma_chan *midc = NULL;
+ u32 status;
+ int i;
+
+ mid = (struct middma_device *)data;
+ if (mid == NULL) {
+ pr_err("ERR_MDMA: tasklet Null param\n");
+ return;
+ }
+ pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+ status = ioread32(mid->dma_base + RAW_TFR);
+ pr_debug("MDMA:RAW_TFR %x\n", status);
+ status &= mid->intr_mask;
+ while (status) {
+ /*txn interrupt*/
+ i = get_ch_index(&status, mid->chan_base);
+ if (i < 0) {
+ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
+ return;
+ }
+ midc = &mid->ch[i];
+ if (midc == NULL) {
+ pr_err("ERR_MDMA:Null param midc\n");
+ return;
+ }
+ pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
+ status, midc->ch_id, i);
+ /*clear these interrupts first*/
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+
+ spin_lock_bh(&midc->lock);
+ midc_scan_descriptors(mid, midc);
+ pr_debug("MDMA:Scan of desc... complete, unmasking\n");
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ mid->dma_base + MASK_TFR);
+ spin_unlock_bh(&midc->lock);
+ }
+
+ status = ioread32(mid->dma_base + RAW_ERR);
+ status &= mid->intr_mask;
+ while (status) {
+ /*err interrupt*/
+ i = get_ch_index(&status, mid->chan_base);
+ if (i < 0) {
+ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
+ return;
+ }
+ midc = &mid->ch[i];
+ if (midc == NULL) {
+ pr_err("ERR_MDMA:Null param midc\n");
+ return;
+ }
+ pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
+ status, midc->ch_id, i);
+
+ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
+ spin_lock_bh(&midc->lock);
+ midc_handle_error(mid, midc);
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ mid->dma_base + MASK_ERR);
+ spin_unlock_bh(&midc->lock);
+ }
+ pr_debug("MDMA:Exiting takslet...\n");
+ return;
+}
+
+static void dma_tasklet1(unsigned long data)
+{
+ pr_debug("MDMA:in takslet1...\n");
+ return dma_tasklet(data);
+}
+
+static void dma_tasklet2(unsigned long data)
+{
+ pr_debug("MDMA:in takslet2...\n");
+ return dma_tasklet(data);
+}
+
+/**
+ * intel_mid_dma_interrupt - DMA ISR
+ * @irq: IRQ where interrupt occurred
+ * @data: ISR callback data (the controller structure)
+ *
+ * See if this is our interrupt; if so, schedule the tasklet,
+ * otherwise ignore it
+ */
+static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
+{
+ struct middma_device *mid = data;
+ u32 status;
+ int call_tasklet = 0;
+
+ /*DMA Interrupt*/
+ pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
+ if (!mid) {
+ pr_err("ERR_MDMA:null pointer mid\n");
+ return IRQ_NONE;
+ }
+
+ status = ioread32(mid->dma_base + RAW_TFR);
+ pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
+ status &= mid->intr_mask;
+ if (status) {
+ /*need to disable intr*/
+ iowrite32((status << 8), mid->dma_base + MASK_TFR);
+ pr_debug("MDMA: Calling tasklet %x\n", status);
+ call_tasklet = 1;
+ }
+ status = ioread32(mid->dma_base + RAW_ERR);
+ status &= mid->intr_mask;
+ if (status) {
+ iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
+ call_tasklet = 1;
+ }
+ if (call_tasklet)
+ tasklet_schedule(&mid->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
+{
+ return intel_mid_dma_interrupt(irq, data);
+}
+
+static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
+{
+ return intel_mid_dma_interrupt(irq, data);
+}
+
+/**
+ * mid_setup_dma - Setup the DMA controller
+ * @pdev: Controller PCI device structure
+ *
+ * Initialize the DMA controller and its channels, register with the DMA
+ * engine, and set up the ISR.
+ */
+static int mid_setup_dma(struct pci_dev *pdev)
+{
+ struct middma_device *dma = pci_get_drvdata(pdev);
+ int err, i;
+ unsigned int irq_level;
+
+ /* DMA coherent memory pool for DMA descriptor allocations */
+ dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
+ sizeof(struct intel_mid_dma_desc),
+ 32, 0);
+ if (NULL == dma->dma_pool) {
+ pr_err("ERR_MDMA:pci_pool_create failed\n");
+ err = -ENOMEM;
+ kfree(dma);
+ goto err_dma_pool;
+ }
+
+ INIT_LIST_HEAD(&dma->common.channels);
+ dma->pci_id = pdev->device;
+ if (dma->pimr_mask) {
+ dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
+ LNW_PERIPHRAL_MASK_SIZE);
+ if (dma->mask_reg == NULL) {
+ pr_err("ERR_MDMA:Cant map periphral intr space !!\n");
+ return -ENOMEM;
+ }
+ } else
+ dma->mask_reg = NULL;
+
+ pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
+ /*init CH structures*/
+ dma->intr_mask = 0;
+ for (i = 0; i < dma->max_chan; i++) {
+ struct intel_mid_dma_chan *midch = &dma->ch[i];
+
+ midch->chan.device = &dma->common;
+ midch->chan.cookie = 1;
+ midch->chan.chan_id = i;
+ midch->ch_id = dma->chan_base + i;
+ pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
+
+ midch->dma_base = dma->dma_base;
+ midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
+ midch->dma = dma;
+ dma->intr_mask |= 1 << (dma->chan_base + i);
+ spin_lock_init(&midch->lock);
+
+ INIT_LIST_HEAD(&midch->active_list);
+ INIT_LIST_HEAD(&midch->queue);
+ INIT_LIST_HEAD(&midch->free_list);
+ /*mask interrupts*/
+ iowrite32(MASK_INTR_REG(midch->ch_id),
+ dma->dma_base + MASK_BLOCK);
+ iowrite32(MASK_INTR_REG(midch->ch_id),
+ dma->dma_base + MASK_SRC_TRAN);
+ iowrite32(MASK_INTR_REG(midch->ch_id),
+ dma->dma_base + MASK_DST_TRAN);
+ iowrite32(MASK_INTR_REG(midch->ch_id),
+ dma->dma_base + MASK_ERR);
+ iowrite32(MASK_INTR_REG(midch->ch_id),
+ dma->dma_base + MASK_TFR);
+
+ disable_dma_interrupt(midch);
+ list_add_tail(&midch->chan.device_node, &dma->common.channels);
+ }
+ pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);
+
+ /*init dma structure*/
+ dma_cap_zero(dma->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
+ dma->common.dev = &pdev->dev;
+ dma->common.chancnt = dma->max_chan;
+
+ dma->common.device_alloc_chan_resources =
+ intel_mid_dma_alloc_chan_resources;
+ dma->common.device_free_chan_resources =
+ intel_mid_dma_free_chan_resources;
+
+ dma->common.device_tx_status = intel_mid_dma_tx_status;
+ dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
+ dma->common.device_issue_pending = intel_mid_dma_issue_pending;
+ dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
+ dma->common.device_control = intel_mid_dma_device_control;
+
+ /*enable dma cntrl*/
+ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
+
+ /*register irq */
+ if (dma->pimr_mask) {
+ irq_level = IRQF_SHARED;
+ pr_debug("MDMA:Requesting irq shared for DMAC1\n");
+ err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
+ IRQF_SHARED, "INTEL_MID_DMAC1", dma);
+ if (0 != err)
+ goto err_irq;
+ } else {
+ dma->intr_mask = 0x03;
+ irq_level = 0;
+ pr_debug("MDMA:Requesting irq for DMAC2\n");
+ err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
+ 0, "INTEL_MID_DMAC2", dma);
+ if (0 != err)
+ goto err_irq;
+ }
+ /*register device w/ engine*/
+ err = dma_async_device_register(&dma->common);
+ if (0 != err) {
+ pr_err("ERR_MDMA:device_register failed: %d\n", err);
+ goto err_engine;
+ }
+ if (dma->pimr_mask) {
+ pr_debug("setting up tasklet1 for DMAC1\n");
+ tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
+ } else {
+ pr_debug("setting up tasklet2 for DMAC2\n");
+ tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
+ }
+ return 0;
+
+err_engine:
+ free_irq(pdev->irq, dma);
+err_irq:
+ pci_pool_destroy(dma->dma_pool);
+err_dma_pool:
+ pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
+ return err;
+
+}
+
+/**
+ * middma_shutdown - Shutdown the DMA controller
+ * @pdev: Controller PCI device structure
+ *
+ * Called by remove.
+ * Unregister the DMA controller, clear all structures and free the interrupt.
+ */
+static void middma_shutdown(struct pci_dev *pdev)
+{
+ struct middma_device *device = pci_get_drvdata(pdev);
+
+ dma_async_device_unregister(&device->common);
+ pci_pool_destroy(device->dma_pool);
+ if (device->mask_reg)
+ iounmap(device->mask_reg);
+ if (device->dma_base)
+ iounmap(device->dma_base);
+ free_irq(pdev->irq, device);
+ return;
+}
+
+/**
+ * intel_mid_dma_probe - PCI Probe
+ * @pdev: Controller PCI device structure
+ * @id: pci device id structure
+ *
+ * Initialize the PCI device, map BARs and query the driver data.
+ * Call mid_setup_dma to complete controller and channel initialization.
+ */
+static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct middma_device *device;
+ u32 base_addr, bar_size;
+ struct intel_mid_dma_probe_info *info;
+ int err;
+
+ pr_debug("MDMA: probe for %x\n", pdev->device);
+ info = (void *)id->driver_data;
+ pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
+ info->max_chan, info->ch_base,
+ info->block_size, info->pimr_mask);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_enable_device;
+
+ err = pci_request_regions(pdev, "intel_mid_dmac");
+ if (err)
+ goto err_request_regions;
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_set_dma_mask;
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_set_dma_mask;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ pr_err("ERR_MDMA:kzalloc failed probe\n");
+ err = -ENOMEM;
+ goto err_kzalloc;
+ }
+ device->pdev = pci_dev_get(pdev);
+
+ base_addr = pci_resource_start(pdev, 0);
+ bar_size = pci_resource_len(pdev, 0);
+ device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
+ if (!device->dma_base) {
+ pr_err("ERR_MDMA:ioremap failed\n");
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+ pci_set_drvdata(pdev, device);
+ pci_set_master(pdev);
+ device->max_chan = info->max_chan;
+ device->chan_base = info->ch_base;
+ device->block_size = info->block_size;
+ device->pimr_mask = info->pimr_mask;
+
+ err = mid_setup_dma(pdev);
+ if (err)
+ goto err_dma;
+
+ return 0;
+
+err_dma:
+ iounmap(device->dma_base);
+err_ioremap:
+ pci_dev_put(pdev);
+ kfree(device);
+err_kzalloc:
+err_set_dma_mask:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+err_request_regions:
+err_enable_device:
+ pr_err("ERR_MDMA:Probe failed %d\n", err);
+ return err;
+}
+
+/**
+ * intel_mid_dma_remove - PCI remove
+ * @pdev: Controller PCI device structure
+ *
+ * Free up all resources and data.
+ * Call middma_shutdown to complete controller and channel cleanup.
+ */
+static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
+{
+ struct middma_device *device = pci_get_drvdata(pdev);
+ middma_shutdown(pdev);
+ pci_dev_put(pdev);
+ kfree(device);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+/******************************************************************************
+* PCI stuff
+*/
+static struct pci_device_id intel_mid_dma_ids[] = {
+ { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)},
+ { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)},
+ { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)},
+ { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
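+
+/*
+ * Reading the INFO() packing used above (an assumption, inferred from the
+ * order of the probe debug print): INFO(2, 6, 4095, 0x200020) would describe
+ * a controller with 2 channels starting at channel index 6, a max block size
+ * of 4095 and a peripheral interrupt (PIMR) mask of 0x200020.
+ */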
+
+static struct pci_driver intel_mid_dma_pci = {
+ .name = "Intel MID DMA",
+ .id_table = intel_mid_dma_ids,
+ .probe = intel_mid_dma_probe,
+ .remove = __devexit_p(intel_mid_dma_remove),
+};
+
+static int __init intel_mid_dma_init(void)
+{
+ pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
+ INTEL_MID_DMA_DRIVER_VERSION);
+ return pci_register_driver(&intel_mid_dma_pci);
+}
+fs_initcall(intel_mid_dma_init);
+
+static void __exit intel_mid_dma_exit(void)
+{
+ pci_unregister_driver(&intel_mid_dma_pci);
+}
+module_exit(intel_mid_dma_exit);
+
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
new file mode 100644
index 00000000000..d81aa658ab0
--- /dev/null
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -0,0 +1,260 @@
+/*
+ * intel_mid_dma_regs.h - Intel MID DMA Drivers
+ *
+ * Copyright (C) 2008-10 Intel Corp
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#ifndef __INTEL_MID_DMAC_REGS_H__
+#define __INTEL_MID_DMAC_REGS_H__
+
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/pci_ids.h>
+
+#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
+
+#define REG_BIT0 0x00000001
+#define REG_BIT8 0x00000100
+
+#define UNMASK_INTR_REG(chan_num) \
+ ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
+#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
+
+#define ENABLE_CHANNEL(chan_num) \
+ ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
+
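+/*
+ * Worked example (illustration only): the upper byte of the MASK_* registers
+ * acts as a per-channel write-enable, the lower byte as the mask value, so
+ * for channel 2 MASK_INTR_REG(2) writes 0x0400 (enable bit set, value bit
+ * clear = masked) while UNMASK_INTR_REG(2) writes 0x0404 (both set =
+ * unmasked). The ISR's "status << 8" write follows the same scheme.
+ */
+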
+#define DESCS_PER_CHANNEL 16
+/*DMA Registers*/
+/*registers associated with channel programming*/
+#define DMA_REG_SIZE 0x400
+#define DMA_CH_SIZE 0x58
+
+/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
+#define SAR 0x00 /* Source Address Register*/
+#define DAR 0x08 /* Destination Address Register*/
+#define CTL_LOW 0x18 /* Control Register*/
+#define CTL_HIGH 0x1C /* Control Register*/
+#define CFG_LOW 0x40 /* Configuration Register Low*/
+#define CFG_HIGH 0x44 /* Configuration Register high*/
+
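+/*
+ * Hypothetical helper (illustration only, the driver pre-computes
+ * midch->ch_regs instead): resolve a per-channel register address
+ * following the "CH X REG = (DMA_CH_SIZE)*CH_NO + REG" rule above.
+ */
+static inline void __iomem *example_ch_reg(void __iomem *dma_base,
+ int ch_no, u32 reg)
+{
+ return dma_base + DMA_CH_SIZE * ch_no + reg;
+}
+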
+#define STATUS_TFR 0x2E8
+#define STATUS_BLOCK 0x2F0
+#define STATUS_ERR 0x308
+
+#define RAW_TFR 0x2C0
+#define RAW_BLOCK 0x2C8
+#define RAW_ERR 0x2E0
+
+#define MASK_TFR 0x310
+#define MASK_BLOCK 0x318
+#define MASK_SRC_TRAN 0x320
+#define MASK_DST_TRAN 0x328
+#define MASK_ERR 0x330
+
+#define CLEAR_TFR 0x338
+#define CLEAR_BLOCK 0x340
+#define CLEAR_SRC_TRAN 0x348
+#define CLEAR_DST_TRAN 0x350
+#define CLEAR_ERR 0x358
+
+#define INTR_STATUS 0x360
+#define DMA_CFG 0x398
+#define DMA_CHAN_EN 0x3A0
+
+/*DMA channel control registers*/
+union intel_mid_dma_ctl_lo {
+ struct {
+ u32 int_en:1; /*enable or disable interrupts*/
+ /*should be 0*/
+ u32 dst_tr_width:3; /*destination transfer width*/
+ /*usually 32 bits = 010*/
+ u32 src_tr_width:3; /*source transfer width*/
+ /*usually 32 bits = 010*/
+ u32 dinc:2; /*destination address inc/dec*/
+ /*For mem:INC=00, Peripheral NoINC=11*/
+ u32 sinc:2; /*source address inc or dec, as above*/
+ u32 dst_msize:3; /*destination burst transaction length*/
+ /*always = 16 ie 011*/
+ u32 src_msize:3; /*source burst transaction length*/
+ /*always = 16 ie 011*/
+ u32 reser1:3;
+ u32 tt_fc:3; /*transfer type and flow controller*/
+ /*M-M = 000
+ P-M = 010
+ M-P = 001*/
+ u32 dms:2; /*destination master select = 0*/
+ u32 sms:2; /*source master select = 0*/
+ u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
+ u32 llp_src_en:1; /*enable/disable source LLP = 0*/
+ u32 reser2:3;
+ } ctlx;
+ u32 ctl_lo;
+};
+
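+/*
+ * Minimal sketch (illustration only; the values follow the field comments
+ * above and are assumptions for the example, not driver policy): composing
+ * CTL_LOW for a 32-bit wide, burst-of-16, memory-to-peripheral transfer.
+ */
+static inline u32 example_ctl_lo_mem_to_per(void)
+{
+ union intel_mid_dma_ctl_lo ctl_lo;
+
+ ctl_lo.ctl_lo = 0;
+ ctl_lo.ctlx.int_en = 1; /*enable interrupts*/
+ ctl_lo.ctlx.dst_tr_width = 0x2; /*32 bits*/
+ ctl_lo.ctlx.src_tr_width = 0x2; /*32 bits*/
+ ctl_lo.ctlx.sinc = 0x0; /*memory: increment*/
+ ctl_lo.ctlx.dinc = 0x3; /*peripheral: no increment*/
+ ctl_lo.ctlx.src_msize = 0x3; /*burst of 16*/
+ ctl_lo.ctlx.dst_msize = 0x3; /*burst of 16*/
+ ctl_lo.ctlx.tt_fc = 0x1; /*M-P*/
+ return ctl_lo.ctl_lo;
+}
+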
+union intel_mid_dma_ctl_hi {
+ struct {
+ u32 block_ts:12; /*block transfer size*/
+ /*configured by DMAC*/
+ u32 reser:20;
+ } ctlx;
+ u32 ctl_hi;
+
+};
+
+/*DMA channel configuration registers*/
+union intel_mid_dma_cfg_lo {
+ struct {
+ u32 reser1:5;
+ u32 ch_prior:3; /*channel priority = 0*/
+ u32 ch_susp:1; /*channel suspend = 0*/
+ u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/
+ u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/
+ /*HW = 0, SW = 1*/
+ u32 hs_sel_src:1; /*select HW/SW src handshaking*/
+ u32 reser2:6;
+ u32 dst_hs_pol:1; /*dest HS interface polarity*/
+ u32 src_hs_pol:1; /*src HS interface polarity*/
+ u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit)*/
+ u32 reload_src:1; /*auto reload src addr =1 if src is P*/
+ u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/
+ } cfgx;
+ u32 cfg_lo;
+};
+
+union intel_mid_dma_cfg_hi {
+ struct {
+ u32 fcmode:1; /*flow control mode = 1*/
+ u32 fifo_mode:1; /*FIFO mode select = 1*/
+ u32 protctl:3; /*protection control = 0*/
+ u32 rsvd:2;
+ u32 src_per:4; /*src hw HS interface*/
+ u32 dst_per:4; /*dstn hw HS interface*/
+ u32 reser2:17;
+ } cfgx;
+ u32 cfg_hi;
+};
+
+/**
+ * struct intel_mid_dma_chan - internal mid representation of a DMA channel
+ * @chan: dma_chan structure representation for mid chan
+ * @ch_regs: MMIO register space pointer to channel register
+ * @dma_base: MMIO register space DMA engine base pointer
+ * @ch_id: DMA channel id
+ * @lock: channel spinlock
+ * @completed: DMA cookie
+ * @active_list: current active descriptors
+ * @queue: current queued up descriptors
+ * @free_list: current free descriptors
+ * @slave: dma slave structure
+ * @descs_allocated: total number of descriptors allocated
+ * @dma: dma device structure pointer
+ * @in_use: bool representing if ch is in use or not
+ */
+struct intel_mid_dma_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ void __iomem *dma_base;
+ int ch_id;
+ spinlock_t lock;
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ struct intel_mid_dma_slave *slave;
+ unsigned int descs_allocated;
+ struct middma_device *dma;
+ bool in_use;
+};
+
+static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
+ struct dma_chan *chan)
+{
+ return container_of(chan, struct intel_mid_dma_chan, chan);
+}
+
+/**
+ * struct middma_device - internal representation of a DMA device
+ * @pdev: PCI device
+ * @dma_base: MMIO register space pointer of DMA
+ * @dma_pool: for allocating DMA descriptors
+ * @common: embedded struct dma_device
+ * @tasklet: dma tasklet for processing interrupts
+ * @ch: per channel data
+ * @pci_id: DMA device PCI ID
+ * @intr_mask: Interrupt mask to be used
+ * @mask_reg: MMIO register for peripheral mask
+ * @chan_base: Base ch index (read from driver data)
+ * @max_chan: max number of chs supported (from drv_data)
+ * @block_size: Block size of DMA transfer supported (from drv_data)
+ * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
+ */
+struct middma_device {
+ struct pci_dev *pdev;
+ void __iomem *dma_base;
+ struct pci_pool *dma_pool;
+ struct dma_device common;
+ struct tasklet_struct tasklet;
+ struct intel_mid_dma_chan ch[MAX_CHAN];
+ unsigned int pci_id;
+ unsigned int intr_mask;
+ void __iomem *mask_reg;
+ int chan_base;
+ int max_chan;
+ int block_size;
+ unsigned int pimr_mask;
+};
+
+static inline struct middma_device *to_middma_device(struct dma_device *common)
+{
+ return container_of(common, struct middma_device, common);
+}
+
+struct intel_mid_dma_desc {
+ void __iomem *block; /*ch ptr*/
+ struct list_head desc_node;
+ struct dma_async_tx_descriptor txd;
+ size_t len;
+ dma_addr_t sar;
+ dma_addr_t dar;
+ u32 cfg_hi;
+ u32 cfg_lo;
+ u32 ctl_lo;
+ u32 ctl_hi;
+ dma_addr_t next;
+ enum dma_data_direction dirn;
+ enum dma_status status;
+ enum intel_mid_dma_width width; /*width of DMA txn*/
+ enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
+
+};
+
+static inline int test_ch_en(void __iomem *dma, u32 ch_no)
+{
+ u32 en_reg = ioread32(dma + DMA_CHAN_EN);
+ return (en_reg >> ch_no) & 0x1;
+}
+
+static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
+ (struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct intel_mid_dma_desc, txd);
+}
+#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 6d3a73b57e5..5216c8a92a2 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
#define IOAT_RESHAPE_PENDING 4
+ #define IOAT_RUN 5
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 3c8b32a8379..216f9d383b5 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -287,7 +287,10 @@ void ioat2_timer_event(unsigned long data)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
- BUG_ON(is_ioat_bug(chanerr));
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
}
/* if we haven't made progress and we have already
@@ -492,6 +495,8 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf
return ring;
}
+void ioat2_free_chan_resources(struct dma_chan *c);
+
/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
* @chan: channel to be initialized
*/
@@ -500,6 +505,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent **ring;
+ u64 status;
int order;
/* have we already been set up? */
@@ -540,7 +546,20 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
- return 1 << ioat->alloc_order;
+ /* check that we got off the ground */
+ udelay(5);
+ status = ioat_chansts(chan);
+ if (is_ioat_active(status) || is_ioat_idle(status)) {
+ set_bit(IOAT_RUN, &chan->state);
+ return 1 << ioat->alloc_order;
+ } else {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ dev_WARN(to_dev(chan),
+ "failed to start channel chanerr: %#x\n", chanerr);
+ ioat2_free_chan_resources(c);
+ return -EFAULT;
+ }
}
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
@@ -778,6 +797,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
del_timer_sync(&chan->timer);
device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
+ clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 1cdd22e1051..d0f49909847 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -361,7 +361,10 @@ static void ioat3_timer_event(unsigned long data)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
- BUG_ON(is_ioat_bug(chanerr));
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
}
/* if we haven't made progress and we have already
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 14a8c0f1698..4e9cbf30059 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -627,7 +627,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
return &mdesc->desc;
}
-static int __devinit mpc_dma_probe(struct of_device *op,
+static int __devinit mpc_dma_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dn = op->dev.of_node;
@@ -753,7 +753,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
return retval;
}
-static int __devexit mpc_dma_remove(struct of_device *op)
+static int __devexit mpc_dma_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9fde3..411d5bf50fc 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
- u32 val = (1 << (1 + (chan->idx * 16)));
+ u32 val = ~(1 << (chan->idx * 16));
dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
new file mode 100644
index 00000000000..3533948b88b
--- /dev/null
+++ b/drivers/dma/pch_dma.c
@@ -0,0 +1,957 @@
+/*
+ * Topcliff PCH DMA controller driver
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pch_dma.h>
+
+#define DRV_NAME "pch-dma"
+
+#define DMA_CTL0_DISABLE 0x0
+#define DMA_CTL0_SG 0x1
+#define DMA_CTL0_ONESHOT 0x2
+#define DMA_CTL0_MODE_MASK_BITS 0x3
+#define DMA_CTL0_DIR_SHIFT_BITS 2
+#define DMA_CTL0_BITS_PER_CH 4
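+
+/*
+ * Worked example: with 4 control bits per channel, channel 3 owns CTL0
+ * bits 15:12 - its 2-bit mode field sits at bits 13:12 and its direction
+ * bit at bit 14 (DMA_CTL0_BITS_PER_CH * 3 + DMA_CTL0_DIR_SHIFT_BITS),
+ * which is what pdc_set_mode() and pdc_set_dir() below compute.
+ */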
+
+#define DMA_CTL2_START_SHIFT_BITS 8
+#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
+
+#define DMA_STATUS_IDLE 0x0
+#define DMA_STATUS_DESC_READ 0x1
+#define DMA_STATUS_WAIT 0x2
+#define DMA_STATUS_ACCESS 0x3
+#define DMA_STATUS_BITS_PER_CH 2
+#define DMA_STATUS_MASK_BITS 0x3
+#define DMA_STATUS_SHIFT_BITS 16
+#define DMA_STATUS_IRQ(x) (0x1 << (x))
+#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))
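+
+/*
+ * Layout sketch (inferred from the macros above, for illustration): STS0
+ * carries per-channel interrupt flags in bits 7:0 (DMA_STATUS_IRQ(ch)),
+ * error flags in bits 15:8 (DMA_STATUS_ERR(ch)), and from bit 16 upward a
+ * 2-bit state field per channel, decoded by pdc_get_status() below.
+ */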
+
+#define DMA_DESC_WIDTH_SHIFT_BITS 12
+#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
+#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
+#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
+#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
+#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
+#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
+#define DMA_DESC_END_WITHOUT_IRQ 0x0
+#define DMA_DESC_END_WITH_IRQ 0x1
+#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
+#define DMA_DESC_FOLLOW_WITH_IRQ 0x3
+
+#define MAX_CHAN_NR 8
+
+static unsigned int init_nr_desc_per_channel = 64;
+module_param(init_nr_desc_per_channel, uint, 0644);
+MODULE_PARM_DESC(init_nr_desc_per_channel,
+ "initial descriptors per channel (default: 64)");
+
+struct pch_dma_desc_regs {
+ u32 dev_addr;
+ u32 mem_addr;
+ u32 size;
+ u32 next;
+};
+
+struct pch_dma_regs {
+ u32 dma_ctl0;
+ u32 dma_ctl1;
+ u32 dma_ctl2;
+ u32 reserved1;
+ u32 dma_sts0;
+ u32 dma_sts1;
+ u32 reserved2;
+ u32 reserved3;
+ struct pch_dma_desc_regs desc[0];
+};
+
+struct pch_dma_desc {
+ struct pch_dma_desc_regs regs;
+ struct dma_async_tx_descriptor txd;
+ struct list_head desc_node;
+ struct list_head tx_list;
+};
+
+struct pch_dma_chan {
+ struct dma_chan chan;
+ void __iomem *membase;
+ enum dma_data_direction dir;
+ struct tasklet_struct tasklet;
+ unsigned long err_status;
+
+ spinlock_t lock;
+
+ dma_cookie_t completed_cookie;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ unsigned int descs_allocated;
+};
+
+#define PDC_DEV_ADDR 0x00
+#define PDC_MEM_ADDR 0x04
+#define PDC_SIZE 0x08
+#define PDC_NEXT 0x0C
+
+#define channel_readl(pdc, name) \
+ readl((pdc)->membase + PDC_##name)
+#define channel_writel(pdc, name, val) \
+ writel((val), (pdc)->membase + PDC_##name)
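+
+/*
+ * For illustration: channel_writel(pdc, DEV_ADDR, addr) expands to
+ * writel(addr, (pdc)->membase + PDC_DEV_ADDR), so the PDC_* offsets above
+ * must match the register names used at the call sites.
+ */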
+
+struct pch_dma {
+ struct dma_device dma;
+ void __iomem *membase;
+ struct pci_pool *pool;
+ struct pch_dma_regs regs;
+ struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
+ struct pch_dma_chan channels[0];
+};
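+
+/*
+ * Note: channels[0] above is a variable-length tail; pch_dma_probe() below
+ * sizes it at allocation time with
+ * kzalloc(sizeof(struct pch_dma) +
+ * nr_channels * sizeof(struct pch_dma_chan), GFP_KERNEL);
+ */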
+
+#define PCH_DMA_CTL0 0x00
+#define PCH_DMA_CTL1 0x04
+#define PCH_DMA_CTL2 0x08
+#define PCH_DMA_STS0 0x10
+#define PCH_DMA_STS1 0x14
+
+#define dma_readl(pd, name) \
+ readl((pd)->membase + PCH_DMA_##name)
+#define dma_writel(pd, name, val) \
+ writel((val), (pd)->membase + PCH_DMA_##name)
+
+static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct pch_dma_desc, txd);
+}
+
+static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct pch_dma_chan, chan);
+}
+
+static inline struct pch_dma *to_pd(struct dma_device *ddev)
+{
+ return container_of(ddev, struct pch_dma, dma);
+}
+
+static inline struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct device *chan2parent(struct dma_chan *chan)
+{
+ return chan->dev->device.parent;
+}
+
+static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+{
+ return list_first_entry(&pd_chan->active_list,
+ struct pch_dma_desc, desc_node);
+}
+
+static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+{
+ return list_first_entry(&pd_chan->queue,
+ struct pch_dma_desc, desc_node);
+}
+
+static void pdc_enable_irq(struct dma_chan *chan, int enable)
+{
+ struct pch_dma *pd = to_pd(chan->device);
+ u32 val;
+
+ val = dma_readl(pd, CTL2);
+
+ if (enable)
+ val |= 0x1 << chan->chan_id;
+ else
+ val &= ~(0x1 << chan->chan_id);
+
+ dma_writel(pd, CTL2, val);
+
+ dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
+ chan->chan_id, val);
+}
+
+static void pdc_set_dir(struct dma_chan *chan)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+ struct pch_dma *pd = to_pd(chan->device);
+ u32 val;
+
+ val = dma_readl(pd, CTL0);
+
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL0, val);
+
+ dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
+ chan->chan_id, val);
+}
+
+static void pdc_set_mode(struct dma_chan *chan, u32 mode)
+{
+ struct pch_dma *pd = to_pd(chan->device);
+ u32 val;
+
+ val = dma_readl(pd, CTL0);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+
+ dma_writel(pd, CTL0, val);
+
+ dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
+ chan->chan_id, val);
+}
+
+static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma *pd = to_pd(pd_chan->chan.device);
+ u32 val;
+
+ val = dma_readl(pd, STS0);
+ return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+ DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
+}
+
+static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
+{
+ if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+ return true;
+ else
+ return false;
+}
+
+static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
+{
+ struct pch_dma *pd = to_pd(pd_chan->chan.device);
+ u32 val;
+
+ if (!pdc_is_idle(pd_chan)) {
+ dev_err(chan2dev(&pd_chan->chan),
+ "BUG: Attempt to start non-idle channel\n");
+ return;
+ }
+
+ channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
+ channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
+ channel_writel(pd_chan, SIZE, desc->regs.size);
+ channel_writel(pd_chan, NEXT, desc->regs.next);
+
+ dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
+ pd_chan->chan.chan_id, desc->regs.dev_addr);
+ dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
+ pd_chan->chan.chan_id, desc->regs.mem_addr);
+ dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
+ pd_chan->chan.chan_id, desc->regs.size);
+ dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
+ pd_chan->chan.chan_id, desc->regs.next);
+
+ if (list_empty(&desc->tx_list))
+ pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
+ else
+ pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
+
+ val = dma_readl(pd, CTL2);
+ val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
+ dma_writel(pd, CTL2, val);
+}
+
+static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
+ struct pch_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ list_splice_init(&desc->tx_list, &pd_chan->free_list);
+ list_move(&desc->desc_node, &pd_chan->free_list);
+
+ if (callback)
+ callback(param);
+}
+
+static void pdc_complete_all(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ BUG_ON(!pdc_is_idle(pd_chan));
+
+ if (!list_empty(&pd_chan->queue))
+ pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
+
+ list_splice_init(&pd_chan->active_list, &list);
+ list_splice_init(&pd_chan->queue, &pd_chan->active_list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ pdc_chain_complete(pd_chan, desc);
+}
+
+static void pdc_handle_error(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma_desc *bad_desc;
+
+ bad_desc = pdc_first_active(pd_chan);
+ list_del(&bad_desc->desc_node);
+
+ list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
+
+ if (!list_empty(&pd_chan->active_list))
+ pdc_dostart(pd_chan, pdc_first_active(pd_chan));
+
+ dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
+ dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
+ bad_desc->txd.cookie);
+
+ pdc_chain_complete(pd_chan, bad_desc);
+}
+
+static void pdc_advance_work(struct pch_dma_chan *pd_chan)
+{
+ if (list_empty(&pd_chan->active_list) ||
+ list_is_singular(&pd_chan->active_list)) {
+ pdc_complete_all(pd_chan);
+ } else {
+ pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
+ pdc_dostart(pd_chan, pdc_first_active(pd_chan));
+ }
+}
+
+static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
+ struct pch_dma_desc *desc)
+{
+ dma_cookie_t cookie = pd_chan->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ pd_chan->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct pch_dma_desc *desc = to_pd_desc(txd);
+ struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&pd_chan->lock);
+ cookie = pdc_assign_cookie(pd_chan, desc);
+
+ if (list_empty(&pd_chan->active_list)) {
+ list_add_tail(&desc->desc_node, &pd_chan->active_list);
+ pdc_dostart(pd_chan, desc);
+ } else {
+ list_add_tail(&desc->desc_node, &pd_chan->queue);
+ }
+
+ spin_unlock_bh(&pd_chan->lock);
+ return 0;
+}
+
+static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
+{
+ struct pch_dma_desc *desc = NULL;
+ struct pch_dma *pd = to_pd(chan->device);
+ dma_addr_t addr;
+
+ desc = pci_pool_alloc(pd->pool, flags, &addr);
+ if (desc) {
+ memset(desc, 0, sizeof(struct pch_dma_desc));
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = pd_tx_submit;
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.phys = addr;
+ }
+
+ return desc;
+}
+
+static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma_desc *desc, *_d;
+ struct pch_dma_desc *ret = NULL;
+ int i = 0;
+
+ spin_lock_bh(&pd_chan->lock);
+ list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
+ i++;
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del(&desc->desc_node);
+ ret = desc;
+ break;
+ }
+ dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
+ }
+ spin_unlock_bh(&pd_chan->lock);
+ dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
+
+ if (!ret) {
+ ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
+ if (ret) {
+ spin_lock_bh(&pd_chan->lock);
+ pd_chan->descs_allocated++;
+ spin_unlock_bh(&pd_chan->lock);
+ } else {
+ dev_err(chan2dev(&pd_chan->chan),
+ "failed to alloc desc\n");
+ }
+ }
+
+ return ret;
+}
+
+static void pdc_desc_put(struct pch_dma_chan *pd_chan,
+ struct pch_dma_desc *desc)
+{
+ if (desc) {
+ spin_lock_bh(&pd_chan->lock);
+ list_splice_init(&desc->tx_list, &pd_chan->free_list);
+ list_add(&desc->desc_node, &pd_chan->free_list);
+ spin_unlock_bh(&pd_chan->lock);
+ }
+}
+
+static int pd_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+ struct pch_dma_desc *desc;
+ LIST_HEAD(tmp_list);
+ int i;
+
+ if (!pdc_is_idle(pd_chan)) {
+ dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
+ return -EIO;
+ }
+
+ if (!list_empty(&pd_chan->free_list))
+ return pd_chan->descs_allocated;
+
+ for (i = 0; i < init_nr_desc_per_channel; i++) {
+ desc = pdc_alloc_desc(chan, GFP_KERNEL);
+
+ if (!desc) {
+ dev_warn(chan2dev(chan),
+ "Only allocated %d initial descriptors\n", i);
+ break;
+ }
+
+ list_add_tail(&desc->desc_node, &tmp_list);
+ }
+
+ spin_lock_bh(&pd_chan->lock);
+ list_splice(&tmp_list, &pd_chan->free_list);
+ pd_chan->descs_allocated = i;
+ pd_chan->completed_cookie = chan->cookie = 1;
+ spin_unlock_bh(&pd_chan->lock);
+
+ pdc_enable_irq(chan, 1);
+ pdc_set_dir(chan);
+
+ return pd_chan->descs_allocated;
+}
+
+static void pd_free_chan_resources(struct dma_chan *chan)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+ struct pch_dma *pd = to_pd(chan->device);
+ struct pch_dma_desc *desc, *_d;
+ LIST_HEAD(tmp_list);
+
+ BUG_ON(!pdc_is_idle(pd_chan));
+ BUG_ON(!list_empty(&pd_chan->active_list));
+ BUG_ON(!list_empty(&pd_chan->queue));
+
+ spin_lock_bh(&pd_chan->lock);
+ list_splice_init(&pd_chan->free_list, &tmp_list);
+ pd_chan->descs_allocated = 0;
+ spin_unlock_bh(&pd_chan->lock);
+
+ list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
+ pci_pool_free(pd->pool, desc, desc->txd.phys);
+
+ pdc_enable_irq(chan, 0);
+}
+
+static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_completed;
+ int ret;
+
+ spin_lock_bh(&pd_chan->lock);
+ last_completed = pd_chan->completed_cookie;
+ last_used = chan->cookie;
+ spin_unlock_bh(&pd_chan->lock);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+ dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+ return ret;
+}
+
+static void pd_issue_pending(struct dma_chan *chan)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+
+ if (pdc_is_idle(pd_chan)) {
+ spin_lock_bh(&pd_chan->lock);
+ pdc_advance_work(pd_chan);
+ spin_unlock_bh(&pd_chan->lock);
+ }
+}
+
+static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+ struct pch_dma_slave *pd_slave = chan->private;
+ struct pch_dma_desc *first = NULL;
+ struct pch_dma_desc *prev = NULL;
+ struct pch_dma_desc *desc = NULL;
+ struct scatterlist *sg;
+ dma_addr_t reg;
+ int i;
+
+ if (unlikely(!sg_len)) {
+ dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
+ return NULL;
+ }
+
+ if (direction == DMA_FROM_DEVICE)
+ reg = pd_slave->rx_reg;
+ else if (direction == DMA_TO_DEVICE)
+ reg = pd_slave->tx_reg;
+ else
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc = pdc_desc_get(pd_chan);
+
+ if (!desc)
+ goto err_desc_get;
+
+ desc->regs.dev_addr = reg;
+ desc->regs.mem_addr = sg_phys(sg);
+ desc->regs.size = sg_dma_len(sg);
+ desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
+
+ switch (pd_slave->width) {
+ case PCH_DMA_WIDTH_1_BYTE:
+ if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
+ goto err_desc_get;
+ desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
+ break;
+ case PCH_DMA_WIDTH_2_BYTES:
+ if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
+ goto err_desc_get;
+ desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
+ break;
+ case PCH_DMA_WIDTH_4_BYTES:
+ if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
+ goto err_desc_get;
+ desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
+ break;
+ default:
+ goto err_desc_get;
+ }
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->regs.next |= desc->txd.phys;
+ list_add_tail(&desc->desc_node, &first->tx_list);
+ }
+
+ prev = desc;
+ }
+
+ if (flags & DMA_PREP_INTERRUPT)
+ desc->regs.next = DMA_DESC_END_WITH_IRQ;
+ else
+ desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
+
+ first->txd.cookie = -EBUSY;
+ desc->txd.flags = flags;
+
+ return &first->txd;
+
+err_desc_get:
+ dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
+ pdc_desc_put(pd_chan, first);
+ return NULL;
+}
+
+static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+ struct pch_dma_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_bh(&pd_chan->lock);
+
+ pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
+
+ list_splice_init(&pd_chan->active_list, &list);
+ list_splice_init(&pd_chan->queue, &list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ pdc_chain_complete(pd_chan, desc);
+
+ spin_unlock_bh(&pd_chan->lock);
+
+ return 0;
+}
+
+static void pdc_tasklet(unsigned long data)
+{
+ struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+
+ if (!pdc_is_idle(pd_chan)) {
+ dev_err(chan2dev(&pd_chan->chan),
+ "BUG: handle non-idle channel in tasklet\n");
+ return;
+ }
+
+ spin_lock_bh(&pd_chan->lock);
+ if (test_and_clear_bit(0, &pd_chan->err_status))
+ pdc_handle_error(pd_chan);
+ else
+ pdc_advance_work(pd_chan);
+ spin_unlock_bh(&pd_chan->lock);
+}
+
+static irqreturn_t pd_irq(int irq, void *devid)
+{
+ struct pch_dma *pd = (struct pch_dma *)devid;
+ struct pch_dma_chan *pd_chan;
+ u32 sts0;
+ int i;
+ int ret = IRQ_NONE;
+
+ sts0 = dma_readl(pd, STS0);
+
+ dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
+
+ for (i = 0; i < pd->dma.chancnt; i++) {
+ pd_chan = &pd->channels[i];
+
+ if (sts0 & DMA_STATUS_IRQ(i)) {
+ if (sts0 & DMA_STATUS_ERR(i))
+ set_bit(0, &pd_chan->err_status);
+
+ tasklet_schedule(&pd_chan->tasklet);
+ ret = IRQ_HANDLED;
+ }
+
+ }
+
+ /* clear interrupt bits in status register */
+ dma_writel(pd, STS0, sts0);
+
+ return ret;
+}
+
+static void pch_dma_save_regs(struct pch_dma *pd)
+{
+ struct pch_dma_chan *pd_chan;
+ struct dma_chan *chan, *_c;
+ int i = 0;
+
+ pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
+ pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
+ pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+
+ list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
+ pd_chan = to_pd_chan(chan);
+
+ pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
+ pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
+ pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
+ pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
+
+ i++;
+ }
+}
+
+static void pch_dma_restore_regs(struct pch_dma *pd)
+{
+ struct pch_dma_chan *pd_chan;
+ struct dma_chan *chan, *_c;
+ int i = 0;
+
+ dma_writel(pd, CTL0, pd->regs.dma_ctl0);
+ dma_writel(pd, CTL1, pd->regs.dma_ctl1);
+ dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+
+ list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
+ pd_chan = to_pd_chan(chan);
+
+ channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
+ channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
+ channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
+ channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
+
+ i++;
+ }
+}
+
+static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct pch_dma *pd = pci_get_drvdata(pdev);
+
+ if (pd)
+ pch_dma_save_regs(pd);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int pch_dma_resume(struct pci_dev *pdev)
+{
+ struct pch_dma *pd = pci_get_drvdata(pdev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_dbg(&pdev->dev, "failed to enable device\n");
+ return err;
+ }
+
+ if (pd)
+ pch_dma_restore_regs(pd);
+
+ return 0;
+}
+
+static int __devinit pch_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pch_dma *pd;
+ struct pch_dma_regs *regs;
+ unsigned int nr_channels;
+ int err;
+ int i;
+
+ nr_channels = id->driver_data;
+ pd = kzalloc(sizeof(struct pch_dma)+
+ sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, pd);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device\n");
+ goto err_free_mem;
+ }
+
+ if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Cannot find proper base address\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set proper DMA config\n");
+ goto err_free_res;
+ }
+
+ regs = pd->membase = pci_iomap(pdev, 1, 0);
+ if (!pd->membase) {
+ dev_err(&pdev->dev, "Cannot map MMIO registers\n");
+ err = -ENOMEM;
+ goto err_free_res;
+ }
+
+ pci_set_master(pdev);
+
+ err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ goto err_iounmap;
+ }
+
+ pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
+ sizeof(struct pch_dma_desc), 4, 0);
+ if (!pd->pool) {
+ dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ pd->dma.dev = &pdev->dev;
+ pd->dma.chancnt = nr_channels;
+
+ INIT_LIST_HEAD(&pd->dma.channels);
+
+ for (i = 0; i < nr_channels; i++) {
+ struct pch_dma_chan *pd_chan = &pd->channels[i];
+
+ pd_chan->chan.device = &pd->dma;
+ pd_chan->chan.cookie = 1;
+ pd_chan->chan.chan_id = i;
+
+ pd_chan->membase = &regs->desc[i];
+
+ pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ spin_lock_init(&pd_chan->lock);
+
+ INIT_LIST_HEAD(&pd_chan->active_list);
+ INIT_LIST_HEAD(&pd_chan->queue);
+ INIT_LIST_HEAD(&pd_chan->free_list);
+
+ tasklet_init(&pd_chan->tasklet, pdc_tasklet,
+ (unsigned long)pd_chan);
+ list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
+ }
+
+ dma_cap_zero(pd->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);
+
+ pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
+ pd->dma.device_free_chan_resources = pd_free_chan_resources;
+ pd->dma.device_tx_status = pd_tx_status;
+ pd->dma.device_issue_pending = pd_issue_pending;
+ pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
+ pd->dma.device_control = pd_device_control;
+
+ err = dma_async_device_register(&pd->dma);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register DMA device\n");
+ goto err_free_pool;
+ }
+
+ return 0;
+
+err_free_pool:
+ pci_pool_destroy(pd->pool);
+err_free_irq:
+ free_irq(pdev->irq, pd);
+err_iounmap:
+ pci_iounmap(pdev, pd->membase);
+err_free_res:
+ pci_release_regions(pdev);
+err_disable_pdev:
+ pci_disable_device(pdev);
+err_free_mem:
+ return err;
+}
+
+static void __devexit pch_dma_remove(struct pci_dev *pdev)
+{
+ struct pch_dma *pd = pci_get_drvdata(pdev);
+ struct pch_dma_chan *pd_chan;
+ struct dma_chan *chan, *_c;
+
+ if (pd) {
+ dma_async_device_unregister(&pd->dma);
+
+ list_for_each_entry_safe(chan, _c, &pd->dma.channels,
+ device_node) {
+ pd_chan = to_pd_chan(chan);
+
+ tasklet_disable(&pd_chan->tasklet);
+ tasklet_kill(&pd_chan->tasklet);
+ }
+
+ pci_pool_destroy(pd->pool);
+ free_irq(pdev->irq, pd);
+ pci_iounmap(pdev, pd->membase);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(pd);
+ }
+}
+
+/* PCI Device ID of DMA device */
+#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810
+#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815
+
+static const struct pci_device_id pch_dma_id_table[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+ { 0, }, /* terminating entry */
+};
+
+static struct pci_driver pch_dma_driver = {
+ .name = DRV_NAME,
+ .id_table = pch_dma_id_table,
+ .probe = pch_dma_probe,
+ .remove = __devexit_p(pch_dma_remove),
+#ifdef CONFIG_PM
+ .suspend = pch_dma_suspend,
+ .resume = pch_dma_resume,
+#endif
+};
+
+static int __init pch_dma_init(void)
+{
+ return pci_register_driver(&pch_dma_driver);
+}
+
+static void __exit pch_dma_exit(void)
+{
+ pci_unregister_driver(&pch_dma_driver);
+}
+
+module_init(pch_dma_init);
+module_exit(pch_dma_exit);
+
+MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
+MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 7c3747902a3..0d58a4a4487 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4257,11 +4257,11 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
struct ppc440spe_adma_chan *chan,
int *initcode)
{
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct device_node *np;
int ret;
- ofdev = container_of(adev->dev, struct of_device, dev);
+ ofdev = container_of(adev->dev, struct platform_device, dev);
np = ofdev->dev.of_node;
if (adev->id != PPC440SPE_XOR_ID) {
adev->err_irq = irq_of_parse_and_map(np, 1);
@@ -4393,7 +4393,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
/**
* ppc440spe_adma_probe - probe the asynch device
*/
-static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
+static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -4625,7 +4625,7 @@ out:
/**
* ppc440spe_adma_remove - remove the asynch device
*/
-static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
+static int __devexit ppc440spe_adma_remove(struct platform_device *ofdev)
{
struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index a2a519fd2a2..eb6b54dbb80 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
sh_chan = to_sh_chan(chan);
param = chan->private;
- slave_addr = param->config->addr;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
return NULL;
}
+ slave_addr = param->config->addr;
+
/*
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
@@ -816,7 +817,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
return ret;
}
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
@@ -1057,7 +1058,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!chanirq_res)
@@ -1082,7 +1083,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
#else
chanirq_res = errirq_res;
-#endif /* CONFIG_CPU_SH4 */
+#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
if (chanirq_res->start == chanirq_res->end &&
!platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
@@ -1129,7 +1130,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
chan_probe_err:
sh_dmae_chan_remove(shdev);
eirqres:
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
free_irq(errirq, shdev);
eirq_err:
#endif
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c426829f6ab..17e2600a00c 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -30,14 +30,16 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
+/* Hardware requirement on LCLA alignment */
+#define LCLA_ALIGNMENT 0x40000
+/* Attempts before giving up to trying to get pages that are aligned */
+#define MAX_LCLA_ALLOC_ATTEMPTS 256
+
+/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
-/* The number of free d40_desc to keep in memory before starting
- * to kfree() them */
-#define D40_DESC_CACHE_SIZE 50
-
/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8
@@ -68,9 +70,9 @@ enum d40_command {
*/
struct d40_lli_pool {
void *base;
- int size;
+ int size;
/* Space for dst and src, plus an extra for padding */
- u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
+ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
@@ -81,9 +83,10 @@ struct d40_lli_pool {
* lli_len equals one.
* @lli_log: Same as above but for logical channels.
* @lli_pool: The pool with two entries pre-allocated.
- * @lli_len: Number of LLI's in lli_pool
- * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len
- * then this transfer job is done.
+ * @lli_len: Number of LLIs of the current descriptor.
+ * @lli_count: Number of transferred LLIs.
+ * @lli_tx_len: Max number of LLIs per transfer; there can be
+ * many transfers for one descriptor.
* @txd: DMA engine struct. Used for among other things for communication
* during a transfer.
* @node: List entry.
@@ -100,8 +103,9 @@ struct d40_desc {
struct d40_log_lli_bidir lli_log;
struct d40_lli_pool lli_pool;
- u32 lli_len;
- u32 lli_tcount;
+ int lli_len;
+ int lli_count;
+ u32 lli_tx_len;
struct dma_async_tx_descriptor txd;
struct list_head node;
@@ -113,18 +117,20 @@ struct d40_desc {
/**
* struct d40_lcla_pool - LCLA pool settings and data.
*
- * @base: The virtual address of LCLA.
- * @phy: Physical base address of LCLA.
- * @base_size: size of lcla.
+ * @base: The virtual address of LCLA. 18 bit aligned.
+ * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
+ * This pointer is only there for clean-up on error.
+ * @pages: The number of pages needed for all physical channels.
+ * Only used later for clean-up on error
* @lock: Lock to protect the content in this struct.
- * @alloc_map: Mapping between physical channel and LCLA entries.
+ * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
* @num_blocks: The number of entries of alloc_map. Equals to the
* number of physical channels.
*/
struct d40_lcla_pool {
void *base;
- dma_addr_t phy;
- resource_size_t base_size;
+ void *base_unaligned;
+ int pages;
spinlock_t lock;
u32 *alloc_map;
int num_blocks;
@@ -163,15 +169,14 @@ struct d40_base;
* @pending_tx: The number of pending transfers. Used between interrupt handler
* and tasklet.
* @busy: Set to true when transfer is ongoing on this channel.
- * @phy_chan: Pointer to physical channel which this instance runs on.
+ * @phy_chan: Pointer to physical channel which this instance runs on. If this
+ * pointer is NULL, then the channel is not allocated.
* @chan: DMA engine handle.
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
* transfer and call client callback.
* @client: Cliented owned descriptor list.
* @active: Active descriptor.
* @queue: Queued jobs.
- * @free: List of free descripts, ready to be reused.
- * @free_len: Number of descriptors in the free list.
* @dma_cfg: The client configuration of this dma channel.
* @base: Pointer to the device instance struct.
* @src_def_cfg: Default cfg register setting for src.
@@ -195,8 +200,6 @@ struct d40_chan {
struct list_head client;
struct list_head active;
struct list_head queue;
- struct list_head free;
- int free_len;
struct stedma40_chan_cfg dma_cfg;
struct d40_base *base;
/* Default register configurations */
@@ -205,6 +208,9 @@ struct d40_chan {
struct d40_def_lcsp log_def;
struct d40_lcla_elem lcla;
struct d40_log_lli_full *lcpa;
+ /* Runtime reconfiguration */
+ dma_addr_t runtime_addr;
+ enum dma_data_direction runtime_direction;
};
/**
@@ -215,6 +221,7 @@ struct d40_chan {
* the same physical register.
* @dev: The device structure.
* @virtbase: The virtual base address of the DMA's register.
+ * @rev: silicon revision detected.
* @clk: Pointer to the DMA clock structure.
* @phy_start: Physical memory start of the DMA registers.
* @phy_size: Size of the DMA register map.
@@ -240,12 +247,14 @@ struct d40_chan {
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
+ * @desc_slab: cache for descriptors.
*/
struct d40_base {
spinlock_t interrupt_lock;
spinlock_t execmd_lock;
struct device *dev;
void __iomem *virtbase;
+ u8 rev:4;
struct clk *clk;
phys_addr_t phy_start;
resource_size_t phy_size;
@@ -266,6 +275,7 @@ struct d40_base {
void *lcpa_base;
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
+ struct kmem_cache *desc_slab;
};
/**
@@ -365,11 +375,6 @@ static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
return cookie;
}
-static void d40_desc_reset(struct d40_desc *d40d)
-{
- d40d->lli_tcount = 0;
-}
-
static void d40_desc_remove(struct d40_desc *d40d)
{
list_del(&d40d->node);
@@ -377,7 +382,6 @@ static void d40_desc_remove(struct d40_desc *d40d)
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
- struct d40_desc *desc;
struct d40_desc *d;
struct d40_desc *_d;
@@ -386,36 +390,21 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
if (async_tx_test_ack(&d->txd)) {
d40_pool_lli_free(d);
d40_desc_remove(d);
- desc = d;
- goto out;
+ break;
}
- }
-
- if (list_empty(&d40c->free)) {
- /* Alloc new desc because we're out of used ones */
- desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
- if (desc == NULL)
- goto out;
- INIT_LIST_HEAD(&desc->node);
} else {
- /* Reuse an old desc. */
- desc = list_first_entry(&d40c->free,
- struct d40_desc,
- node);
- list_del(&desc->node);
- d40c->free_len--;
+ d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
+ if (d != NULL) {
+ memset(d, 0, sizeof(struct d40_desc));
+ INIT_LIST_HEAD(&d->node);
+ }
}
-out:
- return desc;
+ return d;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
- if (d40c->free_len < D40_DESC_CACHE_SIZE) {
- list_add_tail(&d40d->node, &d40c->free);
- d40c->free_len++;
- } else
- kfree(d40d);
+ kmem_cache_free(d40c->base->desc_slab, d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
@@ -456,37 +445,41 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
/* Support functions for logical channels */
-static int d40_lcla_id_get(struct d40_chan *d40c,
- struct d40_lcla_pool *pool)
+static int d40_lcla_id_get(struct d40_chan *d40c)
{
int src_id = 0;
int dst_id = 0;
struct d40_log_lli *lcla_lidx_base =
- pool->base + d40c->phy_chan->num * 1024;
+ d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
int i;
int lli_per_log = d40c->base->plat_data->llis_per_log;
+ unsigned long flags;
if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
return 0;
- if (pool->num_blocks > 32)
+ if (d40c->base->lcla_pool.num_blocks > 32)
return -EINVAL;
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
- for (i = 0; i < pool->num_blocks; i++) {
- if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
- pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
+ if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
+ (0x1 << i))) {
+ d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
+ (0x1 << i);
break;
}
}
src_id = i;
- if (src_id >= pool->num_blocks)
+ if (src_id >= d40c->base->lcla_pool.num_blocks)
goto err;
- for (; i < pool->num_blocks; i++) {
- if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
- pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ for (; i < d40c->base->lcla_pool.num_blocks; i++) {
+ if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
+ (0x1 << i))) {
+ d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
+ (0x1 << i);
break;
}
}
@@ -500,28 +493,13 @@ static int d40_lcla_id_get(struct d40_chan *d40c,
d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
-
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
return 0;
err:
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
return -EINVAL;
}
-static void d40_lcla_id_put(struct d40_chan *d40c,
- struct d40_lcla_pool *pool,
- int id)
-{
- if (id < 0)
- return;
-
- d40c->lcla.src_id = -1;
- d40c->lcla.dst_id = -1;
-
- spin_lock(&pool->lock);
- pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
- spin_unlock(&pool->lock);
-}
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -530,6 +508,7 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
void __iomem *active_reg;
int ret = 0;
unsigned long flags;
+ u32 wmask;
spin_lock_irqsave(&d40c->base->execmd_lock, flags);
@@ -547,7 +526,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
goto done;
}
- writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
+ wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
+ writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
+ active_reg);
if (command == D40_DMA_SUSPEND_REQ) {
@@ -586,8 +567,7 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
- struct d40_desc *d;
- struct d40_desc *_d;
+ unsigned long flags;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
@@ -605,19 +585,17 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
- /* Release client owned descriptors */
- if (!list_empty(&d40c->client))
- list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d);
- d40_desc_remove(d);
- /* Return desc to free-list */
- d40_desc_free(d40c, d40d);
- }
+ spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+ d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
+ (~(0x1 << d40c->lcla.dst_id));
+ d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
+ (~(0x1 << d40c->lcla.src_id));
+
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
- d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
- d40c->lcla.src_id);
- d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
- d40c->lcla.dst_id);
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
d40c->pending_tx = 0;
d40c->busy = false;
@@ -628,6 +606,7 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
u32 val;
unsigned long flags;
+ /* Notice that disable requires the physical channel to be stopped */
if (do_enable)
val = D40_ACTIVATE_EVENTLINE;
else
@@ -732,31 +711,34 @@ static int d40_config_write(struct d40_chan *d40c)
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
-
if (d40d->lli_phy.dst && d40d->lli_phy.src) {
d40_phy_lli_write(d40c->base->virtbase,
d40c->phy_chan->num,
d40d->lli_phy.dst,
d40d->lli_phy.src);
- d40d->lli_tcount = d40d->lli_len;
} else if (d40d->lli_log.dst && d40d->lli_log.src) {
- u32 lli_len;
struct d40_log_lli *src = d40d->lli_log.src;
struct d40_log_lli *dst = d40d->lli_log.dst;
-
- src += d40d->lli_tcount;
- dst += d40d->lli_tcount;
-
- if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
- lli_len = d40d->lli_len;
- else
- lli_len = d40c->base->plat_data->llis_per_log;
- d40d->lli_tcount += lli_len;
- d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
- d40c->lcla.dst,
- dst, src,
- d40c->base->plat_data->llis_per_log);
+ int s;
+
+ src += d40d->lli_count;
+ dst += d40d->lli_count;
+ s = d40_log_lli_write(d40c->lcpa,
+ d40c->lcla.src, d40c->lcla.dst,
+ dst, src,
+ d40c->base->plat_data->llis_per_log);
+
+ /* If s is zero, the job is not linked */
+ if (s > 0) {
+ (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
+ s * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
+ s * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ }
}
+ d40d->lli_count += d40d->lli_tx_len;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -780,18 +762,21 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
static int d40_start(struct d40_chan *d40c)
{
- int err;
+ if (d40c->base->rev == 0) {
+ int err;
- if (d40c->log_num != D40_PHY_CHAN) {
- err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (err)
- return err;
- d40_config_set_event(d40c, true);
+ if (d40c->log_num != D40_PHY_CHAN) {
+ err = d40_channel_execute_command(d40c,
+ D40_DMA_SUSPEND_REQ);
+ if (err)
+ return err;
+ }
}
- err = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ if (d40c->log_num != D40_PHY_CHAN)
+ d40_config_set_event(d40c, true);
- return err;
+ return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
@@ -838,7 +823,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40d == NULL)
return;
- if (d40d->lli_tcount < d40d->lli_len) {
+ if (d40d->lli_count < d40d->lli_len) {
d40_desc_load(d40c, d40d);
/* Start dma job */
@@ -891,7 +876,6 @@ static void dma_tasklet(unsigned long data)
/* Return desc to free-list */
d40_desc_free(d40c, d40d_fin);
} else {
- d40_desc_reset(d40d_fin);
if (!d40d_fin->is_in_client_list) {
d40_desc_remove(d40d_fin);
list_add_tail(&d40d_fin->node, &d40c->client);
@@ -975,7 +959,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
if (!il[row].is_error)
dma_tc_handle(d40c);
else
- dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
+ dev_err(base->dev,
+ "[%s] IRQ chan: %ld offset %d idx %d\n",
__func__, chan, il[row].offset, idx);
spin_unlock(&d40c->lock);
@@ -1134,7 +1119,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
int j;
int log_num;
bool is_src;
- bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ bool is_log = (d40c->dma_cfg.channel_type &
+ STEDMA40_CHANNEL_IN_OPER_MODE)
== STEDMA40_CHANNEL_IN_LOG_MODE;
@@ -1169,8 +1155,10 @@ static int d40_allocate_channel(struct d40_chan *d40c)
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
for (i = phy_num; i < phy_num + 2; i++) {
- if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log))
+ if (d40_alloc_mask_set(&phys[i],
+ is_src,
+ 0,
+ is_log))
goto found_phy;
}
}
@@ -1221,30 +1209,6 @@ out:
}
-static int d40_config_chan(struct d40_chan *d40c,
- struct stedma40_chan_cfg *info)
-{
-
- /* Fill in basic CFG register values */
- d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
- &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
-
- if (d40c->log_num != D40_PHY_CHAN) {
- d40_log_cfg(&d40c->dma_cfg,
- &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
-
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
- d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.src_dev_type * 32;
- else
- d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.dst_dev_type * 32 + 16;
- }
-
- /* Write channel configuration to the DMA */
- return d40_config_write(d40c);
-}
-
static int d40_config_memcpy(struct d40_chan *d40c)
{
dma_cap_mask_t cap = d40c->chan.device->cap_mask;
@@ -1272,13 +1236,25 @@ static int d40_free_dma(struct d40_chan *d40c)
{
int res = 0;
- u32 event, dir;
+ u32 event;
struct d40_phy_res *phy = d40c->phy_chan;
bool is_src;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
/* Terminate all queued and active transfers */
d40_term_all(d40c);
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d);
+ }
+
if (phy == NULL) {
dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
__func__);
@@ -1292,22 +1268,12 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
-
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res) {
- dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
- __func__);
- return res;
- }
-
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- dir = D40_CHAN_REG_SDLNK;
is_src = false;
} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- dir = D40_CHAN_REG_SSLNK;
is_src = true;
} else {
dev_err(&d40c->chan.dev->device,
@@ -1315,16 +1281,17 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res) {
+ dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
+ __func__);
+ return res;
+ }
+
if (d40c->log_num != D40_PHY_CHAN) {
- /*
- * Release logical channel, deactivate the event line during
- * the time physical res is suspended.
- */
- writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
- D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- phy->num * D40_DREG_PCDELTA + dir);
+ /* Release logical channel, deactivate the event line */
+ d40_config_set_event(d40c, false);
d40c->base->lookup_log_chans[d40c->log_num] = NULL;
/*
@@ -1345,8 +1312,9 @@ static int d40_free_dma(struct d40_chan *d40c)
}
return 0;
}
- } else
- d40_alloc_mask_free(phy, is_src, 0);
+ } else {
+ (void) d40_alloc_mask_free(phy, is_src, 0);
+ }
/* Release physical channel */
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
@@ -1361,8 +1329,6 @@ static int d40_free_dma(struct d40_chan *d40c)
d40c->base->lookup_phy_chans[phy->num] = NULL;
return 0;
-
-
}
static int d40_pause(struct dma_chan *chan)
@@ -1370,7 +1336,6 @@ static int d40_pause(struct dma_chan *chan)
struct d40_chan *d40c =
container_of(chan, struct d40_chan, chan);
int res;
-
unsigned long flags;
spin_lock_irqsave(&d40c->lock, flags);
@@ -1397,7 +1362,6 @@ static bool d40_is_paused(struct d40_chan *d40c)
void __iomem *active_reg;
u32 status;
u32 event;
- int res;
spin_lock_irqsave(&d40c->lock, flags);
@@ -1416,10 +1380,6 @@ static bool d40_is_paused(struct d40_chan *d40c)
goto _exit;
}
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res != 0)
- goto _exit;
-
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
@@ -1436,12 +1396,6 @@ static bool d40_is_paused(struct d40_chan *d40c)
if (status != D40_DMA_RUN)
is_paused = true;
-
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c))
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
-
_exit:
spin_unlock_irqrestore(&d40c->lock, flags);
return is_paused;
@@ -1468,13 +1422,14 @@ static u32 d40_residue(struct d40_chan *d40c)
u32 num_elt;
if (d40c->log_num != D40_PHY_CHAN)
- num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
>> D40_MEM_LCSP2_ECNT_POS;
else
num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
d40c->phy_chan->num * D40_DREG_PCDELTA +
D40_CHAN_REG_SDELT) &
- D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
+ D40_SREG_ELEM_PHY_ECNT_MASK) >>
+ D40_SREG_ELEM_PHY_ECNT_POS;
return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
@@ -1487,20 +1442,21 @@ static int d40_resume(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
- if (d40c->log_num != D40_PHY_CHAN) {
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res)
- goto out;
+ if (d40c->base->rev == 0)
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_SUSPEND_REQ);
+ goto no_suspend;
+ }
- /* If bytes left to transfer or linked tx resume job */
- if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+ /* If bytes left to transfer or linked tx resume job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+ if (d40c->log_num != D40_PHY_CHAN)
d40_config_set_event(d40c, true);
- res = d40_channel_execute_command(d40c, D40_DMA_RUN);
- }
- } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
-out:
+no_suspend:
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1534,8 +1490,10 @@ int stedma40_set_psize(struct dma_chan *chan,
if (d40c->log_num != D40_PHY_CHAN) {
d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
- d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
- d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ d40c->log_def.lcsp1 |= src_psize <<
+ D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ d40c->log_def.lcsp3 |= dst_psize <<
+ D40_MEM_LCSP1_SCFG_PSIZE_POS;
goto out;
}
@@ -1566,37 +1524,42 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
struct scatterlist *sgl_dst,
struct scatterlist *sgl_src,
unsigned int sgl_len,
- unsigned long flags)
+ unsigned long dma_flags)
{
int res;
struct d40_desc *d40d;
struct d40_chan *d40c = container_of(chan, struct d40_chan,
chan);
- unsigned long flg;
- int lli_max = d40c->base->plat_data->llis_per_log;
+ unsigned long flags;
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unallocated channel.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
- spin_lock_irqsave(&d40c->lock, flg);
+ spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
if (d40d == NULL)
goto err;
- memset(d40d, 0, sizeof(struct d40_desc));
d40d->lli_len = sgl_len;
-
- d40d->txd.flags = flags;
+ d40d->lli_tx_len = d40d->lli_len;
+ d40d->txd.flags = dma_flags;
if (d40c->log_num != D40_PHY_CHAN) {
+ if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
+ d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
+
if (sgl_len > 1)
/*
* Check if there is space available in lcla. If not,
* split list into 1-length and run only in lcpa
* space.
*/
- if (d40_lcla_id_get(d40c,
- &d40c->base->lcla_pool) != 0)
- lli_max = 1;
+ if (d40_lcla_id_get(d40c) != 0)
+ d40d->lli_tx_len = 1;
if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
@@ -1610,7 +1573,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
d40d->lli_log.src,
d40c->log_def.lcsp1,
d40c->dma_cfg.src_info.data_width,
- flags & DMA_PREP_INTERRUPT, lli_max,
+ dma_flags & DMA_PREP_INTERRUPT,
+ d40d->lli_tx_len,
d40c->base->plat_data->llis_per_log);
(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
@@ -1619,7 +1583,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
d40d->lli_log.dst,
d40c->log_def.lcsp3,
d40c->dma_cfg.dst_info.data_width,
- flags & DMA_PREP_INTERRUPT, lli_max,
+ dma_flags & DMA_PREP_INTERRUPT,
+ d40d->lli_tx_len,
d40c->base->plat_data->llis_per_log);
@@ -1664,11 +1629,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
d40d->txd.tx_submit = d40_tx_submit;
- spin_unlock_irqrestore(&d40c->lock, flg);
+ spin_unlock_irqrestore(&d40c->lock, flags);
return &d40d->txd;
err:
- spin_unlock_irqrestore(&d40c->lock, flg);
+ spin_unlock_irqrestore(&d40c->lock, flags);
return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
@@ -1698,46 +1663,66 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
unsigned long flags;
struct d40_chan *d40c =
container_of(chan, struct d40_chan, chan);
-
+ bool is_free_phy;
spin_lock_irqsave(&d40c->lock, flags);
d40c->completed = chan->cookie = 1;
/*
* If no dma configuration is set (channel_type == 0)
- * use default configuration
+ * use default configuration (memcpy)
*/
if (d40c->dma_cfg.channel_type == 0) {
err = d40_config_memcpy(d40c);
- if (err)
- goto err_alloc;
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to configure memcpy channel\n",
+ __func__);
+ goto fail;
+ }
}
+ is_free_phy = (d40c->phy_chan == NULL);
err = d40_allocate_channel(d40c);
if (err) {
dev_err(&d40c->chan.dev->device,
"[%s] Failed to allocate channel\n", __func__);
- goto err_alloc;
+ goto fail;
}
- err = d40_config_chan(d40c, &d40c->dma_cfg);
- if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to configure channel\n",
- __func__);
- goto err_config;
- }
+ /* Fill in basic CFG register values */
+ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return 0;
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_log_cfg(&d40c->dma_cfg,
+ &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
- err_config:
- (void) d40_free_dma(d40c);
- err_alloc:
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+ else
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.dst_dev_type *
+ D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+ }
+
+ /*
+ * Only write channel configuration to the DMA if the physical
+ * resource is free. In case of multiple logical channels
+ * on the same physical resource, only the first write is necessary.
+ */
+ if (is_free_phy) {
+ err = d40_config_write(d40c);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to configure channel\n",
+ __func__);
+ }
+ }
+fail:
spin_unlock_irqrestore(&d40c->lock, flags);
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel allocation failed\n", __func__);
- return -EINVAL;
+ return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
@@ -1747,6 +1732,13 @@ static void d40_free_chan_resources(struct dma_chan *chan)
int err;
unsigned long flags;
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Cannot free unallocated channel\n", __func__);
+ return;
+ }
+
+
spin_lock_irqsave(&d40c->lock, flags);
err = d40_free_dma(d40c);
@@ -1761,15 +1753,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
dma_addr_t dst,
dma_addr_t src,
size_t size,
- unsigned long flags)
+ unsigned long dma_flags)
{
struct d40_desc *d40d;
struct d40_chan *d40c = container_of(chan, struct d40_chan,
chan);
- unsigned long flg;
+ unsigned long flags;
int err = 0;
- spin_lock_irqsave(&d40c->lock, flg);
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel is not allocated.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
if (d40d == NULL) {
@@ -1778,9 +1776,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
goto err;
}
- memset(d40d, 0, sizeof(struct d40_desc));
-
- d40d->txd.flags = flags;
+ d40d->txd.flags = dma_flags;
dma_async_tx_descriptor_init(&d40d->txd, chan);
@@ -1794,6 +1790,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
goto err;
}
d40d->lli_len = 1;
+ d40d->lli_tx_len = 1;
d40_log_fill_lli(d40d->lli_log.src,
src,
@@ -1801,7 +1798,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
0,
d40c->log_def.lcsp1,
d40c->dma_cfg.src_info.data_width,
- true, true);
+ false, true);
d40_log_fill_lli(d40d->lli_log.dst,
dst,
@@ -1848,7 +1845,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
d40d->lli_pool.size, DMA_TO_DEVICE);
}
- spin_unlock_irqrestore(&d40c->lock, flg);
+ spin_unlock_irqrestore(&d40c->lock, flags);
return &d40d->txd;
err_fill_lli:
@@ -1856,7 +1853,7 @@ err_fill_lli:
"[%s] Failed filling in PHY LLI\n", __func__);
d40_pool_lli_free(d40d);
err:
- spin_unlock_irqrestore(&d40c->lock, flg);
+ spin_unlock_irqrestore(&d40c->lock, flags);
return NULL;
}
@@ -1865,11 +1862,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
struct scatterlist *sgl,
unsigned int sg_len,
enum dma_data_direction direction,
- unsigned long flags)
+ unsigned long dma_flags)
{
dma_addr_t dev_addr = 0;
int total_size;
- int lli_max = d40c->base->plat_data->llis_per_log;
if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
@@ -1878,7 +1874,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
}
d40d->lli_len = sg_len;
- d40d->lli_tcount = 0;
+ if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
+ d40d->lli_tx_len = d40d->lli_len;
+ else
+ d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
if (sg_len > 1)
/*
@@ -1886,35 +1885,34 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
* If not, split list into 1-length and run only
* in lcpa space.
*/
- if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
- lli_max = 1;
+ if (d40_lcla_id_get(d40c) != 0)
+ d40d->lli_tx_len = 1;
- if (direction == DMA_FROM_DEVICE) {
- dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- total_size = d40_log_sg_to_dev(&d40c->lcla,
- sgl, sg_len,
- &d40d->lli_log,
- &d40c->log_def,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- direction,
- flags & DMA_PREP_INTERRUPT,
- dev_addr, lli_max,
- d40c->base->plat_data->llis_per_log);
- } else if (direction == DMA_TO_DEVICE) {
- dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
- total_size = d40_log_sg_to_dev(&d40c->lcla,
- sgl, sg_len,
- &d40d->lli_log,
- &d40c->log_def,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- direction,
- flags & DMA_PREP_INTERRUPT,
- dev_addr, lli_max,
- d40c->base->plat_data->llis_per_log);
- } else
+ if (direction == DMA_FROM_DEVICE)
+ if (d40c->runtime_addr)
+ dev_addr = d40c->runtime_addr;
+ else
+ dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ else if (direction == DMA_TO_DEVICE)
+ if (d40c->runtime_addr)
+ dev_addr = d40c->runtime_addr;
+ else
+ dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+
+ else
return -EINVAL;
+
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ dma_flags & DMA_PREP_INTERRUPT,
+ dev_addr, d40d->lli_tx_len,
+ d40c->base->plat_data->llis_per_log);
+
if (total_size < 0)
return -EINVAL;
@@ -1926,7 +1924,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
struct scatterlist *sgl,
unsigned int sgl_len,
enum dma_data_direction direction,
- unsigned long flags)
+ unsigned long dma_flags)
{
dma_addr_t src_dev_addr;
dma_addr_t dst_dev_addr;
@@ -1939,13 +1937,19 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
}
d40d->lli_len = sgl_len;
- d40d->lli_tcount = 0;
+ d40d->lli_tx_len = sgl_len;
if (direction == DMA_FROM_DEVICE) {
dst_dev_addr = 0;
- src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ if (d40c->runtime_addr)
+ src_dev_addr = d40c->runtime_addr;
+ else
+ src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
} else if (direction == DMA_TO_DEVICE) {
- dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ if (d40c->runtime_addr)
+ dst_dev_addr = d40c->runtime_addr;
+ else
+ dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
src_dev_addr = 0;
} else
return -EINVAL;
@@ -1983,34 +1987,38 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
enum dma_data_direction direction,
- unsigned long flags)
+ unsigned long dma_flags)
{
struct d40_desc *d40d;
struct d40_chan *d40c = container_of(chan, struct d40_chan,
chan);
- unsigned long flg;
+ unsigned long flags;
int err;
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Cannot prepare unallocated channel\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
if (d40c->dma_cfg.pre_transfer)
d40c->dma_cfg.pre_transfer(chan,
d40c->dma_cfg.pre_transfer_data,
sg_dma_len(sgl));
- spin_lock_irqsave(&d40c->lock, flg);
+ spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
- spin_unlock_irqrestore(&d40c->lock, flg);
+ spin_unlock_irqrestore(&d40c->lock, flags);
if (d40d == NULL)
return NULL;
- memset(d40d, 0, sizeof(struct d40_desc));
-
if (d40c->log_num != D40_PHY_CHAN)
err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
- direction, flags);
+ direction, dma_flags);
else
err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
- direction, flags);
+ direction, dma_flags);
if (err) {
dev_err(&d40c->chan.dev->device,
"[%s] Failed to prepare %s slave sg job: %d\n",
@@ -2019,7 +2027,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
return NULL;
}
- d40d->txd.flags = flags;
+ d40d->txd.flags = dma_flags;
dma_async_tx_descriptor_init(&d40d->txd, chan);
@@ -2037,6 +2045,13 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
dma_cookie_t last_complete;
int ret;
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Cannot read status of unallocated channel\n",
+ __func__);
+ return -EINVAL;
+ }
+
last_complete = d40c->completed;
last_used = chan->cookie;
@@ -2056,6 +2071,12 @@ static void d40_issue_pending(struct dma_chan *chan)
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
unsigned long flags;
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel is not allocated!\n", __func__);
+ return;
+ }
+
spin_lock_irqsave(&d40c->lock, flags);
/* Busy means that pending jobs are already being processed */
@@ -2065,12 +2086,129 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&d40c->lock, flags);
}
+/* Runtime reconfiguration extension */
+static void d40_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
+ enum dma_slave_buswidth config_addr_width;
+ dma_addr_t config_addr;
+ u32 config_maxburst;
+ enum stedma40_periph_data_width addr_width;
+ int psize;
+
+ if (config->direction == DMA_FROM_DEVICE) {
+ dma_addr_t dev_addr_rx =
+ d40c->base->plat_data->dev_rx[cfg->src_dev_type];
+
+ config_addr = config->src_addr;
+ if (dev_addr_rx)
+ dev_dbg(d40c->base->dev,
+ "channel has a pre-wired RX address %08x "
+ "overriding with %08x\n",
+ dev_addr_rx, config_addr);
+ if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
+ dev_dbg(d40c->base->dev,
+ "channel was not configured for peripheral "
+ "to memory transfer (%d) overriding\n",
+ cfg->dir);
+ cfg->dir = STEDMA40_PERIPH_TO_MEM;
+
+ config_addr_width = config->src_addr_width;
+ config_maxburst = config->src_maxburst;
+
+ } else if (config->direction == DMA_TO_DEVICE) {
+ dma_addr_t dev_addr_tx =
+ d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
+
+ config_addr = config->dst_addr;
+ if (dev_addr_tx)
+ dev_dbg(d40c->base->dev,
+ "channel has a pre-wired TX address %08x "
+ "overriding with %08x\n",
+ dev_addr_tx, config_addr);
+ if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
+ dev_dbg(d40c->base->dev,
+ "channel was not configured for memory "
+ "to peripheral transfer (%d) overriding\n",
+ cfg->dir);
+ cfg->dir = STEDMA40_MEM_TO_PERIPH;
+
+ config_addr_width = config->dst_addr_width;
+ config_maxburst = config->dst_maxburst;
+
+ } else {
+ dev_err(d40c->base->dev,
+ "unrecognized channel direction %d\n",
+ config->direction);
+ return;
+ }
+
+ switch (config_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ addr_width = STEDMA40_BYTE_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ addr_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ addr_width = STEDMA40_WORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+ break;
+ default:
+ dev_err(d40c->base->dev,
+ "illegal peripheral address width "
+ "requested (%d)\n",
+ config->src_addr_width);
+ return;
+ }
+
+ if (config_maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (config_maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (config_maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+
+ /* Set up all the endpoint configs */
+ cfg->src_info.data_width = addr_width;
+ cfg->src_info.psize = psize;
+ cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ cfg->dst_info.data_width = addr_width;
+ cfg->dst_info.psize = psize;
+ cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+ /* These settings will take precedence later */
+ d40c->runtime_addr = config_addr;
+ d40c->runtime_direction = config->direction;
+ dev_dbg(d40c->base->dev,
+ "configured channel %s for %s, data width %d, "
+ "maxburst %d bytes, LE, no flow control\n",
+ dma_chan_name(chan),
+ (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ config_addr_width,
+ config_maxburst);
+}
+
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
unsigned long flags;
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ if (d40c->phy_chan == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel is not allocated!\n", __func__);
+ return -EINVAL;
+ }
+
switch (cmd) {
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&d40c->lock, flags);
@@ -2081,6 +2219,12 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return d40_pause(chan);
case DMA_RESUME:
return d40_resume(chan);
+ case DMA_SLAVE_CONFIG:
+ d40_set_runtime_config(chan,
+ (struct dma_slave_config *) arg);
+ return 0;
+ default:
+ break;
}
/* Other commands are unimplemented */
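A hypothetical client-side sketch (not part of this patch): a slave driver hands a generic dma_slave_config to the channel, and d40_control() above forwards DMA_SLAVE_CONFIG to d40_set_runtime_config(), which maps the bus width to a STEDMA40 data width and maxburst to a PSIZE value. The helper name and the peripheral FIFO address are assumptions for illustration only.

/* Assumes <linux/dmaengine.h> and a channel obtained via dma_request_channel();
 * 'fifo_phys' is a made-up peripheral FIFO bus address. */
static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* -> STEDMA40_WORD_WIDTH */
		.dst_maxburst	= 8,				/* -> STEDMA40_PSIZE_LOG_8 */
	};

	/* Dispatched to d40_set_runtime_config() by d40_control(). */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long) &cfg);
}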
@@ -2111,13 +2255,10 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->log_num = D40_PHY_CHAN;
- INIT_LIST_HEAD(&d40c->free);
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->client);
- d40c->free_len = 0;
-
tasklet_init(&d40c->tasklet, dma_tasklet,
(unsigned long) d40c);
@@ -2243,6 +2384,14 @@ static int __init d40_phy_res_init(struct d40_base *base)
}
spin_lock_init(&base->phy_res[i].lock);
}
+
+ /* Mark disabled channels as occupied */
+ for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
+ base->phy_res[i].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ num_phy_chans_avail--;
+ }
+
dev_info(base->dev, "%d of %d physical DMA channels available\n",
num_phy_chans_avail, base->num_phy_chans);
@@ -2291,6 +2440,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_log_chans = 0;
int num_phy_chans;
int i;
+ u32 val;
clk = clk_get(&pdev->dev, NULL);
@@ -2329,12 +2479,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
}
}
- i = readl(virtbase + D40_DREG_PERIPHID2);
+ /* Get silicon revision */
+ val = readl(virtbase + D40_DREG_PERIPHID2);
- if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
+ if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
dev_err(&pdev->dev,
"[%s] Unknown designer! Got %x wanted %x\n",
- __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
+ __func__, val & 0xf, D40_PERIPHID2_DESIGNER);
goto failure;
}
@@ -2342,7 +2493,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
- (i >> 4) & 0xf, res->start);
+ (val >> 4) & 0xf, res->start);
plat_data = pdev->dev.platform_data;
@@ -2364,6 +2515,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
+ base->rev = (val >> 4) & 0xf;
base->clk = clk;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
@@ -2402,6 +2554,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!base->lcla_pool.alloc_map)
goto failure;
+ base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (base->desc_slab == NULL)
+ goto failure;
+
return base;
failure:
@@ -2495,6 +2653,78 @@ static void __init d40_hw_init(struct d40_base *base)
}
+static int __init d40_lcla_allocate(struct d40_base *base)
+{
+ unsigned long *page_list;
+ int i, j;
+ int ret = 0;
+
+ /*
+ * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
+ * To fulfill this hardware requirement without wasting 256 kb,
+ * we allocate pages until we get an aligned one.
+ */
+ page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
+ GFP_KERNEL);
+
+ if (!page_list) {
+ ret = -ENOMEM;
+ goto failure;
+ }
+
+ /* Calculating how many pages that are required */
+ base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
+
+ for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
+ page_list[i] = __get_free_pages(GFP_KERNEL,
+ base->lcla_pool.pages);
+ if (!page_list[i]) {
+
+ dev_err(base->dev,
+ "[%s] Failed to allocate %d pages.\n",
+ __func__, base->lcla_pool.pages);
+
+ for (j = 0; j < i; j++)
+ free_pages(page_list[j], base->lcla_pool.pages);
+ goto failure;
+ }
+
+ if ((virt_to_phys((void *)page_list[i]) &
+ (LCLA_ALIGNMENT - 1)) == 0)
+ break;
+ }
+
+ for (j = 0; j < i; j++)
+ free_pages(page_list[j], base->lcla_pool.pages);
+
+ if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
+ base->lcla_pool.base = (void *)page_list[i];
+ } else {
+ /* After many attempts, no success finding the correct
+ * alignment; try allocating a big buffer instead */
+ dev_warn(base->dev,
+ "[%s] Failed to get %d pages @ 18 bit align.\n",
+ __func__, base->lcla_pool.pages);
+ base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
+ base->num_phy_chans +
+ LCLA_ALIGNMENT,
+ GFP_KERNEL);
+ if (!base->lcla_pool.base_unaligned) {
+ ret = -ENOMEM;
+ goto failure;
+ }
+
+ base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
+ LCLA_ALIGNMENT);
+ }
+
+ writel(virt_to_phys(base->lcla_pool.base),
+ base->virtbase + D40_DREG_LCLA);
+failure:
+ kfree(page_list);
+ return ret;
+}
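A minimal standalone sketch of the retry-until-aligned allocation used by d40_lcla_allocate() above; the attempt budget, page order and alignment here are illustrative assumptions rather than values taken from the driver.

/* Sketch only: allocate page blocks until one meets the physical alignment,
 * free the misses, and fall back to an oversized kmalloc() aligned in
 * software with PTR_ALIGN(). */
static void *try_alloc_aligned(int order, unsigned long align, int max_tries,
			       void **unaligned)
{
	unsigned long tries[8];			/* assumed small attempt budget */
	void *hit = NULL;
	int i, n = 0;

	while (n < max_tries && n < 8) {
		tries[n] = __get_free_pages(GFP_KERNEL, order);
		if (!tries[n])
			break;
		n++;
		if (!(virt_to_phys((void *)tries[n - 1]) & (align - 1))) {
			hit = (void *)tries[n - 1];
			break;
		}
	}

	/* Release every attempt except the aligned hit, if any. */
	for (i = 0; i < n; i++)
		if ((void *)tries[i] != hit)
			free_pages(tries[i], order);

	if (!hit) {
		/* Fallback: over-allocate and align within the buffer. */
		*unaligned = kmalloc((PAGE_SIZE << order) + align, GFP_KERNEL);
		if (*unaligned)
			hit = PTR_ALIGN(*unaligned, align);
	}
	return hit;
}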
+
static int __init d40_probe(struct platform_device *pdev)
{
int err;
@@ -2554,41 +2784,11 @@ static int __init d40_probe(struct platform_device *pdev)
__func__);
goto failure;
}
- /* Get IO for logical channel link address */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
- if (!res) {
- ret = -ENOENT;
- dev_err(&pdev->dev,
- "[%s] No \"lcla\" resource defined\n",
- __func__);
- goto failure;
- }
- base->lcla_pool.base_size = resource_size(res);
- base->lcla_pool.phy = res->start;
-
- if (request_mem_region(res->start, resource_size(res),
- D40_NAME " I/O lcla") == NULL) {
- ret = -EBUSY;
- dev_err(&pdev->dev,
- "[%s] Failed to request LCLA region 0x%x-0x%x\n",
- __func__, res->start, res->end);
- goto failure;
- }
- val = readl(base->virtbase + D40_DREG_LCLA);
- if (res->start != val && val != 0) {
- dev_warn(&pdev->dev,
- "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
- __func__, val, res->start);
- } else
- writel(res->start, base->virtbase + D40_DREG_LCLA);
-
- base->lcla_pool.base = ioremap(res->start, resource_size(res));
- if (!base->lcla_pool.base) {
- ret = -ENOMEM;
- dev_err(&pdev->dev,
- "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
- __func__, res->start, res->end);
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
+ __func__);
goto failure;
}
@@ -2616,11 +2816,15 @@ static int __init d40_probe(struct platform_device *pdev)
failure:
if (base) {
+ if (base->desc_slab)
+ kmem_cache_destroy(base->desc_slab);
if (base->virtbase)
iounmap(base->virtbase);
- if (base->lcla_pool.phy)
- release_mem_region(base->lcla_pool.phy,
- base->lcla_pool.base_size);
+ if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
+ free_pages((unsigned long)base->lcla_pool.base,
+ base->lcla_pool.pages);
+ if (base->lcla_pool.base_unaligned)
+ kfree(base->lcla_pool.base_unaligned);
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 561fdd8a80c..d937f76d6e2 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -315,11 +315,8 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
- u32 next_lli_off_dst;
- u32 next_lli_off_src;
-
- next_lli_off_src = 0;
- next_lli_off_dst = 0;
+ u32 next_lli_off_dst = 0;
+ u32 next_lli_off_src = 0;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
@@ -351,7 +348,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
sg_dma_len(current_sg),
next_lli_off_src,
lcsp->lcsp1, src_data_width,
- term_int && !next_lli_off_src,
+ false,
true);
d40_log_fill_lli(&lli->dst[i],
dev_addr,
@@ -375,7 +372,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
sg_dma_len(current_sg),
next_lli_off_src,
lcsp->lcsp1, src_data_width,
- term_int && !next_lli_off_src,
+ false,
false);
}
}
@@ -423,32 +420,35 @@ int d40_log_sg_to_lli(int lcla_id,
return total_size;
}
-void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+int d40_log_lli_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lcla_src,
struct d40_log_lli *lcla_dst,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int llis_per_log)
{
- u32 slos = 0;
- u32 dlos = 0;
+ u32 slos;
+ u32 dlos;
int i;
- lcpa->lcsp0 = lli_src->lcsp02;
- lcpa->lcsp1 = lli_src->lcsp13;
- lcpa->lcsp2 = lli_dst->lcsp02;
- lcpa->lcsp3 = lli_dst->lcsp13;
+ writel(lli_src->lcsp02, &lcpa->lcsp0);
+ writel(lli_src->lcsp13, &lcpa->lcsp1);
+ writel(lli_dst->lcsp02, &lcpa->lcsp2);
+ writel(lli_dst->lcsp13, &lcpa->lcsp3);
slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
- writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
- writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
- writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
- writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+ writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
+ writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
+ writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
+ writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
- slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
- dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+ slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
}
+
+ return i;
+
}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 2029280cb33..9c0fa2f5fe5 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -13,6 +13,9 @@
#define D40_DREG_PCDELTA (8 * 4)
#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
+#define D40_LCPA_CHAN_SIZE 32
+#define D40_LCPA_CHAN_DST_DELTA 16
+
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
@@ -336,12 +339,12 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
bool term_int, dma_addr_t dev_addr, int max_len,
int llis_per_log);
-void d40_log_lli_write(struct d40_log_lli_full *lcpa,
- struct d40_log_lli *lcla_src,
- struct d40_log_lli *lcla_dst,
- struct d40_log_lli *lli_dst,
- struct d40_log_lli *lli_src,
- int llis_per_log);
+int d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log);
int d40_log_sg_to_lli(int lcla_id,
struct scatterlist *sg,
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a1bf77c1993..2ec1ed56f20 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -200,8 +200,8 @@ static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
return -EINVAL;
}
- dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
- dma_desc, (void *)sg_dma_address(sg));
+ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
+ dma_desc, (unsigned long long)sg_dma_address(sg));
dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
@@ -382,7 +382,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
if (!td_desc) {
dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
- goto err;
+ goto out;
}
td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
@@ -410,7 +410,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
err:
kfree(td_desc->desc_list);
kfree(td_desc);
-
+out:
return NULL;
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 670239ab751..e7d5d6b5dcf 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
amd64_handle_ce(mci, info);
else if (ecc_type == 1)
amd64_handle_ue(mci, info);
-
- /*
- * If main error is CE then overflow must be CE. If main error is UE
- * then overflow is unknown. We'll call the overflow a CE - if
- * panic_on_ue is set then we're already panic'ed and won't arrive
- * here. Else, then apparently someone doesn't think that UE's are
- * catastrophic.
- */
- if (info->nbsh & K8_NBSH_OVERFLOW)
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
}
void amd64_decode_bus_error(int node_id, struct err_regs *regs)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308e7b8..6b21e25f7a8 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
int status;
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
status = cancel_delayed_work(&mci->work);
if (status == 0) {
debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index bae9351e947..9014df6f605 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
pr_emerg("MC%d_STATUS: ", m->bank);
- pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+ pr_cont("%sorrected error, other errors lost: %s, "
"CPU context corrupt: %s",
((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
- ((m->status & MCI_STATUS_EN) ? "yes" : "no"),
- ((m->status & MCI_STATUS_MISCV) ? "" : "in"),
+ ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
/* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
/*
- * We can decode MCEs for Opteron and later CPUs:
+ * We can decode MCEs for K8, F10h and F11h CPUs:
*/
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0xf))
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return 0;
+
+ if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+ return 0;
+
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
return 0;
}
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 996c1bdb5a3..a5cefab8d65 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1482,7 +1482,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
/* wake up device */
rc = pci_enable_device(pdev);
- if (rc == -EIO)
+ if (rc)
return rc;
/* now probe and enable the device */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 010c1d6526f..38a9be9e1c7 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1348,7 +1348,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
/* wake up device */
rc = pci_enable_device(pdev);
- if (rc == -EIO)
+ if (rc)
return rc;
/* now probe and enable the device */
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e0187d16dd7..0fd5b85a0f7 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
ATTR_COUNTER(0),
ATTR_COUNTER(1),
ATTR_COUNTER(2),
+ { .attr = { .name = NULL } }
};
static struct mcidev_sysfs_group i7core_udimm_counters = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 1052340e680..b123bb308a4 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -43,7 +43,7 @@ static u32 orig_pci_err_en;
#endif
static u32 orig_l2_err_disable;
-#ifdef CONFIG_MPC85xx
+#ifdef CONFIG_FSL_SOC_BOOKE
static u32 orig_hid1[2];
#endif
@@ -200,7 +200,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
+static int __devinit mpc85xx_pci_err_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct edac_pci_ctl_info *pci;
@@ -305,7 +305,7 @@ err:
return res;
}
-static int mpc85xx_pci_err_remove(struct of_device *op)
+static int mpc85xx_pci_err_remove(struct platform_device *op)
{
struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
@@ -503,7 +503,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
+static int __devinit mpc85xx_l2_err_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct edac_device_ctl_info *edac_dev;
@@ -613,7 +613,7 @@ err:
return res;
}
-static int mpc85xx_l2_err_remove(struct of_device *op)
+static int mpc85xx_l2_err_remove(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
@@ -647,7 +647,10 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
{ .compatible = "fsl,mpc8555-l2-cache-controller", },
{ .compatible = "fsl,mpc8560-l2-cache-controller", },
{ .compatible = "fsl,mpc8568-l2-cache-controller", },
+ { .compatible = "fsl,mpc8569-l2-cache-controller", },
{ .compatible = "fsl,mpc8572-l2-cache-controller", },
+ { .compatible = "fsl,p1020-l2-cache-controller", },
+ { .compatible = "fsl,p1021-l2-cache-controller", },
{ .compatible = "fsl,p2020-l2-cache-controller", },
{},
};
@@ -953,7 +956,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
+static int __devinit mpc85xx_mc_err_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct mem_ctl_info *mci;
@@ -1085,7 +1088,7 @@ err:
return res;
}
-static int mpc85xx_mc_err_remove(struct of_device *op)
+static int mpc85xx_mc_err_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
@@ -1125,7 +1128,10 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,mpc8569-memory-controller", },
{ .compatible = "fsl,mpc8572-memory-controller", },
{ .compatible = "fsl,mpc8349-memory-controller", },
+ { .compatible = "fsl,p1020-memory-controller", },
+ { .compatible = "fsl,p1021-memory-controller", },
{ .compatible = "fsl,p2020-memory-controller", },
+ { .compatible = "fsl,p4080-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
@@ -1140,7 +1146,7 @@ static struct of_platform_driver mpc85xx_mc_err_driver = {
},
};
-#ifdef CONFIG_MPC85xx
+#ifdef CONFIG_FSL_SOC_BOOKE
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
@@ -1179,7 +1185,7 @@ static int __init mpc85xx_mc_init(void)
printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif
-#ifdef CONFIG_MPC85xx
+#ifdef CONFIG_FSL_SOC_BOOKE
/*
* need to clear HID1[RFXE] to disable machine check int
* so we can catch it
@@ -1193,7 +1199,7 @@ static int __init mpc85xx_mc_init(void)
module_init(mpc85xx_mc_init);
-#ifdef CONFIG_MPC85xx
+#ifdef CONFIG_FSL_SOC_BOOKE
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
@@ -1202,7 +1208,7 @@ static void __exit mpc85xx_mc_restore_hid1(void *data)
static void __exit mpc85xx_mc_exit(void)
{
-#ifdef CONFIG_MPC85xx
+#ifdef CONFIG_FSL_SOC_BOOKE
on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index e78839e89a0..070cea41b66 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -184,9 +184,9 @@ struct ppc4xx_ecc_status {
/* Function Prototypes */
-static int ppc4xx_edac_probe(struct of_device *device,
+static int ppc4xx_edac_probe(struct platform_device *device,
const struct of_device_id *device_id);
-static int ppc4xx_edac_remove(struct of_device *device);
+static int ppc4xx_edac_remove(struct platform_device *device);
/* Global Variables */
@@ -1014,7 +1014,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/
static int __devinit
ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
- struct of_device *op,
+ struct platform_device *op,
const struct of_device_id *match,
const dcr_host_t *dcr_host,
u32 mcopt1)
@@ -1108,7 +1108,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
* mapped and assigned.
*/
static int __devinit
-ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci)
+ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci)
{
int status = 0;
int ded_irq, sec_irq;
@@ -1238,7 +1238,7 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* driver; otherwise, < 0 on error.
*/
static int __devinit
-ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
+ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match)
{
int status = 0;
u32 mcopt1, memcheck;
@@ -1359,7 +1359,7 @@ ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
* Unconditionally returns 0.
*/
static int
-ppc4xx_edac_remove(struct of_device *op)
+ppc4xx_edac_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index a9371b36a9b..fcf3ea28340 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -66,4 +66,28 @@ config FIREWIRE_NET
source "drivers/ieee1394/Kconfig"
+config FIREWIRE_NOSY
+ tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
+ depends on PCI
+ help
+ Nosy is an IEEE 1394 packet sniffer that is used for protocol
+ analysis and in development of IEEE 1394 drivers, applications,
+ or firmware.
+
+ This driver lets you use a Texas Instruments PCILynx 1394 to PCI
+ link layer controller TSB12LV21/A/B as a low-budget bus analyzer.
+ PCILynx is a now very rare IEEE 1394 controller which is
+ not OHCI 1394 compliant.
+
+ The following cards are known to be based on PCILynx or PCILynx-2:
+ IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
+ (PCI card), Newer Technology FireWire 2 Go (CardBus card),
+ Apple Power Mac G3 blue & white (onboard controller).
+
+ To compile this driver as a module, say M here: The module will be
+ called nosy. Source code of a userspace interface to nosy, called
+ nosy-dump, can be found in tools/firewire/ of the kernel sources.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index a8f9bb6d9fd..3c6a7fb20aa 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
+obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 371713ff026..be0492398ef 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -204,17 +204,62 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
+static int reset_bus(struct fw_card *card, bool short_reset)
+{
+ int reg = short_reset ? 5 : 1;
+ int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+ return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+
+void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
+{
+ /* We don't try hard to sort out requests of long vs. short resets. */
+ card->br_short = short_reset;
+
+ /* Use an arbitrary short delay to combine multiple reset requests. */
+ fw_card_get(card);
+ if (!schedule_delayed_work(&card->br_work,
+ delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ fw_card_put(card);
+}
+EXPORT_SYMBOL(fw_schedule_bus_reset);
+
+static void br_work(struct work_struct *work)
+{
+ struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+
+ /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
+ if (card->reset_jiffies != 0 &&
+ time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
+ if (!schedule_delayed_work(&card->br_work, 2 * HZ))
+ fw_card_put(card);
+ return;
+ }
+
+ fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
+ FW_PHY_CONFIG_CURRENT_GAP_COUNT);
+ reset_bus(card, card->br_short);
+ fw_card_put(card);
+}
+
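The fw_card_get()/fw_card_put() pairing around schedule_delayed_work() above is a general idiom: take a reference on behalf of the queued work and drop it at once if the work was already pending. A standalone sketch with made-up names (my_obj and my_release are assumptions, not FireWire core APIs):

struct my_obj {
	struct kref kref;
	struct delayed_work work;
};

static void my_release(struct kref *kref);	/* assumed destructor */

static void my_schedule(struct my_obj *obj, unsigned long delay)
{
	kref_get(&obj->kref);
	/* schedule_delayed_work() returns false when the work was already
	 * queued; drop the extra reference immediately in that case. */
	if (!schedule_delayed_work(&obj->work, delay))
		kref_put(&obj->kref, my_release);
}

static void my_work_fn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work.work);

	/* ... deferred processing ... */

	kref_put(&obj->kref, my_release);	/* balance my_schedule() */
}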
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
- fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
- &bandwidth, true, card->bm_transaction_data);
- if (channel == 31) {
+ if (!card->broadcast_channel_allocated) {
+ fw_iso_resource_manage(card, generation, 1ULL << 31,
+ &channel, &bandwidth, true,
+ card->bm_transaction_data);
+ if (channel != 31) {
+ fw_notify("failed to allocate broadcast channel\n");
+ return;
+ }
card->broadcast_channel_allocated = true;
- device_for_each_child(card->device, (void *)(long)generation,
- fw_device_set_broadcast_channel);
}
+
+ device_for_each_child(card->device, (void *)(long)generation,
+ fw_device_set_broadcast_channel);
}
static const char gap_count_table[] = {
@@ -224,27 +269,26 @@ static const char gap_count_table[] = {
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
- if (!schedule_delayed_work(&card->work, delay))
+ if (!schedule_delayed_work(&card->bm_work, delay))
fw_card_put(card);
}
-static void fw_card_bm_work(struct work_struct *work)
+static void bm_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, work.work);
+ struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
- unsigned long flags;
- int root_id, new_root_id, irm_id, local_id;
+ int root_id, new_root_id, irm_id, bm_id, local_id;
int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&card->lock);
if (card->local_node == NULL) {
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
goto out_put_card;
}
@@ -267,7 +311,8 @@ static void fw_card_bm_work(struct work_struct *work)
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
- if (is_next_generation(generation, card->bm_generation) ||
+ if ((is_next_generation(generation, card->bm_generation) &&
+ !card->bm_abdicate) ||
(card->bm_generation != generation && grace)) {
/*
* This first step is to figure out who is IRM and
@@ -298,21 +343,26 @@ static void fw_card_bm_work(struct work_struct *work)
card->bm_transaction_data[0] = cpu_to_be32(0x3f);
card->bm_transaction_data[1] = cpu_to_be32(local_id);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- card->bm_transaction_data,
- sizeof(card->bm_transaction_data));
+ card->bm_transaction_data, 8);
if (rcode == RCODE_GENERATION)
/* Another bus reset, BM work has been rescheduled. */
goto out;
- if (rcode == RCODE_COMPLETE &&
- card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
+ bm_id = be32_to_cpu(card->bm_transaction_data[0]);
+ spin_lock_irq(&card->lock);
+ if (rcode == RCODE_COMPLETE && generation == card->generation)
+ card->bm_node_id =
+ bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
+ spin_unlock_irq(&card->lock);
+
+ if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
@@ -320,7 +370,17 @@ static void fw_card_bm_work(struct work_struct *work)
goto out;
}
- spin_lock_irqsave(&card->lock, flags);
+ if (rcode == RCODE_SEND_ERROR) {
+ /*
+ * We have been unable to send the lock request due to
+ * some local problem. Let's try again later and hope
+ * that the problem has gone away by then.
+ */
+ fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
+ goto out;
+ }
+
+ spin_lock_irq(&card->lock);
if (rcode != RCODE_COMPLETE) {
/*
@@ -339,7 +399,7 @@ static void fw_card_bm_work(struct work_struct *work)
* We weren't BM in the last generation, and the last
* bus reset is less than 125ms ago. Reschedule this job.
*/
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
@@ -362,14 +422,12 @@ static void fw_card_bm_work(struct work_struct *work)
* If we haven't probed this device yet, bail out now
* and let's try again once that's done.
*/
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
goto out;
} else if (root_device_is_cmc) {
/*
- * FIXME: I suppose we should set the cmstr bit in the
- * STATE_CLEAR register of this node, as described in
- * 1394-1995, 8.4.2.6. Also, send out a force root
- * packet for this node.
+ * We will send out a force root packet for this
+ * node as part of the gap count optimization.
*/
new_root_id = root_id;
} else {
@@ -402,19 +460,33 @@ static void fw_card_bm_work(struct work_struct *work)
(card->gap_count != gap_count || new_root_id != root_id))
do_reset = true;
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
if (do_reset) {
fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
card->index, new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- fw_core_initiate_bus_reset(card, 1);
+ reset_bus(card, true);
/* Will allocate broadcast channel after the reset. */
- } else {
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ goto out;
+ }
+
+ if (root_device_is_cmc) {
+ /*
+ * Make sure that the cycle master sends cycle start packets.
+ */
+ card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ card->bm_transaction_data, 4);
+ if (rcode == RCODE_GENERATION)
+ goto out;
}
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+
out:
fw_node_put(root_node);
out_put_card:
@@ -432,17 +504,23 @@ void fw_card_initialize(struct fw_card *card,
card->device = device;
card->current_tlabel = 0;
card->tlabel_mask = 0;
+ card->split_timeout_hi = 0;
+ card->split_timeout_lo = 800 << 19;
+ card->split_timeout_cycles = 800;
+ card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
+ INIT_LIST_HEAD(&card->phy_receiver_list);
spin_lock_init(&card->lock);
card->local_node = NULL;
- INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+ INIT_DELAYED_WORK(&card->br_work, br_work);
+ INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
@@ -468,20 +546,22 @@ int fw_card_add(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_add);
-
/*
* The next few functions implement a dummy driver that is used once a card
* driver shuts down an fw_card. This allows the driver to cleanly unload,
* as all IO to the card will be handled (and failed) by the dummy driver
* instead of calling into the module. Only functions for iso context
* shutdown still need to be provided by the card driver.
+ *
+ * .read/write_csr() should never be called anymore after the dummy driver
+ * was bound since they are only used within request handler context.
+ * .set_config_rom() is never called since the card is taken out of card_list
+ * before switching to the dummy driver.
*/
-static int dummy_enable(struct fw_card *card,
- const __be32 *config_rom, size_t length)
+static int dummy_read_phy_reg(struct fw_card *card, int address)
{
- BUG();
- return -1;
+ return -ENODEV;
}
static int dummy_update_phy_reg(struct fw_card *card, int address,
@@ -490,25 +570,14 @@ static int dummy_update_phy_reg(struct fw_card *card, int address,
return -ENODEV;
}
-static int dummy_set_config_rom(struct fw_card *card,
- const __be32 *config_rom, size_t length)
-{
- /*
- * We take the card out of card_list before setting the dummy
- * driver, so this should never get called.
- */
- BUG();
- return -1;
-}
-
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
- packet->callback(packet, card, -ENODEV);
+ packet->callback(packet, card, RCODE_CANCELLED);
}
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
- packet->callback(packet, card, -ENODEV);
+ packet->callback(packet, card, RCODE_CANCELLED);
}
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
@@ -522,14 +591,40 @@ static int dummy_enable_phys_dma(struct fw_card *card,
return -ENODEV;
}
+static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
+ int type, int channel, size_t header_size)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static int dummy_start_iso(struct fw_iso_context *ctx,
+ s32 cycle, u32 sync, u32 tags)
+{
+ return -ENODEV;
+}
+
+static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
+{
+ return -ENODEV;
+}
+
+static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
+ struct fw_iso_buffer *buffer, unsigned long payload)
+{
+ return -ENODEV;
+}
+
static const struct fw_card_driver dummy_driver_template = {
- .enable = dummy_enable,
- .update_phy_reg = dummy_update_phy_reg,
- .set_config_rom = dummy_set_config_rom,
- .send_request = dummy_send_request,
- .cancel_packet = dummy_cancel_packet,
- .send_response = dummy_send_response,
- .enable_phys_dma = dummy_enable_phys_dma,
+ .read_phy_reg = dummy_read_phy_reg,
+ .update_phy_reg = dummy_update_phy_reg,
+ .send_request = dummy_send_request,
+ .send_response = dummy_send_response,
+ .cancel_packet = dummy_cancel_packet,
+ .enable_phys_dma = dummy_enable_phys_dma,
+ .allocate_iso_context = dummy_allocate_iso_context,
+ .start_iso = dummy_start_iso,
+ .set_iso_channels = dummy_set_iso_channels,
+ .queue_iso = dummy_queue_iso,
};
void fw_card_release(struct kref *kref)
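Per the comment above, only the iso-context shutdown hooks still have to come from the real controller driver once it goes away. A hedged sketch of how the switch-over can look; the actual assignment happens in fw_core_remove_card(), and the hook names (free_iso_context, stop_iso) are assumed from struct fw_card_driver:

/* Sketch only: bind the dummy driver but keep the iso shutdown hooks. */
static struct fw_card_driver dummy_driver;

static void bind_dummy_driver_sketch(struct fw_card *card)
{
	dummy_driver                  = dummy_driver_template;
	dummy_driver.free_iso_context = card->driver->free_iso_context;
	dummy_driver.stop_iso         = card->driver->stop_iso;
	card->driver                  = &dummy_driver;
}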
@@ -545,7 +640,7 @@ void fw_core_remove_card(struct fw_card *card)
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
- fw_core_initiate_bus_reset(card, 1);
+ fw_schedule_bus_reset(card, false, true);
mutex_lock(&card_mutex);
list_del_init(&card->link);
@@ -565,12 +660,3 @@ void fw_core_remove_card(struct fw_card *card)
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
-{
- int reg = short_reset ? 5 : 1;
- int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
-
- return card->driver->update_phy_reg(card, reg, 0, bit);
-}
-EXPORT_SYMBOL(fw_core_initiate_bus_reset);
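The exported fw_core_initiate_bus_reset() removed above is folded into the card code: callers now go through fw_schedule_bus_reset() (declared in core.h below), and the PHY register write lives on in the card-local reset_bus() used earlier in this file. A sketch of that write, with the register/bit selection kept from the removed helper:

/* Sketch: long reset via PHY register 1, short reset via register 5. */
static void reset_bus_sketch(struct fw_card *card, bool short_reset)
{
	int reg = short_reset ? 5 : 1;
	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

	card->driver->update_phy_reg(card, reg, 0, bit);
}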
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 5bf106b9d79..14bb7b7b5dd 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -33,7 +34,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-#include <linux/sched.h>
+#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -47,6 +48,13 @@
#include "core.h"
+/*
+ * ABI version history is documented in linux/firewire-cdev.h.
+ */
+#define FW_CDEV_KERNEL_VERSION 4
+#define FW_CDEV_VERSION_EVENT_REQUEST2 4
+#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
+
struct client {
u32 version;
struct fw_device *device;
@@ -63,6 +71,9 @@ struct client {
struct fw_iso_buffer buffer;
unsigned long vm_start;
+ struct list_head phy_receiver_link;
+ u64 phy_receiver_closure;
+
struct list_head link;
struct kref kref;
};
@@ -107,6 +118,7 @@ struct outbound_transaction_resource {
struct inbound_transaction_resource {
struct client_resource resource;
+ struct fw_card *card;
struct fw_request *request;
void *data;
size_t length;
@@ -171,7 +183,10 @@ struct outbound_transaction_event {
struct inbound_transaction_event {
struct event event;
- struct fw_cdev_event_request request;
+ union {
+ struct fw_cdev_event_request request;
+ struct fw_cdev_event_request2 request2;
+ } req;
};
struct iso_interrupt_event {
@@ -179,11 +194,28 @@ struct iso_interrupt_event {
struct fw_cdev_event_iso_interrupt interrupt;
};
+struct iso_interrupt_mc_event {
+ struct event event;
+ struct fw_cdev_event_iso_interrupt_mc interrupt;
+};
+
struct iso_resource_event {
struct event event;
struct fw_cdev_event_iso_resource iso_resource;
};
+struct outbound_phy_packet_event {
+ struct event event;
+ struct client *client;
+ struct fw_packet p;
+ struct fw_cdev_event_phy_packet phy_packet;
+};
+
+struct inbound_phy_packet_event {
+ struct event event;
+ struct fw_cdev_event_phy_packet phy_packet;
+};
+
static inline void __user *u64_to_uptr(__u64 value)
{
return (void __user *)(unsigned long)value;
@@ -219,6 +251,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
idr_init(&client->resource_idr);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
+ INIT_LIST_HEAD(&client->phy_receiver_link);
kref_init(&client->kref);
file->private_data = client;
@@ -309,7 +342,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
event->generation = client->device->generation;
event->node_id = client->device->node_id;
event->local_node_id = card->local_node->node_id;
- event->bm_node_id = 0; /* FIXME: We don't track the BM. */
+ event->bm_node_id = card->bm_node_id;
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
@@ -340,7 +373,7 @@ static void queue_bus_reset_event(struct client *client)
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
- fw_notify("Out of memory when allocating bus reset event\n");
+ fw_notify("Out of memory when allocating event\n");
return;
}
@@ -386,6 +419,9 @@ union ioctl_arg {
struct fw_cdev_allocate_iso_resource allocate_iso_resource;
struct fw_cdev_send_stream_packet send_stream_packet;
struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
+ struct fw_cdev_send_phy_packet send_phy_packet;
+ struct fw_cdev_receive_phy_packets receive_phy_packets;
+ struct fw_cdev_set_iso_channels set_iso_channels;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
@@ -395,7 +431,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
unsigned long ret = 0;
client->version = a->version;
- a->version = FW_CDEV_VERSION;
+ a->version = FW_CDEV_KERNEL_VERSION;
a->card = client->device->card->index;
down_read(&fw_device_rwsem);
@@ -554,6 +590,10 @@ static int init_request(struct client *client,
(request->length > 4096 || request->length > 512 << speed))
return -EIO;
+ if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
+ request->length < 4)
+ return -EINVAL;
+
e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -626,28 +666,34 @@ static void release_request(struct client *client,
if (is_fcp_request(r->request))
kfree(r->data);
else
- fw_send_response(client->device->card, r->request,
- RCODE_CONFLICT_ERROR);
+ fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
+
+ fw_card_put(r->card);
kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
+ int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct address_handler_resource *handler = callback_data;
struct inbound_transaction_resource *r;
struct inbound_transaction_event *e;
+ size_t event_size0;
void *fcp_frame = NULL;
int ret;
+ /* card may be different from handler->client->device->card */
+ fw_card_get(card);
+
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (r == NULL || e == NULL)
+ if (r == NULL || e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
goto failed;
-
+ }
+ r->card = card;
r->request = request;
r->data = payload;
r->length = length;
@@ -669,15 +715,37 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
if (ret < 0)
goto failed;
- e->request.type = FW_CDEV_EVENT_REQUEST;
- e->request.tcode = tcode;
- e->request.offset = offset;
- e->request.length = length;
- e->request.handle = r->resource.handle;
- e->request.closure = handler->closure;
+ if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
+ struct fw_cdev_event_request *req = &e->req.request;
+
+ if (tcode & 0x10)
+ tcode = TCODE_LOCK_REQUEST;
+
+ req->type = FW_CDEV_EVENT_REQUEST;
+ req->tcode = tcode;
+ req->offset = offset;
+ req->length = length;
+ req->handle = r->resource.handle;
+ req->closure = handler->closure;
+ event_size0 = sizeof(*req);
+ } else {
+ struct fw_cdev_event_request2 *req = &e->req.request2;
+
+ req->type = FW_CDEV_EVENT_REQUEST2;
+ req->tcode = tcode;
+ req->offset = offset;
+ req->source_node_id = source;
+ req->destination_node_id = destination;
+ req->card = card->index;
+ req->generation = generation;
+ req->length = length;
+ req->handle = r->resource.handle;
+ req->closure = handler->closure;
+ event_size0 = sizeof(*req);
+ }
queue_event(handler->client, &e->event,
- &e->request, sizeof(e->request), r->data, length);
+ &e->req, event_size0, r->data, length);
return;
failed:
@@ -687,6 +755,8 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
if (!is_fcp_request(request))
fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+
+ fw_card_put(card);
}
static void release_address_handler(struct client *client,
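handle_request() above now emits one of two event layouts, chosen by the ABI version the client negotiated through FW_CDEV_IOC_GET_INFO. A hedged userspace-side sketch of telling them apart when reading from a /dev/fw* character device; struct and constant names follow <linux/firewire-cdev.h>, everything else is illustrative:

#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

/* Read and dispatch one event from an already-open /dev/fw* descriptor. */
static void read_one_event(int fd)
{
	union {
		struct fw_cdev_event_common   common;
		struct fw_cdev_event_request  request;    /* client version < 4  */
		struct fw_cdev_event_request2 request2;   /* client version >= 4 */
		char buf[16 * 1024];
	} e;
	ssize_t len = read(fd, &e, sizeof(e));

	if (len < (ssize_t)sizeof(e.common))
		return;

	switch (e.common.type) {
	case FW_CDEV_EVENT_REQUEST2:
		/* carries source, destination, card and generation */
		printf("request2 from node 0x%x at offset 0x%llx\n",
		       e.request2.source_node_id,
		       (unsigned long long)e.request2.offset);
		break;
	case FW_CDEV_EVENT_REQUEST:
		printf("request at offset 0x%llx\n",
		       (unsigned long long)e.request.offset);
		break;
	}
}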
@@ -711,7 +781,11 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
return -ENOMEM;
region.start = a->offset;
- region.end = a->offset + a->length;
+ if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
+ region.end = a->offset + a->length;
+ else
+ region.end = a->region_end;
+
r->handler.length = a->length;
r->handler.address_callback = handle_request;
r->handler.callback_data = r;
@@ -723,6 +797,7 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
kfree(r);
return ret;
}
+ a->offset = r->handler.offset;
r->resource.release = release_address_handler;
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
@@ -757,15 +832,19 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
if (is_fcp_request(r->request))
goto out;
- if (a->length < r->length)
- r->length = a->length;
- if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
+ if (a->length != fw_get_response_length(r->request)) {
+ ret = -EINVAL;
+ kfree(r->request);
+ goto out;
+ }
+ if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
ret = -EFAULT;
kfree(r->request);
goto out;
}
- fw_send_response(client->device->card, r->request, a->rcode);
+ fw_send_response(r->card, r->request, a->rcode);
out:
+ fw_card_put(r->card);
kfree(r);
return ret;
@@ -773,8 +852,9 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
- return fw_core_initiate_bus_reset(client->device->card,
+ fw_schedule_bus_reset(client->device->card, true,
arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
+ return 0;
}
static void release_descriptor(struct client *client,
@@ -845,10 +925,11 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
struct client *client = data;
struct iso_interrupt_event *e;
- e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
- if (e == NULL)
+ e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+ if (e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
return;
-
+ }
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
e->interrupt.closure = client->iso_closure;
e->interrupt.cycle = cycle;
@@ -858,27 +939,54 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
sizeof(e->interrupt) + header_length, NULL, 0);
}
+static void iso_mc_callback(struct fw_iso_context *context,
+ dma_addr_t completed, void *data)
+{
+ struct client *client = data;
+ struct iso_interrupt_mc_event *e;
+
+ e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ if (e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
+ return;
+ }
+ e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
+ e->interrupt.closure = client->iso_closure;
+ e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
+ completed);
+ queue_event(client, &e->event, &e->interrupt,
+ sizeof(e->interrupt), NULL, 0);
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
+ fw_iso_callback_t cb;
- /* We only support one context at this time. */
- if (client->iso_context != NULL)
- return -EBUSY;
-
- if (a->channel > 63)
- return -EINVAL;
+ BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
+ FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
+ FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
+ FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
switch (a->type) {
- case FW_ISO_CONTEXT_RECEIVE:
- if (a->header_size < 4 || (a->header_size & 3))
+ case FW_ISO_CONTEXT_TRANSMIT:
+ if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
+
+ cb = iso_callback;
break;
- case FW_ISO_CONTEXT_TRANSMIT:
- if (a->speed > SCODE_3200)
+ case FW_ISO_CONTEXT_RECEIVE:
+ if (a->header_size < 4 || (a->header_size & 3) ||
+ a->channel > 63)
return -EINVAL;
+
+ cb = iso_callback;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ cb = (fw_iso_callback_t)iso_mc_callback;
break;
default:
@@ -886,20 +994,37 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
context = fw_iso_context_create(client->device->card, a->type,
- a->channel, a->speed, a->header_size,
- iso_callback, client);
+ a->channel, a->speed, a->header_size, cb, client);
if (IS_ERR(context))
return PTR_ERR(context);
+ /* We only support one context at this time. */
+ spin_lock_irq(&client->lock);
+ if (client->iso_context != NULL) {
+ spin_unlock_irq(&client->lock);
+ fw_iso_context_destroy(context);
+ return -EBUSY;
+ }
client->iso_closure = a->closure;
client->iso_context = context;
+ spin_unlock_irq(&client->lock);
- /* We only support one context at this time. */
a->handle = 0;
return 0;
}
+static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
+ struct fw_iso_context *ctx = client->iso_context;
+
+ if (ctx == NULL || a->handle != 0)
+ return -EINVAL;
+
+ return fw_iso_context_set_channels(ctx, &a->channels);
+}
+
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
@@ -913,7 +1038,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
- unsigned long payload, buffer_end, header_length;
+ unsigned long payload, buffer_end, transmit_header_bytes = 0;
u32 control;
int count;
struct {
@@ -933,7 +1058,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
* use the indirect payload, the iso buffer need not be mapped
* and the a->data pointer is ignored.
*/
-
payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
if (a->data == 0 || client->buffer.pages == NULL ||
@@ -942,8 +1066,10 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
buffer_end = 0;
}
- p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
+ if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
+ return -EINVAL;
+ p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
if (!access_ok(VERIFY_READ, p, a->size))
return -EFAULT;
@@ -959,31 +1085,32 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
u.packet.sy = GET_SY(control);
u.packet.header_length = GET_HEADER_LENGTH(control);
- if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
- if (u.packet.header_length % 4 != 0)
+ switch (ctx->type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ if (u.packet.header_length & 3)
+ return -EINVAL;
+ transmit_header_bytes = u.packet.header_length;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
+ if (u.packet.header_length == 0 ||
+ u.packet.header_length % ctx->header_size != 0)
return -EINVAL;
- header_length = u.packet.header_length;
- } else {
- /*
- * We require that header_length is a multiple of
- * the fixed header size, ctx->header_size.
- */
- if (ctx->header_size == 0) {
- if (u.packet.header_length > 0)
- return -EINVAL;
- } else if (u.packet.header_length == 0 ||
- u.packet.header_length % ctx->header_size != 0) {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ if (u.packet.payload_length == 0 ||
+ u.packet.payload_length & 3)
return -EINVAL;
- }
- header_length = 0;
+ break;
}
next = (struct fw_cdev_iso_packet __user *)
- &p->header[header_length / 4];
+ &p->header[transmit_header_bytes / 4];
if (next > end)
return -EINVAL;
if (__copy_from_user
- (u.packet.header, p->header, header_length))
+ (u.packet.header, p->header, transmit_header_bytes))
return -EFAULT;
if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
u.packet.header_length + u.packet.payload_length > 0)
@@ -1011,6 +1138,13 @@ static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_start_iso *a = &arg->start_iso;
+ BUILD_BUG_ON(
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
+
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
@@ -1042,7 +1176,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
local_irq_disable();
- cycle_time = card->driver->get_cycle_time(card);
+ cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
switch (a->clk_id) {
case CLOCK_REALTIME: getnstimeofday(&ts); break;
@@ -1323,28 +1457,135 @@ static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
return init_request(client, &request, dest, a->speed);
}
+static void outbound_phy_packet_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
+{
+ struct outbound_phy_packet_event *e =
+ container_of(packet, struct outbound_phy_packet_event, p);
+
+ switch (status) {
+ /* expected: */
+ case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break;
+ /* should never happen with PHY packets: */
+ case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break;
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break;
+ case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break;
+ case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
+ /* stale generation; cancelled; on certain controllers: no ack */
+ default: e->phy_packet.rcode = status; break;
+ }
+ e->phy_packet.data[0] = packet->timestamp;
+
+ queue_event(e->client, &e->event, &e->phy_packet,
+ sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
+ client_put(e->client);
+}
+
+static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
+ struct fw_card *card = client->device->card;
+ struct outbound_phy_packet_event *e;
+
+ /* Access policy: Allow this ioctl only on local nodes' device files. */
+ if (!client->device->is_local)
+ return -ENOSYS;
+
+ e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ client_get(client);
+ e->client = client;
+ e->p.speed = SCODE_100;
+ e->p.generation = a->generation;
+ e->p.header[0] = a->data[0];
+ e->p.header[1] = a->data[1];
+ e->p.header_length = 8;
+ e->p.callback = outbound_phy_packet_callback;
+ e->phy_packet.closure = a->closure;
+ e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
+ if (is_ping_packet(a->data))
+ e->phy_packet.length = 4;
+
+ card->driver->send_request(card, &e->p);
+
+ return 0;
+}
+
+static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
+ struct fw_card *card = client->device->card;
+
+ /* Access policy: Allow this ioctl only on local nodes' device files. */
+ if (!client->device->is_local)
+ return -ENOSYS;
+
+ spin_lock_irq(&card->lock);
+
+ list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
+ client->phy_receiver_closure = a->closure;
+
+ spin_unlock_irq(&card->lock);
+
+ return 0;
+}
+
+void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
+{
+ struct client *client;
+ struct inbound_phy_packet_event *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ if (e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
+ break;
+ }
+ e->phy_packet.closure = client->phy_receiver_closure;
+ e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
+ e->phy_packet.rcode = RCODE_COMPLETE;
+ e->phy_packet.length = 8;
+ e->phy_packet.data[0] = p->header[1];
+ e->phy_packet.data[1] = p->header[2];
+ queue_event(client, &e->event,
+ &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
+ }
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
- ioctl_get_info,
- ioctl_send_request,
- ioctl_allocate,
- ioctl_deallocate,
- ioctl_send_response,
- ioctl_initiate_bus_reset,
- ioctl_add_descriptor,
- ioctl_remove_descriptor,
- ioctl_create_iso_context,
- ioctl_queue_iso,
- ioctl_start_iso,
- ioctl_stop_iso,
- ioctl_get_cycle_timer,
- ioctl_allocate_iso_resource,
- ioctl_deallocate_iso_resource,
- ioctl_allocate_iso_resource_once,
- ioctl_deallocate_iso_resource_once,
- ioctl_get_speed,
- ioctl_send_broadcast_request,
- ioctl_send_stream_packet,
- ioctl_get_cycle_timer2,
+ [0x00] = ioctl_get_info,
+ [0x01] = ioctl_send_request,
+ [0x02] = ioctl_allocate,
+ [0x03] = ioctl_deallocate,
+ [0x04] = ioctl_send_response,
+ [0x05] = ioctl_initiate_bus_reset,
+ [0x06] = ioctl_add_descriptor,
+ [0x07] = ioctl_remove_descriptor,
+ [0x08] = ioctl_create_iso_context,
+ [0x09] = ioctl_queue_iso,
+ [0x0a] = ioctl_start_iso,
+ [0x0b] = ioctl_stop_iso,
+ [0x0c] = ioctl_get_cycle_timer,
+ [0x0d] = ioctl_allocate_iso_resource,
+ [0x0e] = ioctl_deallocate_iso_resource,
+ [0x0f] = ioctl_allocate_iso_resource_once,
+ [0x10] = ioctl_deallocate_iso_resource_once,
+ [0x11] = ioctl_get_speed,
+ [0x12] = ioctl_send_broadcast_request,
+ [0x13] = ioctl_send_stream_packet,
+ [0x14] = ioctl_get_cycle_timer2,
+ [0x15] = ioctl_send_phy_packet,
+ [0x16] = ioctl_receive_phy_packets,
+ [0x17] = ioctl_set_iso_channels,
};
static int dispatch_ioctl(struct client *client,
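The handler table is now indexed by the ioctl number, so dispatch_ioctl() (whose body is not shown in this hunk) can bounds-check _IOC_NR(cmd) and call straight into the array. A hedged sketch of that lookup; the exact error codes and copy-in/out details are abbreviated:

static int dispatch_ioctl_sketch(struct client *client,
				 unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	memset(&buffer, 0, sizeof(buffer));

	if (_IOC_DIR(cmd) & _IOC_WRITE &&
	    copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ &&
	    copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
		return -EFAULT;

	return ret;
}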
@@ -1452,6 +1693,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client *client = file->private_data;
struct event *event, *next_event;
+ spin_lock_irq(&client->device->card->lock);
+ list_del(&client->phy_receiver_link);
+ spin_unlock_irq(&client->device->card->lock);
+
mutex_lock(&client->device->client_list_mutex);
list_del(&client->link);
mutex_unlock(&client->device->client_list_mutex);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 4b8523f00dc..6113b896e79 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -107,11 +107,11 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
}
/**
- * fw_csr_string - reads a string from the configuration ROM
- * @directory: e.g. root directory or unit directory
- * @key: the key of the preceding directory entry
- * @buf: where to put the string
- * @size: size of @buf, in bytes
+ * fw_csr_string() - reads a string from the configuration ROM
+ * @directory: e.g. root directory or unit directory
+ * @key: the key of the preceding directory entry
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
@@ -1136,6 +1136,7 @@ static void fw_device_refresh(struct work_struct *work)
goto give_up;
}
+ fw_device_cdev_update(device);
create_units(device);
/* Userspace may want to re-read attributes. */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 8f5aebfb29d..c003fa4e2db 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -118,6 +118,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
+/* Convert DMA address to offset into virtually contiguous buffer. */
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
+{
+ int i;
+ dma_addr_t address;
+ ssize_t offset;
+
+ for (i = 0; i < buffer->page_count; i++) {
+ address = page_private(buffer->pages[i]);
+ offset = (ssize_t)completed - (ssize_t)address;
+ if (offset > 0 && offset <= PAGE_SIZE)
+ return (i << PAGE_SHIFT) + offset;
+ }
+
+ return 0;
+}
+
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
@@ -134,7 +151,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->channel = channel;
ctx->speed = speed;
ctx->header_size = header_size;
- ctx->callback = callback;
+ ctx->callback.sc = callback;
ctx->callback_data = callback_data;
return ctx;
@@ -143,9 +160,7 @@ EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
- struct fw_card *card = ctx->card;
-
- card->driver->free_iso_context(ctx);
+ ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
@@ -156,14 +171,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
}
EXPORT_SYMBOL(fw_iso_context_start);
+int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
+{
+ return ctx->card->driver->set_iso_channels(ctx, channels);
+}
+
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
- struct fw_card *card = ctx->card;
-
- return card->driver->queue_iso(ctx, packet, buffer, payload);
+ return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
@@ -279,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
}
/**
- * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 93ec64cdeef..09be1a63550 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -174,12 +174,7 @@ static inline struct fw_node *fw_node(struct list_head *l)
return list_entry(l, struct fw_node, link);
}
-/**
- * build_tree - Build the tree representation of the topology
- * @self_ids: array of self IDs to create the tree from
- * @self_id_count: the length of the self_ids array
- * @local_id: the node ID of the local node
- *
+/*
* This function builds the tree representation of the topology given
* by the self IDs from the latest bus reset. During the construction
* of the tree, the function checks that the self IDs are valid and
@@ -420,11 +415,10 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
}
}
-/**
- * update_tree - compare the old topology tree for card with the new
- * one specified by root. Queue the nodes and mark them as either
- * found, lost or updated. Update the nodes in the card topology tree
- * as we go.
+/*
+ * Compare the old topology tree for card with the new one specified by root.
+ * Queue the nodes and mark them as either found, lost or updated.
+ * Update the nodes in the card topology tree as we go.
*/
static void update_tree(struct fw_card *card, struct fw_node *root)
{
@@ -524,7 +518,7 @@ static void update_topology_map(struct fw_card *card,
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
- int self_id_count, u32 *self_ids)
+ int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
unsigned long flags;
@@ -543,7 +537,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
spin_lock_irqsave(&card->lock, flags);
- card->broadcast_channel_allocated = false;
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using
@@ -552,6 +546,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
smp_wmb();
card->generation = generation;
card->reset_jiffies = jiffies;
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index fdc33ff06dc..b42a0bde849 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
+ if (!del_timer(&t->split_timeout_timer)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
- del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
+ timed_out:
return -ENOENT;
}
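The added del_timer() call closes a race with the split-timeout timer: if the timer can no longer be deleted, its callback is already running and owns completion of this transaction, so close_transaction() must back off and report -ENOENT rather than complete it a second time. A hedged sketch of the timer side of that protocol; the real callback also cancels the still-queued packet, and the card back pointer on the transaction is an assumption of this sketch:

static void split_timeout_sketch(unsigned long data)
{
	struct fw_transaction *t = (struct fw_transaction *)data;
	struct fw_card *card = t->card;		/* assumed back pointer */
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (list_empty(&t->link)) {
		/* Someone else completed the transaction first; back off. */
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	list_del_init(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	spin_unlock_irqrestore(&card->lock, flags);

	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}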
@@ -246,7 +250,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
break;
default:
- WARN(1, KERN_ERR "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d", tcode);
}
common:
packet->speed = speed;
@@ -273,43 +277,52 @@ static int allocate_tlabel(struct fw_card *card)
}
/**
- * This function provides low-level access to the IEEE1394 transaction
- * logic. Most C programs would use either fw_read(), fw_write() or
- * fw_lock() instead - those function are convenience wrappers for
- * this function. The fw_send_request() function is primarily
- * provided as a flexible, one-stop entry point for languages bindings
- * and protocol bindings.
+ * fw_send_request() - submit a request packet for transmission
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
*
- * FIXME: Document this function further, in particular the possible
- * values for rcode in the callback. In short, we map ACK_COMPLETE to
- * RCODE_COMPLETE, internal errors set errno and set rcode to
- * RCODE_SEND_ERROR (which is out of range for standard ieee1394
- * rcodes). All other rcodes are forwarded unchanged. For all
- * errors, payload is NULL, length is 0.
+ * Submit a request packet into the asynchronous request transmission queue.
+ * Can be called from atomic context. If you prefer a blocking API, use
+ * fw_run_transaction() in a context that can sleep.
*
- * Can not expect the callback to be called before the function
- * returns, though this does happen in some cases (ACK_COMPLETE and
- * errors).
+ * In case of lock requests, specify one of the firewire-core specific %TCODE_
+ * constants instead of %TCODE_LOCK_REQUEST in @tcode.
*
- * The payload is only used for write requests and must not be freed
- * until the callback has been called.
+ * Make sure that the value in @destination_id is not older than the one in
+ * @generation. Otherwise the request is in danger of being sent to a wrong node.
*
- * @param card the card from which to send the request
- * @param tcode the tcode for this transaction. Do not use
- * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
- * etc. to specify tcode and ext_tcode.
- * @param node_id the destination node ID (bus ID and PHY ID concatenated)
- * @param generation the generation for which node_id is valid
- * @param speed the speed to use for sending the request
- * @param offset the 48 bit offset on the destination node
- * @param payload the data payload for the request subaction
- * @param length the length in bytes of the data to read
- * @param callback function to be called when the transaction is completed
- * @param callback_data pointer to arbitrary data, which will be
- * passed to the callback
- *
- * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
+ * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
* needs to synthesize @destination_id with fw_stream_packet_destination_id().
+ * It will contain tag, channel, and sy data instead of a node ID then.
+ *
+ * The payload buffer at @data is going to be DMA-mapped except in case of
+ * quadlet-sized payload or of local (loopback) requests. Hence make sure that
+ * the buffer complies with the restrictions for DMA-mapped memory. The
+ * @payload must not be freed before the @callback is called.
+ *
+ * In case of request types without payload, @data is NULL and @length is 0.
+ *
+ * After the transaction is completed successfully or unsuccessfully, the
+ * @callback will be called. Among its parameters is the response code which
+ * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
+ * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
+ * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
+ * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
+ * generation, or missing ACK respectively.
+ *
+ * Note some timing corner cases: fw_send_request() may complete much earlier
+ * than when the request packet actually hits the wire. On the other hand,
+ * transaction completion and hence execution of @callback may happen even
+ * before fw_send_request() returns.
*/
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int destination_id, int generation, int speed,
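A hedged driver-side usage sketch for the interface documented above: an asynchronous quadlet read whose result arrives in the completion callback. The embedding struct and function names are illustrative; per the kernel-doc, a request without payload passes NULL/0, and this sketch assumes the fw_transaction object outlives the completion:

struct my_probe {
	struct fw_transaction t;
	u32 cycle_time;
};

static void my_read_done(struct fw_card *card, int rcode,
			 void *payload, size_t length, void *data)
{
	struct my_probe *p = data;

	if (rcode == RCODE_COMPLETE)
		p->cycle_time = be32_to_cpu(*(__be32 *)payload);
	/* else: one of the rcodes described above, e.g. RCODE_SEND_ERROR */
}

static void my_start_read(struct fw_card *card, struct my_probe *p,
			  int node_id, int generation)
{
	fw_send_request(card, &p->t, TCODE_READ_QUADLET_REQUEST,
			node_id, generation, SCODE_100,
			CSR_REGISTER_BASE + CSR_CYCLE_TIME,
			NULL, 0, my_read_done, p);
}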
@@ -339,7 +352,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
setup_timer(&t->split_timeout_timer,
split_transaction_timeout_callback, (unsigned long)t);
/* FIXME: start this timer later, relative to t->timestamp */
- mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
+ mod_timer(&t->split_timeout_timer,
+ jiffies + card->split_timeout_jiffies);
t->callback = callback;
t->callback_data = callback_data;
@@ -374,9 +388,11 @@ static void transaction_callback(struct fw_card *card, int rcode,
}
/**
- * fw_run_transaction - send request and sleep until transaction is completed
+ * fw_run_transaction() - send request and sleep until transaction is completed
*
- * Returns the RCODE.
+ * Returns the RCODE. See fw_send_request() for parameter documentation.
+ * Unlike fw_send_request(), @data points to the payload of the request or/and
+ * to the payload of the response.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
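And a hedged sketch of the blocking variant, mirroring the fw_run_transaction() call added to core-card.c earlier in this patch; here the payload buffer also receives the response data:

static u32 read_config_rom_quadlet_sketch(struct fw_card *card,
					  int node_id, int generation)
{
	__be32 data;
	int rcode;

	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				   node_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
				   &data, 4);

	return rcode == RCODE_COMPLETE ? be32_to_cpu(data) : 0;
}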
@@ -417,9 +433,21 @@ void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
long timeout = DIV_ROUND_UP(HZ, 10);
- u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
- PHY_CONFIG_ROOT_ID(node_id) |
- PHY_CONFIG_GAP_COUNT(gap_count);
+ u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);
+
+ if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
+ data |= PHY_CONFIG_ROOT_ID(node_id);
+
+ if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
+ gap_count = card->driver->read_phy_reg(card, 1);
+ if (gap_count < 0)
+ return;
+
+ gap_count &= 63;
+ if (gap_count == 63)
+ return;
+ }
+ data |= PHY_CONFIG_GAP_COUNT(gap_count);
mutex_lock(&phy_config_mutex);
@@ -494,9 +522,9 @@ static bool is_in_fcp_region(u64 offset, size_t length)
}
/**
- * fw_core_add_address_handler - register for incoming requests
- * @handler: callback
- * @region: region in the IEEE 1212 node space address range
+ * fw_core_add_address_handler() - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
@@ -519,8 +547,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
- region->end & 0xffff000000000003ULL ||
region->start >= region->end ||
+ region->end > 0x0001000000000000ULL ||
handler->length & 3 ||
handler->length == 0)
return -EINVAL;
@@ -551,7 +579,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
EXPORT_SYMBOL(fw_core_add_address_handler);
/**
- * fw_core_remove_address_handler - unregister an address handler
+ * fw_core_remove_address_handler() - unregister an address handler
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
@@ -580,6 +608,41 @@ static void free_response_callback(struct fw_packet *packet,
kfree(request);
}
+int fw_get_response_length(struct fw_request *r)
+{
+ int tcode, ext_tcode, data_length;
+
+ tcode = HEADER_GET_TCODE(r->request_header[0]);
+
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ return 0;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ return 4;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
+ return data_length;
+
+ case TCODE_LOCK_REQUEST:
+ ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
+ data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
+ switch (ext_tcode) {
+ case EXTCODE_FETCH_ADD:
+ case EXTCODE_LITTLE_ADD:
+ return data_length;
+ default:
+ return data_length / 2;
+ }
+
+ default:
+ WARN(1, "wrong tcode %d", tcode);
+ return 0;
+ }
+}
+
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length)
{
@@ -631,18 +694,35 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
break;
default:
- WARN(1, KERN_ERR "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d", tcode);
}
response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);
-static struct fw_request *allocate_request(struct fw_packet *p)
+static u32 compute_split_timeout_timestamp(struct fw_card *card,
+ u32 request_timestamp)
+{
+ unsigned int cycles;
+ u32 timestamp;
+
+ cycles = card->split_timeout_cycles;
+ cycles += request_timestamp & 0x1fff;
+
+ timestamp = request_timestamp & ~0x1fff;
+ timestamp += (cycles / 8000) << 13;
+ timestamp |= cycles % 8000;
+
+ return timestamp;
+}
+
+static struct fw_request *allocate_request(struct fw_card *card,
+ struct fw_packet *p)
{
struct fw_request *request;
u32 *data, length;
- int request_tcode, t;
+ int request_tcode;
request_tcode = HEADER_GET_TCODE(p->header[0]);
switch (request_tcode) {
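compute_split_timeout_timestamp() above works on the 16-bit cycle-timer timestamp format: a 3-bit seconds count in bits 15..13 and a cycle count 0..7999 in bits 12..0, so an overflow of the cycle field must carry into the seconds field. A worked example with the default split_timeout_cycles of 800 (annotation only):

/*
 *   request_timestamp = (1 << 13) | 7900          1 s, cycle 7900
 *   cycles            = 800 + 7900 = 8700
 *   timestamp         = (1 << 13)                 keep the seconds field
 *                       + ((8700 / 8000) << 13)   carry one second
 *                       | (8700 % 8000)           cycle 700
 *                     = (2 << 13) | 700           2 s, cycle 700
 */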
@@ -677,14 +757,9 @@ static struct fw_request *allocate_request(struct fw_packet *p)
if (request == NULL)
return NULL;
- t = (p->timestamp & 0x1fff) + 4000;
- if (t >= 8000)
- t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
- else
- t = (p->timestamp & ~0x1fff) + t;
-
request->response.speed = p->speed;
- request->response.timestamp = t;
+ request->response.timestamp =
+ compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -713,7 +788,8 @@ void fw_send_response(struct fw_card *card,
if (rcode == RCODE_COMPLETE)
fw_fill_response(&request->response, request->request_header,
- rcode, request->data, request->length);
+ rcode, request->data,
+ fw_get_response_length(request));
else
fw_fill_response(&request->response, request->request_header,
rcode, NULL, 0);
@@ -731,9 +807,11 @@ static void handle_exclusive_region_request(struct fw_card *card,
unsigned long flags;
int tcode, destination, source;
- tcode = HEADER_GET_TCODE(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ if (tcode == TCODE_LOCK_REQUEST)
+ tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
spin_lock_irqsave(&address_handler_lock, flags);
handler = lookup_enclosing_address_handler(&address_handler_list,
@@ -753,7 +831,7 @@ static void handle_exclusive_region_request(struct fw_card *card,
else
handler->address_callback(card, request,
tcode, destination, source,
- p->generation, p->speed, offset,
+ p->generation, offset,
request->data, request->length,
handler->callback_data);
}
@@ -791,8 +869,8 @@ static void handle_fcp_region_request(struct fw_card *card,
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, NULL, tcode,
destination, source,
- p->generation, p->speed,
- offset, request->data,
+ p->generation, offset,
+ request->data,
request->length,
handler->callback_data);
}
@@ -809,7 +887,12 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
return;
- request = allocate_request(p);
+ if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
+ fw_cdev_handle_phy_packet(card, p);
+ return;
+ }
+
+ request = allocate_request(card, p);
if (request == NULL) {
/* FIXME: send statically allocated busy packet. */
return;
@@ -832,17 +915,20 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
unsigned long flags;
u32 *data;
size_t data_length;
- int tcode, tlabel, destination, source, rcode;
+ int tcode, tlabel, source, rcode;
- tcode = HEADER_GET_TCODE(p->header[0]);
- tlabel = HEADER_GET_TLABEL(p->header[0]);
- destination = HEADER_GET_DESTINATION(p->header[0]);
- source = HEADER_GET_SOURCE(p->header[1]);
- rcode = HEADER_GET_RCODE(p->header[1]);
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ tlabel = HEADER_GET_TLABEL(p->header[0]);
+ source = HEADER_GET_SOURCE(p->header[1]);
+ rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
+ if (!del_timer(&t->split_timeout_timer)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -851,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link == &card->transaction_list) {
+ timed_out:
fw_notify("Unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
@@ -885,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- del_timer_sync(&t->split_timeout_timer);
-
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
@@ -903,8 +988,8 @@ static const struct fw_address_region topology_map_region =
static void handle_topology_map(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
int start;
@@ -933,19 +1018,97 @@ static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
+static void update_split_timeout(struct fw_card *card)
+{
+ unsigned int cycles;
+
+ cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+
+ cycles = max(cycles, 800u); /* minimum as per the spec */
+ cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
+
+ card->split_timeout_cycles = cycles;
+ card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+}
+
static void handle_registers(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
+ unsigned long flags;
switch (reg) {
+ case CSR_PRIORITY_BUDGET:
+ if (!card->priority_budget_implemented) {
+ rcode = RCODE_ADDRESS_ERROR;
+ break;
+ }
+ /* else fall through */
+
+ case CSR_NODE_IDS:
+ /*
+ * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
+ * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
+ */
+ /* fall through */
+
+ case CSR_STATE_CLEAR:
+ case CSR_STATE_SET:
case CSR_CYCLE_TIME:
- if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
- *data = cpu_to_be32(card->driver->get_cycle_time(card));
+ case CSR_BUS_TIME:
+ case CSR_BUSY_TIMEOUT:
+ if (tcode == TCODE_READ_QUADLET_REQUEST)
+ *data = cpu_to_be32(card->driver->read_csr(card, reg));
+ else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+ card->driver->write_csr(card, reg, be32_to_cpu(*data));
+ else
+ rcode = RCODE_TYPE_ERROR;
+ break;
+
+ case CSR_RESET_START:
+ if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+ card->driver->write_csr(card, CSR_STATE_CLEAR,
+ CSR_STATE_BIT_ABDICATE);
+ else
+ rcode = RCODE_TYPE_ERROR;
+ break;
+
+ case CSR_SPLIT_TIMEOUT_HI:
+ if (tcode == TCODE_READ_QUADLET_REQUEST) {
+ *data = cpu_to_be32(card->split_timeout_hi);
+ } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
+ spin_lock_irqsave(&card->lock, flags);
+ card->split_timeout_hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ rcode = RCODE_TYPE_ERROR;
+ }
+ break;
+
+ case CSR_SPLIT_TIMEOUT_LO:
+ if (tcode == TCODE_READ_QUADLET_REQUEST) {
+ *data = cpu_to_be32(card->split_timeout_lo);
+ } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
+ spin_lock_irqsave(&card->lock, flags);
+ card->split_timeout_lo =
+ be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ rcode = RCODE_TYPE_ERROR;
+ }
+ break;
+
+ case CSR_MAINT_UTILITY:
+ if (tcode == TCODE_READ_QUADLET_REQUEST)
+ *data = card->maint_utility_register;
+ else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+ card->maint_utility_register = *data;
else
rcode = RCODE_TYPE_ERROR;
break;
@@ -975,12 +1138,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
BUG();
break;
- case CSR_BUSY_TIMEOUT:
- /* FIXME: Implement this. */
-
- case CSR_BUS_TIME:
- /* Useless without initialization by the bus manager. */
-
default:
rcode = RCODE_ADDRESS_ERROR;
break;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0ecfcd95f4c..e6239f971be 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -38,6 +38,9 @@ struct fw_packet;
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30)
+#define CSR_STATE_BIT_CMSTR (1 << 8)
+#define CSR_STATE_BIT_ABDICATE (1 << 10)
+
struct fw_card_driver {
/*
* Enable the given card with the given initial config rom.
@@ -48,6 +51,7 @@ struct fw_card_driver {
int (*enable)(struct fw_card *card,
const __be32 *config_rom, size_t length);
+ int (*read_phy_reg)(struct fw_card *card, int address);
int (*update_phy_reg)(struct fw_card *card, int address,
int clear_bits, int set_bits);
@@ -75,7 +79,8 @@ struct fw_card_driver {
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
- u32 (*get_cycle_time)(struct fw_card *card);
+ u32 (*read_csr)(struct fw_card *card, int csr_offset);
+ void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
@@ -85,6 +90,8 @@ struct fw_card_driver {
int (*start_iso)(struct fw_iso_context *ctx,
s32 cycle, u32 sync, u32 tags);
+ int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
+
int (*queue_iso)(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -98,8 +105,8 @@ void fw_card_initialize(struct fw_card *card,
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(__be32 *block);
+void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -123,6 +130,7 @@ extern const struct file_operations fw_device_ops;
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
+void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
@@ -192,7 +200,7 @@ static inline void fw_node_put(struct fw_node *node)
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
- int generation, int self_id_count, u32 *self_ids);
+ int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
/*
@@ -209,6 +217,7 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
+#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe)
#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
@@ -218,9 +227,18 @@ static inline bool is_next_generation(int new_generation, int old_generation)
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
+
+#define FW_PHY_CONFIG_NO_NODE_ID -1
+#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
+static inline bool is_ping_packet(u32 *data)
+{
+ return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
+}
+
#endif /* _FIREWIRE_CORE_H */
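is_ping_packet() relies on two properties: a PHY packet's second quadlet is the bitwise complement of its first, and masking the first with 0xc0ffffff leaves only the phy_ID field (bits 29..24) of a PHY configuration-format quadlet whose R and T bits are clear, which is what 1394a calls a ping. Illustrative values (not from the patch):

/* A ping addressed to phy_ID 5: */
u32 ping[2] = { 5 << 24, ~(5 << 24) };	/* 0x05000000, 0xfaffffff */

/* is_ping_packet(ping) is true because
 *   0x05000000 & 0xc0ffffff == 0     only the phy_ID bits are set
 *   ~0x05000000 == 0xfaffffff        second quadlet is the complement
 */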
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 7142eeec807..18fdd9703b4 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -8,7 +8,6 @@
#include <linux/bug.h>
#include <linux/device.h>
-#include <linux/ethtool.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/highmem.h>
@@ -579,7 +578,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (!peer) {
fw_notify("No peer for ARP packet from %016llx\n",
(unsigned long long)peer_guid);
- goto failed_proto;
+ goto no_peer;
}
/*
@@ -656,7 +655,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
return 0;
- failed_proto:
+ no_peer:
net->stats.rx_errors++;
net->stats.rx_dropped++;
@@ -664,7 +663,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return 0;
+ return -ENOENT;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -701,7 +700,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fw_error("out of memory\n");
net->stats.rx_dropped++;
- return -1;
+ return -ENOMEM;
}
skb_reserve(skb, (net->hard_header_len + 15) & ~15);
memcpy(skb_put(skb, len), buf, len);
@@ -726,8 +725,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
- if (!peer)
- goto bad_proto;
+ if (!peer) {
+ retval = -ENOENT;
+ goto fail;
+ }
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
@@ -741,7 +742,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
- goto bad_proto;
+ goto fail;
}
peer->pdg_size++;
} else {
@@ -755,9 +756,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
- retval = -ENOMEM;
peer->pdg_size--;
- goto bad_proto;
+ retval = -ENOMEM;
+ goto fail;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -768,7 +769,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
- goto bad_proto;
+ retval = -ENOMEM;
+ goto fail;
}
}
} /* new datagram or add to existing one */
@@ -794,20 +796,19 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
-
- bad_proto:
+ fail:
spin_unlock_irqrestore(&dev->lock, flags);
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return 0;
+ return retval;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset, void *payload,
- size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
struct fwnet_device *dev = callback_data;
int rcode;
@@ -1359,17 +1360,6 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu)
return 0;
}
-static void fwnet_get_drvinfo(struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->bus_info, "ieee1394");
-}
-
-static const struct ethtool_ops fwnet_ethtool_ops = {
- .get_drvinfo = fwnet_get_drvinfo,
-};
-
static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
@@ -1388,7 +1378,6 @@ static void fwnet_init_dev(struct net_device *net)
net->hard_header_len = FWNET_HLEN;
net->type = ARPHRD_IEEE1394;
net->tx_queue_len = 10;
- SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
}
/* caller must hold fwnet_device_mutex */
diff --git a/drivers/firewire/nosy-user.h b/drivers/firewire/nosy-user.h
new file mode 100644
index 00000000000..e48aa6200c7
--- /dev/null
+++ b/drivers/firewire/nosy-user.h
@@ -0,0 +1,25 @@
+#ifndef __nosy_user_h
+#define __nosy_user_h
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats)
+#define NOSY_IOC_START _IO('&', 1)
+#define NOSY_IOC_STOP _IO('&', 2)
+#define NOSY_IOC_FILTER _IOW('&', 2, __u32)
+
+struct nosy_stats {
+ __u32 total_packet_count;
+ __u32 lost_packet_count;
+};
+
+/*
+ * Format of packets returned from the kernel driver:
+ *
+ * quadlet with timestamp (microseconds, CPU endian)
+ * quadlet-padded packet data... (little endian)
+ * quadlet with ack (little endian)
+ */
+
+#endif /* __nosy_user_h */
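[Editorial aside, not part of the patch: a minimal user-space sketch of how a client might drive the snoop interface defined in this header, assuming the misc device shows up as /dev/nosy and that nosy-user.h is on the include path. The ioctl and read() semantics follow the driver code below: NOSY_IOC_FILTER takes the tcode mask by value, and each read() returns one packet laid out as described in the comment above.]

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "nosy-user.h"

int main(void)
{
	uint32_t buf[4096];
	struct nosy_stats stats;
	ssize_t len;
	int fd = open("/dev/nosy", O_RDONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, NOSY_IOC_FILTER, ~0u);	/* accept every tcode */
	ioctl(fd, NOSY_IOC_START);

	/* One packet per read(): timestamp quadlet (CPU endian),
	 * quadlet-padded packet data, ack quadlet (little endian). */
	len = read(fd, buf, sizeof buf);
	if (len >= 8)
		printf("t=%u us, %zd bytes including timestamp and ack\n",
		       (unsigned) buf[0], len);

	ioctl(fd, NOSY_IOC_STOP);
	if (ioctl(fd, NOSY_IOC_GET_STATS, &stats) == 0)
		printf("total %u, lost %u\n",
		       (unsigned) stats.total_packet_count,
		       (unsigned) stats.lost_packet_count);
	close(fd);
	return 0;
}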
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
new file mode 100644
index 00000000000..8528b10763e
--- /dev/null
+++ b/drivers/firewire/nosy.c
@@ -0,0 +1,721 @@
+/*
+ * nosy - Snoop mode driver for TI PCILynx 1394 controllers
+ * Copyright (C) 2002-2007 Kristian Høgsberg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched.h> /* required for linux/wait.h */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timex.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "nosy.h"
+#include "nosy-user.h"
+
+#define TCODE_PHY_PACKET 0x10
+#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
+
+static char driver_name[] = KBUILD_MODNAME;
+
+/* this is the physical layout of a PCL; its size is 128 bytes */
+struct pcl {
+ __le32 next;
+ __le32 async_error_next;
+ u32 user_data;
+ __le32 pcl_status;
+ __le32 remaining_transfer_count;
+ __le32 next_data_buffer;
+ struct {
+ __le32 control;
+ __le32 pointer;
+ } buffer[13];
+};
+
+struct packet {
+ unsigned int length;
+ char data[0];
+};
+
+struct packet_buffer {
+ char *data;
+ size_t capacity;
+ long total_packet_count, lost_packet_count;
+ atomic_t size;
+ struct packet *head, *tail;
+ wait_queue_head_t wait;
+};
+
+struct pcilynx {
+ struct pci_dev *pci_device;
+ __iomem char *registers;
+
+ struct pcl *rcv_start_pcl, *rcv_pcl;
+ __le32 *rcv_buffer;
+
+ dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;
+
+ spinlock_t client_list_lock;
+ struct list_head client_list;
+
+ struct miscdevice misc;
+ struct list_head link;
+ struct kref kref;
+};
+
+static inline struct pcilynx *
+lynx_get(struct pcilynx *lynx)
+{
+ kref_get(&lynx->kref);
+
+ return lynx;
+}
+
+static void
+lynx_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct pcilynx, kref));
+}
+
+static inline void
+lynx_put(struct pcilynx *lynx)
+{
+ kref_put(&lynx->kref, lynx_release);
+}
+
+struct client {
+ struct pcilynx *lynx;
+ u32 tcode_mask;
+ struct packet_buffer buffer;
+ struct list_head link;
+};
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static int
+packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
+{
+ buffer->data = kmalloc(capacity, GFP_KERNEL);
+ if (buffer->data == NULL)
+ return -ENOMEM;
+ buffer->head = (struct packet *) buffer->data;
+ buffer->tail = (struct packet *) buffer->data;
+ buffer->capacity = capacity;
+ buffer->lost_packet_count = 0;
+ atomic_set(&buffer->size, 0);
+ init_waitqueue_head(&buffer->wait);
+
+ return 0;
+}
+
+static void
+packet_buffer_destroy(struct packet_buffer *buffer)
+{
+ kfree(buffer->data);
+}
+
+static int
+packet_buffer_get(struct client *client, char __user *data, size_t user_length)
+{
+ struct packet_buffer *buffer = &client->buffer;
+ size_t length;
+ char *end;
+
+ if (wait_event_interruptible(buffer->wait,
+ atomic_read(&buffer->size) > 0) ||
+ list_empty(&client->lynx->link))
+ return -ERESTARTSYS;
+
+ if (atomic_read(&buffer->size) == 0)
+ return -ENODEV;
+
+ /* FIXME: Check length <= user_length. */
+
+ end = buffer->data + buffer->capacity;
+ length = buffer->head->length;
+
+ if (&buffer->head->data[length] < end) {
+ if (copy_to_user(data, buffer->head->data, length))
+ return -EFAULT;
+ buffer->head = (struct packet *) &buffer->head->data[length];
+ } else {
+ size_t split = end - buffer->head->data;
+
+ if (copy_to_user(data, buffer->head->data, split))
+ return -EFAULT;
+ if (copy_to_user(data + split, buffer->data, length - split))
+ return -EFAULT;
+ buffer->head = (struct packet *) &buffer->data[length - split];
+ }
+
+ /*
+ * Decrease buffer->size as the last thing, since this is what
+ * keeps the interrupt from overwriting the packet we are
+ * retrieving from the buffer.
+ */
+ atomic_sub(sizeof(struct packet) + length, &buffer->size);
+
+ return length;
+}
+
+static void
+packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
+{
+ char *end;
+
+ buffer->total_packet_count++;
+
+ if (buffer->capacity <
+ atomic_read(&buffer->size) + sizeof(struct packet) + length) {
+ buffer->lost_packet_count++;
+ return;
+ }
+
+ end = buffer->data + buffer->capacity;
+ buffer->tail->length = length;
+
+ if (&buffer->tail->data[length] < end) {
+ memcpy(buffer->tail->data, data, length);
+ buffer->tail = (struct packet *) &buffer->tail->data[length];
+ } else {
+ size_t split = end - buffer->tail->data;
+
+ memcpy(buffer->tail->data, data, split);
+ memcpy(buffer->data, data + split, length - split);
+ buffer->tail = (struct packet *) &buffer->data[length - split];
+ }
+
+ /* Finally, adjust buffer size and wake up userspace reader. */
+
+ atomic_add(sizeof(struct packet) + length, &buffer->size);
+ wake_up_interruptible(&buffer->wait);
+}
+
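[Editorial aside, not part of the patch: a stripped-down illustration of the split copy that packet_buffer_put() and packet_buffer_get() above perform when a packet straddles the end of the ring buffer. The buffer size and payload are made up, and the struct packet header, the atomic size accounting, and the wakeups are deliberately left out.]

#include <stdio.h>
#include <string.h>

#define CAPACITY 16

int main(void)
{
	char ring[CAPACITY];
	char *end = ring + CAPACITY;
	char *tail = ring + 12;			/* pretend 12 bytes are already used */
	const char msg[] = "wrapme";		/* 7 bytes including '\0' */
	size_t length = sizeof msg;

	if (tail + length < end) {
		memcpy(tail, msg, length);	/* fits in one piece */
		tail += length;
	} else {
		size_t split = end - tail;	/* straddles the end: two pieces */

		memcpy(tail, msg, split);
		memcpy(ring, msg + split, length - split);
		tail = ring + (length - split);
	}

	printf("tail wrapped to offset %td\n", tail - ring);	/* prints 3 */
	return 0;
}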
+static inline void
+reg_write(struct pcilynx *lynx, int offset, u32 data)
+{
+ writel(data, lynx->registers + offset);
+}
+
+static inline u32
+reg_read(struct pcilynx *lynx, int offset)
+{
+ return readl(lynx->registers + offset);
+}
+
+static inline void
+reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
+{
+ reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
+}
+
+/*
+ * Maybe the pcl programs could be set up to just append data instead
+ * of using a whole packet.
+ */
+static inline void
+run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
+ int dmachan)
+{
+ reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
+ reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
+ DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
+}
+
+static int
+set_phy_reg(struct pcilynx *lynx, int addr, int val)
+{
+ if (addr > 15) {
+ dev_err(&lynx->pci_device->dev,
+ "PHY register address %d out of range\n", addr);
+ return -1;
+ }
+ if (val > 0xff) {
+ dev_err(&lynx->pci_device->dev,
+ "PHY register value %d out of range\n", val);
+ return -1;
+ }
+ reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
+ LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));
+
+ return 0;
+}
+
+static int
+nosy_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct client *client;
+ struct pcilynx *tmp, *lynx = NULL;
+
+ mutex_lock(&card_mutex);
+ list_for_each_entry(tmp, &card_list, link)
+ if (tmp->misc.minor == minor) {
+ lynx = lynx_get(tmp);
+ break;
+ }
+ mutex_unlock(&card_mutex);
+ if (lynx == NULL)
+ return -ENODEV;
+
+ client = kmalloc(sizeof *client, GFP_KERNEL);
+ if (client == NULL)
+ goto fail;
+
+ client->tcode_mask = ~0;
+ client->lynx = lynx;
+ INIT_LIST_HEAD(&client->link);
+
+ if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
+ goto fail;
+
+ file->private_data = client;
+
+ return 0;
+fail:
+ kfree(client);
+ lynx_put(lynx);
+
+ return -ENOMEM;
+}
+
+static int
+nosy_release(struct inode *inode, struct file *file)
+{
+ struct client *client = file->private_data;
+ struct pcilynx *lynx = client->lynx;
+
+ spin_lock_irq(&lynx->client_list_lock);
+ list_del_init(&client->link);
+ spin_unlock_irq(&lynx->client_list_lock);
+
+ packet_buffer_destroy(&client->buffer);
+ kfree(client);
+ lynx_put(lynx);
+
+ return 0;
+}
+
+static unsigned int
+nosy_poll(struct file *file, poll_table *pt)
+{
+ struct client *client = file->private_data;
+ unsigned int ret = 0;
+
+ poll_wait(file, &client->buffer.wait, pt);
+
+ if (atomic_read(&client->buffer.size) > 0)
+ ret = POLLIN | POLLRDNORM;
+
+ if (list_empty(&client->lynx->link))
+ ret |= POLLHUP;
+
+ return ret;
+}
+
+static ssize_t
+nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
+{
+ struct client *client = file->private_data;
+
+ return packet_buffer_get(client, buffer, count);
+}
+
+static long
+nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct client *client = file->private_data;
+ spinlock_t *client_list_lock = &client->lynx->client_list_lock;
+ struct nosy_stats stats;
+
+ switch (cmd) {
+ case NOSY_IOC_GET_STATS:
+ spin_lock_irq(client_list_lock);
+ stats.total_packet_count = client->buffer.total_packet_count;
+ stats.lost_packet_count = client->buffer.lost_packet_count;
+ spin_unlock_irq(client_list_lock);
+
+ if (copy_to_user((void __user *) arg, &stats, sizeof stats))
+ return -EFAULT;
+ else
+ return 0;
+
+ case NOSY_IOC_START:
+ spin_lock_irq(client_list_lock);
+ list_add_tail(&client->link, &client->lynx->client_list);
+ spin_unlock_irq(client_list_lock);
+
+ return 0;
+
+ case NOSY_IOC_STOP:
+ spin_lock_irq(client_list_lock);
+ list_del_init(&client->link);
+ spin_unlock_irq(client_list_lock);
+
+ return 0;
+
+ case NOSY_IOC_FILTER:
+ spin_lock_irq(client_list_lock);
+ client->tcode_mask = arg;
+ spin_unlock_irq(client_list_lock);
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ /* Flush buffer, configure filter. */
+ }
+}
+
+static const struct file_operations nosy_ops = {
+ .owner = THIS_MODULE,
+ .read = nosy_read,
+ .unlocked_ioctl = nosy_ioctl,
+ .poll = nosy_poll,
+ .open = nosy_open,
+ .release = nosy_release,
+};
+
+#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
+
+static void
+packet_irq_handler(struct pcilynx *lynx)
+{
+ struct client *client;
+ u32 tcode_mask, tcode;
+ size_t length;
+ struct timeval tv;
+
+ /* FIXME: Also report rcv_speed. */
+
+ length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
+ tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
+
+ do_gettimeofday(&tv);
+ lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;
+
+ if (length == PHY_PACKET_SIZE)
+ tcode_mask = 1 << TCODE_PHY_PACKET;
+ else
+ tcode_mask = 1 << tcode;
+
+ spin_lock(&lynx->client_list_lock);
+
+ list_for_each_entry(client, &lynx->client_list, link)
+ if (client->tcode_mask & tcode_mask)
+ packet_buffer_put(&client->buffer,
+ lynx->rcv_buffer, length + 4);
+
+ spin_unlock(&lynx->client_list_lock);
+}
+
+static void
+bus_reset_irq_handler(struct pcilynx *lynx)
+{
+ struct client *client;
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+
+ spin_lock(&lynx->client_list_lock);
+
+ list_for_each_entry(client, &lynx->client_list, link)
+ packet_buffer_put(&client->buffer, &tv.tv_usec, 4);
+
+ spin_unlock(&lynx->client_list_lock);
+}
+
+static irqreturn_t
+irq_handler(int irq, void *device)
+{
+ struct pcilynx *lynx = device;
+ u32 pci_int_status;
+
+ pci_int_status = reg_read(lynx, PCI_INT_STATUS);
+
+ if (pci_int_status == ~0)
+ /* Card was ejected. */
+ return IRQ_NONE;
+
+ if ((pci_int_status & PCI_INT_INT_PEND) == 0)
+ /* Not our interrupt, bail out quickly. */
+ return IRQ_NONE;
+
+ if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
+ u32 link_int_status;
+
+ link_int_status = reg_read(lynx, LINK_INT_STATUS);
+ reg_write(lynx, LINK_INT_STATUS, link_int_status);
+
+ if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
+ bus_reset_irq_handler(lynx);
+ }
+
+ /* Clear the PCI_INT_STATUS register only after clearing the
+ * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
+ * be set again immediately. */
+
+ reg_write(lynx, PCI_INT_STATUS, pci_int_status);
+
+ if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
+ packet_irq_handler(lynx);
+ run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+remove_card(struct pci_dev *dev)
+{
+ struct pcilynx *lynx = pci_get_drvdata(dev);
+ struct client *client;
+
+ mutex_lock(&card_mutex);
+ list_del_init(&lynx->link);
+ misc_deregister(&lynx->misc);
+ mutex_unlock(&card_mutex);
+
+ reg_write(lynx, PCI_INT_ENABLE, 0);
+ free_irq(lynx->pci_device->irq, lynx);
+
+ spin_lock_irq(&lynx->client_list_lock);
+ list_for_each_entry(client, &lynx->client_list, link)
+ wake_up_interruptible(&client->buffer.wait);
+ spin_unlock_irq(&lynx->client_list_lock);
+
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_pcl, lynx->rcv_pcl_bus);
+ pci_free_consistent(lynx->pci_device, PAGE_SIZE,
+ lynx->rcv_buffer, lynx->rcv_buffer_bus);
+
+ iounmap(lynx->registers);
+ pci_disable_device(dev);
+ lynx_put(lynx);
+}
+
+#define RCV_BUFFER_SIZE (16 * 1024)
+
+static int __devinit
+add_card(struct pci_dev *dev, const struct pci_device_id *unused)
+{
+ struct pcilynx *lynx;
+ u32 p, end;
+ int ret, i;
+
+ if (pci_set_dma_mask(dev, 0xffffffff)) {
+ dev_err(&dev->dev,
+ "DMA address limits not supported for PCILynx hardware\n");
+ return -ENXIO;
+ }
+ if (pci_enable_device(dev)) {
+ dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
+ return -ENXIO;
+ }
+ pci_set_master(dev);
+
+ lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
+ if (lynx == NULL) {
+ dev_err(&dev->dev, "Failed to allocate control structure\n");
+ ret = -ENOMEM;
+ goto fail_disable;
+ }
+ lynx->pci_device = dev;
+ pci_set_drvdata(dev, lynx);
+
+ spin_lock_init(&lynx->client_list_lock);
+ INIT_LIST_HEAD(&lynx->client_list);
+ kref_init(&lynx->kref);
+
+ lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
+ PCILYNX_MAX_REGISTER);
+
+ lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
+ sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
+ lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
+ sizeof(struct pcl), &lynx->rcv_pcl_bus);
+ lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
+ RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
+ if (lynx->rcv_start_pcl == NULL ||
+ lynx->rcv_pcl == NULL ||
+ lynx->rcv_buffer == NULL) {
+ dev_err(&dev->dev, "Failed to allocate receive buffer\n");
+ ret = -ENOMEM;
+ goto fail_deallocate;
+ }
+ lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
+ lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
+ lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);
+
+ lynx->rcv_pcl->buffer[0].control =
+ cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
+ lynx->rcv_pcl->buffer[0].pointer =
+ cpu_to_le32(lynx->rcv_buffer_bus + 4);
+ p = lynx->rcv_buffer_bus + 2048;
+ end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
+ for (i = 1; p < end; i++, p += 2048) {
+ lynx->rcv_pcl->buffer[i].control =
+ cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
+ lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
+ }
+ lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
+
+ reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
+ /* Fix buggy cards with autoboot pin not tied low: */
+ reg_write(lynx, DMA0_CHAN_CTRL, 0);
+ reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
+
+#if 0
+ /* now, looking for PHY register set */
+ if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
+ lynx->phyic.reg_1394a = 1;
+ PRINT(KERN_INFO, lynx->id,
+ "found 1394a conform PHY (using extended register set)");
+ lynx->phyic.vendor = get_phy_vendorid(lynx);
+ lynx->phyic.product = get_phy_productid(lynx);
+ } else {
+ lynx->phyic.reg_1394a = 0;
+ PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
+ }
+#endif
+
+ /* Setup the general receive FIFO max size. */
+ reg_write(lynx, FIFO_SIZES, 255);
+
+ reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
+
+ reg_write(lynx, LINK_INT_ENABLE,
+ LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
+ LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
+ LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
+ LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
+ LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
+
+ /* Disable the L flag in self ID packets. */
+ set_phy_reg(lynx, 4, 0);
+
+ /* Put this baby into snoop mode */
+ reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
+
+ run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
+
+ if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
+ driver_name, lynx)) {
+ dev_err(&dev->dev,
+ "Failed to allocate shared interrupt %d\n", dev->irq);
+ ret = -EIO;
+ goto fail_deallocate;
+ }
+
+ lynx->misc.parent = &dev->dev;
+ lynx->misc.minor = MISC_DYNAMIC_MINOR;
+ lynx->misc.name = "nosy";
+ lynx->misc.fops = &nosy_ops;
+
+ mutex_lock(&card_mutex);
+ ret = misc_register(&lynx->misc);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to register misc char device\n");
+ mutex_unlock(&card_mutex);
+ goto fail_free_irq;
+ }
+ list_add_tail(&lynx->link, &card_list);
+ mutex_unlock(&card_mutex);
+
+ dev_info(&dev->dev,
+ "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
+
+ return 0;
+
+fail_free_irq:
+ reg_write(lynx, PCI_INT_ENABLE, 0);
+ free_irq(lynx->pci_device->irq, lynx);
+
+fail_deallocate:
+ if (lynx->rcv_start_pcl)
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
+ if (lynx->rcv_pcl)
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_pcl, lynx->rcv_pcl_bus);
+ if (lynx->rcv_buffer)
+ pci_free_consistent(lynx->pci_device, PAGE_SIZE,
+ lynx->rcv_buffer, lynx->rcv_buffer_bus);
+ iounmap(lynx->registers);
+ kfree(lynx);
+
+fail_disable:
+ pci_disable_device(dev);
+
+ return ret;
+}
+
+static struct pci_device_id pci_table[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_TI,
+ .device = PCI_DEVICE_ID_TI_PCILYNX,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { } /* Terminating entry */
+};
+
+static struct pci_driver lynx_pci_driver = {
+ .name = driver_name,
+ .id_table = pci_table,
+ .probe = add_card,
+ .remove = remove_card,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg");
+MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static int __init nosy_init(void)
+{
+ return pci_register_driver(&lynx_pci_driver);
+}
+
+static void __exit nosy_cleanup(void)
+{
+ pci_unregister_driver(&lynx_pci_driver);
+
+ pr_info("Unloaded %s\n", driver_name);
+}
+
+module_init(nosy_init);
+module_exit(nosy_cleanup);
diff --git a/drivers/firewire/nosy.h b/drivers/firewire/nosy.h
new file mode 100644
index 00000000000..078ff27f475
--- /dev/null
+++ b/drivers/firewire/nosy.h
@@ -0,0 +1,237 @@
+/*
+ * Chip register definitions for PCILynx chipset. Based on pcilynx.h
+ * from the Linux 1394 drivers, but modified a bit so the names here
+ * match the specification exactly (even though they have weird names,
+ * like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent
+ * reject" etc.)
+ */
+
+#define PCILYNX_MAX_REGISTER 0xfff
+#define PCILYNX_MAX_MEMORY 0xffff
+
+#define PCI_LATENCY_CACHELINE 0x0c
+
+#define MISC_CONTROL 0x40
+#define MISC_CONTROL_SWRESET (1<<0)
+
+#define SERIAL_EEPROM_CONTROL 0x44
+
+#define PCI_INT_STATUS 0x48
+#define PCI_INT_ENABLE 0x4c
+/* status and enable have identical bit numbers */
+#define PCI_INT_INT_PEND (1<<31)
+#define PCI_INT_FRC_INT (1<<30)
+#define PCI_INT_SLV_ADR_PERR (1<<28)
+#define PCI_INT_SLV_DAT_PERR (1<<27)
+#define PCI_INT_MST_DAT_PERR (1<<26)
+#define PCI_INT_MST_DEV_TO (1<<25)
+#define PCI_INT_INT_SLV_TO (1<<23)
+#define PCI_INT_AUX_TO (1<<18)
+#define PCI_INT_AUX_INT (1<<17)
+#define PCI_INT_P1394_INT (1<<16)
+#define PCI_INT_DMA4_PCL (1<<9)
+#define PCI_INT_DMA4_HLT (1<<8)
+#define PCI_INT_DMA3_PCL (1<<7)
+#define PCI_INT_DMA3_HLT (1<<6)
+#define PCI_INT_DMA2_PCL (1<<5)
+#define PCI_INT_DMA2_HLT (1<<4)
+#define PCI_INT_DMA1_PCL (1<<3)
+#define PCI_INT_DMA1_HLT (1<<2)
+#define PCI_INT_DMA0_PCL (1<<1)
+#define PCI_INT_DMA0_HLT (1<<0)
+/* all DMA interrupts combined: */
+#define PCI_INT_DMA_ALL 0x3ff
+
+#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
+#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
+
+#define LBUS_ADDR 0xb4
+#define LBUS_ADDR_SEL_RAM (0x0<<16)
+#define LBUS_ADDR_SEL_ROM (0x1<<16)
+#define LBUS_ADDR_SEL_AUX (0x2<<16)
+#define LBUS_ADDR_SEL_ZV (0x3<<16)
+
+#define GPIO_CTRL_A 0xb8
+#define GPIO_CTRL_B 0xbc
+#define GPIO_DATA_BASE 0xc0
+
+#define DMA_BREG(base, chan) (base + chan * 0x20)
+#define DMA_SREG(base, chan) (base + chan * 0x10)
+
+#define PCL_NEXT_INVALID (1<<0)
+
+/* transfer commands */
+#define PCL_CMD_RCV (0x1<<24)
+#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
+#define PCL_CMD_XMT (0x2<<24)
+#define PCL_CMD_UNFXMT (0xc<<24)
+#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
+#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
+
+/* aux commands */
+#define PCL_CMD_NOP (0x0<<24)
+#define PCL_CMD_LOAD (0x3<<24)
+#define PCL_CMD_STOREQ (0x4<<24)
+#define PCL_CMD_STORED (0xb<<24)
+#define PCL_CMD_STORE0 (0x5<<24)
+#define PCL_CMD_STORE1 (0x6<<24)
+#define PCL_CMD_COMPARE (0xe<<24)
+#define PCL_CMD_SWAP_COMPARE (0xf<<24)
+#define PCL_CMD_ADD (0xd<<24)
+#define PCL_CMD_BRANCH (0x7<<24)
+
+/* BRANCH condition codes */
+#define PCL_COND_DMARDY_SET (0x1<<20)
+#define PCL_COND_DMARDY_CLEAR (0x2<<20)
+
+#define PCL_GEN_INTR (1<<19)
+#define PCL_LAST_BUFF (1<<18)
+#define PCL_LAST_CMD (PCL_LAST_BUFF)
+#define PCL_WAITSTAT (1<<17)
+#define PCL_BIGENDIAN (1<<16)
+#define PCL_ISOMODE (1<<12)
+
+#define DMA0_PREV_PCL 0x100
+#define DMA1_PREV_PCL 0x120
+#define DMA2_PREV_PCL 0x140
+#define DMA3_PREV_PCL 0x160
+#define DMA4_PREV_PCL 0x180
+#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
+
+#define DMA0_CURRENT_PCL 0x104
+#define DMA1_CURRENT_PCL 0x124
+#define DMA2_CURRENT_PCL 0x144
+#define DMA3_CURRENT_PCL 0x164
+#define DMA4_CURRENT_PCL 0x184
+#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
+
+#define DMA0_CHAN_STAT 0x10c
+#define DMA1_CHAN_STAT 0x12c
+#define DMA2_CHAN_STAT 0x14c
+#define DMA3_CHAN_STAT 0x16c
+#define DMA4_CHAN_STAT 0x18c
+#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
+/* CHAN_STATUS registers share bits */
+#define DMA_CHAN_STAT_SELFID (1<<31)
+#define DMA_CHAN_STAT_ISOPKT (1<<30)
+#define DMA_CHAN_STAT_PCIERR (1<<29)
+#define DMA_CHAN_STAT_PKTERR (1<<28)
+#define DMA_CHAN_STAT_PKTCMPL (1<<27)
+#define DMA_CHAN_STAT_SPECIALACK (1<<14)
+
+#define DMA0_CHAN_CTRL 0x110
+#define DMA1_CHAN_CTRL 0x130
+#define DMA2_CHAN_CTRL 0x150
+#define DMA3_CHAN_CTRL 0x170
+#define DMA4_CHAN_CTRL 0x190
+#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
+/* CHAN_CTRL registers share bits */
+#define DMA_CHAN_CTRL_ENABLE (1<<31)
+#define DMA_CHAN_CTRL_BUSY (1<<30)
+#define DMA_CHAN_CTRL_LINK (1<<29)
+
+#define DMA0_READY 0x114
+#define DMA1_READY 0x134
+#define DMA2_READY 0x154
+#define DMA3_READY 0x174
+#define DMA4_READY 0x194
+#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
+
+#define DMA_GLOBAL_REGISTER 0x908
+
+#define FIFO_SIZES 0xa00
+
+#define FIFO_CONTROL 0xa10
+#define FIFO_CONTROL_GRF_FLUSH (1<<4)
+#define FIFO_CONTROL_ITF_FLUSH (1<<3)
+#define FIFO_CONTROL_ATF_FLUSH (1<<2)
+
+#define FIFO_XMIT_THRESHOLD 0xa14
+
+#define DMA0_WORD0_CMP_VALUE 0xb00
+#define DMA1_WORD0_CMP_VALUE 0xb10
+#define DMA2_WORD0_CMP_VALUE 0xb20
+#define DMA3_WORD0_CMP_VALUE 0xb30
+#define DMA4_WORD0_CMP_VALUE 0xb40
+#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
+
+#define DMA0_WORD0_CMP_ENABLE 0xb04
+#define DMA1_WORD0_CMP_ENABLE 0xb14
+#define DMA2_WORD0_CMP_ENABLE 0xb24
+#define DMA3_WORD0_CMP_ENABLE 0xb34
+#define DMA4_WORD0_CMP_ENABLE 0xb44
+#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan))
+
+#define DMA0_WORD1_CMP_VALUE 0xb08
+#define DMA1_WORD1_CMP_VALUE 0xb18
+#define DMA2_WORD1_CMP_VALUE 0xb28
+#define DMA3_WORD1_CMP_VALUE 0xb38
+#define DMA4_WORD1_CMP_VALUE 0xb48
+#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
+
+#define DMA0_WORD1_CMP_ENABLE 0xb0c
+#define DMA1_WORD1_CMP_ENABLE 0xb1c
+#define DMA2_WORD1_CMP_ENABLE 0xb2c
+#define DMA3_WORD1_CMP_ENABLE 0xb3c
+#define DMA4_WORD1_CMP_ENABLE 0xb4c
+#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan))
+/* word 1 compare enable flags */
+#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
+#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
+#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
+#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
+#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
+#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
+#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
+
+#define LINK_ID 0xf00
+#define LINK_ID_BUS(id) (id<<22)
+#define LINK_ID_NODE(id) (id<<16)
+
+#define LINK_CONTROL 0xf04
+#define LINK_CONTROL_BUSY (1<<29)
+#define LINK_CONTROL_TX_ISO_EN (1<<26)
+#define LINK_CONTROL_RX_ISO_EN (1<<25)
+#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
+#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
+#define LINK_CONTROL_RESET_TX (1<<21)
+#define LINK_CONTROL_RESET_RX (1<<20)
+#define LINK_CONTROL_CYCMASTER (1<<11)
+#define LINK_CONTROL_CYCSOURCE (1<<10)
+#define LINK_CONTROL_CYCTIMEREN (1<<9)
+#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
+#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
+
+#define CYCLE_TIMER 0xf08
+
+#define LINK_PHY 0xf0c
+#define LINK_PHY_READ (1<<31)
+#define LINK_PHY_WRITE (1<<30)
+#define LINK_PHY_ADDR(addr) (addr<<24)
+#define LINK_PHY_WDATA(data) (data<<16)
+#define LINK_PHY_RADDR(addr) (addr<<8)
+
+#define LINK_INT_STATUS 0xf14
+#define LINK_INT_ENABLE 0xf18
+/* status and enable have identical bit numbers */
+#define LINK_INT_LINK_INT (1<<31)
+#define LINK_INT_PHY_TIME_OUT (1<<30)
+#define LINK_INT_PHY_REG_RCVD (1<<29)
+#define LINK_INT_PHY_BUSRESET (1<<28)
+#define LINK_INT_TX_RDY (1<<26)
+#define LINK_INT_RX_DATA_RDY (1<<25)
+#define LINK_INT_IT_STUCK (1<<20)
+#define LINK_INT_AT_STUCK (1<<19)
+#define LINK_INT_SNTRJ (1<<17)
+#define LINK_INT_HDR_ERR (1<<16)
+#define LINK_INT_TC_ERR (1<<15)
+#define LINK_INT_CYC_SEC (1<<11)
+#define LINK_INT_CYC_STRT (1<<10)
+#define LINK_INT_CYC_DONE (1<<9)
+#define LINK_INT_CYC_PEND (1<<8)
+#define LINK_INT_CYC_LOST (1<<7)
+#define LINK_INT_CYC_ARB_FAILED (1<<6)
+#define LINK_INT_GRF_OVER_FLOW (1<<5)
+#define LINK_INT_ITF_UNDER_FLOW (1<<4)
+#define LINK_INT_ATF_UNDER_FLOW (1<<3)
+#define LINK_INT_IARB_FAILED (1<<0)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9f627e758cf..1b05896648b 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -32,11 +33,13 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/time.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -170,6 +173,10 @@ struct fw_ohci {
int generation;
int request_generation; /* for timestamping incoming requests */
unsigned quirks;
+ unsigned int pri_req_max;
+ u32 bus_time;
+ bool is_root;
+ bool csr_state_setclear_abdicate;
/*
* Spinlock for accessing fw_ohci data. Never call out of
@@ -177,16 +184,20 @@ struct fw_ohci {
*/
spinlock_t lock;
+ struct mutex phy_reg_mutex;
+
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
struct context at_request_ctx;
struct context at_response_ctx;
- u32 it_context_mask;
+ u32 it_context_mask; /* unoccupied IT contexts */
struct iso_context *it_context_list;
- u64 ir_context_channels;
- u32 ir_context_mask;
+ u64 ir_context_channels; /* unoccupied channels */
+ u32 ir_context_mask; /* unoccupied IR contexts */
struct iso_context *ir_context_list;
+ u64 mc_channels; /* channels in use by the multichannel IR context */
+ bool mc_allocated;
__be32 *config_rom;
dma_addr_t config_rom_bus;
@@ -231,12 +242,14 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
#define QUIRK_NO_1394A 8
+#define QUIRK_NO_MSI 16
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -247,8 +260,10 @@ static const struct {
QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
};
@@ -260,6 +275,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
+ ", disable MSI = " __stringify(QUIRK_NO_MSI)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -288,7 +304,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -298,6 +314,7 @@ static void log_irqs(u32 evt)
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
+ evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
@@ -305,7 +322,8 @@ static void log_irqs(u32 evt)
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
+ OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+ OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@@ -470,12 +488,17 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
if (val & OHCI1394_PhyControl_ReadDone)
return OHCI1394_PhyControl_ReadData(val);
- msleep(1);
+ /*
+ * Try a few times without waiting. Sleeping is necessary
+ * only when the link/PHY interface is busy.
+ */
+ if (i >= 3)
+ msleep(1);
}
fw_error("failed to read phy reg\n");
@@ -488,25 +511,23 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
reg_write(ohci, OHCI1394_PhyControl,
OHCI1394_PhyControl_Write(addr, val));
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
if (!(val & OHCI1394_PhyControl_WritePending))
return 0;
- msleep(1);
+ if (i >= 3)
+ msleep(1);
}
fw_error("failed to write phy reg\n");
return -EBUSY;
}
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int update_phy_reg(struct fw_ohci *ohci, int addr,
+ int clear_bits, int set_bits)
{
- struct fw_ohci *ohci = fw_ohci(card);
- int ret;
-
- ret = read_phy_reg(ohci, addr);
+ int ret = read_phy_reg(ohci, addr);
if (ret < 0)
return ret;
@@ -524,13 +545,38 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
int ret;
- ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+ ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
if (ret < 0)
return ret;
return read_phy_reg(ohci, addr);
}
+static int ohci_read_phy_reg(struct fw_card *card, int addr)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ ret = read_phy_reg(ohci, addr);
+ mutex_unlock(&ohci->phy_reg_mutex);
+
+ return ret;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
+ mutex_unlock(&ohci->phy_reg_mutex);
+
+ return ret;
+}
+
static int ar_context_add_page(struct ar_context *ctx)
{
struct device *dev = ctx->ohci->card.device;
@@ -553,6 +599,7 @@ static int ar_context_add_page(struct ar_context *ctx)
ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
ab->descriptor.branch_address = 0;
+ wmb(); /* finish init of new descriptors before branch_address update */
ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
ctx->last_buffer->next = ab;
ctx->last_buffer = ab;
@@ -648,7 +695,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
log_ar_at_event('R', p.speed, p.header, evt);
/*
- * The OHCI bus reset handler synthesizes a phy packet with
+ * Several controllers, notably from NEC and VIA, forget to
+ * write ack_complete status at PHY packet reception.
+ */
+ if (evt == OHCI1394_evt_no_status &&
+ (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+ p.ack = ACK_COMPLETE;
+
+ /*
+ * The OHCI bus reset handler synthesizes a PHY packet with
* the new generation number when a bus reset happens (see
* section 8.4.2.3). This helps us determine when a request
* was received and make sure we send the response in the same
@@ -940,6 +995,8 @@ static void context_append(struct context *ctx,
d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
desc->used += (z + extra) * sizeof(*d);
+
+ wmb(); /* finish init of new descriptors before branch_address update */
ctx->prev->branch_address = cpu_to_le32(d_bus | z);
ctx->prev = find_branch_descriptor(d, z);
@@ -1026,6 +1083,9 @@ static int at_context_queue_packet(struct context *ctx,
header[1] = cpu_to_le32(packet->header[0]);
header[2] = cpu_to_le32(packet->header[1]);
d[0].req_count = cpu_to_le16(12);
+
+ if (is_ping_packet(packet->header))
+ d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
break;
case 4:
@@ -1311,6 +1371,78 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
}
+static u32 cycle_timer_ticks(u32 cycle_timer)
+{
+ u32 ticks;
+
+ ticks = cycle_timer & 0xfff;
+ ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+ ticks += (3072 * 8000) * (cycle_timer >> 25);
+
+ return ticks;
+}
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ * - When the lowest six bits are wrapping around to zero, a read that happens
+ * at the same time will return garbage in the lowest ten bits.
+ * - When the cycleOffset field wraps around to zero, the cycleCount field is
+ * not incremented for about 60 ns.
+ * - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * difference between each two consecutive reads is approximately the same, i.e.
+ * less than twice the other. Furthermore, any negative difference indicates an
+ * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
+ * execute, so we have enough precision to compute the ratio of the differences.)
+ */
+static u32 get_cycle_time(struct fw_ohci *ohci)
+{
+ u32 c0, c1, c2;
+ u32 t0, t1, t2;
+ s32 diff01, diff12;
+ int i;
+
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+ if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+ i = 0;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ do {
+ c0 = c1;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ t0 = cycle_timer_ticks(c0);
+ t1 = cycle_timer_ticks(c1);
+ t2 = cycle_timer_ticks(c2);
+ diff01 = t1 - t0;
+ diff12 = t2 - t1;
+ } while ((diff01 <= 0 || diff12 <= 0 ||
+ diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+ && i++ < 20);
+ }
+
+ return c2;
+}
+
+/*
+ * This function has to be called at least every 64 seconds. The bus_time
+ * field stores not only the upper 25 bits of the BUS_TIME register but also
+ * the most significant bit of the cycle timer in bit 6 so that we can detect
+ * changes in this bit.
+ */
+static u32 update_bus_time(struct fw_ohci *ohci)
+{
+ u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
+
+ if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
+ ohci->bus_time += 0x40;
+
+ return ohci->bus_time | cycle_time_seconds;
+}
+
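[Editorial aside, not part of the patch: a worked example of the field split that cycle_timer_ticks() performs and that update_bus_time() builds on, assuming only the layout implied by the code above — bits [31:25] seconds, [24:12] cycleCount, [11:0] cycleOffset, 3072 ticks of the 24.576 MHz clock per 125 us cycle. The sample register value is made up.]

#include <stdio.h>
#include <stdint.h>

static uint32_t ticks(uint32_t ct)
{
	return (ct & 0xfff)			/* cycleOffset */
	     + 3072 * ((ct >> 12) & 0x1fff)	/* cycleCount */
	     + 3072 * 8000 * (ct >> 25);	/* cycleSeconds */
}

int main(void)
{
	/* 1 second, cycle 100, offset 50 -> 0x02064032 */
	uint32_t ct = (1u << 25) | (100u << 12) | 50;

	printf("0x%08x -> %u ticks\n",
	       (unsigned) ct, (unsigned) ticks(ct));	/* 24883250 */
	return 0;
}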
static void bus_reset_tasklet(unsigned long data)
{
struct fw_ohci *ohci = (struct fw_ohci *)data;
@@ -1319,6 +1451,7 @@ static void bus_reset_tasklet(unsigned long data)
unsigned long flags;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
+ bool is_new_root;
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
@@ -1332,6 +1465,12 @@ static void bus_reset_tasklet(unsigned long data)
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
+ is_new_root = (reg & OHCI1394_NodeID_root) != 0;
+ if (!(ohci->is_root && is_new_root))
+ reg_write(ohci, OHCI1394_LinkControlSet,
+ OHCI1394_LinkControl_cycleMaster);
+ ohci->is_root = is_new_root;
+
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
fw_notify("inconsistent self IDs\n");
@@ -1439,7 +1578,9 @@ static void bus_reset_tasklet(unsigned long data)
self_id_count, ohci->self_id_buffer);
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
- self_id_count, ohci->self_id_buffer);
+ self_id_count, ohci->self_id_buffer,
+ ohci->csr_state_setclear_abdicate);
+ ohci->csr_state_setclear_abdicate = false;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -1515,6 +1656,12 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
+ if (event & OHCI1394_cycle64Seconds) {
+ spin_lock(&ohci->lock);
+ update_bus_time(ohci);
+ spin_unlock(&ohci->lock);
+ }
+
return IRQ_HANDLED;
}
@@ -1577,7 +1724,7 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
set = 0;
}
- ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+ ret = update_phy_reg(ohci, 5, clear, set);
if (ret < 0)
return ret;
@@ -1599,7 +1746,7 @@ static int ohci_enable(struct fw_card *card,
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
- u32 lps;
+ u32 lps, seconds, version, irqs;
int i, ret;
if (software_reset(ohci)) {
@@ -1635,17 +1782,34 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_HCControl_noByteSwapData);
reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
- reg_write(ohci, OHCI1394_LinkControlClear,
- OHCI1394_LinkControl_rcvPhyPkt);
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_rcvSelfID |
+ OHCI1394_LinkControl_rcvPhyPkt |
OHCI1394_LinkControl_cycleTimerEnable |
OHCI1394_LinkControl_cycleMaster);
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES << 4) |
- (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+ (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
+ (200 << 16));
+
+ seconds = lower_32_bits(get_seconds());
+ reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
+ ohci->bus_time = seconds & ~0x3f;
+
+ version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ if (version >= OHCI_VERSION_1_1) {
+ reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
+ 0xfffffffe);
+ card->broadcast_channel_auto_allocated = true;
+ }
+
+ /* Get implemented bits of the priority arbitration request counter. */
+ reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
+ ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
+ reg_write(ohci, OHCI1394_FairnessControl, 0);
+ card->priority_budget_implemented = ohci->pri_req_max != 0;
ar_context_run(&ohci->ar_request_ctx);
ar_context_run(&ohci->ar_response_ctx);
@@ -1653,16 +1817,6 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
reg_write(ohci, OHCI1394_IntEventClear, ~0);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
- reg_write(ohci, OHCI1394_IntMaskSet,
- OHCI1394_selfIDComplete |
- OHCI1394_RQPkt | OHCI1394_RSPkt |
- OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
- OHCI1394_isochRx | OHCI1394_isochTx |
- OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
- OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
- OHCI1394_masterIntEnable);
- if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
- reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
ret = configure_1394a_enhancements(ohci);
if (ret < 0)
@@ -1719,26 +1873,38 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+ if (!(ohci->quirks & QUIRK_NO_MSI))
+ pci_enable_msi(dev);
if (request_irq(dev->irq, irq_handler,
- IRQF_SHARED, ohci_driver_name, ohci)) {
- fw_error("Failed to allocate shared interrupt %d.\n",
- dev->irq);
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
+ ohci_driver_name, ohci)) {
+ fw_error("Failed to allocate interrupt %d.\n", dev->irq);
+ pci_disable_msi(dev);
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
ohci->config_rom, ohci->config_rom_bus);
return -EIO;
}
+ irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+ OHCI1394_RQPkt | OHCI1394_RSPkt |
+ OHCI1394_isochTx | OHCI1394_isochRx |
+ OHCI1394_postedWriteErr |
+ OHCI1394_selfIDComplete |
+ OHCI1394_regAccessFail |
+ OHCI1394_cycle64Seconds |
+ OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+ OHCI1394_masterIntEnable;
+ if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+ irqs |= OHCI1394_busReset;
+ reg_write(ohci, OHCI1394_IntMaskSet, irqs);
+
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_linkEnable |
OHCI1394_HCControl_BIBimageValid);
flush_writes(ohci);
- /*
- * We are ready to go, initiate bus reset to finish the
- * initialization.
- */
-
- fw_core_initiate_bus_reset(&ohci->card, 1);
+ /* We are ready to go, reset bus to finish initialization. */
+ fw_schedule_bus_reset(&ohci->card, false, true);
return 0;
}
@@ -1813,7 +1979,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* takes effect.
*/
if (ret == 0)
- fw_core_initiate_bus_reset(&ohci->card, 1);
+ fw_schedule_bus_reset(&ohci->card, true, true);
else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);
@@ -1903,61 +2069,117 @@ static int ohci_enable_phys_dma(struct fw_card *card,
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
-static u32 cycle_timer_ticks(u32 cycle_timer)
+static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
- u32 ticks;
+ struct fw_ohci *ohci = fw_ohci(card);
+ unsigned long flags;
+ u32 value;
+
+ switch (csr_offset) {
+ case CSR_STATE_CLEAR:
+ case CSR_STATE_SET:
+ if (ohci->is_root &&
+ (reg_read(ohci, OHCI1394_LinkControlSet) &
+ OHCI1394_LinkControl_cycleMaster))
+ value = CSR_STATE_BIT_CMSTR;
+ else
+ value = 0;
+ if (ohci->csr_state_setclear_abdicate)
+ value |= CSR_STATE_BIT_ABDICATE;
- ticks = cycle_timer & 0xfff;
- ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
- ticks += (3072 * 8000) * (cycle_timer >> 25);
+ return value;
- return ticks;
+ case CSR_NODE_IDS:
+ return reg_read(ohci, OHCI1394_NodeID) << 16;
+
+ case CSR_CYCLE_TIME:
+ return get_cycle_time(ohci);
+
+ case CSR_BUS_TIME:
+ /*
+ * We might be called just after the cycle timer has wrapped
+ * around but just before the cycle64Seconds handler, so we
+ * better check here, too, if the bus time needs to be updated.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ value = update_bus_time(ohci);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+ return value;
+
+ case CSR_BUSY_TIMEOUT:
+ value = reg_read(ohci, OHCI1394_ATRetries);
+ return (value >> 4) & 0x0ffff00f;
+
+ case CSR_PRIORITY_BUDGET:
+ return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
+ (ohci->pri_req_max << 8);
+
+ default:
+ WARN_ON(1);
+ return 0;
+ }
}
-/*
- * Some controllers exhibit one or more of the following bugs when updating the
- * iso cycle timer register:
- * - When the lowest six bits are wrapping around to zero, a read that happens
- * at the same time will return garbage in the lowest ten bits.
- * - When the cycleOffset field wraps around to zero, the cycleCount field is
- * not incremented for about 60 ns.
- * - Occasionally, the entire register reads zero.
- *
- * To catch these, we read the register three times and ensure that the
- * difference between each two consecutive reads is approximately the same, i.e.
- * less than twice the other. Furthermore, any negative difference indicates an
- * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
- * execute, so we have enough precision to compute the ratio of the differences.)
- */
-static u32 ohci_get_cycle_time(struct fw_card *card)
+static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
struct fw_ohci *ohci = fw_ohci(card);
- u32 c0, c1, c2;
- u32 t0, t1, t2;
- s32 diff01, diff12;
- int i;
+ unsigned long flags;
- c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ switch (csr_offset) {
+ case CSR_STATE_CLEAR:
+ if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
+ reg_write(ohci, OHCI1394_LinkControlClear,
+ OHCI1394_LinkControl_cycleMaster);
+ flush_writes(ohci);
+ }
+ if (value & CSR_STATE_BIT_ABDICATE)
+ ohci->csr_state_setclear_abdicate = false;
+ break;
- if (ohci->quirks & QUIRK_CYCLE_TIMER) {
- i = 0;
- c1 = c2;
- c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- do {
- c0 = c1;
- c1 = c2;
- c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- t0 = cycle_timer_ticks(c0);
- t1 = cycle_timer_ticks(c1);
- t2 = cycle_timer_ticks(c2);
- diff01 = t1 - t0;
- diff12 = t2 - t1;
- } while ((diff01 <= 0 || diff12 <= 0 ||
- diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
- && i++ < 20);
- }
+ case CSR_STATE_SET:
+ if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
+ reg_write(ohci, OHCI1394_LinkControlSet,
+ OHCI1394_LinkControl_cycleMaster);
+ flush_writes(ohci);
+ }
+ if (value & CSR_STATE_BIT_ABDICATE)
+ ohci->csr_state_setclear_abdicate = true;
+ break;
- return c2;
+ case CSR_NODE_IDS:
+ reg_write(ohci, OHCI1394_NodeID, value >> 16);
+ flush_writes(ohci);
+ break;
+
+ case CSR_CYCLE_TIME:
+ reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
+ reg_write(ohci, OHCI1394_IntEventSet,
+ OHCI1394_cycleInconsistent);
+ flush_writes(ohci);
+ break;
+
+ case CSR_BUS_TIME:
+ spin_lock_irqsave(&ohci->lock, flags);
+ ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+ break;
+
+ case CSR_BUSY_TIMEOUT:
+ value = (value & 0xf) | ((value & 0xf) << 4) |
+ ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
+ reg_write(ohci, OHCI1394_ATRetries, value);
+ flush_writes(ohci);
+ break;
+
+ case CSR_PRIORITY_BUDGET:
+ reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
+ flush_writes(ohci);
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
}
static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1992,10 +2214,9 @@ static int handle_ir_packet_per_buffer(struct context *context,
__le32 *ir_header;
void *p;
- for (pd = d; pd <= last; pd++) {
+ for (pd = d; pd <= last; pd++)
if (pd->transfer_status)
break;
- }
if (pd > last)
/* Descriptor(s) not done yet, stop iteration */
return 0;
@@ -2005,16 +2226,38 @@ static int handle_ir_packet_per_buffer(struct context *context,
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) p;
- ctx->base.callback(&ctx->base,
- le32_to_cpu(ir_header[0]) & 0xffff,
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
+ ctx->base.callback.sc(&ctx->base,
+ le32_to_cpu(ir_header[0]) & 0xffff,
+ ctx->header_length, ctx->header,
+ ctx->base.callback_data);
ctx->header_length = 0;
}
return 1;
}
+/* d == last because each descriptor block is only a single descriptor. */
+static int handle_ir_buffer_fill(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
+{
+ struct iso_context *ctx =
+ container_of(context, struct iso_context, context);
+
+ if (!last->transfer_status)
+ /* Descriptor(s) not done yet, stop iteration */
+ return 0;
+
+ if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+ ctx->base.callback.mc(&ctx->base,
+ le32_to_cpu(last->data_address) +
+ le16_to_cpu(last->req_count) -
+ le16_to_cpu(last->res_count),
+ ctx->base.callback_data);
+
+ return 1;
+}
+
static int handle_it_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -2040,71 +2283,118 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
}
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
- ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
+ ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
+ ctx->header_length, ctx->header,
+ ctx->base.callback_data);
ctx->header_length = 0;
}
return 1;
}
+static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
+{
+ u32 hi = channels >> 32, lo = channels;
+
+ reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
+ reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
+ reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
+ reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
+ mmiowb();
+ ohci->mc_channels = channels;
+}
+
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct iso_context *ctx, *list;
- descriptor_callback_t callback;
- u64 *channels, dont_care = ~0ULL;
- u32 *mask, regs;
+ struct iso_context *uninitialized_var(ctx);
+ descriptor_callback_t uninitialized_var(callback);
+ u64 *uninitialized_var(channels);
+ u32 *uninitialized_var(mask), uninitialized_var(regs);
unsigned long flags;
- int index, ret = -ENOMEM;
+ int index, ret = -EBUSY;
- if (type == FW_ISO_CONTEXT_TRANSMIT) {
- channels = &dont_care;
- mask = &ohci->it_context_mask;
- list = ohci->it_context_list;
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ switch (type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ mask = &ohci->it_context_mask;
callback = handle_it_packet;
- } else {
+ index = ffs(*mask) - 1;
+ if (index >= 0) {
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoXmitContextBase(index);
+ ctx = &ohci->it_context_list[index];
+ }
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
channels = &ohci->ir_context_channels;
- mask = &ohci->ir_context_mask;
- list = ohci->ir_context_list;
+ mask = &ohci->ir_context_mask;
callback = handle_ir_packet_per_buffer;
- }
+ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ *channels &= ~(1ULL << channel);
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- spin_lock_irqsave(&ohci->lock, flags);
- index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- *channels &= ~(1ULL << channel);
- *mask &= ~(1 << index);
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_buffer_fill;
+ index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ ohci->mc_allocated = true;
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
+
+ default:
+ index = -1;
+ ret = -ENOSYS;
}
+
spin_unlock_irqrestore(&ohci->lock, flags);
if (index < 0)
- return ERR_PTR(-EBUSY);
-
- if (type == FW_ISO_CONTEXT_TRANSMIT)
- regs = OHCI1394_IsoXmitContextBase(index);
- else
- regs = OHCI1394_IsoRcvContextBase(index);
+ return ERR_PTR(ret);
- ctx = &list[index];
memset(ctx, 0, sizeof(*ctx));
ctx->header_length = 0;
ctx->header = (void *) __get_free_page(GFP_KERNEL);
- if (ctx->header == NULL)
+ if (ctx->header == NULL) {
+ ret = -ENOMEM;
goto out;
-
+ }
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
+ if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+ set_multichannel_mask(ohci, 0);
+
return &ctx->base;
out_with_header:
free_page((unsigned long)ctx->header);
out:
spin_lock_irqsave(&ohci->lock, flags);
+
+ switch (type) {
+ case FW_ISO_CONTEXT_RECEIVE:
+ *channels |= 1ULL << channel;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ohci->mc_allocated = false;
+ break;
+ }
*mask |= 1 << index;
+
spin_unlock_irqrestore(&ohci->lock, flags);
return ERR_PTR(ret);
@@ -2115,10 +2405,11 @@ static int ohci_start_iso(struct fw_iso_context *base,
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct fw_ohci *ohci = ctx->context.ohci;
- u32 control, match;
+ u32 control = IR_CONTEXT_ISOCH_HEADER, match;
int index;
- if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ switch (ctx->base.type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
match = 0;
if (cycle >= 0)
@@ -2128,9 +2419,13 @@ static int ohci_start_iso(struct fw_iso_context *base,
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
context_run(&ctx->context, match);
- } else {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
+ /* fall through */
+ case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
- control = IR_CONTEXT_ISOCH_HEADER;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
@@ -2141,6 +2436,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
context_run(&ctx->context, control);
+ break;
}
return 0;
@@ -2152,12 +2448,17 @@ static int ohci_stop_iso(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int index;
- if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ switch (ctx->base.type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
- } else {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
index = ctx - ohci->ir_context_list;
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+ break;
}
flush_writes(ohci);
context_stop(&ctx->context);
@@ -2178,24 +2479,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
spin_lock_irqsave(&ohci->lock, flags);
- if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ switch (base->type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
ohci->it_context_mask |= 1 << index;
- } else {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index;
ohci->ir_context_channels |= 1ULL << base->channel;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ index = ctx - ohci->ir_context_list;
+ ohci->ir_context_mask |= 1 << index;
+ ohci->ir_context_channels |= ohci->mc_channels;
+ ohci->mc_channels = 0;
+ ohci->mc_allocated = false;
+ break;
}
spin_unlock_irqrestore(&ohci->lock, flags);
}
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
+{
+ struct fw_ohci *ohci = fw_ohci(base->card);
+ unsigned long flags;
+ int ret;
+
+ switch (base->type) {
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ /* Don't allow multichannel to grab other contexts' channels. */
+ if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
+ *channels = ohci->ir_context_channels;
+ ret = -EBUSY;
+ } else {
+ set_multichannel_mask(ohci, *channels);
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
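[Editorial aside, not part of the patch: the conflict test in ohci_set_iso_channels() above, restated on sample masks; the channel numbers are made up. A requested channel is acceptable only if it is still unoccupied or already part of the multichannel mask; anything else trips the ~ir_context_channels & ~mc_channels & *channels expression and yields -EBUSY.]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t unoccupied = ~0ULL & ~(1ULL << 5);	/* channel 5 held by a single-channel context */
	uint64_t mc_owned   = 1ULL << 3;		/* channel 3 already in the multichannel mask */
	uint64_t requested  = (1ULL << 3) | (1ULL << 4) | (1ULL << 5);

	uint64_t conflict = ~unoccupied & ~mc_owned & requested;

	/* only bit 5 remains set: channel 5 is occupied and not MC-owned */
	printf("conflict mask: 0x%llx\n", (unsigned long long) conflict);
	return 0;
}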
+static int queue_iso_transmit(struct iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
@@ -2291,14 +2633,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int queue_iso_packet_per_buffer(struct iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *pd;
- struct fw_iso_packet *p = packet;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
int i, j, length;
@@ -2308,14 +2648,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
* The OHCI controller puts the isochronous header and trailer in the
* buffer, so we need at least 8 bytes.
*/
- packet_count = p->header_length / ctx->base.header_size;
+ packet_count = packet->header_length / ctx->base.header_size;
header_size = max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
- payload_per_buffer = p->payload_length / packet_count;
+ payload_per_buffer = packet->payload_length / packet_count;
for (i = 0; i < packet_count; i++) {
/* d points to the header descriptor */
@@ -2327,7 +2667,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
d->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_MORE);
- if (p->skip && i == 0)
+ if (packet->skip && i == 0)
d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
d->req_count = cpu_to_le16(header_size);
d->res_count = d->req_count;
@@ -2360,7 +2700,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_LAST |
DESCRIPTOR_BRANCH_ALWAYS);
- if (p->interrupt && i == packet_count - 1)
+ if (packet->interrupt && i == packet_count - 1)
pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
context_append(&ctx->context, d, z, header_z);
@@ -2369,6 +2709,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
return 0;
}
+static int queue_iso_buffer_fill(struct iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct descriptor *d;
+ dma_addr_t d_bus, page_bus;
+ int page, offset, rest, z, i, length;
+
+ page = payload >> PAGE_SHIFT;
+ offset = payload & ~PAGE_MASK;
+ rest = packet->payload_length;
+
+ /* We need one descriptor for each page in the buffer. */
+ z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
+
+ if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
+ return -EFAULT;
+
+ for (i = 0; i < z; i++) {
+ d = context_get_descriptors(&ctx->context, 1, &d_bus);
+ if (d == NULL)
+ return -ENOMEM;
+
+ d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+ DESCRIPTOR_BRANCH_ALWAYS);
+ if (packet->skip && i == 0)
+ d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+ if (packet->interrupt && i == z - 1)
+ d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+ if (offset + rest < PAGE_SIZE)
+ length = rest;
+ else
+ length = PAGE_SIZE - offset;
+ d->req_count = cpu_to_le16(length);
+ d->res_count = d->req_count;
+ d->transfer_status = 0;
+
+ page_bus = page_private(buffer->pages[page]);
+ d->data_address = cpu_to_le32(page_bus + offset);
+
+ rest -= length;
+ offset = 0;
+ page++;
+
+ context_append(&ctx->context, d, 1, 0);
+ }
+
+ return 0;
+}
+
static int ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -2376,14 +2768,20 @@ static int ohci_queue_iso(struct fw_iso_context *base,
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
- int ret;
+ int ret = -ENOSYS;
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
- if (base->type == FW_ISO_CONTEXT_TRANSMIT)
- ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
- else
- ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
- buffer, payload);
+ switch (base->type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ ret = queue_iso_transmit(ctx, packet, buffer, payload);
+ break;
+ case FW_ISO_CONTEXT_RECEIVE:
+ ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
+ break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
+ break;
+ }
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
return ret;
@@ -2391,16 +2789,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
+ .read_phy_reg = ohci_read_phy_reg,
.update_phy_reg = ohci_update_phy_reg,
.set_config_rom = ohci_set_config_rom,
.send_request = ohci_send_request,
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
- .get_cycle_time = ohci_get_cycle_time,
+ .read_csr = ohci_read_csr,
+ .write_csr = ohci_write_csr,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
+ .set_iso_channels = ohci_set_iso_channels,
.queue_iso = ohci_queue_iso,
.start_iso = ohci_start_iso,
.stop_iso = ohci_stop_iso,
@@ -2465,6 +2866,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_set_drvdata(dev, ohci);
spin_lock_init(&ohci->lock);
+ mutex_init(&ohci->phy_reg_mutex);
tasklet_init(&ohci->bus_reset_tasklet,
bus_reset_tasklet, (unsigned long)ohci);
@@ -2625,6 +3027,7 @@ static void pci_remove(struct pci_dev *dev)
context_release(&ohci->at_response_ctx);
kfree(ohci->it_context_list);
kfree(ohci->ir_context_list);
+ pci_disable_msi(dev);
pci_iounmap(dev, ohci->registers);
pci_release_region(dev, 0);
pci_disable_device(dev);
@@ -2642,6 +3045,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
software_reset(ohci);
free_irq(dev->irq, ohci);
+ pci_disable_msi(dev);
err = pci_save_state(dev);
if (err) {
fw_error("pci_save_state failed\n");
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index 3bc9a5d744e..0e6c5a46690 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -60,6 +60,7 @@
#define OHCI1394_LinkControl_cycleSource (1 << 22)
#define OHCI1394_NodeID 0x0E8
#define OHCI1394_NodeID_idValid 0x80000000
+#define OHCI1394_NodeID_root 0x40000000
#define OHCI1394_NodeID_nodeNumber 0x0000003f
#define OHCI1394_NodeID_busNumber 0x0000ffc0
#define OHCI1394_PhyControl 0x0EC
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index ca264f2fdf0..bfae4b30979 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -410,8 +410,7 @@ static void free_orb(struct kref *kref)
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
+ int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
@@ -451,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
- kref_put(&orb->kref, free_orb);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
fw_error("status write for unknown orb\n");
}
@@ -473,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode,
* So this callback only sets the rcode if it hasn't already
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
+ *
+ * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
+ * OXUF936QSE firmwares occasionally respond after Split_Timeout and
+ * complete the ORB just fine. Note, we also get RCODE_CANCELLED
+ * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
*/
spin_lock_irqsave(&card->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
- if (orb->rcode != RCODE_COMPLETE) {
+
+ if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
list_del(&orb->link);
spin_unlock_irqrestore(&card->lock, flags);
+
orb->callback(orb, NULL);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
spin_unlock_irqrestore(&card->lock, flags);
}
- kref_put(&orb->kref, free_orb);
+ kref_put(&orb->kref, free_orb); /* transaction callback reference */
}
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -502,14 +509,12 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
list_add_tail(&orb->link, &lu->orb_list);
spin_unlock_irqrestore(&device->card->lock, flags);
- /* Take a ref for the orb list and for the transaction callback. */
- kref_get(&orb->kref);
- kref_get(&orb->kref);
+ kref_get(&orb->kref); /* transaction callback reference */
+ kref_get(&orb->kref); /* orb callback reference */
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
- &orb->pointer, sizeof(orb->pointer),
- complete_transaction, orb);
+ &orb->pointer, 8, complete_transaction, orb);
}
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
@@ -527,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
- if (fw_cancel_transaction(device->card, &orb->t) == 0)
- continue;
+ fw_cancel_transaction(device->card, &orb->t);
orb->rcode = RCODE_CANCELLED;
orb->callback(orb, NULL);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
}
return retval;
@@ -654,7 +659,7 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
- &d, sizeof(d));
+ &d, 4);
}
static void complete_agent_reset_write_no_wait(struct fw_card *card,
@@ -676,7 +681,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
- &d, sizeof(d), complete_agent_reset_write_no_wait, t);
+ &d, 4, complete_agent_reset_write_no_wait, t);
}
static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
@@ -866,8 +871,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
- CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
- &d, sizeof(d));
+ CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4);
}
static void sbp2_reconnect(struct work_struct *work);
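A minimal sketch (hypothetical names, not taken from sbp2.c) of the reference-counting convention the annotations above document: one kref_get() per asynchronous completion path, and exactly one matching, labelled kref_put() in whichever path runs.

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_orb {
	struct kref kref;
};

static void demo_free(struct kref *kref)
{
	kfree(container_of(kref, struct demo_orb, kref));
}

static struct demo_orb *demo_alloc(void)
{
	struct demo_orb *orb = kzalloc(sizeof(*orb), GFP_KERNEL);

	if (orb)
		kref_init(&orb->kref);	/* allocation reference */
	return orb;
}

static void demo_send(struct demo_orb *orb)
{
	kref_get(&orb->kref);		/* transaction callback reference */
	kref_get(&orb->kref);		/* orb (status write) callback reference */
	/* ... start both asynchronous completions ... */
}

static void demo_transaction_done(struct demo_orb *orb)
{
	kref_put(&orb->kref, demo_free); /* transaction callback reference */
}

static void demo_status_write(struct demo_orb *orb)
{
	kref_put(&orb->kref, demo_free); /* orb callback reference */
}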
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index a6c670b8ce5..280c9b5ad9e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -122,18 +122,10 @@ config ISCSI_IBFT_FIND
is necessary for iSCSI Boot Firmware Table Attributes module to work
properly.
-config ISCSI_BOOT_SYSFS
- tristate "iSCSI Boot Sysfs Interface"
- default n
- help
- This option enables support for exposing iSCSI boot information
- via sysfs to userspace. If you wish to export this information,
- say Y. Otherwise, say N.
-
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS
- depends on ISCSI_IBFT_FIND
+ depends on ISCSI_IBFT_FIND && SCSI
default n
help
This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5fe7e166292..1c3c17343db 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -10,5 +10,4 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o
obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
-obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index aa9bc9e980e..69ad529d92f 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -634,9 +634,6 @@ static void __exit dcdbas_exit(void)
* before platform_device_unregister
*/
unregister_reboot_notifier(&dcdbas_reboot_nb);
- smi_data_buf_free();
- platform_device_unregister(dcdbas_pdev);
- platform_driver_unregister(&dcdbas_driver);
/*
* We have to free the buffer here instead of dcdbas_remove
@@ -645,6 +642,8 @@ static void __exit dcdbas_exit(void)
* released.
*/
smi_data_buf_free();
+ platform_device_unregister(dcdbas_pdev);
+ platform_driver_unregister(&dcdbas_driver);
}
module_init(dcdbas_init);
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index a777a35381d..94a58a082b9 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -229,10 +229,12 @@ static int __init dmi_id_init(void)
ret = device_register(dmi_dev);
if (ret)
- goto fail_class_unregister;
+ goto fail_free_dmi_dev;
return 0;
+fail_free_dmi_dev:
+ kfree(dmi_dev);
fail_class_unregister:
class_unregister(&dmi_class);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index d4646727134..b3d22d65999 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -277,6 +277,29 @@ static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
list_add_tail(&dev->list, &dmi_devices);
}
+static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
+ int devfn, const char *name)
+{
+ struct dmi_dev_onboard *onboard_dev;
+
+ onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1);
+ if (!onboard_dev) {
+ printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n");
+ return;
+ }
+ onboard_dev->instance = instance;
+ onboard_dev->segment = segment;
+ onboard_dev->bus = bus;
+ onboard_dev->devfn = devfn;
+
+ strcpy((char *)&onboard_dev[1], name);
+ onboard_dev->dev.type = DMI_DEV_TYPE_DEV_ONBOARD;
+ onboard_dev->dev.name = (char *)&onboard_dev[1];
+ onboard_dev->dev.device_data = onboard_dev;
+
+ list_add(&onboard_dev->dev.list, &dmi_devices);
+}
+
static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
const u8 *d = (u8*) dm + 5;
@@ -285,6 +308,8 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
if ((*d & 0x80) == 0)
return;
+ dmi_save_dev_onboard(*(d+1), *(u16 *)(d+2), *(d+4), *(d+5),
+ dmi_string_nosave(dm, *(d-1)));
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1)));
}
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 110e24e5088..f287fe79edc 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -744,7 +744,7 @@ static inline int edd_num_devices(void)
static int __init
edd_init(void)
{
- unsigned int i;
+ int i;
int rc=0;
struct edd_device *edev;
@@ -760,21 +760,27 @@ edd_init(void)
if (!edd_kset)
return -ENOMEM;
- for (i = 0; i < edd_num_devices() && !rc; i++) {
+ for (i = 0; i < edd_num_devices(); i++) {
edev = kzalloc(sizeof (*edev), GFP_KERNEL);
- if (!edev)
- return -ENOMEM;
+ if (!edev) {
+ rc = -ENOMEM;
+ goto out;
+ }
rc = edd_device_register(edev, i);
if (rc) {
kfree(edev);
- break;
+ goto out;
}
edd_devices[i] = edev;
}
- if (rc)
- kset_unregister(edd_kset);
+ return 0;
+
+out:
+ while (--i >= 0)
+ edd_device_unregister(edd_devices[i]);
+ kset_unregister(edd_kset);
return rc;
}
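A generic sketch (hypothetical helpers) of the unwind idiom the edd_init() rework adopts: on failure, undo only the registrations that already succeeded, then release the shared resource.

static int demo_init(void)
{
	int i, rc = 0;

	for (i = 0; i < demo_count(); i++) {
		rc = demo_register(i);		/* hypothetical per-device setup */
		if (rc)
			goto out;
	}
	return 0;

out:
	while (--i >= 0)
		demo_unregister(i);		/* undo successful registrations only */
	return rc;
}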
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 4f04ec0410a..6148a1c6789 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -727,8 +727,10 @@ static void ibft_unregister(void)
static void ibft_cleanup(void)
{
- ibft_unregister();
- iscsi_boot_destroy_kset(boot_kset);
+ if (boot_kset) {
+ ibft_unregister();
+ iscsi_boot_destroy_kset(boot_kset);
+ }
}
static void __exit ibft_exit(void)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 7face915b96..510aa205454 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -195,6 +195,24 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_SX150X
+ bool "Semtech SX150x I2C GPIO expander"
+ depends on I2C=y
+ default n
+ help
+ Say yes here to provide support for Semtech SX150-series I2C
+ GPIO expanders. Compatible models include:
+
+ 8 bits: sx1508q
+ 16 bits: sx1509q
+
+config GPIO_STMPE
+ bool "STMPE GPIOs"
+ depends on MFD_STMPE
+ help
+ This enables support for the GPIOs found on the STMPE I/O
+ Expanders.
+
config GPIO_TC35892
bool "TC35892 GPIOs"
depends on MFD_TC35892
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e53dcff49b4..fc6019d9372 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_STMPE) += stmpe-gpio.o
obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
@@ -35,3 +36,4 @@ obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
+obj-$(CONFIG_GPIO_SX150X) += sx150x.o
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4e51fe3c1fc..21da9c19a0c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/idr.h>
#include <linux/slab.h>
@@ -56,9 +57,9 @@ struct gpio_desc {
#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
-#define PDESC_ID_SHIFT 16 /* add new flags before this one */
+#define ID_SHIFT 16 /* add new flags before this one */
-#define GPIO_FLAGS_MASK ((1 << PDESC_ID_SHIFT) - 1)
+#define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1)
#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE))
#ifdef CONFIG_DEBUG_FS
@@ -68,12 +69,7 @@ struct gpio_desc {
static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
#ifdef CONFIG_GPIO_SYSFS
-struct poll_desc {
- struct work_struct work;
- struct sysfs_dirent *value_sd;
-};
-
-static struct idr pdesc_idr;
+static DEFINE_IDR(dirent_idr);
#endif
static inline void desc_set_label(struct gpio_desc *d, const char *label)
@@ -324,24 +320,16 @@ static const DEVICE_ATTR(value, 0644,
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
- struct work_struct *work = priv;
+ struct sysfs_dirent *value_sd = priv;
- schedule_work(work);
+ sysfs_notify_dirent(value_sd);
return IRQ_HANDLED;
}
-static void gpio_notify_sysfs(struct work_struct *work)
-{
- struct poll_desc *pdesc;
-
- pdesc = container_of(work, struct poll_desc, work);
- sysfs_notify_dirent(pdesc->value_sd);
-}
-
static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
unsigned long gpio_flags)
{
- struct poll_desc *pdesc;
+ struct sysfs_dirent *value_sd;
unsigned long irq_flags;
int ret, irq, id;
@@ -352,18 +340,16 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
if (irq < 0)
return -EIO;
- id = desc->flags >> PDESC_ID_SHIFT;
- pdesc = idr_find(&pdesc_idr, id);
- if (pdesc) {
- free_irq(irq, &pdesc->work);
- cancel_work_sync(&pdesc->work);
- }
+ id = desc->flags >> ID_SHIFT;
+ value_sd = idr_find(&dirent_idr, id);
+ if (value_sd)
+ free_irq(irq, value_sd);
desc->flags &= ~GPIO_TRIGGER_MASK;
if (!gpio_flags) {
ret = 0;
- goto free_sd;
+ goto free_id;
}
irq_flags = IRQF_SHARED;
@@ -374,55 +360,46 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
- if (!pdesc) {
- pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
- if (!pdesc) {
- ret = -ENOMEM;
+ if (!value_sd) {
+ value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
+ if (!value_sd) {
+ ret = -ENODEV;
goto err_out;
}
do {
ret = -ENOMEM;
- if (idr_pre_get(&pdesc_idr, GFP_KERNEL))
- ret = idr_get_new_above(&pdesc_idr,
- pdesc, 1, &id);
+ if (idr_pre_get(&dirent_idr, GFP_KERNEL))
+ ret = idr_get_new_above(&dirent_idr, value_sd,
+ 1, &id);
} while (ret == -EAGAIN);
if (ret)
- goto free_mem;
+ goto free_sd;
desc->flags &= GPIO_FLAGS_MASK;
- desc->flags |= (unsigned long)id << PDESC_ID_SHIFT;
+ desc->flags |= (unsigned long)id << ID_SHIFT;
- if (desc->flags >> PDESC_ID_SHIFT != id) {
+ if (desc->flags >> ID_SHIFT != id) {
ret = -ERANGE;
goto free_id;
}
-
- pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
- if (!pdesc->value_sd) {
- ret = -ENODEV;
- goto free_id;
- }
- INIT_WORK(&pdesc->work, gpio_notify_sysfs);
}
- ret = request_irq(irq, gpio_sysfs_irq, irq_flags,
- "gpiolib", &pdesc->work);
- if (ret)
- goto free_sd;
+ ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags,
+ "gpiolib", value_sd);
+ if (ret < 0)
+ goto free_id;
desc->flags |= gpio_flags;
return 0;
-free_sd:
- if (pdesc)
- sysfs_put(pdesc->value_sd);
free_id:
- idr_remove(&pdesc_idr, id);
+ idr_remove(&dirent_idr, id);
desc->flags &= GPIO_FLAGS_MASK;
-free_mem:
- kfree(pdesc);
+free_sd:
+ if (value_sd)
+ sysfs_put(value_sd);
err_out:
return ret;
}
@@ -993,8 +970,6 @@ static int __init gpiolib_sysfs_init(void)
unsigned long flags;
unsigned gpio;
- idr_init(&pdesc_idr);
-
status = class_register(&gpio_class);
if (status < 0)
return status;
@@ -1100,16 +1075,24 @@ int gpiochip_add(struct gpio_chip *chip)
}
}
+ of_gpiochip_add(chip);
+
unlock:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status == 0)
- status = gpiochip_export(chip);
+
+ if (status)
+ goto fail;
+
+ status = gpiochip_export(chip);
+ if (status)
+ goto fail;
+
+ return 0;
fail:
/* failures here can mean systems won't boot... */
- if (status)
- pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
- chip->base, chip->base + chip->ngpio - 1,
- chip->label ? : "generic");
+ pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
+ chip->base, chip->base + chip->ngpio - 1,
+ chip->label ? : "generic");
return status;
}
EXPORT_SYMBOL_GPL(gpiochip_add);
@@ -1128,6 +1111,8 @@ int gpiochip_remove(struct gpio_chip *chip)
spin_lock_irqsave(&gpio_lock, flags);
+ of_gpiochip_remove(chip);
+
for (id = chip->base; id < chip->base + chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) {
status = -EBUSY;
@@ -1148,6 +1133,38 @@ int gpiochip_remove(struct gpio_chip *chip)
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
+/**
+ * gpiochip_find() - iterator for locating a specific gpio_chip
+ * @data: data to pass to match function
+ * @match: Callback function to check gpio_chip
+ *
+ * Similar to bus_find_device. It returns a reference to a gpio_chip as
+ * determined by a user supplied @match callback. The callback should return
+ * 0 if the device doesn't match and non-zero if it does.  If the callback
+ * returns non-zero, this function will return to the caller and not iterate
+ * over any more gpio_chips.
+ */
+struct gpio_chip *gpiochip_find(void *data,
+ int (*match)(struct gpio_chip *chip, void *data))
+{
+ struct gpio_chip *chip = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+ for (i = 0; i < ARCH_NR_GPIOS; i++) {
+ if (!gpio_desc[i].chip)
+ continue;
+
+ if (match(gpio_desc[i].chip, data)) {
+ chip = gpio_desc[i].chip;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ return chip;
+}
/* These "optional" allocation calls help prevent drivers from stomping
* on each other, and help provide better diagnostics in debugfs.
@@ -1229,7 +1246,7 @@ void gpio_free(unsigned gpio)
if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
if (chip->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(extra_checks && chip->can_sleep);
+ might_sleep_if(chip->can_sleep);
chip->free(chip, gpio - chip->base);
spin_lock_irqsave(&gpio_lock, flags);
}
@@ -1367,7 +1384,7 @@ int gpio_direction_input(unsigned gpio)
spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(extra_checks && chip->can_sleep);
+ might_sleep_if(chip->can_sleep);
if (status) {
status = chip->request(chip, gpio);
@@ -1420,7 +1437,7 @@ int gpio_direction_output(unsigned gpio, int value)
spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(extra_checks && chip->can_sleep);
+ might_sleep_if(chip->can_sleep);
if (status) {
status = chip->request(chip, gpio);
@@ -1478,7 +1495,7 @@ int gpio_set_debounce(unsigned gpio, unsigned debounce)
spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(extra_checks && chip->can_sleep);
+ might_sleep_if(chip->can_sleep);
return chip->set_debounce(chip, gpio, debounce);
@@ -1528,7 +1545,7 @@ int __gpio_get_value(unsigned gpio)
struct gpio_chip *chip;
chip = gpio_to_chip(gpio);
- WARN_ON(extra_checks && chip->can_sleep);
+ WARN_ON(chip->can_sleep);
return chip->get ? chip->get(chip, gpio - chip->base) : 0;
}
EXPORT_SYMBOL_GPL(__gpio_get_value);
@@ -1547,7 +1564,7 @@ void __gpio_set_value(unsigned gpio, int value)
struct gpio_chip *chip;
chip = gpio_to_chip(gpio);
- WARN_ON(extra_checks && chip->can_sleep);
+ WARN_ON(chip->can_sleep);
chip->set(chip, gpio - chip->base, value);
}
EXPORT_SYMBOL_GPL(__gpio_set_value);
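A short usage sketch (hypothetical consumer code, not part of the patch) of the new gpiochip_find() iterator with a label-matching callback:

#include <linux/gpio.h>
#include <linux/string.h>

static int match_label(struct gpio_chip *chip, void *data)
{
	/* non-zero return stops the iteration and returns this chip */
	return chip->label && !strcmp(chip->label, data);
}

static struct gpio_chip *find_chip_by_label(const char *label)
{
	return gpiochip_find((void *)label, match_label);
}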
diff --git a/drivers/gpio/max730x.c b/drivers/gpio/max730x.c
index 7696a5625d5..94ce773f95f 100644
--- a/drivers/gpio/max730x.c
+++ b/drivers/gpio/max730x.c
@@ -54,7 +54,7 @@ static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
- u8 offset_bits;
+ u8 offset_bits, pin_config;
int ret;
/* First 4 pins are unused in the controller */
@@ -63,12 +63,15 @@ static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
config = &ts->port_config[offset >> 2];
+ if (ts->input_pullup_active & BIT(offset))
+ pin_config = PIN_CONFIG_IN_PULLUP;
+ else
+ pin_config = PIN_CONFIG_IN_WO_PULLUP;
+
mutex_lock(&ts->lock);
- /* Standard GPIO API doesn't support pull-ups, has to be extended.
- * Hard-coding no pollup for now. */
*config = (*config & ~(PIN_CONFIG_MASK << offset_bits))
- | (PIN_CONFIG_IN_WO_PULLUP << offset_bits);
+ | (pin_config << offset_bits);
ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config);
@@ -177,6 +180,7 @@ int __devinit __max730x_probe(struct max7301 *ts)
/* Power up the chip and disable IRQ output */
ts->write(dev, 0x04, 0x01);
+ ts->input_pullup_active = pdata->input_pullup_active;
ts->chip.label = dev->driver->name;
ts->chip.direction_input = max7301_direction_input;
@@ -191,13 +195,17 @@ int __devinit __max730x_probe(struct max7301 *ts)
ts->chip.owner = THIS_MODULE;
/*
- * tristate all pins in hardware and cache the
+ * initialize pullups according to platform data and cache the
* register values for later use.
*/
for (i = 1; i < 8; i++) {
int j;
- /* 0xAA means input with internal pullup disabled */
- ts->write(dev, 0x08 + i, 0xAA);
+ /*
+ * initialize port_config with "0xAA", which means
+ * input with internal pullup disabled. This is needed
+ * to avoid writing zeros (in the inner for loop),
+ * which is not allowed according to the datasheet.
+ */
ts->port_config[i] = 0xAA;
for (j = 0; j < 4; j++) {
int offset = (i - 1) * 4 + j;
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index 29f19ce3e80..879b473aab5 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -190,7 +190,6 @@ static int pcf857x_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
if (!pdata) {
dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
}
/* Allocate, initialize, and register this gpio_chip. */
@@ -200,7 +199,7 @@ static int pcf857x_probe(struct i2c_client *client,
mutex_init(&gpio->lock);
- gpio->chip.base = pdata->gpio_base;
+ gpio->chip.base = pdata ? pdata->gpio_base : -1;
gpio->chip.can_sleep = 1;
gpio->chip.dev = &client->dev;
gpio->chip.owner = THIS_MODULE;
@@ -278,7 +277,7 @@ static int pcf857x_probe(struct i2c_client *client,
* to zero, our software copy of the "latch" then matches the chip's
* all-ones reset state. Otherwise it flags pins to be driven low.
*/
- gpio->out = ~pdata->n_latch;
+ gpio->out = pdata ? ~pdata->n_latch : ~0;
status = gpiochip_add(&gpio->chip);
if (status < 0)
@@ -299,7 +298,7 @@ static int pcf857x_probe(struct i2c_client *client,
/* Let platform code set up the GPIOs and their users.
* Now is the first time anyone could use them.
*/
- if (pdata->setup) {
+ if (pdata && pdata->setup) {
status = pdata->setup(client,
gpio->chip.base, gpio->chip.ngpio,
pdata->context);
@@ -322,7 +321,7 @@ static int pcf857x_remove(struct i2c_client *client)
struct pcf857x *gpio = i2c_get_clientdata(client);
int status = 0;
- if (pdata->teardown) {
+ if (pdata && pdata->teardown) {
status = pdata->teardown(client,
gpio->chip.base, gpio->chip.ngpio,
pdata->context);
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
new file mode 100644
index 00000000000..4e1f1b9d5e6
--- /dev/null
+++ b/drivers/gpio/stmpe-gpio.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/stmpe.h>
+
+/*
+ * These registers are modified under the irq bus lock and cached to avoid
+ * unnecessary writes in bus_sync_unlock.
+ */
+enum { REG_RE, REG_FE, REG_IE };
+
+#define CACHE_NR_REGS 3
+#define CACHE_NR_BANKS (STMPE_NR_GPIOS / 8)
+
+struct stmpe_gpio {
+ struct gpio_chip chip;
+ struct stmpe *stmpe;
+ struct device *dev;
+ struct mutex irq_lock;
+
+ int irq_base;
+
+ /* Caches of interrupt control registers for bus_lock */
+ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
+ u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
+};
+
+static inline struct stmpe_gpio *to_stmpe_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct stmpe_gpio, chip);
+}
+
+static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ u8 reg = stmpe->regs[STMPE_IDX_GPMR_LSB] - (offset / 8);
+ u8 mask = 1 << (offset % 8);
+ int ret;
+
+ ret = stmpe_reg_read(stmpe, reg);
+ if (ret < 0)
+ return ret;
+
+ return ret & mask;
+}
+
+static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ int which = val ? STMPE_IDX_GPSR_LSB : STMPE_IDX_GPCR_LSB;
+ u8 reg = stmpe->regs[which] - (offset / 8);
+ u8 mask = 1 << (offset % 8);
+
+ stmpe_reg_write(stmpe, reg, mask);
+}
+
+static int stmpe_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int val)
+{
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
+ u8 mask = 1 << (offset % 8);
+
+ stmpe_gpio_set(chip, offset, val);
+
+ return stmpe_set_bits(stmpe, reg, mask, mask);
+}
+
+static int stmpe_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
+ u8 mask = 1 << (offset % 8);
+
+ return stmpe_set_bits(stmpe, reg, mask, 0);
+}
+
+static int stmpe_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
+
+ return stmpe_gpio->irq_base + offset;
+}
+
+static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+
+ return stmpe_set_altfunc(stmpe, 1 << offset, STMPE_BLOCK_GPIO);
+}
+
+static struct gpio_chip template_chip = {
+ .label = "stmpe",
+ .owner = THIS_MODULE,
+ .direction_input = stmpe_gpio_direction_input,
+ .get = stmpe_gpio_get,
+ .direction_output = stmpe_gpio_direction_output,
+ .set = stmpe_gpio_set,
+ .to_irq = stmpe_gpio_to_irq,
+ .request = stmpe_gpio_request,
+ .can_sleep = 1,
+};
+
+static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+ int offset = irq - stmpe_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ if (type == IRQ_TYPE_EDGE_RISING)
+ stmpe_gpio->regs[REG_RE][regoffset] |= mask;
+ else
+ stmpe_gpio->regs[REG_RE][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_EDGE_FALLING)
+ stmpe_gpio->regs[REG_FE][regoffset] |= mask;
+ else
+ stmpe_gpio->regs[REG_FE][regoffset] &= ~mask;
+
+ return 0;
+}
+
+static void stmpe_gpio_irq_lock(unsigned int irq)
+{
+ struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+
+ mutex_lock(&stmpe_gpio->irq_lock);
+}
+
+static void stmpe_gpio_irq_sync_unlock(unsigned int irq)
+{
+ struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
+ static const u8 regmap[] = {
+ [REG_RE] = STMPE_IDX_GPRER_LSB,
+ [REG_FE] = STMPE_IDX_GPFER_LSB,
+ [REG_IE] = STMPE_IDX_IEGPIOR_LSB,
+ };
+ int i, j;
+
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ for (j = 0; j < num_banks; j++) {
+ u8 old = stmpe_gpio->oldregs[i][j];
+ u8 new = stmpe_gpio->regs[i][j];
+
+ if (new == old)
+ continue;
+
+ stmpe_gpio->oldregs[i][j] = new;
+ stmpe_reg_write(stmpe, stmpe->regs[regmap[i]] - j, new);
+ }
+ }
+
+ mutex_unlock(&stmpe_gpio->irq_lock);
+}
+
+static void stmpe_gpio_irq_mask(unsigned int irq)
+{
+ struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+ int offset = irq - stmpe_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
+}
+
+static void stmpe_gpio_irq_unmask(unsigned int irq)
+{
+ struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+ int offset = irq - stmpe_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ stmpe_gpio->regs[REG_IE][regoffset] |= mask;
+}
+
+static struct irq_chip stmpe_gpio_irq_chip = {
+ .name = "stmpe-gpio",
+ .bus_lock = stmpe_gpio_irq_lock,
+ .bus_sync_unlock = stmpe_gpio_irq_sync_unlock,
+ .mask = stmpe_gpio_irq_mask,
+ .unmask = stmpe_gpio_irq_unmask,
+ .set_type = stmpe_gpio_irq_set_type,
+};
+
+static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
+{
+ struct stmpe_gpio *stmpe_gpio = dev;
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ u8 statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_MSB];
+ int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
+ u8 status[num_banks];
+ int ret;
+ int i;
+
+ ret = stmpe_block_read(stmpe, statmsbreg, num_banks, status);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < num_banks; i++) {
+ int bank = num_banks - i - 1;
+ unsigned int enabled = stmpe_gpio->regs[REG_IE][bank];
+ unsigned int stat = status[i];
+
+ stat &= enabled;
+ if (!stat)
+ continue;
+
+ while (stat) {
+ int bit = __ffs(stat);
+ int line = bank * 8 + bit;
+
+ handle_nested_irq(stmpe_gpio->irq_base + line);
+ stat &= ~(1 << bit);
+ }
+
+ stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
+ stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB] + i,
+ status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+{
+ int base = stmpe_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) {
+ set_irq_chip_data(irq, stmpe_gpio);
+ set_irq_chip_and_handler(irq, &stmpe_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void stmpe_gpio_irq_remove(struct stmpe_gpio *stmpe_gpio)
+{
+ int base = stmpe_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct stmpe_gpio_platform_data *pdata;
+ struct stmpe_gpio *stmpe_gpio;
+ int ret;
+ int irq;
+
+ pdata = stmpe->pdata->gpio;
+ if (!pdata)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ stmpe_gpio = kzalloc(sizeof(struct stmpe_gpio), GFP_KERNEL);
+ if (!stmpe_gpio)
+ return -ENOMEM;
+
+ mutex_init(&stmpe_gpio->irq_lock);
+
+ stmpe_gpio->dev = &pdev->dev;
+ stmpe_gpio->stmpe = stmpe;
+
+ stmpe_gpio->chip = template_chip;
+ stmpe_gpio->chip.ngpio = stmpe->num_gpios;
+ stmpe_gpio->chip.dev = &pdev->dev;
+ stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
+
+ stmpe_gpio->irq_base = stmpe->irq_base + STMPE_INT_GPIO(0);
+
+ ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
+ if (ret)
+ return ret;
+
+ ret = stmpe_gpio_irq_init(stmpe_gpio);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(irq, NULL, stmpe_gpio_irq, IRQF_ONESHOT,
+ "stmpe-gpio", stmpe_gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = gpiochip_add(&stmpe_gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+ goto out_freeirq;
+ }
+
+ if (pdata && pdata->setup)
+ pdata->setup(stmpe, stmpe_gpio->chip.base);
+
+ platform_set_drvdata(pdev, stmpe_gpio);
+
+ return 0;
+
+out_freeirq:
+ free_irq(irq, stmpe_gpio);
+out_removeirq:
+ stmpe_gpio_irq_remove(stmpe_gpio);
+out_free:
+ kfree(stmpe_gpio);
+ return ret;
+}
+
+static int __devexit stmpe_gpio_remove(struct platform_device *pdev)
+{
+ struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ struct stmpe_gpio_platform_data *pdata = stmpe->pdata->gpio;
+ int irq = platform_get_irq(pdev, 0);
+ int ret;
+
+ if (pdata && pdata->remove)
+ pdata->remove(stmpe, stmpe_gpio->chip.base);
+
+ ret = gpiochip_remove(&stmpe_gpio->chip);
+ if (ret < 0) {
+ dev_err(stmpe_gpio->dev,
+ "unable to remove gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
+
+ free_irq(irq, stmpe_gpio);
+ stmpe_gpio_irq_remove(stmpe_gpio);
+ platform_set_drvdata(pdev, NULL);
+ kfree(stmpe_gpio);
+
+ return 0;
+}
+
+static struct platform_driver stmpe_gpio_driver = {
+ .driver.name = "stmpe-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = stmpe_gpio_probe,
+ .remove = __devexit_p(stmpe_gpio_remove),
+};
+
+static int __init stmpe_gpio_init(void)
+{
+ return platform_driver_register(&stmpe_gpio_driver);
+}
+subsys_initcall(stmpe_gpio_init);
+
+static void __exit stmpe_gpio_exit(void)
+{
+ platform_driver_unregister(&stmpe_gpio_driver);
+}
+module_exit(stmpe_gpio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPExxxx GPIO driver");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
new file mode 100644
index 00000000000..823559ab0e2
--- /dev/null
+++ b/drivers/gpio/sx150x.c
@@ -0,0 +1,661 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/i2c/sx150x.h>
+
+struct sx150x_device_data {
+ u8 reg_pullup;
+ u8 reg_pulldn;
+ u8 reg_drain;
+ u8 reg_polarity;
+ u8 reg_dir;
+ u8 reg_data;
+ u8 reg_irq_mask;
+ u8 reg_irq_src;
+ u8 reg_sense;
+ u8 reg_clock;
+ u8 reg_misc;
+ u8 reg_reset;
+ u8 ngpios;
+};
+
+struct sx150x_chip {
+ struct gpio_chip gpio_chip;
+ struct i2c_client *client;
+ const struct sx150x_device_data *dev_cfg;
+ int irq_summary;
+ int irq_base;
+ u32 irq_sense;
+ unsigned long irq_set_type_pending;
+ struct irq_chip irq_chip;
+ struct mutex lock;
+};
+
+static const struct sx150x_device_data sx150x_devices[] = {
+ [0] = { /* sx1508q */
+ .reg_pullup = 0x03,
+ .reg_pulldn = 0x04,
+ .reg_drain = 0x05,
+ .reg_polarity = 0x06,
+ .reg_dir = 0x07,
+ .reg_data = 0x08,
+ .reg_irq_mask = 0x09,
+ .reg_irq_src = 0x0c,
+ .reg_sense = 0x0b,
+ .reg_clock = 0x0f,
+ .reg_misc = 0x10,
+ .reg_reset = 0x7d,
+ .ngpios = 8
+ },
+ [1] = { /* sx1509q */
+ .reg_pullup = 0x07,
+ .reg_pulldn = 0x09,
+ .reg_drain = 0x0b,
+ .reg_polarity = 0x0d,
+ .reg_dir = 0x0f,
+ .reg_data = 0x11,
+ .reg_irq_mask = 0x13,
+ .reg_irq_src = 0x19,
+ .reg_sense = 0x17,
+ .reg_clock = 0x1e,
+ .reg_misc = 0x1f,
+ .reg_reset = 0x7d,
+ .ngpios = 16
+ },
+};
+
+static const struct i2c_device_id sx150x_id[] = {
+ {"sx1508q", 0},
+ {"sx1509q", 1},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, sx150x_id);
+
+static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ s32 err = i2c_smbus_write_byte_data(client, reg, val);
+
+ if (err < 0)
+ dev_warn(&client->dev,
+ "i2c write fail: can't write %02x to %02x: %d\n",
+ val, reg, err);
+ return err;
+}
+
+static s32 sx150x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
+{
+ s32 err = i2c_smbus_read_byte_data(client, reg);
+
+ if (err >= 0)
+ *val = err;
+ else
+ dev_warn(&client->dev,
+ "i2c read fail: can't read from %02x: %d\n",
+ reg, err);
+ return err;
+}
+
+static inline bool offset_is_oscio(struct sx150x_chip *chip, unsigned offset)
+{
+ return (chip->dev_cfg->ngpios == offset);
+}
+
+/*
+ * These utility functions solve the common problem of locating and setting
+ * configuration bits. Configuration bits are grouped into registers
+ * whose indexes increase downwards. For example, with eight-bit registers,
+ * sixteen gpios would have their config bits grouped in the following order:
+ * REGISTER N-1 [ f e d c b a 9 8 ]
+ * N [ 7 6 5 4 3 2 1 0 ]
+ *
+ * For multi-bit configurations, the pattern gets wider:
+ * REGISTER N-3 [ f f e e d d c c ]
+ * N-2 [ b b a a 9 9 8 8 ]
+ * N-1 [ 7 7 6 6 5 5 4 4 ]
+ * N [ 3 3 2 2 1 1 0 0 ]
+ *
+ * Given the address of the starting register 'N', the index of the gpio
+ * whose configuration we seek to change, and the width in bits of that
+ * configuration, these functions allow us to locate the correct
+ * register and mask the correct bits.
+ */
+static inline void sx150x_find_cfg(u8 offset, u8 width,
+ u8 *reg, u8 *mask, u8 *shift)
+{
+ *reg -= offset * width / 8;
+ *mask = (1 << width) - 1;
+ *shift = (offset * width) % 8;
+ *mask <<= *shift;
+}
+
+static s32 sx150x_write_cfg(struct sx150x_chip *chip,
+ u8 offset, u8 width, u8 reg, u8 val)
+{
+ u8 mask;
+ u8 data;
+ u8 shift;
+ s32 err;
+
+ sx150x_find_cfg(offset, width, &reg, &mask, &shift);
+ err = sx150x_i2c_read(chip->client, reg, &data);
+ if (err < 0)
+ return err;
+
+ data &= ~mask;
+ data |= (val << shift) & mask;
+ return sx150x_i2c_write(chip->client, reg, data);
+}
+
+static int sx150x_get_io(struct sx150x_chip *chip, unsigned offset)
+{
+ u8 reg = chip->dev_cfg->reg_data;
+ u8 mask;
+ u8 data;
+ u8 shift;
+ s32 err;
+
+ sx150x_find_cfg(offset, 1, &reg, &mask, &shift);
+ err = sx150x_i2c_read(chip->client, reg, &data);
+ if (err >= 0)
+ err = (data & mask) != 0 ? 1 : 0;
+
+ return err;
+}
+
+static void sx150x_set_oscio(struct sx150x_chip *chip, int val)
+{
+ sx150x_i2c_write(chip->client,
+ chip->dev_cfg->reg_clock,
+ (val ? 0x1f : 0x10));
+}
+
+static void sx150x_set_io(struct sx150x_chip *chip, unsigned offset, int val)
+{
+ sx150x_write_cfg(chip,
+ offset,
+ 1,
+ chip->dev_cfg->reg_data,
+ (val ? 1 : 0));
+}
+
+static int sx150x_io_input(struct sx150x_chip *chip, unsigned offset)
+{
+ return sx150x_write_cfg(chip,
+ offset,
+ 1,
+ chip->dev_cfg->reg_dir,
+ 1);
+}
+
+static int sx150x_io_output(struct sx150x_chip *chip, unsigned offset, int val)
+{
+ int err;
+
+ err = sx150x_write_cfg(chip,
+ offset,
+ 1,
+ chip->dev_cfg->reg_data,
+ (val ? 1 : 0));
+ if (err >= 0)
+ err = sx150x_write_cfg(chip,
+ offset,
+ 1,
+ chip->dev_cfg->reg_dir,
+ 0);
+ return err;
+}
+
+static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct sx150x_chip *chip;
+ int status = -EINVAL;
+
+ chip = container_of(gc, struct sx150x_chip, gpio_chip);
+
+ if (!offset_is_oscio(chip, offset)) {
+ mutex_lock(&chip->lock);
+ status = sx150x_get_io(chip, offset);
+ mutex_unlock(&chip->lock);
+ }
+
+ return status;
+}
+
+static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
+{
+ struct sx150x_chip *chip;
+
+ chip = container_of(gc, struct sx150x_chip, gpio_chip);
+
+ mutex_lock(&chip->lock);
+ if (offset_is_oscio(chip, offset))
+ sx150x_set_oscio(chip, val);
+ else
+ sx150x_set_io(chip, offset, val);
+ mutex_unlock(&chip->lock);
+}
+
+static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct sx150x_chip *chip;
+ int status = -EINVAL;
+
+ chip = container_of(gc, struct sx150x_chip, gpio_chip);
+
+ if (!offset_is_oscio(chip, offset)) {
+ mutex_lock(&chip->lock);
+ status = sx150x_io_input(chip, offset);
+ mutex_unlock(&chip->lock);
+ }
+ return status;
+}
+
+static int sx150x_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset,
+ int val)
+{
+ struct sx150x_chip *chip;
+ int status = 0;
+
+ chip = container_of(gc, struct sx150x_chip, gpio_chip);
+
+ if (!offset_is_oscio(chip, offset)) {
+ mutex_lock(&chip->lock);
+ status = sx150x_io_output(chip, offset, val);
+ mutex_unlock(&chip->lock);
+ }
+ return status;
+}
+
+static int sx150x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct sx150x_chip *chip;
+
+ chip = container_of(gc, struct sx150x_chip, gpio_chip);
+
+ if (offset >= chip->dev_cfg->ngpios)
+ return -EINVAL;
+
+ if (chip->irq_base < 0)
+ return -EINVAL;
+
+ return chip->irq_base + offset;
+}
+
+static void sx150x_irq_mask(unsigned int irq)
+{
+ struct irq_chip *ic = get_irq_chip(irq);
+ struct sx150x_chip *chip;
+ unsigned n;
+
+ chip = container_of(ic, struct sx150x_chip, irq_chip);
+ n = irq - chip->irq_base;
+
+ sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
+ sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
+}
+
+static void sx150x_irq_unmask(unsigned int irq)
+{
+ struct irq_chip *ic = get_irq_chip(irq);
+ struct sx150x_chip *chip;
+ unsigned n;
+
+ chip = container_of(ic, struct sx150x_chip, irq_chip);
+ n = irq - chip->irq_base;
+
+ sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
+ sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
+ chip->irq_sense >> (n * 2));
+}
+
+static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ struct irq_chip *ic = get_irq_chip(irq);
+ struct sx150x_chip *chip;
+ unsigned n, val = 0;
+
+ if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ return -EINVAL;
+
+ chip = container_of(ic, struct sx150x_chip, irq_chip);
+ n = irq - chip->irq_base;
+
+ if (flow_type & IRQ_TYPE_EDGE_RISING)
+ val |= 0x1;
+ if (flow_type & IRQ_TYPE_EDGE_FALLING)
+ val |= 0x2;
+
+ chip->irq_sense &= ~(3UL << (n * 2));
+ chip->irq_sense |= val << (n * 2);
+ chip->irq_set_type_pending |= BIT(n);
+ return 0;
+}
+
+static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
+{
+ struct sx150x_chip *chip = (struct sx150x_chip *)dev_id;
+ unsigned nhandled = 0;
+ unsigned sub_irq;
+ unsigned n;
+ s32 err;
+ u8 val;
+ int i;
+
+ for (i = (chip->dev_cfg->ngpios / 8) - 1; i >= 0; --i) {
+ err = sx150x_i2c_read(chip->client,
+ chip->dev_cfg->reg_irq_src - i,
+ &val);
+ if (err < 0)
+ continue;
+
+ sx150x_i2c_write(chip->client,
+ chip->dev_cfg->reg_irq_src - i,
+ val);
+ for (n = 0; n < 8; ++n) {
+ if (val & (1 << n)) {
+ sub_irq = chip->irq_base + (i * 8) + n;
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+ }
+
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void sx150x_irq_bus_lock(unsigned int irq)
+{
+ struct irq_chip *ic = get_irq_chip(irq);
+ struct sx150x_chip *chip;
+
+ chip = container_of(ic, struct sx150x_chip, irq_chip);
+
+ mutex_lock(&chip->lock);
+}
+
+static void sx150x_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct irq_chip *ic = get_irq_chip(irq);
+ struct sx150x_chip *chip;
+ unsigned n;
+
+ chip = container_of(ic, struct sx150x_chip, irq_chip);
+
+ while (chip->irq_set_type_pending) {
+ n = __ffs(chip->irq_set_type_pending);
+ chip->irq_set_type_pending &= ~BIT(n);
+ if (!(irq_to_desc(n + chip->irq_base)->status & IRQ_MASKED))
+ sx150x_write_cfg(chip, n, 2,
+ chip->dev_cfg->reg_sense,
+ chip->irq_sense >> (n * 2));
+ }
+
+ mutex_unlock(&chip->lock);
+}
+
+static void sx150x_init_chip(struct sx150x_chip *chip,
+ struct i2c_client *client,
+ kernel_ulong_t driver_data,
+ struct sx150x_platform_data *pdata)
+{
+ mutex_init(&chip->lock);
+
+ chip->client = client;
+ chip->dev_cfg = &sx150x_devices[driver_data];
+ chip->gpio_chip.label = client->name;
+ chip->gpio_chip.direction_input = sx150x_gpio_direction_input;
+ chip->gpio_chip.direction_output = sx150x_gpio_direction_output;
+ chip->gpio_chip.get = sx150x_gpio_get;
+ chip->gpio_chip.set = sx150x_gpio_set;
+ chip->gpio_chip.to_irq = sx150x_gpio_to_irq;
+ chip->gpio_chip.base = pdata->gpio_base;
+ chip->gpio_chip.can_sleep = 1;
+ chip->gpio_chip.ngpio = chip->dev_cfg->ngpios;
+ if (pdata->oscio_is_gpo)
+ ++chip->gpio_chip.ngpio;
+
+ chip->irq_chip.name = client->name;
+ chip->irq_chip.mask = sx150x_irq_mask;
+ chip->irq_chip.unmask = sx150x_irq_unmask;
+ chip->irq_chip.set_type = sx150x_irq_set_type;
+ chip->irq_chip.bus_lock = sx150x_irq_bus_lock;
+ chip->irq_chip.bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+ chip->irq_summary = -1;
+ chip->irq_base = -1;
+ chip->irq_sense = 0;
+ chip->irq_set_type_pending = 0;
+}
+
+static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
+{
+ int err = 0;
+ unsigned n;
+
+ for (n = 0; err >= 0 && n < (chip->dev_cfg->ngpios / 8); ++n)
+ err = sx150x_i2c_write(chip->client, base - n, cfg >> (n * 8));
+ return err;
+}
+
+static int sx150x_reset(struct sx150x_chip *chip)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x12);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x34);
+ return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+ struct sx150x_platform_data *pdata)
+{
+ int err = 0;
+
+ if (pdata->reset_during_probe) {
+ err = sx150x_reset(chip);
+ if (err < 0)
+ return err;
+ }
+
+ err = sx150x_i2c_write(chip->client,
+ chip->dev_cfg->reg_misc,
+ 0x01);
+ if (err < 0)
+ return err;
+
+ err = sx150x_init_io(chip, chip->dev_cfg->reg_pullup,
+ pdata->io_pullup_ena);
+ if (err < 0)
+ return err;
+
+ err = sx150x_init_io(chip, chip->dev_cfg->reg_pulldn,
+ pdata->io_pulldn_ena);
+ if (err < 0)
+ return err;
+
+ err = sx150x_init_io(chip, chip->dev_cfg->reg_drain,
+ pdata->io_open_drain_ena);
+ if (err < 0)
+ return err;
+
+ err = sx150x_init_io(chip, chip->dev_cfg->reg_polarity,
+ pdata->io_polarity);
+ if (err < 0)
+ return err;
+
+ if (pdata->oscio_is_gpo)
+ sx150x_set_oscio(chip, 0);
+
+ return err;
+}
+
+static int sx150x_install_irq_chip(struct sx150x_chip *chip,
+ int irq_summary,
+ int irq_base)
+{
+ int err;
+ unsigned n;
+ unsigned irq;
+
+ chip->irq_summary = irq_summary;
+ chip->irq_base = irq_base;
+
+ for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
+ irq = irq_base + n;
+ set_irq_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ err = request_threaded_irq(irq_summary,
+ NULL,
+ sx150x_irq_thread_fn,
+ IRQF_SHARED | IRQF_TRIGGER_FALLING,
+ chip->irq_chip.name,
+ chip);
+ if (err < 0) {
+ chip->irq_summary = -1;
+ chip->irq_base = -1;
+ }
+
+ return err;
+}
+
+static void sx150x_remove_irq_chip(struct sx150x_chip *chip)
+{
+ unsigned n;
+ unsigned irq;
+
+ free_irq(chip->irq_summary, chip);
+
+ for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
+ irq = chip->irq_base + n;
+ set_irq_handler(irq, NULL);
+ set_irq_chip(irq, NULL);
+ }
+}
+
+static int __devinit sx150x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+ struct sx150x_platform_data *pdata;
+ struct sx150x_chip *chip;
+ int rc;
+
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(client->adapter, i2c_funcs))
+ return -ENOSYS;
+
+ chip = kzalloc(sizeof(struct sx150x_chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ sx150x_init_chip(chip, client, id->driver_data, pdata);
+ rc = sx150x_init_hw(chip, pdata);
+ if (rc < 0)
+ goto probe_fail_pre_gpiochip_add;
+
+ rc = gpiochip_add(&chip->gpio_chip);
+ if (rc < 0)
+ goto probe_fail_pre_gpiochip_add;
+
+ if (pdata->irq_summary >= 0) {
+ rc = sx150x_install_irq_chip(chip,
+ pdata->irq_summary,
+ pdata->irq_base);
+ if (rc < 0)
+ goto probe_fail_post_gpiochip_add;
+ }
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+probe_fail_post_gpiochip_add:
+ WARN_ON(gpiochip_remove(&chip->gpio_chip) < 0);
+probe_fail_pre_gpiochip_add:
+ kfree(chip);
+ return rc;
+}
+
+static int __devexit sx150x_remove(struct i2c_client *client)
+{
+ struct sx150x_chip *chip;
+ int rc;
+
+ chip = i2c_get_clientdata(client);
+ rc = gpiochip_remove(&chip->gpio_chip);
+ if (rc < 0)
+ return rc;
+
+ if (chip->irq_summary >= 0)
+ sx150x_remove_irq_chip(chip);
+
+ kfree(chip);
+
+ return 0;
+}
+
+static struct i2c_driver sx150x_driver = {
+ .driver = {
+ .name = "sx150x",
+ .owner = THIS_MODULE
+ },
+ .probe = sx150x_probe,
+ .remove = __devexit_p(sx150x_remove),
+ .id_table = sx150x_id,
+};
+
+static int __init sx150x_init(void)
+{
+ return i2c_add_driver(&sx150x_driver);
+}
+subsys_initcall(sx150x_init);
+
+static void __exit sx150x_exit(void)
+{
+ return i2c_del_driver(&sx150x_driver);
+}
+module_exit(sx150x_exit);
+
+MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
+MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:sx150x");
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c
index 1fa449a1a4c..309644cf4d9 100644
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/wm831x-gpio.c
@@ -108,6 +108,37 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
}
+static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
+ struct wm831x *wm831x = wm831x_gpio->wm831x;
+ int reg = WM831X_GPIO1_CONTROL + offset;
+ int ret, fn;
+
+ ret = wm831x_reg_read(wm831x, reg);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & WM831X_GPN_FN_MASK) {
+ case 0:
+ case 1:
+ break;
+ default:
+ /* Not in GPIO mode */
+ return -EBUSY;
+ }
+
+ if (debounce >= 32 && debounce <= 64)
+ fn = 0;
+ else if (debounce >= 4000 && debounce <= 8000)
+ fn = 1;
+ else
+ return -EINVAL;
+
+ return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
+}
+
#ifdef CONFIG_DEBUG_FS
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
@@ -208,6 +239,7 @@ static struct gpio_chip template_chip = {
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
+ .set_debounce = wm831x_gpio_set_debounce,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = 1,
};
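A consumer-side sketch (hypothetical GPIO number) of the new hook; gpio_set_debounce() takes the time in microseconds, which the WM831x maps onto its two supported ranges (32-64 us or 4-8 ms) and otherwise rejects with -EINVAL:

#include <linux/gpio.h>
#include <linux/printk.h>

static void demo_debounce(unsigned board_gpio)
{
	/* 4000 us = 4 ms, which falls into the WM831x 4-8 ms range */
	int err = gpio_set_debounce(board_gpio, 4000);

	if (err)
		pr_warn("gpio %u: debounce unsupported: %d\n", board_gpio, err);
}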
diff --git a/drivers/gpio/xilinx_gpio.c b/drivers/gpio/xilinx_gpio.c
index b8fa65b5bfc..709690995d0 100644
--- a/drivers/gpio/xilinx_gpio.c
+++ b/drivers/gpio/xilinx_gpio.c
@@ -161,14 +161,12 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
static int __devinit xgpio_of_probe(struct device_node *np)
{
struct xgpio_instance *chip;
- struct of_gpio_chip *ofchip;
int status = 0;
const u32 *tree_info;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- ofchip = &chip->mmchip.of_gc;
/* Update GPIO state shadow register with default value */
tree_info = of_get_property(np, "xlnx,dout-default", NULL);
@@ -182,21 +180,20 @@ static int __devinit xgpio_of_probe(struct device_node *np)
chip->gpio_dir = *tree_info;
/* Check device node and parent device node for device width */
- ofchip->gc.ngpio = 32; /* By default assume full GPIO controller */
+ chip->mmchip.gc.ngpio = 32; /* By default assume full GPIO controller */
tree_info = of_get_property(np, "xlnx,gpio-width", NULL);
if (!tree_info)
tree_info = of_get_property(np->parent,
"xlnx,gpio-width", NULL);
if (tree_info)
- ofchip->gc.ngpio = *tree_info;
+ chip->mmchip.gc.ngpio = *tree_info;
spin_lock_init(&chip->gpio_lock);
- ofchip->gpio_cells = 2;
- ofchip->gc.direction_input = xgpio_dir_in;
- ofchip->gc.direction_output = xgpio_dir_out;
- ofchip->gc.get = xgpio_get;
- ofchip->gc.set = xgpio_set;
+ chip->mmchip.gc.direction_input = xgpio_dir_in;
+ chip->mmchip.gc.direction_output = xgpio_dir_out;
+ chip->mmchip.gc.get = xgpio_get;
+ chip->mmchip.gc.set = xgpio_set;
chip->mmchip.save_regs = xgpio_save_regs;
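With the of_gc indirection gone, the rest of the probe (not shown in this hunk) only has to register the filled-in of_mm_gpio_chip; a condensed sketch of that tail end, assuming of_mm_gpiochip_add() remains the registration entry point used by this driver:

    /* Sketch of the end of xgpio_of_probe() after the conversion. */
    status = of_mm_gpiochip_add(np, &chip->mmchip);
    if (status) {
            kfree(chip);
            return status;
    }
    return 0;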
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 88910e5a2c7..4cab0c6397e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
select SLOW_WORK
@@ -17,7 +17,7 @@ menuconfig DRM
These modules provide support for synchronization, security, and
DMA transfers. Please see <http://dri.sourceforge.net/> for more
details. You should also select and configure AGP
- (/dev/agpgart) support.
+ (/dev/agpgart) support if it is available for your platform.
config DRM_KMS_HELPER
tristate
@@ -61,6 +61,7 @@ config DRM_RADEON
select DRM_KMS_HELPER
select DRM_TTM
select POWER_SUPPLY
+ select HWMON
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -130,7 +131,7 @@ endchoice
config DRM_MGA
tristate "Matrox g200/g400"
- depends on DRM
+ depends on DRM && PCI
select FW_LOADER
help
Choose this option if you have a Matrox G200, G400 or G450 graphics
@@ -148,14 +149,14 @@ config DRM_SIS
config DRM_VIA
tristate "Via unichrome video cards"
- depends on DRM
+ depends on DRM && PCI
help
Choose this option if you have a Via unichrome or compatible video
chipset. If M is selected the module will be called via.
config DRM_SAVAGE
tristate "Savage video cards"
- depends on DRM
+ depends on DRM && PCI
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index abe3f446ca4..f3a23a329f4 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,9 +9,10 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o
+ drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_trace_points.o drm_global.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@@ -19,6 +20,8 @@ drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+CFLAGS_drm_trace_points.o := -I$(src)
+
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
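The new CFLAGS line supports the standard kernel tracepoint pattern: exactly one translation unit defines CREATE_TRACE_POINTS before including the trace header, and include/trace/define_trace.h then re-includes that header by name, which only works if the header's own directory is on the include path, hence -I$(src). A sketch of what a drm_trace_points.c-style file conventionally contains (the local header name is an assumption mirroring the new object name, not text quoted from this patch):

    /* The one translation unit where the TRACE_EVENT() declarations in
     * the local trace header expand into real tracepoint definitions. */
    #include "drmP.h"

    #define CREATE_TRACE_POINTS
    #include "drm_trace.h"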
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 17be051b7aa..1c364924220 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -152,7 +152,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
+ if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
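The hunk above stops treating a zero bus address as the failure indicator, since zero can be a legitimate DMA address on some platforms, and uses the API's dedicated check instead. As a general pattern, a hedged sketch of the map-and-check sequence with the pci_* DMA wrappers used here:

    #include <linux/pci.h>

    /* Sketch: map one page for DMA and detect failure portably. */
    static int example_map_page(struct pci_dev *pdev, struct page *page,
                                dma_addr_t *out)
    {
            dma_addr_t addr;

            addr = pci_map_page(pdev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
            if (pci_dma_mapping_error(pdev, addr))
                    return -ENOMEM;  /* mapping failed, address is unusable */

            *out = addr;
            return 0;
    }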
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed0500..529a0dbe9fc 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
* user_data: A pointer to the data that is copied to the buffer.
* size: The number of bytes to copy.
*/
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
- void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
{
int idx = drm_buffer_index(buf);
int page = drm_buffer_page(buf);
- void *obj = 0;
+ void *obj = NULL;
if (idx + objsize <= PAGE_SIZE) {
obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 2092e7bb788..3e257a50bf5 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -39,19 +39,6 @@
#include <asm/shmparam.h>
#include "drmP.h"
-resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_start(dev->pdev, resource);
-}
-EXPORT_SYMBOL(drm_get_resource_start);
-
-resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_len(dev->pdev, resource);
-}
-
-EXPORT_SYMBOL(drm_get_resource_len);
-
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
@@ -189,7 +176,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
if (map->offset + (map->size-1) < map->offset ||
map->offset < virt_to_phys(high_memory)) {
kfree(map);
@@ -341,14 +328,13 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return -EINVAL;
}
- list = kmalloc(sizeof(*list), GFP_KERNEL);
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
return -EINVAL;
}
- memset(list, 0, sizeof(*list));
list->map = map;
mutex_lock(&dev->struct_mutex);
@@ -691,13 +677,12 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -721,7 +706,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -730,7 +715,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
@@ -845,22 +829,20 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
- entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+ entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
if (!entry->seglist) {
kfree(entry->buflist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->seglist, 0, count * sizeof(*entry->seglist));
/* Keep the original pagelist until we know all the allocations
* have succeeded
@@ -924,8 +906,8 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size,
- GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size,
+ GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -936,7 +918,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
@@ -1061,14 +1042,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist),
GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -1093,7 +1073,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1103,8 +1083,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
@@ -1222,14 +1200,13 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist),
GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -1253,7 +1230,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1262,7 +1239,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 57cea01c4ff..37e0b4fa482 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -80,6 +80,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
+ { DRM_MODE_DITHERING_AUTO, "Automatic" },
};
/*
@@ -1126,7 +1127,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
- DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
@@ -1154,8 +1155,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
- DRM_DEBUG_KMS("ENCODER ID is %d\n",
- encoder->base.id);
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ drm_get_encoder_name(encoder));
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@@ -1185,8 +1186,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
- DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
- connector->base.id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@@ -1209,7 +1211,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
}
card_res->count_connectors = connector_count;
- DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
card_res->count_connectors, card_res->count_encoders);
out:
@@ -1312,7 +1314,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
@@ -1493,6 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
@@ -1569,6 +1572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
connector_set[i] = connector;
}
@@ -1676,14 +1682,15 @@ int drm_mode_addfb(struct drm_device *dev,
/* TODO setup destructor callback */
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (!fb) {
+ if (IS_ERR(fb)) {
DRM_ERROR("could not create framebuffer\n");
- ret = -EINVAL;
+ ret = PTR_ERR(fb);
goto out;
}
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2534,7 +2541,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2610,6 +2617,15 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
crtc = obj_to_crtc(obj);
+ if (crtc->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event that userspace has not
+ * yet discovered.
+ */
+ ret = -EBUSY;
+ goto out;
+ }
+
if (crtc->funcs->page_flip == NULL)
goto out;
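The drm_mode_addfb() change assumes drivers now report framebuffer-creation failures via the ERR_PTR convention rather than returning NULL, so the precise errno reaches the ioctl caller. A hedged sketch of both halves of that contract, with an invented driver type for illustration:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include "drmP.h"
    #include "drm_crtc.h"

    struct example_fb {                       /* illustrative wrapper type */
            struct drm_framebuffer base;
    };

    static struct drm_framebuffer *example_fb_create(struct drm_device *dev)
    {
            struct example_fb *fb = kzalloc(sizeof(*fb), GFP_KERNEL);

            if (!fb)
                    return ERR_PTR(-ENOMEM);  /* errno travels in the pointer */
            return &fb->base;
    }

    static int example_caller(struct drm_device *dev)
    {
            struct drm_framebuffer *fb = example_fb_create(dev);

            if (IS_ERR(fb))
                    return PTR_ERR(fb);       /* recover the real errno */
            return 0;
    }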
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 9b2a54117c9..dcbeb98f195 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
@@ -86,7 +89,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
int count = 0;
int mode_flags = 0;
- DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
@@ -98,22 +102,23 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
- } else
- connector->status = connector->funcs->detect(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ drm_kms_helper_poll_enable(dev);
+ }
if (connector->status == connector_status_disconnected) {
- DRM_DEBUG_KMS("%s is disconnected\n",
- drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, drm_get_connector_name(connector));
drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
count = (*connector_funcs->get_modes)(connector);
- if (!count) {
+ if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
- if (!count)
- return 0;
- }
+ if (count == 0)
+ goto prune;
drm_mode_connector_list_update(connector);
@@ -141,8 +146,8 @@ prune:
drm_mode_sort(&connector->modes);
- DRM_DEBUG_KMS("Probed modes for %s\n",
- drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+ drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -201,6 +206,17 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
/**
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
@@ -215,7 +231,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -226,12 +241,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder_funcs = encoder->helper_private;
if (!drm_helper_encoder_in_use(encoder)) {
- if (encoder_funcs->disable)
- (*encoder_funcs->disable)(encoder);
- else
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
encoder->crtc = NULL;
}
@@ -241,7 +252,10 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
}
}
@@ -292,11 +306,11 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
/* Disable encoders whose CRTC is about to change */
if (encoder_funcs->get_crtc &&
encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
}
}
@@ -365,6 +379,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
goto done;
}
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -392,8 +407,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
- mode->name, mode->base.id);
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
@@ -469,10 +485,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
crtc_funcs = set->crtc->helper_private;
- DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
- " %d (x, y) (%i, %i)\n",
- set->crtc, set->crtc->base.id, set->fb, set->connectors,
- (int)set->num_connectors, set->x, set->y);
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, (int)set->num_connectors,
+ set->x, set->y);
+ }
dev = set->crtc->dev;
@@ -601,8 +622,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
- DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
- connector->base.id, new_crtc);
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, drm_get_connector_name(connector));
+ }
}
/* mode_set_base is not a required function */
@@ -610,18 +637,18 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
if (mode_changed) {
- old_fb = set->crtc->fb;
- set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
- DRM_ERROR("failed to set mode on crtc %p\n",
- set->crtc);
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
ret = -EINVAL;
goto fail;
}
@@ -794,12 +821,12 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
-
- crtc_funcs = crtc->helper_private;
- if (crtc_funcs->dpms)
- (*crtc_funcs->dpms) (crtc,
- drm_helper_choose_crtc_dpms(crtc));
}
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
}
}
/* disable the unused connectors while restoring the modesetting */
@@ -808,17 +835,17 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
-static struct slow_work_ops output_poll_ops;
-
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct slow_work *work)
+static void output_poll_execute(struct work_struct *work)
{
- struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
- int ret;
+
+ if (!drm_kms_helper_poll)
+ return;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -839,7 +866,7 @@ static void output_poll_execute(struct slow_work *work)
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector);
+ status = connector->funcs->detect(connector, false);
if (old_status != status)
changed = true;
}
@@ -853,18 +880,15 @@ static void output_poll_execute(struct slow_work *work)
dev->mode_config.funcs->output_poll_changed(dev);
}
- if (repoll) {
- ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
- if (ret)
- DRM_ERROR("delayed enqueue failed %d\n", ret);
- }
+ if (repoll)
+ queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -872,26 +896,23 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
- int ret;
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
- if (poll) {
- ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
- if (ret)
- DRM_ERROR("delayed enqueue failed %d\n", ret);
- }
+ if (poll)
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
void drm_kms_helper_poll_init(struct drm_device *dev)
{
- slow_work_register_user(THIS_MODULE);
- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
- &output_poll_ops);
+ INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
drm_kms_helper_poll_enable(dev);
@@ -901,7 +922,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
- slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
@@ -909,12 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
- /* schedule a slow work asap */
- delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+
+ /* kill timer and schedule immediate execution; this doesn't block */
+ cancel_delayed_work(&dev->mode_config.output_poll_work);
+ if (drm_kms_helper_poll)
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-static struct slow_work_ops output_poll_ops = {
- .execute = output_poll_execute,
-};
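Taken together, these hunks replace the slow-work machinery with an ordinary delayed work item: INIT_DELAYED_WORK() binds the handler once, queue_delayed_work() arms or re-arms it, cancel_delayed_work_sync() is the blocking teardown, and cancel_delayed_work() plus an immediate re-queue gives the "run now" behaviour used for hotplug interrupts. A condensed sketch of that lifecycle outside the DRM structures:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define EXAMPLE_PERIOD (10 * HZ)

    static struct delayed_work example_poll;

    static void example_poll_fn(struct work_struct *work)
    {
            /* ... periodic detection work ... */

            /* re-arm; system_nrt_wq keeps executions non-reentrant */
            queue_delayed_work(system_nrt_wq, &example_poll, EXAMPLE_PERIOD);
    }

    static void example_poll_start(void)
    {
            INIT_DELAYED_WORK(&example_poll, example_poll_fn);
            queue_delayed_work(system_nrt_wq, &example_poll, EXAMPLE_PERIOD);
    }

    static void example_poll_stop(void)
    {
            /* waits for a running handler, then kills the pending timer */
            cancel_delayed_work_sync(&example_poll);
    }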
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 4a66201edae..84da748555b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -55,6 +55,9 @@
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -243,47 +246,20 @@ int drm_lastclose(struct drm_device * dev)
*
* Initializes an array of drm_device structures, and attempts to
* initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the AGP device.
+ * stubs and initializing the device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_init(struct drm_driver *driver)
{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
DRM_DEBUG("\n");
-
INIT_LIST_HEAD(&driver->device_list);
- if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_get_dev(pdev, pid, driver);
- }
- }
- return 0;
+ if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
+ return drm_platform_init(driver);
+ else
+ return drm_pci_init(driver);
}
EXPORT_SYMBOL(drm_init);
@@ -315,6 +291,7 @@ static int __init drm_core_init(void)
{
int ret = -ENOMEM;
+ drm_global_init();
idr_init(&drm_minors_idr);
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -362,6 +339,7 @@ static void __exit drm_core_exit(void)
unregister_chrdev(DRM_MAJOR, "drm");
+ idr_remove_all(&drm_minors_idr);
idr_destroy(&drm_minors_idr);
}
@@ -446,6 +424,7 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
+ unsigned int usize, asize;
dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
@@ -461,11 +440,18 @@ long drm_ioctl(struct file *filp,
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ }
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
ioctl = &drm_ioctls[nr];
cmd = ioctl->cmd;
+ usize = asize = _IOC_SIZE(cmd);
} else
goto err_i1;
@@ -485,10 +471,10 @@ long drm_ioctl(struct file *filp,
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
- if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+ if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
@@ -498,22 +484,24 @@ long drm_ioctl(struct file *filp,
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
- _IOC_SIZE(cmd)) != 0) {
+ usize) != 0) {
retcode = -EFAULT;
goto err_i1;
}
- }
+ } else
+ memset(kdata, 0, usize);
+
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
retcode = func(dev, kdata, file_priv);
- unlock_kernel();
+ mutex_unlock(&drm_global_mutex);
}
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
- _IOC_SIZE(cmd)) != 0)
+ usize) != 0)
retcode = -EFAULT;
}
}
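The new usize/asize pair lets a driver declare, via the size encoded in its table entry's cmd_drv, a kernel-side argument struct that may be larger than what an older userspace passes in: the bounce buffer is sized for the larger of the two, only the user-supplied bytes are copied in, and only they are copied back out. A simplified, hedged sketch of that copy-in step with invented names:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/err.h>

    /* Sketch: build a kernel copy of the ioctl argument that is safe to
     * read up to drv_size bytes from, even if userspace sent fewer. */
    static void *example_copy_ioctl_arg(void __user *uarg,
                                        unsigned int usize,
                                        unsigned int drv_size)
    {
            unsigned int asize = max(usize, drv_size);
            char *kdata;

            kdata = kzalloc(asize, GFP_KERNEL);   /* tail stays zeroed */
            if (!kdata)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(kdata, uarg, usize)) {
                    kfree(kdata);
                    return ERR_PTR(-EFAULT);
            }

            return kdata;
    }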
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9585e531ac6..96e96310822 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -33,6 +33,11 @@
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
+#include "drm_edid_modes.h"
+
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
@@ -62,6 +67,13 @@
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
#define LEVEL_DMT 0
#define LEVEL_GTF 1
@@ -282,7 +294,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
return block;
carp:
- dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
out:
@@ -375,7 +387,6 @@ static u32 edid_get_quirks(struct edid *edid)
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
-
/**
* edid_fixup_preferred - set preferred modes based on quirk list
* @connector: has mode list to fix up
@@ -422,245 +433,6 @@ static void edid_fixup_preferred(struct drm_connector *connector,
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
-/*
- * Add the Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
- */
-static struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1729x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@75HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@85HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
@@ -685,6 +457,46 @@ EXPORT_SYMBOL(drm_mode_find_dmt);
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 rev = ext[0x01], d = ext[0x02];
+ u8 *det_base = ext + d;
+
+ switch (rev) {
+ case 0:
+ /* can't happen */
+ return;
+ case 1:
+ /* have to infer how many blocks we have, check pixel clock */
+ for (i = 0; i < 6; i++)
+ if (det_base[18*i] || det_base[18*i+1])
+ n++;
+ break;
+ default:
+ /* explicit count */
+ n = min(ext[0x03] & 0x0f, 6);
+ break;
+ }
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
{
int i;
@@ -696,7 +508,19 @@ drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
cb(&(edid->detailed_timings[i]), closure);
- /* XXX extension block walk */
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
}
static void
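The walker above now visits detailed-timing descriptors in the base block and in CEA and VTB extension blocks alike, handing each one to a callback along with a caller-supplied closure. A hedged sketch of how a further consumer could plug into the same mechanism (the counting callback is invented for illustration):

    /* Sketch: count descriptors that carry a pixel clock, i.e. real
     * detailed modes rather than monitor descriptors. */
    static void count_detailed(struct detailed_timing *timing, void *c)
    {
            int *count = c;

            if (timing->pixel_clock)
                    (*count)++;
    }

    static int example_count_detailed_modes(struct edid *edid)
    {
            int count = 0;

            drm_for_each_detailed_block((u8 *)edid, count_detailed, &count);
            return count;
    }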
@@ -1047,117 +871,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return mode;
}
-/*
- * Detailed mode info for the EDID "established modes" data to use.
- */
-static struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-/**
- * add_established_modes - get est. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Each EDID block contains a bitmap of the supported "established modes" list
- * (defined above). Tease them out and add them to the global modes list.
- */
-static int add_established_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- unsigned long est_bits = edid->established_timings.t1 |
- (edid->established_timings.t2 << 8) |
- ((edid->established_timings.mfg_rsvd & 0x80) << 9);
- int i, modes = 0;
-
- for (i = 0; i <= EDID_EST_TIMINGS; i++)
- if (est_bits & (1<<i)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-
-/**
- * add_standard_modes - get std. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Standard modes can be calculated using the CVT standard. Grab them from
- * @edid, calculate them, and add them to the list.
- */
-static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
-{
- int i, modes = 0;
-
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct drm_display_mode *newmode;
-
- newmode = drm_mode_std(connector, edid,
- &edid->standard_timings[i],
- edid->revision);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-
static bool
mode_is_rb(struct drm_display_mode *mode)
{
@@ -1267,113 +980,33 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
}
-static int drm_cvt_modes(struct drm_connector *connector,
- struct detailed_timing *timing)
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
{
- int i, j, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- struct cvt_timing *cvt;
- const int rates[] = { 60, 85, 75, 60, 50 };
- const u8 empty[3] = { 0, 0, 0 };
-
- for (i = 0; i < 4; i++) {
- int uninitialized_var(width), height;
- cvt = &(timing->data.other_data.data.cvt[i]);
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
- if (!memcmp(cvt->code, empty, 3))
- continue;
+ if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+}
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
- switch (cvt->code[1] & 0x0c) {
- case 0x00:
- width = height * 4 / 3;
- break;
- case 0x04:
- width = height * 16 / 9;
- break;
- case 0x08:
- width = height * 16 / 10;
- break;
- case 0x0c:
- width = height * 15 / 9;
- break;
- }
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- for (j = 1; j < 5; j++) {
- if (cvt->code[2] & (1 << j)) {
- newmode = drm_cvt_mode(dev, width, height,
- rates[j], j == 0,
- false, false);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
- }
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
- return modes;
+ return closure.modes;
}
-static const struct {
- short w;
- short h;
- short r;
- short rb;
-} est3_modes[] = {
- /* byte 6 */
- { 640, 350, 85, 0 },
- { 640, 400, 85, 0 },
- { 720, 400, 85, 0 },
- { 640, 480, 85, 0 },
- { 848, 480, 60, 0 },
- { 800, 600, 85, 0 },
- { 1024, 768, 85, 0 },
- { 1152, 864, 75, 0 },
- /* byte 7 */
- { 1280, 768, 60, 1 },
- { 1280, 768, 60, 0 },
- { 1280, 768, 75, 0 },
- { 1280, 768, 85, 0 },
- { 1280, 960, 60, 0 },
- { 1280, 960, 85, 0 },
- { 1280, 1024, 60, 0 },
- { 1280, 1024, 85, 0 },
- /* byte 8 */
- { 1360, 768, 60, 0 },
- { 1440, 900, 60, 1 },
- { 1440, 900, 60, 0 },
- { 1440, 900, 75, 0 },
- { 1440, 900, 85, 0 },
- { 1400, 1050, 60, 1 },
- { 1400, 1050, 60, 0 },
- { 1400, 1050, 75, 0 },
- /* byte 9 */
- { 1400, 1050, 85, 0 },
- { 1680, 1050, 60, 1 },
- { 1680, 1050, 60, 0 },
- { 1680, 1050, 75, 0 },
- { 1680, 1050, 85, 0 },
- { 1600, 1200, 60, 0 },
- { 1600, 1200, 65, 0 },
- { 1600, 1200, 70, 0 },
- /* byte 10 */
- { 1600, 1200, 75, 0 },
- { 1600, 1200, 85, 0 },
- { 1792, 1344, 60, 0 },
- { 1792, 1344, 85, 0 },
- { 1856, 1392, 60, 0 },
- { 1856, 1392, 75, 0 },
- { 1920, 1200, 60, 1 },
- { 1920, 1200, 60, 0 },
- /* byte 11 */
- { 1920, 1200, 75, 0 },
- { 1920, 1200, 85, 0 },
- { 1920, 1440, 60, 0 },
- { 1920, 1440, 75, 0 },
-};
-
static int
drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
@@ -1403,37 +1036,63 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
return modes;
}
-static int add_detailed_modes(struct drm_connector *connector,
- struct detailed_timing *timing,
- struct edid *edid, u32 quirks, int preferred)
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
{
- int i, modes = 0;
+ struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
- int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- return 0;
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
- if (preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- drm_mode_probed_add(connector, newmode);
- return 1;
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
}
- /* other timing types */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_RANGE:
- if (gtf)
- modes += drm_gtf_modes_for_range(connector, edid,
- timing);
- break;
- case EDID_DETAIL_STD_MODES:
- /* Six modes per detailed section */
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
for (i = 0; i < 6; i++) {
struct std_timing *std;
struct drm_display_mode *newmode;
@@ -1443,108 +1102,169 @@ static int add_detailed_modes(struct drm_connector *connector,
edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
- modes++;
+ closure->modes++;
}
}
- break;
- case EDID_DETAIL_CVT_3BYTE:
- modes += drm_cvt_modes(connector, timing);
- break;
- case EDID_DETAIL_EST_TIMINGS:
- modes += drm_est3_modes(connector, timing);
- break;
- default:
- break;
}
-
- return modes;
}
/**
- * add_detailed_info - get detailed mode info from EDID data
- * @connector: attached connector
+ * add_standard_modes - get std. modes from EDID and add them
* @edid: EDID block to scan
- * @quirks: quirks to apply
*
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
*/
-static int add_detailed_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
+}
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
- struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0);
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
- if (preferred && edid->version == 1 && edid->revision < 4)
- preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ for (i = 0; i < 4; i++) {
+ int uninitialized_var(width), height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
- /* In 1.0, only timings are allowed */
- if (!timing->pixel_clock && edid->version == 1 &&
- edid->revision == 0)
+ if (!memcmp(cvt->code, empty, 3))
continue;
- modes += add_detailed_modes(connector, timing, edid, quirks,
- preferred);
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x04:
+ width = height * 16 / 9;
+ break;
+ case 0x08:
+ width = height * 16 / 10;
+ break;
+ case 0x0c:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
}
return modes;
}
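To make the 3-byte CVT decode above concrete, here is a worked example; the descriptor bytes are hypothetical and not taken from this patch:

/*
 * Hypothetical CVT 3-byte code { 0x67, 0x14, 0x08 }:
 *   height = ((0x67 + ((0x14 & 0xf0) << 4)) + 1) * 2
 *          = ((103 + 256) + 1) * 2 = 720
 *   code[1] & 0x0c == 0x04        -> width = 720 * 16 / 9 = 1280
 *   code[2] has bit 3 set (0x08)  -> rates[3] = 60
 * so drm_cvt_mode() is asked for a 1280x720@60Hz mode.
 */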
-/**
- * add_detailed_mode_eedid - get detailed mode info from addtional timing
- * EDID block
- * @connector: attached connector
- * @edid: EDID block to scan(It is only to get addtional timing EDID block)
- * @quirks: quirks to apply
- *
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
- */
-static int add_detailed_info_eedid(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
{
- int i, modes = 0;
- char *edid_ext = NULL;
- struct detailed_timing *timing;
- int start_offset, end_offset;
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
- if (edid->version == 1 && edid->revision < 3)
- return 0;
- if (!edid->extensions)
- return 0;
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
- /* Find CEA extension */
- for (i = 0; i < edid->extensions; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == 0x02)
- break;
- }
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- if (i == edid->extensions)
- return 0;
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
- /* Get the start offset of detailed timing block */
- start_offset = edid_ext[2];
- if (start_offset == 0) {
- /* If the start_offset is zero, it means that neither detailed
- * info nor data block exist. In such case it is also
- * unnecessary to parse the detailed timing info.
- */
- return 0;
- }
+ /* XXX should also look for CVT codes in VTB blocks */
- end_offset = EDID_LENGTH;
- end_offset -= sizeof(struct detailed_timing);
- for (i = start_offset; i < end_offset;
- i += sizeof(struct detailed_timing)) {
- timing = (struct detailed_timing *)(edid_ext + i);
- modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+ return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_mode *newmode;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
+
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
}
+}
- return modes;
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
}
#define HDMI_IDENTIFIER 0x000C03
@@ -1623,7 +1343,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
if (!drm_edid_is_valid(edid)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
}
@@ -1640,35 +1360,21 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
* - established timing codes
* - modes inferred from GTF or CVT range information
*
- * We don't quite implement this yet, but we're close.
+ * We get this pretty much right.
*
* XXX order for additional mode types in extension blocks?
*/
- num_modes += add_detailed_info(connector, edid, quirks);
- num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
+ num_modes += add_inferred_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
- connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
- connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
- connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
- connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
- connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
- connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
connector->display_info.width_mm = edid->width_cm * 10;
connector->display_info.height_mm = edid->height_cm * 10;
- connector->display_info.gamma = edid->gamma;
- connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
- connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
- connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
- connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
- connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
- connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
- connector->display_info.gamma = edid->gamma;
return num_modes;
}
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
new file mode 100644
index 00000000000..6eb7592e152
--- /dev/null
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ * But the mode with Reduced blank feature is deleted.
+ */
+static struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+
+static struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
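The table above is grouped by EST III descriptor byte; a hedged sketch of the indexing a consumer would presumably use (the MSB-first bit order is an assumption, since the EST III parser itself is not part of this hunk):

/* Sketch only: one mode per bit, most-significant bit first, starting at
 * EST III descriptor byte 6, matching the "byte 6".."byte 11" grouping
 * of est3_modes[] above. */
static inline int est3_mode_index(int byte, int bit)	/* byte 6..11, bit 7..0 */
{
	return (byte - 6) * 8 + (7 - bit);
}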
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index f0184696edf..d62c064fbaa 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -41,6 +41,9 @@
* &drm_encoder_slave. The @slave_funcs field will be initialized with
* the hooks provided by the slave driver.
*
+ * If @info->platform_data is non-NULL, it will be used as the initial
+ * slave config.
+ *
* Returns 0 on success or a negative errno on failure, in particular,
* -ENODEV is returned when no matching driver is found.
*/
@@ -85,6 +88,10 @@ int drm_i2c_encoder_init(struct drm_device *dev,
if (err)
goto fail_unregister;
+ if (info->platform_data)
+ encoder->slave_funcs->set_config(&encoder->base,
+ info->platform_data);
+
return 0;
fail_unregister:
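A minimal, hypothetical caller sketch for the platform_data hunk above, assuming the usual drm_i2c_encoder_init(dev, slave, adapter, info) signature; the chip name, address and config pointer are illustrative only:

static int hypothetical_attach_tv_encoder(struct drm_device *dev,
					  struct drm_encoder_slave *slave,
					  struct i2c_adapter *adap,
					  void *encoder_config)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("ch7006", 0x75),	/* example chip/address */
		/* non-NULL platform_data is now handed to ->set_config() */
		.platform_data = encoder_config,
	};

	return drm_i2c_encoder_init(dev, slave, adap, &info);
}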
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 719662034bb..6a5e403f9aa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_connector *connector = fb_helper_conn->connector;
+ struct drm_connector *connector;
if (!fb_helper_conn)
return false;
+ connector = fb_helper_conn->connector;
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
@@ -241,6 +242,80 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
return 0;
}
+int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc_helper_funcs *funcs;
+ int i;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+ &helper->crtc_info[i].mode_set;
+
+ if (!mode_set->crtc->enabled)
+ continue;
+
+ funcs = mode_set->crtc->helper_private;
+ funcs->mode_set_base_atomic(mode_set->crtc,
+ mode_set->fb,
+ mode_set->x,
+ mode_set->y);
+
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+
+int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_framebuffer *fb;
+ int i;
+
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ funcs = crtc->helper_private;
+ fb = drm_mode_config_fb(crtc);
+
+ if (!crtc->enabled)
+ continue;
+
+ if (!fb) {
+ DRM_ERROR("no fb to restore??\n");
+ continue;
+ }
+
+ funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+ crtc->y);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+
bool drm_fb_helper_force_kernel_mode(void)
{
int i = 0;
@@ -295,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
@@ -611,7 +686,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
- if (var->pixclock != 0)
+ if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
/* Need to resize the fb object !!! */
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e7aace20981..b744dad5c23 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -39,6 +39,10 @@
#include <linux/slab.h>
#include <linux/smp_lock.h>
+/* from BKL pushdown: note that nothing else serializes idr_find() */
+DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
+
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_device * dev);
@@ -132,15 +136,9 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(inode, filp, dev);
if (!retcode) {
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- spin_lock(&dev->count_lock);
- if (!dev->open_count++) {
- spin_unlock(&dev->count_lock);
+ if (!dev->open_count++)
retcode = drm_setup(dev);
- goto out;
- }
- spin_unlock(&dev->count_lock);
}
-out:
if (!retcode) {
mutex_lock(&dev->struct_mutex);
if (minor->type == DRM_MINOR_LEGACY) {
@@ -175,8 +173,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- /* BKL pushdown: note that nothing else serializes idr_find() */
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
goto out;
@@ -197,7 +194,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
fops_put(old_fops);
out:
- unlock_kernel();
+ mutex_unlock(&drm_global_mutex);
return err;
}
@@ -472,7 +469,7 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_device *dev = file_priv->minor->dev;
int retcode = 0;
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
DRM_DEBUG("open_count = %d\n", dev->open_count);
@@ -568,22 +565,15 @@ int drm_release(struct inode *inode, struct file *filp)
*/
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
- spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
atomic_read(&dev->ioctl_count));
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return -EBUSY;
- }
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return drm_lastclose(dev);
+ retcode = -EBUSY;
+ } else
+ retcode = drm_lastclose(dev);
}
- spin_unlock(&dev->count_lock);
-
- unlock_kernel();
+ mutex_unlock(&drm_global_mutex);
return retcode;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 33dad3fa604..5663d271906 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -68,8 +68,18 @@
* We make up offsets for buffer objects so we can recognize them at
* mmap time.
*/
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
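Rough arithmetic behind the two ranges, assuming 4 KiB pages (PAGE_SHIFT == 12):

/*
 *   64-bit: start = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages (4 GiB offset)
 *           size  = (0xFFFFFFFF >> 12) * 16 ~= 16M pages    (~64 GiB of space)
 *   32-bit: start = (0xFFFFFFF  >> 12) + 1 = 0x10000 pages  (256 MiB offset)
 *           size  = (0xFFFFFFF  >> 12) * 16 ~= 1M pages     (~4 GiB of space)
 * Either way the largest faked page offset still fits in the platform's
 * unsigned long, which is what the #if above guards.
 */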
/**
* Initialize the GEM device fields
@@ -138,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
return -ENOMEM;
kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
+ atomic_set(&obj->handle_count, 0);
obj->size = size;
atomic_inc(&dev->object_count);
@@ -312,7 +322,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
@@ -419,6 +429,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
+ idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
@@ -451,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_free);
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
- struct drm_device *dev = obj->dev;
-
- if (dev->driver->gem_free_object_unlocked != NULL)
- dev->driver->gem_free_object_unlocked(obj);
- else if (dev->driver->gem_free_object != NULL) {
- mutex_lock(&dev->struct_mutex);
- dev->driver->gem_free_object(obj);
- mutex_unlock(&dev->struct_mutex);
- }
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
@@ -485,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
- struct drm_gem_object *obj = container_of(kref,
- struct drm_gem_object,
- handlecount);
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
@@ -517,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
+
+ mutex_lock(&obj->dev->struct_mutex);
+ drm_vm_open_locked(vma);
+ mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -524,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- drm_gem_object_unreference_unlocked(obj);
+ mutex_lock(&obj->dev->struct_mutex);
+ drm_vm_close_locked(vma);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/drm_global.c
index b17007178a3..c87dc96444d 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -28,45 +28,45 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include "ttm/ttm_module.h"
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include "drm_global.h"
-struct ttm_global_item {
+struct drm_global_item {
struct mutex mutex;
void *object;
int refcount;
};
-static struct ttm_global_item glob[TTM_GLOBAL_NUM];
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
-void ttm_global_init(void)
+void drm_global_init(void)
{
int i;
- for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
- struct ttm_global_item *item = &glob[i];
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
mutex_init(&item->mutex);
item->object = NULL;
item->refcount = 0;
}
}
-void ttm_global_release(void)
+void drm_global_release(void)
{
int i;
- for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
- struct ttm_global_item *item = &glob[i];
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
BUG_ON(item->object != NULL);
BUG_ON(item->refcount != 0);
}
}
-int ttm_global_item_ref(struct ttm_global_reference *ref)
+int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
- struct ttm_global_item *item = &glob[ref->global_type];
+ struct drm_global_item *item = &glob[ref->global_type];
void *object;
mutex_lock(&item->mutex);
@@ -93,11 +93,11 @@ out_err:
item->object = NULL;
return ret;
}
-EXPORT_SYMBOL(ttm_global_item_ref);
+EXPORT_SYMBOL(drm_global_item_ref);
-void ttm_global_item_unref(struct ttm_global_reference *ref)
+void drm_global_item_unref(struct drm_global_reference *ref)
{
- struct ttm_global_item *item = &glob[ref->global_type];
+ struct drm_global_item *item = &glob[ref->global_type];
mutex_lock(&item->mutex);
BUG_ON(item->refcount == 0);
@@ -108,5 +108,5 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
}
mutex_unlock(&item->mutex);
}
-EXPORT_SYMBOL(ttm_global_item_unref);
+EXPORT_SYMBOL(drm_global_item_unref);
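A hedged usage sketch for the renamed API; it assumes drm_global_reference keeps the old ttm_global_reference layout (global_type, size, object, init, release) and that a DRM_GLOBAL_TTM_MEM type mirrors the former TTM one. struct my_driver and its field names are hypothetical:

static int hypothetical_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void hypothetical_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int hypothetical_driver_global_init(struct my_driver *drv)
{
	struct drm_global_reference *ref = &drv->mem_global_ref;

	ref->global_type = DRM_GLOBAL_TTM_MEM;
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &hypothetical_mem_global_init;
	ref->release = &hypothetical_mem_global_release;

	/* paired with drm_global_item_unref(ref) on teardown */
	return drm_global_item_ref(ref);
}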
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index f0f6c6b93f3..974e970ce3f 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -51,13 +51,24 @@ int drm_name_info(struct seq_file *m, void *data)
if (!master)
return 0;
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- pci_name(dev->pdev), master->unique);
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ dev->driver->platform_device->name,
+ dev_name(dev->dev), master->unique);
+ } else {
+ seq_printf(m, "%s\n",
+ dev->driver->platform_device->name);
+ }
} else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- pci_name(dev->pdev));
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ dev->driver->pci_driver.name,
+ dev_name(dev->dev), master->unique);
+ } else {
+ seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+ dev_name(dev->dev));
+ }
}
return 0;
@@ -244,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->handle_count),
atomic_read(&obj->refcount.refcount));
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9b9ff46c237..47db4df37a6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -64,6 +64,19 @@ int drm_getunique(struct drm_device *dev, void *data,
return 0;
}
+static void
+drm_unset_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ kfree(dev->devname);
+ dev->devname = NULL;
+
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+ master->unique_size = 0;
+}
+
/**
* Set the bus id.
*
@@ -94,17 +107,24 @@ int drm_setunique(struct drm_device *dev, void *data,
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique)
- return -ENOMEM;
- if (copy_from_user(master->unique, u->unique, master->unique_len))
- return -EFAULT;
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
master->unique[master->unique_len] = '\0';
dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname)
- return -ENOMEM;
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto err;
+ }
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
master->unique);
@@ -113,53 +133,103 @@ int drm_setunique(struct drm_device *dev, void *data,
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3)
- return -EINVAL;
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn)))
- return -EINVAL;
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
return 0;
+
+err:
+ drm_unset_busid(dev, master);
+ return ret;
}
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len;
+ int len, ret;
if (master->unique != NULL)
- return -EBUSY;
+ drm_unset_busid(dev, master);
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len)
- DRM_ERROR("buffer overflow");
- else
- master->unique_len = len;
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
+ master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
- if (dev->devname == NULL)
- return -ENOMEM;
+ if (master->unique == NULL)
+ return -ENOMEM;
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s", dev->platformdev->name);
+
+ if (len > master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->devname =
+ kmalloc(strlen(dev->platformdev->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+ master->unique);
+
+ } else {
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+ if (len >= master->unique_len) {
+ DRM_ERROR("buffer overflow");
+ ret = -EINVAL;
+ goto err;
+ } else
+ master->unique_len = len;
+
+ dev->devname =
+ kmalloc(strlen(dev->driver->pci_driver.name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+ master->unique);
+ }
return 0;
+
+err:
+ drm_unset_busid(dev, master);
+ return ret;
}
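For illustration, the strings produced by the two branches above (device and driver names are examples only):

/*
 *   PCI case:       master->unique = "pci:0000:02:00.0"
 *                   dev->devname   = "i915@pci:0000:02:00.0"
 *   platform case:  master->unique = "platform:s3c-fb"
 *                   dev->devname   = "s3c-fb@platform:s3c-fb"
 */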
/**
@@ -322,8 +392,11 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
+ * Version 1.4 has proper PCI domain support
*/
- drm_set_busid(dev, file_priv);
+ retcode = drm_set_busid(dev, file_priv);
+ if (retcode)
+ goto done;
}
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a263b7070fc..9d3a5030b6e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -34,6 +34,7 @@
*/
#include "drmP.h"
+#include "drm_trace.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/slab.h>
@@ -57,6 +58,9 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
@@ -211,7 +215,7 @@ int drm_irq_install(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (dev->pdev->irq == 0)
+ if (drm_dev_to_irq(dev) == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -229,7 +233,7 @@ int drm_irq_install(struct drm_device *dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+ DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -302,14 +306,14 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+ DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
dev->driver->irq_uninstall(dev);
- free_irq(dev->pdev->irq, dev);
+ free_irq(drm_dev_to_irq(dev), dev);
return 0;
}
@@ -341,7 +345,7 @@ int drm_control(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->pdev->irq)
+ ctl->irq != drm_dev_to_irq(dev))
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -587,6 +591,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
return -ENOMEM;
e->pipe = pipe;
+ e->base.pid = current->pid;
e->event.base.type = DRM_EVENT_VBLANK;
e->event.base.length = sizeof e->event;
e->event.user_data = vblwait->request.signal;
@@ -614,6 +619,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
vblwait->request.sequence, seq, pipe);
+ trace_drm_vblank_event_queued(current->pid, pipe,
+ vblwait->request.sequence);
+
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
e->event.tv_sec = now.tv_sec;
@@ -621,6 +629,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
drm_vblank_put(dev, e->pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(current->pid, pipe,
+ vblwait->request.sequence);
} else {
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
@@ -651,7 +661,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret = 0;
unsigned int flags, seq, crtc;
- if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
@@ -751,9 +761,13 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
drm_vblank_put(dev, e->pipe);
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ trace_drm_vblank_event(crtc, seq);
}
/**
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c3..9bf93bc9a32 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/* Contention */
+ mutex_unlock(&drm_global_mutex);
schedule();
+ mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2ac074c8f5d..a6bfc302ed9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -48,44 +48,14 @@
#define MM_UNUSED_TARGET 4
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return 0;
-
- return entry->size;
-}
-
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return -ENOMEM;
-
- if (entry->size <= size)
- return -ENOMEM;
-
- entry->size -= size;
- return 0;
-}
-
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
if (atomic)
- child = kmalloc(sizeof(*child), GFP_ATOMIC);
+ child = kzalloc(sizeof(*child), GFP_ATOMIC);
else
- child = kmalloc(sizeof(*child), GFP_KERNEL);
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);
@@ -94,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
- struct drm_mm_node, fl_entry);
- list_del(&child->fl_entry);
+ struct drm_mm_node, free_stack);
+ list_del(&child->free_stack);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@@ -115,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
spin_unlock(&mm->unused_lock);
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
spin_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
@@ -124,7 +94,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
- list_add_tail(&node->fl_entry, &mm->unused_nodes);
+ list_add_tail(&node->free_stack, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
@@ -146,27 +116,12 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
child->start = start;
child->mm = mm;
- list_add_tail(&child->ml_entry, &mm->ml_entry);
- list_add_tail(&child->fl_entry, &mm->fl_entry);
+ list_add_tail(&child->node_list, &mm->node_list);
+ list_add_tail(&child->free_stack, &mm->free_stack);
return 0;
}
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size,
- size, atomic);
- }
- entry->size += size;
- return 0;
-}
-
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size,
int atomic)
@@ -177,15 +132,14 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
if (unlikely(child == NULL))
return NULL;
- INIT_LIST_HEAD(&child->fl_entry);
+ INIT_LIST_HEAD(&child->free_stack);
- child->free = 0;
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
- list_add_tail(&child->ml_entry, &parent->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
+ list_add_tail(&child->node_list, &parent->node_list);
+ INIT_LIST_HEAD(&child->free_stack);
parent->size -= size;
parent->start += size;
@@ -213,7 +167,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
}
if (node->size == size) {
- list_del_init(&node->fl_entry);
+ list_del_init(&node->free_stack);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
@@ -251,7 +205,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
}
if (node->size == size) {
- list_del_init(&node->fl_entry);
+ list_del_init(&node->free_stack);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
@@ -273,16 +227,19 @@ void drm_mm_put_block(struct drm_mm_node *cur)
{
struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
+ struct list_head *cur_head = &cur->node_list;
+ struct list_head *root_head = &mm->node_list;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
int merged = 0;
+ BUG_ON(cur->scanned_block || cur->scanned_prev_free
+ || cur->scanned_next_free);
+
if (cur_head->prev != root_head) {
prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+ list_entry(cur_head->prev, struct drm_mm_node, node_list);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
@@ -290,15 +247,15 @@ void drm_mm_put_block(struct drm_mm_node *cur)
}
if (cur_head->next != root_head) {
next_node =
- list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+ list_entry(cur_head->next, struct drm_mm_node, node_list);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
+ list_del(&next_node->node_list);
+ list_del(&next_node->free_stack);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->fl_entry,
+ list_add(&next_node->free_stack,
&mm->unused_nodes);
++mm->num_unused;
} else
@@ -313,12 +270,12 @@ void drm_mm_put_block(struct drm_mm_node *cur)
}
if (!merged) {
cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry);
+ list_add(&cur->free_stack, &mm->free_stack);
} else {
- list_del(&cur->ml_entry);
+ list_del(&cur->node_list);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->fl_entry, &mm->unused_nodes);
+ list_add(&cur->free_stack, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
@@ -328,40 +285,51 @@ void drm_mm_put_block(struct drm_mm_node *cur)
EXPORT_SYMBOL(drm_mm_put_block);
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
+{
+ unsigned wasted = 0;
+
+ if (end - start < size)
+ return 0;
+
+ if (alignment) {
+ unsigned tmp = start % alignment;
+ if (tmp)
+ wasted = alignment - tmp;
+ }
+
+ if (end >= start + size + wasted) {
+ return 1;
+ }
+
+ return 0;
+}
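A quick worked example of the helper above, with hypothetical numbers:

/*
 * Hole [0x1003, 0x2000), request size 0xc00, alignment 0x100:
 *   end - start = 0xffd >= 0xc00             (big enough before alignment)
 *   tmp    = 0x1003 % 0x100 = 0x03
 *   wasted = 0x100 - 0x03   = 0xfd
 *   start + size + wasted = 0x1d00 <= 0x2000 -> returns 1 (hole fits)
 */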
+
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
+ list_for_each_entry(entry, &mm->free_stack, free_stack) {
+ if (!check_free_hole(entry->start, entry->start + entry->size,
+ size, alignment))
continue;
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
+ if (!best_match)
+ return entry;
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
}
}
@@ -376,53 +344,193 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
unsigned long end,
int best_match)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
+ list_for_each_entry(entry, &mm->free_stack, free_stack) {
+ unsigned long adj_start = entry->start < start ?
+ start : entry->start;
+ unsigned long adj_end = entry->start + entry->size > end ?
+ end : entry->start + entry->size;
- if (entry->size < size)
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (entry->start > end || (entry->start+entry->size) < start)
- continue;
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+ unsigned alignment)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Add a node to the scan list as an eviction candidate that might be freed to
+ * make space for the desired hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct list_head *prev_free, *next_free;
+ struct drm_mm_node *prev_node, *next_node;
+
+ mm->scanned_blocks++;
+
+ prev_free = next_free = NULL;
+
+ BUG_ON(node->free);
+ node->scanned_block = 1;
+ node->free = 1;
+
+ if (node->node_list.prev != &mm->node_list) {
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ if (prev_node->free) {
+ list_del(&prev_node->node_list);
+
+ node->start = prev_node->start;
+ node->size += prev_node->size;
- if (entry->start < start)
- wasted += start - entry->start;
+ prev_node->scanned_prev_free = 1;
- if (alignment) {
- register unsigned tmp = (entry->start + wasted) % alignment;
- if (tmp)
- wasted += alignment - tmp;
+ prev_free = &prev_node->free_stack;
}
+ }
- if (entry->size >= size + wasted &&
- (entry->start + wasted + size) <= end) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
+ if (node->node_list.next != &mm->node_list) {
+ next_node = list_entry(node->node_list.next, struct drm_mm_node,
+ node_list);
+
+ if (next_node->free) {
+ list_del(&next_node->node_list);
+
+ node->size += next_node->size;
+
+ next_node->scanned_next_free = 1;
+
+ next_free = &next_node->free_stack;
}
}
- return best;
+ /* The free_stack list is not used for allocated objects, so these two
+ * pointers can be abused (as long as no allocations in this memory
+ * manager happen). */
+ node->free_stack.prev = prev_free;
+ node->free_stack.next = next_free;
+
+ if (check_free_hole(node->start, node->start + node->size,
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = node->start;
+ mm->scan_hit_size = node->size;
+
+ return 1;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node, *next_node;
+
+ mm->scanned_blocks--;
+
+ BUG_ON(!node->scanned_block);
+ node->scanned_block = 0;
+ node->free = 0;
+
+ prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
+ free_stack);
+ next_node = list_entry(node->free_stack.next, struct drm_mm_node,
+ free_stack);
+
+ if (prev_node) {
+ BUG_ON(!prev_node->scanned_prev_free);
+ prev_node->scanned_prev_free = 0;
+
+ list_add_tail(&prev_node->node_list, &node->node_list);
+
+ node->start = prev_node->start + prev_node->size;
+ node->size -= prev_node->size;
+ }
+
+ if (next_node) {
+ BUG_ON(!next_node->scanned_next_free);
+ next_node->scanned_next_free = 0;
+
+ list_add(&next_node->node_list, &node->node_list);
+
+ node->size -= next_node->size;
+ }
+
+ INIT_LIST_HEAD(&node->free_stack);
+
+ /* Only need to check for containment because start & size for the
+ * complete resulting free block (not just the desired part) is
+ * stored. */
+ if (node->start >= mm->scan_hit_start &&
+ node->start + node->size
+ <= mm->scan_hit_start + mm->scan_hit_size) {
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
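A minimal usage sketch of the scan API added above, following the documented contract: feed LRU blocks to the scanner until a hole is reported, then unwind every scanned block in the same order and evict the ones flagged by drm_mm_scan_remove_block(). The struct my_obj, its list links and the my_evict() hook are hypothetical driver-side names, not part of this patch.

/* Illustrative sketch only, not part of the patch above. */
#include "drmP.h"	/* pulls in drm_mm.h and the list helpers */

struct my_obj {
	struct drm_mm_node *mm_node;	/* backing allocation in the drm_mm */
	struct list_head lru_link;	/* position on the driver's LRU */
	struct list_head scan_link;	/* temporary link while scanning */
};

static void my_evict(struct my_obj *obj);	/* hypothetical: unbinds and frees obj->mm_node */

static int my_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
			     unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *tmp;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	/* Feed LRU blocks to the scanner until it reports a suitable hole. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add_tail(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(obj->mm_node)) {
			found = 1;
			break;
		}
	}

	/*
	 * Every scanned block must be removed again, in the same order it was
	 * added.  Blocks for which drm_mm_scan_remove_block() returns 1 are
	 * the ones to evict; it returns 0 for all of them if no hole was found.
	 */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(obj->mm_node))
			list_move(&obj->scan_link, &evict_list);
		else
			list_del(&obj->scan_link);
	}

	/* Only once the scan list is fully unwound may the chosen nodes go. */
	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		my_evict(obj);
	}

	return found ? 0 : -ENOSPC;
}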
int drm_mm_clean(struct drm_mm * mm)
{
- struct list_head *head = &mm->ml_entry;
+ struct list_head *head = &mm->node_list;
return (head->next->next == head);
}
@@ -430,10 +538,11 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
+ INIT_LIST_HEAD(&mm->node_list);
+ INIT_LIST_HEAD(&mm->free_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
+ mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
return drm_mm_create_tail_node(mm, start, size, 0);
@@ -442,25 +551,25 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
- struct list_head *bnode = mm->fl_entry.next;
+ struct list_head *bnode = mm->free_stack.next;
struct drm_mm_node *entry;
struct drm_mm_node *next;
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+ entry = list_entry(bnode, struct drm_mm_node, free_stack);
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
+ if (entry->node_list.next != &mm->node_list ||
+ entry->free_stack.next != &mm->free_stack) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
+ list_del(&entry->free_stack);
+ list_del(&entry->node_list);
kfree(entry);
spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
- list_del(&entry->fl_entry);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
+ list_del(&entry->free_stack);
kfree(entry);
--mm->num_unused;
}
@@ -475,7 +584,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ list_for_each_entry(entry, &mm->node_list, node_list) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
prefix, entry->start, entry->start + entry->size,
entry->size, entry->free ? "free" : "used");
@@ -496,7 +605,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ list_for_each_entry(entry, &mm->node_list, node_list) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
total += entry->size;
if (entry->free)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473ea97d..949326d2a8e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 2ea9ad4a8d6..f5bd9e590c8 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -124,4 +124,151 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
+#ifdef CONFIG_PCI
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * register the character device and inter-module information.
+ * Try to register; if registration fails, back out the previous work.
+ */
+int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_g1;
+
+ pci_set_master(pdev);
+
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+
+ dev->pci_device = pdev->device;
+ dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ mutex_lock(&drm_global_mutex);
+
+ if ((ret = drm_fill_in_dev(dev, ent, driver))) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g2;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ pci_set_drvdata(pdev, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g2;
+ }
+
+ if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+ goto err_g3;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, ent->driver_data);
+ if (ret)
+ goto err_g4;
+ }
+
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g4;
+ }
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, pci_name(pdev), dev->primary->index);
+
+ mutex_unlock(&drm_global_mutex);
+ return 0;
+
+err_g4:
+ drm_put_minor(&dev->primary);
+err_g3:
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_put_minor(&dev->control);
+err_g2:
+ pci_disable_device(pdev);
+err_g1:
+ kfree(dev);
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_get_pci_dev);
+
+/**
+ * PCI device initialization. Called via drm_init at module load time.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structure, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_pci_init(struct drm_driver *driver)
+{
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *pid;
+ int i;
+
+ if (driver->driver_features & DRIVER_MODESET)
+ return pci_register_driver(&driver->pci_driver);
+
+ /* If not using KMS, fall back to stealth mode manual scanning. */
+ for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
+ pid = &driver->pci_driver.id_table[i];
+
+ /* Loop around setting up a DRM device for each PCI device
+ * matching our ID and device class. If we had the internal
+ * function that pci_get_subsys and pci_get_class used, we'd
+ * be able to just pass pid in instead of doing a two-stage
+ * thing.
+ */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+ pid->subdevice, pdev)) != NULL) {
+ if ((pdev->class & pid->class_mask) != pid->class)
+ continue;
+
+ /* stealth mode requires a manual probe */
+ pci_dev_get(pdev);
+ drm_get_pci_dev(pdev, pid, driver);
+ }
+ }
+ return 0;
+}
+
+#else
+
+int drm_pci_init(struct drm_driver *driver)
+{
+ return -1;
+}
+
+#endif
/*@}*/
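A minimal sketch (not part of this patch) of how a modesetting driver reaches drm_get_pci_dev(): with DRIVER_MODESET set, drm_pci_init() simply registers the embedded pci_driver, and the driver's own PCI probe callback hands the device to the DRM core. All my_* names and the ID values below are placeholders.

/* Illustrative sketch only, not part of the patch above. */
static struct pci_device_id my_pciidlist[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device IDs */
	{ }
};

static struct drm_driver my_driver;	/* defined below */

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* KMS path: the PCI core probes us, we hand the device to DRM. */
	return drm_get_pci_dev(pdev, ent, &my_driver);
}

static struct drm_driver my_driver = {
	.driver_features = DRIVER_MODESET,
	/* .load, .fops, .ioctls, ... elided */
	.pci_driver = {
		.name = "my-drm",
		.id_table = my_pciidlist,
		.probe = my_pci_probe,
	},
};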
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
new file mode 100644
index 00000000000..92d1d0fb7b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_platform.c
@@ -0,0 +1,127 @@
+/*
+ * Derived from drm_pci.c
+ *
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * Copyright (c) 2009, Code Aurora Forum.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Register.
+ *
+ * \param platdev - Platform device structure
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * register the character device and inter-module information.
+ * Try to register; if registration fails, back out the previous work.
+ */
+
+int drm_get_platform_dev(struct platform_device *platdev,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->platformdev = platdev;
+ dev->dev = &platdev->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_fill_in_dev(dev, NULL, driver);
+
+ if (ret) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g1;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev_set_drvdata(&platdev->dev, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g1;
+ }
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_g2;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, 0);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+ }
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ mutex_unlock(&drm_global_mutex);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_g3:
+ drm_put_minor(&dev->primary);
+err_g2:
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_put_minor(&dev->control);
+err_g1:
+ kfree(dev);
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_get_platform_dev);
+
+/**
+ * Platform device initialization. Called via drm_init at module load time.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structure, registering the
+ * stubs.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+
+int drm_platform_init(struct drm_driver *driver)
+{
+ return drm_get_platform_dev(driver->platform_device, driver);
+}
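Similarly, a minimal sketch (not part of this patch) of the platform-bus path: a platform driver's probe callback can hand its device to the DRM core with drm_get_platform_dev(). my_platform_probe and my_drm_driver are placeholder names.

/* Illustrative sketch only, not part of the patch above. */
static struct drm_driver my_drm_driver;	/* features, fops, etc. elided */

static int my_platform_probe(struct platform_device *pdev)
{
	return drm_get_platform_dev(pdev, &my_drm_driver);
}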
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index a0c365f2e52..d1ad57450df 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -156,6 +156,9 @@ static void drm_master_destroy(struct kref *kref)
master->unique_len = 0;
}
+ kfree(dev->devname);
+ dev->devname = NULL;
+
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
@@ -224,7 +227,7 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
@@ -245,14 +248,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
idr_init(&dev->drw_idr);
- dev->pdev = pdev;
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
@@ -321,7 +316,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
* create the proc init entry via proc_init(). This routines assigns
* minor numbers to secondary heads of multi-headed cards
*/
-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
struct drm_minor *new_minor;
int ret;
@@ -388,83 +383,6 @@ err_idr:
}
/**
- * Register.
- *
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- */
-int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_g1;
-
- pci_set_master(pdev);
- if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
-
- return 0;
-
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
- pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_get_dev);
-
-/**
* Put a secondary minor number.
*
* \param sec_minor - structure to be released
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 101d381e9d8..85da4c40694 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
- status = connector->funcs->detect(connector);
+ status = connector->funcs->detect(connector, true);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
@@ -489,7 +489,8 @@ int drm_sysfs_device_add(struct drm_minor *minor)
int err;
char *minor_str;
- minor->kdev.parent = &minor->dev->pdev->dev;
+ minor->kdev.parent = minor->dev->dev;
+
minor->kdev.class = drm_class;
minor->kdev.release = drm_sysfs_device_release;
minor->kdev.devt = minor->device;
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
new file mode 100644
index 00000000000..03ea964aa60
--- /dev/null
+++ b/drivers/gpu/drm/drm_trace.h
@@ -0,0 +1,66 @@
+#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DRM_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE drm_trace
+
+TRACE_EVENT(drm_vblank_event,
+ TP_PROTO(int crtc, unsigned int seq),
+ TP_ARGS(crtc, seq),
+ TP_STRUCT__entry(
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("crtc=%d, seq=%d", __entry->crtc, __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_queued,
+ TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+ TP_ARGS(pid, crtc, seq),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+ __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_delivered,
+ TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+ TP_ARGS(pid, crtc, seq),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+ __entry->seq)
+);
+
+#endif /* _DRM_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
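A minimal sketch (not part of this patch) of how the tracepoints above are meant to be fired: each TRACE_EVENT() generates a trace_<name>() helper matching its TP_PROTO signature, callable after including drm_trace.h. The surrounding function is a reduced, hypothetical example.

/* Illustrative sketch only, not part of the patch above. */
#include "drm_trace.h"

static void my_vblank_path(int crtc, unsigned int seq, pid_t pid)
{
	/* a client queued a vblank event request */
	trace_drm_vblank_event_queued(pid, crtc, seq);

	/* ... the vblank interrupt for this crtc arrives ... */
	trace_drm_vblank_event(crtc, seq);

	/* the event is handed back to the client */
	trace_drm_vblank_event_delivered(pid, crtc, seq);
}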
diff --git a/drivers/gpu/drm/drm_trace_points.c b/drivers/gpu/drm/drm_trace_points.c
new file mode 100644
index 00000000000..0d0eb90864a
--- /dev/null
+++ b/drivers/gpu/drm/drm_trace_points.c
@@ -0,0 +1,4 @@
+#include "drmP.h"
+
+#define CREATE_TRACE_POINTS
+#include "drm_trace.h"
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c3b13fb41d0..5df450683aa 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -61,7 +61,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined(__arm__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
break;
}
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
goto vm_fault_error;
/*
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
- mutex_lock(&dev->struct_mutex);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
break;
}
}
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->minor->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_vm_close_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
@@ -601,6 +609,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
}
switch (map->type) {
+#if !defined(__arm__)
case _DRM_AGP:
if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
/*
@@ -615,20 +624,31 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
break;
}
/* fall through to _DRM_FRAME_BUFFER... */
+#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = dev->driver->get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
+#if !defined(__arm__)
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
+#else
+ if (remap_pfn_range(vma, vma->vm_start,
+ (map->offset + offset) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+#endif
+
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%llx\n",
map->type,
vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_CONSISTENT:
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 6d2abaf35ba..92862563e7e 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -2,3 +2,6 @@ ccflags-y := -Iinclude/drm
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+
+sil164-y := sil164_drv.o
+obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 81681a07a80..08792a740f1 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -33,7 +33,7 @@ static void ch7006_encoder_set_config(struct drm_encoder *encoder,
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
- priv->params = params;
+ priv->params = *(struct ch7006_encoder_params *)params;
}
static void ch7006_encoder_destroy(struct drm_encoder *encoder)
@@ -114,7 +114,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
{
struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
- struct ch7006_encoder_params *params = priv->params;
+ struct ch7006_encoder_params *params = &priv->params;
struct ch7006_state *state = &priv->state;
uint8_t *regs = state->regs;
struct ch7006_mode *mode = priv->mode;
@@ -428,6 +428,22 @@ static int ch7006_remove(struct i2c_client *client)
return 0;
}
+static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ ch7006_dbg(client, "\n");
+
+ return 0;
+}
+
+static int ch7006_resume(struct i2c_client *client)
+{
+ ch7006_dbg(client, "\n");
+
+ ch7006_write(client, 0x3d, 0x0);
+
+ return 0;
+}
+
static int ch7006_encoder_init(struct i2c_client *client,
struct drm_device *dev,
struct drm_encoder_slave *encoder)
@@ -454,6 +470,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
priv->hmargin = 50;
priv->vmargin = 50;
priv->last_dpms = -1;
+ priv->chip_version = ch7006_read(client, CH7006_VERSION_ID);
if (ch7006_tv_norm) {
for (i = 0; i < NUM_TV_NORMS; i++) {
@@ -487,6 +504,8 @@ static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
.remove = ch7006_remove,
+ .suspend = ch7006_suspend,
+ .resume = ch7006_resume,
.driver = {
.name = "ch7006",
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index e447dfb6389..c860f24a5af 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -316,7 +316,10 @@ void ch7006_setup_power_state(struct drm_encoder *encoder)
}
} else {
- *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ if (priv->chip_version >= 0x20)
+ *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ else
+ *power |= bitfs(CH7006_POWER_LEVEL, POWER_OFF);
}
}
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index b06d3d93d8a..17667b7d57e 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -77,7 +77,7 @@ struct ch7006_state {
};
struct ch7006_priv {
- struct ch7006_encoder_params *params;
+ struct ch7006_encoder_params params;
struct ch7006_mode *mode;
struct ch7006_state state;
@@ -95,6 +95,7 @@ struct ch7006_priv {
int flicker;
int scale;
+ int chip_version;
int last_dpms;
};
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
new file mode 100644
index 00000000000..0b6773290c0
--- /dev/null
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "drm_encoder_slave.h"
+#include "i2c/sil164.h"
+
+struct sil164_priv {
+ struct sil164_encoder_params config;
+ struct i2c_client *duallink_slave;
+
+ uint8_t saved_state[0x10];
+ uint8_t saved_slave_state[0x10];
+};
+
+#define to_sil164_priv(x) \
+ ((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
+
+#define sil164_dbg(client, format, ...) do { \
+ if (drm_debug & DRM_UT_KMS) \
+ dev_printk(KERN_DEBUG, &client->dev, \
+ "%s: " format, __func__, ## __VA_ARGS__); \
+ } while (0)
+#define sil164_info(client, format, ...) \
+ dev_info(&client->dev, format, __VA_ARGS__)
+#define sil164_err(client, format, ...) \
+ dev_err(&client->dev, format, __VA_ARGS__)
+
+#define SIL164_I2C_ADDR_MASTER 0x38
+#define SIL164_I2C_ADDR_SLAVE 0x39
+
+/* HW register definitions */
+
+#define SIL164_VENDOR_LO 0x0
+#define SIL164_VENDOR_HI 0x1
+#define SIL164_DEVICE_LO 0x2
+#define SIL164_DEVICE_HI 0x3
+#define SIL164_REVISION 0x4
+#define SIL164_FREQ_MIN 0x6
+#define SIL164_FREQ_MAX 0x7
+#define SIL164_CONTROL0 0x8
+# define SIL164_CONTROL0_POWER_ON 0x01
+# define SIL164_CONTROL0_EDGE_RISING 0x02
+# define SIL164_CONTROL0_INPUT_24BIT 0x04
+# define SIL164_CONTROL0_DUAL_EDGE 0x08
+# define SIL164_CONTROL0_HSYNC_ON 0x10
+# define SIL164_CONTROL0_VSYNC_ON 0x20
+#define SIL164_DETECT 0x9
+# define SIL164_DETECT_INTR_STAT 0x01
+# define SIL164_DETECT_HOTPLUG_STAT 0x02
+# define SIL164_DETECT_RECEIVER_STAT 0x04
+# define SIL164_DETECT_INTR_MODE_RECEIVER 0x00
+# define SIL164_DETECT_INTR_MODE_HOTPLUG 0x08
+# define SIL164_DETECT_OUT_MODE_HIGH 0x00
+# define SIL164_DETECT_OUT_MODE_INTR 0x10
+# define SIL164_DETECT_OUT_MODE_RECEIVER 0x20
+# define SIL164_DETECT_OUT_MODE_HOTPLUG 0x30
+# define SIL164_DETECT_VSWING_STAT 0x80
+#define SIL164_CONTROL1 0xa
+# define SIL164_CONTROL1_DESKEW_ENABLE 0x10
+# define SIL164_CONTROL1_DESKEW_INCR_SHIFT 5
+#define SIL164_GPIO 0xb
+#define SIL164_CONTROL2 0xc
+# define SIL164_CONTROL2_FILTER_ENABLE 0x01
+# define SIL164_CONTROL2_FILTER_SETTING_SHIFT 1
+# define SIL164_CONTROL2_DUALLINK_MASTER 0x40
+# define SIL164_CONTROL2_SYNC_CONT 0x80
+#define SIL164_DUALLINK 0xd
+# define SIL164_DUALLINK_ENABLE 0x10
+# define SIL164_DUALLINK_SKEW_SHIFT 5
+#define SIL164_PLLZONE 0xe
+# define SIL164_PLLZONE_STAT 0x08
+# define SIL164_PLLZONE_FORCE_ON 0x10
+# define SIL164_PLLZONE_FORCE_HIGH 0x20
+
+/* HW access functions */
+
+static void
+sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ sil164_err(client, "Error %d writing to subaddress 0x%x\n",
+ ret, addr);
+}
+
+static uint8_t
+sil164_read(struct i2c_client *client, uint8_t addr)
+{
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ sil164_err(client, "Error %d reading from subaddress 0x%x\n",
+ ret, addr);
+ return 0;
+}
+
+static void
+sil164_save_state(struct i2c_client *client, uint8_t *state)
+{
+ int i;
+
+ for (i = 0x8; i <= 0xe; i++)
+ state[i] = sil164_read(client, i);
+}
+
+static void
+sil164_restore_state(struct i2c_client *client, uint8_t *state)
+{
+ int i;
+
+ for (i = 0x8; i <= 0xe; i++)
+ sil164_write(client, i, state[i]);
+}
+
+static void
+sil164_set_power_state(struct i2c_client *client, bool on)
+{
+ uint8_t control0 = sil164_read(client, SIL164_CONTROL0);
+
+ if (on)
+ control0 |= SIL164_CONTROL0_POWER_ON;
+ else
+ control0 &= ~SIL164_CONTROL0_POWER_ON;
+
+ sil164_write(client, SIL164_CONTROL0, control0);
+}
+
+static void
+sil164_init_state(struct i2c_client *client,
+ struct sil164_encoder_params *config,
+ bool duallink)
+{
+ sil164_write(client, SIL164_CONTROL0,
+ SIL164_CONTROL0_HSYNC_ON |
+ SIL164_CONTROL0_VSYNC_ON |
+ (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
+ (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
+ (config->input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0));
+
+ sil164_write(client, SIL164_DETECT,
+ SIL164_DETECT_INTR_STAT |
+ SIL164_DETECT_OUT_MODE_RECEIVER);
+
+ sil164_write(client, SIL164_CONTROL1,
+ (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) |
+ (((config->input_skew + 4) & 0x7)
+ << SIL164_CONTROL1_DESKEW_INCR_SHIFT));
+
+ sil164_write(client, SIL164_CONTROL2,
+ SIL164_CONTROL2_SYNC_CONT |
+ (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) |
+ (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT));
+
+ sil164_write(client, SIL164_PLLZONE, 0);
+
+ if (duallink)
+ sil164_write(client, SIL164_DUALLINK,
+ SIL164_DUALLINK_ENABLE |
+ (((config->duallink_skew + 4) & 0x7)
+ << SIL164_DUALLINK_SKEW_SHIFT));
+ else
+ sil164_write(client, SIL164_DUALLINK, 0);
+}
+
+/* DRM encoder functions */
+
+static void
+sil164_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ priv->config = *(struct sil164_encoder_params *)params;
+}
+
+static void
+sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+ bool on = (mode == DRM_MODE_DPMS_ON);
+ bool duallink = (on && encoder->crtc->mode.clock > 165000);
+
+ sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
+
+ if (priv->duallink_slave)
+ sil164_set_power_state(priv->duallink_slave, duallink);
+}
+
+static void
+sil164_encoder_save(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ sil164_save_state(drm_i2c_encoder_get_client(encoder),
+ priv->saved_state);
+
+ if (priv->duallink_slave)
+ sil164_save_state(priv->duallink_slave,
+ priv->saved_slave_state);
+}
+
+static void
+sil164_encoder_restore(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ sil164_restore_state(drm_i2c_encoder_get_client(encoder),
+ priv->saved_state);
+
+ if (priv->duallink_slave)
+ sil164_restore_state(priv->duallink_slave,
+ priv->saved_slave_state);
+}
+
+static bool
+sil164_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+sil164_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ if (mode->clock < 32000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > 330000 ||
+ (mode->clock > 165000 && !priv->duallink_slave))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void
+sil164_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+ bool duallink = adjusted_mode->clock > 165000;
+
+ sil164_init_state(drm_i2c_encoder_get_client(encoder),
+ &priv->config, duallink);
+
+ if (priv->duallink_slave)
+ sil164_init_state(priv->duallink_slave,
+ &priv->config, duallink);
+
+ sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static enum drm_connector_status
+sil164_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+
+ if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static int
+sil164_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static int
+sil164_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static int
+sil164_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+sil164_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ if (priv->duallink_slave)
+ i2c_unregister_device(priv->duallink_slave);
+
+ kfree(priv);
+ drm_i2c_encoder_destroy(encoder);
+}
+
+static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+ .set_config = sil164_encoder_set_config,
+ .destroy = sil164_encoder_destroy,
+ .dpms = sil164_encoder_dpms,
+ .save = sil164_encoder_save,
+ .restore = sil164_encoder_restore,
+ .mode_fixup = sil164_encoder_mode_fixup,
+ .mode_valid = sil164_encoder_mode_valid,
+ .mode_set = sil164_encoder_mode_set,
+ .detect = sil164_encoder_detect,
+ .get_modes = sil164_encoder_get_modes,
+ .create_resources = sil164_encoder_create_resources,
+ .set_property = sil164_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
+ sil164_read(client, SIL164_VENDOR_LO);
+ int device = sil164_read(client, SIL164_DEVICE_HI) << 8 |
+ sil164_read(client, SIL164_DEVICE_LO);
+ int rev = sil164_read(client, SIL164_REVISION);
+
+ if (vendor != 0x1 || device != 0x6) {
+ sil164_dbg(client, "Unknown device %x:%x.%x\n",
+ vendor, device, rev);
+ return -ENODEV;
+ }
+
+ sil164_info(client, "Detected device %x:%x.%x\n",
+ vendor, device, rev);
+
+ return 0;
+}
+
+static int
+sil164_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static struct i2c_client *
+sil164_detect_slave(struct i2c_client *client)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg = {
+ .addr = SIL164_I2C_ADDR_SLAVE,
+ .len = 0,
+ };
+ const struct i2c_board_info info = {
+ I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE)
+ };
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+ sil164_dbg(adap, "No dual-link slave found.");
+ return NULL;
+ }
+
+ return i2c_new_device(adap, &info);
+}
+
+static int
+sil164_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct sil164_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ encoder->slave_priv = priv;
+ encoder->slave_funcs = &sil164_encoder_funcs;
+
+ priv->duallink_slave = sil164_detect_slave(client);
+
+ return 0;
+}
+
+static struct i2c_device_id sil164_ids[] = {
+ { "sil164", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sil164_ids);
+
+static struct drm_i2c_encoder_driver sil164_driver = {
+ .i2c_driver = {
+ .probe = sil164_probe,
+ .remove = sil164_remove,
+ .driver = {
+ .name = "sil164",
+ },
+ .id_table = sil164_ids,
+ },
+ .encoder_init = sil164_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+sil164_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
+}
+
+static void __exit
+sil164_exit(void)
+{
+ drm_i2c_encoder_unregister(&sil164_driver);
+}
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(sil164_init);
+module_exit(sil164_exit);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 997d91707ad..fb07e73581e 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -37,6 +37,7 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -60,9 +61,8 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
I810_BUF_CLIENT);
- if (used == I810_BUF_FREE) {
+ if (used == I810_BUF_FREE)
return buf;
- }
}
return NULL;
}
@@ -71,7 +71,7 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
* yet, the hardware updates in use for us once it's on the ring buffer.
*/
-static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -116,12 +116,12 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i810_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
};
-static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -152,7 +152,7 @@ static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
return retcode;
}
-static int i810_unmap_buffer(struct drm_buf * buf)
+static int i810_unmap_buffer(struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -172,7 +172,7 @@ static int i810_unmap_buffer(struct drm_buf * buf)
return retcode;
}
-static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
+static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
@@ -202,7 +202,7 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
return retcode;
}
-static int i810_dma_cleanup(struct drm_device * dev)
+static int i810_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
@@ -218,9 +218,8 @@ static int i810_dma_cleanup(struct drm_device * dev)
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
- if (dev_priv->ring.virtual_start) {
+ if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
@@ -242,7 +241,7 @@ static int i810_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i810_wait_ring(struct drm_device * dev, int n)
+static int i810_wait_ring(struct drm_device *dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -271,11 +270,11 @@ static int i810_wait_ring(struct drm_device * dev, int n)
udelay(1);
}
- out_wait_ring:
+out_wait_ring:
return iters;
}
-static void i810_kernel_lost_context(struct drm_device * dev)
+static void i810_kernel_lost_context(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -287,7 +286,7 @@ static void i810_kernel_lost_context(struct drm_device * dev)
ring->space += ring->Size;
}
-static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
+static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
@@ -322,9 +321,9 @@ static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_
return 0;
}
-static int i810_dma_initialize(struct drm_device * dev,
- drm_i810_private_t * dev_priv,
- drm_i810_init_t * init)
+static int i810_dma_initialize(struct drm_device *dev,
+ drm_i810_private_t *dev_priv,
+ drm_i810_init_t *init)
{
struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
@@ -462,7 +461,7 @@ static int i810_dma_init(struct drm_device *dev, void *data,
* Use 'volatile' & local var tmp to force the emitted values to be
* identical to the verified ones.
*/
-static void i810EmitContextVerified(struct drm_device * dev,
+static void i810EmitContextVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -495,7 +494,7 @@ static void i810EmitContextVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
+static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -528,7 +527,7 @@ static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i810EmitDestVerified(struct drm_device * dev,
+static void i810EmitDestVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -563,7 +562,7 @@ static void i810EmitDestVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i810EmitState(struct drm_device * dev)
+static void i810EmitState(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -594,7 +593,7 @@ static void i810EmitState(struct drm_device * dev)
/* need to verify
*/
-static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
+static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
@@ -669,7 +668,7 @@ static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
}
}
-static void i810_dma_dispatch_swap(struct drm_device * dev)
+static void i810_dma_dispatch_swap(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -715,8 +714,8 @@ static void i810_dma_dispatch_swap(struct drm_device * dev)
}
}
-static void i810_dma_dispatch_vertex(struct drm_device * dev,
- struct drm_buf * buf, int discard, int used)
+static void i810_dma_dispatch_vertex(struct drm_device *dev,
+ struct drm_buf *buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -795,7 +794,7 @@ static void i810_dma_dispatch_vertex(struct drm_device * dev,
}
}
-static void i810_dma_dispatch_flip(struct drm_device * dev)
+static void i810_dma_dispatch_flip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
@@ -841,7 +840,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
}
-static void i810_dma_quiescent(struct drm_device * dev)
+static void i810_dma_quiescent(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -858,7 +857,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
-static int i810_flush_queue(struct drm_device * dev)
+static int i810_flush_queue(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -891,7 +890,7 @@ static int i810_flush_queue(struct drm_device * dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device * dev,
+static void i810_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -969,9 +968,8 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
- if (!dev->dev_private) {
+ if (!dev->dev_private)
return -EINVAL;
- }
i810_dma_dispatch_clear(dev, clear->flags,
clear->clear_color, clear->clear_depth);
@@ -1039,7 +1037,7 @@ static int i810_docopy(struct drm_device *dev, void *data,
return 0;
}
-static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
+static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1053,9 +1051,8 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
i810_kernel_lost_context(dev);
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
- if (u != I810_BUF_CLIENT) {
+ if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- }
if (used > 4 * 1024)
used = 0;
@@ -1160,7 +1157,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- //Tell the overlay to update
+ /* Tell the overlay to update */
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
return 0;
@@ -1168,7 +1165,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
/* Not sure why this isn't set all the time:
*/
-static void i810_do_init_pageflip(struct drm_device * dev)
+static void i810_do_init_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1178,7 +1175,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i810_do_cleanup_pageflip(struct drm_device * dev)
+static int i810_do_cleanup_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1218,49 +1215,61 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
}
-void i810_driver_lastclose(struct drm_device * dev)
+void i810_driver_lastclose(struct drm_device *dev)
{
i810_dma_cleanup(dev);
}
-void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
- }
}
}
-void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv)
{
i810_reclaim_buffers(dev, file_priv);
}
-int i810_driver_dma_quiescent(struct drm_device * dev)
+int i810_driver_dma_quiescent(struct drm_device *dev)
{
i810_dma_quiescent(dev);
return 0;
}
+/*
+ * Call drm_ioctl under the big kernel lock
+ * to serialize against the i810_mmap_buffers function.
+ */
+long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ lock_kernel();
+ ret = drm_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
@@ -1276,7 +1285,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
* \returns
* A value of 1 is always returned to indicate every i810 is AGP.
*/
-int i810_driver_device_is_agp(struct drm_device * dev)
+int i810_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index c1e02752e02..b4250b2cac1 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i810_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 21e2691f28f..c9339f48179 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -115,56 +115,59 @@ typedef struct drm_i810_private {
} drm_i810_private_t;
/* i810_dma.c */
-extern int i810_driver_dma_quiescent(struct drm_device * dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+extern int i810_driver_dma_quiescent(struct drm_device *dev);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(struct drm_device * dev);
-extern void i810_driver_preclose(struct drm_device * dev,
+extern void i810_driver_lastclose(struct drm_device *dev);
+extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
-extern int i810_driver_device_is_agp(struct drm_device * dev);
+extern int i810_driver_device_is_agp(struct drm_device *dev);
+extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
-#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
+#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg))
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
-#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
+#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg))
#define I810_READ16(reg) I810_DEREF16(reg)
-#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
+#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0)
#define I810_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
- if (dev_priv->ring.space < n*4) \
- i810_wait_ring(dev, n*4); \
- dev_priv->ring.space -= n*4; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
+ volatile char *virt;
+
+#define BEGIN_LP_RING(n) do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
+ if (dev_priv->ring.space < n*4) \
+ i810_wait_ring(dev, n*4); \
+ dev_priv->ring.space -= n*4; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
} while (0)
-#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+#define ADVANCE_LP_RING() do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
- I810_WRITE(LP_RING + RING_TAIL, outring); \
-} while(0)
-
-#define OUT_RING(n) do { \
- if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outring += 4; \
- outring &= ringmask; \
+ I810_WRITE(LP_RING + RING_TAIL, outring); \
+} while (0)
+
+#define OUT_RING(n) do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+ *(volatile unsigned int *)(virt + outring) = n; \
+ outring += 4; \
+ outring &= ringmask; \
} while (0)
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
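A minimal sketch (not part of this patch) of how the ring macros above are used: they expect dev, dev_priv and the RING_LOCALS variables to be in scope, reserve space, emit dwords, and then publish the new tail. my_emit_report_head is a hypothetical example; the command dword is illustrative only.

/* Illustrative sketch only, not part of the patch above. */
static void my_emit_report_head(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);		/* waits for 2 free dwords, sets up outring */
	OUT_RING(CMD_REPORT_HEAD);	/* illustrative command dword */
	OUT_RING(0);			/* padding dword to keep emits paired */
	ADVANCE_LP_RING();		/* write the new tail back to the ring register */
}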
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 65759a9a85c..cc92c7e6236 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -36,6 +36,7 @@
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
+#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -62,9 +63,8 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev)
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
I830_BUF_CLIENT);
- if (used == I830_BUF_FREE) {
+ if (used == I830_BUF_FREE)
return buf;
- }
}
return NULL;
}
@@ -73,7 +73,7 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev)
* yet, the hardware updates in use for us once it's on the ring buffer.
*/
-static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -118,12 +118,12 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i830_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
};
-static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
@@ -156,7 +156,7 @@ static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
return retcode;
}
-static int i830_unmap_buffer(struct drm_buf * buf)
+static int i830_unmap_buffer(struct drm_buf *buf)
{
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -176,7 +176,7 @@ static int i830_unmap_buffer(struct drm_buf * buf)
return retcode;
}
-static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
+static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
@@ -206,7 +206,7 @@ static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
return retcode;
}
-static int i830_dma_cleanup(struct drm_device * dev)
+static int i830_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
@@ -222,9 +222,8 @@ static int i830_dma_cleanup(struct drm_device * dev)
drm_i830_private_t *dev_priv =
(drm_i830_private_t *) dev->dev_private;
- if (dev_priv->ring.virtual_start) {
+ if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
@@ -246,7 +245,7 @@ static int i830_dma_cleanup(struct drm_device * dev)
return 0;
}
-int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
+int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -276,11 +275,11 @@ int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
}
- out_wait_ring:
+out_wait_ring:
return iters;
}
-static void i830_kernel_lost_context(struct drm_device * dev)
+static void i830_kernel_lost_context(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -295,7 +294,7 @@ static void i830_kernel_lost_context(struct drm_device * dev)
dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
-static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
+static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 36;
@@ -329,9 +328,9 @@ static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_
return 0;
}
-static int i830_dma_initialize(struct drm_device * dev,
- drm_i830_private_t * dev_priv,
- drm_i830_init_t * init)
+static int i830_dma_initialize(struct drm_device *dev,
+ drm_i830_private_t *dev_priv,
+ drm_i830_init_t *init)
{
struct drm_map_list *r_list;
@@ -482,7 +481,7 @@ static int i830_dma_init(struct drm_device *dev, void *data,
/* Most efficient way to verify state for the i830 is as it is
* emitted. Non-conformant state is silently dropped.
*/
-static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -527,7 +526,7 @@ static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -561,7 +560,7 @@ static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
printk("rejected packet %x\n", code[0]);
}
-static void i830EmitTexBlendVerified(struct drm_device * dev,
+static void i830EmitTexBlendVerified(struct drm_device *dev,
unsigned int *code, unsigned int num)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -586,7 +585,7 @@ static void i830EmitTexBlendVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i830EmitTexPalette(struct drm_device * dev,
+static void i830EmitTexPalette(struct drm_device *dev,
unsigned int *palette, int number, int is_shared)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -603,9 +602,8 @@ static void i830EmitTexPalette(struct drm_device * dev,
} else {
OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
}
- for (i = 0; i < 256; i++) {
+ for (i = 0; i < 256; i++)
OUT_RING(palette[i]);
- }
OUT_RING(0);
/* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
*/
@@ -613,7 +611,7 @@ static void i830EmitTexPalette(struct drm_device * dev,
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
unsigned int tmp;
@@ -674,7 +672,7 @@ static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -685,7 +683,7 @@ static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitState(struct drm_device * dev)
+static void i830EmitState(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -788,7 +786,7 @@ static void i830EmitState(struct drm_device * dev)
* Performance monitoring functions
*/
-static void i830_fill_box(struct drm_device * dev,
+static void i830_fill_box(struct drm_device *dev,
int x, int y, int w, int h, int r, int g, int b)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -816,17 +814,16 @@ static void i830_fill_box(struct drm_device * dev,
OUT_RING((y << 16) | x);
OUT_RING(((y + h) << 16) | (x + w));
- if (dev_priv->current_page == 1) {
+ if (dev_priv->current_page == 1)
OUT_RING(dev_priv->front_offset);
- } else {
+ else
OUT_RING(dev_priv->back_offset);
- }
OUT_RING(color);
ADVANCE_LP_RING();
}
-static void i830_cp_performance_boxes(struct drm_device * dev)
+static void i830_cp_performance_boxes(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -871,7 +868,7 @@ static void i830_cp_performance_boxes(struct drm_device * dev)
dev_priv->sarea_priv->perf_boxes = 0;
}
-static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
+static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval,
unsigned int clear_depthmask)
@@ -966,7 +963,7 @@ static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
}
}
-static void i830_dma_dispatch_swap(struct drm_device * dev)
+static void i830_dma_dispatch_swap(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -1036,7 +1033,7 @@ static void i830_dma_dispatch_swap(struct drm_device * dev)
}
}
-static void i830_dma_dispatch_flip(struct drm_device * dev)
+static void i830_dma_dispatch_flip(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1079,8 +1076,8 @@ static void i830_dma_dispatch_flip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static void i830_dma_dispatch_vertex(struct drm_device * dev,
- struct drm_buf * buf, int discard, int used)
+static void i830_dma_dispatch_vertex(struct drm_device *dev,
+ struct drm_buf *buf, int discard, int used)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
@@ -1100,9 +1097,8 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev,
if (discard) {
u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT) {
+ if (u != I830_BUF_CLIENT)
DRM_DEBUG("xxxx 2\n");
- }
}
if (used > 4 * 1023)
@@ -1191,7 +1187,7 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev,
}
}
-static void i830_dma_quiescent(struct drm_device * dev)
+static void i830_dma_quiescent(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1208,7 +1204,7 @@ static void i830_dma_quiescent(struct drm_device * dev)
i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
-static int i830_flush_queue(struct drm_device * dev)
+static int i830_flush_queue(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -1241,7 +1237,7 @@ static int i830_flush_queue(struct drm_device * dev)
}
/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
+static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
@@ -1316,9 +1312,8 @@ static int i830_clear_bufs(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
- if (!dev->dev_private) {
+ if (!dev->dev_private)
return -EINVAL;
- }
i830_dma_dispatch_clear(dev, clear->flags,
clear->clear_color,
@@ -1339,7 +1334,7 @@ static int i830_swap_bufs(struct drm_device *dev, void *data,
/* Not sure why this isn't set all the time:
*/
-static void i830_do_init_pageflip(struct drm_device * dev)
+static void i830_do_init_pageflip(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1349,7 +1344,7 @@ static void i830_do_init_pageflip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i830_do_cleanup_pageflip(struct drm_device * dev)
+static int i830_do_cleanup_pageflip(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1490,47 +1485,59 @@ int i830_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
}
-void i830_driver_lastclose(struct drm_device * dev)
+void i830_driver_lastclose(struct drm_device *dev)
{
i830_dma_cleanup(dev);
}
-void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
i830_do_cleanup_pageflip(dev);
- }
}
}
-void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
+void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
{
i830_reclaim_buffers(dev, file_priv);
}
-int i830_driver_dma_quiescent(struct drm_device * dev)
+int i830_driver_dma_quiescent(struct drm_device *dev)
{
i830_dma_quiescent(dev);
return 0;
}
+/*
+ * Call drm_ioctl under the big kernel lock in order to
+ * serialize against the i830_mmap_buffers function.
+ */
+long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ lock_kernel();
+ ret = drm_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
};
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
@@ -1546,7 +1553,7 @@ int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
* \returns
* A value of 1 is always returned to indicate every i8xx is AGP.
*/
-int i830_driver_device_is_agp(struct drm_device * dev)
+int i830_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
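
The i830 changes above follow the DRM BKL-pushdown pattern: every ioctl in the table is flagged DRM_UNLOCKED so the DRM core no longer takes the big kernel lock around it, and the driver supplies its own unlocked_ioctl entry point (i830_ioctl) that reintroduces lock_kernel() so the command path stays serialized against i830_mmap_buffers(). The fragment below is a rough sketch of how a DRM_UNLOCKED-aware dispatcher decides whether to take the BKL; it illustrates the pattern only and is not the actual drm_ioctl() source.

	/* Illustration only -- not the real drm_ioctl() body. */
	if (ioctl->flags & DRM_UNLOCKED) {
		/* driver marked this ioctl as handling its own locking */
		retcode = func(dev, kdata, file_priv);
	} else {
		/* legacy ioctls still get the big kernel lock from the core */
		lock_kernel();
		retcode = func(dev, kdata, file_priv);
		unlock_kernel();
	}
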
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 44f990bed8f..a5c66aa82f0 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i830_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
index da82afe4ded..0df1c720560 100644
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ b/drivers/gpu/drm/i830/i830_drv.h
@@ -122,6 +122,7 @@ typedef struct drm_i830_private {
} drm_i830_private_t;
+long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern struct drm_ioctl_desc i830_ioctls[];
extern int i830_max_ioctl;
@@ -132,33 +133,33 @@ extern int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device * dev);
-extern void i830_driver_irq_postinstall(struct drm_device * dev);
-extern void i830_driver_irq_uninstall(struct drm_device * dev);
+extern void i830_driver_irq_preinstall(struct drm_device *dev);
+extern void i830_driver_irq_postinstall(struct drm_device *dev);
+extern void i830_driver_irq_uninstall(struct drm_device *dev);
extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device * dev,
+extern void i830_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device * dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
+extern void i830_driver_lastclose(struct drm_device *dev);
+extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device * dev);
-extern int i830_driver_device_is_agp(struct drm_device * dev);
+extern int i830_driver_dma_quiescent(struct drm_device *dev);
+extern int i830_driver_device_is_agp(struct drm_device *dev);
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
+#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
+#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
+#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
+#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
+ volatile char *virt;
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
- i830_wait_ring(dev, n*4, __func__); \
+ i830_wait_ring(dev, n*4, __func__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
ringmask = dev_priv->ring.tail_mask; \
@@ -166,21 +167,23 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
} while (0)
#define OUT_RING(n) do { \
- if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
+ if (I830_VERBOSE) \
+ printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
- outcount++; \
+ outcount++; \
outring += 4; \
outring &= ringmask; \
} while (0)
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while(0)
+#define ADVANCE_LP_RING() do { \
+ if (I830_VERBOSE) \
+ printk("ADVANCE_LP_RING %x\n", outring); \
+ dev_priv->ring.tail = outring; \
+ dev_priv->ring.space -= outcount * 4; \
+ I830_WRITE(LP_RING + RING_TAIL, outring); \
+} while (0)
-extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
+extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
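
The reindented ring macros above are always used as a trio, with RING_LOCALS declaring the temporaries they share. Below is a hypothetical helper showing the intended usage pattern; the function name is made up for illustration, and GFX_OP_USER_INTERRUPT is the command defined just above.

static void example_emit_user_interrupt(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;			/* outring, ringmask, outcount, virt */

	BEGIN_LP_RING(2);		/* waits in i830_wait_ring() if space is short */
	OUT_RING(GFX_OP_USER_INTERRUPT);
	OUT_RING(0);			/* keep the number of emitted dwords even */
	ADVANCE_LP_RING();		/* publish the new tail via LP_RING + RING_TAIL */
}
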
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
index 91ec2bb497e..d1a6b95d631 100644
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ b/drivers/gpu/drm/i830/i830_irq.c
@@ -53,7 +53,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int i830_emit_irq(struct drm_device * dev)
+static int i830_emit_irq(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -70,7 +70,7 @@ static int i830_emit_irq(struct drm_device * dev)
return atomic_read(&dev_priv->irq_emitted);
}
-static int i830_wait_irq(struct drm_device * dev, int irq_nr)
+static int i830_wait_irq(struct drm_device *dev, int irq_nr)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
@@ -156,7 +156,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
/* drm_dma.h hooks
*/
-void i830_driver_irq_preinstall(struct drm_device * dev)
+void i830_driver_irq_preinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
@@ -168,14 +168,14 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
init_waitqueue_head(&dev_priv->irq_queue);
}
-void i830_driver_irq_postinstall(struct drm_device * dev)
+void i830_driver_irq_postinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
-void i830_driver_irq_uninstall(struct drm_device * dev)
+void i830_driver_irq_uninstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da78f2c0d90..5c8e53458ed 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
+ i915_gem_evict.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
@@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
+ intel_panel.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0d6ff640e1c..8c2ad014c47 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -30,20 +30,17 @@
#include "intel_drv.h"
struct intel_dvo_device {
- char *name;
+ const char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
-
- struct drm_display_mode *panel_fixed_mode;
- bool panel_wants_dither;
+ struct i2c_adapter *i2c_bus;
};
struct intel_dvo_dev_ops {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9214119c015..5e43d707678 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
+#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ unsigned long flags;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ const char *pipe = crtc->pipe ? "B" : "A";
+ const char *plane = crtc->plane ? "B" : "A";
+ struct intel_unpin_work *work;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = crtc->unpin_work;
+ if (work == NULL) {
+ seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+ pipe, plane);
+ } else {
+ if (!work->pending) {
+ seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+ pipe, plane);
+ } else {
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+ pipe, plane);
+ }
+ if (work->enable_stall_check)
+ seq_printf(m, "Stall check enabled, ");
+ else
+ seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_printf(m, "%d prepares\n", work->pending);
+
+ if (work->old_fb_obj) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+ if (obj_priv)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
+ }
+ if (work->pending_flip_obj) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+ if (obj_priv)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ return 0;
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -467,6 +516,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -774,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2305a1234f1..c74e4e8006d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,12 +34,15 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
+#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+extern int intel_max_stolen; /* from AGP driver */
+
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -496,6 +499,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
}
+
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
i915_emit_breadcrumb(dev);
return 0;
@@ -610,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -652,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_batch_free;
+ }
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -666,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_clip_free;
+ }
}
mutex_lock(&dev->struct_mutex);
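
The copy_from_user() hunks above (in both i915_batchbuffer() and i915_cmdbuffer()) fix the same bug: copy_from_user() returns the number of bytes it could not copy, not an errno, so passing its value straight back would hand userspace a positive byte count instead of an error. A minimal sketch of the corrected pattern, using a hypothetical helper name:

#include <linux/uaccess.h>

/* copy_from_user() returns 0 on success or the number of uncopied bytes;
 * any non-zero result must be turned into -EFAULT before returning. */
static int example_copy_args(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}
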
@@ -875,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
- int ret = 0;
+ int ret;
if (IS_I965G(dev))
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -885,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
#endif
/* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
+ 0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
- goto out;
+ return ret;
}
if (IS_I965G(dev))
@@ -909,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
pci_write_config_dword(dev_priv->bridge_dev, reg,
lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
+ return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -1256,7 +1272,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
drm_mm_put_block(compressed_fb);
}
- if (!IS_GM45(dev)) {
+ if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
4096, 0);
if (!compressed_llb) {
@@ -1282,8 +1298,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
-
- if (IS_GM45(dev)) {
+ if (IS_IRONLAKE_M(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1291,7 +1308,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->compressed_llb = compressed_llb;
}
- DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
@@ -1354,7 +1371,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
0xff000000;
/* Basic memrange allocator for stolen space (aka vram) */
@@ -1770,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
}
}
- div_u64(diff, diff1);
+ diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
- div_u64(ret, 10);
+ ret = div_u64(ret, 10);
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
@@ -1841,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
/* More magic constants... */
diff = diff * 1181;
- div_u64(diff, diffms * 10);
+ diff = div_u64(diff, diffms * 10);
dev_priv->gfx_power = diff;
}
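
The div_u64() hunks above address another easy-to-miss detail: div_u64() returns the quotient rather than dividing its argument in place (it exists so 64-bit divisions do not require a native 64/32 divide on 32-bit builds), so the old calls silently discarded the result. A short sketch of the corrected usage, with a hypothetical helper name and parameters:

#include <linux/math64.h>

/* div_u64(dividend, divisor) returns the quotient and leaves its
 * arguments untouched, so the result has to be assigned back. */
static u64 example_rate(u64 count, u32 period_ms)
{
	u64 rate = count * 1000;

	rate = div_u64(rate, period_ms);	/* not just div_u64(rate, ...) */
	return rate;
}
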
@@ -2063,14 +2080,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Add register map (needed for suspend/resume) */
mmio_bar = IS_I9XX(dev) ? 0 : 1;
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
+ base = pci_resource_start(dev->pdev, mmio_bar);
+ size = pci_resource_len(dev->pdev, mmio_bar);
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
}
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
dev_priv->regs = ioremap(base, size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
@@ -2104,6 +2125,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_iomapfree;
+ if (prealloc_size > intel_max_stolen) {
+ DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+ prealloc_size >> 20, intel_max_stolen >> 20);
+ prealloc_size = intel_max_stolen;
+ }
+
dev_priv->wq = create_singlethread_workqueue("i915");
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
@@ -2350,46 +2377,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
- DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 423dc90c1e2..6dbe14cc4f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,126 +61,127 @@ extern int intel_agp_enabled;
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .is_i8xx = 1,
+ .gen = 2, .is_i8xx = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .is_i8xx = 1,
+ .gen = 2, .is_i8xx = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+ .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+ .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_g33_info = {
- .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1,
+ .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_g45_info = {
- .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+ .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .need_gfx_hws = 1,
- .has_hotplug = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+ .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1, .is_gen6 = 1,
+ .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1, .is_gen6 = 1,
+ .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
-static const struct pci_device_id pciidlist[] = {
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+static const struct pci_device_id pciidlist[] = { /* aka */
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
+ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
{0, 0, 0}
};
@@ -340,7 +341,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
/*
* Clear request list
*/
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
+ i915_gem_retire_requests(dev);
if (need_display)
i915_save_display(dev);
@@ -413,7 +414,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ return drm_get_pci_dev(pdev, ent, &driver);
}
static void
@@ -482,7 +483,7 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
-const struct dev_pm_ops i915_pm_ops = {
+static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2e1744d37ad..af4a263cf25 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -113,6 +113,9 @@ struct intel_opregion {
int enabled;
};
+struct intel_overlay;
+struct intel_overlay_error_state;
+
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
@@ -166,6 +169,7 @@ struct drm_i915_error_state {
u32 purgeable:1;
} *active_bo;
u32 active_bo_count;
+ struct intel_overlay_error_state *overlay;
};
struct drm_i915_display_funcs {
@@ -176,7 +180,8 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size);
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -185,9 +190,8 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
-struct intel_overlay;
-
struct intel_device_info {
+ u8 gen;
u8 is_mobile : 1;
u8 is_i8xx : 1;
u8 is_i85x : 1;
@@ -200,8 +204,9 @@ struct intel_device_info {
u8 need_gfx_hws : 1;
u8 is_g4x : 1;
u8 is_pineview : 1;
+ u8 is_broadwater : 1;
+ u8 is_crestline : 1;
u8 is_ironlake : 1;
- u8 is_gen6 : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
@@ -239,6 +244,7 @@ typedef struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
+ uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
void *seqno_page;
@@ -248,6 +254,7 @@ typedef struct drm_i915_private {
drm_local_map_t hws_map;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
+ struct drm_gem_object *renderctx;
struct resource mch_res;
@@ -282,12 +289,17 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
+ u32 flush_rings;
+#define FLUSH_RENDER_RING 0x1
+#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
+ uint32_t last_instdone;
+ uint32_t last_instdone1;
struct drm_mm vram;
@@ -547,6 +559,14 @@ typedef struct drm_i915_private {
struct list_head fence_list;
/**
+ * List of objects currently pending being freed.
+ *
+ * These objects are no longer in use, but due to a signal
+ * we were prevented from freeing them at the appointed time.
+ */
+ struct list_head deferred_free_list;
+
+ /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -555,8 +575,6 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
- uint32_t next_gem_seqno;
-
/**
* Waiting sequence number, if any
*/
@@ -597,6 +615,8 @@ typedef struct drm_i915_private {
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
+ /* Panel fitter placement and size for Ironlake+ */
+ u32 pch_pf_pos, pch_pf_size;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -656,6 +676,8 @@ struct drm_i915_gem_object {
struct list_head list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
+ /** This object's place on eviction list */
+ struct list_head evict_list;
/**
* This is set if the object is on the active or flushing lists
@@ -677,7 +699,7 @@ struct drm_i915_gem_object {
*
* Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
- int fence_reg : 5;
+ signed int fence_reg : 5;
/**
* Used for checking the object doesn't appear more than once
@@ -713,7 +735,7 @@ struct drm_i915_gem_object {
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
- int pin_count : 4;
+ unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/** AGP memory structure for our GTT binding. */
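
The two bitfield hunks above lean on a C subtlety worth spelling out: whether a plain int bitfield is signed is implementation-defined, so "int fence_reg : 5" is not guaranteed to hold FENCE_REG_NONE (-1), and "int pin_count : 4" is not guaranteed to reach 15. Writing signed/unsigned explicitly pins down the range. A tiny illustrative struct (hypothetical, for demonstration only):

struct bitfield_demo {
	signed int   fence : 5;	/* always -16..15, so -1 (FENCE_REG_NONE) fits */
	unsigned int pins  : 4;	/* always 0..15, matching MAX_PIN_COUNT of 0xf */
};
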
@@ -743,7 +765,7 @@ struct drm_i915_gem_object {
uint32_t stride;
/** Record of address bit 17 of each page at last unbind. */
- long *bit_17;
+ unsigned long *bit_17;
/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
uint32_t agp_type;
@@ -955,8 +977,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -966,6 +987,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
+int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
struct drm_file *file_priv,
@@ -979,18 +1001,25 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj, int id);
+ struct drm_gem_object *obj,
+ int id,
+ int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_evict_inactive(struct drm_device *dev);
+
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -1046,6 +1075,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1053,6 +1083,10 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+/* overlay */
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
/**
* Lock test for when it's just for synchronization of ring access.
*
@@ -1079,26 +1113,26 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv = dev->dev_private; \
+ drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
+ intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
} while (0)
#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv = dev->dev_private; \
+ drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv->render_ring, x); \
+ intel_ring_emit(dev, &dev_priv__->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv = dev->dev_private; \
+ drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv->render_ring); \
+ dev_priv__->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv__->render_ring); \
} while(0)
/**
@@ -1128,13 +1162,14 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
@@ -1145,27 +1180,13 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_GEN3(dev) (IS_I915G(dev) || \
- IS_I915GM(dev) || \
- IS_I945G(dev) || \
- IS_I945GM(dev) || \
- IS_G33(dev) || \
- IS_PINEVIEW(dev))
-#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
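
With the gen field added to struct intel_device_info, each IS_GENn() check above reduces to a single comparison against INTEL_INFO(dev)->gen instead of a hand-maintained PCI-ID list, and new chipsets only need their device-info entry to set .gen. A hypothetical caller, assuming INTEL_INFO(dev) yields the device's intel_device_info as the surrounding macros imply:

static bool example_is_gen4_or_newer(struct drm_device *dev)
{
	/* generation checks now read a precomputed field, not PCI IDs */
	return INTEL_INFO(dev)->gen >= 4;
}
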
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5aa747fc25a..90b1d6753b9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,8 +34,10 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/intel-gtt.h>
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -48,15 +50,22 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+{
+ return obj_priv->gtt_space &&
+ !obj_priv->active &&
+ obj_priv->pin_count == 0;
+}
+
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -127,13 +136,13 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- drm_gem_object_handle_unreference_unlocked(obj);
-
- if (ret)
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret) {
return ret;
+ }
args->handle = handle;
-
return 0;
}
@@ -313,7 +322,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size,
+ i915_gem_get_gtt_alignment(obj));
if (ret)
return ret;
@@ -456,17 +466,20 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
- /* Bounds check source.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
+ /* Bounds check source. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+ goto err;
}
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -478,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
+err:
drm_gem_object_unreference_unlocked(obj);
-
return ret;
}
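The reworked bounds check above ("args->offset > obj->size || args->size > obj->size - args->offset") rejects out-of-range requests without ever computing offset + size, so it cannot wrap around for large values. A standalone restatement of that idiom, with illustrative names and a few spot checks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Overflow-safe range check: is [offset, offset + size) contained in an
 * object of obj_size bytes?  Comparing size against "obj_size - offset"
 * avoids the wrap-around that a naive "offset + size > obj_size" test
 * would suffer for large offset or size values.
 */
static bool range_ok(uint64_t offset, uint64_t size, uint64_t obj_size)
{
    return offset <= obj_size && size <= obj_size - offset;
}

int main(void)
{
    printf("%d\n", range_ok(0, 4096, 4096));                /* 1: exact fit */
    printf("%d\n", range_ok(4096, 1, 4096));                /* 0: past the end */
    printf("%d\n", range_ok(UINT64_MAX, UINT64_MAX, 4096)); /* 0: would wrap */
    return 0;
}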
@@ -496,10 +509,10 @@ fast_user_write(struct io_mapping *mapping,
char *vaddr_atomic;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+ vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic);
+ io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
if (unwritten)
return -EFAULT;
return 0;
@@ -568,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- if (!access_ok(VERIFY_READ, user_data, remain))
- return -EFAULT;
mutex_lock(&dev->struct_mutex);
@@ -919,17 +930,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
- /* Bounds check destination.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
+ /* Bounds check destination. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+ goto err;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -963,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
+err:
drm_gem_object_unreference_unlocked(obj);
-
return ret;
}
@@ -1002,7 +1016,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -1036,6 +1050,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
+
+ /* Maintain LRU order of "inactive" objects */
+ if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1060,7 +1079,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ return -ENOENT;
}
#if WATCH_BUF
@@ -1099,7 +1118,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
offset = args->offset;
@@ -1137,7 +1156,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
@@ -1155,8 +1174,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
@@ -1169,6 +1186,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
+ if (i915_gem_object_is_inactive(obj_priv))
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1363,7 +1383,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
@@ -1373,7 +1392,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
mutex_lock(&dev->struct_mutex);
@@ -1409,7 +1428,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
@@ -1493,9 +1511,16 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
+ /* Our goal here is to return as much of the memory as
+ * is possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*. Here we mirror the actions taken
+ * by shmem_delete_inode() to release the backing store.
+ */
inode = obj->filp->f_path.dentry->d_inode;
- if (inode->i_op->truncate)
- inode->i_op->truncate (inode);
+ truncate_inode_pages(inode->i_mapping, 0);
+ if (inode->i_op->truncate_range)
+ inode->i_op->truncate_range(inode, 0, (loff_t)-1);
obj_priv->madv = __I915_MADV_PURGED;
}
@@ -1709,9 +1734,9 @@ i915_get_gem_seqno(struct drm_device *dev,
/**
* This function clears the request list as sequence numbers are passed.
*/
-void
-i915_gem_retire_requests(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void
+i915_gem_retire_requests_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
@@ -1751,6 +1776,30 @@ i915_gem_retire_requests(struct drm_device *dev,
}
void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+ struct drm_i915_gem_object *obj_priv, *tmp;
+
+ /* We must be careful that during unbind() we do not
+ * accidentally infinitely recurse into retire requests.
+ * Currently:
+ * retire -> free -> unbind -> wait -> retire_ring
+ */
+ list_for_each_entry_safe(obj_priv, tmp,
+ &dev_priv->mm.deferred_free_list,
+ list)
+ i915_gem_free_object_tail(&obj_priv->base);
+ }
+
+ i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+}
+
+void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
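i915_gem_retire_requests() above drains the new deferred_free_list before retiring either ring, so completing an interrupted free can never recurse back into request retirement (retire -> free -> unbind -> wait -> retire_ring). A toy sketch of that ordering, using purely illustrative structures in place of the driver's lists and rings:

#include <stdio.h>

#define MAX_DEFERRED 8

struct device_state {
    int deferred[MAX_DEFERRED]; /* ids of objects whose final free was deferred */
    int ndeferred;
};

static void free_object_tail(int id)
{
    printf("finishing deferred free of object %d\n", id);
}

static void retire_ring(const char *name)
{
    printf("retiring requests on %s ring\n", name);
}

/*
 * Mirror of the ordering above: complete every deferred free first, then
 * retire each ring exactly once, so freeing can never re-enter retirement.
 */
static void retire_requests(struct device_state *st, int has_bsd)
{
    while (st->ndeferred > 0)
        free_object_tail(st->deferred[--st->ndeferred]);

    retire_ring("render");
    if (has_bsd)
        retire_ring("bsd");
}

int main(void)
{
    struct device_state st = { .deferred = { 7, 3 }, .ndeferred = 2 };

    retire_requests(&st, 1);
    return 0;
}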
@@ -1761,10 +1810,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1878,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests(dev, ring);
+ i915_gem_retire_requests_ring(dev, ring);
return ret;
}
@@ -1866,19 +1912,6 @@ i915_gem_flush(struct drm_device *dev,
flush_domains);
}
-static void
-i915_gem_flush_ring(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring)
-{
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
- ring->flush(dev, ring,
- invalidate_domains,
- flush_domains);
-}
-
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
@@ -1945,13 +1978,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* before we unbind.
*/
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("set_domain failed: %d\n", ret);
+ if (ret == -ERESTARTSYS)
return ret;
- }
-
- BUG_ON(obj_priv->active);
+ /* Continue on if we fail due to EIO: the GPU is hung, so we
+ * should be safe, and we need to clean up or else we might
+ * cause memory corruption through use-after-free.
+ */
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
@@ -1985,37 +2017,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
trace_i915_gem_object_unbind(obj);
- return 0;
-}
-
-static struct drm_gem_object *
-i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *best = NULL;
- struct drm_gem_object *first = NULL;
-
- /* Try to find the smallest clean object */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = &obj_priv->base;
- if (obj->size >= min_size) {
- if ((!obj_priv->dirty ||
- i915_gem_object_is_purgeable(obj_priv)) &&
- (!best || obj->size < best->size)) {
- best = obj;
- if (best->size == min_size)
- return best;
- }
- if (!first)
- first = obj;
- }
- }
-
- return best ? best : first;
+ return ret;
}
-static int
+int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2056,158 +2061,6 @@ i915_gpu_idle(struct drm_device *dev)
return ret;
}
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- bool lists_empty;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- if (lists_empty)
- return -ENOSPC;
-
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
- ret = i915_gem_evict_from_inactive_list(dev);
- if (ret)
- return ret;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
- BUG_ON(!lists_empty);
-
- return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev, int min_size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- int ret;
-
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
- struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
- for (;;) {
- i915_gem_retire_requests(dev, render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, bsd_ring);
-
- /* If there's an inactive buffer available now, grab it
- * and be done.
- */
- obj = i915_gem_find_inactive_object(dev, min_size);
- if (obj) {
- struct drm_i915_gem_object *obj_priv;
-
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- obj_priv = to_intel_bo(obj);
- BUG_ON(obj_priv->pin_count != 0);
- BUG_ON(obj_priv->active);
-
- /* Wait on the rendering and unbind the buffer. */
- return i915_gem_object_unbind(obj);
- }
-
- /* If we didn't get anything, but the ring is still processing
- * things, wait for the next to finish and hopefully leave us
- * a buffer to evict.
- */
- if (!list_empty(&render_ring->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&render_ring->request_list,
- struct drm_i915_gem_request,
- list);
-
- ret = i915_wait_request(dev,
- request->seqno, request->ring);
- if (ret)
- return ret;
-
- continue;
- }
-
- if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&bsd_ring->request_list,
- struct drm_i915_gem_request,
- list);
-
- ret = i915_wait_request(dev,
- request->seqno, request->ring);
- if (ret)
- return ret;
-
- continue;
- }
-
- /* If we didn't have anything on the request list but there
- * are buffers awaiting a flush, emit one and try again.
- * When we wait on it, those buffers waiting for that flush
- * will get moved to inactive.
- */
- if (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
-
- /* Find an object that we can immediately reuse */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = &obj_priv->base;
- if (obj->size >= min_size)
- break;
-
- obj = NULL;
- }
-
- if (obj != NULL) {
- uint32_t seqno;
-
- i915_gem_flush_ring(dev,
- obj->write_domain,
- obj->write_domain,
- obj_priv->ring);
- seqno = i915_add_request(dev, NULL,
- obj->write_domain,
- obj_priv->ring);
- if (seqno == 0)
- return -ENOMEM;
- continue;
- }
- }
-
- /* If we didn't do any of the above, there's no single buffer
- * large enough to swap out for the new one, so just evict
- * everything and start again. (This should be rare.)
- */
- if (!list_empty (&dev_priv->mm.inactive_list))
- return i915_gem_evict_from_inactive_list(dev);
- else
- return i915_gem_evict_everything(dev);
- }
-}
-
int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
@@ -2500,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
reg->obj = obj;
- if (IS_GEN6(dev))
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
sandybridge_write_fence_reg(reg);
- else if (IS_I965G(dev))
+ break;
+ case 5:
+ case 4:
i965_write_fence_reg(reg);
- else if (IS_I9XX(dev))
+ break;
+ case 3:
i915_write_fence_reg(reg);
- else
+ break;
+ case 2:
i830_write_fence_reg(reg);
+ break;
+ }
trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
obj_priv->tiling_mode);
@@ -2530,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
+ uint32_t fence_reg;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
(obj_priv->fence_reg * 8), 0);
- } else if (IS_I965G(dev)) {
+ break;
+ case 5:
+ case 4:
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- } else {
- uint32_t fence_reg;
-
- if (obj_priv->fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ break;
+ case 3:
+ if (obj_priv->fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
else
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
- 8) * 4;
+ case 2:
+ fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
I915_WRITE(fence_reg, 0);
+ break;
}
reg->obj = NULL;
@@ -2583,7 +2447,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
if (!IS_I965G(dev)) {
int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return ret;
+
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
@@ -2634,10 +2501,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (free_space != NULL) {
obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
alignment);
- if (obj_priv->gtt_space != NULL) {
- obj_priv->gtt_space->private = obj;
+ if (obj_priv->gtt_space != NULL)
obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
}
if (obj_priv->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
@@ -2646,7 +2511,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2664,7 +2529,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size,
+ alignment);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2694,7 +2560,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size);
+ ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2703,6 +2569,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
+ /* keep track of the bound object by adding it to the inactive list */
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
@@ -2733,7 +2602,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
}
/** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -2741,17 +2610,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return;
+ return 0;
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
- BUG_ON(obj->write_domain);
+ if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+ return -ENOMEM;
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
+ return 0;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2795,9 +2665,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
-void
+int
i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
{
+ int ret = 0;
+
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2806,9 +2678,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
i915_gem_object_flush_cpu_write_domain(obj);
break;
default:
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
break;
}
+
+ return ret;
}
/**
@@ -2828,7 +2702,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return ret;
+
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
@@ -2878,7 +2755,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
/* Wait on any GPU rendering and flushing to occur. */
if (obj_priv->active) {
@@ -2926,7 +2805,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
@@ -3084,6 +2966,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
@@ -3146,6 +3029,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS) {
+ if (obj_priv->ring == &dev_priv->render_ring)
+ dev_priv->flush_rings |= FLUSH_RENDER_RING;
+ else if (obj_priv->ring == &dev_priv->bsd_ring)
+ dev_priv->flush_rings |= FLUSH_BSD_RING;
+ }
+
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
#if WATCH_BUF
@@ -3216,7 +3106,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
@@ -3328,7 +3221,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc->target_handle);
if (target_obj == NULL) {
i915_gem_object_unpin(obj);
- return -EBADF;
+ return -ENOENT;
}
target_obj_priv = to_intel_bo(target_obj);
@@ -3367,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
@@ -3451,7 +3346,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc_offset = obj_priv->gtt_offset + reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset &
- ~(PAGE_SIZE - 1)));
+ ~(PAGE_SIZE - 1)),
+ KM_USER0);
reloc_entry = (uint32_t __iomem *)(reloc_page +
(reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc->delta;
@@ -3462,7 +3358,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
readl(reloc_entry), reloc_val);
#endif
writel(reloc_val, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
+ io_mapping_unmap_atomic(reloc_page, KM_USER0);
/* The updated presumed offset for this entry will be
* copied back out to the user.
@@ -3681,7 +3577,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->render_ring;
}
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -3709,6 +3604,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret != 0) {
DRM_ERROR("copy %d cliprects failed: %d\n",
args->num_cliprects, ret);
+ ret = -EFAULT;
goto pre_mutex_err;
}
}
@@ -3744,7 +3640,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_list[i].handle, i);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
- ret = -EBADF;
+ ret = -ENOENT;
goto err;
}
@@ -3754,7 +3650,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
object_list[i]);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
- ret = -EBADF;
+ ret = -EINVAL;
goto err;
}
obj_priv->in_execbuffer = true;
@@ -3855,6 +3751,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
+ dev_priv->flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3875,16 +3772,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
+ if (dev_priv->flush_rings & FLUSH_RENDER_RING)
(void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
- }
+ dev->flush_domains,
+ &dev_priv->render_ring);
+ if (dev_priv->flush_rings & FLUSH_BSD_RING)
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
}
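The execbuffer path above now records which rings own dirty buffers in a dev_priv->flush_rings bitmask and emits a follow-up request only on those rings, instead of unconditionally adding one per ring. A small sketch of the same two-pass bookkeeping, with illustrative flag and struct names:

#include <stdio.h>

#define FLUSH_RENDER_RING (1 << 0)
#define FLUSH_BSD_RING    (1 << 1)

enum ring_id { RING_RENDER, RING_BSD };

struct buffer {
    enum ring_id ring;     /* ring that owns the pending write */
    unsigned write_domain; /* non-zero if a GPU write is outstanding */
};

int main(void)
{
    struct buffer bufs[] = {
        { RING_RENDER, 1 },
        { RING_RENDER, 0 },
        { RING_BSD,    1 },
    };
    unsigned flush_rings = 0;
    unsigned i;

    /* Pass 1: work out which rings have dirty buffers in this batch. */
    for (i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++)
        if (bufs[i].write_domain)
            flush_rings |= bufs[i].ring == RING_RENDER ?
                           FLUSH_RENDER_RING : FLUSH_BSD_RING;

    /* Pass 2: emit one flush request per ring that actually needs it. */
    if (flush_rings & FLUSH_RENDER_RING)
        printf("emit flush request on render ring\n");
    if (flush_rings & FLUSH_BSD_RING)
        printf("emit flush request on bsd ring\n");
    return 0;
}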
for (i = 0; i < args->buffer_count; i++) {
@@ -4155,6 +4050,10 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
+ WARN(obj_priv->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x\n",
+ obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
@@ -4176,8 +4075,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
- !list_empty(&obj_priv->list))
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
list_del_init(&obj_priv->list);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4228,7 +4126,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ return -ENOENT;
}
obj_priv = to_intel_bo(obj);
@@ -4284,7 +4182,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ return -ENOENT;
}
obj_priv = to_intel_bo(obj);
@@ -4313,35 +4211,43 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- drm_i915_private_t *dev_priv = dev->dev_private;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
args->handle);
- return -EBADF;
+ return -ENOENT;
}
mutex_lock(&dev->struct_mutex);
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs are
- * actually unmasked, and our working set ends up being larger than
- * required.
+
+ /* Count all active objects as busy, even if they are currently not used
+ * by the gpu. Users of this interface expect objects to eventually
+ * become non-busy without any further actions, therefore emit any
+ * necessary flushes here.
*/
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
+ obj_priv = to_intel_bo(obj);
+ args->busy = obj_priv->active;
+ if (args->busy) {
+ /* Unconditionally flush objects, even when the gpu still uses this
+ * object. Userspace calling this function indicates that it wants to
+ * use this buffer sooner rather than later, so issuing the required
+ * flush earlier is beneficial.
+ */
+ if (obj->write_domain) {
+ i915_gem_flush(dev, 0, obj->write_domain);
+ (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+ }
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+ /* Update the active list for the hardware's current position.
+ * Otherwise this only updates on a delayed timer or when irqs
+ * are actually unmasked, and our working set ends up being
+ * larger than required.
+ */
+ i915_gem_retire_requests_ring(dev, obj_priv->ring);
- obj_priv = to_intel_bo(obj);
- /* Don't count being on the flushing list against the object being
- * done. Otherwise, a buffer left on the flushing list but not getting
- * flushed (because nobody's flushing that domain) won't ever return
- * unbusy and get reused by libdrm's bo cache. The other expected
- * consumer of this interface, OpenGL's occlusion queries, also specs
- * that the objects get unbusy "eventually" without any interference.
- */
- args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
+ args->busy = obj_priv->active;
+ }
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -4375,7 +4281,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
args->handle);
- return -EBADF;
+ return -ENOENT;
}
mutex_lock(&dev->struct_mutex);
@@ -4441,20 +4347,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int ret;
- trace_i915_gem_object_destroy(obj);
-
- while (obj_priv->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- if (obj_priv->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
- i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj);
+ if (ret == -ERESTARTSYS) {
+ list_move(&obj_priv->list,
+ &dev_priv->mm.deferred_free_list);
+ return;
+ }
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
@@ -4466,28 +4371,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
kfree(obj_priv);
}
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
+void i915_gem_free_object(struct drm_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
+ trace_i915_gem_object_destroy(obj);
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->base;
+ while (obj_priv->pin_count > 0)
+ i915_gem_object_unpin(obj);
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
- }
- }
+ if (obj_priv->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
- return 0;
+ i915_gem_free_object_tail(obj);
}
int
@@ -4514,7 +4411,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_from_inactive_list(dev);
+ ret = i915_gem_evict_inactive(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4632,6 +4529,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
goto cleanup_render_ring;
}
+ dev_priv->next_seqno = 1;
+
return 0;
cleanup_render_ring:
@@ -4689,9 +4588,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
mutex_unlock(&dev->struct_mutex);
- drm_irq_install(dev);
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_ringbuffer;
return 0;
+
+cleanup_ringbuffer:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ dev_priv->mm.suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
int
@@ -4729,6 +4638,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
if (HAS_BSD(dev)) {
@@ -4782,7 +4692,7 @@ i915_gem_load(struct drm_device *dev)
* e.g. for cursor + overlay regs
*/
int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size)
+ int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4797,7 +4707,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0);
+ phys_obj->handle = drm_pci_alloc(dev, size, align);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
@@ -4879,7 +4789,9 @@ out:
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj, int id)
+ struct drm_gem_object *obj,
+ int id,
+ int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
@@ -4898,11 +4810,10 @@ i915_gem_attach_phys_object(struct drm_device *dev,
i915_gem_detach_phys_object(dev, obj);
}
-
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size);
+ obj->size, align);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
goto out;
@@ -5027,10 +4938,7 @@ rescan:
continue;
spin_unlock(&shrink_list_lock);
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests(dev);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 00000000000..5c428fa3e0b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+
+static struct drm_i915_gem_object *
+i915_gem_next_active_object(struct drm_device *dev,
+ struct list_head **render_iter,
+ struct list_head **bsd_iter)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+
+ if (*render_iter != &dev_priv->render_ring.active_list)
+ render_obj = list_entry(*render_iter,
+ struct drm_i915_gem_object,
+ list);
+
+ if (HAS_BSD(dev)) {
+ if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+ bsd_obj = list_entry(*bsd_iter,
+ struct drm_i915_gem_object,
+ list);
+
+ if (render_obj == NULL) {
+ *bsd_iter = (*bsd_iter)->next;
+ return bsd_obj;
+ }
+
+ if (bsd_obj == NULL) {
+ *render_iter = (*render_iter)->next;
+ return render_obj;
+ }
+
+ /* XXX can we handle seqno wrapping? */
+ if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+ *render_iter = (*render_iter)->next;
+ return render_obj;
+ } else {
+ *bsd_iter = (*bsd_iter)->next;
+ return bsd_obj;
+ }
+ } else {
+ *render_iter = (*render_iter)->next;
+ return render_obj;
+ }
+}
+
+static bool
+mark_free(struct drm_i915_gem_object *obj_priv,
+ struct list_head *unwind)
+{
+ list_add(&obj_priv->evict_list, unwind);
+ drm_gem_object_reference(&obj_priv->base);
+ return drm_mm_scan_add_block(obj_priv->gtt_space);
+}
+
+#define i915_for_each_active_object(OBJ, R, B) \
+ *(R) = dev_priv->render_ring.active_list.next; \
+ *(B) = dev_priv->bsd_ring.active_list.next; \
+ while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+
+int
+i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head eviction_list, unwind_list;
+ struct drm_i915_gem_object *obj_priv;
+ struct list_head *render_iter, *bsd_iter;
+ int ret = 0;
+
+ i915_gem_retire_requests(dev);
+
+ /* Re-check for free space after retiring requests */
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+
+ /*
+ * The goal is to evict objects and amalgamate space in LRU order.
+ * The oldest idle objects reside on the inactive list, which is in
+ * retirement order. The next objects to retire are those on the (per
+ * ring) active list that do not have an outstanding flush. Once the
+ * hardware reports completion (the seqno is updated after the
+ * batchbuffer has been finished) the clean buffer objects would
+ * be retired to the inactive list. Any dirty objects would be added
+ * to the tail of the flushing list. So after processing the clean
+ * active objects we need to emit a MI_FLUSH to retire the flushing
+ * list, hence the retirement order of the flushing list is in
+ * advance of the dirty objects on the active lists.
+ *
+ * The retirement sequence is thus:
+ * 1. Inactive objects (already retired)
+ * 2. Clean active objects
+ * 3. Flushing list
+ * 4. Dirty active objects.
+ *
+ * On each list, the oldest objects lie at the HEAD with the freshest
+ * object on the TAIL.
+ */
+
+ INIT_LIST_HEAD(&unwind_list);
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+
+ /* First see if there is a large enough contiguous idle region... */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Now merge in the soon-to-be-expired objects... */
+ i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ /* Does the object require an outstanding flush? */
+ if (obj_priv->base.write_domain || obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Finally add anything with a pending flush (in order of retirement) */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ if (obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+ i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ if (!obj_priv->base.write_domain || obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Nothing found, clean up and bail out! */
+ list_for_each_entry(obj_priv, &unwind_list, evict_list) {
+ ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ BUG_ON(ret);
+ drm_gem_object_unreference(&obj_priv->base);
+ }
+
+ /* We expect the caller to unpin, evict all and try again, or give up.
+ * So calling i915_gem_evict_everything() is unnecessary.
+ */
+ return -ENOSPC;
+
+found:
+ /* drm_mm doesn't allow any other operations while
+ * scanning, therefore store the objects to be evicted on a
+ * temporary list. */
+ INIT_LIST_HEAD(&eviction_list);
+ while (!list_empty(&unwind_list)) {
+ obj_priv = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ evict_list);
+ if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
+ list_move(&obj_priv->evict_list, &eviction_list);
+ continue;
+ }
+ list_del(&obj_priv->evict_list);
+ drm_gem_object_unreference(&obj_priv->base);
+ }
+
+ /* Unbinding will emit any required flushes */
+ while (!list_empty(&eviction_list)) {
+ obj_priv = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ evict_list);
+ if (ret == 0)
+ ret = i915_gem_object_unbind(&obj_priv->base);
+ list_del(&obj_priv->evict_list);
+ drm_gem_object_unreference(&obj_priv->base);
+ }
+
+ return ret;
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+ ret = i915_gem_evict_inactive(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+}
+
+/** Unbinds all inactive objects. */
+int
+i915_gem_evict_inactive(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
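i915_gem_evict_something() above works in two phases: it marks eviction candidates in LRU/retirement order through drm_mm's scan helpers, then either unwinds all the marks (no suitable hole was found) or unbinds exactly the marked set. The sketch below models that mark/unwind/commit flow with a plain array and a simple "total size" success test; the real code instead asks drm_mm for a contiguous, correctly aligned hole, so treat this only as an outline of the control flow:

#include <stdio.h>

struct object {
    int id;
    int size;
    int marked;   /* speculatively selected for eviction */
};

/* Phase 1: mark objects in LRU order until the marked set is big enough.
 * Phase 2: if enough space was found, evict exactly the marked objects;
 *          otherwise unwind the marks and report failure (-1).
 */
static int evict_something(struct object *lru, int count, int min_size)
{
    int marked_size = 0, i;

    for (i = 0; i < count && marked_size < min_size; i++) {
        lru[i].marked = 1;
        marked_size += lru[i].size;
    }

    if (marked_size < min_size) {
        for (i = 0; i < count; i++)      /* unwind: nothing gets evicted */
            lru[i].marked = 0;
        return -1;
    }

    for (i = 0; i < count; i++)
        if (lru[i].marked) {
            printf("evicting object %d (%d bytes)\n",
                   lru[i].id, lru[i].size);
            lru[i].marked = 0;
        }
    return 0;
}

int main(void)
{
    struct object lru[] = { { 1, 4096, 0 }, { 2, 8192, 0 }, { 3, 4096, 0 } };

    return evict_something(lru, 3, 12288) ? 1 : 0;
}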
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4b7c49d4257..710eca70b32 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -275,7 +275,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EINVAL;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_release_mmap(obj);
if (ret != 0) {
- WARN(ret != -ERESTARTSYS,
- "failed to reset object for tiling switch");
args->tiling_mode = obj_priv->tiling_mode;
args->stride = obj_priv->stride;
goto err;
@@ -364,7 +362,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EINVAL;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dba53d4b9fb..744225ebb4b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev)
ironlake_enable_display_irq(dev_priv, DE_GSE);
else {
i915_enable_pipestat(dev_priv, 1,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, 0,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
}
}
@@ -425,9 +425,11 @@ static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
struct drm_i915_gem_object *src_priv;
int page, page_count;
+ u32 reloc_offset;
if (src == NULL)
return NULL;
@@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev,
if (dst == NULL)
return NULL;
+ reloc_offset = src_priv->gtt_offset;
for (page = 0; page < page_count; page++) {
- void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
unsigned long flags;
+ void __iomem *s;
+ void *d;
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
+
local_irq_save(flags);
- s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s, KM_IRQ0);
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset,
+ KM_IRQ0);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s, KM_IRQ0);
local_irq_restore(flags);
+
dst->pages[page] = d;
+
+ reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
dst->gtt_offset = src_priv->gtt_offset;
@@ -489,6 +500,7 @@ i915_error_state_free(struct drm_device *dev,
i915_error_object_free(error->batchbuffer[1]);
i915_error_object_free(error->ringbuffer);
kfree(error->active_bo);
+ kfree(error->overlay);
kfree(error);
}
@@ -612,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size &&
- batchbuffer[0] != obj)
+ error->acthd < obj_priv->gtt_offset + obj->size)
batchbuffer[1] = obj;
count++;
}
+ /* For those bizarre errors, scan the other lists for completeness. */
+ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
+
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size)
+ batchbuffer[1] = obj;
+
+ if (batchbuffer[0] && batchbuffer[1])
+ break;
+ }
+ }
+ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
+
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size)
+ batchbuffer[1] = obj;
+
+ if (batchbuffer[0] && batchbuffer[1])
+ break;
+ }
+ }
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
- error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+ if (batchbuffer[1] != batchbuffer[0])
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+ else
+ error->batchbuffer[1] = NULL;
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
@@ -667,6 +718,8 @@ static void i915_capture_error_state(struct drm_device *dev)
do_gettimeofday(&error->time);
+ error->overlay = intel_overlay_capture_error_state(dev);
+
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
@@ -834,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
queue_work(dev_priv->wq, &dev_priv->error_work);
}
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ bool stall_detected;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+
+ if (work == NULL || work->pending || !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj_priv = to_intel_bo(work->pending_flip_obj);
+ if (IS_I965G(dev)) {
+ int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ } else {
+ int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+ stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ crtc->y * crtc->fb->pitch +
+ crtc->x * crtc->fb->bits_per_pixel/8);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (stall_detected) {
+ DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -842,7 +938,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
u32 iir, new_iir;
u32 pipea_stats, pipeb_stats;
u32 vblank_status;
- u32 vblank_enable;
int vblank = 0;
unsigned long irqflags;
int irq_received;
@@ -856,13 +951,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
iir = I915_READ(IIR);
- if (IS_I965G(dev)) {
- vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
- vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
- } else {
- vblank_status = I915_VBLANK_INTERRUPT_STATUS;
- vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
- }
+ if (IS_I965G(dev))
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+ else
+ vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
for (;;) {
irq_received = iir != 0;
@@ -955,19 +1047,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
- if (!dev_priv->flip_pending_is_done)
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, 0);
intel_finish_page_flip(dev, 0);
+ }
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
- if (!dev_priv->flip_pending_is_done)
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, 1);
intel_finish_page_flip(dev, 1);
+ }
}
- if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
- (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
@@ -1233,41 +1329,67 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd;
+ uint32_t acthd, instdone, instdone1;
/* No reset support on this chip yet. */
if (IS_GEN6(dev))
return;
- if (!IS_I965G(dev))
+ if (!IS_I965G(dev)) {
acthd = I915_READ(ACTHD);
- else
+ instdone = I915_READ(INSTDONE);
+ instdone1 = 0;
+ } else {
acthd = I915_READ(ACTHD_I965);
+ instdone = I915_READ(INSTDONE_I965);
+ instdone1 = I915_READ(INSTDONE1);
+ }
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->render_ring.request_list) ||
i915_seqno_passed(i915_get_gem_seqno(dev,
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
+ bool missed_wakeup = false;
+
dev_priv->hangcheck_count = 0;
+
+ /* Issue a wake-up to catch stuck h/w. */
+ if (dev_priv->render_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (dev_priv->bsd_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (missed_wakeup)
+ DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
return;
}
- if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
- return;
- }
+ if (dev_priv->last_acthd == acthd &&
+ dev_priv->last_instdone == instdone &&
+ dev_priv->last_instdone1 == instdone1) {
+ if (dev_priv->hangcheck_count++ > 1) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+ return;
+ }
+ } else {
+ dev_priv->hangcheck_count = 0;
+
+ dev_priv->last_acthd = acthd;
+ dev_priv->last_instdone = instdone;
+ dev_priv->last_instdone1 = instdone1;
+ }
/* Reset timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-
- if (acthd != dev_priv->last_acthd)
- dev_priv->hangcheck_count = 0;
- else
- dev_priv->hangcheck_count++;
-
- dev_priv->last_acthd = acthd;
}
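The reworked hangcheck above samples ACTHD plus the INSTDONE registers on every timer tick and only calls i915_handle_error() once those values have stayed identical for several consecutive ticks, resetting the counter whenever any of them changes. A compact, self-contained sketch of that progress check (field names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hangcheck {
    uint32_t last_acthd, last_instdone, last_instdone1;
    int count;
};

/* Returns true once the sampled registers have shown no change for
 * several consecutive ticks; any progress resets the counter. */
static bool hangcheck_tick(struct hangcheck *hc, uint32_t acthd,
                           uint32_t instdone, uint32_t instdone1)
{
    if (hc->last_acthd == acthd &&
        hc->last_instdone == instdone &&
        hc->last_instdone1 == instdone1)
        return hc->count++ > 1;

    hc->count = 0;
    hc->last_acthd = acthd;
    hc->last_instdone = instdone;
    hc->last_instdone1 = instdone1;
    return false;
}

int main(void)
{
    struct hangcheck hc = { 0 };

    printf("%d\n", hangcheck_tick(&hc, 0x100, 1, 0)); /* progress seen: 0 */
    printf("%d\n", hangcheck_tick(&hc, 0x100, 1, 0)); /* unchanged #1: 0 */
    printf("%d\n", hangcheck_tick(&hc, 0x100, 1, 0)); /* unchanged #2: 0 */
    printf("%d\n", hangcheck_tick(&hc, 0x100, 1, 0)); /* unchanged #3: 1 */
    return 0;
}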
/* drm_dma.h hooks
@@ -1314,12 +1436,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* user interrupt should be enabled, but masked initial */
+ /* Gen6 only needs render pipe_control now */
+ if (IS_GEN6(dev))
+ render_mask = GT_PIPE_NOTIFY;
+
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ if (IS_GEN6(dev))
+ I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 8fcc75c1aa2..ea5d3fea4b6 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -114,10 +114,6 @@ struct opregion_asle {
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAIL (2<<10)
-#define ASLE_BACKLIGHT_FAIL (2<<12)
-#define ASLE_PFIT_FAIL (2<<14)
-#define ASLE_PWM_FREQ_FAIL (2<<16)
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
@@ -155,11 +151,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAIL;
+ return ASLE_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAIL;
+ return ASLE_BACKLIGHT_FAILED;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
@@ -211,7 +207,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
- return ASLE_PFIT_FAIL;
+ return ASLE_PFIT_FAILED;
return 0;
}
@@ -535,6 +531,7 @@ int intel_opregion_init(struct drm_device *dev, int resume)
err_out:
iounmap(opregion->header);
opregion->header = NULL;
+ acpi_video_register();
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cf41c672def..4f5e15577e8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -170,6 +170,7 @@
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
@@ -180,6 +181,12 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -312,6 +319,7 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
+# define MI_FLUSH_ENABLE (1 << 11)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
@@ -442,7 +450,7 @@
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
-#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
@@ -530,6 +538,21 @@
#define DPFC_CHICKEN 0x3224
#define DPFC_HT_MODIFY (1<<31)
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE 0x43200
+#define ILK_DPFC_CONTROL 0x43208
+/* Bits 28-8 are reserved */
+#define DPFC_RESERVED (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL 0x4320c
+#define ILK_DPFC_STATUS 0x43210
+#define ILK_DPFC_FENCE_YOFF 0x43218
+#define ILK_DPFC_CHICKEN 0x43224
+#define ILK_FBC_RT_BASE 0x2128
+#define ILK_FBC_RT_VALID (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_FBCQ_DIS (1<<22)
+
/*
* GPIO regs
*/
@@ -595,32 +618,6 @@
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define I915_CRC_ERROR_ENABLE (1UL<<29)
-#define I915_CRC_DONE_ENABLE (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define I915_DPST_EVENT_ENABLE (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define I915_DPST_EVENT_STATUS (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
-
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
@@ -1111,6 +1108,11 @@
#define PEG_BAND_GAP_DATA 0x14d68
/*
+ * Logical Context regs
+ */
+#define CCID 0x2180
+#define CCID_EN (1<<0)
+/*
* Overlay regs
*/
@@ -2080,6 +2082,7 @@
#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
+#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
#define PIPEACONF_ENABLE (1<<31)
#define PIPEACONF_DISABLE 0
@@ -2166,7 +2169,8 @@
#define I830_FIFO_LINE_SIZE 32
#define G4X_FIFO_SIZE 127
-#define I945_FIFO_SIZE 127 /* 945 & 965 */
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
#define I915_FIFO_SIZE 95
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
@@ -2185,6 +2189,9 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
@@ -2199,9 +2206,17 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_FBC_LP1_MASK (0xf<<20)
+#define WM1_LP_FBC_LP1_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM2_LP_ILK 0x4510c
+#define WM2_LP_EN (1<<31)
+#define WM3_LP_ILK 0x45110
+#define WM3_LP_EN (1<<31)
+#define WM1S_LP_ILK 0x45120
+#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
#define MLTR_ILK 0x11222
@@ -2212,6 +2227,9 @@
#define ILK_DISPLAY_FIFO 128
#define ILK_DISPLAY_MAXWM 64
#define ILK_DISPLAY_DFTWM 8
+#define ILK_CURSOR_FIFO 32
+#define ILK_CURSOR_MAXWM 16
+#define ILK_CURSOR_DFTWM 8
#define ILK_DISPLAY_SR_FIFO 512
#define ILK_DISPLAY_MAX_SRWM 0x1ff
@@ -2510,6 +2528,10 @@
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
+/* According to the spec, bits 7/8/9 of 0x42020 must be set to enable FBC */
+#define ILK_CLK_FBC (1<<7)
+#define ILK_DPFC_DIS1 (1<<8)
+#define ILK_DPFC_DIS2 (1<<9)
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
@@ -2928,6 +2950,7 @@
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+#define TRANS_DP_SYNC_MASK (3<<3)
/* SNB eDP training params */
/* SNB A-stepping */
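The WM1_LP additions above carve the Ironlake LP1 watermark register into latency, FBC, self-refresh and cursor fields. A minimal sketch of how a value for WM1_LP_ILK could be packed from these masks and shifts; the helper name and the idea of packing it in one place are illustrative, not taken from the patch:

static u32 pack_wm1_lp(u32 latency, u32 fbc_wm, u32 sr_wm, u32 cursor_wm)
{
	u32 val = WM1_LP_SR_EN;	/* enable the LP1 watermark */

	val |= (latency << WM1_LP_LATENCY_SHIFT) & WM1_LP_LATENCY_MASK;
	val |= (fbc_wm << WM1_LP_FBC_LP1_SHIFT) & WM1_LP_FBC_LP1_MASK;
	val |= (sr_wm << WM1_LP_SR_SHIFT) & WM1_LP_SR_MASK;
	val |= cursor_wm & WM1_LP_CURSOR_MASK;

	return val;	/* the driver would write this to WM1_LP_ILK */
}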
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a5800fba6..31f08581e93 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -395,16 +395,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- DRM_UDELAY(150);
+ POSTING_READ(DPLL_A_MD);
+ }
+ udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -413,10 +417,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -460,16 +464,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- DRM_UDELAY(150);
+ POSTING_READ(DPLL_B_MD);
+ }
+ udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
@@ -478,10 +486,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +554,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +579,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -602,7 +610,9 @@ void i915_save_display(struct drm_device *dev)
/* Only save FBC state on platforms that support FBC */
if (I915_HAS_FBC(dev)) {
- if (IS_GM45(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
@@ -616,7 +626,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -658,24 +668,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -706,7 +716,10 @@ void i915_restore_display(struct drm_device *dev)
/* Only restore FBC info on platforms that support FBC */
if (I915_HAS_FBC(dev)) {
- if (IS_GM45(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ ironlake_disable_fbc(dev);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
@@ -718,14 +731,15 @@ void i915_restore_display(struct drm_device *dev)
}
}
/* VGA state */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- DRM_UDELAY(150);
+ POSTING_READ(VGA_PD);
+ udelay(150);
i915_restore_vga(dev);
}
@@ -743,7 +757,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -757,7 +771,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_drps(dev);
/* Cache mode state */
@@ -775,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- } else {
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
+ break;
+ case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
}
return 0;
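The generation switch above replaces the old I965-or-not test: gen 4/5/6 parts have sixteen 64-bit fence registers (FENCE_REG_965_0, or FENCE_REG_SANDYBRIDGE_0 on gen 6), while gen 2/3 use eight 32-bit registers at FENCE_REG_830_0, with 945G/945GM/G33 adding eight more at FENCE_REG_945_8. A hedged sketch of the layout those loops assume — the struct and helper are hypothetical, only the bases and counts come from the code above:

struct fence_layout { u32 base; int count; int is_64bit; };

static struct fence_layout fence_layout_for_gen(int gen)
{
	switch (gen) {
	case 6:
		return (struct fence_layout){ FENCE_REG_SANDYBRIDGE_0, 16, 1 };
	case 5:
	case 4:
		return (struct fence_layout){ FENCE_REG_965_0, 16, 1 };
	default:
		/* 945G/945GM/G33 expose 8 extra fences at FENCE_REG_945_8 */
		return (struct fence_layout){ FENCE_REG_830_0, 8, 0 };
	}
}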
@@ -801,21 +824,30 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Fences */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
}
i915_restore_display(dev);
/* Interrupt state */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,7 +862,7 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_drps(dev);
/* Cache mode state */
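Each DRM_UDELAY(150) after a DPLL or VGA register write is replaced above by a POSTING_READ() of the same register followed by udelay(150). The read-back flushes the posted MMIO write to the hardware before the delay starts, so the 150µs actually covers device settling time rather than time the write spent buffered. The pattern in isolation, lifted from the DPLL A path above:

	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE);
	POSTING_READ(dpll_a_reg);	/* force the posted write out now */
	udelay(150);			/* the delay now times the hardware */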
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fab21760dd5..fea97a21cc1 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -262,6 +262,42 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
TP_ARGS(dev)
);
+TRACE_EVENT(i915_flip_request,
+ TP_PROTO(int plane, struct drm_gem_object *obj),
+
+ TP_ARGS(plane, obj),
+
+ TP_STRUCT__entry(
+ __field(int, plane)
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_flip_complete,
+ TP_PROTO(int plane, struct drm_gem_object *obj),
+
+ TP_ARGS(plane, obj),
+
+ TP_STRUCT__entry(
+ __field(int, plane)
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
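The two TRACE_EVENTs above generate trace_i915_flip_request() and trace_i915_flip_complete() hooks; their call sites live in the page-flip code elsewhere in this patch and are not shown in this excerpt. A hedged sketch of how they would be invoked (intel_crtc and obj stand in for whatever the real call sites pass):

	/* when a flip is queued to the ring */
	trace_i915_flip_request(intel_crtc->plane, obj);

	/* from the flip-done interrupt path, once the new scanout address is live */
	trace_i915_flip_complete(intel_crtc->plane, obj);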
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ee0732b222a..197d4f32585 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -160,19 +160,20 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa, temp;
bool ret;
+ bool turn_off_dac = false;
temp = adpa = I915_READ(PCH_ADPA);
- if (HAS_PCH_CPT(dev)) {
- /* Disable DAC before force detect */
- I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
- (void)I915_READ(PCH_ADPA);
- } else {
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
- }
+ if (HAS_PCH_SPLIT(dev))
+ turn_off_dac = true;
+
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -185,10 +186,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
- while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
- ;
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000, 1))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- if (HAS_PCH_CPT(dev)) {
+ if (turn_off_dac) {
I915_WRITE(PCH_ADPA, temp);
(void)I915_READ(PCH_ADPA);
}
@@ -237,17 +239,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries ; i++) {
- unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
- do {
- if (!(I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT))
- break;
- msleep(1);
- } while (time_after(timeout, jiffies));
+ if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0,
+ 1000, 1))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -331,7 +329,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
/* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -402,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
return status;
}
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -421,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
+ if (!force)
+ return connector->status;
+
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
@@ -508,17 +510,8 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
-static void intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- intel_i2c_destroy(intel_encoder->ddc_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
- .destroy = intel_crt_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
void intel_crt_init(struct drm_device *dev)
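Both hotplug detect paths above drop their open-coded jiffies loops in favour of a wait_for(COND, MS, W) helper that this patch adds to intel_drv.h (not shown in this excerpt). Only an approximation of the pattern it captures is sketched here — poll COND, optionally sleeping W ms between polls, and return non-zero if COND is still false after MS ms — the real macro may differ in detail:

#define wait_for_sketch(COND, MS, W) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		if (W)							\
			msleep(W);					\
	}								\
	ret__;								\
})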
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5e21b311982..97922859459 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,10 +29,12 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
@@ -42,6 +44,7 @@
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc);
typedef struct {
/* given values */
@@ -322,6 +325,9 @@ struct intel_limit {
#define IRONLAKE_DP_P1_MIN 1
#define IRONLAKE_DP_P1_MAX 2
+/* FDI */
+#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
@@ -971,11 +977,84 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-void
-intel_wait_for_vblank(struct drm_device *dev)
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- msleep(20);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+ if (wait_for((I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS),
+ 50, 0))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ *
+ */
+static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+
+ /* Wait for the Pipe State to go off */
+ if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100, 0))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ } else {
+ u32 last_line;
+ int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ /* Wait for the display line to settle */
+ do {
+ last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+ mdelay(5);
+ } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+ time_after(timeout, jiffies));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ }
}
/* Parameters have changed, update FBC info */
@@ -1029,7 +1108,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
@@ -1044,16 +1122,11 @@ void i8xx_disable_fbc(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
- if (time_after(jiffies, timeout)) {
- DRM_DEBUG_DRIVER("FBC idle timed out\n");
- break;
- }
- ; /* do nothing */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
}
- intel_wait_for_vblank(dev);
-
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1110,7 +1183,6 @@ void g4x_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1122,6 +1194,66 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
+ DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
+ DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
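ironlake_enable_fbc(), ironlake_disable_fbc() and ironlake_fbc_enabled() mirror the existing i8xx/g4x entry points, and the DPMS path below reaches them through dev_priv->display.disable_fbc while the suspend code calls ironlake_disable_fbc() directly. A sketch of how such per-platform hooks are typically selected at init; the .enable_fbc and .fbc_enabled member names are assumed here (only .disable_fbc appears in this excerpt), and the real assignment lives in the display setup code, not in this hunk:

	if (HAS_PCH_SPLIT(dev)) {
		/* .disable_fbc matches the member used below; the other two
		 * member names are assumptions for illustration */
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = ironlake_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	}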
@@ -1248,6 +1380,10 @@ static void intel_update_fbc(struct drm_crtc *crtc,
goto out_disable;
}
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
if (intel_fbc_enabled(dev)) {
/* We can re-enable it in this case, but need to update pitch */
if ((fb->pitch > dev_priv->cfb_pitch) ||
@@ -1279,7 +1415,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
switch (obj_priv->tiling_mode) {
case I915_TILING_NONE:
- alignment = 64 * 1024;
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+ else if (IS_I965G(dev))
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
break;
case I915_TILING_X:
/* pin() will align the object as required by fence */
@@ -1314,18 +1455,17 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
return 0;
}
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
-intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
- int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
@@ -1334,13 +1474,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
- int ret;
-
- /* no fb bound */
- if (!crtc->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
switch (plane) {
case 0:
@@ -1351,48 +1484,29 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- intel_fb = to_intel_framebuffer(crtc->fb);
+ intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
obj_priv = to_intel_bo(obj);
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- ret = i915_gem_object_set_to_display_plane(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
dspcntr = I915_READ(dspcntr_reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
+ switch (fb->bits_per_pixel) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->fb->depth == 15)
+ if (fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
break;
case 24:
case 32:
- if (crtc->fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
DRM_ERROR("Unknown color depth\n");
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
if (IS_I965G(dev)) {
@@ -1409,33 +1523,88 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_WRITE(dspcntr_reg, dspcntr);
Start = obj_priv->gtt_offset;
- Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, crtc->fb->pitch);
- I915_WRITE(dspstride, crtc->fb->pitch);
+ Start, Offset, x, y, fb->pitch);
+ I915_WRITE(dspstride, fb->pitch);
if (IS_I965G(dev)) {
- I915_WRITE(dspbase, Offset);
- I915_READ(dspbase);
I915_WRITE(dspsurf, Start);
- I915_READ(dspsurf);
I915_WRITE(dsptileoff, (y << 16) | x);
+ I915_WRITE(dspbase, Offset);
} else {
I915_WRITE(dspbase, Start + Offset);
- I915_READ(dspbase);
}
+ POSTING_READ(dspbase);
- if ((IS_I965G(dev) || plane == 0))
+ if (IS_I965G(dev) || plane == 0)
intel_update_fbc(crtc, &crtc->mode);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_increase_pllclock(crtc, true);
+
+ return 0;
+}
+
+static int
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int ret;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ obj = intel_fb->obj;
+ obj_priv = to_intel_bo(obj);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(dev, obj);
+ if (ret != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ ret = i915_gem_object_set_to_display_plane(obj);
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ if (ret) {
+ i915_gem_object_unpin(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
obj_priv = to_intel_bo(intel_fb->obj);
i915_gem_object_unpin(intel_fb->obj);
}
- intel_increase_pllclock(crtc, true);
mutex_unlock(&dev->struct_mutex);
@@ -1457,54 +1626,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
-
- if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
- return;
-
- I915_WRITE8(VGA_SR_INDEX, 1);
- sr1 = I915_READ8(VGA_SR_DATA);
- I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
- udelay(100);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
-}
-
-static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("\n");
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
-}
-
-static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
- udelay(200);
-}
-
-
static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
@@ -1554,6 +1675,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
u32 temp, tries = 0;
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
/* enable CPU FDI TX and PCH FDI RX */
temp = I915_READ(fdi_tx_reg);
temp |= FDI_TX_ENABLE;
@@ -1571,16 +1701,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
I915_READ(fdi_rx_reg);
udelay(150);
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
-
- for (;;) {
+ for (tries = 0; tries < 5; tries++) {
temp = I915_READ(fdi_rx_iir_reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
@@ -1590,14 +1711,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
temp | FDI_RX_BIT_LOCK);
break;
}
-
- tries++;
-
- if (tries > 5) {
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
- break;
- }
}
+ if (tries == 5)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
/* Train 2 */
temp = I915_READ(fdi_tx_reg);
@@ -1613,7 +1729,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
tries = 0;
- for (;;) {
+ for (tries = 0; tries < 5; tries++) {
temp = I915_READ(fdi_rx_iir_reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
@@ -1623,14 +1739,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
-
- tries++;
-
- if (tries > 5) {
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
- break;
- }
}
+ if (tries == 5)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
}
@@ -1655,6 +1766,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
u32 temp, i;
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
/* enable CPU FDI TX and PCH FDI RX */
temp = I915_READ(fdi_tx_reg);
temp |= FDI_TX_ENABLE;
@@ -1680,15 +1800,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_READ(fdi_rx_reg);
udelay(150);
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
-
for (i = 0; i < 4; i++ ) {
temp = I915_READ(fdi_tx_reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -1768,9 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
- int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
- int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1785,7 +1893,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1798,7 +1905,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
@@ -1808,10 +1915,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
- if (HAS_eDP) {
- /* enable eDP PLL */
- ironlake_enable_pll_edp(crtc);
- } else {
+ if (!HAS_eDP) {
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1843,16 +1947,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Enable panel fitting for LVDS */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(pf_ctl_reg);
- I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
-
- /* currently full aspect */
- I915_WRITE(pf_win_pos, 0);
-
- I915_WRITE(pf_win_size,
- (dev_priv->panel_fixed_mode->hdisplay << 16) |
- (dev_priv->panel_fixed_mode->vdisplay));
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || HAS_eDP || intel_pch_has_edp(crtc))) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+ dev_priv->pch_pf_pos);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+ dev_priv->pch_pf_size);
}
/* Enable CPU pipe */
@@ -1936,11 +2043,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int reg;
reg = I915_READ(trans_dp_ctl);
- reg &= ~TRANS_DP_PORT_SEL_MASK;
- reg = TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING |
- TRANS_DP_VSYNC_ACTIVE_HIGH |
- TRANS_DP_HSYNC_ACTIVE_HIGH;
+ reg &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK);
+ reg |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
switch (intel_trans_dp_port_sel(crtc)) {
case PCH_DP_B:
@@ -1973,16 +2084,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
- while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
- ;
-
+ if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
+ DRM_ERROR("failed to enable transcoder\n");
}
intel_crtc_load_lut(crtc);
- break;
+ intel_update_fbc(crtc, &crtc->mode);
+ break;
+
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
drm_vblank_off(dev, pipe);
/* Disable display plane */
@@ -1994,40 +2106,26 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
- i915_disable_vga(dev);
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- n = 0;
+
/* wait for cpu pipe off, pipe state */
- while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
- n++;
- if (n < 60) {
- udelay(500);
- continue;
- } else {
- DRM_DEBUG_KMS("pipe %d off delay\n",
- pipe);
- break;
- }
- }
+ if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
+ DRM_ERROR("failed to turn off cpu pipe\n");
} else
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
udelay(100);
/* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
- POSTING_READ(pf_win_size);
-
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -2074,20 +2172,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
- I915_READ(transconf_reg);
- n = 0;
+
/* wait for PCH transcoder off, transcoder state */
- while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
- n++;
- if (n < 60) {
- udelay(500);
- continue;
- } else {
- DRM_DEBUG_KMS("transcoder %d off "
- "delay\n", pipe);
- break;
- }
- }
+ if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
+ DRM_ERROR("failed to disable transcoder\n");
}
temp = I915_READ(transconf_reg);
@@ -2124,10 +2212,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(pch_dpll_reg);
- if (HAS_eDP) {
- ironlake_disable_pll_edp(crtc);
- }
-
/* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
@@ -2202,8 +2286,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- intel_update_watermarks(dev);
-
/* Enable the DPLL */
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -2243,8 +2325,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
- intel_update_watermarks(dev);
-
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, false);
drm_vblank_off(dev, pipe);
@@ -2253,9 +2333,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* Disable the VGA plane that we never use */
- i915_disable_vga(dev);
-
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2265,15 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
- (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank(dev, pipe);
goto skip_pipe_off;
+ }
/* Next, disable display pipes */
temp = I915_READ(pipeconf_reg);
@@ -2282,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pipeconf_reg);
}
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
+ /* Wait for the pipe to turn off */
+ intel_wait_for_pipe_off(dev, pipe);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2299,9 +2374,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/**
* Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
@@ -2312,9 +2384,29 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = intel_crtc->pipe;
bool enabled;
- dev_priv->display.dpms(crtc, mode);
+ if (intel_crtc->dpms_mode == mode)
+ return;
intel_crtc->dpms_mode = mode;
+ intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
+
+ /* When switching on the display, ensure that SR is disabled
+ * with multiple pipes prior to enabling the new pipe.
+ *
+ * When switching off the display, make sure the cursor is
+ * properly hidden prior to disabling the pipe.
+ */
+ if (mode == DRM_MODE_DPMS_ON)
+ intel_update_watermarks(dev);
+ else
+ intel_crtc_update_cursor(crtc);
+
+ dev_priv->display.dpms(crtc, mode);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ intel_crtc_update_cursor(crtc);
+ else
+ intel_update_watermarks(dev);
if (!dev->primary->master)
return;
@@ -2366,16 +2458,38 @@ void intel_encoder_commit (struct drm_encoder *encoder)
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
+
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
- if (mode->clock * 3 > 27000 * 4)
- return MODE_CLOCK_HIGH;
+ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+ return false;
}
+
+ /* XXX some encoders set the crtcinfo, others don't.
+ * Obviously we need some form of conflict resolution here...
+ */
+ if (adjusted_mode->crtc_htotal == 0)
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
return true;
}
@@ -2556,6 +2670,20 @@ static struct intel_watermark_params g4x_wm_info = {
2,
G4X_FIFO_LINE_SIZE,
};
+static struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
static struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
@@ -2593,6 +2721,14 @@ static struct intel_watermark_params ironlake_display_wm_info = {
ILK_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
static struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
@@ -2642,7 +2778,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1000;
- entries_required /= wm->cacheline_size;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
@@ -2669,7 +2805,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
-static struct cxsr_latency cxsr_latency_table[] = {
+static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
@@ -2707,11 +2843,13 @@ static struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
- int fsb, int mem)
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
{
+ const struct cxsr_latency *latency;
int i;
- struct cxsr_latency *latency;
if (fsb == 0 || mem == 0)
return NULL;
@@ -2732,13 +2870,9 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
/* deactivate cxsr */
- reg = I915_READ(DSPFW3);
- reg &= ~(PINEVIEW_SELF_REFRESH_EN);
- I915_WRITE(DSPFW3, reg);
- DRM_INFO("Big FIFO is disabled\n");
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
@@ -2763,11 +2897,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (plane == 0)
- size = dsparb & 0x7f;
- else
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
- (dsparb & 0x7f);
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
@@ -2781,11 +2913,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (plane == 0)
- size = dsparb & 0x1ff;
- else
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
- (dsparb & 0x1ff);
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
@@ -2826,15 +2956,16 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
}
static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int unused,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- struct cxsr_latency *latency;
int sr_clock;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
@@ -2880,9 +3011,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
@@ -2891,7 +3021,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
}
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int total_size, cacheline_size;
@@ -2915,12 +3046,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
*/
entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
1000;
- entries_required /= G4X_FIFO_LINE_SIZE;
+ entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
planea_wm = entries_required + planea_params.guard_size;
entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
1000;
- entries_required /= G4X_FIFO_LINE_SIZE;
+ entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
planeb_wm = entries_required + planeb_params.guard_size;
cursora_wm = cursorb_wm = 16;
@@ -2934,13 +3065,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+
+ entries_required = (((sr_latency_ns / line_time_us) +
+ 1000) / 1000) * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ g4x_cursor_wm_info.cacheline_size);
+ cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
+
+ if (cursor_sr > g4x_cursor_wm_info.max_wm)
+ cursor_sr = g4x_cursor_wm_info.max_wm;
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_entries, cursor_sr);
+
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
@@ -2965,11 +3107,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
}
static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long line_time_us;
int sr_clock, sr_entries, srwm = 1;
+ int cursor_sr = 16;
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
@@ -2977,17 +3121,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I945_FIFO_SIZE - sr_entries;
+ srwm = I965_FIFO_SIZE - sr_entries;
if (srwm < 0)
srwm = 1;
- srwm &= 0x3f;
+ srwm &= 0x1ff;
+
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ sr_entries = DIV_ROUND_UP(sr_entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (sr_entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
if (IS_I965GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
@@ -3004,10 +3162,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
(8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
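A standalone sketch of the self-refresh sizing above: the scanline time comes from htotal (the full line including blanking, which is what the sr_hdisplay to sr_htotal change fixes), and the intermediate value stays scaled by 1000 (ns over us) so the final divide loses less precision. example_sr_entries() and its values are illustrative only.

/* Illustrative sketch of the self-refresh entry count computed above. */
static int example_sr_entries(int latency_ns, int htotal, int clock_khz,
			      int width_pixels, int bytes_per_pixel)
{
	/* full scanline (including blanking) in microseconds */
	int line_time_us = (htotal * 1000) / clock_khz;

	/* latency in thousandths of a line, plus one full line of margin,
	 * scaled back to lines and multiplied by the bytes per line */
	return (((latency_ns / line_time_us) + 1000) / 1000) *
		width_pixels * bytes_per_pixel;
}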
static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo;
@@ -3052,12 +3213,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
static const int sr_latency_ns = 6000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / cacheline_size, 1);
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
if (srwm < 0)
@@ -3095,7 +3256,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
}
static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- int unused2, int pixel_size)
+ int unused2, int unused3, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -3113,9 +3274,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
}
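The cursor self-refresh watermarks added above and in the Ironlake code below apply the same entry math over a fixed 64-pixel-wide cursor fetch; a minimal sketch with illustrative parameters (example_cursor_wm() is not driver code; DIV_ROUND_UP as in the earlier sketch).

/* Illustrative sketch: cursor lines are 64 pixels wide, so the fetch is
 * line_count * 64 * bytes_per_pixel, rounded up to whole cachelines,
 * plus a guard, clamped to the register field's maximum. */
static int example_cursor_wm(int line_count, int bytes_per_pixel,
			     int cacheline_size, int guard_size, int max_wm)
{
	int entries = line_count * 64 * bytes_per_pixel;
	int wm = DIV_ROUND_UP(entries, cacheline_size) + guard_size;

	return wm > max_wm ? max_wm : wm;
}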
#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -3123,20 +3286,47 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
unsigned long line_time_us;
int sr_clock, entries_required;
u32 reg_value;
+ int line_count;
+ int planea_htotal = 0, planeb_htotal = 0;
+ struct drm_crtc *crtc;
+
+ /* Need htotal for all active display planes */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ if (intel_crtc->plane == 0)
+ planea_htotal = crtc->mode.htotal;
+ else
+ planeb_htotal = crtc->mode.htotal;
+ }
+ }
/* Calculate and update the watermark for plane A */
if (planea_clock) {
entries_required = ((planea_clock / 1000) * pixel_size *
ILK_LP0_PLANE_LATENCY) / 1000;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
+ ironlake_display_wm_info.cacheline_size);
planea_wm = entries_required +
ironlake_display_wm_info.guard_size;
if (planea_wm > (int)ironlake_display_wm_info.max_wm)
planea_wm = ironlake_display_wm_info.max_wm;
- cursora_wm = 16;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = (planea_htotal * 1000) / planea_clock;
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+ /* calculate the cursor watermark for cursor A */
+ entries_required = line_count * 64 * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_wm_info.cacheline_size);
+ cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+ if (cursora_wm > ironlake_cursor_wm_info.max_wm)
+ cursora_wm = ironlake_cursor_wm_info.max_wm;
+
reg_value = I915_READ(WM0_PIPEA_ILK);
reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3150,14 +3340,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
entries_required = ((planeb_clock / 1000) * pixel_size *
ILK_LP0_PLANE_LATENCY) / 1000;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
+ ironlake_display_wm_info.cacheline_size);
planeb_wm = entries_required +
ironlake_display_wm_info.guard_size;
if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
planeb_wm = ironlake_display_wm_info.max_wm;
- cursorb_wm = 16;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = (planeb_htotal * 1000) / planeb_clock;
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+ /* calculate the cursor watermark for cursor B */
+ entries_required = line_count * 64 * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_wm_info.cacheline_size);
+ cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+ if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
+ cursorb_wm = ironlake_cursor_wm_info.max_wm;
+
reg_value = I915_READ(WM0_PIPEB_ILK);
reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3172,12 +3375,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
* display plane is used.
*/
if (!planea_clock || !planeb_clock) {
- int line_count;
+
/* Read the self-refresh latency. The unit is 0.5us */
int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
@@ -3186,14 +3389,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
/* calculate the self-refresh watermark for display plane */
entries_required = line_count * sr_hdisplay * pixel_size;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_srwm_info.cacheline_size);
+ ironlake_display_srwm_info.cacheline_size);
sr_wm = entries_required +
ironlake_display_srwm_info.guard_size;
/* calculate the self-refresh watermark for display cursor */
entries_required = line_count * pixel_size * 64;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_srwm_info.cacheline_size);
+ ironlake_cursor_srwm_info.cacheline_size);
cursor_wm = entries_required +
ironlake_cursor_srwm_info.guard_size;
@@ -3201,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
reg_value = I915_READ(WM1_LP_ILK);
reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
WM1_LP_CURSOR_MASK);
- reg_value |= WM1_LP_SR_EN |
- (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
(sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
I915_WRITE(WM1_LP_ILK, reg_value);
@@ -3237,6 +3439,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
* bytes per pixel
* where
* line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
*
* The final value programmed to the register should always be rounded up,
@@ -3249,18 +3452,18 @@ static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
+ int sr_htotal = 0;
if (!dev_priv->display.update_wm)
return;
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- if (crtc->enabled) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
@@ -3273,6 +3476,7 @@ static void intel_update_watermarks(struct drm_device *dev)
}
sr_hdisplay = crtc->mode.hdisplay;
sr_clock = crtc->mode.clock;
+ sr_htotal = crtc->mode.htotal;
if (crtc->fb)
pixel_size = crtc->fb->bits_per_pixel / 8;
else
@@ -3284,7 +3488,7 @@ static void intel_update_watermarks(struct drm_device *dev)
return;
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
- sr_hdisplay, pixel_size);
+ sr_hdisplay, sr_htotal, pixel_size);
}
static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -3317,10 +3521,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- bool is_edp = false;
+ struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -3341,12 +3544,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
-
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3370,7 +3573,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- is_edp = true;
+ has_edp_encoder = intel_encoder;
break;
}
@@ -3403,6 +3606,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc);
+
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
@@ -3445,10 +3651,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (is_edp) {
+ if (has_edp_encoder) {
target_clock = mode->clock;
- intel_edp_link_config(intel_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(has_edp_encoder,
+ &lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
instead of link clock */
@@ -3469,7 +3675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (is_edp) {
+ } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
switch (dev_priv->edp_bpp/3) {
case 8:
temp |= PIPE_8BPC;
@@ -3542,7 +3748,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(200);
- if (is_edp) {
+ if (has_edp_encoder) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3691,9 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll_reg = pch_dpll_reg;
}
- if (is_edp) {
- ironlake_disable_pll_edp(crtc);
- } else if ((dpll & DPLL_VCO_ENABLE)) {
+ if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
@@ -3712,6 +3916,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ if (HAS_PCH_SPLIT(dev)) {
+ pipeconf &= ~PIPE_ENABLE_DITHER;
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3754,16 +3963,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (dev_priv->lvds_dither) {
if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
pipeconf |= PIPE_DITHER_TYPE_ST01;
} else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf &= ~PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
- } else
+ if (!HAS_PCH_SPLIT(dev)) {
lvds &= ~LVDS_ENABLE_DITHER;
+ }
}
}
I915_WRITE(lvds_reg, lvds);
@@ -3786,7 +3992,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (!is_edp) {
+ if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
I915_READ(dpll_reg);
@@ -3865,7 +4071,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
- if (is_edp) {
+ if (has_edp_encoder) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
@@ -3890,7 +4096,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(pipeconf_reg, pipeconf);
I915_READ(pipeconf_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
@@ -3903,9 +4109,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
-
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
@@ -3939,6 +4142,118 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
}
}
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool visible = base != 0;
+ u32 cntl;
+
+ if (intel_crtc->cursor_visible == visible)
+ return;
+
+ cntl = I915_READ(CURACNTR);
+ if (visible) {
+ /* On these chipsets we can only modify the base whilst
+ * the cursor is disabled.
+ */
+ I915_WRITE(CURABASE, base);
+
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB;
+ } else
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ I915_WRITE(CURACNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ if (base) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+}
+
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int x = intel_crtc->cursor_x;
+ int y = intel_crtc->cursor_y;
+ u32 base, pos;
+ bool visible;
+
+ pos = 0;
+
+ if (intel_crtc->cursor_on && crtc->fb) {
+ base = intel_crtc->cursor_addr;
+ if (x > (int) crtc->fb->width)
+ base = 0;
+
+ if (y > (int) crtc->fb->height)
+ base = 0;
+ } else
+ base = 0;
+
+ if (x < 0) {
+ if (x + intel_crtc->cursor_width < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ if (y + intel_crtc->cursor_height < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ visible = base != 0;
+ if (!visible && !intel_crtc->cursor_visible)
+ return;
+
+ I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+ if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+
+ if (visible)
+ intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
+}
+
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -3949,11 +4264,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_gem_object *bo;
struct drm_i915_gem_object *obj_priv;
- int pipe = intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp = I915_READ(control);
- size_t addr;
+ uint32_t addr;
int ret;
DRM_DEBUG_KMS("\n");
@@ -3961,12 +4272,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- temp |= CURSOR_MODE_DISABLE;
- } else {
- temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- }
addr = 0;
bo = NULL;
mutex_lock(&dev->struct_mutex);
@@ -4008,7 +4313,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->gtt_offset;
} else {
- ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_attach_phys_object(dev, bo,
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+ align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
@@ -4019,21 +4327,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!IS_I9XX(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
- /* Hooray for CUR*CNTR differences */
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
- temp |= (pipe << 28); /* Connect to correct pipe */
- } else {
- temp &= ~(CURSOR_FORMAT_MASK);
- temp |= CURSOR_ENABLE;
- temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
- }
-
finish:
- I915_WRITE(control, temp);
- I915_WRITE(base, addr);
-
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
@@ -4047,6 +4341,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_width = width;
+ intel_crtc->cursor_height = height;
+
+ intel_crtc_update_cursor(crtc);
return 0;
fail_unpin:
@@ -4060,34 +4358,12 @@ fail:
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- int pipe = intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t adder;
- if (crtc->fb) {
- intel_fb = to_intel_framebuffer(crtc->fb);
- intel_mark_busy(dev, intel_fb->obj);
- }
+ intel_crtc->cursor_x = x;
+ intel_crtc->cursor_y = y;
- if (x < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- if (y < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
-
- temp |= x << CURSOR_X_SHIFT;
- temp |= y << CURSOR_Y_SHIFT;
-
- adder = intel_crtc->cursor_addr;
- I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+ intel_crtc_update_cursor(crtc);
return 0;
}
@@ -4114,15 +4390,12 @@ void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
}
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t start, uint32_t size)
{
+ int end = (start + size > 256) ? 256 : start + size, i;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int i;
-
- if (size != 256)
- return;
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
intel_crtc->lut_r[i] = red[i] >> 8;
intel_crtc->lut_g[i] = green[i] >> 8;
intel_crtc->lut_b[i] = blue[i] >> 8;
@@ -4232,7 +4505,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
encoder_funcs->commit(encoder);
}
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
return crtc;
}
@@ -4437,7 +4710,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4481,7 +4754,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
@@ -4604,15 +4877,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
-struct intel_unpin_work {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
- struct drm_pending_vblank_event *event;
- int pending;
-};
-
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
@@ -4671,6 +4935,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -4698,7 +4964,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- intel_crtc->unpin_work->pending = 1;
+ if ((++intel_crtc->unpin_work->pending) > 1)
+ DRM_ERROR("Prepared flip multiple times\n");
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
@@ -4717,9 +4984,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
- int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
- int ret, pipesrc;
- u32 flip_mask;
+ int pipe = intel_crtc->pipe;
+ u32 pf, pipesrc;
+ int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -4748,65 +5015,115 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- kfree(work);
-
- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
- to_intel_bo(obj));
- return ret;
- }
+ if (ret)
+ goto cleanup_work;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(work->old_fb_obj);
drm_gem_object_reference(obj);
crtc->fb = fb;
- i915_gem_object_flush_write_domain(obj);
- drm_vblank_get(dev, intel_crtc->pipe);
+ ret = i915_gem_object_flush_write_domain(obj);
+ if (ret)
+ goto cleanup_objs;
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto cleanup_objs;
+
obj_priv = to_intel_bo(obj);
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
- if (intel_crtc->plane)
- flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- else
- flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ u32 flip_mask;
- /* Wait for any previous flip to finish */
- if (IS_GEN3(dev))
- while (I915_READ(ISR) & flip_mask)
- ;
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+
+ work->enable_stall_check = true;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = obj_priv->gtt_offset;
- offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
BEGIN_LP_RING(4);
- if (IS_I965G(dev)) {
+ switch(INTEL_INFO(dev)->gen) {
+ case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset | obj_priv->tiling_mode);
- pipesrc = I915_READ(pipesrc_reg);
- OUT_RING(pipesrc & 0x0fff0fff);
- } else {
+ OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ break;
+
+ case 3:
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset);
+ OUT_RING(obj_priv->gtt_offset + offset);
OUT_RING(MI_NOOP);
+ break;
+
+ case 4:
+ case 5:
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ break;
+
+ case 6:
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj_priv->tiling_mode);
+ OUT_RING(obj_priv->gtt_offset);
+
+ pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ break;
}
ADVANCE_LP_RING();
mutex_unlock(&dev->struct_mutex);
+ trace_i915_flip_request(intel_crtc->plane, obj);
+
return 0;
+
+cleanup_objs:
+ drm_gem_object_unreference(work->old_fb_obj);
+ drm_gem_object_unreference(obj);
+cleanup_work:
+ mutex_unlock(&dev->struct_mutex);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ kfree(work);
+
+ return ret;
}
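The flip path above computes the scanout offset for a framebuffer shared between CRTCs as whole rows plus the byte offset within a row; a standalone sketch of that arithmetic (example_flip_offset() is illustrative, not driver code).

/* Illustrative sketch of the shared-framebuffer offset used above:
 * y rows of pitch bytes, plus x pixels converted to bytes. */
static unsigned long example_flip_offset(int x, int y,
					 unsigned int pitch_bytes,
					 unsigned int bits_per_pixel)
{
	return (unsigned long)y * pitch_bytes + x * (bits_per_pixel / 8);
}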
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -4814,6 +5131,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
.prepare = intel_crtc_prepare,
.commit = intel_crtc_commit,
.load_lut = intel_crtc_load_lut,
@@ -4864,7 +5182,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ intel_crtc->dpms_mode = -1;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
@@ -4932,19 +5250,26 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder;
+ bool dpd_is_edp = false;
- intel_crt_init(dev);
-
- /* Set up integrated LVDS */
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
if (HAS_PCH_SPLIT(dev)) {
- int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
intel_dp_init(dev, DP_A);
+ if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_D);
+ }
+
+ intel_crt_init(dev);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ int found;
+
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB);
@@ -4963,7 +5288,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C);
- if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
@@ -5076,18 +5401,18 @@ intel_user_framebuffer_create(struct drm_device *dev,
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
- return NULL;
+ return ERR_PTR(-ENOENT);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = intel_framebuffer_init(dev, intel_fb,
mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
kfree(intel_fb);
- return NULL;
+ return ERR_PTR(ret);
}
return &intel_fb->base;
@@ -5099,37 +5424,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
};
static struct drm_gem_object *
-intel_alloc_power_context(struct drm_device *dev)
+intel_alloc_context_page(struct drm_device *dev)
{
- struct drm_gem_object *pwrctx;
+ struct drm_gem_object *ctx;
int ret;
- pwrctx = i915_gem_alloc_object(dev, 4096);
- if (!pwrctx) {
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(pwrctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
- ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
mutex_unlock(&dev->struct_mutex);
- return pwrctx;
+ return ctx;
err_unpin:
- i915_gem_object_unpin(pwrctx);
+ i915_gem_object_unpin(ctx);
err_unref:
- drm_gem_object_unreference(pwrctx);
+ drm_gem_object_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5161,7 +5486,6 @@ void ironlake_enable_drps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
- int i = 0;
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
@@ -5205,13 +5529,8 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
- if (i++ > 100) {
- DRM_ERROR("stuck trying to change perf mode\n");
- break;
- }
- msleep(1);
- }
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+ DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
ironlake_set_drps(dev, fstart);
@@ -5371,6 +5690,29 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+ }
+ /*
+ * Based on the hardware documentation, the following bits
+ * should be set unconditionally in order to enable FBC:
+ * bit 22 of 0x42000,
+ * bit 22 of 0x42004,
+ * bits 7, 8 and 9 of 0x42020.
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
}
return;
} else if (IS_G4X(dev)) {
@@ -5415,6 +5757,29 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
+ if (IS_IRONLAKE_M(dev)) {
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj_priv;
+ obj_priv = to_intel_bo(dev_priv->renderctx);
+ if (obj_priv) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_SET_CONTEXT);
+ OUT_RING(obj_priv->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_FLUSH);
+ ADVANCE_LP_RING();
+ }
+ } else
+ DRM_DEBUG_KMS("Failed to allocate render context."
+ "Disable RC6\n");
+ }
+
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL;
@@ -5423,7 +5788,7 @@ void intel_init_clock_gating(struct drm_device *dev)
} else {
struct drm_gem_object *pwrctx;
- pwrctx = intel_alloc_power_context(dev);
+ pwrctx = intel_alloc_context_page(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
@@ -5450,7 +5815,11 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.dpms = i9xx_crtc_dpms;
if (I915_HAS_FBC(dev)) {
- if (IS_GM45(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
@@ -5591,6 +5960,29 @@ static void intel_init_quirks(struct drm_device *dev)
}
}
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(1, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5639,6 +6031,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
+ /* Just disable it once at startup */
+ i915_disable_vga(dev);
+
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
@@ -5677,6 +6072,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = to_intel_bo(dev_priv->renderctx);
+ I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
+ I915_READ(CCID);
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(dev_priv->renderctx);
+ }
+
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5dde80f9e65..9ab8708ac6b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,9 +42,11 @@
#define DP_LINK_CONFIGURATION_SIZE 9
-#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
+#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(i) ((i)->is_pch_edp)
-struct intel_dp_priv {
+struct intel_dp {
+ struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
@@ -53,39 +55,39 @@ struct intel_dp_priv {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
- struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
};
-static void
-intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+}
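enc_to_intel_dp() above works because struct intel_dp now embeds struct intel_encoder as its base member, so container_of() can step back from the embedded member to the containing object. A simplified standalone illustration follows; the macro here omits the kernel's type checking and the struct names are made up for the example.

/* Simplified illustration of the container_of() pattern; not kernel code. */
#include <stddef.h>

#define container_of_example(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_base { int id; };
struct example_outer { int extra; struct example_base base; };

static struct example_outer *to_example_outer(struct example_base *b)
{
	return container_of_example(b, struct example_outer, base);
}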
-static void
-intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
+static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config (struct intel_encoder *intel_encoder,
- int *lane_num, int *link_bw)
+ int *lane_num, int *link_bw)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- *lane_num = dp_priv->lane_count;
- if (dp_priv->link_bw == DP_LINK_BW_1_62)
+ *lane_num = intel_dp->lane_count;
+ if (intel_dp->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
- else if (dp_priv->link_bw == DP_LINK_BW_2_7)
+ else if (intel_dp->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
}
static int
-intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
+intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
- if (dp_priv->dpcd[0] >= 0x11) {
- max_lane_count = dp_priv->dpcd[2] & 0x1f;
+ if (intel_dp->dpcd[0] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[2] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
@@ -97,10 +99,9 @@ intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
}
static int
-intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- int max_link_bw = dp_priv->dpcd[1];
+ int max_link_bw = intel_dp->dpcd[1];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -124,12 +125,11 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
-intel_dp_link_required(struct drm_device *dev,
- struct intel_encoder *intel_encoder, int pixel_clock)
+intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
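As a rough arithmetic check of the function above (illustrative figures): a 1920x1080@60 mode has a pixel clock of about 148500 kHz, so the non-eDP branch returns 148500 * 3 = 445500, i.e. roughly 445 MB/s of pixel payload that the link must be able to carry.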
@@ -146,14 +146,25 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
- int max_lanes = intel_dp_max_lane_count(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_lanes = intel_dp_max_lane_count(intel_dp);
+
+ if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ dev_priv->panel_fixed_mode) {
+ if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
- if (!IS_eDP(intel_encoder) &&
- (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
+ if (!IS_eDP(intel_dp) &&
+ (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
@@ -217,19 +228,17 @@ intel_hrawclk(struct drm_device *dev)
}
static int
-intel_dp_aux_ch(struct intel_encoder *intel_encoder,
+intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_encoder->enc.dev;
+ uint32_t output_reg = intel_dp->output_reg;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
- uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
@@ -238,7 +247,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -253,41 +262,43 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
else
precharge = 5;
+ if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ return -EBUSY;
+ }
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4) {
- uint32_t d = pack_aux(send + i, send_bytes - i);
-
- I915_WRITE(ch_data + i, d);
- }
-
- ctl = (DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl, ctl);
- (void) I915_READ(ch_ctl);
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
for (;;) {
- udelay(100);
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
+ udelay(100);
}
/* Clear done status and any errors */
- I915_WRITE(ch_ctl, (status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR));
- (void) I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -314,22 +325,19 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
if (recv_bytes > recv_size)
recv_bytes = recv_size;
- for (i = 0; i < recv_bytes; i += 4) {
- uint32_t d = I915_READ(ch_data + i);
-
- unpack_aux(d, recv + i, recv_bytes - i);
- }
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
return recv_bytes;
}
/* Write data to the aux channel in native mode */
static int
-intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -346,7 +354,7 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
- ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -361,15 +369,15 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
/* Write a single byte to the aux channel in native mode */
static int
-intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
uint16_t address, uint8_t byte)
{
- return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
+ return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
-intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -388,7 +396,7 @@ intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
reply_bytes = recv_bytes + 1;
for (;;) {
- ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
@@ -411,10 +419,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct intel_dp_priv *dp_priv = container_of(adapter,
- struct intel_dp_priv,
- adapter);
- struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
+ struct intel_dp *intel_dp = container_of(adapter,
+ struct intel_dp,
+ adapter);
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -453,7 +460,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
- ret = intel_dp_aux_ch(intel_encoder,
+ ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
@@ -481,67 +488,80 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
DRM_DEBUG_KMS("i2c_init %s\n", name);
- dp_priv->algo.running = false;
- dp_priv->algo.address = 0;
- dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
-
- memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
- dp_priv->adapter.owner = THIS_MODULE;
- dp_priv->adapter.class = I2C_CLASS_DDC;
- strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
- dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
- dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
-
- return i2c_dp_aux_add_bus(&dp_priv->adapter);
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+ intel_dp->adapter.owner = THIS_MODULE;
+ intel_dp->adapter.class = I2C_CLASS_DDC;
+ strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ intel_dp->adapter.algo_data = &intel_dp->algo;
+ intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+
+ return i2c_dp_aux_add_bus(&intel_dp->adapter);
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_encoder);
- int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ dev_priv->panel_fixed_mode) {
+ intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ mode, adjusted_mode);
+ /*
+ * the mode->clock is used to calculate the Data&Link M/N
+ * of the pipe. For the eDP the fixed clock should be used.
+ */
+ mode->clock = dev_priv->panel_fixed_mode->clock;
+ }
+
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
+ if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
<= link_avail) {
- dp_priv->link_bw = bws[clock];
- dp_priv->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
- dp_priv->link_bw, dp_priv->lane_count,
+ intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
}
}
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
/* okay we failed just pick the highest */
- dp_priv->lane_count = max_lane_count;
- dp_priv->link_bw = bws[max_clock];
- adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
- dp_priv->link_bw, dp_priv->lane_count,
+ intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
+
return true;
}
+
return false;
}
@@ -563,14 +583,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den)
}
static void
-intel_dp_compute_m_n(int bytes_per_pixel,
+intel_dp_compute_m_n(int bpp,
int nlanes,
int pixel_clock,
int link_clock,
struct intel_dp_m_n *m_n)
{
m_n->tu = 64;
- m_n->gmch_m = pixel_clock * bytes_per_pixel;
+ m_n->gmch_m = (pixel_clock * bpp) >> 3;
m_n->gmch_n = link_clock * nlanes;
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
@@ -578,6 +598,25 @@ intel_dp_compute_m_n(int bytes_per_pixel,
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
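A worked example of the ratio computed above (illustrative numbers): a 148500 kHz mode at 24 bpp over 2 lanes at link clock 270000 gives gmch_m = (148500 * 24) >> 3 = 445500 and gmch_n = 270000 * 2 = 540000, a ratio of about 0.825, i.e. roughly the fraction of the link's byte capacity that carries pixel data; intel_reduce_ratio() then only trims both values so they fit the 24-bit register fields.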
+bool intel_pch_has_edp(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_dp *intel_dp;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+ return intel_dp->is_pch_edp;
+ }
+ return false;
+}
+
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -587,24 +626,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4;
+ int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
/*
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
- struct intel_dp_priv *dp_priv;
+ struct intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- dp_priv = intel_encoder->dev_priv;
-
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
- lane_count = dp_priv->lane_count;
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = intel_dp->lane_count;
+ if (IS_PCH_eDP(intel_dp))
+ bpp = dev_priv->edp_bpp;
break;
}
}
@@ -614,7 +652,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(3, lane_count,
+ intel_dp_compute_m_n(bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
@@ -659,107 +697,114 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- struct drm_crtc *crtc = intel_encoder->enc.crtc;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_crtc *crtc = intel_dp->base.enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_VOLTAGE_0_4 |
+ intel_dp->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- dp_priv->DP |= DP_SYNC_HS_HIGH;
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- dp_priv->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
- dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
- dp_priv->DP |= DP_LINK_TRAIN_OFF;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
- switch (dp_priv->lane_count) {
+ switch (intel_dp->lane_count) {
case 1:
- dp_priv->DP |= DP_PORT_WIDTH_1;
+ intel_dp->DP |= DP_PORT_WIDTH_1;
break;
case 2:
- dp_priv->DP |= DP_PORT_WIDTH_2;
+ intel_dp->DP |= DP_PORT_WIDTH_2;
break;
case 4:
- dp_priv->DP |= DP_PORT_WIDTH_4;
+ intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
- if (dp_priv->has_audio)
- dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ if (intel_dp->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- dp_priv->link_configuration[0] = dp_priv->link_bw;
- dp_priv->link_configuration[1] = dp_priv->lane_count;
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
- dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- dp_priv->DP |= DP_ENHANCED_FRAMING;
+ if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- dp_priv->DP |= DP_PIPEB_SELECT;
+ intel_dp->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp)) {
/* don't miss the required settings for eDP */
- dp_priv->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
- dp_priv->DP |= DP_PLL_FREQ_160MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
- dp_priv->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
}
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- u32 pp, pp_status;
+ u32 pp;
- pp_status = I915_READ(PCH_PP_STATUS);
- if (pp_status & PP_ON)
+ if (I915_READ(PCH_PP_STATUS) & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
+
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
- do {
- pp_status = I915_READ(PCH_PP_STATUS);
- } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+ if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+ DRM_ERROR("panel on wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- u32 pp, pp_status;
+ u32 pp;
pp = I915_READ(PCH_PP_CONTROL);
+
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
- do {
- pp_status = I915_READ(PCH_PP_STATUS);
- } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("panel off wait timed out\n");
+ if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+ DRM_ERROR("panel off wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
/* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD;
+ pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -784,33 +829,87 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
}
+static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+}
+
+static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ udelay(200);
+}
+
+static void intel_dp_prepare(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (IS_eDP(intel_dp)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_on(dev);
+ ironlake_edp_pll_on(encoder);
+ }
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_dp);
+}
+
+static void intel_dp_commit(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (!(dp_reg & DP_PORT_EN)) {
+ intel_dp_link_train(intel_dp);
+ }
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_backlight_on(dev);
+}
+
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- if (dp_reg & DP_PORT_EN) {
- intel_dp_link_down(intel_encoder, dp_priv->DP);
- if (IS_eDP(intel_encoder)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_off(dev);
- }
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
}
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_dp);
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_pll_off(encoder);
} else {
if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_on(dev);
+ intel_dp_link_train(intel_dp);
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
- }
}
}
- dp_priv->dpms_mode = mode;
+ intel_dp->dpms_mode = mode;
}
/*
@@ -818,12 +917,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_encoder *intel_encoder,
+intel_dp_get_link_status(struct intel_dp *intel_dp,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
- ret = intel_dp_aux_native_read(intel_encoder,
+ ret = intel_dp_aux_native_read(intel_dp,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
@@ -900,7 +999,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_encoder *intel_encoder,
+intel_get_adjust_train(struct intel_dp *intel_dp,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
@@ -1036,27 +1135,23 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
-intel_dp_set_link_train(struct intel_encoder *intel_encoder,
+intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
- uint8_t train_set[4],
- bool first)
+ uint8_t train_set[4])
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
- I915_WRITE(dp_priv->output_reg, dp_reg_value);
- POSTING_READ(dp_priv->output_reg);
- if (first)
- intel_wait_for_vblank(dev);
+ I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_encoder,
+ intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_encoder,
+ ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
@@ -1065,28 +1160,33 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
}
static void
-intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+intel_dp_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
uint8_t voltage;
bool clock_recovery = false;
bool channel_eq = false;
- bool first = true;
int tries;
u32 reg;
+ uint32_t DP = intel_dp->DP;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+ /* Enable output, wait for it to become active */
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
- link_configuration, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1097,39 +1197,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
- if (!intel_dp_set_link_train(intel_encoder, reg,
- DP_TRAINING_PATTERN_1, train_set, first))
+ if (!intel_dp_set_link_train(intel_dp, reg,
+ DP_TRAINING_PATTERN_1, train_set))
break;
- first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_encoder, link_status))
+ if (!intel_dp_get_link_status(intel_dp, link_status))
break;
- if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
- for (i = 0; i < dp_priv->lane_count; i++)
+ for (i = 0; i < intel_dp->lane_count; i++)
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == dp_priv->lane_count)
+ if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
@@ -1142,7 +1241,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
}
/* channel equalization */
@@ -1152,30 +1251,29 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_encoder, reg,
- DP_TRAINING_PATTERN_2, train_set,
- false))
+ if (!intel_dp_set_link_train(intel_dp, reg,
+ DP_TRAINING_PATTERN_2, train_set))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_encoder, link_status))
+ if (!intel_dp_get_link_status(intel_dp, link_status))
break;
- if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
@@ -1185,53 +1283,53 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
break;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
++tries;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
- I915_WRITE(dp_priv->output_reg, reg);
- POSTING_READ(dp_priv->output_reg);
- intel_dp_aux_native_write_1(intel_encoder,
+ I915_WRITE(intel_dp->output_reg, reg);
+ POSTING_READ(intel_dp->output_reg);
+ intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
static void
-intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
+intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ uint32_t DP = intel_dp->DP;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
- I915_WRITE(dp_priv->output_reg, DP);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(intel_dp->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(intel_dp->output_reg);
}
udelay(17000);
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
- I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(dp_priv->output_reg);
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
}
/*
@@ -1244,41 +1342,39 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
*/
static void
-intel_dp_check_link_status(struct intel_encoder *intel_encoder)
+intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
- if (!intel_encoder->enc.crtc)
+ if (!intel_dp->base.enc.crtc)
return;
- if (!intel_dp_get_link_status(intel_encoder, link_status)) {
- intel_dp_link_down(intel_encoder, dp_priv->DP);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ intel_dp_link_down(intel_dp);
return;
}
- if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
- intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+ if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
+ intel_dp_link_train(intel_dp);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum drm_connector_status status;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_encoder,
- 0x000, dp_priv->dpcd,
- sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ if (intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
- if (dp_priv->dpcd[0] != 0)
+ if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
- dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
+ intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
return status;
}
@@ -1289,22 +1385,21 @@ ironlake_dp_detect(struct drm_connector *connector)
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+intel_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
- dp_priv->has_audio = false;
+ intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- switch (dp_priv->output_reg) {
+ switch (intel_dp->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
break;
@@ -1324,11 +1419,11 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_encoder,
- 0x000, dp_priv->dpcd,
- sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ if (intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
- if (dp_priv->dpcd[0] != 0)
+ if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
@@ -1337,20 +1432,34 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
- if (ret)
+ ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+ if (ret) {
+ if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ !dev_priv->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ dev_priv->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
+
return ret;
+ }
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1372,9 +1481,9 @@ intel_dp_destroy (struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_encoder_prepare,
+ .prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_encoder_commit,
+ .commit = intel_dp_commit,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -1390,27 +1499,17 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
-static void intel_dp_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_dp_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_encoder);
+ if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_dp);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1420,56 +1519,81 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_dp *intel_dp;
+
if (encoder->crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- return dp_priv->output_reg;
- }
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+ return intel_dp->output_reg;
}
+
return -1;
}
+/* check the VBT to see whether the eDP is on DP-D port */
+bool intel_dpd_is_edp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPD &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ struct intel_dp *intel_dp;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_dp_priv *dp_priv;
const char *name = NULL;
+ int type;
- intel_encoder = kcalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
- if (!intel_encoder)
+ intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
+ if (!intel_dp)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_dp);
return;
}
+ intel_encoder = &intel_dp->base;
+
+ if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ if (intel_dpd_is_edp(dev))
+ intel_dp->is_pch_edp = true;
- dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
+ if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else {
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ }
connector = &intel_connector->base;
- drm_connector_init(dev, connector, &intel_dp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (output_reg == DP_A)
- intel_encoder->type = INTEL_OUTPUT_EDP;
- else
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-
if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
@@ -1477,18 +1601,16 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- dp_priv->intel_encoder = intel_encoder;
- dp_priv->output_reg = output_reg;
- dp_priv->has_audio = false;
- dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
- intel_encoder->dev_priv = dp_priv;
+ intel_dp->output_reg = output_reg;
+ intel_dp->has_audio = false;
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -1523,12 +1645,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_encoder, intel_connector, name);
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
- intel_encoder->ddc_bus = &dp_priv->adapter;
+ intel_encoder->ddc_bus = &intel_dp->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A) {
+ if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2f7970be905..8828b3ac641 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,20 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+
+#define wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W) msleep(W); \
+ } \
+ ret__; \
+})
+
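/*
 * Aside (illustrative, not part of the patch): wait_for(COND, MS, W)
 * polls COND for up to MS milliseconds, sleeping W ms between checks
 * (W == 0 busy-waits), and evaluates to 0 on success or -ETIMEDOUT on
 * timeout, so callers can simply test the result:
 */
if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
	DRM_ERROR("panel on wait timed out: 0x%08x\n",
		  I915_READ(PCH_PP_STATUS));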
/*
* Display related stuff
*/
@@ -102,7 +116,6 @@ struct intel_encoder {
struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
- void *dev_priv;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
@@ -110,7 +123,6 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
- void *dev_priv;
};
struct intel_crtc;
@@ -143,8 +155,6 @@ struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
- struct drm_gem_object *cursor_bo;
- uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
bool busy; /* is scanout buffer being updated frequently? */
@@ -153,6 +163,12 @@ struct intel_crtc {
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+
+ struct drm_gem_object *cursor_bo;
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+ bool cursor_visible, cursor_on;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -160,6 +176,16 @@ struct intel_crtc {
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+ bool enable_stall_check;
+};
+
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
@@ -179,13 +205,23 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern bool intel_pch_has_edp(struct drm_crtc *crtc);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
@@ -193,7 +229,7 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank(struct drm_device *dev);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 227feca7cf8..7c9ec1472d4 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -38,7 +38,7 @@
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
-static struct intel_dvo_device intel_dvo_devices[] = {
+static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
@@ -77,20 +77,33 @@ static struct intel_dvo_device intel_dvo_devices[] = {
}
};
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+}
+
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- u32 dvo_reg = dvo->dvo_reg;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
if (mode == DRM_MODE_DPMS_ON) {
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
- dvo->dev_ops->dpms(dvo, mode);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
} else {
- dvo->dev_ops->dpms(dvo, mode);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
@@ -100,38 +113,36 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
- if (dvo->panel_fixed_mode) {
- if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
+ if (intel_dvo->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- return dvo->dev_ops->mode_valid(dvo, mode);
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (dvo->panel_fixed_mode != NULL) {
-#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
+ if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
@@ -145,8 +156,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
#undef C
}
- if (dvo->dev_ops->mode_fixup)
- return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
+ if (intel_dvo->dev.dev_ops->mode_fixup)
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
return true;
}
@@ -158,11 +169,10 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
int pipe = intel_crtc->pipe;
u32 dvo_val;
- u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
+ u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
switch (dvo_reg) {
@@ -178,7 +188,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@@ -211,43 +221,42 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*
* Unimplemented.
*/
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- return dvo->dev_ops->detect(dvo);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
-
- if (dvo->panel_fixed_mode != NULL) {
+ if (intel_dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
+ mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
+
return 0;
}
-static void intel_dvo_destroy (struct drm_connector *connector)
+static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -277,28 +286,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- }
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ kfree(intel_dvo->panel_fixed_mode);
+
+ intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
@@ -306,15 +307,13 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode (struct drm_connector *connector)
+intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- uint32_t dvo_reg = dvo->dvo_reg;
- uint32_t dvo_val = I915_READ(dvo_reg);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -327,7 +326,6 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
-
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -337,28 +335,32 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
}
}
}
+
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
- struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
- if (!intel_encoder)
+
+ intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+ if (!intel_dvo)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_dvo);
return;
}
+ intel_encoder = &intel_dvo->base;
+
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -367,10 +369,9 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
int gpio;
- dvo = &intel_dvo_devices[i];
-
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
@@ -393,11 +394,8 @@ void intel_dvo_init(struct drm_device *dev)
continue;
}
- if (dvo->dev_ops!= NULL)
- ret = dvo->dev_ops->init(dvo, i2cbus);
- else
- ret = false;
-
+ intel_dvo->dev = *dvo;
+ ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
if (!ret)
continue;
@@ -429,9 +427,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- intel_encoder->dev_priv = dvo;
- intel_encoder->i2c_bus = i2cbus;
-
drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_encoder->enc,
@@ -447,9 +442,9 @@ void intel_dvo_init(struct drm_device *dev)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
- dvo->panel_fixed_mode =
+ intel_dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
- dvo->panel_wants_dither = true;
+ intel_dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
@@ -461,6 +456,6 @@ void intel_dvo_init(struct drm_device *dev)
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
- kfree(intel_encoder);
+ kfree(intel_dvo);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 3e18c9e7729..56ad9df2ccb 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -61,6 +61,8 @@ static struct fb_ops intelfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_create(struct intel_fbdev *ifbdev,
@@ -119,7 +121,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev;
- intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ if (ret)
+ goto out_unpin;
fb = &ifbdev->ifb.base;
@@ -128,7 +132,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
strcpy(info->fix.id, "inteldrmfb");
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -146,8 +150,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
- info->flags = FBINFO_DEFAULT;
-
info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
size);
if (!info->screen_base) {
@@ -235,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_cleanup(&ifb->base);
- if (ifb->obj)
+ if (ifb->obj) {
+ drm_gem_object_handle_unreference(ifb->obj);
drm_gem_object_unreference(ifb->obj);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 83bd764b000..926934a482e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,11 +37,17 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi_priv {
+struct intel_hdmi {
+ struct intel_encoder base;
u32 sdvox_reg;
bool has_hdmi_sink;
};
+static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+}
+
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -50,16 +56,16 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
- sdvox = SDVO_ENCODING_HDMI |
- SDVO_BORDER_ENABLE |
- SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (hdmi_priv->has_hdmi_sink) {
+ if (intel_hdmi->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
if (HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
@@ -72,26 +78,25 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= SDVO_PIPE_B_SELECT;
}
- I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(hdmi_priv->sdvox_reg);
+ temp = I915_READ(intel_hdmi->sdvox_reg);
/* HW workaround: the enable bit needs to be toggled off and on for 12bpc,
* but we do this unconditionally since it proves more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
if (mode != DRM_MODE_DPMS_ON) {
@@ -100,15 +105,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp |= SDVO_ENABLE;
}
- I915_WRITE(hdmi_priv->sdvox_reg, temp);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
/* HW workaround: write this twice, as the first write may otherwise
* get masked.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(hdmi_priv->sdvox_reg, temp);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
}
@@ -134,22 +139,20 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(connector,
- intel_encoder->ddc_bus);
+ intel_hdmi->has_hdmi_sink = false;
+ edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -161,13 +164,13 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -198,18 +201,8 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
-static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- .destroy = intel_hdmi_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
@@ -218,21 +211,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_hdmi_priv *hdmi_priv;
+ struct intel_hdmi *intel_hdmi;
- intel_encoder = kcalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
- if (!intel_encoder)
+ intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
+ if (!intel_hdmi)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_hdmi);
return;
}
- hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
-
+ intel_encoder = &intel_hdmi->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -273,8 +264,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!intel_encoder->ddc_bus)
goto err_connector;
- hdmi_priv->sdvox_reg = sdvox_reg;
- intel_encoder->dev_priv = hdmi_priv;
+ intel_hdmi->sdvox_reg = sdvox_reg;
drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -297,7 +287,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(intel_hdmi);
kfree(intel_connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0eab8df5bf7..6ec39a86ed0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -41,12 +41,18 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds_priv {
+struct intel_lvds {
+ struct intel_encoder base;
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
};
+static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
+}
+
/**
* Sets the backlight level.
*
@@ -90,7 +96,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status, ctl_reg, status_reg, lvds_reg;
+ u32 ctl_reg, status_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
@@ -108,9 +114,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
- do {
- pp_status = I915_READ(status_reg);
- } while ((pp_status & PP_ON) == 0);
+ if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
+ DRM_ERROR("timed out waiting to enable LVDS pipe");
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
@@ -118,9 +123,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
~POWER_TARGET_ON);
- do {
- pp_status = I915_READ(status_reg);
- } while (pp_status & PP_ON);
+ if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
+ DRM_ERROR("timed out waiting for LVDS pipe to turn off");
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
@@ -156,31 +160,72 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static void
+centre_horizontally(struct drm_display_mode *mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ mode->crtc_hdisplay = width;
+ mode->crtc_hblank_start = width + border;
+ mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+ mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->vdisplay - height + 1) / 2;
+
+ mode->crtc_vdisplay = height;
+ mode->crtc_vblank_start = height + border;
+ mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+ mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+}
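/*
 * Worked example for the centring helpers above (panel and mode sizes
 * assumed for illustration): centre_horizontally(adjusted_mode, 1024)
 * on a 1280-wide panel timing gives
 *	border            = (1280 - 1024 + 1) / 2 = 128 (already even)
 *	crtc_hdisplay     = 1024
 *	crtc_hblank_start = 1024 + 128 = 1152
 * while the hblank and hsync widths are carried over unchanged, so the
 * sync pulse stays centred within the blanking interval.
 */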
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+ /*
+ * Floating point operations are not supported, so FACTOR is defined
+ * to avoid floating point computation when calculating the panel
+ * ratio.
+ */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
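/*
 * Worked example for panel_fitter_scaling() (values assumed for
 * illustration): with ACCURACY = 12, FACTOR = 1 << 12 = 4096, scaling a
 * 1024-pixel source dimension onto a 1280-pixel panel dimension gives
 *	ratio  = 1024 * 4096 / 1280          = 3276   (~0.8 * 4096)
 *	result = (4096 * 3276 + 2048) / 4096 = 3276
 * i.e. the source:target ratio 1024/1280 = 0.8 in 12-bit fixed point,
 * ready to be shifted into pfit_pgm_ratios via the
 * PFIT_HORIZ/VERT_SCALE_SHIFT fields.
 */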
+
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- /*
- * float point operation is not supported . So the PANEL_RATIO_FACTOR
- * is defined, which can avoid the float point computation when
- * calculating the panel ratio.
- */
-#define PANEL_RATIO_FACTOR 8192
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
- u32 pfit_control = 0, pfit_pgm_ratios = 0;
- int left_border = 0, right_border = 0, top_border = 0;
- int bottom_border = 0;
- bool border = 0;
- int panel_ratio, desired_ratio, vert_scale, horiz_scale;
- int horiz_ratio, vert_ratio;
- u32 hsync_width, vsync_width;
- u32 hblank_width, vblank_width;
- u32 hsync_pos, vsync_pos;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -199,27 +244,19 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* If we don't have a panel mode, there is nothing we can do */
if (dev_priv->panel_fixed_mode == NULL)
return true;
+
/*
- * If we have timings from the BIOS for the panel, put them in
+ * We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (dev_priv->panel_fixed_mode != NULL) {
- adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
- adjusted_mode->hsync_start =
- dev_priv->panel_fixed_mode->hsync_start;
- adjusted_mode->hsync_end =
- dev_priv->panel_fixed_mode->hsync_end;
- adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
- adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
- adjusted_mode->vsync_start =
- dev_priv->panel_fixed_mode->vsync_start;
- adjusted_mode->vsync_end =
- dev_priv->panel_fixed_mode->vsync_end;
- adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
- adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ mode, adjusted_mode);
+ return true;
}
/* Make sure pre-965s set dither correctly */
@@ -230,218 +267,86 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay) {
- pfit_pgm_ratios = 0;
- border = 0;
- goto out;
- }
-
- /* full screen scale for now */
- if (HAS_PCH_SPLIT(dev))
+ adjusted_mode->vdisplay == mode->vdisplay)
goto out;
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
- pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY;
-
- hsync_width = adjusted_mode->crtc_hsync_end -
- adjusted_mode->crtc_hsync_start;
- vsync_width = adjusted_mode->crtc_vsync_end -
- adjusted_mode->crtc_vsync_start;
- hblank_width = adjusted_mode->crtc_hblank_end -
- adjusted_mode->crtc_hblank_start;
- vblank_width = adjusted_mode->crtc_vblank_end -
- adjusted_mode->crtc_vblank_start;
- /*
- * Deal with panel fitting options. Figure out how to stretch the
- * image based on its aspect ratio & the current panel fitting mode.
- */
- panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
- adjusted_mode->vdisplay;
- desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
- mode->vdisplay;
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
/*
* Enable automatic panel scaling for non-native modes so that they fill
* the screen. Should be enabled before the pipe is enabled, according
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
- }
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
- switch (lvds_priv->fitting_mode) {
+ switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
- right_border = left_border;
- if (mode->hdisplay & 1)
- right_border++;
- top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
- bottom_border = top_border;
- if (mode->vdisplay & 1)
- bottom_border++;
- /* Set active & border values */
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- /* Keep the boder be even */
- if (right_border & 1)
- right_border++;
- /* use the border directly instead of border minuse one */
- adjusted_mode->crtc_hblank_start = mode->hdisplay +
- right_border;
- /* keep the blank width constant */
- adjusted_mode->crtc_hblank_end =
- adjusted_mode->crtc_hblank_start + hblank_width;
- /* get the hsync pos relative to hblank start */
- hsync_pos = (hblank_width - hsync_width) / 2;
- /* keep the hsync pos be even */
- if (hsync_pos & 1)
- hsync_pos++;
- adjusted_mode->crtc_hsync_start =
- adjusted_mode->crtc_hblank_start + hsync_pos;
- /* keep the hsync width constant */
- adjusted_mode->crtc_hsync_end =
- adjusted_mode->crtc_hsync_start + hsync_width;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- /* use the border instead of border minus one */
- adjusted_mode->crtc_vblank_start = mode->vdisplay +
- bottom_border;
- /* keep the vblank width constant */
- adjusted_mode->crtc_vblank_end =
- adjusted_mode->crtc_vblank_start + vblank_width;
- /* get the vsync start postion relative to vblank start */
- vsync_pos = (vblank_width - vsync_width) / 2;
- adjusted_mode->crtc_vsync_start =
- adjusted_mode->crtc_vblank_start + vsync_pos;
- /* keep the vsync width constant */
- adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vsync_start + vsync_width;
- border = 1;
+ centre_horizontally(adjusted_mode, mode->hdisplay);
+ centre_vertically(adjusted_mode, mode->vdisplay);
+ border = LVDS_BORDER_ENABLE;
break;
+
case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the spect ratio */
- pfit_control |= PFIT_ENABLE;
+ /* Scale but preserve the aspect ratio */
if (IS_I965G(dev)) {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+
+ pfit_control |= PFIT_ENABLE;
/* 965+ is easy, it does everything in hw */
- if (panel_ratio > desired_ratio)
+ if (scaled_width > scaled_height)
pfit_control |= PFIT_SCALING_PILLAR;
- else if (panel_ratio < desired_ratio)
+ else if (scaled_width < scaled_height)
pfit_control |= PFIT_SCALING_LETTER;
else
pfit_control |= PFIT_SCALING_AUTO;
} else {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
- u32 horiz_bits, vert_bits, bits = 12;
- horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
- adjusted_mode->hdisplay;
- vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
- adjusted_mode->vdisplay;
- horiz_scale = adjusted_mode->hdisplay *
- PANEL_RATIO_FACTOR / mode->hdisplay;
- vert_scale = adjusted_mode->vdisplay *
- PANEL_RATIO_FACTOR / mode->vdisplay;
-
- /* retain aspect ratio */
- if (panel_ratio > desired_ratio) { /* Pillar */
- u32 scaled_width;
- scaled_width = mode->hdisplay * vert_scale /
- PANEL_RATIO_FACTOR;
- horiz_ratio = vert_ratio;
- pfit_control |= (VERT_AUTO_SCALE |
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->vdisplay != adjusted_mode->vdisplay) {
+ u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->hdisplay != adjusted_mode->hdisplay) {
+ u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else
+ /* Aspects match, let hw scale both directions */
+ pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
- /* Pillar will have left/right borders */
- left_border = (adjusted_mode->hdisplay -
- scaled_width) / 2;
- right_border = left_border;
- if (mode->hdisplay & 1) /* odd resolutions */
- right_border++;
- /* keep the border be even */
- if (right_border & 1)
- right_border++;
- adjusted_mode->crtc_hdisplay = scaled_width;
- /* use border instead of border minus one */
- adjusted_mode->crtc_hblank_start =
- scaled_width + right_border;
- /* keep the hblank width constant */
- adjusted_mode->crtc_hblank_end =
- adjusted_mode->crtc_hblank_start +
- hblank_width;
- /*
- * get the hsync start pos relative to
- * hblank start
- */
- hsync_pos = (hblank_width - hsync_width) / 2;
- /* keep the hsync_pos be even */
- if (hsync_pos & 1)
- hsync_pos++;
- adjusted_mode->crtc_hsync_start =
- adjusted_mode->crtc_hblank_start +
- hsync_pos;
- /* keept hsync width constant */
- adjusted_mode->crtc_hsync_end =
- adjusted_mode->crtc_hsync_start +
- hsync_width;
- border = 1;
- } else if (panel_ratio < desired_ratio) { /* letter */
- u32 scaled_height = mode->vdisplay *
- horiz_scale / PANEL_RATIO_FACTOR;
- vert_ratio = horiz_ratio;
- pfit_control |= (HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- /* Letterbox will have top/bottom border */
- top_border = (adjusted_mode->vdisplay -
- scaled_height) / 2;
- bottom_border = top_border;
- if (mode->vdisplay & 1)
- bottom_border++;
- adjusted_mode->crtc_vdisplay = scaled_height;
- /* use border instead of border minus one */
- adjusted_mode->crtc_vblank_start =
- scaled_height + bottom_border;
- /* keep the vblank width constant */
- adjusted_mode->crtc_vblank_end =
- adjusted_mode->crtc_vblank_start +
- vblank_width;
- /*
- * get the vsync start pos relative to
- * vblank start
- */
- vsync_pos = (vblank_width - vsync_width) / 2;
- adjusted_mode->crtc_vsync_start =
- adjusted_mode->crtc_vblank_start +
- vsync_pos;
- /* keep the vsync width constant */
- adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vsync_start +
- vsync_width;
- border = 1;
- } else {
- /* Aspects match, Let hw scale both directions */
- pfit_control |= (VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- horiz_bits = (1 << bits) * horiz_ratio /
- PANEL_RATIO_FACTOR;
- vert_bits = (1 << bits) * vert_ratio /
- PANEL_RATIO_FACTOR;
- pfit_pgm_ratios =
- ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
- PFIT_VERT_SCALE_MASK) |
- ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
- PFIT_HORIZ_SCALE_MASK);
}
break;
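The cross-multiplication introduced above compares the two aspect ratios without a division: adjusted_mode->hdisplay * mode->vdisplay against mode->hdisplay * adjusted_mode->vdisplay. A standalone sketch of that decision for an 800x600 mode on a 1280x800 panel follows; the fixed-point helper only stands in for panel_fitter_scaling(), whose body is not part of this hunk, so its exact rounding is an assumption.

/* Illustrative userspace sketch, not driver code. */
#include <stdio.h>
#include <stdint.h>

/* Assumed 1.12 fixed-point ratio, as programmed into PFIT_PGM_RATIOS. */
static uint32_t fitter_ratio_sketch(uint32_t source, uint32_t target)
{
	return (source << 12) / target;             /* 0x1000 == 1.0 */
}

int main(void)
{
	uint32_t panel_w = 1280, panel_h = 800;     /* adjusted_mode */
	uint32_t mode_w  = 800,  mode_h  = 600;     /* requested mode */

	uint32_t scaled_width  = panel_w * mode_h;  /* 768000 */
	uint32_t scaled_height = mode_w  * panel_h; /* 640000 */

	if (scaled_width > scaled_height) {
		/* Pillarbox: 640000 / 600 = 1066 active columns, and the
		 * 600 source lines are stretched onto 800 panel lines. */
		printf("pillar: hdisplay=%u ratio=0x%03x\n",
		       scaled_height / mode_h,
		       fitter_ratio_sketch(mode_h, panel_h)); /* 0xc00 */
	} else if (scaled_width < scaled_height) {
		printf("letterbox\n");
	} else {
		printf("aspects match, auto scale\n");
	}
	return 0;
}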
@@ -458,21 +363,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
break;
+
default:
break;
}
out:
- lvds_priv->pfit_control = pfit_control;
- lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
- /*
- * When there exists the border, it means that the LVDS_BORDR
- * should be enabled.
- */
- if (border)
- dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
- else
- dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ dev_priv->lvds_border_bits = border;
+
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -518,8 +418,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+ struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
/*
* The LVDS pin pair will already have been turned on in the
@@ -535,8 +434,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
}
/**
@@ -546,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* connected and closed means disconnected. We also send hotplug events as
* needed, using lid status notification from the input layer.
*/
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
@@ -641,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* the LID notification event.
*/
if (connector)
- connector->status = connector->funcs->detect(connector);
+ connector->status = connector->funcs->detect(connector,
+ false);
+
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
return NOTIFY_OK;
@@ -691,18 +593,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct drm_encoder *encoder = connector->encoder;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+ struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
}
- if (lvds_priv->fitting_mode == value) {
+ if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- lvds_priv->fitting_mode = value;
+ intel_lvds->fitting_mode = value;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -738,19 +639,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.destroy = intel_lvds_destroy,
};
-
-static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
- .destroy = intel_lvds_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
@@ -934,13 +824,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds *intel_lvds;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
- struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -963,20 +853,20 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_encoder = kzalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_lvds_priv), GFP_KERNEL);
- if (!intel_encoder) {
+ intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
+ if (!intel_lvds) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_lvds);
return;
}
- connector = &intel_connector->base;
+ intel_encoder = &intel_lvds->base;
encoder = &intel_encoder->enc;
+ connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -988,16 +878,12 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
- if (IS_I965G(dev))
- intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
- intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
@@ -1007,7 +893,7 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1115,6 +1001,6 @@ failed:
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ kfree(intel_lvds);
kfree(intel_connector);
}
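The open-coded border arithmetic removed from the centred and pillar/letterbox paths above is now delegated to centre_horizontally() and centre_vertically(). Their definitions are not included in these hunks, so the sketch below only illustrates the arithmetic they are assumed to encapsulate: shrink the active region, add symmetric borders, and shift the blanking and sync windows so their widths stay constant.

/* Userspace sketch; the struct and helper are local to this example. */
#include <stdio.h>

struct timing {
	int crtc_hdisplay;
	int crtc_hblank_start, crtc_hblank_end;
	int crtc_hsync_start, crtc_hsync_end;
};

static void centre_horizontally_sketch(struct timing *t, int width)
{
	int blank_width = t->crtc_hblank_end - t->crtc_hblank_start;
	int sync_width  = t->crtc_hsync_end  - t->crtc_hsync_start;
	int sync_pos    = (blank_width - sync_width + 1) / 2;
	int border      = (t->crtc_hdisplay - width + 1) / 2;

	t->crtc_hdisplay     = width;
	t->crtc_hblank_start = width + border;
	t->crtc_hblank_end   = t->crtc_hblank_start + blank_width;
	t->crtc_hsync_start  = t->crtc_hblank_start + sync_pos;
	t->crtc_hsync_end    = t->crtc_hsync_start  + sync_width;
}

int main(void)
{
	struct timing t = { 1280, 1328, 1440, 1344, 1376 }; /* 1280-wide panel */

	centre_horizontally_sketch(&t, 1024);  /* centre a 1024-wide mode */
	printf("%d %d %d %d %d\n", t.crtc_hdisplay,
	       t.crtc_hblank_start, t.crtc_hblank_end,
	       t.crtc_hsync_start, t.crtc_hsync_end);
	return 0;
}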
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7ad5139d17..1d306a458be 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
+
+#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -65,7 +67,7 @@
#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
-#define OCMD_BUF_TYPE_MASK (Ox1<<5)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
#define OCMD_BUF_TYPE_FRAME (0x0<<5)
#define OCMD_BUF_TYPE_FIELD (0x1<<5)
#define OCMD_TEST_MODE (0x1<<4)
@@ -185,7 +187,8 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
if (OVERLAY_NONPHYSICAL(overlay->dev)) {
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset);
+ overlay->reg_bo->gtt_offset,
+ KM_USER0);
if (!regs) {
DRM_ERROR("failed to map overlay regs in GTT\n");
@@ -200,7 +203,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{
if (OVERLAY_NONPHYSICAL(overlay->dev))
- io_mapping_unmap_atomic(overlay->virt_addr);
+ io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
overlay->virt_addr = NULL;
@@ -958,7 +961,7 @@ static int check_overlay_src(struct drm_device *dev,
|| rec->src_width < N_HORIZ_Y_TAPS*4)
return -EINVAL;
- /* check alingment constrains */
+ /* check alignment constraints */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
case I915_OVERLAY_RGB:
/* not implemented */
@@ -990,7 +993,10 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
/* stride checking */
- stride_mask = 63;
+ if (IS_I830(dev) || IS_845G(dev))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
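The stride mask is alignment minus one: 63 enforces 64-byte alignment, and 255 enforces 256-byte alignment on 830/845. A small illustration of the check:

/* Standalone illustration of the mask-based alignment test. */
#include <stdio.h>

static int stride_ok(unsigned int stride, unsigned int align)
{
	return (stride & (align - 1)) == 0;   /* low bits must be clear */
}

int main(void)
{
	printf("%d %d %d\n",
	       stride_ok(1024, 64),    /* 1: multiple of 64   */
	       stride_ok(1000, 64),    /* 0: not aligned      */
	       stride_ok(1024, 256));  /* 1: also 256-aligned */
	return 0;
}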
@@ -1363,7 +1369,8 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->flip_addr = overlay->reg_bo->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS);
+ I915_GEM_PHYS_OVERLAY_REGS,
+ 0);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
@@ -1412,3 +1419,99 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
}
+
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay_error_state *error;
+ struct overlay_registers __iomem *regs;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+ if (OVERLAY_NONPHYSICAL(overlay->dev))
+ error->base = (long) overlay->reg_bo->gtt_offset;
+ else
+ error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ return error;
+
+err:
+ kfree(error);
+ return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
+{
+ seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ seq_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
+}
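The P() macro above relies on stringizing its argument so every register is printed next to its field name. The same trick in a self-contained form:

/* Standalone demonstration of the stringize-and-print pattern. */
#include <stdio.h>

struct regs { unsigned int OBUF_0Y, OSTRIDE, OCMD; };

int main(void)
{
	struct regs r = { 0x00000100, 0x00000500, 0x00000003 };

#define P(x) printf("  " #x ": 0x%08x\n", r.x)
	P(OBUF_0Y);
	P(OSTRIDE);
	P(OCMD);
#undef P
	return 0;
}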
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 00000000000..e7f5299d9d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "intel_drv.h"
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int x, y, width, height;
+
+ x = y = width = height = 0;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto done;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1)/2;
+ y = (adjusted_mode->vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / mode->hdisplay;
+ y = (adjusted_mode->vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ }
+ }
+ break;
+
+ default:
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ break;
+ }
+
+done:
+ dev_priv->pch_pf_pos = (x << 16) | y;
+ dev_priv->pch_pf_size = (width << 16) | height;
+}
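On PCH platforms the aspect decision ends up as a panel-fitter window rather than CRTC borders: the position and size are packed as (x << 16 | y) and (width << 16 | height). A standalone sketch of the letterbox branch above, for a 1280x720 mode on a 1920x1200 panel:

/* Illustrative userspace arithmetic only, not driver code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t panel_w = 1920, panel_h = 1200;     /* adjusted_mode */
	uint32_t mode_w  = 1280, mode_h  = 720;      /* requested mode */
	uint32_t x, y, width, height;

	uint32_t scaled_width  = panel_w * mode_h;   /* 1382400 */
	uint32_t scaled_height = mode_w  * panel_h;  /* 1536000 */

	if (scaled_width < scaled_height) {          /* letterbox */
		height = scaled_width / mode_w;      /* 1080 */
		y = (panel_h - height + 1) / 2;      /* 60 */
		x = 0;
		width = panel_w;
	} else {                                     /* pillar / exact fit */
		x = y = 0;
		width = panel_w;
		height = panel_h;
	}

	printf("pf_pos  = 0x%08x\n", (x << 16) | y);          /* 0x0000003c */
	printf("pf_size = 0x%08x\n", (width << 16) | height); /* 0x07800438 */
	return 0;
}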
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 26362f8495a..cb3508f78bc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,18 +33,35 @@
#include "i915_drm.h"
#include "i915_trace.h"
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno;
+
+ seqno = dev_priv->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++dev_priv->next_seqno == 0)
+ dev_priv->next_seqno = 1;
+
+ return seqno;
+}
+
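Sequence numbers are now allocated centrally from dev_priv->next_seqno, and zero stays reserved so it can keep meaning "no request". A tiny sketch of the wrap behaviour:

/* Standalone sketch: the counter skips 0 when it wraps. */
#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 0xffffffffu;   /* about to wrap */

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	if (++next_seqno == 0)                  /* reserve 0 for non-seqno */
		next_seqno = 1;
	return seqno;
}

int main(void)
{
	printf("%u %u\n", get_seqno(), get_seqno());   /* 4294967295 1 */
	return 0;
}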
static void
render_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 cmd;
+
#if WATCH_EXEC
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
- u32 cmd;
- trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+ trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
invalidate_domains, flush_domains);
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -203,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
+ int mode;
+
if (IS_I9XX(dev) && !IS_GEN3(dev)) {
- I915_WRITE(MI_MODE,
- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ if (IS_GEN6(dev))
+ mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+ I915_WRITE(MI_MODE, mode);
}
return ret;
}
@@ -233,9 +254,10 @@ render_ring_add_request(struct drm_device *dev,
struct drm_file *file_priv,
u32 flush_domains)
{
- u32 seqno;
drm_i915_private_t *dev_priv = dev->dev_private;
- seqno = intel_ring_get_seqno(dev, ring);
+ u32 seqno;
+
+ seqno = i915_gem_get_seqno(dev);
if (IS_GEN6(dev)) {
BEGIN_LP_RING(6);
@@ -405,7 +427,9 @@ bsd_ring_add_request(struct drm_device *dev,
u32 flush_domains)
{
u32 seqno;
- seqno = intel_ring_get_seqno(dev, ring);
+
+ seqno = i915_gem_get_seqno(dev);
+
intel_ring_begin(dev, ring, 4);
intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(dev, ring,
@@ -479,7 +503,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+ trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
count = nbox ? nbox : 1;
@@ -515,7 +539,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_FLUSH |
+ MI_NO_WRITE_FLUSH |
+ MI_INVALIDATE_ISP);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
/* XXX breadcrumb */
+
return 0;
}
@@ -588,9 +621,10 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- int ret;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
+ int ret;
+
ring->dev = dev;
if (I915_NEED_GFX_HWS(dev)) {
@@ -603,16 +637,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
- goto cleanup;
+ goto err_hws;
}
ring->gem_object = obj;
ret = i915_gem_object_pin(obj, ring->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- goto cleanup;
- }
+ if (ret)
+ goto err_unref;
obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
@@ -624,18 +656,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
drm_core_ioremap_wc(&ring->map, dev);
if (ring->map.handle == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
ret = -EINVAL;
- goto cleanup;
+ goto err_unpin;
}
ring->virtual_start = ring->map.handle;
ret = ring->init(dev, ring);
- if (ret != 0) {
- intel_cleanup_ring_buffer(dev, ring);
- return ret;
- }
+ if (ret)
+ goto err_unmap;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
@@ -649,7 +677,15 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
return ret;
-cleanup:
+
+err_unmap:
+ drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+ ring->gem_object = NULL;
+err_hws:
cleanup_status_page(dev, ring);
return ret;
}
@@ -682,9 +718,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 4;
- while (rem--)
+ rem /= 8;
+ while (rem--) {
+ *virt++ = MI_NOOP;
*virt++ = MI_NOOP;
+ }
ring->tail = 0;
ring->space = ring->head - 8;
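The wrap path now fills the leftover space with MI_NOOP two dwords at a time (rem /= 8), i.e. in qword units. In standalone form; MI_NOOP's encoding is assumed here:

/* Userspace model of the qword padding loop. */
#include <stdint.h>
#include <stdio.h>

#define MI_NOOP 0u   /* value assumed for the sketch */

int main(void)
{
	uint32_t ring[8] = { 0 };
	uint32_t *virt = ring;
	unsigned int rem = sizeof(ring);   /* bytes left before the wrap */

	rem /= 8;                          /* qwords, not dwords */
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	printf("padded %u dwords\n", (unsigned int)(virt - ring));   /* 8 */
	return 0;
}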
@@ -729,21 +767,14 @@ void intel_ring_begin(struct drm_device *dev,
intel_wrap_ring_buffer(dev, ring);
if (unlikely(ring->space < n))
intel_wait_ring_buffer(dev, ring, n);
-}
-void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring, unsigned int data)
-{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
- ring->tail += 4;
- ring->tail &= ring->size - 1;
- ring->space -= 4;
+ ring->space -= n;
}
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ ring->tail &= ring->size - 1;
ring->advance_ring(dev, ring);
}
@@ -762,18 +793,6 @@ void intel_fill_struct(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
-u32 intel_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- u32 seqno;
- seqno = ring->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++ring->next_seqno == 0)
- ring->next_seqno = 1;
- return seqno;
-}
-
struct intel_ring_buffer render_ring = {
.name = "render ring",
.regs = {
@@ -791,7 +810,6 @@ struct intel_ring_buffer render_ring = {
.head = 0,
.tail = 0,
.space = 0,
- .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
@@ -830,7 +848,6 @@ struct intel_ring_buffer bsd_ring = {
.head = 0,
.tail = 0,
.space = 0,
- .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d5568d3766d..525e7d3edda 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -26,7 +26,6 @@ struct intel_ring_buffer {
unsigned int head;
unsigned int tail;
unsigned int space;
- u32 next_seqno;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seen at irq time */
@@ -106,8 +105,16 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
struct intel_ring_buffer *ring, int n);
-void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring, u32 data);
+
+static inline void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ unsigned int data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+}
+
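With intel_ring_begin() reserving the space up front (ring->space -= n) and intel_ring_advance() masking the tail, intel_ring_emit() reduces to a store plus a tail bump and can live in the header as an inline. A minimal userspace model of that emit path:

/* Illustrative ring-write model; sizes and commands are arbitrary. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64   /* bytes, power of two */

static uint8_t  ring[RING_SIZE];
static unsigned int tail;

static void ring_emit(uint32_t data)
{
	*(uint32_t *)(ring + tail) = data;   /* store into the mapped ring */
	tail += 4;
}

static void ring_advance(void)
{
	tail &= RING_SIZE - 1;               /* wrap, as intel_ring_advance does */
}

int main(void)
{
	ring_emit(0xdeadbeef);   /* arbitrary command dword */
	ring_emit(0);            /* padding no-op */
	ring_advance();
	printf("tail=%u\n", tail);   /* 8 */
	return 0;
}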
void intel_fill_struct(struct drm_device *dev,
struct intel_ring_buffer *ring,
void *data,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 03c231be227..ee73e428a84 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,8 +31,8 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
-#include "intel_drv.h"
#include "drm_edid.h"
+#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
@@ -47,9 +47,10 @@
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
-static char *tv_format_names[] = {
+static const char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
"PAL_H" , "PAL_I" , "PAL_M" ,
@@ -61,7 +62,9 @@ static char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-struct intel_sdvo_priv {
+struct intel_sdvo {
+ struct intel_encoder base;
+
u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
@@ -95,7 +98,7 @@ struct intel_sdvo_priv {
bool is_tv;
/* This is for current tv format name */
- char *tv_format_name;
+ int tv_format_index;
/**
* This is set if we treat the device as HDMI, instead of DVI.
@@ -132,37 +135,40 @@ struct intel_sdvo_priv {
};
struct intel_sdvo_connector {
+ struct intel_connector base;
+
/* Mark the type of connector */
uint16_t output_flag;
/* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
+ u8 tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+ struct drm_property *tv_format;
/* add the property for the SDVO-TV */
- struct drm_property *left_property;
- struct drm_property *right_property;
- struct drm_property *top_property;
- struct drm_property *bottom_property;
- struct drm_property *hpos_property;
- struct drm_property *vpos_property;
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
/* add the property for the SDVO-TV/LVDS */
- struct drm_property *brightness_property;
- struct drm_property *contrast_property;
- struct drm_property *saturation_property;
- struct drm_property *hue_property;
+ struct drm_property *brightness;
/* Add variable to record current setting for the above property */
u32 left_margin, right_margin, top_margin, bottom_margin;
+
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
u32 max_hpos, cur_hpos;
@@ -171,36 +177,54 @@ struct intel_sdvo_connector {
u32 cur_contrast, max_contrast;
u32 cur_saturation, max_saturation;
u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
};
+static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
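enc_to_intel_sdvo() and to_intel_sdvo_connector() recover the SDVO-specific state with container_of() instead of the old dev_priv side allocation: struct intel_sdvo simply embeds struct intel_encoder as its first member. The pattern in a self-contained form (container_of is simplified here, and the names are generic rather than driver symbols):

/* Userspace sketch of the embed-and-downcast pattern. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder { int id; };

struct outer_encoder {
	struct base_encoder base;    /* shared part seen by common code */
	int private_state;           /* replaces the old dev_priv blob */
};

static struct outer_encoder *to_outer(struct base_encoder *enc)
{
	return container_of(enc, struct outer_encoder, base);
}

int main(void)
{
	struct outer_encoder o = { .base = { .id = 1 }, .private_state = 42 };
	struct base_encoder *enc = &o.base;            /* what core code hands back */

	printf("%d\n", to_outer(enc)->private_state);  /* 42 */
	return 0;
}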
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
- uint16_t flags);
-static void
-intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
-static void
-intel_sdvo_create_enhance_property(struct drm_connector *connector);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_encoder->enc.dev;
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
- if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
- I915_WRITE(sdvo_priv->sdvo_reg, val);
- I915_READ(sdvo_priv->sdvo_reg);
+ if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ I915_READ(intel_sdvo->sdvo_reg);
return;
}
- if (sdvo_priv->sdvo_reg == SDVOB) {
+ if (intel_sdvo->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
@@ -219,33 +243,27 @@ static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
}
}
-static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
- u8 *ch)
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- u8 out_buf[2];
+ u8 out_buf[2] = { addr, 0 };
u8 buf[2];
- int ret;
-
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
+ int ret;
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -255,35 +273,26 @@ static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
return false;
}
-static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
- u8 ch)
+static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- u8 out_buf[2];
+ u8 out_buf[2] = { addr, ch };
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
}
};
- out_buf[0] = addr;
- out_buf[1] = ch;
-
- if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
- {
- return true;
- }
- return false;
+ return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
}
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
u8 cmd;
- char *name;
+ const char *name;
} sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -328,13 +337,14 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
@@ -353,6 +363,27 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -377,17 +408,15 @@ static const struct _sdvo_cmd_name {
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
-static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
- void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(sdvo_priv), cmd);
+ SDVO_NAME(intel_sdvo), cmd);
for (i = 0; i < args_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
@@ -403,19 +432,20 @@ static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
DRM_LOG_KMS("\n");
}
-static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
- void *args, int args_len)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
int i;
- intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]);
+ if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
+ ((u8*)args)[i]))
+ return false;
}
- intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
+ return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
}
static const char *cmd_status_names[] = {
@@ -428,14 +458,13 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
+static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len,
u8 status)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
for (i = 0; i < response_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
for (; i < 8; i++)
@@ -447,8 +476,8 @@ static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
DRM_LOG_KMS("\n");
}
-static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
- void *response, int response_len)
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
{
int i;
u8 status;
@@ -457,24 +486,26 @@ static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
- intel_sdvo_read_byte(intel_encoder,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]);
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ return false;
}
/* read the return status */
- intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
- &status);
+ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+ &status))
+ return false;
- intel_sdvo_debug_response(intel_encoder, response, response_len,
+ intel_sdvo_debug_response(intel_sdvo, response, response_len,
status);
if (status != SDVO_CMD_STATUS_PENDING)
- return status;
+ break;
mdelay(50);
}
- return status;
+ return status == SDVO_CMD_STATUS_SUCCESS;
}
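intel_sdvo_read_response() now folds the whole poll loop into a bool: keep re-reading while the device reports pending, then report success or failure once. The same pattern in a standalone form (retry count, delay and status codes are illustrative, not the driver's values):

/* Poll-until-not-pending sketch with a stubbed status source. */
#include <stdbool.h>
#include <stdio.h>

enum status { PENDING, SUCCESS, FAILURE };

static int polls;

static enum status read_status_stub(void)   /* stands in for the i2c read */
{
	return ++polls < 3 ? PENDING : SUCCESS;
}

static bool wait_for_response(void)
{
	int retry = 50;
	enum status st = FAILURE;

	while (retry--) {
		st = read_status_stub();
		if (st != PENDING)
			break;               /* finished, success or not */
		/* the driver sleeps here (mdelay(50)) before retrying */
	}
	return st == SUCCESS;                /* collapse status into a bool */
}

int main(void)
{
	printf("%d after %d polls\n", wait_for_response(), polls);   /* 1 after 3 */
	return 0;
}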
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -494,37 +525,36 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
* another I2C transaction after issuing the DDC bus switch, it will be
* switched to the internal SDVO register.
*/
-static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
+static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
u8 target)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
},
/* the following two are to read the response */
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = cmd_buf,
},
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = ret_value,
},
};
- intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target, 1);
/* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
+ intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
out_buf[0] = SDVO_I2C_OPCODE;
out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -533,7 +563,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode
ret_value[0] = 0;
ret_value[1] = 0;
- ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
+ ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
if (ret != 3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -547,23 +577,29 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode
return;
}
-static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- struct intel_sdvo_set_target_input_args targets = {0};
- u8 status;
-
- if (target_0 && target_1)
- return SDVO_CMD_STATUS_NOTSUPP;
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
- if (target_1)
- targets.target_1 = 1;
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
- sizeof(targets));
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
- return (status == SDVO_CMD_STATUS_SUCCESS);
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
}
/**
@@ -572,14 +608,12 @@ static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, boo
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
- u8 status;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
return false;
*input_1 = response.input0_trained;
@@ -587,21 +621,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
return true;
}
-static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
u16 outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
- sizeof(outputs));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
- u8 status, state = SDVO_ENCODER_STATE_ON;
+ u8 state = SDVO_ENCODER_STATE_ON;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -618,88 +649,63 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encod
break;
}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
- sizeof(state));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
- NULL, 0);
-
- status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
return false;
/* Convert the values from units of 10 kHz to kHz. */
*clock_min = clocks.min * 10;
*clock_max = clocks.max * 10;
-
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
u16 outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
- sizeof(outputs));
-
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
}
-static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
-static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_encoder,
+ return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_encoder,
+ return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- uint8_t status;
memset(&args, 0, sizeof(args));
args.clock = clock;
@@ -707,59 +713,32 @@ intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
args.height = height;
args.interlace = 0;
- if (sdvo_priv->is_lvds &&
- (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
- sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
+ if (intel_sdvo->is_lvds &&
+ (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
- &args, sizeof(args));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- bool status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
- NULL, 0);
-
- status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
- NULL, 0);
-
- status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return false;
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
}
-static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
@@ -808,7 +787,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
}
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
- struct intel_sdvo_dtd *dtd)
+ const struct intel_sdvo_dtd *dtd)
{
mode->hdisplay = dtd->part1.h_active;
mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
@@ -840,45 +819,33 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_encode *encode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
- if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
- }
+ if (intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ encode, sizeof(*encode)))
+ return true;
- return true;
+ /* non-support means DVI */
+ memset(encode, 0, sizeof(*encode));
+ return false;
}
-static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
uint8_t mode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}
-static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
uint8_t mode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
int i, j;
uint8_t set_buf_index[2];
@@ -887,8 +854,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
- intel_sdvo_read_response(encoder, &av_split, 1);
+ intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
@@ -908,7 +874,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
}
#endif
-static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
int index,
uint8_t *data, int8_t size, uint8_t tx_rate)
{
@@ -917,15 +883,18 @@ static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
set_buf_index[0] = index;
set_buf_index[1] = 0;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
for (; size > 0; size -= 8) {
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
+ return false;
+
data += 8;
}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}
static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1000,7 +969,7 @@ struct dip_infoframe {
} __attribute__ ((packed)) u;
} __attribute__((packed));
-static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct drm_display_mode * mode)
{
struct dip_infoframe avi_if = {
@@ -1011,133 +980,107 @@ static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
4 + avi_if.len);
- intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
- 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
+ 4 + avi_if.len,
+ SDVO_HBUF_TX_VSYNC);
}
-static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
{
-
struct intel_sdvo_tv_format format;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- uint32_t format_map, i;
- uint8_t status;
+ uint32_t format_map;
- for (i = 0; i < TV_FORMAT_NUM; i++)
- if (tv_format_names[i] == sdvo_priv->tv_format_name)
- break;
-
- format_map = 1 << i;
+ format_map = 1 << intel_sdvo->tv_format_index;
memset(&format, 0, sizeof(format));
- memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
- sizeof(format) : sizeof(format_map));
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
- sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- DRM_DEBUG_KMS("%s: Failed to set TV format\n",
- SDVO_NAME(sdvo_priv));
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
}
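The TV format is selected by setting a single bit, indexed by the stored format index, and copying at most sizeof(format_map) bytes into the 6-byte command payload; BUILD_BUG_ON() pins that payload size at compile time. A standalone sketch of the packing (the payload layout and endianness are host assumptions here):

/* Illustrative packing of a one-hot format selection. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tv_format_payload { uint8_t bytes[6]; };   /* assumed 6-byte layout */

int main(void)
{
	unsigned int format_index = 3;                /* PAL_B in the table above */
	uint32_t format_map = 1u << format_index;
	struct tv_format_payload format;
	size_t i, n;

	memset(&format, 0, sizeof(format));
	n = sizeof(format_map) < sizeof(format) ? sizeof(format_map) : sizeof(format);
	memcpy(&format, &format_map, n);

	for (i = 0; i < sizeof(format.bytes); i++)
		printf("%02x ", format.bytes[i]);     /* 08 00 00 00 00 00 (LE host) */
	printf("\n");
	return 0;
}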
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_dtd output_dtd;
- if (dev_priv->is_tv) {
- struct intel_sdvo_dtd output_dtd;
- bool success;
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
- /* We need to construct preferred input timings based on our
- * output timings. To do that, we have to set the output
- * timings, even though this isn't really the right place in
- * the sequence to do it. Oh well.
- */
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
+ return true;
+}
- /* Set output timings */
- intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- intel_sdvo_set_target_output(intel_encoder,
- dev_priv->attached_output);
- intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
+static bool
+intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo_dtd input_dtd;
- /* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
- success = intel_sdvo_create_preferred_input_timing(intel_encoder,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay);
- if (success) {
- struct intel_sdvo_dtd input_dtd;
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &input_dtd))
+ return false;
- intel_sdvo_get_preferred_input_timing(intel_encoder,
- &input_dtd);
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ mode->clock = adjusted_mode->clock;
+ return true;
+}
- mode->clock = adjusted_mode->clock;
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- adjusted_mode->clock *=
- intel_sdvo_get_pixel_multiplier(mode);
- } else {
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
- }
- } else if (dev_priv->is_lvds) {
- struct intel_sdvo_dtd output_dtd;
- bool success;
-
- drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
- /* Set output timings */
- intel_sdvo_get_dtd_from_mode(&output_dtd,
- dev_priv->sdvo_lvds_fixed_mode);
-
- intel_sdvo_set_target_output(intel_encoder,
- dev_priv->attached_output);
- intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
-
- /* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
-
-
- success = intel_sdvo_create_preferred_input_timing(
- intel_encoder,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay);
-
- if (success) {
- struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_preferred_input_timing(intel_encoder,
- &input_dtd);
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (intel_sdvo->is_lvds) {
+ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- mode->clock = adjusted_mode->clock;
-
- adjusted_mode->clock *=
- intel_sdvo_get_pixel_multiplier(mode);
- } else {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+ intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- }
- } else {
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
}
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+
return true;
}
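The helper behind adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode) is not shown in this hunk. As a hedged illustration only, a stand-alone sketch of a multiplier chosen from the dot clock could look like the following; the 100 MHz / 50 MHz thresholds and the sketch_ name are assumptions for the example, not a quote of the driver.

#include <stdio.h>

/* Illustrative only: low dot clocks are sent 2x or 4x over the SDVO bus so
 * the link rate stays in range. Clocks are in kHz, as in DRM mode structs. */
static int sketch_pixel_multiplier(int clock_khz)
{
	if (clock_khz >= 100000)
		return 1;
	if (clock_khz >= 50000)
		return 2;
	return 4;
}

int main(void)
{
	int clocks[] = { 25175, 65000, 148500 };
	for (unsigned i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%d kHz -> x%d (bus clock %d kHz)\n",
		       clocks[i], sketch_pixel_multiplier(clocks[i]),
		       clocks[i] * sketch_pixel_multiplier(clocks[i]));
	return 0;
}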
@@ -1149,13 +1092,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
u32 sdvox = 0;
- int sdvo_pixel_multiply;
+ int sdvo_pixel_multiply, rate;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
- u8 status;
if (!mode)
return;
@@ -1166,41 +1107,46 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->attached_output;
+ in_out.in0 = intel_sdvo->attached_output;
in_out.in1 = 0;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (sdvo_priv->is_hdmi) {
- intel_sdvo_set_avi_infoframe(intel_encoder, mode);
+ if (intel_sdvo->is_hdmi) {
+ if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+ return;
+
sdvox |= SDVO_AUDIO_ENABLE;
}
/* We have tried to get the input timing in mode_fixup and filled it into
adjusted_mode */
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
- } else
- intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
/* If it's a TV, we already set the output timing in mode_fixup.
 * Otherwise, the output timing is equal to the input timing.
 */
- if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
+ if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
/* Set the output timing to the screen */
- intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->attached_output);
- intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
+ (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
- if (sdvo_priv->is_tv)
- intel_sdvo_set_tv_format(intel_encoder);
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_tv_format(intel_sdvo))
+ return;
+ }
/* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
@@ -1217,32 +1163,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_input_timing(encoder, &input_dtd);
}
#else
- intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
+ (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
#endif
- switch (intel_sdvo_get_pixel_multiplier(mode)) {
- case 1:
- intel_sdvo_set_clock_rate_mult(intel_encoder,
- SDVO_CLOCK_RATE_MULT_1X);
- break;
- case 2:
- intel_sdvo_set_clock_rate_mult(intel_encoder,
- SDVO_CLOCK_RATE_MULT_2X);
- break;
- case 4:
- intel_sdvo_set_clock_rate_mult(intel_encoder,
- SDVO_CLOCK_RATE_MULT_4X);
- break;
+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+ switch (sdvo_pixel_multiply) {
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
}
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
/* Set the SDVO control regs. */
if (IS_I965G(dev)) {
- sdvox |= SDVO_BORDER_ENABLE |
- SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ sdvox |= SDVO_BORDER_ENABLE;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(sdvo_priv->sdvo_reg);
- switch (sdvo_priv->sdvo_reg) {
+ sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
@@ -1255,7 +1197,6 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
if (IS_I965G(dev)) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
@@ -1264,28 +1205,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(intel_encoder, sdvox);
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
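To make the register assembly at the end of intel_sdvo_mode_set() easier to follow, here is a stand-alone sketch of composing such a port value from the mode flags, the pipe, and the pixel multiplier. All SKETCH_/FLAG_ bit positions below are invented for the example; the real SDVOB/SDVOC layout lives in i915_reg.h and is not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Made-up bit positions for the sketch only. */
#define SKETCH_ENABLE		(1u << 31)
#define SKETCH_BORDER_ENABLE	(1u << 7)
#define SKETCH_VSYNC_HIGH	(1u << 4)
#define SKETCH_HSYNC_HIGH	(1u << 3)
#define SKETCH_PIPE_B		(1u << 30)
#define SKETCH_MULT_SHIFT	23

#define FLAG_PHSYNC		(1u << 0)	/* stand-ins for DRM_MODE_FLAG_* */
#define FLAG_PVSYNC		(1u << 1)

static uint32_t sketch_sdvox(uint32_t mode_flags, int pipe, int pixel_mult)
{
	uint32_t v = SKETCH_ENABLE | SKETCH_BORDER_ENABLE;

	/* Sync polarity follows the adjusted mode, as in the hunk above. */
	if (mode_flags & FLAG_PVSYNC)
		v |= SKETCH_VSYNC_HIGH;
	if (mode_flags & FLAG_PHSYNC)
		v |= SKETCH_HSYNC_HIGH;
	if (pipe == 1)
		v |= SKETCH_PIPE_B;
	/* Pre-965 parts encode (multiplier - 1) in the port register. */
	v |= (uint32_t)(pixel_mult - 1) << SKETCH_MULT_SHIFT;
	return v;
}

int main(void)
{
	printf("sdvox = 0x%08x\n",
	       (unsigned)sketch_sdvox(FLAG_PHSYNC | FLAG_PVSYNC, 1, 2));
	return 0;
}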
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_encoder, 0);
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
- intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(sdvo_priv->sdvo_reg);
+ temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
} else {
@@ -1293,28 +1234,25 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
int i;
u8 status;
- temp = I915_READ(sdvo_priv->sdvo_reg);
+ temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
-
- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
- &input2);
-
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's
* a given that if the status is a success, we succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
+ "sync\n", SDVO_NAME(intel_sdvo));
}
if (0)
- intel_sdvo_set_encoder_power_state(intel_encoder, mode);
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
return;
}
@@ -1323,42 +1261,31 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (sdvo_priv->pixel_clock_min > mode->clock)
+ if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
- if (sdvo_priv->pixel_clock_max < mode->clock)
+ if (intel_sdvo->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
- if (sdvo_priv->is_lvds == true) {
- if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
- return MODE_PANEL;
-
- if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
+ if (intel_sdvo->is_lvds) {
+ if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
}
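The one-line body above leans on the bool-returning intel_sdvo_get_value() helper this patch introduces. A hedged sketch of the write-then-read pattern such a helper wraps, with stub_ stand-ins in place of the actual SDVO command channel:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the device I/O; a real driver talks over the SDVO/I2C
 * command channel and checks a status byte instead. */
static bool stub_write_cmd(unsigned char cmd, const void *args, int len)
{
	(void)args; (void)len;
	printf("write cmd 0x%02x\n", cmd);
	return true;	/* pretend the command was accepted */
}

static bool stub_read_response(void *value, int len)
{
	memset(value, 0xab, len);	/* pretend the device answered */
	return true;
}

/* The shape of the helper: issue the GET command with no arguments, then
 * read the reply into the caller's buffer; both steps must succeed. */
static bool sketch_get_value(unsigned char cmd, void *value, int len)
{
	return stub_write_cmd(cmd, NULL, 0) &&
	       stub_read_response(value, len);
}

int main(void)
{
	unsigned char caps[8];
	if (sketch_get_value(0x5d /* hypothetical opcode */, caps, sizeof(caps)))
		printf("got %zu bytes, first 0x%02x\n",
		       sizeof(caps), (unsigned)caps[0]);
	return 0;
}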
/* No use! */
@@ -1366,12 +1293,12 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
- struct intel_encoder *iout = NULL;
- struct intel_sdvo_priv *sdvo;
+ struct intel_sdvo *iout = NULL;
+ struct intel_sdvo *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_encoder(connector);
+ iout = to_intel_sdvo(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
@@ -1393,75 +1320,69 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
- struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
- intel_encoder = to_intel_encoder(connector);
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo = to_intel_sdvo(connector);
- if (response[0] !=0)
- return 1;
-
- return 0;
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
}
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &response, 2);
if (on) {
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_sdvo, &response, 2);
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_encoder, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &response, 2);
}
#endif
static bool
-intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int caps = 0;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
caps++;
- if (sdvo_priv->caps.output_flags &
+ if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
caps++;
@@ -1473,11 +1394,11 @@ intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ intel_sdvo = enc_to_intel_sdvo(encoder);
+ if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (encoder == intel_attached_encoder(connector))
return connector;
@@ -1491,12 +1412,12 @@ static int
intel_analog_is_connected(struct drm_device *dev)
{
struct drm_connector *analog_connector;
- analog_connector = intel_find_analog_connector(dev);
+ analog_connector = intel_find_analog_connector(dev);
if (!analog_connector)
return false;
- if (analog_connector->funcs->detect(analog_connector) ==
+ if (analog_connector->funcs->detect(analog_connector, false) ==
connector_status_disconnected)
return false;
@@ -1507,54 +1428,52 @@ enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(connector, intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
uint8_t saved_ddc, temp_ddc;
- saved_ddc = sdvo_priv->ddc_bus;
- temp_ddc = sdvo_priv->ddc_bus >> 1;
+ saved_ddc = intel_sdvo->ddc_bus;
+ temp_ddc = intel_sdvo->ddc_bus >> 1;
/*
* Don't use 1 as the DDC bus switch argument when fetching the EDID;
* bus 1 is used for the SDVO SPD ROM.
*/
while(temp_ddc > 1) {
- sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_encoder->ddc_bus);
+ intel_sdvo->ddc_bus = temp_ddc;
+ edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
* correct DDC bus. Update it.
*/
- sdvo_priv->ddc_bus = temp_ddc;
+ intel_sdvo->ddc_bus = temp_ddc;
break;
}
temp_ddc >>= 1;
}
if (edid == NULL)
- sdvo_priv->ddc_bus = saved_ddc;
+ intel_sdvo->ddc_bus = saved_ddc;
}
/* When there is no EDID and no monitor is connected to the VGA
* port, try the CRT DDC to read the EDID for the DVI connector.
*/
- if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+ if (edid == NULL && intel_sdvo->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+ edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
if (edid != NULL) {
bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
+ bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
/* DDC bus is shared, match EDID to connector type */
if (is_digital && need_digital)
- sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
else if (is_digital != need_digital)
status = connector_status_disconnected;
@@ -1567,36 +1486,33 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
return status;
}
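The DDC-probing loop in intel_sdvo_hdmi_sink_detect() above walks candidate buses by shifting the saved bus mask right and skipping bus 1 (the SPD ROM). A stand-alone sketch of that walk, with a stub in place of drm_get_edid():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for drm_get_edid(): pretend only bus 0x04 has a monitor. */
static bool stub_edid_present(uint8_t ddc_bus)
{
	return ddc_bus == 0x04;
}

/* Mirror of the probing idea above: start one step below the saved bus,
 * keep halving, and never try bus 1 (reserved for the SPD ROM). Returns
 * the bus that answered, or the saved bus if none did. */
static uint8_t sketch_probe_ddc(uint8_t saved_ddc)
{
	uint8_t temp_ddc = saved_ddc >> 1;

	while (temp_ddc > 1) {
		if (stub_edid_present(temp_ddc))
			return temp_ddc;
		temp_ddc >>= 1;
	}
	return saved_ddc;
}

int main(void)
{
	printf("probed bus: 0x%02x\n", sketch_probe_ddc(0x10));
	return 0;
}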
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
- u8 status;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
- if (sdvo_priv->is_tv) {
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+ if (intel_sdvo->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
mdelay(30);
}
- status = intel_sdvo_read_response(intel_encoder, &response, 2);
+ if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return connector_status_unknown;
-
if (response == 0)
return connector_status_disconnected;
- sdvo_priv->attached_output = response;
+ intel_sdvo->attached_output = response;
- if ((sdvo_connector->output_flag & response) == 0)
+ if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (response & SDVO_TMDS_MASK)
ret = intel_sdvo_hdmi_sink_detect(connector);
@@ -1605,16 +1521,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
/* May update encoder flags, e.g. the TV clock flag for SDVO TV, etc. */
if (ret == connector_status_connected) {
- sdvo_priv->is_tv = false;
- sdvo_priv->is_lvds = false;
- intel_encoder->needs_tv_clock = false;
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+ intel_sdvo->base.needs_tv_clock = false;
if (response & SDVO_TV_MASK) {
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
- sdvo_priv->is_lvds = true;
+ intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
}
return ret;
@@ -1623,12 +1539,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1637,11 +1552,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* which case we'll look there for the digital DDC data.
*/
if (num_modes == 0 &&
- sdvo_priv->analog_ddc_bus &&
+ intel_sdvo->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
+ (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
}
}
@@ -1713,52 +1628,43 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
- uint8_t status;
-
/* Read the list of supported input resolutions for the selected TV
* format.
*/
- for (i = 0; i < TV_FORMAT_NUM; i++)
- if (tv_format_names[i] == sdvo_priv->tv_format_name)
- break;
-
- format_map = (1 << i);
+ format_map = 1 << intel_sdvo->tv_format_index;
memcpy(&tv_res, &format_map,
- sizeof(struct intel_sdvo_sdtv_resolution_request) >
- sizeof(format_map) ? sizeof(format_map) :
- sizeof(struct intel_sdvo_sdtv_resolution_request));
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
- intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return;
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- &tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(intel_encoder, &reply, 3);
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
return;
for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
- &sdvo_tv_modes[i]);
+ &sdvo_tv_modes[i]);
if (nmode)
drm_mode_probed_add(connector, nmode);
}
-
}
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
/*
@@ -1766,7 +1672,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1785,8 +1691,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- sdvo_priv->sdvo_lvds_fixed_mode =
+ intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
+ intel_sdvo->is_lvds = true;
break;
}
}
@@ -1795,66 +1702,67 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (IS_TV(sdvo_connector))
+ if (IS_TV(intel_sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (IS_LVDS(sdvo_connector))
+ else if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
- if (list_empty(&connector->probed_modes))
- return 0;
- return 1;
+ return !list_empty(&connector->probed_modes);
}
-static
-void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct drm_device *dev = connector->dev;
- if (IS_TV(sdvo_priv)) {
- if (sdvo_priv->left_property)
- drm_property_destroy(dev, sdvo_priv->left_property);
- if (sdvo_priv->right_property)
- drm_property_destroy(dev, sdvo_priv->right_property);
- if (sdvo_priv->top_property)
- drm_property_destroy(dev, sdvo_priv->top_property);
- if (sdvo_priv->bottom_property)
- drm_property_destroy(dev, sdvo_priv->bottom_property);
- if (sdvo_priv->hpos_property)
- drm_property_destroy(dev, sdvo_priv->hpos_property);
- if (sdvo_priv->vpos_property)
- drm_property_destroy(dev, sdvo_priv->vpos_property);
- if (sdvo_priv->saturation_property)
- drm_property_destroy(dev,
- sdvo_priv->saturation_property);
- if (sdvo_priv->contrast_property)
- drm_property_destroy(dev,
- sdvo_priv->contrast_property);
- if (sdvo_priv->hue_property)
- drm_property_destroy(dev, sdvo_priv->hue_property);
- }
- if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
- if (sdvo_priv->brightness_property)
- drm_property_destroy(dev,
- sdvo_priv->brightness_property);
- }
- return;
+ if (intel_sdvo_connector->left)
+ drm_property_destroy(dev, intel_sdvo_connector->left);
+ if (intel_sdvo_connector->right)
+ drm_property_destroy(dev, intel_sdvo_connector->right);
+ if (intel_sdvo_connector->top)
+ drm_property_destroy(dev, intel_sdvo_connector->top);
+ if (intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, intel_sdvo_connector->bottom);
+ if (intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, intel_sdvo_connector->hpos);
+ if (intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, intel_sdvo_connector->vpos);
+ if (intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, intel_sdvo_connector->saturation);
+ if (intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, intel_sdvo_connector->contrast);
+ if (intel_sdvo_connector->hue)
+ drm_property_destroy(dev, intel_sdvo_connector->hue);
+ if (intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+ if (intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+ if (intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+ if (intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+ if (intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+ if (intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+ if (intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+ if (intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, intel_sdvo_connector->brightness);
}
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (sdvo_connector->tv_format_property)
+ if (intel_sdvo_connector->tv_format)
drm_property_destroy(connector->dev,
- sdvo_connector->tv_format_property);
+ intel_sdvo_connector->tv_format);
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
@@ -1868,132 +1776,118 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- struct drm_crtc *crtc = encoder->crtc;
- int ret = 0;
- bool changed = false;
- uint8_t cmd, status;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
ret = drm_connector_property_set_value(connector, property, val);
- if (ret < 0)
- goto out;
+ if (ret)
+ return ret;
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
- if (property == sdvo_connector->tv_format_property) {
- if (val >= TV_FORMAT_NUM) {
- ret = -EINVAL;
- goto out;
- }
- if (sdvo_priv->tv_format_name ==
- sdvo_connector->tv_format_supported[val])
- goto out;
+ if (property == intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
- sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
- changed = true;
- }
+ if (intel_sdvo->tv_format_index ==
+ intel_sdvo_connector->tv_format_supported[val])
+ return 0;
- if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
- cmd = 0;
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
- if (sdvo_connector->left_property == property) {
+ if (intel_sdvo_connector->left == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->right_property, val);
- if (sdvo_connector->left_margin == temp_value)
- goto out;
-
- sdvo_connector->left_margin = temp_value;
- sdvo_connector->right_margin = temp_value;
- temp_value = sdvo_connector->max_hscan -
- sdvo_connector->left_margin;
+ intel_sdvo_connector->right, val);
+ if (intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_connector->right_property == property) {
+ goto set_value;
+ } else if (intel_sdvo_connector->right == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->left_property, val);
- if (sdvo_connector->right_margin == temp_value)
- goto out;
-
- sdvo_connector->left_margin = temp_value;
- sdvo_connector->right_margin = temp_value;
- temp_value = sdvo_connector->max_hscan -
- sdvo_connector->left_margin;
+ intel_sdvo_connector->left, val);
+ if (intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_connector->top_property == property) {
+ goto set_value;
+ } else if (intel_sdvo_connector->top == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->bottom_property, val);
- if (sdvo_connector->top_margin == temp_value)
- goto out;
-
- sdvo_connector->top_margin = temp_value;
- sdvo_connector->bottom_margin = temp_value;
- temp_value = sdvo_connector->max_vscan -
- sdvo_connector->top_margin;
+ intel_sdvo_connector->bottom, val);
+ if (intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_connector->bottom_property == property) {
+ goto set_value;
+ } else if (intel_sdvo_connector->bottom == property) {
drm_connector_property_set_value(connector,
- sdvo_connector->top_property, val);
- if (sdvo_connector->bottom_margin == temp_value)
- goto out;
- sdvo_connector->top_margin = temp_value;
- sdvo_connector->bottom_margin = temp_value;
- temp_value = sdvo_connector->max_vscan -
- sdvo_connector->top_margin;
+ intel_sdvo_connector->top, val);
+ if (intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_connector->hpos_property == property) {
- if (sdvo_connector->cur_hpos == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_connector->cur_hpos = temp_value;
- } else if (sdvo_connector->vpos_property == property) {
- if (sdvo_connector->cur_vpos == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_connector->cur_vpos = temp_value;
- } else if (sdvo_connector->saturation_property == property) {
- if (sdvo_connector->cur_saturation == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_SATURATION;
- sdvo_connector->cur_saturation = temp_value;
- } else if (sdvo_connector->contrast_property == property) {
- if (sdvo_connector->cur_contrast == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_connector->cur_contrast = temp_value;
- } else if (sdvo_connector->hue_property == property) {
- if (sdvo_connector->cur_hue == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_HUE;
- sdvo_connector->cur_hue = temp_value;
- } else if (sdvo_connector->brightness_property == property) {
- if (sdvo_connector->cur_brightness == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_connector->cur_brightness = temp_value;
- }
- if (cmd) {
- intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
- status = intel_sdvo_read_response(intel_encoder,
- NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO command \n");
- return -EINVAL;
- }
- changed = true;
+ goto set_value;
}
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
}
- if (changed && crtc)
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (encoder->crtc) {
+ struct drm_crtc *crtc = encoder->crtc;
+
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
-out:
- return ret;
+ crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
}
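The CHECK_PROPERTY macro above collapses a dozen near-identical branches by token-pasting the property name into the cur_/max_ fields and the SDVO_CMD_SET_ opcode. A self-contained sketch of the same ## idiom, with a toy struct and made-up command codes rather than the driver's:

#include <stdint.h>
#include <stdio.h>

/* Toy property state: one current/max pair per tunable, as in the patch. */
struct toy_conn {
	uint16_t cur_hue, max_hue;
	uint16_t cur_contrast, max_contrast;
};

enum { CMD_SET_HUE = 0x90, CMD_SET_CONTRAST = 0x91 };	/* made-up codes */

/* Same shape as CHECK_PROPERTY: range-check, cache, pick the command. */
#define TOY_CHECK(conn, name, NAME, val, cmd_out)			\
	do {								\
		if ((val) > (conn)->max_##name)				\
			return -1;					\
		if ((conn)->cur_##name == (val))			\
			return 0;					\
		(conn)->cur_##name = (val);				\
		*(cmd_out) = CMD_SET_##NAME;				\
	} while (0)

static int set_hue(struct toy_conn *c, uint16_t val, uint8_t *cmd)
{
	TOY_CHECK(c, hue, HUE, val, cmd);
	return 1;	/* caller would now send *cmd with the new value */
}

int main(void)
{
	struct toy_conn c = { .cur_hue = 10, .max_hue = 100 };
	uint8_t cmd = 0;
	printf("set_hue -> %d, cmd 0x%02x, cur %u\n",
	       set_hue(&c, 42, &cmd), cmd, c.cur_hue);
	return 0;
}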
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
@@ -2020,28 +1914,57 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+ if (intel_sdvo->analog_ddc_bus)
+ intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
+ intel_sdvo->sdvo_lvds_fixed_mode);
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
.destroy = intel_sdvo_enc_destroy,
};
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
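A worked, stand-alone example of the guess above: build a mask of the controlled output plus everything below it in priority, intersect it with the device's capabilities, count the bits, and shift. The enum values and the prefix-mask shortcut are specific to this toy encoding (lower bit == lower priority), not the SDVO_OUTPUT_* layout.

#include <stdint.h>
#include <stdio.h>

/* Toy output flags in the same priority order as the switch above
 * (values are illustrative, not the SDVO_OUTPUT_* encoding). */
enum {
	OUT_RGB0  = 1 << 0,
	OUT_RGB1  = 1 << 1,
	OUT_TMDS0 = 1 << 2,
	OUT_TMDS1 = 1 << 3,
	OUT_LVDS0 = 1 << 4,
	OUT_LVDS1 = 1 << 5,
};

/* Same idea as the fall-through switch: mask of the controlled output and
 * every lower-priority output, intersected with what the device exposes. */
static uint8_t sketch_guess_ddc(uint16_t controlled, uint16_t caps)
{
	uint16_t mask = (controlled << 1) - 1;	/* prefix works only in this toy order */
	unsigned num_bits = __builtin_popcount(mask & caps);

	if (num_bits > 3)	/* more than 3 outputs: default to DDC bus 3 */
		num_bits = 3;
	return 1 << num_bits;	/* corresponds to SDVO_CONTROL_BUS_DDCx */
}

int main(void)
{
	/* Device exposes RGB0 + TMDS0 + TMDS1; we drive TMDS1 -> 3 bits set. */
	printf("ddc bus mask: 0x%02x\n",
	       sketch_guess_ddc(OUT_TMDS1, OUT_RGB0 | OUT_TMDS0 | OUT_TMDS1));
	return 0;
}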
/**
* Choose the appropriate DDC bus for control bus switch command for this
@@ -2052,7 +1975,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
*/
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
- struct intel_sdvo_priv *sdvo, u32 reg)
+ struct intel_sdvo *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
@@ -2061,61 +1984,53 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
else
mapping = &(dev_priv->sdvo_mappings[1]);
- sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
+intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
{
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- uint8_t status;
-
- if (device == 0)
- intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
- else
- intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
-
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
- return true;
+ return intel_sdvo_set_target_output(intel_sdvo,
+ device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &intel_sdvo->is_hdmi, 1);
}
-static struct intel_encoder *
-intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
+static struct intel_sdvo *
+intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_encoder->ddc_bus == &chan->adapter)
- break;
+ struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ if (intel_sdvo->base.ddc_bus == &chan->adapter)
+ return intel_sdvo;
}
- return intel_encoder;
+
+ return NULL;
}
static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
- struct intel_encoder *intel_encoder;
- struct intel_sdvo_priv *sdvo_priv;
+ struct intel_sdvo *intel_sdvo;
struct i2c_algo_bit_data *algo_data;
const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_encoder =
- intel_sdvo_chan_to_intel_encoder(
- (struct intel_i2c_chan *)(algo_data->data));
- if (intel_encoder == NULL)
+ intel_sdvo =
+ intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
+ (algo_data->data));
+ if (intel_sdvo == NULL)
return -EINVAL;
- sdvo_priv = intel_encoder->dev_priv;
- algo = intel_encoder->i2c_bus->algo;
+ algo = intel_sdvo->base.i2c_bus->algo;
- intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
+ intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
}
@@ -2160,27 +2075,9 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
return 0x72;
}
-static bool
-intel_sdvo_connector_alloc (struct intel_connector **ret)
-{
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
-
- *ret = kzalloc(sizeof(*intel_connector) +
- sizeof(*sdvo_connector), GFP_KERNEL);
- if (!*ret)
- return false;
-
- intel_connector = *ret;
- sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
- intel_connector->dev_priv = sdvo_connector;
-
- return true;
-}
-
static void
-intel_sdvo_connector_create (struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_init(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
connector->connector_type);
@@ -2196,582 +2093,467 @@ intel_sdvo_connector_create (struct drm_encoder *encoder,
}
static bool
-intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
return false;
- sdvo_connector = intel_connector->dev_priv;
-
if (device == 0) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
- sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
} else if (device == 1) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
- sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
}
+ intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
- && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
- && sdvo_priv->is_hdmi) {
+ if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
+ && intel_sdvo->is_hdmi) {
/* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_encoder,
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
}
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_connector_init(encoder, connector);
return true;
}
static bool
-intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
- return false;
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+ intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_connector = intel_connector->dev_priv;
- sdvo_priv->controlled_output |= type;
- sdvo_connector->output_flag = type;
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_connector_init(encoder, connector);
- intel_sdvo_tv_create_property(connector, type);
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
- intel_sdvo_create_enhance_property(connector);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
}
static bool
-intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
- return false;
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+ intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- sdvo_connector = intel_connector->dev_priv;
if (device == 0) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
- sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
} else if (device == 1) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
- sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_connector_init(encoder, connector);
return true;
}
static bool
-intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = &intel_sdvo->base.enc;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
- if (!intel_sdvo_connector_alloc(&intel_connector))
- return false;
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
- connector = &intel_connector->base;
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_connector = intel_connector->dev_priv;
-
- sdvo_priv->is_lvds = true;
if (device == 0) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
- sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
} else if (device == 1) {
- sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
- sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
- intel_sdvo_connector_create(encoder, connector);
- intel_sdvo_create_enhance_property(connector);
- return true;
+ intel_sdvo_connector_init(encoder, connector);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
}
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
- sdvo_priv->is_tv = false;
- intel_encoder->needs_tv_clock = false;
- sdvo_priv->is_lvds = false;
+ intel_sdvo->is_tv = false;
+ intel_sdvo->base.needs_tv_clock = false;
+ intel_sdvo->is_lvds = false;
/* Per the SDVO requirements, an XXX1 function block may only exist if the corresponding XXX0 function block exists. */
if (flags & SDVO_OUTPUT_TMDS0)
- if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
- if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
return false;
/* TV has no XXX1 function block */
if (flags & SDVO_OUTPUT_SVID0)
- if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
return false;
if (flags & SDVO_OUTPUT_CVBS0)
- if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
if (flags & SDVO_OUTPUT_RGB0)
- if (!intel_sdvo_analog_init(intel_encoder, 0))
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
- if (!intel_sdvo_analog_init(intel_encoder, 1))
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
return false;
if (flags & SDVO_OUTPUT_LVDS0)
- if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
- if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
return false;
if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
- sdvo_priv->controlled_output = 0;
- memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+ intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(sdvo_priv),
+ SDVO_NAME(intel_sdvo),
bytes[0], bytes[1]);
return false;
}
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
return true;
}
-static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
- uint8_t status;
- intel_sdvo_set_target_output(intel_encoder, type);
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &format, sizeof(format));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return;
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
- memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
- sizeof(format_map) : sizeof(format));
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
if (format_map == 0)
- return;
+ return false;
- sdvo_connector->format_supported_num = 0;
+ intel_sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
- if (format_map & (1 << i)) {
- sdvo_connector->tv_format_supported
- [sdvo_connector->format_supported_num++] =
- tv_format_names[i];
- }
+ if (format_map & (1 << i))
+ intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
- sdvo_connector->tv_format_property =
- drm_property_create(
- connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_connector->format_supported_num);
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
- for (i = 0; i < sdvo_connector->format_supported_num; i++)
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_connector->tv_format_property, i,
- i, sdvo_connector->tv_format_supported[i]);
+ intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(
- connector, sdvo_connector->tv_format_property, 0);
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(&intel_sdvo_connector->base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
}
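
A standalone sketch of the bitmask-to-index idiom the rewritten TV-format helper relies on: walk the supported-format word, record the positions of the set bits, and resolve names only when the enum entries are registered. The format count and names below are made-up placeholders, not the driver's real table.

#include <stdio.h>
#include <stdint.h>

#define TV_FORMAT_NUM 8	/* placeholder count */

static const char *tv_format_names[TV_FORMAT_NUM] = {
	"NTSC_M", "NTSC_J", "NTSC_443", "PAL_B",
	"PAL_D", "PAL_G", "PAL_H", "PAL_I",
};

int main(void)
{
	uint32_t format_map = 0x2d;		/* bits 0, 2, 3 and 5 set */
	uint8_t supported[TV_FORMAT_NUM];
	unsigned int n = 0, i;

	/* Record indices of the set bits, not the name pointers. */
	for (i = 0; i < TV_FORMAT_NUM; i++)
		if (format_map & (1u << i))
			supported[n++] = i;

	/* Resolve names only when the enum entries are emitted. */
	for (i = 0; i < n; i++)
		printf("enum %u -> %s\n", i, tv_format_names[supported[i]]);

	return 0;
}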
-static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->max_##name = data_value[0]; \
+ intel_sdvo_connector->cur_##name = response; \
+ intel_sdvo_connector->name = \
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ if (!intel_sdvo_connector->name) return false; \
+ intel_sdvo_connector->name->values[0] = 0; \
+ intel_sdvo_connector->name->values[1] = data_value[0]; \
+ drm_connector_attach_property(connector, \
+ intel_sdvo_connector->name, \
+ intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while(0)
+
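A standalone sketch of the token pasting and stringizing that ENHANCEMENT() depends on: #name produces the property string and SDVO_CMD_GET_MAX_##NAME pastes the opcode macro name. The two opcodes below are taken from the register header further down in this patch; everything else is a reduced stand-in.

#include <stdio.h>

#define SDVO_CMD_GET_MAX_HUE		0x58
#define SDVO_CMD_GET_MAX_SATURATION	0x55

/* #name stringizes the lower-case token, ##NAME pastes the upper-case
 * one into the opcode macro name, as ENHANCEMENT(name, NAME) does.
 */
#define DESCRIBE(name, NAME) \
	printf("property \"%s\" -> opcode 0x%02x\n", \
	       #name, SDVO_CMD_GET_MAX_##NAME)

int main(void)
{
	DESCRIBE(hue, HUE);
	DESCRIBE(saturation, SATURATION);
	return 0;
}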
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
- struct intel_sdvo_enhancements_reply sdvo_data;
- struct drm_device *dev = connector->dev;
- uint8_t status;
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
- sizeof(sdvo_data));
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS(" incorrect response is returned\n");
- return;
+ /* When horizontal overscan is supported, add the left/right property */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left_margin = data_value[0] - response;
+ intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ intel_sdvo_connector->left =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ intel_sdvo_connector->left->values[0] = 0;
+ intel_sdvo_connector->left->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->left,
+ intel_sdvo_connector->left_margin);
+
+ intel_sdvo_connector->right =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ intel_sdvo_connector->right->values[0] = 0;
+ intel_sdvo_connector->right->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->right,
+ intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
- response = *((uint16_t *)&sdvo_data);
- if (!response) {
- DRM_DEBUG_KMS("No enhancement is supported\n");
- return;
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top_margin = data_value[0] - response;
+ intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ intel_sdvo_connector->top =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ intel_sdvo_connector->top->values[0] = 0;
+ intel_sdvo_connector->top->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->top,
+ intel_sdvo_connector->top_margin);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ intel_sdvo_connector->bottom->values[0] = 0;
+ intel_sdvo_connector->bottom->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->bottom,
+ intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
- if (IS_TV(sdvo_priv)) {
- /* when horizontal overscan is supported, Add the left/right
- * property
- */
- if (sdvo_data.overscan_h) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO max "
- "h_overscan\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
- return;
- }
- sdvo_priv->max_hscan = data_value[0];
- sdvo_priv->left_margin = data_value[0] - response;
- sdvo_priv->right_margin = sdvo_priv->left_margin;
- sdvo_priv->left_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
- sdvo_priv->left_property->values[0] = 0;
- sdvo_priv->left_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->left_property,
- sdvo_priv->left_margin);
- sdvo_priv->right_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
- sdvo_priv->right_property->values[0] = 0;
- sdvo_priv->right_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->right_property,
- sdvo_priv->right_margin);
- DRM_DEBUG_KMS("h_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.overscan_v) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO max "
- "v_overscan\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
- return;
- }
- sdvo_priv->max_vscan = data_value[0];
- sdvo_priv->top_margin = data_value[0] - response;
- sdvo_priv->bottom_margin = sdvo_priv->top_margin;
- sdvo_priv->top_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
- sdvo_priv->top_property->values[0] = 0;
- sdvo_priv->top_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->top_property,
- sdvo_priv->top_margin);
- sdvo_priv->bottom_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
- sdvo_priv->bottom_property->values[0] = 0;
- sdvo_priv->bottom_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->bottom_property,
- sdvo_priv->bottom_margin);
- DRM_DEBUG_KMS("v_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.position_h) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
- return;
- }
- sdvo_priv->max_hpos = data_value[0];
- sdvo_priv->cur_hpos = response;
- sdvo_priv->hpos_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hpos", 2);
- sdvo_priv->hpos_property->values[0] = 0;
- sdvo_priv->hpos_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->hpos_property,
- sdvo_priv->cur_hpos);
- DRM_DEBUG_KMS("h_position: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.position_v) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
- return;
- }
- sdvo_priv->max_vpos = data_value[0];
- sdvo_priv->cur_vpos = response;
- sdvo_priv->vpos_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vpos", 2);
- sdvo_priv->vpos_property->values[0] = 0;
- sdvo_priv->vpos_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->vpos_property,
- sdvo_priv->cur_vpos);
- DRM_DEBUG_KMS("v_position: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.saturation) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
- return;
- }
- sdvo_priv->max_saturation = data_value[0];
- sdvo_priv->cur_saturation = response;
- sdvo_priv->saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- sdvo_priv->saturation_property->values[0] = 0;
- sdvo_priv->saturation_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->saturation_property,
- sdvo_priv->cur_saturation);
- DRM_DEBUG_KMS("saturation: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.contrast) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
- return;
- }
- sdvo_priv->max_contrast = data_value[0];
- sdvo_priv->cur_contrast = response;
- sdvo_priv->contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- sdvo_priv->contrast_property->values[0] = 0;
- sdvo_priv->contrast_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->contrast_property,
- sdvo_priv->cur_contrast);
- DRM_DEBUG_KMS("contrast: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.hue) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
- return;
- }
- sdvo_priv->max_hue = data_value[0];
- sdvo_priv->cur_hue = response;
- sdvo_priv->hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- sdvo_priv->hue_property->values[0] = 0;
- sdvo_priv->hue_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->hue_property,
- sdvo_priv->cur_hue);
- DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_dot_crawl = 1;
+ intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ intel_sdvo_connector->dot_crawl->values[0] = 0;
+ intel_sdvo_connector->dot_crawl->values[1] = 1;
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->dot_crawl,
+ intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
}
- if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
- if (sdvo_data.brightness) {
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
- return;
- }
- intel_sdvo_write_cmd(intel_encoder,
- SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
- return;
- }
- sdvo_priv->max_brightness = data_value[0];
- sdvo_priv->cur_brightness = response;
- sdvo_priv->brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- sdvo_priv->brightness_property->values[0] = 0;
- sdvo_priv->brightness_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->brightness_property,
- sdvo_priv->cur_brightness);
- DRM_DEBUG_KMS("brightness: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+
+ return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
}
- return;
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+
}
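
A standalone sketch of the union trick used above: overlaying the packed capability bitfield with a plain integer so "no enhancement supported" collapses to a single compare. The bitfield here is a made-up subset of the real reply, not its actual layout.

#include <stdio.h>
#include <stdint.h>

/* Made-up subset of the capability reply. */
struct enhancements_reply {
	unsigned int brightness:1;
	unsigned int contrast:1;
	unsigned int hue:1;
	unsigned int saturation:1;
} __attribute__((packed));

int main(void)
{
	union {
		struct enhancements_reply reply;
		uint16_t response;
	} enhancements = { .response = 0 };

	enhancements.reply.contrast = 1;
	enhancements.reply.hue = 1;

	/* One integer compare answers "is anything supported at all?" */
	if (enhancements.response == 0)
		printf("No enhancement is supported\n");
	else
		printf("capability word: 0x%04x\n", enhancements.response);

	return 0;
}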
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
- struct intel_sdvo_priv *sdvo_priv;
+ struct intel_sdvo *intel_sdvo;
u8 ch[0x40];
int i;
u32 i2c_reg, ddc_reg, analog_ddc_reg;
- intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
- if (!intel_encoder) {
+ intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ if (!intel_sdvo)
return false;
- }
- sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
- sdvo_priv->sdvo_reg = sdvo_reg;
+ intel_sdvo->sdvo_reg = sdvo_reg;
- intel_encoder->dev_priv = sdvo_priv;
+ intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
if (HAS_PCH_SPLIT(dev)) {
@@ -2793,14 +2575,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
- sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
/* Save the bit-banging i2c functionality for use by the DDC wrapper */
intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
@@ -2810,17 +2592,16 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* setup the DDC bus. */
if (IS_SDVOB(sdvo_reg)) {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
-
- if (intel_encoder->ddc_bus == NULL)
+ if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
goto err_i2c;
/* Wrap with our custom algo which switches to DDC mode */
@@ -2831,53 +2612,56 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
- intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err_enc;
- if (intel_sdvo_output_setup(intel_encoder,
- sdvo_priv->caps.output_flags) != true) {
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_i2c;
+ goto err_enc;
}
- intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_encoder, true, false);
-
- intel_sdvo_get_input_pixel_clock_range(intel_encoder,
- &sdvo_priv->pixel_clock_min,
- &sdvo_priv->pixel_clock_max);
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_enc;
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err_enc;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
- SDVO_NAME(sdvo_priv),
- sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
- sdvo_priv->caps.device_rev_id,
- sdvo_priv->pixel_clock_min / 1000,
- sdvo_priv->pixel_clock_max / 1000,
- (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
- sdvo_priv->caps.output_flags &
+ intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- sdvo_priv->caps.output_flags &
+ intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
-
return true;
+err_enc:
+ drm_encoder_cleanup(&intel_encoder->enc);
err_i2c:
- if (sdvo_priv->analog_ddc_bus != NULL)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+ if (intel_sdvo->analog_ddc_bus != NULL)
+ intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
if (intel_encoder->ddc_bus != NULL)
intel_i2c_destroy(intel_encoder->ddc_bus);
if (intel_encoder->i2c_bus != NULL)
intel_i2c_destroy(intel_encoder->i2c_bus);
err_inteloutput:
- kfree(intel_encoder);
+ kfree(intel_sdvo);
return false;
}
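
A standalone sketch of the embedded-base layout that replaces the old "encoder struct followed by a private blob" allocation above: the derived object names its base explicitly, one allocation covers both, and no pointer arithmetic past the encoder is needed. The member names and values are reduced stand-ins.

#include <stdio.h>
#include <stdlib.h>

struct encoder {
	int type;
};

struct sdvo {
	struct encoder base;	/* embedded base, not a trailing blob */
	int sdvo_reg;
};

int main(void)
{
	struct sdvo *sdvo = calloc(1, sizeof(*sdvo));
	struct encoder *encoder;

	if (!sdvo)
		return 1;

	/* The base is reached by name, not by (encoder + 1) arithmetic. */
	encoder = &sdvo->base;
	encoder->type = 3;
	sdvo->sdvo_reg = 0x61140;	/* example value */

	printf("type=%d reg=0x%x\n", encoder->type, sdvo->sdvo_reg);
	free(sdvo);
	return 0;
}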
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index ba5cdf8ae40..a386b022e53 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 5 bytes of bit flags for TV formats shared by all TV format functions */
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply {
unsigned int overscan_h:1;
unsigned int overscan_v:1;
- unsigned int position_h:1;
- unsigned int position_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
unsigned int sharpness:1;
unsigned int dot_crawl:1;
unsigned int dither:1;
- unsigned int max_tv_chroma_filter:1;
- unsigned int max_tv_luma_filter:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
-#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
-#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
-#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
-#define SDVO_CMD_GET_MAX_POSITION_H 0x67
-#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
-#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
-#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
-#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
-#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
-#define SDVO_CMD_GET_POSITION_H 0x68
-#define SDVO_CMD_SET_POSITION_H 0x69
-#define SDVO_CMD_GET_POSITION_V 0x6b
-#define SDVO_CMD_SET_POSITION_V 0x6c
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
-#define SDVO_CMD_GET_TV_CHROMA 0x75
-#define SDVO_CMD_SET_TV_CHROMA 0x76
-#define SDVO_CMD_GET_TV_LUMA 0x78
-#define SDVO_CMD_SET_TV_LUMA 0x79
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
}__attribute__((packed));
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2d4e4045ca..4a117e318a7 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -44,7 +44,9 @@ enum tv_margin {
};
/** Private structure for the integrated TV support */
-struct intel_tv_priv {
+struct intel_tv {
+ struct intel_encoder base;
+
int type;
char *tv_format;
int margin[4];
@@ -476,7 +478,7 @@ static const struct tv_mode tv_modes[] = {
.vi_end_f1 = 20, .vi_end_f2 = 21,
.nbr_end = 240,
- .burst_ena = 8,
+ .burst_ena = true,
.hburst_start = 72, .hburst_len = 34,
.vburst_start_f1 = 9, .vburst_end_f1 = 240,
.vburst_start_f2 = 10, .vburst_end_f2 = 240,
@@ -896,7 +898,10 @@ static const struct tv_mode tv_modes[] = {
},
};
-#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+ return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+}
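
A standalone sketch of the container_of() pattern behind enc_to_intel_tv(): given a pointer to the embedded base member, step back to the enclosing object. container_of() is re-defined locally so the example builds outside the kernel tree; the struct members are placeholders.

#include <stdio.h>
#include <stddef.h>

/* Local stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder {
	int id;
};

struct tv {
	struct encoder base;
	const char *format;
};

int main(void)
{
	struct tv tv = { .base = { .id = 7 }, .format = "NTSC-M" };
	struct encoder *enc = &tv.base;

	/* Step from the embedded base back to the enclosing struct tv. */
	struct tv *back = container_of(enc, struct tv, base);

	printf("id=%d format=%s\n", back->base.id, back->format);
	return 0;
}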
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
@@ -931,19 +936,17 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_encoder *intel_encoder)
+intel_tv_mode_find (struct intel_tv *intel_tv)
{
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-
- return intel_tv_mode_lookup(tv_priv->tv_format);
+ return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -959,8 +962,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
struct drm_encoder *other_encoder;
if (!tv_mode)
@@ -985,9 +988,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1003,7 +1005,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl = I915_READ(TV_CTL);
tv_ctl &= TV_CTL_SAVE;
- switch (tv_priv->type) {
+ switch (intel_tv->type) {
default:
case DRM_MODE_CONNECTOR_Unknown:
case DRM_MODE_CONNECTOR_Composite:
@@ -1156,11 +1158,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Wait for vblank for the disable to take effect */
if (!IS_I9XX(dev))
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1170,12 +1172,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
ysize = 2*tv_mode->nbr_end + 1;
- xpos += tv_priv->margin[TV_MARGIN_LEFT];
- ypos += tv_priv->margin[TV_MARGIN_TOP];
- xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
- tv_priv->margin[TV_MARGIN_RIGHT]);
- ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
- tv_priv->margin[TV_MARGIN_BOTTOM]);
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
@@ -1224,9 +1226,9 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_tv_detect_type (struct intel_tv *intel_tv)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_tv->base.enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1265,11 +1267,15 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
DAC_C_0_7_V);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
+ POSTING_READ(TV_DAC);
+ msleep(20);
+
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
+ POSTING_READ(TV_CTL);
+ msleep(20);
+
/*
* A B C
* 0 1 1 Composite
@@ -1306,12 +1312,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
- if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
return;
@@ -1319,12 +1324,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
tv_mode = tv_modes + i;
- if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
break;
}
- tv_priv->tv_format = tv_mode->name;
+ intel_tv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1336,33 +1341,32 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
* we have a pipe programmed in order to probe the TV.
*/
static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
{
- struct drm_crtc *crtc;
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- int dpms_mode;
- int type = tv_priv->type;
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(encoder->crtc, intel_encoder);
- } else {
- crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ type = intel_tv_detect_type(intel_tv);
+ } else if (force) {
+ struct drm_crtc *crtc;
+ int dpms_mode;
+
+ crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, connector,
+ type = intel_tv_detect_type(intel_tv);
+ intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
- type = -1;
- }
-
- tv_priv->type = type;
+ return connector_status_unknown;
+ } else
+ return connector->status;
if (type < 0)
return connector_status_disconnected;
@@ -1393,8 +1397,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1419,8 +1423,8 @@ intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
@@ -1485,8 +1489,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1496,30 +1499,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
goto out;
if (property == dev->mode_config.tv_left_margin_property &&
- tv_priv->margin[TV_MARGIN_LEFT] != val) {
- tv_priv->margin[TV_MARGIN_LEFT] = val;
+ intel_tv->margin[TV_MARGIN_LEFT] != val) {
+ intel_tv->margin[TV_MARGIN_LEFT] = val;
changed = true;
} else if (property == dev->mode_config.tv_right_margin_property &&
- tv_priv->margin[TV_MARGIN_RIGHT] != val) {
- tv_priv->margin[TV_MARGIN_RIGHT] = val;
+ intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+ intel_tv->margin[TV_MARGIN_RIGHT] = val;
changed = true;
} else if (property == dev->mode_config.tv_top_margin_property &&
- tv_priv->margin[TV_MARGIN_TOP] != val) {
- tv_priv->margin[TV_MARGIN_TOP] = val;
+ intel_tv->margin[TV_MARGIN_TOP] != val) {
+ intel_tv->margin[TV_MARGIN_TOP] = val;
changed = true;
} else if (property == dev->mode_config.tv_bottom_margin_property &&
- tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
- tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+ intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+ intel_tv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
- if (val >= NUM_TV_MODES) {
+ if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
- if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
+ if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
goto out;
- tv_priv->tv_format = tv_modes[val].name;
+ intel_tv->tv_format = tv_modes[val].name;
changed = true;
} else {
ret = -EINVAL;
@@ -1555,16 +1558,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
-static void intel_tv_enc_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
- .destroy = intel_tv_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
/*
@@ -1608,9 +1603,9 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
int i, initial_mode = 0;
@@ -1649,18 +1644,18 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_encoder = kzalloc(sizeof(struct intel_encoder) +
- sizeof(struct intel_tv_priv), GFP_KERNEL);
- if (!intel_encoder) {
+ intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+ if (!intel_tv) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(intel_tv);
return;
}
+ intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1670,22 +1665,20 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
- tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_encoder->dev_priv = tv_priv;
- tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
- tv_priv->margin[TV_MARGIN_LEFT] = 54;
- tv_priv->margin[TV_MARGIN_TOP] = 36;
- tv_priv->margin[TV_MARGIN_RIGHT] = 46;
- tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+ intel_tv->margin[TV_MARGIN_LEFT] = 54;
+ intel_tv->margin[TV_MARGIN_TOP] = 36;
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
@@ -1693,28 +1686,28 @@ intel_tv_init(struct drm_device *dev)
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES,
+ tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
GFP_KERNEL);
if (!tv_format_names)
goto out;
- for (i = 0; i < NUM_TV_MODES; i++)
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names);
+ drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
initial_mode);
drm_connector_attach_property(connector,
dev->mode_config.tv_left_margin_property,
- tv_priv->margin[TV_MARGIN_LEFT]);
+ intel_tv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_top_margin_property,
- tv_priv->margin[TV_MARGIN_TOP]);
+ intel_tv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
dev->mode_config.tv_right_margin_property,
- tv_priv->margin[TV_MARGIN_RIGHT]);
+ intel_tv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
- tv_priv->margin[TV_MARGIN_BOTTOM]);
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
out:
drm_sysfs_connector_add(connector);
}
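
A standalone sketch of why ARRAY_SIZE() replaces the hand-rolled NUM_TV_MODES above: the element count is derived from the table itself, so adding or removing a mode cannot leave the count stale. ARRAY_SIZE() is re-defined locally and the table contents are placeholders.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct tv_mode {
	const char *name;
	int refresh;		/* placeholder field */
};

static const struct tv_mode tv_modes[] = {
	{ "NTSC-M", 29970 },
	{ "PAL",    25000 },
	{ "480p",   59940 },
};

int main(void)
{
	size_t i;

	/* The loop bound always tracks the table, never a stale constant. */
	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
		printf("%zu: %s (refresh %d)\n", i, tv_modes[i].name,
		       tv_modes[i].refresh);

	return 0;
}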
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 3c917fb3a60..08868ac3048 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -52,7 +52,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
* Engine control
*/
-int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
+int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
u32 status = 0;
int i;
@@ -74,7 +74,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
return -EBUSY;
}
-static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
+static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
@@ -102,7 +102,7 @@ static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
* Primary DMA stream
*/
-void mga_do_dma_flush(drm_mga_private_t * dev_priv)
+void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
@@ -142,11 +142,10 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
head = MGA_READ(MGA_PRIMADDRESS);
- if (head <= tail) {
+ if (head <= tail)
primary->space = primary->size - primary->tail;
- } else {
+ else
primary->space = head - tail;
- }
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
@@ -158,7 +157,7 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
DRM_DEBUG("done.\n");
}
-void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
+void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
@@ -181,11 +180,10 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
head = MGA_READ(MGA_PRIMADDRESS);
- if (head == dev_priv->primary->offset) {
+ if (head == dev_priv->primary->offset)
primary->space = primary->size;
- } else {
+ else
primary->space = head - dev_priv->primary->offset;
- }
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
@@ -199,7 +197,7 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
DRM_DEBUG("done.\n");
}
-void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
+void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -220,11 +218,11 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
* Freelist management
*/
-#define MGA_BUFFER_USED ~0
+#define MGA_BUFFER_USED (~0)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
-static void mga_freelist_print(struct drm_device * dev)
+static void mga_freelist_print(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -245,7 +243,7 @@ static void mga_freelist_print(struct drm_device * dev)
}
#endif
-static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
+static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
@@ -288,7 +286,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
return 0;
}
-static void mga_freelist_cleanup(struct drm_device * dev)
+static void mga_freelist_cleanup(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -308,7 +306,7 @@ static void mga_freelist_cleanup(struct drm_device * dev)
#if 0
/* FIXME: Still needed?
*/
-static void mga_freelist_reset(struct drm_device * dev)
+static void mga_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
@@ -356,7 +354,7 @@ static struct drm_buf *mga_freelist_get(struct drm_device * dev)
return NULL;
}
-int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -391,7 +389,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
* DMA initialization, cleanup
*/
-int mga_driver_load(struct drm_device * dev, unsigned long flags)
+int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -405,8 +403,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
- dev_priv->mmio_base = drm_get_resource_start(dev, 1);
- dev_priv->mmio_size = drm_get_resource_len(dev, 1);
+ dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
+ dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
dev->counters += 3;
dev->types[6] = _DRM_STAT_IRQ;
@@ -439,8 +437,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
-static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -481,11 +479,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
*/
if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
- if (mode.mode & 0x02) {
+ if (mode.mode & 0x02)
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
- } else {
+ else
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
- }
}
/* Allocate and bind AGP memory. */
@@ -593,8 +590,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
return 0;
}
#else
-static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
return -EINVAL;
}
@@ -614,8 +611,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
-static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -678,9 +675,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci(dev, &req);
- if (!err) {
+ if (!err)
break;
- }
}
if (bin_count == 0) {
@@ -704,8 +700,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
return 0;
}
-static int mga_do_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err;
@@ -737,17 +733,15 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
* carve off portions of it for internal uses. The remaining memory
* is returned to user-mode to be used for AGP textures.
*/
- if (is_agp) {
+ if (is_agp)
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
- }
/* If we attempted to initialize the card for AGP DMA but failed,
* clean-up any mess that may have been created.
*/
- if (err) {
+ if (err)
mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
- }
/* Not only do we want to try and initialized PCI cards for PCI DMA,
* but we also try to initialized AGP cards that could not be
@@ -757,9 +751,8 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
* AGP memory, etc.
*/
- if (!is_agp || err) {
+ if (!is_agp || err)
err = mga_do_pci_dma_bootstrap(dev, dma_bs);
- }
return err;
}
@@ -792,7 +785,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
return err;
}
-static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
+static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -800,11 +793,10 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
dev_priv = dev->dev_private;
- if (init->sgram) {
+ if (init->sgram)
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
- } else {
+ else
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
- }
dev_priv->maccess = init->maccess;
dev_priv->fb_cpp = init->fb_cpp;
@@ -975,9 +967,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
dev_priv->agp_handle = 0;
}
- if ((dev->agp != NULL) && dev->agp->acquired) {
+ if ((dev->agp != NULL) && dev->agp->acquired)
err = drm_agp_release(dev);
- }
#endif
}
@@ -998,9 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
memset(dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys));
- if (dev_priv->head != NULL) {
+ if (dev_priv->head != NULL)
mga_freelist_cleanup(dev);
- }
}
return err;
@@ -1017,9 +1007,8 @@ int mga_dma_init(struct drm_device *dev, void *data,
switch (init->func) {
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, init);
- if (err) {
+ if (err)
(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
- }
return err;
case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
@@ -1047,9 +1036,8 @@ int mga_dma_flush(struct drm_device *dev, void *data,
WRAP_WAIT_WITH_RETURN(dev_priv);
- if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
+ if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
mga_do_dma_flush(dev_priv);
- }
if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
@@ -1079,8 +1067,8 @@ int mga_dma_reset(struct drm_device *dev, void *data,
* DMA buffer management
*/
-static int mga_dma_get_buffers(struct drm_device * dev,
- struct drm_file *file_priv, struct drm_dma * d)
+static int mga_dma_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_dma *d)
{
struct drm_buf *buf;
int i;
@@ -1134,9 +1122,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
d->granted_count = 0;
- if (d->request_count) {
+ if (d->request_count)
ret = mga_dma_get_buffers(dev, file_priv, d);
- }
return ret;
}
@@ -1144,7 +1131,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
/**
* Called just before the module is unloaded.
*/
-int mga_driver_unload(struct drm_device * dev)
+int mga_driver_unload(struct drm_device *dev)
{
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -1155,12 +1142,12 @@ int mga_driver_unload(struct drm_device * dev)
/**
* Called when the last opener of the device is closed.
*/
-void mga_driver_lastclose(struct drm_device * dev)
+void mga_driver_lastclose(struct drm_device *dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
-int mga_driver_dma_quiescent(struct drm_device * dev)
+int mga_driver_dma_quiescent(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index ddfe16197b5..26d0d8ced80 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -36,7 +36,7 @@
#include "drm_pciids.h"
-static int mga_driver_device_is_agp(struct drm_device * dev);
+static int mga_driver_device_is_agp(struct drm_device *dev);
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
@@ -119,7 +119,7 @@ MODULE_LICENSE("GPL and additional rights");
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
-static int mga_driver_device_is_agp(struct drm_device * dev)
+static int mga_driver_device_is_agp(struct drm_device *dev)
{
const struct pci_dev *const pdev = dev->pdev;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index be6c6b9b0e8..1084fa4d261 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -164,59 +164,59 @@ extern int mga_dma_reset(struct drm_device *dev, void *data,
extern int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
-extern int mga_driver_unload(struct drm_device * dev);
-extern void mga_driver_lastclose(struct drm_device * dev);
-extern int mga_driver_dma_quiescent(struct drm_device * dev);
+extern int mga_driver_unload(struct drm_device *dev);
+extern void mga_driver_lastclose(struct drm_device *dev);
+extern int mga_driver_dma_quiescent(struct drm_device *dev);
-extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
+extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv);
-extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
-extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
-extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_flush(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv);
-extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
+extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf);
/* mga_warp.c */
-extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
-extern int mga_warp_init(drm_mga_private_t * dev_priv);
+extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
+extern int mga_warp_init(drm_mga_private_t *dev_priv);
/* mga_irq.c */
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern void mga_driver_irq_preinstall(struct drm_device *dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
-extern void mga_driver_irq_uninstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device *dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
#if defined(__linux__) && defined(__alpha__)
-#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
-#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
+#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
+#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
-#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
-#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
+#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
+#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
-#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
-#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
-#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
+#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
+#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
+#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
+#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
-static inline u32 _MGA_READ(u32 * addr)
+static inline u32 _MGA_READ(u32 *addr)
{
DRM_MEMORYBARRIER();
return *(volatile u32 *)addr;
}
#else
-#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg))
-#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg))
-#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
+#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
+#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
+#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
#define DWGREG0 0x1c00
@@ -233,40 +233,39 @@ static inline u32 _MGA_READ(u32 * addr)
 * Helper macros...
*/
-#define MGA_EMIT_STATE( dev_priv, dirty ) \
+#define MGA_EMIT_STATE(dev_priv, dirty) \
do { \
- if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
- if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
- mga_g400_emit_state( dev_priv ); \
- } else { \
- mga_g200_emit_state( dev_priv ); \
- } \
+ if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) { \
+ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) \
+ mga_g400_emit_state(dev_priv); \
+ else \
+ mga_g200_emit_state(dev_priv); \
} \
} while (0)
-#define WRAP_TEST_WITH_RETURN( dev_priv ) \
+#define WRAP_TEST_WITH_RETURN(dev_priv) \
do { \
- if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( mga_is_idle( dev_priv ) ) { \
- mga_do_dma_wrap_end( dev_priv ); \
- } else if ( dev_priv->prim.space < \
- dev_priv->prim.high_mark ) { \
- if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "wrap...\n"); \
- return -EBUSY; \
+ if (test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (mga_is_idle(dev_priv)) { \
+ mga_do_dma_wrap_end(dev_priv); \
+ } else if (dev_priv->prim.space < \
+ dev_priv->prim.high_mark) { \
+ if (MGA_DMA_DEBUG) \
+ DRM_INFO("wrap...\n"); \
+ return -EBUSY; \
} \
} \
} while (0)
-#define WRAP_WAIT_WITH_RETURN( dev_priv ) \
+#define WRAP_WAIT_WITH_RETURN(dev_priv) \
do { \
- if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
- if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "wrap...\n"); \
- return -EBUSY; \
+ if (test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (mga_do_wait_for_idle(dev_priv) < 0) { \
+ if (MGA_DMA_DEBUG) \
+ DRM_INFO("wrap...\n"); \
+ return -EBUSY; \
} \
- mga_do_dma_wrap_end( dev_priv ); \
+ mga_do_dma_wrap_end(dev_priv); \
} \
} while (0)
@@ -280,12 +279,12 @@ do { \
#define DMA_BLOCK_SIZE (5 * sizeof(u32))
-#define BEGIN_DMA( n ) \
+#define BEGIN_DMA(n) \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
- DRM_INFO( " space=0x%x req=0x%Zx\n", \
- dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
+ if (MGA_VERBOSE) { \
+ DRM_INFO("BEGIN_DMA(%d)\n", (n)); \
+ DRM_INFO(" space=0x%x req=0x%Zx\n", \
+ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
@@ -293,9 +292,9 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA()\n" ); \
- DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
+ if (MGA_VERBOSE) { \
+ DRM_INFO("BEGIN_DMA()\n"); \
+ DRM_INFO(" space=0x%x\n", dev_priv->prim.space); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
@@ -304,72 +303,68 @@ do { \
#define ADVANCE_DMA() \
do { \
dev_priv->prim.tail = write; \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
- write, dev_priv->prim.space ); \
- } \
+ if (MGA_VERBOSE) \
+ DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
+ write, dev_priv->prim.space); \
} while (0)
#define FLUSH_DMA() \
do { \
- if ( 0 ) { \
- DRM_INFO( "\n" ); \
- DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
- dev_priv->prim.tail, \
- (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
- dev_priv->primary->offset)); \
+ if (0) { \
+ DRM_INFO("\n"); \
+ DRM_INFO(" tail=0x%06x head=0x%06lx\n", \
+ dev_priv->prim.tail, \
+ (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
+ dev_priv->primary->offset)); \
} \
- if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( dev_priv->prim.space < \
- dev_priv->prim.high_mark ) { \
- mga_do_dma_wrap_start( dev_priv ); \
- } else { \
- mga_do_dma_flush( dev_priv ); \
- } \
+ if (!test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (dev_priv->prim.space < dev_priv->prim.high_mark) \
+ mga_do_dma_wrap_start(dev_priv); \
+ else \
+ mga_do_dma_flush(dev_priv); \
} \
} while (0)
/* Never use this; always use DMA_BLOCK(...) for primary DMA output.
*/
-#define DMA_WRITE( offset, val ) \
+#define DMA_WRITE(offset, val) \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
- (u32)(val), write + (offset) * sizeof(u32) ); \
- } \
+ if (MGA_VERBOSE) \
+ DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
+ (u32)(val), write + (offset) * sizeof(u32)); \
*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
} while (0)
-#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \
+#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3) \
do { \
- DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \
- (DMAREG( reg1 ) << 8) | \
- (DMAREG( reg2 ) << 16) | \
- (DMAREG( reg3 ) << 24)) ); \
- DMA_WRITE( 1, val0 ); \
- DMA_WRITE( 2, val1 ); \
- DMA_WRITE( 3, val2 ); \
- DMA_WRITE( 4, val3 ); \
+ DMA_WRITE(0, ((DMAREG(reg0) << 0) | \
+ (DMAREG(reg1) << 8) | \
+ (DMAREG(reg2) << 16) | \
+ (DMAREG(reg3) << 24))); \
+ DMA_WRITE(1, val0); \
+ DMA_WRITE(2, val1); \
+ DMA_WRITE(3, val2); \
+ DMA_WRITE(4, val3); \
write += DMA_BLOCK_SIZE; \
} while (0)
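
The DMA_BLOCK() macro above always emits a fixed five-dword group: one header dword carrying four packed register indices, followed by the four register values. That is why DMA_BLOCK_SIZE is 5 * sizeof(u32). Below is a minimal standalone sketch of that layout; dmareg() is a hypothetical stand-in for the driver's DMAREG() mapping, and the plain array stands in for the mapped primary buffer.

/* Illustrative sketch only: models the five-dword block DMA_BLOCK() emits.
 * dmareg() is a made-up substitute for the real DMAREG() register mapping. */
#include <stdint.h>
#include <stdio.h>

static uint8_t dmareg(uint32_t reg)
{
        return (uint8_t)((reg - 0x1c00) >> 2);  /* assumes a DWGREG0-style window */
}

static void emit_block(uint32_t *block,
                       uint32_t r0, uint32_t v0, uint32_t r1, uint32_t v1,
                       uint32_t r2, uint32_t v2, uint32_t r3, uint32_t v3)
{
        /* dword 0: four packed register indices, one byte each */
        block[0] = (uint32_t)dmareg(r0) |
                   ((uint32_t)dmareg(r1) << 8) |
                   ((uint32_t)dmareg(r2) << 16) |
                   ((uint32_t)dmareg(r3) << 24);
        /* dwords 1..4: the values, in the same order as the indices */
        block[1] = v0;
        block[2] = v1;
        block[3] = v2;
        block[4] = v3;
}

int main(void)
{
        uint32_t block[5];

        emit_block(block, 0x1c00, 1, 0x1c04, 2, 0x1c08, 3, 0x1c0c, 4);
        printf("header 0x%08x, values %u %u %u %u\n",
               (unsigned)block[0], (unsigned)block[1], (unsigned)block[2],
               (unsigned)block[3], (unsigned)block[4]);
        return 0;
}
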
/* Buffer aging via primary DMA stream head pointer.
*/
-#define SET_AGE( age, h, w ) \
+#define SET_AGE(age, h, w) \
do { \
(age)->head = h; \
(age)->wrap = w; \
} while (0)
-#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \
- ( (age)->wrap == w && \
- (age)->head < h ) )
+#define TEST_AGE(age, h, w) ((age)->wrap < w || \
+ ((age)->wrap == w && \
+ (age)->head < h))
-#define AGE_BUFFER( buf_priv ) \
+#define AGE_BUFFER(buf_priv) \
do { \
drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
- if ( (buf_priv)->dispatched ) { \
+ if ((buf_priv)->dispatched) { \
entry->age.head = (dev_priv->prim.tail + \
dev_priv->primary->offset); \
entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
@@ -681,7 +676,7 @@ do { \
/* Simple idle test.
*/
-static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
+static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv)
{
u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
return (status == MGA_ENDPRDMASTS);
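
The SET_AGE()/TEST_AGE() helpers earlier in this header track buffer completion purely from how far the primary DMA stream head has advanced: a buffer's commands have been consumed once its recorded (wrap, head) pair lies strictly behind the current stream position. A self-contained sketch of that comparison, with hypothetical names standing in for drm_mga_freelist_t and the SAREA fields:

/* Illustrative sketch only: models the (wrap, head) comparison TEST_AGE()
 * performs. Names and types are simplified assumptions, not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct age {
        uint32_t head;  /* DMA head offset recorded when the buffer was queued */
        uint32_t wrap;  /* primary-buffer wrap count recorded at the same time */
};

/* true when the hardware head (h) in wrap generation (w) has passed the
 * recorded age, i.e. the buffer's commands have been consumed */
static bool age_passed(const struct age *age, uint32_t h, uint32_t w)
{
        return age->wrap < w || (age->wrap == w && age->head < h);
}

int main(void)
{
        struct age a = { .head = 0x4000, .wrap = 2 };

        printf("%d\n", age_passed(&a, 0x5000, 2)); /* 1: same wrap, head moved past */
        printf("%d\n", age_passed(&a, 0x1000, 3)); /* 1: a later wrap always wins */
        printf("%d\n", age_passed(&a, 0x3000, 2)); /* 0: head not there yet */
        return 0;
}
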
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index daa6041a483..2581202297e 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -76,9 +76,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
- if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
+ if ((prim_start & ~0x03) != (prim_end & ~0x03))
MGA_WRITE(MGA_PRIMEND, prim_end);
- }
atomic_inc(&dev_priv->last_fence_retired);
DRM_WAKEUP(&dev_priv->fence_queue);
@@ -120,7 +119,7 @@ void mga_disable_vblank(struct drm_device *dev, int crtc)
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
-int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
@@ -139,7 +138,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
return ret;
}
-void mga_driver_irq_preinstall(struct drm_device * dev)
+void mga_driver_irq_preinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -162,7 +161,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void mga_driver_irq_uninstall(struct drm_device * dev)
+void mga_driver_irq_uninstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index a53b848e0f1..9ce2827f8c0 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -41,8 +41,8 @@
* DMA hardware state programming functions
*/
-static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
- struct drm_clip_rect * box)
+static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
+ struct drm_clip_rect *box)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -66,7 +66,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -89,7 +89,7 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -116,7 +116,7 @@ static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -144,7 +144,7 @@ static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -184,7 +184,7 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -223,7 +223,7 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->warp_pipe;
@@ -250,7 +250,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->warp_pipe;
@@ -327,7 +327,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
+static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -348,7 +348,7 @@ static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
}
}
-static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
+static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -381,7 +381,7 @@ static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
/* Disallow all write destinations except the front and backbuffer.
*/
-static int mga_verify_context(drm_mga_private_t * dev_priv)
+static int mga_verify_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -400,7 +400,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
/* Disallow texture reads from PCI space.
*/
-static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
+static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
@@ -417,7 +417,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
return 0;
}
-static int mga_verify_state(drm_mga_private_t * dev_priv)
+static int mga_verify_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -446,7 +446,7 @@ static int mga_verify_state(drm_mga_private_t * dev_priv)
return (ret == 0);
}
-static int mga_verify_iload(drm_mga_private_t * dev_priv,
+static int mga_verify_iload(drm_mga_private_t *dev_priv,
unsigned int dstorg, unsigned int length)
{
if (dstorg < dev_priv->texture_offset ||
@@ -465,7 +465,7 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
return 0;
}
-static int mga_verify_blit(drm_mga_private_t * dev_priv,
+static int mga_verify_blit(drm_mga_private_t *dev_priv,
unsigned int srcorg, unsigned int dstorg)
{
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
@@ -480,7 +480,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
*
*/
-static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
+static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -568,7 +568,7 @@ static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * cl
FLUSH_DMA();
}
-static void mga_dma_dispatch_swap(struct drm_device * dev)
+static void mga_dma_dispatch_swap(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -622,7 +622,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
DRM_DEBUG("... done.\n");
}
-static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -669,7 +669,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
FLUSH_DMA();
}
-static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
+static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
unsigned int start, unsigned int end)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -718,7 +718,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
/* This copies a 64-byte aligned AGP region to the framebuffer with a
 * standard blit; the ioctl needs to do the checking.
*/
-static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
+static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
unsigned int dstorg, unsigned int length)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -766,7 +766,7 @@ static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf
FLUSH_DMA();
}
-static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
+static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -801,9 +801,8 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit
int w = pbox[i].x2 - pbox[i].x1 - 1;
int start;
- if (blit->ydir == -1) {
+ if (blit->ydir == -1)
srcy = blit->height - srcy - 1;
- }
start = srcy * blit->src_pitch + srcx;
@@ -1086,19 +1085,19 @@ file_priv)
}
struct drm_ioctl_desc mga_ioctls[] = {
- DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 9aad4847afd..f172bd5c257 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -46,7 +46,7 @@ MODULE_FIRMWARE(FIRMWARE_G400);
#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN)
-int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
+int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
{
unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset;
@@ -133,7 +133,7 @@ out:
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
-int mga_warp_init(drm_mga_private_t * dev_priv)
+int mga_warp_init(drm_mga_private_t *dev_priv)
{
u32 wmisc;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1175429da10..d2d28048efb 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,6 +1,6 @@
config DRM_NOUVEAU
tristate "Nouveau (nVidia) cards"
- depends on DRM
+ depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
@@ -41,4 +41,13 @@ config DRM_I2C_CH7006
This driver is currently only useful if you're also using
the nouveau driver.
+
+config DRM_I2C_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ default m if DRM_NOUVEAU
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters, used in some nVidia
+ video cards.
+
endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index acd31ed861e..e9b06e4ef2a 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,20 +9,20 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o nouveau_grctx.o \
+ nouveau_dp.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
- nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o \
+ nv40_graph.o nv50_graph.o nvc0_graph.o \
nv40_grctx.o nv50_grctx.o \
- nv04_instmem.o nv50_instmem.o \
+ nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o nv50_gpio.o \
+ nv10_gpio.o nv50_gpio.o \
nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d4bcca8a513..c17a055ee3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
#include "drmP.h"
#include "drm.h"
@@ -11,6 +12,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nv50_display.h"
+#include "nouveau_connector.h"
#include <linux/vga_switcheroo.h>
@@ -42,7 +44,7 @@ static const char nouveau_dsm_muid[] = {
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
};
-static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
+static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@@ -259,3 +261,37 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
}
+
+int
+nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct acpi_device *acpidev;
+ acpi_handle handle;
+ int type, ret;
+ void *edid;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ type = ACPI_VIDEO_DISPLAY_LCD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ ret = acpi_bus_get_device(handle, &acpidev);
+ if (ret)
+ return -ENODEV;
+
+ ret = acpi_video_get_edid(acpidev, type, -1, &edid);
+ if (ret < 0)
+ return ret;
+
+ nv_connector->edid = edid;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e492919faf4..974b0f8ae04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -28,6 +28,8 @@
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
+#include <linux/io-mapping.h>
+
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
@@ -209,20 +211,20 @@ static struct methods shadow_methods[] = {
{ "PCIROM", load_vbios_pci, true },
{ "ACPI", load_vbios_acpi, true },
};
+#define NUM_SHADOW_METHODS ARRAY_SIZE(shadow_methods)
static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
{
- const int nr_methods = ARRAY_SIZE(shadow_methods);
struct methods *methods = shadow_methods;
int testscore = 3;
- int scores[nr_methods], i;
+ int scores[NUM_SHADOW_METHODS], i;
if (nouveau_vbios) {
- for (i = 0; i < nr_methods; i++)
+ for (i = 0; i < NUM_SHADOW_METHODS; i++)
if (!strcasecmp(nouveau_vbios, methods[i].desc))
break;
- if (i < nr_methods) {
+ if (i < NUM_SHADOW_METHODS) {
NV_INFO(dev, "Attempting to use BIOS image from %s\n",
methods[i].desc);
@@ -234,7 +236,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
- for (i = 0; i < nr_methods; i++) {
+ for (i = 0; i < NUM_SHADOW_METHODS; i++) {
NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
methods[i].desc);
data[0] = data[1] = 0; /* avoid reuse of previous image */
@@ -245,7 +247,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
}
while (--testscore > 0) {
- for (i = 0; i < nr_methods; i++) {
+ for (i = 0; i < NUM_SHADOW_METHODS; i++) {
if (scores[i] == testscore) {
NV_TRACE(dev, "Using BIOS image from %s\n",
methods[i].desc);
@@ -920,7 +922,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return -EINVAL;
+ return len;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1022,7 +1024,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return -EINVAL;
+ return len;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1194,7 +1196,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
if (!dpe) {
NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
- return -EINVAL;
+ return 3;
}
switch (cond) {
@@ -1218,12 +1220,16 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
int ret;
auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
- if (!auxch)
- return -ENODEV;
+ if (!auxch) {
+ NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset);
+ return 3;
+ }
ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
- if (ret)
- return ret;
+ if (ret) {
+ NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret);
+ return 3;
+ }
if (cond & 1)
iexec->execute = false;
@@ -1392,7 +1398,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return -EINVAL;
+ return len;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1452,6 +1458,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* "mask n" and OR it with "data n" before writing it back to the device
*/
+ struct drm_device *dev = bios->dev;
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
@@ -1466,9 +1473,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
"Count: 0x%02X\n",
offset, i2c_index, i2c_address, count);
- chan = init_i2c_device_find(bios->dev, i2c_index);
- if (!chan)
- return -ENODEV;
+ chan = init_i2c_device_find(dev, i2c_index);
+ if (!chan) {
+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
uint8_t reg = bios->data[offset + 4 + i * 3];
@@ -1479,8 +1488,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_READ, reg,
I2C_SMBUS_BYTE_DATA, &val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret);
+ return len;
+ }
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
@@ -1494,8 +1505,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_WRITE, reg,
I2C_SMBUS_BYTE_DATA, &val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
+ return len;
+ }
}
return len;
@@ -1520,6 +1533,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* "DCB I2C table entry index", set the register to "data n"
*/
+ struct drm_device *dev = bios->dev;
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
@@ -1534,9 +1548,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
"Count: 0x%02X\n",
offset, i2c_index, i2c_address, count);
- chan = init_i2c_device_find(bios->dev, i2c_index);
- if (!chan)
- return -ENODEV;
+ chan = init_i2c_device_find(dev, i2c_index);
+ if (!chan) {
+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
uint8_t reg = bios->data[offset + 4 + i * 2];
@@ -1553,8 +1569,10 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_WRITE, reg,
I2C_SMBUS_BYTE_DATA, &val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
+ return len;
+ }
}
return len;
@@ -1577,6 +1595,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* address" on the I2C bus given by "DCB I2C table entry index"
*/
+ struct drm_device *dev = bios->dev;
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
@@ -1584,7 +1603,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
uint8_t data[256];
- int i;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1593,9 +1612,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
"Count: 0x%02X\n",
offset, i2c_index, i2c_address, count);
- chan = init_i2c_device_find(bios->dev, i2c_index);
- if (!chan)
- return -ENODEV;
+ chan = init_i2c_device_find(dev, i2c_index);
+ if (!chan) {
+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1608,8 +1629,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.flags = 0;
msg.len = count;
msg.buf = data;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return -EIO;
+ ret = i2c_transfer(&chan->adapter, &msg, 1);
+ if (ret != 1) {
+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
+ return len;
+ }
}
return len;
@@ -1633,6 +1657,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* used -- see get_tmds_index_reg()
*/
+ struct drm_device *dev = bios->dev;
uint8_t mlv = bios->data[offset + 1];
uint32_t tmdsaddr = bios->data[offset + 2];
uint8_t mask = bios->data[offset + 3];
@@ -1647,8 +1672,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
offset, mlv, tmdsaddr, mask, data);
reg = get_tmds_index_reg(bios->dev, mlv);
- if (!reg)
- return -EINVAL;
+ if (!reg) {
+ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
+ return 5;
+ }
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1678,6 +1705,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
* register is used -- see get_tmds_index_reg()
*/
+ struct drm_device *dev = bios->dev;
uint8_t mlv = bios->data[offset + 1];
uint8_t count = bios->data[offset + 2];
int len = 3 + count * 2;
@@ -1691,8 +1719,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
offset, mlv, count);
reg = get_tmds_index_reg(bios->dev, mlv);
- if (!reg)
- return -EINVAL;
+ if (!reg) {
+ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1898,6 +1928,31 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
}
static int
+init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_LTIME opcode: 0x57 ('W')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): time
+ *
+ * Sleep for "time" milliseconds.
+ */
+
+ unsigned time = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n",
+ offset, time);
+
+ msleep(time);
+
+ return 3;
+}
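
INIT_LTIME reads its 16-bit operand with ROM16(), i.e. an unaligned little-endian load from the BIOS image. A tiny portable sketch of the equivalent read, assuming a plain byte array in place of bios->data:

/* Illustrative sketch only: a byte-wise little-endian 16-bit read, the
 * portable equivalent of ROM16(bios->data[offset + 1]) for this opcode. */
#include <stdint.h>
#include <stdio.h>

static uint16_t rom16(const uint8_t *p)
{
        /* byte loads are safe for unaligned offsets into the BIOS image */
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        const uint8_t script[] = { 0x57, 0xf4, 0x01 }; /* opcode 0x57, time = 0x01f4 */

        printf("sleep %u ms\n", (unsigned)rom16(&script[1])); /* 500 */
        return 0;
}
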
+
+static int
init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1965,6 +2020,64 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
}
static int
+init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_I2C_IF opcode: 0x5E ('^')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): I2C register
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): data
+ *
+ * Read the register given by "I2C register" on the device addressed
+ * by "I2C slave address" on the I2C bus given by "DCB I2C table
+ * entry index". Compare the result AND "mask" to "data".
+ * If they're not equal, skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
+ uint8_t reg = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t data = bios->data[offset + 5];
+ struct nouveau_i2c_chan *chan;
+ union i2c_smbus_data val;
+ int ret;
+
+ /* no execute check by design */
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
+ offset, i2c_index, i2c_address);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return -ENODEV;
+
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0) {
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: [no device], "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reg, mask, data);
+ iexec->execute = 0;
+ return 6;
+ }
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reg, val.byte, mask, data);
+
+ iexec->execute = ((val.byte & mask) == data);
+
+ return 6;
+}
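
Like the other conditional opcodes, INIT_I2C_IF does not fail the script when the probe misses; it clears iexec->execute and still reports its length, so the parser keeps walking and merely skips the side effects of later opcodes until the condition is inverted or resumed. A minimal sketch of that gating pattern, using a simplified handler signature and opcode set rather than the driver's real parse loop:

/* Illustrative sketch only: execute-flag gating in a tiny script walker.
 * The opcode set, handler signatures and table layout are assumptions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct exec_state {
        bool execute;           /* plays the role of iexec->execute */
};

/* every handler returns its opcode length, so the walker can advance even
 * when the opcode's side effects are being skipped */
static int op_condition(const uint8_t *op, struct exec_state *st)
{
        st->execute = (op[1] != 0);     /* pretend op[1] is the probed result */
        return 2;
}

static int op_write(const uint8_t *op, struct exec_state *st)
{
        if (st->execute)
                printf("write 0x%02x\n", op[1]);
        return 2;
}

int main(void)
{
        const uint8_t script[] = { 0x01, 0x00,   /* condition: fails */
                                   0x02, 0xaa,   /* skipped */
                                   0x01, 0x01,   /* condition: passes */
                                   0x02, 0xbb }; /* executed */
        struct exec_state st = { .execute = true };
        unsigned int i = 0;

        while (i < sizeof(script)) {
                if (script[i] == 0x01)
                        i += op_condition(&script[i], &st);
                else
                        i += op_write(&script[i], &st);
        }
        return 0;
}
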
+
+static int
init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2039,6 +2152,325 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 5;
}
+static inline void
+bios_md32(struct nvbios *bios, uint32_t reg,
+ uint32_t mask, uint32_t val)
+{
+ bios_wr32(bios, reg, (bios_rd32(bios, reg) & ~mask) | val);
+}
+
+static uint32_t
+peek_fb(struct drm_device *dev, struct io_mapping *fb,
+ uint32_t off)
+{
+ uint32_t val = 0;
+
+ if (off < pci_resource_len(dev->pdev, 1)) {
+ uint8_t __iomem *p =
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+
+ val = ioread32(p + (off & ~PAGE_MASK));
+
+ io_mapping_unmap_atomic(p, KM_USER0);
+ }
+
+ return val;
+}
+
+static void
+poke_fb(struct drm_device *dev, struct io_mapping *fb,
+ uint32_t off, uint32_t val)
+{
+ if (off < pci_resource_len(dev->pdev, 1)) {
+ uint8_t __iomem *p =
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+
+ iowrite32(val, p + (off & ~PAGE_MASK));
+ wmb();
+
+ io_mapping_unmap_atomic(p, KM_USER0);
+ }
+}
+
+static inline bool
+read_back_fb(struct drm_device *dev, struct io_mapping *fb,
+ uint32_t off, uint32_t val)
+{
+ poke_fb(dev, fb, off, val);
+ return val == peek_fb(dev, fb, off);
+}
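
peek_fb()/poke_fb()/read_back_fb() implement a write-then-read-back probe against the BAR1 aperture; the sizing routines below rely on writes beyond the populated RAM either not sticking or aliasing onto lower addresses. A standalone sketch of that idea against a simulated aperture; the aliasing model and all sizes here are assumptions for the demo, not real PFB behaviour:

/* Illustrative sketch only: the write/read-back sizing idea behind the
 * peek_fb()/poke_fb()/read_back_fb() helpers, run against a fake aperture
 * whose undecoded address lines alias back onto lower offsets. */
#include <stdint.h>
#include <stdio.h>

#define APERTURE        (16u << 20)             /* pretend BAR1 is 16 MiB */
static uint32_t ram[(8u << 20) / 4];            /* only 8 MiB actually populated */

static void poke(uint32_t off, uint32_t val)
{
        ram[(off % sizeof(ram)) / 4] = val;     /* aliasing model (assumption) */
}

static uint32_t peek(uint32_t off)
{
        return ram[(off % sizeof(ram)) / 4];
}

static uint32_t probe_size(void)
{
        uint32_t patt = 0xdeadbeef;
        uint32_t size = APERTURE;

        /* halve the candidate while a write at its halfway point aliases
         * back onto offset 0, i.e. that address line is not wired up */
        while (size > (1u << 20)) {
                poke(0, patt);
                poke(size / 2, patt + 1);
                if (peek(0) != patt + 1)
                        break;
                size /= 2;
        }
        return size;
}

int main(void)
{
        printf("detected %u MiB\n", (unsigned)(probe_size() >> 20)); /* 8 */
        return 0;
}
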
+
+static int
+nv04_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ uint32_t patt = 0xdeadbeef;
+ struct io_mapping *fb;
+ int i;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ /* Sequencer and refresh off */
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
+ bios_md32(bios, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+
+ bios_md32(bios, NV04_PFB_BOOT_0, ~0,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
+ NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+ NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
+
+ for (i = 0; i < 4; i++)
+ poke_fb(dev, fb, 4 * i, patt);
+
+ poke_fb(dev, fb, 0x400000, patt + 1);
+
+ if (peek_fb(dev, fb, 0) == patt + 1) {
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+ NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
+ bios_md32(bios, NV04_PFB_DEBUG_0,
+ NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+ for (i = 0; i < 4; i++)
+ poke_fb(dev, fb, 4 * i, patt);
+
+ if ((peek_fb(dev, fb, 0xc) & 0xffff) != (patt & 0xffff))
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+ } else if ((peek_fb(dev, fb, 0xc) & 0xffff0000) !=
+ (patt & 0xffff0000)) {
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+ } else if (peek_fb(dev, fb, 0) != patt) {
+ if (read_back_fb(dev, fb, 0x800000, patt))
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+ else
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+ NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
+
+ } else if (!read_back_fb(dev, fb, 0x800000, patt)) {
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+ }
+
+ /* Refresh on, sequencer on */
+ bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
+
+ io_mapping_free(fb);
+ return 0;
+}
+
+static const uint8_t *
+nv05_memory_config(struct nvbios *bios)
+{
+ /* Defaults for BIOSes lacking a memory config table */
+ static const uint8_t default_config_tab[][2] = {
+ { 0x24, 0x00 },
+ { 0x28, 0x00 },
+ { 0x24, 0x01 },
+ { 0x1f, 0x00 },
+ { 0x0f, 0x00 },
+ { 0x17, 0x00 },
+ { 0x06, 0x00 },
+ { 0x00, 0x00 }
+ };
+ int i = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) &
+ NV_PEXTDEV_BOOT_0_RAMCFG) >> 2;
+
+ if (bios->legacy.mem_init_tbl_ptr)
+ return &bios->data[bios->legacy.mem_init_tbl_ptr + 2 * i];
+ else
+ return default_config_tab[i];
+}
+
+static int
+nv05_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ const uint8_t *ramcfg = nv05_memory_config(bios);
+ uint32_t patt = 0xdeadbeef;
+ struct io_mapping *fb;
+ int i, v;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ /* Sequencer off */
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
+
+ if (bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+ goto out;
+
+ bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+ /* If present load the hardcoded scrambling table */
+ if (bios->legacy.mem_init_tbl_ptr) {
+ uint32_t *scramble_tab = (uint32_t *)&bios->data[
+ bios->legacy.mem_init_tbl_ptr + 0x10];
+
+ for (i = 0; i < 8; i++)
+ bios_wr32(bios, NV04_PFB_SCRAMBLE(i),
+ ROM32(scramble_tab[i]));
+ }
+
+ /* Set memory type/width/length defaults depending on the straps */
+ bios_md32(bios, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+
+ if (ramcfg[1] & 0x80)
+ bios_md32(bios, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+
+ bios_md32(bios, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+ bios_md32(bios, NV04_PFB_CFG1, 0, 1);
+
+ /* Probe memory bus width */
+ for (i = 0; i < 4; i++)
+ poke_fb(dev, fb, 4 * i, patt);
+
+ if (peek_fb(dev, fb, 0xc) != patt)
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
+
+ /* Probe memory length */
+ v = bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+
+ if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
+ (!read_back_fb(dev, fb, 0x1000000, ++patt) ||
+ !read_back_fb(dev, fb, 0, ++patt)))
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
+
+ if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
+ !read_back_fb(dev, fb, 0x800000, ++patt))
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+ if (!read_back_fb(dev, fb, 0x400000, ++patt))
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+out:
+ /* Sequencer on */
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
+
+ io_mapping_free(fb);
+ return 0;
+}
+
+static int
+nv10_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ const int mem_width[] = { 0x10, 0x00, 0x20 };
+ const int mem_width_count = (dev_priv->chipset >= 0x17 ? 3 : 2);
+ uint32_t patt = 0xdeadbeef;
+ struct io_mapping *fb;
+ int i, j, k;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+ /* Probe memory bus width */
+ for (i = 0; i < mem_width_count; i++) {
+ bios_md32(bios, NV04_PFB_CFG0, 0x30, mem_width[i]);
+
+ for (j = 0; j < 4; j++) {
+ for (k = 0; k < 4; k++)
+ poke_fb(dev, fb, 0x1c, 0);
+
+ poke_fb(dev, fb, 0x1c, patt);
+ poke_fb(dev, fb, 0x3c, 0);
+
+ if (peek_fb(dev, fb, 0x1c) == patt)
+ goto mem_width_found;
+ }
+ }
+
+mem_width_found:
+ patt <<= 1;
+
+ /* Probe amount of installed memory */
+ for (i = 0; i < 4; i++) {
+ int off = bios_rd32(bios, NV04_PFB_FIFO_DATA) - 0x100000;
+
+ poke_fb(dev, fb, off, patt);
+ poke_fb(dev, fb, 0, 0);
+
+ peek_fb(dev, fb, 0);
+ peek_fb(dev, fb, 0);
+ peek_fb(dev, fb, 0);
+ peek_fb(dev, fb, 0);
+
+ if (peek_fb(dev, fb, off) == patt)
+ goto amount_found;
+ }
+
+ /* IC missing - disable the upper half memory space. */
+ bios_md32(bios, NV04_PFB_CFG0, 0x1000, 0);
+
+amount_found:
+ io_mapping_free(fb);
+ return 0;
+}
+
+static int
+nv20_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ uint32_t mask = (dev_priv->chipset >= 0x25 ? 0x300 : 0x900);
+ uint32_t amount, off;
+ struct io_mapping *fb;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+ /* Allow full addressing */
+ bios_md32(bios, NV04_PFB_CFG0, 0, mask);
+
+ amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
+ for (off = amount; off > 0x2000000; off -= 0x2000000)
+ poke_fb(dev, fb, off - 4, off);
+
+ amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
+ if (amount != peek_fb(dev, fb, amount - 4))
+ /* IC missing - disable the upper half memory space. */
+ bios_md32(bios, NV04_PFB_CFG0, mask, 0);
+
+ io_mapping_free(fb);
+ return 0;
+}
+
static int
init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@@ -2047,64 +2479,57 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
* offset (8 bit): opcode
*
- * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
- * that the hardware can correctly calculate how much VRAM it has
- * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
+ * This opcode is meant to set the PFB memory config registers
+ * appropriately so that we can correctly calculate how much VRAM it
+ * has (on nv10 and better chipsets the amount of installed VRAM is
+ * subsequently reported in NV_PFB_CSTATUS (0x10020C)).
*
- * The implementation of this opcode in general consists of two parts:
- * 1) determination of the memory bus width
- * 2) determination of how many of the card's RAM pads have ICs attached
+ * The implementation of this opcode in general consists of several
+ * parts:
*
- * 1) is done by a cunning combination of writes to offsets 0x1c and
- * 0x3c in the framebuffer, and seeing whether the written values are
- * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
+ * 1) Determination of memory type and density. Only necessary for
+ * really old chipsets, the memory type reported by the strap bits
+ * (0x101000) is assumed to be accurate on nv05 and newer.
*
- * 2) is done by a cunning combination of writes to an offset slightly
- * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
- * if the test pattern can be read back. This then affects bits 12-15 of
- * NV_PFB_CFG0
+ * 2) Determination of the memory bus width. Usually done by a cunning
+ * combination of writes to offsets 0x1c and 0x3c in the fb, and
+ * seeing whether the written values are read back correctly.
*
- * In this context a "cunning combination" may include multiple reads
- * and writes to varying locations, often alternating the test pattern
- * and 0, doubtless to make sure buffers are filled, residual charges
- * on tracks are removed etc.
+ *    Only necessary on nv0x-nv1x and nv34; on the other cards we can
+ *    trust the straps.
*
- * Unfortunately, the "cunning combination"s mentioned above, and the
- * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
- * trace I have.
+ * 3) Determination of how many of the card's RAM pads have ICs
+ * attached, usually done by a cunning combination of writes to an
+ * offset slightly less than the maximum memory reported by
+ * NV_PFB_CSTATUS, then seeing if the test pattern can be read back.
*
- * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
- * we started was correct, and use that instead
+ * This appears to be a NOP on IGPs and NV4x or newer chipsets, both io
+ * logs of the VBIOS and kmmio traces of the binary driver POSTing the
+ * card show nothing being done for this opcode. Why is it still listed
+ * in the table?!
*/
/* no iexec->execute check by design */
- /*
- * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
- * and kmmio traces of the binary driver POSTing the card show nothing
- * being done for this opcode. why is it still listed in the table?!
- */
-
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ int ret;
- if (dev_priv->card_type >= NV_40)
- return 1;
-
- /*
- * On every card I've seen, this step gets done for us earlier in
- * the init scripts
- uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
- bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
- */
-
- /*
- * This also has probably been done in the scripts, but an mmio trace of
- * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
- */
- bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
+ if (dev_priv->chipset >= 0x40 ||
+ dev_priv->chipset == 0x1a ||
+ dev_priv->chipset == 0x1f)
+ ret = 0;
+ else if (dev_priv->chipset >= 0x20 &&
+ dev_priv->chipset != 0x34)
+ ret = nv20_init_compute_mem(bios);
+ else if (dev_priv->chipset >= 0x10)
+ ret = nv10_init_compute_mem(bios);
+ else if (dev_priv->chipset >= 0x5)
+ ret = nv05_init_compute_mem(bios);
+ else
+ ret = nv04_init_compute_mem(bios);
- /* write back the saved configuration value */
- bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
+ if (ret)
+ return ret;
return 1;
}
@@ -2131,7 +2556,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
/* no iexec->execute check by design */
pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
- bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
+ bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00);
+
bios_wr32(bios, reg, value1);
udelay(10);
@@ -2167,7 +2593,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return -ENODEV;
+ return 0;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2180,14 +2606,14 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
reg = ROM32(bios->data[seqtbloffs += 4])) {
switch (reg) {
- case NV_PFB_PRE:
- data = NV_PFB_PRE_CMD_PRECHARGE;
+ case NV04_PFB_PRE:
+ data = NV04_PFB_PRE_CMD_PRECHARGE;
break;
- case NV_PFB_PAD:
- data = NV_PFB_PAD_CKE_NORMAL;
+ case NV04_PFB_PAD:
+ data = NV04_PFB_PAD_CKE_NORMAL;
break;
- case NV_PFB_REF:
- data = NV_PFB_REF_CMD_REFRESH;
+ case NV04_PFB_REF:
+ data = NV04_PFB_REF_CMD_REFRESH;
break;
default:
data = ROM32(bios->data[meminitdata]);
@@ -2222,7 +2648,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return -ENODEV;
+ return 0;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2252,10 +2678,10 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
/* no iexec->execute check by design */
uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
- uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
+ uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & 0x40) >> 6;
if (bios->major_version > 2)
- return -ENODEV;
+ return 0;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2389,7 +2815,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
* offset + 1 (8 bit): mask
* offset + 2 (8 bit): cmpval
*
- * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
+ * Test if (NV04_PFB_BOOT_0 & "mask") equals "cmpval".
* If condition not met skip subsequent opcodes until condition is
* inverted (INIT_NOT), or we hit INIT_RESUME
*/
@@ -2401,7 +2827,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
if (!iexec->execute)
return 3;
- data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
+ data = bios_rd32(bios, NV04_PFB_BOOT_0) & mask;
BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
offset, data, cmpval);
@@ -2795,12 +3221,13 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
int i;
- if (dev_priv->card_type != NV_50) {
+ if (dev_priv->card_type < NV_50) {
NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
- return -ENODEV;
+ return 1;
}
if (!iexec->execute)
@@ -2815,7 +3242,7 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
offset, gpio->tag, gpio->state_default);
if (bios->execute)
- nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
+ pgpio->set(bios->dev, gpio->tag, gpio->state_default);
/* The NVIDIA binary driver doesn't appear to actually do
* any of this, my VBIOS does however.
@@ -2872,10 +3299,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
uint8_t index;
int i;
-
- if (!iexec->execute)
- return len;
-
+ /* critical! we still need the opcode length even when not executing */
if (!blocklen) {
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
@@ -2883,6 +3307,9 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
return -EINVAL;
}
+ if (!iexec->execute)
+ return len;
+
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
@@ -3064,14 +3491,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return -EINVAL;
+ return len;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return -ENODEV;
+ return len;
}
if (!iexec->execute)
@@ -3084,7 +3511,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return ret;
+ return len;
}
data &= bios->data[offset + 0];
@@ -3093,7 +3520,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return ret;
+ return len;
}
}
@@ -3123,14 +3550,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return -EINVAL;
+ return len;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return -ENODEV;
+ return len;
}
if (!iexec->execute)
@@ -3141,13 +3568,76 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return ret;
+ return len;
}
}
return len;
}
+static int
+init_i2c_long_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_I2C_LONG_IF opcode: 0x9A ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (16 bit): I2C register
+ * offset + 5 (8 bit): mask
+ * offset + 6 (8 bit): data
+ *
+ * Read the register given by "I2C register" on the device addressed
+ * by "I2C slave address" on the I2C bus given by "DCB I2C table
+ * entry index". Compare the result AND "mask" to "data".
+ * If they're not equal, skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
+ uint8_t reglo = bios->data[offset + 3];
+ uint8_t reghi = bios->data[offset + 4];
+ uint8_t mask = bios->data[offset + 5];
+ uint8_t data = bios->data[offset + 6];
+ struct nouveau_i2c_chan *chan;
+ uint8_t buf0[2] = { reghi, reglo };
+ uint8_t buf1[1];
+ struct i2c_msg msg[2] = {
+ { i2c_address, 0, 1, buf0 },
+ { i2c_address, I2C_M_RD, 1, buf1 },
+ };
+ int ret;
+
+ /* no execute check by design */
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
+ offset, i2c_index, i2c_address);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return -ENODEV;
+
+
+ ret = i2c_transfer(&chan->adapter, msg, 2);
+ if (ret < 0) {
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: [no device], "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reghi, reglo, mask, data);
+ iexec->execute = 0;
+ return 7;
+ }
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reghi, reglo, buf1[0], mask, data);
+
+ iexec->execute = ((buf1[0] & mask) == data);
+
+ return 7;
+}
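
The access above is the usual combined-transfer idiom for a device with a 16-bit register address: a write message carrying the two address bytes immediately followed by a read message, submitted together so no stop condition splits them. A hedged userspace sketch of the same idiom via i2c-dev; the bus number, slave address and register are made-up examples:

/* Illustrative sketch only: the same write-then-read combined transfer done
 * from userspace through i2c-dev, for a device with a 16-bit register
 * address. Bus, slave address and register below are arbitrary examples. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int read_reg16(int fd, uint8_t addr, uint16_t reg, uint8_t *val)
{
        uint8_t regbuf[2] = { reg >> 8, reg & 0xff };   /* high byte first */
        struct i2c_msg msgs[2] = {
                { .addr = addr, .flags = 0,        .len = 2, .buf = regbuf },
                { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        /* both messages go out in one transaction: no stop between them */
        return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
        int fd = open("/dev/i2c-1", O_RDWR);    /* example bus */
        uint8_t val;

        if (fd < 0)
                return 1;
        if (read_reg16(fd, 0x50, 0x1234, &val) == 0)    /* example device/reg */
                printf("reg 0x1234 = 0x%02x\n", val);
        close(fd);
        return 0;
}
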
+
static struct init_tbl_entry itbl_entry[] = {
/* command name , id , length , offset , mult , command handler */
/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
@@ -3174,9 +3664,11 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_ZM_CR" , 0x53, init_zm_cr },
{ "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
{ "INIT_CONDITION_TIME" , 0x56, init_condition_time },
+ { "INIT_LTIME" , 0x57, init_ltime },
{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_I2C_IF" , 0x5E, init_i2c_if },
{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
{ "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
@@ -3210,6 +3702,7 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_97" , 0x97, init_97 },
{ "INIT_AUXCH" , 0x98, init_auxch },
{ "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
+ { "INIT_I2C_LONG_IF" , 0x9A, init_i2c_long_if },
{ NULL , 0 , NULL }
};
@@ -3376,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if ((dev->pci_device & 0xffff) == 0x0179 ||
- (dev->pci_device & 0xffff) == 0x0189 ||
- (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_RESET) {
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-
- } else if (script == LVDS_PANEL_ON) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- | (1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT,
- bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-
- } else if (script == LVDS_PANEL_OFF) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- & ~(1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT,
- bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
- }
- }
+ if (script == LVDS_RESET &&
+ (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+ dev->pci_device == 0x0329))
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
return 0;
@@ -3888,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
*
* For the moment, a quirk will do :)
*/
- if ((dev->pdev->device == 0x01d7) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x01c2)) {
+ if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
bios->fp.duallink_transition_clk = 80000;
- }
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4068,7 +4541,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
bios->display.script_table_ptr,
table[2], table[3], table[0] >= 0x21);
if (!otable) {
- NV_ERROR(dev, "Couldn't find matching output script table\n");
+ NV_DEBUG_KMS(dev, "failed to match any output table\n");
return 1;
}
@@ -4094,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
@@ -4104,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
@@ -4117,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
@@ -4125,11 +4598,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
if (script)
script = clkcmptable(bios, script, pxclk);
if (!script) {
- NV_ERROR(dev, "clock script 0 not found\n");
+ NV_DEBUG_KMS(dev, "clock script 0 not found\n");
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
@@ -4141,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
}
@@ -4484,7 +4957,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
pll_lim->min_p = record[12];
pll_lim->max_p = record[13];
/* where did this go to?? */
- if (limit_match == 0x00614100 || limit_match == 0x00614900)
+ if ((entry[0] & 0xf0) == 0x80)
pll_lim->refclk = 27000;
else
pll_lim->refclk = 100000;
@@ -4864,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
}
tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
- if (tmdstableptr == 0x0) {
+ if (!tmdstableptr) {
NV_ERROR(dev, "Pointer to TMDS table invalid\n");
return -EINVAL;
}
+ NV_INFO(dev, "TMDS table version %d.%d\n",
+ bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
/* nv50+ has v2.0, but we don't parse it atm */
- if (bios->data[tmdstableptr] != 0x11) {
- NV_WARN(dev,
- "TMDS table revision %d.%d not currently supported\n",
- bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+ if (bios->data[tmdstableptr] != 0x11)
return -ENOSYS;
- }
/*
* These two scripts are odd: they don't seem to get run even when
@@ -5151,10 +5622,14 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+ if (bios->data[legacy_i2c_offset + 4])
+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ if (bios->data[legacy_i2c_offset + 5])
+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ if (bios->data[legacy_i2c_offset + 6])
+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ if (bios->data[legacy_i2c_offset + 7])
+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -5312,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios)
gpio->line = tvdac_gpio[1] >> 4;
gpio->invert = tvdac_gpio[0] & 2;
}
+ } else {
+ /*
+ * There is no systematic way to store GPIO info on pre-v2.2
+ * DCBs, so try to match the PCI device IDs instead.
+ */
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+ gpio->tag = DCB_GPIO_TVDAC0;
+ gpio->line = 4;
+ }
+
}
if (!gpio_table_ptr)
@@ -5387,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
struct drm_device *dev = bios->dev;
/* Gigabyte NX85T */
- if ((dev->pdev->device == 0x0421) &&
- (dev->pdev->subsystem_vendor == 0x1458) &&
- (dev->pdev->subsystem_device == 0x344c)) {
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
@@ -5506,7 +5993,7 @@ static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
entry->i2c_index = i2c;
entry->heads = heads;
entry->location = DCB_LOC_ON_CHIP;
- /* "or" mostly unused in early gen crt modesetting, 0 is fine */
+ entry->or = 1;
}
static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
@@ -5589,9 +6076,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
if (conf & 0x4 || conf & 0x8)
entry->lvdsconf.use_power_scripts = true;
} else {
- mask = ~0x5;
+ mask = ~0x7;
+ if (conf & 0x2)
+ entry->lvdsconf.use_acpi_for_edid = true;
if (conf & 0x4)
entry->lvdsconf.use_power_scripts = true;
+ entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
}
if (conf & mask) {
/*
@@ -5631,9 +6121,15 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
break;
case OUTPUT_TMDS:
- entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ if (dcb->version >= 0x40)
+ entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ else if (dcb->version >= 0x30)
+ entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
+ else if (dcb->version >= 0x22)
+ entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
+
break;
- case 0xe:
+ case OUTPUT_EOL:
/* weird g80 mobile type that "nv" treats as a terminator */
dcb->entries--;
return false;
@@ -5670,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = OUTPUT_TV;
break;
case 2:
- case 3:
- entry->type = OUTPUT_LVDS;
- break;
case 4:
- switch ((conn & 0x000000f0) >> 4) {
- case 0:
- entry->type = OUTPUT_TMDS;
- break;
- case 1:
+ if (conn & 0x10)
entry->type = OUTPUT_LVDS;
- break;
- default:
- NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
- (conn & 0x000000f0) >> 4);
- return false;
- }
+ else
+ entry->type = OUTPUT_TMDS;
+ break;
+ case 3:
+ entry->type = OUTPUT_LVDS;
break;
default:
NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -5706,13 +6194,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
case OUTPUT_TV:
entry->tvconf.has_component_output = false;
break;
- case OUTPUT_TMDS:
- /*
- * Invent a DVI-A output, by copying the fields of the DVI-D
- * output; reported to work by math_b on an NV20(!).
- */
- fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
- break;
case OUTPUT_LVDS:
if ((conn & 0x00003f00) != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
@@ -5793,6 +6274,29 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
dcb->entries = newentries;
}
+static bool
+apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
+{
+ /* Dell Precision M6300
+ * DCB entry 2: 02025312 00000010
+ * DCB entry 3: 02026312 00000020
+ *
+ * Identical, except apparently a different connector on a
+ * different SOR link. Not a clue how we're supposed to know
+ * which one is in use if it even shares an i2c line...
+ *
+ * Ignore the connector on the second SOR link to prevent
+ * nasty problems until this is sorted (assuming it's not a
+ * VBIOS bug).
+ */
+ if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
+ if (*conn == 0x02026312 && *conf == 0x00000020)
+ return false;
+ }
+
+ return true;
+}
+
static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
@@ -5903,6 +6407,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
dcb->i2c_table = &bios->data[i2ctabptr];
if (dcb->version >= 0x30)
dcb->i2c_default_indices = dcb->i2c_table[4];
+
+ /*
+ * Parse the "management" I2C bus, used for hardware
+ * monitoring and some external TMDS transmitters.
+ */
+ if (dcb->version >= 0x22) {
+ int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf :
+ 2);
+
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ idx, &dcb->i2c[idx]);
+ }
}
if (entries > DCB_MAX_NUM_ENTRIES)
@@ -5926,6 +6443,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
if ((connection & 0x0000000f) == 0x0000000f)
continue;
+ if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
+ continue;
+
NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
dcb->entries, connection, config);
@@ -6181,9 +6701,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
int i, ret = 0;
- NVLockVgaCrtcs(dev, false);
- if (nv_two_heads(dev))
- NVSetOwner(dev, bios->state.crtchead);
+ /* Reset the BIOS head to 0. */
+ bios->state.crtchead = 0;
if (bios->major_version < 5) /* BMP only */
load_nv17_hw_sequencer_ucode(dev, bios);
@@ -6216,8 +6735,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
}
}
- NVLockVgaCrtcs(dev, true);
-
return ret;
}
@@ -6238,7 +6755,6 @@ static bool
nouveau_bios_posted(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- bool was_locked;
unsigned htotal;
if (dev_priv->chipset >= NV_50) {
@@ -6248,13 +6764,12 @@ nouveau_bios_posted(struct drm_device *dev)
return true;
}
- was_locked = NVLockVgaCrtcs(dev, false);
htotal = NVReadVgaCrtc(dev, 0, 0x06);
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
- NVLockVgaCrtcs(dev, was_locked);
+
return (htotal != 0);
}
@@ -6263,8 +6778,6 @@ nouveau_bios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
- uint32_t saved_nv_pextdev_boot_0;
- bool was_locked;
int ret;
if (!NVInitVBIOS(dev))
@@ -6284,40 +6797,27 @@ nouveau_bios_init(struct drm_device *dev)
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
- /* these will need remembering across a suspend */
- saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
- bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
-
/* init script execution disabled */
bios->execute = false;
/* ... unless card isn't POSTed already */
if (!nouveau_bios_posted(dev)) {
- NV_INFO(dev, "Adaptor not initialised\n");
- if (dev_priv->card_type < NV_40) {
- NV_ERROR(dev, "Unable to POST this chipset\n");
- return -ENODEV;
- }
-
- NV_INFO(dev, "Running VBIOS init tables\n");
+ NV_INFO(dev, "Adaptor not initialised, "
+ "running VBIOS init tables.\n");
bios->execute = true;
}
- bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
-
ret = nouveau_run_vbios_init(dev);
if (ret)
return ret;
/* feature_byte on BMP is poor, but init always sets CR4B */
- was_locked = NVLockVgaCrtcs(dev, false);
if (bios->major_version < 5)
bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
/* all BIT systems need p_f_m_t for digital_min_front_porch */
if (bios->is_mobile || bios->major_version >= 5)
ret = parse_fp_mode_table(dev, bios);
- NVLockVgaCrtcs(dev, was_locked);
/* allow subsequent scripts to execute */
bios->execute = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index adf4ec2d06c..c1de2f3fcb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -81,6 +81,7 @@ struct dcb_connector_table_entry {
enum dcb_connector_type type;
uint8_t index2;
uint8_t gpio_tag;
+ void *drm;
};
struct dcb_connector_table {
@@ -94,6 +95,7 @@ enum dcb_type {
OUTPUT_TMDS = 2,
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
+ OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
OUTPUT_ANY = -1
};
@@ -117,6 +119,7 @@ struct dcb_entry {
struct {
struct sor_conf sor;
bool use_straps_for_mode;
+ bool use_acpi_for_edid;
bool use_power_scripts;
} lvdsconf;
struct {
@@ -129,6 +132,7 @@ struct dcb_entry {
} dpconf;
struct {
struct sor_conf sor;
+ int slave_addr;
} tmdsconf;
};
bool i2c_upper_default;
@@ -249,8 +253,6 @@ struct nvbios {
struct {
int crtchead;
- /* these need remembering across suspend */
- uint32_t saved_nv_pfb_cfg0;
} state;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6f3c1952237..f6f44779d82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,21 @@
#include <linux/log2.h>
#include <linux/slab.h>
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+ int ret;
+
+ if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+ return 0;
+
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ return ret;
+}
+
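A brief, hypothetical illustration of how the new nouveau_bo_sync_gpu() helper above is intended to be called (the caller below is not part of this diff): when a channel is about to touch a buffer whose last fence belongs to another channel, the helper waits for that work to finish; otherwise it is a no-op.

/* Hypothetical caller: serialise cross-channel access to a BO. */
static int
example_use_bo_on_channel(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	int ret;

	ret = nouveau_bo_sync_gpu(nvbo, chan);	/* no-op if idle or same channel */
	if (ret)
		return ret;

	/* ...now safe to emit commands on chan that reference nvbo... */
	return 0;
}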
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
@@ -51,9 +66,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (nvbo->tile)
nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
- spin_lock(&dev_priv->ttm.bo_list_lock);
- list_del(&nvbo->head);
- spin_unlock(&dev_priv->ttm.bo_list_lock);
kfree(nvbo);
}
@@ -166,9 +178,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
- spin_lock(&dev_priv->ttm.bo_list_lock);
- list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
- spin_unlock(&dev_priv->ttm.bo_list_lock);
*pnvbo = nvbo;
return 0;
}
@@ -461,9 +470,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait_reserve, no_wait_gpu, new_mem);
- if (nvbo->channel && nvbo->channel != chan)
- ret = nouveau_fence_wait(fence, NULL, false, false);
+ evict || (nvbo->channel &&
+ nvbo->channel != chan),
+ no_wait_reserve, no_wait_gpu, new_mem);
nouveau_fence_unref((void *)&fence);
return ret;
}
@@ -711,8 +720,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
return ret;
/* Software copy if the card isn't up and running yet. */
- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
- !dev_priv->channel) {
+ if (!dev_priv->channel) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -783,7 +791,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
- mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index 88f9bc0941e..ca85da78484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -200,7 +200,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, MPLL);
int NVClk = nouveau_hw_get_clock(dev, NVPLL);
- uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
+ uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
@@ -218,7 +218,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.mem_latency = 3;
sim_data.mem_page_miss = 10;
} else {
- sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
+ sim_data.memory_type = nvReadFB(dev, NV04_PFB_CFG0) & 0x1;
sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
sim_data.mem_latency = cfg1 & 0xf;
sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 1fc57ef5829..0480f064f2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -62,7 +62,8 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- drm_get_resource_start(dev, 1),
+ pci_resource_start(dev->pdev,
+ 1),
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
@@ -257,9 +258,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
nouveau_debugfs_channel_fini(chan);
/* Give outstanding push buffers a chance to complete */
- spin_lock_irqsave(&chan->fence.lock, flags);
nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
if (chan->fence.sequence != chan->fence.sequence_ack) {
struct nouveau_fence *fence = NULL;
@@ -368,8 +367,6 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
if (dev_priv->engine.graph.accel_blocked)
return -ENODEV;
@@ -418,7 +415,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_nouveau_channel_free *cfree = data;
struct nouveau_channel *chan;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
nouveau_channel_free(chan);
@@ -430,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 149ed224c3c..fc737037f75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,12 +37,6 @@
#include "nouveau_connector.h"
#include "nouveau_hw.h"
-static inline struct drm_encoder_slave_funcs *
-get_slave_funcs(struct nouveau_encoder *enc)
-{
- return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
-}
-
static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{
@@ -102,60 +96,12 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
kfree(drm_connector);
}
-static void
-nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
-{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
-
- if (dev_priv->card_type >= NV_50)
- return;
-
- *flags = 0;
- if (NVLockVgaCrtcs(dev_priv->dev, false))
- *flags |= 1;
- if (nv_heads_tied(dev_priv->dev))
- *flags |= 2;
-
- if (*flags & 2)
- NVSetOwner(dev_priv->dev, 0); /* necessary? */
-}
-
-static void
-nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
-{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
-
- if (dev_priv->card_type >= NV_50)
- return;
-
- if (flags & 2)
- NVSetOwner(dev_priv->dev, 4);
- if (flags & 1)
- NVLockVgaCrtcs(dev_priv->dev, true);
-}
-
static struct nouveau_i2c_chan *
nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
- uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
- int ret, flags, i;
-
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
+ int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
struct nouveau_i2c_chan *i2c = NULL;
@@ -174,14 +120,8 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
if (nv_encoder->dcb->i2c_index < 0xf)
i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!i2c)
- continue;
-
- nouveau_connector_ddc_prepare(connector, &flags);
- ret = i2c_transfer(&i2c->adapter, msgs, 2);
- nouveau_connector_ddc_finish(connector, flags);
- if (ret == 2) {
+ if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
*pnv_encoder = nv_encoder;
return i2c;
}
@@ -228,27 +168,13 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
static enum drm_connector_status
-nouveau_connector_detect(struct drm_connector *connector)
+nouveau_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_i2c_chan *i2c;
- int type, flags;
-
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
- if (nv_encoder && nv_connector->native_mode) {
- unsigned status = connector_status_connected;
-
-#if defined(CONFIG_ACPI_BUTTON) || \
- (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
- if (!nouveau_ignorelid && !acpi_lid_open())
- status = connector_status_unknown;
-#endif
- nouveau_connector_set_encoder(connector, nv_encoder);
- return status;
- }
+ int type;
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
@@ -259,9 +185,7 @@ nouveau_connector_detect(struct drm_connector *connector)
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
- nouveau_connector_ddc_prepare(connector, &flags);
nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
- nouveau_connector_ddc_finish(connector, flags);
drm_mode_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
@@ -321,6 +245,85 @@ detect_analog:
return connector_status_disconnected;
}
+static enum drm_connector_status
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /* Cleanup the previous EDID block. */
+ if (nv_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ kfree(nv_connector->edid);
+ nv_connector->edid = NULL;
+ }
+
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ if (!nv_encoder)
+ return connector_status_disconnected;
+
+ /* Try retrieving EDID via DDC */
+ if (!dev_priv->vbios.fp_no_ddc) {
+ status = nouveau_connector_detect(connector, force);
+ if (status == connector_status_connected)
+ goto out;
+ }
+
+ /* On some laptops (Sony, I'm looking at you) there appears to
+ * be no direct way of accessing the panel's EDID. The only
+ * option available to us appears to be to ask ACPI for help.
+ *
+ * It's important this check comes before trying straps: one of
+ * said manufacturer's laptops is configured in such a way that
+ * nouveau decides an entry in the VBIOS FP mode table is
+ * valid - it's not (rh#613284).
+ */
+ if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
+ if (!nouveau_acpi_edid(dev, connector)) {
+ status = connector_status_connected;
+ goto out;
+ }
+ }
+
+ /* If no EDID was found above, and the VBIOS indicates a hardcoded
+ * modeline is available for the panel, set it as the panel's
+ * native mode and exit.
+ */
+ if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc ||
+ nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
+ status = connector_status_connected;
+ goto out;
+ }
+
+ /* Still nothing; some VBIOS images have a hardcoded EDID block
+ * for the panel stored in them.
+ */
+ if (!dev_priv->vbios.fp_no_ddc) {
+ struct edid *edid =
+ (struct edid *)nouveau_bios_embedded_edid(dev);
+ if (edid) {
+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ *(nv_connector->edid) = *edid;
+ status = connector_status_connected;
+ }
+ }
+
+out:
+#if defined(CONFIG_ACPI_BUTTON) || \
+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
+ if (status == connector_status_connected &&
+ !nouveau_ignorelid && !acpi_lid_open())
+ status = connector_status_unknown;
+#endif
+
+ drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return status;
+}
+
static void
nouveau_connector_force(struct drm_connector *connector)
{
@@ -353,6 +356,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
int ret;
@@ -425,8 +429,8 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
- return get_slave_funcs(nv_encoder)->
- set_property(to_drm_encoder(nv_encoder), connector, property, value);
+ return get_slave_funcs(encoder)->set_property(
+ encoder, connector, property, value);
return -EINVAL;
}
@@ -441,7 +445,8 @@ nouveau_connector_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- if (helper->mode_valid(connector, mode) != MODE_OK)
+ if (helper->mode_valid(connector, mode) != MODE_OK ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
/* Use preferred mode if there is one.. */
@@ -534,21 +539,30 @@ static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret = 0;
- /* If we're not LVDS, destroy the previous native mode, the attached
- * monitor could have changed.
+ /* Destroy the native mode; the attached monitor could have changed.
*/
- if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
- nv_connector->native_mode) {
+ if (nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
}
if (nv_connector->edid)
ret = drm_add_edid_modes(connector, nv_connector->edid);
+ else
+ if (nv_encoder->dcb->type == OUTPUT_LVDS &&
+ (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
+ dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
+ struct drm_display_mode mode;
+
+ nouveau_bios_fp_mode(dev, &mode);
+ nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
+ }
/* Find the native mode if this is a digital panel, if we didn't
* find any modes through DDC previously add the native mode to
@@ -566,10 +580,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
}
if (nv_encoder->dcb->type == OUTPUT_TV)
- ret = get_slave_funcs(nv_encoder)->
- get_modes(to_drm_encoder(nv_encoder), connector);
+ ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
- if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
+ nv_connector->dcb->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -582,6 +596,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
unsigned clock = mode->clock;
@@ -608,8 +623,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 350000;
break;
case OUTPUT_TV:
- return get_slave_funcs(nv_encoder)->
- mode_valid(to_drm_encoder(nv_encoder), mode);
+ return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case OUTPUT_DP:
if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
max_clock = nv_encoder->dp.link_nr * 270000;
@@ -643,6 +657,44 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
+void
+nouveau_connector_set_polling(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ bool spare_crtc = false;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ spare_crtc |= !crtc->enabled;
+
+ connector->polled = 0;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_TV:
+ if (dev_priv->card_type >= NV_50 ||
+ (nv_gf4_disp_arch(dev) && spare_crtc))
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ break;
+
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ spare_crtc)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ break;
+
+ default:
+ break;
+ }
+}
+
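Because the polling mode chosen by nouveau_connector_set_polling() depends on whether a spare CRTC is free (analog load detection needs one), the driver presumably re-runs it when CRTC state changes. A hypothetical refresh loop, not part of this diff:

/* Hypothetical: recompute polling flags after CRTCs are enabled/disabled. */
static void
example_refresh_connector_polling(struct drm_device *dev)
{
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		nouveau_connector_set_polling(connector);
}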
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
@@ -662,148 +714,74 @@ nouveau_connector_funcs = {
.force = nouveau_connector_force
};
-static int
-nouveau_connector_create_lvds(struct drm_device *dev,
- struct drm_connector *connector)
-{
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *i2c = NULL;
- struct nouveau_encoder *nv_encoder;
- struct drm_display_mode native, *mode, *temp;
- bool dummy, if_is_24bit = false;
- int ret, flags;
-
- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
- if (!nv_encoder)
- return -ENODEV;
-
- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
- if (ret) {
- NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
- return ret;
- }
- nv_connector->use_dithering = !if_is_24bit;
-
- /* Firstly try getting EDID over DDC, if allowed and I2C channel
- * is available.
- */
- if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
-
- if (i2c) {
- nouveau_connector_ddc_prepare(connector, &flags);
- nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
- nouveau_connector_ddc_finish(connector, flags);
- }
-
- /* If no EDID found above, and the VBIOS indicates a hardcoded
- * modeline is avalilable for the panel, set it as the panel's
- * native mode and exit.
- */
- if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
- (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
- dev_priv->vbios.fp_no_ddc)) {
- nv_connector->native_mode = drm_mode_duplicate(dev, &native);
- goto out;
- }
-
- /* Still nothing, some VBIOS images have a hardcoded EDID block
- * stored for the panel stored in them.
- */
- if (!nv_connector->edid && !nv_connector->native_mode &&
- !dev_priv->vbios.fp_no_ddc) {
- struct edid *edid =
- (struct edid *)nouveau_bios_embedded_edid(dev);
- if (edid) {
- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- *(nv_connector->edid) = *edid;
- }
- }
-
- if (!nv_connector->edid)
- goto out;
-
- /* We didn't find/use a panel mode from the VBIOS, so parse the EDID
- * block and look for the preferred mode there.
- */
- ret = drm_add_edid_modes(connector, nv_connector->edid);
- if (ret == 0)
- goto out;
- nv_connector->detected_encoder = nv_encoder;
- nv_connector->native_mode = nouveau_connector_native_mode(connector);
- list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
- drm_mode_remove(connector, mode);
-
-out:
- if (!nv_connector->native_mode) {
- NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
- "determine its native mode. Disabling.\n");
- return -ENODEV;
- }
-
- drm_mode_connector_update_edid_property(connector, nv_connector->edid);
- return 0;
-}
+static const struct drm_connector_funcs
+nouveau_connector_funcs_lvds = {
+ .dpms = drm_helper_connector_dpms,
+ .save = NULL,
+ .restore = NULL,
+ .detect = nouveau_connector_detect_lvds,
+ .destroy = nouveau_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = nouveau_connector_set_property,
+ .force = nouveau_connector_force
+};
-int
-nouveau_connector_create(struct drm_device *dev,
- struct dcb_connector_table_entry *dcb)
+struct drm_connector *
+nouveau_connector_create(struct drm_device *dev, int index)
{
+ const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
+ struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
- struct drm_encoder *encoder;
- int ret, type;
+ int type, ret = 0;
NV_DEBUG_KMS(dev, "\n");
+ if (index >= dev_priv->vbios.dcb.connector.entries)
+ return ERR_PTR(-EINVAL);
+
+ dcb = &dev_priv->vbios.dcb.connector.entry[index];
+ if (dcb->drm)
+ return dcb->drm;
+
switch (dcb->type) {
- case DCB_CONNECTOR_NONE:
- return 0;
case DCB_CONNECTOR_VGA:
- NV_INFO(dev, "Detected a VGA connector\n");
type = DRM_MODE_CONNECTOR_VGA;
break;
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
case DCB_CONNECTOR_TV_3:
- NV_INFO(dev, "Detected a TV connector\n");
type = DRM_MODE_CONNECTOR_TV;
break;
case DCB_CONNECTOR_DVI_I:
- NV_INFO(dev, "Detected a DVI-I connector\n");
type = DRM_MODE_CONNECTOR_DVII;
break;
case DCB_CONNECTOR_DVI_D:
- NV_INFO(dev, "Detected a DVI-D connector\n");
type = DRM_MODE_CONNECTOR_DVID;
break;
case DCB_CONNECTOR_HDMI_0:
case DCB_CONNECTOR_HDMI_1:
- NV_INFO(dev, "Detected a HDMI connector\n");
type = DRM_MODE_CONNECTOR_HDMIA;
break;
case DCB_CONNECTOR_LVDS:
- NV_INFO(dev, "Detected a LVDS connector\n");
type = DRM_MODE_CONNECTOR_LVDS;
+ funcs = &nouveau_connector_funcs_lvds;
break;
case DCB_CONNECTOR_DP:
- NV_INFO(dev, "Detected a DisplayPort connector\n");
type = DRM_MODE_CONNECTOR_DisplayPort;
break;
case DCB_CONNECTOR_eDP:
- NV_INFO(dev, "Detected an eDP connector\n");
type = DRM_MODE_CONNECTOR_eDP;
break;
default:
NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
nv_connector->dcb = dcb;
connector = &nv_connector->base;
@@ -811,27 +789,21 @@ nouveau_connector_create(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
+ drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- /* attach encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->connector != dcb->index)
- continue;
-
- if (get_slave_funcs(nv_encoder))
- get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+ /* Check if we need dithering enabled */
+ if (dcb->type == DCB_CONNECTOR_LVDS) {
+ bool dummy, is_24bit = false;
- drm_mode_connector_attach_encoder(connector, encoder);
- }
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+ if (ret) {
+ NV_ERROR(dev, "Error parsing LVDS table, disabling "
+ "LVDS\n");
+ goto fail;
+ }
- if (!connector->encoder_ids[0]) {
- NV_WARN(dev, " no encoders, ignoring\n");
- drm_connector_cleanup(connector);
- kfree(connector);
- return 0;
+ nv_connector->use_dithering = !is_24bit;
}
/* Init DVI-I specific properties */
@@ -841,12 +813,8 @@ nouveau_connector_create(struct drm_device *dev,
drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
}
- if (dcb->type != DCB_CONNECTOR_LVDS)
- nv_connector->use_dithering = false;
-
switch (dcb->type) {
case DCB_CONNECTOR_VGA:
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
@@ -858,17 +826,6 @@ nouveau_connector_create(struct drm_device *dev,
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
- case DCB_CONNECTOR_DP:
- case DCB_CONNECTOR_eDP:
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- case DCB_CONNECTOR_DVI_I:
- case DCB_CONNECTOR_DVI_D:
- if (dev_priv->card_type >= NV_50)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- /* fall-through */
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -882,15 +839,15 @@ nouveau_connector_create(struct drm_device *dev,
break;
}
+ nouveau_connector_set_polling(connector);
+
drm_sysfs_connector_add(connector);
+ dcb->drm = connector;
+ return dcb->drm;
- if (dcb->type == DCB_CONNECTOR_LVDS) {
- ret = nouveau_connector_create_lvds(dev, connector);
- if (ret) {
- connector->funcs->destroy(connector);
- return ret;
- }
- }
+fail:
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ return ERR_PTR(ret);
- return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 4ef38abc2d9..0d2e668ccfe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,7 +49,10 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
-int nouveau_connector_create(struct drm_device *,
- struct dcb_connector_table_entry *);
+struct drm_connector *
+nouveau_connector_create(struct drm_device *, int index);
+
+void
+nouveau_connector_set_polling(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
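With the prototype change above, connector creation is now driven by a DCB connector-table index and returns the drm_connector (or an ERR_PTR). A hypothetical caller, assuming the display setup code simply walks dev_priv->vbios.dcb.connector.entries (the real callers live in the nv04/nv50 display code, outside this excerpt):

/* Hypothetical caller sketch for the index-based interface. */
static int
example_create_connectors(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->vbios.dcb.connector.entries; i++) {
		struct drm_connector *conn = nouveau_connector_create(dev, i);

		/* A real caller might skip a failed entry rather than abort. */
		if (IS_ERR(conn))
			return PTR_ERR(conn);
	}

	return 0;
}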
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 74e6b4ed12c..2e11fd65b4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -84,16 +84,16 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
- return NULL;
+ return ERR_PTR(-ENOENT);
nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
if (ret) {
drm_gem_object_unreference(gem);
- return NULL;
+ return ERR_PTR(ret);
}
return &nouveau_fb->base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65c441a1999..2e3c6caa97e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -92,11 +92,9 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* Map M2MF notifier object - fbcon. */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = nouveau_bo_map(chan->notifier_bo);
- if (ret)
- return ret;
- }
+ ret = nouveau_bo_map(chan->notifier_bo);
+ if (ret)
+ return ret;
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index deeb21c6865..8a1b188b4cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -23,8 +23,10 @@
*/
#include "drmP.h"
+
#include "nouveau_drv.h"
#include "nouveau_i2c.h"
+#include "nouveau_connector.h"
#include "nouveau_encoder.h"
static int
@@ -270,13 +272,39 @@ bool
nouveau_dp_link_train(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint8_t config[4];
- uint8_t status[3];
+ struct nouveau_connector *nv_connector;
+ struct bit_displayport_encoder_table *dpe;
+ int dpe_headerlen;
+ uint8_t config[4], status[3];
bool cr_done, cr_max_vs, eq_done;
int ret = 0, i, tries, voltage;
NV_DEBUG_KMS(dev, "link training!!\n");
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!nv_connector)
+ return false;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe) {
+ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
+ return false;
+ }
+
+ /* Disable hotplug detect; it flips around on some panels during
+ * link training.
+ */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+
+ if (dpe->script0) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
+ nv_encoder->dcb);
+ }
+
train:
cr_done = eq_done = false;
@@ -403,6 +431,15 @@ stop:
}
}
+ if (dpe->script1) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
+ nv_encoder->dcb);
+ }
+
+ /* re-enable hotplug detect */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+
return eq_done;
}
@@ -535,47 +572,64 @@ out:
return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
}
-int
-nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
+static int
+nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
+ struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
struct drm_device *dev = auxch->dev;
- int ret = 0, cmd, addr = algo_data->address;
- uint8_t *buf;
-
- if (mode == MODE_I2C_READ) {
- cmd = AUX_I2C_READ;
- buf = read_byte;
- } else {
- cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
- buf = &write_byte;
- }
+ struct i2c_msg *msg = msgs;
+ int ret, mcnt = num;
- if (!(mode & MODE_I2C_STOP))
- cmd |= AUX_I2C_MOT;
+ while (mcnt--) {
+ u8 remaining = msg->len;
+ u8 *ptr = msg->buf;
- if (mode & MODE_I2C_START)
- return 1;
+ while (remaining) {
+ u8 cnt = (remaining > 16) ? 16 : remaining;
+ u8 cmd;
- for (;;) {
- ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
- if (ret < 0)
- return ret;
-
- switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
- case NV50_AUXCH_STAT_REPLY_I2C_ACK:
- return 1;
- case NV50_AUXCH_STAT_REPLY_I2C_NACK:
- return -EREMOTEIO;
- case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
- udelay(100);
- break;
- default:
- NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
- return -EREMOTEIO;
+ if (msg->flags & I2C_M_RD)
+ cmd = AUX_I2C_READ;
+ else
+ cmd = AUX_I2C_WRITE;
+
+ if (mcnt || remaining > 16)
+ cmd |= AUX_I2C_MOT;
+
+ ret = nouveau_dp_auxch(auxch, cmd, msg->addr, ptr, cnt);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
+ case NV50_AUXCH_STAT_REPLY_I2C_ACK:
+ break;
+ case NV50_AUXCH_STAT_REPLY_I2C_NACK:
+ return -EREMOTEIO;
+ case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
+ udelay(100);
+ continue;
+ default:
+ NV_ERROR(dev, "bad auxch reply: 0x%08x\n", ret);
+ return -EREMOTEIO;
+ }
+
+ ptr += cnt;
+ remaining -= cnt;
}
+
+ msg++;
}
+
+ return num;
+}
+
+static u32
+nouveau_dp_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+const struct i2c_algorithm nouveau_dp_i2c_algo = {
+ .master_xfer = nouveau_dp_i2c_xfer,
+ .functionality = nouveau_dp_i2c_func
+};
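The rewritten master_xfer hook above drives ordinary i2c_msg transactions over the DP AUX channel, chunking anything longer than 16 bytes into multiple AUX transactions and keeping the MOT (middle-of-transaction) bit set while more data or messages remain. An illustrative use, assuming auxch is a nouveau_i2c_chan wrapping an AUX channel as elsewhere in this patch:

/* Illustrative only: read a 128-byte EDID block via the AUX-channel adapter. */
static int
example_read_edid_over_aux(struct nouveau_i2c_chan *auxch, u8 *edid)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};
	int ret;

	/* The 128-byte read is split into 16-byte AUX transactions. */
	ret = i2c_transfer(&auxch->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}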
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 27377043229..1de5eb53e01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,10 +35,6 @@
#include "drm_pciids.h"
-MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
-int nouveau_ctxfw = 0;
-module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
-
MODULE_PARM_DESC(noagp, "Disable AGP");
int nouveau_noagp;
module_param_named(noagp, nouveau_noagp, int, 0400);
@@ -56,7 +52,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify = 1;
+int nouveau_vram_notify = 0;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -132,7 +128,7 @@ static struct drm_driver driver;
static int __devinit
nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ return drm_get_pci_dev(pdev, ent, &driver);
}
static void
@@ -155,9 +151,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct drm_crtc *crtc;
int ret, i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
@@ -257,9 +250,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_crtc *crtc;
int ret, i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
@@ -269,6 +259,13 @@ nouveau_pci_resume(struct pci_dev *pdev)
return -1;
pci_set_master(dev->pdev);
+ /* Make sure the AGP controller is in a consistent state */
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
+ nouveau_mem_reset_agp(dev);
+
+ /* Make the CRTCs accessible */
+ engine->display.early_init(dev);
+
NV_INFO(dev, "POSTing device...\n");
ret = nouveau_run_vbios_init(dev);
if (ret)
@@ -323,7 +320,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -332,11 +328,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
- if (dev_priv->card_type < NV_50) {
- nv04_display_restore(dev);
- NVLockVgaCrtcs(dev, false);
- } else
- nv50_display_init(dev);
+ engine->display.init(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -371,7 +363,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_MODESET,
.load = nouveau_load,
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
@@ -438,16 +431,18 @@ static int __init nouveau_init(void)
nouveau_modeset = 1;
}
- if (nouveau_modeset == 1) {
- driver.driver_features |= DRIVER_MODESET;
- nouveau_register_dsm_handler();
- }
+ if (!nouveau_modeset)
+ return 0;
+ nouveau_register_dsm_handler();
return drm_init(&driver);
}
static void __exit nouveau_exit(void)
{
+ if (!nouveau_modeset)
+ return;
+
drm_exit(&driver);
nouveau_unregister_dsm_handler();
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c6971910648..b1be617373b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,14 +123,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- uint64_t start;
- uint64_t size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
enum nouveau_flags {
NV_NFORCE = 0x10000000,
NV_NFORCE2 = 0x20000000
@@ -149,7 +141,7 @@ struct nouveau_gpuobj {
struct list_head list;
struct nouveau_channel *im_channel;
- struct mem_block *im_pramin;
+ struct drm_mm_node *im_pramin;
struct nouveau_bo *im_backing;
uint32_t im_backing_start;
uint32_t *im_backing_suspend;
@@ -196,7 +188,7 @@ struct nouveau_channel {
struct list_head pending;
uint32_t sequence;
uint32_t sequence_ack;
- uint32_t last_sequence_irq;
+ atomic_t last_sequence_irq;
} fence;
/* DMA push buffer */
@@ -206,7 +198,7 @@ struct nouveau_channel {
/* Notifier memory */
struct nouveau_bo *notifier_bo;
- struct mem_block *notifier_heap;
+ struct drm_mm notifier_heap;
/* PFIFO context */
struct nouveau_gpuobj_ref *ramfc;
@@ -224,7 +216,7 @@ struct nouveau_channel {
/* Objects */
struct nouveau_gpuobj_ref *ramin; /* Private instmem */
- struct mem_block *ramin_heap; /* Private PRAMIN heap */
+ struct drm_mm ramin_heap; /* Private PRAMIN heap */
struct nouveau_gpuobj_ref *ramht; /* Hash table */
struct list_head ramht_refs; /* Objects referenced by RAMHT */
@@ -277,8 +269,7 @@ struct nouveau_instmem_engine {
void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
- void (*prepare_access)(struct drm_device *, bool write);
- void (*finish_access)(struct drm_device *);
+ void (*flush)(struct drm_device *);
};
struct nouveau_mc_engine {
@@ -303,10 +294,11 @@ struct nouveau_fb_engine {
};
struct nouveau_fifo_engine {
- void *priv;
-
int channels;
+ struct nouveau_gpuobj_ref *playlist[2];
+ int cur_playlist;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -339,10 +331,11 @@ struct nouveau_pgraph_object_class {
struct nouveau_pgraph_engine {
struct nouveau_pgraph_object_class *grclass;
bool accel_blocked;
- void *ctxprog;
- void *ctxvals;
int grctx_size;
+ /* NV2x/NV3x context table (0x400780) */
+ struct nouveau_gpuobj_ref *ctx_table;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -358,6 +351,24 @@ struct nouveau_pgraph_engine {
uint32_t size, uint32_t pitch);
};
+struct nouveau_display_engine {
+ int (*early_init)(struct drm_device *);
+ void (*late_takedown)(struct drm_device *);
+ int (*create)(struct drm_device *);
+ int (*init)(struct drm_device *);
+ void (*destroy)(struct drm_device *);
+};
+
+struct nouveau_gpio_engine {
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*get)(struct drm_device *, enum dcb_gpio_tag);
+ int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
+
+ void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+};
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -365,6 +376,8 @@ struct nouveau_engine {
struct nouveau_fb_engine fb;
struct nouveau_pgraph_engine graph;
struct nouveau_fifo_engine fifo;
+ struct nouveau_display_engine display;
+ struct nouveau_gpio_engine gpio;
};
struct nouveau_pll_vals {
@@ -397,7 +410,7 @@ enum nv04_fp_display_regs {
struct nv04_crtc_reg {
unsigned char MiscOutReg; /* */
- uint8_t CRTC[0x9f];
+ uint8_t CRTC[0xa0];
uint8_t CR58[0x10];
uint8_t Sequencer[5];
uint8_t Graphics[9];
@@ -496,15 +509,11 @@ enum nouveau_card_type {
NV_30 = 0x30,
NV_40 = 0x40,
NV_50 = 0x50,
+ NV_C0 = 0xc0,
};
struct drm_nouveau_private {
struct drm_device *dev;
- enum {
- NOUVEAU_CARD_INIT_DOWN,
- NOUVEAU_CARD_INIT_DONE,
- NOUVEAU_CARD_INIT_FAILED
- } init_state;
/* the card type, takes NV_* as values */
enum nouveau_card_type card_type;
@@ -525,16 +534,12 @@ struct drm_nouveau_private {
struct list_head vbl_waiting;
struct {
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- spinlock_t bo_list_lock;
- struct list_head bo_list;
atomic_t validate_sequence;
} ttm;
- struct fb_info *fbdev_info;
-
int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
@@ -595,11 +600,7 @@ struct drm_nouveau_private {
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
- struct mem_block *ramin_heap;
-
- /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
- uint32_t ctx_table_size;
- struct nouveau_gpuobj_ref *ctx_table;
+ struct drm_mm ramin_heap;
struct list_head gpuobj_list;
@@ -618,6 +619,11 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
struct nouveau_channel *evo;
+ struct {
+ struct dcb_entry *dcb;
+ u16 script;
+ u32 pclk;
+ } evo_irq;
struct {
struct dentry *channel_root;
@@ -652,14 +658,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
- struct drm_nouveau_private *nv = dev->dev_private; \
- if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
- NV_ERROR(dev, "called without init\n"); \
- return -EINVAL; \
- } \
-} while (0)
-
#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
struct drm_nouveau_private *nv = dev->dev_private; \
if (!nouveau_channel_owner(dev, (cl), (id))) { \
@@ -682,7 +680,6 @@ extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
-extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
@@ -707,17 +704,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
/* nouveau_mem.c */
-extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
- uint64_t size);
-extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
- uint64_t size, int align2,
- struct drm_file *, int tail);
-extern void nouveau_mem_takedown(struct mem_block **heap);
-extern void nouveau_mem_free_block(struct mem_block *);
extern int nouveau_mem_detect(struct drm_device *dev);
-extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
+extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
uint32_t addr,
@@ -857,11 +847,13 @@ void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
+static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
#endif
/* nouveau_backlight.c */
@@ -924,6 +916,10 @@ extern void nv10_fb_takedown(struct drm_device *);
extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
+/* nv30_fb.c */
+extern int nv30_fb_init(struct drm_device *);
+extern void nv30_fb_takedown(struct drm_device *);
+
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
@@ -934,6 +930,10 @@ extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
+/* nvc0_fb.c */
+extern int nvc0_fb_init(struct drm_device *);
+extern void nvc0_fb_takedown(struct drm_device *);
+
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
@@ -971,6 +971,20 @@ extern void nv50_fifo_destroy_context(struct nouveau_channel *);
extern int nv50_fifo_load_context(struct nouveau_channel *);
extern int nv50_fifo_unload_context(struct drm_device *);
+/* nvc0_fifo.c */
+extern int nvc0_fifo_init(struct drm_device *);
+extern void nvc0_fifo_takedown(struct drm_device *);
+extern void nvc0_fifo_disable(struct drm_device *);
+extern void nvc0_fifo_enable(struct drm_device *);
+extern bool nvc0_fifo_reassign(struct drm_device *, bool);
+extern bool nvc0_fifo_cache_flush(struct drm_device *);
+extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
+extern int nvc0_fifo_channel_id(struct drm_device *);
+extern int nvc0_fifo_create_context(struct nouveau_channel *);
+extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
+extern int nvc0_fifo_load_context(struct nouveau_channel *);
+extern int nvc0_fifo_unload_context(struct drm_device *);
+
/* nv04_graph.c */
extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
extern int nv04_graph_init(struct drm_device *);
@@ -1035,11 +1049,15 @@ extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
-/* nouveau_grctx.c */
-extern int nouveau_grctx_prog_load(struct drm_device *);
-extern void nouveau_grctx_vals_load(struct drm_device *,
- struct nouveau_gpuobj *);
-extern void nouveau_grctx_fini(struct drm_device *);
+/* nvc0_graph.c */
+extern int nvc0_graph_init(struct drm_device *);
+extern void nvc0_graph_takedown(struct drm_device *);
+extern void nvc0_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nvc0_graph_channel(struct drm_device *);
+extern int nvc0_graph_create_context(struct nouveau_channel *);
+extern void nvc0_graph_destroy_context(struct nouveau_channel *);
+extern int nvc0_graph_load_context(struct nouveau_channel *);
+extern int nvc0_graph_unload_context(struct drm_device *);
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
@@ -1051,8 +1069,7 @@ extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv04_instmem_finish_access(struct drm_device *);
+extern void nv04_instmem_flush(struct drm_device *);
/* nv50_instmem.c */
extern int nv50_instmem_init(struct drm_device *);
@@ -1064,8 +1081,21 @@ extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv50_instmem_finish_access(struct drm_device *);
+extern void nv50_instmem_flush(struct drm_device *);
+extern void nv84_instmem_flush(struct drm_device *);
+extern void nv50_vm_flush(struct drm_device *, int engine);
+
+/* nvc0_instmem.c */
+extern int nvc0_instmem_init(struct drm_device *);
+extern void nvc0_instmem_takedown(struct drm_device *);
+extern int nvc0_instmem_suspend(struct drm_device *);
+extern void nvc0_instmem_resume(struct drm_device *);
+extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nvc0_instmem_flush(struct drm_device *);
/* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *);
@@ -1088,13 +1118,14 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
/* nv04_dac.c */
-extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *);
extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
extern int nv04_dac_output_offset(struct drm_encoder *encoder);
extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+extern bool nv04_dac_in_use(struct drm_encoder *encoder);
/* nv04_dfp.c */
-extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *);
extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
int head, bool dl);
@@ -1103,15 +1134,17 @@ extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
/* nv04_tv.c */
extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
-extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *);
/* nv17_tv.c */
-extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
/* nv04_display.c */
+extern int nv04_display_early_init(struct drm_device *);
+extern void nv04_display_late_takedown(struct drm_device *);
extern int nv04_display_create(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
extern void nv04_display_destroy(struct drm_device *);
-extern void nv04_display_restore(struct drm_device *);
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1132,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1147,7 +1181,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
extern int nouveau_fence_flush(void *obj, void *arg);
extern void nouveau_fence_unref(void **obj);
extern void *nouveau_fence_ref(void *obj);
-extern void nouveau_fence_handler(struct drm_device *dev, int channel);
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1167,13 +1200,15 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
-/* nv17_gpio.c */
-int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+/* nv10_gpio.c */
+int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_gpio.c */
+int nv50_gpio_init(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
/* nv50_calc.c */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1220,6 +1255,14 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
iowrite32_native(val, dev_priv->mmio + reg);
}
+static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
+{
+ u32 tmp = nv_rd32(dev, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ nv_wr32(dev, reg, tmp);
+}
+
static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -1346,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
return false;
}
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+ unsigned sub_vendor, unsigned sub_device)
+{
+ return dev->pdev->device == device &&
+ dev->pdev->subsystem_vendor == sub_vendor &&
+ dev->pdev->subsystem_device == sub_device;
+}
+
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
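Aside (not part of the patch): the nouveau_drv.h hunk above adds nv_mask(), a read-modify-write helper layered on nv_rd32()/nv_wr32(). A minimal user-space sketch of the same masked-update semantics follows; the register is stubbed with a plain variable and every name below is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an MMIO register; in the driver nv_rd32()/nv_wr32()
 * go through the mapped BAR instead of a plain variable. */
static uint32_t fake_reg = 0xdeadbeef;

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

/* Same shape as the new nv_mask(): clear 'mask', then OR in 'val'. */
static void reg_mask(uint32_t mask, uint32_t val)
{
	uint32_t tmp = reg_read();
	tmp &= ~mask;
	tmp |= val;
	reg_write(tmp);
}

int main(void)
{
	reg_mask(0x000000ff, 0x00000012);        /* replace the low byte only */
	printf("0x%08x\n", (unsigned)fake_reg);  /* prints 0xdeadbe12 */
	return 0;
}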
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e1df8209cd0..7c82d68bc15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -38,13 +38,15 @@ struct nouveau_encoder {
struct dcb_entry *dcb;
int or;
+ /* different from drm_encoder.crtc: this reflects what's
+ * actually programmed on the hw, not the proposed crtc */
+ struct drm_crtc *crtc;
+
struct drm_display_mode mode;
int last_dpms;
struct nv04_output_reg restore;
- void (*disconnect)(struct nouveau_encoder *encoder);
-
union {
struct {
int mc_unknown;
@@ -69,10 +71,16 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
return &enc->base.base;
}
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
-int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
+int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
struct bit_displayport_encoder_table {
uint32_t match;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 257ea130ae1..d2047713dc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -250,6 +250,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
+ info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
@@ -280,6 +281,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
+ case NV_C0:
+ break;
case NV_50:
nv50_fbcon_accel_init(info);
info->fbops = &nv50_fbcon_ops;
@@ -333,7 +336,7 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
-int
+static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
@@ -349,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
+ drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index faddf53ff9e..87ac21ec23d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,13 +64,15 @@ nouveau_fence_update(struct nouveau_channel *chan)
struct nouveau_fence *fence;
uint32_t sequence;
+ spin_lock(&chan->fence.lock);
+
if (USE_REFCNT)
sequence = nvchan_rd32(chan, 0x48);
else
- sequence = chan->fence.last_sequence_irq;
+ sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
- return;
+ goto out;
chan->fence.sequence_ack = sequence;
list_for_each_safe(entry, tmp, &chan->fence.pending) {
@@ -84,6 +86,8 @@ nouveau_fence_update(struct nouveau_channel *chan)
if (sequence == chan->fence.sequence_ack)
break;
}
+out:
+ spin_unlock(&chan->fence.lock);
}
int
@@ -119,7 +123,6 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
struct nouveau_channel *chan = fence->channel;
- unsigned long flags;
int ret;
ret = RING_SPACE(chan, 2);
@@ -127,9 +130,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return ret;
if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
- spin_lock_irqsave(&chan->fence.lock, flags);
nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
BUG_ON(chan->fence.sequence ==
chan->fence.sequence_ack - 1);
@@ -138,9 +139,9 @@ nouveau_fence_emit(struct nouveau_fence *fence)
fence->sequence = ++chan->fence.sequence;
kref_get(&fence->refcount);
- spin_lock_irqsave(&chan->fence.lock, flags);
+ spin_lock(&chan->fence.lock);
list_add_tail(&fence->entry, &chan->fence.pending);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
+ spin_unlock(&chan->fence.lock);
BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
@@ -173,14 +174,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;
- unsigned long flags;
if (fence->signalled)
return true;
- spin_lock_irqsave(&chan->fence.lock, flags);
nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
return fence->signalled;
}
@@ -190,8 +188,6 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
unsigned long timeout = jiffies + (3 * DRM_HZ);
int ret = 0;
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
while (1) {
if (nouveau_fence_signalled(sync_obj, sync_arg))
break;
@@ -201,6 +197,8 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
break;
}
+ __set_current_state(intr ? TASK_INTERRUPTIBLE
+ : TASK_UNINTERRUPTIBLE);
if (lazy)
schedule_timeout(1);
@@ -221,27 +219,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
return 0;
}
-void
-nouveau_fence_handler(struct drm_device *dev, int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
-
- if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
- chan = dev_priv->fifos[channel];
-
- if (chan) {
- spin_lock_irq(&chan->fence.lock);
- nouveau_fence_update(chan);
- spin_unlock_irq(&chan->fence.lock);
- }
-}
-
int
nouveau_fence_init(struct nouveau_channel *chan)
{
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
+ atomic_set(&chan->fence.last_sequence_irq, 0);
return 0;
}
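Aside (not part of the patch): the nouveau_fence.c hunks above turn chan->fence.last_sequence_irq into an atomic_t and move the spinlock inside nouveau_fence_update(), so the IRQ path only stores the newest completed sequence and readers no longer wrap the call in irq-safe locking. A rough user-space analogue of that pattern, assuming C11 atomics and a fake "IRQ" thread; nothing here is driver code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for chan->fence.last_sequence_irq: the "IRQ" side only ever
 * stores the newest completed sequence, the reader only loads it. */
static atomic_uint last_sequence_irq;

static void *fake_irq_handler(void *arg)
{
	(void)arg;
	for (unsigned s = 1; s <= 5; s++) {
		usleep(1000);
		atomic_store(&last_sequence_irq, s);
	}
	return NULL;
}

/* Shape of nouveau_fence_update(): read the acked sequence and signal
 * everything up to it; here "signalling" is just a printf. */
static void fence_update(unsigned *sequence_ack)
{
	unsigned seq = atomic_load(&last_sequence_irq);

	if (seq == *sequence_ack)
		return;
	printf("signalling fences %u..%u\n", *sequence_ack + 1, seq);
	*sequence_ack = seq;
}

int main(void)
{
	pthread_t irq;
	unsigned sequence_ack = 0;

	pthread_create(&irq, NULL, fake_irq_handler, NULL);
	while (sequence_ack < 5) {
		fence_update(&sequence_ack);
		usleep(500);
	}
	pthread_join(irq, NULL);
	return 0;
}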
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 69c76cf9340..19620a6709f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -137,8 +137,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
uint32_t flags = 0;
int ret = 0;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
@@ -169,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
goto out;
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(nvbo->gem);
out:
- drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
- if (ret)
- drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}
@@ -247,7 +243,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference(nvbo->gem);
+ drm_gem_object_unreference_unlocked(nvbo->gem);
}
}
@@ -286,7 +282,7 @@ retry:
if (!gem) {
NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
- return -EINVAL;
+ return -ENOENT;
}
nvbo = gem->driver_private;
@@ -302,7 +298,7 @@ retry:
validate_fini(op, NULL);
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
if (ret) {
NV_ERROR(dev, "fail reserve\n");
return ret;
@@ -339,7 +335,9 @@ retry:
return -EINVAL;
}
+ mutex_unlock(&drm_global_mutex);
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+ mutex_lock(&drm_global_mutex);
if (ret) {
NV_ERROR(dev, "fail wait_cpu\n");
return ret;
@@ -363,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
- if (unlikely(ret)) {
- NV_ERROR(dev, "fail wait other chan\n");
- return ret;
- }
+ ret = nouveau_bo_sync_gpu(nvbo, chan);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail pre-validate sync\n");
+ return ret;
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -383,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- nvbo->channel = chan;
+ nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false, false);
nvbo->channel = NULL;
@@ -392,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
+ ret = nouveau_bo_sync_gpu(nvbo, chan);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail post-validate sync\n");
+ return ret;
+ }
+
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -577,10 +576,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan;
struct validate_op op;
- struct nouveau_fence *fence = 0;
+ struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
req->vram_available = dev_priv->fb_aper_free;
@@ -616,7 +614,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return PTR_ERR(bo);
}
- mutex_lock(&dev->struct_mutex);
+ /* Mark push buffers as being used on PFIFO: the validation code
+ * will then make sure that if a pushbuf bo has to move, the move
+ * happens on the kernel channel, which will in turn force a sync
+ * before we try to submit the push buffer.
+ */
+ for (i = 0; i < req->nr_push; i++) {
+ if (push[i].bo_index >= req->nr_buffers) {
+ NV_ERROR(dev, "push %d buffer not in list\n", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bo[push[i].bo_index].read_domains |= (1 << 31);
+ }
/* Validate buffer list */
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -650,7 +661,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (dev_priv->card_type >= NV_20) {
+ if (dev_priv->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -716,7 +727,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref((void**)&fence);
- mutex_unlock(&dev->struct_mutex);
kfree(bo);
kfree(push);
@@ -725,7 +735,7 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (dev_priv->card_type >= NV_20) {
+ if (dev_priv->chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
@@ -760,11 +770,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
int ret = -EINVAL;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
- return ret;
+ return -ENOENT;
nvbo = nouveau_gem_object(gem);
if (nvbo->cpu_filp) {
@@ -800,11 +808,9 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
int ret = -EINVAL;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
- return ret;
+ return -ENOENT;
nvbo = nouveau_gem_object(gem);
if (nvbo->cpu_filp != file_priv)
@@ -827,11 +833,9 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
- return -EINVAL;
+ return -ENOENT;
ret = nouveau_gem_info(gem, req);
drm_gem_object_unreference_unlocked(gem);
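Aside (not part of the patch): the nouveau_gem_ioctl_new() hunk above replaces the unbalanced handle/object unreference pair with a single unconditional drop of the allocation reference once drm_gem_handle_create() has taken its own. A toy refcount sketch of that ownership hand-off; this is illustrative only, not the DRM API.

#include <stdio.h>

/* Toy object with a bare reference count. */
struct object {
	int refcount;
};

static void obj_get(struct object *o) { o->refcount++; }
static void obj_put(struct object *o)
{
	if (--o->refcount == 0)
		printf("object freed\n");
}

/* A userspace handle is just another reference holder. */
static void handle_create(struct object *o) { obj_get(o); }
static void handle_close(struct object *o)  { obj_put(o); }

int main(void)
{
	struct object obj = { .refcount = 1 };  /* reference from allocate */

	handle_create(&obj);  /* the handle now holds its own reference */
	obj_put(&obj);        /* drop reference from allocate - handle holds it */

	handle_close(&obj);   /* closing the last handle frees the object */
	return 0;
}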
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
deleted file mode 100644
index f731c5f6053..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <linux/slab.h>
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-
-struct nouveau_ctxprog {
- uint32_t signature;
- uint8_t version;
- uint16_t length;
- uint32_t data[];
-} __attribute__ ((packed));
-
-struct nouveau_ctxvals {
- uint32_t signature;
- uint8_t version;
- uint32_t length;
- struct {
- uint32_t offset;
- uint32_t value;
- } data[];
-} __attribute__ ((packed));
-
-int
-nouveau_grctx_prog_load(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- const int chipset = dev_priv->chipset;
- const struct firmware *fw;
- const struct nouveau_ctxprog *cp;
- const struct nouveau_ctxvals *cv;
- char name[32];
- int ret, i;
-
- if (pgraph->accel_blocked)
- return -ENODEV;
-
- if (!pgraph->ctxprog) {
- sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
- return ret;
- }
-
- pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
- release_firmware(fw);
- return -ENOMEM;
- }
-
- cp = pgraph->ctxprog;
- if (le32_to_cpu(cp->signature) != 0x5043564e ||
- cp->version != 0 ||
- le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
- NV_ERROR(dev, "ctxprog invalid\n");
- release_firmware(fw);
- nouveau_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- if (!pgraph->ctxvals) {
- sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
- nouveau_grctx_fini(dev);
- return ret;
- }
-
- pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!pgraph->ctxvals) {
- NV_ERROR(dev, "OOM copying ctxvals\n");
- release_firmware(fw);
- nouveau_grctx_fini(dev);
- return -ENOMEM;
- }
-
- cv = (void *)pgraph->ctxvals;
- if (le32_to_cpu(cv->signature) != 0x5643564e ||
- cv->version != 0 ||
- le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
- NV_ERROR(dev, "ctxvals invalid\n");
- release_firmware(fw);
- nouveau_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- cp = pgraph->ctxprog;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < le16_to_cpu(cp->length); i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
- le32_to_cpu(cp->data[i]));
-
- return 0;
-}
-
-void
-nouveau_grctx_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-
- if (pgraph->ctxprog) {
- kfree(pgraph->ctxprog);
- pgraph->ctxprog = NULL;
- }
-
- if (pgraph->ctxvals) {
- kfree(pgraph->ctxprog);
- pgraph->ctxvals = NULL;
- }
-}
-
-void
-nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_ctxvals *cv = pgraph->ctxvals;
- int i;
-
- if (!cv)
- return;
-
- for (i = 0; i < le32_to_cpu(cv->length); i++)
- nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
- le32_to_cpu(cv->data[i].value));
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 7855b35effc..7b613682e40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -865,8 +865,12 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (dev_priv->card_type >= NV_30)
+
+ if (dev_priv->card_type >= NV_30) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+ rd_cio_state(dev, head, regp, 0x9f);
+ }
+
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
@@ -971,8 +975,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (dev_priv->card_type >= NV_30)
+
+ if (dev_priv->card_type >= NV_30) {
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+ wr_cio_state(dev, head, regp, 0x9f);
+ }
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 316a3c7e6eb..84614858728 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
if (entry->chan)
return -EEXIST;
- if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
return -EINVAL;
}
@@ -174,26 +174,26 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
switch (entry->port_type) {
case 0:
- i2c->algo.bit.setsda = nv04_i2c_setsda;
- i2c->algo.bit.setscl = nv04_i2c_setscl;
- i2c->algo.bit.getsda = nv04_i2c_getsda;
- i2c->algo.bit.getscl = nv04_i2c_getscl;
+ i2c->bit.setsda = nv04_i2c_setsda;
+ i2c->bit.setscl = nv04_i2c_setscl;
+ i2c->bit.getsda = nv04_i2c_getsda;
+ i2c->bit.getscl = nv04_i2c_getscl;
i2c->rd = entry->read;
i2c->wr = entry->write;
break;
case 4:
- i2c->algo.bit.setsda = nv4e_i2c_setsda;
- i2c->algo.bit.setscl = nv4e_i2c_setscl;
- i2c->algo.bit.getsda = nv4e_i2c_getsda;
- i2c->algo.bit.getscl = nv4e_i2c_getscl;
+ i2c->bit.setsda = nv4e_i2c_setsda;
+ i2c->bit.setscl = nv4e_i2c_setscl;
+ i2c->bit.getsda = nv4e_i2c_getsda;
+ i2c->bit.getscl = nv4e_i2c_getscl;
i2c->rd = 0x600800 + entry->read;
i2c->wr = 0x600800 + entry->write;
break;
case 5:
- i2c->algo.bit.setsda = nv50_i2c_setsda;
- i2c->algo.bit.setscl = nv50_i2c_setscl;
- i2c->algo.bit.getsda = nv50_i2c_getsda;
- i2c->algo.bit.getscl = nv50_i2c_getscl;
+ i2c->bit.setsda = nv50_i2c_setsda;
+ i2c->bit.setscl = nv50_i2c_setscl;
+ i2c->bit.getsda = nv50_i2c_getsda;
+ i2c->bit.getscl = nv50_i2c_getscl;
i2c->rd = nv50_i2c_port[entry->read];
i2c->wr = i2c->rd;
break;
@@ -216,17 +216,14 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
i2c_set_adapdata(&i2c->adapter, i2c);
if (entry->port_type < 6) {
- i2c->adapter.algo_data = &i2c->algo.bit;
- i2c->algo.bit.udelay = 40;
- i2c->algo.bit.timeout = usecs_to_jiffies(5000);
- i2c->algo.bit.data = i2c;
+ i2c->adapter.algo_data = &i2c->bit;
+ i2c->bit.udelay = 40;
+ i2c->bit.timeout = usecs_to_jiffies(5000);
+ i2c->bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
} else {
- i2c->adapter.algo_data = &i2c->algo.dp;
- i2c->algo.dp.running = false;
- i2c->algo.dp.address = 0;
- i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
- ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ i2c->adapter.algo = &nouveau_dp_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
}
if (ret) {
@@ -278,3 +275,45 @@ nouveau_i2c_find(struct drm_device *dev, int index)
return i2c->chan;
}
+bool
+nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
+{
+ uint8_t buf[] = { 0 };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+int
+nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ struct i2c_board_info *info, int index)
+{
+ struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
+ int i;
+
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+
+ for (i = 0; info[i].addr; i++) {
+ if (nouveau_probe_i2c_addr(i2c, info[i].addr)) {
+ NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
+ return i;
+ }
+ }
+
+ NV_DEBUG(dev, "No devices found.\n");
+
+ return -ENODEV;
+}
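Aside (not part of the patch): nouveau_probe_i2c_addr() above decides whether an address is populated by issuing a one-byte write followed by a one-byte read and checking that both messages completed. An analogous user-space probe through the i2c-dev interface looks roughly like this; the bus path and address range are illustrative.

#include <fcntl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Same write-then-read pair as nouveau_probe_i2c_addr(), but issued
 * through /dev/i2c-*; returns non-zero if both messages were ACKed. */
static int probe_addr(int fd, int addr)
{
	unsigned char buf[1] = { 0 };
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = buf },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = buf },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer) == 2;
}

int main(void)
{
	int fd = open("/dev/i2c-1", O_RDWR);  /* bus number is illustrative */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (int addr = 0x08; addr < 0x78; addr++)
		if (probe_addr(fd, addr))
			printf("device responds at 0x%02x\n", addr);
	close(fd);
	return 0;
}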
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index c8eaf7a9fcb..f71cb32f757 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -33,10 +33,7 @@ struct dcb_i2c_entry;
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- union {
- struct i2c_algo_bit_data bit;
- struct i2c_algo_dp_aux_data dp;
- } algo;
+ struct i2c_algo_bit_data bit;
unsigned rd;
unsigned wr;
unsigned data;
@@ -45,8 +42,10 @@ struct nouveau_i2c_chan {
int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
+int nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ struct i2c_board_info *info, int index);
-int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
- uint8_t *read_byte);
+extern const struct i2c_algorithm nouveau_dp_i2c_algo;
#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 53360f15606..794b0ee30cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -49,7 +49,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (dev_priv->card_type == NV_50) {
+ if (dev_priv->card_type >= NV_50) {
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
@@ -586,11 +586,11 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
}
if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nouveau_pgraph_intr_context_switch(dev);
-
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
nv_wr32(dev, NV03_PGRAPH_INTR,
NV_PGRAPH_INTR_CONTEXT_SWITCH);
+
+ nouveau_pgraph_intr_context_switch(dev);
}
if (status) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c1fd42b0dad..9689d414768 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,162 +35,6 @@
#include "drm_sarea.h"
#include "nouveau_drv.h"
-static struct mem_block *
-split_block(struct mem_block *p, uint64_t start, uint64_t size,
- struct drm_file *file_priv)
-{
- /* Maybe cut off the start of an existing block */
- if (start > p->start) {
- struct mem_block *newblock =
- kmalloc(sizeof(*newblock), GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start;
- newblock->size = p->size - (start - p->start);
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size -= newblock->size;
- p = newblock;
- }
-
- /* Maybe cut off the end of an existing block */
- if (size < p->size) {
- struct mem_block *newblock =
- kmalloc(sizeof(*newblock), GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start + size;
- newblock->size = p->size - size;
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size = size;
- }
-
-out:
- /* Our block is in the middle */
- p->file_priv = file_priv;
- return p;
-}
-
-struct mem_block *
-nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
- int align2, struct drm_file *file_priv, int tail)
-{
- struct mem_block *p;
- uint64_t mask = (1 << align2) - 1;
-
- if (!heap)
- return NULL;
-
- if (tail) {
- list_for_each_prev(p, heap) {
- uint64_t start = ((p->start + p->size) - size) & ~mask;
-
- if (p->file_priv == NULL && start >= p->start &&
- start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
- } else {
- list_for_each(p, heap) {
- uint64_t start = (p->start + mask) & ~mask;
-
- if (p->file_priv == NULL &&
- start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
- }
-
- return NULL;
-}
-
-void nouveau_mem_free_block(struct mem_block *p)
-{
- p->file_priv = NULL;
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- if (p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
-
- if (p->prev->file_priv == NULL) {
- struct mem_block *q = p->prev;
- q->size += p->size;
- q->next = p->next;
- q->next->prev = q;
- kfree(p);
- }
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
- uint64_t size)
-{
- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
- if (!blocks)
- return -ENOMEM;
-
- *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
- if (!*heap) {
- kfree(blocks);
- return -ENOMEM;
- }
-
- blocks->start = start;
- blocks->size = size;
- blocks->file_priv = NULL;
- blocks->next = blocks->prev = *heap;
-
- memset(*heap, 0, sizeof(**heap));
- (*heap)->file_priv = (struct drm_file *) -1;
- (*heap)->next = (*heap)->prev = blocks;
- return 0;
-}
-
-/*
- * Free all blocks associated with the releasing file_priv
- */
-void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
-{
- struct mem_block *p;
-
- if (!heap || !heap->next)
- return;
-
- list_for_each(p, heap) {
- if (p->file_priv == file_priv)
- p->file_priv = NULL;
- }
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- list_for_each(p, heap) {
- while ((p->file_priv == NULL) &&
- (p->next->file_priv == NULL) &&
- (p->next != heap)) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
- }
-}
-
/*
* NV10-NV40 tiling helpers
*/
@@ -299,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
phys |= 0x30;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
while (size) {
unsigned offset_h = upper_32_bits(phys);
unsigned offset_l = lower_32_bits(phys);
@@ -331,36 +174,12 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
}
}
}
- dev_priv->engine.instmem.finish_access(dev);
-
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00040001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00060001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ dev_priv->engine.instmem.flush(dev);
+ nv50_vm_flush(dev, 5);
+ nv50_vm_flush(dev, 0);
+ nv50_vm_flush(dev, 4);
+ nv50_vm_flush(dev, 6);
return 0;
}
@@ -374,7 +193,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
virt -= dev_priv->vm_vram_base;
pages = (size >> 16) << 1;
- dev_priv->engine.instmem.prepare_access(dev, true);
while (pages) {
pgt = dev_priv->vm_vram_pt[virt >> 29];
pte = (virt & 0x1ffe0000ULL) >> 15;
@@ -388,57 +206,19 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
while (pte < end)
nv_wo32(dev, pgt, pte++, 0);
}
- dev_priv->engine.instmem.finish_access(dev);
-
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return;
- }
-
- nv_wr32(dev, 0x100c80, 0x00040001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return;
- }
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, 0x100c80, 0x00060001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- }
+ nv50_vm_flush(dev, 5);
+ nv50_vm_flush(dev, 0);
+ nv50_vm_flush(dev, 4);
+ nv50_vm_flush(dev, 6);
}
/*
* Cleanup everything
*/
-void nouveau_mem_takedown(struct mem_block **heap)
-{
- struct mem_block *p;
-
- if (!*heap)
- return;
-
- for (p = (*heap)->next; p != *heap;) {
- struct mem_block *q = p;
- p = p->next;
- kfree(q);
- }
-
- kfree(*heap);
- *heap = NULL;
-}
-
-void nouveau_mem_close(struct drm_device *dev)
+void
+nouveau_mem_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -449,8 +229,7 @@ void nouveau_mem_close(struct drm_device *dev)
nouveau_ttm_global_release(dev_priv);
- if (drm_core_has_AGP(dev) && dev->agp &&
- drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_has_AGP(dev) && dev->agp) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
@@ -471,28 +250,29 @@ void nouveau_mem_close(struct drm_device *dev)
}
if (dev_priv->fb_mtrr) {
- drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
- drm_get_resource_len(dev, 1), DRM_MTRR_WC);
- dev_priv->fb_mtrr = 0;
+ drm_mtrr_del(dev_priv->fb_mtrr,
+ pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = -1;
}
}
static uint32_t
nouveau_mem_detect_nv04(struct drm_device *dev)
{
- uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0);
+ uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
if (boot0 & 0x00000100)
return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
- switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
- case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
return 32 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
return 16 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
return 8 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
return 4 * 1024 * 1024;
}
@@ -536,12 +316,22 @@ nouveau_mem_detect(struct drm_device *dev)
} else
if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
- } else {
- dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
- dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+ } else
+ if (dev_priv->card_type < NV_C0) {
+ dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ dev_priv->vram_size &= 0xffffffff00ll;
+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
+ }
+ } else {
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
}
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
@@ -555,18 +345,36 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
-#if __OS_HAS_AGP
-static void nouveau_mem_reset_agp(struct drm_device *dev)
+int
+nouveau_mem_reset_agp(struct drm_device *dev)
{
- uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+#if __OS_HAS_AGP
+ uint32_t saved_pci_nv_1, pmc_enable;
+ int ret;
+
+ /* First of all, disable fast writes; otherwise, if fast writes are
+ * already enabled in the AGP bridge and we disable the card's
+ * AGP controller, we might lock ourselves out of it. */
+ if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+
+ ret = drm_agp_info(dev, &info);
+ if (ret)
+ return ret;
+
+ mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+ ret = drm_agp_enable(dev, mode);
+ if (ret)
+ return ret;
+ }
saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
- saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
/* clear busmaster bit */
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
- /* clear SBA and AGP bits */
- nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+ /* disable AGP */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);
/* power cycle pgraph, if enabled */
pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
@@ -578,11 +386,12 @@ static void nouveau_mem_reset_agp(struct drm_device *dev)
}
/* and restore (gives effect of resetting AGP) */
- nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
-}
#endif
+ return 0;
+}
+
int
nouveau_mem_init_agp(struct drm_device *dev)
{
@@ -592,11 +401,6 @@ nouveau_mem_init_agp(struct drm_device *dev)
struct drm_agp_mode mode;
int ret;
- if (nouveau_noagp)
- return 0;
-
- nouveau_mem_reset_agp(dev);
-
if (!dev->agp->acquired) {
ret = drm_agp_acquire(dev);
if (ret) {
@@ -605,6 +409,8 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
}
+ nouveau_mem_reset_agp(dev);
+
ret = drm_agp_info(dev, &info);
if (ret) {
NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
@@ -633,7 +439,7 @@ nouveau_mem_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits = 32;
- dev_priv->fb_phys = drm_get_resource_start(dev, 1);
+ dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
if (dev_priv->card_type >= NV_50 &&
@@ -659,14 +465,13 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
- spin_lock_init(&dev_priv->ttm.bo_list_lock);
spin_lock_init(&dev_priv->tile.lock);
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
- if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
- dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
+ if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+ dev_priv->fb_mappable_pages =
+ pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
/* remove reserved space at end of vram from available amount */
@@ -692,7 +497,7 @@ nouveau_mem_init(struct drm_device *dev)
/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp) {
+ if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -718,8 +523,8 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
- drm_get_resource_len(dev, 1),
+ dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1),
DRM_MTRR_WC);
return 0;
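Aside (not part of the patch): the four open-coded 0x100c80 poll loops removed above each wrote (engine << 16) | 1 for engines 5, 0, 4 and 6 and then waited for bit 0 to clear; the patch collapses them into instmem.flush() plus nv50_vm_flush() calls. A minimal sketch of that write-then-poll-with-timeout shape, with the register and the "hardware" stubbed out; the helper name and encoding are only what the removed blocks suggest, not the real nv50_vm_flush().

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Fake flush register; the real code writes/reads 0x100c80 via MMIO.
 * The fake "hardware" clears the busy bit as soon as it is read back. */
static uint32_t flush_reg;

static void     reg_write(uint32_t v) { flush_reg = v; }
static uint32_t reg_read(void)        { flush_reg &= ~1u; return flush_reg; }

/* Shape of the helper the four open-coded blocks collapse into:
 * kick the flush for one engine, then poll until bit 0 drops. */
static bool vm_flush(int engine, long timeout_ms)
{
	struct timespec start, now;

	reg_write(((uint32_t)engine << 16) | 1);
	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (!(reg_read() & 1))
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000L +
		 (now.tv_nsec - start.tv_nsec) / 1000000L < timeout_ms);

	fprintf(stderr, "flush timeout, engine %d\n", engine);
	return false;
}

int main(void)
{
	static const int engines[] = { 5, 0, 4, 6 };  /* order used by the patch */

	for (unsigned i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
		if (!vm_flush(engines[i], 100))
			return 1;
	printf("all engines flushed\n");
	return 0;
}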
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 9537f3e3011..3c9964a8fba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -55,7 +55,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
if (ret)
goto out_err;
- ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
+ ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
if (ret)
goto out_err;
@@ -79,8 +79,9 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
- nouveau_mem_takedown(&chan->notifier_heap);
+ drm_mm_takedown(&chan->notifier_heap);
}
static void
@@ -90,7 +91,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
NV_DEBUG(dev, "\n");
if (gpuobj->priv)
- nouveau_mem_free_block(gpuobj->priv);
+ drm_mm_put_block(gpuobj->priv);
}
int
@@ -100,18 +101,13 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
- struct mem_block *mem;
+ struct drm_mm_node *mem;
uint32_t offset;
int target, ret;
- if (!chan->notifier_heap) {
- NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
- chan->id);
- return -EINVAL;
- }
-
- mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
- (struct drm_file *)-2, 0);
+ mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block(mem, size, 0);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
@@ -144,17 +140,17 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
mem->size, NV_DMA_ACCESS_RW, target,
&nobj);
if (ret) {
- nouveau_mem_free_block(mem);
+ drm_mm_put_block(mem);
NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
return ret;
}
- nobj->dtor = nouveau_notifier_gpuobj_dtor;
- nobj->priv = mem;
+ nobj->dtor = nouveau_notifier_gpuobj_dtor;
+ nobj->priv = mem;
ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
if (ret) {
nouveau_gpuobj_del(dev, &nobj);
- nouveau_mem_free_block(mem);
+ drm_mm_put_block(mem);
NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
return ret;
}
@@ -170,7 +166,7 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
return -EINVAL;
if (poffset) {
- struct mem_block *mem = nobj->priv;
+ struct drm_mm_node *mem = nobj->priv;
if (*poffset >= mem->size)
return false;
@@ -189,7 +185,6 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e7c100ba63a..b6bcb254f4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
}
}
- instmem->prepare_access(dev, true);
co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
do {
if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
nv_wo32(dev, ramht, (co + 4)/4, ctx);
list_add_tail(&ref->list, &chan->ramht_refs);
- instmem->finish_access(dev);
+ instmem->flush(dev);
return 0;
}
NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
if (co >= dev_priv->ramht_size)
co = 0;
} while (co != ho);
- instmem->finish_access(dev);
NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
return -ENOMEM;
@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
return;
}
- instmem->prepare_access(dev, true);
co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
do {
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
list_del(&ref->list);
- instmem->finish_access(dev);
+ instmem->flush(dev);
return;
}
@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
co = 0;
} while (co != ho);
list_del(&ref->list);
- instmem->finish_access(dev);
NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
chan->id, ref->handle);
@@ -209,7 +205,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct nouveau_gpuobj *gpuobj;
- struct mem_block *pramin = NULL;
+ struct drm_mm *pramin = NULL;
int ret;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -233,25 +229,12 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
* available.
*/
if (chan) {
- if (chan->ramin_heap) {
- NV_DEBUG(dev, "private heap\n");
- pramin = chan->ramin_heap;
- } else
- if (dev_priv->card_type < NV_50) {
- NV_DEBUG(dev, "global heap fallback\n");
- pramin = dev_priv->ramin_heap;
- }
+ NV_DEBUG(dev, "channel heap\n");
+ pramin = &chan->ramin_heap;
} else {
NV_DEBUG(dev, "global heap\n");
- pramin = dev_priv->ramin_heap;
- }
-
- if (!pramin) {
- NV_ERROR(dev, "No PRAMIN heap!\n");
- return -EINVAL;
- }
+ pramin = &dev_priv->ramin_heap;
- if (!chan) {
ret = engine->instmem.populate(dev, gpuobj, &size);
if (ret) {
nouveau_gpuobj_del(dev, &gpuobj);
@@ -260,9 +243,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
}
/* Allocate a chunk of the PRAMIN aperture */
- gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
- drm_order(align),
- (struct drm_file *)-2, 0);
+ gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
+ if (gpuobj->im_pramin)
+ gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+
if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
return -ENOMEM;
@@ -279,10 +263,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
int i;
- engine->instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
nv_wo32(dev, gpuobj, i/4, 0);
- engine->instmem.finish_access(dev);
+ engine->instmem.flush(dev);
}
*gpuobj_ret = gpuobj;
@@ -370,10 +353,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
}
if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
- engine->instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
nv_wo32(dev, gpuobj, i/4, 0);
- engine->instmem.finish_access(dev);
+ engine->instmem.flush(dev);
}
if (gpuobj->dtor)
@@ -386,7 +368,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
if (gpuobj->flags & NVOBJ_FLAG_FAKE)
kfree(gpuobj->im_pramin);
else
- nouveau_mem_free_block(gpuobj->im_pramin);
+ drm_mm_put_block(gpuobj->im_pramin);
}
list_del(&gpuobj->list);
@@ -589,7 +571,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
if (p_offset != ~0) {
- gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+ gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
GFP_KERNEL);
if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
@@ -605,10 +587,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
}
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
nv_wo32(dev, gpuobj, i/4, 0);
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
}
if (pref) {
@@ -696,8 +677,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
return ret;
}
- instmem->prepare_access(dev, true);
-
if (dev_priv->card_type < NV_50) {
uint32_t frame, adjust, pte_flags = 0;
@@ -734,7 +713,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
nv_wo32(dev, *gpuobj, 5, flags5);
}
- instmem->finish_access(dev);
+ instmem->flush(dev);
(*gpuobj)->engine = NVOBJ_ENGINE_SW;
(*gpuobj)->class = class;
@@ -849,7 +828,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
return ret;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
if (dev_priv->card_type >= NV_50) {
nv_wo32(dev, *gpuobj, 0, class);
nv_wo32(dev, *gpuobj, 5, 0x00010000);
@@ -874,7 +852,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
}
}
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
(*gpuobj)->engine = NVOBJ_ENGINE_GR;
(*gpuobj)->class = class;
@@ -920,6 +898,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
base = 0;
/* PGRAPH context */
+ size += dev_priv->engine.graph.grctx_size;
if (dev_priv->card_type == NV_50) {
/* Various fixed table thingos */
@@ -930,12 +909,8 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
size += 0x8000;
/* RAMFC */
size += 0x1000;
- /* PGRAPH context */
- size += 0x70000;
}
- NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
- chan->id, size, base);
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
&chan->ramin);
if (ret) {
@@ -944,8 +919,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
}
pramin = chan->ramin->gpuobj;
- ret = nouveau_mem_init_heap(&chan->ramin_heap,
- pramin->im_pramin->start + base, size);
+ ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref_del(dev, &chan->ramin);
@@ -969,15 +943,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
- /* Reserve a block of PRAMIN for the channel
- *XXX: maybe on <NV50 too at some point
- */
- if (0 || dev_priv->card_type == NV_50) {
- ret = nouveau_gpuobj_channel_init_pramin(chan);
- if (ret) {
- NV_ERROR(dev, "init pramin\n");
- return ret;
- }
+ /* Allocate a chunk of memory for per-channel object storage */
+ ret = nouveau_gpuobj_channel_init_pramin(chan);
+ if (ret) {
+ NV_ERROR(dev, "init pramin\n");
+ return ret;
}
/* NV50 VM
@@ -988,17 +958,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
if (dev_priv->card_type >= NV_50) {
uint32_t vm_offset, pde;
- instmem->prepare_access(dev, true);
-
vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
vm_offset += chan->ramin->gpuobj->im_pramin->start;
ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
0, &chan->vm_pd, NULL);
- if (ret) {
- instmem->finish_access(dev);
+ if (ret)
return ret;
- }
for (i = 0; i < 0x4000; i += 8) {
nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
@@ -1008,10 +974,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->gart_info.sg_ctxdma,
&chan->vm_gart_pt);
- if (ret) {
- instmem->finish_access(dev);
+ if (ret)
return ret;
- }
nv_wo32(dev, chan->vm_pd, pde++,
chan->vm_gart_pt->instance | 0x03);
nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
@@ -1021,17 +985,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->vm_vram_pt[i],
&chan->vm_vram_pt[i]);
- if (ret) {
- instmem->finish_access(dev);
+ if (ret)
return ret;
- }
nv_wo32(dev, chan->vm_pd, pde++,
chan->vm_vram_pt[i]->instance | 0x61);
nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
}
- instmem->finish_access(dev);
+ instmem->flush(dev);
}
/* RAMHT */
@@ -1130,8 +1092,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
- if (chan->ramin_heap)
- nouveau_mem_takedown(&chan->ramin_heap);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
if (chan->ramin)
nouveau_gpuobj_ref_del(dev, &chan->ramin);
@@ -1164,10 +1126,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
return -ENOMEM;
}
- dev_priv->engine.instmem.prepare_access(dev, false);
for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
- dev_priv->engine.instmem.finish_access(dev);
}
return 0;
@@ -1212,10 +1172,9 @@ nouveau_gpuobj_resume(struct drm_device *dev)
if (!gpuobj->im_backing_suspend)
continue;
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
}
nouveau_gpuobj_suspend_cleanup(dev);
@@ -1232,7 +1191,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
if (init->handle == ~0)
@@ -1283,7 +1241,6 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 6ca80a3fe70..21a6e453b97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -1,19 +1,64 @@
+#define NV04_PFB_BOOT_0 0x00100000
+# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
+# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
+# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
+# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
+# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
+# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
+# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
+# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
+# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
+# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
+# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
+#define NV04_PFB_DEBUG_0 0x00100080
+# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001
+# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010
+# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00
+# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000
+# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000
+# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000
+# define NV04_PFB_DEBUG_0_CASOE 0x00100000
+# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000
+# define NV04_PFB_DEBUG_0_REFINC 0x20000000
+# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000
+#define NV04_PFB_CFG0 0x00100200
+# define NV04_PFB_CFG0_SCRAMBLE 0x20000000
+#define NV04_PFB_CFG1 0x00100204
+#define NV04_PFB_FIFO_DATA 0x0010020c
+# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
+# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+#define NV10_PFB_REFCTRL 0x00100210
+# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
+#define NV04_PFB_PAD 0x0010021c
+# define NV04_PFB_PAD_CKE_NORMAL (1 << 0)
+#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE 8
+#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i) (0x0010024c + (i*16))
+#define NV04_PFB_REF 0x001002d0
+# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
+#define NV04_PFB_PRE 0x001002d4
+# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV10_PFB_CLOSE_PAGE2 0x0010033c
+#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
+#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0 12
+#define NV40_PFB_TILE__SIZE_1 15
+#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i) (0x0010060c + (i*16))
+#define NV40_PFB_UNK_800 0x00100800
-#define NV03_BOOT_0 0x00100000
-# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
-# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
-# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
-# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
-# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
-# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
-# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
-# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
-# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
-
-#define NV04_FIFO_DATA 0x0010020c
-# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
-# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+#define NV_PEXTDEV_BOOT_0 0x00101000
+#define NV_PEXTDEV_BOOT_0_RAMCFG 0x0000003c
+# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
+#define NV_PEXTDEV_BOOT_3 0x0010100c
#define NV_RAMIN 0x00700000
@@ -131,23 +176,6 @@
#define NV04_PTIMER_TIME_1 0x00009410
#define NV04_PTIMER_ALARM_0 0x00009420
-#define NV04_PFB_CFG0 0x00100200
-#define NV04_PFB_CFG1 0x00100204
-#define NV40_PFB_020C 0x0010020C
-#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
-#define NV10_PFB_TILE__SIZE 8
-#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
-#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
-#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
-#define NV10_PFB_CLOSE_PAGE2 0x0010033C
-#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
-#define NV40_PFB_TILE__SIZE_0 12
-#define NV40_PFB_TILE__SIZE_1 15
-#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
-#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
-#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
-#define NV40_PFB_UNK_800 0x00100800
-
#define NV04_PGRAPH_DEBUG_0 0x00400080
#define NV04_PGRAPH_DEBUG_1 0x00400084
#define NV04_PGRAPH_DEBUG_2 0x00400088
@@ -192,28 +220,21 @@
# define NV_PGRAPH_INTR_ERROR (1<<20)
#define NV10_PGRAPH_CTX_CONTROL 0x00400144
#define NV10_PGRAPH_CTX_USER 0x00400148
-#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
-#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
-#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
-#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
-#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
+#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
-#define NV10_PGRAPH_CTX_CACHE1 0x00400160
+#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
+ + 0x4*(i) + 0x20*(j))
#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
#define NV04_PGRAPH_CTX_CONTROL 0x00400170
#define NV04_PGRAPH_CTX_USER 0x00400174
#define NV04_PGRAPH_CTX_CACHE1 0x00400180
-#define NV10_PGRAPH_CTX_CACHE2 0x00400180
#define NV03_PGRAPH_CTX_CONTROL 0x00400190
#define NV03_PGRAPH_CTX_USER 0x00400194
#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
-#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
-#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
-#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
#define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
@@ -328,9 +349,12 @@
#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
#define NV10_PGRAPH_RDI_DATA 0x00400754
#define NV04_PGRAPH_DMA_PITCH 0x00400760
-#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
+#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
+#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
#define NV10_PGRAPH_DMA_PITCH 0x00400770
#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
@@ -814,6 +838,7 @@
#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1d6ee8b5515..6b9187d7f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
- dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
nvbe->pte_start = pte;
for (i = 0; i < nvbe->nr_pages; i++) {
@@ -116,24 +115,11 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.finish_access(nvbe->dev);
+ dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ nv50_vm_flush(dev, 5); /* PGRAPH */
+ nv50_vm_flush(dev, 0); /* PFIFO */
}
nvbe->bound = true;
@@ -154,7 +140,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
if (!nvbe->bound)
return 0;
- dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
pte = nvbe->pte_start;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
@@ -170,24 +155,11 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.finish_access(nvbe->dev);
+ dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ nv50_vm_flush(dev, 5);
+ nv50_vm_flush(dev, 0);
}
nvbe->bound = false;
@@ -242,6 +214,7 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -267,12 +240,20 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!dev_priv->gart_info.sg_dummy_page) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+
set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -EFAULT;
+ }
- dev_priv->engine.instmem.prepare_access(dev, true);
if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
@@ -294,7 +275,7 @@ nouveau_sgdma_init(struct drm_device *dev)
nv_wo32(dev, gpuobj, (i+4)/4, 0);
}
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
dev_priv->gart_info.aper_base = 0;
@@ -325,14 +306,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
int pte;
pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
if (dev_priv->card_type < NV_50) {
- instmem->prepare_access(dev, false);
*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
- instmem->finish_access(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index b02a231d693..989322be372 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -38,6 +38,7 @@
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
@@ -54,8 +55,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -85,6 +85,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv04_fifo_load_context;
engine->fifo.unload_context = nv04_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = NULL;
+ engine->gpio.set = NULL;
+ engine->gpio.irq_enable = NULL;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
@@ -95,8 +105,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -128,6 +137,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
@@ -138,8 +157,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -171,6 +189,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
@@ -181,15 +209,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nv10_fb_init;
- engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.init = nv30_fb_init;
+ engine->fb.takedown = nv30_fb_takedown;
engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv30_graph_grclass;
engine->graph.init = nv30_graph_init;
@@ -214,6 +241,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x40:
case 0x60:
@@ -225,8 +262,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -258,6 +294,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv40_fifo_destroy_context;
engine->fifo.load_context = nv40_fifo_load_context;
engine->fifo.unload_context = nv40_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -271,8 +317,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv50_instmem_clear;
engine->instmem.bind = nv50_instmem_bind;
engine->instmem.unbind = nv50_instmem_unbind;
- engine->instmem.prepare_access = nv50_instmem_prepare_access;
- engine->instmem.finish_access = nv50_instmem_finish_access;
+ if (dev_priv->chipset == 0x50)
+ engine->instmem.flush = nv50_instmem_flush;
+ else
+ engine->instmem.flush = nv84_instmem_flush;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -300,6 +348,64 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv50_fifo_destroy_context;
engine->fifo.load_context = nv50_fifo_load_context;
engine->fifo.unload_context = nv50_fifo_unload_context;
+ engine->display.early_init = nv50_display_early_init;
+ engine->display.late_takedown = nv50_display_late_takedown;
+ engine->display.create = nv50_display_create;
+ engine->display.init = nv50_display_init;
+ engine->display.destroy = nv50_display_destroy;
+ engine->gpio.init = nv50_gpio_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv50_gpio_get;
+ engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ break;
+ case 0xC0:
+ engine->instmem.init = nvc0_instmem_init;
+ engine->instmem.takedown = nvc0_instmem_takedown;
+ engine->instmem.suspend = nvc0_instmem_suspend;
+ engine->instmem.resume = nvc0_instmem_resume;
+ engine->instmem.populate = nvc0_instmem_populate;
+ engine->instmem.clear = nvc0_instmem_clear;
+ engine->instmem.bind = nvc0_instmem_bind;
+ engine->instmem.unbind = nvc0_instmem_unbind;
+ engine->instmem.flush = nvc0_instmem_flush;
+ engine->mc.init = nv50_mc_init;
+ engine->mc.takedown = nv50_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nvc0_fb_init;
+ engine->fb.takedown = nvc0_fb_takedown;
+ engine->graph.grclass = NULL; //nvc0_graph_grclass;
+ engine->graph.init = nvc0_graph_init;
+ engine->graph.takedown = nvc0_graph_takedown;
+ engine->graph.fifo_access = nvc0_graph_fifo_access;
+ engine->graph.channel = nvc0_graph_channel;
+ engine->graph.create_context = nvc0_graph_create_context;
+ engine->graph.destroy_context = nvc0_graph_destroy_context;
+ engine->graph.load_context = nvc0_graph_load_context;
+ engine->graph.unload_context = nvc0_graph_unload_context;
+ engine->fifo.channels = 128;
+ engine->fifo.init = nvc0_fifo_init;
+ engine->fifo.takedown = nvc0_fifo_takedown;
+ engine->fifo.disable = nvc0_fifo_disable;
+ engine->fifo.enable = nvc0_fifo_enable;
+ engine->fifo.reassign = nvc0_fifo_reassign;
+ engine->fifo.channel_id = nvc0_fifo_channel_id;
+ engine->fifo.create_context = nvc0_fifo_create_context;
+ engine->fifo.destroy_context = nvc0_fifo_destroy_context;
+ engine->fifo.load_context = nvc0_fifo_load_context;
+ engine->fifo.unload_context = nvc0_fifo_unload_context;
+ engine->display.early_init = nv50_display_early_init;
+ engine->display.late_takedown = nv50_display_late_takedown;
+ engine->display.create = nv50_display_create;
+ engine->display.init = nv50_display_init;
+ engine->display.destroy = nv50_display_destroy;
+ engine->gpio.init = nv50_gpio_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv50_gpio_get;
+ engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_enable = nv50_gpio_irq_enable;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -407,11 +513,6 @@ nouveau_card_init(struct drm_device *dev)
struct nouveau_engine *engine;
int ret;
- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
-
- if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
- return 0;
-
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
nouveau_switcheroo_can_switch);
@@ -421,15 +522,17 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out;
engine = &dev_priv->engine;
- dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
spin_lock_init(&dev_priv->context_switch_lock);
+ /* Make the CRTCs and I2C buses accessible */
+ ret = engine->display.early_init(dev);
+ if (ret)
+ goto out;
+
/* Parse BIOS tables / Run init tables if card not POSTed */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = nouveau_bios_init(dev);
- if (ret)
- goto out;
- }
+ ret = nouveau_bios_init(dev);
+ if (ret)
+ goto out_display_early;
ret = nouveau_mem_detect(dev);
if (ret)
@@ -461,10 +564,15 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_gpuobj;
+ /* PGPIO */
+ ret = engine->gpio.init(dev);
+ if (ret)
+ goto out_mc;
+
/* PTIMER */
ret = engine->timer.init(dev);
if (ret)
- goto out_mc;
+ goto out_gpio;
/* PFB */
ret = engine->fb.init(dev);
@@ -485,12 +593,16 @@ nouveau_card_init(struct drm_device *dev)
goto out_graph;
}
+ ret = engine->display.create(dev);
+ if (ret)
+ goto out_fifo;
+
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
*/
ret = drm_irq_install(dev);
if (ret)
- goto out_fifo;
+ goto out_display;
ret = drm_vblank_init(dev, 0);
if (ret)
@@ -504,35 +616,18 @@ nouveau_card_init(struct drm_device *dev)
goto out_irq;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (dev_priv->card_type >= NV_50)
- ret = nv50_display_create(dev);
- else
- ret = nv04_display_create(dev);
- if (ret)
- goto out_channel;
- }
-
ret = nouveau_backlight_init(dev);
if (ret)
NV_ERROR(dev, "Error %d registering backlight\n", ret);
- dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- nouveau_fbcon_init(dev);
- drm_kms_helper_poll_init(dev);
- }
-
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
-out_channel:
- if (dev_priv->channel) {
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
- }
out_irq:
drm_irq_uninstall(dev);
+out_display:
+ engine->display.destroy(dev);
out_fifo:
if (!nouveau_noaccel)
engine->fifo.takedown(dev);
@@ -543,6 +638,8 @@ out_fb:
engine->fb.takedown(dev);
out_timer:
engine->timer.takedown(dev);
+out_gpio:
+ engine->gpio.takedown(dev);
out_mc:
engine->mc.takedown(dev);
out_gpuobj:
@@ -556,6 +653,8 @@ out_gpuobj_early:
nouveau_gpuobj_late_takedown(dev);
out_bios:
nouveau_bios_takedown(dev);
+out_display_early:
+ engine->display.late_takedown(dev);
out:
vga_client_register(dev->pdev, NULL, NULL, NULL);
return ret;
@@ -566,45 +665,39 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
-
- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
-
- nouveau_backlight_exit(dev);
-
- if (dev_priv->channel) {
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
- }
+ nouveau_backlight_exit(dev);
- if (!nouveau_noaccel) {
- engine->fifo.takedown(dev);
- engine->graph.takedown(dev);
- }
- engine->fb.takedown(dev);
- engine->timer.takedown(dev);
- engine->mc.takedown(dev);
+ if (dev_priv->channel) {
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ }
- mutex_lock(&dev->struct_mutex);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
- mutex_unlock(&dev->struct_mutex);
- nouveau_sgdma_takedown(dev);
+ if (!nouveau_noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->gpio.takedown(dev);
+ engine->mc.takedown(dev);
+ engine->display.late_takedown(dev);
- nouveau_gpuobj_takedown(dev);
- nouveau_mem_close(dev);
- engine->instmem.takedown(dev);
+ mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
+ mutex_unlock(&dev->struct_mutex);
+ nouveau_sgdma_takedown(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
+ nouveau_gpuobj_takedown(dev);
+ nouveau_mem_close(dev);
+ engine->instmem.takedown(dev);
- nouveau_gpuobj_late_takedown(dev);
- nouveau_bios_takedown(dev);
+ drm_irq_uninstall(dev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ nouveau_gpuobj_late_takedown(dev);
+ nouveau_bios_takedown(dev);
- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
- }
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
}
/* here a client dies, release the stuff that was allocated for its
@@ -691,22 +784,26 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
struct drm_nouveau_private *dev_priv;
uint32_t reg0;
resource_size_t mmio_start_offs;
+ int ret;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (!dev_priv)
- return -ENOMEM;
+ if (!dev_priv) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
dev->dev_private = dev_priv;
dev_priv->dev = dev;
dev_priv->flags = flags & NOUVEAU_FLAGS;
- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
dev_priv->wq = create_workqueue("nouveau");
- if (!dev_priv->wq)
- return -EINVAL;
+ if (!dev_priv->wq) {
+ ret = -EINVAL;
+ goto err_priv;
+ }
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
@@ -719,7 +816,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->mmio) {
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_wq;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@@ -765,19 +863,21 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
case 0xa0:
dev_priv->card_type = NV_50;
break;
+ case 0xc0:
+ dev_priv->card_type = NV_C0;
+ break;
default:
NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mmio;
}
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int ret = nouveau_remove_conflicting_drivers(dev);
- if (ret)
- return ret;
- }
+ ret = nouveau_remove_conflicting_drivers(dev);
+ if (ret)
+ goto err_mmio;
/* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
@@ -791,7 +891,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to PRAMIN BAR");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mmio;
}
} else {
dev_priv->ramin_size = 1 * 1024 * 1024;
@@ -799,7 +900,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mmio;
}
}
@@ -812,46 +914,38 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->flags |= NV_NFORCE2;
/* For kernel modesetting, init card now and bring up fbcon */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int ret = nouveau_card_init(dev);
- if (ret)
- return ret;
- }
+ ret = nouveau_card_init(dev);
+ if (ret)
+ goto err_ramin;
return 0;
-}
-
-static void nouveau_close(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- /* In the case of an error dev_priv may not be allocated yet */
- if (dev_priv)
- nouveau_card_takedown(dev);
+err_ramin:
+ iounmap(dev_priv->ramin);
+err_mmio:
+ iounmap(dev_priv->mmio);
+err_wq:
+ destroy_workqueue(dev_priv->wq);
+err_priv:
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+err_out:
+ return ret;
}
-/* KMS: we need mmio at load time, not when the first drm client opens. */
void nouveau_lastclose(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- nouveau_close(dev);
}
int nouveau_unload(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_kms_helper_poll_fini(dev);
- nouveau_fbcon_fini(dev);
- if (dev_priv->card_type >= NV_50)
- nv50_display_destroy(dev);
- else
- nv04_display_destroy(dev);
- nouveau_close(dev);
- }
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
+ engine->display.destroy(dev);
+ nouveau_card_takedown(dev);
iounmap(dev_priv->mmio);
iounmap(dev_priv->ramin);
@@ -867,8 +961,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_getparam *getparam = data;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
getparam->value = dev_priv->chipset;
@@ -937,8 +1029,6 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
{
struct drm_nouveau_setparam *setparam = data;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
switch (setparam->param) {
default:
NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index c385d50f041..bd35f930568 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -42,13 +42,13 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
}
static int
-nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
+nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
static void
-nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
+nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
@@ -56,16 +56,16 @@ nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
int
nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
{
- struct ttm_global_reference *global_ref;
+ struct drm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->ttm.mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &nouveau_ttm_mem_global_init;
global_ref->release = &nouveau_ttm_mem_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting\n");
dev_priv->ttm.mem_global_ref.release = NULL;
@@ -74,15 +74,15 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
global_ref = &dev_priv->ttm.bo_global_ref.ref;
- global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM BO subsystem\n");
- ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
dev_priv->ttm.mem_global_ref.release = NULL;
return ret;
}
@@ -96,8 +96,8 @@ nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
if (dev_priv->ttm.mem_global_ref.release == NULL)
return;
- ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
- ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
dev_priv->ttm.mem_global_ref.release = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index eba687f1099..497df8765f2 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -157,6 +157,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
@@ -211,6 +212,10 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NVVgaSeqReset(dev, nv_crtc->index, false);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+
+ /* Update connector polling modes */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ nouveau_connector_set_polling(connector);
}
static bool
@@ -537,6 +542,9 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
+ if (dev_priv->card_type >= NV_30)
+ regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
+
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
@@ -734,15 +742,13 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
}
static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+ uint32_t size)
{
+ int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int i;
- if (size != 256)
- return;
-
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -909,7 +915,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
if (!gem)
- return -EINVAL;
+ return -ENOENT;
cursor = nouveau_gem_object(gem);
ret = nouveau_bo_map(cursor);
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1cb19e3acb5..ea3627041ec 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -220,6 +220,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -251,22 +252,21 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
- saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
- saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
msleep(4);
saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (saved_routput & 0x100) >> 8;
-#if 0
- /* if there's a spare crtc, using it will minimise flicker for the case
- * where the in-use crtc is in use by an off-chip tmds encoder */
- if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
+
+ /* if there's a spare crtc, using it will minimise flicker */
+ if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0))
head ^= 1;
-#endif
+
/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
routput = (saved_routput & 0xfffffece) | head << 8;
@@ -304,8 +304,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
return sample;
}
@@ -315,9 +315,12 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
- uint32_t sample = nv17_dac_sample_load(encoder);
- if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+ if (nv04_dac_in_use(encoder))
+ return connector_status_disconnected;
+
+ if (nv17_dac_sample_load(encoder) &
+ NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
NV_INFO(dev, "Load detected on output %c\n",
'@' + ffs(dcb->or));
return connector_status_connected;
@@ -330,6 +333,9 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ if (nv04_dac_in_use(encoder))
+ return false;
+
return true;
}
@@ -428,6 +434,17 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
}
}
+/* Check if the DAC corresponding to 'encoder' is being used by
+ * someone else. */
+bool nv04_dac_in_use(struct drm_encoder *encoder)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+
+ return nv_gf4_disp_arch(encoder->dev) &&
+ (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
+}
+
static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -501,11 +518,13 @@ static const struct drm_encoder_funcs nv04_dac_funcs = {
.destroy = nv04_dac_destroy,
};
-int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+int
+nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
{
const struct drm_encoder_helper_funcs *helper;
- struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_device *dev = connector->dev;
+ struct drm_encoder *encoder;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
@@ -527,5 +546,6 @@ int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 41634d4752f..0d3206a7046 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -34,6 +34,8 @@
#include "nouveau_hw.h"
#include "nvreg.h"
+#include "i2c/sil164.h"
+
#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
@@ -144,6 +146,36 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
}
}
+static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ struct drm_encoder *slave;
+
+ if (dcb->type != OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
+ return NULL;
+
+ /* Some BIOSes (e.g. the one in a Quadro FX1000) report several
+ * TMDS transmitters at the same I2C address, in the same I2C
+ * bus. This can still work because in that case one of them is
+ * always hard-wired to a reasonable configuration using straps,
+ * and the other one needs to be programmed.
+ *
+ * I don't think there's a way to know which is which, even the
+ * blob programs the one exposed via I2C for *both* heads, so
+ * let's do the same.
+ */
+ list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
+ struct dcb_entry *slave_dcb = nouveau_encoder(slave)->dcb;
+
+ if (slave_dcb->type == OUTPUT_TMDS && get_slave_funcs(slave) &&
+ slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
+ return slave;
+ }
+
+ return NULL;
+}
+
static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -412,10 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
-
- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ struct drm_encoder *slave_encoder;
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -433,6 +462,12 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+ /* Init external transmitters */
+ slave_encoder = get_tmds_slave(encoder);
+ if (slave_encoder)
+ get_slave_funcs(slave_encoder)->mode_set(
+ slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
+
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
@@ -440,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+ struct drm_device *dev = encoder->dev;
+
+ /* BIOS scripts usually take care of the backlight, thanks
+ * Apple for your consistency.
+ */
+ if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+ dev->pci_device == 0x0329) {
+ if (mode == DRM_MODE_DPMS_ON) {
+ nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+ nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+ } else {
+ nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+ nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+ }
+ }
+#endif
+}
+
static inline bool is_powersaving_dpms(int mode)
{
return (mode != DRM_MODE_DPMS_ON);
@@ -487,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
LVDS_PANEL_OFF, 0);
}
+ nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
if (mode == DRM_MODE_DPMS_ON)
@@ -510,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
mode, nv_encoder->dcb->index);
+ nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
}
@@ -554,10 +612,42 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
NV_DEBUG_KMS(encoder->dev, "\n");
+ if (get_slave_funcs(encoder))
+ get_slave_funcs(encoder)->destroy(encoder);
+
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
}
+static void nv04_tmds_slave_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, 2);
+ struct i2c_board_info info[] = {
+ {
+ .type = "sil164",
+ .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+ .platform_data = &(struct sil164_encoder_params) {
+ SIL164_INPUT_EDGE_RISING
+ }
+ },
+ { }
+ };
+ int type;
+
+ if (!nv_gf4_disp_arch(dev) || !i2c ||
+ get_tmds_slave(encoder))
+ return;
+
+ type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2);
+ if (type < 0)
+ return;
+
+ drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
+ &i2c->adapter, &info[type]);
+}
+
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
.dpms = nv04_lvds_dpms,
.save = nv04_dfp_save,
@@ -584,11 +674,12 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = {
.destroy = nv04_dfp_destroy,
};
-int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
+int
+nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
{
const struct drm_encoder_helper_funcs *helper;
- struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_encoder *encoder;
int type;
switch (entry->type) {
@@ -613,11 +704,16 @@ int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
- drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
+ drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ if (entry->type == OUTPUT_TMDS &&
+ entry->location != DCB_LOC_ON_CHIP)
+ nv04_tmds_slave_init(encoder);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index c7898b4f6df..9e28cf772e3 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,8 +32,6 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
-#define MULTIPLE_ENCODERS(e) (e & (e - 1))
-
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
@@ -41,7 +39,7 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
if (dev_priv->chipset != 0x11) {
dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
- goto ownerknown;
+ return;
}
/* reading CR44 is broken on nv11, so we attempt to infer it */
@@ -52,8 +50,6 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
bool tvA = false;
bool tvB = false;
- NVLockVgaCrtcs(dev, false);
-
slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
0x80;
if (slaved_on_B)
@@ -66,8 +62,6 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
MASK(NV_CIO_CRE_LCD_LCD_SELECT));
- NVLockVgaCrtcs(dev, true);
-
if (slaved_on_A && !tvA)
dev_priv->crtc_owner = 0x0;
else if (slaved_on_B && !tvB)
@@ -79,14 +73,40 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
else
dev_priv->crtc_owner = 0x0;
}
+}
+
+int
+nv04_display_early_init(struct drm_device *dev)
+{
+ /* Make the I2C buses accessible. */
+ if (!nv_gf4_disp_arch(dev)) {
+ uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
+
+ if (!(pmc_enable & 1))
+ nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1);
+ }
-ownerknown:
- NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
+ /* Unlock the VGA CRTCs. */
+ NVLockVgaCrtcs(dev, false);
+
+ /* Make sure the CRTCs aren't in slaved mode. */
+ if (nv_two_heads(dev)) {
+ nv04_display_store_initial_head_owner(dev);
+ NVSetOwner(dev, 0);
+ }
+
+ return 0;
+}
+
+void
+nv04_display_late_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, dev_priv->crtc_owner);
- /* we need to ensure the heads are not tied henceforth, or reading any
- * 8 bit reg on head B will fail
- * setting a single arbitrary head solves that */
- NVSetOwner(dev, 0);
+ NVLockVgaCrtcs(dev, true);
}
int
@@ -94,14 +114,13 @@ nv04_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
int i, ret;
NV_DEBUG_KMS(dev, "\n");
- if (nv_two_heads(dev))
- nv04_display_store_initial_head_owner(dev);
nouveau_hw_save_vga_fonts(dev, 1);
drm_mode_config_init(dev);
@@ -132,19 +151,23 @@ nv04_display_create(struct drm_device *dev)
for (i = 0; i < dcb->entries; i++) {
struct dcb_entry *dcbent = &dcb->entry[i];
+ connector = nouveau_connector_create(dev, dcbent->connector);
+ if (IS_ERR(connector))
+ continue;
+
switch (dcbent->type) {
case OUTPUT_ANALOG:
- ret = nv04_dac_create(dev, dcbent);
+ ret = nv04_dac_create(connector, dcbent);
break;
case OUTPUT_LVDS:
case OUTPUT_TMDS:
- ret = nv04_dfp_create(dev, dcbent);
+ ret = nv04_dfp_create(connector, dcbent);
break;
case OUTPUT_TV:
if (dcbent->location == DCB_LOC_ON_CHIP)
- ret = nv17_tv_create(dev, dcbent);
+ ret = nv17_tv_create(connector, dcbent);
else
- ret = nv04_tv_create(dev, dcbent);
+ ret = nv04_tv_create(connector, dcbent);
break;
default:
NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
@@ -155,12 +178,16 @@ nv04_display_create(struct drm_device *dev)
continue;
}
- for (i = 0; i < dcb->connector.entries; i++)
- nouveau_connector_create(dev, &dcb->connector.entry[i]);
+ list_for_each_entry_safe(connector, ct,
+ &dev->mode_config.connector_list, head) {
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+ }
/* Save previous state */
- NVLockVgaCrtcs(dev, false);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -191,8 +218,6 @@ nv04_display_destroy(struct drm_device *dev)
}
/* Restore state */
- NVLockVgaCrtcs(dev, false);
-
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct drm_encoder_helper_funcs *func = encoder->helper_private;
@@ -207,15 +232,12 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 0);
}
-void
-nv04_display_restore(struct drm_device *dev)
+int
+nv04_display_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NVLockVgaCrtcs(dev, false);
-
/* meh.. modeset apparently doesn't setup all the regs and depends
* on pre-existing state, for now load the state of the card *before*
* nouveau was loaded, and then do a modeset.
@@ -233,12 +255,6 @@ nv04_display_restore(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->restore(crtc);
- if (nv_two_heads(dev)) {
- NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
- dev_priv->crtc_owner);
- NVSetOwner(dev, dev_priv->crtc_owner);
- }
-
- NVLockVgaCrtcs(dev, true);
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 66fe55983b6..06cedd99c26 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -112,6 +112,12 @@ nv04_fifo_channel_id(struct drm_device *dev)
NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
+#ifdef __BIG_ENDIAN
+#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
+#else
+#define DMA_FETCH_ENDIANNESS 0
+#endif
+
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
@@ -131,18 +137,13 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
- dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- 0));
- dev_priv->engine.instmem.finish_access(dev);
+ DMA_FETCH_ENDIANNESS));
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
@@ -169,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV04_RAMFC(chid), tmp;
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
tmp = nv_ri32(dev, fc + 8);
@@ -181,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
@@ -223,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
return -EINVAL;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
@@ -233,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
- dev_priv->engine.instmem.finish_access(dev);
nv04_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
@@ -297,6 +292,7 @@ nv04_fifo_init(struct drm_device *dev)
nv04_fifo_init_intr(dev);
pfifo->enable(dev);
+ pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
if (dev_priv->fifos[i]) {
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 618355e9cdd..c8973421b63 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -342,7 +342,7 @@ static uint32_t nv04_graph_ctx_regs[] = {
};
struct graph_state {
- int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+ uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
struct nouveau_channel *
@@ -527,8 +527,7 @@ static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
- chan->fence.last_sequence_irq = data;
- nouveau_fence_handler(chan->dev, chan->id);
+ atomic_set(&chan->fence.last_sequence_irq, data);
return 0;
}
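nv04_graph_mthd_set_ref() now records the fence sequence with atomic_set() and no longer calls the fence handler directly. A minimal sketch of that store/load pairing, written here with C11 atomics rather than the kernel's atomic_t, with hypothetical stand-in names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for chan->fence.last_sequence_irq. */
static atomic_uint last_sequence_irq;

/* Method handler side: just record the sequence number atomically. */
static void mthd_set_ref(uint32_t data)
{
	atomic_store(&last_sequence_irq, data);
}

/* Fence-walking side: read it back later without extra locking. */
static uint32_t fence_sequence(void)
{
	return atomic_load(&last_sequence_irq);
}

int main(void)
{
	mthd_set_ref(42);
	printf("sequence = %u\n", fence_sequence());
	return 0;
}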
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a3b9563a6f6..4408232d33f 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev)
NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
/* Clear all of it, except the BIOS image that's in the first 64KiB */
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
nv_wi32(dev, i, 0x00000000);
- dev_priv->engine.instmem.finish_access(dev);
}
static void
@@ -106,7 +104,7 @@ int nv04_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t offset;
- int ret = 0;
+ int ret;
nv04_instmem_determine_amount(dev);
nv04_instmem_configure_fixed_tables(dev);
@@ -129,14 +127,14 @@ int nv04_instmem_init(struct drm_device *dev)
offset = 0x40000;
}
- ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
- offset, dev_priv->ramin_rsvd_vram - offset);
+ ret = drm_mm_init(&dev_priv->ramin_heap, offset,
+ dev_priv->ramin_rsvd_vram - offset);
if (ret) {
- dev_priv->ramin_heap = NULL;
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
+ return ret;
}
- return ret;
+ return 0;
}
void
@@ -186,12 +184,7 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
}
void
-nv04_instmem_prepare_access(struct drm_device *dev, bool write)
-{
-}
-
-void
-nv04_instmem_finish_access(struct drm_device *dev)
+nv04_instmem_flush(struct drm_device *dev)
{
}
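The RAMIN setup above switches to drm_mm_init() and returns the error code directly instead of nulling a heap pointer and falling through. Roughly the same error-handling shape in standalone form; range_mgr_init() is a hypothetical stand-in, not a DRM API.

#include <errno.h>
#include <stdio.h>

/* Hypothetical allocator handle standing in for struct drm_mm. */
struct range_mgr {
	unsigned long start, size;
};

static int range_mgr_init(struct range_mgr *mm, unsigned long start,
			  unsigned long size)
{
	if (!size)
		return -EINVAL;
	mm->start = start;
	mm->size = size;
	return 0;
}

/* Mirrors the reworked init path: log the error code and propagate it
 * immediately instead of storing a sentinel. */
static int instmem_init(unsigned long offset, unsigned long rsvd_vram)
{
	struct range_mgr heap;
	int ret;

	ret = range_mgr_init(&heap, offset, rsvd_vram - offset);
	if (ret) {
		fprintf(stderr, "Failed to init RAMIN heap: %d\n", ret);
		return ret;
	}
	return 0;
}

int main(void)
{
	return instmem_init(0x40000, 0x100000) ? 1 : 0;
}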
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
index 617ed1e0526..2af43a1cb2e 100644
--- a/drivers/gpu/drm/nouveau/nv04_mc.c
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -11,6 +11,10 @@ nv04_mc_init(struct drm_device *dev)
*/
nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ /* Disable PROM access. */
+ nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index c4e3404337d..0b5d012d7c2 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -34,69 +34,26 @@
#include "i2c/ch7006.h"
-static struct {
- struct i2c_board_info board_info;
- struct drm_encoder_funcs funcs;
- struct drm_encoder_helper_funcs hfuncs;
- void *params;
-
-} nv04_tv_encoder_info[] = {
+static struct i2c_board_info nv04_tv_encoder_info[] = {
{
- .board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
- .params = &(struct ch7006_encoder_params) {
+ I2C_BOARD_INFO("ch7006", 0x75),
+ .platform_data = &(struct ch7006_encoder_params) {
CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
0, 0, 0,
CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
- },
+ }
},
+ { }
};
-static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
-{
- struct i2c_msg msg = {
- .addr = addr,
- .len = 0,
- };
-
- return i2c_transfer(adapter, &msg, 1) == 1;
-}
-
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
- struct nouveau_i2c_chan *i2c;
- bool was_locked;
- int i, ret;
-
- NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
-
- i2c = nouveau_i2c_find(dev, i2c_index);
- if (!i2c)
- return -ENODEV;
-
- was_locked = NVLockVgaCrtcs(dev, false);
-
- for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
- if (probe_i2c_addr(&i2c->adapter,
- nv04_tv_encoder_info[i].board_info.addr)) {
- ret = i;
- break;
- }
- }
-
- if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
- NV_TRACE(dev, "Detected TV encoder: %s\n",
- nv04_tv_encoder_info[i].board_info.type);
-
- } else {
- NV_TRACE(dev, "No TV encoders found.\n");
- i = -ENODEV;
- }
-
- NVLockVgaCrtcs(dev, was_locked);
- return i;
+ return nouveau_i2c_identify(dev, "TV encoder",
+ nv04_tv_encoder_info, i2c_index);
}
+
#define PLLSEL_TV_CRTC1_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
@@ -132,7 +89,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
- to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
+ get_slave_funcs(encoder)->dpms(encoder, mode);
}
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
@@ -195,7 +152,7 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
regp->tv_vskew = 1;
regp->tv_vsync_delay = 1;
- to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
+ get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
}
static void nv04_tv_commit(struct drm_encoder *encoder)
@@ -214,30 +171,31 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
-
+ get_slave_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
- kfree(nv_encoder);
+ kfree(encoder->helper_private);
+ kfree(nouveau_encoder(encoder));
}
-int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+static const struct drm_encoder_funcs nv04_tv_funcs = {
+ .destroy = nv04_tv_destroy,
+};
+
+int
+nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct i2c_adapter *adap;
- struct drm_encoder_funcs *funcs = NULL;
- struct drm_encoder_helper_funcs *hfuncs = NULL;
- struct drm_encoder_slave_funcs *sfuncs = NULL;
- int i2c_index = entry->i2c_index;
+ struct drm_device *dev = connector->dev;
+ struct drm_encoder_helper_funcs *hfuncs;
+ struct drm_encoder_slave_funcs *sfuncs;
+ struct nouveau_i2c_chan *i2c =
+ nouveau_i2c_find(dev, entry->i2c_index);
int type, ret;
- bool was_locked;
/* Ensure that we can talk to this encoder */
- type = nv04_tv_identify(dev, i2c_index);
+ type = nv04_tv_identify(dev, entry->i2c_index);
if (type < 0)
return type;
@@ -246,40 +204,31 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
if (!nv_encoder)
return -ENOMEM;
+ hfuncs = kzalloc(sizeof(*hfuncs), GFP_KERNEL);
+ if (!hfuncs) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
- funcs = &nv04_tv_encoder_info[type].funcs;
- hfuncs = &nv04_tv_encoder_info[type].hfuncs;
-
- drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(encoder, hfuncs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
-
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
-
- was_locked = NVLockVgaCrtcs(dev, false);
-
- ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
- &nv04_tv_encoder_info[type].board_info);
-
- NVLockVgaCrtcs(dev, was_locked);
-
+ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
+ &i2c->adapter, &nv04_tv_encoder_info[type]);
if (ret < 0)
- goto fail;
+ goto fail_cleanup;
/* Fill the function pointers */
- sfuncs = to_encoder_slave(encoder)->slave_funcs;
-
- *funcs = (struct drm_encoder_funcs) {
- .destroy = nv04_tv_destroy,
- };
+ sfuncs = get_slave_funcs(encoder);
*hfuncs = (struct drm_encoder_helper_funcs) {
.dpms = nv04_tv_dpms,
@@ -292,14 +241,16 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
.detect = sfuncs->detect,
};
- /* Set the slave encoder configuration */
- sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
+ /* Attach it to the specified connector. */
+ sfuncs->create_resources(encoder, connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
-fail:
+fail_cleanup:
drm_encoder_cleanup(encoder);
-
+ kfree(hfuncs);
+fail_free:
kfree(nv_encoder);
return ret;
}
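nv04_tv_create() now allocates the helper funcs separately and unwinds failures through two labels. A standalone sketch of that goto-based unwind order, with hypothetical stand-ins for the encoder objects:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-encoder allocations standing in for nv_encoder and the
 * kzalloc'd helper funcs. */
struct encoder { int registered; };
struct helper_funcs { int dummy; };

static int encoder_register(struct encoder *enc)
{
	enc->registered = 1;
	return 0;			/* return a negative errno on failure */
}

static void encoder_cleanup(struct encoder *enc)
{
	enc->registered = 0;
}

static int tv_create(void)
{
	struct encoder *enc;
	struct helper_funcs *hfuncs;
	int ret;

	enc = calloc(1, sizeof(*enc));
	if (!enc)
		return -ENOMEM;

	hfuncs = calloc(1, sizeof(*hfuncs));
	if (!hfuncs) {
		ret = -ENOMEM;
		goto fail_free;
	}

	ret = encoder_register(enc);
	if (ret < 0)
		goto fail_cleanup;

	return 0;			/* on success both objects stay live */

fail_cleanup:				/* undo in reverse order of setup */
	encoder_cleanup(enc);
	free(hfuncs);
fail_free:
	free(enc);
	return ret;
}

int main(void)
{
	return tv_create() ? 1 : 0;
}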
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index 7aeabf262bc..7a4069cf5d0 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
/* Fill entries that are seen filled in dumps of nvidia driver just
* after the channel is put into DMA mode
*/
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0);
- dev_priv->engine.instmem.finish_access(dev);
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
@@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV10_RAMFC(chid), tmp;
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
out:
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
@@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
return 0;
fc = NV10_RAMFC(chid);
- dev_priv->engine.instmem.prepare_access(dev, true);
-
nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
out:
- dev_priv->engine.instmem.finish_access(dev);
-
nv10_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 2e58c331e9b..007fc29e2f8 100644
--- a/drivers/gpu/drm/nouveau/nv17_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -55,7 +55,7 @@ get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
}
int
-nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
{
struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
uint32_t reg, shift, mask, value;
@@ -72,7 +72,7 @@ nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
}
int
-nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
{
struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
uint32_t reg, shift, mask, value;
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index fcf2cdd1949..b2f6a57c0cc 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -43,51 +43,51 @@ struct pipe_state {
};
static int nv10_graph_ctx_regs[] = {
- NV10_PGRAPH_CTX_SWITCH1,
- NV10_PGRAPH_CTX_SWITCH2,
- NV10_PGRAPH_CTX_SWITCH3,
- NV10_PGRAPH_CTX_SWITCH4,
- NV10_PGRAPH_CTX_SWITCH5,
- NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */
- NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */
- NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */
- NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */
- NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */
- 0x00400164,
- 0x00400184,
- 0x004001a4,
- 0x004001c4,
- 0x004001e4,
- 0x00400168,
- 0x00400188,
- 0x004001a8,
- 0x004001c8,
- 0x004001e8,
- 0x0040016c,
- 0x0040018c,
- 0x004001ac,
- 0x004001cc,
- 0x004001ec,
- 0x00400170,
- 0x00400190,
- 0x004001b0,
- 0x004001d0,
- 0x004001f0,
- 0x00400174,
- 0x00400194,
- 0x004001b4,
- 0x004001d4,
- 0x004001f4,
- 0x00400178,
- 0x00400198,
- 0x004001b8,
- 0x004001d8,
- 0x004001f8,
- 0x0040017c,
- 0x0040019c,
- 0x004001bc,
- 0x004001dc,
- 0x004001fc,
+ NV10_PGRAPH_CTX_SWITCH(0),
+ NV10_PGRAPH_CTX_SWITCH(1),
+ NV10_PGRAPH_CTX_SWITCH(2),
+ NV10_PGRAPH_CTX_SWITCH(3),
+ NV10_PGRAPH_CTX_SWITCH(4),
+ NV10_PGRAPH_CTX_CACHE(0, 0),
+ NV10_PGRAPH_CTX_CACHE(0, 1),
+ NV10_PGRAPH_CTX_CACHE(0, 2),
+ NV10_PGRAPH_CTX_CACHE(0, 3),
+ NV10_PGRAPH_CTX_CACHE(0, 4),
+ NV10_PGRAPH_CTX_CACHE(1, 0),
+ NV10_PGRAPH_CTX_CACHE(1, 1),
+ NV10_PGRAPH_CTX_CACHE(1, 2),
+ NV10_PGRAPH_CTX_CACHE(1, 3),
+ NV10_PGRAPH_CTX_CACHE(1, 4),
+ NV10_PGRAPH_CTX_CACHE(2, 0),
+ NV10_PGRAPH_CTX_CACHE(2, 1),
+ NV10_PGRAPH_CTX_CACHE(2, 2),
+ NV10_PGRAPH_CTX_CACHE(2, 3),
+ NV10_PGRAPH_CTX_CACHE(2, 4),
+ NV10_PGRAPH_CTX_CACHE(3, 0),
+ NV10_PGRAPH_CTX_CACHE(3, 1),
+ NV10_PGRAPH_CTX_CACHE(3, 2),
+ NV10_PGRAPH_CTX_CACHE(3, 3),
+ NV10_PGRAPH_CTX_CACHE(3, 4),
+ NV10_PGRAPH_CTX_CACHE(4, 0),
+ NV10_PGRAPH_CTX_CACHE(4, 1),
+ NV10_PGRAPH_CTX_CACHE(4, 2),
+ NV10_PGRAPH_CTX_CACHE(4, 3),
+ NV10_PGRAPH_CTX_CACHE(4, 4),
+ NV10_PGRAPH_CTX_CACHE(5, 0),
+ NV10_PGRAPH_CTX_CACHE(5, 1),
+ NV10_PGRAPH_CTX_CACHE(5, 2),
+ NV10_PGRAPH_CTX_CACHE(5, 3),
+ NV10_PGRAPH_CTX_CACHE(5, 4),
+ NV10_PGRAPH_CTX_CACHE(6, 0),
+ NV10_PGRAPH_CTX_CACHE(6, 1),
+ NV10_PGRAPH_CTX_CACHE(6, 2),
+ NV10_PGRAPH_CTX_CACHE(6, 3),
+ NV10_PGRAPH_CTX_CACHE(6, 4),
+ NV10_PGRAPH_CTX_CACHE(7, 0),
+ NV10_PGRAPH_CTX_CACHE(7, 1),
+ NV10_PGRAPH_CTX_CACHE(7, 2),
+ NV10_PGRAPH_CTX_CACHE(7, 3),
+ NV10_PGRAPH_CTX_CACHE(7, 4),
NV10_PGRAPH_CTX_USER,
NV04_PGRAPH_DMA_START_0,
NV04_PGRAPH_DMA_START_1,
@@ -653,6 +653,78 @@ static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
return -1;
}
+static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
+ uint32_t inst)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+ uint32_t ctx_user, ctx_switch[5];
+ int i, subchan = -1;
+
+ /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
+ * that cannot be restored via MMIO. Do it through the FIFO
+ * instead.
+ */
+
+ /* Look for a celsius object */
+ for (i = 0; i < 8; i++) {
+ int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+
+ if (class == 0x56 || class == 0x96 || class == 0x99) {
+ subchan = i;
+ break;
+ }
+ }
+
+ if (subchan < 0 || !inst)
+ return;
+
+ /* Save the current ctx object */
+ ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ for (i = 0; i < 5; i++)
+ ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
+
+ /* Save the FIFO state */
+ st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+ st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
+ st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
+ fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+
+ for (i = 0; i < ARRAY_SIZE(fifo); i++)
+ fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
+
+ /* Switch to the celsius subchannel */
+ for (i = 0; i < 5; i++)
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
+ nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+ nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+
+ /* Inject NV10TCL_DMA_VTXBUF */
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
+ 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+ nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+ pgraph->fifo_access(dev, true);
+ pgraph->fifo_access(dev, false);
+
+ /* Restore the FIFO state */
+ for (i = 0; i < ARRAY_SIZE(fifo); i++)
+ nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
+
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+
+ /* Restore the current ctx object */
+ for (i = 0; i < 5; i++)
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
+}
+
int nv10_graph_load_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
@@ -670,6 +742,8 @@ int nv10_graph_load_context(struct nouveau_channel *chan)
}
nv10_graph_load_pipe(chan);
+ nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
+ & 0xffff));
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
@@ -856,11 +930,12 @@ int nv10_graph_init(struct drm_device *dev)
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
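The register list above is rewritten in terms of NV10_PGRAPH_CTX_SWITCH(i) and NV10_PGRAPH_CTX_CACHE(i, j) macros instead of forty-odd literal offsets. A sketch of the macro style, assuming the stride layout implied by the old constants (4-byte subchannel stride, 0x20 cache-slot stride); treat the offsets as illustrative rather than authoritative.

#include <stdio.h>

/* Offsets reconstructed from the old literal table. */
#define PGRAPH_CTX_SWITCH(i)	(0x0040014c + (i) * 4)
#define PGRAPH_CTX_CACHE(i, j)	(0x00400160 + (i) * 4 + (j) * 0x20)

int main(void)
{
	int i, j;

	for (i = 0; i < 8; i++)
		for (j = 0; j < 5; j++)
			printf("CTX_CACHE(%d,%d) = 0x%08x\n", i, j,
			       (unsigned)PGRAPH_CTX_CACHE(i, j));
	return 0;
}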
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 74c880374fb..13cdc05b7c2 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -37,6 +37,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -52,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
- gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -64,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+ gpio->set(dev, DCB_GPIO_TVDAC1, true);
+ gpio->set(dev, DCB_GPIO_TVDAC0, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -110,12 +111,31 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+ gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
+ gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
return sample;
}
+static bool
+get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
+{
+ /* Zotac FX5200 */
+ if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
+ nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+ *pin_mask = 0xc;
+ return false;
+ }
+
+ /* MSI nForce2 IGP */
+ if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
+ *pin_mask = 0xc;
+ return false;
+ }
+
+ return true;
+}
+
static enum drm_connector_status
nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
@@ -124,12 +144,20 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct dcb_entry *dcb = tv_enc->base.dcb;
+ bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
- if (dev_priv->chipset == 0x42 ||
- dev_priv->chipset == 0x43)
- tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
- else
- tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
+ if (nv04_dac_in_use(encoder))
+ return connector_status_disconnected;
+
+ if (reliable) {
+ if (dev_priv->chipset == 0x42 ||
+ dev_priv->chipset == 0x43)
+ tv_enc->pin_mask =
+ nv42_tv_sample_load(encoder) >> 28 & 0xe;
+ else
+ tv_enc->pin_mask =
+ nv17_dac_sample_load(encoder) >> 28 & 0xe;
+ }
switch (tv_enc->pin_mask) {
case 0x2:
@@ -154,7 +182,9 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
conf->tv_subconnector_property,
tv_enc->subconnector);
- if (tv_enc->subconnector) {
+ if (!reliable) {
+ return connector_status_unknown;
+ } else if (tv_enc->subconnector) {
NV_INFO(dev, "Load detected on output %c\n",
'@' + ffs(dcb->or));
return connector_status_connected;
@@ -296,6 +326,9 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ if (nv04_dac_in_use(encoder))
+ return false;
+
if (tv_norm->kind == CTV_ENC_MODE)
adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
else
@@ -307,6 +340,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -331,8 +366,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -744,8 +779,10 @@ static struct drm_encoder_funcs nv17_tv_funcs = {
.destroy = nv17_tv_destroy,
};
-int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+int
+nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
{
+ struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
struct nv17_tv_encoder *tv_enc = NULL;
@@ -774,5 +811,7 @@ int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ nv17_tv_create_resources(encoder, connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
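get_tv_detect_quirks() above flags a few boards whose TV load detection is unreliable and supplies a fixed pin mask instead. A standalone sketch of the same quirk-table idea keyed on PCI device and subsystem IDs; the struct and helper are hypothetical, while the ID tuples are the ones from the hunk.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical ID tuple matched against PCI device id plus subsystem
 * vendor/device. */
struct tv_quirk {
	uint16_t device, subvendor, subdevice;
	uint32_t pin_mask;		/* outputs known to be wired up */
};

static const struct tv_quirk tv_detect_quirks[] = {
	{ 0x0322, 0x19da, 0x1035, 0xc },	/* Zotac FX5200 */
	{ 0x0322, 0x19da, 0x2035, 0xc },	/* Zotac FX5200 */
	{ 0x01f0, 0x1462, 0x5710, 0xc },	/* MSI nForce2 IGP */
};

/* Returns false (detection unreliable) and fills *pin_mask on a match,
 * true otherwise. */
static bool tv_detect_reliable(uint16_t dev, uint16_t subv, uint16_t subd,
			       uint32_t *pin_mask)
{
	size_t i;

	for (i = 0; i < sizeof(tv_detect_quirks) / sizeof(tv_detect_quirks[0]); i++) {
		const struct tv_quirk *q = &tv_detect_quirks[i];

		if (q->device == dev && q->subvendor == subv &&
		    q->subdevice == subd) {
			*pin_mask = q->pin_mask;
			return false;
		}
	}
	return true;
}

int main(void)
{
	uint32_t mask = 0;
	bool ok = tv_detect_reliable(0x0322, 0x19da, 0x1035, &mask);

	printf("reliable=%d pin_mask=0x%x\n", ok, (unsigned)mask);
	return 0;
}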
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index d6fc0a82f03..17f309b36c9 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -370,68 +370,54 @@ nv20_graph_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
- unsigned int ctx_size;
unsigned int idoffs = 0x28/4;
int ret;
switch (dev_priv->chipset) {
case 0x20:
- ctx_size = NV20_GRCTX_SIZE;
ctx_init = nv20_graph_context_init;
idoffs = 0;
break;
case 0x25:
case 0x28:
- ctx_size = NV25_GRCTX_SIZE;
ctx_init = nv25_graph_context_init;
break;
case 0x2a:
- ctx_size = NV2A_GRCTX_SIZE;
ctx_init = nv2a_graph_context_init;
idoffs = 0;
break;
case 0x30:
case 0x31:
- ctx_size = NV30_31_GRCTX_SIZE;
ctx_init = nv30_31_graph_context_init;
break;
case 0x34:
- ctx_size = NV34_GRCTX_SIZE;
ctx_init = nv34_graph_context_init;
break;
case 0x35:
case 0x36:
- ctx_size = NV35_36_GRCTX_SIZE;
ctx_init = nv35_36_graph_context_init;
break;
default:
- ctx_size = 0;
- ctx_init = nv35_36_graph_context_init;
- NV_ERROR(dev, "Please contact the devs if you want your NV%x"
- " card to work\n", dev_priv->chipset);
- return -ENOSYS;
- break;
+ BUG_ON(1);
}
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
if (ret)
return ret;
/* Initialise default context values */
- dev_priv->engine.instmem.prepare_access(dev, true);
ctx_init(dev, chan->ramin_grctx->gpuobj);
/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
(chan->id << 24) | 0x1); /* CTX_USER */
- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
- chan->ramin_grctx->instance >> 4);
-
- dev_priv->engine.instmem.finish_access(dev);
+ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
return 0;
}
@@ -440,13 +426,12 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
- dev_priv->engine.instmem.prepare_access(dev, true);
- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
- dev_priv->engine.instmem.finish_access(dev);
+ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0);
}
int
@@ -538,29 +523,44 @@ nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
int
nv20_graph_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv =
- (struct drm_nouveau_private *)dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
uint32_t tmp, vramsz;
int ret, i;
+ switch (dev_priv->chipset) {
+ case 0x20:
+ pgraph->grctx_size = NV20_GRCTX_SIZE;
+ break;
+ case 0x25:
+ case 0x28:
+ pgraph->grctx_size = NV25_GRCTX_SIZE;
+ break;
+ case 0x2a:
+ pgraph->grctx_size = NV2A_GRCTX_SIZE;
+ break;
+ default:
+ NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
+ pgraph->accel_blocked = true;
+ return 0;
+ }
+
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
- if (!dev_priv->ctx_table) {
+ if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- dev_priv->ctx_table_size = 32 * 4;
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
- dev_priv->ctx_table_size, 16,
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
- &dev_priv->ctx_table);
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- dev_priv->ctx_table->instance >> 4);
+ pgraph->ctx_table->instance >> 4);
nv20_graph_rdi(dev);
@@ -616,7 +616,7 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = drm_get_resource_len(dev, 0) - 1;
+ vramsz = pci_resource_len(dev->pdev, 0) - 1;
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
@@ -644,34 +644,52 @@ void
nv20_graph_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
+ nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table);
}
int
nv30_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int ret, i;
+ switch (dev_priv->chipset) {
+ case 0x30:
+ case 0x31:
+ pgraph->grctx_size = NV30_31_GRCTX_SIZE;
+ break;
+ case 0x34:
+ pgraph->grctx_size = NV34_GRCTX_SIZE;
+ break;
+ case 0x35:
+ case 0x36:
+ pgraph->grctx_size = NV35_36_GRCTX_SIZE;
+ break;
+ default:
+ NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
+ pgraph->accel_blocked = true;
+ return 0;
+ }
+
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
- if (!dev_priv->ctx_table) {
+ if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- dev_priv->ctx_table_size = 32 * 4;
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
- dev_priv->ctx_table_size, 16,
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
- &dev_priv->ctx_table);
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- dev_priv->ctx_table->instance >> 4);
+ pgraph->ctx_table->instance >> 4);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -717,7 +735,7 @@ nv30_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x0040075c , 0x00000001);
/* begin RAM config */
- /* vramsz = drm_get_resource_len(dev, 0) - 1; */
+ /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
if (dev_priv->chipset != 0x34) {
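The nv20/nv30 changes move grctx_size and the context pointer table into the pgraph engine struct: init() validates the chipset once and sets accel_blocked for unknown parts, so create_context() may treat an unknown chipset as a driver bug. A standalone sketch of that split; the context sizes are illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative sizes; the real values come from the context headers. */
#define GRCTX_SIZE_NV20		0x3000
#define GRCTX_SIZE_NV25		0x3800

struct pgraph {
	unsigned grctx_size;
	bool accel_blocked;
};

/* init(): validate the chipset once and record the context size, or
 * disable acceleration entirely for unknown parts. */
static void graph_init(struct pgraph *pgraph, int chipset)
{
	switch (chipset) {
	case 0x20:
		pgraph->grctx_size = GRCTX_SIZE_NV20;
		break;
	case 0x25:
	case 0x28:
		pgraph->grctx_size = GRCTX_SIZE_NV25;
		break;
	default:
		pgraph->accel_blocked = true;
		break;
	}
}

/* create_context(): only reachable when acceleration is available, so an
 * unknown chipset here would be a driver bug rather than a user error. */
static void graph_create_context(const struct pgraph *pgraph, int chipset)
{
	assert(!pgraph->accel_blocked);
	printf("allocating 0x%x bytes of context for NV%02x\n",
	       pgraph->grctx_size, (unsigned)chipset);
}

int main(void)
{
	struct pgraph pgraph = { 0 };

	graph_init(&pgraph, 0x20);
	if (!pgraph.accel_blocked)
		graph_create_context(&pgraph, 0x20);
	return 0;
}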
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
new file mode 100644
index 00000000000..4a3f2f09512
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+static int
+calc_bias(struct drm_device *dev, int k, int i, int j)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int b = (dev_priv->chipset > 0x30 ?
+ nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+ 0) & 0xf;
+
+ return 2 * (b & 0x8 ? b - 0x10 : b);
+}
+
+static int
+calc_ref(struct drm_device *dev, int l, int k, int i)
+{
+ int j, x = 0;
+
+ for (j = 0; j < 4; j++) {
+ int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);
+
+ x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
+ }
+
+ return x;
+}
+
+int
+nv30_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i, j;
+
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
+
+ /* Init the memory timing regs at 0x10037c/0x1003ac */
+ if (dev_priv->chipset == 0x30 ||
+ dev_priv->chipset == 0x31 ||
+ dev_priv->chipset == 0x35) {
+ /* Related to ROP count */
+ int n = (dev_priv->chipset == 0x31 ? 2 : 4);
+ int l = nv_rd32(dev, 0x1003d0);
+
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < 3; j++)
+ nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
+ calc_ref(dev, l, 0, j));
+
+ for (j = 0; j < 2; j++)
+ nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
+ calc_ref(dev, l, 1, j));
+ }
+ }
+
+ return 0;
+}
+
+void
+nv30_fb_takedown(struct drm_device *dev)
+{
+}
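calc_bias() in the new nv30_fb.c sign-extends a 4-bit field (nibbles 0x8..0xf are read as -8..-1) and doubles it, and calc_ref() clamps the adjusted byte into 0..0x1f before setting bit 7. A small standalone worked example of that arithmetic; the 0x10 base byte is made up for illustration.

#include <stdio.h>

/* Sign-extend a 4-bit field and scale it, as calc_bias() does. */
static int bias_from_nibble(int b)
{
	b &= 0xf;
	return 2 * ((b & 0x8) ? b - 0x10 : b);
}

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int base = 0x10;	/* made-up timing byte, for illustration */
	int b;

	for (b = 0; b < 16; b++) {
		int m = base + bias_from_nibble(b);

		/* calc_ref() clamps into 0..0x1f and sets bit 7. */
		printf("nibble 0x%x -> bias %+d, ref byte 0x%02x\n",
		       b, bias_from_nibble(b),
		       (unsigned)(0x80 | clamp_int(m, 0, 0x1f)));
	}
	return 0;
}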
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 500ccfd3a0b..2b67f1835c3 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -61,7 +60,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
0x30000000 /* no idea.. */);
nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
- dev_priv->engine.instmem.finish_access(dev);
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
@@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
@@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
return 0;
fc = NV40_RAMFC(chid);
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
nv_wi32(dev, fc + 72, tmp);
#endif
- dev_priv->engine.instmem.finish_access(dev);
nv40_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 704a25d04ac..fd7d2b50131 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -58,6 +58,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_grctx ctx = {};
int ret;
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
@@ -67,20 +68,13 @@ nv40_graph_create_context(struct nouveau_channel *chan)
return ret;
/* Initialise default context values */
- dev_priv->engine.instmem.prepare_access(dev, true);
- if (!pgraph->ctxprog) {
- struct nouveau_grctx ctx = {};
-
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = chan->ramin_grctx->gpuobj;
- nv40_grctx_init(&ctx);
- } else {
- nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
- }
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv40_grctx_init(&ctx);
+
nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
chan->ramin_grctx->gpuobj->im_pramin->start);
- dev_priv->engine.instmem.finish_access(dev);
return 0;
}
@@ -238,7 +232,8 @@ nv40_graph_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv =
(struct drm_nouveau_private *)dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- uint32_t vramsz;
+ struct nouveau_grctx ctx = {};
+ uint32_t vramsz, *cp;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -246,32 +241,22 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- if (nouveau_ctxfw) {
- nouveau_grctx_prog_load(dev);
- dev_priv->engine.graph.grctx_size = 175 * 1024;
- }
+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
- if (!dev_priv->engine.graph.ctxprog) {
- struct nouveau_grctx ctx = {};
- uint32_t *cp;
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 256;
+ nv40_grctx_init(&ctx);
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 256;
- nv40_grctx_init(&ctx);
- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-
- kfree(cp);
- }
+ kfree(cp);
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -367,7 +352,7 @@ nv40_graph_init(struct drm_device *dev)
nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
/* begin RAM config */
- vramsz = drm_get_resource_len(dev, 0) - 1;
+ vramsz = pci_resource_len(dev->pdev, 0) - 1;
switch (dev_priv->chipset) {
case 0x40:
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
@@ -407,7 +392,6 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
- nouveau_grctx_fini(dev);
}
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
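nv40_graph_init() now always generates the context program with nv40_grctx_init() into a temporary buffer and streams it through the UCODE index/data register pair, dropping the external-firmware path. A rough standalone sketch of that upload pattern; the generator, opcodes and register numbers here are placeholders, not the hardware's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CTXPROG_MAX	256

/* Placeholder generator standing in for nv40_grctx_init(): it emits the
 * context-switch microcode into a caller-provided buffer and returns the
 * number of words used. Opcodes are meaningless filler. */
static unsigned generate_ctxprog(uint32_t *buf, unsigned max)
{
	unsigned i, len = 16;

	for (i = 0; i < len && i < max; i++)
		buf[i] = 0x00100000u | i;
	return i;
}

/* MMIO write stub; register numbers below stand in for the
 * UCODE_INDEX/UCODE_DATA pair. */
static void write_reg(unsigned reg, uint32_t val)
{
	printf("wr32 0x%06x <- 0x%08x\n", reg, (unsigned)val);
}

int main(void)
{
	uint32_t *cp = malloc(sizeof(*cp) * CTXPROG_MAX);
	unsigned i, len;

	if (!cp)
		return 1;

	len = generate_ctxprog(cp, CTXPROG_MAX);

	write_reg(0x400324, 0);			/* reset the ucode index */
	for (i = 0; i < len; i++)
		write_reg(0x400328, cp[i]);	/* stream the program */

	free(cp);				/* only needed during init */
	return 0;
}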
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index 2a3495e848e..e4e72c12ab6 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -19,7 +19,7 @@ nv40_mc_init(struct drm_device *dev)
case 0x46: /* G72 */
case 0x4e:
case 0x4c: /* C51_G7X */
- tmp = nv_rd32(dev, NV40_PFB_020C);
+ tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
nv_wr32(dev, NV40_PMC_1700, tmp);
nv_wr32(dev, NV40_PMC_1704, 0);
nv_wr32(dev, NV40_PMC_1708, 0);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index b4e4a3b05ea..bfd4ca2fe7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,11 +264,16 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
- uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pll_lims pll;
- uint32_t reg1, reg2;
+ uint32_t reg, reg1, reg2;
int ret, N1, M1, N2, M2, P;
+ if (dev_priv->chipset < NV_C0)
+ reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ else
+ reg = 0x614140 + (head * 0x800);
+
ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
@@ -286,7 +291,8 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
nv_wr32(dev, reg, 0x10000611);
nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
- } else {
+ } else
+ if (dev_priv->chipset < NV_C0) {
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
if (ret <= 0)
return 0;
@@ -298,6 +304,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
nv_wr32(dev, reg, 0x50000610);
nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
nv_wr32(dev, reg + 8, N2);
+ } else {
+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+ pclk, ret, N1, N2, M1, P);
+
+ nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100);
+ nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1);
+ nv_wr32(dev, reg + 0x10, N2 << 16);
}
return 0;
@@ -348,7 +365,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
if (!gem)
- return -EINVAL;
+ return -ENOENT;
cursor = nouveau_gem_object(gem);
ret = nouveau_bo_map(cursor);
@@ -381,15 +398,12 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
static void
nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t start, uint32_t size)
{
+ int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int i;
-
- if (size != 256)
- return;
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -440,47 +454,15 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
- uint32_t dac = 0, sor = 0;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
- /* Disconnect all unused encoders. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (!drm_helper_encoder_in_use(encoder))
- continue;
-
- if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
- nv_encoder->dcb->type == OUTPUT_TV)
- dac |= (1 << nv_encoder->or);
- else
- sor |= (1 << nv_encoder->or);
- }
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
- nv_encoder->dcb->type == OUTPUT_TV) {
- if (dac & (1 << nv_encoder->or))
- continue;
- } else {
- if (sor & (1 << nv_encoder->or))
- continue;
- }
-
- nv_encoder->disconnect(nv_encoder);
- }
-
nv50_crtc_blank(nv_crtc, true);
}
static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc2;
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
@@ -491,20 +473,14 @@ nv50_crtc_commit(struct drm_crtc *crtc)
nv50_crtc_blank(nv_crtc, false);
- /* Explicitly blank all unused crtc's. */
- list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
- if (!drm_helper_crtc_in_use(crtc2))
- nv50_crtc_blank(nouveau_crtc(crtc2), true);
- }
-
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while committing crtc\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ OUT_RING (evo, 0);
+ FIRE_RING (evo);
}
static bool
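nv50_crtc_gamma_set() gains a start parameter and clamps the update to the 256-entry LUT rather than rejecting anything that is not a full-table write. A standalone sketch of the clamping, with one colour channel shown for brevity:

#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE 256

/* Clamp a partial update to the LUT bounds. */
static void gamma_set(uint16_t *lut_r, const uint16_t *r,
		      uint32_t start, uint32_t size)
{
	uint32_t end = (start + size > LUT_SIZE) ? LUT_SIZE : start + size;
	uint32_t i;

	for (i = start; i < end; i++)
		lut_r[i] = r[i];
}

int main(void)
{
	static uint16_t lut[LUT_SIZE], src[LUT_SIZE];
	uint32_t i;

	for (i = 0; i < LUT_SIZE; i++)
		src[i] = (uint16_t)(i << 8);

	gamma_set(lut, src, 200, 100);		/* clamped to entries 200..255 */
	printf("lut[255] = 0x%04x\n", (unsigned)lut[255]);
	return 0;
}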
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 1fd9537beff..1bc08596294 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -37,22 +37,31 @@
#include "nv50_display.h"
static void
-nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
+nv50_dac_disconnect(struct drm_encoder *encoder)
{
- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
int ret;
+ if (!nv_encoder->crtc)
+ return;
+ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
+
NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
- ret = RING_SPACE(evo, 2);
+ ret = RING_SPACE(evo, 4);
if (ret) {
NV_ERROR(dev, "no space while disconnecting DAC\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, 0);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
+
+ nv_encoder->crtc = NULL;
}
static enum drm_connector_status
@@ -213,7 +222,8 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n",
+ nv_encoder->or, nv_encoder->dcb->type, crtc->index);
nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -243,6 +253,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
OUT_RING(evo, mode_ctl);
OUT_RING(evo, mode_ctl2);
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static struct drm_crtc *
+nv50_dac_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
}
static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
@@ -253,7 +271,9 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
.prepare = nv50_dac_prepare,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
- .detect = nv50_dac_detect
+ .get_crtc = nv50_dac_crtc_get,
+ .detect = nv50_dac_detect,
+ .disable = nv50_dac_disconnect
};
static void
@@ -275,14 +295,11 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
};
int
-nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- NV_DEBUG_KMS(dev, "\n");
- NV_INFO(dev, "Detected a DAC output\n");
-
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
@@ -291,14 +308,14 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
- nv_encoder->disconnect = nv50_dac_disconnect;
-
- drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
DRM_MODE_ENCODER_DAC);
drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
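The DAC code now records the CRTC it was programmed for in the encoder and exposes it through .get_crtc, so .disable can bail out early when nothing is being driven. A minimal standalone sketch of that bookkeeping; the types are stand-ins, not DRM structures.

#include <stddef.h>
#include <stdio.h>

struct crtc {
	int index;
};

struct encoder {
	struct crtc *crtc;	/* NULL when disconnected */
};

static void dac_mode_set(struct encoder *enc, struct crtc *crtc)
{
	/* ...program the hardware for this CRTC... */
	enc->crtc = crtc;
}

static void dac_disconnect(struct encoder *enc)
{
	if (!enc->crtc)
		return;		/* nothing to tear down */
	/* ...blank the CRTC and submit the disconnect... */
	enc->crtc = NULL;
}

static struct crtc *dac_get_crtc(struct encoder *enc)
{
	return enc->crtc;
}

int main(void)
{
	struct crtc crtc0 = { .index = 0 };
	struct encoder dac = { .crtc = NULL };

	dac_mode_set(&dac, &crtc0);
	printf("driving crtc %d\n", dac_get_crtc(&dac)->index);
	dac_disconnect(&dac);
	printf("connected: %s\n", dac_get_crtc(&dac) ? "yes" : "no");
	return 0;
}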
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 580a5d10be9..612fa6d6a0c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -71,14 +71,16 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
return ret;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
nv_wo32(dev, obj, 1, limit);
nv_wo32(dev, obj, 2, offset);
nv_wo32(dev, obj, 3, 0x00000000);
nv_wo32(dev, obj, 4, 0x00000000);
- nv_wo32(dev, obj, 5, 0x00010000);
- dev_priv->engine.instmem.finish_access(dev);
+ if (dev_priv->card_type < NV_C0)
+ nv_wo32(dev, obj, 5, 0x00010000);
+ else
+ nv_wo32(dev, obj, 5, 0x00020000);
+ dev_priv->engine.instmem.flush(dev);
return 0;
}
@@ -110,8 +112,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
return ret;
}
- ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
- im_pramin->start, 32768);
+ ret = drm_mm_init(&chan->ramin_heap,
+ chan->ramin->gpuobj->im_pramin->start, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
nv50_evo_channel_del(pchan);
@@ -179,13 +181,25 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
}
int
+nv50_display_early_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nv50_display_late_takedown(struct drm_device *dev)
+{
+}
+
+int
nv50_display_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_channel *evo = dev_priv->evo;
struct drm_connector *connector;
- uint32_t val, ram_amount, hpd_en[2];
+ uint32_t val, ram_amount;
uint64_t start;
int ret, i;
@@ -366,26 +380,13 @@ nv50_display_init(struct drm_device *dev)
NV50_PDISPLAY_INTR_EN_CLK_UNK40));
/* enable hotplug interrupts */
- hpd_en[0] = hpd_en[1] = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- struct dcb_gpio_entry *gpio;
if (conn->dcb->gpio_tag == 0xff)
continue;
- gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
- if (!gpio)
- continue;
-
- hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
- }
-
- nv_wr32(dev, 0xe054, 0xffffffff);
- nv_wr32(dev, 0xe050, hpd_en[0]);
- if (dev_priv->chipset >= 0x90) {
- nv_wr32(dev, 0xe074, 0xffffffff);
- nv_wr32(dev, 0xe070, hpd_en[1]);
+ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
}
return 0;
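Hotplug interrupt setup above now goes through the GPIO engine's irq_enable() per connector instead of building the 0xe050/0xe070 masks in the display code. A standalone sketch of that indirection; the vtable and tag values are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical engine vtable: the display code just asks the GPIO engine
 * to enable the interrupt for each connector's GPIO tag. */
struct gpio_engine {
	void (*irq_enable)(int gpio_tag, bool on);
};

static void gpio_irq_enable(int gpio_tag, bool on)
{
	printf("hotplug irq for gpio tag 0x%02x: %s\n",
	       (unsigned)gpio_tag, on ? "on" : "off");
}

int main(void)
{
	struct gpio_engine gpio = { .irq_enable = gpio_irq_enable };
	int connector_tags[] = { 0x07, 0x08, 0xff };	/* 0xff = no GPIO */
	unsigned i;

	for (i = 0; i < sizeof(connector_tags) / sizeof(connector_tags[0]); i++) {
		if (connector_tags[i] == 0xff)
			continue;
		gpio.irq_enable(connector_tags[i], true);
	}
	return 0;
}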
@@ -465,6 +466,7 @@ int nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct drm_connector *connector, *ct;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
@@ -507,14 +509,18 @@ int nv50_display_create(struct drm_device *dev)
continue;
}
+ connector = nouveau_connector_create(dev, entry->connector);
+ if (IS_ERR(connector))
+ continue;
+
switch (entry->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
case OUTPUT_DP:
- nv50_sor_create(dev, entry);
+ nv50_sor_create(connector, entry);
break;
case OUTPUT_ANALOG:
- nv50_dac_create(dev, entry);
+ nv50_dac_create(connector, entry);
break;
default:
NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
@@ -522,11 +528,13 @@ int nv50_display_create(struct drm_device *dev)
}
}
- for (i = 0 ; i < dcb->connector.entries; i++) {
- if (i != 0 && dcb->connector.entry[i].index2 ==
- dcb->connector.entry[i - 1].index2)
- continue;
- nouveau_connector_create(dev, &dcb->connector.entry[i]);
+ list_for_each_entry_safe(connector, ct,
+ &dev->mode_config.connector_list, head) {
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
}
ret = nv50_display_init(dev);
@@ -538,7 +546,8 @@ int nv50_display_create(struct drm_device *dev)
return 0;
}
-int nv50_display_destroy(struct drm_device *dev)
+void
+nv50_display_destroy(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -548,135 +557,30 @@ int nv50_display_destroy(struct drm_device *dev)
nv50_display_disable(dev);
nv50_evo_channel_del(&dev_priv->evo);
-
- return 0;
-}
-
-static inline uint32_t
-nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t mc;
-
- if (sor) {
- if (dev_priv->chipset < 0x90 ||
- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
- else
- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
- } else {
- mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
- }
-
- return mc;
-}
-
-static int
-nv50_display_irq_head(struct drm_device *dev, int *phead,
- struct dcb_entry **pdcbent)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
- uint32_t dac = 0, sor = 0;
- int head, i, or = 0, type = OUTPUT_ANY;
-
- /* We're assuming that head 0 *or* head 1 will be active here,
- * and not both. I'm not sure if the hw will even signal both
- * ever, but it definitely shouldn't for us as we commit each
- * CRTC separately, and submission will be blocked by the GPU
- * until we handle each in turn.
- */
- NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- head = ffs((unk30 >> 9) & 3) - 1;
- if (head < 0)
- return -EINVAL;
-
- /* This assumes CRTCs are never bound to multiple encoders, which
- * should be the case.
- */
- for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
- uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
- if (!(mc & (1 << head)))
- continue;
-
- switch ((mc >> 8) & 0xf) {
- case 0: type = OUTPUT_ANALOG; break;
- case 1: type = OUTPUT_TV; break;
- default:
- NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
- return -1;
- }
-
- or = i;
- }
-
- for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
- uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
- if (!(mc & (1 << head)))
- continue;
-
- switch ((mc >> 8) & 0xf) {
- case 0: type = OUTPUT_LVDS; break;
- case 1: type = OUTPUT_TMDS; break;
- case 2: type = OUTPUT_TMDS; break;
- case 5: type = OUTPUT_TMDS; break;
- case 8: type = OUTPUT_DP; break;
- case 9: type = OUTPUT_DP; break;
- default:
- NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
- return -1;
- }
-
- or = i;
- }
-
- NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
- if (type == OUTPUT_ANY) {
- NV_ERROR(dev, "unknown encoder!!\n");
- return -1;
- }
-
- for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
- struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
-
- if (dcbent->type != type)
- continue;
-
- if (!(dcbent->or & (1 << or)))
- continue;
-
- *phead = head;
- *pdcbent = dcbent;
- return 0;
- }
-
- NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
- return 0;
}
-static uint32_t
-nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
- int pxclk)
+static u16
+nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
+ u32 mc, int pxclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_encoder *encoder;
struct nvbios *bios = &dev_priv->vbios;
- uint32_t mc, script = 0, or;
+ u32 script = 0, or;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (nv_encoder->dcb != dcbent)
+ if (nv_encoder->dcb != dcb)
continue;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
break;
}
- or = ffs(dcbent->or) - 1;
- mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
- switch (dcbent->type) {
+ or = ffs(dcb->or) - 1;
+ switch (dcb->type) {
case OUTPUT_LVDS:
script = (mc >> 8) & 0xf;
if (bios->fp_no_ddc) {
@@ -767,17 +671,88 @@ nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
static void
nv50_display_unk10_handler(struct drm_device *dev)
{
- struct dcb_entry *dcbent;
- int head, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 unk30 = nv_rd32(dev, 0x610030), mc;
+ int i, crtc, or, type = OUTPUT_ANY;
- ret = nv50_display_irq_head(dev, &head, &dcbent);
- if (ret)
- goto ack;
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ dev_priv->evo_irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
- nouveau_bios_run_display_table(dev, dcbent, 0, -1);
+ /* Determine which CRTC we're dealing with; only one will ever be
+ * signalled at a time with the current nouveau code.
+ */
+ crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
+ if (crtc < 0)
+ goto ack;
+
+ /* Nothing needs to be done for the encoder */
+ crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
+ if (crtc < 0)
+ goto ack;
+ /* Find which encoder was connected to the CRTC */
+ for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
+
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_ANALOG; break;
+ case 1: type = OUTPUT_TV; break;
+ default:
+ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
+
+ or = i;
+ }
+
+ for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0)
+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
+ else
+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+
+ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
+
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_LVDS; break;
+ case 1: type = OUTPUT_TMDS; break;
+ case 2: type = OUTPUT_TMDS; break;
+ case 5: type = OUTPUT_TMDS; break;
+ case 8: type = OUTPUT_DP; break;
+ case 9: type = OUTPUT_DP; break;
+ default:
+ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
+
+ or = i;
+ }
+
+ /* There was no encoder to disable */
+ if (type == OUTPUT_ANY)
+ goto ack;
+
+ /* Disable the encoder */
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+
+ if (dcb->type == type && (dcb->or & (1 << or))) {
+ nouveau_bios_run_display_table(dev, dcb, 0, -1);
+ dev_priv->evo_irq.dcb = dcb;
+ goto ack;
+ }
+ }
+
+ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
nv_wr32(dev, 0x610030, 0x80000000);
@@ -817,33 +792,103 @@ nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk20_handler(struct drm_device *dev)
{
- struct dcb_entry *dcbent;
- uint32_t tmp, pclk, script;
- int head, or, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+ struct dcb_entry *dcb;
+ int i, crtc, or, type = OUTPUT_ANY;
- ret = nv50_display_irq_head(dev, &head, &dcbent);
- if (ret)
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ dcb = dev_priv->evo_irq.dcb;
+ if (dcb) {
+ nouveau_bios_run_display_table(dev, dcb, 0, -2);
+ dev_priv->evo_irq.dcb = NULL;
+ }
+
+ /* CRTC clock change requested? */
+ crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
+ if (crtc >= 0) {
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
+ pclk &= 0x003fffff;
+
+ nv50_crtc_set_clock(dev, crtc, pclk);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
+ tmp &= ~0x000000f;
+ nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
+ }
+
+ /* Nothing needs to be done for the encoder */
+ crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
+ if (crtc < 0)
goto ack;
- or = ffs(dcbent->or) - 1;
- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
- script = nv50_display_script_select(dev, dcbent, pclk);
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
- NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
+ /* Find which encoder is connected to the CRTC */
+ for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
+ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
- if (dcbent->type != OUTPUT_DP)
- nouveau_bios_run_display_table(dev, dcbent, 0, -2);
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_ANALOG; break;
+ case 1: type = OUTPUT_TV; break;
+ default:
+ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
- nv50_crtc_set_clock(dev, head, pclk);
+ or = i;
+ }
- nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+ for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0)
+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
+ else
+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
- nv50_display_unk20_dp_hack(dev, dcbent);
+ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
+
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_LVDS; break;
+ case 1: type = OUTPUT_TMDS; break;
+ case 2: type = OUTPUT_TMDS; break;
+ case 5: type = OUTPUT_TMDS; break;
+ case 8: type = OUTPUT_DP; break;
+ case 9: type = OUTPUT_DP; break;
+ default:
+ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
+
+ or = i;
+ }
+
+ if (type == OUTPUT_ANY)
+ goto ack;
+
+ /* Enable the encoder */
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ dcb = &dev_priv->vbios.dcb.entry[i];
+ if (dcb->type == type && (dcb->or & (1 << or)))
+ break;
+ }
+
+ if (i == dev_priv->vbios.dcb.entries) {
+ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
+ goto ack;
+ }
- tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
- tmp &= ~0x000000f;
- nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
+ script = nv50_display_script_select(dev, dcb, mc, pclk);
+ nouveau_bios_run_display_table(dev, dcb, script, pclk);
- if (dcbent->type != OUTPUT_ANALOG) {
+ nv50_display_unk20_dp_hack(dev, dcb);
+
+ if (dcb->type != OUTPUT_ANALOG) {
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
tmp &= ~0x00000f0f;
if (script & 0x0100)
@@ -853,24 +898,61 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
+ dev_priv->evo_irq.dcb = dcb;
+ dev_priv->evo_irq.pclk = pclk;
+ dev_priv->evo_irq.script = script;
+
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
nv_wr32(dev, 0x610030, 0x80000000);
}
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLED is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this; however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ u32 tmp;
+
+ if (dcb->type != OUTPUT_TMDS)
+ return;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->type == OUTPUT_DP &&
+ nv_encoder->dcb->or & (1 << or)) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+ break;
+ }
+ }
+}
+
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
- struct dcb_entry *dcbent;
- int head, pclk, script, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
+ u16 script = dev_priv->evo_irq.script;
+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
- ret = nv50_display_irq_head(dev, &head, &dcbent);
- if (ret)
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ dev_priv->evo_irq.dcb = NULL;
+ if (!dcb)
goto ack;
- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
- script = nv50_display_script_select(dev, dcbent, pclk);
- nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
+ nouveau_bios_run_display_table(dev, dcb, script, -pclk);
+ nv50_display_unk40_dp_set_tmds(dev, dcb);
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 581d405ac01..c551f0b85ee 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -38,9 +38,11 @@
void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
void nv50_display_irq_hotplug_bh(struct work_struct *work);
-int nv50_display_init(struct drm_device *dev);
+int nv50_display_early_init(struct drm_device *dev);
+void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
-int nv50_display_destroy(struct drm_device *dev);
+int nv50_display_init(struct drm_device *dev);
+void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index e20c0e2474f..fb0281ae8f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,41 +28,33 @@
#include "drm.h"
#include "nouveau_drv.h"
-struct nv50_fifo_priv {
- struct nouveau_gpuobj_ref *thingo[2];
- int cur_thingo;
-};
-
-#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
-
static void
-nv50_fifo_init_thingo(struct drm_device *dev)
+nv50_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj_ref *cur;
int i, nr;
NV_DEBUG(dev, "\n");
- cur = priv->thingo[priv->cur_thingo];
- priv->cur_thingo = !priv->cur_thingo;
+ cur = pfifo->playlist[pfifo->cur_playlist];
+ pfifo->cur_playlist = !pfifo->cur_playlist;
/* We never schedule channel 0 or 127 */
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 1, nr = 0; i < 127; i++) {
if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
nv_wo32(dev, cur->gpuobj, nr++, i);
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
nv_wr32(dev, 0x32f4, cur->instance >> 12);
nv_wr32(dev, 0x32ec, nr);
nv_wr32(dev, 0x2500, 0x101);
}
-static int
-nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
+static void
+nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->fifos[channel];
@@ -70,37 +62,28 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
NV_DEBUG(dev, "ch%d\n", channel);
- if (!chan->ramfc)
- return -EINVAL;
-
- if (IS_G80)
+ if (dev_priv->chipset == 0x50)
inst = chan->ramfc->instance >> 12;
else
inst = chan->ramfc->instance >> 8;
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
- inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
- if (!nt)
- nv50_fifo_init_thingo(dev);
- return 0;
+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
+ NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}
static void
-nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
+nv50_fifo_channel_disable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
- NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
+ NV_DEBUG(dev, "ch%d\n", channel);
- if (IS_G80)
+ if (dev_priv->chipset == 0x50)
inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
else
inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
-
- if (!nt)
- nv50_fifo_init_thingo(dev);
}
static void
@@ -133,12 +116,12 @@ nv50_fifo_init_context_table(struct drm_device *dev)
for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
if (dev_priv->fifos[i])
- nv50_fifo_channel_enable(dev, i, true);
+ nv50_fifo_channel_enable(dev, i);
else
- nv50_fifo_channel_disable(dev, i, true);
+ nv50_fifo_channel_disable(dev, i);
}
- nv50_fifo_init_thingo(dev);
+ nv50_fifo_playlist_update(dev);
}
static void
@@ -162,41 +145,38 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3270, 0);
	/* Enable dummy channels set up by nv50_instmem.c */
- nv50_fifo_channel_enable(dev, 0, true);
- nv50_fifo_channel_enable(dev, 127, true);
+ nv50_fifo_channel_enable(dev, 0);
+ nv50_fifo_channel_enable(dev, 127);
}
int
nv50_fifo_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int ret;
NV_DEBUG(dev, "\n");
- priv = dev_priv->engine.fifo.priv;
- if (priv) {
- priv->cur_thingo = !priv->cur_thingo;
+ if (pfifo->playlist[0]) {
+ pfifo->cur_playlist = !pfifo->cur_playlist;
goto just_reset;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- dev_priv->engine.fifo.priv = priv;
-
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[0]);
if (ret) {
- NV_ERROR(dev, "error creating thingo0: %d\n", ret);
+ NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
return ret;
}
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[1]);
if (ret) {
- NV_ERROR(dev, "error creating thingo1: %d\n", ret);
+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+ NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
return ret;
}
@@ -216,18 +196,15 @@ void
nv50_fifo_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
NV_DEBUG(dev, "\n");
- if (!priv)
+ if (!pfifo->playlist[0])
return;
- nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
- nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
-
- dev_priv->engine.fifo.priv = NULL;
- kfree(priv);
+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
}
int
@@ -248,7 +225,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (IS_G80) {
+ if (dev_priv->chipset == 0x50) {
uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
@@ -281,10 +258,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- dev_priv->engine.instmem.prepare_access(dev, true);
-
nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
- nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+ nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->instance >> 4));
nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
@@ -295,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
chan->dma.ib_base * 4);
nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
- if (!IS_G80) {
+ if (dev_priv->chipset != 0x50) {
nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
nv_wo32(dev, chan->ramin->gpuobj, 1,
chan->ramfc->instance >> 8);
@@ -304,16 +281,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
}
- dev_priv->engine.instmem.finish_access(dev);
-
- ret = nv50_fifo_channel_enable(dev, chan->id, false);
- if (ret) {
- NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- return ret;
- }
+ dev_priv->engine.instmem.flush(dev);
+ nv50_fifo_channel_enable(dev, chan->id);
+ nv50_fifo_playlist_update(dev);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
@@ -328,11 +299,12 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
/* This will ensure the channel is seen as disabled. */
chan->ramfc = NULL;
- nv50_fifo_channel_disable(dev, chan->id, false);
+ nv50_fifo_channel_disable(dev, chan->id);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
- nv50_fifo_channel_disable(dev, 127, false);
+ nv50_fifo_channel_disable(dev, 127);
+ nv50_fifo_playlist_update(dev);
nouveau_gpuobj_ref_del(dev, &ramfc);
nouveau_gpuobj_ref_del(dev, &chan->cache);
@@ -349,8 +321,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
@@ -396,7 +366,7 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
- if (!IS_G80) {
+ if (dev_priv->chipset != 0x50) {
nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
@@ -404,8 +374,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
}
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0;
}
@@ -434,8 +402,6 @@ nv50_fifo_unload_context(struct drm_device *dev)
ramfc = chan->ramfc->gpuobj;
cache = chan->cache->gpuobj;
- dev_priv->engine.instmem.prepare_access(dev, true);
-
nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
@@ -482,7 +448,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
}
/* guessing that all the 0x34xx regs aren't on NV50 */
- if (!IS_G80) {
+ if (dev_priv->chipset != 0x50) {
nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
@@ -491,7 +457,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
/*XXX: probably reload ch127 (NULL) state back too */
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index bb47ad73726..b2fab2bf3d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -74,3 +74,38 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
nv_wr32(dev, r, v);
return 0;
}
+
+void
+nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 reg, mask;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio) {
+ NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
+ return;
+ }
+
+ reg = gpio->line < 16 ? 0xe050 : 0xe070;
+ mask = 0x00010001 << (gpio->line & 0xf);
+
+ nv_wr32(dev, reg + 4, mask);
+ nv_mask(dev, reg + 0, mask, on ? mask : 0);
+}
+
+int
+nv50_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* disable, and ack any pending gpio interrupts */
+ nv_wr32(dev, 0xe050, 0x00000000);
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ }
+
+ return 0;
+}
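
A minimal usage sketch for the new nv50_gpio_irq_enable() helper added above. The caller below is hypothetical and not part of this patch; only the helper's signature and behaviour are taken from the diff:

/* Hypothetical caller: unmask a GPIO line's interrupt around a wait,
 * then mask it again.  nv50_gpio_irq_enable() acks any pending event
 * (write to reg + 4) before flipping the enable bits in reg + 0.
 */
static void example_wait_for_gpio_event(struct drm_device *dev,
					enum dcb_gpio_tag tag)
{
	nv50_gpio_irq_enable(dev, tag, true);	/* ack + unmask */
	/* ... block until the interrupt handler reports the event ... */
	nv50_gpio_irq_enable(dev, tag, false);	/* mask again */
}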
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b203d06f601..1413028e158 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -30,8 +30,6 @@
#include "nouveau_grctx.h"
-#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
-
static void
nv50_graph_init_reset(struct drm_device *dev)
{
@@ -103,37 +101,33 @@ static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_grctx ctx = {};
+ uint32_t *cp;
+ int i;
NV_DEBUG(dev, "\n");
- if (nouveau_ctxfw) {
- nouveau_grctx_prog_load(dev);
- dev_priv->engine.graph.grctx_size = 0x70000;
+ cp = kmalloc(512 * 4, GFP_KERNEL);
+ if (!cp) {
+ NV_ERROR(dev, "failed to allocate ctxprog\n");
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
}
- if (!dev_priv->engine.graph.ctxprog) {
- struct nouveau_grctx ctx = {};
- uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
- int i;
- if (!cp) {
- NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
- dev_priv->engine.graph.accel_blocked = true;
- return 0;
- }
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 512;
- if (!nv50_grctx_init(&ctx)) {
- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
- } else {
- dev_priv->engine.graph.accel_blocked = true;
- }
- kfree(cp);
+
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 512;
+ if (!nv50_grctx_init(&ctx)) {
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ } else {
+ dev_priv->engine.graph.accel_blocked = true;
}
+ kfree(cp);
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -164,7 +158,6 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
- nouveau_grctx_fini(dev);
}
void
@@ -212,8 +205,9 @@ nv50_graph_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
- struct nouveau_gpuobj *ctx;
+ struct nouveau_gpuobj *obj;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -223,10 +217,9 @@ nv50_graph_create_context(struct nouveau_channel *chan)
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
- ctx = chan->ramin_grctx->gpuobj;
+ obj = chan->ramin_grctx->gpuobj;
- hdr = IS_G80 ? 0x200 : 0x20;
- dev_priv->engine.instmem.prepare_access(dev, true);
+ hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
pgraph->grctx_size - 1);
@@ -234,21 +227,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
- dev_priv->engine.instmem.finish_access(dev);
-
- dev_priv->engine.instmem.prepare_access(dev, true);
- if (!pgraph->ctxprog) {
- struct nouveau_grctx ctx = {};
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = chan->ramin_grctx->gpuobj;
- nv50_grctx_init(&ctx);
- } else {
- nouveau_grctx_vals_load(dev, ctx);
- }
- nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
- dev_priv->engine.instmem.finish_access(dev);
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = obj;
+ nv50_grctx_init(&ctx);
+
+ nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+
+ dev_priv->engine.instmem.flush(dev);
return 0;
}
@@ -257,17 +244,16 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, hdr = IS_G80 ? 0x200 : 0x20;
+ int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
NV_DEBUG(dev, "ch%d\n", chan->id);
if (!chan->ramin || !chan->ramin->gpuobj)
return;
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 5f21df31f3a..91ef93cf1f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -35,8 +35,6 @@ struct nv50_instmem_priv {
struct nouveau_gpuobj_ref *pramin_pt;
struct nouveau_gpuobj_ref *pramin_bar;
struct nouveau_gpuobj_ref *fb_bar;
-
- bool last_access_wr;
};
#define NV50_INSTMEM_PAGE_SHIFT 12
@@ -141,13 +139,15 @@ nv50_instmem_init(struct drm_device *dev)
chan->file_priv = (struct drm_file *)-2;
dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
/* Channel's PRAMIN object + heap */
ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
NULL, &chan->ramin);
if (ret)
return ret;
- if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+ if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base))
return -ENOMEM;
/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
@@ -241,7 +241,7 @@ nv50_instmem_init(struct drm_device *dev)
return ret;
BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
- drm_get_resource_len(dev, 1) - 1);
+ pci_resource_len(dev->pdev, 1) - 1);
BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
@@ -262,30 +262,25 @@ nv50_instmem_init(struct drm_device *dev)
	/* Assume that praying isn't enough; check that we can re-read the
* entire fake channel back from the PRAMIN BAR */
- dev_priv->engine.instmem.prepare_access(dev, false);
for (i = 0; i < c_size; i += 4) {
if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
i);
- dev_priv->engine.instmem.finish_access(dev);
return -EINVAL;
}
}
- dev_priv->engine.instmem.finish_access(dev);
nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
/* Global PRAMIN heap */
- if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
- c_size, dev_priv->ramin_size - c_size)) {
- dev_priv->ramin_heap = NULL;
+ if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
NV_ERROR(dev, "Failed to init RAMIN heap\n");
}
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
@@ -321,7 +316,7 @@ nv50_instmem_takedown(struct drm_device *dev)
nouveau_gpuobj_del(dev, &chan->vm_pd);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref_del(dev, &chan->ramin);
- nouveau_mem_takedown(&chan->ramin_heap);
+ drm_mm_takedown(&chan->ramin_heap);
dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
kfree(chan);
@@ -436,14 +431,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return -EINVAL;
- NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+ NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
vram = gpuobj->im_backing_start;
- NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+ NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
@@ -453,27 +448,15 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
vram |= 0x30;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
vram += NV50_INSTMEM_PAGE_SIZE;
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, 0x100c80, 0x00040001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00060001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ nv50_vm_flush(dev, 4);
+ nv50_vm_flush(dev, 6);
gpuobj->im_bound = 1;
return 0;
@@ -492,36 +475,37 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
gpuobj->im_bound = 0;
return 0;
}
void
-nv50_instmem_prepare_access(struct drm_device *dev, bool write)
+nv50_instmem_flush(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
- priv->last_access_wr = write;
+ nv_wr32(dev, 0x00330c, 0x00000001);
+ if (!nv_wait(0x00330c, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
}
void
-nv50_instmem_finish_access(struct drm_device *dev)
+nv84_instmem_flush(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ nv_wr32(dev, 0x070000, 0x00000001);
+ if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+}
- if (priv->last_access_wr) {
- nv_wr32(dev, 0x070000, 0x00000001);
- if (!nv_wait(0x070000, 0x00000001, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
- }
+void
+nv50_vm_flush(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
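
For context, the change in this file (and in the callers above) replaces the prepare_access()/finish_access() bracketing with a single flush issued after the writes. A minimal sketch of the new pattern, assuming an already-allocated gpuobj; the offsets and values below are illustrative only:

/* Write instance memory, then flush once; the flush hook is wired to
 * nv50_instmem_flush() or nv84_instmem_flush() elsewhere, depending on
 * chipset.
 */
nv_wo32(dev, gpuobj, 0x00/4, 0x00000000);
nv_wo32(dev, gpuobj, 0x04/4, 0x00000000);
dev_priv->engine.instmem.flush(dev);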
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 812778db76a..bcd4cf84a7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -37,52 +37,32 @@
#include "nv50_display.h"
static void
-nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
+nv50_sor_disconnect(struct drm_encoder *encoder)
{
- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
int ret;
+ if (!nv_encoder->crtc)
+ return;
+ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
+
NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
- ret = RING_SPACE(evo, 2);
+ ret = RING_SPACE(evo, 4);
if (ret) {
NV_ERROR(dev, "no space while disconnecting SOR\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, 0);
-}
-
-static void
-nv50_sor_dp_link_train(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct bit_displayport_encoder_table *dpe;
- int dpe_headerlen;
-
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe) {
- NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
- return;
- }
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
- if (dpe->script0) {
- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
- nv_encoder->dcb);
- }
-
- if (!nouveau_dp_link_train(encoder))
- NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
-
- if (dpe->script1) {
- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
- nv_encoder->dcb);
- }
+ nv_encoder->crtc = NULL;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
static void
@@ -94,14 +74,16 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
uint32_t val;
int or = nv_encoder->or;
- NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+ NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
nv_encoder->last_dpms = mode;
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nvenc = nouveau_encoder(enc);
if (nvenc == nv_encoder ||
- nvenc->disconnect != nv50_sor_disconnect ||
+ (nvenc->dcb->type != OUTPUT_TMDS &&
+ nvenc->dcb->type != OUTPUT_LVDS &&
+ nvenc->dcb->type != OUTPUT_DP) ||
nvenc->dcb->or != nv_encoder->dcb->or)
continue;
@@ -133,8 +115,22 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
}
- if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
- nv50_sor_dp_link_train(encoder);
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ struct nouveau_i2c_chan *auxch;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ u8 status = DP_SET_POWER_D0;
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ nouveau_dp_link_train(encoder);
+ } else {
+ u8 status = DP_SET_POWER_D3;
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ }
+ }
}
static void
@@ -196,7 +192,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0;
int ret;
- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
+ nv_encoder->or, nv_encoder->dcb->type, crtc->index);
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,6 +236,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static struct drm_crtc *
+nv50_sor_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
}
static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
@@ -249,7 +254,9 @@ static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
.prepare = nv50_sor_prepare,
.commit = nv50_sor_commit,
.mode_set = nv50_sor_mode_set,
- .detect = NULL
+ .get_crtc = nv50_sor_crtc_get,
+ .detect = NULL,
+ .disable = nv50_sor_disconnect
};
static void
@@ -272,32 +279,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
};
int
-nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
+nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
{
struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
- bool dum;
int type;
NV_DEBUG_KMS(dev, "\n");
switch (entry->type) {
case OUTPUT_TMDS:
- NV_INFO(dev, "Detected a TMDS output\n");
+ case OUTPUT_DP:
type = DRM_MODE_ENCODER_TMDS;
break;
case OUTPUT_LVDS:
- NV_INFO(dev, "Detected a LVDS output\n");
type = DRM_MODE_ENCODER_LVDS;
-
- if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
- NV_ERROR(dev, "Failed parsing LVDS table\n");
- return -EINVAL;
- }
- break;
- case OUTPUT_DP:
- NV_INFO(dev, "Detected a DP output\n");
- type = DRM_MODE_ENCODER_TMDS;
break;
default:
return -EINVAL;
@@ -310,8 +307,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
-
- nv_encoder->disconnect = nv50_sor_disconnect;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
@@ -342,5 +338,6 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dp.mc_unknown = 5;
}
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
new file mode 100644
index 00000000000..26a996025dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+int
+nvc0_fb_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nvc0_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
new file mode 100644
index 00000000000..d6437587197
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+void
+nvc0_fifo_disable(struct drm_device *dev)
+{
+}
+
+void
+nvc0_fifo_enable(struct drm_device *dev)
+{
+}
+
+bool
+nvc0_fifo_reassign(struct drm_device *dev, bool enable)
+{
+ return false;
+}
+
+bool
+nvc0_fifo_cache_flush(struct drm_device *dev)
+{
+ return true;
+}
+
+bool
+nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+ return false;
+}
+
+int
+nvc0_fifo_channel_id(struct drm_device *dev)
+{
+ return 127;
+}
+
+int
+nvc0_fifo_create_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+void
+nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+{
+}
+
+int
+nvc0_fifo_load_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+int
+nvc0_fifo_unload_context(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nvc0_fifo_takedown(struct drm_device *dev)
+{
+}
+
+int
+nvc0_fifo_init(struct drm_device *dev)
+{
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
new file mode 100644
index 00000000000..717a5177a8d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+void
+nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+}
+
+struct nouveau_channel *
+nvc0_graph_channel(struct drm_device *dev)
+{
+ return NULL;
+}
+
+int
+nvc0_graph_create_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+void
+nvc0_graph_destroy_context(struct nouveau_channel *chan)
+{
+}
+
+int
+nvc0_graph_load_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+int
+nvc0_graph_unload_context(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nvc0_graph_takedown(struct drm_device *dev)
+{
+}
+
+int
+nvc0_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
new file mode 100644
index 00000000000..6b451f86478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+int
+nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ uint32_t *size)
+{
+ int ret;
+
+ *size = ALIGN(*size, 4096);
+ if (*size == 0)
+ return -EINVAL;
+
+ ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
+ true, false, &gpuobj->im_backing);
+ if (ret) {
+ NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ return ret;
+ }
+
+ gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
+ gpuobj->im_backing_start <<= PAGE_SHIFT;
+ return 0;
+}
+
+void
+nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->engine.instmem.unbind(dev, gpuobj);
+ nouveau_bo_unpin(gpuobj->im_backing);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pte, pte_end;
+ uint64_t vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+ pte = gpuobj->im_pramin->start >> 12;
+ pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+ vram = gpuobj->im_backing_start;
+
+ NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+
+ while (pte < pte_end) {
+ nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
+ nv_wr32(dev, 0x702004 + (pte * 8), 0);
+ vram += 4096;
+ pte++;
+ }
+ dev_priv->engine.instmem.flush(dev);
+
+ if (1) {
+ u32 chan = nv_rd32(dev, 0x1700) << 16;
+ nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
+ nv_wr32(dev, 0x100cbc, 0x80000005);
+ }
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pte, pte_end;
+
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ pte = gpuobj->im_pramin->start >> 12;
+ pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+ while (pte < pte_end) {
+ nv_wr32(dev, 0x702000 + (pte * 8), 0);
+ nv_wr32(dev, 0x702004 + (pte * 8), 0);
+ pte++;
+ }
+ dev_priv->engine.instmem.flush(dev);
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
+void
+nvc0_instmem_flush(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x070000, 1);
+ if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+}
+
+int
+nvc0_instmem_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 *buf;
+ int i;
+
+ dev_priv->susres.ramin_copy = vmalloc(65536);
+ if (!dev_priv->susres.ramin_copy)
+ return -ENOMEM;
+ buf = dev_priv->susres.ramin_copy;
+
+ for (i = 0; i < 65536; i += 4)
+ buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+ return 0;
+}
+
+void
+nvc0_instmem_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 *buf = dev_priv->susres.ramin_copy;
+ u64 chan;
+ int i;
+
+ chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
+ nv_wr32(dev, 0x001700, chan >> 16);
+
+ for (i = 0; i < 65536; i += 4)
+ nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
+ vfree(dev_priv->susres.ramin_copy);
+ dev_priv->susres.ramin_copy = NULL;
+
+ nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
+}
+
+int
+nvc0_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
+ int ret, i;
+
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
+ imem = 4096 + 4096 + 32768;
+
+ nv_wr32(dev, 0x001700, chan >> 16);
+
+ /* channel setup */
+ nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
+ nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
+ nv_wr32(dev, 0x700208, lower_32_bits(lim3));
+ nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
+
+ /* point pgd -> pgt */
+ nv_wr32(dev, 0x701000, 0);
+ nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
+
+ /* point pgt -> physical vram for channel */
+ pgt3 = 0x2000;
+ for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
+ nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
+ nv_wr32(dev, 0x700004 + pgt3, 0);
+ }
+
+ /* clear rest of pgt */
+ for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
+ nv_wr32(dev, 0x700000 + pgt3, 0);
+ nv_wr32(dev, 0x700004 + pgt3, 0);
+ }
+
+ /* point bar3 at the channel */
+ nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
+
+ /* Global PRAMIN heap */
+ ret = drm_mm_init(&dev_priv->ramin_heap, imem,
+ dev_priv->ramin_size - imem);
+ if (ret) {
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ return -ENOMEM;
+ }
+
+ /*XXX: incorrect, but needed to make hash func "work" */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
+ return 0;
+}
+
+void
+nvc0_instmem_takedown(struct drm_device *dev)
+{
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 5998c35237b..ad64673ace1 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -147,28 +147,6 @@
# define NV_VIO_GX_DONT_CARE_INDEX 0x07
# define NV_VIO_GX_BIT_MASK_INDEX 0x08
-#define NV_PFB_BOOT_0 0x00100000
-#define NV_PFB_CFG0 0x00100200
-#define NV_PFB_CFG1 0x00100204
-#define NV_PFB_CSTATUS 0x0010020C
-#define NV_PFB_REFCTRL 0x00100210
-# define NV_PFB_REFCTRL_VALID_1 (1 << 31)
-#define NV_PFB_PAD 0x0010021C
-# define NV_PFB_PAD_CKE_NORMAL (1 << 0)
-#define NV_PFB_TILE_NV10 0x00100240
-#define NV_PFB_TILE_SIZE_NV10 0x00100244
-#define NV_PFB_REF 0x001002D0
-# define NV_PFB_REF_CMD_REFRESH (1 << 0)
-#define NV_PFB_PRE 0x001002D4
-# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0)
-#define NV_PFB_CLOSE_PAGE2 0x0010033C
-#define NV_PFB_TILE_NV40 0x00100600
-#define NV_PFB_TILE_SIZE_NV40 0x00100604
-
-#define NV_PEXTDEV_BOOT_0 0x00101000
-# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
-#define NV_PEXTDEV_BOOT_3 0x0010100c
-
#define NV_PCRTC_INTR_0 0x00600100
# define NV_PCRTC_INTR_0_VBLANK (1 << 0)
#define NV_PCRTC_INTR_EN_0 0x00600140
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index e671d0e74d4..570e190710b 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -44,7 +44,7 @@
MODULE_FIRMWARE(FIRMWARE_NAME);
-static int R128_READ_PLL(struct drm_device * dev, int addr)
+static int R128_READ_PLL(struct drm_device *dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -53,7 +53,7 @@ static int R128_READ_PLL(struct drm_device * dev, int addr)
}
#if R128_FIFO_DEBUG
-static void r128_status(drm_r128_private_t * dev_priv)
+static void r128_status(drm_r128_private_t *dev_priv)
{
printk("GUI_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_GUI_STAT));
@@ -74,7 +74,7 @@ static void r128_status(drm_r128_private_t * dev_priv)
* Engine, FIFO control
*/
-static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
+static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
{
u32 tmp;
int i;
@@ -83,9 +83,8 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
- if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
+ if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
return 0;
- }
DRM_UDELAY(1);
}
@@ -95,7 +94,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
return -EBUSY;
}
-static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
+static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
{
int i;
@@ -112,7 +111,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
return -EBUSY;
}
-static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
+static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
{
int i, ret;
@@ -189,7 +188,7 @@ out_release:
* prior to a wait for idle, as it informs the engine that the command
* stream is ending.
*/
-static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
+static void r128_do_cce_flush(drm_r128_private_t *dev_priv)
{
u32 tmp;
@@ -199,7 +198,7 @@ static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
/* Wait for the CCE to go idle.
*/
-int r128_do_cce_idle(drm_r128_private_t * dev_priv)
+int r128_do_cce_idle(drm_r128_private_t *dev_priv)
{
int i;
@@ -225,7 +224,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
/* Start the Concurrent Command Engine.
*/
-static void r128_do_cce_start(drm_r128_private_t * dev_priv)
+static void r128_do_cce_start(drm_r128_private_t *dev_priv)
{
r128_do_wait_for_idle(dev_priv);
@@ -242,7 +241,7 @@ static void r128_do_cce_start(drm_r128_private_t * dev_priv)
* commands, so you must wait for the CCE command stream to complete
* before calling this routine.
*/
-static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
+static void r128_do_cce_reset(drm_r128_private_t *dev_priv)
{
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
@@ -253,7 +252,7 @@ static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
* commands, so you must flush the command stream and wait for the CCE
* to go idle before calling this routine.
*/
-static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
+static void r128_do_cce_stop(drm_r128_private_t *dev_priv)
{
R128_WRITE(R128_PM4_MICRO_CNTL, 0);
R128_WRITE(R128_PM4_BUFFER_CNTL,
@@ -264,7 +263,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
/* Reset the engine. This will stop the CCE if it is running.
*/
-static int r128_do_engine_reset(struct drm_device * dev)
+static int r128_do_engine_reset(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@@ -301,8 +300,8 @@ static int r128_do_engine_reset(struct drm_device * dev)
return 0;
}
-static void r128_cce_init_ring_buffer(struct drm_device * dev,
- drm_r128_private_t * dev_priv)
+static void r128_cce_init_ring_buffer(struct drm_device *dev,
+ drm_r128_private_t *dev_priv)
{
u32 ring_start;
u32 tmp;
@@ -340,7 +339,7 @@ static void r128_cce_init_ring_buffer(struct drm_device * dev,
R128_WRITE(R128_BUS_CNTL, tmp);
}
-static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
+static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
{
drm_r128_private_t *dev_priv;
int rc;
@@ -588,7 +587,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
return rc;
}
-int r128_do_cleanup_cce(struct drm_device * dev)
+int r128_do_cleanup_cce(struct drm_device *dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -682,9 +681,8 @@ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv
/* Flush any pending CCE commands. This ensures any outstanding
 * commands are executed by the engine before we turn it off.
*/
- if (stop->flush) {
+ if (stop->flush)
r128_do_cce_flush(dev_priv);
- }
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
@@ -735,9 +733,8 @@ int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- if (dev_priv->cce_running) {
+ if (dev_priv->cce_running)
r128_do_cce_flush(dev_priv);
- }
return r128_do_cce_idle(dev_priv);
}
@@ -765,7 +762,7 @@ int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_pr
#define R128_BUFFER_FREE 0
#if 0
-static int r128_freelist_init(struct drm_device * dev)
+static int r128_freelist_init(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -848,7 +845,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
return NULL;
}
-void r128_freelist_reset(struct drm_device * dev)
+void r128_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i;
@@ -864,7 +861,7 @@ void r128_freelist_reset(struct drm_device * dev)
* CCE command submission
*/
-int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
+int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
int i;
@@ -881,9 +878,9 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
return -EBUSY;
}
-static int r128_cce_get_buffers(struct drm_device * dev,
+static int r128_cce_get_buffers(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_dma * d)
+ struct drm_dma *d)
{
int i;
struct drm_buf *buf;
@@ -933,9 +930,8 @@ int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_p
d->granted_count = 0;
- if (d->request_count) {
+ if (d->request_count)
ret = r128_cce_get_buffers(dev, file_priv, d);
- }
return ret;
}
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index b806fdcc717..1e2971f13aa 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -85,7 +85,7 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
-int r128_driver_load(struct drm_device * dev, unsigned long flags)
+int r128_driver_load(struct drm_device *dev, unsigned long flags)
{
return drm_vblank_init(dev, 1);
}
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 3c60829d82e..930c71b2fb5 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -53,7 +53,7 @@
#define DRIVER_MINOR 5
#define DRIVER_PATCHLEVEL 0
-#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
+#define GET_RING_HEAD(dev_priv) R128_READ(R128_PM4_BUFFER_DL_RPTR)
typedef struct drm_r128_freelist {
unsigned int age;
@@ -144,23 +144,23 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern void r128_freelist_reset(struct drm_device * dev);
+extern void r128_freelist_reset(struct drm_device *dev);
-extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
+extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
-extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
-extern int r128_do_cleanup_cce(struct drm_device * dev);
+extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
+extern int r128_do_cleanup_cce(struct drm_device *dev);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
-extern void r128_driver_irq_preinstall(struct drm_device * dev);
+extern void r128_driver_irq_preinstall(struct drm_device *dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
-extern void r128_driver_irq_uninstall(struct drm_device * dev);
-extern void r128_driver_lastclose(struct drm_device * dev);
-extern int r128_driver_load(struct drm_device * dev, unsigned long flags);
-extern void r128_driver_preclose(struct drm_device * dev,
+extern void r128_driver_irq_uninstall(struct drm_device *dev);
+extern void r128_driver_lastclose(struct drm_device *dev);
+extern int r128_driver_load(struct drm_device *dev, unsigned long flags);
+extern void r128_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -390,27 +390,27 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
#define R128_PCIGART_TABLE_SIZE 32768
-#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
-#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
-#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
-#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
+#define R128_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
+#define R128_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define R128_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
+#define R128_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define R128_WRITE_PLL(addr,val) \
+#define R128_WRITE_PLL(addr, val) \
do { \
R128_WRITE8(R128_CLOCK_CNTL_INDEX, \
((addr) & 0x1f) | R128_PLL_WR_EN); \
R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
} while (0)
-#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
+#define CCE_PACKET0(reg, n) (R128_CCE_PACKET0 | \
((n) << 16) | ((reg) >> 2))
-#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \
+#define CCE_PACKET1(reg0, reg1) (R128_CCE_PACKET1 | \
(((reg1) >> 2) << 11) | ((reg0) >> 2))
#define CCE_PACKET2() (R128_CCE_PACKET2)
-#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
+#define CCE_PACKET3(pkt, n) (R128_CCE_PACKET3 | \
(pkt) | ((n) << 16))
-static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
+static __inline__ void r128_update_ring_snapshot(drm_r128_private_t *dev_priv)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
@@ -430,37 +430,38 @@ do { \
} \
} while (0)
-#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
+#define RING_SPACE_TEST_WITH_RETURN(dev_priv) \
do { \
drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
- if ( ring->space < ring->high_mark ) { \
- for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \
- r128_update_ring_snapshot( dev_priv ); \
- if ( ring->space >= ring->high_mark ) \
+ if (ring->space < ring->high_mark) { \
+ for (i = 0 ; i < dev_priv->usec_timeout ; i++) { \
+ r128_update_ring_snapshot(dev_priv); \
+ if (ring->space >= ring->high_mark) \
goto __ring_space_done; \
- DRM_UDELAY(1); \
+ DRM_UDELAY(1); \
} \
- DRM_ERROR( "ring space check failed!\n" ); \
- return -EBUSY; \
+ DRM_ERROR("ring space check failed!\n"); \
+ return -EBUSY; \
} \
__ring_space_done: \
; \
} while (0)
-#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
+#define VB_AGE_TEST_WITH_RETURN(dev_priv) \
do { \
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \
- if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \
- int __ret = r128_do_cce_idle( dev_priv ); \
- if ( __ret ) return __ret; \
+ if (sarea_priv->last_dispatch >= R128_MAX_VB_AGE) { \
+ int __ret = r128_do_cce_idle(dev_priv); \
+ if (__ret) \
+ return __ret; \
sarea_priv->last_dispatch = 0; \
- r128_freelist_reset( dev ); \
+ r128_freelist_reset(dev); \
} \
} while (0)
#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \
- OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \
- OUT_RING( R128_EVENT_CRTC_OFFSET ); \
+ OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0)); \
+ OUT_RING(R128_EVENT_CRTC_OFFSET); \
} while (0)
/* ================================================================
@@ -472,13 +473,12 @@ do { \
#define RING_LOCALS \
int write, _nr; unsigned int tail_mask; volatile u32 *ring;
-#define BEGIN_RING( n ) do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
- } \
- if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
+#define BEGIN_RING(n) do { \
+ if (R128_VERBOSE) \
+ DRM_INFO("BEGIN_RING(%d)\n", (n)); \
+ if (dev_priv->ring.space <= (n) * sizeof(u32)) { \
COMMIT_RING(); \
- r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \
+ r128_wait_ring(dev_priv, (n) * sizeof(u32)); \
} \
_nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
ring = dev_priv->ring.start; \
@@ -494,40 +494,36 @@ do { \
#define R128_BROKEN_CCE 1
#define ADVANCE_RING() do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
- write, dev_priv->ring.tail ); \
- } \
- if ( R128_BROKEN_CCE && write < 32 ) { \
- memcpy( dev_priv->ring.end, \
- dev_priv->ring.start, \
- write * sizeof(u32) ); \
- } \
- if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
+ if (R128_VERBOSE) \
+ DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
+ write, dev_priv->ring.tail); \
+ if (R128_BROKEN_CCE && write < 32) \
+ memcpy(dev_priv->ring.end, \
+ dev_priv->ring.start, \
+ write * sizeof(u32)); \
+ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \
- } else \
+ else \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \
- dev_priv->ring.tail ); \
- } \
+ if (R128_VERBOSE) \
+ DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
+ dev_priv->ring.tail); \
DRM_MEMORYBARRIER(); \
- R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \
- R128_READ( R128_PM4_BUFFER_DL_WPTR ); \
+ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
+ R128_READ(R128_PM4_BUFFER_DL_WPTR); \
} while (0)
-#define OUT_RING( x ) do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
- (unsigned int)(x), write ); \
- } \
- ring[write++] = cpu_to_le32( x ); \
+#define OUT_RING(x) do { \
+ if (R128_VERBOSE) \
+ DRM_INFO(" OUT_RING( 0x%08x ) at 0x%x\n", \
+ (unsigned int)(x), write); \
+ ring[write++] = cpu_to_le32(x); \
write &= tail_mask; \
} while (0)
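As context for the macros above (not part of the patch): BEGIN_RING()/OUT_RING()/ADVANCE_RING() rely on the RING_LOCALS declarations being in scope and are normally followed by COMMIT_RING(), which publishes the new tail to R128_PM4_BUFFER_DL_WPTR. A minimal usage sketch, assuming a valid drm_r128_private_t in the caller (the function name is illustrative only):
static void r128_emit_wait_until_sketch(drm_r128_private_t *dev_priv)
{
	RING_LOCALS;
	BEGIN_RING(2);				/* reserve two dwords, waiting for ring space if needed */
	OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0));	/* packet header */
	OUT_RING(R128_EVENT_CRTC_OFFSET);	/* packet payload */
	ADVANCE_RING();				/* advance the software tail */
	COMMIT_RING();				/* write the tail to the hardware WPTR */
}
This is the same pairing the r128_state.c hunks below use around R128_WAIT_UNTIL_PAGE_FLIPPED().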
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 69810fb8ac4..429d5a02695 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -90,7 +90,7 @@ void r128_disable_vblank(struct drm_device *dev, int crtc)
*/
}
-void r128_driver_irq_preinstall(struct drm_device * dev)
+void r128_driver_irq_preinstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@@ -105,7 +105,7 @@ int r128_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void r128_driver_irq_uninstall(struct drm_device * dev)
+void r128_driver_irq_uninstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index af2665cf471..a9e33ce6591 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -37,8 +37,8 @@
* CCE hardware state programming functions
*/
-static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
- struct drm_clip_rect * boxes, int count)
+static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
+ struct drm_clip_rect *boxes, int count)
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
@@ -80,7 +80,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
ADVANCE_RING();
}
-static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -95,7 +95,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -121,7 +121,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -137,7 +137,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -156,7 +156,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -171,7 +171,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -187,9 +187,8 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
OUT_RING(tex->tex_cntl);
OUT_RING(tex->tex_combine_cntl);
OUT_RING(ctx->tex_size_pitch_c);
- for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
+ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
OUT_RING(tex->tex_offset[i]);
- }
OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
OUT_RING(ctx->constant_color_c);
@@ -198,7 +197,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -211,9 +210,8 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
OUT_RING(tex->tex_cntl);
OUT_RING(tex->tex_combine_cntl);
- for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
+ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
OUT_RING(tex->tex_offset[i]);
- }
OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
OUT_RING(tex->tex_border_color);
@@ -221,7 +219,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static void r128_emit_state(drm_r128_private_t * dev_priv)
+static void r128_emit_state(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -274,7 +272,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)
* Performance monitoring functions
*/
-static void r128_clear_box(drm_r128_private_t * dev_priv,
+static void r128_clear_box(drm_r128_private_t *dev_priv,
int x, int y, int w, int h, int r, int g, int b)
{
u32 pitch, offset;
@@ -321,13 +319,12 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
ADVANCE_RING();
}
-static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
+static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
{
- if (atomic_read(&dev_priv->idle_count) == 0) {
+ if (atomic_read(&dev_priv->idle_count) == 0)
r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
- } else {
+ else
atomic_set(&dev_priv->idle_count, 0);
- }
}
#endif
@@ -352,8 +349,8 @@ static void r128_print_dirty(const char *msg, unsigned int flags)
(flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
-static void r128_cce_dispatch_clear(struct drm_device * dev,
- drm_r128_clear_t * clear)
+static void r128_cce_dispatch_clear(struct drm_device *dev,
+ drm_r128_clear_t *clear)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -458,7 +455,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,
}
}
-static void r128_cce_dispatch_swap(struct drm_device * dev)
+static void r128_cce_dispatch_swap(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -524,7 +521,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_flip(struct drm_device * dev)
+static void r128_cce_dispatch_flip(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -542,11 +539,10 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
R128_WAIT_UNTIL_PAGE_FLIPPED();
OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
- if (dev_priv->current_page == 0) {
+ if (dev_priv->current_page == 0)
OUT_RING(dev_priv->back_offset);
- } else {
+ else
OUT_RING(dev_priv->front_offset);
- }
ADVANCE_RING();
@@ -566,7 +562,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -585,9 +581,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b
if (buf->used) {
buf_priv->dispatched = 1;
- if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
+ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
r128_emit_state(dev_priv);
- }
do {
/* Emit the next set of up to three cliprects */
@@ -636,8 +631,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b
sarea_priv->nbox = 0;
}
-static void r128_cce_dispatch_indirect(struct drm_device * dev,
- struct drm_buf * buf, int start, int end)
+static void r128_cce_dispatch_indirect(struct drm_device *dev,
+ struct drm_buf *buf, int start, int end)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -691,8 +686,8 @@ static void r128_cce_dispatch_indirect(struct drm_device * dev,
dev_priv->sarea_priv->last_dispatch++;
}
-static void r128_cce_dispatch_indices(struct drm_device * dev,
- struct drm_buf * buf,
+static void r128_cce_dispatch_indices(struct drm_device *dev,
+ struct drm_buf *buf,
int start, int end, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -713,9 +708,8 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
if (start != end) {
buf_priv->dispatched = 1;
- if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
+ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
r128_emit_state(dev_priv);
- }
dwords = (end - start + 3) / sizeof(u32);
@@ -775,9 +769,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
sarea_priv->nbox = 0;
}
-static int r128_cce_dispatch_blit(struct drm_device * dev,
+static int r128_cce_dispatch_blit(struct drm_device *dev,
struct drm_file *file_priv,
- drm_r128_blit_t * blit)
+ drm_r128_blit_t *blit)
{
drm_r128_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -887,8 +881,8 @@ static int r128_cce_dispatch_blit(struct drm_device * dev,
* have hardware stencil support.
*/
-static int r128_cce_dispatch_write_span(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_write_span(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, x, y;
@@ -902,12 +896,10 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
+ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
return -EFAULT;
- }
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
+ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
return -EFAULT;
- }
buffer_size = depth->n * sizeof(u32);
buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -983,8 +975,8 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
return 0;
}
-static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, *x, *y;
@@ -1001,9 +993,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL) {
+ if (x == NULL)
return -ENOMEM;
- }
y = kmalloc(ybuf_size, GFP_KERNEL);
if (y == NULL) {
kfree(x);
@@ -1105,8 +1096,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
return 0;
}
-static int r128_cce_dispatch_read_span(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_read_span(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, x, y;
@@ -1117,12 +1108,10 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
+ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
return -EFAULT;
- }
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
+ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
return -EFAULT;
- }
BEGIN_RING(7);
@@ -1148,8 +1137,8 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
return 0;
}
-static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, *x, *y;
@@ -1161,16 +1150,14 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (count > dev_priv->depth_pitch) {
+ if (count > dev_priv->depth_pitch)
count = dev_priv->depth_pitch;
- }
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL) {
+ if (x == NULL)
return -ENOMEM;
- }
y = kmalloc(ybuf_size, GFP_KERNEL);
if (y == NULL) {
kfree(x);
@@ -1220,7 +1207,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
* Polygon stipple
*/
-static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
@@ -1230,9 +1217,8 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
BEGIN_RING(33);
OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < 32; i++)
OUT_RING(stipple[i]);
- }
ADVANCE_RING();
}
@@ -1269,7 +1255,7 @@ static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *f
return 0;
}
-static int r128_do_init_pageflip(struct drm_device * dev)
+static int r128_do_init_pageflip(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1288,7 +1274,7 @@ static int r128_do_init_pageflip(struct drm_device * dev)
return 0;
}
-static int r128_do_cleanup_pageflip(struct drm_device * dev)
+static int r128_do_cleanup_pageflip(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1645,39 +1631,37 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
return 0;
}
-void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
r128_do_cleanup_pageflip(dev);
- }
}
}
-
-void r128_driver_lastclose(struct drm_device * dev)
+void r128_driver_lastclose(struct drm_device *dev)
{
r128_do_cleanup_cce(dev);
}
struct drm_ioctl_desc r128_ioctls[] = {
- DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 84b1f2729d4..aebe0087504 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -69,5 +69,6 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 1d569830ed9..8e421f644a5 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -108,12 +108,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base++;
break;
case ATOM_IIO_READ:
- temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
+ temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
- (void)ctx->card->reg_read(ctx->card, CU16(base + 1));
- ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
+ ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
@@ -715,8 +714,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 1000)) {
- DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 5000)) {
+ DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index cd1b64ab5ca..a589a55b223 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -113,6 +113,8 @@ struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a..fe359a239df 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
#define SW_I2C_CNTL_WRITE1BIT 6
//==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV '01.00'
+#define VESA_OEM_PRODUCT_REV "01.00"
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 8c2d6478a22..cd0290f946c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -44,10 +44,6 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
memset(&args, 0, sizeof(args));
- args.usOverscanRight = 0;
- args.usOverscanLeft = 0;
- args.usOverscanBottom = 0;
- args.usOverscanTop = 0;
args.ucCRTC = radeon_crtc->crtc_id;
switch (radeon_crtc->rmx_type) {
@@ -56,7 +52,6 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_ASPECT:
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
@@ -69,17 +64,16 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_FULL:
default:
- args.usOverscanRight = 0;
- args.usOverscanLeft = 0;
- args.usOverscanBottom = 0;
- args.usOverscanTop = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.usOverscanRight = radeon_crtc->h_border;
+ args.usOverscanLeft = radeon_crtc->h_border;
+ args.usOverscanBottom = radeon_crtc->v_border;
+ args.usOverscanTop = radeon_crtc->v_border;
break;
}
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_scaler_setup(struct drm_crtc *crtc)
@@ -282,22 +276,22 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
u16 misc = 0;
memset(&args, 0, sizeof(args));
- args.usH_Size = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
args.usH_Blanking_Time =
- cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay);
- args.usV_Size = cpu_to_le16(mode->crtc_vdisplay);
+ cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
+ args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
args.usV_Blanking_Time =
- cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay);
+ cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
args.usH_SyncOffset =
- cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay);
+ cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
args.usH_SyncWidth =
cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
args.usV_SyncOffset =
- cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay);
+ cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
- /*args.ucH_Border = mode->hborder;*/
- /*args.ucV_Border = mode->vborder;*/
+ args.ucH_Border = radeon_crtc->h_border;
+ args.ucV_Border = radeon_crtc->v_border;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
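To make the new border handling above concrete, a worked example with hypothetical numbers (a standard 1920x1080@60 CRTC timing with an 8-pixel horizontal and vertical border):
/*
 * hdisplay = 1920, hblank_end = 2200, hsync_start = 2008, h_border = 8
 *	usH_Size          = 1920 - 2*8          = 1904
 *	usH_Blanking_Time = (2200 - 1920) + 2*8 = 296
 *	usH_SyncOffset    = (2008 - 1920) + 8   = 96
 * vdisplay = 1080, vblank_end = 1125, vsync_start = 1084, v_border = 8
 *	usV_Size          = 1080 - 2*8          = 1064
 *	usV_Blanking_Time = (1125 - 1080) + 2*8 = 61
 *	usV_SyncOffset    = (1084 - 1080) + 8   = 12
 * ucH_Border/ucV_Border are programmed with the raw border value (8).
 */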
@@ -338,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+ args.ucOverscanRight = radeon_crtc->h_border;
+ args.ucOverscanLeft = radeon_crtc->h_border;
+ args.ucOverscanBottom = radeon_crtc->v_border;
+ args.ucOverscanTop = radeon_crtc->v_border;
+
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -477,6 +476,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
int encoder_mode = 0;
+ u32 dp_clock = mode->clock;
+ int bpc = 8;
/* reset the pll flags */
pll->flags = 0;
@@ -519,6 +520,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ }
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -527,6 +539,21 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->algo = PLL_ALGO_LEGACY;
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
}
+ /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
+ * (on some boards at least) prefers the legacy algo. I'm not
+ * sure whether this should be handled generically or on a
+ * case-by-case quirk basis. Both algos should work fine in the
+ * majority of cases.
+ */
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ ((rdev->family == CHIP_RV515) ||
+ (rdev->family == CHIP_RV620))) {
+ /* allow the user to override just in case */
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -561,6 +588,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP eventually */
+ /* args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+ } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+ }
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@@ -574,10 +609,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- else {
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else {
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
+ }
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -586,13 +631,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
DISPPLL_CONFIG_DUAL_LINK;
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
- if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- else {
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ /* want to enable SS on LVDS eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
+ } else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
@@ -669,56 +720,25 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ int crtc_id,
+ int pll_id,
+ u32 encoder_mode,
+ u32 encoder_id,
+ u32 clock,
+ u32 ref_div,
+ u32 fb_div,
+ u32 frac_fb_div,
+ u32 post_div)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder = NULL;
- struct radeon_encoder *radeon_encoder = NULL;
u8 frev, crev;
- int index;
+ int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
union set_pixel_clock args;
- u32 pll_clock = mode->clock;
- u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
- struct radeon_pll *pll;
- u32 adjusted_clock;
- int encoder_mode = 0;
memset(&args, 0, sizeof(args));
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- radeon_encoder = to_radeon_encoder(encoder);
- encoder_mode = atombios_get_encoder_mode(encoder);
- break;
- }
- }
-
- if (!radeon_encoder)
- return;
-
- switch (radeon_crtc->pll_id) {
- case ATOM_PPLL1:
- pll = &rdev->clock.p1pll;
- break;
- case ATOM_PPLL2:
- pll = &rdev->clock.p2pll;
- break;
- case ATOM_DCPLL:
- case ATOM_PPLL_INVALID:
- default:
- pll = &rdev->clock.dcpll;
- break;
- }
-
- /* adjust pixel clock as needed */
- adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
-
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
-
- index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return;
@@ -727,47 +747,49 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
case 1:
switch (crev) {
case 1:
- args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ if (clock == ATOM_DISABLE)
+ return;
+ args.v1.usPixelClock = cpu_to_le16(clock / 10);
args.v1.usRefDiv = cpu_to_le16(ref_div);
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
- args.v1.ucPpll = radeon_crtc->pll_id;
- args.v1.ucCRTC = radeon_crtc->crtc_id;
+ args.v1.ucPpll = pll_id;
+ args.v1.ucCRTC = crtc_id;
args.v1.ucRefDivSrc = 1;
break;
case 2:
- args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v2.usPixelClock = cpu_to_le16(clock / 10);
args.v2.usRefDiv = cpu_to_le16(ref_div);
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
- args.v2.ucPpll = radeon_crtc->pll_id;
- args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucPpll = pll_id;
+ args.v2.ucCRTC = crtc_id;
args.v2.ucRefDivSrc = 1;
break;
case 3:
- args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.usPixelClock = cpu_to_le16(clock / 10);
args.v3.usRefDiv = cpu_to_le16(ref_div);
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
- args.v3.ucPpll = radeon_crtc->pll_id;
- args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
- args.v3.ucTransmitterId = radeon_encoder->encoder_id;
+ args.v3.ucPpll = pll_id;
+ args.v3.ucMiscInfo = (pll_id << 2);
+ args.v3.ucTransmitterId = encoder_id;
args.v3.ucEncoderMode = encoder_mode;
break;
case 5:
- args.v5.ucCRTC = radeon_crtc->crtc_id;
- args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v5.ucCRTC = crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(clock / 10);
args.v5.ucRefDiv = ref_div;
args.v5.usFbDiv = cpu_to_le16(fb_div);
args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
args.v5.ucPostDiv = post_div;
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
- args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
- args.v5.ucPpll = radeon_crtc->pll_id;
+ args.v5.ucPpll = pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -782,6 +804,56 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ u32 adjusted_clock;
+ int encoder_mode = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ pll = &rdev->clock.p1pll;
+ break;
+ case ATOM_PPLL2:
+ pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ default:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
+
+ /* adjust pixel clock as needed */
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
+
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ encoder_mode, radeon_encoder->encoder_id, mode->clock,
+ ref_div, fb_div, frac_fb_div, post_div);
+
+}
+
static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -797,7 +869,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -841,6 +913,11 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
@@ -931,7 +1008,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -979,11 +1056,18 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
- fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+ if (rdev->family >= CHIP_R600) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
+ } else {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
- if (tiling_flags & RADEON_TILING_MICRO)
- fb_format |= AVIVO_D1GRPH_TILED;
+ if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= AVIVO_D1GRPH_TILED;
+ }
if (radeon_crtc->crtc_id == 0)
WREG32(AVIVO_D1VGA_CONTROL, 0);
@@ -992,11 +1076,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
- WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
- WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
} else {
- WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
- WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
}
}
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1133,8 +1217,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ bool is_tvcv = false;
- /* TODO color tiling */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ /* find tv std */
+ if (encoder->crtc == crtc) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ is_tvcv = true;
+ }
+ }
atombios_disable_ss(crtc);
/* always set DCPLL */
@@ -1145,9 +1239,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- else if (ASIC_IS_AVIVO(rdev))
- atombios_crtc_set_timing(crtc, adjusted_mode);
- else {
+ else if (ASIC_IS_AVIVO(rdev)) {
+ if (is_tvcv)
+ atombios_crtc_set_timing(crtc, adjusted_mode);
+ else
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ } else {
atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1191,6 +1288,24 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
atombios_lock_crtc(crtc, ATOM_DISABLE);
}
+static void atombios_crtc_disable(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+ /* disable the ppll */
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0);
+ break;
+ default:
+ break;
+ }
+ radeon_crtc->pll_id = -1;
+}
+
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.dpms = atombios_crtc_dpms,
.mode_fixup = atombios_crtc_mode_fixup,
@@ -1199,6 +1314,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.load_lut = radeon_crtc_load_lut,
+ .disable = atombios_crtc_disable,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index abffb1499e2..4e7778d44b8 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -296,7 +296,7 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
- DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+ DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
@@ -313,7 +313,7 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
if (p >= dp_pre_emphasis_max(v))
p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
- DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+ DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
@@ -358,7 +358,7 @@ retry:
if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
- DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
+ DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
@@ -461,10 +461,10 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
memcpy(dig_connector->dpcd, msg, 8);
{
int i;
- DRM_DEBUG("DPCD: ");
+ DRM_DEBUG_KMS("DPCD: ");
for (i = 0; i < 8; i++)
- DRM_DEBUG("%02x ", msg[i]);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("%02x ", msg[i]);
+ DRM_DEBUG_KMS("\n");
}
return true;
}
@@ -512,7 +512,7 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
return false;
}
- DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+ DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
return true;
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig_connector->linkb)
+ if (dig->linkb)
enc_id |= ATOM_DP_CONFIG_LINK_B;
else
enc_id |= ATOM_DP_CONFIG_LINK_A;
@@ -695,7 +695,7 @@ void dp_link_train(struct drm_encoder *encoder,
if (!clock_recovery)
DRM_ERROR("clock recovery failed\n");
else
- DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+ DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
@@ -739,7 +739,7 @@ void dp_link_train(struct drm_encoder *encoder,
if (!channel_eq)
DRM_ERROR("channel eq failed\n");
else
- DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+ DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1caf625e472..79082d4398a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,23 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+/* get temperature in millidegrees */
+u32 evergreen_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ u32 actual_temp = 0;
+
+ if ((temp >> 10) & 1)
+ actual_temp = 0;
+ else if ((temp >> 9) & 1)
+ actual_temp = 255;
+ else
+ actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
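A quick worked decode of the new evergreen_get_temp() above, written as a standalone sketch (the raw register value in the comment is hypothetical): the function returns 0 when bit 10 of the extracted ASIC_T field is set, 255 degrees when bit 9 is set, and otherwise bits 8:1 as degrees C, scaled to millidegrees.
static u32 asic_t_to_millidegrees_sketch(u32 thermal_status)
{
	u32 temp = (thermal_status & ASIC_T_MASK) >> ASIC_T_SHIFT;
	if ((temp >> 10) & 1)		/* reading flagged invalid */
		return 0;
	if ((temp >> 9) & 1)		/* reading flagged saturated */
		return 255 * 1000;
	/* e.g. temp == 0x5a: (0x5a >> 1) & 0xff == 45, so 45000 millidegrees */
	return ((temp >> 1) & 0xff) * 1000;
}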
@@ -658,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+ int r;
+ uint32_t cp_me;
+
+ r = radeon_ring_lock(rdev, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
+
+ cp_me = 0xff;
+ WREG32(CP_ME_CNTL, cp_me);
+
+ r = radeon_ring_lock(rdev, 4);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ /* init some VGT regs */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, 0xe);
+ radeon_ring_write(rdev, 0x10);
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+}
+
int evergreen_cp_resume(struct radeon_device *rdev)
{
u32 tmp;
@@ -702,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
- r600_cp_start(rdev);
+ evergreen_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
@@ -1106,15 +1160,27 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
- } else
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
+ } else {
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ gb_backend_map = 0x66442200;
+ break;
+ case CHIP_JUNIPER:
+ gb_backend_map = 0x00006420;
+ break;
+ default:
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ }
+ }
+ rdev->config.evergreen.tile_config = gb_addr_config;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -1334,8 +1400,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
/* size in MB on evergreen */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
@@ -2036,11 +2102,6 @@ int evergreen_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = evergreen_startup(rdev);
if (r) {
@@ -2146,9 +2207,6 @@ int evergreen_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2218,7 +2276,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index e028c1cd9d9..2330f3a36fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -61,6 +61,11 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
+# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
+# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
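For reference, the arithmetic behind the new ARRAY_MODE field used by the atombios_crtc.c hunk above: it is a 3-bit field shifted into bits 22:20, so the two tiling cases expand to fixed constants.
/*
 * EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1)
 *	= ((4 & 0x7) << 20) = 0x00400000	(macro tiling)
 * EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1)
 *	= ((2 & 0x7) << 20) = 0x00200000	(micro tiling)
 */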
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a1cd621780e..9b7532dd30f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -165,6 +165,11 @@
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
+#define CG_MULT_THERMAL_STATUS 0x740
+#define ASIC_T(x) ((x) << 16)
+#define ASIC_T_MASK 0x7FF0000
+#define ASIC_T_SHIFT 16
+
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index a89a15ab524..e151f16a8f8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -141,7 +141,7 @@ void r100_pm_get_dynpm_state(struct radeon_device *rdev)
/* only one clock mode per power state */
rdev->pm.requested_clock_mode_index = 0;
- DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
@@ -276,7 +276,7 @@ void r100_pm_misc(struct radeon_device *rdev)
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
ps->pcie_lanes);
- DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
}
}
@@ -849,7 +849,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
const char *fw_name = NULL;
int err;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
@@ -1803,6 +1803,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
/* triggers drawing using indices to vertex buffer */
+ case PACKET3_3D_CLEAR_HIZ:
+ case PACKET3_3D_CLEAR_ZMASK:
+ if (p->rdev->hyperz_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
@@ -2015,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
- if (elapsed >= 3000) {
- /* very likely the improbable case where current
- * rptr is equal to last recorded, a while ago, rptr
- * this is more likely a false positive update tracking
- * information which should force us to be recall at
- * latter point
- */
- lockup->last_cp_rptr = cp->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- if (elapsed >= 1000) {
+ if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
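The hunk above drops the 3-second false-positive re-arm path and raises the stall threshold from 1 to 10 seconds. A minimal sketch of the jiffies-based pattern it relies on (a hypothetical helper, not the driver's r100_gpu_lockup struct):
struct stall_probe {
	u32 last_rptr;
	unsigned long last_jiffies;
};
static bool stall_probe_expired(struct stall_probe *p, u32 rptr)
{
	if (rptr != p->last_rptr) {
		/* the ring made progress: restart the clock */
		p->last_rptr = rptr;
		p->last_jiffies = jiffies;
		return false;
	}
	/* same read pointer as last time: report a lockup only after 10s */
	return jiffies_to_msecs(jiffies - p->last_jiffies) >= 10000;
}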
@@ -2295,8 +2289,8 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
u64 config_aper_size;
/* work out accessible VRAM */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
@@ -2364,11 +2358,10 @@ void r100_mc_init(struct radeon_device *rdev)
*/
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
- if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
- return;
+ if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
+ (void)RREG32(RADEON_CLOCK_CNTL_DATA);
+ (void)RREG32(RADEON_CRTC_GEN_CNTL);
}
- (void)RREG32(RADEON_CLOCK_CNTL_DATA);
- (void)RREG32(RADEON_CRTC_GEN_CNTL);
}
static void r100_pll_errata_after_data(struct radeon_device *rdev)
@@ -2643,7 +2636,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
flags |= pitch / 8;
- DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+ DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
@@ -3039,7 +3032,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
#endif
- DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
+ DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
/* (unsigned int)info->SavedReg->grph_buffer_cntl, */
(unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
}
@@ -3135,7 +3128,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
}
- DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
+ DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
}
}
@@ -3304,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
unsigned long size;
unsigned prim_walk;
unsigned nverts;
+ unsigned num_cb = track->num_cb;
- for (i = 0; i < track->num_cb; i++) {
+ if (!track->zb_cb_clear && !track->color_channel_mask &&
+ !track->blend_read_enable)
+ num_cb = 0;
+
+ for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
- if (!(track->zb_cb_clear || track->color_channel_mask ||
- track->blend_read_enable)) {
- continue;
- }
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
@@ -3809,6 +3803,31 @@ void r100_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/*
+ * Due to how kexec works, it can leave the hw fully initialised when it
+ * boots the new kernel. However, doing our init sequence with the CP and
+ * WB set up causes GPU hangs, on the RN50 at least. So at startup do
+ * some quick sanity checks and restore sane values to avoid this
+ * problem.
+ */
+void r100_restore_sanity(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(RADEON_CP_CSQ_CNTL);
+ if (tmp) {
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ }
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ if (tmp) {
+ WREG32(RADEON_CP_RB_CNTL, 0);
+ }
+ tmp = RREG32(RADEON_SCRATCH_UMSK);
+ if (tmp) {
+ WREG32(RADEON_SCRATCH_UMSK, 0);
+ }
+}
+
int r100_init(struct radeon_device *rdev)
{
int r;
@@ -3821,6 +3840,8 @@ int r100_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* sanity check some registers to avoid hangs like after kexec */
+ r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
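The r100_restore_sanity() hunk above is the whole kexec fix: if the CP command queue, ring buffer, or scratch write-back mask are still enabled from the previous kernel, they are zeroed before this kernel's own init runs. A minimal sketch of the same quiesce pattern, table-driven instead of three explicit checks, assuming the driver's usual RREG32/WREG32 accessors; the helper and table names are hypothetical, not part of the patch:

static const u32 kexec_quiesce_regs[] = {
	RADEON_CP_CSQ_CNTL,	/* stop command stream fetches */
	RADEON_CP_RB_CNTL,	/* disable the CP ring buffer */
	RADEON_SCRATCH_UMSK,	/* mask scratch register write-back */
};

static void quiesce_after_kexec(struct radeon_device *rdev)
{
	int i;

	/* only touch a register if the previous kernel left it enabled */
	for (i = 0; i < ARRAY_SIZE(kexec_quiesce_regs); i++)
		if (RREG32(kexec_quiesce_regs[i]))
			WREG32(kexec_quiesce_regs[i], 0);
}

The later r300/r420/r520 hunks reuse r100_restore_sanity() right after surface init, so each of those init paths also starts from a quiescent CP.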
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index d016b16fa11..b121b6c678d 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -48,10 +48,12 @@
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_3D_CLEAR_ZMASK 0x32
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_3D_CLEAR_HIZ 0x37
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 19a7ef7ee34..c827738ad7d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1048,14 +1048,47 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLOR_CHANNEL_MASK */
track->color_channel_mask = idx_value;
break;
- case 0x4d1c:
+ case 0x43a4:
+ /* SC_HYPERZ_EN */
+ /* r300c emits this register - we need to disable hyperz for it
+ * without complaining */
+ if (p->rdev->hyperz_filp != p->filp) {
+ if (idx_value & 0x1)
+ ib[idx] = idx_value & ~1;
+ }
+ break;
+ case 0x4f1c:
/* ZB_BW_CNTL */
track->zb_cb_clear = !!(idx_value & (1 << 5));
+ if (p->rdev->hyperz_filp != p->filp) {
+ if (idx_value & (R300_HIZ_ENABLE |
+ R300_RD_COMP_ENABLE |
+ R300_WR_COMP_ENABLE |
+ R300_FAST_FILL_ENABLE))
+ goto fail;
+ }
break;
case 0x4e04:
/* RB3D_BLENDCNTL */
track->blend_read_enable = !!(idx_value & (1 << 2));
break;
+ case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+ break;
+ case 0x4f30: /* ZB_MASK_OFFSET */
+ case 0x4f34: /* ZB_ZMASK_PITCH */
+ case 0x4f44: /* ZB_HIZ_OFFSET */
+ case 0x4f54: /* ZB_HIZ_PITCH */
+ if (idx_value && (p->rdev->hyperz_filp != p->filp))
+ goto fail;
+ break;
+ case 0x4028:
+ if (idx_value && (p->rdev->hyperz_filp != p->filp))
+ goto fail;
+ /* GB_Z_PEQ_CONFIG */
+ if (p->rdev->family >= CHIP_RV350)
+ break;
+ goto fail;
+ break;
case 0x4be8:
/* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530)
@@ -1066,8 +1099,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
return 0;
fail:
- printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
- reg, idx);
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
+ reg, idx, idx_value);
return -EINVAL;
}
@@ -1161,6 +1194,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
+ case PACKET3_3D_CLEAR_HIZ:
+ case PACKET3_3D_CLEAR_ZMASK:
+ if (p->rdev->hyperz_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
@@ -1380,6 +1418,8 @@ int r300_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 968a33317fb..0c036c60d9d 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -48,10 +48,12 @@
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_3D_CLEAR_ZMASK 0x32
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_3D_CLEAR_HIZ 0x37
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index e6c89142bb4..59f7bccc5be 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -343,6 +343,8 @@ int r420_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 93c9a2bbccf..6ac1f604e29 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -386,6 +386,11 @@
# define AVIVO_D1GRPH_TILED (1 << 20)
# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
+# define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
+# define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
+# define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
+# define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
+
/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
* block and vice versa. This applies to GRPH, CUR, etc.
*/
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 694af7cc23a..1458dee902d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -231,6 +231,8 @@ int r520_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e100f69faee..7a04959ba0e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -92,6 +92,21 @@ void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
+/* get temperature in millidegrees */
+u32 rv6xx_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ u32 actual_temp = 0;
+
+ if ((temp >> 7) & 1)
+ actual_temp = 0;
+ else
+ actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
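The new rv6xx_get_temp() decodes the ASIC_T field of CG_THERMAL_STATUS: bit 7 flags a negative or invalid reading, otherwise the temperature in degrees C is (raw >> 1) & 0xff, scaled to millidegrees for the thermal framework. A self-contained, runnable sketch of the same decode on a made-up sample value (the 0x50 input is an assumption for illustration, not taken from hardware):

#include <stdio.h>
#include <stdint.h>

/* mirrors the bit manipulation in rv6xx_get_temp() on a sample field value */
static uint32_t decode_rv6xx_temp(uint32_t asic_t_field)
{
	uint32_t actual_temp;

	if ((asic_t_field >> 7) & 1)
		actual_temp = 0;				/* negative/invalid reading */
	else
		actual_temp = (asic_t_field >> 1) & 0xff;	/* degrees C */

	return actual_temp * 1000;			/* millidegrees */
}

int main(void)
{
	printf("%u\n", decode_rv6xx_temp(0x50));	/* 0x50 -> 40 C -> prints 40000 */
	return 0;
}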
@@ -256,7 +271,7 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
}
}
- DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
@@ -571,7 +586,7 @@ void r600_pm_misc(struct radeon_device *rdev)
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage);
rdev->pm.current_vddc = voltage->voltage;
- DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
}
}
@@ -869,7 +884,17 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
u32 tmp;
/* flush hdp cache so updates hit vram */
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ u32 tmp;
+
+ /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ WREG32(HDP_DEBUG1, 0);
+ tmp = readl((void __iomem *)ptr);
+ } else
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
@@ -1217,8 +1242,8 @@ int r600_mc_init(struct radeon_device *rdev)
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
@@ -1609,7 +1634,7 @@ void r600_gpu_init(struct radeon_device *rdev)
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
-
+ rdev->config.r600.tile_config = tiling_config;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -2094,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family >= CHIP_CEDAR) {
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- } else if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
@@ -2464,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = r600_startup(rdev);
if (r) {
@@ -2561,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2638,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2716,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
@@ -3512,5 +3525,16 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
*/
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ rdev->vram_scratch.ptr) {
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ WREG32(HDP_DEBUG1, 0);
+ tmp = readl((void __iomem *)ptr);
+ } else
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
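Both HDP hunks in r600.c apply the same erratum workaround: on RV770 through RV740, a write to HDP_DEBUG1 followed by a framebuffer read flushes the HDP cache, while other families keep the plain write to HDP_MEM_COHERENCY_FLUSH_CNTL. A sketch that factors the two call sites into one helper; the function name and the fb_ptr parameter are illustrative only (the real hunks read the GART table pointer and the VRAM scratch pointer respectively):

static void hdp_flush(struct radeon_device *rdev, void __iomem *fb_ptr)
{
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    fb_ptr) {
		WREG32(HDP_DEBUG1, 0);
		(void)readl(fb_ptr);	/* the FB read-back is what forces the flush */
	} else {
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	}
}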
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 2b26553c352..b5443fe1c1d 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -63,7 +63,8 @@ int r600_audio_bits_per_sample(struct radeon_device *rdev)
case 0x4: return 32;
}
- DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
+ (int)value);
return 16;
}
@@ -150,7 +151,8 @@ static void r600_audio_update_hdmi(unsigned long param)
r600_hdmi_update_audio_settings(encoder);
}
- if(still_going) r600_audio_schedule_polling(rdev);
+ if (still_going)
+ r600_audio_schedule_polling(rdev);
}
/*
@@ -158,8 +160,9 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
- DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
+ DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+ rdev->audio_enabled = enable;
}
/*
@@ -195,12 +198,14 @@ void r600_audio_enable_polling(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+ DRM_DEBUG("r600_audio_enable_polling: %d\n",
+ radeon_encoder->audio_polling_active);
if (radeon_encoder->audio_polling_active)
return;
radeon_encoder->audio_polling_active = 1;
- mod_timer(&rdev->audio_timer, jiffies + 1);
+ if (rdev->audio_enabled)
+ mod_timer(&rdev->audio_timer, jiffies + 1);
}
/*
@@ -209,7 +214,8 @@ void r600_audio_enable_polling(struct drm_encoder *encoder)
void r600_audio_disable_polling(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+ DRM_DEBUG("r600_audio_disable_polling: %d\n",
+ radeon_encoder->audio_polling_active);
radeon_encoder->audio_polling_active = 0;
}
@@ -236,7 +242,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
default:
- DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
@@ -266,7 +272,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!radeon_audio || !r600_audio_chipset_supported(rdev))
+ if (!rdev->audio_enabled)
return;
del_timer(&rdev->audio_timer);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622ae74e..9ceb2a1ce79 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 0271b53fa2d..e8151c1d55b 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -39,37 +39,45 @@
const u32 r6xx_default_state[] =
{
- 0xc0002400,
+ 0xc0002400, /* START_3D_CMDBUF */
0x00000000,
- 0xc0012800,
+
+ 0xc0012800, /* CONTEXT_CONTROL */
0x80000000,
0x80000000,
+
0xc0016800,
0x00000010,
- 0x00008000,
+ 0x00008000, /* WAIT_UNTIL */
+
0xc0016800,
0x00000542,
- 0x07000003,
+ 0x07000003, /* TA_CNTL_AUX */
+
0xc0016800,
0x000005c5,
- 0x00000000,
+ 0x00000000, /* VC_ENHANCE */
+
0xc0016800,
0x00000363,
- 0x00000000,
+ 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
0xc0016800,
0x0000060c,
- 0x82000000,
+ 0x82000000, /* DB_DEBUG */
+
0xc0016800,
0x0000060e,
- 0x01020204,
- 0xc0016f00,
- 0x00000000,
- 0x00000000,
- 0xc0016f00,
- 0x00000001,
+ 0x01020204, /* DB_WATERMARKS */
+
+ 0xc0026f00,
0x00000000,
+ 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+ 0x00000000, /* SQ_VTX_START_INST_LOC */
+
0xc0096900,
0x0000022a,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
@@ -78,515 +86,317 @@ const u32 r6xx_default_state[] =
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
+
0xc0016900,
0x00000004,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_INFO */
+
+ 0xc0026900,
0x0000000a,
- 0x00000000,
- 0xc0016900,
- 0x0000000b,
- 0x00000000,
- 0xc0016900,
- 0x0000010c,
- 0x00000000,
- 0xc0016900,
- 0x0000010d,
- 0x00000000,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
0xc0016900,
0x00000200,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0026900,
0x00000343,
- 0x00000060,
- 0xc0016900,
- 0x00000344,
- 0x00000040,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000040, /* DB_RENDER_OVERRIDE */
+
0xc0016900,
0x00000351,
- 0x0000aa00,
- 0xc0016900,
- 0x00000104,
- 0x00000000,
- 0xc0016900,
- 0x0000010e,
- 0x00000000,
- 0xc0046900,
- 0x00000105,
- 0x00000000,
- 0x00000000,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc00f6900,
+ 0x00000100,
+ 0x00000800, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
0x00000000,
0x00000000,
- 0xc0036900,
- 0x00000109,
0x00000000,
+ 0x00000000, /* CB_FOG_RED */
0x00000000,
0x00000000,
+ 0x00000000, /* DB_STENCILREFMASK */
+ 0x00000000, /* DB_STENCILREFMASK_BF */
+ 0x00000000, /* SX_ALPHA_REF */
+
0xc0046900,
0x0000030c,
- 0x01000000,
+ 0x01000000, /* CB_CLRCMP_CNTL */
0x00000000,
0x00000000,
0x00000000,
+
0xc0046900,
0x00000048,
- 0x3f800000,
+ 0x3f800000, /* CB_CLEAR_RED */
0x00000000,
0x3f800000,
0x3f800000,
- 0xc0016900,
- 0x0000008e,
- 0x0000000f,
+
0xc0016900,
0x00000080,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00a6900,
0x00000083,
- 0x0000ffff,
- 0xc0016900,
- 0x00000084,
- 0x00000000,
- 0xc0016900,
- 0x00000085,
+ 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000,
- 0xc0016900,
- 0x00000086,
0x00000000,
- 0xc0016900,
- 0x00000087,
0x20002000,
- 0xc0016900,
- 0x00000088,
0x00000000,
- 0xc0016900,
- 0x00000089,
0x20002000,
- 0xc0016900,
- 0x0000008a,
0x00000000,
- 0xc0016900,
- 0x0000008b,
0x20002000,
- 0xc0016900,
- 0x0000008c,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SC_EDGERULE */
+
+ 0xc0406900,
0x00000094,
- 0x80000000,
- 0xc0016900,
- 0x00000095,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
0x20002000,
- 0xc0026900,
- 0x000000b4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000096,
0x80000000,
- 0xc0016900,
- 0x00000097,
0x20002000,
- 0xc0026900,
- 0x000000b6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000098,
0x80000000,
- 0xc0016900,
- 0x00000099,
0x20002000,
- 0xc0026900,
- 0x000000b8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009a,
0x80000000,
- 0xc0016900,
- 0x0000009b,
0x20002000,
- 0xc0026900,
- 0x000000ba,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009c,
0x80000000,
- 0xc0016900,
- 0x0000009d,
0x20002000,
- 0xc0026900,
- 0x000000bc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009e,
0x80000000,
- 0xc0016900,
- 0x0000009f,
0x20002000,
- 0xc0026900,
- 0x000000be,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a0,
0x80000000,
- 0xc0016900,
- 0x000000a1,
0x20002000,
- 0xc0026900,
- 0x000000c0,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a2,
0x80000000,
- 0xc0016900,
- 0x000000a3,
0x20002000,
- 0xc0026900,
- 0x000000c2,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a4,
0x80000000,
- 0xc0016900,
- 0x000000a5,
0x20002000,
- 0xc0026900,
- 0x000000c4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a6,
0x80000000,
- 0xc0016900,
- 0x000000a7,
0x20002000,
- 0xc0026900,
- 0x000000c6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a8,
0x80000000,
- 0xc0016900,
- 0x000000a9,
0x20002000,
- 0xc0026900,
- 0x000000c8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000aa,
0x80000000,
- 0xc0016900,
- 0x000000ab,
0x20002000,
- 0xc0026900,
- 0x000000ca,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ac,
0x80000000,
- 0xc0016900,
- 0x000000ad,
0x20002000,
- 0xc0026900,
- 0x000000cc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ae,
0x80000000,
- 0xc0016900,
- 0x000000af,
0x20002000,
- 0xc0026900,
- 0x000000ce,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000b0,
0x80000000,
- 0xc0016900,
- 0x000000b1,
0x20002000,
- 0xc0026900,
- 0x000000d0,
- 0x00000000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000,
- 0xc0016900,
- 0x000000b2,
- 0x80000000,
- 0xc0016900,
- 0x000000b3,
- 0x20002000,
- 0xc0026900,
- 0x000000d2,
0x00000000,
0x3f800000,
- 0xc0016900,
- 0x00000293,
- 0x00004010,
- 0xc0016900,
- 0x00000300,
0x00000000,
- 0xc0016900,
- 0x00000301,
- 0x00000000,
- 0xc0016900,
- 0x00000312,
- 0xffffffff,
- 0xc0016900,
- 0x00000307,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000308,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000283,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000292,
+ 0x3f800000,
0x00000000,
- 0xc0066900,
- 0x0000010f,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000206,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000207,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000208,
+ 0x3f800000,
0x00000000,
- 0xc0046900,
- 0x00000303,
0x3f800000,
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MPASS_PS_CNTL */
+ 0x00004010, /* PA_SC_MODE_CNTL */
+
+ 0xc0096900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x0000002d, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000,
0x3f800000,
0x3f800000,
- 0xc0016900,
- 0x00000205,
- 0x00000004,
- 0xc0016900,
- 0x00000280,
- 0x00000000,
- 0xc0016900,
- 0x00000281,
+ 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
0x00000000,
+
0xc0016900,
+ 0x00000312,
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc0066900,
0x0000037e,
- 0x00000000,
- 0xc0016900,
- 0x00000382,
- 0x00000000,
- 0xc0016900,
- 0x00000380,
- 0x00000000,
- 0xc0016900,
- 0x00000383,
- 0x00000000,
- 0xc0016900,
- 0x00000381,
- 0x00000000,
- 0xc0016900,
- 0x00000282,
- 0x00000008,
- 0xc0016900,
- 0x00000302,
- 0x0000002d,
- 0xc0016900,
- 0x0000037f,
- 0x00000000,
- 0xc0016900,
- 0x000001b2,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+ 0xc0046900,
0x000001b6,
- 0x00000000,
- 0xc0016900,
- 0x000001b7,
- 0x00000000,
- 0xc0016900,
- 0x000001b8,
- 0x00000000,
- 0xc0016900,
- 0x000001b9,
- 0x00000000,
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00000000, /* SPI_FOG_FUNC_SCALE */
+ 0x00000000, /* SPI_FOG_FUNC_BIAS */
+
0xc0016900,
0x00000225,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_START_FS */
+
0xc0016900,
0x00000229,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
0xc0016900,
0x00000237,
- 0x00000000,
- 0xc0016900,
- 0x00000100,
- 0x00000800,
- 0xc0016900,
- 0x00000101,
- 0x00000000,
- 0xc0016900,
- 0x00000102,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+ 0xc0026900,
0x000002a8,
- 0x00000000,
- 0xc0016900,
- 0x000002a9,
- 0x00000000,
- 0xc0016900,
- 0x00000103,
- 0x00000000,
- 0xc0016900,
- 0x00000284,
- 0x00000000,
- 0xc0016900,
- 0x00000290,
- 0x00000000,
- 0xc0016900,
- 0x00000285,
- 0x00000000,
- 0xc0016900,
- 0x00000286,
- 0x00000000,
- 0xc0016900,
- 0x00000287,
- 0x00000000,
- 0xc0016900,
- 0x00000288,
- 0x00000000,
- 0xc0016900,
- 0x00000289,
- 0x00000000,
- 0xc0016900,
- 0x0000028a,
- 0x00000000,
- 0xc0016900,
- 0x0000028b,
- 0x00000000,
- 0xc0016900,
- 0x0000028c,
- 0x00000000,
- 0xc0016900,
- 0x0000028d,
- 0x00000000,
- 0xc0016900,
- 0x0000028e,
- 0x00000000,
- 0xc0016900,
- 0x0000028f,
- 0x00000000,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_REUSE_DEPTH */
+ 0x00000000, /* VGT_GROUP_PRIM_TYPE */
+ 0x00000000, /* VGT_GROUP_FIRST_DECR */
+ 0x00000000, /* VGT_GROUP_DECR */
+ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+ 0x00000000, /* VGT_GS_MODE */
+
0xc0016900,
0x000002a1,
- 0x00000000,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
0xc0016900,
0x000002a5,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+ 0xc0036900,
0x000002ac,
- 0x00000000,
- 0xc0016900,
- 0x000002ad,
- 0x00000000,
- 0xc0016900,
- 0x000002ae,
- 0x00000000,
+ 0x00000000, /* VGT_STRMOUT_EN */
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
0xc0016900,
0x000002c8,
- 0x00000000,
- 0xc0016900,
- 0x00000206,
- 0x00000100,
- 0xc0016900,
- 0x00000204,
- 0x00010000,
- 0xc0036e00,
- 0x00000000,
- 0x00000012,
- 0x00000000,
- 0x00000000,
- 0xc0016900,
- 0x0000008f,
- 0x0000000f,
- 0xc0016900,
- 0x000001e8,
- 0x00000001,
- 0xc0016900,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+ 0xc0076900,
0x00000202,
- 0x00cc0000,
+ 0x00cc0000, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CNTL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000244, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+
+ 0xc0026900,
+ 0x0000008e,
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
0xc0016900,
- 0x00000205,
- 0x00000244,
+ 0x000001e8,
+ 0x00000001, /* CB_SHADER_CONTROL */
+
0xc0016900,
- 0x00000203,
- 0x00000210,
+ 0x00000185,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
0xc0016900,
+ 0x00000191,
+ 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc0056900,
0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+ 0x00000000, /* SPI_THREAD_GROUPING */
+ 0x00000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+ 0xc0036e00, /* SET_SAMPLER */
0x00000000,
- 0xc0016900,
- 0x00000185,
- 0x00000000,
- 0xc0016900,
- 0x000001b3,
- 0x00000001,
- 0xc0016900,
- 0x000001b4,
+ 0x00000012,
0x00000000,
- 0xc0016900,
- 0x00000191,
- 0x00000b00,
- 0xc0016900,
- 0x000001b5,
0x00000000,
};
const u32 r7xx_default_state[] =
{
- 0xc0012800,
+ 0xc0012800, /* CONTEXT_CONTROL */
0x80000000,
0x80000000,
+
0xc0016800,
0x00000010,
- 0x00008000,
+ 0x00008000, /* WAIT_UNTIL */
+
0xc0016800,
0x00000542,
- 0x07000002,
+ 0x07000002, /* TA_CNTL_AUX */
+
0xc0016800,
0x000005c5,
- 0x00000000,
+ 0x00000000, /* VC_ENHANCE */
+
0xc0016800,
0x00000363,
- 0x00004000,
+ 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
0xc0016800,
0x0000060c,
- 0x00000000,
+ 0x00000000, /* DB_DEBUG */
+
0xc0016800,
0x0000060e,
- 0x00420204,
- 0xc0016f00,
- 0x00000000,
- 0x00000000,
- 0xc0016f00,
- 0x00000001,
+ 0x00420204, /* DB_WATERMARKS */
+
+ 0xc0026f00,
0x00000000,
+ 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+ 0x00000000, /* SQ_VTX_START_INST_LOC */
+
0xc0096900,
0x0000022a,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
@@ -595,470 +405,269 @@ const u32 r7xx_default_state[] =
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
+
0xc0016900,
0x00000004,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_INFO */
+
+ 0xc0026900,
0x0000000a,
- 0x00000000,
- 0xc0016900,
- 0x0000000b,
- 0x00000000,
- 0xc0016900,
- 0x0000010c,
- 0x00000000,
- 0xc0016900,
- 0x0000010d,
- 0x00000000,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
0xc0016900,
0x00000200,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0026900,
0x00000343,
- 0x00000060,
- 0xc0016900,
- 0x00000344,
- 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_RENDER_OVERRIDE */
+
0xc0016900,
0x00000351,
- 0x0000aa00,
- 0xc0016900,
- 0x00000104,
- 0x00000000,
- 0xc0016900,
- 0x0000010e,
- 0x00000000,
- 0xc0046900,
- 0x00000105,
- 0x00000000,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00000800, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
0x00000000,
0x00000000,
0x00000000,
+
+ 0xc0036900,
+ 0x0000010c,
+ 0x00000000, /* DB_STENCILREFMASK */
+ 0x00000000, /* DB_STENCILREFMASK_BF */
+ 0x00000000, /* SX_ALPHA_REF */
+
0xc0046900,
- 0x0000030c,
+ 0x0000030c, /* CB_CLRCMP_CNTL */
0x01000000,
0x00000000,
0x00000000,
0x00000000,
- 0xc0016900,
- 0x0000008e,
- 0x0000000f,
+
0xc0016900,
0x00000080,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00a6900,
0x00000083,
- 0x0000ffff,
- 0xc0016900,
- 0x00000084,
- 0x00000000,
- 0xc0016900,
- 0x00000085,
+ 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000,
- 0xc0016900,
- 0x00000086,
0x00000000,
- 0xc0016900,
- 0x00000087,
0x20002000,
- 0xc0016900,
- 0x00000088,
0x00000000,
- 0xc0016900,
- 0x00000089,
0x20002000,
- 0xc0016900,
- 0x0000008a,
0x00000000,
- 0xc0016900,
- 0x0000008b,
0x20002000,
- 0xc0016900,
- 0x0000008c,
- 0xaaaaaaaa,
- 0xc0016900,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+
+ 0xc0406900,
0x00000094,
- 0x80000000,
- 0xc0016900,
- 0x00000095,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
0x20002000,
- 0xc0026900,
- 0x000000b4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000096,
0x80000000,
- 0xc0016900,
- 0x00000097,
0x20002000,
- 0xc0026900,
- 0x000000b6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000098,
0x80000000,
- 0xc0016900,
- 0x00000099,
0x20002000,
- 0xc0026900,
- 0x000000b8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009a,
0x80000000,
- 0xc0016900,
- 0x0000009b,
0x20002000,
- 0xc0026900,
- 0x000000ba,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009c,
0x80000000,
- 0xc0016900,
- 0x0000009d,
0x20002000,
- 0xc0026900,
- 0x000000bc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009e,
0x80000000,
- 0xc0016900,
- 0x0000009f,
0x20002000,
- 0xc0026900,
- 0x000000be,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a0,
0x80000000,
- 0xc0016900,
- 0x000000a1,
0x20002000,
- 0xc0026900,
- 0x000000c0,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a2,
0x80000000,
- 0xc0016900,
- 0x000000a3,
0x20002000,
- 0xc0026900,
- 0x000000c2,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a4,
0x80000000,
- 0xc0016900,
- 0x000000a5,
0x20002000,
- 0xc0026900,
- 0x000000c4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a6,
0x80000000,
- 0xc0016900,
- 0x000000a7,
0x20002000,
- 0xc0026900,
- 0x000000c6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a8,
0x80000000,
- 0xc0016900,
- 0x000000a9,
0x20002000,
- 0xc0026900,
- 0x000000c8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000aa,
0x80000000,
- 0xc0016900,
- 0x000000ab,
0x20002000,
- 0xc0026900,
- 0x000000ca,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ac,
0x80000000,
- 0xc0016900,
- 0x000000ad,
0x20002000,
- 0xc0026900,
- 0x000000cc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ae,
0x80000000,
- 0xc0016900,
- 0x000000af,
0x20002000,
- 0xc0026900,
- 0x000000ce,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000b0,
0x80000000,
- 0xc0016900,
- 0x000000b1,
0x20002000,
- 0xc0026900,
- 0x000000d0,
- 0x00000000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000,
- 0xc0016900,
- 0x000000b2,
- 0x80000000,
- 0xc0016900,
- 0x000000b3,
- 0x20002000,
- 0xc0026900,
- 0x000000d2,
0x00000000,
0x3f800000,
- 0xc0016900,
- 0x00000293,
- 0x00514000,
- 0xc0016900,
- 0x00000300,
- 0x00000000,
- 0xc0016900,
- 0x00000301,
0x00000000,
- 0xc0016900,
- 0x00000312,
- 0xffffffff,
- 0xc0016900,
- 0x00000307,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000308,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000283,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000292,
+ 0x3f800000,
0x00000000,
- 0xc0066900,
- 0x0000010f,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000206,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000207,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000208,
+ 0x3f800000,
0x00000000,
- 0xc0046900,
- 0x00000303,
0x3f800000,
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MPASS_PS_CNTL */
+ 0x00514000, /* PA_SC_MODE_CNTL */
+
+ 0xc0096900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x0000002d, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000,
0x3f800000,
0x3f800000,
- 0xc0016900,
- 0x00000205,
- 0x00000004,
- 0xc0016900,
- 0x00000280,
- 0x00000000,
- 0xc0016900,
- 0x00000281,
+ 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
0x00000000,
+
0xc0016900,
+ 0x00000312,
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc0066900,
0x0000037e,
- 0x00000000,
- 0xc0016900,
- 0x00000382,
- 0x00000000,
- 0xc0016900,
- 0x00000380,
- 0x00000000,
- 0xc0016900,
- 0x00000383,
- 0x00000000,
- 0xc0016900,
- 0x00000381,
- 0x00000000,
- 0xc0016900,
- 0x00000282,
- 0x00000008,
- 0xc0016900,
- 0x00000302,
- 0x0000002d,
- 0xc0016900,
- 0x0000037f,
- 0x00000000,
- 0xc0016900,
- 0x000001b2,
- 0x00000001,
- 0xc0016900,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+ 0xc0046900,
0x000001b6,
- 0x00000000,
- 0xc0016900,
- 0x000001b7,
- 0x00000000,
- 0xc0016900,
- 0x000001b8,
- 0x00000000,
- 0xc0016900,
- 0x000001b9,
- 0x00000000,
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00000000, /* SPI_FOG_FUNC_SCALE */
+ 0x00000000, /* SPI_FOG_FUNC_BIAS */
+
0xc0016900,
0x00000225,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_START_FS */
+
0xc0016900,
0x00000229,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
0xc0016900,
0x00000237,
- 0x00000000,
- 0xc0016900,
- 0x00000100,
- 0x00000800,
- 0xc0016900,
- 0x00000101,
- 0x00000000,
- 0xc0016900,
- 0x00000102,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+ 0xc0026900,
0x000002a8,
- 0x00000000,
- 0xc0016900,
- 0x000002a9,
- 0x00000000,
- 0xc0016900,
- 0x00000103,
- 0x00000000,
- 0xc0016900,
- 0x00000284,
- 0x00000000,
- 0xc0016900,
- 0x00000290,
- 0x00000000,
- 0xc0016900,
- 0x00000285,
- 0x00000000,
- 0xc0016900,
- 0x00000286,
- 0x00000000,
- 0xc0016900,
- 0x00000287,
- 0x00000000,
- 0xc0016900,
- 0x00000288,
- 0x00000000,
- 0xc0016900,
- 0x00000289,
- 0x00000000,
- 0xc0016900,
- 0x0000028a,
- 0x00000000,
- 0xc0016900,
- 0x0000028b,
- 0x00000000,
- 0xc0016900,
- 0x0000028c,
- 0x00000000,
- 0xc0016900,
- 0x0000028d,
- 0x00000000,
- 0xc0016900,
- 0x0000028e,
- 0x00000000,
- 0xc0016900,
- 0x0000028f,
- 0x00000000,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_REUSE_DEPTH */
+ 0x00000000, /* VGT_GROUP_PRIM_TYPE */
+ 0x00000000, /* VGT_GROUP_FIRST_DECR */
+ 0x00000000, /* VGT_GROUP_DECR */
+ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+ 0x00000000, /* VGT_GS_MODE */
+
0xc0016900,
0x000002a1,
- 0x00000000,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
0xc0016900,
0x000002a5,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+ 0xc0036900,
0x000002ac,
- 0x00000000,
- 0xc0016900,
- 0x000002ad,
- 0x00000000,
- 0xc0016900,
- 0x000002ae,
- 0x00000000,
+ 0x00000000, /* VGT_STRMOUT_EN */
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
0xc0016900,
0x000002c8,
- 0x00000000,
- 0xc0016900,
- 0x00000206,
- 0x00000100,
- 0xc0016900,
- 0x00000204,
- 0x00010000,
- 0xc0036e00,
- 0x00000000,
- 0x00000012,
- 0x00000000,
- 0x00000000,
- 0xc0016900,
- 0x0000008f,
- 0x0000000f,
- 0xc0016900,
- 0x000001e8,
- 0x00000001,
- 0xc0016900,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+ 0xc0076900,
0x00000202,
- 0x00cc0000,
+ 0x00cc0000, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CNTL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000244, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+
+ 0xc0026900,
+ 0x0000008e,
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
0xc0016900,
- 0x00000205,
- 0x00000244,
+ 0x000001e8,
+ 0x00000001, /* CB_SHADER_CONTROL */
+
0xc0016900,
- 0x00000203,
- 0x00000210,
+ 0x00000185,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
0xc0016900,
+ 0x00000191,
+ 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc0056900,
0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+ 0x00000001, /* SPI_THREAD_GROUPING */
+ 0x00000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+ 0xc0036e00, /* SET_SAMPLER */
0x00000000,
- 0xc0016900,
- 0x00000185,
- 0x00000000,
- 0xc0016900,
- 0x000001b3,
- 0x00000001,
- 0xc0016900,
- 0x000001b4,
+ 0x00000012,
0x00000000,
- 0xc0016900,
- 0x00000191,
- 0x00000b00,
- 0xc0016900,
- 0x000001b5,
0x00000000,
};
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b378cbb..f437d36dd98 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
#ifndef R600_BLIT_SHADERS_H
#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 68e6f434930..4f4cd8b286d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -200,7 +200,7 @@ int r600_page_table_init(struct drm_device *dev)
entry->pagelist[i], 0,
PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
+ if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
r600_page_table_cleanup(dev, gart_info);
goto done;
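The r600_cp.c change replaces the busaddr[i] == 0 test with pci_dma_mapping_error(): a bus address of zero can be a valid mapping on some platforms, so only the API's error helper reliably detects a failed pci_map_page(). A hypothetical fragment showing the corrected idiom in isolation (map_one_page is a made-up name, not the driver's code):

static int map_one_page(struct pci_dev *pdev, struct page *page,
			dma_addr_t *busaddr)
{
	*busaddr = pci_map_page(pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, *busaddr))
		return -ENOMEM;	/* caller unwinds any earlier mappings */
	return 0;
}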
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 144c32d3713..250a3a91819 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
@@ -132,6 +133,7 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
case V_038004_FMT_GB_GR:
case V_038004_FMT_BG_RG:
case V_038004_COLOR_INVALID:
+ default:
*bpe = 16;
return -EINVAL;
}
@@ -166,70 +168,71 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
+ u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
volatile u32 *ib = p->ib->ptr;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
- size = radeon_bo_size(track->cb_color_bo[i]);
+ size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
__func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
i, track->cb_color_info[i]);
return -EINVAL;
}
- pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
+ /* pitch is the number of 8x8 tiles per row */
+ pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
- if (!pitch) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
- __func__, __LINE__, pitch, i, track->cb_color_size[i]);
- return -EINVAL;
- }
- height = size / (pitch * bpe);
+ height = size / (pitch * 8 * bpe);
if (height > 8192)
height = 8192;
+ if (height > 7)
+ height &= ~0x7;
switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
case V_0280A0_ARRAY_LINEAR_GENERAL:
+ /* technically height & 0x7 */
+ break;
case V_0280A0_ARRAY_LINEAR_ALIGNED:
- if (pitch & 0x3f) {
- dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
- __func__, __LINE__, pitch, bpe, pitch * bpe);
+ pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
return -EINVAL;
}
- if ((pitch * bpe) & (track->group_size - 1)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ if (!IS_ALIGNED(height, 8)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
break;
case V_0280A0_ARRAY_1D_TILED_THIN1:
- if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
+ pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, 8)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
- height &= ~0x7;
- if (!height)
- height = 8;
break;
case V_0280A0_ARRAY_2D_TILED_THIN1:
- if (pitch & ((8 * track->nbanks) - 1)) {
+ pitch_align = max((u32)track->nbanks,
+ (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
+ if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
__func__, __LINE__, pitch);
return -EINVAL;
}
- tmp = pitch * 8 * bpe * track->nsamples;
- tmp = tmp / track->nbanks;
- if (tmp & (track->group_size - 1)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ if (!IS_ALIGNED((height / 8), track->nbanks)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
- height &= ~((16 * track->npipes) - 1);
- if (!height)
- height = 16 * track->npipes;
break;
default:
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -238,16 +241,20 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return -EINVAL;
}
/* check offset */
- tmp = height * pitch;
+ tmp = height * pitch * 8 * bpe;
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
- dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]);
+ dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
+ dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
return -EINVAL;
}
/* limit max tile */
- tmp = (height * pitch) >> 6;
+ tmp = (height * pitch * 8) >> 6;
if (tmp < slice_tile_max)
slice_tile_max = tmp;
- tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
+ tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
ib[track->cb_color_size_idx[i]] = tmp;
return 0;
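In the reworked color-buffer check, pitch is now counted in 8x8 tiles per row and validated against a per-array-mode alignment derived from the memory group size, bytes per element, sample count, and bank count. A runnable arithmetic sketch of the two tiled cases; the group_size of 256 bytes and nbanks of 4 are assumed example values for a 32bpp single-sample surface, not read from the patch:

#include <stdio.h>
#include <stdint.h>

#define MAX_U32(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t bpe = 4, nsamples = 1, group_size = 256, nbanks = 4;

	/* 1D tiled: max(8, group_size / (8 * bpe * nsamples)) / 8, in 8x8 tiles */
	uint32_t align_1d = MAX_U32((uint32_t)8,
			group_size / (8 * bpe * nsamples)) / 8;

	/* 2D tiled: max(nbanks, ((group_size / 8) / (bpe * nsamples)) * nbanks) */
	uint32_t align_2d = MAX_U32(nbanks,
			((group_size / 8) / (bpe * nsamples)) * nbanks);

	printf("1D pitch align: %u tiles (%u pixels)\n", align_1d, align_1d * 8);
	printf("2D pitch align: %u tiles (%u pixels)\n", align_2d, align_2d * 8);
	return 0;
}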
@@ -289,7 +296,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* Check depth buffer */
if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles;
+ u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
if (track->db_bo == NULL) {
dev_warn(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
@@ -321,7 +328,6 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
dev_warn(p->dev, "z/stencil buffer size not set\n");
return -EINVAL;
}
- printk_once(KERN_WARNING "You have old & broken userspace please consider updating mesa\n");
tmp = radeon_bo_size(track->db_bo) - track->db_offset;
tmp = (tmp / bpe) >> 6;
if (!tmp) {
@@ -332,6 +338,51 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
} else {
+ size = radeon_bo_size(track->db_bo);
+ pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
+ height = size / (pitch * 8 * bpe);
+ height &= ~0x7;
+ if (!height)
+ height = 8;
+
+ switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, 8)) {
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
+ return -EINVAL;
+ }
+ break;
+ case V_028010_ARRAY_2D_TILED_THIN1:
+ pitch_align = max((u32)track->nbanks,
+ (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if ((height / 8) & (track->nbanks - 1)) {
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(track->db_offset, track->group_size)) {
+ dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
+ return -EINVAL;
+ }
ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
@@ -724,7 +775,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- track->db_depth_info = radeon_get_ib_value(p, idx);
+ if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ ib[idx] &= C_028010_ARRAY_MODE;
+ track->db_depth_info &= C_028010_ARRAY_MODE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+ track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ }
+ } else
+ track->db_depth_info = radeon_get_ib_value(p, idx);
break;
case R_028004_DB_DEPTH_VIEW:
track->db_depth_view = radeon_get_ib_value(p, idx);
@@ -757,8 +826,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
- track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+ }
+ } else {
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ }
break;
case R_028060_CB_COLOR0_SIZE:
case R_028064_CB_COLOR1_SIZE:
@@ -796,8 +882,6 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return -EINVAL;
}
ib[idx] = track->cb_color_base_last[tmp];
- printk_once(KERN_WARNING "You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -824,8 +908,6 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return -EINVAL;
}
ib[idx] = track->cb_color_base_last[tmp];
- printk_once(KERN_WARNING "You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -852,7 +934,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
- track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
@@ -864,7 +946,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
"0x%04X\n", reg);
return -EINVAL;
}
- track->db_offset = radeon_get_ib_value(p, idx);
+ track->db_offset = radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
break;
@@ -946,8 +1028,9 @@ static inline unsigned minify(unsigned size, unsigned levels)
}
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
- unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
- unsigned *l0_size, unsigned *mipmap_size)
+ unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
+ unsigned pitch_align,
+ unsigned *l0_size, unsigned *mipmap_size)
{
unsigned offset, i, level, face;
unsigned width, height, depth, rowstride, size;
@@ -960,18 +1043,18 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
height = minify(h0, i);
depth = minify(d0, i);
for(face = 0; face < nfaces; face++) {
- rowstride = ((width * bpe) + 255) & ~255;
+ rowstride = ALIGN((width * bpe), pitch_align);
size = height * rowstride * depth;
offset += size;
offset = (offset + 0x1f) & ~0x1f;
}
}
- *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
+ *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
*mipmap_size = offset;
- if (!blevel)
- *mipmap_size -= *l0_size;
if (!nlevels)
*mipmap_size = *l0_size;
+ if (!blevel)
+ *mipmap_size -= *l0_size;
}
/**
@@ -985,16 +1068,23 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
* the texture and mipmap bo object are big enough to cover this resource.
*/
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
- struct radeon_bo *texture,
- struct radeon_bo *mipmap)
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap,
+ u32 tiling_flags)
{
+ struct r600_cs_track *track = p->track;
u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size;
+ u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
return 0;
+
word0 = radeon_get_ib_value(p, idx + 0);
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1021,24 +1111,67 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
__func__, __LINE__, G_038004_DATA_FORMAT(word1));
return -EINVAL;
}
+
+ pitch = G_038000_PITCH(word0) + 1;
+ switch (G_038000_TILE_MODE(word0)) {
+ case V_038000_ARRAY_LINEAR_GENERAL:
+ pitch_align = 1;
+ /* XXX check height align */
+ break;
+ case V_038000_ARRAY_LINEAR_ALIGNED:
+ pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ /* XXX check height align */
+ break;
+ case V_038000_ARRAY_1D_TILED_THIN1:
+ pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ /* XXX check height align */
+ break;
+ case V_038000_ARRAY_2D_TILED_THIN1:
+ pitch_align = max((u32)track->nbanks,
+ (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ /* XXX check height align */
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_038000_TILE_MODE(word0), word0);
+ return -EINVAL;
+ }
+ /* XXX check offset align */
+
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
nlevels = G_038014_LAST_LEVEL(word1);
- r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
+ r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
+ (pitch_align * bpe),
+ &l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
- word0 = radeon_get_ib_value(p, idx + 2);
+ word0 = radeon_get_ib_value(p, idx + 2) << 8;
if ((l0_size + word0) > radeon_bo_size(texture)) {
dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word0 = radeon_get_ib_value(p, idx + 3);
+ word0 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
- dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
- return -EINVAL;
+ /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
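The base-address fields touched above (DB_DEPTH_BASE and the texture/mipmap bases) are carried in the command stream in 256-byte units: the checker shifts the IB value left by 8 to get a byte offset it can compare against radeon_bo_size(), and the relocation code patches in gpu_offset >> 8. A throwaway arithmetic sketch of that conversion (the address is invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gpu_offset = 0x12345600ULL;  /* hypothetical 256-byte aligned BO address */
        uint32_t ib_dword = (uint32_t)((gpu_offset >> 8) & 0xffffffff);  /* value placed in the IB */
        uint32_t byte_off = ib_dword << 8;    /* what gets compared with the BO size */

        printf("IB dword 0x%08x -> byte offset 0x%08x\n",
               (unsigned)ib_dword, (unsigned)byte_off);
        return 0;
    }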
@@ -1228,7 +1361,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < (pkt->count / 7); i++) {
struct radeon_bo *texture, *mipmap;
- u32 size, offset;
+ u32 size, offset, base_offset, mip_offset;
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
@@ -1238,7 +1371,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -1246,12 +1383,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = r600_check_texture_resource(p, idx+(i*7)+1,
- texture, mipmap);
+ texture, mipmap, reloc->lobj.tiling_flags);
if (r)
return r;
+ ib[idx+1+(i*7)+2] += base_offset;
+ ib[idx+1+(i*7)+3] += mip_offset;
break;
case SQ_TEX_VTX_VALID_BUFFER:
/* vtx base */
@@ -1261,10 +1400,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
- size = radeon_get_ib_value(p, idx+1+(i*7)+1);
+ size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
}
ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 26b4bc9d89a..e6a58ed48dc 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -435,7 +435,8 @@ static int r600_hdmi_find_free_block(struct drm_device *dev)
}
}
- if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
+ if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
+ rdev->family == CHIP_RS740) {
return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
} else if (rdev->family >= CHIP_R600) {
if (free_blocks[0])
@@ -466,7 +467,8 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
if (ASIC_IS_DCE32(rdev))
radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
- } else if (rdev->family >= CHIP_R600) {
+ } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
+ rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59c1f8793e6..858a1920c0d 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -239,12 +239,18 @@
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1<<0)
+#define CG_THERMAL_STATUS 0x7F4
+#define ASIC_T(x) ((x) << 0)
+#define ASIC_T_MASK 0x1FF
+#define ASIC_T_SHIFT 0
+
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
+#define HDP_DEBUG1 0x2F34
#define MC_VM_AGP_TOP 0x2184
#define MC_VM_AGP_BOT 0x2188
@@ -1154,6 +1160,10 @@
#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
#define C_038000_TILE_MODE 0xFFFFFF87
+#define V_038000_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_038000_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_038000_ARRAY_2D_TILED_THIN1 0x00000004
#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
#define C_038000_TILE_TYPE 0xFFFFFF7F
@@ -1357,6 +1367,8 @@
#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
#define C_028010_ARRAY_MODE 0xFFF87FFF
+#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004
#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
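The S_/G_/C_/V_ macros added here follow the r600d.h convention: S_ shifts a field value into place, G_ extracts it, C_ is the AND mask that clears it, and V_ names the legal values. A self-contained illustration using the TILE_MODE definitions quoted above (the starting register value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define S_038000_TILE_MODE(x)                 (((x) & 0xF) << 3)
    #define G_038000_TILE_MODE(x)                 (((x) >> 3) & 0xF)
    #define C_038000_TILE_MODE                    0xFFFFFF87
    #define V_038000_ARRAY_2D_TILED_THIN1         0x00000004

    int main(void)
    {
        uint32_t word0 = 0xdeadbeef;                                 /* arbitrary initial value */

        word0 &= C_038000_TILE_MODE;                                 /* clear the TILE_MODE field */
        word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);  /* select 2D tiling */
        printf("TILE_MODE = %u\n", (unsigned)G_038000_TILE_MODE(word0));  /* prints 4 */
        return 0;
    }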
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f94dc66c18..a168d644bf9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -178,6 +178,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
void rs690_pm_info(struct radeon_device *rdev);
+extern u32 rv6xx_get_temp(struct radeon_device *rdev);
+extern u32 rv770_get_temp(struct radeon_device *rdev);
+extern u32 evergreen_get_temp(struct radeon_device *rdev);
/*
* Fences.
@@ -232,7 +235,7 @@ struct radeon_surface_reg {
*/
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
@@ -671,6 +674,13 @@ struct radeon_pm_profile {
int dpms_on_cm_idx;
};
+enum radeon_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_EVERGREEN,
+};
+
struct radeon_voltage {
enum radeon_voltage_type type;
/* gpio voltage */
@@ -766,6 +776,9 @@ struct radeon_pm {
enum radeon_pm_profile_type profile;
int profile_index;
struct radeon_pm_profile profiles[PM_PROFILE_MAX];
+ /* internal thermal controller on rv6xx+ */
+ enum radeon_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
};
@@ -902,6 +915,7 @@ struct r600_asic {
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
+ unsigned tile_config;
struct r100_gpu_lockup lockup;
};
@@ -926,6 +940,7 @@ struct rv770_asic {
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
+ unsigned tile_config;
struct r100_gpu_lockup lockup;
};
@@ -951,6 +966,7 @@ struct evergreen_asic {
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
+ unsigned tile_config;
};
union radeon_asic_config {
@@ -997,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+ struct radeon_bo *robj;
+ volatile uint32_t *ptr;
+};
/*
* Core structure, functions and helpers.
@@ -1033,6 +1054,9 @@ struct radeon_device {
uint32_t pcie_reg_mask;
radeon_rreg_t pciep_rreg;
radeon_wreg_t pciep_wreg;
+ /* io port */
+ void __iomem *rio_mem;
+ resource_size_t rio_mem_size;
struct radeon_clock clock;
struct radeon_mc mc;
struct radeon_gart gart;
@@ -1060,6 +1084,7 @@ struct radeon_device {
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
+ struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
@@ -1069,6 +1094,7 @@ struct radeon_device {
struct mutex vram_mutex;
/* audio stuff */
+ bool audio_enabled;
struct timer_list audio_timer;
int audio_channels;
int audio_rate;
@@ -1078,6 +1104,10 @@ struct radeon_device {
bool powered_down;
struct notifier_block acpi_nb;
+ /* only one userspace can use Hyperz features at a time */
+ struct drm_file *hyperz_filp;
+ /* i2c buses */
+ struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1114,6 +1144,26 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
}
}
+static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+ if (reg < rdev->rio_mem_size)
+ return ioread32(rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
+static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ if (reg < rdev->rio_mem_size)
+ iowrite32(v, rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
/*
* Cast helper
*/
@@ -1152,6 +1202,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
WREG32_PLL(reg, tmp_); \
} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
+#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
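r100_io_rreg()/r100_io_wreg() hit the mapped I/O window directly when the register fits inside it and otherwise go through the MM_INDEX/MM_DATA pair. A toy userspace model of that indirection (plain arrays instead of ioread32()/iowrite32(); all names and sizes invented) for readers unfamiliar with the index/data idiom:

    #include <stdint.h>
    #include <stdio.h>

    #define MM_INDEX   0x0000u   /* index/data pair at the start of the window (invented offsets) */
    #define MM_DATA    0x0004u
    #define IO_WINDOW  0x0100u   /* only this much of the register file is directly reachable */

    static uint32_t reg_file[0x10000 / 4];  /* the full register file */
    static uint32_t mm_index;               /* latch behind MM_INDEX */

    /* what the "hardware" does with an I/O access */
    static void hw_write(uint32_t reg, uint32_t v)
    {
        if (reg == MM_INDEX)
            mm_index = v;                   /* select a register */
        else if (reg == MM_DATA)
            reg_file[mm_index / 4] = v;     /* forward to the selected register */
        else
            reg_file[reg / 4] = v;          /* direct access inside the window */
    }

    static uint32_t hw_read(uint32_t reg)
    {
        if (reg == MM_INDEX)
            return mm_index;
        if (reg == MM_DATA)
            return reg_file[mm_index / 4];
        return reg_file[reg / 4];
    }

    /* same shape as r100_io_wreg()/r100_io_rreg() above */
    static void wreg_io(uint32_t reg, uint32_t v)
    {
        if (reg < IO_WINDOW) {
            hw_write(reg, v);
        } else {
            hw_write(MM_INDEX, reg);
            hw_write(MM_DATA, v);
        }
    }

    static uint32_t rreg_io(uint32_t reg)
    {
        if (reg < IO_WINDOW)
            return hw_read(reg);
        hw_write(MM_INDEX, reg);
        return hw_read(MM_DATA);
    }

    int main(void)
    {
        wreg_io(0x2F3C, 0x1234);  /* outside the window -> takes the INDEX/DATA path */
        printf("0x2F3C = 0x%08x\n", (unsigned)rreg_io(0x2F3C));
        return 0;
    }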
/*
* Indirect registers accessor
@@ -1287,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
@@ -1415,6 +1465,13 @@ extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
extern int evergreen_irq_set(struct radeon_device *rdev);
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+#endif
+
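The #if defined(CONFIG_ACPI) block above uses the standard stub trick so callers never need their own #ifdef: with the option off, the static inline compiles to nothing. A generic, self-contained illustration of the same idiom (the feature name and function are invented):

    #include <stdio.h>

    /* #define CONFIG_WIDGET 1 */   /* uncomment to take the "real" path */

    #if defined(CONFIG_WIDGET)
    static int widget_init(void) { printf("real init\n"); return 0; }
    #else
    static inline int widget_init(void) { return 0; }   /* stub: keeps callers unconditional */
    #endif

    int main(void)
    {
        if (widget_init())
            printf("widget init failed\n");
        else
            printf("widget init ok (or compiled out)\n");
        return 0;
    }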
/* evergreen */
struct evergreen_mc_save {
u32 vga_control[6];
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
new file mode 100644
index 00000000000..3f6636bb2d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -0,0 +1,67 @@
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include "radeon.h"
+
+#include <linux/vga_switcheroo.h>
+
+/* Call the ATIF method
+ *
+ * Note: currently we discard the output
+ */
+static int radeon_atif_call(acpi_handle handle)
+{
+ acpi_status status;
+ union acpi_object atif_arg_elements[2];
+ struct acpi_object_list atif_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atif_arg.count = 2;
+ atif_arg.pointer = &atif_arg_elements[0];
+
+ atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[0].integer.value = 0;
+ atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[1].integer.value = 0;
+
+ status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+ /* Fail only if calling the method fails and ATIF is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return 1;
+ }
+
+ kfree(buffer.pointer);
+ return 0;
+}
+
+/* Call all ACPI methods here */
+int radeon_acpi_init(struct radeon_device *rdev)
+{
+ acpi_handle handle;
+ int ret;
+
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
+ return 0;
+
+ /* Get the device handle */
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+
+ /* Call the ATIF method */
+ ret = radeon_atif_call(handle);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index f40dfb77f9b..bd2f33e5c91 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev)
}
mode.mode = info.mode;
- agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ /* Chips with the AGP-to-PCIE bridge don't have the AGP_STATUS register.
+ * Just use whatever mode the host sets up.
+ */
+ if (rdev->family <= CHIP_RV350)
+ agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ else
+ agp_status = mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
if (is_v3) {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 646f96f97c7..25e1dd19779 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
@@ -857,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
return 0;
}
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_static_clocks_init(rdev->ddev);
- if (r) {
- return r;
- }
- DRM_INFO("Clocks initialized !\n");
- return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c0bbaa64157..a5aff755f0d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -113,6 +113,7 @@ void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
+void r100_restore_sanity(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
struct radeon_bo *robj);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 10673ae59cf..68932ba7b8a 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
/* from radeon_connector.c */
@@ -46,13 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb, uint32_t igp_lane_info,
+ uint32_t igp_lane_info,
uint16_t connector_object_id,
- struct radeon_hpd *hpd);
+ struct radeon_hpd *hpd,
+ struct radeon_router *router);
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
union atom_supported_devices {
@@ -84,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -114,7 +128,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
i2c.i2c_id = gpio->sucI2cId.ucAccess;
- i2c.valid = true;
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
break;
}
}
@@ -123,6 +138,80 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
return i2c;
}
+void radeon_atombios_i2c_init(struct radeon_device *rdev)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct radeon_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+ i2c.valid = false;
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg) {
+ i2c.valid = true;
+ sprintf(stmp, "0x%x", i2c.i2c_id);
+ rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+ }
+ }
+ }
+}
+
static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
@@ -164,6 +253,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_hpd hpd;
u32 reg;
+ memset(&hpd, 0, sizeof(struct radeon_hpd));
+
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
@@ -206,6 +297,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
+ struct radeon_device *rdev = dev->dev_private;
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x791e) &&
@@ -225,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1462) &&
+ (dev->pdev->subsystem_device == 0x7302)) {
+ if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ return false;
+ }
+
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&
@@ -308,13 +409,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Acer laptop reports DVI-D as DVI-I */
+ /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
+ struct radeon_gpio_rec gpio;
+
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
- (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+ gpio = radeon_lookup_gpio(rdev, 6);
+ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
*connector_type = DRM_MODE_CONNECTOR_DVID;
+ } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+ gpio = radeon_lookup_gpio(rdev, 7);
+ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ }
}
/* XFX Pine Group device rv730 reports no VGA DDC lines
@@ -399,13 +509,14 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_OBJECT_TABLE *router_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
- int i, j, path_size, device_support;
+ int i, j, k, path_size, device_support;
int connector_type;
u16 igp_lane_info, conn_id, connector_object_id;
- bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_router router;
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;
@@ -415,6 +526,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (crev < 2)
return false;
+ router.valid = false;
+
obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
(ctx->bios + data_offset +
@@ -422,6 +535,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ router_obj = (ATOM_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usRouterObjectTableOffset));
device_support = le16_to_cpu(obj_header->usDeviceSupport);
path_size = 0;
@@ -431,7 +547,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
addr += path_size;
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
- linkb = false;
+
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -508,33 +624,83 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
continue;
- for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2);
- j++) {
- uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
+ for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+ uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
- enc_obj_id =
+ grph_obj_id =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- enc_obj_num =
+ grph_obj_num =
(le16_to_cpu(path->usGraphicObjIds[j]) &
ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- enc_obj_type =
+ grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
- /* FIXME: add support for router objects */
- if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- if (enc_obj_num == 2)
- linkb = true;
- else
- linkb = false;
+ if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+ u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
radeon_add_atom_encoder(dev,
- enc_obj_id,
+ encoder_obj,
le16_to_cpu
(path->
usDeviceTag));
+ } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+ router.valid = false;
+ for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+ u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+ ATOM_I2C_RECORD *i2c_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+ ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+ ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+ (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ int enum_id;
+
+ router.router_id = router_obj_id;
+ for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
+ enum_id++) {
+ if (le16_to_cpu(path->usConnObjectId) ==
+ le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+ break;
+ }
+
+ while (record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_I2C_RECORD_TYPE:
+ i2c_record =
+ (ATOM_I2C_RECORD *)
+ record;
+ i2c_config =
+ (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ router.i2c_info =
+ radeon_lookup_i2c_gpio(rdev,
+ i2c_config->
+ ucAccess);
+ router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+ break;
+ case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+ ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+ record;
+ router.valid = true;
+ router.mux_type = ddc_path->ucMuxType;
+ router.mux_control_pin = ddc_path->ucMuxControlPin;
+ router.mux_state = ddc_path->ucMuxState[enum_id];
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ }
+ }
}
}
@@ -612,9 +778,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
- linkb, igp_lane_info,
+ igp_lane_info,
connector_object_id,
- &hpd);
+ &hpd,
+ &router);
}
}
@@ -691,6 +858,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
int i, j, max_device;
struct bios_connector *bios_connectors;
size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+ struct radeon_router router;
+
+ router.valid = false;
bios_connectors = kzalloc(bc_size, GFP_KERNEL);
if (!bios_connectors)
@@ -723,7 +893,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
if (i == ATOM_DEVICE_CV_INDEX) {
- DRM_DEBUG("Skipping Component Video\n");
+ DRM_DEBUG_KMS("Skipping Component Video\n");
continue;
}
@@ -797,13 +967,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
radeon_add_atom_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
else
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
@@ -860,9 +1030,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
- false, 0,
+ 0,
connector_object_id,
- &bios_connectors[i].hpd);
+ &bios_connectors[i].hpd,
+ &router);
}
}
@@ -1032,24 +1203,21 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
u8 frev, crev;
u16 data_offset;
+ /* sideport is AMD only */
+ if (rdev->family == CHIP_RS600)
+ return false;
+
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
switch (crev) {
case 1:
- /* AMD IGPS */
- if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
- if (igp_info->info.ulBootUpMemoryClock)
- return true;
- } else {
- if (igp_info->info.ucMemoryType & 0xf0)
- return true;
- }
+ if (igp_info->info.ulBootUpMemoryClock)
+ return true;
break;
case 2:
- if (igp_info->info_2.ucMemoryType & 0x0f)
+ if (igp_info->info_2.ulBootUpSidePortClock)
return true;
break;
default:
@@ -1095,7 +1263,7 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
(tmds_info->asMiscInfo[i].
ucPLL_VoltageSwing & 0xf) << 16;
- DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
+ DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
@@ -1171,6 +1339,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
+ int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1234,6 +1403,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
}
encoder->native_mode = lvds->native_mode;
+
+ if (encoder_enum == 2)
+ lvds->linkb = true;
+ else
+ lvds->linkb = false;
+
}
return lvds;
}
@@ -1524,7 +1699,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
thermal_controller_names[power_info->info.ucOverdriveThermalController],
power_info->info.ucOverdriveControllerAddress >> 1);
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = thermal_controller_names[power_info->info.
@@ -1789,14 +1964,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
}
/* add the i2c bus for thermal/fan chip */
- /* no support for internal controller yet */
if (controller->ucType > 0) {
- if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
- (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
- (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
@@ -1809,7 +1992,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = pp_lib_thermal_controller_names[controller->ucType];
@@ -1922,6 +2105,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
}
rdev->pm.power_state[state_index].flags = 0;
if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
@@ -2179,11 +2367,11 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("TV1 connected\n");
+ DRM_DEBUG_KMS("TV1 connected\n");
bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
} else {
- DRM_DEBUG("TV1 disconnected\n");
+ DRM_DEBUG_KMS("TV1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_TV1_MASK;
bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
@@ -2192,11 +2380,11 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CV connected\n");
+ DRM_DEBUG_KMS("CV connected\n");
bios_3_scratch |= ATOM_S3_CV_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
} else {
- DRM_DEBUG("CV disconnected\n");
+ DRM_DEBUG_KMS("CV disconnected\n");
bios_0_scratch &= ~ATOM_S0_CV_MASK;
bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
@@ -2205,12 +2393,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("LCD1 connected\n");
+ DRM_DEBUG_KMS("LCD1 connected\n");
bios_0_scratch |= ATOM_S0_LCD1;
bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
} else {
- DRM_DEBUG("LCD1 disconnected\n");
+ DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_LCD1;
bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
@@ -2219,12 +2407,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT1 connected\n");
+ DRM_DEBUG_KMS("CRT1 connected\n");
bios_0_scratch |= ATOM_S0_CRT1_COLOR;
bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
} else {
- DRM_DEBUG("CRT1 disconnected\n");
+ DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
@@ -2233,12 +2421,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT2 connected\n");
+ DRM_DEBUG_KMS("CRT2 connected\n");
bios_0_scratch |= ATOM_S0_CRT2_COLOR;
bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
} else {
- DRM_DEBUG("CRT2 disconnected\n");
+ DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
@@ -2247,12 +2435,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP1 connected\n");
+ DRM_DEBUG_KMS("DFP1 connected\n");
bios_0_scratch |= ATOM_S0_DFP1;
bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
} else {
- DRM_DEBUG("DFP1 disconnected\n");
+ DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP1;
bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
@@ -2261,12 +2449,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP2 connected\n");
+ DRM_DEBUG_KMS("DFP2 connected\n");
bios_0_scratch |= ATOM_S0_DFP2;
bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
} else {
- DRM_DEBUG("DFP2 disconnected\n");
+ DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP2;
bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
@@ -2275,12 +2463,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP3 connected\n");
+ DRM_DEBUG_KMS("DFP3 connected\n");
bios_0_scratch |= ATOM_S0_DFP3;
bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
} else {
- DRM_DEBUG("DFP3 disconnected\n");
+ DRM_DEBUG_KMS("DFP3 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP3;
bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
@@ -2289,12 +2477,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP4 connected\n");
+ DRM_DEBUG_KMS("DFP4 connected\n");
bios_0_scratch |= ATOM_S0_DFP4;
bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
} else {
- DRM_DEBUG("DFP4 disconnected\n");
+ DRM_DEBUG_KMS("DFP4 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP4;
bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
@@ -2303,12 +2491,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP5 connected\n");
+ DRM_DEBUG_KMS("DFP5 connected\n");
bios_0_scratch |= ATOM_S0_DFP5;
bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
} else {
- DRM_DEBUG("DFP5 disconnected\n");
+ DRM_DEBUG_KMS("DFP5 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP5;
bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 2c921373999..654787ec43f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -53,7 +53,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
return false;
rdev->bios = NULL;
- vram_base = drm_get_resource_start(rdev->ddev, 0);
+ vram_base = pci_resource_start(rdev->pdev, 0);
bios = ioremap(vram_base, size);
if (!bios) {
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index f64936cc4dd..5249af8931e 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -91,6 +91,85 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
return mclk;
}
+#ifdef CONFIG_OF
+/*
+ * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
+ * tree. Hopefully, the ATI OF driver is kind enough to fill these in.
+ */
+static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct device_node *dp = rdev->pdev->dev.of_node;
+ const u32 *val;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+
+ if (dp == NULL)
+ return false;
+ val = of_get_property(dp, "ATY,RefCLK", NULL);
+ if (!val || !*val) {
+ printk(KERN_WARNING "radeonfb: No ATY,RefCLK property!\n");
+ return false;
+ }
+ p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
+ p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ p2pll->reference_div = p1pll->reference_div;
+
+ /* These aren't in the device-tree */
+ if (rdev->family >= CHIP_R420) {
+ p1pll->pll_in_min = 100;
+ p1pll->pll_in_max = 1350;
+ p1pll->pll_out_min = 20000;
+ p1pll->pll_out_max = 50000;
+ p2pll->pll_in_min = 100;
+ p2pll->pll_in_max = 1350;
+ p2pll->pll_out_min = 20000;
+ p2pll->pll_out_max = 50000;
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ p1pll->pll_out_min = 12500;
+ p1pll->pll_out_max = 35000;
+ p2pll->pll_in_min = 40;
+ p2pll->pll_in_max = 500;
+ p2pll->pll_out_min = 12500;
+ p2pll->pll_out_max = 35000;
+ }
+
+ spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
+ spll->reference_div = mpll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+
+ val = of_get_property(dp, "ATY,SCLK", NULL);
+ if (val && *val)
+ rdev->clock.default_sclk = (*val) / 10;
+ else
+ rdev->clock.default_sclk =
+ radeon_legacy_get_engine_clock(rdev);
+
+ val = of_get_property(dp, "ATY,MCLK", NULL);
+ if (val && *val)
+ rdev->clock.default_mclk = (*val) / 10;
+ else
+ rdev->clock.default_mclk =
+ radeon_legacy_get_memory_clock(rdev);
+
+ DRM_INFO("Using device-tree clock info\n");
+
+ return true;
+}
+#else
+static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_OF */
+
void radeon_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -105,6 +184,8 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_atom_get_clock_info(dev);
else
ret = radeon_combios_get_clock_info(dev);
+ if (!ret)
+ ret = radeon_read_clocks_OF(dev);
if (ret) {
if (p1pll->reference_div < 2) {
@@ -246,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
mpll->max_feedback_div = 0xff;
mpll->best_vco = 0;
+ if (!rdev->clock.default_sclk)
+ rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+ if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+ rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+
}
/* 10 khz */
@@ -816,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
}
}
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
- uint32_t tmp;
-
- /* XXX make sure engine is idle */
-
- if (rdev->family < CHIP_RS600) {
- tmp = RREG32_PLL(RADEON_SCLK_CNTL);
- if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
- tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
- if ((rdev->family == CHIP_RV250)
- || (rdev->family == CHIP_RV280))
- tmp |=
- RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
- if ((rdev->family == CHIP_RV350)
- || (rdev->family == CHIP_RV380))
- tmp |= R300_SCLK_FORCE_VAP;
- if (rdev->family == CHIP_R420)
- tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
- WREG32_PLL(RADEON_SCLK_CNTL, tmp);
- } else if (rdev->family < CHIP_R600) {
- tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
- tmp |= AVIVO_CP_FORCEON;
- WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
- tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
- tmp |= AVIVO_E2_FORCEON;
- WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
- tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
- tmp |= AVIVO_IDCT_FORCEON;
- WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
- }
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- /* XXX make sure engine is idle */
-
- if (radeon_dynclks != -1) {
- if (radeon_dynclks) {
- if (rdev->asic->set_clock_gating)
- radeon_set_clock_gating(rdev, 1);
- }
- }
- radeon_apply_clock_quirks(rdev);
- return 0;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2417d7b06fd..a04b7a6ad95 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
/* from radeon_connector.c */
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
/* old legacy ATI BIOS routines */
@@ -480,9 +480,66 @@ radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
}
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
- int ddc_line)
+ enum radeon_combios_ddc ddc,
+ u32 clk_mask,
+ u32 data_mask)
{
struct radeon_i2c_bus_rec i2c;
+ int ddc_line = 0;
+
+ /* ddc id = mask reg
+ * DDC_NONE_DETECTED = none
+ * DDC_DVI = RADEON_GPIO_DVI_DDC
+ * DDC_VGA = RADEON_GPIO_VGA_DDC
+ * DDC_LCD = RADEON_GPIOPAD_MASK
+ * DDC_GPIO = RADEON_MDGPIO_MASK
+ * r1xx/r2xx
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_CRT2_DDC
+ * r3xx
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_DVI_DDC
+ * rs3xx/rs4xx
+ * DDC_MONID = RADEON_GPIOPAD_MASK
+ * DDC_CRT2 = RADEON_GPIO_MONID
+ */
+ switch (ddc) {
+ case DDC_NONE_DETECTED:
+ default:
+ ddc_line = 0;
+ break;
+ case DDC_DVI:
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ break;
+ case DDC_VGA:
+ ddc_line = RADEON_GPIO_VGA_DDC;
+ break;
+ case DDC_LCD:
+ ddc_line = RADEON_GPIOPAD_MASK;
+ break;
+ case DDC_GPIO:
+ ddc_line = RADEON_MDGPIO_MASK;
+ break;
+ case DDC_MONID:
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480)
+ ddc_line = RADEON_GPIOPAD_MASK;
+ else
+ ddc_line = RADEON_GPIO_MONID;
+ break;
+ case DDC_CRT2:
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480)
+ ddc_line = RADEON_GPIO_MONID;
+ else if (rdev->family >= CHIP_R300) {
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ ddc = DDC_DVI;
+ } else
+ ddc_line = RADEON_GPIO_CRT2_DDC;
+ break;
+ }
if (ddc_line == RADEON_GPIOPAD_MASK) {
i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
@@ -503,15 +560,6 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_clk_reg = RADEON_MDGPIO_Y;
i2c.y_data_reg = RADEON_MDGPIO_Y;
} else {
- i2c.mask_clk_mask = RADEON_GPIO_EN_1;
- i2c.mask_data_mask = RADEON_GPIO_EN_0;
- i2c.a_clk_mask = RADEON_GPIO_A_1;
- i2c.a_data_mask = RADEON_GPIO_A_0;
- i2c.en_clk_mask = RADEON_GPIO_EN_1;
- i2c.en_data_mask = RADEON_GPIO_EN_0;
- i2c.y_clk_mask = RADEON_GPIO_Y_1;
- i2c.y_data_mask = RADEON_GPIO_Y_0;
-
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
@@ -522,6 +570,26 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_reg = ddc_line;
}
+ if (clk_mask && data_mask) {
+ i2c.mask_clk_mask = clk_mask;
+ i2c.mask_data_mask = data_mask;
+ i2c.a_clk_mask = clk_mask;
+ i2c.a_data_mask = data_mask;
+ i2c.en_clk_mask = clk_mask;
+ i2c.en_data_mask = data_mask;
+ i2c.y_clk_mask = clk_mask;
+ i2c.y_data_mask = data_mask;
+ } else {
+ i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+ i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
+ }
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -599,7 +667,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
break;
}
i2c.mm_i2c = false;
- i2c.i2c_id = 0;
+
+ i2c.i2c_id = ddc;
i2c.hpd = RADEON_HPD_NONE;
if (ddc_line)
@@ -610,6 +679,62 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
return i2c;
}
+void radeon_combios_i2c_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct radeon_i2c_bus_rec i2c;
+
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
+
+ i2c.valid = true;
+ i2c.hw_capable = true;
+ i2c.mm_i2c = true;
+ i2c.i2c_id = 0xa0;
+ rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
+
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) {
+ u16 offset;
+ u8 id, blocks, clk, data;
+ int i;
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+ clk, data);
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
+ break;
+ }
+ }
+ }
+
+ } else if (rdev->family >= CHIP_R300) {
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+ } else {
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
+ }
+}
+
bool radeon_combios_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -693,6 +818,10 @@ bool radeon_combios_sideport_present(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
u16 igp_info;
+ /* sideport is AMD only */
+ if (rdev->family == CHIP_RS400)
+ return false;
+
igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
if (igp_info) {
@@ -1205,7 +1334,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
RBIOS32(tmds_info + i * 10 + 0x08);
tmds->tmds_pll[i].freq =
RBIOS16(tmds_info + i * 10 + 0x10);
- DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+ DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
}
@@ -1223,7 +1352,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
stride += 10;
else
stride += 6;
- DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+ DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
}
@@ -1243,8 +1372,8 @@ bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
struct radeon_i2c_bus_rec i2c_bus;
/* default for macs */
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
/* XXX some macs have duallink chips */
switch (rdev->mode_info.connector_table) {
@@ -1265,47 +1394,16 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
uint16_t offset;
- uint8_t ver, id, blocks, clk, data;
- int i;
+ uint8_t ver;
enum radeon_combios_ddc gpio;
struct radeon_i2c_bus_rec i2c_bus;
tmds->i2c_bus = NULL;
if (rdev->flags & RADEON_IS_IGP) {
- offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
- if (offset) {
- ver = RBIOS8(offset);
- DRM_INFO("GPIO Table revision: %d\n", ver);
- blocks = RBIOS8(offset + 2);
- for (i = 0; i < blocks; i++) {
- id = RBIOS8(offset + 3 + (i * 5) + 0);
- if (id == 136) {
- clk = RBIOS8(offset + 3 + (i * 5) + 3);
- data = RBIOS8(offset + 3 + (i * 5) + 4);
- i2c_bus.valid = true;
- i2c_bus.mask_clk_mask = (1 << clk);
- i2c_bus.mask_data_mask = (1 << data);
- i2c_bus.a_clk_mask = (1 << clk);
- i2c_bus.a_data_mask = (1 << data);
- i2c_bus.en_clk_mask = (1 << clk);
- i2c_bus.en_data_mask = (1 << data);
- i2c_bus.y_clk_mask = (1 << clk);
- i2c_bus.y_data_mask = (1 << data);
- i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
- i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
- i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
- i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
- i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
- i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
- i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
- i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- tmds->dvo_chip = DVO_SIL164;
- tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
- break;
- }
- }
- }
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
} else {
offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
if (offset) {
@@ -1314,37 +1412,15 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
tmds->slave_addr = RBIOS8(offset + 4 + 2);
tmds->slave_addr >>= 1; /* 7 bit addressing */
gpio = RBIOS8(offset + 4 + 3);
- switch (gpio) {
- case DDC_MONID:
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_DVI:
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_VGA:
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_CRT2:
- /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
- if (rdev->family >= CHIP_R300)
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- else
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_LCD: /* MM i2c */
+ if (gpio == DDC_LCD) {
+ /* MM i2c */
i2c_bus.valid = true;
i2c_bus.hw_capable = true;
i2c_bus.mm_i2c = true;
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- default:
- DRM_ERROR("Unsupported gpio %d\n", gpio);
- break;
- }
+ i2c_bus.i2c_id = 0xa0;
+ } else
+ i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
}
}
@@ -1409,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
/* PowerMac8,1 ? */
/* imac g5 isight */
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+ } else if ((rdev->pdev->device == 0x4a48) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4a48)) {
+ /* Mac X800 */
+ rdev->mode_info.connector_table = CT_MAC_X800;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -1426,10 +1507,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
/* these are the most common settings */
if (rdev->flags & RADEON_SINGLE_CRTC) {
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1441,10 +1522,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, 0);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1456,10 +1537,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1471,15 +1552,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
} else {
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1492,10 +1573,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1512,7 +1593,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1528,10 +1609,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (ibook)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1540,10 +1621,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* VGA - TV DAC */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1555,7 +1636,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1569,10 +1650,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1581,15 +1662,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - primary dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1604,7 +1685,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1618,10 +1699,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1630,15 +1711,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - primary dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1652,7 +1733,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1666,10 +1747,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (powerbook vga)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1678,10 +1759,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1693,7 +1774,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1707,15 +1788,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (mini external tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1730,7 +1811,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1744,15 +1825,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1766,7 +1847,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1780,10 +1861,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
rdev->mode_info.connector_table);
/* DVI-D - int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -1792,10 +1873,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
&hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1807,7 +1888,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1821,10 +1902,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (emac)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1833,10 +1914,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_VGA,
&hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1848,7 +1929,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1862,10 +1943,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (rn50-power)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1873,10 +1954,10 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1885,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_VGA,
&hpd);
break;
+ case CT_MAC_X800:
+ DRM_INFO("Connector Table: %d (mac x800)\n",
+ rdev->mode_info.connector_table);
+ /* DVI - primary dac, internal tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* DVI - tv dac, dvo */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -1903,31 +2026,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
struct radeon_i2c_bus_rec *ddc_i2c,
struct radeon_hpd *hpd)
{
- struct radeon_device *rdev = dev->dev_private;
-
- /* XPRESS DDC quirks */
- if ((rdev->family == CHIP_RS400 ||
- rdev->family == CHIP_RS480) &&
- ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- else if ((rdev->family == CHIP_RS400 ||
- rdev->family == CHIP_RS480) &&
- ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
- *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
- ddc_i2c->mask_clk_mask = (0x20 << 8);
- ddc_i2c->mask_data_mask = 0x80;
- ddc_i2c->a_clk_mask = (0x20 << 8);
- ddc_i2c->a_data_mask = 0x80;
- ddc_i2c->en_clk_mask = (0x20 << 8);
- ddc_i2c->en_data_mask = 0x80;
- ddc_i2c->y_clk_mask = (0x20 << 8);
- ddc_i2c->y_data_mask = 0x80;
- }
-
- /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
- if ((rdev->family >= CHIP_R300) &&
- ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
@@ -2031,27 +2129,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector = (tmp >> 12) & 0xf;
ddc_type = (tmp >> 8) & 0xf;
- switch (ddc_type) {
- case DDC_MONID:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- break;
- case DDC_DVI:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- break;
- case DDC_VGA:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
- break;
- case DDC_CRT2:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
- break;
- default:
- ddc_i2c.valid = false;
- break;
- }
+ ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
@@ -2078,7 +2156,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
else
devices = ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2092,7 +2170,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tmp & 0x1) {
devices = ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2100,7 +2178,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices = ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2120,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tmp & 0x1) {
devices |= ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2128,7 +2206,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices |= ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2137,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
@@ -2146,7 +2224,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
@@ -2171,7 +2249,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2184,7 +2262,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
@@ -2208,20 +2286,20 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
uint16_t tmds_info =
combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
- DRM_DEBUG("Found DFP table, assuming DVI connector\n");
+ DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
@@ -2234,14 +2312,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
uint16_t crt_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
- DRM_DEBUG("Found CRT table, assuming VGA connector\n");
+ DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
if (crt_info) {
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
@@ -2251,7 +2329,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
CONNECTOR_OBJECT_ID_VGA,
&hpd);
} else {
- DRM_DEBUG("No connector info found\n");
+ DRM_DEBUG_KMS("No connector info found\n");
return false;
}
}
@@ -2266,7 +2344,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
COMBIOS_LCD_DDC_INFO_TABLE);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -2274,73 +2352,28 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (lcd_ddc_info) {
ddc_type = RBIOS8(lcd_ddc_info + 2);
switch (ddc_type) {
- case DDC_MONID:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_MONID);
- break;
- case DDC_DVI:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_DVI_DDC);
- break;
- case DDC_VGA:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_VGA_DDC);
- break;
- case DDC_CRT2:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_CRT2_DDC);
- break;
case DDC_LCD:
ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIOPAD_MASK);
- ddc_i2c.mask_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.mask_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.a_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.a_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.en_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.en_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.y_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.y_data_mask =
- RBIOS32(lcd_ddc_info + 7);
+ combios_setup_i2c_bus(rdev,
+ DDC_LCD,
+ RBIOS32(lcd_ddc_info + 3),
+ RBIOS32(lcd_ddc_info + 7));
+ radeon_i2c_add(rdev, &ddc_i2c, "LCD");
break;
case DDC_GPIO:
ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_MDGPIO_MASK);
- ddc_i2c.mask_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.mask_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.a_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.a_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.en_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.en_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.y_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.y_data_mask =
- RBIOS32(lcd_ddc_info + 7);
+ combios_setup_i2c_bus(rdev,
+ DDC_GPIO,
+ RBIOS32(lcd_ddc_info + 3),
+ RBIOS32(lcd_ddc_info + 7));
+ radeon_i2c_add(rdev, &ddc_i2c, "LCD");
break;
default:
- ddc_i2c.valid = false;
+ ddc_i2c =
+ combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
break;
}
- DRM_DEBUG("LCD DDC Info Table found!\n");
+ DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
} else
ddc_i2c.valid = false;
@@ -2365,7 +2398,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
hpd.hpd = RADEON_HPD_NONE;
ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
@@ -2941,9 +2974,8 @@ static void combios_write_ram_size(struct drm_device *dev)
if (rev < 3) {
mem_cntl = RBIOS32(offset + 1);
mem_size = RBIOS16(offset + 5);
- if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
- ((dev->pdev->device != 0x515e)
- && (dev->pdev->device != 0x5969)))
+ if ((rdev->family < CHIP_R200) &&
+ !ASIC_IS_RN50(rdev))
WREG32(RADEON_MEM_CNTL, mem_cntl);
}
}
@@ -2954,10 +2986,8 @@ static void combios_write_ram_size(struct drm_device *dev)
if (offset) {
rev = RBIOS8(offset - 1);
if (rev < 1) {
- if (((rdev->flags & RADEON_FAMILY_MASK) <
- CHIP_R200)
- && ((dev->pdev->device != 0x515e)
- && (dev->pdev->device != 0x5969))) {
+ if ((rdev->family < CHIP_R200)
+ && !ASIC_IS_RN50(rdev)) {
int ram = 0;
int mem_addr_mapping = 0;
@@ -3121,14 +3151,14 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("TV1 connected\n");
+ DRM_DEBUG_KMS("TV1 connected\n");
/* fix me */
bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
bios_5_scratch |= RADEON_TV1_ON;
bios_5_scratch |= RADEON_ACC_REQ_TV1;
} else {
- DRM_DEBUG("TV1 disconnected\n");
+ DRM_DEBUG_KMS("TV1 disconnected\n");
bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_TV1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
@@ -3137,12 +3167,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("LCD1 connected\n");
+ DRM_DEBUG_KMS("LCD1 connected\n");
bios_4_scratch |= RADEON_LCD1_ATTACHED;
bios_5_scratch |= RADEON_LCD1_ON;
bios_5_scratch |= RADEON_ACC_REQ_LCD1;
} else {
- DRM_DEBUG("LCD1 disconnected\n");
+ DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
bios_5_scratch &= ~RADEON_LCD1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
@@ -3151,12 +3181,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT1 connected\n");
+ DRM_DEBUG_KMS("CRT1 connected\n");
bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
bios_5_scratch |= RADEON_CRT1_ON;
bios_5_scratch |= RADEON_ACC_REQ_CRT1;
} else {
- DRM_DEBUG("CRT1 disconnected\n");
+ DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_CRT1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
@@ -3165,12 +3195,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT2 connected\n");
+ DRM_DEBUG_KMS("CRT2 connected\n");
bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
bios_5_scratch |= RADEON_CRT2_ON;
bios_5_scratch |= RADEON_ACC_REQ_CRT2;
} else {
- DRM_DEBUG("CRT2 disconnected\n");
+ DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_CRT2_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
@@ -3179,12 +3209,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP1 connected\n");
+ DRM_DEBUG_KMS("DFP1 connected\n");
bios_4_scratch |= RADEON_DFP1_ATTACHED;
bios_5_scratch |= RADEON_DFP1_ON;
bios_5_scratch |= RADEON_ACC_REQ_DFP1;
} else {
- DRM_DEBUG("DFP1 disconnected\n");
+ DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
bios_5_scratch &= ~RADEON_DFP1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
@@ -3193,12 +3223,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP2 connected\n");
+ DRM_DEBUG_KMS("DFP2 connected\n");
bios_4_scratch |= RADEON_DFP2_ATTACHED;
bios_5_scratch |= RADEON_DFP2_ON;
bios_5_scratch |= RADEON_ACC_REQ_DFP2;
} else {
- DRM_DEBUG("DFP2 disconnected\n");
+ DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
bios_5_scratch &= ~RADEON_DFP2_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
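All of the radeon_combios.c hunks above converge on one pattern: rather than switching on each DDC_* value and creating a private adapter with radeon_i2c_create(), callers hand the BIOS ddc_type (plus optional GPIO masks for the DDC_LCD/DDC_GPIO cases) to combios_setup_i2c_bus() and then resolve the returned record against buses the driver registers once at init. The sketch below only restates that calling convention with names taken from the diff; the quirk handling inside combios_setup_i2c_bus() and the surrounding variables (ddc_type, lcd_ddc_info, the tmds/connector consumers) are assumed to be in scope as in the original file.

	struct radeon_i2c_bus_rec rec;

	/* plain DDC lines: no explicit GPIO masks, so pass 0/0 */
	rec = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);

	/* LCD/GPIO DDC: masks come from the LCD DDC info table, and the
	 * resulting bus is additionally registered by name */
	if (ddc_type == DDC_LCD) {
		rec = combios_setup_i2c_bus(rdev, DDC_LCD,
					    RBIOS32(lcd_ddc_info + 3),
					    RBIOS32(lcd_ddc_info + 7));
		radeon_i2c_add(rdev, &rec, "LCD");
	}

	/* consumers no longer own an adapter; they look up the shared one */
	if (rec.valid)
		tmds->i2c_bus = radeon_i2c_lookup(rdev, &rec);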
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index adccbc2c202..ecc1a8fafbf 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -214,7 +214,7 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
- DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+ DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
} else if (native_mode->hdisplay != 0 &&
native_mode->vdisplay != 0) {
/* mac laptops without an edid */
@@ -226,7 +226,7 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
- DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
+ DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
@@ -312,6 +312,20 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
}
}
+ if (property == rdev->mode_info.underscan_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_type != val) {
+ radeon_encoder->underscan_type = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.tv_std_property) {
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
if (!encoder) {
@@ -467,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -504,8 +519,6 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
@@ -522,7 +535,7 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
struct radeon_encoder *radeon_encoder;
enum radeon_rmx_type rmx_type;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (property != dev->mode_config.scaling_mode_property)
return 0;
@@ -582,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
@@ -679,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -736,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
* we have to check if this analog encoder is shared with anyone else (TV)
* if its shared we have to set the other connector to disconnected.
*/
-static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
@@ -941,8 +957,6 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
@@ -962,32 +976,35 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
- u8 sink_type;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- sink_type = radeon_dp_getsinktype(radeon_connector);
- if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- if (radeon_dp_getdpcd(radeon_connector)) {
- radeon_dig_connector->dp_sink_type = sink_type;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ /* eDP is always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
- }
} else {
- if (radeon_ddc_probe(radeon_connector)) {
- radeon_dig_connector->dp_sink_type = sink_type;
- ret = connector_status_connected;
+ radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+ if (radeon_ddc_probe(radeon_connector))
+ ret = connector_status_connected;
}
}
+ radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -1027,10 +1044,10 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb,
uint32_t igp_lane_info,
uint16_t connector_object_id,
- struct radeon_hpd *hpd)
+ struct radeon_hpd *hpd,
+ struct radeon_router *router)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -1039,10 +1056,16 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
+ /* if the user selected tv=0 don't try and add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1055,6 +1078,11 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
+ if (radeon_connector->router_bus && router->valid &&
+ (radeon_connector->router.router_id == router->router_id)) {
+ radeon_connector->shared_ddc = false;
+ shared_ddc = false;
+ }
}
}
@@ -1069,12 +1097,18 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->router = *router;
+ if (router->valid) {
+ radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
+ if (!radeon_connector->router_bus)
+ goto failed;
+ }
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1082,13 +1116,15 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1096,19 +1132,20 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1116,6 +1153,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_AUTO);
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1128,19 +1169,22 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_AUTO);
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
@@ -1148,7 +1192,6 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1161,10 +1204,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
- if (connector_type == DRM_MODE_CONNECTOR_eDP)
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
- else
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1172,33 +1212,36 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_AUTO);
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- if (radeon_tv == 1) {
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_atombios_get_tv_info(rdev));
- }
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_atombios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1209,7 +1252,7 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
}
- if (hpd->hpd == RADEON_HPD_NONE) {
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
@@ -1220,8 +1263,6 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
failed:
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1240,10 +1281,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
+ /* if the user selected tv=0 don't try and add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1268,7 +1315,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1276,13 +1323,15 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1290,13 +1339,15 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1311,30 +1362,30 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- if (radeon_tv == 1) {
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- /* RS400,RC410,RS480 chipset seems to report a lot
- * of false positive on load detect, we haven't yet
- * found a way to make load detect reliable on those
- * chipset, thus just disable it for TV.
- */
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
- radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_combios_get_tv_info(rdev));
- }
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ /* RS400,RC410,RS480 chipset seems to report a lot
+ * of false positive on load detect, we haven't yet
+ * found a way to make load detect reliable on those
+ * chipset, thus just disable it for TV.
+ */
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+ radeon_connector->dac_load_detect = false;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ radeon_connector->dac_load_detect);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_combios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1345,7 +1396,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
}
- if (hpd->hpd == RADEON_HPD_NONE) {
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
@@ -1355,8 +1406,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
return;
failed:
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}
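The radeon_connectors.c changes follow the same ownership model: a connector no longer creates or destroys its own DDC adapter, it looks up the shared, device-owned bus (and, on boards with a DDC mux, the router's control bus) and leaves teardown to the device. Analog connector types drop HPD entirely and fall back to connect-polling. A condensed sketch of that flow, with names as they appear in the diff and the failed label assumed to unwind as in the original functions:

	/* resolve the shared DDC bus instead of creating a private one */
	if (i2c_bus->valid) {
		radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
		if (!radeon_connector->ddc_bus)
			goto failed;
	}

	/* boards with a DDC mux also resolve the router's own i2c bus; the
	 * mux port is then selected before each EDID read elsewhere */
	radeon_connector->router = *router;
	if (router->valid) {
		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
		if (!radeon_connector->router_bus)
			goto failed;
	}

	/* analog connectors never get HPD; polling covers detection instead */
	radeon_connector->hpd.hpd = RADEON_HPD_NONE;
	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE && i2c_bus->valid)
		connector->polled = DRM_CONNECTOR_POLL_CONNECT;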
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 2f042a3c0e6..eb6b9eed734 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2120,8 +2120,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
else
dev_priv->flags |= RADEON_IS_PCI;
- ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
- drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+ ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
+ pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
if (ret != 0)
return ret;
@@ -2194,9 +2194,9 @@ int radeon_driver_firstopen(struct drm_device *dev)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
- dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
+ dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
ret = drm_addmap(dev, dev_priv->fb_aper_offset,
- drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+ pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
_DRM_WRITE_COMBINING, &map);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ae0fb7356e6..fcc79b5d22d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -72,7 +72,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->relocs[i].gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
- return -EINVAL;
+ return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 4eb67c0e099..5731fc9b1ae 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -170,7 +170,7 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index dd279da9054..256d204a6d2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
- u32 sclk, mclk;
+ u32 sclk = rdev->pm.current_sclk;
+ u32 mclk = rdev->pm.current_mclk;
- if (rdev->flags & RADEON_IS_IGP) {
- sclk = radeon_get_engine_clock(rdev);
- mclk = rdev->clock.default_mclk;
-
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+ /* sclk/mclk in Mhz */
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+ if (rdev->flags & RADEON_IS_IGP) {
a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
- } else {
- sclk = radeon_get_engine_clock(rdev);
- mclk = radeon_get_memory_clock(rdev);
-
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
@@ -347,7 +337,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (!rdev->dummy_page.addr) {
+ if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
+ dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
return -ENOMEM;
@@ -415,6 +406,22 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ WREG32_IO(reg*4, val);
+}
+
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg*4);
+ return r;
+}
+
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
@@ -427,6 +434,15 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->dev = rdev->ddev;
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (rdev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
atom_card_info->mc_read = cail_mc_read;
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;
@@ -573,7 +589,7 @@ int radeon_device_init(struct radeon_device *rdev,
struct pci_dev *pdev,
uint32_t flags)
{
- int r;
+ int r, i;
int dma_bits;
rdev->shutdown = false;
@@ -650,8 +666,8 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
- rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
- rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
+ rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+ rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
if (rdev->rmmio == NULL) {
return -ENOMEM;
@@ -659,6 +675,17 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+ /* io port mapping */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
+ rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
+ rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
+ break;
+ }
+ }
+ if (rdev->rio_mem == NULL)
+ DRM_ERROR("Unable to find PCI I/O BAR\n");
+
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -701,6 +728,9 @@ void radeon_device_fini(struct radeon_device *rdev)
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
+ if (rdev->rio_mem)
+ pci_iounmap(rdev->pdev, rdev->rio_mem);
+ rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
}
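radeon_device.c wires the ATOM interpreter's IIO callbacks to a real PCI I/O BAR when one exists: the BAR is found by scanning pci_resource_flags() for IORESOURCE_IO, mapped with pci_iomap(), and released again in radeon_device_fini(); without an I/O BAR the ioreg callbacks simply alias the MMIO accessors. The fragment below restates that registration with names from the diff; the reg*4 scaling in cail_ioreg_read/write mirrors the existing MMIO callbacks and is assumed to reflect ATOM passing dword register indices.

	/* prefer the mapped I/O BAR for ATOM IIO, fall back to MMIO */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}

	/* teardown releases the I/O mapping alongside the MMIO one */
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;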
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8154cdf796e..b92d2f2fcbe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -42,7 +42,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
int i;
- DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
@@ -75,7 +75,7 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
int i;
- DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
@@ -161,17 +161,13 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
}
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t start, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int i;
-
- if (size != 256) {
- return;
- }
+ int end = (start + size > 256) ? 256 : start + size, i;
/* userspace palettes are always correct as is */
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
@@ -319,6 +315,10 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
+ if (radeon_connector->router_bus)
+ DRM_INFO(" DDC Router 0x%x/0x%x\n",
+ radeon_connector->router.mux_control_pin,
+ radeon_connector->router.mux_state);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP5_SUPPORT)
DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+ DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_TV1_SUPPORT)
DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -395,6 +397,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.valid)
+ radeon_router_select_port(radeon_connector);
+
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
@@ -425,6 +431,10 @@ static int radeon_ddc_dump(struct drm_connector *connector)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret = 0;
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.valid)
+ radeon_router_select_port(radeon_connector);
+
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
@@ -469,7 +479,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint32_t post_div;
u32 pll_out_min, pll_out_max;
- DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+ DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
if (pll->flags & RADEON_PLL_IS_LCD) {
@@ -558,15 +568,17 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
current_freq = radeon_div(tmp, ref_div * post_div);
if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
- error = freq - current_freq;
- error = error < 0 ? 0xffffffff : error;
+ if (freq < current_freq)
+ error = 0xffffffff;
+ else
+ error = freq - current_freq;
} else
error = abs(current_freq - freq);
vco_diff = abs(vco - best_vco);
if ((best_vco == 0 && error < best_error) ||
(best_vco != 0 &&
- (error < best_error - 100 ||
+ ((best_error > 100 && error < best_error - 100) ||
(abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
best_post_div = post_div;
best_ref_div = ref_div;
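
Both changes in this hunk guard unsigned subtractions: freq - current_freq wraps to a huge value whenever current_freq exceeds freq, and best_error - 100 wraps whenever best_error is below 100, which is why the comparisons are now explicit. A small demonstration of the wrap-around being avoided:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq = 100000, current_freq = 100050;
	uint32_t best_error = 40, error = 10;

	/* naive form: wraps to ~4 billion instead of flagging "too low" */
	printf("freq - current_freq = %u\n", freq - current_freq);

	/* guarded form used by the patch */
	uint32_t e = (freq < current_freq) ? 0xffffffffu : freq - current_freq;
	printf("guarded error = %u\n", e);

	/* best_error - 100 would also wrap, so test best_error > 100 first */
	if (best_error > 100 && error < best_error - 100)
		printf("clearly better\n");
	else
		printf("within 100 units, compare VCO distance instead\n");
	return 0;
}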
@@ -803,7 +815,7 @@ done:
*ref_div_p = ref_div;
*post_div_p = post_div;
- DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+ DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
}
void radeon_compute_pll(struct radeon_pll *pll,
@@ -831,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- if (radeon_fb->obj)
+ if (radeon_fb->obj) {
drm_gem_object_unreference_unlocked(radeon_fb->obj);
+ }
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -874,13 +887,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handle);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
+ if (radeon_fb == NULL)
+ return ERR_PTR(-ENOMEM);
radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
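
Returning ERR_PTR(-ENOENT) / ERR_PTR(-ENOMEM) instead of NULL lets callers see which failure occurred. The kernel encodes small negative errno values directly in the pointer; a minimal user-space re-creation of that convention (the real macros live in include/linux/err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical stand-in for the framebuffer-create path */
static void *create_framebuffer(int have_obj)
{
	if (!have_obj)
		return ERR_PTR(-ENOENT);	/* no GEM object for the handle */
	return ERR_PTR(-ENOMEM);		/* pretend the allocation failed */
}

int main(void)
{
	void *fb = create_framebuffer(0);

	if (IS_ERR(fb))
		printf("framebuffer create failed: %ld\n", PTR_ERR(fb));
	return 0;
}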
@@ -919,6 +931,12 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ TV_STD_SECAM, "secam" },
};
+static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+{ { UNDERSCAN_OFF, "off" },
+ { UNDERSCAN_ON, "on" },
+ { UNDERSCAN_AUTO, "auto" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
@@ -972,6 +990,18 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
radeon_tv_std_enum_list[i].name);
}
+ sz = ARRAY_SIZE(radeon_underscan_enum_list);
+ rdev->mode_info.underscan_property =
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_ENUM,
+ "underscan", sz);
+ for (i = 0; i < sz; i++) {
+ drm_property_add_enum(rdev->mode_info.underscan_property,
+ i,
+ radeon_underscan_enum_list[i].type,
+ radeon_underscan_enum_list[i].name);
+ }
+
return 0;
}
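
The new "underscan" connector property is an enum exposed to userspace with three named values. A sketch of the same type/name table plus a reverse lookup, independent of the DRM property API (the helper name is invented):

#include <stdio.h>
#include <string.h>

enum underscan_type { UNDERSCAN_OFF, UNDERSCAN_ON, UNDERSCAN_AUTO };

static const struct { enum underscan_type type; const char *name; } underscan_list[] = {
	{ UNDERSCAN_OFF,  "off"  },
	{ UNDERSCAN_ON,   "on"   },
	{ UNDERSCAN_AUTO, "auto" },
};

static int underscan_from_name(const char *name, enum underscan_type *out)
{
	size_t i;

	for (i = 0; i < sizeof(underscan_list) / sizeof(underscan_list[0]); i++) {
		if (!strcmp(underscan_list[i].name, name)) {
			*out = underscan_list[i].type;
			return 0;
		}
	}
	return -1;	/* unknown property value */
}

int main(void)
{
	enum underscan_type t;

	if (!underscan_from_name("auto", &t))
		printf("underscan = %d\n", t);
	return 0;
}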
@@ -1020,6 +1050,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
+ /* init i2c buses */
+ radeon_i2c_init(rdev);
+
/* check combios for a valid hardcoded EDID - Sun servers */
if (!rdev->is_atom_bios) {
/* check for hardcoded EDID in BIOS */
@@ -1060,6 +1093,20 @@ void radeon_modeset_fini(struct radeon_device *rdev)
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
+}
+
+static bool is_hdtv_mode(struct drm_display_mode *mode)
+{
+ /* try and guess if this is a tv or a monitor */
+ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+ (mode->vdisplay == 576) || /* 576p */
+ (mode->vdisplay == 720) || /* 720p */
+ (mode->vdisplay == 1080)) /* 1080p */
+ return true;
+ else
+ return false;
}
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
@@ -1067,15 +1114,26 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_encoder *radeon_encoder;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
bool first = true;
+ u32 src_v = 1, dst_v = 1;
+ u32 src_h = 1, dst_h = 1;
+
+ radeon_crtc->h_border = 0;
+ radeon_crtc->v_border = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
if (encoder->crtc != crtc)
continue;
+ radeon_encoder = to_radeon_encoder(encoder);
+ connector = radeon_get_connector_for_encoder(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
if (first) {
/* set scaling */
if (radeon_encoder->rmx_type == RMX_OFF)
@@ -1089,27 +1147,47 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
+ src_v = crtc->mode.vdisplay;
+ dst_v = radeon_crtc->native_mode.vdisplay;
+ src_h = crtc->mode.hdisplay;
+ dst_h = radeon_crtc->native_mode.hdisplay;
+
+ /* fix up for overscan on hdmi */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
+ ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
+ ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
+ drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ is_hdtv_mode(mode)))) {
+ radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+ radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+ radeon_crtc->rmx_type = RMX_FULL;
+ src_v = crtc->mode.vdisplay;
+ dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
+ src_h = crtc->mode.hdisplay;
+ dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
+ }
first = false;
} else {
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
/* WARNING: Right now this can't happen but
* in the future we need to check that scaling
- * are consistent accross different encoder
+ * are consistent across different encoder
* (ie all encoder can work with the same
* scaling).
*/
- DRM_ERROR("Scaling not consistent accross encoder.\n");
+ DRM_ERROR("Scaling not consistent across encoder.\n");
return false;
}
}
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
- a.full = dfixed_const(crtc->mode.vdisplay);
- b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+ a.full = dfixed_const(src_v);
+ b.full = dfixed_const(dst_v);
radeon_crtc->vsc.full = dfixed_div(a, b);
- a.full = dfixed_const(crtc->mode.hdisplay);
- b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+ a.full = dfixed_const(src_h);
+ b.full = dfixed_const(dst_h);
radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
radeon_crtc->vsc.full = dfixed_const(1);
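
For HDMI TVs this path shrinks the active area by a border of (dimension / 32 + 16) pixels per side and then derives the scale ratios from source over destination size. The arithmetic for a 1920x1080 mode as a stand-alone sketch (the driver's 20.12 fixed-point dfixed_* helpers are replaced by doubles):

#include <stdio.h>

int main(void)
{
	unsigned hdisplay = 1920, vdisplay = 1080;

	unsigned h_border = (hdisplay >> 5) + 16;	/* 76 pixels per side */
	unsigned v_border = (vdisplay >> 5) + 16;	/* 49 lines per side  */

	unsigned src_h = hdisplay, dst_h = hdisplay - 2 * h_border;
	unsigned src_v = vdisplay, dst_v = vdisplay - 2 * v_border;

	/* the driver stores these ratios as fixed point via dfixed_div() */
	double hsc = (double)src_h / dst_h;
	double vsc = (double)src_v / dst_v;

	printf("active %ux%u, hsc=%.3f vsc=%.3f\n", dst_h, dst_v, hsc, vsc);
	return 0;
}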
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e166fe4d7c3..795403b0e2c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -46,9 +46,10 @@
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
* - 2.4.0 - add crtc id query
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
+ * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 5
+#define KMS_DRIVER_MINOR 6
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -238,7 +239,7 @@ static struct drm_driver kms_driver;
static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &kms_driver);
+ return drm_get_pci_dev(pdev, ent, &kms_driver);
}
static void
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index e0b30b264c2..2c293e8304d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
}
uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
break;
case 2: /* dac b */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
else {
/*if (rdev->family == CHIP_R200)
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else*/
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
}
break;
case 3: /* external dac */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
}
break;
case ATOM_DEVICE_LCD1_SUPPORT:
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
break;
case ATOM_DEVICE_DFP1_SUPPORT:
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
break;
case ATOM_DEVICE_LCD2_SUPPORT:
case ATOM_DEVICE_DFP2_SUPPORT:
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+ ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
case ATOM_DEVICE_DFP3_SUPPORT:
- ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
break;
}
@@ -205,14 +205,14 @@ void radeon_encoder_set_active_device(struct drm_encoder *encoder)
if (connector->encoder == encoder) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
- DRM_DEBUG("setting active device to %08x from %08x %08x for encoder %d\n",
+ DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
radeon_encoder->active_device, radeon_encoder->devices,
radeon_connector->devices, encoder->encoder_type);
}
}
}
-static struct drm_connector *
+struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-static struct radeon_connector_atom_dig *
-radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- if (!rdev->is_atom_bios)
- return NULL;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return NULL;
-
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_connector->con_priv)
- return NULL;
-
- dig_connector = radeon_connector->con_priv;
-
- return dig_connector;
-}
-
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- if (!dig || !dig_connector)
+ if (!dig)
return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else if (radeon_connector->use_digital)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ }
- if (!dig || !dig_connector)
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
return;
memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dig_connector->dp_clock == 270000)
+ if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.v1.ucLaneNum = dig_connector->dp_lane_count;
+ args.v1.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ int igp_lane_info = 0;
- if (!dig || !dig_connector)
- return;
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
- connector = radeon_get_connector_for_encoder(encoder);
- radeon_connector = to_radeon_connector(connector);
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ igp_lane_info = dig_connector->igp_lane_info;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
+ return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
- break;
- }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+ break;
}
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = radeon_connector->connector_object_id;
+ args.v1.usInitInfo = connector_object_id;
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock =
- cpu_to_le16(dig_connector->dp_clock / 10);
+ cpu_to_le16(dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
- args.v3.ucLaneNum = dig_connector->dp_lane_count;
+ args.v3.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
- if (dig_connector->linkb) {
+ if (dig->linkb) {
args.v3.acConfig.ucLinkSel = 1;
args.v3.acConfig.ucEncoderSel = 1;
}
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
- if (dig_connector->igp_lane_info & 0x1)
+ if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (dig_connector->igp_lane_info & 0x2)
+ else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (dig_connector->igp_lane_info & 0x4)
+ else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (dig_connector->igp_lane_info & 0x8)
+ else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
- if (dig_connector->igp_lane_info & 0x3)
+ if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (dig_connector->igp_lane_info & 0xc)
+ else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1021,7 +1027,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
memset(&args, 0, sizeof(args));
- DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
switch (radeon_encoder->encoder_id) {
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- if (!ASIC_IS_DCE4(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (!ASIC_IS_DCE4(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
uint32_t dig_enc_in_use = 0;
if (ASIC_IS_DCE4(rdev)) {
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
-
+ dig = radeon_encoder->enc_priv;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 5;
else
return 4;
@@ -1484,7 +1486,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
uint32_t bios_0_scratch;
if (!atombios_dac_load_detect(encoder, connector)) {
- DRM_DEBUG("detect returned false \n");
+ DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
@@ -1493,7 +1495,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
else
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
- DRM_DEBUG("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
+ int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
if (!dig)
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
dig->coherent_mode = true;
dig->dig_encoder = -1;
+ if (encoder_enum == 2)
+ dig->linkb = true;
+ else
+ dig->linkb = false;
+
return dig;
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_id == encoder_id) {
+ if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1691,9 +1699,11 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
+ radeon_encoder->underscan_type = UNDERSCAN_OFF;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -1707,6 +1717,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_encoder->underscan_type = UNDERSCAN_AUTO;
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
@@ -1736,6 +1748,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_encoder->underscan_type = UNDERSCAN_AUTO;
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
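
Encoders are now keyed by the full ATOM object enum; the object id and enum id are unpacked with mask/shift, and enum id 2 marks link B of a UNIPHY pair. A small decode sketch; the mask and shift values are copied from memory of the ATOM ObjectID header and should be treated as assumptions, and the sample enum is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define OBJECT_ID_MASK   0x00FF		/* assumed values from ObjectID.h */
#define OBJECT_ID_SHIFT  0x00
#define ENUM_ID_MASK     0x0700
#define ENUM_ID_SHIFT    0x08

int main(void)
{
	uint32_t encoder_enum = 0x0221;	/* hypothetical: object id 0x21, enum id 2 */

	uint32_t encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	uint32_t enum_id    = (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	int linkb = (enum_id == 2);	/* same test as radeon_atombios_set_dig_info() */

	printf("id=0x%02x enum=%u link%c\n", encoder_id, enum_id, linkb ? 'B' : 'A');
	return 0;
}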
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dc1634bb0c1..9cdf6a35bc2 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
+ drm_gem_object_handle_unreference(gobj);
drm_gem_object_unreference_unlocked(gobj);
}
@@ -118,7 +120,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
+ false, true,
&gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
@@ -224,7 +226,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
{
struct fb_info *info;
struct radeon_framebuffer *rfb = &rfbdev->rfb;
- struct radeon_bo *rbo;
- int r;
if (rfbdev->helper.fbdev) {
info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
}
if (rfb->obj) {
- rbo = rfb->obj->driver_private;
- r = radeon_bo_reserve(rbo, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unpin(rbo);
- radeon_bo_unreserve(rbo);
- }
- drm_gem_object_unreference_unlocked(rfb->obj);
+ radeonfb_destroy_pinned_object(rfb->obj);
+ rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
drm_framebuffer_cleanup(&rfb->base);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a72a3ee5d69..d1e595d9172 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
if (r) {
- drm_gem_object_unreference_unlocked(gobj);
return r;
}
- drm_gem_object_handle_unreference_unlocked(gobj);
args->handle = handle;
return 0;
}
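
The create ioctl now drops the allocation reference unconditionally right after drm_gem_handle_create(): on success the handle holds its own reference, on failure the object has to go away anyway. A toy refcount model of that hand-off (no DRM types involved):

#include <stdio.h>

struct obj { int refcount; };

static void obj_ref(struct obj *o)   { o->refcount++; }
static void obj_unref(struct obj *o)
{
	if (--o->refcount == 0)
		printf("object freed\n");
}

/* handle creation takes its own reference when it succeeds */
static int handle_create(struct obj *o, int fail)
{
	if (fail)
		return -1;
	obj_ref(o);
	return 0;
}

int main(void)
{
	struct obj o = { .refcount = 1 };	/* reference held by the allocation */
	int r = handle_create(&o, 0);

	obj_unref(&o);		/* always drop the allocation ref, as in the patch */
	if (r)
		printf("create failed, object already released\n");
	else
		printf("handle holds the only reference (refcount=%d)\n", o.refcount);
	return 0;
}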
@@ -226,7 +226,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
@@ -245,7 +245,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
@@ -264,7 +264,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, &cur_placement, true);
@@ -294,7 +294,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
@@ -316,7 +316,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG("%d \n", args->handle);
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
- return -EINVAL;
+ return -ENOENT;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
@@ -334,7 +334,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
- return -EINVAL;
+ return -ENOENT;
rbo = gobj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 5def6f5dff3..6a13ee38a5b 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -52,6 +52,10 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
};
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.valid)
+ radeon_router_select_port(radeon_connector);
+
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
if (ret == 2)
return true;
@@ -95,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
}
}
+ /* switch the pads to ddc mode */
+ if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+ temp = RREG32(rec->mask_clk_reg);
+ temp &= ~(1 << 16);
+ WREG32(rec->mask_clk_reg, temp);
+ }
+
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
@@ -202,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
{
- u32 sclk = radeon_get_engine_clock(rdev);
+ u32 sclk = rdev->pm.current_sclk;
u32 prescale = 0;
u32 nm;
u8 n, m, loop;
@@ -960,6 +971,59 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
kfree(i2c);
}
+/* Add the default buses */
+void radeon_i2c_init(struct radeon_device *rdev)
+{
+ if (rdev->is_atom_bios)
+ radeon_atombios_i2c_init(rdev);
+ else
+ radeon_combios_i2c_init(rdev);
+}
+
+/* remove all the buses */
+void radeon_i2c_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (rdev->i2c_bus[i]) {
+ radeon_i2c_destroy(rdev->i2c_bus[i]);
+ rdev->i2c_bus[i] = NULL;
+ }
+ }
+}
+
+/* Add additional buses */
+void radeon_i2c_add(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct drm_device *dev = rdev->ddev;
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (!rdev->i2c_bus[i]) {
+ rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
+ return;
+ }
+ }
+}
+
+/* looks up bus based on id */
+struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *i2c_bus)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (rdev->i2c_bus[i] &&
+ (rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+ return rdev->i2c_bus[i];
+ }
+ }
+ return NULL;
+}
+
struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
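
radeon_i2c_add() and radeon_i2c_lookup() above keep the buses in a fixed array of RADEON_MAX_I2C_BUS slots, filling the first free slot and matching later by i2c_id. A stripped-down sketch of that slot registry with invented types:

#include <stdio.h>

#define MAX_BUS 16

struct bus { int used; unsigned i2c_id; const char *name; };

static struct bus buses[MAX_BUS];

static void bus_add(unsigned i2c_id, const char *name)
{
	int i;

	for (i = 0; i < MAX_BUS; i++) {
		if (!buses[i].used) {
			buses[i].used = 1;	/* first free slot wins */
			buses[i].i2c_id = i2c_id;
			buses[i].name = name;
			return;
		}
	}
}

static struct bus *bus_lookup(unsigned i2c_id)
{
	int i;

	for (i = 0; i < MAX_BUS; i++)
		if (buses[i].used && buses[i].i2c_id == i2c_id)
			return &buses[i];
	return NULL;
}

int main(void)
{
	struct bus *b;

	bus_add(0x90, "DDC1");
	bus_add(0x91, "DDC2");
	b = bus_lookup(0x91);
	printf("0x91 -> %s\n", b ? b->name : "not found");
	return 0;
}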
@@ -1020,3 +1084,28 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
addr, val);
}
+/* router switching */
+void radeon_router_select_port(struct radeon_connector *radeon_connector)
+{
+ u8 val;
+
+ if (!radeon_connector->router.valid)
+ return;
+
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, &val);
+ val &= radeon_connector->router.mux_control_pin;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, val);
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, &val);
+ val &= radeon_connector->router.mux_control_pin;
+ val |= radeon_connector->router.mux_state;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, val);
+}
+
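
Router port selection is two I2C read-modify-write cycles: register 0x3 is masked with the mux control pin, then register 0x1 is masked the same way and ORed with the requested mux state. A sketch of that sequence against an in-memory register array standing in for the I2C device (register contents and masks are made up):

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[4] = { 0x00, 0xff, 0x00, 0xff };	/* fake router registers */

static void i2c_get_byte(uint8_t reg, uint8_t *val) { *val = regs[reg]; }
static void i2c_put_byte(uint8_t reg, uint8_t val)  { regs[reg] = val; }

int main(void)
{
	uint8_t mux_control_pin = 0xf0;	/* hypothetical values */
	uint8_t mux_state = 0x01;
	uint8_t val;

	i2c_get_byte(0x3, &val);
	val &= mux_control_pin;		/* same masking as the patch */
	i2c_put_byte(0x3, val);

	i2c_get_byte(0x1, &val);
	val &= mux_control_pin;
	val |= mux_state;		/* select the wanted port */
	i2c_put_byte(0x1, val);

	printf("reg3=0x%02x reg1=0x%02x\n", regs[3], regs[1]);
	return 0;
}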
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 059bfa4098d..a108c7ed14f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP))) {
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!(rdev->flags & RADEON_IS_AGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
- DRM_INFO("radeon: using MSI.\n");
+ dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
rdev->irq.installed = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ab389f89fa8..8fbbe1c6ebb 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -49,7 +49,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
- int r;
+ int r, acpi_status;
rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
if (rdev == NULL) {
@@ -77,6 +77,12 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
goto out;
}
+
+ /* Call ACPI methods */
+ acpi_status = radeon_acpi_init(rdev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+
/* Again modeset_init should fail only on fatal error
* otherwise it should provide enough functionalities
* for shadowfb to run
@@ -106,7 +112,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
info = data;
value_ptr = (uint32_t *)((unsigned long)info->value);
- value = *value_ptr;
+ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
+ return -EFAULT;
+
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
value = dev->pci_device;
@@ -135,15 +143,51 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
}
if (!found) {
- DRM_DEBUG("unknown crtc id %d\n", value);
+ DRM_DEBUG_KMS("unknown crtc id %d\n", value);
return -EINVAL;
}
break;
case RADEON_INFO_ACCEL_WORKING2:
value = rdev->accel_working;
break;
+ case RADEON_INFO_TILING_CONFIG:
+ if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.tile_config;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.tile_config;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.tile_config;
+ else {
+ DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_WANT_HYPERZ:
+ /* The "value" here is both an input and output parameter.
+ * If the input value is 1, filp requests hyper-z access.
+ * If the input value is 0, filp revokes its hyper-z access.
+ *
+ * When returning, the value is 1 if filp owns hyper-z access,
+ * 0 otherwise. */
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
+ return -EINVAL;
+ }
+ mutex_lock(&dev->struct_mutex);
+ if (value == 1) {
+ /* wants hyper-z */
+ if (!rdev->hyperz_filp)
+ rdev->hyperz_filp = filp;
+ } else if (value == 0) {
+ /* revokes hyper-z */
+ if (rdev->hyperz_filp == filp)
+ rdev->hyperz_filp = NULL;
+ }
+ value = rdev->hyperz_filp == filp ? 1 : 0;
+ mutex_unlock(&dev->struct_mutex);
+ break;
default:
- DRM_DEBUG("Invalid request %d\n", info->request);
+ DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
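
RADEON_INFO_WANT_HYPERZ treats value as both input and output: 1 requests exclusive hyper-z access, 0 releases it, and the reply reports whether the caller owns it afterwards. A stand-alone model of that single-owner hand-off, with the struct_mutex locking omitted:

#include <stdio.h>

static const void *hyperz_filp;		/* current owner, NULL if free */

static int want_hyperz(const void *filp, unsigned value)
{
	if (value >= 2)
		return -1;		/* invalid request */
	if (value == 1) {
		if (!hyperz_filp)
			hyperz_filp = filp;	/* grant only if free */
	} else {
		if (hyperz_filp == filp)
			hyperz_filp = NULL;	/* owner may revoke itself */
	}
	return hyperz_filp == filp;	/* 1 if the caller owns hyper-z now */
}

int main(void)
{
	int a = want_hyperz("client A", 1);
	int b = want_hyperz("client B", 1);	/* denied while A owns it */

	printf("A owns: %d, B owns: %d\n", a, b);
	want_hyperz("client A", 0);		/* A releases */
	printf("B retry: %d\n", want_hyperz("client B", 1));
	return 0;
}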
@@ -159,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*/
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev->powered_down)
+ return -EINVAL;
return 0;
}
@@ -181,9 +229,11 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+ if (rdev->hyperz_filp == file_priv)
+ rdev->hyperz_filp = NULL;
}
-
/*
* VBlank related functions.
*/
@@ -278,45 +328,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
struct drm_ioctl_desc radeon_ioctls_kms[] = {
- DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index e1e5255396a..305049afde1 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
if (!ref_div)
return 1;
- vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
/*
* This is horribly crude: the VCO frequency range is divided into
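
The one-character fix above is significant: the old code computed ref_freq & fb_div (a bitwise AND) where the VCO frequency needs ref_freq * fb_div. A quick numeric check with plausible legacy-PLL values (the numbers are only there to show the size of the error):

#include <stdio.h>

int main(void)
{
	unsigned ref_freq = 2700;	/* 27.00 MHz reference, in 10 kHz units */
	unsigned fb_div = 140;
	unsigned ref_div = 12;

	unsigned wrong = (ref_freq & fb_div) / ref_div;	/* bitwise AND: nonsense */
	unsigned right = (ref_freq * fb_div) / ref_div;	/* actual VCO frequency */

	printf("wrong=%u right=%u\n", wrong, right);
	return 0;
}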
@@ -362,10 +362,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
uint32_t gen_cntl_reg, gen_cntl_val;
int r;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -528,7 +528,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc_v_sync_strt_wid;
bool is_tv = false;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -757,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
@@ -772,7 +772,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!post_div->divider)
post_div = &post_divs[0];
- DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
+ DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
(unsigned)freq,
feedback_div,
reference_div,
@@ -841,12 +841,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
| RADEON_P2PLL_SLEEP
| RADEON_P2PLL_ATOMIC_UPDATE_EN));
- DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
(unsigned)pll_ref_div,
(unsigned)pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_P2PLL_CNTL));
- DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
+ DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
(unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
(unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
(unsigned)((pll_fb_post_div &
@@ -947,12 +947,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
| RADEON_PPLL_ATOMIC_UPDATE_EN
| RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
- DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
pll_ref_div,
pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_PPLL_CNTL));
- DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
+ DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
(pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5688a0cf6bb..0b8397000f4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -47,7 +47,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
bool is_mac = false;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
@@ -151,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
@@ -167,7 +167,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
} else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
- DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+ DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
lvds_gen_cntl = lvds->lvds_gen_cntl;
lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
(0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
@@ -250,7 +250,7 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -315,7 +315,7 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (radeon_crtc->crtc_id == 0) {
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
@@ -446,7 +446,7 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -502,7 +502,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
int i;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
tmp &= 0xfffff;
@@ -610,7 +610,7 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -666,7 +666,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t fp2_gen_cntl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (rdev->is_atom_bios) {
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -760,7 +760,7 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
uint32_t tv_master_cntl = 0;
bool is_tv;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
@@ -878,7 +878,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
bool is_tv = false;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
@@ -1075,10 +1075,10 @@ static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
tmp = RREG32(RADEON_TV_DAC_CNTL);
if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
found = true;
- DRM_DEBUG("S-video TV connection detected\n");
+ DRM_DEBUG_KMS("S-video TV connection detected\n");
} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
found = true;
- DRM_DEBUG("Composite TV connection detected\n");
+ DRM_DEBUG_KMS("Composite TV connection detected\n");
}
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
@@ -1141,10 +1141,10 @@ static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
tmp = RREG32(RADEON_TV_DAC_CNTL);
if (tmp & RADEON_TV_DAC_GDACDET) {
found = true;
- DRM_DEBUG("S-video TV connection detected\n");
+ DRM_DEBUG_KMS("S-video TV connection detected\n");
} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
found = true;
- DRM_DEBUG("Composite TV connection detected\n");
+ DRM_DEBUG_KMS("Composite TV connection detected\n");
}
WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
}
void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_id == encoder_id) {
+ if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 03204039774..c7b6cb428d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -496,7 +496,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
restart -= v_offset + h_offset;
- DRM_DEBUG("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
+ DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
tv_dac->tv.hrestart = restart % h_total;
@@ -505,7 +505,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
restart /= v_total;
tv_dac->tv.frestart = restart % f_total;
- DRM_DEBUG("compute_restart: F/H/V=%u,%u,%u\n",
+ DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
(unsigned)tv_dac->tv.frestart,
(unsigned)tv_dac->tv.vrestart,
(unsigned)tv_dac->tv.hrestart);
@@ -523,7 +523,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
((u32)h_inc << RADEON_H_INC_SHIFT);
- DRM_DEBUG("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
+ DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
return h_changed;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 95696aa57ac..17a6602b588 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -66,6 +66,12 @@ enum radeon_tv_std {
TV_STD_PAL_N,
};
+enum radeon_underscan_type {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+};
+
enum radeon_hpd_id {
RADEON_HPD_1 = 0,
RADEON_HPD_2,
@@ -76,6 +82,8 @@ enum radeon_hpd_id {
RADEON_HPD_NONE = 0xff,
};
+#define RADEON_MAX_I2C_BUS 16
+
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
@@ -196,7 +204,7 @@ struct radeon_i2c_chan {
/* mostly for macs, but really any system without connector tables */
enum radeon_connector_table {
- CT_NONE,
+ CT_NONE = 0,
CT_GENERIC,
CT_IBOOK,
CT_POWERBOOK_EXTERNAL,
@@ -207,6 +215,7 @@ enum radeon_connector_table {
CT_IMAC_G5_ISIGHT,
CT_EMAC,
CT_RN50_POWER,
+ CT_MAC_X800,
};
enum radeon_dvo_chip {
@@ -226,10 +235,12 @@ struct radeon_mode_info {
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
struct drm_property *load_detect_property;
- /* TV standard load detect */
+ /* TV standard */
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;
+ /* underscan */
+ struct drm_property *underscan_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
@@ -266,6 +277,8 @@ struct radeon_crtc {
uint32_t legacy_display_base_addr;
uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
+ u8 h_border;
+ u8 v_border;
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
@@ -330,6 +343,7 @@ struct radeon_atom_ss {
};
struct radeon_encoder_atom_dig {
+ bool linkb;
/* atom dig */
bool coherent_mode;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -348,12 +362,14 @@ struct radeon_encoder_atom_dac {
struct radeon_encoder {
struct drm_encoder base;
+ uint32_t encoder_enum;
uint32_t encoder_id;
uint32_t devices;
uint32_t active_device;
uint32_t flags;
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
+ enum radeon_underscan_type underscan_type;
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
@@ -365,7 +381,6 @@ struct radeon_encoder {
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
- bool linkb;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
@@ -387,12 +402,22 @@ struct radeon_hpd {
struct radeon_gpio_rec gpio;
};
+struct radeon_router {
+ bool valid;
+ u32 router_id;
+ struct radeon_i2c_bus_rec i2c_info;
+ u8 i2c_addr;
+ u8 mux_type;
+ u8 mux_control_pin;
+ u8 mux_state;
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
uint32_t devices;
struct radeon_i2c_chan *ddc_bus;
- /* some systems have a an hdmi and vga port with a shared ddc line */
+ /* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
bool use_digital;
/* we need to mind the EDID between detect
@@ -402,6 +427,8 @@ struct radeon_connector {
bool dac_load_detect;
uint16_t connector_object_id;
struct radeon_hpd hpd;
+ struct radeon_router router;
+ struct radeon_i2c_chan *router_bus;
};
struct radeon_framebuffer {
@@ -414,6 +441,9 @@ radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev);
+extern struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
@@ -431,6 +461,15 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte);
+extern void radeon_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_fini(struct radeon_device *rdev);
+extern void radeon_combios_i2c_init(struct radeon_device *rdev);
+extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_add(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *i2c_bus);
extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
@@ -446,6 +485,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 slave_addr,
u8 addr,
u8 val);
+extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -561,7 +601,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
-extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d5b9373ce06..0afd1e62347 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
+retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
@@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
&radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
+ if (r != -ERESTARTSYS) {
+ if (domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
dev_err(rdev->dev,
"object_init failed for (%lu, 0x%08X)\n",
size, domain);
+ }
return r;
}
*bo_ptr = bo;
@@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
+ u32 domain;
int r;
list_for_each_entry(lobj, head, list) {
@@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, list) {
bo = lobj->bo;
if (!bo->pin_count) {
- if (lobj->wdomain) {
- radeon_ttm_placement_from_domain(bo,
- lobj->wdomain);
- } else {
- radeon_ttm_placement_from_domain(bo,
- lobj->rdomain);
- }
+ domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
+ retry:
+ radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false, false);
- if (unlikely(r))
+ if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
return r;
+ }
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
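The radeon_object.c hunks above add a fallback path: when a VRAM-only placement fails with anything other than an interrupted wait, the buffer is retried with GTT added to the allowed domains. A minimal userspace sketch of that retry-with-wider-domain pattern follows; try_place(), the domain flags and the use of -EINTR in place of the kernel's -ERESTARTSYS are stand-ins for illustration.

#include <errno.h>
#include <stdio.h>

#define DOMAIN_VRAM 0x1u
#define DOMAIN_GTT  0x2u

static int try_place(unsigned int domain)
{
	/* pretend a VRAM-only placement fails for lack of space */
	return (domain == DOMAIN_VRAM) ? -ENOMEM : 0;
}

static int place_with_fallback(unsigned int domain)
{
	int r;

retry:
	r = try_place(domain);
	if (r && r != -EINTR && domain == DOMAIN_VRAM) {
		domain |= DOMAIN_GTT;	/* widen the placement and retry once */
		goto retry;
	}
	return r;
}

int main(void)
{
	printf("placement result: %d\n", place_with_fallback(DOMAIN_VRAM));
	return 0;
}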
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3fa6984d989..f87efec7623 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,6 +27,8 @@
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
@@ -60,9 +62,9 @@ static int radeon_acpi_event(struct notifier_block *nb,
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
- DRM_DEBUG("pm: AC\n");
+ DRM_DEBUG_DRIVER("pm: AC\n");
else
- DRM_DEBUG("pm: DC\n");
+ DRM_DEBUG_DRIVER("pm: DC\n");
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
@@ -196,7 +198,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
radeon_set_engine_clock(rdev, sclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_sclk = sclk;
- DRM_DEBUG("Setting: e: %d\n", sclk);
+ DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
}
/* set memory clock */
@@ -205,7 +207,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_mclk = mclk;
- DRM_DEBUG("Setting: m: %d\n", mclk);
+ DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
}
if (misc_after)
@@ -217,13 +219,18 @@ static void radeon_set_power_state(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else
- DRM_DEBUG("pm: GUI not idle!!!\n");
+ DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
int i;
+ /* no need to take locks, etc. if nothing's going to change */
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+ return;
+
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
@@ -292,27 +299,27 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
struct radeon_power_state *power_state;
struct radeon_pm_clock_info *clock_info;
- DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
+ DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
for (i = 0; i < rdev->pm.num_power_states; i++) {
power_state = &rdev->pm.power_state[i];
- DRM_DEBUG("State %d: %s\n", i,
+ DRM_DEBUG_DRIVER("State %d: %s\n", i,
radeon_pm_state_type_name[power_state->type]);
if (i == rdev->pm.default_power_state_index)
- DRM_DEBUG("\tDefault");
+ DRM_DEBUG_DRIVER("\tDefault");
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
- DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+ DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
- DRM_DEBUG("\tSingle display only\n");
- DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+ DRM_DEBUG_DRIVER("\tSingle display only\n");
+ DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
- DRM_DEBUG("\t\t%d e: %d%s\n",
+ DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
else
- DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
+ DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->mclk * 10,
@@ -424,6 +431,93 @@ fail:
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+static ssize_t radeon_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ u32 temp;
+
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ temp = rv6xx_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_RV770:
+ temp = rv770_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_EVERGREEN:
+ temp = evergreen_get_temp(rdev);
+ break;
+ default:
+ temp = 0;
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t radeon_hwmon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "radeon\n");
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+};
+
+static int radeon_hwmon_init(struct radeon_device *rdev)
+{
+ int err = 0;
+
+ rdev->pm.int_hwmon_dev = NULL;
+
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ case THERMAL_TYPE_RV770:
+ case THERMAL_TYPE_EVERGREEN:
+ rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
+ if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+ err = PTR_ERR(rdev->pm.int_hwmon_dev);
+ dev_err(rdev->dev,
+ "Unable to register hwmon device: %d\n", err);
+ break;
+ }
+ dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
+ err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
+ &hwmon_attrgroup);
+ if (err) {
+ dev_err(rdev->dev,
+ "Unable to create hwmon sysfs file: %d\n", err);
+ hwmon_device_unregister(rdev->dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.int_hwmon_dev) {
+ sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
+ hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+ }
+}
+
void radeon_pm_suspend(struct radeon_device *rdev)
{
bool flush_wq = false;
@@ -462,6 +556,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
int radeon_pm_init(struct radeon_device *rdev)
{
int ret;
+
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
rdev->pm.profile = PM_PROFILE_DEFAULT;
@@ -471,6 +566,7 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->pm.dynpm_can_downclock = true;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
if (rdev->bios) {
if (rdev->is_atom_bios)
@@ -481,6 +577,10 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_pm_init_profile(rdev);
}
+ /* set up the internal thermal sensor if applicable */
+ ret = radeon_hwmon_init(rdev);
+ if (ret)
+ return ret;
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -536,8 +636,7 @@ void radeon_pm_fini(struct radeon_device *rdev)
#endif
}
- if (rdev->pm.i2c_bus)
- radeon_i2c_destroy(rdev->pm.i2c_bus);
+ radeon_hwmon_fini(rdev);
}
void radeon_pm_compute_clocks(struct radeon_device *rdev)
@@ -576,7 +675,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
- DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
}
} else if (rdev->pm.active_crtc_count == 1) {
/* TODO: Increase clocks if needed for current mode */
@@ -593,7 +692,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- DRM_DEBUG("radeon: dynamic power management activated\n");
+ DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
}
} else { /* count == 0 */
if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
@@ -689,7 +788,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
bool in_vbl = radeon_pm_in_vbl(rdev);
if (in_vbl == false)
- DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+ DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
finish ? "exit" : "entry");
return in_vbl;
}
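With the hwmon registration above, the driver exposes the internal thermal sensor as a standard temp1_input attribute reporting millidegrees Celsius (see the snprintf() in radeon_hwmon_show_temp). A small userspace reader is sketched below; the hwmon0 path is an assumption, and real code should locate the hwmon device whose "name" attribute reads "radeon".

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r"); /* assumed index */
	long millideg;

	if (!f) {
		perror("temp1_input");
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fprintf(stderr, "unexpected temp1_input contents\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("GPU temperature: %ld.%03ld degrees C\n", millideg / 1000, millideg % 1000);
	return 0;
}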
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index b3ba44c0a81..4ae5a3d1074 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc radeon_ioctls[] = {
- DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
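The ioctl table above moves from DRM_IOCTL_DEF to DRM_IOCTL_DEF_DRV, which takes the driver-relative ioctl name and derives the full command number itself. The following is a conceptual, self-contained illustration of that style of table building; the macro, the command base and the numbers are hypothetical and do not reproduce the real DRM definitions.

#include <stdio.h>

#define CMD_BASE 0x40	/* hypothetical per-driver command base */

#define DRV_RADEON_CP_INIT  (CMD_BASE + 0x00)
#define DRV_RADEON_CP_START (CMD_BASE + 0x01)

struct ioctl_desc {
	unsigned int cmd;
	const char *name;
};

/* The macro pastes the prefix and indexes the table by driver-local number. */
#define IOCTL_DEF_DRV(ioctl) \
	[DRV_##ioctl - CMD_BASE] = { .cmd = DRV_##ioctl, .name = #ioctl }

static const struct ioctl_desc table[] = {
	IOCTL_DEF_DRV(RADEON_CP_INIT),
	IOCTL_DEF_DRV(RADEON_CP_START),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("slot %u: cmd 0x%02x (%s)\n", i, table[i].cmd, table[i].name);
	return 0;
}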
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e9918d88f5b..84c53e41a88 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -59,28 +59,28 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
/*
* Global memory.
*/
-static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
+static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
-static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
+static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
- struct ttm_global_reference *global_ref;
+ struct drm_global_reference *global_ref;
int r;
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &radeon_ttm_mem_global_init;
global_ref->release = &radeon_ttm_mem_global_release;
- r = ttm_global_item_ref(global_ref);
+ r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
@@ -90,14 +90,14 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
rdev->mman.bo_global_ref.mem_glob =
rdev->mman.mem_global_ref.object;
global_ref = &rdev->mman.bo_global_ref.ref;
- global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
- r = ttm_global_item_ref(global_ref);
+ r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- ttm_global_item_unref(&rdev->mman.mem_global_ref);
+ drm_global_item_unref(&rdev->mman.mem_global_ref);
return r;
}
@@ -108,8 +108,8 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
if (rdev->mman.mem_global_referenced) {
- ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
- ttm_global_item_unref(&rdev->mman.mem_global_ref);
+ drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+ drm_global_item_unref(&rdev->mman.mem_global_ref);
rdev->mman.mem_global_referenced = false;
}
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 1e97b2d129f..b506ec1cab4 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -187,7 +187,6 @@ r300 0x4f60
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -716,16 +715,4 @@ r300 0x4f60
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index e958980d00f..8c1214c2390 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,6 +130,7 @@ r420 0x4f60
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
@@ -187,7 +188,6 @@ r420 0x4f60
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -782,16 +782,4 @@ r420 0x4f60
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 83e8bc0c2bb..0828d80396f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -187,7 +187,6 @@ rs600 0x6d40
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -782,16 +781,4 @@ rs600 0x6d40
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 1e46233985e..b3f9f1d9200 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -235,7 +235,6 @@ rv515 0x6d40
0x4354 RS_INST_13
0x4358 RS_INST_14
0x435C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -317,6 +316,7 @@ rv515 0x6d40
0x4BD0 FG_FOG_COLOR_B
0x4BD4 FG_ALPHA_FUNC
0x4BD8 FG_DEPTH_SRC
+0x4BE0 FG_ALPHA_VALUE
0x4C00 US_ALU_CONST_R_0
0x4C04 US_ALU_CONST_G_0
0x4C08 US_ALU_CONST_B_0
@@ -479,17 +479,5 @@ rv515 0x6d40
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f454c9a5e7f..ae2b76b9a38 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -55,14 +55,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
rdev->mc.gtt_size = 32 * 1024 * 1024;
return;
}
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
- /* FIXME: RS400 & RS480 seems to have issue with GART size
- * if 4G of system memory (needs more testing)
- */
- /* XXX is this still an issue with proper alignment? */
- rdev->mc.gtt_size = 32 * 1024 * 1024;
- DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
- }
}
void rs400_gart_tlb_flush(struct radeon_device *rdev)
@@ -483,6 +475,8 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6dc15ea8ba3..cc05b230d7e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -686,8 +686,8 @@ void rs600_mc_init(struct radeon_device *rdev)
{
u64 base;
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
@@ -696,7 +696,6 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
- rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, &rdev->mc);
@@ -813,6 +812,13 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
@@ -839,6 +845,7 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -848,6 +855,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -871,6 +879,8 @@ int rs600_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
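rs600_mc_init() above now reads the aperture base and size straight from the PCI device via pci_resource_start()/pci_resource_len() instead of the old drm_get_resource_*() wrappers; the same substitution appears in rs690, rv770 and savage below. A minimal kernel-context fragment using those helpers, with an assumed BAR 0 and a made-up log message, not standalone code:

#include <linux/pci.h>

/* Report the location and size of BAR 0 for an already-enabled device. */
static void report_bar0(struct pci_dev *pdev)
{
	resource_size_t base = pci_resource_start(pdev, 0);
	resource_size_t size = pci_resource_len(pdev, 0);

	dev_info(&pdev->dev, "BAR0 at 0x%llx, 0x%llx bytes\n",
		 (unsigned long long)base, (unsigned long long)size);
}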
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index ce4ecbe1081..3e3f75718be 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -154,13 +154,13 @@ void rs690_mc_init(struct radeon_device *rdev)
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
- rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ rs690_pm_info(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
radeon_gtt_location(rdev, &rdev->mc);
@@ -398,7 +398,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rs690_watermark wm0;
struct rs690_watermark wm1;
- u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
@@ -495,10 +497,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -528,13 +526,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
- S_006D48_D2MODE_PRIORITY_A_OFF(1));
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
- S_006D4C_D2MODE_PRIORITY_B_OFF(1));
- } else {
+ } else if (mode1) {
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
@@ -563,13 +555,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
- S_006548_D1MODE_PRIORITY_A_OFF(1));
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
- S_00654C_D1MODE_PRIORITY_B_OFF(1));
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
+
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -641,6 +632,13 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
@@ -667,6 +665,7 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -676,6 +675,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -699,6 +699,8 @@ int rs690_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
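The rs690_bandwidth_update() rework above initializes both priority counters to their "off" encoding, overrides them only for the CRTCs that actually have a mode set, and then performs one shared block of register writes instead of duplicating the writes in every branch (rv515 below gets the same treatment). A compact, self-contained sketch of that default/override/single-write structure, with stand-in register names and values:

#include <stdio.h>

#define PRIORITY_OFF 0x10000u	/* stand-in for the *_PRIORITY_*_OFF encoding */

static void wreg32(const char *reg, unsigned int val)
{
	printf("%s <- 0x%x\n", reg, val);	/* stands in for the MMIO write */
}

static void update_priority_counts(int mode0_active, int mode1_active)
{
	unsigned int d1 = PRIORITY_OFF, d2 = PRIORITY_OFF;

	if (mode0_active)
		d1 = 0x42;	/* computed watermark for CRTC 0 (made up) */
	if (mode1_active)
		d2 = 0x37;	/* computed watermark for CRTC 1 (made up) */

	/* one write block instead of per-branch duplicates */
	wreg32("D1MODE_PRIORITY_A_CNT", d1);
	wreg32("D2MODE_PRIORITY_A_CNT", d2);
}

int main(void)
{
	update_priority_counts(1, 0);
	return 0;
}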
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0c9c169a685..4d6e86041a9 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -469,6 +469,8 @@ int rv515_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -925,7 +927,9 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rv515_watermark wm0;
struct rv515_watermark wm1;
- u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
@@ -999,10 +1003,6 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
- WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
@@ -1032,11 +1032,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
- WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- } else {
+ } else if (mode1) {
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
@@ -1065,11 +1061,12 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
- WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
- WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
+
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
void rv515_bandwidth_update(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b7fd8206492..bfa59db374d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,21 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+/* get temperature in millidegrees */
+u32 rv770_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ u32 actual_temp = 0;
+
+ if ((temp >> 9) & 1)
+ actual_temp = 0;
+ else
+ actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
void rv770_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -189,7 +204,10 @@ static void rv770_mc_program(struct radeon_device *rdev)
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
- WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+ /* r7xx hw bug. Read from HDP_DEBUG1 rather
+ * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ tmp = RREG32(HDP_DEBUG1);
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
@@ -659,8 +677,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
- gb_tiling_config |= BACKEND_MAP(backend_map);
+ rdev->config.rv770.tile_config = gb_tiling_config;
+ gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -886,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+ int r;
+ u64 gpu_addr;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
+ if (r) {
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->vram_scratch.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->vram_scratch.robj,
+ (void **)&rdev->vram_scratch.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+ return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->vram_scratch.robj);
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ }
+ radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -919,8 +986,8 @@ int rv770_mc_init(struct radeon_device *rdev)
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
@@ -951,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
}
+ r = rv770_vram_scratch_init(rdev);
+ if (r)
+ return r;
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
@@ -1004,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = rv770_startup(rdev);
if (r) {
@@ -1099,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1176,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ rv770_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9506f8cb99e..b7a5a20e81d 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -122,12 +122,18 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
+#define CG_MULT_THERMAL_STATUS 0x740
+#define ASIC_T(x) ((x) << 16)
+#define ASIC_T_MASK 0x3FF0000
+#define ASIC_T_SHIFT 16
+
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
+#define HDP_DEBUG1 0x2F34
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
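A worked example of the rv770_get_temp() decode enabled by the CG_MULT_THERMAL_STATUS definitions above: the ASIC_T field occupies bits 25:16, the driver reports 0 when bit 9 of that field is set, and otherwise bits 8:1 give degrees Celsius, scaled to millidegrees for hwmon. The raw register value below is made up for illustration.

#include <stdio.h>

#define ASIC_T_MASK  0x3FF0000
#define ASIC_T_SHIFT 16

int main(void)
{
	unsigned int status = 0x00A40000;	/* hypothetical register readback */
	unsigned int temp = (status & ASIC_T_MASK) >> ASIC_T_SHIFT;	/* 0x0a4 */
	unsigned int actual_temp = ((temp >> 9) & 1) ? 0 : (temp >> 1) & 0xff;

	printf("ASIC_T field 0x%03x -> %u millidegrees C\n", temp, actual_temp * 1000);
	return 0;
}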
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index fa05cda8c98..bf5f83ea14f 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -573,13 +573,13 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[2].handle = -1;
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
fb_rsrc = 0;
- fb_base = drm_get_resource_start(dev, 0);
+ fb_base = pci_resource_start(dev->pdev, 0);
fb_size = SAVAGE_FB_SIZE_S3;
mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
aper_rsrc = 0;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
- if (drm_get_resource_len(dev, 0) == 0x08000000) {
+ if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
/* Don't make MMIO write-combining! We need 3
* MTRRs. */
dev_priv->mtrr[0].base = fb_base;
@@ -599,18 +599,19 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[2].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08llx\n",
- (unsigned long long)drm_get_resource_len(dev, 0));
+ (unsigned long long)
+ pci_resource_len(dev->pdev, 0));
}
} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
dev_priv->chipset != S3_SAVAGE2000) {
- mmio_base = drm_get_resource_start(dev, 0);
+ mmio_base = pci_resource_start(dev->pdev, 0);
fb_rsrc = 1;
- fb_base = drm_get_resource_start(dev, 1);
+ fb_base = pci_resource_start(dev->pdev, 1);
fb_size = SAVAGE_FB_SIZE_S4;
aper_rsrc = 1;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
- if (drm_get_resource_len(dev, 1) == 0x08000000) {
+ if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
/* Can use one MTRR to cover both fb and
* aperture. */
dev_priv->mtrr[0].base = fb_base;
@@ -620,15 +621,16 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[0].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08llx\n",
- (unsigned long long)drm_get_resource_len(dev, 1));
+ (unsigned long long)
+ pci_resource_len(dev->pdev, 1));
}
} else {
- mmio_base = drm_get_resource_start(dev, 0);
+ mmio_base = pci_resource_start(dev->pdev, 0);
fb_rsrc = 1;
- fb_base = drm_get_resource_start(dev, 1);
- fb_size = drm_get_resource_len(dev, 1);
+ fb_base = pci_resource_start(dev->pdev, 1);
+ fb_size = pci_resource_len(dev->pdev, 1);
aper_rsrc = 2;
- aperture_base = drm_get_resource_start(dev, 2);
+ aperture_base = pci_resource_start(dev->pdev, 2);
/* Automatic MTRR setup will do the right thing. */
}
@@ -1080,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc savage_ioctls[] = {
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4fd1f067d38..776bf9e9ea1 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,8 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret) {
+ if (ret)
kfree(dev_priv);
- }
return ret;
}
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index af22111397d..7fe2b63412c 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -78,7 +78,7 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
-#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
+#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1)
#endif /* CONFIG_FB_SIS[_MODULE] */
@@ -225,9 +225,8 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev)
map = entry->map;
if (!map)
continue;
- if (map->type == _DRM_REGISTERS) {
+ if (map->type == _DRM_REGISTERS)
return map;
- }
}
return NULL;
}
@@ -264,10 +263,10 @@ int sis_idle(struct drm_device *dev)
end = jiffies + (DRM_HZ * 3);
- for (i=0; i<4; ++i) {
+ for (i = 0; i < 4; ++i) {
do {
idle_reg = SIS_READ(0x85cc);
- } while ( !time_after_eq(jiffies, end) &&
+ } while (!time_after_eq(jiffies, end) &&
((idle_reg & 0x80000000) != 0x80000000));
}
@@ -301,7 +300,7 @@ void sis_lastclose(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void sis_reclaim_buffers_locked(struct drm_device * dev,
+void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
@@ -312,9 +311,8 @@ void sis_reclaim_buffers_locked(struct drm_device * dev,
return;
}
- if (dev->driver->dma_quiescent) {
+ if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- }
drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
@@ -322,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device * dev,
}
struct drm_ioctl_desc sis_ioctls[] = {
- DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 4256e200647..b256d4adfaf 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,7 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 555ebb12ace..cb4cf7ef4d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -476,7 +476,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
++put_count;
}
if (bo->mem.mm_node) {
- bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
@@ -670,7 +669,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
spin_lock(&glob->lru_lock);
if (evict_mem.mm_node) {
- evict_mem.mm_node->private = NULL;
drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
@@ -929,8 +927,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (node)
- node->private = bo;
return 0;
}
@@ -973,7 +969,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
- mem->mm_node->private = bo;
return 0;
}
if (ret == -ERESTARTSYS)
@@ -1029,7 +1024,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
- mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
@@ -1401,7 +1395,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
kfree(glob);
}
-void ttm_bo_global_release(struct ttm_global_reference *ref)
+void ttm_bo_global_release(struct drm_global_reference *ref)
{
struct ttm_bo_global *glob = ref->object;
@@ -1410,7 +1404,7 @@ void ttm_bo_global_release(struct ttm_global_reference *ref)
}
EXPORT_SYMBOL(ttm_bo_global_release);
-int ttm_bo_global_init(struct ttm_global_reference *ref)
+int ttm_bo_global_init(struct drm_global_reference *ref)
{
struct ttm_bo_global_ref *bo_ref =
container_of(ref, struct ttm_bo_global_ref, ref);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 13012a1f148..3451a82adba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,10 +351,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
fbo->vm_node = NULL;
+ atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
- if (fbo->mem.mm_node)
- fbo->mem.mm_node->private = (void *)fbo;
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 9a6edbfeaa9..902d7cf9fb4 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -70,8 +70,6 @@ static int __init ttm_init(void)
if (unlikely(ret != 0))
return ret;
- ttm_global_init();
-
atomic_set(&device_released, 0);
ret = drm_class_device_register(&ttm_drm_class_device);
if (unlikely(ret != 0))
@@ -81,7 +79,6 @@ static int __init ttm_init(void)
out_no_dev_reg:
atomic_set(&device_released, 1);
wake_up_all(&exit_q);
- ttm_global_release();
return ret;
}
@@ -95,7 +92,6 @@ static void __exit ttm_exit(void)
*/
wait_event(exit_q, atomic_read(&device_released) == 1);
- ttm_global_release();
}
module_init(ttm_init);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f01..b1e02fffd3c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
spinlock_t lock;
bool fill_lock;
struct list_head list;
- int gfp_flags;
+ gfp_t gfp_flags;
unsigned npages;
char *name;
unsigned long nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
* This function is reentrant if caller updates count depending on number of
* pages returned in pages array.
*/
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
- int gfp_flags = GFP_USER;
+ gfp_t gfp_flags = GFP_USER;
int r;
/* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
return 0;
}
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
{
int i;
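Two small C points behind the ttm_page_alloc.c type fixes above: gfp_t is the kernel's annotated type for allocation flags, so declaring gfp_flags as gfp_t rather than int lets sparse catch accidental mixing, and "void ttm_page_alloc_fini(void)" is a real prototype whereas the empty parentheses left the parameter list unspecified. A tiny userspace illustration of the prototype difference:

static void old_style()     { }	/* old_style(123) would still compile silently */
static void new_style(void) { }	/* new_style(123) is a compile-time error      */

int main(void)
{
	old_style();
	new_style();
	return 0;
}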
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index bfb92d28326..cc0ffa9abd0 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -58,28 +58,29 @@
*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
*((uint32_t *)(vb) + 1) = (nData); \
vb = ((uint32_t *)vb) + 2; \
- dev_priv->dma_low +=8; \
+ dev_priv->dma_low += 8; \
}
#define via_flush_write_combine() DRM_MEMORYBARRIER()
-#define VIA_OUT_RING_QW(w1,w2) \
+#define VIA_OUT_RING_QW(w1, w2) do { \
*vb++ = (w1); \
*vb++ = (w2); \
- dev_priv->dma_low += 8;
+ dev_priv->dma_low += 8; \
+} while (0)
-static void via_cmdbuf_start(drm_via_private_t * dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
-static int via_wait_idle(drm_via_private_t * dev_priv);
-static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
/*
* Free space in command buffer.
*/
-static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
+static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
@@ -93,7 +94,7 @@ static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
* How much does the command regulator lag behind?
*/
-static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
+static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
@@ -108,7 +109,7 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
*/
static inline int
-via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
+via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t cur_addr, hw_addr, next_addr;
@@ -146,14 +147,13 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
dev_priv->dma_high) {
via_cmdbuf_rewind(dev_priv);
}
- if (via_cmdbuf_wait(dev_priv, size) != 0) {
+ if (via_cmdbuf_wait(dev_priv, size) != 0)
return NULL;
- }
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
-int via_dma_cleanup(struct drm_device * dev)
+int via_dma_cleanup(struct drm_device *dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv =
@@ -171,9 +171,9 @@ int via_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int via_initialize(struct drm_device * dev,
- drm_via_private_t * dev_priv,
- drm_via_dma_init_t * init)
+static int via_initialize(struct drm_device *dev,
+ drm_via_private_t *dev_priv,
+ drm_via_dma_init_t *init)
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
@@ -258,7 +258,7 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
return retcode;
}
-static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
{
drm_via_private_t *dev_priv;
uint32_t *vb;
@@ -271,9 +271,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
return -EFAULT;
}
- if (cmd->size > VIA_PCI_BUF_SIZE) {
+ if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- }
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
@@ -291,9 +290,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
}
vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
- if (vb == NULL) {
+ if (vb == NULL)
return -EAGAIN;
- }
memcpy(vb, dev_priv->pci_buf, cmd->size);
@@ -311,13 +309,12 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
return 0;
}
-int via_driver_dma_quiescent(struct drm_device * dev)
+int via_driver_dma_quiescent(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- if (!via_wait_idle(dev_priv)) {
+ if (!via_wait_idle(dev_priv))
return -EBUSY;
- }
return 0;
}
@@ -339,22 +336,17 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_cmdbuffer(dev, cmdbuf);
- if (ret) {
- return ret;
- }
-
- return 0;
+ return ret;
}
-static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
- drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
+ drm_via_cmdbuffer_t *cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret;
- if (cmd->size > VIA_PCI_BUF_SIZE) {
+ if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- }
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
@@ -380,19 +372,14 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
- if (ret) {
- return ret;
- }
-
- return 0;
+ return ret;
}
-static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
+static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
uint32_t * vb, int qw_count)
{
- for (; qw_count > 0; --qw_count) {
+ for (; qw_count > 0; --qw_count)
VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
- }
return vb;
}
@@ -401,7 +388,7 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
*
* Returns virtual pointer to ring buffer.
*/
-static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
+static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
{
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
@@ -411,18 +398,18 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
-static int via_hook_segment(drm_via_private_t * dev_priv,
+static int via_hook_segment(drm_via_private_t *dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
int paused, count;
volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
- uint32_t reader,ptr;
+ uint32_t reader, ptr;
uint32_t diff;
paused = 0;
via_flush_write_combine();
- (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
+ (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
*paused_at = pause_addr_lo;
via_flush_write_combine();
@@ -435,7 +422,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
/*
- * If there is a possibility that the command reader will
+ * If there is a possibility that the command reader will
 * miss the new pause address and pause on the old one.
* In that case we need to program the new start address
* using PCI.
@@ -443,9 +430,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
- while(diff == 0 && count--) {
+ while (diff == 0 && count--) {
paused = (VIA_READ(0x41c) & 0x80000000);
- if (paused)
+ if (paused)
break;
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
@@ -477,7 +464,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
return paused;
}
-static int via_wait_idle(drm_via_private_t * dev_priv)
+static int via_wait_idle(drm_via_private_t *dev_priv)
{
int count = 10000000;
@@ -491,9 +478,9 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
return count;
}
-static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
- uint32_t addr, uint32_t * cmd_addr_hi,
- uint32_t * cmd_addr_lo, int skip_wait)
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
+ uint32_t addr, uint32_t *cmd_addr_hi,
+ uint32_t *cmd_addr_lo, int skip_wait)
{
uint32_t agp_base;
uint32_t cmd_addr, addr_lo, addr_hi;
@@ -521,7 +508,7 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
return vb;
}
-static void via_cmdbuf_start(drm_via_private_t * dev_priv)
+static void via_cmdbuf_start(drm_via_private_t *dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t start_addr, start_addr_lo;
@@ -580,7 +567,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
dev_priv->dma_diff = ptr - reader;
}
-static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
uint32_t *vb;
@@ -590,7 +577,7 @@ static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
via_align_buffer(dev_priv, vb, qwords);
}
-static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
+static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
{
uint32_t *vb = via_get_dma(dev_priv);
SetReg2DAGP(0x0C, (0 | (0 << 16)));
@@ -598,7 +585,7 @@ static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
-static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
+static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
uint32_t agp_base;
uint32_t pause_addr_lo, pause_addr_hi;
@@ -617,9 +604,8 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
*/
dev_priv->dma_low = 0;
- if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
+ if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
DRM_ERROR("via_cmdbuf_jump failed\n");
- }
via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv);
@@ -657,12 +643,12 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
}
-static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
{
via_cmdbuf_jump(dev_priv);
}
-static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
+static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
{
uint32_t pause_addr_lo, pause_addr_hi;
@@ -670,12 +656,12 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
-static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
via_wait_idle(dev_priv);
@@ -708,9 +694,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& --count) {
- if (!d_siz->wait) {
+ if (!d_siz->wait)
break;
- }
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
@@ -720,9 +705,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
&& --count) {
- if (!d_siz->wait) {
+ if (!d_siz->wait)
break;
- }
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
@@ -738,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
}
struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
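
The VIA_OUT_RING_QW hunk near the top of this file is more than whitespace polishing: wrapping the multi-statement macro in do { ... } while (0) makes the expansion a single statement, so it composes safely with if/else and a trailing semicolon. A minimal standalone sketch of the difference, assuming a hypothetical EMIT_QW macro (not the driver's macro) that just prints its arguments:

#include <stdio.h>

/* Unsafe form: expands to two statements, so only the first one would be
 * guarded by an unbraced if, and the trailing ';' breaks if/else chains. */
#define EMIT_QW_BAD(w1, w2)  printf("%u\n", (w1)); printf("%u\n", (w2));

/* Safe form: expands to exactly one statement and still wants a ';'. */
#define EMIT_QW(w1, w2) do { \
	printf("%u\n", (w1)); \
	printf("%u\n", (w2)); \
} while (0)

int main(void)
{
	int fire = 0;

	if (fire)
		EMIT_QW(1u, 2u);	/* both writes are correctly skipped */
	else
		printf("nothing queued\n");

	return 0;
}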
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 4c54f043068..9b5b4d9dd62 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -70,7 +70,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
- while(num_desc--) {
+ while (num_desc--) {
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
@@ -174,19 +174,19 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
struct page *page;
int i;
- switch(vsg->state) {
+ switch (vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(pdev, vsg);
case dr_via_desc_pages_alloc:
- for (i=0; i<vsg->num_desc_pages; ++i) {
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
- free_page((unsigned long)vsg->desc_pages[i]);
+ free_page((unsigned long)vsg->desc_pages[i]);
}
kfree(vsg->desc_pages);
case dr_via_pages_locked:
- for (i=0; i<vsg->num_pages; ++i) {
- if ( NULL != (page = vsg->pages[i])) {
- if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ for (i = 0; i < vsg->num_pages; ++i) {
+ if (NULL != (page = vsg->pages[i])) {
+ if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@@ -232,7 +232,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
@@ -268,7 +268,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
- vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
+ vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
@@ -276,7 +276,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
return -ENOMEM;
vsg->state = dr_via_desc_pages_alloc;
- for (i=0; i<vsg->num_desc_pages; ++i) {
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
@@ -318,21 +318,20 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
int cur;
int done_transfer;
- unsigned long irqsave=0;
+ unsigned long irqsave = 0;
uint32_t status = 0;
DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
engine, from_irq, (unsigned long) blitq);
- if (from_irq) {
+ if (from_irq)
spin_lock(&blitq->blit_lock);
- } else {
+ else
spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
done_transfer = blitq->is_active &&
- (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+ ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
@@ -377,18 +376,16 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
if (!timer_pending(&blitq->poll_timer))
mod_timer(&blitq->poll_timer, jiffies + 1);
} else {
- if (timer_pending(&blitq->poll_timer)) {
+ if (timer_pending(&blitq->poll_timer))
del_timer(&blitq->poll_timer);
- }
via_dmablit_engine_off(dev, engine);
}
}
- if (from_irq) {
+ if (from_irq)
spin_unlock(&blitq->blit_lock);
- } else {
+ else
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- }
}
@@ -414,10 +411,9 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
((blitq->cur_blit_handle - handle) <= (1 << 23));
if (queue && active) {
- slot = handle - blitq->done_blit_handle + blitq->cur -1;
- if (slot >= VIA_NUM_BLIT_SLOTS) {
+ slot = handle - blitq->done_blit_handle + blitq->cur - 1;
+ if (slot >= VIA_NUM_BLIT_SLOTS)
slot -= VIA_NUM_BLIT_SLOTS;
- }
*queue = blitq->blit_queue + slot;
}
@@ -506,12 +502,12 @@ via_dmablit_workqueue(struct work_struct *work)
int cur_released;
- DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while(blitq->serviced != blitq->cur) {
+ while (blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
@@ -545,13 +541,13 @@ via_dmablit_workqueue(struct work_struct *work)
void
via_init_dmablit(struct drm_device *dev)
{
- int i,j;
+ int i, j;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
pci_set_master(dev->pdev);
- for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
+ for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
blitq->cur_blit_handle = 0;
@@ -564,9 +560,8 @@ via_init_dmablit(struct drm_device *dev)
blitq->is_active = 0;
blitq->aborting = 0;
spin_lock_init(&blitq->blit_lock);
- for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
+ for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
- }
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
@@ -685,18 +680,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
- int ret=0;
+ int ret = 0;
unsigned long irqsave;
DRM_DEBUG("Num free is %d\n", blitq->num_free);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while(blitq->num_free == 0) {
+ while (blitq->num_free == 0) {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
- if (ret) {
+ if (ret)
return (-EINTR == ret) ? -EAGAIN : ret;
- }
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
@@ -719,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->num_free++;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAKEUP( &blitq->busy_queue );
+ DRM_WAKEUP(&blitq->busy_queue);
}
/*
@@ -744,9 +738,8 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
engine = (xfer->to_fb) ? 0 : 1;
blitq = dev_priv->blit_queues + engine;
- if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+ if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
return ret;
- }
if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
via_dmablit_release_slot(blitq);
return -ENOMEM;
@@ -780,7 +773,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
*/
int
-via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_blitsync_t *sync = data;
int err;
@@ -804,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
*/
int
-via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_dmablit_t *xfer = data;
int err;
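
Besides the style cleanups, via_dmablit_grab_slot()/via_dmablit_release_slot() above show the usual drop-the-lock-and-sleep pattern for waiting until a blit slot frees up. Roughly the same shape in plain C with pthreads, as a sketch only: the blitq_* names are illustrative stand-ins, not the driver's types, and a condition variable plays the role of the DRM wait queue.

#include <pthread.h>

struct blitq {
	pthread_mutex_t lock;
	pthread_cond_t  busy;
	int             num_free;
};

#define BLITQ_INIT(n) { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, (n) }

void blitq_grab_slot(struct blitq *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->num_free == 0)
		/* atomically releases the lock, sleeps, and retakes it on wakeup */
		pthread_cond_wait(&q->busy, &q->lock);
	q->num_free--;
	pthread_mutex_unlock(&q->lock);
}

void blitq_release_slot(struct blitq *q)
{
	pthread_mutex_lock(&q->lock);
	q->num_free++;
	pthread_mutex_unlock(&q->lock);
	pthread_cond_signal(&q->busy);	/* plays the role of DRM_WAKEUP() */
}

static struct blitq q = BLITQ_INIT(2);

int main(void)
{
	blitq_grab_slot(&q);
	blitq_release_slot(&q);
	return 0;
}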
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
index 7408a547a03..9b662a327ce 100644
--- a/drivers/gpu/drm/via/via_dmablit.h
+++ b/drivers/gpu/drm/via/via_dmablit.h
@@ -45,12 +45,12 @@ typedef struct _drm_via_sg_info {
int num_desc;
enum dma_data_direction direction;
unsigned char *bounce_buffer;
- dma_addr_t chain_start;
+ dma_addr_t chain_start;
uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
+ unsigned int descriptors_per_page;
int aborted;
enum {
- dr_via_device_mapped,
+ dr_via_device_mapped,
dr_via_desc_pages_alloc,
dr_via_pages_locked,
dr_via_pages_alloc,
@@ -68,7 +68,7 @@ typedef struct _drm_via_blitq {
unsigned num_free;
unsigned num_outstanding;
unsigned long end;
- int aborting;
+ int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
spinlock_t blit_lock;
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index cafcb844a22..9cf87d91232 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -107,9 +107,9 @@ enum via_family {
#define VIA_BASE ((dev_priv->mmio))
#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
-#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
+#define VIA_WRITE(reg, val) DRM_WRITE32(VIA_BASE, reg, val)
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
-#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
+#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
extern struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
@@ -121,28 +121,28 @@ extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *fil
extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
-extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
+extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
extern int via_driver_unload(struct drm_device *dev);
-extern int via_init_context(struct drm_device * dev, int context);
-extern int via_final_context(struct drm_device * dev, int context);
+extern int via_init_context(struct drm_device *dev, int context);
+extern int via_final_context(struct drm_device *dev, int context);
-extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_do_cleanup_map(struct drm_device *dev);
extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
-extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_preinstall(struct drm_device *dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
-extern void via_driver_irq_uninstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device *dev);
-extern int via_dma_cleanup(struct drm_device * dev);
+extern int via_dma_cleanup(struct drm_device *dev);
extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(struct drm_device * dev);
+extern int via_driver_dma_quiescent(struct drm_device *dev);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 34079f251cd..d391f48ef87 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -141,11 +141,10 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&cur_irq->irq_received);
DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1;
- if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
via_dmablit_handler(dev, 0, 1);
- } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+ else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
via_dmablit_handler(dev, 1, 1);
- }
}
cur_irq++;
}
@@ -160,7 +159,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
{
u32 status;
@@ -207,7 +206,7 @@ void via_disable_vblank(struct drm_device *dev, int crtc)
}
static int
-via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
+via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -260,7 +259,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
* drm_dma.h hooks
*/
-void via_driver_irq_preinstall(struct drm_device * dev)
+void via_driver_irq_preinstall(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -329,7 +328,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void via_driver_irq_uninstall(struct drm_device * dev)
+void via_driver_irq_uninstall(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6e6f9159163..6cca9a709f7 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -25,7 +25,7 @@
#include "via_drm.h"
#include "via_drv.h"
-static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
+static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
{
drm_via_private_t *dev_priv = dev->dev_private;
@@ -68,7 +68,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
return 0;
}
-int via_do_cleanup_map(struct drm_device * dev)
+int via_do_cleanup_map(struct drm_device *dev)
{
via_dma_cleanup(dev);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index f694cb5eded..6cc2dadae3e 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -31,7 +31,7 @@
#include "drm_sman.h"
#define VIA_MM_ALIGN_SHIFT 4
-#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
+#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@@ -172,7 +172,7 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
-void via_reclaim_buffers_locked(struct drm_device * dev,
+void via_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
@@ -183,9 +183,8 @@ void via_reclaim_buffers_locked(struct drm_device * dev,
return;
}
- if (dev->driver->dma_quiescent) {
+ if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- }
drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 46a57919874..48957b856d4 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -235,7 +235,7 @@ static hazard_t table2[256];
static hazard_t table3[256];
static __inline__ int
-eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
+eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
{
if ((buf_end - *buf) >= num_words) {
*buf += num_words;
@@ -252,7 +252,7 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset,
unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
struct drm_map_list *r_list;
drm_local_map_t *map = seq->map_cache;
@@ -344,7 +344,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
}
static __inline__ int
-investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
{
register uint32_t tmp, *tmp_addr;
@@ -518,7 +518,7 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t * cur_seq)
+ drm_via_state_t *cur_seq)
{
drm_via_private_t *dev_priv =
(drm_via_private_t *) cur_seq->dev->dev_private;
@@ -621,8 +621,8 @@ via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
}
static __inline__ verifier_state_t
-via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t * hc_state)
+via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
+ drm_via_state_t *hc_state)
{
uint32_t cmd;
int hz_mode;
@@ -706,16 +706,15 @@ via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
return state_error;
}
}
- if (hc_state->unfinished && finish_current_sequence(hc_state)) {
+ if (hc_state->unfinished && finish_current_sequence(hc_state))
return state_error;
- }
*buffer = buf;
return state_command;
}
static __inline__ verifier_state_t
-via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end, int *fire_count)
+via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end, int *fire_count)
{
uint32_t cmd;
const uint32_t *buf = *buffer;
@@ -833,8 +832,8 @@ via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
}
static __inline__ verifier_state_t
-via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end)
+via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
{
register uint32_t cmd;
const uint32_t *buf = *buffer;
@@ -851,7 +850,7 @@ via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
}
static __inline__ verifier_state_t
-via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
+via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
{
uint32_t data;
const uint32_t *buf = *buffer;
@@ -884,8 +883,8 @@ via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
}
static __inline__ verifier_state_t
-via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end)
+via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
{
uint32_t addr, count, i;
const uint32_t *buf = *buffer;
@@ -893,9 +892,8 @@ via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
addr = *buf++ & ~VIA_VIDEOMASK;
i = count = *buf;
buf += 3;
- while (i--) {
+ while (i--)
VIA_WRITE(addr, *buf++);
- }
if (count & 3)
buf += 4 - (count & 3);
*buffer = buf;
@@ -940,8 +938,8 @@ via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
}
static __inline__ verifier_state_t
-via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end)
+via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
{
uint32_t addr, count, i;
@@ -1037,7 +1035,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
}
int
-via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
+via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size)
{
@@ -1085,9 +1083,8 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
return -EINVAL;
}
}
- if (state == state_error) {
+ if (state == state_error)
return -EINVAL;
- }
return 0;
}
@@ -1096,13 +1093,11 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
int i;
- for (i = 0; i < 256; ++i) {
+ for (i = 0; i < 256; ++i)
table[i] = forbidden_command;
- }
- for (i = 0; i < size; ++i) {
+ for (i = 0; i < size; ++i)
table[init_table[i].code] = init_table[i].hz;
- }
}
void via_init_command_verifier(void)
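
The setup_hazard_table() change at the end of this file is only brace removal, but the function itself illustrates the verifier's whitelist approach: fill the whole 256-entry opcode table with a "forbidden" default, then patch in the known-good commands. A reduced standalone sketch, with illustrative enum values rather than the driver's hazard_t:

#include <stddef.h>
#include <stdint.h>

enum hazard { forbidden_cmd, check_dest_addr, no_check };

struct hz_init {
	uint8_t     code;
	enum hazard hz;
};

static enum hazard table[256];

static void setup_table(const struct hz_init *init, size_t n)
{
	size_t i;

	for (i = 0; i < 256; ++i)
		table[i] = forbidden_cmd;		/* default: reject */
	for (i = 0; i < n; ++i)
		table[init[i].code] = init[i].hz;	/* whitelist known opcodes */
}

int main(void)
{
	static const struct hz_init init[] = {
		{ 0x20, check_dest_addr },
		{ 0x22, no_check },
	};

	setup_table(init, sizeof(init) / sizeof(init[0]));
	return table[0x20] == check_dest_addr ? 0 : 1;
}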
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
index d6f8214b69f..26b6d361ab9 100644
--- a/drivers/gpu/drm/via/via_verifier.h
+++ b/drivers/gpu/drm/via/via_verifier.h
@@ -54,8 +54,8 @@ typedef struct {
const uint32_t *buf_start;
} drm_via_state_t;
-extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
- struct drm_device * dev, int agp);
+extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
+ struct drm_device *dev, int agp);
extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size);
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6efac8117c9..675d311f038 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -29,7 +29,7 @@
#include "via_drm.h"
#include "via_drv.h"
-void via_init_futex(drm_via_private_t * dev_priv)
+void via_init_futex(drm_via_private_t *dev_priv)
{
unsigned int i;
@@ -41,11 +41,11 @@ void via_init_futex(drm_via_private_t * dev_priv)
}
}
-void via_cleanup_futex(drm_via_private_t * dev_priv)
+void via_cleanup_futex(drm_via_private_t *dev_priv)
{
}
-void via_release_futex(drm_via_private_t * dev_priv, int context)
+void via_release_futex(drm_via_private_t *dev_priv, int context)
{
unsigned int i;
volatile int *lock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b793c8c9acb..a96ed6d9d01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
/**
* Ioctl definitions.
*/
static struct drm_ioctl_desc vmw_ioctls[] = {
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+ VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+ VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
{0, 0, 0}
};
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
static void vmw_print_capabilities(uint32_t capabilities)
{
DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
- vmw_kms_save_vga(dev_priv);
-
ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
static void vmw_release_device(struct vmw_private *dev_priv)
{
vmw_fifo_release(dev_priv, &dev_priv->fifo);
- vmw_kms_restore_vga(dev_priv);
}
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+ int ret = 0;
+
+ mutex_lock(&dev_priv->release_mutex);
+ if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ --dev_priv->num_3d_resources;
+ }
+ mutex_unlock(&dev_priv->release_mutex);
+ return ret;
+}
+
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+ int32_t n3d;
+
+ mutex_lock(&dev_priv->release_mutex);
+ if (unlikely(--dev_priv->num_3d_resources == 0))
+ vmw_release_device(dev_priv);
+ n3d = (int32_t) dev_priv->num_3d_resources;
+ mutex_unlock(&dev_priv->release_mutex);
+
+ BUG_ON(n3d < 0);
+}
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->last_read_sequence = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
+ mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
idr_init(&dev_priv->context_idr);
idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ dev_priv->enable_fb = enable_fbdev;
+
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = dev_priv;
- if (!dev->devname)
- dev->devname = vmw_devname;
-
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed installing irq: %d\n", ret);
- goto out_no_irq;
- }
- }
-
ret = pci_request_regions(dev->pdev, "vmwgfx probe");
dev_priv->stealth = (ret != 0);
if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_device;
}
}
- ret = vmw_request_device(dev_priv);
+ ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
- goto out_no_device;
- vmw_kms_init(dev_priv);
+ goto out_no_kms;
vmw_overlay_init(dev_priv);
- vmw_fb_init(dev_priv);
+ if (dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
+ vmw_kms_save_vga(dev_priv);
+ vmw_fb_init(dev_priv);
+ DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+ "Detected device 3D availability.\n" :
+ "Detected no device 3D availability.\n");
+ } else {
+ DRM_INFO("Delayed 3D detection since we're not "
+ "running the device in SVGA mode yet.\n");
+ }
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
register_pm_notifier(&dev_priv->pm_nb);
- DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
-
return 0;
-out_no_device:
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
- if (dev->devname == vmw_devname)
- dev->devname = NULL;
out_no_irq:
+ if (dev_priv->enable_fb) {
+ vmw_fb_close(dev_priv);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
+out_no_fifo:
+ vmw_overlay_close(dev_priv);
+ vmw_kms_close(dev_priv);
+out_no_kms:
+ if (dev_priv->stealth)
+ pci_release_region(dev->pdev, 2);
+ else
+ pci_release_regions(dev->pdev);
+out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
- vmw_fb_close(dev_priv);
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev_priv->enable_fb) {
+ vmw_fb_close(dev_priv);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
- vmw_release_device(dev_priv);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
pci_release_regions(dev->pdev);
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
- if (dev->devname == vmw_devname)
- dev->devname = NULL;
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd != cmd)) {
+ if (unlikely(ioctl->cmd_drv != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
+ if (!dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv);
+ if (unlikely(ret != 0))
+ return ret;
+ vmw_kms_save_vga(dev_priv);
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+ mutex_unlock(&dev_priv->hw_mutex);
+ }
+
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
return 0;
out_no_active_lock:
- vmw_release_device(dev_priv);
+ if (!dev_priv->enable_fb) {
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
return ret;
}
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ if (!dev_priv->enable_fb) {
+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Unable to clean VRAM on master drop.\n");
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
+
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
- vmw_fb_on(dev_priv);
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
}
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
.irq_postinstall = vmw_irq_postinstall,
.irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
+ .get_vblank_counter = vmw_get_vblank_counter,
.reclaim_buffers_locked = NULL,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
@@ -764,7 +839,7 @@ static struct drm_driver driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ return drm_get_pci_dev(pdev, ent, &driver);
}
static int __init vmwgfx_init(void)
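
The most interesting part of this file is not the error-path reshuffle but the new vmw_3d_resource_inc()/vmw_3d_resource_dec() pair: a mutex-protected counter that brings the device up for the first user, tears it down after the last one, and rolls the count back if bring-up fails. The same shape in plain C with pthreads; request_device()/release_device() here are hypothetical placeholders for the expensive steps, not the driver's functions.

#include <pthread.h>

struct res_counter {
	pthread_mutex_t lock;
	unsigned int    users;
};

static int  request_device(void) { return 0; }	/* placeholder bring-up */
static void release_device(void) { }		/* placeholder tear-down */

int res_get(struct res_counter *r)
{
	int ret = 0;

	pthread_mutex_lock(&r->lock);
	if (r->users++ == 0) {
		ret = request_device();		/* first user powers the resource up */
		if (ret != 0)
			--r->users;		/* roll back so a later caller retries */
	}
	pthread_mutex_unlock(&r->lock);
	return ret;
}

void res_put(struct res_counter *r)
{
	pthread_mutex_lock(&r->lock);
	if (--r->users == 0)
		release_device();		/* last user powers it back down */
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct res_counter r = { PTHREAD_MUTEX_INITIALIZER, 0 };

	if (res_get(&r) == 0)
		res_put(&r);
	return 0;
}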
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index eaad5209533..58de6393f61 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -164,7 +164,7 @@ struct vmw_vga_topology_state {
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct vmw_fifo_state fifo;
@@ -277,6 +277,7 @@ struct vmw_private {
bool stealth;
bool is_opened;
+ bool enable_fb;
/**
* Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
+
+ struct mutex release_mutex;
+ uint32_t num_3d_resources;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+
/**
* GMR utilities - vmwgfx_gmr.c
*/
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned bbp, unsigned depth);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
/**
* Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b0866f04ec7..409e172f4ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -528,7 +528,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
* Dirty & Deferred IO
*/
par->dirty.x1 = par->dirty.x2 = 0;
- par->dirty.y1 = par->dirty.y1 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
info->fbdefio = &vmw_defio;
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.mm_node->start < bo->num_pages)
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+ false, false);
+
ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
/* Could probably bug on */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e6a1eb7ea95..0fe31766e4c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->config_done_state);
vmw_write(dev_priv, SVGA_REG_ENABLE,
dev_priv->enable_state);
+ vmw_write(dev_priv, SVGA_REG_TRACES,
+ dev_priv->traces_state);
mutex_unlock(&dev_priv->hw_mutex);
vmw_fence_queue_takedown(&fifo->fence_queue);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 437ac786277..e882ba099f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -737,7 +737,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- return NULL;
+ return ERR_PTR(ret);
}
return &vfb->base;
@@ -747,7 +747,7 @@ try_dmabuf:
ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
if (ret) {
DRM_ERROR("failed to find buffer: %i\n", ret);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
@@ -758,7 +758,7 @@ try_dmabuf:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- return NULL;
+ return ERR_PTR(ret);
}
return &vfb->base;
@@ -768,7 +768,7 @@ err_not_scanout:
/* vmw_user_surface_lookup takes one ref */
vmw_surface_unreference(&surface);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ if (i == 0 && vmw_priv->num_displays == 1 &&
+ save->width == 0 && save->height == 0) {
+
+ /*
+ * It should be fairly safe to assume that these
+ * values are uninitialized.
+ */
+
+ save->width = vmw_priv->vga_width - save->pos_x;
+ save->height = vmw_priv->vga_height - save->pos_y;
+ }
}
+
return 0;
}
@@ -984,3 +996,8 @@ out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ return 0;
+}
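
The vmw_kms_fb_create() hunks above replace bare NULL returns with ERR_PTR(ret), so callers can tell which error occurred instead of guessing. The kernel's real helpers live in <linux/err.h>; the following is only a simplified userspace re-creation of the idea, encoding a small negative errno in the pointer value, with an illustrative lookup_buffer() stand-in:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline int   is_err(const void *ptr)
{
	/* The top MAX_ERRNO addresses are never valid objects. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_answer = 42;

static void *lookup_buffer(int handle)
{
	if (handle != 1)
		return err_ptr(-ENOENT);	/* "no such handle", not just NULL */
	return &the_answer;
}

int main(void)
{
	void *bo = lookup_buffer(7);

	if (is_err(bo))
		printf("lookup failed: %ld\n", ptr_err(bo));
	else
		printf("found buffer\n");
	return 0;
}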
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index cfaf690a5b2..11cb39e3acc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,8 @@
#include "vmwgfx_kms.h"
+#define VMWGFX_LDU_NUM_DU 8
+
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
@@ -79,7 +81,7 @@ static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t start, uint32_t size)
{
}
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
}
static enum drm_connector_status
- vmw_ldu_connector_detect(struct drm_connector *connector)
+ vmw_ldu_connector_detect(struct drm_connector *connector,
+ bool force)
{
if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- connector->status = vmw_ldu_connector_detect(connector);
+ connector->status = vmw_ldu_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+ int i;
+ int ret;
+
if (dev_priv->ldu_priv) {
DRM_INFO("ldu system already on\n");
return -EINVAL;
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
drm_mode_create_dirty_info_property(dev_priv->dev);
- vmw_ldu_init(dev_priv, 0);
- /* for old hardware without multimon only enable one display */
if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_ldu_init(dev_priv, 1);
- vmw_ldu_init(dev_priv, 2);
- vmw_ldu_init(dev_priv, 3);
- vmw_ldu_init(dev_priv, 4);
- vmw_ldu_init(dev_priv, 5);
- vmw_ldu_init(dev_priv, 6);
- vmw_ldu_init(dev_priv, 7);
+ for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+ vmw_ldu_init(dev_priv, i);
+ ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+ } else {
+ /* for old hardware without multimon only enable one display */
+ vmw_ldu_init(dev_priv, 0);
+ ret = drm_vblank_init(dev, 1);
}
- return 0;
+ return ret;
}
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
+ drm_vblank_cleanup(dev);
if (!dev_priv->ldu_priv)
return -ENOSYS;
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
ldu->pref_height = 600;
ldu->pref_active = false;
}
- con->status = vmw_ldu_connector_detect(con);
+ con->status = vmw_ldu_connector_detect(con, true);
}
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5f2d5df01e5..c8c40e9979d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv);
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
}
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
cmd->body.sid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv);
}
void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
}
vmw_fifo_commit(dev_priv, submit_size);
+ (void) vmw_3d_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_surface_destroy);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e3df4adfb4d..83123287c60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -44,29 +44,29 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
-static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
+static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
{
DRM_INFO("global init.\n");
return ttm_mem_global_init(ref->object);
}
-static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
+static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
- struct ttm_global_reference *global_ref;
+ struct drm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &vmw_ttm_mem_global_init;
global_ref->release = &vmw_ttm_mem_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting.\n");
return ret;
@@ -75,11 +75,11 @@ int vmw_ttm_global_init(struct vmw_private *dev_priv)
dev_priv->bo_global_ref.mem_glob =
dev_priv->mem_global_ref.object;
global_ref = &dev_priv->bo_global_ref.ref;
- global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM buffer objects.\n");
@@ -88,12 +88,12 @@ int vmw_ttm_global_init(struct vmw_private *dev_priv)
return 0;
out_no_bo:
- ttm_global_item_unref(&dev_priv->mem_global_ref);
+ drm_global_item_unref(&dev_priv->mem_global_ref);
return ret;
}
void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
- ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
- ttm_global_item_unref(&dev_priv->mem_global_ref);
+ drm_global_item_unref(&dev_priv->bo_global_ref.ref);
+ drm_global_item_unref(&dev_priv->mem_global_ref);
}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b1..f366f968155 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
{
struct vga_device *vgadev;
unsigned long flags;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e635199a0cd..3f729248602 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1299,6 +1302,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
@@ -1577,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index f44bdc084cb..8ca7f65cf2f 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field,
{
struct egalax_data *td = hid_get_drvdata(hid);
+ /* Note, eGalax has two product lines: the first is resistive and
+ * uses a standard parallel multitouch protocol (product ID ==
+ * 48xx). The second is capacitive and uses an unusual "serial"
+ * protocol with a different message for each multitouch finger
+ * (product ID == 72xx). We do not yet generate a correct event
+ * sequence for the capacitive/serial protocol.
+ */
if (hid->claimed & HID_CLAIMED_INPUT) {
struct input_dev *input = field->hidinput->input;
@@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev)
static const struct hid_device_id egalax_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ }
};
MODULE_DEVICE_TABLE(hid, egalax_devices);
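The comment above describes the two eGalax product lines, while the driver itself only matches the two device IDs. As a rough, hypothetical sketch (not taken from the patch; the helper name and the include are invented for illustration), the capacitive/serial 72xx devices could be told apart from the resistive/parallel 48xx ones by product ID once a correct serial event sequence exists:

#include <linux/hid.h>

/* Hypothetical helper: 0x480d is the resistive/parallel controller,
 * 0x720c the capacitive/serial one described in the comment above. */
static bool egalax_is_serial_protocol(const struct hid_device *hdev)
{
	return (hdev->product & 0xff00) == 0x7200;
}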
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d3fc13ae094..765a4f53eb5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
+#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,6 +129,7 @@
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
#define USB_VENDOR_ID_CANDO 0x2087
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -149,6 +151,7 @@
#define USB_VENDOR_ID_CHICONY 0x04f2
#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
+#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_VENDOR_ID_CIDC 0x1677
@@ -188,6 +191,7 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -506,6 +510,7 @@
#define USB_VENDOR_ID_UCLOGIC 0x5543
#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
#define USB_VENDOR_ID_VERNIER 0x08f7
#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c1890..ac5421d568f 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
static const struct hid_device_id mosart_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ }
};
MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 346f0e34987..bc2e0774062 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info)
ref_cnt--;
mutex_lock(&info->lock);
(*ref_cnt)--;
- may_release = !ref_cnt;
+ may_release = !*ref_cnt;
mutex_unlock(&info->lock);
if (may_release) {
- framebuffer_release(info);
vfree((u8 *)info->fix.smem_start);
+ framebuffer_release(info);
}
}
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f851f85..956ed9ac19d 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
{ }
};
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 807dcd1555a..724f46ed612 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -230,7 +230,7 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
input_report_key(input, BTN_RIGHT, 0);
input_report_key(input, BTN_MIDDLE, 0);
input_report_abs(input, ABS_DISTANCE,
- input->absmax[ABS_DISTANCE]);
+ input_abs_get_max(input, ABS_DISTANCE));
} else {
input_report_key(input, BTN_TOUCH, 0);
input_report_key(input, BTN_STYLUS, 0);
@@ -383,38 +383,37 @@ move_on:
/* Basics */
input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
- input->absbit[0] |= BIT(ABS_X) | BIT(ABS_Y) |
- BIT(ABS_PRESSURE) | BIT(ABS_DISTANCE);
- input->relbit[0] |= BIT(REL_WHEEL);
- set_bit(BTN_TOOL_PEN, input->keybit);
- set_bit(BTN_TOUCH, input->keybit);
- set_bit(BTN_STYLUS, input->keybit);
- set_bit(BTN_STYLUS2, input->keybit);
- set_bit(BTN_LEFT, input->keybit);
- set_bit(BTN_RIGHT, input->keybit);
- set_bit(BTN_MIDDLE, input->keybit);
+
+ __set_bit(REL_WHEEL, input->relbit);
+
+ __set_bit(BTN_TOOL_PEN, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_STYLUS, input->keybit);
+ __set_bit(BTN_STYLUS2, input->keybit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_RIGHT, input->keybit);
+ __set_bit(BTN_MIDDLE, input->keybit);
/* Pad */
input->evbit[0] |= BIT(EV_MSC);
- input->mscbit[0] |= BIT(MSC_SERIAL);
- set_bit(BTN_0, input->keybit);
- set_bit(BTN_1, input->keybit);
- set_bit(BTN_TOOL_FINGER, input->keybit);
- /* Distance, rubber and mouse */
- input->absbit[0] |= BIT(ABS_DISTANCE);
- set_bit(BTN_TOOL_RUBBER, input->keybit);
- set_bit(BTN_TOOL_MOUSE, input->keybit);
+ __set_bit(MSC_SERIAL, input->mscbit);
- input->absmax[ABS_PRESSURE] = 511;
- input->absmax[ABS_DISTANCE] = 32;
+ __set_bit(BTN_0, input->keybit);
+ __set_bit(BTN_1, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
- input->absmax[ABS_X] = 16704;
- input->absmax[ABS_Y] = 12064;
- input->absfuzz[ABS_X] = 4;
- input->absfuzz[ABS_Y] = 4;
+ /* Distance, rubber and mouse */
+ __set_bit(BTN_TOOL_RUBBER, input->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input->keybit);
+
+ input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
return 0;
+
err_free:
kfree(wdata);
return ret;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c028667..599041a7f67 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
}
} else {
int skipped_report_id = 0;
+ int report_id = buf[0];
if (buf[0] == 0x0) {
/* Don't send the Report ID */
buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | *buf,
+ ((report_type + 1) << 8) | report_id,
interface->desc.bInterfaceNumber, buf, count,
USB_CTRL_SET_TIMEOUT);
/* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
{ }
};
+struct usb_interface *usbhid_find_interface(int minor)
+{
+ return usb_find_interface(&hid_driver, minor);
+}
+
static struct hid_driver hid_usb_driver = {
.name = "generic-usb",
.id_table = hid_usb_table,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d314762..70da3181c8a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 254a003af04..681e620eb95 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file)
{
struct hiddev_list *list;
struct usb_interface *intf;
+ struct hid_device *hid;
struct hiddev *hiddev;
int res;
- intf = usb_find_interface(&hiddev_driver, iminor(inode));
+ intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
- hiddev = usb_get_intfdata(intf);
+ hid = usb_get_intfdata(intf);
+ hiddev = hid->hiddev;
if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
return -ENOMEM;
@@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hiddev_list *list = file->private_data;
struct hiddev *hiddev = list->hiddev;
struct hid_device *hid = hiddev->hid;
- struct usb_device *dev = hid_to_usb_dev(hid);
+ struct usb_device *dev;
struct hiddev_collection_info cinfo;
struct hiddev_report_info rinfo;
struct hiddev_field_info finfo;
@@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Called without BKL by compat methods so no BKL taken */
/* FIXME: Who or what stop this racing with a disconnect ?? */
- if (!hiddev->exist)
+ if (!hiddev->exist || !hid)
return -EIO;
+ dev = hid_to_usb_dev(hid);
+
switch (cmd) {
case HIDIOCGVERSION:
@@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hid->hiddev = hiddev;
hiddev->hid = hid;
hiddev->exist = 1;
- usb_set_intfdata(usbhid->intf, usbhid);
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
err_hid("Not able to get a minor for this device.");
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e720d..89d2e847dcc 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@ void usbhid_submit_report
(struct hid_device *hid, struct hid_report *report, unsigned char dir);
int usbhid_get_power(struct hid_device *hid);
void usbhid_put_power(struct hid_device *hid);
+struct usb_interface *usbhid_find_interface(int minor);
/* iofl flags */
#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e19cf8eb6cc..97499d00615 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -405,7 +405,14 @@ config SENSORS_CORETEMP
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
- are supported. Check documentation/driver for details.
+ are supported. Check Documentation/hwmon/coretemp for details.
+
+config SENSORS_PKGTEMP
+ tristate "Intel processor package temperature sensor"
+ depends on X86 && EXPERIMENTAL
+ help
+ If you say yes here you get support for the package-level temperature
+ sensor inside your CPU. Check Documentation/hwmon/pkgtemp for details.
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
@@ -446,6 +453,28 @@ config SENSORS_IT87
This driver can also be built as a module. If so, the module
will be called it87.
+config SENSORS_JZ4740
+ tristate "Ingenic JZ4740 SoC ADC driver"
+ depends on MACH_JZ4740 && MFD_JZ4740_ADC
+ help
+ If you say yes here you get support for reading ADC values from the ADCIN
+ pin on Ingenic JZ4740 SoC based boards.
+
+ This driver can also be built as a module. If so, the module will be
+ called jz4740-hwmon.
+
+config SENSORS_JC42
+ tristate "JEDEC JC42.4 compliant temperature sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for JEDEC JC42.4 compliant
+ temperature sensors. Support will include, but not be limited to,
+ ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+ MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+
+ This driver can also be built as a module. If so, the module
+ will be called jc42.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and LM64"
depends on I2C
@@ -683,7 +712,8 @@ config SENSORS_PC87427
functions of the National Semiconductor PC87427 Super-I/O chip.
The chip has two distinct logical devices, one for fan speed
monitoring and control, and one for voltage and temperature
- monitoring. Only fan speed monitoring is supported right now.
+ monitoring. Fan speed monitoring and control are supported, as
+ well as temperature monitoring. Voltages aren't supported yet.
This driver can also be built as a module. If so, the module
will be called pc87427.
@@ -739,6 +769,21 @@ config SENSORS_SIS5595
This driver can also be built as a module. If so, the module
will be called sis5595.
+config SENSORS_SMM665
+ tristate "Summit Microelectronics SMM665"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ If you say yes here you get support for the hardware monitoring
+ features of the Summit Microelectronics SMM665/SMM665B Six-Channel
+ Active DC Output Controller / Monitor.
+
+ Other supported chips are SMM465, SMM665C, SMM764, and SMM766.
+ Support for those chips is untested.
+
+ This driver can also be built as a module. If so, the module will
+ be called smm665.
+
config SENSORS_DME1737
tristate "SMSC DME1737, SCH311x and compatibles"
depends on I2C && EXPERIMENTAL
@@ -761,6 +806,16 @@ config SENSORS_EMC1403
Threshold values can be configured using sysfs.
Data from the different diodes are accessible via sysfs.
+config SENSORS_EMC2103
+ tristate "SMSC EMC2103"
+ depends on I2C
+ help
+ If you say yes here you get support for the temperature
+ and fan sensors of the SMSC EMC2103 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called emc2103.
+
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
help
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 2138ceb1a71..e3c2484f6c5 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -39,9 +39,11 @@ obj-$(CONFIG_SENSORS_AMS) += ams/
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
+obj-$(CONFIG_SENSORS_PKGTEMP) += pkgtemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
+obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
@@ -55,6 +57,8 @@ obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
+obj-$(CONFIG_SENSORS_JC42) += jc42.o
+obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
@@ -86,6 +90,7 @@ obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
+obj-$(CONFIG_SENSORS_SMM665) += smm665.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a9616af..0683e6be662 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@ struct adm1031_data {
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- unsigned int update_rate; /* In milliseconds */
+ unsigned int update_interval; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
-/* Update Rate */
-static const unsigned int update_rates[] = {
+/* Update Interval */
+static const unsigned int update_intervals[] = {
16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};
-static ssize_t show_update_rate(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "%u\n", data->update_rate);
+ return sprintf(buf, "%u\n", data->update_interval);
}
-static ssize_t set_update_rate(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
if (err)
return err;
- /* find the nearest update rate from the table */
- for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
- if (val >= update_rates[i])
+ /*
+ * Find the nearest update interval from the table.
+ * Use it to determine the matching update rate.
+ */
+ for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
+ if (val >= update_intervals[i])
break;
}
- /* if not found, we point to the last entry (lowest update rate) */
+ /* if not found, we point to the last entry (lowest update interval) */
/* set the new update rate while preserving other settings */
reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
mutex_lock(&data->update_lock);
- data->update_rate = update_rates[i];
+ data->update_interval = update_intervals[i];
mutex_unlock(&data->update_lock);
return count;
}
-static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
- set_update_rate);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+ set_update_interval);
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
- &dev_attr_update_rate.attr,
+ &dev_attr_update_interval.attr,
&dev_attr_alarms.attr,
NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
mask = ADM1031_UPDATE_RATE_MASK;
read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
- data->update_rate = update_rates[i];
+ /* Save it as update interval */
+ data->update_interval = update_intervals[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ next_update = data->last_updated
+ + msecs_to_jiffies(data->update_interval);
if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index b300a2048af..52319340e18 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = {
static int __devinit ads7871_probe(struct spi_device *spi)
{
- int status, ret, err = 0;
+ int ret, err;
uint8_t val;
struct ads7871_data *pdata;
dev_dbg(&spi->dev, "probe\n");
- pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
- if (!pdata) {
- err = -ENOMEM;
- goto exit;
- }
-
- status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
- if (status < 0)
- goto error_free;
-
- pdata->hwmon_dev = hwmon_device_register(&spi->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- goto error_remove;
- }
-
- spi_set_drvdata(spi, pdata);
-
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
spi->bits_per_word = 8;
@@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi)
we need to make sure we really have a chip*/
if (val != ret) {
err = -ENODEV;
+ goto exit;
+ }
+
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+ if (err < 0)
+ goto error_free;
+
+ spi_set_drvdata(spi, pdata);
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
goto error_remove;
}
diff --git a/drivers/hwmon/ams/ams.h b/drivers/hwmon/ams/ams.h
index b28d7e27a03..90f094d4545 100644
--- a/drivers/hwmon/ams/ams.h
+++ b/drivers/hwmon/ams/ams.h
@@ -23,7 +23,7 @@ struct ams {
/* General properties */
struct device_node *of_node;
- struct of_device *of_dev;
+ struct platform_device *of_dev;
char has_device;
char vflag;
u32 orient1;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 3b973f30b1f..89b4f3babe8 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1150,9 +1150,6 @@ static int asc7621_detect(struct i2c_client *client,
{
struct i2c_adapter *adapter = client->adapter;
int company, verstep, chip_index;
- struct device *dev;
-
- dev = &client->dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -1169,13 +1166,11 @@ static int asc7621_detect(struct i2c_client *client,
if (company == asc7621_chips[chip_index].company_id &&
verstep == asc7621_chips[chip_index].verstep_id) {
- strlcpy(client->name, asc7621_chips[chip_index].name,
- I2C_NAME_SIZE);
strlcpy(info->type, asc7621_chips[chip_index].name,
I2C_NAME_SIZE);
- dev_info(&adapter->dev, "Matched %s\n",
- asc7621_chips[chip_index].name);
+ dev_info(&adapter->dev, "Matched %s at 0x%02x\n",
+ asc7621_chips[chip_index].name, client->addr);
return 0;
}
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 05344af5073..a23b17a78ac 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#define DRVNAME "coretemp"
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
int err;
struct platform_device *pdev;
struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check this bit only, all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+ if (!cpu_has(c, X86_FEATURE_DTS)) {
+ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+ " has no thermal sensor.\n", c->x86_model);
+ return 0;
+ }
mutex_lock(&pdev_list_mutex);
@@ -480,17 +490,24 @@ exit:
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
static void coretemp_device_remove(unsigned int cpu)
{
- struct pdev_entry *p, *n;
+ struct pdev_entry *p;
+ unsigned int i;
+
mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- if (p->cpu == cpu) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
+ list_for_each_entry(p, &pdev_list, list) {
+ if (p->cpu != cpu)
+ continue;
+
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
+ kfree(p);
+ for_each_cpu(i, cpu_sibling_mask(cpu))
+ if (i != cpu && !coretemp_device_add(i))
+ break;
+ return;
}
mutex_unlock(&pdev_list_mutex);
}
@@ -515,12 +532,10 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
static struct notifier_block coretemp_cpu_notifier __refdata = {
.notifier_call = coretemp_cpu_callback,
};
-#endif /* !CONFIG_HOTPLUG_CPU */
static int __init coretemp_init(void)
{
int i, err = -ENODEV;
- struct pdev_entry *p, *n;
/* quick check if we run Intel */
if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
@@ -530,43 +545,23 @@ static int __init coretemp_init(void)
if (err)
goto exit;
- for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
- /*
- * CPUID.06H.EAX[0] indicates whether the CPU has thermal
- * sensors. We check this bit only, all the early CPUs
- * without thermal sensors will be filtered out.
- */
- if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) {
- err = coretemp_device_add(i);
- if (err)
- goto exit_devices_unreg;
+ for_each_online_cpu(i)
+ coretemp_device_add(i);
- } else {
- printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
- " has no thermal sensor.\n", c->x86_model);
- }
- }
+#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
err = -ENODEV;
goto exit_driver_unreg;
}
+#endif
-#ifdef CONFIG_HOTPLUG_CPU
register_hotcpu_notifier(&coretemp_cpu_notifier);
-#endif
return 0;
-exit_devices_unreg:
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
+#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
platform_driver_unregister(&coretemp_driver);
+#endif
exit:
return err;
}
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 0e4b5642638..8dee3f38fdf 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -89,6 +89,35 @@ static ssize_t store_temp(struct device *dev,
return count;
}
+static ssize_t store_bit(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+ unsigned long val;
+ int retval;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ retval = i2c_smbus_read_byte_data(client, sda->nr);
+ if (retval < 0)
+ goto fail;
+
+ retval &= ~sda->index;
+ if (val)
+ retval |= sda->index;
+
+ retval = i2c_smbus_write_byte_data(client, sda->nr, retval);
+ if (retval == 0)
+ retval = count;
+fail:
+ mutex_unlock(&data->mutex);
+ return retval;
+}
+
static ssize_t show_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -200,6 +229,9 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
show_hyst, store_hyst, 0x1A);
+static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR,
+ show_bit, store_bit, 0x03, 0x40);
+
static struct attribute *mid_att_thermal[] = {
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -225,6 +257,7 @@ static struct attribute *mid_att_thermal[] = {
&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_power_state.dev_attr.attr,
NULL
};
@@ -275,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
if (res) {
dev_warn(&client->dev, "create group failed\n");
- hwmon_device_unregister(data->hwmon_dev);
goto thermal_error1;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
new file mode 100644
index 00000000000..af914ad93ec
--- /dev/null
+++ b/drivers/hwmon/emc2103.c
@@ -0,0 +1,740 @@
+/*
+ emc2103.c - Support for SMSC EMC2103
+ Copyright (c) 2010 SMSC
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x2E, I2C_CLIENT_END };
+
+static const u8 REG_TEMP[4] = { 0x00, 0x02, 0x04, 0x06 };
+static const u8 REG_TEMP_MIN[4] = { 0x3c, 0x38, 0x39, 0x3a };
+static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
+
+#define REG_CONF1 0x20
+#define REG_TEMP_MAX_ALARM 0x24
+#define REG_TEMP_MIN_ALARM 0x25
+#define REG_FAN_CONF1 0x42
+#define REG_FAN_TARGET_LO 0x4c
+#define REG_FAN_TARGET_HI 0x4d
+#define REG_FAN_TACH_HI 0x4e
+#define REG_FAN_TACH_LO 0x4f
+#define REG_PRODUCT_ID 0xfd
+#define REG_MFG_ID 0xfe
+
+/* equation 4 from datasheet: rpm = (3932160 * multiplier) / count */
+#define FAN_RPM_FACTOR 3932160
+
+/* 2103-2 and 2103-4's 3rd temperature sensor can be connected to two diodes
+ * in anti-parallel mode, and in this configuration both can be read
+ * independently (so we have 4 temperature inputs). The device can't
+ * detect if it's connected in this mode, so we have to manually enable
+ * it. Default is to leave the device in the state it's already in (-1).
+ * This parameter allows APD mode to be optionally forced on or off */
+static int apd = -1;
+module_param(apd, bool, 0);
+MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
+
+struct temperature {
+ s8 degrees;
+ u8 fraction; /* 0-7 multiples of 0.125 */
+};
+
+struct emc2103_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ bool valid; /* registers are valid */
+ bool fan_rpm_control;
+ int temp_count; /* num of temp sensors */
+ unsigned long last_updated; /* in jiffies */
+ struct temperature temp[4]; /* internal + 3 external */
+ s8 temp_min[4]; /* no fractional part */
+ s8 temp_max[4]; /* no fractional part */
+ u8 temp_min_alarm;
+ u8 temp_max_alarm;
+ u8 fan_multiplier;
+ u16 fan_tach;
+ u16 fan_target;
+};
+
+static int read_u8_from_i2c(struct i2c_client *client, u8 i2c_reg, u8 *output)
+{
+ int status = i2c_smbus_read_byte_data(client, i2c_reg);
+ if (status < 0) {
+ dev_warn(&client->dev, "reg 0x%02x, err %d\n",
+ i2c_reg, status);
+ } else {
+ *output = status;
+ }
+ return status;
+}
+
+static void read_temp_from_i2c(struct i2c_client *client, u8 i2c_reg,
+ struct temperature *temp)
+{
+ u8 degrees, fractional;
+
+ if (read_u8_from_i2c(client, i2c_reg, &degrees) < 0)
+ return;
+
+ if (read_u8_from_i2c(client, i2c_reg + 1, &fractional) < 0)
+ return;
+
+ temp->degrees = degrees;
+ temp->fraction = (fractional & 0xe0) >> 5;
+}
+
+static void read_fan_from_i2c(struct i2c_client *client, u16 *output,
+ u8 hi_addr, u8 lo_addr)
+{
+ u8 high_byte, lo_byte;
+
+ if (read_u8_from_i2c(client, hi_addr, &high_byte) < 0)
+ return;
+
+ if (read_u8_from_i2c(client, lo_addr, &lo_byte) < 0)
+ return;
+
+ *output = ((u16)high_byte << 5) | (lo_byte >> 3);
+}
+
+static void write_fan_target_to_i2c(struct i2c_client *client, u16 new_target)
+{
+ u8 high_byte = (new_target & 0x1fe0) >> 5;
+ u8 low_byte = (new_target & 0x001f) << 3;
+ i2c_smbus_write_byte_data(client, REG_FAN_TARGET_LO, low_byte);
+ i2c_smbus_write_byte_data(client, REG_FAN_TARGET_HI, high_byte);
+}
+
+static void read_fan_config_from_i2c(struct i2c_client *client)
+{
+ struct emc2103_data *data = i2c_get_clientdata(client);
+ u8 conf1;
+
+ if (read_u8_from_i2c(client, REG_FAN_CONF1, &conf1) < 0)
+ return;
+
+ data->fan_multiplier = 1 << ((conf1 & 0x60) >> 5);
+ data->fan_rpm_control = (conf1 & 0x80) != 0;
+}
+
+static struct emc2103_data *emc2103_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct emc2103_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ int i;
+
+ for (i = 0; i < data->temp_count; i++) {
+ read_temp_from_i2c(client, REG_TEMP[i], &data->temp[i]);
+ read_u8_from_i2c(client, REG_TEMP_MIN[i],
+ &data->temp_min[i]);
+ read_u8_from_i2c(client, REG_TEMP_MAX[i],
+ &data->temp_max[i]);
+ }
+
+ read_u8_from_i2c(client, REG_TEMP_MIN_ALARM,
+ &data->temp_min_alarm);
+ read_u8_from_i2c(client, REG_TEMP_MAX_ALARM,
+ &data->temp_max_alarm);
+
+ read_fan_from_i2c(client, &data->fan_tach,
+ REG_FAN_TACH_HI, REG_FAN_TACH_LO);
+ read_fan_from_i2c(client, &data->fan_target,
+ REG_FAN_TARGET_HI, REG_FAN_TARGET_LO);
+ read_fan_config_from_i2c(client);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static ssize_t
+show_temp(struct device *dev, struct device_attribute *da, char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct emc2103_data *data = emc2103_update_device(dev);
+ int millidegrees = data->temp[nr].degrees * 1000
+ + data->temp[nr].fraction * 125;
+ return sprintf(buf, "%d\n", millidegrees);
+}
+
+static ssize_t
+show_temp_min(struct device *dev, struct device_attribute *da, char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct emc2103_data *data = emc2103_update_device(dev);
+ int millidegrees = data->temp_min[nr] * 1000;
+ return sprintf(buf, "%d\n", millidegrees);
+}
+
+static ssize_t
+show_temp_max(struct device *dev, struct device_attribute *da, char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct emc2103_data *data = emc2103_update_device(dev);
+ int millidegrees = data->temp_max[nr] * 1000;
+ return sprintf(buf, "%d\n", millidegrees);
+}
+
+static ssize_t
+show_temp_fault(struct device *dev, struct device_attribute *da, char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct emc2103_data *data = emc2103_update_device(dev);
+ bool fault = (data->temp[nr].degrees == -128);
+ return sprintf(buf, "%d\n", fault ? 1 : 0);
+}
+
+static ssize_t
+show_temp_min_alarm(struct device *dev, struct device_attribute *da, char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct emc2103_data *data = emc2103_update_device(dev);
+ bool alarm = data->temp_min_alarm & (1 << nr);
+ return sprintf(buf, "%d\n", alarm ? 1 : 0);
+}
+
+static ssize_t
+show_temp_max_alarm(struct device *dev, struct device_attribute *da, char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct emc2103_data *data = emc2103_update_device(dev);
+ bool alarm = data->temp_max_alarm & (1 << nr);
+ return sprintf(buf, "%d\n", alarm ? 1 : 0);
+}
+
+static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct emc2103_data *data = i2c_get_clientdata(client);
+ long val;
+
+ int result = strict_strtol(buf, 10, &val);
+ if (result < 0)
+ return -EINVAL;
+
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ if ((val < -63) || (val > 127))
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->temp_min[nr] = val;
+ i2c_smbus_write_byte_data(client, REG_TEMP_MIN[nr], val);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct emc2103_data *data = i2c_get_clientdata(client);
+ long val;
+
+ int result = strict_strtol(buf, 10, &val);
+ if (result < 0)
+ return -EINVAL;
+
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ if ((val < -63) || (val > 127))
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->temp_max[nr] = val;
+ i2c_smbus_write_byte_data(client, REG_TEMP_MAX[nr], val);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t
+show_fan(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ int rpm = 0;
+ if (data->fan_tach != 0)
+ rpm = (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach;
+ return sprintf(buf, "%d\n", rpm);
+}
+
+static ssize_t
+show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ int fan_div = 8 / data->fan_multiplier;
+ return sprintf(buf, "%d\n", fan_div);
+}
+
+/* Note: we also update the fan target here, because its value is
+ determined in part by the fan clock divider. This follows the principle
+ of least surprise; the user doesn't expect the fan target to change just
+ because the divider changed. */
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ int new_range_bits, old_div = 8 / data->fan_multiplier;
+ long new_div;
+
+ int status = strict_strtol(buf, 10, &new_div);
+ if (status < 0)
+ return -EINVAL;
+
+ if (new_div == old_div) /* No change */
+ return count;
+
+ switch (new_div) {
+ case 1:
+ new_range_bits = 3;
+ break;
+ case 2:
+ new_range_bits = 2;
+ break;
+ case 4:
+ new_range_bits = 1;
+ break;
+ case 8:
+ new_range_bits = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->update_lock);
+
+ status = i2c_smbus_read_byte_data(client, REG_FAN_CONF1);
+ if (status < 0) {
+ dev_dbg(&client->dev, "reg 0x%02x, err %d\n",
+ REG_FAN_CONF1, status);
+ mutex_unlock(&data->update_lock);
+ return -EIO;
+ }
+ status &= 0x9F;
+ status |= (new_range_bits << 5);
+ i2c_smbus_write_byte_data(client, REG_FAN_CONF1, status);
+
+ data->fan_multiplier = 8 / new_div;
+
+ /* update fan target if high byte is not disabled */
+ if ((data->fan_target & 0x1fe0) != 0x1fe0) {
+ u16 new_target = (data->fan_target * old_div) / new_div;
+ data->fan_target = min(new_target, (u16)0x1fff);
+ write_fan_target_to_i2c(client, data->fan_target);
+ }
+
+ /* invalidate data to force re-read from hardware */
+ data->valid = false;
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ int rpm = 0;
+
+ /* high byte of 0xff indicates disabled so return 0 */
+ if ((data->fan_target != 0) && ((data->fan_target & 0x1fe0) != 0x1fe0))
+ rpm = (FAN_RPM_FACTOR * data->fan_multiplier)
+ / data->fan_target;
+
+ return sprintf(buf, "%d\n", rpm);
+}
+
+static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ long rpm_target;
+
+ int result = strict_strtol(buf, 10, &rpm_target);
+ if (result < 0)
+ return -EINVAL;
+
+ /* Datasheet states 16384 as maximum RPM target (table 3.2) */
+ if ((rpm_target < 0) || (rpm_target > 16384))
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (rpm_target == 0)
+ data->fan_target = 0x1fff;
+ else
+ data->fan_target = SENSORS_LIMIT(
+ (FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
+ 0, 0x1fff);
+
+ write_fan_target_to_i2c(client, data->fan_target);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0);
+ return sprintf(buf, "%d\n", fault ? 1 : 0);
+}
+
+static ssize_t
+show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct emc2103_data *data = emc2103_update_device(dev);
+ return sprintf(buf, "%d\n", data->fan_rpm_control ? 3 : 0);
+}
+
+static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct emc2103_data *data = i2c_get_clientdata(client);
+ long new_value;
+ u8 conf_reg;
+
+ int result = strict_strtol(buf, 10, &new_value);
+ if (result < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ switch (new_value) {
+ case 0:
+ data->fan_rpm_control = false;
+ break;
+ case 3:
+ data->fan_rpm_control = true;
+ break;
+ default:
+ mutex_unlock(&data->update_lock);
+ return -EINVAL;
+ }
+
+ read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+
+ if (data->fan_rpm_control)
+ conf_reg |= 0x80;
+ else
+ conf_reg &= ~0x80;
+
+ i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp_min,
+ set_temp_min, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm,
+ NULL, 0);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp_min,
+ set_temp_min, 1);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm,
+ NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm,
+ NULL, 1);
+
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp_min,
+ set_temp_min, 2);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ set_temp_max, 2);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm,
+ NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm,
+ NULL, 2);
+
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR, show_temp_min,
+ set_temp_min, 3);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
+ set_temp_max, 3);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm,
+ NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm,
+ NULL, 3);
+
+static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
+static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div);
+static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target,
+ set_fan_target);
+static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL);
+
+static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+ set_pwm_enable);
+
+/* sensors present on all models */
+static struct attribute *emc2103_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_fan1_div.attr,
+ &dev_attr_fan1_target.attr,
+ &dev_attr_fan1_fault.attr,
+ &dev_attr_pwm1_enable.attr,
+ NULL
+};
+
+/* extra temperature sensors only present on 2103-2 and 2103-4 */
+static struct attribute *emc2103_attributes_temp3[] = {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ NULL
+};
+
+/* extra temperature sensors only present on 2103-2 and 2103-4 in APD mode */
+static struct attribute *emc2103_attributes_temp4[] = {
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group emc2103_group = {
+ .attrs = emc2103_attributes,
+};
+
+static const struct attribute_group emc2103_temp3_group = {
+ .attrs = emc2103_attributes_temp3,
+};
+
+static const struct attribute_group emc2103_temp4_group = {
+ .attrs = emc2103_attributes_temp4,
+};
+
+static int
+emc2103_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct emc2103_data *data;
+ int status;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ data = kzalloc(sizeof(struct emc2103_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* 2103-2 and 2103-4 have 3 external diodes, 2103-1 has 1 */
+ status = i2c_smbus_read_byte_data(client, REG_PRODUCT_ID);
+ if (status == 0x24) {
+ /* 2103-1 only has 1 external diode */
+ data->temp_count = 2;
+ } else {
+ /* 2103-2 and 2103-4 have 3 or 4 external diodes */
+ status = i2c_smbus_read_byte_data(client, REG_CONF1);
+ if (status < 0) {
+ dev_dbg(&client->dev, "reg 0x%02x, err %d\n", REG_CONF1,
+ status);
+ goto exit_free;
+ }
+
+ /* detect current state of hardware */
+ data->temp_count = (status & 0x01) ? 4 : 3;
+
+ /* force APD state if module parameter is set */
+ if (apd == 0) {
+ /* force APD mode off */
+ data->temp_count = 3;
+ status &= ~(0x01);
+ i2c_smbus_write_byte_data(client, REG_CONF1, status);
+ } else if (apd == 1) {
+ /* force APD mode on */
+ data->temp_count = 4;
+ status |= 0x01;
+ i2c_smbus_write_byte_data(client, REG_CONF1, status);
+ }
+ }
+
+ /* Register sysfs hooks */
+ status = sysfs_create_group(&client->dev.kobj, &emc2103_group);
+ if (status)
+ goto exit_free;
+
+ if (data->temp_count >= 3) {
+ status = sysfs_create_group(&client->dev.kobj,
+ &emc2103_temp3_group);
+ if (status)
+ goto exit_remove;
+ }
+
+ if (data->temp_count == 4) {
+ status = sysfs_create_group(&client->dev.kobj,
+ &emc2103_temp4_group);
+ if (status)
+ goto exit_remove_temp3;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ status = PTR_ERR(data->hwmon_dev);
+ goto exit_remove_temp4;
+ }
+
+ dev_info(&client->dev, "%s: sensor '%s'\n",
+ dev_name(data->hwmon_dev), client->name);
+
+ return 0;
+
+exit_remove_temp4:
+ if (data->temp_count == 4)
+ sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
+exit_remove_temp3:
+ if (data->temp_count >= 3)
+ sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
+exit_remove:
+ sysfs_remove_group(&client->dev.kobj, &emc2103_group);
+exit_free:
+ kfree(data);
+ return status;
+}
+
+static int emc2103_remove(struct i2c_client *client)
+{
+ struct emc2103_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ if (data->temp_count == 4)
+ sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
+
+ if (data->temp_count >= 3)
+ sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
+
+ sysfs_remove_group(&client->dev.kobj, &emc2103_group);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id emc2103_ids[] = {
+ { "emc2103", 0, },
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i2c, emc2103_ids);
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int
+emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = new_client->adapter;
+ int manufacturer, product;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ manufacturer = i2c_smbus_read_byte_data(new_client, REG_MFG_ID);
+ if (manufacturer != 0x5D)
+ return -ENODEV;
+
+ product = i2c_smbus_read_byte_data(new_client, REG_PRODUCT_ID);
+ if ((product != 0x24) && (product != 0x26))
+ return -ENODEV;
+
+ strlcpy(info->type, "emc2103", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static struct i2c_driver emc2103_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc2103",
+ },
+ .probe = emc2103_probe,
+ .remove = emc2103_remove,
+ .id_table = emc2103_ids,
+ .detect = emc2103_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init sensors_emc2103_init(void)
+{
+ return i2c_add_driver(&emc2103_driver);
+}
+
+static void __exit sensors_emc2103_exit(void)
+{
+ i2c_del_driver(&emc2103_driver);
+}
+
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_DESCRIPTION("SMSC EMC2103 hwmon driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_emc2103_init);
+module_exit(sensors_emc2103_exit);
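As a worked illustration of "equation 4" used by show_fan() and show_fan_target() above: a tach reading of 3840 with a multiplier of 1 corresponds to 3932160 / 3840 = 1024 RPM. The sketch below is illustrative only and not part of the driver; the helper name is invented. The apd module parameter added above can likewise be forced at load time, e.g. "modprobe emc2103 apd=0".

#include <linux/types.h>

/* Illustrative only: equation 4, rpm = (3932160 * multiplier) / count,
 * guarding against a zero tach count just as show_fan() does. */
static unsigned int emc2103_tach_to_rpm(u16 tach_count, u8 multiplier)
{
	if (tach_count == 0)
		return 0;
	return (3932160u * multiplier) / tach_count;
}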
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 537841ef44b..75afb3b0e07 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
/* Super-I/O Function prototypes */
static inline int superio_inb(int base, int reg);
static inline int superio_inw(int base, int reg);
-static inline void superio_enter(int base);
+static inline int superio_enter(int base);
static inline void superio_select(int base, int ld);
static inline void superio_exit(int base);
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
return val;
}
-static inline void superio_enter(int base)
+static inline int superio_enter(int base)
{
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_muxed_region(base, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ base);
+ return -EBUSY;
+ }
+
/* according to the datasheet the key must be send twice! */
outb(SIO_UNLOCK_KEY, base);
outb(SIO_UNLOCK_KEY, base);
+
+ return 0;
}
static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
static inline void superio_exit(int base)
{
outb(SIO_LOCK_KEY, base);
+ release_region(base, 2);
}
static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
static int __init f71882fg_find(int sioaddr, unsigned short *address,
struct f71882fg_sio_data *sio_data)
{
- int err = -ENODEV;
u16 devid;
-
- /* Don't step on other drivers' I/O space by accident */
- if (!request_region(sioaddr, 2, DRVNAME)) {
- printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
- (int)sioaddr);
- return -EBUSY;
- }
-
- superio_enter(sioaddr);
+ int err = superio_enter(sioaddr);
+ if (err)
+ return err;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID) {
pr_debug(DRVNAME ": Not a Fintek device\n");
+ err = -ENODEV;
goto exit;
}
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
default:
printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
(unsigned int)devid);
+ err = -ENODEV;
goto exit;
}
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
printk(KERN_WARNING DRVNAME ": Device not activated\n");
+ err = -ENODEV;
goto exit;
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
if (*address == 0) {
printk(KERN_WARNING DRVNAME ": Base address not set\n");
+ err = -ENODEV;
goto exit;
}
*address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
- release_region(sioaddr, 2);
return err;
}
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc5334..9638d58f99f 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
#define F75375_REG_PWM2_DROP_DUTY 0x6C
#define FAN_CTRL_LINEAR(nr) (4 + nr)
-#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
/*
* Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode = ~(3 << FAN_CTRL_MODE(nr));
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
switch (val) {
case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf = ~(1 << FAN_CTRL_LINEAR(nr));
+ conf &= ~(1 << FAN_CTRL_LINEAR(nr));
if (val == 0)
conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index be2d131e405..bfd42f18924 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -522,6 +522,7 @@ static struct dmi_system_id __initdata hdaps_whitelist[] = {
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T42"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T43"),
+ HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T400", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T60", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61", HDAPS_BOTH_AXES),
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55e67e..36e95753223 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+ AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+ AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 25763d2223b..f7701295937 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -259,6 +259,7 @@ struct it87_sio_data {
u8 revision;
u8 vid_value;
u8 beep_pin;
+ u8 internal; /* Internal sensors can be labeled */
/* Features skipped based on config or DMI */
u8 skip_vid;
u8 skip_fan;
@@ -1194,6 +1195,22 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static ssize_t show_label(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ static const char *labels[] = {
+ "+5V",
+ "5VSB",
+ "Vbat",
+ };
+ int nr = to_sensor_dev_attr(attr)->index;
+
+ return sprintf(buf, "%s\n", labels[nr]);
+}
+static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
+
static ssize_t show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
{
@@ -1434,6 +1451,17 @@ static const struct attribute_group it87_group_vid = {
.attrs = it87_attributes_vid,
};
+static struct attribute *it87_attributes_label[] = {
+ &sensor_dev_attr_in3_label.dev_attr.attr,
+ &sensor_dev_attr_in7_label.dev_attr.attr,
+ &sensor_dev_attr_in8_label.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group it87_group_label = {
+ .attrs = it87_attributes_label,
+};
+
/* SuperIO detection - will change isa_address if a chip is found */
static int __init it87_find(unsigned short *address,
struct it87_sio_data *sio_data)
@@ -1487,6 +1515,9 @@ static int __init it87_find(unsigned short *address,
pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n",
chip_type, *address, sio_data->revision);
+ /* in8 (Vbat) is always internal */
+ sio_data->internal = (1 << 2);
+
/* Read GPIO config and VID value from LDN 7 (GPIO) */
if (sio_data->type == it87) {
/* The IT8705F doesn't have VID pins at all */
@@ -1540,9 +1571,9 @@ static int __init it87_find(unsigned short *address,
pr_notice("it87: Routing internal VCCH to in7\n");
}
if (reg & (1 << 0))
- pr_info("it87: in3 is VCC (+5V)\n");
+ sio_data->internal |= (1 << 0);
if (reg & (1 << 1))
- pr_info("it87: in7 is VCCH (+5V Stand-By)\n");
+ sio_data->internal |= (1 << 1);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
}
@@ -1600,6 +1631,7 @@ static void it87_remove_files(struct device *dev)
}
if (!sio_data->skip_vid)
sysfs_remove_group(&dev->kobj, &it87_group_vid);
+ sysfs_remove_group(&dev->kobj, &it87_group_label);
}
static int __devinit it87_probe(struct platform_device *pdev)
@@ -1725,6 +1757,16 @@ static int __devinit it87_probe(struct platform_device *pdev)
goto ERROR4;
}
+ /* Export labels for internal sensors */
+ for (i = 0; i < 3; i++) {
+ if (!(sio_data->internal & (1 << i)))
+ continue;
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_label[i]);
+ if (err)
+ goto ERROR4;
+ }
+
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
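
The label files above are driven by the "internal" bit vector filled in by it87_find(): bit 0 marks in3 as VCC (+5V), bit 1 marks in7 as VCCH (5VSB), and bit 2 marks in8 (Vbat), which is always internal. A minimal standalone sketch of that bit-to-label mapping, mirroring the patch (the helper below and its output format are illustrative, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the it87 "internal" bit vector: bit 0 -> in3, bit 1 -> in7, bit 2 -> in8 */
static const char *labels[] = { "+5V", "5VSB", "Vbat" };
static const int in_index[] = { 3, 7, 8 };

static void print_internal_labels(uint8_t internal)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!(internal & (1 << i)))
			continue;
		printf("in%d_label = %s\n", in_index[i], labels[i]);
	}
}

int main(void)
{
	/* example: Vbat (always internal) plus VCC routed to in3 */
	print_internal_labels((1 << 2) | (1 << 0));
	return 0;
}
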
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
new file mode 100644
index 00000000000..340fc78c8dd
--- /dev/null
+++ b/drivers/hwmon/jc42.c
@@ -0,0 +1,593 @@
+/*
+ * jc42.c - driver for Jedec JC42.4 compliant temperature sensors
+ *
+ * Copyright (c) 2010 Ericsson AB.
+ *
+ * Derived from lm77.c by Andras BALI <drewie@freemail.hu>.
+ *
+ * JC42.4 compliant temperature sensors are typically used on memory modules.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = {
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, I2C_CLIENT_END };
+
+/* JC42 registers. All registers are 16 bit. */
+#define JC42_REG_CAP 0x00
+#define JC42_REG_CONFIG 0x01
+#define JC42_REG_TEMP_UPPER 0x02
+#define JC42_REG_TEMP_LOWER 0x03
+#define JC42_REG_TEMP_CRITICAL 0x04
+#define JC42_REG_TEMP 0x05
+#define JC42_REG_MANID 0x06
+#define JC42_REG_DEVICEID 0x07
+
+/* Status bits in temperature register */
+#define JC42_ALARM_CRIT_BIT 15
+#define JC42_ALARM_MAX_BIT 14
+#define JC42_ALARM_MIN_BIT 13
+
+/* Configuration register defines */
+#define JC42_CFG_CRIT_ONLY (1 << 2)
+#define JC42_CFG_SHUTDOWN (1 << 8)
+#define JC42_CFG_HYST_SHIFT 9
+#define JC42_CFG_HYST_MASK 0x03
+
+/* Capabilities */
+#define JC42_CAP_RANGE (1 << 2)
+
+/* Manufacturer IDs */
+#define ADT_MANID 0x11d4 /* Analog Devices */
+#define MAX_MANID 0x004d /* Maxim */
+#define IDT_MANID 0x00b3 /* IDT */
+#define MCP_MANID 0x0054 /* Microchip */
+#define NXP_MANID 0x1131 /* NXP Semiconductors */
+#define ONS_MANID 0x1b09 /* ON Semiconductor */
+#define STM_MANID 0x104a /* ST Microelectronics */
+
+/* Supported chips */
+
+/* Analog Devices */
+#define ADT7408_DEVID 0x0801
+#define ADT7408_DEVID_MASK 0xffff
+
+/* IDT */
+#define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */
+#define TS3000B3_DEVID_MASK 0xffff
+
+/* Maxim */
+#define MAX6604_DEVID 0x3e00
+#define MAX6604_DEVID_MASK 0xffff
+
+/* Microchip */
+#define MCP98242_DEVID 0x2000
+#define MCP98242_DEVID_MASK 0xfffc
+
+#define MCP98243_DEVID 0x2100
+#define MCP98243_DEVID_MASK 0xfffc
+
+#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */
+#define MCP9843_DEVID_MASK 0xfffe
+
+/* NXP */
+#define SE97_DEVID 0xa200
+#define SE97_DEVID_MASK 0xfffc
+
+#define SE98_DEVID 0xa100
+#define SE98_DEVID_MASK 0xfffc
+
+/* ON Semiconductor */
+#define CAT6095_DEVID 0x0800 /* Also matches CAT34TS02 */
+#define CAT6095_DEVID_MASK 0xffe0
+
+/* ST Microelectronics */
+#define STTS424_DEVID 0x0101
+#define STTS424_DEVID_MASK 0xffff
+
+#define STTS424E_DEVID 0x0000
+#define STTS424E_DEVID_MASK 0xfffe
+
+static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
+
+struct jc42_chips {
+ u16 manid;
+ u16 devid;
+ u16 devid_mask;
+};
+
+static struct jc42_chips jc42_chips[] = {
+ { ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
+ { IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK },
+ { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
+ { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
+ { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
+ { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
+ { NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
+ { ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
+ { NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
+ { STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
+ { STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
+};
+
+/* Each client has this additional data */
+struct jc42_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock; /* protect register access */
+ bool extended; /* true if extended range supported */
+ bool valid;
+ unsigned long last_updated; /* In jiffies */
+ u16 orig_config; /* original configuration */
+ u16 config; /* current configuration */
+ u16 temp_input; /* Temperatures */
+ u16 temp_crit;
+ u16 temp_min;
+ u16 temp_max;
+};
+
+static int jc42_probe(struct i2c_client *client,
+ const struct i2c_device_id *id);
+static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info);
+static int jc42_remove(struct i2c_client *client);
+static int jc42_read_value(struct i2c_client *client, u8 reg);
+static int jc42_write_value(struct i2c_client *client, u8 reg, u16 value);
+
+static struct jc42_data *jc42_update_device(struct device *dev);
+
+static const struct i2c_device_id jc42_id[] = {
+ { "adt7408", 0 },
+ { "cat94ts02", 0 },
+ { "cat6095", 0 },
+ { "jc42", 0 },
+ { "max6604", 0 },
+ { "mcp9805", 0 },
+ { "mcp98242", 0 },
+ { "mcp98243", 0 },
+ { "mcp9843", 0 },
+ { "se97", 0 },
+ { "se97b", 0 },
+ { "se98", 0 },
+ { "stts424", 0 },
+ { "tse2002b3", 0 },
+ { "ts3000b3", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, jc42_id);
+
+#ifdef CONFIG_PM
+
+static int jc42_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+
+ data->config |= JC42_CFG_SHUTDOWN;
+ jc42_write_value(client, JC42_REG_CONFIG, data->config);
+ return 0;
+}
+
+static int jc42_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+
+ data->config &= ~JC42_CFG_SHUTDOWN;
+ jc42_write_value(client, JC42_REG_CONFIG, data->config);
+ return 0;
+}
+
+static const struct dev_pm_ops jc42_dev_pm_ops = {
+ .suspend = jc42_suspend,
+ .resume = jc42_resume,
+};
+
+#define JC42_DEV_PM_OPS (&jc42_dev_pm_ops)
+#else
+#define JC42_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+/* This is the driver that will be inserted */
+static struct i2c_driver jc42_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "jc42",
+ .pm = JC42_DEV_PM_OPS,
+ },
+ .probe = jc42_probe,
+ .remove = jc42_remove,
+ .id_table = jc42_id,
+ .detect = jc42_detect,
+ .address_list = normal_i2c,
+};
+
+#define JC42_TEMP_MIN_EXTENDED (-40000)
+#define JC42_TEMP_MIN 0
+#define JC42_TEMP_MAX 125000
+
+static u16 jc42_temp_to_reg(int temp, bool extended)
+{
+ int ntemp = SENSORS_LIMIT(temp,
+ extended ? JC42_TEMP_MIN_EXTENDED :
+ JC42_TEMP_MIN, JC42_TEMP_MAX);
+
+ /* convert from 0.001 to 0.0625 resolution */
+ return (ntemp * 2 / 125) & 0x1fff;
+}
+
+static int jc42_temp_from_reg(s16 reg)
+{
+ reg &= 0x1fff;
+
+ /* sign extend register */
+ if (reg & 0x1000)
+ reg |= 0xf000;
+
+ /* convert from 0.0625 to 0.001 resolution */
+ return reg * 125 / 2;
+}
+
+/* sysfs stuff */
+
+/* read routines for temperature limits */
+#define show(value) \
+static ssize_t show_##value(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct jc42_data *data = jc42_update_device(dev); \
+ if (IS_ERR(data)) \
+ return PTR_ERR(data); \
+ return sprintf(buf, "%d\n", jc42_temp_from_reg(data->value)); \
+}
+
+show(temp_input);
+show(temp_crit);
+show(temp_min);
+show(temp_max);
+
+/* read routines for hysteresis values */
+static ssize_t show_temp_crit_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct jc42_data *data = jc42_update_device(dev);
+ int temp, hyst;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = jc42_temp_from_reg(data->temp_crit);
+ hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT)
+ & JC42_CFG_HYST_MASK];
+ return sprintf(buf, "%d\n", temp - hyst);
+}
+
+static ssize_t show_temp_max_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct jc42_data *data = jc42_update_device(dev);
+ int temp, hyst;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = jc42_temp_from_reg(data->temp_max);
+ hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT)
+ & JC42_CFG_HYST_MASK];
+ return sprintf(buf, "%d\n", temp - hyst);
+}
+
+/* write routines */
+#define set(value, reg) \
+static ssize_t set_##value(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct i2c_client *client = to_i2c_client(dev); \
+ struct jc42_data *data = i2c_get_clientdata(client); \
+ int err, ret = count; \
+ long val; \
+ if (strict_strtol(buf, 10, &val) < 0) \
+ return -EINVAL; \
+ mutex_lock(&data->update_lock); \
+ data->value = jc42_temp_to_reg(val, data->extended); \
+ err = jc42_write_value(client, reg, data->value); \
+ if (err < 0) \
+ ret = err; \
+ mutex_unlock(&data->update_lock); \
+ return ret; \
+}
+
+set(temp_min, JC42_REG_TEMP_LOWER);
+set(temp_max, JC42_REG_TEMP_UPPER);
+set(temp_crit, JC42_REG_TEMP_CRITICAL);
+
+/* JC42.4 compliant chips only support four hysteresis values.
+ * Pick best choice and go from there. */
+static ssize_t set_temp_crit_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+ int diff, hyst;
+ int err;
+ int ret = count;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ diff = jc42_temp_from_reg(data->temp_crit) - val;
+ hyst = 0;
+ if (diff > 0) {
+ if (diff < 2250)
+ hyst = 1; /* 1.5 degrees C */
+ else if (diff < 4500)
+ hyst = 2; /* 3.0 degrees C */
+ else
+ hyst = 3; /* 6.0 degrees C */
+ }
+
+ mutex_lock(&data->update_lock);
+ data->config = (data->config
+ & ~(JC42_CFG_HYST_MASK << JC42_CFG_HYST_SHIFT))
+ | (hyst << JC42_CFG_HYST_SHIFT);
+ err = jc42_write_value(client, JC42_REG_CONFIG, data->config);
+ if (err < 0)
+ ret = err;
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u16 bit = to_sensor_dev_attr(attr)->index;
+ struct jc42_data *data = jc42_update_device(dev);
+ u16 val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = data->temp_input;
+ if (bit != JC42_ALARM_CRIT_BIT && (data->config & JC42_CFG_CRIT_ONLY))
+ val = 0;
+ return sprintf(buf, "%u\n", (val >> bit) & 1);
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO,
+ show_temp_input, NULL);
+static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+ show_temp_crit, set_temp_crit);
+static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+ show_temp_min, set_temp_min);
+static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+ show_temp_max, set_temp_max);
+
+static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+ show_temp_crit_hyst, set_temp_crit_hyst);
+static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
+ show_temp_max_hyst, NULL);
+
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
+ JC42_ALARM_CRIT_BIT);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
+ JC42_ALARM_MIN_BIT);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
+ JC42_ALARM_MAX_BIT);
+
+static struct attribute *jc42_attributes[] = {
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp1_crit.attr,
+ &dev_attr_temp1_min.attr,
+ &dev_attr_temp1_max.attr,
+ &dev_attr_temp1_crit_hyst.attr,
+ &dev_attr_temp1_max_hyst.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group jc42_group = {
+ .attrs = jc42_attributes,
+};
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int jc42_detect(struct i2c_client *new_client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = new_client->adapter;
+ int i, config, cap, manid, devid;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ cap = jc42_read_value(new_client, JC42_REG_CAP);
+ config = jc42_read_value(new_client, JC42_REG_CONFIG);
+ manid = jc42_read_value(new_client, JC42_REG_MANID);
+ devid = jc42_read_value(new_client, JC42_REG_DEVICEID);
+
+ if (cap < 0 || config < 0 || manid < 0 || devid < 0)
+ return -ENODEV;
+
+ if ((cap & 0xff00) || (config & 0xf800))
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) {
+ struct jc42_chips *chip = &jc42_chips[i];
+ if (manid == chip->manid &&
+ (devid & chip->devid_mask) == chip->devid) {
+ strlcpy(info->type, "jc42", I2C_NAME_SIZE);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int jc42_probe(struct i2c_client *new_client,
+ const struct i2c_device_id *id)
+{
+ struct jc42_data *data;
+ int config, cap, err;
+
+ data = kzalloc(sizeof(struct jc42_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(new_client, data);
+ mutex_init(&data->update_lock);
+
+ cap = jc42_read_value(new_client, JC42_REG_CAP);
+ if (cap < 0) {
+ err = -EINVAL;
+ goto exit_free;
+ }
+ data->extended = !!(cap & JC42_CAP_RANGE);
+
+ config = jc42_read_value(new_client, JC42_REG_CONFIG);
+ if (config < 0) {
+ err = -EINVAL;
+ goto exit_free;
+ }
+ data->orig_config = config;
+ if (config & JC42_CFG_SHUTDOWN) {
+ config &= ~JC42_CFG_SHUTDOWN;
+ jc42_write_value(new_client, JC42_REG_CONFIG, config);
+ }
+ data->config = config;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&new_client->dev.kobj, &jc42_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ sysfs_remove_group(&new_client->dev.kobj, &jc42_group);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int jc42_remove(struct i2c_client *client)
+{
+ struct jc42_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &jc42_group);
+ if (data->config != data->orig_config)
+ jc42_write_value(client, JC42_REG_CONFIG, data->orig_config);
+ kfree(data);
+ return 0;
+}
+
+/* All registers are word-sized. */
+static int jc42_read_value(struct i2c_client *client, u8 reg)
+{
+ int ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+ return swab16(ret);
+}
+
+static int jc42_write_value(struct i2c_client *client, u8 reg, u16 value)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(value));
+}
+
+static struct jc42_data *jc42_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+ struct jc42_data *ret = data;
+ int val;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ val = jc42_read_value(client, JC42_REG_TEMP);
+ if (val < 0) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_input = val;
+
+ val = jc42_read_value(client, JC42_REG_TEMP_CRITICAL);
+ if (val < 0) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_crit = val;
+
+ val = jc42_read_value(client, JC42_REG_TEMP_LOWER);
+ if (val < 0) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_min = val;
+
+ val = jc42_read_value(client, JC42_REG_TEMP_UPPER);
+ if (val < 0) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_max = val;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static int __init sensors_jc42_init(void)
+{
+ return i2c_add_driver(&jc42_driver);
+}
+
+static void __exit sensors_jc42_exit(void)
+{
+ i2c_del_driver(&jc42_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("JC42 driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_jc42_init);
+module_exit(sensors_jc42_exit);
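
The temperature registers above hold a signed value in 0.0625 degree C steps in the low 13 bits, with bit 12 as the sign bit; jc42_temp_to_reg() and jc42_temp_from_reg() scale between that format and millidegrees. A standalone sketch of the same arithmetic, with a plain clamp standing in for SENSORS_LIMIT and the range limits copied from the defines above:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TEMP_MIN_EXTENDED (-40000)	/* millidegrees C */
#define TEMP_MIN 0
#define TEMP_MAX 125000

/* millidegrees C -> 13-bit register value in 0.0625 degC steps */
static uint16_t temp_to_reg(int temp, bool extended)
{
	int lo = extended ? TEMP_MIN_EXTENDED : TEMP_MIN;

	if (temp < lo)
		temp = lo;
	if (temp > TEMP_MAX)
		temp = TEMP_MAX;

	return (temp * 2 / 125) & 0x1fff;
}

/* 13-bit register value -> millidegrees C, with sign extension */
static int temp_from_reg(int16_t reg)
{
	reg &= 0x1fff;
	if (reg & 0x1000)		/* bit 12 is the sign bit */
		reg |= 0xf000;
	return reg * 125 / 2;
}

int main(void)
{
	printf("%d\n", temp_from_reg(temp_to_reg(-25500, true)));	/* -25500 */
	printf("%d\n", temp_from_reg(temp_to_reg(86250, false)));	/* 86250 */
	return 0;
}
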
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
new file mode 100644
index 00000000000..1c8b3d9e205
--- /dev/null
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC HWMON driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/completion.h>
+#include <linux/mfd/core.h>
+
+#include <linux/hwmon.h>
+
+struct jz4740_hwmon {
+ struct resource *mem;
+ void __iomem *base;
+
+ int irq;
+
+ struct mfd_cell *cell;
+ struct device *hwmon;
+
+ struct completion read_completion;
+
+ struct mutex lock;
+};
+
+static ssize_t jz4740_hwmon_show_name(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return sprintf(buf, "jz4740\n");
+}
+
+static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
+{
+ struct jz4740_hwmon *hwmon = data;
+
+ complete(&hwmon->read_completion);
+ return IRQ_HANDLED;
+}
+
+static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
+ struct completion *completion = &hwmon->read_completion;
+	long t;
+ unsigned long val;
+ int ret;
+
+ mutex_lock(&hwmon->lock);
+
+ INIT_COMPLETION(*completion);
+
+ enable_irq(hwmon->irq);
+ hwmon->cell->enable(to_platform_device(dev));
+
+ t = wait_for_completion_interruptible_timeout(completion, HZ);
+
+ if (t > 0) {
+ val = readw(hwmon->base) & 0xfff;
+ val = (val * 3300) >> 12;
+ ret = sprintf(buf, "%lu\n", val);
+ } else {
+ ret = t ? t : -ETIMEDOUT;
+ }
+
+ hwmon->cell->disable(to_platform_device(dev));
+ disable_irq(hwmon->irq);
+
+ mutex_unlock(&hwmon->lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL);
+static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);
+
+static struct attribute *jz4740_hwmon_attributes[] = {
+ &dev_attr_name.attr,
+ &dev_attr_in0_input.attr,
+ NULL
+};
+
+static const struct attribute_group jz4740_hwmon_attr_group = {
+ .attrs = jz4740_hwmon_attributes,
+};
+
+static int __devinit jz4740_hwmon_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jz4740_hwmon *hwmon;
+
+ hwmon = kmalloc(sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon) {
+ dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+ return -ENOMEM;
+ }
+
+ hwmon->cell = pdev->dev.platform_data;
+
+ hwmon->irq = platform_get_irq(pdev, 0);
+ if (hwmon->irq < 0) {
+ ret = hwmon->irq;
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
+ goto err_free;
+ }
+
+ hwmon->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!hwmon->mem) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
+ goto err_free;
+ }
+
+ hwmon->mem = request_mem_region(hwmon->mem->start,
+ resource_size(hwmon->mem), pdev->name);
+ if (!hwmon->mem) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to request mmio memory region\n");
+ goto err_free;
+ }
+
+ hwmon->base = ioremap_nocache(hwmon->mem->start,
+ resource_size(hwmon->mem));
+ if (!hwmon->base) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
+ goto err_release_mem_region;
+ }
+
+ init_completion(&hwmon->read_completion);
+ mutex_init(&hwmon->lock);
+
+ platform_set_drvdata(pdev, hwmon);
+
+ ret = request_irq(hwmon->irq, jz4740_hwmon_irq, 0, pdev->name, hwmon);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
+ goto err_iounmap;
+ }
+ disable_irq(hwmon->irq);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ hwmon->hwmon = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon->hwmon)) {
+ ret = PTR_ERR(hwmon->hwmon);
+ goto err_remove_file;
+ }
+
+ return 0;
+
+err_remove_file:
+ sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
+err_free_irq:
+ free_irq(hwmon->irq, hwmon);
+err_iounmap:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(hwmon->base);
+err_release_mem_region:
+ release_mem_region(hwmon->mem->start, resource_size(hwmon->mem));
+err_free:
+ kfree(hwmon);
+
+ return ret;
+}
+
+static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
+{
+ struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(hwmon->hwmon);
+ sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
+
+ free_irq(hwmon->irq, hwmon);
+
+ iounmap(hwmon->base);
+ release_mem_region(hwmon->mem->start, resource_size(hwmon->mem));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(hwmon);
+
+ return 0;
+}
+
+struct platform_driver jz4740_hwmon_driver = {
+ .probe = jz4740_hwmon_probe,
+ .remove = __devexit_p(jz4740_hwmon_remove),
+ .driver = {
+ .name = "jz4740-hwmon",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init jz4740_hwmon_init(void)
+{
+ return platform_driver_register(&jz4740_hwmon_driver);
+}
+module_init(jz4740_hwmon_init);
+
+static void __exit jz4740_hwmon_exit(void)
+{
+ platform_driver_unregister(&jz4740_hwmon_driver);
+}
+module_exit(jz4740_hwmon_exit);
+
+MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:jz4740-hwmon");
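
The in0_input conversion above turns the raw 12-bit ADC code into millivolts with (val * 3300) >> 12, i.e. the driver assumes a 3.3 V full-scale reference. The arithmetic in isolation:

#include <stdio.h>

/* 12-bit ADC code -> millivolts, assuming a 3.3 V full-scale reference */
static unsigned long adc_to_mv(unsigned long raw)
{
	raw &= 0xfff;			/* the register holds 12 valid bits */
	return (raw * 3300) >> 12;	/* raw / 4096 * 3300 mV */
}

int main(void)
{
	printf("%lu mV\n", adc_to_mv(0x800));	/* mid-scale: 1650 mV */
	printf("%lu mV\n", adc_to_mv(0xfff));	/* full-scale: 3299 mV */
	return 0;
}
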
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 8bdf80d9159..39ead2a4d3c 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = {
MODULE_DEVICE_TABLE(pci, k8temp_ids);
+static int __devinit is_rev_g_desktop(u8 model)
+{
+ u32 brandidx;
+
+ if (model < 0x69)
+ return 0;
+
+ if (model == 0xc1 || model == 0x6c || model == 0x7c)
+ return 0;
+
+ /*
+ * Differentiate between AM2 and ASB1.
+ * See "Constructing the processor Name String" in "Revision
+ * Guide for AMD NPT Family 0Fh Processors" (33610).
+ */
+ brandidx = cpuid_ebx(0x80000001);
+ brandidx = (brandidx >> 9) & 0x1f;
+
+ /* Single core */
+ if ((model == 0x6f || model == 0x7f) &&
+ (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+ return 0;
+
+ /* Dual core */
+ if (model == 0x6b &&
+ (brandidx == 0xb || brandidx == 0xc))
+ return 0;
+
+ return 1;
+}
+
static int __devinit k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
"wrong - check erratum #141\n");
}
- if ((model >= 0x69) &&
- !(model == 0xc1 || model == 0x6c || model == 0x7c ||
- model == 0x6b || model == 0x6f || model == 0x7f)) {
+ if (is_rev_g_desktop(model)) {
/*
* RevG desktop CPUs (i.e. no socket S1G1 or
* ASB1 parts) need additional offset,
@@ -252,12 +281,13 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
&sensor_dev_attr_temp3_input.dev_attr);
if (err)
goto exit_remove;
- if (data->sensorsp & SEL_PLACE)
+ if (data->sensorsp & SEL_PLACE) {
err = device_create_file(&pdev->dev,
&sensor_dev_attr_temp4_input.
dev_attr);
if (err)
goto exit_remove;
+ }
}
err = device_create_file(&pdev->dev, &dev_attr_name);
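
The AM2 versus ASB1 distinction in is_rev_g_desktop() comes from the brand ID returned in EBX by CPUID function 0x80000001: the driver keeps bits 13:9 and compares them against the values listed in the revision guide. A standalone sketch of the decode, fed with a hardcoded example EBX value instead of executing CPUID (the sample value is purely illustrative):

#include <stdio.h>
#include <stdint.h>

/* Extract the 5-bit brand index from CPUID 0x80000001 EBX (bits 13:9),
 * as is_rev_g_desktop() does above. */
static unsigned int brand_index(uint32_t ebx)
{
	return (ebx >> 9) & 0x1f;
}

/* Same single-core check as the driver: models 0x6f/0x7f with brand
 * index 0x7, 0x9 or 0xc are not RevG desktop parts. */
static int is_non_desktop_single_core(uint8_t model, uint32_t ebx)
{
	unsigned int idx = brand_index(ebx);

	return (model == 0x6f || model == 0x7f) &&
	       (idx == 0x7 || idx == 0x9 || idx == 0xc);
}

int main(void)
{
	uint32_t example_ebx = 0x7 << 9;	/* hypothetical EBX value */

	printf("brand index = 0x%x\n", brand_index(example_ebx));
	printf("excluded single core: %d\n",
	       is_non_desktop_single_core(0x6f, example_ebx));
	return 0;
}
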
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f036b15..fc591ae5310 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
out:
- if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+ if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
lis3_dev.idev->input->users)
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
* io-apic is not configurable (and generates a warning) but I keep it
* in case of support for other hardware.
*/
- if (dev->whoami == WAI_8B)
+ if (dev->pdata && dev->whoami == WAI_8B)
thread_fn = lis302dl_interrupt_thread1_8b;
else
thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f5402c1d..8e5933b72d1 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(lis3);
return 0;
}
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b16808a27..b9be5e3a22b 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(&lis3_dev);
return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 393f354f92a..ab5b87a8167 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -280,10 +280,49 @@ static int lm75_detect(struct i2c_client *new_client,
return 0;
}
+#ifdef CONFIG_PM
+static int lm75_suspend(struct device *dev)
+{
+ int status;
+ struct i2c_client *client = to_i2c_client(dev);
+ status = lm75_read_value(client, LM75_REG_CONF);
+ if (status < 0) {
+ dev_dbg(&client->dev, "Can't read config? %d\n", status);
+ return status;
+ }
+ status = status | LM75_SHUTDOWN;
+ lm75_write_value(client, LM75_REG_CONF, status);
+ return 0;
+}
+
+static int lm75_resume(struct device *dev)
+{
+ int status;
+ struct i2c_client *client = to_i2c_client(dev);
+ status = lm75_read_value(client, LM75_REG_CONF);
+ if (status < 0) {
+ dev_dbg(&client->dev, "Can't read config? %d\n", status);
+ return status;
+ }
+ status = status & ~LM75_SHUTDOWN;
+ lm75_write_value(client, LM75_REG_CONF, status);
+ return 0;
+}
+
+static const struct dev_pm_ops lm75_dev_pm_ops = {
+ .suspend = lm75_suspend,
+ .resume = lm75_resume,
+};
+#define LM75_DEV_PM_OPS (&lm75_dev_pm_ops)
+#else
+#define LM75_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct i2c_driver lm75_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm75",
+ .pm = LM75_DEV_PM_OPS,
},
.probe = lm75_probe,
.remove = lm75_remove,
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index 7c93454bb4e..e547a3eb4de 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -30,6 +30,7 @@
/* straight from the datasheet */
#define LM75_TEMP_MIN (-55000)
#define LM75_TEMP_MAX 125000
+#define LM75_SHUTDOWN 0x01
/* TEMP: 0.001C/bit (-55C to +125C)
REG: (0.5C/bit, two's complement) << 7 */
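
The lm75 suspend/resume hooks above are a plain read-modify-write of the configuration register: set LM75_SHUTDOWN (bit 0) to stop conversions on suspend, clear it on resume. The bit handling on its own, outside any I2C context:

#include <stdio.h>
#include <stdint.h>

#define LM75_SHUTDOWN 0x01	/* bit 0 of the LM75 configuration register */

static uint8_t config_suspend(uint8_t cfg)
{
	return cfg | LM75_SHUTDOWN;	/* enter shutdown on suspend */
}

static uint8_t config_resume(uint8_t cfg)
{
	return cfg & ~LM75_SHUTDOWN;	/* leave shutdown on resume */
}

int main(void)
{
	uint8_t cfg = 0x60;	/* example configuration value */

	cfg = config_suspend(cfg);
	printf("suspended: 0x%02x\n", cfg);	/* 0x61 */
	cfg = config_resume(cfg);
	printf("resumed:   0x%02x\n", cfg);	/* 0x60 */
	return 0;
}
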
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d42112..464340f2549 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
struct lm95241_data {
struct device *hwmon_dev;
struct mutex update_lock;
- unsigned long last_updated, rate; /* in jiffies */
+ unsigned long last_updated, interval; /* in jiffies */
char valid; /* zero until following fields are valid */
/* registers values */
u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
show_temp(remote1);
show_temp(remote2);
-static ssize_t show_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm95241_data *data = lm95241_update_device(dev);
- snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ);
+ snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
return strlen(buf);
}
-static ssize_t set_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm95241_data *data = i2c_get_clientdata(client);
- strict_strtol(buf, 10, &data->rate);
- data->rate = data->rate * HZ / 1000;
+	strict_strtoul(buf, 10, &data->interval);
+ data->interval = data->interval * HZ / 1000;
return count;
}
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
-static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate);
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+ set_interval);
static struct attribute *lm95241_attributes[] = {
&dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
&dev_attr_temp3_min.attr,
&dev_attr_temp2_max.attr,
&dev_attr_temp3_max.attr,
- &dev_attr_rate.attr,
+ &dev_attr_update_interval.attr,
NULL
};
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
{
struct lm95241_data *data = i2c_get_clientdata(client);
- data->rate = HZ; /* 1 sec default */
+ data->interval = HZ; /* 1 sec default */
data->valid = 0;
data->config = CFG_CR0076;
data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + data->rate) ||
+ if (time_after(jiffies, data->last_updated + data->interval) ||
!data->valid) {
dev_dbg(&client->dev, "Updating lm95241 data.\n");
data->local_h =
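
The renamed attribute follows the standard hwmon name: update_interval is exposed in milliseconds but stored in jiffies, converted with interval = ms * HZ / 1000 on write and 1000 * interval / HZ on read. The round trip in isolation, with HZ assumed to be 250 for the example (the kernel value depends on the configuration):

#include <stdio.h>

#define HZ 250	/* assumed tick rate for this example only */

static unsigned long ms_to_jiffies(unsigned long ms)
{
	return ms * HZ / 1000;
}

static unsigned long jiffies_to_ms(unsigned long j)
{
	return 1000 * j / HZ;
}

int main(void)
{
	unsigned long j = ms_to_jiffies(1500);

	printf("%lu jiffies\n", j);		/* 375 at HZ=250 */
	printf("%lu ms\n", jiffies_to_ms(j));	/* 1500 */
	return 0;
}
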
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 21d201befc2..65930832930 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -21,6 +21,7 @@
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/i2c/ltc4245.h>
/* Here are names of the chip's registers (a.k.a. commands) */
enum ltc4245_cmd {
@@ -60,8 +61,72 @@ struct ltc4245_data {
/* Voltage registers */
u8 vregs[0x0d];
+
+ /* GPIO ADC registers */
+ bool use_extra_gpios;
+ int gpios[3];
};
+/*
+ * Update the readings from the GPIO pins. If the driver has been configured to
+ * sample all GPIOs as analog voltages, a round-robin sampling method is used.
+ * Otherwise, only the configured GPIO pin is sampled.
+ *
+ * LOCKING: must hold data->update_lock
+ */
+static void ltc4245_update_gpios(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc4245_data *data = i2c_get_clientdata(client);
+ u8 gpio_curr, gpio_next, gpio_reg;
+ int i;
+
+ /* no extra gpio support, we're basically done */
+ if (!data->use_extra_gpios) {
+ data->gpios[0] = data->vregs[LTC4245_GPIOADC - 0x10];
+ return;
+ }
+
+ /*
+ * If the last reading was too long ago, then we mark all old GPIO
+ * readings as stale by setting them to -EAGAIN
+ */
+ if (time_after(jiffies, data->last_updated + 5 * HZ)) {
+ dev_dbg(&client->dev, "Marking GPIOs invalid\n");
+ for (i = 0; i < ARRAY_SIZE(data->gpios); i++)
+ data->gpios[i] = -EAGAIN;
+ }
+
+ /*
+ * Get the current GPIO pin
+ *
+ * The datasheet calls these GPIO[1-3], but we'll calculate the zero
+ * based array index instead, and call them GPIO[0-2]. This is much
+ * easier to think about.
+ */
+ gpio_curr = (data->cregs[LTC4245_GPIO] & 0xc0) >> 6;
+ if (gpio_curr > 0)
+ gpio_curr -= 1;
+
+ /* Read the GPIO voltage from the GPIOADC register */
+ data->gpios[gpio_curr] = data->vregs[LTC4245_GPIOADC - 0x10];
+
+ /* Find the next GPIO pin to read */
+ gpio_next = (gpio_curr + 1) % ARRAY_SIZE(data->gpios);
+
+ /*
+ * Calculate the correct setting for the GPIO register so it will
+ * sample the next GPIO pin
+ */
+ gpio_reg = (data->cregs[LTC4245_GPIO] & 0x3f) | ((gpio_next + 1) << 6);
+
+ /* Update the GPIO register */
+ i2c_smbus_write_byte_data(client, LTC4245_GPIO, gpio_reg);
+
+ /* Update saved data */
+ data->cregs[LTC4245_GPIO] = gpio_reg;
+}
+
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -93,6 +158,9 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
data->vregs[i] = val;
}
+ /* Update GPIO readings */
+ ltc4245_update_gpios(dev);
+
data->last_updated = jiffies;
data->valid = 1;
}
@@ -233,6 +301,22 @@ static ssize_t ltc4245_show_alarm(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 1 : 0);
}
+static ssize_t ltc4245_show_gpio(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ltc4245_data *data = ltc4245_update_device(dev);
+ int val = data->gpios[attr->index];
+
+	/* handle stale GPIO readings */
+ if (val < 0)
+ return val;
+
+ /* Convert to millivolts and print */
+ return snprintf(buf, PAGE_SIZE, "%u\n", val * 10);
+}
+
/* These macros are used below in constructing device attribute objects
* for use with sysfs_create_group() to make a sysfs device file
* for each register.
@@ -254,6 +338,10 @@ static ssize_t ltc4245_show_alarm(struct device *dev,
static SENSOR_DEVICE_ATTR_2(name, S_IRUGO, \
ltc4245_show_alarm, NULL, (mask), reg)
+#define LTC4245_GPIO_VOLTAGE(name, gpio_num) \
+ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
+ ltc4245_show_gpio, NULL, gpio_num)
+
/* Construct a sensor_device_attribute structure for each register */
/* Input voltages */
@@ -293,7 +381,9 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
/* GPIO voltages */
-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
+LTC4245_GPIO_VOLTAGE(in9_input, 0);
+LTC4245_GPIO_VOLTAGE(in10_input, 1);
+LTC4245_GPIO_VOLTAGE(in11_input, 2);
/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -304,7 +394,7 @@ LTC4245_POWER(power4_input, LTC4245_VEESENSE);
/* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
-static struct attribute *ltc4245_attributes[] = {
+static struct attribute *ltc4245_std_attributes[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
@@ -345,10 +435,77 @@ static struct attribute *ltc4245_attributes[] = {
NULL,
};
-static const struct attribute_group ltc4245_group = {
- .attrs = ltc4245_attributes,
+static struct attribute *ltc4245_gpio_attributes[] = {
+ &sensor_dev_attr_in10_input.dev_attr.attr,
+ &sensor_dev_attr_in11_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ltc4245_std_group = {
+ .attrs = ltc4245_std_attributes,
+};
+
+static const struct attribute_group ltc4245_gpio_group = {
+ .attrs = ltc4245_gpio_attributes,
};
+static int ltc4245_sysfs_create_groups(struct i2c_client *client)
+{
+ struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+ int ret;
+
+ /* register the standard sysfs attributes */
+ ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
+ if (ret) {
+ dev_err(dev, "unable to register standard attributes\n");
+ return ret;
+ }
+
+	/* if we're using the extra gpio support, register its attributes */
+ if (data->use_extra_gpios) {
+ ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
+ if (ret) {
+ dev_err(dev, "unable to register gpio attributes\n");
+ sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
+{
+ struct ltc4245_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ if (data->use_extra_gpios)
+ sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);
+
+ sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
+}
+
+static bool ltc4245_use_extra_gpios(struct i2c_client *client)
+{
+ struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev);
+#ifdef CONFIG_OF
+ struct device_node *np = client->dev.of_node;
+#endif
+
+ /* prefer platform data */
+ if (pdata)
+ return pdata->use_extra_gpios;
+
+#ifdef CONFIG_OF
+ /* fallback on OF */
+ if (of_find_property(np, "ltc4245,use-extra-gpios", NULL))
+ return true;
+#endif
+
+ return false;
+}
+
static int ltc4245_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -367,15 +524,16 @@ static int ltc4245_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
+ data->use_extra_gpios = ltc4245_use_extra_gpios(client);
/* Initialize the LTC4245 chip */
i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
/* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ltc4245_group);
+ ret = ltc4245_sysfs_create_groups(client);
if (ret)
- goto out_sysfs_create_group;
+ goto out_sysfs_create_groups;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -386,8 +544,8 @@ static int ltc4245_probe(struct i2c_client *client,
return 0;
out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ltc4245_group);
-out_sysfs_create_group:
+ ltc4245_sysfs_remove_groups(client);
+out_sysfs_create_groups:
kfree(data);
out_kzalloc:
return ret;
@@ -398,8 +556,7 @@ static int ltc4245_remove(struct i2c_client *client)
struct ltc4245_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ltc4245_group);
-
+ ltc4245_sysfs_remove_groups(client);
kfree(data);
return 0;
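
The GPIO round robin above keeps the currently selected pin in bits 7:6 of the LTC4245 GPIO register (1..3 on the chip, 0..2 in the gpios[] array): it stores the last conversion for the current pin, then rewrites the register so the next conversion samples the following pin. The index and register arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

#define NUM_GPIOS 3

/* Advance the round-robin selection encoded in bits 7:6 of the GPIO
 * register, as ltc4245_update_gpios() does. Reports which array slot
 * was just sampled and returns the new register value. */
static uint8_t advance_gpio(uint8_t gpio_reg, unsigned int *sampled)
{
	unsigned int curr = (gpio_reg & 0xc0) >> 6;	/* 1..3 on the chip */
	unsigned int next;

	if (curr > 0)
		curr -= 1;				/* 0..2 array index */
	*sampled = curr;

	next = (curr + 1) % NUM_GPIOS;
	return (gpio_reg & 0x3f) | ((next + 1) << 6);
}

int main(void)
{
	uint8_t reg = 0x40;	/* GPIO1 selected, low bits left alone */
	unsigned int sampled;
	int i;

	for (i = 0; i < 4; i++) {
		reg = advance_gpio(reg, &sampled);
		printf("sampled gpios[%u], next reg = 0x%02x\n", sampled, reg);
	}
	return 0;
}
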
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index ce3c7bc8181..d5226c9e120 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -18,7 +18,7 @@
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/mfd/mc13783-private.h>
+#include <linux/mfd/mc13783.h>
#include <linux/platform_device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
@@ -144,6 +144,14 @@ static const struct attribute_group mc13783_group_ts = {
.attrs = mc13783_attr_ts,
};
+static int mc13783_adc_use_touchscreen(struct platform_device *pdev)
+{
+ struct mc13783_adc_priv *priv = platform_get_drvdata(pdev);
+ unsigned flags = mc13783_get_flags(priv->mc13783);
+
+ return flags & MC13783_USE_TOUCHSCREEN;
+}
+
static int __init mc13783_adc_probe(struct platform_device *pdev)
{
struct mc13783_adc_priv *priv;
@@ -162,10 +170,11 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
if (ret)
goto out_err_create1;
- if (!(priv->mc13783->flags & MC13783_USE_TOUCHSCREEN))
+ if (!mc13783_adc_use_touchscreen(pdev)) {
ret = sysfs_create_group(&pdev->dev.kobj, &mc13783_group_ts);
if (ret)
goto out_err_create2;
+ }
priv->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(priv->hwmon_dev)) {
@@ -180,7 +189,7 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
out_err_register:
- if (!(priv->mc13783->flags & MC13783_USE_TOUCHSCREEN))
+ if (!mc13783_adc_use_touchscreen(pdev))
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group_ts);
out_err_create2:
@@ -199,7 +208,7 @@ static int __devexit mc13783_adc_remove(struct platform_device *pdev)
hwmon_device_unregister(priv->hwmon_dev);
- if (!(priv->mc13783->flags & MC13783_USE_TOUCHSCREEN))
+ if (!mc13783_adc_use_touchscreen(pdev))
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group_ts);
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group);
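
With mc13783-private.h gone, the driver no longer dereferences the mc13783 structure directly; the flag is fetched through the mc13783_get_flags() accessor provided by the MFD core. A minimal sketch of that accessor pattern (the structure layout and bit value below are made up for illustration; only the mc13783_get_flags() and MC13783_USE_TOUCHSCREEN names come from the patch):

#include <stdio.h>

#define MC13783_USE_TOUCHSCREEN (1 << 4)	/* illustrative bit value */

/* Treated as opaque by users; only the accessor looks inside. */
struct mc13783 {
	unsigned int flags;
};

static unsigned int mc13783_get_flags(struct mc13783 *mc13783)
{
	return mc13783->flags;
}

static int adc_use_touchscreen(struct mc13783 *mc13783)
{
	return mc13783_get_flags(mc13783) & MC13783_USE_TOUCHSCREEN;
}

int main(void)
{
	struct mc13783 chip = { .flags = MC13783_USE_TOUCHSCREEN };

	printf("touchscreen in use: %d\n", adc_use_touchscreen(&chip) ? 1 : 0);
	return 0;
}
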
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 4a64b85d4ec..68e69a49633 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
static int __init pc87360_device_add(unsigned short address)
{
- struct resource res = {
- .name = "pc87360",
- .flags = IORESOURCE_IO,
- };
- int err, i;
+ struct resource res[3];
+ int err, i, res_count;
pdev = platform_device_alloc("pc87360", address);
if (!pdev) {
@@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address)
goto exit;
}
+ memset(res, 0, 3 * sizeof(struct resource));
+ res_count = 0;
for (i = 0; i < 3; i++) {
if (!extra_isa[i])
continue;
- res.start = extra_isa[i];
- res.end = extra_isa[i] + PC87360_EXTENT - 1;
+ res[res_count].start = extra_isa[i];
+ res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
+		res[res_count].name = "pc87360";
+		res[res_count].flags = IORESOURCE_IO;
- err = acpi_check_resource_conflict(&res);
+ err = acpi_check_resource_conflict(&res[res_count]);
if (err)
goto exit_device_put;
- err = platform_device_add_resources(pdev, &res, 1);
- if (err) {
- printk(KERN_ERR "pc87360: Device resource[%d] "
- "addition failed (%d)\n", i, err);
- goto exit_device_put;
- }
+ res_count++;
+ }
+
+ err = platform_device_add_resources(pdev, res, res_count);
+ if (err) {
+ printk(KERN_ERR "pc87360: Device resources addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
}
err = platform_device_add(pdev);
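
Rather than registering one platform resource per loop iteration, the rework above packs the populated extra_isa[] entries into a dense array and hands them to platform_device_add_resources() in a single call. The compaction on its own, with PC87360_EXTENT and the sample addresses assumed for illustration:

#include <stdio.h>

#define PC87360_EXTENT 0x10

struct io_range {
	unsigned short start;
	unsigned short end;
};

/* Pack the non-zero extra_isa[] entries into a dense array, as the
 * reworked pc87360_device_add() does before registering resources. */
static int compact_ranges(const unsigned short extra_isa[3],
			  struct io_range res[3])
{
	int i, count = 0;

	for (i = 0; i < 3; i++) {
		if (!extra_isa[i])
			continue;
		res[count].start = extra_isa[i];
		res[count].end = extra_isa[i] + PC87360_EXTENT - 1;
		count++;
	}
	return count;
}

int main(void)
{
	/* hypothetical base addresses; the middle bank is absent */
	unsigned short extra_isa[3] = { 0x6620, 0, 0x6640 };
	struct io_range res[3];
	int i, n = compact_ranges(extra_isa, res);

	for (i = 0; i < n; i++)
		printf("range %d: 0x%04x-0x%04x\n", i, res[i].start, res[i].end);
	return 0;
}
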
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 3170b26d244..9ec4daaf6ca 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -1,7 +1,7 @@
/*
* pc87427.c - hardware monitoring driver for the
* National Semiconductor PC87427 Super-I/O chip
- * Copyright (C) 2006 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2006, 2008, 2010 Jean Delvare <khali@linux-fr.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,10 +15,11 @@
* Supports the following chips:
*
* Chip #vin #fan #pwm #temp devid
- * PC87427 - 8 - - 0xF2
+ * PC87427 - 8 4 6 0xF2
*
* This driver assumes that no more than one chip is present.
- * Only fan inputs are supported so far, although the chip can do much more.
+ * Only fans are fully supported so far. Temperatures are in read-only
+ * mode, and voltages aren't supported at all.
*/
#include <linux/module.h>
@@ -57,6 +58,25 @@ struct pc87427_data {
u16 fan[8]; /* register values */
u16 fan_min[8]; /* register values */
u8 fan_status[8]; /* register values */
+
+ u8 pwm_enabled; /* bit vector */
+ u8 pwm_auto_ok; /* bit vector */
+ u8 pwm_enable[4]; /* register values */
+ u8 pwm[4]; /* register values */
+
+ u8 temp_enabled; /* bit vector */
+ s16 temp[6]; /* register values */
+ s8 temp_min[6]; /* register values */
+ s8 temp_max[6]; /* register values */
+ s8 temp_crit[6]; /* register values */
+ u8 temp_status[6]; /* register values */
+ u8 temp_type[6]; /* register values */
+};
+
+struct pc87427_sio_data {
+ unsigned short address[2];
+ u8 has_fanin;
+ u8 has_fanout;
};
/*
@@ -65,6 +85,13 @@ struct pc87427_data {
#define SIOREG_LDSEL 0x07 /* Logical device select */
#define SIOREG_DEVID 0x20 /* Device ID */
+#define SIOREG_CF2 0x22 /* Configuration 2 */
+#define SIOREG_CF3 0x23 /* Configuration 3 */
+#define SIOREG_CF4 0x24 /* Configuration 4 */
+#define SIOREG_CF5 0x25 /* Configuration 5 */
+#define SIOREG_CFB 0x2B /* Configuration B */
+#define SIOREG_CFC 0x2C /* Configuration C */
+#define SIOREG_CFD 0x2D /* Configuration D */
#define SIOREG_ACT 0x30 /* Device activation */
#define SIOREG_MAP 0x50 /* I/O or memory mapping */
#define SIOREG_IOBASE 0x60 /* I/O base address */
@@ -102,6 +129,8 @@ static inline void superio_exit(int sioaddr)
#define BANK_FM(nr) (nr)
#define BANK_FT(nr) (0x08 + (nr))
#define BANK_FC(nr) (0x10 + (nr) * 2)
+#define BANK_TM(nr) (nr)
+#define BANK_VM(nr) (0x08 + (nr))
/*
* I/O access functions
@@ -179,6 +208,127 @@ static inline u16 fan_to_reg(unsigned long val)
}
/*
+ * PWM registers and conversions
+ */
+
+#define PC87427_REG_PWM_ENABLE 0x10
+#define PC87427_REG_PWM_DUTY 0x12
+
+#define PWM_ENABLE_MODE_MASK (7 << 4)
+#define PWM_ENABLE_CTLEN (1 << 0)
+
+#define PWM_MODE_MANUAL (0 << 4)
+#define PWM_MODE_AUTO (1 << 4)
+#define PWM_MODE_OFF (2 << 4)
+#define PWM_MODE_ON (7 << 4)
+
+/* Dedicated function to read all registers related to a given PWM output.
+ This saves us quite a few locks and bank selections.
+ Must be called with data->lock held.
+ nr is from 0 to 3 */
+static void pc87427_readall_pwm(struct pc87427_data *data, u8 nr)
+{
+ int iobase = data->address[LD_FAN];
+
+ outb(BANK_FC(nr), iobase + PC87427_REG_BANK);
+ data->pwm_enable[nr] = inb(iobase + PC87427_REG_PWM_ENABLE);
+ data->pwm[nr] = inb(iobase + PC87427_REG_PWM_DUTY);
+}
+
+static inline int pwm_enable_from_reg(u8 reg)
+{
+ switch (reg & PWM_ENABLE_MODE_MASK) {
+ case PWM_MODE_ON:
+ return 0;
+ case PWM_MODE_MANUAL:
+ case PWM_MODE_OFF:
+ return 1;
+ case PWM_MODE_AUTO:
+ return 2;
+ default:
+ return -EPROTO;
+ }
+}
+
+static inline u8 pwm_enable_to_reg(unsigned long val, u8 pwmval)
+{
+ switch (val) {
+ default:
+ return PWM_MODE_ON;
+ case 1:
+ return pwmval ? PWM_MODE_MANUAL : PWM_MODE_OFF;
+ case 2:
+ return PWM_MODE_AUTO;
+ }
+}
+
+/*
+ * Temperature registers and conversions
+ */
+
+#define PC87427_REG_TEMP_STATUS 0x10
+#define PC87427_REG_TEMP 0x14
+#define PC87427_REG_TEMP_MAX 0x18
+#define PC87427_REG_TEMP_MIN 0x19
+#define PC87427_REG_TEMP_CRIT 0x1a
+#define PC87427_REG_TEMP_TYPE 0x1d
+
+#define TEMP_STATUS_CHANEN (1 << 0)
+#define TEMP_STATUS_LOWFLG (1 << 1)
+#define TEMP_STATUS_HIGHFLG (1 << 2)
+#define TEMP_STATUS_CRITFLG (1 << 3)
+#define TEMP_STATUS_SENSERR (1 << 5)
+#define TEMP_TYPE_MASK (3 << 5)
+
+#define TEMP_TYPE_THERMISTOR (1 << 5)
+#define TEMP_TYPE_REMOTE_DIODE (2 << 5)
+#define TEMP_TYPE_LOCAL_DIODE (3 << 5)
+
+/* Dedicated function to read all registers related to a given temperature
+ input. This saves us quite a few locks and bank selections.
+ Must be called with data->lock held.
+ nr is from 0 to 5 */
+static void pc87427_readall_temp(struct pc87427_data *data, u8 nr)
+{
+ int iobase = data->address[LD_TEMP];
+
+ outb(BANK_TM(nr), iobase + PC87427_REG_BANK);
+ data->temp[nr] = le16_to_cpu(inw(iobase + PC87427_REG_TEMP));
+ data->temp_max[nr] = inb(iobase + PC87427_REG_TEMP_MAX);
+ data->temp_min[nr] = inb(iobase + PC87427_REG_TEMP_MIN);
+ data->temp_crit[nr] = inb(iobase + PC87427_REG_TEMP_CRIT);
+ data->temp_type[nr] = inb(iobase + PC87427_REG_TEMP_TYPE);
+ data->temp_status[nr] = inb(iobase + PC87427_REG_TEMP_STATUS);
+	/* Clear temperature alarm bits */
+ outb(data->temp_status[nr], iobase + PC87427_REG_TEMP_STATUS);
+}
+
+static inline unsigned int temp_type_from_reg(u8 reg)
+{
+ switch (reg & TEMP_TYPE_MASK) {
+ case TEMP_TYPE_THERMISTOR:
+ return 4;
+ case TEMP_TYPE_REMOTE_DIODE:
+ case TEMP_TYPE_LOCAL_DIODE:
+ return 3;
+ default:
+ return 0;
+ }
+}
+
+/* We assume 8-bit thermal sensors; 9-bit thermal sensors are possible
+ too, but I have no idea how to figure out when they are used. */
+static inline long temp_from_reg(s16 reg)
+{
+ return reg * 1000 / 256;
+}
+
+static inline long temp_from_reg8(s8 reg)
+{
+ return reg * 1000;
+}
+
+/*
* Data interface
*/
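
temp_from_reg() above treats the 16-bit reading as a value with 8 fractional bits (hence the division by 256) and returns millidegrees, while the 8-bit limit registers hold whole degrees. A quick standalone check of both conversions:

#include <stdio.h>
#include <stdint.h>

/* 16-bit register with 8 fractional bits -> millidegrees C (temp_from_reg above) */
static long temp16_to_millideg(int16_t reg)
{
	return (long)reg * 1000 / 256;
}

/* whole-degree limit register -> millidegrees C (temp_from_reg8 above) */
static long temp8_to_millideg(int8_t reg)
{
	return (long)reg * 1000;
}

int main(void)
{
	printf("%ld\n", temp16_to_millideg(0x2a80));	/* 42.5 C -> 42500 */
	printf("%ld\n", temp16_to_millideg(-0x0280));	/* -2.5 C -> -2500 */
	printf("%ld\n", temp8_to_millideg(70));		/* 70 C  -> 70000 */
	return 0;
}
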
@@ -198,6 +348,21 @@ static struct pc87427_data *pc87427_update_device(struct device *dev)
continue;
pc87427_readall_fan(data, i);
}
+
+ /* PWM outputs */
+ for (i = 0; i < 4; i++) {
+ if (!(data->pwm_enabled & (1 << i)))
+ continue;
+ pc87427_readall_pwm(data, i);
+ }
+
+ /* Temperature channels */
+ for (i = 0; i < 6; i++) {
+ if (!(data->temp_enabled & (1 << i)))
+ continue;
+ pc87427_readall_temp(data, i);
+ }
+
data->last_updated = jiffies;
done:
@@ -208,9 +373,8 @@ done:
static ssize_t show_fan_input(struct device *dev, struct device_attribute
*devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87427_data *data = pc87427_update_device(dev);
- int nr = attr->index;
+ int nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%lu\n", fan_from_reg(data->fan[nr]));
}
@@ -218,9 +382,8 @@ static ssize_t show_fan_input(struct device *dev, struct device_attribute
static ssize_t show_fan_min(struct device *dev, struct device_attribute
*devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87427_data *data = pc87427_update_device(dev);
- int nr = attr->index;
+ int nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%lu\n", fan_from_reg(data->fan_min[nr]));
}
@@ -228,9 +391,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute
static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
*devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87427_data *data = pc87427_update_device(dev);
- int nr = attr->index;
+ int nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%d\n", !!(data->fan_status[nr]
& FAN_STATUS_LOSPD));
@@ -239,9 +401,8 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
static ssize_t show_fan_fault(struct device *dev, struct device_attribute
*devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87427_data *data = pc87427_update_device(dev);
- int nr = attr->index;
+ int nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%d\n", !!(data->fan_status[nr]
& FAN_STATUS_STALL));
@@ -251,11 +412,13 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct pc87427_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int nr = attr->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ unsigned long val;
int iobase = data->address[LD_FAN];
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
mutex_lock(&data->lock);
outb(BANK_FM(nr), iobase + PC87427_REG_BANK);
/* The low speed limit registers are read-only while monitoring
@@ -377,6 +540,390 @@ static const struct attribute_group pc87427_group_fan[8] = {
{ .attrs = pc87427_attributes_fan[7] },
};
+/* Must be called with data->lock held and pc87427_readall_pwm() freshly
+ called */
+static void update_pwm_enable(struct pc87427_data *data, int nr, u8 mode)
+{
+ int iobase = data->address[LD_FAN];
+ data->pwm_enable[nr] &= ~PWM_ENABLE_MODE_MASK;
+ data->pwm_enable[nr] |= mode;
+ outb(data->pwm_enable[nr], iobase + PC87427_REG_PWM_ENABLE);
+}
+
+static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ int pwm_enable;
+
+ pwm_enable = pwm_enable_from_reg(data->pwm_enable[nr]);
+ if (pwm_enable < 0)
+ return pwm_enable;
+ return sprintf(buf, "%d\n", pwm_enable);
+}
+
+static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct pc87427_data *data = dev_get_drvdata(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0 || val > 2)
+ return -EINVAL;
+ /* Can't go to automatic mode if it isn't configured */
+ if (val == 2 && !(data->pwm_auto_ok & (1 << nr)))
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ pc87427_readall_pwm(data, nr);
+ update_pwm_enable(data, nr, pwm_enable_to_reg(val, data->pwm[nr]));
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", (int)data->pwm[nr]);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct pc87427_data *data = dev_get_drvdata(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ unsigned long val;
+ int iobase = data->address[LD_FAN];
+ u8 mode;
+
+ if (strict_strtoul(buf, 10, &val) < 0 || val > 0xff)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ pc87427_readall_pwm(data, nr);
+ mode = data->pwm_enable[nr] & PWM_ENABLE_MODE_MASK;
+ if (mode != PWM_MODE_MANUAL && mode != PWM_MODE_OFF) {
+ dev_notice(dev, "Can't set PWM%d duty cycle while not in "
+ "manual mode\n", nr + 1);
+ mutex_unlock(&data->lock);
+ return -EPERM;
+ }
+
+ /* We may have to change the mode */
+ if (mode == PWM_MODE_MANUAL && val == 0) {
+ /* Transition from Manual to Off */
+ update_pwm_enable(data, nr, PWM_MODE_OFF);
+ mode = PWM_MODE_OFF;
+ dev_dbg(dev, "Switching PWM%d from %s to %s\n", nr + 1,
+ "manual", "off");
+ } else if (mode == PWM_MODE_OFF && val != 0) {
+ /* Transition from Off to Manual */
+ update_pwm_enable(data, nr, PWM_MODE_MANUAL);
+ mode = PWM_MODE_MANUAL;
+ dev_dbg(dev, "Switching PWM%d from %s to %s\n", nr + 1,
+ "off", "manual");
+ }
+
+ data->pwm[nr] = val;
+ if (mode == PWM_MODE_MANUAL)
+ outb(val, iobase + PC87427_REG_PWM_DUTY);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+ show_pwm_enable, set_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
+ show_pwm_enable, set_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
+ show_pwm_enable, set_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO,
+ show_pwm_enable, set_pwm_enable, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
+
+static struct attribute *pc87427_attributes_pwm[4][3] = {
+ {
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_pwm4_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ NULL
+ }
+};
+
+static const struct attribute_group pc87427_group_pwm[4] = {
+ { .attrs = pc87427_attributes_pwm[0] },
+ { .attrs = pc87427_attributes_pwm[1] },
+ { .attrs = pc87427_attributes_pwm[2] },
+ { .attrs = pc87427_attributes_pwm[3] },
+};
+
+static ssize_t show_temp_input(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%ld\n", temp_from_reg(data->temp[nr]));
+}
+
+static ssize_t show_temp_min(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_min[nr]));
+}
+
+static ssize_t show_temp_max(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_max[nr]));
+}
+
+static ssize_t show_temp_crit(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_crit[nr]));
+}
+
+static ssize_t show_temp_type(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%u\n", temp_type_from_reg(data->temp_type[nr]));
+}
+
+static ssize_t show_temp_min_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", !!(data->temp_status[nr]
+ & TEMP_STATUS_LOWFLG));
+}
+
+static ssize_t show_temp_max_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", !!(data->temp_status[nr]
+ & TEMP_STATUS_HIGHFLG));
+}
+
+static ssize_t show_temp_crit_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", !!(data->temp_status[nr]
+ & TEMP_STATUS_CRITFLG));
+}
+
+static ssize_t show_temp_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87427_data *data = pc87427_update_device(dev);
+ int nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", !!(data->temp_status[nr]
+ & TEMP_STATUS_SENSERR));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_min, S_IRUGO, show_temp_min, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_max, S_IRUGO, show_temp_max, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp_crit, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_crit, S_IRUGO, show_temp_crit, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_crit, S_IRUGO, show_temp_crit, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_type, S_IRUGO, show_temp_type, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_type, S_IRUGO, show_temp_type, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_type, S_IRUGO, show_temp_type, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
+ show_temp_min_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO,
+ show_temp_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO,
+ show_temp_min_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO,
+ show_temp_min_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO,
+ show_temp_min_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_min_alarm, S_IRUGO,
+ show_temp_min_alarm, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ show_temp_max_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO,
+ show_temp_max_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO,
+ show_temp_max_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO,
+ show_temp_max_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO,
+ show_temp_max_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO,
+ show_temp_max_alarm, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
+ show_temp_crit_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO,
+ show_temp_crit_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO,
+ show_temp_crit_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO,
+ show_temp_crit_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO,
+ show_temp_crit_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO,
+ show_temp_crit_alarm, NULL, 5);
+
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5);
+
+static struct attribute *pc87427_attributes_temp[6][10] = {
+ {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_type.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_type.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_type.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit.dev_attr.attr,
+ &sensor_dev_attr_temp4_type.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_min.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit.dev_attr.attr,
+ &sensor_dev_attr_temp5_type.dev_attr.attr,
+ &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_min.dev_attr.attr,
+ &sensor_dev_attr_temp6_max.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit.dev_attr.attr,
+ &sensor_dev_attr_temp6_type.dev_attr.attr,
+ &sensor_dev_attr_temp6_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_fault.dev_attr.attr,
+ NULL
+ }
+};
+
+static const struct attribute_group pc87427_group_temp[6] = {
+ { .attrs = pc87427_attributes_temp[0] },
+ { .attrs = pc87427_attributes_temp[1] },
+ { .attrs = pc87427_attributes_temp[2] },
+ { .attrs = pc87427_attributes_temp[3] },
+ { .attrs = pc87427_attributes_temp[4] },
+ { .attrs = pc87427_attributes_temp[5] },
+};
+
static ssize_t show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
{
@@ -391,8 +938,49 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
* Device detection, attach and detach
*/
+static void pc87427_release_regions(struct platform_device *pdev, int count)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, i);
+ release_region(res->start, resource_size(res));
+ }
+}
+
+static int __devinit pc87427_request_regions(struct platform_device *pdev,
+ int count)
+{
+ struct resource *res;
+ int i, err = 0;
+
+ for (i = 0; i < count; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, i);
+ if (!res) {
+ err = -ENOENT;
+ dev_err(&pdev->dev, "Missing resource #%d\n", i);
+ break;
+ }
+ if (!request_region(res->start, resource_size(res), DRVNAME)) {
+ err = -EBUSY;
+ dev_err(&pdev->dev,
+ "Failed to request region 0x%lx-0x%lx\n",
+ (unsigned long)res->start,
+ (unsigned long)res->end);
+ break;
+ }
+ }
+
+ if (err && i)
+ pc87427_release_regions(pdev, i);
+
+ return err;
+}
+
static void __devinit pc87427_init_device(struct device *dev)
{
+ struct pc87427_sio_data *sio_data = dev->platform_data;
struct pc87427_data *data = dev_get_drvdata(dev);
int i;
u8 reg;
@@ -400,10 +988,12 @@ static void __devinit pc87427_init_device(struct device *dev)
/* The FMC module should be ready */
reg = pc87427_read8(data, LD_FAN, PC87427_REG_BANK);
if (!(reg & 0x80))
- dev_warn(dev, "FMC module not ready!\n");
+ dev_warn(dev, "%s module not ready!\n", "FMC");
/* Check which fans are enabled */
for (i = 0; i < 8; i++) {
+ if (!(sio_data->has_fanin & (1 << i))) /* Not wired */
+ continue;
reg = pc87427_read8_bank(data, LD_FAN, BANK_FM(i),
PC87427_REG_FAN_STATUS);
if (reg & FAN_STATUS_MONEN)
@@ -411,37 +1001,93 @@ static void __devinit pc87427_init_device(struct device *dev)
}
if (!data->fan_enabled) {
- dev_dbg(dev, "Enabling all fan inputs\n");
- for (i = 0; i < 8; i++)
+ dev_dbg(dev, "Enabling monitoring of all fans\n");
+ for (i = 0; i < 8; i++) {
+ if (!(sio_data->has_fanin & (1 << i))) /* Not wired */
+ continue;
pc87427_write8_bank(data, LD_FAN, BANK_FM(i),
PC87427_REG_FAN_STATUS,
FAN_STATUS_MONEN);
- data->fan_enabled = 0xff;
+ }
+ data->fan_enabled = sio_data->has_fanin;
+ }
+
+ /* Check which PWM outputs are enabled */
+ for (i = 0; i < 4; i++) {
+ if (!(sio_data->has_fanout & (1 << i))) /* Not wired */
+ continue;
+ reg = pc87427_read8_bank(data, LD_FAN, BANK_FC(i),
+ PC87427_REG_PWM_ENABLE);
+ if (reg & PWM_ENABLE_CTLEN)
+ data->pwm_enabled |= (1 << i);
+
+ /* We don't expose an interface to reconfigure the automatic
+ fan control mode, so only allow returning to this mode if
+ it was originally set. */
+ if ((reg & PWM_ENABLE_MODE_MASK) == PWM_MODE_AUTO) {
+ dev_dbg(dev, "PWM%d is in automatic control mode\n",
+ i + 1);
+ data->pwm_auto_ok |= (1 << i);
+ }
+ }
+
+ /* The HMC module should be ready */
+ reg = pc87427_read8(data, LD_TEMP, PC87427_REG_BANK);
+ if (!(reg & 0x80))
+ dev_warn(dev, "%s module not ready!\n", "HMC");
+
+ /* Check which temperature channels are enabled */
+ for (i = 0; i < 6; i++) {
+ reg = pc87427_read8_bank(data, LD_TEMP, BANK_TM(i),
+ PC87427_REG_TEMP_STATUS);
+ if (reg & TEMP_STATUS_CHANEN)
+ data->temp_enabled |= (1 << i);
+ }
+}
+
+static void pc87427_remove_files(struct device *dev)
+{
+ struct pc87427_data *data = dev_get_drvdata(dev);
+ int i;
+
+ device_remove_file(dev, &dev_attr_name);
+ for (i = 0; i < 8; i++) {
+ if (!(data->fan_enabled & (1 << i)))
+ continue;
+ sysfs_remove_group(&dev->kobj, &pc87427_group_fan[i]);
+ }
+ for (i = 0; i < 4; i++) {
+ if (!(data->pwm_enabled & (1 << i)))
+ continue;
+ sysfs_remove_group(&dev->kobj, &pc87427_group_pwm[i]);
+ }
+ for (i = 0; i < 6; i++) {
+ if (!(data->temp_enabled & (1 << i)))
+ continue;
+ sysfs_remove_group(&dev->kobj, &pc87427_group_temp[i]);
}
}
static int __devinit pc87427_probe(struct platform_device *pdev)
{
+ struct pc87427_sio_data *sio_data = pdev->dev.platform_data;
struct pc87427_data *data;
- struct resource *res;
- int i, err;
+ int i, err, res_count;
- if (!(data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
printk(KERN_ERR DRVNAME ": Out of memory\n");
goto exit;
}
- /* This will need to be revisited when we add support for
- temperature and voltage monitoring. */
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, resource_size(res), DRVNAME)) {
- err = -EBUSY;
- dev_err(&pdev->dev, "Failed to request region 0x%lx-0x%lx\n",
- (unsigned long)res->start, (unsigned long)res->end);
+ data->address[0] = sio_data->address[0];
+ data->address[1] = sio_data->address[1];
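+ /* One I/O region per wired logical device (fan and temperature
+ monitoring); count how many were found by pc87427_find() */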
+ res_count = (data->address[0] != 0) + (data->address[1] != 0);
+
+ err = pc87427_request_regions(pdev, res_count);
+ if (err)
goto exit_kfree;
- }
- data->address[0] = res->start;
mutex_init(&data->lock);
data->name = "pc87427";
@@ -449,13 +1095,31 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
pc87427_init_device(&pdev->dev);
/* Register sysfs hooks */
- if ((err = device_create_file(&pdev->dev, &dev_attr_name)))
+ err = device_create_file(&pdev->dev, &dev_attr_name);
+ if (err)
goto exit_release_region;
for (i = 0; i < 8; i++) {
if (!(data->fan_enabled & (1 << i)))
continue;
- if ((err = sysfs_create_group(&pdev->dev.kobj,
- &pc87427_group_fan[i])))
+ err = sysfs_create_group(&pdev->dev.kobj,
+ &pc87427_group_fan[i]);
+ if (err)
+ goto exit_remove_files;
+ }
+ for (i = 0; i < 4; i++) {
+ if (!(data->pwm_enabled & (1 << i)))
+ continue;
+ err = sysfs_create_group(&pdev->dev.kobj,
+ &pc87427_group_pwm[i]);
+ if (err)
+ goto exit_remove_files;
+ }
+ for (i = 0; i < 6; i++) {
+ if (!(data->temp_enabled & (1 << i)))
+ continue;
+ err = sysfs_create_group(&pdev->dev.kobj,
+ &pc87427_group_temp[i]);
+ if (err)
goto exit_remove_files;
}
@@ -469,13 +1133,9 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
return 0;
exit_remove_files:
- for (i = 0; i < 8; i++) {
- if (!(data->fan_enabled & (1 << i)))
- continue;
- sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]);
- }
+ pc87427_remove_files(&pdev->dev);
exit_release_region:
- release_region(res->start, resource_size(res));
+ pc87427_release_regions(pdev, res_count);
exit_kfree:
platform_set_drvdata(pdev, NULL);
kfree(data);
@@ -486,21 +1146,16 @@ exit:
static int __devexit pc87427_remove(struct platform_device *pdev)
{
struct pc87427_data *data = platform_get_drvdata(pdev);
- struct resource *res;
- int i;
+ int res_count;
+
+ res_count = (data->address[0] != 0) + (data->address[1] != 0);
hwmon_device_unregister(data->hwmon_dev);
- device_remove_file(&pdev->dev, &dev_attr_name);
- for (i = 0; i < 8; i++) {
- if (!(data->fan_enabled & (1 << i)))
- continue;
- sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]);
- }
+ pc87427_remove_files(&pdev->dev);
platform_set_drvdata(pdev, NULL);
kfree(data);
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, resource_size(res));
+ pc87427_release_regions(pdev, res_count);
return 0;
}
@@ -515,34 +1170,50 @@ static struct platform_driver pc87427_driver = {
.remove = __devexit_p(pc87427_remove),
};
-static int __init pc87427_device_add(unsigned short address)
+static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data)
{
- struct resource res = {
- .start = address,
- .end = address + REGION_LENGTH - 1,
- .name = logdev_str[0],
- .flags = IORESOURCE_IO,
+ struct resource res[2] = {
+ { .flags = IORESOURCE_IO },
+ { .flags = IORESOURCE_IO },
};
- int err;
+ int err, i, res_count;
- err = acpi_check_resource_conflict(&res);
- if (err)
- goto exit;
+ res_count = 0;
+ for (i = 0; i < 2; i++) {
+ if (!sio_data->address[i])
+ continue;
+ res[res_count].start = sio_data->address[i];
+ res[res_count].end = sio_data->address[i] + REGION_LENGTH - 1;
+ res[res_count].name = logdev_str[i];
- pdev = platform_device_alloc(DRVNAME, address);
+ err = acpi_check_resource_conflict(&res[res_count]);
+ if (err)
+ goto exit;
+
+ res_count++;
+ }
+
+ pdev = platform_device_alloc(DRVNAME, res[0].start);
if (!pdev) {
err = -ENOMEM;
printk(KERN_ERR DRVNAME ": Device allocation failed\n");
goto exit;
}
- err = platform_device_add_resources(pdev, &res, 1);
+ err = platform_device_add_resources(pdev, res, res_count);
if (err) {
printk(KERN_ERR DRVNAME ": Device resource addition failed "
"(%d)\n", err);
goto exit_device_put;
}
+ err = platform_device_add_data(pdev, sio_data,
+ sizeof(struct pc87427_sio_data));
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ goto exit_device_put;
+ }
+
err = platform_device_add(pdev);
if (err) {
printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
@@ -558,9 +1229,10 @@ exit:
return err;
}
-static int __init pc87427_find(int sioaddr, unsigned short *address)
+static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
{
u16 val;
+ u8 cfg, cfg_b;
int i, err = 0;
/* Identify device */
@@ -571,7 +1243,7 @@ static int __init pc87427_find(int sioaddr, unsigned short *address)
}
for (i = 0; i < 2; i++) {
- address[i] = 0;
+ sio_data->address[i] = 0;
/* Select logical device */
superio_outb(sioaddr, SIOREG_LDSEL, logdev[i]);
@@ -596,9 +1268,58 @@ static int __init pc87427_find(int sioaddr, unsigned short *address)
"for logical device 0x%02x\n", logdev[i]);
continue;
}
- address[i] = val;
+ sio_data->address[i] = val;
}
+ /* No point in loading the driver if everything is disabled */
+ if (!sio_data->address[0] && !sio_data->address[1]) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /* Check which fan inputs are wired */
+ sio_data->has_fanin = (1 << 2) | (1 << 3); /* FANIN2, FANIN3 */
+
+ cfg = superio_inb(sioaddr, SIOREG_CF2);
+ if (!(cfg & (1 << 3)))
+ sio_data->has_fanin |= (1 << 0); /* FANIN0 */
+ if (!(cfg & (1 << 2)))
+ sio_data->has_fanin |= (1 << 4); /* FANIN4 */
+
+ cfg = superio_inb(sioaddr, SIOREG_CFD);
+ if (!(cfg & (1 << 0)))
+ sio_data->has_fanin |= (1 << 1); /* FANIN1 */
+
+ cfg = superio_inb(sioaddr, SIOREG_CF4);
+ if (!(cfg & (1 << 0)))
+ sio_data->has_fanin |= (1 << 7); /* FANIN7 */
+ cfg_b = superio_inb(sioaddr, SIOREG_CFB);
+ if (!(cfg & (1 << 1)) && (cfg_b & (1 << 3)))
+ sio_data->has_fanin |= (1 << 5); /* FANIN5 */
+ cfg = superio_inb(sioaddr, SIOREG_CF3);
+ if ((cfg & (1 << 3)) && !(cfg_b & (1 << 5)))
+ sio_data->has_fanin |= (1 << 6); /* FANIN6 */
+
+ /* Check which fan outputs are wired */
+ sio_data->has_fanout = (1 << 0); /* FANOUT0 */
+ if (cfg_b & (1 << 0))
+ sio_data->has_fanout |= (1 << 3); /* FANOUT3 */
+
+ cfg = superio_inb(sioaddr, SIOREG_CFC);
+ if (!(cfg & (1 << 4))) {
+ if (cfg_b & (1 << 1))
+ sio_data->has_fanout |= (1 << 1); /* FANOUT1 */
+ if (cfg_b & (1 << 2))
+ sio_data->has_fanout |= (1 << 2); /* FANOUT2 */
+ }
+
+ /* FANOUT1 and FANOUT2 can each be routed to 2 different pins */
+ cfg = superio_inb(sioaddr, SIOREG_CF5);
+ if (cfg & (1 << 6))
+ sio_data->has_fanout |= (1 << 1); /* FANOUT1 */
+ if (cfg & (1 << 5))
+ sio_data->has_fanout |= (1 << 2); /* FANOUT2 */
+
exit:
superio_exit(sioaddr);
return err;
@@ -607,15 +1328,10 @@ exit:
static int __init pc87427_init(void)
{
int err;
- unsigned short address[2];
-
- if (pc87427_find(0x2e, address)
- && pc87427_find(0x4e, address))
- return -ENODEV;
+ struct pc87427_sio_data sio_data;
- /* For now the driver only handles fans so we only care about the
- first address. */
- if (!address[0])
+ if (pc87427_find(0x2e, &sio_data)
+ && pc87427_find(0x4e, &sio_data))
return -ENODEV;
err = platform_driver_register(&pc87427_driver);
@@ -623,7 +1339,7 @@ static int __init pc87427_init(void)
goto exit;
/* Sets global pdev as a side effect */
- err = pc87427_device_add(address[0]);
+ err = pc87427_device_add(&sio_data);
if (err)
goto exit_driver;
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
new file mode 100644
index 00000000000..f11903936c8
--- /dev/null
+++ b/drivers/hwmon/pkgtemp.c
@@ -0,0 +1,455 @@
+/*
+ * pkgtemp.c - Linux kernel module for processor package hardware monitoring
+ *
+ * Copyright (C) 2010 Fenghua Yu <fenghua.yu@intel.com>
+ *
+ * Inspired from many hwmon drivers especially coretemp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+#define DRVNAME "pkgtemp"
+
+enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL, SHOW_NAME };
+
+/*
+ * Functions declaration
+ */
+
+static struct pkgtemp_data *pkgtemp_update_device(struct device *dev);
+
+struct pkgtemp_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ const char *name;
+ u32 id;
+ u16 phys_proc_id;
+ char valid; /* zero until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+ int temp;
+ int tjmax;
+ int ttarget;
+ u8 alarm;
+};
+
+/*
+ * Sysfs stuff
+ */
+
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ int ret;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pkgtemp_data *data = dev_get_drvdata(dev);
+
+ if (attr->index == SHOW_NAME)
+ ret = sprintf(buf, "%s\n", data->name);
+ else /* show label */
+ ret = sprintf(buf, "physical id %d\n",
+ data->phys_proc_id);
+ return ret;
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pkgtemp_data *data = pkgtemp_update_device(dev);
+ /* read the Out-of-spec log, never clear */
+ return sprintf(buf, "%d\n", data->alarm);
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pkgtemp_data *data = pkgtemp_update_device(dev);
+ int err = 0;
+
+ if (attr->index == SHOW_TEMP)
+ err = data->valid ? sprintf(buf, "%d\n", data->temp) : -EAGAIN;
+ else if (attr->index == SHOW_TJMAX)
+ err = sprintf(buf, "%d\n", data->tjmax);
+ else
+ err = sprintf(buf, "%d\n", data->ttarget);
+ return err;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, SHOW_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, SHOW_TJMAX);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, SHOW_TTARGET);
+static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
+
+static struct attribute *pkgtemp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &dev_attr_temp1_crit_alarm.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group pkgtemp_group = {
+ .attrs = pkgtemp_attributes,
+};
+
+static struct pkgtemp_data *pkgtemp_update_device(struct device *dev)
+{
+ struct pkgtemp_data *data = dev_get_drvdata(dev);
+ unsigned int cpu;
+ int err;
+
+ mutex_lock(&data->update_lock);
+
+ if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
+ u32 eax, edx;
+
+ data->valid = 0;
+ cpu = data->id;
+ err = rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_STATUS,
+ &eax, &edx);
+ if (!err) {
+ data->alarm = (eax >> 5) & 1;
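+ /* The 7-bit field at bits 22:16 is the temperature
+ readout, in degrees C below TjMax */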
+ data->temp = data->tjmax - (((eax >> 16)
+ & 0x7f) * 1000);
+ data->valid = 1;
+ } else
+ dev_dbg(dev, "Temperature data invalid (0x%x)\n", eax);
+
+ data->last_updated = jiffies;
+ }
+
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+static int get_tjmax(int cpu, struct device *dev)
+{
+ int default_tjmax = 100000;
+ int err;
+ u32 eax, edx;
+ u32 val;
+
+ /* IA32_TEMPERATURE_TARGET contains the TjMax value */
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (!err) {
+ val = (eax >> 16) & 0xff;
+ if ((val > 80) && (val < 120)) {
+ dev_info(dev, "TjMax is %d C.\n", val);
+ return val * 1000;
+ }
+ }
+ dev_warn(dev, "Unable to read TjMax from CPU.\n");
+ return default_tjmax;
+}
+
+static int __devinit pkgtemp_probe(struct platform_device *pdev)
+{
+ struct pkgtemp_data *data;
+ int err;
+ u32 eax, edx;
+#ifdef CONFIG_SMP
+ struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+#endif
+
+ data = kzalloc(sizeof(struct pkgtemp_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Out of memory\n");
+ goto exit;
+ }
+
+ data->id = pdev->id;
+#ifdef CONFIG_SMP
+ data->phys_proc_id = c->phys_proc_id;
+#endif
+ data->name = "pkgtemp";
+ mutex_init(&data->update_lock);
+
+ /* test if we can access the THERM_STATUS MSR */
+ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_PACKAGE_THERM_STATUS,
+ &eax, &edx);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to access THERM_STATUS MSR, giving up\n");
+ goto exit_free;
+ }
+
+ data->tjmax = get_tjmax(data->id, &pdev->dev);
+ platform_set_drvdata(pdev, data);
+
+ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
+ &eax, &edx);
+ if (err) {
+ dev_warn(&pdev->dev, "Unable to read"
+ " IA32_TEMPERATURE_TARGET MSR\n");
+ } else {
+ data->ttarget = data->tjmax - (((eax >> 8) & 0xff) * 1000);
+ err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_temp1_max.dev_attr);
+ if (err)
+ goto exit_free;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
+ if (err)
+ goto exit_dev;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n",
+ err);
+ goto exit_class;
+ }
+
+ return 0;
+
+exit_class:
+ sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit pkgtemp_remove(struct platform_device *pdev)
+{
+ struct pkgtemp_data *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+ return 0;
+}
+
+static struct platform_driver pkgtemp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRVNAME,
+ },
+ .probe = pkgtemp_probe,
+ .remove = __devexit_p(pkgtemp_remove),
+};
+
+struct pdev_entry {
+ struct list_head list;
+ struct platform_device *pdev;
+ unsigned int cpu;
+#ifdef CONFIG_SMP
+ u16 phys_proc_id;
+#endif
+};
+
+static LIST_HEAD(pdev_list);
+static DEFINE_MUTEX(pdev_list_mutex);
+
+static int __cpuinit pkgtemp_device_add(unsigned int cpu)
+{
+ int err;
+ struct platform_device *pdev;
+ struct pdev_entry *pdev_entry;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (!cpu_has(c, X86_FEATURE_PTS))
+ return 0;
+
+ mutex_lock(&pdev_list_mutex);
+
+#ifdef CONFIG_SMP
+ /* Only keep the first entry in each package */
+ list_for_each_entry(pdev_entry, &pdev_list, list) {
+ if (c->phys_proc_id == pdev_entry->phys_proc_id) {
+ err = 0; /* Not an error */
+ goto exit;
+ }
+ }
+#endif
+
+ pdev = platform_device_alloc(DRVNAME, cpu);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ goto exit;
+ }
+
+ pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
+ if (!pdev_entry) {
+ err = -ENOMEM;
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+ err);
+ goto exit_device_free;
+ }
+
+#ifdef CONFIG_SMP
+ pdev_entry->phys_proc_id = c->phys_proc_id;
+#endif
+ pdev_entry->pdev = pdev;
+ pdev_entry->cpu = cpu;
+ list_add_tail(&pdev_entry->list, &pdev_list);
+ mutex_unlock(&pdev_list_mutex);
+
+ return 0;
+
+exit_device_free:
+ kfree(pdev_entry);
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ mutex_unlock(&pdev_list_mutex);
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void pkgtemp_device_remove(unsigned int cpu)
+{
+ struct pdev_entry *p;
+ unsigned int i;
+ int err;
+
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry(p, &pdev_list, list) {
+ if (p->cpu != cpu)
+ continue;
+
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
+ kfree(p);
+ for_each_cpu(i, cpu_core_mask(cpu)) {
+ if (i != cpu) {
+ err = pkgtemp_device_add(i);
+ if (!err)
+ break;
+ }
+ }
+ return;
+ }
+ mutex_unlock(&pdev_list_mutex);
+}
+
+static int __cpuinit pkgtemp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long) hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ pkgtemp_device_add(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ pkgtemp_device_remove(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block pkgtemp_cpu_notifier __refdata = {
+ .notifier_call = pkgtemp_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static int __init pkgtemp_init(void)
+{
+ int i, err = -ENODEV;
+ struct pdev_entry *p, *n;
+
+ /* quick check if we run Intel */
+ if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
+ goto exit;
+
+ err = platform_driver_register(&pkgtemp_driver);
+ if (err)
+ goto exit;
+
+ for_each_online_cpu(i) {
+ err = pkgtemp_device_add(i);
+ if (err)
+ goto exit_devices_unreg;
+ }
+ if (list_empty(&pdev_list)) {
+ err = -ENODEV;
+ goto exit_driver_unreg;
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&pkgtemp_cpu_notifier);
+#endif
+ return 0;
+
+exit_devices_unreg:
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ mutex_unlock(&pdev_list_mutex);
+exit_driver_unreg:
+ platform_driver_unregister(&pkgtemp_driver);
+exit:
+ return err;
+}
+
+static void __exit pkgtemp_exit(void)
+{
+ struct pdev_entry *p, *n;
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_hotcpu_notifier(&pkgtemp_cpu_notifier);
+#endif
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ mutex_unlock(&pdev_list_mutex);
+ platform_driver_unregister(&pkgtemp_driver);
+}
+
+MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>");
+MODULE_DESCRIPTION("Intel processor package temperature monitor");
+MODULE_LICENSE("GPL");
+
+module_init(pkgtemp_init)
+module_exit(pkgtemp_exit)
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
new file mode 100644
index 00000000000..425df5bccd4
--- /dev/null
+++ b/drivers/hwmon/smm665.c
@@ -0,0 +1,743 @@
+/*
+ * Driver for SMM665 Power Controller / Monitor
+ *
+ * Copyright (C) 2010 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This driver should also work for SMM465, SMM764, and SMM766, but is untested
+ * for those chips. Only monitoring functionality is implemented.
+ *
+ * Datasheets:
+ * http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf
+ * http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/delay.h>
+
+/* Internal reference voltage (VREF), x 1000 */
+#define SMM665_VREF_ADC_X1000 1250
+
+/* module parameters */
+static int vref = SMM665_VREF_ADC_X1000;
+module_param(vref, int, 0);
+MODULE_PARM_DESC(vref, "Reference voltage in mV");
+
+enum chips { smm465, smm665, smm665c, smm764, smm766 };
+
+/*
+ * ADC channel addresses
+ */
+#define SMM665_MISC16_ADC_DATA_A 0x00
+#define SMM665_MISC16_ADC_DATA_B 0x01
+#define SMM665_MISC16_ADC_DATA_C 0x02
+#define SMM665_MISC16_ADC_DATA_D 0x03
+#define SMM665_MISC16_ADC_DATA_E 0x04
+#define SMM665_MISC16_ADC_DATA_F 0x05
+#define SMM665_MISC16_ADC_DATA_VDD 0x06
+#define SMM665_MISC16_ADC_DATA_12V 0x07
+#define SMM665_MISC16_ADC_DATA_INT_TEMP 0x08
+#define SMM665_MISC16_ADC_DATA_AIN1 0x09
+#define SMM665_MISC16_ADC_DATA_AIN2 0x0a
+
+/*
+ * Command registers
+ */
+#define SMM665_MISC8_CMD_STS 0x80
+#define SMM665_MISC8_STATUS1 0x81
+#define SMM665_MISC8_STATUS2 0x82
+#define SMM665_MISC8_IO_POLARITY 0x83
+#define SMM665_MISC8_PUP_POLARITY 0x84
+#define SMM665_MISC8_ADOC_STATUS1 0x85
+#define SMM665_MISC8_ADOC_STATUS2 0x86
+#define SMM665_MISC8_WRITE_PROT 0x87
+#define SMM665_MISC8_STS_TRACK 0x88
+
+/*
+ * Configuration registers and register groups
+ */
+#define SMM665_ADOC_ENABLE 0x0d
+#define SMM665_LIMIT_BASE 0x80 /* First limit register */
+
+/*
+ * Limit register bit masks
+ */
+#define SMM665_TRIGGER_RST 0x8000
+#define SMM665_TRIGGER_HEALTHY 0x4000
+#define SMM665_TRIGGER_POWEROFF 0x2000
+#define SMM665_TRIGGER_SHUTDOWN 0x1000
+#define SMM665_ADC_MASK 0x03ff
+
+#define smm665_is_critical(lim) ((lim) & (SMM665_TRIGGER_RST \
+ | SMM665_TRIGGER_POWEROFF \
+ | SMM665_TRIGGER_SHUTDOWN))
+/*
+ * Fault register bit definitions
+ * Values are merged from status registers 1/2,
+ * with status register 1 providing the upper 8 bits.
+ */
+#define SMM665_FAULT_A 0x0001
+#define SMM665_FAULT_B 0x0002
+#define SMM665_FAULT_C 0x0004
+#define SMM665_FAULT_D 0x0008
+#define SMM665_FAULT_E 0x0010
+#define SMM665_FAULT_F 0x0020
+#define SMM665_FAULT_VDD 0x0040
+#define SMM665_FAULT_12V 0x0080
+#define SMM665_FAULT_TEMP 0x0100
+#define SMM665_FAULT_AIN1 0x0200
+#define SMM665_FAULT_AIN2 0x0400
+
+/*
+ * I2C Register addresses
+ *
+ * The configuration register needs to be the configured base register.
+ * The command/status register address is derived from it.
+ */
+#define SMM665_REGMASK 0x78
+#define SMM665_CMDREG_BASE 0x48
+#define SMM665_CONFREG_BASE 0x50
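+
+/*
+ * Example (hypothetical address, for illustration only): a chip whose
+ * configuration registers respond at 0x57 (SMM665_CONFREG_BASE | 0x07)
+ * has its command/status registers at
+ * (0x57 & ~SMM665_REGMASK) | SMM665_CMDREG_BASE = 0x4f.
+ */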
+
+/*
+ * Equations given by chip manufacturer to calculate voltage/temperature values
+ * vref = Reference voltage on VREF_ADC pin (module parameter)
+ * adc = 10bit ADC value read back from registers
+ */
+
+/* Voltage A-F and VDD */
+#define SMM665_VMON_ADC_TO_VOLTS(adc) ((adc) * vref / 256)
+
+/* Voltage 12VIN */
+#define SMM665_12VIN_ADC_TO_VOLTS(adc) ((adc) * vref * 3 / 256)
+
+/* Voltage AIN1, AIN2 */
+#define SMM665_AIN_ADC_TO_VOLTS(adc) ((adc) * vref / 512)
+
+/* Temp Sensor */
+#define SMM665_TEMP_ADC_TO_CELSIUS(adc) (((adc) <= 511) ? \
+ ((int)(adc) * 1000 / 4) : \
+ (((int)(adc) - 0x400) * 1000 / 4))
+
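+/*
+ * Worked example (illustrative, not from the datasheet): with the default
+ * vref of 1250 mV, a raw 10-bit reading of 512 on a monitor input A-F
+ * converts to 512 * 1250 / 256 = 2500 mV, the same reading on 12VIN to
+ * 512 * 1250 * 3 / 256 = 7500 mV, and on AIN1/AIN2 to
+ * 512 * 1250 / 512 = 1250 mV.
+ */
+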
+#define SMM665_NUM_ADC 11
+
+/*
+ * Chip-dependent ADC conversion time, in microseconds
+ */
+#define SMM665_ADC_WAIT_SMM665 70
+#define SMM665_ADC_WAIT_SMM766 185
+
+struct smm665_data {
+ enum chips type;
+ int conversion_time; /* ADC conversion time */
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+ u16 adc[SMM665_NUM_ADC]; /* adc values (raw) */
+ u16 faults; /* fault status */
+ /* The following values are in mV */
+ int critical_min_limit[SMM665_NUM_ADC];
+ int alarm_min_limit[SMM665_NUM_ADC];
+ int critical_max_limit[SMM665_NUM_ADC];
+ int alarm_max_limit[SMM665_NUM_ADC];
+ struct i2c_client *cmdreg;
+};
+
+/*
+ * smm665_read16()
+ *
+ * Read 16 bit value from <reg>, <reg+1>. Upper 8 bits are in <reg>.
+ */
+static int smm665_read16(struct i2c_client *client, int reg)
+{
+ int rv, val;
+
+ rv = i2c_smbus_read_byte_data(client, reg);
+ if (rv < 0)
+ return rv;
+ val = rv << 8;
+ rv = i2c_smbus_read_byte_data(client, reg + 1);
+ if (rv < 0)
+ return rv;
+ val |= rv;
+ return val;
+}
+
+/*
+ * Read adc value.
+ */
+static int smm665_read_adc(struct smm665_data *data, int adc)
+{
+ struct i2c_client *client = data->cmdreg;
+ int rv;
+ int radc;
+
+ /*
+ * Algorithm for reading ADC, per SMM665 datasheet
+ *
+ * {[S][addr][W][Ack]} {[offset][Ack]} {[S][addr][R][Nack]}
+ * [wait conversion time]
+ * {[S][addr][R][Ack]} {[datahi][Ack]} {[datalo][Ack][P]}
+ *
+ * To implement the first part of this exchange,
+ * do a full read transaction and expect a failure/Nack.
+ * This sets up the address pointer on the SMM665
+ * and starts the ADC conversion.
+ * Then do a two-byte read transaction.
+ */
+ rv = i2c_smbus_read_byte_data(client, adc << 3);
+ if (rv != -ENXIO) {
+ /*
+ * We expect ENXIO to reflect NACK
+ * (per Documentation/i2c/fault-codes).
+ * Everything else is an error.
+ */
+ dev_dbg(&client->dev,
+ "Unexpected return code %d when setting ADC index", rv);
+ return (rv < 0) ? rv : -EIO;
+ }
+
+ udelay(data->conversion_time);
+
+ /*
+ * Now read two bytes.
+ *
+ * Neither i2c_smbus_read_byte() nor
+ * i2c_smbus_read_block_data() worked here,
+ * so use i2c_smbus_read_word_data() instead.
+ * We could also try to use i2c_master_recv(),
+ * but that is not always supported.
+ */
+ rv = i2c_smbus_read_word_data(client, 0);
+ if (rv < 0) {
+ dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv);
+ return -1;
+ }
+ /*
+ * Validate/verify readback adc channel (in bit 11..14).
+ * High byte is in lower 8 bit of rv, so only shift by 3.
+ */
+ radc = (rv >> 3) & 0x0f;
+ if (radc != adc) {
+ dev_dbg(&client->dev, "Unexpected RADC: Expected %d got %d",
+ adc, radc);
+ return -EIO;
+ }
+ /*
+ * Chip replies with H/L, while SMBus expects L/H.
+ * Thus, byte order is reversed, and we have to swap
+ * the result.
+ */
+ rv = swab16(rv) & SMM665_ADC_MASK;
+
+ return rv;
+}
+
+static struct smm665_data *smm665_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smm665_data *data = i2c_get_clientdata(client);
+ struct smm665_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int i, val;
+
+ /*
+ * read status registers
+ */
+ val = smm665_read16(client, SMM665_MISC8_STATUS1);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->faults = val;
+
+ /* Read adc registers */
+ for (i = 0; i < SMM665_NUM_ADC; i++) {
+ val = smm665_read_adc(data, i);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->adc[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/* Return converted value from given adc */
+static int smm665_convert(u16 adcval, int index)
+{
+ int val = 0;
+
+ switch (index) {
+ case SMM665_MISC16_ADC_DATA_12V:
+ val = SMM665_12VIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
+ break;
+
+ case SMM665_MISC16_ADC_DATA_VDD:
+ case SMM665_MISC16_ADC_DATA_A:
+ case SMM665_MISC16_ADC_DATA_B:
+ case SMM665_MISC16_ADC_DATA_C:
+ case SMM665_MISC16_ADC_DATA_D:
+ case SMM665_MISC16_ADC_DATA_E:
+ case SMM665_MISC16_ADC_DATA_F:
+ val = SMM665_VMON_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
+ break;
+
+ case SMM665_MISC16_ADC_DATA_AIN1:
+ case SMM665_MISC16_ADC_DATA_AIN2:
+ val = SMM665_AIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
+ break;
+
+ case SMM665_MISC16_ADC_DATA_INT_TEMP:
+ val = SMM665_TEMP_ADC_TO_CELSIUS(adcval & SMM665_ADC_MASK);
+ break;
+
+ default:
+ /* If we get here, the developer messed up */
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return val;
+}
+
+static int smm665_get_min(struct device *dev, int index)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smm665_data *data = i2c_get_clientdata(client);
+
+ return data->alarm_min_limit[index];
+}
+
+static int smm665_get_max(struct device *dev, int index)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smm665_data *data = i2c_get_clientdata(client);
+
+ return data->alarm_max_limit[index];
+}
+
+static int smm665_get_lcrit(struct device *dev, int index)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smm665_data *data = i2c_get_clientdata(client);
+
+ return data->critical_min_limit[index];
+}
+
+static int smm665_get_crit(struct device *dev, int index)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smm665_data *data = i2c_get_clientdata(client);
+
+ return data->critical_max_limit[index];
+}
+
+static ssize_t smm665_show_crit_alarm(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct smm665_data *data = smm665_update_device(dev);
+ int val = 0;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->faults & (1 << attr->index))
+ val = 1;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t smm665_show_input(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct smm665_data *data = smm665_update_device(dev);
+ int adc = attr->index;
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = smm665_convert(data->adc[adc], adc);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+#define SMM665_SHOW(what) \
+ static ssize_t smm665_show_##what(struct device *dev, \
+ struct device_attribute *da, char *buf) \
+{ \
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); \
+ const int val = smm665_get_##what(dev, attr->index); \
+ return snprintf(buf, PAGE_SIZE, "%d\n", val); \
+}
+
+SMM665_SHOW(min);
+SMM665_SHOW(max);
+SMM665_SHOW(lcrit);
+SMM665_SHOW(crit);
+
+/* These macros are used below in constructing device attribute objects
+ * for use with sysfs_create_group() to make a sysfs device file
+ * for each register.
+ */
+
+#define SMM665_ATTR(name, type, cmd_idx) \
+ static SENSOR_DEVICE_ATTR(name##_##type, S_IRUGO, \
+ smm665_show_##type, NULL, cmd_idx)
+
+/* Construct a sensor_device_attribute structure for each register */
+
+/* Input voltages */
+SMM665_ATTR(in1, input, SMM665_MISC16_ADC_DATA_12V);
+SMM665_ATTR(in2, input, SMM665_MISC16_ADC_DATA_VDD);
+SMM665_ATTR(in3, input, SMM665_MISC16_ADC_DATA_A);
+SMM665_ATTR(in4, input, SMM665_MISC16_ADC_DATA_B);
+SMM665_ATTR(in5, input, SMM665_MISC16_ADC_DATA_C);
+SMM665_ATTR(in6, input, SMM665_MISC16_ADC_DATA_D);
+SMM665_ATTR(in7, input, SMM665_MISC16_ADC_DATA_E);
+SMM665_ATTR(in8, input, SMM665_MISC16_ADC_DATA_F);
+SMM665_ATTR(in9, input, SMM665_MISC16_ADC_DATA_AIN1);
+SMM665_ATTR(in10, input, SMM665_MISC16_ADC_DATA_AIN2);
+
+/* Input voltages min */
+SMM665_ATTR(in1, min, SMM665_MISC16_ADC_DATA_12V);
+SMM665_ATTR(in2, min, SMM665_MISC16_ADC_DATA_VDD);
+SMM665_ATTR(in3, min, SMM665_MISC16_ADC_DATA_A);
+SMM665_ATTR(in4, min, SMM665_MISC16_ADC_DATA_B);
+SMM665_ATTR(in5, min, SMM665_MISC16_ADC_DATA_C);
+SMM665_ATTR(in6, min, SMM665_MISC16_ADC_DATA_D);
+SMM665_ATTR(in7, min, SMM665_MISC16_ADC_DATA_E);
+SMM665_ATTR(in8, min, SMM665_MISC16_ADC_DATA_F);
+SMM665_ATTR(in9, min, SMM665_MISC16_ADC_DATA_AIN1);
+SMM665_ATTR(in10, min, SMM665_MISC16_ADC_DATA_AIN2);
+
+/* Input voltages max */
+SMM665_ATTR(in1, max, SMM665_MISC16_ADC_DATA_12V);
+SMM665_ATTR(in2, max, SMM665_MISC16_ADC_DATA_VDD);
+SMM665_ATTR(in3, max, SMM665_MISC16_ADC_DATA_A);
+SMM665_ATTR(in4, max, SMM665_MISC16_ADC_DATA_B);
+SMM665_ATTR(in5, max, SMM665_MISC16_ADC_DATA_C);
+SMM665_ATTR(in6, max, SMM665_MISC16_ADC_DATA_D);
+SMM665_ATTR(in7, max, SMM665_MISC16_ADC_DATA_E);
+SMM665_ATTR(in8, max, SMM665_MISC16_ADC_DATA_F);
+SMM665_ATTR(in9, max, SMM665_MISC16_ADC_DATA_AIN1);
+SMM665_ATTR(in10, max, SMM665_MISC16_ADC_DATA_AIN2);
+
+/* Input voltages lcrit */
+SMM665_ATTR(in1, lcrit, SMM665_MISC16_ADC_DATA_12V);
+SMM665_ATTR(in2, lcrit, SMM665_MISC16_ADC_DATA_VDD);
+SMM665_ATTR(in3, lcrit, SMM665_MISC16_ADC_DATA_A);
+SMM665_ATTR(in4, lcrit, SMM665_MISC16_ADC_DATA_B);
+SMM665_ATTR(in5, lcrit, SMM665_MISC16_ADC_DATA_C);
+SMM665_ATTR(in6, lcrit, SMM665_MISC16_ADC_DATA_D);
+SMM665_ATTR(in7, lcrit, SMM665_MISC16_ADC_DATA_E);
+SMM665_ATTR(in8, lcrit, SMM665_MISC16_ADC_DATA_F);
+SMM665_ATTR(in9, lcrit, SMM665_MISC16_ADC_DATA_AIN1);
+SMM665_ATTR(in10, lcrit, SMM665_MISC16_ADC_DATA_AIN2);
+
+/* Input voltages crit */
+SMM665_ATTR(in1, crit, SMM665_MISC16_ADC_DATA_12V);
+SMM665_ATTR(in2, crit, SMM665_MISC16_ADC_DATA_VDD);
+SMM665_ATTR(in3, crit, SMM665_MISC16_ADC_DATA_A);
+SMM665_ATTR(in4, crit, SMM665_MISC16_ADC_DATA_B);
+SMM665_ATTR(in5, crit, SMM665_MISC16_ADC_DATA_C);
+SMM665_ATTR(in6, crit, SMM665_MISC16_ADC_DATA_D);
+SMM665_ATTR(in7, crit, SMM665_MISC16_ADC_DATA_E);
+SMM665_ATTR(in8, crit, SMM665_MISC16_ADC_DATA_F);
+SMM665_ATTR(in9, crit, SMM665_MISC16_ADC_DATA_AIN1);
+SMM665_ATTR(in10, crit, SMM665_MISC16_ADC_DATA_AIN2);
+
+/* critical alarms */
+SMM665_ATTR(in1, crit_alarm, SMM665_FAULT_12V);
+SMM665_ATTR(in2, crit_alarm, SMM665_FAULT_VDD);
+SMM665_ATTR(in3, crit_alarm, SMM665_FAULT_A);
+SMM665_ATTR(in4, crit_alarm, SMM665_FAULT_B);
+SMM665_ATTR(in5, crit_alarm, SMM665_FAULT_C);
+SMM665_ATTR(in6, crit_alarm, SMM665_FAULT_D);
+SMM665_ATTR(in7, crit_alarm, SMM665_FAULT_E);
+SMM665_ATTR(in8, crit_alarm, SMM665_FAULT_F);
+SMM665_ATTR(in9, crit_alarm, SMM665_FAULT_AIN1);
+SMM665_ATTR(in10, crit_alarm, SMM665_FAULT_AIN2);
+
+/* Temperature */
+SMM665_ATTR(temp1, input, SMM665_MISC16_ADC_DATA_INT_TEMP);
+SMM665_ATTR(temp1, min, SMM665_MISC16_ADC_DATA_INT_TEMP);
+SMM665_ATTR(temp1, max, SMM665_MISC16_ADC_DATA_INT_TEMP);
+SMM665_ATTR(temp1, lcrit, SMM665_MISC16_ADC_DATA_INT_TEMP);
+SMM665_ATTR(temp1, crit, SMM665_MISC16_ADC_DATA_INT_TEMP);
+SMM665_ATTR(temp1, crit_alarm, SMM665_FAULT_TEMP);
+
+/*
+ * Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *smm665_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in1_crit.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in2_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in2_crit.dev_attr.attr,
+ &sensor_dev_attr_in2_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in3_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in3_crit.dev_attr.attr,
+ &sensor_dev_attr_in3_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in4_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in4_crit.dev_attr.attr,
+ &sensor_dev_attr_in4_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in5_min.dev_attr.attr,
+ &sensor_dev_attr_in5_max.dev_attr.attr,
+ &sensor_dev_attr_in5_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in5_crit.dev_attr.attr,
+ &sensor_dev_attr_in5_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in6_min.dev_attr.attr,
+ &sensor_dev_attr_in6_max.dev_attr.attr,
+ &sensor_dev_attr_in6_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in6_crit.dev_attr.attr,
+ &sensor_dev_attr_in6_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in7_min.dev_attr.attr,
+ &sensor_dev_attr_in7_max.dev_attr.attr,
+ &sensor_dev_attr_in7_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in7_crit.dev_attr.attr,
+ &sensor_dev_attr_in7_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ &sensor_dev_attr_in8_min.dev_attr.attr,
+ &sensor_dev_attr_in8_max.dev_attr.attr,
+ &sensor_dev_attr_in8_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in8_crit.dev_attr.attr,
+ &sensor_dev_attr_in8_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_in9_min.dev_attr.attr,
+ &sensor_dev_attr_in9_max.dev_attr.attr,
+ &sensor_dev_attr_in9_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in9_crit.dev_attr.attr,
+ &sensor_dev_attr_in9_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in10_input.dev_attr.attr,
+ &sensor_dev_attr_in10_min.dev_attr.attr,
+ &sensor_dev_attr_in10_max.dev_attr.attr,
+ &sensor_dev_attr_in10_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in10_crit.dev_attr.attr,
+ &sensor_dev_attr_in10_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_lcrit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group smm665_group = {
+ .attrs = smm665_attributes,
+};
+
+static int smm665_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct smm665_data *data;
+ int i, ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ if (i2c_smbus_read_byte_data(client, SMM665_ADOC_ENABLE) < 0)
+ return -ENODEV;
+
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_return;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ data->type = id->driver_data;
+ data->cmdreg = i2c_new_dummy(adapter, (client->addr & ~SMM665_REGMASK)
+ | SMM665_CMDREG_BASE);
+ if (!data->cmdreg)
+ goto out_kfree;
+
+ switch (data->type) {
+ case smm465:
+ case smm665:
+ data->conversion_time = SMM665_ADC_WAIT_SMM665;
+ break;
+ case smm665c:
+ case smm764:
+ case smm766:
+ data->conversion_time = SMM665_ADC_WAIT_SMM766;
+ break;
+ }
+
+ ret = -ENODEV;
+ if (i2c_smbus_read_byte_data(data->cmdreg, SMM665_MISC8_CMD_STS) < 0)
+ goto out_unregister;
+
+ /*
+ * Read limits.
+ *
+ * Limit registers start with register SMM665_LIMIT_BASE.
+ * Each channel uses 8 registers, providing four limit values
+ * per channel. Each limit value requires two registers, with the
+ * high byte in the first register and the low byte in the second
+ * register. The first two limits are under-limit values, followed
+ * by two over-limit values.
+ *
+ * Limit register order matches the ADC register order, so we use
+ * ADC register defines throughout the code to index limit registers.
+ *
+ * We save the first retrieved value as both the "critical" and the
+ * "alarm" value. The second value overwrites either the critical or the
+ * alarm value, depending on its configuration. This ensures that both
+ * critical and alarm values are initialized, even if both registers are
+ * configured as critical or non-critical.
+ */
+ for (i = 0; i < SMM665_NUM_ADC; i++) {
+ int val;
+
+ val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8);
+ if (unlikely(val < 0))
+ goto out_unregister;
+ data->critical_min_limit[i] = data->alarm_min_limit[i]
+ = smm665_convert(val, i);
+ val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 2);
+ if (unlikely(val < 0))
+ goto out_unregister;
+ if (smm665_is_critical(val))
+ data->critical_min_limit[i] = smm665_convert(val, i);
+ else
+ data->alarm_min_limit[i] = smm665_convert(val, i);
+ val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 4);
+ if (unlikely(val < 0))
+ goto out_unregister;
+ data->critical_max_limit[i] = data->alarm_max_limit[i]
+ = smm665_convert(val, i);
+ val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 6);
+ if (unlikely(val < 0))
+ goto out_unregister;
+ if (smm665_is_critical(val))
+ data->critical_max_limit[i] = smm665_convert(val, i);
+ else
+ data->alarm_max_limit[i] = smm665_convert(val, i);
+ }
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &smm665_group);
+ if (ret)
+ goto out_unregister;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_remove_group;
+ }
+
+ return 0;
+
+out_remove_group:
+ sysfs_remove_group(&client->dev.kobj, &smm665_group);
+out_unregister:
+ i2c_unregister_device(data->cmdreg);
+out_kfree:
+ kfree(data);
+out_return:
+ return ret;
+}
+
+static int smm665_remove(struct i2c_client *client)
+{
+ struct smm665_data *data = i2c_get_clientdata(client);
+
+ i2c_unregister_device(data->cmdreg);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &smm665_group);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id smm665_id[] = {
+ {"smm465", smm465},
+ {"smm665", smm665},
+ {"smm665c", smm665c},
+ {"smm764", smm764},
+ {"smm766", smm766},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, smm665_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver smm665_driver = {
+ .driver = {
+ .name = "smm665",
+ },
+ .probe = smm665_probe,
+ .remove = smm665_remove,
+ .id_table = smm665_id,
+};
+
+static int __init smm665_init(void)
+{
+ return i2c_add_driver(&smm665_driver);
+}
+
+static void __exit smm665_exit(void)
+{
+ i2c_del_driver(&smm665_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("SMM665 driver");
+MODULE_LICENSE("GPL");
+
+module_init(smm665_init);
+module_exit(smm665_exit);
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 5da5942cf97..d863e13a50b 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -234,7 +234,7 @@ static const struct attribute_group env_group = {
.attrs = env_attributes,
};
-static int __devinit env_probe(struct of_device *op,
+static int __devinit env_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct env *p = kzalloc(sizeof(*p), GFP_KERNEL);
@@ -276,7 +276,7 @@ out_free:
goto out;
}
-static int __devexit env_remove(struct of_device *op)
+static int __devexit env_remove(struct platform_device *op)
{
struct env *p = dev_get_drvdata(&op->dev);
@@ -311,12 +311,12 @@ static struct of_platform_driver env_driver = {
static int __init env_init(void)
{
- return of_register_driver(&env_driver, &of_bus_type);
+ return of_register_platform_driver(&env_driver);
}
static void __exit env_exit(void)
{
- of_unregister_driver(&env_driver);
+ of_unregister_platform_driver(&env_driver);
}
module_init(env_init);
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 7442cf75485..ffb793af680 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -39,7 +39,7 @@
#define DRVNAME "via_cputemp"
-enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
+enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME };
/*
* Functions declaration
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 0dcaba9b718..072c58008a6 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -39,6 +39,7 @@
w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
+ w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3
*/
#include <linux/module.h>
@@ -55,7 +56,7 @@
#include <linux/io.h>
#include "lm75.h"
-enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg };
+enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b };
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
static const char * w83627ehf_device_names[] = {
@@ -63,6 +64,7 @@ static const char * w83627ehf_device_names[] = {
"w83627dhg",
"w83627dhg",
"w83667hg",
+ "w83667hg",
};
static unsigned short force_id;
@@ -91,6 +93,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
#define SIO_W83627DHG_ID 0xa020
#define SIO_W83627DHG_P_ID 0xb070
#define SIO_W83667HG_ID 0xa510
+#define SIO_W83667HG_B_ID 0xb350
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -124,6 +127,7 @@ superio_enter(int ioreg)
static inline void
superio_exit(int ioreg)
{
+ outb(0xaa, ioreg);
outb(0x02, ioreg);
outb(0x02, ioreg + 1);
}
@@ -201,8 +205,14 @@ static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT[] = { 0xff, 0x67, 0xff, 0x69 };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT[] = { 0xff, 0x68, 0xff, 0x6a };
+
+static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
+ = { 0xff, 0x67, 0xff, 0x69 };
+static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
+ = { 0xff, 0x68, 0xff, 0x6a };
+
+static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
+static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] = { 0x68, 0x6a, 0x6c };
/*
* Conversions
@@ -277,6 +287,11 @@ struct w83627ehf_data {
struct device *hwmon_dev;
struct mutex lock;
+ const u8 *REG_FAN_START_OUTPUT;
+ const u8 *REG_FAN_STOP_OUTPUT;
+ const u8 *REG_FAN_MAX_OUTPUT;
+ const u8 *REG_FAN_STEP_OUTPUT;
+
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -524,7 +539,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
}
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < data->pwm_num; i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+
/* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
if (i != 1) {
pwmcfg = w83627ehf_read_value(data,
@@ -546,6 +564,17 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN_STOP_TIME[i]);
+
+ if (data->REG_FAN_MAX_OUTPUT[i] != 0xff)
+ data->fan_max_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_MAX_OUTPUT[i]);
+
+ if (data->REG_FAN_STEP_OUTPUT[i] != 0xff)
+ data->fan_step_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STEP_OUTPUT[i]);
+
data->target_temp[i] =
w83627ehf_read_value(data,
W83627EHF_REG_TARGET[i]) &
@@ -1126,7 +1155,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
+ w83627ehf_write_value(data, data->REG_##REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
}
@@ -1206,12 +1235,26 @@ static struct sensor_device_attribute sda_sf3_arrays[] = {
store_fan_stop_output, 1),
SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
store_fan_stop_output, 2),
+};
- /* pwm1 and pwm3 don't support max and step settings */
+
+/*
+ * pwm1 and pwm3 don't support max and step settings on all chips.
+ * Need to check support while generating/removing attribute files.
+ */
+static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
+ SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
+ store_fan_max_output, 0),
+ SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
+ store_fan_step_output, 0),
SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
store_fan_max_output, 1),
SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
store_fan_step_output, 1),
+ SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
+ store_fan_max_output, 2),
+ SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
+ store_fan_step_output, 2),
};
static ssize_t
@@ -1235,6 +1278,12 @@ static void w83627ehf_device_remove_files(struct device *dev)
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
+ struct sensor_device_attribute *attr =
+ &sda_sf3_max_step_arrays[i];
+ if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
+ device_remove_file(dev, &attr->dev_attr);
+ }
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
for (i = 0; i < data->in_num; i++) {
@@ -1343,22 +1392,37 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
/* 667HG has 3 pwms */
- data->pwm_num = (sio_data->kind == w83667hg) ? 3 : 4;
+ data->pwm_num = (sio_data->kind == w83667hg
+ || sio_data->kind == w83667hg_b) ? 3 : 4;
/* Check temp3 configuration bit for 667HG */
- if (sio_data->kind == w83667hg) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
data->temp3_disable = w83627ehf_read_value(data,
W83627EHF_REG_TEMP_CONFIG[1]) & 0x01;
data->in6_skip = !data->temp3_disable;
}
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ if (sio_data->kind == w83667hg_b) {
+ data->REG_FAN_MAX_OUTPUT =
+ W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
+ data->REG_FAN_STEP_OUTPUT =
+ W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
+ } else {
+ data->REG_FAN_MAX_OUTPUT =
+ W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
+ data->REG_FAN_STEP_OUTPUT =
+ W83627EHF_REG_FAN_STEP_OUTPUT_COMMON;
+ }
+
/* Initialize the chip */
w83627ehf_init_device(data);
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);
/* Read VID value */
- if (sio_data->kind == w83667hg) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
/* W83667HG has different pins for VID input and output, so
we can get the VID input values directly at logical device D
0xe3. */
@@ -1409,7 +1473,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == w83667hg) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
} else {
@@ -1440,6 +1504,15 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
&sda_sf3_arrays[i].dev_attr)))
goto exit_remove;
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
+ struct sensor_device_attribute *attr =
+ &sda_sf3_max_step_arrays[i];
+ if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
+ err = device_create_file(dev, &attr->dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ }
/* if fan4 is enabled create the sf3 files for it */
if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
@@ -1556,6 +1629,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
static const char __initdata sio_name_W83667HG[] = "W83667HG";
+ static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
u16 val;
const char *sio_name;
@@ -1588,6 +1662,10 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg;
sio_name = sio_name_W83667HG;
break;
+ case SIO_W83667HG_B_ID:
+ sio_data->kind = w83667hg_b;
+ sio_name = sio_name_W83667HG_B;
+ break;
default:
if (val != 0xffff)
pr_debug(DRVNAME ": unsupported chip ID: 0x%04x\n",
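Note on the w83627ehf changes above: the per-kind register tables use 0xff as a "register not present" marker, and both the update path and the sysfs attribute creation/removal loops test for it before touching the chip. A minimal standalone sketch of that sentinel-table pattern, with purely illustrative names and register values (not taken from the driver):

	#include <stdio.h>
	#include <stdint.h>

	#define REG_ABSENT	0xff	/* sentinel: channel has no such register */

	/* example per-chip table: channels 0 and 2 lack a max-output register */
	static const uint8_t reg_fan_max_output[] = {
		REG_ABSENT, 0x67, REG_ABSENT, 0x69
	};

	int main(void)
	{
		int i;

		for (i = 0; i < 4; i++) {
			if (reg_fan_max_output[i] == REG_ABSENT)
				continue;	/* no register: skip the attribute */
			printf("pwm%d_max_output -> register 0x%02x\n",
			       i + 1, reg_fan_max_output[i]);
		}
		return 0;
	}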
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index d06083fdffb..30f06e956bf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -47,6 +47,19 @@ config I2C_CHARDEV
This support is also available as a module. If so, the module
will be called i2c-dev.
+config I2C_MUX
+ tristate "I2C bus multiplexing support"
+ depends on EXPERIMENTAL
+ help
+ Say Y here if you want the I2C core to support the ability to
+ handle multiplexed I2C bus topologies, by presenting each
+ multiplexed segment as an I2C adapter.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-mux.
+
+source drivers/i2c/muxes/Kconfig
+
config I2C_HELPER_AUTO
bool "Autoselect pertinent helper modules"
default y
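A hedged sketch of how a multiplexer driver would use the new i2c-mux core to present each downstream segment as its own adapter. The i2c_add_mux_adapter() signature is assumed from the i2c-mux code introduced together with this Kconfig entry, and all mymux_* names are illustrative:

	#include <linux/i2c.h>
	#include <linux/i2c-mux.h>

	static int mymux_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
	{
		/* program the mux chip so that downstream channel 'chan' is routed */
		return 0;
	}

	static int mymux_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
	{
		/* optionally disconnect the channel again after each transfer */
		return 0;
	}

	static struct i2c_adapter *mymux_add_segment(struct i2c_adapter *parent,
						     void *mux_priv, u32 chan)
	{
		/* force_nr = 0 lets the core assign a dynamic bus number */
		return i2c_add_mux_adapter(parent, mux_priv, 0, chan,
					   mymux_select, mymux_deselect);
	}

The virtual adapter returned for each channel is then used by client drivers exactly like a physical bus, which is what the help text above means by presenting each multiplexed segment as an I2C adapter.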
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index a7d9b4be9bb..c00fd66388f 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2c-core.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
-obj-y += algos/ busses/
+obj-$(CONFIG_I2C_MUX) += i2c-mux.o
+obj-y += algos/ busses/ muxes/
ifeq ($(CONFIG_I2C_DEBUG_CORE),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index bceafbfa726..6539ac2907e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -448,6 +448,13 @@ config I2C_NOMADIK
If you say yes to this option, support will be included for the
I2C interface from ST-Ericsson's Nomadik and Ux500 architectures.
+config I2C_NUC900
+ tristate "NUC900 I2C Driver"
+ depends on ARCH_W90X900
+ help
+ Say Y here to include support for the I2C controller in
+ Winbond/Nuvoton NUC900 based System-on-Chip devices.
+
config I2C_OCORES
tristate "OpenCores I2C Controller"
depends on EXPERIMENTAL
@@ -496,8 +503,8 @@ config I2C_PMCMSP
will be called i2c-pmcmsp.
config I2C_PNX
- tristate "I2C bus support for Philips PNX targets"
- depends on ARCH_PNX4008
+ tristate "I2C bus support for Philips PNX and NXP LPC targets"
+ depends on ARCH_PNX4008 || ARCH_LPC32XX
help
This driver supports the Philips IP3204 I2C IP block master and/or
slave controller
@@ -521,12 +528,19 @@ config I2C_PXA_SLAVE
is necessary for systems where the PXA may be a target on the
I2C bus.
+config HAVE_S3C2410_I2C
+ bool
+ help
+ This will include I2C support for Samsung SoCs. If you want to
+ include I2C support for any machine, select this in the
+ respective machine's Kconfig file.
+
config I2C_S3C2410
tristate "S3C2410 I2C Driver"
- depends on ARCH_S3C2410 || ARCH_S3C64XX
+ depends on HAVE_S3C2410_I2C
help
Say Y here to include support for I2C controller in the
- Samsung S3C2410 based System-on-Chip devices.
+ Samsung SoCs.
config I2C_S6000
tristate "S6000 I2C support"
@@ -549,7 +563,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
- depends on SUPERH
+ depends on SUPERH || ARCH_SHMOBILE
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 936880bd1dc..c3ef49230cb 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
+obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b02b4533651..f7bd2613cec 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -105,7 +105,7 @@ struct i2c_reg {
struct cpm_i2c {
char *base;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct i2c_adapter adap;
uint dp_addr;
int version; /* CPM1=1, CPM2=2 */
@@ -428,7 +428,7 @@ static const struct i2c_adapter cpm_ops = {
static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
{
- struct of_device *ofdev = cpm->ofdev;
+ struct platform_device *ofdev = cpm->ofdev;
const u32 *data;
int len, ret, i;
void __iomem *i2c_base;
@@ -634,7 +634,7 @@ static void cpm_i2c_shutdown(struct cpm_i2c *cpm)
cpm_muram_free(cpm->i2c_addr);
}
-static int __devinit cpm_i2c_probe(struct of_device *ofdev,
+static int __devinit cpm_i2c_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
int result, len;
@@ -652,6 +652,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
cpm->adap = cpm_ops;
i2c_set_adapdata(&cpm->adap, cpm);
cpm->adap.dev.parent = &ofdev->dev;
+ cpm->adap.dev.of_node = of_node_get(ofdev->dev.of_node);
result = cpm_i2c_setup(cpm);
if (result) {
@@ -676,11 +677,6 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
cpm->adap.name);
- /*
- * register OF I2C devices
- */
- of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node);
-
return 0;
out_shut:
cpm_i2c_shutdown(cpm);
@@ -691,7 +687,7 @@ out_free:
return result;
}
-static int __devexit cpm_i2c_remove(struct of_device *ofdev)
+static int __devexit cpm_i2c_remove(struct platform_device *ofdev)
{
struct cpm_i2c *cpm = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 4523364e672..b8feac5f2ef 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -36,14 +36,16 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
-
#include <mach/i2c.h>
/* ----- global defines ----------------------------------------------- */
#define DAVINCI_I2C_TIMEOUT (1*HZ)
+#define DAVINCI_I2C_MAX_TRIES 2
#define I2C_DAVINCI_INTR_ALL (DAVINCI_I2C_IMR_AAS | \
DAVINCI_I2C_IMR_SCD | \
DAVINCI_I2C_IMR_ARDY | \
@@ -72,37 +74,29 @@
#define DAVINCI_I2C_IVR_NACK 0x02
#define DAVINCI_I2C_IVR_AL 0x01
-#define DAVINCI_I2C_STR_BB (1 << 12)
-#define DAVINCI_I2C_STR_RSFULL (1 << 11)
-#define DAVINCI_I2C_STR_SCD (1 << 5)
-#define DAVINCI_I2C_STR_ARDY (1 << 2)
-#define DAVINCI_I2C_STR_NACK (1 << 1)
-#define DAVINCI_I2C_STR_AL (1 << 0)
-
-#define DAVINCI_I2C_MDR_NACK (1 << 15)
-#define DAVINCI_I2C_MDR_STT (1 << 13)
-#define DAVINCI_I2C_MDR_STP (1 << 11)
-#define DAVINCI_I2C_MDR_MST (1 << 10)
-#define DAVINCI_I2C_MDR_TRX (1 << 9)
-#define DAVINCI_I2C_MDR_XA (1 << 8)
-#define DAVINCI_I2C_MDR_RM (1 << 7)
-#define DAVINCI_I2C_MDR_IRS (1 << 5)
-
-#define DAVINCI_I2C_IMR_AAS (1 << 6)
-#define DAVINCI_I2C_IMR_SCD (1 << 5)
-#define DAVINCI_I2C_IMR_XRDY (1 << 4)
-#define DAVINCI_I2C_IMR_RRDY (1 << 3)
-#define DAVINCI_I2C_IMR_ARDY (1 << 2)
-#define DAVINCI_I2C_IMR_NACK (1 << 1)
-#define DAVINCI_I2C_IMR_AL (1 << 0)
-
-#define MOD_REG_BIT(val, mask, set) do { \
- if (set) { \
- val |= mask; \
- } else { \
- val &= ~mask; \
- } \
-} while (0)
+#define DAVINCI_I2C_STR_BB BIT(12)
+#define DAVINCI_I2C_STR_RSFULL BIT(11)
+#define DAVINCI_I2C_STR_SCD BIT(5)
+#define DAVINCI_I2C_STR_ARDY BIT(2)
+#define DAVINCI_I2C_STR_NACK BIT(1)
+#define DAVINCI_I2C_STR_AL BIT(0)
+
+#define DAVINCI_I2C_MDR_NACK BIT(15)
+#define DAVINCI_I2C_MDR_STT BIT(13)
+#define DAVINCI_I2C_MDR_STP BIT(11)
+#define DAVINCI_I2C_MDR_MST BIT(10)
+#define DAVINCI_I2C_MDR_TRX BIT(9)
+#define DAVINCI_I2C_MDR_XA BIT(8)
+#define DAVINCI_I2C_MDR_RM BIT(7)
+#define DAVINCI_I2C_MDR_IRS BIT(5)
+
+#define DAVINCI_I2C_IMR_AAS BIT(6)
+#define DAVINCI_I2C_IMR_SCD BIT(5)
+#define DAVINCI_I2C_IMR_XRDY BIT(4)
+#define DAVINCI_I2C_IMR_RRDY BIT(3)
+#define DAVINCI_I2C_IMR_ARDY BIT(2)
+#define DAVINCI_I2C_IMR_NACK BIT(1)
+#define DAVINCI_I2C_IMR_AL BIT(0)
struct davinci_i2c_dev {
struct device *dev;
@@ -113,8 +107,13 @@ struct davinci_i2c_dev {
u8 *buf;
size_t buf_len;
int irq;
+ int stop;
u8 terminate;
struct i2c_adapter adapter;
+#ifdef CONFIG_CPU_FREQ
+ struct completion xfr_complete;
+ struct notifier_block freq_transition;
+#endif
};
/* default platform data to use if not supplied in the platform_device */
@@ -134,12 +133,59 @@ static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
return __raw_readw(i2c_dev->base + reg);
}
-/*
- * This functions configures I2C and brings I2C out of reset.
- * This function is called during I2C init function. This function
- * also gets called if I2C encounters any errors.
+/* Generate a pulse on the i2c clock pin. */
+static void generic_i2c_clock_pulse(unsigned int scl_pin)
+{
+ u16 i;
+
+ if (scl_pin) {
+ /* Send high and low on the SCL line */
+ for (i = 0; i < 9; i++) {
+ gpio_set_value(scl_pin, 0);
+ udelay(20);
+ gpio_set_value(scl_pin, 1);
+ udelay(20);
+ }
+ }
+}
+
+/* This routine does i2c bus recovery as specified in the
+ * i2c protocol Rev. 03 section 3.16 titled "Bus clear"
*/
-static int i2c_davinci_init(struct davinci_i2c_dev *dev)
+static void i2c_recover_bus(struct davinci_i2c_dev *dev)
+{
+ u32 flag = 0;
+ struct davinci_i2c_platform_data *pdata = dev->dev->platform_data;
+
+ dev_err(dev->dev, "initiating i2c bus recovery\n");
+ /* Send NACK to the slave */
+ flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+ flag |= DAVINCI_I2C_MDR_NACK;
+ /* write the data into mode register */
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+ if (pdata)
+ generic_i2c_clock_pulse(pdata->scl_pin);
+ /* Send STOP */
+ flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+ flag |= DAVINCI_I2C_MDR_STP;
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+}
+
+static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
+ int val)
+{
+ u16 w;
+
+ w = davinci_i2c_read_reg(i2c_dev, DAVINCI_I2C_MDR_REG);
+ if (!val) /* put I2C into reset */
+ w &= ~DAVINCI_I2C_MDR_IRS;
+ else /* take I2C out of reset */
+ w |= DAVINCI_I2C_MDR_IRS;
+
+ davinci_i2c_write_reg(i2c_dev, DAVINCI_I2C_MDR_REG, w);
+}
+
+static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
{
struct davinci_i2c_platform_data *pdata = dev->dev->platform_data;
u16 psc;
@@ -148,15 +194,6 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
u32 clkh;
u32 clkl;
u32 input_clock = clk_get_rate(dev->clk);
- u16 w;
-
- if (!pdata)
- pdata = &davinci_i2c_platform_data_default;
-
- /* put I2C into reset */
- w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
- MOD_REG_BIT(w, DAVINCI_I2C_MDR_IRS, 0);
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
/* NOTE: I2C Clock divider programming info
* As per I2C specs the following formulas provide prescaler
@@ -188,12 +225,32 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh);
davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl);
+ dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk);
+}
+
+/*
+ * This function configures I2C and brings I2C out of reset.
+ * This function is called during I2C init function. This function
+ * also gets called if I2C encounters any errors.
+ */
+static int i2c_davinci_init(struct davinci_i2c_dev *dev)
+{
+ struct davinci_i2c_platform_data *pdata = dev->dev->platform_data;
+
+ if (!pdata)
+ pdata = &davinci_i2c_platform_data_default;
+
+ /* put I2C into reset */
+ davinci_i2c_reset_ctrl(dev, 0);
+
+ /* compute clock dividers */
+ i2c_davinci_calc_clk_dividers(dev);
+
/* Respond at reserved "SMBus Host" slave address (and zero);
* we seem to have no option to not respond...
*/
davinci_i2c_write_reg(dev, DAVINCI_I2C_OAR_REG, 0x08);
- dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk);
dev_dbg(dev->dev, "PSC = %d\n",
davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG));
dev_dbg(dev->dev, "CLKL = %d\n",
@@ -204,9 +261,7 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
pdata->bus_freq, pdata->bus_delay);
/* Take the I2C module out of reset: */
- w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
- MOD_REG_BIT(w, DAVINCI_I2C_MDR_IRS, 1);
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
+ davinci_i2c_reset_ctrl(dev, 1);
/* Enable interrupts */
davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, I2C_DAVINCI_INTR_ALL);
@@ -221,14 +276,22 @@ static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev,
char allow_sleep)
{
unsigned long timeout;
+ static u16 to_cnt;
timeout = jiffies + dev->adapter.timeout;
while (davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG)
& DAVINCI_I2C_STR_BB) {
- if (time_after(jiffies, timeout)) {
- dev_warn(dev->dev,
- "timeout waiting for bus ready\n");
- return -ETIMEDOUT;
+ if (to_cnt <= DAVINCI_I2C_MAX_TRIES) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev->dev,
+ "timeout waiting for bus ready\n");
+ to_cnt++;
+ return -ETIMEDOUT;
+ } else {
+ to_cnt = 0;
+ i2c_recover_bus(dev);
+ i2c_davinci_init(dev);
+ }
}
if (allow_sleep)
schedule_timeout(1);
@@ -250,9 +313,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
u16 w;
int r;
- if (msg->len == 0)
- return -EINVAL;
-
if (!pdata)
pdata = &davinci_i2c_platform_data_default;
/* Introduce a delay, required for some boards (e.g Davinci EVM) */
@@ -264,6 +324,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
dev->buf = msg->buf;
dev->buf_len = msg->len;
+ dev->stop = stop;
davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len);
@@ -281,23 +342,40 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
flag |= DAVINCI_I2C_MDR_TRX;
if (stop)
flag |= DAVINCI_I2C_MDR_STP;
+ if (msg->len == 0) {
+ flag |= DAVINCI_I2C_MDR_RM;
+ flag &= ~DAVINCI_I2C_MDR_STP;
+ }
/* Enable receive or transmit interrupts */
w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
if (msg->flags & I2C_M_RD)
- MOD_REG_BIT(w, DAVINCI_I2C_IMR_RRDY, 1);
+ w |= DAVINCI_I2C_IMR_RRDY;
else
- MOD_REG_BIT(w, DAVINCI_I2C_IMR_XRDY, 1);
+ w |= DAVINCI_I2C_IMR_XRDY;
davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w);
dev->terminate = 0;
- /* write the data into mode register */
+
+ /*
+ * First byte should be set here, not after interrupt,
+ * because transmit-data-ready interrupt can come before
+ * NACK-interrupt during sending of previous message and
+ * ICDXR may have wrong data
+ */
+ if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) {
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
+ dev->buf_len--;
+ }
+
+ /* write the data into mode register; start transmitting */
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
dev->adapter.timeout);
if (r == 0) {
dev_err(dev->dev, "controller timed out\n");
+ i2c_recover_bus(dev);
i2c_davinci_init(dev);
dev->buf_len = 0;
return -ETIMEDOUT;
@@ -334,7 +412,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
return msg->len;
if (stop) {
w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
- MOD_REG_BIT(w, DAVINCI_I2C_MDR_STP, 1);
+ w |= DAVINCI_I2C_MDR_STP;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
}
return -EREMOTEIO;
@@ -367,12 +445,17 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (ret < 0)
return ret;
}
+
+#ifdef CONFIG_CPU_FREQ
+ complete(&dev->xfr_complete);
+#endif
+
return num;
}
static u32 i2c_davinci_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static void terminate_read(struct davinci_i2c_dev *dev)
@@ -431,6 +514,14 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
case DAVINCI_I2C_IVR_ARDY:
davinci_i2c_write_reg(dev,
DAVINCI_I2C_STR_REG, DAVINCI_I2C_STR_ARDY);
+ if (((dev->buf_len == 0) && (dev->stop != 0)) ||
+ (dev->cmd_err & DAVINCI_I2C_STR_NACK)) {
+ w = davinci_i2c_read_reg(dev,
+ DAVINCI_I2C_MDR_REG);
+ w |= DAVINCI_I2C_MDR_STP;
+ davinci_i2c_write_reg(dev,
+ DAVINCI_I2C_MDR_REG, w);
+ }
complete(&dev->cmd_complete);
break;
@@ -462,7 +553,7 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
w = davinci_i2c_read_reg(dev,
DAVINCI_I2C_IMR_REG);
- MOD_REG_BIT(w, DAVINCI_I2C_IMR_XRDY, 0);
+ w &= ~DAVINCI_I2C_IMR_XRDY;
davinci_i2c_write_reg(dev,
DAVINCI_I2C_IMR_REG,
w);
@@ -491,6 +582,48 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
return count ? IRQ_HANDLED : IRQ_NONE;
}
+#ifdef CONFIG_CPU_FREQ
+static int i2c_davinci_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct davinci_i2c_dev *dev;
+
+ dev = container_of(nb, struct davinci_i2c_dev, freq_transition);
+ if (val == CPUFREQ_PRECHANGE) {
+ wait_for_completion(&dev->xfr_complete);
+ davinci_i2c_reset_ctrl(dev, 0);
+ } else if (val == CPUFREQ_POSTCHANGE) {
+ i2c_davinci_calc_clk_dividers(dev);
+ davinci_i2c_reset_ctrl(dev, 1);
+ }
+
+ return 0;
+}
+
+static inline int i2c_davinci_cpufreq_register(struct davinci_i2c_dev *dev)
+{
+ dev->freq_transition.notifier_call = i2c_davinci_cpufreq_transition;
+
+ return cpufreq_register_notifier(&dev->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev)
+{
+ cpufreq_unregister_notifier(&dev->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+#else
+static inline int i2c_davinci_cpufreq_register(struct davinci_i2c_dev *dev)
+{
+ return 0;
+}
+
+static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev)
+{
+}
+#endif
+
static struct i2c_algorithm i2c_davinci_algo = {
.master_xfer = i2c_davinci_xfer,
.functionality = i2c_davinci_func,
@@ -530,6 +663,9 @@ static int davinci_i2c_probe(struct platform_device *pdev)
}
init_completion(&dev->cmd_complete);
+#ifdef CONFIG_CPU_FREQ
+ init_completion(&dev->xfr_complete);
+#endif
dev->dev = get_device(&pdev->dev);
dev->irq = irq->start;
platform_set_drvdata(pdev, dev);
@@ -541,7 +677,12 @@ static int davinci_i2c_probe(struct platform_device *pdev)
}
clk_enable(dev->clk);
- dev->base = (void __iomem *)IO_ADDRESS(mem->start);
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (!dev->base) {
+ r = -EBUSY;
+ goto err_mem_ioremap;
+ }
+
i2c_davinci_init(dev);
r = request_irq(dev->irq, i2c_davinci_isr, 0, pdev->name, dev);
@@ -550,6 +691,12 @@ static int davinci_i2c_probe(struct platform_device *pdev)
goto err_unuse_clocks;
}
+ r = i2c_davinci_cpufreq_register(dev);
+ if (r) {
+ dev_err(&pdev->dev, "failed to register cpufreq\n");
+ goto err_free_irq;
+ }
+
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
@@ -571,6 +718,8 @@ static int davinci_i2c_probe(struct platform_device *pdev)
err_free_irq:
free_irq(dev->irq, dev);
err_unuse_clocks:
+ iounmap(dev->base);
+err_mem_ioremap:
clk_disable(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
@@ -589,6 +738,8 @@ static int davinci_i2c_remove(struct platform_device *pdev)
struct davinci_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
+ i2c_davinci_cpufreq_deregister(dev);
+
platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
@@ -599,6 +750,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
free_irq(IRQ_I2C, dev);
+ iounmap(dev->base);
kfree(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -606,6 +758,41 @@ static int davinci_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int davinci_i2c_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ /* put I2C into reset */
+ davinci_i2c_reset_ctrl(i2c_dev, 0);
+ clk_disable(i2c_dev->clk);
+
+ return 0;
+}
+
+static int davinci_i2c_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ clk_enable(i2c_dev->clk);
+ /* take I2C out of reset */
+ davinci_i2c_reset_ctrl(i2c_dev, 1);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_i2c_pm = {
+ .suspend = davinci_i2c_suspend,
+ .resume = davinci_i2c_resume,
+};
+
+#define davinci_i2c_pm_ops (&davinci_i2c_pm)
+#else
+#define davinci_i2c_pm_ops NULL
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_davinci");
@@ -615,6 +802,7 @@ static struct platform_driver davinci_i2c_driver = {
.driver = {
.name = "i2c_davinci",
.owner = THIS_MODULE,
+ .pm = davinci_i2c_pm_ops,
},
};
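Note on the i2c-davinci changes above: the new generic_i2c_clock_pulse() bus-clear helper can only toggle SCL if the board supplies the GPIO number through platform data; with no platform data (or scl_pin left at 0) the recovery path degrades to just writing NACK and STOP. A hedged board-code sketch, with illustrative values, using the bus_freq (kHz), bus_delay (usec) and scl_pin fields the driver reads above:

	#include <mach/i2c.h>

	/* values are illustrative, not taken from any real board file */
	static struct davinci_i2c_platform_data my_evm_i2c_pdata = {
		.bus_freq	= 100,	/* kHz */
		.bus_delay	= 0,	/* usec */
		.scl_pin	= 20,	/* GPIO toggled by generic_i2c_clock_pulse() */
	};

On DaVinci boards this structure is typically handed to the platform device registration (for example via davinci_init_i2c()), which is outside the scope of this patch.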
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index bf344135647..43ca32fddde 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -661,7 +661,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
return (u8)((opb + 9) / 10 - 1);
}
-static int __devinit iic_request_irq(struct of_device *ofdev,
+static int __devinit iic_request_irq(struct platform_device *ofdev,
struct ibm_iic_private *dev)
{
struct device_node *np = ofdev->dev.of_node;
@@ -692,7 +692,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
/*
* Register single IIC interface
*/
-static int __devinit iic_probe(struct of_device *ofdev,
+static int __devinit iic_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -745,6 +745,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
/* Register it with i2c layer */
adap = &dev->adap;
adap->dev.parent = &ofdev->dev;
+ adap->dev.of_node = of_node_get(np);
strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
i2c_set_adapdata(adap, dev);
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
@@ -760,9 +761,6 @@ static int __devinit iic_probe(struct of_device *ofdev,
dev_info(&ofdev->dev, "using %s mode\n",
dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
- /* Now register all the child nodes */
- of_register_i2c_devices(adap, np);
-
return 0;
error_cleanup:
@@ -782,7 +780,7 @@ error_cleanup:
/*
* Cleanup initialized IIC interface
*/
-static int __devexit iic_remove(struct of_device *ofdev)
+static int __devexit iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index df00eb1f11f..a1c419a716a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -63,6 +63,7 @@ struct mpc_i2c {
wait_queue_head_t queue;
struct i2c_adapter adap;
int irq;
+ u32 real_clk;
};
struct mpc_i2c_divider {
@@ -96,20 +97,23 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
/* Sometimes 9th clock pulse isn't generated, and slave doesn't release
* the bus, because it wants to send ACK.
* Following sequence of enabling/disabling and sending start/stop generates
- * the pulse, so it's all OK.
+ * the 9 pulses, so it's all OK.
*/
static void mpc_i2c_fixup(struct mpc_i2c *i2c)
{
- writeccr(i2c, 0);
- udelay(30);
- writeccr(i2c, CCR_MEN);
- udelay(30);
- writeccr(i2c, CCR_MSTA | CCR_MTX);
- udelay(30);
- writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
- udelay(30);
- writeccr(i2c, CCR_MEN);
- udelay(30);
+ int k;
+ u32 delay_val = 1000000 / i2c->real_clk + 1;
+
+ if (delay_val < 2)
+ delay_val = 2;
+
+ for (k = 9; k; k--) {
+ writeccr(i2c, 0);
+ writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
+ udelay(delay_val);
+ writeccr(i2c, CCR_MEN);
+ udelay(delay_val << 1);
+ }
}
static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
@@ -190,15 +194,18 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
};
static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
- int prescaler)
+ int prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
int i;
- if (clock == MPC_I2C_CLOCK_LEGACY)
+ if (clock == MPC_I2C_CLOCK_LEGACY) {
+ /* see below - default fdr = 0x3f -> div = 2048 */
+ *real_clk = mpc5xxx_get_bus_frequency(node) / 2048;
return -EINVAL;
+ }
/* Determine divider value */
divider = mpc5xxx_get_bus_frequency(node) / clock;
@@ -216,7 +223,8 @@ static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
break;
}
- return div ? (int)div->fdr : -EINVAL;
+ *real_clk = mpc5xxx_get_bus_frequency(node) / div->divider;
+ return (int)div->fdr;
}
static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
@@ -231,13 +239,14 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
return;
}
- ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler);
+ ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler, &i2c->real_clk);
fdr = (ret >= 0) ? ret : 0x3f; /* backward compatibility */
writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR);
if (ret >= 0)
- dev_info(i2c->dev, "clock %d Hz (fdr=%d)\n", clock, fdr);
+ dev_info(i2c->dev, "clock %u Hz (fdr=%d)\n", i2c->real_clk,
+ fdr);
}
#else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */
static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
@@ -334,14 +343,17 @@ static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
}
static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
- u32 prescaler)
+ u32 prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
u32 divider;
int i;
- if (clock == MPC_I2C_CLOCK_LEGACY)
+ if (clock == MPC_I2C_CLOCK_LEGACY) {
+ /* see below - default fdr = 0x1031 -> div = 16 * 3072 */
+ *real_clk = fsl_get_sys_freq() / prescaler / (16 * 3072);
return -EINVAL;
+ }
/* Determine proper divider value */
if (of_device_is_compatible(node, "fsl,mpc8544-i2c"))
@@ -364,6 +376,7 @@ static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
break;
}
+ *real_clk = fsl_get_sys_freq() / prescaler / div->divider;
return div ? (int)div->fdr : -EINVAL;
}
@@ -380,7 +393,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
return;
}
- ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler);
+ ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler, &i2c->real_clk);
fdr = (ret >= 0) ? ret : 0x1031; /* backward compatibility */
writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR);
@@ -388,7 +401,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
if (ret >= 0)
dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n",
- clock, fdr >> 8, fdr & 0xff);
+ i2c->real_clk, fdr >> 8, fdr & 0xff);
}
#else /* !CONFIG_FSL_SOC */
@@ -500,10 +513,14 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
return -EINTR;
}
if (time_after(jiffies, orig_jiffies + HZ)) {
+ u8 status = readb(i2c->base + MPC_I2C_SR);
+
dev_dbg(i2c->dev, "timeout\n");
- if (readb(i2c->base + MPC_I2C_SR) ==
- (CSR_MCF | CSR_MBB | CSR_RXAK))
+ if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
+ writeb(status & ~CSR_MAL,
+ i2c->base + MPC_I2C_SR);
mpc_i2c_fixup(i2c);
+ }
return -EIO;
}
schedule();
@@ -543,7 +560,7 @@ static struct i2c_adapter mpc_ops = {
.timeout = HZ,
};
-static int __devinit fsl_i2c_probe(struct of_device *op,
+static int __devinit fsl_i2c_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct mpc_i2c *i2c;
@@ -595,18 +612,26 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0);
}
+ prop = of_get_property(op->dev.of_node, "fsl,timeout", &plen);
+ if (prop && plen == sizeof(u32)) {
+ mpc_ops.timeout = *prop * HZ / 1000000;
+ if (mpc_ops.timeout < 5)
+ mpc_ops.timeout = 5;
+ }
+ dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
+
dev_set_drvdata(&op->dev, i2c);
i2c->adap = mpc_ops;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &op->dev;
+ i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
result = i2c_add_adapter(&i2c->adap);
if (result < 0) {
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
- of_register_i2c_devices(&i2c->adap, op->dev.of_node);
return result;
@@ -621,7 +646,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
return result;
};
-static int __devexit fsl_i2c_remove(struct of_device *op)
+static int __devexit fsl_i2c_remove(struct platform_device *op)
{
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
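Worked example for the new "fsl,timeout" handling above (the HZ value is assumed for illustration): with fsl,timeout = <10000>, i.e. 10000 us, and HZ = 250, *prop * HZ / 1000000 evaluates to 2 jiffies, which is below the floor and is raised to the minimum of 5 jiffies; the dev_info() line then reports 5 * 1000000 / 250 = 20000 us.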
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
new file mode 100644
index 00000000000..92d770d7bbc
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -0,0 +1,709 @@
+/*
+ * linux/drivers/i2c/busses/i2c-nuc900.c
+ *
+ * Copyright (c) 2010 Nuvoton technology corporation.
+ *
+ * This driver is based on the S3C2410 I2C driver by Ben Dooks <ben-Y5A6D6n0/KfQXOPxS62xeg@public.gmane.org>.
+ * Written by Wan ZongShun <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <mach/mfp.h>
+#include <mach/i2c.h>
+
+/* nuc900 i2c registers offset */
+
+#define CSR 0x00
+#define DIVIDER 0x04
+#define CMDR 0x08
+#define SWR 0x0C
+#define RXR 0x10
+#define TXR 0x14
+
+/* nuc900 i2c CSR register bits */
+
+#define IRQEN 0x003
+#define I2CBUSY 0x400
+#define I2CSTART 0x018
+#define IRQFLAG 0x004
+#define ARBIT_LOST 0x200
+#define SLAVE_ACK 0x800
+
+/* nuc900 i2c CMDR register bits */
+
+#define I2C_CMD_START 0x10
+#define I2C_CMD_STOP 0x08
+#define I2C_CMD_READ 0x04
+#define I2C_CMD_WRITE 0x02
+#define I2C_CMD_NACK 0x01
+
+/* i2c controller state */
+
+enum nuc900_i2c_state {
+ STATE_IDLE,
+ STATE_START,
+ STATE_READ,
+ STATE_WRITE,
+ STATE_STOP
+};
+
+/* i2c controller private data */
+
+struct nuc900_i2c {
+ spinlock_t lock;
+ wait_queue_head_t wait;
+
+ struct i2c_msg *msg;
+ unsigned int msg_num;
+ unsigned int msg_idx;
+ unsigned int msg_ptr;
+ unsigned int irq;
+
+ enum nuc900_i2c_state state;
+
+ void __iomem *regs;
+ struct clk *clk;
+ struct device *dev;
+ struct resource *ioarea;
+ struct i2c_adapter adap;
+};
+
+/* nuc900_i2c_master_complete
+ *
+ * complete the message and wake up the caller, using the given return code,
+ * or zero to mean ok.
+*/
+
+static inline void nuc900_i2c_master_complete(struct nuc900_i2c *i2c, int ret)
+{
+ dev_dbg(i2c->dev, "master_complete %d\n", ret);
+
+ i2c->msg_ptr = 0;
+ i2c->msg = NULL;
+ i2c->msg_idx++;
+ i2c->msg_num = 0;
+ if (ret)
+ i2c->msg_idx = ret;
+
+ wake_up(&i2c->wait);
+}
+
+/* irq enable/disable functions */
+
+static inline void nuc900_i2c_disable_irq(struct nuc900_i2c *i2c)
+{
+ unsigned long tmp;
+
+ tmp = readl(i2c->regs + CSR);
+ writel(tmp & ~IRQEN, i2c->regs + CSR);
+}
+
+static inline void nuc900_i2c_enable_irq(struct nuc900_i2c *i2c)
+{
+ unsigned long tmp;
+
+ tmp = readl(i2c->regs + CSR);
+ writel(tmp | IRQEN, i2c->regs + CSR);
+}
+
+
+/* nuc900_i2c_message_start
+ *
+ * put the start of a message onto the bus
+*/
+
+static void nuc900_i2c_message_start(struct nuc900_i2c *i2c,
+ struct i2c_msg *msg)
+{
+ unsigned int addr = (msg->addr & 0x7f) << 1;
+
+ if (msg->flags & I2C_M_RD)
+ addr |= 0x1;
+ writel(addr & 0xff, i2c->regs + TXR);
+ writel(I2C_CMD_START | I2C_CMD_WRITE, i2c->regs + CMDR);
+}
+
+static inline void nuc900_i2c_stop(struct nuc900_i2c *i2c, int ret)
+{
+
+ dev_dbg(i2c->dev, "STOP\n");
+
+ /* stop the transfer */
+ i2c->state = STATE_STOP;
+ writel(I2C_CMD_STOP, i2c->regs + CMDR);
+
+ nuc900_i2c_master_complete(i2c, ret);
+ nuc900_i2c_disable_irq(i2c);
+}
+
+/* helper functions to determine the current state in the set of
+ * messages we are sending
+*/
+
+/* is_lastmsg()
+ *
+ * returns TRUE if the current message is the last in the set
+*/
+
+static inline int is_lastmsg(struct nuc900_i2c *i2c)
+{
+ return i2c->msg_idx >= (i2c->msg_num - 1);
+}
+
+/* is_msglast
+ *
+ * returns TRUE if this is the last byte in the current message
+*/
+
+static inline int is_msglast(struct nuc900_i2c *i2c)
+{
+ return i2c->msg_ptr == i2c->msg->len-1;
+}
+
+/* is_msgend
+ *
+ * returns TRUE if we reached the end of the current message
+*/
+
+static inline int is_msgend(struct nuc900_i2c *i2c)
+{
+ return i2c->msg_ptr >= i2c->msg->len;
+}
+
+/* i2c_nuc900_irq_nextbyte
+ *
+ * process an interrupt and work out what to do
+ */
+
+static void i2c_nuc900_irq_nextbyte(struct nuc900_i2c *i2c,
+ unsigned long iicstat)
+{
+ unsigned char byte;
+
+ switch (i2c->state) {
+
+ case STATE_IDLE:
+ dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
+ break;
+
+ case STATE_STOP:
+ dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
+ nuc900_i2c_disable_irq(i2c);
+ break;
+
+ case STATE_START:
+ /* last thing we did was send a start condition on the
+ * bus, or started a new i2c message
+ */
+
+ if (iicstat & SLAVE_ACK &&
+ !(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
+ /* ack was not received... */
+
+ dev_dbg(i2c->dev, "ack was not received\n");
+ nuc900_i2c_stop(i2c, -ENXIO);
+ break;
+ }
+
+ if (i2c->msg->flags & I2C_M_RD)
+ i2c->state = STATE_READ;
+ else
+ i2c->state = STATE_WRITE;
+
+ /* terminate the transfer if there is nothing to do
+ * as this is used by the i2c probe to find devices.
+ */
+
+ if (is_lastmsg(i2c) && i2c->msg->len == 0) {
+ nuc900_i2c_stop(i2c, 0);
+ break;
+ }
+
+ if (i2c->state == STATE_READ)
+ goto prepare_read;
+
+ /* fall through to the write state, as we will need to
+ * send a byte as well
+ */
+
+ case STATE_WRITE:
+ /* we are writing data to the device... check for the
+ * end of the message, and if so, work out what to do
+ */
+
+ if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
+ if (iicstat & SLAVE_ACK) {
+ dev_dbg(i2c->dev, "WRITE: No Ack\n");
+
+ nuc900_i2c_stop(i2c, -ECONNREFUSED);
+ break;
+ }
+ }
+
+retry_write:
+
+ if (!is_msgend(i2c)) {
+ byte = i2c->msg->buf[i2c->msg_ptr++];
+ writeb(byte, i2c->regs + TXR);
+ writel(I2C_CMD_WRITE, i2c->regs + CMDR);
+
+ } else if (!is_lastmsg(i2c)) {
+ /* we need to go to the next i2c message */
+
+ dev_dbg(i2c->dev, "WRITE: Next Message\n");
+
+ i2c->msg_ptr = 0;
+ i2c->msg_idx++;
+ i2c->msg++;
+
+ /* check to see if we need to do another message */
+ if (i2c->msg->flags & I2C_M_NOSTART) {
+
+ if (i2c->msg->flags & I2C_M_RD) {
+ /* cannot do this, the controller
+ * forces us to send a new START
+ * when we change direction
+ */
+
+ nuc900_i2c_stop(i2c, -EINVAL);
+ }
+
+ goto retry_write;
+ } else {
+ /* send the new start */
+ nuc900_i2c_message_start(i2c, i2c->msg);
+ i2c->state = STATE_START;
+ }
+
+ } else {
+ /* send stop */
+
+ nuc900_i2c_stop(i2c, 0);
+ }
+ break;
+
+ case STATE_READ:
+ /* we have a byte of data in the data register, do
+ * something with it, and then work out whether we are
+ * going to do any more read/write
+ */
+
+ byte = readb(i2c->regs + RXR);
+ i2c->msg->buf[i2c->msg_ptr++] = byte;
+
+prepare_read:
+ if (is_msglast(i2c)) {
+ /* last byte of buffer */
+
+ if (is_lastmsg(i2c))
+ writel(I2C_CMD_READ | I2C_CMD_NACK,
+ i2c->regs + CMDR);
+
+ } else if (is_msgend(i2c)) {
+ /* ok, we've read the entire buffer, see if there
+ * is anything else we need to do
+ */
+
+ if (is_lastmsg(i2c)) {
+ /* last message, send stop and complete */
+ dev_dbg(i2c->dev, "READ: Send Stop\n");
+
+ nuc900_i2c_stop(i2c, 0);
+ } else {
+ /* go to the next transfer */
+ dev_dbg(i2c->dev, "READ: Next Transfer\n");
+
+ i2c->msg_ptr = 0;
+ i2c->msg_idx++;
+ i2c->msg++;
+
+ writel(I2C_CMD_READ, i2c->regs + CMDR);
+ }
+
+ } else {
+ writel(I2C_CMD_READ, i2c->regs + CMDR);
+ }
+
+ break;
+ }
+}
+
+/* nuc900_i2c_irq
+ *
+ * top level IRQ servicing routine
+*/
+
+static irqreturn_t nuc900_i2c_irq(int irqno, void *dev_id)
+{
+ struct nuc900_i2c *i2c = dev_id;
+ unsigned long status;
+
+ status = readl(i2c->regs + CSR);
+ writel(status | IRQFLAG, i2c->regs + CSR);
+
+ if (status & ARBIT_LOST) {
+ /* deal with arbitration loss */
+ dev_err(i2c->dev, "deal with arbitration loss\n");
+ goto out;
+ }
+
+ if (i2c->state == STATE_IDLE) {
+ dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");
+ goto out;
+ }
+
+ /* pretty much this leaves us with the fact that we've
+ * transmitted or received whatever byte we last sent
+ */
+
+ i2c_nuc900_irq_nextbyte(i2c, status);
+
+ out:
+ return IRQ_HANDLED;
+}
+
+
+/* nuc900_i2c_set_master
+ *
+ * get the i2c bus for a master transaction
+*/
+
+static int nuc900_i2c_set_master(struct nuc900_i2c *i2c)
+{
+ int timeout = 400;
+
+ while (timeout-- > 0) {
+ if (((readl(i2c->regs + SWR) & I2CSTART) == I2CSTART) &&
+ ((readl(i2c->regs + CSR) & I2CBUSY) == 0)) {
+ return 0;
+ }
+
+ msleep(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/* nuc900_i2c_doxfer
+ *
+ * this starts an i2c transfer
+*/
+
+static int nuc900_i2c_doxfer(struct nuc900_i2c *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ unsigned long iicstat, timeout;
+ int spins = 20;
+ int ret;
+
+ ret = nuc900_i2c_set_master(i2c);
+ if (ret != 0) {
+ dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ spin_lock_irq(&i2c->lock);
+
+ i2c->msg = msgs;
+ i2c->msg_num = num;
+ i2c->msg_ptr = 0;
+ i2c->msg_idx = 0;
+ i2c->state = STATE_START;
+
+ nuc900_i2c_message_start(i2c, msgs);
+ spin_unlock_irq(&i2c->lock);
+
+ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+
+ ret = i2c->msg_idx;
+
+ /* having these next two as dev_err() makes life very
+ * noisy when doing an i2cdetect
+ */
+
+ if (timeout == 0)
+ dev_dbg(i2c->dev, "timeout\n");
+ else if (ret != num)
+ dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
+
+ /* ensure the stop has been through the bus */
+
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ /* first, try busy waiting briefly */
+ do {
+ iicstat = readl(i2c->regs + CSR);
+ } while ((iicstat & I2CBUSY) && --spins);
+
+ /* if that timed out sleep */
+ if (!spins) {
+ msleep(1);
+ iicstat = readl(i2c->regs + CSR);
+ }
+
+ if (iicstat & I2CBUSY)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
+
+ out:
+ return ret;
+}
+
+/* nuc900_i2c_xfer
+ *
+ * first port of call from the i2c bus code when a message needs
+ * transferring across the i2c bus.
+*/
+
+static int nuc900_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct nuc900_i2c *i2c = (struct nuc900_i2c *)adap->algo_data;
+ int retry;
+ int ret;
+
+ nuc900_i2c_enable_irq(i2c);
+
+ for (retry = 0; retry < adap->retries; retry++) {
+
+ ret = nuc900_i2c_doxfer(i2c, msgs, num);
+
+ if (ret != -EAGAIN)
+ return ret;
+
+ dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);
+
+ udelay(100);
+ }
+
+ return -EREMOTEIO;
+}
+
+/* declare our i2c functionality */
+static u32 nuc900_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+}
+
+/* i2c bus registration info */
+
+static const struct i2c_algorithm nuc900_i2c_algorithm = {
+ .master_xfer = nuc900_i2c_xfer,
+ .functionality = nuc900_i2c_func,
+};
+
+/* nuc900_i2c_probe
+ *
+ * called by the bus driver when a suitable device is found
+*/
+
+static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
+{
+ struct nuc900_i2c *i2c;
+ struct nuc900_platform_i2c *pdata;
+ struct resource *res;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ i2c = kzalloc(sizeof(struct nuc900_i2c), GFP_KERNEL);
+ if (!i2c) {
+ dev_err(&pdev->dev, "no memory for state\n");
+ return -ENOMEM;
+ }
+
+ strlcpy(i2c->adap.name, "nuc900-i2c0", sizeof(i2c->adap.name));
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &nuc900_i2c_algorithm;
+ i2c->adap.retries = 2;
+ i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+
+ spin_lock_init(&i2c->lock);
+ init_waitqueue_head(&i2c->wait);
+
+ /* find the clock and enable it */
+
+ i2c->dev = &pdev->dev;
+ i2c->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = -ENOENT;
+ goto err_noclk;
+ }
+
+ dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
+
+ clk_enable(i2c->clk);
+
+ /* map the registers */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "cannot find IO resource\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ i2c->ioarea = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+
+ if (i2c->ioarea == NULL) {
+ dev_err(&pdev->dev, "cannot request IO\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ i2c->regs = ioremap(res->start, resource_size(res));
+
+ if (i2c->regs == NULL) {
+ dev_err(&pdev->dev, "cannot map IO\n");
+ ret = -ENXIO;
+ goto err_ioarea;
+ }
+
+ dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
+ i2c->regs, i2c->ioarea, res);
+
+ /* setup info block for the i2c core */
+
+ i2c->adap.algo_data = i2c;
+ i2c->adap.dev.parent = &pdev->dev;
+
+ mfp_set_groupg(&pdev->dev);
+
+ clk_get_rate(i2c->clk);
+
+ ret = (i2c->clk.apbfreq)/(pdata->bus_freq * 5) - 1;
+ writel(ret & 0xffff, i2c->regs + DIVIDER);
+
+ /* find the IRQ for this unit (note, this relies on the init call
+ * to ensure no current IRQs are pending)
+ */
+
+ i2c->irq = ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ goto err_iomap;
+ }
+
+ ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_DISABLED | IRQF_SHARED,
+ dev_name(&pdev->dev), i2c);
+
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ goto err_iomap;
+ }
+
+ /* Note, previous versions of the driver used i2c_add_adapter()
+ * to add the bus at any number. We now pass the bus number via
+ * the platform data, so if unset it will now default to always
+ * being bus 0.
+ */
+
+ i2c->adap.nr = pdata->bus_num;
+
+ ret = i2c_add_numbered_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ goto err_irq;
+ }
+
+ platform_set_drvdata(pdev, i2c);
+
+ dev_info(&pdev->dev, "%s: NUC900 I2C adapter\n",
+ dev_name(&i2c->adap.dev));
+ return 0;
+
+ err_irq:
+ free_irq(i2c->irq, i2c);
+
+ err_iomap:
+ iounmap(i2c->regs);
+
+ err_ioarea:
+ release_resource(i2c->ioarea);
+ kfree(i2c->ioarea);
+
+ err_clk:
+ clk_disable(i2c->clk);
+ clk_put(i2c->clk);
+
+ err_noclk:
+ kfree(i2c);
+ return ret;
+}
+
+/* nuc900_i2c_remove
+ *
+ * called when device is removed from the bus
+*/
+
+static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
+{
+ struct nuc900_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ free_irq(i2c->irq, i2c);
+
+ clk_disable(i2c->clk);
+ clk_put(i2c->clk);
+
+ iounmap(i2c->regs);
+
+ release_resource(i2c->ioarea);
+ kfree(i2c->ioarea);
+ kfree(i2c);
+
+ return 0;
+}
+
+static struct platform_driver nuc900_i2c_driver = {
+ .probe = nuc900_i2c_probe,
+ .remove = __devexit_p(nuc900_i2c_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "nuc900-i2c0",
+ },
+};
+
+static int __init i2c_adap_nuc900_init(void)
+{
+ return platform_driver_register(&nuc900_i2c_driver);
+}
+
+static void __exit i2c_adap_nuc900_exit(void)
+{
+ platform_driver_unregister(&nuc900_i2c_driver);
+}
+subsys_initcall(i2c_adap_nuc900_init);
+module_exit(i2c_adap_nuc900_exit);
+
+MODULE_DESCRIPTION("NUC900 I2C Bus driver");
+MODULE_AUTHOR("Wan ZongShun, <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-i2c0");
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 0e9f85d0a83..56dbe54e881 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
return result;
} else if (result == 0) {
dev_dbg(i2c->dev, "%s: timeout\n", __func__);
- result = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb5537..b33c78586bf 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (r == 0)
r = num;
+
+ omap_i2c_wait_for_bb(dev);
out:
omap_i2c_idle(dev);
return r;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 020ff23d762..c94e51b2651 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1001,7 +1001,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
struct pxa_i2c *i2c;
struct resource *res;
struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
- struct platform_device_id *id = platform_get_device_id(dev);
+ const struct platform_device_id *id = platform_get_device_id(dev);
int ret;
int irq;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 72902e0bbfa..bf831bf8158 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
unsigned long sda_delay;
if (pdata->sda_delay) {
- sda_delay = (freq / 1000) * pdata->sda_delay;
- sda_delay /= 1000000;
+ sda_delay = clkin * pdata->sda_delay;
+ sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
sda_delay = DIV_ROUND_UP(sda_delay, 5);
if (sda_delay > 3)
sda_delay = 3;
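Worked example for the sda_delay conversion above, assuming clkin is the bus clock in kHz (as it is elsewhere in this function) and pdata->sda_delay is given in ns: with clkin = 66000 and sda_delay = 100, clkin * sda_delay = 6600000, DIV_ROUND_UP(..., 1000000) = 7 clock cycles, and DIV_ROUND_UP(7, 5) = 2, so the register field (in units of 5 clocks) is programmed with 2; any delay longer than 15 clocks saturates at the field maximum of 3.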
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index ffb405d7c6f..598c49acaeb 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -119,8 +119,10 @@ struct sh_mobile_i2c_data {
struct i2c_adapter adap;
struct clk *clk;
+ u_int8_t icic;
u_int8_t iccl;
u_int8_t icch;
+ u_int8_t flags;
spinlock_t lock;
wait_queue_head_t wait;
@@ -129,15 +131,17 @@ struct sh_mobile_i2c_data {
int sr;
};
+#define IIC_FLAG_HAS_ICIC67 (1 << 0)
+
#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */
/* Register offsets */
-#define ICDR(pd) (pd->reg + 0x00)
-#define ICCR(pd) (pd->reg + 0x04)
-#define ICSR(pd) (pd->reg + 0x08)
-#define ICIC(pd) (pd->reg + 0x0c)
-#define ICCL(pd) (pd->reg + 0x10)
-#define ICCH(pd) (pd->reg + 0x14)
+#define ICDR 0x00
+#define ICCR 0x04
+#define ICSR 0x08
+#define ICIC 0x0c
+#define ICCL 0x10
+#define ICCH 0x14
/* Register bits */
#define ICCR_ICE 0x80
@@ -155,11 +159,32 @@ struct sh_mobile_i2c_data {
#define ICSR_WAIT 0x02
#define ICSR_DTE 0x01
+#define ICIC_ICCLB8 0x80
+#define ICIC_ICCHB8 0x40
#define ICIC_ALE 0x08
#define ICIC_TACKE 0x04
#define ICIC_WAITE 0x02
#define ICIC_DTEE 0x01
+static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data)
+{
+ if (offs == ICIC)
+ data |= pd->icic;
+
+ iowrite8(data, pd->reg + offs);
+}
+
+static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs)
+{
+ return ioread8(pd->reg + offs);
+}
+
+static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
+ unsigned char set, unsigned char clr)
+{
+ iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
+}
+
static void activate_ch(struct sh_mobile_i2c_data *pd)
{
unsigned long i2c_clk;
@@ -187,6 +212,14 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
else
pd->iccl = (u_int8_t)(num/denom);
+ /* one more bit of ICCL in ICIC */
+ if (pd->flags & IIC_FLAG_HAS_ICIC67) {
+ if ((num/denom) > 0xff)
+ pd->icic |= ICIC_ICCLB8;
+ else
+ pd->icic &= ~ICIC_ICCLB8;
+ }
+
/* Calculate the value for icch. From the data sheet:
icch = (p clock / transfer rate) * (H / (L + H)) */
num = i2c_clk * 4;
@@ -196,25 +229,33 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
else
pd->icch = (u_int8_t)(num/denom);
+ /* one more bit of ICCH in ICIC */
+ if (pd->flags & IIC_FLAG_HAS_ICIC67) {
+ if ((num/denom) > 0xff)
+ pd->icic |= ICIC_ICCHB8;
+ else
+ pd->icic &= ~ICIC_ICCHB8;
+ }
+
/* Enable channel and configure rx ack */
- iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Mask all interrupts */
- iowrite8(0, ICIC(pd));
+ iic_wr(pd, ICIC, 0);
/* Set the clock */
- iowrite8(pd->iccl, ICCL(pd));
- iowrite8(pd->icch, ICCH(pd));
+ iic_wr(pd, ICCL, pd->iccl);
+ iic_wr(pd, ICCH, pd->icch);
}
static void deactivate_ch(struct sh_mobile_i2c_data *pd)
{
/* Clear/disable interrupts */
- iowrite8(0, ICSR(pd));
- iowrite8(0, ICIC(pd));
+ iic_wr(pd, ICSR, 0);
+ iic_wr(pd, ICIC, 0);
/* Disable channel */
- iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, 0, ICCR_ICE);
/* Disable clock and mark device as idle */
clk_disable(pd->clk);
@@ -233,35 +274,35 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
switch (op) {
case OP_START: /* issue start and trigger DTE interrupt */
- iowrite8(0x94, ICCR(pd));
+ iic_wr(pd, ICCR, 0x94);
break;
case OP_TX_FIRST: /* disable DTE interrupt and write data */
- iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE, ICIC(pd));
- iowrite8(data, ICDR(pd));
+ iic_wr(pd, ICIC, ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ iic_wr(pd, ICDR, data);
break;
case OP_TX: /* write data */
- iowrite8(data, ICDR(pd));
+ iic_wr(pd, ICDR, data);
break;
case OP_TX_STOP: /* write data and issue a stop afterwards */
- iowrite8(data, ICDR(pd));
- iowrite8(0x90, ICCR(pd));
+ iic_wr(pd, ICDR, data);
+ iic_wr(pd, ICCR, 0x90);
break;
case OP_TX_TO_RX: /* select read mode */
- iowrite8(0x81, ICCR(pd));
+ iic_wr(pd, ICCR, 0x81);
break;
case OP_RX: /* just read data */
- ret = ioread8(ICDR(pd));
+ ret = iic_rd(pd, ICDR);
break;
case OP_RX_STOP: /* enable DTE interrupt, issue stop */
- iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE,
- ICIC(pd));
- iowrite8(0xc0, ICCR(pd));
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ iic_wr(pd, ICCR, 0xc0);
break;
case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
- iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE,
- ICIC(pd));
- ret = ioread8(ICDR(pd));
- iowrite8(0xc0, ICCR(pd));
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ ret = iic_rd(pd, ICDR);
+ iic_wr(pd, ICCR, 0xc0);
break;
}
@@ -367,7 +408,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
unsigned char sr;
int wakeup;
- sr = ioread8(ICSR(pd));
+ sr = iic_rd(pd, ICSR);
pd->sr |= sr; /* remember state */
dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
@@ -376,7 +417,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
if (sr & (ICSR_AL | ICSR_TACK)) {
/* don't interrupt transaction - continue to issue stop */
- iowrite8(sr & ~(ICSR_AL | ICSR_TACK), ICSR(pd));
+ iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK));
wakeup = 0;
} else if (pd->msg->flags & I2C_M_RD)
wakeup = sh_mobile_i2c_isr_rx(pd);
@@ -384,7 +425,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
wakeup = sh_mobile_i2c_isr_tx(pd);
if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */
- iowrite8(sr & ~ICSR_WAIT, ICSR(pd));
+ iic_wr(pd, ICSR, sr & ~ICSR_WAIT);
if (wakeup) {
pd->sr |= SW_DONE;
@@ -402,21 +443,21 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
}
/* Initialize channel registers */
- iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, 0, ICCR_ICE);
/* Enable channel and configure rx ack */
- iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Set the clock */
- iowrite8(pd->iccl, ICCL(pd));
- iowrite8(pd->icch, ICCH(pd));
+ iic_wr(pd, ICCL, pd->iccl);
+ iic_wr(pd, ICCH, pd->icch);
pd->msg = usr_msg;
pd->pos = -1;
pd->sr = 0;
/* Enable all interrupts to begin with */
- iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE | ICIC_DTEE, ICIC(pd));
+ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
return 0;
}
@@ -451,7 +492,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
retry_count = 1000;
again:
- val = ioread8(ICSR(pd));
+ val = iic_rd(pd, ICSR);
dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
@@ -576,6 +617,12 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_irq;
}
+ /* The IIC blocks on SH-Mobile ARM processors
+ * come with two new bits in ICIC.
+ */
+ if (size > 0x17)
+ pd->flags |= IIC_FLAG_HAS_ICIC67;
+
/* Enable Runtime PM for this device.
*
* Also tell the Runtime PM core to ignore children
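The sh_mobile hunks above replace the per-register address macros with plain offsets plus the iic_wr()/iic_rd()/iic_set_clr() accessors, so the two new ICCLB8/ICCHB8 bits can be folded into every ICIC write through the pd->icic shadow. A minimal user-space sketch of that shadowing pattern, assuming an invented in-memory register file (the fake_i2c layout and the register subset are illustrative, not the driver's real resource map):

/* Hedged sketch: mimics iic_wr()/iic_set_clr() above against an in-memory
 * "register window" so the ICIC shadow behaviour can be exercised alone. */
#include <stdio.h>
#include <stdint.h>

#define ICCR		0x04
#define ICIC		0x0c
#define ICCR_ICE	0x80
#define ICIC_ICCLB8	0x80

struct fake_i2c {
	uint8_t reg[0x18];	/* stand-in for the ioremapped window */
	uint8_t icic;		/* shadow of the extra ICIC clock bits */
};

static void iic_wr(struct fake_i2c *pd, int offs, uint8_t data)
{
	if (offs == ICIC)
		data |= pd->icic;	/* keep ICCLB8/ICCHB8 sticky */
	pd->reg[offs] = data;
}

static uint8_t iic_rd(struct fake_i2c *pd, int offs)
{
	return pd->reg[offs];
}

static void iic_set_clr(struct fake_i2c *pd, int offs, uint8_t set, uint8_t clr)
{
	iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
}

int main(void)
{
	struct fake_i2c pd = { .icic = ICIC_ICCLB8 };

	iic_set_clr(&pd, ICCR, ICCR_ICE, 0);	/* enable the channel */
	iic_wr(&pd, ICIC, 0);			/* "mask all" still keeps ICCLB8 set */
	printf("ICCR=%02x ICIC=%02x\n", pd.reg[ICCR], pd.reg[ICIC]);
	return 0;
}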
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0815e10da7c..6649176de94 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -20,7 +20,9 @@
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
- Jean Delvare <khali@linux-fr.org> */
+ Jean Delvare <khali@linux-fr.org>
+ Mux support by Rodolfo Giometti <giometti@enneenne.com> and
+ Michael Lawnick <michael.lawnick.ext@nsn.com> */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -30,6 +32,8 @@
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/mutex.h>
+#include <linux/of_i2c.h>
+#include <linux/of_device.h>
#include <linux/completion.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
@@ -70,6 +74,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
if (!client)
return 0;
+ /* Attempt an OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
driver = to_i2c_driver(drv);
/* match on an id table if there is one */
if (driver->id_table)
@@ -417,11 +425,87 @@ static int __i2c_check_addr_busy(struct device *dev, void *addrp)
return 0;
}
+/* walk up mux tree */
+static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr)
+{
+ int result;
+
+ result = device_for_each_child(&adapter->dev, &addr,
+ __i2c_check_addr_busy);
+
+ if (!result && i2c_parent_is_i2c_adapter(adapter))
+ result = i2c_check_mux_parents(
+ to_i2c_adapter(adapter->dev.parent), addr);
+
+ return result;
+}
+
+/* recurse down mux tree */
+static int i2c_check_mux_children(struct device *dev, void *addrp)
+{
+ int result;
+
+ if (dev->type == &i2c_adapter_type)
+ result = device_for_each_child(dev, addrp,
+ i2c_check_mux_children);
+ else
+ result = __i2c_check_addr_busy(dev, addrp);
+
+ return result;
+}
+
static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
{
- return device_for_each_child(&adapter->dev, &addr,
- __i2c_check_addr_busy);
+ int result = 0;
+
+ if (i2c_parent_is_i2c_adapter(adapter))
+ result = i2c_check_mux_parents(
+ to_i2c_adapter(adapter->dev.parent), addr);
+
+ if (!result)
+ result = device_for_each_child(&adapter->dev, &addr,
+ i2c_check_mux_children);
+
+ return result;
+}
+
+/**
+ * i2c_lock_adapter - Get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ */
+void i2c_lock_adapter(struct i2c_adapter *adapter)
+{
+ if (i2c_parent_is_i2c_adapter(adapter))
+ i2c_lock_adapter(to_i2c_adapter(adapter->dev.parent));
+ else
+ rt_mutex_lock(&adapter->bus_lock);
+}
+EXPORT_SYMBOL_GPL(i2c_lock_adapter);
+
+/**
+ * i2c_trylock_adapter - Try to get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ */
+static int i2c_trylock_adapter(struct i2c_adapter *adapter)
+{
+ if (i2c_parent_is_i2c_adapter(adapter))
+ return i2c_trylock_adapter(to_i2c_adapter(adapter->dev.parent));
+ else
+ return rt_mutex_trylock(&adapter->bus_lock);
+}
+
+/**
+ * i2c_unlock_adapter - Release exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ */
+void i2c_unlock_adapter(struct i2c_adapter *adapter)
+{
+ if (i2c_parent_is_i2c_adapter(adapter))
+ i2c_unlock_adapter(to_i2c_adapter(adapter->dev.parent));
+ else
+ rt_mutex_unlock(&adapter->bus_lock);
}
+EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
/**
* i2c_new_device - instantiate an i2c device
@@ -627,9 +711,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/* Keep track of the added device */
- i2c_lock_adapter(adap);
+ mutex_lock(&adap->userspace_clients_lock);
list_add_tail(&client->detected, &adap->userspace_clients);
- i2c_unlock_adapter(adap);
+ mutex_unlock(&adap->userspace_clients_lock);
dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
info.type, info.addr);
@@ -668,7 +752,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
/* Make sure the device was added through sysfs */
res = -ENOENT;
- i2c_lock_adapter(adap);
+ mutex_lock(&adap->userspace_clients_lock);
list_for_each_entry_safe(client, next, &adap->userspace_clients,
detected) {
if (client->addr == addr) {
@@ -681,7 +765,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
break;
}
}
- i2c_unlock_adapter(adap);
+ mutex_unlock(&adap->userspace_clients_lock);
if (res < 0)
dev_err(dev, "%s: Can't find device in list\n",
@@ -708,10 +792,11 @@ static const struct attribute_group *i2c_adapter_attr_groups[] = {
NULL
};
-static struct device_type i2c_adapter_type = {
+struct device_type i2c_adapter_type = {
.groups = i2c_adapter_attr_groups,
.release = i2c_adapter_dev_release,
};
+EXPORT_SYMBOL_GPL(i2c_adapter_type);
#ifdef CONFIG_I2C_COMPAT
static struct class_compat *i2c_adapter_compat_class;
@@ -754,7 +839,7 @@ static int __process_new_adapter(struct device_driver *d, void *data)
static int i2c_register_adapter(struct i2c_adapter *adap)
{
- int res = 0, dummy;
+ int res = 0;
/* Can't register until after driver model init */
if (unlikely(WARN_ON(!i2c_bus_type.p))) {
@@ -763,6 +848,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
}
rt_mutex_init(&adap->bus_lock);
+ mutex_init(&adap->userspace_clients_lock);
INIT_LIST_HEAD(&adap->userspace_clients);
/* Set default timeout to 1 second if not already set */
@@ -790,10 +876,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
+ /* Register devices from the device tree */
+ of_i2c_register_devices(adap);
+
/* Notify drivers */
mutex_lock(&core_lock);
- dummy = bus_for_each_drv(&i2c_bus_type, NULL, adap,
- __process_new_adapter);
+ bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
mutex_unlock(&core_lock);
return 0;
@@ -966,7 +1054,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
return res;
/* Remove devices instantiated from sysfs */
- i2c_lock_adapter(adap);
+ mutex_lock(&adap->userspace_clients_lock);
list_for_each_entry_safe(client, next, &adap->userspace_clients,
detected) {
dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
@@ -974,7 +1062,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
list_del(&client->detected);
i2c_unregister_device(client);
}
- i2c_unlock_adapter(adap);
+ mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
check the returned value. */
@@ -1229,12 +1317,12 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
#endif
if (in_atomic() || irqs_disabled()) {
- ret = rt_mutex_trylock(&adap->bus_lock);
+ ret = i2c_trylock_adapter(adap);
if (!ret)
/* I2C activity is ongoing. */
return -EAGAIN;
} else {
- rt_mutex_lock(&adap->bus_lock);
+ i2c_lock_adapter(adap);
}
/* Retry automatically on arbitration loss */
@@ -1246,7 +1334,7 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (time_after(jiffies, orig_jiffies + adap->timeout))
break;
}
- rt_mutex_unlock(&adap->bus_lock);
+ i2c_unlock_adapter(adap);
return ret;
} else {
@@ -1341,13 +1429,17 @@ static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr)
I2C_SMBUS_BYTE_DATA, &dummy);
else
#endif
- if ((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50
- || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK))
- err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
- I2C_SMBUS_BYTE, &dummy);
- else
+ if (!((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50)
+ && i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK))
err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
I2C_SMBUS_QUICK, NULL);
+ else if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE))
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
+ I2C_SMBUS_BYTE, &dummy);
+ else {
+ dev_warn(&adap->dev, "No suitable probing method supported\n");
+ err = -EOPNOTSUPP;
+ }
return err >= 0;
}
@@ -1428,16 +1520,6 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
if (!(adapter->class & driver->class))
goto exit_free;
- /* Stop here if the bus doesn't support probing */
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE)) {
- if (address_list[0] == I2C_CLIENT_END)
- goto exit_free;
-
- dev_warn(&adapter->dev, "Probing not supported\n");
- err = -EOPNOTSUPP;
- goto exit_free;
- }
-
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
"addr 0x%02x\n", adap_id, address_list[i]);
@@ -1452,18 +1534,23 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
return err;
}
+int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr)
+{
+ return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
+ I2C_SMBUS_QUICK, NULL) >= 0;
+}
+EXPORT_SYMBOL_GPL(i2c_probe_func_quick_read);
+
struct i2c_client *
i2c_new_probed_device(struct i2c_adapter *adap,
struct i2c_board_info *info,
- unsigned short const *addr_list)
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *, unsigned short addr))
{
int i;
- /* Stop here if the bus doesn't support probing */
- if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE)) {
- dev_err(&adap->dev, "Probing not supported\n");
- return NULL;
- }
+ if (!probe)
+ probe = i2c_default_probe;
for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
/* Check address validity */
@@ -1481,7 +1568,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
/* Test address responsiveness */
- if (i2c_default_probe(adap, addr_list[i]))
+ if (probe(adap, addr_list[i]))
break;
}
@@ -1993,7 +2080,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
flags &= I2C_M_TEN | I2C_CLIENT_PEC;
if (adapter->algo->smbus_xfer) {
- rt_mutex_lock(&adapter->bus_lock);
+ i2c_lock_adapter(adapter);
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
@@ -2007,7 +2094,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
orig_jiffies + adapter->timeout))
break;
}
- rt_mutex_unlock(&adapter->bus_lock);
+ i2c_unlock_adapter(adapter);
} else
res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
command, protocol, data);
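i2c_new_probed_device() now takes a probing callback, so class drivers can override the default heuristics; passing NULL keeps the i2c_default_probe() behaviour, and i2c_probe_func_quick_read() is exported for chips that must not be sent an SMBus quick write. A hedged caller sketch (the foo device name and address list are invented for illustration):

#include <linux/i2c.h>

static const unsigned short foo_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static struct i2c_client *foo_attach(struct i2c_adapter *adap)
{
	struct i2c_board_info info = { I2C_BOARD_INFO("foo", 0) };

	/* Probe with an SMBus quick read; NULL selects i2c_default_probe(). */
	return i2c_new_probed_device(adap, &info, foo_addrs,
				     i2c_probe_func_quick_read);
}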
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index e0694e4d86c..5f3a52d517c 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -167,13 +167,9 @@ static ssize_t i2cdev_write(struct file *file, const char __user *buf,
if (count > 8192)
count = 8192;
- tmp = kmalloc(count, GFP_KERNEL);
- if (tmp == NULL)
- return -ENOMEM;
- if (copy_from_user(tmp, buf, count)) {
- kfree(tmp);
- return -EFAULT;
- }
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
@@ -193,12 +189,50 @@ static int i2cdev_check(struct device *dev, void *addrp)
return dev->driver ? -EBUSY : 0;
}
+/* walk up mux tree */
+static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr)
+{
+ int result;
+
+ result = device_for_each_child(&adapter->dev, &addr, i2cdev_check);
+
+ if (!result && i2c_parent_is_i2c_adapter(adapter))
+ result = i2cdev_check_mux_parents(
+ to_i2c_adapter(adapter->dev.parent), addr);
+
+ return result;
+}
+
+/* recurse down mux tree */
+static int i2cdev_check_mux_children(struct device *dev, void *addrp)
+{
+ int result;
+
+ if (dev->type == &i2c_adapter_type)
+ result = device_for_each_child(dev, addrp,
+ i2cdev_check_mux_children);
+ else
+ result = i2cdev_check(dev, addrp);
+
+ return result;
+}
+
/* This address checking function differs from the one in i2c-core
in that it considers an address with a registered device, but no
driver bound to it, as NOT busy. */
static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
{
- return device_for_each_child(&adapter->dev, &addr, i2cdev_check);
+ int result = 0;
+
+ if (i2c_parent_is_i2c_adapter(adapter))
+ result = i2cdev_check_mux_parents(
+ to_i2c_adapter(adapter->dev.parent), addr);
+
+ if (!result)
+ result = device_for_each_child(&adapter->dev, &addr,
+ i2cdev_check_mux_children);
+
+ return result;
}
static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
@@ -219,9 +253,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
return -EINVAL;
- rdwr_pa = (struct i2c_msg *)
- kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
- GFP_KERNEL);
+ rdwr_pa = kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), GFP_KERNEL);
if (!rdwr_pa)
return -ENOMEM;
@@ -247,15 +279,9 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
break;
}
data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
- rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL);
- if (rdwr_pa[i].buf == NULL) {
- res = -ENOMEM;
- break;
- }
- if (copy_from_user(rdwr_pa[i].buf, data_ptrs[i],
- rdwr_pa[i].len)) {
- ++i; /* Needs to be kfreed too */
- res = -EFAULT;
+ rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
+ if (IS_ERR(rdwr_pa[i].buf)) {
+ res = PTR_ERR(rdwr_pa[i].buf);
break;
}
}
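Both i2cdev_write() and the RDWR ioctl above switch to memdup_user(), which folds the kmalloc(), the copy_from_user() and the error unwinding into one call that returns an ERR_PTR on failure. The calling convention in isolation (a hedged sketch; demo_take_blob() is an invented helper):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_take_blob(const void __user *ubuf, size_t len)
{
	void *kbuf;

	kbuf = memdup_user(ubuf, len);	/* kmalloc() + copy_from_user() */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

	/* ... consume kbuf ... */
	kfree(kbuf);
	return 0;
}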
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
new file mode 100644
index 00000000000..d32a4843fc3
--- /dev/null
+++ b/drivers/i2c/i2c-mux.c
@@ -0,0 +1,165 @@
+/*
+ * Multiplexed I2C bus driver.
+ *
+ * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
+ * Copyright (c) 2009-2010 NSN GmbH & Co KG <michael.lawnick.ext@nsn.com>
+ *
+ * Simplifies access to complex multiplexed I2C bus topologies, by presenting
+ * each multiplexed bus segment as an additional I2C adapter.
+ * Supports multi-level mux'ing (mux behind a mux).
+ *
+ * Based on:
+ * i2c-virt.c from Kumar Gala <galak@kernel.crashing.org>
+ * i2c-virtual.c from Ken Harrenstien, Copyright (c) 2004 Google, Inc.
+ * i2c-virtual.c from Brian Kuschak <bkuschak@yahoo.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+
+/* multiplexer per channel data */
+struct i2c_mux_priv {
+ struct i2c_adapter adap;
+ struct i2c_algorithm algo;
+
+ struct i2c_adapter *parent;
+ void *mux_dev; /* the mux chip/device */
+ u32 chan_id; /* the channel id */
+
+ int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+ int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+};
+
+static int i2c_mux_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct i2c_mux_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->parent;
+ int ret;
+
+ /* Switch to the right mux port and perform the transfer. */
+
+ ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ if (ret >= 0)
+ ret = parent->algo->master_xfer(parent, msgs, num);
+ if (priv->deselect)
+ priv->deselect(parent, priv->mux_dev, priv->chan_id);
+
+ return ret;
+}
+
+static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
+ u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_mux_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->parent;
+ int ret;
+
+ /* Select the right mux port and perform the transfer. */
+
+ ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ if (ret >= 0)
+ ret = parent->algo->smbus_xfer(parent, addr, flags,
+ read_write, command, size, data);
+ if (priv->deselect)
+ priv->deselect(parent, priv->mux_dev, priv->chan_id);
+
+ return ret;
+}
+
+/* Return the parent's functionality */
+static u32 i2c_mux_functionality(struct i2c_adapter *adap)
+{
+ struct i2c_mux_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->parent;
+
+ return parent->algo->functionality(parent);
+}
+
+struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
+ void *mux_dev, u32 force_nr, u32 chan_id,
+ int (*select) (struct i2c_adapter *,
+ void *, u32),
+ int (*deselect) (struct i2c_adapter *,
+ void *, u32))
+{
+ struct i2c_mux_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(struct i2c_mux_priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ /* Set up private adapter data */
+ priv->parent = parent;
+ priv->mux_dev = mux_dev;
+ priv->chan_id = chan_id;
+ priv->select = select;
+ priv->deselect = deselect;
+
+ /* Need to do algo dynamically because we don't know ahead
+ * of time what sort of physical adapter we'll be dealing with.
+ */
+ if (parent->algo->master_xfer)
+ priv->algo.master_xfer = i2c_mux_master_xfer;
+ if (parent->algo->smbus_xfer)
+ priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
+ priv->algo.functionality = i2c_mux_functionality;
+
+ /* Now fill out new adapter structure */
+ snprintf(priv->adap.name, sizeof(priv->adap.name),
+ "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id);
+ priv->adap.owner = THIS_MODULE;
+ priv->adap.id = parent->id;
+ priv->adap.algo = &priv->algo;
+ priv->adap.algo_data = priv;
+ priv->adap.dev.parent = &parent->dev;
+
+ if (force_nr) {
+ priv->adap.nr = force_nr;
+ ret = i2c_add_numbered_adapter(&priv->adap);
+ } else {
+ ret = i2c_add_adapter(&priv->adap);
+ }
+ if (ret < 0) {
+ dev_err(&parent->dev,
+ "failed to add mux-adapter (error=%d)\n",
+ ret);
+ kfree(priv);
+ return NULL;
+ }
+
+ dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
+ i2c_adapter_id(&priv->adap));
+
+ return &priv->adap;
+}
+EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
+
+int i2c_del_mux_adapter(struct i2c_adapter *adap)
+{
+ struct i2c_mux_priv *priv = adap->algo_data;
+ int ret;
+
+ ret = i2c_del_adapter(adap);
+ if (ret < 0)
+ return ret;
+ kfree(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_del_mux_adapter);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("I2C driver for multiplexed I2C busses");
+MODULE_LICENSE("GPL v2");
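i2c_add_mux_adapter() is meant to be called once per downstream channel by whatever driver owns the mux hardware; pca954x.c below is the first in-tree user. A stripped-down hedged sketch of the calling pattern (the mymux_* names and the two-channel assumption are illustrative only):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>

/* Illustrative select callback: route the parent bus to channel 'chan'. */
static int mymux_select(struct i2c_adapter *parent, void *mux_dev, u32 chan)
{
	/* program the mux hardware here */
	return 0;
}

static int mymux_setup(struct i2c_adapter *parent, void *mux_dev,
		       struct i2c_adapter **child)
{
	int i;

	for (i = 0; i < 2; i++) {
		/* force_nr = 0 requests a dynamic bus number;
		 * deselect = NULL leaves the channel selected after a transfer */
		child[i] = i2c_add_mux_adapter(parent, mux_dev, 0, i,
					       mymux_select, NULL);
		if (!child[i])
			return -ENODEV;
	}
	return 0;
}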
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
new file mode 100644
index 00000000000..4c9a99c4fcb
--- /dev/null
+++ b/drivers/i2c/muxes/Kconfig
@@ -0,0 +1,18 @@
+#
+# Multiplexer I2C chip drivers configuration
+#
+
+menu "Multiplexer I2C Chip support"
+ depends on I2C_MUX
+
+config I2C_MUX_PCA954x
+ tristate "Philips PCA954x I2C Mux/switches"
+ depends on EXPERIMENTAL
+ help
+ If you say yes here you get support for the Philips PCA954x
+ I2C mux/switch devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called pca954x.
+
+endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
new file mode 100644
index 00000000000..bd83b527481
--- /dev/null
+++ b/drivers/i2c/muxes/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for multiplexer I2C chip drivers.
+
+obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
+
+ifeq ($(CONFIG_I2C_DEBUG_BUS),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
new file mode 100644
index 00000000000..6f9accf3189
--- /dev/null
+++ b/drivers/i2c/muxes/pca954x.c
@@ -0,0 +1,301 @@
+/*
+ * I2C multiplexer
+ *
+ * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This module supports the PCA954x series of I2C multiplexer/switch chips
+ * made by Philips Semiconductors.
+ * This includes the:
+ * PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547
+ * and PCA9548.
+ *
+ * These chips are all controlled via the I2C bus itself, and all have a
+ * single 8-bit register. The upstream "parent" bus fans out to two,
+ * four, or eight downstream busses or channels; which of these
+ * are selected is determined by the chip type and register contents. A
+ * mux can select only one sub-bus at a time; a switch can select any
+ * combination simultaneously.
+ *
+ * Based on:
+ * pca954x.c from Kumar Gala <galak@kernel.crashing.org>
+ * Copyright (C) 2006
+ *
+ * Based on:
+ * pca954x.c from Ken Harrenstien
+ * Copyright (C) 2004 Google, Inc. (Ken Harrenstien)
+ *
+ * Based on:
+ * i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
+ * and
+ * pca9540.c from Jean Delvare <khali@linux-fr.org>.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+
+#include <linux/i2c/pca954x.h>
+
+#define PCA954X_MAX_NCHANS 8
+
+enum pca_type {
+ pca_9540,
+ pca_9542,
+ pca_9543,
+ pca_9544,
+ pca_9545,
+ pca_9546,
+ pca_9547,
+ pca_9548,
+};
+
+struct pca954x {
+ enum pca_type type;
+ struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
+
+ u8 last_chan; /* last register value */
+};
+
+struct chip_desc {
+ u8 nchans;
+ u8 enable; /* used for muxes only */
+ enum muxtype {
+ pca954x_ismux = 0,
+ pca954x_isswi
+ } muxtype;
+};
+
+/* Provide specs for the PCA954x types we know about */
+static const struct chip_desc chips[] = {
+ [pca_9540] = {
+ .nchans = 2,
+ .enable = 0x4,
+ .muxtype = pca954x_ismux,
+ },
+ [pca_9543] = {
+ .nchans = 2,
+ .muxtype = pca954x_isswi,
+ },
+ [pca_9544] = {
+ .nchans = 4,
+ .enable = 0x4,
+ .muxtype = pca954x_ismux,
+ },
+ [pca_9545] = {
+ .nchans = 4,
+ .muxtype = pca954x_isswi,
+ },
+ [pca_9547] = {
+ .nchans = 8,
+ .enable = 0x8,
+ .muxtype = pca954x_ismux,
+ },
+ [pca_9548] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ },
+};
+
+static const struct i2c_device_id pca954x_id[] = {
+ { "pca9540", pca_9540 },
+ { "pca9542", pca_9540 },
+ { "pca9543", pca_9543 },
+ { "pca9544", pca_9544 },
+ { "pca9545", pca_9545 },
+ { "pca9546", pca_9545 },
+ { "pca9547", pca_9547 },
+ { "pca9548", pca_9548 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pca954x_id);
+
+/* Write to the mux register. Don't use i2c_transfer()/i2c_smbus_xfer()
+ for this as they will try to lock the adapter a second time */
+static int pca954x_reg_write(struct i2c_adapter *adap,
+ struct i2c_client *client, u8 val)
+{
+ int ret = -ENODEV;
+
+ if (adap->algo->master_xfer) {
+ struct i2c_msg msg;
+ char buf[1];
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 1;
+ buf[0] = val;
+ msg.buf = buf;
+ ret = adap->algo->master_xfer(adap, &msg, 1);
+ } else {
+ union i2c_smbus_data data;
+ ret = adap->algo->smbus_xfer(adap, client->addr,
+ client->flags,
+ I2C_SMBUS_WRITE,
+ val, I2C_SMBUS_BYTE, &data);
+ }
+
+ return ret;
+}
+
+static int pca954x_select_chan(struct i2c_adapter *adap,
+ void *client, u32 chan)
+{
+ struct pca954x *data = i2c_get_clientdata(client);
+ const struct chip_desc *chip = &chips[data->type];
+ u8 regval;
+ int ret = 0;
+
+ /* we make switches look like muxes, not sure how to be smarter */
+ if (chip->muxtype == pca954x_ismux)
+ regval = chan | chip->enable;
+ else
+ regval = 1 << chan;
+
+ /* Only select the channel if it's different from the last channel */
+ if (data->last_chan != regval) {
+ ret = pca954x_reg_write(adap, client, regval);
+ data->last_chan = regval;
+ }
+
+ return ret;
+}
+
+static int pca954x_deselect_mux(struct i2c_adapter *adap,
+ void *client, u32 chan)
+{
+ struct pca954x *data = i2c_get_clientdata(client);
+
+ /* Deselect active channel */
+ data->last_chan = 0;
+ return pca954x_reg_write(adap, client, data->last_chan);
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+static int __devinit pca954x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
+ struct pca954x_platform_data *pdata = client->dev.platform_data;
+ int num, force;
+ struct pca954x *data;
+ int ret = -ENODEV;
+
+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
+ goto err;
+
+ data = kzalloc(sizeof(struct pca954x), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i2c_set_clientdata(client, data);
+
+ /* Read the mux register at addr to verify
+ * that the mux is in fact present.
+ */
+ if (i2c_smbus_read_byte(client) < 0) {
+ dev_warn(&client->dev, "probe failed\n");
+ goto exit_free;
+ }
+
+ data->type = id->driver_data;
+ data->last_chan = 0; /* force the first selection */
+
+ /* Now create an adapter for each channel */
+ for (num = 0; num < chips[data->type].nchans; num++) {
+ force = 0; /* dynamic adap number */
+ if (pdata) {
+ if (num < pdata->num_modes)
+ /* force static number */
+ force = pdata->modes[num].adap_id;
+ else
+ /* discard unconfigured channels */
+ break;
+ }
+
+ data->virt_adaps[num] =
+ i2c_add_mux_adapter(adap, client,
+ force, num, pca954x_select_chan,
+ (pdata && pdata->modes[num].deselect_on_exit)
+ ? pca954x_deselect_mux : NULL);
+
+ if (data->virt_adaps[num] == NULL) {
+ ret = -ENODEV;
+ dev_err(&client->dev,
+ "failed to register multiplexed adapter"
+ " %d as bus %d\n", num, force);
+ goto virt_reg_failed;
+ }
+ }
+
+ dev_info(&client->dev,
+ "registered %d multiplexed busses for I2C %s %s\n",
+ num, chips[data->type].muxtype == pca954x_ismux
+ ? "mux" : "switch", client->name);
+
+ return 0;
+
+virt_reg_failed:
+ for (num--; num >= 0; num--)
+ i2c_del_mux_adapter(data->virt_adaps[num]);
+exit_free:
+ kfree(data);
+err:
+ return ret;
+}
+
+static int __devexit pca954x_remove(struct i2c_client *client)
+{
+ struct pca954x *data = i2c_get_clientdata(client);
+ const struct chip_desc *chip = &chips[data->type];
+ int i, err;
+
+ for (i = 0; i < chip->nchans; ++i)
+ if (data->virt_adaps[i]) {
+ err = i2c_del_mux_adapter(data->virt_adaps[i]);
+ if (err)
+ return err;
+ data->virt_adaps[i] = NULL;
+ }
+
+ kfree(data);
+ return 0;
+}
+
+static struct i2c_driver pca954x_driver = {
+ .driver = {
+ .name = "pca954x",
+ .owner = THIS_MODULE,
+ },
+ .probe = pca954x_probe,
+ .remove = __devexit_p(pca954x_remove),
+ .id_table = pca954x_id,
+};
+
+static int __init pca954x_init(void)
+{
+ return i2c_add_driver(&pca954x_driver);
+}
+
+static void __exit pca954x_exit(void)
+{
+ i2c_del_driver(&pca954x_driver);
+}
+
+module_init(pca954x_init);
+module_exit(pca954x_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("PCA954x I2C mux/switch driver");
+MODULE_LICENSE("GPL v2");
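pca954x_probe() consumes only two pieces of platform data: pdata->modes[n].adap_id to pin the child bus number, and .deselect_on_exit to request the deselect callback. A hedged board-file sketch wiring a PCA9543 up that way (bus numbers 10/11 and the 0x70 slave address are invented):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pca954x.h>

static struct pca954x_platform_mode mux_modes[] = {
	{ .adap_id = 10, .deselect_on_exit = 1 },	/* channel 0 -> i2c-10 */
	{ .adap_id = 11, .deselect_on_exit = 1 },	/* channel 1 -> i2c-11 */
};

static struct pca954x_platform_data mux_pdata = {
	.modes		= mux_modes,
	.num_modes	= ARRAY_SIZE(mux_modes),
};

static struct i2c_board_info mux_board_info __initdata = {
	I2C_BOARD_INFO("pca9543", 0x70),
	.platform_data	= &mux_pdata,
};

/* Registered from board code, e.g. i2c_register_board_info(0, &mux_board_info, 1). */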
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index f9daffd7d0e..e88a2cf1771 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -190,7 +190,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
BUG_ON(sense_len > sizeof(*sense));
- if (blk_sense_request(rq) || drive->sense_rq_armed)
+ if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
return;
memset(sense, 0, sizeof(*sense));
@@ -307,13 +307,16 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);
int ide_cd_get_xferlen(struct request *rq)
{
- if (blk_fs_request(rq))
+ switch (rq->cmd_type) {
+ case REQ_TYPE_FS:
return 32768;
- else if (blk_sense_request(rq) || blk_pc_request(rq) ||
- rq->cmd_type == REQ_TYPE_ATA_PC)
+ case REQ_TYPE_SENSE:
+ case REQ_TYPE_BLOCK_PC:
+ case REQ_TYPE_ATA_PC:
return blk_rq_bytes(rq);
- else
+ default:
return 0;
+ }
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
@@ -474,12 +477,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (uptodate == 0)
drive->failed_pc = NULL;
- if (blk_special_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_SPECIAL) {
rq->errors = 0;
error = 0;
} else {
- if (blk_fs_request(rq) == 0 && uptodate <= 0) {
+ if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}
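The ide-atapi.c hunks above, and the ide-cd/ide-floppy/ide-io/ide-pm changes that follow, are a mechanical conversion away from the blk_*_request() wrappers (which this series removes) onto open-coded rq->cmd_type and rq->cmd_flags tests. As a reading aid, the 1:1 substitutions are restated below; the old_ prefixes only label the former helpers and are not new code in the patch:

#include <linux/types.h>
#include <linux/blkdev.h>

static inline bool old_blk_fs_request(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_FS;		/* was blk_fs_request(rq) */
}

static inline bool old_blk_pc_request(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_BLOCK_PC;	/* was blk_pc_request(rq) */
}

static inline bool old_blk_sense_request(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_SENSE;		/* was blk_sense_request(rq) */
}

static inline bool old_blk_special_request(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_SPECIAL;	/* was blk_special_request(rq) */
}

static inline bool old_blk_rq_quiet(struct request *rq)
{
	return rq->cmd_flags & REQ_QUIET;		/* was blk_rq_quiet(rq) */
}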
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 64207df8da8..31fc76960a8 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
@@ -176,7 +177,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
if (!sense->valid)
break;
if (failed_command == NULL ||
- !blk_fs_request(failed_command))
+ failed_command->cmd_type != REQ_TYPE_FS)
break;
sector = (sense->information[0] << 24) |
(sense->information[1] << 16) |
@@ -292,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
"stat 0x%x",
rq->cmd[0], rq->cmd_type, err, stat);
- if (blk_sense_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_SENSE) {
/*
* We got an error trying to get sense info from the drive
* (probably while trying to recover from a former error).
@@ -303,7 +304,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}
/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
- if (blk_pc_request(rq) && !rq->errors)
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION;
if (blk_noretry_request(rq))
@@ -311,13 +312,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
switch (sense_key) {
case NOT_READY:
- if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) {
+ if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
if (ide_cd_breathe(drive, rq))
return 1;
} else {
cdrom_saw_media_change(drive);
- if (blk_fs_request(rq) && !blk_rq_quiet(rq))
+ if (rq->cmd_type == REQ_TYPE_FS &&
+ !(rq->cmd_flags & REQ_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
}
@@ -326,7 +328,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
case UNIT_ATTENTION:
cdrom_saw_media_change(drive);
- if (blk_fs_request(rq) == 0)
+ if (rq->cmd_type != REQ_TYPE_FS)
return 0;
/*
@@ -352,7 +354,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in retrying after an illegal request or data
* protect error.
*/
- if (!blk_rq_quiet(rq))
+ if (!(rq->cmd_flags & REQ_QUIET))
ide_dump_status(drive, "command error", stat);
do_end_request = 1;
break;
@@ -361,20 +363,20 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in re-trying a zillion times on a bad sector.
* If we got here the error is not correctable.
*/
- if (!blk_rq_quiet(rq))
+ if (!(rq->cmd_flags & REQ_QUIET))
ide_dump_status(drive, "media error "
"(bad sector)", stat);
do_end_request = 1;
break;
case BLANK_CHECK:
/* disk appears blank? */
- if (!blk_rq_quiet(rq))
+ if (!(rq->cmd_flags & REQ_QUIET))
ide_dump_status(drive, "media error (blank)",
stat);
do_end_request = 1;
break;
default:
- if (blk_fs_request(rq) == 0)
+ if (rq->cmd_type != REQ_TYPE_FS)
break;
if (err & ~ATA_ABORTED) {
/* go to the default handler for other errors */
@@ -385,7 +387,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
}
- if (blk_fs_request(rq) == 0) {
+ if (rq->cmd_type != REQ_TYPE_FS) {
rq->cmd_flags |= REQ_FAILED;
do_end_request = 1;
}
@@ -506,15 +508,22 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
return (flags & REQ_FAILED) ? -EIO : 0;
}
-static void ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
+/*
+ * returns true if rq has been completed
+ */
+static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
{
unsigned int nr_bytes = cmd->nbytes - cmd->nleft;
if (cmd->tf_flags & IDE_TFLAG_WRITE)
nr_bytes -= cmd->last_xfer_len;
- if (nr_bytes > 0)
+ if (nr_bytes > 0) {
ide_complete_rq(drive, 0, nr_bytes);
+ return true;
+ }
+
+ return false;
}
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
@@ -525,7 +534,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
- int sense = blk_sense_request(rq);
+ int sense = (rq->cmd_type == REQ_TYPE_SENSE);
unsigned int timeout;
u16 len;
u8 ireason, stat;
@@ -568,7 +577,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_read_bcount_and_ireason(drive, &len, &ireason);
- thislen = blk_fs_request(rq) ? len : cmd->nleft;
+ thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
if (thislen > len)
thislen = len;
@@ -577,7 +586,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
/* If DRQ is clear, the command has completed. */
if ((stat & ATA_DRQ) == 0) {
- if (blk_fs_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_FS) {
/*
* If we're not done reading/writing, complain.
* Otherwise, complete the command normally.
@@ -591,7 +600,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->cmd_flags |= REQ_FAILED;
uptodate = 0;
}
- } else if (!blk_pc_request(rq)) {
+ } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
ide_cd_request_sense_fixup(drive, cmd);
uptodate = cmd->nleft ? 0 : 1;
@@ -640,7 +649,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
/* pad, if necessary */
if (len > 0) {
- if (blk_fs_request(rq) == 0 || write == 0)
+ if (rq->cmd_type != REQ_TYPE_FS || write == 0)
ide_pad_transfer(drive, write, len);
else {
printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -649,11 +658,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
}
- if (blk_pc_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
timeout = rq->timeout;
} else {
timeout = ATAPI_WAIT_PC;
- if (!blk_fs_request(rq))
+ if (rq->cmd_type != REQ_TYPE_FS)
expiry = ide_cd_expiry;
}
@@ -662,7 +671,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_started;
out_end:
- if (blk_pc_request(rq) && rc == 0) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
rq->resid_len = 0;
blk_end_request_all(rq, 0);
hwif->rq = NULL;
@@ -670,7 +679,7 @@ out_end:
if (sense && uptodate)
ide_cd_complete_failed_rq(drive, rq);
- if (blk_fs_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_FS) {
if (cmd->nleft == 0)
uptodate = 1;
} else {
@@ -679,10 +688,11 @@ out_end:
}
if (uptodate == 0 && rq->bio)
- ide_cd_error_cmd(drive, cmd);
+ if (ide_cd_error_cmd(drive, cmd))
+ return ide_stopped;
/* make sure it's fully ended */
- if (blk_fs_request(rq) == 0) {
+ if (rq->cmd_type != REQ_TYPE_FS) {
rq->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
rq->resid_len += cmd->last_xfer_len;
@@ -742,7 +752,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
rq->cmd[0], rq->cmd_type);
- if (blk_pc_request(rq))
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
rq->cmd_flags |= REQ_QUIET;
else
rq->cmd_flags &= ~REQ_FAILED;
@@ -783,21 +793,26 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
if (drive->debug_mask & IDE_DBG_RQ)
blk_dump_rq_flags(rq, "ide_cd_do_request");
- if (blk_fs_request(rq)) {
+ switch (rq->cmd_type) {
+ case REQ_TYPE_FS:
if (cdrom_start_rw(drive, rq) == ide_stopped)
goto out_end;
- } else if (blk_sense_request(rq) || blk_pc_request(rq) ||
- rq->cmd_type == REQ_TYPE_ATA_PC) {
+ break;
+ case REQ_TYPE_SENSE:
+ case REQ_TYPE_BLOCK_PC:
+ case REQ_TYPE_ATA_PC:
if (!rq->timeout)
rq->timeout = ATAPI_WAIT_PC;
cdrom_do_block_pc(drive, rq);
- } else if (blk_special_request(rq)) {
+ break;
+ case REQ_TYPE_SPECIAL:
/* right now this can only be a reset... */
uptodate = 1;
goto out_end;
- } else
+ default:
BUG();
+ }
/* prepare sense request for this command */
ide_prep_sense(drive, rq);
@@ -809,7 +824,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
- if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+ if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
@@ -1365,9 +1380,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{
- if (blk_fs_request(rq))
+ if (rq->cmd_type == REQ_TYPE_FS)
return ide_cdrom_prep_fs(q, rq);
- else if (blk_pc_request(rq))
+ else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
return ide_cdrom_prep_pc(rq);
return 0;
@@ -1584,17 +1599,19 @@ static struct ide_driver ide_cdrom_driver = {
static int idecd_open(struct block_device *bdev, fmode_t mode)
{
- struct cdrom_info *info = ide_cd_get(bdev->bd_disk);
- int rc = -ENOMEM;
+ struct cdrom_info *info;
+ int rc = -ENXIO;
+ lock_kernel();
+ info = ide_cd_get(bdev->bd_disk);
if (!info)
- return -ENXIO;
+ goto out;
rc = cdrom_open(&info->devinfo, bdev, mode);
-
if (rc < 0)
ide_cd_put(info);
-
+out:
+ unlock_kernel();
return rc;
}
@@ -1602,9 +1619,11 @@ static int idecd_release(struct gendisk *disk, fmode_t mode)
{
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
+ lock_kernel();
cdrom_release(&info->devinfo, mode);
ide_cd_put(info);
+ unlock_kernel();
return 0;
}
@@ -1648,7 +1667,7 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
return 0;
}
-static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
+static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
@@ -1670,6 +1689,19 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
return err;
}
+static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = idecd_locked_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+
static int idecd_media_changed(struct gendisk *disk)
{
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
@@ -1690,7 +1722,7 @@ static const struct block_device_operations idecd_ops = {
.owner = THIS_MODULE,
.open = idecd_open,
.release = idecd_release,
- .locked_ioctl = idecd_ioctl,
+ .ioctl = idecd_ioctl,
.media_changed = idecd_media_changed,
.revalidate_disk = idecd_revalidate_disk
};
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 02712bf045c..766b3deeb23 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
touch it at all. */
if (cgc->data_direction == CGC_DATA_WRITE)
- flags |= REQ_RW;
+ flags |= REQ_WRITE;
if (cgc->sense)
memset(cgc->sense, 0, sizeof(struct request_sense));
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 0b7815d2581..2a4cb9c18f0 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -43,7 +43,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -98,9 +97,8 @@ static int ide_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->io.IOAddrLines = 3;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -229,24 +227,27 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev,
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
+ pdev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+
pdev->conf.ConfigIndex = cfg->index;
- pdev->io.BasePort1 = io->win[0].base;
- pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- if (!(io->flags & CISTPL_IO_16BIT))
- pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ pdev->resource[0]->start = io->win[0].base;
+ if (!(io->flags & CISTPL_IO_16BIT)) {
+ pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ }
if (io->nwin == 2) {
- pdev->io.NumPorts1 = 8;
- pdev->io.BasePort2 = io->win[1].base;
- pdev->io.NumPorts2 = (stk->is_kme) ? 2 : 1;
- if (pcmcia_request_io(pdev, &pdev->io) != 0)
+ pdev->resource[0]->end = 8;
+ pdev->resource[1]->start = io->win[1].base;
+ pdev->resource[1]->end = (stk->is_kme) ? 2 : 1;
+ if (pcmcia_request_io(pdev) != 0)
return -ENODEV;
- stk->ctl_base = pdev->io.BasePort2;
+ stk->ctl_base = pdev->resource[1]->start;
} else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
- pdev->io.NumPorts1 = io->win[0].len;
- pdev->io.NumPorts2 = 0;
- if (pcmcia_request_io(pdev, &pdev->io) != 0)
+ pdev->resource[0]->end = io->win[0].len;
+ pdev->resource[1]->end = 0;
+ if (pcmcia_request_io(pdev) != 0)
return -ENODEV;
- stk->ctl_base = pdev->io.BasePort1 + 0x0e;
+ stk->ctl_base = pdev->resource[0]->start + 0x0e;
} else
return -ENODEV;
/* If we've got this far, we're done */
@@ -280,7 +281,7 @@ static int ide_config(struct pcmcia_device *link)
if (pcmcia_loop_config(link, pcmcia_check_one_config, stk))
goto failed; /* No suitable config found */
}
- io_base = link->io.BasePort1;
+ io_base = link->resource[0]->start;
ctl_base = stk->ctl_base;
if (!link->irq)
@@ -297,7 +298,7 @@ static int ide_config(struct pcmcia_device *link)
outb(0x81, ctl_base+1);
host = idecs_register(io_base, ctl_base, link->irq, link);
- if (host == NULL && link->io.NumPorts1 == 0x20) {
+ if (host == NULL && resource_size(link->resource[0]) == 0x20) {
outb(0x02, ctl_base + 0x10);
host = idecs_register(io_base + 0x10, ctl_base + 0x10,
link->irq, link);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 33d65039cce..7433e07de30 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_hwif_t *hwif = drive->hwif;
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
- BUG_ON(!blk_fs_request(rq));
+ BUG_ON(rq->cmd_type != REQ_TYPE_FS);
ledtrig_ide_activity();
@@ -427,10 +427,15 @@ static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
-static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
- struct ide_cmd *cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ struct ide_cmd *cmd;
+
+ if (!(rq->cmd_flags & REQ_FLUSH))
+ return BLKPREP_OK;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
/* FIXME: map struct ide_taskfile on rq->cmd[] */
BUG_ON(cmd == NULL);
@@ -448,6 +453,8 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
rq->special = cmd;
cmd->rq = rq;
+
+ return BLKPREP_OK;
}
ide_devset_get(multcount, mult_count);
@@ -513,7 +520,6 @@ static void update_ordered(ide_drive_t *drive)
{
u16 *id = drive->id;
unsigned ordered = QUEUE_ORDERED_NONE;
- prepare_flush_fn *prep_fn = NULL;
if (drive->dev_flags & IDE_DFLAG_WCACHE) {
unsigned long long capacity;
@@ -538,12 +544,12 @@ static void update_ordered(ide_drive_t *drive)
if (barrier) {
ordered = QUEUE_ORDERED_DRAIN_FLUSH;
- prep_fn = idedisk_prepare_flush;
+ blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
}
} else
ordered = QUEUE_ORDERED_DRAIN;
- blk_queue_ordered(drive->queue, ordered, prep_fn);
+ blk_queue_ordered(drive->queue, ordered);
}
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
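idedisk_prepare_flush() becomes a blk_queue_prep_rq() hook: blk_queue_ordered() no longer takes a prepare_flush_fn, so the prep function itself filters on REQ_FLUSH and must return a BLKPREP_* code. The generic shape of such a hook (hedged; the foo_ names are invented):

#include <linux/blkdev.h>

static int foo_prep_fn(struct request_queue *q, struct request *rq)
{
	if (!(rq->cmd_flags & REQ_FLUSH))
		return BLKPREP_OK;	/* ordinary requests pass straight through */

	/* ... translate the flush into a driver-specific command here ... */
	return BLKPREP_OK;
}

static void foo_setup_queue(struct request_queue *q)
{
	blk_queue_prep_rq(q, foo_prep_fn);	/* installed once at init time */
}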
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
index 7b783dd7c0b..ec94c66918f 100644
--- a/drivers/ide/ide-disk_ioctl.c
+++ b/drivers/ide/ide-disk_ioctl.c
@@ -1,6 +1,7 @@
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
+#include <linux/smp_lock.h>
#include "ide-disk.h"
@@ -18,9 +19,13 @@ int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
{
int err;
+ lock_kernel();
err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
if (err != -EOPNOTSUPP)
- return err;
+ goto out;
- return generic_ide_ioctl(drive, bdev, cmd, arg);
+ err = generic_ide_ioctl(drive, bdev, cmd, arg);
+out:
+ unlock_kernel();
+ return err;
}
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index e9abf2c3c33..c0aa93fb7a6 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -122,7 +122,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
return ide_stopped;
/* retry only "normal" I/O: */
- if (!blk_fs_request(rq)) {
+ if (rq->cmd_type != REQ_TYPE_FS) {
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
struct ide_cmd *cmd = rq->special;
@@ -146,7 +146,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
struct request *rq = drive->hwif->rq;
- if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
+ if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
+ rq->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 4713bdca20b..5406b6ea3ad 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -73,7 +73,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
drive->failed_pc = NULL;
if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- (rq && blk_pc_request(rq)))
+ (rq && rq->cmd_type == REQ_TYPE_BLOCK_PC))
uptodate = 1; /* FIXME */
else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
@@ -98,7 +98,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
"Aborting request!\n");
}
- if (blk_special_request(rq))
+ if (rq->cmd_type == REQ_TYPE_SPECIAL)
rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
return uptodate;
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
memcpy(rq->cmd, pc->c, 12);
pc->rq = rq;
- if (rq->cmd_flags & REQ_RW)
+ if (rq->cmd_flags & REQ_WRITE)
pc->flags |= PC_FLAG_WRITING;
pc->flags |= PC_FLAG_DMA_OK;
@@ -247,14 +247,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
} else
printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
- if (blk_special_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_SPECIAL) {
rq->errors = 0;
ide_complete_rq(drive, 0, blk_rq_bytes(rq));
return ide_stopped;
} else
goto out_end;
}
- if (blk_fs_request(rq)) {
+
+ switch (rq->cmd_type) {
+ case REQ_TYPE_FS:
if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
(blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -263,13 +265,18 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
}
pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
- } else if (blk_special_request(rq) || blk_sense_request(rq)) {
+ break;
+ case REQ_TYPE_SPECIAL:
+ case REQ_TYPE_SENSE:
pc = (struct ide_atapi_pc *)rq->special;
- } else if (blk_pc_request(rq)) {
+ break;
+ case REQ_TYPE_BLOCK_PC:
pc = &floppy->queued_pc;
idefloppy_blockpc_cmd(floppy, pc, rq);
- } else
+ break;
+ default:
BUG();
+ }
ide_prep_sense(drive, rq);
@@ -280,7 +287,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
cmd.rq = rq;
- if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+ if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
@@ -290,7 +297,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
drive->failed_pc = NULL;
- if (blk_fs_request(rq) == 0 && rq->errors == 0)
+ if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index 9c2288234de..fd3d05ab341 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/cdrom.h>
+#include <linux/smp_lock.h>
#include <asm/unaligned.h>
@@ -275,12 +276,15 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
void __user *argp = (void __user *)arg;
int err;
- if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR)
- return ide_floppy_lockdoor(drive, &pc, arg, cmd);
+ lock_kernel();
+ if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
+ err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
+ goto out;
+ }
err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
if (err != -ENOTTY)
- return err;
+ goto out;
/*
* skip SCSI_IOCTL_SEND_COMMAND (deprecated)
@@ -293,5 +297,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
if (err == -ENOTTY)
err = generic_ide_ioctl(drive, bdev, cmd, arg);
+out:
+ unlock_kernel();
return err;
}
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 79399534782..70aeeb18833 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -1,3 +1,4 @@
+#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -237,6 +238,18 @@ out_put_idkp:
return ret;
}
+static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ide_gd_open(bdev, mode);
+ unlock_kernel();
+
+ return ret;
+}
+
+
static int ide_gd_release(struct gendisk *disk, fmode_t mode)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -244,6 +257,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)
ide_debug_log(IDE_DBG_FUNC, "enter");
+ lock_kernel();
if (idkp->openers == 1)
drive->disk_ops->flush(drive);
@@ -255,6 +269,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)
idkp->openers--;
ide_disk_put(idkp);
+ unlock_kernel();
return 0;
}
@@ -321,9 +336,9 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
static const struct block_device_operations ide_gd_ops = {
.owner = THIS_MODULE,
- .open = ide_gd_open,
+ .open = ide_gd_unlocked_open,
.release = ide_gd_release,
- .locked_ioctl = ide_gd_ioctl,
+ .ioctl = ide_gd_ioctl,
.getgeo = ide_gd_getgeo,
.media_changed = ide_gd_media_changed,
.unlock_native_capacity = ide_gd_unlock_native_capacity,
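The open/release/ioctl changes in ide-cd and ide-gd above, and in ide-tape further down, follow the same big-kernel-lock pushdown recipe: the block layer drops .locked_ioctl and the implicit BKL around open/release, so each driver wraps its old entry points in explicit lock_kernel()/unlock_kernel() calls. The skeleton, with invented foo_ names:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>

static int foo_locked_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	/* the old ioctl body, still assuming the BKL is held */
	return 0;
}

static int foo_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = foo_locked_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct block_device_operations foo_ops = {
	.owner	= THIS_MODULE,
	.ioctl	= foo_ioctl,	/* was .locked_ioctl = foo_locked_ioctl */
};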
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 172ac921815..a381be81407 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
- u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+ u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
u8 media = drive->media;
drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
- else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+ else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
rq->errors = -EIO;
}
@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
- BUG_ON(!blk_rq_started(rq));
+ BUG_ON(!(rq->cmd_flags & REQ_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
pm->pm_step == IDE_PM_COMPLETED)
ide_complete_pm_rq(drive, rq);
return startstop;
- } else if (!rq->rq_disk && blk_special_request(rq))
+ } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL)
/*
* TODO: Once all ULDs have been modified to
* check for specific op codes rather than
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 1c08311b0a0..92406097efe 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -191,10 +191,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
#ifdef DEBUG_PM
printk("%s: completing PM request, %s\n", drive->name,
- blk_pm_suspend_request(rq) ? "suspend" : "resume");
+ (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
#endif
spin_lock_irqsave(q->queue_lock, flags);
- if (blk_pm_suspend_request(rq))
+ if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
blk_stop_queue(q);
else
drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -210,11 +210,11 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
struct request_pm_state *pm = rq->special;
- if (blk_pm_suspend_request(rq) &&
+ if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
pm->pm_step == IDE_PM_START_SUSPEND)
/* Mark drive blocked when starting the suspend sequence. */
drive->dev_flags |= IDE_DFLAG_BLOCKED;
- else if (blk_pm_resume_request(rq) &&
+ else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
pm->pm_step == IDE_PM_START_RESUME) {
/*
* The first thing we do on wakeup is to wait for BSY bit to
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bfec0c..068cef0a987 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
if (hwif == NULL)
continue;
- if (hwif->present)
- hwif_register_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
ide_sysfs_register_port(hwif);
ide_proc_register_port(hwif);
- if (hwif->present)
+ if (hwif->present) {
ide_proc_port_register_devices(hwif);
+ hwif_register_devices(hwif);
+ }
}
return j ? 0 : -1;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index b07232880ec..6d622cb5ac8 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/ide.h>
@@ -577,7 +578,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq));
- BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq)));
+ BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL ||
+ rq->cmd_type == REQ_TYPE_SENSE));
/* Retry a failed packet command */
if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -1905,7 +1907,11 @@ static const struct file_operations idetape_fops = {
static int idetape_open(struct block_device *bdev, fmode_t mode)
{
- struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
+ struct ide_tape_obj *tape;
+
+ lock_kernel();
+ tape = ide_tape_get(bdev->bd_disk, false, 0);
+ unlock_kernel();
if (!tape)
return -ENXIO;
@@ -1917,7 +1923,10 @@ static int idetape_release(struct gendisk *disk, fmode_t mode)
{
struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
+ lock_kernel();
ide_tape_put(tape);
+ unlock_kernel();
+
return 0;
}
@@ -1926,9 +1935,14 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
{
struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
ide_drive_t *drive = tape->drive;
- int err = generic_ide_ioctl(drive, bdev, cmd, arg);
+ int err;
+
+ lock_kernel();
+ err = generic_ide_ioctl(drive, bdev, cmd, arg);
if (err == -EINVAL)
err = idetape_blkdev_ioctl(drive, cmd, arg);
+ unlock_kernel();
+
return err;
}
@@ -1936,7 +1950,7 @@ static const struct block_device_operations idetape_block_ops = {
.owner = THIS_MODULE,
.open = idetape_open,
.release = idetape_release,
- .locked_ioctl = idetape_ioctl,
+ .ioctl = idetape_ioctl,
};
static int ide_tape_probe(ide_drive_t *drive)
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 67fb73559fd..34b9872f35d 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -480,13 +480,9 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
u16 nsect = 0;
char __user *buf = (char __user *)arg;
- req_task = kzalloc(tasksize, GFP_KERNEL);
- if (req_task == NULL)
- return -ENOMEM;
- if (copy_from_user(req_task, buf, tasksize)) {
- kfree(req_task);
- return -EFAULT;
- }
+ req_task = memdup_user(buf, tasksize);
+ if (IS_ERR(req_task))
+ return PTR_ERR(req_task);
taskout = req_task->out_size;
taskin = req_task->in_size;
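The ide-taskfile.c hunk above folds the open-coded kzalloc()+copy_from_user() pair into a single memdup_user() call. As a hedged illustration of that idiom (function and variable names here are hypothetical, not from the patch), the caller tests IS_ERR() instead of NULL and still frees the buffer itself; zeroing is unnecessary because the copy overwrites the whole allocation:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */

static int handle_user_request(void __user *uptr, size_t len)
{
	void *buf;

	/* Allocates with GFP_KERNEL and copies len bytes from user space;
	 * failure is reported as an ERR_PTR(), never NULL. */
	buf = memdup_user(uptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... operate on the kernel-space copy ... */

	kfree(buf);
	return 0;
}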
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 3cb9c4e056f..fa896210ed7 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -177,7 +177,7 @@ EXPORT_SYMBOL_GPL(ide_pci_clk);
module_param_named(pci_clock, ide_pci_clk, int, 0);
MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
-static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp)
{
int a, b, i, j = 1;
unsigned int *dev_param_mask = (unsigned int *)kp->arg;
@@ -200,34 +200,40 @@ static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
return 0;
}
+static struct kernel_param_ops param_ops_ide_dev_mask = {
+ .set = ide_set_dev_param_mask
+};
+
+#define param_check_ide_dev_mask(name, p) param_check_uint(name, p)
+
static unsigned int ide_nodma;
-module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+module_param_named(nodma, ide_nodma, ide_dev_mask, 0);
MODULE_PARM_DESC(nodma, "disallow DMA for a device");
static unsigned int ide_noflush;
-module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+module_param_named(noflush, ide_noflush, ide_dev_mask, 0);
MODULE_PARM_DESC(noflush, "disable flush requests for a device");
static unsigned int ide_nohpa;
-module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0);
+module_param_named(nohpa, ide_nohpa, ide_dev_mask, 0);
MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
static unsigned int ide_noprobe;
-module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+module_param_named(noprobe, ide_noprobe, ide_dev_mask, 0);
MODULE_PARM_DESC(noprobe, "skip probing for a device");
static unsigned int ide_nowerr;
-module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+module_param_named(nowerr, ide_nowerr, ide_dev_mask, 0);
MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device");
static unsigned int ide_cdroms;
-module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+module_param_named(cdrom, ide_cdroms, ide_dev_mask, 0);
MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
struct chs_geom {
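For context on the module_param_call() to module_param_named() conversion above: module_param_named(name, var, type, perm) pastes the type token into param_ops_<type> and param_check_<type>, so supplying those two names is all that is needed to plug in a custom parser. A minimal hedged sketch with hypothetical names (not part of the patch), reusing the stock uint helpers:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int hexmask_set(const char *val, const struct kernel_param *kp)
{
	/* Parse "val" and store it behind kp->arg; defer to the stock
	 * uint parser here for brevity. */
	return param_set_uint(val, kp);
}

static struct kernel_param_ops param_ops_hexmask = {
	.set = hexmask_set,
	.get = param_get_uint,
};
#define param_check_hexmask(name, p) param_check_uint(name, p)

static unsigned int example_mask;
module_param_named(example_mask, example_mask, hexmask, 0644);
MODULE_PARM_DESC(example_mask, "illustrative mask parameter");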
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 1d80f1fdbc9..7002765b593 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -64,7 +64,7 @@ static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pair = ide_get_pair_dev(drive);
if (pair)
- safe = min(safe, pair->pio_mode - XFER_PIO_0);
+ safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
}
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 3c736775187..bed3e39aac9 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -114,7 +114,7 @@ static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pair = ide_get_pair_dev(drive);
if (pair)
- safe = min(safe, pair->pio_mode - XFER_PIO_0);
+ safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
/*
* Update Command Transfer Mode for master/slave and Data
* Transfer Mode for this drive.
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 101f4002238..d2a0997b78f 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -79,7 +79,7 @@ static struct via_isa_bridge {
{ "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt6415", PCI_DEVICE_ID_VIA_6410, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
+ { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index fb5c5186d4a..8489eb58a52 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -1,9 +1,8 @@
config INTEL_IDLE
- tristate "Cpuidle Driver for Intel Processors"
+ bool "Cpuidle Driver for Intel Processors"
depends on CPU_IDLE
depends on X86
depends on CPU_SUP_INTEL
- depends on EXPERIMENTAL
help
Enable intel_idle, a cpuidle driver that includes knowledge of
native Intel hardware idle features. The acpi_idle driver
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 03d202b1ff2..0906fc5b69b 100755..100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -77,15 +77,13 @@ static struct cpuidle_driver intel_idle_driver = {
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
-static int power_policy = 7; /* 0 = max perf; 15 = max powersave */
-static unsigned int substates;
-static int (*choose_substate)(int);
+static unsigned int mwait_substates;
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states;
-static struct cpuidle_device *intel_idle_cpuidle_devices;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
@@ -110,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.name = "NHM-C3",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.power_usage = 500,
.target_residency = 80,
@@ -119,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.name = "NHM-C6",
.desc = "MWAIT 0x20",
.driver_data = (void *) 0x20,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.power_usage = 350,
.target_residency = 800,
@@ -151,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.name = "ATM-C4",
.desc = "MWAIT 0x30",
.driver_data = (void *) 0x30,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.power_usage = 250,
.target_residency = 400,
@@ -161,48 +159,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.name = "ATM-C6",
.desc = "MWAIT 0x40",
.driver_data = (void *) 0x40,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.power_usage = 150,
.target_residency = 800,
.enter = NULL }, /* disabled */
};
-/*
- * choose_tunable_substate()
- *
- * Run-time decision on which C-state substate to invoke
- * If power_policy = 0, choose shallowest substate (0)
- * If power_policy = 15, choose deepest substate
- * If power_policy = middle, choose middle substate etc.
- */
-static int choose_tunable_substate(int cstate)
-{
- unsigned int num_substates;
- unsigned int substate_choice;
-
- power_policy &= 0xF; /* valid range: 0-15 */
- cstate &= 7; /* valid range: 0-7 */
-
- num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK;
-
- if (num_substates <= 1)
- return 0;
-
- substate_choice = ((power_policy + (power_policy + 1) *
- (num_substates - 1)) / 16);
-
- return substate_choice;
-}
-
-/*
- * choose_zero_substate()
- */
-static int choose_zero_substate(int cstate)
-{
- return 0;
-}
-
/**
* intel_idle
* @dev: cpuidle_device
@@ -220,10 +183,18 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
- eax = eax + (choose_substate)(cstate);
-
local_irq_disable();
+ /*
+ * If the state flag indicates that the TLB will be flushed or if this
+ * is the deepest c-state supported, do a voluntary leave mm to avoid
+ * costly and mostly unnecessary wakeups for flushing the user TLB's
+ * associated with the active mm.
+ */
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
+ (&dev->states[dev->state_count - 1] == state))
+ leave_mm(cpu);
+
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
@@ -259,7 +230,7 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
*/
static int intel_idle_probe(void)
{
- unsigned int eax, ebx, ecx, edx;
+ unsigned int eax, ebx, ecx;
if (max_cstate == 0) {
pr_debug(PREFIX "disabled\n");
@@ -275,17 +246,13 @@ static int intel_idle_probe(void)
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return -ENODEV;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
return -ENODEV;
-#ifdef DEBUG
- if (substates == 0) /* can over-ride via modparam */
-#endif
- substates = edx;
- pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates);
+ pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = 0xFFFFFFFF;
@@ -299,18 +266,18 @@ static int intel_idle_probe(void)
case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */
case 0x1F: /* Core i7 and i5 Processor - Nehalem */
case 0x2E: /* Nehalem-EX Xeon */
+ case 0x2F: /* Westmere-EX Xeon */
lapic_timer_reliable_states = (1 << 1); /* C1 */
case 0x25: /* Westmere */
case 0x2C: /* Westmere */
cpuidle_state_table = nehalem_cstates;
- choose_substate = choose_tunable_substate;
break;
case 0x1C: /* 28 - Atom Processor */
+ case 0x26: /* 38 - Lincroft Atom Processor */
lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
cpuidle_state_table = atom_cstates;
- choose_substate = choose_zero_substate;
break;
#ifdef FUTURE_USE
case 0x17: /* 23 - Core 2 Duo */
@@ -376,7 +343,7 @@ static int intel_idle_cpuidle_devices_init(void)
}
/* does the state exist in CPUID.MWAIT? */
- num_substates = (substates >> ((cstate) * 4))
+ num_substates = (mwait_substates >> ((cstate) * 4))
& MWAIT_SUBSTATE_MASK;
if (num_substates == 0)
continue;
@@ -450,11 +417,7 @@ static void __exit intel_idle_exit(void)
module_init(intel_idle_init);
module_exit(intel_idle_exit);
-module_param(power_policy, int, 0644);
module_param(max_cstate, int, 0444);
-#ifdef DEBUG
-module_param(substates, int, 0444);
-#endif
MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
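The intel_idle changes above drop the power_policy substate heuristic and instead gate each table entry on the CPUID MWAIT leaf: EDX (now cached in mwait_substates) carries a 4-bit sub-state count per C-state, and a count of zero means the state is not enumerated. A hedged sketch of that decode (hypothetical helper; the constants mirror the ones the driver uses):

#define MWAIT_SUBSTATE_SIZE	4	/* bits per C-state field in EDX */
#define MWAIT_SUBSTATE_MASK	0xf

static unsigned int mwait_substates;	/* EDX of cpuid(CPUID_MWAIT_LEAF, ...) */

static unsigned int mwait_substate_count(int cstate)
{
	return (mwait_substates >> (cstate * MWAIT_SUBSTATE_SIZE))
		& MWAIT_SUBSTATE_MASK;
}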
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index adaefabc40e..c5a031b79d0 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -172,7 +172,7 @@ static DEFINE_SPINLOCK(dv1394_cards_lock);
static inline struct video_card* file_to_video_card(struct file *file)
{
- return (struct video_card*) file->private_data;
+ return file->private_data;
}
/*** FRAME METHODS *********************************************************/
@@ -610,7 +610,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
} else {
u32 transmit_sec, transmit_cyc;
- u32 ts_cyc, ts_off;
+ u32 ts_cyc;
/* DMA is stopped, so this is the very first frame */
video->active_frame = this_frame;
@@ -636,7 +636,6 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
transmit_sec += transmit_cyc/8000;
transmit_cyc %= 8000;
- ts_off = ct_off;
ts_cyc = transmit_cyc + 3;
ts_cyc %= 8000;
@@ -1784,7 +1783,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
struct video_card *video = NULL;
if (file->private_data) {
- video = (struct video_card*) file->private_data;
+ video = file->private_data;
} else {
/* look up the card by ID */
@@ -2004,7 +2003,7 @@ static void ir_tasklet_func(unsigned long data)
int sof=0; /* start-of-frame flag */
struct frame *f;
- u16 packet_length, packet_time;
+ u16 packet_length;
int i, dbc=0;
struct DMA_descriptor_block *block = NULL;
u16 xferstatus;
@@ -2024,11 +2023,6 @@ static void ir_tasklet_func(unsigned long data)
sizeof(struct packet));
packet_length = le16_to_cpu(p->data_length);
- packet_time = le16_to_cpu(p->timestamp);
-
- irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
- packet_time, packet_length,
- p->data[0], p->data[1]);
/* get the descriptor based on packet_buffer cursor */
f = video->frames[video->current_packet / MAX_PACKETS];
@@ -2320,7 +2314,6 @@ static void dv1394_add_host(struct hpsb_host *host)
static void dv1394_host_reset(struct hpsb_host *host)
{
- struct ti_ohci *ohci;
struct video_card *video = NULL, *tmp_vid;
unsigned long flags;
@@ -2328,9 +2321,6 @@ static void dv1394_host_reset(struct hpsb_host *host)
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
return;
- ohci = (struct ti_ohci *)host->hostdata;
-
-
/* find the corresponding video_cards */
spin_lock_irqsave(&dv1394_cards_lock, flags);
list_for_each_entry(tmp_vid, &dv1394_cards, list) {
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index a4e9dcb6d4a..63403822330 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -58,7 +58,6 @@
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
-#include <linux/ethtool.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/unaligned.h>
@@ -173,8 +172,6 @@ static netdev_tx_t ether1394_tx(struct sk_buff *skb,
struct net_device *dev);
static void ether1394_iso(struct hpsb_iso *iso);
-static const struct ethtool_ops ethtool_ops;
-
static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
quadlet_t *data, u64 addr, size_t len, u16 flags);
static void ether1394_add_host(struct hpsb_host *host);
@@ -525,8 +522,6 @@ static void ether1394_init_dev(struct net_device *dev)
dev->header_ops = &ether1394_header_ops;
dev->netdev_ops = &ether1394_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
-
dev->watchdog_timeo = ETHER1394_TIMEOUT;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->features = NETIF_F_HIGHDMA;
@@ -1258,7 +1253,6 @@ static void ether1394_iso(struct hpsb_iso *iso)
char *buf;
struct eth1394_host_info *hi;
struct net_device *dev;
- struct eth1394_priv *priv;
unsigned int len;
u32 specifier_id;
u16 source_id;
@@ -1288,8 +1282,6 @@ static void ether1394_iso(struct hpsb_iso *iso)
(be32_to_cpu(data[1]) & 0xff000000) >> 24;
source_id = be32_to_cpu(data[0]) >> 16;
- priv = netdev_priv(dev);
-
if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
|| specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
/* This packet is not for us */
@@ -1698,17 +1690,6 @@ fail:
return NETDEV_TX_OK;
}
-static void ether1394_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, driver_name);
- strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */
-}
-
-static const struct ethtool_ops ethtool_ops = {
- .get_drvinfo = ether1394_get_drvinfo
-};
-
static int __init ether1394_init_module(void)
{
int err;
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d0dc1db80b2..50815022cff 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
if (recv->block_irq_interval * 4 > iso->buf_packets)
recv->block_irq_interval = iso->buf_packets / 4;
if (recv->block_irq_interval < 1)
- recv->block_irq_interval = 1;
+ recv->block_irq_interval = 1;
/* choose a buffer stride */
/* must be a power of 2, and <= PAGE_SIZE */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index b563d5e9fa2..f3401427404 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -440,7 +440,7 @@ static struct pending_request *next_complete_req(struct file_info *fi)
static ssize_t raw1394_read(struct file *file, char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
- struct file_info *fi = (struct file_info *)file->private_data;
+ struct file_info *fi = file->private_data;
struct pending_request *req;
ssize_t ret;
@@ -1015,7 +1015,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
struct arm_addr *arm_addr = NULL;
struct arm_request *arm_req = NULL;
struct arm_response *arm_resp = NULL;
- int found = 0, size = 0, rcode = -1, length_conflict = 0;
+ int found = 0, size = 0, rcode = -1;
struct arm_request_response *arm_req_resp = NULL;
DBGMSG("arm_write called by node: %X "
@@ -1054,7 +1054,6 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
}
if (arm_addr->rec_length < length) {
DBGMSG("arm_write blocklength too big -> rcode_data_error");
- length_conflict = 1;
rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
}
if (rcode == -1) {
@@ -2245,7 +2244,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
static ssize_t raw1394_write(struct file *file, const char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
- struct file_info *fi = (struct file_info *)file->private_data;
+ struct file_info *fi = file->private_data;
struct pending_request *req;
ssize_t retval = -EBADFD;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 4565cb5d3d1..d6e251a300c 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1350,12 +1350,11 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
struct csr1212_keyval *kv;
struct csr1212_dentry *dentry;
u64 management_agent_addr;
- u32 unit_characteristics, firmware_revision, model;
+ u32 firmware_revision, model;
unsigned workarounds;
int i;
management_agent_addr = 0;
- unit_characteristics = 0;
firmware_revision = SBP2_ROM_VALUE_MISSING;
model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
ud->model_id : SBP2_ROM_VALUE_MISSING;
@@ -1372,17 +1371,15 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
lu->lun = ORB_SET_LUN(kv->value.immediate);
break;
- case SBP2_UNIT_CHARACTERISTICS_KEY:
- /* FIXME: This is ignored so far.
- * See SBP-2 clause 7.4.8. */
- unit_characteristics = kv->value.immediate;
- break;
case SBP2_FIRMWARE_REVISION_KEY:
firmware_revision = kv->value.immediate;
break;
default:
+ /* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY
+ * mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */
+
/* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
* Its "ordered" bit has consequences for command ORB
* list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index a42bd6893bc..5c74f796d7f 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -720,7 +720,7 @@ static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
static long video1394_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+ struct file_ctx *ctx = file->private_data;
struct ti_ohci *ohci = ctx->ohci;
unsigned long flags;
void __user *argp = (void __user *)arg;
@@ -1045,14 +1045,9 @@ static long video1394_ioctl(struct file *file,
if (get_user(qv, &p->packet_sizes))
return -EFAULT;
- psizes = kmalloc(buf_size, GFP_KERNEL);
- if (!psizes)
- return -ENOMEM;
-
- if (copy_from_user(psizes, qv, buf_size)) {
- kfree(psizes);
- return -EFAULT;
- }
+ psizes = memdup_user(qv, buf_size);
+ if (IS_ERR(psizes))
+ return PTR_ERR(psizes);
}
spin_lock_irqsave(&d->lock,flags);
@@ -1177,7 +1172,7 @@ static long video1394_ioctl(struct file *file,
static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+ struct file_ctx *ctx = file->private_data;
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ctx->ohci->host->id,
@@ -1244,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
static int video1394_release(struct inode *inode, struct file *file)
{
- struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+ struct file_ctx *ctx = file->private_data;
struct ti_ohci *ohci = ctx->ohci;
struct list_head *lh, *next;
u64 mask;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ad63b79afac..64e0903091a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2409,10 +2409,12 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
- cm_state = cm_id->state;
- lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
- break;
+ if (cm_id->lap_state == IB_CM_LAP_RCVD) {
+ cm_state = cm_id->state;
+ lap_state = IB_CM_MRA_LAP_SENT;
+ msg_response = CM_MSG_RESPONSE_OTHER;
+ break;
+ }
default:
ret = -EINVAL;
goto error1;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 6babb72b39f..5fa85690951 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1085,7 +1085,6 @@ err_cdev:
static void ib_umad_kill_port(struct ib_umad_port *port)
{
struct ib_umad_file *file;
- int already_dead;
int id;
dev_set_drvdata(port->dev, NULL);
@@ -1103,7 +1102,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
list_for_each_entry(file, &port->file_list, port_list) {
mutex_lock(&file->mutex);
- already_dead = file->agents_dead;
file->agents_dead = 1;
mutex_unlock(&file->mutex);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a7da9be43e6..e0fa2223871 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -310,8 +310,8 @@ EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
- enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETY + 1];
- enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETY + 1];
+ enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1];
+ enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7d448..78fbe9ffe7f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index abd683ea326..13c88871dc3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -137,7 +137,7 @@ static void stop_ep_timer(struct iwch_ep *ep)
put_ep(&ep->com);
}
-int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
+static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
int error = 0;
struct cxio_rdev *rdev;
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
skb->priority = CPL_PRIORITY_SETUP;
set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
rpl = cplhdr(skb);
rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9bbb65bba67..c64d27bf2c1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -371,7 +371,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
- if (num_wrs <= 0) {
+ if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
err = -ENOMEM;
goto out;
@@ -554,7 +554,7 @@ int iwch_bind_mw(struct ib_qp *qp,
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
- if ((num_wrs) <= 0) {
+ if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8c9b483a0d9..32d352a88d5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@ static char *states[] = {
NULL,
};
+static int dack_mode;
+module_param(dack_mode, int, 0644);
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+
int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
@@ -469,11 +473,12 @@ static int send_connect(struct c4iw_ep *ep)
__func__);
return -ENOMEM;
}
- set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
opt0 = KEEP_ALIVE(1) |
+ DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
@@ -780,11 +785,11 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
- if (ep->com.cm_id) {
- PDBG("%s ep %p tid %u status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
+
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+ ep->hwtid, status);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+
if (status < 0) {
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
@@ -845,8 +850,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
- req->credit_dack = cpu_to_be32(credits);
- set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+ req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+ F_RX_DACK_CHANGE |
+ V_RX_DACK_MODE(dack_mode));
+ set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
}
@@ -1264,6 +1271,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
opt0 = KEEP_ALIVE(1) |
+ DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
@@ -1287,7 +1295,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
ep->hwtid));
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
- set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
return;
@@ -1344,7 +1352,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 rss_qid;
u32 mtu;
int step;
- int txq_idx;
+ int txq_idx, ctrlq_idx;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1376,6 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
txq_idx = cxgb4_port_idx(pdev) * step;
+ ctrlq_idx = cxgb4_port_idx(pdev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
dev_put(pdev);
@@ -1387,6 +1396,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+ ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(dst->neighbour->dev) * step];
@@ -1426,6 +1436,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->rss_qid = rss_qid;
child_ep->mtu = mtu;
child_ep->txq_idx = txq_idx;
+ child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
tx_chan, smac_idx, rss_qid);
@@ -1473,8 +1484,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
int closing = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
- int start_timer = 0;
- int stop_timer = 0;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1511,7 +1520,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
wake_up(&ep->com.waitq);
break;
case FPDU_MODE:
- start_timer = 1;
+ start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
closing = 1;
peer_close_upcall(ep);
@@ -1524,7 +1533,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
case MORIBUND:
- stop_timer = 1;
+ stop_ep_timer(ep);
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_IDLE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1547,10 +1556,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
- if (start_timer)
- start_ep_timer(ep);
- if (stop_timer)
- stop_ep_timer(ep);
if (disconnect)
c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
if (release)
@@ -1579,7 +1584,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned long flags;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
- int stop_timer = 0;
ep = lookup_tid(t, tid);
if (is_neg_adv_abort(req->status)) {
@@ -1594,10 +1598,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case CONNECTING:
break;
case MPA_REQ_WAIT:
- stop_timer = 1;
+ stop_ep_timer(ep);
break;
case MPA_REQ_SENT:
- stop_timer = 1;
+ stop_ep_timer(ep);
connect_reply_upcall(ep, -ECONNRESET);
break;
case MPA_REP_SENT:
@@ -1621,7 +1625,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MORIBUND:
case CLOSING:
- stop_timer = 1;
+ stop_ep_timer(ep);
/*FALLTHROUGH*/
case FPDU_MODE:
if (ep->com.cm_id && ep->com.qp) {
@@ -1667,8 +1671,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
rpl->cmd = CPL_ABORT_NO_RST;
c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
- if (stop_timer)
- stop_ep_timer(ep);
if (release)
release_ep_resources(ep);
return 0;
@@ -1683,7 +1685,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
int release = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
- int stop_timer = 0;
ep = lookup_tid(t, tid);
@@ -1697,7 +1698,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
__state_set(&ep->com, MORIBUND);
break;
case MORIBUND:
- stop_timer = 1;
+ stop_ep_timer(ep);
if ((ep->com.cm_id) && (ep->com.qp)) {
attrs.next_state = C4IW_QP_STATE_IDLE;
c4iw_modify_qp(ep->com.qp->rhp,
@@ -1717,8 +1718,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
break;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
- if (stop_timer)
- stop_ep_timer(ep);
if (release)
release_ep_resources(ep);
return 0;
@@ -1957,6 +1956,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->txq_idx = cxgb4_port_idx(pdev) * step;
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
dev_put(pdev);
@@ -1971,6 +1971,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
step = ep->com.dev->rdev.lldi.ntxq /
ep->com.dev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
@@ -2049,8 +2050,15 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail3;
/* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
+ wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+ if (ep->com.rpl_done)
+ err = ep->com.rpl_err;
+ else {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(ep->com.dev->rdev.lldi.pdev));
+ ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+ err = -EIO;
+ }
if (!err) {
cm_id->provider_data = ep;
goto out;
@@ -2079,10 +2087,17 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
err = listen_stop(ep);
if (err)
goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
+ wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+ if (ep->com.rpl_done)
+ err = ep->com.rpl_err;
+ else {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(ep->com.dev->rdev.lldi.pdev));
+ ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+ err = -EIO;
+ }
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
- err = ep->com.rpl_err;
cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
return err;
@@ -2095,8 +2110,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
int close = 0;
int fatal = 0;
struct c4iw_rdev *rdev;
- int start_timer = 0;
- int stop_timer = 0;
spin_lock_irqsave(&ep->com.lock, flags);
@@ -2120,7 +2133,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
ep->com.state = ABORTING;
else {
ep->com.state = CLOSING;
- start_timer = 1;
+ start_ep_timer(ep);
}
set_bit(CLOSE_SENT, &ep->com.flags);
break;
@@ -2128,7 +2141,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
close = 1;
if (abrupt) {
- stop_timer = 1;
+ stop_ep_timer(ep);
ep->com.state = ABORTING;
} else
ep->com.state = MORIBUND;
@@ -2146,10 +2159,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
}
spin_unlock_irqrestore(&ep->com.lock, flags);
- if (start_timer)
- start_ep_timer(ep);
- if (stop_timer)
- stop_ep_timer(ep);
if (close) {
if (abrupt)
ret = abort_connection(ep, NULL, gfp);
@@ -2244,7 +2253,7 @@ static void process_work(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct c4iw_dev *dev;
- struct cpl_act_establish *rpl = cplhdr(skb);
+ struct cpl_act_establish *rpl;
unsigned int opcode;
int ret;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fac5c6e6801..b3daf39eed4 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -43,7 +43,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
int ret;
wr_len = sizeof *res_wr + sizeof *res;
- skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -118,7 +118,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + sizeof *res;
- skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index d870f9c17c1..9bbf491d5d9 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -250,12 +250,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
rdev->cqmask = rdev->lldi.ucq_density - 1;
PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
- "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
+ "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
+ "qp qid start %u size %u cq qid start %u size %u\n",
__func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
rdev->lldi.vr->pbl.start,
rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
- rdev->lldi.vr->rq.size);
+ rdev->lldi.vr->rq.size,
+ rdev->lldi.vr->qp.start,
+ rdev->lldi.vr->qp.size,
+ rdev->lldi.vr->cq.start,
+ rdev->lldi.vr->cq.size);
PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
"qpmask 0x%x cqshift %lu cqmask 0x%x\n",
(unsigned)pci_resource_len(rdev->lldi.pdev, 2),
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d33e1a66881..ed459b8f800 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -619,6 +619,7 @@ struct c4iw_ep {
u16 plen;
u16 rss_qid;
u16 txq_idx;
+ u16 ctrlq_idx;
u8 tos;
};
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 82b5703b894..269373a62f2 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,7 +59,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
wr_len = roundup(sizeof *req + sizeof *sc +
roundup(copy_len, T4_ULPTX_MIN_IO), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 86b93f2ecca..93f6e5bf0ec 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -130,7 +130,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + 2 * sizeof *res;
- skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err7;
@@ -162,7 +162,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
- V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMIN(2) |
V_FW_RI_RES_WR_FBMAX(3) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -185,7 +185,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
- V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMIN(2) |
V_FW_RI_RES_WR_FBMAX(3) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -235,12 +235,78 @@ err1:
return -ENOMEM;
}
-static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
+ struct ib_send_wr *wr, int max, u32 *plenp)
{
+ u8 *dstp, *srcp;
+ u32 plen = 0;
int i;
+ int rem, len;
+
+ dstp = (u8 *)immdp->data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) > max)
+ return -EMSGSIZE;
+ srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
+ plen += wr->sg_list[i].length;
+ rem = wr->sg_list[i].length;
+ while (rem) {
+ if (dstp == (u8 *)&sq->queue[sq->size])
+ dstp = (u8 *)sq->queue;
+ if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
+ len = rem;
+ else
+ len = (u8 *)&sq->queue[sq->size] - dstp;
+ memcpy(dstp, srcp, len);
+ dstp += len;
+ srcp += len;
+ rem -= len;
+ }
+ }
+ immdp->op = FW_RI_DATA_IMMD;
+ immdp->r1 = 0;
+ immdp->r2 = 0;
+ immdp->immdlen = cpu_to_be32(plen);
+ *plenp = plen;
+ return 0;
+}
+
+static int build_isgl(__be64 *queue_start, __be64 *queue_end,
+ struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
+ int num_sge, u32 *plenp)
+
+{
+ int i;
+ u32 plen = 0;
+ __be64 *flitp = (__be64 *)isglp->sge;
+
+ for (i = 0; i < num_sge; i++) {
+ if ((plen + sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += sg_list[i].length;
+ *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
+ sg_list[i].length);
+ if (++flitp == queue_end)
+ flitp = queue_start;
+ *flitp = cpu_to_be64(sg_list[i].addr);
+ if (++flitp == queue_end)
+ flitp = queue_start;
+ }
+ isglp->op = FW_RI_DATA_ISGL;
+ isglp->r1 = 0;
+ isglp->nsge = cpu_to_be16(num_sge);
+ isglp->r2 = 0;
+ if (plenp)
+ *plenp = plen;
+ return 0;
+}
+
+static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
+ struct ib_send_wr *wr, u8 *len16)
+{
u32 plen;
int size;
- u8 *datap;
+ int ret;
if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
@@ -267,43 +333,23 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
default:
return -EINVAL;
}
+
plen = 0;
if (wr->num_sge) {
if (wr->send_flags & IB_SEND_INLINE) {
- datap = (u8 *)wqe->send.u.immd_src[0].data;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) >
- T4_MAX_SEND_INLINE) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- memcpy(datap,
- (void *)(unsigned long)wr->sg_list[i].addr,
- wr->sg_list[i].length);
- datap += wr->sg_list[i].length;
- }
- wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
- wqe->send.u.immd_src[0].r1 = 0;
- wqe->send.u.immd_src[0].r2 = 0;
- wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ ret = build_immd(sq, wqe->send.u.immd_src, wr,
+ T4_MAX_SEND_INLINE, &plen);
+ if (ret)
+ return ret;
size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
plen;
} else {
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
- plen += wr->sg_list[i].length;
- wqe->send.u.isgl_src[0].sge[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->send.u.isgl_src[0].sge[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->send.u.isgl_src[0].sge[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
- wqe->send.u.isgl_src[0].r1 = 0;
- wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
- wqe->send.u.isgl_src[0].r2 = 0;
+ ret = build_isgl((__be64 *)sq->queue,
+ (__be64 *)&sq->queue[sq->size],
+ wqe->send.u.isgl_src,
+ wr->sg_list, wr->num_sge, &plen);
+ if (ret)
+ return ret;
size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
wr->num_sge * sizeof(struct fw_ri_sge);
}
@@ -313,62 +359,40 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
wqe->send.u.immd_src[0].r2 = 0;
wqe->send.u.immd_src[0].immdlen = 0;
size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+ plen = 0;
}
*len16 = DIV_ROUND_UP(size, 16);
wqe->send.plen = cpu_to_be32(plen);
return 0;
}
-static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
+ struct ib_send_wr *wr, u8 *len16)
{
- int i;
u32 plen;
int size;
- u8 *datap;
+ int ret;
- if (wr->num_sge > T4_MAX_WRITE_SGE)
+ if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
wqe->write.r2 = 0;
wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
- plen = 0;
if (wr->num_sge) {
if (wr->send_flags & IB_SEND_INLINE) {
- datap = (u8 *)wqe->write.u.immd_src[0].data;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) >
- T4_MAX_WRITE_INLINE) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- memcpy(datap,
- (void *)(unsigned long)wr->sg_list[i].addr,
- wr->sg_list[i].length);
- datap += wr->sg_list[i].length;
- }
- wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
- wqe->write.u.immd_src[0].r1 = 0;
- wqe->write.u.immd_src[0].r2 = 0;
- wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ ret = build_immd(sq, wqe->write.u.immd_src, wr,
+ T4_MAX_WRITE_INLINE, &plen);
+ if (ret)
+ return ret;
size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
plen;
} else {
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
- plen += wr->sg_list[i].length;
- wqe->write.u.isgl_src[0].sge[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->write.u.isgl_src[0].sge[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->write.u.isgl_src[0].sge[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
- wqe->write.u.isgl_src[0].r1 = 0;
- wqe->write.u.isgl_src[0].nsge =
- cpu_to_be16(wr->num_sge);
- wqe->write.u.isgl_src[0].r2 = 0;
+ ret = build_isgl((__be64 *)sq->queue,
+ (__be64 *)&sq->queue[sq->size],
+ wqe->write.u.isgl_src,
+ wr->sg_list, wr->num_sge, &plen);
+ if (ret)
+ return ret;
size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
wr->num_sge * sizeof(struct fw_ri_sge);
}
@@ -378,6 +402,7 @@ static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
wqe->write.u.immd_src[0].r2 = 0;
wqe->write.u.immd_src[0].immdlen = 0;
size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+ plen = 0;
}
*len16 = DIV_ROUND_UP(size, 16);
wqe->write.plen = cpu_to_be32(plen);
@@ -416,29 +441,13 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
struct ib_recv_wr *wr, u8 *len16)
{
- int i;
- int plen = 0;
+ int ret;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
- plen += wr->sg_list[i].length;
- wqe->recv.isgl.sge[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->recv.isgl.sge[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->recv.isgl.sge[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- for (; i < T4_MAX_RECV_SGE; i++) {
- wqe->recv.isgl.sge[i].stag = 0;
- wqe->recv.isgl.sge[i].len = 0;
- wqe->recv.isgl.sge[i].to = 0;
- }
- wqe->recv.isgl.op = FW_RI_DATA_ISGL;
- wqe->recv.isgl.r1 = 0;
- wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
- wqe->recv.isgl.r2 = 0;
+ ret = build_isgl((__be64 *)qhp->wq.rq.queue,
+ (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
+ &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+ if (ret)
+ return ret;
*len16 = DIV_ROUND_UP(sizeof wqe->recv +
wr->num_sge * sizeof(struct fw_ri_sge), 16);
return 0;
@@ -547,7 +556,9 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
- wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+ wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+ qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+
fw_flags = 0;
if (wr->send_flags & IB_SEND_SOLICITED)
fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
@@ -564,12 +575,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode = FW_RI_SEND;
else
swsqe->opcode = FW_RI_SEND_WITH_INV;
- err = build_rdma_send(wqe, wr, &len16);
+ err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
break;
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
- err = build_rdma_write(wqe, wr, &len16);
+ err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
break;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_READ_WITH_INV:
@@ -619,8 +630,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode, swsqe->read_len);
wr = wr->next;
num_wrs--;
- t4_sq_produce(&qhp->wq);
- idx++;
+ t4_sq_produce(&qhp->wq, len16);
+ idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
if (t4_wq_db_enabled(&qhp->wq))
t4_ring_sq_db(&qhp->wq, idx);
@@ -656,7 +667,9 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
*bad_wr = wr;
break;
}
- wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+ wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
+ qhp->wq.rq.wq_pidx *
+ T4_EQ_ENTRY_SIZE);
if (num_wrs)
err = build_rdma_recv(qhp, wqe, wr, &len16);
else
@@ -675,15 +688,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->recv.r2[1] = 0;
wqe->recv.r2[2] = 0;
wqe->recv.len16 = len16;
- if (len16 < 5)
- wqe->flits[8] = 0;
-
PDBG("%s cookie 0x%llx pidx %u\n", __func__,
(unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
- t4_rq_produce(&qhp->wq);
+ t4_rq_produce(&qhp->wq, len16);
+ idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wr = wr->next;
num_wrs--;
- idx++;
}
if (t4_wq_db_enabled(&qhp->wq))
t4_ring_rq_db(&qhp->wq, idx);
@@ -951,7 +961,8 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
__flush_qp(qhp, rchp, schp, flag);
}
-static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+ struct c4iw_ep *ep)
{
struct fw_ri_wr *wqe;
int ret;
@@ -959,12 +970,12 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
struct sk_buff *skb;
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- qhp->ep->hwtid);
+ ep->hwtid);
- skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
@@ -972,7 +983,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_OP(FW_RI_INIT_WR) |
FW_WR_COMPL(1));
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_FLOWID(ep->hwtid) |
FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
wqe->cookie = (u64)&wr_wait;
@@ -1035,7 +1046,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
- skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
@@ -1202,17 +1213,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_CLOSING:
BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
qhp->attr.state = C4IW_QP_STATE_CLOSING;
+ ep = qhp->ep;
if (!internal) {
abort = 0;
disconnect = 1;
- ep = qhp->ep;
c4iw_get_ep(&ep->com);
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ret = rdma_fini(rhp, qhp);
+ ret = rdma_fini(rhp, qhp, ep);
spin_lock_irqsave(&qhp->lock, flag);
if (ret) {
- ep = qhp->ep;
c4iw_get_ep(&ep->com);
disconnect = abort = 1;
goto err;
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index fb195d1d901..83b23dfa250 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -110,11 +110,12 @@ static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
spin_lock_init(&rdev->resource.qid_fifo_lock);
- if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
- GFP_KERNEL))
+ if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
+ sizeof(u32), GFP_KERNEL))
return -ENOMEM;
- for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
+ for (i = rdev->lldi.vr->qp.start;
+ i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
if (!(i & rdev->qpmask))
kfifo_in(&rdev->resource.qid_fifo,
(unsigned char *) &i, sizeof(u32));
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 9cf8d85bfcf..24f369046ef 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -36,8 +36,6 @@
#include "t4_msg.h"
#include "t4fw_ri_api.h"
-#define T4_QID_BASE 1024
-#define T4_MAX_QIDS 256
#define T4_MAX_NUM_QP (1<<16)
#define T4_MAX_NUM_CQ (1<<15)
#define T4_MAX_NUM_PD (1<<15)
@@ -65,10 +63,10 @@ struct t4_status_page {
u8 db_off;
};
-#define T4_EQ_SIZE 64
+#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 4
-#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
@@ -84,7 +82,7 @@ struct t4_status_page {
#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
-#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
union t4_wr {
@@ -97,20 +95,18 @@ union t4_wr {
struct fw_ri_fr_nsmr_wr fr;
struct fw_ri_inv_lstag_wr inv;
struct t4_status_page status;
- __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+ __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
union t4_recv_wr {
struct fw_ri_recv_wr recv;
struct t4_status_page status;
- __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+ __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
- int slots_used;
-
wqe->send.opcode = (u8)opcode;
wqe->send.flags = flags;
wqe->send.wrid = wrid;
@@ -118,12 +114,6 @@ static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
wqe->send.r1[1] = 0;
wqe->send.r1[2] = 0;
wqe->send.len16 = len16;
-
- slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
- while (slots_used < T4_SQ_NUM_SLOTS) {
- wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
- slots_used++;
- }
}
/* CQE/AE status codes */
@@ -289,6 +279,7 @@ struct t4_sq {
u16 size;
u16 cidx;
u16 pidx;
+ u16 wq_pidx;
};
struct t4_swrqe {
@@ -310,6 +301,7 @@ struct t4_rq {
u16 size;
u16 cidx;
u16 pidx;
+ u16 wq_pidx;
};
struct t4_wq {
@@ -340,11 +332,14 @@ static inline u32 t4_rq_avail(struct t4_wq *wq)
return wq->rq.size - 1 - wq->rq.in_use;
}
-static inline void t4_rq_produce(struct t4_wq *wq)
+static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
wq->rq.in_use++;
if (++wq->rq.pidx == wq->rq.size)
wq->rq.pidx = 0;
+ wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+ if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
+ wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline void t4_rq_consume(struct t4_wq *wq)
@@ -370,11 +365,14 @@ static inline u32 t4_sq_avail(struct t4_wq *wq)
return wq->sq.size - 1 - wq->sq.in_use;
}
-static inline void t4_sq_produce(struct t4_wq *wq)
+static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
wq->sq.in_use++;
if (++wq->sq.pidx == wq->sq.size)
wq->sq.pidx = 0;
+ wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+ if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
+ wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}
static inline void t4_sq_consume(struct t4_wq *wq)
@@ -386,14 +384,12 @@ static inline void t4_sq_consume(struct t4_wq *wq)
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
{
- inc *= T4_SQ_NUM_SLOTS;
wmb();
writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
{
- inc *= T4_RQ_NUM_SLOTS;
wmb();
writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}
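The t4.h rework above stops assuming every work request fills a whole slot group: t4_sq_produce()/t4_rq_produce() now advance a separate wq_pidx by the number of 64-byte equeue entries the WR actually occupies, and the doorbell increment is passed through unscaled. A hedged arithmetic sketch of that conversion (hypothetical helper, not from the patch):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>

#define T4_EQ_ENTRY_SIZE 64	/* as defined in t4.h above */

/* 64-byte equeue slots consumed by a WR of len16 16-byte units;
 * e.g. len16 = 5 (80 bytes) needs DIV_ROUND_UP(80, 64) = 2 slots. */
static inline u16 wr_to_eq_slots(u8 len16)
{
	return DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
}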
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index fc706bd07fa..dc193c29267 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -826,4 +826,14 @@ struct ulptx_idata {
#define S_ULPTX_NSGE 0
#define M_ULPTX_NSGE 0xFFFF
#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+
+#define S_RX_DACK_MODE 29
+#define M_RX_DACK_MODE 0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE 31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0136abd50dd..aaf6023a483 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -112,7 +112,7 @@ struct ehca_sport {
struct ehca_shca {
struct ib_device ib_device;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
u8 num_ports;
int hw_level;
struct list_head shca_list;
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 3b87589b8ea..d9b1bb40f48 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -122,21 +122,21 @@ int ehca_create_eq(struct ehca_shca *shca,
/* register interrupt handlers and initialize work queues */
if (type == EHCA_EQ) {
+ tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
IRQF_DISABLED, "ehca_eq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
-
- tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
} else if (type == EHCA_NEQ) {
+ tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
+
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
IRQF_DISABLED, "ehca_neq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
-
- tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
}
eq->is_initialized = 1;
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index ecb51b396c4..c240e9972cb 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -360,7 +360,8 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
* a firmware property, so it's valid across all adapters
*/
if (ehca_lock_hcalls == -1)
- ehca_lock_hcalls = !(shca->hca_cap & HCA_CAP_H_ALLOC_RES_SYNC);
+ ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
+ shca->hca_cap);
/* translate supported MR page sizes; always support 4K */
shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
@@ -712,7 +713,7 @@ static struct attribute_group ehca_dev_attr_grp = {
.attrs = ehca_dev_attrs
};
-static int __devinit ehca_probe(struct of_device *dev,
+static int __devinit ehca_probe(struct platform_device *dev,
const struct of_device_id *id)
{
struct ehca_shca *shca;
@@ -878,7 +879,7 @@ probe1:
return -EINVAL;
}
-static int __devexit ehca_remove(struct of_device *dev)
+static int __devexit ehca_remove(struct platform_device *dev)
{
struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
unsigned long flags;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 31a68b9c52d..53f4cd4fc19 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -933,11 +933,6 @@ int ehca_unmap_fmr(struct list_head *fmr_list)
/* check all FMR belong to same SHCA, and check internal flag */
list_for_each_entry(ib_fmr, fmr_list, list) {
prev_shca = shca;
- if (!ib_fmr) {
- ehca_gen_err("bad fmr=%p in list", ib_fmr);
- ret = -EINVAL;
- goto unmap_fmr_exit0;
- }
shca = container_of(ib_fmr->device, struct ehca_shca,
ib_device);
e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 47d388ec1cd..32fb34201ab 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -251,7 +251,7 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
return ST_UD;
case IB_QPT_RAW_IPV6:
return -EINVAL;
- case IB_QPT_RAW_ETY:
+ case IB_QPT_RAW_ETHERTYPE:
return -EINVAL;
default:
ehca_gen_err("Invalid ibqptype=%x", ibqptype);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 4d5dc3304d4..e6f9cdd94c7 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -269,6 +269,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
struct ehca_alloc_cq_parms *param)
{
+ int rc;
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
@@ -283,8 +284,19 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
param->act_nr_of_entries = (u32)outs[3];
param->act_pages = (u32)outs[4];
- if (ret == H_SUCCESS)
- hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+ if (ret == H_SUCCESS) {
+ rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+ if (rc) {
+ ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
+ rc, outs[5]);
+
+ ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ cq->ipz_cq_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0);
+ ret = H_NO_MEM;
+ }
+ }
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -295,6 +307,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_alloc_qp_parms *parms, int is_user)
{
+ int rc;
u64 ret;
u64 allocate_controls, max_r10_reg, r11, r12;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
@@ -358,8 +371,19 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
parms->rqueue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
- if (ret == H_SUCCESS)
- hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+ if (ret == H_SUCCESS) {
+ rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+ if (rc) {
+ ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
+ rc, outs[6]);
+
+ ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ parms->qp_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0);
+ ret = H_NO_MEM;
+ }
+ }
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index b3e0e72e8a7..077376ff3d2 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -42,10 +42,9 @@
#include "ehca_classes.h"
#include "hipz_hw.h"
-int hcall_map_page(u64 physaddr, u64 *mapaddr)
+u64 hcall_map_page(u64 physaddr)
{
- *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
- return 0;
+ return (u64)ioremap(physaddr, EHCA_PAGESIZE);
}
int hcall_unmap_page(u64 mapaddr)
@@ -58,9 +57,9 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
u64 paddr_kernel, u64 paddr_user)
{
if (!is_user) {
- int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
- if (ret)
- return ret;
+ galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
+ if (!galpas->kernel.fw_handle)
+ return -ENOMEM;
} else
galpas->kernel.fw_handle = 0;
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 204227d5303..d1b02991024 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -83,7 +83,7 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
int hcp_galpas_dtor(struct h_galpas *galpas);
-int hcall_map_page(u64 physaddr, u64 * mapaddr);
+u64 hcall_map_page(u64 physaddr);
int hcall_unmap_page(u64 mapaddr);
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 21337468c65..765f0fc1da7 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -390,6 +390,8 @@ done:
ipath_enable_armlaunch(dd);
}
+static void cleanup_device(struct ipath_devdata *dd);
+
static int __devinit ipath_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -616,8 +618,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
goto bail;
bail_irqsetup:
- if (pdev->irq)
- free_irq(pdev->irq, dd);
+ cleanup_device(dd);
+
+ if (dd->ipath_irq)
+ dd->ipath_f_free_irq(dd);
+
+ if (dd->ipath_f_cleanup)
+ dd->ipath_f_cleanup(dd);
bail_iounmap:
iounmap((volatile void __iomem *) dd->ipath_kregbase);
@@ -635,7 +642,7 @@ bail:
return ret;
}
-static void __devexit cleanup_device(struct ipath_devdata *dd)
+static void cleanup_device(struct ipath_devdata *dd)
{
int port;
struct ipath_portdata **tmp;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 3603ae89b60..f4ceecd9684 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1817,7 +1817,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
case IB_QPT_RAW_IPV6:
op_mod = 2;
break;
- case IB_QPT_RAW_ETY:
+ case IB_QPT_RAW_ETHERTYPE:
op_mod = 3;
break;
default:
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index de7b9d7166f..0c9f0aa5d4e 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,8 +110,8 @@ static unsigned int sysfs_nonidx_addr;
static unsigned int sysfs_idx_addr;
static struct pci_device_id nes_pci_table[] = {
- {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
+ { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), },
+ { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), },
{0}
};
@@ -259,13 +259,11 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
unsigned long flags;
struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- u32 qp_id;
atomic_inc(&qps_destroyed);
/* Free the control structures */
- qp_id = nesqp->hwqp.qp_id;
if (nesqp->pbl_vbase) {
pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
@@ -441,7 +439,6 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
struct net_device *netdev = NULL;
struct nes_device *nesdev = NULL;
int ret = 0;
- struct nes_vnic *nesvnic = NULL;
void __iomem *mmio_regs = NULL;
u8 hw_rev;
@@ -664,25 +661,21 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
nes_notifiers_registered++;
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) {
- goto bail7;
- }
-
- /* Register network device */
- ret = register_netdev(netdev);
- if (ret) {
- printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
- nes_netdev_destroy(netdev);
- goto bail7;
- }
+ if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ goto bail7;
- nes_print_macaddr(netdev);
- /* create a CM core for this netdev */
- nesvnic = netdev_priv(netdev);
+ /* Register network device */
+ ret = register_netdev(netdev);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
+ nes_netdev_destroy(netdev);
+ goto bail7;
+ }
- nesdev->netdev_count++;
- nesdev->nesadapter->netdev_count++;
+ nes_print_macaddr(netdev);
+ nesdev->netdev_count++;
+ nesdev->nesadapter->netdev_count++;
printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
pci_name(pcidev));
@@ -1104,7 +1097,7 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
i++;
}
- return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta);
+ return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value);
}
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index cc78fee1dd5..b3d145e82b4 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -262,6 +262,7 @@ struct nes_device {
u16 base_doorbell_index;
u16 currcq_count;
u16 deepcq_count;
+ u8 iw_status;
u8 msi_enabled;
u8 netdev_count;
u8 napi_isr_ran;
@@ -527,6 +528,7 @@ void nes_cm_disconn_worker(void *);
int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
struct nes_ib_device *nes_init_ofa_device(struct net_device *);
+void nes_port_ibevent(struct nes_vnic *nesvnic);
void nes_destroy_ofa_device(struct nes_ib_device *);
int nes_register_ofa_device(struct nes_ib_device *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d876d0435cd..6220d9d75b5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
struct iw_cm_id *cm_id = cm_node->cm_id;
- switch (cm_node->state) {
+ enum nes_cm_node_state state = cm_node->state;
+ cm_node->state = NES_CM_STATE_CLOSED;
+ switch (state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
case NES_CM_STATE_FIN_WAIT1:
if (cm_node->cm_id)
cm_id->rem_ref(cm_id);
- cm_node->state = NES_CM_STATE_CLOSED;
send_reset(cm_node, NULL);
break;
default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
break;
case NES_CM_STATE_MPAREQ_RCVD:
passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT)
- create_event(cm_node, NES_CM_EVENT_RESET);
- cm_node->state = NES_CM_STATE_CLOSED;
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
case NES_CM_STATE_CLOSED:
drop_packet(skb);
break;
+ case NES_CM_STATE_FIN_WAIT2:
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -1719,8 +1718,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
{
int datasize = 0;
u32 inc_sequence;
- u32 rem_seq_ack;
- u32 rem_seq;
int ret = 0;
int optionsize;
optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
@@ -1730,8 +1727,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
skb_pull(skb, tcph->doff << 2);
inc_sequence = ntohl(tcph->seq);
- rem_seq = ntohl(tcph->seq);
- rem_seq_ack = ntohl(tcph->ack_seq);
datasize = skb->len;
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
@@ -2565,7 +2560,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
u16 last_ae;
u8 original_hw_tcp_state;
u8 original_ibqp_state;
- enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+ enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK;
int issue_disconn = 0;
int issue_close = 0;
int issue_flush = 0;
@@ -2706,7 +2701,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
nesibdev = nesvnic->nesibdev;
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
- atomic_read(&nesvnic->netdev->refcnt));
+ netdev_refcnt_read(nesvnic->netdev));
if (nesqp->active_conn) {
@@ -2781,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -EINVAL;
}
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == NES_SEND_RESET_EVENT) {
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return -ECONNRESET;
+ }
+
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2790,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
atomic_inc(&cm_accepts);
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
- atomic_read(&nesvnic->netdev->refcnt));
+ netdev_refcnt_read(nesvnic->netdev));
/* allocate the ietf frame and space for private data */
nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
@@ -2983,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
"ret=%d\n", __func__, __LINE__, ret);
- passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT)
- create_event(cm_node, NES_CM_EVENT_RESET);
return 0;
}
@@ -3128,17 +3126,15 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
struct nes_vnic *nesvnic;
struct nes_cm_listener *cm_node;
struct nes_cm_info cm_info;
- struct nes_adapter *adapter;
int err;
-
nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
cm_id, ntohs(cm_id->local_addr.sin_port));
nesvnic = to_nesvnic(cm_id->device);
if (!nesvnic)
return -EINVAL;
- adapter = nesvnic->nesdev->nesadapter;
+
nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
nesvnic, nesvnic->netdev, nesvnic->netdev->name);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 57874a16508..1980a461c49 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1970,7 +1970,7 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
dev_kfree_skb(
nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
- nesvnic->nic.sq_tail = (++nesvnic->nic.sq_tail)
+ nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1)
& (nesvnic->nic.sq_size - 1);
}
@@ -2737,9 +2737,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
nesnic->sq_tail &= nesnic->sq_size-1;
if (sq_cqes > 128) {
barrier();
- /* restart the queue if it had been stopped */
- if (netif_queue_stopped(nesvnic->netdev))
- netif_wake_queue(nesvnic->netdev);
+ /* restart the queue if it had been stopped */
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_wake_queue(nesvnic->netdev);
sq_cqes = 0;
}
} else {
@@ -2999,11 +2999,8 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
{
- u16 pkt_len;
-
if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
/* skip over ethernet header */
- pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
pkt += ETH_HLEN;
/* Skip over IP and TCP headers */
@@ -3283,9 +3280,15 @@ static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *n
else
mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
- nes_terminate_start_timer(nesqp);
- nesqp->term_flags |= NES_TERM_SENT;
- nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+ if (!nesdev->iw_status) {
+ nesqp->term_flags = NES_TERM_DONE;
+ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0);
+ nes_cm_disconn(nesqp);
+ } else {
+ nes_terminate_start_timer(nesqp);
+ nesqp->term_flags |= NES_TERM_SENT;
+ nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+ }
}
static void nes_terminate_send_fin(struct nes_device *nesdev,
@@ -3465,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
return; /* Ignore it, wait for close complete */
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+ (nesqp->ibqp_state == IB_QPS_RTS) &&
+ ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ }
nesqp->cm_id->add_ref(nesqp->cm_id);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3474,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
async_event_id, nesqp->last_aeq, tcp_state);
}
-
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index bbbfe9fc5a5..1204c3432b6 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
enum pci_regs {
NES_INT_STAT = 0x0000,
@@ -1100,11 +1101,12 @@ struct nes_adapter {
u32 wqm_wat;
u32 core_clock;
u32 firmware_version;
+ u32 eeprom_version;
u32 nic_rx_eth_route_err;
u32 et_rx_coalesce_usecs;
- u32 et_rx_max_coalesced_frames;
+ u32 et_rx_max_coalesced_frames;
u32 et_rx_coalesce_usecs_irq;
u32 et_rx_max_coalesced_frames_irq;
u32 et_pkt_rate_low;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 42e7aad1ec2..10560c796fd 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -232,6 +232,13 @@ static int nes_netdev_open(struct net_device *netdev)
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
first_nesvnic = nesvnic;
}
+
+ if (nesvnic->of_device_registered) {
+ nesdev->iw_status = 1;
+ nesdev->nesadapter->send_term_ok = 1;
+ nes_port_ibevent(nesvnic);
+ }
+
if (first_nesvnic->linkup) {
/* Enable network packets */
nesvnic->linkup = 1;
@@ -309,9 +316,9 @@ static int nes_netdev_stop(struct net_device *netdev)
if (nesvnic->of_device_registered) {
- nes_destroy_ofa_device(nesvnic->nesibdev);
- nesvnic->nesibdev = NULL;
- nesvnic->of_device_registered = 0;
+ nesdev->nesadapter->send_term_ok = 0;
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
}
nes_destroy_nic_qp(nesvnic);
@@ -463,7 +470,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u16 nhoffset;
u16 wqes_needed;
u16 wqes_available;
- u32 old_head;
u32 wqe_misc;
/*
@@ -503,7 +509,6 @@ sq_no_longer_full:
if (skb_is_gso(skb)) {
nesvnic->segmented_tso_requests++;
nesvnic->tso_requests++;
- old_head = nesnic->sq_head;
/* Basically 4 fragments available per WQE with extended fragments */
wqes_needed = nr_frags >> 2;
wqes_needed += (nr_frags&3)?1:0;
@@ -1441,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a9f5dd272f1..f9c417c6b3b 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -190,6 +190,11 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10);
+ printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
+ nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
+ (u32)((u8)eeprom_data);
+
no_fw_rev:
/* eeprom is valid */
eeprom_offset = nesadapter->software_eeprom_offset;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 9bc2d744b2e..546fc22405f 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -518,7 +518,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
memset(props, 0, sizeof(*props));
memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
- props->fw_ver = nesdev->nesadapter->fw_ver;
+ props->fw_ver = nesdev->nesadapter->firmware_version;
props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
props->vendor_id = nesdev->nesadapter->vendor_id;
props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
@@ -785,7 +785,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
- atomic_read(&nesvnic->netdev->refcnt));
+ netdev_refcnt_read(nesvnic->netdev));
err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
nesadapter->max_pd, &pd_num, &nesadapter->next_pd);
@@ -1416,7 +1416,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
- atomic_read(&nesvnic->netdev->refcnt));
+ netdev_refcnt_read(nesvnic->netdev));
return &nesqp->ibqp;
}
@@ -1941,7 +1941,7 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
u8 use_256_pbls = 0;
u8 use_4k_pbls = 0;
u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
- struct nes_root_vpbl new_root = {0, 0, 0};
+ struct nes_root_vpbl new_root = { 0, NULL, NULL };
u32 opcode = 0;
u16 major_code;
@@ -2112,13 +2112,12 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
u32 driver_key = 0;
u32 root_pbl_index = 0;
u32 cur_pbl_index = 0;
- int err = 0, pbl_depth = 0;
+ int err = 0;
int ret = 0;
u16 pbl_count = 0;
u8 single_page = 1;
u8 stag_key = 0;
- pbl_depth = 0;
region_length = 0;
vpbl.pbl_vbase = NULL;
root_vpbl.pbl_vbase = NULL;
@@ -2931,7 +2930,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int ret;
u16 original_last_aeq;
u8 issue_modify_qp = 0;
- u8 issue_disconnect = 0;
u8 dont_wait = 0;
nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
@@ -3058,6 +3056,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hte_added = 0;
}
if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
+ (nesdev->iw_status) &&
(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
next_iwarp_state |= NES_CQP_QP_RESET;
} else {
@@ -3082,7 +3081,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
nesqp->iwarp_state);
- issue_disconnect = 1;
} else {
nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
@@ -3936,6 +3934,17 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
return nesibdev;
}
+void nes_port_ibevent(struct nes_vnic *nesvnic)
+{
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct ib_event event;
+ event.device = &nesibdev->ibdev;
+ event.element.port_num = nesvnic->logical_port + 1;
+ event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+}
+
/**
* nes_destroy_ofa_device
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3593983df7b..61de0654820 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -45,6 +45,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
@@ -326,6 +327,9 @@ struct qib_verbs_txreq {
#define QIB_DEFAULT_MTU 4096
+/* max number of IB ports supported per HCA */
+#define QIB_MAX_IB_PORTS 2
+
/*
* Possible IB config parameters for f_get/set_ib_table()
*/
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index b3955ed8f79..145da404088 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -279,7 +279,7 @@ struct qib_base_info {
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define QIB_USER_SWMINOR 10
+#define QIB_USER_SWMINOR 11
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
@@ -302,6 +302,18 @@ struct qib_base_info {
#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
/*
+ * If the unit is specified via open, HCA choice is fixed. If port is
+ * specified, it's also fixed. Otherwise we try to spread contexts
+ * across ports and HCAs, using different algorithms. WITHIN is
+ * the old default, prior to this mechanism.
+ */
+#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
+ * ports; this is the default */
+#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
+ * active ports within), then next HCA */
+#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
+
+/*
* This structure is passed to qib_userinit() to tell the driver where
* user code buffers are, sizes, etc. The offsets and sizes of the
* fields must remain unchanged, for binary compatibility. It can
@@ -319,7 +331,7 @@ struct qib_user_info {
/* size of struct base_info to write to */
__u32 spu_base_info_size;
- __u32 _spu_unused3;
+ __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused user minor < 11 */
/*
* If two or more processes wish to share a context, each process
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index f15ce076ac4..9cd193603fb 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
}
- for (last = 0, i = 1; !last; i += !last) {
+ for (last = 0, i = 1; !last && i <= 64; i += !last) {
hdr = dd->f_get_msgheader(dd, rhf_addr);
eflags = qib_hdrget_err_flags(rhf_addr);
etype = qib_hdrget_rcv_type(rhf_addr);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index a142a9eb522..6b11645edf3 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1294,128 +1294,130 @@ bail:
return ret;
}
-static inline int usable(struct qib_pportdata *ppd, int active_only)
+static inline int usable(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
- u32 linkok = active_only ? QIBL_LINKACTIVE :
- (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
- (ppd->lflags & linkok);
+ (ppd->lflags & QIBL_LINKACTIVE);
}
-static int find_free_ctxt(int unit, struct file *fp,
- const struct qib_user_info *uinfo)
+/*
+ * Select a context on the given device, either using a requested port
+ * or the port based on the context number.
+ */
+static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
+ const struct qib_user_info *uinfo)
{
- struct qib_devdata *dd = qib_lookup(unit);
struct qib_pportdata *ppd = NULL;
- int ret;
- u32 ctxt;
+ int ret, ctxt;
- if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
- ret = -ENODEV;
- goto bail;
- }
-
- /*
- * If users requests specific port, only try that one port, else
- * select "best" port below, based on context.
- */
- if (uinfo->spu_port) {
- ppd = dd->pport + uinfo->spu_port - 1;
- if (!usable(ppd, 0)) {
+ if (port) {
+ if (!usable(dd->pport + port - 1)) {
ret = -ENETDOWN;
- goto bail;
- }
+ goto done;
+ } else
+ ppd = dd->pport + port - 1;
}
-
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- if (dd->rcd[ctxt])
- continue;
- /*
- * The setting and clearing of user context rcd[x] protected
- * by the qib_mutex
- */
- if (!ppd) {
- /* choose port based on ctxt, if up, else 1st up */
- ppd = dd->pport + (ctxt % dd->num_pports);
- if (!usable(ppd, 0)) {
- int i;
- for (i = 0; i < dd->num_pports; i++) {
- ppd = dd->pport + i;
- if (usable(ppd, 0))
- break;
- }
- if (i == dd->num_pports) {
- ret = -ENETDOWN;
- goto bail;
- }
- }
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
+ ctxt++)
+ ;
+ if (ctxt == dd->cfgctxts) {
+ ret = -EBUSY;
+ goto done;
+ }
+ if (!ppd) {
+ u32 pidx = ctxt % dd->num_pports;
+ if (usable(dd->pport + pidx))
+ ppd = dd->pport + pidx;
+ else {
+ for (pidx = 0; pidx < dd->num_pports && !ppd;
+ pidx++)
+ if (usable(dd->pport + pidx))
+ ppd = dd->pport + pidx;
}
- ret = setup_ctxt(ppd, ctxt, fp, uinfo);
- goto bail;
}
- ret = -EBUSY;
+ ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
+done:
+ return ret;
+}
+
+static int find_free_ctxt(int unit, struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = qib_lookup(unit);
+ int ret;
+
+ if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
+ ret = -ENODEV;
+ else
+ ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
-bail:
return ret;
}
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
+ unsigned alg)
{
- struct qib_pportdata *ppd;
- int ret = 0, devmax;
- int npresent, nup;
- int ndev;
+ struct qib_devdata *udd = NULL;
+ int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
u32 port = uinfo->spu_port, ctxt;
devmax = qib_count_units(&npresent, &nup);
+ if (!npresent) {
+ ret = -ENXIO;
+ goto done;
+ }
+ if (nup == 0) {
+ ret = -ENETDOWN;
+ goto done;
+ }
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- /* device portion of usable() */
- if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
- continue;
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- if (dd->rcd[ctxt])
+ if (alg == QIB_PORT_ALG_ACROSS) {
+ unsigned inuse = ~0U;
+ /* find device (with ACTIVE ports) with fewest ctxts in use */
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+ unsigned cused = 0, cfree = 0;
+ if (!dd)
continue;
- if (port) {
- if (port > dd->num_pports)
- continue;
- ppd = dd->pport + port - 1;
- if (!usable(ppd, 0))
- continue;
- } else {
- /*
- * choose port based on ctxt, if up, else
- * first port that's up for multi-port HCA
- */
- ppd = dd->pport + (ctxt % dd->num_pports);
- if (!usable(ppd, 0)) {
- int j;
-
- ppd = NULL;
- for (j = 0; j < dd->num_pports &&
- !ppd; j++)
- if (usable(dd->pport + j, 0))
- ppd = dd->pport + j;
- if (!ppd)
- continue; /* to next unit */
- }
+ if (port && port <= dd->num_pports &&
+ usable(dd->pport + port - 1))
+ dusable = 1;
+ else
+ for (i = 0; i < dd->num_pports; i++)
+ if (usable(dd->pport + i))
+ dusable++;
+ if (!dusable)
+ continue;
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
+ ctxt++)
+ if (dd->rcd[ctxt])
+ cused++;
+ else
+ cfree++;
+ if (cfree && cused < inuse) {
+ udd = dd;
+ inuse = cused;
}
- ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ }
+ if (udd) {
+ ret = choose_port_ctxt(fp, udd, port, uinfo);
goto done;
}
+ } else {
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+ if (dd) {
+ ret = choose_port_ctxt(fp, dd, port, uinfo);
+ if (!ret)
+ goto done;
+ if (ret == -EBUSY)
+ dusable++;
+ }
+ }
}
-
- if (npresent) {
- if (nup == 0)
- ret = -ENETDOWN;
- else
- ret = -EBUSY;
- } else
- ret = -ENXIO;
+ ret = dusable ? -EBUSY : -ENETDOWN;
done:
return ret;
@@ -1481,7 +1483,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
int ret;
int i_minor;
- unsigned swmajor, swminor;
+ unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
/* Check to be sure we haven't already initialized this file */
if (ctxt_fp(fp)) {
@@ -1498,6 +1500,9 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
swminor = uinfo->spu_userversion & 0xffff;
+ if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
+ alg = uinfo->spu_port_alg;
+
mutex_lock(&qib_mutex);
if (qib_compatible_subctxts(swmajor, swminor) &&
@@ -1514,7 +1519,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
if (i_minor)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else
- ret = get_a_ctxt(fp, uinfo);
+ ret = get_a_ctxt(fp, uinfo, alg);
done_chk_sdma:
if (!ret) {
@@ -1862,7 +1867,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
int ret = 0;
- if (!usable(rcd->ppd, 1)) {
+ if (!usable(rcd->ppd)) {
int i;
/*
* if link is down, or otherwise not usable, delay
@@ -1881,7 +1886,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
&rcd->user_event_mask[i]);
}
- for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+ for (i = 0; !usable(rcd->ppd) && i < 300; i++)
msleep(100);
ret = -ENETDOWN;
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 844954bf417..9f989c0ba9d 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
}
static const struct file_operations driver_ops[] = {
- { .read = driver_stats_read, },
- { .read = driver_names_read, },
+ { .read = driver_stats_read, .llseek = generic_file_llseek, },
+ { .read = driver_names_read, .llseek = generic_file_llseek, },
};
/* read the per-device counters */
@@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
}
static const struct file_operations cntr_ops[] = {
- { .read = dev_counters_read, },
- { .read = dev_names_read, },
+ { .read = dev_counters_read, .llseek = generic_file_llseek, },
+ { .read = dev_names_read, .llseek = generic_file_llseek, },
};
/*
@@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
}
static const struct file_operations portcntr_ops[] = {
- { .read = portnames_read, },
- { .read = portcntrs_1_read, },
- { .read = portcntrs_2_read, },
+ { .read = portnames_read, .llseek = generic_file_llseek, },
+ { .read = portcntrs_1_read, .llseek = generic_file_llseek, },
+ { .read = portcntrs_2_read, .llseek = generic_file_llseek, },
};
/*
@@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf,
}
static const struct file_operations qsfp_ops[] = {
- { .read = qsfp_1_read, },
- { .read = qsfp_2_read, },
+ { .read = qsfp_1_read, .llseek = generic_file_llseek, },
+ { .read = qsfp_2_read, .llseek = generic_file_llseek, },
};
static ssize_t flash_read(struct file *file, char __user *buf,
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5eedf83e2c3..584d443b533 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5864,7 +5864,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
* Doesn't clear any of the error bits that might be set.
*/
val = TIDFLOW_ERRBITS; /* these are W1C */
- for (i = 0; i < dd->ctxtcnt; i++) {
+ for (i = 0; i < dd->cfgctxts; i++) {
int flow;
for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
@@ -7271,6 +7271,8 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ /* Turn off IB latency mode */
+ data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
qib_write_kreg_port(ppd, krp_serdesctrl, data |
SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a873dd596e8..f1d16d3a01f 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -93,7 +93,7 @@ unsigned long *qib_cpulist;
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
if (!qib_cfgctxts)
- dd->cfgctxts = dd->ctxtcnt;
+ dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
else if (qib_cfgctxts < dd->num_pports)
dd->cfgctxts = dd->ctxtcnt;
else if (qib_cfgctxts <= dd->ctxtcnt)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e0f65e39076..6c39851d2de 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
*
* Flushes both send and receive work queues.
* Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
* If we are already in error state, just return.
*/
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a373719..a0931119bd7 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@ done:
/*
* Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
*/
static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
{
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
struct qib_ibport *ibp;
unsigned long flags;
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
if (qp->s_flags & QIB_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
qib_restart_rc(qp, qp->s_last_psn + 1, 1);
qib_schedule_send(qp);
}
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this now that we hold the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto ack_done;
-
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
goto ack_done;
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
ibp->n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this now that we hold the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto unlock_done;
for (i = qp->r_head_ack_queue; ; i = prev) {
if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
- /* Prevent simultaneous processing after APM on different CPUs */
- spin_lock(&qp->r_lock);
-
/*
* Process responses (ACKs) before anything else. Note that the
* packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
hdrsize, pmtu, rcd);
- goto runlock;
+ return;
}
/* Compute 24 bits worth of difference. */
diff = qib_cmp24(psn, qp->r_psn);
if (unlikely(diff)) {
if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
- goto runlock;
+ return;
goto send_ack;
}
@@ -2090,9 +2082,6 @@ send_last:
if (next > QIB_MAX_RDMA_ATOMIC)
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this while holding the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto srunlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@ send_last:
qp->s_flags |= QIB_S_RESP_PENDING;
qib_schedule_send(qp);
- goto srunlock;
+ goto sunlock;
}
case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@ send_last:
if (next > QIB_MAX_RDMA_ATOMIC)
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this while holding the s_lock. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto srunlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@ send_last:
qp->s_flags |= QIB_S_RESP_PENDING;
qib_schedule_send(qp);
- goto srunlock;
+ goto sunlock;
}
default:
@@ -2227,7 +2213,7 @@ send_last:
/* Send an ACK if requested or required. */
if (psn & (1 << 31))
goto send_ack;
- goto runlock;
+ return;
rnr_nak:
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@ rnr_nak:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
- goto runlock;
+ return;
nack_op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@ nack_op_err:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
- goto runlock;
+ return;
nack_inv_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@ nack_inv:
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
- goto runlock;
+ return;
nack_acc_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@ nack_acc:
qp->r_ack_psn = qp->r_psn;
send_ack:
qib_send_rc_ack(qp);
-runlock:
- spin_unlock(&qp->r_lock);
- return;
-
-srunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- spin_unlock(&qp->r_lock);
return;
sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index b8456881f7f..cad44491320 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -656,6 +656,7 @@ unmap:
}
qp = tx->qp;
qib_put_txreq(tx);
+ spin_lock(&qp->r_lock);
spin_lock(&qp->s_lock);
if (qp->ibqp.qp_type == IB_QPT_RC) {
/* XXX what about error sending RDMA read responses? */
@@ -664,6 +665,7 @@ unmap:
} else if (qp->s_wqe)
qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->r_lock);
/* return zero to process the next send work request */
goto unlock;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index dab4d9f4a2c..d50a33fe8bb 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = {
#define QIB_DIAGC_ATTR(N) \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .attr = { .name = __stringify(N), .mode = 0664 }, \
.counter = offsetof(struct qib_ibport, n_##N) \
}
@@ -403,8 +403,27 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
}
+static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, diagc_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+ char *endp;
+ long val = simple_strtol(buf, &endp, 0);
+
+ if (val < 0 || endp == buf)
+ return -EINVAL;
+
+ *(u32 *)((char *) qibp + dattr->counter) = val;
+ return size;
+}
+
static const struct sysfs_ops qib_diagc_ops = {
.show = diagc_attr_show,
+ .store = diagc_attr_store,
};
static struct kobj_type qib_diagc_ktype = {
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index af30232b683..7f36454c225 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
unsigned cnt)
{
- struct qib_pportdata *ppd, *pppd[dd->num_pports];
+ struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
unsigned i;
unsigned long flags;
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 6c7fe78cca6..b9c8b6346c1 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode >>= 24;
memset(&wc, 0, sizeof wc);
- /* Prevent simultaneous processing after APM on different CPUs */
- spin_lock(&qp->r_lock);
-
/* Compare the PSN verses the expected PSN. */
if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
/*
@@ -534,7 +531,6 @@ rdma_last:
}
qp->r_psn++;
qp->r_state = opcode;
- spin_unlock(&qp->r_lock);
return;
rewind:
@@ -542,12 +538,10 @@ rewind:
qp->r_sge.num_sge = 0;
drop:
ibp->n_pkt_drops++;
- spin_unlock(&qp->r_lock);
return;
op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- spin_unlock(&qp->r_lock);
return;
sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index c838cda7334..e1b3da2a1f8 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
wc.byte_len = tlen + sizeof(struct ib_grh);
/*
- * We need to serialize getting a receive work queue entry and
- * generating a completion for it against QPs sending to this QP
- * locally.
- */
- spin_lock(&qp->r_lock);
-
- /*
* Get the next work request entry to find where to put the data.
*/
if (qp->r_flags & QIB_R_REUSE_SGE)
@@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
ret = qib_get_rwqe(qp, 0);
if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto bail_unlock;
+ return;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
ibp->n_vl15_dropped++;
- goto bail_unlock;
+ return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
ibp->n_pkt_drops++;
- goto bail_unlock;
+ return;
}
if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
- goto bail_unlock;
+ return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
@@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail_unlock:
- spin_unlock(&qp->r_lock);
bail:;
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cda8f4173d2..9fab4048885 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+ spin_lock(&qp->r_lock);
+
/* Check for valid receive state. */
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
- return;
+ goto unlock;
}
switch (qp->ibqp.qp_type) {
@@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
default:
break;
}
+
+unlock:
+ spin_unlock(&qp->r_lock);
}
/**
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 0b9ef071658..95a08a8ca8a 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
}
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
int i, j;
u64 dma_addr;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index ed3f9ebae88..7f8f16bad75 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -811,6 +811,38 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return len;
}
+static int srp_post_recv(struct srp_target_port *target)
+{
+ unsigned long flags;
+ struct srp_iu *iu;
+ struct ib_sge list;
+ struct ib_recv_wr wr, *bad_wr;
+ unsigned int next;
+ int ret;
+
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+
+ next = target->rx_head & (SRP_RQ_SIZE - 1);
+ wr.wr_id = next;
+ iu = target->rx_ring[next];
+
+ list.addr = iu->dma;
+ list.length = iu->size;
+ list.lkey = target->srp_host->srp_dev->mr->lkey;
+
+ wr.next = NULL;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ ret = ib_post_recv(target->qp, &wr, &bad_wr);
+ if (!ret)
+ ++target->rx_head;
+
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+
+ return ret;
+}
+
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
struct srp_request *req;
@@ -868,6 +900,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
struct ib_device *dev;
struct srp_iu *iu;
+ int res;
u8 opcode;
iu = target->rx_ring[wc->wr_id];
@@ -879,21 +912,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
opcode = *(u8 *) iu->buf;
if (0) {
- int i;
-
shost_printk(KERN_ERR, target->scsi_host,
PFX "recv completion, opcode 0x%02x\n", opcode);
-
- for (i = 0; i < wc->byte_len; ++i) {
- if (i % 8 == 0)
- printk(KERN_ERR " [%02x] ", i);
- printk(" %02x", ((u8 *) iu->buf)[i]);
- if ((i + 1) % 8 == 0)
- printk("\n");
- }
-
- if (wc->byte_len % 8)
- printk("\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
+ iu->buf, wc->byte_len, true);
}
switch (opcode) {
@@ -915,6 +937,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
+
+ res = srp_post_recv(target);
+ if (res != 0)
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Recv failed with error code %d\n", res);
}
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
@@ -954,45 +981,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
}
}
-static int __srp_post_recv(struct srp_target_port *target)
-{
- struct srp_iu *iu;
- struct ib_sge list;
- struct ib_recv_wr wr, *bad_wr;
- unsigned int next;
- int ret;
-
- next = target->rx_head & (SRP_RQ_SIZE - 1);
- wr.wr_id = next;
- iu = target->rx_ring[next];
-
- list.addr = iu->dma;
- list.length = iu->size;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
-
- wr.next = NULL;
- wr.sg_list = &list;
- wr.num_sge = 1;
-
- ret = ib_post_recv(target->qp, &wr, &bad_wr);
- if (!ret)
- ++target->rx_head;
-
- return ret;
-}
-
-static int srp_post_recv(struct srp_target_port *target)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
- ret = __srp_post_recv(target);
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
- return ret;
-}
-
/*
* Must be called with target->scsi_host->host_lock held to protect
* req_lim and tx_head. Lock cannot be dropped between call here and
@@ -1102,11 +1090,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err;
}
- if (__srp_post_recv(target)) {
- shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n");
- goto err_unmap;
- }
-
ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
@@ -1249,6 +1232,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
int attr_mask = 0;
int comp = 0;
int opcode = 0;
+ int i;
switch (event->event) {
case IB_CM_REQ_ERROR:
@@ -1298,7 +1282,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
if (target->status)
break;
- target->status = srp_post_recv(target);
+ for (i = 0; i < SRP_RQ_SIZE; i++) {
+ target->status = srp_post_recv(target);
+ if (target->status)
+ break;
+ }
if (target->status)
break;
@@ -1564,6 +1552,18 @@ static ssize_t show_orig_dgid(struct device *dev,
return sprintf(buf, "%pI6\n", target->orig_dgid);
}
+static ssize_t show_req_lim(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ if (target->state == SRP_TARGET_DEAD ||
+ target->state == SRP_TARGET_REMOVED)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n", target->req_lim);
+}
+
static ssize_t show_zero_req_lim(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1598,6 +1598,7 @@ static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
+static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
@@ -1609,6 +1610,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_pkey,
&dev_attr_dgid,
&dev_attr_orig_dgid,
+ &dev_attr_req_lim,
&dev_attr_zero_req_lim,
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 054edf346e0..c908c5f8364 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -492,13 +492,15 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
}
#define OLD_KEY_MAX 0x1ff
-static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user *p, int compat_mode)
+static int handle_eviocgbit(struct input_dev *dev,
+ unsigned int type, unsigned int size,
+ void __user *p, int compat_mode)
{
static unsigned long keymax_warn_time;
unsigned long *bits;
int len;
- switch (_IOC_NR(cmd) & EV_MAX) {
+ switch (type) {
case 0: bits = dev->evbit; len = EV_MAX; break;
case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
@@ -517,7 +519,7 @@ static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user
* EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len'
* should be in bytes, not in bits.
*/
- if ((_IOC_NR(cmd) & EV_MAX) == EV_KEY && _IOC_SIZE(cmd) == OLD_KEY_MAX) {
+ if (type == EV_KEY && size == OLD_KEY_MAX) {
len = OLD_KEY_MAX;
if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
printk(KERN_WARNING
@@ -528,7 +530,7 @@ static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user
BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
}
- return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
+ return bits_to_user(bits, len, size, p, compat_mode);
}
#undef OLD_KEY_MAX
@@ -542,8 +544,10 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
struct ff_effect effect;
int __user *ip = (int __user *)p;
unsigned int i, t, u, v;
+ unsigned int size;
int error;
+ /* First we check for fixed-length commands */
switch (cmd) {
case EVIOCGVERSION:
@@ -610,112 +614,102 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return evdev_grab(evdev, client);
else
return evdev_ungrab(evdev, client);
+ }
- default:
-
- if (_IOC_TYPE(cmd) != 'E')
- return -EINVAL;
-
- if (_IOC_DIR(cmd) == _IOC_READ) {
+ size = _IOC_SIZE(cmd);
- if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
- return handle_eviocgbit(dev, cmd, p, compat_mode);
+ /* Now check variable-length commands */
+#define EVIOC_MASK_SIZE(nr) ((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
- return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd),
- p, compat_mode);
+ switch (EVIOC_MASK_SIZE(cmd)) {
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0)))
- return bits_to_user(dev->led, LED_MAX, _IOC_SIZE(cmd),
- p, compat_mode);
+ case EVIOCGKEY(0):
+ return bits_to_user(dev->key, KEY_MAX, size, p, compat_mode);
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0)))
- return bits_to_user(dev->snd, SND_MAX, _IOC_SIZE(cmd),
- p, compat_mode);
+ case EVIOCGLED(0):
+ return bits_to_user(dev->led, LED_MAX, size, p, compat_mode);
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0)))
- return bits_to_user(dev->sw, SW_MAX, _IOC_SIZE(cmd),
- p, compat_mode);
+ case EVIOCGSND(0):
+ return bits_to_user(dev->snd, SND_MAX, size, p, compat_mode);
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0)))
- return str_to_user(dev->name, _IOC_SIZE(cmd), p);
+ case EVIOCGSW(0):
+ return bits_to_user(dev->sw, SW_MAX, size, p, compat_mode);
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0)))
- return str_to_user(dev->phys, _IOC_SIZE(cmd), p);
+ case EVIOCGNAME(0):
+ return str_to_user(dev->name, size, p);
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0)))
- return str_to_user(dev->uniq, _IOC_SIZE(cmd), p);
+ case EVIOCGPHYS(0):
+ return str_to_user(dev->phys, size, p);
- if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
+ case EVIOCGUNIQ(0):
+ return str_to_user(dev->uniq, size, p);
- t = _IOC_NR(cmd) & ABS_MAX;
+ case EVIOC_MASK_SIZE(EVIOCSFF):
+ if (input_ff_effect_from_user(p, size, &effect))
+ return -EFAULT;
- abs.value = dev->abs[t];
- abs.minimum = dev->absmin[t];
- abs.maximum = dev->absmax[t];
- abs.fuzz = dev->absfuzz[t];
- abs.flat = dev->absflat[t];
- abs.resolution = dev->absres[t];
+ error = input_ff_upload(dev, &effect, file);
- if (copy_to_user(p, &abs, min_t(size_t,
- _IOC_SIZE(cmd),
- sizeof(struct input_absinfo))))
- return -EFAULT;
+ if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
+ return -EFAULT;
- return 0;
- }
+ return error;
+ }
- }
+ /* Multi-number variable-length handlers */
+ if (_IOC_TYPE(cmd) != 'E')
+ return -EINVAL;
- if (_IOC_DIR(cmd) == _IOC_WRITE) {
+ if (_IOC_DIR(cmd) == _IOC_READ) {
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCSFF)) {
+ if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
+ return handle_eviocgbit(dev,
+ _IOC_NR(cmd) & EV_MAX, size,
+ p, compat_mode);
- if (input_ff_effect_from_user(p, _IOC_SIZE(cmd), &effect))
- return -EFAULT;
+ if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
- error = input_ff_upload(dev, &effect, file);
+ t = _IOC_NR(cmd) & ABS_MAX;
+ abs = dev->absinfo[t];
- if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
- return -EFAULT;
+ if (copy_to_user(p, &abs, min_t(size_t,
+ size, sizeof(struct input_absinfo))))
+ return -EFAULT;
- return error;
- }
+ return 0;
+ }
+ }
- if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
+	if (_IOC_DIR(cmd) == _IOC_WRITE) {
- t = _IOC_NR(cmd) & ABS_MAX;
+ if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
- if (copy_from_user(&abs, p, min_t(size_t,
- _IOC_SIZE(cmd),
- sizeof(struct input_absinfo))))
- return -EFAULT;
+ t = _IOC_NR(cmd) & ABS_MAX;
- /* We can't change number of reserved MT slots */
- if (t == ABS_MT_SLOT)
- return -EINVAL;
+ if (copy_from_user(&abs, p, min_t(size_t,
+ size, sizeof(struct input_absinfo))))
+ return -EFAULT;
- /*
- * Take event lock to ensure that we are not
- * changing device parameters in the middle
- * of event.
- */
- spin_lock_irq(&dev->event_lock);
+ if (size < sizeof(struct input_absinfo))
+ abs.resolution = 0;
- dev->abs[t] = abs.value;
- dev->absmin[t] = abs.minimum;
- dev->absmax[t] = abs.maximum;
- dev->absfuzz[t] = abs.fuzz;
- dev->absflat[t] = abs.flat;
- dev->absres[t] = _IOC_SIZE(cmd) < sizeof(struct input_absinfo) ?
- 0 : abs.resolution;
+ /* We can't change number of reserved MT slots */
+ if (t == ABS_MT_SLOT)
+ return -EINVAL;
- spin_unlock_irq(&dev->event_lock);
+ /*
+ * Take event lock to ensure that we are not
+ * changing device parameters in the middle
+ * of event.
+ */
+ spin_lock_irq(&dev->event_lock);
+ dev->absinfo[t] = abs;
+ spin_unlock_irq(&dev->event_lock);
- return 0;
- }
+ return 0;
}
}
+
return -EINVAL;
}
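The EVIOC_MASK_SIZE() rework above matters because the buffer size is encoded directly into the ioctl number for EVIOCGNAME(), EVIOCGBIT() and friends, so every caller-chosen size is a different request value. A small user-space sketch (not part of the patch; the event node path is an example) of why masking the size bits lets a single case label match them all:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	char name[128] = "unknown";
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* sizeof(name) lands in the size bits of the request; the driver
	 * masks it off for matching but still honours it when copying */
	if (ioctl(fd, EVIOCGNAME(sizeof(name)), name) >= 0)
		printf("device name: %s\n", name);
	return 0;
}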
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e1243b4b32a..ab698205651 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -182,7 +182,7 @@ static int input_handle_abs_event(struct input_dev *dev,
is_mt_event = code >= ABS_MT_FIRST && code <= ABS_MT_LAST;
if (!is_mt_event) {
- pold = &dev->abs[code];
+ pold = &dev->absinfo[code].value;
} else if (dev->mt) {
struct input_mt_slot *mtslot = &dev->mt[dev->slot];
pold = &mtslot->abs[code - ABS_MT_FIRST];
@@ -196,7 +196,7 @@ static int input_handle_abs_event(struct input_dev *dev,
if (pold) {
*pval = input_defuzz_abs_event(*pval, *pold,
- dev->absfuzz[code]);
+ dev->absinfo[code].fuzz);
if (*pold == *pval)
return INPUT_IGNORE_EVENT;
@@ -204,8 +204,8 @@ static int input_handle_abs_event(struct input_dev *dev,
}
/* Flush pending "slot" event */
- if (is_mt_event && dev->slot != dev->abs[ABS_MT_SLOT]) {
- dev->abs[ABS_MT_SLOT] = dev->slot;
+ if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
+ input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
}
@@ -391,6 +391,43 @@ void input_inject_event(struct input_handle *handle,
EXPORT_SYMBOL(input_inject_event);
/**
+ * input_alloc_absinfo - allocates array of input_absinfo structs
+ * @dev: the input device emitting absolute events
+ *
+ * If the absinfo struct the caller asked for is already allocated, this
+ * function will not do anything.
+ */
+void input_alloc_absinfo(struct input_dev *dev)
+{
+ if (!dev->absinfo)
+ dev->absinfo = kcalloc(ABS_CNT, sizeof(struct input_absinfo),
+ GFP_KERNEL);
+
+ WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
+}
+EXPORT_SYMBOL(input_alloc_absinfo);
+
+void input_set_abs_params(struct input_dev *dev, unsigned int axis,
+ int min, int max, int fuzz, int flat)
+{
+ struct input_absinfo *absinfo;
+
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo)
+ return;
+
+ absinfo = &dev->absinfo[axis];
+ absinfo->minimum = min;
+ absinfo->maximum = max;
+ absinfo->fuzz = fuzz;
+ absinfo->flat = flat;
+
+ dev->absbit[BIT_WORD(axis)] |= BIT_MASK(axis);
+}
+EXPORT_SYMBOL(input_set_abs_params);
+
+
+/**
* input_grab_device - grabs device for exclusive use
* @handle: input handle that wants to own the device
*
@@ -1308,6 +1345,7 @@ static void input_dev_release(struct device *device)
input_ff_destroy(dev);
input_mt_destroy_slots(dev);
+ kfree(dev->absinfo);
kfree(dev);
module_put(THIS_MODULE);
@@ -1561,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
* @dev: input device supporting MT events and finger tracking
* @num_slots: number of slots used by the device
*
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
*/
int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
{
+ int i;
+
if (!num_slots)
return 0;
@@ -1576,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
dev->mtsize = num_slots;
input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
+ /* Mark slots as 'unused' */
+ for (i = 0; i < num_slots; i++)
+ dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
return 0;
}
EXPORT_SYMBOL(input_mt_create_slots);
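For driver authors, the practical effect of the absinfo conversion above is that per-axis parameters are no longer poked into the old dev->abs/absmin/absmax arrays but go through input_set_abs_params() and the input_abs_{get,set}_*() accessors operating on the lazily allocated dev->absinfo[]. A minimal, illustrative sketch (the axis ranges and resolution are invented):

#include <linux/input.h>

/* Illustrative only; not a function from the patch. */
static void example_setup_axes(struct input_dev *dev)
{
	/* allocates dev->absinfo on first use and sets the bits in absbit */
	input_set_abs_params(dev, ABS_X, 0, 1023, 4, 8);
	input_set_abs_params(dev, ABS_Y, 0, 767, 4, 8);

	/* per-field accessors replace the old open-coded array writes */
	input_abs_set_res(dev, ABS_X, 10);	/* resolution value is invented */

	if (input_abs_get_max(dev, ABS_X) > input_abs_get_min(dev, ABS_X))
		input_abs_set_val(dev, ABS_X,
				  (input_abs_get_min(dev, ABS_X) +
				   input_abs_get_max(dev, ABS_X)) / 2);
}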
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 63834585c28..d85bd8a7967 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -530,7 +530,7 @@ static int joydev_ioctl_common(struct joydev *joydev,
{
struct input_dev *dev = joydev->handle.dev;
size_t len;
- int i, j;
+ int i;
const char *name;
/* Process fixed-sized commands. */
@@ -562,12 +562,11 @@ static int joydev_ioctl_common(struct joydev *joydev,
case JSIOCSCORR:
if (copy_from_user(joydev->corr, argp,
sizeof(joydev->corr[0]) * joydev->nabs))
- return -EFAULT;
+ return -EFAULT;
for (i = 0; i < joydev->nabs; i++) {
- j = joydev->abspam[i];
- joydev->abs[i] = joydev_correct(dev->abs[j],
- &joydev->corr[i]);
+ int val = input_abs_get_val(dev, joydev->abspam[i]);
+ joydev->abs[i] = joydev_correct(val, &joydev->corr[i]);
}
return 0;
@@ -848,25 +847,27 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
for (i = 0; i < joydev->nabs; i++) {
j = joydev->abspam[i];
- if (dev->absmax[j] == dev->absmin[j]) {
+ if (input_abs_get_max(dev, j) == input_abs_get_min(dev, j)) {
joydev->corr[i].type = JS_CORR_NONE;
- joydev->abs[i] = dev->abs[j];
+ joydev->abs[i] = input_abs_get_val(dev, j);
continue;
}
joydev->corr[i].type = JS_CORR_BROKEN;
- joydev->corr[i].prec = dev->absfuzz[j];
- joydev->corr[i].coef[0] =
- (dev->absmax[j] + dev->absmin[j]) / 2 - dev->absflat[j];
- joydev->corr[i].coef[1] =
- (dev->absmax[j] + dev->absmin[j]) / 2 + dev->absflat[j];
+ joydev->corr[i].prec = input_abs_get_fuzz(dev, j);
+
+ t = (input_abs_get_max(dev, j) + input_abs_get_min(dev, j)) / 2;
+ joydev->corr[i].coef[0] = t - input_abs_get_flat(dev, j);
+ joydev->corr[i].coef[1] = t + input_abs_get_flat(dev, j);
- t = (dev->absmax[j] - dev->absmin[j]) / 2 - 2 * dev->absflat[j];
+ t = (input_abs_get_max(dev, j) - input_abs_get_min(dev, j)) / 2
+ - 2 * input_abs_get_flat(dev, j);
if (t) {
joydev->corr[i].coef[2] = (1 << 29) / t;
joydev->corr[i].coef[3] = (1 << 29) / t;
- joydev->abs[i] = joydev_correct(dev->abs[j],
- joydev->corr + i);
+ joydev->abs[i] =
+ joydev_correct(input_abs_get_val(dev, j),
+ joydev->corr + i);
}
}
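The refactored block computes the same calibration coefficients as before, only through the accessors. As a worked example with a hypothetical axis of min 0, max 255 and flat 15, the standalone snippet below mirrors that arithmetic:

#include <stdio.h>

int main(void)
{
	int min = 0, max = 255, flat = 15;
	int centre = (max + min) / 2;		/* 127 */
	int coef0 = centre - flat;		/* 112: lower edge of the dead zone */
	int coef1 = centre + flat;		/* 142: upper edge of the dead zone */
	int t = (max - min) / 2 - 2 * flat;	/* 97 */
	int coef2 = (1 << 29) / t;		/* shared scale factor, ~5.5 million */

	printf("%d %d %d %d %d\n", centre, coef0, coef1, t, coef2);
	return 0;
}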
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 6489f4010c4..d259b41354b 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -342,7 +342,8 @@ static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv)
for (i = 0; i < 4; i++) {
if (i < 2)
- input_set_abs_params(input_dev, axes[i], 48, input_dev->abs[axes[i]] * 2 - 48, 0, 8);
+ input_set_abs_params(input_dev, axes[i],
+ 48, input_abs_get_val(input_dev, axes[i]) * 2 - 48, 0, 8);
else
input_set_abs_params(input_dev, axes[i], 2, 253, 0, 0);
input_set_abs_params(input_dev, ABS_HAT0X + i, -1, 1, 0, 0);
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 89c4c084d4a..b992fbf91f2 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -452,7 +452,7 @@ static void adi_init_center(struct adi *adi)
for (i = 0; i < adi->axes10 + adi->axes8 + (adi->hats + (adi->pad != -1)) * 2; i++) {
t = adi->abs[i];
- x = adi->dev->abs[t];
+ x = input_abs_get_val(adi->dev, t);
if (t == ABS_THROTTLE || t == ABS_RUDDER || adi->id == ADI_ID_WGPE)
x = i < adi->axes10 ? 512 : 128;
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 05022f07ec7..0bc86204213 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -139,8 +139,8 @@ static int __init amijoy_init(void)
amijoy_dev[i]->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
for (j = 0; j < 2; j++) {
- amijoy_dev[i]->absmin[ABS_X + j] = -1;
- amijoy_dev[i]->absmax[ABS_X + j] = 1;
+ input_set_abs_params(amijoy_dev[i], ABS_X + j,
+ -1, 1, 0, 0);
}
err = input_register_device(amijoy_dev[i]);
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 45ac70eae0a..0536b1b2f01 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -318,11 +318,8 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
for (i = 0; i < gf2k_axes[gf2k->id]; i++)
set_bit(gf2k_abs[i], input_dev->absbit);
- for (i = 0; i < gf2k_hats[gf2k->id]; i++) {
- set_bit(ABS_HAT0X + i, input_dev->absbit);
- input_dev->absmin[ABS_HAT0X + i] = -1;
- input_dev->absmax[ABS_HAT0X + i] = 1;
- }
+ for (i = 0; i < gf2k_hats[gf2k->id]; i++)
+ input_set_abs_params(input_dev, ABS_HAT0X + i, -1, 1, 0, 0);
for (i = 0; i < gf2k_joys[gf2k->id]; i++)
set_bit(gf2k_btn_joy[i], input_dev->keybit);
@@ -334,11 +331,14 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
gf2k_read(gf2k, data);
for (i = 0; i < gf2k_axes[gf2k->id]; i++) {
- input_dev->absmax[gf2k_abs[i]] = (i < 2) ? input_dev->abs[gf2k_abs[i]] * 2 - 32 :
- input_dev->abs[gf2k_abs[0]] + input_dev->abs[gf2k_abs[1]] - 32;
- input_dev->absmin[gf2k_abs[i]] = 32;
- input_dev->absfuzz[gf2k_abs[i]] = 8;
- input_dev->absflat[gf2k_abs[i]] = (i < 2) ? 24 : 0;
+ int max = i < 2 ?
+ input_abs_get_val(input_dev, gf2k_abs[i]) * 2 :
+ input_abs_get_val(input_dev, gf2k_abs[0]) +
+ input_abs_get_val(input_dev, gf2k_abs[1]);
+ int flat = i < 2 ? 24 : 0;
+
+ input_set_abs_params(input_dev, gf2k_abs[i],
+ 32, max - 32, 8, flat);
}
err = input_register_device(gf2k->dev);
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 2478289aeee..16fb19d1ca2 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -270,18 +270,14 @@ static int interact_connect(struct gameport *gameport, struct gameport_driver *d
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
for (i = 0; (t = interact_type[interact->type].abs[i]) >= 0; i++) {
- set_bit(t, input_dev->absbit);
- if (i < interact_type[interact->type].b8) {
- input_dev->absmin[t] = 0;
- input_dev->absmax[t] = 255;
- } else {
- input_dev->absmin[t] = -1;
- input_dev->absmax[t] = 1;
- }
+ if (i < interact_type[interact->type].b8)
+ input_set_abs_params(input_dev, t, 0, 255, 0, 0);
+ else
+ input_set_abs_params(input_dev, t, -1, 1, 0, 0);
}
for (i = 0; (t = interact_type[interact->type].btn[i]) >= 0; i++)
- set_bit(t, input_dev->keybit);
+ __set_bit(t, input_dev->keybit);
err = input_register_device(interact->dev);
if (err)
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index ca13a6bec33..b8d86115644 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -761,17 +761,21 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
for (j = 0; (bits = sw_bit[sw->type][j]); j++) {
+ int min, max, fuzz, flat;
+
code = sw_abs[sw->type][j];
- set_bit(code, input_dev->absbit);
- input_dev->absmax[code] = (1 << bits) - 1;
- input_dev->absmin[code] = (bits == 1) ? -1 : 0;
- input_dev->absfuzz[code] = ((bits >> 1) >= 2) ? (1 << ((bits >> 1) - 2)) : 0;
- if (code != ABS_THROTTLE)
- input_dev->absflat[code] = (bits >= 5) ? (1 << (bits - 5)) : 0;
+ min = bits == 1 ? -1 : 0;
+ max = (1 << bits) - 1;
+ fuzz = (bits >> 1) >= 2 ? 1 << ((bits >> 1) - 2) : 0;
+ flat = code == ABS_THROTTLE || bits < 5 ?
+ 0 : 1 << (bits - 5);
+
+ input_set_abs_params(input_dev, code,
+ min, max, fuzz, flat);
}
for (j = 0; (code = sw_btn[sw->type][j]); j++)
- set_bit(code, input_dev->keybit);
+ __set_bit(code, input_dev->keybit);
dbg("%s%s [%d-bit id %d data %d]\n", sw->name, comment, m, l, k);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 269a846f369..f9fb7fa10af 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -148,6 +148,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b171f63fe4d..9cc488d2149 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -395,6 +395,16 @@ config KEYBOARD_SH_KEYSC
To compile this driver as a module, choose M here: the
module will be called sh_keysc.
+config KEYBOARD_STMPE
+ tristate "STMPE keypad support"
+ depends on MFD_STMPE
+ help
+ Say Y here if you want to use the keypad controller on STMPE I/O
+ expanders.
+
+ To compile this driver as a module, choose M here: the module will be
+ called stmpe-keypad.
+
config KEYBOARD_DAVINCI
tristate "TI DaVinci Key Scan"
depends on ARCH_DAVINCI_DM365
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 1a66d5f1ca8..504b591be0c 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SAMSUNG) += samsung-keypad.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
+obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index a9fd147f2ba..6069abe31e4 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -39,6 +39,8 @@ struct gpio_keys_drvdata {
struct input_dev *input;
struct mutex disable_lock;
unsigned int n_buttons;
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
struct gpio_button_data data[0];
};
@@ -423,6 +425,21 @@ fail2:
return error;
}
+static int gpio_keys_open(struct input_dev *input)
+{
+ struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
+
+ return ddata->enable ? ddata->enable(input->dev.parent) : 0;
+}
+
+static void gpio_keys_close(struct input_dev *input)
+{
+ struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
+
+ if (ddata->disable)
+ ddata->disable(input->dev.parent);
+}
+
static int __devinit gpio_keys_probe(struct platform_device *pdev)
{
struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
@@ -444,13 +461,18 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
ddata->input = input;
ddata->n_buttons = pdata->nbuttons;
+ ddata->enable = pdata->enable;
+ ddata->disable = pdata->disable;
mutex_init(&ddata->disable_lock);
platform_set_drvdata(pdev, ddata);
+ input_set_drvdata(input, ddata);
input->name = pdev->name;
input->phys = "gpio-keys/input0";
input->dev.parent = &pdev->dev;
+ input->open = gpio_keys_open;
+ input->close = gpio_keys_close;
input->id.bustype = BUS_HOST;
input->id.vendor = 0x0001;
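The gpio_keys hunks above route the new platform-data enable/disable hooks through the input device's open/close callbacks. A board-code sketch of supplying them; the button table and hook bodies are invented for illustration:

#include <linux/device.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/kernel.h>

static int board_keys_enable(struct device *dev)
{
	/* e.g. power up the rail feeding the keys; always succeeds here */
	return 0;
}

static void board_keys_disable(struct device *dev)
{
	/* e.g. power the rail back down */
}

static struct gpio_keys_button board_buttons[] = {
	{ .code = KEY_POWER, .gpio = 42, .active_low = 1, .desc = "power" },
};

static struct gpio_keys_platform_data board_keys_pdata = {
	.buttons  = board_buttons,
	.nbuttons = ARRAY_SIZE(board_buttons),
	.enable   = board_keys_enable,	/* called from gpio_keys_open() */
	.disable  = board_keys_disable,	/* called from gpio_keys_close() */
};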
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index c83f4b2ec7d..19fa94af207 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -232,15 +232,16 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
if (absdev) {
val = lo + (hi << 8);
#ifdef TABLET_AUTOADJUST
- if (val < dev->absmin[ABS_X + i])
- dev->absmin[ABS_X + i] = val;
- if (val > dev->absmax[ABS_X + i])
- dev->absmax[ABS_X + i] = val;
+ if (val < input_abs_get_min(dev, ABS_X + i))
+ input_abs_set_min(dev, ABS_X + i, val);
+ if (val > input_abs_get_max(dev, ABS_X + i))
+ input_abs_set_max(dev, ABS_X + i, val);
#endif
- if (i%3) val = dev->absmax[ABS_X + i] - val;
+ if (i % 3)
+ val = input_abs_get_max(dev, ABS_X + i) - val;
input_report_abs(dev, ABS_X + i, val);
} else {
- val = (int) (((int8_t)lo) | ((int8_t)hi << 8));
+ val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
if (i % 3)
val *= -1;
input_report_rel(dev, REL_X + i, val);
@@ -387,9 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
#ifdef TABLET_AUTOADJUST
for (i = 0; i < ABS_MAX; i++) {
- int diff = input_dev->absmax[ABS_X + i] / 10;
- input_dev->absmin[ABS_X + i] += diff;
- input_dev->absmax[ABS_X + i] -= diff;
+ int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
+ input_abs_set_min(input_dev, ABS_X + i,
+ input_abs_get_min(input_dev, ABS_X + i) + diff);
+ input_abs_set_max(input_dev, ABS_X + i,
+ input_abs_get_max(input_dev, ABS_X + i) - diff);
}
#endif
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 0e53b3bc39a..f32404f9918 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
clk_put(keypad->clk);
input_unregister_device(keypad->input_dev);
- input_free_device(keypad->input_dev);
-
iounmap(keypad->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
new file mode 100644
index 00000000000..ab7610ca10e
--- /dev/null
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/mfd/stmpe.h>
+
+/* These are at the same addresses in all STMPE variants */
+#define STMPE_KPC_COL 0x60
+#define STMPE_KPC_ROW_MSB 0x61
+#define STMPE_KPC_ROW_LSB 0x62
+#define STMPE_KPC_CTRL_MSB 0x63
+#define STMPE_KPC_CTRL_LSB 0x64
+#define STMPE_KPC_COMBI_KEY_0 0x65
+#define STMPE_KPC_COMBI_KEY_1 0x66
+#define STMPE_KPC_COMBI_KEY_2 0x67
+#define STMPE_KPC_DATA_BYTE0 0x68
+#define STMPE_KPC_DATA_BYTE1 0x69
+#define STMPE_KPC_DATA_BYTE2 0x6a
+#define STMPE_KPC_DATA_BYTE3 0x6b
+#define STMPE_KPC_DATA_BYTE4 0x6c
+
+#define STMPE_KPC_CTRL_LSB_SCAN (0x1 << 0)
+#define STMPE_KPC_CTRL_LSB_DEBOUNCE (0x7f << 1)
+#define STMPE_KPC_CTRL_MSB_SCAN_COUNT (0xf << 4)
+
+#define STMPE_KPC_ROW_MSB_ROWS 0xff
+
+#define STMPE_KPC_DATA_UP (0x1 << 7)
+#define STMPE_KPC_DATA_ROW (0xf << 3)
+#define STMPE_KPC_DATA_COL (0x7 << 0)
+#define STMPE_KPC_DATA_NOKEY_MASK 0x78
+
+#define STMPE_KEYPAD_MAX_DEBOUNCE 127
+#define STMPE_KEYPAD_MAX_SCAN_COUNT 15
+
+#define STMPE_KEYPAD_MAX_ROWS 8
+#define STMPE_KEYPAD_MAX_COLS 8
+#define STMPE_KEYPAD_ROW_SHIFT 3
+#define STMPE_KEYPAD_KEYMAP_SIZE \
+ (STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
+
+/**
+ * struct stmpe_keypad_variant - model-specific attributes
+ * @auto_increment: whether the KPC_DATA_BYTE register address
+ * auto-increments on multiple read
+ * @num_data: number of data bytes
+ * @num_normal_data: number of normal keys' data bytes
+ * @max_cols: maximum number of columns supported
+ * @max_rows: maximum number of rows supported
+ * @col_gpios: bitmask of gpios which can be used for columns
+ * @row_gpios: bitmask of gpios which can be used for rows
+ */
+struct stmpe_keypad_variant {
+ bool auto_increment;
+ int num_data;
+ int num_normal_data;
+ int max_cols;
+ int max_rows;
+ unsigned int col_gpios;
+ unsigned int row_gpios;
+};
+
+static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
+ [STMPE1601] = {
+ .auto_increment = true,
+ .num_data = 5,
+ .num_normal_data = 3,
+ .max_cols = 8,
+ .max_rows = 8,
+ .col_gpios = 0x000ff, /* GPIO 0 - 7 */
+ .row_gpios = 0x0ff00, /* GPIO 8 - 15 */
+ },
+ [STMPE2401] = {
+ .auto_increment = false,
+ .num_data = 3,
+ .num_normal_data = 2,
+ .max_cols = 8,
+ .max_rows = 12,
+ .col_gpios = 0x0000ff, /* GPIO 0 - 7*/
+ .row_gpios = 0x1fef00, /* GPIO 8-14, 16-20 */
+ },
+ [STMPE2403] = {
+ .auto_increment = true,
+ .num_data = 5,
+ .num_normal_data = 3,
+ .max_cols = 8,
+ .max_rows = 12,
+ .col_gpios = 0x0000ff, /* GPIO 0 - 7*/
+ .row_gpios = 0x1fef00, /* GPIO 8-14, 16-20 */
+ },
+};
+
+struct stmpe_keypad {
+ struct stmpe *stmpe;
+ struct input_dev *input;
+ const struct stmpe_keypad_variant *variant;
+ const struct stmpe_keypad_platform_data *plat;
+
+ unsigned int rows;
+ unsigned int cols;
+
+ unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+};
+
+static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
+{
+ const struct stmpe_keypad_variant *variant = keypad->variant;
+ struct stmpe *stmpe = keypad->stmpe;
+ int ret;
+ int i;
+
+ if (variant->auto_increment)
+ return stmpe_block_read(stmpe, STMPE_KPC_DATA_BYTE0,
+ variant->num_data, data);
+
+ for (i = 0; i < variant->num_data; i++) {
+ ret = stmpe_reg_read(stmpe, STMPE_KPC_DATA_BYTE0 + i);
+ if (ret < 0)
+ return ret;
+
+ data[i] = ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t stmpe_keypad_irq(int irq, void *dev)
+{
+ struct stmpe_keypad *keypad = dev;
+ struct input_dev *input = keypad->input;
+ const struct stmpe_keypad_variant *variant = keypad->variant;
+ u8 fifo[variant->num_data];
+ int ret;
+ int i;
+
+ ret = stmpe_keypad_read_data(keypad, fifo);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < variant->num_normal_data; i++) {
+ u8 data = fifo[i];
+ int row = (data & STMPE_KPC_DATA_ROW) >> 3;
+ int col = data & STMPE_KPC_DATA_COL;
+ int code = MATRIX_SCAN_CODE(row, col, STMPE_KEYPAD_ROW_SHIFT);
+ bool up = data & STMPE_KPC_DATA_UP;
+
+ if ((data & STMPE_KPC_DATA_NOKEY_MASK)
+ == STMPE_KPC_DATA_NOKEY_MASK)
+ continue;
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], !up);
+ input_sync(input);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
+{
+ const struct stmpe_keypad_variant *variant = keypad->variant;
+ unsigned int col_gpios = variant->col_gpios;
+ unsigned int row_gpios = variant->row_gpios;
+ struct stmpe *stmpe = keypad->stmpe;
+ unsigned int pins = 0;
+ int i;
+
+ /*
+ * Figure out which pins need to be set to the keypad alternate
+ * function.
+ *
+ * {cols,rows}_gpios are bitmasks of which pins on the chip can be used
+ * for the keypad.
+ *
+ * keypad->{cols,rows} are bitmasks of which pins (of the ones usable
+ * for the keypad) are used on the board.
+ */
+
+ for (i = 0; i < variant->max_cols; i++) {
+ int num = __ffs(col_gpios);
+
+ if (keypad->cols & (1 << i))
+ pins |= 1 << num;
+
+ col_gpios &= ~(1 << num);
+ }
+
+ for (i = 0; i < variant->max_rows; i++) {
+ int num = __ffs(row_gpios);
+
+ if (keypad->rows & (1 << i))
+ pins |= 1 << num;
+
+ row_gpios &= ~(1 << num);
+ }
+
+ return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+}
+
+static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
+{
+ const struct stmpe_keypad_platform_data *plat = keypad->plat;
+ const struct stmpe_keypad_variant *variant = keypad->variant;
+ struct stmpe *stmpe = keypad->stmpe;
+ int ret;
+
+ if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+ return -EINVAL;
+
+ if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+ return -EINVAL;
+
+ ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+ if (ret < 0)
+ return ret;
+
+ ret = stmpe_keypad_altfunc_init(keypad);
+ if (ret < 0)
+ return ret;
+
+ ret = stmpe_reg_write(stmpe, STMPE_KPC_COL, keypad->cols);
+ if (ret < 0)
+ return ret;
+
+ ret = stmpe_reg_write(stmpe, STMPE_KPC_ROW_LSB, keypad->rows);
+ if (ret < 0)
+ return ret;
+
+ if (variant->max_rows > 8) {
+ ret = stmpe_set_bits(stmpe, STMPE_KPC_ROW_MSB,
+ STMPE_KPC_ROW_MSB_ROWS,
+ keypad->rows >> 8);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
+ STMPE_KPC_CTRL_MSB_SCAN_COUNT,
+ plat->scan_count << 4);
+ if (ret < 0)
+ return ret;
+
+ return stmpe_set_bits(stmpe, STMPE_KPC_CTRL_LSB,
+ STMPE_KPC_CTRL_LSB_SCAN |
+ STMPE_KPC_CTRL_LSB_DEBOUNCE,
+ STMPE_KPC_CTRL_LSB_SCAN |
+ (plat->debounce_ms << 1));
+}
+
+static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct stmpe_keypad_platform_data *plat;
+ struct stmpe_keypad *keypad;
+ struct input_dev *input;
+ int ret;
+ int irq;
+ int i;
+
+ plat = stmpe->pdata->keypad;
+ if (!plat)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ keypad = kzalloc(sizeof(struct stmpe_keypad), GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ input = input_allocate_device();
+ if (!input) {
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+
+ input->name = "STMPE keypad";
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &pdev->dev;
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, input->evbit);
+ if (!plat->no_autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ input->keycode = keypad->keymap;
+ input->keycodesize = sizeof(keypad->keymap[0]);
+ input->keycodemax = ARRAY_SIZE(keypad->keymap);
+
+ matrix_keypad_build_keymap(plat->keymap_data, STMPE_KEYPAD_ROW_SHIFT,
+ input->keycode, input->keybit);
+
+ for (i = 0; i < plat->keymap_data->keymap_size; i++) {
+ unsigned int key = plat->keymap_data->keymap[i];
+
+ keypad->cols |= 1 << KEY_COL(key);
+ keypad->rows |= 1 << KEY_ROW(key);
+ }
+
+ keypad->stmpe = stmpe;
+ keypad->plat = plat;
+ keypad->input = input;
+ keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+ ret = stmpe_keypad_chip_init(keypad);
+ if (ret < 0)
+ goto out_freeinput;
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ ret = request_threaded_irq(irq, NULL, stmpe_keypad_irq, IRQF_ONESHOT,
+ "stmpe-keypad", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
+ goto out_unregisterinput;
+ }
+
+ platform_set_drvdata(pdev, keypad);
+
+ return 0;
+
+out_unregisterinput:
+ input_unregister_device(input);
+ input = NULL;
+out_freeinput:
+ input_free_device(input);
+out_freekeypad:
+ kfree(keypad);
+ return ret;
+}
+
+static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
+{
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+ int irq = platform_get_irq(pdev, 0);
+
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ free_irq(irq, keypad);
+ input_unregister_device(keypad->input);
+ platform_set_drvdata(pdev, NULL);
+ kfree(keypad);
+
+ return 0;
+}
+
+static struct platform_driver stmpe_keypad_driver = {
+ .driver.name = "stmpe-keypad",
+ .driver.owner = THIS_MODULE,
+ .probe = stmpe_keypad_probe,
+ .remove = __devexit_p(stmpe_keypad_remove),
+};
+
+static int __init stmpe_keypad_init(void)
+{
+ return platform_driver_register(&stmpe_keypad_driver);
+}
+module_init(stmpe_keypad_init);
+
+static void __exit stmpe_keypad_exit(void)
+{
+ platform_driver_unregister(&stmpe_keypad_driver);
+}
+module_exit(stmpe_keypad_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPExxxx keypad driver");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
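The new driver expects its keymap and timing parameters via stmpe_keypad_platform_data. A board-side sketch using only the fields the driver above actually reads (keymap_data, debounce_ms, scan_count, no_autorepeat); the 2x2 keymap is an invented example:

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/kernel.h>
#include <linux/mfd/stmpe.h>

static const unsigned int board_keymap[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_LEFT),
	KEY(1, 1, KEY_RIGHT),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static struct stmpe_keypad_platform_data board_stmpe_keypad = {
	.keymap_data	= &board_keymap_data,
	.debounce_ms	= 64,	/* must stay <= STMPE_KEYPAD_MAX_DEBOUNCE */
	.scan_count	= 8,	/* must stay <= STMPE_KEYPAD_MAX_SCAN_COUNT */
	.no_autorepeat	= true,
};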
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index e2ca0170808..de5900d5078 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -724,7 +724,6 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
pdata = &ac->pdata;
ac->input = input_dev;
- ac->disabled = true;
ac->dev = dev;
ac->irq = irq;
ac->bops = bops;
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index e148749b585..23257652b8e 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -38,7 +38,8 @@ enum {
};
static int ati_remote2_set_mask(const char *val,
- struct kernel_param *kp, unsigned int max)
+ const struct kernel_param *kp,
+ unsigned int max)
{
unsigned long mask;
int ret;
@@ -59,28 +60,31 @@ static int ati_remote2_set_mask(const char *val,
}
static int ati_remote2_set_channel_mask(const char *val,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_CHANNEL_MASK);
}
-static int ati_remote2_get_channel_mask(char *buffer, struct kernel_param *kp)
+static int ati_remote2_get_channel_mask(char *buffer,
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
}
-static int ati_remote2_set_mode_mask(const char *val, struct kernel_param *kp)
+static int ati_remote2_set_mode_mask(const char *val,
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_MODE_MASK);
}
-static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp)
+static int ati_remote2_get_mode_mask(char *buffer,
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
@@ -89,15 +93,19 @@ static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp)
static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
#define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
-#define param_set_channel_mask ati_remote2_set_channel_mask
-#define param_get_channel_mask ati_remote2_get_channel_mask
+static struct kernel_param_ops param_ops_channel_mask = {
+ .set = ati_remote2_set_channel_mask,
+ .get = ati_remote2_get_channel_mask,
+};
module_param(channel_mask, channel_mask, 0644);
MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>");
static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK;
#define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
-#define param_set_mode_mask ati_remote2_set_mode_mask
-#define param_get_mode_mask ati_remote2_get_mode_mask
+static struct kernel_param_ops param_ops_mode_mask = {
+ .set = ati_remote2_set_mode_mask,
+ .get = ati_remote2_get_mode_mask,
+};
module_param(mode_mask, mode_mask, 0644);
MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>");
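Both module parameters above now follow the kernel_param_ops pattern: a custom parameter type is described by an ops structure plus a param_check_<type>() macro instead of #defined param_set_/param_get_ aliases. A generic sketch of the same pattern with invented names (mymod, threshold):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int mymod_set_threshold(const char *val, const struct kernel_param *kp)
{
	unsigned long v;
	int ret = strict_strtoul(val, 0, &v);

	if (ret)
		return ret;
	if (v > 100)
		return -EINVAL;
	*(unsigned int *)kp->arg = v;
	return 0;
}

static int mymod_get_threshold(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%u", *(unsigned int *)kp->arg);
}

static struct kernel_param_ops param_ops_threshold = {
	.set = mymod_set_threshold,
	.get = mymod_get_threshold,
};

#define param_check_threshold(name, p) __param_check(name, p, unsigned int)

static unsigned int threshold = 50;
module_param(threshold, threshold, 0644);
MODULE_PARM_DESC(threshold, "Example threshold (0-100)");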
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 9946d73624b..9dfd6e5f786 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -115,7 +115,8 @@ static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->event = ixp4xx_spkr_event;
err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
- IRQF_DISABLED | IRQF_TIMER, "ixp4xx-beeper", (void *) dev->id);
+ IRQF_DISABLED | IRQF_NO_SUSPEND, "ixp4xx-beeper",
+ (void *) dev->id);
if (err)
goto err_free_device;
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 1dacae4b43f..8e130bf7d32 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -173,7 +173,7 @@ static int __devinit sparcspkr_probe(struct device *dev)
return 0;
}
-static int sparcspkr_shutdown(struct of_device *dev)
+static int sparcspkr_shutdown(struct platform_device *dev)
{
struct sparcspkr_state *state = dev_get_drvdata(&dev->dev);
struct input_dev *input_dev = state->input_dev;
@@ -184,7 +184,7 @@ static int sparcspkr_shutdown(struct of_device *dev)
return 0;
}
-static int __devinit bbc_beep_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit bbc_beep_probe(struct platform_device *op, const struct of_device_id *match)
{
struct sparcspkr_state *state;
struct bbc_beep_info *info;
@@ -231,7 +231,7 @@ out_err:
return err;
}
-static int __devexit bbc_remove(struct of_device *op)
+static int __devexit bbc_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct input_dev *input_dev = state->input_dev;
@@ -269,7 +269,7 @@ static struct of_platform_driver bbc_beep_driver = {
.shutdown = sparcspkr_shutdown,
};
-static int __devinit grover_beep_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit grover_beep_probe(struct platform_device *op, const struct of_device_id *match)
{
struct sparcspkr_state *state;
struct grover_beep_info *info;
@@ -312,7 +312,7 @@ out_err:
return err;
}
-static int __devexit grover_remove(struct of_device *op)
+static int __devexit grover_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct grover_beep_info *info = &state->u.grover;
@@ -353,14 +353,12 @@ static struct of_platform_driver grover_beep_driver = {
static int __init sparcspkr_init(void)
{
- int err = of_register_driver(&bbc_beep_driver,
- &of_platform_bus_type);
+ int err = of_register_platform_driver(&bbc_beep_driver);
if (!err) {
- err = of_register_driver(&grover_beep_driver,
- &of_platform_bus_type);
+ err = of_register_platform_driver(&grover_beep_driver);
if (err)
- of_unregister_driver(&bbc_beep_driver);
+ of_unregister_platform_driver(&bbc_beep_driver);
}
return err;
@@ -368,8 +366,8 @@ static int __init sparcspkr_init(void)
static void __exit sparcspkr_exit(void)
{
- of_unregister_driver(&bbc_beep_driver);
- of_unregister_driver(&grover_beep_driver);
+ of_unregister_platform_driver(&bbc_beep_driver);
+ of_unregister_platform_driver(&grover_beep_driver);
}
module_init(sparcspkr_init);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index b71eb55f2db..0d4266a533a 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -304,21 +304,25 @@ static int uinput_validate_absbits(struct input_dev *dev)
if (!test_bit(cnt, dev->absbit))
continue;
- if ((dev->absmax[cnt] <= dev->absmin[cnt])) {
+ if (input_abs_get_max(dev, cnt) <= input_abs_get_min(dev, cnt)) {
printk(KERN_DEBUG
"%s: invalid abs[%02x] min:%d max:%d\n",
UINPUT_NAME, cnt,
- dev->absmin[cnt], dev->absmax[cnt]);
+ input_abs_get_min(dev, cnt),
+ input_abs_get_max(dev, cnt));
retval = -EINVAL;
break;
}
- if (dev->absflat[cnt] > (dev->absmax[cnt] - dev->absmin[cnt])) {
+ if (input_abs_get_flat(dev, cnt) >
+ input_abs_get_max(dev, cnt) - input_abs_get_min(dev, cnt)) {
printk(KERN_DEBUG
- "%s: absflat[%02x] out of range: %d "
+ "%s: abs_flat #%02x out of range: %d "
"(min:%d/max:%d)\n",
- UINPUT_NAME, cnt, dev->absflat[cnt],
- dev->absmin[cnt], dev->absmax[cnt]);
+ UINPUT_NAME, cnt,
+ input_abs_get_flat(dev, cnt),
+ input_abs_get_min(dev, cnt),
+ input_abs_get_max(dev, cnt));
retval = -EINVAL;
break;
}
@@ -343,7 +347,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
struct uinput_user_dev *user_dev;
struct input_dev *dev;
char *name;
- int size;
+ int i, size;
int retval;
if (count != sizeof(struct uinput_user_dev))
@@ -387,11 +391,12 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- size = sizeof(int) * ABS_CNT;
- memcpy(dev->absmax, user_dev->absmax, size);
- memcpy(dev->absmin, user_dev->absmin, size);
- memcpy(dev->absfuzz, user_dev->absfuzz, size);
- memcpy(dev->absflat, user_dev->absflat, size);
+ for (i = 0; i < ABS_CNT; i++) {
+ input_abs_set_max(dev, i, user_dev->absmax[i]);
+ input_abs_set_min(dev, i, user_dev->absmin[i]);
+ input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
+ input_abs_set_flat(dev, i, user_dev->absflat[i]);
+ }
/* check if absmin/absmax/absfuzz/absflat are filled as
* told in Documentation/input/input-programming.txt */
@@ -806,6 +811,8 @@ static struct miscdevice uinput_misc = {
.minor = UINPUT_MINOR,
.name = UINPUT_NAME,
};
+MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
+MODULE_ALIAS("devname:" UINPUT_NAME);
static int __init uinput_init(void)
{
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 05edd75abca..a9cf7683163 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -205,8 +205,8 @@ struct atp {
bool overflow_warned;
int x_old; /* last reported x/y, */
int y_old; /* used for smoothing */
- u8 xy_cur[ATP_XSENSORS + ATP_YSENSORS];
- u8 xy_old[ATP_XSENSORS + ATP_YSENSORS];
+ signed char xy_cur[ATP_XSENSORS + ATP_YSENSORS];
+ signed char xy_old[ATP_XSENSORS + ATP_YSENSORS];
int xy_acc[ATP_XSENSORS + ATP_YSENSORS];
int idlecount; /* number of empty packets */
struct work_struct work;
@@ -531,7 +531,7 @@ static void atp_complete_geyser_1_2(struct urb *urb)
for (i = 0; i < ATP_XSENSORS + ATP_YSENSORS; i++) {
/* accumulate the change */
- int change = dev->xy_old[i] - dev->xy_cur[i];
+ signed char change = dev->xy_old[i] - dev->xy_cur[i];
dev->xy_acc[i] -= change;
/* prevent down drifting */
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49146a..b9523176391 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
const struct bcm5974_config *cfg,
const struct tp_finger *f)
{
- input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
- input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
- input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
- input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ raw2int(f->force_major) << 1);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ raw2int(f->force_minor) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ raw2int(f->size_major) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MINOR,
+ raw2int(f->size_minor) << 1);
input_report_abs(input, ABS_MT_ORIENTATION,
MAX_FINGER_ORIENTATION - raw2int(f->orientation));
input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b18862b2a70..48311204ba5 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -185,7 +185,6 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
int fingers;
- static int old_fingers;
if (etd->fw_version < 0x020000) {
/*
@@ -203,10 +202,13 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
}
if (etd->jumpy_cursor) {
- /* Discard packets that are likely to have bogus coordinates */
- if (fingers > old_fingers) {
+ if (fingers != 1) {
+ etd->single_finger_reports = 0;
+ } else if (etd->single_finger_reports < 2) {
+ /* Discard first 2 reports of one finger, bogus */
+ etd->single_finger_reports++;
elantech_debug("discarding packet\n");
- goto discard_packet_v1;
+ return;
}
}
@@ -238,9 +240,6 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
}
input_sync(dev);
-
- discard_packet_v1:
- old_fingers = fingers;
}
/*
@@ -258,6 +257,14 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
input_report_key(dev, BTN_TOUCH, fingers != 0);
switch (fingers) {
+ case 3:
+ /*
+ * Same as one finger, except report of more than 3 fingers:
+ * byte 3: n4 . w1 w0 . . . .
+ */
+ if (packet[3] & 0x80)
+ fingers = 4;
+ /* pass through... */
case 1:
/*
* byte 1: . . . . . x10 x9 x8
@@ -310,6 +317,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+ input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
@@ -467,6 +475,7 @@ static void elantech_set_input_params(struct psmouse *psmouse)
break;
case 2:
+ __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
input_set_abs_params(dev, ABS_X, ETP_XMIN_V2, ETP_XMAX_V2, 0, 0);
input_set_abs_params(dev, ABS_Y, ETP_YMIN_V2, ETP_YMAX_V2, 0, 0);
input_set_abs_params(dev, ABS_HAT0X, ETP_2FT_XMIN, ETP_2FT_XMAX, 0, 0);
@@ -733,13 +742,13 @@ int elantech_init(struct psmouse *psmouse)
etd->capabilities = param[0];
/*
- * This firmware seems to suffer from misreporting coordinates when
+ * This firmware suffers from misreporting coordinates when
* a touch action starts causing the mouse cursor or scrolled page
* to jump. Enable a workaround.
*/
- if (etd->fw_version == 0x020022) {
- pr_info("firmware version 2.0.34 detected, enabling jumpy cursor workaround\n");
- etd->jumpy_cursor = 1;
+ if (etd->fw_version == 0x020022 || etd->fw_version == 0x020600) {
+ pr_info("firmware version 2.0.34/2.6.0 detected, enabling jumpy cursor workaround\n");
+ etd->jumpy_cursor = true;
}
if (elantech_set_absolute_mode(psmouse)) {
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index ac57bde1bb9..aa4aac5d219 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -100,10 +100,11 @@ struct elantech_data {
unsigned char reg_26;
unsigned char debug;
unsigned char capabilities;
- unsigned char paritycheck;
- unsigned char jumpy_cursor;
+ bool paritycheck;
+ bool jumpy_cursor;
unsigned char hw_version;
- unsigned int fw_version;
+ unsigned int fw_version;
+ unsigned int single_finger_reports;
unsigned char parity[256];
};
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index 3941f97cfa6..7b02b652e26 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -145,8 +145,8 @@ static int __init pc110pad_init(void)
pc110pad_dev->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y);
pc110pad_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- pc110pad_dev->absmax[ABS_X] = 0x1ff;
- pc110pad_dev->absmax[ABS_Y] = 0x0ff;
+ input_abs_set_max(pc110pad_dev, ABS_X, 0x1ff);
+ input_abs_set_max(pc110pad_dev, ABS_Y, 0x0ff);
pc110pad_dev->open = pc110pad_open;
pc110pad_dev->close = pc110pad_close;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 979c5021528..73a7af2542a 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -39,11 +39,13 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
-static int psmouse_set_maxproto(const char *val, struct kernel_param *kp);
-static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp);
+static int psmouse_set_maxproto(const char *val, const struct kernel_param *);
+static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp);
+static struct kernel_param_ops param_ops_proto_abbrev = {
+ .set = psmouse_set_maxproto,
+ .get = psmouse_get_maxproto,
+};
#define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int)
-#define param_set_proto_abbrev psmouse_set_maxproto
-#define param_get_proto_abbrev psmouse_get_maxproto
module_param_named(proto, psmouse_max_proto, proto_abbrev, 0644);
MODULE_PARM_DESC(proto, "Highest protocol extension to probe (bare, imps, exps, any). Useful for KVM switches.");
@@ -1679,7 +1681,7 @@ static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data,
}
-static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
+static int psmouse_set_maxproto(const char *val, const struct kernel_param *kp)
{
const struct psmouse_protocol *proto;
@@ -1696,7 +1698,7 @@ static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
return 0;
}
-static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
+static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8c324403b9f..96b70a43515 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -635,8 +635,8 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
__clear_bit(REL_X, dev->relbit);
__clear_bit(REL_Y, dev->relbit);
- dev->absres[ABS_X] = priv->x_res;
- dev->absres[ABS_Y] = priv->y_res;
+ input_abs_set_res(dev, ABS_X, priv->x_res);
+ input_abs_set_res(dev, ABS_Y, priv->y_res);
if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
/* Clickpads report only left button */
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index d8f68f77007..d528a2dba06 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -22,6 +22,7 @@
#include <linux/random.h>
#include <linux/major.h>
#include <linux/device.h>
+#include <linux/kernel.h>
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
#include <linux/miscdevice.h>
#endif
@@ -134,11 +135,14 @@ static void mousedev_touchpad_event(struct input_dev *dev,
switch (code) {
case ABS_X:
+
fx(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
- size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
+ size = input_abs_get_max(dev, ABS_X) -
+ input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
+
tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size;
tmp += mousedev->frac_dx;
mousedev->packet.dx = tmp / FRACTION_DENOM;
@@ -150,10 +154,12 @@ static void mousedev_touchpad_event(struct input_dev *dev,
case ABS_Y:
fy(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
- /* use X size to keep the same scale */
- size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
+ /* use X size for ABS_Y to keep the same scale */
+ size = input_abs_get_max(dev, ABS_X) -
+ input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
+
tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size;
tmp += mousedev->frac_dy;
mousedev->packet.dy = tmp / FRACTION_DENOM;
@@ -167,33 +173,35 @@ static void mousedev_touchpad_event(struct input_dev *dev,
static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
unsigned int code, int value)
{
- int size;
+ int min, max, size;
switch (code) {
case ABS_X:
- size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
+ min = input_abs_get_min(dev, ABS_X);
+ max = input_abs_get_max(dev, ABS_X);
+
+ size = max - min;
if (size == 0)
size = xres ? : 1;
- if (value > dev->absmax[ABS_X])
- value = dev->absmax[ABS_X];
- if (value < dev->absmin[ABS_X])
- value = dev->absmin[ABS_X];
- mousedev->packet.x =
- ((value - dev->absmin[ABS_X]) * xres) / size;
+
+		value = clamp(value, min, max);
+
+ mousedev->packet.x = ((value - min) * xres) / size;
mousedev->packet.abs_event = 1;
break;
case ABS_Y:
- size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y];
+ min = input_abs_get_min(dev, ABS_Y);
+ max = input_abs_get_max(dev, ABS_Y);
+
+ size = max - min;
if (size == 0)
size = yres ? : 1;
- if (value > dev->absmax[ABS_Y])
- value = dev->absmax[ABS_Y];
- if (value < dev->absmin[ABS_Y])
- value = dev->absmin[ABS_Y];
- mousedev->packet.y = yres -
- ((value - dev->absmin[ABS_Y]) * yres) / size;
+
+		value = clamp(value, min, max);
+
+ mousedev->packet.y = yres - ((value - min) * yres) / size;
mousedev->packet.abs_event = 1;
break;
}
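Note that clamp() from <linux/kernel.h> returns the bounded value rather than modifying its argument, so the result has to be assigned back before the scaling. A small standalone sketch of the abs-to-screen mapping performed above, with hypothetical axis limits:

#include <linux/kernel.h>

/* Illustrative only: map an absolute axis value onto a screen axis. */
static int abs_to_screen(int value, int min, int max, int xres)
{
	int size = max - min;

	if (size == 0)
		size = xres ? : 1;

	value = clamp(value, min, max);		/* clamp() returns the bounded value */
	return ((value - min) * xres) / size;	/* e.g. 512 -> 512 for 0..1023, xres 1024 */
}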
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 847f4aad7ed..5d48bb66aa7 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -27,6 +27,11 @@
#include <asm/irq.h>
#elif defined(CONFIG_SH_CAYMAN)
#include <asm/irq.h>
+#elif defined(CONFIG_PPC)
+extern int of_i8042_kbd_irq;
+extern int of_i8042_aux_irq;
+# define I8042_KBD_IRQ of_i8042_kbd_irq
+# define I8042_AUX_IRQ of_i8042_aux_irq
#else
# define I8042_KBD_IRQ 1
# define I8042_AUX_IRQ 12
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 04e32f2d124..c5cc4508d6d 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -49,7 +49,7 @@ static inline void i8042_write_command(int val)
#define OBP_PS2MS_NAME1 "kdmouse"
#define OBP_PS2MS_NAME2 "mouse"
-static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit sparc_i8042_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -57,20 +57,20 @@ static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_dev
while (dp) {
if (!strcmp(dp->name, OBP_PS2KBD_NAME1) ||
!strcmp(dp->name, OBP_PS2KBD_NAME2)) {
- struct of_device *kbd = of_find_device_by_node(dp);
- unsigned int irq = kbd->irqs[0];
+ struct platform_device *kbd = of_find_device_by_node(dp);
+ unsigned int irq = kbd->archdata.irqs[0];
if (irq == 0xffffffff)
- irq = op->irqs[0];
+ irq = op->archdata.irqs[0];
i8042_kbd_irq = irq;
kbd_iobase = of_ioremap(&kbd->resource[0],
0, 8, "kbd");
kbd_res = &kbd->resource[0];
} else if (!strcmp(dp->name, OBP_PS2MS_NAME1) ||
!strcmp(dp->name, OBP_PS2MS_NAME2)) {
- struct of_device *ms = of_find_device_by_node(dp);
- unsigned int irq = ms->irqs[0];
+ struct platform_device *ms = of_find_device_by_node(dp);
+ unsigned int irq = ms->archdata.irqs[0];
if (irq == 0xffffffff)
- irq = op->irqs[0];
+ irq = op->archdata.irqs[0];
i8042_aux_irq = irq;
}
@@ -80,7 +80,7 @@ static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_dev
return 0;
}
-static int __devexit sparc_i8042_remove(struct of_device *op)
+static int __devexit sparc_i8042_remove(struct platform_device *op)
{
of_iounmap(kbd_res, kbd_iobase, 8);
@@ -116,8 +116,7 @@ static int __init i8042_platform_init(void)
if (!kbd_iobase)
return -ENODEV;
} else {
- int err = of_register_driver(&sparc_i8042_driver,
- &of_bus_type);
+ int err = of_register_platform_driver(&sparc_i8042_driver);
if (err)
return err;
@@ -141,7 +140,7 @@ static inline void i8042_platform_exit(void)
struct device_node *root = of_find_node_by_path("/");
if (strcmp(root->name, "SUNW,JavaStation-1"))
- of_unregister_driver(&sparc_i8042_driver);
+ of_unregister_platform_driver(&sparc_i8042_driver);
}
#else /* !CONFIG_PCI */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 258b98b9d7c..f5851316048 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -61,10 +61,6 @@ static bool i8042_noloop;
module_param_named(noloop, i8042_noloop, bool, 0);
MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
-static unsigned int i8042_blink_frequency = 500;
-module_param_named(panicblink, i8042_blink_frequency, uint, 0600);
-MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics");
-
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -1030,8 +1026,8 @@ static void i8042_controller_reset(void)
/*
- * i8042_panic_blink() will flash the keyboard LEDs and is called when
- * kernel panics. Flashing LEDs is useful for users running X who may
+ * i8042_panic_blink() will turn the keyboard LEDs on or off and is called
+ * when kernel panics. Flashing LEDs is useful for users running X who may
not see the console and will help distinguishing panics from "real"
* lockups.
*
@@ -1041,22 +1037,12 @@ static void i8042_controller_reset(void)
#define DELAY do { mdelay(1); if (++delay > 10) return delay; } while(0)
-static long i8042_panic_blink(long count)
+static long i8042_panic_blink(int state)
{
long delay = 0;
- static long last_blink;
- static char led;
-
- /*
- * We expect frequency to be about 1/2s. KDB uses about 1s.
- * Make sure they are different.
- */
- if (!i8042_blink_frequency)
- return 0;
- if (count - last_blink < i8042_blink_frequency)
- return 0;
+ char led;
- led ^= 0x01 | 0x04;
+ led = (state) ? 0x01 | 0x04 : 0;
while (i8042_read_status() & I8042_STR_IBF)
DELAY;
dbg("%02x -> i8042 (panic blink)", 0xed);
@@ -1069,7 +1055,6 @@ static long i8042_panic_blink(long count)
dbg("%02x -> i8042 (panic blink)", led);
i8042_write_data(led);
DELAY;
- last_blink = count;
return delay;
}
@@ -1500,8 +1485,8 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
{
- platform_driver_unregister(&i8042_driver);
platform_device_unregister(i8042_platform_device);
+ platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
panic_blink = NULL;
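The blink rework above also changes the panic_blink contract: the callback now receives an on/off state from the panic loop instead of an elapsed-time counter and returns how long it spent, which is why the blink-frequency parameter goes away. A hedged sketch of what an alternative implementation would look like under that assumption:

#include <linux/kernel.h>

static long example_panic_blink(int state)
{
	long delay = 0;

	/*
	 * Drive an LED (or similar) on when state is non-zero, off otherwise,
	 * and account for any busy-waiting in 'delay' (milliseconds), as the
	 * i8042 version does with its DELAY macro. Hardware access elided.
	 */
	return delay;
}

/* Registration is unchanged: panic_blink = example_panic_blink; */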
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index e2c028d2638..bb14449fb02 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -232,7 +232,7 @@ static void sxps2_close(struct serio *pserio)
* It returns 0, if the driver is bound to the PS/2 device, or a negative
* value if there is an error.
*/
-static int __devinit xps2_of_probe(struct of_device *ofdev,
+static int __devinit xps2_of_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct resource r_irq; /* Interrupt resources */
@@ -332,7 +332,7 @@ failed1:
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*/
-static int __devexit xps2_of_remove(struct of_device *of_dev)
+static int __devexit xps2_of_remove(struct platform_device *of_dev)
{
struct device *dev = &of_dev->dev;
struct xps2data *drvdata = dev_get_drvdata(dev);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 51b80b08d46..57b25b84d1f 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -987,20 +987,17 @@ static int aiptek_program_tablet(struct aiptek *aiptek)
/* Query getXextension */
if ((ret = aiptek_query(aiptek, 0x01, 0x00)) < 0)
return ret;
- aiptek->inputdev->absmin[ABS_X] = 0;
- aiptek->inputdev->absmax[ABS_X] = ret - 1;
+ input_set_abs_params(aiptek->inputdev, ABS_X, 0, ret - 1, 0, 0);
/* Query getYextension */
if ((ret = aiptek_query(aiptek, 0x01, 0x01)) < 0)
return ret;
- aiptek->inputdev->absmin[ABS_Y] = 0;
- aiptek->inputdev->absmax[ABS_Y] = ret - 1;
+ input_set_abs_params(aiptek->inputdev, ABS_Y, 0, ret - 1, 0, 0);
/* Query getPressureLevels */
if ((ret = aiptek_query(aiptek, 0x08, 0x00)) < 0)
return ret;
- aiptek->inputdev->absmin[ABS_PRESSURE] = 0;
- aiptek->inputdev->absmax[ABS_PRESSURE] = ret - 1;
+ input_set_abs_params(aiptek->inputdev, ABS_PRESSURE, 0, ret - 1, 0, 0);
/* Depending on whether we are in absolute or relative mode, we will
* do a switchToTablet(absolute) or switchToMouse(relative) command.
@@ -1054,8 +1051,8 @@ static ssize_t show_tabletSize(struct device *dev, struct device_attribute *attr
struct aiptek *aiptek = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%dx%d\n",
- aiptek->inputdev->absmax[ABS_X] + 1,
- aiptek->inputdev->absmax[ABS_Y] + 1);
+ input_abs_get_max(aiptek->inputdev, ABS_X) + 1,
+ input_abs_get_max(aiptek->inputdev, ABS_Y) + 1);
}
/* These structs define the sysfs files, param #1 is the name of the
@@ -1843,7 +1840,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
for (i = 0; i < ARRAY_SIZE(speeds); ++i) {
aiptek->curSetting.programmableDelay = speeds[i];
(void)aiptek_program_tablet(aiptek);
- if (aiptek->inputdev->absmax[ABS_X] > 0) {
+ if (input_abs_get_max(aiptek->inputdev, ABS_X) > 0) {
dev_info(&intf->dev,
"Aiptek using %d ms programming speed\n",
aiptek->curSetting.programmableDelay);
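
The aiptek conversion above stops poking inputdev->absmin/absmax directly and goes through the input core accessors instead. A minimal sketch of the accessor pattern, with an arbitrary 0..1023 placeholder range:

/*
 * Sketch of the accessor-based pattern used above: declare the axis range
 * with input_set_abs_params() and read it back with input_abs_get_max()
 * rather than touching dev->absmin[]/absmax[] directly.  The 0..1023
 * range is an arbitrary placeholder.
 */
input_set_abs_params(inputdev, ABS_X, 0, 1023, 0, 0);

if (input_abs_get_max(inputdev, ABS_X) > 0)
	dev_info(&inputdev->dev, "X axis range configured\n");
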
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index ce0b4608dad..6e29badb969 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
if (features->type == WACOM_G4 ||
features->type == WACOM_MO) {
input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
- rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+ rw = (data[7] & 0x04) - (data[7] & 0x03);
} else {
input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
- rw = -(signed)data[6];
+ rw = -(signed char)data[6];
}
input_report_rel(input, REL_WHEEL, rw);
}
@@ -687,10 +687,10 @@ static void wacom_tpc_finger_in(struct wacom_wac *wacom, char *data, int idx)
* protocol.
*/
if (wacom->last_finger != finger) {
- if (x == input->abs[ABS_X])
+ if (x == input_abs_get_val(input, ABS_X))
x++;
- if (y == input->abs[ABS_Y])
+ if (y == input_abs_get_val(input, ABS_Y))
y++;
}
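
The rw fix above hinges on integer promotion: data[] holds unsigned bytes, so casting to (signed) only reinterprets the already-promoted int, while casting to (signed char) first actually sign-extends the byte. A small illustration with an arbitrary sample value:

/*
 * Illustration of the sign-extension fix above, using an arbitrary byte.
 * With 0xff the old expression yields -255; the corrected one yields 1,
 * i.e. the intended small relative wheel step.
 */
unsigned char byte = 0xff;
int old_rw = -(signed)byte;		/* byte promotes to 255 -> -255 */
int new_rw = -(signed char)byte;	/* (signed char)0xff == -1 -> 1 */
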
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 61f35184f76..0069d9703fd 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -628,4 +628,14 @@ config TOUCHSCREEN_TPS6507X
To compile this driver as a module, choose M here: the
module will be called tps6507x_ts.
+config TOUCHSCREEN_STMPE
+ tristate "STMicroelectronics STMPE touchscreens"
+ depends on MFD_STMPE
+ help
+ Say Y here if you want support for STMicroelectronics
+ STMPE touchscreen controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stmpe-ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index bd6f30b4ff7..28217e1dcaf 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 4eb7df0b7f8..5ec0946938f 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -75,7 +75,7 @@ static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg,
unsigned char len, unsigned char *value)
{
struct i2c_client *client = tsc->client;
- unsigned int ret;
+ int ret;
unsigned char i2c_data[6];
BUG_ON(len > 5);
@@ -86,7 +86,7 @@ static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg,
ret = i2c_master_send(client, i2c_data, len + 1);
if (ret != 1) {
dev_err(&client->dev, "i2c write data cmd failed\n");
- return ret;
+ return ret ? ret : -EIO;
}
return 0;
@@ -96,7 +96,7 @@ static int cy8ctmg110_read_regs(struct cy8ctmg110 *tsc,
unsigned char *data, unsigned char len, unsigned char cmd)
{
struct i2c_client *client = tsc->client;
- unsigned int ret;
+ int ret;
struct i2c_msg msg[2] = {
/* first write slave position to i2c devices */
{ client->addr, 0, 1, &cmd },
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
new file mode 100644
index 00000000000..656148ec002
--- /dev/null
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -0,0 +1,397 @@
+/* STMicroelectronics STMPE811 Touchscreen Driver
+ *
+ * (C) 2010 Luotao Fu <l.fu@pengutronix.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/stmpe.h>
+
+/* Register layouts and functionality are identical on all stmpexxx variants
+ * with a touchscreen controller.
+ */
+#define STMPE_REG_INT_STA 0x0B
+#define STMPE_REG_ADC_CTRL1 0x20
+#define STMPE_REG_ADC_CTRL2 0x21
+#define STMPE_REG_TSC_CTRL 0x40
+#define STMPE_REG_TSC_CFG 0x41
+#define STMPE_REG_FIFO_TH 0x4A
+#define STMPE_REG_FIFO_STA 0x4B
+#define STMPE_REG_FIFO_SIZE 0x4C
+#define STMPE_REG_TSC_DATA_XYZ 0x52
+#define STMPE_REG_TSC_FRACTION_Z 0x56
+#define STMPE_REG_TSC_I_DRIVE 0x58
+
+#define OP_MOD_XYZ 0
+
+#define STMPE_TSC_CTRL_TSC_EN (1<<0)
+
+#define STMPE_FIFO_STA_RESET (1<<0)
+
+#define STMPE_IRQ_TOUCH_DET 0
+
+#define SAMPLE_TIME(x) (((x) & 0xf) << 4)
+#define MOD_12B(x) (((x) & 0x1) << 3)
+#define REF_SEL(x) (((x) & 0x1) << 1)
+#define ADC_FREQ(x) ((x) & 0x3)
+#define AVE_CTRL(x) (((x) & 0x3) << 6)
+#define DET_DELAY(x) (((x) & 0x7) << 3)
+#define SETTLING(x) ((x) & 0x7)
+#define FRACTION_Z(x) ((x) & 0x7)
+#define I_DRIVE(x) ((x) & 0x1)
+#define OP_MODE(x) (((x) & 0x7) << 1)
+
+#define STMPE_TS_NAME "stmpe-ts"
+#define XY_MASK 0xfff
+
+struct stmpe_touch {
+ struct stmpe *stmpe;
+ struct input_dev *idev;
+ struct delayed_work work;
+ struct device *dev;
+ u8 sample_time;
+ u8 mod_12b;
+ u8 ref_sel;
+ u8 adc_freq;
+ u8 ave_ctrl;
+ u8 touch_det_delay;
+ u8 settling;
+ u8 fraction_z;
+ u8 i_drive;
+};
+
+static int __stmpe_reset_fifo(struct stmpe *stmpe)
+{
+ int ret;
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_FIFO_STA,
+ STMPE_FIFO_STA_RESET, STMPE_FIFO_STA_RESET);
+ if (ret)
+ return ret;
+
+ return stmpe_set_bits(stmpe, STMPE_REG_FIFO_STA,
+ STMPE_FIFO_STA_RESET, 0);
+}
+
+static void stmpe_work(struct work_struct *work)
+{
+ int int_sta;
+ u32 timeout = 40;
+
+ struct stmpe_touch *ts =
+ container_of(work, struct stmpe_touch, work.work);
+
+ int_sta = stmpe_reg_read(ts->stmpe, STMPE_REG_INT_STA);
+
+ /*
+ * touch_det sometimes gets deasserted or just gets stuck. This appears
+ * to be a silicon bug; we still have to clarify this with the
+ * manufacturer. As a workaround we release the key anyway if
+ * touch_det keeps coming in after 4 ms while the FIFO contains no value
+ * during the whole time.
+ */
+ while ((int_sta & (1 << STMPE_IRQ_TOUCH_DET)) && (timeout > 0)) {
+ timeout--;
+ int_sta = stmpe_reg_read(ts->stmpe, STMPE_REG_INT_STA);
+ udelay(100);
+ }
+
+ /* reset the FIFO before we report release event */
+ __stmpe_reset_fifo(ts->stmpe);
+
+ input_report_abs(ts->idev, ABS_PRESSURE, 0);
+ input_sync(ts->idev);
+}
+
+static irqreturn_t stmpe_ts_handler(int irq, void *data)
+{
+ u8 data_set[4];
+ int x, y, z;
+ struct stmpe_touch *ts = data;
+
+ /*
+ * Cancel scheduled polling for release if we have new value
+ * available. Wait if the polling is already running.
+ */
+ cancel_delayed_work_sync(&ts->work);
+
+ /*
+ * The FIFO sometimes just crashes and stops generating interrupts. This
+ * appears to be a silicon bug. We still have to clarify this with
+ * the manufacturer. As a workaround we disable the TSC while we are
+ * collecting data and flush the FIFO after reading.
+ */
+ stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
+ STMPE_TSC_CTRL_TSC_EN, 0);
+
+ stmpe_block_read(ts->stmpe, STMPE_REG_TSC_DATA_XYZ, 4, data_set);
+
+ x = (data_set[0] << 4) | (data_set[1] >> 4);
+ y = ((data_set[1] & 0xf) << 8) | data_set[2];
+ z = data_set[3];
+
+ input_report_abs(ts->idev, ABS_X, x);
+ input_report_abs(ts->idev, ABS_Y, y);
+ input_report_abs(ts->idev, ABS_PRESSURE, z);
+ input_sync(ts->idev);
+
+ /* flush the FIFO after we have read out our values. */
+ __stmpe_reset_fifo(ts->stmpe);
+
+ /* reenable the tsc */
+ stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
+ STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);
+
+ /* start polling for touch_det to detect release */
+ schedule_delayed_work(&ts->work, HZ / 50);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit stmpe_init_hw(struct stmpe_touch *ts)
+{
+ int ret;
+ u8 adc_ctrl1, adc_ctrl1_mask, tsc_cfg, tsc_cfg_mask;
+ struct stmpe *stmpe = ts->stmpe;
+ struct device *dev = ts->dev;
+
+ ret = stmpe_enable(stmpe, STMPE_BLOCK_TOUCHSCREEN | STMPE_BLOCK_ADC);
+ if (ret) {
+ dev_err(dev, "Could not enable clock for ADC and TS\n");
+ return ret;
+ }
+
+ adc_ctrl1 = SAMPLE_TIME(ts->sample_time) | MOD_12B(ts->mod_12b) |
+ REF_SEL(ts->ref_sel);
+ adc_ctrl1_mask = SAMPLE_TIME(0xff) | MOD_12B(0xff) | REF_SEL(0xff);
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_ADC_CTRL1,
+ adc_ctrl1_mask, adc_ctrl1);
+ if (ret) {
+ dev_err(dev, "Could not setup ADC\n");
+ return ret;
+ }
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_ADC_CTRL2,
+ ADC_FREQ(0xff), ADC_FREQ(ts->adc_freq));
+ if (ret) {
+ dev_err(dev, "Could not setup ADC\n");
+ return ret;
+ }
+
+ tsc_cfg = AVE_CTRL(ts->ave_ctrl) | DET_DELAY(ts->touch_det_delay) |
+ SETTLING(ts->settling);
+ tsc_cfg_mask = AVE_CTRL(0xff) | DET_DELAY(0xff) | SETTLING(0xff);
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_CFG, tsc_cfg_mask, tsc_cfg);
+ if (ret) {
+ dev_err(dev, "Could not config touch\n");
+ return ret;
+ }
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_FRACTION_Z,
+ FRACTION_Z(0xff), FRACTION_Z(ts->fraction_z));
+ if (ret) {
+ dev_err(dev, "Could not config touch\n");
+ return ret;
+ }
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_I_DRIVE,
+ I_DRIVE(0xff), I_DRIVE(ts->i_drive));
+ if (ret) {
+ dev_err(dev, "Could not config touch\n");
+ return ret;
+ }
+
+ /* set FIFO to 1 for single point reading */
+ ret = stmpe_reg_write(stmpe, STMPE_REG_FIFO_TH, 1);
+ if (ret) {
+ dev_err(dev, "Could not set FIFO\n");
+ return ret;
+ }
+
+ ret = stmpe_set_bits(stmpe, STMPE_REG_TSC_CTRL,
+ OP_MODE(0xff), OP_MODE(OP_MOD_XYZ));
+ if (ret) {
+ dev_err(dev, "Could not set mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stmpe_ts_open(struct input_dev *dev)
+{
+ struct stmpe_touch *ts = input_get_drvdata(dev);
+ int ret = 0;
+
+ ret = __stmpe_reset_fifo(ts->stmpe);
+ if (ret)
+ return ret;
+
+ return stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
+ STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);
+}
+
+static void stmpe_ts_close(struct input_dev *dev)
+{
+ struct stmpe_touch *ts = input_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&ts->work);
+
+ stmpe_set_bits(ts->stmpe, STMPE_REG_TSC_CTRL,
+ STMPE_TSC_CTRL_TSC_EN, 0);
+}
+
+static int __devinit stmpe_input_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct stmpe_platform_data *pdata = stmpe->pdata;
+ struct stmpe_touch *ts;
+ struct input_dev *idev;
+ struct stmpe_ts_platform_data *ts_pdata = NULL;
+ int ret = 0;
+ int ts_irq;
+
+ ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
+ if (ts_irq < 0)
+ return ts_irq;
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (!ts) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ idev = input_allocate_device();
+ if (!idev) {
+ ret = -ENOMEM;
+ goto err_free_ts;
+ }
+
+ platform_set_drvdata(pdev, ts);
+ ts->stmpe = stmpe;
+ ts->idev = idev;
+ ts->dev = &pdev->dev;
+
+ if (pdata)
+ ts_pdata = pdata->ts;
+
+ if (ts_pdata) {
+ ts->sample_time = ts_pdata->sample_time;
+ ts->mod_12b = ts_pdata->mod_12b;
+ ts->ref_sel = ts_pdata->ref_sel;
+ ts->adc_freq = ts_pdata->adc_freq;
+ ts->ave_ctrl = ts_pdata->ave_ctrl;
+ ts->touch_det_delay = ts_pdata->touch_det_delay;
+ ts->settling = ts_pdata->settling;
+ ts->fraction_z = ts_pdata->fraction_z;
+ ts->i_drive = ts_pdata->i_drive;
+ }
+
+ INIT_DELAYED_WORK(&ts->work, stmpe_work);
+
+ ret = request_threaded_irq(ts_irq, NULL, stmpe_ts_handler,
+ IRQF_ONESHOT, STMPE_TS_NAME, ts);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request IRQ %d\n", ts_irq);
+ goto err_free_input;
+ }
+
+ ret = stmpe_init_hw(ts);
+ if (ret)
+ goto err_free_irq;
+
+ idev->name = STMPE_TS_NAME;
+ idev->id.bustype = BUS_I2C;
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ idev->open = stmpe_ts_open;
+ idev->close = stmpe_ts_close;
+
+ input_set_drvdata(idev, ts);
+
+ input_set_abs_params(idev, ABS_X, 0, XY_MASK, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, XY_MASK, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0x0, 0xff, 0, 0);
+
+ ret = input_register_device(idev);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register input device\n");
+ goto err_free_irq;
+ }
+
+ return ret;
+
+err_free_irq:
+ free_irq(ts_irq, ts);
+err_free_input:
+ input_free_device(idev);
+ platform_set_drvdata(pdev, NULL);
+err_free_ts:
+ kfree(ts);
+err_out:
+ return ret;
+}
+
+static int __devexit stmpe_ts_remove(struct platform_device *pdev)
+{
+ struct stmpe_touch *ts = platform_get_drvdata(pdev);
+ unsigned int ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
+
+ stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);
+
+ free_irq(ts_irq, ts);
+
+ platform_set_drvdata(pdev, NULL);
+
+ input_unregister_device(ts->idev);
+ input_free_device(ts->idev);
+
+ kfree(ts);
+
+ return 0;
+}
+
+static struct platform_driver stmpe_ts_driver = {
+ .driver = {
+ .name = STMPE_TS_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = stmpe_input_probe,
+ .remove = __devexit_p(stmpe_ts_remove),
+};
+
+static int __init stmpe_ts_init(void)
+{
+ return platform_driver_register(&stmpe_ts_driver);
+}
+
+module_init(stmpe_ts_init);
+
+static void __exit stmpe_ts_exit(void)
+{
+ platform_driver_unregister(&stmpe_ts_driver);
+}
+
+module_exit(stmpe_ts_exit);
+
+MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
+MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" STMPE_TS_NAME);
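
The probe routine above takes its ADC and touch-detection tuning from a stmpe_ts_platform_data hung off the parent MFD's platform data. A minimal sketch of board code wiring that up; the tuning values are arbitrary examples, and MFD fields not read by this driver are omitted:

/*
 * Sketch of board-level wiring for the driver above.  Field names follow
 * what stmpe_input_probe() reads; the numeric values are arbitrary
 * examples, not recommended settings.
 */
static struct stmpe_ts_platform_data board_ts_pdata = {
	.sample_time		= 4,
	.mod_12b		= 1,
	.ref_sel		= 0,
	.adc_freq		= 1,
	.ave_ctrl		= 3,
	.touch_det_delay	= 3,
	.settling		= 2,
	.fraction_z		= 7,
	.i_drive		= 0,
};

static struct stmpe_platform_data board_stmpe_pdata = {
	.blocks	= STMPE_BLOCK_TOUCHSCREEN,
	.ts	= &board_ts_pdata,
	/* irq wiring and other MFD fields omitted */
};
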
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index e14081675bb..ebb11907d40 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -339,7 +339,7 @@ static struct xenbus_driver xenkbd_driver = {
static int __init xenkbd_init(void)
{
- if (!xen_domain())
+ if (!xen_pv_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 2978bdaa6b8..e54e79d4e2c 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1515,8 +1515,13 @@ static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
while (*s) {
int digit1 = 0;
int digit2 = 0;
- if (!isdigit(*s)) return -3;
- while (isdigit(*s)) { digit1 = digit1*10 + (*s - '0'); s++; }
+ char *endp;
+
+ digit1 = simple_strtoul(s, &endp, 10);
+ if (s == endp)
+ return -3;
+ s = endp;
+
if (digit1 <= 0 || digit1 > 30) return -4;
if (*s == 0 || *s == ',' || *s == ' ') {
bmask |= (1 << digit1);
@@ -1526,8 +1531,12 @@ static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
}
if (*s != '-') return -5;
s++;
- if (!isdigit(*s)) return -3;
- while (isdigit(*s)) { digit2 = digit2*10 + (*s - '0'); s++; }
+
+ digit2 = simple_strtoul(s, &endp, 10);
+ if (s == endp)
+ return -3;
+ s = endp;
+
if (digit2 <= 0 || digit2 > 30) return -4;
if (*s == 0 || *s == ',' || *s == ' ') {
if (digit1 > digit2)
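
The capidrv change above replaces the hand-rolled digit loops with simple_strtoul() plus an end-pointer check, which both detects "no digits here" and advances the cursor in one step. A minimal sketch of that idiom:

/*
 * Sketch of the endptr idiom used above: simple_strtoul() leaves endp at
 * the first character it did not consume, so s == endp means no digits
 * were found, and assigning s = endp skips past the parsed number.
 */
char *s = teln, *endp;		/* teln: a "12-30"-style string as above */
unsigned long digit;

digit = simple_strtoul(s, &endp, 10);
if (s == endp)
	return -3;		/* no number at this position */
s = endp;
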
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 70cf6bac7a5..48e6d220f62 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -77,7 +77,7 @@ static void deflect_timer_expire(ulong arg)
case DEFLECT_ALERT:
cs->ics.command = ISDN_CMD_REDIR; /* protocol */
- strcpy(cs->ics.parm.setup.phone,cs->deflect_dest);
+ strlcpy(cs->ics.parm.setup.phone, cs->deflect_dest, sizeof(cs->ics.parm.setup.phone));
strcpy(cs->ics.parm.setup.eazmsn,"Testtext delayed");
divert_if.ll_cmd(&cs->ics);
spin_lock_irqsave(&divert_lock, flags);
@@ -251,7 +251,7 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
case 2: /* redir */
del_timer(&cs->timer);
- strcpy(cs->ics.parm.setup.phone, to_nr);
+ strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
ic.command = ISDN_CMD_REDIR;
if ((i = divert_if.ll_cmd(&ic)))
@@ -480,7 +480,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
if (!cs->timer.expires)
{ strcpy(ic->parm.setup.eazmsn,"Testtext direct");
ic->parm.setup.screen = dv->rule.screen;
- strcpy(ic->parm.setup.phone,dv->rule.to_nr);
+ strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
retval = 5;
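
The divert hunks above swap strcpy() for strlcpy() whenever the destination is the fixed-size phone field, so an over-long redirect number gets truncated instead of overrunning the buffer. A minimal sketch of the bounded copy; "number" is a hypothetical source string:

/*
 * Sketch of the bounded-copy pattern used above.  strlcpy() copies at
 * most sizeof(phone) - 1 characters and always NUL-terminates, whereas
 * strcpy() would write past the end of a too-small destination.
 * "number" is a hypothetical source string.
 */
char phone[32];			/* stands in for parm.setup.phone */

strlcpy(phone, number, sizeof(phone));
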
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 707d9c94cf9..178942a2ee6 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -109,6 +109,9 @@ struct bas_cardstate {
struct urb *urb_int_in; /* URB for interrupt pipe */
unsigned char *int_in_buf;
+ struct work_struct int_in_wq; /* for usb_clear_halt() */
+ struct timer_list timer_int_in; /* int read retry delay */
+ int retry_int_in;
spinlock_t lock; /* locks all following */
int basstate; /* bitmap (BS_*) */
@@ -169,7 +172,7 @@ static char *get_usb_rcmsg(int rc)
case -EAGAIN:
return "start frame too early or too much scheduled";
case -EFBIG:
- return "too many isochronous frames requested";
+ return "too many isoc frames requested";
case -EPIPE:
return "endpoint stalled";
case -EMSGSIZE:
@@ -200,13 +203,13 @@ static char *get_usb_statmsg(int status)
case -ENOENT:
return "unlinked (sync)";
case -EINPROGRESS:
- return "pending";
+ return "URB still pending";
case -EPROTO:
- return "bit stuffing error, timeout, or unknown USB error";
+ return "bitstuff error, timeout, or unknown USB error";
case -EILSEQ:
return "CRC mismatch, timeout, or unknown USB error";
case -ETIME:
- return "timed out";
+ return "USB response timeout";
case -EPIPE:
return "endpoint stalled";
case -ECOMM:
@@ -214,15 +217,15 @@ static char *get_usb_statmsg(int status)
case -ENOSR:
return "OUT buffer underrun";
case -EOVERFLOW:
- return "too much data";
+ return "endpoint babble";
case -EREMOTEIO:
- return "short packet detected";
+ return "short packet";
case -ENODEV:
return "device removed";
case -EXDEV:
- return "partial isochronous transfer";
+ return "partial isoc transfer";
case -EINVAL:
- return "invalid argument";
+ return "ISO madness";
case -ECONNRESET:
return "unlinked (async)";
case -ESHUTDOWN:
@@ -350,7 +353,7 @@ static inline void error_hangup(struct bc_state *bcs)
* reset Gigaset device because of an unrecoverable error
* This function may be called from any context, and takes care of
* scheduling the necessary actions for execution outside of interrupt context.
- * cs->lock must not be held.
+ * cs->hw.bas->lock must not be held.
* argument:
* controller state structure
*/
@@ -358,7 +361,9 @@ static inline void error_reset(struct cardstate *cs)
{
/* reset interrupt pipe to recover (ignore errors) */
update_basstate(cs->hw.bas, BS_RESETTING, 0);
- req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT);
+ if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT))
+ /* submission failed, escalate to USB port reset */
+ usb_queue_reset_device(cs->hw.bas->interface);
}
/* check_pending
@@ -438,23 +443,27 @@ static void cmd_in_timeout(unsigned long data)
return;
}
- if (ucs->retry_cmd_in++ < BAS_RETRY) {
- dev_notice(cs->dev, "control read: timeout, retry %d\n",
- ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
- return;
- } else {
+ if (ucs->retry_cmd_in++ >= BAS_RETRY) {
dev_err(cs->dev,
"control read: timeout, giving up after %d tries\n",
ucs->retry_cmd_in);
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ error_reset(cs);
+ return;
+ }
+
+ gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
+ __func__, ucs->retry_cmd_in);
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc < 0) {
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ if (rc != -ENODEV)
+ error_reset(cs);
}
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- error_reset(cs);
}
/* read_ctrl_callback
@@ -470,18 +479,11 @@ static void read_ctrl_callback(struct urb *urb)
struct cardstate *cs = inbuf->cs;
struct bas_cardstate *ucs = cs->hw.bas;
int status = urb->status;
- int have_data = 0;
unsigned numbytes;
int rc;
update_basstate(ucs, 0, BS_ATRDPEND);
wake_up(&ucs->waitqueue);
-
- if (!ucs->rcvbuf_size) {
- dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
- return;
- }
-
del_timer(&ucs->timer_cmd_in);
switch (status) {
@@ -495,19 +497,10 @@ static void read_ctrl_callback(struct urb *urb)
numbytes = ucs->rcvbuf_size;
}
- /* copy received bytes to inbuf */
- have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
-
- if (unlikely(numbytes < ucs->rcvbuf_size)) {
- /* incomplete - resubmit for remaining bytes */
- ucs->rcvbuf_size -= numbytes;
- ucs->retry_cmd_in = 0;
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
- return;
- error_reset(cs);
+ /* copy received bytes to inbuf, notify event layer */
+ if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
+ gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(cs);
}
break;
@@ -516,37 +509,32 @@ static void read_ctrl_callback(struct urb *urb)
case -EINPROGRESS: /* pending */
case -ENODEV: /* device removed */
case -ESHUTDOWN: /* device shut down */
- /* no action necessary */
+ /* no further action necessary */
gig_dbg(DEBUG_USBREQ, "%s: %s",
__func__, get_usb_statmsg(status));
break;
- default: /* severe trouble */
- dev_warn(cs->dev, "control read: %s\n",
- get_usb_statmsg(status));
+ default: /* other errors: retry */
if (ucs->retry_cmd_in++ < BAS_RETRY) {
- dev_notice(cs->dev, "control read: retry %d\n",
- ucs->retry_cmd_in);
+ gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
+ get_usb_statmsg(status), ucs->retry_cmd_in);
rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
+ if (rc >= 0)
+ /* successfully resubmitted, skip freeing */
return;
- } else {
- dev_err(cs->dev,
- "control read: giving up after %d tries\n",
- ucs->retry_cmd_in);
+ if (rc == -ENODEV)
+ /* disconnect, no further action necessary */
+ break;
}
+ dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
+ get_usb_statmsg(status), ucs->retry_cmd_in);
error_reset(cs);
}
+ /* read finished, free buffer */
kfree(ucs->rcvbuf);
ucs->rcvbuf = NULL;
ucs->rcvbuf_size = 0;
- if (have_data) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- }
}
/* atread_submit
@@ -605,14 +593,67 @@ static int atread_submit(struct cardstate *cs, int timeout)
if (timeout > 0) {
gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
- ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10;
- ucs->timer_cmd_in.data = (unsigned long) cs;
- ucs->timer_cmd_in.function = cmd_in_timeout;
- add_timer(&ucs->timer_cmd_in);
+ mod_timer(&ucs->timer_cmd_in, jiffies + timeout * HZ / 10);
}
return 0;
}
+/* int_in_work
+ * workqueue routine to clear halt on interrupt in endpoint
+ */
+
+static void int_in_work(struct work_struct *work)
+{
+ struct bas_cardstate *ucs =
+ container_of(work, struct bas_cardstate, int_in_wq);
+ struct urb *urb = ucs->urb_int_in;
+ struct cardstate *cs = urb->context;
+ int rc;
+
+ /* clear halt condition */
+ rc = usb_clear_halt(ucs->udev, urb->pipe);
+ gig_dbg(DEBUG_USBREQ, "clear_halt: %s", get_usb_rcmsg(rc));
+ if (rc == 0)
+ /* success, resubmit interrupt read URB */
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc != 0 && rc != -ENODEV) {
+ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
+ rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
+ if (rc == 0) {
+ rc = usb_reset_device(ucs->udev);
+ usb_unlock_device(ucs->udev);
+ }
+ }
+ ucs->retry_int_in = 0;
+}
+
+/* int_in_resubmit
+ * timer routine for interrupt read delayed resubmit
+ * argument:
+ * controller state structure
+ */
+static void int_in_resubmit(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ if (ucs->retry_int_in++ >= BAS_RETRY) {
+ dev_err(cs->dev, "interrupt read: giving up after %d tries\n",
+ ucs->retry_int_in);
+ usb_queue_reset_device(ucs->interface);
+ return;
+ }
+
+ gig_dbg(DEBUG_USBREQ, "%s: retry %d", __func__, ucs->retry_int_in);
+ rc = usb_submit_urb(ucs->urb_int_in, GFP_ATOMIC);
+ if (rc != 0 && rc != -ENODEV) {
+ dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ usb_queue_reset_device(ucs->interface);
+ }
+}
+
/* read_int_callback
* USB completion handler for interrupt pipe input
* called by the USB subsystem in interrupt context
@@ -633,19 +674,29 @@ static void read_int_callback(struct urb *urb)
switch (status) {
case 0: /* success */
+ ucs->retry_int_in = 0;
break;
+ case -EPIPE: /* endpoint stalled */
+ schedule_work(&ucs->int_in_wq);
+ /* fall through */
case -ENOENT: /* cancelled */
case -ECONNRESET: /* cancelled (async) */
case -EINPROGRESS: /* pending */
- /* ignore silently */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* no further action necessary */
gig_dbg(DEBUG_USBREQ, "%s: %s",
__func__, get_usb_statmsg(status));
return;
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__);
+ case -EPROTO: /* protocol error or unplug */
+ case -EILSEQ:
+ case -ETIME:
+ /* resubmit after delay */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ mod_timer(&ucs->timer_int_in, jiffies + HZ / 10);
return;
- default: /* severe trouble */
+ default: /* other errors: just resubmit */
dev_warn(cs->dev, "interrupt read: %s\n",
get_usb_statmsg(status));
goto resubmit;
@@ -723,6 +774,13 @@ static void read_int_callback(struct urb *urb)
break;
}
spin_lock_irqsave(&cs->lock, flags);
+ if (ucs->basstate & BS_ATRDPEND) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ dev_warn(cs->dev,
+ "HD_RECEIVEATDATA_ACK(%d) during HD_READ_ATMESSAGE(%d) ignored\n",
+ l, ucs->rcvbuf_size);
+ break;
+ }
if (ucs->rcvbuf_size) {
/* throw away previous buffer - we have no queue */
dev_err(cs->dev,
@@ -735,7 +793,6 @@ static void read_int_callback(struct urb *urb)
if (ucs->rcvbuf == NULL) {
spin_unlock_irqrestore(&cs->lock, flags);
dev_err(cs->dev, "out of memory receiving AT data\n");
- error_reset(cs);
break;
}
ucs->rcvbuf_size = l;
@@ -745,13 +802,10 @@ static void read_int_callback(struct urb *urb)
kfree(ucs->rcvbuf);
ucs->rcvbuf = NULL;
ucs->rcvbuf_size = 0;
- if (rc != -ENODEV) {
- spin_unlock_irqrestore(&cs->lock, flags);
- error_reset(cs);
- break;
- }
}
spin_unlock_irqrestore(&cs->lock, flags);
+ if (rc < 0 && rc != -ENODEV)
+ error_reset(cs);
break;
case HD_RESET_INTERRUPT_PIPE_ACK:
@@ -818,6 +872,7 @@ static void read_iso_callback(struct urb *urb)
tasklet_hi_schedule(&ubc->rcvd_tasklet);
} else {
/* tasklet still busy, drop data and resubmit URB */
+ gig_dbg(DEBUG_ISO, "%s: overrun", __func__);
ubc->loststatus = status;
for (i = 0; i < BAS_NUMFRAMES; i++) {
ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
@@ -833,13 +888,11 @@ static void read_iso_callback(struct urb *urb)
urb->dev = bcs->cs->hw.bas->udev;
urb->transfer_flags = URB_ISO_ASAP;
urb->number_of_packets = BAS_NUMFRAMES;
- gig_dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit",
- __func__);
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(rc != 0 && rc != -ENODEV)) {
dev_err(bcs->cs->dev,
- "could not resubmit isochronous read "
- "URB: %s\n", get_usb_rcmsg(rc));
+ "could not resubmit isoc read URB: %s\n",
+ get_usb_rcmsg(rc));
dump_urb(DEBUG_ISO, "isoc read", urb);
error_hangup(bcs);
}
@@ -1081,7 +1134,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
gig_dbg(DEBUG_ISO, "%s: disconnected", __func__);
else
dev_err(ucx->bcs->cs->dev,
- "could not submit isochronous write URB: %s\n",
+ "could not submit isoc write URB: %s\n",
get_usb_rcmsg(rc));
return rc;
}
@@ -1126,7 +1179,7 @@ static void write_iso_tasklet(unsigned long data)
ubc->isooutovfl = NULL;
spin_unlock_irqrestore(&ubc->isooutlock, flags);
if (ovfl) {
- dev_err(cs->dev, "isochronous write buffer underrun\n");
+ dev_err(cs->dev, "isoc write underrun\n");
error_hangup(bcs);
break;
}
@@ -1151,7 +1204,7 @@ static void write_iso_tasklet(unsigned long data)
if (next) {
/* couldn't put it back */
dev_err(cs->dev,
- "losing isochronous write URB\n");
+ "losing isoc write URB\n");
error_hangup(bcs);
}
}
@@ -1178,10 +1231,10 @@ static void write_iso_tasklet(unsigned long data)
if (ifd->status ||
ifd->actual_length != ifd->length) {
dev_warn(cs->dev,
- "isochronous write: frame %d: %s, "
- "only %d of %d bytes sent\n",
- i, get_usb_statmsg(ifd->status),
- ifd->actual_length, ifd->length);
+ "isoc write: frame %d[%d/%d]: %s\n",
+ i, ifd->actual_length,
+ ifd->length,
+ get_usb_statmsg(ifd->status));
offset = (ifd->offset +
ifd->actual_length)
% BAS_OUTBUFSIZE;
@@ -1190,11 +1243,11 @@ static void write_iso_tasklet(unsigned long data)
}
break;
case -EPIPE: /* stall - probably underrun */
- dev_err(cs->dev, "isochronous write stalled\n");
+ dev_err(cs->dev, "isoc write: stalled\n");
error_hangup(bcs);
break;
- default: /* severe trouble */
- dev_warn(cs->dev, "isochronous write: %s\n",
+ default: /* other errors */
+ dev_warn(cs->dev, "isoc write: %s\n",
get_usb_statmsg(status));
}
@@ -1250,6 +1303,7 @@ static void read_iso_tasklet(unsigned long data)
struct cardstate *cs = bcs->cs;
struct urb *urb;
int status;
+ struct usb_iso_packet_descriptor *ifd;
char *rcvbuf;
unsigned long flags;
int totleft, numbytes, offset, frame, rc;
@@ -1267,8 +1321,7 @@ static void read_iso_tasklet(unsigned long data)
ubc->isoindone = NULL;
if (unlikely(ubc->loststatus != -EINPROGRESS)) {
dev_warn(cs->dev,
- "isochronous read overrun, "
- "dropped URB with status: %s, %d bytes lost\n",
+ "isoc read overrun, URB dropped (status: %s, %d bytes)\n",
get_usb_statmsg(ubc->loststatus),
ubc->isoinlost);
ubc->loststatus = -EINPROGRESS;
@@ -1298,11 +1351,11 @@ static void read_iso_tasklet(unsigned long data)
__func__, get_usb_statmsg(status));
continue; /* -> skip */
case -EPIPE:
- dev_err(cs->dev, "isochronous read stalled\n");
+ dev_err(cs->dev, "isoc read: stalled\n");
error_hangup(bcs);
continue; /* -> skip */
- default: /* severe trouble */
- dev_warn(cs->dev, "isochronous read: %s\n",
+ default: /* other error */
+ dev_warn(cs->dev, "isoc read: %s\n",
get_usb_statmsg(status));
goto error;
}
@@ -1310,40 +1363,52 @@ static void read_iso_tasklet(unsigned long data)
rcvbuf = urb->transfer_buffer;
totleft = urb->actual_length;
for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
- numbytes = urb->iso_frame_desc[frame].actual_length;
- if (unlikely(urb->iso_frame_desc[frame].status))
+ ifd = &urb->iso_frame_desc[frame];
+ numbytes = ifd->actual_length;
+ switch (ifd->status) {
+ case 0: /* success */
+ break;
+ case -EPROTO: /* protocol error or unplug */
+ case -EILSEQ:
+ case -ETIME:
+ /* probably just disconnected, ignore */
+ gig_dbg(DEBUG_ISO,
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ get_usb_statmsg(ifd->status));
+ break;
+ default: /* other error */
+ /* report, assume transferred bytes are ok */
dev_warn(cs->dev,
- "isochronous read: frame %d[%d]: %s\n",
+ "isoc read: frame %d[%d]: %s\n",
frame, numbytes,
- get_usb_statmsg(
- urb->iso_frame_desc[frame].status));
+ get_usb_statmsg(ifd->status));
+ }
if (unlikely(numbytes > BAS_MAXFRAME))
dev_warn(cs->dev,
- "isochronous read: frame %d: "
- "numbytes (%d) > BAS_MAXFRAME\n",
- frame, numbytes);
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ "exceeds max frame size");
if (unlikely(numbytes > totleft)) {
dev_warn(cs->dev,
- "isochronous read: frame %d: "
- "numbytes (%d) > totleft (%d)\n",
- frame, numbytes, totleft);
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ "exceeds total transfer length");
numbytes = totleft;
}
- offset = urb->iso_frame_desc[frame].offset;
+ offset = ifd->offset;
if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
dev_warn(cs->dev,
- "isochronous read: frame %d: "
- "offset (%d) + numbytes (%d) "
- "> BAS_INBUFSIZE\n",
- frame, offset, numbytes);
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ "exceeds end of buffer");
numbytes = BAS_INBUFSIZE - offset;
}
gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
totleft -= numbytes;
}
if (unlikely(totleft > 0))
- dev_warn(cs->dev,
- "isochronous read: %d data bytes missing\n",
+ dev_warn(cs->dev, "isoc read: %d data bytes missing\n",
totleft);
error:
@@ -1359,9 +1424,9 @@ error:
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(rc != 0 && rc != -ENODEV)) {
dev_err(cs->dev,
- "could not resubmit isochronous read URB: %s\n",
+ "could not resubmit isoc read URB: %s\n",
get_usb_rcmsg(rc));
- dump_urb(DEBUG_ISO, "resubmit iso read", urb);
+ dump_urb(DEBUG_ISO, "resubmit isoc read", urb);
error_hangup(bcs);
}
}
@@ -1373,12 +1438,12 @@ error:
/* req_timeout
* timeout routine for control output request
* argument:
- * B channel control structure
+ * controller state structure
*/
static void req_timeout(unsigned long data)
{
- struct bc_state *bcs = (struct bc_state *) data;
- struct bas_cardstate *ucs = bcs->cs->hw.bas;
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bas_cardstate *ucs = cs->hw.bas;
int pending;
unsigned long flags;
@@ -1395,38 +1460,44 @@ static void req_timeout(unsigned long data)
break;
case HD_OPEN_ATCHANNEL:
- dev_err(bcs->cs->dev, "timeout opening AT channel\n");
- error_reset(bcs->cs);
+ dev_err(cs->dev, "timeout opening AT channel\n");
+ error_reset(cs);
break;
- case HD_OPEN_B2CHANNEL:
case HD_OPEN_B1CHANNEL:
- dev_err(bcs->cs->dev, "timeout opening channel %d\n",
- bcs->channel + 1);
- error_hangup(bcs);
+ dev_err(cs->dev, "timeout opening channel 1\n");
+ error_hangup(&cs->bcs[0]);
+ break;
+
+ case HD_OPEN_B2CHANNEL:
+ dev_err(cs->dev, "timeout opening channel 2\n");
+ error_hangup(&cs->bcs[1]);
break;
case HD_CLOSE_ATCHANNEL:
- dev_err(bcs->cs->dev, "timeout closing AT channel\n");
- error_reset(bcs->cs);
+ dev_err(cs->dev, "timeout closing AT channel\n");
+ error_reset(cs);
break;
- case HD_CLOSE_B2CHANNEL:
case HD_CLOSE_B1CHANNEL:
- dev_err(bcs->cs->dev, "timeout closing channel %d\n",
- bcs->channel + 1);
- error_reset(bcs->cs);
+ dev_err(cs->dev, "timeout closing channel 1\n");
+ error_reset(cs);
+ break;
+
+ case HD_CLOSE_B2CHANNEL:
+ dev_err(cs->dev, "timeout closing channel 2\n");
+ error_reset(cs);
break;
case HD_RESET_INTERRUPT_PIPE:
/* error recovery escalation */
- dev_err(bcs->cs->dev,
+ dev_err(cs->dev,
"reset interrupt pipe timeout, attempting USB reset\n");
- usb_queue_reset_device(bcs->cs->hw.bas->interface);
+ usb_queue_reset_device(ucs->interface);
break;
default:
- dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n",
+ dev_warn(cs->dev, "request 0x%02x timed out, clearing\n",
pending);
}
@@ -1557,10 +1628,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
if (timeout > 0) {
gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
- ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10;
- ucs->timer_ctrl.data = (unsigned long) bcs;
- ucs->timer_ctrl.function = req_timeout;
- add_timer(&ucs->timer_ctrl);
+ mod_timer(&ucs->timer_ctrl, jiffies + timeout * HZ / 10);
}
spin_unlock_irqrestore(&ucs->lock, flags);
@@ -1590,21 +1658,20 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
if (cs->hw.bas->basstate & BS_SUSPEND) {
dev_notice(cs->dev,
- "not starting isochronous I/O, "
- "suspend in progress\n");
+ "not starting isoc I/O, suspend in progress\n");
spin_unlock_irqrestore(&cs->lock, flags);
return -EHOSTUNREACH;
}
ret = starturbs(bcs);
if (ret < 0) {
+ spin_unlock_irqrestore(&cs->lock, flags);
dev_err(cs->dev,
- "could not start isochronous I/O for channel B%d: %s\n",
+ "could not start isoc I/O for channel B%d: %s\n",
bcs->channel + 1,
ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
if (ret != -ENODEV)
error_hangup(bcs);
- spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
@@ -1614,11 +1681,11 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
dev_err(cs->dev, "could not open channel B%d\n",
bcs->channel + 1);
stopurbs(bcs->hw.bas);
- if (ret != -ENODEV)
- error_hangup(bcs);
}
spin_unlock_irqrestore(&cs->lock, flags);
+ if (ret < 0 && ret != -ENODEV)
+ error_hangup(bcs);
return ret;
}
@@ -1826,10 +1893,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) {
gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs",
ATRDY_TIMEOUT);
- ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10;
- ucs->timer_atrdy.data = (unsigned long) cs;
- ucs->timer_atrdy.function = atrdy_timeout;
- add_timer(&ucs->timer_atrdy);
+ mod_timer(&ucs->timer_atrdy, jiffies + ATRDY_TIMEOUT * HZ / 10);
}
return 0;
}
@@ -1914,6 +1978,28 @@ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
* The next command will reopen the AT channel automatically.
*/
if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) {
+ /* If an HD_RECEIVEATDATA_ACK message remains unhandled
+ * because of an error, the base never sends another one.
+ * The response channel is thus effectively blocked.
+ * Closing and reopening the AT channel does *not* clear
+ * this condition.
+ * As a stopgap measure, submit a zero-length AT read
+ * before closing the AT channel. This has the undocumented
+ * effect of triggering a new HD_RECEIVEATDATA_ACK message
+ * from the base if necessary.
+ * The subsequent AT channel close then discards any pending
+ * messages.
+ */
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!(cs->hw.bas->basstate & BS_ATRDPEND)) {
+ kfree(cs->hw.bas->rcvbuf);
+ cs->hw.bas->rcvbuf = NULL;
+ cs->hw.bas->rcvbuf_size = 0;
+ cs->hw.bas->retry_cmd_in = 0;
+ atread_submit(cs, 0);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
if (cb->wake_tasklet)
tasklet_schedule(cb->wake_tasklet);
@@ -2010,7 +2096,7 @@ static int gigaset_freebcshw(struct bc_state *bcs)
/* kill URBs and tasklets before freeing - better safe than sorry */
ubc->running = 0;
- gig_dbg(DEBUG_INIT, "%s: killing iso URBs", __func__);
+ gig_dbg(DEBUG_INIT, "%s: killing isoc URBs", __func__);
for (i = 0; i < BAS_OUTURBS; ++i) {
usb_kill_urb(ubc->isoouturbs[i].urb);
usb_free_urb(ubc->isoouturbs[i].urb);
@@ -2131,10 +2217,12 @@ static int gigaset_initcshw(struct cardstate *cs)
ucs->pending = 0;
ucs->basstate = 0;
- init_timer(&ucs->timer_ctrl);
- init_timer(&ucs->timer_atrdy);
- init_timer(&ucs->timer_cmd_in);
+ setup_timer(&ucs->timer_ctrl, req_timeout, (unsigned long) cs);
+ setup_timer(&ucs->timer_atrdy, atrdy_timeout, (unsigned long) cs);
+ setup_timer(&ucs->timer_cmd_in, cmd_in_timeout, (unsigned long) cs);
+ setup_timer(&ucs->timer_int_in, int_in_resubmit, (unsigned long) cs);
init_waitqueue_head(&ucs->waitqueue);
+ INIT_WORK(&ucs->int_in_wq, int_in_work);
return 1;
}
@@ -2282,6 +2370,7 @@ static int gigaset_probe(struct usb_interface *interface,
get_usb_rcmsg(rc));
goto error;
}
+ ucs->retry_int_in = 0;
/* tell the device that the driver is ready */
rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
@@ -2334,10 +2423,12 @@ static void gigaset_disconnect(struct usb_interface *interface)
/* stop driver (common part) */
gigaset_stop(cs);
- /* stop timers and URBs, free ressources */
+ /* stop delayed work and URBs, free resources */
del_timer_sync(&ucs->timer_ctrl);
del_timer_sync(&ucs->timer_atrdy);
del_timer_sync(&ucs->timer_cmd_in);
+ del_timer_sync(&ucs->timer_int_in);
+ cancel_work_sync(&ucs->int_in_wq);
freeurbs(cs);
usb_set_intfdata(interface, NULL);
kfree(ucs->rcvbuf);
@@ -2400,10 +2491,14 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
/* in case of timeout, proceed anyway */
}
- /* kill all URBs and timers that might still be pending */
+ /* kill all URBs and delayed work that might still be pending */
usb_kill_urb(ucs->urb_ctrl);
usb_kill_urb(ucs->urb_int_in);
del_timer_sync(&ucs->timer_ctrl);
+ del_timer_sync(&ucs->timer_atrdy);
+ del_timer_sync(&ucs->timer_cmd_in);
+ del_timer_sync(&ucs->timer_int_in);
+ cancel_work_sync(&ucs->int_in_wq);
gig_dbg(DEBUG_SUSPEND, "suspend complete");
return 0;
@@ -2425,6 +2520,7 @@ static int gigaset_resume(struct usb_interface *intf)
get_usb_rcmsg(rc));
return rc;
}
+ ucs->retry_int_in = 0;
/* clear suspend flag to reallow activity */
update_basstate(ucs, 0, BS_SUSPEND);
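
Several bas-gigaset hunks above convert open-coded init_timer() plus manual expires/data/function assignment and add_timer() into setup_timer() at init time and mod_timer() when arming, which is also safe if the timer is already pending. A minimal sketch of that 2.6.36-era pattern; struct my_state, my_timeout() and the helpers are hypothetical:

/*
 * Sketch of the timer conversion applied above (setup_timer()/mod_timer(),
 * the pre-timer_setup() API of this kernel generation).  The structure
 * and function names are hypothetical.
 */
struct my_state {
	struct timer_list timer;
};

static void my_timeout(unsigned long data)
{
	struct my_state *st = (struct my_state *)data;
	/* handle the timeout for st */
}

static void my_init(struct my_state *st)
{
	/* once, at init time: bind callback and argument */
	setup_timer(&st->timer, my_timeout, (unsigned long)st);
}

static void my_arm(struct my_state *st, int tenths)
{
	/* each time the timeout is (re)armed; safe on a pending timer */
	mod_timer(&st->timer, jiffies + tenths * HZ / 10);
}
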
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 3ca561eccd9..db621db67f6 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -1026,32 +1026,6 @@ struct cardstate *gigaset_get_cs_by_id(int id)
return ret;
}
-void gigaset_debugdrivers(void)
-{
- unsigned long flags;
- static struct cardstate *cs;
- struct gigaset_driver *drv;
- unsigned i;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_for_each_entry(drv, &drivers, list) {
- gig_dbg(DEBUG_DRIVER, "driver %p", drv);
- spin_lock(&drv->lock);
- for (i = 0; i < drv->minors; ++i) {
- gig_dbg(DEBUG_DRIVER, " index %u", i);
- cs = drv->cs + i;
- gig_dbg(DEBUG_DRIVER, " cardstate %p", cs);
- gig_dbg(DEBUG_DRIVER, " flags 0x%02x", cs->flags);
- gig_dbg(DEBUG_DRIVER, " minor_index %u",
- cs->minor_index);
- gig_dbg(DEBUG_DRIVER, " driver %p", cs->driver);
- gig_dbg(DEBUG_DRIVER, " i4l id %d", cs->myid);
- }
- spin_unlock(&drv->lock);
- }
- spin_unlock_irqrestore(&driver_lock, flags);
-}
-
static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
{
unsigned long flags;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index a69512fb119..6dd360734cf 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -70,7 +70,6 @@ enum debuglevel {
DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
DEBUG_LLDATA = 0x00100, /* sent/received LL data */
DEBUG_EVENT = 0x00200, /* event processing */
- DEBUG_DRIVER = 0x00400, /* driver structure */
DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */
DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */
@@ -727,7 +726,7 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
/* Deallocate driver structure. */
void gigaset_freedriver(struct gigaset_driver *drv);
-void gigaset_debugdrivers(void);
+
struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
struct cardstate *gigaset_get_cs_by_id(int id);
void gigaset_blockdriver(struct gigaset_driver *drv);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 34bca37d65b..9bec8b96996 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -201,8 +201,6 @@ static int command_from_LL(isdn_ctrl *cntrl)
int i;
size_t l;
- gigaset_debugdrivers();
-
gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx",
cntrl->driver, cntrl->command, cntrl->arg);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 2dfd346fc88..f39ccdf87a1 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -842,13 +842,14 @@ static inline void trans_receive(unsigned char *src, unsigned count,
if (unlikely(bcs->ignore)) {
bcs->ignore--;
- hdlc_flush(bcs);
return;
}
skb = bcs->rx_skb;
- if (skb == NULL)
+ if (skb == NULL) {
skb = gigaset_new_rx_skb(bcs);
- bcs->hw.bas->goodbytes += skb->len;
+ if (skb == NULL)
+ return;
+ }
dobytes = bcs->rx_bufsize - skb->len;
while (count > 0) {
dst = skb_put(skb, count < dobytes ? count : dobytes);
@@ -860,6 +861,7 @@ static inline void trans_receive(unsigned char *src, unsigned count,
if (dobytes == 0) {
dump_bytes(DEBUG_STREAM_DUMP,
"rcv data", skb->data, skb->len);
+ bcs->hw.bas->goodbytes += skb->len;
gigaset_skb_rcvd(bcs, skb);
skb = gigaset_new_rx_skb(bcs);
if (skb == NULL)
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 5dbcbe3a54a..b99b906ea9b 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA
config ISDN_DRV_AVMB1_B1PCMCIA
tristate "AVM B1/M1/M2 PCMCIA support"
+ depends on PCMCIA
help
Enable support for the PCMCIA version of the AVM B1 card.
config ISDN_DRV_AVMB1_AVM_CS
tristate "AVM B1/M1/M2 PCMCIA cs module"
- depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA
+ depends on ISDN_DRV_AVMB1_B1PCMCIA
help
Enable the PCMCIA client driver for the AVM B1/M1/M2
PCMCIA cards.
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index f410d0eb2fe..09b1795516f 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -20,7 +20,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -76,9 +75,8 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
{
/* The io structure describes IO port mapping */
- p_dev->io.NumPorts1 = 16;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
@@ -120,13 +118,9 @@ static int avmcs_configcheck(struct pcmcia_device *p_dev,
if (cf->io.nwin <= 0)
return -ENODEV;
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.NumPorts1 = cf->io.win[0].len;
- p_dev->io.NumPorts2 = 0;
- printk(KERN_INFO "avm_cs: testing i/o %#x-%#x\n",
- p_dev->io.BasePort1,
- p_dev->io.BasePort1+p_dev->io.NumPorts1-1);
- return pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->resource[0]->end = cf->io.win[0].len;
+ return pcmcia_request_io(p_dev);
}
static int avmcs_config(struct pcmcia_device *link)
@@ -192,9 +186,10 @@ static int avmcs_config(struct pcmcia_device *link)
default:
case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
}
- if ((i = (*addcard)(link->io.BasePort1, link->irq)) < 0) {
- dev_err(&link->dev, "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
- link->io.BasePort1, link->irq);
+ if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) {
+ dev_err(&link->dev,
+ "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
+ (unsigned int) link->resource[0]->start, link->irq);
avmcs_release(link);
return -ENODEV;
}
@@ -212,7 +207,7 @@ static int avmcs_config(struct pcmcia_device *link)
static void avmcs_release(struct pcmcia_device *link)
{
- b1pcmcia_delcard(link->io.BasePort1, link->irq);
+ b1pcmcia_delcard(link->resource[0]->start, link->irq);
pcmcia_disable_device(link);
} /* avmcs_release */
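
The avm_cs conversion above retires the old p_dev->io request structure in favour of the generic p_dev->resource[] array, which pcmcia_request_io() now consumes without a separate argument. A minimal sketch of the two halves of that pattern in the 2.6.36-era PCMCIA core; cf and ret are assumed to come from the surrounding config-check callback:

/*
 * Sketch of the resource[]-based I/O setup shown above (2.6.36-era
 * PCMCIA core).  "cf" is the CIS config table entry handed to the
 * config-check callback; "ret" is a local int.
 */

/* probe time: fixed attributes of the first I/O window */
p_dev->resource[0]->end = 16;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;

/* config check: fill in the window found in the CIS, then claim it */
p_dev->resource[0]->start = cf->io.win[0].base;
p_dev->resource[0]->end   = cf->io.win[0].len;
ret = pcmcia_request_io(p_dev);
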
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 7715d3242ec..d3530f6e811 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1273,6 +1273,7 @@ static int __devinit c4_probe(struct pci_dev *dev,
if (retval != 0) {
printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
nr, param.port, param.irq, param.membase);
+ pci_disable_device(dev);
return -ENODEV;
}
return 0;
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index 5a3f8309801..a79eb5afb92 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -210,6 +210,7 @@ static int __devinit t1pci_probe(struct pci_dev *dev,
if (retval != 0) {
printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
param.port, param.irq, param.membase);
+ pci_disable_device(dev);
return -ENODEV;
}
return 0;
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index 33ce89eed65..36264012088 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -862,7 +862,7 @@ void diva_mnt_add_xdi_adapter (const DESCRIPTOR* d) {
diva_os_spin_lock_magic_t old_irql, old_irql1;
dword sec, usec, logical, serial, org_mask;
int id, best_id = 0, free_id = -1;
- char tmp[256];
+ char tmp[128];
diva_dbg_entry_head_t* pmsg = NULL;
int len;
word size;
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
index 8ea587783e1..02eed6b4354 100644
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ b/drivers/isdn/hardware/eicon/debuglib.h
@@ -249,7 +249,7 @@ typedef struct _DbgHandle_
} regTime ; /* timestamp for registration */
void *pIrp ; /* ptr to pending i/o request */
unsigned long dbgMask ; /* current debug mask */
- char drvName[16] ; /* ASCII name of registered driver */
+ char drvName[128] ; /* ASCII name of registered driver */
char drvTag[64] ; /* revision string */
DbgEnd dbg_end ; /* function for debug closing */
DbgLog dbg_prt ; /* function for debug appending */
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index d2dd61d65d5..af25e1f3efd 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -1094,6 +1094,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_info("mISDN: do not have informations about adapter at %s\n",
pci_name(pdev));
kfree(card);
+ pci_disable_device(pdev);
return -EINVAL;
} else
pr_notice("mISDN: found adapter %s at %s\n",
@@ -1103,7 +1104,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err) {
- pci_disable_device(card->pdev);
+ pci_disable_device(pdev);
kfree(card);
pci_set_drvdata(pdev, NULL);
} else if (ent->driver_data == INF_SCT_1) {
@@ -1114,6 +1115,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
if (!sc) {
release_card(card);
+ pci_disable_device(pdev);
return -ENOMEM;
}
sc->irq = card->irq;
@@ -1121,6 +1123,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sc->ci = card->ci + i;
err = setup_instance(sc);
if (err) {
+ pci_disable_device(pdev);
kfree(sc);
release_card(card);
break;
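
The mISDNinfineon (and c4/t1pci) fixes above make sure pci_disable_device() is called on every failure path once pci_enable_device() has succeeded, so a failed probe does not leave the device enabled. A minimal sketch of the balanced pattern; setup_instance() stands in for whatever driver-specific setup may fail afterwards:

/*
 * Sketch of the balanced enable/disable pattern enforced above.
 * setup_instance() is a placeholder for driver-specific setup that may
 * fail after the device has been enabled.
 */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = setup_instance(pdev);
	if (err) {
		pci_disable_device(pdev);	/* undo the enable on failure */
		return err;
	}
	return 0;
}
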
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index a80a7617f16..94263c22b87 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -20,7 +20,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -79,11 +78,10 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
/* The io structure describes IO port mapping */
- p_dev->io.NumPorts1 = 16;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.NumPorts2 = 16;
- p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
- p_dev->io.IOAddrLines = 5;
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[1]->end = 16;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
@@ -127,13 +125,10 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
if (cf->io.nwin <= 0)
return -ENODEV;
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.NumPorts1 = cf->io.win[0].len;
- p_dev->io.NumPorts2 = 0;
- printk(KERN_INFO "avma1_cs: testing i/o %#x-%#x\n",
- p_dev->io.BasePort1,
- p_dev->io.BasePort1+p_dev->io.NumPorts1-1);
- return pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->resource[0]->end = cf->io.win[0].len;
+ p_dev->io_lines = 5;
+ return pcmcia_request_io(p_dev);
}
@@ -181,16 +176,18 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
}
printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
- link->io.BasePort1, link->irq);
+ (unsigned int) link->resource[0]->start, link->irq);
icard.para[0] = link->irq;
- icard.para[1] = link->io.BasePort1;
+ icard.para[1] = link->resource[0]->start;
icard.protocol = isdnprot;
icard.typ = ISDN_CTYPE_A1_PCMCIA;
i = hisax_init_pcmcia(link, &busy, &icard);
if (i < 0) {
- printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 PCMCIA %d at i/o %#x\n", i, link->io.BasePort1);
+ printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 "
+ "PCMCIA %d at i/o %#x\n", i,
+ (unsigned int) link->resource[0]->start);
avma1cs_release(link);
return -ENODEV;
}
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index 218927e3a4e..b3c08aaf41c 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -46,7 +46,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -127,9 +126,8 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
and attributes of IO windows) are fixed by the nature of the
device, and can be hard-wired here.
*/
- link->io.NumPorts1 = 8;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 3;
+ link->resource[0]->end = 8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -174,16 +172,18 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev,
{
int j;
+ p_dev->io_lines = 3;
+
if ((cf->io.nwin > 0) && cf->io.win[0].base) {
printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n");
- p_dev->io.BasePort1 = cf->io.win[0].base;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ if (!pcmcia_request_io(p_dev))
return 0;
} else {
printk(KERN_INFO "(elsa_cs: looks like the 97 model)\n");
for (j = 0x2f0; j > 0x100; j -= 0x10) {
- p_dev->io.BasePort1 = j;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = j;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
@@ -215,23 +215,21 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
icard.para[0] = link->irq;
- icard.para[1] = link->io.BasePort1;
+ icard.para[1] = link->resource[0]->start;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
i = hisax_init_pcmcia(link, &(((local_info_t*)link->priv)->busy), &icard);
if (i < 0) {
- printk(KERN_ERR "elsa_cs: failed to initialize Elsa PCMCIA %d at i/o %#x\n",
- i, link->io.BasePort1);
+ printk(KERN_ERR "elsa_cs: failed to initialize Elsa "
+ "PCMCIA %d with %pR\n", i, link->resource[0]);
elsa_cs_release(link);
} else
((local_info_t*)link->priv)->cardnr = i;
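The avma1_cs and elsa_cs hunks above apply the same PCMCIA conversion: the old io_req_t fields (BasePort1, NumPorts1, Attributes1, IOAddrLines) are replaced by p_dev->resource[0]/[1] plus p_dev->io_lines, and pcmcia_request_io() now takes only the device pointer. A minimal sketch of a converted config-check callback; the driver name and the width/io_lines values are chosen for illustration only:

    #include <pcmcia/cs.h>
    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    /* Sketch only: mirrors the converted hisax drivers above. */
    static int foo_cs_configcheck(struct pcmcia_device *p_dev,
                                  cistpl_cftable_entry_t *cf,
                                  cistpl_cftable_entry_t *dflt,
                                  unsigned int vcc, void *priv_data)
    {
            if (cf->io.nwin <= 0)
                    return -ENODEV;

            p_dev->resource[0]->start  = cf->io.win[0].base;    /* was io.BasePort1 */
            p_dev->resource[0]->end    = cf->io.win[0].len;     /* was io.NumPorts1 */
            p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;  /* was io.Attributes1 */
            p_dev->io_lines = 5;                                /* was io.IOAddrLines */

            return pcmcia_request_io(p_dev);    /* the &p_dev->io argument is gone */
    }

In the real drivers the width and io_lines values come from the hunks above; only the function name here is hypothetical.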
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index be5faf4aa86..5aa138eb0b3 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -234,13 +234,14 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
count++;
if (count > trans_max)
count = trans_max; /* limit length */
- if ((skb = dev_alloc_skb(count))) {
- dst = skb_put(skb, count);
- while (count--)
+ skb = dev_alloc_skb(count);
+ if (skb) {
+ dst = skb_put(skb, count);
+ while (count--)
*dst++ = Read_hfc(cs, HFCSX_FIF_DRD);
- return(skb);
- }
- else return(NULL); /* no memory */
+ return skb;
+ } else
+ return NULL; /* no memory */
}
do {
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 1f4feaab21a..a024192b672 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -46,7 +46,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -130,9 +129,8 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
/* from old sedl_cs
*/
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 8;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.IOAddrLines = 3;
+ link->resource[0]->end = 8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -173,8 +171,6 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- win_req_t *req = priv_data;
-
if (cfg->index == 0)
return -ENODEV;
@@ -202,52 +198,25 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ p_dev->io_lines = 3;
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
}
- /*
- Now set up a common memory window, if needed. There is room
- in the struct pcmcia_device structure for one memory window handle,
- but if the base addresses need to be saved, or if multiple
- windows are needed, the info should go in the private data
- structure for this device.
-
- Note that the memory window base is a physical address, and
- needs to be mapped to virtual space with ioremap() before it
- is used.
- */
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- memreq_t map;
- req->Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
- req->Attributes |= WIN_ENABLE;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
- return -ENODEV;
- }
return 0;
}
@@ -255,16 +224,11 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
static int __devinit sedlbauer_config(struct pcmcia_device *link)
{
- win_req_t *req;
int ret;
IsdnCard_t icard;
dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link);
- req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
/*
In this loop, we scan the CIS for configuration table entries,
each of which describes a valid card configuration, including
@@ -277,7 +241,7 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
these things without consulting the CIS, and most client drivers
will only use the CIS to fill in implementation-defined details.
*/
- ret = pcmcia_loop_config(link, sedlbauer_config_check, req);
+ ret = pcmcia_loop_config(link, sedlbauer_config_check, NULL);
if (ret)
goto failed;
@@ -297,27 +261,22 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req->Base,
- req->Base+req->Size-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
icard.para[0] = link->irq;
- icard.para[1] = link->io.BasePort1;
+ icard.para[1] = link->resource[0]->start;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
ret = hisax_init_pcmcia(link,
&(((local_info_t *)link->priv)->stop), &icard);
if (ret < 0) {
- printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n",
- ret, link->io.BasePort1);
+ printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d with %pR\n",
+ ret, link->resource[0]);
sedlbauer_release(link);
return -ENODEV;
} else
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 5771955cc53..7296102ca25 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -27,7 +27,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -107,9 +106,8 @@ static int __devinit teles_probe(struct pcmcia_device *link)
and attributes of IO windows) are fixed by the nature of the
device, and can be hard-wired here.
*/
- link->io.NumPorts1 = 96;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 5;
+ link->resource[0]->end = 96;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -154,16 +152,18 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev,
{
int j;
+ p_dev->io_lines = 5;
+
if ((cf->io.nwin > 0) && cf->io.win[0].base) {
printk(KERN_INFO "(teles_cs: looks like the 96 model)\n");
- p_dev->io.BasePort1 = cf->io.win[0].base;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ if (!pcmcia_request_io(p_dev))
return 0;
} else {
printk(KERN_INFO "(teles_cs: looks like the 97 model)\n");
for (j = 0x2f0; j > 0x100; j -= 0x10) {
- p_dev->io.BasePort1 = j;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = j;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
@@ -195,23 +195,21 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
icard.para[0] = link->irq;
- icard.para[1] = link->io.BasePort1;
+ icard.para[1] = link->resource[0]->start;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_TELESPCMCIA;
i = hisax_init_pcmcia(link, &(((local_info_t*)link->priv)->busy), &icard);
if (i < 0) {
printk(KERN_ERR "teles_cs: failed to initialize Teles PCMCIA %d at i/o %#x\n",
- i, link->io.BasePort1);
+ i, (unsigned int) link->resource[0]->start);
teles_cs_release(link);
return -ENODEV;
}
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 51dc60da333..c463162843b 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -14,7 +14,7 @@
#include <linux/isdn.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include "isdn_common.h"
#include "isdn_tty.h"
#ifdef CONFIG_ISDN_AUDIO
@@ -28,6 +28,7 @@
/* Prototypes */
+static DEFINE_MUTEX(modem_info_mutex);
static int isdn_tty_edit_at(const char *, int, modem_info *);
static void isdn_tty_check_esc(const u_char *, u_char, int, int *, u_long *);
static void isdn_tty_modem_reset_regs(modem_info *, int);
@@ -1354,14 +1355,14 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file)
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
- lock_kernel();
+ mutex_lock(&modem_info_mutex);
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
#endif
control = info->mcr;
status = info->msr;
- unlock_kernel();
+ mutex_unlock(&modem_info_mutex);
return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
| ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
| ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
@@ -1385,7 +1386,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear);
#endif
- lock_kernel();
+ mutex_lock(&modem_info_mutex);
if (set & TIOCM_RTS)
info->mcr |= UART_MCR_RTS;
if (set & TIOCM_DTR) {
@@ -1407,7 +1408,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
isdn_tty_modem_hup(info, 1);
}
}
- unlock_kernel();
+ mutex_unlock(&modem_info_mutex);
return 0;
}
@@ -3515,7 +3516,7 @@ isdn_tty_parse_at(modem_info * info)
{
atemu *m = &info->emu;
char *p;
- char ds[40];
+ char ds[ISDN_MSNLEN];
#ifdef ISDN_DEBUG_AT
printk(KERN_DEBUG "AT: '%s'\n", m->mdmcmd);
@@ -3594,7 +3595,7 @@ isdn_tty_parse_at(modem_info * info)
break;
case '3':
p++;
- sprintf(ds, "\r\n%d", info->emu.charge);
+ snprintf(ds, sizeof(ds), "\r\n%d", info->emu.charge);
isdn_tty_at_cout(ds, info);
break;
default:;
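The isdn_tty.c changes above drop the big kernel lock in favour of a file-local mutex and bound the AT response buffer with snprintf(). The locking pattern, reduced to its essentials (the struct and function names below are illustrative, not from the driver):

    #include <linux/mutex.h>

    struct foo_modem {                      /* stand-in for modem_info */
            unsigned int mcr, msr;
    };

    static DEFINE_MUTEX(modem_info_mutex);

    static void foo_read_modem_state(struct foo_modem *info,
                                     unsigned int *control, unsigned int *status)
    {
            mutex_lock(&modem_info_mutex);          /* was lock_kernel() */
            *control = info->mcr;
            *status  = info->msr;
            mutex_unlock(&modem_info_mutex);        /* was unlock_kernel() */
    }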
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 713ef2b805a..76d9e673b4e 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1237,6 +1237,7 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
if (dsp->cmx_delay)
dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
& CMX_BUFF_MASK;
+ else
dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
& CMX_BUFF_MASK;
} else {
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 22f38e48ac4..5b59796ed25 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -972,7 +972,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: got new ip address from user "
"space.\n", __func__);
- l1oip_socket_open(hc);
+ l1oip_socket_open(hc);
break;
case MISDN_CTRL_UNSETPEER:
if (debug & DEBUG_L1OIP_SOCKET)
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index b159bd59e64..a5b632e6755 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
-#include <linux/smp_lock.h>
#include "core.h"
static u_int *debug;
@@ -205,13 +204,7 @@ mISDNStackd(void *data)
struct mISDNstack *st = data;
int err = 0;
-#ifdef CONFIG_SMP
- lock_kernel();
-#endif
sigfillset(&current->blocked);
-#ifdef CONFIG_SMP
- unlock_kernel();
-#endif
if (*debug & DEBUG_MSG_THREAD)
printk(KERN_DEBUG "mISDNStackd %s started\n",
dev_name(&st->dev->dev));
diff --git a/drivers/isdn/pcbit/edss1.c b/drivers/isdn/pcbit/edss1.c
index d5920ae22d7..80c9c16fd5e 100644
--- a/drivers/isdn/pcbit/edss1.c
+++ b/drivers/isdn/pcbit/edss1.c
@@ -33,7 +33,7 @@
#include "callbacks.h"
-char * isdn_state_table[] = {
+const char * const isdn_state_table[] = {
"Closed",
"Call initiated",
"Overlap sending",
diff --git a/drivers/isdn/pcbit/edss1.h b/drivers/isdn/pcbit/edss1.h
index 0b64f97015d..39f8346e28c 100644
--- a/drivers/isdn/pcbit/edss1.h
+++ b/drivers/isdn/pcbit/edss1.h
@@ -90,7 +90,7 @@ struct fsm_timer_entry {
unsigned long timeout; /* in seconds */
};
-extern char * isdn_state_table[];
+extern const char * const isdn_state_table[];
void pcbit_fsm_event(struct pcbit_dev *, struct pcbit_chan *,
unsigned short event, struct callb_data *);
diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
index 485be8b1e1b..f0225bc0f26 100644
--- a/drivers/isdn/sc/interrupt.c
+++ b/drivers/isdn/sc/interrupt.c
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
}
else if(callid>=0x0000 && callid<=0x7FFF)
{
+ int len;
+
pr_debug("%s: Got Incoming Call\n",
sc_adapter[card]->devicename);
- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
- strcpy(setup.eazmsn,
- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
+ sizeof(setup.phone));
+ if (len >= sizeof(setup.phone))
+ continue;
+ len = strlcpy(setup.eazmsn,
+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+ sizeof(setup.eazmsn));
+ if (len >= sizeof(setup.eazmsn))
+ continue;
setup.si1 = 7;
setup.si2 = 0;
setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
* Handle a GetMyNumber Rsp
*/
if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+ rcvmsg.msg_data.byte_array,
+ sizeof(rcvmsg.msg_data.byte_array));
continue;
}
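The sc/interrupt.c hunk replaces unchecked strcpy() calls with strlcpy() and skips the current message when the source would not fit; the check works because strlcpy() returns the length of the source string. A minimal sketch with hypothetical names:

    #include <linux/string.h>

    /* Copy a caller-supplied number only if it fits dst (sketch only). */
    static int copy_number(char *dst, size_t dst_len, const char *src)
    {
            size_t len = strlcpy(dst, src, dst_len);

            if (len >= dst_len)     /* source longer than dst: it was truncated */
                    return -EINVAL;
            return 0;
    }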
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 5dcdf9d69b3..19dc4b61a10 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -351,7 +351,7 @@ static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \
return count; \
} \
static struct device_attribute bd2802_reg##reg_addr##_attr = { \
- .attr = {.name = reg_name, .mode = 0644, .owner = THIS_MODULE}, \
+ .attr = {.name = reg_name, .mode = 0644}, \
.store = bd2802_store_reg##reg_addr, \
};
@@ -482,7 +482,6 @@ static struct device_attribute bd2802_adv_conf_attr = {
.attr = {
.name = "advanced_configuration",
.mode = 0644,
- .owner = THIS_MODULE
},
.show = bd2802_show_adv_conf,
.store = bd2802_store_adv_conf,
@@ -519,7 +518,6 @@ static struct device_attribute bd2802_##attr_name##_attr = { \
.attr = { \
.name = name_str, \
.mode = 0644, \
- .owner = THIS_MODULE \
}, \
.show = bd2802_show_##attr_name, \
.store = bd2802_store_##attr_name, \
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index cc22eeefa10..ea57e05d08f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -224,7 +224,7 @@ struct gpio_led_of_platform_data {
struct gpio_led_data led_data[];
};
-static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
+static int __devinit of_gpio_leds_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node, *child;
@@ -283,7 +283,7 @@ err:
return ret;
}
-static int __devexit of_gpio_leds_remove(struct of_device *ofdev)
+static int __devexit of_gpio_leds_remove(struct platform_device *ofdev)
{
struct gpio_led_of_platform_data *pdata = dev_get_drvdata(&ofdev->dev);
int i;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4ba026..350eb34f049 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
int cmd_level;
int slow_level;
- read_lock(&led_dat->rw_lock);
+ read_lock_irq(&led_dat->rw_lock);
cmd_level = gpio_get_value(led_dat->cmd);
slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
}
}
- read_unlock(&led_dat->rw_lock);
+ read_unlock_irq(&led_dat->rw_lock);
return ret;
}
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
enum ns2_led_modes mode)
{
int i;
+ unsigned long flags;
- write_lock(&led_dat->rw_lock);
+ write_lock_irqsave(&led_dat->rw_lock, flags);
for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
}
}
- write_unlock(&led_dat->rw_lock);
+ write_unlock_irqrestore(&led_dat->rw_lock, flags);
}
static void ns2_led_set(struct led_classdev *led_cdev,
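The leds-ns2.c hunks make the LED rwlock IRQ-aware: the getter, which runs in process context, switches to the plain _irq variants, while the setter saves and restores the interrupt flags, presumably because it can be reached with interrupts already disabled. The general shape, with placeholder names:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(foo_rw_lock);      /* stand-in for led_dat->rw_lock */
    static int foo_mode;

    static int foo_get_mode(void)
    {
            int mode;

            read_lock_irq(&foo_rw_lock);    /* was read_lock() */
            mode = foo_mode;
            read_unlock_irq(&foo_rw_lock);
            return mode;
    }

    static void foo_set_mode(int mode)
    {
            unsigned long flags;

            write_lock_irqsave(&foo_rw_lock, flags);        /* was write_lock() */
            foo_mode = mode;
            write_unlock_irqrestore(&foo_rw_lock, flags);
    }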
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 6999ce59fd1..8eb40afbd0f 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -15,7 +15,7 @@ field##_show (struct device *dev, struct device_attribute *attr, \
static ssize_t
compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
{
- struct of_device *of;
+ struct platform_device *of;
const char *compat;
int cplen;
int length = 0;
@@ -41,10 +41,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct of_device *ofdev = to_of_device(dev);
- int len;
-
- len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2);
+ int len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
buf[len] = '\n';
buf[len+1] = 0;
@@ -55,9 +52,9 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct of_device *ofdev;
+ struct platform_device *ofdev;
- ofdev = to_of_device(dev);
+ ofdev = to_platform_device(dev);
return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 2506c957712..e58c3d33e03 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -75,7 +75,7 @@ struct smu_cmd_buf {
struct smu_device {
spinlock_t lock;
struct device_node *of_node;
- struct of_device *of_dev;
+ struct platform_device *of_dev;
int doorbell; /* doorbell gpio */
u32 __iomem *db_buf; /* doorbell buffer */
struct device_node *db_node;
@@ -645,7 +645,7 @@ static void smu_expose_childs(struct work_struct *unused)
static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
-static int smu_platform_probe(struct of_device* dev,
+static int smu_platform_probe(struct platform_device* dev,
const struct of_device_id *match)
{
if (!smu)
@@ -695,7 +695,7 @@ static int __init smu_init_sysfs(void)
device_initcall(smu_init_sysfs);
-struct of_device *smu_get_ofdev(void)
+struct platform_device *smu_get_ofdev(void)
{
if (!smu)
return NULL;
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c42eeb43042..d0d221332db 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -84,7 +84,7 @@ struct thermostat {
static enum {ADT7460, ADT7467} therm_type;
static int therm_bus, therm_address;
-static struct of_device * of_dev;
+static struct platform_device * of_dev;
static struct thermostat* thermostat;
static struct task_struct *thread_therm = NULL;
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index e60605bd0ea..44549272333 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -148,7 +148,7 @@
* Driver statics
*/
-static struct of_device * of_dev;
+static struct platform_device * of_dev;
static struct i2c_adapter * u3_0;
static struct i2c_adapter * u3_1;
static struct i2c_adapter * k2;
@@ -2210,7 +2210,7 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
}
}
-static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
+static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
{
state = state_detached;
@@ -2221,7 +2221,7 @@ static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
return i2c_add_driver(&therm_pm72_driver);
}
-static int fcu_of_remove(struct of_device* dev)
+static int fcu_of_remove(struct platform_device* dev)
{
i2c_del_driver(&therm_pm72_driver);
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 5c9367acf0c..c89f396e4c5 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -52,7 +52,7 @@ static struct {
struct task_struct *poll_task;
struct mutex lock;
- struct of_device *of_dev;
+ struct platform_device *of_dev;
struct i2c_client *thermostat;
struct i2c_client *fan;
@@ -322,10 +322,10 @@ do_attach( struct i2c_adapter *adapter )
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
- i2c_new_probed_device(adapter, &info, scan_ds1775);
+ i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
- i2c_new_probed_device(adapter, &info, scan_adm1030);
+ i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
if( x.thermostat && x.fan ) {
x.running = 1;
@@ -444,13 +444,13 @@ static struct i2c_driver g4fan_driver = {
/************************************************************************/
static int
-therm_of_probe( struct of_device *dev, const struct of_device_id *match )
+therm_of_probe( struct platform_device *dev, const struct of_device_id *match )
{
return i2c_add_driver( &g4fan_driver );
}
static int
-therm_of_remove( struct of_device *dev )
+therm_of_remove( struct platform_device *dev )
{
i2c_del_driver( &g4fan_driver );
return 0;
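The macintosh hunks above are part of the of_device to platform_device consolidation: struct of_device no longer exists, so of_platform probe and remove callbacks now take a struct platform_device. A sketch of the converted callback pair, with a hypothetical driver name:

    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    /* Sketch of an of_platform driver after the of_device rename. */
    static int foo_of_probe(struct platform_device *dev,
                            const struct of_device_id *match)
    {
            /* dev->dev.of_node still carries the device-tree node */
            return 0;
    }

    static int foo_of_remove(struct platform_device *dev)
    {
            return 0;
    }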
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 3d4fc0f7b00..2d17e76066b 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -45,6 +45,7 @@
#include <linux/syscalls.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
+#include <linux/compat.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
@@ -400,11 +401,12 @@ static int __init via_pmu_start(void)
printk(KERN_ERR "via-pmu: can't map interrupt\n");
return -ENODEV;
}
- /* We set IRQF_TIMER because we don't want the interrupt to be disabled
- * between the 2 passes of driver suspend, we control our own disabling
- * for that one
+ /* We set IRQF_NO_SUSPEND because we don't want the interrupt
+ * to be disabled between the 2 passes of driver suspend, we
+ * control our own disabling for that one
*/
- if (request_irq(irq, via_pmu_interrupt, IRQF_TIMER, "VIA-PMU", (void *)0)) {
+ if (request_irq(irq, via_pmu_interrupt, IRQF_NO_SUSPEND,
+ "VIA-PMU", (void *)0)) {
printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
return -ENODEV;
}
@@ -2348,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
return ret;
}
+#ifdef CONFIG_COMPAT
+#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
+#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
+#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
+#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
+#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
+#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
+
+static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
+{
+ switch (cmd) {
+ case PMU_IOC_SLEEP:
+ break;
+ case PMU_IOC_GET_BACKLIGHT32:
+ cmd = PMU_IOC_GET_BACKLIGHT;
+ break;
+ case PMU_IOC_SET_BACKLIGHT32:
+ cmd = PMU_IOC_SET_BACKLIGHT;
+ break;
+ case PMU_IOC_GET_MODEL32:
+ cmd = PMU_IOC_GET_MODEL;
+ break;
+ case PMU_IOC_HAS_ADB32:
+ cmd = PMU_IOC_HAS_ADB;
+ break;
+ case PMU_IOC_CAN_SLEEP32:
+ cmd = PMU_IOC_CAN_SLEEP;
+ break;
+ case PMU_IOC_GRAB_BACKLIGHT32:
+ cmd = PMU_IOC_GRAB_BACKLIGHT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
.unlocked_ioctl = pmu_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_pmu_ioctl,
+#endif
.open = pmu_open,
.release = pmu_release,
};
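The via-pmu compat handler above is needed because ioctl numbers encode the size of their argument type: _IOR('B', 1, size_t) expands differently for a 32-bit process than for a 64-bit kernel, so both encodings must be accepted. A sketch of the two forms; the FOO_* names stand in for the PMU_IOC_* pairs:

    #include <linux/compat.h>
    #include <linux/ioctl.h>

    /* Native number: argument size is sizeof(size_t), 8 on a 64-bit kernel. */
    #define FOO_IOC_GET     _IOR('B', 1, size_t)
    /* What 32-bit userspace computes: sizeof(size_t) there is 4. */
    #define FOO_IOC_GET32   _IOR('B', 1, compat_size_t)

compat_pmu_ioctl() then just rewrites the 32-bit number to the native one and forwards the argument through compat_ptr().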
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6b19c..00000000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-raid6altivec*.c
-raid6int*.c
-raid6tables.c
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4a6feac8c94..bf1a95e3155 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -121,7 +121,7 @@ config MD_RAID10
config MD_RAID456
tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
- select MD_RAID6_PQ
+ select RAID6_PQ
select ASYNC_MEMCPY
select ASYNC_XOR
select ASYNC_PQ
@@ -165,22 +165,6 @@ config MULTICORE_RAID456
If unsure, say N.
-config MD_RAID6_PQ
- tristate
-
-config ASYNC_RAID6_TEST
- tristate "Self test for hardware accelerated raid6 recovery"
- depends on MD_RAID6_PQ
- select ASYNC_RAID6_RECOV
- ---help---
- This is a one-shot self test that permutes through the
- recovery of all the possible two disk failure scenarios for a
- N-disk array. Recovery is performed with the asynchronous
- raid6 recovery routines, and will optionally use an offload
- engine if one is available.
-
- If unsure, say N.
-
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index e355e7f6a53..5e3aac41919 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -12,13 +12,6 @@ dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
-raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
- raid6int1.o raid6int2.o raid6int4.o \
- raid6int8.o raid6int16.o raid6int32.o \
- raid6altivec1.o raid6altivec2.o raid6altivec4.o \
- raid6altivec8.o \
- raid6mmx.o raid6sse1.o raid6sse2.o
-hostprogs-y += mktables
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
@@ -29,7 +22,6 @@ obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
-obj-$(CONFIG_MD_RAID6_PQ) += raid6_pq.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
@@ -45,75 +37,6 @@ obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
- < $< > $@ || ( rm -f $@ && exit 1 )
-
-ifeq ($(CONFIG_ALTIVEC),y)
-altivec_flags := -maltivec -mabi=altivec
-endif
-
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
endif
-
-targets += raid6int1.c
-$(obj)/raid6int1.c: UNROLL := 1
-$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int2.c
-$(obj)/raid6int2.c: UNROLL := 2
-$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int4.c
-$(obj)/raid6int4.c: UNROLL := 4
-$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int8.c
-$(obj)/raid6int8.c: UNROLL := 8
-$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int16.c
-$(obj)/raid6int16.c: UNROLL := 16
-$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int32.c
-$(obj)/raid6int32.c: UNROLL := 32
-$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec1.o += $(altivec_flags)
-targets += raid6altivec1.c
-$(obj)/raid6altivec1.c: UNROLL := 1
-$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec2.o += $(altivec_flags)
-targets += raid6altivec2.c
-$(obj)/raid6altivec2.c: UNROLL := 2
-$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec4.o += $(altivec_flags)
-targets += raid6altivec4.c
-$(obj)/raid6altivec4.c: UNROLL := 4
-$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec8.o += $(altivec_flags)
-targets += raid6altivec8.c
-$(obj)/raid6altivec8.c: UNROLL := 8
-$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-quiet_cmd_mktable = TABLE $@
- cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
-
-targets += raid6tables.c
-$(obj)/raid6tables.c: $(obj)/mktables FORCE
- $(call if_changed,mktable)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1742435ce3a..ed4900ade93 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -13,7 +13,6 @@
* Still to do:
*
* flush after percent set rather than just time based. (maybe both).
- * wait if count gets too high, wake when it drops to half.
*/
#include <linux/blkdev.h>
@@ -30,6 +29,7 @@
#include "md.h"
#include "bitmap.h"
+#include <linux/dm-dirty-log.h>
/* debug macros */
#define DEBUG 0
@@ -51,9 +51,6 @@
#define INJECT_FATAL_FAULT_3 0 /* undef */
#endif
-//#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */
-#define DPRINTK(x...) do { } while(0)
-
#ifndef PRINTK
# if DEBUG > 0
# define PRINTK(x...) printk(KERN_DEBUG x)
@@ -62,12 +59,11 @@
# endif
#endif
-static inline char * bmname(struct bitmap *bitmap)
+static inline char *bmname(struct bitmap *bitmap)
{
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
-
/*
* just a placeholder - calls kmalloc for bitmap pages
*/
@@ -78,7 +74,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
#ifdef INJECT_FAULTS_1
page = NULL;
#else
- page = kmalloc(PAGE_SIZE, GFP_NOIO);
+ page = kzalloc(PAGE_SIZE, GFP_NOIO);
#endif
if (!page)
printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
@@ -107,7 +103,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
* if we find our page, we increment the page's refcount so that it stays
* allocated while we're using it
*/
-static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
+static int bitmap_checkpage(struct bitmap *bitmap,
+ unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
@@ -121,7 +118,6 @@ __acquires(bitmap->lock)
return -EINVAL;
}
-
if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
return 0;
@@ -131,43 +127,34 @@ __acquires(bitmap->lock)
if (!create)
return -ENOENT;
- spin_unlock_irq(&bitmap->lock);
-
/* this page has not been allocated yet */
- if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
+ spin_unlock_irq(&bitmap->lock);
+ mappage = bitmap_alloc_page(bitmap);
+ spin_lock_irq(&bitmap->lock);
+
+ if (mappage == NULL) {
PRINTK("%s: bitmap map page allocation failed, hijacking\n",
bmname(bitmap));
/* failed - set the hijacked flag so that we can use the
* pointer as a counter */
- spin_lock_irq(&bitmap->lock);
if (!bitmap->bp[page].map)
bitmap->bp[page].hijacked = 1;
- goto out;
- }
-
- /* got a page */
-
- spin_lock_irq(&bitmap->lock);
-
- /* recheck the page */
-
- if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
+ } else if (bitmap->bp[page].map ||
+ bitmap->bp[page].hijacked) {
/* somebody beat us to getting the page */
bitmap_free_page(bitmap, mappage);
return 0;
- }
+ } else {
- /* no page was in place and we have one, so install it */
+ /* no page was in place and we have one, so install it */
- memset(mappage, 0, PAGE_SIZE);
- bitmap->bp[page].map = mappage;
- bitmap->missing_pages--;
-out:
+ bitmap->bp[page].map = mappage;
+ bitmap->missing_pages--;
+ }
return 0;
}
-
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
@@ -183,26 +170,15 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
bitmap->bp[page].hijacked = 0;
bitmap->bp[page].map = NULL;
- return;
+ } else {
+ /* normal case, free the page */
+ ptr = bitmap->bp[page].map;
+ bitmap->bp[page].map = NULL;
+ bitmap->missing_pages++;
+ bitmap_free_page(bitmap, ptr);
}
-
- /* normal case, free the page */
-
-#if 0
-/* actually ... let's not. We will probably need the page again exactly when
- * memory is tight and we are flusing to disk
- */
- return;
-#else
- ptr = bitmap->bp[page].map;
- bitmap->bp[page].map = NULL;
- bitmap->missing_pages++;
- bitmap_free_page(bitmap, ptr);
- return;
-#endif
}
-
/*
* bitmap file handling - read and write the bitmap file and its superblock
*/
@@ -220,11 +196,14 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
mdk_rdev_t *rdev;
sector_t target;
+ int did_alloc = 0;
- if (!page)
+ if (!page) {
page = alloc_page(GFP_KERNEL);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ did_alloc = 1;
+ }
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (! test_bit(In_sync, &rdev->flags)
@@ -242,6 +221,8 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
return page;
}
}
+ if (did_alloc)
+ put_page(page);
return ERR_PTR(-EIO);
}
@@ -286,49 +267,51 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
mddev_t *mddev = bitmap->mddev;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
- int size = PAGE_SIZE;
- loff_t offset = mddev->bitmap_info.offset;
- if (page->index == bitmap->file_pages-1)
- size = roundup(bitmap->last_page_size,
- bdev_logical_block_size(rdev->bdev));
- /* Just make sure we aren't corrupting data or
- * metadata
- */
- if (mddev->external) {
- /* Bitmap could be anywhere. */
- if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) >
- rdev->data_offset &&
- rdev->sb_start + offset <
- rdev->data_offset + mddev->dev_sectors +
- (PAGE_SIZE/512))
- goto bad_alignment;
- } else if (offset < 0) {
- /* DATA BITMAP METADATA */
- if (offset
- + (long)(page->index * (PAGE_SIZE/512))
- + size/512 > 0)
- /* bitmap runs in to metadata */
- goto bad_alignment;
- if (rdev->data_offset + mddev->dev_sectors
- > rdev->sb_start + offset)
- /* data runs in to bitmap */
- goto bad_alignment;
- } else if (rdev->sb_start < rdev->data_offset) {
- /* METADATA BITMAP DATA */
- if (rdev->sb_start
- + offset
- + page->index*(PAGE_SIZE/512) + size/512
- > rdev->data_offset)
- /* bitmap runs in to data */
- goto bad_alignment;
- } else {
- /* DATA METADATA BITMAP - no problems */
- }
- md_super_write(mddev, rdev,
- rdev->sb_start + offset
- + page->index * (PAGE_SIZE/512),
- size,
- page);
+ int size = PAGE_SIZE;
+ loff_t offset = mddev->bitmap_info.offset;
+ if (page->index == bitmap->file_pages-1)
+ size = roundup(bitmap->last_page_size,
+ bdev_logical_block_size(rdev->bdev));
+ /* Just make sure we aren't corrupting data or
+ * metadata
+ */
+ if (mddev->external) {
+ /* Bitmap could be anywhere. */
+ if (rdev->sb_start + offset + (page->index
+ * (PAGE_SIZE/512))
+ > rdev->data_offset
+ &&
+ rdev->sb_start + offset
+ < (rdev->data_offset + mddev->dev_sectors
+ + (PAGE_SIZE/512)))
+ goto bad_alignment;
+ } else if (offset < 0) {
+ /* DATA BITMAP METADATA */
+ if (offset
+ + (long)(page->index * (PAGE_SIZE/512))
+ + size/512 > 0)
+ /* bitmap runs in to metadata */
+ goto bad_alignment;
+ if (rdev->data_offset + mddev->dev_sectors
+ > rdev->sb_start + offset)
+ /* data runs in to bitmap */
+ goto bad_alignment;
+ } else if (rdev->sb_start < rdev->data_offset) {
+ /* METADATA BITMAP DATA */
+ if (rdev->sb_start
+ + offset
+ + page->index*(PAGE_SIZE/512) + size/512
+ > rdev->data_offset)
+ /* bitmap runs in to data */
+ goto bad_alignment;
+ } else {
+ /* DATA METADATA BITMAP - no problems */
+ }
+ md_super_write(mddev, rdev,
+ rdev->sb_start + offset
+ + page->index * (PAGE_SIZE/512),
+ size,
+ page);
}
if (wait)
@@ -364,10 +347,9 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
bh = bh->b_this_page;
}
- if (wait) {
+ if (wait)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
- }
}
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
@@ -424,7 +406,7 @@ static struct page *read_page(struct file *file, unsigned long index,
struct buffer_head *bh;
sector_t block;
- PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+ PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
page = alloc_page(GFP_KERNEL);
@@ -478,7 +460,7 @@ static struct page *read_page(struct file *file, unsigned long index,
}
out:
if (IS_ERR(page))
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
+ printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
(int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT,
PTR_ERR(page));
@@ -664,11 +646,14 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
old = le32_to_cpu(sb->state) & bits;
switch (op) {
- case MASK_SET: sb->state |= cpu_to_le32(bits);
- break;
- case MASK_UNSET: sb->state &= cpu_to_le32(~bits);
- break;
- default: BUG();
+ case MASK_SET:
+ sb->state |= cpu_to_le32(bits);
+ break;
+ case MASK_UNSET:
+ sb->state &= cpu_to_le32(~bits);
+ break;
+ default:
+ BUG();
}
kunmap_atomic(sb, KM_USER0);
return old;
@@ -710,12 +695,14 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
static inline struct page *filemap_get_page(struct bitmap *bitmap,
unsigned long chunk)
{
- if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL;
+ if (bitmap->filemap == NULL)
+ return NULL;
+ if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
+ return NULL;
return bitmap->filemap[file_page_index(bitmap, chunk)
- file_page_index(bitmap, 0)];
}
-
static void bitmap_file_unmap(struct bitmap *bitmap)
{
struct page **map, *sb_page;
@@ -766,7 +753,6 @@ static void bitmap_file_put(struct bitmap *bitmap)
}
}
-
/*
* bitmap_file_kick - if an error occurs while manipulating the bitmap file
* then it is no longer reliable, so we stop using it and we mark the file
@@ -785,7 +771,6 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = d_path(&bitmap->file->f_path, path,
PAGE_SIZE);
-
printk(KERN_ALERT
"%s: kicking failed bitmap file %s from array!\n",
bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
@@ -803,27 +788,36 @@ static void bitmap_file_kick(struct bitmap *bitmap)
}
enum bitmap_page_attr {
- BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced
- BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared
- BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced
+ BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
+ BITMAP_PAGE_CLEAN = 1, /* there are bits that might need to be cleared */
+ BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ if (page)
+ __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ else
+ __set_bit(attr, &bitmap->logattrs);
}
static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ if (page)
+ __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ else
+ __clear_bit(attr, &bitmap->logattrs);
}
static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ if (page)
+ return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ else
+ return test_bit(attr, &bitmap->logattrs);
}
/*
@@ -836,30 +830,32 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
- struct page *page;
+ struct page *page = NULL;
void *kaddr;
unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
if (!bitmap->filemap) {
- return;
- }
-
- page = filemap_get_page(bitmap, chunk);
- if (!page) return;
- bit = file_page_offset(bitmap, chunk);
+ struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
+ if (log)
+ log->type->mark_region(log, chunk);
+ } else {
- /* set the bit */
- kaddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- set_bit(bit, kaddr);
- else
- ext2_set_bit(bit, kaddr);
- kunmap_atomic(kaddr, KM_USER0);
- PRINTK("set file bit %lu page %lu\n", bit, page->index);
+ page = filemap_get_page(bitmap, chunk);
+ if (!page)
+ return;
+ bit = file_page_offset(bitmap, chunk);
+ /* set the bit */
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ set_bit(bit, kaddr);
+ else
+ ext2_set_bit(bit, kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+ PRINTK("set file bit %lu page %lu\n", bit, page->index);
+ }
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
-
}
/* this gets called when the md device is ready to unplug its underlying
@@ -874,6 +870,16 @@ void bitmap_unplug(struct bitmap *bitmap)
if (!bitmap)
return;
+ if (!bitmap->filemap) {
+ /* Must be using a dirty_log */
+ struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
+ dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
+ need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
+ if (dirty || need_write)
+ if (log->type->flush(log))
+ bitmap->flags |= BITMAP_WRITE_ERROR;
+ goto out;
+ }
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
@@ -892,7 +898,7 @@ void bitmap_unplug(struct bitmap *bitmap)
wait = 1;
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (dirty | need_write)
+ if (dirty || need_write)
write_page(bitmap, page, 0);
}
if (wait) { /* if any writes were performed, we need to wait on them */
@@ -902,9 +908,11 @@ void bitmap_unplug(struct bitmap *bitmap)
else
md_super_wait(bitmap->mddev);
}
+out:
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
}
+EXPORT_SYMBOL(bitmap_unplug);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
@@ -943,12 +951,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
printk(KERN_INFO "%s: bitmap file is out of date, doing full "
"recovery\n", bmname(bitmap));
- bytes = (chunks + 7) / 8;
+ bytes = DIV_ROUND_UP(bitmap->chunks, 8);
if (!bitmap->mddev->bitmap_info.external)
bytes += sizeof(bitmap_super_t);
-
- num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
if (file && i_size_read(file->f_mapping->host) < bytes) {
printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
@@ -966,7 +973,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
bitmap->filemap_attr = kzalloc(
- roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
+ roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
GFP_KERNEL);
if (!bitmap->filemap_attr)
goto err;
@@ -1021,7 +1028,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (outofdate) {
/*
* if bitmap is out of date, dirty the
- * whole page and write it out
+ * whole page and write it out
*/
paddr = kmap_atomic(page, KM_USER0);
memset(paddr + offset, 0xff,
@@ -1052,7 +1059,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
}
}
- /* everything went OK */
+ /* everything went OK */
ret = 0;
bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
@@ -1080,21 +1087,16 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
- for (i=0; i < bitmap->file_pages; i++)
+ for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
}
-
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
{
sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
bitmap->bp[page].count += inc;
-/*
- if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
- (unsigned long long)offset, inc, bitmap->bp[page].count);
-*/
bitmap_checkfree(bitmap, page);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1114,6 +1116,7 @@ void bitmap_daemon_work(mddev_t *mddev)
struct page *page = NULL, *lastpage = NULL;
int blocks;
void *paddr;
+ struct dm_dirty_log *log = mddev->bitmap_info.log;
/* Use a mutex to guard daemon_work against
* bitmap_destroy.
@@ -1138,11 +1141,12 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_lock_irqsave(&bitmap->lock, flags);
for (j = 0; j < bitmap->chunks; j++) {
bitmap_counter_t *bmc;
- if (!bitmap->filemap)
- /* error or shutdown */
- break;
-
- page = filemap_get_page(bitmap, j);
+ if (!bitmap->filemap) {
+ if (!log)
+ /* error or shutdown */
+ break;
+ } else
+ page = filemap_get_page(bitmap, j);
if (page != lastpage) {
/* skip this page unless it's marked as needing cleaning */
@@ -1197,14 +1201,11 @@ void bitmap_daemon_work(mddev_t *mddev)
(sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
&blocks, 0);
if (bmc) {
-/*
- if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
-*/
if (*bmc)
bitmap->allclean = 0;
if (*bmc == 2) {
- *bmc=1; /* maybe clear the bit next time */
+ *bmc = 1; /* maybe clear the bit next time */
set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
} else if (*bmc == 1 && !bitmap->need_sync) {
/* we can clear the bit */
@@ -1214,14 +1215,17 @@ void bitmap_daemon_work(mddev_t *mddev)
-1);
/* clear the bit */
- paddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(bitmap, j),
- paddr);
- else
- ext2_clear_bit(file_page_offset(bitmap, j),
- paddr);
- kunmap_atomic(paddr, KM_USER0);
+ if (page) {
+ paddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ clear_bit(file_page_offset(bitmap, j),
+ paddr);
+ else
+ ext2_clear_bit(file_page_offset(bitmap, j),
+ paddr);
+ kunmap_atomic(paddr, KM_USER0);
+ } else
+ log->type->clear_region(log, j);
}
} else
j |= PAGE_COUNTER_MASK;
@@ -1229,12 +1233,16 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_unlock_irqrestore(&bitmap->lock, flags);
/* now sync the final page */
- if (lastpage != NULL) {
+ if (lastpage != NULL || log != NULL) {
spin_lock_irqsave(&bitmap->lock, flags);
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- write_page(bitmap, lastpage, 0);
+ if (lastpage)
+ write_page(bitmap, lastpage, 0);
+ else
+ if (log->type->flush(log))
+ bitmap->flags |= BITMAP_WRITE_ERROR;
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1243,7 +1251,7 @@ void bitmap_daemon_work(mddev_t *mddev)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout =
+ bitmap->mddev->thread->timeout =
bitmap->mddev->bitmap_info.daemon_sleep;
mutex_unlock(&mddev->bitmap_info.mutex);
}
@@ -1262,34 +1270,38 @@ __acquires(bitmap->lock)
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
sector_t csize;
+ int err;
- if (bitmap_checkpage(bitmap, page, create) < 0) {
+ err = bitmap_checkpage(bitmap, page, create);
+
+ if (bitmap->bp[page].hijacked ||
+ bitmap->bp[page].map == NULL)
+ csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
+ PAGE_COUNTER_SHIFT - 1);
+ else
csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
- *blocks = csize - (offset & (csize- 1));
+ *blocks = csize - (offset & (csize - 1));
+
+ if (err < 0)
return NULL;
- }
+
/* now locked ... */
if (bitmap->bp[page].hijacked) { /* hijacked pointer */
/* should we use the first or second counter field
* of the hijacked pointer? */
int hi = (pageoff > PAGE_COUNTER_MASK);
- csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
- PAGE_COUNTER_SHIFT - 1);
- *blocks = csize - (offset & (csize- 1));
return &((bitmap_counter_t *)
&bitmap->bp[page].map)[hi];
- } else { /* page is allocated */
- csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
- *blocks = csize - (offset & (csize- 1));
+ } else /* page is allocated */
return (bitmap_counter_t *)
&(bitmap->bp[page].map[pageoff]);
- }
}
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
- if (!bitmap) return 0;
+ if (!bitmap)
+ return 0;
if (behind) {
int bw;
@@ -1322,17 +1334,16 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->lock);
- blk_unplug(bitmap->mddev->queue);
+ md_unplug(bitmap->mddev);
schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
}
- switch(*bmc) {
+ switch (*bmc) {
case 0:
bitmap_file_set_bit(bitmap, offset);
- bitmap_count_page(bitmap,offset, 1);
- blk_plug_device_unlocked(bitmap->mddev->queue);
+ bitmap_count_page(bitmap, offset, 1);
/* fall through */
case 1:
*bmc = 2;
@@ -1345,16 +1356,19 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
offset += blocks;
if (sectors > blocks)
sectors -= blocks;
- else sectors = 0;
+ else
+ sectors = 0;
}
bitmap->allclean = 0;
return 0;
}
+EXPORT_SYMBOL(bitmap_startwrite);
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
int success, int behind)
{
- if (!bitmap) return;
+ if (!bitmap)
+ return;
if (behind) {
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
@@ -1381,7 +1395,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
- sysfs_notify_dirent(bitmap->sysfs_can_clear);
+ sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
}
if (!success && ! (*bmc & NEEDED_MASK))
@@ -1391,18 +1405,22 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
wake_up(&bitmap->overflow_wait);
(*bmc)--;
- if (*bmc <= 2) {
+ if (*bmc <= 2)
set_page_attr(bitmap,
- filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
+ filemap_get_page(
+ bitmap,
+ offset >> CHUNK_BLOCK_SHIFT(bitmap)),
BITMAP_PAGE_CLEAN);
- }
+
spin_unlock_irqrestore(&bitmap->lock, flags);
offset += blocks;
if (sectors > blocks)
sectors -= blocks;
- else sectors = 0;
+ else
+ sectors = 0;
}
}
+EXPORT_SYMBOL(bitmap_endwrite);
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
int degraded)
@@ -1455,14 +1473,14 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
}
return rv;
}
+EXPORT_SYMBOL(bitmap_start_sync);
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
-/*
- if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted);
-*/ if (bitmap == NULL) {
+
+ if (bitmap == NULL) {
*blocks = 1024;
return;
}
@@ -1471,26 +1489,23 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab
if (bmc == NULL)
goto unlock;
/* locked */
-/*
- if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
-*/
if (RESYNC(*bmc)) {
*bmc &= ~RESYNC_MASK;
if (!NEEDED(*bmc) && aborted)
*bmc |= NEEDED_MASK;
else {
- if (*bmc <= 2) {
+ if (*bmc <= 2)
set_page_attr(bitmap,
filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
BITMAP_PAGE_CLEAN);
- }
}
}
unlock:
spin_unlock_irqrestore(&bitmap->lock, flags);
bitmap->allclean = 0;
}
+EXPORT_SYMBOL(bitmap_end_sync);
void bitmap_close_sync(struct bitmap *bitmap)
{
@@ -1507,6 +1522,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
sector += blocks;
}
}
+EXPORT_SYMBOL(bitmap_close_sync);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
@@ -1536,6 +1552,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
bitmap->last_end_sync = jiffies;
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
+EXPORT_SYMBOL(bitmap_cond_end_sync);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
@@ -1552,9 +1569,9 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
spin_unlock_irq(&bitmap->lock);
return;
}
- if (! *bmc) {
+ if (!*bmc) {
struct page *page;
- *bmc = 1 | (needed?NEEDED_MASK:0);
+ *bmc = 1 | (needed ? NEEDED_MASK : 0);
bitmap_count_page(bitmap, offset, 1);
page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
@@ -1663,15 +1680,17 @@ int bitmap_create(mddev_t *mddev)
unsigned long pages;
struct file *file = mddev->bitmap_info.file;
int err;
- sector_t start;
- struct sysfs_dirent *bm;
+ struct sysfs_dirent *bm = NULL;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
- if (!file && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
+ if (!file
+ && !mddev->bitmap_info.offset
+ && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
return 0;
BUG_ON(file && mddev->bitmap_info.offset);
+ BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
@@ -1685,7 +1704,8 @@ int bitmap_create(mddev_t *mddev)
bitmap->mddev = mddev;
- bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
+ if (mddev->kobj.sd)
+ bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
if (bm) {
bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
sysfs_put(bm);
@@ -1719,9 +1739,9 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);
/* now that chunksize and chunkshift are set, we can use these macros */
- chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
+ chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
CHUNK_BLOCK_SHIFT(bitmap);
- pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
+ pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
BUG_ON(!pages);
@@ -1741,27 +1761,11 @@ int bitmap_create(mddev_t *mddev)
if (!bitmap->bp)
goto error;
- /* now that we have some pages available, initialize the in-memory
- * bitmap from the on-disk bitmap */
- start = 0;
- if (mddev->degraded == 0
- || bitmap->events_cleared == mddev->events)
- /* no need to keep dirty bits to optimise a re-add of a missing device */
- start = mddev->recovery_cp;
- err = bitmap_init_from_disk(bitmap, start);
-
- if (err)
- goto error;
-
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
pages, bmname(bitmap));
mddev->bitmap = bitmap;
- mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
- md_wakeup_thread(mddev->thread);
-
- bitmap_update_sb(bitmap);
return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
@@ -1770,15 +1774,69 @@ int bitmap_create(mddev_t *mddev)
return err;
}
+int bitmap_load(mddev_t *mddev)
+{
+ int err = 0;
+ sector_t sector = 0;
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ goto out;
+
+ /* Clear out old bitmap info first: Either there is none, or we
+ * are resuming after someone else has possibly changed things,
+ * so we should forget old cached info.
+ * All chunks should be clean, but some might need_sync.
+ */
+ while (sector < mddev->resync_max_sectors) {
+ int blocks;
+ bitmap_start_sync(bitmap, sector, &blocks, 0);
+ sector += blocks;
+ }
+ bitmap_close_sync(bitmap);
+
+ if (mddev->bitmap_info.log) {
+ unsigned long i;
+ struct dm_dirty_log *log = mddev->bitmap_info.log;
+ for (i = 0; i < bitmap->chunks; i++)
+ if (!log->type->in_sync(log, i, 1))
+ bitmap_set_memory_bits(bitmap,
+ (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
+ 1);
+ } else {
+ sector_t start = 0;
+ if (mddev->degraded == 0
+ || bitmap->events_cleared == mddev->events)
+ /* no need to keep dirty bits to optimise a
+ * re-add of a missing device */
+ start = mddev->recovery_cp;
+
+ err = bitmap_init_from_disk(bitmap, start);
+ }
+ if (err)
+ goto out;
+
+ mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
+ md_wakeup_thread(mddev->thread);
+
+ bitmap_update_sb(bitmap);
+
+ if (bitmap->flags & BITMAP_WRITE_ERROR)
+ err = -EIO;
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(bitmap_load);
+
static ssize_t
location_show(mddev_t *mddev, char *page)
{
ssize_t len;
- if (mddev->bitmap_info.file) {
+ if (mddev->bitmap_info.file)
len = sprintf(page, "file");
- } else if (mddev->bitmap_info.offset) {
+ else if (mddev->bitmap_info.offset)
len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
- } else
+ else
len = sprintf(page, "none");
len += sprintf(page+len, "\n");
return len;
@@ -1867,7 +1925,7 @@ timeout_show(mddev_t *mddev, char *page)
ssize_t len;
unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
-
+
len = sprintf(page, "%lu", secs);
if (jifs)
len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
@@ -2049,12 +2107,3 @@ struct attribute_group md_bitmap_group = {
.attrs = md_bitmap_attrs,
};
-
-/* the bitmap API -- for raid personalities */
-EXPORT_SYMBOL(bitmap_startwrite);
-EXPORT_SYMBOL(bitmap_endwrite);
-EXPORT_SYMBOL(bitmap_start_sync);
-EXPORT_SYMBOL(bitmap_end_sync);
-EXPORT_SYMBOL(bitmap_unplug);
-EXPORT_SYMBOL(bitmap_close_sync);
-EXPORT_SYMBOL(bitmap_cond_end_sync);
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 3797dea4723..e872a7bad6b 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -222,6 +222,10 @@ struct bitmap {
unsigned long file_pages; /* number of pages in the file */
int last_page_size; /* bytes in the last page */
+ unsigned long logattrs; /* used when filemap_attr doesn't exist
+ * because we are working with a dirty_log
+ */
+
unsigned long flags;
int allclean;
@@ -243,12 +247,14 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
+
};
/* the bitmap API */
/* these are used only by md/bitmap */
int bitmap_create(mddev_t *mddev);
+int bitmap_load(mddev_t *mddev);
void bitmap_flush(mddev_t *mddev);
void bitmap_destroy(mddev_t *mddev);
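A minimal kernel-side sketch of how a caller might use the split API above, now that bitmap_create() only allocates structures and bitmap_load() reads the on-disk/log state and starts the daemon; the function name and error handling here are illustrative assumptions, not part of the patch:

/* Illustrative caller (assumed name and error handling). */
static int example_bitmap_bringup(mddev_t *mddev)
{
	int err = bitmap_create(mddev);		/* allocate in-memory bitmap */

	if (!err)
		err = bitmap_load(mddev);	/* read disk/log state, kick daemon */
	if (err)
		bitmap_destroy(mddev);
	return err;
}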
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3bdbb611570..368e8e98f70 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -107,11 +107,10 @@ struct crypt_config {
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- /*
- * crypto related data
- */
+ char *cipher;
+ char *cipher_mode;
+
struct crypt_iv_operations *iv_gen_ops;
- char *iv_mode;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
@@ -135,8 +134,6 @@ struct crypt_config {
unsigned int dmreq_start;
struct ablkcipher_request *req;
- char cipher[CRYPTO_MAX_ALG_NAME];
- char chainmode[CRYPTO_MAX_ALG_NAME];
struct crypto_ablkcipher *tfm;
unsigned long flags;
unsigned int key_size;
@@ -999,82 +996,135 @@ static int crypt_wipe_key(struct crypt_config *cc)
return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
-/*
- * Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
- */
-static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+static void crypt_dtr(struct dm_target *ti)
{
- struct crypt_config *cc;
- struct crypto_ablkcipher *tfm;
- char *tmp;
- char *cipher;
- char *chainmode;
- char *ivmode;
- char *ivopts;
- unsigned int key_size;
- unsigned long long tmpll;
+ struct crypt_config *cc = ti->private;
- if (argc != 5) {
- ti->error = "Not enough arguments";
+ ti->private = NULL;
+
+ if (!cc)
+ return;
+
+ if (cc->io_queue)
+ destroy_workqueue(cc->io_queue);
+ if (cc->crypt_queue)
+ destroy_workqueue(cc->crypt_queue);
+
+ if (cc->bs)
+ bioset_free(cc->bs);
+
+ if (cc->page_pool)
+ mempool_destroy(cc->page_pool);
+ if (cc->req_pool)
+ mempool_destroy(cc->req_pool);
+ if (cc->io_pool)
+ mempool_destroy(cc->io_pool);
+
+ if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+ cc->iv_gen_ops->dtr(cc);
+
+ if (cc->tfm && !IS_ERR(cc->tfm))
+ crypto_free_ablkcipher(cc->tfm);
+
+ if (cc->dev)
+ dm_put_device(ti, cc->dev);
+
+ kzfree(cc->cipher);
+ kzfree(cc->cipher_mode);
+
+ /* Must zero key material before freeing */
+ kzfree(cc);
+}
+
+static int crypt_ctr_cipher(struct dm_target *ti,
+ char *cipher_in, char *key)
+{
+ struct crypt_config *cc = ti->private;
+ char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+ char *cipher_api = NULL;
+ int ret = -EINVAL;
+
+ /* Convert to crypto api definition? */
+ if (strchr(cipher_in, '(')) {
+ ti->error = "Bad cipher specification";
return -EINVAL;
}
- tmp = argv[0];
+ /*
+ * Legacy dm-crypt cipher specification
+ * cipher-mode-iv:ivopts
+ */
+ tmp = cipher_in;
cipher = strsep(&tmp, "-");
+
+ cc->cipher = kstrdup(cipher, GFP_KERNEL);
+ if (!cc->cipher)
+ goto bad_mem;
+
+ if (tmp) {
+ cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
+ if (!cc->cipher_mode)
+ goto bad_mem;
+ }
+
chainmode = strsep(&tmp, "-");
ivopts = strsep(&tmp, "-");
ivmode = strsep(&ivopts, ":");
if (tmp)
- DMWARN("Unexpected additional cipher options");
-
- key_size = strlen(argv[1]) >> 1;
-
- cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
- if (cc == NULL) {
- ti->error =
- "Cannot allocate transparent encryption context";
- return -ENOMEM;
- }
+ DMWARN("Ignoring unexpected additional cipher options");
- /* Compatibility mode for old dm-crypt cipher strings */
- if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
+ /* Compatibility mode for old dm-crypt mappings */
+ if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
+ kfree(cc->cipher_mode);
+ cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
chainmode = "cbc";
ivmode = "plain";
}
if (strcmp(chainmode, "ecb") && !ivmode) {
- ti->error = "This chaining mode requires an IV mechanism";
- goto bad_cipher;
+ ti->error = "IV mechanism required";
+ return -EINVAL;
}
- if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
- ti->error = "Chain mode + cipher name is too long";
- goto bad_cipher;
+ cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
+ if (!cipher_api)
+ goto bad_mem;
+
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", chainmode, cipher);
+ if (ret < 0) {
+ kfree(cipher_api);
+ goto bad_mem;
}
- tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
- if (IS_ERR(tfm)) {
+ /* Allocate cipher */
+ cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
+ if (IS_ERR(cc->tfm)) {
+ ret = PTR_ERR(cc->tfm);
ti->error = "Error allocating crypto tfm";
- goto bad_cipher;
+ goto bad;
}
- strcpy(cc->cipher, cipher);
- strcpy(cc->chainmode, chainmode);
- cc->tfm = tfm;
-
- if (crypt_set_key(cc, argv[1]) < 0) {
+ /* Initialize and set key */
+ ret = crypt_set_key(cc, key);
+ if (ret < 0) {
ti->error = "Error decoding and setting key";
- goto bad_ivmode;
+ goto bad;
}
- /*
- * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
- * See comments at iv code
- */
+ /* Initialize IV */
+ cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+ if (cc->iv_size)
+ /* at least a 64 bit sector number should fit in our buffer */
+ cc->iv_size = max(cc->iv_size,
+ (unsigned int)(sizeof(u64) / sizeof(u8)));
+ else if (ivmode) {
+ DMWARN("Selected cipher does not support IVs");
+ ivmode = NULL;
+ }
+ /* Choose ivmode, see comments at iv code. */
if (ivmode == NULL)
cc->iv_gen_ops = NULL;
else if (strcmp(ivmode, "plain") == 0)
@@ -1088,159 +1138,138 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
else {
+ ret = -EINVAL;
ti->error = "Invalid IV mode";
- goto bad_ivmode;
+ goto bad;
}
- if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
- cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
- goto bad_ivmode;
-
- if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
- cc->iv_gen_ops->init(cc) < 0) {
- ti->error = "Error initialising IV";
- goto bad_slab_pool;
+ /* Allocate IV */
+ if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
+ ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
+ if (ret < 0) {
+ ti->error = "Error creating IV";
+ goto bad;
+ }
}
- cc->iv_size = crypto_ablkcipher_ivsize(tfm);
- if (cc->iv_size)
- /* at least a 64 bit sector number should fit in our buffer */
- cc->iv_size = max(cc->iv_size,
- (unsigned int)(sizeof(u64) / sizeof(u8)));
- else {
- if (cc->iv_gen_ops) {
- DMWARN("Selected cipher does not support IVs");
- if (cc->iv_gen_ops->dtr)
- cc->iv_gen_ops->dtr(cc);
- cc->iv_gen_ops = NULL;
+ /* Initialize IV (set keys for ESSIV etc) */
+ if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
+ ret = cc->iv_gen_ops->init(cc);
+ if (ret < 0) {
+ ti->error = "Error initialising IV";
+ goto bad;
}
}
+ ret = 0;
+bad:
+ kfree(cipher_api);
+ return ret;
+
+bad_mem:
+ ti->error = "Cannot allocate cipher strings";
+ return -ENOMEM;
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct crypt_config *cc;
+ unsigned int key_size;
+ unsigned long long tmpll;
+ int ret;
+
+ if (argc != 5) {
+ ti->error = "Not enough arguments";
+ return -EINVAL;
+ }
+
+ key_size = strlen(argv[1]) >> 1;
+
+ cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+ if (!cc) {
+ ti->error = "Cannot allocate encryption context";
+ return -ENOMEM;
+ }
+
+ ti->private = cc;
+ ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
+ if (ret < 0)
+ goto bad;
+
+ ret = -ENOMEM;
cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
if (!cc->io_pool) {
ti->error = "Cannot allocate crypt io mempool";
- goto bad_slab_pool;
+ goto bad;
}
cc->dmreq_start = sizeof(struct ablkcipher_request);
- cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
+ cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
- cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
+ cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
~(crypto_tfm_ctx_alignment() - 1);
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
sizeof(struct dm_crypt_request) + cc->iv_size);
if (!cc->req_pool) {
ti->error = "Cannot allocate crypt request mempool";
- goto bad_req_pool;
+ goto bad;
}
cc->req = NULL;
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool";
- goto bad_page_pool;
+ goto bad;
}
cc->bs = bioset_create(MIN_IOS, 0);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
- goto bad_bs;
+ goto bad;
}
+ ret = -EINVAL;
if (sscanf(argv[2], "%llu", &tmpll) != 1) {
ti->error = "Invalid iv_offset sector";
- goto bad_device;
+ goto bad;
}
cc->iv_offset = tmpll;
- if (sscanf(argv[4], "%llu", &tmpll) != 1) {
- ti->error = "Invalid device sector";
- goto bad_device;
- }
- cc->start = tmpll;
-
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
ti->error = "Device lookup failed";
- goto bad_device;
+ goto bad;
}
- if (ivmode && cc->iv_gen_ops) {
- if (ivopts)
- *(ivopts - 1) = ':';
- cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
- if (!cc->iv_mode) {
- ti->error = "Error kmallocing iv_mode string";
- goto bad_ivmode_string;
- }
- strcpy(cc->iv_mode, ivmode);
- } else
- cc->iv_mode = NULL;
+ if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+ ti->error = "Invalid device sector";
+ goto bad;
+ }
+ cc->start = tmpll;
+ ret = -ENOMEM;
cc->io_queue = create_singlethread_workqueue("kcryptd_io");
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
- goto bad_io_queue;
+ goto bad;
}
cc->crypt_queue = create_singlethread_workqueue("kcryptd");
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
- goto bad_crypt_queue;
+ goto bad;
}
ti->num_flush_requests = 1;
- ti->private = cc;
return 0;
-bad_crypt_queue:
- destroy_workqueue(cc->io_queue);
-bad_io_queue:
- kfree(cc->iv_mode);
-bad_ivmode_string:
- dm_put_device(ti, cc->dev);
-bad_device:
- bioset_free(cc->bs);
-bad_bs:
- mempool_destroy(cc->page_pool);
-bad_page_pool:
- mempool_destroy(cc->req_pool);
-bad_req_pool:
- mempool_destroy(cc->io_pool);
-bad_slab_pool:
- if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
- cc->iv_gen_ops->dtr(cc);
-bad_ivmode:
- crypto_free_ablkcipher(tfm);
-bad_cipher:
- /* Must zero key material before freeing */
- kzfree(cc);
- return -EINVAL;
-}
-
-static void crypt_dtr(struct dm_target *ti)
-{
- struct crypt_config *cc = (struct crypt_config *) ti->private;
-
- destroy_workqueue(cc->io_queue);
- destroy_workqueue(cc->crypt_queue);
-
- if (cc->req)
- mempool_free(cc->req, cc->req_pool);
-
- bioset_free(cc->bs);
- mempool_destroy(cc->page_pool);
- mempool_destroy(cc->req_pool);
- mempool_destroy(cc->io_pool);
-
- kfree(cc->iv_mode);
- if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
- cc->iv_gen_ops->dtr(cc);
- crypto_free_ablkcipher(cc->tfm);
- dm_put_device(ti, cc->dev);
-
- /* Must zero key material before freeing */
- kzfree(cc);
+bad:
+ crypt_dtr(ti);
+ return ret;
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
@@ -1255,7 +1284,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
+ io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
if (bio_data_dir(io->base_bio) == READ)
kcryptd_queue_io(io);
@@ -1268,7 +1297,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
static int crypt_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
- struct crypt_config *cc = (struct crypt_config *) ti->private;
+ struct crypt_config *cc = ti->private;
unsigned int sz = 0;
switch (type) {
@@ -1277,11 +1306,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- if (cc->iv_mode)
- DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
- cc->iv_mode);
+ if (cc->cipher_mode)
+ DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
else
- DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
+ DMEMIT("%s ", cc->cipher);
if (cc->key_size > 0) {
if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1378,7 +1406,7 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return max_size;
bvm->bi_bdev = cc->dev->bdev;
- bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+ bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
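A user-space sketch of the legacy "cipher-chainmode-ivmode:ivopts" parsing performed by crypt_ctr_cipher() above, run on an example input; the strsep() calls mirror the kernel code, everything else is scaffolding:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char spec[] = "aes-cbc-essiv:sha256";	/* example mapping string */
	char *tmp = spec;
	char *cipher = strsep(&tmp, "-");	/* "aes" */
	char *chainmode = strsep(&tmp, "-");	/* "cbc" */
	char *ivopts = strsep(&tmp, "-");	/* "essiv:sha256" */
	char *ivmode = strsep(&ivopts, ":");	/* "essiv", ivopts -> "sha256" */
	char cipher_api[64];

	/* crypto API name handed to crypto_alloc_ablkcipher() */
	snprintf(cipher_api, sizeof(cipher_api), "%s(%s)", chainmode, cipher);
	printf("cipher=%s ivmode=%s ivopts=%s -> %s\n",
	       cipher, ivmode, ivopts, cipher_api);	/* -> cbc(aes) */
	return 0;
}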
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 852052880d7..baa11912cc9 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -198,6 +198,7 @@ out:
atomic_set(&dc->may_delay, 1);
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = dc;
return 0;
@@ -281,14 +282,13 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
bio->bi_sector = dc->start_write +
- (bio->bi_sector - ti->begin);
+ dm_target_offset(ti, bio->bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
- bio->bi_sector = dc->start_read +
- (bio->bi_sector - ti->begin);
+ bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 2b7907b6dd0..0bdb201c2c2 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
/* Validate the chunk size against the device block size */
if (chunk_size %
- (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
+ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
+ chunk_size %
+ (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index e8dfa06af3b..0b2536247cf 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -126,8 +126,9 @@ struct dm_exception_store {
};
/*
- * Obtain the cow device used by a given snapshot.
+ * Obtain the origin or cow device used by a given snapshot.
*/
+struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
/*
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af..0590c75b0ab 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
/*
* For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+ if (where[i].count || (rw & REQ_HARDBARRIER))
do_region(rw, i, where + i, dp, io);
}
@@ -412,8 +412,8 @@ retry:
}
set_current_state(TASK_RUNNING);
- if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
- rw &= ~(1 << BIO_RW_BARRIER);
+ if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+ rw &= ~REQ_HARDBARRIER;
goto retry;
}
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
* the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d7500e1c26f..3e39193e503 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -249,55 +249,66 @@ static void __hash_remove(struct hash_cell *hc)
static void dm_hash_remove_all(int keep_open_devices)
{
- int i, dev_skipped, dev_removed;
+ int i, dev_skipped;
struct hash_cell *hc;
- struct list_head *tmp, *n;
+ struct mapped_device *md;
+
+retry:
+ dev_skipped = 0;
down_write(&_hash_lock);
-retry:
- dev_skipped = dev_removed = 0;
for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_safe (tmp, n, _name_buckets + i) {
- hc = list_entry(tmp, struct hash_cell, name_list);
+ list_for_each_entry(hc, _name_buckets + i, name_list) {
+ md = hc->md;
+ dm_get(md);
- if (keep_open_devices &&
- dm_lock_for_deletion(hc->md)) {
+ if (keep_open_devices && dm_lock_for_deletion(md)) {
+ dm_put(md);
dev_skipped++;
continue;
}
+
__hash_remove(hc);
- dev_removed = 1;
- }
- }
- /*
- * Some mapped devices may be using other mapped devices, so if any
- * still exist, repeat until we make no further progress.
- */
- if (dev_skipped) {
- if (dev_removed)
- goto retry;
+ up_write(&_hash_lock);
- DMWARN("remove_all left %d open device(s)", dev_skipped);
+ dm_put(md);
+ if (likely(keep_open_devices))
+ dm_destroy(md);
+ else
+ dm_destroy_immediate(md);
+
+ /*
+ * Some mapped devices may be using other mapped
+ * devices, so repeat until we make no further
+ * progress. If a new mapped device is created
+ * here it will also get removed.
+ */
+ goto retry;
+ }
}
up_write(&_hash_lock);
+
+ if (dev_skipped)
+ DMWARN("remove_all left %d open device(s)", dev_skipped);
}
-static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
- const char *new)
+static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
struct dm_table *table;
+ struct mapped_device *md;
/*
* duplicate new.
*/
new_name = kstrdup(new, GFP_KERNEL);
if (!new_name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
down_write(&_hash_lock);
@@ -306,24 +317,24 @@ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
*/
hc = __get_name_cell(new);
if (hc) {
- DMWARN("asked to rename to an already existing name %s -> %s",
- old, new);
+ DMWARN("asked to rename to an already-existing name %s -> %s",
+ param->name, new);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_name);
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
}
/*
* Is there such a device as 'old' ?
*/
- hc = __get_name_cell(old);
+ hc = __get_name_cell(param->name);
if (!hc) {
- DMWARN("asked to rename a non existent device %s -> %s",
- old, new);
+ DMWARN("asked to rename a non-existent device %s -> %s",
+ param->name, new);
up_write(&_hash_lock);
kfree(new_name);
- return -ENXIO;
+ return ERR_PTR(-ENXIO);
}
/*
@@ -345,13 +356,14 @@ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
dm_table_put(table);
}
- if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
- *flags |= DM_UEVENT_GENERATED_FLAG;
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
- dm_put(hc->md);
+ md = hc->md;
up_write(&_hash_lock);
kfree(old_name);
- return 0;
+
+ return md;
}
/*-----------------------------------------------------------------
@@ -573,7 +585,7 @@ static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
* Fills in a dm_ioctl structure, ready for sending back to
* userland.
*/
-static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
+static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
struct gendisk *disk = dm_disk(md);
struct dm_table *table;
@@ -617,8 +629,6 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
dm_table_put(table);
}
}
-
- return 0;
}
static int dev_create(struct dm_ioctl *param, size_t param_size)
@@ -640,15 +650,17 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
if (r) {
dm_put(md);
+ dm_destroy(md);
return r;
}
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- r = __dev_status(md, param);
+ __dev_status(md, param);
+
dm_put(md);
- return r;
+ return 0;
}
/*
@@ -742,6 +754,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
+ dm_destroy(md);
return 0;
}
@@ -762,6 +775,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
int r;
char *new_name = (char *) param + param->data_start;
+ struct mapped_device *md;
if (new_name < param->data ||
invalid_str(new_name, (void *) param + param_size) ||
@@ -774,10 +788,14 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
if (r)
return r;
- param->data_size = 0;
+ md = dm_hash_rename(param, new_name);
+ if (IS_ERR(md))
+ return PTR_ERR(md);
+
+ __dev_status(md, param);
+ dm_put(md);
- return dm_hash_rename(param->event_nr, &param->flags, param->name,
- new_name);
+ return 0;
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -818,8 +836,6 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
geometry.start = indata[3];
r = dm_set_geometry(md, &geometry);
- if (!r)
- r = __dev_status(md, param);
param->data_size = 0;
@@ -843,13 +859,17 @@ static int do_suspend(struct dm_ioctl *param)
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended_md(md))
+ if (!dm_suspended_md(md)) {
r = dm_suspend(md, suspend_flags);
+ if (r)
+ goto out;
+ }
- if (!r)
- r = __dev_status(md, param);
+ __dev_status(md, param);
+out:
dm_put(md);
+
return r;
}
@@ -911,7 +931,7 @@ static int do_resume(struct dm_ioctl *param)
dm_table_destroy(old_map);
if (!r)
- r = __dev_status(md, param);
+ __dev_status(md, param);
dm_put(md);
return r;
@@ -935,16 +955,16 @@ static int dev_suspend(struct dm_ioctl *param, size_t param_size)
*/
static int dev_status(struct dm_ioctl *param, size_t param_size)
{
- int r;
struct mapped_device *md;
md = find_device(param);
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
+ __dev_status(md, param);
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1019,7 +1039,7 @@ static void retrieve_status(struct dm_table *table,
*/
static int dev_wait(struct dm_ioctl *param, size_t param_size)
{
- int r;
+ int r = 0;
struct mapped_device *md;
struct dm_table *table;
@@ -1040,9 +1060,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
* changed to trigger the event, so we may as well tell
* him and save an ioctl.
*/
- r = __dev_status(md, param);
- if (r)
- goto out;
+ __dev_status(md, param);
table = dm_get_live_or_inactive_table(md, param);
if (table) {
@@ -1050,8 +1068,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
dm_table_put(table);
}
- out:
+out:
dm_put(md);
+
return r;
}
@@ -1112,28 +1131,9 @@ static int populate_table(struct dm_table *table,
next = spec->next;
}
- r = dm_table_set_type(table);
- if (r) {
- DMWARN("unable to set table type");
- return r;
- }
-
return dm_table_complete(table);
}
-static int table_prealloc_integrity(struct dm_table *t,
- struct mapped_device *md)
-{
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_dev_internal *dd;
-
- list_for_each_entry(dd, devices, list)
- if (bdev_get_integrity(dd->dm_dev.bdev))
- return blk_integrity_register(dm_disk(md), NULL);
-
- return 0;
-}
-
static int table_load(struct dm_ioctl *param, size_t param_size)
{
int r;
@@ -1155,21 +1155,30 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto out;
}
- r = table_prealloc_integrity(t, md);
- if (r) {
- DMERR("%s: could not register integrity profile.",
- dm_device_name(md));
+ /* Protect md->type and md->queue against concurrent table loads. */
+ dm_lock_md_type(md);
+ if (dm_get_md_type(md) == DM_TYPE_NONE)
+ /* Initial table load: acquire type of table. */
+ dm_set_md_type(md, dm_table_get_type(t));
+ else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+ DMWARN("can't change device type after initial table load.");
dm_table_destroy(t);
+ dm_unlock_md_type(md);
+ r = -EINVAL;
goto out;
}
- r = dm_table_alloc_md_mempools(t);
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md);
if (r) {
- DMWARN("unable to allocate mempools for this table");
+ DMWARN("unable to set up device queue for new table.");
dm_table_destroy(t);
+ dm_unlock_md_type(md);
goto out;
}
+ dm_unlock_md_type(md);
+ /* stage inactive table */
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
@@ -1186,7 +1195,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
up_write(&_hash_lock);
param->flags |= DM_INACTIVE_PRESENT_FLAG;
- r = __dev_status(md, param);
+ __dev_status(md, param);
out:
dm_put(md);
@@ -1196,7 +1205,6 @@ out:
static int table_clear(struct dm_ioctl *param, size_t param_size)
{
- int r;
struct hash_cell *hc;
struct mapped_device *md;
@@ -1216,11 +1224,12 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- r = __dev_status(hc->md, param);
+ __dev_status(hc->md, param);
md = hc->md;
up_write(&_hash_lock);
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1265,7 +1274,6 @@ static void retrieve_deps(struct dm_table *table,
static int table_deps(struct dm_ioctl *param, size_t param_size)
{
- int r = 0;
struct mapped_device *md;
struct dm_table *table;
@@ -1273,9 +1281,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
- if (r)
- goto out;
+ __dev_status(md, param);
table = dm_get_live_or_inactive_table(md, param);
if (table) {
@@ -1283,9 +1289,9 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
dm_table_put(table);
}
- out:
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1294,7 +1300,6 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
*/
static int table_status(struct dm_ioctl *param, size_t param_size)
{
- int r;
struct mapped_device *md;
struct dm_table *table;
@@ -1302,9 +1307,7 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
- if (r)
- goto out;
+ __dev_status(md, param);
table = dm_get_live_or_inactive_table(md, param);
if (table) {
@@ -1312,9 +1315,9 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
dm_table_put(table);
}
-out:
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1333,10 +1336,6 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
- if (r)
- goto out;
-
if (tmsg < (struct dm_target_msg *) param->data ||
invalid_str(tmsg->message, (void *) param + param_size)) {
DMWARN("Invalid target message parameters.");
@@ -1593,18 +1592,22 @@ static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
#endif
static const struct file_operations _ctl_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = dm_ctl_ioctl,
.compat_ioctl = dm_compat_ctl_ioctl,
.owner = THIS_MODULE,
};
static struct miscdevice _dm_misc = {
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = MAPPER_CTRL_MINOR,
.name = DM_NAME,
- .nodename = "mapper/control",
+ .nodename = DM_DIR "/" DM_CONTROL_NODE,
.fops = &_ctl_fops
};
+MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
+MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
+
/*
* Create misc character device and link to DM_DIR/control.
*/
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf8347504..d8587bac568 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+ .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = job->offset,
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 9200dbf2391..3921e3bb43c 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = lc;
return 0;
@@ -73,7 +74,7 @@ static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
struct linear_c *lc = ti->private;
- return lc->start + (bi_sector - ti->begin);
+ return lc->start + dm_target_offset(ti, bi_sector);
}
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
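The dm_target_offset() helper used throughout these hunks replaces the open-coded "sector - ti->begin" arithmetic; a standalone sketch of that equivalence (types and values below are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct dm_target {
	sector_t begin;	/* first sector of this target in the mapped device */
	sector_t len;
};

/* Assumed to reduce to the same subtraction the removed code performed. */
static sector_t dm_target_offset(struct dm_target *ti, sector_t bi_sector)
{
	return bi_sector - ti->begin;
}

int main(void)
{
	struct dm_target ti = { .begin = 2048, .len = 8192 };
	sector_t start = 1000000;	/* lc->start analogue */

	/* linear_map_sector() equivalent: 1000000 + (3072 - 2048) = 1001024 */
	printf("%llu\n",
	       (unsigned long long)(start + dm_target_offset(&ti, 3072)));
	return 0;
}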
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 826bce7343b..487ecda90ad 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -706,6 +706,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
if (as->argc < nr_params) {
ti->error = "not enough path parameters";
+ r = -EINVAL;
goto bad;
}
@@ -892,6 +893,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
return 0;
@@ -1271,6 +1273,15 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (error == -EOPNOTSUPP)
return error;
+ if (clone->cmd_flags & REQ_DISCARD)
+ /*
+ * Pass all discard request failures up.
+ * FIXME: only fail_path if the discard failed due to a
+ * transport problem. This requires precise understanding
+ * of the underlying failure (e.g. the SCSI sense).
+ */
+ return error;
+
if (mpio->pgpath)
fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723d..7c081bcbc3c 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -445,7 +445,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
{
if (unlikely(!bio->bi_size))
return 0;
- return m->offset + (bio->bi_sector - m->ms->ti->begin);
+ return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index c097d8a4823..cc2bdb83f9a 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -266,7 +266,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
*/
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
- return 1 + ((ps->exceptions_per_area + 1) * area);
+ return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
/*
@@ -780,8 +780,8 @@ static int persistent_commit_merge(struct dm_exception_store *store,
* ps->current_area does not get reduced by prepare_merge() until
* after commit_merge() has removed the nr_merged previous exceptions.
*/
- ps->next_free = (area_location(ps, ps->current_area) - 1) +
- (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+ ps->next_free = area_location(ps, ps->current_area) +
+ ps->current_committed + 1;
return 0;
}
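The rewritten next_free computation above is arithmetically identical to the old one when NUM_SNAPSHOT_HDR_CHUNKS is 1, which is assumed here to be its value in this file; a quick standalone check with sample numbers:

#include <assert.h>
#include <stdio.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1	/* assumed, matching dm-snap-persistent.c */

int main(void)
{
	unsigned long exceptions_per_area = 254;	/* sample values */
	unsigned long current_area = 2, current_committed = 10;
	unsigned long area_location = NUM_SNAPSHOT_HDR_CHUNKS +
		(exceptions_per_area + 1) * current_area;		/* 511 */
	unsigned long old_next = (area_location - 1) +
		(current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;	/* 522 */
	unsigned long new_next = area_location + current_committed + 1;	/* 522 */

	assert(old_next == new_next);
	printf("next_free = %lu\n", new_next);
	return 0;
}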
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 54853773510..5974d3094d9 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -148,6 +148,12 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
+{
+ return s->origin;
+}
+EXPORT_SYMBOL(dm_snap_origin);
+
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
return s->cow;
@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
origin_mode = FMODE_WRITE;
}
- origin_path = argv[0];
- argv++;
- argc--;
-
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ti->error = "Cannot allocate snapshot context private "
@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
+ origin_path = argv[0];
+ argv++;
+ argc--;
+
+ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+ if (r) {
+ ti->error = "Cannot get origin device";
+ goto bad_origin;
+ }
+
cow_path = argv[0];
argv++;
argc--;
@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv += args_used;
argc -= args_used;
- r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
- if (r) {
- ti->error = "Cannot get origin device";
- goto bad_origin;
- }
-
s->ti = ti;
s->valid = 1;
s->active = 0;
@@ -1212,15 +1218,15 @@ bad_kcopyd:
dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
- dm_put_device(ti, s->origin);
-
-bad_origin:
dm_exception_store_destroy(s->store);
bad_store:
dm_put_device(ti, s->cow);
bad_cow:
+ dm_put_device(ti, s->origin);
+
+bad_origin:
kfree(s);
bad:
@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
mempool_destroy(s->pending_pool);
- dm_put_device(ti, s->origin);
-
dm_exception_store_destroy(s->store);
dm_put_device(ti, s->cow);
+ dm_put_device(ti, s->origin);
+
kfree(s);
}
@@ -1686,7 +1692,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
if (unlikely(bio_empty_barrier(bio))) {
- if (!map_context->flush_request)
+ if (!map_context->target_request_nr)
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
@@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dm_snapshot *snap = ti->private;
+ int r;
+
+ r = fn(ti, snap->origin, 0, ti->len, data);
+
+ if (!r)
+ r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
- return fn(ti, snap->origin, 0, ti->len, data);
+ return r;
}
@@ -2159,6 +2171,21 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
return 0;
}
+static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct dm_dev *dev = ti->private;
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = dev->bdev;
+ bvm->bi_sector = bvm->bi_sector;
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
static int origin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -2176,6 +2203,7 @@ static struct target_type origin_target = {
.map = origin_map,
.resume = origin_resume,
.status = origin_status,
+ .merge = origin_merge,
.iterate_devices = origin_iterate_devices,
};
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db76..c297f6da91e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -25,6 +25,8 @@ struct stripe {
struct stripe_c {
uint32_t stripes;
+ int stripes_shift;
+ sector_t stripes_mask;
/* The size of this target / num. stripes */
sector_t stripe_width;
@@ -162,16 +164,22 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Set pointer to dm target; used in trigger_event */
sc->ti = ti;
-
sc->stripes = stripes;
sc->stripe_width = width;
+
+ if (stripes & (stripes - 1))
+ sc->stripes_shift = -1;
+ else {
+ sc->stripes_shift = ffs(stripes) - 1;
+ sc->stripes_mask = ((sector_t) stripes) - 1;
+ }
+
ti->split_io = chunk_size;
ti->num_flush_requests = stripes;
+ ti->num_discard_requests = stripes;
+ sc->chunk_shift = ffs(chunk_size) - 1;
sc->chunk_mask = ((sector_t) chunk_size) - 1;
- for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
- chunk_size >>= 1;
- sc->chunk_shift--;
/*
* Get the stripe destinations.
@@ -207,26 +215,79 @@ static void stripe_dtr(struct dm_target *ti)
kfree(sc);
}
+static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
+ uint32_t *stripe, sector_t *result)
+{
+ sector_t offset = dm_target_offset(sc->ti, sector);
+ sector_t chunk = offset >> sc->chunk_shift;
+
+ if (sc->stripes_shift < 0)
+ *stripe = sector_div(chunk, sc->stripes);
+ else {
+ *stripe = chunk & sc->stripes_mask;
+ chunk >>= sc->stripes_shift;
+ }
+
+ *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask);
+}
+
+static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
+ uint32_t target_stripe, sector_t *result)
+{
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, result);
+ if (stripe == target_stripe)
+ return;
+ *result &= ~sc->chunk_mask; /* round down */
+ if (target_stripe < stripe)
+ *result += sc->chunk_mask + 1; /* next chunk */
+}
+
+static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
+ uint32_t target_stripe)
+{
+ sector_t begin, end;
+
+ stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+ stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+ target_stripe, &end);
+ if (begin < end) {
+ bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+ bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
+ bio->bi_size = to_bytes(end - begin);
+ return DM_MAPIO_REMAPPED;
+ } else {
+ /* The range doesn't map to the target stripe */
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+}
+
static int stripe_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct stripe_c *sc = (struct stripe_c *) ti->private;
- sector_t offset, chunk;
+ struct stripe_c *sc = ti->private;
uint32_t stripe;
+ unsigned target_request_nr;
if (unlikely(bio_empty_barrier(bio))) {
- BUG_ON(map_context->flush_request >= sc->stripes);
- bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+ target_request_nr = map_context->target_request_nr;
+ BUG_ON(target_request_nr >= sc->stripes);
+ bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ target_request_nr = map_context->target_request_nr;
+ BUG_ON(target_request_nr >= sc->stripes);
+ return stripe_map_discard(sc, bio, target_request_nr);
+ }
- offset = bio->bi_sector - ti->begin;
- chunk = offset >> sc->chunk_shift;
- stripe = sector_div(chunk, sc->stripes);
+ stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+ bio->bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
- bio->bi_sector = sc->stripe[stripe].physical_start +
- (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
+
return DM_MAPIO_REMAPPED;
}
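Worked numbers for the power-of-two fast path added to stripe_map_sector() above: with 8-sector chunks and 4 stripes, the shift/mask arithmetic picks the same stripe and chunk-relative sector that sector_div() would (values below are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 100;		/* sector offset within the target */
	unsigned chunk_shift = 3;	/* 8-sector chunks */
	uint64_t chunk_mask = 7;
	unsigned stripes_shift = 2;	/* 4 stripes (power of two) */
	uint64_t stripes_mask = 3;

	uint64_t chunk = offset >> chunk_shift;			/* 12 */
	unsigned stripe = (unsigned)(chunk & stripes_mask);	/* 12 & 3 = 0 */

	chunk >>= stripes_shift;				/* 3 */
	/* sector within the chosen stripe, before adding physical_start */
	uint64_t result = (chunk << chunk_shift) | (offset & chunk_mask);	/* 28 */

	printf("stripe %u, sector %llu\n", stripe, (unsigned long long)result);
	return 0;
}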
@@ -284,7 +345,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
return error;
if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9924ea23032..f9fc07d7a4b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,6 +54,8 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
+ unsigned discards_supported:1;
+
/*
* Indicates the rw permissions for the new logical
* device. This should be a combination of FMODE_READ
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
INIT_LIST_HEAD(&t->devices);
atomic_set(&t->holders, 0);
+ t->discards_supported = 1;
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -245,7 +248,7 @@ void dm_table_destroy(struct dm_table *t)
msleep(1);
smp_mb();
- /* free the indexes (see dm_table_complete) */
+ /* free the indexes */
if (t->depth >= 2)
vfree(t->index[t->depth - 2]);
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
+ if (!tgt->num_discard_requests)
+ t->discards_supported = 0;
+
return 0;
bad:
@@ -778,7 +784,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
-int dm_table_set_type(struct dm_table *t)
+static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0;
@@ -900,7 +906,7 @@ static int setup_indexes(struct dm_table *t)
/*
* Builds the btree to index the map.
*/
-int dm_table_complete(struct dm_table *t)
+static int dm_table_build_index(struct dm_table *t)
{
int r = 0;
unsigned int leaf_nodes;
@@ -919,6 +925,55 @@ int dm_table_complete(struct dm_table *t)
return r;
}
+/*
+ * Register the mapped device for blk_integrity support if
+ * the underlying devices support it.
+ */
+static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+{
+ struct list_head *devices = dm_table_get_devices(t);
+ struct dm_dev_internal *dd;
+
+ list_for_each_entry(dd, devices, list)
+ if (bdev_get_integrity(dd->dm_dev.bdev))
+ return blk_integrity_register(dm_disk(md), NULL);
+
+ return 0;
+}
+
+/*
+ * Prepares the table for use by building the indices,
+ * setting the type, and allocating mempools.
+ */
+int dm_table_complete(struct dm_table *t)
+{
+ int r;
+
+ r = dm_table_set_type(t);
+ if (r) {
+ DMERR("unable to set table type");
+ return r;
+ }
+
+ r = dm_table_build_index(t);
+ if (r) {
+ DMERR("unable to build btrees");
+ return r;
+ }
+
+ r = dm_table_prealloc_integrity(t, t->md);
+ if (r) {
+ DMERR("could not register integrity profile.");
+ return r;
+ }
+
+ r = dm_table_alloc_md_mempools(t);
+ if (r)
+ DMERR("unable to allocate mempools");
+
+ return r;
+}
+
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context)
@@ -1086,6 +1141,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+ if (!dm_table_supports_discards(t))
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
dm_table_set_integrity(t);
/*
@@ -1232,6 +1292,39 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
return t->md;
}
+static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ if (!t->discards_supported)
+ return 0;
+
+ /*
+ * Ensure that at least one underlying device supports discards.
+ * t->devices includes internal dm devices such as mirror logs
+ * so we need to use iterate_devices here, which targets
+ * supporting discard must provide.
+ */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_discard_capable, NULL))
+ return 1;
+ }
+
+ return 0;
+}
+
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 11dea11dc0b..8da366cf381 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -113,6 +113,11 @@ void dm_unregister_target(struct target_type *tt)
*/
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
+ /*
+ * Return error for discards instead of -EOPNOTSUPP
+ */
+ tt->num_discard_requests = 1;
+
return 0;
}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bbc97030c0c..cc2b3cb8194 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -22,6 +22,11 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
+ /*
+ * Silently drop discards, avoiding -EOPNOTSUPP.
+ */
+ ti->num_discard_requests = 1;
+
return 0;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d21e1284604..ac384b2a6a3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -15,10 +15,12 @@
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
+#include <linux/delay.h>
#include <trace/events/block.h>
@@ -123,6 +125,10 @@ struct mapped_device {
unsigned long flags;
struct request_queue *queue;
+ unsigned type;
+ /* Protect queue and type against concurrent access. */
+ struct mutex type_lock;
+
struct gendisk *disk;
char name[16];
@@ -338,6 +344,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
+ lock_kernel();
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
@@ -355,6 +362,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
out:
spin_unlock(&_minor_lock);
+ unlock_kernel();
return md ? 0 : -ENXIO;
}
@@ -362,8 +370,12 @@ out:
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
+
+ lock_kernel();
atomic_dec(&md->open_count);
dm_put(md);
+ unlock_kernel();
+
return 0;
}
@@ -614,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md)) {
- if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+ if (!(io->bio->bi_rw & REQ_HARDBARRIER))
bio_list_add_head(&md->deferred,
io->bio);
} else
@@ -626,13 +638,19 @@ static void dec_pending(struct dm_io *io, int error)
io_error = io->error;
bio = io->bio;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+ if (bio->bi_rw & REQ_HARDBARRIER) {
/*
* There can be just one barrier request so we use
* a per-device variable for error reporting.
* Note that you can't touch the bio after end_io_acct
+ *
+ * We ignore -EOPNOTSUPP for empty flush reported by
+ * underlying devices. We assume that if the device
+ * doesn't support empty barriers, it doesn't need
+ * cache flushing commands.
*/
- if (!md->barrier_error && io_error != -EOPNOTSUPP)
+ if (!md->barrier_error &&
+ !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
md->barrier_error = io_error;
end_io_acct(io);
free_io(md, io);
@@ -792,12 +810,12 @@ static void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
int run_queue = 1;
- bool is_barrier = blk_barrier_rq(clone);
+ bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
- if (blk_pc_request(rq) && !is_barrier) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
rq->errors = clone->errors;
rq->resid_len = clone->resid_len;
@@ -844,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone)
struct request_queue *q = rq->q;
unsigned long flags;
- if (unlikely(blk_barrier_rq(clone))) {
+ if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
/*
* Barrier clones share an original request.
* Leave it to dm_end_request(), which handles this special
@@ -943,7 +961,7 @@ static void dm_complete_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
- if (unlikely(blk_barrier_rq(clone))) {
+ if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
/*
* Barrier clones share an original request. So can't use
* softirq_done with the original.
@@ -972,7 +990,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
- if (unlikely(blk_barrier_rq(clone))) {
+ if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
/*
* Barrier clones share an original request.
* Leave it to dm_end_request(), which handles this special
@@ -1012,17 +1030,27 @@ static void end_clone_request(struct request *clone, int error)
dm_complete_request(clone, error);
}
-static sector_t max_io_len(struct mapped_device *md,
- sector_t sector, struct dm_target *ti)
+/*
+ * Return maximum size of I/O possible at the supplied sector up to the current
+ * target boundary.
+ */
+static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
- sector_t offset = sector - ti->begin;
- sector_t len = ti->len - offset;
+ sector_t target_offset = dm_target_offset(ti, sector);
+
+ return ti->len - target_offset;
+}
+
+static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+{
+ sector_t len = max_io_len_target_boundary(sector, ti);
/*
* Does the target need to split even further ?
*/
if (ti->split_io) {
sector_t boundary;
+ sector_t offset = dm_target_offset(ti, sector);
boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
- offset;
if (len > boundary)
@@ -1106,7 +1134,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+ clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1161,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
__bio_clone(clone, bio);
- clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+ clone->bi_rw &= ~REQ_HARDBARRIER;
clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
clone->bi_idx = idx;
@@ -1164,36 +1192,96 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
return tio;
}
-static void __flush_target(struct clone_info *ci, struct dm_target *ti,
- unsigned flush_nr)
+static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
+ unsigned request_nr, sector_t len)
{
struct dm_target_io *tio = alloc_tio(ci, ti);
struct bio *clone;
- tio->info.flush_request = flush_nr;
+ tio->info.target_request_nr = request_nr;
- clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+ /*
+ * Discard requests require the bio's inline iovecs be initialized.
+ * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
+ * and discard, so no need for concern about wasted bvec allocations.
+ */
+ clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
__bio_clone(clone, ci->bio);
clone->bi_destructor = dm_bio_destructor;
+ if (len) {
+ clone->bi_sector = ci->sector;
+ clone->bi_size = to_bytes(len);
+ }
__map_bio(ti, clone, tio);
}
+static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
+ unsigned num_requests, sector_t len)
+{
+ unsigned request_nr;
+
+ for (request_nr = 0; request_nr < num_requests; request_nr++)
+ __issue_target_request(ci, ti, request_nr, len);
+}
+
static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
- unsigned target_nr = 0, flush_nr;
+ unsigned target_nr = 0;
struct dm_target *ti;
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- for (flush_nr = 0; flush_nr < ti->num_flush_requests;
- flush_nr++)
- __flush_target(ci, ti, flush_nr);
+ __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
ci->sector_count = 0;
return 0;
}
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+ struct bio *clone, *bio = ci->bio;
+ struct dm_target_io *tio;
+
+ tio = alloc_tio(ci, ti);
+ clone = clone_bio(bio, ci->sector, ci->idx,
+ bio->bi_vcnt - ci->idx, ci->sector_count,
+ ci->md->bs);
+ __map_bio(ti, clone, tio);
+ ci->sector_count = 0;
+}
+
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+ struct dm_target *ti;
+ sector_t len;
+
+ do {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
+
+ /*
+ * Even though the device advertised discard support,
+ * reconfiguration might have changed that since the
+ * check was performed.
+ */
+ if (!ti->num_discard_requests)
+ return -EOPNOTSUPP;
+
+ len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+
+ __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+
+ ci->sector += len;
+ } while (ci->sector_count -= len);
+
+ return 0;
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
@@ -1204,27 +1292,21 @@ static int __clone_and_map(struct clone_info *ci)
if (unlikely(bio_empty_barrier(bio)))
return __clone_and_map_empty_barrier(ci);
+ if (unlikely(bio->bi_rw & REQ_DISCARD))
+ return __clone_and_map_discard(ci);
+
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->md, ci->sector, ti);
-
- /*
- * Allocate a target io object.
- */
- tio = alloc_tio(ci, ti);
+ max = max_io_len(ci->sector, ti);
if (ci->sector_count <= max) {
/*
* Optimise for the simple case where we can do all of
* the remaining io with a single clone.
*/
- clone = clone_bio(bio, ci->sector, ci->idx,
- bio->bi_vcnt - ci->idx, ci->sector_count,
- ci->md->bs);
- __map_bio(ti, clone, tio);
- ci->sector_count = 0;
+ __clone_and_map_simple(ci, ti);
} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
/*
@@ -1245,6 +1327,7 @@ static int __clone_and_map(struct clone_info *ci)
len += bv_len;
}
+ tio = alloc_tio(ci, ti);
clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
ci->md->bs);
__map_bio(ti, clone, tio);
@@ -1267,13 +1350,12 @@ static int __clone_and_map(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->md, ci->sector, ti);
-
- tio = alloc_tio(ci, ti);
+ max = max_io_len(ci->sector, ti);
}
len = min(remaining, max);
+ tio = alloc_tio(ci, ti);
clone = split_bvec(bio, ci->sector, ci->idx,
bv->bv_offset + offset, len,
ci->md->bs);
@@ -1301,7 +1383,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
- if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+ if (!(bio->bi_rw & REQ_HARDBARRIER))
bio_io_error(bio);
else
if (!md->barrier_error)
@@ -1355,7 +1437,7 @@ static int dm_merge_bvec(struct request_queue *q,
/*
* Find maximum amount of I/O that won't need splitting
*/
- max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
+ max_sectors = min(max_io_len(bvm->bi_sector, ti),
(sector_t) BIO_MAX_SECTORS);
max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
if (max_size < 0)
@@ -1414,7 +1496,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
* we have to queue this io for later.
*/
if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
- unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
up_read(&md->io_lock);
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1455,20 +1537,9 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return _dm_request(q, bio);
}
-/*
- * Mark this request as flush request, so that dm_request_fn() can
- * recognize.
- */
-static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
-{
- rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
- rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
static bool dm_rq_is_flush_request(struct request *rq)
{
- if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
- rq->cmd[0] == REQ_LB_OP_FLUSH)
+ if (rq->cmd_flags & REQ_FLUSH)
return true;
else
return false;
@@ -1849,6 +1920,28 @@ static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);
+static void dm_init_md_queue(struct mapped_device *md)
+{
+ /*
+ * Request-based dm devices cannot be stacked on top of bio-based dm
+ * devices. The type of this dm device has not been decided yet.
+ * The type is decided at the first table loading time.
+ * To prevent problematic device stacking, clear the queue flag
+ * for request stacking support until then.
+ *
+ * This queue is new, so no concurrency on the queue_flags.
+ */
+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info.congested_data = md;
+ blk_queue_make_request(md->queue, dm_request);
+ blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+ md->queue->unplug_fn = dm_unplug_all;
+ blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
/*
* Allocate and initialise a blank device with a given minor.
*/
@@ -1874,8 +1967,10 @@ static struct mapped_device *alloc_dev(int minor)
if (r < 0)
goto bad_minor;
+ md->type = DM_TYPE_NONE;
init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
+ mutex_init(&md->type_lock);
spin_lock_init(&md->deferred_lock);
spin_lock_init(&md->barrier_error_lock);
rwlock_init(&md->map_lock);
@@ -1886,34 +1981,11 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_init_queue(dm_request_fn, NULL);
+ md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
goto bad_queue;
- /*
- * Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device has not been decided yet,
- * although we initialized the queue using blk_init_queue().
- * The type is decided at the first table loading time.
- * To prevent problematic device stacking, clear the queue flag
- * for request stacking support until then.
- *
- * This queue is new, so no concurrency on the queue_flags.
- */
- queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
- md->saved_make_request_fn = md->queue->make_request_fn;
- md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
- md->queue->backing_dev_info.congested_data = md;
- blk_queue_make_request(md->queue, dm_request);
- blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
- md->queue->unplug_fn = dm_unplug_all;
- blk_queue_merge_bvec(md->queue, dm_merge_bvec);
- blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_prep_fn);
- blk_queue_lld_busy(md->queue, dm_lld_busy);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
- dm_rq_prepare_flush);
+ dm_init_md_queue(md);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -2128,6 +2200,72 @@ int dm_create(int minor, struct mapped_device **result)
return 0;
}
+/*
+ * Functions to manage md->type.
+ * All are required to hold md->type_lock.
+ */
+void dm_lock_md_type(struct mapped_device *md)
+{
+ mutex_lock(&md->type_lock);
+}
+
+void dm_unlock_md_type(struct mapped_device *md)
+{
+ mutex_unlock(&md->type_lock);
+}
+
+void dm_set_md_type(struct mapped_device *md, unsigned type)
+{
+ md->type = type;
+}
+
+unsigned dm_get_md_type(struct mapped_device *md)
+{
+ return md->type;
+}
+
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+ struct request_queue *q = NULL;
+
+ if (md->queue->elevator)
+ return 1;
+
+ /* Fully initialize the queue */
+ q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+ if (!q)
+ return 0;
+
+ md->queue = q;
+ md->saved_make_request_fn = md->queue->make_request_fn;
+ dm_init_md_queue(md);
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_lld_busy(md->queue, dm_lld_busy);
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+ elv_register_queue(md->queue);
+
+ return 1;
+}
+
+/*
+ * Set up the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+ if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+ !dm_init_request_based_queue(md)) {
+ DMWARN("Cannot initialize queue for request-based mapped device");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
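
The helpers above (dm_lock_md_type() through dm_setup_md_queue()) replace the "cannot change the device type once a table is bound" check removed from dm_swap_table() further down. A minimal sketch of how a caller such as the ioctl table-load path (which is not part of this hunk) might drive them; the function name is hypothetical:

static int example_bind_first_table(struct mapped_device *md, struct dm_table *t)
{
	int r = 0;

	dm_lock_md_type(md);

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* Initial table load decides the device type. */
		dm_set_md_type(md, dm_table_get_type(t));
		/* May fully initialise a request-based queue. */
		r = dm_setup_md_queue(md);
	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		/* Once decided, the type must not change. */
		r = -EINVAL;
	}

	dm_unlock_md_type(md);
	return r;
}
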
static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
@@ -2141,6 +2279,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
(MINOR(disk_devt(dm_disk(md))) != minor) ||
+ dm_deleting_md(md) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
@@ -2175,6 +2314,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr)
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
+ BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
const char *dm_device_name(struct mapped_device *md)
@@ -2183,27 +2323,55 @@ const char *dm_device_name(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_device_name);
-void dm_put(struct mapped_device *md)
+static void __dm_destroy(struct mapped_device *md, bool wait)
{
struct dm_table *map;
- BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ might_sleep();
- if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
- map = dm_get_live_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED,
- MINOR(disk_devt(dm_disk(md))));
- set_bit(DMF_FREEING, &md->flags);
- spin_unlock(&_minor_lock);
- if (!dm_suspended_md(md)) {
- dm_table_presuspend_targets(map);
- dm_table_postsuspend_targets(map);
- }
- dm_sysfs_exit(md);
- dm_table_put(map);
- dm_table_destroy(__unbind(md));
- free_dev(md);
+ spin_lock(&_minor_lock);
+ map = dm_get_live_table(md);
+ idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
+ dm_table_postsuspend_targets(map);
}
+
+ /*
+ * Rare, but there may still be I/O requests in flight that have yet
+ * to complete. Wait for all references to disappear.
+ * No one should increment the reference count of the mapped_device
+ * once its state becomes DMF_FREEING.
+ */
+ if (wait)
+ while (atomic_read(&md->holders))
+ msleep(1);
+ else if (atomic_read(&md->holders))
+ DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
+ dm_device_name(md), atomic_read(&md->holders));
+
+ dm_sysfs_exit(md);
+ dm_table_put(map);
+ dm_table_destroy(__unbind(md));
+ free_dev(md);
+}
+
+void dm_destroy(struct mapped_device *md)
+{
+ __dm_destroy(md, true);
+}
+
+void dm_destroy_immediate(struct mapped_device *md)
+{
+ __dm_destroy(md, false);
+}
+
+void dm_put(struct mapped_device *md)
+{
+ atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
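
After this change dm_put() merely drops the holder count; the teardown that used to hide behind the last dm_put() now lives in dm_destroy()/dm_destroy_immediate(), which mark DMF_FREEING and then wait for (or warn about) remaining holders. A hedged sketch of the resulting lifetime; the real callers sit in dm-ioctl.c, outside this hunk, and the function below is illustrative only:

static int example_device_lifetime(int minor)
{
	struct mapped_device *md;
	int r = dm_create(minor, &md);

	if (r)
		return r;

	dm_get(md);	/* extra holder; BUG()s if DMF_FREEING is already set */
	/* ... hand out md, submit I/O, etc. ... */
	dm_put(md);	/* now only decrements md->holders */

	/*
	 * dm_destroy() suspends the targets, waits until every holder is
	 * gone, then frees the device; dm_destroy_immediate() warns about
	 * remaining holders instead of waiting.
	 */
	dm_destroy(md);
	return 0;
}
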
@@ -2258,7 +2426,12 @@ static void process_barrier(struct mapped_device *md, struct bio *bio)
if (!bio_empty_barrier(bio)) {
__split_and_process_bio(md, bio);
- dm_flush(md);
+ /*
+ * If the request isn't supported, don't waste time with
+ * the second flush.
+ */
+ if (md->barrier_error != -EOPNOTSUPP)
+ dm_flush(md);
}
if (md->barrier_error != DM_ENDIO_REQUEUE)
@@ -2296,7 +2469,7 @@ static void dm_wq_work(struct work_struct *work)
if (dm_request_based(md))
generic_make_request(c);
else {
- if (bio_rw_flagged(c, BIO_RW_BARRIER))
+ if (c->bi_rw & REQ_HARDBARRIER)
process_barrier(md, c);
else
__split_and_process_bio(md, c);
@@ -2315,11 +2488,11 @@ static void dm_queue_flush(struct mapped_device *md)
queue_work(md->wq, &md->work);
}
-static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
+static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- tio->info.flush_request = flush_nr;
+ tio->info.target_request_nr = request_nr;
}
/* Issue barrier requests to targets and wait for their completion. */
@@ -2337,7 +2510,7 @@ static int dm_rq_barrier(struct mapped_device *md)
ti = dm_table_get_target(map, i);
for (j = 0; j < ti->num_flush_requests; j++) {
clone = clone_rq(md->flush_request, md, GFP_NOIO);
- dm_rq_set_flush_nr(clone, j);
+ dm_rq_set_target_request_nr(clone, j);
atomic_inc(&md->pending[rq_data_dir(clone)]);
map_request(ti, clone, md);
}
@@ -2403,13 +2576,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
goto out;
}
- /* cannot change the device type, once a table is bound */
- if (md->map &&
- (dm_table_get_type(md->map) != dm_table_get_type(table))) {
- DMWARN("can't change the device type after a table is bound");
- goto out;
- }
-
map = __bind(md, table, &limits);
out:
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index bad1724d486..0c2dd5f4af7 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -59,13 +59,20 @@ void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
int dm_table_any_busy_target(struct dm_table *t);
-int dm_table_set_type(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_discards(struct dm_table *t);
int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+
+int dm_setup_md_queue(struct mapped_device *md);
+
/*
* To check the return value from dm_table_find_target().
*/
@@ -122,6 +129,11 @@ void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
+/*
+ * mapped_device operations
+ */
+void dm_destroy(struct mapped_device *md);
+void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a..ba19060bcf3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
dev_info_t *tmp_dev;
sector_t start_sector;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555..f20d13e717d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,6 +36,7 @@
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
@@ -261,7 +262,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
* Once ->stop is called and completes, the module will be completely
* unused.
*/
-static void mddev_suspend(mddev_t *mddev)
+void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
@@ -269,13 +270,15 @@ static void mddev_suspend(mddev_t *mddev)
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
}
+EXPORT_SYMBOL_GPL(mddev_suspend);
-static void mddev_resume(mddev_t *mddev)
+void mddev_resume(mddev_t *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
}
+EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(mddev_t *mddev, int bits)
{
@@ -353,7 +356,7 @@ static void md_submit_barrier(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
- bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ bio->bi_rw &= ~REQ_HARDBARRIER;
if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
@@ -384,6 +387,51 @@ void md_barrier_request(mddev_t *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_barrier_request);
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue.
+ */
+static void plugger_work(struct work_struct *work)
+{
+ struct plug_handle *plug =
+ container_of(work, struct plug_handle, unplug_work);
+ plug->unplug_fn(plug);
+}
+static void plugger_timeout(unsigned long data)
+{
+ struct plug_handle *plug = (void *)data;
+ kblockd_schedule_work(NULL, &plug->unplug_work);
+}
+void plugger_init(struct plug_handle *plug,
+ void (*unplug_fn)(struct plug_handle *))
+{
+ plug->unplug_flag = 0;
+ plug->unplug_fn = unplug_fn;
+ init_timer(&plug->unplug_timer);
+ plug->unplug_timer.function = plugger_timeout;
+ plug->unplug_timer.data = (unsigned long)plug;
+ INIT_WORK(&plug->unplug_work, plugger_work);
+}
+EXPORT_SYMBOL_GPL(plugger_init);
+
+void plugger_set_plug(struct plug_handle *plug)
+{
+ if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
+ mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+}
+EXPORT_SYMBOL_GPL(plugger_set_plug);
+
+int plugger_remove_plug(struct plug_handle *plug)
+{
+ if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
+ del_timer(&plug->unplug_timer);
+ return 1;
+ } else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plugger_remove_plug);
+
+
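
The plug_handle helpers above are the queue-less analogue of blk_plug_device()/blk_remove_plug(); the raid5 conversion later in this diff is the first user. A hedged sketch of how a personality might wire them up (all names are illustrative):

struct example_conf {
	struct plug_handle plug;
	/* ... per-array state ... */
};

static void example_unplug(struct plug_handle *plug)
{
	struct example_conf *conf = container_of(plug, struct example_conf, plug);

	if (plugger_remove_plug(&conf->plug)) {
		/* release whatever work was being batched while plugged */
	}
}

static void example_setup(struct example_conf *conf)
{
	plugger_init(&conf->plug, example_unplug);
}

static void example_defer_work(struct example_conf *conf)
{
	/* arm the ~3ms unplug timer; its work item calls example_unplug() */
	plugger_set_plug(&conf->plug);
}

static void example_teardown(struct example_conf *conf)
{
	plugger_flush(&conf->plug);	/* del_timer_sync + cancel_work_sync */
}
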
static inline mddev_t *mddev_get(mddev_t *mddev)
{
atomic_inc(&mddev->active);
@@ -416,7 +464,7 @@ static void mddev_put(mddev_t *mddev)
spin_unlock(&all_mddevs_lock);
}
-static void mddev_init(mddev_t *mddev)
+void mddev_init(mddev_t *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
@@ -436,6 +484,7 @@ static void mddev_init(mddev_t *mddev)
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
+EXPORT_SYMBOL_GPL(mddev_init);
static mddev_t * mddev_find(dev_t unit)
{
@@ -532,25 +581,31 @@ static void mddev_unlock(mddev_t * mddev)
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
- * So hold open_mutex instead - we are allowed to take
- * it while holding reconfig_mutex, and md_run can
- * use it to wait for the remove to complete.
+ * So instead set sysfs_active while the remove is happening,
+ * and anything else which might set ->to_remove or may
+ * otherwise change the sysfs namespace will fail with
+ * -EBUSY if sysfs_active is still set.
+ * We set sysfs_active under reconfig_mutex and elsewhere
+ * test it under the same mutex to ensure its correct value
+ * is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
- mutex_lock(&mddev->open_mutex);
+ mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
- if (to_remove != &md_redundancy_group)
- sysfs_remove_group(&mddev->kobj, to_remove);
- if (mddev->pers == NULL ||
- mddev->pers->sync_request == NULL) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
+ if (mddev->kobj.sd) {
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
}
- mutex_unlock(&mddev->open_mutex);
+ mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
@@ -675,11 +730,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* if zero is reached.
* If an error occurred, call md_error
*
- * As we might need to resubmit the request if BIO_RW_BARRIER
+ * As we might need to resubmit the request if REQ_HARDBARRIER
* causes ENOTSUPP, we allocate a spare bio...
*/
struct bio *bio = bio_alloc(GFP_NOIO, 1);
- int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+ int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
@@ -691,7 +746,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
atomic_inc(&mddev->pending_writes);
if (!test_bit(BarriersNotsupp, &rdev->flags)) {
struct bio *rbio;
- rw |= (1<<BIO_RW_BARRIER);
+ rw |= REQ_HARDBARRIER;
rbio = bio_clone(bio, GFP_NOIO);
rbio->bi_private = bio;
rbio->bi_end_io = super_written_barrier;
@@ -736,7 +791,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct completion event;
int ret;
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
bio->bi_bdev = bdev;
bio->bi_sector = sector;
@@ -1588,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
- }
+ } else
+ max_dev = le32_to_cpu(sb->max_dev);
+
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1811,11 +1868,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
- if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
- kobject_del(&rdev->kobj);
- goto fail;
- }
- rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
+ if (sysfs_create_link(&rdev->kobj, ko, "block"))
+ /* failure here is OK */;
+ rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -2083,16 +2138,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
* with the rest of the array)
*/
mdk_rdev_t *rdev;
-
- /* First make sure individual recovery_offsets are correct */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- mddev->curr_resync_completed > rdev->recovery_offset)
- rdev->recovery_offset = mddev->curr_resync_completed;
-
- }
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->sb_events == mddev->events ||
(nospares &&
@@ -2114,13 +2159,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
int sync_req;
int nospares = 0;
- mddev->utime = get_seconds();
- if (mddev->external)
- return;
repeat:
+ /* First make sure individual recovery_offsets are correct */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ mddev->delta_disks >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ mddev->curr_resync_completed > rdev->recovery_offset)
+ rdev->recovery_offset = mddev->curr_resync_completed;
+
+ }
+ if (!mddev->persistent) {
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ return;
+ }
+
spin_lock_irq(&mddev->write_lock);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ mddev->utime = get_seconds();
+
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -2168,19 +2227,6 @@ repeat:
MD_BUG();
mddev->events --;
}
-
- /*
- * do not write anything to disk if using
- * nonpersistent superblocks
- */
- if (!mddev->persistent) {
- if (!mddev->external)
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-
- spin_unlock_irq(&mddev->write_lock);
- wake_up(&mddev->sb_wait);
- return;
- }
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
@@ -2334,8 +2380,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
set_bit(In_sync, &rdev->flags);
err = 0;
}
- if (!err && rdev->sysfs_state)
- sysfs_notify_dirent(rdev->sysfs_state);
+ if (!err)
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
@@ -2430,14 +2476,10 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
rdev->raid_disk = -1;
return err;
} else
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
- printk(KERN_WARNING
- "md: cannot register "
- "%s for %s\n",
- nm, mdname(rdev->mddev));
-
+ /* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks)
@@ -2447,7 +2489,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
@@ -2695,6 +2737,24 @@ static struct kobj_type rdev_ktype = {
.default_attrs = rdev_default_attrs,
};
+void md_rdev_init(mdk_rdev_t *rdev)
+{
+ rdev->desc_nr = -1;
+ rdev->saved_raid_disk = -1;
+ rdev->raid_disk = -1;
+ rdev->flags = 0;
+ rdev->data_offset = 0;
+ rdev->sb_events = 0;
+ rdev->last_read_error.tv_sec = 0;
+ rdev->last_read_error.tv_nsec = 0;
+ atomic_set(&rdev->nr_pending, 0);
+ atomic_set(&rdev->read_errors, 0);
+ atomic_set(&rdev->corrected_errors, 0);
+
+ INIT_LIST_HEAD(&rdev->same_set);
+ init_waitqueue_head(&rdev->blocked_wait);
+}
+EXPORT_SYMBOL_GPL(md_rdev_init);
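
md_rdev_init() is split out of md_import_device() and exported, so code outside md.c can bring a self-allocated rdev into a sane state before use. An illustrative sketch under that assumption (helper name made up):

static mdk_rdev_t *example_alloc_rdev(void)
{
	mdk_rdev_t *rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);

	if (rdev)
		md_rdev_init(rdev);	/* desc_nr/raid_disk = -1, counters zeroed,
					 * same_set and blocked_wait initialised */
	return rdev;
}
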
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
@@ -2718,6 +2778,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
return ERR_PTR(-ENOMEM);
}
+ md_rdev_init(rdev);
if ((err = alloc_disk_sb(rdev)))
goto abort_free;
@@ -2727,18 +2788,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
kobject_init(&rdev->kobj, &rdev_ktype);
- rdev->desc_nr = -1;
- rdev->saved_raid_disk = -1;
- rdev->raid_disk = -1;
- rdev->flags = 0;
- rdev->data_offset = 0;
- rdev->sb_events = 0;
- rdev->last_read_error.tv_sec = 0;
- rdev->last_read_error.tv_nsec = 0;
- atomic_set(&rdev->nr_pending, 0);
- atomic_set(&rdev->read_errors, 0);
- atomic_set(&rdev->corrected_errors, 0);
-
size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
@@ -2767,9 +2816,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
}
}
- INIT_LIST_HEAD(&rdev->same_set);
- init_waitqueue_head(&rdev->blocked_wait);
-
return rdev;
abort_free:
@@ -2960,7 +3006,9 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
* - new personality will access other array.
*/
- if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+ if (mddev->sync_thread ||
+ mddev->reshape_position != MaxSector ||
+ mddev->sysfs_active)
return -EBUSY;
if (!mddev->pers->quiesce) {
@@ -3324,7 +3372,7 @@ array_state_show(mddev_t *mddev, char *page)
case 0:
if (mddev->in_sync)
st = clean;
- else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
@@ -3405,9 +3453,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN,
- &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
@@ -3419,8 +3465,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- if (mddev->external)
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -3437,7 +3482,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
if (err)
return err;
else {
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
}
@@ -3735,7 +3780,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
@@ -4281,13 +4326,14 @@ static int md_alloc(dev_t dev, char *name)
disk->disk_name);
error = 0;
}
- if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+ if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
abort:
mutex_unlock(&disks_mutex);
- if (!error) {
+ if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
+ mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
@@ -4325,14 +4371,14 @@ static void md_safemode_timeout(unsigned long data)
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
-static int md_run(mddev_t *mddev)
+int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
@@ -4344,13 +4390,9 @@ static int md_run(mddev_t *mddev)
if (mddev->pers)
return -EBUSY;
-
- /* These two calls synchronise us with the
- * sysfs_remove_group calls in mddev_unlock,
- * so they must have completed.
- */
- mutex_lock(&mddev->open_mutex);
- mutex_unlock(&mddev->open_mutex);
+ /* Cannot run until previous stop completes properly */
+ if (mddev->sysfs_active)
+ return -EBUSY;
/*
* Analyze all RAID superblock(s)
@@ -4397,7 +4439,7 @@ static int md_run(mddev_t *mddev)
return -EINVAL;
}
}
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
spin_lock(&pers_lock);
@@ -4496,11 +4538,12 @@ static int md_run(mddev_t *mddev)
return err;
}
if (mddev->pers->sync_request) {
- if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -4518,8 +4561,7 @@ static int md_run(mddev_t *mddev)
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
- printk("md: cannot register %s for %s\n",
- nm, mdname(mddev));
+ /* failure here is OK */;
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -4531,12 +4573,12 @@ static int md_run(mddev_t *mddev)
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
md_new_event(mddev);
- sysfs_notify_dirent(mddev->sysfs_state);
- if (mddev->sysfs_action)
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
+EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(mddev_t *mddev)
{
@@ -4545,7 +4587,11 @@ static int do_md_run(mddev_t *mddev)
err = md_run(mddev);
if (err)
goto out;
-
+ err = bitmap_load(mddev);
+ if (err) {
+ bitmap_destroy(mddev);
+ goto out;
+ }
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -4573,7 +4619,7 @@ static int restart_array(mddev_t *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -4644,9 +4690,10 @@ static void md_clean(mddev_t *mddev)
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
+ mddev->plug = NULL;
}
-static void md_stop_writes(mddev_t *mddev)
+void md_stop_writes(mddev_t *mddev)
{
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4666,11 +4713,10 @@ static void md_stop_writes(mddev_t *mddev)
md_update_sb(mddev, 1);
}
}
+EXPORT_SYMBOL_GPL(md_stop_writes);
-static void md_stop(mddev_t *mddev)
+void md_stop(mddev_t *mddev)
{
- md_stop_writes(mddev);
-
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
@@ -4678,6 +4724,7 @@ static void md_stop(mddev_t *mddev)
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
+EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(mddev_t *mddev, int is_open)
{
@@ -4697,7 +4744,7 @@ static int md_set_readonly(mddev_t *mddev, int is_open)
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
@@ -4711,26 +4758,29 @@ out:
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
- int err = 0;
struct gendisk *disk = mddev->gendisk;
mdk_rdev_t *rdev;
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > is_open) {
+ if (atomic_read(&mddev->openers) > is_open ||
+ mddev->sysfs_active) {
printk("md: %s still in use.\n",mdname(mddev));
- err = -EBUSY;
- } else if (mddev->pers) {
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
+ if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
+ md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0) {
@@ -4740,21 +4790,17 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
}
set_capacity(disk, 0);
+ mutex_unlock(&mddev->open_mutex);
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
-
- err = 0;
- }
- mutex_unlock(&mddev->open_mutex);
- if (err)
- return err;
+ } else
+ mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
-
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
@@ -4771,13 +4817,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
-
}
- err = 0;
blk_integrity_unregister(disk);
md_new_event(mddev);
- sysfs_notify_dirent(mddev->sysfs_state);
- return err;
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+ return 0;
}
#ifndef MODULE
@@ -5138,7 +5182,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (err)
export_rdev(rdev);
else
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
md_update_sb(mddev, 1);
if (mddev->degraded)
@@ -5331,8 +5375,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
- if (fd >= 0)
+ if (fd >= 0) {
err = bitmap_create(mddev);
+ if (!err)
+ err = bitmap_load(mddev);
+ }
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
@@ -5581,6 +5628,8 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
+ if (!rv)
+ rv = bitmap_load(mddev);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
@@ -5813,7 +5862,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
} else {
@@ -5902,6 +5951,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
mddev_t *mddev = mddev_find(bdev->bd_dev);
int err;
+ lock_kernel();
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
@@ -5910,6 +5960,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
/* Wait until bdev->bd_disk is definitely gone */
flush_scheduled_work();
/* Then retry the open from the top */
+ unlock_kernel();
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5923,6 +5974,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_size_change(mddev->gendisk, bdev);
out:
+ unlock_kernel();
return err;
}
@@ -5931,8 +5983,10 @@ static int md_release(struct gendisk *disk, fmode_t mode)
mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
+ lock_kernel();
atomic_dec(&mddev->openers);
mddev_put(mddev);
+ unlock_kernel();
return 0;
}
@@ -6059,10 +6113,12 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ if (mddev->event_work.func)
+ schedule_work(&mddev->event_work);
md_new_event_inintr(mddev);
}
@@ -6514,15 +6570,15 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock_irq(&mddev->write_lock);
}
if (did_change)
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
@@ -6558,22 +6614,31 @@ int md_allow_write(mddev_t *mddev)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock_irq(&mddev->write_lock);
- if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
+void md_unplug(mddev_t *mddev)
+{
+ if (mddev->queue)
+ blk_unplug(mddev->queue);
+ if (mddev->plug)
+ mddev->plug->unplug_fn(mddev->plug);
+}
+
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
void md_do_sync(mddev_t *mddev)
@@ -6752,7 +6817,7 @@ void md_do_sync(mddev_t *mddev)
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
- blk_unplug(mddev->queue);
+ md_unplug(mddev);
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed =
@@ -6829,7 +6894,7 @@ void md_do_sync(mddev_t *mddev)
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
- blk_unplug(mddev->queue);
+ md_unplug(mddev);
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6848,7 +6913,7 @@ void md_do_sync(mddev_t *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
- blk_unplug(mddev->queue);
+ md_unplug(mddev);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -6950,10 +7015,7 @@ static int remove_and_add_spares(mddev_t *mddev)
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
- printk(KERN_WARNING
- "md: cannot register "
- "%s for %s\n",
- nm, mdname(mddev));
+ /* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -7009,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags && !mddev->external) ||
+ (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
@@ -7039,14 +7101,13 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
if (did_change)
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags)
@@ -7085,7 +7146,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
goto unlock;
}
@@ -7147,7 +7208,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
} else
md_wakeup_thread(mddev->sync_thread);
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
unlock:
@@ -7156,7 +7217,7 @@ void md_check_recovery(mddev_t *mddev)
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
}
mddev_unlock(mddev);
}
@@ -7164,7 +7225,7 @@ void md_check_recovery(mddev_t *mddev)
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags),
msecs_to_jiffies(5000));
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec00..3931299788d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,26 @@
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
+/* generic plugging support - like that provided with request_queue,
+ * but does not require a request_queue.
+ */
+struct plug_handle {
+ void (*unplug_fn)(struct plug_handle *);
+ struct timer_list unplug_timer;
+ struct work_struct unplug_work;
+ unsigned long unplug_flag;
+};
+#define PLUGGED_FLAG 1
+void plugger_init(struct plug_handle *plug,
+ void (*unplug_fn)(struct plug_handle *));
+void plugger_set_plug(struct plug_handle *plug);
+int plugger_remove_plug(struct plug_handle *plug);
+static inline void plugger_flush(struct plug_handle *plug)
+{
+ del_timer_sync(&plug->unplug_timer);
+ cancel_work_sync(&plug->unplug_work);
+}
+
/*
* MD's 'extended' device
*/
@@ -67,7 +87,7 @@ struct mdk_rdev_s
#define Faulty 1 /* device is known to have a fault */
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
-#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
+#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */
#define AllReserved 6 /* If whole device is reserved for
* one array */
#define AutoDetected 7 /* added by auto-detect */
@@ -120,11 +140,15 @@ struct mddev_s
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* superblock update in progress */
+#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
int suspended;
atomic_t active_io;
int ro;
+ int sysfs_active; /* set when sysfs deletes
+ * are happening, so run/
+ * takeover/stop are not safe
+ */
struct gendisk *gendisk;
@@ -254,7 +278,7 @@ struct mddev_s
* fails. Only supported
*/
struct bio *biolist; /* bios that need to be retried
- * because BIO_RW_BARRIER is not supported
+ * because REQ_HARDBARRIER is not supported
*/
atomic_t recovery_active; /* blocks scheduled, but not written */
@@ -297,9 +321,14 @@ struct mddev_s
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
+ /* When md is serving under dm, it might use a
+ * dirty_log to store the bits.
+ */
+ struct dm_dirty_log *log;
+
struct mutex mutex;
unsigned long chunksize;
- unsigned long daemon_sleep; /* how many seconds between updates? */
+ unsigned long daemon_sleep; /* how many jiffies between updates? */
unsigned long max_write_behind; /* write-behind mode */
int external;
} bitmap_info;
@@ -308,6 +337,8 @@ struct mddev_s
struct list_head all_mddevs;
struct attribute_group *to_remove;
+ struct plug_handle *plug; /* if used by personality */
+
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
@@ -318,6 +349,7 @@ struct mddev_s
struct bio *barrier;
atomic_t flush_pending;
struct work_struct barrier_work;
+ struct work_struct event_work; /* used by dm to report failure event */
};
@@ -382,6 +414,18 @@ struct md_sysfs_entry {
};
extern struct attribute_group md_bitmap_group;
+static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
+{
+ if (sd)
+ return sysfs_get_dirent(sd, NULL, name);
+ return sd;
+}
+static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
+{
+ if (sd)
+ sysfs_notify_dirent(sd);
+}
+
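
The _safe wrappers above exist because an mddev driven without a registered sysfs kobject has mddev->kobj.sd == NULL, so the plain sysfs_get_dirent()/sysfs_notify_dirent() calls converted throughout this patch could dereference NULL. A small illustrative helper (the function name is made up):

static void example_announce_array_state(mddev_t *mddev)
{
	/*
	 * Safe even when the array has no sysfs presence: the lookup
	 * returns NULL and the notify becomes a no-op.
	 */
	if (!mddev->sysfs_state)
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd,
							   "array_state");
	sysfs_notify_dirent_safe(mddev->sysfs_state);
}
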
static inline char * mdname (mddev_t * mddev)
{
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
@@ -474,5 +518,14 @@ extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
+extern void md_unplug(mddev_t *mddev);
+
+extern void mddev_init(mddev_t *mddev);
+extern int md_run(mddev_t *mddev);
+extern void md_stop(mddev_t *mddev);
+extern void md_stop_writes(mddev_t *mddev);
+extern void md_rdev_init(mdk_rdev_t *rdev);
+extern void mddev_suspend(mddev_t *mddev);
+extern void mddev_resume(mddev_t *mddev);
#endif /* _MD_MD_H */
diff --git a/drivers/md/mktables.c b/drivers/md/mktables.c
deleted file mode 100644
index 3b1500843bb..00000000000
--- a/drivers/md/mktables.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2 or (at your
- * option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * mktables.c
- *
- * Make RAID-6 tables. This is a host user space program to be run at
- * compile time.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
-#include <stdlib.h>
-#include <time.h>
-
-static uint8_t gfmul(uint8_t a, uint8_t b)
-{
- uint8_t v = 0;
-
- while (b) {
- if (b & 1)
- v ^= a;
- a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
- b >>= 1;
- }
-
- return v;
-}
-
-static uint8_t gfpow(uint8_t a, int b)
-{
- uint8_t v = 1;
-
- b %= 255;
- if (b < 0)
- b += 255;
-
- while (b) {
- if (b & 1)
- v = gfmul(v, a);
- a = gfmul(a, a);
- b >>= 1;
- }
-
- return v;
-}
-
-int main(int argc, char *argv[])
-{
- int i, j, k;
- uint8_t v;
- uint8_t exptbl[256], invtbl[256];
-
- printf("#include <linux/raid/pq.h>\n");
-
- /* Compute multiplication table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfmul[256][256] =\n"
- "{\n");
- for (i = 0; i < 256; i++) {
- printf("\t{\n");
- for (j = 0; j < 256; j += 8) {
- printf("\t\t");
- for (k = 0; k < 8; k++)
- printf("0x%02x,%c", gfmul(i, j + k),
- (k == 7) ? '\n' : ' ');
- }
- printf("\t},\n");
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfmul);\n");
- printf("#endif\n");
-
- /* Compute power-of-2 table (exponent) */
- v = 1;
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexp[256] =\n" "{\n");
- for (i = 0; i < 256; i += 8) {
- printf("\t");
- for (j = 0; j < 8; j++) {
- exptbl[i + j] = v;
- printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
- v = gfmul(v, 2);
- if (v == 1)
- v = 0; /* For entry 255, not a real entry */
- }
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfexp);\n");
- printf("#endif\n");
-
- /* Compute inverse table x^-1 == x^254 */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfinv[256] =\n" "{\n");
- for (i = 0; i < 256; i += 8) {
- printf("\t");
- for (j = 0; j < 8; j++) {
- invtbl[i + j] = v = gfpow(i + j, 254);
- printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
- }
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfinv);\n");
- printf("#endif\n");
-
- /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexi[256] =\n" "{\n");
- for (i = 0; i < 256; i += 8) {
- printf("\t");
- for (j = 0; j < 8; j++)
- printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
- (j == 7) ? '\n' : ' ');
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfexi);\n");
- printf("#endif\n");
-
- return 0;
-}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699a..0307d217e7a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
- else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+ else if (!(bio->bi_rw & REQ_RAHEAD)) {
/*
* oops, IO error:
*/
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
mp_bh->bio = *bio;
mp_bh->bio.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+ mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
*bio = *(mp_bh->master_bio);
bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+ bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2c..6f7af46d623 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012d..ad83a4dcadc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
- const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- bool do_barriers;
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+ unsigned long do_barriers;
mdk_rdev_t *blocked_rdev;
/*
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
finish_wait(&conf->wait_barrier, &w);
}
if (unlikely(!mddev->barriers_work &&
- bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ (bio->bi_rw & REQ_HARDBARRIER))) {
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r1_bio;
generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
- do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+ do_barriers = bio->bi_rw & REQ_HARDBARRIER;
if (do_barriers)
set_bit(R1BIO_Barrier, &r1_bio->state);
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
- (do_sync << BIO_RW_SYNCIO);
+ mbio->bi_rw = WRITE | do_barriers | do_sync;
mbio->bi_private = r1_bio;
if (behind_pages) {
@@ -1121,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
+ int count = 0;
+ unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
@@ -1132,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return 0;
+ return count;
}
@@ -1633,7 +1635,7 @@ static void raid1d(mddev_t *mddev)
sync_request_write(mddev, r1_bio);
unplug = 1;
} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
- /* some requests in the r1bio were BIO_RW_BARRIER
+ /* some requests in the r1bio were REQ_HARDBARRIER
* requests which failed with -EOPNOTSUPP. Hohumm..
* Better resubmit without the barrier.
* We know which devices to resubmit for, because
@@ -1641,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
* We already have a nr_pending reference on these rdevs.
*/
int i;
- const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1664,7 @@ static void raid1d(mddev_t *mddev)
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
bio->bi_end_io = raid1_end_write_request;
- bio->bi_rw = WRITE |
- (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = WRITE | do_sync;
bio->bi_private = r1_bio;
r1_bio->bios[i] = bio;
generic_make_request(bio);
@@ -1698,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
@@ -1715,7 +1716,7 @@ static void raid1d(mddev_t *mddev)
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio;
unplug = 1;
generic_make_request(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e2..84718383124 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
- const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
@@ -825,11 +825,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+
+ /* Each of these 'make_request' calls will call 'wait_barrier'.
+ * If the first succeeds but the second blocks due to the resync
+ * thread raising the barrier, we will deadlock because the
+ * IO to the underlying device will be queued in generic_make_request
+ * and will never complete, so will never reduce nr_pending.
+ * So increment nr_waiting here so no new raise_barriers will
+ * succeed, and so the second wait_barrier cannot block.
+ */
+ spin_lock_irq(&conf->resync_lock);
+ conf->nr_waiting++;
+ spin_unlock_irq(&conf->resync_lock);
+
if (make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
if (make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
+ spin_lock_irq(&conf->resync_lock);
+ conf->nr_waiting--;
+ wake_up(&conf->wait_barrier);
+ spin_unlock_irq(&conf->resync_lock);
+
bio_pair_release(bp);
return 0;
bad_map:
@@ -879,7 +897,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
generic_make_request(read_bio);
@@ -947,7 +965,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
conf->mirrors[d].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+ mbio->bi_rw = WRITE | do_sync;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1098,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
int i;
conf_t *conf = mddev->private;
mirror_info_t *tmp;
+ int count = 0;
+ unsigned long flags;
/*
* Find all non-in_sync disks within the RAID10 configuration
@@ -1108,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
if (tmp->rdev
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return 0;
+ return count;
}
@@ -1716,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
raid_end_bio_io(r10_bio);
bio_put(bio);
} else {
- const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
@@ -1730,7 +1751,7 @@ static void raid10d(mddev_t *mddev)
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
unplug = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc..69b0a169e43 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -201,11 +201,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state)) {
list_add_tail(&sh->lru, &conf->delayed_list);
- blk_plug_device(conf->mddev->queue);
+ plugger_set_plug(&conf->plug);
} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0) {
list_add_tail(&sh->lru, &conf->bitmap_list);
- blk_plug_device(conf->mddev->queue);
+ plugger_set_plug(&conf->plug);
} else {
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
@@ -434,7 +434,6 @@ static int has_failed(raid5_conf_t *conf)
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
@@ -464,7 +463,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- raid5_unplug_device(conf->mddev->queue)
+ md_raid5_unplug_device(conf)
);
conf->inactive_blocked = 0;
} else
@@ -1337,10 +1336,14 @@ static int grow_stripes(raid5_conf_t *conf, int num)
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- sprintf(conf->cache_name[0],
- "raid%d-%s", conf->level, mdname(conf->mddev));
- sprintf(conf->cache_name[1],
- "raid%d-%s-alt", conf->level, mdname(conf->mddev));
+ if (conf->mddev->gendisk)
+ sprintf(conf->cache_name[0],
+ "raid%d-%s", conf->level, mdname(conf->mddev));
+ else
+ sprintf(conf->cache_name[0],
+ "raid%d-%p", conf->level, conf->mddev);
+ sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -3614,7 +3617,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
list_add_tail(&sh->lru, &conf->hold_list);
}
} else
- blk_plug_device(conf->mddev->queue);
+ plugger_set_plug(&conf->plug);
}
static void activate_bit_delay(raid5_conf_t *conf)
@@ -3655,36 +3658,44 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid5_unplug_device(struct request_queue *q)
+void md_raid5_unplug_device(raid5_conf_t *conf)
{
- mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev->private;
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
- if (blk_remove_plug(q)) {
+ if (plugger_remove_plug(&conf->plug)) {
conf->seq_flush++;
raid5_activate_delayed(conf);
}
- md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(conf->mddev->thread);
spin_unlock_irqrestore(&conf->device_lock, flags);
- unplug_slaves(mddev);
+ unplug_slaves(conf->mddev);
}
+EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
-static int raid5_congested(void *data, int bits)
+static void raid5_unplug(struct plug_handle *plug)
+{
+ raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
+ md_raid5_unplug_device(conf);
+}
+
+static void raid5_unplug_queue(struct request_queue *q)
+{
+ mddev_t *mddev = q->queuedata;
+ md_raid5_unplug_device(mddev->private);
+}
+
+int md_raid5_congested(mddev_t *mddev, int bits)
{
- mddev_t *mddev = data;
raid5_conf_t *conf = mddev->private;
/* No difference between reads and writes. Just check
* how busy the stripe_cache is
*/
- if (mddev_congested(mddev, bits))
- return 1;
if (conf->inactive_blocked)
return 1;
if (conf->quiesce)
@@ -3694,6 +3705,15 @@ static int raid5_congested(void *data, int bits)
return 0;
}
+EXPORT_SYMBOL_GPL(md_raid5_congested);
+
+static int raid5_congested(void *data, int bits)
+{
+ mddev_t *mddev = data;
+
+ return mddev_congested(mddev, bits) ||
+ md_raid5_congested(mddev, bits);
+}
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
@@ -3958,7 +3978,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
const int rw = bio_data_dir(bi);
int remaining;
- if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+ if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
/* Drain all pending writes. We only really need
* to ensure they have been submitted, but this is
* easier.
@@ -4075,7 +4095,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
* add failed due to overlap. Flush everything
* and wait a while
*/
- raid5_unplug_device(mddev->queue);
+ md_raid5_unplug_device(conf);
release_stripe(sh);
schedule();
goto retry;
@@ -4566,23 +4586,15 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
return 0;
}
-static ssize_t
-raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
+int
+raid5_set_cache_size(mddev_t *mddev, int size)
{
raid5_conf_t *conf = mddev->private;
- unsigned long new;
int err;
- if (len >= PAGE_SIZE)
+ if (size <= 16 || size > 32768)
return -EINVAL;
- if (!conf)
- return -ENODEV;
-
- if (strict_strtoul(page, 10, &new))
- return -EINVAL;
- if (new <= 16 || new > 32768)
- return -EINVAL;
- while (new < conf->max_nr_stripes) {
+ while (size < conf->max_nr_stripes) {
if (drop_one_stripe(conf))
conf->max_nr_stripes--;
else
@@ -4591,11 +4603,32 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
err = md_allow_write(mddev);
if (err)
return err;
- while (new > conf->max_nr_stripes) {
+ while (size > conf->max_nr_stripes) {
if (grow_one_stripe(conf))
conf->max_nr_stripes++;
else break;
}
+ return 0;
+}
+EXPORT_SYMBOL(raid5_set_cache_size);
+
+static ssize_t
+raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
+{
+ raid5_conf_t *conf = mddev->private;
+ unsigned long new;
+ int err;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (strict_strtoul(page, 10, &new))
+ return -EINVAL;
+ err = raid5_set_cache_size(mddev, new);
+ if (err)
+ return err;
return len;
}
@@ -4958,7 +4991,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
- int working_disks = 0, chunk_size;
+ int working_disks = 0;
int dirty_parity_disks = 0;
mdk_rdev_t *rdev;
sector_t reshape_offset = 0;
@@ -5144,42 +5177,47 @@ static int run(mddev_t *mddev)
"reshape");
}
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
- */
- {
- int data_disks = conf->previous_raid_disks - conf->max_degraded;
- int stripe = data_disks *
- ((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- }
/* Ok, everything is just fine now */
if (mddev->to_remove == &raid5_attrs_group)
mddev->to_remove = NULL;
- else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ else if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
- "md/raid:%s: failed to create sysfs attributes.\n",
+ "raid5: failed to create sysfs attributes for %s\n",
mdname(mddev));
+ md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- mddev->queue->queue_lock = &conf->device_lock;
+ plugger_init(&conf->plug, raid5_unplug);
+ mddev->plug = &conf->plug;
+ if (mddev->queue) {
+ int chunk_size;
+ /* read-ahead size must cover two whole stripes, which
+ * is 2 * (data disks) * chunksize, where the number of
+ * data disks is the number of raid devices minus the
+ * parity devices (max_degraded)
+ */
+ int data_disks = conf->previous_raid_disks - conf->max_degraded;
+ int stripe = data_disks *
+ ((mddev->chunk_sectors << 9) / PAGE_SIZE);
+ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- mddev->queue->unplug_fn = raid5_unplug_device;
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = raid5_congested;
+ blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
- md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ mddev->queue->backing_dev_info.congested_fn = raid5_congested;
+ mddev->queue->queue_lock = &conf->device_lock;
+ mddev->queue->unplug_fn = raid5_unplug_queue;
- blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
- chunk_size = mddev->chunk_sectors << 9;
- blk_queue_io_min(mddev->queue, chunk_size);
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks - conf->max_degraded));
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks - conf->max_degraded));
- list_for_each_entry(rdev, &mddev->disks, same_set)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ }
return 0;
abort:
@@ -5200,8 +5238,9 @@ static int stop(mddev_t *mddev)
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ if (mddev->queue)
+ mddev->queue->backing_dev_info.congested_fn = NULL;
+ plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
free_conf(conf);
mddev->private = NULL;
mddev->to_remove = &raid5_attrs_group;
@@ -5291,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
int i;
raid5_conf_t *conf = mddev->private;
struct disk_info *tmp;
+ int count = 0;
+ unsigned long flags;
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
@@ -5298,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
- return 0;
+ return count;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
@@ -5545,10 +5587,7 @@ static int raid5_start_reshape(mddev_t *mddev)
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
- printk(KERN_WARNING
- "md/raid:%s: failed to create "
- " link %s\n",
- mdname(mddev), nm);
+ /* Failure here is OK */;
} else
break;
}
@@ -5603,7 +5642,7 @@ static void end_reshape(raid5_conf_t *conf)
/* read-ahead size must cover two whole stripes, which is
* 2 * (datadisks) * chunksize where 'n' is the number of raid devices
*/
- {
+ if (conf->mddev->queue) {
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
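
Both spare_active rewrites in this patch (raid10 earlier, raid5 above) follow the same pattern: tally the members that just became In_sync without taking the lock once per device, then apply a single locked adjustment to mddev->degraded and return the tally so the caller can tell whether anything changed. A compressed userspace sketch of that shape, with a pthread mutex standing in for the device_lock spinlock and invented array fields:

#include <pthread.h>
#include <stdio.h>

struct fake_conf {
	pthread_mutex_t device_lock;
	int degraded;
	int in_sync[4];		/* 1 if the member is already in sync */
	int faulty[4];
};

/* Returns how many members became active, mirroring the new return value. */
static int spare_active(struct fake_conf *c)
{
	int i, count = 0;

	for (i = 0; i < 4; i++)
		if (!c->faulty[i] && !c->in_sync[i]) {
			c->in_sync[i] = 1;	/* test_and_set_bit(In_sync, ...) */
			count++;		/* the real code also notifies sysfs here */
		}

	pthread_mutex_lock(&c->device_lock);	/* one locked update instead of one per device */
	c->degraded -= count;
	pthread_mutex_unlock(&c->device_lock);
	return count;
}

int main(void)
{
	struct fake_conf c = { PTHREAD_MUTEX_INITIALIZER, 2, {1, 0, 1, 0}, {0} };
	printf("activated %d, degraded now %d\n", spare_active(&c), c.degraded);
	return 0;
}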
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 0f86f5e3672..36eaed5dfd6 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -388,7 +388,7 @@ struct raid5_private_data {
* two caches.
*/
int active_name;
- char cache_name[2][20];
+ char cache_name[2][32];
struct kmem_cache *slab_cache; /* for allocating stripes */
int seq_flush, seq_write;
@@ -398,6 +398,9 @@ struct raid5_private_data {
* (fresh device added).
* Cleared when a sync completes.
*/
+
+ struct plug_handle plug;
+
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
@@ -497,4 +500,8 @@ static inline int algorithm_is_DDF(int layout)
{
return layout >= 8 && layout <= 10;
}
+
+extern int md_raid5_congested(mddev_t *mddev, int bits);
+extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern int raid5_set_cache_size(mddev_t *mddev, int size);
#endif
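
raid5.h now exports md_raid5_congested(), md_raid5_unplug_device() and raid5_set_cache_size() and embeds a struct plug_handle in the private data, so the plug/unplug logic no longer depends on mddev->queue existing. The plugger_* helpers used throughout the raid5.c hunks are not defined in this patch; a toy model of that interface as the conversion appears to use it (semantics inferred from the call sites, not taken from md.c):

#include <stdbool.h>
#include <stdio.h>

struct plug_handle {
	bool plugged;
	void (*unplug_fn)(struct plug_handle *plug);
};

static void plugger_init(struct plug_handle *p, void (*fn)(struct plug_handle *))
{
	p->plugged = false;
	p->unplug_fn = fn;
}

static void plugger_set_plug(struct plug_handle *p)	{ p->plugged = true; }

static bool plugger_remove_plug(struct plug_handle *p)	/* true if a plug was pending */
{
	bool was = p->plugged;
	p->plugged = false;
	return was;
}

static void plugger_flush(struct plug_handle *p)	/* run the unplug callback once */
{
	if (plugger_remove_plug(p))
		p->unplug_fn(p);
}

static void demo_unplug(struct plug_handle *p)
{
	(void)p;
	printf("unplug: release delayed stripes\n");
}

int main(void)
{
	struct plug_handle plug;

	plugger_init(&plug, demo_unplug);
	plugger_set_plug(&plug);	/* a delayed stripe was queued */
	plugger_flush(&plug);		/* e.g. the stop() path above */
	return 0;
}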
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
deleted file mode 100644
index 1f8784bfd44..00000000000
--- a/drivers/md/raid6algos.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6algos.c
- *
- * Algorithm list and algorithm selection for RAID-6
- */
-
-#include <linux/raid/pq.h>
-#include <linux/gfp.h>
-#ifndef __KERNEL__
-#include <sys/mman.h>
-#include <stdio.h>
-#else
-#if !RAID6_USE_EMPTY_ZERO_PAGE
-/* In .bss so it's zeroed */
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-EXPORT_SYMBOL(raid6_empty_zero_page);
-#endif
-#endif
-
-struct raid6_calls raid6_call;
-EXPORT_SYMBOL_GPL(raid6_call);
-
-const struct raid6_calls * const raid6_algos[] = {
- &raid6_intx1,
- &raid6_intx2,
- &raid6_intx4,
- &raid6_intx8,
-#if defined(__ia64__)
- &raid6_intx16,
- &raid6_intx32,
-#endif
-#if defined(__i386__) && !defined(__arch_um__)
- &raid6_mmxx1,
- &raid6_mmxx2,
- &raid6_sse1x1,
- &raid6_sse1x2,
- &raid6_sse2x1,
- &raid6_sse2x2,
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
- &raid6_sse2x1,
- &raid6_sse2x2,
- &raid6_sse2x4,
-#endif
-#ifdef CONFIG_ALTIVEC
- &raid6_altivec1,
- &raid6_altivec2,
- &raid6_altivec4,
- &raid6_altivec8,
-#endif
- NULL
-};
-
-#ifdef __KERNEL__
-#define RAID6_TIME_JIFFIES_LG2 4
-#else
-/* Need more time to be stable in userspace */
-#define RAID6_TIME_JIFFIES_LG2 9
-#define time_before(x, y) ((x) < (y))
-#endif
-
-/* Try to pick the best algorithm */
-/* This code uses the gfmul table as convenient data set to abuse */
-
-int __init raid6_select_algo(void)
-{
- const struct raid6_calls * const * algo;
- const struct raid6_calls * best;
- char *syndromes;
- void *dptrs[(65536/PAGE_SIZE)+2];
- int i, disks;
- unsigned long perf, bestperf;
- int bestprefer;
- unsigned long j0, j1;
-
- disks = (65536/PAGE_SIZE)+2;
- for ( i = 0 ; i < disks-2 ; i++ ) {
- dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
- }
-
- /* Normal code - use a 2-page allocation to avoid D$ conflict */
- syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
-
- if ( !syndromes ) {
- printk("raid6: Yikes! No memory available.\n");
- return -ENOMEM;
- }
-
- dptrs[disks-2] = syndromes;
- dptrs[disks-1] = syndromes + PAGE_SIZE;
-
- bestperf = 0; bestprefer = 0; best = NULL;
-
- for ( algo = raid6_algos ; *algo ; algo++ ) {
- if ( !(*algo)->valid || (*algo)->valid() ) {
- perf = 0;
-
- preempt_disable();
- j0 = jiffies;
- while ( (j1 = jiffies) == j0 )
- cpu_relax();
- while (time_before(jiffies,
- j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
- (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
- perf++;
- }
- preempt_enable();
-
- if ( (*algo)->prefer > bestprefer ||
- ((*algo)->prefer == bestprefer &&
- perf > bestperf) ) {
- best = *algo;
- bestprefer = best->prefer;
- bestperf = perf;
- }
- printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
- (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- }
- }
-
- if (best) {
- printk("raid6: using algorithm %s (%ld MB/s)\n",
- best->name,
- (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- raid6_call = *best;
- } else
- printk("raid6: Yikes! No algorithm found!\n");
-
- free_pages((unsigned long)syndromes, 1);
-
- return best ? 0 : -EINVAL;
-}
-
-static void raid6_exit(void)
-{
- do { } while (0);
-}
-
-subsys_initcall(raid6_select_algo);
-module_exit(raid6_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
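
For reference, the MB/s figure printed by the selection loop above follows from each gen_syndrome() call running over the 64 KiB gfmul table (2^16 bytes of data across 65536/PAGE_SIZE page-sized buffers) while the timed window lasts 2^RAID6_TIME_JIFFIES_LG2 jiffies:

\text{MB/s} = \frac{\text{perf}\cdot 2^{16}\ \text{bytes}}{(2^{\mathrm{LG2}}/HZ)\ \text{s}\cdot 2^{20}\ \text{bytes/MB}} = (\text{perf}\cdot HZ) \gg (20 - 16 + \mathrm{LG2})

which is exactly the shift used in both printk() calls.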
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
deleted file mode 100644
index 2654d5c854b..00000000000
--- a/drivers/md/raid6altivec.uc
+++ /dev/null
@@ -1,130 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6altivec$#.c
- *
- * $#-way unrolled portable integer math RAID-6 instruction set
- *
- * This file is postprocessed using unroll.awk
- *
- * <benh> hpa: in process,
- * you can just "steal" the vec unit with enable_kernel_altivec() (but
- * bracked this with preempt_disable/enable or in a lock)
- */
-
-#include <linux/raid/pq.h>
-
-#ifdef CONFIG_ALTIVEC
-
-#include <altivec.h>
-#ifdef __KERNEL__
-# include <asm/system.h>
-# include <asm/cputable.h>
-#endif
-
-/*
- * This is the C data type to use. We use a vector of
- * signed char so vec_cmpgt() will generate the right
- * instruction.
- */
-
-typedef vector signed char unative_t;
-
-#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
-#define NSIZE sizeof(unative_t)
-
-/*
- * The SHLBYTE() operation shifts each byte left by 1, *not*
- * rolling over into the next byte
- */
-static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
-{
- return vec_add(v,v);
-}
-
-/*
- * The MASK() operation returns 0xFF in any byte for which the high
- * bit is 1, 0x00 for any byte for which the high bit is 0.
- */
-static inline __attribute_const__ unative_t MASK(unative_t v)
-{
- unative_t zv = NBYTES(0);
-
- /* vec_cmpgt returns a vector bool char; thus the need for the cast */
- return (unative_t)vec_cmpgt(zv, v);
-}
-
-
-/* This is noinline to make damned sure that gcc doesn't move any of the
- Altivec code around the enable/disable code */
-static void noinline
-raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- unative_t x1d = NBYTES(0x1d);
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
- wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
- wp$$ = vec_xor(wp$$, wd$$);
- w2$$ = MASK(wq$$);
- w1$$ = SHLBYTE(wq$$);
- w2$$ = vec_and(w2$$, x1d);
- w1$$ = vec_xor(w1$$, w2$$);
- wq$$ = vec_xor(w1$$, wd$$);
- }
- *(unative_t *)&p[d+NSIZE*$$] = wp$$;
- *(unative_t *)&q[d+NSIZE*$$] = wq$$;
- }
-}
-
-static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- preempt_disable();
- enable_kernel_altivec();
-
- raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
-
- preempt_enable();
-}
-
-int raid6_have_altivec(void);
-#if $# == 1
-int raid6_have_altivec(void)
-{
- /* This assumes either all CPUs have Altivec or none does */
-# ifdef __KERNEL__
- return cpu_has_feature(CPU_FTR_ALTIVEC);
-# else
- return 1;
-# endif
-}
-#endif
-
-const struct raid6_calls raid6_altivec$# = {
- raid6_altivec$#_gen_syndrome,
- raid6_have_altivec,
- "altivecx$#",
- 0
-};
-
-#endif /* CONFIG_ALTIVEC */
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc
deleted file mode 100644
index d1e276a14fa..00000000000
--- a/drivers/md/raid6int.uc
+++ /dev/null
@@ -1,117 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6int$#.c
- *
- * $#-way unrolled portable integer math RAID-6 instruction set
- *
- * This file is postprocessed using unroll.awk
- */
-
-#include <linux/raid/pq.h>
-
-/*
- * This is the C data type to use
- */
-
-/* Change this from BITS_PER_LONG if there is something better... */
-#if BITS_PER_LONG == 64
-# define NBYTES(x) ((x) * 0x0101010101010101UL)
-# define NSIZE 8
-# define NSHIFT 3
-# define NSTRING "64"
-typedef u64 unative_t;
-#else
-# define NBYTES(x) ((x) * 0x01010101U)
-# define NSIZE 4
-# define NSHIFT 2
-# define NSTRING "32"
-typedef u32 unative_t;
-#endif
-
-
-
-/*
- * IA-64 wants insane amounts of unrolling. On other architectures that
- * is just a waste of space.
- */
-#if ($# <= 8) || defined(__ia64__)
-
-
-/*
- * These sub-operations are separate inlines since they can sometimes be
- * specially optimized using architecture-specific hacks.
- */
-
-/*
- * The SHLBYTE() operation shifts each byte left by 1, *not*
- * rolling over into the next byte
- */
-static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
-{
- unative_t vv;
-
- vv = (v << 1) & NBYTES(0xfe);
- return vv;
-}
-
-/*
- * The MASK() operation returns 0xFF in any byte for which the high
- * bit is 1, 0x00 for any byte for which the high bit is 0.
- */
-static inline __attribute_const__ unative_t MASK(unative_t v)
-{
- unative_t vv;
-
- vv = v & NBYTES(0x80);
- vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */
- return vv;
-}
-
-
-static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
- wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
- wp$$ ^= wd$$;
- w2$$ = MASK(wq$$);
- w1$$ = SHLBYTE(wq$$);
- w2$$ &= NBYTES(0x1d);
- w1$$ ^= w2$$;
- wq$$ = w1$$ ^ wd$$;
- }
- *(unative_t *)&p[d+NSIZE*$$] = wp$$;
- *(unative_t *)&q[d+NSIZE*$$] = wq$$;
- }
-}
-
-const struct raid6_calls raid6_intx$# = {
- raid6_int$#_gen_syndrome,
- NULL, /* always valid */
- "int" NSTRING "x$#",
- 0
-};
-
-#endif
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c
deleted file mode 100644
index e7f6c13132b..00000000000
--- a/drivers/md/raid6mmx.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6mmx.c
- *
- * MMX implementation of RAID-6 syndrome functions
- */
-
-#if defined(__i386__) && !defined(__arch_um__)
-
-#include <linux/raid/pq.h>
-#include "raid6x86.h"
-
-/* Shared with raid6sse1.c */
-const struct raid6_mmx_constants {
- u64 x1d;
-} raid6_mmx_constants = {
- 0x1d1d1d1d1d1d1d1dULL,
-};
-
-static int raid6_have_mmx(void)
-{
- /* Not really "boot_cpu" but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX);
-}
-
-/*
- * Plain MMX implementation
- */
-static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 8 ) {
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm6,%mm2");
- asm volatile("pxor %mm6,%mm4");
- }
- asm volatile("movq %%mm2,%0" : "=m" (p[d]));
- asm volatile("pxor %mm2,%mm2");
- asm volatile("movq %%mm4,%0" : "=m" (q[d]));
- asm volatile("pxor %mm4,%mm4");
- }
-
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_mmxx1 = {
- raid6_mmx1_gen_syndrome,
- raid6_have_mmx,
- "mmxx1",
- 0
-};
-
-/*
- * Unrolled-by-2 MMX implementation
- */
-static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
- asm volatile("pxor %mm7,%mm7"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 16 ) {
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8]));
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %mm3,%mm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("pcmpgtb %mm6,%mm7");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("paddb %mm6,%mm6");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pand %mm0,%mm7");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
- asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
- asm volatile("pxor %mm5,%mm2");
- asm volatile("pxor %mm7,%mm3");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm7,%mm7");
- }
- asm volatile("movq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
- asm volatile("movq %%mm4,%0" : "=m" (q[d]));
- asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
- }
-
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_mmxx2 = {
- raid6_mmx2_gen_syndrome,
- raid6_have_mmx,
- "mmxx2",
- 0
-};
-
-#endif
diff --git a/drivers/md/raid6recov.c b/drivers/md/raid6recov.c
deleted file mode 100644
index 2609f00e0d6..00000000000
--- a/drivers/md/raid6recov.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6recov.c
- *
- * RAID-6 data recovery in dual failure mode. In single failure mode,
- * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
- * the syndrome.)
- */
-
-#include <linux/raid/pq.h>
-
-/* Recover two failed data blocks. */
-void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
- void **ptrs)
-{
- u8 *p, *q, *dp, *dq;
- u8 px, qx, db;
- const u8 *pbmul; /* P multiplier table for B data */
- const u8 *qmul; /* Q multiplier table (for both) */
-
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data pages
- Use the dead data pages as temporary storage for
- delta p and delta q */
- dp = (u8 *)ptrs[faila];
- ptrs[faila] = (void *)raid6_empty_zero_page;
- ptrs[disks-2] = dp;
- dq = (u8 *)ptrs[failb];
- ptrs[failb] = (void *)raid6_empty_zero_page;
- ptrs[disks-1] = dq;
-
- raid6_call.gen_syndrome(disks, bytes, ptrs);
-
- /* Restore pointer table */
- ptrs[faila] = dp;
- ptrs[failb] = dq;
- ptrs[disks-2] = p;
- ptrs[disks-1] = q;
-
- /* Now, pick the proper data tables */
- pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
- qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
-
- /* Now do it... */
- while ( bytes-- ) {
- px = *p ^ *dp;
- qx = qmul[*q ^ *dq];
- *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
- *dp++ = db ^ px; /* Reconstructed A */
- p++; q++;
- }
-}
-EXPORT_SYMBOL_GPL(raid6_2data_recov);
-
-/* Recover failure of one data block plus the P block */
-void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs)
-{
- u8 *p, *q, *dq;
- const u8 *qmul; /* Q multiplier table */
-
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data page
- Use the dead data page as temporary storage for delta q */
- dq = (u8 *)ptrs[faila];
- ptrs[faila] = (void *)raid6_empty_zero_page;
- ptrs[disks-1] = dq;
-
- raid6_call.gen_syndrome(disks, bytes, ptrs);
-
- /* Restore pointer table */
- ptrs[faila] = dq;
- ptrs[disks-1] = q;
-
- /* Now, pick the proper data tables */
- qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
-
- /* Now do it... */
- while ( bytes-- ) {
- *p++ ^= *dq = qmul[*q ^ *dq];
- q++; dq++;
- }
-}
-EXPORT_SYMBOL_GPL(raid6_datap_recov);
-
-#ifndef __KERNEL__
-/* Testing only */
-
-/* Recover two failed blocks. */
-void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
-{
- if ( faila > failb ) {
- int tmp = faila;
- faila = failb;
- failb = tmp;
- }
-
- if ( failb == disks-1 ) {
- if ( faila == disks-2 ) {
- /* P+Q failure. Just rebuild the syndrome. */
- raid6_call.gen_syndrome(disks, bytes, ptrs);
- } else {
- /* data+Q failure. Reconstruct data from P,
- then rebuild syndrome. */
- /* NOT IMPLEMENTED - equivalent to RAID-5 */
- }
- } else {
- if ( failb == disks-2 ) {
- /* data+P failure. */
- raid6_datap_recov(disks, bytes, faila, ptrs);
- } else {
- /* data+data failure. */
- raid6_2data_recov(disks, bytes, faila, failb, ptrs);
- }
- }
-}
-
-#endif
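
For reference, the algebra behind the removed recovery routines, in the usual RAID-6 notation over GF(2^8) with generator g (the code folds the constant factors below into the precomputed pbmul/qmul lookup tables):

P = \bigoplus_{i} D_i, \qquad Q = \bigoplus_{i} g^{i} D_i

With data disks x < y failed, recompute the syndromes P_{xy}, Q_{xy} with D_x = D_y = 0 (the zero-page substitution above) and solve

D_x \oplus D_y = P \oplus P_{xy}, \qquad g^{x} D_x \oplus g^{y} D_y = Q \oplus Q_{xy}

which gives

D_y = \bigl((P \oplus P_{xy}) \oplus g^{-x}(Q \oplus Q_{xy})\bigr)\,(g^{y-x} \oplus 1)^{-1}, \qquad D_x = (P \oplus P_{xy}) \oplus D_y.

For a data disk x lost together with P, only Q is needed: D_x = g^{-x}\,(Q \oplus Q_{x}), after which P is rebuilt from the data.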
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c
deleted file mode 100644
index b274dd5eab8..00000000000
--- a/drivers/md/raid6sse1.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6sse1.c
- *
- * SSE-1/MMXEXT implementation of RAID-6 syndrome functions
- *
- * This is really an MMX implementation, but it requires SSE-1 or
- * AMD MMXEXT for prefetch support and a few other features. The
- * support for nontemporal memory accesses is enough to make this
- * worthwhile as a separate implementation.
- */
-
-#if defined(__i386__) && !defined(__arch_um__)
-
-#include <linux/raid/pq.h>
-#include "raid6x86.h"
-
-/* Defined in raid6mmx.c */
-extern const struct raid6_mmx_constants {
- u64 x1d;
-} raid6_mmx_constants;
-
-static int raid6_have_sse1_or_mmxext(void)
-{
- /* Not really boot_cpu but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX) &&
- (boot_cpu_has(X86_FEATURE_XMM) ||
- boot_cpu_has(X86_FEATURE_MMXEXT));
-}
-
-/*
- * Plain SSE1 implementation
- */
-static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 8 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
- for ( z = z0-2 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm6,%mm2");
- asm volatile("pxor %mm6,%mm4");
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
- }
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm6,%mm2");
- asm volatile("pxor %mm6,%mm4");
-
- asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse1x1 = {
- raid6_sse11_gen_syndrome,
- raid6_have_sse1_or_mmxext,
- "sse1x1",
- 1 /* Has cache hints */
-};
-
-/*
- * Unrolled-by-2 SSE1 implementation
- */
-static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
- asm volatile("pxor %mm7,%mm7"); /* Zero temp */
-
- /* We uniformly assume a single prefetch covers at least 16 bytes */
- for ( d = 0 ; d < bytes ; d += 16 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %mm3,%mm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("pcmpgtb %mm6,%mm7");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("paddb %mm6,%mm6");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pand %mm0,%mm7");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
- asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
- asm volatile("pxor %mm5,%mm2");
- asm volatile("pxor %mm7,%mm3");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm7,%mm7");
- }
- asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
- asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
- asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
- }
-
- asm volatile("sfence" : :: "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse1x2 = {
- raid6_sse12_gen_syndrome,
- raid6_have_sse1_or_mmxext,
- "sse1x2",
- 1 /* Has cache hints */
-};
-
-#endif
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c
deleted file mode 100644
index 6ed6c6c0389..00000000000
--- a/drivers/md/raid6sse2.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6sse2.c
- *
- * SSE-2 implementation of RAID-6 syndrome functions
- *
- */
-
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
-#include <linux/raid/pq.h>
-#include "raid6x86.h"
-
-static const struct raid6_sse_constants {
- u64 x1d[2];
-} raid6_sse_constants __attribute__((aligned(16))) = {
- { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
-};
-
-static int raid6_have_sse2(void)
-{
- /* Not really boot_cpu but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX) &&
- boot_cpu_has(X86_FEATURE_FXSR) &&
- boot_cpu_has(X86_FEATURE_XMM) &&
- boot_cpu_has(X86_FEATURE_XMM2);
-}
-
-/*
- * Plain SSE2 implementation
- */
-static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
- asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 16 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
- asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
- asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
- for ( z = z0-2 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm6,%xmm2");
- asm volatile("pxor %xmm6,%xmm4");
- asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
- }
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm6,%xmm2");
- asm volatile("pxor %xmm6,%xmm4");
-
- asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("pxor %xmm2,%xmm2");
- asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("pxor %xmm4,%xmm4");
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse2x1 = {
- raid6_sse21_gen_syndrome,
- raid6_have_sse2,
- "sse2x1",
- 1 /* Has cache hints */
-};
-
-/*
- * Unrolled-by-2 SSE2 implementation
- */
-static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
- asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
- asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
-
- /* We uniformly assume a single prefetch covers at least 32 bytes */
- for ( d = 0 ; d < bytes ; d += 32 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
- asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
- asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("pcmpgtb %xmm6,%xmm7");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("paddb %xmm6,%xmm6");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pand %xmm0,%xmm7");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
- asm volatile("pxor %xmm5,%xmm2");
- asm volatile("pxor %xmm7,%xmm3");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm7,%xmm7");
- }
- asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
- asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse2x2 = {
- raid6_sse22_gen_syndrome,
- raid6_have_sse2,
- "sse2x2",
- 1 /* Has cache hints */
-};
-
-#endif
-
-#if defined(__x86_64__) && !defined(__arch_um__)
-
-/*
- * Unrolled-by-4 SSE2 implementation
- */
-static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
- asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
- asm volatile("pxor %xmm3,%xmm3"); /* P[1] */
- asm volatile("pxor %xmm4,%xmm4"); /* Q[0] */
- asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
- asm volatile("pxor %xmm6,%xmm6"); /* Q[1] */
- asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
- asm volatile("pxor %xmm10,%xmm10"); /* P[2] */
- asm volatile("pxor %xmm11,%xmm11"); /* P[3] */
- asm volatile("pxor %xmm12,%xmm12"); /* Q[2] */
- asm volatile("pxor %xmm13,%xmm13"); /* Zero temp */
- asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */
- asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 64 ) {
- for ( z = z0 ; z >= 0 ; z-- ) {
- /* The second prefetch seems to improve performance... */
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("pcmpgtb %xmm6,%xmm7");
- asm volatile("pcmpgtb %xmm12,%xmm13");
- asm volatile("pcmpgtb %xmm14,%xmm15");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("paddb %xmm6,%xmm6");
- asm volatile("paddb %xmm12,%xmm12");
- asm volatile("paddb %xmm14,%xmm14");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pand %xmm0,%xmm7");
- asm volatile("pand %xmm0,%xmm13");
- asm volatile("pand %xmm0,%xmm15");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("pxor %xmm13,%xmm12");
- asm volatile("pxor %xmm15,%xmm14");
- asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
- asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
- asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
- asm volatile("pxor %xmm5,%xmm2");
- asm volatile("pxor %xmm7,%xmm3");
- asm volatile("pxor %xmm13,%xmm10");
- asm volatile("pxor %xmm15,%xmm11");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("pxor %xmm13,%xmm12");
- asm volatile("pxor %xmm15,%xmm14");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm7,%xmm7");
- asm volatile("pxor %xmm13,%xmm13");
- asm volatile("pxor %xmm15,%xmm15");
- }
- asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("pxor %xmm2,%xmm2");
- asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
- asm volatile("pxor %xmm3,%xmm3");
- asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
- asm volatile("pxor %xmm10,%xmm10");
- asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
- asm volatile("pxor %xmm11,%xmm11");
- asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("pxor %xmm4,%xmm4");
- asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
- asm volatile("pxor %xmm6,%xmm6");
- asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
- asm volatile("pxor %xmm12,%xmm12");
- asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
- asm volatile("pxor %xmm14,%xmm14");
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse2x4 = {
- raid6_sse24_gen_syndrome,
- raid6_have_sse2,
- "sse2x4",
- 1 /* Has cache hints */
-};
-
-#endif
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
deleted file mode 100644
index 2874cbef529..00000000000
--- a/drivers/md/raid6test/Makefile
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# This is a simple Makefile to test some of the RAID-6 code
-# from userspace.
-#
-
-CC = gcc
-OPTFLAGS = -O2 # Adjust as desired
-CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
-LD = ld
-AWK = awk
-AR = ar
-RANLIB = ranlib
-
-.c.o:
- $(CC) $(CFLAGS) -c -o $@ $<
-
-%.c: ../%.c
- cp -f $< $@
-
-%.uc: ../%.uc
- cp -f $< $@
-
-all: raid6.a raid6test
-
-raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
- raid6int32.o \
- raid6mmx.o raid6sse1.o raid6sse2.o \
- raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \
- raid6recov.o raid6algos.o \
- raid6tables.o
- rm -f $@
- $(AR) cq $@ $^
- $(RANLIB) $@
-
-raid6test: test.c raid6.a
- $(CC) $(CFLAGS) -o raid6test $^
-
-raid6altivec1.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
-
-raid6altivec2.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
-
-raid6altivec4.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
-
-raid6altivec8.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
-
-raid6int1.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
-
-raid6int2.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
-
-raid6int4.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
-
-raid6int8.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
-
-raid6int16.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
-
-raid6int32.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
-
-raid6tables.c: mktables
- ./mktables > raid6tables.c
-
-clean:
- rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test
-
-spotless: clean
- rm -f *~
diff --git a/drivers/md/raid6test/test.c b/drivers/md/raid6test/test.c
deleted file mode 100644
index 7a930318b17..00000000000
--- a/drivers/md/raid6test/test.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2 or (at your
- * option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6test.c
- *
- * Test RAID-6 recovery with various algorithms
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <linux/raid/pq.h>
-
-#define NDISKS 16 /* Including P and Q */
-
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-struct raid6_calls raid6_call;
-
-char *dataptrs[NDISKS];
-char data[NDISKS][PAGE_SIZE];
-char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
-
-static void makedata(void)
-{
- int i, j;
-
- for (i = 0; i < NDISKS; i++) {
- for (j = 0; j < PAGE_SIZE; j++)
- data[i][j] = rand();
-
- dataptrs[i] = data[i];
- }
-}
-
-static char disk_type(int d)
-{
- switch (d) {
- case NDISKS-2:
- return 'P';
- case NDISKS-1:
- return 'Q';
- default:
- return 'D';
- }
-}
-
-static int test_disks(int i, int j)
-{
- int erra, errb;
-
- memset(recovi, 0xf0, PAGE_SIZE);
- memset(recovj, 0xba, PAGE_SIZE);
-
- dataptrs[i] = recovi;
- dataptrs[j] = recovj;
-
- raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
-
- erra = memcmp(data[i], recovi, PAGE_SIZE);
- errb = memcmp(data[j], recovj, PAGE_SIZE);
-
- if (i < NDISKS-2 && j == NDISKS-1) {
- /* We don't implement the DQ failure scenario, since it's
- equivalent to a RAID-5 failure (XOR, then recompute Q) */
- erra = errb = 0;
- } else {
- printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
- raid6_call.name,
- i, disk_type(i),
- j, disk_type(j),
- (!erra && !errb) ? "OK" :
- !erra ? "ERRB" :
- !errb ? "ERRA" : "ERRAB");
- }
-
- dataptrs[i] = data[i];
- dataptrs[j] = data[j];
-
- return erra || errb;
-}
-
-int main(int argc, char *argv[])
-{
- const struct raid6_calls *const *algo;
- int i, j;
- int err = 0;
-
- makedata();
-
- for (algo = raid6_algos; *algo; algo++) {
- if (!(*algo)->valid || (*algo)->valid()) {
- raid6_call = **algo;
-
- /* Nuke syndromes */
- memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
-
- /* Generate assumed good syndrome */
- raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
- (void **)&dataptrs);
-
- for (i = 0; i < NDISKS-1; i++)
- for (j = i+1; j < NDISKS; j++)
- err += test_disks(i, j);
- }
- printf("\n");
- }
-
- printf("\n");
- /* Pick the best algorithm test */
- raid6_select_algo();
-
- if (err)
- printf("\n*** ERRORS FOUND ***\n");
-
- return err;
-}
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
deleted file mode 100644
index 4c22c156855..00000000000
--- a/drivers/md/raid6x86.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* ----------------------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6x86.h
- *
- * Definitions common to x86 and x86-64 RAID-6 code only
- */
-
-#ifndef LINUX_RAID_RAID6X86_H
-#define LINUX_RAID_RAID6X86_H
-
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
-#ifdef __KERNEL__ /* Real code */
-
-#include <asm/i387.h>
-
-#else /* Dummy code for user space testing */
-
-static inline void kernel_fpu_begin(void)
-{
-}
-
-static inline void kernel_fpu_end(void)
-{
-}
-
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
- * (fast save and restore) */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-
-/* Should work well enough on modern CPUs for testing */
-static inline int boot_cpu_has(int flag)
-{
- u32 eax = (flag >> 5) ? 0x80000001 : 1;
- u32 edx;
-
- asm volatile("cpuid"
- : "+a" (eax), "=d" (edx)
- : : "ecx", "ebx");
-
- return (edx >> (flag & 31)) & 1;
-}
-
-#endif /* ndef __KERNEL__ */
-
-#endif
-#endif
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk
deleted file mode 100644
index c6aa03631df..00000000000
--- a/drivers/md/unroll.awk
+++ /dev/null
@@ -1,20 +0,0 @@
-
-# This filter requires one command line option of form -vN=n
-# where n must be a decimal number.
-#
-# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
-# Replace each $# with n, and each $* with a single $.
-
-BEGIN {
- n = N + 0
-}
-{
- if (/\$\$/) { rep = n } else { rep = 1 }
- for (i = 0; i < rep; ++i) {
- tmp = $0
- gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
- gsub(/\$\*/, "$", tmp)
- print tmp
- }
-}
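
Concretely, run with -vN=2 over the raid6int.uc template deleted above, the declaration and loop-header lines come out of this filter with the following shape (an illustration of the expansion rules, not a literal excerpt of the generated raid6int2.c):

	unative_t wd0, wq0, wp0, w10, w20;
	unative_t wd1, wq1, wp1, w11, w21;

	for ( d = 0 ; d < bytes ; d += NSIZE*2 ) {
		wq0 = wp0 = *(unative_t *)&dptr[z0][d+0*NSIZE];
		wq1 = wp1 = *(unative_t *)&dptr[z0][d+1*NSIZE];
		...
	}

Lines containing $$ are emitted once per unroll index, lines containing only $# are emitted once with the count substituted.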
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 999a8250b3c..490c57cc4cf 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -1,13 +1,22 @@
-config IR_CORE
- tristate
+menuconfig IR_CORE
+ tristate "Infrared remote controller adapters"
depends on INPUT
default INPUT
+ ---help---
+ Enable support for Remote Controllers on Linux. This is
+ needed in order to support several video capture adapters.
+
+ Enable this option if you have a video capture board even
+ if you don't need IR, as otherwise, you may not be able to
+ compile the driver for your adapter.
config VIDEO_IR
tristate
depends on IR_CORE
default IR_CORE
+if IR_CORE
+
config LIRC
tristate
default y
@@ -16,7 +25,7 @@ config LIRC
Enable this option to build the Linux Infrared Remote
Control (LIRC) core device interface driver. The LIRC
interface passes raw IR to and from userspace, where the
- LIRC daemon handles protocol decoding for IR reception ann
+ LIRC daemon handles protocol decoding for IR reception and
encoding for IR transmitting (aka "blasting").
source "drivers/media/IR/keymaps/Kconfig"
@@ -103,3 +112,31 @@ config IR_MCEUSB
To compile this driver as a module, choose M here: the
module will be called mceusb.
+
+config IR_ENE
+ tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
+ depends on PNP
+ depends on IR_CORE
+ ---help---
+ Say Y here to enable support for integrated infrared receiver
+ /transceiver made by ENE.
+
+ You can see if you have it by looking at lspnp output.
+ Output should include ENE0100, ENE0200 or something similar.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ene_ir.
+
+config IR_STREAMZAP
+ tristate "Streamzap PC Remote IR Receiver"
+ depends on USB_ARCH_HAS_HCD
+ depends on IR_CORE
+ select USB
+ ---help---
+ Say Y here if you want to use a Streamzap PC Remote
+ Infrared Receiver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called streamzap.
+
+endif #IR_CORE
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index 2ae4f3abfdb..53676838fe9 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -16,3 +16,5 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
# stand-alone IR receivers/transmitters
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
+obj-$(CONFIG_IR_ENE) += ene_ir.o
+obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
diff --git a/drivers/media/IR/ene_ir.c b/drivers/media/IR/ene_ir.c
new file mode 100644
index 00000000000..5447750f5e3
--- /dev/null
+++ b/drivers/media/IR/ene_ir.c
@@ -0,0 +1,1023 @@
+/*
+ * driver for ENE KB3926 B/C/D CIR (pnp id: ENE0XXX)
+ *
+ * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <media/ir-core.h>
+#include <media/ir-common.h>
+#include "ene_ir.h"
+
+
+static int sample_period = -1;
+static int enable_idle = 1;
+static int input = 1;
+static int debug;
+static int txsim;
+
+static int ene_irq_status(struct ene_device *dev);
+
+/* read a hardware register */
+static u8 ene_hw_read_reg(struct ene_device *dev, u16 reg)
+{
+ u8 retval;
+ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
+ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ retval = inb(dev->hw_io + ENE_IO);
+
+ ene_dbg_verbose("reg %04x == %02x", reg, retval);
+ return retval;
+}
+
+/* write a hardware register */
+static void ene_hw_write_reg(struct ene_device *dev, u16 reg, u8 value)
+{
+ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
+ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ outb(value, dev->hw_io + ENE_IO);
+
+ ene_dbg_verbose("reg %04x <- %02x", reg, value);
+}
+
+/* change specific bits in hardware register */
+static void ene_hw_write_reg_mask(struct ene_device *dev,
+ u16 reg, u8 value, u8 mask)
+{
+ u8 regvalue;
+
+ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
+ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+
+ regvalue = inb(dev->hw_io + ENE_IO) & ~mask;
+ regvalue |= (value & mask);
+ outb(regvalue, dev->hw_io + ENE_IO);
+
+ ene_dbg_verbose("reg %04x <- %02x (mask=%02x)", reg, value, mask);
+}
+
+/* detect hardware features */
+static int ene_hw_detect(struct ene_device *dev)
+{
+ u8 chip_major, chip_minor;
+ u8 hw_revision, old_ver;
+ u8 tmp;
+ u8 fw_capabilities;
+ int pll_freq;
+
+ tmp = ene_hw_read_reg(dev, ENE_HW_UNK);
+ ene_hw_write_reg(dev, ENE_HW_UNK, tmp & ~ENE_HW_UNK_CLR);
+
+ chip_major = ene_hw_read_reg(dev, ENE_HW_VER_MAJOR);
+ chip_minor = ene_hw_read_reg(dev, ENE_HW_VER_MINOR);
+
+ ene_hw_write_reg(dev, ENE_HW_UNK, tmp);
+ hw_revision = ene_hw_read_reg(dev, ENE_HW_VERSION);
+ old_ver = ene_hw_read_reg(dev, ENE_HW_VER_OLD);
+
+ pll_freq = (ene_hw_read_reg(dev, ENE_PLLFRH) << 4) +
+ (ene_hw_read_reg(dev, ENE_PLLFRL) >> 4);
+
+ if (pll_freq != 1000)
+ dev->rx_period_adjust = 4;
+ else
+ dev->rx_period_adjust = 2;
+
+
+ ene_printk(KERN_NOTICE, "PLL freq = %d\n", pll_freq);
+
+ if (hw_revision == 0xFF) {
+
+ ene_printk(KERN_WARNING, "device seems to be disabled\n");
+ ene_printk(KERN_WARNING,
+ "send a mail to lirc-list@lists.sourceforge.net\n");
+ ene_printk(KERN_WARNING, "please attach output of acpidump\n");
+ return -ENODEV;
+ }
+
+ if (chip_major == 0x33) {
+ ene_printk(KERN_WARNING, "chips 0x33xx aren't supported\n");
+ return -ENODEV;
+ }
+
+ if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
+ dev->hw_revision = ENE_HW_C;
+ } else if (old_ver == 0x24 && hw_revision == 0xC0) {
+ dev->hw_revision = ENE_HW_B;
+ ene_printk(KERN_NOTICE, "KB3926B detected\n");
+ } else {
+ dev->hw_revision = ENE_HW_D;
+ ene_printk(KERN_WARNING,
+ "unknown ENE chip detected, assuming KB3926D\n");
+ ene_printk(KERN_WARNING,
+ "driver support might not be complete\n");
+
+ }
+
+ ene_printk(KERN_DEBUG,
+ "chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
+ chip_major, chip_minor, old_ver, hw_revision);
+
+ /* detect features hardware supports */
+ if (dev->hw_revision < ENE_HW_C)
+ return 0;
+
+ fw_capabilities = ene_hw_read_reg(dev, ENE_FW2);
+ ene_dbg("Firmware capabilities: %02x", fw_capabilities);
+
+ dev->hw_gpio40_learning = fw_capabilities & ENE_FW2_GP40_AS_LEARN;
+ dev->hw_learning_and_tx_capable = fw_capabilities & ENE_FW2_LEARNING;
+
+ dev->hw_fan_as_normal_input = dev->hw_learning_and_tx_capable &&
+ (fw_capabilities & ENE_FW2_FAN_AS_NRML_IN);
+
+ ene_printk(KERN_NOTICE, "hardware features:\n");
+ ene_printk(KERN_NOTICE,
+ "learning and transmit %s, gpio40_learn %s, fan_in %s\n",
+ dev->hw_learning_and_tx_capable ? "on" : "off",
+ dev->hw_gpio40_learning ? "on" : "off",
+ dev->hw_fan_as_normal_input ? "on" : "off");
+
+ if (dev->hw_learning_and_tx_capable) {
+ ene_printk(KERN_WARNING,
+ "Device supports transmitting, but that support is\n");
+ ene_printk(KERN_WARNING,
+ "lightly tested. Please test it and mail\n");
+ ene_printk(KERN_WARNING,
+ "lirc-list@lists.sourceforge.net\n");
+ }
+ return 0;
+}
+
+/* this enables/disables IR input via gpio40 */
+static void ene_enable_gpio40_receive(struct ene_device *dev, int enable)
+{
+ ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, enable ?
+ 0 : ENE_CIR_CONF2_GPIO40DIS,
+ ENE_CIR_CONF2_GPIO40DIS);
+}
+
+/* this enables/disables IR via standard input */
+static void ene_enable_normal_receive(struct ene_device *dev, int enable)
+{
+ ene_hw_write_reg(dev, ENE_CIR_CONF1, enable ? ENE_CIR_CONF1_RX_ON : 0);
+}
+
+/* this enables/disables IR input via unused fan tachometer input */
+static void ene_enable_fan_receive(struct ene_device *dev, int enable)
+{
+ if (!enable)
+ ene_hw_write_reg(dev, ENE_FAN_AS_IN1, 0);
+ else {
+ ene_hw_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
+ ene_hw_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
+ }
+ dev->rx_fan_input_inuse = enable;
+}
+
+
+/* Sense current received carrier */
+static int ene_rx_sense_carrier(struct ene_device *dev)
+{
+ int period = ene_hw_read_reg(dev, ENE_RX_CARRIER);
+ int carrier;
+ ene_dbg("RX: hardware carrier period = %02x", period);
+
+ if (!(period & ENE_RX_CARRIER_VALID))
+ return 0;
+
+ period &= ~ENE_RX_CARRIER_VALID;
+
+ if (!period)
+ return 0;
+
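+ /* ENE_RX_CARRIER holds the period in 500 ns units (see ene_ir.h), so
+ frequency = 1 / (period * 500 ns) = 2000000 / period Hz; for example,
+ a reading of 55 would correspond to roughly 36 kHz. */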
+ carrier = 2000000 / period;
+ ene_dbg("RX: sensed carrier = %d Hz", carrier);
+ return carrier;
+}
+
+/* determine which input to use */
+static void ene_rx_set_inputs(struct ene_device *dev)
+{
+ int learning_mode = dev->learning_enabled;
+
+ ene_dbg("RX: setup receiver, learning mode = %d", learning_mode);
+
+ ene_enable_normal_receive(dev, 1);
+
+ /* old hardware certainly doesn't support learning mode */
+ if (dev->hw_revision <= ENE_HW_B)
+ return;
+
+ /* receiver not learning capable, still set gpio40 correctly */
+ if (!dev->hw_learning_and_tx_capable) {
+ ene_enable_gpio40_receive(dev, !dev->hw_gpio40_learning);
+ return;
+ }
+
+ /* enable learning mode */
+ if (learning_mode) {
+ ene_enable_gpio40_receive(dev, dev->hw_gpio40_learning);
+
+ /* fan input is not used for learning */
+ if (dev->hw_fan_as_normal_input)
+ ene_enable_fan_receive(dev, 0);
+
+ /* disable learning mode */
+ } else {
+ if (dev->hw_fan_as_normal_input) {
+ ene_enable_fan_receive(dev, 1);
+ ene_enable_normal_receive(dev, 0);
+ } else
+ ene_enable_gpio40_receive(dev,
+ !dev->hw_gpio40_learning);
+ }
+
+ /* set a few additional settings for this mode */
+ ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, learning_mode ?
+ ENE_CIR_CONF1_LEARN1 : 0, ENE_CIR_CONF1_LEARN1);
+
+ ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, learning_mode ?
+ ENE_CIR_CONF2_LEARN2 : 0, ENE_CIR_CONF2_LEARN2);
+
+ if (dev->rx_fan_input_inuse) {
+ dev->props->rx_resolution = ENE_SAMPLE_PERIOD_FAN * 1000;
+
+ dev->props->timeout =
+ ENE_FAN_VALUE_MASK * ENE_SAMPLE_PERIOD_FAN * 1000;
+ } else {
+ dev->props->rx_resolution = sample_period * 1000;
+ dev->props->timeout = ENE_MAXGAP * 1000;
+ }
+}
+
+/* Enable the device for receive */
+static void ene_rx_enable(struct ene_device *dev)
+{
+ u8 reg_value;
+
+ if (dev->hw_revision < ENE_HW_C) {
+ ene_hw_write_reg(dev, ENEB_IRQ, dev->irq << 1);
+ ene_hw_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
+ } else {
+ reg_value = ene_hw_read_reg(dev, ENEC_IRQ) & 0xF0;
+ reg_value |= ENEC_IRQ_UNK_EN;
+ reg_value &= ~ENEC_IRQ_STATUS;
+ reg_value |= (dev->irq & ENEC_IRQ_MASK);
+ ene_hw_write_reg(dev, ENEC_IRQ, reg_value);
+ ene_hw_write_reg(dev, ENE_TX_UNK1, 0x63);
+ }
+
+ ene_hw_write_reg(dev, ENE_CIR_CONF2, 0x00);
+ ene_rx_set_inputs(dev);
+
+ /* set sampling period */
+ ene_hw_write_reg(dev, ENE_CIR_SAMPLE_PERIOD, sample_period);
+
+ /* ack any pending irqs - just in case */
+ ene_irq_status(dev);
+
+ /* enable firmware bits */
+ ene_hw_write_reg_mask(dev, ENE_FW1,
+ ENE_FW1_ENABLE | ENE_FW1_IRQ,
+ ENE_FW1_ENABLE | ENE_FW1_IRQ);
+
+ /* enter idle mode */
+ ir_raw_event_set_idle(dev->idev, 1);
+ ir_raw_event_reset(dev->idev);
+
+}
+
+/* Disable the device receiver */
+static void ene_rx_disable(struct ene_device *dev)
+{
+ /* disable inputs */
+ ene_enable_normal_receive(dev, 0);
+
+ if (dev->hw_fan_as_normal_input)
+ ene_enable_fan_receive(dev, 0);
+
+ /* disable hardware IRQ and firmware flag */
+ ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_ENABLE | ENE_FW1_IRQ);
+
+ ir_raw_event_set_idle(dev->idev, 1);
+ ir_raw_event_reset(dev->idev);
+}
+
+
+/* prepare transmission */
+static void ene_tx_prepare(struct ene_device *dev)
+{
+ u8 conf1;
+
+ conf1 = ene_hw_read_reg(dev, ENE_CIR_CONF1);
+ dev->saved_conf1 = conf1;
+
+ if (dev->hw_revision == ENE_HW_C)
+ conf1 &= ~ENE_CIR_CONF1_TX_CLEAR;
+
+ /* Enable TX engine */
+ conf1 |= ENE_CIR_CONF1_TX_ON;
+
+ /* Set carrier */
+ if (dev->tx_period) {
+
+ /* NOTE: duty cycle handling is just a guess, it might
+ not be available. Default values were tested */
+ int tx_period_in500ns = dev->tx_period * 2;
+
+ int tx_pulse_width_in_500ns =
+ tx_period_in500ns / (100 / dev->tx_duty_cycle);
+
+ if (!tx_pulse_width_in_500ns)
+ tx_pulse_width_in_500ns = 1;
+
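+ /* Worked example with the driver defaults (tx_period = 32 us,
+ tx_duty_cycle = 25%): tx_period_in500ns = 64 and the pulse width
+ becomes 64 / (100 / 25) = 16, i.e. an 8 us pulse every 32 us. */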
+ ene_dbg("TX: pulse distance = %d * 500 ns", tx_period_in500ns);
+ ene_dbg("TX: pulse width = %d * 500 ns",
+ tx_pulse_width_in_500ns);
+
+ ene_hw_write_reg(dev, ENE_TX_PERIOD, ENE_TX_PERIOD_UNKBIT |
+ tx_period_in500ns);
+
+ ene_hw_write_reg(dev, ENE_TX_PERIOD_PULSE,
+ tx_pulse_width_in_500ns);
+
+ conf1 |= ENE_CIR_CONF1_TX_CARR;
+ } else
+ conf1 &= ~ENE_CIR_CONF1_TX_CARR;
+
+ ene_hw_write_reg(dev, ENE_CIR_CONF1, conf1);
+
+}
+
+/* end transmission */
+static void ene_tx_complete(struct ene_device *dev)
+{
+ ene_hw_write_reg(dev, ENE_CIR_CONF1, dev->saved_conf1);
+ dev->tx_buffer = NULL;
+}
+
+/* set transmit mask */
+static void ene_tx_hw_set_transmitter_mask(struct ene_device *dev)
+{
+ u8 txport1 = ene_hw_read_reg(dev, ENE_TX_PORT1) & ~ENE_TX_PORT1_EN;
+ u8 txport2 = ene_hw_read_reg(dev, ENE_TX_PORT2) & ~ENE_TX_PORT2_EN;
+
+ if (dev->transmitter_mask & 0x01)
+ txport1 |= ENE_TX_PORT1_EN;
+
+ if (dev->transmitter_mask & 0x02)
+ txport2 |= ENE_TX_PORT2_EN;
+
+ ene_hw_write_reg(dev, ENE_TX_PORT1, txport1);
+ ene_hw_write_reg(dev, ENE_TX_PORT2, txport2);
+}
+
+/* TX one sample - must be called with dev->hw_lock */
+static void ene_tx_sample(struct ene_device *dev)
+{
+ u8 raw_tx;
+ u32 sample;
+
+ if (!dev->tx_buffer) {
+ ene_dbg("TX: attempt to transmit NULL buffer");
+ return;
+ }
+
+ /* Grab next TX sample */
+ if (!dev->tx_sample) {
+again:
+ if (dev->tx_pos == dev->tx_len + 1) {
+ if (!dev->tx_done) {
+ ene_dbg("TX: no more data to send");
+ dev->tx_done = 1;
+ goto exit;
+ } else {
+ ene_dbg("TX: last sample sent by hardware");
+ ene_tx_complete(dev);
+ complete(&dev->tx_complete);
+ return;
+ }
+ }
+
+ sample = dev->tx_buffer[dev->tx_pos++];
+ dev->tx_sample_pulse = !dev->tx_sample_pulse;
+
+ ene_dbg("TX: sample %8d (%s)", sample, dev->tx_sample_pulse ?
+ "pulse" : "space");
+
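+ /* tx_buffer durations are expected to be in microseconds; dividing
+ by ENE_TX_SMPL_PERIOD (50) converts them to hardware ticks, e.g. a
+ 900 us pulse becomes 18 ticks, sent below in chunks of at most 0x7F */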
+ dev->tx_sample = DIV_ROUND_CLOSEST(sample, ENE_TX_SMPL_PERIOD);
+
+ /* guard against too short samples */
+ if (!dev->tx_sample)
+ goto again;
+ }
+
+ raw_tx = min(dev->tx_sample, (unsigned int)ENE_TX_SMLP_MASK);
+ dev->tx_sample -= raw_tx;
+
+ if (dev->tx_sample_pulse)
+ raw_tx |= ENE_TX_PULSE_MASK;
+
+ ene_hw_write_reg(dev, ENE_TX_INPUT1 + dev->tx_reg, raw_tx);
+ dev->tx_reg = !dev->tx_reg;
+exit:
+ /* simulate TX done interrupt */
+ if (txsim)
+ mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500);
+}
+
+/* timer to simulate tx done interrupt */
+static void ene_tx_irqsim(unsigned long data)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ ene_tx_sample(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+}
+
+
+/* read irq status and ack it */
+static int ene_irq_status(struct ene_device *dev)
+{
+ u8 irq_status;
+ u8 fw_flags1, fw_flags2;
+ int cur_rx_pointer;
+ int retval = 0;
+
+ fw_flags2 = ene_hw_read_reg(dev, ENE_FW2);
+ cur_rx_pointer = !!(fw_flags2 & ENE_FW2_BUF_HIGH);
+
+ if (dev->hw_revision < ENE_HW_C) {
+ irq_status = ene_hw_read_reg(dev, ENEB_IRQ_STATUS);
+
+ if (!(irq_status & ENEB_IRQ_STATUS_IR))
+ return 0;
+
+ ene_hw_write_reg(dev, ENEB_IRQ_STATUS,
+ irq_status & ~ENEB_IRQ_STATUS_IR);
+ dev->rx_pointer = cur_rx_pointer;
+ return ENE_IRQ_RX;
+ }
+
+ irq_status = ene_hw_read_reg(dev, ENEC_IRQ);
+
+ if (!(irq_status & ENEC_IRQ_STATUS))
+ return 0;
+
+ /* original driver does that twice - a workaround? */
+ ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
+ ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
+
+ /* clear unknown flag in F8F9 */
+ if (fw_flags2 & ENE_FW2_IRQ_CLR)
+ ene_hw_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_IRQ_CLR);
+
+ /* check if this is a TX interrupt */
+ fw_flags1 = ene_hw_read_reg(dev, ENE_FW1);
+ if (fw_flags1 & ENE_FW1_TXIRQ) {
+ ene_hw_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
+ retval |= ENE_IRQ_TX;
+ }
+
+ /* Check if this is an RX interrupt */
+ if (dev->rx_pointer != cur_rx_pointer) {
+ retval |= ENE_IRQ_RX;
+ dev->rx_pointer = cur_rx_pointer;
+
+ } else if (!(retval & ENE_IRQ_TX)) {
+ ene_dbg("RX: interrupt without change in RX pointer(%d)",
+ dev->rx_pointer);
+ retval |= ENE_IRQ_RX;
+ }
+
+ if ((retval & ENE_IRQ_RX) && (retval & ENE_IRQ_TX))
+ ene_dbg("both RX and TX interrupt at same time");
+
+ return retval;
+}
+
+/* interrupt handler */
+static irqreturn_t ene_isr(int irq, void *data)
+{
+ u16 hw_value;
+ int i, hw_sample;
+ int pulse;
+ int irq_status;
+ unsigned long flags;
+ int carrier = 0;
+ irqreturn_t retval = IRQ_NONE;
+ struct ene_device *dev = (struct ene_device *)data;
+ struct ir_raw_event ev;
+
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ irq_status = ene_irq_status(dev);
+
+ if (!irq_status)
+ goto unlock;
+
+ retval = IRQ_HANDLED;
+
+ if (irq_status & ENE_IRQ_TX) {
+
+ if (!dev->hw_learning_and_tx_capable) {
+ ene_dbg("TX interrupt on unsupported device!");
+ goto unlock;
+ }
+ ene_tx_sample(dev);
+ }
+
+ if (!(irq_status & ENE_IRQ_RX))
+ goto unlock;
+
+
+ if (dev->carrier_detect_enabled || debug)
+ carrier = ene_rx_sense_carrier(dev);
+#if 0
+ /* TODO */
+ if (dev->carrier_detect_enabled && carrier)
+ ir_raw_event_report_frequency(dev->idev, carrier);
+#endif
+
+ for (i = 0; i < ENE_SAMPLES_SIZE; i++) {
+ hw_value = ene_hw_read_reg(dev,
+ ENE_SAMPLE_BUFFER + dev->rx_pointer * 4 + i);
+
+ if (dev->rx_fan_input_inuse) {
+ /* read high part of the sample */
+ hw_value |= ene_hw_read_reg(dev,
+ ENE_SAMPLE_BUFFER_FAN +
+ dev->rx_pointer * 4 + i) << 8;
+ pulse = hw_value & ENE_FAN_SMPL_PULS_MSK;
+
+ /* clear space bit, and other unused bits */
+ hw_value &= ENE_FAN_VALUE_MASK;
+ hw_sample = hw_value * ENE_SAMPLE_PERIOD_FAN;
+
+ } else {
+ pulse = !(hw_value & ENE_SAMPLE_SPC_MASK);
+ hw_value &= ENE_SAMPLE_VALUE_MASK;
+ hw_sample = hw_value * sample_period;
+
+ if (dev->rx_period_adjust) {
+ hw_sample *= (100 - dev->rx_period_adjust);
+ hw_sample /= 100;
+ }
+ }
+ /* no more data */
+ if (!(hw_value))
+ break;
+
+ ene_dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
+
+
+ ev.duration = hw_sample * 1000;
+ ev.pulse = pulse;
+ ir_raw_event_store_with_filter(dev->idev, &ev);
+ }
+
+ ir_raw_event_handle(dev->idev);
+unlock:
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+ return retval;
+}
+
+/* Initialize default settings */
+static void ene_setup_settings(struct ene_device *dev)
+{
+ dev->tx_period = 32;
+ dev->tx_duty_cycle = 25; /*%*/
+ dev->transmitter_mask = 3;
+
+ /* Force learning mode if (input == 2), otherwise
+ let user set it with LIRC_SET_REC_CARRIER */
+ dev->learning_enabled =
+ (input == 2 && dev->hw_learning_and_tx_capable);
+
+ dev->rx_pointer = -1;
+
+}
+
+/* outside interface: called on first open */
+static int ene_open(void *data)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ dev->in_use = 1;
+ ene_setup_settings(dev);
+ ene_rx_enable(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+ return 0;
+}
+
+/* outside interface: called on device close */
+static void ene_close(void *data)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->hw_lock, flags);
+
+ ene_rx_disable(dev);
+ dev->in_use = 0;
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+}
+
+/* outside interface: set transmitter mask */
+static int ene_set_tx_mask(void *data, u32 tx_mask)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+ ene_dbg("TX: attempt to set transmitter mask %02x", tx_mask);
+
+ /* invalid txmask */
+ if (!tx_mask || tx_mask & ~0x3) {
+ ene_dbg("TX: invalid mask");
+ /* return count of transmitters */
+ return 2;
+ }
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ dev->transmitter_mask = tx_mask;
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+ return 0;
+}
+
+/* outside interface: set tx carrier */
+static int ene_set_tx_carrier(void *data, u32 carrier)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+ u32 period = 1000000 / carrier; /* (1 / freq) (* # usec in 1 sec) */
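+ /* assuming the carrier is given in Hz, as the division above implies:
+ e.g. carrier = 36000 yields period = 27 us, which falls inside the
+ accepted ENE_TX_PERIOD_MIN..ENE_TX_PERIOD_MAX (16..32) window */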
+
+ ene_dbg("TX: attempt to set tx carrier to %d kHz", carrier);
+
+ if (period && (period > ENE_TX_PERIOD_MAX ||
+ period < ENE_TX_PERIOD_MIN)) {
+
+ ene_dbg("TX: out of range %d-%d carrier, "
+ "falling back to 32 kHz",
+ 1000 / ENE_TX_PERIOD_MIN,
+ 1000 / ENE_TX_PERIOD_MAX);
+
+ period = 32; /* this is just a coincidence!!! */
+ }
+ ene_dbg("TX: set carrier to %d kHz", carrier);
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ dev->tx_period = period;
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+ return 0;
+}
+
+
+/* outside interface: enable learning mode */
+static int ene_set_learning_mode(void *data, int enable)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+ if (enable == dev->learning_enabled)
+ return 0;
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ dev->learning_enabled = enable;
+ ene_rx_set_inputs(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+ return 0;
+}
+
+/* outside interface: set rec carrier */
+static int ene_set_rec_carrier(void *data, u32 min, u32 max)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ ene_set_learning_mode(dev,
+ max > ENE_NORMAL_RX_HI || min < ENE_NORMAL_RX_LOW);
+ return 0;
+}
+
+/* outside interface: enable or disable idle mode */
+static void ene_rx_set_idle(void *data, int idle)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ ene_dbg("%sabling idle mode", idle ? "en" : "dis");
+
+ ene_hw_write_reg_mask(dev, ENE_CIR_SAMPLE_PERIOD,
+ (enable_idle && idle) ? 0 : ENE_CIR_SAMPLE_OVERFLOW,
+ ENE_CIR_SAMPLE_OVERFLOW);
+}
+
+
+/* outside interface: transmit */
+static int ene_transmit(void *data, int *buf, u32 n)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ unsigned long flags;
+
+ dev->tx_buffer = buf;
+ dev->tx_len = n / sizeof(int);
+ dev->tx_pos = 0;
+ dev->tx_reg = 0;
+ dev->tx_done = 0;
+ dev->tx_sample = 0;
+ dev->tx_sample_pulse = 0;
+
+ ene_dbg("TX: %d samples", dev->tx_len);
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+
+ ene_tx_hw_set_transmitter_mask(dev);
+ ene_tx_prepare(dev);
+
+ /* Transmit first two samples */
+ ene_tx_sample(dev);
+ ene_tx_sample(dev);
+
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+
+ if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
+ ene_dbg("TX: timeout");
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ ene_tx_complete(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+ } else
+ ene_dbg("TX: done");
+ return n;
+}
+
+
+/* probe entry */
+static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+{
+ int error = -ENOMEM;
+ struct ir_dev_props *ir_props;
+ struct input_dev *input_dev;
+ struct ene_device *dev;
+
+ /* allocate memory */
+ input_dev = input_allocate_device();
+ ir_props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
+
+ if (!input_dev || !ir_props || !dev)
+ goto error;
+
+ /* validate resources */
+ error = -ENODEV;
+
+ if (!pnp_port_valid(pnp_dev, 0) ||
+ pnp_port_len(pnp_dev, 0) < ENE_MAX_IO)
+ goto error;
+
+ if (!pnp_irq_valid(pnp_dev, 0))
+ goto error;
+
+ dev->hw_io = pnp_port_start(pnp_dev, 0);
+ dev->irq = pnp_irq(pnp_dev, 0);
+ spin_lock_init(&dev->hw_lock);
+
+ /* claim the resources */
+ error = -EBUSY;
+ if (!request_region(dev->hw_io, ENE_MAX_IO, ENE_DRIVER_NAME))
+ goto error;
+
+ if (request_irq(dev->irq, ene_isr,
+ IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev))
+ goto error;
+
+ pnp_set_drvdata(pnp_dev, dev);
+ dev->pnp_dev = pnp_dev;
+
+ /* detect hardware version and features */
+ error = ene_hw_detect(dev);
+ if (error)
+ goto error;
+
+ ene_setup_settings(dev);
+
+ if (!dev->hw_learning_and_tx_capable && txsim) {
+ dev->hw_learning_and_tx_capable = 1;
+ setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
+ (unsigned long)dev);
+ ene_printk(KERN_WARNING,
+ "Simulation of TX activated\n");
+ }
+
+ ir_props->driver_type = RC_DRIVER_IR_RAW;
+ ir_props->allowed_protos = IR_TYPE_ALL;
+ ir_props->priv = dev;
+ ir_props->open = ene_open;
+ ir_props->close = ene_close;
+ ir_props->min_timeout = ENE_MINGAP * 1000;
+ ir_props->max_timeout = ENE_MAXGAP * 1000;
+ ir_props->timeout = ENE_MAXGAP * 1000;
+
+ if (dev->hw_revision == ENE_HW_B)
+ ir_props->s_idle = ene_rx_set_idle;
+
+
+ dev->props = ir_props;
+ dev->idev = input_dev;
+
+ /* don't allow too short/long sample periods */
+ if (sample_period < 5 || sample_period > 0x7F)
+ sample_period = -1;
+
+ /* choose default sample period */
+ if (sample_period == -1) {
+
+ sample_period = 50;
+
+ /* on revB, hardware idle mode eats the first sample
+ if the sample period is set too low */
+ if (dev->hw_revision == ENE_HW_B && enable_idle)
+ sample_period = 75;
+ }
+
+ ir_props->rx_resolution = sample_period * 1000;
+
+ if (dev->hw_learning_and_tx_capable) {
+
+ ir_props->s_learning_mode = ene_set_learning_mode;
+
+ if (input == 0)
+ ir_props->s_rx_carrier_range = ene_set_rec_carrier;
+
+ init_completion(&dev->tx_complete);
+ ir_props->tx_ir = ene_transmit;
+ ir_props->s_tx_mask = ene_set_tx_mask;
+ ir_props->s_tx_carrier = ene_set_tx_carrier;
+ ir_props->tx_resolution = ENE_TX_SMPL_PERIOD * 1000;
+ /* ir_props->s_carrier_report = ene_set_carrier_report; */
+ }
+
+
+ device_set_wakeup_capable(&pnp_dev->dev, 1);
+ device_set_wakeup_enable(&pnp_dev->dev, 1);
+
+ if (dev->hw_learning_and_tx_capable)
+ input_dev->name = "ENE eHome Infrared Remote Transceiver";
+ else
+ input_dev->name = "ENE eHome Infrared Remote Receiver";
+
+
+ error = -ENODEV;
+ if (ir_input_register(input_dev, RC_MAP_RC6_MCE, ir_props,
+ ENE_DRIVER_NAME))
+ goto error;
+
+
+ ene_printk(KERN_NOTICE, "driver has been successfully loaded\n");
+ return 0;
+error:
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ if (dev->hw_io)
+ release_region(dev->hw_io, ENE_MAX_IO);
+
+ input_free_device(input_dev);
+ kfree(ir_props);
+ kfree(dev);
+ return error;
+}
+
+/* main unload function */
+static void ene_remove(struct pnp_dev *pnp_dev)
+{
+ struct ene_device *dev = pnp_get_drvdata(pnp_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ ene_rx_disable(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+
+ free_irq(dev->irq, dev);
+ release_region(dev->hw_io, ENE_MAX_IO);
+ ir_input_unregister(dev->idev);
+ kfree(dev->props);
+ kfree(dev);
+}
+
+/* enable wake on IR (wakes on a specific button on the original remote) */
+static void ene_enable_wake(struct ene_device *dev, int enable)
+{
+ enable = enable && device_may_wakeup(&dev->pnp_dev->dev);
+
+ ene_dbg("wake on IR %s", enable ? "enabled" : "disabled");
+
+ ene_hw_write_reg_mask(dev, ENE_FW1, enable ?
+ ENE_FW1_WAKE : 0, ENE_FW1_WAKE);
+}
+
+#ifdef CONFIG_PM
+static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
+{
+ struct ene_device *dev = pnp_get_drvdata(pnp_dev);
+ ene_enable_wake(dev, 1);
+ return 0;
+}
+
+static int ene_resume(struct pnp_dev *pnp_dev)
+{
+ struct ene_device *dev = pnp_get_drvdata(pnp_dev);
+ if (dev->in_use)
+ ene_rx_enable(dev);
+
+ ene_enable_wake(dev, 0);
+ return 0;
+}
+#endif
+
+static void ene_shutdown(struct pnp_dev *pnp_dev)
+{
+ struct ene_device *dev = pnp_get_drvdata(pnp_dev);
+ ene_enable_wake(dev, 1);
+}
+
+static const struct pnp_device_id ene_ids[] = {
+ {.id = "ENE0100",},
+ {.id = "ENE0200",},
+ {.id = "ENE0201",},
+ {.id = "ENE0202",},
+ {},
+};
+
+static struct pnp_driver ene_driver = {
+ .name = ENE_DRIVER_NAME,
+ .id_table = ene_ids,
+ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
+
+ .probe = ene_probe,
+ .remove = __devexit_p(ene_remove),
+#ifdef CONFIG_PM
+ .suspend = ene_suspend,
+ .resume = ene_resume,
+#endif
+ .shutdown = ene_shutdown,
+};
+
+static int __init ene_init(void)
+{
+ return pnp_register_driver(&ene_driver);
+}
+
+static void ene_exit(void)
+{
+ pnp_unregister_driver(&ene_driver);
+}
+
+module_param(sample_period, int, S_IRUGO);
+MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
+
+module_param(enable_idle, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_idle,
+ "Enables turning off signal sampling after long inactivity time; "
+ "if disabled might help detecting input signal (default: enabled)"
+ " (KB3926B only)");
+
+module_param(input, bool, S_IRUGO);
+MODULE_PARM_DESC(input, "select which input to use: "
+ "0 - auto, 1 - standard, 2 - wideband (KB3926C+)");
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debug (debug=2 verbose debug output)");
+
+module_param(txsim, bool, S_IRUGO);
+MODULE_PARM_DESC(txsim,
+ "Simulate TX features on unsupported hardware (dangerous)");
+
+MODULE_DEVICE_TABLE(pnp, ene_ids);
+MODULE_DESCRIPTION
+ ("Infrared input driver for KB3926B/KB3926C/KB3926D "
+ "(aka ENE0100/ENE0200/ENE0201) CIR port");
+
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_LICENSE("GPL");
+
+module_init(ene_init);
+module_exit(ene_exit);
diff --git a/drivers/media/IR/ene_ir.h b/drivers/media/IR/ene_ir.h
new file mode 100644
index 00000000000..54c76af0d03
--- /dev/null
+++ b/drivers/media/IR/ene_ir.h
@@ -0,0 +1,235 @@
+/*
+ * driver for ENE KB3926 B/C/D CIR (also known as ENE0XXX)
+ *
+ * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+#include <linux/spinlock.h>
+
+
+/* hardware address */
+#define ENE_STATUS 0 /* hardware status - unused */
+#define ENE_ADDR_HI 1 /* hi byte of register address */
+#define ENE_ADDR_LO 2 /* low byte of register address */
+#define ENE_IO 3 /* read/write window */
+#define ENE_MAX_IO 4
+
+/* 8 bytes of samples, divided in 2 halves */
+#define ENE_SAMPLE_BUFFER 0xF8F0 /* regular sample buffer */
+#define ENE_SAMPLE_SPC_MASK 0x80 /* sample is space */
+#define ENE_SAMPLE_VALUE_MASK 0x7F
+#define ENE_SAMPLE_OVERFLOW 0x7F
+#define ENE_SAMPLES_SIZE 4
+
+/* fan input sample buffer */
+#define ENE_SAMPLE_BUFFER_FAN 0xF8FB /* this buffer holds the high byte */
+ /* of each sample in the normal buffer */
+#define ENE_FAN_SMPL_PULS_MSK 0x8000 /* this bit of combined sample */
+ /* if set, says that sample is pulse */
+#define ENE_FAN_VALUE_MASK 0x0FFF /* mask for valid bits of the value */
+
+/* first firmware register */
+#define ENE_FW1 0xF8F8
+#define ENE_FW1_ENABLE 0x01 /* enable fw processing */
+#define ENE_FW1_TXIRQ 0x02 /* TX interrupt pending */
+#define ENE_FW1_WAKE 0x40 /* enable wake from S3 */
+#define ENE_FW1_IRQ 0x80 /* enable interrupt */
+
+/* second firmware register */
+#define ENE_FW2 0xF8F9
+#define ENE_FW2_BUF_HIGH 0x01 /* which half of the buffer to read */
+#define ENE_FW2_IRQ_CLR 0x04 /* clear this on IRQ */
+#define ENE_FW2_GP40_AS_LEARN 0x08 /* normal input is used as */
+ /* learning input */
+#define ENE_FW2_FAN_AS_NRML_IN 0x40 /* fan is used as normal input */
+#define ENE_FW2_LEARNING 0x80 /* hardware supports learning and TX */
+
+/* transmitter ports */
+#define ENE_TX_PORT2 0xFC01 /* this enables one or both */
+#define ENE_TX_PORT2_EN 0x20 /* TX ports */
+#define ENE_TX_PORT1 0xFC08
+#define ENE_TX_PORT1_EN 0x02
+
+/* IRQ registers block (for revision B) */
+#define ENEB_IRQ 0xFD09 /* IRQ number */
+#define ENEB_IRQ_UNK1 0xFD17 /* unknown setting = 1 */
+#define ENEB_IRQ_STATUS 0xFD80 /* irq status */
+#define ENEB_IRQ_STATUS_IR 0x20 /* IR irq */
+
+/* fan as input settings - only if learning capable */
+#define ENE_FAN_AS_IN1 0xFE30 /* fan init reg 1 */
+#define ENE_FAN_AS_IN1_EN 0xCD
+#define ENE_FAN_AS_IN2 0xFE31 /* fan init reg 2 */
+#define ENE_FAN_AS_IN2_EN 0x03
+#define ENE_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */
+
+/* IRQ registers block (for revision C,D) */
+#define ENEC_IRQ 0xFE9B /* new irq settings register */
+#define ENEC_IRQ_MASK 0x0F /* irq number mask */
+#define ENEC_IRQ_UNK_EN 0x10 /* always enabled */
+#define ENEC_IRQ_STATUS 0x20 /* irq status and ACK */
+
+/* CIR block settings */
+#define ENE_CIR_CONF1 0xFEC0
+#define ENE_CIR_CONF1_TX_CLEAR 0x01 /* clear that on revC */
+ /* while transmitting */
+#define ENE_CIR_CONF1_RX_ON 0x07 /* normal receiver enabled */
+#define ENE_CIR_CONF1_LEARN1 0x08 /* enabled on learning mode */
+#define ENE_CIR_CONF1_TX_ON 0x30 /* enabled on transmit */
+#define ENE_CIR_CONF1_TX_CARR 0x80 /* send TX carrier or not */
+
+#define ENE_CIR_CONF2 0xFEC1 /* unknown setting = 0 */
+#define ENE_CIR_CONF2_LEARN2 0x10 /* set on enable learning */
+#define ENE_CIR_CONF2_GPIO40DIS 0x20 /* disable input via gpio40 */
+
+#define ENE_CIR_SAMPLE_PERIOD 0xFEC8 /* sample period in us */
+#define ENE_CIR_SAMPLE_OVERFLOW 0x80 /* interrupt on overflows if set */
+
+
+/* Two byte tx buffer */
+#define ENE_TX_INPUT1 0xFEC9
+#define ENE_TX_INPUT2 0xFECA
+#define ENE_TX_PULSE_MASK 0x80 /* Transmitted sample is pulse */
+#define ENE_TX_SMLP_MASK 0x7F
+#define ENE_TX_SMPL_PERIOD 50 /* transmit sample period - fixed */
+
+
+/* Unknown TX setting - TX sample period ??? */
+#define ENE_TX_UNK1 0xFECB /* set to 0x63 */
+
+/* Current received carrier period */
+#define ENE_RX_CARRIER 0xFECC /* RX period (500 ns) */
+#define ENE_RX_CARRIER_VALID 0x80 /* Register content valid */
+
+
+/* TX period (1/carrier) */
+#define ENE_TX_PERIOD 0xFECE /* TX period (500 ns) */
+#define ENE_TX_PERIOD_UNKBIT 0x80 /* This bit set on transmit */
+#define ENE_TX_PERIOD_PULSE 0xFECF /* TX pulse period (500 ns) */
+
+/* Hardware versions */
+#define ENE_HW_VERSION 0xFF00 /* hardware revision */
+#define ENE_PLLFRH 0xFF16
+#define ENE_PLLFRL 0xFF17
+
+#define ENE_HW_UNK 0xFF1D
+#define ENE_HW_UNK_CLR 0x04
+#define ENE_HW_VER_MAJOR 0xFF1E /* chip version */
+#define ENE_HW_VER_MINOR 0xFF1F
+#define ENE_HW_VER_OLD 0xFD00
+
+/* Normal/Learning carrier ranges - only valid if we have learning input */
+/* TODO: test */
+#define ENE_NORMAL_RX_LOW 34
+#define ENE_NORMAL_RX_HI 38
+
+/* Tx carrier range */
+/* Hardware might be able to do more, but this range is enough for
+ all purposes */
+#define ENE_TX_PERIOD_MAX 32 /* corresponds to 29.4 kHz */
+#define ENE_TX_PERIOD_MIN 16 /* corresponds to 62.5 kHz */
+
+
+
+/* Minimal and maximal gaps */
+
+/* Normal case:
+ Minimal gap is 0x7F * sample period
+ Maximum gap depends on hardware.
+ For KB3926B it is unlimited; for newer models it is around
+ 250000, after which the HW stops sending samples, and this
+ cannot be changed */
+
+/* Fan case:
+ Both minimal and maximal gaps are the same, equal to 0xFFF * 0x61,
+ and there is no way to change this setting
+*/
+
+#define ENE_MAXGAP 250000
+#define ENE_MINGAP (127 * sample_period)
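+/* e.g. with the default 50 us sample period, ENE_MINGAP evaluates to
+ 6350 us, while ENE_MAXGAP corresponds to a 250 ms gap */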
+
+/******************************************************************************/
+
+#define ENE_DRIVER_NAME "ene_ir"
+
+#define ENE_IRQ_RX 1
+#define ENE_IRQ_TX 2
+
+#define ENE_HW_B 1 /* 3926B */
+#define ENE_HW_C 2 /* 3926C */
+#define ENE_HW_D 3 /* 3926D */
+
+#define ene_printk(level, text, ...) \
+ printk(level ENE_DRIVER_NAME ": " text, ## __VA_ARGS__)
+
+#define ene_dbg(text, ...) \
+ if (debug) \
+ printk(KERN_DEBUG \
+ ENE_DRIVER_NAME ": " text "\n" , ## __VA_ARGS__)
+
+#define ene_dbg_verbose(text, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG \
+ ENE_DRIVER_NAME ": " text "\n" , ## __VA_ARGS__)
+
+
+struct ene_device {
+ struct pnp_dev *pnp_dev;
+ struct input_dev *idev;
+ struct ir_dev_props *props;
+ int in_use;
+
+ /* hw IO settings */
+ unsigned long hw_io;
+ int irq;
+ spinlock_t hw_lock;
+
+ /* HW features */
+ int hw_revision; /* hardware revision */
+ bool hw_learning_and_tx_capable; /* learning capable */
+ bool hw_gpio40_learning; /* gpio40 is learning */
+ bool hw_fan_as_normal_input; /* fan input is used as */
+ /* regular input */
+ /* HW state*/
+ int rx_pointer; /* hw pointer to rx buffer */
+ bool rx_fan_input_inuse; /* is fan input in use for rx*/
+ int tx_reg; /* current reg used for TX */
+ u8 saved_conf1; /* saved FEC0 reg */
+
+ /* TX sample handling */
+ unsigned int tx_sample; /* current sample for TX */
+ bool tx_sample_pulse; /* current sample is pulse */
+
+ /* TX buffer */
+ int *tx_buffer; /* input samples buffer*/
+ int tx_pos; /* position in that buffer */
+ int tx_len; /* current len of tx buffer */
+ int tx_done; /* done transmitting */
+ /* one more sample pending */
+ struct completion tx_complete; /* TX completion */
+ struct timer_list tx_sim_timer;
+
+ /* TX settings */
+ int tx_period;
+ int tx_duty_cycle;
+ int transmitter_mask;
+
+ /* RX settings */
+ bool learning_enabled; /* learning input enabled */
+ bool carrier_detect_enabled; /* carrier detect enabled */
+ int rx_period_adjust;
+};
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
index 65c125e44e9..c185422ef28 100644
--- a/drivers/media/IR/imon.c
+++ b/drivers/media/IR/imon.c
@@ -87,7 +87,6 @@ static ssize_t lcd_write(struct file *file, const char *buf,
struct imon_context {
struct device *dev;
struct ir_dev_props *props;
- struct ir_input_dev *ir;
/* Newer devices have two interfaces */
struct usb_device *usbdev_intf0;
struct usb_device *usbdev_intf1;
@@ -1656,7 +1655,6 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
struct input_dev *idev;
struct ir_dev_props *props;
- struct ir_input_dev *ir;
int ret, i;
idev = input_allocate_device();
@@ -1671,12 +1669,6 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
goto props_alloc_failed;
}
- ir = kzalloc(sizeof(struct ir_input_dev), GFP_KERNEL);
- if (!ir) {
- dev_err(ictx->dev, "remote ir input dev allocation failed\n");
- goto ir_dev_alloc_failed;
- }
-
snprintf(ictx->name_idev, sizeof(ictx->name_idev),
"iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
idev->name = ictx->name_idev;
@@ -1706,14 +1698,9 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
props->change_protocol = imon_ir_change_protocol;
ictx->props = props;
- ictx->ir = ir;
- memcpy(&ir->dev, ictx->dev, sizeof(struct device));
-
usb_to_input_id(ictx->usbdev_intf0, &idev->id);
idev->dev.parent = ictx->dev;
- input_set_drvdata(idev, ir);
-
ret = ir_input_register(idev, RC_MAP_IMON_PAD, props, MOD_NAME);
if (ret < 0) {
dev_err(ictx->dev, "remote input dev register failed\n");
@@ -1723,8 +1710,6 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
return idev;
idev_register_failed:
- kfree(ir);
-ir_dev_alloc_failed:
kfree(props);
props_alloc_failed:
input_free_device(idev);
@@ -1944,7 +1929,6 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
urb_submit_failed:
ir_input_unregister(ictx->idev);
- input_free_device(ictx->idev);
idev_setup_failed:
find_endpoint_failed:
mutex_unlock(&ictx->lock);
@@ -2014,10 +1998,8 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
return ictx;
urb_submit_failed:
- if (ictx->touch) {
+ if (ictx->touch)
input_unregister_device(ictx->touch);
- input_free_device(ictx->touch);
- }
touch_setup_failed:
find_endpoint_failed:
mutex_unlock(&ictx->lock);
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
index babd52061bc..a85a8c7c905 100644
--- a/drivers/media/IR/ir-core-priv.h
+++ b/drivers/media/IR/ir-core-priv.h
@@ -32,7 +32,7 @@ struct ir_raw_handler {
struct ir_raw_event_ctrl {
struct list_head list; /* to keep track of raw clients */
- struct work_struct rx_work; /* for the rx decoding workqueue */
+ struct task_struct *thread;
struct kfifo kfifo; /* fifo for the pulse/space durations */
ktime_t last_event; /* when last event occurred */
enum raw_event_type last_type; /* last event type */
@@ -41,10 +41,13 @@ struct ir_raw_event_ctrl {
/* raw decoder state follows */
struct ir_raw_event prev_ev;
+ struct ir_raw_event this_ev;
struct nec_dec {
int state;
unsigned count;
u32 bits;
+ bool is_nec_x;
+ bool necx_repeat;
} nec;
struct rc5_dec {
int state;
@@ -76,7 +79,7 @@ struct ir_raw_event_ctrl {
struct lirc_codec {
struct ir_input_dev *ir_dev;
struct lirc_driver *drv;
- int lircdata;
+ int carrier_low;
} lirc;
};
@@ -104,10 +107,9 @@ static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
ev->duration -= duration;
}
-#define TO_US(duration) (((duration) + 500) / 1000)
+#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
#define IS_RESET(ev) (ev.duration == 0)
-
/*
* Routines from ir-sysfs.c - Meant to be called only internally inside
* ir-core
@@ -126,7 +128,8 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler);
void ir_raw_init(void);
-
+int ir_rcmap_init(void);
+void ir_rcmap_cleanup(void);
/*
* Decoder initialization code
*
diff --git a/drivers/media/IR/ir-jvc-decoder.c b/drivers/media/IR/ir-jvc-decoder.c
index 8894d8b3604..77a89c4de01 100644
--- a/drivers/media/IR/ir-jvc-decoder.c
+++ b/drivers/media/IR/ir-jvc-decoder.c
@@ -32,6 +32,7 @@ enum jvc_state {
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_TRAILER_SPACE,
+ STATE_CHECK_REPEAT,
};
/**
@@ -60,6 +61,7 @@ static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
IR_dprintk(2, "JVC decode started at state %d (%uus %s)\n",
data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+again:
switch (data->state) {
case STATE_INACTIVE:
@@ -149,8 +151,18 @@ static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
}
data->count = 0;
- data->state = STATE_BIT_PULSE;
+ data->state = STATE_CHECK_REPEAT;
return 0;
+
+ case STATE_CHECK_REPEAT:
+ if (!ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
+ data->state = STATE_INACTIVE;
+ else
+ data->state = STATE_BIT_PULSE;
+ goto again;
}
out:
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 15a0f192d41..7e82a9df726 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -339,6 +339,8 @@ void ir_repeat(struct input_dev *dev)
spin_lock_irqsave(&ir->keylock, flags);
+ input_event(dev, EV_MSC, MSC_SCAN, ir->last_scancode);
+
if (!ir->keypressed)
goto out;
@@ -370,6 +372,8 @@ void ir_keydown(struct input_dev *dev, int scancode, u8 toggle)
spin_lock_irqsave(&ir->keylock, flags);
+ input_event(dev, EV_MSC, MSC_SCAN, scancode);
+
/* Repeat event? */
if (ir->keypressed &&
ir->last_scancode == scancode &&
@@ -383,9 +387,11 @@ void ir_keydown(struct input_dev *dev, int scancode, u8 toggle)
ir->last_toggle = toggle;
ir->last_keycode = keycode;
+
if (keycode == KEY_RESERVED)
goto out;
+
/* Register a keypress */
ir->keypressed = true;
IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
@@ -428,7 +434,7 @@ static void ir_close(struct input_dev *input_dev)
*/
int __ir_input_register(struct input_dev *input_dev,
const struct ir_scancode_table *rc_tab,
- const struct ir_dev_props *props,
+ struct ir_dev_props *props,
const char *driver_name)
{
struct ir_input_dev *ir_dev;
@@ -480,6 +486,8 @@ int __ir_input_register(struct input_dev *input_dev,
set_bit(EV_KEY, input_dev->evbit);
set_bit(EV_REP, input_dev->evbit);
+ set_bit(EV_MSC, input_dev->evbit);
+ set_bit(MSC_SCAN, input_dev->mscbit);
if (ir_setkeytable(input_dev, &ir_dev->rc_tab, rc_tab)) {
rc = -ENOMEM;
@@ -499,7 +507,8 @@ int __ir_input_register(struct input_dev *input_dev,
IR_dprintk(1, "Registered input device on %s for %s remote%s.\n",
driver_name, rc_tab->name,
- ir_dev->props->driver_type == RC_DRIVER_IR_RAW ? " in raw mode" : "");
+ (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
+ " in raw mode" : "");
return 0;
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c
index 3ba482d96c4..77b5946413c 100644
--- a/drivers/media/IR/ir-lirc-codec.c
+++ b/drivers/media/IR/ir-lirc-codec.c
@@ -32,6 +32,7 @@
static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ int sample;
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_LIRC))
return 0;
@@ -39,18 +40,20 @@ static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!ir_dev->raw->lirc.drv || !ir_dev->raw->lirc.drv->rbuf)
return -EINVAL;
+ if (IS_RESET(ev))
+ return 0;
+
IR_dprintk(2, "LIRC data transfer started (%uus %s)\n",
TO_US(ev.duration), TO_STR(ev.pulse));
- ir_dev->raw->lirc.lircdata += ev.duration / 1000;
+ sample = ev.duration / 1000;
if (ev.pulse)
- ir_dev->raw->lirc.lircdata |= PULSE_BIT;
+ sample |= PULSE_BIT;
lirc_buffer_write(ir_dev->raw->lirc.drv->rbuf,
- (unsigned char *) &ir_dev->raw->lirc.lircdata);
+ (unsigned char *) &sample);
wake_up(&ir_dev->raw->lirc.drv->rbuf->wait_poll);
- ir_dev->raw->lirc.lircdata = 0;
return 0;
}
@@ -92,13 +95,14 @@ out:
return ret;
}
-static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long __user arg)
{
struct lirc_codec *lirc;
struct ir_input_dev *ir_dev;
int ret = 0;
void *drv_data;
- unsigned long val;
+ unsigned long val = 0;
lirc = lirc_get_pdata(filep);
if (!lirc)
@@ -110,47 +114,106 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long ar
drv_data = ir_dev->props->priv;
- switch (cmd) {
- case LIRC_SET_TRANSMITTER_MASK:
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
ret = get_user(val, (unsigned long *)arg);
if (ret)
return ret;
+ }
+
+ switch (cmd) {
- if (ir_dev->props && ir_dev->props->s_tx_mask)
+ /* legacy support */
+ case LIRC_GET_SEND_MODE:
+ val = LIRC_CAN_SEND_PULSE & LIRC_CAN_SEND_MASK;
+ break;
+
+ case LIRC_SET_SEND_MODE:
+ if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK))
+ return -EINVAL;
+ break;
+
+ /* TX settings */
+ case LIRC_SET_TRANSMITTER_MASK:
+ if (ir_dev->props->s_tx_mask)
ret = ir_dev->props->s_tx_mask(drv_data, (u32)val);
else
return -EINVAL;
break;
case LIRC_SET_SEND_CARRIER:
- ret = get_user(val, (unsigned long *)arg);
- if (ret)
- return ret;
-
- if (ir_dev->props && ir_dev->props->s_tx_carrier)
+ if (ir_dev->props->s_tx_carrier)
ir_dev->props->s_tx_carrier(drv_data, (u32)val);
else
return -EINVAL;
break;
- case LIRC_GET_SEND_MODE:
- val = LIRC_CAN_SEND_PULSE & LIRC_CAN_SEND_MASK;
- ret = put_user(val, (unsigned long *)arg);
+ case LIRC_SET_SEND_DUTY_CYCLE:
+ if (!ir_dev->props->s_tx_duty_cycle)
+ return -ENOSYS;
+
+ if (val <= 0 || val >= 100)
+ return -EINVAL;
+
+ ir_dev->props->s_tx_duty_cycle(ir_dev->props->priv, val);
break;
- case LIRC_SET_SEND_MODE:
- ret = get_user(val, (unsigned long *)arg);
- if (ret)
- return ret;
+ /* RX settings */
+ case LIRC_SET_REC_CARRIER:
+ if (ir_dev->props->s_rx_carrier_range)
+ ret = ir_dev->props->s_rx_carrier_range(
+ ir_dev->props->priv,
+ ir_dev->raw->lirc.carrier_low, val);
+ else
+ return -ENOSYS;
- if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK))
+ if (!ret)
+ ir_dev->raw->lirc.carrier_low = 0;
+ break;
+
+ case LIRC_SET_REC_CARRIER_RANGE:
+ if (val >= 0)
+ ir_dev->raw->lirc.carrier_low = val;
+ break;
+
+
+ case LIRC_GET_REC_RESOLUTION:
+ val = ir_dev->props->rx_resolution;
+ break;
+
+ case LIRC_SET_WIDEBAND_RECEIVER:
+ if (ir_dev->props->s_learning_mode)
+ return ir_dev->props->s_learning_mode(
+ ir_dev->props->priv, !!val);
+ else
+ return -ENOSYS;
+
+ /* Generic timeout support */
+ case LIRC_GET_MIN_TIMEOUT:
+ if (!ir_dev->props->max_timeout)
+ return -ENOSYS;
+ val = ir_dev->props->min_timeout / 1000;
+ break;
+
+ case LIRC_GET_MAX_TIMEOUT:
+ if (!ir_dev->props->max_timeout)
+ return -ENOSYS;
+ val = ir_dev->props->max_timeout / 1000;
+ break;
+
+ case LIRC_SET_REC_TIMEOUT:
+ if (val < ir_dev->props->min_timeout ||
+ val > ir_dev->props->max_timeout)
return -EINVAL;
+ ir_dev->props->timeout = val * 1000;
break;
default:
return lirc_dev_fop_ioctl(filep, cmd, arg);
}
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ ret = put_user(val, (unsigned long *)arg);
+
return ret;
}
@@ -196,13 +259,28 @@ static int ir_lirc_register(struct input_dev *input_dev)
features = LIRC_CAN_REC_MODE2;
if (ir_dev->props->tx_ir) {
+
features |= LIRC_CAN_SEND_PULSE;
if (ir_dev->props->s_tx_mask)
features |= LIRC_CAN_SET_TRANSMITTER_MASK;
if (ir_dev->props->s_tx_carrier)
features |= LIRC_CAN_SET_SEND_CARRIER;
+
+ if (ir_dev->props->s_tx_duty_cycle)
+ features |= LIRC_CAN_SET_REC_DUTY_CYCLE;
}
+ if (ir_dev->props->s_rx_carrier_range)
+ features |= LIRC_CAN_SET_REC_CARRIER |
+ LIRC_CAN_SET_REC_CARRIER_RANGE;
+
+ if (ir_dev->props->s_learning_mode)
+ features |= LIRC_CAN_USE_WIDEBAND_RECEIVER;
+
+ if (ir_dev->props->max_timeout)
+ features |= LIRC_CAN_SET_REC_TIMEOUT;
+
+
snprintf(drv->name, sizeof(drv->name), "ir-lirc-codec (%s)",
ir_dev->driver_name);
drv->minor = -1;
@@ -224,8 +302,6 @@ static int ir_lirc_register(struct input_dev *input_dev)
ir_dev->raw->lirc.drv = drv;
ir_dev->raw->lirc.ir_dev = ir_dev;
- ir_dev->raw->lirc.lircdata = PULSE_MASK;
-
return 0;
lirc_register_failed:
diff --git a/drivers/media/IR/ir-nec-decoder.c b/drivers/media/IR/ir-nec-decoder.c
index 52e0f378ae3..d597421d654 100644
--- a/drivers/media/IR/ir-nec-decoder.c
+++ b/drivers/media/IR/ir-nec-decoder.c
@@ -20,12 +20,13 @@
#define NEC_HEADER_PULSE (16 * NEC_UNIT)
#define NECX_HEADER_PULSE (8 * NEC_UNIT) /* Less common NEC variant */
#define NEC_HEADER_SPACE (8 * NEC_UNIT)
-#define NEC_REPEAT_SPACE (8 * NEC_UNIT)
+#define NEC_REPEAT_SPACE (4 * NEC_UNIT)
#define NEC_BIT_PULSE (1 * NEC_UNIT)
#define NEC_BIT_0_SPACE (1 * NEC_UNIT)
#define NEC_BIT_1_SPACE (3 * NEC_UNIT)
#define NEC_TRAILER_PULSE (1 * NEC_UNIT)
#define NEC_TRAILER_SPACE (10 * NEC_UNIT) /* even longer in reality */
+#define NECX_REPEAT_BITS 1
enum nec_state {
STATE_INACTIVE,
@@ -67,8 +68,12 @@ static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!ev.pulse)
break;
- if (!eq_margin(ev.duration, NEC_HEADER_PULSE, NEC_UNIT / 2) &&
- !eq_margin(ev.duration, NECX_HEADER_PULSE, NEC_UNIT / 2))
+ if (eq_margin(ev.duration, NEC_HEADER_PULSE, NEC_UNIT / 2)) {
+ data->is_nec_x = false;
+ data->necx_repeat = false;
+ } else if (eq_margin(ev.duration, NECX_HEADER_PULSE, NEC_UNIT / 2))
+ data->is_nec_x = true;
+ else
break;
data->count = 0;
@@ -105,6 +110,17 @@ static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (ev.pulse)
break;
+ if (data->necx_repeat && data->count == NECX_REPEAT_BITS &&
+ geq_margin(ev.duration,
+ NEC_TRAILER_SPACE, NEC_UNIT / 2)) {
+ IR_dprintk(1, "Repeat last key\n");
+ ir_repeat(input_dev);
+ data->state = STATE_INACTIVE;
+ return 0;
+
+ } else if (data->count > NECX_REPEAT_BITS)
+ data->necx_repeat = false;
+
data->bits <<= 1;
if (eq_margin(ev.duration, NEC_BIT_1_SPACE, NEC_UNIT / 2))
data->bits |= 1;
@@ -159,6 +175,9 @@ static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
IR_dprintk(1, "NEC scancode 0x%04x\n", scancode);
}
+ if (data->is_nec_x)
+ data->necx_repeat = true;
+
ir_keydown(input_dev, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 6f192ef31db..43094e7eccf 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -12,9 +12,10 @@
* GNU General Public License for more details.
*/
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/freezer.h>
#include "ir-core-priv.h"
/* Define the max number of pulse/space transitions to buffer */
@@ -24,7 +25,7 @@
static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
-static DEFINE_SPINLOCK(ir_raw_handler_lock);
+static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
@@ -33,20 +34,30 @@ static u64 available_protocols;
static struct work_struct wq_load;
#endif
-static void ir_raw_event_work(struct work_struct *work)
+static int ir_raw_event_thread(void *data)
{
struct ir_raw_event ev;
struct ir_raw_handler *handler;
- struct ir_raw_event_ctrl *raw =
- container_of(work, struct ir_raw_event_ctrl, rx_work);
-
- while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
- spin_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list)
- handler->decode(raw->input_dev, ev);
- spin_unlock(&ir_raw_handler_lock);
- raw->prev_ev = ev;
+ struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
+
+ while (!kthread_should_stop()) {
+ try_to_freeze();
+
+ mutex_lock(&ir_raw_handler_lock);
+
+ while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
+ list_for_each_entry(handler, &ir_raw_handler_list, list)
+ handler->decode(raw->input_dev, ev);
+ raw->prev_ev = ev;
+ }
+
+ mutex_unlock(&ir_raw_handler_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
}
+
+ return 0;
}
/**
@@ -66,6 +77,9 @@ int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
if (!ir->raw)
return -EINVAL;
+ IR_dprintk(2, "sample: (%05dus %s)\n",
+ TO_US(ev->duration), TO_STR(ev->pulse));
+
if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
return -ENOMEM;
@@ -126,6 +140,90 @@ int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type typ
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
/**
+ * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
+ * @input_dev: the struct input_dev device descriptor
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This routine (which may be called from an interrupt context) works
+ * in a similar manner to ir_raw_event_store_edge.
+ * This routine is intended for devices with a limited internal buffer.
+ * It automerges samples of the same type, and handles timeouts.
+ */
+int ir_raw_event_store_with_filter(struct input_dev *input_dev,
+ struct ir_raw_event *ev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ struct ir_raw_event_ctrl *raw = ir->raw;
+
+ if (!raw || !ir->props)
+ return -EINVAL;
+
+ /* Ignore spaces in idle mode */
+ if (ir->idle && !ev->pulse)
+ return 0;
+ else if (ir->idle)
+ ir_raw_event_set_idle(input_dev, 0);
+
+ if (!raw->this_ev.duration) {
+ raw->this_ev = *ev;
+ } else if (ev->pulse == raw->this_ev.pulse) {
+ raw->this_ev.duration += ev->duration;
+ } else {
+ ir_raw_event_store(input_dev, &raw->this_ev);
+ raw->this_ev = *ev;
+ }
+
+ /* Enter idle mode if necessary */
+ if (!ev->pulse && ir->props->timeout &&
+ raw->this_ev.duration >= ir->props->timeout)
+ ir_raw_event_set_idle(input_dev, 1);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
+
+void ir_raw_event_set_idle(struct input_dev *input_dev, int idle)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ struct ir_raw_event_ctrl *raw = ir->raw;
+ ktime_t now;
+ u64 delta;
+
+ if (!ir->props)
+ return;
+
+ if (!ir->raw)
+ goto out;
+
+ if (idle) {
+ IR_dprintk(2, "enter idle mode\n");
+ raw->last_event = ktime_get();
+ } else {
+ IR_dprintk(2, "exit idle mode\n");
+
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
+
+ WARN_ON(raw->this_ev.pulse);
+
+ raw->this_ev.duration =
+ min(raw->this_ev.duration + delta,
+ (u64)IR_MAX_DURATION);
+
+ ir_raw_event_store(input_dev, &raw->this_ev);
+
+ if (raw->this_ev.duration == IR_MAX_DURATION)
+ ir_raw_event_reset(input_dev);
+
+ raw->this_ev.duration = 0;
+ }
+out:
+ if (ir->props->s_idle)
+ ir->props->s_idle(ir->props->priv, idle);
+ ir->idle = idle;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
+
+/**
* ir_raw_event_handle() - schedules the decoding of stored ir data
* @input_dev: the struct input_dev device descriptor
*
@@ -138,7 +236,7 @@ void ir_raw_event_handle(struct input_dev *input_dev)
if (!ir->raw)
return;
- schedule_work(&ir->raw->rx_work);
+ wake_up_process(ir->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
@@ -147,9 +245,9 @@ u64
ir_raw_get_allowed_protocols()
{
u64 protocols;
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
protocols = available_protocols;
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
return protocols;
}
@@ -167,7 +265,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
return -ENOMEM;
ir->raw->input_dev = input_dev;
- INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
ir->raw->enabled_protocols = ~0;
rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
GFP_KERNEL);
@@ -177,12 +275,21 @@ int ir_raw_event_register(struct input_dev *input_dev)
return rc;
}
- spin_lock(&ir_raw_handler_lock);
+ ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
+ "rc%u", (unsigned int)ir->devno);
+
+ if (IS_ERR(ir->raw->thread)) {
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return PTR_ERR(ir->raw->thread);
+ }
+
+ mutex_lock(&ir_raw_handler_lock);
list_add_tail(&ir->raw->list, &ir_raw_client_list);
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (handler->raw_register)
handler->raw_register(ir->raw->input_dev);
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
return 0;
}
@@ -195,14 +302,14 @@ void ir_raw_event_unregister(struct input_dev *input_dev)
if (!ir->raw)
return;
- cancel_work_sync(&ir->raw->rx_work);
+ kthread_stop(ir->raw->thread);
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
list_del(&ir->raw->list);
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (handler->raw_unregister)
handler->raw_unregister(ir->raw->input_dev);
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
kfifo_free(&ir->raw->kfifo);
kfree(ir->raw);
@@ -217,13 +324,13 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
struct ir_raw_event_ctrl *raw;
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
if (ir_raw_handler->raw_register)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->input_dev);
available_protocols |= ir_raw_handler->protocols;
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
return 0;
}
@@ -233,13 +340,13 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
struct ir_raw_event_ctrl *raw;
- spin_lock(&ir_raw_handler_lock);
+ mutex_lock(&ir_raw_handler_lock);
list_del(&ir_raw_handler->list);
if (ir_raw_handler->raw_unregister)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->input_dev);
available_protocols &= ~ir_raw_handler->protocols;
- spin_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index 6273047e915..96dafc425c8 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -325,6 +325,7 @@ static int __init ir_core_init(void)
/* Initialize/load the decoders/keymap code that will be used */
ir_raw_init();
+ ir_rcmap_init();
return 0;
}
@@ -332,6 +333,7 @@ static int __init ir_core_init(void)
static void __exit ir_core_exit(void)
{
class_unregister(&ir_input_class);
+ ir_rcmap_cleanup();
}
module_init(ir_core_init);
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
index cbee06243b5..950e5d953c6 100644
--- a/drivers/media/IR/keymaps/Makefile
+++ b/drivers/media/IR/keymaps/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-dm1105-nec.o \
rc-dntv-live-dvb-t.o \
rc-dntv-live-dvbt-pro.o \
- rc-empty.o \
rc-em-terratec.o \
rc-encore-enltv2.o \
rc-encore-enltv.o \
@@ -59,6 +58,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-purpletv.o \
rc-pv951.o \
rc-rc5-hauppauge-new.o \
+ rc-rc5-streamzap.o \
rc-rc5-tv.o \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
diff --git a/drivers/media/IR/keymaps/rc-empty.c b/drivers/media/IR/keymaps/rc-empty.c
deleted file mode 100644
index 3b338d84b47..00000000000
--- a/drivers/media/IR/keymaps/rc-empty.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/* empty.h - Keytable for empty Remote Controller
- *
- * keymap imported from ir-keymaps.c
- *
- * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <media/rc-map.h>
-
-/* empty keytable, can be used as placeholder for not-yet created keytables */
-
-static struct ir_scancode empty[] = {
- { 0x2a, KEY_COFFEE },
-};
-
-static struct rc_keymap empty_map = {
- .map = {
- .scan = empty,
- .size = ARRAY_SIZE(empty),
- .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
- .name = RC_MAP_EMPTY,
- }
-};
-
-static int __init init_rc_map_empty(void)
-{
- return ir_register_map(&empty_map);
-}
-
-static void __exit exit_rc_map_empty(void)
-{
- ir_unregister_map(&empty_map);
-}
-
-module_init(init_rc_map_empty)
-module_exit(exit_rc_map_empty)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc5-streamzap.c b/drivers/media/IR/keymaps/rc-rc5-streamzap.c
new file mode 100644
index 00000000000..4c19c58b46d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-streamzap.c
@@ -0,0 +1,81 @@
+/* rc-rc5-streamzap.c - Keytable for Streamzap PC Remote, for use
+ * with the Streamzap PC Remote IR Receiver.
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode rc5_streamzap[] = {
+/*
+ * FIXME: The Streamzap remote isn't actually true RC-5, it has an extra
+ * bit in it, which presently throws the in-kernel RC-5 decoder for a loop.
+ * We either have to enhance the decoder to support it, add a new decoder,
+ * or just rely on lirc userspace decoding.
+ */
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x0a, KEY_POWER },
+ { 0x0b, KEY_MUTE },
+ { 0x0c, KEY_CHANNELUP },
+ { 0x0d, KEY_VOLUMEUP },
+ { 0x0e, KEY_CHANNELDOWN },
+ { 0x0f, KEY_VOLUMEDOWN },
+ { 0x10, KEY_UP },
+ { 0x11, KEY_LEFT },
+ { 0x12, KEY_OK },
+ { 0x13, KEY_RIGHT },
+ { 0x14, KEY_DOWN },
+ { 0x15, KEY_MENU },
+ { 0x16, KEY_EXIT },
+ { 0x17, KEY_PLAY },
+ { 0x18, KEY_PAUSE },
+ { 0x19, KEY_STOP },
+ { 0x1a, KEY_BACK },
+ { 0x1b, KEY_FORWARD },
+ { 0x1c, KEY_RECORD },
+ { 0x1d, KEY_REWIND },
+ { 0x1e, KEY_FASTFORWARD },
+ { 0x20, KEY_RED },
+ { 0x21, KEY_GREEN },
+ { 0x22, KEY_YELLOW },
+ { 0x23, KEY_BLUE },
+
+};
+
+static struct rc_keymap rc5_streamzap_map = {
+ .map = {
+ .scan = rc5_streamzap,
+ .size = ARRAY_SIZE(rc5_streamzap),
+ .ir_type = IR_TYPE_RC5,
+ .name = RC_MAP_RC5_STREAMZAP,
+ }
+};
+
+static int __init init_rc_map_rc5_streamzap(void)
+{
+ return ir_register_map(&rc5_streamzap_map);
+}
+
+static void __exit exit_rc_map_rc5_streamzap(void)
+{
+ ir_unregister_map(&rc5_streamzap_map);
+}
+
+module_init(init_rc_map_rc5_streamzap)
+module_exit(exit_rc_map_rc5_streamzap)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c
index c6726a8039b..64264f7f838 100644
--- a/drivers/media/IR/keymaps/rc-rc6-mce.c
+++ b/drivers/media/IR/keymaps/rc-rc6-mce.c
@@ -74,6 +74,8 @@ static struct ir_scancode rc6_mce[] = {
{ 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
{ 0x800f044d, KEY_TITLE },
+ { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
+
{ 0x800f040c, KEY_POWER },
{ 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c
index 78bf7f77a1a..ac6bb2c01a4 100644
--- a/drivers/media/IR/mceusb.c
+++ b/drivers/media/IR/mceusb.c
@@ -228,7 +228,6 @@ static struct usb_device_id std_tx_mask_list[] = {
/* data structure for each usb transceiver */
struct mceusb_dev {
/* ir-core bits */
- struct ir_input_dev *irdev;
struct ir_dev_props *props;
struct ir_raw_event rawir;
@@ -428,7 +427,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
}
}
-static void usb_async_callback(struct urb *urb, struct pt_regs *regs)
+static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
{
struct mceusb_dev *ir;
int len;
@@ -477,7 +476,7 @@ static void mce_request_packet(struct mceusb_dev *ir,
/* outbound data */
usb_fill_int_urb(async_urb, ir->usbdev,
usb_sndintpipe(ir->usbdev, ep->bEndpointAddress),
- async_buf, size, (usb_complete_t) usb_async_callback,
+ async_buf, size, (usb_complete_t)mce_async_callback,
ir, ep->bInterval);
memcpy(async_buf, data, size);
@@ -739,7 +738,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
- dev_dbg(&ir->irdev->dev, "setup answer received %d bytes\n",
+ dev_dbg(ir->dev, "setup answer received %d bytes\n",
buf_len);
}
@@ -861,7 +860,6 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
{
struct input_dev *idev;
struct ir_dev_props *props;
- struct ir_input_dev *irdev;
struct device *dev = ir->dev;
int ret = -ENODEV;
@@ -878,12 +876,6 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
goto props_alloc_failed;
}
- irdev = kzalloc(sizeof(struct ir_input_dev), GFP_KERNEL);
- if (!irdev) {
- dev_err(dev, "remote ir input dev allocation failed\n");
- goto ir_dev_alloc_failed;
- }
-
snprintf(ir->name, sizeof(ir->name), "Media Center Ed. eHome "
"Infrared Remote Transceiver (%04x:%04x)",
le16_to_cpu(ir->usbdev->descriptor.idVendor),
@@ -902,9 +894,6 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
props->tx_ir = mceusb_tx_ir;
ir->props = props;
- ir->irdev = irdev;
-
- input_set_drvdata(idev, irdev);
ret = ir_input_register(idev, RC_MAP_RC6_MCE, props, DRIVER_NAME);
if (ret < 0) {
@@ -915,8 +904,6 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
return idev;
irdev_failed:
- kfree(irdev);
-ir_dev_alloc_failed:
kfree(props);
props_alloc_failed:
input_free_device(idev);
@@ -932,7 +919,6 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep = NULL;
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
- struct usb_host_config *config;
struct mceusb_dev *ir = NULL;
int pipe, maxp, i;
char buf[63], name[128] = "";
@@ -942,7 +928,6 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, ": %s called\n", __func__);
- config = dev->actconfig;
idesc = intf->cur_altsetting;
is_gen3 = usb_match_id(intf, gen3_list) ? 1 : 0;
diff --git a/drivers/media/IR/rc-map.c b/drivers/media/IR/rc-map.c
index 46a8f1524b5..689143f2fff 100644
--- a/drivers/media/IR/rc-map.c
+++ b/drivers/media/IR/rc-map.c
@@ -82,3 +82,26 @@ void ir_unregister_map(struct rc_keymap *map)
}
EXPORT_SYMBOL_GPL(ir_unregister_map);
+
+static struct ir_scancode empty[] = {
+ { 0x2a, KEY_COFFEE },
+};
+
+static struct rc_keymap empty_map = {
+ .map = {
+ .scan = empty,
+ .size = ARRAY_SIZE(empty),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EMPTY,
+ }
+};
+
+int ir_rcmap_init(void)
+{
+ return ir_register_map(&empty_map);
+}
+
+void ir_rcmap_cleanup(void)
+{
+ ir_unregister_map(&empty_map);
+}
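
Taken together with the Makefile and keymap hunks above, the placeholder table now lives in rc-map.c and is registered from ir_rcmap_init(), so RC_MAP_EMPTY is always available once ir-core is loaded. Purely for illustration, a bridge driver with no dedicated keytable yet could register against that placeholder the same way mceusb and streamzap register their real maps below; the driver name, state struct and protocol choice here are hypothetical:

#include <linux/input.h>
#include <media/ir-core.h>
#include <media/rc-map.h>

/* hypothetical driver state */
struct mydrv {
	struct input_dev *idev;
	struct ir_dev_props props;
};

static int mydrv_register_remote(struct mydrv *d)
{
	d->props.priv = d;
	d->props.driver_type = RC_DRIVER_IR_RAW;
	/* protocols are a guess until real hardware data exists */
	d->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_RC6;

	/* no board-specific keytable yet: fall back on the
	 * always-registered RC_MAP_EMPTY placeholder */
	return ir_input_register(d->idev, RC_MAP_EMPTY,
				 &d->props, "mydrv");
}
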
diff --git a/drivers/media/IR/streamzap.c b/drivers/media/IR/streamzap.c
new file mode 100644
index 00000000000..058e29fd478
--- /dev/null
+++ b/drivers/media/IR/streamzap.c
@@ -0,0 +1,741 @@
+/*
+ * Streamzap Remote Control driver
+ *
+ * Copyright (c) 2005 Christoph Bartelmus <lirc@bartelmus.de>
+ * Copyright (c) 2010 Jarod Wilson <jarod@wilsonet.com>
+ *
+ * This driver was based on the work of Greg Wickham and Adrian
+ * Dewhurst. It was substantially rewritten to support correct signal
+ * gaps and now maintains a delay buffer, which is used to present
+ * consistent timing behaviour to user space applications. Without the
+ * delay buffer an ugly hack would be required in lircd, which can
+ * cause sluggish signal decoding in certain situations.
+ *
+ * Ported to in-kernel ir-core interface by Jarod Wilson
+ *
+ * This driver is based on the USB skeleton driver packaged with the
+ * kernel; copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/input.h>
+#include <media/ir-core.h>
+
+#define DRIVER_VERSION "1.60"
+#define DRIVER_NAME "streamzap"
+#define DRIVER_DESC "Streamzap Remote Control driver"
+
+#ifdef CONFIG_USB_DEBUG
+static int debug = 1;
+#else
+static int debug;
+#endif
+
+#define USB_STREAMZAP_VENDOR_ID 0x0e9c
+#define USB_STREAMZAP_PRODUCT_ID 0x0000
+
+/* table of devices that work with this driver */
+static struct usb_device_id streamzap_table[] = {
+ /* Streamzap Remote Control */
+ { USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) },
+ /* Terminating entry */
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, streamzap_table);
+
+#define STREAMZAP_PULSE_MASK 0xf0
+#define STREAMZAP_SPACE_MASK 0x0f
+#define STREAMZAP_TIMEOUT 0xff
+#define STREAMZAP_RESOLUTION 256
+
+/* number of samples buffered */
+#define SZ_BUF_LEN 128
+
+enum StreamzapDecoderState {
+ PulseSpace,
+ FullPulse,
+ FullSpace,
+ IgnorePulse
+};
+
+/* structure to hold our device specific stuff */
+struct streamzap_ir {
+
+ /* ir-core */
+ struct ir_dev_props *props;
+ struct ir_raw_event rawir;
+
+ /* core device info */
+ struct device *dev;
+ struct input_dev *idev;
+
+ /* usb */
+ struct usb_device *usbdev;
+ struct usb_interface *interface;
+ struct usb_endpoint_descriptor *endpoint;
+ struct urb *urb_in;
+
+ /* buffer & dma */
+ unsigned char *buf_in;
+ dma_addr_t dma_in;
+ unsigned int buf_in_len;
+
+ /* timer used to support delay buffering */
+ struct timer_list delay_timer;
+ bool timer_running;
+ spinlock_t timer_lock;
+ struct timer_list flush_timer;
+ bool flush;
+
+ /* delay buffer */
+ struct kfifo fifo;
+ bool fifo_initialized;
+
+ /* track what state we're in */
+ enum StreamzapDecoderState decoder_state;
+ /* tracks whether we are currently receiving some signal */
+ bool idle;
+ /* sum of signal lengths received since signal start */
+ unsigned long sum;
+ /* start time of signal; necessary for gap tracking */
+ struct timeval signal_last;
+ struct timeval signal_start;
+ /* bool timeout_enabled; */
+
+ char name[128];
+ char phys[64];
+};
+
+
+/* local function prototypes */
+static int streamzap_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+static void streamzap_disconnect(struct usb_interface *interface);
+static void streamzap_callback(struct urb *urb);
+static int streamzap_suspend(struct usb_interface *intf, pm_message_t message);
+static int streamzap_resume(struct usb_interface *intf);
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver streamzap_driver = {
+ .name = DRIVER_NAME,
+ .probe = streamzap_probe,
+ .disconnect = streamzap_disconnect,
+ .suspend = streamzap_suspend,
+ .resume = streamzap_resume,
+ .id_table = streamzap_table,
+};
+
+static void streamzap_stop_timer(struct streamzap_ir *sz)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sz->timer_lock, flags);
+ if (sz->timer_running) {
+ sz->timer_running = false;
+ spin_unlock_irqrestore(&sz->timer_lock, flags);
+ del_timer_sync(&sz->delay_timer);
+ } else {
+ spin_unlock_irqrestore(&sz->timer_lock, flags);
+ }
+}
+
+static void streamzap_flush_timeout(unsigned long arg)
+{
+ struct streamzap_ir *sz = (struct streamzap_ir *)arg;
+
+ dev_info(sz->dev, "%s: callback firing\n", __func__);
+
+ /* finally start accepting data */
+ sz->flush = false;
+}
+
+static void streamzap_delay_timeout(unsigned long arg)
+{
+ struct streamzap_ir *sz = (struct streamzap_ir *)arg;
+ struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
+ unsigned long flags;
+ int len, ret;
+ static unsigned long delay;
+ bool wake = false;
+
+ /* deliver data every 10 ms */
+ delay = msecs_to_jiffies(10);
+
+ spin_lock_irqsave(&sz->timer_lock, flags);
+
+ if (kfifo_len(&sz->fifo) > 0) {
+ ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
+ if (ret != sizeof(rawir))
+ dev_err(sz->dev, "Problem w/kfifo_out...\n");
+ ir_raw_event_store(sz->idev, &rawir);
+ wake = true;
+ }
+
+ len = kfifo_len(&sz->fifo);
+ if (len > 0) {
+ while ((len < SZ_BUF_LEN / 2) &&
+ (len < SZ_BUF_LEN * sizeof(int))) {
+ ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
+ if (ret != sizeof(rawir))
+ dev_err(sz->dev, "Problem w/kfifo_out...\n");
+ ir_raw_event_store(sz->idev, &rawir);
+ wake = true;
+ len = kfifo_len(&sz->fifo);
+ }
+ if (sz->timer_running)
+ mod_timer(&sz->delay_timer, jiffies + delay);
+
+ } else {
+ sz->timer_running = false;
+ }
+
+ if (wake)
+ ir_raw_event_handle(sz->idev);
+
+ spin_unlock_irqrestore(&sz->timer_lock, flags);
+}
+
+static void streamzap_flush_delay_buffer(struct streamzap_ir *sz)
+{
+ struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
+ bool wake = false;
+ int ret;
+
+ while (kfifo_len(&sz->fifo) > 0) {
+ ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
+ if (ret != sizeof(rawir))
+ dev_err(sz->dev, "Problem w/kfifo_out...\n");
+ ir_raw_event_store(sz->idev, &rawir);
+ wake = true;
+ }
+
+ if (wake)
+ ir_raw_event_handle(sz->idev);
+}
+
+static void sz_push(struct streamzap_ir *sz)
+{
+ struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sz->timer_lock, flags);
+ if (kfifo_len(&sz->fifo) >= sizeof(int) * SZ_BUF_LEN) {
+ ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
+ if (ret != sizeof(rawir))
+ dev_err(sz->dev, "Problem w/kfifo_out...\n");
+ ir_raw_event_store(sz->idev, &rawir);
+ }
+
+ kfifo_in(&sz->fifo, &sz->rawir, sizeof(rawir));
+
+ if (!sz->timer_running) {
+ sz->delay_timer.expires = jiffies + (HZ / 10);
+ add_timer(&sz->delay_timer);
+ sz->timer_running = true;
+ }
+
+ spin_unlock_irqrestore(&sz->timer_lock, flags);
+}
+
+static void sz_push_full_pulse(struct streamzap_ir *sz,
+ unsigned char value)
+{
+ if (sz->idle) {
+ long deltv;
+
+ sz->signal_last = sz->signal_start;
+ do_gettimeofday(&sz->signal_start);
+
+ deltv = sz->signal_start.tv_sec - sz->signal_last.tv_sec;
+ sz->rawir.pulse = false;
+ if (deltv > 15) {
+ /* really long time */
+ sz->rawir.duration = IR_MAX_DURATION;
+ } else {
+ sz->rawir.duration = (int)(deltv * 1000000 +
+ sz->signal_start.tv_usec -
+ sz->signal_last.tv_usec);
+ sz->rawir.duration -= sz->sum;
+ sz->rawir.duration *= 1000;
+ sz->rawir.duration &= IR_MAX_DURATION;
+ }
+ dev_dbg(sz->dev, "ls %u\n", sz->rawir.duration);
+ sz_push(sz);
+
+ sz->idle = 0;
+ sz->sum = 0;
+ }
+
+ sz->rawir.pulse = true;
+ sz->rawir.duration = ((int) value) * STREAMZAP_RESOLUTION;
+ sz->rawir.duration += STREAMZAP_RESOLUTION / 2;
+ sz->sum += sz->rawir.duration;
+ sz->rawir.duration *= 1000;
+ sz->rawir.duration &= IR_MAX_DURATION;
+ dev_dbg(sz->dev, "p %u\n", sz->rawir.duration);
+ sz_push(sz);
+}
+
+static void sz_push_half_pulse(struct streamzap_ir *sz,
+ unsigned char value)
+{
+ sz_push_full_pulse(sz, (value & STREAMZAP_PULSE_MASK) >> 4);
+}
+
+static void sz_push_full_space(struct streamzap_ir *sz,
+ unsigned char value)
+{
+ sz->rawir.pulse = false;
+ sz->rawir.duration = ((int) value) * STREAMZAP_RESOLUTION;
+ sz->rawir.duration += STREAMZAP_RESOLUTION / 2;
+ sz->sum += sz->rawir.duration;
+ sz->rawir.duration *= 1000;
+ dev_dbg(sz->dev, "s %u\n", sz->rawir.duration);
+ sz_push(sz);
+}
+
+static void sz_push_half_space(struct streamzap_ir *sz,
+ unsigned long value)
+{
+ sz_push_full_space(sz, value & STREAMZAP_SPACE_MASK);
+}
+
+/**
+ * streamzap_callback - usb IRQ handler callback
+ *
+ * This procedure is invoked on reception of data from
+ * the usb remote.
+ */
+static void streamzap_callback(struct urb *urb)
+{
+ struct streamzap_ir *sz;
+ unsigned int i;
+ int len;
+ #if 0
+ static int timeout = (((STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION) &
+ IR_MAX_DURATION) | 0x03000000);
+ #endif
+
+ if (!urb)
+ return;
+
+ sz = urb->context;
+ len = urb->actual_length;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /*
+ * this urb is terminated, clean up.
+ * sz might already be invalid at this point
+ */
+ dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
+ return;
+ default:
+ break;
+ }
+
+ dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
+ if (!sz->flush) {
+ for (i = 0; i < urb->actual_length; i++) {
+ dev_dbg(sz->dev, "%d: %x\n", i,
+ (unsigned char)sz->buf_in[i]);
+ switch (sz->decoder_state) {
+ case PulseSpace:
+ if ((sz->buf_in[i] & STREAMZAP_PULSE_MASK) ==
+ STREAMZAP_PULSE_MASK) {
+ sz->decoder_state = FullPulse;
+ continue;
+ } else if ((sz->buf_in[i] & STREAMZAP_SPACE_MASK)
+ == STREAMZAP_SPACE_MASK) {
+ sz_push_half_pulse(sz, sz->buf_in[i]);
+ sz->decoder_state = FullSpace;
+ continue;
+ } else {
+ sz_push_half_pulse(sz, sz->buf_in[i]);
+ sz_push_half_space(sz, sz->buf_in[i]);
+ }
+ break;
+ case FullPulse:
+ sz_push_full_pulse(sz, sz->buf_in[i]);
+ sz->decoder_state = IgnorePulse;
+ break;
+ case FullSpace:
+ if (sz->buf_in[i] == STREAMZAP_TIMEOUT) {
+ sz->idle = 1;
+ streamzap_stop_timer(sz);
+ #if 0
+ if (sz->timeout_enabled) {
+ sz->rawir.pulse = false;
+ sz->rawir.duration = timeout;
+ sz->rawir.duration *= 1000;
+ sz_push(sz);
+ }
+ #endif
+ streamzap_flush_delay_buffer(sz);
+ } else
+ sz_push_full_space(sz, sz->buf_in[i]);
+ sz->decoder_state = PulseSpace;
+ break;
+ case IgnorePulse:
+ if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) ==
+ STREAMZAP_SPACE_MASK) {
+ sz->decoder_state = FullSpace;
+ continue;
+ }
+ sz_push_half_space(sz, sz->buf_in[i]);
+ sz->decoder_state = PulseSpace;
+ break;
+ }
+ }
+ }
+
+ usb_submit_urb(urb, GFP_ATOMIC);
+
+ return;
+}
+
+static struct input_dev *streamzap_init_input_dev(struct streamzap_ir *sz)
+{
+ struct input_dev *idev;
+ struct ir_dev_props *props;
+ struct device *dev = sz->dev;
+ int ret;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(dev, "remote input dev allocation failed\n");
+ goto idev_alloc_failed;
+ }
+
+ props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ if (!props) {
+ dev_err(dev, "remote ir dev props allocation failed\n");
+ goto props_alloc_failed;
+ }
+
+ snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared "
+ "Receiver (%04x:%04x)",
+ le16_to_cpu(sz->usbdev->descriptor.idVendor),
+ le16_to_cpu(sz->usbdev->descriptor.idProduct));
+
+ idev->name = sz->name;
+ usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys));
+ strlcat(sz->phys, "/input0", sizeof(sz->phys));
+ idev->phys = sz->phys;
+
+ props->priv = sz;
+ props->driver_type = RC_DRIVER_IR_RAW;
+ /* FIXME: not sure about supported protocols, check on this */
+ props->allowed_protos = IR_TYPE_RC5 | IR_TYPE_RC6;
+
+ sz->props = props;
+
+ ret = ir_input_register(idev, RC_MAP_RC5_STREAMZAP, props, DRIVER_NAME);
+ if (ret < 0) {
+ dev_err(dev, "remote input device register failed\n");
+ goto irdev_failed;
+ }
+
+ return idev;
+
+irdev_failed:
+ kfree(props);
+props_alloc_failed:
+ input_free_device(idev);
+idev_alloc_failed:
+ return NULL;
+}
+
+static int streamzap_delay_buf_init(struct streamzap_ir *sz)
+{
+ int ret;
+
+ ret = kfifo_alloc(&sz->fifo, sizeof(int) * SZ_BUF_LEN,
+ GFP_KERNEL);
+ if (ret == 0)
+ sz->fifo_initialized = 1;
+
+ return ret;
+}
+
+static void streamzap_start_flush_timer(struct streamzap_ir *sz)
+{
+ sz->flush_timer.expires = jiffies + HZ;
+ sz->flush = true;
+ add_timer(&sz->flush_timer);
+
+ sz->urb_in->dev = sz->usbdev;
+ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
+ dev_err(sz->dev, "urb submit failed\n");
+}
+
+/**
+ * streamzap_probe
+ *
+ * Called by usb-core to associate the driver with a candidate device.
+ *
+ * Returns 0 on success, or a negative error code on any failure.
+ */
+static int __devinit streamzap_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_host_interface *iface_host;
+ struct streamzap_ir *sz = NULL;
+ char buf[63], name[128] = "";
+ int retval = -ENOMEM;
+ int pipe, maxp;
+
+ /* Allocate space for device driver specific data */
+ sz = kzalloc(sizeof(struct streamzap_ir), GFP_KERNEL);
+ if (!sz)
+ return -ENOMEM;
+
+ sz->usbdev = usbdev;
+ sz->interface = intf;
+
+ /* Check to ensure endpoint information matches requirements */
+ iface_host = intf->cur_altsetting;
+
+ if (iface_host->desc.bNumEndpoints != 1) {
+ dev_err(&intf->dev, "%s: Unexpected desc.bNumEndpoints (%d)\n",
+ __func__, iface_host->desc.bNumEndpoints);
+ retval = -ENODEV;
+ goto free_sz;
+ }
+
+ sz->endpoint = &(iface_host->endpoint[0].desc);
+ if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ != USB_DIR_IN) {
+ dev_err(&intf->dev, "%s: endpoint doesn't match input device "
+			"0x%02x\n", __func__, sz->endpoint->bEndpointAddress);
+ retval = -ENODEV;
+ goto free_sz;
+ }
+
+ if ((sz->endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ != USB_ENDPOINT_XFER_INT) {
+ dev_err(&intf->dev, "%s: endpoint attributes don't match xfer "
+			"0x%02x\n", __func__, sz->endpoint->bmAttributes);
+ retval = -ENODEV;
+ goto free_sz;
+ }
+
+ pipe = usb_rcvintpipe(usbdev, sz->endpoint->bEndpointAddress);
+ maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe));
+
+ if (maxp == 0) {
+ dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n",
+ __func__);
+ retval = -ENODEV;
+ goto free_sz;
+ }
+
+ /* Allocate the USB buffer and IRQ URB */
+ sz->buf_in = usb_alloc_coherent(usbdev, maxp, GFP_ATOMIC, &sz->dma_in);
+ if (!sz->buf_in)
+ goto free_sz;
+
+ sz->urb_in = usb_alloc_urb(0, GFP_KERNEL);
+ if (!sz->urb_in)
+ goto free_buf_in;
+
+ sz->dev = &intf->dev;
+ sz->buf_in_len = maxp;
+
+ if (usbdev->descriptor.iManufacturer
+ && usb_string(usbdev, usbdev->descriptor.iManufacturer,
+ buf, sizeof(buf)) > 0)
+ strlcpy(name, buf, sizeof(name));
+
+ if (usbdev->descriptor.iProduct
+ && usb_string(usbdev, usbdev->descriptor.iProduct,
+ buf, sizeof(buf)) > 0)
+ snprintf(name + strlen(name), sizeof(name) - strlen(name),
+ " %s", buf);
+
+ retval = streamzap_delay_buf_init(sz);
+ if (retval) {
+ dev_err(&intf->dev, "%s: delay buffer init failed\n", __func__);
+ goto free_urb_in;
+ }
+
+ sz->idev = streamzap_init_input_dev(sz);
+ if (!sz->idev)
+ goto input_dev_fail;
+
+ sz->idle = true;
+ sz->decoder_state = PulseSpace;
+ #if 0
+ /* not yet supported, depends on patches from maxim */
+ /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
+ sz->timeout_enabled = false;
+ sz->min_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION * 1000;
+ sz->max_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION * 1000;
+ #endif
+
+ init_timer(&sz->delay_timer);
+ sz->delay_timer.function = streamzap_delay_timeout;
+ sz->delay_timer.data = (unsigned long)sz;
+ spin_lock_init(&sz->timer_lock);
+
+ init_timer(&sz->flush_timer);
+ sz->flush_timer.function = streamzap_flush_timeout;
+ sz->flush_timer.data = (unsigned long)sz;
+
+ do_gettimeofday(&sz->signal_start);
+
+ /* Complete final initialisations */
+ usb_fill_int_urb(sz->urb_in, usbdev, pipe, sz->buf_in,
+ maxp, (usb_complete_t)streamzap_callback,
+ sz, sz->endpoint->bInterval);
+ sz->urb_in->transfer_dma = sz->dma_in;
+ sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_set_intfdata(intf, sz);
+
+ streamzap_start_flush_timer(sz);
+
+ dev_info(sz->dev, "Registered %s on usb%d:%d\n", name,
+ usbdev->bus->busnum, usbdev->devnum);
+
+ return 0;
+
+input_dev_fail:
+ kfifo_free(&sz->fifo);
+free_urb_in:
+ usb_free_urb(sz->urb_in);
+free_buf_in:
+ usb_free_coherent(usbdev, maxp, sz->buf_in, sz->dma_in);
+free_sz:
+ kfree(sz);
+
+ return retval;
+}
+
+/**
+ * streamzap_disconnect
+ *
+ * Called by the usb core when the device is removed from the system.
+ *
+ * This routine guarantees that the driver will not submit any more urbs
+ * by clearing sz->usbdev. It also terminates the currently active
+ * receive urb with usb_kill_urb() before freeing the urb, the DMA
+ * buffer and the driver data.
+ */
+static void streamzap_disconnect(struct usb_interface *interface)
+{
+ struct streamzap_ir *sz = usb_get_intfdata(interface);
+ struct usb_device *usbdev = interface_to_usbdev(interface);
+
+ usb_set_intfdata(interface, NULL);
+
+ if (!sz)
+ return;
+
+ if (sz->flush) {
+ sz->flush = false;
+ del_timer_sync(&sz->flush_timer);
+ }
+
+ streamzap_stop_timer(sz);
+
+ sz->usbdev = NULL;
+ ir_input_unregister(sz->idev);
+ usb_kill_urb(sz->urb_in);
+ usb_free_urb(sz->urb_in);
+ usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
+
+ kfree(sz);
+}
+
+static int streamzap_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct streamzap_ir *sz = usb_get_intfdata(intf);
+
+ if (sz->flush) {
+ sz->flush = false;
+ del_timer_sync(&sz->flush_timer);
+ }
+
+ streamzap_stop_timer(sz);
+
+ usb_kill_urb(sz->urb_in);
+
+ return 0;
+}
+
+static int streamzap_resume(struct usb_interface *intf)
+{
+ struct streamzap_ir *sz = usb_get_intfdata(intf);
+
+ if (sz->fifo_initialized)
+ kfifo_reset(&sz->fifo);
+
+ sz->flush_timer.expires = jiffies + HZ;
+ sz->flush = true;
+ add_timer(&sz->flush_timer);
+
+ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
+		dev_err(sz->dev, "Error submitting urb\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * streamzap_init
+ */
+static int __init streamzap_init(void)
+{
+ int ret;
+
+ /* register this driver with the USB subsystem */
+ ret = usb_register(&streamzap_driver);
+ if (ret < 0)
+ printk(KERN_ERR DRIVER_NAME ": usb register failed, "
+ "result = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * streamzap_exit
+ */
+static void __exit streamzap_exit(void)
+{
+ usb_deregister(&streamzap_driver);
+}
+
+
+module_init(streamzap_init);
+module_exit(streamzap_exit);
+
+MODULE_AUTHOR("Jarod Wilson <jarod@wilsonet.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging messages");
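
For reference, the Streamzap receiver reports IR activity as single bytes at a 256 us resolution: the high nibble carries a pulse length and the low nibble a space length, a nibble of 0xf appears to signal that a full byte value follows, and 0xff marks the end of a signal, which matches the PulseSpace/FullPulse/FullSpace/IgnorePulse state machine above. A stand-alone sketch of the byte-to-duration conversion the driver performs (plain userspace C, not kernel code; the helper name and example byte are made up):

#include <stdio.h>

#define STREAMZAP_PULSE_MASK 0xf0
#define STREAMZAP_SPACE_MASK 0x0f
#define STREAMZAP_TIMEOUT    0xff
#define STREAMZAP_RESOLUTION 256   /* microseconds per unit */

/* Convert a sample value into a duration in microseconds, rounding
 * to the middle of the 256 us quantization step, as
 * sz_push_full_pulse()/sz_push_full_space() do above. */
static unsigned int sz_units_to_us(unsigned char value)
{
	return value * STREAMZAP_RESOLUTION + STREAMZAP_RESOLUTION / 2;
}

int main(void)
{
	unsigned char sample = 0x28;	/* example byte from the receiver */

	if (sample == STREAMZAP_TIMEOUT) {
		printf("end of signal\n");
		return 0;
	}

	/* high nibble: pulse, low nibble: space */
	printf("pulse %u us, space %u us\n",
	       sz_units_to_us((sample & STREAMZAP_PULSE_MASK) >> 4),
	       sz_units_to_us(sample & STREAMZAP_SPACE_MASK));
	return 0;
}
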
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 409a4261e5b..b3ed5daaacf 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -34,7 +34,7 @@ config MEDIA_TUNER
menuconfig MEDIA_TUNER_CUSTOMISE
bool "Customize analog and hybrid tuner modules to build"
depends on MEDIA_TUNER
- default n
+ default y if EMBEDDED
help
This allows the user to deselect tuner drivers unnecessary
for their hardware from the build. Use this option with care
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 248a2a9d841..caa4e18ed1c 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -1763,7 +1763,15 @@ static struct dvb_frontend_ops dst_dvbt_ops = {
.frequency_min = 137000000,
.frequency_max = 858000000,
.frequency_stepsize = 166667,
- .caps = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO
+ .caps = FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO
},
.release = dst_release,
diff --git a/drivers/media/dvb/dm1105/Kconfig b/drivers/media/dvb/dm1105/Kconfig
index 695239227cb..a6ceb08f118 100644
--- a/drivers/media/dvb/dm1105/Kconfig
+++ b/drivers/media/dvb/dm1105/Kconfig
@@ -9,7 +9,7 @@ config DVB_DM1105
select DVB_CX24116 if !DVB_FE_CUSTOMISE
select DVB_SI21XX if !DVB_FE_CUSTOMISE
select DVB_DS3000 if !DVB_FE_CUSTOMISE
- select VIDEO_IR
+ depends on VIDEO_IR
help
Support for cards based on the SDMC DM1105 PCI chip like
DvbWorld 2002
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 553b48ac191..fdc19bba212 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -1,6 +1,6 @@
config DVB_USB
tristate "Support for various USB DVB devices"
- depends on DVB_CORE && USB && I2C && INPUT
+ depends on DVB_CORE && USB && I2C && IR_CORE
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 75afe4f81e3..7424b0493f9 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -194,8 +194,8 @@ static const struct firedtv_backend backend = {
static void handle_fcp(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
struct firedtv *f, *fdtv = NULL;
struct fw_device *device;
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 51d578a758a..b5f6a04f9c1 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,7 +1,7 @@
config DVB_FE_CUSTOMISE
bool "Customise the frontend modules to build"
depends on DVB_CORE
- default N
+ default y if EMBEDDED
help
This allows the user to select/deselect frontend drivers for their
hardware from the build.
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
index decdeda840d..fd0830ed10d 100644
--- a/drivers/media/dvb/mantis/Kconfig
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -1,6 +1,6 @@
config MANTIS_CORE
tristate "Mantis/Hopper PCI bridge based devices"
- depends on PCI && I2C && INPUT
+ depends on PCI && I2C && INPUT && IR_CORE
help
	  Support for PCI cards based on the Mantis and Hopper PCI bridge.
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig
index 85a222c4eaa..e520bceee0a 100644
--- a/drivers/media/dvb/siano/Kconfig
+++ b/drivers/media/dvb/siano/Kconfig
@@ -4,7 +4,7 @@
config SMS_SIANO_MDTV
tristate "Siano SMS1xxx based MDTV receiver"
- depends on DVB_CORE && INPUT && HAS_DMA
+ depends on DVB_CORE && IR_CORE && HAS_DMA
---help---
	  Choose Y or M here if you have an MDTV receiver with a Siano chipset.
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 7f2c94a15ab..d93468cd3a8 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1113,9 +1113,11 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
*/
prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
-
- if (list_empty(&coredev->buffers))
+ if (list_empty(&coredev->buffers)) {
+ spin_unlock_irqrestore(&coredev->bufferslock, flags);
schedule();
+ spin_lock_irqsave(&coredev->bufferslock, flags);
+ }
finish_wait(&coredev->buffer_mng_waitq, &wait);
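
The smscoreapi.c hunk above exists because schedule() must not be called with bufferslock held; the fix drops the spinlock across the sleep and retakes it afterwards. A minimal sketch of that wait pattern under the same assumptions, with hypothetical names (my_lock, my_waitq, my_list):

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static LIST_HEAD(my_list);

static void wait_for_item(void)
{
	DEFINE_WAIT(wait);
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	prepare_to_wait(&my_waitq, &wait, TASK_INTERRUPTIBLE);
	if (list_empty(&my_list)) {
		/* never sleep with a spinlock held */
		spin_unlock_irqrestore(&my_lock, flags);
		schedule();
		spin_lock_irqsave(&my_lock, flags);
	}
	finish_wait(&my_waitq, &wait);
	/* ... consume an entry from my_list under the lock ... */
	spin_unlock_irqrestore(&my_lock, flags);
}
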
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 32a7ec65ec4..debea8d1d31 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -98,7 +98,7 @@ config DVB_BUDGET_CI
select DVB_LNBP21 if !DVB_FE_CUSTOMISE
select DVB_TDA10023 if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMISE
- select VIDEO_IR
+ depends on VIDEO_IR
help
Support for simple SAA7146 based DVB cards
(so called Budget- or Nova-PCI cards) without onboard
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 2e15903b976..f6e4d047535 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -83,7 +83,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_HELPER_CHIPS_AUTO
bool "Autoselect pertinent encoders/decoders and other helper chips"
- default y
+ default y if !EMBEDDED
---help---
Most video cards may require additional modules to encode or
decode audio/video standards. This option will autoselect
@@ -792,10 +792,11 @@ config SOC_CAMERA_MT9M001
and colour models.
config SOC_CAMERA_MT9M111
- tristate "mt9m111 and mt9m112 support"
+ tristate "mt9m111, mt9m112 and mt9m131 support"
depends on SOC_CAMERA && I2C
help
- This driver supports MT9M111 and MT9M112 cameras from Micron
+ This driver supports MT9M111, MT9M112 and MT9M131 cameras from
+ Micron/Aptina
config SOC_CAMERA_MT9T031
tristate "mt9t031 support"
@@ -1016,4 +1017,13 @@ config VIDEO_MEM2MEM_TESTDEV
This is a virtual test device for the memory-to-memory driver
framework.
+config VIDEO_SAMSUNG_S5P_FIMC
+ tristate "Samsung S5P FIMC (video postprocessor) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+ select VIDEOBUF_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a v4l2 driver for the S5P camera interface
+ (video postprocessor)
+
endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 1051ecc602e..40f98fba5f8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -11,7 +11,7 @@ stkwebcam-objs := stk-webcam.o stk-sensor.o
omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
- v4l2-event.o
+ v4l2-event.o v4l2-ctrls.o
# V4L2 core modules
@@ -163,6 +163,7 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
diff --git a/drivers/media/video/bt8xx/Kconfig b/drivers/media/video/bt8xx/Kconfig
index 3077c45015f..1a4a89fdf76 100644
--- a/drivers/media/video/bt8xx/Kconfig
+++ b/drivers/media/video/bt8xx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_BT848
select I2C_ALGOBIT
select VIDEO_BTCX
select VIDEOBUF_DMA_SG
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 407fa61e4cd..685d6597ee7 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -411,7 +411,7 @@ void __devinit init_bttv_i2c_ir(struct bttv *btv)
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
+ i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
}
}
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index 3cc135a98d8..cc9e84d75ea 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -26,10 +26,10 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
@@ -43,6 +43,21 @@ module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");
+struct cs53l32a_state {
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+};
+
+static inline struct cs53l32a_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct cs53l32a_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct cs53l32a_state, hdl)->sd;
+}
+
/* ----------------------------------------------------------------------- */
static int cs53l32a_write(struct v4l2_subdev *sd, u8 reg, u8 value)
@@ -74,31 +89,20 @@ static int cs53l32a_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int cs53l32a_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int cs53l32a_s_ctrl(struct v4l2_ctrl *ctrl)
{
- if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
- ctrl->value = (cs53l32a_read(sd, 0x03) & 0xc0) != 0;
- return 0;
- }
- if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
- return -EINVAL;
- ctrl->value = (s8)cs53l32a_read(sd, 0x04);
- return 0;
-}
+ struct v4l2_subdev *sd = to_sd(ctrl);
-static int cs53l32a_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
- cs53l32a_write(sd, 0x03, ctrl->value ? 0xf0 : 0x30);
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ cs53l32a_write(sd, 0x03, ctrl->val ? 0xf0 : 0x30);
+ return 0;
+ case V4L2_CID_AUDIO_VOLUME:
+ cs53l32a_write(sd, 0x04, (u8)ctrl->val);
+ cs53l32a_write(sd, 0x05, (u8)ctrl->val);
return 0;
}
- if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
- return -EINVAL;
- if (ctrl->value > 12 || ctrl->value < -96)
- return -EINVAL;
- cs53l32a_write(sd, 0x04, (u8) ctrl->value);
- cs53l32a_write(sd, 0x05, (u8) ctrl->value);
- return 0;
+ return -EINVAL;
}
static int cs53l32a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
@@ -111,23 +115,30 @@ static int cs53l32a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_id
static int cs53l32a_log_status(struct v4l2_subdev *sd)
{
+ struct cs53l32a_state *state = to_state(sd);
u8 v = cs53l32a_read(sd, 0x01);
- u8 m = cs53l32a_read(sd, 0x03);
- s8 vol = cs53l32a_read(sd, 0x04);
- v4l2_info(sd, "Input: %d%s\n", (v >> 4) & 3,
- (m & 0xC0) ? " (muted)" : "");
- v4l2_info(sd, "Volume: %d dB\n", vol);
+ v4l2_info(sd, "Input: %d\n", (v >> 4) & 3);
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops cs53l32a_ctrl_ops = {
+ .s_ctrl = cs53l32a_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops cs53l32a_core_ops = {
.log_status = cs53l32a_log_status,
.g_chip_ident = cs53l32a_g_chip_ident,
- .g_ctrl = cs53l32a_g_ctrl,
- .s_ctrl = cs53l32a_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_audio_ops cs53l32a_audio_ops = {
@@ -151,6 +162,7 @@ static const struct v4l2_subdev_ops cs53l32a_ops = {
static int cs53l32a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct cs53l32a_state *state;
struct v4l2_subdev *sd;
int i;
@@ -164,9 +176,10 @@ static int cs53l32a_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
- if (sd == NULL)
+ state = kzalloc(sizeof(struct cs53l32a_state), GFP_KERNEL);
+ if (state == NULL)
return -ENOMEM;
+ sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &cs53l32a_ops);
for (i = 1; i <= 7; i++) {
@@ -175,15 +188,29 @@ static int cs53l32a_probe(struct i2c_client *client,
v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
}
+ v4l2_ctrl_handler_init(&state->hdl, 2);
+ v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -96, 12, 1, 0);
+ v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+
/* Set cs53l32a internal register for Adaptec 2010/2410 setup */
- cs53l32a_write(sd, 0x01, (u8) 0x21);
- cs53l32a_write(sd, 0x02, (u8) 0x29);
- cs53l32a_write(sd, 0x03, (u8) 0x30);
- cs53l32a_write(sd, 0x04, (u8) 0x00);
- cs53l32a_write(sd, 0x05, (u8) 0x00);
- cs53l32a_write(sd, 0x06, (u8) 0x00);
- cs53l32a_write(sd, 0x07, (u8) 0x00);
+ cs53l32a_write(sd, 0x01, 0x21);
+ cs53l32a_write(sd, 0x02, 0x29);
+ cs53l32a_write(sd, 0x03, 0x30);
+ cs53l32a_write(sd, 0x04, 0x00);
+ cs53l32a_write(sd, 0x05, 0x00);
+ cs53l32a_write(sd, 0x06, 0x00);
+ cs53l32a_write(sd, 0x07, 0x00);
/* Display results, should be 0x21,0x29,0x30,0x00,0x00,0x00,0x00 */
@@ -198,9 +225,11 @@ static int cs53l32a_probe(struct i2c_client *client,
static int cs53l32a_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct cs53l32a_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(sd);
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
return 0;
}
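
The cs53l32a conversion above is representative of the move to the new v4l2 control framework seen throughout this series: the per-control g_ctrl/s_ctrl callbacks are replaced by a v4l2_ctrl_handler embedded in the subdevice state, and the core ops are routed through the framework helpers. A condensed sketch of that registration pattern, using the same calls as the hunk above (the state struct, driver name and control choices are illustrative only):

#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

struct mydev_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

static int mydev_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* write ctrl->val to the hardware here */
	return 0;
}

static const struct v4l2_ctrl_ops mydev_ctrl_ops = {
	.s_ctrl = mydev_s_ctrl,
};

static int mydev_init_controls(struct mydev_state *state)
{
	v4l2_ctrl_handler_init(&state->hdl, 2);
	v4l2_ctrl_new_std(&state->hdl, &mydev_ctrl_ops,
			  V4L2_CID_AUDIO_VOLUME, -96, 12, 1, 0);
	v4l2_ctrl_new_std(&state->hdl, &mydev_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	state->sd.ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		return err;
	}
	return 0;
}
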
diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig
index baf7e91ee0f..76c054d1eef 100644
--- a/drivers/media/video/cx18/Kconfig
+++ b/drivers/media/video/cx18/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_CX18
depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C && EXPERIMENTAL
depends on INPUT # due to VIDEO_IR
select I2C_ALGOBIT
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index 809f7d37129..73ce90c2f57 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -117,7 +117,8 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
break;
}
- return i2c_new_probed_device(adap, &info, addr_list) == NULL ? -1 : 0;
+ return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
+ -1 : 0;
}
int cx18_i2c_register(struct cx18 *cx, unsigned idx)
diff --git a/drivers/media/video/cx231xx/Kconfig b/drivers/media/video/cx231xx/Kconfig
index 477d4ab5e9a..5ac7eceecec 100644
--- a/drivers/media/video/cx231xx/Kconfig
+++ b/drivers/media/video/cx231xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_CX231XX
depends on VIDEO_DEV && I2C && INPUT
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEOBUF_VMALLOC
select VIDEO_CX25840
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 2bf44ef10fe..e5c3c8da4be 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -38,6 +38,145 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
+/********************** COMMON CODE *********************/
+
+/* definitions for audio properties bits 29-28 */
+#define CX2341X_AUDIO_ENCODING_METHOD_MPEG 0
+#define CX2341X_AUDIO_ENCODING_METHOD_AC3 1
+#define CX2341X_AUDIO_ENCODING_METHOD_LPCM 2
+
+static const char *cx2341x_get_name(u32 id)
+{
+ switch (id) {
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
+ return "Spatial Filter Mode";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
+ return "Spatial Filter";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
+ return "Spatial Luma Filter Type";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
+ return "Spatial Chroma Filter Type";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
+ return "Temporal Filter Mode";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
+ return "Temporal Filter";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
+ return "Median Filter Type";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
+ return "Median Luma Filter Maximum";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
+ return "Median Luma Filter Minimum";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
+ return "Median Chroma Filter Maximum";
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
+ return "Median Chroma Filter Minimum";
+ case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
+ return "Insert Navigation Packets";
+ }
+ return NULL;
+}
+
+static const char **cx2341x_get_menu(u32 id)
+{
+ static const char *cx2341x_video_spatial_filter_mode_menu[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+
+ static const char *cx2341x_video_luma_spatial_filter_type_menu[] = {
+ "Off",
+ "1D Horizontal",
+ "1D Vertical",
+ "2D H/V Separable",
+ "2D Symmetric non-separable",
+ NULL
+ };
+
+ static const char *cx2341x_video_chroma_spatial_filter_type_menu[] = {
+ "Off",
+ "1D Horizontal",
+ NULL
+ };
+
+ static const char *cx2341x_video_temporal_filter_mode_menu[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+
+ static const char *cx2341x_video_median_filter_type_menu[] = {
+ "Off",
+ "Horizontal",
+ "Vertical",
+ "Horizontal/Vertical",
+ "Diagonal",
+ NULL
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
+ return cx2341x_video_spatial_filter_mode_menu;
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
+ return cx2341x_video_luma_spatial_filter_type_menu;
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
+ return cx2341x_video_chroma_spatial_filter_type_menu;
+ case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
+ return cx2341x_video_temporal_filter_mode_menu;
+ case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
+ return cx2341x_video_median_filter_type_menu;
+ }
+ return NULL;
+}
+
+static void cx2341x_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags)
+{
+ *name = cx2341x_get_name(id);
+ *flags = 0;
+
+ switch (id) {
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
+ *type = V4L2_CTRL_TYPE_MENU;
+ *min = 0;
+ *step = 0;
+ break;
+ case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
+ *type = V4L2_CTRL_TYPE_BOOLEAN;
+ *min = 0;
+ *max = *step = 1;
+ break;
+ default:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ }
+ switch (id) {
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
+ *flags |= V4L2_CTRL_FLAG_UPDATE;
+ break;
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
+ case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
+ *flags |= V4L2_CTRL_FLAG_SLIDER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ }
+}
+
+
+/********************** OLD CODE *********************/
+
/* Must be sorted from low to high control ID! */
const u32 cx2341x_mpeg_ctrls[] = {
V4L2_CID_MPEG_CLASS,
@@ -134,8 +273,6 @@ static const struct cx2341x_mpeg_params default_params = {
.video_chroma_median_filter_top = 255,
.video_chroma_median_filter_bottom = 0,
};
-
-
/* Map the control ID to the correct field in the cx2341x_mpeg_params
struct. Return -EINVAL if the ID is unknown, else return 0. */
static int cx2341x_get_ctrl(const struct cx2341x_mpeg_params *params,
@@ -415,83 +552,33 @@ static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl,
{
const char *name;
- qctrl->flags = 0;
switch (qctrl->id) {
/* MPEG controls */
case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
- name = "Spatial Filter Mode";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
- name = "Spatial Filter";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
- name = "Spatial Luma Filter Type";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
- name = "Spatial Chroma Filter Type";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
- name = "Temporal Filter Mode";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
- name = "Temporal Filter";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
- name = "Median Filter Type";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
- name = "Median Luma Filter Maximum";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
- name = "Median Luma Filter Minimum";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
- name = "Median Chroma Filter Maximum";
- break;
case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
- name = "Median Chroma Filter Minimum";
- break;
case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
- name = "Insert Navigation Packets";
- break;
+ cx2341x_ctrl_fill(qctrl->id, &name, &qctrl->type,
+ &min, &max, &step, &def, &qctrl->flags);
+ qctrl->minimum = min;
+ qctrl->maximum = max;
+ qctrl->step = step;
+ qctrl->default_value = def;
+ qctrl->reserved[0] = qctrl->reserved[1] = 0;
+ strlcpy(qctrl->name, name, sizeof(qctrl->name));
+ return 0;
default:
return v4l2_ctrl_query_fill(qctrl, min, max, step, def);
}
- switch (qctrl->id) {
- case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
- case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
- case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
- case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
- case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
- qctrl->type = V4L2_CTRL_TYPE_MENU;
- min = 0;
- step = 1;
- break;
- case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
- qctrl->type = V4L2_CTRL_TYPE_BOOLEAN;
- min = 0;
- max = 1;
- step = 1;
- break;
- default:
- qctrl->type = V4L2_CTRL_TYPE_INTEGER;
- break;
- }
- switch (qctrl->id) {
- case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
- case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
- case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
- qctrl->flags |= V4L2_CTRL_FLAG_UPDATE;
- break;
- }
- qctrl->minimum = min;
- qctrl->maximum = max;
- qctrl->step = step;
- qctrl->default_value = def;
- qctrl->reserved[0] = qctrl->reserved[1] = 0;
- snprintf(qctrl->name, sizeof(qctrl->name), name);
- return 0;
}
int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params,
@@ -797,42 +884,6 @@ const char **cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id)
NULL
};
- static const char *cx2341x_video_spatial_filter_mode_menu[] = {
- "Manual",
- "Auto",
- NULL
- };
-
- static const char *cx2341x_video_luma_spatial_filter_type_menu[] = {
- "Off",
- "1D Horizontal",
- "1D Vertical",
- "2D H/V Separable",
- "2D Symmetric non-separable",
- NULL
- };
-
- static const char *cx2341x_video_chroma_spatial_filter_type_menu[] = {
- "Off",
- "1D Horizontal",
- NULL
- };
-
- static const char *cx2341x_video_temporal_filter_mode_menu[] = {
- "Manual",
- "Auto",
- NULL
- };
-
- static const char *cx2341x_video_median_filter_type_menu[] = {
- "Off",
- "Horizontal",
- "Vertical",
- "Horizontal/Vertical",
- "Diagonal",
- NULL
- };
-
switch (id) {
case V4L2_CID_MPEG_STREAM_TYPE:
return (p->capabilities & CX2341X_CAP_HAS_TS) ?
@@ -844,26 +895,17 @@ const char **cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id)
case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
return NULL;
case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
- return cx2341x_video_spatial_filter_mode_menu;
case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
- return cx2341x_video_luma_spatial_filter_type_menu;
case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
- return cx2341x_video_chroma_spatial_filter_type_menu;
case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
- return cx2341x_video_temporal_filter_mode_menu;
case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
- return cx2341x_video_median_filter_type_menu;
+ return cx2341x_get_menu(id);
default:
return v4l2_ctrl_get_menu(id);
}
}
EXPORT_SYMBOL(cx2341x_ctrl_get_menu);
-/* definitions for audio properties bits 29-28 */
-#define CX2341X_AUDIO_ENCODING_METHOD_MPEG 0
-#define CX2341X_AUDIO_ENCODING_METHOD_AC3 1
-#define CX2341X_AUDIO_ENCODING_METHOD_LPCM 2
-
static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
{
params->audio_properties =
@@ -1195,9 +1237,490 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
}
EXPORT_SYMBOL(cx2341x_log_status);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+
+/********************** NEW CODE *********************/
+
+static inline struct cx2341x_handler *to_cxhdl(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct cx2341x_handler, hdl);
+}
+
+static int cx2341x_hdl_api(struct cx2341x_handler *hdl,
+ u32 cmd, int args, ...)
+{
+ u32 data[CX2341X_MBOX_MAX_DATA];
+ va_list vargs;
+ int i;
+
+ va_start(vargs, args);
+
+ for (i = 0; i < args; i++)
+ data[i] = va_arg(vargs, int);
+ va_end(vargs);
+ return hdl->func(hdl->priv, cmd, args, 0, data);
+}
+
+/* ctrl->handler->lock is held, so it is safe to access cur.val */
+static inline int cx2341x_neq(struct v4l2_ctrl *ctrl)
+{
+ return ctrl && ctrl->val != ctrl->cur.val;
+}
+
+static int cx2341x_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct cx2341x_handler *hdl = to_cxhdl(ctrl);
+ s32 val = ctrl->val;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES: {
+ /* video gop cluster */
+ int b = val + 1;
+ int gop = hdl->video_gop_size->val;
+
+ gop = b * ((gop + b - 1) / b);
+
+ /* Max GOP size = 34 */
+ while (gop > 34)
+ gop -= b;
+ hdl->video_gop_size->val = gop;
+ break;
+ }
+
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ /* stream type cluster */
+ hdl->video_encoding->val =
+ (hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_SS ||
+ hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ?
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_1 :
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_2;
+ if (hdl->video_encoding->val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
+ /* MPEG-1 implies CBR */
+ hdl->video_bitrate_mode->val =
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+ /* peak bitrate shall be >= normal bitrate */
+ if (hdl->video_bitrate_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
+ hdl->video_bitrate_peak->val < hdl->video_bitrate->val)
+ hdl->video_bitrate_peak->val = hdl->video_bitrate->val;
+ break;
+ }
+ return 0;
+}
+
+static int cx2341x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ static const int mpeg_stream_type[] = {
+ 0, /* MPEG-2 PS */
+ 1, /* MPEG-2 TS */
+ 2, /* MPEG-1 SS */
+ 14, /* DVD */
+ 11, /* VCD */
+ 12, /* SVCD */
+ };
+ struct cx2341x_handler *hdl = to_cxhdl(ctrl);
+ s32 val = ctrl->val;
+ u32 props;
+ int err;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ if (hdl->ops && hdl->ops->s_stream_vbi_fmt)
+ return hdl->ops->s_stream_vbi_fmt(hdl, val);
+ return 0;
+
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return cx2341x_hdl_api(hdl,
+ CX2341X_ENC_SET_ASPECT_RATIO, 1, val + 1);
+
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_CLOSURE, 1, val);
+
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_AUDIO, 1, val);
+
+ case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
+ return cx2341x_hdl_api(hdl,
+ CX2341X_ENC_SET_FRAME_DROP_RATE, 1, val);
+
+ case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_MISC, 2, 7, val);
+
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ /* audio properties cluster */
+ props = (hdl->audio_sampling_freq->val << 0) |
+ (hdl->audio_mode->val << 8) |
+ (hdl->audio_mode_extension->val << 10) |
+ (hdl->audio_crc->val << 14);
+ if (hdl->audio_emphasis->val == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17)
+ props |= 3 << 12;
+ else
+ props |= hdl->audio_emphasis->val << 12;
+
+ if (hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3) {
+ props |=
+#if 1
+ /* Not sure if this MPEG Layer II setting is required */
+ ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) |
+#endif
+ (hdl->audio_ac3_bitrate->val << 4) |
+ (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28);
+ } else {
+ /* Assuming MPEG Layer II */
+ props |=
+ ((3 - hdl->audio_encoding->val) << 2) |
+ ((1 + hdl->audio_l2_bitrate->val) << 4);
+ }
+ err = cx2341x_hdl_api(hdl,
+ CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, props);
+ if (err)
+ return err;
+
+ hdl->audio_properties = props;
+ if (hdl->audio_ac3_bitrate) {
+ int is_ac3 = hdl->audio_encoding->val ==
+ V4L2_MPEG_AUDIO_ENCODING_AC3;
+
+ v4l2_ctrl_activate(hdl->audio_ac3_bitrate, is_ac3);
+ v4l2_ctrl_activate(hdl->audio_l2_bitrate, !is_ac3);
+ }
+ v4l2_ctrl_activate(hdl->audio_mode_extension,
+ hdl->audio_mode->val == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO);
+ if (cx2341x_neq(hdl->audio_sampling_freq) &&
+ hdl->ops && hdl->ops->s_audio_sampling_freq)
+ return hdl->ops->s_audio_sampling_freq(hdl, hdl->audio_sampling_freq->val);
+ if (cx2341x_neq(hdl->audio_mode) &&
+ hdl->ops && hdl->ops->s_audio_mode)
+ return hdl->ops->s_audio_mode(hdl, hdl->audio_mode->val);
+ return 0;
+
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ /* video gop cluster */
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_PROPERTIES, 2,
+ hdl->video_gop_size->val,
+ hdl->video_b_frames->val + 1);
+
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ /* stream type cluster */
+ err = cx2341x_hdl_api(hdl,
+ CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[val]);
+ if (err)
+ return err;
+
+ err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_BIT_RATE, 5,
+ hdl->video_bitrate_mode->val,
+ hdl->video_bitrate->val,
+ hdl->video_bitrate_peak->val / 400, 0, 0);
+ if (err)
+ return err;
+
+ v4l2_ctrl_activate(hdl->video_bitrate_mode,
+ hdl->video_encoding->val != V4L2_MPEG_VIDEO_ENCODING_MPEG_1);
+ v4l2_ctrl_activate(hdl->video_bitrate_peak,
+ hdl->video_bitrate_mode->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+ if (cx2341x_neq(hdl->video_encoding) &&
+ hdl->ops && hdl->ops->s_video_encoding)
+ return hdl->ops->s_video_encoding(hdl, hdl->video_encoding->val);
+ return 0;
+
+ case V4L2_CID_MPEG_VIDEO_MUTE:
+ /* video mute cluster */
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_VIDEO, 1,
+ hdl->video_mute->val |
+ (hdl->video_mute_yuv->val << 8));
+
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: {
+ int active_filter;
+
+ /* video filter mode */
+ err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_MODE, 2,
+ hdl->video_spatial_filter_mode->val |
+ (hdl->video_temporal_filter_mode->val << 1),
+ hdl->video_median_filter_type->val);
+ if (err)
+ return err;
+
+ active_filter = hdl->video_spatial_filter_mode->val !=
+ V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO;
+ v4l2_ctrl_activate(hdl->video_spatial_filter, active_filter);
+ v4l2_ctrl_activate(hdl->video_luma_spatial_filter_type, active_filter);
+ v4l2_ctrl_activate(hdl->video_chroma_spatial_filter_type, active_filter);
+ active_filter = hdl->video_temporal_filter_mode->val !=
+ V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO;
+ v4l2_ctrl_activate(hdl->video_temporal_filter, active_filter);
+ active_filter = hdl->video_median_filter_type->val !=
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF;
+ v4l2_ctrl_activate(hdl->video_luma_median_filter_bottom, active_filter);
+ v4l2_ctrl_activate(hdl->video_luma_median_filter_top, active_filter);
+ v4l2_ctrl_activate(hdl->video_chroma_median_filter_bottom, active_filter);
+ v4l2_ctrl_activate(hdl->video_chroma_median_filter_top, active_filter);
+ return 0;
+ }
+
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
+ /* video filter type cluster */
+ return cx2341x_hdl_api(hdl,
+ CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2,
+ hdl->video_luma_spatial_filter_type->val,
+ hdl->video_chroma_spatial_filter_type->val);
+
+ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
+ /* video filter cluster */
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2,
+ hdl->video_spatial_filter->val,
+ hdl->video_temporal_filter->val);
+
+ case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
+ /* video median cluster */
+ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_CORING_LEVELS, 4,
+ hdl->video_luma_median_filter_bottom->val,
+ hdl->video_luma_median_filter_top->val,
+ hdl->video_chroma_median_filter_bottom->val,
+ hdl->video_chroma_median_filter_top->val);
+ }
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops cx2341x_ops = {
+ .try_ctrl = cx2341x_try_ctrl,
+ .s_ctrl = cx2341x_s_ctrl,
+};
+
+static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ u32 id, s32 min, s32 max, s32 step, s32 def)
+{
+ struct v4l2_ctrl_config cfg;
+
+ cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags);
+ cfg.ops = &cx2341x_ops;
+ cfg.id = id;
+ cfg.min = min;
+ cfg.max = max;
+ cfg.def = def;
+ if (cfg.type == V4L2_CTRL_TYPE_MENU) {
+ cfg.step = 0;
+ cfg.menu_skip_mask = step;
+ cfg.qmenu = cx2341x_get_menu(id);
+ } else {
+ cfg.step = step;
+ cfg.menu_skip_mask = 0;
+ }
+ return v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+}
+
+static struct v4l2_ctrl *cx2341x_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
+ u32 id, s32 min, s32 max, s32 step, s32 def)
+{
+ return v4l2_ctrl_new_std(hdl, &cx2341x_ops, id, min, max, step, def);
+}
+
+static struct v4l2_ctrl *cx2341x_ctrl_new_menu(struct v4l2_ctrl_handler *hdl,
+ u32 id, s32 max, s32 mask, s32 def)
+{
+ return v4l2_ctrl_new_std_menu(hdl, &cx2341x_ops, id, max, mask, def);
+}
+
+int cx2341x_handler_init(struct cx2341x_handler *cxhdl,
+ unsigned nr_of_controls_hint)
+{
+ struct v4l2_ctrl_handler *hdl = &cxhdl->hdl;
+ u32 caps = cxhdl->capabilities;
+ int has_sliced_vbi = caps & CX2341X_CAP_HAS_SLICED_VBI;
+ int has_ac3 = caps & CX2341X_CAP_HAS_AC3;
+ int has_ts = caps & CX2341X_CAP_HAS_TS;
+
+ cxhdl->width = 720;
+ cxhdl->height = 480;
+
+ v4l2_ctrl_handler_init(hdl, nr_of_controls_hint);
+
+ /* Add controls in ascending control ID order for fastest
+ insertion time. */
+ cxhdl->stream_type = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_STREAM_TYPE,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, has_ts ? 0 : 2,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
+ cxhdl->stream_vbi_fmt = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_STREAM_VBI_FMT,
+ V4L2_MPEG_STREAM_VBI_FMT_IVTV, has_sliced_vbi ? 0 : 2,
+ V4L2_MPEG_STREAM_VBI_FMT_NONE);
+ cxhdl->audio_sampling_freq = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
+ V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 0,
+ V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000);
+ cxhdl->audio_encoding = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_ENCODING,
+ V4L2_MPEG_AUDIO_ENCODING_AC3, has_ac3 ? ~0x12 : ~0x2,
+ V4L2_MPEG_AUDIO_ENCODING_LAYER_2);
+ cxhdl->audio_l2_bitrate = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_L2_BITRATE,
+ V4L2_MPEG_AUDIO_L2_BITRATE_384K, 0x1ff,
+ V4L2_MPEG_AUDIO_L2_BITRATE_224K);
+ cxhdl->audio_mode = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_MODE,
+ V4L2_MPEG_AUDIO_MODE_MONO, 0,
+ V4L2_MPEG_AUDIO_MODE_STEREO);
+ cxhdl->audio_mode_extension = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
+ V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 0,
+ V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4);
+ cxhdl->audio_emphasis = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_EMPHASIS,
+ V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 0,
+ V4L2_MPEG_AUDIO_EMPHASIS_NONE);
+ cxhdl->audio_crc = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_CRC,
+ V4L2_MPEG_AUDIO_CRC_CRC16, 0,
+ V4L2_MPEG_AUDIO_CRC_NONE);
+
+ cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_AUDIO_MUTE, 0, 1, 1, 0);
+ if (has_ac3)
+ cxhdl->audio_ac3_bitrate = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_AUDIO_AC3_BITRATE,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 0x03,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_224K);
+ cxhdl->video_encoding = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_VIDEO_ENCODING,
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 0,
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_2);
+ cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_MPEG_VIDEO_ASPECT_221x100, 0,
+ V4L2_MPEG_VIDEO_ASPECT_4x3);
+ cxhdl->video_b_frames = cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 33, 1, 2);
+ cxhdl->video_gop_size = cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 34, 1, cxhdl->is_50hz ? 12 : 15);
+ cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, 0, 1, 1, 1);
+ cxhdl->video_bitrate_mode = cx2341x_ctrl_new_menu(hdl,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+ cxhdl->video_bitrate = cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 0, 27000000, 1, 6000000);
+ cxhdl->video_bitrate_peak = cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ 0, 27000000, 1, 8000000);
+ cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, 0, 255, 1, 0);
+ cxhdl->video_mute = cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_MUTE, 0, 1, 1, 0);
+ cxhdl->video_mute_yuv = cx2341x_ctrl_new_std(hdl,
+ V4L2_CID_MPEG_VIDEO_MUTE_YUV, 0, 0xffffff, 1, 0x008080);
+
+ /* CX23415/6 specific */
+ cxhdl->video_spatial_filter_mode = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
+ V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL,
+ V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 0,
+ V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL);
+ cxhdl->video_spatial_filter = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
+ 0, 15, 1, 0);
+ cxhdl->video_luma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE,
+ 0,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR);
+ cxhdl->video_chroma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
+ V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF,
+ V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR,
+ 0,
+ V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR);
+ cxhdl->video_temporal_filter_mode = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
+ V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL,
+ V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO,
+ 0,
+ V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL);
+ cxhdl->video_temporal_filter = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
+ 0, 31, 1, 8);
+ cxhdl->video_median_filter_type = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG,
+ 0,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF);
+ cxhdl->video_luma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
+ 0, 255, 1, 0);
+ cxhdl->video_luma_median_filter_top = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
+ 0, 255, 1, 255);
+ cxhdl->video_chroma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
+ 0, 255, 1, 0);
+ cxhdl->video_chroma_median_filter_top = cx2341x_ctrl_new_custom(hdl,
+ V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
+ 0, 255, 1, 255);
+ cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS,
+ 0, 1, 1, 0);
+
+ if (hdl->error) {
+ int err = hdl->error;
+
+ v4l2_ctrl_handler_free(hdl);
+ return err;
+ }
+
+ v4l2_ctrl_cluster(8, &cxhdl->audio_sampling_freq);
+ v4l2_ctrl_cluster(2, &cxhdl->video_b_frames);
+ v4l2_ctrl_cluster(5, &cxhdl->stream_type);
+ v4l2_ctrl_cluster(2, &cxhdl->video_mute);
+ v4l2_ctrl_cluster(3, &cxhdl->video_spatial_filter_mode);
+ v4l2_ctrl_cluster(2, &cxhdl->video_luma_spatial_filter_type);
+ v4l2_ctrl_cluster(2, &cxhdl->video_spatial_filter);
+ v4l2_ctrl_cluster(4, &cxhdl->video_luma_median_filter_top);
+
+ return 0;
+}
+EXPORT_SYMBOL(cx2341x_handler_init);
+
+void cx2341x_handler_set_50hz(struct cx2341x_handler *cxhdl, int is_50hz)
+{
+ cxhdl->is_50hz = is_50hz;
+ cxhdl->video_gop_size->default_value = cxhdl->is_50hz ? 12 : 15;
+}
+EXPORT_SYMBOL(cx2341x_handler_set_50hz);
+
+int cx2341x_handler_setup(struct cx2341x_handler *cxhdl)
+{
+ int h = cxhdl->height;
+ int w = cxhdl->width;
+ int err;
+
+ err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_OUTPUT_PORT, 2, cxhdl->port, 0);
+ if (err)
+ return err;
+ err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_RATE, 1, cxhdl->is_50hz);
+ if (err)
+ return err;
+
+ if (v4l2_ctrl_g_ctrl(cxhdl->video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) {
+ w /= 2;
+ h /= 2;
+ }
+ err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w);
+ if (err)
+ return err;
+ return v4l2_ctrl_handler_setup(&cxhdl->hdl);
+}
+EXPORT_SYMBOL(cx2341x_handler_setup);
+
+void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy)
+{
+ v4l2_ctrl_grab(cxhdl->audio_sampling_freq, busy);
+ v4l2_ctrl_grab(cxhdl->audio_encoding, busy);
+ v4l2_ctrl_grab(cxhdl->audio_l2_bitrate, busy);
+ v4l2_ctrl_grab(cxhdl->audio_ac3_bitrate, busy);
+ v4l2_ctrl_grab(cxhdl->stream_vbi_fmt, busy);
+ v4l2_ctrl_grab(cxhdl->stream_type, busy);
+ v4l2_ctrl_grab(cxhdl->video_bitrate_mode, busy);
+ v4l2_ctrl_grab(cxhdl->video_bitrate, busy);
+ v4l2_ctrl_grab(cxhdl->video_bitrate_peak, busy);
+}
+EXPORT_SYMBOL(cx2341x_handler_set_busy);
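
For context, a minimal sketch of how a bridge driver is expected to use this handler API. The "mydrv" names are hypothetical, the <media/cx2341x.h> include path is assumed, and the mailbox callback plus output-port setup that cx2341x_handler_setup() also needs are omitted here:

#include <media/cx2341x.h>

/* Hypothetical bridge driver state; only the fields used below are shown. */
struct mydrv {
	struct cx2341x_handler cxhdl;
	int is_50hz;
};

static int mydrv_init_encoder(struct mydrv *drv)
{
	struct cx2341x_handler *cxhdl = &drv->cxhdl;
	int err;

	/* Advertise optional hardware features before the controls exist. */
	cxhdl->capabilities = CX2341X_CAP_HAS_SLICED_VBI;

	err = cx2341x_handler_init(cxhdl, 36);	/* rough hint: ~36 controls */
	if (err)
		return err;

	/* Adjust the GOP size default once the video standard is known. */
	cx2341x_handler_set_50hz(cxhdl, drv->is_50hz);

	/*
	 * After the encoder firmware is loaded, push all current control
	 * values to the hardware.  While streaming, stream-critical controls
	 * can be locked out with cx2341x_handler_set_busy(cxhdl, 1).
	 */
	return cx2341x_handler_setup(cxhdl);
}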
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig
index bcdda9a9aa9..e1367b35647 100644
--- a/drivers/media/video/cx23885/Kconfig
+++ b/drivers/media/video/cx23885/Kconfig
@@ -5,7 +5,7 @@ config VIDEO_CX23885
select VIDEO_BTCX
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEO_IR
+ depends on IR_CORE
select VIDEOBUF_DVB
select VIDEOBUF_DMA_SG
select VIDEO_CX25840
diff --git a/drivers/media/video/cx23885/Makefile b/drivers/media/video/cx23885/Makefile
index 5787ae24363..e2ee95f660d 100644
--- a/drivers/media/video/cx23885/Makefile
+++ b/drivers/media/video/cx23885/Makefile
@@ -1,7 +1,8 @@
cx23885-objs := cx23885-cards.o cx23885-video.o cx23885-vbi.o \
cx23885-core.o cx23885-i2c.o cx23885-dvb.o cx23885-417.o \
- cx23885-ioctl.o cx23885-ir.o cx23885-input.o cx23888-ir.o \
- netup-init.o cimax2.o netup-eeprom.o cx23885-f300.o
+ cx23885-ioctl.o cx23885-ir.o cx23885-av.o cx23885-input.o \
+ cx23888-ir.o netup-init.o cimax2.o netup-eeprom.o \
+ cx23885-f300.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
diff --git a/drivers/media/video/cx23885/cx23885-av.c b/drivers/media/video/cx23885/cx23885-av.c
new file mode 100644
index 00000000000..134ebddd860
--- /dev/null
+++ b/drivers/media/video/cx23885/cx23885-av.c
@@ -0,0 +1,35 @@
+/*
+ * Driver for the Conexant CX23885/7/8 PCIe bridge
+ *
+ * AV device support routines - non-input, non-v4l2_subdev routines
+ *
+ * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include "cx23885.h"
+
+void cx23885_av_work_handler(struct work_struct *work)
+{
+ struct cx23885_dev *dev =
+ container_of(work, struct cx23885_dev, cx25840_work);
+ bool handled;
+
+ v4l2_subdev_call(dev->sd_cx25840, core, interrupt_service_routine,
+ PCI_MSK_AV_CORE, &handled);
+ cx23885_irq_enable(dev, PCI_MSK_AV_CORE);
+}
diff --git a/drivers/media/video/cx23885/cx23885-av.h b/drivers/media/video/cx23885/cx23885-av.h
new file mode 100644
index 00000000000..d2915c3e53a
--- /dev/null
+++ b/drivers/media/video/cx23885/cx23885-av.h
@@ -0,0 +1,27 @@
+/*
+ * Driver for the Conexant CX23885/7/8 PCIe bridge
+ *
+ * AV device support routines - non-input, non-v4l2_subdev routines
+ *
+ * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _CX23885_AV_H_
+#define _CX23885_AV_H_
+void cx23885_av_work_handler(struct work_struct *work);
+#endif
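
cx23885_av_work_handler() is the bottom half of a split interrupt scheme: the CX25840-family A/V core is reached over I2C, which may sleep, so its status cannot be serviced directly in hard IRQ context. The PCI interrupt handler (changed in cx23885-core.c further down) masks PCI_MSK_AV_CORE and defers to this work handler, which services the core and only then re-arms the mask. Condensed sketch of the top-half side:

	/* Hard IRQ context: mask the A/V core interrupt, then defer. */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);	/* runs the handler above */
		handled++;
	}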
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 2014daedee8..e76ce8709af 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -30,6 +30,16 @@
#include "netup-init.h"
#include "cx23888-ir.h"
+static unsigned int enable_885_ir;
+module_param(enable_885_ir, int, 0644);
+MODULE_PARM_DESC(enable_885_ir,
+ "Enable integrated IR controller for supported\n"
+ "\t\t CX2388[57] boards that are wired for it:\n"
+ "\t\t\tHVR-1250 (reported safe)\n"
+ "\t\t\tTeVii S470 (reported unsafe)\n"
+ "\t\t This can cause an interrupt storm with some cards.\n"
+ "\t\t Default: 0 [Disabled]");
+
/* ------------------------------------------------------------------ */
/* board config info */
@@ -626,6 +636,9 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
case 79101:
/* WinTV-HVR1250 (PCIe, Retail, IR, half height,
ATSC and Basic analog */
+ case 79501:
+ /* WinTV-HVR1250 (PCIe, No IR, half height,
+ ATSC [at least] and Basic analog) */
case 79561:
/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
ATSC and Basic analog */
@@ -959,9 +972,37 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
int cx23885_ir_init(struct cx23885_dev *dev)
{
+ static struct v4l2_subdev_io_pin_config ir_rxtx_pin_cfg[] = {
+ {
+ .flags = V4L2_SUBDEV_IO_PIN_INPUT,
+ .pin = CX23885_PIN_IR_RX_GPIO19,
+ .function = CX23885_PAD_IR_RX,
+ .value = 0,
+ .strength = CX25840_PIN_DRIVE_MEDIUM,
+ }, {
+ .flags = V4L2_SUBDEV_IO_PIN_OUTPUT,
+ .pin = CX23885_PIN_IR_TX_GPIO20,
+ .function = CX23885_PAD_IR_TX,
+ .value = 0,
+ .strength = CX25840_PIN_DRIVE_MEDIUM,
+ }
+ };
+ const size_t ir_rxtx_pin_cfg_count = ARRAY_SIZE(ir_rxtx_pin_cfg);
+
+ static struct v4l2_subdev_io_pin_config ir_rx_pin_cfg[] = {
+ {
+ .flags = V4L2_SUBDEV_IO_PIN_INPUT,
+ .pin = CX23885_PIN_IR_RX_GPIO19,
+ .function = CX23885_PAD_IR_RX,
+ .value = 0,
+ .strength = CX25840_PIN_DRIVE_MEDIUM,
+ }
+ };
+ const size_t ir_rx_pin_cfg_count = ARRAY_SIZE(ir_rx_pin_cfg);
+
+ struct v4l2_subdev_ir_parameters params;
int ret = 0;
switch (dev->board) {
- case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
@@ -979,7 +1020,41 @@ int cx23885_ir_init(struct cx23885_dev *dev)
if (ret)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
- dev->pci_irqmask |= PCI_MSK_IR;
+ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
+ ir_rxtx_pin_cfg_count, ir_rxtx_pin_cfg);
+ /*
+ * For these boards we need to invert the Tx output via the
+ * IR controller to have the LED off while idle
+ */
+ v4l2_subdev_call(dev->sd_ir, ir, tx_g_parameters, &params);
+ params.enable = false;
+ params.shutdown = false;
+ params.invert_level = true;
+ v4l2_subdev_call(dev->sd_ir, ir, tx_s_parameters, &params);
+ params.shutdown = true;
+ v4l2_subdev_call(dev->sd_ir, ir, tx_s_parameters, &params);
+ break;
+ case CX23885_BOARD_TEVII_S470:
+ if (!enable_885_ir)
+ break;
+ dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
+ if (dev->sd_ir == NULL) {
+ ret = -ENODEV;
+ break;
+ }
+ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
+ ir_rx_pin_cfg_count, ir_rx_pin_cfg);
+ break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ if (!enable_885_ir)
+ break;
+ dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
+ if (dev->sd_ir == NULL) {
+ ret = -ENODEV;
+ break;
+ }
+ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
+ ir_rxtx_pin_cfg_count, ir_rxtx_pin_cfg);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
request_module("ir-kbd-i2c");
@@ -994,11 +1069,16 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
- dev->pci_irqmask &= ~PCI_MSK_IR;
- cx_clear(PCI_INT_MSK, PCI_MSK_IR);
+ cx23885_irq_remove(dev, PCI_MSK_IR);
cx23888_ir_remove(dev);
dev->sd_ir = NULL;
break;
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
+ /* sd_ir is a duplicate pointer to the AV Core, just clear it */
+ dev->sd_ir = NULL;
+ break;
}
}
@@ -1007,8 +1087,13 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
- if (dev->sd_ir && (dev->pci_irqmask & PCI_MSK_IR))
- cx_set(PCI_INT_MSK, PCI_MSK_IR);
+ if (dev->sd_ir)
+ cx23885_irq_add_enable(dev, PCI_MSK_IR);
+ break;
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ if (dev->sd_ir)
+ cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
}
}
@@ -1028,6 +1113,13 @@ void cx23885_card_setup(struct cx23885_dev *dev)
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ if (dev->i2c_bus[0].i2c_rc == 0) {
+ if (eeprom[0x80] != 0x84)
+ hauppauge_eeprom(dev, eeprom+0xc0);
+ else
+ hauppauge_eeprom(dev, eeprom+0x80);
+ }
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
@@ -1136,6 +1228,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
* loaded, ensure this happens.
*/
switch (dev->board) {
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ /* Currently only enabled for the integrated IR controller */
+ if (!enable_885_ir)
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1151,7 +1248,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", "cx25840", 0x88 >> 1, NULL);
- v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
+ if (dev->sd_cx25840) {
+ dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
+ v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
+ }
break;
}
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index ff76f64edac..f6b62e7398a 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -34,6 +34,7 @@
#include "cimax2.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
+#include "cx23885-av.h"
#include "cx23885-input.h"
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
@@ -299,6 +300,83 @@ static struct sram_channel cx23887_sram_channels[] = {
},
};
+void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
+
+ dev->pci_irqmask |= mask;
+
+ spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
+}
+
+void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
+
+ dev->pci_irqmask |= mask;
+ cx_set(PCI_INT_MSK, mask);
+
+ spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
+}
+
+void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
+{
+ u32 v;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
+
+ v = mask & dev->pci_irqmask;
+ if (v)
+ cx_set(PCI_INT_MSK, v);
+
+ spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
+}
+
+static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
+{
+ cx23885_irq_enable(dev, 0xffffffff);
+}
+
+void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
+
+ cx_clear(PCI_INT_MSK, mask);
+
+ spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
+}
+
+static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
+{
+ cx23885_irq_disable(dev, 0xffffffff);
+}
+
+void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
+
+ dev->pci_irqmask &= ~mask;
+ cx_clear(PCI_INT_MSK, mask);
+
+ spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
+}
+
+static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
+{
+ u32 v;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
+
+ v = cx_read(PCI_INT_MSK);
+
+ spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
+ return v;
+}
+
static int cx23885_risc_decode(u32 risc)
{
static char *instr[16] = {
@@ -548,7 +626,7 @@ static void cx23885_shutdown(struct cx23885_dev *dev)
cx_write(UART_CTL, 0);
/* Disable Interrupts */
- cx_write(PCI_INT_MSK, 0);
+ cx23885_irq_disable_all(dev);
cx_write(VID_A_INT_MSK, 0);
cx_write(VID_B_INT_MSK, 0);
cx_write(VID_C_INT_MSK, 0);
@@ -774,6 +852,8 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
{
int i;
+ spin_lock_init(&dev->pci_irqmask_lock);
+
mutex_init(&dev->lock);
mutex_init(&dev->gpio_lock);
@@ -820,9 +900,9 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->pci_bus = dev->pci->bus->number;
dev->pci_slot = PCI_SLOT(dev->pci->devfn);
- dev->pci_irqmask = 0x001f00;
+ cx23885_irq_add(dev, 0x001f00);
if (cx23885_boards[dev->board].cimax > 0)
- dev->pci_irqmask |= 0x01800000; /* for CiMaxes */
+ cx23885_irq_add(dev, 0x01800000); /* for CiMaxes */
/* External Master 1 Bus */
dev->i2c_bus[0].nr = 0;
@@ -1156,7 +1236,7 @@ static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
cx_read(DEV_CNTRL2));
dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
- cx_read(PCI_INT_MSK));
+ cx23885_irq_get_mask(dev));
dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
cx_read(AUDIO_INT_INT_MSK));
dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
@@ -1292,7 +1372,8 @@ static int cx23885_start_dma(struct cx23885_tsport *port,
dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
cx_set(port->reg_dma_ctl, port->dma_ctl_val);
- cx_set(PCI_INT_MSK, dev->pci_irqmask | port->pci_irqmask);
+ cx23885_irq_add(dev, port->pci_irqmask);
+ cx23885_irq_enable_all(dev);
break;
default:
BUG();
@@ -1650,10 +1731,10 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
u32 ts1_status, ts1_mask;
u32 ts2_status, ts2_mask;
int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
- bool ir_handled = false;
+ bool subdev_handled;
pci_status = cx_read(PCI_INT_STAT);
- pci_mask = cx_read(PCI_INT_MSK);
+ pci_mask = cx23885_irq_get_mask(dev);
vida_status = cx_read(VID_A_INT_STAT);
vida_mask = cx_read(VID_A_INT_MSK);
ts1_status = cx_read(VID_B_INT_STAT);
@@ -1681,7 +1762,7 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
- PCI_MSK_IR)) {
+ PCI_MSK_AV_CORE | PCI_MSK_IR)) {
if (pci_status & PCI_MSK_RISC_RD)
dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
@@ -1731,6 +1812,10 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
PCI_MSK_GPIO1);
+ if (pci_status & PCI_MSK_AV_CORE)
+ dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
+ PCI_MSK_AV_CORE);
+
if (pci_status & PCI_MSK_IR)
dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
PCI_MSK_IR);
@@ -1765,12 +1850,22 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
handled += cx23885_video_irq(dev, vida_status);
if (pci_status & PCI_MSK_IR) {
- v4l2_subdev_call(dev->sd_ir, ir, interrupt_service_routine,
- pci_status, &ir_handled);
- if (ir_handled)
+ subdev_handled = false;
+ v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
+ pci_status, &subdev_handled);
+ if (subdev_handled)
handled++;
}
+ if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
+ cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
+ if (!schedule_work(&dev->cx25840_work))
+ printk(KERN_ERR "%s: failed to set up deferred work for"
+ " AV Core/IR interrupt. Interrupt is disabled"
+ " and won't be re-enabled\n", dev->name);
+ handled++;
+ }
+
if (handled)
cx_write(PCI_INT_STAT, pci_status);
out:
@@ -1788,11 +1883,11 @@ static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
dev = to_cx23885(sd->v4l2_dev);
switch (notification) {
- case V4L2_SUBDEV_IR_RX_NOTIFY: /* Called in an IRQ context */
+ case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
if (sd == dev->sd_ir)
cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
break;
- case V4L2_SUBDEV_IR_TX_NOTIFY: /* Called in an IRQ context */
+ case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
if (sd == dev->sd_ir)
cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
break;
@@ -1801,6 +1896,7 @@ static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
+ INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
@@ -1967,7 +2063,7 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
switch (dev->board) {
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
- cx_set(PCI_INT_MSK, 0x01800000); /* for NetUP */
+ cx23885_irq_add_enable(dev, 0x01800000); /* for NetUP */
break;
}
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index d4746e06451..ed3d8f55029 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -99,7 +99,7 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
return -EIO;
if (!i2c_slave_did_ack(i2c_adap))
- return -EIO;
+ return -ENXIO;
dprintk(1, "%s() returns 0\n", __func__);
return 0;
@@ -120,11 +120,12 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
cx_write(bus->reg_wdata, wdata);
cx_write(bus->reg_ctrl, ctrl);
- retval = i2c_wait_done(i2c_adap);
- if (retval < 0)
- goto err;
- if (retval == 0)
+ if (!i2c_wait_done(i2c_adap))
goto eio;
+ if (!i2c_slave_did_ack(i2c_adap)) {
+ retval = -ENXIO;
+ goto err;
+ }
if (i2c_debug) {
printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
if (!(ctrl & I2C_NOSTOP))
@@ -145,10 +146,7 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
cx_write(bus->reg_wdata, wdata);
cx_write(bus->reg_ctrl, ctrl);
- retval = i2c_wait_done(i2c_adap);
- if (retval < 0)
- goto err;
- if (retval == 0)
+ if (!i2c_wait_done(i2c_adap))
goto eio;
if (i2c_debug) {
dprintk(1, " %02x", msg->buf[cnt]);
@@ -185,7 +183,7 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
return -EIO;
if (!i2c_slave_did_ack(i2c_adap))
- return -EIO;
+ return -ENXIO;
dprintk(1, "%s() returns 0\n", __func__);
@@ -209,11 +207,12 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
cx_write(bus->reg_addr, msg->addr << 25);
cx_write(bus->reg_ctrl, ctrl);
- retval = i2c_wait_done(i2c_adap);
- if (retval < 0)
- goto err;
- if (retval == 0)
+ if (!i2c_wait_done(i2c_adap))
goto eio;
+ if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
+ retval = -ENXIO;
+ goto err;
+ }
msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
if (i2c_debug) {
dprintk(1, " %02x", msg->buf[cnt]);
@@ -365,17 +364,10 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- /*
- * We can't call i2c_new_probed_device() because it uses
- * quick writes for probing and the IR receiver device only
- * replies to reads.
- */
- if (i2c_smbus_xfer(&bus->i2c_adap, addr_list[0], 0,
- I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK,
- NULL) >= 0) {
- info.addr = addr_list[0];
- i2c_new_device(&bus->i2c_adap, &info);
- }
+ /* Use quick read command for probe, some IR chips don't
+ * support writes */
+ i2c_new_probed_device(&bus->i2c_adap, &info, addr_list,
+ i2c_probe_func_quick_read);
}
return bus->i2c_rc;
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index d0b1613ede2..bb61870b8d6 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -44,40 +44,26 @@
#define MODULE_NAME "cx23885"
-static void convert_measurement(u32 x, struct ir_raw_event *y)
-{
- if (x == V4L2_SUBDEV_IR_PULSE_RX_SEQ_END) {
- y->pulse = false;
- y->duration = V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS;
- return;
- }
-
- y->pulse = (x & V4L2_SUBDEV_IR_PULSE_LEVEL_MASK) ? true : false;
- y->duration = x & V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS;
-}
-
static void cx23885_input_process_measurements(struct cx23885_dev *dev,
bool overrun)
{
struct cx23885_kernel_ir *kernel_ir = dev->kernel_ir;
- struct ir_raw_event kernel_ir_event;
- u32 sd_ir_data[64];
ssize_t num;
int count, i;
bool handle = false;
+ struct ir_raw_event ir_core_event[64];
do {
num = 0;
- v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) sd_ir_data,
- sizeof(sd_ir_data), &num);
+ v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
+ sizeof(ir_core_event), &num);
- count = num / sizeof(u32);
+ count = num / sizeof(struct ir_raw_event);
for (i = 0; i < count; i++) {
- convert_measurement(sd_ir_data[i], &kernel_ir_event);
ir_raw_event_store(kernel_ir->inp_dev,
- &kernel_ir_event);
+ &ir_core_event[i]);
handle = true;
}
} while (num != 0);
@@ -99,8 +85,10 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
/*
- * The only board we handle right now. However other boards
+ * The only boards we handle right now. However other boards
* using the CX2388x integrated IR controller should be similar
*/
break;
@@ -148,6 +136,7 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
@@ -170,7 +159,38 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
* mark is received as low logic level;
* falling edges are detected as rising edges; etc.
*/
- params.invert = true;
+ params.invert_level = true;
+ break;
+ case CX23885_BOARD_TEVII_S470:
+ /*
+ * The IR controller on this board only returns pulse widths.
+ * Any other mode setting will fail to set up the device.
+ */
+ params.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
+ params.enable = true;
+ params.interrupt_enable = true;
+ params.shutdown = false;
+
+ /* Setup for a standard NEC protocol */
+ params.carrier_freq = 37917; /* Hz, 455 kHz/12 for NEC */
+ params.carrier_range_lower = 33000; /* Hz */
+ params.carrier_range_upper = 43000; /* Hz */
+ params.duty_cycle = 33; /* percent, 33 percent for NEC */
+
+ /*
+ * NEC max pulse width: (64/3)/(455 kHz/12) * 16 nec_units
+ * (64/3)/(455 kHz/12) * 16 nec_units * 1.375 = 12378022 ns
+ */
+ params.max_pulse_width = 12378022; /* ns */
+
+ /*
+ * NEC noise filter min width: (64/3)/(455 kHz/12) * 1 nec_unit
+ * (64/3)/(455 kHz/12) * 1 nec_units * 0.625 = 351648 ns
+ */
+ params.noise_filter_min_width = 351648; /* ns */
+
+ params.modulation = false;
+ params.invert_level = true;
break;
}
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
@@ -244,12 +264,20 @@ int cx23885_input_init(struct cx23885_dev *dev)
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
- /* Integrated CX23888 IR controller */
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ /* Integrated CX2388[58] IR controller */
driver_type = RC_DRIVER_IR_RAW;
allowed_protos = IR_TYPE_ALL;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_RC5_HAUPPAUGE_NEW;
break;
+ case CX23885_BOARD_TEVII_S470:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = IR_TYPE_ALL;
+ /* A guess at the remote */
+ rc_map = RC_MAP_TEVII_NEC;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/media/video/cx23885/cx23885-ir.c b/drivers/media/video/cx23885/cx23885-ir.c
index 6ceabd4fba0..7125247dd25 100644
--- a/drivers/media/video/cx23885/cx23885-ir.c
+++ b/drivers/media/video/cx23885/cx23885-ir.c
@@ -72,7 +72,7 @@ void cx23885_ir_tx_work_handler(struct work_struct *work)
}
-/* Called in an IRQ context */
+/* Possibly called in an IRQ context */
void cx23885_ir_rx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events)
{
struct cx23885_dev *dev = to_cx23885(sd->v4l2_dev);
@@ -86,10 +86,18 @@ void cx23885_ir_rx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events)
set_bit(CX23885_IR_RX_HW_FIFO_OVERRUN, notifications);
if (events & V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN)
set_bit(CX23885_IR_RX_SW_FIFO_OVERRUN, notifications);
- schedule_work(&dev->ir_rx_work);
+
+ /*
+ * For the integrated AV core, we are already in a workqueue context.
+ * For the CX23888 integrated IR, we are in an interrupt context.
+ */
+ if (sd == dev->sd_cx25840)
+ cx23885_ir_rx_work_handler(&dev->ir_rx_work);
+ else
+ schedule_work(&dev->ir_rx_work);
}
-/* Called in an IRQ context */
+/* Possibly called in an IRQ context */
void cx23885_ir_tx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events)
{
struct cx23885_dev *dev = to_cx23885(sd->v4l2_dev);
@@ -97,5 +105,13 @@ void cx23885_ir_tx_v4l2_dev_notify(struct v4l2_subdev *sd, u32 events)
if (events & V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ)
set_bit(CX23885_IR_TX_FIFO_SERVICE_REQ, notifications);
- schedule_work(&dev->ir_tx_work);
+
+ /*
+ * For the integrated AV core, we are already in a workqueue context.
+ * For the CX23888 integrated IR, we are in an interrupt context.
+ */
+ if (sd == dev->sd_cx25840)
+ cx23885_ir_tx_work_handler(&dev->ir_tx_work);
+ else
+ schedule_work(&dev->ir_tx_work);
}
diff --git a/drivers/media/video/cx23885/cx23885-reg.h b/drivers/media/video/cx23885/cx23885-reg.h
index c0bc9a06895..a28772db11f 100644
--- a/drivers/media/video/cx23885/cx23885-reg.h
+++ b/drivers/media/video/cx23885/cx23885-reg.h
@@ -213,6 +213,7 @@ Channel manager Data Structure entry = 20 DWORD
#define DEV_CNTRL2 0x00040000
#define PCI_MSK_IR (1 << 28)
+#define PCI_MSK_AV_CORE (1 << 27)
#define PCI_MSK_GPIO1 (1 << 24)
#define PCI_MSK_GPIO0 (1 << 23)
#define PCI_MSK_APB_DMA (1 << 12)
diff --git a/drivers/media/video/cx23885/cx23885-vbi.c b/drivers/media/video/cx23885/cx23885-vbi.c
index 708a8c766d1..c0b60382ad1 100644
--- a/drivers/media/video/cx23885/cx23885-vbi.c
+++ b/drivers/media/video/cx23885/cx23885-vbi.c
@@ -74,7 +74,7 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
q->count = 1;
/* enable irqs */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | 0x01);
+ cx23885_irq_add_enable(dev, 0x01);
cx_set(VID_A_INT_MSK, 0x000022);
/* start dma */
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 4e44dcda387..da66e5f8d91 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -441,7 +441,7 @@ static int cx23885_start_video_dma(struct cx23885_dev *dev,
q->count = 1;
/* enable irq */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | 0x01);
+ cx23885_irq_add_enable(dev, 0x01);
cx_set(VID_A_INT_MSK, 0x000011);
/* start dma */
@@ -1205,6 +1205,21 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ struct cx23885_fh *fh = priv;
+ struct cx23885_dev *dev = fh->dev;
+
+ printk(KERN_INFO
+ "%s/0: ============ START LOG STATUS ============\n",
+ dev->name);
+ call_all(dev, core, log_status);
+ printk(KERN_INFO
+ "%s/0: ============= END LOG STATUS =============\n",
+ dev->name);
+ return 0;
+}
+
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl)
{
@@ -1410,6 +1425,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
+ .vidioc_log_status = vidioc_log_status,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
@@ -1449,7 +1465,7 @@ static const struct v4l2_file_operations radio_fops = {
void cx23885_video_unregister(struct cx23885_dev *dev)
{
dprintk(1, "%s()\n", __func__);
- cx_clear(PCI_INT_MSK, 1);
+ cx23885_irq_remove(dev, 0x01);
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
@@ -1486,7 +1502,8 @@ int cx23885_video_register(struct cx23885_dev *dev)
VID_A_DMA_CTL, 0x11, 0x00);
/* Don't enable VBI yet */
- cx_set(PCI_INT_MSK, 1);
+
+ cx23885_irq_add_enable(dev, 0x01);
if (TUNER_ABSENT != dev->tuner_type) {
struct v4l2_subdev *sd = NULL;
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index a33f2b71467..ed94b17dd8a 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -325,6 +325,7 @@ struct cx23885_dev {
u32 __iomem *lmmio;
u8 __iomem *bmmio;
int pci_irqmask;
+ spinlock_t pci_irqmask_lock; /* protects mask reg too */
int hwrevision;
/* This value is board specific and is used to configure the
@@ -365,6 +366,7 @@ struct cx23885_dev {
unsigned char radio_addr;
unsigned int has_radio;
struct v4l2_subdev *sd_cx25840;
+ struct work_struct cx25840_work;
/* Infrared */
struct v4l2_subdev *sd_ir;
@@ -403,7 +405,8 @@ static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
#define call_all(dev, o, f, args...) \
v4l2_device_call_all(&dev->v4l2_dev, 0, o, f, ##args)
-#define CX23885_HW_888_IR (1 << 0)
+#define CX23885_HW_888_IR (1 << 0)
+#define CX23885_HW_AV_CORE (1 << 1)
#define call_hw(dev, grpid, o, f, args...) \
v4l2_device_call_all(&dev->v4l2_dev, grpid, o, f, ##args)
@@ -484,6 +487,10 @@ extern u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask);
extern void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask,
int asoutput);
+extern void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask);
+extern void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask);
+extern void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask);
+extern void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask);
/* ----------------------------------------------------------- */
/* cx23885-cards.c */
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index f63d378257a..2502a0a6709 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -26,6 +26,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/ir-core.h>
#include "cx23885.h"
@@ -60,6 +61,8 @@ MODULE_PARM_DESC(ir_888_debug, "enable debug messages [CX23888 IR controller]");
#define CNTRL_CPL 0x00001000
#define CNTRL_LBM 0x00002000
#define CNTRL_R 0x00004000
+/* CX23888 specific control flag */
+#define CNTRL_IVO 0x00008000
#define CX23888_IR_TXCLK_REG 0x170004
#define TXCLK_TCD 0x0000FFFF
@@ -111,8 +114,18 @@ MODULE_PARM_DESC(ir_888_debug, "enable debug messages [CX23888 IR controller]");
#define CX23888_VIDCLK_FREQ 108000000 /* 108 MHz, BT.656 */
#define CX23888_IR_REFCLK_FREQ (CX23888_VIDCLK_FREQ / 2)
-#define CX23888_IR_RX_KFIFO_SIZE (512 * sizeof(u32))
-#define CX23888_IR_TX_KFIFO_SIZE (512 * sizeof(u32))
+/*
+ * We use this union internally for convenience, but callers to tx_write
+ * and rx_read will be expecting records of type struct ir_raw_event.
+ * Always ensure the size of this union is dictated by struct ir_raw_event.
+ */
+union cx23888_ir_fifo_rec {
+ u32 hw_fifo_data;
+ struct ir_raw_event ir_core_data;
+};
+
+#define CX23888_IR_RX_KFIFO_SIZE (256 * sizeof(union cx23888_ir_fifo_rec))
+#define CX23888_IR_TX_KFIFO_SIZE (256 * sizeof(union cx23888_ir_fifo_rec))
struct cx23888_ir_state {
struct v4l2_subdev sd;
@@ -423,6 +436,13 @@ static inline void control_tx_polarity_invert(struct cx23885_dev *dev,
invert ? CNTRL_CPL : 0);
}
+static inline void control_tx_level_invert(struct cx23885_dev *dev,
+ bool invert)
+{
+ cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_IVO,
+ invert ? CNTRL_IVO : 0);
+}
+
/*
* IR Rx & Tx Clock Register helpers
*/
@@ -449,8 +469,8 @@ static u32 txclk_tx_s_max_pulse_width(struct cx23885_dev *dev, u32 ns,
{
u64 pulse_clocks;
- if (ns > V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS)
- ns = V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS;
+ if (ns > IR_MAX_DURATION)
+ ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx23888_ir_write4(dev, CX23888_IR_TXCLK_REG, *divider);
@@ -462,8 +482,8 @@ static u32 rxclk_rx_s_max_pulse_width(struct cx23885_dev *dev, u32 ns,
{
u64 pulse_clocks;
- if (ns > V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS)
- ns = V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS;
+ if (ns > IR_MAX_DURATION)
+ ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx23888_ir_write4(dev, CX23888_IR_RXCLK_REG, *divider);
@@ -526,8 +546,8 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG);
u32 stats = cx23888_ir_read4(dev, CX23888_IR_STATS_REG);
- u32 rx_data[FIFO_RX_DEPTH];
- int i, j, k;
+ union cx23888_ir_fifo_rec rx_data[FIFO_RX_DEPTH];
+ unsigned int i, j, k;
u32 events, v;
int tsr, rsr, rto, ror, tse, rse, rte, roe, kror;
@@ -588,11 +608,12 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
for (j = 0;
(v & FIFO_RX_NDV) && j < FIFO_RX_DEPTH; j++) {
v = cx23888_ir_read4(dev, CX23888_IR_FIFO_REG);
- rx_data[i++] = v & ~FIFO_RX_NDV;
+ rx_data[i].hw_fifo_data = v & ~FIFO_RX_NDV;
+ i++;
}
if (i == 0)
break;
- j = i * sizeof(u32);
+ j = i * sizeof(union cx23888_ir_fifo_rec);
k = kfifo_in_locked(&state->rx_kfifo,
(unsigned char *) rx_data, j,
&state->rx_kfifo_lock);
@@ -651,10 +672,11 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
u16 divider = (u16) atomic_read(&state->rxclk_divider);
unsigned int i, n;
- u32 *p;
- u32 u, v;
+ union cx23888_ir_fifo_rec *p;
+ unsigned u, v;
- n = count / sizeof(u32) * sizeof(u32);
+ n = count / sizeof(union cx23888_ir_fifo_rec)
+ * sizeof(union cx23888_ir_fifo_rec);
if (n == 0) {
*num = 0;
return 0;
@@ -662,26 +684,28 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock);
- n /= sizeof(u32);
- *num = n * sizeof(u32);
+ n /= sizeof(union cx23888_ir_fifo_rec);
+ *num = n * sizeof(union cx23888_ir_fifo_rec);
+
+ for (p = (union cx23888_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) {
- for (p = (u32 *) buf, i = 0; i < n; p++, i++) {
- if ((*p & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
- *p = V4L2_SUBDEV_IR_PULSE_RX_SEQ_END;
+ if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
+ /* Assume RTO was because of no IR light input */
+ u = 0;
v4l2_dbg(2, ir_888_debug, sd, "rx read: end of rx\n");
- continue;
+ } else {
+ u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
+ if (invert)
+ u = u ? 0 : 1;
}
- u = (*p & FIFO_RXTX_LVL) ? V4L2_SUBDEV_IR_PULSE_LEVEL_MASK : 0;
- if (invert)
- u = u ? 0 : V4L2_SUBDEV_IR_PULSE_LEVEL_MASK;
+ v = (unsigned) pulse_width_count_to_ns(
+ (u16) (p->hw_fifo_data & FIFO_RXTX), divider);
+ if (v > IR_MAX_DURATION)
+ v = IR_MAX_DURATION;
- v = (u32) pulse_width_count_to_ns((u16) (*p & FIFO_RXTX),
- divider);
- if (v >= V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS)
- v = V4L2_SUBDEV_IR_PULSE_MAX_WIDTH_NS - 1;
-
- *p = u | v;
+ p->ir_core_data.pulse = u;
+ p->ir_core_data.duration = v;
v4l2_dbg(2, ir_888_debug, sd, "rx read: %10u ns %s\n",
v, u ? "mark" : "space");
@@ -740,7 +764,8 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd,
o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
- o->bytes_per_data_element = p->bytes_per_data_element = sizeof(u32);
+ o->bytes_per_data_element = p->bytes_per_data_element
+ = sizeof(union cx23888_ir_fifo_rec);
/* Before we tweak the hardware, we have to disable the receiver */
irqenable_rx(dev, 0);
@@ -762,12 +787,15 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd,
&p->carrier_range_upper);
o->carrier_range_lower = p->carrier_range_lower;
o->carrier_range_upper = p->carrier_range_upper;
+
+ p->max_pulse_width =
+ (u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider);
} else {
p->max_pulse_width =
rxclk_rx_s_max_pulse_width(dev, p->max_pulse_width,
&rxclk_divider);
- o->max_pulse_width = p->max_pulse_width;
}
+ o->max_pulse_width = p->max_pulse_width;
atomic_set(&state->rxclk_divider, rxclk_divider);
p->noise_filter_min_width =
@@ -782,8 +810,8 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd,
control_rx_s_edge_detection(dev, CNTRL_EDG_BOTH);
- o->invert = p->invert;
- atomic_set(&state->rx_invert, p->invert);
+ o->invert_level = p->invert_level;
+ atomic_set(&state->rx_invert, p->invert_level);
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
@@ -864,7 +892,8 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd,
o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
- o->bytes_per_data_element = p->bytes_per_data_element = sizeof(u32);
+ o->bytes_per_data_element = p->bytes_per_data_element
+ = sizeof(union cx23888_ir_fifo_rec);
/* Before we tweak the hardware, we have to disable the transmitter */
irqenable_tx(dev, 0);
@@ -880,12 +909,15 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd,
p->duty_cycle = cduty_tx_s_duty_cycle(dev, p->duty_cycle);
o->duty_cycle = p->duty_cycle;
+
+ p->max_pulse_width =
+ (u32) pulse_width_count_to_ns(FIFO_RXTX, txclk_divider);
} else {
p->max_pulse_width =
txclk_tx_s_max_pulse_width(dev, p->max_pulse_width,
&txclk_divider);
- o->max_pulse_width = p->max_pulse_width;
}
+ o->max_pulse_width = p->max_pulse_width;
atomic_set(&state->txclk_divider, txclk_divider);
p->resolution = clock_divider_to_resolution(txclk_divider);
@@ -894,8 +926,11 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd,
/* FIXME - make this dependent on resolution for better performance */
control_tx_irq_watermark(dev, TX_FIFO_HALF_EMPTY);
- control_tx_polarity_invert(dev, p->invert);
- o->invert = p->invert;
+ control_tx_polarity_invert(dev, p->invert_carrier_sense);
+ o->invert_carrier_sense = p->invert_carrier_sense;
+
+ control_tx_level_invert(dev, p->invert_level);
+ o->invert_level = p->invert_level;
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
@@ -988,12 +1023,10 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
"-%1d/+%1d, %u to %u Hz\n", i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
- } else {
- v4l2_info(sd, "\tMax measurable pulse width: %u us, "
- "%llu ns\n",
- pulse_width_count_to_us(FIFO_RXTX, rxclk),
- pulse_width_count_to_ns(FIFO_RXTX, rxclk));
}
+ v4l2_info(sd, "\tMax measurable pulse width: %u us, %llu ns\n",
+ pulse_width_count_to_us(FIFO_RXTX, rxclk),
+ pulse_width_count_to_ns(FIFO_RXTX, rxclk));
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
@@ -1025,19 +1058,20 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
cntrl & CNTRL_TFE ? "enabled" : "disabled");
v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
cntrl & CNTRL_TIC ? "not empty" : "half full or less");
- v4l2_info(sd, "\tSignal polarity: %s\n",
- cntrl & CNTRL_CPL ? "0:mark 1:space" : "0:space 1:mark");
+ v4l2_info(sd, "\tOutput pin level inversion %s\n",
+ cntrl & CNTRL_IVO ? "yes" : "no");
+ v4l2_info(sd, "\tCarrier polarity: %s\n",
+ cntrl & CNTRL_CPL ? "space:burst mark:noburst"
+ : "space:noburst mark:burst");
if (cntrl & CNTRL_MOD) {
v4l2_info(sd, "\tCarrier (16 clocks): %u Hz\n",
clock_divider_to_carrier_freq(txclk));
v4l2_info(sd, "\tCarrier duty cycle: %2u/16\n",
cduty + 1);
- } else {
- v4l2_info(sd, "\tMax pulse width: %u us, "
- "%llu ns\n",
- pulse_width_count_to_us(FIFO_RXTX, txclk),
- pulse_width_count_to_ns(FIFO_RXTX, txclk));
}
+ v4l2_info(sd, "\tMax pulse width: %u us, %llu ns\n",
+ pulse_width_count_to_us(FIFO_RXTX, txclk),
+ pulse_width_count_to_ns(FIFO_RXTX, txclk));
v4l2_info(sd, "\tBusy: %s\n",
stats & STATS_TBY ? "yes" : "no");
v4l2_info(sd, "\tFIFO service requested: %s\n",
@@ -1111,11 +1145,10 @@ static const struct v4l2_subdev_core_ops cx23888_ir_core_ops = {
.g_register = cx23888_ir_g_register,
.s_register = cx23888_ir_s_register,
#endif
+ .interrupt_service_routine = cx23888_ir_irq_handler,
};
static const struct v4l2_subdev_ir_ops cx23888_ir_ir_ops = {
- .interrupt_service_routine = cx23888_ir_irq_handler,
-
.rx_read = cx23888_ir_rx_read,
.rx_g_parameters = cx23888_ir_rx_g_parameters,
.rx_s_parameters = cx23888_ir_rx_s_parameters,
@@ -1131,7 +1164,7 @@ static const struct v4l2_subdev_ops cx23888_ir_controller_ops = {
};
static const struct v4l2_subdev_ir_parameters default_rx_params = {
- .bytes_per_data_element = sizeof(u32),
+ .bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec),
.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
.enable = false,
@@ -1146,11 +1179,11 @@ static const struct v4l2_subdev_ir_parameters default_rx_params = {
.noise_filter_min_width = 333333, /* ns */
.carrier_range_lower = 35000,
.carrier_range_upper = 37000,
- .invert = false,
+ .invert_level = false,
};
static const struct v4l2_subdev_ir_parameters default_tx_params = {
- .bytes_per_data_element = sizeof(u32),
+ .bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec),
.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
.enable = false,
@@ -1160,7 +1193,8 @@ static const struct v4l2_subdev_ir_parameters default_tx_params = {
.modulation = true,
.carrier_freq = 36000, /* 36 kHz - RC-5 carrier */
.duty_cycle = 25, /* 25 % - RC-5 carrier */
- .invert = false,
+ .invert_level = false,
+ .invert_carrier_sense = false,
};
int cx23888_ir_probe(struct cx23885_dev *dev)
diff --git a/drivers/media/video/cx25840/Makefile b/drivers/media/video/cx25840/Makefile
index 6e8665be895..2ee96d3973b 100644
--- a/drivers/media/video/cx25840/Makefile
+++ b/drivers/media/video/cx25840/Makefile
@@ -1,5 +1,5 @@
cx25840-objs := cx25840-core.o cx25840-audio.o cx25840-firmware.o \
- cx25840-vbi.o
+ cx25840-vbi.o cx25840-ir.o
obj-$(CONFIG_VIDEO_CX25840) += cx25840.o
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index 45608d50529..6faad34df3a 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -474,33 +474,10 @@ void cx25840_audio_set_path(struct i2c_client *client)
cx25840_and_or(client, 0x803, ~0x10, 0x10);
}
-static int get_volume(struct i2c_client *client)
-{
- struct cx25840_state *state = to_state(i2c_get_clientdata(client));
- int vol;
-
- if (state->unmute_volume >= 0)
- return state->unmute_volume;
-
- /* Volume runs +18dB to -96dB in 1/2dB steps
- * change to fit the msp3400 -114dB to +12dB range */
-
- /* check PATH1_VOLUME */
- vol = 228 - cx25840_read(client, 0x8d4);
- vol = (vol / 2) + 23;
- return vol << 9;
-}
-
static void set_volume(struct i2c_client *client, int volume)
{
- struct cx25840_state *state = to_state(i2c_get_clientdata(client));
int vol;
- if (state->unmute_volume >= 0) {
- state->unmute_volume = volume;
- return;
- }
-
/* Convert the volume to msp3400 values (0-127) */
vol = volume >> 9;
@@ -517,52 +494,6 @@ static void set_volume(struct i2c_client *client, int volume)
cx25840_write(client, 0x8d4, 228 - (vol * 2));
}
-static int get_bass(struct i2c_client *client)
-{
- /* bass is 49 steps +12dB to -12dB */
-
- /* check PATH1_EQ_BASS_VOL */
- int bass = cx25840_read(client, 0x8d9) & 0x3f;
- bass = (((48 - bass) * 0xffff) + 47) / 48;
- return bass;
-}
-
-static void set_bass(struct i2c_client *client, int bass)
-{
- /* PATH1_EQ_BASS_VOL */
- cx25840_and_or(client, 0x8d9, ~0x3f, 48 - (bass * 48 / 0xffff));
-}
-
-static int get_treble(struct i2c_client *client)
-{
- /* treble is 49 steps +12dB to -12dB */
-
- /* check PATH1_EQ_TREBLE_VOL */
- int treble = cx25840_read(client, 0x8db) & 0x3f;
- treble = (((48 - treble) * 0xffff) + 47) / 48;
- return treble;
-}
-
-static void set_treble(struct i2c_client *client, int treble)
-{
- /* PATH1_EQ_TREBLE_VOL */
- cx25840_and_or(client, 0x8db, ~0x3f, 48 - (treble * 48 / 0xffff));
-}
-
-static int get_balance(struct i2c_client *client)
-{
- /* balance is 7 bit, 0 to -96dB */
-
- /* check PATH1_BAL_LEVEL */
- int balance = cx25840_read(client, 0x8d5) & 0x7f;
- /* check PATH1_BAL_LEFT */
- if ((cx25840_read(client, 0x8d5) & 0x80) == 0)
- balance = 0x80 - balance;
- else
- balance = 0x80 + balance;
- return balance << 8;
-}
-
static void set_balance(struct i2c_client *client, int balance)
{
int bal = balance >> 8;
@@ -579,31 +510,6 @@ static void set_balance(struct i2c_client *client, int balance)
}
}
-static int get_mute(struct i2c_client *client)
-{
- struct cx25840_state *state = to_state(i2c_get_clientdata(client));
-
- return state->unmute_volume >= 0;
-}
-
-static void set_mute(struct i2c_client *client, int mute)
-{
- struct cx25840_state *state = to_state(i2c_get_clientdata(client));
-
- if (mute && state->unmute_volume == -1) {
- int vol = get_volume(client);
-
- set_volume(client, 0);
- state->unmute_volume = vol;
- }
- else if (!mute && state->unmute_volume != -1) {
- int vol = state->unmute_volume;
-
- state->unmute_volume = -1;
- set_volume(client, vol);
- }
-}
-
int cx25840_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -624,25 +530,31 @@ int cx25840_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
return retval;
}
-int cx25840_audio_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int cx25840_audio_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = get_volume(client);
+ if (state->mute->val)
+ set_volume(client, 0);
+ else
+ set_volume(client, state->volume->val);
break;
case V4L2_CID_AUDIO_BASS:
- ctrl->value = get_bass(client);
+ /* PATH1_EQ_BASS_VOL */
+ cx25840_and_or(client, 0x8d9, ~0x3f,
+ 48 - (ctrl->val * 48 / 0xffff));
break;
case V4L2_CID_AUDIO_TREBLE:
- ctrl->value = get_treble(client);
+ /* PATH1_EQ_TREBLE_VOL */
+ cx25840_and_or(client, 0x8db, ~0x3f,
+ 48 - (ctrl->val * 48 / 0xffff));
break;
case V4L2_CID_AUDIO_BALANCE:
- ctrl->value = get_balance(client);
- break;
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = get_mute(client);
+ set_balance(client, ctrl->val);
break;
default:
return -EINVAL;
@@ -650,28 +562,6 @@ int cx25840_audio_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return 0;
}
-int cx25840_audio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- set_volume(client, ctrl->value);
- break;
- case V4L2_CID_AUDIO_BASS:
- set_bass(client, ctrl->value);
- break;
- case V4L2_CID_AUDIO_TREBLE:
- set_treble(client, ctrl->value);
- break;
- case V4L2_CID_AUDIO_BALANCE:
- set_balance(client, ctrl->value);
- break;
- case V4L2_CID_AUDIO_MUTE:
- set_mute(client, ctrl->value);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
+const struct v4l2_ctrl_ops cx25840_audio_ctrl_ops = {
+ .s_ctrl = cx25840_audio_s_ctrl,
+};
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index bb4872b2ceb..86ca8c2359d 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -15,6 +15,9 @@
*
* CX23885 support by Steven Toth <stoth@linuxtv.org>.
*
+ * CX2388[578] IRQ handling, IO Pin mux configuration and other small fixes are
+ * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -48,6 +51,28 @@ MODULE_DESCRIPTION("Conexant CX25840 audio/video decoder driver");
MODULE_AUTHOR("Ulf Eklund, Chris Kennedy, Hans Verkuil, Tyler Trafford");
MODULE_LICENSE("GPL");
+#define CX25840_VID_INT_STAT_REG 0x410
+#define CX25840_VID_INT_STAT_BITS 0x0000ffff
+#define CX25840_VID_INT_MASK_BITS 0xffff0000
+#define CX25840_VID_INT_MASK_SHFT 16
+#define CX25840_VID_INT_MASK_REG 0x412
+
+#define CX23885_AUD_MC_INT_MASK_REG 0x80c
+#define CX23885_AUD_MC_INT_STAT_BITS 0xffff0000
+#define CX23885_AUD_MC_INT_CTRL_BITS 0x0000ffff
+#define CX23885_AUD_MC_INT_STAT_SHFT 16
+
+#define CX25840_AUD_INT_CTRL_REG 0x812
+#define CX25840_AUD_INT_STAT_REG 0x813
+
+#define CX23885_PIN_CTRL_IRQ_REG 0x123
+#define CX23885_PIN_CTRL_IRQ_IR_STAT 0x40
+#define CX23885_PIN_CTRL_IRQ_AUD_STAT 0x20
+#define CX23885_PIN_CTRL_IRQ_VID_STAT 0x10
+
+#define CX25840_IR_STATS_REG 0x210
+#define CX25840_IR_IRQEN_REG 0x214
+
static int cx25840_debug;
module_param_named(debug,cx25840_debug, int, 0644);
@@ -80,33 +105,53 @@ int cx25840_write4(struct i2c_client *client, u16 addr, u32 value)
u8 cx25840_read(struct i2c_client * client, u16 addr)
{
- u8 buffer[2];
- buffer[0] = addr >> 8;
- buffer[1] = addr & 0xff;
-
- if (i2c_master_send(client, buffer, 2) < 2)
- return 0;
-
- if (i2c_master_recv(client, buffer, 1) < 1)
+ struct i2c_msg msgs[2];
+ u8 tx_buf[2], rx_buf[1];
+
+ /* Write register address */
+ tx_buf[0] = addr >> 8;
+ tx_buf[1] = addr & 0xff;
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (char *) tx_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = (char *) rx_buf;
+
+ if (i2c_transfer(client->adapter, msgs, 2) < 2)
return 0;
- return buffer[0];
+ return rx_buf[0];
}
u32 cx25840_read4(struct i2c_client * client, u16 addr)
{
- u8 buffer[4];
- buffer[0] = addr >> 8;
- buffer[1] = addr & 0xff;
-
- if (i2c_master_send(client, buffer, 2) < 2)
- return 0;
-
- if (i2c_master_recv(client, buffer, 4) < 4)
+ struct i2c_msg msgs[2];
+ u8 tx_buf[2], rx_buf[4];
+
+ /* Write register address */
+ tx_buf[0] = addr >> 8;
+ tx_buf[1] = addr & 0xff;
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (char *) tx_buf;
+
+ /* Read data from registers */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 4;
+ msgs[1].buf = (char *) rx_buf;
+
+ if (i2c_transfer(client->adapter, msgs, 2) < 2)
return 0;
- return (buffer[3] << 24) | (buffer[2] << 16) |
- (buffer[1] << 8) | buffer[0];
+ return (rx_buf[3] << 24) | (rx_buf[2] << 16) | (rx_buf[1] << 8) |
+ rx_buf[0];
}
int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask,
@@ -117,6 +162,14 @@ int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask,
or_value);
}
+int cx25840_and_or4(struct i2c_client *client, u16 addr, u32 and_mask,
+ u32 or_value)
+{
+ return cx25840_write4(client, addr,
+ (cx25840_read4(client, addr) & and_mask) |
+ or_value);
+}
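
For orientation, the new 32-bit read-modify-write helper mirrors the existing cx25840_and_or(); a minimal, hypothetical use is sketched below (the register address and bit values are chosen only for illustration and are not taken from the patch):

static void example_and_or4(struct i2c_client *client)
{
	/* Hypothetical: clear bit 4 and set bit 0 of the 32-bit register 0x120 */
	cx25840_and_or4(client, 0x120, ~(u32)0x10, 0x01);
}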
+
/* ----------------------------------------------------------------------- */
static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
@@ -124,6 +177,158 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
/* ----------------------------------------------------------------------- */
+static int cx23885_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
+ struct v4l2_subdev_io_pin_config *p)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int i;
+ u32 pin_ctrl;
+ u8 gpio_oe, gpio_data, strength;
+
+ pin_ctrl = cx25840_read4(client, 0x120);
+ gpio_oe = cx25840_read(client, 0x160);
+ gpio_data = cx25840_read(client, 0x164);
+
+ for (i = 0; i < n; i++) {
+ strength = p[i].strength;
+ if (strength > CX25840_PIN_DRIVE_FAST)
+ strength = CX25840_PIN_DRIVE_FAST;
+
+ switch (p[i].pin) {
+ case CX23885_PIN_IRQ_N_GPIO16:
+ if (p[i].function != CX23885_PAD_IRQ_N) {
+ /* GPIO16 */
+ pin_ctrl &= ~(0x1 << 25);
+ } else {
+ /* IRQ_N */
+ if (p[i].flags &
+ (V4L2_SUBDEV_IO_PIN_DISABLE |
+ V4L2_SUBDEV_IO_PIN_INPUT)) {
+ pin_ctrl &= ~(0x1 << 25);
+ } else {
+ pin_ctrl |= (0x1 << 25);
+ }
+ if (p[i].flags &
+ V4L2_SUBDEV_IO_PIN_ACTIVE_LOW) {
+ pin_ctrl &= ~(0x1 << 24);
+ } else {
+ pin_ctrl |= (0x1 << 24);
+ }
+ }
+ break;
+ case CX23885_PIN_IR_RX_GPIO19:
+ if (p[i].function != CX23885_PAD_GPIO19) {
+ /* IR_RX */
+ gpio_oe |= (0x1 << 0);
+ pin_ctrl &= ~(0x3 << 18);
+ pin_ctrl |= (strength << 18);
+ } else {
+ /* GPIO19 */
+ gpio_oe &= ~(0x1 << 0);
+ if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
+ gpio_data &= ~(0x1 << 0);
+ gpio_data |= ((p[i].value & 0x1) << 0);
+ }
+ pin_ctrl &= ~(0x3 << 12);
+ pin_ctrl |= (strength << 12);
+ }
+ break;
+ case CX23885_PIN_IR_TX_GPIO20:
+ if (p[i].function != CX23885_PAD_GPIO20) {
+ /* IR_TX */
+ gpio_oe |= (0x1 << 1);
+ if (p[i].flags & V4L2_SUBDEV_IO_PIN_DISABLE)
+ pin_ctrl &= ~(0x1 << 10);
+ else
+ pin_ctrl |= (0x1 << 10);
+ pin_ctrl &= ~(0x3 << 18);
+ pin_ctrl |= (strength << 18);
+ } else {
+ /* GPIO20 */
+ gpio_oe &= ~(0x1 << 1);
+ if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
+ gpio_data &= ~(0x1 << 1);
+ gpio_data |= ((p[i].value & 0x1) << 1);
+ }
+ pin_ctrl &= ~(0x3 << 12);
+ pin_ctrl |= (strength << 12);
+ }
+ break;
+ case CX23885_PIN_I2S_SDAT_GPIO21:
+ if (p[i].function != CX23885_PAD_GPIO21) {
+ /* I2S_SDAT */
+ /* TODO: Input or Output config */
+ gpio_oe |= (0x1 << 2);
+ pin_ctrl &= ~(0x3 << 22);
+ pin_ctrl |= (strength << 22);
+ } else {
+ /* GPIO21 */
+ gpio_oe &= ~(0x1 << 2);
+ if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
+ gpio_data &= ~(0x1 << 2);
+ gpio_data |= ((p[i].value & 0x1) << 2);
+ }
+ pin_ctrl &= ~(0x3 << 12);
+ pin_ctrl |= (strength << 12);
+ }
+ break;
+ case CX23885_PIN_I2S_WCLK_GPIO22:
+ if (p[i].function != CX23885_PAD_GPIO22) {
+ /* I2S_WCLK */
+ /* TODO: Input or Output config */
+ gpio_oe |= (0x1 << 3);
+ pin_ctrl &= ~(0x3 << 22);
+ pin_ctrl |= (strength << 22);
+ } else {
+ /* GPIO22 */
+ gpio_oe &= ~(0x1 << 3);
+ if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
+ gpio_data &= ~(0x1 << 3);
+ gpio_data |= ((p[i].value & 0x1) << 3);
+ }
+ pin_ctrl &= ~(0x3 << 12);
+ pin_ctrl |= (strength << 12);
+ }
+ break;
+ case CX23885_PIN_I2S_BCLK_GPIO23:
+ if (p[i].function != CX23885_PAD_GPIO23) {
+ /* I2S_BCLK */
+ /* TODO: Input or Output config */
+ gpio_oe |= (0x1 << 4);
+ pin_ctrl &= ~(0x3 << 22);
+ pin_ctrl |= (strength << 22);
+ } else {
+ /* GPIO23 */
+ gpio_oe &= ~(0x1 << 4);
+ if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
+ gpio_data &= ~(0x1 << 4);
+ gpio_data |= ((p[i].value & 0x1) << 4);
+ }
+ pin_ctrl &= ~(0x3 << 12);
+ pin_ctrl |= (strength << 12);
+ }
+ break;
+ }
+ }
+
+ cx25840_write(client, 0x164, gpio_data);
+ cx25840_write(client, 0x160, gpio_oe);
+ cx25840_write4(client, 0x120, pin_ctrl);
+ return 0;
+}
+
+static int common_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
+ struct v4l2_subdev_io_pin_config *pincfg)
+{
+ struct cx25840_state *state = to_state(sd);
+
+ if (is_cx2388x(state))
+ return cx23885_s_io_pin_config(sd, n, pincfg);
+ return 0;
+}
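
For orientation, a bridge driver reaches this handler through the .s_io_pin_config core op registered further down. A minimal, hypothetical caller is sketched here (the function name and chosen field values are for illustration only, using just the flags and constants that appear in this patch):

static int example_route_irq_pin(struct v4l2_subdev *sd)
{
	/* Hypothetical: mux the shared pin as an active-low IRQ_N output */
	struct v4l2_subdev_io_pin_config pins[] = {
		{
			.pin      = CX23885_PIN_IRQ_N_GPIO16,
			.function = CX23885_PAD_IRQ_N,
			.flags    = V4L2_SUBDEV_IO_PIN_ACTIVE_LOW,
			.strength = CX25840_PIN_DRIVE_FAST,
		},
	};

	return v4l2_subdev_call(sd, core, s_io_pin_config,
				ARRAY_SIZE(pins), pins);
}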
+
+/* ----------------------------------------------------------------------- */
+
static void init_dll1(struct i2c_client *client)
{
/* This is the Hauppauge sequence used to
@@ -420,6 +625,13 @@ static void cx23885_initialize(struct i2c_client *client)
/* start microcontroller */
cx25840_and_or(client, 0x803, ~0x10, 0x10);
+
+ /* Disable and clear video interrupts - we don't use them */
+ cx25840_write4(client, CX25840_VID_INT_STAT_REG, 0xffffffff);
+
+ /* Disable and clear audio interrupts - we don't use them */
+ cx25840_write(client, CX25840_AUD_INT_CTRL_REG, 0xff);
+ cx25840_write(client, CX25840_AUD_INT_STAT_REG, 0xff);
}
/* ----------------------------------------------------------------------- */
@@ -909,102 +1121,29 @@ static int set_v4lstd(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
-static int cx25840_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct cx25840_state *state = to_state(sd);
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
- case CX25840_CID_ENABLE_PVR150_WORKAROUND:
- state->pvr150_workaround = ctrl->value;
- set_input(client, state->vid_input, state->aud_input);
- break;
-
case V4L2_CID_BRIGHTNESS:
- if (ctrl->value < 0 || ctrl->value > 255) {
- v4l_err(client, "invalid brightness setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx25840_write(client, 0x414, ctrl->value - 128);
+ cx25840_write(client, 0x414, ctrl->val - 128);
break;
case V4L2_CID_CONTRAST:
- if (ctrl->value < 0 || ctrl->value > 127) {
- v4l_err(client, "invalid contrast setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx25840_write(client, 0x415, ctrl->value << 1);
+ cx25840_write(client, 0x415, ctrl->val << 1);
break;
case V4L2_CID_SATURATION:
- if (ctrl->value < 0 || ctrl->value > 127) {
- v4l_err(client, "invalid saturation setting %d\n",
- ctrl->value);
- return -ERANGE;
- }
-
- cx25840_write(client, 0x420, ctrl->value << 1);
- cx25840_write(client, 0x421, ctrl->value << 1);
+ cx25840_write(client, 0x420, ctrl->val << 1);
+ cx25840_write(client, 0x421, ctrl->val << 1);
break;
case V4L2_CID_HUE:
- if (ctrl->value < -128 || ctrl->value > 127) {
- v4l_err(client, "invalid hue setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- cx25840_write(client, 0x422, ctrl->value);
+ cx25840_write(client, 0x422, ctrl->val);
break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_MUTE:
- if (is_cx2583x(state))
- return -EINVAL;
- return cx25840_audio_s_ctrl(sd, ctrl);
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int cx25840_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct cx25840_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- switch (ctrl->id) {
- case CX25840_CID_ENABLE_PVR150_WORKAROUND:
- ctrl->value = state->pvr150_workaround;
- break;
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = (s8)cx25840_read(client, 0x414) + 128;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = cx25840_read(client, 0x415) >> 1;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = cx25840_read(client, 0x420) >> 1;
- break;
- case V4L2_CID_HUE:
- ctrl->value = (s8)cx25840_read(client, 0x422);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_MUTE:
- if (is_cx2583x(state))
- return -EINVAL;
- return cx25840_audio_g_ctrl(sd, ctrl);
default:
return -EINVAL;
}
@@ -1163,8 +1302,6 @@ static void log_audio_status(struct i2c_client *client)
default: p = "not defined";
}
v4l_info(client, "Detected audio standard: %s\n", p);
- v4l_info(client, "Audio muted: %s\n",
- (state->unmute_volume >= 0) ? "yes" : "no");
v4l_info(client, "Audio microcontroller: %s\n",
(download_ctl & 0x10) ?
((mute_ctl & 0x2) ? "detecting" : "running") : "stopped");
@@ -1381,40 +1518,6 @@ static int cx25840_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int cx25840_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- struct cx25840_state *state = to_state(sd);
-
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- default:
- break;
- }
- if (is_cx2583x(state))
- return -EINVAL;
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535,
- 65535 / 100, state->default_volume);
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- default:
- return -EINVAL;
- }
- return -EINVAL;
-}
-
static int cx25840_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct cx25840_state *state = to_state(sd);
@@ -1576,24 +1679,134 @@ static int cx25840_log_status(struct v4l2_subdev *sd)
log_video_status(client);
if (!is_cx2583x(state))
log_audio_status(client);
+ cx25840_ir_log_status(sd);
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
+ return 0;
+}
+
+static int cx25840_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
+{
+ struct cx25840_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (platform_data) {
+ struct cx25840_platform_data *pdata = platform_data;
+
+ state->pvr150_workaround = pdata->pvr150_workaround;
+ set_input(client, state->vid_input, state->aud_input);
+ }
return 0;
}
+static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
+ bool *handled)
+{
+ struct cx25840_state *state = to_state(sd);
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ u8 irq_stat, aud_stat, aud_en, ir_stat, ir_en;
+ u32 vid_stat, aud_mc_stat;
+ bool block_handled;
+ int ret = 0;
+
+ irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG);
+ v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (entry): %s %s %s\n",
+ irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT ? "ir" : " ",
+ irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT ? "aud" : " ",
+ irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT ? "vid" : " ");
+
+ if ((is_cx23885(state) || is_cx23887(state))) {
+ ir_stat = cx25840_read(c, CX25840_IR_STATS_REG);
+ ir_en = cx25840_read(c, CX25840_IR_IRQEN_REG);
+ v4l_dbg(2, cx25840_debug, c,
+ "AV Core ir IRQ status: %#04x disables: %#04x\n",
+ ir_stat, ir_en);
+ if (irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT) {
+ block_handled = false;
+ ret = cx25840_ir_irq_handler(sd,
+ status, &block_handled);
+ if (block_handled)
+ *handled = true;
+ }
+ }
+
+ aud_stat = cx25840_read(c, CX25840_AUD_INT_STAT_REG);
+ aud_en = cx25840_read(c, CX25840_AUD_INT_CTRL_REG);
+ v4l_dbg(2, cx25840_debug, c,
+ "AV Core audio IRQ status: %#04x disables: %#04x\n",
+ aud_stat, aud_en);
+ aud_mc_stat = cx25840_read4(c, CX23885_AUD_MC_INT_MASK_REG);
+ v4l_dbg(2, cx25840_debug, c,
+ "AV Core audio MC IRQ status: %#06x enables: %#06x\n",
+ aud_mc_stat >> CX23885_AUD_MC_INT_STAT_SHFT,
+ aud_mc_stat & CX23885_AUD_MC_INT_CTRL_BITS);
+ if (irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT) {
+ if (aud_stat) {
+ cx25840_write(c, CX25840_AUD_INT_STAT_REG, aud_stat);
+ *handled = true;
+ }
+ }
+
+ vid_stat = cx25840_read4(c, CX25840_VID_INT_STAT_REG);
+ v4l_dbg(2, cx25840_debug, c,
+ "AV Core video IRQ status: %#06x disables: %#06x\n",
+ vid_stat & CX25840_VID_INT_STAT_BITS,
+ vid_stat >> CX25840_VID_INT_MASK_SHFT);
+ if (irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT) {
+ if (vid_stat & CX25840_VID_INT_STAT_BITS) {
+ cx25840_write4(c, CX25840_VID_INT_STAT_REG, vid_stat);
+ *handled = true;
+ }
+ }
+
+ irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG);
+ v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (exit): %s %s %s\n",
+ irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT ? "ir" : " ",
+ irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT ? "aud" : " ",
+ irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT ? "vid" : " ");
+
+ return ret;
+}
+
+static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
+ bool *handled)
+{
+ struct cx25840_state *state = to_state(sd);
+
+ *handled = false;
+
+ /* Only support the CX2388[578] AV Core for now */
+ if (is_cx2388x(state))
+ return cx23885_irq_handler(sd, status, handled);
+
+ return -ENODEV;
+}
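
How a bridge driver wires this handler up is outside this patch; a hypothetical threaded-IRQ wrapper (the dev_id convention here is assumed, not taken from the source) could look like:

static irqreturn_t example_bridge_av_core_isr(int irq, void *dev_id)
{
	struct v4l2_subdev *sd = dev_id;	/* assumed to be the AV core subdev */
	bool handled = false;

	/* Delegate to the subdev's interrupt_service_routine core op */
	v4l2_subdev_call(sd, core, interrupt_service_routine, 0, &handled);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}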
+
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
+ .s_ctrl = cx25840_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops cx25840_core_ops = {
.log_status = cx25840_log_status,
+ .s_config = cx25840_s_config,
.g_chip_ident = cx25840_g_chip_ident,
- .g_ctrl = cx25840_g_ctrl,
- .s_ctrl = cx25840_s_ctrl,
- .queryctrl = cx25840_queryctrl,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = cx25840_s_std,
.reset = cx25840_reset,
.load_fw = cx25840_load_fw,
+ .s_io_pin_config = common_s_io_pin_config,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = cx25840_g_register,
.s_register = cx25840_s_register,
#endif
+ .interrupt_service_routine = cx25840_irq_handler,
};
static const struct v4l2_subdev_tuner_ops cx25840_tuner_ops = {
@@ -1628,6 +1841,7 @@ static const struct v4l2_subdev_ops cx25840_ops = {
.audio = &cx25840_audio_ops,
.video = &cx25840_video_ops,
.vbi = &cx25840_vbi_ops,
+ .ir = &cx25840_ir_ops,
};
/* ----------------------------------------------------------------------- */
@@ -1675,6 +1889,7 @@ static int cx25840_probe(struct i2c_client *client,
{
struct cx25840_state *state;
struct v4l2_subdev *sd;
+ int default_volume;
u32 id = V4L2_IDENT_NONE;
u16 device_id;
@@ -1718,6 +1933,7 @@ static int cx25840_probe(struct i2c_client *client,
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &cx25840_ops);
+
switch (id) {
case V4L2_IDENT_CX23885_AV:
v4l_info(client, "cx23885 A/V decoder found @ 0x%x (%s)\n",
@@ -1762,22 +1978,62 @@ static int cx25840_probe(struct i2c_client *client,
state->audclk_freq = 48000;
state->pvr150_workaround = 0;
state->audmode = V4L2_TUNER_MODE_LANG1;
- state->unmute_volume = -1;
- state->default_volume = 228 - cx25840_read(client, 0x8d4);
- state->default_volume = ((state->default_volume / 2) + 23) << 9;
state->vbi_line_offset = 8;
state->id = id;
state->rev = device_id;
+ v4l2_ctrl_handler_init(&state->hdl, 9);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ if (!is_cx2583x(state)) {
+ default_volume = 228 - cx25840_read(client, 0x8d4);
+ default_volume = ((default_volume / 2) + 23) << 9;
+
+ state->volume = v4l2_ctrl_new_std(&state->hdl,
+ &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
+ 0, 65535, 65535 / 100, default_volume);
+ state->mute = v4l2_ctrl_new_std(&state->hdl,
+ &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE,
+ 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
+ V4L2_CID_AUDIO_BASS,
+ 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
+ V4L2_CID_AUDIO_TREBLE,
+ 0, 65535, 65535 / 100, 32768);
+ }
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+ v4l2_ctrl_cluster(2, &state->volume);
+ v4l2_ctrl_handler_setup(&state->hdl);
+ cx25840_ir_probe(sd);
return 0;
}
static int cx25840_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct cx25840_state *state = to_state(sd);
+ cx25840_ir_remove(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
return 0;
}
diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h
index 04393b97156..bd4ada28b49 100644
--- a/drivers/media/video/cx25840/cx25840-core.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -24,19 +24,20 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <linux/i2c.h>
-/* ENABLE_PVR150_WORKAROUND activates a workaround for a hardware bug that is
- present in Hauppauge PVR-150 (and possibly PVR-500) cards that have
- certain NTSC tuners (tveeprom tuner model numbers 85, 99 and 112). The
- audio autodetect fails on some channels for these models and the workaround
- is to select the audio standard explicitly. Many thanks to Hauppauge for
- providing this information. */
-#define CX25840_CID_ENABLE_PVR150_WORKAROUND (V4L2_CID_PRIVATE_BASE+0)
+struct cx25840_ir_state;
struct cx25840_state {
struct i2c_client *c;
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* volume cluster */
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *mute;
+ };
int pvr150_workaround;
int radio;
v4l2_std_id std;
@@ -44,14 +45,13 @@ struct cx25840_state {
enum cx25840_audio_input aud_input;
u32 audclk_freq;
int audmode;
- int unmute_volume; /* -1 if not muted */
- int default_volume;
int vbi_line_offset;
u32 id;
u32 rev;
int is_initialized;
wait_queue_head_t fw_wait; /* wake up when the fw load is finished */
struct work_struct fw_work; /* work entry for fw load */
+ struct cx25840_ir_state *ir_state;
};
static inline struct cx25840_state *to_state(struct v4l2_subdev *sd)
@@ -59,6 +59,11 @@ static inline struct cx25840_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct cx25840_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct cx25840_state, hdl)->sd;
+}
+
static inline bool is_cx2583x(struct cx25840_state *state)
{
return state->id == V4L2_IDENT_CX25836 ||
@@ -77,6 +82,21 @@ static inline bool is_cx2388x(struct cx25840_state *state)
state->id == V4L2_IDENT_CX23888_AV;
}
+static inline bool is_cx23885(struct cx25840_state *state)
+{
+ return state->id == V4L2_IDENT_CX23885_AV;
+}
+
+static inline bool is_cx23887(struct cx25840_state *state)
+{
+ return state->id == V4L2_IDENT_CX23887_AV;
+}
+
+static inline bool is_cx23888(struct cx25840_state *state)
+{
+ return state->id == V4L2_IDENT_CX23888_AV;
+}
+
/* ----------------------------------------------------------------------- */
/* cx25850-core.c */
int cx25840_write(struct i2c_client *client, u16 addr, u8 value);
@@ -84,6 +104,8 @@ int cx25840_write4(struct i2c_client *client, u16 addr, u32 value);
u8 cx25840_read(struct i2c_client *client, u16 addr);
u32 cx25840_read4(struct i2c_client *client, u16 addr);
int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned mask, u8 value);
+int cx25840_and_or4(struct i2c_client *client, u16 addr, u32 and_mask,
+ u32 or_value);
void cx25840_std_setup(struct i2c_client *client);
/* ----------------------------------------------------------------------- */
@@ -94,8 +116,8 @@ int cx25840_loadfw(struct i2c_client *client);
/* cx25850-audio.c */
void cx25840_audio_set_path(struct i2c_client *client);
int cx25840_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
-int cx25840_audio_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
-int cx25840_audio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
+
+extern const struct v4l2_ctrl_ops cx25840_audio_ctrl_ops;
/* ----------------------------------------------------------------------- */
/* cx25850-vbi.c */
@@ -104,4 +126,12 @@ int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *
int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi);
+/* ----------------------------------------------------------------------- */
+/* cx25850-ir.c */
+extern const struct v4l2_subdev_ir_ops cx25840_ir_ops;
+int cx25840_ir_log_status(struct v4l2_subdev *sd);
+int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled);
+int cx25840_ir_probe(struct v4l2_subdev *sd);
+int cx25840_ir_remove(struct v4l2_subdev *sd);
+
#endif
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
new file mode 100644
index 00000000000..c2b4c14dc9a
--- /dev/null
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -0,0 +1,1279 @@
+/*
+ * Driver for the Conexant CX2584x Audio/Video decoder chip and related cores
+ *
+ * Integrated Consumer Infrared Controller
+ *
+ * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <media/cx25840.h>
+#include <media/ir-core.h>
+
+#include "cx25840-core.h"
+
+static unsigned int ir_debug;
+module_param(ir_debug, int, 0644);
+MODULE_PARM_DESC(ir_debug, "enable integrated IR debug messages");
+
+#define CX25840_IR_REG_BASE 0x200
+
+#define CX25840_IR_CNTRL_REG 0x200
+#define CNTRL_WIN_3_3 0x00000000
+#define CNTRL_WIN_4_3 0x00000001
+#define CNTRL_WIN_3_4 0x00000002
+#define CNTRL_WIN_4_4 0x00000003
+#define CNTRL_WIN 0x00000003
+#define CNTRL_EDG_NONE 0x00000000
+#define CNTRL_EDG_FALL 0x00000004
+#define CNTRL_EDG_RISE 0x00000008
+#define CNTRL_EDG_BOTH 0x0000000C
+#define CNTRL_EDG 0x0000000C
+#define CNTRL_DMD 0x00000010
+#define CNTRL_MOD 0x00000020
+#define CNTRL_RFE 0x00000040
+#define CNTRL_TFE 0x00000080
+#define CNTRL_RXE 0x00000100
+#define CNTRL_TXE 0x00000200
+#define CNTRL_RIC 0x00000400
+#define CNTRL_TIC 0x00000800
+#define CNTRL_CPL 0x00001000
+#define CNTRL_LBM 0x00002000
+#define CNTRL_R 0x00004000
+
+#define CX25840_IR_TXCLK_REG 0x204
+#define TXCLK_TCD 0x0000FFFF
+
+#define CX25840_IR_RXCLK_REG 0x208
+#define RXCLK_RCD 0x0000FFFF
+
+#define CX25840_IR_CDUTY_REG 0x20C
+#define CDUTY_CDC 0x0000000F
+
+#define CX25840_IR_STATS_REG 0x210
+#define STATS_RTO 0x00000001
+#define STATS_ROR 0x00000002
+#define STATS_RBY 0x00000004
+#define STATS_TBY 0x00000008
+#define STATS_RSR 0x00000010
+#define STATS_TSR 0x00000020
+
+#define CX25840_IR_IRQEN_REG 0x214
+#define IRQEN_RTE 0x00000001
+#define IRQEN_ROE 0x00000002
+#define IRQEN_RSE 0x00000010
+#define IRQEN_TSE 0x00000020
+#define IRQEN_MSK 0x00000033
+
+#define CX25840_IR_FILTR_REG 0x218
+#define FILTR_LPF 0x0000FFFF
+
+#define CX25840_IR_FIFO_REG 0x23C
+#define FIFO_RXTX 0x0000FFFF
+#define FIFO_RXTX_LVL 0x00010000
+#define FIFO_RXTX_RTO 0x0001FFFF
+#define FIFO_RX_NDV 0x00020000
+#define FIFO_RX_DEPTH 8
+#define FIFO_TX_DEPTH 8
+
+#define CX25840_VIDCLK_FREQ 108000000 /* 108 MHz, BT.656 */
+#define CX25840_IR_REFCLK_FREQ (CX25840_VIDCLK_FREQ / 2)
+
+/*
+ * We use this union internally for convenience, but callers to tx_write
+ * and rx_read will be expecting records of type struct ir_raw_event.
+ * Always ensure the size of this union is dictated by struct ir_raw_event.
+ */
+union cx25840_ir_fifo_rec {
+ u32 hw_fifo_data;
+ struct ir_raw_event ir_core_data;
+};
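
The comment above pins the union's size to struct ir_raw_event. A minimal compile-time sketch of that invariant (not part of the patch; it assumes BUILD_BUG_ON() is usable in this context and the helper name is made up):

static inline void cx25840_ir_fifo_rec_size_check(void)
{
	/* Fails the build if the union ever grows past struct ir_raw_event */
	BUILD_BUG_ON(sizeof(union cx25840_ir_fifo_rec) !=
		     sizeof(struct ir_raw_event));
}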
+
+#define CX25840_IR_RX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec))
+#define CX25840_IR_TX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec))
+
+struct cx25840_ir_state {
+ struct i2c_client *c;
+
+ struct v4l2_subdev_ir_parameters rx_params;
+ struct mutex rx_params_lock; /* protects Rx parameter settings cache */
+ atomic_t rxclk_divider;
+ atomic_t rx_invert;
+
+ struct kfifo rx_kfifo;
+ spinlock_t rx_kfifo_lock; /* protect Rx data kfifo */
+
+ struct v4l2_subdev_ir_parameters tx_params;
+ struct mutex tx_params_lock; /* protects Tx parameter settings cache */
+ atomic_t txclk_divider;
+};
+
+static inline struct cx25840_ir_state *to_ir_state(struct v4l2_subdev *sd)
+{
+ struct cx25840_state *state = to_state(sd);
+ return state ? state->ir_state : NULL;
+}
+
+
+/*
+ * Rx and Tx Clock Divider register computations
+ *
+ * Note the largest clock divider value of 0xffff corresponds to:
+ * (0xffff + 1) * 1000 / 108/2 MHz = 1,213,629.629... ns
+ * which fits in 21 bits, so we'll use unsigned int for time arguments.
+ */
+static inline u16 count_to_clock_divider(unsigned int d)
+{
+ if (d > RXCLK_RCD + 1)
+ d = RXCLK_RCD;
+ else if (d < 2)
+ d = 1;
+ else
+ d--;
+ return (u16) d;
+}
+
+static inline u16 ns_to_clock_divider(unsigned int ns)
+{
+ return count_to_clock_divider(
+ DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
+}
+
+static inline unsigned int clock_divider_to_ns(unsigned int divider)
+{
+ /* Period of the Rx or Tx clock in ns */
+ return DIV_ROUND_CLOSEST((divider + 1) * 1000,
+ CX25840_IR_REFCLK_FREQ / 1000000);
+}
+
+static inline u16 carrier_freq_to_clock_divider(unsigned int freq)
+{
+ return count_to_clock_divider(
+ DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * 16));
+}
+
+static inline unsigned int clock_divider_to_carrier_freq(unsigned int divider)
+{
+ return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, (divider + 1) * 16);
+}
+
+static inline u16 freq_to_clock_divider(unsigned int freq,
+ unsigned int rollovers)
+{
+ return count_to_clock_divider(
+ DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * rollovers));
+}
+
+static inline unsigned int clock_divider_to_freq(unsigned int divider,
+ unsigned int rollovers)
+{
+ return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ,
+ (divider + 1) * rollovers);
+}
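
As a worked example of the divider helpers above (a sketch, not part of the patch; the function name is invented, and the 54 MHz reference clock is the one defined earlier):

static unsigned int example_carrier_round_trip(void)
{
	/* DIV_ROUND_CLOSEST(54000000, 36000 * 16) = 94, stored as 94 - 1 = 93 */
	u16 divider = carrier_freq_to_clock_divider(36000);

	/* DIV_ROUND_CLOSEST(54000000, (93 + 1) * 16) = 35904 Hz actually used */
	return clock_divider_to_carrier_freq(divider);
}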
+
+/*
+ * Low Pass Filter register calculations
+ *
+ * Note the largest count value of 0xffff corresponds to:
+ * 0xffff * 1000 / 108/2 MHz = 1,213,611.11... ns
+ * which fits in 21 bits, so we'll use unsigned int for time arguments.
+ */
+static inline u16 count_to_lpf_count(unsigned int d)
+{
+ if (d > FILTR_LPF)
+ d = FILTR_LPF;
+ else if (d < 4)
+ d = 0;
+ return (u16) d;
+}
+
+static inline u16 ns_to_lpf_count(unsigned int ns)
+{
+ return count_to_lpf_count(
+ DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
+}
+
+static inline unsigned int lpf_count_to_ns(unsigned int count)
+{
+ /* Duration of the Low Pass Filter rejection window in ns */
+ return DIV_ROUND_CLOSEST(count * 1000,
+ CX25840_IR_REFCLK_FREQ / 1000000);
+}
+
+static inline unsigned int lpf_count_to_us(unsigned int count)
+{
+ /* Duration of the Low Pass Filter rejection window in us */
+ return DIV_ROUND_CLOSEST(count, CX25840_IR_REFCLK_FREQ / 1000000);
+}
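
A worked example for the low pass filter helpers (a sketch; the function name is invented), using the 333,333 ns RC-6 half-bit width that the Rx defaults further down request:

static u32 example_lpf_round_trip(void)
{
	/* DIV_ROUND_CLOSEST(54 * 333333, 1000) = 18000 filter counts */
	u16 count = ns_to_lpf_count(333333);

	/* DIV_ROUND_CLOSEST(18000 * 1000, 54) = 333333 ns (about 333 us) */
	return lpf_count_to_ns(count);
}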
+
+/*
+ * FIFO register pulse width count computations
+ */
+static u32 clock_divider_to_resolution(u16 divider)
+{
+ /*
+ * Resolution is the duration of 1 tick of the readable portion of
+ * the pulse width counter as read from the FIFO. The two lsb's are
+ * not readable, hence the << 2. This function returns ns.
+ */
+ return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000,
+ CX25840_IR_REFCLK_FREQ / 1000000);
+}
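
For instance (a sketch; the function name is invented), a clock divider register value of 1 gives:

static u32 example_resolution(void)
{
	/* (1 << 2) * (1 + 1) * 1000 / 54 = 148 ns per readable FIFO count */
	return clock_divider_to_resolution(1);
}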
+
+static u64 pulse_width_count_to_ns(u16 count, u16 divider)
+{
+ u64 n;
+ u32 rem;
+
+ /*
+ * The 2 lsb's of the pulse width timer count are not readable, hence
+ * the (count << 2) | 0x3
+ */
+ n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */
+ rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */
+ if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2)
+ n++;
+ return n;
+}
+
+#if 0
+/* Keep as we will need this for Transmit functionality */
+static u16 ns_to_pulse_width_count(u32 ns, u16 divider)
+{
+ u64 n;
+ u32 d;
+ u32 rem;
+
+ /*
+ * The 2 lsb's of the pulse width timer count are not accessible, hence
+ * the (1 << 2)
+ */
+ n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
+ d = (1 << 2) * ((u32) divider + 1) * 1000; /* millicycles/count */
+ rem = do_div(n, d);
+ if (rem >= d / 2)
+ n++;
+
+ if (n > FIFO_RXTX)
+ n = FIFO_RXTX;
+ else if (n == 0)
+ n = 1;
+ return (u16) n;
+}
+
+#endif
+static unsigned int pulse_width_count_to_us(u16 count, u16 divider)
+{
+ u64 n;
+ u32 rem;
+
+ /*
+ * The 2 lsb's of the pulse width timer count are not readable, hence
+ * the (count << 2) | 0x3
+ */
+ n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */
+ rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => us */
+ if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2)
+ n++;
+ return (unsigned int) n;
+}
+
+/*
+ * Pulse Clocks computations: Combined Pulse Width Count & Rx Clock Counts
+ *
+ * The total pulse clock count is an 18 bit pulse width timer count as the most
+ * significant part and (up to) 16 bit clock divider count as a modulus.
+ * When the Rx clock divider ticks down to 0, it increments the 18 bit pulse
+ * width timer count's least significant bit.
+ */
+static u64 ns_to_pulse_clocks(u32 ns)
+{
+ u64 clocks;
+ u32 rem;
+ clocks = CX25840_IR_REFCLK_FREQ / 1000000 * (u64) ns; /* millicycles */
+ rem = do_div(clocks, 1000); /* /1000 = cycles */
+ if (rem >= 1000 / 2)
+ clocks++;
+ return clocks;
+}
+
+static u16 pulse_clocks_to_clock_divider(u64 count)
+{
+ u32 rem;
+
+ rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
+
+ /* net result needs to be rounded down and decremented by 1 */
+ if (count > RXCLK_RCD + 1)
+ count = RXCLK_RCD;
+ else if (count < 2)
+ count = 1;
+ else
+ count--;
+ return (u16) count;
+}
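
Putting the two helpers above together, a worked example (a sketch; the function name is invented) for a requested 10 ms maximum pulse width:

static u32 example_max_pulse_width(void)
{
	/* 54 MHz * 10 ms = 540,000 reference clock cycles */
	u64 pulse_clocks = ns_to_pulse_clocks(10 * 1000 * 1000);

	/* 540,000 / 262,143 truncates to 2, stored as 2 - 1 = 1 */
	u16 divider = pulse_clocks_to_clock_divider(pulse_clocks);

	/* ((0xffff << 2) | 0x3) * (1 + 1) * 1000 / 54 = 9,709,000 ns */
	return (u32) pulse_width_count_to_ns(FIFO_RXTX, divider);
}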
+
+/*
+ * IR Control Register helpers
+ */
+enum tx_fifo_watermark {
+ TX_FIFO_HALF_EMPTY = 0,
+ TX_FIFO_EMPTY = CNTRL_TIC,
+};
+
+enum rx_fifo_watermark {
+ RX_FIFO_HALF_FULL = 0,
+ RX_FIFO_NOT_EMPTY = CNTRL_RIC,
+};
+
+static inline void control_tx_irq_watermark(struct i2c_client *c,
+ enum tx_fifo_watermark level)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_TIC, level);
+}
+
+static inline void control_rx_irq_watermark(struct i2c_client *c,
+ enum rx_fifo_watermark level)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_RIC, level);
+}
+
+static inline void control_tx_enable(struct i2c_client *c, bool enable)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_TXE | CNTRL_TFE),
+ enable ? (CNTRL_TXE | CNTRL_TFE) : 0);
+}
+
+static inline void control_rx_enable(struct i2c_client *c, bool enable)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_RXE | CNTRL_RFE),
+ enable ? (CNTRL_RXE | CNTRL_RFE) : 0);
+}
+
+static inline void control_tx_modulation_enable(struct i2c_client *c,
+ bool enable)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_MOD,
+ enable ? CNTRL_MOD : 0);
+}
+
+static inline void control_rx_demodulation_enable(struct i2c_client *c,
+ bool enable)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_DMD,
+ enable ? CNTRL_DMD : 0);
+}
+
+static inline void control_rx_s_edge_detection(struct i2c_client *c,
+ u32 edge_types)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_EDG_BOTH,
+ edge_types & CNTRL_EDG_BOTH);
+}
+
+static void control_rx_s_carrier_window(struct i2c_client *c,
+ unsigned int carrier,
+ unsigned int *carrier_range_low,
+ unsigned int *carrier_range_high)
+{
+ u32 v;
+ unsigned int c16 = carrier * 16;
+
+ if (*carrier_range_low < DIV_ROUND_CLOSEST(c16, 16 + 3)) {
+ v = CNTRL_WIN_3_4;
+ *carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 4);
+ } else {
+ v = CNTRL_WIN_3_3;
+ *carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 3);
+ }
+
+ if (*carrier_range_high > DIV_ROUND_CLOSEST(c16, 16 - 3)) {
+ v |= CNTRL_WIN_4_3;
+ *carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 4);
+ } else {
+ v |= CNTRL_WIN_3_3;
+ *carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 3);
+ }
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_WIN, v);
+}
+
+static inline void control_tx_polarity_invert(struct i2c_client *c,
+ bool invert)
+{
+ cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_CPL,
+ invert ? CNTRL_CPL : 0);
+}
+
+/*
+ * IR Rx & Tx Clock Register helpers
+ */
+static unsigned int txclk_tx_s_carrier(struct i2c_client *c,
+ unsigned int freq,
+ u16 *divider)
+{
+ *divider = carrier_freq_to_clock_divider(freq);
+ cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider);
+ return clock_divider_to_carrier_freq(*divider);
+}
+
+static unsigned int rxclk_rx_s_carrier(struct i2c_client *c,
+ unsigned int freq,
+ u16 *divider)
+{
+ *divider = carrier_freq_to_clock_divider(freq);
+ cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider);
+ return clock_divider_to_carrier_freq(*divider);
+}
+
+static u32 txclk_tx_s_max_pulse_width(struct i2c_client *c, u32 ns,
+ u16 *divider)
+{
+ u64 pulse_clocks;
+
+ if (ns > IR_MAX_DURATION)
+ ns = IR_MAX_DURATION;
+ pulse_clocks = ns_to_pulse_clocks(ns);
+ *divider = pulse_clocks_to_clock_divider(pulse_clocks);
+ cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider);
+ return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
+}
+
+static u32 rxclk_rx_s_max_pulse_width(struct i2c_client *c, u32 ns,
+ u16 *divider)
+{
+ u64 pulse_clocks;
+
+ if (ns > IR_MAX_DURATION)
+ ns = IR_MAX_DURATION;
+ pulse_clocks = ns_to_pulse_clocks(ns);
+ *divider = pulse_clocks_to_clock_divider(pulse_clocks);
+ cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider);
+ return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
+}
+
+/*
+ * IR Tx Carrier Duty Cycle register helpers
+ */
+static unsigned int cduty_tx_s_duty_cycle(struct i2c_client *c,
+ unsigned int duty_cycle)
+{
+ u32 n;
+ n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */
+ if (n != 0)
+ n--;
+ if (n > 15)
+ n = 15;
+ cx25840_write4(c, CX25840_IR_CDUTY_REG, n);
+ return DIV_ROUND_CLOSEST((n + 1) * 100, 16);
+}
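
A quick worked example for the duty cycle helper (a sketch; the wrapper name is invented), using the 25% RC-5 default from further down:

static unsigned int example_duty_cycle(struct i2c_client *c)
{
	/* DIV_ROUND_CLOSEST(25 * 100, 625) = 4 sixteenths, written as 4 - 1 = 3;
	 * reported back as DIV_ROUND_CLOSEST(4 * 100, 16) = 25%
	 */
	return cduty_tx_s_duty_cycle(c, 25);
}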
+
+/*
+ * IR Filter Register helpers
+ */
+static u32 filter_rx_s_min_width(struct i2c_client *c, u32 min_width_ns)
+{
+ u32 count = ns_to_lpf_count(min_width_ns);
+ cx25840_write4(c, CX25840_IR_FILTR_REG, count);
+ return lpf_count_to_ns(count);
+}
+
+/*
+ * IR IRQ Enable Register helpers
+ */
+static inline void irqenable_rx(struct v4l2_subdev *sd, u32 mask)
+{
+ struct cx25840_state *state = to_state(sd);
+
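+	/*
+	 * On the CX2388[57] cores this register holds IRQ disables rather than
+	 * enables (note the "disables:" wording in the IRQ handlers' debug
+	 * output), so invert the requested enable mask for those cores.
+	 */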
+ if (is_cx23885(state) || is_cx23887(state))
+ mask ^= IRQEN_MSK;
+ mask &= (IRQEN_RTE | IRQEN_ROE | IRQEN_RSE);
+ cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG,
+ ~(IRQEN_RTE | IRQEN_ROE | IRQEN_RSE), mask);
+}
+
+static inline void irqenable_tx(struct v4l2_subdev *sd, u32 mask)
+{
+ struct cx25840_state *state = to_state(sd);
+
+ if (is_cx23885(state) || is_cx23887(state))
+ mask ^= IRQEN_MSK;
+ mask &= IRQEN_TSE;
+ cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, ~IRQEN_TSE, mask);
+}
+
+/*
+ * V4L2 Subdevice IR Ops
+ */
+int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+ struct cx25840_state *state = to_state(sd);
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ struct i2c_client *c = NULL;
+ unsigned long flags;
+
+ union cx25840_ir_fifo_rec rx_data[FIFO_RX_DEPTH];
+ unsigned int i, j, k;
+ u32 events, v;
+ int tsr, rsr, rto, ror, tse, rse, rte, roe, kror;
+ u32 cntrl, irqen, stats;
+
+ *handled = false;
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ c = ir_state->c;
+
+ /* Only support the IR controller for the CX2388[57] AV Core for now */
+ if (!(is_cx23885(state) || is_cx23887(state)))
+ return -ENODEV;
+
+ cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG);
+ irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG);
+ if (is_cx23885(state) || is_cx23887(state))
+ irqen ^= IRQEN_MSK;
+ stats = cx25840_read4(c, CX25840_IR_STATS_REG);
+
+ tsr = stats & STATS_TSR; /* Tx FIFO Service Request */
+ rsr = stats & STATS_RSR; /* Rx FIFO Service Request */
+ rto = stats & STATS_RTO; /* Rx Pulse Width Timer Time Out */
+ ror = stats & STATS_ROR; /* Rx FIFO Over Run */
+
+ tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */
+ rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */
+ rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */
+ roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */
+
+ v4l2_dbg(2, ir_debug, sd, "IR IRQ Status: %s %s %s %s %s %s\n",
+ tsr ? "tsr" : " ", rsr ? "rsr" : " ",
+ rto ? "rto" : " ", ror ? "ror" : " ",
+ stats & STATS_TBY ? "tby" : " ",
+ stats & STATS_RBY ? "rby" : " ");
+
+ v4l2_dbg(2, ir_debug, sd, "IR IRQ Enables: %s %s %s %s\n",
+ tse ? "tse" : " ", rse ? "rse" : " ",
+ rte ? "rte" : " ", roe ? "roe" : " ");
+
+ /*
+ * Transmitter interrupt service
+ */
+ if (tse && tsr) {
+ /*
+ * TODO:
+ * Check the watermark threshold setting
+ * Pull FIFO_TX_DEPTH or FIFO_TX_DEPTH/2 entries from tx_kfifo
+ * Push the data to the hardware FIFO.
+ * If there was nothing more to send in the tx_kfifo, disable
+ * the TSR IRQ and notify the v4l2_device.
+ * If there was something in the tx_kfifo, check the tx_kfifo
+ * level and notify the v4l2_device, if it is low.
+ */
+ /* For now, inhibit TSR interrupt until Tx is implemented */
+ irqenable_tx(sd, 0);
+ events = V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ;
+ v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_TX_NOTIFY, &events);
+ *handled = true;
+ }
+
+ /*
+ * Receiver interrupt service
+ */
+ kror = 0;
+ if ((rse && rsr) || (rte && rto)) {
+ /*
+ * Receive data on RSR to clear the STATS_RSR.
+ * Receive data on RTO, since we may not yet have hit the RSR
+ * watermark when we receive the RTO.
+ */
+ for (i = 0, v = FIFO_RX_NDV;
+ (v & FIFO_RX_NDV) && !kror; i = 0) {
+ for (j = 0;
+ (v & FIFO_RX_NDV) && j < FIFO_RX_DEPTH; j++) {
+ v = cx25840_read4(c, CX25840_IR_FIFO_REG);
+ rx_data[i].hw_fifo_data = v & ~FIFO_RX_NDV;
+ i++;
+ }
+ if (i == 0)
+ break;
+ j = i * sizeof(union cx25840_ir_fifo_rec);
+ k = kfifo_in_locked(&ir_state->rx_kfifo,
+ (unsigned char *) rx_data, j,
+ &ir_state->rx_kfifo_lock);
+ if (k != j)
+ kror++; /* rx_kfifo over run */
+ }
+ *handled = true;
+ }
+
+ events = 0;
+ v = 0;
+ if (kror) {
+ events |= V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN;
+ v4l2_err(sd, "IR receiver software FIFO overrun\n");
+ }
+ if (roe && ror) {
+ /*
+ * The RX FIFO Enable (CNTRL_RFE) must be toggled to clear
+ * the Rx FIFO Over Run status (STATS_ROR)
+ */
+ v |= CNTRL_RFE;
+ events |= V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN;
+ v4l2_err(sd, "IR receiver hardware FIFO overrun\n");
+ }
+ if (rte && rto) {
+ /*
+ * The IR Receiver Enable (CNTRL_RXE) must be toggled to clear
+ * the Rx Pulse Width Timer Time Out (STATS_RTO)
+ */
+ v |= CNTRL_RXE;
+ events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED;
+ }
+ if (v) {
+ /* Clear STATS_ROR & STATS_RTO as needed by resetting the hardware */
+ cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v);
+ cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl);
+ *handled = true;
+ }
+ spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags);
+ if (kfifo_len(&ir_state->rx_kfifo) >= CX25840_IR_RX_KFIFO_SIZE / 2)
+ events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
+ spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags);
+
+ if (events)
+ v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
+ return 0;
+}
+
+/* Receiver */
+static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
+ ssize_t *num)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ bool invert;
+ u16 divider;
+ unsigned int i, n;
+ union cx25840_ir_fifo_rec *p;
+ unsigned u, v;
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ invert = (bool) atomic_read(&ir_state->rx_invert);
+ divider = (u16) atomic_read(&ir_state->rxclk_divider);
+
+ n = count / sizeof(union cx25840_ir_fifo_rec)
+ * sizeof(union cx25840_ir_fifo_rec);
+ if (n == 0) {
+ *num = 0;
+ return 0;
+ }
+
+ n = kfifo_out_locked(&ir_state->rx_kfifo, buf, n,
+ &ir_state->rx_kfifo_lock);
+
+ n /= sizeof(union cx25840_ir_fifo_rec);
+ *num = n * sizeof(union cx25840_ir_fifo_rec);
+
+ for (p = (union cx25840_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) {
+
+ if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
+ /* Assume RTO was because of no IR light input */
+ u = 0;
+ v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n");
+ } else {
+ u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
+ if (invert)
+ u = u ? 0 : 1;
+ }
+
+ v = (unsigned) pulse_width_count_to_ns(
+ (u16) (p->hw_fifo_data & FIFO_RXTX), divider);
+ if (v > IR_MAX_DURATION)
+ v = IR_MAX_DURATION;
+
+ p->ir_core_data.pulse = u;
+ p->ir_core_data.duration = v;
+
+ v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns %s\n",
+ v, u ? "mark" : "space");
+ }
+ return 0;
+}
+
+static int cx25840_ir_rx_g_parameters(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *p)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ mutex_lock(&ir_state->rx_params_lock);
+ memcpy(p, &ir_state->rx_params,
+ sizeof(struct v4l2_subdev_ir_parameters));
+ mutex_unlock(&ir_state->rx_params_lock);
+ return 0;
+}
+
+static int cx25840_ir_rx_shutdown(struct v4l2_subdev *sd)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ struct i2c_client *c;
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ c = ir_state->c;
+ mutex_lock(&ir_state->rx_params_lock);
+
+ /* Disable or slow down all IR Rx circuits and counters */
+ irqenable_rx(sd, 0);
+ control_rx_enable(c, false);
+ control_rx_demodulation_enable(c, false);
+ control_rx_s_edge_detection(c, CNTRL_EDG_NONE);
+ filter_rx_s_min_width(c, 0);
+ cx25840_write4(c, CX25840_IR_RXCLK_REG, RXCLK_RCD);
+
+ ir_state->rx_params.shutdown = true;
+
+ mutex_unlock(&ir_state->rx_params_lock);
+ return 0;
+}
+
+static int cx25840_ir_rx_s_parameters(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *p)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ struct i2c_client *c;
+ struct v4l2_subdev_ir_parameters *o;
+ u16 rxclk_divider;
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ if (p->shutdown)
+ return cx25840_ir_rx_shutdown(sd);
+
+ if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
+ return -ENOSYS;
+
+ c = ir_state->c;
+ o = &ir_state->rx_params;
+
+ mutex_lock(&ir_state->rx_params_lock);
+
+ o->shutdown = p->shutdown;
+
+ p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
+ o->mode = p->mode;
+
+ p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
+ o->bytes_per_data_element = p->bytes_per_data_element;
+
+ /* Before we tweak the hardware, we have to disable the receiver */
+ irqenable_rx(sd, 0);
+ control_rx_enable(c, false);
+
+ control_rx_demodulation_enable(c, p->modulation);
+ o->modulation = p->modulation;
+
+ if (p->modulation) {
+ p->carrier_freq = rxclk_rx_s_carrier(c, p->carrier_freq,
+ &rxclk_divider);
+
+ o->carrier_freq = p->carrier_freq;
+
+ p->duty_cycle = 50;
+ o->duty_cycle = p->duty_cycle;
+
+ control_rx_s_carrier_window(c, p->carrier_freq,
+ &p->carrier_range_lower,
+ &p->carrier_range_upper);
+ o->carrier_range_lower = p->carrier_range_lower;
+ o->carrier_range_upper = p->carrier_range_upper;
+
+ p->max_pulse_width =
+ (u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider);
+ } else {
+ p->max_pulse_width =
+ rxclk_rx_s_max_pulse_width(c, p->max_pulse_width,
+ &rxclk_divider);
+ }
+ o->max_pulse_width = p->max_pulse_width;
+ atomic_set(&ir_state->rxclk_divider, rxclk_divider);
+
+ p->noise_filter_min_width =
+ filter_rx_s_min_width(c, p->noise_filter_min_width);
+ o->noise_filter_min_width = p->noise_filter_min_width;
+
+ p->resolution = clock_divider_to_resolution(rxclk_divider);
+ o->resolution = p->resolution;
+
+ /* FIXME - make this dependent on resolution for better performance */
+ control_rx_irq_watermark(c, RX_FIFO_HALF_FULL);
+
+ control_rx_s_edge_detection(c, CNTRL_EDG_BOTH);
+
+ o->invert_level = p->invert_level;
+ atomic_set(&ir_state->rx_invert, p->invert_level);
+
+ o->interrupt_enable = p->interrupt_enable;
+ o->enable = p->enable;
+ if (p->enable) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags);
+ kfifo_reset(&ir_state->rx_kfifo);
+ spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags);
+ if (p->interrupt_enable)
+ irqenable_rx(sd, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE);
+ control_rx_enable(c, p->enable);
+ }
+
+ mutex_unlock(&ir_state->rx_params_lock);
+ return 0;
+}
+
+/* Transmitter */
+static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
+ ssize_t *num)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ struct i2c_client *c;
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ c = ir_state->c;
+#if 0
+ /*
+ * FIXME - the code below is an incomplete and untested sketch of what
+ * may need to be done. The critical part is to get 4 (or 8) pulses
+ * from the tx_kfifo, or converted from ns to the proper units straight
+ * from the input, and push them to the hardware Tx FIFO right away if
+ * the HW Tx FIFO needs service. The rest can be pushed to the tx_kfifo
+ * in a less critical timeframe. Also watch out for overrunning the
+ * tx_kfifo - don't let that happen, and let the caller know that not
+ * all of the pulses were written.
+ */
+ u32 *ns_pulse = (u32 *) buf;
+ unsigned int n;
+ u32 fifo_pulse[FIFO_TX_DEPTH];
+ u32 mark;
+
+ /* Compute how much we can fit in the tx kfifo */
+ n = CX25840_IR_TX_KFIFO_SIZE - kfifo_len(ir_state->tx_kfifo);
+ n = min(n, (unsigned int) count);
+ n /= sizeof(u32);
+
+ /* FIXME - turn on Tx Fifo service interrupt
+ * check hardware fifo level, and other stuff
+ */
+ for (i = 0; i < n; ) {
+ for (j = 0; j < FIFO_TX_DEPTH / 2 && i < n; j++) {
+ mark = ns_pulse[i] & LEVEL_MASK;
+ fifo_pulse[j] = ns_to_pulse_width_count(
+ ns_pulse[i] &
+ ~LEVEL_MASK,
+ ir_state->txclk_divider);
+ if (mark)
+ fifo_pulse[j] |= FIFO_RXTX_LVL;
+ i++;
+ }
+ kfifo_put(ir_state->tx_kfifo, (u8 *) fifo_pulse,
+ j * sizeof(u32));
+ }
+ *num = n * sizeof(u32);
+#else
+ /* For now enable the Tx FIFO Service interrupt & pretend we did work */
+ irqenable_tx(sd, IRQEN_TSE);
+ *num = count;
+#endif
+ return 0;
+}
+
+static int cx25840_ir_tx_g_parameters(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *p)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ mutex_lock(&ir_state->tx_params_lock);
+ memcpy(p, &ir_state->tx_params,
+ sizeof(struct v4l2_subdev_ir_parameters));
+ mutex_unlock(&ir_state->tx_params_lock);
+ return 0;
+}
+
+static int cx25840_ir_tx_shutdown(struct v4l2_subdev *sd)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ struct i2c_client *c;
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ c = ir_state->c;
+ mutex_lock(&ir_state->tx_params_lock);
+
+ /* Disable or slow down all IR Tx circuits and counters */
+ irqenable_tx(sd, 0);
+ control_tx_enable(c, false);
+ control_tx_modulation_enable(c, false);
+ cx25840_write4(c, CX25840_IR_TXCLK_REG, TXCLK_TCD);
+
+ ir_state->tx_params.shutdown = true;
+
+ mutex_unlock(&ir_state->tx_params_lock);
+ return 0;
+}
+
+static int cx25840_ir_tx_s_parameters(struct v4l2_subdev *sd,
+ struct v4l2_subdev_ir_parameters *p)
+{
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+ struct i2c_client *c;
+ struct v4l2_subdev_ir_parameters *o;
+ u16 txclk_divider;
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ if (p->shutdown)
+ return cx25840_ir_tx_shutdown(sd);
+
+ if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
+ return -ENOSYS;
+
+ c = ir_state->c;
+ o = &ir_state->tx_params;
+ mutex_lock(&ir_state->tx_params_lock);
+
+ o->shutdown = p->shutdown;
+
+ p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
+ o->mode = p->mode;
+
+ p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
+ o->bytes_per_data_element = p->bytes_per_data_element;
+
+ /* Before we tweak the hardware, we have to disable the transmitter */
+ irqenable_tx(sd, 0);
+ control_tx_enable(c, false);
+
+ control_tx_modulation_enable(c, p->modulation);
+ o->modulation = p->modulation;
+
+ if (p->modulation) {
+ p->carrier_freq = txclk_tx_s_carrier(c, p->carrier_freq,
+ &txclk_divider);
+ o->carrier_freq = p->carrier_freq;
+
+ p->duty_cycle = cduty_tx_s_duty_cycle(c, p->duty_cycle);
+ o->duty_cycle = p->duty_cycle;
+
+ p->max_pulse_width =
+ (u32) pulse_width_count_to_ns(FIFO_RXTX, txclk_divider);
+ } else {
+ p->max_pulse_width =
+ txclk_tx_s_max_pulse_width(c, p->max_pulse_width,
+ &txclk_divider);
+ }
+ o->max_pulse_width = p->max_pulse_width;
+ atomic_set(&ir_state->txclk_divider, txclk_divider);
+
+ p->resolution = clock_divider_to_resolution(txclk_divider);
+ o->resolution = p->resolution;
+
+ /* FIXME - make this dependent on resolution for better performance */
+ control_tx_irq_watermark(c, TX_FIFO_HALF_EMPTY);
+
+ control_tx_polarity_invert(c, p->invert_carrier_sense);
+ o->invert_carrier_sense = p->invert_carrier_sense;
+
+ /*
+ * FIXME: we don't have hardware help for IO pin level inversion
+ * here like we have on the CX23888.
+ * Act on this with some mix of logical inversion of data levels,
+ * carrier polarity, and carrier duty cycle.
+ */
+ o->invert_level = p->invert_level;
+
+ o->interrupt_enable = p->interrupt_enable;
+ o->enable = p->enable;
+ if (p->enable) {
+ /* reset tx_fifo here */
+ if (p->interrupt_enable)
+ irqenable_tx(sd, IRQEN_TSE);
+ control_tx_enable(c, p->enable);
+ }
+
+ mutex_unlock(&ir_state->tx_params_lock);
+ return 0;
+}
+
+
+/*
+ * V4L2 Subdevice Core Ops support
+ */
+int cx25840_ir_log_status(struct v4l2_subdev *sd)
+{
+ struct cx25840_state *state = to_state(sd);
+ struct i2c_client *c = state->c;
+ char *s;
+ int i, j;
+ u32 cntrl, txclk, rxclk, cduty, stats, irqen, filtr;
+
+ /* The CX23888 chip doesn't have an IR controller on the A/V core */
+ if (is_cx23888(state))
+ return 0;
+
+ cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG);
+ txclk = cx25840_read4(c, CX25840_IR_TXCLK_REG) & TXCLK_TCD;
+ rxclk = cx25840_read4(c, CX25840_IR_RXCLK_REG) & RXCLK_RCD;
+ cduty = cx25840_read4(c, CX25840_IR_CDUTY_REG) & CDUTY_CDC;
+ stats = cx25840_read4(c, CX25840_IR_STATS_REG);
+ irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG);
+ if (is_cx23885(state) || is_cx23887(state))
+ irqen ^= IRQEN_MSK;
+ filtr = cx25840_read4(c, CX25840_IR_FILTR_REG) & FILTR_LPF;
+
+ v4l2_info(sd, "IR Receiver:\n");
+ v4l2_info(sd, "\tEnabled: %s\n",
+ cntrl & CNTRL_RXE ? "yes" : "no");
+ v4l2_info(sd, "\tDemodulation from a carrier: %s\n",
+ cntrl & CNTRL_DMD ? "enabled" : "disabled");
+ v4l2_info(sd, "\tFIFO: %s\n",
+ cntrl & CNTRL_RFE ? "enabled" : "disabled");
+ switch (cntrl & CNTRL_EDG) {
+ case CNTRL_EDG_NONE:
+ s = "disabled";
+ break;
+ case CNTRL_EDG_FALL:
+ s = "falling edge";
+ break;
+ case CNTRL_EDG_RISE:
+ s = "rising edge";
+ break;
+ case CNTRL_EDG_BOTH:
+ s = "rising & falling edges";
+ break;
+ default:
+ s = "??? edge";
+ break;
+ }
+ v4l2_info(sd, "\tPulse timers' start/stop trigger: %s\n", s);
+ v4l2_info(sd, "\tFIFO data on pulse timer overflow: %s\n",
+ cntrl & CNTRL_R ? "not loaded" : "overflow marker");
+ v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
+ cntrl & CNTRL_RIC ? "not empty" : "half full or greater");
+ v4l2_info(sd, "\tLoopback mode: %s\n",
+ cntrl & CNTRL_LBM ? "loopback active" : "normal receive");
+ if (cntrl & CNTRL_DMD) {
+ v4l2_info(sd, "\tExpected carrier (16 clocks): %u Hz\n",
+ clock_divider_to_carrier_freq(rxclk));
+ switch (cntrl & CNTRL_WIN) {
+ case CNTRL_WIN_3_3:
+ i = 3;
+ j = 3;
+ break;
+ case CNTRL_WIN_4_3:
+ i = 4;
+ j = 3;
+ break;
+ case CNTRL_WIN_3_4:
+ i = 3;
+ j = 4;
+ break;
+ case CNTRL_WIN_4_4:
+ i = 4;
+ j = 4;
+ break;
+ default:
+ i = 0;
+ j = 0;
+ break;
+ }
+ v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
+ "-%1d/+%1d, %u to %u Hz\n", i, j,
+ clock_divider_to_freq(rxclk, 16 + j),
+ clock_divider_to_freq(rxclk, 16 - i));
+ }
+ v4l2_info(sd, "\tMax measurable pulse width: %u us, %llu ns\n",
+ pulse_width_count_to_us(FIFO_RXTX, rxclk),
+ pulse_width_count_to_ns(FIFO_RXTX, rxclk));
+ v4l2_info(sd, "\tLow pass filter: %s\n",
+ filtr ? "enabled" : "disabled");
+ if (filtr)
+ v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
+ "%u ns\n",
+ lpf_count_to_us(filtr),
+ lpf_count_to_ns(filtr));
+ v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
+ stats & STATS_RTO ? "yes" : "no");
+ v4l2_info(sd, "\tPulse width timer time-out intr: %s\n",
+ irqen & IRQEN_RTE ? "enabled" : "disabled");
+ v4l2_info(sd, "\tFIFO overrun: %s\n",
+ stats & STATS_ROR ? "yes" : "no");
+ v4l2_info(sd, "\tFIFO overrun interrupt: %s\n",
+ irqen & IRQEN_ROE ? "enabled" : "disabled");
+ v4l2_info(sd, "\tBusy: %s\n",
+ stats & STATS_RBY ? "yes" : "no");
+ v4l2_info(sd, "\tFIFO service requested: %s\n",
+ stats & STATS_RSR ? "yes" : "no");
+ v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
+ irqen & IRQEN_RSE ? "enabled" : "disabled");
+
+ v4l2_info(sd, "IR Transmitter:\n");
+ v4l2_info(sd, "\tEnabled: %s\n",
+ cntrl & CNTRL_TXE ? "yes" : "no");
+ v4l2_info(sd, "\tModulation onto a carrier: %s\n",
+ cntrl & CNTRL_MOD ? "enabled" : "disabled");
+ v4l2_info(sd, "\tFIFO: %s\n",
+ cntrl & CNTRL_TFE ? "enabled" : "disabled");
+ v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
+ cntrl & CNTRL_TIC ? "not empty" : "half full or less");
+ v4l2_info(sd, "\tCarrier polarity: %s\n",
+ cntrl & CNTRL_CPL ? "space:burst mark:noburst"
+ : "space:noburst mark:burst");
+ if (cntrl & CNTRL_MOD) {
+ v4l2_info(sd, "\tCarrier (16 clocks): %u Hz\n",
+ clock_divider_to_carrier_freq(txclk));
+ v4l2_info(sd, "\tCarrier duty cycle: %2u/16\n",
+ cduty + 1);
+ }
+ v4l2_info(sd, "\tMax pulse width: %u us, %llu ns\n",
+ pulse_width_count_to_us(FIFO_RXTX, txclk),
+ pulse_width_count_to_ns(FIFO_RXTX, txclk));
+ v4l2_info(sd, "\tBusy: %s\n",
+ stats & STATS_TBY ? "yes" : "no");
+ v4l2_info(sd, "\tFIFO service requested: %s\n",
+ stats & STATS_TSR ? "yes" : "no");
+ v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
+ irqen & IRQEN_TSE ? "enabled" : "disabled");
+
+ return 0;
+}
+
+
+const struct v4l2_subdev_ir_ops cx25840_ir_ops = {
+ .rx_read = cx25840_ir_rx_read,
+ .rx_g_parameters = cx25840_ir_rx_g_parameters,
+ .rx_s_parameters = cx25840_ir_rx_s_parameters,
+
+ .tx_write = cx25840_ir_tx_write,
+ .tx_g_parameters = cx25840_ir_tx_g_parameters,
+ .tx_s_parameters = cx25840_ir_tx_s_parameters,
+};
+
+
+static const struct v4l2_subdev_ir_parameters default_rx_params = {
+ .bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec),
+ .mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
+
+ .enable = false,
+ .interrupt_enable = false,
+ .shutdown = true,
+
+ .modulation = true,
+ .carrier_freq = 36000, /* 36 kHz - RC-5, and RC-6 carrier */
+
+ /* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */
+ /* RC-6: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */
+ .noise_filter_min_width = 333333, /* ns */
+ .carrier_range_lower = 35000,
+ .carrier_range_upper = 37000,
+ .invert_level = false,
+};
+
+static const struct v4l2_subdev_ir_parameters default_tx_params = {
+ .bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec),
+ .mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
+
+ .enable = false,
+ .interrupt_enable = false,
+ .shutdown = true,
+
+ .modulation = true,
+ .carrier_freq = 36000, /* 36 kHz - RC-5 carrier */
+ .duty_cycle = 25, /* 25 % - RC-5 carrier */
+ .invert_level = false,
+ .invert_carrier_sense = false,
+};
+
+int cx25840_ir_probe(struct v4l2_subdev *sd)
+{
+ struct cx25840_state *state = to_state(sd);
+ struct cx25840_ir_state *ir_state;
+ struct v4l2_subdev_ir_parameters default_params;
+
+ /* Only init the IR controller for the CX2388[57] AV Core for now */
+ if (!(is_cx23885(state) || is_cx23887(state)))
+ return 0;
+
+ ir_state = kzalloc(sizeof(struct cx25840_ir_state), GFP_KERNEL);
+ if (ir_state == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&ir_state->rx_kfifo_lock);
+ if (kfifo_alloc(&ir_state->rx_kfifo,
+ CX25840_IR_RX_KFIFO_SIZE, GFP_KERNEL)) {
+ kfree(ir_state);
+ return -ENOMEM;
+ }
+
+ ir_state->c = state->c;
+ state->ir_state = ir_state;
+
+ /* Ensure no interrupts arrive yet */
+ if (is_cx23885(state) || is_cx23887(state))
+ cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, IRQEN_MSK);
+ else
+ cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0);
+
+ mutex_init(&ir_state->rx_params_lock);
+ memcpy(&default_params, &default_rx_params,
+ sizeof(struct v4l2_subdev_ir_parameters));
+ v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
+
+ mutex_init(&ir_state->tx_params_lock);
+ memcpy(&default_params, &default_tx_params,
+ sizeof(struct v4l2_subdev_ir_parameters));
+ v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
+
+ return 0;
+}
+
+int cx25840_ir_remove(struct v4l2_subdev *sd)
+{
+ struct cx25840_state *state = to_state(sd);
+ struct cx25840_ir_state *ir_state = to_ir_state(sd);
+
+ if (ir_state == NULL)
+ return -ENODEV;
+
+ cx25840_ir_rx_shutdown(sd);
+ cx25840_ir_tx_shutdown(sd);
+
+ kfifo_free(&ir_state->rx_kfifo);
+ kfree(ir_state);
+ state->ir_state = NULL;
+ return 0;
+}
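
A side note on the default_rx_params figures above: the 333,333 ns noise-filter floor is the RC-6 mark width (16 cycles of the 36 kHz carrier, derated by the 0.75 factor stated in the comment), and the 666,667 ns RC-5 figure doubles the cycle count. A minimal stand-alone sketch of that arithmetic, assuming only the numbers given in the comments (illustration only, not part of the patch):

/* Stand-alone illustration of the RC-5/RC-6 mark widths quoted in
 * default_rx_params above: one carrier period at carrier_hz, times the
 * number of carrier cycles per mark, times the 3/4 derating factor.
 */
#include <stdio.h>

static unsigned long long mark_width_ns(unsigned int carrier_hz,
					unsigned int cycles)
{
	return (1000000000ULL * cycles * 3) / (4ULL * carrier_hz);
}

int main(void)
{
	printf("RC-5 (32 cycles): %llu ns\n", mark_width_ns(36000, 32));
	printf("RC-6 (16 cycles): %llu ns\n", mark_width_ns(36000, 16));
	return 0;
}

Integer division gives 666,666 ns and 333,333 ns here; the RC-5 comment rounds up to 666,667 ns.
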
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index c7e5851d348..99dbae11759 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -6,7 +6,7 @@ config VIDEO_CX88
select VIDEOBUF_DMA_SG
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO
---help---
This is a video4linux driver for Conexant 2388x based
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 375ad53f796..82db555b22d 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -193,24 +193,13 @@ void cx88_i2c_init_ir(struct cx88_core *core)
0x18, 0x6b, 0x71,
I2C_CLIENT_END
};
- const unsigned short *addrp;
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- /*
- * We can't call i2c_new_probed_device() because it uses
-	 * quick writes for probing and at least some IR receiver
- * devices only reply to reads.
- */
- for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
- if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
- I2C_SMBUS_READ, 0,
- I2C_SMBUS_QUICK, NULL) >= 0) {
- info.addr = *addrp;
- i2c_new_device(&core->i2c_adap, &info);
- break;
- }
- }
+ /* Use quick read command for probe, some IR chips don't
+ * support writes */
+ i2c_new_probed_device(&core->i2c_adap, &info, addr_list,
+ i2c_probe_func_quick_read);
}
}
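
The call above relies on the stock i2c_probe_func_quick_read() helper, which probes each candidate address with an SMBus quick read rather than the quick write that i2c_new_probed_device() uses by default. A hedged sketch of an equivalent custom callback, using the same i2c_smbus_xfer() call as the loop deleted above (the name cx88_ir_probe_quick_read is illustrative, not part of the patch):

#include <linux/i2c.h>

/* Sketch only: mirrors what the stock i2c_probe_func_quick_read() does,
 * reusing the i2c_smbus_xfer() call from the removed open-coded loop.
 * Returns non-zero when a device answers a quick read at 'addr'.
 */
static int cx88_ir_probe_quick_read(struct i2c_adapter *adap,
				    unsigned short addr)
{
	return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
			      I2C_SMBUS_QUICK, NULL) >= 0;
}

/* It would then be passed in place of i2c_probe_func_quick_read:
 *
 *	i2c_new_probed_device(&core->i2c_adap, &info, addr_list,
 *			      cx88_ir_probe_quick_read);
 */
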
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index c7be0e09782..66aefd6eef5 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_EM28XX
depends on VIDEO_DEV && I2C && INPUT
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEOBUF_VMALLOC
select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index ffbe544e30f..e7efb4bffab 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2385,7 +2385,7 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
if (dev->init_data.name)
info.platform_data = &dev->init_data;
- i2c_new_probed_device(&dev->i2c_adap, &info, addr_list);
+ i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
}
void em28xx_card_setup(struct em28xx *dev)
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 8f1c94f7e00..43d208f1f58 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -1418,7 +1418,7 @@ static struct video_device viu_template = {
.current_norm = V4L2_STD_NTSC_M,
};
-static int __devinit viu_of_probe(struct of_device *op,
+static int __devinit viu_of_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct viu_dev *viu_dev;
@@ -1549,7 +1549,7 @@ err:
return ret;
}
-static int __devexit viu_of_remove(struct of_device *op)
+static int __devexit viu_of_remove(struct platform_device *op)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
@@ -1570,7 +1570,7 @@ static int __devexit viu_of_remove(struct of_device *op)
}
#ifdef CONFIG_PM
-static int viu_suspend(struct of_device *op, pm_message_t state)
+static int viu_suspend(struct platform_device *op, pm_message_t state)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
@@ -1579,7 +1579,7 @@ static int viu_suspend(struct of_device *op, pm_message_t state)
return 0;
}
-static int viu_resume(struct of_device *op)
+static int viu_resume(struct platform_device *op)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index d951b0f0e05..b9846106913 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -55,7 +55,7 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 9, 0)
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 10, 0)
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -440,10 +440,15 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
frame->v4l2_buf.sequence = ++gspca_dev->sequence;
gspca_dev->image = frame->data;
gspca_dev->image_len = 0;
- } else if (gspca_dev->last_packet_type == DISCARD_PACKET) {
- if (packet_type == LAST_PACKET)
- gspca_dev->last_packet_type = packet_type;
- return;
+ } else {
+ switch (gspca_dev->last_packet_type) {
+ case DISCARD_PACKET:
+ if (packet_type == LAST_PACKET)
+ gspca_dev->last_packet_type = packet_type;
+ return;
+ case LAST_PACKET:
+ return;
+ }
}
/* append the packet to the frame buffer */
@@ -454,6 +459,12 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
gspca_dev->frsz);
packet_type = DISCARD_PACKET;
} else {
+/* !! image is NULL only when last pkt is LAST or DISCARD
+ if (gspca_dev->image == NULL) {
+ err("gspca_frame_add() image == NULL");
+ return;
+ }
+ */
memcpy(gspca_dev->image + gspca_dev->image_len,
data, len);
gspca_dev->image_len += len;
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index ee17b034bf6..370544361be 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -66,7 +66,11 @@ struct sd {
#define BRIDGE_SN9C110 2
#define BRIDGE_SN9C120 3
u8 sensor; /* Type of image sensor chip */
-enum {
+ u8 i2c_addr;
+
+ u8 jpeg_hdr[JPEG_HDR_SZ];
+};
+enum sensors {
SENSOR_ADCM1700,
SENSOR_GC0307,
SENSOR_HV7131R,
@@ -81,10 +85,6 @@ enum {
SENSOR_PO2030N,
SENSOR_SOI768,
SENSOR_SP80708,
-} sensors;
- u8 i2c_addr;
-
- u8 jpeg_hdr[JPEG_HDR_SZ];
};
/* V4L2 controls supported by the driver */
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index 37cee5e063c..7ae6522d4ed 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -23,7 +23,6 @@
#define MODULE_NAME "sq930x"
#include "gspca.h"
-#include "jpeg.h"
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>\n"
"Gerard Klaver <gerard at gkall dot hobby dot nl\n"
@@ -31,8 +30,6 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>\n"
MODULE_DESCRIPTION("GSPCA/SQ930x USB Camera Driver");
MODULE_LICENSE("GPL");
-#define BULK_TRANSFER_LEN 5128
-
/* Structure to hold all of our device specific stuff */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
@@ -40,28 +37,20 @@ struct sd {
u16 expo;
u8 gain;
- u8 quality; /* webcam quality 0..3 */
-#define QUALITY_DEF 1
-
- u8 gpio[2];
-
- u8 eof_len;
u8 do_ctrl;
-
+ u8 gpio[2];
u8 sensor;
-enum {
+ u8 type;
+#define Generic 0
+#define Creative_live_motion 1
+};
+enum sensors {
SENSOR_ICX098BQ,
SENSOR_LZ24BP,
SENSOR_MI0360,
- SENSOR_MT9V111,
+ SENSOR_MT9V111, /* = MI360SOC */
SENSOR_OV7660,
SENSOR_OV9630,
-} sensors;
- u8 type;
-#define Generic 0
-#define Creative_live_motion 1
-
- u8 jpeg_hdr[JPEG_HDR_SZ];
};
static int sd_setexpo(struct gspca_dev *gspca_dev, __s32 val);
@@ -78,7 +67,7 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0x0001,
.maximum = 0x0fff,
.step = 1,
-#define EXPO_DEF 0x027d
+#define EXPO_DEF 0x0356
.default_value = EXPO_DEF,
},
.set = sd_setexpo,
@@ -92,7 +81,7 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0x01,
.maximum = 0xff,
.step = 1,
-#define GAIN_DEF 0x61
+#define GAIN_DEF 0x8d
.default_value = GAIN_DEF,
},
.set = sd_setgain,
@@ -101,30 +90,18 @@ static const struct ctrl sd_ctrls[] = {
};
static struct v4l2_pix_format vga_mode[] = {
- {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 160,
- .sizeimage = 160 * 120 * 5 / 8 + 590,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .priv = 0},
- {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ {320, 240, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE,
.bytesperline = 320,
- .sizeimage = 320 * 240 * 4 / 8 + 590,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .priv = 1},
- {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
+ {640, 480, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .priv = 2},
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1},
};
-/* JPEG quality indexed by webcam quality */
-#define QUAL_0 90
-#define QUAL_1 85
-#define QUAL_2 75
-#define QUAL_3 70
-static const u8 quality_tb[4] = { QUAL_0, QUAL_1, QUAL_2, QUAL_3 };
-
/* sq930x registers */
#define SQ930_CTRL_UCBUS_IO 0x0001
#define SQ930_CTRL_I2C_IO 0x0002
@@ -302,7 +279,7 @@ static const struct i2c_write_cmd mt9v111_init_0[] = {
{0x01, 0x0001}, /* select IFP/SOC registers */
{0x06, 0x300c}, /* operating mode control */
{0x08, 0xcc00}, /* output format control (RGB) */
- {0x01, 0x0004}, /* select core registers */
+ {0x01, 0x0004}, /* select sensor core registers */
};
static const struct i2c_write_cmd mt9v111_init_1[] = {
{0x03, 0x01e5}, /* window height */
@@ -330,7 +307,8 @@ static const struct i2c_write_cmd mt9v111_init_3[] = {
{0x62, 0x0405},
};
static const struct i2c_write_cmd mt9v111_init_4[] = {
- {0x05, 0x00ce}, /* horizontal blanking */
+/* {0x05, 0x00ce}, */
+ {0x05, 0x005d}, /* horizontal blanking */
};
static const struct ucbus_write_cmd ov7660_start_0[] = {
@@ -343,78 +321,58 @@ static const struct ucbus_write_cmd ov9630_start_0[] = {
{0xf334, 0x3e}, {0xf335, 0xf8}, {0xf33f, 0x03}
};
+/* start parameters indexed by [sensor][mode] */
static const struct cap_s {
u8 cc_sizeid;
u8 cc_bytes[32];
-} capconfig[4][3] = {
+} capconfig[4][2] = {
[SENSOR_ICX098BQ] = {
- {0, /* JPEG, 160x120 */
+ {2, /* Bayer 320x240 */
+ {0x05, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee,
+ 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {4, /* Bayer 640x480 */
{0x01, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee,
0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0x8b, 0x00, 0x8b, 0x00, 0x41, 0x01, 0x41,
- 0x01, 0x41, 0x01, 0x05, 0x40, 0x01, 0xf0, 0x00} },
- {2, /* JPEG, 320x240 */
- {0x01, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee,
- 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0xdf, 0x01, 0x00, 0x00, 0x3f, 0x01, 0x3f,
- 0x01, 0x00, 0x00, 0x05, 0x40, 0x01, 0xf0, 0x00} },
- {4, /* JPEG, 640x480 */
- {0x01, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xf0,
- 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x07, 0xe1, 0x01, 0xe1, 0x01, 0x3f, 0x01, 0x3f,
- 0x01, 0x3f, 0x01, 0x05, 0x80, 0x02, 0xe0, 0x01} },
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
},
[SENSOR_LZ24BP] = {
- {0, /* JPEG, 160x120 */
- {0x01, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee,
- 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0x8b, 0x00, 0x8b, 0x00, 0x41, 0x01, 0x41,
- 0x01, 0x41, 0x01, 0x05, 0x40, 0x01, 0xf0, 0x00} },
- {2, /* JPEG, 320x240 */
+ {2, /* Bayer 320x240 */
+ {0x05, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xee,
+ 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {4, /* Bayer 640x480 */
{0x01, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xee,
0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0xdf, 0x01, 0x00, 0x00, 0x3f, 0x01, 0x3f,
- 0x01, 0x00, 0x00, 0x05, 0x40, 0x01, 0xf0, 0x00} },
- {4, /* JPEG, 640x480 */
- {0x01, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xf0,
- 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x07, 0xe1, 0x01, 0xe1, 0x01, 0x3f, 0x01, 0x3f,
- 0x01, 0x3f, 0x01, 0x05, 0x80, 0x02, 0xe0, 0x01} },
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
},
[SENSOR_MI0360] = {
- {0, /* JPEG, 160x120 */
- {0x05, 0x3d, 0x20, 0x0b, 0x00, 0xbd, 0x02, 0x0b,
- 0x02, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0x01, 0x01, 0x01, 0x01, 0x9f, 0x00, 0x9f,
- 0x00, 0x9f, 0x01, 0x05, 0xa0, 0x00, 0x80, 0x00} },
- {2, /* JPEG, 320x240 */
+ {2, /* Bayer 320x240 */
+ {0x05, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1,
+ 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {4, /* Bayer 640x480 */
{0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1,
-/*fixme 03 e3 */
0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0xdf, 0x01, 0x00, 0x00, 0x3f, 0x01, 0x3f,
- 0x01, 0x00, 0x00, 0x05, 0x40, 0x01, 0xf0, 0x00} },
- {4, /* JPEG, 640x480 */
- {0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe3,
- 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x07, 0xe1, 0x01, 0xe1, 0x01, 0x3f, 0x01, 0x3f,
- 0x01, 0x3f, 0x01, 0x05, 0x80, 0x02, 0xe0, 0x01} },
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
},
[SENSOR_MT9V111] = {
- {0, /* JPEG, 160x120 */
- {0x05, 0x3d, 0x20, 0x0b, 0x00, 0xbd, 0x02, 0x0b,
- 0x02, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0x01, 0x01, 0x01, 0x01, 0x9f, 0x00, 0x9f,
- 0x00, 0x9f, 0x01, 0x05, 0xa0, 0x00, 0x80, 0x00} },
- {2, /* JPEG, 320x240 */
- {0x01, 0x02, 0x20, 0x03, 0x20, 0x82, 0x02, 0xe3,
- 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x02, 0xdf, 0x01, 0x00, 0x00, 0x3f, 0x01, 0x3f,
- 0x01, 0x00, 0x00, 0x05, 0x40, 0x01, 0xf0, 0x00} },
- {4, /* JPEG, 640x480 */
- {0x01, 0x02, 0x20, 0x03, 0x20, 0x82, 0x02, 0xe3,
+ {2, /* Bayer 320x240 */
+ {0x05, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1,
+ 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {4, /* Bayer 640x480 */
+ {0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1,
0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8,
- 0x07, 0xe1, 0x01, 0xe1, 0x01, 0x3f, 0x01, 0x3f,
- 0x01, 0x3f, 0x01, 0x05, 0x80, 0x02, 0xe0, 0x01} },
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
},
};
@@ -864,7 +822,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
buf[i++] = 0x35; /* reg = global gain */
buf[i++] = 0x00; /* val H */
buf[i++] = sensor->i2c_dum;
- buf[i++] = sd->gain; /* val L */
+ buf[i++] = 0x80 + sd->gain / 2; /* val L */
buf[i++] = 0x00;
buf[i++] = 0x00;
buf[i++] = 0x00;
@@ -889,10 +847,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode);
cam->bulk = 1;
- cam->bulk_size = BULK_TRANSFER_LEN;
-/* cam->bulk_nurbs = 2; fixme: if no setexpo sync */
- sd->quality = QUALITY_DEF;
sd->gain = GAIN_DEF;
sd->expo = EXPO_DEF;
@@ -945,13 +900,10 @@ static int sd_init(struct gspca_dev *gspca_dev)
if (sd->sensor == SENSOR_MI0360) {
/* no sensor probe for icam tracer */
- if (gspca_dev->usb_buf[5] == 0xf6) { /* if CMOS */
+ if (gspca_dev->usb_buf[5] == 0xf6) /* if CMOS */
sd->sensor = SENSOR_ICX098BQ;
- gspca_dev->cam.cam_mode = &vga_mode[1];
- gspca_dev->cam.nmodes = 1; /* only 320x240 */
- } else {
+ else
cmos_probe(gspca_dev);
- }
}
PDEBUG(D_PROBE, "Sensor %s", sensor_tb[sd->sensor].name);
@@ -960,51 +912,24 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
-/* special function to create the quantization tables of the JPEG header */
-static void sd_jpeg_set_qual(u8 *jpeg_hdr,
- int quality)
-{
- int i, sc1, sc2;
-
- quality = quality_tb[quality]; /* convert to JPEG quality */
-/*
- * approximative qualities for Y and U/V:
- * quant = 0:94%/91% 1:91%/87% 2:82%/73% 3:69%/56%
- * should have:
- * quant = 0:94%/91% 1:91%/87.5% 2:81.5%/72% 3:69%/54.5%
- */
- sc1 = 200 - quality * 2;
- quality = quality * 7 / 5 - 40; /* UV quality */
- sc2 = 200 - quality * 2;
- for (i = 0; i < 64; i++) {
- jpeg_hdr[JPEG_QT0_OFFSET + i] =
- (jpeg_head[JPEG_QT0_OFFSET + i] * sc1 + 50) / 100;
- jpeg_hdr[JPEG_QT1_OFFSET + i] =
- (jpeg_head[JPEG_QT1_OFFSET + i] * sc2 + 50) / 100;
- }
-}
-
/* send the start/stop commands to the webcam */
static void send_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
const struct cap_s *cap;
- int mode, quality;
+ int mode;
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
cap = &capconfig[sd->sensor][mode];
- quality = sd->quality;
- reg_wb(gspca_dev, (quality << 12)
- | 0x0a00 /* 900 for Bayer */
- | SQ930_CTRL_CAP_START,
- 0x0500 /* a00 for Bayer */
- | cap->cc_sizeid,
+ reg_wb(gspca_dev, 0x0900 | SQ930_CTRL_CAP_START,
+ 0x0a00 | cap->cc_sizeid,
cap->cc_bytes, 32);
-};
+}
+
static void send_stop(struct gspca_dev *gspca_dev)
{
reg_w(gspca_dev, SQ930_CTRL_CAP_STOP, 0);
-};
+}
/* function called at start time before URB creation */
static int sd_isoc_init(struct gspca_dev *gspca_dev)
@@ -1013,6 +938,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */
sd->do_ctrl = 0;
+ gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8;
return 0;
}
@@ -1022,11 +948,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int mode;
- /* initialize the JPEG header */
- jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
- 0x21); /* JPEG 422 */
- sd_jpeg_set_qual(sd->jpeg_hdr, sd->quality);
-
bridge_init(sd);
global_init(sd, 0);
msleep(100);
@@ -1071,7 +992,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
ARRAY_SIZE(lz24bp_start_2),
6);
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
- lz24bp_ppl(sd, mode == 2 ? 0x0564 : 0x0310);
+ lz24bp_ppl(sd, mode == 1 ? 0x0564 : 0x0310);
msleep(10);
break;
case SENSOR_MI0360:
@@ -1095,7 +1016,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* 1st start */
send_start(gspca_dev);
msleep(60);
- reg_w(gspca_dev, SQ930_CTRL_CAP_STOP, 0x0000);
+ send_stop(gspca_dev);
i2c_write(sd,
mi0360_start_4, ARRAY_SIZE(mi0360_start_4));
@@ -1113,7 +1034,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
ARRAY_SIZE(mt9v111_init_2));
ucbus_write(gspca_dev, mt9v111_start_1,
ARRAY_SIZE(mt9v111_start_1),
- 8);
+ 5);
i2c_write(sd, mt9v111_init_3,
ARRAY_SIZE(mt9v111_init_3));
i2c_write(sd, mt9v111_init_4,
@@ -1125,8 +1046,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
out:
msleep(1000);
- sd->eof_len = 0; /* init packet scan */
-
if (sd->sensor == SENSOR_MT9V111)
gpio_set(sd, SQ930_GPIO_DFL_LED, SQ930_GPIO_DFL_LED);
@@ -1166,94 +1085,17 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
msleep(100);
}
-/* move a packet adding 0x00 after 0xff */
-static void add_packet(struct gspca_dev *gspca_dev,
- u8 *data,
- int len)
-{
- int i;
-
- i = 0;
- do {
- if (data[i] == 0xff) {
- gspca_frame_add(gspca_dev, INTER_PACKET,
- data, i + 1);
- len -= i;
- data += i;
- *data = 0x00;
- i = 0;
- }
- } while (++i < len);
- gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
-}
-
-/* end a frame and start a new one */
-static void eof_sof(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- static const u8 ffd9[] = {0xff, 0xd9};
-
- /* if control set, stop bulk transfer */
- if (sd->do_ctrl
- && gspca_dev->last_packet_type == INTER_PACKET)
- gspca_dev->cam.bulk_nurbs = 0;
- gspca_frame_add(gspca_dev, LAST_PACKET,
- ffd9, 2);
- gspca_frame_add(gspca_dev, FIRST_PACKET,
- sd->jpeg_hdr, JPEG_HDR_SZ);
-}
-
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 *p;
- int l;
-
- len -= 8; /* ignore last 8 bytes (00 00 55 aa 55 aa 00 00) */
-
- /*
- * the end/start of frame is indicated by
- * 0x00 * 16 - 0xab * 8
- * aligned on 8 bytes boundary
- */
- if (sd->eof_len != 0) { /* if 'abababab' in previous pkt */
- if (*((u32 *) data) == 0xabababab) {
- /*fixme: should remove previous 0000ababab*/
- eof_sof(gspca_dev);
- data += 4;
- len -= 4;
- }
- sd->eof_len = 0;
- }
- p = data;
- l = len;
- for (;;) {
- if (*((u32 *) p) == 0xabababab) {
- if (l < 8) { /* (may be 4 only) */
- sd->eof_len = 1;
- break;
- }
- if (*((u32 *) p + 1) == 0xabababab) {
- add_packet(gspca_dev, data, p - data - 16);
- /* remove previous zeros */
- eof_sof(gspca_dev);
- p += 8;
- l -= 8;
- if (l <= 0)
- return;
- len = l;
- data = p;
- continue;
- }
- }
- p += 4;
- l -= 4;
- if (l <= 0)
- break;
- }
- add_packet(gspca_dev, data, len);
+
+ if (sd->do_ctrl)
+ gspca_dev->cam.bulk_nurbs = 0;
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len - 8);
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
}
static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
@@ -1291,45 +1133,6 @@ static int sd_getexpo(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-static int sd_set_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int quality;
-
- if (jcomp->quality >= (QUAL_0 + QUAL_1) / 2)
- quality = 0;
- else if (jcomp->quality >= (QUAL_1 + QUAL_2) / 2)
- quality = 1;
- else if (jcomp->quality >= (QUAL_2 + QUAL_3) / 2)
- quality = 2;
- else
- quality = 3;
-
- if (quality != sd->quality) {
- sd->quality = quality;
- if (gspca_dev->streaming) {
- send_stop(gspca_dev);
- sd_jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- msleep(70);
- send_start(gspca_dev);
- }
- }
- return gspca_dev->usb_err;
-}
-
-static int sd_get_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = quality_tb[sd->quality];
- jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
- | V4L2_JPEG_MARKER_DQT;
- return 0;
-}
-
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -1342,8 +1145,6 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = sd_dq_callback,
- .get_jcomp = sd_get_jcomp,
- .set_jcomp = sd_set_jcomp,
};
/* Table of supported USB devices */
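
With the switch to raw Bayer above, each bulk URB carries exactly one frame: sd_isoc_init() sizes the transfer at width * height + 8 bytes, and the simplified sd_pkt_scan() forwards the first width * height bytes and drops the 8-byte trailer noted in the deleted comment (00 00 55 aa 55 aa 00 00). A small stand-alone check of those sizes (editorial illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Modes kept in vga_mode[] above: 320x240 and 640x480, SRGGB8 */
	static const struct { int w, h; } modes[] = { { 320, 240 }, { 640, 480 } };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		printf("%dx%d: image %d bytes, bulk_size %d (+8 byte trailer)\n",
		       modes[i].w, modes[i].h,
		       modes[i].w * modes[i].h,
		       modes[i].w * modes[i].h + 8);
	return 0;
}
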
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 2a0f12d55e4..3b3b983f2b9 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -55,12 +55,12 @@ struct sd {
u8 effect;
u8 sensor;
-enum {
+};
+enum sensors {
SENSOR_OM6802,
SENSOR_OTHER,
SENSOR_TAS5130A,
SENSOR_LT168G, /* must verify if this is the actual model */
-} sensors;
};
/* V4L2 controls supported by the driver */
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 031266a4081..b16fd47e8ce 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -39,6 +39,10 @@ struct sd {
u8 vflip;
u8 lightfreq;
s8 sharpness;
+ u16 exposure;
+ u8 gain;
+ u8 autogain;
+ u8 backlight;
u8 image_offset;
@@ -77,6 +81,14 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setbacklight(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getbacklight(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
#define BRIGHTNESS_IDX 0
@@ -185,6 +197,66 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setsharpness,
.get = sd_getsharpness,
},
+#define GAIN_IDX 7
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 78,
+ .step = 1,
+#define GAIN_DEF 0
+ .default_value = GAIN_DEF,
+ },
+ .set = sd_setgain,
+ .get = sd_getgain,
+ },
+#define EXPOSURE_IDX 8
+ {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+#define EXPOSURE_DEF 450
+ .minimum = 0,
+ .maximum = 4095,
+ .step = 1,
+ .default_value = EXPOSURE_DEF,
+ },
+ .set = sd_setexposure,
+ .get = sd_getexposure,
+ },
+#define AUTOGAIN_IDX 9
+ {
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Automatic Gain and Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AUTOGAIN_DEF 1
+ .default_value = AUTOGAIN_DEF,
+ },
+ .set = sd_setautogain,
+ .get = sd_getautogain,
+ },
+#define BACKLIGHT_IDX 10
+ {
+ {
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Backlight Compensation",
+ .minimum = 0,
+ .maximum = 15,
+ .step = 1,
+#define BACKLIGHT_DEF 15
+ .default_value = BACKLIGHT_DEF,
+ },
+ .set = sd_setbacklight,
+ .get = sd_getbacklight,
+ },
};
/* table of the disabled controls */
@@ -192,33 +264,51 @@ static u32 ctrl_dis[] = {
/* SENSOR_HV7131R 0 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
| (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX)
- | (1 << SHARPNESS_IDX),
+ | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_MI0360 1 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
| (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX)
- | (1 << SHARPNESS_IDX),
+ | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_MI1310_SOC 2 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX),
+ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_MI1320 3 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX),
+ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_MI1320_SOC 4 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX),
+ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_OV7660 5 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX),
+ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_OV7670 6 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << SHARPNESS_IDX),
+ | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_PO1200 7 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX),
+ | (1 << LIGHTFREQ_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_PO3130NC 8 */
(1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
| (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX)
- | (1 << SHARPNESS_IDX),
+ | (1 << SHARPNESS_IDX)
+ | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
/* SENSOR_POxxxx 9 */
(1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX),
};
@@ -2825,7 +2915,9 @@ static const u8 poxxxx_init_common[][4] = {
{0x00, 0x1e, 0xc6, 0xaa},
{0x00, 0x00, 0x40, 0xdd},
{0x00, 0x1d, 0x05, 0xaa},
-
+ {}
+};
+static const u8 poxxxx_gamma[][4] = {
{0x00, 0xd6, 0x22, 0xaa}, /* gamma 0 */
{0x00, 0x73, 0x00, 0xaa},
{0x00, 0x74, 0x0a, 0xaa},
@@ -2867,19 +2959,9 @@ static const u8 poxxxx_init_common[][4] = {
{0x00, 0x7c, 0xba, 0xaa},
{0x00, 0x7d, 0xd4, 0xaa},
{0x00, 0x7e, 0xea, 0xaa},
-
- {0x00, 0xaa, 0xff, 0xaa}, /* back light comp */
- {0x00, 0xc4, 0x03, 0xaa},
- {0x00, 0xc5, 0x19, 0xaa},
- {0x00, 0xc6, 0x03, 0xaa},
- {0x00, 0xc7, 0x91, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa},
- {0x00, 0xc9, 0xdd, 0xaa},
- {0x00, 0xca, 0x02, 0xaa},
- {0x00, 0xcb, 0x37, 0xaa},
-
-/* read d1 */
- {0x00, 0xd1, 0x3c, 0xaa},
+ {}
+};
+static const u8 poxxxx_init_start_3[][4] = {
{0x00, 0xb8, 0x28, 0xaa},
{0x00, 0xb9, 0x1e, 0xaa},
{0x00, 0xb6, 0x14, 0xaa},
@@ -2959,9 +3041,6 @@ static const u8 poxxxx_init_end_1[][4] = {
{0x00, 0xb3, 0x08, 0xaa},
{0x00, 0xb4, 0x0b, 0xaa},
{0x00, 0xb5, 0x0d, 0xaa},
- {0x00, 0x59, 0x7e, 0xaa}, /* sharpness */
- {0x00, 0x16, 0x00, 0xaa}, /* white balance */
- {0x00, 0x18, 0x00, 0xaa},
{}
};
static const u8 poxxxx_init_end_2[][4] = {
@@ -3312,6 +3391,33 @@ static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->bridge = id->driver_info >> 8;
+ sd->flags = id->driver_info & 0xff;
+
+ if (id->idVendor == 0x046d &&
+ (id->idProduct == 0x0892 || id->idProduct == 0x0896))
+ sd->sensor = SENSOR_POxxxx; /* no probe */
+
+ sd->brightness = BRIGHTNESS_DEF;
+ sd->contrast = CONTRAST_DEF;
+ sd->colors = COLOR_DEF;
+ sd->hflip = HFLIP_DEF;
+ sd->vflip = VFLIP_DEF;
+ sd->lightfreq = FREQ_DEF;
+ sd->sharpness = SHARPNESS_DEF;
+ sd->gain = GAIN_DEF;
+ sd->exposure = EXPOSURE_DEF;
+ sd->autogain = AUTOGAIN_DEF;
+ sd->backlight = BACKLIGHT_DEF;
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
int sensor;
static u8 npkt[] = { /* number of packets per ISOC message */
@@ -3327,14 +3433,11 @@ static int sd_config(struct gspca_dev *gspca_dev,
128, /* POxxxx 9 */
};
- cam = &gspca_dev->cam;
- sd->bridge = id->driver_info >> 8;
- sd->flags = id->driver_info & 0xff;
- if (id->idVendor == 0x046d &&
- (id->idProduct == 0x0892 || id->idProduct == 0x0896))
- sensor = SENSOR_POxxxx;
- else
+ if (sd->sensor != SENSOR_POxxxx)
sensor = vc032x_probe_sensor(gspca_dev);
+ else
+ sensor = sd->sensor;
+
switch (sensor) {
case -1:
PDEBUG(D_PROBE, "Unknown sensor...");
@@ -3373,6 +3476,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
sd->sensor = sensor;
+ cam = &gspca_dev->cam;
if (sd->bridge == BRIDGE_VC0321) {
cam->cam_mode = vc0321_mode;
cam->nmodes = ARRAY_SIZE(vc0321_mode);
@@ -3401,28 +3505,11 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
}
cam->npkt = npkt[sd->sensor];
-
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->hflip = HFLIP_DEF;
- sd->vflip = VFLIP_DEF;
- sd->lightfreq = FREQ_DEF;
- sd->sharpness = SHARPNESS_DEF;
-
gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
if (sd->sensor == SENSOR_OV7670)
sd->flags |= FL_HFLIP | FL_VFLIP;
- return 0;
-}
-
-/* this function is called at probe and resume time */
-static int sd_init(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
if (sd->bridge == BRIDGE_VC0321) {
reg_r(gspca_dev, 0x8a, 0, 3);
reg_w(gspca_dev, 0x87, 0x00, 0x0f0f);
@@ -3433,8 +3520,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_buf[0] != 0) {
reg_w(gspca_dev, 0xa0, 0x26, 0xb300);
reg_w(gspca_dev, 0xa0, 0x04, 0xb300);
- reg_w(gspca_dev, 0xa0, 0x00, 0xb300);
}
+ reg_w(gspca_dev, 0xa0, 0x00, 0xb300);
}
}
return gspca_dev->usb_err;
@@ -3551,6 +3638,82 @@ static void setsharpness(struct gspca_dev *gspca_dev)
break;
}
}
+static void setgain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (gspca_dev->ctrl_dis & (1 << GAIN_IDX))
+ return;
+ i2c_write(gspca_dev, 0x15, &sd->gain, 1);
+}
+
+static void setexposure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u8 data;
+
+ if (gspca_dev->ctrl_dis & (1 << EXPOSURE_IDX))
+ return;
+ data = sd->exposure >> 8;
+ i2c_write(gspca_dev, 0x1a, &data, 1);
+ data = sd->exposure;
+ i2c_write(gspca_dev, 0x1b, &data, 1);
+}
+
+static void setautogain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ static const u8 data[2] = {0x28, 0x3c};
+
+ if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX))
+ return;
+ i2c_write(gspca_dev, 0xd1, &data[sd->autogain], 1);
+}
+
+static void setgamma(struct gspca_dev *gspca_dev)
+{
+/*fixme:to do */
+ usb_exchange(gspca_dev, poxxxx_gamma);
+}
+
+static void setbacklight(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 v;
+ u8 data;
+
+ data = (sd->backlight << 4) | 0x0f;
+ i2c_write(gspca_dev, 0xaa, &data, 1);
+ v = 613 + 12 * sd->backlight;
+ data = v >> 8;
+ i2c_write(gspca_dev, 0xc4, &data, 1);
+ data = v;
+ i2c_write(gspca_dev, 0xc5, &data, 1);
+ v = 1093 - 12 * sd->backlight;
+ data = v >> 8;
+ i2c_write(gspca_dev, 0xc6, &data, 1);
+ data = v;
+ i2c_write(gspca_dev, 0xc7, &data, 1);
+ v = 342 + 9 * sd->backlight;
+ data = v >> 8;
+ i2c_write(gspca_dev, 0xc8, &data, 1);
+ data = v;
+ i2c_write(gspca_dev, 0xc9, &data, 1);
+ v = 702 - 9 * sd->backlight;
+ data = v >> 8;
+ i2c_write(gspca_dev, 0xca, &data, 1);
+ data = v;
+ i2c_write(gspca_dev, 0xcb, &data, 1);
+}
+
+static void setwb(struct gspca_dev *gspca_dev)
+{
+/*fixme:to do - valid when reg d1 = 0x1c - (reg16 + reg15 = 0xa3)*/
+ static const u8 data[2] = {0x00, 0x00};
+
+ i2c_write(gspca_dev, 0x16, &data[0], 1);
+ i2c_write(gspca_dev, 0x18, &data[1], 1);
+}
static int sd_start(struct gspca_dev *gspca_dev)
{
@@ -3662,6 +3825,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
default:
/* case SENSOR_POxxxx: */
usb_exchange(gspca_dev, poxxxx_init_common);
+ setgamma(gspca_dev);
+ setbacklight(gspca_dev);
+ setbrightness(gspca_dev);
+ setcontrast(gspca_dev);
+ setcolors(gspca_dev);
+ setsharpness(gspca_dev);
+ setautogain(gspca_dev);
+ setexposure(gspca_dev);
+ setgain(gspca_dev);
+ usb_exchange(gspca_dev, poxxxx_init_start_3);
if (mode)
init = poxxxx_initQVGA;
else
@@ -3693,7 +3866,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
msleep(100);
- setsharpness(gspca_dev);
sethvflip(gspca_dev);
setlightfreq(gspca_dev);
}
@@ -3704,14 +3876,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xa0, 0x0000, 0xbfff);
break;
case SENSOR_POxxxx:
- setcolors(gspca_dev);
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
-
- /* led on */
- msleep(80);
- reg_w(gspca_dev, 0x89, 0xffff, 0xfdff);
usb_exchange(gspca_dev, poxxxx_init_end_2);
+ setwb(gspca_dev);
+ msleep(80); /* led on */
+ reg_w(gspca_dev, 0x89, 0xffff, 0xfdff);
break;
}
return gspca_dev->usb_err;
@@ -3911,6 +4079,80 @@ static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->gain = val;
+ if (gspca_dev->streaming)
+ setgain(gspca_dev);
+ return gspca_dev->usb_err;
+}
+
+static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->gain;
+ return 0;
+}
+
+static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->exposure = val;
+ if (gspca_dev->streaming)
+ setexposure(gspca_dev);
+ return gspca_dev->usb_err;
+}
+
+static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->exposure;
+ return 0;
+}
+
+static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->autogain = val;
+ if (gspca_dev->streaming)
+ setautogain(gspca_dev);
+
+ return gspca_dev->usb_err;
+}
+
+static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->autogain;
+ return 0;
+}
+
+static int sd_setbacklight(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->backlight = val;
+ if (gspca_dev->streaming)
+ setbacklight(gspca_dev);
+
+ return gspca_dev->usb_err;
+}
+
+static int sd_getbacklight(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->backlight;
+ return 0;
+}
+
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
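
For the POxxxx sensors, the new setbacklight() formulas reproduce, at the default setting of 15, exactly the constants this patch removes from poxxxx_init_common (reg 0xaa = 0xff, 0xc4/0xc5 = 0x0319, 0xc6/0xc7 = 0x0391, 0xc8/0xc9 = 0x01dd, 0xca/0xcb = 0x0237). A small stand-alone check of that arithmetic (editorial illustration only, not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int backlight = 15;		/* BACKLIGHT_DEF */

	assert(((backlight << 4) | 0x0f) == 0xff);	/* reg 0xaa */
	assert(613 + 12 * backlight == 0x0319);		/* regs 0xc4/0xc5 */
	assert(1093 - 12 * backlight == 0x0391);	/* regs 0xc6/0xc7 */
	assert(342 + 9 * backlight == 0x01dd);		/* regs 0xc8/0xc9 */
	assert(702 - 9 * backlight == 0x0237);		/* regs 0xca/0xcb */
	printf("setbacklight(15) matches the removed poxxxx_init_common values\n");
	return 0;
}
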
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 4473f0fb8b7..0666038a51b 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -21,7 +21,9 @@
#define MODULE_NAME "zc3xx"
+#ifdef CONFIG_INPUT
#include <linux/input.h>
+#endif
#include "gspca.h"
#include "jpeg.h"
@@ -50,33 +52,38 @@ struct sd {
#define QUALITY_MAX 80
#define QUALITY_DEF 70
+ u8 bridge;
u8 sensor; /* Type of image sensor chip */
-/* !! values used in different tables */
-#define SENSOR_ADCM2700 0
-#define SENSOR_CS2102 1
-#define SENSOR_CS2102K 2
-#define SENSOR_GC0305 3
-#define SENSOR_HDCS2020b 4
-#define SENSOR_HV7131B 5
-#define SENSOR_HV7131C 6
-#define SENSOR_ICM105A 7
-#define SENSOR_MC501CB 8
-#define SENSOR_MI0360SOC 9
-#define SENSOR_OV7620 10
-/*#define SENSOR_OV7648 10 - same values */
-#define SENSOR_OV7630C 11
-#define SENSOR_PAS106 12
-#define SENSOR_PAS202B 13
-#define SENSOR_PB0330 14 /* (MI0360) */
-#define SENSOR_PO2030 15
-#define SENSOR_TAS5130CK 16
-#define SENSOR_TAS5130CXX 17
-#define SENSOR_TAS5130C_VF0250 18
-#define SENSOR_MAX 19
- unsigned short chip_revision;
+ u16 chip_revision;
u8 jpeg_hdr[JPEG_HDR_SZ];
};
+enum bridges {
+ BRIDGE_ZC301,
+ BRIDGE_ZC303,
+};
+enum sensors {
+ SENSOR_ADCM2700,
+ SENSOR_CS2102,
+ SENSOR_CS2102K,
+ SENSOR_GC0305,
+ SENSOR_HDCS2020b,
+ SENSOR_HV7131B,
+ SENSOR_HV7131R,
+ SENSOR_ICM105A,
+ SENSOR_MC501CB,
+ SENSOR_MT9V111_1, /* (mi360soc) zc301 */
+ SENSOR_MT9V111_3, /* (mi360soc) zc303 */
+ SENSOR_OV7620, /* OV7648 - same values */
+ SENSOR_OV7630C,
+ SENSOR_PAS106,
+ SENSOR_PAS202B,
+ SENSOR_PB0330,
+ SENSOR_PO2030,
+ SENSOR_TAS5130C,
+ SENSOR_TAS5130C_VF0250,
+ SENSOR_MAX
+};
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
@@ -2074,6 +2081,7 @@ static const struct usb_action hv7131b_NoFlikerScale[] = { /* 320x240 */
{}
};
+/* from lPEPI264v.inf (hv7131b!) */
static const struct usb_action hv7131r_InitialScale[] = {
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
{0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
@@ -2081,8 +2089,8 @@ static const struct usb_action hv7131r_InitialScale[] = {
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
{0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION},
{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
- {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
@@ -2095,6 +2103,8 @@ static const struct usb_action hv7131r_InitialScale[] = {
{0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW},
{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
+ {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xdd, 0x00, 0x0200},
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xaa, 0x01, 0x000c},
{0xaa, 0x11, 0x0000},
@@ -2103,10 +2113,10 @@ static const struct usb_action hv7131r_InitialScale[] = {
{0xaa, 0x15, 0x00e8},
{0xaa, 0x16, 0x0002},
{0xaa, 0x17, 0x0088},
-
+ {0xaa, 0x30, 0x000b},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
- {0xa0, 0x89, ZC3XX_R18D_YTARGET},
+ {0xa0, 0x78, ZC3XX_R18D_YTARGET},
{0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x00, 0x01ad},
{0xa0, 0xc0, 0x019b},
@@ -2116,96 +2126,44 @@ static const struct usb_action hv7131r_InitialScale[] = {
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
- {0xa1, 0x01, 0x0002},
- {0xa0, 0x00, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x02, ZC3XX_R090_I2CCOMMAND},
- {0xa1, 0x01, 0x0091},
- {0xa1, 0x01, 0x0095},
- {0xa1, 0x01, 0x0096},
-
- {0xa1, 0x01, 0x0008},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */
- {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
- {0xa1, 0x01, 0x01c8},
- {0xa1, 0x01, 0x01c9},
- {0xa1, 0x01, 0x01ca},
- {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
-
- {0xa0, 0x60, ZC3XX_R10A_RGB00}, /* matrix */
- {0xa0, 0xf0, ZC3XX_R10B_RGB01},
- {0xa0, 0xf0, ZC3XX_R10C_RGB02},
- {0xa0, 0xf0, ZC3XX_R10D_RGB10},
- {0xa0, 0x60, ZC3XX_R10E_RGB11},
- {0xa0, 0xf0, ZC3XX_R10F_RGB12},
- {0xa0, 0xf0, ZC3XX_R110_RGB20},
- {0xa0, 0xf0, ZC3XX_R111_RGB21},
- {0xa0, 0x60, ZC3XX_R112_RGB22},
- {0xa1, 0x01, 0x0180},
- {0xa0, 0x10, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
- {0xaa, 0x25, 0x0007},
- {0xaa, 0x26, 0x0053},
- {0xaa, 0x27, 0x0000},
-
- {0xa0, 0x10, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 2f */
- {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 9b */
- {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 80 */
- {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH},
- {0xa0, 0xd4, ZC3XX_R196_ANTIFLICKERMID},
- {0xa0, 0xc0, ZC3XX_R197_ANTIFLICKERLOW},
- {0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
- {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
- {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
- {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
- {0xa0, 0x13, ZC3XX_R1AA_DIGITALGAINSTEP},
- {0xa1, 0x01, 0x001d},
- {0xa1, 0x01, 0x001e},
- {0xa1, 0x01, 0x001f},
- {0xa1, 0x01, 0x0020},
- {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa1, 0x01, 0x0180},
- {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
-
static const struct usb_action hv7131r_Initial[] = {
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
-
- {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* diff */
+ {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT},
{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT},
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
{0xa0, 0x77, ZC3XX_R101_SENSORCORRECTION},
{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
-
- {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
-
+ {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
- {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 1e0 */
+ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
{0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH},
- {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW},
+ {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW},
{0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH},
- {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW},
+ {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW},
{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
+ {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xdd, 0x00, 0x0200},
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xaa, 0x01, 0x000c},
{0xaa, 0x11, 0x0000},
{0xaa, 0x13, 0x0000},
{0xaa, 0x14, 0x0001},
- {0xaa, 0x15, 0x00e8},
+ {0xaa, 0x15, 0x00e6},
{0xaa, 0x16, 0x0002},
- {0xaa, 0x17, 0x0088},
-
- {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00 */
-
+ {0xaa, 0x17, 0x0086},
+ {0xaa, 0x30, 0x000b},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
- {0xa0, 0x89, ZC3XX_R18D_YTARGET},
+ {0xa0, 0x78, ZC3XX_R18D_YTARGET},
{0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x00, 0x01ad},
{0xa0, 0xc0, 0x019b},
@@ -2215,58 +2173,114 @@ static const struct usb_action hv7131r_Initial[] = {
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
- {0xa1, 0x01, 0x0002},
- {0xa0, 0x00, ZC3XX_R092_I2CADDRESSSELECT},
- /* read the i2c chips ident */
- {0xa0, 0x02, ZC3XX_R090_I2CCOMMAND},
- {0xa1, 0x01, 0x0091},
- {0xa1, 0x01, 0x0095},
- {0xa1, 0x01, 0x0096},
-
- {0xa1, 0x01, 0x0008},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */
- {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
- {0xa1, 0x01, 0x01c8},
- {0xa1, 0x01, 0x01c9},
- {0xa1, 0x01, 0x01ca},
- {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
-
- {0xa0, 0x60, ZC3XX_R10A_RGB00}, /* matrix */
- {0xa0, 0xf0, ZC3XX_R10B_RGB01},
- {0xa0, 0xf0, ZC3XX_R10C_RGB02},
- {0xa0, 0xf0, ZC3XX_R10D_RGB10},
- {0xa0, 0x60, ZC3XX_R10E_RGB11},
- {0xa0, 0xf0, ZC3XX_R10F_RGB12},
- {0xa0, 0xf0, ZC3XX_R110_RGB20},
- {0xa0, 0xf0, ZC3XX_R111_RGB21},
- {0xa0, 0x60, ZC3XX_R112_RGB22},
- {0xa1, 0x01, 0x0180},
- {0xa0, 0x10, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
+static const struct usb_action hv7131r_50HZ[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
- {0xaa, 0x25, 0x0007},
- {0xaa, 0x26, 0x0053},
- {0xaa, 0x27, 0x0000},
-
- {0xa0, 0x10, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 2f */
- {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 9b */
- {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 80 */
-
+ {0xa0, 0x06, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x68, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0xa0, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0xea, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x60, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x18, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0x08, ZC3XX_R020_HSYNC_3},
+ {}
+};
+static const struct usb_action hv7131r_50HZScale[] = {
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xa0, 0x0c, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0xd1, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x40, ZC3XX_R192_EXPOSURELIMITLOW},
{0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH},
{0xa0, 0xd4, ZC3XX_R196_ANTIFLICKERMID},
{0xa0, 0xc0, ZC3XX_R197_ANTIFLICKERLOW},
-
- {0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x18, ZC3XX_R18C_AEFREEZE},
{0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
- {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
- {0xa0, 0x13, ZC3XX_R1AA_DIGITALGAINSTEP},
- {0xa1, 0x01, 0x001d},
- {0xa1, 0x01, 0x001e},
- {0xa1, 0x01, 0x001f},
- {0xa1, 0x01, 0x0020},
- {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa1, 0x01, 0x0180},
- {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0x08, ZC3XX_R020_HSYNC_3},
+ {}
+};
+static const struct usb_action hv7131r_60HZ[] = {
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xa0, 0x06, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x1a, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0xc3, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x50, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x18, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0x08, ZC3XX_R020_HSYNC_3},
+ {}
+};
+static const struct usb_action hv7131r_60HZScale[] = {
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xa0, 0x0c, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x35, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x86, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0xa0, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x18, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0x08, ZC3XX_R020_HSYNC_3},
+ {}
+};
+static const struct usb_action hv7131r_NoFliker[] = {
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x02, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x58, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0x08, ZC3XX_R020_HSYNC_3},
+ {}
+};
+static const struct usb_action hv7131r_NoFlikerScale[] = {
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x04, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0xb0, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xd0, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x00, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0x08, ZC3XX_R020_HSYNC_3},
{}
};
@@ -3350,7 +3364,7 @@ static const struct usb_action ov7620_NoFliker[] = {
{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
{0xa0, 0x01, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,01,cc */
/* {0xa0, 0x44, ZC3XX_R002_CLOCKSELECT}, * 00,02,44,cc
- - if mode1 (320x240) */
+ * if mode1 (320x240) */
/* ?? was
{0xa0, 0x00, 0x0039}, * 00,00,00,dd *
{0xa1, 0x01, 0x0037}, */
@@ -3439,7 +3453,6 @@ static const struct usb_action ov7630c_InitialScale[] = {
{0xa0, 0xf8, ZC3XX_R110_RGB20},
{0xa0, 0xf8, ZC3XX_R111_RGB21},
{0xa0, 0x50, ZC3XX_R112_RGB22},
-/* 0x03, */
{0xa1, 0x01, 0x0008},
{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */
{0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
@@ -3719,7 +3732,7 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
{0xaa, 0x0e, 0x0002},
{0xaa, 0x14, 0x0081},
-/* Other registors */
+/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
/* Frame retreiving */
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
@@ -3730,7 +3743,7 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
/* Sharpness */
{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
-/* Other registors */
+/* Other registers */
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
/* Auto exposure and white balance */
{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
@@ -3837,7 +3850,7 @@ static const struct usb_action pas106b_Initial[] = { /* 352x288 */
{0xaa, 0x0e, 0x0002},
{0xaa, 0x14, 0x0081},
-/* Other registors */
+/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
/* Frame retreiving */
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
@@ -3848,7 +3861,7 @@ static const struct usb_action pas106b_Initial[] = { /* 352x288 */
/* Sharpness */
{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
-/* Other registors */
+/* Other registers */
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
/* Auto exposure and white balance */
{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
@@ -4241,8 +4254,8 @@ static const struct usb_action pas202b_NoFlikerScale[] = {
{}
};
-/* mi0360soc and pb0330 from vm30x.inf for 0ac8:301b and 0ac8:303b 07/02/13 */
-static const struct usb_action mi0360soc_Initial[] = { /* 640x480 */
+/* mt9v111 (mi0360soc) and pb0330 from vm30x.inf 0ac8:301b 07/02/13 */
+static const struct usb_action mt9v111_1_Initial[] = { /* 640x480 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
@@ -4253,14 +4266,14 @@ static const struct usb_action mi0360soc_Initial[] = { /* 640x480 */
{0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
{0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
- {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, /*jfm: was 03*/
-/* {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, */
+ {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
{0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
{0xdd, 0x00, 0x0200},
+ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xaa, 0x01, 0x0001},
{0xaa, 0x06, 0x0000},
{0xaa, 0x08, 0x0483},
@@ -4270,18 +4283,18 @@ static const struct usb_action mi0360soc_Initial[] = { /* 640x480 */
{0xaa, 0x03, 0x01e5}, /*jfm: was 01e7*/
{0xaa, 0x04, 0x0285}, /*jfm: was 0287*/
{0xaa, 0x07, 0x3002},
- {0xaa, 0x20, 0x5100}, /*jfm: was 1100*/
- {0xaa, 0x35, 0x507f}, /*jfm: was 0050*/
+ {0xaa, 0x20, 0x5100},
+ {0xaa, 0x35, 0x507f},
{0xaa, 0x30, 0x0005},
{0xaa, 0x31, 0x0000},
{0xaa, 0x58, 0x0078},
{0xaa, 0x62, 0x0411},
- {0xaa, 0x2b, 0x0028},
+ {0xaa, 0x2b, 0x007f},
{0xaa, 0x2c, 0x007f}, /*jfm: was 0030*/
{0xaa, 0x2d, 0x007f}, /*jfm: was 0030*/
{0xaa, 0x2e, 0x007f}, /*jfm: was 0030*/
{0xa0, 0x10, ZC3XX_R087_EXPTIMEMID},
- {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /*jfm: was 37*/
+ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
@@ -4291,12 +4304,12 @@ static const struct usb_action mi0360soc_Initial[] = { /* 640x480 */
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
{0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
- {0xa0, 0x6c, ZC3XX_R18D_YTARGET}, /* jfm: was 78 */
+ {0xa0, 0x6c, ZC3XX_R18D_YTARGET},
{0xa0, 0x61, ZC3XX_R116_RGAIN},
{0xa0, 0x65, ZC3XX_R118_BGAIN},
{}
};
-static const struct usb_action mi0360soc_InitialScale[] = { /* 320x240 */
+static const struct usb_action mt9v111_1_InitialScale[] = { /* 320x240 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
{0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
@@ -4307,14 +4320,14 @@ static const struct usb_action mi0360soc_InitialScale[] = { /* 320x240 */
{0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
{0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
- {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC}, /*jfm: was 03*/
-/* {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, */
+ {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
{0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
{0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
{0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
{0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
{0xdd, 0x00, 0x0200},
+ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xaa, 0x01, 0x0001},
{0xaa, 0x06, 0x0000},
{0xaa, 0x08, 0x0483},
@@ -4324,7 +4337,7 @@ static const struct usb_action mi0360soc_InitialScale[] = { /* 320x240 */
{0xaa, 0x03, 0x01e7},
{0xaa, 0x04, 0x0287},
{0xaa, 0x07, 0x3002},
- {0xaa, 0x20, 0x5100}, /*jfm: was 1100*/
+ {0xaa, 0x20, 0x5100},
{0xaa, 0x35, 0x007f}, /*jfm: was 0050*/
{0xaa, 0x30, 0x0005},
{0xaa, 0x31, 0x0000},
@@ -4335,7 +4348,7 @@ static const struct usb_action mi0360soc_InitialScale[] = { /* 320x240 */
{0xaa, 0x2d, 0x007f}, /*jfm: was 30*/
{0xaa, 0x2e, 0x007f}, /*jfm: was 28*/
{0xa0, 0x10, ZC3XX_R087_EXPTIMEMID},
- {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /*jfm: was 37*/
+ {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
@@ -4345,12 +4358,12 @@ static const struct usb_action mi0360soc_InitialScale[] = { /* 320x240 */
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
{0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
- {0xa0, 0x6c, ZC3XX_R18D_YTARGET}, /*jfm: was 78*/
+ {0xa0, 0x6c, ZC3XX_R18D_YTARGET},
{0xa0, 0x61, ZC3XX_R116_RGAIN},
{0xa0, 0x65, ZC3XX_R118_BGAIN},
{}
};
-static const struct usb_action mi360soc_AE50HZ[] = {
+static const struct usb_action mt9v111_1_AE50HZ[] = {
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xbb, 0x00, 0x0562},
@@ -4373,7 +4386,7 @@ static const struct usb_action mi360soc_AE50HZ[] = {
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
-static const struct usb_action mi360soc_AE50HZScale[] = {
+static const struct usb_action mt9v111_1_AE50HZScale[] = {
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xbb, 0x00, 0x0509},
@@ -4395,11 +4408,11 @@ static const struct usb_action mi360soc_AE50HZScale[] = {
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
-static const struct usb_action mi360soc_AE60HZ[] = {
+static const struct usb_action mt9v111_1_AE60HZ[] = {
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
- {0xbb, 0x00, 0x053d},
- {0xbb, 0x01, 0x096e},
+ {0xaa, 0x05, 0x003d},
+ {0xaa, 0x09, 0x016e},
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
{0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
{0xa0, 0xdd, ZC3XX_R192_EXPOSURELIMITLOW},
@@ -4418,7 +4431,7 @@ static const struct usb_action mi360soc_AE60HZ[] = {
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
-static const struct usb_action mi360soc_AE60HZScale[] = {
+static const struct usb_action mt9v111_1_AE60HZScale[] = {
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xbb, 0x00, 0x0509},
@@ -4440,7 +4453,7 @@ static const struct usb_action mi360soc_AE60HZScale[] = {
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
-static const struct usb_action mi360soc_AENoFliker[] = {
+static const struct usb_action mt9v111_1_AENoFliker[] = {
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xbb, 0x00, 0x0509},
@@ -4463,7 +4476,7 @@ static const struct usb_action mi360soc_AENoFliker[] = {
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
-static const struct usb_action mi360soc_AENoFlikerScale[] = {
+static const struct usb_action mt9v111_1_AENoFlikerScale[] = {
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
{0xbb, 0x00, 0x0534},
@@ -4486,6 +4499,251 @@ static const struct usb_action mi360soc_AENoFlikerScale[] = {
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
{}
};
+/* from usbvm303.inf 0ac8:303b 07/03/25 (3 - tas5130c) */
+static const struct usb_action mt9v111_3_Initial[] = {
+ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
+ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
+ {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
+ {0xa0, 0x04, ZC3XX_R002_CLOCKSELECT},
+ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
+ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
+ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
+ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
+ {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
+ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
+ {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
+ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
+ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
+ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
+ {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
+ {0xdd, 0x00, 0x0200},
+ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xaa, 0x01, 0x0001}, /* select IFP/SOC registers */
+ {0xaa, 0x06, 0x0000}, /* operating mode control */
+ {0xaa, 0x08, 0x0483}, /* output format control */
+ /* H red first, V red or blue first,
+ * raw Bayer, auto flicker */
+ {0xaa, 0x01, 0x0004}, /* select sensor core registers */
+ {0xaa, 0x08, 0x0006}, /* row start */
+ {0xaa, 0x02, 0x0011}, /* column start */
+ {0xaa, 0x03, 0x01e5}, /* window height - 1 */
+ {0xaa, 0x04, 0x0285}, /* window width - 1 */
+ {0xaa, 0x07, 0x3002}, /* output control */
+ {0xaa, 0x20, 0x1100}, /* read mode: bits 8 & 12 (?) */
+ {0xaa, 0x35, 0x007f}, /* global gain */
+ {0xaa, 0x30, 0x0005},
+ {0xaa, 0x31, 0x0000},
+ {0xaa, 0x58, 0x0078},
+ {0xaa, 0x62, 0x0411},
+ {0xaa, 0x2b, 0x007f}, /* green1 gain */
+ {0xaa, 0x2c, 0x007f}, /* blue gain */
+ {0xaa, 0x2d, 0x007f}, /* red gain */
+ {0xaa, 0x2e, 0x007f}, /* green2 gain */
+ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID},
+ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
+ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
+ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
+ {0xa0, 0x00, 0x01ad},
+ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
+ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
+ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
+ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
+ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
+ {0xa0, 0x80, ZC3XX_R18D_YTARGET},
+ {0xa0, 0x61, ZC3XX_R116_RGAIN},
+ {0xa0, 0x65, ZC3XX_R118_BGAIN},
+ {}
+};
+static const struct usb_action mt9v111_3_InitialScale[] = {
+ {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
+ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
+ {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
+ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
+ {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
+ {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
+ {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
+ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
+ {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
+ {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
+ {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
+ {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
+ {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
+ {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
+ {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
+ {0xdd, 0x00, 0x0200},
+ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xaa, 0x01, 0x0001},
+ {0xaa, 0x06, 0x0000},
+ {0xaa, 0x08, 0x0483},
+ {0xaa, 0x01, 0x0004},
+ {0xaa, 0x08, 0x0006},
+ {0xaa, 0x02, 0x0011},
+ {0xaa, 0x03, 0x01e7},
+ {0xaa, 0x04, 0x0287},
+ {0xaa, 0x07, 0x3002},
+ {0xaa, 0x20, 0x1100},
+ {0xaa, 0x35, 0x007f},
+ {0xaa, 0x30, 0x0005},
+ {0xaa, 0x31, 0x0000},
+ {0xaa, 0x58, 0x0078},
+ {0xaa, 0x62, 0x0411},
+ {0xaa, 0x2b, 0x007f},
+ {0xaa, 0x2c, 0x007f},
+ {0xaa, 0x2d, 0x007f},
+ {0xaa, 0x2e, 0x007f},
+ {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID},
+ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
+ {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
+ {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
+ {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
+ {0xa0, 0x00, 0x01ad},
+ {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
+ {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
+ {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
+ {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
+ {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
+ {0xa0, 0x80, ZC3XX_R18D_YTARGET},
+ {0xa0, 0x61, ZC3XX_R116_RGAIN},
+ {0xa0, 0x65, ZC3XX_R118_BGAIN},
+ {}
+};
+static const struct usb_action mt9v111_3_AE50HZ[] = {
+ {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xaa, 0x05, 0x0009}, /* horizontal blanking */
+ {0xaa, 0x09, 0x01ce}, /* shutter width */
+ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x9a, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
+ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
+static const struct usb_action mt9v111_3_AE50HZScale[] = {
+ {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xaa, 0x05, 0x0009},
+ {0xaa, 0x09, 0x01ce},
+ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x9a, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
+ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
+static const struct usb_action mt9v111_3_AE60HZ[] = {
+ {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xaa, 0x05, 0x0009},
+ {0xaa, 0x09, 0x0083},
+ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x8f, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
+ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
+static const struct usb_action mt9v111_3_AE60HZScale[] = {
+ {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xaa, 0x05, 0x0009},
+ {0xaa, 0x09, 0x0083},
+ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x8f, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
+ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
+static const struct usb_action mt9v111_3_AENoFliker[] = {
+ {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xaa, 0x05, 0x0034},
+ {0xaa, 0x09, 0x0260},
+ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x34, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0x60, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x90, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0xe0, ZC3XX_R020_HSYNC_3},
+ {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
+ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
+static const struct usb_action mt9v111_3_AENoFlikerScale[] = {
+ {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
+ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
+ {0xaa, 0x05, 0x0034},
+ {0xaa, 0x09, 0x0260},
+ {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
+ {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW},
+ {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
+ {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
+ {0xa0, 0x04, ZC3XX_R197_ANTIFLICKERLOW},
+ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
+ {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
+ {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF},
+ {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP},
+ {0xa0, 0x34, ZC3XX_R01D_HSYNC_0},
+ {0xa0, 0x60, ZC3XX_R01E_HSYNC_1},
+ {0xa0, 0x90, ZC3XX_R01F_HSYNC_2},
+ {0xa0, 0xe0, ZC3XX_R020_HSYNC_3},
+ {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
+ {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
+ {}
+};
static const struct usb_action pb0330_Initial[] = { /* 640x480 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
@@ -4928,419 +5186,7 @@ static const struct usb_action po2030_NoFliker[] = {
{}
};
-/* TEST */
-static const struct usb_action tas5130cK_InitialScale[] = {
- {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
- {0xa0, 0x01, 0x003b},
- {0xa0, 0x0e, 0x003a},
- {0xa0, 0x01, 0x0038},
- {0xa0, 0x0b, 0x0039},
- {0xa0, 0x00, 0x0038},
- {0xa0, 0x0b, 0x0039},
- {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
- {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
- {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
- {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
- {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
- {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
- {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
- {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
- {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
- {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
- {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
- {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
- {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
- {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
- {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x01, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x06, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x08, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x83, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x04, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x01, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x08, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x06, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x02, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x11, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x03, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0xE7, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x01, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x04, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x87, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x02, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x07, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x30, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x51, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x35, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7F, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x30, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x05, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x31, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x58, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x78, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x62, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x11, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x04, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2B, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2c, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2D, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2e, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID},
- {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},
- {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
- {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
- {0xa0, 0x09, 0x01ad},
- {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
- {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
- {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
- {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
- {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
- {0xa0, 0x6c, ZC3XX_R18D_YTARGET},
- {0xa0, 0x61, ZC3XX_R116_RGAIN},
- {0xa0, 0x65, ZC3XX_R118_BGAIN},
- {0xa0, 0x09, 0x01ad},
- {0xa0, 0x15, 0x01ae},
- {0xa0, 0x4c, ZC3XX_R10A_RGB00}, /* matrix */
- {0xa0, 0xf1, ZC3XX_R10B_RGB01},
- {0xa0, 0x03, ZC3XX_R10C_RGB02},
- {0xa0, 0xfe, ZC3XX_R10D_RGB10},
- {0xa0, 0x51, ZC3XX_R10E_RGB11},
- {0xa0, 0xf1, ZC3XX_R10F_RGB12},
- {0xa0, 0xec, ZC3XX_R110_RGB20},
- {0xa0, 0x03, ZC3XX_R111_RGB21},
- {0xa0, 0x51, ZC3XX_R112_RGB22},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
- {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
- {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
- {0xa0, 0x38, ZC3XX_R120_GAMMA00}, /* gamma > 5 */
- {0xa0, 0x51, ZC3XX_R121_GAMMA01},
- {0xa0, 0x6e, ZC3XX_R122_GAMMA02},
- {0xa0, 0x8c, ZC3XX_R123_GAMMA03},
- {0xa0, 0xa2, ZC3XX_R124_GAMMA04},
- {0xa0, 0xb6, ZC3XX_R125_GAMMA05},
- {0xa0, 0xc8, ZC3XX_R126_GAMMA06},
- {0xa0, 0xd6, ZC3XX_R127_GAMMA07},
- {0xa0, 0xe2, ZC3XX_R128_GAMMA08},
- {0xa0, 0xed, ZC3XX_R129_GAMMA09},
- {0xa0, 0xf5, ZC3XX_R12A_GAMMA0A},
- {0xa0, 0xfc, ZC3XX_R12B_GAMMA0B},
- {0xa0, 0xff, ZC3XX_R12C_GAMMA0C},
- {0xa0, 0xff, ZC3XX_R12D_GAMMA0D},
- {0xa0, 0xff, ZC3XX_R12E_GAMMA0E},
- {0xa0, 0xff, ZC3XX_R12F_GAMMA0F},
- {0xa0, 0x12, ZC3XX_R130_GAMMA10},
- {0xa0, 0x1b, ZC3XX_R131_GAMMA11},
- {0xa0, 0x1d, ZC3XX_R132_GAMMA12},
- {0xa0, 0x1a, ZC3XX_R133_GAMMA13},
- {0xa0, 0x15, ZC3XX_R134_GAMMA14},
- {0xa0, 0x12, ZC3XX_R135_GAMMA15},
- {0xa0, 0x0f, ZC3XX_R136_GAMMA16},
- {0xa0, 0x0d, ZC3XX_R137_GAMMA17},
- {0xa0, 0x0b, ZC3XX_R138_GAMMA18},
- {0xa0, 0x09, ZC3XX_R139_GAMMA19},
- {0xa0, 0x07, ZC3XX_R13A_GAMMA1A},
- {0xa0, 0x05, ZC3XX_R13B_GAMMA1B},
- {0xa0, 0x00, ZC3XX_R13C_GAMMA1C},
- {0xa0, 0x00, ZC3XX_R13D_GAMMA1D},
- {0xa0, 0x00, ZC3XX_R13E_GAMMA1E},
- {0xa0, 0x01, ZC3XX_R13F_GAMMA1F},
- {0xa0, 0x4c, ZC3XX_R10A_RGB00}, /* matrix */
- {0xa0, 0xf1, ZC3XX_R10B_RGB01},
- {0xa0, 0x03, ZC3XX_R10C_RGB02},
- {0xa0, 0xfe, ZC3XX_R10D_RGB10},
- {0xa0, 0x51, ZC3XX_R10E_RGB11},
- {0xa0, 0xf1, ZC3XX_R10F_RGB12},
- {0xa0, 0xec, ZC3XX_R110_RGB20},
- {0xa0, 0x03, ZC3XX_R111_RGB21},
- {0xa0, 0x51, ZC3XX_R112_RGB22},
- {0xa0, 0x10, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
- {0xa0, 0x05, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x09, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x09, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x34, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x01, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
- {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID},
- {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW},
- {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
- {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
- {0xa0, 0x9a, ZC3XX_R197_ANTIFLICKERLOW},
- {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
- {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
- {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
- {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
- {0xa0, 0xd7, ZC3XX_R01D_HSYNC_0},
- {0xa0, 0xf4, ZC3XX_R01E_HSYNC_1},
- {0xa0, 0xf9, ZC3XX_R01F_HSYNC_2},
- {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
- {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x09, 0x01ad},
- {0xa0, 0x15, 0x01ae},
- {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
- {}
-};
-
-static const struct usb_action tas5130cK_Initial[] = {
- {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
- {0xa0, 0x01, 0x003b},
- {0xa0, 0x0e, 0x003a},
- {0xa0, 0x01, 0x0038},
- {0xa0, 0x0b, 0x0039},
- {0xa0, 0x00, 0x0038},
- {0xa0, 0x0b, 0x0039},
- {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
- {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT},
- {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT},
- {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
- {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
- {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
- {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
- {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
- {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
- {0xa0, 0x07, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
- {0xa0, 0x00, ZC3XX_R09A_WINXSTARTLOW},
- {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
- {0xa0, 0x00, ZC3XX_R11C_FIRSTXLOW},
- {0xa0, 0xdc, ZC3XX_R08B_I2CDEVICEADDR},
- {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x01, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x01, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x06, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x08, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x83, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x04, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x01, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x04, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x08, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x06, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x02, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x11, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x03, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0xe5, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x01, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x04, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x85, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x02, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x07, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x02, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x30, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x20, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x51, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x35, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7F, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x50, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x30, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x05, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x31, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x00, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x58, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x78, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x62, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x11, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x04, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2B, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2C, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7F, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2D, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x2e, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x7f, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x10, ZC3XX_R087_EXPTIMEMID},
- {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},
- {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
- {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
- {0xa0, 0x09, 0x01ad},
- {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
- {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
- {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
- {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
- {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN},
- {0xa0, 0x6c, ZC3XX_R18D_YTARGET},
- {0xa0, 0x61, ZC3XX_R116_RGAIN},
- {0xa0, 0x65, ZC3XX_R118_BGAIN},
- {0xa0, 0x09, 0x01ad},
- {0xa0, 0x15, 0x01ae},
- {0xa0, 0x4c, ZC3XX_R10A_RGB00}, /* matrix */
- {0xa0, 0xf1, ZC3XX_R10B_RGB01},
- {0xa0, 0x03, ZC3XX_R10C_RGB02},
- {0xa0, 0xfe, ZC3XX_R10D_RGB10},
- {0xa0, 0x51, ZC3XX_R10E_RGB11},
- {0xa0, 0xf1, ZC3XX_R10F_RGB12},
- {0xa0, 0xec, ZC3XX_R110_RGB20},
- {0xa0, 0x03, ZC3XX_R111_RGB21},
- {0xa0, 0x51, ZC3XX_R112_RGB22},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
- {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
- {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
- {0xa0, 0x38, ZC3XX_R120_GAMMA00}, /* gamma > 5 */
- {0xa0, 0x51, ZC3XX_R121_GAMMA01},
- {0xa0, 0x6e, ZC3XX_R122_GAMMA02},
- {0xa0, 0x8c, ZC3XX_R123_GAMMA03},
- {0xa0, 0xa2, ZC3XX_R124_GAMMA04},
- {0xa0, 0xb6, ZC3XX_R125_GAMMA05},
- {0xa0, 0xc8, ZC3XX_R126_GAMMA06},
- {0xa0, 0xd6, ZC3XX_R127_GAMMA07},
- {0xa0, 0xe2, ZC3XX_R128_GAMMA08},
- {0xa0, 0xed, ZC3XX_R129_GAMMA09},
- {0xa0, 0xf5, ZC3XX_R12A_GAMMA0A},
- {0xa0, 0xfc, ZC3XX_R12B_GAMMA0B},
- {0xa0, 0xff, ZC3XX_R12C_GAMMA0C},
- {0xa0, 0xff, ZC3XX_R12D_GAMMA0D},
- {0xa0, 0xff, ZC3XX_R12E_GAMMA0E},
- {0xa0, 0xff, ZC3XX_R12F_GAMMA0F},
- {0xa0, 0x12, ZC3XX_R130_GAMMA10},
- {0xa0, 0x1b, ZC3XX_R131_GAMMA11},
- {0xa0, 0x1d, ZC3XX_R132_GAMMA12},
- {0xa0, 0x1a, ZC3XX_R133_GAMMA13},
- {0xa0, 0x15, ZC3XX_R134_GAMMA14},
- {0xa0, 0x12, ZC3XX_R135_GAMMA15},
- {0xa0, 0x0f, ZC3XX_R136_GAMMA16},
- {0xa0, 0x0d, ZC3XX_R137_GAMMA17},
- {0xa0, 0x0b, ZC3XX_R138_GAMMA18},
- {0xa0, 0x09, ZC3XX_R139_GAMMA19},
- {0xa0, 0x07, ZC3XX_R13A_GAMMA1A},
- {0xa0, 0x05, ZC3XX_R13B_GAMMA1B},
- {0xa0, 0x00, ZC3XX_R13C_GAMMA1C},
- {0xa0, 0x00, ZC3XX_R13D_GAMMA1D},
- {0xa0, 0x00, ZC3XX_R13E_GAMMA1E},
- {0xa0, 0x01, ZC3XX_R13F_GAMMA1F},
- {0xa0, 0x4c, ZC3XX_R10A_RGB00}, /* matrix */
- {0xa0, 0xf1, ZC3XX_R10B_RGB01},
- {0xa0, 0x03, ZC3XX_R10C_RGB02},
- {0xa0, 0xfe, ZC3XX_R10D_RGB10},
- {0xa0, 0x51, ZC3XX_R10E_RGB11},
- {0xa0, 0xf1, ZC3XX_R10F_RGB12},
- {0xa0, 0xec, ZC3XX_R110_RGB20},
- {0xa0, 0x03, ZC3XX_R111_RGB21},
- {0xa0, 0x51, ZC3XX_R112_RGB22},
- {0xa0, 0x10, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
- {0xa0, 0x05, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0x62, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x00, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x09, ZC3XX_R092_I2CADDRESSSELECT},
- {0xa0, 0xaa, ZC3XX_R093_I2CSETVALUE},
- {0xa0, 0x01, ZC3XX_R094_I2CWRITEACK},
- {0xa0, 0x01, ZC3XX_R090_I2CCOMMAND},
- {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
- {0xa0, 0x03, ZC3XX_R191_EXPOSURELIMITMID},
- {0xa0, 0x9b, ZC3XX_R192_EXPOSURELIMITLOW},
- {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
- {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
- {0xa0, 0x47, ZC3XX_R197_ANTIFLICKERLOW},
- {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE},
- {0xa0, 0x1c, ZC3XX_R18F_AEUNFREEZE},
- {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
- {0xa0, 0x66, ZC3XX_R1AA_DIGITALGAINSTEP},
- {0xa0, 0x62, ZC3XX_R01D_HSYNC_0},
- {0xa0, 0x90, ZC3XX_R01E_HSYNC_1},
- {0xa0, 0xc8, ZC3XX_R01F_HSYNC_2},
- {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
- {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
- {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x09, 0x01ad},
- {0xa0, 0x15, 0x01ae},
- {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
- {0xa0, 0x30, 0x0007},
- {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING},
- {0xa0, 0x00, 0x0007},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
- {}
-};
-
-static const struct usb_action tas5130cxx_InitialScale[] = { /* 320x240 */
+static const struct usb_action tas5130c_InitialScale[] = { /* 320x240 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
{0xa0, 0x50, ZC3XX_R002_CLOCKSELECT},
{0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
@@ -5377,7 +5223,7 @@ static const struct usb_action tas5130cxx_InitialScale[] = { /* 320x240 */
{0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
{}
};
-static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */
+static const struct usb_action tas5130c_Initial[] = { /* 640x480 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
{0xa0, 0x40, ZC3XX_R002_CLOCKSELECT},
{0xa0, 0x00, ZC3XX_R008_CLOCKSETTING},
@@ -5413,7 +5259,7 @@ static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */
{0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
{}
};
-static const struct usb_action tas5130cxx_50HZ[] = {
+static const struct usb_action tas5130c_50HZ[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
{0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */
{0xaa, 0xa4, 0x0063}, /* 00,a4,63,aa */
@@ -5438,7 +5284,7 @@ static const struct usb_action tas5130cxx_50HZ[] = {
{0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN},
{}
};
-static const struct usb_action tas5130cxx_50HZScale[] = {
+static const struct usb_action tas5130c_50HZScale[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
{0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */
{0xaa, 0xa4, 0x0077}, /* 00,a4,77,aa */
@@ -5463,7 +5309,7 @@ static const struct usb_action tas5130cxx_50HZScale[] = {
{0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN},
{}
};
-static const struct usb_action tas5130cxx_60HZ[] = {
+static const struct usb_action tas5130c_60HZ[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
{0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */
{0xaa, 0xa4, 0x0036}, /* 00,a4,36,aa */
@@ -5488,7 +5334,7 @@ static const struct usb_action tas5130cxx_60HZ[] = {
{0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN},
{}
};
-static const struct usb_action tas5130cxx_60HZScale[] = {
+static const struct usb_action tas5130c_60HZScale[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
{0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */
{0xaa, 0xa4, 0x0077}, /* 00,a4,77,aa */
@@ -5513,7 +5359,7 @@ static const struct usb_action tas5130cxx_60HZScale[] = {
{0xa0, 0x50, ZC3XX_R11D_GLOBALGAIN},
{}
};
-static const struct usb_action tas5130cxx_NoFliker[] = {
+static const struct usb_action tas5130c_NoFliker[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
{0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */
{0xaa, 0xa4, 0x0040}, /* 00,a4,40,aa */
@@ -5539,7 +5385,7 @@ static const struct usb_action tas5130cxx_NoFliker[] = {
{}
};
-static const struct usb_action tas5130cxx_NoFlikerScale[] = {
+static const struct usb_action tas5130c_NoFlikerScale[] = {
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
{0xaa, 0xa3, 0x0001}, /* 00,a3,01,aa */
{0xaa, 0xa4, 0x0090}, /* 00,a4,90,aa */
@@ -5840,13 +5686,22 @@ static const struct usb_action tas5130c_vf0250_NoFliker[] = {
static u8 reg_r_i(struct gspca_dev *gspca_dev,
u16 index)
{
- usb_control_msg(gspca_dev->dev,
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return 0;
+ ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
0xa1,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x01, /* value */
index, gspca_dev->usb_buf, 1,
500);
+ if (ret < 0) {
+ PDEBUG(D_ERR, "reg_r_i err %d", ret);
+ gspca_dev->usb_err = ret;
+ return 0;
+ }
return gspca_dev->usb_buf[0];
}
@@ -5860,24 +5715,32 @@ static u8 reg_r(struct gspca_dev *gspca_dev,
return ret;
}
-static void reg_w_i(struct usb_device *dev,
+static void reg_w_i(struct gspca_dev *gspca_dev,
u8 value,
u16 index)
{
- usb_control_msg(dev,
- usb_sndctrlpipe(dev, 0),
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(gspca_dev->dev,
+ usb_sndctrlpipe(gspca_dev->dev, 0),
0xa0,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0,
500);
+ if (ret < 0) {
+ PDEBUG(D_ERR, "reg_w_i err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
-static void reg_w(struct usb_device *dev,
+static void reg_w(struct gspca_dev *gspca_dev,
u8 value,
u16 index)
{
PDEBUG(D_USBO, "reg w [%04x] = %02x", index, value);
- reg_w_i(dev, value, index);
+ reg_w_i(gspca_dev, value, index);
}
static u16 i2c_read(struct gspca_dev *gspca_dev,
@@ -5886,8 +5749,10 @@ static u16 i2c_read(struct gspca_dev *gspca_dev,
u8 retbyte;
u16 retval;
- reg_w_i(gspca_dev->dev, reg, 0x0092);
- reg_w_i(gspca_dev->dev, 0x02, 0x0090); /* <- read command */
+ if (gspca_dev->usb_err < 0)
+ return 0;
+ reg_w_i(gspca_dev, reg, 0x0092);
+ reg_w_i(gspca_dev, 0x02, 0x0090); /* <- read command */
msleep(20);
retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
if (retbyte != 0x00)
@@ -5906,10 +5771,12 @@ static u8 i2c_write(struct gspca_dev *gspca_dev,
{
u8 retbyte;
- reg_w_i(gspca_dev->dev, reg, 0x92);
- reg_w_i(gspca_dev->dev, valL, 0x93);
- reg_w_i(gspca_dev->dev, valH, 0x94);
- reg_w_i(gspca_dev->dev, 0x01, 0x90); /* <- write command */
+ if (gspca_dev->usb_err < 0)
+ return 0;
+ reg_w_i(gspca_dev, reg, 0x92);
+ reg_w_i(gspca_dev, valL, 0x93);
+ reg_w_i(gspca_dev, valH, 0x94);
+ reg_w_i(gspca_dev, 0x01, 0x90); /* <- write command */
msleep(1);
retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
if (retbyte != 0x00)
@@ -5925,7 +5792,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev,
while (action->req) {
switch (action->req) {
case 0xa0: /* write register */
- reg_w(gspca_dev->dev, action->val, action->idx);
+ reg_w(gspca_dev, action->val, action->idx);
break;
case 0xa1: /* read status */
reg_r(gspca_dev, action->idx);
@@ -5974,38 +5841,37 @@ static void setmatrix(struct gspca_dev *gspca_dev)
static const u8 vf0250_matrix[9] =
{0x7b, 0xea, 0xea, 0xea, 0x7b, 0xea, 0xea, 0xea, 0x7b};
static const u8 *matrix_tb[SENSOR_MAX] = {
- adcm2700_matrix, /* SENSOR_ADCM2700 0 */
- ov7620_matrix, /* SENSOR_CS2102 1 */
- NULL, /* SENSOR_CS2102K 2 */
- gc0305_matrix, /* SENSOR_GC0305 3 */
- NULL, /* SENSOR_HDCS2020b 4 */
- NULL, /* SENSOR_HV7131B 5 */
- NULL, /* SENSOR_HV7131C 6 */
- NULL, /* SENSOR_ICM105A 7 */
- NULL, /* SENSOR_MC501CB 8 */
- gc0305_matrix, /* SENSOR_MI0360SOC 9 */
- ov7620_matrix, /* SENSOR_OV7620 10 */
- NULL, /* SENSOR_OV7630C 11 */
- NULL, /* SENSOR_PAS106 12 */
- pas202b_matrix, /* SENSOR_PAS202B 13 */
- gc0305_matrix, /* SENSOR_PB0330 14 */
- po2030_matrix, /* SENSOR_PO2030 15 */
- NULL, /* SENSOR_TAS5130CK 16 */
- tas5130c_matrix, /* SENSOR_TAS5130CXX 17 */
- vf0250_matrix, /* SENSOR_TAS5130C_VF0250 18 */
+ [SENSOR_ADCM2700] = adcm2700_matrix,
+ [SENSOR_CS2102] = ov7620_matrix,
+ [SENSOR_CS2102K] = NULL,
+ [SENSOR_GC0305] = gc0305_matrix,
+ [SENSOR_HDCS2020b] = NULL,
+ [SENSOR_HV7131B] = NULL,
+ [SENSOR_HV7131R] = NULL,
+ [SENSOR_ICM105A] = po2030_matrix,
+ [SENSOR_MC501CB] = NULL,
+ [SENSOR_MT9V111_1] = gc0305_matrix,
+ [SENSOR_MT9V111_3] = gc0305_matrix,
+ [SENSOR_OV7620] = ov7620_matrix,
+ [SENSOR_OV7630C] = NULL,
+ [SENSOR_PAS106] = NULL,
+ [SENSOR_PAS202B] = pas202b_matrix,
+ [SENSOR_PB0330] = gc0305_matrix,
+ [SENSOR_PO2030] = po2030_matrix,
+ [SENSOR_TAS5130C] = tas5130c_matrix,
+ [SENSOR_TAS5130C_VF0250] = vf0250_matrix,
};
matrix = matrix_tb[sd->sensor];
if (matrix == NULL)
return; /* matrix already loaded */
for (i = 0; i < ARRAY_SIZE(ov7620_matrix); i++)
- reg_w(gspca_dev->dev, matrix[i], 0x010a + i);
+ reg_w(gspca_dev, matrix[i], 0x010a + i);
}
static void setsharpness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
int sharpness;
static const u8 sharpness_tb[][2] = {
{0x02, 0x03},
@@ -6015,17 +5881,16 @@ static void setsharpness(struct gspca_dev *gspca_dev)
};
sharpness = sd->sharpness;
- reg_w(dev, sharpness_tb[sharpness][0], 0x01c6);
+ reg_w(gspca_dev, sharpness_tb[sharpness][0], 0x01c6);
reg_r(gspca_dev, 0x01c8);
reg_r(gspca_dev, 0x01c9);
reg_r(gspca_dev, 0x01ca);
- reg_w(dev, sharpness_tb[sharpness][1], 0x01cb);
+ reg_w(gspca_dev, sharpness_tb[sharpness][1], 0x01cb);
}
static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
const u8 *Tgamma;
int g, i, brightness, contrast, adj, gp1, gp2;
u8 gr[16];
@@ -6063,7 +5928,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
g = 0xff;
else if (g < 0)
g = 0;
- reg_w(dev, g, 0x0120 + i); /* gamma */
+ reg_w(gspca_dev, g, 0x0120 + i); /* gamma */
if (contrast > 0)
adj--;
else if (contrast < 0)
@@ -6077,13 +5942,12 @@ static void setcontrast(struct gspca_dev *gspca_dev)
}
gr[15] = (0xff - gp2) / 2;
for (i = 0; i < 16; i++)
- reg_w(dev, gr[i], 0x0130 + i); /* gradient */
+ reg_w(gspca_dev, gr[i], 0x0130 + i); /* gradient */
}
static void setquality(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
u8 frxt;
switch (sd->sensor) {
@@ -6096,9 +5960,9 @@ static void setquality(struct gspca_dev *gspca_dev)
return;
}
/*fixme: is it really 0008 0007 0018 for all other sensors? */
- reg_w(dev, QUANT_VAL, 0x0008);
+ reg_w(gspca_dev, QUANT_VAL, 0x0008);
frxt = 0x30;
- reg_w(dev, frxt, 0x0007);
+ reg_w(gspca_dev, frxt, 0x0007);
#if QUANT_VAL == 0 || QUANT_VAL == 1 || QUANT_VAL == 2
frxt = 0xff;
#elif QUANT_VAL == 3
@@ -6108,7 +5972,7 @@ static void setquality(struct gspca_dev *gspca_dev)
#else
frxt = 0x20;
#endif
- reg_w(dev, frxt, 0x0018);
+ reg_w(gspca_dev, frxt, 0x0018);
}
/* Matches the sensor's internal frame rate to the lighting frequency.
@@ -6116,87 +5980,86 @@ static void setquality(struct gspca_dev *gspca_dev)
* 50Hz, for European and Asian lighting (default)
* 60Hz, for American lighting
 * 0 = No Fliker (for outdoor usage)
- * Returns: 0 for success
*/
-static int setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int i, mode;
const struct usb_action *zc3_freq;
static const struct usb_action *freq_tb[SENSOR_MAX][6] = {
-/* SENSOR_ADCM2700 0 */
+ [SENSOR_ADCM2700] =
{adcm2700_NoFliker, adcm2700_NoFliker,
adcm2700_50HZ, adcm2700_50HZ,
adcm2700_60HZ, adcm2700_60HZ},
-/* SENSOR_CS2102 1 */
+ [SENSOR_CS2102] =
{cs2102_NoFliker, cs2102_NoFlikerScale,
cs2102_50HZ, cs2102_50HZScale,
cs2102_60HZ, cs2102_60HZScale},
-/* SENSOR_CS2102K 2 */
+ [SENSOR_CS2102K] =
{cs2102_NoFliker, cs2102_NoFlikerScale,
NULL, NULL, /* currently disabled */
NULL, NULL},
-/* SENSOR_GC0305 3 */
+ [SENSOR_GC0305] =
{gc0305_NoFliker, gc0305_NoFliker,
gc0305_50HZ, gc0305_50HZ,
gc0305_60HZ, gc0305_60HZ},
-/* SENSOR_HDCS2020b 4 */
+ [SENSOR_HDCS2020b] =
{hdcs2020b_NoFliker, hdcs2020b_NoFliker,
hdcs2020b_50HZ, hdcs2020b_50HZ,
hdcs2020b_60HZ, hdcs2020b_60HZ},
-/* SENSOR_HV7131B 5 */
+ [SENSOR_HV7131B] =
{hv7131b_NoFliker, hv7131b_NoFlikerScale,
hv7131b_50HZ, hv7131b_50HZScale,
hv7131b_60HZ, hv7131b_60HZScale},
-/* SENSOR_HV7131C 6 */
- {NULL, NULL,
- NULL, NULL,
- NULL, NULL},
-/* SENSOR_ICM105A 7 */
+ [SENSOR_HV7131R] =
+ {hv7131r_NoFliker, hv7131r_NoFlikerScale,
+ hv7131r_50HZ, hv7131r_50HZScale,
+ hv7131r_60HZ, hv7131r_60HZScale},
+ [SENSOR_ICM105A] =
{icm105a_NoFliker, icm105a_NoFlikerScale,
icm105a_50HZ, icm105a_50HZScale,
icm105a_60HZ, icm105a_60HZScale},
-/* SENSOR_MC501CB 8 */
+ [SENSOR_MC501CB] =
{mc501cb_NoFliker, mc501cb_NoFlikerScale,
mc501cb_50HZ, mc501cb_50HZScale,
mc501cb_60HZ, mc501cb_60HZScale},
-/* SENSOR_MI0360SOC 9 */
- {mi360soc_AENoFliker, mi360soc_AENoFlikerScale,
- mi360soc_AE50HZ, mi360soc_AE50HZScale,
- mi360soc_AE60HZ, mi360soc_AE60HZScale},
-/* SENSOR_OV7620 10 */
+ [SENSOR_MT9V111_1] =
+ {mt9v111_1_AENoFliker, mt9v111_1_AENoFlikerScale,
+ mt9v111_1_AE50HZ, mt9v111_1_AE50HZScale,
+ mt9v111_1_AE60HZ, mt9v111_1_AE60HZScale},
+ [SENSOR_MT9V111_3] =
+ {mt9v111_3_AENoFliker, mt9v111_3_AENoFlikerScale,
+ mt9v111_3_AE50HZ, mt9v111_3_AE50HZScale,
+ mt9v111_3_AE60HZ, mt9v111_3_AE60HZScale},
+ [SENSOR_OV7620] =
{ov7620_NoFliker, ov7620_NoFliker,
ov7620_50HZ, ov7620_50HZ,
ov7620_60HZ, ov7620_60HZ},
-/* SENSOR_OV7630C 11 */
+ [SENSOR_OV7630C] =
{NULL, NULL,
NULL, NULL,
NULL, NULL},
-/* SENSOR_PAS106 12 */
+ [SENSOR_PAS106] =
{pas106b_NoFliker, pas106b_NoFliker,
pas106b_50HZ, pas106b_50HZ,
pas106b_60HZ, pas106b_60HZ},
-/* SENSOR_PAS202B 13 */
+ [SENSOR_PAS202B] =
{pas202b_NoFliker, pas202b_NoFlikerScale,
pas202b_50HZ, pas202b_50HZScale,
pas202b_60HZ, pas202b_60HZScale},
-/* SENSOR_PB0330 14 */
+ [SENSOR_PB0330] =
{pb0330_NoFliker, pb0330_NoFlikerScale,
pb0330_50HZ, pb0330_50HZScale,
pb0330_60HZ, pb0330_60HZScale},
-/* SENSOR_PO2030 15 */
+ [SENSOR_PO2030] =
{po2030_NoFliker, po2030_NoFliker,
po2030_50HZ, po2030_50HZ,
po2030_60HZ, po2030_60HZ},
-/* SENSOR_TAS5130CK 16 */
- {tas5130cxx_NoFliker, tas5130cxx_NoFlikerScale,
- tas5130cxx_50HZ, tas5130cxx_50HZScale,
- tas5130cxx_60HZ, tas5130cxx_60HZScale},
-/* SENSOR_TAS5130CXX 17 */
- {tas5130cxx_NoFliker, tas5130cxx_NoFlikerScale,
- tas5130cxx_50HZ, tas5130cxx_50HZScale,
- tas5130cxx_60HZ, tas5130cxx_60HZScale},
-/* SENSOR_TAS5130C_VF0250 18 */
+ [SENSOR_TAS5130C] =
+ {tas5130c_NoFliker, tas5130c_NoFlikerScale,
+ tas5130c_50HZ, tas5130c_50HZScale,
+ tas5130c_60HZ, tas5130c_60HZScale},
+ [SENSOR_TAS5130C_VF0250] =
{tas5130c_vf0250_NoFliker, tas5130c_vf0250_NoFlikerScale,
tas5130c_vf0250_50HZ, tas5130c_vf0250_50HZScale,
tas5130c_vf0250_60HZ, tas5130c_vf0250_60HZScale},
@@ -6207,29 +6070,28 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
if (mode)
i++; /* 320x240 */
zc3_freq = freq_tb[sd->sensor][i];
- if (zc3_freq != NULL) {
- usb_exchange(gspca_dev, zc3_freq);
- switch (sd->sensor) {
- case SENSOR_GC0305:
- if (mode /* if 320x240 */
- && sd->lightfreq == 1) /* and 50Hz */
- reg_w(gspca_dev->dev, 0x85, 0x018d);
- /* win: 0x80, 0x018d */
- break;
- case SENSOR_OV7620:
- if (!mode) { /* if 640x480 */
- if (sd->lightfreq != 0) /* and 50 or 60 Hz */
- reg_w(gspca_dev->dev, 0x40, 0x0002);
- else
- reg_w(gspca_dev->dev, 0x44, 0x0002);
- }
- break;
- case SENSOR_PAS202B:
- reg_w(gspca_dev->dev, 0x00, 0x01a7);
- break;
+ if (zc3_freq == NULL)
+ return;
+ usb_exchange(gspca_dev, zc3_freq);
+ switch (sd->sensor) {
+ case SENSOR_GC0305:
+ if (mode /* if 320x240 */
+ && sd->lightfreq == 1) /* and 50Hz */
+ reg_w(gspca_dev, 0x85, 0x018d);
+ /* win: 0x80, 0x018d */
+ break;
+ case SENSOR_OV7620:
+ if (!mode) { /* if 640x480 */
+ if (sd->lightfreq != 0) /* and 50 or 60 Hz */
+ reg_w(gspca_dev, 0x40, 0x0002);
+ else
+ reg_w(gspca_dev, 0x44, 0x0002);
}
+ break;
+ case SENSOR_PAS202B:
+ reg_w(gspca_dev, 0x00, 0x01a7);
+ break;
}
- return 0;
}
static void setautogain(struct gspca_dev *gspca_dev)
@@ -6241,45 +6103,46 @@ static void setautogain(struct gspca_dev *gspca_dev)
autoval = 0x42;
else
autoval = 0x02;
- reg_w(gspca_dev->dev, autoval, 0x0180);
+ reg_w(gspca_dev, autoval, 0x0180);
}
-static void send_unknown(struct usb_device *dev, int sensor)
+static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
{
- reg_w(dev, 0x01, 0x0000); /* led off */
+ reg_w(gspca_dev, 0x01, 0x0000); /* led off */
switch (sensor) {
case SENSOR_PAS106:
- reg_w(dev, 0x03, 0x003a);
- reg_w(dev, 0x0c, 0x003b);
- reg_w(dev, 0x08, 0x0038);
+ reg_w(gspca_dev, 0x03, 0x003a);
+ reg_w(gspca_dev, 0x0c, 0x003b);
+ reg_w(gspca_dev, 0x08, 0x0038);
break;
case SENSOR_ADCM2700:
case SENSOR_GC0305:
case SENSOR_OV7620:
- case SENSOR_MI0360SOC:
+ case SENSOR_MT9V111_1:
+ case SENSOR_MT9V111_3:
case SENSOR_PB0330:
case SENSOR_PO2030:
- reg_w(dev, 0x0d, 0x003a);
- reg_w(dev, 0x02, 0x003b);
- reg_w(dev, 0x00, 0x0038);
+ reg_w(gspca_dev, 0x0d, 0x003a);
+ reg_w(gspca_dev, 0x02, 0x003b);
+ reg_w(gspca_dev, 0x00, 0x0038);
break;
case SENSOR_PAS202B:
- reg_w(dev, 0x03, 0x003b);
- reg_w(dev, 0x0c, 0x003a);
- reg_w(dev, 0x0b, 0x0039);
- reg_w(dev, 0x0b, 0x0038);
+ reg_w(gspca_dev, 0x03, 0x003b);
+ reg_w(gspca_dev, 0x0c, 0x003a);
+ reg_w(gspca_dev, 0x0b, 0x0039);
+ reg_w(gspca_dev, 0x0b, 0x0038);
break;
}
}
/* start probe 2 wires */
-static void start_2wr_probe(struct usb_device *dev, int sensor)
+static void start_2wr_probe(struct gspca_dev *gspca_dev, int sensor)
{
- reg_w(dev, 0x01, 0x0000);
- reg_w(dev, sensor, 0x0010);
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0x03, 0x0012);
- reg_w(dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0000);
+ reg_w(gspca_dev, sensor, 0x0010);
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0x03, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
/* msleep(2); */
}
@@ -6287,14 +6150,14 @@ static int sif_probe(struct gspca_dev *gspca_dev)
{
u16 checkword;
- start_2wr_probe(gspca_dev->dev, 0x0f); /* PAS106 */
- reg_w(gspca_dev->dev, 0x08, 0x008d);
+ start_2wr_probe(gspca_dev, 0x0f); /* PAS106 */
+ reg_w(gspca_dev, 0x08, 0x008d);
msleep(150);
checkword = ((i2c_read(gspca_dev, 0x00) & 0x0f) << 4)
| ((i2c_read(gspca_dev, 0x01) & 0xf0) >> 4);
PDEBUG(D_PROBE, "probe sif 0x%04x", checkword);
if (checkword == 0x0007) {
- send_unknown(gspca_dev->dev, SENSOR_PAS106);
+ send_unknown(gspca_dev, SENSOR_PAS106);
return 0x0f; /* PAS106 */
}
return -1;
@@ -6302,23 +6165,22 @@ static int sif_probe(struct gspca_dev *gspca_dev)
static int vga_2wr_probe(struct gspca_dev *gspca_dev)
{
- struct usb_device *dev = gspca_dev->dev;
u16 retword;
- start_2wr_probe(dev, 0x00); /* HV7131B */
+ start_2wr_probe(gspca_dev, 0x00); /* HV7131B */
i2c_write(gspca_dev, 0x01, 0xaa, 0x00);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0)
return 0x00; /* HV7131B */
- start_2wr_probe(dev, 0x04); /* CS2102 */
+ start_2wr_probe(gspca_dev, 0x04); /* CS2102 */
i2c_write(gspca_dev, 0x01, 0xaa, 0x00);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0)
return 0x04; /* CS2102 */
- start_2wr_probe(dev, 0x06); /* OmniVision */
- reg_w(dev, 0x08, 0x008d);
+ start_2wr_probe(gspca_dev, 0x06); /* OmniVision */
+ reg_w(gspca_dev, 0x08, 0x008d);
i2c_write(gspca_dev, 0x11, 0xaa, 0x00);
retword = i2c_read(gspca_dev, 0x11);
if (retword != 0) {
@@ -6327,14 +6189,14 @@ static int vga_2wr_probe(struct gspca_dev *gspca_dev)
goto ov_check;
}
- start_2wr_probe(dev, 0x08); /* HDCS2020 */
+ start_2wr_probe(gspca_dev, 0x08); /* HDCS2020 */
i2c_write(gspca_dev, 0x1c, 0x00, 0x00);
i2c_write(gspca_dev, 0x15, 0xaa, 0x00);
retword = i2c_read(gspca_dev, 0x15);
if (retword != 0)
return 0x08; /* HDCS2020 */
- start_2wr_probe(dev, 0x0a); /* PB0330 */
+ start_2wr_probe(gspca_dev, 0x0a); /* PB0330 */
i2c_write(gspca_dev, 0x07, 0xaa, 0xaa);
retword = i2c_read(gspca_dev, 0x07);
if (retword != 0)
@@ -6346,23 +6208,23 @@ static int vga_2wr_probe(struct gspca_dev *gspca_dev)
if (retword != 0)
return 0x0a; /* PB0330 ?? */
- start_2wr_probe(dev, 0x0c); /* ICM105A */
+ start_2wr_probe(gspca_dev, 0x0c); /* ICM105A */
i2c_write(gspca_dev, 0x01, 0x11, 0x00);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0)
return 0x0c; /* ICM105A */
- start_2wr_probe(dev, 0x0e); /* PAS202BCB */
- reg_w(dev, 0x08, 0x008d);
+ start_2wr_probe(gspca_dev, 0x0e); /* PAS202BCB */
+ reg_w(gspca_dev, 0x08, 0x008d);
i2c_write(gspca_dev, 0x03, 0xaa, 0x00);
msleep(50);
retword = i2c_read(gspca_dev, 0x03);
if (retword != 0) {
- send_unknown(dev, SENSOR_PAS202B);
+ send_unknown(gspca_dev, SENSOR_PAS202B);
return 0x0e; /* PAS202BCB */
}
- start_2wr_probe(dev, 0x02); /* TAS5130C */
+ start_2wr_probe(gspca_dev, 0x02); /* TAS5130C */
i2c_write(gspca_dev, 0x01, 0xaa, 0x00);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0)
@@ -6371,20 +6233,20 @@ ov_check:
reg_r(gspca_dev, 0x0010); /* ?? */
reg_r(gspca_dev, 0x0010);
- reg_w(dev, 0x01, 0x0000);
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0x06, 0x0010); /* OmniVision */
- reg_w(dev, 0xa1, 0x008b);
- reg_w(dev, 0x08, 0x008d);
+ reg_w(gspca_dev, 0x01, 0x0000);
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0x06, 0x0010); /* OmniVision */
+ reg_w(gspca_dev, 0xa1, 0x008b);
+ reg_w(gspca_dev, 0x08, 0x008d);
msleep(500);
- reg_w(dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
i2c_write(gspca_dev, 0x12, 0x80, 0x00); /* sensor reset */
retword = i2c_read(gspca_dev, 0x0a) << 8;
retword |= i2c_read(gspca_dev, 0x0b);
PDEBUG(D_PROBE, "probe 2wr ov vga 0x%04x", retword);
switch (retword) {
case 0x7631: /* OV7630C */
- reg_w(dev, 0x06, 0x0010);
+ reg_w(gspca_dev, 0x06, 0x0010);
break;
case 0x7620: /* OV7620 */
case 0x7648: /* OV7648 */
@@ -6401,32 +6263,31 @@ struct sensor_by_chipset_revision {
};
static const struct sensor_by_chipset_revision chipset_revision_sensor[] = {
{0xc000, 0x12}, /* TAS5130C */
- {0xc001, 0x13}, /* MI0360SOC */
+ {0xc001, 0x13}, /* MT9V111 */
{0xe001, 0x13},
{0x8001, 0x13},
{0x8000, 0x14}, /* CS2102K */
- {0x8400, 0x15}, /* TAS5130K */
+ {0x8400, 0x15}, /* MT9V111 */
{0xe400, 0x15},
};
static int vga_3wr_probe(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
int i;
u8 retbyte;
u16 retword;
/*fixme: lack of 8b=b3 (11,12)-> 10, 8b=e0 (14,15,16)-> 12 found in gspcav1*/
- reg_w(dev, 0x02, 0x0010);
+ reg_w(gspca_dev, 0x02, 0x0010);
reg_r(gspca_dev, 0x0010);
- reg_w(dev, 0x01, 0x0000);
- reg_w(dev, 0x00, 0x0010);
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0x91, 0x008b);
- reg_w(dev, 0x03, 0x0012);
- reg_w(dev, 0x01, 0x0012);
- reg_w(dev, 0x05, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0000);
+ reg_w(gspca_dev, 0x00, 0x0010);
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0x91, 0x008b);
+ reg_w(gspca_dev, 0x03, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x05, 0x0012);
retword = i2c_read(gspca_dev, 0x14);
if (retword != 0)
return 0x11; /* HV7131R */
@@ -6437,93 +6298,90 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
if (retword != 0)
return 0x11; /* HV7131R */
- reg_w(dev, 0x02, 0x0010);
+ reg_w(gspca_dev, 0x02, 0x0010);
retword = reg_r(gspca_dev, 0x000b) << 8;
retword |= reg_r(gspca_dev, 0x000a);
PDEBUG(D_PROBE, "probe 3wr vga 1 0x%04x", retword);
reg_r(gspca_dev, 0x0010);
- /* value 0x4001 is meaningless */
- if (retword != 0x4001) {
- if ((retword & 0xff00) == 0x6400)
- return 0x02; /* TAS5130C */
- for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
- if (chipset_revision_sensor[i].revision == retword) {
- sd->chip_revision = retword;
- send_unknown(dev, SENSOR_PB0330);
- return chipset_revision_sensor[i]
- .internal_sensor_id;
- }
+ if ((retword & 0xff00) == 0x6400)
+ return 0x02; /* TAS5130C */
+ for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
+ if (chipset_revision_sensor[i].revision == retword) {
+ sd->chip_revision = retword;
+ send_unknown(gspca_dev, SENSOR_PB0330);
+ return chipset_revision_sensor[i]
+ .internal_sensor_id;
}
}
- reg_w(dev, 0x01, 0x0000); /* check PB0330 */
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0xdd, 0x008b);
- reg_w(dev, 0x0a, 0x0010);
- reg_w(dev, 0x03, 0x0012);
- reg_w(dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0000); /* check PB0330 */
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0xdd, 0x008b);
+ reg_w(gspca_dev, 0x0a, 0x0010);
+ reg_w(gspca_dev, 0x03, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
retword = i2c_read(gspca_dev, 0x00);
if (retword != 0) {
- PDEBUG(D_PROBE, "probe 3wr vga type 0a ?");
+ PDEBUG(D_PROBE, "probe 3wr vga type 0a");
return 0x0a; /* PB0330 */
}
- reg_w(dev, 0x01, 0x0000);
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0x98, 0x008b);
- reg_w(dev, 0x01, 0x0010);
- reg_w(dev, 0x03, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0000);
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0x98, 0x008b);
+ reg_w(gspca_dev, 0x01, 0x0010);
+ reg_w(gspca_dev, 0x03, 0x0012);
msleep(2);
- reg_w(dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
retword = i2c_read(gspca_dev, 0x00);
if (retword != 0) {
PDEBUG(D_PROBE, "probe 3wr vga type %02x", retword);
if (retword == 0x0011) /* VF0250 */
return 0x0250;
if (retword == 0x0029) /* gc0305 */
- send_unknown(dev, SENSOR_GC0305);
+ send_unknown(gspca_dev, SENSOR_GC0305);
return retword;
}
- reg_w(dev, 0x01, 0x0000); /* check OmniVision */
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0xa1, 0x008b);
- reg_w(dev, 0x08, 0x008d);
- reg_w(dev, 0x06, 0x0010);
- reg_w(dev, 0x01, 0x0012);
- reg_w(dev, 0x05, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0000); /* check OmniVision */
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0xa1, 0x008b);
+ reg_w(gspca_dev, 0x08, 0x008d);
+ reg_w(gspca_dev, 0x06, 0x0010);
+ reg_w(gspca_dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x05, 0x0012);
if (i2c_read(gspca_dev, 0x1c) == 0x007f /* OV7610 - manufacturer ID */
&& i2c_read(gspca_dev, 0x1d) == 0x00a2) {
- send_unknown(dev, SENSOR_OV7620);
+ send_unknown(gspca_dev, SENSOR_OV7620);
return 0x06; /* OmniVision confirm ? */
}
- reg_w(dev, 0x01, 0x0000);
- reg_w(dev, 0x00, 0x0002);
- reg_w(dev, 0x01, 0x0010);
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0xee, 0x008b);
- reg_w(dev, 0x03, 0x0012);
- reg_w(dev, 0x01, 0x0012);
- reg_w(dev, 0x05, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0000);
+ reg_w(gspca_dev, 0x00, 0x0002);
+ reg_w(gspca_dev, 0x01, 0x0010);
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0xee, 0x008b);
+ reg_w(gspca_dev, 0x03, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x05, 0x0012);
retword = i2c_read(gspca_dev, 0x00) << 8; /* ID 0 */
retword |= i2c_read(gspca_dev, 0x01); /* ID 1 */
PDEBUG(D_PROBE, "probe 3wr vga 2 0x%04x", retword);
if (retword == 0x2030) {
retbyte = i2c_read(gspca_dev, 0x02); /* revision number */
PDEBUG(D_PROBE, "sensor PO2030 rev 0x%02x", retbyte);
- send_unknown(dev, SENSOR_PO2030);
+ send_unknown(gspca_dev, SENSOR_PO2030);
return retword;
}
- reg_w(dev, 0x01, 0x0000);
- reg_w(dev, 0x0a, 0x0010);
- reg_w(dev, 0xd3, 0x008b);
- reg_w(dev, 0x01, 0x0001);
- reg_w(dev, 0x03, 0x0012);
- reg_w(dev, 0x01, 0x0012);
- reg_w(dev, 0x05, 0x0012);
- reg_w(dev, 0xd3, 0x008b);
+ reg_w(gspca_dev, 0x01, 0x0000);
+ reg_w(gspca_dev, 0x0a, 0x0010);
+ reg_w(gspca_dev, 0xd3, 0x008b);
+ reg_w(gspca_dev, 0x01, 0x0001);
+ reg_w(gspca_dev, 0x03, 0x0012);
+ reg_w(gspca_dev, 0x01, 0x0012);
+ reg_w(gspca_dev, 0x05, 0x0012);
+ reg_w(gspca_dev, 0xd3, 0x008b);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0) {
PDEBUG(D_PROBE, "probe 3wr vga type 0a ? ret: %04x", retword);
@@ -6560,54 +6418,74 @@ static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
+
+ if (id->idProduct == 0x301b)
+ sd->bridge = BRIDGE_ZC301;
+ else
+ sd->bridge = BRIDGE_ZC303;
+
+ /* define some sensors from the vendor/product */
+ sd->sensor = id->driver_info;
+
+ sd->sharpness = SHARPNESS_DEF;
+ sd->brightness = BRIGHTNESS_DEF;
+ sd->contrast = CONTRAST_DEF;
+ sd->autogain = AUTOGAIN_DEF;
+ sd->lightfreq = FREQ_DEF;
+ sd->quality = QUALITY_DEF;
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
int sensor;
static const u8 gamma[SENSOR_MAX] = {
- 4, /* SENSOR_ADCM2700 0 */
- 4, /* SENSOR_CS2102 1 */
- 5, /* SENSOR_CS2102K 2 */
- 4, /* SENSOR_GC0305 3 */
- 4, /* SENSOR_HDCS2020b 4 */
- 4, /* SENSOR_HV7131B 5 */
- 4, /* SENSOR_HV7131C 6 */
- 4, /* SENSOR_ICM105A 7 */
- 4, /* SENSOR_MC501CB 8 */
- 4, /* SENSOR_MI0360SOC 9 */
- 3, /* SENSOR_OV7620 10 */
- 4, /* SENSOR_OV7630C 11 */
- 4, /* SENSOR_PAS106 12 */
- 4, /* SENSOR_PAS202B 13 */
- 4, /* SENSOR_PB0330 14 */
- 4, /* SENSOR_PO2030 15 */
- 4, /* SENSOR_TAS5130CK 16 */
- 3, /* SENSOR_TAS5130CXX 17 */
- 3, /* SENSOR_TAS5130C_VF0250 18 */
+ [SENSOR_ADCM2700] = 4,
+ [SENSOR_CS2102] = 4,
+ [SENSOR_CS2102K] = 5,
+ [SENSOR_GC0305] = 4,
+ [SENSOR_HDCS2020b] = 4,
+ [SENSOR_HV7131B] = 4,
+ [SENSOR_HV7131R] = 4,
+ [SENSOR_ICM105A] = 4,
+ [SENSOR_MC501CB] = 4,
+ [SENSOR_MT9V111_1] = 4,
+ [SENSOR_MT9V111_3] = 4,
+ [SENSOR_OV7620] = 3,
+ [SENSOR_OV7630C] = 4,
+ [SENSOR_PAS106] = 4,
+ [SENSOR_PAS202B] = 4,
+ [SENSOR_PB0330] = 4,
+ [SENSOR_PO2030] = 4,
+ [SENSOR_TAS5130C] = 3,
+ [SENSOR_TAS5130C_VF0250] = 3,
};
static const u8 mode_tb[SENSOR_MAX] = {
- 2, /* SENSOR_ADCM2700 0 */
- 1, /* SENSOR_CS2102 1 */
- 1, /* SENSOR_CS2102K 2 */
- 1, /* SENSOR_GC0305 3 */
- 1, /* SENSOR_HDCS2020b 4 */
- 1, /* SENSOR_HV7131B 5 */
- 1, /* SENSOR_HV7131C 6 */
- 1, /* SENSOR_ICM105A 7 */
- 2, /* SENSOR_MC501CB 8 */
- 1, /* SENSOR_MI0360SOC 9 */
- 2, /* SENSOR_OV7620 10 */
- 1, /* SENSOR_OV7630C 11 */
- 0, /* SENSOR_PAS106 12 */
- 1, /* SENSOR_PAS202B 13 */
- 1, /* SENSOR_PB0330 14 */
- 1, /* SENSOR_PO2030 15 */
- 1, /* SENSOR_TAS5130CK 16 */
- 1, /* SENSOR_TAS5130CXX 17 */
- 1, /* SENSOR_TAS5130C_VF0250 18 */
+ [SENSOR_ADCM2700] = 2,
+ [SENSOR_CS2102] = 1,
+ [SENSOR_CS2102K] = 1,
+ [SENSOR_GC0305] = 1,
+ [SENSOR_HDCS2020b] = 1,
+ [SENSOR_HV7131B] = 1,
+ [SENSOR_HV7131R] = 1,
+ [SENSOR_ICM105A] = 1,
+ [SENSOR_MC501CB] = 2,
+ [SENSOR_MT9V111_1] = 1,
+ [SENSOR_MT9V111_3] = 1,
+ [SENSOR_OV7620] = 2,
+ [SENSOR_OV7630C] = 1,
+ [SENSOR_PAS106] = 0,
+ [SENSOR_PAS202B] = 1,
+ [SENSOR_PB0330] = 1,
+ [SENSOR_PO2030] = 1,
+ [SENSOR_TAS5130C] = 1,
+ [SENSOR_TAS5130C_VF0250] = 1,
};
- /* define some sensors from the vendor/product */
- sd->sharpness = SHARPNESS_DEF;
- sd->sensor = id->driver_info;
sensor = zcxx_probeSensor(gspca_dev);
if (sensor >= 0)
PDEBUG(D_PROBE, "probe sensor -> %04x", sensor);
@@ -6626,8 +6504,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
default:
PDEBUG(D_PROBE,
- "Sensor UNKNOWN_0 force Tas5130");
- sd->sensor = SENSOR_TAS5130CXX;
+ "Unknown sensor - set to TAS5130C");
+ sd->sensor = SENSOR_TAS5130C;
}
break;
case 0:
@@ -6642,14 +6520,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
default:
/* case 2: * hv7131r */
- PDEBUG(D_PROBE, "Find Sensor HV7131R(c)");
- sd->sensor = SENSOR_HV7131C;
+ PDEBUG(D_PROBE, "Find Sensor HV7131R");
+ sd->sensor = SENSOR_HV7131R;
break;
}
break;
case 0x02:
PDEBUG(D_PROBE, "Sensor TAS5130C");
- sd->sensor = SENSOR_TAS5130CXX;
+ sd->sensor = SENSOR_TAS5130C;
break;
case 0x04:
PDEBUG(D_PROBE, "Find Sensor CS2102");
@@ -6681,17 +6559,20 @@ static int sd_config(struct gspca_dev *gspca_dev,
case 0x10:
case 0x12:
PDEBUG(D_PROBE, "Find Sensor TAS5130C");
- sd->sensor = SENSOR_TAS5130CXX;
+ sd->sensor = SENSOR_TAS5130C;
break;
case 0x11:
- PDEBUG(D_PROBE, "Find Sensor HV7131R(c)");
- sd->sensor = SENSOR_HV7131C;
+ PDEBUG(D_PROBE, "Find Sensor HV7131R");
+ sd->sensor = SENSOR_HV7131R;
break;
case 0x13:
+ case 0x15:
PDEBUG(D_PROBE,
- "Find Sensor MI0360SOC. Chip revision %x",
+ "Sensor MT9V111. Chip revision %04x",
sd->chip_revision);
- sd->sensor = SENSOR_MI0360SOC;
+ sd->sensor = sd->bridge == BRIDGE_ZC301
+ ? SENSOR_MT9V111_1
+ : SENSOR_MT9V111_3;
break;
case 0x14:
PDEBUG(D_PROBE,
@@ -6699,12 +6580,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->chip_revision);
sd->sensor = SENSOR_CS2102K;
break;
- case 0x15:
- PDEBUG(D_PROBE,
- "Find Sensor TAS5130CK?. Chip revision %x",
- sd->chip_revision);
- sd->sensor = SENSOR_TAS5130CK;
- break;
case 0x16:
PDEBUG(D_PROBE, "Find Sensor ADCM2700");
sd->sensor = SENSOR_ADCM2700;
@@ -6741,13 +6616,11 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
if (sensor < 0x20) {
if (sensor == -1 || sensor == 0x10 || sensor == 0x12)
- reg_w(gspca_dev->dev, 0x02, 0x0010);
+ reg_w(gspca_dev, 0x02, 0x0010);
reg_r(gspca_dev, 0x0010);
}
cam = &gspca_dev->cam;
-/*fixme:test*/
- gspca_dev->nbalt--;
switch (mode_tb[sd->sensor]) {
case 0:
cam->cam_mode = sif_mode;
@@ -6763,58 +6636,62 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(broken_vga_mode);
break;
}
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
sd->gamma = gamma[sd->sensor];
- sd->autogain = AUTOGAIN_DEF;
- sd->lightfreq = FREQ_DEF;
- sd->quality = QUALITY_DEF;
switch (sd->sensor) {
- case SENSOR_HV7131B:
- case SENSOR_HV7131C:
case SENSOR_OV7630C:
gspca_dev->ctrl_dis = (1 << LIGHTFREQ_IDX);
break;
}
- return 0;
-}
-
-/* this function is called at probe and resume time */
-static int sd_init(struct gspca_dev *gspca_dev)
-{
/* switch off the led */
- reg_w(gspca_dev->dev, 0x01, 0x0000);
- return 0;
+ reg_w(gspca_dev, 0x01, 0x0000);
+ return gspca_dev->usb_err;
}
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
int mode;
static const struct usb_action *init_tb[SENSOR_MAX][2] = {
- {adcm2700_Initial, adcm2700_InitialScale}, /* 0 */
- {cs2102_Initial, cs2102_InitialScale}, /* 1 */
- {cs2102K_Initial, cs2102K_InitialScale}, /* 2 */
- {gc0305_Initial, gc0305_InitialScale}, /* 3 */
- {hdcs2020b_Initial, hdcs2020b_InitialScale}, /* 4 */
- {hv7131b_Initial, hv7131b_InitialScale}, /* 5 */
- {hv7131r_Initial, hv7131r_InitialScale}, /* 6 */
- {icm105a_Initial, icm105a_InitialScale}, /* 7 */
- {mc501cb_Initial, mc501cb_InitialScale}, /* 8 */
- {mi0360soc_Initial, mi0360soc_InitialScale}, /* 9 */
- {ov7620_Initial, ov7620_InitialScale}, /* 10 */
- {ov7630c_Initial, ov7630c_InitialScale}, /* 11 */
- {pas106b_Initial, pas106b_InitialScale}, /* 12 */
- {pas202b_Initial, pas202b_InitialScale}, /* 13 */
- {pb0330_Initial, pb0330_InitialScale}, /* 14 */
- {po2030_Initial, po2030_InitialScale}, /* 15 */
- {tas5130cK_Initial, tas5130cK_InitialScale}, /* 16 */
- {tas5130cxx_Initial, tas5130cxx_InitialScale}, /* 17 */
+ [SENSOR_ADCM2700] =
+ {adcm2700_Initial, adcm2700_InitialScale},
+ [SENSOR_CS2102] =
+ {cs2102_Initial, cs2102_InitialScale},
+ [SENSOR_CS2102K] =
+ {cs2102K_Initial, cs2102K_InitialScale},
+ [SENSOR_GC0305] =
+ {gc0305_Initial, gc0305_InitialScale},
+ [SENSOR_HDCS2020b] =
+ {hdcs2020b_Initial, hdcs2020b_InitialScale},
+ [SENSOR_HV7131B] =
+ {hv7131b_Initial, hv7131b_InitialScale},
+ [SENSOR_HV7131R] =
+ {hv7131r_Initial, hv7131r_InitialScale},
+ [SENSOR_ICM105A] =
+ {icm105a_Initial, icm105a_InitialScale},
+ [SENSOR_MC501CB] =
+ {mc501cb_Initial, mc501cb_InitialScale},
+ [SENSOR_MT9V111_1] =
+ {mt9v111_1_Initial, mt9v111_1_InitialScale},
+ [SENSOR_MT9V111_3] =
+ {mt9v111_3_Initial, mt9v111_3_InitialScale},
+ [SENSOR_OV7620] =
+ {ov7620_Initial, ov7620_InitialScale},
+ [SENSOR_OV7630C] =
+ {ov7630c_Initial, ov7630c_InitialScale},
+ [SENSOR_PAS106] =
+ {pas106b_Initial, pas106b_InitialScale},
+ [SENSOR_PAS202B] =
+ {pas202b_Initial, pas202b_InitialScale},
+ [SENSOR_PB0330] =
+ {pb0330_Initial, pb0330_InitialScale},
+ [SENSOR_PO2030] =
+ {po2030_Initial, po2030_InitialScale},
+ [SENSOR_TAS5130C] =
+ {tas5130c_Initial, tas5130c_InitialScale},
+ [SENSOR_TAS5130C_VF0250] =
{tas5130c_vf0250_Initial, tas5130c_vf0250_InitialScale},
- /* 18 */
};
/* create the JPEG header */
@@ -6824,7 +6701,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
switch (sd->sensor) {
- case SENSOR_HV7131C:
+ case SENSOR_HV7131R:
zcxx_probeSensor(gspca_dev);
break;
case SENSOR_PAS106:
@@ -6838,22 +6715,22 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_GC0305:
case SENSOR_OV7620:
case SENSOR_PO2030:
- case SENSOR_TAS5130CXX:
+ case SENSOR_TAS5130C:
case SENSOR_TAS5130C_VF0250:
/* msleep(100); * ?? */
reg_r(gspca_dev, 0x0002); /* --> 0x40 */
- reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
- reg_w(dev, 0x15, 0x01ae);
- if (sd->sensor == SENSOR_TAS5130CXX)
+ reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
+ reg_w(gspca_dev, 0x15, 0x01ae);
+ if (sd->sensor == SENSOR_TAS5130C)
break;
- reg_w(dev, 0x0d, 0x003a);
- reg_w(dev, 0x02, 0x003b);
- reg_w(dev, 0x00, 0x0038);
+ reg_w(gspca_dev, 0x0d, 0x003a);
+ reg_w(gspca_dev, 0x02, 0x003b);
+ reg_w(gspca_dev, 0x00, 0x0038);
break;
case SENSOR_PAS202B:
- reg_w(dev, 0x03, 0x003b);
- reg_w(dev, 0x0c, 0x003a);
- reg_w(dev, 0x0b, 0x0039);
+ reg_w(gspca_dev, 0x03, 0x003b);
+ reg_w(gspca_dev, 0x0c, 0x003a);
+ reg_w(gspca_dev, 0x0b, 0x0039);
break;
}
@@ -6862,15 +6739,15 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_ADCM2700:
case SENSOR_OV7620:
reg_r(gspca_dev, 0x0008);
- reg_w(dev, 0x00, 0x0008);
+ reg_w(gspca_dev, 0x00, 0x0008);
break;
case SENSOR_PAS202B:
case SENSOR_GC0305:
- case SENSOR_TAS5130CXX:
+ case SENSOR_TAS5130C:
reg_r(gspca_dev, 0x0008);
/* fall thru */
case SENSOR_PO2030:
- reg_w(dev, 0x03, 0x0008);
+ reg_w(gspca_dev, 0x03, 0x0008);
break;
}
setsharpness(gspca_dev);
@@ -6880,7 +6757,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_CS2102K: /* gamma set in xxx_Initial */
case SENSOR_HDCS2020b:
case SENSOR_OV7630C:
- case SENSOR_TAS5130CK:
break;
default:
setcontrast(gspca_dev);
@@ -6891,7 +6767,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_OV7620:
case SENSOR_PAS202B:
reg_r(gspca_dev, 0x0180); /* from win */
- reg_w(dev, 0x00, 0x0180);
+ reg_w(gspca_dev, 0x00, 0x0180);
break;
default:
setquality(gspca_dev);
@@ -6901,29 +6777,29 @@ static int sd_start(struct gspca_dev *gspca_dev)
switch (sd->sensor) {
case SENSOR_ADCM2700:
- reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
- reg_w(dev, 0x15, 0x01ae);
- reg_w(dev, 0x02, 0x0180);
+ reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
+ reg_w(gspca_dev, 0x15, 0x01ae);
+ reg_w(gspca_dev, 0x02, 0x0180);
/* ms-win + */
- reg_w(dev, 0x40, 0x0117);
+ reg_w(gspca_dev, 0x40, 0x0117);
break;
case SENSOR_GC0305:
- case SENSOR_TAS5130CXX:
- reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
- reg_w(dev, 0x15, 0x01ae);
+ case SENSOR_TAS5130C:
+ reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
+ reg_w(gspca_dev, 0x15, 0x01ae);
/* fall thru */
case SENSOR_PAS202B:
case SENSOR_PO2030:
-/* reg_w(dev, 0x40, ZC3XX_R117_GGAIN); * (from win traces) */
+/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); * (from win traces) */
reg_r(gspca_dev, 0x0180);
break;
case SENSOR_OV7620:
- reg_w(dev, 0x09, 0x01ad);
- reg_w(dev, 0x15, 0x01ae);
+ reg_w(gspca_dev, 0x09, 0x01ad);
+ reg_w(gspca_dev, 0x15, 0x01ae);
i2c_read(gspca_dev, 0x13); /*fixme: returns 0xa3 */
i2c_write(gspca_dev, 0x13, 0xa3, 0x00);
/*fixme: returned value to send? */
- reg_w(dev, 0x40, 0x0117);
+ reg_w(gspca_dev, 0x40, 0x0117);
reg_r(gspca_dev, 0x0180);
break;
}
@@ -6932,11 +6808,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
switch (sd->sensor) {
case SENSOR_PO2030:
msleep(50);
- reg_w(dev, 0x00, 0x0007); /* (from win traces) */
- reg_w(dev, 0x02, ZC3XX_R008_CLOCKSETTING);
+ reg_w(gspca_dev, 0x00, 0x0007); /* (from win traces) */
+ reg_w(gspca_dev, 0x02, ZC3XX_R008_CLOCKSETTING);
break;
}
- return 0;
+ return gspca_dev->usb_err;
}
/* called on streamoff with alt 0 and on disconnect */
@@ -6946,7 +6822,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
if (!gspca_dev->present)
return;
- send_unknown(gspca_dev->dev, sd->sensor);
+ send_unknown(gspca_dev, sd->sensor);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -6981,7 +6857,7 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
sd->brightness = val;
if (gspca_dev->streaming)
setcontrast(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -6999,7 +6875,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
sd->contrast = val;
if (gspca_dev->streaming)
setcontrast(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
@@ -7017,7 +6893,7 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
sd->autogain = val;
if (gspca_dev->streaming)
setautogain(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -7035,7 +6911,7 @@ static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val)
sd->gamma = val;
if (gspca_dev->streaming)
setcontrast(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val)
@@ -7053,7 +6929,7 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
sd->lightfreq = val;
if (gspca_dev->streaming)
setlightfreq(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
@@ -7071,7 +6947,7 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
sd->sharpness = val;
if (gspca_dev->streaming)
setsharpness(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -7116,7 +6992,7 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
sd->quality = jcomp->quality;
if (gspca_dev->streaming)
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
@@ -7220,7 +7096,6 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x10fd, 0x8050)},
{} /* end of entry */
};
-#undef DVNAME
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
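Note on the table rewrites in the zc3xx hunks above: the gamma, mode and init tables switch from positional arrays annotated with index comments to C99 designated initializers keyed by the sensor enum. A minimal userspace sketch of that pattern follows; the enum names and values here are invented for illustration and are not the driver's.

#include <stdio.h>

/* Hypothetical sensor enum, standing in for the driver's SENSOR_* list. */
enum sensor_id {
	SENSOR_A,
	SENSOR_B,
	SENSOR_C,
	SENSOR_MAX
};

/* Designated initializers tie each entry to its enum index, so the table
 * no longer drifts silently when sensors are added, removed or reordered;
 * any slot left unnamed defaults to 0. */
static const unsigned char gamma_tb[SENSOR_MAX] = {
	[SENSOR_A] = 4,
	[SENSOR_B] = 3,
	[SENSOR_C] = 5,
};

int main(void)
{
	printf("gamma for SENSOR_B: %u\n", gamma_tb[SENSOR_B]);
	return 0;
}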
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
index c46bfb1569e..be4af1fa557 100644
--- a/drivers/media/video/ivtv/Kconfig
+++ b/drivers/media/video/ivtv/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_IVTV
depends on VIDEO_V4L2 && PCI && I2C
depends on INPUT # due to VIDEO_IR
select I2C_ALGOBIT
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index b588e30cbcf..b31ee1bceef 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -17,163 +17,14 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
#include "ivtv-driver.h"
-#include "ivtv-cards.h"
#include "ivtv-ioctl.h"
-#include "ivtv-routing.h"
-#include "ivtv-i2c.h"
-#include "ivtv-mailbox.h"
#include "ivtv-controls.h"
-/* Must be sorted from low to high control ID! */
-static const u32 user_ctrls[] = {
- V4L2_CID_USER_CLASS,
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_AUDIO_BALANCE,
- V4L2_CID_AUDIO_BASS,
- V4L2_CID_AUDIO_TREBLE,
- V4L2_CID_AUDIO_MUTE,
- V4L2_CID_AUDIO_LOUDNESS,
- 0
-};
-
-static const u32 *ctrl_classes[] = {
- user_ctrls,
- cx2341x_mpeg_ctrls,
- NULL
-};
-
-
-int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
-{
- struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
- const char *name;
-
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (qctrl->id == 0)
- return -EINVAL;
-
- switch (qctrl->id) {
- /* Standard V4L2 controls */
- case V4L2_CID_USER_CLASS:
- return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_HUE:
- case V4L2_CID_SATURATION:
- case V4L2_CID_CONTRAST:
- if (v4l2_subdev_call(itv->sd_video, core, queryctrl, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
-
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- if (v4l2_subdev_call(itv->sd_audio, core, queryctrl, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
-
- default:
- if (cx2341x_ctrl_query(&itv->params, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
- }
- strncpy(qctrl->name, name, sizeof(qctrl->name) - 1);
- qctrl->name[sizeof(qctrl->name) - 1] = 0;
- return 0;
-}
-
-int ivtv_querymenu(struct file *file, void *fh, struct v4l2_querymenu *qmenu)
-{
- struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
- struct v4l2_queryctrl qctrl;
-
- qctrl.id = qmenu->id;
- ivtv_queryctrl(file, fh, &qctrl);
- return v4l2_ctrl_query_menu(qmenu, &qctrl,
- cx2341x_ctrl_get_menu(&itv->params, qmenu->id));
-}
-
-static int ivtv_try_ctrl(struct file *file, void *fh,
- struct v4l2_ext_control *vctrl)
-{
- struct v4l2_queryctrl qctrl;
- const char **menu_items = NULL;
- int err;
-
- qctrl.id = vctrl->id;
- err = ivtv_queryctrl(file, fh, &qctrl);
- if (err)
- return err;
- if (qctrl.type == V4L2_CTRL_TYPE_MENU)
- menu_items = v4l2_ctrl_get_menu(qctrl.id);
- return v4l2_ctrl_check(vctrl, &qctrl, menu_items);
-}
-
-static int ivtv_s_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
-{
- switch (vctrl->id) {
- /* Standard V4L2 controls */
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_HUE:
- case V4L2_CID_SATURATION:
- case V4L2_CID_CONTRAST:
- return v4l2_subdev_call(itv->sd_video, core, s_ctrl, vctrl);
-
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- return v4l2_subdev_call(itv->sd_audio, core, s_ctrl, vctrl);
-
- default:
- IVTV_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id);
- return -EINVAL;
- }
- return 0;
-}
-
-static int ivtv_g_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
+static int ivtv_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
{
- switch (vctrl->id) {
- /* Standard V4L2 controls */
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_HUE:
- case V4L2_CID_SATURATION:
- case V4L2_CID_CONTRAST:
- return v4l2_subdev_call(itv->sd_video, core, g_ctrl, vctrl);
-
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- return v4l2_subdev_call(itv->sd_audio, core, g_ctrl, vctrl);
- default:
- IVTV_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id);
- return -EINVAL;
- }
- return 0;
-}
-
-static int ivtv_setup_vbi_fmt(struct ivtv *itv, enum v4l2_mpeg_stream_vbi_fmt fmt)
-{
- if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_CAPTURE))
- return -EINVAL;
- if (atomic_read(&itv->capturing) > 0)
- return -EBUSY;
+ struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
/* First try to allocate sliced VBI buffers if needed. */
if (fmt && itv->vbi.sliced_mpeg_data[0] == NULL) {
@@ -208,106 +59,43 @@ static int ivtv_setup_vbi_fmt(struct ivtv *itv, enum v4l2_mpeg_stream_vbi_fmt fm
return 0;
}
-int ivtv_g_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
+static int ivtv_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
{
- struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
- struct v4l2_control ctrl;
-
- if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
- int i;
- int err = 0;
-
- for (i = 0; i < c->count; i++) {
- ctrl.id = c->controls[i].id;
- ctrl.value = c->controls[i].value;
- err = ivtv_g_ctrl(itv, &ctrl);
- c->controls[i].value = ctrl.value;
- if (err) {
- c->error_idx = i;
- break;
- }
- }
- return err;
- }
- if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
- return cx2341x_ext_ctrls(&itv->params, 0, c, VIDIOC_G_EXT_CTRLS);
- return -EINVAL;
+ struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
+ int is_mpeg1 = val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
+ struct v4l2_mbus_framefmt fmt;
+
+ /* fix videodecoder resolution */
+ fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
+ fmt.height = cxhdl->height;
+ fmt.code = V4L2_MBUS_FMT_FIXED;
+ v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &fmt);
+ return 0;
}
-int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
+static int ivtv_s_audio_sampling_freq(struct cx2341x_handler *cxhdl, u32 idx)
{
- struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
- struct v4l2_control ctrl;
-
- if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
- int i;
- int err = 0;
-
- for (i = 0; i < c->count; i++) {
- ctrl.id = c->controls[i].id;
- ctrl.value = c->controls[i].value;
- err = ivtv_s_ctrl(itv, &ctrl);
- c->controls[i].value = ctrl.value;
- if (err) {
- c->error_idx = i;
- break;
- }
- }
- return err;
- }
- if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
- static u32 freqs[3] = { 44100, 48000, 32000 };
- struct cx2341x_mpeg_params p = itv->params;
- int err = cx2341x_ext_ctrls(&p, atomic_read(&itv->capturing), c, VIDIOC_S_EXT_CTRLS);
- unsigned idx;
-
- if (err)
- return err;
+ static const u32 freqs[3] = { 44100, 48000, 32000 };
+ struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
- if (p.video_encoding != itv->params.video_encoding) {
- int is_mpeg1 = p.video_encoding ==
- V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
- struct v4l2_mbus_framefmt fmt;
-
- /* fix videodecoder resolution */
- fmt.width = itv->params.width / (is_mpeg1 ? 2 : 1);
- fmt.height = itv->params.height;
- fmt.code = V4L2_MBUS_FMT_FIXED;
- v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &fmt);
- }
- err = cx2341x_update(itv, ivtv_api_func, &itv->params, &p);
- if (!err && itv->params.stream_vbi_fmt != p.stream_vbi_fmt)
- err = ivtv_setup_vbi_fmt(itv, p.stream_vbi_fmt);
- itv->params = p;
- itv->dualwatch_stereo_mode = p.audio_properties & 0x0300;
- idx = p.audio_properties & 0x03;
- /* The audio clock of the digitizer must match the codec sample
- rate otherwise you get some very strange effects. */
- if (idx < ARRAY_SIZE(freqs))
- ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]);
- return err;
- }
- return -EINVAL;
+ /* The audio clock of the digitizer must match the codec sample
+ rate otherwise you get some very strange effects. */
+ if (idx < ARRAY_SIZE(freqs))
+ ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]);
+ return 0;
}
-int ivtv_try_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
+static int ivtv_s_audio_mode(struct cx2341x_handler *cxhdl, u32 val)
{
- struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
+ struct ivtv *itv = container_of(cxhdl, struct ivtv, cxhdl);
- if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
- int i;
- int err = 0;
-
- for (i = 0; i < c->count; i++) {
- err = ivtv_try_ctrl(file, fh, &c->controls[i]);
- if (err) {
- c->error_idx = i;
- break;
- }
- }
- return err;
- }
- if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
- return cx2341x_ext_ctrls(&itv->params, atomic_read(&itv->capturing), c, VIDIOC_TRY_EXT_CTRLS);
- return -EINVAL;
+ itv->dualwatch_stereo_mode = val;
+ return 0;
}
+
+struct cx2341x_handler_ops ivtv_cxhdl_ops = {
+ .s_audio_mode = ivtv_s_audio_mode,
+ .s_audio_sampling_freq = ivtv_s_audio_sampling_freq,
+ .s_video_encoding = ivtv_s_video_encoding,
+ .s_stream_vbi_fmt = ivtv_s_stream_vbi_fmt,
+};
diff --git a/drivers/media/video/ivtv/ivtv-controls.h b/drivers/media/video/ivtv/ivtv-controls.h
index 1c7721e23c9..d12893dd018 100644
--- a/drivers/media/video/ivtv/ivtv-controls.h
+++ b/drivers/media/video/ivtv/ivtv-controls.h
@@ -21,10 +21,6 @@
#ifndef IVTV_CONTROLS_H
#define IVTV_CONTROLS_H
-int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *a);
-int ivtv_g_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *a);
-int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *a);
-int ivtv_try_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *a);
-int ivtv_querymenu(struct file *file, void *fh, struct v4l2_querymenu *a);
+extern struct cx2341x_handler_ops ivtv_cxhdl_ops;
#endif
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 90daa6e751d..e421d15b0f5 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -53,6 +53,7 @@
#include "ivtv-cards.h"
#include "ivtv-vbi.h"
#include "ivtv-routing.h"
+#include "ivtv-controls.h"
#include "ivtv-gpio.h"
#include <media/tveeprom.h>
@@ -705,6 +706,8 @@ done:
*/
static int __devinit ivtv_init_struct1(struct ivtv *itv)
{
+ struct sched_param param = { .sched_priority = 99 };
+
itv->base_addr = pci_resource_start(itv->pdev, 0);
itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
@@ -716,21 +719,24 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
spin_lock_init(&itv->lock);
spin_lock_init(&itv->dma_reg_lock);
- itv->irq_work_queues = create_singlethread_workqueue(itv->v4l2_dev.name);
- if (itv->irq_work_queues == NULL) {
- IVTV_ERR("Could not create ivtv workqueue\n");
+ init_kthread_worker(&itv->irq_worker);
+ itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
+ itv->v4l2_dev.name);
+ if (IS_ERR(itv->irq_worker_task)) {
+ IVTV_ERR("Could not create ivtv task\n");
return -1;
}
+ /* must use the FIFO scheduler as it is realtime sensitive */
+ sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
- INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler);
+ init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
/* start counting open_id at 1 */
itv->open_id = 1;
/* Initial settings */
- cx2341x_fill_defaults(&itv->params);
- itv->params.port = CX2341X_PORT_MEMORY;
- itv->params.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
+ itv->cxhdl.port = CX2341X_PORT_MEMORY;
+ itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
init_waitqueue_head(&itv->eos_waitq);
init_waitqueue_head(&itv->event_waitq);
init_waitqueue_head(&itv->vsync_waitq);
@@ -1000,13 +1006,20 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
retval = -ENOMEM;
goto err;
}
+ retval = cx2341x_handler_init(&itv->cxhdl, 50);
+ if (retval)
+ goto err;
+ itv->v4l2_dev.ctrl_handler = &itv->cxhdl.hdl;
+ itv->cxhdl.ops = &ivtv_cxhdl_ops;
+ itv->cxhdl.priv = itv;
+ itv->cxhdl.func = ivtv_api_func;
IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
/* PCI Device Setup */
retval = ivtv_setup_pci(itv, pdev, pci_id);
if (retval == -EIO)
- goto free_workqueue;
+ goto free_worker;
if (retval == -ENXIO)
goto free_mem;
@@ -1121,7 +1134,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
itv->yuv_info.v4l2_src_w = itv->yuv_info.osd_full_w;
itv->yuv_info.v4l2_src_h = itv->yuv_info.osd_full_h;
- itv->params.video_gop_size = itv->is_60hz ? 15 : 12;
+ cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz);
itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_MPG] = 0x08000;
itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_PCM] = 0x01200;
@@ -1218,8 +1231,8 @@ free_mem:
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (itv->has_cx23415)
release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
-free_workqueue:
- destroy_workqueue(itv->irq_work_queues);
+free_worker:
+ kthread_stop(itv->irq_worker_task);
err:
if (retval == 0)
retval = -ENODEV;
@@ -1263,15 +1276,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
IVTV_DEBUG_INFO("Getting firmware version..\n");
ivtv_firmware_versions(itv);
- if (itv->card->hw_all & IVTV_HW_CX25840) {
- struct v4l2_control ctrl;
-
+ if (itv->card->hw_all & IVTV_HW_CX25840)
v4l2_subdev_call(itv->sd_video, core, load_fw);
- /* CX25840_CID_ENABLE_PVR150_WORKAROUND */
- ctrl.id = V4L2_CID_PRIVATE_BASE;
- ctrl.value = itv->pvr150_workaround;
- v4l2_subdev_call(itv->sd_video, core, s_ctrl, &ctrl);
- }
vf.tuner = 0;
vf.type = V4L2_TUNER_ANALOG_TV;
@@ -1323,6 +1329,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
/* For cards with video out, this call needs interrupts enabled */
ivtv_s_std(NULL, &fh, &itv->tuner_std);
+ /* Setup initial controls */
+ cx2341x_handler_setup(&itv->cxhdl);
return 0;
}
@@ -1363,9 +1371,9 @@ static void ivtv_remove(struct pci_dev *pdev)
ivtv_set_irq_mask(itv, 0xffffffff);
del_timer_sync(&itv->dma_timer);
- /* Stop all Work Queues */
- flush_workqueue(itv->irq_work_queues);
- destroy_workqueue(itv->irq_work_queues);
+ /* Kill irq worker */
+ flush_kthread_worker(&itv->irq_worker);
+ kthread_stop(itv->irq_worker_task);
ivtv_streams_cleanup(itv, 1);
ivtv_udma_free(itv);
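Aside on the ivtv-driver.c hunks above: the per-device workqueue is replaced by a dedicated kthread_worker so the IRQ deferred work can run under SCHED_FIFO. A rough standalone sketch of that pattern, using the same helper names the patch uses (init_kthread_worker, queue_kthread_work, flush_kthread_worker); the module, structure and field names are made up for illustration and this is not the ivtv code.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical device state, mirroring the shape of the conversion above. */
struct demo_dev {
	struct kthread_worker worker;
	struct task_struct *worker_task;
	struct kthread_work work;
};

static struct demo_dev demo;

static void demo_work_handler(struct kthread_work *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, work);

	(void)dev;	/* deferred, latency-sensitive processing goes here */
}

static int __init demo_init(void)
{
	struct sched_param param = { .sched_priority = 99 };

	init_kthread_worker(&demo.worker);
	demo.worker_task = kthread_run(kthread_worker_fn, &demo.worker, "demo");
	if (IS_ERR(demo.worker_task))
		return PTR_ERR(demo.worker_task);
	/* a dedicated task can be given an RT policy, unlike a shared workqueue */
	sched_setscheduler(demo.worker_task, SCHED_FIFO, &param);

	init_kthread_work(&demo.work, demo_work_handler);
	queue_kthread_work(&demo.worker, &demo.work);	/* normally done from the IRQ path */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_kthread_worker(&demo.worker);
	kthread_stop(demo.worker_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");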
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index bd084df4448..75803141481 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -51,7 +51,7 @@
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -62,6 +62,7 @@
#include <linux/dvb/audio.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/tuner.h>
@@ -260,7 +261,6 @@ struct ivtv_mailbox_data {
#define IVTV_F_I_DEC_PAUSED 20 /* the decoder is paused */
#define IVTV_F_I_INITED 21 /* set after first open */
#define IVTV_F_I_FAILED 22 /* set if first open failed */
-#define IVTV_F_I_WORK_INITED 23 /* worker thread was initialized */
/* Event notifications */
#define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */
@@ -632,6 +632,8 @@ struct ivtv {
struct ivtv_options options; /* user options */
struct v4l2_device v4l2_dev;
+ struct cx2341x_handler cxhdl;
+ struct v4l2_ctrl_handler hdl_gpio;
struct v4l2_subdev sd_gpio; /* GPIO sub-device */
u16 instance;
@@ -649,7 +651,6 @@ struct ivtv {
v4l2_std_id std_out; /* current TV output standard */
u8 audio_stereo_mode; /* decoder setting how to handle stereo MPEG audio */
u8 audio_bilingual_mode; /* decoder setting how to handle bilingual MPEG audio */
- struct cx2341x_mpeg_params params; /* current encoder parameters */
/* Locking */
@@ -666,8 +667,9 @@ struct ivtv {
/* Interrupts & DMA */
u32 irqmask; /* active interrupts */
u32 irq_rr_idx; /* round-robin stream index */
- struct workqueue_struct *irq_work_queues; /* workqueue for PIO/YUV/VBI actions */
- struct work_struct irq_work_queue; /* work entry */
+ struct kthread_worker irq_worker; /* kthread worker for PIO/YUV/VBI actions */
+ struct task_struct *irq_worker_task; /* task for irq_worker */
+ struct kthread_work irq_work; /* kthread work entry */
spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
int cur_dma_stream; /* index of current stream doing DMA (-1 if none) */
int cur_pio_stream; /* index of current stream doing PIO (-1 if none) */
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index a6a2cdb8156..d727485da88 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -150,12 +150,10 @@ void ivtv_release_stream(struct ivtv_stream *s)
static void ivtv_dualwatch(struct ivtv *itv)
{
struct v4l2_tuner vt;
- u32 new_bitmap;
u32 new_stereo_mode;
- const u32 stereo_mask = 0x0300;
- const u32 dual = 0x0200;
+ const u32 dual = 0x02;
- new_stereo_mode = itv->params.audio_properties & stereo_mask;
+ new_stereo_mode = v4l2_ctrl_g_ctrl(itv->cxhdl.audio_mode);
memset(&vt, 0, sizeof(vt));
ivtv_call_all(itv, tuner, g_tuner, &vt);
if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2))
@@ -164,16 +162,10 @@ static void ivtv_dualwatch(struct ivtv *itv)
if (new_stereo_mode == itv->dualwatch_stereo_mode)
return;
- new_bitmap = new_stereo_mode | (itv->params.audio_properties & ~stereo_mask);
-
- IVTV_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x. new audio_bitmask=0x%ux\n",
- itv->dualwatch_stereo_mode, new_stereo_mode, new_bitmap);
-
- if (ivtv_vapi(itv, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new_bitmap) == 0) {
- itv->dualwatch_stereo_mode = new_stereo_mode;
- return;
- }
- IVTV_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
+ IVTV_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x.\n",
+ itv->dualwatch_stereo_mode, new_stereo_mode);
+ if (v4l2_ctrl_s_ctrl(itv->cxhdl.audio_mode, new_stereo_mode))
+ IVTV_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
}
static void ivtv_update_pgm_info(struct ivtv *itv)
@@ -894,7 +886,8 @@ int ivtv_v4l2_close(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* Undo video mute */
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
- itv->params.video_mute | (itv->params.video_mute_yuv << 8));
+ v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+ (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Done! Unmute and continue. */
ivtv_unmute(itv);
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
index d8bf2b01729..4df01947a7d 100644
--- a/drivers/media/video/ivtv/ivtv-firmware.c
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -248,9 +248,9 @@ void ivtv_init_mpeg_decoder(struct ivtv *itv)
volatile u8 __iomem *mem_offset;
data[0] = 0;
- data[1] = itv->params.width; /* YUV source width */
- data[2] = itv->params.height;
- data[3] = itv->params.audio_properties; /* Audio settings to use,
+ data[1] = itv->cxhdl.width; /* YUV source width */
+ data[2] = itv->cxhdl.height;
+ data[3] = itv->cxhdl.audio_properties; /* Audio settings to use,
bitmap. see docs. */
if (ivtv_api(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, data)) {
IVTV_ERR("ivtv_init_mpeg_decoder failed to set decoder source\n");
diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c
index aede061cae5..8f0d0778905 100644
--- a/drivers/media/video/ivtv/ivtv-gpio.c
+++ b/drivers/media/video/ivtv/ivtv-gpio.c
@@ -24,6 +24,7 @@
#include "ivtv-gpio.h"
#include "tuner-xc2028.h"
#include <media/tuner.h>
+#include <media/v4l2-ctrls.h>
/*
* GPIO assignment of Yuan MPG600/MPG160
@@ -149,16 +150,10 @@ static inline struct ivtv *sd_to_ivtv(struct v4l2_subdev *sd)
return container_of(sd, struct ivtv, sd_gpio);
}
-static struct v4l2_queryctrl gpio_ctrl_mute = {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = 0,
-};
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ivtv, hdl_gpio)->sd_gpio;
+}
static int subdev_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
@@ -262,40 +257,24 @@ static int subdev_s_audio_routing(struct v4l2_subdev *sd,
return 0;
}
-static int subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int subdev_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct ivtv *itv = sd_to_ivtv(sd);
u16 mask, data;
- if (ctrl->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- mask = itv->card->gpio_audio_mute.mask;
- data = itv->card->gpio_audio_mute.mute;
- ctrl->value = (read_reg(IVTV_REG_GPIO_OUT) & mask) == data;
- return 0;
-}
-
-static int subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ivtv *itv = sd_to_ivtv(sd);
- u16 mask, data;
-
- if (ctrl->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- mask = itv->card->gpio_audio_mute.mask;
- data = ctrl->value ? itv->card->gpio_audio_mute.mute : 0;
- if (mask)
- write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ mask = itv->card->gpio_audio_mute.mask;
+ data = ctrl->val ? itv->card->gpio_audio_mute.mute : 0;
+ if (mask)
+ write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) |
+ (data & mask), IVTV_REG_GPIO_OUT);
+ return 0;
+ }
+ return -EINVAL;
}
-static int subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- if (qc->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- *qc = gpio_ctrl_mute;
- return 0;
-}
static int subdev_log_status(struct v4l2_subdev *sd)
{
@@ -304,6 +283,7 @@ static int subdev_log_status(struct v4l2_subdev *sd)
IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n",
read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT),
read_reg(IVTV_REG_GPIO_IN));
+ v4l2_ctrl_handler_log_status(&itv->hdl_gpio, sd->name);
return 0;
}
@@ -327,11 +307,19 @@ static int subdev_s_video_routing(struct v4l2_subdev *sd,
return 0;
}
+static const struct v4l2_ctrl_ops gpio_ctrl_ops = {
+ .s_ctrl = subdev_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops subdev_core_ops = {
.log_status = subdev_log_status,
- .g_ctrl = subdev_g_ctrl,
- .s_ctrl = subdev_s_ctrl,
- .queryctrl = subdev_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_tuner_ops subdev_tuner_ops = {
@@ -375,5 +363,12 @@ int ivtv_gpio_init(struct ivtv *itv)
v4l2_subdev_init(&itv->sd_gpio, &subdev_ops);
snprintf(itv->sd_gpio.name, sizeof(itv->sd_gpio.name), "%s-gpio", itv->v4l2_dev.name);
itv->sd_gpio.grp_id = IVTV_HW_GPIO;
+ v4l2_ctrl_handler_init(&itv->hdl_gpio, 1);
+ v4l2_ctrl_new_std(&itv->hdl_gpio, &gpio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ if (itv->hdl_gpio.error)
+ return itv->hdl_gpio.error;
+ itv->sd_gpio.ctrl_handler = &itv->hdl_gpio;
+ v4l2_ctrl_handler_setup(&itv->hdl_gpio);
return v4l2_device_register_subdev(&itv->v4l2_dev, &itv->sd_gpio);
}
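Aside on the ivtv-gpio.c conversion above (the msp3400 hunks below follow the same scheme): the hand-rolled queryctrl/g_ctrl/s_ctrl callbacks are dropped in favour of a v4l2_ctrl_handler plus a v4l2_ctrl_ops.s_ctrl hook. A minimal sketch of that registration pattern, with an invented state structure and only the mute control; it is a simplified illustration, not the driver's code.

#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

/* Hypothetical subdev state carrying its own control handler. */
struct demo_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

static int demo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* The framework calls this only for controls owned by this handler,
	 * with ctrl->val already validated; just program the hardware. */
	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		/* write ctrl->val to the device here */
		return 0;
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops demo_ctrl_ops = {
	.s_ctrl = demo_s_ctrl,
};

static int demo_init_controls(struct demo_state *state)
{
	v4l2_ctrl_handler_init(&state->hdl, 1);
	v4l2_ctrl_new_std(&state->hdl, &demo_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		return err;
	}
	state->sd.ctrl_handler = &state->hdl;
	/* push the default values out to the hardware */
	return v4l2_ctrl_handler_setup(&state->hdl);
}

With the handler in place, the generic v4l2_subdev_queryctrl/g_ctrl/s_ctrl wrappers replace the per-driver core ops, as the hunk above does for the GPIO subdevice.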
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index a5b92d109c6..a74fa099c56 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -63,6 +63,7 @@
#include "ivtv-cards.h"
#include "ivtv-gpio.h"
#include "ivtv-i2c.h"
+#include <media/cx25840.h>
/* i2c implementation for cx23415/6 chip, ivtv project.
* Author: Kevin Thayer (nufan_wfk at yahoo.com)
@@ -182,8 +183,8 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
return -1;
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, type, I2C_NAME_SIZE);
- return i2c_new_probed_device(adap, &info, addr_list) == NULL
- ? -1 : 0;
+ return i2c_new_probed_device(adap, &info, addr_list, NULL)
+ == NULL ? -1 : 0;
}
/* Only allow one IR receiver to be registered per board */
@@ -220,7 +221,8 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
info.platform_data = init_data;
strlcpy(info.type, type, I2C_NAME_SIZE);
- return i2c_new_probed_device(adap, &info, addr_list) == NULL ? -1 : 0;
+ return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
+ -1 : 0;
}
/* Instantiate the IR receiver device using probing -- undesirable */
@@ -248,7 +250,7 @@ struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv)
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- return i2c_new_probed_device(&itv->i2c_adap, &info, addr_list);
+ return i2c_new_probed_device(&itv->i2c_adap, &info, addr_list, NULL);
}
int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
@@ -292,6 +294,12 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
adap, mod, type, 0, I2C_ADDRS(hw_addrs[idx]));
+ } else if (hw == IVTV_HW_CX25840) {
+ struct cx25840_platform_data pdata;
+
+ pdata.pvr150_workaround = itv->pvr150_workaround;
+ sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
+ adap, mod, type, 0, &pdata, hw_addrs[idx], NULL);
} else {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
adap, mod, type, hw_addrs[idx], NULL);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 11ac2fa33ef..4eed9123683 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -162,7 +162,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0;
data[1] = (speed < 0);
data[2] = speed < 0 ? 3 : 7;
- data[3] = itv->params.video_b_frames;
+ data[3] = v4l2_ctrl_g_ctrl(itv->cxhdl.video_b_frames);
data[4] = (speed == 1500 || speed == 500) ? itv->speed_mute_audio : 0;
data[5] = 0;
data[6] = 0;
@@ -339,8 +339,8 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- pixfmt->width = itv->params.width;
- pixfmt->height = itv->params.height;
+ pixfmt->width = itv->cxhdl.width;
+ pixfmt->height = itv->cxhdl.height;
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
pixfmt->priv = 0;
@@ -568,7 +568,6 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
{
struct ivtv_open_id *id = fh;
struct ivtv *itv = id->itv;
- struct cx2341x_mpeg_params *p = &itv->params;
struct v4l2_mbus_framefmt mbus_fmt;
int ret = ivtv_try_fmt_vid_cap(file, fh, fmt);
int w = fmt->fmt.pix.width;
@@ -577,15 +576,15 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
if (ret)
return ret;
- if (p->width == w && p->height == h)
+ if (itv->cxhdl.width == w && itv->cxhdl.height == h)
return 0;
if (atomic_read(&itv->capturing) > 0)
return -EBUSY;
- p->width = w;
- p->height = h;
- if (p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
+ itv->cxhdl.width = w;
+ itv->cxhdl.height = h;
+ if (v4l2_ctrl_g_ctrl(itv->cxhdl.video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
fmt->fmt.pix.width /= 2;
mbus_fmt.width = fmt->fmt.pix.width;
mbus_fmt.height = h;
@@ -1114,9 +1113,10 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
itv->std = *std;
itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
- itv->params.is_50hz = itv->is_50hz = !itv->is_60hz;
- itv->params.width = 720;
- itv->params.height = itv->is_50hz ? 576 : 480;
+ itv->is_50hz = !itv->is_60hz;
+ cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz);
+ itv->cxhdl.width = 720;
+ itv->cxhdl.height = itv->is_50hz ? 576 : 480;
itv->vbi.count = itv->is_50hz ? 18 : 12;
itv->vbi.start[0] = itv->is_50hz ? 6 : 10;
itv->vbi.start[1] = itv->is_50hz ? 318 : 273;
@@ -1157,7 +1157,7 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = itv->main_rect.top = 0;
itv->main_rect.width = 720;
- itv->main_rect.height = itv->params.height;
+ itv->main_rect.height = itv->cxhdl.height;
ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
720, itv->main_rect.height, 0, 0);
yi->main_rect = itv->main_rect;
@@ -1554,7 +1554,7 @@ static int ivtv_log_status(struct file *file, void *fh)
}
IVTV_INFO("Tuner: %s\n",
test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? "Radio" : "TV");
- cx2341x_log_status(&itv->params, itv->v4l2_dev.name);
+ v4l2_ctrl_handler_log_status(&itv->cxhdl.hdl, itv->v4l2_dev.name);
IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags);
for (i = 0; i < IVTV_MAX_STREAMS; i++) {
struct ivtv_stream *s = &itv->streams[i];
@@ -1942,11 +1942,6 @@ static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_s_register = ivtv_s_register,
#endif
.vidioc_default = ivtv_default,
- .vidioc_queryctrl = ivtv_queryctrl,
- .vidioc_querymenu = ivtv_querymenu,
- .vidioc_g_ext_ctrls = ivtv_g_ext_ctrls,
- .vidioc_s_ext_ctrls = ivtv_s_ext_ctrls,
- .vidioc_try_ext_ctrls = ivtv_try_ext_ctrls,
.vidioc_subscribe_event = ivtv_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index fea1ec33b0d..9b4faf00919 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -71,19 +71,10 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
-void ivtv_irq_work_handler(struct work_struct *work)
+void ivtv_irq_work_handler(struct kthread_work *work)
{
- struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
+ struct ivtv *itv = container_of(work, struct ivtv, irq_work);
- DEFINE_WAIT(wait);
-
- if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
- struct sched_param param = { .sched_priority = 99 };
-
- /* This thread must use the FIFO scheduler as it
- is realtime sensitive. */
- sched_setscheduler(current, SCHED_FIFO, &param);
- }
if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
ivtv_pio_work_handler(itv);
@@ -975,7 +966,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
}
if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
- queue_work(itv->irq_work_queues, &itv->irq_work_queue);
+ queue_kthread_work(&itv->irq_worker, &itv->irq_work);
}
spin_unlock(&itv->dma_reg_lock);
diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h
index f879a5822e7..1e84433737c 100644
--- a/drivers/media/video/ivtv/ivtv-irq.h
+++ b/drivers/media/video/ivtv/ivtv-irq.h
@@ -46,7 +46,7 @@
irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
-void ivtv_irq_work_handler(struct work_struct *work);
+void ivtv_irq_work_handler(struct kthread_work *work);
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
void ivtv_unfinished_dma(unsigned long arg);
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 55df4190c28..512607e0cda 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -210,6 +210,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
s->vdev->num = num;
s->vdev->v4l2_dev = &itv->v4l2_dev;
+ s->vdev->ctrl_handler = itv->v4l2_dev.ctrl_handler;
s->vdev->fops = ivtv_stream_info[type].fops;
s->vdev->release = video_device_release;
s->vdev->tvnorms = V4L2_STD_ALL;
@@ -451,7 +452,6 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv *itv = s->itv;
- struct cx2341x_mpeg_params *p = &itv->params;
int captype = 0, subtype = 0;
int enable_passthrough = 0;
@@ -472,7 +472,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
}
itv->mpg_data_received = itv->vbi_data_inserted = 0;
itv->dualwatch_jiffies = jiffies;
- itv->dualwatch_stereo_mode = p->audio_properties & 0x0300;
+ itv->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(itv->cxhdl.audio_mode);
itv->search_pack_header = 0;
break;
@@ -560,12 +560,12 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
itv->pgm_info_offset, itv->pgm_info_num);
/* Setup API for Stream */
- cx2341x_update(itv, ivtv_api_func, NULL, p);
+ cx2341x_handler_setup(&itv->cxhdl);
/* mute if capturing radio */
if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
- 1 | (p->video_mute_yuv << 8));
+ 1 | (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Vsync Setup */
@@ -581,6 +581,8 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
clear_bit(IVTV_F_I_EOS, &itv->i_flags);
+ cx2341x_handler_set_busy(&itv->cxhdl, 1);
+
/* Initialize Digitizer for Capture */
/* Avoid tinny audio problem - ensure audio clocks are going */
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
@@ -617,7 +619,6 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv *itv = s->itv;
- struct cx2341x_mpeg_params *p = &itv->params;
int datatype;
u16 width;
u16 height;
@@ -627,8 +628,8 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
IVTV_DEBUG_INFO("Setting some initial decoder settings\n");
- width = p->width;
- height = p->height;
+ width = itv->cxhdl.width;
+ height = itv->cxhdl.height;
/* set audio mode to left/stereo for dual/stereo mode. */
ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
@@ -668,7 +669,7 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
break;
}
if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype,
- width, height, p->audio_properties)) {
+ width, height, itv->cxhdl.audio_properties)) {
IVTV_DEBUG_WARN("Couldn't initialize decoder source\n");
}
@@ -847,6 +848,8 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
return 0;
}
+ cx2341x_handler_set_busy(&itv->cxhdl, 0);
+
/* Set the following Interrupt mask bits for capture */
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
del_timer(&itv->dma_timer);
@@ -967,7 +970,8 @@ int ivtv_passthrough_mode(struct ivtv *itv, int enable)
/* Setup capture if not already done */
if (atomic_read(&itv->capturing) == 0) {
- cx2341x_update(itv, ivtv_api_func, NULL, &itv->params);
+ cx2341x_handler_setup(&itv->cxhdl);
+ cx2341x_handler_set_busy(&itv->cxhdl, 1);
}
/* Start Passthrough Mode */
@@ -988,6 +992,8 @@ int ivtv_passthrough_mode(struct ivtv *itv, int enable)
clear_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
clear_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
itv->output_mode = OUT_NONE;
+ if (atomic_read(&itv->capturing) == 0)
+ cx2341x_handler_set_busy(&itv->cxhdl, 0);
return 0;
}
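Aside on the ivtv-fileops.c and ivtv-streams.c hunks above: reads of the cached cx2341x_mpeg_params fields become v4l2_ctrl_g_ctrl()/v4l2_ctrl_s_ctrl() calls on control pointers saved at creation time. A tiny illustration under that assumption; the function name is made up.

#include <media/v4l2-ctrls.h>

/* Assume 'mute' was stored from v4l2_ctrl_new_std() during init. */
static void demo_force_mute(struct v4l2_ctrl *mute)
{
	s32 cur = v4l2_ctrl_g_ctrl(mute);	/* framework-cached value */

	if (!cur)
		v4l2_ctrl_s_ctrl(mute, 1);	/* invokes the driver's s_ctrl op on change */
}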
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index e9df3cb02cc..0e412131da7 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -283,51 +283,6 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
msp_write_dem(client, 0x40, state->i2s_mode);
}
-void msp_set_audio(struct i2c_client *client)
-{
- struct msp_state *state = to_state(i2c_get_clientdata(client));
- int bal = 0, bass, treble, loudness;
- int val = 0;
- int reallymuted = state->muted | state->scan_in_progress;
-
- if (!reallymuted)
- val = (state->volume * 0x7f / 65535) << 8;
-
- v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
- state->muted ? "on" : "off",
- state->scan_in_progress ? "yes" : "no",
- state->volume);
-
- msp_write_dsp(client, 0x0000, val);
- msp_write_dsp(client, 0x0007, reallymuted ? 0x1 : (val | 0x1));
- if (state->has_scart2_out_volume)
- msp_write_dsp(client, 0x0040, reallymuted ? 0x1 : (val | 0x1));
- if (state->has_headphones)
- msp_write_dsp(client, 0x0006, val);
- if (!state->has_sound_processing)
- return;
-
- if (val)
- bal = (u8)((state->balance / 256) - 128);
- bass = ((state->bass - 32768) * 0x60 / 65535) << 8;
- treble = ((state->treble - 32768) * 0x60 / 65535) << 8;
- loudness = state->loudness ? ((5 * 4) << 8) : 0;
-
- v4l_dbg(1, msp_debug, client, "balance=%d bass=%d treble=%d loudness=%d\n",
- state->balance, state->bass, state->treble, state->loudness);
-
- msp_write_dsp(client, 0x0001, bal << 8);
- msp_write_dsp(client, 0x0002, bass);
- msp_write_dsp(client, 0x0003, treble);
- msp_write_dsp(client, 0x0004, loudness);
- if (!state->has_headphones)
- return;
- msp_write_dsp(client, 0x0030, bal << 8);
- msp_write_dsp(client, 0x0031, bass);
- msp_write_dsp(client, 0x0032, treble);
- msp_write_dsp(client, 0x0033, loudness);
-}
-
/* ------------------------------------------------------------------------ */
static void msp_wake_thread(struct i2c_client *client)
@@ -363,98 +318,73 @@ int msp_sleep(struct msp_state *state, int timeout)
/* ------------------------------------------------------------------------ */
-static int msp_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct msp_state *state = to_state(sd);
+ struct msp_state *state = ctrl_to_state(ctrl);
+ struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
+ int val = ctrl->val;
switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = state->volume;
- break;
-
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = state->muted;
- break;
-
- case V4L2_CID_AUDIO_BALANCE:
- if (!state->has_sound_processing)
- return -EINVAL;
- ctrl->value = state->balance;
- break;
-
- case V4L2_CID_AUDIO_BASS:
- if (!state->has_sound_processing)
- return -EINVAL;
- ctrl->value = state->bass;
+ case V4L2_CID_AUDIO_VOLUME: {
+ /* audio volume cluster */
+ int reallymuted = state->muted->val | state->scan_in_progress;
+
+ if (!reallymuted)
+ val = (val * 0x7f / 65535) << 8;
+
+ v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
+ state->muted->val ? "on" : "off",
+ state->scan_in_progress ? "yes" : "no",
+ state->volume->val);
+
+ msp_write_dsp(client, 0x0000, val);
+ msp_write_dsp(client, 0x0007, reallymuted ? 0x1 : (val | 0x1));
+ if (state->has_scart2_out_volume)
+ msp_write_dsp(client, 0x0040, reallymuted ? 0x1 : (val | 0x1));
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0006, val);
break;
-
- case V4L2_CID_AUDIO_TREBLE:
- if (!state->has_sound_processing)
- return -EINVAL;
- ctrl->value = state->treble;
- break;
-
- case V4L2_CID_AUDIO_LOUDNESS:
- if (!state->has_sound_processing)
- return -EINVAL;
- ctrl->value = state->loudness;
- break;
-
- default:
- return -EINVAL;
}
- return 0;
-}
-
-static int msp_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct msp_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- state->volume = ctrl->value;
- if (state->volume == 0)
- state->balance = 32768;
- break;
-
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value < 0 || ctrl->value >= 2)
- return -ERANGE;
- state->muted = ctrl->value;
- break;
case V4L2_CID_AUDIO_BASS:
- if (!state->has_sound_processing)
- return -EINVAL;
- state->bass = ctrl->value;
+ val = ((val - 32768) * 0x60 / 65535) << 8;
+ msp_write_dsp(client, 0x0002, val);
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0031, val);
break;
case V4L2_CID_AUDIO_TREBLE:
- if (!state->has_sound_processing)
- return -EINVAL;
- state->treble = ctrl->value;
+ val = ((val - 32768) * 0x60 / 65535) << 8;
+ msp_write_dsp(client, 0x0003, val);
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0032, val);
break;
case V4L2_CID_AUDIO_LOUDNESS:
- if (!state->has_sound_processing)
- return -EINVAL;
- state->loudness = ctrl->value;
+ val = val ? ((5 * 4) << 8) : 0;
+ msp_write_dsp(client, 0x0004, val);
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0033, val);
break;
case V4L2_CID_AUDIO_BALANCE:
- if (!state->has_sound_processing)
- return -EINVAL;
- state->balance = ctrl->value;
+ val = (u8)((val / 256) - 128);
+ msp_write_dsp(client, 0x0001, val << 8);
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0030, val << 8);
break;
default:
return -EINVAL;
}
- msp_set_audio(client);
return 0;
}
+void msp_update_volume(struct msp_state *state)
+{
+ v4l2_ctrl_s_ctrl(state->volume, v4l2_ctrl_g_ctrl(state->volume));
+}
+
/* --- v4l2 ioctls --- */
static int msp_s_radio(struct v4l2_subdev *sd)
{
@@ -472,7 +402,7 @@ static int msp_s_radio(struct v4l2_subdev *sd)
msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
msp3400c_set_carrier(client, MSP_CARRIER(10.7),
MSP_CARRIER(10.7));
- msp_set_audio(client);
+ msp_update_volume(state);
break;
case OPMODE_AUTODETECT:
case OPMODE_AUTOSELECT:
@@ -592,33 +522,6 @@ static int msp_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
return 0;
}
-static int msp_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- struct msp_state *state = to_state(sd);
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- default:
- break;
- }
- if (!state->has_sound_processing)
- return -EINVAL;
- switch (qc->id) {
- case V4L2_CID_AUDIO_LOUDNESS:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static int msp_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
struct msp_state *state = to_state(sd);
@@ -633,19 +536,14 @@ static int msp_log_status(struct v4l2_subdev *sd)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
const char *p;
+ char prefix[V4L2_SUBDEV_NAME_SIZE + 20];
if (state->opmode == OPMODE_AUTOSELECT)
msp_detect_stereo(client);
v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
client->name, state->rev1, state->rev2);
- v4l_info(client, "Audio: volume %d%s\n",
- state->volume, state->muted ? " (muted)" : "");
- if (state->has_sound_processing) {
- v4l_info(client, "Audio: balance %d bass %d treble %d loudness %s\n",
- state->balance, state->bass,
- state->treble,
- state->loudness ? "on" : "off");
- }
+ snprintf(prefix, sizeof(prefix), "%s: Audio: ", sd->name);
+ v4l2_ctrl_handler_log_status(&state->hdl, prefix);
switch (state->mode) {
case MSP_MODE_AM_DETECT: p = "AM (for carrier detect)"; break;
case MSP_MODE_FM_RADIO: p = "FM Radio"; break;
@@ -695,12 +593,20 @@ static int msp_resume(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops msp_ctrl_ops = {
+ .s_ctrl = msp_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops msp_core_ops = {
.log_status = msp_log_status,
.g_chip_ident = msp_g_chip_ident,
- .g_ctrl = msp_g_ctrl,
- .s_ctrl = msp_s_ctrl,
- .queryctrl = msp_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = msp_s_std,
};
@@ -728,6 +634,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct msp_state *state;
struct v4l2_subdev *sd;
+ struct v4l2_ctrl_handler *hdl;
int (*thread_func)(void *data) = NULL;
int msp_hard;
int msp_family;
@@ -752,13 +659,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
state->v4l2_std = V4L2_STD_NTSC;
state->audmode = V4L2_TUNER_MODE_STEREO;
- state->volume = 58880; /* 0db gain */
- state->balance = 32768; /* 0db gain */
- state->bass = 32768;
- state->treble = 32768;
- state->loudness = 0;
state->input = -1;
- state->muted = 0;
state->i2s_mode = 0;
init_waitqueue_head(&state->wq);
/* These are the reset input/output positions */
@@ -777,8 +678,6 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENODEV;
}
- msp_set_audio(client);
-
msp_family = ((state->rev1 >> 4) & 0x0f) + 3;
msp_product = (state->rev2 >> 8) & 0xff;
msp_prod_hi = msp_product / 10;
@@ -849,6 +748,34 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
state->opmode = OPMODE_MANUAL;
}
+ hdl = &state->hdl;
+ v4l2_ctrl_handler_init(hdl, 6);
+ if (state->has_sound_processing) {
+ v4l2_ctrl_new_std(hdl, &msp_ctrl_ops,
+ V4L2_CID_AUDIO_BASS, 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(hdl, &msp_ctrl_ops,
+ V4L2_CID_AUDIO_TREBLE, 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(hdl, &msp_ctrl_ops,
+ V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 0);
+ }
+ state->volume = v4l2_ctrl_new_std(hdl, &msp_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 58880);
+ v4l2_ctrl_new_std(hdl, &msp_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768);
+ state->muted = v4l2_ctrl_new_std(hdl, &msp_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ sd->ctrl_handler = hdl;
+ if (hdl->error) {
+ int err = hdl->error;
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(state);
+ return err;
+ }
+
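+ /*
+ * Volume and mute form a single control cluster (see the anonymous
+ * struct in msp3400-driver.h), so the s_ctrl handler for the volume
+ * master sees both values and can program the DSP accordingly.
+ */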
+ v4l2_ctrl_cluster(2, &state->volume);
+ v4l2_ctrl_handler_setup(hdl);
+
/* hello world :-) */
v4l_info(client, "MSP%d4%02d%c-%c%d found @ 0x%x (%s)\n",
msp_family, msp_product,
@@ -903,6 +830,7 @@ static int msp_remove(struct i2c_client *client)
}
msp_reset(client);
+ v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
return 0;
}
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index d6b3e6d0eef..32a478e532f 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -6,6 +6,7 @@
#include <media/msp3400.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
/* ---------------------------------------------------------------------- */
@@ -51,6 +52,7 @@ extern int msp_stereo_thresh;
struct msp_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
int rev1, rev2;
int ident;
u8 has_nicam;
@@ -87,9 +89,12 @@ struct msp_state {
int audmode;
int rxsubchans;
- int volume, muted;
- int balance, loudness;
- int bass, treble;
+ struct {
+ /* volume cluster */
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *muted;
+ };
+
int scan_in_progress;
/* thread */
@@ -104,6 +109,11 @@ static inline struct msp_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct msp_state, sd);
}
+static inline struct msp_state *ctrl_to_state(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct msp_state, hdl);
+}
+
/* msp3400-driver.c */
int msp_write_dem(struct i2c_client *client, int addr, int val);
int msp_write_dsp(struct i2c_client *client, int addr, int val);
@@ -111,7 +121,7 @@ int msp_read_dem(struct i2c_client *client, int addr);
int msp_read_dsp(struct i2c_client *client, int addr);
int msp_reset(struct i2c_client *client);
void msp_set_scart(struct i2c_client *client, int in, int out);
-void msp_set_audio(struct i2c_client *client);
+void msp_update_volume(struct msp_state *state);
int msp_sleep(struct msp_state *state, int timeout);
/* msp3400-kthreads.c */
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index d5a69c5ee5e..b376fcdee65 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -496,13 +496,13 @@ restart:
v4l_dbg(1, msp_debug, client,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
- msp_set_audio(client);
+ msp_update_volume(state);
continue;
}
/* mute audio */
state->scan_in_progress = 1;
- msp_set_audio(client);
+ msp_update_volume(state);
msp3400c_set_mode(client, MSP_MODE_AM_DETECT);
val1 = val2 = 0;
@@ -634,7 +634,7 @@ no_second:
/* unmute */
state->scan_in_progress = 0;
msp3400c_set_audmode(client);
- msp_set_audio(client);
+ msp_update_volume(state);
if (msp_debug)
msp3400c_print_mode(client);
@@ -679,13 +679,13 @@ restart:
v4l_dbg(1, msp_debug, client,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
- msp_set_audio(client);
+ msp_update_volume(state);
continue;
}
/* mute audio */
state->scan_in_progress = 1;
- msp_set_audio(client);
+ msp_update_volume(state);
/* start autodetect. Note: autodetect is not supported for
NTSC-M and radio, hence we force the standard in those
@@ -797,7 +797,7 @@ restart:
/* unmute */
msp3400c_set_audmode(client);
state->scan_in_progress = 0;
- msp_set_audio(client);
+ msp_update_volume(state);
/* monitor tv audio mode, the first time don't wait
so long to get a quick stereo/bilingual result */
@@ -974,7 +974,7 @@ restart:
v4l_dbg(1, msp_debug, client,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
- msp_set_audio(client);
+ msp_update_volume(state);
continue;
}
@@ -1020,7 +1020,7 @@ unmute:
}
/* unmute: dispatch sound to scart output, set scart volume */
- msp_set_audio(client);
+ msp_update_volume(state);
/* restore ACB */
if (msp_write_dsp(client, 0x13, state->acb))
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index 31cc3d04bcc..758a4db27d6 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -1,5 +1,5 @@
/*
- * Driver for MT9M111/MT9M112 CMOS Image Sensor from Micron
+ * Driver for MT9M111/MT9M112/MT9M131 CMOS Image Sensor from Micron/Aptina
*
* Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr>
*
@@ -19,11 +19,14 @@
#include <media/soc_camera.h>
/*
- * mt9m111 and mt9m112 i2c address is 0x5d or 0x48 (depending on SAddr pin)
+ * MT9M111, MT9M112 and MT9M131:
+ * i2c address is 0x48 or 0x5d (depending on SADDR pin)
* The platform has to define i2c_board_info and call i2c_register_board_info()
*/
-/* mt9m111: Sensor register addresses */
+/*
+ * Sensor core register addresses (0x000..0x0ff)
+ */
#define MT9M111_CHIP_VERSION 0x000
#define MT9M111_ROW_START 0x001
#define MT9M111_COLUMN_START 0x002
@@ -72,8 +75,9 @@
#define MT9M111_CTXT_CTRL_LED_FLASH_EN (1 << 2)
#define MT9M111_CTXT_CTRL_VBLANK_SEL_B (1 << 1)
#define MT9M111_CTXT_CTRL_HBLANK_SEL_B (1 << 0)
+
/*
- * mt9m111: Colorpipe register addresses (0x100..0x1ff)
+ * Colorpipe register addresses (0x100..0x1ff)
*/
#define MT9M111_OPER_MODE_CTRL 0x106
#define MT9M111_OUTPUT_FORMAT_CTRL 0x108
@@ -109,8 +113,9 @@
#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1)
#define MT9M111_OUTFMT_SWAP_RGB_EVEN (1 << 1)
#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr (1 << 0)
+
/*
- * mt9m111: Camera control register addresses (0x200..0x2ff not implemented)
+ * Camera control register addresses (0x200..0x2ff not implemented)
*/
#define reg_read(reg) mt9m111_reg_read(client, MT9M111_##reg)
@@ -160,7 +165,8 @@ enum mt9m111_context {
struct mt9m111 {
struct v4l2_subdev subdev;
- int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */
+ int model; /* V4L2_IDENT_MT9M111 or V4L2_IDENT_MT9M112 code
+ * from v4l2-chip-ident.h */
enum mt9m111_context context;
struct v4l2_rect rect;
const struct mt9m111_datafmt *fmt;
@@ -934,7 +940,7 @@ static int mt9m111_init(struct i2c_client *client)
if (!ret)
ret = mt9m111_set_autoexposure(client, mt9m111->autoexposure);
if (ret)
- dev_err(&client->dev, "mt9m11x init failed: %d\n", ret);
+ dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
return ret;
}
@@ -963,27 +969,27 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
mt9m111->swap_rgb_even_odd = 1;
mt9m111->swap_rgb_red_blue = 1;
- ret = mt9m111_init(client);
- if (ret)
- goto ei2c;
-
data = reg_read(CHIP_VERSION);
switch (data) {
- case 0x143a: /* MT9M111 */
+ case 0x143a: /* MT9M111 or MT9M131 */
mt9m111->model = V4L2_IDENT_MT9M111;
+ dev_info(&client->dev,
+ "Detected a MT9M111/MT9M131 chip ID %x\n", data);
break;
case 0x148c: /* MT9M112 */
mt9m111->model = V4L2_IDENT_MT9M112;
+ dev_info(&client->dev, "Detected a MT9M112 chip ID %x\n", data);
break;
default:
ret = -ENODEV;
dev_err(&client->dev,
- "No MT9M11x chip detected, register read %x\n", data);
+ "No MT9M111/MT9M112/MT9M131 chip detected register read %x\n",
+ data);
goto ei2c;
}
- dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data);
+ ret = mt9m111_init(client);
ei2c:
return ret;
@@ -1034,13 +1040,13 @@ static int mt9m111_probe(struct i2c_client *client,
int ret;
if (!icd) {
- dev_err(&client->dev, "MT9M11x: missing soc-camera data!\n");
+ dev_err(&client->dev, "mt9m111: soc-camera data missing!\n");
return -EINVAL;
}
icl = to_soc_camera_link(icd);
if (!icl) {
- dev_err(&client->dev, "MT9M11x driver needs platform data\n");
+ dev_err(&client->dev, "mt9m111: driver needs platform data\n");
return -EINVAL;
}
@@ -1114,6 +1120,6 @@ static void __exit mt9m111_mod_exit(void)
module_init(mt9m111_mod_init);
module_exit(mt9m111_mod_exit);
-MODULE_DESCRIPTION("Micron MT9M111/MT9M112 Camera driver");
+MODULE_DESCRIPTION("Micron/Aptina MT9M111/MT9M112/MT9M131 Camera driver");
MODULE_AUTHOR("Robert Jarzmik");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 026bef0ba40..66ff174151b 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -785,6 +785,8 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
if (ret < 0)
return ret;
+ if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
+ csicr1 |= CSICR1_REDGE;
if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
csicr1 |= CSICR1_INV_PCLK;
if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
@@ -1201,7 +1203,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
buf = list_entry(pcdev->capture.next,
struct mx2_buffer, vb.queue);
- buf->bufnum = bufnum;
+ buf->bufnum = !bufnum;
list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.c b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
index e9b11e119f6..4279ebb811a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
@@ -94,8 +94,6 @@ static int debugifc_parse_unsigned_number(const char *buf,unsigned int count,
u32 *num_ptr)
{
u32 result = 0;
- u32 val;
- int ch;
int radix = 10;
if ((count >= 2) && (buf[0] == '0') &&
((buf[1] == 'x') || (buf[1] == 'X'))) {
@@ -107,17 +105,9 @@ static int debugifc_parse_unsigned_number(const char *buf,unsigned int count,
}
while (count--) {
- ch = *buf++;
- if ((ch >= '0') && (ch <= '9')) {
- val = ch - '0';
- } else if ((ch >= 'a') && (ch <= 'f')) {
- val = ch - 'a' + 10;
- } else if ((ch >= 'A') && (ch <= 'F')) {
- val = ch - 'A' + 10;
- } else {
+ int val = hex_to_bin(*buf++);
+ if (val < 0 || val >= radix)
return -EINVAL;
- }
- if (val >= radix) return -EINVAL;
result *= radix;
result += val;
}
diff --git a/drivers/media/video/s5p-fimc/Makefile b/drivers/media/video/s5p-fimc/Makefile
new file mode 100644
index 00000000000..0d9d54132ec
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) := s5p-fimc.o
+s5p-fimc-y := fimc-core.o fimc-reg.o
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
new file mode 100644
index 00000000000..b151c7be8a5
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -0,0 +1,1586 @@
+/*
+ * S5P camera interface (video postprocessor) driver
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ *
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "fimc-core.h"
+
+static char *fimc_clock_name[NUM_FIMC_CLOCKS] = { "sclk_fimc", "fimc" };
+
+static struct fimc_fmt fimc_formats[] = {
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .color = S5P_FIMC_RGB565,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = 32,
+ .color = S5P_FIMC_RGB666,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "XRGB-8-8-8-8, 24 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = 32,
+ .color = S5P_FIMC_RGB888,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .color = S5P_FIMC_YCBYCR422,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .color = S5P_FIMC_CBYCRY422,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = 16,
+ .color = S5P_FIMC_CRYCBY422,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ .color = S5P_FIMC_YCRYCB422,
+ .buff_cnt = 1,
+ .planes_cnt = 1
+ }, {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = 12,
+ .color = S5P_FIMC_YCBCR422,
+ .buff_cnt = 1,
+ .planes_cnt = 3
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .color = S5P_FIMC_YCBCR422,
+ .buff_cnt = 1,
+ .planes_cnt = 2
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = 16,
+ .color = S5P_FIMC_YCBCR422,
+ .buff_cnt = 1,
+ .planes_cnt = 2
+ }, {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .color = S5P_FIMC_YCBCR420,
+ .buff_cnt = 1,
+ .planes_cnt = 3
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .color = S5P_FIMC_YCBCR420,
+ .buff_cnt = 1,
+ .planes_cnt = 2
+ }
+ };
+
+static struct v4l2_queryctrl fimc_ctrls[] = {
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ROTATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Rotation (CCW)",
+ .minimum = 0,
+ .maximum = 270,
+ .step = 90,
+ .default_value = 0,
+ },
+};
+
+
+static struct v4l2_queryctrl *get_ctrl(int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_ctrls); ++i)
+ if (id == fimc_ctrls[i].id)
+ return &fimc_ctrls[i];
+ return NULL;
+}
+
+static int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
+{
+ if (r->width > f->width) {
+ if (f->width > (r->width * SCALER_MAX_HRATIO))
+ return -EINVAL;
+ } else {
+ if ((f->width * SCALER_MAX_HRATIO) < r->width)
+ return -EINVAL;
+ }
+
+ if (r->height > f->height) {
+ if (f->height > (r->height * SCALER_MAX_VRATIO))
+ return -EINVAL;
+ } else {
+ if ((f->height * SCALER_MAX_VRATIO) < r->height)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ if (src >= tar * 64) {
+ return -EINVAL;
+ } else if (src >= tar * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= tar * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= tar * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= tar * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= tar * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
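+
+/*
+ * Example: for src = 1280 and tar = 80 the "src >= tar * 16" test is
+ * the first one to match, so the pre-scaler gets ratio = 16 (shift = 4)
+ * and the remaining 80 -> 80 step is left to the main scaler.
+ */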
+
+static int fimc_set_scaler_info(struct fimc_ctx *ctx)
+{
+ struct fimc_scaler *sc = &ctx->scaler;
+ struct fimc_frame *s_frame = &ctx->s_frame;
+ struct fimc_frame *d_frame = &ctx->d_frame;
+ int tx, ty, sx, sy;
+ int ret;
+
+ tx = d_frame->width;
+ ty = d_frame->height;
+ if (tx <= 0 || ty <= 0) {
+ v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
+ "invalid target size: %d x %d", tx, ty);
+ return -EINVAL;
+ }
+
+ sx = s_frame->width;
+ sy = s_frame->height;
+ if (sx <= 0 || sy <= 0) {
+ err("invalid source size: %d x %d", sx, sy);
+ return -EINVAL;
+ }
+
+ sc->real_width = sx;
+ sc->real_height = sy;
+ dbg("sx= %d, sy= %d, tx= %d, ty= %d", sx, sy, tx, ty);
+
+ ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+ if (ret)
+ return ret;
+
+ ret = fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor);
+ if (ret)
+ return ret;
+
+ sc->pre_dst_width = sx / sc->pre_hratio;
+ sc->pre_dst_height = sy / sc->pre_vratio;
+
+ sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+
+ sc->scaleup_h = (tx >= sx) ? 1 : 0;
+ sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+ /* check to see if input and output size/format differ */
+ if (s_frame->fmt->color == d_frame->fmt->color
+ && s_frame->width == d_frame->width
+ && s_frame->height == d_frame->height)
+ sc->copy_mode = 1;
+ else
+ sc->copy_mode = 0;
+
+ return 0;
+}
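+
+/*
+ * main_hratio/main_vratio are 8.8 fixed-point ratios, e.g. scaling a
+ * 720 pixel wide source down to 400 pixels gives
+ * (720 << 8) / (400 << 0) = 460, roughly 1.80 in 8.8 notation.
+ */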
+
+
+static irqreturn_t fimc_isr(int irq, void *priv)
+{
+ struct fimc_vid_buffer *src_buf, *dst_buf;
+ struct fimc_dev *fimc = (struct fimc_dev *)priv;
+ struct fimc_ctx *ctx;
+
+ BUG_ON(!fimc);
+ fimc_hw_clear_irq(fimc);
+
+ spin_lock(&fimc->slock);
+
+ if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
+ ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
+ if (!ctx || !ctx->m2m_ctx)
+ goto isr_unlock;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (src_buf && dst_buf) {
+ spin_lock(&fimc->irqlock);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock(&fimc->irqlock);
+ v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
+ }
+ }
+
+isr_unlock:
+ spin_unlock(&fimc->slock);
+ return IRQ_HANDLED;
+}
+
+/* The color format (planes_cnt, buff_cnt) must already be configured. */
+static int fimc_prepare_addr(struct fimc_ctx *ctx,
+ struct fimc_vid_buffer *buf, enum v4l2_buf_type type)
+{
+ struct fimc_frame *frame;
+ struct fimc_addr *paddr;
+ u32 pix_size;
+ int ret = 0;
+
+ frame = ctx_m2m_get_frame(ctx, type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+ paddr = &frame->paddr;
+
+ if (!buf)
+ return -EINVAL;
+
+ pix_size = frame->width * frame->height;
+
+ dbg("buff_cnt= %d, planes_cnt= %d, frame->size= %d, pix_size= %d",
+ frame->fmt->buff_cnt, frame->fmt->planes_cnt,
+ frame->size, pix_size);
+
+ if (frame->fmt->buff_cnt == 1) {
+ paddr->y = videobuf_to_dma_contig(&buf->vb);
+ switch (frame->fmt->planes_cnt) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (S5P_FIMC_YCBCR420 == frame->fmt->color)
+ paddr->cr = (u32)(paddr->cb
+ + (pix_size >> 2));
+ else /* 422 */
+ paddr->cr = (u32)(paddr->cb
+ + (pix_size >> 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ dbg("PHYS_ADDR: type= %d, y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
+ type, paddr->y, paddr->cb, paddr->cr, ret);
+
+ return ret;
+}
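+
+/*
+ * Example: for a single-buffer V4L2_PIX_FMT_YUV420 frame of w x h pixels
+ * the planes end up at y = buffer start, cb = y + w * h and
+ * cr = cb + (w * h) / 4.
+ */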
+
+/* Set order for 1 and 2 plane YCBCR 4:2:2 formats. */
+static void fimc_set_yuv_order(struct fimc_ctx *ctx)
+{
+ /* The only mode supported by the SoC. */
+ ctx->in_order_2p = S5P_FIMC_LSB_CRCB;
+ ctx->out_order_2p = S5P_FIMC_LSB_CRCB;
+
+ /* Set order for 1 plane input formats. */
+ switch (ctx->s_frame.fmt->color) {
+ case S5P_FIMC_YCRYCB422:
+ ctx->in_order_1p = S5P_FIMC_IN_YCRYCB;
+ break;
+ case S5P_FIMC_CBYCRY422:
+ ctx->in_order_1p = S5P_FIMC_IN_CBYCRY;
+ break;
+ case S5P_FIMC_CRYCBY422:
+ ctx->in_order_1p = S5P_FIMC_IN_CRYCBY;
+ break;
+ case S5P_FIMC_YCBYCR422:
+ default:
+ ctx->in_order_1p = S5P_FIMC_IN_YCBYCR;
+ break;
+ }
+ dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
+
+ switch (ctx->d_frame.fmt->color) {
+ case S5P_FIMC_YCRYCB422:
+ ctx->out_order_1p = S5P_FIMC_OUT_YCRYCB;
+ break;
+ case S5P_FIMC_CBYCRY422:
+ ctx->out_order_1p = S5P_FIMC_OUT_CBYCRY;
+ break;
+ case S5P_FIMC_CRYCBY422:
+ ctx->out_order_1p = S5P_FIMC_OUT_CRYCBY;
+ break;
+ case S5P_FIMC_YCBYCR422:
+ default:
+ ctx->out_order_1p = S5P_FIMC_OUT_YCBYCR;
+ break;
+ }
+ dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
+}
+
+/**
+ * fimc_prepare_config - check dimensions, operation and color mode
+ * and pre-calculate offset and the scaling coefficients.
+ *
+ * @ctx: hardware context information
+ * @flags: flags indicating which parameters to check/update
+ *
+ * Return: 0 if the dimensions are valid, non-zero otherwise.
+ */
+static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
+{
+ struct fimc_frame *s_frame, *d_frame;
+ struct fimc_vid_buffer *buf = NULL;
+ struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ int ret = 0;
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ if (flags & FIMC_PARAMS) {
+ if ((ctx->out_path == FIMC_DMA) &&
+ (ctx->rotation == 90 || ctx->rotation == 270)) {
+ swap(d_frame->f_width, d_frame->f_height);
+ swap(d_frame->width, d_frame->height);
+ }
+
+ /* Prepare the output offset ratios for scaler. */
+ d_frame->dma_offset.y_h = d_frame->offs_h;
+ if (!variant->pix_hoff)
+ d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3);
+
+ d_frame->dma_offset.y_v = d_frame->offs_v;
+
+ d_frame->dma_offset.cb_h = d_frame->offs_h;
+ d_frame->dma_offset.cb_v = d_frame->offs_v;
+
+ d_frame->dma_offset.cr_h = d_frame->offs_h;
+ d_frame->dma_offset.cr_v = d_frame->offs_v;
+
+ if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) {
+ d_frame->dma_offset.cb_h >>= 1;
+ d_frame->dma_offset.cb_v >>= 1;
+ d_frame->dma_offset.cr_h >>= 1;
+ d_frame->dma_offset.cr_v >>= 1;
+ }
+
+ dbg("out offset: color= %d, y_h= %d, y_v= %d",
+ d_frame->fmt->color,
+ d_frame->dma_offset.y_h, d_frame->dma_offset.y_v);
+
+ /* Prepare the input offset ratios for scaler. */
+ s_frame->dma_offset.y_h = s_frame->offs_h;
+ if (!variant->pix_hoff)
+ s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3);
+ s_frame->dma_offset.y_v = s_frame->offs_v;
+
+ s_frame->dma_offset.cb_h = s_frame->offs_h;
+ s_frame->dma_offset.cb_v = s_frame->offs_v;
+
+ s_frame->dma_offset.cr_h = s_frame->offs_h;
+ s_frame->dma_offset.cr_v = s_frame->offs_v;
+
+ if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) {
+ s_frame->dma_offset.cb_h >>= 1;
+ s_frame->dma_offset.cb_v >>= 1;
+ s_frame->dma_offset.cr_h >>= 1;
+ s_frame->dma_offset.cr_v >>= 1;
+ }
+
+ dbg("in offset: color= %d, y_h= %d, y_v= %d",
+ s_frame->fmt->color, s_frame->dma_offset.y_h,
+ s_frame->dma_offset.y_v);
+
+ fimc_set_yuv_order(ctx);
+
+ /* Check against the scaler ratio. */
+ if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
+ s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
+ err("out of scaler range");
+ return -EINVAL;
+ }
+ }
+
+ /* Input DMA mode is not allowed when the scaler is disabled. */
+ ctx->scaler.enabled = 1;
+
+ if (flags & FIMC_SRC_ADDR) {
+ buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, buf,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & FIMC_DST_ADDR) {
+ buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, buf,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ }
+
+ return ret;
+}
+
+static void fimc_dma_run(void *priv)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+ u32 ret;
+
+ if (WARN(!ctx, "null hardware context"))
+ return;
+
+ fimc = ctx->fimc_dev;
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ set_bit(ST_M2M_PEND, &fimc->state);
+
+ ctx->state |= (FIMC_SRC_ADDR | FIMC_DST_ADDR);
+ ret = fimc_prepare_config(ctx, ctx->state);
+ if (ret) {
+ err("general configuration error");
+ goto dma_unlock;
+ }
+
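+ /*
+ * Force a full parameter reload whenever the hardware last ran a
+ * different context; without FIMC_PARAMS set only the DMA addresses
+ * are reprogrammed below.
+ */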
+ if (fimc->m2m.ctx != ctx)
+ ctx->state |= FIMC_PARAMS;
+
+ fimc_hw_set_input_addr(fimc, &ctx->s_frame.paddr);
+
+ if (ctx->state & FIMC_PARAMS) {
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_in_dma(ctx);
+ if (fimc_set_scaler_info(ctx)) {
+ err("scaler configuration error");
+ goto dma_unlock;
+ }
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_scaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ }
+
+ fimc_hw_set_output_path(ctx);
+ if (ctx->state & (FIMC_DST_ADDR | FIMC_PARAMS))
+ fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr);
+
+ if (ctx->state & FIMC_PARAMS)
+ fimc_hw_set_out_dma(ctx);
+
+ if (ctx->scaler.enabled)
+ fimc_hw_start_scaler(fimc);
+ fimc_hw_en_capture(ctx);
+
+ ctx->state = 0;
+ fimc_hw_start_in_dma(fimc);
+
+ fimc->m2m.ctx = ctx;
+
+dma_unlock:
+ spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+static void fimc_job_abort(void *priv)
+{
+ /* Nothing done in job_abort. */
+}
+
+static void fimc_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int fimc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct fimc_ctx *ctx = vq->priv_data;
+ struct fimc_frame *frame;
+
+ frame = ctx_m2m_get_frame(ctx, vq->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ *size = (frame->width * frame->height * frame->fmt->depth) >> 3;
+ if (0 == *count)
+ *count = 1;
+ return 0;
+}
+
+static int fimc_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct fimc_ctx *ctx = vq->priv_data;
+ struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
+ struct fimc_frame *frame;
+ int ret;
+
+ frame = ctx_m2m_get_frame(ctx, vq->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (vb->baddr) {
+ if (vb->bsize < frame->size) {
+ v4l2_err(v4l2_dev,
+ "User-provided buffer too small (%d < %d)\n",
+ vb->bsize, frame->size);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ } else if (vb->state != VIDEOBUF_NEEDS_INIT
+ && vb->bsize < frame->size) {
+ return -EINVAL;
+ }
+
+ vb->width = frame->width;
+ vb->height = frame->height;
+ vb->bytesperline = (frame->width * frame->fmt->depth) >> 3;
+ vb->size = frame->size;
+ vb->field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret) {
+ v4l2_err(v4l2_dev, "Iolock failed\n");
+ fimc_buf_release(vq, vb);
+ return ret;
+ }
+ }
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static void fimc_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct fimc_ctx *ctx = vq->priv_data;
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+}
+
+static struct videobuf_queue_ops fimc_qops = {
+ .buf_setup = fimc_buf_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+ .buf_release = fimc_buf_release,
+};
+
+static int fimc_m2m_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int fimc_m2m_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(fimc_formats))
+ return -EINVAL;
+
+ fmt = &fimc_formats[f->index];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int fimc_m2m_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_frame *frame;
+
+ frame = ctx_m2m_get_frame(ctx, f->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ f->fmt.pix.width = frame->width;
+ f->fmt.pix.height = frame->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frame->fmt->fourcc;
+
+ return 0;
+}
+
+static struct fimc_fmt *find_format(struct v4l2_format *f)
+{
+ struct fimc_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+ fmt = &fimc_formats[i];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+ if (i == ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ return fmt;
+}
+
+static int fimc_m2m_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_fmt *fmt;
+ u32 max_width, max_height, mod_x, mod_y;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct samsung_fimc_variant *variant = fimc->variant;
+
+ fmt = find_format(f);
+ if (!fmt) {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "Fourcc format (0x%X) invalid.\n", pix->pixelformat);
+ return -EINVAL;
+ }
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != pix->field)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ max_width = variant->scaler_dis_w;
+ max_height = variant->scaler_dis_w;
+ mod_x = variant->min_inp_pixsize;
+ mod_y = variant->min_inp_pixsize;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ max_width = variant->out_rot_dis_w;
+ max_height = variant->out_rot_dis_w;
+ mod_x = variant->min_out_pixsize;
+ mod_y = variant->min_out_pixsize;
+ } else {
+ err("Wrong stream type (%d)", f->type);
+ return -EINVAL;
+ }
+
+ dbg("max_w= %d, max_h= %d", max_width, max_height);
+
+ if (pix->height > max_height)
+ pix->height = max_height;
+ if (pix->width > max_width)
+ pix->width = max_width;
+
+ if (tiled_fmt(fmt)) {
+ mod_x = 64; /* 64x32 tile */
+ mod_y = 32;
+ }
+
+ dbg("mod_x= 0x%X, mod_y= 0x%X", mod_x, mod_y);
+
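+ /*
+ * Example: with min_out_pixsize = 32 a requested capture width of 500
+ * is rounded up to ALIGN(500, 32) = 512 before bytesperline and
+ * sizeimage are derived from it below.
+ */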
+ pix->width = (pix->width == 0) ? mod_x : ALIGN(pix->width, mod_x);
+ pix->height = (pix->height == 0) ? mod_y : ALIGN(pix->height, mod_y);
+
+ if (pix->bytesperline == 0 ||
+ pix->bytesperline * 8 / fmt->depth > pix->width)
+ pix->bytesperline = (pix->width * fmt->depth) >> 3;
+
+ if (pix->sizeimage == 0)
+ pix->sizeimage = pix->height * pix->bytesperline;
+
+ dbg("pix->bytesperline= %d, fmt->depth= %d",
+ pix->bytesperline, fmt->depth);
+
+ return 0;
+}
+
+
+static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = priv;
+ struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
+ struct videobuf_queue *src_vq, *dst_vq;
+ struct fimc_frame *frame;
+ struct v4l2_pix_format *pix;
+ unsigned long flags;
+ int ret = 0;
+
+ BUG_ON(!ctx);
+
+ ret = fimc_m2m_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ctx->fimc_dev->lock);
+
+ src_vq = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
+ dst_vq = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+
+ mutex_lock(&src_vq->vb_lock);
+ mutex_lock(&dst_vq->vb_lock);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (videobuf_queue_is_busy(src_vq)) {
+ v4l2_err(v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto s_fmt_out;
+ }
+ frame = &ctx->s_frame;
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->state |= FIMC_SRC_FMT;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (videobuf_queue_is_busy(dst_vq)) {
+ v4l2_err(v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto s_fmt_out;
+ }
+ frame = &ctx->d_frame;
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->state |= FIMC_DST_FMT;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ } else {
+ v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
+ "Wrong buffer/video queue type (%d)\n", f->type);
+ return -EINVAL;
+ }
+
+ pix = &f->fmt.pix;
+ frame->fmt = find_format(f);
+ if (!frame->fmt) {
+ ret = -EINVAL;
+ goto s_fmt_out;
+ }
+
+ frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_height = pix->sizeimage/pix->bytesperline;
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->o_width = pix->width;
+ frame->o_height = pix->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
+ src_vq->field = dst_vq->field = pix->field;
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->state |= FIMC_PARAMS;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ dbg("f_width= %d, f_height= %d", frame->f_width, frame->f_height);
+
+s_fmt_out:
+ mutex_unlock(&dst_vq->vb_lock);
+ mutex_unlock(&src_vq->vb_lock);
+ mutex_unlock(&ctx->fimc_dev->lock);
+ return ret;
+}
+
+static int fimc_m2m_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_ctx *ctx = priv;
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int fimc_m2m_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = priv;
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = priv;
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+int fimc_m2m_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct v4l2_queryctrl *c;
+ c = get_ctrl(qc->id);
+ if (!c)
+ return -EINVAL;
+ *qc = *c;
+ return 0;
+}
+
+int fimc_m2m_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct fimc_ctx *ctx = priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctrl->value = (FLIP_X_AXIS & ctx->flip) ? 1 : 0;
+ break;
+ case V4L2_CID_VFLIP:
+ ctrl->value = (FLIP_Y_AXIS & ctx->flip) ? 1 : 0;
+ break;
+ case V4L2_CID_ROTATE:
+ ctrl->value = ctx->rotation;
+ break;
+ default:
+ v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+ dbg("ctrl->value= %d", ctrl->value);
+ return 0;
+}
+
+static int check_ctrl_val(struct fimc_ctx *ctx,
+ struct v4l2_control *ctrl)
+{
+ struct v4l2_queryctrl *c;
+ c = get_ctrl(ctrl->id);
+ if (!c)
+ return -EINVAL;
+
+ if (ctrl->value < c->minimum || ctrl->value > c->maximum
+ || (c->step != 0 && ctrl->value % c->step != 0)) {
+ v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
+ "Invalid control value\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+int fimc_m2m_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct fimc_ctx *ctx = priv;
+ struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = check_ctrl_val(ctx, ctrl);
+ if (ret)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctx->rotation != 0)
+ return 0;
+ if (ctrl->value)
+ ctx->flip |= FLIP_X_AXIS;
+ else
+ ctx->flip &= ~FLIP_X_AXIS;
+ break;
+
+ case V4L2_CID_VFLIP:
+ if (ctx->rotation != 0)
+ return 0;
+ if (ctrl->value)
+ ctx->flip |= FLIP_Y_AXIS;
+ else
+ ctx->flip &= ~FLIP_Y_AXIS;
+ break;
+
+ case V4L2_CID_ROTATE:
+ if (ctrl->value == 90 || ctrl->value == 270) {
+ if (ctx->out_path == FIMC_LCDFIFO &&
+ !variant->has_inp_rot) {
+ return -EINVAL;
+ } else if (ctx->in_path == FIMC_DMA &&
+ !variant->has_out_rot) {
+ return -EINVAL;
+ }
+ }
+ ctx->rotation = ctrl->value;
+ if (ctrl->value == 180)
+ ctx->flip = FLIP_XY_AXIS;
+ break;
+
+ default:
+ v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ctx->slock, flags);
+ ctx->state |= FIMC_PARAMS;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ return 0;
+}
+
+
+static int fimc_m2m_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
+{
+ struct fimc_frame *frame;
+ struct fimc_ctx *ctx = fh;
+
+ frame = ctx_m2m_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->f_width;
+ cr->bounds.height = frame->f_height;
+ cr->defrect.left = frame->offs_h;
+ cr->defrect.top = frame->offs_v;
+ cr->defrect.width = frame->o_width;
+ cr->defrect.height = frame->o_height;
+ return 0;
+}
+
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_frame *frame;
+ struct fimc_ctx *ctx = file->private_data;
+
+ frame = ctx_m2m_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c.left = frame->offs_h;
+ cr->c.top = frame->offs_v;
+ cr->c.width = frame->width;
+ cr->c.height = frame->height;
+
+ return 0;
+}
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ unsigned long flags;
+ struct fimc_frame *f;
+ u32 min_size;
+ int ret = 0;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+
+ if (cr->c.width <= 0 || cr->c.height <= 0) {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "crop width and height must be greater than 0\n");
+ return -EINVAL;
+ }
+
+ f = ctx_m2m_get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ /* Adjust to required pixel boundary. */
+ min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
+
+ cr->c.width = round_down(cr->c.width, min_size);
+ cr->c.height = round_down(cr->c.height, min_size);
+ cr->c.left = round_down(cr->c.left + 1, min_size);
+ cr->c.top = round_down(cr->c.top + 1, min_size);
+
+ if ((cr->c.left + cr->c.width > f->o_width)
+ || (cr->c.top + cr->c.height > f->o_height)) {
+ v4l2_err(&fimc->m2m.v4l2_dev, "Error in S_CROP params\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ if ((ctx->state & FIMC_SRC_FMT) && (ctx->state & FIMC_DST_FMT)) {
+ /* Check for the pixel scaling ratio when cropping input img. */
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame);
+
+ if (ret) {
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
+ return -EINVAL;
+ }
+ }
+ ctx->state |= FIMC_PARAMS;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ f->offs_h = cr->c.left;
+ f->offs_v = cr->c.top;
+ f->width = cr->c.width;
+ f->height = cr->c.height;
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
+ .vidioc_querycap = fimc_m2m_querycap,
+
+ .vidioc_enum_fmt_vid_cap = fimc_m2m_enum_fmt,
+ .vidioc_enum_fmt_vid_out = fimc_m2m_enum_fmt,
+
+ .vidioc_g_fmt_vid_cap = fimc_m2m_g_fmt,
+ .vidioc_g_fmt_vid_out = fimc_m2m_g_fmt,
+
+ .vidioc_try_fmt_vid_cap = fimc_m2m_try_fmt,
+ .vidioc_try_fmt_vid_out = fimc_m2m_try_fmt,
+
+ .vidioc_s_fmt_vid_cap = fimc_m2m_s_fmt,
+ .vidioc_s_fmt_vid_out = fimc_m2m_s_fmt,
+
+ .vidioc_reqbufs = fimc_m2m_reqbufs,
+ .vidioc_querybuf = fimc_m2m_querybuf,
+
+ .vidioc_qbuf = fimc_m2m_qbuf,
+ .vidioc_dqbuf = fimc_m2m_dqbuf,
+
+ .vidioc_streamon = fimc_m2m_streamon,
+ .vidioc_streamoff = fimc_m2m_streamoff,
+
+ .vidioc_queryctrl = fimc_m2m_queryctrl,
+ .vidioc_g_ctrl = fimc_m2m_g_ctrl,
+ .vidioc_s_ctrl = fimc_m2m_s_ctrl,
+
+ .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_s_crop = fimc_m2m_s_crop,
+ .vidioc_cropcap = fimc_m2m_cropcap
+
+};
+
+static void queue_init(void *priv, struct videobuf_queue *vq,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ videobuf_queue_dma_contig_init(vq, &fimc_qops,
+ fimc->m2m.v4l2_dev.dev,
+ &fimc->irqlock, type, V4L2_FIELD_NONE,
+ sizeof(struct fimc_vid_buffer), priv);
+}
+
+static int fimc_m2m_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx = NULL;
+ int err = 0;
+
+ mutex_lock(&fimc->lock);
+ fimc->m2m.refcnt++;
+ set_bit(ST_OUTDMA_RUN, &fimc->state);
+ mutex_unlock(&fimc->lock);
+
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->fimc_dev = fimc;
+ /* default format */
+ ctx->s_frame.fmt = &fimc_formats[0];
+ ctx->d_frame.fmt = &fimc_formats[0];
+ /* per user process device context initialization */
+ ctx->state = 0;
+ ctx->flags = 0;
+ ctx->effect.type = S5P_FIMC_EFFECT_ORIGINAL;
+ ctx->in_path = FIMC_DMA;
+ ctx->out_path = FIMC_DMA;
+ spin_lock_init(&ctx->slock);
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, fimc->m2m.m2m_dev, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ err = PTR_ERR(ctx->m2m_ctx);
+ kfree(ctx);
+ }
+ return err;
+}
+
+static int fimc_m2m_release(struct file *file)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx);
+ mutex_lock(&fimc->lock);
+ if (--fimc->m2m.refcnt <= 0)
+ clear_bit(ST_OUTDMA_RUN, &fimc->state);
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static unsigned int fimc_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+
+static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations fimc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_m2m_open,
+ .release = fimc_m2m_release,
+ .poll = fimc_m2m_poll,
+ .ioctl = video_ioctl2,
+ .mmap = fimc_m2m_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fimc_dma_run,
+ .job_abort = fimc_job_abort,
+};
+
+
+static int fimc_register_m2m_device(struct fimc_dev *fimc)
+{
+ struct video_device *vfd;
+ struct platform_device *pdev;
+ struct v4l2_device *v4l2_dev;
+ int ret = 0;
+
+ if (!fimc)
+ return -ENODEV;
+
+ pdev = fimc->pdev;
+ v4l2_dev = &fimc->m2m.v4l2_dev;
+
+ /* set name if it is empty */
+ if (!v4l2_dev->name[0])
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s.m2m", dev_name(&pdev->dev));
+
+ ret = v4l2_device_register(&pdev->dev, v4l2_dev);
+ if (ret)
+ return ret;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+ goto err_m2m_r1;
+ }
+
+ vfd->fops = &fimc_m2m_fops;
+ vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s:m2m", dev_name(&pdev->dev));
+
+ video_set_drvdata(vfd, fimc);
+ platform_set_drvdata(pdev, fimc);
+
+ fimc->m2m.vfd = vfd;
+ fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fimc->m2m.m2m_dev)) {
+ v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(fimc->m2m.m2m_dev);
+ goto err_m2m_r2;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(v4l2_dev,
+ "%s(): failed to register video device\n", __func__);
+ goto err_m2m_r3;
+ }
+ v4l2_info(v4l2_dev,
+ "FIMC m2m driver registered as /dev/video%d\n", vfd->num);
+
+ return 0;
+
+err_m2m_r3:
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+err_m2m_r2:
+ video_device_release(fimc->m2m.vfd);
+err_m2m_r1:
+ v4l2_device_unregister(v4l2_dev);
+
+ return ret;
+}
+
+static void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+{
+ if (fimc) {
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+ video_unregister_device(fimc->m2m.vfd);
+ video_device_release(fimc->m2m.vfd);
+ v4l2_device_unregister(&fimc->m2m.v4l2_dev);
+ }
+}
+
+static void fimc_clk_release(struct fimc_dev *fimc)
+{
+ int i;
+ for (i = 0; i < NUM_FIMC_CLOCKS; i++) {
+ if (fimc->clock[i]) {
+ clk_disable(fimc->clock[i]);
+ clk_put(fimc->clock[i]);
+ }
+ }
+}
+
+static int fimc_clk_get(struct fimc_dev *fimc)
+{
+ int i;
+ for (i = 0; i < NUM_FIMC_CLOCKS; i++) {
+ fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clock_name[i]);
+ if (IS_ERR(fimc->clock[i])) {
+ dev_err(&fimc->pdev->dev,
+ "failed to get fimc clock: %s\n",
+ fimc_clock_name[i]);
+ return -ENXIO;
+ }
+ clk_enable(fimc->clock[i]);
+ }
+ return 0;
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+ struct fimc_dev *fimc;
+ struct resource *res;
+ struct samsung_fimc_driverdata *drv_data;
+ int ret = 0;
+
+ dev_dbg(&pdev->dev, "%s():\n", __func__);
+
+ drv_data = (struct samsung_fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ if (pdev->id >= drv_data->devs_cnt) {
+ dev_err(&pdev->dev, "Invalid platform device id: %d\n",
+ pdev->id);
+ return -EINVAL;
+ }
+
+ fimc = kzalloc(sizeof(struct fimc_dev), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ fimc->id = pdev->id;
+ fimc->variant = drv_data->variant[fimc->id];
+ fimc->pdev = pdev;
+ fimc->state = ST_IDLE;
+
+ spin_lock_init(&fimc->irqlock);
+ spin_lock_init(&fimc->slock);
+
+ mutex_init(&fimc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find the registers\n");
+ ret = -ENOENT;
+ goto err_info;
+ }
+
+ fimc->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!fimc->regs_res) {
+ dev_err(&pdev->dev, "failed to obtain register region\n");
+ ret = -ENOENT;
+ goto err_info;
+ }
+
+ fimc->regs = ioremap(res->start, resource_size(res));
+ if (!fimc->regs) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_region;
+ }
+
+ ret = fimc_clk_get(fimc);
+ if (ret)
+ goto err_regs_unmap;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get IRQ resource\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+ fimc->irq = res->start;
+
+ fimc_hw_reset(fimc);
+
+ ret = request_irq(fimc->irq, fimc_isr, 0, pdev->name, fimc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
+ goto err_clk;
+ }
+
+ fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
+ if (!fimc->work_queue)
+ goto err_irq;
+
+ ret = fimc_register_m2m_device(fimc);
+ if (ret)
+ goto err_wq;
+
+ fimc_hw_en_lastirq(fimc, true);
+
+ dev_dbg(&pdev->dev, "%s(): fimc-%d registered successfully\n",
+ __func__, fimc->id);
+
+ return 0;
+
+err_wq:
+ destroy_workqueue(fimc->work_queue);
+err_irq:
+ free_irq(fimc->irq, fimc);
+err_clk:
+ fimc_clk_release(fimc);
+err_regs_unmap:
+ iounmap(fimc->regs);
+err_req_region:
+ release_resource(fimc->regs_res);
+ kfree(fimc->regs_res);
+err_info:
+ kfree(fimc);
+ dev_err(&pdev->dev, "failed to install\n");
+ return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+ struct fimc_dev *fimc =
+ (struct fimc_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&fimc->m2m.v4l2_dev, "Removing %s\n", pdev->name);
+
+ free_irq(fimc->irq, fimc);
+
+ fimc_hw_reset(fimc);
+
+ fimc_unregister_m2m_device(fimc);
+ fimc_clk_release(fimc);
+ iounmap(fimc->regs);
+ release_resource(fimc->regs_res);
+ kfree(fimc->regs_res);
+ kfree(fimc);
+ return 0;
+}
+
+static struct samsung_fimc_variant fimc01_variant_s5p = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+
+ .scaler_en_w = 3264,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+};
+
+static struct samsung_fimc_variant fimc2_variant_s5p = {
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+
+ .scaler_en_w = 4224,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+};
+
+static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 32,
+
+ .scaler_en_w = 4224,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+};
+
+static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 32,
+
+ .scaler_en_w = 1920,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1280,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1280,
+ .out_rot_dis_w = 1920,
+};
+
+static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
+ .variant = {
+ [0] = &fimc01_variant_s5p,
+ [1] = &fimc01_variant_s5p,
+ [2] = &fimc2_variant_s5p,
+ },
+ .devs_cnt = 3
+};
+
+static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
+ .variant = {
+ [0] = &fimc01_variant_s5pv210,
+ [1] = &fimc01_variant_s5pv210,
+ [2] = &fimc2_variant_s5pv210,
+ },
+ .devs_cnt = 3
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "s5p-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_s5p,
+ }, {
+ .name = "s5pv210-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_s5pv210,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = __devexit_p(fimc_remove),
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static char banner[] __initdata = KERN_INFO
+ "S5PC Camera Interface V4L2 Driver, (c) 2010 Samsung Electronics\n";
+
+static int __init fimc_init(void)
+{
+ int ret;
+ printk(banner);
+
+ ret = platform_driver_register(&fimc_driver);
+ if (ret) {
+ printk(KERN_ERR "FIMC platform driver register failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit fimc_exit(void)
+{
+ platform_driver_unregister(&fimc_driver);
+}
+
+module_init(fimc_init);
+module_exit(fimc_exit);
+
+MODULE_AUTHOR("Sylwester Nawrocki, s.nawrocki@samsung.com");
+MODULE_DESCRIPTION("S3C/S5P FIMC (video postprocessor) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
new file mode 100644
index 00000000000..6b3e0cd73cd
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics
+ *
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_CORE_H_
+#define FIMC_CORE_H_
+
+#include <linux/types.h>
+#include <media/videobuf-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/videodev2.h>
+#include "regs-fimc.h"
+
+#define err(fmt, args...) \
+ printk(KERN_ERR "%s:%d: " fmt "\n", __func__, __LINE__, ##args)
+
+#ifdef DEBUG
+#define dbg(fmt, args...) \
+ printk(KERN_DEBUG "%s:%d: " fmt "\n", __func__, __LINE__, ##args)
+#else
+#define dbg(fmt, args...)
+#endif
+
+#define NUM_FIMC_CLOCKS 2
+#define MODULE_NAME "s5p-fimc"
+#define FIMC_MAX_DEVS 3
+#define FIMC_MAX_OUT_BUFS 4
+#define SCALER_MAX_HRATIO 64
+#define SCALER_MAX_VRATIO 64
+
+enum {
+ ST_IDLE,
+ ST_OUTDMA_RUN,
+ ST_M2M_PEND,
+};
+
+#define fimc_m2m_active(dev) test_bit(ST_OUTDMA_RUN, &(dev)->state)
+#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+
+enum fimc_datapath {
+ FIMC_ITU_CAM_A,
+ FIMC_ITU_CAM_B,
+ FIMC_MIPI_CAM,
+ FIMC_DMA,
+ FIMC_LCDFIFO,
+ FIMC_WRITEBACK
+};
+
+enum fimc_color_fmt {
+ S5P_FIMC_RGB565,
+ S5P_FIMC_RGB666,
+ S5P_FIMC_RGB888,
+ S5P_FIMC_YCBCR420,
+ S5P_FIMC_YCBCR422,
+ S5P_FIMC_YCBYCR422,
+ S5P_FIMC_YCRYCB422,
+ S5P_FIMC_CBYCRY422,
+ S5P_FIMC_CRYCBY422,
+ S5P_FIMC_RGB30_LOCAL,
+ S5P_FIMC_YCBCR444_LOCAL,
+ S5P_FIMC_MAX_COLOR = S5P_FIMC_YCBCR444_LOCAL,
+ S5P_FIMC_COLOR_MASK = 0x0F,
+};
+
+/* Y/Cb/Cr components order at DMA output for 1 plane YCbCr 4:2:2 formats. */
+#define S5P_FIMC_OUT_CRYCBY S5P_CIOCTRL_ORDER422_CRYCBY
+#define S5P_FIMC_OUT_CBYCRY S5P_CIOCTRL_ORDER422_YCRYCB
+#define S5P_FIMC_OUT_YCRYCB S5P_CIOCTRL_ORDER422_CBYCRY
+#define S5P_FIMC_OUT_YCBYCR S5P_CIOCTRL_ORDER422_YCBYCR
+
+/* Input Y/Cb/Cr components order for 1 plane YCbCr 4:2:2 color formats. */
+#define S5P_FIMC_IN_CRYCBY S5P_MSCTRL_ORDER422_CRYCBY
+#define S5P_FIMC_IN_CBYCRY S5P_MSCTRL_ORDER422_YCRYCB
+#define S5P_FIMC_IN_YCRYCB S5P_MSCTRL_ORDER422_CBYCRY
+#define S5P_FIMC_IN_YCBYCR S5P_MSCTRL_ORDER422_YCBYCR
+
+/* Cb/Cr chrominance components order for 2 plane Y/CbCr 4:2:2 formats. */
+#define S5P_FIMC_LSB_CRCB S5P_CIOCTRL_ORDER422_2P_LSB_CRCB
+
+/* The embedded image effect selection */
+#define S5P_FIMC_EFFECT_ORIGINAL S5P_CIIMGEFF_FIN_BYPASS
+#define S5P_FIMC_EFFECT_ARBITRARY S5P_CIIMGEFF_FIN_ARBITRARY
+#define S5P_FIMC_EFFECT_NEGATIVE S5P_CIIMGEFF_FIN_NEGATIVE
+#define S5P_FIMC_EFFECT_ARTFREEZE S5P_CIIMGEFF_FIN_ARTFREEZE
+#define S5P_FIMC_EFFECT_EMBOSSING S5P_CIIMGEFF_FIN_EMBOSSING
+#define S5P_FIMC_EFFECT_SIKHOUETTE S5P_CIIMGEFF_FIN_SILHOUETTE
+
+/* The hardware context state. */
+#define FIMC_PARAMS (1 << 0)
+#define FIMC_SRC_ADDR (1 << 1)
+#define FIMC_DST_ADDR (1 << 2)
+#define FIMC_SRC_FMT (1 << 3)
+#define FIMC_DST_FMT (1 << 4)
+
+/* Image conversion flags */
+#define FIMC_IN_DMA_ACCESS_TILED (1 << 0)
+#define FIMC_IN_DMA_ACCESS_LINEAR (0 << 0)
+#define FIMC_OUT_DMA_ACCESS_TILED (1 << 1)
+#define FIMC_OUT_DMA_ACCESS_LINEAR (0 << 1)
+#define FIMC_SCAN_MODE_PROGRESSIVE (0 << 2)
+#define FIMC_SCAN_MODE_INTERLACED (1 << 2)
+/* YCbCr data dynamic range for RGB-YUV color conversion. Y/Cb/Cr: (0 ~ 255) */
+#define FIMC_COLOR_RANGE_WIDE (0 << 3)
+/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
+#define FIMC_COLOR_RANGE_NARROW (1 << 3)
+
+#define FLIP_NONE 0
+#define FLIP_X_AXIS 1
+#define FLIP_Y_AXIS 2
+#define FLIP_XY_AXIS (FLIP_X_AXIS | FLIP_Y_AXIS)
+
+/**
+ * struct fimc_fmt - the driver's internal color format data
+ * @name: format description
+ * @fourcc: the fourcc code for this format
+ * @color: the corresponding fimc_color_fmt
+ * @depth: number of bits per pixel
+ * @buff_cnt: number of physically non-contiguous data planes
+ * @planes_cnt: number of physically contiguous data planes
+ */
+struct fimc_fmt {
+ char *name;
+ u32 fourcc;
+ u32 color;
+ u32 depth;
+ u16 buff_cnt;
+ u16 planes_cnt;
+};
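+
+/*
+ * Example: V4L2_PIX_FMT_NV12 is described with buff_cnt = 1 and
+ * planes_cnt = 2, i.e. one physically contiguous buffer holding
+ * separate Y and interleaved CbCr planes.
+ */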
+
+/**
+ * struct fimc_dma_offset - pixel offset information for DMA
+ * @y_h: y value horizontal offset
+ * @y_v: y value vertical offset
+ * @cb_h: cb value horizontal offset
+ * @cb_v: cb value vertical offset
+ * @cr_h: cr value horizontal offset
+ * @cr_v: cr value vertical offset
+ */
+struct fimc_dma_offset {
+ int y_h;
+ int y_v;
+ int cb_h;
+ int cb_v;
+ int cr_h;
+ int cr_v;
+};
+
+/**
+ * struct fimc_effect - image effect configuration data
+ * @type: effect type
+ * @pat_cb: cb value when type is "arbitrary"
+ * @pat_cr: cr value when type is "arbitrary"
+ */
+struct fimc_effect {
+ u32 type;
+ u8 pat_cb;
+ u8 pat_cr;
+};
+
+/**
+ * struct fimc_scaler - the configuration data for the FIMC internal scaler
+ *
+ * @enabled: the flag set when the scaler is used
+ * @hfactor: horizontal shift factor
+ * @vfactor: vertical shift factor
+ * @pre_hratio: horizontal ratio of the prescaler
+ * @pre_vratio: vertical ratio of the prescaler
+ * @pre_dst_width: the prescaler's destination width
+ * @pre_dst_height: the prescaler's destination height
+ * @scaleup_h: flag indicating scaling up horizontally
+ * @scaleup_v: flag indicating scaling up vertically
+ * @main_hratio: the main scaler's horizontal ratio
+ * @main_vratio: the main scaler's vertical ratio
+ * @real_width: source width - offset
+ * @real_height: source height - offset
+ * @copy_mode: flag set if one-to-one mode is used, i.e. no scaling
+ * and no color format conversion
+ */
+struct fimc_scaler {
+ u32 enabled;
+ u32 hfactor;
+ u32 vfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 scaleup_h;
+ u32 scaleup_v;
+ u32 main_hratio;
+ u32 main_vratio;
+ u32 real_width;
+ u32 real_height;
+ u32 copy_mode;
+};
+
+/**
+ * struct fimc_addr - the FIMC physical address set for DMA
+ *
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct fimc_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
+
+/**
+ * struct fimc_vid_buffer - the driver's video buffer
+ * @vb: v4l videobuf buffer
+ */
+struct fimc_vid_buffer {
+ struct videobuf_buffer vb;
+};
+
+/**
+ * struct fimc_frame - input/output frame format properties
+ *
+ * @f_width: image full width (virtual screen size)
+ * @f_height: image full height (virtual screen size)
+ * @o_width: original image width as set by S_FMT
+ * @o_height: original image height as set by S_FMT
+ * @offs_h: image horizontal pixel offset
+ * @offs_v: image vertical pixel offset
+ * @width: image pixel width
+ * @height: image pixel height
+ * @size: image size in bytes
+ * @paddr: image frame buffer physical addresses
+ * @dma_offset: DMA offset in bytes
+ * @fmt: color format description
+ */
+struct fimc_frame {
+ u32 f_width;
+ u32 f_height;
+ u32 o_width;
+ u32 o_height;
+ u32 offs_h;
+ u32 offs_v;
+ u32 width;
+ u32 height;
+ u32 size;
+ struct fimc_addr paddr;
+ struct fimc_dma_offset dma_offset;
+ struct fimc_fmt *fmt;
+};
+
+/**
+ * struct fimc_m2m_device - v4l2 memory-to-memory device data
+ * @vfd: the video device node for v4l2 m2m mode
+ * @v4l2_dev: v4l2 device for m2m mode
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx: hardware context data
+ * @refcnt: the reference counter
+ */
+struct fimc_m2m_device {
+ struct video_device *vfd;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct fimc_ctx *ctx;
+ int refcnt;
+};
+
+/**
+ * struct samsung_fimc_variant - camera interface variant information
+ *
+ * @pix_hoff: indicates whether the horizontal offset is given in pixels or in bytes
+ * @has_inp_rot: set if has input rotator
+ * @has_out_rot: set if has output rotator
+ * @min_inp_pixsize: minimum input pixel size
+ * @min_out_pixsize: minimum output pixel size
+ * @scaler_en_w: maximum input pixel width when the scaler is enabled
+ * @scaler_dis_w: maximum input pixel width when the scaler is disabled
+ * @in_rot_en_h: maximum input height when the input rotator is used
+ * @in_rot_dis_w: maximum input width when the input rotator is disabled
+ * @out_rot_en_w: maximum output width when the output rotator is enabled
+ * @out_rot_dis_w: maximum output width when the output rotator is disabled
+ */
+struct samsung_fimc_variant {
+ unsigned int pix_hoff:1;
+ unsigned int has_inp_rot:1;
+ unsigned int has_out_rot:1;
+
+ u16 min_inp_pixsize;
+ u16 min_out_pixsize;
+ u16 scaler_en_w;
+ u16 scaler_dis_w;
+ u16 in_rot_en_h;
+ u16 in_rot_dis_w;
+ u16 out_rot_en_w;
+ u16 out_rot_dis_w;
+};
+
+/**
+ * struct samsung_fimc_driverdata - per device-type driver data used at init time
+ *
+ * @variant: the variant information for this driver
+ * @devs_cnt: number of FIMC sub-devices available in the SoC
+ */
+struct samsung_fimc_driverdata {
+ struct samsung_fimc_variant *variant[FIMC_MAX_DEVS];
+ int devs_cnt;
+};
+
+struct fimc_ctx;
+
+/**
+ * struct fimc_dev - abstraction for the FIMC entity
+ *
+ * @slock: the spinlock protecting this data structure
+ * @lock: the mutex protecting this data structure
+ * @pdev: pointer to the FIMC platform device
+ * @variant: the IP variant information for this device
+ * @id: FIMC device index (0..2)
+ * @clock: the clocks required for FIMC operation
+ * @regs: the mapped hardware registers
+ * @regs_res: the resource claimed for IO registers
+ * @irq: interrupt number of the FIMC subdevice
+ * @irqlock: spinlock protecting the videobuffer queue
+ * @work_queue: the work queue used by this device
+ * @m2m: memory-to-memory V4L2 device information
+ * @state: the FIMC device state flags
+ */
+struct fimc_dev {
+ spinlock_t slock;
+ struct mutex lock;
+ struct platform_device *pdev;
+ struct samsung_fimc_variant *variant;
+ int id;
+ struct clk *clock[NUM_FIMC_CLOCKS];
+ void __iomem *regs;
+ struct resource *regs_res;
+ int irq;
+ spinlock_t irqlock;
+ struct workqueue_struct *work_queue;
+ struct fimc_m2m_device m2m;
+ unsigned long state;
+};
+
+/**
+ * struct fimc_ctx - the device context data
+ *
+ * @slock: spinlock protecting this data structure
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @out_order_1p: output 1-plane YCBCR order
+ * @out_order_2p: output 2-plane YCBCR order
+ * @in_order_1p: input 1-plane YCBCR order
+ * @in_order_2p: input 2-plane YCBCR order
+ * @in_path: input mode (DMA or camera)
+ * @out_path: output mode (DMA or FIFO)
+ * @scaler: image scaler properties
+ * @effect: image effect
+ * @rotation: image clockwise rotation in degrees
+ * @flip: image flip mode
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration
+ * @fimc_dev: the FIMC device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ */
+struct fimc_ctx {
+ spinlock_t slock;
+ struct fimc_frame s_frame;
+ struct fimc_frame d_frame;
+ u32 out_order_1p;
+ u32 out_order_2p;
+ u32 in_order_1p;
+ u32 in_order_2p;
+ enum fimc_datapath in_path;
+ enum fimc_datapath out_path;
+ struct fimc_scaler scaler;
+ struct fimc_effect effect;
+ int rotation;
+ u32 flip;
+ u32 flags;
+ u32 state;
+ struct fimc_dev *fimc_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+};
+
+
+static inline int tiled_fmt(struct fimc_fmt *fmt)
+{
+ return 0;
+}
+
+static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + S5P_CIGCTRL);
+ cfg |= S5P_CIGCTRL_IRQ_CLR;
+ writel(cfg, dev->regs + S5P_CIGCTRL);
+}
+
+static inline void fimc_hw_start_scaler(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg |= S5P_CISCCTRL_SCALERSTART;
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+}
+
+static inline void fimc_hw_stop_scaler(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~S5P_CISCCTRL_SCALERSTART;
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+}
+
+static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
+ cfg &= ~(S5P_CIIMGCPT_IMGCPTEN | S5P_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + S5P_CIIMGCPT);
+}
+
+static inline void fimc_hw_start_in_dma(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + S5P_MSCTRL);
+ cfg |= S5P_MSCTRL_ENVID;
+ writel(cfg, dev->regs + S5P_MSCTRL);
+}
+
+static inline void fimc_hw_stop_in_dma(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + S5P_MSCTRL);
+ cfg &= ~S5P_MSCTRL_ENVID;
+ writel(cfg, dev->regs + S5P_MSCTRL);
+}
+
+static inline struct fimc_frame *ctx_m2m_get_frame(struct fimc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ struct fimc_frame *frame;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT == type) {
+ frame = &ctx->s_frame;
+ } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE == type) {
+ frame = &ctx->d_frame;
+ } else {
+ v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
+ "Wrong buffer/video queue type (%d)\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return frame;
+}
+
+/* -----------------------------------------------------*/
+/* fimc-reg.c */
+void fimc_hw_reset(struct fimc_dev *dev);
+void fimc_hw_set_rotation(struct fimc_ctx *ctx);
+void fimc_hw_set_target_format(struct fimc_ctx *ctx);
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable);
+void fimc_hw_en_irq(struct fimc_dev *dev, int enable);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_scaler(struct fimc_ctx *ctx);
+void fimc_hw_en_capture(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
+void fimc_hw_set_input_path(struct fimc_ctx *ctx);
+void fimc_hw_set_output_path(struct fimc_ctx *ctx);
+void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *dev, struct fimc_addr *paddr);
+
+#endif /* FIMC_CORE_H_ */
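Editor's note: as a quick orientation to the helpers declared above, the sketch below shows one way a memory-to-memory job could combine ctx_m2m_get_frame() with the fimc-reg.c setters. It is not part of the patch; the function name fimc_prepare_m2m_job() is hypothetical and the sketch assumes <linux/err.h> is available for IS_ERR().

/* Illustrative sketch only, not part of the patch. */
static void fimc_prepare_m2m_job(struct fimc_ctx *ctx)
{
	struct fimc_dev *fimc = ctx->fimc_dev;
	struct fimc_frame *sf, *df;

	sf = ctx_m2m_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	df = ctx_m2m_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	if (IS_ERR(sf) || IS_ERR(df))
		return;

	fimc_hw_set_in_dma(ctx);
	fimc_hw_set_out_dma(ctx);
	fimc_hw_set_input_addr(fimc, &sf->paddr);
	fimc_hw_set_output_addr(fimc, &df->paddr);
	fimc_hw_start_in_dma(fimc);
}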
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
new file mode 100644
index 00000000000..5570f1ce0c9
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -0,0 +1,527 @@
+/*
+ * Register interface file for Samsung Camera Interface (FIMC) driver
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ *
+ * Sylwester Nawrocki, s.nawrocki@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <mach/map.h>
+
+#include "fimc-core.h"
+
+
+void fimc_hw_reset(struct fimc_dev *dev)
+{
+ u32 cfg;
+
+ cfg = readl(dev->regs + S5P_CISRCFMT);
+ cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ writel(cfg, dev->regs + S5P_CISRCFMT);
+
+ /* Software reset. */
+ cfg = readl(dev->regs + S5P_CIGCTRL);
+ cfg |= (S5P_CIGCTRL_SWRST | S5P_CIGCTRL_IRQ_LEVEL);
+ writel(cfg, dev->regs + S5P_CIGCTRL);
+ msleep(1);
+
+ cfg = readl(dev->regs + S5P_CIGCTRL);
+ cfg &= ~S5P_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + S5P_CIGCTRL);
+
+}
+
+void fimc_hw_set_rotation(struct fimc_ctx *ctx)
+{
+ u32 cfg, flip;
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ cfg = readl(dev->regs + S5P_CITRGFMT);
+ cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90);
+
+ flip = readl(dev->regs + S5P_MSCTRL);
+ flip &= ~S5P_MSCTRL_FLIP_MASK;
+
+ /*
+ * The input and output rotator cannot work simultaneously.
+ * Use the output rotator in output DMA mode or the input rotator
+ * in direct FIFO output mode.
+ */
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ if (ctx->out_path == FIMC_LCDFIFO) {
+ cfg |= S5P_CITRGFMT_INROT90;
+ if (ctx->rotation == 270)
+ flip |= S5P_MSCTRL_FLIP_180;
+ } else {
+ cfg |= S5P_CITRGFMT_OUTROT90;
+ if (ctx->rotation == 270)
+ cfg |= S5P_CITRGFMT_FLIP_180;
+ }
+ } else if (ctx->rotation == 180) {
+ if (ctx->out_path == FIMC_LCDFIFO)
+ flip |= S5P_MSCTRL_FLIP_180;
+ else
+ cfg |= S5P_CITRGFMT_FLIP_180;
+ }
+ if (ctx->rotation == 180 || ctx->rotation == 270)
+ writel(flip, dev->regs + S5P_MSCTRL);
+ writel(cfg, dev->regs + S5P_CITRGFMT);
+}
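Editor's note: since the input and output rotators cannot run at the same time, a caller would normally reject unsupported rotation/flip combinations before programming the hardware. A minimal sketch of such a check follows; the helper name and the exact policy are assumptions for illustration, not patch content.

/* Sketch only: a possible sanity check before fimc_hw_set_rotation(). */
static int fimc_check_rotation(struct fimc_ctx *ctx, int rotation, u32 flip)
{
	if (rotation % 90)
		return -EINVAL;
	/* Rotation and flipping are not programmed simultaneously. */
	if ((rotation == 90 || rotation == 270) && flip != FLIP_NONE)
		return -EINVAL;
	/* 90/270 degree rotation relies on the rotator of the selected path. */
	if ((rotation == 90 || rotation == 270) &&
	    ctx->out_path == FIMC_LCDFIFO &&
	    !ctx->fimc_dev->variant->has_inp_rot)
		return -EINVAL;
	return 0;
}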
+
+static u32 fimc_hw_get_in_flip(u32 ctx_flip)
+{
+ u32 flip = S5P_MSCTRL_FLIP_NORMAL;
+
+ switch (ctx_flip) {
+ case FLIP_X_AXIS:
+ flip = S5P_MSCTRL_FLIP_X_MIRROR;
+ break;
+ case FLIP_Y_AXIS:
+ flip = S5P_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case FLIP_XY_AXIS:
+ flip = S5P_MSCTRL_FLIP_180;
+ break;
+ }
+
+ return flip;
+}
+
+static u32 fimc_hw_get_target_flip(u32 ctx_flip)
+{
+ u32 flip = S5P_CITRGFMT_FLIP_NORMAL;
+
+ switch (ctx_flip) {
+ case FLIP_X_AXIS:
+ flip = S5P_CITRGFMT_FLIP_X_MIRROR;
+ break;
+ case FLIP_Y_AXIS:
+ flip = S5P_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case FLIP_XY_AXIS:
+ flip = S5P_CITRGFMT_FLIP_180;
+ break;
+ case FLIP_NONE:
+ break;
+
+ }
+ return flip;
+}
+
+void fimc_hw_set_target_format(struct fimc_ctx *ctx)
+{
+ u32 cfg;
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+
+ dbg("w= %d, h= %d color: %d", frame->width,
+ frame->height, frame->fmt->color);
+
+ cfg = readl(dev->regs + S5P_CITRGFMT);
+ cfg &= ~(S5P_CITRGFMT_FMT_MASK | S5P_CITRGFMT_HSIZE_MASK |
+ S5P_CITRGFMT_VSIZE_MASK);
+
+ switch (frame->fmt->color) {
+ case S5P_FIMC_RGB565:
+ case S5P_FIMC_RGB666:
+ case S5P_FIMC_RGB888:
+ cfg |= S5P_CITRGFMT_RGB;
+ break;
+ case S5P_FIMC_YCBCR420:
+ cfg |= S5P_CITRGFMT_YCBCR420;
+ break;
+ case S5P_FIMC_YCBYCR422:
+ case S5P_FIMC_YCRYCB422:
+ case S5P_FIMC_CBYCRY422:
+ case S5P_FIMC_CRYCBY422:
+ if (frame->fmt->planes_cnt == 1)
+ cfg |= S5P_CITRGFMT_YCBCR422_1P;
+ else
+ cfg |= S5P_CITRGFMT_YCBCR422;
+ break;
+ default:
+ break;
+ }
+
+ cfg |= S5P_CITRGFMT_HSIZE(frame->width);
+ cfg |= S5P_CITRGFMT_VSIZE(frame->height);
+
+ if (ctx->rotation == 0) {
+ cfg &= ~S5P_CITRGFMT_FLIP_MASK;
+ cfg |= fimc_hw_get_target_flip(ctx->flip);
+ }
+ writel(cfg, dev->regs + S5P_CITRGFMT);
+
+ cfg = readl(dev->regs + S5P_CITAREA) & ~S5P_CITAREA_MASK;
+ cfg |= (frame->width * frame->height);
+ writel(cfg, dev->regs + S5P_CITAREA);
+}
+
+static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ u32 cfg = 0;
+
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ cfg |= S5P_ORIG_SIZE_HOR(frame->f_height);
+ cfg |= S5P_ORIG_SIZE_VER(frame->f_width);
+ } else {
+ cfg |= S5P_ORIG_SIZE_HOR(frame->f_width);
+ cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
+ }
+ writel(cfg, dev->regs + S5P_ORGOSIZE);
+}
+
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
+{
+ u32 cfg;
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ struct fimc_dma_offset *offset = &frame->dma_offset;
+
+ /* Set the output DMA offsets. */
+ cfg = 0;
+ cfg |= S5P_CIO_OFFS_HOR(offset->y_h);
+ cfg |= S5P_CIO_OFFS_VER(offset->y_v);
+ writel(cfg, dev->regs + S5P_CIOYOFF);
+
+ cfg = 0;
+ cfg |= S5P_CIO_OFFS_HOR(offset->cb_h);
+ cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
+ writel(cfg, dev->regs + S5P_CIOCBOFF);
+
+ cfg = 0;
+ cfg |= S5P_CIO_OFFS_HOR(offset->cr_h);
+ cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
+ writel(cfg, dev->regs + S5P_CIOCROFF);
+
+ fimc_hw_set_out_dma_size(ctx);
+
+ /* Configure chroma components order. */
+ cfg = readl(dev->regs + S5P_CIOCTRL);
+
+ cfg &= ~(S5P_CIOCTRL_ORDER2P_MASK | S5P_CIOCTRL_ORDER422_MASK |
+ S5P_CIOCTRL_YCBCR_PLANE_MASK);
+
+ if (frame->fmt->planes_cnt == 1)
+ cfg |= ctx->out_order_1p;
+ else if (frame->fmt->planes_cnt == 2)
+ cfg |= ctx->out_order_2p | S5P_CIOCTRL_YCBCR_2PLANE;
+ else if (frame->fmt->planes_cnt == 3)
+ cfg |= S5P_CIOCTRL_YCBCR_3PLANE;
+
+ writel(cfg, dev->regs + S5P_CIOCTRL);
+}
+
+static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
+{
+ u32 cfg = readl(dev->regs + S5P_ORGISIZE);
+ if (enable)
+ cfg |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ else
+ cfg &= ~S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ writel(cfg, dev->regs + S5P_ORGISIZE);
+}
+
+void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
+{
+ unsigned long flags;
+ u32 cfg;
+
+ spin_lock_irqsave(&dev->slock, flags);
+
+ cfg = readl(dev->regs + S5P_CIOCTRL);
+ if (enable)
+ cfg |= S5P_CIOCTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~S5P_CIOCTRL_LASTIRQ_ENABLE;
+ writel(cfg, dev->regs + S5P_CIOCTRL);
+
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ u32 cfg = 0, shfactor;
+
+ shfactor = 10 - (sc->hfactor + sc->vfactor);
+
+ cfg |= S5P_CISCPRERATIO_SHFACTOR(shfactor);
+ cfg |= S5P_CISCPRERATIO_HOR(sc->pre_hratio);
+ cfg |= S5P_CISCPRERATIO_VER(sc->pre_vratio);
+ writel(cfg, dev->regs + S5P_CISCPRERATIO);
+
+ cfg = 0;
+ cfg |= S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
+ cfg |= S5P_CISCPREDST_HEIGHT(sc->pre_dst_height);
+ writel(cfg, dev->regs + S5P_CISCPREDST);
+}
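Editor's note: the shift factor written above is derived from the base-2 prescaler factors, shfactor = 10 - (hfactor + vfactor). A worked example for a hypothetical 4:1 horizontal and 2:1 vertical downscale of a 1280x720 source is sketched below; the helper name and the concrete numbers are assumptions for illustration only.

/* Sketch only: example prescaler setup for a 4:1 x 2:1 downscale. */
static void fimc_prescaler_example(struct fimc_scaler *sc)
{
	sc->pre_hratio = 4;				/* 4:1 horizontal prescale */
	sc->pre_vratio = 2;				/* 2:1 vertical prescale */
	sc->hfactor = 2;				/* log2(pre_hratio) */
	sc->vfactor = 1;				/* log2(pre_vratio) */
	sc->pre_dst_width = 1280 / sc->pre_hratio;	/* 320 */
	sc->pre_dst_height = 720 / sc->pre_vratio;	/* 360 */
	/* fimc_hw_set_prescaler() then writes shfactor = 10 - (2 + 1) = 7. */
}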
+
+void fimc_hw_set_scaler(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_scaler *sc = &ctx->scaler;
+ struct fimc_frame *src_frame = &ctx->s_frame;
+ struct fimc_frame *dst_frame = &ctx->d_frame;
+ u32 cfg = 0;
+
+ if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
+ cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
+
+ if (!sc->enabled)
+ cfg |= S5P_CISCCTRL_SCALERBYPASS;
+
+ if (sc->scaleup_h)
+ cfg |= S5P_CISCCTRL_SCALEUP_H;
+
+ if (sc->scaleup_v)
+ cfg |= S5P_CISCCTRL_SCALEUP_V;
+
+ if (sc->copy_mode)
+ cfg |= S5P_CISCCTRL_ONE2ONE;
+
+ if (ctx->in_path == FIMC_DMA) {
+ if (src_frame->fmt->color == S5P_FIMC_RGB565)
+ cfg |= S5P_CISCCTRL_INRGB_FMT_RGB565;
+ else if (src_frame->fmt->color == S5P_FIMC_RGB666)
+ cfg |= S5P_CISCCTRL_INRGB_FMT_RGB666;
+ else if (src_frame->fmt->color == S5P_FIMC_RGB888)
+ cfg |= S5P_CISCCTRL_INRGB_FMT_RGB888;
+ }
+
+ if (ctx->out_path == FIMC_DMA) {
+ if (dst_frame->fmt->color == S5P_FIMC_RGB565)
+ cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB565;
+ else if (dst_frame->fmt->color == S5P_FIMC_RGB666)
+ cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB666;
+ else if (dst_frame->fmt->color == S5P_FIMC_RGB888)
+ cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
+ } else {
+ cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
+
+ if (ctx->flags & FIMC_SCAN_MODE_INTERLACED)
+ cfg |= S5P_CISCCTRL_INTERLACE;
+ }
+
+ dbg("main_hratio= 0x%X main_vratio= 0x%X",
+ sc->main_hratio, sc->main_vratio);
+
+ cfg |= S5P_CISCCTRL_SC_HORRATIO(sc->main_hratio);
+ cfg |= S5P_CISCCTRL_SC_VERRATIO(sc->main_vratio);
+
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+}
+
+void fimc_hw_en_capture(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ u32 cfg;
+
+ cfg = readl(dev->regs + S5P_CIIMGCPT);
+ /* One shot mode for output DMA or freerun for FIFO. */
+ if (ctx->out_path == FIMC_DMA)
+ cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE;
+ else
+ cfg &= ~S5P_CIIMGCPT_CPT_FREN_ENABLE;
+
+ if (ctx->scaler.enabled)
+ cfg |= S5P_CIIMGCPT_IMGCPTEN_SC;
+
+ writel(cfg | S5P_CIIMGCPT_IMGCPTEN, dev->regs + S5P_CIIMGCPT);
+}
+
+void fimc_hw_set_effect(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_effect *effect = &ctx->effect;
+ u32 cfg = (S5P_CIIMGEFF_IE_ENABLE | S5P_CIIMGEFF_IE_SC_AFTER);
+
+ cfg |= effect->type;
+
+ if (effect->type == S5P_FIMC_EFFECT_ARBITRARY) {
+ cfg |= S5P_CIIMGEFF_PAT_CB(effect->pat_cb);
+ cfg |= S5P_CIIMGEFF_PAT_CR(effect->pat_cr);
+ }
+
+ writel(cfg, dev->regs + S5P_CIIMGEFF);
+}
+
+static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->s_frame;
+ u32 cfg_o = 0;
+ u32 cfg_r = 0;
+
+ if (FIMC_LCDFIFO == ctx->out_path)
+ cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+
+ cfg_o |= S5P_ORIG_SIZE_HOR(frame->f_width);
+ cfg_o |= S5P_ORIG_SIZE_VER(frame->f_height);
+ cfg_r |= S5P_CIREAL_ISIZE_WIDTH(frame->width);
+ cfg_r |= S5P_CIREAL_ISIZE_HEIGHT(frame->height);
+
+ writel(cfg_o, dev->regs + S5P_ORGISIZE);
+ writel(cfg_r, dev->regs + S5P_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->s_frame;
+ struct fimc_dma_offset *offset = &frame->dma_offset;
+ u32 cfg = 0;
+
+ /* Set the pixel offsets. */
+ cfg |= S5P_CIO_OFFS_HOR(offset->y_h);
+ cfg |= S5P_CIO_OFFS_VER(offset->y_v);
+ writel(cfg, dev->regs + S5P_CIIYOFF);
+
+ cfg = 0;
+ cfg |= S5P_CIO_OFFS_HOR(offset->cb_h);
+ cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
+ writel(cfg, dev->regs + S5P_CIICBOFF);
+
+ cfg = 0;
+ cfg |= S5P_CIO_OFFS_HOR(offset->cr_h);
+ cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
+ writel(cfg, dev->regs + S5P_CIICROFF);
+
+ /* Input original and real size. */
+ fimc_hw_set_in_dma_size(ctx);
+
+ /* Autoload is used currently only in FIFO mode. */
+ fimc_hw_en_autoload(dev, ctx->out_path == FIMC_LCDFIFO);
+
+ /* Set the input DMA to process single frame only. */
+ cfg = readl(dev->regs + S5P_MSCTRL);
+ cfg &= ~(S5P_MSCTRL_FLIP_MASK
+ | S5P_MSCTRL_INFORMAT_MASK
+ | S5P_MSCTRL_IN_BURST_COUNT_MASK
+ | S5P_MSCTRL_INPUT_MASK
+ | S5P_MSCTRL_C_INT_IN_MASK
+ | S5P_MSCTRL_2P_IN_ORDER_MASK);
+
+ cfg |= (S5P_MSCTRL_FRAME_COUNT(1) | S5P_MSCTRL_INPUT_MEMORY);
+
+ switch (frame->fmt->color) {
+ case S5P_FIMC_RGB565:
+ case S5P_FIMC_RGB666:
+ case S5P_FIMC_RGB888:
+ cfg |= S5P_MSCTRL_INFORMAT_RGB;
+ break;
+ case S5P_FIMC_YCBCR420:
+ cfg |= S5P_MSCTRL_INFORMAT_YCBCR420;
+
+ if (frame->fmt->planes_cnt == 2)
+ cfg |= ctx->in_order_2p | S5P_MSCTRL_C_INT_IN_2PLANE;
+ else
+ cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
+
+ break;
+ case S5P_FIMC_YCBYCR422:
+ case S5P_FIMC_YCRYCB422:
+ case S5P_FIMC_CBYCRY422:
+ case S5P_FIMC_CRYCBY422:
+ if (frame->fmt->planes_cnt == 1) {
+ cfg |= ctx->in_order_1p
+ | S5P_MSCTRL_INFORMAT_YCBCR422_1P;
+ } else {
+ cfg |= S5P_MSCTRL_INFORMAT_YCBCR422;
+
+ if (frame->fmt->planes_cnt == 2)
+ cfg |= ctx->in_order_2p
+ | S5P_MSCTRL_C_INT_IN_2PLANE;
+ else
+ cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Input DMA flip mode (and rotation).
+ * Do not allow simultaneous rotation and flipping.
+ */
+ if (!ctx->rotation && ctx->out_path == FIMC_LCDFIFO)
+ cfg |= fimc_hw_get_in_flip(ctx->flip);
+
+ writel(cfg, dev->regs + S5P_MSCTRL);
+
+ /* Input/output DMA linear/tiled mode. */
+ cfg = readl(dev->regs + S5P_CIDMAPARAM);
+ cfg &= ~S5P_CIDMAPARAM_TILE_MASK;
+
+ if (tiled_fmt(ctx->s_frame.fmt))
+ cfg |= S5P_CIDMAPARAM_R_64X32;
+
+ if (tiled_fmt(ctx->d_frame.fmt))
+ cfg |= S5P_CIDMAPARAM_W_64X32;
+
+ writel(cfg, dev->regs + S5P_CIDMAPARAM);
+}
+
+
+void fimc_hw_set_input_path(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ u32 cfg = readl(dev->regs + S5P_MSCTRL);
+ cfg &= ~S5P_MSCTRL_INPUT_MASK;
+
+ if (ctx->in_path == FIMC_DMA)
+ cfg |= S5P_MSCTRL_INPUT_MEMORY;
+ else
+ cfg |= S5P_MSCTRL_INPUT_EXTCAM;
+
+ writel(cfg, dev->regs + S5P_MSCTRL);
+}
+
+void fimc_hw_set_output_path(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~S5P_CISCCTRL_LCDPATHEN_FIFO;
+ if (ctx->out_path == FIMC_LCDFIFO)
+ cfg |= S5P_CISCCTRL_LCDPATHEN_FIFO;
+ writel(cfg, dev->regs + S5P_CISCCTRL);
+}
+
+void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+{
+ u32 cfg = 0;
+
+ cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
+ cfg |= S5P_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
+
+ writel(paddr->y, dev->regs + S5P_CIIYSA0);
+ writel(paddr->cb, dev->regs + S5P_CIICBSA0);
+ writel(paddr->cr, dev->regs + S5P_CIICRSA0);
+
+ cfg &= ~S5P_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_output_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+{
+ int i;
+ /* Set all the output register sets to point to a single video buffer. */
+ for (i = 0; i < FIMC_MAX_OUT_BUFS; i++) {
+ writel(paddr->y, dev->regs + S5P_CIOYSA(i));
+ writel(paddr->cb, dev->regs + S5P_CIOCBSA(i));
+ writel(paddr->cr, dev->regs + S5P_CIOCRSA(i));
+ }
+}
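Editor's note: for orientation, here is a sketch of how a caller might fill a struct fimc_addr for a 2-plane YCbCr 4:2:0 frame held in one contiguous buffer before handing it to the address setters above. The buffer layout and the helper name are assumptions of the example, not driver code.

/* Sketch only: deriving plane addresses for a 2-plane YCbCr 4:2:0 frame. */
static void fimc_fill_paddr_example(struct fimc_frame *frame, u32 dma_handle)
{
	u32 luma_size = frame->f_width * frame->f_height;

	frame->paddr.y = dma_handle;			/* Y plane */
	frame->paddr.cb = dma_handle + luma_size;	/* interleaved CbCr plane */
	frame->paddr.cr = 0;				/* unused for 2 planes */
}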
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
new file mode 100644
index 00000000000..a3cfe824db0
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/regs-fimc.h
@@ -0,0 +1,293 @@
+/*
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef REGS_FIMC_H_
+#define REGS_FIMC_H_
+
+#define S5P_CIOYSA(__x) (0x18 + (__x) * 4)
+#define S5P_CIOCBSA(__x) (0x28 + (__x) * 4)
+#define S5P_CIOCRSA(__x) (0x38 + (__x) * 4)
+
+/* Input source format */
+#define S5P_CISRCFMT 0x00
+#define S5P_CISRCFMT_ITU601_8BIT (1 << 31)
+#define S5P_CISRCFMT_ITU601_16BIT (1 << 29)
+#define S5P_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define S5P_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define S5P_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define S5P_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+#define S5P_CISRCFMT_HSIZE(x) ((x) << 16)
+#define S5P_CISRCFMT_VSIZE(x) ((x) << 0)
+
+/* Window offset */
+#define S5P_CIWDOFST 0x04
+#define S5P_CIWDOFST_WINOFSEN (1 << 31)
+#define S5P_CIWDOFST_CLROVFIY (1 << 30)
+#define S5P_CIWDOFST_CLROVRLB (1 << 29)
+#define S5P_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define S5P_CIWDOFST_CLROVFICB (1 << 15)
+#define S5P_CIWDOFST_CLROVFICR (1 << 14)
+#define S5P_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define S5P_CIWDOFST_WINVEROFST(x) ((x) << 0)
+#define S5P_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control */
+#define S5P_CIGCTRL 0x08
+#define S5P_CIGCTRL_SWRST (1 << 31)
+#define S5P_CIGCTRL_CAMRST_A (1 << 30)
+#define S5P_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define S5P_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define S5P_CIGCTRL_TESTPAT_NORMAL (0 << 27)
+#define S5P_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
+#define S5P_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
+#define S5P_CIGCTRL_TESTPAT_VER_INC (3 << 27)
+#define S5P_CIGCTRL_TESTPAT_MASK (3 << 27)
+#define S5P_CIGCTRL_TESTPAT_SHIFT (27)
+#define S5P_CIGCTRL_INVPOLPCLK (1 << 26)
+#define S5P_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define S5P_CIGCTRL_INVPOLHREF (1 << 24)
+#define S5P_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define S5P_CIGCTRL_HREF_MASK (1 << 21)
+#define S5P_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define S5P_CIGCTRL_IRQ_CLR (1 << 19)
+#define S5P_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define S5P_CIGCTRL_SHDW_DISABLE (1 << 12)
+#define S5P_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define S5P_CIGCTRL_CAMIF_SELWB (1 << 6)
+#define S5P_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define S5P_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define S5P_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset 2 */
+#define S5P_CIWDOFST2 0x14
+#define S5P_CIWDOFST2_HOROFF_MASK (0xfff << 16)
+#define S5P_CIWDOFST2_VEROFF_MASK (0xfff << 0)
+#define S5P_CIWDOFST2_HOROFF(x) ((x) << 16)
+#define S5P_CIWDOFST2_VEROFF(x) ((x) << 0)
+
+/* Output DMA Y plane start address */
+#define S5P_CIOYSA1 0x18
+#define S5P_CIOYSA2 0x1c
+#define S5P_CIOYSA3 0x20
+#define S5P_CIOYSA4 0x24
+
+/* Output DMA Cb plane start address */
+#define S5P_CIOCBSA1 0x28
+#define S5P_CIOCBSA2 0x2c
+#define S5P_CIOCBSA3 0x30
+#define S5P_CIOCBSA4 0x34
+
+/* Output DMA Cr plane start address */
+#define S5P_CIOCRSA1 0x38
+#define S5P_CIOCRSA2 0x3c
+#define S5P_CIOCRSA3 0x40
+#define S5P_CIOCRSA4 0x44
+
+/* Target image format */
+#define S5P_CITRGFMT 0x48
+#define S5P_CITRGFMT_INROT90 (1 << 31)
+#define S5P_CITRGFMT_YCBCR420 (0 << 29)
+#define S5P_CITRGFMT_YCBCR422 (1 << 29)
+#define S5P_CITRGFMT_YCBCR422_1P (2 << 29)
+#define S5P_CITRGFMT_RGB (3 << 29)
+#define S5P_CITRGFMT_FMT_MASK (3 << 29)
+#define S5P_CITRGFMT_HSIZE_MASK (0xfff << 16)
+#define S5P_CITRGFMT_FLIP_SHIFT (14)
+#define S5P_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define S5P_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define S5P_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define S5P_CITRGFMT_FLIP_180 (3 << 14)
+#define S5P_CITRGFMT_FLIP_MASK (3 << 14)
+#define S5P_CITRGFMT_OUTROT90 (1 << 13)
+#define S5P_CITRGFMT_VSIZE_MASK (0xfff << 0)
+#define S5P_CITRGFMT_HSIZE(x) ((x) << 16)
+#define S5P_CITRGFMT_VSIZE(x) ((x) << 0)
+
+/* Output DMA control */
+#define S5P_CIOCTRL 0x4c
+#define S5P_CIOCTRL_ORDER422_MASK (3 << 0)
+#define S5P_CIOCTRL_ORDER422_CRYCBY (0 << 0)
+#define S5P_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define S5P_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define S5P_CIOCTRL_ORDER422_YCBYCR (3 << 0)
+#define S5P_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define S5P_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define S5P_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define S5P_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define S5P_CIOCTRL_ORDER2P_SHIFT (24)
+#define S5P_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define S5P_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
+
+/* Pre-scaler control 1 */
+#define S5P_CISCPRERATIO 0x50
+#define S5P_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define S5P_CISCPRERATIO_HOR(x) ((x) << 16)
+#define S5P_CISCPRERATIO_VER(x) ((x) << 0)
+
+#define S5P_CISCPREDST 0x54
+#define S5P_CISCPREDST_WIDTH(x) ((x) << 16)
+#define S5P_CISCPREDST_HEIGHT(x) ((x) << 0)
+
+/* Main scaler control */
+#define S5P_CISCCTRL 0x58
+#define S5P_CISCCTRL_SCALERBYPASS (1 << 31)
+#define S5P_CISCCTRL_SCALEUP_H (1 << 30)
+#define S5P_CISCCTRL_SCALEUP_V (1 << 29)
+#define S5P_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define S5P_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define S5P_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define S5P_CISCCTRL_INTERLACE (1 << 25)
+#define S5P_CISCCTRL_SCALERSTART (1 << 15)
+#define S5P_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define S5P_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define S5P_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define S5P_CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define S5P_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define S5P_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define S5P_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define S5P_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define S5P_CISCCTRL_RGB_EXT (1 << 10)
+#define S5P_CISCCTRL_ONE2ONE (1 << 9)
+#define S5P_CISCCTRL_SC_HORRATIO(x) ((x) << 16)
+#define S5P_CISCCTRL_SC_VERRATIO(x) ((x) << 0)
+
+/* Target area */
+#define S5P_CITAREA 0x5c
+#define S5P_CITAREA_MASK 0x0fffffff
+
+/* General status */
+#define S5P_CISTATUS 0x64
+#define S5P_CISTATUS_OVFIY (1 << 31)
+#define S5P_CISTATUS_OVFICB (1 << 30)
+#define S5P_CISTATUS_OVFICR (1 << 29)
+#define S5P_CISTATUS_VSYNC (1 << 28)
+#define S5P_CISTATUS_WINOFF_EN (1 << 25)
+#define S5P_CISTATUS_IMGCPT_EN (1 << 22)
+#define S5P_CISTATUS_IMGCPT_SCEN (1 << 21)
+#define S5P_CISTATUS_VSYNC_A (1 << 20)
+#define S5P_CISTATUS_VSYNC_B (1 << 19)
+#define S5P_CISTATUS_OVRLB (1 << 18)
+#define S5P_CISTATUS_FRAME_END (1 << 17)
+#define S5P_CISTATUS_LASTCAPT_END (1 << 16)
+#define S5P_CISTATUS_VVALID_A (1 << 15)
+#define S5P_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture control */
+#define S5P_CIIMGCPT 0xc0
+#define S5P_CIIMGCPT_IMGCPTEN (1 << 31)
+#define S5P_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define S5P_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define S5P_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Frame capture sequence */
+#define S5P_CICPTSEQ 0xc4
+
+/* Image effect */
+#define S5P_CIIMGEFF 0xd0
+#define S5P_CIIMGEFF_IE_DISABLE (0 << 30)
+#define S5P_CIIMGEFF_IE_ENABLE (1 << 30)
+#define S5P_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define S5P_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define S5P_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define S5P_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define S5P_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define S5P_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define S5P_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define S5P_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define S5P_CIIMGEFF_FIN_MASK (7 << 26)
+#define S5P_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
+#define S5P_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define S5P_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+/* Input DMA Y/Cb/Cr plane start address 0 */
+#define S5P_CIIYSA0 0xd4
+#define S5P_CIICBSA0 0xd8
+#define S5P_CIICRSA0 0xdc
+
+/* Real input DMA image size */
+#define S5P_CIREAL_ISIZE 0xf8
+#define S5P_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
+#define S5P_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
+#define S5P_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define S5P_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+
+/* Input DMA control */
+#define S5P_MSCTRL 0xfc
+#define S5P_MSCTRL_IN_BURST_COUNT_MASK (3 << 24)
+#define S5P_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
+#define S5P_MSCTRL_2P_IN_ORDER_SHIFT 16
+#define S5P_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define S5P_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define S5P_MSCTRL_C_INT_IN_MASK (1 << 15)
+#define S5P_MSCTRL_FLIP_SHIFT 13
+#define S5P_MSCTRL_FLIP_MASK (3 << 13)
+#define S5P_MSCTRL_FLIP_NORMAL (0 << 13)
+#define S5P_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define S5P_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define S5P_MSCTRL_FLIP_180 (3 << 13)
+#define S5P_MSCTRL_ORDER422_SHIFT 4
+#define S5P_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define S5P_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define S5P_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define S5P_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define S5P_MSCTRL_ORDER422_MASK (3 << 4)
+#define S5P_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define S5P_MSCTRL_INPUT_MEMORY (1 << 3)
+#define S5P_MSCTRL_INPUT_MASK (1 << 3)
+#define S5P_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define S5P_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define S5P_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
+#define S5P_MSCTRL_INFORMAT_RGB (3 << 1)
+#define S5P_MSCTRL_INFORMAT_MASK (3 << 1)
+#define S5P_MSCTRL_ENVID (1 << 0)
+#define S5P_MSCTRL_FRAME_COUNT(x) ((x) << 24)
+
+/* Input DMA Y/Cb/Cr plane start address 1 */
+#define S5P_CIIYSA1 0x144
+#define S5P_CIICBSA1 0x148
+#define S5P_CIICRSA1 0x14c
+
+/* Output DMA Y/Cb/Cr offset */
+#define S5P_CIOYOFF 0x168
+#define S5P_CIOCBOFF 0x16c
+#define S5P_CIOCROFF 0x170
+
+/* Input DMA Y/Cb/Cr offset */
+#define S5P_CIIYOFF 0x174
+#define S5P_CIICBOFF 0x178
+#define S5P_CIICROFF 0x17c
+
+#define S5P_CIO_OFFS_VER(x) ((x) << 16)
+#define S5P_CIO_OFFS_HOR(x) ((x) << 0)
+
+/* Input DMA original image size */
+#define S5P_ORGISIZE 0x180
+
+/* Output DMA original image size */
+#define S5P_ORGOSIZE 0x184
+
+#define S5P_ORIG_SIZE_VER(x) ((x) << 16)
+#define S5P_ORIG_SIZE_HOR(x) ((x) << 0)
+
+/* Real output DMA image size (extension register) */
+#define S5P_CIEXTEN 0x188
+
+#define S5P_CIDMAPARAM 0x18c
+#define S5P_CIDMAPARAM_R_LINEAR (0 << 29)
+#define S5P_CIDMAPARAM_R_64X32 (3 << 29)
+#define S5P_CIDMAPARAM_W_LINEAR (0 << 13)
+#define S5P_CIDMAPARAM_W_64X32 (3 << 13)
+#define S5P_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
+
+/* MIPI CSI image format */
+#define S5P_CSIIMGFMT 0x194
+
+#endif /* REGS_FIMC_H_ */
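Editor's note: the field macros above are meant to be OR-ed together into a single register value. A minimal sketch, assuming an RGB destination of 640x480 with no mirroring (not part of the patch):

/* Sketch only: composing an S5P_CITRGFMT value with the macros above. */
static u32 fimc_citrgfmt_example(void)
{
	u32 cfg = 0;

	cfg |= S5P_CITRGFMT_RGB;		/* RGB output format */
	cfg |= S5P_CITRGFMT_HSIZE(640);		/* horizontal target size */
	cfg |= S5P_CITRGFMT_VSIZE(480);		/* vertical target size */
	cfg |= S5P_CITRGFMT_FLIP_NORMAL;	/* no mirroring */
	return cfg;
}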
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 76da7436868..ee963f4d01b 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -45,6 +45,7 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-i2c-drv.h>
#include <media/saa7115.h>
@@ -65,16 +66,19 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct saa711x_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+
+ struct {
+ /* chroma gain control cluster */
+ struct v4l2_ctrl *agc;
+ struct v4l2_ctrl *gain;
+ };
+
v4l2_std_id std;
int input;
int output;
int enable;
int radio;
- int bright;
- int contrast;
- int hue;
- int sat;
- int chroma_agc;
int width;
int height;
u32 ident;
@@ -90,6 +94,11 @@ static inline struct saa711x_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct saa711x_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct saa711x_state, hdl)->sd;
+}
+
/* ----------------------------------------------------------------------- */
static inline int saa711x_write(struct v4l2_subdev *sd, u8 reg, u8 value)
@@ -741,96 +750,53 @@ static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
return 0;
}
-static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct saa711x_state *state = to_state(sd);
- u8 val;
switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- if (ctrl->value < 0 || ctrl->value > 255) {
- v4l2_err(sd, "invalid brightness setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->bright = ctrl->value;
- saa711x_write(sd, R_0A_LUMA_BRIGHT_CNTL, state->bright);
- break;
-
- case V4L2_CID_CONTRAST:
- if (ctrl->value < 0 || ctrl->value > 127) {
- v4l2_err(sd, "invalid contrast setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->contrast = ctrl->value;
- saa711x_write(sd, R_0B_LUMA_CONTRAST_CNTL, state->contrast);
- break;
-
- case V4L2_CID_SATURATION:
- if (ctrl->value < 0 || ctrl->value > 127) {
- v4l2_err(sd, "invalid saturation setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->sat = ctrl->value;
- saa711x_write(sd, R_0C_CHROMA_SAT_CNTL, state->sat);
- break;
-
- case V4L2_CID_HUE:
- if (ctrl->value < -128 || ctrl->value > 127) {
- v4l2_err(sd, "invalid hue setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->hue = ctrl->value;
- saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, state->hue);
- break;
case V4L2_CID_CHROMA_AGC:
- val = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL);
- state->chroma_agc = ctrl->value;
- if (ctrl->value)
- val &= 0x7f;
- else
- val |= 0x80;
- saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, val);
+ /* chroma gain cluster */
+ if (state->agc->cur.val)
+ state->gain->cur.val =
+ saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
break;
- case V4L2_CID_CHROMA_GAIN:
- /* Chroma gain cannot be set when AGC is enabled */
- if (state->chroma_agc == 1)
- return -EINVAL;
- saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, ctrl->value | 0x80);
- break;
- default:
- return -EINVAL;
}
-
return 0;
}
-static int saa711x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int saa711x_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct saa711x_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- ctrl->value = state->bright;
+ saa711x_write(sd, R_0A_LUMA_BRIGHT_CNTL, ctrl->val);
break;
+
case V4L2_CID_CONTRAST:
- ctrl->value = state->contrast;
+ saa711x_write(sd, R_0B_LUMA_CONTRAST_CNTL, ctrl->val);
break;
+
case V4L2_CID_SATURATION:
- ctrl->value = state->sat;
+ saa711x_write(sd, R_0C_CHROMA_SAT_CNTL, ctrl->val);
break;
+
case V4L2_CID_HUE:
- ctrl->value = state->hue;
+ saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, ctrl->val);
break;
+
case V4L2_CID_CHROMA_AGC:
- ctrl->value = state->chroma_agc;
- break;
- case V4L2_CID_CHROMA_GAIN:
- ctrl->value = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
+ /* chroma gain cluster */
+ if (state->agc->val)
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val);
+ else
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val | 0x80);
+ v4l2_ctrl_activate(state->gain, !state->agc->val);
break;
+
default:
return -EINVAL;
}
@@ -1223,25 +1189,6 @@ static int saa711x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
}
-static int saa711x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- case V4L2_CID_CHROMA_AGC:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_CHROMA_GAIN:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 48);
- default:
- return -EINVAL;
- }
-}
-
static int saa711x_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct saa711x_state *state = to_state(sd);
@@ -1518,17 +1465,27 @@ static int saa711x_log_status(struct v4l2_subdev *sd)
break;
}
v4l2_info(sd, "Width, Height: %d, %d\n", state->width, state->height);
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops saa711x_ctrl_ops = {
+ .s_ctrl = saa711x_s_ctrl,
+ .g_volatile_ctrl = saa711x_g_volatile_ctrl,
+};
+
static const struct v4l2_subdev_core_ops saa711x_core_ops = {
.log_status = saa711x_log_status,
.g_chip_ident = saa711x_g_chip_ident,
- .g_ctrl = saa711x_g_ctrl,
- .s_ctrl = saa711x_s_ctrl,
- .queryctrl = saa711x_queryctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = saa711x_s_std,
.reset = saa711x_reset,
.s_gpio = saa711x_s_gpio,
@@ -1579,8 +1536,9 @@ static int saa711x_probe(struct i2c_client *client,
{
struct saa711x_state *state;
struct v4l2_subdev *sd;
- int i;
- char name[17];
+ struct v4l2_ctrl_handler *hdl;
+ int i;
+ char name[17];
char chip_id;
int autodetect = !id || id->driver_data == 1;
@@ -1619,15 +1577,38 @@ static int saa711x_probe(struct i2c_client *client,
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &saa711x_ops);
+
+ hdl = &state->hdl;
+ v4l2_ctrl_handler_init(hdl, 6);
+ /* add in ascending ID order */
+ v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ state->agc = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
+ V4L2_CID_CHROMA_AGC, 0, 1, 1, 1);
+ state->gain = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
+ V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40);
+ state->gain->is_volatile = 1;
+ sd->ctrl_handler = hdl;
+ if (hdl->error) {
+ int err = hdl->error;
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(state);
+ return err;
+ }
+ state->agc->flags |= V4L2_CTRL_FLAG_UPDATE;
+ v4l2_ctrl_cluster(2, &state->agc);
+
state->input = -1;
state->output = SAA7115_IPORT_ON;
state->enable = 1;
state->radio = 0;
- state->bright = 128;
- state->contrast = 64;
- state->hue = 0;
- state->sat = 64;
- state->chroma_agc = 1;
switch (chip_id) {
case '1':
state->ident = V4L2_IDENT_SAA7111;
@@ -1675,6 +1656,7 @@ static int saa711x_probe(struct i2c_client *client,
if (state->ident > V4L2_IDENT_SAA7111A)
saa711x_writeregs(sd, saa7115_init_misc);
saa711x_set_v4lstd(sd, V4L2_STD_NTSC);
+ v4l2_ctrl_handler_setup(hdl);
v4l2_dbg(1, debug, sd, "status: (1E) 0x%02x, (1F) 0x%02x\n",
saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC),
@@ -1689,6 +1671,7 @@ static int saa711x_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
kfree(to_state(sd));
return 0;
}
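Editor's note: with the conversion to the control framework, the chroma AGC/gain pair registered above behaves as a cluster; the gain control is volatile and manual gain only takes effect while AGC is off. A minimal user-space sketch of exercising it through the standard control ioctls follows; the device path and error handling are assumptions of the example, not part of the patch.

/* Sketch only: driving the chroma AGC/gain cluster from user space. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_manual_chroma_gain(const char *dev, int gain)
{
	struct v4l2_control agc = { .id = V4L2_CID_CHROMA_AGC, .value = 0 };
	struct v4l2_control g = { .id = V4L2_CID_CHROMA_GAIN, .value = gain };
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	/* Manual gain is only effective while AGC is disabled. */
	if (ioctl(fd, VIDIOC_S_CTRL, &agc) < 0 ||
	    ioctl(fd, VIDIOC_S_CTRL, &g) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}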
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 22bfd62c955..fda005e0167 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_SAA7134
tristate "Philips SAA7134 support"
depends on VIDEO_DEV && PCI && I2C && INPUT
select VIDEOBUF_DMA_SG
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select CRC32
diff --git a/drivers/media/video/saa717x.c b/drivers/media/video/saa717x.c
index 78d69950c00..45f8bfc1342 100644
--- a/drivers/media/video/saa717x.c
+++ b/drivers/media/video/saa717x.c
@@ -38,6 +38,7 @@
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver");
@@ -55,14 +56,11 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct saa717x_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
v4l2_std_id std;
int input;
int enable;
int radio;
- int bright;
- int contrast;
- int hue;
- int sat;
int playback;
int audio;
int tuner_audio_mode;
@@ -81,6 +79,11 @@ static inline struct saa717x_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct saa717x_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct saa717x_state, hdl)->sd;
+}
+
/* ----------------------------------------------------------------------- */
/* for audio mode */
@@ -774,29 +777,6 @@ static void set_audio_mode(struct v4l2_subdev *sd, int audio_mode)
saa717x_write(sd, 0x470, reg_set_audio_template[audio_mode][1]);
}
-/* write regs to video output level (bright,contrast,hue,sat) */
-static void set_video_output_level_regs(struct v4l2_subdev *sd,
- struct saa717x_state *decoder)
-{
- /* brightness ffh (bright) - 80h (ITU level) - 00h (dark) */
- saa717x_write(sd, 0x10a, decoder->bright);
-
- /* contrast 7fh (max: 1.984) - 44h (ITU) - 40h (1.0) -
- 0h (luminance off) 40: i2c dump
- c0h (-1.0 inverse chrominance)
- 80h (-2.0 inverse chrominance) */
- saa717x_write(sd, 0x10b, decoder->contrast);
-
- /* saturation? 7fh(max)-40h(ITU)-0h(color off)
- c0h (-1.0 inverse chrominance)
- 80h (-2.0 inverse chrominance) */
- saa717x_write(sd, 0x10c, decoder->sat);
-
- /* color hue (phase) control
- 7fh (+178.6) - 0h (0 normal) - 80h (-180.0) */
- saa717x_write(sd, 0x10d, decoder->hue);
-}
-
/* write regs to set audio volume, bass and treble */
static int set_audio_regs(struct v4l2_subdev *sd,
struct saa717x_state *decoder)
@@ -829,9 +809,9 @@ static int set_audio_regs(struct v4l2_subdev *sd,
saa717x_write(sd, 0x480, val);
- /* bass and treble; go to another function */
/* set bass and treble */
- val = decoder->audio_main_bass | (decoder->audio_main_treble << 8);
+ val = decoder->audio_main_bass & 0x1f;
+ val |= (decoder->audio_main_treble & 0x1f) << 5;
saa717x_write(sd, 0x488, val);
return 0;
}
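Editor's note: the new packing above folds both 5-bit signed fields into a single register write, bass in bits 4:0 and treble in bits 9:5. The same computation as a standalone sketch with a worked value (helper name is hypothetical, not patch content):

/* Sketch only: bass/treble packing as used above. */
static unsigned int saa717x_pack_bass_treble(int bass, int treble)
{
	unsigned int val;

	val = bass & 0x1f;		/* bits 4:0, e.g. -4 -> 0x1c */
	val |= (treble & 0x1f) << 5;	/* bits 9:5, e.g.  7 -> 0x0e0 */
	return val;			/* -4 / 7 packs to 0x0fc */
}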
@@ -893,218 +873,55 @@ static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale)
saa717x_write(sd, 0x71 + task_shift, yscale >> 8);
}
-static int saa717x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct saa717x_state *state = to_state(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- if (ctrl->value < 0 || ctrl->value > 255) {
- v4l2_err(sd, "invalid brightness setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->bright = ctrl->value;
- v4l2_dbg(1, debug, sd, "bright:%d\n", state->bright);
- saa717x_write(sd, 0x10a, state->bright);
- break;
-
- case V4L2_CID_CONTRAST:
- if (ctrl->value < 0 || ctrl->value > 127) {
- v4l2_err(sd, "invalid contrast setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->contrast = ctrl->value;
- v4l2_dbg(1, debug, sd, "contrast:%d\n", state->contrast);
- saa717x_write(sd, 0x10b, state->contrast);
- break;
-
- case V4L2_CID_SATURATION:
- if (ctrl->value < 0 || ctrl->value > 127) {
- v4l2_err(sd, "invalid saturation setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->sat = ctrl->value;
- v4l2_dbg(1, debug, sd, "sat:%d\n", state->sat);
- saa717x_write(sd, 0x10c, state->sat);
- break;
-
- case V4L2_CID_HUE:
- if (ctrl->value < -128 || ctrl->value > 127) {
- v4l2_err(sd, "invalid hue setting %d\n", ctrl->value);
- return -ERANGE;
- }
-
- state->hue = ctrl->value;
- v4l2_dbg(1, debug, sd, "hue:%d\n", state->hue);
- saa717x_write(sd, 0x10d, state->hue);
- break;
-
- case V4L2_CID_AUDIO_MUTE:
- state->audio_main_mute = ctrl->value;
- set_audio_regs(sd, state);
- break;
-
- case V4L2_CID_AUDIO_VOLUME:
- state->audio_main_volume = ctrl->value;
- set_audio_regs(sd, state);
- break;
-
- case V4L2_CID_AUDIO_BALANCE:
- state->audio_main_balance = ctrl->value;
- set_audio_regs(sd, state);
- break;
-
- case V4L2_CID_AUDIO_TREBLE:
- state->audio_main_treble = ctrl->value;
- set_audio_regs(sd, state);
- break;
-
- case V4L2_CID_AUDIO_BASS:
- state->audio_main_bass = ctrl->value;
- set_audio_regs(sd, state);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int saa717x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int saa717x_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct saa717x_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- ctrl->value = state->bright;
- break;
+ saa717x_write(sd, 0x10a, ctrl->val);
+ return 0;
case V4L2_CID_CONTRAST:
- ctrl->value = state->contrast;
- break;
+ saa717x_write(sd, 0x10b, ctrl->val);
+ return 0;
case V4L2_CID_SATURATION:
- ctrl->value = state->sat;
- break;
+ saa717x_write(sd, 0x10c, ctrl->val);
+ return 0;
case V4L2_CID_HUE:
- ctrl->value = state->hue;
- break;
+ saa717x_write(sd, 0x10d, ctrl->val);
+ return 0;
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = state->audio_main_mute;
+ state->audio_main_mute = ctrl->val;
break;
case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = state->audio_main_volume;
+ state->audio_main_volume = ctrl->val;
break;
case V4L2_CID_AUDIO_BALANCE:
- ctrl->value = state->audio_main_balance;
+ state->audio_main_balance = ctrl->val;
break;
case V4L2_CID_AUDIO_TREBLE:
- ctrl->value = state->audio_main_treble;
+ state->audio_main_treble = ctrl->val;
break;
case V4L2_CID_AUDIO_BASS:
- ctrl->value = state->audio_main_bass;
+ state->audio_main_bass = ctrl->val;
break;
default:
- return -EINVAL;
+ return 0;
}
-
+ set_audio_regs(sd, state);
return 0;
}
-static struct v4l2_queryctrl saa717x_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 64,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 64,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_AUDIO_VOLUME,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Volume",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535 / 100,
- .default_value = 58880,
- .flags = 0,
- }, {
- .id = V4L2_CID_AUDIO_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Balance",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535 / 100,
- .default_value = 32768,
- .flags = 0,
- }, {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = 0,
- }, {
- .id = V4L2_CID_AUDIO_BASS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Bass",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535 / 100,
- .default_value = 32768,
- }, {
- .id = V4L2_CID_AUDIO_TREBLE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Treble",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535 / 100,
- .default_value = 32768,
- },
-};
-
static int saa717x_s_video_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@@ -1158,18 +975,6 @@ static int saa717x_s_video_routing(struct v4l2_subdev *sd,
return 0;
}
-static int saa717x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(saa717x_qctrl); i++)
- if (qc->id && qc->id == saa717x_qctrl[i].id) {
- memcpy(qc, &saa717x_qctrl[i], sizeof(*qc));
- return 0;
- }
- return -EINVAL;
-}
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int saa717x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
@@ -1386,17 +1191,34 @@ static int saa717x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
}
+static int saa717x_log_status(struct v4l2_subdev *sd)
+{
+ struct saa717x_state *state = to_state(sd);
+
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops saa717x_ctrl_ops = {
+ .s_ctrl = saa717x_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops saa717x_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = saa717x_g_register,
.s_register = saa717x_s_register,
#endif
- .queryctrl = saa717x_queryctrl,
- .g_ctrl = saa717x_g_ctrl,
- .s_ctrl = saa717x_s_ctrl,
.s_std = saa717x_s_std,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
+ .log_status = saa717x_log_status,
};
static const struct v4l2_subdev_tuner_ops saa717x_tuner_ops = {
@@ -1432,6 +1254,7 @@ static int saa717x_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct saa717x_state *decoder;
+ struct v4l2_ctrl_handler *hdl;
struct v4l2_subdev *sd;
u8 id = 0;
char *p = "";
@@ -1467,16 +1290,41 @@ static int saa717x_probe(struct i2c_client *client,
p = "saa7171";
v4l2_info(sd, "%s found @ 0x%x (%s)\n", p,
client->addr << 1, client->adapter->name);
+
+ hdl = &decoder->hdl;
+ v4l2_ctrl_handler_init(hdl, 9);
+ /* add in ascending ID order */
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 68);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 42000);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_AUDIO_BASS, -16, 15, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_AUDIO_TREBLE, -16, 15, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ sd->ctrl_handler = hdl;
+ if (hdl->error) {
+ int err = hdl->error;
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(decoder);
+ return err;
+ }
+
decoder->std = V4L2_STD_NTSC;
decoder->input = -1;
decoder->enable = 1;
- /* tune these parameters */
- decoder->bright = 0x80;
- decoder->contrast = 0x44;
- decoder->sat = 0x40;
- decoder->hue = 0x00;
-
/* FIXME!! */
decoder->playback = 0; /* initially capture mode used */
decoder->audio = 1; /* DECODER_AUDIO_48_KHZ */
@@ -1487,23 +1335,13 @@ static int saa717x_probe(struct i2c_client *client,
/* set volume, bass and treble */
decoder->audio_main_vol_l = 6;
decoder->audio_main_vol_r = 6;
- decoder->audio_main_bass = 0;
- decoder->audio_main_treble = 0;
- decoder->audio_main_mute = 0;
- decoder->audio_main_balance = 32768;
- /* normalize (24 to -40 (not -84) -> 65535 to 0) */
- decoder->audio_main_volume =
- (decoder->audio_main_vol_r + 41) * 65535 / (24 - (-40));
v4l2_dbg(1, debug, sd, "writing init values\n");
/* FIXME!! */
saa717x_write_regs(sd, reg_init_initialize);
- set_video_output_level_regs(sd, decoder);
- /* set bass,treble to 0db 20041101 K.Ohta */
- decoder->audio_main_bass = 0;
- decoder->audio_main_treble = 0;
- set_audio_regs(sd, decoder);
+
+ v4l2_ctrl_handler_setup(hdl);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(2*HZ);
@@ -1515,6 +1353,7 @@ static int saa717x_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
kfree(to_state(sd));
return 0;
}
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index f2032939fd4..a499cacec1f 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -779,9 +779,12 @@ static int soc_camera_s_crop(struct file *file, void *fh,
ret = ici->ops->get_crop(icd, &current_crop);
/* Prohibit window size change with initialised buffers */
- if (icf->vb_vidq.bufs[0] && !ret &&
- (a->c.width != current_crop.c.width ||
- a->c.height != current_crop.c.height)) {
+ if (ret < 0) {
+ dev_err(&icd->dev,
+ "S_CROP denied: getting current crop failed\n");
+ } else if (icf->vb_vidq.bufs[0] &&
+ (a->c.width != current_crop.c.width ||
+ a->c.height != current_crop.c.height)) {
dev_err(&icd->dev,
"S_CROP denied: queue initialised and sizes differ\n");
ret = -EBUSY;
diff --git a/drivers/media/video/tlg2300/Kconfig b/drivers/media/video/tlg2300/Kconfig
index 2c29ec659b4..1686ebfa695 100644
--- a/drivers/media/video/tlg2300/Kconfig
+++ b/drivers/media/video/tlg2300/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_TLG2300
depends on VIDEO_DEV && I2C && INPUT && SND && DVB_CORE
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEOBUF_VMALLOC
select SND_PCM
select VIDEOBUF_DVB
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 8085ac39244..48f5c76ab52 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -179,7 +179,7 @@ static const struct i2c_reg_value tvp7002_init_default[] = {
/* Register parameters for 480P */
static const struct i2c_reg_value tvp7002_parms_480P[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x35, TVP7002_WRITE },
- { TVP7002_HPLL_FDBK_DIV_LSBS, 0x0a, TVP7002_WRITE },
+ { TVP7002_HPLL_FDBK_DIV_LSBS, 0xa0, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x02, TVP7002_WRITE },
{ TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x91, TVP7002_WRITE },
@@ -223,7 +223,7 @@ static const struct i2c_reg_value tvp7002_parms_576P[] = {
/* Register parameters for 1080I60 */
static const struct i2c_reg_value tvp7002_parms_1080I60[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x89, TVP7002_WRITE },
- { TVP7002_HPLL_FDBK_DIV_LSBS, 0x08, TVP7002_WRITE },
+ { TVP7002_HPLL_FDBK_DIV_LSBS, 0x80, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
{ TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
@@ -245,7 +245,7 @@ static const struct i2c_reg_value tvp7002_parms_1080I60[] = {
/* Register parameters for 1080P60 */
static const struct i2c_reg_value tvp7002_parms_1080P60[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x89, TVP7002_WRITE },
- { TVP7002_HPLL_FDBK_DIV_LSBS, 0x08, TVP7002_WRITE },
+ { TVP7002_HPLL_FDBK_DIV_LSBS, 0x80, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0xE0, TVP7002_WRITE },
{ TVP7002_HPLL_PHASE_SEL, 0x14, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x06, TVP7002_WRITE },
@@ -289,7 +289,7 @@ static const struct i2c_reg_value tvp7002_parms_1080I50[] = {
/* Register parameters for 720P60 */
static const struct i2c_reg_value tvp7002_parms_720P60[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x67, TVP7002_WRITE },
- { TVP7002_HPLL_FDBK_DIV_LSBS, 0x02, TVP7002_WRITE },
+ { TVP7002_HPLL_FDBK_DIV_LSBS, 0x20, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0xa0, TVP7002_WRITE },
{ TVP7002_HPLL_PHASE_SEL, 0x16, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x47, TVP7002_WRITE },
@@ -311,7 +311,7 @@ static const struct i2c_reg_value tvp7002_parms_720P60[] = {
/* Register parameters for 720P50 */
static const struct i2c_reg_value tvp7002_parms_720P50[] = {
{ TVP7002_HPLL_FDBK_DIV_MSBS, 0x7b, TVP7002_WRITE },
- { TVP7002_HPLL_FDBK_DIV_LSBS, 0x0c, TVP7002_WRITE },
+ { TVP7002_HPLL_FDBK_DIV_LSBS, 0xc0, TVP7002_WRITE },
{ TVP7002_HPLL_CRTL, 0x98, TVP7002_WRITE },
{ TVP7002_HPLL_PHASE_SEL, 0x16, TVP7002_WRITE },
{ TVP7002_AVID_START_PIXEL_LSBS, 0x47, TVP7002_WRITE },
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 5ac37c6c431..f1fcf974496 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -282,19 +282,15 @@ static void usbvideo_OverlayChar(struct uvd *uvd, struct usbvideo_frame *frame,
};
unsigned short digit;
int ix, iy;
+ int value;
if ((uvd == NULL) || (frame == NULL))
return;
- if (ch >= '0' && ch <= '9')
- ch -= '0';
- else if (ch >= 'A' && ch <= 'F')
- ch = 10 + (ch - 'A');
- else if (ch >= 'a' && ch <= 'f')
- ch = 10 + (ch - 'a');
- else
+ value = hex_to_bin(ch);
+ if (value < 0)
return;
- digit = digits[ch];
+ digit = digits[value];
for (iy=0; iy < 5; iy++) {
for (ix=0; ix < 3; ix++) {
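
The open-coded hex-digit ladder above is replaced by hex_to_bin() from the kernel's lib/hexdump.c, which returns 0..15 for a valid hexadecimal character and a negative value otherwise. A minimal sketch of the same pattern (function and variable names are illustrative, not part of the patch):

	#include <linux/kernel.h>	/* hex_to_bin() */

	static int overlay_digit_index(char ch)
	{
		int value = hex_to_bin(ch);	/* '0'-'9', 'a'-'f', 'A'-'F' -> 0..15 */

		if (value < 0)			/* anything else: bail out, as above */
			return -EINVAL;
		return value;			/* safe index into the digits[] table */
	}
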
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 7eaf99b22a4..8bdd940f32e 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -2145,6 +2145,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
+ /* Miricle 307K */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x17dc,
+ .idProduct = 0x0202,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_STREAM_NO_FID },
/* Lenovo Thinkpad SL400/SL500 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 133c78d113a..e9928a41508 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -78,12 +78,14 @@
*
*/
-void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
+ int drop_corrupted)
{
mutex_init(&queue->mutex);
spin_lock_init(&queue->irqlock);
INIT_LIST_HEAD(&queue->mainqueue);
INIT_LIST_HEAD(&queue->irqqueue);
+ queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
queue->type = type;
}
@@ -435,8 +437,10 @@ int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
uvc_queue_cancel(queue, 0);
INIT_LIST_HEAD(&queue->mainqueue);
- for (i = 0; i < queue->count; ++i)
+ for (i = 0; i < queue->count; ++i) {
+ queue->buffer[i].error = 0;
queue->buffer[i].state = UVC_BUF_STATE_IDLE;
+ }
queue->flags &= ~UVC_QUEUE_STREAMING;
}
@@ -488,8 +492,8 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *nextbuf;
unsigned long flags;
- if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
- buf->buf.length != buf->buf.bytesused) {
+ if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
+ buf->error = 0;
buf->state = UVC_BUF_STATE_QUEUED;
buf->buf.bytesused = 0;
return buf;
@@ -497,6 +501,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
spin_lock_irqsave(&queue->irqlock, flags);
list_del(&buf->queue);
+ buf->error = 0;
buf->state = UVC_BUF_STATE_DONE;
if (!list_empty(&queue->irqqueue))
nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 53f3ef4635e..e27cf0d3b6d 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -555,6 +555,9 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
if (urb->iso_frame_desc[i].status < 0) {
uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
"lost (%d).\n", urb->iso_frame_desc[i].status);
+ /* Mark the buffer as faulty. */
+ if (buf != NULL)
+ buf->error = 1;
continue;
}
@@ -579,8 +582,14 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_end(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (buf->state == UVC_BUF_STATE_READY)
+ if (buf->state == UVC_BUF_STATE_READY) {
+ if (buf->buf.length != buf->buf.bytesused &&
+ !(stream->cur_format->flags &
+ UVC_FMT_FLAG_COMPRESSED))
+ buf->error = 1;
+
buf = uvc_queue_next_buffer(&stream->queue, buf);
+ }
}
}
@@ -1104,7 +1113,7 @@ int uvc_video_init(struct uvc_streaming *stream)
atomic_set(&stream->active, 0);
/* Initialize the video buffers queue. */
- uvc_queue_init(&stream->queue, stream->type);
+ uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param);
/* Alternate setting 0 should be the default, yet the XBox Live Vision
* Cam (and possibly other devices) crash or otherwise misbehave if
@@ -1197,12 +1206,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
return 0;
}
- if ((stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED) ||
- uvc_no_drop_param)
- stream->queue.flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
- else
- stream->queue.flags |= UVC_QUEUE_DROP_INCOMPLETE;
-
ret = uvc_queue_enable(&stream->queue, 1);
if (ret < 0)
return ret;
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index ac272456fbf..bdacf3beabf 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -379,11 +379,12 @@ struct uvc_buffer {
struct list_head queue;
wait_queue_head_t wait;
enum uvc_buffer_state state;
+ unsigned int error;
};
#define UVC_QUEUE_STREAMING (1 << 0)
#define UVC_QUEUE_DISCONNECTED (1 << 1)
-#define UVC_QUEUE_DROP_INCOMPLETE (1 << 2)
+#define UVC_QUEUE_DROP_CORRUPTED (1 << 2)
struct uvc_video_queue {
enum v4l2_buf_type type;
@@ -562,7 +563,7 @@ extern struct uvc_driver uvc_driver;
/* Video buffers queue management. */
extern void uvc_queue_init(struct uvc_video_queue *queue,
- enum v4l2_buf_type type);
+ enum v4l2_buf_type type, int drop_corrupted);
extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
unsigned int nbuffers, unsigned int buflength);
extern int uvc_free_buffers(struct uvc_video_queue *queue);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 4e53b0b3339..8ee1179be92 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -62,6 +62,7 @@
#define __OLD_VIDIOC_ /* To allow fixing old calls*/
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
#include <linux/videodev2.h>
@@ -172,487 +173,17 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
}
EXPORT_SYMBOL(v4l2_ctrl_check);
-/* Returns NULL or a character pointer array containing the menu for
- the given control ID. The pointer array ends with a NULL pointer.
- An empty string signifies a menu entry that is invalid. This allows
- drivers to disable certain options if it is not supported. */
-const char **v4l2_ctrl_get_menu(u32 id)
-{
- static const char *mpeg_audio_sampling_freq[] = {
- "44.1 kHz",
- "48 kHz",
- "32 kHz",
- NULL
- };
- static const char *mpeg_audio_encoding[] = {
- "MPEG-1/2 Layer I",
- "MPEG-1/2 Layer II",
- "MPEG-1/2 Layer III",
- "MPEG-2/4 AAC",
- "AC-3",
- NULL
- };
- static const char *mpeg_audio_l1_bitrate[] = {
- "32 kbps",
- "64 kbps",
- "96 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "288 kbps",
- "320 kbps",
- "352 kbps",
- "384 kbps",
- "416 kbps",
- "448 kbps",
- NULL
- };
- static const char *mpeg_audio_l2_bitrate[] = {
- "32 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- "384 kbps",
- NULL
- };
- static const char *mpeg_audio_l3_bitrate[] = {
- "32 kbps",
- "40 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- NULL
- };
- static const char *mpeg_audio_ac3_bitrate[] = {
- "32 kbps",
- "40 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- "384 kbps",
- "448 kbps",
- "512 kbps",
- "576 kbps",
- "640 kbps",
- NULL
- };
- static const char *mpeg_audio_mode[] = {
- "Stereo",
- "Joint Stereo",
- "Dual",
- "Mono",
- NULL
- };
- static const char *mpeg_audio_mode_extension[] = {
- "Bound 4",
- "Bound 8",
- "Bound 12",
- "Bound 16",
- NULL
- };
- static const char *mpeg_audio_emphasis[] = {
- "No Emphasis",
- "50/15 us",
- "CCITT J17",
- NULL
- };
- static const char *mpeg_audio_crc[] = {
- "No CRC",
- "16-bit CRC",
- NULL
- };
- static const char *mpeg_video_encoding[] = {
- "MPEG-1",
- "MPEG-2",
- "MPEG-4 AVC",
- NULL
- };
- static const char *mpeg_video_aspect[] = {
- "1x1",
- "4x3",
- "16x9",
- "2.21x1",
- NULL
- };
- static const char *mpeg_video_bitrate_mode[] = {
- "Variable Bitrate",
- "Constant Bitrate",
- NULL
- };
- static const char *mpeg_stream_type[] = {
- "MPEG-2 Program Stream",
- "MPEG-2 Transport Stream",
- "MPEG-1 System Stream",
- "MPEG-2 DVD-compatible Stream",
- "MPEG-1 VCD-compatible Stream",
- "MPEG-2 SVCD-compatible Stream",
- NULL
- };
- static const char *mpeg_stream_vbi_fmt[] = {
- "No VBI",
- "Private packet, IVTV format",
- NULL
- };
- static const char *camera_power_line_frequency[] = {
- "Disabled",
- "50 Hz",
- "60 Hz",
- NULL
- };
- static const char *camera_exposure_auto[] = {
- "Auto Mode",
- "Manual Mode",
- "Shutter Priority Mode",
- "Aperture Priority Mode",
- NULL
- };
- static const char *colorfx[] = {
- "None",
- "Black & White",
- "Sepia",
- "Negative",
- "Emboss",
- "Sketch",
- "Sky blue",
- "Grass green",
- "Skin whiten",
- "Vivid",
- NULL
- };
- static const char *tune_preemphasis[] = {
- "No preemphasis",
- "50 useconds",
- "75 useconds",
- NULL,
- };
-
- switch (id) {
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
- return mpeg_audio_sampling_freq;
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- return mpeg_audio_encoding;
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
- return mpeg_audio_l1_bitrate;
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
- return mpeg_audio_l2_bitrate;
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
- return mpeg_audio_l3_bitrate;
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
- return mpeg_audio_ac3_bitrate;
- case V4L2_CID_MPEG_AUDIO_MODE:
- return mpeg_audio_mode;
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
- return mpeg_audio_mode_extension;
- case V4L2_CID_MPEG_AUDIO_EMPHASIS:
- return mpeg_audio_emphasis;
- case V4L2_CID_MPEG_AUDIO_CRC:
- return mpeg_audio_crc;
- case V4L2_CID_MPEG_VIDEO_ENCODING:
- return mpeg_video_encoding;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- return mpeg_video_aspect;
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- return mpeg_video_bitrate_mode;
- case V4L2_CID_MPEG_STREAM_TYPE:
- return mpeg_stream_type;
- case V4L2_CID_MPEG_STREAM_VBI_FMT:
- return mpeg_stream_vbi_fmt;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- return camera_power_line_frequency;
- case V4L2_CID_EXPOSURE_AUTO:
- return camera_exposure_auto;
- case V4L2_CID_COLORFX:
- return colorfx;
- case V4L2_CID_TUNE_PREEMPHASIS:
- return tune_preemphasis;
- default:
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_menu);
-
-/* Return the control name. */
-const char *v4l2_ctrl_get_name(u32 id)
-{
- switch (id) {
- /* USER controls */
- case V4L2_CID_USER_CLASS: return "User Controls";
- case V4L2_CID_BRIGHTNESS: return "Brightness";
- case V4L2_CID_CONTRAST: return "Contrast";
- case V4L2_CID_SATURATION: return "Saturation";
- case V4L2_CID_HUE: return "Hue";
- case V4L2_CID_AUDIO_VOLUME: return "Volume";
- case V4L2_CID_AUDIO_BALANCE: return "Balance";
- case V4L2_CID_AUDIO_BASS: return "Bass";
- case V4L2_CID_AUDIO_TREBLE: return "Treble";
- case V4L2_CID_AUDIO_MUTE: return "Mute";
- case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
- case V4L2_CID_BLACK_LEVEL: return "Black Level";
- case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
- case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
- case V4L2_CID_RED_BALANCE: return "Red Balance";
- case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
- case V4L2_CID_GAMMA: return "Gamma";
- case V4L2_CID_EXPOSURE: return "Exposure";
- case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
- case V4L2_CID_GAIN: return "Gain";
- case V4L2_CID_HFLIP: return "Horizontal Flip";
- case V4L2_CID_VFLIP: return "Vertical Flip";
- case V4L2_CID_HCENTER: return "Horizontal Center";
- case V4L2_CID_VCENTER: return "Vertical Center";
- case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
- case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
- case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
- case V4L2_CID_SHARPNESS: return "Sharpness";
- case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
- case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
- case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
- case V4L2_CID_COLOR_KILLER: return "Color Killer";
- case V4L2_CID_COLORFX: return "Color Effects";
- case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
- case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
- case V4L2_CID_ROTATE: return "Rotate";
- case V4L2_CID_BG_COLOR: return "Background Color";
-
- /* MPEG controls */
- case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
- case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
- case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
- case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
- case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
- case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
- case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
- case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
- case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
- case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
- case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
- case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
- case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
- case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
- case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
- case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
- case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
- case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
- case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
- case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
- case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
- case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
- case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
- case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
-
- /* CAMERA controls */
- case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
- case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
- case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
- case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
- case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
- case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
- case V4L2_CID_PAN_RESET: return "Pan, Reset";
- case V4L2_CID_TILT_RESET: return "Tilt, Reset";
- case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
- case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
- case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
- case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
- case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
- case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
- case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
- case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
- case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
- case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
- case V4L2_CID_PRIVACY: return "Privacy";
-
- /* FM Radio Modulator control */
- case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
- case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
- case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
- case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
- case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
- case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
- case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
- case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
- case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
- case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
- case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
- case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
- case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
- case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
- case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
- case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
- case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings";
- case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
-
- default:
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_name);
-
/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def)
{
- const char *name = v4l2_ctrl_get_name(qctrl->id);
+ const char *name;
+
+ v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
+ &min, &max, &step, &def, &qctrl->flags);
- qctrl->flags = 0;
if (name == NULL)
return -EINVAL;
- switch (qctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_LOUDNESS:
- case V4L2_CID_AUTO_WHITE_BALANCE:
- case V4L2_CID_AUTOGAIN:
- case V4L2_CID_HFLIP:
- case V4L2_CID_VFLIP:
- case V4L2_CID_HUE_AUTO:
- case V4L2_CID_CHROMA_AGC:
- case V4L2_CID_COLOR_KILLER:
- case V4L2_CID_MPEG_AUDIO_MUTE:
- case V4L2_CID_MPEG_VIDEO_MUTE:
- case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
- case V4L2_CID_MPEG_VIDEO_PULLDOWN:
- case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
- case V4L2_CID_FOCUS_AUTO:
- case V4L2_CID_PRIVACY:
- case V4L2_CID_AUDIO_LIMITER_ENABLED:
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
- case V4L2_CID_PILOT_TONE_ENABLED:
- qctrl->type = V4L2_CTRL_TYPE_BOOLEAN;
- min = 0;
- max = step = 1;
- break;
- case V4L2_CID_PAN_RESET:
- case V4L2_CID_TILT_RESET:
- qctrl->type = V4L2_CTRL_TYPE_BUTTON;
- qctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- min = max = step = def = 0;
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
- case V4L2_CID_MPEG_AUDIO_MODE:
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
- case V4L2_CID_MPEG_AUDIO_EMPHASIS:
- case V4L2_CID_MPEG_AUDIO_CRC:
- case V4L2_CID_MPEG_VIDEO_ENCODING:
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- case V4L2_CID_MPEG_STREAM_TYPE:
- case V4L2_CID_MPEG_STREAM_VBI_FMT:
- case V4L2_CID_EXPOSURE_AUTO:
- case V4L2_CID_COLORFX:
- case V4L2_CID_TUNE_PREEMPHASIS:
- qctrl->type = V4L2_CTRL_TYPE_MENU;
- step = 1;
- break;
- case V4L2_CID_RDS_TX_PS_NAME:
- case V4L2_CID_RDS_TX_RADIO_TEXT:
- qctrl->type = V4L2_CTRL_TYPE_STRING;
- break;
- case V4L2_CID_USER_CLASS:
- case V4L2_CID_CAMERA_CLASS:
- case V4L2_CID_MPEG_CLASS:
- case V4L2_CID_FM_TX_CLASS:
- qctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS;
- qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- min = max = step = def = 0;
- break;
- case V4L2_CID_BG_COLOR:
- qctrl->type = V4L2_CTRL_TYPE_INTEGER;
- step = 1;
- min = 0;
- /* Max is calculated as RGB888 that is 2^24 */
- max = 0xFFFFFF;
- break;
- default:
- qctrl->type = V4L2_CTRL_TYPE_INTEGER;
- break;
- }
- switch (qctrl->id) {
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- case V4L2_CID_MPEG_AUDIO_MODE:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- case V4L2_CID_MPEG_STREAM_TYPE:
- qctrl->flags |= V4L2_CTRL_FLAG_UPDATE;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- case V4L2_CID_HUE:
- case V4L2_CID_RED_BALANCE:
- case V4L2_CID_BLUE_BALANCE:
- case V4L2_CID_GAMMA:
- case V4L2_CID_SHARPNESS:
- case V4L2_CID_CHROMA_GAIN:
- case V4L2_CID_RDS_TX_DEVIATION:
- case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
- case V4L2_CID_AUDIO_LIMITER_DEVIATION:
- case V4L2_CID_AUDIO_COMPRESSION_GAIN:
- case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
- case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
- case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
- case V4L2_CID_PILOT_TONE_DEVIATION:
- case V4L2_CID_PILOT_TONE_FREQUENCY:
- case V4L2_CID_TUNE_POWER_LEVEL:
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
- break;
- case V4L2_CID_PAN_RELATIVE:
- case V4L2_CID_TILT_RELATIVE:
- case V4L2_CID_FOCUS_RELATIVE:
- case V4L2_CID_IRIS_RELATIVE:
- case V4L2_CID_ZOOM_RELATIVE:
- qctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- break;
- }
qctrl->minimum = min;
qctrl->maximum = max;
qctrl->step = step;
@@ -850,7 +381,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
/* Create the i2c client */
if (info->addr == 0 && probe_addrs)
- client = i2c_new_probed_device(adapter, info, probe_addrs);
+ client = i2c_new_probed_device(adapter, info, probe_addrs,
+ NULL);
else
client = i2c_new_device(adapter, info);
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index d2f20c2acae..073f01390cd 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -228,11 +228,6 @@ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (file->f_op->unlocked_ioctl)
ret = file->f_op->unlocked_ioctl(file, cmd, arg);
- else if (file->f_op->ioctl) {
- lock_kernel();
- ret = file->f_op->ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- unlock_kernel();
- }
return ret;
}
@@ -973,7 +968,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret = -ENOIOCTLCMD;
- if (!file->f_op->ioctl && !file->f_op->unlocked_ioctl)
+ if (!file->f_op->unlocked_ioctl)
return ret;
switch (cmd) {
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
new file mode 100644
index 00000000000..ea8d32cd425
--- /dev/null
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -0,0 +1,1852 @@
+/*
+ V4L2 controls framework implementation.
+
+ Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+
+/* Internal temporary helper struct, one for each v4l2_ext_control */
+struct ctrl_helper {
+ /* The control corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl *ctrl;
+ /* Used internally to mark whether this control was already
+ processed. */
+ bool handled;
+};
+
+/* Returns NULL or a character pointer array containing the menu for
+ the given control ID. The pointer array ends with a NULL pointer.
+ An empty string signifies a menu entry that is invalid. This allows
+ drivers to disable certain options if it is not supported. */
+const char **v4l2_ctrl_get_menu(u32 id)
+{
+ static const char *mpeg_audio_sampling_freq[] = {
+ "44.1 kHz",
+ "48 kHz",
+ "32 kHz",
+ NULL
+ };
+ static const char *mpeg_audio_encoding[] = {
+ "MPEG-1/2 Layer I",
+ "MPEG-1/2 Layer II",
+ "MPEG-1/2 Layer III",
+ "MPEG-2/4 AAC",
+ "AC-3",
+ NULL
+ };
+ static const char *mpeg_audio_l1_bitrate[] = {
+ "32 kbps",
+ "64 kbps",
+ "96 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "288 kbps",
+ "320 kbps",
+ "352 kbps",
+ "384 kbps",
+ "416 kbps",
+ "448 kbps",
+ NULL
+ };
+ static const char *mpeg_audio_l2_bitrate[] = {
+ "32 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ NULL
+ };
+ static const char *mpeg_audio_l3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ NULL
+ };
+ static const char *mpeg_audio_ac3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ "448 kbps",
+ "512 kbps",
+ "576 kbps",
+ "640 kbps",
+ NULL
+ };
+ static const char *mpeg_audio_mode[] = {
+ "Stereo",
+ "Joint Stereo",
+ "Dual",
+ "Mono",
+ NULL
+ };
+ static const char *mpeg_audio_mode_extension[] = {
+ "Bound 4",
+ "Bound 8",
+ "Bound 12",
+ "Bound 16",
+ NULL
+ };
+ static const char *mpeg_audio_emphasis[] = {
+ "No Emphasis",
+ "50/15 us",
+ "CCITT J17",
+ NULL
+ };
+ static const char *mpeg_audio_crc[] = {
+ "No CRC",
+ "16-bit CRC",
+ NULL
+ };
+ static const char *mpeg_video_encoding[] = {
+ "MPEG-1",
+ "MPEG-2",
+ "MPEG-4 AVC",
+ NULL
+ };
+ static const char *mpeg_video_aspect[] = {
+ "1x1",
+ "4x3",
+ "16x9",
+ "2.21x1",
+ NULL
+ };
+ static const char *mpeg_video_bitrate_mode[] = {
+ "Variable Bitrate",
+ "Constant Bitrate",
+ NULL
+ };
+ static const char *mpeg_stream_type[] = {
+ "MPEG-2 Program Stream",
+ "MPEG-2 Transport Stream",
+ "MPEG-1 System Stream",
+ "MPEG-2 DVD-compatible Stream",
+ "MPEG-1 VCD-compatible Stream",
+ "MPEG-2 SVCD-compatible Stream",
+ NULL
+ };
+ static const char *mpeg_stream_vbi_fmt[] = {
+ "No VBI",
+ "Private packet, IVTV format",
+ NULL
+ };
+ static const char *camera_power_line_frequency[] = {
+ "Disabled",
+ "50 Hz",
+ "60 Hz",
+ NULL
+ };
+ static const char *camera_exposure_auto[] = {
+ "Auto Mode",
+ "Manual Mode",
+ "Shutter Priority Mode",
+ "Aperture Priority Mode",
+ NULL
+ };
+ static const char *colorfx[] = {
+ "None",
+ "Black & White",
+ "Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky blue",
+ "Grass green",
+ "Skin whiten",
+ "Vivid",
+ NULL
+ };
+ static const char *tune_preemphasis[] = {
+ "No preemphasis",
+ "50 useconds",
+ "75 useconds",
+ NULL,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ return mpeg_audio_sampling_freq;
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ return mpeg_audio_encoding;
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ return mpeg_audio_l1_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ return mpeg_audio_l2_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ return mpeg_audio_l3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ return mpeg_audio_ac3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ return mpeg_audio_mode;
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ return mpeg_audio_mode_extension;
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ return mpeg_audio_emphasis;
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ return mpeg_audio_crc;
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ return mpeg_video_encoding;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return mpeg_video_aspect;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return mpeg_video_bitrate_mode;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return mpeg_stream_type;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return mpeg_stream_vbi_fmt;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return camera_power_line_frequency;
+ case V4L2_CID_EXPOSURE_AUTO:
+ return camera_exposure_auto;
+ case V4L2_CID_COLORFX:
+ return colorfx;
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ return tune_preemphasis;
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_menu);
+
+/* Return the control name. */
+const char *v4l2_ctrl_get_name(u32 id)
+{
+ switch (id) {
+ /* USER controls */
+ /* Keep the order of the 'case's the same as in videodev2.h! */
+ case V4L2_CID_USER_CLASS: return "User Controls";
+ case V4L2_CID_BRIGHTNESS: return "Brightness";
+ case V4L2_CID_CONTRAST: return "Contrast";
+ case V4L2_CID_SATURATION: return "Saturation";
+ case V4L2_CID_HUE: return "Hue";
+ case V4L2_CID_AUDIO_VOLUME: return "Volume";
+ case V4L2_CID_AUDIO_BALANCE: return "Balance";
+ case V4L2_CID_AUDIO_BASS: return "Bass";
+ case V4L2_CID_AUDIO_TREBLE: return "Treble";
+ case V4L2_CID_AUDIO_MUTE: return "Mute";
+ case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
+ case V4L2_CID_BLACK_LEVEL: return "Black Level";
+ case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
+ case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
+ case V4L2_CID_RED_BALANCE: return "Red Balance";
+ case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
+ case V4L2_CID_GAMMA: return "Gamma";
+ case V4L2_CID_EXPOSURE: return "Exposure";
+ case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
+ case V4L2_CID_GAIN: return "Gain";
+ case V4L2_CID_HFLIP: return "Horizontal Flip";
+ case V4L2_CID_VFLIP: return "Vertical Flip";
+ case V4L2_CID_HCENTER: return "Horizontal Center";
+ case V4L2_CID_VCENTER: return "Vertical Center";
+ case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
+ case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
+ case V4L2_CID_SHARPNESS: return "Sharpness";
+ case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
+ case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_COLOR_KILLER: return "Color Killer";
+ case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
+ case V4L2_CID_ROTATE: return "Rotate";
+ case V4L2_CID_BG_COLOR: return "Background Color";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+
+ /* MPEG controls */
+ /* Keep the order of the 'case's the same as in videodev2.h! */
+ case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
+ case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
+ case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
+ case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
+ case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
+ case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
+ case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
+ case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
+ case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
+ case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
+ case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
+ case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
+ case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
+ case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+
+ /* CAMERA controls */
+ /* Keep the order of the 'case's the same as in videodev2.h! */
+ case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
+ case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
+ case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
+ case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
+ case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
+ case V4L2_CID_PAN_RESET: return "Pan, Reset";
+ case V4L2_CID_TILT_RESET: return "Tilt, Reset";
+ case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
+ case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
+ case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
+ case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
+ case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
+ case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
+ case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
+ case V4L2_CID_PRIVACY: return "Privacy";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+
+ /* FM Radio Modulator control */
+ /* Keep the order of the 'case's the same as in videodev2.h! */
+ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
+ case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
+ case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
+ case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
+ case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
+ case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
+ case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings";
+ case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_name);
+
+void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags)
+{
+ *name = v4l2_ctrl_get_name(id);
+ *flags = 0;
+
+ switch (id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_HUE_AUTO:
+ case V4L2_CID_CHROMA_AGC:
+ case V4L2_CID_COLOR_KILLER:
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_FOCUS_AUTO:
+ case V4L2_CID_PRIVACY:
+ case V4L2_CID_AUDIO_LIMITER_ENABLED:
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ *type = V4L2_CTRL_TYPE_BOOLEAN;
+ *min = 0;
+ *max = *step = 1;
+ break;
+ case V4L2_CID_PAN_RESET:
+ case V4L2_CID_TILT_RESET:
+ *type = V4L2_CTRL_TYPE_BUTTON;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_COLORFX:
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ *type = V4L2_CTRL_TYPE_MENU;
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ *type = V4L2_CTRL_TYPE_STRING;
+ break;
+ case V4L2_CID_USER_CLASS:
+ case V4L2_CID_CAMERA_CLASS:
+ case V4L2_CID_MPEG_CLASS:
+ case V4L2_CID_FM_TX_CLASS:
+ *type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ /* You can neither read nor write these */
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_BG_COLOR:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+ /* Max is calculated as RGB888 that is 2^24 */
+ *max = 0xFFFFFF;
+ break;
+ default:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ }
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ *flags |= V4L2_CTRL_FLAG_UPDATE;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ case V4L2_CID_GAMMA:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
+ case V4L2_CID_RDS_TX_DEVIATION:
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION:
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN:
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ case V4L2_CID_PILOT_TONE_FREQUENCY:
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ *flags |= V4L2_CTRL_FLAG_SLIDER;
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ case V4L2_CID_TILT_RELATIVE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
+ case V4L2_CID_ZOOM_RELATIVE:
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_fill);
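
v4l2_ctrl_fill() centralises the name/type/range/flag defaults that v4l2_ctrl_query_fill() used to compute inline (see the v4l2-common.c hunk earlier in this diff). A minimal caller sketch, using an arbitrary standard control ID for illustration:

	const char *name;
	enum v4l2_ctrl_type type;
	s32 min = 0, max = 0, step = 0, def = 0;
	u32 flags;

	v4l2_ctrl_fill(V4L2_CID_AUDIO_MUTE, &name, &type,
		       &min, &max, &step, &def, &flags);
	/* name == "Mute", type == V4L2_CTRL_TYPE_BOOLEAN,
	 * min == 0, max == 1, step == 1 per the switch above;
	 * def is left as supplied by the caller. */
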
+
+/* Helper function to determine whether the control type is compatible with
+ VIDIOC_G/S_CTRL. */
+static bool type_is_int(const struct v4l2_ctrl *ctrl)
+{
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ case V4L2_CTRL_TYPE_STRING:
+ /* Nope, these need v4l2_ext_control */
+ return false;
+ default:
+ return true;
+ }
+}
+
+/* Helper function: copy the current control value back to the caller */
+static int cur_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ u32 len;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(ctrl->cur.string);
+ if (c->size < len + 1) {
+ c->size = len + 1;
+ return -ENOSPC;
+ }
+ return copy_to_user(c->string, ctrl->cur.string,
+ len + 1) ? -EFAULT : 0;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ c->value64 = ctrl->cur.val64;
+ break;
+ default:
+ c->value = ctrl->cur.val;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the caller-provided value as the new control value */
+static int user_to_new(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ int ret;
+ u32 size;
+
+ ctrl->has_new = 1;
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ ctrl->val64 = c->value64;
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ size = c->size;
+ if (size == 0)
+ return -ERANGE;
+ if (size > ctrl->maximum + 1)
+ size = ctrl->maximum + 1;
+ ret = copy_from_user(ctrl->string, c->string, size);
+ if (!ret) {
+ char last = ctrl->string[size - 1];
+
+ ctrl->string[size - 1] = 0;
+ /* If the string was longer than ctrl->maximum,
+ then return an error. */
+ if (strlen(ctrl->string) == ctrl->maximum && last)
+ return -ERANGE;
+ }
+ return ret ? -EFAULT : 0;
+ default:
+ ctrl->val = c->value;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the new control value back to the caller */
+static int new_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ u32 len;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(ctrl->string);
+ if (c->size < len + 1) {
+ c->size = ctrl->maximum + 1;
+ return -ENOSPC;
+ }
+ return copy_to_user(c->string, ctrl->string,
+ len + 1) ? -EFAULT : 0;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ c->value64 = ctrl->val64;
+ break;
+ default:
+ c->value = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+/* Copy the new value to the current value. */
+static void new_to_cur(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl == NULL)
+ return;
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ /* strings are always 0-terminated */
+ strcpy(ctrl->cur.string, ctrl->string);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ ctrl->cur.val64 = ctrl->val64;
+ break;
+ default:
+ ctrl->cur.val = ctrl->val;
+ break;
+ }
+}
+
+/* Copy the current value to the new value */
+static void cur_to_new(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl == NULL)
+ return;
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ /* strings are always 0-terminated */
+ strcpy(ctrl->string, ctrl->cur.string);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ ctrl->val64 = ctrl->cur.val64;
+ break;
+ default:
+ ctrl->val = ctrl->cur.val;
+ break;
+ }
+}
+
+/* Return non-zero if one or more of the controls in the cluster has a new
+ value that differs from the current value. */
+static int cluster_changed(struct v4l2_ctrl *master)
+{
+ int diff = 0;
+ int i;
+
+ for (i = 0; !diff && i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+
+ if (ctrl == NULL)
+ continue;
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ /* Button controls are always 'different' */
+ return 1;
+ case V4L2_CTRL_TYPE_STRING:
+ /* strings are always 0-terminated */
+ diff = strcmp(ctrl->string, ctrl->cur.string);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ diff = ctrl->val64 != ctrl->cur.val64;
+ break;
+ default:
+ diff = ctrl->val != ctrl->cur.val;
+ break;
+ }
+ }
+ return diff;
+}
+
+/* Validate a new control */
+static int validate_new(struct v4l2_ctrl *ctrl)
+{
+ s32 val = ctrl->val;
+ char *s = ctrl->string;
+ u32 offset;
+ size_t len;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ /* Round towards the closest legal value */
+ val += ctrl->step / 2;
+ if (val < ctrl->minimum)
+ val = ctrl->minimum;
+ if (val > ctrl->maximum)
+ val = ctrl->maximum;
+ offset = val - ctrl->minimum;
+ offset = ctrl->step * (offset / ctrl->step);
+ val = ctrl->minimum + offset;
+ ctrl->val = val;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ctrl->val = !!ctrl->val;
+ return 0;
+
+ case V4L2_CTRL_TYPE_MENU:
+ if (val < ctrl->minimum || val > ctrl->maximum)
+ return -ERANGE;
+ if (ctrl->qmenu[val][0] == '\0' ||
+ (ctrl->menu_skip_mask & (1 << val)))
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ctrl->val64 = 0;
+ return 0;
+
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return 0;
+
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(s);
+ if (len < ctrl->minimum)
+ return -ERANGE;
+ if ((len - ctrl->minimum) % ctrl->step)
+ return -ERANGE;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline u32 node2id(struct list_head *node)
+{
+ return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
+}
+
+/* Set the handler's error code if it wasn't set earlier already */
+static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
+{
+ if (hdl->error == 0)
+ hdl->error = err;
+ return err;
+}
+
+/* Initialize the handler */
+int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
+ unsigned nr_of_controls_hint)
+{
+ mutex_init(&hdl->lock);
+ INIT_LIST_HEAD(&hdl->ctrls);
+ INIT_LIST_HEAD(&hdl->ctrl_refs);
+ hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
+ hdl->buckets = kzalloc(sizeof(hdl->buckets[0]) * hdl->nr_of_buckets,
+ GFP_KERNEL);
+ hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_init);
+
+/* Free all controls and control refs */
+void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_ref *ref, *next_ref;
+ struct v4l2_ctrl *ctrl, *next_ctrl;
+
+ if (hdl == NULL || hdl->buckets == NULL)
+ return;
+
+ mutex_lock(&hdl->lock);
+ /* Free all nodes */
+ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ /* Free all controls owned by the handler */
+ list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
+ list_del(&ctrl->node);
+ kfree(ctrl);
+ }
+ kfree(hdl->buckets);
+ hdl->buckets = NULL;
+ hdl->cached = NULL;
+ hdl->error = 0;
+ mutex_unlock(&hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_free);
+
+/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
+ be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
+ with applications that do not use the NEXT_CTRL flag.
+
+ We just find the n-th private user control. It's O(N), but that should not
+ be an issue in this particular case. */
+static struct v4l2_ctrl_ref *find_private_ref(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+
+ id -= V4L2_CID_PRIVATE_BASE;
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ /* Search for private user controls that are compatible with
+ VIDIOC_G/S_CTRL. */
+ if (V4L2_CTRL_ID2CLASS(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
+ V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
+ if (!type_is_int(ref->ctrl))
+ continue;
+ if (id == 0)
+ return ref;
+ id--;
+ }
+ }
+ return NULL;
+}
+
+/* Find a control with the given ID. */
+static struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+ int bucket;
+
+ id &= V4L2_CTRL_ID_MASK;
+
+ /* Old-style private controls need special handling */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return find_private_ref(hdl, id);
+ bucket = id % hdl->nr_of_buckets;
+
+ /* Simple optimization: cache the last control found */
+ if (hdl->cached && hdl->cached->ctrl->id == id)
+ return hdl->cached;
+
+ /* Not in cache, search the hash */
+ ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
+ while (ref && ref->ctrl->id != id)
+ ref = ref->next;
+
+ if (ref)
+ hdl->cached = ref; /* cache it! */
+ return ref;
+}
+
+/* Find a control with the given ID. Take the handler's lock first. */
+static struct v4l2_ctrl_ref *find_ref_lock(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = NULL;
+
+ if (hdl) {
+ mutex_lock(&hdl->lock);
+ ref = find_ref(hdl, id);
+ mutex_unlock(&hdl->lock);
+ }
+ return ref;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return ref ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_find);

+
+/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
+static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl_ref *new_ref;
+ u32 id = ctrl->id;
+ u32 class_ctrl = V4L2_CTRL_ID2CLASS(id) | 1;
+ int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+
+ /* Automatically add the control class if it is not yet present. */
+ if (id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
+ if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
+ return hdl->error;
+
+ if (hdl->error)
+ return hdl->error;
+
+ new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
+ if (!new_ref)
+ return handler_set_err(hdl, -ENOMEM);
+ new_ref->ctrl = ctrl;
+ if (ctrl->handler == hdl) {
+ /* By default each control starts in a cluster of its own.
+ new_ref->ctrl is basically a cluster array with one
+ element, so that's perfect to use as the cluster pointer.
+ But only do this for the handler that owns the control. */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }
+
+ INIT_LIST_HEAD(&new_ref->node);
+
+ mutex_lock(&hdl->lock);
+
+ /* Add immediately at the end of the list if the list is empty, or if
+ the last element in the list has a lower ID.
+ This ensures that when elements are added in ascending order the
+ insertion is an O(1) operation. */
+ if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
+ list_add_tail(&new_ref->node, &hdl->ctrl_refs);
+ goto insert_in_hash;
+ }
+
+ /* Find insert position in sorted list */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ if (ref->ctrl->id < id)
+ continue;
+ /* Don't add duplicates */
+ if (ref->ctrl->id == id) {
+ kfree(new_ref);
+ goto unlock;
+ }
+ list_add(&new_ref->node, ref->node.prev);
+ break;
+ }
+
+insert_in_hash:
+ /* Insert the control node in the hash */
+ new_ref->next = hdl->buckets[bucket];
+ hdl->buckets[bucket] = new_ref;
+
+unlock:
+ mutex_unlock(&hdl->lock);
+ return 0;
+}
+
+/* Add a new control */
+static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, const char *name, enum v4l2_ctrl_type type,
+ s32 min, s32 max, u32 step, s32 def,
+ u32 flags, const char **qmenu, void *priv)
+{
+ struct v4l2_ctrl *ctrl;
+ unsigned sz_extra = 0;
+
+ if (hdl->error)
+ return NULL;
+
+ /* Sanity checks */
+ if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
+ def < min || def > max || max < min ||
+ (type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
+ (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_STRING && max == 0)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
+
+ if (type == V4L2_CTRL_TYPE_BUTTON)
+ flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ else if (type == V4L2_CTRL_TYPE_STRING)
+ sz_extra += 2 * (max + 1);
+
+ ctrl = kzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -ENOMEM);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ctrl->node);
+ ctrl->handler = hdl;
+ ctrl->ops = ops;
+ ctrl->id = id;
+ ctrl->name = name;
+ ctrl->type = type;
+ ctrl->flags = flags;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->qmenu = qmenu;
+ ctrl->priv = priv;
+ ctrl->cur.val = ctrl->val = ctrl->default_value = def;
+
+ if (ctrl->type == V4L2_CTRL_TYPE_STRING) {
+ ctrl->cur.string = (char *)&ctrl[1] + sz_extra - (max + 1);
+ ctrl->string = (char *)&ctrl[1] + sz_extra - 2 * (max + 1);
+ if (ctrl->minimum)
+ memset(ctrl->cur.string, ' ', ctrl->minimum);
+ }
+ if (handler_new_ref(hdl, ctrl)) {
+ kfree(ctrl);
+ return NULL;
+ }
+ mutex_lock(&hdl->lock);
+ list_add_tail(&ctrl->node, &hdl->ctrls);
+ mutex_unlock(&hdl->lock);
+ return ctrl;
+}
+
+struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_config *cfg, void *priv)
+{
+ bool is_menu;
+ struct v4l2_ctrl *ctrl;
+ const char *name = cfg->name;
+ const char **qmenu = cfg->qmenu;
+ enum v4l2_ctrl_type type = cfg->type;
+ u32 flags = cfg->flags;
+ s32 min = cfg->min;
+ s32 max = cfg->max;
+ u32 step = cfg->step;
+ s32 def = cfg->def;
+
+ if (name == NULL)
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+ is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+ if (is_menu && qmenu == NULL)
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+
+ ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->id, name,
+ type, min, max,
+ is_menu ? cfg->menu_skip_mask : step,
+ def, flags, qmenu, priv);
+ if (ctrl) {
+ ctrl->is_private = cfg->is_private;
+ ctrl->is_volatile = cfg->is_volatile;
+ }
+ return ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_custom);
+
+/* Helper function for standard non-menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s32 min, s32 max, u32 step, s32 def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type == V4L2_CTRL_TYPE_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, id, name, type,
+ min, max, step, def, flags, NULL, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std);
+
+/* Helper function for standard menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s32 max, s32 mask, s32 def)
+{
+ const char **qmenu = v4l2_ctrl_get_menu(id);
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s32 min;
+ s32 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, id, name, type,
+ 0, max, mask, def, flags, qmenu, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
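
Taken together with v4l2_ctrl_handler_init/_setup/_free above, the typical driver-side sequence looks roughly like the sketch below. The foo_* names, ranges and defaults are hypothetical, chosen only to exercise the helpers defined in this file:

	#include <media/v4l2-ctrls.h>

	struct foo_state {
		struct v4l2_ctrl_handler hdl;	/* hypothetical per-device state */
	};

	static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
	{
		/* Apply ctrl->val to the hardware; called from handler_setup
		 * and whenever userspace sets the control. */
		return 0;
	}

	static const struct v4l2_ctrl_ops foo_ctrl_ops = {
		.s_ctrl = foo_s_ctrl,
	};

	static int foo_init_controls(struct foo_state *state)
	{
		struct v4l2_ctrl_handler *hdl = &state->hdl;

		v4l2_ctrl_handler_init(hdl, 4);	/* hint: about 4 controls */
		v4l2_ctrl_new_std(hdl, &foo_ctrl_ops,
				  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
		v4l2_ctrl_new_std(hdl, &foo_ctrl_ops,
				  V4L2_CID_CONTRAST, 0, 255, 1, 64);
		v4l2_ctrl_new_std_menu(hdl, &foo_ctrl_ops,
				       V4L2_CID_POWER_LINE_FREQUENCY,
				       V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
		if (hdl->error) {	/* any failed *_new() latches an error */
			int err = hdl->error;

			v4l2_ctrl_handler_free(hdl);
			return err;
		}
		return v4l2_ctrl_handler_setup(hdl);	/* push defaults to the hardware */
	}
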
+
+/* Add a control from another handler to this handler */
+struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl)
+{
+ if (hdl == NULL || hdl->error)
+ return NULL;
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ if (ctrl->handler == hdl)
+ return ctrl;
+ return handler_new_ref(hdl, ctrl) ? NULL : ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_ctrl);
+
+/* Add the controls from another handler to our own. */
+int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *add)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ /* Do nothing if either handler is NULL or if they are the same */
+ if (!hdl || !add || hdl == add)
+ return 0;
+ if (hdl->error)
+ return hdl->error;
+ mutex_lock(&add->lock);
+ list_for_each_entry(ctrl, &add->ctrls, node) {
+ /* Skip handler-private controls. */
+ if (ctrl->is_private)
+ continue;
+ ret = handler_new_ref(hdl, ctrl);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&add->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_handler);
+
+/* Cluster controls */
+void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
+{
+ int i;
+
+ /* The first control is the master control and it must not be NULL */
+ BUG_ON(controls[0] == NULL);
+
+ for (i = 0; i < ncontrols; i++) {
+ if (controls[i]) {
+ controls[i]->cluster = controls;
+ controls[i]->ncontrols = ncontrols;
+ }
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_cluster);
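
A common use of clustering is an auto/manual pair whose values must be evaluated together in s_ctrl(). The pointers passed as the controls array must be stored adjacently, master first; continuing the hypothetical foo_* sketch above:

	struct foo_state {
		struct v4l2_ctrl_handler hdl;
		struct v4l2_ctrl *autogain;	/* cluster master */
		struct v4l2_ctrl *gain;		/* must follow the master */
	};

	static int foo_init_gain_cluster(struct foo_state *state)
	{
		state->autogain = v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
						    V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
		state->gain = v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
						V4L2_CID_GAIN, 0, 255, 1, 32);
		v4l2_ctrl_cluster(2, &state->autogain);
		/* foo_s_ctrl() now runs once per cluster; the master is autogain
		 * and master->cluster[1] is the gain control. */
		return state->hdl.error;
	}
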
+
+/* Activate/deactivate a control. */
+void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
+{
+ if (ctrl == NULL)
+ return;
+
+ if (!active)
+ /* set V4L2_CTRL_FLAG_INACTIVE */
+ set_bit(4, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_INACTIVE */
+ clear_bit(4, &ctrl->flags);
+}
+EXPORT_SYMBOL(v4l2_ctrl_activate);
+
+/* Grab/ungrab a control.
+ Typically used when streaming starts and you want to grab controls,
+ preventing the user from changing them.
+
+ Just call this and the framework will block any attempts to change
+ these controls. */
+void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ if (ctrl == NULL)
+ return;
+
+ if (grabbed)
+ /* set V4L2_CTRL_FLAG_GRABBED */
+ set_bit(1, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_GRABBED */
+ clear_bit(1, &ctrl->flags);
+}
+EXPORT_SYMBOL(v4l2_ctrl_grab);
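
Per the comment above, grabbing is typically keyed off stream start/stop; a short sketch continuing the hypothetical foo_* example:

	static int foo_streamon(struct foo_state *state)
	{
		/* Lock out changes to controls that cannot change mid-stream. */
		v4l2_ctrl_grab(state->gain, true);
		return 0;
	}

	static void foo_streamoff(struct foo_state *state)
	{
		v4l2_ctrl_grab(state->gain, false);
	}
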
+
+/* Log the control name and value */
+static void log_ctrl(const struct v4l2_ctrl *ctrl,
+ const char *prefix, const char *colon)
+{
+ int fl_inact = ctrl->flags & V4L2_CTRL_FLAG_INACTIVE;
+ int fl_grabbed = ctrl->flags & V4L2_CTRL_FLAG_GRABBED;
+
+ if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
+ return;
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ return;
+
+ printk(KERN_INFO "%s%s%s: ", prefix, colon, ctrl->name);
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ printk(KERN_CONT "%d", ctrl->cur.val);
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ printk(KERN_CONT "%s", ctrl->cur.val ? "true" : "false");
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ printk(KERN_CONT "%lld", ctrl->cur.val64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ printk(KERN_CONT "%s", ctrl->cur.string);
+ break;
+ default:
+ printk(KERN_CONT "unknown type %d", ctrl->type);
+ break;
+ }
+ if (fl_inact && fl_grabbed)
+ printk(KERN_CONT " (inactive, grabbed)\n");
+ else if (fl_inact)
+ printk(KERN_CONT " (inactive)\n");
+ else if (fl_grabbed)
+ printk(KERN_CONT " (grabbed)\n");
+ else
+ printk(KERN_CONT "\n");
+}
+
+/* Log all controls owned by the handler */
+void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ const char *prefix)
+{
+ struct v4l2_ctrl *ctrl;
+ const char *colon = "";
+ int len;
+
+ if (hdl == NULL)
+ return;
+ if (prefix == NULL)
+ prefix = "";
+ len = strlen(prefix);
+ if (len && prefix[len - 1] != ' ')
+ colon = ": ";
+ mutex_lock(&hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ log_ctrl(ctrl, prefix, colon);
+ mutex_unlock(&hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+
+/* Call s_ctrl for all controls owned by the handler */
+int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (hdl == NULL)
+ return 0;
+ mutex_lock(&hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ ctrl->done = false;
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node) {
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int i;
+
+ /* Skip if this control was already handled by a cluster. */
+ if (ctrl->done)
+ continue;
+
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+
+ /* Skip button controls and read-only controls. */
+ if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+ ret = master->ops->s_ctrl(master);
+ if (ret)
+ break;
+ for (i = 0; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->done = true;
+ }
+ mutex_unlock(&hdl->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ u32 id = qc->id & V4L2_CTRL_ID_MASK;
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+
+ if (hdl == NULL)
+ return -EINVAL;
+
+ mutex_lock(&hdl->lock);
+
+ /* Try to find it */
+ ref = find_ref(hdl, id);
+
+ if ((qc->id & V4L2_CTRL_FLAG_NEXT_CTRL) && !list_empty(&hdl->ctrl_refs)) {
+ /* Find the next control with ID > qc->id */
+
+ /* Did we reach the end of the control list? */
+ if (id >= node2id(hdl->ctrl_refs.prev)) {
+ ref = NULL; /* Yes, so there is no next control */
+ } else if (ref) {
+ /* We found a control with the given ID, so just get
+ the next one in the list. */
+ ref = list_entry(ref->node.next, typeof(*ref), node);
+ } else {
+ /* No control with the given ID exists, so start
+ searching for the next largest ID. We know there
+ is one, otherwise the first 'if' above would have
+ been true. */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ if (id < ref->ctrl->id)
+ break;
+ }
+ }
+ mutex_unlock(&hdl->lock);
+ if (!ref)
+ return -EINVAL;
+
+ ctrl = ref->ctrl;
+ memset(qc, 0, sizeof(*qc));
+ qc->id = ctrl->id;
+ strlcpy(qc->name, ctrl->name, sizeof(qc->name));
+ qc->minimum = ctrl->minimum;
+ qc->maximum = ctrl->maximum;
+ qc->default_value = ctrl->default_value;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ qc->step = 1;
+ else
+ qc->step = ctrl->step;
+ qc->flags = ctrl->flags;
+ qc->type = ctrl->type;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_queryctrl);
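From the application side this supports the usual enumeration idiom with V4L2_CTRL_FLAG_NEXT_CTRL; a userspace sketch, assuming fd is an open video node:

	struct v4l2_queryctrl qc;

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;
	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		if (!(qc.flags & V4L2_CTRL_FLAG_DISABLED))
			printf("%s\n", qc.name);
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}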
+
+int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
+{
+ return v4l2_queryctrl(sd->ctrl_handler, qc);
+}
+EXPORT_SYMBOL(v4l2_subdev_queryctrl);
+
+/* Implement VIDIOC_QUERYMENU */
+int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 i = qm->index;
+
+ ctrl = v4l2_ctrl_find(hdl, qm->id);
+ if (!ctrl)
+ return -EINVAL;
+
+ qm->reserved = 0;
+ /* Sanity checks */
+ if (ctrl->qmenu == NULL ||
+ i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+ /* Use mask to see if this menu item should be skipped */
+ if (ctrl->menu_skip_mask & (1 << i))
+ return -EINVAL;
+ /* Empty menu items should also be skipped */
+ if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_querymenu);
+
+int v4l2_subdev_querymenu(struct v4l2_subdev *sd, struct v4l2_querymenu *qm)
+{
+ return v4l2_querymenu(sd->ctrl_handler, qm);
+}
+EXPORT_SYMBOL(v4l2_subdev_querymenu);
+
+
+
+/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
+
+ It is not a fully atomic operation, just best-effort. After all, if
+ multiple controls have to be set through multiple i2c writes (for example)
+ then some initial writes may succeed while others fail, leaving the
+ system in an inconsistent state. The question is how much effort you are
+ willing to spend on trying to make something atomic that really isn't.
+
+ From the point of view of an application the main requirement is that
+ when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
+ error should be returned without actually affecting any controls.
+
+ If all the values are correct, then it is acceptable to just give up
+ in case of low-level errors.
+
+ It is important though that the application can tell when only a partial
+ configuration was done. The way we do that is through the error_idx field
+ of struct v4l2_ext_controls: if that is equal to the count field then no
+ controls were affected. Otherwise all controls before that index were
+ successful in performing their 'get' or 'set' operation, the control at
+ the given index failed, and you don't know what happened with the controls
+ after the failed one: if they were part of a control cluster they
+ could have been successfully processed (if a cluster member was encountered
+ at index < error_idx), they could have failed (if a cluster member was at
+ error_idx), or they may not have been processed yet (if the first cluster
+ member appeared after error_idx).
+
+ It is all fairly theoretical, though. In practice all you can do is to
+ bail out. If error_idx == count, then it is an application bug. If
+ error_idx < count then it is only an application bug if the error code was
+ EBUSY. That usually means that something started streaming just when you
+ tried to set the controls. In all other cases it is a driver/hardware
+ problem and all you can do is to retry or bail out.
+
+ Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
+ never modifies controls the error_idx is just set to whatever control
+ has an invalid value.
+ */
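A userspace sketch of the rule above (fd and the filled-in ctrls structure are assumed): error_idx == count means nothing was changed, anything smaller means a partial update, and the sensible reaction is to bail out.

	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
		if (ctrls.error_idx == ctrls.count)
			fprintf(stderr, "no controls were changed\n");
		else
			fprintf(stderr, "partial update, failed at index %u\n",
				ctrls.error_idx);
	}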
+
+/* Prepare for the extended g/s/try functions.
+ Find the controls in the control array and do some basic checks. */
+static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct ctrl_helper *helpers,
+ bool try)
+{
+ u32 i;
+
+ for (i = 0; i < cs->count; i++) {
+ struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl *ctrl;
+ u32 id = c->id & V4L2_CTRL_ID_MASK;
+
+ if (try)
+ cs->error_idx = i;
+
+ if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
+ return -EINVAL;
+
+ /* Old-style private controls are not allowed for
+ extended controls */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return -EINVAL;
+ ctrl = v4l2_ctrl_find(hdl, id);
+ if (ctrl == NULL)
+ return -EINVAL;
+ if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
+ return -EINVAL;
+
+ helpers[i].ctrl = ctrl;
+ helpers[i].handled = false;
+ }
+ return 0;
+}
+
+typedef int (*cluster_func)(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl);
+
+/* Walk over all controls in v4l2_ext_controls belonging to the same cluster
+ and call the provided function. */
+static int cluster_walk(unsigned from,
+ struct v4l2_ext_controls *cs,
+ struct ctrl_helper *helpers,
+ cluster_func f)
+{
+ struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
+ int ret = 0;
+ int i;
+
+ /* Find any controls from the same cluster and call the function */
+ for (i = from; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+
+ if (!helpers[i].handled && ctrl->cluster == cluster)
+ ret = f(&cs->controls[i], ctrl);
+ }
+ return ret;
+}
+
+static void cluster_done(unsigned from,
+ struct v4l2_ext_controls *cs,
+ struct ctrl_helper *helpers)
+{
+ struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
+ int i;
+
+ /* Find any controls from the same cluster and mark them as handled */
+ for (i = from; i < cs->count; i++)
+ if (helpers[i].ctrl->cluster == cluster)
+ helpers[i].handled = true;
+}
+
+/* Handles the corner case where cs->count == 0. It checks whether the
+ specified control class exists. If that class ID is 0, then it checks
+ whether there are any controls at all. */
+static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
+{
+ if (ctrl_class == 0)
+ return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
+ return find_ref_lock(hdl, ctrl_class | 1) ? 0 : -EINVAL;
+}
+
+
+
+/* Get extended controls. Allocates the helpers array if needed. */
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+{
+ struct ctrl_helper helper[4];
+ struct ctrl_helper *helpers = helper;
+ int ret;
+ int i;
+
+ cs->error_idx = cs->count;
+ cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
+
+ if (hdl == NULL)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->ctrl_class);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kmalloc(sizeof(helper[0]) * cs->count, GFP_KERNEL);
+ if (helpers == NULL)
+ return -ENOMEM;
+ }
+
+ ret = prepare_ext_ctrls(hdl, cs, helpers, false);
+
+ for (i = 0; !ret && i < cs->count; i++)
+ if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ ret = -EACCES;
+
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+
+ if (helpers[i].handled)
+ continue;
+
+ cs->error_idx = i;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ if (ctrl->is_volatile && master->ops->g_volatile_ctrl)
+ ret = master->ops->g_volatile_ctrl(master);
+ /* If OK, then copy the current control values to the caller */
+ if (!ret)
+ ret = cluster_walk(i, cs, helpers, cur_to_user);
+ v4l2_ctrl_unlock(master);
+ cluster_done(i, cs, helpers);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kfree(helpers);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_g_ext_ctrls);
+
+int v4l2_subdev_g_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
+{
+ return v4l2_g_ext_ctrls(sd->ctrl_handler, cs);
+}
+EXPORT_SYMBOL(v4l2_subdev_g_ext_ctrls);
+
+/* Helper function to get a single control */
+static int get_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ return -EACCES;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ if (ctrl->is_volatile && master->ops->g_volatile_ctrl)
+ ret = master->ops->g_volatile_ctrl(master);
+ *val = ctrl->cur.val;
+ v4l2_ctrl_unlock(master);
+ return ret;
+}
+
+int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+
+ if (ctrl == NULL || !type_is_int(ctrl))
+ return -EINVAL;
+ return get_ctrl(ctrl, &control->value);
+}
+EXPORT_SYMBOL(v4l2_g_ctrl);
+
+int v4l2_subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
+{
+ return v4l2_g_ctrl(sd->ctrl_handler, control);
+}
+EXPORT_SYMBOL(v4l2_subdev_g_ctrl);
+
+s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ s32 val = 0;
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(!type_is_int(ctrl));
+ get_ctrl(ctrl, &val);
+ return val;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
+
+
+/* Core function that calls try/s_ctrl and ensures that the new value is
+ copied to the current value on a set.
+ Must be called with ctrl->handler->lock held. */
+static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set)
+{
+ bool try = !set;
+ int ret = 0;
+ int i;
+
+ /* Go through the cluster and either validate the new value or
+ (if no new value was set), copy the current value to the new
+ value, ensuring a consistent view for the control ops when
+ called. */
+ for (i = 0; !ret && i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+
+ if (ctrl == NULL)
+ continue;
+
+ if (ctrl->has_new) {
+ /* Double check this: it may have changed since the
+ last check in try_or_set_ext_ctrls(). */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+
+ /* Validate if required */
+ if (!set)
+ ret = validate_new(ctrl);
+ continue;
+ }
+ /* No new value was set, so copy the current and force
+ a call to try_ctrl later, since the values for the cluster
+ may now have changed and the end result might be invalid. */
+ try = true;
+ cur_to_new(ctrl);
+ }
+
+ /* For larger clusters you have to call try_ctrl again to
+ verify that the controls are still valid after the
+ 'cur_to_new' above. */
+ if (!ret && master->ops->try_ctrl && try)
+ ret = master->ops->try_ctrl(master);
+
+ /* Don't set if there is no change */
+ if (!ret && set && cluster_changed(master)) {
+ ret = master->ops->s_ctrl(master);
+ /* If OK, then make the new values permanent. */
+ if (!ret)
+ for (i = 0; i < master->ncontrols; i++)
+ new_to_cur(master->cluster[i]);
+ }
+ return ret;
+}
+
+/* Try or set controls. */
+static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct ctrl_helper *helpers,
+ bool set)
+{
+ unsigned i, j;
+ int ret = 0;
+
+ cs->error_idx = cs->count;
+ for (i = 0; i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+
+ if (!set)
+ cs->error_idx = i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+ /* This test is also done in try_or_set_control_cluster() which
+ is called with the handler lock held, so that has the final say,
+ but it makes sense to do an up-front check as well. Once
+ an error occurs in try_or_set_control_cluster() some other
+ controls may have been set already and we want to make a
+ best-effort attempt to avoid that. */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+ }
+
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+
+ cs->error_idx = i;
+
+ if (helpers[i].handled)
+ continue;
+
+ v4l2_ctrl_lock(ctrl);
+
+ /* Reset the 'has_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->has_new = 0;
+
+ /* Copy the new caller-supplied control values.
+ user_to_new() sets 'has_new' to 1. */
+ ret = cluster_walk(i, cs, helpers, user_to_new);
+
+ if (!ret)
+ ret = try_or_set_control_cluster(master, set);
+
+ /* Copy the new values back to userspace. */
+ if (!ret)
+ ret = cluster_walk(i, cs, helpers, new_to_user);
+
+ v4l2_ctrl_unlock(ctrl);
+ cluster_done(i, cs, helpers);
+ }
+ return ret;
+}
+
+/* Try or try-and-set controls */
+static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ bool set)
+{
+ struct ctrl_helper helper[4];
+ struct ctrl_helper *helpers = helper;
+ int ret;
+ int i;
+
+ cs->error_idx = cs->count;
+ cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
+
+ if (hdl == NULL)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->ctrl_class);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kmalloc(sizeof(helper[0]) * cs->count, GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+ ret = prepare_ext_ctrls(hdl, cs, helpers, !set);
+ if (ret)
+ goto free;
+
+ /* First 'try' all controls and abort on error */
+ ret = try_or_set_ext_ctrls(hdl, cs, helpers, false);
+ /* If this is a 'set' operation and the initial 'try' failed,
+ then set error_idx to count to tell the application that no
+ controls changed value yet. */
+ if (set)
+ cs->error_idx = cs->count;
+ if (!ret && set) {
+ /* Reset 'handled' state */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].handled = false;
+ ret = try_or_set_ext_ctrls(hdl, cs, helpers, true);
+ }
+
+free:
+ if (cs->count > ARRAY_SIZE(helper))
+ kfree(helpers);
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(hdl, cs, false);
+}
+EXPORT_SYMBOL(v4l2_try_ext_ctrls);
+
+int v4l2_s_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(hdl, cs, true);
+}
+EXPORT_SYMBOL(v4l2_s_ext_ctrls);
+
+int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(sd->ctrl_handler, cs, false);
+}
+EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
+
+int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(sd->ctrl_handler, cs, true);
+}
+EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret;
+ int i;
+
+ v4l2_ctrl_lock(ctrl);
+
+ /* Reset the 'has_new' flags of the cluster */
+ for (i = 0; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->has_new = 0;
+
+ ctrl->val = *val;
+ ctrl->has_new = 1;
+ ret = try_or_set_control_cluster(master, false);
+ if (!ret)
+ ret = try_or_set_control_cluster(master, true);
+ *val = ctrl->cur.val;
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+
+int v4l2_s_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+
+ if (ctrl == NULL || !type_is_int(ctrl))
+ return -EINVAL;
+
+ return set_ctrl(ctrl, &control->value);
+}
+EXPORT_SYMBOL(v4l2_s_ctrl);
+
+int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
+{
+ return v4l2_s_ctrl(sd->ctrl_handler, control);
+}
+EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
+
+int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
+{
+ /* It's a driver bug if this happens. */
+ WARN_ON(!type_is_int(ctrl));
+ return set_ctrl(ctrl, &val);
+}
+EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 9e89bf61779..cb77197d480 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -215,28 +216,24 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
return vdev->fops->poll(filp, poll);
}
-static int v4l2_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = video_devdata(filp);
+ int ret;
- if (!vdev->fops->ioctl)
- return -ENOTTY;
/* Allow ioctl to continue even if the device was unregistered.
Things like dequeueing buffers might still be useful. */
- return vdev->fops->ioctl(filp, cmd, arg);
-}
-
-static long v4l2_unlocked_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct video_device *vdev = video_devdata(filp);
+ if (vdev->fops->unlocked_ioctl) {
+ ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ } else if (vdev->fops->ioctl) {
+ /* TODO: convert all drivers to unlocked_ioctl */
+ lock_kernel();
+ ret = vdev->fops->ioctl(filp, cmd, arg);
+ unlock_kernel();
+ } else
+ ret = -ENOTTY;
- if (!vdev->fops->unlocked_ioctl)
- return -ENOTTY;
- /* Allow ioctl to continue even if the device was unregistered.
- Things like dequeueing buffers might still be useful. */
- return vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ return ret;
}
#ifdef CONFIG_MMU
@@ -307,22 +304,6 @@ static int v4l2_release(struct inode *inode, struct file *filp)
return ret;
}
-static const struct file_operations v4l2_unlocked_fops = {
- .owner = THIS_MODULE,
- .read = v4l2_read,
- .write = v4l2_write,
- .open = v4l2_open,
- .get_unmapped_area = v4l2_get_unmapped_area,
- .mmap = v4l2_mmap,
- .unlocked_ioctl = v4l2_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = v4l2_compat_ioctl32,
-#endif
- .release = v4l2_release,
- .poll = v4l2_poll,
- .llseek = no_llseek,
-};
-
static const struct file_operations v4l2_fops = {
.owner = THIS_MODULE,
.read = v4l2_read,
@@ -330,7 +311,7 @@ static const struct file_operations v4l2_fops = {
.open = v4l2_open,
.get_unmapped_area = v4l2_get_unmapped_area,
.mmap = v4l2_mmap,
- .ioctl = v4l2_ioctl,
+ .unlocked_ioctl = v4l2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = v4l2_compat_ioctl32,
#endif
@@ -447,8 +428,12 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->vfl_type = type;
vdev->cdev = NULL;
- if (vdev->v4l2_dev && vdev->v4l2_dev->dev)
- vdev->parent = vdev->v4l2_dev->dev;
+ if (vdev->v4l2_dev) {
+ if (vdev->v4l2_dev->dev)
+ vdev->parent = vdev->v4l2_dev->dev;
+ if (vdev->ctrl_handler == NULL)
+ vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
+ }
/* Part 2: find a free minor, device node number and device index. */
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
@@ -521,10 +506,7 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
ret = -ENOMEM;
goto cleanup;
}
- if (vdev->fops->unlocked_ioctl)
- vdev->cdev->ops = &v4l2_unlocked_fops;
- else
- vdev->cdev->ops = &v4l2_fops;
+ vdev->cdev->ops = &v4l2_fops;
vdev->cdev->owner = vdev->fops->owner;
ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
if (ret < 0) {
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 5a7dc4afe92..0b08f96b74a 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -26,6 +26,7 @@
#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
{
@@ -115,6 +116,8 @@ EXPORT_SYMBOL_GPL(v4l2_device_unregister);
int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
struct v4l2_subdev *sd)
{
+ int err;
+
/* Check for valid input */
if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
return -EINVAL;
@@ -122,6 +125,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
WARN_ON(sd->v4l2_dev != NULL);
if (!try_module_get(sd->owner))
return -ENODEV;
+ /* This just returns 0 if either of the two args is NULL */
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
+ if (err)
+ return err;
sd->v4l2_dev = v4l2_dev;
spin_lock(&v4l2_dev->lock);
list_add_tail(&sd->list, &v4l2_dev->subdevs);
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 0eeceae5032..dd9283fcb56 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -26,6 +26,7 @@
#endif
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-chip-ident.h>
@@ -1259,9 +1260,12 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_queryctrl *p = arg;
- if (!ops->vidioc_queryctrl)
+ if (vfd->ctrl_handler)
+ ret = v4l2_queryctrl(vfd->ctrl_handler, p);
+ else if (ops->vidioc_queryctrl)
+ ret = ops->vidioc_queryctrl(file, fh, p);
+ else
break;
- ret = ops->vidioc_queryctrl(file, fh, p);
if (!ret)
dbgarg(cmd, "id=0x%x, type=%d, name=%s, min/max=%d/%d, "
"step=%d, default=%d, flags=0x%08x\n",
@@ -1276,7 +1280,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_control *p = arg;
- if (ops->vidioc_g_ctrl)
+ if (vfd->ctrl_handler)
+ ret = v4l2_g_ctrl(vfd->ctrl_handler, p);
+ else if (ops->vidioc_g_ctrl)
ret = ops->vidioc_g_ctrl(file, fh, p);
else if (ops->vidioc_g_ext_ctrls) {
struct v4l2_ext_controls ctrls;
@@ -1306,11 +1312,16 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
- if (!ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
+ if (!vfd->ctrl_handler &&
+ !ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
break;
dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
+ if (vfd->ctrl_handler) {
+ ret = v4l2_s_ctrl(vfd->ctrl_handler, p);
+ break;
+ }
if (ops->vidioc_s_ctrl) {
ret = ops->vidioc_s_ctrl(file, fh, p);
break;
@@ -1332,10 +1343,12 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!ops->vidioc_g_ext_ctrls)
- break;
- if (check_ext_ctrls(p, 0))
+ if (vfd->ctrl_handler)
+ ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ else if (ops->vidioc_g_ext_ctrls && check_ext_ctrls(p, 0))
ret = ops->vidioc_g_ext_ctrls(file, fh, p);
+ else
+ break;
v4l_print_ext_ctrls(cmd, vfd, p, !ret);
break;
}
@@ -1344,10 +1357,12 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!ops->vidioc_s_ext_ctrls)
+ if (!vfd->ctrl_handler && !ops->vidioc_s_ext_ctrls)
break;
v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (check_ext_ctrls(p, 0))
+ if (vfd->ctrl_handler)
+ ret = v4l2_s_ext_ctrls(vfd->ctrl_handler, p);
+ else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_s_ext_ctrls(file, fh, p);
break;
}
@@ -1356,10 +1371,12 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!ops->vidioc_try_ext_ctrls)
+ if (!vfd->ctrl_handler && !ops->vidioc_try_ext_ctrls)
break;
v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (check_ext_ctrls(p, 0))
+ if (vfd->ctrl_handler)
+ ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_try_ext_ctrls(file, fh, p);
break;
}
@@ -1367,9 +1384,12 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_querymenu *p = arg;
- if (!ops->vidioc_querymenu)
+ if (vfd->ctrl_handler)
+ ret = v4l2_querymenu(vfd->ctrl_handler, p);
+ else if (ops->vidioc_querymenu)
+ ret = ops->vidioc_querymenu(file, fh, p);
+ else
break;
- ret = ops->vidioc_querymenu(file, fh, p);
if (!ret)
dbgarg(cmd, "id=0x%x, index=%d, name=%s\n",
p->id, p->index, p->name);
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index a11b99b4226..d5965543eca 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -27,11 +27,11 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-i2c-drv.h>
+#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("wm8739 driver");
MODULE_AUTHOR("T. Adachi, Hans Verkuil");
@@ -54,12 +54,14 @@ enum {
struct wm8739_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* audio cluster */
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *mute;
+ struct v4l2_ctrl *balance;
+ };
u32 clock_freq;
- u8 muted;
- u16 volume;
- u16 balance;
- u8 vol_l; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
- u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
};
static inline struct wm8739_state *to_state(struct v4l2_subdev *sd)
@@ -67,6 +69,11 @@ static inline struct wm8739_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct wm8739_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct wm8739_state, hdl)->sd;
+}
+
/* ------------------------------------------------------------------------ */
static int wm8739_write(struct v4l2_subdev *sd, int reg, u16 val)
@@ -89,58 +96,17 @@ static int wm8739_write(struct v4l2_subdev *sd, int reg, u16 val)
return -1;
}
-/* write regs to set audio volume etc */
-static void wm8739_set_audio(struct v4l2_subdev *sd)
-{
- struct wm8739_state *state = to_state(sd);
- u16 mute = state->muted ? 0x80 : 0;
-
- /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB
- * Default setting: 0x17 = 0 dB
- */
- wm8739_write(sd, R0, (state->vol_l & 0x1f) | mute);
- wm8739_write(sd, R1, (state->vol_r & 0x1f) | mute);
-}
-
-static int wm8739_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct wm8739_state *state = to_state(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = state->muted;
- break;
-
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = state->volume;
- break;
-
- case V4L2_CID_AUDIO_BALANCE:
- ctrl->value = state->balance;
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int wm8739_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int wm8739_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct wm8739_state *state = to_state(sd);
unsigned int work_l, work_r;
+ u8 vol_l; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
+ u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
+ u16 mute;
switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- state->muted = ctrl->value;
- break;
-
case V4L2_CID_AUDIO_VOLUME:
- state->volume = ctrl->value;
- break;
-
- case V4L2_CID_AUDIO_BALANCE:
- state->balance = ctrl->value;
break;
default:
@@ -148,52 +114,25 @@ static int wm8739_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
}
/* normalize ( 65535 to 0 -> 31 to 0 (12dB to -34.5dB) ) */
- work_l = (min(65536 - state->balance, 32768) * state->volume) / 32768;
- work_r = (min(state->balance, (u16)32768) * state->volume) / 32768;
+ work_l = (min(65536 - state->balance->val, 32768) * state->volume->val) / 32768;
+ work_r = (min(state->balance->val, 32768) * state->volume->val) / 32768;
- state->vol_l = (long)work_l * 31 / 65535;
- state->vol_r = (long)work_r * 31 / 65535;
+ vol_l = (long)work_l * 31 / 65535;
+ vol_r = (long)work_r * 31 / 65535;
/* set audio volume etc. */
- wm8739_set_audio(sd);
+ mute = state->mute->val ? 0x80 : 0;
+
+ /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB
+ * Default setting: 0x17 = 0 dB
+ */
+ wm8739_write(sd, R0, (vol_l & 0x1f) | mute);
+ wm8739_write(sd, R1, (vol_r & 0x1f) | mute);
return 0;
}
/* ------------------------------------------------------------------------ */
-static struct v4l2_queryctrl wm8739_qctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535/100,
- .default_value = 58880,
- .flags = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = 0,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- }, {
- .id = V4L2_CID_AUDIO_BALANCE,
- .name = "Balance",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535/100,
- .default_value = 32768,
- .flags = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }
-};
-
-/* ------------------------------------------------------------------------ */
-
static int wm8739_s_clock_freq(struct v4l2_subdev *sd, u32 audiofreq)
{
struct wm8739_state *state = to_state(sd);
@@ -222,18 +161,6 @@ static int wm8739_s_clock_freq(struct v4l2_subdev *sd, u32 audiofreq)
return 0;
}
-static int wm8739_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wm8739_qctrl); i++)
- if (qc->id && qc->id == wm8739_qctrl[i].id) {
- memcpy(qc, &wm8739_qctrl[i], sizeof(*qc));
- return 0;
- }
- return -EINVAL;
-}
-
static int wm8739_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -246,21 +173,26 @@ static int wm8739_log_status(struct v4l2_subdev *sd)
struct wm8739_state *state = to_state(sd);
v4l2_info(sd, "Frequency: %u Hz\n", state->clock_freq);
- v4l2_info(sd, "Volume L: %02x%s\n", state->vol_l & 0x1f,
- state->muted ? " (muted)" : "");
- v4l2_info(sd, "Volume R: %02x%s\n", state->vol_r & 0x1f,
- state->muted ? " (muted)" : "");
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops wm8739_ctrl_ops = {
+ .s_ctrl = wm8739_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops wm8739_core_ops = {
.log_status = wm8739_log_status,
.g_chip_ident = wm8739_g_chip_ident,
- .queryctrl = wm8739_queryctrl,
- .g_ctrl = wm8739_g_ctrl,
- .s_ctrl = wm8739_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_audio_ops wm8739_audio_ops = {
@@ -289,17 +221,28 @@ static int wm8739_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct wm8739_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &wm8739_ops);
- state->vol_l = 0x17; /* 0dB */
- state->vol_r = 0x17; /* 0dB */
- state->muted = 0;
- state->balance = 32768;
- /* normalize (12dB(31) to -34.5dB(0) [0dB(23)] -> 65535 to 0) */
- state->volume = ((long)state->vol_l + 1) * 65535 / 31;
+ v4l2_ctrl_handler_init(&state->hdl, 2);
+ state->volume = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 50736);
+ state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ state->balance = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768);
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+ v4l2_ctrl_cluster(3, &state->volume);
+
state->clock_freq = 48000;
/* Initialize wm8739 */
@@ -318,15 +261,17 @@ static int wm8739_probe(struct i2c_client *client,
/* activate */
wm8739_write(sd, R9, 0x001);
/* set volume/mute */
- wm8739_set_audio(sd);
+ v4l2_ctrl_handler_setup(&state->hdl);
return 0;
}
static int wm8739_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct wm8739_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&state->hdl);
kfree(to_state(sd));
return 0;
}
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index 5c2ba599c0c..23bad3fd6dc 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -35,6 +35,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("wm8775 driver");
@@ -53,8 +54,9 @@ enum {
struct wm8775_state {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *mute;
u8 input; /* Last selected input (0-0xf) */
- u8 muted;
};
static inline struct wm8775_state *to_state(struct v4l2_subdev *sd)
@@ -62,6 +64,11 @@ static inline struct wm8775_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct wm8775_state, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct wm8775_state, hdl)->sd;
+}
+
static int wm8775_write(struct v4l2_subdev *sd, int reg, u16 val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -95,7 +102,7 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
return -EINVAL;
}
state->input = input;
- if (state->muted)
+ if (!v4l2_ctrl_g_ctrl(state->mute))
return 0;
wm8775_write(sd, R21, 0x0c0);
wm8775_write(sd, R14, 0x1d4);
@@ -104,29 +111,21 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int wm8775_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int wm8775_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct wm8775_state *state = to_state(sd);
- if (ctrl->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- ctrl->value = state->muted;
- return 0;
-}
-
-static int wm8775_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct wm8775_state *state = to_state(sd);
-
- if (ctrl->id != V4L2_CID_AUDIO_MUTE)
- return -EINVAL;
- state->muted = ctrl->value;
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- if (!state->muted)
- wm8775_write(sd, R21, 0x100 + state->input);
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ wm8775_write(sd, R21, 0x0c0);
+ wm8775_write(sd, R14, 0x1d4);
+ wm8775_write(sd, R15, 0x1d4);
+ if (!ctrl->val)
+ wm8775_write(sd, R21, 0x100 + state->input);
+ return 0;
+ }
+ return -EINVAL;
}
static int wm8775_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
@@ -140,8 +139,8 @@ static int wm8775_log_status(struct v4l2_subdev *sd)
{
struct wm8775_state *state = to_state(sd);
- v4l2_info(sd, "Input: %d%s\n", state->input,
- state->muted ? " (muted)" : "");
+ v4l2_info(sd, "Input: %d\n", state->input);
+ v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
@@ -162,11 +161,20 @@ static int wm8775_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *fre
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops wm8775_ctrl_ops = {
+ .s_ctrl = wm8775_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops wm8775_core_ops = {
.log_status = wm8775_log_status,
.g_chip_ident = wm8775_g_chip_ident,
- .g_ctrl = wm8775_g_ctrl,
- .s_ctrl = wm8775_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_tuner_ops wm8775_tuner_ops = {
@@ -205,13 +213,24 @@ static int wm8775_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
- state = kmalloc(sizeof(struct wm8775_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct wm8775_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &wm8775_ops);
state->input = 2;
- state->muted = 0;
+
+ v4l2_ctrl_handler_init(&state->hdl, 1);
+ state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
/* Initialize wm8775 */
@@ -248,9 +267,11 @@ static int wm8775_probe(struct i2c_client *client,
static int wm8775_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct wm8775_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
return 0;
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 8327e248520..d3f1a087ece 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -18,6 +18,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/memstick.h>
#define DRIVER_NAME "mspro_block"
@@ -179,6 +180,7 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
struct mspro_block_data *msb = disk->private_data;
int rc = -ENXIO;
+ lock_kernel();
mutex_lock(&mspro_block_disk_lock);
if (msb && msb->card) {
@@ -190,6 +192,7 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
}
mutex_unlock(&mspro_block_disk_lock);
+ unlock_kernel();
return rc;
}
@@ -221,7 +224,11 @@ static int mspro_block_disk_release(struct gendisk *disk)
static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
{
- return mspro_block_disk_release(disk);
+ int ret;
+ lock_kernel();
+ ret = mspro_block_disk_release(disk);
+ unlock_kernel();
+ return ret;
}
static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -805,7 +812,8 @@ static void mspro_block_start(struct memstick_dev *card)
static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
{
- if (!blk_fs_request(req) && !blk_pc_request(req)) {
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_BLOCK_PC) {
blk_dump_rq_flags(req, "MSPro unsupported request");
return BLKPREP_KILL;
}
@@ -1040,6 +1048,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
snprintf(s_attr->name, sizeof(s_attr->name),
"attr_x%02x", attr->entries[cnt].id);
+ sysfs_attr_init(&s_attr->dev_attr.attr);
s_attr->dev_attr.attr.name = s_attr->name;
s_attr->dev_attr.attr.mode = S_IRUGO;
s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
@@ -1330,13 +1339,14 @@ static void mspro_block_remove(struct memstick_dev *card)
struct mspro_block_data *msb = memstick_get_drvdata(card);
unsigned long flags;
- del_gendisk(msb->disk);
- dev_dbg(&card->dev, "mspro block remove\n");
spin_lock_irqsave(&msb->q_lock, flags);
msb->eject = 1;
blk_start_queue(msb->queue);
spin_unlock_irqrestore(&msb->q_lock, flags);
+ del_gendisk(msb->disk);
+ dev_dbg(&card->dev, "mspro block remove\n");
+
blk_cleanup_queue(msb->queue);
msb->queue = NULL;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index b88a244a1ed..6837a8ef937 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -50,6 +50,7 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
@@ -109,8 +110,7 @@ MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h \
int mpt_fwfault_debug;
EXPORT_SYMBOL(mpt_fwfault_debug);
-module_param_call(mpt_fwfault_debug, param_set_int, param_get_int,
- &mpt_fwfault_debug, 0600);
+module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
" and halt Firmware on fault - (default=0)");
@@ -200,12 +200,9 @@ static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_valu
static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
#ifdef CONFIG_PROC_FS
-static int procmpt_summary_read(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int procmpt_version_read(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
+static const struct file_operations mpt_summary_proc_fops;
+static const struct file_operations mpt_version_proc_fops;
+static const struct file_operations mpt_iocinfo_proc_fops;
#endif
static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
@@ -629,6 +626,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
* mpt_register - Register protocol-specific main callback handler.
* @cbfunc: callback function pointer
* @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
+ * @func_name: calling function's name
*
* This routine is called by a protocol-specific driver (SCSI host,
* LAN, SCSI target) to register its reply callback routine. Each
@@ -1725,7 +1723,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *dent, *ent;
+ struct proc_dir_entry *dent;
#endif
ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
@@ -1980,16 +1978,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
*/
dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
if (dent) {
- ent = create_proc_entry("info", S_IFREG|S_IRUGO, dent);
- if (ent) {
- ent->read_proc = procmpt_iocinfo_read;
- ent->data = ioc;
- }
- ent = create_proc_entry("summary", S_IFREG|S_IRUGO, dent);
- if (ent) {
- ent->read_proc = procmpt_summary_read;
- ent->data = ioc;
- }
+ proc_create_data("info", S_IRUGO, dent, &mpt_iocinfo_proc_fops, ioc);
+ proc_create_data("summary", S_IRUGO, dent, &mpt_summary_proc_fops, ioc);
}
#endif
@@ -6546,20 +6536,12 @@ mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
static int
procmpt_create(void)
{
- struct proc_dir_entry *ent;
-
mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
if (mpt_proc_root_dir == NULL)
return -ENOTDIR;
- ent = create_proc_entry("summary", S_IFREG|S_IRUGO, mpt_proc_root_dir);
- if (ent)
- ent->read_proc = procmpt_summary_read;
-
- ent = create_proc_entry("version", S_IFREG|S_IRUGO, mpt_proc_root_dir);
- if (ent)
- ent->read_proc = procmpt_version_read;
-
+ proc_create("summary", S_IRUGO, mpt_proc_root_dir, &mpt_summary_proc_fops);
+ proc_create("version", S_IRUGO, mpt_proc_root_dir, &mpt_version_proc_fops);
return 0;
}
@@ -6578,71 +6560,47 @@ procmpt_destroy(void)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * procmpt_summary_read - Handle read request of a summary file
- * @buf: Pointer to area to write information
- * @start: Pointer to start pointer
- * @offset: Offset to start writing
- * @request: Amount of read data requested
- * @eof: Pointer to EOF integer
- * @data: Pointer
- *
+/*
* Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
- * Returns number of characters written to process performing the read.
*/
-static int
-procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
-{
- MPT_ADAPTER *ioc;
- char *out = buf;
- int len;
-
- if (data) {
- int more = 0;
+static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan);
- ioc = data;
- mpt_print_ioc_summary(ioc, out, &more, 0, 1);
+static int mpt_summary_proc_show(struct seq_file *m, void *v)
+{
+ MPT_ADAPTER *ioc = m->private;
- out += more;
+ if (ioc) {
+ seq_mpt_print_ioc_summary(ioc, m, 1);
} else {
list_for_each_entry(ioc, &ioc_list, list) {
- int more = 0;
-
- mpt_print_ioc_summary(ioc, out, &more, 0, 1);
-
- out += more;
- if ((out-buf) >= request)
- break;
+ seq_mpt_print_ioc_summary(ioc, m, 1);
}
}
- len = out - buf;
+ return 0;
+}
- MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
+static int mpt_summary_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mpt_summary_proc_show, PDE(inode)->data);
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * procmpt_version_read - Handle read request from /proc/mpt/version.
- * @buf: Pointer to area to write information
- * @start: Pointer to start pointer
- * @offset: Offset to start writing
- * @request: Amount of read data requested
- * @eof: Pointer to EOF integer
- * @data: Pointer
- *
- * Returns number of characters written to process performing the read.
- */
-static int
-procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
+static const struct file_operations mpt_summary_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mpt_summary_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mpt_version_proc_show(struct seq_file *m, void *v)
{
u8 cb_idx;
int scsi, fc, sas, lan, ctl, targ, dmp;
char *drvname;
- int len;
- len = sprintf(buf, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
- len += sprintf(buf+len, " Fusion MPT base driver\n");
+ seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
+ seq_printf(m, " Fusion MPT base driver\n");
scsi = fc = sas = lan = ctl = targ = dmp = 0;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
@@ -6670,98 +6628,97 @@ procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eo
}
if (drvname)
- len += sprintf(buf+len, " Fusion MPT %s driver\n", drvname);
+ seq_printf(m, " Fusion MPT %s driver\n", drvname);
}
}
- MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
+ return 0;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info.
- * @buf: Pointer to area to write information
- * @start: Pointer to start pointer
- * @offset: Offset to start writing
- * @request: Amount of read data requested
- * @eof: Pointer to EOF integer
- * @data: Pointer
- *
- * Returns number of characters written to process performing the read.
- */
-static int
-procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
+static int mpt_version_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mpt_version_proc_show, NULL);
+}
+
+static const struct file_operations mpt_version_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mpt_version_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
{
- MPT_ADAPTER *ioc = data;
- int len;
+ MPT_ADAPTER *ioc = m->private;
char expVer[32];
int sz;
int p;
mpt_get_fw_exp_ver(expVer, ioc);
- len = sprintf(buf, "%s:", ioc->name);
+ seq_printf(m, "%s:", ioc->name);
if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
- len += sprintf(buf+len, " (f/w download boot flag set)");
+ seq_printf(m, " (f/w download boot flag set)");
// if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL)
-// len += sprintf(buf+len, " CONFIG_CHECKSUM_FAIL!");
+// seq_printf(m, " CONFIG_CHECKSUM_FAIL!");
- len += sprintf(buf+len, "\n ProductID = 0x%04x (%s)\n",
+ seq_printf(m, "\n ProductID = 0x%04x (%s)\n",
ioc->facts.ProductID,
ioc->prod_name);
- len += sprintf(buf+len, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
+ seq_printf(m, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
if (ioc->facts.FWImageSize)
- len += sprintf(buf+len, " (fw_size=%d)", ioc->facts.FWImageSize);
- len += sprintf(buf+len, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
- len += sprintf(buf+len, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
- len += sprintf(buf+len, " EventState = 0x%02x\n", ioc->facts.EventState);
+ seq_printf(m, " (fw_size=%d)", ioc->facts.FWImageSize);
+ seq_printf(m, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
+ seq_printf(m, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
+ seq_printf(m, " EventState = 0x%02x\n", ioc->facts.EventState);
- len += sprintf(buf+len, " CurrentHostMfaHighAddr = 0x%08x\n",
+ seq_printf(m, " CurrentHostMfaHighAddr = 0x%08x\n",
ioc->facts.CurrentHostMfaHighAddr);
- len += sprintf(buf+len, " CurrentSenseBufferHighAddr = 0x%08x\n",
+ seq_printf(m, " CurrentSenseBufferHighAddr = 0x%08x\n",
ioc->facts.CurrentSenseBufferHighAddr);
- len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
- len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
+ seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+ seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
- len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
(void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
/*
* Rounding UP to nearest 4-kB boundary here...
*/
sz = (ioc->req_sz * ioc->req_depth) + 128;
sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000;
- len += sprintf(buf+len, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
+ seq_printf(m, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz);
- len += sprintf(buf+len, " {MaxReqSz=%d} {MaxReqDepth=%d}\n",
+ seq_printf(m, " {MaxReqSz=%d} {MaxReqDepth=%d}\n",
4*ioc->facts.RequestFrameSize,
ioc->facts.GlobalCredits);
- len += sprintf(buf+len, " Frames @ 0x%p (Dma @ 0x%p)\n",
+ seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
(void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
sz = (ioc->reply_sz * ioc->reply_depth) + 128;
- len += sprintf(buf+len, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
+ seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
- len += sprintf(buf+len, " {MaxRepSz=%d} {MaxRepDepth=%d}\n",
+ seq_printf(m, " {MaxRepSz=%d} {MaxRepDepth=%d}\n",
ioc->facts.CurReplyFrameSize,
ioc->facts.ReplyQueueDepth);
- len += sprintf(buf+len, " MaxDevices = %d\n",
+ seq_printf(m, " MaxDevices = %d\n",
(ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices);
- len += sprintf(buf+len, " MaxBuses = %d\n", ioc->facts.MaxBuses);
+ seq_printf(m, " MaxBuses = %d\n", ioc->facts.MaxBuses);
/* per-port info */
for (p=0; p < ioc->facts.NumberOfPorts; p++) {
- len += sprintf(buf+len, " PortNumber = %d (of %d)\n",
+ seq_printf(m, " PortNumber = %d (of %d)\n",
p+1,
ioc->facts.NumberOfPorts);
if (ioc->bus_type == FC) {
if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- len += sprintf(buf+len, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ seq_printf(m, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
a[5], a[4], a[3], a[2], a[1], a[0]);
}
- len += sprintf(buf+len, " WWN = %08X%08X:%08X%08X\n",
+ seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
ioc->fc_port_page0[p].WWNN.High,
ioc->fc_port_page0[p].WWNN.Low,
ioc->fc_port_page0[p].WWPN.High,
@@ -6769,9 +6726,21 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
}
}
- MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
+ return 0;
}
+static int mpt_iocinfo_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mpt_iocinfo_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations mpt_iocinfo_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mpt_iocinfo_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* CONFIG_PROC_FS } */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6837,6 +6806,39 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
*size = y;
}
+
+static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
+{
+ char expVer[32];
+
+ mpt_get_fw_exp_ver(expVer, ioc);
+
+ /*
+ * Shorter summary of attached ioc's...
+ */
+ seq_printf(m, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
+ ioc->name,
+ ioc->prod_name,
+ MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */
+ ioc->facts.FWVersion.Word,
+ expVer,
+ ioc->facts.NumberOfPorts,
+ ioc->req_depth);
+
+ if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
+ u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+ seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
+ a[5], a[4], a[3], a[2], a[1], a[0]);
+ }
+
+ seq_printf(m, ", IRQ=%d", ioc->pci_irq);
+
+ if (!ioc->active)
+ seq_printf(m, " (disabled)");
+
+ seq_putc(m, '\n');
+}
+
/**
* mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
* @ioc: Pointer to MPT_ADAPTER structure
@@ -6922,7 +6924,6 @@ EXPORT_SYMBOL(mpt_halt_firmware);
* mpt_SoftResetHandler - Issues a less expensive reset
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Indicates if sleep or schedule must be called.
-
*
* Returns 0 for SUCCESS or -1 if FAILED.
*
@@ -7067,7 +7068,6 @@ mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
* mpt_Soft_Hard_ResetHandler - Try less expensive reset
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Indicates if sleep or schedule must be called.
-
*
* Returns 0 for SUCCESS or -1 if FAILED.
* Try for softreset first, only if it fails go for expensive
@@ -8004,6 +8004,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
* mpt_sas_log_info - Log information returned from SAS IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo reply word from the IOC
+ * @cb_idx: callback function's handle
*
* Refer to lsi/mpi_log_sas.h.
**/
@@ -8050,7 +8051,7 @@ union loginfo_type {
code_desc = ir_code_str[sas_loginfo.dw.code];
if (sas_loginfo.dw.subcode >=
ARRAY_SIZE(raid_sub_code_str))
- break;
+ break;
if (sas_loginfo.dw.code == 0)
sub_code_desc =
raid_sub_code_str[sas_loginfo.dw.subcode];
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 23ed3dec72a..f71f2294847 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -420,31 +420,6 @@ typedef struct _VirtDevice {
#define MPT_TARGET_FLAGS_LED_ON 0x80
/*
- * /proc/mpt interface
- */
-typedef struct {
- const char *name;
- mode_t mode;
- int pad;
- read_proc_t *read_proc;
- write_proc_t *write_proc;
-} mpt_proc_entry_t;
-
-#define MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len) \
-do { \
- len -= offset; \
- if (len < request) { \
- *eof = 1; \
- if (len <= 0) \
- return 0; \
- } else \
- len = request; \
- *start = buf + offset; \
- return len; \
-} while (0)
-
-
-/*
* IOCTL structure and associated defines
*/
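The fusion hunks above retire the old read_proc/MPT_PROC_READ_RETURN bookkeeping in favour of the seq_file interface, where seq_read() handles offset and EOF accounting. A minimal sketch of that pattern follows; the "foo" names are hypothetical and only illustrate the shape of the conversion, not the actual mptbase.c code.

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* hypothetical per-adapter state handed to single_open() below */
    struct foo_adapter {
            const char *name;
            int depth;
    };

    static int foo_proc_show(struct seq_file *m, void *v)
    {
            struct foo_adapter *ioc = m->private;   /* stored by single_open() */

            seq_printf(m, "%s: queue depth %d\n", ioc->name, ioc->depth);
            return 0;       /* no manual offset/eof handling needed */
    }

    static int foo_proc_open(struct inode *inode, struct file *file)
    {
            return single_open(file, foo_proc_show, PDE(inode)->data);
    }

    static const struct file_operations foo_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = foo_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };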
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 06c655c5558..a3970e56ae5 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -389,12 +389,16 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
dev = &c->pdev->dev;
if (i2o_dma_realloc(dev, &c->dlct,
- le32_to_cpu(sb->expected_lct_size)))
+ le32_to_cpu(sb->expected_lct_size))) {
+ mutex_unlock(&c->lct_lock);
return -ENOMEM;
+ }
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
- if (IS_ERR(msg))
+ if (IS_ERR(msg)) {
+ mutex_unlock(&c->lct_lock);
return PTR_ERR(msg);
+ }
msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
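The exec-osm.c fix makes sure c->lct_lock is released on both early-error returns. The same discipline, written with a single unlock point, is sketched below; the foo_* names and the two failing calls are stand-ins, not taken from the driver.

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct foo_ctrl {
            struct mutex lock;
    };

    /* stand-ins for the reallocation and message calls that can fail */
    static int foo_realloc(struct foo_ctrl *c) { return 0; }
    static int foo_send(struct foo_ctrl *c) { return 0; }

    static int foo_notify(struct foo_ctrl *c)
    {
            int ret = 0;

            mutex_lock(&c->lock);

            if (foo_realloc(c)) {
                    ret = -ENOMEM;
                    goto out;               /* still unlocks below */
            }

            if (foo_send(c)) {
                    ret = -EIO;
                    goto out;
            }

    out:
            mutex_unlock(&c->lock);         /* every path ends here */
            return ret;
    }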
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index fc593fbab69..f0f1e667000 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -53,6 +53,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2o.h>
+#include <linux/smp_lock.h>
#include <linux/mempool.h>
@@ -577,6 +578,7 @@ static int i2o_block_open(struct block_device *bdev, fmode_t mode)
if (!dev->i2o_dev)
return -ENODEV;
+ lock_kernel();
if (dev->power > 0x1f)
i2o_block_device_power(dev, 0x02);
@@ -585,6 +587,7 @@ static int i2o_block_open(struct block_device *bdev, fmode_t mode)
i2o_block_device_lock(dev->i2o_dev, -1);
osm_debug("Ready.\n");
+ unlock_kernel();
return 0;
};
@@ -615,6 +618,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode)
if (!dev->i2o_dev)
return 0;
+ lock_kernel();
i2o_block_device_flush(dev->i2o_dev);
i2o_block_device_unlock(dev->i2o_dev, -1);
@@ -625,6 +629,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode)
operation = 0x24;
i2o_block_device_power(dev, operation);
+ unlock_kernel();
return 0;
}
@@ -652,30 +657,40 @@ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
{
struct gendisk *disk = bdev->bd_disk;
struct i2o_block_device *dev = disk->private_data;
+ int ret = -ENOTTY;
/* Anyone capable of this syscall can do *real bad* things */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ lock_kernel();
switch (cmd) {
case BLKI2OGRSTRAT:
- return put_user(dev->rcache, (int __user *)arg);
+ ret = put_user(dev->rcache, (int __user *)arg);
+ break;
case BLKI2OGWSTRAT:
- return put_user(dev->wcache, (int __user *)arg);
+ ret = put_user(dev->wcache, (int __user *)arg);
+ break;
case BLKI2OSRSTRAT:
+ ret = -EINVAL;
if (arg < 0 || arg > CACHE_SMARTFETCH)
- return -EINVAL;
+ break;
dev->rcache = arg;
+ ret = 0;
break;
case BLKI2OSWSTRAT:
+ ret = -EINVAL;
if (arg != 0
&& (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
- return -EINVAL;
+ break;
dev->wcache = arg;
+ ret = 0;
break;
}
- return -ENOTTY;
+ unlock_kernel();
+
+ return ret;
};
/**
@@ -712,7 +727,7 @@ static int i2o_block_transfer(struct request *req)
{
struct i2o_block_device *dev = req->rq_disk->private_data;
struct i2o_controller *c;
- u32 tid = dev->i2o_dev->lct_data.tid;
+ u32 tid;
struct i2o_message *msg;
u32 *mptr;
struct i2o_block_request *ireq = req->special;
@@ -728,6 +743,7 @@ static int i2o_block_transfer(struct request *req)
goto exit;
}
+ tid = dev->i2o_dev->lct_data.tid;
c = dev->i2o_dev->iop;
msg = i2o_msg_get(c);
@@ -883,7 +899,7 @@ static void i2o_block_request_fn(struct request_queue *q)
if (!req)
break;
- if (blk_fs_request(req)) {
+ if (req->cmd_type == REQ_TYPE_FS) {
struct i2o_block_delayed_request *dreq;
struct i2o_block_request *ireq = req->special;
unsigned int queue_depth;
@@ -930,7 +946,8 @@ static const struct block_device_operations i2o_block_fops = {
.owner = THIS_MODULE,
.open = i2o_block_open,
.release = i2o_block_release,
- .locked_ioctl = i2o_block_ioctl,
+ .ioctl = i2o_block_ioctl,
+ .compat_ioctl = i2o_block_ioctl,
.getgeo = i2o_block_getgeo,
.media_changed = i2o_block_media_changed
};
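The i2o_block changes switch from .locked_ioctl to plain .ioctl, take the big kernel lock explicitly, and route every case through a single return so the unlock cannot be skipped. A compressed sketch of that shape; the command number and foo_ prefix are invented for illustration.

    #include <linux/fs.h>
    #include <linux/smp_lock.h>
    #include <linux/errno.h>

    #define FOO_CMD_EXAMPLE 0x1234          /* placeholder ioctl number */

    static int foo_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long arg)
    {
            int ret = -ENOTTY;              /* default for unknown commands */

            lock_kernel();
            switch (cmd) {
            case FOO_CMD_EXAMPLE:
                    ret = 0;                /* real handling would go here */
                    break;
            }
            unlock_kernel();

            return ret;                     /* single exit, lock always dropped */
    }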
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index c4b117f5fb7..068ba0785bb 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -111,11 +111,11 @@ static int i2o_cfg_gethrt(unsigned long arg)
len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
- /* We did a get user...so assuming mem is ok...is this bad? */
- put_user(len, kcmd.reslen);
- if (len > reslen)
+ if (put_user(len, kcmd.reslen))
+ ret = -EFAULT;
+ else if (len > reslen)
ret = -ENOBUFS;
- if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
+ else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
ret = -EFAULT;
return ret;
@@ -147,8 +147,9 @@ static int i2o_cfg_getlct(unsigned long arg)
lct = (i2o_lct *) c->lct;
len = (unsigned int)lct->table_size << 2;
- put_user(len, kcmd.reslen);
- if (len > reslen)
+ if (put_user(len, kcmd.reslen))
+ ret = -EFAULT;
+ else if (len > reslen)
ret = -ENOBUFS;
else if (copy_to_user(kcmd.resbuf, lct, len))
ret = -EFAULT;
@@ -208,8 +209,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
return -EAGAIN;
}
- put_user(len, kcmd.reslen);
- if (len > reslen)
+ if (put_user(len, kcmd.reslen))
+ ret = -EFAULT;
+ else if (len > reslen)
ret = -ENOBUFS;
else if (copy_to_user(kcmd.resbuf, res, len))
ret = -EFAULT;
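The i2o_config hunks stop ignoring the return value of put_user(): a faulting user pointer now produces -EFAULT instead of silently continuing into the copy. A minimal sketch of the checked chain, with made-up parameter names:

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    static int foo_copy_result(unsigned int len, unsigned int reslen,
                               unsigned int __user *ulen,
                               void __user *ubuf, const void *kbuf)
    {
            int ret = 0;

            if (put_user(len, ulen))
                    ret = -EFAULT;          /* user length pointer was bad */
            else if (len > reslen)
                    ret = -ENOBUFS;         /* caller's buffer is too small */
            else if (copy_to_user(ubuf, kbuf, len))
                    ret = -EFAULT;

            return ret;
    }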
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 3d45817e6dc..ea6b2197da8 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -528,7 +528,6 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
* Do the incoming paperwork
*/
i2o_dev = SCpnt->device->hostdata;
- c = i2o_dev->iop;
SCpnt->scsi_done = done;
@@ -538,7 +537,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
done(SCpnt);
goto exit;
}
-
+ c = i2o_dev->iop;
tid = i2o_dev->lct_data.tid;
osm_debug("qcmd: Tid = %03x\n", tid);
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 2c65a2c5729..07933f3f7e4 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -74,12 +74,12 @@ static struct mfd_cell backlight_devs[] = {
}
static struct resource led_resources[] = {
- PM8606_LED_RESOURCE(PM8606_LED1_RED, RGB2B),
- PM8606_LED_RESOURCE(PM8606_LED1_GREEN, RGB2C),
- PM8606_LED_RESOURCE(PM8606_LED1_BLUE, RGB2D),
- PM8606_LED_RESOURCE(PM8606_LED2_RED, RGB1B),
- PM8606_LED_RESOURCE(PM8606_LED2_GREEN, RGB1C),
- PM8606_LED_RESOURCE(PM8606_LED2_BLUE, RGB1D),
+ PM8606_LED_RESOURCE(PM8606_LED1_RED, RGB1B),
+ PM8606_LED_RESOURCE(PM8606_LED1_GREEN, RGB1C),
+ PM8606_LED_RESOURCE(PM8606_LED1_BLUE, RGB1D),
+ PM8606_LED_RESOURCE(PM8606_LED2_RED, RGB2B),
+ PM8606_LED_RESOURCE(PM8606_LED2_GREEN, RGB2C),
+ PM8606_LED_RESOURCE(PM8606_LED2_BLUE, RGB2D),
};
#define PM8606_LED_DEVS(_i) \
@@ -428,52 +428,44 @@ static int __devinit device_gpadc_init(struct pm860x_chip *chip,
{
struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
: chip->companion;
- int use_gpadc = 0, data, ret;
+ int data;
+ int ret;
/* initialize GPADC without activating it */
- if (pdata && pdata->touch) {
- /* set GPADC MISC1 register */
- data = 0;
- data |= (pdata->touch->gpadc_prebias << 1)
- & PM8607_GPADC_PREBIAS_MASK;
- data |= (pdata->touch->slot_cycle << 3)
- & PM8607_GPADC_SLOT_CYCLE_MASK;
- data |= (pdata->touch->off_scale << 5)
- & PM8607_GPADC_OFF_SCALE_MASK;
- data |= (pdata->touch->sw_cal << 7)
- & PM8607_GPADC_SW_CAL_MASK;
- if (data) {
- ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
- if (ret < 0)
- goto out;
- }
- /* set tsi prebias time */
- if (pdata->touch->tsi_prebias) {
- data = pdata->touch->tsi_prebias;
- ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
- if (ret < 0)
- goto out;
- }
- /* set prebias & prechg time of pen detect */
- data = 0;
- data |= pdata->touch->pen_prebias & PM8607_PD_PREBIAS_MASK;
- data |= (pdata->touch->pen_prechg << 5)
- & PM8607_PD_PRECHG_MASK;
- if (data) {
- ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
- if (ret < 0)
- goto out;
- }
+ if (!pdata || !pdata->touch)
+ return -EINVAL;
- use_gpadc = 1;
+ /* set GPADC MISC1 register */
+ data = 0;
+ data |= (pdata->touch->gpadc_prebias << 1) & PM8607_GPADC_PREBIAS_MASK;
+ data |= (pdata->touch->slot_cycle << 3) & PM8607_GPADC_SLOT_CYCLE_MASK;
+ data |= (pdata->touch->off_scale << 5) & PM8607_GPADC_OFF_SCALE_MASK;
+ data |= (pdata->touch->sw_cal << 7) & PM8607_GPADC_SW_CAL_MASK;
+ if (data) {
+ ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
+ if (ret < 0)
+ goto out;
}
-
- /* turn on GPADC */
- if (use_gpadc) {
- ret = pm860x_set_bits(i2c, PM8607_GPADC_MISC1,
- PM8607_GPADC_EN, PM8607_GPADC_EN);
+ /* set tsi prebias time */
+ if (pdata->touch->tsi_prebias) {
+ data = pdata->touch->tsi_prebias;
+ ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
+ if (ret < 0)
+ goto out;
}
+ /* set prebias & prechg time of pen detect */
+ data = 0;
+ data |= pdata->touch->pen_prebias & PM8607_PD_PREBIAS_MASK;
+ data |= (pdata->touch->pen_prechg << 5) & PM8607_PD_PRECHG_MASK;
+ if (data) {
+ ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = pm860x_set_bits(i2c, PM8607_GPADC_MISC1,
+ PM8607_GPADC_EN, PM8607_GPADC_EN);
out:
return ret;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 9da0e504bbe..db51ea1c608 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -7,7 +7,16 @@ menuconfig MFD_SUPPORT
depends on HAS_IOMEM
default y
help
- Configure MFD device drivers.
+ Multifunction devices embed several functions (e.g. GPIOs,
+ touchscreens, keyboards, current regulators, power management chips,
+ etc...) in one single integrated circuit. They usually talk to the
+ main CPU through one or more IRQ lines and low speed data busses (SPI,
+ I2C, etc..). They appear as one single device to the main system
+ through the data bus and the MFD framework allows for sub devices
+ (a.k.a. functions) to appear as discrete platform devices.
+ MFDs are typically found on embedded platforms.
+
+ This option alone does not add any kernel code.
if MFD_SUPPORT
@@ -177,6 +186,38 @@ config TWL4030_CODEC
select MFD_CORE
default n
+config TWL6030_PWM
+ tristate "TWL6030 PWM (Pulse Width Modulator) Support"
+ depends on TWL4030_CORE
+ select HAVE_PWM
+ default n
+ help
+ Say yes here if you want support for TWL6030 PWM.
+ This is used to control charging LED brightness.
+
+config MFD_STMPE
+ bool "Support STMicroelectronics STMPE"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the STMPE family of I/O Expanders from
+ STMicroelectronics.
+
+ Currently supported devices are:
+
+ STMPE811: GPIO, Touchscreen
+ STMPE1601: GPIO, Keypad
+ STMPE2401: GPIO, Keypad
+ STMPE2403: GPIO, Keypad
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device. Currently available sub drivers are:
+
+ GPIO: stmpe-gpio
+ Keypad: stmpe-keypad
+ Touchscreen: stmpe-ts
+
config MFD_TC35892
bool "Support Toshiba TC35892"
depends on I2C=y && GENERIC_HARDIRQS
@@ -252,6 +293,16 @@ config MFD_MAX8925
accessing the device, additional drivers must be enabled in order
to use the functionality of the device.
+config MFD_MAX8998
+ bool "Maxim Semiconductor MAX8998 PMIC Support"
+ depends on I2C=y
+ select MFD_CORE
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX8998. This is
+ a Power Management IC. This driver provides common support for
+ accessing the device, additional drivers must be enabled in order
+ to use the functionality of the device.
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -482,6 +533,28 @@ config MFD_JANZ_CMODIO
host many different types of MODULbus daughterboards, including
CAN and GPIO controllers.
+config MFD_JZ4740_ADC
+ tristate "Support for the JZ4740 SoC ADC core"
+ select MFD_CORE
+ depends on MACH_JZ4740
+ help
+ Say yes here if you want support for the ADC unit in the JZ4740 SoC.
+ This driver is necessary for the jz4740-battery and jz4740-hwmon drivers.
+
+config MFD_TPS6586X
+ tristate "TPS6586x Power Management chips"
+ depends on I2C && GPIOLIB
+ select MFD_CORE
+ help
+ If you say yes here you get support for the TPS6586X series of
+ Power Management chips.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps6586x.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index fb503e77dc6..feaeeaeeddb 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
+obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
+obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
obj-$(CONFIG_MFD_MC13783) += mc13783-core.o
@@ -56,6 +58,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
+obj-$(CONFIG_MFD_MAX8998) += max8998.o
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
@@ -71,3 +74,5 @@ obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
+obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
+obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index 63d2b727ddb..8440010eb2b 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -199,7 +199,7 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
err = ab3100_otp_read(otp);
if (err)
- return err;
+ goto err_otp_read;
dev_info(&pdev->dev, "AB3100 OTP readout registered\n");
@@ -208,21 +208,21 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
err = device_create_file(&pdev->dev,
&ab3100_otp_attrs[i]);
if (err)
- goto out_no_sysfs;
+ goto err_create_file;
}
/* debugfs entries */
err = ab3100_otp_init_debugfs(&pdev->dev, otp);
if (err)
- goto out_no_debugfs;
+ goto err_init_debugfs;
return 0;
-out_no_sysfs:
- for (i = 0; i < ARRAY_SIZE(ab3100_otp_attrs); i++)
- device_remove_file(&pdev->dev,
- &ab3100_otp_attrs[i]);
-out_no_debugfs:
+err_init_debugfs:
+err_create_file:
+ while (--i >= 0)
+ device_remove_file(&pdev->dev, &ab3100_otp_attrs[i]);
+err_otp_read:
kfree(otp);
return err;
}
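The ab3100-otp rework replaces the old cleanup, which removed every sysfs file even when none had been created, with a reverse unwind that only undoes the steps that actually succeeded. A generic sketch of that probe error-path shape, with hypothetical foo_* helpers:

    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/platform_device.h>

    static struct device_attribute foo_attrs[2];    /* placeholder attributes */

    static int foo_hw_init(struct platform_device *pdev) { return 0; }  /* stand-in */
    static void foo_hw_exit(struct platform_device *pdev) { }           /* stand-in */

    static int foo_probe(struct platform_device *pdev)
    {
            int i, err;

            err = foo_hw_init(pdev);
            if (err)
                    goto err_hw;

            for (i = 0; i < ARRAY_SIZE(foo_attrs); i++) {
                    err = device_create_file(&pdev->dev, &foo_attrs[i]);
                    if (err)
                            goto err_create_file;
            }

            return 0;

    err_create_file:
            while (--i >= 0)        /* remove only the files created so far */
                    device_remove_file(&pdev->dev, &foo_attrs[i]);
            foo_hw_exit(pdev);
    err_hw:
            return err;
    }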
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index f54ab62e7bc..8a98739e6d9 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -589,16 +589,16 @@ static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
}
/*
- * The exported register access functionality.
+ * The register access functionality.
*/
-int ab3550_get_chip_id(struct device *dev)
+static int ab3550_get_chip_id(struct device *dev)
{
struct ab3550 *ab = dev_get_drvdata(dev->parent);
return (int)ab->chip_id;
}
-int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank,
- u8 reg, u8 bitmask, u8 bitvalues)
+static int ab3550_mask_and_set_register_interruptible(struct device *dev,
+ u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
{
struct ab3550 *ab;
struct platform_device *pdev = to_platform_device(dev);
@@ -612,15 +612,15 @@ int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank,
bitmask, bitvalues);
}
-int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
- u8 value)
+static int ab3550_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 value)
{
return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
value);
}
-int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
- u8 *value)
+static int ab3550_get_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 *value)
{
struct ab3550 *ab;
struct platform_device *pdev = to_platform_device(dev);
@@ -633,7 +633,7 @@ int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
return get_register_interruptible(ab, bank, reg, value);
}
-int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
+static int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
u8 first_reg, u8 *regvals, u8 numregs)
{
struct ab3550 *ab;
@@ -649,7 +649,8 @@ int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
numregs);
}
-int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event)
+static int ab3550_event_registers_startup_state_get(struct device *dev,
+ u8 *event)
{
struct ab3550 *ab;
@@ -661,7 +662,7 @@ int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event)
return 0;
}
-int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
+static int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
{
struct ab3550 *ab;
struct ab3550_platform_data *plf_data;
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index f3d26fa9c34..defa786dee3 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/ab8500.h>
+#include <linux/regulator/ab8500.h>
/*
* Interrupt register offsets
@@ -352,6 +353,7 @@ static struct mfd_cell ab8500_devs[] = {
{ .name = "ab8500-audio", },
{ .name = "ab8500-usb", },
{ .name = "ab8500-pwm", },
+ { .name = "ab8500-regulator", },
};
int __devinit ab8500_init(struct ab8500 *ab8500)
@@ -411,7 +413,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
goto out_removeirq;
}
- ret = mfd_add_devices(ab8500->dev, -1, ab8500_devs,
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
ARRAY_SIZE(ab8500_devs), NULL,
ab8500->irq_base);
if (ret)
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
index b81d4f768ef..e1c8b62b086 100644
--- a/drivers/mfd/ab8500-spi.c
+++ b/drivers/mfd/ab8500-spi.c
@@ -68,7 +68,12 @@ static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
ret = spi_sync(spi, &msg);
if (!ret)
- ret = ab8500->rx_buf[0];
+ /*
+ * Only the 8 lowermost bits carry
+ * defined data; the rest may
+ * vary depending on chip/board noise.
+ */
+ ret = ab8500->rx_buf[0] & 0xFFU;
return ret;
}
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index 3b3b97ec32a..f12720dbe12 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -36,7 +36,7 @@ int abx500_register_ops(struct device *dev, struct abx500_ops *ops)
struct abx500_device_entry *dev_entry;
dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL);
- if (IS_ERR(dev_entry)) {
+ if (!dev_entry) {
dev_err(dev, "register_ops kzalloc failed");
return -ENOMEM;
}
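The abx500-core fix deserves a note: kzalloc() reports failure with a NULL return, never an ERR_PTR value, so the old IS_ERR() test could never fire and an allocation failure went unnoticed. The correct check, in a small sketch with invented names:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct foo_entry {
            int dummy;
    };

    static int foo_register(void)
    {
            struct foo_entry *e;

            e = kzalloc(sizeof(*e), GFP_KERNEL);
            if (!e)                 /* NULL is the only failure signal here */
                    return -ENOMEM;

            /* IS_ERR(e) would always be false for a failed kzalloc() */
            kfree(e);
            return 0;
    }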
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 3e75f02e477..33c923d215c 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -94,7 +94,8 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail4;
}
davinci_vc->davinci_vcif.dma_tx_channel = res->start;
@@ -104,7 +105,8 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail4;
}
davinci_vc->davinci_vcif.dma_rx_channel = res->start;
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 9ed630799ac..36a166bcdb0 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/mfd/core.h>
#include <linux/mfd/janz.h>
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
new file mode 100644
index 00000000000..3ad492cb6c4
--- /dev/null
+++ b/drivers/mfd/jz4740-adc.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC ADC driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This driver synchronizes access to the JZ4740 ADC core between the
+ * JZ4740 battery and hwmon drivers.
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/clk.h>
+#include <linux/mfd/core.h>
+
+#include <linux/jz4740-adc.h>
+
+
+#define JZ_REG_ADC_ENABLE 0x00
+#define JZ_REG_ADC_CFG 0x04
+#define JZ_REG_ADC_CTRL 0x08
+#define JZ_REG_ADC_STATUS 0x0c
+
+#define JZ_REG_ADC_TOUCHSCREEN_BASE 0x10
+#define JZ_REG_ADC_BATTERY_BASE 0x1c
+#define JZ_REG_ADC_HWMON_BASE 0x20
+
+#define JZ_ADC_ENABLE_TOUCH BIT(2)
+#define JZ_ADC_ENABLE_BATTERY BIT(1)
+#define JZ_ADC_ENABLE_ADCIN BIT(0)
+
+enum {
+ JZ_ADC_IRQ_ADCIN = 0,
+ JZ_ADC_IRQ_BATTERY,
+ JZ_ADC_IRQ_TOUCH,
+ JZ_ADC_IRQ_PENUP,
+ JZ_ADC_IRQ_PENDOWN,
+};
+
+struct jz4740_adc {
+ struct resource *mem;
+ void __iomem *base;
+
+ int irq;
+ int irq_base;
+
+ struct clk *clk;
+ atomic_t clk_ref;
+
+ spinlock_t lock;
+};
+
+static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
+ bool masked)
+{
+ unsigned long flags;
+ uint8_t val;
+
+ irq -= adc->irq_base;
+
+ spin_lock_irqsave(&adc->lock, flags);
+
+ val = readb(adc->base + JZ_REG_ADC_CTRL);
+ if (masked)
+ val |= BIT(irq);
+ else
+ val &= ~BIT(irq);
+ writeb(val, adc->base + JZ_REG_ADC_CTRL);
+
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+static void jz4740_adc_irq_mask(unsigned int irq)
+{
+ struct jz4740_adc *adc = get_irq_chip_data(irq);
+ jz4740_adc_irq_set_masked(adc, irq, true);
+}
+
+static void jz4740_adc_irq_unmask(unsigned int irq)
+{
+ struct jz4740_adc *adc = get_irq_chip_data(irq);
+ jz4740_adc_irq_set_masked(adc, irq, false);
+}
+
+static void jz4740_adc_irq_ack(unsigned int irq)
+{
+ struct jz4740_adc *adc = get_irq_chip_data(irq);
+
+ irq -= adc->irq_base;
+ writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
+}
+
+static struct irq_chip jz4740_adc_irq_chip = {
+ .name = "jz4740-adc",
+ .mask = jz4740_adc_irq_mask,
+ .unmask = jz4740_adc_irq_unmask,
+ .ack = jz4740_adc_irq_ack,
+};
+
+static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ struct jz4740_adc *adc = get_irq_desc_data(desc);
+ uint8_t status;
+ unsigned int i;
+
+ status = readb(adc->base + JZ_REG_ADC_STATUS);
+
+ for (i = 0; i < 5; ++i) {
+ if (status & BIT(i))
+ generic_handle_irq(adc->irq_base + i);
+ }
+}
+
+
+/* Refcounting for the ADC clock is done in here instead of in the clock
+ * framework, because it is the only clock which is shared between multiple
+ * devices and thus is the only clock which needs refcounting */
+static inline void jz4740_adc_clk_enable(struct jz4740_adc *adc)
+{
+ if (atomic_inc_return(&adc->clk_ref) == 1)
+ clk_enable(adc->clk);
+}
+
+static inline void jz4740_adc_clk_disable(struct jz4740_adc *adc)
+{
+ if (atomic_dec_return(&adc->clk_ref) == 0)
+ clk_disable(adc->clk);
+}
+
+static inline void jz4740_adc_set_enabled(struct jz4740_adc *adc, int engine,
+ bool enabled)
+{
+ unsigned long flags;
+ uint8_t val;
+
+ spin_lock_irqsave(&adc->lock, flags);
+
+ val = readb(adc->base + JZ_REG_ADC_ENABLE);
+ if (enabled)
+ val |= BIT(engine);
+ else
+ val &= ~BIT(engine);
+ writeb(val, adc->base + JZ_REG_ADC_ENABLE);
+
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+static int jz4740_adc_cell_enable(struct platform_device *pdev)
+{
+ struct jz4740_adc *adc = dev_get_drvdata(pdev->dev.parent);
+
+ jz4740_adc_clk_enable(adc);
+ jz4740_adc_set_enabled(adc, pdev->id, true);
+
+ return 0;
+}
+
+static int jz4740_adc_cell_disable(struct platform_device *pdev)
+{
+ struct jz4740_adc *adc = dev_get_drvdata(pdev->dev.parent);
+
+ jz4740_adc_set_enabled(adc, pdev->id, false);
+ jz4740_adc_clk_disable(adc);
+
+ return 0;
+}
+
+int jz4740_adc_set_config(struct device *dev, uint32_t mask, uint32_t val)
+{
+ struct jz4740_adc *adc = dev_get_drvdata(dev);
+ unsigned long flags;
+ uint32_t cfg;
+
+ if (!adc)
+ return -ENODEV;
+
+ spin_lock_irqsave(&adc->lock, flags);
+
+ cfg = readl(adc->base + JZ_REG_ADC_CFG);
+
+ cfg &= ~mask;
+ cfg |= val;
+
+ writel(cfg, adc->base + JZ_REG_ADC_CFG);
+
+ spin_unlock_irqrestore(&adc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(jz4740_adc_set_config);
+
+static struct resource jz4740_hwmon_resources[] = {
+ {
+ .start = JZ_ADC_IRQ_ADCIN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = JZ_REG_ADC_HWMON_BASE,
+ .end = JZ_REG_ADC_HWMON_BASE + 3,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource jz4740_battery_resources[] = {
+ {
+ .start = JZ_ADC_IRQ_BATTERY,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = JZ_REG_ADC_BATTERY_BASE,
+ .end = JZ_REG_ADC_BATTERY_BASE + 3,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+const struct mfd_cell jz4740_adc_cells[] = {
+ {
+ .id = 0,
+ .name = "jz4740-hwmon",
+ .num_resources = ARRAY_SIZE(jz4740_hwmon_resources),
+ .resources = jz4740_hwmon_resources,
+ .platform_data = (void *)&jz4740_adc_cells[0],
+ .data_size = sizeof(struct mfd_cell),
+
+ .enable = jz4740_adc_cell_enable,
+ .disable = jz4740_adc_cell_disable,
+ },
+ {
+ .id = 1,
+ .name = "jz4740-battery",
+ .num_resources = ARRAY_SIZE(jz4740_battery_resources),
+ .resources = jz4740_battery_resources,
+ .platform_data = (void *)&jz4740_adc_cells[1],
+ .data_size = sizeof(struct mfd_cell),
+
+ .enable = jz4740_adc_cell_enable,
+ .disable = jz4740_adc_cell_disable,
+ },
+};
+
+static int __devinit jz4740_adc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jz4740_adc *adc;
+ struct resource *mem_base;
+ int irq;
+
+ adc = kmalloc(sizeof(*adc), GFP_KERNEL);
+ if (!adc) {
+ dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+ return -ENOMEM;
+ }
+
+ adc->irq = platform_get_irq(pdev, 0);
+ if (adc->irq < 0) {
+ ret = adc->irq;
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
+ goto err_free;
+ }
+
+ adc->irq_base = platform_get_irq(pdev, 1);
+ if (adc->irq_base < 0) {
+ ret = adc->irq_base;
+ dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
+ goto err_free;
+ }
+
+ mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_base) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
+ goto err_free;
+ }
+
+ /* Only request the shared registers for the MFD driver */
+ adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
+ pdev->name);
+ if (!adc->mem) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to request mmio memory region\n");
+ goto err_free;
+ }
+
+ adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
+ if (!adc->base) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
+ goto err_release_mem_region;
+ }
+
+ adc->clk = clk_get(&pdev->dev, "adc");
+ if (IS_ERR(adc->clk)) {
+ ret = PTR_ERR(adc->clk);
+ dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
+ goto err_iounmap;
+ }
+
+ spin_lock_init(&adc->lock);
+ atomic_set(&adc->clk_ref, 0);
+
+ platform_set_drvdata(pdev, adc);
+
+ for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) {
+ set_irq_chip_data(irq, adc);
+ set_irq_chip_and_handler(irq, &jz4740_adc_irq_chip,
+ handle_level_irq);
+ }
+
+ set_irq_data(adc->irq, adc);
+ set_irq_chained_handler(adc->irq, jz4740_adc_irq_demux);
+
+ writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
+ writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
+
+ ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
+ ARRAY_SIZE(jz4740_adc_cells), mem_base, adc->irq_base);
+ if (ret < 0)
+ goto err_clk_put;
+
+ return 0;
+
+err_clk_put:
+ clk_put(adc->clk);
+err_iounmap:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(adc->base);
+err_release_mem_region:
+ release_mem_region(adc->mem->start, resource_size(adc->mem));
+err_free:
+ kfree(adc);
+
+ return ret;
+}
+
+static int __devexit jz4740_adc_remove(struct platform_device *pdev)
+{
+ struct jz4740_adc *adc = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+
+ set_irq_data(adc->irq, NULL);
+ set_irq_chained_handler(adc->irq, NULL);
+
+ iounmap(adc->base);
+ release_mem_region(adc->mem->start, resource_size(adc->mem));
+
+ clk_put(adc->clk);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(adc);
+
+ return 0;
+}
+
+struct platform_driver jz4740_adc_driver = {
+ .probe = jz4740_adc_probe,
+ .remove = __devexit_p(jz4740_adc_remove),
+ .driver = {
+ .name = "jz4740-adc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init jz4740_adc_init(void)
+{
+ return platform_driver_register(&jz4740_adc_driver);
+}
+module_init(jz4740_adc_init);
+
+static void __exit jz4740_adc_exit(void)
+{
+ platform_driver_unregister(&jz4740_adc_driver);
+}
+module_exit(jz4740_adc_exit);
+
+MODULE_DESCRIPTION("JZ4740 SoC ADC driver");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:jz4740-adc");
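jz4740_adc_set_config() is the only symbol the new core exports; the battery and hwmon cells are expected to reach the shared CFG register through their parent device, since the driver data is set on the MFD parent. A hedged usage sketch follows; the cell driver name and the mask/value constants are placeholders, not the real JZ_ADC_CONFIG_* bits.

    #include <linux/platform_device.h>
    #include <linux/jz4740-adc.h>

    static int foo_cell_probe(struct platform_device *pdev)
    {
            /* pdev is one of the cells; its parent owns the ADC core */
            return jz4740_adc_set_config(pdev->dev.parent, 0x3, 0x1);
    }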
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index f621bcea3d0..428377a5a6f 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -90,6 +90,24 @@ static struct mfd_cell rtc_devs[] = {
},
};
+static struct resource onkey_resources[] = {
+ {
+ .name = "max8925-onkey",
+ .start = MAX8925_IRQ_GPM_SW_3SEC,
+ .end = MAX8925_IRQ_GPM_SW_3SEC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell onkey_devs[] = {
+ {
+ .name = "max8925-onkey",
+ .num_resources = 1,
+ .resources = &onkey_resources[0],
+ .id = -1,
+ },
+};
+
#define MAX8925_REG_RESOURCE(_start, _end) \
{ \
.start = MAX8925_##_start, \
@@ -411,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
irq_tsc = cache_tsc;
for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
irq_data = &max8925_irqs[i];
+ /* 1 -- disable, 0 -- enable */
switch (irq_data->mask_reg) {
case MAX8925_CHG_IRQ1_MASK:
- irq_chg[0] &= irq_data->enable;
+ irq_chg[0] &= ~irq_data->enable;
break;
case MAX8925_CHG_IRQ2_MASK:
- irq_chg[1] &= irq_data->enable;
+ irq_chg[1] &= ~irq_data->enable;
break;
case MAX8925_ON_OFF_IRQ1_MASK:
- irq_on[0] &= irq_data->enable;
+ irq_on[0] &= ~irq_data->enable;
break;
case MAX8925_ON_OFF_IRQ2_MASK:
- irq_on[1] &= irq_data->enable;
+ irq_on[1] &= ~irq_data->enable;
break;
case MAX8925_RTC_IRQ_MASK:
- irq_rtc &= irq_data->enable;
+ irq_rtc &= ~irq_data->enable;
break;
case MAX8925_TSC_IRQ_MASK:
- irq_tsc &= irq_data->enable;
+ irq_tsc &= ~irq_data->enable;
break;
default:
dev_err(chip->dev, "wrong IRQ\n");
@@ -596,6 +615,15 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
dev_err(chip->dev, "Failed to add rtc subdev\n");
goto out;
}
+
+ ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
+ ARRAY_SIZE(onkey_devs),
+ &onkey_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add onkey subdev\n");
+ goto out_dev;
+ }
+
if (pdata && pdata->regulator[0]) {
ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
ARRAY_SIZE(regulator_devs),
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
new file mode 100644
index 00000000000..73e6f5c4efc
--- /dev/null
+++ b/drivers/mfd/max8998.c
@@ -0,0 +1,158 @@
+/*
+ * max8998.c - mfd core driver for the Maxim 8998
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+static struct mfd_cell max8998_devs[] = {
+ {
+ .name = "max8998-pmic",
+ }
+};
+
+static int max8998_i2c_device_read(struct max8998_dev *max8998, u8 reg, u8 *dest)
+{
+ struct i2c_client *client = max8998->i2c_client;
+ int ret;
+
+ mutex_lock(&max8998->iolock);
+ ret = i2c_smbus_read_byte_data(client, reg);
+ mutex_unlock(&max8998->iolock);
+ if (ret < 0)
+ return ret;
+
+ ret &= 0xff;
+ *dest = ret;
+ return 0;
+}
+
+static int max8998_i2c_device_write(struct max8998_dev *max8998, u8 reg, u8 value)
+{
+ struct i2c_client *client = max8998->i2c_client;
+ int ret;
+
+ mutex_lock(&max8998->iolock);
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+ mutex_unlock(&max8998->iolock);
+ return ret;
+}
+
+static int max8998_i2c_device_update(struct max8998_dev *max8998, u8 reg,
+ u8 val, u8 mask)
+{
+ struct i2c_client *client = max8998->i2c_client;
+ int ret;
+
+ mutex_lock(&max8998->iolock);
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret >= 0) {
+ u8 old_val = ret & 0xff;
+ u8 new_val = (val & mask) | (old_val & (~mask));
+ ret = i2c_smbus_write_byte_data(client, reg, new_val);
+ if (ret >= 0)
+ ret = 0;
+ }
+ mutex_unlock(&max8998->iolock);
+ return ret;
+}
+
+static int max8998_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max8998_dev *max8998;
+ int ret = 0;
+
+ max8998 = kzalloc(sizeof(struct max8998_dev), GFP_KERNEL);
+ if (max8998 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max8998);
+ max8998->dev = &i2c->dev;
+ max8998->i2c_client = i2c;
+ max8998->dev_read = max8998_i2c_device_read;
+ max8998->dev_write = max8998_i2c_device_write;
+ max8998->dev_update = max8998_i2c_device_update;
+ mutex_init(&max8998->iolock);
+
+ ret = mfd_add_devices(max8998->dev, -1,
+ max8998_devs, ARRAY_SIZE(max8998_devs),
+ NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(max8998->dev);
+ kfree(max8998);
+ return ret;
+}
+
+static int max8998_i2c_remove(struct i2c_client *i2c)
+{
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max8998->dev);
+ kfree(max8998);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8998_i2c_id[] = {
+ { "max8998", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
+
+static struct i2c_driver max8998_i2c_driver = {
+ .driver = {
+ .name = "max8998",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_i2c_probe,
+ .remove = max8998_i2c_remove,
+ .id_table = max8998_i2c_id,
+};
+
+static int __init max8998_i2c_init(void)
+{
+ return i2c_add_driver(&max8998_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max8998_i2c_init);
+
+static void __exit max8998_i2c_exit(void)
+{
+ i2c_del_driver(&max8998_i2c_driver);
+}
+module_exit(max8998_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 8998 multi-function core driver");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_LICENSE("GPL");
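max8998_i2c_device_update() is the usual read-modify-write under iolock, and sub-drivers are expected to call it through the dev_update hook stored in struct max8998_dev rather than touching the I2C client directly. A brief illustrative call; the register number and field mask are placeholders.

    #include <linux/mfd/max8998-private.h>  /* struct max8998_dev, as included above */

    static int foo_set_field(struct max8998_dev *max8998)
    {
            /* write value 0x2 into the bits selected by mask 0x6 of register 0x10 */
            return max8998->dev_update(max8998, 0x10, 0x2, 0x6);
    }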
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index fecf38a4f02..6df34989c1f 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -11,9 +11,31 @@
*/
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/mc13783-private.h>
+#include <linux/mfd/mc13783.h>
+
+struct mc13783 {
+ struct spi_device *spidev;
+ struct mutex lock;
+ int irq;
+ int flags;
+
+ irq_handler_t irqhandler[MC13783_NUM_IRQ];
+ void *irqdata[MC13783_NUM_IRQ];
+
+ /* XXX these should go as platformdata to the regulator subdevice */
+ struct mc13783_regulator_init_data *regulators;
+ int num_regulators;
+};
+
+#define MC13783_REG_REVISION 7
+#define MC13783_REG_ADC_0 43
+#define MC13783_REG_ADC_1 44
+#define MC13783_REG_ADC_2 45
#define MC13783_IRQSTAT0 0
#define MC13783_IRQSTAT0_ADCDONEI (1 << 0)
@@ -226,6 +248,12 @@ int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
}
EXPORT_SYMBOL(mc13783_reg_rmw);
+int mc13783_get_flags(struct mc13783 *mc13783)
+{
+ return mc13783->flags;
+}
+EXPORT_SYMBOL(mc13783_get_flags);
+
int mc13783_irq_mask(struct mc13783 *mc13783, int irq)
{
int ret;
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index a3fb4bcb988..4ba85bbdb4c 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -128,6 +128,39 @@
#define MENELAUS_RESERVED14_IRQ 14 /* Reserved */
#define MENELAUS_RESERVED15_IRQ 15 /* Reserved */
+/* VCORE_CTRL1 register */
+#define VCORE_CTRL1_BYP_COMP (1 << 5)
+#define VCORE_CTRL1_HW_NSW (1 << 7)
+
+/* GPIO_CTRL register */
+#define GPIO_CTRL_SLOTSELEN (1 << 5)
+#define GPIO_CTRL_SLPCTLEN (1 << 6)
+#define GPIO1_DIR_INPUT (1 << 0)
+#define GPIO2_DIR_INPUT (1 << 1)
+#define GPIO3_DIR_INPUT (1 << 2)
+
+/* MCT_CTRL1 register */
+#define MCT_CTRL1_S1_CMD_OD (1 << 2)
+#define MCT_CTRL1_S2_CMD_OD (1 << 3)
+
+/* MCT_CTRL2 register */
+#define MCT_CTRL2_VS2_SEL_D0 (1 << 0)
+#define MCT_CTRL2_VS2_SEL_D1 (1 << 1)
+#define MCT_CTRL2_S1CD_BUFEN (1 << 4)
+#define MCT_CTRL2_S2CD_BUFEN (1 << 5)
+#define MCT_CTRL2_S1CD_DBEN (1 << 6)
+#define MCT_CTRL2_S2CD_BEN (1 << 7)
+
+/* MCT_CTRL3 register */
+#define MCT_CTRL3_SLOT1_EN (1 << 0)
+#define MCT_CTRL3_SLOT2_EN (1 << 1)
+#define MCT_CTRL3_S1_AUTO_EN (1 << 2)
+#define MCT_CTRL3_S2_AUTO_EN (1 << 3)
+
+/* MCT_PIN_ST register */
+#define MCT_PIN_ST_S1_CD_ST (1 << 0)
+#define MCT_PIN_ST_S2_CD_ST (1 << 1)
+
static void menelaus_work(struct work_struct *_menelaus);
struct menelaus_chip {
@@ -249,10 +282,10 @@ static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
return;
if (!(reg & 0x1))
- card_mask |= (1 << 0);
+ card_mask |= MCT_PIN_ST_S1_CD_ST;
if (!(reg & 0x2))
- card_mask |= (1 << 1);
+ card_mask |= MCT_PIN_ST_S2_CD_ST;
if (menelaus_hw->mmc_callback)
menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
@@ -277,14 +310,14 @@ int menelaus_set_mmc_opendrain(int slot, int enable)
val = ret;
if (slot == 1) {
if (enable)
- val |= 1 << 2;
+ val |= MCT_CTRL1_S1_CMD_OD;
else
- val &= ~(1 << 2);
+ val &= ~MCT_CTRL1_S1_CMD_OD;
} else {
if (enable)
- val |= 1 << 3;
+ val |= MCT_CTRL1_S2_CMD_OD;
else
- val &= ~(1 << 3);
+ val &= ~MCT_CTRL1_S2_CMD_OD;
}
ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
mutex_unlock(&the_menelaus->lock);
@@ -301,11 +334,11 @@ int menelaus_set_slot_sel(int enable)
ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
if (ret < 0)
goto out;
- ret |= 0x02;
+ ret |= GPIO2_DIR_INPUT;
if (enable)
- ret |= 1 << 5;
+ ret |= GPIO_CTRL_SLOTSELEN;
else
- ret &= ~(1 << 5);
+ ret &= ~GPIO_CTRL_SLOTSELEN;
ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
out:
mutex_unlock(&the_menelaus->lock);
@@ -330,14 +363,14 @@ int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
val = ret;
if (slot == 1) {
if (cd_en)
- val |= (1 << 4) | (1 << 6);
+ val |= MCT_CTRL2_S1CD_BUFEN | MCT_CTRL2_S1CD_DBEN;
else
- val &= ~((1 << 4) | (1 << 6));
+ val &= ~(MCT_CTRL2_S1CD_BUFEN | MCT_CTRL2_S1CD_DBEN);
} else {
if (cd_en)
- val |= (1 << 5) | (1 << 7);
+ val |= MCT_CTRL2_S2CD_BUFEN | MCT_CTRL2_S2CD_BEN;
else
- val &= ~((1 << 5) | (1 << 7));
+ val &= ~(MCT_CTRL2_S2CD_BUFEN | MCT_CTRL2_S2CD_BEN);
}
ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
if (ret < 0)
@@ -349,25 +382,25 @@ int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
val = ret;
if (slot == 1) {
if (enable)
- val |= 1 << 0;
+ val |= MCT_CTRL3_SLOT1_EN;
else
- val &= ~(1 << 0);
+ val &= ~MCT_CTRL3_SLOT1_EN;
} else {
int b;
if (enable)
- ret |= 1 << 1;
+ val |= MCT_CTRL3_SLOT2_EN;
else
- ret &= ~(1 << 1);
+ val &= ~MCT_CTRL3_SLOT2_EN;
b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
- b &= ~0x03;
+ b &= ~(MCT_CTRL2_VS2_SEL_D0 | MCT_CTRL2_VS2_SEL_D1);
b |= power;
ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
if (ret < 0)
goto out;
}
/* Disable autonomous shutdown */
- val &= ~(0x03 << 2);
+ val &= ~(MCT_CTRL3_S1_AUTO_EN | MCT_CTRL3_S2_AUTO_EN);
ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
out:
mutex_unlock(&the_menelaus->lock);
@@ -552,7 +585,7 @@ int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
if (!the_menelaus->vcore_hw_mode) {
val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
/* HW mode, turn OFF byte comparator */
- val |= ((1 << 7) | (1 << 5));
+ val |= (VCORE_CTRL1_HW_NSW | VCORE_CTRL1_BYP_COMP);
ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
the_menelaus->vcore_hw_mode = 1;
}
@@ -749,7 +782,7 @@ int menelaus_set_regulator_sleep(int enable, u32 val)
ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
if (ret < 0)
goto out;
- t = ((1 << 6) | 0x04);
+ t = (GPIO_CTRL_SLPCTLEN | GPIO3_DIR_INPUT);
if (enable)
ret |= t;
else
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 7dd76bceaae..1823a57b7d8 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -70,7 +70,9 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_res;
}
- platform_device_add_resources(pdev, res, cell->num_resources);
+ ret = platform_device_add_resources(pdev, res, cell->num_resources);
+ if (ret)
+ goto fail_res;
ret = platform_device_add(pdev);
if (ret)
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
new file mode 100644
index 00000000000..0754c5e9199
--- /dev/null
+++ b/drivers/mfd/stmpe.c
@@ -0,0 +1,985 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stmpe.h>
+#include "stmpe.h"
+
+static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
+{
+ return stmpe->variant->enable(stmpe, blocks, true);
+}
+
+static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks)
+{
+ return stmpe->variant->enable(stmpe, blocks, false);
+}
+
+static int __stmpe_reg_read(struct stmpe *stmpe, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(stmpe->i2c, reg);
+ if (ret < 0)
+ dev_err(stmpe->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ dev_vdbg(stmpe->dev, "rd: reg %#x => data %#x\n", reg, ret);
+
+ return ret;
+}
+
+static int __stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
+{
+ int ret;
+
+ dev_vdbg(stmpe->dev, "wr: reg %#x <= %#x\n", reg, val);
+
+ ret = i2c_smbus_write_byte_data(stmpe->i2c, reg, val);
+ if (ret < 0)
+ dev_err(stmpe->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+
+static int __stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = __stmpe_reg_read(stmpe, reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~mask;
+ ret |= val;
+
+ return __stmpe_reg_write(stmpe, reg, ret);
+}
+
+static int __stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length,
+ u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(stmpe->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(stmpe->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ dev_vdbg(stmpe->dev, "rd: reg %#x (%d) => ret %#x\n", reg, length, ret);
+ stmpe_dump_bytes("stmpe rd: ", values, length);
+
+ return ret;
+}
+
+static int __stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ dev_vdbg(stmpe->dev, "wr: regs %#x (%d)\n", reg, length);
+ stmpe_dump_bytes("stmpe wr: ", values, length);
+
+ ret = i2c_smbus_write_i2c_block_data(stmpe->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(stmpe->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+
+/**
+ * stmpe_enable - enable blocks on an STMPE device
+ * @stmpe: Device to work on
+ * @blocks: Mask of blocks (enum stmpe_block values) to enable
+ */
+int stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_enable(stmpe, blocks);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_enable);
+
+/**
+ * stmpe_disable - disable blocks on an STMPE device
+ * @stmpe: Device to work on
+ * @blocks: Mask of blocks (enum stmpe_block values) to enable
+ */
+int stmpe_disable(struct stmpe *stmpe, unsigned int blocks)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_disable(stmpe, blocks);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_disable);
+
+/**
+ * stmpe_reg_read() - read a single STMPE register
+ * @stmpe: Device to read from
+ * @reg: Register to read
+ */
+int stmpe_reg_read(struct stmpe *stmpe, u8 reg)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_reg_read(stmpe, reg);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_reg_read);
+
+/**
+ * stmpe_reg_write() - write a single STMPE register
+ * @stmpe: Device to write to
+ * @reg: Register to write
+ * @val: Value to write
+ */
+int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_reg_write(stmpe, reg, val);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_reg_write);
+
+/**
+ * stmpe_set_bits() - set the value of a bitfield in a STMPE register
+ * @stmpe: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to set
+ * @val: Value to set
+ */
+int stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_set_bits(stmpe, reg, mask, val);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_set_bits);
+
+/**
+ * stmpe_block_read() - read multiple STMPE registers
+ * @stmpe: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_block_read(stmpe, reg, length, values);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_block_read);
+
+/**
+ * stmpe_block_write() - write multiple STMPE registers
+ * @stmpe: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+ ret = __stmpe_block_write(stmpe, reg, length, values);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_block_write);
+
+/**
+ * stmpe_set_altfunc: set the alternate function for STMPE pins
+ * @stmpe: Device to configure
+ * @pins: Bitmask of pins to affect
+ * @block: block to enable alternate functions for
+ *
+ * @pins is assumed to have a bit set for each of the bits whose alternate
+ * function is to be changed, numbered according to the GPIOXY numbers.
+ *
+ * If the GPIO module is not enabled, this function automatically enables it in
+ * order to perform the change.
+ */
+int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block)
+{
+ struct stmpe_variant_info *variant = stmpe->variant;
+ u8 regaddr = stmpe->regs[STMPE_IDX_GPAFR_U_MSB];
+ int af_bits = variant->af_bits;
+ int numregs = DIV_ROUND_UP(stmpe->num_gpios * af_bits, 8);
+ int afperreg = 8 / af_bits;
+ int mask = (1 << af_bits) - 1;
+ u8 regs[numregs];
+ int af;
+ int ret;
+
+ mutex_lock(&stmpe->lock);
+
+ ret = __stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
+ if (ret < 0)
+ goto out;
+
+ ret = __stmpe_block_read(stmpe, regaddr, numregs, regs);
+ if (ret < 0)
+ goto out;
+
+ af = variant->get_altfunc(stmpe, block);
+
+ while (pins) {
+ int pin = __ffs(pins);
+ int regoffset = numregs - (pin / afperreg) - 1;
+ int pos = (pin % afperreg) * (8 / afperreg);
+
+ regs[regoffset] &= ~(mask << pos);
+ regs[regoffset] |= af << pos;
+
+ pins &= ~(1 << pin);
+ }
+
+ ret = __stmpe_block_write(stmpe, regaddr, numregs, regs);
+
+out:
+ mutex_unlock(&stmpe->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
+
+/*
+ * GPIO (all variants)
+ */
+
+static struct resource stmpe_gpio_resources[] = {
+ /* Start and end filled dynamically */
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell stmpe_gpio_cell = {
+ .name = "stmpe-gpio",
+ .resources = stmpe_gpio_resources,
+ .num_resources = ARRAY_SIZE(stmpe_gpio_resources),
+};
+
+/*
+ * Keypad (1601, 2401, 2403)
+ */
+
+static struct resource stmpe_keypad_resources[] = {
+ {
+ .name = "KEYPAD",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KEYPAD_OVER",
+ .start = 1,
+ .end = 1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell stmpe_keypad_cell = {
+ .name = "stmpe-keypad",
+ .resources = stmpe_keypad_resources,
+ .num_resources = ARRAY_SIZE(stmpe_keypad_resources),
+};
+
+/*
+ * Touchscreen (STMPE811)
+ */
+
+static struct resource stmpe_ts_resources[] = {
+ {
+ .name = "TOUCH_DET",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "FIFO_TH",
+ .start = 1,
+ .end = 1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell stmpe_ts_cell = {
+ .name = "stmpe-ts",
+ .resources = stmpe_ts_resources,
+ .num_resources = ARRAY_SIZE(stmpe_ts_resources),
+};
+
+/*
+ * STMPE811
+ */
+
+static const u8 stmpe811_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE811_REG_CHIP_ID,
+ [STMPE_IDX_ICR_LSB] = STMPE811_REG_INT_CTRL,
+ [STMPE_IDX_IER_LSB] = STMPE811_REG_INT_EN,
+ [STMPE_IDX_ISR_MSB] = STMPE811_REG_INT_STA,
+ [STMPE_IDX_GPMR_LSB] = STMPE811_REG_GPIO_MP_STA,
+ [STMPE_IDX_GPSR_LSB] = STMPE811_REG_GPIO_SET_PIN,
+ [STMPE_IDX_GPCR_LSB] = STMPE811_REG_GPIO_CLR_PIN,
+ [STMPE_IDX_GPDR_LSB] = STMPE811_REG_GPIO_DIR,
+ [STMPE_IDX_GPRER_LSB] = STMPE811_REG_GPIO_RE,
+ [STMPE_IDX_GPFER_LSB] = STMPE811_REG_GPIO_FE,
+ [STMPE_IDX_GPAFR_U_MSB] = STMPE811_REG_GPIO_AF,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE811_REG_GPIO_INT_EN,
+ [STMPE_IDX_ISGPIOR_MSB] = STMPE811_REG_GPIO_INT_STA,
+ [STMPE_IDX_GPEDR_MSB] = STMPE811_REG_GPIO_ED,
+};
+
+static struct stmpe_variant_block stmpe811_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = STMPE811_IRQ_GPIOC,
+ .block = STMPE_BLOCK_GPIO,
+ },
+ {
+ .cell = &stmpe_ts_cell,
+ .irq = STMPE811_IRQ_TOUCH_DET,
+ .block = STMPE_BLOCK_TOUCHSCREEN,
+ },
+};
+
+static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ unsigned int mask = 0;
+
+ if (blocks & STMPE_BLOCK_GPIO)
+ mask |= STMPE811_SYS_CTRL2_GPIO_OFF;
+
+ if (blocks & STMPE_BLOCK_ADC)
+ mask |= STMPE811_SYS_CTRL2_ADC_OFF;
+
+ if (blocks & STMPE_BLOCK_TOUCHSCREEN)
+ mask |= STMPE811_SYS_CTRL2_TSC_OFF;
+
+ return __stmpe_set_bits(stmpe, STMPE811_REG_SYS_CTRL2, mask,
+ enable ? 0 : mask);
+}
+
+static int stmpe811_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
+{
+ /* 0 for touchscreen, 1 for GPIO */
+ return block != STMPE_BLOCK_TOUCHSCREEN;
+}
+
+static struct stmpe_variant_info stmpe811 = {
+ .name = "stmpe811",
+ .id_val = 0x0811,
+ .id_mask = 0xffff,
+ .num_gpios = 8,
+ .af_bits = 1,
+ .regs = stmpe811_regs,
+ .blocks = stmpe811_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe811_blocks),
+ .num_irqs = STMPE811_NR_INTERNAL_IRQS,
+ .enable = stmpe811_enable,
+ .get_altfunc = stmpe811_get_altfunc,
+};
+
+/*
+ * STMPE1601
+ */
+
+static const u8 stmpe1601_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE1601_REG_CHIP_ID,
+ [STMPE_IDX_ICR_LSB] = STMPE1601_REG_ICR_LSB,
+ [STMPE_IDX_IER_LSB] = STMPE1601_REG_IER_LSB,
+ [STMPE_IDX_ISR_MSB] = STMPE1601_REG_ISR_MSB,
+ [STMPE_IDX_GPMR_LSB] = STMPE1601_REG_GPIO_MP_LSB,
+ [STMPE_IDX_GPSR_LSB] = STMPE1601_REG_GPIO_SET_LSB,
+ [STMPE_IDX_GPCR_LSB] = STMPE1601_REG_GPIO_CLR_LSB,
+ [STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
+ [STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
+ [STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
+ [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
+ [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
+ [STMPE_IDX_GPEDR_MSB] = STMPE1601_REG_GPIO_ED_MSB,
+};
+
+static struct stmpe_variant_block stmpe1601_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = STMPE24XX_IRQ_GPIOC,
+ .block = STMPE_BLOCK_GPIO,
+ },
+ {
+ .cell = &stmpe_keypad_cell,
+ .irq = STMPE24XX_IRQ_KEYPAD,
+ .block = STMPE_BLOCK_KEYPAD,
+ },
+};
+
+/* supported autosleep timeout delay (in msecs) */
+static const int stmpe_autosleep_delay[] = {
+ 4, 16, 32, 64, 128, 256, 512, 1024,
+};
+
+static int stmpe_round_timeout(int timeout)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(stmpe_autosleep_delay); i++) {
+ if (stmpe_autosleep_delay[i] >= timeout)
+ return i;
+ }
+
+ /*
+ * requests for delays longer than supported should not return the
+ * longest supported delay
+ */
+ return -EINVAL;
+}
+
+static int stmpe_autosleep(struct stmpe *stmpe, int autosleep_timeout)
+{
+ int ret;
+
+ if (!stmpe->variant->enable_autosleep)
+ return -ENOSYS;
+
+ mutex_lock(&stmpe->lock);
+ ret = stmpe->variant->enable_autosleep(stmpe, autosleep_timeout);
+ mutex_unlock(&stmpe->lock);
+
+ return ret;
+}
+
+/*
+ * Both the STMPE1601 and STMPE2403 use the same register layout for autosleep
+ */
+static int stmpe1601_autosleep(struct stmpe *stmpe,
+ int autosleep_timeout)
+{
+ int ret, timeout;
+
+ /* choose the best available timeout */
+ timeout = stmpe_round_timeout(autosleep_timeout);
+ if (timeout < 0) {
+ dev_err(stmpe->dev, "invalid timeout\n");
+ return timeout;
+ }
+
+ ret = __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
+ STMPE1601_AUTOSLEEP_TIMEOUT_MASK,
+ timeout);
+ if (ret < 0)
+ return ret;
+
+ return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
+ STPME1601_AUTOSLEEP_ENABLE,
+ STPME1601_AUTOSLEEP_ENABLE);
+}
+
+static int stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ unsigned int mask = 0;
+
+ if (blocks & STMPE_BLOCK_GPIO)
+ mask |= STMPE1601_SYS_CTRL_ENABLE_GPIO;
+
+ if (blocks & STMPE_BLOCK_KEYPAD)
+ mask |= STMPE1601_SYS_CTRL_ENABLE_KPC;
+
+ return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL, mask,
+ enable ? mask : 0);
+}
+
+static int stmpe1601_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
+{
+ switch (block) {
+ case STMPE_BLOCK_PWM:
+ return 2;
+
+ case STMPE_BLOCK_KEYPAD:
+ return 1;
+
+ case STMPE_BLOCK_GPIO:
+ default:
+ return 0;
+ }
+}
+
+static struct stmpe_variant_info stmpe1601 = {
+ .name = "stmpe1601",
+ .id_val = 0x0210,
+ .id_mask = 0xfff0, /* at least 0x0210 and 0x0212 */
+ .num_gpios = 16,
+ .af_bits = 2,
+ .regs = stmpe1601_regs,
+ .blocks = stmpe1601_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe1601_blocks),
+ .num_irqs = STMPE1601_NR_INTERNAL_IRQS,
+ .enable = stmpe1601_enable,
+ .get_altfunc = stmpe1601_get_altfunc,
+ .enable_autosleep = stmpe1601_autosleep,
+};
+
+/*
+ * STMPE24XX
+ */
+
+static const u8 stmpe24xx_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE24XX_REG_CHIP_ID,
+ [STMPE_IDX_ICR_LSB] = STMPE24XX_REG_ICR_LSB,
+ [STMPE_IDX_IER_LSB] = STMPE24XX_REG_IER_LSB,
+ [STMPE_IDX_ISR_MSB] = STMPE24XX_REG_ISR_MSB,
+ [STMPE_IDX_GPMR_LSB] = STMPE24XX_REG_GPMR_LSB,
+ [STMPE_IDX_GPSR_LSB] = STMPE24XX_REG_GPSR_LSB,
+ [STMPE_IDX_GPCR_LSB] = STMPE24XX_REG_GPCR_LSB,
+ [STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
+ [STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
+ [STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
+ [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
+ [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
+ [STMPE_IDX_GPEDR_MSB] = STMPE24XX_REG_GPEDR_MSB,
+};
+
+static struct stmpe_variant_block stmpe24xx_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = STMPE24XX_IRQ_GPIOC,
+ .block = STMPE_BLOCK_GPIO,
+ },
+ {
+ .cell = &stmpe_keypad_cell,
+ .irq = STMPE24XX_IRQ_KEYPAD,
+ .block = STMPE_BLOCK_KEYPAD,
+ },
+};
+
+static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ unsigned int mask = 0;
+
+ if (blocks & STMPE_BLOCK_GPIO)
+ mask |= STMPE24XX_SYS_CTRL_ENABLE_GPIO;
+
+ if (blocks & STMPE_BLOCK_KEYPAD)
+ mask |= STMPE24XX_SYS_CTRL_ENABLE_KPC;
+
+ return __stmpe_set_bits(stmpe, STMPE24XX_REG_SYS_CTRL, mask,
+ enable ? mask : 0);
+}
+
+static int stmpe24xx_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
+{
+ switch (block) {
+ case STMPE_BLOCK_ROTATOR:
+ return 2;
+
+ case STMPE_BLOCK_KEYPAD:
+ return 1;
+
+ case STMPE_BLOCK_GPIO:
+ default:
+ return 0;
+ }
+}
+
+static struct stmpe_variant_info stmpe2401 = {
+ .name = "stmpe2401",
+ .id_val = 0x0101,
+ .id_mask = 0xffff,
+ .num_gpios = 24,
+ .af_bits = 2,
+ .regs = stmpe24xx_regs,
+ .blocks = stmpe24xx_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe24xx_blocks),
+ .num_irqs = STMPE24XX_NR_INTERNAL_IRQS,
+ .enable = stmpe24xx_enable,
+ .get_altfunc = stmpe24xx_get_altfunc,
+};
+
+static struct stmpe_variant_info stmpe2403 = {
+ .name = "stmpe2403",
+ .id_val = 0x0120,
+ .id_mask = 0xffff,
+ .num_gpios = 24,
+ .af_bits = 2,
+ .regs = stmpe24xx_regs,
+ .blocks = stmpe24xx_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe24xx_blocks),
+ .num_irqs = STMPE24XX_NR_INTERNAL_IRQS,
+ .enable = stmpe24xx_enable,
+ .get_altfunc = stmpe24xx_get_altfunc,
+ .enable_autosleep = stmpe1601_autosleep, /* same as stmpe1601 */
+};
+
+static struct stmpe_variant_info *stmpe_variant_info[] = {
+ [STMPE811] = &stmpe811,
+ [STMPE1601] = &stmpe1601,
+ [STMPE2401] = &stmpe2401,
+ [STMPE2403] = &stmpe2403,
+};
+
+static irqreturn_t stmpe_irq(int irq, void *data)
+{
+ struct stmpe *stmpe = data;
+ struct stmpe_variant_info *variant = stmpe->variant;
+ int num = DIV_ROUND_UP(variant->num_irqs, 8);
+ u8 israddr = stmpe->regs[STMPE_IDX_ISR_MSB];
+ u8 isr[num];
+ int ret;
+ int i;
+
+ ret = stmpe_block_read(stmpe, israddr, num, isr);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < num; i++) {
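+		/*
+		 * The block read starts at the ISR MSB register, so isr[0]
+		 * corresponds to the highest-numbered IRQ bank.
+		 */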
+ int bank = num - i - 1;
+ u8 status = isr[i];
+ u8 clear;
+
+ status &= stmpe->ier[bank];
+ if (!status)
+ continue;
+
+ clear = status;
+ while (status) {
+ int bit = __ffs(status);
+ int line = bank * 8 + bit;
+
+ handle_nested_irq(stmpe->irq_base + line);
+ status &= ~(1 << bit);
+ }
+
+ stmpe_reg_write(stmpe, israddr + i, clear);
+ }
+
+ return IRQ_HANDLED;
+}
+
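+/*
+ * Mask/unmask only update the cached stmpe->ier bits under irq_lock; the new
+ * values are written to the chip from the bus_sync_unlock callback, since the
+ * register access over I2C may sleep.
+ */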
+static void stmpe_irq_lock(unsigned int irq)
+{
+ struct stmpe *stmpe = get_irq_chip_data(irq);
+
+ mutex_lock(&stmpe->irq_lock);
+}
+
+static void stmpe_irq_sync_unlock(unsigned int irq)
+{
+ struct stmpe *stmpe = get_irq_chip_data(irq);
+ struct stmpe_variant_info *variant = stmpe->variant;
+ int num = DIV_ROUND_UP(variant->num_irqs, 8);
+ int i;
+
+ for (i = 0; i < num; i++) {
+ u8 new = stmpe->ier[i];
+ u8 old = stmpe->oldier[i];
+
+ if (new == old)
+ continue;
+
+ stmpe->oldier[i] = new;
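+		/* bank i's enable register sits i bytes below the IER LSB */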
+ stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB] - i, new);
+ }
+
+ mutex_unlock(&stmpe->irq_lock);
+}
+
+static void stmpe_irq_mask(unsigned int irq)
+{
+ struct stmpe *stmpe = get_irq_chip_data(irq);
+ int offset = irq - stmpe->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ stmpe->ier[regoffset] &= ~mask;
+}
+
+static void stmpe_irq_unmask(unsigned int irq)
+{
+ struct stmpe *stmpe = get_irq_chip_data(irq);
+ int offset = irq - stmpe->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ stmpe->ier[regoffset] |= mask;
+}
+
+static struct irq_chip stmpe_irq_chip = {
+ .name = "stmpe",
+ .bus_lock = stmpe_irq_lock,
+ .bus_sync_unlock = stmpe_irq_sync_unlock,
+ .mask = stmpe_irq_mask,
+ .unmask = stmpe_irq_unmask,
+};
+
+static int __devinit stmpe_irq_init(struct stmpe *stmpe)
+{
+ int num_irqs = stmpe->variant->num_irqs;
+ int base = stmpe->irq_base;
+ int irq;
+
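+	/*
+	 * The sub-IRQs are dispatched with handle_nested_irq() from the
+	 * threaded handler above, so mark them as nested-thread interrupts.
+	 */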
+ for (irq = base; irq < base + num_irqs; irq++) {
+ set_irq_chip_data(irq, stmpe);
+ set_irq_chip_and_handler(irq, &stmpe_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void stmpe_irq_remove(struct stmpe *stmpe)
+{
+ int num_irqs = stmpe->variant->num_irqs;
+ int base = stmpe->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + num_irqs; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit stmpe_chip_init(struct stmpe *stmpe)
+{
+ unsigned int irq_trigger = stmpe->pdata->irq_trigger;
+ int autosleep_timeout = stmpe->pdata->autosleep_timeout;
+ struct stmpe_variant_info *variant = stmpe->variant;
+ u8 icr = STMPE_ICR_LSB_GIM;
+ unsigned int id;
+ u8 data[2];
+ int ret;
+
+ ret = stmpe_block_read(stmpe, stmpe->regs[STMPE_IDX_CHIP_ID],
+ ARRAY_SIZE(data), data);
+ if (ret < 0)
+ return ret;
+
+ id = (data[0] << 8) | data[1];
+ if ((id & variant->id_mask) != variant->id_val) {
+ dev_err(stmpe->dev, "unknown chip id: %#x\n", id);
+ return -EINVAL;
+ }
+
+ dev_info(stmpe->dev, "%s detected, chip id: %#x\n", variant->name, id);
+
+ /* Disable all modules -- subdrivers should enable what they need. */
+ ret = stmpe_disable(stmpe, ~0);
+ if (ret)
+ return ret;
+
+ if (irq_trigger == IRQF_TRIGGER_FALLING ||
+ irq_trigger == IRQF_TRIGGER_RISING)
+ icr |= STMPE_ICR_LSB_EDGE;
+
+ if (irq_trigger == IRQF_TRIGGER_RISING ||
+ irq_trigger == IRQF_TRIGGER_HIGH)
+ icr |= STMPE_ICR_LSB_HIGH;
+
+ if (stmpe->pdata->irq_invert_polarity)
+ icr ^= STMPE_ICR_LSB_HIGH;
+
+ if (stmpe->pdata->autosleep) {
+ ret = stmpe_autosleep(stmpe, autosleep_timeout);
+ if (ret)
+ return ret;
+ }
+
+ return stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_ICR_LSB], icr);
+}
+
+static int __devinit stmpe_add_device(struct stmpe *stmpe,
+ struct mfd_cell *cell, int irq)
+{
+ return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
+ NULL, stmpe->irq_base + irq);
+}
+
+static int __devinit stmpe_devices_init(struct stmpe *stmpe)
+{
+ struct stmpe_variant_info *variant = stmpe->variant;
+ unsigned int platform_blocks = stmpe->pdata->blocks;
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < variant->num_blocks; i++) {
+ struct stmpe_variant_block *block = &variant->blocks[i];
+
+ if (!(platform_blocks & block->block))
+ continue;
+
+ platform_blocks &= ~block->block;
+ ret = stmpe_add_device(stmpe, block->cell, block->irq);
+ if (ret)
+ return ret;
+ }
+
+ if (platform_blocks)
+ dev_warn(stmpe->dev,
+			 "platform wants blocks (%#x) not present on variant\n",
+ platform_blocks);
+
+ return ret;
+}
+
+static int __devinit stmpe_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct stmpe_platform_data *pdata = i2c->dev.platform_data;
+ struct stmpe *stmpe;
+ int ret;
+
+ if (!pdata)
+ return -EINVAL;
+
+ stmpe = kzalloc(sizeof(struct stmpe), GFP_KERNEL);
+ if (!stmpe)
+ return -ENOMEM;
+
+ mutex_init(&stmpe->irq_lock);
+ mutex_init(&stmpe->lock);
+
+ stmpe->dev = &i2c->dev;
+ stmpe->i2c = i2c;
+
+ stmpe->pdata = pdata;
+ stmpe->irq_base = pdata->irq_base;
+
+ stmpe->partnum = id->driver_data;
+ stmpe->variant = stmpe_variant_info[stmpe->partnum];
+ stmpe->regs = stmpe->variant->regs;
+ stmpe->num_gpios = stmpe->variant->num_gpios;
+
+ i2c_set_clientdata(i2c, stmpe);
+
+ ret = stmpe_chip_init(stmpe);
+ if (ret)
+ goto out_free;
+
+ ret = stmpe_irq_init(stmpe);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(stmpe->i2c->irq, NULL, stmpe_irq,
+ pdata->irq_trigger | IRQF_ONESHOT,
+ "stmpe", stmpe);
+ if (ret) {
+ dev_err(stmpe->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = stmpe_devices_init(stmpe);
+ if (ret) {
+ dev_err(stmpe->dev, "failed to add children\n");
+ goto out_removedevs;
+ }
+
+ return 0;
+
+out_removedevs:
+ mfd_remove_devices(stmpe->dev);
+ free_irq(stmpe->i2c->irq, stmpe);
+out_removeirq:
+ stmpe_irq_remove(stmpe);
+out_free:
+ kfree(stmpe);
+ return ret;
+}
+
+static int __devexit stmpe_remove(struct i2c_client *client)
+{
+ struct stmpe *stmpe = i2c_get_clientdata(client);
+
+ mfd_remove_devices(stmpe->dev);
+
+ free_irq(stmpe->i2c->irq, stmpe);
+ stmpe_irq_remove(stmpe);
+
+ kfree(stmpe);
+
+ return 0;
+}
+
+static const struct i2c_device_id stmpe_id[] = {
+ { "stmpe811", STMPE811 },
+ { "stmpe1601", STMPE1601 },
+ { "stmpe2401", STMPE2401 },
+ { "stmpe2403", STMPE2403 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, stmpe_id);
+
+static struct i2c_driver stmpe_driver = {
+ .driver.name = "stmpe",
+ .driver.owner = THIS_MODULE,
+ .probe = stmpe_probe,
+ .remove = __devexit_p(stmpe_remove),
+ .id_table = stmpe_id,
+};
+
+static int __init stmpe_init(void)
+{
+ return i2c_add_driver(&stmpe_driver);
+}
+subsys_initcall(stmpe_init);
+
+static void __exit stmpe_exit(void)
+{
+ i2c_del_driver(&stmpe_driver);
+}
+module_exit(stmpe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPE MFD core driver");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
new file mode 100644
index 00000000000..0dbdc4e8cd7
--- /dev/null
+++ b/drivers/mfd/stmpe.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __STMPE_H
+#define __STMPE_H
+
+#ifdef STMPE_DUMP_BYTES
+static inline void stmpe_dump_bytes(const char *str, const void *buf,
+ size_t len)
+{
+ print_hex_dump_bytes(str, DUMP_PREFIX_OFFSET, buf, len);
+}
+#else
+static inline void stmpe_dump_bytes(const char *str, const void *buf,
+ size_t len)
+{
+}
+#endif
+
+/**
+ * struct stmpe_variant_block - information about block
+ * @cell: base mfd cell
+ * @irq: interrupt number to be added to each IORESOURCE_IRQ
+ * in the cell
+ * @block: block id; used for identification with platform data and for
+ * enable and altfunc callbacks
+ */
+struct stmpe_variant_block {
+ struct mfd_cell *cell;
+ int irq;
+ enum stmpe_block block;
+};
+
+/**
+ * struct stmpe_variant_info - variant-specific information
+ * @name: part name
+ * @id_val: content of CHIPID register
+ * @id_mask: bits valid in CHIPID register for comparison with id_val
+ * @num_gpios: number of GPIOS
+ * @af_bits: number of bits used to specify the alternate function
+ * @blocks: list of blocks present on this device
+ * @num_blocks: number of blocks present on this device
+ * @num_irqs: number of internal IRQs available on this device
+ * @enable: callback to enable the specified blocks.
+ * Called with the I/O lock held.
+ * @get_altfunc: callback to get the alternate function number for the
+ * specific block
+ * @enable_autosleep: callback to configure autosleep with specified timeout
+ */
+struct stmpe_variant_info {
+ const char *name;
+ u16 id_val;
+ u16 id_mask;
+ int num_gpios;
+ int af_bits;
+ const u8 *regs;
+ struct stmpe_variant_block *blocks;
+ int num_blocks;
+ int num_irqs;
+ int (*enable)(struct stmpe *stmpe, unsigned int blocks, bool enable);
+ int (*get_altfunc)(struct stmpe *stmpe, enum stmpe_block block);
+ int (*enable_autosleep)(struct stmpe *stmpe, int autosleep_timeout);
+};
+
+#define STMPE_ICR_LSB_HIGH (1 << 2)
+#define STMPE_ICR_LSB_EDGE (1 << 1)
+#define STMPE_ICR_LSB_GIM (1 << 0)
+
+/*
+ * STMPE811
+ */
+
+#define STMPE811_IRQ_TOUCH_DET 0
+#define STMPE811_IRQ_FIFO_TH 1
+#define STMPE811_IRQ_FIFO_OFLOW 2
+#define STMPE811_IRQ_FIFO_FULL 3
+#define STMPE811_IRQ_FIFO_EMPTY 4
+#define STMPE811_IRQ_TEMP_SENS 5
+#define STMPE811_IRQ_ADC 6
+#define STMPE811_IRQ_GPIOC 7
+#define STMPE811_NR_INTERNAL_IRQS 8
+
+#define STMPE811_REG_CHIP_ID 0x00
+#define STMPE811_REG_SYS_CTRL2 0x04
+#define STMPE811_REG_INT_CTRL 0x09
+#define STMPE811_REG_INT_EN 0x0A
+#define STMPE811_REG_INT_STA 0x0B
+#define STMPE811_REG_GPIO_INT_EN 0x0C
+#define STMPE811_REG_GPIO_INT_STA 0x0D
+#define STMPE811_REG_GPIO_SET_PIN 0x10
+#define STMPE811_REG_GPIO_CLR_PIN 0x11
+#define STMPE811_REG_GPIO_MP_STA 0x12
+#define STMPE811_REG_GPIO_DIR 0x13
+#define STMPE811_REG_GPIO_ED 0x14
+#define STMPE811_REG_GPIO_RE 0x15
+#define STMPE811_REG_GPIO_FE 0x16
+#define STMPE811_REG_GPIO_AF 0x17
+
+#define STMPE811_SYS_CTRL2_ADC_OFF (1 << 0)
+#define STMPE811_SYS_CTRL2_TSC_OFF (1 << 1)
+#define STMPE811_SYS_CTRL2_GPIO_OFF (1 << 2)
+#define STMPE811_SYS_CTRL2_TS_OFF (1 << 3)
+
+/*
+ * STMPE1601
+ */
+
+#define STMPE1601_IRQ_GPIOC 8
+#define STMPE1601_IRQ_PWM3 7
+#define STMPE1601_IRQ_PWM2 6
+#define STMPE1601_IRQ_PWM1 5
+#define STMPE1601_IRQ_PWM0 4
+#define STMPE1601_IRQ_KEYPAD_OVER 2
+#define STMPE1601_IRQ_KEYPAD 1
+#define STMPE1601_IRQ_WAKEUP 0
+#define STMPE1601_NR_INTERNAL_IRQS 9
+
+#define STMPE1601_REG_SYS_CTRL 0x02
+#define STMPE1601_REG_SYS_CTRL2 0x03
+#define STMPE1601_REG_ICR_LSB 0x11
+#define STMPE1601_REG_IER_LSB 0x13
+#define STMPE1601_REG_ISR_MSB 0x14
+#define STMPE1601_REG_CHIP_ID 0x80
+#define STMPE1601_REG_INT_EN_GPIO_MASK_LSB 0x17
+#define STMPE1601_REG_INT_STA_GPIO_MSB 0x18
+#define STMPE1601_REG_GPIO_MP_LSB 0x87
+#define STMPE1601_REG_GPIO_SET_LSB 0x83
+#define STMPE1601_REG_GPIO_CLR_LSB 0x85
+#define STMPE1601_REG_GPIO_SET_DIR_LSB 0x89
+#define STMPE1601_REG_GPIO_ED_MSB 0x8A
+#define STMPE1601_REG_GPIO_RE_LSB 0x8D
+#define STMPE1601_REG_GPIO_FE_LSB 0x8F
+#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
+
+#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)
+#define STMPE1601_SYS_CTRL_ENABLE_KPC (1 << 1)
+#define STMPE1601_SYSCON_ENABLE_SPWM (1 << 0)
+
+/* The 1601/2403 share the same masks */
+#define STMPE1601_AUTOSLEEP_TIMEOUT_MASK (0x7)
+#define STMPE1601_AUTOSLEEP_ENABLE	(1 << 3)
+
+/*
+ * STMPE24xx
+ */
+
+#define STMPE24XX_IRQ_GPIOC 8
+#define STMPE24XX_IRQ_PWM2 7
+#define STMPE24XX_IRQ_PWM1 6
+#define STMPE24XX_IRQ_PWM0 5
+#define STMPE24XX_IRQ_ROT_OVER 4
+#define STMPE24XX_IRQ_ROT 3
+#define STMPE24XX_IRQ_KEYPAD_OVER 2
+#define STMPE24XX_IRQ_KEYPAD 1
+#define STMPE24XX_IRQ_WAKEUP 0
+#define STMPE24XX_NR_INTERNAL_IRQS 9
+
+#define STMPE24XX_REG_SYS_CTRL 0x02
+#define STMPE24XX_REG_ICR_LSB 0x11
+#define STMPE24XX_REG_IER_LSB 0x13
+#define STMPE24XX_REG_ISR_MSB 0x14
+#define STMPE24XX_REG_CHIP_ID 0x80
+#define STMPE24XX_REG_IEGPIOR_LSB 0x18
+#define STMPE24XX_REG_ISGPIOR_MSB 0x19
+#define STMPE24XX_REG_GPMR_LSB 0xA5
+#define STMPE24XX_REG_GPSR_LSB 0x85
+#define STMPE24XX_REG_GPCR_LSB 0x88
+#define STMPE24XX_REG_GPDR_LSB 0x8B
+#define STMPE24XX_REG_GPEDR_MSB 0x8C
+#define STMPE24XX_REG_GPRER_LSB 0x91
+#define STMPE24XX_REG_GPFER_LSB 0x94
+#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
+
+#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
+#define STMPE24XX_SYSCON_ENABLE_PWM (1 << 2)
+#define STMPE24XX_SYS_CTRL_ENABLE_KPC (1 << 1)
+#define STMPE24XX_SYSCON_ENABLE_ROT (1 << 0)
+
+#endif
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 5041d33adf0..006c121f3f0 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -350,7 +350,6 @@ static int t7l66xb_probe(struct platform_device *dev)
t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M");
if (IS_ERR(t7l66xb->clk48m)) {
ret = PTR_ERR(t7l66xb->clk48m);
- clk_put(t7l66xb->clk32k);
goto err_clk48m_get;
}
@@ -425,6 +424,8 @@ static int t7l66xb_remove(struct platform_device *dev)
ret = pdata->disable(dev);
clk_disable(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
+ clk_disable(t7l66xb->clk32k);
+ clk_put(t7l66xb->clk32k);
t7l66xb_detach_irq(dev);
iounmap(t7l66xb->scr);
release_resource(&t7l66xb->rscr);
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 517f9bcdeaa..6315f63f017 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -137,7 +137,7 @@ static struct mfd_cell tc6387xb_cells[] = {
},
};
-static int tc6387xb_probe(struct platform_device *dev)
+static int __devinit tc6387xb_probe(struct platform_device *dev)
{
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
struct resource *iomem, *rscr;
@@ -201,6 +201,7 @@ static int tc6387xb_probe(struct platform_device *dev)
if (!ret)
return 0;
+ iounmap(tc6387xb->scr);
err_ioremap:
release_resource(&tc6387xb->rscr);
err_resource:
@@ -211,14 +212,17 @@ err_no_irq:
return ret;
}
-static int tc6387xb_remove(struct platform_device *dev)
+static int __devexit tc6387xb_remove(struct platform_device *dev)
{
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
mfd_remove_devices(&dev->dev);
- clk_disable(clk32k);
- clk_put(clk32k);
+ iounmap(tc6387xb->scr);
+ release_resource(&tc6387xb->rscr);
+ clk_disable(tc6387xb->clk32k);
+ clk_put(tc6387xb->clk32k);
platform_set_drvdata(dev, NULL);
+ kfree(tc6387xb);
return 0;
}
@@ -229,7 +233,7 @@ static struct platform_driver tc6387xb_platform_driver = {
.name = "tc6387xb",
},
.probe = tc6387xb_probe,
- .remove = tc6387xb_remove,
+ .remove = __devexit_p(tc6387xb_remove),
.suspend = tc6387xb_suspend,
.resume = tc6387xb_resume,
};
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index fcf9068810f..ef6c42c8917 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -732,9 +732,9 @@ err_gpio_add:
if (tc6393xb->gpio.base != -1)
temp = gpiochip_remove(&tc6393xb->gpio);
tcpd->disable(dev);
-err_clk_enable:
- clk_disable(tc6393xb->clk);
err_enable:
+ clk_disable(tc6393xb->clk);
+err_clk_enable:
iounmap(tc6393xb->scr);
err_ioremap:
release_resource(&tc6393xb->rscr);
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index d859dffed39..fc019764928 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -89,10 +89,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
int ret = 0;
tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
- if (tps6507x == NULL) {
- kfree(i2c);
+ if (tps6507x == NULL)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, tps6507x);
tps6507x->dev = &i2c->dev;
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
new file mode 100644
index 00000000000..4cde31e6a25
--- /dev/null
+++ b/drivers/mfd/tps6586x.c
@@ -0,0 +1,375 @@
+/*
+ * Core driver for TI TPS6586x PMIC family
+ *
+ * Copyright (c) 2010 CompuLab Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on da903x.c.
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6586x.h>
+
+/* GPIO control registers */
+#define TPS6586X_GPIOSET1 0x5d
+#define TPS6586X_GPIOSET2 0x5e
+
+/* device id */
+#define TPS6586X_VERSIONCRC 0xcd
+#define TPS658621A_VERSIONCRC 0x15
+
+struct tps6586x {
+ struct mutex lock;
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct gpio_chip gpio;
+};
+
+static inline int __tps6586x_read(struct i2c_client *client,
+ int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+
+ return 0;
+}
+
+static inline int __tps6586x_reads(struct i2c_client *client, int reg,
+ int len, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading from 0x%02x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __tps6586x_write(struct i2c_client *client,
+ int reg, uint8_t val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
+ val, reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __tps6586x_writes(struct i2c_client *client, int reg,
+ int len, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
+ if (ret < 0) {
+		dev_err(&client->dev, "failed writing to 0x%02x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
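+/*
+ * Exported register accessors: the platform sub-devices registered via
+ * tps6586x_add_subdevs() are children of this device and can reach the PMIC
+ * registers by passing their parent device to these helpers.
+ */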
+int tps6586x_write(struct device *dev, int reg, uint8_t val)
+{
+ return __tps6586x_write(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(tps6586x_write);
+
+int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val)
+{
+ return __tps6586x_writes(to_i2c_client(dev), reg, len, val);
+}
+EXPORT_SYMBOL_GPL(tps6586x_writes);
+
+int tps6586x_read(struct device *dev, int reg, uint8_t *val)
+{
+ return __tps6586x_read(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(tps6586x_read);
+
+int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val)
+{
+ return __tps6586x_reads(to_i2c_client(dev), reg, len, val);
+}
+EXPORT_SYMBOL_GPL(tps6586x_reads);
+
+int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps6586x->lock);
+
+ ret = __tps6586x_read(to_i2c_client(dev), reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & bit_mask) == 0) {
+ reg_val |= bit_mask;
+ ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps6586x->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6586x_set_bits);
+
+int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps6586x->lock);
+
+ ret = __tps6586x_read(to_i2c_client(dev), reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if (reg_val & bit_mask) {
+ reg_val &= ~bit_mask;
+ ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps6586x->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6586x_clr_bits);
+
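+/*
+ * Read-modify-write helper. Example (illustrative): configure GPIO3 as an
+ * output driving high by touching only its two bits in GPIOSET1:
+ *   tps6586x_update(dev, TPS6586X_GPIOSET1, 0x1 << (3 * 2), 0x3 << (3 * 2));
+ */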
+int tps6586x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret = 0;
+
+ mutex_lock(&tps6586x->lock);
+
+ ret = __tps6586x_read(tps6586x->client, reg, &reg_val);
+ if (ret)
+ goto out;
+
+ if ((reg_val & mask) != val) {
+ reg_val = (reg_val & ~mask) | val;
+ ret = __tps6586x_write(tps6586x->client, reg, reg_val);
+ }
+out:
+ mutex_unlock(&tps6586x->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps6586x_update);
+
+static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6586x *tps6586x = container_of(gc, struct tps6586x, gpio);
+ uint8_t val;
+ int ret;
+
+ ret = __tps6586x_read(tps6586x->client, TPS6586X_GPIOSET2, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & (1 << offset));
+}
+
+
+static void tps6586x_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio);
+
+ __tps6586x_write(tps6586x->client, TPS6586X_GPIOSET2,
+ value << offset);
+}
+
+static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps6586x *tps6586x = container_of(gc, struct tps6586x, gpio);
+ uint8_t val, mask;
+
+ tps6586x_gpio_set(gc, offset, value);
+
+ val = 0x1 << (offset * 2);
+ mask = 0x3 << (offset * 2);
+
+ return tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET1, val, mask);
+}
+
+static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
+{
+ int ret;
+
+ if (!gpio_base)
+ return;
+
+ tps6586x->gpio.owner = THIS_MODULE;
+ tps6586x->gpio.label = tps6586x->client->name;
+ tps6586x->gpio.dev = tps6586x->dev;
+ tps6586x->gpio.base = gpio_base;
+ tps6586x->gpio.ngpio = 4;
+ tps6586x->gpio.can_sleep = 1;
+
+ /* FIXME: add handling of GPIOs as dedicated inputs */
+ tps6586x->gpio.direction_output = tps6586x_gpio_output;
+ tps6586x->gpio.set = tps6586x_gpio_set;
+ tps6586x->gpio.get = tps6586x_gpio_get;
+
+ ret = gpiochip_add(&tps6586x->gpio);
+ if (ret)
+ dev_warn(tps6586x->dev, "GPIO registration failed: %d\n", ret);
+}
+
+static int __remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int tps6586x_remove_subdevs(struct tps6586x *tps6586x)
+{
+ return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
+}
+
+static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x,
+ struct tps6586x_platform_data *pdata)
+{
+ struct tps6586x_subdev_info *subdev;
+ struct platform_device *pdev;
+ int i, ret = 0;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ subdev = &pdata->subdevs[i];
+
+		pdev = platform_device_alloc(subdev->name, subdev->id);
+		if (!pdev) {
+			ret = -ENOMEM;
+			goto failed;
+		}
+
+ pdev->dev.parent = tps6586x->dev;
+ pdev->dev.platform_data = subdev->platform_data;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto failed;
+ }
+ return 0;
+
+failed:
+ tps6586x_remove_subdevs(tps6586x);
+ return ret;
+}
+
+static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps6586x_platform_data *pdata = client->dev.platform_data;
+ struct tps6586x *tps6586x;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&client->dev, "tps6586x requires platform data\n");
+ return -ENOTSUPP;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, TPS6586X_VERSIONCRC);
+ if (ret < 0) {
+ dev_err(&client->dev, "Chip ID read failed: %d\n", ret);
+ return -EIO;
+ }
+
+ if (ret != TPS658621A_VERSIONCRC) {
+ dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
+ return -ENODEV;
+ }
+
+ tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
+ if (tps6586x == NULL)
+ return -ENOMEM;
+
+ tps6586x->client = client;
+ tps6586x->dev = &client->dev;
+ i2c_set_clientdata(client, tps6586x);
+
+ mutex_init(&tps6586x->lock);
+
+ ret = tps6586x_add_subdevs(tps6586x, pdata);
+ if (ret) {
+ dev_err(&client->dev, "add devices failed: %d\n", ret);
+ goto err_add_devs;
+ }
+
+ tps6586x_gpio_init(tps6586x, pdata->gpio_base);
+
+ return 0;
+
+err_add_devs:
+ kfree(tps6586x);
+ return ret;
+}
+
+static int __devexit tps6586x_i2c_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static const struct i2c_device_id tps6586x_id_table[] = {
+ { "tps6586x", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, tps6586x_id_table);
+
+static struct i2c_driver tps6586x_driver = {
+ .driver = {
+ .name = "tps6586x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6586x_i2c_probe,
+ .remove = __devexit_p(tps6586x_i2c_remove),
+ .id_table = tps6586x_id_table,
+};
+
+static int __init tps6586x_init(void)
+{
+ return i2c_add_driver(&tps6586x_driver);
+}
+subsys_initcall(tps6586x_init);
+
+static void __exit tps6586x_exit(void)
+{
+ i2c_del_driver(&tps6586x_driver);
+}
+module_exit(tps6586x_exit);
+
+MODULE_DESCRIPTION("TPS6586X core driver");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/mfd/twl6030-pwm.c b/drivers/mfd/twl6030-pwm.c
new file mode 100644
index 00000000000..5d25bdc7842
--- /dev/null
+++ b/drivers/mfd/twl6030-pwm.c
@@ -0,0 +1,163 @@
+/*
+ * twl6030-pwm.c
+ * Driver for PHOENIX (TWL6030) Pulse Width Modulator
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl.h>
+#include <linux/slab.h>
+
+#define LED_PWM_CTRL1 0xF4
+#define LED_PWM_CTRL2 0xF5
+
+/* Max value for CTRL1 register */
+#define PWM_CTRL1_MAX 255
+
+/* Pull down disable */
+#define PWM_CTRL2_DIS_PD (1 << 6)
+
+/* Current control, 2.5 mA */
+#define PWM_CTRL2_CURR_02 (2 << 4)
+
+/* LED supply source */
+#define PWM_CTRL2_SRC_VAC (1 << 2)
+
+/* LED modes */
+#define PWM_CTRL2_MODE_HW (0 << 0)
+#define PWM_CTRL2_MODE_SW (1 << 0)
+#define PWM_CTRL2_MODE_DIS (2 << 0)
+
+#define PWM_CTRL2_MODE_MASK 0x3
+
+struct pwm_device {
+ const char *label;
+ unsigned int pwm_id;
+};
+
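+/*
+ * Example (illustrative): pwm_config(pwm, 500000, 1000000) requests a 50%
+ * duty cycle and programs LED_PWM_CTRL1 with (500000 * 255) / 1000000 = 127.
+ */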
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ u8 duty_cycle;
+ int ret;
+
+ if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ duty_cycle = (duty_ns * PWM_CTRL1_MAX) / period_ns;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, duty_cycle, LED_PWM_CTRL1);
+
+ if (ret < 0) {
+ pr_err("%s: Failed to configure PWM, Error %d\n",
+ pwm->label, ret);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
+ if (ret < 0) {
+ pr_err("%s: Failed to enable PWM, Error %d\n", pwm->label, ret);
+ return ret;
+ }
+
+ /* Change mode to software control */
+ val &= ~PWM_CTRL2_MODE_MASK;
+ val |= PWM_CTRL2_MODE_SW;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
+ if (ret < 0) {
+ pr_err("%s: Failed to enable PWM, Error %d\n", pwm->label, ret);
+ return ret;
+ }
+
+ twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
+ return 0;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
+ if (ret < 0) {
+ pr_err("%s: Failed to disable PWM, Error %d\n",
+ pwm->label, ret);
+ return;
+ }
+
+ val &= ~PWM_CTRL2_MODE_MASK;
+ val |= PWM_CTRL2_MODE_HW;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
+ if (ret < 0) {
+ pr_err("%s: Failed to disable PWM, Error %d\n",
+ pwm->label, ret);
+ return;
+ }
+}
+EXPORT_SYMBOL(pwm_disable);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ u8 val;
+ int ret;
+ struct pwm_device *pwm;
+
+ pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ if (pwm == NULL) {
+ pr_err("%s: failed to allocate memory\n", label);
+ return NULL;
+ }
+
+ pwm->label = label;
+ pwm->pwm_id = pwm_id;
+
+ /* Configure PWM */
+ val = PWM_CTRL2_DIS_PD | PWM_CTRL2_CURR_02 | PWM_CTRL2_SRC_VAC |
+ PWM_CTRL2_MODE_HW;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
+
+ if (ret < 0) {
+ pr_err("%s: Failed to configure PWM, Error %d\n",
+ pwm->label, ret);
+
+ kfree(pwm);
+ return NULL;
+ }
+
+ return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ pwm_disable(pwm);
+ kfree(pwm);
+}
+EXPORT_SYMBOL(pwm_free);
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index dbe280153f9..d73f84ba0f0 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -114,7 +114,7 @@ static int ucb1400_core_probe(struct device *dev)
err3:
platform_device_put(ucb->ucb1400_ts);
err2:
- platform_device_unregister(ucb->ucb1400_gpio);
+ platform_device_del(ucb->ucb1400_gpio);
err1:
platform_device_put(ucb->ucb1400_gpio);
err0:
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 1a968f34d67..1e7aaaf6cc6 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -95,6 +95,7 @@ enum wm831x_parent {
WM8311 = 0x8311,
WM8312 = 0x8312,
WM8320 = 0x8320,
+ WM8321 = 0x8321,
};
static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
@@ -1533,6 +1534,12 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
dev_info(wm831x->dev, "WM8320 revision %c\n", 'A' + rev);
break;
+ case WM8321:
+ parent = WM8321;
+ wm831x->num_gpio = 12;
+ dev_info(wm831x->dev, "WM8321 revision %c\n", 'A' + rev);
+ break;
+
default:
dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
ret = -EINVAL;
@@ -1607,6 +1614,12 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
NULL, 0);
break;
+ case WM8321:
+ ret = mfd_add_devices(wm831x->dev, -1,
+ wm8320_devs, ARRAY_SIZE(wm8320_devs),
+ NULL, 0);
+ break;
+
default:
/* If this happens the bus probe function is buggy */
BUG();
@@ -1744,10 +1757,8 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
struct wm831x *wm831x;
wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
- if (wm831x == NULL) {
- kfree(i2c);
+ if (wm831x == NULL)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, wm831x);
wm831x->dev = &i2c->dev;
@@ -1779,6 +1790,7 @@ static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8311", WM8311 },
{ "wm8312", WM8312 },
{ "wm8320", WM8320 },
+ { "wm8321", WM8321 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 7dabe4dbd37..294183b6260 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
irq = irq - wm831x->irq_base;
- if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
- return -EINVAL;
+ if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
+ /* Ignore internal-only IRQs */
+ if (irq >= 0 && irq < WM831X_NUM_IRQS)
+ return 0;
+ else
+ return -EINVAL;
+ }
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index b5807484b4c..e81cc31e420 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -536,6 +536,7 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
}
out:
+ kfree(wm8350->reg_cache);
return ret;
}
@@ -700,7 +701,7 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
ret = wm8350_irq_init(wm8350, irq, pdata);
if (ret < 0)
- goto err;
+ goto err_free;
if (wm8350->irq_base) {
ret = request_threaded_irq(wm8350->irq_base +
@@ -738,8 +739,9 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
err_irq:
wm8350_irq_exit(wm8350);
-err:
+err_free:
kfree(wm8350->reg_cache);
+err:
return ret;
}
EXPORT_SYMBOL_GPL(wm8350_device_init);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ec71c936890..b3b2aaf89db 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -326,8 +326,10 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
ARRAY_SIZE(wm8994_main_supplies),
GFP_KERNEL);
- if (!wm8994->supplies)
+ if (!wm8994->supplies) {
+ ret = -ENOMEM;
goto err;
+ }
for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
wm8994->supplies[i].supply = wm8994_main_supplies[i];
@@ -495,10 +497,8 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
struct wm8994 *wm8994;
wm8994 = kzalloc(sizeof(struct wm8994), GFP_KERNEL);
- if (wm8994 == NULL) {
- kfree(i2c);
+ if (wm8994 == NULL)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, wm8994);
wm8994->dev = &i2c->dev;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9b089dfb173..b7433126074 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -72,7 +72,7 @@ config ATMEL_TCLIB
config ATMEL_TCB_CLKSRC
bool "TC Block Clocksource"
- depends on ATMEL_TCLIB && GENERIC_TIME
+ depends on ATMEL_TCLIB
default y
help
Select this to get a high precision clocksource based on a
@@ -240,7 +240,7 @@ config CS5535_MFGPT_DEFAULT_IRQ
config CS5535_CLOCK_EVENT_SRC
tristate "CS5535/CS5536 high-res timer (MFGPT) events"
- depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
+ depends on GENERIC_CLOCKEVENTS && CS5535_MFGPT
help
This driver provides a clock event source based on the MFGPT
timer(s) in the CS5535 and CS5536 companion chips.
@@ -304,6 +304,23 @@ config SENSORS_TSL2550
This driver can also be built as a module. If so, the module
will be called tsl2550.
+config SENSORS_BH1780
+ tristate "ROHM BH1780GLI ambient light sensor"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get support for the ROHM BH1780GLI
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called bh1780gli.
+
+config HMC6352
+ tristate "Honeywell HMC6352 compass"
+ depends on I2C
+ help
+	  This driver adds support for the Honeywell HMC6352 compass,
+	  exposing configuration and heading data via sysfs.
+
config EP93XX_PWM
tristate "EP93xx PWM support"
depends on ARCH_EP93XX
@@ -351,7 +368,7 @@ config VMWARE_BALLOON
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called vmware_balloon.
+ module will be called vmw_balloon.
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
@@ -363,6 +380,16 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
+config BMP085
+ tristate "BMP085 digital pressure sensor"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get support for the Bosch Sensortec
+	  BMP085 digital pressure sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bmp085.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 67552d6e932..42eab95cde2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -9,11 +9,13 @@ obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
+obj-$(CONFIG_BMP085) += bmp085.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
+obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
@@ -28,7 +30,8 @@ obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
+obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
-obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
+obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
new file mode 100644
index 00000000000..714c6b48731
--- /dev/null
+++ b/drivers/misc/bh1780gli.c
@@ -0,0 +1,273 @@
+/*
+ * bh1780gli.c
+ * ROHM Ambient Light Sensor Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define BH1780_REG_CONTROL 0x80
+#define BH1780_REG_PARTID 0x8A
+#define BH1780_REG_MANFID 0x8B
+#define BH1780_REG_DLOW 0x8C
+#define BH1780_REG_DHIGH 0x8D
+
+#define BH1780_REVMASK (0xf)
+#define BH1780_POWMASK (0x3)
+#define BH1780_POFF (0x0)
+#define BH1780_PON (0x3)
+
+/* power on settling time in ms */
+#define BH1780_PON_DELAY 2
+
+struct bh1780_data {
+ struct i2c_client *client;
+ int power_state;
+ /* lock for sysfs operations */
+ struct mutex lock;
+};
+
+static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
+{
+ int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
+ if (ret < 0)
+		dev_err(&ddata->client->dev,
+			"i2c_smbus_write_byte_data failed error %d Register (%s)\n",
+			ret, msg);
+ return ret;
+}
+
+static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
+{
+ int ret = i2c_smbus_read_byte_data(ddata->client, reg);
+ if (ret < 0)
+		dev_err(&ddata->client->dev,
+			"i2c_smbus_read_byte_data failed error %d Register (%s)\n",
+			ret, msg);
+ return ret;
+}
+
+static ssize_t bh1780_show_lux(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bh1780_data *ddata = platform_get_drvdata(pdev);
+ int lsb, msb;
+
+ lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
+ if (lsb < 0)
+ return lsb;
+
+ msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
+ if (msb < 0)
+ return msb;
+
+ return sprintf(buf, "%d\n", (msb << 8) | lsb);
+}
+
+static ssize_t bh1780_show_power_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bh1780_data *ddata = platform_get_drvdata(pdev);
+ int state;
+
+ state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
+ if (state < 0)
+ return state;
+
+ return sprintf(buf, "%d\n", state & BH1780_POWMASK);
+}
+
+static ssize_t bh1780_store_power_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bh1780_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ if (val < BH1780_POFF || val > BH1780_PON)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ msleep(BH1780_PON_DELAY);
+ ddata->power_state = val;
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
+
+static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+ bh1780_show_power_state, bh1780_store_power_state);
+
+static struct attribute *bh1780_attributes[] = {
+ &dev_attr_power_state.attr,
+ &dev_attr_lux.attr,
+ NULL
+};
+
+static const struct attribute_group bh1780_attr_group = {
+ .attrs = bh1780_attributes,
+};
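+/*
+ * Example (hypothetical sysfs path; the real one depends on the I2C bus and
+ * slave address of the sensor):
+ *   # echo 3 > /sys/bus/i2c/devices/2-0029/power_state
+ *   # cat /sys/bus/i2c/devices/2-0029/lux
+ */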
+
+static int __devinit bh1780_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bh1780_data *ddata = NULL;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
+ ret = -EIO;
+ goto err_op_failed;
+ }
+
+ ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
+ if (ddata == NULL) {
+ ret = -ENOMEM;
+ goto err_op_failed;
+ }
+
+ ddata->client = client;
+ i2c_set_clientdata(client, ddata);
+
+ ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
+ if (ret < 0)
+ goto err_op_failed;
+
+ dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
+ (ret & BH1780_REVMASK));
+
+ mutex_init(&ddata->lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
+ if (ret)
+ goto err_op_failed;
+
+ return 0;
+
+err_op_failed:
+ kfree(ddata);
+ return ret;
+}
+
+static int __devexit bh1780_remove(struct i2c_client *client)
+{
+ struct bh1780_data *ddata;
+
+ ddata = i2c_get_clientdata(client);
+ sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
+ i2c_set_clientdata(client, NULL);
+ kfree(ddata);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct bh1780_data *ddata;
+ int state, ret;
+
+ ddata = i2c_get_clientdata(client);
+ state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
+ if (state < 0)
+ return state;
+
+ ddata->power_state = state & BH1780_POWMASK;
+
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
+ "CONTROL");
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int bh1780_resume(struct i2c_client *client)
+{
+ struct bh1780_data *ddata;
+ int state, ret;
+
+ ddata = i2c_get_clientdata(client);
+ state = ddata->power_state;
+
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
+ "CONTROL");
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#else
+#define bh1780_suspend NULL
+#define bh1780_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id bh1780_id[] = {
+ { "bh1780", 0 },
+ { },
+};
+
+static struct i2c_driver bh1780_driver = {
+ .probe = bh1780_probe,
+ .remove = bh1780_remove,
+ .id_table = bh1780_id,
+ .suspend = bh1780_suspend,
+ .resume = bh1780_resume,
+ .driver = {
+ .name = "bh1780"
+ },
+};
+
+static int __init bh1780_init(void)
+{
+ return i2c_add_driver(&bh1780_driver);
+}
+
+static void __exit bh1780_exit(void)
+{
+ i2c_del_driver(&bh1780_driver);
+}
+
+module_init(bh1780_init)
+module_exit(bh1780_exit)
+
+MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
new file mode 100644
index 00000000000..63ee4c1a531
--- /dev/null
+++ b/drivers/misc/bmp085.c
@@ -0,0 +1,482 @@
+/* Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
+
+ This driver supports the bmp085 digital barometric pressure
+ and temperature sensor from Bosch Sensortec. The datasheet
+  is available from their website:
+ http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP085-DS000-05.pdf
+
+ A pressure measurement is issued by reading from pressure0_input.
+  The return value ranges from 30000 to 110000 pascal with a resolution
+  of 1 pascal (0.01 millibar), which enables measurements from 9000m above
+ to 500m below sea level.
+
+ The temperature can be read from temp0_input. Values range from
+  -400 to 850, representing the ambient temperature in degrees celsius
+  multiplied by 10. The resolution is 0.1 celsius.
+
+ Because ambient pressure is temperature dependent, a temperature
+ measurement will be executed automatically even if the user is reading
+ from pressure0_input. This happens if the last temperature measurement
+  was executed more than one second ago.
+
+ To decrease RMS noise from pressure measurements, the bmp085 can
+ autonomously calculate the average of up to eight samples. This is
+ set up by writing to the oversampling sysfs file. Accepted values
+  are 0, 1, 2 and 3; 2^x samples are averaged to calculate the ambient
+  pressure, where x is the value written to this file.
+  RMS noise is specified as six pascal (without averaging) and decreases
+  to 3 pascal when using an oversampling setting of 3.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
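+/*
+ * Example (hypothetical sysfs path; 0x77 is the part's fixed I2C address):
+ *   # echo 3 > /sys/bus/i2c/devices/0-0077/oversampling
+ *   # cat /sys/bus/i2c/devices/0-0077/pressure0_input
+ *   100123	(illustrative reading, in pascal)
+ */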
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+
+#define BMP085_I2C_ADDRESS 0x77
+#define BMP085_CHIP_ID 0x55
+
+#define BMP085_CALIBRATION_DATA_START 0xAA
+#define BMP085_CALIBRATION_DATA_LENGTH 11 /* 16 bit values */
+#define BMP085_CHIP_ID_REG 0xD0
+#define BMP085_VERSION_REG 0xD1
+#define BMP085_CTRL_REG 0xF4
+#define BMP085_TEMP_MEASUREMENT 0x2E
+#define BMP085_PRESSURE_MEASUREMENT 0x34
+#define BMP085_CONVERSION_REGISTER_MSB 0xF6
+#define BMP085_CONVERSION_REGISTER_LSB 0xF7
+#define BMP085_CONVERSION_REGISTER_XLSB 0xF8
+#define BMP085_TEMP_CONVERSION_TIME 5
+
+#define BMP085_CLIENT_NAME "bmp085"
+
+
+static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS,
+ I2C_CLIENT_END };
+
+struct bmp085_calibration_data {
+ s16 AC1, AC2, AC3;
+ u16 AC4, AC5, AC6;
+ s16 B1, B2;
+ s16 MB, MC, MD;
+};
+
+
+/* Each client has this additional data */
+struct bmp085_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct bmp085_calibration_data calibration;
+ u32 raw_temperature;
+ u32 raw_pressure;
+ unsigned char oversampling_setting;
+ u32 last_temp_measurement;
+ s32 b6; /* calculated temperature correction coefficient */
+};
+
+
+static s32 bmp085_read_calibration_data(struct i2c_client *client)
+{
+ u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ struct bmp085_calibration_data *cali = &(data->calibration);
+ s32 status = i2c_smbus_read_i2c_block_data(client,
+ BMP085_CALIBRATION_DATA_START,
+ BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16),
+ (u8 *)tmp);
+ if (status < 0)
+ return status;
+
+ if (status != BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16))
+ return -EIO;
+
+ cali->AC1 = be16_to_cpu(tmp[0]);
+ cali->AC2 = be16_to_cpu(tmp[1]);
+ cali->AC3 = be16_to_cpu(tmp[2]);
+ cali->AC4 = be16_to_cpu(tmp[3]);
+ cali->AC5 = be16_to_cpu(tmp[4]);
+ cali->AC6 = be16_to_cpu(tmp[5]);
+ cali->B1 = be16_to_cpu(tmp[6]);
+ cali->B2 = be16_to_cpu(tmp[7]);
+ cali->MB = be16_to_cpu(tmp[8]);
+ cali->MC = be16_to_cpu(tmp[9]);
+ cali->MD = be16_to_cpu(tmp[10]);
+ return 0;
+}
+
+
+static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
+{
+ u16 tmp;
+ s32 status;
+
+ mutex_lock(&data->lock);
+ status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG,
+ BMP085_TEMP_MEASUREMENT);
+ if (status != 0) {
+ dev_err(&data->client->dev,
+ "Error while requesting temperature measurement.\n");
+ goto exit;
+ }
+ msleep(BMP085_TEMP_CONVERSION_TIME);
+
+ status = i2c_smbus_read_i2c_block_data(data->client,
+ BMP085_CONVERSION_REGISTER_MSB, sizeof(tmp), (u8 *)&tmp);
+ if (status < 0)
+ goto exit;
+ if (status != sizeof(tmp)) {
+ dev_err(&data->client->dev,
+ "Error while reading temperature measurement result\n");
+ status = -EIO;
+ goto exit;
+ }
+ data->raw_temperature = be16_to_cpu(tmp);
+ data->last_temp_measurement = jiffies;
+ status = 0; /* everything ok, return 0 */
+
+exit:
+ mutex_unlock(&data->lock);
+ return status;
+}
+
+static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
+{
+ u32 tmp = 0;
+ s32 status;
+
+ mutex_lock(&data->lock);
+ status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG,
+ BMP085_PRESSURE_MEASUREMENT + (data->oversampling_setting<<6));
+ if (status != 0) {
+ dev_err(&data->client->dev,
+ "Error while requesting pressure measurement.\n");
+ goto exit;
+ }
+
+ /* wait for the end of conversion */
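+	/* e.g. 5 ms at oversampling 0, up to 26 ms at oversampling 3 */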
+ msleep(2+(3 << data->oversampling_setting));
+
+ /* copy data into a u32 (4 bytes), but skip the first byte. */
+ status = i2c_smbus_read_i2c_block_data(data->client,
+ BMP085_CONVERSION_REGISTER_MSB, 3, ((u8 *)&tmp)+1);
+ if (status < 0)
+ goto exit;
+ if (status != 3) {
+ dev_err(&data->client->dev,
+ "Error while reading pressure measurement results\n");
+ status = -EIO;
+ goto exit;
+ }
+ data->raw_pressure = be32_to_cpu((tmp));
+ data->raw_pressure >>= (8-data->oversampling_setting);
+ status = 0; /* everything ok, return 0 */
+
+exit:
+ mutex_unlock(&data->lock);
+ return status;
+}
+
+
+/*
+ * This function starts the temperature measurement and returns the value
+ * in tenth of a degree celsius.
+ */
+static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
+{
+ struct bmp085_calibration_data *cali = &data->calibration;
+ long x1, x2;
+ int status;
+
+ status = bmp085_update_raw_temperature(data);
+ if (status != 0)
+ goto exit;
+
+ x1 = ((data->raw_temperature - cali->AC6) * cali->AC5) >> 15;
+ x2 = (cali->MC << 11) / (x1 + cali->MD);
+ data->b6 = x1 + x2 - 4000;
+ /* if NULL just update b6. Used for pressure only measurements */
+ if (temperature != NULL)
+ *temperature = (x1+x2+8) >> 4;
+
+exit:
+	return status;
+}
+
+/*
+ * This function starts the pressure measurement and returns the value
+ * in millibar. Since the pressure depends on the ambient temperature,
+ * a temperature measurement is executed if the last known value is older
+ * than one second.
+ */
+static s32 bmp085_get_pressure(struct bmp085_data *data, int *pressure)
+{
+ struct bmp085_calibration_data *cali = &data->calibration;
+ s32 x1, x2, x3, b3;
+ u32 b4, b7;
+ s32 p;
+ int status;
+
+	/* at least every second, force an update of the ambient temperature */
+ if (data->last_temp_measurement + 1*HZ < jiffies) {
+ status = bmp085_get_temperature(data, NULL);
+ if (status != 0)
+ goto exit;
+ }
+
+ status = bmp085_update_raw_pressure(data);
+ if (status != 0)
+ goto exit;
+
+ x1 = (data->b6 * data->b6) >> 12;
+ x1 *= cali->B2;
+ x1 >>= 11;
+
+ x2 = cali->AC2 * data->b6;
+ x2 >>= 11;
+
+ x3 = x1 + x2;
+
+ b3 = (((((s32)cali->AC1) * 4 + x3) << data->oversampling_setting) + 2);
+ b3 >>= 2;
+
+ x1 = (cali->AC3 * data->b6) >> 13;
+ x2 = (cali->B1 * ((data->b6 * data->b6) >> 12)) >> 16;
+ x3 = (x1 + x2 + 2) >> 2;
+ b4 = (cali->AC4 * (u32)(x3 + 32768)) >> 15;
+
+ b7 = ((u32)data->raw_pressure - b3) *
+ (50000 >> data->oversampling_setting);
+ p = ((b7 < 0x80000000) ? ((b7 << 1) / b4) : ((b7 / b4) * 2));
+
+ x1 = p >> 8;
+ x1 *= x1;
+ x1 = (x1 * 3038) >> 16;
+ x2 = (-7357 * p) >> 16;
+ p += (x1 + x2 + 3791) >> 4;
+
+ *pressure = p;
+
+exit:
+ return status;
+}
+
+/*
+ * This function sets the chip-internal oversampling. Valid values are 0..3.
+ * The chip will use 2^oversampling samples for internal averaging.
+ * This influences the measurement time and the accuracy; larger values
+ * increase both. The datasheet gives an overview of how measurement time,
+ * accuracy and noise correlate.
+ */
+static void bmp085_set_oversampling(struct bmp085_data *data,
+ unsigned char oversampling)
+{
+ if (oversampling > 3)
+ oversampling = 3;
+ data->oversampling_setting = oversampling;
+}
+
+/*
+ * Returns the currently selected oversampling. Range: 0..3
+ */
+static unsigned char bmp085_get_oversampling(struct bmp085_data *data)
+{
+ return data->oversampling_setting;
+}
+
+/* sysfs callbacks */
+static ssize_t set_oversampling(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ unsigned long oversampling;
+ int success = strict_strtoul(buf, 10, &oversampling);
+ if (success == 0) {
+ bmp085_set_oversampling(data, oversampling);
+ return count;
+ }
+ return success;
+}
+
+static ssize_t show_oversampling(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ return sprintf(buf, "%u\n", bmp085_get_oversampling(data));
+}
+static DEVICE_ATTR(oversampling, S_IWUSR | S_IRUGO,
+ show_oversampling, set_oversampling);
+
+
+static ssize_t show_temperature(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int temperature;
+ int status;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+
+ status = bmp085_get_temperature(data, &temperature);
+ if (status != 0)
+ return status;
+ else
+ return sprintf(buf, "%d\n", temperature);
+}
+static DEVICE_ATTR(temp0_input, S_IRUGO, show_temperature, NULL);
+
+
+static ssize_t show_pressure(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int pressure;
+ int status;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+
+ status = bmp085_get_pressure(data, &pressure);
+ if (status != 0)
+ return status;
+ else
+ return sprintf(buf, "%d\n", pressure);
+}
+static DEVICE_ATTR(pressure0_input, S_IRUGO, show_pressure, NULL);
+
+
+static struct attribute *bmp085_attributes[] = {
+ &dev_attr_temp0_input.attr,
+ &dev_attr_pressure0_input.attr,
+ &dev_attr_oversampling.attr,
+ NULL
+};
+
+static const struct attribute_group bmp085_attr_group = {
+ .attrs = bmp085_attributes,
+};
+
+static int bmp085_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ if (client->addr != BMP085_I2C_ADDRESS)
+ return -ENODEV;
+
+ if (i2c_smbus_read_byte_data(client, BMP085_CHIP_ID_REG) != BMP085_CHIP_ID)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int bmp085_init_client(struct i2c_client *client)
+{
+ unsigned char version;
+ int status;
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ data->client = client;
+ status = bmp085_read_calibration_data(client);
+ if (status != 0)
+ goto exit;
+ version = i2c_smbus_read_byte_data(client, BMP085_VERSION_REG);
+ data->last_temp_measurement = 0;
+ data->oversampling_setting = 3;
+ mutex_init(&data->lock);
+ dev_info(&data->client->dev, "BMP085 ver. %d.%d found.\n",
+ (version & 0x0F), (version & 0xF0) >> 4);
+exit:
+ return status;
+}
+
+static int bmp085_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bmp085_data *data;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct bmp085_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* default settings after POR */
+ data->oversampling_setting = 0x00;
+
+ i2c_set_clientdata(client, data);
+
+ /* Initialize the BMP085 chip */
+ err = bmp085_init_client(client);
+ if (err != 0)
+ goto exit_free;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &bmp085_attr_group);
+ if (err)
+ goto exit_free;
+
+ dev_info(&data->client->dev, "Succesfully initialized bmp085!\n");
+ goto exit;
+
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int bmp085_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group);
+ kfree(i2c_get_clientdata(client));
+ return 0;
+}
+
+static const struct i2c_device_id bmp085_id[] = {
+ { "bmp085", 0 },
+ { }
+};
+
+static struct i2c_driver bmp085_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bmp085"
+ },
+ .id_table = bmp085_id,
+ .probe = bmp085_probe,
+ .remove = bmp085_remove,
+
+ .detect = bmp085_detect,
+ .address_list = normal_i2c
+};
+
+static int __init bmp085_init(void)
+{
+ return i2c_add_driver(&bmp085_driver);
+}
+
+static void __exit bmp085_exit(void)
+{
+ i2c_del_driver(&bmp085_driver);
+}
+
+
+MODULE_AUTHOR("Christoph Mair <christoph.mair@gmail.com");
+MODULE_DESCRIPTION("BMP085 driver");
+MODULE_LICENSE("GPL");
+
+module_init(bmp085_init);
+module_exit(bmp085_exit);
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index 2d44b330010..6f6218061b0 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -211,6 +211,17 @@ EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
*/
void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
{
+ unsigned long flags;
+ uint16_t val;
+
+ /* timer can be made available again only if never set up */
+ val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP);
+ if (!(val & MFGPT_SETUP_SETUP)) {
+ spin_lock_irqsave(&timer->chip->lock, flags);
+ __set_bit(timer->nr, timer->chip->avail);
+ spin_unlock_irqrestore(&timer->chip->lock, flags);
+ }
+
kfree(timer);
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
new file mode 100644
index 00000000000..234bfcaf209
--- /dev/null
+++ b/drivers/misc/hmc6352.c
@@ -0,0 +1,166 @@
+/*
+ * hmc6352.c - Honeywell Compass Driver
+ *
+ * Copyright (C) 2009 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+
+static DEFINE_MUTEX(compass_mutex);
+
+static int compass_command(struct i2c_client *c, u8 cmd)
+{
+ int ret = i2c_master_send(c, &cmd, 1);
+ if (ret < 0)
+ dev_warn(&c->dev, "command '%c' failed.\n", cmd);
+ return ret;
+}
+
+static int compass_store(struct device *dev, const char *buf, size_t count,
+ const char *map)
+{
+ struct i2c_client *c = to_i2c_client(dev);
+ int ret;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val >= strlen(map))
+ return -EINVAL;
+ mutex_lock(&compass_mutex);
+ ret = compass_command(c, map[val]);
+ mutex_unlock(&compass_mutex);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t compass_calibration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return compass_store(dev, buf, count, "EC");
+}
+
+static ssize_t compass_power_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return compass_store(dev, buf, count, "SW");
+}
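+
+/*
+ * Illustrative sysfs usage (derived from the maps passed above): writing
+ * "0" or "1" to 'calibration' sends command 'E' or 'C' respectively, and
+ * writing "0" or "1" to 'power_state' sends 'S' or 'W'.
+ */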
+
+static ssize_t compass_heading_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char i2c_data[2];
+ int ret;
+
+ mutex_lock(&compass_mutex);
+ ret = compass_command(client, 'A');
+ if (ret != 1) {
+ mutex_unlock(&compass_mutex);
+ return ret;
+ }
+ msleep(10); /* after sending the 'A' cmd we must wait 7-10 ms */
+ ret = i2c_master_recv(client, i2c_data, 2);
+ mutex_unlock(&compass_mutex);
+ if (ret != 2) {
+ dev_warn(dev, "i2c read data cmd failed\n");
+ return ret;
+ }
+ ret = (i2c_data[0] << 8) | i2c_data[1];
+ return sprintf(buf, "%d.%d\n", ret/10, ret%10);
+}
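+
+/*
+ * Illustrative reading: raw bytes 0x06 0x72 give ret = 0x0672 = 1650, which
+ * is printed as "165.0" (tenths of a degree, the unit being assumed from the
+ * compass context rather than this patch).
+ */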
+
+
+static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL);
+static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
+static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
+
+static struct attribute *mid_att_compass[] = {
+ &dev_attr_heading0_input.attr,
+ &dev_attr_calibration.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static const struct attribute_group m_compass_gr = {
+ .name = "hmc6352",
+ .attrs = mid_att_compass
+};
+
+static int hmc6352_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+
+ res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
+ if (res) {
+ dev_err(&client->dev, "device_create_file failed\n");
+ return res;
+ }
+ dev_info(&client->dev, "%s HMC6352 compass chip found\n",
+ client->name);
+ return 0;
+}
+
+static int hmc6352_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
+ return 0;
+}
+
+static struct i2c_device_id hmc6352_id[] = {
+ { "hmc6352", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, hmc6352_id);
+
+static struct i2c_driver hmc6352_driver = {
+ .driver = {
+ .name = "hmc6352",
+ },
+ .probe = hmc6352_probe,
+ .remove = hmc6352_remove,
+ .id_table = hmc6352_id,
+};
+
+static int __init sensor_hmc6352_init(void)
+{
+ return i2c_add_driver(&hmc6352_driver);
+}
+
+static void __exit sensor_hmc6352_exit(void)
+{
+ i2c_del_driver(&hmc6352_driver);
+}
+
+module_init(sensor_hmc6352_init);
+module_exit(sensor_hmc6352_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("hmc6352 Compass Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 98ad0120aa9..557a8c2a733 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -256,7 +256,8 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
- char *dma_va, *dma_pa;
+ char *dma_va;
+ dma_addr_t dma_pa;
struct ccb *driver_ccb, *ilo_ccb;
driver_ccb = &data->driver_ccb;
@@ -272,12 +273,12 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
return -ENOMEM;
dma_va = (char *)data->dma_va;
- dma_pa = (char *)data->dma_pa;
+ dma_pa = data->dma_pa;
memset(dma_va, 0, data->dma_size);
dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
- dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN);
+ dma_pa = roundup(dma_pa, ILO_START_ALIGN);
/*
* Create two ccb's, one with virt addrs, one with phys addrs.
@@ -288,26 +289,26 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
fifo_setup(dma_va, NR_QENTRY);
driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
- ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE;
+ ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
dma_va += fifo_sz(NR_QENTRY);
dma_pa += fifo_sz(NR_QENTRY);
dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
- dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ);
+ dma_pa = roundup(dma_pa, ILO_CACHE_SZ);
fifo_setup(dma_va, NR_QENTRY);
driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
- ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE;
+ ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
dma_va += fifo_sz(NR_QENTRY);
dma_pa += fifo_sz(NR_QENTRY);
driver_ccb->ccb_u2.send_desc = dma_va;
- ilo_ccb->ccb_u2.send_desc = dma_pa;
+ ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
dma_pa += desc_mem_sz(NR_QENTRY);
dma_va += desc_mem_sz(NR_QENTRY);
driver_ccb->ccb_u4.recv_desc = dma_va;
- ilo_ccb->ccb_u4.recv_desc = dma_pa;
+ ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;
driver_ccb->channel = slot;
ilo_ccb->channel = slot;
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 247eb386a97..54e43adbdea 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -79,21 +79,21 @@ struct ilo_hwinfo {
struct ccb {
union {
char *send_fifobar;
- u64 padding1;
+ u64 send_fifobar_pa;
} ccb_u1;
union {
char *send_desc;
- u64 padding2;
+ u64 send_desc_pa;
} ccb_u2;
u64 send_ctrl;
union {
char *recv_fifobar;
- u64 padding3;
+ u64 recv_fifobar_pa;
} ccb_u3;
union {
char *recv_desc;
- u64 padding4;
+ u64 recv_desc_pa;
} ccb_u4;
u64 recv_ctrl;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5bfb2a2041b..ef34de7a802 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -124,9 +124,9 @@ static int count = DEFAULT_COUNT;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
"default is 10");
-module_param(cpoint_name, charp, 0644);
+module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-module_param(cpoint_type, charp, 0644);
+module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
"hitting the crash point");
module_param(cpoint_count, int, 0644);
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71a..2a1e804a71a 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cb9fbc83b09..d545f79f600 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,6 +29,7 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
@@ -107,6 +108,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
int ret = -ENXIO;
+ lock_kernel();
if (md) {
if (md->usage == 2)
check_disk_change(bdev);
@@ -117,6 +119,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
ret = -EROFS;
}
}
+ unlock_kernel();
return ret;
}
@@ -125,7 +128,9 @@ static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
struct mmc_blk_data *md = disk->private_data;
+ lock_kernel();
mmc_blk_put(md);
+ unlock_kernel();
return 0;
}
@@ -242,7 +247,76 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
return cmd.resp[0];
}
-static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ mmc_claim_host(card->host);
+
+ if (!mmc_can_erase(card)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_trim(card))
+ arg = MMC_TRIM_ARG;
+ else
+ arg = MMC_ERASE_ARG;
+
+ err = mmc_erase(card, from, nr, arg);
+out:
+ spin_lock_irq(&md->lock);
+ __blk_end_request(req, err, blk_rq_bytes(req));
+ spin_unlock_irq(&md->lock);
+
+ mmc_release_host(card->host);
+
+ return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ mmc_claim_host(card->host);
+
+ if (!mmc_can_secure_erase_trim(card)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
+
+ err = mmc_erase(card, from, nr, arg);
+ if (!err && arg == MMC_SECURE_TRIM1_ARG)
+ err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+out:
+ spin_lock_irq(&md->lock);
+ __blk_end_request(req, err, blk_rq_bytes(req));
+ spin_unlock_irq(&md->lock);
+
+ mmc_release_host(card->host);
+
+ return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
@@ -470,6 +544,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
return 0;
}
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ if (req->cmd_flags & REQ_DISCARD) {
+ if (req->cmd_flags & REQ_SECURE)
+ return mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ return mmc_blk_issue_discard_rq(mq, req);
+ } else {
+ return mmc_blk_issue_rw_rq(mq, req);
+ }
+}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 445d7db2277..5dd8576b5c1 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/scatterlist.h>
+#include <linux/swap.h> /* For nr_free_buffer_pages() */
#define RESULT_OK 0
#define RESULT_FAIL 1
@@ -25,6 +26,60 @@
#define BUFFER_ORDER 2
#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
+/*
+ * Limit the test area size to the maximum MMC HC erase group size. Note that
+ * the maximum SD allocation unit size is just 4MiB.
+ */
+#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
+
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+ struct page *page;
+ unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+ struct mmc_test_pages *arr;
+ unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_segs: maximum segments in scatterlist @sg
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+ unsigned long max_sz;
+ unsigned int dev_addr;
+ unsigned int max_segs;
+ unsigned int blocks;
+ unsigned int sg_len;
+ struct mmc_test_mem *mem;
+ struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ */
struct mmc_test_card {
struct mmc_card *card;
@@ -33,6 +88,7 @@ struct mmc_test_card {
#ifdef CONFIG_HIGHMEM
struct page *highmem;
#endif
+ struct mmc_test_area area;
};
/*******************************************************************/
@@ -97,6 +153,12 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
mmc_set_data_timeout(mrq->data, test->card);
}
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+ return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd->resp[0]) == 7);
+}
+
/*
* Wait for the card to finish the busy state
*/
@@ -117,13 +179,13 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (ret)
break;
- if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+ if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
printk(KERN_INFO "%s: Warning: Host did not "
"wait for busy state to end.\n",
mmc_hostname(test->card->host));
}
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+ } while (mmc_test_busy(&cmd));
return ret;
}
@@ -170,6 +232,248 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
return 0;
}
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+ if (!mem)
+ return;
+ while (mem->cnt--)
+ __free_pages(mem->arr[mem->cnt].page,
+ mem->arr[mem->cnt].order);
+ kfree(mem->arr);
+ kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz. If there
+ * is not much memory, do not exceed 1/16th of the total lowmem pages.
+ */
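+/*
+ * Illustrative sizing (figures assumed): with 4 KiB pages and ~1 GiB of
+ * free lowmem, nr_free_buffer_pages() >> 4 caps the allocation at about
+ * 16384 pages, i.e. roughly 64 MiB, regardless of max_sz.
+ */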
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+ unsigned long max_sz)
+{
+ unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+ unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+ unsigned long page_cnt = 0;
+ unsigned long limit = nr_free_buffer_pages() >> 4;
+ struct mmc_test_mem *mem;
+
+ if (max_page_cnt > limit)
+ max_page_cnt = limit;
+ if (max_page_cnt < min_page_cnt)
+ max_page_cnt = min_page_cnt;
+
+ mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+ if (!mem)
+ return NULL;
+
+ mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+ GFP_KERNEL);
+ if (!mem->arr)
+ goto out_free;
+
+ while (max_page_cnt) {
+ struct page *page;
+ unsigned int order;
+ gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+ __GFP_NORETRY;
+
+ order = get_order(max_page_cnt << PAGE_SHIFT);
+ while (1) {
+ page = alloc_pages(flags, order);
+ if (page || !order)
+ break;
+ order -= 1;
+ }
+ if (!page) {
+ if (page_cnt < min_page_cnt)
+ goto out_free;
+ break;
+ }
+ mem->arr[mem->cnt].page = page;
+ mem->arr[mem->cnt].order = order;
+ mem->cnt += 1;
+ if (max_page_cnt <= (1UL << order))
+ break;
+ max_page_cnt -= 1UL << order;
+ page_cnt += 1UL << order;
+ }
+
+ return mem;
+
+out_free:
+ mmc_test_free_mem(mem);
+ return NULL;
+}
+
+/*
+ * Map memory into a scatterlist. Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+ struct scatterlist *sglist, int repeat,
+ unsigned int max_segs, unsigned int *sg_len)
+{
+ struct scatterlist *sg = NULL;
+ unsigned int i;
+
+ sg_init_table(sglist, max_segs);
+
+ *sg_len = 0;
+ do {
+ for (i = 0; i < mem->cnt; i++) {
+ unsigned long len = PAGE_SIZE << mem->arr[i].order;
+
+ if (sz < len)
+ len = sz;
+ if (sg)
+ sg = sg_next(sg);
+ else
+ sg = sglist;
+ if (!sg)
+ return -EINVAL;
+ sg_set_page(sg, mem->arr[i].page, len, 0);
+ sz -= len;
+ *sg_len += 1;
+ if (!sz)
+ break;
+ }
+ } while (sz && repeat);
+
+ if (sz)
+ return -EINVAL;
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return 0;
+}
+
+/*
+ * Map memory into a scatterlist so that no pages are contiguous. Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+ unsigned long sz,
+ struct scatterlist *sglist,
+ unsigned int max_segs,
+ unsigned int *sg_len)
+{
+ struct scatterlist *sg = NULL;
+ unsigned int i = mem->cnt, cnt;
+ unsigned long len;
+ void *base, *addr, *last_addr = NULL;
+
+ sg_init_table(sglist, max_segs);
+
+ *sg_len = 0;
+ while (sz && i) {
+ base = page_address(mem->arr[--i].page);
+ cnt = 1 << mem->arr[i].order;
+ while (sz && cnt) {
+ addr = base + PAGE_SIZE * --cnt;
+ if (last_addr && last_addr + PAGE_SIZE == addr)
+ continue;
+ last_addr = addr;
+ len = PAGE_SIZE;
+ if (sz < len)
+ len = sz;
+ if (sg)
+ sg = sg_next(sg);
+ else
+ sg = sglist;
+ if (!sg)
+ return -EINVAL;
+ sg_set_page(sg, virt_to_page(addr), len, 0);
+ sz -= len;
+ *sg_len += 1;
+ }
+ }
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return 0;
+}
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
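+/*
+ * Worked example (illustrative): 4 MiB transferred in 0.5 s gives
+ * bytes * 10^9 = 4194304 * 10^9 and ns = 5 * 10^8, so the result is
+ * 8388608 bytes/s (8 MiB/s); the halving loop only triggers when ns
+ * exceeds UINT_MAX.
+ */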
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+{
+ uint64_t ns;
+
+ ns = ts->tv_sec;
+ ns *= 1000000000;
+ ns += ts->tv_nsec;
+
+ bytes *= 1000000000;
+
+ while (ns > UINT_MAX) {
+ bytes >>= 1;
+ ns >>= 1;
+ }
+
+ if (!ns)
+ return 0;
+
+ do_div(bytes, (uint32_t)ns);
+
+ return bytes;
+}
+
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+ struct timespec *ts1, struct timespec *ts2)
+{
+ unsigned int rate, sectors = bytes >> 9;
+ struct timespec ts;
+
+ ts = timespec_sub(*ts2, *ts1);
+
+ rate = mmc_test_rate(bytes, &ts);
+
+ printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ "seconds (%u kB/s, %u KiB/s)\n",
+ mmc_hostname(test->card->host), sectors, sectors >> 1,
+ (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+ (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+ unsigned int count, struct timespec *ts1,
+ struct timespec *ts2)
+{
+ unsigned int rate, sectors = bytes >> 9;
+ uint64_t tot = bytes * count;
+ struct timespec ts;
+
+ ts = timespec_sub(*ts2, *ts1);
+
+ rate = mmc_test_rate(tot, &ts);
+
+ printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+ "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+ mmc_hostname(test->card->host), count, sectors, count,
+ sectors >> 1, (sectors == 1 ? ".5" : ""),
+ (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+ rate / 1000, rate / 1024);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+ return card->ext_csd.sectors;
+ else
+ return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
/*******************************************************************/
/* Test preparation and cleanup */
/*******************************************************************/
@@ -893,8 +1197,419 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
return 0;
}
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+ printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+ mmc_hostname(test->card->host));
+ return 0;
+}
+
#endif /* CONFIG_HIGHMEM */
+/*
+ * Map sz bytes so that it can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
+ int max_scatter)
+{
+ struct mmc_test_area *t = &test->area;
+
+ t->blocks = sz >> 9;
+
+ if (max_scatter) {
+ return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+ t->max_segs, &t->sg_len);
+ } else {
+ return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+ &t->sg_len);
+ }
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+ unsigned int dev_addr, int write)
+{
+ struct mmc_test_area *t = &test->area;
+
+ return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+ t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes.
+ */
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write, int max_scatter,
+ int timed)
+{
+ struct timespec ts1, ts2;
+ int ret;
+
+ ret = mmc_test_area_map(test, sz, max_scatter);
+ if (ret)
+ return ret;
+
+ if (timed)
+ getnstimeofday(&ts1);
+
+ ret = mmc_test_area_transfer(test, dev_addr, write);
+ if (ret)
+ return ret;
+
+ if (timed)
+ getnstimeofday(&ts2);
+
+ if (timed)
+ mmc_test_print_rate(test, sz, &ts1, &ts2);
+
+ return 0;
+}
+
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+ return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+ 1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+
+ if (!mmc_can_erase(test->card))
+ return 0;
+
+ return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+ MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+
+ kfree(t->sg);
+ mmc_test_free_mem(t->mem);
+
+ return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers. The size of the area is the
+ * preferred erase size which is a good size for optimal transfer speed. Note
+ * that this is typically 4MiB for modern cards. The test area is set to the middle
+ * of the card because cards may have different characteristics at the front
+ * (for FAT file system optimization). Optionally, the area is erased (if the
+ * card supports it) which may improve write performance. Optionally, the area
+ * is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long min_sz = 64 * 1024;
+ int ret;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
+ t->max_sz = TEST_AREA_MAX_SIZE;
+ else
+ t->max_sz = (unsigned long)test->card->pref_erase << 9;
+ /*
+ * Try to allocate enough memory for the whole area. Less is OK
+ * because the same memory can be mapped into the scatterlist more than
+ * once.
+ */
+ t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+ if (!t->mem)
+ return -ENOMEM;
+
+ t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
+ t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+ if (!t->sg) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ t->dev_addr = mmc_test_capacity(test->card) / 2;
+ t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+ if (erase) {
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ goto out_free;
+ }
+
+ if (fill) {
+ ret = mmc_test_area_fill(test);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ mmc_test_area_cleanup(test);
+ return ret;
+}
+
+/*
+ * Prepare for large transfers. Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+ return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers. Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+ return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers. Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+ return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance. Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list. This tests
+ * how efficiently DMA handles scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+ int max_scatter)
+{
+ return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+ write, max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+ unsigned long sz;
+ unsigned int dev_addr;
+ int ret;
+
+ for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ dev_addr = test->area.dev_addr + (sz >> 9);
+ ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+ if (ret)
+ return ret;
+ }
+ dev_addr = test->area.dev_addr;
+ return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+ unsigned long sz;
+ unsigned int dev_addr;
+ int ret;
+
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ dev_addr = test->area.dev_addr + (sz >> 9);
+ ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+ if (ret)
+ return ret;
+ }
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ dev_addr = test->area.dev_addr;
+ return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+ unsigned long sz;
+ unsigned int dev_addr;
+ struct timespec ts1, ts2;
+ int ret;
+
+ if (!mmc_can_trim(test->card))
+ return RESULT_UNSUP_CARD;
+
+ if (!mmc_can_erase(test->card))
+ return RESULT_UNSUP_HOST;
+
+ for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ dev_addr = test->area.dev_addr + (sz >> 9);
+ getnstimeofday(&ts1);
+ ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+ if (ret)
+ return ret;
+ getnstimeofday(&ts2);
+ mmc_test_print_rate(test, sz, &ts1, &ts2);
+ }
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+ if (ret)
+ return ret;
+ getnstimeofday(&ts2);
+ mmc_test_print_rate(test, sz, &ts1, &ts2);
+ return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+ unsigned long sz;
+ unsigned int dev_addr, i, cnt;
+ struct timespec ts1, ts2;
+ int ret;
+
+ for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ }
+ return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+ unsigned long sz;
+ unsigned int dev_addr, i, cnt;
+ struct timespec ts1, ts2;
+ int ret;
+
+ for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ }
+ return 0;
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+ unsigned long sz;
+ unsigned int dev_addr, i, cnt;
+ struct timespec ts1, ts2;
+ int ret;
+
+ if (!mmc_can_trim(test->card))
+ return RESULT_UNSUP_CARD;
+
+ if (!mmc_can_erase(test->card))
+ return RESULT_UNSUP_HOST;
+
+ for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ ret = mmc_test_area_fill(test);
+ if (ret)
+ return ret;
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_erase(test->card, dev_addr, sz >> 9,
+ MMC_TRIM_ARG);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ }
+ return 0;
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -1040,8 +1755,100 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_cleanup,
},
+#else
+
+ {
+ .name = "Highmem write",
+ .run = mmc_test_no_highmem,
+ },
+
+ {
+ .name = "Highmem read",
+ .run = mmc_test_no_highmem,
+ },
+
+ {
+ .name = "Multi-block highmem write",
+ .run = mmc_test_no_highmem,
+ },
+
+ {
+ .name = "Multi-block highmem read",
+ .run = mmc_test_no_highmem,
+ },
+
#endif /* CONFIG_HIGHMEM */
+ {
+ .name = "Best-case read performance",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_best_read_performance,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Best-case write performance",
+ .prepare = mmc_test_area_prepare_erase,
+ .run = mmc_test_best_write_performance,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Best-case read performance into scattered pages",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_best_read_perf_max_scatter,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Best-case write performance from scattered pages",
+ .prepare = mmc_test_area_prepare_erase,
+ .run = mmc_test_best_write_perf_max_scatter,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Single read performance by transfer size",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_profile_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Single write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Single trim performance by transfer size",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_profile_trim_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Consecutive read performance by transfer size",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_profile_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Consecutive write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Consecutive trim performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_seq_trim_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
};
static DEFINE_MUTEX(mmc_test_lock);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index d6ded247d94..e876678176b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -30,9 +30,9 @@
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
/*
- * We only like normal block requests.
+ * We only like normal block requests and discards.
*/
- if (!blk_fs_request(req)) {
+ if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
@@ -128,8 +128,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ if (mmc_can_erase(card)) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
+ mq->queue->limits.max_discard_sectors = UINT_MAX;
+ if (card->erased_byte == 0)
+ mq->queue->limits.discard_zeroes_data = 1;
+ if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
+ mq->queue->limits.discard_granularity =
+ card->erase_size << 9;
+ mq->queue->limits.discard_alignment =
+ card->erase_size << 9;
+ }
+ if (mmc_can_secure_erase_trim(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
+ mq->queue);
+ }
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_hw_segs == 1) {
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 49d9dcaeca4..7cd9749dc21 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -37,6 +37,8 @@ static ssize_t mmc_type_show(struct device *dev,
return sprintf(buf, "SD\n");
case MMC_TYPE_SDIO:
return sprintf(buf, "SDIO\n");
+ case MMC_TYPE_SD_COMBO:
+ return sprintf(buf, "SDcombo\n");
default:
return -EFAULT;
}
@@ -74,6 +76,9 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
case MMC_TYPE_SDIO:
type = "SDIO";
break;
+ case MMC_TYPE_SD_COMBO:
+ type = "SDcombo";
+ break;
default:
type = NULL;
}
@@ -239,6 +244,10 @@ int mmc_add_card(struct mmc_card *card)
case MMC_TYPE_SDIO:
type = "SDIO";
break;
+ case MMC_TYPE_SD_COMBO:
+ type = "SD-combo";
+ if (mmc_card_blockaddr(card))
+ type = "SDHC-combo";
default:
type = "?";
break;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 569e94da844..5db49b124ff 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1050,6 +1050,352 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
EXPORT_SYMBOL(mmc_detect_change);
+void mmc_init_erase(struct mmc_card *card)
+{
+ unsigned int sz;
+
+ if (is_power_of_2(card->erase_size))
+ card->erase_shift = ffs(card->erase_size) - 1;
+ else
+ card->erase_shift = 0;
+
+ /*
+ * It is possible to erase an arbitrarily large area of an SD or MMC
+ * card. That is not desirable because it can take a long time
+ * (minutes), potentially delaying more important I/O, and also the
+ * timeout calculations become increasingly over-estimated.
+ * Consequently, 'pref_erase' is defined as a guide to limit erases
+ * to that size and alignment.
+ *
+ * For SD cards that define Allocation Unit size, limit erases to one
+ * Allocation Unit at a time. For MMC cards that define High Capacity
+ * Erase Size, whether it is switched on or not, limit to that size.
+ * Otherwise just have a stab at a good value. For modern cards it
+ * will end up being 4MiB. Note that if the value is too small, it
+ * can end up taking longer to erase.
+ */
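+ /*
+ * Worked example (illustrative): an MMC card without an HC erase size
+ * that holds 4 GiB has sz = 4096 (MiB), so pref_erase becomes
+ * 4 * 1024 * 1024 / 512 = 8192 sectors, then rounded up to a multiple
+ * of erase_size if necessary.
+ */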
+ if (mmc_card_sd(card) && card->ssr.au) {
+ card->pref_erase = card->ssr.au;
+ card->erase_shift = ffs(card->ssr.au) - 1;
+ } else if (card->ext_csd.hc_erase_size) {
+ card->pref_erase = card->ext_csd.hc_erase_size;
+ } else {
+ sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
+ if (sz < 128)
+ card->pref_erase = 512 * 1024 / 512;
+ else if (sz < 512)
+ card->pref_erase = 1024 * 1024 / 512;
+ else if (sz < 1024)
+ card->pref_erase = 2 * 1024 * 1024 / 512;
+ else
+ card->pref_erase = 4 * 1024 * 1024 / 512;
+ if (card->pref_erase < card->erase_size)
+ card->pref_erase = card->erase_size;
+ else {
+ sz = card->pref_erase % card->erase_size;
+ if (sz)
+ card->pref_erase += card->erase_size - sz;
+ }
+ }
+}
+
+static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
+ struct mmc_command *cmd,
+ unsigned int arg, unsigned int qty)
+{
+ unsigned int erase_timeout;
+
+ if (card->ext_csd.erase_group_def & 1) {
+ /* High Capacity Erase Group Size uses HC timeouts */
+ if (arg == MMC_TRIM_ARG)
+ erase_timeout = card->ext_csd.trim_timeout;
+ else
+ erase_timeout = card->ext_csd.hc_erase_timeout;
+ } else {
+ /* CSD Erase Group Size uses write timeout */
+ unsigned int mult = (10 << card->csd.r2w_factor);
+ unsigned int timeout_clks = card->csd.tacc_clks * mult;
+ unsigned int timeout_us;
+
+ /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
+ if (card->csd.tacc_ns < 1000000)
+ timeout_us = (card->csd.tacc_ns * mult) / 1000;
+ else
+ timeout_us = (card->csd.tacc_ns / 1000) * mult;
+
+ /*
+ * ios.clock is only a target. The real clock rate might be
+ * less but not that much less, so fudge it by multiplying by 2.
+ */
+ timeout_clks <<= 1;
+ timeout_us += (timeout_clks * 1000) /
+ (card->host->ios.clock / 1000);
+
+ erase_timeout = timeout_us / 1000;
+
+ /*
+ * Theoretically, the calculation could underflow so round up
+ * to 1ms in that case.
+ */
+ if (!erase_timeout)
+ erase_timeout = 1;
+ }
+
+ /* Multiplier for secure operations */
+ if (arg & MMC_SECURE_ARGS) {
+ if (arg == MMC_SECURE_ERASE_ARG)
+ erase_timeout *= card->ext_csd.sec_erase_mult;
+ else
+ erase_timeout *= card->ext_csd.sec_trim_mult;
+ }
+
+ erase_timeout *= qty;
+
+ /*
+ * Ensure at least a 1 second timeout for SPI as per
+ * 'mmc_set_data_timeout()'
+ */
+ if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
+ erase_timeout = 1000;
+
+ cmd->erase_timeout = erase_timeout;
+}
+
+static void mmc_set_sd_erase_timeout(struct mmc_card *card,
+ struct mmc_command *cmd, unsigned int arg,
+ unsigned int qty)
+{
+ if (card->ssr.erase_timeout) {
+ /* Erase timeout specified in SD Status Register (SSR) */
+ cmd->erase_timeout = card->ssr.erase_timeout * qty +
+ card->ssr.erase_offset;
+ } else {
+ /*
+ * Erase timeout not specified in SD Status Register (SSR) so
+ * use 250ms per write block.
+ */
+ cmd->erase_timeout = 250 * qty;
+ }
+
+ /* Must not be less than 1 second */
+ if (cmd->erase_timeout < 1000)
+ cmd->erase_timeout = 1000;
+}
+
+static void mmc_set_erase_timeout(struct mmc_card *card,
+ struct mmc_command *cmd, unsigned int arg,
+ unsigned int qty)
+{
+ if (mmc_card_sd(card))
+ mmc_set_sd_erase_timeout(card, cmd, arg, qty);
+ else
+ mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
+}
+
+static int mmc_do_erase(struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command cmd;
+ unsigned int qty = 0;
+ int err;
+
+ /*
+ * qty is used to calculate the erase timeout which depends on how many
+ * erase groups (or allocation units in SD terminology) are affected.
+ * We count erasing part of an erase group as one erase group.
+ * For SD, the allocation units are always a power of 2. For MMC, the
+ * erase group size is almost certainly also a power of 2, but the JEDEC
+ * standard does not seem to insist on that, so we fall back to
+ * division in that case. SD may not specify an allocation unit size,
+ * in which case the timeout is based on the number of write blocks.
+ *
+ * Note that the timeout for secure trim 2 will only be correct if the
+ * number of erase groups specified is the same as the total of all
+ * preceding secure trim 1 commands. Since the power may have been
+ * lost since the secure trim 1 commands occurred, it is generally
+ * impossible to calculate the secure trim 2 timeout correctly.
+ */
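+ /*
+ * Worked example (illustrative): with erase_size = 4096 sectors
+ * (erase_shift = 12), erasing sectors 0..8191 gives
+ * qty = (8191 >> 12) - (0 >> 12) + 1 = 2 erase groups.
+ */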
+ if (card->erase_shift)
+ qty += ((to >> card->erase_shift) -
+ (from >> card->erase_shift)) + 1;
+ else if (mmc_card_sd(card))
+ qty += to - from + 1;
+ else
+ qty += ((to / card->erase_size) -
+ (from / card->erase_size)) + 1;
+
+ if (!mmc_card_blockaddr(card)) {
+ from <<= 9;
+ to <<= 9;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ if (mmc_card_sd(card))
+ cmd.opcode = SD_ERASE_WR_BLK_START;
+ else
+ cmd.opcode = MMC_ERASE_GROUP_START;
+ cmd.arg = from;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "mmc_erase: group start error %d, "
+ "status %#x\n", err, cmd.resp[0]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ if (mmc_card_sd(card))
+ cmd.opcode = SD_ERASE_WR_BLK_END;
+ else
+ cmd.opcode = MMC_ERASE_GROUP_END;
+ cmd.arg = to;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
+ err, cmd.resp[0]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_ERASE;
+ cmd.arg = arg;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ mmc_set_erase_timeout(card, &cmd, arg, qty);
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ if (mmc_host_is_spi(card->host))
+ goto out;
+
+ do {
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ /* Do not retry else we can't see errors */
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err || (cmd.resp[0] & 0xFDF92000)) {
+ printk(KERN_ERR "error %d requesting status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+ R1_CURRENT_STATE(cmd.resp[0]) == 7);
+out:
+ return err;
+}
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+
+ if (!(card->host->caps & MMC_CAP_ERASE) ||
+ !(card->csd.cmdclass & CCC_ERASE))
+ return -EOPNOTSUPP;
+
+ if (!card->erase_size)
+ return -EOPNOTSUPP;
+
+ if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
+ return -EOPNOTSUPP;
+
+ if ((arg & MMC_SECURE_ARGS) &&
+ !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
+ return -EOPNOTSUPP;
+
+ if ((arg & MMC_TRIM_ARGS) &&
+ !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
+ return -EOPNOTSUPP;
+
+ if (arg == MMC_SECURE_ERASE_ARG) {
+ if (from % card->erase_size || nr % card->erase_size)
+ return -EINVAL;
+ }
+
+ if (arg == MMC_ERASE_ARG) {
+ rem = from % card->erase_size;
+ if (rem) {
+ rem = card->erase_size - rem;
+ from += rem;
+ if (nr > rem)
+ nr -= rem;
+ else
+ return 0;
+ }
+ rem = nr % card->erase_size;
+ if (rem)
+ nr -= rem;
+ }
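+
+ /*
+ * Illustrative: with erase_size = 1024 sectors, from = 100 and
+ * nr = 3000, the alignment above yields from = 1024 and nr = 2048,
+ * so only whole erase groups are erased.
+ */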
+
+ if (nr == 0)
+ return 0;
+
+ to = from + nr;
+
+ if (to <= from)
+ return -EINVAL;
+
+ /* 'from' and 'to' are inclusive */
+ to -= 1;
+
+ return mmc_do_erase(card, from, to, arg);
+}
+EXPORT_SYMBOL(mmc_erase);
+
+int mmc_can_erase(struct mmc_card *card)
+{
+ if ((card->host->caps & MMC_CAP_ERASE) &&
+ (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_erase);
+
+int mmc_can_trim(struct mmc_card *card)
+{
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_trim);
+
+int mmc_can_secure_erase_trim(struct mmc_card *card)
+{
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_secure_erase_trim);
+
+int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr)
+{
+ if (!card->erase_size)
+ return 0;
+ if (from % card->erase_size || nr % card->erase_size)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_erase_group_aligned);
void mmc_rescan(struct work_struct *work)
{
@@ -1057,6 +1403,17 @@ void mmc_rescan(struct work_struct *work)
container_of(work, struct mmc_host, detect.work);
u32 ocr;
int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->rescan_disable) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
mmc_bus_get(host);
@@ -1099,8 +1456,15 @@ void mmc_rescan(struct work_struct *work)
*/
err = mmc_send_io_op_cond(host, 0, &ocr);
if (!err) {
- if (mmc_attach_sdio(host, ocr))
- mmc_power_off(host);
+ if (mmc_attach_sdio(host, ocr)) {
+ mmc_claim_host(host);
+ /* try SDMEM (but not MMC) even if SDIO is broken */
+ if (mmc_send_app_op_cond(host, 0, &ocr))
+ goto out_fail;
+
+ if (mmc_attach_sd(host, ocr))
+ mmc_power_off(host);
+ }
goto out;
}
@@ -1124,6 +1488,7 @@ void mmc_rescan(struct work_struct *work)
goto out;
}
+out_fail:
mmc_release_host(host);
mmc_power_off(host);
@@ -1266,19 +1631,6 @@ int mmc_suspend_host(struct mmc_host *host)
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->suspend)
err = host->bus_ops->suspend(host);
- if (err == -ENOSYS || !host->bus_ops->resume) {
- /*
- * We simply "remove" the card in this case.
- * It will be redetected on resume.
- */
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
- host->pm_flags = 0;
- err = 0;
- }
}
mmc_bus_put(host);
@@ -1310,28 +1662,61 @@ int mmc_resume_host(struct mmc_host *host)
printk(KERN_WARNING "%s: error %d during resume "
"(card was removed?)\n",
mmc_hostname(host), err);
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
- /* no need to bother upper layers */
err = 0;
}
}
mmc_bus_put(host);
- /*
- * We add a slight delay here so that resume can progress
- * in parallel.
- */
- mmc_detect_change(host, 1);
-
return err;
}
-
EXPORT_SYMBOL(mmc_resume_host);
+/*
+ * Do the card removal on suspend if the card is assumed removable.
+ * Do that in the PM notifier while userspace is not yet frozen, so we
+ * are still able to sync the card.
+ */
+int mmc_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ struct mmc_host *host = container_of(
+ notify_block, struct mmc_host, pm_notify);
+ unsigned long flags;
+
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+ cancel_delayed_work_sync(&host->detect);
+
+ if (!host->bus_ops || host->bus_ops->suspend)
+ break;
+
+ mmc_claim_host(host);
+
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+
+ mmc_detach_bus(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_detect_change(host, 0);
+
+ }
+
+ return 0;
+}
#endif
static int __init mmc_init(void)
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index a811c52a165..9d9eef50e5d 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -29,6 +29,8 @@ struct mmc_bus_ops {
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
void mmc_detach_bus(struct mmc_host *host);
+void mmc_init_erase(struct mmc_card *card);
+
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 47353909e34..d80cfdc8edd 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -17,6 +17,7 @@
#include <linux/pagemap.h>
#include <linux/leds.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/mmc/host.h>
@@ -85,6 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
+#ifdef CONFIG_PM
+ host->pm_notify.notifier_call = mmc_pm_notify;
+#endif
/*
* By default, hosts do not support SGIO or large requests.
@@ -133,6 +137,7 @@ int mmc_add_host(struct mmc_host *host)
#endif
mmc_start_host(host);
+ register_pm_notifier(&host->pm_notify);
return 0;
}
@@ -149,6 +154,7 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
+ unregister_pm_notifier(&host->pm_notify);
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 89f7a25b7ac..6909a54c39b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -108,23 +108,34 @@ static int mmc_decode_cid(struct mmc_card *card)
return 0;
}
+static void mmc_set_erase_size(struct mmc_card *card)
+{
+ if (card->ext_csd.erase_group_def & 1)
+ card->erase_size = card->ext_csd.hc_erase_size;
+ else
+ card->erase_size = card->csd.erase_size;
+
+ mmc_init_erase(card);
+}
+
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
static int mmc_decode_csd(struct mmc_card *card)
{
struct mmc_csd *csd = &card->csd;
- unsigned int e, m, csd_struct;
+ unsigned int e, m, a, b;
u32 *resp = card->raw_csd;
/*
* We only understand CSD structure v1.1 and v1.2.
* v1.2 has extra information in bits 15, 11 and 10.
+ * We also support eMMC v4.4 & v4.41.
*/
- csd_struct = UNSTUFF_BITS(resp, 126, 2);
- if (csd_struct != 1 && csd_struct != 2) {
+ csd->structure = UNSTUFF_BITS(resp, 126, 2);
+ if (csd->structure == 0) {
printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
- mmc_hostname(card->host), csd_struct);
+ mmc_hostname(card->host), csd->structure);
return -EINVAL;
}
@@ -151,6 +162,13 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+ if (csd->write_blkbits >= 9) {
+ a = UNSTUFF_BITS(resp, 42, 5);
+ b = UNSTUFF_BITS(resp, 37, 5);
+ csd->erase_size = (a + 1) * (b + 1);
+ csd->erase_size <<= csd->write_blkbits - 9;
+ }
+
return 0;
}
@@ -207,11 +225,22 @@ static int mmc_read_ext_csd(struct mmc_card *card)
goto out;
}
+ /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ if (card->csd.structure == 3) {
+ int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
+ if (ext_csd_struct > 2) {
+ printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
+ "version %d\n", mmc_hostname(card->host),
+ ext_csd_struct);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
if (card->ext_csd.rev > 5) {
- printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
- "version %d\n", mmc_hostname(card->host),
- card->ext_csd.rev);
+ printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
+ mmc_hostname(card->host), card->ext_csd.rev);
err = -EINVAL;
goto out;
}
@@ -222,7 +251,9 @@ static int mmc_read_ext_csd(struct mmc_card *card)
ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
- if (card->ext_csd.sectors)
+
+ /* Cards with density > 2GiB are sector addressed */
+ if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
@@ -247,8 +278,30 @@ static int mmc_read_ext_csd(struct mmc_card *card)
if (sa_shift > 0 && sa_shift <= 0x17)
card->ext_csd.sa_timeout =
1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.erase_group_def =
+ ext_csd[EXT_CSD_ERASE_GROUP_DEF];
+ card->ext_csd.hc_erase_timeout = 300 *
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.hc_erase_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
}
+ if (card->ext_csd.rev >= 4) {
+ card->ext_csd.sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.trim_timeout = 300 *
+ ext_csd[EXT_CSD_TRIM_MULT];
+ }
+
+ if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
+ card->erased_byte = 0xFF;
+ else
+ card->erased_byte = 0x0;
+
out:
kfree(ext_csd);
@@ -260,6 +313,8 @@ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
@@ -271,6 +326,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
&dev_attr_date.attr,
+ &dev_attr_erase_size.attr,
+ &dev_attr_preferred_erase_size.attr,
&dev_attr_fwrev.attr,
&dev_attr_hwrev.attr,
&dev_attr_manfid.attr,
@@ -407,6 +464,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_read_ext_csd(card);
if (err)
goto free_card;
+ /* Erase size depends on CSD and Extended CSD */
+ mmc_set_erase_size(card);
}
/*
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 5eac21df480..0f524108555 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -59,7 +59,7 @@ static const unsigned int tacc_mant[] = {
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
-static void mmc_decode_cid(struct mmc_card *card)
+void mmc_decode_cid(struct mmc_card *card)
{
u32 *resp = card->raw_cid;
@@ -119,6 +119,13 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+
+ if (UNSTUFF_BITS(resp, 46, 1)) {
+ csd->erase_size = 1;
+ } else if (csd->write_blkbits >= 9) {
+ csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ csd->erase_size <<= csd->write_blkbits - 9;
+ }
break;
case 1:
/*
@@ -147,6 +154,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->r2w_factor = 4; /* Unused */
csd->write_blkbits = 9;
csd->write_partial = 0;
+ csd->erase_size = 1;
break;
default:
printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
@@ -154,6 +162,8 @@ static int mmc_decode_csd(struct mmc_card *card)
return -EINVAL;
}
+ card->erase_size = csd->erase_size;
+
return 0;
}
@@ -179,10 +189,68 @@ static int mmc_decode_scr(struct mmc_card *card)
scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
+ if (UNSTUFF_BITS(resp, 55, 1))
+ card->erased_byte = 0xFF;
+ else
+ card->erased_byte = 0x0;
+
return 0;
}
/*
+ * Fetch and process SD Status register.
+ */
+static int mmc_read_ssr(struct mmc_card *card)
+{
+ unsigned int au, es, et, eo;
+ int err, i;
+ u32 *ssr;
+
+ if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
+ printk(KERN_WARNING "%s: card lacks mandatory SD Status "
+ "function.\n", mmc_hostname(card->host));
+ return 0;
+ }
+
+ ssr = kmalloc(64, GFP_KERNEL);
+ if (!ssr)
+ return -ENOMEM;
+
+ err = mmc_app_sd_status(card, ssr);
+ if (err) {
+ printk(KERN_WARNING "%s: problem reading SD Status "
+ "register.\n", mmc_hostname(card->host));
+ err = 0;
+ goto out;
+ }
+
+ for (i = 0; i < 16; i++)
+ ssr[i] = be32_to_cpu(ssr[i]);
+
+ /*
+ * UNSTUFF_BITS only works with four u32s so we have to offset the
+ * bitfield positions accordingly.
+ */
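+ /*
+ * AU_SIZE occupies SSR bits [431:428]; ssr[0..3] hold bits
+ * 511..384, which is why 384 is subtracted from each position.
+ */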
+ au = UNSTUFF_BITS(ssr, 428 - 384, 4);
+ if (au > 0 && au <= 9) {
+ card->ssr.au = 1 << (au + 4);
+ es = UNSTUFF_BITS(ssr, 408 - 384, 16);
+ et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+ eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+ if (es && et) {
+ card->ssr.erase_timeout = (et * 1000) / es;
+ card->ssr.erase_offset = eo * 1000;
+ }
+ } else {
+ printk(KERN_WARNING "%s: SD Status: Invalid Allocation Unit "
+ "size.\n", mmc_hostname(card->host));
+ }
+out:
+ kfree(ssr);
+ return err;
+}
+
+/*
* Fetches and decodes switch information
*/
static int mmc_read_switch(struct mmc_card *card)
@@ -238,7 +306,7 @@ out:
/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
-static int mmc_switch_hs(struct mmc_card *card)
+int mmc_sd_switch_hs(struct mmc_card *card)
{
int err;
u8 *status;
@@ -272,9 +340,9 @@ static int mmc_switch_hs(struct mmc_card *card)
printk(KERN_WARNING "%s: Problem switching card "
"into high-speed mode!\n",
mmc_hostname(card->host));
+ err = 0;
} else {
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ err = 1;
}
out:
@@ -289,6 +357,8 @@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
@@ -302,6 +372,8 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_csd.attr,
&dev_attr_scr.attr,
&dev_attr_date.attr,
+ &dev_attr_erase_size.attr,
+ &dev_attr_preferred_erase_size.attr,
&dev_attr_fwrev.attr,
&dev_attr_hwrev.attr,
&dev_attr_manfid.attr,
@@ -320,26 +392,16 @@ static const struct attribute_group *sd_attr_groups[] = {
NULL,
};
-static struct device_type sd_type = {
+struct device_type sd_type = {
.groups = sd_attr_groups,
};
/*
- * Handle the detection and initialisation of a card.
- *
- * In the case of a resume, "oldcard" will contain the card
- * we're trying to reinitialise.
+ * Fetch CID from card.
*/
-static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
- struct mmc_card *oldcard)
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)
{
- struct mmc_card *card;
int err;
- u32 cid[4];
- unsigned int max_dtr;
-
- BUG_ON(!host);
- WARN_ON(!host->claimed);
/*
* Since we're changing the OCR value, we seem to
@@ -361,92 +423,67 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
err = mmc_send_app_op_cond(host, ocr, NULL);
if (err)
- goto err;
+ return err;
- /*
- * Fetch CID from card.
- */
if (mmc_host_is_spi(host))
err = mmc_send_cid(host, cid);
else
err = mmc_all_send_cid(host, cid);
- if (err)
- goto err;
- if (oldcard) {
- if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
- err = -ENOENT;
- goto err;
- }
-
- card = oldcard;
- } else {
- /*
- * Allocate card structure.
- */
- card = mmc_alloc_card(host, &sd_type);
- if (IS_ERR(card)) {
- err = PTR_ERR(card);
- goto err;
- }
+ return err;
+}
- card->type = MMC_TYPE_SD;
- memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
- }
+int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
/*
- * For native busses: get card RCA and quit open drain mode.
+ * Fetch CSD from card.
*/
- if (!mmc_host_is_spi(host)) {
- err = mmc_send_relative_addr(host, &card->rca);
- if (err)
- goto free_card;
+ err = mmc_send_csd(card, card->raw_csd);
+ if (err)
+ return err;
- mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
- }
+ err = mmc_decode_csd(card);
+ if (err)
+ return err;
- if (!oldcard) {
+ return 0;
+}
+
+int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ bool reinit)
+{
+ int err;
+
+ if (!reinit) {
/*
- * Fetch CSD from card.
+ * Fetch SCR from card.
*/
- err = mmc_send_csd(card, card->raw_csd);
- if (err)
- goto free_card;
-
- err = mmc_decode_csd(card);
+ err = mmc_app_send_scr(card, card->raw_scr);
if (err)
- goto free_card;
-
- mmc_decode_cid(card);
- }
+ return err;
- /*
- * Select card, as all following commands rely on that.
- */
- if (!mmc_host_is_spi(host)) {
- err = mmc_select_card(card);
+ err = mmc_decode_scr(card);
if (err)
- goto free_card;
- }
+ return err;
- if (!oldcard) {
/*
- * Fetch SCR from card.
+ * Fetch and process SD Status register.
*/
- err = mmc_app_send_scr(card, card->raw_scr);
+ err = mmc_read_ssr(card);
if (err)
- goto free_card;
+ return err;
- err = mmc_decode_scr(card);
- if (err < 0)
- goto free_card;
+ /* Erase init depends on CSD and SSR */
+ mmc_init_erase(card);
/*
* Fetch switch information from card.
*/
err = mmc_read_switch(card);
if (err)
- goto free_card;
+ return err;
}
/*
@@ -458,20 +495,34 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
- goto free_card;
+ return err;
}
/*
- * Attempt to change to high-speed (if supported)
+ * Check if read-only switch is active.
*/
- err = mmc_switch_hs(card);
- if (err)
- goto free_card;
+ if (!reinit) {
+ int ro = -1;
- /*
- * Compute bus speed.
- */
- max_dtr = (unsigned int)-1;
+ if (host->ops->get_ro)
+ ro = host->ops->get_ro(host);
+
+ if (ro < 0) {
+ printk(KERN_WARNING "%s: host does not "
+ "support reading read-only "
+ "switch. assuming write-enable.\n",
+ mmc_hostname(host));
+ } else if (ro > 0) {
+ mmc_card_set_readonly(card);
+ }
+ }
+
+ return 0;
+}
+
+unsigned mmc_sd_get_max_clock(struct mmc_card *card)
+{
+ unsigned max_dtr = (unsigned int)-1;
if (mmc_card_highspeed(card)) {
if (max_dtr > card->sw_caps.hs_max_dtr)
@@ -480,7 +531,97 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
max_dtr = card->csd.max_dtr;
}
- mmc_set_clock(host, max_dtr);
+ return max_dtr;
+}
+
+void mmc_sd_go_highspeed(struct mmc_card *card)
+{
+ mmc_card_set_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+ u32 cid[4];
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ err = mmc_sd_get_cid(host, ocr, cid);
+ if (err)
+ return err;
+
+ if (oldcard) {
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0)
+ return -ENOENT;
+
+ card = oldcard;
+ } else {
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, &sd_type);
+ if (IS_ERR(card))
+ return PTR_ERR(card);
+
+ card->type = MMC_TYPE_SD;
+ memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ }
+
+ /*
+ * For native busses: get card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ return err;
+
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+
+ if (!oldcard) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+ return err;
+
+ mmc_decode_cid(card);
+ }
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ return err;
+ }
+
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ if (err)
+ goto free_card;
+
+ /*
+ * Attempt to change to high-speed (if supported)
+ */
+ err = mmc_sd_switch_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto free_card;
+
+ /*
+ * Set bus speed.
+ */
+ mmc_set_clock(host, mmc_sd_get_max_clock(card));
/*
* Switch to wider bus (if supported).
@@ -494,30 +635,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
- /*
- * Check if read-only switch is active.
- */
- if (!oldcard) {
- if (!host->ops->get_ro || host->ops->get_ro(host) < 0) {
- printk(KERN_WARNING "%s: host does not "
- "support reading read-only "
- "switch. assuming write-enable.\n",
- mmc_hostname(host));
- } else {
- if (host->ops->get_ro(host) > 0)
- mmc_card_set_readonly(card);
- }
- }
-
- if (!oldcard)
- host->card = card;
-
+ host->card = card;
return 0;
free_card:
if (!oldcard)
mmc_remove_card(card);
-err:
return err;
}
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
new file mode 100644
index 00000000000..3d8800fa760
--- /dev/null
+++ b/drivers/mmc/core/sd.h
@@ -0,0 +1,17 @@
+#ifndef _MMC_CORE_SD_H
+#define _MMC_CORE_SD_H
+
+#include <linux/mmc/card.h>
+
+extern struct device_type sd_type;
+
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid);
+int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
+void mmc_decode_cid(struct mmc_card *card);
+int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ bool reinit);
+unsigned mmc_sd_get_max_clock(struct mmc_card *card);
+int mmc_sd_switch_hs(struct mmc_card *card);
+void mmc_sd_go_highspeed(struct mmc_card *card);
+
+#endif
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 63772e7e760..797cdb5887f 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -346,3 +346,51 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
return 0;
}
+int mmc_app_sd_status(struct mmc_card *card, void *ssr)
+{
+ int err;
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct scatterlist sg;
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+ BUG_ON(!ssr);
+
+ /* NOTE: caller guarantees ssr is heap-allocated */
+
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ return err;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_APP_SD_STATUS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, ssr, 64);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 9742d8a3066..ffc2305d905 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -19,6 +19,7 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
int mmc_app_send_scr(struct mmc_card *card, u32 *scr);
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp);
+int mmc_app_sd_status(struct mmc_card *card, void *ssr);
#endif
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index b9dee28ee7d..f332c52968b 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -18,6 +18,7 @@
#include "core.h"
#include "bus.h"
+#include "sd.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
@@ -62,13 +63,19 @@ static int sdio_init_func(struct mmc_card *card, unsigned int fn)
func->num = fn;
- ret = sdio_read_fbr(func);
- if (ret)
- goto fail;
+ if (!(card->quirks & MMC_QUIRK_NONSTD_SDIO)) {
+ ret = sdio_read_fbr(func);
+ if (ret)
+ goto fail;
- ret = sdio_read_func_cis(func);
- if (ret)
- goto fail;
+ ret = sdio_read_func_cis(func);
+ if (ret)
+ goto fail;
+ } else {
+ func->vendor = func->card->cis.vendor;
+ func->device = func->card->cis.device;
+ func->max_blksize = func->card->cis.blksize;
+ }
card->sdio_func[fn - 1] = func;
@@ -159,9 +166,7 @@ static int sdio_enable_wide(struct mmc_card *card)
if (ret)
return ret;
- mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
-
- return 0;
+ return 1;
}
/*
@@ -221,10 +226,34 @@ static int sdio_disable_wide(struct mmc_card *card)
return 0;
}
+
+static int sdio_enable_4bit_bus(struct mmc_card *card)
+{
+ int err;
+
+ if (card->type == MMC_TYPE_SDIO)
+ return sdio_enable_wide(card);
+
+ if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
+ (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err)
+ return err;
+ } else
+ return 0;
+
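+ /*
+ * sdio_enable_wide() returns 1 on success and 0 if the SDIO
+ * side cannot go wide; on anything but success switch the
+ * memory side back to 1-bit so both halves agree.
+ */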
+ err = sdio_enable_wide(card);
+ if (err <= 0)
+ mmc_app_set_bus_width(card, MMC_BUS_WIDTH_1);
+
+ return err;
+}
+
+
/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
-static int sdio_enable_hs(struct mmc_card *card)
+static int mmc_sdio_switch_hs(struct mmc_card *card, int enable)
{
int ret;
u8 speed;
@@ -239,16 +268,56 @@ static int sdio_enable_hs(struct mmc_card *card)
if (ret)
return ret;
- speed |= SDIO_SPEED_EHS;
+ if (enable)
+ speed |= SDIO_SPEED_EHS;
+ else
+ speed &= ~SDIO_SPEED_EHS;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
if (ret)
return ret;
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ return 1;
+}
- return 0;
+/*
+ * Enable the SDIO/combo card's high-speed mode. Returns 1 if high speed
+ * was enabled, 0 if it is not supported, or a negative error code.
+ */
+static int sdio_enable_hs(struct mmc_card *card)
+{
+ int ret;
+
+ ret = mmc_sdio_switch_hs(card, true);
+ if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ return ret;
+
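+ /*
+ * For combo cards the SD memory side must switch as well; if it
+ * refuses, back the SDIO side out so both run at the same rate.
+ */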
+ ret = mmc_sd_switch_hs(card);
+ if (ret <= 0)
+ mmc_sdio_switch_hs(card, false);
+
+ return ret;
+}
+
+static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
+{
+ unsigned max_dtr;
+
+ if (mmc_card_highspeed(card)) {
+ /*
+ * The SDIO specification doesn't mention how
+ * the CIS transfer speed register relates to
+ * high-speed, but it seems that 50 MHz is
+ * mandatory.
+ */
+ max_dtr = 50000000;
+ } else {
+ max_dtr = card->cis.max_dtr;
+ }
+
+ if (card->type == MMC_TYPE_SD_COMBO)
+ max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+
+ return max_dtr;
}
/*
@@ -293,7 +362,23 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto err;
}
- card->type = MMC_TYPE_SDIO;
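+ /*
+ * If the R4 response reports memory present and a CID can be
+ * read, treat the card as an SD combo card; otherwise it is
+ * SDIO-only.
+ */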
+ if (ocr & R4_MEMORY_PRESENT
+ && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
+ card->type = MMC_TYPE_SD_COMBO;
+
+ if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
+ mmc_remove_card(card);
+ return -ENOENT;
+ }
+ } else {
+ card->type = MMC_TYPE_SDIO;
+
+ if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ mmc_remove_card(card);
+ return -ENOENT;
+ }
+ }
/*
* Call the optional HC's init_card function to handle quirks.
@@ -313,6 +398,17 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Read CSD, before selecting the card
+ */
+ if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+ return err;
+
+ mmc_decode_cid(card);
+ }
+
+ /*
* Select card, as all following commands rely on that.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
@@ -321,6 +417,23 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto remove;
}
+ if (card->quirks & MMC_QUIRK_NONSTD_SDIO) {
+ /*
+ * This is a non-standard SDIO device: it has no CIA
+ * (Common I/O Area) registers present. It is the host's
+ * responsibility to fill in the cccr and cis structures
+ * in init_card().
+ */
+ mmc_set_clock(host, card->cis.max_dtr);
+
+ if (card->cccr.high_speed) {
+ mmc_card_set_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ }
+
+ goto finish;
+ }
+
/*
* Read the common registers.
*/
@@ -339,43 +452,57 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
int same = (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device);
mmc_remove_card(card);
- if (!same) {
- err = -ENOENT;
- goto err;
- }
+ if (!same)
+ return -ENOENT;
+
card = oldcard;
return 0;
}
+ if (card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ /* handle as SDIO-only card if memory init failed */
+ if (err) {
+ mmc_go_idle(host);
+ if (mmc_host_is_spi(host))
+ /* should not fail, as it worked previously */
+ mmc_spi_set_crc(host, use_spi_crc);
+ card->type = MMC_TYPE_SDIO;
+ } else
+ card->dev.type = &sd_type;
+ }
+
+ /*
+ * If needed, disconnect card detection pull-up resistor.
+ */
+ err = sdio_disable_cd(card);
+ if (err)
+ goto remove;
+
/*
* Switch to high-speed (if supported).
*/
err = sdio_enable_hs(card);
- if (err)
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
goto remove;
/*
* Change to the card's maximum speed.
*/
- if (mmc_card_highspeed(card)) {
- /*
- * The SDIO specification doesn't mention how
- * the CIS transfer speed register relates to
- * high-speed, but it seems that 50 MHz is
- * mandatory.
- */
- mmc_set_clock(host, 50000000);
- } else {
- mmc_set_clock(host, card->cis.max_dtr);
- }
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
/*
* Switch to wider bus (if supported).
*/
- err = sdio_enable_wide(card);
- if (err)
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ else if (err)
goto remove;
+finish:
if (!oldcard)
host->card = card;
return 0;
@@ -487,9 +614,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
mmc_claim_host(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
(host->pm_flags & MMC_PM_KEEP_POWER));
- if (!err)
+ if (!err) {
/* We may have switched to 1-bit mode during suspend. */
- err = sdio_enable_wide(host->card);
+ err = sdio_enable_4bit_bus(host->card);
+ if (err > 0) {
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+ }
if (!err && host->sdio_irqs)
mmc_signal_sdio_irq(host);
mmc_release_host(host);
@@ -574,13 +706,6 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
card->sdio_funcs = 0;
/*
- * If needed, disconnect card detection pull-up resistor.
- */
- err = sdio_disable_cd(card);
- if (err)
- goto remove;
-
- /*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index f06d06e7fdf..68d12794cfd 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -121,9 +121,18 @@ config MMC_SDHCI_PLTFM
If unsure, say N.
+config MMC_SDHCI_CNS3XXX
+ bool "SDHCI support on the Cavium Networks CNS3xxx SoC"
+ depends on ARCH_CNS3XXX
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
- depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+ depends on MMC_SDHCI && PLAT_SAMSUNG
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referred to as the HSMMC block in some of the Samsung S3C
@@ -247,12 +256,13 @@ config MMC_IMX
If unsure, say N.
-config MMC_MSM7X00A
- tristate "Qualcomm MSM 7X00A SDCC Controller Support"
- depends on MMC && ARCH_MSM && !ARCH_MSM7X30
+config MMC_MSM
+ tristate "Qualcomm SDCC Controller Support"
+ depends on MMC && ARCH_MSM
help
This provides support for the SD/MMC cell found in the
- MSM 7X00A controllers from Qualcomm.
+ MSM and QSD SOCs from Qualcomm. The controller also has
+ support for SDIO devices.
config MMC_MXC
tristate "Freescale i.MX2/3 Multimedia Card Interface support"
@@ -432,3 +442,12 @@ config MMC_SH_MMCIF
This selects the MMC Host Interface controller (MMCIF).
This driver supports MMCIF in sh7724/sh7757/sh7372.
+
+config MMC_JZ4740
+ tristate "JZ4740 SD/Multimedia Card Interface support"
+ depends on MACH_JZ4740
+ help
+ This selects support for the SD/MMC controller on Ingenic JZ4740
+ SoCs.
+ If you have a board based on such a SoC with an SD/MMC slot,
+ say Y or M here.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e30c2ee4889..840bcb52d82 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -22,7 +21,7 @@ obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
obj-$(CONFIG_MMC_AT91) += at91_mci.o
obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
-obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
+obj-$(CONFIG_MMC_MSM) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
@@ -36,6 +35,11 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
+obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
+
+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
+sdhci-platform-y := sdhci-pltfm.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599ead0..87226cd202a 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4353a..5a950b16d9e 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
while (delay--) {
reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN)
+ if (reg & STATUS_CARD_BUS_CLK_RUN) {
/* Check twice before cut */
reg = readw(host->base + MMC_REG_STATUS);
if (reg & STATUS_CARD_BUS_CLK_RUN)
return 0;
+ }
if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
return 0;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
new file mode 100644
index 00000000000..ad4f9870e3c
--- /dev/null
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SD/MMC controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <asm/mach-jz4740/gpio.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach-jz4740/jz4740_mmc.h>
+
+#define JZ_REG_MMC_STRPCL 0x00
+#define JZ_REG_MMC_STATUS 0x04
+#define JZ_REG_MMC_CLKRT 0x08
+#define JZ_REG_MMC_CMDAT 0x0C
+#define JZ_REG_MMC_RESTO 0x10
+#define JZ_REG_MMC_RDTO 0x14
+#define JZ_REG_MMC_BLKLEN 0x18
+#define JZ_REG_MMC_NOB 0x1C
+#define JZ_REG_MMC_SNOB 0x20
+#define JZ_REG_MMC_IMASK 0x24
+#define JZ_REG_MMC_IREG 0x28
+#define JZ_REG_MMC_CMD 0x2C
+#define JZ_REG_MMC_ARG 0x30
+#define JZ_REG_MMC_RESP_FIFO 0x34
+#define JZ_REG_MMC_RXFIFO 0x38
+#define JZ_REG_MMC_TXFIFO 0x3C
+
+#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
+#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
+#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
+#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
+#define JZ_MMC_STRPCL_RESET BIT(3)
+#define JZ_MMC_STRPCL_START_OP BIT(2)
+#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
+#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
+#define JZ_MMC_STRPCL_CLOCK_START BIT(1)
+
+
+#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
+#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
+#define JZ_MMC_STATUS_PRG_DONE BIT(13)
+#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
+#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
+#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
+#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
+#define JZ_MMC_STATUS_CLK_EN BIT(8)
+#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
+#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
+#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
+#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
+#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
+#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
+#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
+#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
+
+#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
+#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
+
+
+#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
+#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
+#define JZ_MMC_CMDAT_DMA_EN BIT(8)
+#define JZ_MMC_CMDAT_INIT BIT(7)
+#define JZ_MMC_CMDAT_BUSY BIT(6)
+#define JZ_MMC_CMDAT_STREAM BIT(5)
+#define JZ_MMC_CMDAT_WRITE BIT(4)
+#define JZ_MMC_CMDAT_DATA_EN BIT(3)
+#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
+#define JZ_MMC_CMDAT_RSP_R1 1
+#define JZ_MMC_CMDAT_RSP_R2 2
+#define JZ_MMC_CMDAT_RSP_R3 3
+
+#define JZ_MMC_IRQ_SDIO BIT(7)
+#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
+#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
+#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
+#define JZ_MMC_IRQ_PRG_DONE BIT(1)
+#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
+
+
+#define JZ_MMC_CLK_RATE 24000000
+
+enum jz4740_mmc_state {
+ JZ4740_MMC_STATE_READ_RESPONSE,
+ JZ4740_MMC_STATE_TRANSFER_DATA,
+ JZ4740_MMC_STATE_SEND_STOP,
+ JZ4740_MMC_STATE_DONE,
+};
+
+struct jz4740_mmc_host {
+ struct mmc_host *mmc;
+ struct platform_device *pdev;
+ struct jz4740_mmc_platform_data *pdata;
+ struct clk *clk;
+
+ int irq;
+ int card_detect_irq;
+
+ struct resource *mem;
+ void __iomem *base;
+ struct mmc_request *req;
+ struct mmc_command *cmd;
+
+ unsigned long waiting;
+
+ uint32_t cmdat;
+
+ uint16_t irq_mask;
+
+ spinlock_t lock;
+
+ struct timer_list timeout_timer;
+ struct sg_mapping_iter miter;
+ enum jz4740_mmc_state state;
+};
+
+static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
+ unsigned int irq, bool enabled)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (enabled)
+ host->irq_mask &= ~irq;
+ else
+ host->irq_mask |= irq;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+}
+
+static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
+ bool start_transfer)
+{
+ uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
+
+ if (start_transfer)
+ val |= JZ_MMC_STRPCL_START_OP;
+
+ writew(val, host->base + JZ_REG_MMC_STRPCL);
+}
+
+static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
+{
+ uint32_t status;
+ unsigned int timeout = 1000;
+
+ writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
+ do {
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ } while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
+}
+
+static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
+{
+ uint32_t status;
+ unsigned int timeout = 1000;
+
+ writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
+ udelay(10);
+ do {
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
+}
+
+static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
+{
+ struct mmc_request *req;
+
+ req = host->req;
+ host->req = NULL;
+
+ mmc_request_done(host->mmc, req);
+}
+
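+/*
+ * Returns true if polling timed out; in that case the interrupt is
+ * re-enabled and the timeout timer armed so the transfer can resume
+ * from the interrupt handler.
+ */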
+static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
+ unsigned int irq)
+{
+ unsigned int timeout = 0x800;
+ uint16_t status;
+
+ do {
+ status = readw(host->base + JZ_REG_MMC_IREG);
+ } while (!(status & irq) && --timeout);
+
+ if (timeout == 0) {
+ set_bit(0, &host->waiting);
+ mod_timer(&host->timeout_timer, jiffies + 5*HZ);
+ jz4740_mmc_set_irq_enabled(host, irq, true);
+ return true;
+ }
+
+ return false;
+}
+
+static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ int status;
+
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
+ if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
+ host->req->cmd->error = -ETIMEDOUT;
+ data->error = -ETIMEDOUT;
+ } else {
+ host->req->cmd->error = -EIO;
+ data->error = -EIO;
+ }
+ }
+}
+
+static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct sg_mapping_iter *miter = &host->miter;
+ void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
+ uint32_t *buf;
+ bool timeout;
+ size_t i, j;
+
+ while (sg_miter_next(miter)) {
+ buf = miter->addr;
+ i = miter->length / 4;
+ j = i / 8;
+ i = i & 0x7;
+ while (j) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ writel(buf[0], fifo_addr);
+ writel(buf[1], fifo_addr);
+ writel(buf[2], fifo_addr);
+ writel(buf[3], fifo_addr);
+ writel(buf[4], fifo_addr);
+ writel(buf[5], fifo_addr);
+ writel(buf[6], fifo_addr);
+ writel(buf[7], fifo_addr);
+ buf += 8;
+ --j;
+ }
+ if (unlikely(i)) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ while (i) {
+ writel(*buf, fifo_addr);
+ ++buf;
+ --i;
+ }
+ }
+ data->bytes_xfered += miter->length;
+ }
+ sg_miter_stop(miter);
+
+ return false;
+
+poll_timeout:
+ miter->consumed = (void *)buf - miter->addr;
+ data->bytes_xfered += miter->consumed;
+ sg_miter_stop(miter);
+
+ return true;
+}
+
+static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct sg_mapping_iter *miter = &host->miter;
+ void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
+ uint32_t *buf;
+ uint32_t d;
+ uint16_t status;
+ size_t i, j;
+ unsigned int timeout;
+
+ while (sg_miter_next(miter)) {
+ buf = miter->addr;
+ i = miter->length;
+ j = i / 32;
+ i = i & 0x1f;
+ while (j) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ buf[0] = readl(fifo_addr);
+ buf[1] = readl(fifo_addr);
+ buf[2] = readl(fifo_addr);
+ buf[3] = readl(fifo_addr);
+ buf[4] = readl(fifo_addr);
+ buf[5] = readl(fifo_addr);
+ buf[6] = readl(fifo_addr);
+ buf[7] = readl(fifo_addr);
+
+ buf += 8;
+ --j;
+ }
+
+ if (unlikely(i)) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ while (i >= 4) {
+ *buf++ = readl(fifo_addr);
+ i -= 4;
+ }
+ if (unlikely(i > 0)) {
+ d = readl(fifo_addr);
+ memcpy(buf, &d, i);
+ }
+ }
+ data->bytes_xfered += miter->length;
+
+ /* This can go away once MIPS implements
+ * flush_kernel_dcache_page */
+ flush_dcache_page(miter->page);
+ }
+ sg_miter_stop(miter);
+
+ /* For whatever reason there is sometimes one word more in the FIFO than
+ * requested */
+ timeout = 1000;
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
+ d = readl(fifo_addr);
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ }
+
+ return false;
+
+poll_timeout:
+ miter->consumed = (void *)buf - miter->addr;
+ data->bytes_xfered += miter->consumed;
+ sg_miter_stop(miter);
+
+ return true;
+}
+
+static void jz4740_mmc_timeout(unsigned long data)
+{
+ struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+
+ if (!test_and_clear_bit(0, &host->waiting))
+ return;
+
+ jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
+
+ host->req->cmd->error = -ETIMEDOUT;
+ jz4740_mmc_request_done(host);
+}
+
+static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ int i;
+ uint16_t tmp;
+ void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
+
+ if (cmd->flags & MMC_RSP_136) {
+ tmp = readw(fifo_addr);
+ for (i = 0; i < 4; ++i) {
+ cmd->resp[i] = tmp << 24;
+ tmp = readw(fifo_addr);
+ cmd->resp[i] |= tmp << 8;
+ tmp = readw(fifo_addr);
+ cmd->resp[i] |= tmp >> 8;
+ }
+ } else {
+ cmd->resp[0] = readw(fifo_addr) << 24;
+ cmd->resp[0] |= readw(fifo_addr) << 8;
+ cmd->resp[0] |= readw(fifo_addr) & 0xff;
+ }
+}
+
+static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ uint32_t cmdat = host->cmdat;
+
+ host->cmdat &= ~JZ_MMC_CMDAT_INIT;
+ jz4740_mmc_clock_disable(host);
+
+ host->cmd = cmd;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdat |= JZ_MMC_CMDAT_BUSY;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1B:
+ case MMC_RSP_R1:
+ cmdat |= JZ_MMC_CMDAT_RSP_R1;
+ break;
+ case MMC_RSP_R2:
+ cmdat |= JZ_MMC_CMDAT_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmdat |= JZ_MMC_CMDAT_RSP_R3;
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->data) {
+ cmdat |= JZ_MMC_CMDAT_DATA_EN;
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ cmdat |= JZ_MMC_CMDAT_WRITE;
+ if (cmd->data->flags & MMC_DATA_STREAM)
+ cmdat |= JZ_MMC_CMDAT_STREAM;
+
+ writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
+ writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
+ }
+
+ writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
+ writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
+ writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
+
+ jz4740_mmc_clock_enable(host, 1);
+}
+
+static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
+{
+ struct mmc_command *cmd = host->req->cmd;
+ struct mmc_data *data = cmd->data;
+ int direction;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = SG_MITER_TO_SG;
+ else
+ direction = SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
+}
+
+
+static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
+{
+ struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
+ struct mmc_command *cmd = host->req->cmd;
+ struct mmc_request *req = host->req;
+ bool timeout = false;
+
+ if (cmd->error)
+ host->state = JZ4740_MMC_STATE_DONE;
+
+ switch (host->state) {
+ case JZ4740_MMC_STATE_READ_RESPONSE:
+ if (cmd->flags & MMC_RSP_PRESENT)
+ jz4740_mmc_read_response(host, cmd);
+
+ if (!cmd->data)
+ break;
+
+ jz_mmc_prepare_data_transfer(host);
+
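+ /* fall through */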
+ case JZ4740_MMC_STATE_TRANSFER_DATA:
+ if (cmd->data->flags & MMC_DATA_READ)
+ timeout = jz4740_mmc_read_data(host, cmd->data);
+ else
+ timeout = jz4740_mmc_write_data(host, cmd->data);
+
+ if (unlikely(timeout)) {
+ host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
+ break;
+ }
+
+ jz4740_mmc_transfer_check_state(host, cmd->data);
+
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
+ if (unlikely(timeout)) {
+ host->state = JZ4740_MMC_STATE_SEND_STOP;
+ break;
+ }
+ writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG);
+
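+ /* fall through */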
+ case JZ4740_MMC_STATE_SEND_STOP:
+ if (!req->stop)
+ break;
+
+ jz4740_mmc_send_command(host, req->stop);
+
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_PRG_DONE);
+ if (timeout) {
+ host->state = JZ4740_MMC_STATE_DONE;
+ break;
+ }
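+ /* fall through */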
+ case JZ4740_MMC_STATE_DONE:
+ break;
+ }
+
+ if (!timeout)
+ jz4740_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t jz_mmc_irq(int irq, void *devid)
+{
+ struct jz4740_mmc_host *host = devid;
+ struct mmc_command *cmd = host->cmd;
+ uint16_t irq_reg, status, tmp;
+
+ irq_reg = readw(host->base + JZ_REG_MMC_IREG);
+
+ tmp = irq_reg;
+ irq_reg &= ~host->irq_mask;
+
+ tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
+ JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
+
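+ /*
+ * Ack interrupts that are raised but currently masked; the data
+ * transfer related bits appear to be acknowledged by the polling
+ * paths instead.
+ */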
+ if (tmp != irq_reg)
+ writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG);
+
+ if (irq_reg & JZ_MMC_IRQ_SDIO) {
+ writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG);
+ mmc_signal_sdio_irq(host->mmc);
+ irq_reg &= ~JZ_MMC_IRQ_SDIO;
+ }
+
+ if (host->req && cmd && irq_reg) {
+ if (test_and_clear_bit(0, &host->waiting)) {
+ del_timer(&host->timeout_timer);
+
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+
+ if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
+ cmd->error = -ETIMEDOUT;
+ } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
+ cmd->error = -EIO;
+ } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
+ JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
+ if (cmd->data)
+ cmd->data->error = -EIO;
+ cmd->error = -EIO;
+ }
+
+ jz4740_mmc_set_irq_enabled(host, irq_reg, false);
+ writew(irq_reg, host->base + JZ_REG_MMC_IREG);
+
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
+{
+ int div = 0;
+ int real_rate;
+
+ jz4740_mmc_clock_disable(host);
+ clk_set_rate(host->clk, JZ_MMC_CLK_RATE);
+
+ real_rate = clk_get_rate(host->clk);
+
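+ /*
+ * CLKRT divides the base clock by 2^div; pick the lowest divider
+ * whose rate does not exceed the requested one (capped at 7).
+ */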
+ while (real_rate > rate && div < 7) {
+ ++div;
+ real_rate >>= 1;
+ }
+
+ writew(div, host->base + JZ_REG_MMC_CLKRT);
+ return real_rate;
+}
+
+static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+
+ host->req = req;
+
+ writew(0xffff, host->base + JZ_REG_MMC_IREG);
+
+ writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG);
+ jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
+
+ host->state = JZ4740_MMC_STATE_READ_RESPONSE;
+ set_bit(0, &host->waiting);
+ mod_timer(&host->timeout_timer, jiffies + 5*HZ);
+ jz4740_mmc_send_command(host, req->cmd);
+}
+
+static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ if (ios->clock)
+ jz4740_mmc_set_clock_rate(host, ios->clock);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ jz4740_mmc_reset(host);
+ if (gpio_is_valid(host->pdata->gpio_power))
+ gpio_set_value(host->pdata->gpio_power,
+ !host->pdata->power_active_low);
+ host->cmdat |= JZ_MMC_CMDAT_INIT;
+ clk_enable(host->clk);
+ break;
+ case MMC_POWER_ON:
+ break;
+ default:
+ if (gpio_is_valid(host->pdata->gpio_power))
+ gpio_set_value(host->pdata->gpio_power,
+ host->pdata->power_active_low);
+ clk_disable(host->clk);
+ break;
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ break;
+ case MMC_BUS_WIDTH_4:
+ host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ break;
+ default:
+ break;
+ }
+}
+
+static int jz4740_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ if (!gpio_is_valid(host->pdata->gpio_read_only))
+ return -ENOSYS;
+
+ return gpio_get_value(host->pdata->gpio_read_only) ^
+ host->pdata->read_only_active_low;
+}
+
+static int jz4740_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ if (!gpio_is_valid(host->pdata->gpio_card_detect))
+ return -ENOSYS;
+
+ return gpio_get_value(host->pdata->gpio_card_detect) ^
+ host->pdata->card_detect_active_low;
+}
+
+static irqreturn_t jz4740_mmc_card_detect_irq(int irq, void *devid)
+{
+ struct jz4740_mmc_host *host = devid;
+
+ mmc_detect_change(host->mmc, HZ / 2);
+
+ return IRQ_HANDLED;
+}
+
+static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
+}
+
+static const struct mmc_host_ops jz4740_mmc_ops = {
+ .request = jz4740_mmc_request,
+ .set_ios = jz4740_mmc_set_ios,
+ .get_ro = jz4740_mmc_get_ro,
+ .get_cd = jz4740_mmc_get_cd,
+ .enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
+};
+
+static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
+ JZ_GPIO_BULK_PIN(MSC_CMD),
+ JZ_GPIO_BULK_PIN(MSC_CLK),
+ JZ_GPIO_BULK_PIN(MSC_DATA0),
+ JZ_GPIO_BULK_PIN(MSC_DATA1),
+ JZ_GPIO_BULK_PIN(MSC_DATA2),
+ JZ_GPIO_BULK_PIN(MSC_DATA3),
+};
+
+static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio,
+ const char *name, bool output, int value)
+{
+ int ret;
+
+ if (!gpio_is_valid(gpio))
+ return 0;
+
+ ret = gpio_request(gpio, name);
+ if (ret) {
+ dev_err(dev, "Failed to request %s gpio: %d\n", name, ret);
+ return ret;
+ }
+
+ if (output)
+ gpio_direction_output(gpio, value);
+ else
+ gpio_direction_input(gpio);
+
+ return 0;
+}
+
+static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev)
+{
+ int ret;
+ struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return 0;
+
+ ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_card_detect,
+ "MMC detect change", false, 0);
+ if (ret)
+ goto err;
+
+ ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_read_only,
+ "MMC read only", false, 0);
+ if (ret)
+ goto err_free_gpio_card_detect;
+
+ ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
+ "MMC read only", true, pdata->power_active_low);
+ if (ret)
+ goto err_free_gpio_read_only;
+
+ return 0;
+
+err_free_gpio_read_only:
+ if (gpio_is_valid(pdata->gpio_read_only))
+ gpio_free(pdata->gpio_read_only);
+err_free_gpio_card_detect:
+ if (gpio_is_valid(pdata->gpio_card_detect))
+ gpio_free(pdata->gpio_card_detect);
+err:
+ return ret;
+}
+
+static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev,
+ struct jz4740_mmc_host *host)
+{
+ struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!gpio_is_valid(pdata->gpio_card_detect))
+ return 0;
+
+ host->card_detect_irq = gpio_to_irq(pdata->gpio_card_detect);
+ if (host->card_detect_irq < 0) {
+ dev_warn(&pdev->dev, "Failed to get card detect irq\n");
+ return 0;
+ }
+
+ return request_irq(host->card_detect_irq, jz4740_mmc_card_detect_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "MMC card detect", host);
+}
+
+static void jz4740_mmc_free_gpios(struct platform_device *pdev)
+{
+ struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return;
+
+ if (gpio_is_valid(pdata->gpio_power))
+ gpio_free(pdata->gpio_power);
+ if (gpio_is_valid(pdata->gpio_read_only))
+ gpio_free(pdata->gpio_read_only);
+ if (gpio_is_valid(pdata->gpio_card_detect))
+ gpio_free(pdata->gpio_card_detect);
+}
+
+static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host)
+{
+ size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins);
+ if (host->pdata && host->pdata->data_1bit)
+ num_pins -= 3;
+
+ return num_pins;
+}
+
+static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
+{
+ int ret;
+ struct mmc_host *mmc;
+ struct jz4740_mmc_host *host;
+ struct jz4740_mmc_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
+ return -ENOMEM;
+ }
+
+ host = mmc_priv(mmc);
+ host->pdata = pdata;
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ ret = host->irq;
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
+ goto err_free_host;
+ }
+
+ host->clk = clk_get(&pdev->dev, "mmc");
+ if (!host->clk) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get mmc clock\n");
+ goto err_free_host;
+ }
+
+ host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!host->mem) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get base platform memory\n");
+ goto err_clk_put;
+ }
+
+ host->mem = request_mem_region(host->mem->start,
+ resource_size(host->mem), pdev->name);
+ if (!host->mem) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to request base memory region\n");
+ goto err_clk_put;
+ }
+
+ host->base = ioremap_nocache(host->mem->start, resource_size(host->mem));
+ if (!host->base) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to ioremap base memory\n");
+ goto err_release_mem_region;
+ }
+
+ ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret);
+ goto err_iounmap;
+ }
+
+ ret = jz4740_mmc_request_gpios(pdev);
+ if (ret)
+ goto err_gpio_bulk_free;
+
+ mmc->ops = &jz4740_mmc_ops;
+ mmc->f_min = JZ_MMC_CLK_RATE / 128;
+ mmc->f_max = JZ_MMC_CLK_RATE;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA;
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ mmc->max_blk_size = (1 << 10) - 1;
+ mmc->max_blk_count = (1 << 15) - 1;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+ mmc->max_phys_segs = 128;
+ mmc->max_hw_segs = 128;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ host->mmc = mmc;
+ host->pdev = pdev;
+ spin_lock_init(&host->lock);
+ host->irq_mask = 0xffff;
+
+ ret = jz4740_mmc_request_cd_irq(pdev, host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request card detect irq\n");
+ goto err_free_gpios;
+ }
+
+ ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
+ goto err_free_card_detect_irq;
+ }
+
+ jz4740_mmc_reset(host);
+ jz4740_mmc_clock_disable(host);
+ setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
+ (unsigned long)host);
+ /* It is not important when it times out, it just needs to time out. */
+ set_timer_slack(&host->timeout_timer, HZ);
+
+ platform_set_drvdata(pdev, host);
+ ret = mmc_add_host(mmc);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
+ goto err_free_irq;
+ }
+ dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+
+ return 0;
+
+err_free_irq:
+ free_irq(host->irq, host);
+err_free_card_detect_irq:
+ if (host->card_detect_irq >= 0)
+ free_irq(host->card_detect_irq, host);
+err_free_gpios:
+ jz4740_mmc_free_gpios(pdev);
+err_gpio_bulk_free:
+ jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+err_iounmap:
+ iounmap(host->base);
+err_release_mem_region:
+ release_mem_region(host->mem->start, resource_size(host->mem));
+err_clk_put:
+ clk_put(host->clk);
+err_free_host:
+ platform_set_drvdata(pdev, NULL);
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int __devexit jz4740_mmc_remove(struct platform_device *pdev)
+{
+ struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
+
+ del_timer_sync(&host->timeout_timer);
+ jz4740_mmc_set_irq_enabled(host, 0xff, false);
+ jz4740_mmc_reset(host);
+
+ mmc_remove_host(host->mmc);
+
+ free_irq(host->irq, host);
+ if (host->card_detect_irq >= 0)
+ free_irq(host->card_detect_irq, host);
+
+ jz4740_mmc_free_gpios(pdev);
+ jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+
+ iounmap(host->base);
+ release_mem_region(host->mem->start, resource_size(host->mem));
+
+ clk_put(host->clk);
+
+ platform_set_drvdata(pdev, NULL);
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int jz4740_mmc_suspend(struct device *dev)
+{
+ struct jz4740_mmc_host *host = dev_get_drvdata(dev);
+
+ mmc_suspend_host(host->mmc);
+
+ jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+
+ return 0;
+}
+
+static int jz4740_mmc_resume(struct device *dev)
+{
+ struct jz4740_mmc_host *host = dev_get_drvdata(dev);
+
+ jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+
+ mmc_resume_host(host->mmc);
+
+ return 0;
+}
+
+const struct dev_pm_ops jz4740_mmc_pm_ops = {
+ .suspend = jz4740_mmc_suspend,
+ .resume = jz4740_mmc_resume,
+ .poweroff = jz4740_mmc_suspend,
+ .restore = jz4740_mmc_resume,
+};
+
+#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
+#else
+#define JZ4740_MMC_PM_OPS NULL
+#endif
+
+static struct platform_driver jz4740_mmc_driver = {
+ .probe = jz4740_mmc_probe,
+ .remove = __devexit_p(jz4740_mmc_remove),
+ .driver = {
+ .name = "jz4740-mmc",
+ .owner = THIS_MODULE,
+ .pm = JZ4740_MMC_PM_OPS,
+ },
+};
+
+static int __init jz4740_mmc_init(void)
+{
+ return platform_driver_register(&jz4740_mmc_driver);
+}
+module_init(jz4740_mmc_init);
+
+static void __exit jz4740_mmc_exit(void)
+{
+ platform_driver_unregister(&jz4740_mmc_driver);
+}
+module_exit(jz4740_mmc_exit);
+
+MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index ad847a24a67..62a35822003 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -182,7 +182,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
host->data_dma, sizeof(*host->data),
DMA_FROM_DEVICE);
- status = spi_sync(host->spi, &host->readback);
+ status = spi_sync_locked(host->spi, &host->readback);
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
@@ -541,7 +541,7 @@ mmc_spi_command_send(struct mmc_spi_host *host,
host->data_dma, sizeof(*host->data),
DMA_BIDIRECTIONAL);
}
- status = spi_sync(host->spi, &host->m);
+ status = spi_sync_locked(host->spi, &host->m);
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
@@ -685,7 +685,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
- status = spi_sync(spi, &host->m);
+ status = spi_sync_locked(spi, &host->m);
if (status != 0) {
dev_dbg(&spi->dev, "write error (%d)\n", status);
@@ -822,7 +822,7 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
DMA_FROM_DEVICE);
}
- status = spi_sync(spi, &host->m);
+ status = spi_sync_locked(spi, &host->m);
if (host->dma_dev) {
dma_sync_single_for_cpu(host->dma_dev,
@@ -1018,7 +1018,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
- tmp = spi_sync(spi, &host->m);
+ tmp = spi_sync_locked(spi, &host->m);
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
@@ -1084,6 +1084,9 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
#endif
+ /* request exclusive bus access */
+ spi_bus_lock(host->spi->master);
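+ /*
+ * All transfers below go through spi_sync_locked(), so no other
+ * SPI device can interleave messages while a command is in flight.
+ */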
+
/* issue command; then optionally data and stop */
status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
if (status == 0 && mrq->data) {
@@ -1094,6 +1097,9 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
mmc_cs_off(host);
}
+ /* release the bus */
+ spi_bus_unlock(host->spi->master);
+
mmc_request_done(host->mmc, mrq);
}
@@ -1290,23 +1296,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
return IRQ_HANDLED;
}
-struct count_children {
- unsigned n;
- struct bus_type *bus;
-};
-
-static int maybe_count_child(struct device *dev, void *c)
-{
- struct count_children *ccp = c;
-
- if (dev->bus == ccp->bus) {
- if (ccp->n)
- return -EBUSY;
- ccp->n++;
- }
- return 0;
-}
-
static int mmc_spi_probe(struct spi_device *spi)
{
void *ones;
@@ -1338,32 +1327,6 @@ static int mmc_spi_probe(struct spi_device *spi)
return status;
}
- /* We can use the bus safely iff nobody else will interfere with us.
- * Most commands consist of one SPI message to issue a command, then
- * several more to collect its response, then possibly more for data
- * transfer. Clocking access to other devices during that period will
- * corrupt the command execution.
- *
- * Until we have software primitives which guarantee non-interference,
- * we'll aim for a hardware-level guarantee.
- *
- * REVISIT we can't guarantee another device won't be added later...
- */
- if (spi->master->num_chipselect > 1) {
- struct count_children cc;
-
- cc.n = 0;
- cc.bus = spi->dev.bus;
- status = device_for_each_child(spi->dev.parent, &cc,
- maybe_count_child);
- if (status < 0) {
- dev_err(&spi->dev, "can't share SPI bus\n");
- return status;
- }
-
- dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n");
- }
-
/* We need a supply of ones to transmit. This is the only time
* the CPU touches these, so cache coherency isn't a concern.
*
@@ -1533,12 +1496,21 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
return 0;
}
+#if defined(CONFIG_OF)
+static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
+ { .compatible = "mmc-spi-slot", },
+ {},
+};
+#endif
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+#if defined(CONFIG_OF)
+ .of_match_table = mmc_spi_of_match_table,
+#endif
},
.probe = mmc_spi_probe,
.remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 24e09454e52..ff7752348b1 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -160,18 +160,7 @@ msmsdcc_stop_data(struct msmsdcc_host *host)
uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
{
- switch (host->pdev_id) {
- case 1:
- return MSM_SDC1_PHYS + MMCIFIFO;
- case 2:
- return MSM_SDC2_PHYS + MMCIFIFO;
- case 3:
- return MSM_SDC3_PHYS + MMCIFIFO;
- case 4:
- return MSM_SDC4_PHYS + MMCIFIFO;
- }
- BUG();
- return 0;
+ return host->memres->start + MMCIFIFO;
}
static inline void
@@ -1057,26 +1046,10 @@ msmsdcc_init_dma(struct msmsdcc_host *host)
return 0;
}
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-static void
-do_resume_work(struct work_struct *work)
-{
- struct msmsdcc_host *host =
- container_of(work, struct msmsdcc_host, resume_task);
- struct mmc_host *mmc = host->mmc;
-
- if (mmc) {
- mmc_resume_host(mmc);
- if (host->stat_irq)
- enable_irq(host->stat_irq);
- }
-}
-#endif
-
static int
msmsdcc_probe(struct platform_device *pdev)
{
- struct mmc_platform_data *plat = pdev->dev.platform_data;
+ struct msm_mmc_platform_data *plat = pdev->dev.platform_data;
struct msmsdcc_host *host;
struct mmc_host *mmc;
struct resource *cmd_irqres = NULL;
@@ -1145,15 +1118,6 @@ msmsdcc_probe(struct platform_device *pdev)
host->dmares = dmares;
spin_lock_init(&host->lock);
-#ifdef CONFIG_MMC_EMBEDDED_SDIO
- if (plat->embedded_sdio)
- mmc_set_embedded_sdio_data(mmc,
- &plat->embedded_sdio->cis,
- &plat->embedded_sdio->cccr,
- plat->embedded_sdio->funcs,
- plat->embedded_sdio->num_funcs);
-#endif
-
/*
* Setup DMA
*/
@@ -1314,6 +1278,24 @@ msmsdcc_probe(struct platform_device *pdev)
return ret;
}
+#ifdef CONFIG_PM
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+static void
+do_resume_work(struct work_struct *work)
+{
+ struct msmsdcc_host *host =
+ container_of(work, struct msmsdcc_host, resume_task);
+ struct mmc_host *mmc = host->mmc;
+
+ if (mmc) {
+ mmc_resume_host(mmc);
+ if (host->stat_irq)
+ enable_irq(host->stat_irq);
+ }
+}
+#endif
+
+
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
@@ -1358,6 +1340,10 @@ msmsdcc_resume(struct platform_device *dev)
}
return 0;
}
+#else
+#define msmsdcc_suspend 0
+#define msmsdcc_resume 0
+#endif
static struct platform_driver msmsdcc_driver = {
.probe = msmsdcc_probe,
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index da0039c9285..ff2b0f74f6f 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -225,7 +225,7 @@ struct msmsdcc_host {
u32 pwr;
u32 saved_irq0mask; /* MMCIMASK0 reg value */
- struct mmc_platform_data *plat;
+ struct msm_mmc_platform_data *plat;
struct timer_list timer;
unsigned int oldstat;
@@ -235,10 +235,6 @@ struct msmsdcc_host {
int cmdpoll;
struct msmsdcc_stats stats;
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
- struct work_struct resume_task;
-#endif
-
/* Command parameters */
unsigned int cmd_timeout;
unsigned int cmd_pio_irqmask;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b032828c612..4526d2791f2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -28,6 +28,7 @@
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/gpio.h>
@@ -78,6 +79,7 @@
#define INT_EN_MASK 0x307F0033
#define BWR_ENABLE (1 << 4)
#define BRR_ENABLE (1 << 5)
+#define DTO_ENABLE (1 << 20)
#define INIT_STREAM (1 << 1)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
@@ -523,7 +525,8 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}
-static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
+static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
+ struct mmc_command *cmd)
{
unsigned int irq_mask;
@@ -532,6 +535,10 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
else
irq_mask = INT_EN_MASK;
+ /* Disable the data timeout interrupt for erases, which can run far
+ longer than the timeout counter allows */
+ if (cmd->opcode == MMC_ERASE)
+ irq_mask &= ~DTO_ENABLE;
+
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
@@ -782,7 +789,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
host->cmd = cmd;
- omap_hsmmc_enable_irq(host);
+ omap_hsmmc_enable_irq(host, cmd);
host->response_busy = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
@@ -1273,8 +1280,11 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
struct mmc_data *data = host->mrq->data;
int dma_ch, req_in_progress;
- if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
- dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
+ if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
+ dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
+ ch_status);
+ return;
+ }
spin_lock(&host->irq_lock);
if (host->dma_ch < 0) {
@@ -1598,6 +1608,14 @@ static int omap_hsmmc_get_ro(struct mmc_host *mmc)
return mmc_slot(host).get_ro(host->dev, 0);
}
+static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mmc_slot(host).init_card)
+ mmc_slot(host).init_card(card);
+}
+
static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
u32 hctl, capa, value;
@@ -1869,6 +1887,7 @@ static const struct mmc_host_ops omap_hsmmc_ops = {
.set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
.get_ro = omap_hsmmc_get_ro,
+ .init_card = omap_hsmmc_init_card,
/* NYET -- enable_sdio_irq */
};
@@ -1879,6 +1898,7 @@ static const struct mmc_host_ops omap_hsmmc_ps_ops = {
.set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
.get_ro = omap_hsmmc_get_ro,
+ .init_card = omap_hsmmc_init_card,
/* NYET -- enable_sdio_irq */
};
@@ -2094,12 +2114,25 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_WAIT_WHILE_BUSY;
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
- if (mmc_slot(host).wires >= 8)
+ switch (mmc_slot(host).wires) {
+ case 8:
mmc->caps |= MMC_CAP_8_BIT_DATA;
- else if (mmc_slot(host).wires >= 4)
+ /* Fall through */
+ case 4:
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ /* Nothing to crib here */
+ case 0:
+ /* Assuming nothing was given by the board, core uses 1-bit */
+ break;
+ default:
+ /* Completely unexpected. Core goes with 1-bit width */
+ dev_crit(mmc_dev(host->mmc), "Invalid width %d used! "
+ "Using 1 instead\n", mmc_slot(host).wires);
+ }
if (mmc_slot(host).nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
@@ -2272,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
- pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
if (host && host->suspended)
return 0;
@@ -2291,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
}
}
cancel_work_sync(&host->mmc_carddetect_work);
- mmc_host_enable(host->mmc);
ret = mmc_suspend_host(host->mmc);
+ mmc_host_enable(host->mmc);
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a90a5..976330de379 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
host->pio_active = XFER_NONE;
#ifdef CONFIG_MMC_S3C_PIODMA
- host->dodma = host->pdata->dma;
+ host->dodma = host->pdata->use_dma;
#endif
host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
new file mode 100644
index 00000000000..b7050b380d5
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -0,0 +1,97 @@
+/*
+ * SDHCI support for CNS3xxx SoC
+ *
+ * Copyright 2008 Cavium Networks
+ * Copyright 2010 MontaVista Software, LLC.
+ *
+ * Authors: Scott Shu
+ * Anton Vorontsov <avorontsov@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mmc/host.h>
+#include <linux/sdhci-pltfm.h>
+#include <mach/cns3xxx.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
+{
+ return 150000000;
+}
+
+static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int div = 1;
+ u16 clk;
+ unsigned long timeout;
+
+ if (clock == host->clock)
+ return;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ goto out;
+
+ while (host->max_clk / div > clock) {
+ /*
+ * On CNS3xxx the divider grows linearly up to 4 and
+ * then exponentially up to 256.
+ */
+ if (div < 4)
+ div += 1;
+ else if (div < 256)
+ div *= 2;
+ else
+ break;
+ }
+
+ dev_dbg(dev, "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / div);
+
+ /* Divide by 3 is special. */
+ if (div != 3)
+ div >>= 1;
+
+ clk = div << SDHCI_DIVIDER_SHIFT;
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ dev_warn(dev, "clock is unstable\n");
+ break;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+out:
+ host->clock = clock;
+}
+
+static struct sdhci_ops sdhci_cns3xxx_ops = {
+ .get_max_clock = sdhci_cns3xxx_get_max_clk,
+ .set_clock = sdhci_cns3xxx_set_clock,
+};
+
+struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+ .ops = &sdhci_cns3xxx_ops,
+ .quirks = SDHCI_QUIRK_BROKEN_DMA |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_NONSTANDARD_CLOCK,
+};
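
The set_clock callback in this new file implements the non-standard CNS3xxx divider: it grows linearly up to 4, then doubles up to 256, and the value written to SDHCI_CLOCK_CONTROL is div/2 except for the special divide-by-3 case. A standalone sketch of just the divider search, using the 150 MHz base clock returned by get_max_clock above (the target frequencies are purely illustrative):

#include <stdio.h>

#define CNS3XXX_SDHCI_BASE_CLK	150000000u	/* matches sdhci_cns3xxx_get_max_clk() */

static unsigned int cns3xxx_pick_divider(unsigned int clock)
{
	unsigned int div = 1;

	while (CNS3XXX_SDHCI_BASE_CLK / div > clock) {
		if (div < 4)
			div += 1;	/* linear region: 1, 2, 3, 4 */
		else if (div < 256)
			div *= 2;	/* exponential region: 8, 16, ..., 256 */
		else
			break;
	}
	return div;
}

int main(void)
{
	unsigned int targets[] = { 400000u, 25000000u, 50000000u };
	unsigned int i;

	for (i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		unsigned int div = cns3xxx_pick_divider(targets[i]);
		unsigned int reg = (div != 3) ? div >> 1 : div;	/* divide-by-3 is special */

		printf("want %8u Hz -> div %3u, actual %9u Hz, divider field %u\n",
		       targets[i], div, CNS3XXX_SDHCI_BASE_CLK / div, reg);
	}
	return 0;
}

Note that the slowest reachable clock is 150 MHz / 256, roughly 586 kHz, so very low requests such as the 400 kHz identification clock end up faster than asked for.
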
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index a2e9820cd42..c51b71174c1 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -85,14 +85,14 @@ void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
#ifdef CONFIG_PM
-static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
+static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
return mmc_suspend_host(host->mmc);
}
-static int sdhci_of_resume(struct of_device *ofdev)
+static int sdhci_of_resume(struct platform_device *ofdev)
{
struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
@@ -115,7 +115,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
}
-static int __devinit sdhci_of_probe(struct of_device *ofdev,
+static int __devinit sdhci_of_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -154,6 +154,10 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
host->ops = &sdhci_of_data->ops;
}
+ if (of_get_property(np, "sdhci,auto-cmd12", NULL))
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+
if (of_get_property(np, "sdhci,1-bit-only", NULL))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
@@ -179,7 +183,7 @@ err_addr_map:
return ret;
}
-static int __devexit sdhci_of_remove(struct of_device *ofdev)
+static int __devexit sdhci_of_remove(struct platform_device *ofdev)
{
struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 65483fdea45..e8aa99deae9 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/device.h>
#include <linux/mmc/host.h>
@@ -84,7 +85,30 @@ static int ricoh_probe(struct sdhci_pci_chip *chip)
if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
+ return 0;
+}
+
+static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->caps =
+ ((0x21 << SDHCI_TIMEOUT_CLK_SHIFT)
+ & SDHCI_TIMEOUT_CLK_MASK) |
+
+ ((0x21 << SDHCI_CLOCK_BASE_SHIFT)
+ & SDHCI_CLOCK_BASE_MASK) |
+ SDHCI_TIMEOUT_CLK_UNIT |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_DO_SDMA;
+ return 0;
+}
+
+static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
+{
+ /*
+ * Apply a delay to allow the controller to settle; otherwise it
+ * becomes confused if the card state changed during suspend.
+ */
+ msleep(500);
return 0;
}
@@ -95,6 +119,15 @@ static const struct sdhci_pci_fixes sdhci_ricoh = {
SDHCI_QUIRK_CLOCK_BEFORE_RESET,
};
+static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
+ .probe_slot = ricoh_mmc_probe_slot,
+ .resume = ricoh_mmc_resume,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_MISSING_CAPS
+};
+
static const struct sdhci_pci_fixes sdhci_ene_712 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_BROKEN_DMA,
@@ -374,6 +407,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0x843,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0xe822,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_ENE,
.device = PCI_DEVICE_ID_ENE_CB712_SD,
.subvendor = PCI_ANY_ID,
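
The new ricoh_mmc_probe_slot() above pairs with the SDHCI_QUIRK_MISSING_CAPS quirk added in this patch: the Ricoh MMC-only controller reports no capabilities, so the driver synthesizes a capabilities word instead of reading the register (see the change to sdhci_add_host() later in this diff). Decoding that word with the standard SDHCI capability layout is instructive; a sketch (the field positions below are assumed from sdhci.h and the SDHCI spec, they are not shown in this diff):

#include <stdio.h>

/* Assumed SDHCI capability-register layout (standard SDHCI fields). */
#define SDHCI_TIMEOUT_CLK_MASK	0x0000003F
#define SDHCI_TIMEOUT_CLK_SHIFT	0
#define SDHCI_TIMEOUT_CLK_UNIT	0x00000080	/* set: value is in MHz, not kHz */
#define SDHCI_CLOCK_BASE_MASK	0x00003F00
#define SDHCI_CLOCK_BASE_SHIFT	8
#define SDHCI_CAN_DO_SDMA	0x00400000
#define SDHCI_CAN_VDD_330	0x01000000

int main(void)
{
	/* Same expression as ricoh_mmc_probe_slot() above. */
	unsigned int caps =
		((0x21 << SDHCI_TIMEOUT_CLK_SHIFT) & SDHCI_TIMEOUT_CLK_MASK) |
		((0x21 << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_BASE_MASK) |
		SDHCI_TIMEOUT_CLK_UNIT | SDHCI_CAN_VDD_330 | SDHCI_CAN_DO_SDMA;

	printf("caps = %#010x\n", caps);
	printf("base clock:    %u MHz\n",
	       (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT);
	printf("timeout clock: %u %s\n",
	       (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT,
	       (caps & SDHCI_TIMEOUT_CLK_UNIT) ? "MHz" : "kHz");
	printf("3.3V: %s, SDMA: %s\n",
	       (caps & SDHCI_CAN_VDD_330) ? "yes" : "no",
	       (caps & SDHCI_CAN_DO_SDMA) ? "yes" : "no");
	return 0;
}

0x21 is 33 decimal, so under these assumptions the synthesized word advertises a 33 MHz base clock, a 33 MHz timeout clock, 3.3 V operation and SDMA.
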
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index b6ee0d71969..e045e3c61dd 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -32,6 +33,7 @@
#include <linux/sdhci-pltfm.h>
#include "sdhci.h"
+#include "sdhci-pltfm.h"
/*****************************************************************************\
* *
@@ -51,10 +53,14 @@ static struct sdhci_ops sdhci_pltfm_ops = {
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
struct sdhci_host *host;
struct resource *iomem;
int ret;
+ if (!pdata && platid && platid->driver_data)
+ pdata = (void *)platid->driver_data;
+
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
@@ -150,6 +156,15 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id sdhci_pltfm_ids[] = {
+ { "sdhci", },
+#ifdef CONFIG_MMC_SDHCI_CNS3XXX
+ { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
+
static struct platform_driver sdhci_pltfm_driver = {
.driver = {
.name = "sdhci",
@@ -157,6 +172,7 @@ static struct platform_driver sdhci_pltfm_driver = {
},
.probe = sdhci_pltfm_probe,
.remove = __devexit_p(sdhci_pltfm_remove),
+ .id_table = sdhci_pltfm_ids,
};
/*****************************************************************************\
@@ -181,4 +197,3 @@ module_exit(sdhci_drv_exit);
MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sdhci");
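
With the id_table added above, the single "sdhci" platform driver can now bind devices registered under different names and pick up a per-SoC sdhci_pltfm_data from platid->driver_data when no platform_data is supplied. A hedged sketch of how SoC setup code might register the CNS3xxx instance (the base address, IRQ number and function name are placeholders, not taken from this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource cns3xxx_sdhci_resources[] = {
	{
		.start	= 0x78000000,	/* placeholder MMIO base */
		.end	= 0x78000fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 47,		/* placeholder IRQ number */
		.end	= 47,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device cns3xxx_sdhci_device = {
	.name		= "sdhci-cns3xxx",	/* matched against sdhci_pltfm_ids[] */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(cns3xxx_sdhci_resources),
	.resource	= cns3xxx_sdhci_resources,
};

void __init cns3xxx_register_sdhci(void)
{
	platform_device_register(&cns3xxx_sdhci_device);
}

Because platform_data is left unset, sdhci_pltfm_probe() falls back to platid->driver_data and picks up the CNS3xxx ops and quirks.
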
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
new file mode 100644
index 00000000000..900f32902f7
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2010 MontaVista Software, LLC.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H
+#define _DRIVERS_MMC_SDHCI_PLTFM_H
+
+#include <linux/sdhci-pltfm.h>
+
+extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
+
+#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index ad30f074ee1..aacb862ecc8 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <linux/mmc/host.h>
@@ -44,6 +45,8 @@ struct sdhci_s3c {
struct resource *ioarea;
struct s3c_sdhci_platdata *pdata;
unsigned int cur_clk;
+ int ext_cd_irq;
+ int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -110,11 +113,6 @@ static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
return max;
}
-static unsigned int sdhci_s3c_get_timeout_clk(struct sdhci_host *host)
-{
- return sdhci_s3c_get_max_clk(host) / 1000000;
-}
-
/**
* sdhci_s3c_consider_clock - consider one of the bus clocks for current setting
* @ourhost: Our SDHCI instance.
@@ -188,7 +186,6 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
ourhost->cur_clk = best_src;
host->max_clk = clk_get_rate(clk);
- host->timeout_clk = sdhci_s3c_get_timeout_clk(host);
ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
@@ -209,12 +206,95 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+/**
+ * sdhci_s3c_get_min_clock - callback to get minimal supported clock value
+ * @host: The SDHCI host being queried
+ *
+ * To initialize the MMC host properly, a minimal clock value is needed. For
+ * high system bus clock values the standard formula gives values outside the
+ * allowed range. The clock can still be set to lower values if a clock
+ * source other than the system bus is selected.
+ */
+static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned int delta, min = UINT_MAX;
+ int src;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = sdhci_s3c_consider_clock(ourhost, src, 0);
+ if (delta == UINT_MAX)
+ continue;
+ /* delta is a negative value in this case */
+ if (-delta < min)
+ min = -delta;
+ }
+ return min;
+}
+
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
- .get_timeout_clock = sdhci_s3c_get_timeout_clk,
.set_clock = sdhci_s3c_set_clock,
+ .get_min_clock = sdhci_s3c_get_min_clock,
};
+static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ if (host) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (state) {
+ dev_dbg(&dev->dev, "card inserted.\n");
+ host->flags &= ~SDHCI_DEVICE_DEAD;
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ } else {
+ dev_dbg(&dev->dev, "card removed.\n");
+ host->flags |= SDHCI_DEVICE_DEAD;
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+ tasklet_schedule(&host->card_tasklet);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+}
+
+static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id)
+{
+ struct sdhci_s3c *sc = dev_id;
+ int status = gpio_get_value(sc->ext_cd_gpio);
+ if (sc->pdata->ext_cd_gpio_invert)
+ status = !status;
+ sdhci_s3c_notify_change(sc->pdev, status);
+ return IRQ_HANDLED;
+}
+
+static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
+{
+ struct s3c_sdhci_platdata *pdata = sc->pdata;
+ struct device *dev = &sc->pdev->dev;
+
+ if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) {
+ sc->ext_cd_gpio = pdata->ext_cd_gpio;
+ sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
+ if (sc->ext_cd_irq &&
+ request_threaded_irq(sc->ext_cd_irq, NULL,
+ sdhci_s3c_gpio_card_detect_thread,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(dev), sc) == 0) {
+ int status = gpio_get_value(sc->ext_cd_gpio);
+ if (pdata->ext_cd_gpio_invert)
+ status = !status;
+ sdhci_s3c_notify_change(sc->pdev, status);
+ } else {
+ dev_warn(dev, "cannot request irq for card detect\n");
+ sc->ext_cd_irq = 0;
+ }
+ } else {
+ dev_err(dev, "cannot request gpio for card detect\n");
+ }
+}
+
static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
{
struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
@@ -252,6 +332,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
sc->host = host;
sc->pdev = pdev;
sc->pdata = pdata;
+ sc->ext_cd_gpio = -1; /* invalid gpio number */
platform_set_drvdata(pdev, host);
@@ -318,6 +399,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
/* Setup quirks for the controller */
host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
#ifndef CONFIG_MMC_SDHCI_S3C_DMA
@@ -332,15 +414,34 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+ if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
+ pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
+ host->mmc->caps = MMC_CAP_NONREMOVABLE;
+
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
+ /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
+ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
goto err_add_host;
}
+ /* The following two methods of card detection might call
+ sdhci_s3c_notify_change() immediately, so they can be called
+ only after sdhci_add_host(). Setup errors are ignored. */
+ if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
+ pdata->ext_cd_init(&sdhci_s3c_notify_change);
+ if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
+ gpio_is_valid(pdata->ext_cd_gpio))
+ sdhci_s3c_setup_card_detect_gpio(sc);
+
return 0;
err_add_host:
@@ -365,15 +466,27 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
{
+ struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
int ptr;
+ if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup)
+ pdata->ext_cd_cleanup(&sdhci_s3c_notify_change);
+
+ if (sc->ext_cd_irq)
+ free_irq(sc->ext_cd_irq, sc);
+
+ if (gpio_is_valid(sc->ext_cd_gpio))
+ gpio_free(sc->ext_cd_gpio);
+
sdhci_remove_host(host, 1);
for (ptr = 0; ptr < 3; ptr++) {
- clk_disable(sc->clk_bus[ptr]);
- clk_put(sc->clk_bus[ptr]);
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+ }
}
clk_disable(sc->clk_io);
clk_put(sc->clk_io);
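
The sdhci-s3c changes above add three card-detect schemes driven by platform data: permanent/none (BROKEN_CARD_DETECTION quirk), an external notifier (ext_cd_init/ext_cd_cleanup), and a GPIO wired to a threaded IRQ. A hedged sketch of the board-side platform data for the GPIO case; only the field and constant names come from this patch, the GPIO number and the way the structure is handed to the device are assumptions:

#include <plat/sdhci.h>		/* struct s3c_sdhci_platdata; header location assumed */

static struct s3c_sdhci_platdata my_board_hsmmc0_pdata = {
	.cd_type		= S3C_SDHCI_CD_GPIO,
	.ext_cd_gpio		= 134,	/* placeholder GPIO number */
	.ext_cd_gpio_invert	= 1,	/* card-present line is active low */
};

/*
 * Board init would attach this structure as the hsmmc device's platform_data
 * (typically through the platform's s3c_sdhci*_set_platdata() helper; the
 * exact helper name is an assumption, not part of this patch).
 */
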
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c6d1bd8d4ac..401527d273b 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
#include <linux/leds.h>
@@ -817,8 +818,12 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
WARN_ON(!host->data);
mode = SDHCI_TRNS_BLK_CNT_EN;
- if (data->blocks > 1)
- mode |= SDHCI_TRNS_MULTI;
+ if (data->blocks > 1) {
+ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+ mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
+ else
+ mode |= SDHCI_TRNS_MULTI;
+ }
if (data->flags & MMC_DATA_READ)
mode |= SDHCI_TRNS_READ;
if (host->flags & SDHCI_REQ_USE_DMA)
@@ -1108,6 +1113,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
#ifndef SDHCI_USE_LEDS_CLASS
sdhci_activate_led(host);
#endif
+ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
+ if (mrq->stop) {
+ mrq->data->stop = NULL;
+ mrq->stop = NULL;
+ }
+ }
host->mrq = mrq;
@@ -1159,12 +1170,18 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
- if (ios->timing == MMC_TIMING_SD_HS)
+ if (ios->timing == MMC_TIMING_SD_HS &&
+ !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
ctrl |= SDHCI_CTRL_HISPD;
else
ctrl &= ~SDHCI_CTRL_HISPD;
@@ -1603,7 +1620,10 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
free_irq(host->irq, host);
- return 0;
+ if (host->vmmc)
+ ret = regulator_disable(host->vmmc);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
@@ -1612,6 +1632,13 @@ int sdhci_resume_host(struct sdhci_host *host)
{
int ret;
+ if (host->vmmc) {
+ int ret = regulator_enable(host->vmmc);
+ if (ret)
+ return ret;
+ }
+
+
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
@@ -1687,7 +1714,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->version);
}
- caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+ sdhci_readl(host, SDHCI_CAPABILITIES);
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
@@ -1785,13 +1813,12 @@ int sdhci_add_host(struct sdhci_host *host)
* Set host parameters.
*/
mmc->ops = &sdhci_ops;
- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
- host->ops->set_clock && host->ops->get_min_clock)
+ if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps = MMC_CAP_SDIO_IRQ;
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1884,6 +1911,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (ret)
goto untasklet;
+ host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ if (IS_ERR(host->vmmc)) {
+ printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ host->vmmc = NULL;
+ } else {
+ regulator_enable(host->vmmc);
+ }
+
sdhci_init(host, 0);
#ifdef CONFIG_MMC_DEBUG
@@ -1968,6 +2003,11 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
kfree(host->adma_desc);
kfree(host->align_buffer);
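
The sdhci core now optionally drives a "vmmc" supply: regulator_get() at add time, enable for normal operation and resume, disable on suspend, and put on removal; a missing regulator is logged and otherwise ignored. A minimal consumer-side sketch of that pattern outside sdhci (device handling and function names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static struct regulator *vmmc;

static int my_host_attach_vmmc(struct device *dev)
{
	vmmc = regulator_get(dev, "vmmc");
	if (IS_ERR(vmmc)) {
		dev_info(dev, "no vmmc regulator found\n");
		vmmc = NULL;		/* treat as "always on", like sdhci does */
		return 0;
	}
	return regulator_enable(vmmc);
}

static void my_host_detach_vmmc(void)
{
	if (vmmc) {
		regulator_disable(vmmc);
		regulator_put(vmmc);
	}
}

The board still has to map a supply to the consumer name "vmmc" for the lookup to succeed; otherwise the driver simply logs the message above and carries on.
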
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c8468134adc..d316bc79b63 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -72,6 +72,7 @@
#define SDHCI_CTRL_ADMA1 0x08
#define SDHCI_CTRL_ADMA32 0x10
#define SDHCI_CTRL_ADMA64 0x18
+#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_POWER_CONTROL 0x29
#define SDHCI_POWER_ON 0x01
@@ -240,12 +241,20 @@ struct sdhci_host {
#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
/* Controller cannot support End Attribute in NOP ADMA descriptor */
#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
+/* Controller is missing device caps. Use caps provided by host */
+#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
+/* Controller uses Auto CMD12 command to stop the transfer */
+#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
+/* Controller doesn't have a HISPD bit field for high-speed SD cards */
+#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
const struct sdhci_ops *ops; /* Low level hw interface */
+ struct regulator *vmmc; /* Power regulator */
+
/* Internal data */
struct mmc_host *mmc; /* MMC structure */
u64 dma_mask; /* custom DMA mask */
@@ -292,6 +301,8 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
+ unsigned int caps; /* Alternative capabilities */
+
unsigned long private[0] ____cacheline_aligned;
};
@@ -407,6 +418,7 @@ static inline void *sdhci_priv(struct sdhci_host *host)
return (void *)host->private;
}
+extern void sdhci_card_detect(struct sdhci_host *host);
extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index e7507af3856..7aa65bb2af4 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h>
#include <linux/scatterlist.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5a51c..69d98e3bf6a 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
+ void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
- buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
- host->sg_off);
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
host->sg_off += count;
- tmio_mmc_kunmap_atomic(host, &flags);
+ tmio_mmc_kunmap_atomic(sg_virt, &flags);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5dfc10..0fedc78e3ea 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
#define ack_mmc_irqs(host, i) \
do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_STATUS); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_STATUS, mask); \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
return --host->sg_len;
}
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
- struct scatterlist *sg = host->sg_ptr;
-
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
unsigned long *flags)
{
- kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index f8210bf2d24..1e2cbf5d9aa 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -311,15 +311,17 @@ config SM_FTL
select MTD_BLKDEVS
select MTD_NAND_ECC
help
- This enables new and very EXPERMENTAL support for SmartMedia/xD
+ This enables EXPERIMENTAL R/W support for SmartMedia/xD
FTL (Flash translation layer).
- Write support isn't yet well tested, therefore this code IS likely to
- eat your card, so please don't use it together with valuable data.
- Use readonly driver (CONFIG_SSFDC) instead.
+ Write support is only lightly tested, so this driver is not
+ recommended for use with valuable data (in any case, if you have
+ valuable data, make backups regardless of the software/hardware
+ you use, because you never know what will eat your data...)
+ If you only need R/O access, you can use the older R/O driver
+ (CONFIG_SSFDC).
config MTD_OOPS
tristate "Log panic/oops to an MTD buffer"
- depends on MTD
help
This enables panic and oops messages to be logged to a circular
buffer in a flash partition where it can be read back at some
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index cec7ab98b2a..302372c08b5 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -2,7 +2,7 @@
drivers/mtd/afs.c: ARM Flash Layout/Partitioning
- Copyright (C) 2000 ARM Limited
+ Copyright © 2000 ARM Limited
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 62f3ea9de84..9e2b7e9e0ad 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -34,7 +34,6 @@
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
@@ -63,6 +62,8 @@ static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -448,6 +449,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->sync = cfi_intelext_sync;
mtd->lock = cfi_intelext_lock;
mtd->unlock = cfi_intelext_unlock;
+ mtd->is_locked = cfi_intelext_is_locked;
mtd->suspend = cfi_intelext_suspend;
mtd->resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
@@ -717,7 +719,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
chip = &newcfi->chips[0];
for (i = 0; i < cfi->numchips; i++) {
shared[i].writing = shared[i].erasing = NULL;
- spin_lock_init(&shared[i].lock);
+ mutex_init(&shared[i].lock);
for (j = 0; j < numparts; j++) {
*chip = cfi->chips[i];
chip->start += j << partshift;
@@ -886,7 +888,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
*/
struct flchip_shared *shared = chip->priv;
struct flchip *contender;
- spin_lock(&shared->lock);
+ mutex_lock(&shared->lock);
contender = shared->writing;
if (contender && contender != chip) {
/*
@@ -899,7 +901,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* get_chip returns success we're clear to go ahead.
*/
ret = mutex_trylock(&contender->mutex);
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
if (!ret)
goto retry;
mutex_unlock(&chip->mutex);
@@ -914,7 +916,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
mutex_unlock(&contender->mutex);
return ret;
}
- spin_lock(&shared->lock);
+ mutex_lock(&shared->lock);
/* We should not own chip if it is already
* in FL_SYNCING state. Put contender and retry. */
@@ -930,7 +932,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* on this chip. Sleep. */
if (mode == FL_ERASING && shared->erasing
&& shared->erasing->oldstate == FL_ERASING) {
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
@@ -944,7 +946,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
shared->writing = chip;
if (mode == FL_ERASING)
shared->erasing = chip;
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
}
ret = chip_ready(map, chip, adr, mode);
if (ret == -EAGAIN)
@@ -959,7 +961,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
if (chip->priv) {
struct flchip_shared *shared = chip->priv;
- spin_lock(&shared->lock);
+ mutex_lock(&shared->lock);
if (shared->writing == chip && chip->oldstate == FL_READY) {
/* We own the ability to write, but we're done */
shared->writing = shared->erasing;
@@ -967,7 +969,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
mutex_lock(&loaner->mutex);
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
mutex_lock(&chip->mutex);
@@ -985,11 +987,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
* Don't let the switch below mess things up since
* we don't have ownership to resume anything.
*/
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
wake_up(&chip->wq);
return;
}
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
}
switch(chip->oldstate) {
@@ -2139,6 +2141,13 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
+ ofs, len, NULL) ? 1 : 0;
+}
+
#ifdef CONFIG_MTD_OTP
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d81079ef91a..3e6c47bdce5 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -33,7 +33,6 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -417,16 +416,26 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
*/
cfi_fixup_major_minor(cfi, extp);
+ /*
+ * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
+ * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
+ * http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
+ * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
+ */
if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
+ "version %c.%c (%#02x/%#02x).\n",
+ extp->MajorVersion, extp->MinorVersion,
+ extp->MajorVersion, extp->MinorVersion);
kfree(extp);
kfree(mtd);
return NULL;
}
+ printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
+ extp->MajorVersion, extp->MinorVersion);
+
/* Install our own private info structure */
cfi->cmdset_priv = extp;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index e54e8c169d7..314af1f5a37 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -33,7 +33,6 @@
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index b2acd32f4fb..8f5b96aa87a 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -235,9 +235,9 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi_qry_mode_off(base, map, cfi);
xip_allowed(base, map);
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. Manufacturer ID %#08x Chip ID %#08x\n",
map->name, cfi->interleave, cfi->device_type*8, base,
- map->bankwidth*8);
+ map->bankwidth*8, cfi->mfr, cfi->id);
return 1;
}
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index d7c2c672757..e503b2ca894 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -22,7 +22,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
-#include <linux/mtd/compatmac.h>
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi)
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index c8576096822..da1f96f385c 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
static DEFINE_SPINLOCK(chip_drvs_lock);
static LIST_HEAD(chip_drvs_list);
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index 494d30d0631..f2b87294687 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-#include <linux/mtd/compatmac.h>
static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 6bdc50c727e..67640ccb2d4 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-#include <linux/mtd/compatmac.h>
static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 076090a67b9..593f73d480d 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-#include <linux/mtd/compatmac.h>
static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 1479da6d3aa..e790f38893b 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -1,7 +1,22 @@
/*
* Read flash partition table from command line
*
- * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ * Copyright © 2002 SYSGO Real-Time Solutions GmbH
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* The format for the command line is as follows:
*
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index a19cda52da5..a99838bb2dc 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -31,7 +31,6 @@
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/mtd/compatmac.h> /* for min() in older kernels */
#include <linux/mtd/mtd.h>
#include <linux/mtd/doc2000.h>
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 6e62922942b..d374603493a 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -49,7 +49,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/doc2000.h>
-#include <linux/mtd/compatmac.h>
/* Where to look for the devices? */
#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 81e49a9b017..6f512b5c117 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -16,6 +16,8 @@
*/
#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -345,8 +347,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_message_add_tail(&t[1], &m);
/* Byte count starts at zero. */
- if (retlen)
- *retlen = 0;
+ *retlen = 0;
mutex_lock(&flash->lock);
@@ -392,8 +393,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
dev_name(&flash->spi->dev), __func__, "to",
(u32)to, len);
- if (retlen)
- *retlen = 0;
+ *retlen = 0;
/* sanity checks */
if (!len)
@@ -464,8 +464,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
spi_sync(flash->spi, &m);
- if (retlen)
- *retlen += m.actual_length - m25p_cmdsz(flash);
+ *retlen += m.actual_length - m25p_cmdsz(flash);
}
}
@@ -483,8 +482,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t actual;
int cmd_sz, ret;
- if (retlen)
- *retlen = 0;
+ *retlen = 0;
/* sanity checks */
if (!len)
@@ -639,8 +637,18 @@ static const struct spi_device_id m25p_ids[] = {
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
{ "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ /* EON -- en25pxx */
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+
/* Macronix */
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
@@ -680,6 +688,16 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
@@ -694,6 +712,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
@@ -723,7 +742,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
if (tmp < 0) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
dev_name(&spi->dev), tmp);
- return NULL;
+ return ERR_PTR(tmp);
}
jedec = id[0];
jedec = jedec << 8;
@@ -731,14 +750,6 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
jedec = jedec << 8;
jedec |= id[2];
- /*
- * Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants,
- * which depend on technology process. Officially RDID command doesn't
- * exist for non-JEDEC chips, but for compatibility they return ID 0.
- */
- if (jedec == 0)
- return NULL;
-
ext_jedec = id[3] << 8 | id[4];
for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
@@ -749,7 +760,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
return &m25p_ids[tmp];
}
}
- return NULL;
+ return ERR_PTR(-ENODEV);
}
@@ -782,7 +793,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
break;
}
- if (plat_id)
+ if (i < ARRAY_SIZE(m25p_ids) - 1)
id = plat_id;
else
dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
@@ -794,9 +805,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
const struct spi_device_id *jid;
jid = jedec_probe(spi);
- if (!jid) {
- dev_info(&spi->dev, "non-JEDEC variant of %s\n",
- id->name);
+ if (IS_ERR(jid)) {
+ return PTR_ERR(jid);
} else if (jid != id) {
/*
* JEDEC knows better, so overwrite platform ID. We
@@ -826,11 +836,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
dev_set_drvdata(&spi->dev, flash);
/*
- * Atmel and SST serial flash tend to power
+ * Atmel, SST and Intel/Numonyx serial flash tend to power
* up with the software protection bits set
*/
if (info->jedec_id >> 16 == 0x1f ||
+ info->jedec_id >> 16 == 0x89 ||
info->jedec_id >> 16 == 0xbf) {
write_enable(flash);
write_sr(flash, 0);
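
jedec_probe() above switches from returning NULL to the ERR_PTR/IS_ERR/PTR_ERR idiom, so the caller can distinguish "the ID read failed" (propagate the SPI error) from "valid ID, chip not in the table" (-ENODEV), instead of silently treating both as a non-JEDEC part. A small sketch of that idiom in isolation (the table contents and function names are made up):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct chip_id { const char *name; };

static const struct chip_id known[] = {
	{ "m25p32" }, { "w25q32" },
};

static const struct chip_id *lookup_chip(const char *name, int bus_error)
{
	int i;

	if (bus_error)
		return ERR_PTR(bus_error);	/* e.g. -EIO from the transfer */
	for (i = 0; i < ARRAY_SIZE(known); i++)
		if (!strcmp(known[i].name, name))
			return &known[i];
	return ERR_PTR(-ENODEV);		/* valid read, unknown chip */
}

static int use_chip(const char *name)
{
	const struct chip_id *id = lookup_chip(name, 0);

	if (IS_ERR(id))
		return PTR_ERR(id);		/* hand the errno upward */
	/* ... use id->name ... */
	return 0;
}

This is why m25p_probe() above can now propagate a real bus error instead of mis-reporting it as a "non-JEDEC variant".
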
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 19817404ce7..c5015cc721d 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -141,7 +141,7 @@ static int dataflash_waitready(struct spi_device *spi)
*/
static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- struct dataflash *priv = (struct dataflash *)mtd->priv;
+ struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
struct spi_transfer x = { .tx_dma = 0, };
struct spi_message msg;
@@ -231,7 +231,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- struct dataflash *priv = (struct dataflash *)mtd->priv;
+ struct dataflash *priv = mtd->priv;
struct spi_transfer x[2] = { { .tx_dma = 0, }, };
struct spi_message msg;
unsigned int addr;
@@ -304,7 +304,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
- struct dataflash *priv = (struct dataflash *)mtd->priv;
+ struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
struct spi_transfer x[2] = { { .tx_dma = 0, }, };
struct spi_message msg;
@@ -515,7 +515,7 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base,
static int dataflash_read_fact_otp(struct mtd_info *mtd,
loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- struct dataflash *priv = (struct dataflash *)mtd->priv;
+ struct dataflash *priv = mtd->priv;
int status;
/* 64 bytes, from 0..63 ... start at 64 on-chip */
@@ -532,7 +532,7 @@ static int dataflash_read_fact_otp(struct mtd_info *mtd,
static int dataflash_read_user_otp(struct mtd_info *mtd,
loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- struct dataflash *priv = (struct dataflash *)mtd->priv;
+ struct dataflash *priv = mtd->priv;
int status;
/* 64 bytes, from 0..63 ... start at 0 on-chip */
@@ -553,7 +553,7 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
const size_t l = 4 + 64;
uint8_t *scratch;
struct spi_transfer t;
- struct dataflash *priv = (struct dataflash *)mtd->priv;
+ struct dataflash *priv = mtd->priv;
int status;
if (len > 64)
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index fce5ff7589a..26a6e809013 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -14,7 +14,6 @@
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index fc8ea0a57ac..ef0aba0ce58 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -98,7 +98,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/pmc551.h>
-#include <linux/mtd/compatmac.h>
static struct mtd_info *pmc551list;
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index ab5d8cd02a1..684247a8a5e 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -454,7 +454,7 @@ static int __init sst25l_probe(struct spi_device *spi)
parts, nr_parts);
}
- } else if (data->nr_parts) {
+ } else if (data && data->nr_parts) {
dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
data->nr_parts, data->name);
}
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 62da9eb7032..4d6a64c387e 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -26,7 +26,7 @@
The initial developer of the original code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ are Copyright © 1999 David A. Hinds. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU General Public License version 2 (the "GPL"), in
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 015a7fe1b6e..d7592e67d04 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -1,11 +1,11 @@
/*
* inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL)
*
- * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ * Copyright © 2002, Greg Ungerer (gerg@snapgear.com)
*
* Based heavily on the nftlcore.c code which is:
- * (c) 1999 Machine Vision Holdings, Inc.
- * Author: David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999 David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 8f988d7d3c5..104052e774b 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -2,11 +2,11 @@
* inftlmount.c -- INFTL mount code with extensive checks.
*
* Author: Greg Ungerer (gerg@snapgear.com)
- * (C) Copyright 2002-2003, Greg Ungerer (gerg@snapgear.com)
+ * Copyright © 2002-2003, Greg Ungerer (gerg@snapgear.com)
*
* Based heavily on the nftlmount.c code which is:
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
+ * Copyright © 2000 Netgem S.A.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -34,7 +34,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/inftl.h>
-#include <linux/mtd/compatmac.h>
/*
* find_boot_record: Find the INFTL Media Header and its Spare copy which
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index fece5be5871..04fdfcca93f 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
for (i = 0; i < numchips; i++) {
shared[i].writing = shared[i].erasing = NULL;
- spin_lock_init(&shared[i].lock);
+ mutex_init(&shared[i].lock);
for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
*chip = lpddr->chips[i];
chip->start += j << lpddr->chipshift;
@@ -217,7 +217,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
*/
struct flchip_shared *shared = chip->priv;
struct flchip *contender;
- spin_lock(&shared->lock);
+ mutex_lock(&shared->lock);
contender = shared->writing;
if (contender && contender != chip) {
/*
@@ -230,7 +230,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* get_chip returns success we're clear to go ahead.
*/
ret = mutex_trylock(&contender->mutex);
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
if (!ret)
goto retry;
mutex_unlock(&chip->mutex);
@@ -245,7 +245,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
mutex_unlock(&contender->mutex);
return ret;
}
- spin_lock(&shared->lock);
+ mutex_lock(&shared->lock);
/* We should not own chip if it is already in FL_SYNCING
* state. Put contender and retry. */
@@ -261,7 +261,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
Must sleep in such a case. */
if (mode == FL_ERASING && shared->erasing
&& shared->erasing->oldstate == FL_ERASING) {
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
@@ -275,7 +275,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
shared->writing = chip;
if (mode == FL_ERASING)
shared->erasing = chip;
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
}
ret = chip_ready(map, chip, mode);
@@ -348,7 +348,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
{
if (chip->priv) {
struct flchip_shared *shared = chip->priv;
- spin_lock(&shared->lock);
+ mutex_lock(&shared->lock);
if (shared->writing == chip && chip->oldstate == FL_READY) {
/* We own the ability to write, but we're done */
shared->writing = shared->erasing;
@@ -356,7 +356,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
/* give back the ownership */
struct flchip *loaner = shared->writing;
mutex_lock(&loaner->mutex);
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
mutex_unlock(&chip->mutex);
put_chip(map, loaner);
mutex_lock(&chip->mutex);
@@ -374,11 +374,11 @@ static void put_chip(struct map_info *map, struct flchip *chip)
* Don't let the switch below mess things up since
* we don't have ownership to resume anything.
*/
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
wake_up(&chip->wq);
return;
}
- spin_unlock(&shared->lock);
+ mutex_unlock(&shared->lock);
}
switch (chip->oldstate) {
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index f22bc9f05dd..701d942c679 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -319,14 +319,6 @@ config MTD_CFI_FLAGADM
Mapping for the Flaga digital module. If you don't have one, ignore
this setting.
-config MTD_REDWOOD
- tristate "CFI Flash devices mapped on IBM Redwood"
- depends on MTD_CFI && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 )
- help
- This enables access routines for the flash chips on the IBM
- Redwood board. If you have one of these boards and would like to
- use the flash chips on it, say 'Y'.
-
config MTD_SOLUTIONENGINE
tristate "CFI Flash device mapped on Hitachi SolutionEngine"
depends on SUPERH && SOLUTION_ENGINE && MTD_CFI && MTD_REDBOOT_PARTS
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index bb035cd54c7..f216bb57371 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
-obj-$(CONFIG_MTD_REDWOOD) += redwood.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index e0a5e0426ea..1f9fde0dad3 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -118,7 +118,7 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
*dest++ = BYTE1(data);
src += 2;
len -= 2;
- }
+ }
if (len > 0)
*dest++ = BYTE0(flash_read16(src));
@@ -185,6 +185,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
{
struct flash_platform_data *plat = dev->dev.platform_data;
struct ixp4xx_flash_info *info;
+ const char *part_type = NULL;
+ int nr_parts = 0;
int err = -1;
if (!plat)
@@ -218,9 +220,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
*/
info->map.bankwidth = 2;
info->map.name = dev_name(&dev->dev);
- info->map.read = ixp4xx_read16,
- info->map.write = ixp4xx_probe_write16,
- info->map.copy_from = ixp4xx_copy_from,
+ info->map.read = ixp4xx_read16;
+ info->map.write = ixp4xx_probe_write16;
+ info->map.copy_from = ixp4xx_copy_from;
info->res = request_mem_region(dev->resource->start,
resource_size(dev->resource),
@@ -248,11 +250,28 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
info->mtd->owner = THIS_MODULE;
/* Use the fast version */
- info->map.write = ixp4xx_write16,
+ info->map.write = ixp4xx_write16;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
+ dev->resource->start);
+#endif
+ if (nr_parts > 0) {
+ part_type = "dynamic";
+ } else {
+ info->partitions = plat->parts;
+ nr_parts = plat->nr_parts;
+ part_type = "static";
+ }
+ if (nr_parts == 0) {
+ printk(KERN_NOTICE "IXP4xx flash: no partition info "
+ "available, registering whole flash\n");
+ err = add_mtd_device(info->mtd);
+ } else {
+ printk(KERN_NOTICE "IXP4xx flash: using %s partition "
+ "definition\n", part_type);
+ err = add_mtd_partitions(info->mtd, info->partitions, nr_parts);
- err = parse_mtd_partitions(info->mtd, probes, &info->partitions, dev->resource->start);
- if (err > 0) {
- err = add_mtd_partitions(info->mtd, info->partitions, err);
if(err)
printk(KERN_ERR "Could not parse partitions\n");
}
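
The probe changes above restore a three-way fallback: dynamically parsed partitions are preferred, the static platform data is used next, and only when neither yields any partitions is the whole flash registered as a single device. A standalone sketch of just that selection logic, with plain C stand-ins for the MTD types and helpers (parse_dynamic() models parse_mtd_partitions(); nothing here is the real API):

#include <stdio.h>

struct partition { const char *name; };

/* stand-in for parse_mtd_partitions(): pretend no parser found anything */
static int parse_dynamic(struct partition **parts)
{
	*parts = NULL;
	return 0;
}

static void register_flash(struct partition *static_parts, int static_nr)
{
	struct partition *parts = NULL;
	int nr_parts = parse_dynamic(&parts);
	const char *part_type = "dynamic";

	if (nr_parts <= 0) {                   /* fall back to platform data */
		parts = static_parts;
		nr_parts = static_nr;
		part_type = "static";
	}

	if (nr_parts == 0)
		printf("no partition info available, registering whole flash\n");
	else
		printf("using %s partition definition, %d partitions, first \"%s\"\n",
		       part_type, nr_parts, parts[0].name);
}

int main(void)
{
	struct partition plat_parts[] = { { "boot" }, { "rootfs" } };

	register_flash(plat_parts, 2);
	return 0;
}
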
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index e699e6ac23d..e9ca5ba7d9d 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -16,7 +16,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -103,7 +102,7 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
window_handle_t win = (window_handle_t)map->map_priv_2;
- memreq_t mrq;
+ unsigned int offset;
int ret;
if (!pcmcia_dev_present(dev->p_dev)) {
@@ -111,15 +110,14 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
return 0;
}
- mrq.CardOffset = to & ~(dev->win_size-1);
- if(mrq.CardOffset != dev->offset) {
+ offset = to & ~(dev->win_size-1);
+ if (offset != dev->offset) {
DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
- dev->offset, mrq.CardOffset);
- mrq.Page = 0;
- ret = pcmcia_map_mem_page(dev->p_dev, win, &mrq);
+ dev->offset, offset);
+ ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
if (ret != 0)
return NULL;
- dev->offset = mrq.CardOffset;
+ dev->offset = offset;
}
return dev->win_base + (to & (dev->win_size-1));
}
@@ -346,7 +344,6 @@ static void pcmciamtd_release(struct pcmcia_device *link)
iounmap(dev->win_base);
dev->win_base = NULL;
}
- pcmcia_release_window(link, link->win);
}
pcmcia_disable_device(link);
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 426461a5f0d..4c18b98a311 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -106,12 +106,12 @@ static int physmap_flash_probe(struct platform_device *dev)
for (i = 0; i < dev->num_resources; i++) {
printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
- (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
+ (unsigned long long)resource_size(&dev->resource[i]),
(unsigned long long)dev->resource[i].start);
if (!devm_request_mem_region(&dev->dev,
dev->resource[i].start,
- dev->resource[i].end - dev->resource[i].start + 1,
+ resource_size(&dev->resource[i]),
dev_name(&dev->dev))) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
@@ -120,7 +120,7 @@ static int physmap_flash_probe(struct platform_device *dev)
info->map[i].name = dev_name(&dev->dev);
info->map[i].phys = dev->resource[i].start;
- info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
+ info->map[i].size = resource_size(&dev->resource[i]);
info->map[i].bankwidth = physmap_data->width;
info->map[i].set_vpp = physmap_data->set_vpp;
info->map[i].pfow_base = physmap_data->pfow_base;
@@ -136,8 +136,12 @@ static int physmap_flash_probe(struct platform_device *dev)
simple_map_init(&info->map[i]);
probe_type = rom_probe_types;
- for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
- info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
+ if (physmap_data->probe_type == NULL) {
+ for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
+ info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
+ } else
+ info->mtd[i] = do_map_probe(physmap_data->probe_type, &info->map[i]);
+
if (info->mtd[i] == NULL) {
dev_err(&dev->dev, "map_probe failed\n");
err = -ENXIO;
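
Two independent cleanups appear in the physmap hunks: the open-coded end - start + 1 is replaced by resource_size(), and the ROM probe loop is skipped when the platform data names a probe type explicitly. A compact model of both, with stand-in types; the probe list below is illustrative, and the real driver walks its list until one probe succeeds rather than always taking the first entry.

#include <stdio.h>

struct resource { unsigned long start, end; };

/* inclusive range, hence the +1 the old open-coded expression spelled out */
static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

static const char *pick_probe(const char *platform_probe_type)
{
	static const char *rom_probe_types[] =
		{ "cfi_probe", "jedec_probe", "map_rom", NULL };

	if (platform_probe_type)      /* platform data names the probe explicitly */
		return platform_probe_type;
	return rom_probe_types[0];    /* simplified: driver tries each in turn */
}

int main(void)
{
	struct resource res = { 0x80000000UL, 0x80ffffffUL };

	printf("size = 0x%lx, probe = %s\n",
	       resource_size(&res), pick_probe(NULL));
	return 0;
}
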
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index ba124baa646..fe63f6bd663 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -22,6 +22,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
@@ -43,7 +44,7 @@ struct of_flash {
#ifdef CONFIG_MTD_PARTITIONS
#define OF_FLASH_PARTS(info) ((info)->parts)
-static int parse_obsolete_partitions(struct of_device *dev,
+static int parse_obsolete_partitions(struct platform_device *dev,
struct of_flash *info,
struct device_node *dp)
{
@@ -93,7 +94,7 @@ static int parse_obsolete_partitions(struct of_device *dev,
#define parse_partitions(info, dev) (0)
#endif /* MTD_PARTITIONS */
-static int of_flash_remove(struct of_device *dev)
+static int of_flash_remove(struct platform_device *dev)
{
struct of_flash *info;
int i;
@@ -140,7 +141,7 @@ static int of_flash_remove(struct of_device *dev)
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
-static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
+static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
struct map_info *map)
{
struct device_node *dp = dev->dev.of_node;
@@ -215,7 +216,7 @@ static void __devinit of_free_probes(const char **probes)
}
#endif
-static int __devinit of_flash_probe(struct of_device *dev,
+static int __devinit of_flash_probe(struct platform_device *dev,
const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
@@ -353,7 +354,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
&info->parts, 0);
if (err < 0) {
of_free_probes(part_probe_types);
- return err;
+ goto err_out;
}
of_free_probes(part_probe_types);
@@ -361,14 +362,14 @@ static int __devinit of_flash_probe(struct of_device *dev,
if (err == 0) {
err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
if (err < 0)
- return err;
+ goto err_out;
}
#endif
if (err == 0) {
err = parse_obsolete_partitions(dev, info, dp);
if (err < 0)
- return err;
+ goto err_out;
}
if (err > 0)
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
deleted file mode 100644
index 933c0b63b01..00000000000
--- a/drivers/mtd/maps/redwood.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * drivers/mtd/maps/redwood.c
- *
- * FLASH map for the IBM Redwood 4/5/6 boards.
- *
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#if !defined (CONFIG_REDWOOD_6)
-
-#define WINDOW_ADDR 0xffc00000
-#define WINDOW_SIZE 0x00400000
-
-#define RW_PART0_OF 0
-#define RW_PART0_SZ 0x10000
-#define RW_PART1_OF RW_PART0_SZ
-#define RW_PART1_SZ 0x200000 - 0x10000
-#define RW_PART2_OF 0x200000
-#define RW_PART2_SZ 0x10000
-#define RW_PART3_OF 0x210000
-#define RW_PART3_SZ 0x200000 - (0x10000 + 0x20000)
-#define RW_PART4_OF 0x3e0000
-#define RW_PART4_SZ 0x20000
-
-static struct mtd_partition redwood_flash_partitions[] = {
- {
- .name = "Redwood OpenBIOS Vital Product Data",
- .offset = RW_PART0_OF,
- .size = RW_PART0_SZ,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- },
- {
- .name = "Redwood kernel",
- .offset = RW_PART1_OF,
- .size = RW_PART1_SZ
- },
- {
- .name = "Redwood OpenBIOS non-volatile storage",
- .offset = RW_PART2_OF,
- .size = RW_PART2_SZ,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- },
- {
- .name = "Redwood filesystem",
- .offset = RW_PART3_OF,
- .size = RW_PART3_SZ
- },
- {
- .name = "Redwood OpenBIOS",
- .offset = RW_PART4_OF,
- .size = RW_PART4_SZ,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- }
-};
-
-#else /* CONFIG_REDWOOD_6 */
-/* FIXME: the window is bigger - armin */
-#define WINDOW_ADDR 0xff800000
-#define WINDOW_SIZE 0x00800000
-
-#define RW_PART0_OF 0
-#define RW_PART0_SZ 0x400000 /* 4 MiB data */
-#define RW_PART1_OF RW_PART0_OF + RW_PART0_SZ
-#define RW_PART1_SZ 0x10000 /* 64K VPD */
-#define RW_PART2_OF RW_PART1_OF + RW_PART1_SZ
-#define RW_PART2_SZ 0x400000 - (0x10000 + 0x20000)
-#define RW_PART3_OF RW_PART2_OF + RW_PART2_SZ
-#define RW_PART3_SZ 0x20000
-
-static struct mtd_partition redwood_flash_partitions[] = {
- {
- .name = "Redwood filesystem",
- .offset = RW_PART0_OF,
- .size = RW_PART0_SZ
- },
- {
- .name = "Redwood OpenBIOS Vital Product Data",
- .offset = RW_PART1_OF,
- .size = RW_PART1_SZ,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- },
- {
- .name = "Redwood kernel",
- .offset = RW_PART2_OF,
- .size = RW_PART2_SZ
- },
- {
- .name = "Redwood OpenBIOS",
- .offset = RW_PART3_OF,
- .size = RW_PART3_SZ,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- }
-};
-
-#endif /* CONFIG_REDWOOD_6 */
-
-struct map_info redwood_flash_map = {
- .name = "IBM Redwood",
- .size = WINDOW_SIZE,
- .bankwidth = 2,
- .phys = WINDOW_ADDR,
-};
-
-
-#define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions)
-
-static struct mtd_info *redwood_mtd;
-
-static int __init init_redwood_flash(void)
-{
- int err;
-
- printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n",
- WINDOW_SIZE, WINDOW_ADDR);
-
- redwood_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!redwood_flash_map.virt) {
- printk("init_redwood_flash: failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&redwood_flash_map);
-
- redwood_mtd = do_map_probe("cfi_probe",&redwood_flash_map);
-
- if (redwood_mtd) {
- redwood_mtd->owner = THIS_MODULE;
- err = add_mtd_partitions(redwood_mtd,
- redwood_flash_partitions,
- NUM_REDWOOD_FLASH_PARTITIONS);
- if (err) {
- printk("init_redwood_flash: add_mtd_partitions failed\n");
- iounmap(redwood_flash_map.virt);
- }
- return err;
-
- }
-
- iounmap(redwood_flash_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_redwood_flash(void)
-{
- if (redwood_mtd) {
- del_mtd_partitions(redwood_mtd);
- /* moved iounmap after map_destroy - armin */
- map_destroy(redwood_mtd);
- iounmap((void *)redwood_flash_map.virt);
- }
-}
-
-module_init(init_redwood_flash);
-module_exit(cleanup_redwood_flash);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("MontaVista Software <source@mvista.com>");
-MODULE_DESCRIPTION("MTD map driver for the IBM Redwood reference boards");
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 0391c2527bd..3582ba1f9b0 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -48,7 +48,7 @@ struct map_info uflash_map_templ = {
.bankwidth = UFLASH_BUSWIDTH,
};
-int uflash_devinit(struct of_device *op, struct device_node *dp)
+int uflash_devinit(struct platform_device *op, struct device_node *dp)
{
struct uflash_dev *up;
@@ -108,7 +108,7 @@ int uflash_devinit(struct of_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit uflash_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -121,7 +121,7 @@ static int __devinit uflash_probe(struct of_device *op, const struct of_device_i
return uflash_devinit(op, dp);
}
-static int __devexit uflash_remove(struct of_device *op)
+static int __devexit uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -160,12 +160,12 @@ static struct of_platform_driver uflash_driver = {
static int __init uflash_init(void)
{
- return of_register_driver(&uflash_driver, &of_bus_type);
+ return of_register_platform_driver(&uflash_driver);
}
static void __exit uflash_exit(void)
{
- of_unregister_driver(&uflash_driver);
+ of_unregister_platform_driver(&uflash_driver);
}
module_init(uflash_init);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 03e19c1965c..62e68707b07 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -1,7 +1,21 @@
/*
- * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ * Interface to Linux block layer for MTD 'translation layers'.
*
- * Interface to Linux 2.5 block layer for MTD 'translation layers'.
+ * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -15,6 +29,7 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
@@ -73,14 +88,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
buf = req->buffer;
- if (!blk_fs_request(req))
+ if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
- if (blk_discard_rq(req))
+ if (req->cmd_flags & REQ_DISCARD)
return tr->discard(dev, block, nsect);
switch(rq_data_dir(req)) {
@@ -164,8 +179,9 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
int ret;
if (!dev)
- return -ERESTARTSYS;
+ return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ lock_kernel();
mutex_lock(&dev->lock);
if (!dev->mtd) {
@@ -182,6 +198,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
+ unlock_kernel();
return ret;
}
@@ -193,6 +210,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
if (!dev)
return ret;
+ lock_kernel();
mutex_lock(&dev->lock);
/* Release one reference, we sure its not the last one here*/
@@ -205,6 +223,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
+ unlock_kernel();
return ret;
}
@@ -237,6 +256,7 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
if (!dev)
return ret;
+ lock_kernel();
mutex_lock(&dev->lock);
if (!dev->mtd)
@@ -245,11 +265,13 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd) {
case BLKFLSBUF:
ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
+ break;
default:
ret = -ENOTTY;
}
unlock:
mutex_unlock(&dev->lock);
+ unlock_kernel();
blktrans_dev_put(dev);
return ret;
}
@@ -258,7 +280,7 @@ static const struct block_device_operations mtd_blktrans_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
- .locked_ioctl = blktrans_ioctl,
+ .ioctl = blktrans_ioctl,
.getgeo = blktrans_getgeo,
};
@@ -409,13 +431,14 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
BUG();
}
- /* Stop new requests to arrive */
- del_gendisk(old->disk);
-
if (old->disk_attributes)
sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
old->disk_attributes);
+ /* Stop new requests to arrive */
+ del_gendisk(old->disk);
+
+
/* Stop the thread */
kthread_stop(old->thread);
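
The mtd_blkdevs changes above track block-layer API churn (req->cmd_type and REQ_DISCARD instead of the old predicates, .ioctl instead of .locked_ioctl) and push the big kernel lock down into open, release and ioctl. A small userspace model of just the request-filtering order in do_blktrans_request(); the enum, struct and the value -5 (standing in for -EIO) are simplified stand-ins for the block layer types.

#include <stdio.h>

enum cmd_type { REQ_TYPE_FS, REQ_TYPE_OTHER };

struct request {
	enum cmd_type cmd_type;
	int discard;                 /* models req->cmd_flags & REQ_DISCARD */
	unsigned long pos, sectors;
};

static int handle_request(const struct request *req, unsigned long capacity)
{
	if (req->cmd_type != REQ_TYPE_FS)
		return -5;                     /* -EIO: not a filesystem request */
	if (req->pos + req->sectors > capacity)
		return -5;                     /* -EIO: past the end of the device */
	if (req->discard)
		return 0;                      /* would call tr->discard() */
	return 0;                              /* read or write path */
}

int main(void)
{
	struct request rd  = { REQ_TYPE_FS, 0, 0, 8 };
	struct request bad = { REQ_TYPE_OTHER, 0, 0, 8 };

	printf("%d %d\n", handle_request(&rd, 1024), handle_request(&bad, 1024));
	return 0;
}
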
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index e6edbec609f..1e74ad96104 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -1,8 +1,23 @@
/*
* Direct MTD block device access
*
- * (C) 2000-2003 Nicolas Pitre <nico@fluxnic.net>
- * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
*/
#include <linux/fs.h>
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index d0d3f79f9d0..795a8c0a05b 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -1,7 +1,22 @@
/*
- * (C) 2003 David Woodhouse <dwmw2@infradead.org>
- *
* Simple read-only (writable only for RAM) mtdblock driver
+ *
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
*/
#include <linux/init.h>
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 91c8013cf0d..a825002123c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1,5 +1,19 @@
/*
- * Character-device access to raw MTD devices.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -18,7 +32,7 @@
#include <linux/mount.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
+#include <linux/mtd/map.h>
#include <asm/uaccess.h>
@@ -675,6 +689,20 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
+ case MEMISLOCKED:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ if (!mtd->is_locked)
+ ret = -EOPNOTSUPP;
+ else
+ ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+ break;
+ }
+
/* Legacy interface */
case MEMGETOOBSEL:
{
@@ -950,9 +978,34 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ struct map_info *map = mtd->priv;
+ unsigned long start;
+ unsigned long off;
+ u32 len;
+
+ if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
+ off = vma->vm_pgoff << PAGE_SHIFT;
+ start = map->phys;
+ len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
+ start &= PAGE_MASK;
+ if ((vma->vm_end - vma->vm_start + off) > len)
+ return -EINVAL;
+
+ off += start;
+ vma->vm_pgoff = off >> PAGE_SHIFT;
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
+#ifdef pgprot_noncached
+ if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
- if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
return 0;
+ }
return -ENOSYS;
#else
return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
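
mtd_mmap() above gains a real implementation for RAM/ROM devices: the physical window from the map_info is page-aligned, the requested span is checked against it, and io_remap_pfn_range() establishes the mapping (non-cached when O_DSYNC is set or the range sits above high_memory). The bounds check alone, as a standalone program with a page size fixed at 4 KiB purely for illustration:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* 1 when the requested mapping fits inside the page-aligned flash window */
static int mmap_fits(unsigned long phys, unsigned long map_size,
                     unsigned long vm_len, unsigned long pgoff)
{
	unsigned long off = pgoff * PAGE_SIZE;
	unsigned long len = PAGE_ALIGN((phys & ~PAGE_MASK) + map_size);

	return (vm_len + off) <= len;
}

int main(void)
{
	printf("%d %d\n",
	       mmap_fits(0x80000000UL, 0x100000, 0x100000, 0),  /* exactly fits */
	       mmap_fits(0x80000000UL, 0x100000, 0x100000, 1)); /* one page past */
	return 0;
}
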
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 7e075621bbf..bf8de094310 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -1,11 +1,25 @@
/*
* MTD device concatenation layer
*
- * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
*
* NAND support by Christian Gan <cgan@iders.ca>
*
- * This code is GPL
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
*/
#include <linux/kernel.h>
@@ -540,10 +554,12 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- err = subdev->lock(subdev, ofs, size);
-
- if (err)
- break;
+ if (subdev->lock) {
+ err = subdev->lock(subdev, ofs, size);
+ if (err)
+ break;
+ } else
+ err = -EOPNOTSUPP;
len -= size;
if (len == 0)
@@ -578,10 +594,12 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- err = subdev->unlock(subdev, ofs, size);
-
- if (err)
- break;
+ if (subdev->unlock) {
+ err = subdev->unlock(subdev, ofs, size);
+ if (err)
+ break;
+ } else
+ err = -EOPNOTSUPP;
len -= size;
if (len == 0)
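
concat_lock() and concat_unlock() above now check that the sub-device actually provides the operation before calling it and report -EOPNOTSUPP when it is absent, instead of dereferencing a NULL function pointer. The guard pattern in isolation, with illustrative stand-ins for the MTD structures:

#include <stdio.h>

#define EOPNOTSUPP 95                    /* illustrative; matches asm-generic errno */

struct subdev {
	int (*lock)(struct subdev *s, long ofs, long len);
};

static int lock_one(struct subdev *s, long ofs, long len)
{
	if (s->lock)
		return s->lock(s, ofs, len);
	return -EOPNOTSUPP;              /* sub-device has no lock operation */
}

static int real_lock(struct subdev *s, long ofs, long len)
{
	(void)s; (void)ofs; (void)len;
	return 0;
}

int main(void)
{
	struct subdev with = { real_lock }, without = { NULL };

	printf("%d %d\n", lock_one(&with, 0, 64), lock_one(&without, 0, 64));
	return 0;
}
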
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a1b8b70d2d0..527cebf58da 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -2,9 +2,23 @@
* Core registration and callback routines for MTD
* drivers and users.
*
- * bdi bits are:
- * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2006 Red Hat UK Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
*/
#include <linux/module.h>
@@ -17,7 +31,6 @@
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
-#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 328313c3dcc..1ee72f3f051 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -1,7 +1,7 @@
/*
* MTD Oops/Panic logger
*
- * Copyright (C) 2007 Nokia Corporation. All rights reserved.
+ * Copyright © 2007 Nokia Corporation. All rights reserved.
*
* Author: Richard Purdie <rpurdie@openedhand.com>
*
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index b8043a9ba32..dc655856887 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -1,12 +1,24 @@
/*
* Simple MTD partitioning layer
*
- * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
*
- * This code is GPL
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
- * added support for read_oob, write_oob
*/
#include <linux/module.h>
@@ -17,7 +29,6 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <linux/mtd/compatmac.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -264,6 +275,14 @@ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return part->master->unlock(part->master, ofs + part->offset, len);
}
+static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+ return part->master->is_locked(part->master, ofs + part->offset, len);
+}
+
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
@@ -402,6 +421,8 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.lock = part_lock;
if (master->unlock)
slave->mtd.unlock = part_unlock;
+ if (master->is_locked)
+ slave->mtd.is_locked = part_is_locked;
if (master->block_isbad)
slave->mtd.block_isbad = part_block_isbad;
if (master->block_markbad)
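
part_is_locked() above follows the existing part_lock()/part_unlock() pattern: reject ranges that leave the partition, then forward to the master device with the offset shifted by the partition start. A standalone sketch of that translation; the types, sizes and errno value are simplified stand-ins.

#include <stdio.h>

#define EINVAL 22

struct master_dev { long size; };
struct part_dev   { struct master_dev *master; long offset, size; };

static int master_is_locked(struct master_dev *m, long ofs, long len)
{
	(void)m; (void)ofs; (void)len;
	return 0;                             /* 0: unlocked in this toy model */
}

static int part_is_locked(struct part_dev *p, long ofs, long len)
{
	if (ofs + len > p->size)
		return -EINVAL;               /* range leaves the partition */
	return master_is_locked(p->master, p->offset + ofs, len);
}

int main(void)
{
	struct master_dev m = { 1L << 20 };
	struct part_dev p = { &m, 0x40000, 0x20000 };

	printf("%d %d\n", part_is_locked(&p, 0, 0x1000),
	                  part_is_locked(&p, 0x1f000, 0x2000));
	return 0;
}
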
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index bd9a443ccf6..38e2ab07e7a 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -1,6 +1,8 @@
/* MTD-based superblock management
*
* Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
* Written by: David Howells <dhowells@redhat.com>
* David Woodhouse <dwmw2@infradead.org>
*
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index ffc3720929f..8b4b67c8a39 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -37,7 +37,6 @@ config MTD_SM_COMMON
config MTD_NAND_MUSEUM_IDS
bool "Enable chip ids for obsolete ancient NAND devices"
- depends on MTD_NAND
default n
help
Enable this option only when your board has first generation
@@ -61,6 +60,7 @@ config MTD_NAND_DENALI
config MTD_NAND_DENALI_SCRATCH_REG_ADDR
hex "Denali NAND size scratch register address"
default "0xFF108018"
+ depends on MTD_NAND_DENALI
help
Some platforms place the NAND chip size in a scratch register
because (some versions of) the driver aren't able to automatically
@@ -101,13 +101,13 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2 and OMAP3"
- depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3)
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
config MTD_NAND_OMAP_PREFETCH
bool "GPMC prefetch support for NAND Flash device"
- depends on MTD_NAND && MTD_NAND_OMAP2
+ depends on MTD_NAND_OMAP2
default y
help
The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
@@ -146,7 +146,7 @@ config MTD_NAND_AU1550
config MTD_NAND_BF5XX
tristate "Blackfin on-chip NAND Flash Controller driver"
- depends on (BF54x || BF52x) && MTD_NAND
+ depends on BF54x || BF52x
help
This enables the Blackfin on-chip NAND flash controller
@@ -236,7 +236,7 @@ config MTD_NAND_S3C2410_CLKSTOP
config MTD_NAND_BCM_UMI
tristate "NAND Flash support for BCM Reference Boards"
- depends on ARCH_BCMRING && MTD_NAND
+ depends on ARCH_BCMRING
help
This enables the NAND flash controller on the BCM UMI block.
@@ -395,7 +395,7 @@ endchoice
config MTD_NAND_PXA3xx
tristate "Support for NAND flash devices on PXA3xx"
- depends on MTD_NAND && (PXA3xx || ARCH_MMP)
+ depends on PXA3xx || ARCH_MMP
help
This enables the driver for the NAND flash device found on
PXA3xx processors
@@ -409,18 +409,18 @@ config MTD_NAND_PXA3xx_BUILTIN
config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
- depends on MTD_NAND && MACH_ARMCORE
+ depends on MACH_ARMCORE
config MTD_NAND_PASEMI
tristate "NAND support for PA Semi PWRficient"
- depends on MTD_NAND && PPC_PASEMI
+ depends on PPC_PASEMI
help
Enables support for NAND Flash interface on PA Semi PWRficient
based boards
config MTD_NAND_TMIO
tristate "NAND Flash device on Toshiba Mobile IO Controller"
- depends on MTD_NAND && MFD_TMIO
+ depends on MFD_TMIO
help
Support for NAND flash connected to a Toshiba Mobile IO
Controller in some PDAs, including the Sharp SL6000x.
@@ -434,7 +434,6 @@ config MTD_NAND_NANDSIM
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
- depends on MTD_NAND
help
This implements a generic NAND driver for on-SOC platform
devices. You will need to provide platform-specific functions
@@ -442,14 +441,14 @@ config MTD_NAND_PLATFORM
config MTD_ALAUDA
tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1"
- depends on MTD_NAND && USB
+ depends on USB
help
These two (and possibly other) Alauda-based cardreaders for
SmartMedia and xD allow raw flash access.
config MTD_NAND_ORION
tristate "NAND Flash support for Marvell Orion SoC"
- depends on PLAT_ORION && MTD_NAND
+ depends on PLAT_ORION
help
This enables the NAND flash controller on Orion machines.
@@ -458,7 +457,7 @@ config MTD_NAND_ORION
config MTD_NAND_FSL_ELBC
tristate "NAND support for Freescale eLBC controllers"
- depends on MTD_NAND && PPC_OF
+ depends on PPC_OF
help
Various Freescale chips, including the 8313, include a NAND Flash
Controller Module with built-in hardware ECC capabilities.
@@ -467,7 +466,7 @@ config MTD_NAND_FSL_ELBC
config MTD_NAND_FSL_UPM
tristate "Support for NAND on Freescale UPM"
- depends on MTD_NAND && (PPC_83xx || PPC_85xx)
+ depends on PPC_83xx || PPC_85xx
select FSL_LBC
help
Enables support for NAND Flash chips wired onto Freescale PowerPC
@@ -482,7 +481,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
+ depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51
help
This enables the driver for the NAND flash controller on the
MXC processors.
@@ -495,7 +494,7 @@ config MTD_NAND_NOMADIK
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
- depends on MTD_NAND && (SUPERH || ARCH_SHMOBILE)
+ depends on SUPERH || ARCH_SHMOBILE
help
Several Renesas SuperH CPU has FLCTL. This option enables support
for NAND Flash using FLCTL.
@@ -515,7 +514,7 @@ config MTD_NAND_TXX9NDFMC
config MTD_NAND_SOCRATES
tristate "Support for NAND on Socrates board"
- depends on MTD_NAND && SOCRATES
+ depends on SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
@@ -526,4 +525,10 @@ config MTD_NAND_NUC900
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
+config MTD_NAND_JZ4740
+ tristate "Support for JZ4740 SoC NAND controller"
+ depends on MACH_JZ4740
+ help
+ Enables support for NAND Flash on JZ4740 SoC based boards.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index e8ab884ba47..ac83dcdac5d 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -46,5 +46,6 @@ obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
+obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 04d30887ca7..ccce0f03b5d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -364,7 +364,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
}
}
-#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 2974995e194..6fbeefa3a76 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -20,9 +20,6 @@
* - DMA supported in ECC_HW
* - YAFFS tested as rootfs in both ECC_HW and ECC_SW
*
- * TODO:
- * Enable JFFS2 over NAND as rootfs
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -206,7 +203,7 @@ static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
if (ctrl & NAND_CLE)
bfin_write_NFC_CMD(cmd);
- else
+ else if (ctrl & NAND_ALE)
bfin_write_NFC_ADDR(cmd);
SSYNC();
}
@@ -218,9 +215,9 @@ static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
*/
static int bf5xx_nand_devready(struct mtd_info *mtd)
{
- unsigned short val = bfin_read_NFC_IRQSTAT();
+ unsigned short val = bfin_read_NFC_STAT();
- if ((val & NBUSYIRQ) == NBUSYIRQ)
+ if ((val & NBUSY) == NBUSY)
return 1;
else
return 0;
@@ -317,18 +314,16 @@ static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat,
static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
- struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
- struct bf5xx_nand_platform *plat = info->platform;
- unsigned short page_size = (plat->page_size ? 512 : 256);
+ struct nand_chip *chip = mtd->priv;
int ret;
ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
- /* If page size is 512, correct second 256 bytes */
- if (page_size == 512) {
+ /* If ecc size is 512, correct second 256 bytes */
+ if (chip->ecc.size == 512) {
dat += 256;
- read_ecc += 8;
- calc_ecc += 8;
+ read_ecc += 3;
+ calc_ecc += 3;
ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
}
@@ -344,13 +339,12 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_code)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
- struct bf5xx_nand_platform *plat = info->platform;
- u16 page_size = (plat->page_size ? 512 : 256);
+ struct nand_chip *chip = mtd->priv;
u16 ecc0, ecc1;
u32 code[2];
u8 *p;
- /* first 4 bytes ECC code for 256 page size */
+ /* first 3 bytes ECC code for 256 page size */
ecc0 = bfin_read_NFC_ECC0();
ecc1 = bfin_read_NFC_ECC1();
@@ -358,12 +352,11 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
- /* first 3 bytes in ecc_code for 256 page size */
p = (u8 *) code;
memcpy(ecc_code, p, 3);
- /* second 4 bytes ECC code for 512 page size */
- if (page_size == 512) {
+ /* second 3 bytes ECC code for 512 ecc size */
+ if (chip->ecc.size == 512) {
ecc0 = bfin_read_NFC_ECC2();
ecc1 = bfin_read_NFC_ECC3();
code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
@@ -483,8 +476,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
uint8_t *buf, int is_read)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
- struct bf5xx_nand_platform *plat = info->platform;
- unsigned short page_size = (plat->page_size ? 512 : 256);
+ struct nand_chip *chip = mtd->priv;
unsigned short val;
dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n",
@@ -498,10 +490,10 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
*/
if (is_read)
invalidate_dcache_range((unsigned int)buf,
- (unsigned int)(buf + page_size));
+ (unsigned int)(buf + chip->ecc.size));
else
flush_dcache_range((unsigned int)buf,
- (unsigned int)(buf + page_size));
+ (unsigned int)(buf + chip->ecc.size));
/*
* This register must be written before each page is
@@ -510,6 +502,8 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
*/
bfin_write_NFC_RST(ECC_RST);
SSYNC();
+ while (bfin_read_NFC_RST() & ECC_RST)
+ cpu_relax();
disable_dma(CH_NFC);
clear_dma_irqstat(CH_NFC);
@@ -520,13 +514,13 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* The DMAs have different size on BF52x and BF54x */
#ifdef CONFIG_BF52x
- set_dma_x_count(CH_NFC, (page_size >> 1));
+ set_dma_x_count(CH_NFC, (chip->ecc.size >> 1));
set_dma_x_modify(CH_NFC, 2);
val = DI_EN | WDSIZE_16;
#endif
#ifdef CONFIG_BF54x
- set_dma_x_count(CH_NFC, (page_size >> 2));
+ set_dma_x_count(CH_NFC, (chip->ecc.size >> 2));
set_dma_x_modify(CH_NFC, 4);
val = DI_EN | WDSIZE_32;
#endif
@@ -548,12 +542,11 @@ static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
uint8_t *buf, int len)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
- struct bf5xx_nand_platform *plat = info->platform;
- unsigned short page_size = (plat->page_size ? 512 : 256);
+ struct nand_chip *chip = mtd->priv;
dev_dbg(info->device, "mtd->%p, buf->%p, int %d\n", mtd, buf, len);
- if (len == page_size)
+ if (len == chip->ecc.size)
bf5xx_nand_dma_rw(mtd, buf, 1);
else
bf5xx_nand_read_buf(mtd, buf, len);
@@ -563,17 +556,32 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
const uint8_t *buf, int len)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
- struct bf5xx_nand_platform *plat = info->platform;
- unsigned short page_size = (plat->page_size ? 512 : 256);
+ struct nand_chip *chip = mtd->priv;
dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
- if (len == page_size)
+ if (len == chip->ecc.size)
bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0);
else
bf5xx_nand_write_buf(mtd, buf, len);
}
+static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
+ bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
/*
* System initialization functions
*/
@@ -627,15 +635,14 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
/* setup NFC_CTL register */
dev_info(info->device,
- "page_size=%d, data_width=%d, wr_dly=%d, rd_dly=%d\n",
- (plat->page_size ? 512 : 256),
+ "data_width=%d, wr_dly=%d, rd_dly=%d\n",
(plat->data_width ? 16 : 8),
plat->wr_dly, plat->rd_dly);
- val = (plat->page_size << NFC_PG_SIZE_OFFSET) |
+ val = (1 << NFC_PG_SIZE_OFFSET) |
(plat->data_width << NFC_NWIDTH_OFFSET) |
(plat->rd_dly << NFC_RDDLY_OFFSET) |
- (plat->rd_dly << NFC_WRDLY_OFFSET);
+ (plat->wr_dly << NFC_WRDLY_OFFSET);
dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val);
bfin_write_NFC_CTL(val);
@@ -675,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
- struct mtd_info *mtd = NULL;
platform_set_drvdata(pdev, NULL);
@@ -683,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
* and their partitions, then go through freeing the
* resources used
*/
- mtd = &info->mtd;
- if (mtd) {
- nand_release(mtd);
- kfree(mtd);
- }
+ nand_release(&info->mtd);
peripheral_free_list(bfin_nfc_pin_req);
bf5xx_nand_dma_remove(info);
@@ -698,6 +700,33 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
return 0;
}
+static int bf5xx_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ if (hardware_ecc) {
+ /*
+ * for nand with page size > 512B, think it as several sections with 512B
+ */
+ if (likely(mtd->writesize >= 512)) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
+ SSYNC();
+ }
+ }
+
+ return nand_scan_tail(mtd);
+}
+
/*
* bf5xx_nand_probe
*
@@ -783,27 +812,20 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
chip->badblock_pattern = &bootrom_bbt;
chip->ecc.layout = &bootrom_ecclayout;
#endif
-
- if (plat->page_size == NFC_PG_SIZE_256) {
- chip->ecc.bytes = 3;
- chip->ecc.size = 256;
- } else if (plat->page_size == NFC_PG_SIZE_512) {
- chip->ecc.bytes = 6;
- chip->ecc.size = 512;
- }
-
chip->read_buf = bf5xx_nand_dma_read_buf;
chip->write_buf = bf5xx_nand_dma_write_buf;
chip->ecc.calculate = bf5xx_nand_calculate_ecc;
chip->ecc.correct = bf5xx_nand_correct_data;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.hwctl = bf5xx_nand_enable_hwecc;
+ chip->ecc.read_page_raw = bf5xx_nand_read_page_raw;
+ chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
} else {
chip->ecc.mode = NAND_ECC_SOFT;
}
/* scan hardware nand chip and setup mtd info data struct */
- if (nand_scan(mtd, 1)) {
+ if (bf5xx_nand_scan(mtd)) {
err = -ENXIO;
goto out_err_nand_scan;
}
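
The Blackfin NFC hunks above stop deriving the ECC step from the platform page_size and key everything off chip->ecc.size instead; the hardware produces 3 ECC bytes per 256-byte chunk, so a 512-byte step is corrected as two 256-byte halves with the ECC pointers advanced by 3 (the old code advanced them by 8). A simplified model of that stepping, with correct_256() as a stand-in for the real corrector:

#include <stdio.h>

/* stand-in for bf5xx_nand_correct_data_256(): 3 ECC bytes cover 256 data bytes */
static int correct_256(unsigned char *dat,
                       unsigned char *read_ecc, unsigned char *calc_ecc)
{
	(void)dat; (void)read_ecc; (void)calc_ecc;
	return 0;                            /* 0: clean, non-zero: errors seen */
}

static int correct_step(unsigned char *dat,
                        unsigned char *read_ecc, unsigned char *calc_ecc,
                        int ecc_size)
{
	int ret = correct_256(dat, read_ecc, calc_ecc);

	if (ecc_size == 512)                 /* second 256-byte half of the step */
		ret |= correct_256(dat + 256, read_ecc + 3, calc_ecc + 3);

	return ret;
}

int main(void)
{
	unsigned char dat[512] = { 0 }, recc[6] = { 0 }, cecc[6] = { 0 };

	printf("%d\n", correct_step(dat, recc, cecc, 512));
	return 0;
}
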
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 9c9d893affe..2ac7367afe7 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -311,7 +311,9 @@ static int nand_davinci_correct_4bit(struct mtd_info *mtd,
unsigned short ecc10[8];
unsigned short *ecc16;
u32 syndrome[4];
+ u32 ecc_state;
unsigned num_errors, corrected;
+ unsigned long timeo = jiffies + msecs_to_jiffies(100);
/* All bytes 0xff? It's an erased page; ignore its ECC. */
for (i = 0; i < 10; i++) {
@@ -361,6 +363,21 @@ compare:
*/
davinci_nand_writel(info, NANDFCR_OFFSET,
davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+
+ /*
+ * ECC_STATE field reads 0x3 (Error correction complete) immediately
+ * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
+ * begin trying to poll for the state, you may fall right out of your
+ * loop without any of the correction calculations having taken place.
+ * The recommendation from the hardware team is to wait till ECC_STATE
+ * reads less than 4, which means ECC HW has entered correction state.
+ */
+ do {
+ ecc_state = (davinci_nand_readl(info,
+ NANDFSR_OFFSET) >> 8) & 0x0f;
+ cpu_relax();
+ } while ((ecc_state < 4) && time_before(jiffies, timeo));
+
for (;;) {
u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
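
The davinci hunk above bounds the new ECC_STATE poll with a 100 ms deadline via time_before(jiffies, timeo) rather than spinning forever. A generic userspace sketch of that bounded-poll shape, using clock() purely for illustration; the real condition is the ECC_STATE field read from NANDFSR and the real relax primitive is cpu_relax().

#include <stdio.h>
#include <time.h>

static int hw_ready(void) { return 1; }        /* stand-in for the NANDFSR check */

static int poll_with_deadline(int (*done)(void), double timeout_s)
{
	clock_t deadline = clock() + (clock_t)(timeout_s * CLOCKS_PER_SEC);

	while (!done() && clock() < deadline)
		;                              /* cpu_relax() in the driver */

	return done();                         /* 1: condition met, 0: timed out */
}

int main(void)
{
	printf("%d\n", poll_with_deadline(hw_ready, 0.1));
	return 0;
}
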
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3dfda9cc677..532fe07cf88 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -29,15 +30,15 @@
MODULE_LICENSE("GPL");
-/* We define a module parameter that allows the user to override
+/* We define a module parameter that allows the user to override
* the hardware and decide what timing mode should be used.
*/
#define NAND_DEFAULT_TIMINGS -1
static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
module_param(onfi_timing_mode, int, S_IRUGO);
-MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates"
- " use default timings");
+MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
+ " -1 indicates use default timings");
#define DENALI_NAND_NAME "denali-nand"
@@ -54,13 +55,13 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates
INTR_STATUS0__RST_COMP | \
INTR_STATUS0__ERASE_COMP)
-/* indicates whether or not the internal value for the flash bank is
- valid or not */
-#define CHIP_SELECT_INVALID -1
+/* indicates whether or not the internal value for the flash bank is
+ * valid or not */
+#define CHIP_SELECT_INVALID -1
#define SUPPORT_8BITECC 1
-/* This macro divides two integers and rounds fractional values up
+/* This macro divides two integers and rounds fractional values up
* to the nearest integer value. */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
@@ -70,7 +71,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
/* These constants are defined by the driver to enable common driver
- configuration options. */
+ * configuration options. */
#define SPARE_ACCESS 0x41
#define MAIN_ACCESS 0x42
#define MAIN_SPARE_ACCESS 0x43
@@ -83,7 +84,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates
#define ADDR_CYCLE 1
#define STATUS_CYCLE 2
-/* this is a helper macro that allows us to
+/* this is a helper macro that allows us to
* format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
@@ -95,73 +96,59 @@ static const struct pci_device_id denali_pci_ids[] = {
};
-/* these are static lookup tables that give us easy access to
- registers in the NAND controller.
+/* these are static lookup tables that give us easy access to
+ * registers in the NAND controller.
*/
-static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1,
- INTR_STATUS2,
+static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1,
+ INTR_STATUS2,
INTR_STATUS3};
static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
+ DEVICE_RESET__BANK1,
+ DEVICE_RESET__BANK2,
+ DEVICE_RESET__BANK3};
static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
+ INTR_STATUS1__TIME_OUT,
+ INTR_STATUS2__TIME_OUT,
+ INTR_STATUS3__TIME_OUT};
static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
-
-/* specifies the debug level of the driver */
-static int nand_debug_level = 0;
+ INTR_STATUS1__RST_COMP,
+ INTR_STATUS2__RST_COMP,
+ INTR_STATUS3__RST_COMP};
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
-static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
-static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
+static uint32_t wait_for_irq(struct denali_nand_info *denali,
+ uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali,
+ uint32_t int_mask);
static uint32_t read_interrupt_status(struct denali_nand_info *denali);
-#define DEBUG_DENALI 0
-
-/* This is a wrapper for writing to the denali registers.
- * this allows us to create debug information so we can
- * observe how the driver is programming the device.
- * it uses standard linux convention for (val, addr) */
-static void denali_write32(uint32_t value, void *addr)
-{
- iowrite32(value, addr);
-
-#if DEBUG_DENALI
- printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
-#endif
-}
-
-/* Certain operations for the denali NAND controller use an indexed mode to read/write
- data. The operation is performed by writing the address value of the command to
- the device memory followed by the data. This function abstracts this common
- operation.
+/* Certain operations for the denali NAND controller use
+ * an indexed mode to read/write data. The operation is
+ * performed by writing the address value of the command
+ * to the device memory followed by the data. This function
+ * abstracts this common operation.
*/
-static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
+static void index_addr(struct denali_nand_info *denali,
+ uint32_t address, uint32_t data)
{
- denali_write32(address, denali->flash_mem);
- denali_write32(data, denali->flash_mem + 0x10);
+ iowrite32(address, denali->flash_mem);
+ iowrite32(data, denali->flash_mem + 0x10);
}
/* Perform an indexed read of the device */
static void index_addr_read_data(struct denali_nand_info *denali,
uint32_t address, uint32_t *pdata)
{
- denali_write32(address, denali->flash_mem);
+ iowrite32(address, denali->flash_mem);
*pdata = ioread32(denali->flash_mem + 0x10);
}
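
index_addr() and index_addr_read_data() above capture the controller's indexed access mode: the command/address value is written at the base of the memory-mapped window and the payload moves through the register 0x10 bytes above it, now with plain iowrite32()/ioread32() instead of the removed debug wrapper. A trivial standalone model of that two-register handshake, with variables standing in for the mapped registers:

#include <stdio.h>
#include <stdint.h>

/* the two "registers": flash_mem and flash_mem + 0x10 in the driver */
static uint32_t addr_reg, data_reg;

static void index_addr(uint32_t address, uint32_t data)
{
	addr_reg = address;          /* iowrite32(address, flash_mem)        */
	data_reg = data;             /* iowrite32(data,    flash_mem + 0x10) */
}

static uint32_t index_addr_read_data(uint32_t address)
{
	addr_reg = address;          /* select what the data register returns */
	return data_reg;             /* ioread32(flash_mem + 0x10)            */
}

int main(void)
{
	index_addr(0x100, 0xdeadbeefu);
	printf("0x%x\n", (unsigned)index_addr_read_data(0x100));
	return 0;
}
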
-/* We need to buffer some data for some of the NAND core routines.
+/* We need to buffer some data for some of the NAND core routines.
* The operations manage buffering that data. */
static void reset_buf(struct denali_nand_info *denali)
{
@@ -182,75 +169,70 @@ static void read_status(struct denali_nand_info *denali)
/* initialize the data buffer to store status */
reset_buf(denali);
- /* initiate a device status read */
- cmd = MODE_11 | BANK(denali->flash_bank);
- index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
- denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);
-
- /* update buffer with status value */
- write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
-
-#if DEBUG_DENALI
- printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
-#endif
+ cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
+ if (cmd)
+ write_byte_to_buf(denali, NAND_STATUS_WP);
+ else
+ write_byte_to_buf(denali, 0);
}
/* resets a specific device connected to the core */
static void reset_bank(struct denali_nand_info *denali)
{
uint32_t irq_status = 0;
- uint32_t irq_mask = reset_complete[denali->flash_bank] |
+ uint32_t irq_mask = reset_complete[denali->flash_bank] |
operation_timeout[denali->flash_bank];
int bank = 0;
clear_interrupts(denali);
bank = device_reset_banks[denali->flash_bank];
- denali_write32(bank, denali->flash_reg + DEVICE_RESET);
+ iowrite32(bank, denali->flash_reg + DEVICE_RESET);
irq_status = wait_for_irq(denali, irq_mask);
-
+
if (irq_status & operation_timeout[denali->flash_bank])
- {
- printk(KERN_ERR "reset bank failed.\n");
- }
+ dev_err(&denali->dev->dev, "reset bank failed.\n");
}
/* Reset the flash controller */
-static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
+static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
uint32_t i;
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- denali_write32(reset_complete[i] | operation_timeout[i],
+ iowrite32(reset_complete[i] | operation_timeout[i],
denali->flash_reg + intr_status_addresses[i]);
for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
- while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ iowrite32(device_reset_banks[i],
+ denali->flash_reg + DEVICE_RESET);
+ while (!(ioread32(denali->flash_reg +
+ intr_status_addresses[i]) &
(reset_complete[i] | operation_timeout[i])))
- ;
+ cpu_relax();
if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
operation_timeout[i])
- nand_dbg_print(NAND_DBG_WARN,
+ dev_dbg(&denali->dev->dev,
"NAND Reset operation timed out on bank %d\n", i);
}
for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- denali_write32(reset_complete[i] | operation_timeout[i],
+ iowrite32(reset_complete[i] | operation_timeout[i],
denali->flash_reg + intr_status_addresses[i]);
return PASS;
}
-/* this routine calculates the ONFI timing values for a given mode and programs
- * the clocking register accordingly. The mode is determined by the get_onfi_nand_para
- routine.
+/* this routine calculates the ONFI timing values for a given mode and
+ * programs the clocking register accordingly. The mode is determined by
+ * the get_onfi_nand_para routine.
*/
-static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
+static void nand_onfi_timing_set(struct denali_nand_info *denali,
+ uint16_t mode)
{
uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
@@ -272,7 +254,7 @@ static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode
uint16_t acc_clks;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -309,7 +291,7 @@ static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode
acc_clks++;
if ((data_invalid - acc_clks * CLK_X) < 2)
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
+ dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n",
__FILE__, __LINE__);
addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -337,146 +319,34 @@ static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode
(ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
acc_clks = 6;
- denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
- denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
- denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
- denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
- denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
- denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
- denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
- denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
-}
-
-/* configures the initial ECC settings for the controller */
-static void set_ecc_config(struct denali_nand_info *denali)
-{
-#if SUPPORT_8BITECC
- if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
- (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
- denali_write32(8, denali->flash_reg + ECC_CORRECTION);
-#endif
-
- if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
- == 1) {
- denali->dev_info.wECCBytesPerSector = 4;
- denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
- denali->dev_info.wNumPageSpareFlag =
- denali->dev_info.wPageSpareSize -
- denali->dev_info.wPageDataSize /
- (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
- denali->dev_info.wECCBytesPerSector
- - denali->dev_info.wSpareSkipBytes;
- } else {
- denali->dev_info.wECCBytesPerSector =
- (ioread32(denali->flash_reg + ECC_CORRECTION) &
- ECC_CORRECTION__VALUE) * 13 / 8;
- if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
- denali->dev_info.wECCBytesPerSector += 2;
- else
- denali->dev_info.wECCBytesPerSector += 1;
-
- denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
- denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
- denali->dev_info.wPageDataSize /
- (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
- denali->dev_info.wECCBytesPerSector
- - denali->dev_info.wSpareSkipBytes;
- }
+ iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
+ iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
+ iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
+ iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
+ iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+ iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+ iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+ iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}
/* queries the NAND device to see what ONFI modes it supports. */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
{
int i;
- uint16_t blks_lun_l, blks_lun_h, n_of_luns;
- uint32_t blockperlun, id;
-
- denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
-
- while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
- INTR_STATUS0__RST_COMP) |
- (ioread32(denali->flash_reg + INTR_STATUS0) &
- INTR_STATUS0__TIME_OUT)))
- ;
-
- if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
- denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
- while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) |
- (ioread32(denali->flash_reg + INTR_STATUS1) &
- INTR_STATUS1__TIME_OUT)))
- ;
-
- if (ioread32(denali->flash_reg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) {
- denali_write32(DEVICE_RESET__BANK2,
- denali->flash_reg + DEVICE_RESET);
- while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) |
- (ioread32(denali->flash_reg + INTR_STATUS2) &
- INTR_STATUS2__TIME_OUT)))
- ;
-
- if (ioread32(denali->flash_reg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) {
- denali_write32(DEVICE_RESET__BANK3,
- denali->flash_reg + DEVICE_RESET);
- while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
- INTR_STATUS3__RST_COMP) |
- (ioread32(denali->flash_reg + INTR_STATUS3) &
- INTR_STATUS3__TIME_OUT)))
- ;
- } else {
- printk(KERN_ERR "Getting a time out for bank 2!\n");
- }
- } else {
- printk(KERN_ERR "Getting a time out for bank 1!\n");
- }
- }
-
- denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
- denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
- denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
- denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);
-
- denali->dev_info.wONFIDevFeatures =
- ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
- denali->dev_info.wONFIOptCommands =
- ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
- denali->dev_info.wONFITimingMode =
- ioread32(denali->flash_reg + ONFI_TIMING_MODE);
- denali->dev_info.wONFIPgmCacheTimingMode =
- ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);
-
- n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
- blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
- blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
-
- blockperlun = (blks_lun_h << 16) | blks_lun_l;
-
- denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;
-
+	/* we don't need to do a reset here because the driver has
+	 * already reset all the banks earlier
+	 */
if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
ONFI_TIMING_MODE__VALUE))
return FAIL;
for (i = 5; i > 0; i--) {
- if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
+ if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ (0x01 << i))
break;
}
- NAND_ONFi_Timing_Mode(denali, i);
-
- index_addr(denali, MODE_11 | 0, 0x90);
- index_addr(denali, MODE_11 | 1, 0);
-
- for (i = 0; i < 3; i++)
- index_addr_read_data(denali, MODE_11 | 2, &id);
-
- nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
-
- denali->dev_info.MLCDevice = id & 0x0C;
+ nand_onfi_timing_set(denali, i);
/* By now, all the ONFI devices we know support the page cache */
/* rw feature. So here we enable the pipeline_rw_ahead feature */
@@ -486,131 +356,78 @@ static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
return PASS;
}
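
The loop above picks the highest ONFI timing mode advertised by the controller. A minimal stand-alone sketch of that selection (highest_onfi_mode is a hypothetical name, not part of the driver):

static int highest_onfi_mode(uint32_t timing_mode_reg)
{
	int mode;

	/* bit n of ONFI_TIMING_MODE set => timing mode n is supported */
	for (mode = 5; mode > 0; mode--)
		if (timing_mode_reg & (1 << mode))
			break;

	return mode;	/* 0 if only the mandatory mode 0 is advertised */
}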
-static void get_samsung_nand_para(struct denali_nand_info *denali)
+static void get_samsung_nand_para(struct denali_nand_info *denali,
+ uint8_t device_id)
{
- uint8_t no_of_planes;
- uint32_t blk_size;
- uint64_t plane_size, capacity;
- uint32_t id_bytes[5];
- int i;
-
- index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
- index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
- for (i = 0; i < 5; i++)
- index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
- id_bytes[0], id_bytes[1], id_bytes[2],
- id_bytes[3], id_bytes[4]);
-
- if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
+ if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
/* Set timing register values according to datasheet */
- denali_write32(5, denali->flash_reg + ACC_CLKS);
- denali_write32(20, denali->flash_reg + RE_2_WE);
- denali_write32(12, denali->flash_reg + WE_2_RE);
- denali_write32(14, denali->flash_reg + ADDR_2_DATA);
- denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
- denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
- denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
+ iowrite32(5, denali->flash_reg + ACC_CLKS);
+ iowrite32(20, denali->flash_reg + RE_2_WE);
+ iowrite32(12, denali->flash_reg + WE_2_RE);
+ iowrite32(14, denali->flash_reg + ADDR_2_DATA);
+ iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+ iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+ iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
}
-
- no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
- plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
- blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
- capacity = (uint64_t)128 * plane_size * no_of_planes;
-
- do_div(capacity, blk_size);
- denali->dev_info.wTotalBlocks = capacity;
}
static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
- void __iomem *scratch_reg;
uint32_t tmp;
/* Workaround to fix a controller bug which reports a wrong */
/* spare area size for some kind of Toshiba NAND device */
if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
- denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
- denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ iowrite32(tmp,
+ denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
- denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
- denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
}
-
- /* As Toshiba NAND can not provide it's block number, */
- /* so here we need user to provide the correct block */
- /* number in a scratch register before the Linux NAND */
- /* driver is loaded. If no valid value found in the scratch */
- /* register, then we use default block number value */
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (denali->dev_info.wTotalBlocks < 512)
- denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
}
-static void get_hynix_nand_para(struct denali_nand_info *denali)
+static void get_hynix_nand_para(struct denali_nand_info *denali,
+ uint8_t device_id)
{
- void __iomem *scratch_reg;
uint32_t main_size, spare_size;
- switch (denali->dev_info.wDeviceID) {
+ switch (device_id) {
case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
- denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
- denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
- denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
- main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
- spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
- denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
- denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
- denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
+ iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
+ iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 *
+ ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ spare_size = 224 *
+ ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ iowrite32(main_size,
+ denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+ iowrite32(spare_size,
+ denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
- denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
- denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
- denali->dev_info.MLCDevice = 1;
break;
default:
- nand_dbg_print(NAND_DBG_WARN,
+ dev_warn(&denali->dev->dev,
"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
"Will use default parameter values instead.\n",
- denali->dev_info.wDeviceID);
- }
-
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (denali->dev_info.wTotalBlocks < 512)
- denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
+ device_id);
}
}
/* determines how many NAND chips are connected to the controller. Note for
- Intel CE4100 devices we don't support more than one device.
+ * Intel CE4100 devices we don't support more than one device.
*/
static void find_valid_banks(struct denali_nand_info *denali)
{
@@ -621,9 +438,10 @@ static void find_valid_banks(struct denali_nand_info *denali)
for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
- index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
+ index_addr_read_data(denali,
+ (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
- nand_dbg_print(NAND_DBG_DEBUG,
+ dev_dbg(&denali->dev->dev,
"Return 1st ID for bank[%d]: %x\n", i, id[i]);
if (i == 0) {
@@ -637,282 +455,126 @@ static void find_valid_banks(struct denali_nand_info *denali)
}
}
- if (denali->platform == INTEL_CE4100)
- {
+ if (denali->platform == INTEL_CE4100) {
/* Platform limitations of the CE4100 device limit
* users to a single chip solution for NAND.
- * Multichip support is not enabled.
- */
- if (denali->total_used_banks != 1)
- {
- printk(KERN_ERR "Sorry, Intel CE4100 only supports "
+ * Multichip support is not enabled.
+ */
+ if (denali->total_used_banks != 1) {
+ dev_err(&denali->dev->dev,
+ "Sorry, Intel CE4100 only supports "
"a single NAND device.\n");
BUG();
}
}
- nand_dbg_print(NAND_DBG_DEBUG,
+ dev_dbg(&denali->dev->dev,
"denali->total_used_banks: %d\n", denali->total_used_banks);
}
static void detect_partition_feature(struct denali_nand_info *denali)
{
+	/* For the MRST platform, denali->fwblks is the number of
+	 * blocks occupied by firmware. The firmware lives in a
+	 * protected partition that the MTD driver has no permission
+	 * to access, so let the driver know how many blocks it
+	 * can't touch.
+	 */
if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
- denali->dev_info.wSpectraStartBlock =
+ denali->fwblks =
((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
MIN_MAX_BANK_1__MIN_VALUE) *
- denali->dev_info.wTotalBlocks)
+ denali->blksperchip)
+
(ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
MIN_BLK_ADDR_1__VALUE);
-
- denali->dev_info.wSpectraEndBlock =
- (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
- denali->dev_info.wTotalBlocks)
- +
- (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
- MAX_BLK_ADDR_1__VALUE);
-
- denali->dev_info.wTotalBlocks *= denali->total_used_banks;
-
- if (denali->dev_info.wSpectraEndBlock >=
- denali->dev_info.wTotalBlocks) {
- denali->dev_info.wSpectraEndBlock =
- denali->dev_info.wTotalBlocks - 1;
- }
-
- denali->dev_info.wDataBlockNum =
- denali->dev_info.wSpectraEndBlock -
- denali->dev_info.wSpectraStartBlock + 1;
- } else {
- denali->dev_info.wTotalBlocks *= denali->total_used_banks;
- denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
- denali->dev_info.wSpectraEndBlock =
- denali->dev_info.wTotalBlocks - 1;
- denali->dev_info.wDataBlockNum =
- denali->dev_info.wSpectraEndBlock -
- denali->dev_info.wSpectraStartBlock + 1;
- }
- } else {
- denali->dev_info.wTotalBlocks *= denali->total_used_banks;
- denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
- denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
- denali->dev_info.wDataBlockNum =
- denali->dev_info.wSpectraEndBlock -
- denali->dev_info.wSpectraStartBlock + 1;
- }
+ } else
+ denali->fwblks = SPECTRA_START_BLOCK;
+ } else
+ denali->fwblks = SPECTRA_START_BLOCK;
}
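
As a worked example of the register arithmetic above (spectra_fw_blocks is a hypothetical helper, shown only to make the computation concrete):

static uint32_t spectra_fw_blocks(uint32_t min_bank, uint32_t min_block,
				  uint32_t blksperchip)
{
	/* the protected firmware region spans the first
	 * (min_bank * blksperchip + min_block) blocks */
	return min_bank * blksperchip + min_block;
}

/* e.g. min_bank = 0, min_block = 36  ->  MTD must skip the first 36 blocks */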
-static void dump_device_info(struct denali_nand_info *denali)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
- denali->dev_info.wDeviceMaker);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
- denali->dev_info.wDeviceID);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
- denali->dev_info.wDeviceType);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
- denali->dev_info.wSpectraStartBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
- denali->dev_info.wSpectraEndBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
- denali->dev_info.wTotalBlocks);
- nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
- denali->dev_info.wPagesPerBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
- denali->dev_info.wPageSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
- denali->dev_info.wPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
- denali->dev_info.wPageSpareSize);
- nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
- denali->dev_info.wNumPageSpareFlag);
- nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
- denali->dev_info.wECCBytesPerSector);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
- denali->dev_info.wBlockSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
- denali->dev_info.wBlockDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
- denali->dev_info.wDataBlockNum);
- nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
- denali->dev_info.bPlaneNum);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
- denali->dev_info.wDeviceMainAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
- denali->dev_info.wDeviceSpareAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
- denali->dev_info.wDevicesConnected);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
- denali->dev_info.wDeviceWidth);
- nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
- denali->dev_info.wHWRevision);
- nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
- denali->dev_info.wHWFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
- denali->dev_info.wONFIDevFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
- denali->dev_info.wONFIOptCommands);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
- denali->dev_info.wONFITimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
- denali->dev_info.wONFIPgmCacheTimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
- denali->dev_info.MLCDevice ? "Yes" : "No");
- nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
- denali->dev_info.wSpareSkipBytes);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
- denali->dev_info.nBitsInPageNumber);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
- denali->dev_info.nBitsInPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
- denali->dev_info.nBitsInBlockDataSize);
-}
-
-static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
+static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
uint16_t status = PASS;
- uint8_t no_of_planes;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
- denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
- denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
- denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
- denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);
-
- denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;
+ uint32_t id_bytes[5], addr;
+ uint8_t i, maf_id, device_id;
+
+ dev_dbg(&denali->dev->dev,
+ "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+	/* Use the read-ID method to get the device ID and other
+	 * parameters. For some NAND chips, the controller can't
+	 * report the correct device ID by reading from the
+	 * DEVICE_ID register.
+	 */
+ addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, (uint32_t)addr | 0, 0x90);
+ index_addr(denali, (uint32_t)addr | 1, 0);
+ for (i = 0; i < 5; i++)
+ index_addr_read_data(denali, addr | 2, &id_bytes[i]);
+ maf_id = id_bytes[0];
+ device_id = id_bytes[1];
if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
if (FAIL == get_onfi_nand_para(denali))
return FAIL;
- } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
- get_samsung_nand_para(denali);
- } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
+ } else if (maf_id == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para(denali, device_id);
+ } else if (maf_id == 0x98) { /* Toshiba NAND */
get_toshiba_nand_para(denali);
- } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
- get_hynix_nand_para(denali);
- } else {
- denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else if (maf_id == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para(denali, device_id);
}
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ dev_info(&denali->dev->dev,
+ "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
+ "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
ioread32(denali->flash_reg + ACC_CLKS),
ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + RE_2_RE),
ioread32(denali->flash_reg + WE_2_RE),
ioread32(denali->flash_reg + ADDR_2_DATA),
ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
ioread32(denali->flash_reg + CS_SETUP_CNT));
- denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
- denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);
-
- denali->dev_info.wDeviceMainAreaSize =
- ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
- denali->dev_info.wDeviceSpareAreaSize =
- ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
-
- denali->dev_info.wPageDataSize =
- ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
-
- /* Note: When using the Micon 4K NAND device, the controller will report
- * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
- * And if force set it to 218 bytes, the controller can not work
- * correctly. So just let it be. But keep in mind that this bug may
- * cause
- * other problems in future. - Yunpeng 2008-10-10
- */
- denali->dev_info.wPageSpareSize =
- ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
-
- denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);
-
- denali->dev_info.wPageSize =
- denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
- denali->dev_info.wBlockSize =
- denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
- denali->dev_info.wBlockDataSize =
- denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;
-
- denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
- denali->dev_info.wDeviceType =
- ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);
-
- denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);
-
- denali->dev_info.wSpareSkipBytes =
- ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
- denali->dev_info.wDevicesConnected;
-
- denali->dev_info.nBitsInPageNumber =
- ilog2(denali->dev_info.wPagesPerBlock);
- denali->dev_info.nBitsInPageDataSize =
- ilog2(denali->dev_info.wPageDataSize);
- denali->dev_info.nBitsInBlockDataSize =
- ilog2(denali->dev_info.wBlockDataSize);
-
- set_ecc_config(denali);
-
- no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
- NUMBER_OF_PLANES__VALUE;
-
- switch (no_of_planes) {
- case 0:
- case 1:
- case 3:
- case 7:
- denali->dev_info.bPlaneNum = no_of_planes + 1;
- break;
- default:
- status = FAIL;
- break;
- }
-
find_valid_banks(denali);
detect_partition_feature(denali);
- dump_device_info(denali);
-
/* If the user specified to override the default timings
- * with a specific ONFI mode, we apply those changes here.
+ * with a specific ONFI mode, we apply those changes here.
*/
if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
- {
- NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
- }
+ nand_onfi_timing_set(denali, onfi_timing_mode);
return status;
}
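
The MODE_11 read-ID sequence used above (command 0x90, address 0x00, then five data reads) could be factored out as below; denali_read_id_sketch is a hypothetical helper mirroring the calls in denali_nand_timing_set() and denali_cmdfunc():

static void denali_read_id_sketch(struct denali_nand_info *denali,
				  uint32_t id_bytes[5])
{
	uint32_t addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	int i;

	index_addr(denali, addr | 0, 0x90);	/* READID command cycle */
	index_addr(denali, addr | 1, 0);	/* address cycle 0x00 */
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
}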
-static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
+static void denali_set_intr_modes(struct denali_nand_info *denali,
uint16_t INT_ENABLE)
{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
if (INT_ENABLE)
- denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+ iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
else
- denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+ iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
}
/* validation function to verify that the controlling software is making
- a valid request
+ * a valid request
*/
static inline bool is_flash_bank_valid(int flash_bank)
{
- return (flash_bank >= 0 && flash_bank < 4);
+ return (flash_bank >= 0 && flash_bank < 4);
}
static void denali_irq_init(struct denali_nand_info *denali)
@@ -920,49 +582,51 @@ static void denali_irq_init(struct denali_nand_info *denali)
uint32_t int_mask = 0;
/* Disable global interrupts */
- NAND_LLD_Enable_Disable_Interrupts(denali, false);
+ denali_set_intr_modes(denali, false);
int_mask = DENALI_IRQ_ALL;
/* Clear all status bits */
- denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
- denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
- denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
- denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0);
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1);
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
denali_irq_enable(denali, int_mask);
}
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
- NAND_LLD_Enable_Disable_Interrupts(denali, false);
+ denali_set_intr_modes(denali, false);
free_irq(irqnum, denali);
}
-static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
+static void denali_irq_enable(struct denali_nand_info *denali,
+ uint32_t int_mask)
{
- denali_write32(int_mask, denali->flash_reg + INTR_EN0);
- denali_write32(int_mask, denali->flash_reg + INTR_EN1);
- denali_write32(int_mask, denali->flash_reg + INTR_EN2);
- denali_write32(int_mask, denali->flash_reg + INTR_EN3);
+ iowrite32(int_mask, denali->flash_reg + INTR_EN0);
+ iowrite32(int_mask, denali->flash_reg + INTR_EN1);
+ iowrite32(int_mask, denali->flash_reg + INTR_EN2);
+ iowrite32(int_mask, denali->flash_reg + INTR_EN3);
}
/* This function only returns when an interrupt that this driver cares about
- * occurs. This is to reduce the overhead of servicing interrupts
+ * occurs. This is to reduce the overhead of servicing interrupts
*/
static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
{
- return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
+ return read_interrupt_status(denali) & DENALI_IRQ_ALL;
}
/* Interrupts are cleared by writing a 1 to the appropriate status bit */
-static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
+static inline void clear_interrupt(struct denali_nand_info *denali,
+ uint32_t irq_mask)
{
uint32_t intr_status_reg = 0;
intr_status_reg = intr_status_addresses[denali->flash_bank];
- denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
+ iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
}
static void clear_interrupts(struct denali_nand_info *denali)
@@ -971,11 +635,7 @@ static void clear_interrupts(struct denali_nand_info *denali)
spin_lock_irq(&denali->irq_lock);
status = read_interrupt_status(denali);
-
-#if DEBUG_DENALI
- denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
- denali->idx %= 32;
-#endif
+ clear_interrupt(denali, status);
denali->irq_status = 0x0;
spin_unlock_irq(&denali->irq_lock);
@@ -990,22 +650,9 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
return ioread32(denali->flash_reg + intr_status_reg);
}
-#if DEBUG_DENALI
-static void print_irq_log(struct denali_nand_info *denali)
-{
- int i = 0;
-
- printk("ISR debug log index = %X\n", denali->idx);
- for (i = 0; i < 32; i++)
- {
- printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
- }
-}
-#endif
-
-/* This is the interrupt service routine. It handles all interrupts
- * sent to this device. Note that on CE4100, this is a shared
- * interrupt.
+/* This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared
+ * interrupt.
*/
static irqreturn_t denali_isr(int irq, void *dev_id)
{
@@ -1015,21 +662,14 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
spin_lock(&denali->irq_lock);
- /* check to see if a valid NAND chip has
- * been selected.
+ /* check to see if a valid NAND chip has
+ * been selected.
*/
- if (is_flash_bank_valid(denali->flash_bank))
- {
- /* check to see if controller generated
+ if (is_flash_bank_valid(denali->flash_bank)) {
+ /* check to see if controller generated
* the interrupt, since this is a shared interrupt */
- if ((irq_status = denali_irq_detected(denali)) != 0)
- {
-#if DEBUG_DENALI
- denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
- denali->idx %= 32;
-
- printk("IRQ status = 0x%04x\n", irq_status);
-#endif
+ irq_status = denali_irq_detected(denali);
+ if (irq_status != 0) {
/* handle interrupt */
/* first acknowledge it */
clear_interrupt(denali, irq_status);
@@ -1054,149 +694,120 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
bool retry = false;
unsigned long timeout = msecs_to_jiffies(1000);
- do
- {
-#if DEBUG_DENALI
- printk("waiting for 0x%x\n", irq_mask);
-#endif
- comp_res = wait_for_completion_timeout(&denali->complete, timeout);
+ do {
+ comp_res =
+ wait_for_completion_timeout(&denali->complete, timeout);
spin_lock_irq(&denali->irq_lock);
intr_status = denali->irq_status;
-#if DEBUG_DENALI
- denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
- denali->idx %= 32;
-#endif
-
- if (intr_status & irq_mask)
- {
+ if (intr_status & irq_mask) {
denali->irq_status &= ~irq_mask;
spin_unlock_irq(&denali->irq_lock);
-#if DEBUG_DENALI
- if (retry) printk("status on retry = 0x%x\n", intr_status);
-#endif
/* our interrupt was detected */
break;
- }
- else
- {
- /* these are not the interrupts you are looking for -
- need to wait again */
+ } else {
+ /* these are not the interrupts you are looking for -
+ * need to wait again */
spin_unlock_irq(&denali->irq_lock);
-#if DEBUG_DENALI
- print_irq_log(denali);
- printk("received irq nobody cared: irq_status = 0x%x,"
- " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
-#endif
retry = true;
}
} while (comp_res != 0);
- if (comp_res == 0)
- {
+ if (comp_res == 0) {
/* timeout */
- printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
- intr_status, irq_mask);
+ printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ intr_status, irq_mask);
intr_status = 0;
}
return intr_status;
}
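
A condensed sketch of how wait_for_irq() is typically used elsewhere in this patch (denali_wait_sketch and its error codes are illustrative, not driver API):

static int denali_wait_sketch(struct denali_nand_info *denali,
			      uint32_t done_mask, uint32_t fail_mask)
{
	uint32_t irq_status;

	clear_interrupts(denali);
	/* ... issue the command via index_addr()/iowrite32() here ... */
	irq_status = wait_for_irq(denali, done_mask | fail_mask);
	if (irq_status == 0)
		return -ETIMEDOUT;	/* wait_for_irq() timed out */

	return (irq_status & fail_mask) ? -EIO : 0;
}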
-/* This helper function setups the registers for ECC and whether or not
- the spare area will be transfered. */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+/* This helper function sets up the registers for ECC and whether or not
+ * the spare area will be transferred. */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
bool transfer_spare)
{
- int ecc_en_flag = 0, transfer_spare_flag = 0;
+ int ecc_en_flag = 0, transfer_spare_flag = 0;
/* set ECC, transfer spare bits if needed */
ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
/* Enable spare area/ECC per user's request. */
- denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
- denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+ iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+ iowrite32(transfer_spare_flag,
+ denali->flash_reg + TRANSFER_SPARE_REG);
}
-/* sends a pipeline command operation to the controller. See the Denali NAND
- controller's user guide for more information (section 4.2.3.6).
+/* sends a pipeline command operation to the controller. See the Denali NAND
+ * controller's user guide for more information (section 4.2.3.6).
*/
-static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
- bool transfer_spare, int access_type,
- int op)
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
+ bool ecc_en,
+ bool transfer_spare,
+ int access_type,
+ int op)
{
int status = PASS;
- uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
+ uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
irq_mask = 0;
- if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
- else if (op == DENALI_WRITE) irq_mask = 0;
- else BUG();
+ if (op == DENALI_READ)
+ irq_mask = INTR_STATUS0__LOAD_COMP;
+ else if (op == DENALI_WRITE)
+ irq_mask = 0;
+ else
+ BUG();
setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
-#if DEBUG_DENALI
- spin_lock_irq(&denali->irq_lock);
- denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
- denali->idx %= 32;
- spin_unlock_irq(&denali->irq_lock);
-#endif
-
-
/* clear interrupts */
- clear_interrupts(denali);
+ clear_interrupts(denali);
addr = BANK(denali->flash_bank) | denali->page;
- if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
- {
- cmd = MODE_01 | addr;
- denali_write32(cmd, denali->flash_mem);
- }
- else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
- {
+ if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
/* read spare area */
- cmd = MODE_10 | addr;
+ cmd = MODE_10 | addr;
index_addr(denali, (uint32_t)cmd, access_type);
- cmd = MODE_01 | addr;
- denali_write32(cmd, denali->flash_mem);
- }
- else if (op == DENALI_READ)
- {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else if (op == DENALI_READ) {
/* setup page read request for access type */
- cmd = MODE_10 | addr;
+ cmd = MODE_10 | addr;
index_addr(denali, (uint32_t)cmd, access_type);
/* page 33 of the NAND controller spec indicates we should not
- use the pipeline commands in Spare area only mode. So we
+ use the pipeline commands in Spare area only mode. So we
don't.
*/
- if (access_type == SPARE_ACCESS)
- {
+ if (access_type == SPARE_ACCESS) {
cmd = MODE_01 | addr;
- denali_write32(cmd, denali->flash_mem);
- }
- else
- {
- index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
-
- /* wait for command to be accepted
- * can always use status0 bit as the mask is identical for each
+ iowrite32(cmd, denali->flash_mem);
+ } else {
+ index_addr(denali, (uint32_t)cmd,
+ 0x2000 | op | page_count);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the
+ * mask is identical for each
* bank. */
irq_status = wait_for_irq(denali, irq_mask);
- if (irq_status == 0)
- {
- printk(KERN_ERR "cmd, page, addr on timeout "
- "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
+ if (irq_status == 0) {
+ dev_err(&denali->dev->dev,
+ "cmd, page, addr on timeout "
+ "(0x%x, 0x%x, 0x%x)\n",
+ cmd, denali->page, addr);
status = FAIL;
- }
- else
- {
+ } else {
cmd = MODE_01 | addr;
- denali_write32(cmd, denali->flash_mem);
+ iowrite32(cmd, denali->flash_mem);
}
}
}
@@ -1204,36 +815,35 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
}
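
For reference, the MODE_10/MODE_01 addressing used by denali_send_pipeline_cmd() composes the bank and page into the mode bits; a minimal sketch of the spare-access case (denali_select_spare_sketch is a hypothetical name):

static void denali_select_spare_sketch(struct denali_nand_info *denali)
{
	uint32_t addr = BANK(denali->flash_bank) | denali->page;

	/* MODE_10: index/command access - mark the next access spare-only */
	index_addr(denali, MODE_10 | addr, SPARE_ACCESS);
	/* MODE_01: point the data port at the same page */
	iowrite32(MODE_01 | addr, denali->flash_mem);
}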
/* helper function that simply writes a buffer to the flash */
-static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
- int len)
+static int write_data_to_flash_mem(struct denali_nand_info *denali,
+ const uint8_t *buf,
+ int len)
{
uint32_t i = 0, *buf32;
- /* verify that the len is a multiple of 4. see comment in
- * read_data_from_flash_mem() */
+ /* verify that the len is a multiple of 4. see comment in
+ * read_data_from_flash_mem() */
BUG_ON((len % 4) != 0);
/* write the data to the flash memory */
buf32 = (uint32_t *)buf;
for (i = 0; i < len / 4; i++)
- {
- denali_write32(*buf32++, denali->flash_mem + 0x10);
- }
- return i*4; /* intent is to return the number of bytes read */
+ iowrite32(*buf32++, denali->flash_mem + 0x10);
+ return i*4; /* intent is to return the number of bytes read */
}
/* helper function that simply reads a buffer from the flash */
-static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
- int len)
+static int read_data_from_flash_mem(struct denali_nand_info *denali,
+ uint8_t *buf,
+ int len)
{
uint32_t i = 0, *buf32;
/* we assume that len will be a multiple of 4, if not
* it would be nice to know about it ASAP rather than
- * have random failures...
- *
- * This assumption is based on the fact that this
- * function is designed to be used to read flash pages,
+ * have random failures...
+ * This assumption is based on the fact that this
+ * function is designed to be used to read flash pages,
* which are typically multiples of 4...
*/
@@ -1242,10 +852,8 @@ static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *bu
/* transfer the data from the flash */
buf32 = (uint32_t *)buf;
for (i = 0; i < len / 4; i++)
- {
*buf32++ = ioread32(denali->flash_mem + 0x10);
- }
- return i*4; /* intent is to return the number of bytes read */
+ return i*4; /* intent is to return the number of bytes read */
}
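
Both helpers above move data through the 32-bit port at flash_mem + 0x10, which is why len must be a multiple of 4. A caller that could not guarantee that might bounce the tail through a temporary word, roughly as sketched here (the driver itself only ever passes page-sized lengths):

static int read_padded_sketch(struct denali_nand_info *denali,
			      uint8_t *buf, int len)
{
	uint32_t word;
	int done = 0;

	while (done + 4 <= len) {
		word = ioread32(denali->flash_mem + 0x10);
		memcpy(buf + done, &word, 4);
		done += 4;
	}
	if (done < len) {
		/* trailing partial word: read 32 bits, keep what we need */
		word = ioread32(denali->flash_mem + 0x10);
		memcpy(buf + done, &word, len - done);
	}

	return len;
}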
/* writes OOB data to the device */
@@ -1253,38 +861,26 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
+ uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
INTR_STATUS0__PROGRAM_FAIL;
int status = 0;
denali->page = page;
- if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
- DENALI_WRITE) == PASS)
- {
+ if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+ DENALI_WRITE) == PASS) {
write_data_to_flash_mem(denali, buf, mtd->oobsize);
-#if DEBUG_DENALI
- spin_lock_irq(&denali->irq_lock);
- denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
- denali->idx %= 32;
- spin_unlock_irq(&denali->irq_lock);
-#endif
-
-
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- if (irq_status == 0)
- {
- printk(KERN_ERR "OOB write failed\n");
+ if (irq_status == 0) {
+ dev_err(&denali->dev->dev, "OOB write failed\n");
status = -EIO;
}
- }
- else
- {
- printk(KERN_ERR "unable to send pipeline command\n");
- status = -EIO;
+ } else {
+ dev_err(&denali->dev->dev, "unable to send pipeline command\n");
+ status = -EIO;
}
return status;
}
@@ -1293,60 +889,45 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;
+ uint32_t irq_mask = INTR_STATUS0__LOAD_COMP,
+ irq_status = 0, addr = 0x0, cmd = 0x0;
denali->page = page;
-#if DEBUG_DENALI
- printk("read_oob %d\n", page);
-#endif
- if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
- DENALI_READ) == PASS)
- {
- read_data_from_flash_mem(denali, buf, mtd->oobsize);
+ if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+ DENALI_READ) == PASS) {
+ read_data_from_flash_mem(denali, buf, mtd->oobsize);
- /* wait for command to be accepted
+ /* wait for command to be accepted
* can always use status0 bit as the mask is identical for each
* bank. */
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0)
- {
- printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
- }
+ dev_err(&denali->dev->dev, "page on OOB timeout %d\n",
+ denali->page);
/* We set the device back to MAIN_ACCESS here as I observed
* instability with the controller if you do a block erase
* and the last transaction was a SPARE_ACCESS. Block erase
* is reliable (according to the MTD test infrastructure)
- * if you are in MAIN_ACCESS.
+ * if you are in MAIN_ACCESS.
*/
addr = BANK(denali->flash_bank) | denali->page;
- cmd = MODE_10 | addr;
+ cmd = MODE_10 | addr;
index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
-
-#if DEBUG_DENALI
- spin_lock_irq(&denali->irq_lock);
- denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
- denali->idx %= 32;
- spin_unlock_irq(&denali->irq_lock);
-#endif
}
}
-/* this function examines buffers to see if they contain data that
+/* this function examines buffers to see if they contain data that
* indicate that the buffer is part of an erased region of flash.
*/
bool is_erased(uint8_t *buf, int len)
{
int i = 0;
for (i = 0; i < len; i++)
- {
if (buf[i] != 0xFF)
- {
return false;
- }
- }
return true;
}
#define ECC_SECTOR_SIZE 512
@@ -1354,71 +935,70 @@ bool is_erased(uint8_t *buf, int len)
#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
-#define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
+#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
- uint8_t *oobbuf, uint32_t irq_status)
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+ uint32_t irq_status)
{
bool check_erased_page = false;
- if (irq_status & INTR_STATUS0__ECC_ERR)
- {
+ if (irq_status & INTR_STATUS0__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
uint32_t err_address = 0, err_correction_info = 0;
uint32_t err_byte = 0, err_sector = 0, err_device = 0;
uint32_t err_correction_value = 0;
+ denali_set_intr_modes(denali, false);
- do
- {
- err_address = ioread32(denali->flash_reg +
+ do {
+ err_address = ioread32(denali->flash_reg +
ECC_ERROR_ADDRESS);
err_sector = ECC_SECTOR(err_address);
err_byte = ECC_BYTE(err_address);
-
- err_correction_info = ioread32(denali->flash_reg +
+ err_correction_info = ioread32(denali->flash_reg +
ERR_CORRECTION_INFO);
- err_correction_value =
+ err_correction_value =
ECC_CORRECTION_VALUE(err_correction_info);
err_device = ECC_ERR_DEVICE(err_correction_info);
- if (ECC_ERROR_CORRECTABLE(err_correction_info))
- {
- /* offset in our buffer is computed as:
- sector number * sector size + offset in
- sector
- */
- int offset = err_sector * ECC_SECTOR_SIZE +
- err_byte;
- if (offset < denali->mtd.writesize)
- {
+ if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
+				/* If err_byte is larger than ECC_SECTOR_SIZE,
+				 * the error happened in the OOB area, so we
+				 * ignore it; there is no need to correct it.
+				 * err_device indicates which NAND device the
+				 * error bits occurred in when more than one
+				 * NAND is connected.
+				 */
+ if (err_byte < ECC_SECTOR_SIZE) {
+ int offset;
+ offset = (err_sector *
+ ECC_SECTOR_SIZE +
+ err_byte) *
+ denali->devnum +
+ err_device;
/* correct the ECC error */
buf[offset] ^= err_correction_value;
denali->mtd.ecc_stats.corrected++;
}
- else
- {
- /* bummer, couldn't correct the error */
- printk(KERN_ERR "ECC offset invalid\n");
- denali->mtd.ecc_stats.failed++;
- }
- }
- else
- {
- /* if the error is not correctable, need to
- * look at the page to see if it is an erased page.
- * if so, then it's not a real ECC error */
+ } else {
+ /* if the error is not correctable, need to
+ * look at the page to see if it is an erased
+ * page. if so, then it's not a real ECC error
+ * */
check_erased_page = true;
}
-
-#if DEBUG_DENALI
- printk("Detected ECC error in page %d: err_addr = 0x%08x,"
- " info to fix is 0x%08x\n", denali->page, err_address,
- err_correction_info);
-#endif
} while (!ECC_LAST_ERR(err_correction_info));
+		/* Once all ECC errors are handled, the controller will
+		 * trigger an ECC_TRANSACTION_DONE interrupt, so just wait
+		 * here for that interrupt
+		 */
+ while (!(read_interrupt_status(denali) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE))
+ cpu_relax();
+ clear_interrupts(denali);
+ denali_set_intr_modes(denali, true);
}
return check_erased_page;
}
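
To make the interleaving above concrete: with two devices on the bus (denali->devnum == 2), an error reported at sector 3, byte 10 on device 1 lands at buffer offset (3 * 512 + 10) * 2 + 1 = 3093, and that is the byte XORed with the correction value. The arithmetic in isolation (ecc_buf_offset is an illustrative helper):

static int ecc_buf_offset(int err_sector, int err_byte,
			  int devnum, int err_device)
{
	/* byte position inside one device's data stream, then expanded
	 * across the interleaved devices */
	return (err_sector * ECC_SECTOR_SIZE + err_byte) * devnum + err_device;
}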
@@ -1428,9 +1008,10 @@ static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
uint32_t reg_val = 0x0;
- if (en) reg_val = DMA_ENABLE__FLAG;
+ if (en)
+ reg_val = DMA_ENABLE__FLAG;
- denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
+ iowrite32(reg_val, denali->flash_reg + DMA_ENABLE);
ioread32(denali->flash_reg + DMA_ENABLE);
}
@@ -1458,9 +1039,9 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
index_addr(denali, mode | 0x14000, 0x2400);
}
-/* writes a page. user specifies type, and this function handles the
- configuration details. */
-static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+/* writes a page. user specifies type, and this function handles the
+ * configuration details. */
+static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, bool raw_xfer)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1470,7 +1051,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
INTR_STATUS0__PROGRAM_FAIL;
/* if it is a raw xfer, we want to disable ecc, and send
@@ -1483,74 +1064,75 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
/* copy buffer into DMA buffer */
memcpy(denali->buf.buf, buf, mtd->writesize);
- if (raw_xfer)
- {
+ if (raw_xfer) {
/* transfer the data to the spare area */
- memcpy(denali->buf.buf + mtd->writesize,
- chip->oob_poi,
- mtd->oobsize);
+ memcpy(denali->buf.buf + mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize);
}
pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
clear_interrupts(denali);
- denali_enable_dma(denali, true);
+ denali_enable_dma(denali, true);
denali_setup_dma(denali, DENALI_WRITE);
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- if (irq_status == 0)
- {
- printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
- denali->status =
- (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
- PASS;
+ if (irq_status == 0) {
+ dev_err(&denali->dev->dev,
+ "timeout on write_page (type = %d)\n",
+ raw_xfer);
+ denali->status =
+ (irq_status & INTR_STATUS0__PROGRAM_FAIL) ?
+ NAND_STATUS_FAIL : PASS;
}
- denali_enable_dma(denali, false);
+ denali_enable_dma(denali, false);
pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
}
/* NAND core entry points */
-/* this is the callback that the NAND core calls to write a page. Since
- writing a page with ECC or without is similar, all the work is done
- by write_page above. */
-static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+/* this is the callback that the NAND core calls to write a page. Since
+ * writing a page with ECC or without is similar, all the work is done
+ * by write_page above.
+ */
+static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf)
{
/* for regular page writes, we let HW handle all the ECC
- * data written to the device. */
+ * data written to the device. */
write_page(mtd, chip, buf, false);
}
-/* This is the callback that the NAND core calls to write a page without ECC.
- raw access is similiar to ECC page writes, so all the work is done in the
- write_page() function above.
+/* This is the callback that the NAND core calls to write a page without ECC.
+ * raw access is similar to ECC page writes, so all the work is done in the
+ * write_page() function above.
*/
-static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf)
{
- /* for raw page writes, we want to disable ECC and simply write
+ /* for raw page writes, we want to disable ECC and simply write
whatever data is in the buffer. */
write_page(mtd, chip, buf, true);
}
-static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- return write_oob_data(mtd, chip->oob_poi, page);
+ return write_oob_data(mtd, chip->oob_poi, page);
}
-static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page, int sndcmd)
{
read_oob_data(mtd, chip->oob_poi, page);
- return 0; /* notify NAND core to send command to
- * NAND device. */
+ return 0; /* notify NAND core to send command to
+ NAND device. */
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1563,10 +1145,17 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
+ uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
INTR_STATUS0__ECC_ERR;
bool check_erased_page = false;
+ if (page != denali->page) {
+ dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ " equal to denali->page %d, investigate!!",
+ __func__, page, denali->page);
+ BUG();
+ }
+
setup_ecc_for_xfer(denali, true, false);
denali_enable_dma(denali, true);
@@ -1581,26 +1170,20 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
memcpy(buf, denali->buf.buf, mtd->writesize);
-
- check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
+
+ check_erased_page = handle_ecc(denali, buf, irq_status);
denali_enable_dma(denali, false);
- if (check_erased_page)
- {
+ if (check_erased_page) {
read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
/* check ECC failures that may have occurred on erased pages */
- if (check_erased_page)
- {
+ if (check_erased_page) {
if (!is_erased(buf, denali->mtd.writesize))
- {
denali->mtd.ecc_stats.failed++;
- }
if (!is_erased(buf, denali->mtd.oobsize))
- {
denali->mtd.ecc_stats.failed++;
- }
- }
+ }
}
return 0;
}
@@ -1616,7 +1199,14 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t irq_status = 0;
uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
-
+
+ if (page != denali->page) {
+ dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ " equal to denali->page %d, investigate!!",
+ __func__, page, denali->page);
+ BUG();
+ }
+
setup_ecc_for_xfer(denali, false, true);
denali_enable_dma(denali, true);
@@ -1644,22 +1234,15 @@ static uint8_t denali_read_byte(struct mtd_info *mtd)
uint8_t result = 0xff;
if (denali->buf.head < denali->buf.tail)
- {
result = denali->buf.buf[denali->buf.head++];
- }
-#if DEBUG_DENALI
- printk("read byte -> 0x%02x\n", result);
-#endif
return result;
}
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
-#if DEBUG_DENALI
- printk("denali select chip %d\n", chip);
-#endif
+
spin_lock_irq(&denali->irq_lock);
denali->flash_bank = chip;
spin_unlock_irq(&denali->irq_lock);
@@ -1671,9 +1254,6 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
int status = denali->status;
denali->status = 0;
-#if DEBUG_DENALI
- printk("waitfunc %d\n", status);
-#endif
return status;
}
@@ -1683,95 +1263,93 @@ static void denali_erase(struct mtd_info *mtd, int page)
uint32_t cmd = 0x0, irq_status = 0;
-#if DEBUG_DENALI
- printk("erase page: %d\n", page);
-#endif
/* clear interrupts */
- clear_interrupts(denali);
+ clear_interrupts(denali);
/* setup page read request for access type */
cmd = MODE_10 | BANK(denali->flash_bank) | page;
index_addr(denali, (uint32_t)cmd, 0x1);
/* wait for erase to complete or failure to occur */
- irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
+ irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
INTR_STATUS0__ERASE_FAIL);
- denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
- PASS;
+ denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ?
+ NAND_STATUS_FAIL : PASS;
}
-static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t addr, id;
+ int i;
-#if DEBUG_DENALI
- printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
-#endif
- switch (cmd)
- {
- case NAND_CMD_PAGEPROG:
- break;
- case NAND_CMD_STATUS:
- read_status(denali);
- break;
- case NAND_CMD_READID:
- reset_buf(denali);
- if (denali->flash_bank < denali->total_used_banks)
- {
- /* write manufacturer information into nand
- buffer for NAND subsystem to fetch.
- */
- write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
- write_byte_to_buf(denali, denali->dev_info.wDeviceID);
- write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
- write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
- write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
- }
- else
- {
- int i;
- for (i = 0; i < 5; i++)
- write_byte_to_buf(denali, 0xff);
- }
- break;
- case NAND_CMD_READ0:
- case NAND_CMD_SEQIN:
- denali->page = page;
- break;
- case NAND_CMD_RESET:
- reset_bank(denali);
- break;
- case NAND_CMD_READOOB:
- /* TODO: Read OOB data */
- break;
- default:
- printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
- break;
+ switch (cmd) {
+ case NAND_CMD_PAGEPROG:
+ break;
+ case NAND_CMD_STATUS:
+ read_status(denali);
+ break;
+ case NAND_CMD_READID:
+ reset_buf(denali);
+		/* Sometimes the manufacturer ID read from the register is
+		 * not right, e.g. on some Micron MT29F32G08QAA MLC NAND
+		 * chips, so here we send a READID command to the NAND
+		 * instead.
+		 */
+ addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, (uint32_t)addr | 0, 0x90);
+ index_addr(denali, (uint32_t)addr | 1, 0);
+ for (i = 0; i < 5; i++) {
+ index_addr_read_data(denali,
+ (uint32_t)addr | 2,
+ &id);
+ write_byte_to_buf(denali, id);
+ }
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_SEQIN:
+ denali->page = page;
+ break;
+ case NAND_CMD_RESET:
+ reset_bank(denali);
+ break;
+ case NAND_CMD_READOOB:
+ /* TODO: Read OOB data */
+ break;
+ default:
+ printk(KERN_ERR ": unsupported command"
+ " received 0x%x\n", cmd);
+ break;
}
}
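
The ID bytes stashed with write_byte_to_buf() above are later drained by the NAND core through denali_read_byte(). A minimal model of that buffer plumbing (the struct and helpers here are illustrative, mirroring the driver's head/tail buffer):

struct id_buf_sketch {
	uint8_t buf[8];
	int head, tail;
};

static void id_buf_push(struct id_buf_sketch *b, uint8_t byte)
{
	b->buf[b->tail++] = byte;		/* write_byte_to_buf() */
}

static uint8_t id_buf_pop(struct id_buf_sketch *b)
{
	/* denali_read_byte(): 0xff once the buffer is exhausted */
	return (b->head < b->tail) ? b->buf[b->head++] : 0xff;
}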
/* stubs for ECC functions not used by the NAND core */
-static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
+static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
uint8_t *ecc_code)
{
- printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dev_err(&denali->dev->dev,
+ "denali_ecc_calculate called unexpectedly\n");
BUG();
return -EIO;
}
-static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
+static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
- printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dev_err(&denali->dev->dev,
+ "denali_ecc_correct called unexpectedly\n");
BUG();
return -EIO;
}
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
- printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dev_err(&denali->dev->dev,
+ "denali_ecc_hwctl called unexpectedly\n");
BUG();
}
/* end NAND core entry points */
@@ -1779,38 +1357,39 @@ static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
- denali_irq_init(denali);
- NAND_Flash_Reset(denali);
- denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
- denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);
-
- denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
- denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+	/* tell the driver how many bytes the controller will skip
+	 * before writing the ECC code in the OOB. This register may
+	 * already have been set by firmware, so we read the value
+	 * out; if it is 0, just let it be.
+	 */
+ denali->bbtskipbytes = ioread32(denali->flash_reg +
+ SPARE_AREA_SKIP_BYTES);
+ denali_nand_reset(denali);
+ iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG,
+ denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+ iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
/* Should set value for these registers when init */
- denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
- denali_write32(1, denali->flash_reg + ECC_ENABLE);
+ iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(1, denali->flash_reg + ECC_ENABLE);
+ denali_nand_timing_set(denali);
+ denali_irq_init(denali);
}
-/* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
-#define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE)
-static struct nand_ecclayout nand_oob_slc = {
- .eccbytes = 4,
- .eccpos = { 0, 1, 2, 3 }, /* not used */
- .oobfree = {{
- .offset = ECC_BYTES_SLC,
- .length = 64 - ECC_BYTES_SLC
- }}
+/* Although the controller spec says SLC ECC is forced to be 4-bit,
+ * the Denali controller in MRST only supports 15-bit and 8-bit ECC
+ * correction
+ */
+#define ECC_8BITS 14
+static struct nand_ecclayout nand_8bit_oob = {
+ .eccbytes = 14,
};
-#define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE)
-static struct nand_ecclayout nand_oob_mlc_14bit = {
- .eccbytes = 14,
- .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
- .oobfree = {{
- .offset = ECC_BYTES_MLC,
- .length = 64 - ECC_BYTES_MLC
- }}
+#define ECC_15BITS 26
+static struct nand_ecclayout nand_15bit_oob = {
+ .eccbytes = 26,
};
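
The 14- and 26-byte figures come from the per-sector formula the removed set_ecc_config() used: correction_bits * 13 / 8, bumped to the next even value above the raw result (8-bit ECC: 13 -> 14; 15-bit ECC: 24 -> 26). As a sketch:

static int ecc_bytes_per_sector(int correction_bits)
{
	int bytes = correction_bits * 13 / 8;	/* integer division */

	return (bytes % 2 == 0) ? bytes + 2 : bytes + 1;
}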
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -1842,12 +1421,12 @@ void denali_drv_init(struct denali_nand_info *denali)
denali->idx = 0;
/* setup interrupt handler */
- /* the completion object will be used to notify
+ /* the completion object will be used to notify
* the callee that the interrupt is done */
init_completion(&denali->complete);
/* the spinlock will be used to synchronize the ISR
- * with any element that might be access shared
+ * with any element that might be access shared
* data (interrupt status) */
spin_lock_init(&denali->irq_lock);
@@ -1866,9 +1445,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
unsigned long csr_len, mem_len;
struct denali_nand_info *denali;
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
denali = kzalloc(sizeof(*denali), GFP_KERNEL);
if (!denali)
return -ENOMEM;
@@ -1876,19 +1452,18 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = pci_enable_device(dev);
if (ret) {
printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- goto failed_enable;
+ goto failed_alloc_memery;
}
if (id->driver_data == INTEL_CE4100) {
- /* Due to a silicon limitation, we can only support
- * ONFI timing mode 1 and below.
- */
- if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
- {
- printk("Intel CE4100 only supports ONFI timing mode 1 "
- "or below\n");
+ /* Due to a silicon limitation, we can only support
+ * ONFI timing mode 1 and below.
+ */
+ if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
+ printk(KERN_ERR "Intel CE4100 only supports"
+ " ONFI timing mode 1 or below\n");
ret = -EINVAL;
- goto failed_enable;
+ goto failed_enable_dev;
}
denali->platform = INTEL_CE4100;
mem_base = pci_resource_start(dev, 0);
@@ -1898,108 +1473,74 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
} else {
denali->platform = INTEL_MRST;
csr_base = pci_resource_start(dev, 0);
- csr_len = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
mem_base = pci_resource_start(dev, 1);
mem_len = pci_resource_len(dev, 1);
if (!mem_len) {
mem_base = csr_base + csr_len;
mem_len = csr_len;
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: No second BAR for PCI device; assuming %08Lx\n",
- (uint64_t)csr_base);
}
}
/* Is 32-bit DMA supported? */
ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
- if (ret)
- {
+ if (ret) {
printk(KERN_ERR "Spectra: no usable DMA configuration\n");
- goto failed_enable;
+ goto failed_enable_dev;
}
- denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
-
- if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
- {
- printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
- goto failed_enable;
+ denali->buf.dma_buf =
+ pci_map_single(dev, denali->buf.buf,
+ DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
+ dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
+ goto failed_enable_dev;
}
pci_set_master(dev);
denali->dev = dev;
+ denali->mtd.dev.parent = &dev->dev;
ret = pci_request_regions(dev, DENALI_NAND_NAME);
if (ret) {
printk(KERN_ERR "Spectra: Unable to request memory regions\n");
- goto failed_req_csr;
+ goto failed_dma_map;
}
denali->flash_reg = ioremap_nocache(csr_base, csr_len);
if (!denali->flash_reg) {
printk(KERN_ERR "Spectra: Unable to remap memory region\n");
ret = -ENOMEM;
- goto failed_remap_csr;
+ goto failed_req_regions;
}
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
- (uint64_t)csr_base, denali->flash_reg, csr_len);
denali->flash_mem = ioremap_nocache(mem_base, mem_len);
if (!denali->flash_mem) {
printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- iounmap(denali->flash_reg);
ret = -ENOMEM;
- goto failed_remap_csr;
+ goto failed_remap_reg;
}
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped flash base address: "
- "0x%p, len: %ld\n",
- denali->flash_mem, csr_len);
-
denali_hw_init(denali);
denali_drv_init(denali);
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
+ /* denali_isr registration is done after all the hardware
+ * initialization is finished */
if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
DENALI_NAND_NAME, denali)) {
printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
ret = -ENODEV;
- goto failed_request_irq;
+ goto failed_remap_mem;
}
/* now that our ISR is registered, we can enable interrupts */
- NAND_LLD_Enable_Disable_Interrupts(denali, true);
+ denali_set_intr_modes(denali, true);
pci_set_drvdata(dev, denali);
- NAND_Read_Device_ID(denali);
-
- /* MTD supported page sizes vary by kernel. We validate our
- kernel supports the device here.
- */
- if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
- {
- ret = -ENODEV;
- printk(KERN_ERR "Spectra: device size not supported by this "
- "version of MTD.");
- goto failed_nand;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(denali->flash_reg + ACC_CLKS),
- ioread32(denali->flash_reg + RE_2_WE),
- ioread32(denali->flash_reg + WE_2_RE),
- ioread32(denali->flash_reg + ADDR_2_DATA),
- ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
- ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
- ioread32(denali->flash_reg + CS_SETUP_CNT));
-
- denali->mtd.name = "Denali NAND";
+ denali->mtd.name = "denali-nand";
denali->mtd.owner = THIS_MODULE;
denali->mtd.priv = &denali->nand;
@@ -2009,18 +1550,46 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->nand.read_byte = denali_read_byte;
denali->nand.waitfunc = denali_waitfunc;
- /* scan for NAND devices attached to the controller
+ /* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
- * with the nand subsystem */
- if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
- {
+ * with the nand subsystem */
+ if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
ret = -ENXIO;
- goto failed_nand;
+ goto failed_req_irq;
}
-
- /* second stage of the NAND scan
- * this stage requires information regarding ECC and
- * bad block management. */
+
+ /* MTD supported page sizes vary by kernel. We validate our
+ * kernel supports the device here.
+ */
+ if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
+ ret = -ENODEV;
+ printk(KERN_ERR "Spectra: device size not supported by this "
+ "version of MTD.");
+ goto failed_req_irq;
+ }
+
+ /* support for multi nand
+ * MTD knows nothing about multi nand,
+ * so we should tell it the real pagesize
+ * and anything else necessary
+ */
+ denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali->nand.chipsize <<= (denali->devnum - 1);
+ denali->nand.page_shift += (denali->devnum - 1);
+ denali->nand.pagemask = (denali->nand.chipsize >>
+ denali->nand.page_shift) - 1;
+ denali->nand.bbt_erase_shift += (denali->devnum - 1);
+ denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
+ denali->nand.chip_shift += (denali->devnum - 1);
+ denali->mtd.writesize <<= (denali->devnum - 1);
+ denali->mtd.oobsize <<= (denali->devnum - 1);
+ denali->mtd.erasesize <<= (denali->devnum - 1);
+ denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
+ denali->bbtskipbytes *= denali->devnum;
+
+ /* second stage of the NAND scan
+ * this stage requires information regarding ECC and
+ * bad block management. */
/* Bad block management */
denali->nand.bbt_td = &bbt_main_descr;
@@ -2030,26 +1599,57 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
- if (denali->dev_info.MLCDevice)
- {
- denali->nand.ecc.layout = &nand_oob_mlc_14bit;
- denali->nand.ecc.bytes = ECC_BYTES_MLC;
- }
- else /* SLC */
- {
- denali->nand.ecc.layout = &nand_oob_slc;
- denali->nand.ecc.bytes = ECC_BYTES_SLC;
+ /* The Denali controller only supports 15bit and 8bit ECC in MRST,
+ * so just let the controller do 15bit ECC for MLC and 8bit ECC for
+ * SLC if possible.
+ */
+ if (denali->nand.cellinfo & 0xc &&
+ (denali->mtd.oobsize > (denali->bbtskipbytes +
+ ECC_15BITS * (denali->mtd.writesize /
+ ECC_SECTOR_SIZE)))) {
+ /* if MLC OOB size is large enough, use 15bit ECC*/
+ denali->nand.ecc.layout = &nand_15bit_oob;
+ denali->nand.ecc.bytes = ECC_15BITS;
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+ } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
+ ECC_8BITS * (denali->mtd.writesize /
+ ECC_SECTOR_SIZE))) {
+ printk(KERN_ERR "Your NAND chip OOB is not large enough to"
+ " contain 8bit ECC correction codes");
+ goto failed_req_irq;
+ } else {
+ denali->nand.ecc.layout = &nand_8bit_oob;
+ denali->nand.ecc.bytes = ECC_8BITS;
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
}
- /* These functions are required by the NAND core framework, otherwise,
- the NAND core will assert. However, we don't need them, so we'll stub
- them out. */
+ denali->nand.ecc.bytes *= denali->devnum;
+ denali->nand.ecc.layout->eccbytes *=
+ denali->mtd.writesize / ECC_SECTOR_SIZE;
+ denali->nand.ecc.layout->oobfree[0].offset =
+ denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
+ denali->nand.ecc.layout->oobfree[0].length =
+ denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
+ denali->bbtskipbytes;
+
+ /* Let the driver know the total number of blocks and
+ * how many blocks are contained in each nand chip.
+ * blksperchip will help the driver know how many
+ * blocks are taken by the FW.
+ */
+ denali->totalblks = denali->mtd.size >>
+ denali->nand.phys_erase_shift;
+ denali->blksperchip = denali->totalblks / denali->nand.numchips;
+
+ /* These functions are required by the NAND core framework, otherwise,
+ * the NAND core will assert. However, we don't need them, so we'll stub
+ * them out. */
denali->nand.ecc.calculate = denali_ecc_calculate;
denali->nand.ecc.correct = denali_ecc_correct;
denali->nand.ecc.hwctl = denali_ecc_hwctl;
/* override the default read operations */
- denali->nand.ecc.size = denali->mtd.writesize;
+ denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
denali->nand.ecc.read_page = denali_read_page;
denali->nand.ecc.read_page_raw = denali_read_page_raw;
denali->nand.ecc.write_page = denali_write_page;
@@ -2058,30 +1658,33 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->nand.ecc.write_oob = denali_write_oob;
denali->nand.erase_cmd = denali_erase;
- if (nand_scan_tail(&denali->mtd))
- {
+ if (nand_scan_tail(&denali->mtd)) {
ret = -ENXIO;
- goto failed_nand;
+ goto failed_req_irq;
}
ret = add_mtd_device(&denali->mtd);
if (ret) {
- printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
- goto failed_nand;
+ dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
+ ret);
+ goto failed_req_irq;
}
return 0;
- failed_nand:
+failed_req_irq:
denali_irq_cleanup(dev->irq, denali);
- failed_request_irq:
- iounmap(denali->flash_reg);
+failed_remap_mem:
iounmap(denali->flash_mem);
- failed_remap_csr:
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
pci_release_regions(dev);
- failed_req_csr:
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+failed_dma_map:
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
PCI_DMA_BIDIRECTIONAL);
- failed_enable:
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memory:
kfree(denali);
return ret;
}
@@ -2091,9 +1694,6 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
nand_release(&denali->mtd);
del_mtd_device(&denali->mtd);
@@ -2103,7 +1703,7 @@ static void denali_pci_remove(struct pci_dev *dev)
iounmap(denali->flash_mem);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
PCI_DMA_BIDIRECTIONAL);
pci_set_drvdata(dev, NULL);
kfree(denali);
@@ -2120,7 +1720,8 @@ static struct pci_driver denali_pci_driver = {
static int __devinit denali_init(void)
{
- printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n",
+ __DATE__, __TIME__);
return pci_register_driver(&denali_pci_driver);
}
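The ECC setup in the probe above boils down to a spare-area capacity check: 15-bit correction costs ECC_15BITS (26) bytes and 8-bit costs ECC_8BITS (14) bytes per 512-byte ECC sector, on top of the bad-block-table skip bytes at the start of the OOB. Below is a minimal user-space sketch of that arithmetic; the page size, OOB size and skip-byte count are assumptions for illustration only, and the real probe additionally limits the 15-bit mode to MLC parts via the cellinfo check.

/*
 * Standalone sketch of the spare-area capacity check above.  All sizes
 * are made-up illustration values, not data read from real hardware.
 */
#include <stdio.h>

#define ECC_SECTOR_SIZE	512
#define ECC_8BITS	14	/* ECC bytes per 512-byte sector, 8-bit mode */
#define ECC_15BITS	26	/* ECC bytes per 512-byte sector, 15-bit mode */

int main(void)
{
	unsigned writesize = 4096;	/* assumed page size */
	unsigned oobsize = 224;		/* assumed spare area size */
	unsigned bbtskipbytes = 2;	/* assumed BBT skip bytes */
	unsigned sectors = writesize / ECC_SECTOR_SIZE;
	unsigned need15 = bbtskipbytes + ECC_15BITS * sectors;
	unsigned need8 = bbtskipbytes + ECC_8BITS * sectors;

	if (oobsize > need15)
		printf("15-bit ECC fits: needs %u of %u OOB bytes\n",
		       need15, oobsize);
	else if (oobsize < need8)
		printf("OOB too small for 8-bit ECC: needs %u, have %u\n",
		       need8, oobsize);
	else
		printf("8-bit ECC fits: needs %u of %u OOB bytes\n",
		       need8, oobsize);
	return 0;
}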
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 422a29ab2f6..3918bcb1561 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -17,7 +17,7 @@
*
*/
-#include <linux/mtd/nand.h>
+#include <linux/mtd/nand.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK0 0x0001
@@ -29,7 +29,7 @@
#define TRANSFER_SPARE_REG__FLAG 0x0001
#define LOAD_WAIT_CNT 0x20
-#define LOAD_WAIT_CNT__VALUE 0xffff
+#define LOAD_WAIT_CNT__VALUE 0xffff
#define PROGRAM_WAIT_CNT 0x30
#define PROGRAM_WAIT_CNT__VALUE 0xffff
@@ -83,7 +83,7 @@
#define RE_2_WE 0x120
#define RE_2_WE__VALUE 0x003f
-#define ACC_CLKS 0x130
+#define ACC_CLKS 0x130
#define ACC_CLKS__VALUE 0x000f
#define NUMBER_OF_PLANES 0x140
@@ -140,7 +140,7 @@
#define DEVICES_CONNECTED 0x250
#define DEVICES_CONNECTED__VALUE 0x0007
-#define DIE_MASK 0x260
+#define DIE_MASK 0x260
#define DIE_MASK__VALUE 0x00ff
#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
@@ -152,7 +152,7 @@
#define RE_2_RE 0x290
#define RE_2_RE__VALUE 0x003f
-#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID 0x300
#define MANUFACTURER_ID__VALUE 0x00ff
#define DEVICE_ID 0x310
@@ -173,13 +173,13 @@
#define LOGICAL_PAGE_SPARE_SIZE 0x360
#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
-#define REVISION 0x370
+#define REVISION 0x370
#define REVISION__VALUE 0xffff
#define ONFI_DEVICE_FEATURES 0x380
#define ONFI_DEVICE_FEATURES__VALUE 0x003f
-#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS 0x390
#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
#define ONFI_TIMING_MODE 0x3a0
@@ -201,12 +201,12 @@
#define FEATURES 0x3f0
#define FEATURES__N_BANKS 0x0003
#define FEATURES__ECC_MAX_ERR 0x003c
-#define FEATURES__DMA 0x0040
+#define FEATURES__DMA 0x0040
#define FEATURES__CMD_DMA 0x0080
#define FEATURES__PARTITION 0x0100
#define FEATURES__XDMA_SIDEBAND 0x0200
#define FEATURES__GPREG 0x0400
-#define FEATURES__INDEX_ADDR 0x0800
+#define FEATURES__INDEX_ADDR 0x0800
#define TRANSFER_MODE 0x400
#define TRANSFER_MODE__VALUE 0x0003
@@ -235,12 +235,12 @@
#define INTR_EN0__DMA_CMD_COMP 0x0004
#define INTR_EN0__TIME_OUT 0x0008
#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
+#define INTR_EN0__ERASE_FAIL 0x0020
#define INTR_EN0__LOAD_COMP 0x0040
#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
+#define INTR_EN0__ERASE_COMP 0x0100
#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
+#define INTR_EN0__LOCKED_BLK 0x0400
#define INTR_EN0__UNSUP_CMD 0x0800
#define INTR_EN0__INT_ACT 0x1000
#define INTR_EN0__RST_COMP 0x2000
@@ -253,7 +253,7 @@
#define ERR_PAGE_ADDR0 0x440
#define ERR_PAGE_ADDR0__VALUE 0xffff
-#define ERR_BLOCK_ADDR0 0x450
+#define ERR_BLOCK_ADDR0 0x450
#define ERR_BLOCK_ADDR0__VALUE 0xffff
#define INTR_STATUS1 0x460
@@ -280,12 +280,12 @@
#define INTR_EN1__DMA_CMD_COMP 0x0004
#define INTR_EN1__TIME_OUT 0x0008
#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
+#define INTR_EN1__ERASE_FAIL 0x0020
#define INTR_EN1__LOAD_COMP 0x0040
#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
+#define INTR_EN1__ERASE_COMP 0x0100
#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
+#define INTR_EN1__LOCKED_BLK 0x0400
#define INTR_EN1__UNSUP_CMD 0x0800
#define INTR_EN1__INT_ACT 0x1000
#define INTR_EN1__RST_COMP 0x2000
@@ -298,7 +298,7 @@
#define ERR_PAGE_ADDR1 0x490
#define ERR_PAGE_ADDR1__VALUE 0xffff
-#define ERR_BLOCK_ADDR1 0x4a0
+#define ERR_BLOCK_ADDR1 0x4a0
#define ERR_BLOCK_ADDR1__VALUE 0xffff
#define INTR_STATUS2 0x4b0
@@ -325,12 +325,12 @@
#define INTR_EN2__DMA_CMD_COMP 0x0004
#define INTR_EN2__TIME_OUT 0x0008
#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
+#define INTR_EN2__ERASE_FAIL 0x0020
#define INTR_EN2__LOAD_COMP 0x0040
#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
+#define INTR_EN2__ERASE_COMP 0x0100
#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
+#define INTR_EN2__LOCKED_BLK 0x0400
#define INTR_EN2__UNSUP_CMD 0x0800
#define INTR_EN2__INT_ACT 0x1000
#define INTR_EN2__RST_COMP 0x2000
@@ -343,7 +343,7 @@
#define ERR_PAGE_ADDR2 0x4e0
#define ERR_PAGE_ADDR2__VALUE 0xffff
-#define ERR_BLOCK_ADDR2 0x4f0
+#define ERR_BLOCK_ADDR2 0x4f0
#define ERR_BLOCK_ADDR2__VALUE 0xffff
#define INTR_STATUS3 0x500
@@ -370,12 +370,12 @@
#define INTR_EN3__DMA_CMD_COMP 0x0004
#define INTR_EN3__TIME_OUT 0x0008
#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
+#define INTR_EN3__ERASE_FAIL 0x0020
#define INTR_EN3__LOAD_COMP 0x0040
#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
+#define INTR_EN3__ERASE_COMP 0x0100
#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
+#define INTR_EN3__LOCKED_BLK 0x0400
#define INTR_EN3__UNSUP_CMD 0x0800
#define INTR_EN3__INT_ACT 0x1000
#define INTR_EN3__RST_COMP 0x2000
@@ -388,7 +388,7 @@
#define ERR_PAGE_ADDR3 0x530
#define ERR_PAGE_ADDR3__VALUE 0xffff
-#define ERR_BLOCK_ADDR3 0x540
+#define ERR_BLOCK_ADDR3 0x540
#define ERR_BLOCK_ADDR3__VALUE 0xffff
#define DATA_INTR 0x550
@@ -412,9 +412,9 @@
#define GPREG_3__VALUE 0xffff
#define ECC_THRESHOLD 0x600
-#define ECC_THRESHOLD__VALUE 0x03ff
+#define ECC_THRESHOLD__VALUE 0x03ff
-#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
#define ECC_ERROR_PAGE_ADDRESS 0x620
@@ -466,7 +466,7 @@
#define CHNL_ACTIVE__CHANNEL3 0x0008
#define ACTIVE_SRC_ID 0x800
-#define ACTIVE_SRC_ID__VALUE 0x00ff
+#define ACTIVE_SRC_ID__VALUE 0x00ff
#define PTN_INTR 0x810
#define PTN_INTR__CONFIG_ERROR 0x0001
@@ -485,7 +485,7 @@
#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
+#define PERM_SRC_ID_0__SRCID 0x00ff
#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
@@ -502,7 +502,7 @@
#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
+#define PERM_SRC_ID_1__SRCID 0x00ff
#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
@@ -519,7 +519,7 @@
#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
+#define PERM_SRC_ID_2__SRCID 0x00ff
#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
@@ -536,7 +536,7 @@
#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
+#define PERM_SRC_ID_3__SRCID 0x00ff
#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
@@ -553,7 +553,7 @@
#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
+#define PERM_SRC_ID_4__SRCID 0x00ff
#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
@@ -570,7 +570,7 @@
#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
+#define PERM_SRC_ID_5__SRCID 0x00ff
#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
@@ -587,7 +587,7 @@
#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
+#define PERM_SRC_ID_6__SRCID 0x00ff
#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
@@ -604,7 +604,7 @@
#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
+#define PERM_SRC_ID_7__SRCID 0x00ff
#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
@@ -620,47 +620,6 @@
#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
-/* flash.h */
-struct device_info_tag {
- uint16_t wDeviceMaker;
- uint16_t wDeviceID;
- uint8_t bDeviceParam0;
- uint8_t bDeviceParam1;
- uint8_t bDeviceParam2;
- uint32_t wDeviceType;
- uint32_t wSpectraStartBlock;
- uint32_t wSpectraEndBlock;
- uint32_t wTotalBlocks;
- uint16_t wPagesPerBlock;
- uint16_t wPageSize;
- uint16_t wPageDataSize;
- uint16_t wPageSpareSize;
- uint16_t wNumPageSpareFlag;
- uint16_t wECCBytesPerSector;
- uint32_t wBlockSize;
- uint32_t wBlockDataSize;
- uint32_t wDataBlockNum;
- uint8_t bPlaneNum;
- uint16_t wDeviceMainAreaSize;
- uint16_t wDeviceSpareAreaSize;
- uint16_t wDevicesConnected;
- uint16_t wDeviceWidth;
- uint16_t wHWRevision;
- uint16_t wHWFeatures;
-
- uint16_t wONFIDevFeatures;
- uint16_t wONFIOptCommands;
- uint16_t wONFITimingMode;
- uint16_t wONFIPgmCacheTimingMode;
-
- uint16_t MLCDevice;
- uint16_t wSpareSkipBytes;
-
- uint8_t nBitsInPageNumber;
- uint8_t nBitsInPageDataSize;
- uint8_t nBitsInBlockDataSize;
-};
-
/* ffsdefs.h */
#define CLEAR 0 /*use this to clear a field instead of "fail"*/
#define SET 1 /*use this to set a field instead of "pass"*/
@@ -676,24 +635,6 @@ struct device_info_tag {
#define CLK_X 5
#define CLK_MULTI 4
-/* ffsport.h */
-#define VERBOSE 1
-
-#define NAND_DBG_WARN 1
-#define NAND_DBG_DEBUG 2
-#define NAND_DBG_TRACE 3
-
-#ifdef VERBOSE
-#define nand_dbg_print(level, args...) \
- do { \
- if (level <= nand_debug_level) \
- printk(KERN_ALERT args); \
- } while (0)
-#else
-#define nand_dbg_print(level, args...)
-#endif
-
-
/* spectraswconfig.h */
#define CMD_DMA 0
@@ -772,10 +713,9 @@ struct device_info_tag {
#define ECC_SECTOR_SIZE 512
#define LLD_MAX_FLASH_BANKS 4
-#define DENALI_BUF_SIZE NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE
+#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-struct nand_buf
-{
+struct nand_buf {
int head;
int tail;
uint8_t buf[DENALI_BUF_SIZE];
@@ -788,7 +728,6 @@ struct nand_buf
struct denali_nand_info {
struct mtd_info mtd;
struct nand_chip nand;
- struct device_info_tag dev_info;
int flash_bank; /* currently selected chip */
int status;
int platform;
@@ -806,11 +745,12 @@ struct denali_nand_info {
uint32_t irq_status;
int irq_debug_array[32];
int idx;
-};
-static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali);
-static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali);
-static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE);
+ uint32_t devnum; /* represents how many nands are connected */
+ uint32_t fwblks; /* represents how many blocks the FW used */
+ uint32_t totalblks;
+ uint32_t blksperchip;
+ uint32_t bbtskipbytes;
+};
#endif /*_LLD_NAND_*/
-
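The DENALI_BUF_SIZE change above adds parentheses around the macro body; without them the textual expansion binds incorrectly under neighbouring operators. A tiny standalone example, using made-up stand-ins for NAND_MAX_PAGESIZE and NAND_MAX_OOBSIZE:

#include <stdio.h>

#define PAGE_SZ	4096			/* stand-in for NAND_MAX_PAGESIZE */
#define OOB_SZ	128			/* stand-in for NAND_MAX_OOBSIZE */

#define BUF_SIZE_BAD	PAGE_SZ + OOB_SZ	/* unparenthesised, as before */
#define BUF_SIZE_GOOD	(PAGE_SZ + OOB_SZ)	/* parenthesised, as after */

int main(void)
{
	/* 2 * 4096 + 128 = 8320, not the intended 2 * (4096 + 128) = 8448 */
	printf("bad:  %d\n", 2 * BUF_SIZE_BAD);
	printf("good: %d\n", 2 * BUF_SIZE_GOOD);
	return 0;
}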
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 47067bc9824..b7f8de7b278 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -29,7 +29,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/doc2000.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/inftl.h>
@@ -146,6 +145,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
uint8_t parity;
uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+ memset(syn, 0, sizeof(syn));
/* Convert the ecc bytes into words */
ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
@@ -169,9 +169,9 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
}
- /* Calc s[i] = s[i] / alpha^(v + i) */
+ /* Calc syn[i] = s[i] / alpha^(v + i) */
for (i = 0; i < NROOTS; i++) {
- if (syn[i])
+ if (s[i])
syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
}
/* Call the decoder library */
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 5084cc51794..80de0bff6c3 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -958,7 +958,7 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
return 0;
}
-static int fsl_elbc_ctrl_remove(struct of_device *ofdev)
+static int fsl_elbc_ctrl_remove(struct platform_device *ofdev)
{
struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
int i;
@@ -1013,7 +1013,7 @@ static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
* in the chip probe function.
*/
-static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
+static int __devinit fsl_elbc_ctrl_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *child;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 1312eda57ba..4eff8b25e5a 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -217,7 +217,7 @@ err:
return ret;
}
-static int __devinit fun_probe(struct of_device *ofdev,
+static int __devinit fun_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct fsl_upm_nand *fun;
@@ -335,7 +335,7 @@ err1:
return ret;
}
-static int __devexit fun_remove(struct of_device *ofdev)
+static int __devexit fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
int i;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
new file mode 100644
index 00000000000..67343fc31bd
--- /dev/null
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC NAND controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/gpio.h>
+
+#include <asm/mach-jz4740/jz4740_nand.h>
+
+#define JZ_REG_NAND_CTRL 0x50
+#define JZ_REG_NAND_ECC_CTRL 0x100
+#define JZ_REG_NAND_DATA 0x104
+#define JZ_REG_NAND_PAR0 0x108
+#define JZ_REG_NAND_PAR1 0x10C
+#define JZ_REG_NAND_PAR2 0x110
+#define JZ_REG_NAND_IRQ_STAT 0x114
+#define JZ_REG_NAND_IRQ_CTRL 0x118
+#define JZ_REG_NAND_ERR(x) (0x11C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
+#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
+
+#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
+#define JZ_NAND_MEM_CMD_OFFSET 0x08000
+
+struct jz_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *base;
+ struct resource *mem;
+
+ void __iomem *bank_base;
+ struct resource *bank_mem;
+
+ struct jz_nand_platform_data *pdata;
+ bool is_reading;
+};
+
+static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct jz_nand, mtd);
+}
+
+static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct nand_chip *chip = mtd->priv;
+ uint32_t reg;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
+ if (ctrl & NAND_ALE)
+ chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET;
+ else if (ctrl & NAND_CLE)
+ chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET;
+ else
+ chip->IO_ADDR_W = nand->bank_base;
+
+ reg = readl(nand->base + JZ_REG_NAND_CTRL);
+ if (ctrl & NAND_NCE)
+ reg |= JZ_NAND_CTRL_ASSERT_CHIP(0);
+ else
+ reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0);
+ writel(reg, nand->base + JZ_REG_NAND_CTRL);
+ }
+ if (dat != NAND_CMD_NONE)
+ writeb(dat, chip->IO_ADDR_W);
+}
+
+static int jz_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ return gpio_get_value_cansleep(nand->pdata->busy_gpio);
+}
+
+static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ uint32_t reg;
+
+ writel(0, nand->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+ nand->is_reading = true;
+ break;
+ case NAND_ECC_WRITE:
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ nand->is_reading = false;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ uint32_t reg, status;
+ int i;
+ unsigned int timeout = 1000;
+ static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4,
+ 0x8b, 0xff, 0xb7, 0x6f};
+
+ if (nand->is_reading)
+ return 0;
+
+ do {
+ status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < 9; ++i)
+ ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i);
+
+ /* If the written data is completely 0xff, we also want to write 0xff as
+ * ecc, otherwise we will get in trouble when doing subpage writes. */
+ if (memcmp(ecc_code, empty_block_ecc, 9) == 0)
+ memset(ecc_code, 0xff, 9);
+
+ return 0;
+}
+
+static void jz_nand_correct_data(uint8_t *dat, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = dat[index];
+ data |= dat[index+1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ dat[index] = data & 0xff;
+ dat[index+1] = (data >> 8) & 0xff;
+}
+
+static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ uint32_t t;
+ unsigned int timeout = 1000;
+
+ t = read_ecc[0];
+
+ if (t == 0xff) {
+ for (i = 1; i < 9; ++i)
+ t &= read_ecc[i];
+
+ t &= dat[0];
+ t &= dat[nand->chip.ecc.size / 2];
+ t &= dat[nand->chip.ecc.size - 1];
+
+ if (t == 0xff) {
+ for (i = 1; i < nand->chip.ecc.size - 1; ++i)
+ t &= dat[i];
+ if (t == 0xff)
+ return 0;
+ }
+ }
+
+ for (i = 0; i < 9; ++i)
+ writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -1;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(nand->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < 512)
+ jz_nand_correct_data(dat, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+
+/* Copy-paste of nand_read_page_hwecc_oob_first except for different eccpos
+ * handling. For 4k chips the ecc area is 72 bytes long and thus does not
+ * fit into the eccpos array. */
+static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ unsigned int ecc_offset = chip->page_shift;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
+static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ unsigned int ecc_offset = chip->page_shift;
+
+ for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
+ }
+
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+static const char *part_probes[] = {"cmdline", NULL};
+#endif
+
+static int jz_nand_ioremap_resource(struct platform_device *pdev,
+ const char *name, struct resource **res, void __iomem **base)
+{
+ int ret;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!*res) {
+ dev_err(&pdev->dev, "Failed to get platform %s memory\n", name);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ *res = request_mem_region((*res)->start, resource_size(*res),
+ pdev->name);
+ if (!*res) {
+ dev_err(&pdev->dev, "Failed to request %s memory region\n", name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ *base = ioremap((*res)->start, resource_size(*res));
+ if (!*base) {
+ dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name);
+ ret = -EBUSY;
+ goto err_release_mem;
+ }
+
+ return 0;
+
+err_release_mem:
+ release_mem_region((*res)->start, resource_size(*res));
+err:
+ *res = NULL;
+ *base = NULL;
+ return ret;
+}
+
+static int __devinit jz_nand_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jz_nand *nand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *partition_info;
+ int num_partitions = 0;
+#endif
+
+ nand = kzalloc(sizeof(*nand), GFP_KERNEL);
+ if (!nand) {
+ dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
+ if (ret)
+ goto err_free;
+ ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem,
+ &nand->bank_base);
+ if (ret)
+ goto err_iounmap_mmio;
+
+ if (pdata && gpio_is_valid(pdata->busy_gpio)) {
+ ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request busy gpio %d: %d\n",
+ pdata->busy_gpio, ret);
+ goto err_iounmap_mem;
+ }
+ }
+
+ mtd = &nand->mtd;
+ chip = &nand->chip;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->name = "jz4740-nand";
+
+ chip->ecc.hwctl = jz_nand_hwctl;
+ chip->ecc.calculate = jz_nand_calculate_ecc_rs;
+ chip->ecc.correct = jz_nand_correct_ecc_rs;
+ chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 9;
+
+ chip->ecc.read_page = jz_nand_read_page_hwecc_oob_first;
+ chip->ecc.write_page = jz_nand_write_page_hwecc;
+
+ if (pdata)
+ chip->ecc.layout = pdata->ecc_layout;
+
+ chip->chip_delay = 50;
+ chip->cmd_ctrl = jz_nand_cmd_ctrl;
+
+ if (pdata && gpio_is_valid(pdata->busy_gpio))
+ chip->dev_ready = jz_nand_dev_ready;
+
+ chip->IO_ADDR_R = nand->bank_base;
+ chip->IO_ADDR_W = nand->bank_base;
+
+ nand->pdata = pdata;
+ platform_set_drvdata(pdev, nand);
+
+ writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL);
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to scan nand\n");
+ goto err_gpio_free;
+ }
+
+ if (pdata && pdata->ident_callback) {
+ pdata->ident_callback(pdev, chip, &pdata->partitions,
+ &pdata->num_partitions);
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to scan nand\n");
+ goto err_gpio_free;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ num_partitions = parse_mtd_partitions(mtd, part_probes,
+ &partition_info, 0);
+#endif
+ if (num_partitions <= 0 && pdata) {
+ num_partitions = pdata->num_partitions;
+ partition_info = pdata->partitions;
+ }
+
+ if (num_partitions > 0)
+ ret = add_mtd_partitions(mtd, partition_info, num_partitions);
+ else
+#endif
+ ret = add_mtd_device(mtd);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add mtd device\n");
+ goto err_nand_release;
+ }
+
+ dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
+
+ return 0;
+
+err_nand_release:
+ nand_release(&nand->mtd);
+err_gpio_free:
+ platform_set_drvdata(pdev, NULL);
+ gpio_free(pdata->busy_gpio);
+err_iounmap_mem:
+ iounmap(nand->bank_base);
+err_iounmap_mmio:
+ iounmap(nand->base);
+err_free:
+ kfree(nand);
+ return ret;
+}
+
+static int __devexit jz_nand_remove(struct platform_device *pdev)
+{
+ struct jz_nand *nand = platform_get_drvdata(pdev);
+
+ nand_release(&nand->mtd);
+
+ /* Deassert and disable all chips */
+ writel(0, nand->base + JZ_REG_NAND_CTRL);
+
+ iounmap(nand->bank_base);
+ release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem));
+ iounmap(nand->base);
+ release_mem_region(nand->mem->start, resource_size(nand->mem));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(nand);
+
+ return 0;
+}
+
+struct platform_driver jz_nand_driver = {
+ .probe = jz_nand_probe,
+ .remove = __devexit_p(jz_nand_remove),
+ .driver = {
+ .name = "jz4740-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init jz_nand_init(void)
+{
+ return platform_driver_register(&jz_nand_driver);
+}
+module_init(jz_nand_init);
+
+static void __exit jz_nand_exit(void)
+{
+ platform_driver_unregister(&jz_nand_driver);
+}
+module_exit(jz_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC");
+MODULE_ALIAS("platform:jz4740-nand");
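jz_nand_correct_data() above patches a single 9-bit Reed-Solomon symbol in place: symbol k begins at bit 9*k of the data buffer, i.e. byte k + k/8 at bit offset k % 8, and the error pattern reported by the controller is XORed into it. A standalone round-trip sketch of the same fix-up, with a made-up buffer and error mask:

#include <stdio.h>
#include <stdint.h>

/* Same bit arithmetic as jz_nand_correct_data() in the driver above. */
static void correct_symbol(uint8_t *dat, int index, int mask)
{
	int offset = index & 0x7;	/* bit offset inside the byte */
	uint16_t data;

	index += (index >> 3);		/* byte holding the symbol's low bits */

	data = dat[index];
	data |= dat[index + 1] << 8;

	mask ^= (data >> offset) & 0x1ff;	/* corrected 9-bit symbol */
	data &= ~(0x1ff << offset);
	data |= (mask << offset);

	dat[index] = data & 0xff;
	dat[index + 1] = (data >> 8) & 0xff;
}

int main(void)
{
	uint8_t buf[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	uint8_t orig[8];
	int i, ok = 1;

	for (i = 0; i < 8; i++)
		orig[i] = buf[i];

	/* XOR-ing the same (made-up) error pattern twice must round-trip */
	correct_symbol(buf, 3, 0x155);
	correct_symbol(buf, 3, 0x155);

	for (i = 0; i < 8; i++)
		if (buf[i] != orig[i])
			ok = 0;
	printf("round trip %s\n", ok ? "ok" : "FAILED");
	return 0;
}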
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 0a130dcaa12..df0c1da4ff4 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -647,7 +647,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct of_device *op,
+static int __devinit mpc5121_nfc_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *rootnode, *dn = op->dev.of_node;
@@ -869,7 +869,7 @@ error:
return retval;
}
-static int __devexit mpc5121_nfc_remove(struct of_device *op)
+static int __devexit mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 0d76b169482..b2828e84d24 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -39,60 +39,98 @@
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
+#define nfc_is_v3_2() cpu_is_mx51()
+#define nfc_is_v3() nfc_is_v3_2()
/* Addresses for NFC registers */
-#define NFC_BUF_SIZE 0xE00
-#define NFC_BUF_ADDR 0xE04
-#define NFC_FLASH_ADDR 0xE06
-#define NFC_FLASH_CMD 0xE08
-#define NFC_CONFIG 0xE0A
-#define NFC_ECC_STATUS_RESULT 0xE0C
-#define NFC_RSLTMAIN_AREA 0xE0E
-#define NFC_RSLTSPARE_AREA 0xE10
-#define NFC_WRPROT 0xE12
-#define NFC_V1_UNLOCKSTART_BLKADDR 0xe14
-#define NFC_V1_UNLOCKEND_BLKADDR 0xe16
-#define NFC_V21_UNLOCKSTART_BLKADDR 0xe20
-#define NFC_V21_UNLOCKEND_BLKADDR 0xe22
-#define NFC_NF_WRPRST 0xE18
-#define NFC_CONFIG1 0xE1A
-#define NFC_CONFIG2 0xE1C
-
-/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
- * for Command operation */
-#define NFC_CMD 0x1
-
-/* Set INT to 0, FADD to 1, rest to 0 in NFC_CONFIG2 Register
- * for Address operation */
-#define NFC_ADDR 0x2
-
-/* Set INT to 0, FDI to 1, rest to 0 in NFC_CONFIG2 Register
- * for Input operation */
-#define NFC_INPUT 0x4
-
-/* Set INT to 0, FDO to 001, rest to 0 in NFC_CONFIG2 Register
- * for Data Output operation */
-#define NFC_OUTPUT 0x8
-
-/* Set INT to 0, FD0 to 010, rest to 0 in NFC_CONFIG2 Register
- * for Read ID operation */
-#define NFC_ID 0x10
-
-/* Set INT to 0, FDO to 100, rest to 0 in NFC_CONFIG2 Register
- * for Read Status operation */
-#define NFC_STATUS 0x20
-
-/* Set INT to 1, rest to 0 in NFC_CONFIG2 Register for Read
- * Status operation */
-#define NFC_INT 0x8000
-
-#define NFC_SP_EN (1 << 2)
-#define NFC_ECC_EN (1 << 3)
-#define NFC_INT_MSK (1 << 4)
-#define NFC_BIG (1 << 5)
-#define NFC_RST (1 << 6)
-#define NFC_CE (1 << 7)
-#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
+#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
+#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06)
+#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08)
+#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
+#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
+#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
+#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V1_V2_WRPROT (host->regs + 0x12)
+#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
+#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
+#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20)
+#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22)
+#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
+#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
+#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+
+#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
+#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
+#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
+#define NFC_V1_V2_CONFIG1_RST (1 << 6)
+#define NFC_V1_V2_CONFIG1_CE (1 << 7)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
+
+#define NFC_V1_V2_CONFIG2_INT (1 << 15)
+
+/*
+ * Operation modes for the NFC. Valid for v1, v2 and v3
+ * type controllers.
+ */
+#define NFC_CMD (1 << 0)
+#define NFC_ADDR (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+
+#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
+#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04)
+
+#define NFC_V3_CONFIG1 (host->regs_axi + 0x34)
+#define NFC_V3_CONFIG1_SP_EN (1 << 0)
+#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7 ) << 4)
+
+#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38)
+
+#define NFC_V3_LAUNCH (host->regs_axi + 0x40)
+
+#define NFC_V3_WRPROT (host->regs_ip + 0x0)
+#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0)
+#define NFC_V3_WRPROT_LOCK (1 << 1)
+#define NFC_V3_WRPROT_UNLOCK (1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
+
+#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04)
+
+#define NFC_V3_CONFIG2 (host->regs_ip + 0x24)
+#define NFC_V3_CONFIG2_PS_512 (0 << 0)
+#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
+#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
+#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
+#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
+#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
+
+#define NFC_V3_CONFIG3 (host->regs_ip + 0x28)
+#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8 (1 << 3)
+#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
+
+#define NFC_V3_IPC (host->regs_ip + 0x2C)
+#define NFC_V3_IPC_CREQ (1 << 0)
+#define NFC_V3_IPC_INT (1 << 31)
+
+#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
struct mxc_nand_host {
struct mtd_info mtd;
@@ -102,20 +140,30 @@ struct mxc_nand_host {
void *spare0;
void *main_area0;
- void *main_area1;
void __iomem *base;
void __iomem *regs;
+ void __iomem *regs_axi;
+ void __iomem *regs_ip;
int status_request;
struct clk *clk;
int clk_act;
int irq;
+ int eccsize;
wait_queue_head_t irq_waitq;
uint8_t *data_buf;
unsigned int buf_start;
int spare_len;
+
+ void (*preset)(struct mtd_info *);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
};
/* OOB placement block for use with hardware ecc generation */
@@ -175,34 +223,52 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int check_int_v3(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_IPC);
+ if (!(tmp & NFC_V3_IPC_INT))
+ return 0;
+
+ tmp &= ~NFC_V3_IPC_INT;
+ writel(tmp, NFC_V3_IPC);
+
+ return 1;
+}
+
+static int check_int_v1_v2(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG2);
+ if (!(tmp & NFC_V1_V2_CONFIG2_INT))
+ return 0;
+
+ writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+
+ return 1;
+}
+
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
- uint16_t tmp;
int max_retries = 8000;
if (useirq) {
- if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
+ if (!host->check_int(host)) {
enable_irq(host->irq);
- wait_event(host->irq_waitq,
- readw(host->regs + NFC_CONFIG2) & NFC_INT);
-
- tmp = readw(host->regs + NFC_CONFIG2);
- tmp &= ~NFC_INT;
- writew(tmp, host->regs + NFC_CONFIG2);
+ wait_event(host->irq_waitq, host->check_int(host));
}
} else {
while (max_retries-- > 0) {
- if (readw(host->regs + NFC_CONFIG2) & NFC_INT) {
- tmp = readw(host->regs + NFC_CONFIG2);
- tmp &= ~NFC_INT;
- writew(tmp, host->regs + NFC_CONFIG2);
+ if (host->check_int(host))
break;
- }
+
udelay(1);
}
if (max_retries < 0)
@@ -211,21 +277,33 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
}
}
+static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ /* fill command */
+ writel(cmd, NFC_V3_FLASH_CMD);
+
+ /* send out command */
+ writel(NFC_CMD, NFC_V3_LAUNCH);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+}
+
/* This function issues the specified command to the NAND device and
* waits for completion. */
-static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
- writew(cmd, host->regs + NFC_FLASH_CMD);
- writew(NFC_CMD, host->regs + NFC_CONFIG2);
+ writew(cmd, NFC_V1_V2_FLASH_CMD);
+ writew(NFC_CMD, NFC_V1_V2_CONFIG2);
if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
int max_retries = 100;
/* Reset completion is indicated by NFC_CONFIG2 */
/* being set to 0 */
while (max_retries-- > 0) {
- if (readw(host->regs + NFC_CONFIG2) == 0) {
+ if (readw(NFC_V1_V2_CONFIG2) == 0) {
break;
}
udelay(1);
@@ -239,21 +317,48 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
}
}
+static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ /* fill address */
+ writel(addr, NFC_V3_FLASH_ADDR0);
+
+ /* send out address */
+ writel(NFC_ADDR, NFC_V3_LAUNCH);
+
+ wait_op_done(host, 0);
+}
+
/* This function sends an address (or partial address) to the
* NAND device. The address is used to select the source/destination for
* a NAND command. */
-static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast)
+static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
- writew(addr, host->regs + NFC_FLASH_ADDR);
- writew(NFC_ADDR, host->regs + NFC_CONFIG2);
+ writew(addr, NFC_V1_V2_FLASH_ADDR);
+ writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, islast);
}
-static void send_page(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG1);
+ tmp &= ~(7 << 4);
+ writel(tmp, NFC_V3_CONFIG1);
+
+ /* transfer data from NFC ram to nand */
+ writel(ops, NFC_V3_LAUNCH);
+
+ wait_op_done(host, false);
+}
+
+static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -267,63 +372,80 @@ static void send_page(struct mtd_info *mtd, unsigned int ops)
for (i = 0; i < bufs; i++) {
/* NANDFC buffer 0 is used for page read/write */
- writew(i, host->regs + NFC_BUF_ADDR);
+ writew(i, NFC_V1_V2_BUF_ADDR);
- writew(ops, host->regs + NFC_CONFIG2);
+ writew(ops, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, true);
}
}
+static void send_read_id_v3(struct mxc_nand_host *host)
+{
+ /* Read ID into main buffer */
+ writel(NFC_ID, NFC_V3_LAUNCH);
+
+ wait_op_done(host, true);
+
+ memcpy(host->data_buf, host->main_area0, 16);
+}
+
/* Request the NANDFC to perform a read of the NAND device ID. */
-static void send_read_id(struct mxc_nand_host *host)
+static void send_read_id_v1_v2(struct mxc_nand_host *host)
{
struct nand_chip *this = &host->nand;
/* NANDFC buffer 0 is used for device ID output */
- writew(0x0, host->regs + NFC_BUF_ADDR);
+ writew(0x0, NFC_V1_V2_BUF_ADDR);
- writew(NFC_ID, host->regs + NFC_CONFIG2);
+ writew(NFC_ID, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, true);
+ memcpy(host->data_buf, host->main_area0, 16);
+
if (this->options & NAND_BUSWIDTH_16) {
- void __iomem *main_buf = host->main_area0;
/* compress the ID info */
- writeb(readb(main_buf + 2), main_buf + 1);
- writeb(readb(main_buf + 4), main_buf + 2);
- writeb(readb(main_buf + 6), main_buf + 3);
- writeb(readb(main_buf + 8), main_buf + 4);
- writeb(readb(main_buf + 10), main_buf + 5);
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
}
- memcpy(host->data_buf, host->main_area0, 16);
+}
+
+static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
+{
+ writew(NFC_STATUS, NFC_V3_LAUNCH);
+ wait_op_done(host, true);
+
+ return readl(NFC_V3_CONFIG1) >> 16;
}
/* This function requests the NANDFC to perform a read of the
* NAND device status and returns the current status. */
-static uint16_t get_dev_status(struct mxc_nand_host *host)
+static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
{
- void __iomem *main_buf = host->main_area1;
+ void __iomem *main_buf = host->main_area0;
uint32_t store;
uint16_t ret;
- /* Issue status request to NAND device */
- /* store the main area1 first word, later do recovery */
- store = readl(main_buf);
- /* NANDFC buffer 1 is used for device status to prevent
- * corruption of read/write buffer on status requests. */
- writew(1, host->regs + NFC_BUF_ADDR);
+ writew(0x0, NFC_V1_V2_BUF_ADDR);
- writew(NFC_STATUS, host->regs + NFC_CONFIG2);
+ /*
+ * The device status is stored in main_area0. To
+ * prevent corruption of the buffer, save the value
+ * and restore it afterwards.
+ */
+ store = readl(main_buf);
- /* Wait for operation to complete */
+ writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
wait_op_done(host, true);
- /* Status is placed in first word of main buffer */
- /* get status, then recovery area 1 data */
ret = readw(main_buf);
+
writel(store, main_buf);
return ret;
@@ -347,7 +469,7 @@ static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
*/
}
-static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
struct nand_chip *nand_chip = mtd->priv;
@@ -358,7 +480,7 @@ static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
* additional correction. 2-Bit errors cannot be corrected by
* HW ECC, so we need to return failure
*/
- uint16_t ecc_status = readw(host->regs + NFC_ECC_STATUS_RESULT);
+ uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
DEBUG(MTD_DEBUG_LEVEL0,
@@ -369,6 +491,43 @@ static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
return 0;
}
+static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u32 ecc_stat, err;
+ int no_subpages = 1;
+ int ret = 0;
+ u8 ecc_bit_mask, err_limit;
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ if (nfc_is_v21())
+ ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
+ else
+ ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ return -1;
+ } else {
+ ret += err;
+ }
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ mtd->ecc_stats.corrected += ret;
+ pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+
+ return ret;
+}
+
static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
u_char *ecc_code)
{
@@ -383,7 +542,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
/* Check for status request */
if (host->status_request)
- return get_dev_status(host) & 0xFF;
+ return host->get_dev_status(host) & 0xFF;
ret = *(uint8_t *)(host->data_buf + host->buf_start);
host->buf_start++;
@@ -519,71 +678,166 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
* we will used the saved column address to index into
* the full page.
*/
- send_addr(host, 0, page_addr == -1);
+ host->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
- send_addr(host, 0, false);
+ host->send_addr(host, 0, false);
}
/* Write out page address, if necessary */
if (page_addr != -1) {
/* paddr_0 - p_addr_7 */
- send_addr(host, (page_addr & 0xff), false);
+ host->send_addr(host, (page_addr & 0xff), false);
if (mtd->writesize > 512) {
if (mtd->size >= 0x10000000) {
/* paddr_8 - paddr_15 */
- send_addr(host, (page_addr >> 8) & 0xff, false);
- send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->send_addr(host, (page_addr >> 8) & 0xff, false);
+ host->send_addr(host, (page_addr >> 16) & 0xff, true);
} else
/* paddr_8 - paddr_15 */
- send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->send_addr(host, (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
/* paddr_8 - paddr_15 */
- send_addr(host, (page_addr >> 8) & 0xff, false);
- send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->send_addr(host, (page_addr >> 8) & 0xff, false);
+ host->send_addr(host, (page_addr >> 16) & 0xff, true);
} else
/* paddr_8 - paddr_15 */
- send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->send_addr(host, (page_addr >> 8) & 0xff, true);
}
}
}
-static void preset(struct mtd_info *mtd)
+/*
+ * v2 and v3 type controllers can do 4bit or 8bit ecc depending
+ * on how much oob the nand chip has. For 8bit ecc we need at least
+ * 26 bytes of oob data per 512 byte block.
+ */
+static int get_eccsize(struct mtd_info *mtd)
+{
+ int oobbytes_per_512 = 0;
+
+ oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
+
+ if (oobbytes_per_512 < 26)
+ return 4;
+ else
+ return 8;
+}
+
+static void preset_v1_v2(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- uint16_t tmp;
-
- /* enable interrupt, disable spare enable */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_INT_MSK;
- tmp &= ~NFC_SP_EN;
- if (nand_chip->ecc.mode == NAND_ECC_HW) {
- tmp |= NFC_ECC_EN;
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (nfc_is_v21())
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!cpu_is_mx21())
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ if (nfc_is_v21() && mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 4)
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
} else {
- tmp &= ~NFC_ECC_EN;
+ host->eccsize = 1;
}
- writew(tmp, host->regs + NFC_CONFIG1);
+
+ writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
/* Unlock the internal RAM Buffer */
- writew(0x2, host->regs + NFC_CONFIG);
+ writew(0x2, NFC_V1_V2_CONFIG);
/* Blocks to be unlocked */
if (nfc_is_v21()) {
- writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR);
} else if (nfc_is_v1()) {
- writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
} else
BUG();
/* Unlock Block Command for given address range */
- writew(0x4, host->regs + NFC_WRPROT);
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v3(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mxc_nand_host *host = chip->priv;
+ uint32_t config2, config3;
+ int i, addr_phases;
+
+ writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
+ writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
+
+ /* Unlock the internal RAM Buffer */
+ writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+ NFC_V3_WRPROT);
+
+ /* Blocks to be unlocked */
+ for (i = 0; i < NAND_MAX_CHIPS; i++)
+ writel(0x0 | (0xffff << 16),
+ NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
+
+ writel(0, NFC_V3_IPC);
+
+ config2 = NFC_V3_CONFIG2_ONE_CYCLE |
+ NFC_V3_CONFIG2_2CMD_PHASES |
+ NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
+ NFC_V3_CONFIG2_ST_CMD(0x70) |
+ NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
+ addr_phases = fls(chip->pagemask) >> 3;
+
+ if (mtd->writesize == 2048) {
+ config2 |= NFC_V3_CONFIG2_PS_2048;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else if (mtd->writesize == 4096) {
+ config2 |= NFC_V3_CONFIG2_PS_4096;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else {
+ config2 |= NFC_V3_CONFIG2_PS_512;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
+ }
+
+ if (mtd->writesize) {
+ config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 8)
+ config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
+ }
+
+ writel(config2, NFC_V3_CONFIG2);
+
+ config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ config3 |= NFC_V3_CONFIG3_FW8;
+
+ writel(config3, NFC_V3_CONFIG3);
+
+ writel(0, NFC_V3_DELAY_LINE);
}
/* Used by the upper layer to write command to NAND Flash for
@@ -604,15 +858,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
case NAND_CMD_RESET:
- send_cmd(host, command, false);
- preset(mtd);
+ host->preset(mtd);
+ host->send_cmd(host, command, false);
break;
case NAND_CMD_STATUS:
host->buf_start = 0;
host->status_request = true;
- send_cmd(host, command, true);
+ host->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -625,13 +879,13 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
command = NAND_CMD_READ0; /* only READ0 is valid */
- send_cmd(host, command, false);
+ host->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
if (mtd->writesize > 512)
- send_cmd(host, NAND_CMD_READSTART, true);
+ host->send_cmd(host, NAND_CMD_READSTART, true);
- send_page(mtd, NFC_OUTPUT);
+ host->send_page(mtd, NFC_OUTPUT);
memcpy(host->data_buf, host->main_area0, mtd->writesize);
copy_spare(mtd, true);
@@ -644,28 +898,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->buf_start = column;
- send_cmd(host, command, false);
+ host->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_PAGEPROG:
memcpy(host->main_area0, host->data_buf, mtd->writesize);
copy_spare(mtd, false);
- send_page(mtd, NFC_INPUT);
- send_cmd(host, command, true);
+ host->send_page(mtd, NFC_INPUT);
+ host->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_READID:
- send_cmd(host, command, true);
+ host->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
- send_read_id(host);
+ host->send_read_id(host);
host->buf_start = column;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
- send_cmd(host, command, false);
+ host->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -761,22 +1015,55 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
host->main_area0 = host->base;
- host->main_area1 = host->base + 0x200;
+
+ if (nfc_is_v1() || nfc_is_v21()) {
+ host->preset = preset_v1_v2;
+ host->send_cmd = send_cmd_v1_v2;
+ host->send_addr = send_addr_v1_v2;
+ host->send_page = send_page_v1_v2;
+ host->send_read_id = send_read_id_v1_v2;
+ host->get_dev_status = get_dev_status_v1_v2;
+ host->check_int = check_int_v1_v2;
+ }
if (nfc_is_v21()) {
- host->regs = host->base + 0x1000;
+ host->regs = host->base + 0x1e00;
host->spare0 = host->base + 0x1000;
host->spare_len = 64;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
- host->regs = host->base;
+ host->regs = host->base + 0xe00;
host->spare0 = host->base + 0x800;
host->spare_len = 16;
oob_smallpage = &nandv1_hw_eccoob_smallpage;
oob_largepage = &nandv1_hw_eccoob_largepage;
this->ecc.bytes = 3;
+ host->eccsize = 1;
+ } else if (nfc_is_v3_2()) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ err = -ENODEV;
+ goto eirq;
+ }
+ host->regs_ip = ioremap(res->start, resource_size(res));
+ if (!host->regs_ip) {
+ err = -ENOMEM;
+ goto eirq;
+ }
+ host->regs_axi = host->base + 0x1e00;
+ host->spare0 = host->base + 0x1000;
+ host->spare_len = 64;
+ host->preset = preset_v3;
+ host->send_cmd = send_cmd_v3;
+ host->send_addr = send_addr_v3;
+ host->send_page = send_page_v3;
+ host->send_read_id = send_read_id_v3;
+ host->check_int = check_int_v3;
+ host->get_dev_status = get_dev_status_v3;
+ oob_smallpage = &nandv2_hw_eccoob_smallpage;
+ oob_largepage = &nandv2_hw_eccoob_largepage;
} else
BUG();
@@ -786,7 +1073,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
if (pdata->hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
- this->ecc.correct = mxc_nand_correct_data;
+ if (nfc_is_v1())
+ this->ecc.correct = mxc_nand_correct_data_v1;
+ else
+ this->ecc.correct = mxc_nand_correct_data_v2_v3;
this->ecc.mode = NAND_ECC_HW;
} else {
this->ecc.mode = NAND_ECC_SOFT;
@@ -817,6 +1107,9 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto escan;
}
+ /* Call preset again, with correct writesize this time */
+ host->preset(mtd);
+
if (mtd->writesize == 2048)
this->ecc.layout = oob_largepage;
@@ -848,6 +1141,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
escan:
free_irq(host->irq, host);
eirq:
+ if (host->regs_ip)
+ iounmap(host->regs_ip);
iounmap(host->base);
eres:
clk_put(host->clk);
@@ -867,59 +1162,19 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
nand_release(&host->mtd);
free_irq(host->irq, host);
+ if (host->regs_ip)
+ iounmap(host->regs_ip);
iounmap(host->base);
kfree(host);
return 0;
}
-#ifdef CONFIG_PM
-static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct nand_chip *nand_chip = mtd->priv;
- struct mxc_nand_host *host = nand_chip->priv;
- int ret = 0;
-
- DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
-
- ret = mtd->suspend(mtd);
-
- /*
- * nand_suspend locks the device for exclusive access, so
- * the clock must already be off.
- */
- BUG_ON(!ret && host->clk_act);
-
- return ret;
-}
-
-static int mxcnd_resume(struct platform_device *pdev)
-{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct nand_chip *nand_chip = mtd->priv;
- struct mxc_nand_host *host = nand_chip->priv;
- int ret = 0;
-
- DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
-
- mtd->resume(mtd);
-
- return ret;
-}
-
-#else
-# define mxcnd_suspend NULL
-# define mxcnd_resume NULL
-#endif /* CONFIG_PM */
-
static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
- },
+ },
.remove = __devexit_p(mxcnd_remove),
- .suspend = mxcnd_suspend,
- .resume = mxcnd_resume,
};
static int __init mxc_nd_init(void)
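The mxc_nand.c hunks above do two things worth unpacking: they route every controller access through per-version function pointers stored in the host structure (host->send_cmd, host->send_addr, host->preset, ...), chosen once in mxcnd_probe() depending on whether the NFC is v1/v2.1 or v3.2, and they pick the ECC strength from the spare-area budget, needing at least 26 OOB bytes per 512-byte step for 8-bit ECC (see get_eccsize()). A minimal standalone sketch of both ideas follows; every name in it is made up for illustration and is not a symbol from the driver.

/* Illustrative sketch only -- fake_host, preset_*_demo and eccsize_demo are
 * invented names; only the selection logic mirrors the hunks above. */
#include <stdio.h>

struct fake_host {
	int version;				/* 1, 2 or 3, picked at "probe" time */
	void (*preset)(struct fake_host *h);	/* per-version hook */
};

static void preset_v1_v2_demo(struct fake_host *h) { (void)h; printf("v1/v2 preset\n"); }
static void preset_v3_demo(struct fake_host *h)    { (void)h; printf("v3 preset\n"); }

/* Same rule as get_eccsize(): 8-bit ECC needs at least 26 OOB bytes per
 * 512-byte ECC step, otherwise fall back to 4-bit ECC. */
static int eccsize_demo(int oobsize, int writesize)
{
	int oobbytes_per_512 = oobsize * 512 / writesize;

	return (oobbytes_per_512 < 26) ? 4 : 8;
}

int main(void)
{
	struct fake_host host = { .version = 3 };

	host.preset = (host.version == 3) ? preset_v3_demo : preset_v1_v2_demo;
	host.preset(&host);	/* callers never branch on the version again */

	/* 2KiB page + 64B OOB  -> 16 bytes/512 -> 4-bit ECC;
	 * 4KiB page + 218B OOB -> 27 bytes/512 -> 8-bit ECC. */
	printf("2048+64  -> %d-bit ECC\n", eccsize_demo(64, 2048));
	printf("4096+218 -> %d-bit ECC\n", eccsize_demo(218, 4096));
	return 0;
}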
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 4a7b86423ee..d551ddd9537 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -42,7 +42,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/compatmac.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/leds.h>
@@ -347,7 +346,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
- if (chip->options & NAND_BB_LAST_PAGE)
+ if (chip->options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -397,9 +396,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
uint8_t buf[2] = { 0, 0 };
- int block, ret;
+ int block, ret, i = 0;
- if (chip->options & NAND_BB_LAST_PAGE)
+ if (chip->options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
/* Get block number */
@@ -411,17 +410,31 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (chip->options & NAND_USE_FLASH_BBT)
ret = nand_update_bbt(mtd, ofs);
else {
- /* We write two bytes, so we dont have to mess with 16 bit
- * access
- */
nand_get_device(chip, mtd, FL_WRITING);
- ofs += mtd->oobsize;
- chip->ops.len = chip->ops.ooblen = 2;
- chip->ops.datbuf = NULL;
- chip->ops.oobbuf = buf;
- chip->ops.ooboffs = chip->badblockpos & ~0x01;
- ret = nand_do_write_oob(mtd, ofs, &chip->ops);
+ /* Write to first two pages and to bytes 1 and 6 if necessary.
+ * If we write to more than one location, the first error
+ * encountered quits the procedure. We write two bytes per
+ * location, so we don't have to mess with 16 bit access.
+ */
+ do {
+ chip->ops.len = chip->ops.ooblen = 2;
+ chip->ops.datbuf = NULL;
+ chip->ops.oobbuf = buf;
+ chip->ops.ooboffs = chip->badblockpos & ~0x01;
+
+ ret = nand_do_write_oob(mtd, ofs, &chip->ops);
+
+ if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
+ chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
+ & ~0x01;
+ ret = nand_do_write_oob(mtd, ofs, &chip->ops);
+ }
+ i++;
+ ofs += mtd->writesize;
+ } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) &&
+ i < 2);
+
nand_release_device(mtd);
}
if (!ret)
@@ -876,17 +889,17 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
- * __nand_unlock - [REPLACABLE] unlocks specified locked blockes
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
*
- * @param mtd - mtd info
- * @param ofs - offset to start unlock from
- * @param len - length to unlock
- * @invert - when = 0, unlock the range of blocks within the lower and
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ * @invert: when = 0, unlock the range of blocks within the lower and
* upper boundary address
- * whne = 1, unlock the range of blocks outside the boundaries
+ * when = 1, unlock the range of blocks outside the boundaries
* of the lower and upper boundary address
*
- * @return - unlock status
+ * return - unlock status
*/
static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
uint64_t len, int invert)
@@ -918,13 +931,13 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
}
/**
- * nand_unlock - [REPLACABLE] unlocks specified locked blockes
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
*
- * @param mtd - mtd info
- * @param ofs - offset to start unlock from
- * @param len - length to unlock
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
*
- * @return - unlock status
+ * return - unlock status
*/
int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
@@ -969,16 +982,16 @@ out:
}
/**
- * nand_lock - [REPLACABLE] locks all blockes present in the device
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
*
- * @param mtd - mtd info
- * @param ofs - offset to start unlock from
- * @param len - length to unlock
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
*
- * @return - lock status
+ * return - lock status
*
- * This feature is not support in many NAND parts. 'Micron' NAND parts
- * do have this feature, but it allows only to lock all blocks not for
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts
+ * do have this feature, but they allow only locking all blocks, not a
+ * specified range of blocks.
*
* Implementing 'lock' feature by making use of 'unlock', for now.
@@ -2080,6 +2093,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* nand_fill_oob - [Internal] Transfer client buffer to oob
* @chip: nand chip structure
* @oob: oob data buffer
+ * @len: oob data write length
* @ops: oob ops structure
*/
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
@@ -2852,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
*/
if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
id_data[0] == NAND_MFR_SAMSUNG &&
+ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
@@ -2920,9 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
/* Set the bad block position */
- chip->badblockpos = mtd->writesize > 512 ?
- NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
- chip->badblockbits = 8;
+ if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2941,12 +2957,32 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/*
* Bad block marker is stored in the last page of each block
- * on Samsung and Hynix MLC devices
+ * on Samsung and Hynix MLC devices; stored in first two pages
+ * of each block on Micron devices with 2KiB pages and on
+ * SLC Samsung, Hynix, and AMD/Spansion. All others scan only
+ * the first page.
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX))
- chip->options |= NAND_BB_LAST_PAGE;
+ chip->options |= NAND_BBT_SCANLASTPAGE;
+ else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (*maf_id == NAND_MFR_SAMSUNG ||
+ *maf_id == NAND_MFR_HYNIX ||
+ *maf_id == NAND_MFR_AMD)) ||
+ (mtd->writesize == 2048 &&
+ *maf_id == NAND_MFR_MICRON))
+ chip->options |= NAND_BBT_SCAN2NDPAGE;
+
+ /*
+ * Numonyx/ST 2K-page, x8-bus devices use BOTH bytes 1 and 6
+ */
+ if (!(busw & NAND_BUSWIDTH_16) &&
+ *maf_id == NAND_MFR_STMICRO &&
+ mtd->writesize == 2048) {
+ chip->options |= NAND_BBT_SCANBYTE1AND6;
+ chip->badblockpos = 0;
+ }
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
@@ -3306,6 +3342,11 @@ void nand_release(struct mtd_info *mtd)
kfree(chip->bbt);
if (!(chip->options & NAND_OWN_BUFFERS))
kfree(chip->buffers);
+
+ /* Free bad block descriptor memory */
+ if (chip->badblock_pattern && chip->badblock_pattern->options
+ & NAND_BBT_DYNAMICSTRUCT)
+ kfree(chip->badblock_pattern);
}
EXPORT_SYMBOL_GPL(nand_lock);
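The nand_base.c changes above adjust two related policies: the bad block marker position is now byte 0 for large-page or 16-bit devices and byte 5 for small-page x8 devices, and marking a block bad can touch the first two pages (NAND_BBT_SCAN2NDPAGE) and additionally bytes 1 and 6 (NAND_BBT_SCANBYTE1AND6), stopping at the first write error. A small standalone sketch of the selection logic; the function and constant names are invented for the example, only the values 0 and 5 and the rules themselves come from the hunks above.

/* Sketch of the new marker-position rules; not the kernel's code. */
#include <stdbool.h>
#include <stdio.h>

enum { LARGE_BADBLOCK_POS = 0, SMALL_BADBLOCK_POS = 5 };

static int badblockpos(int writesize, bool bus16)
{
	/* large-page or 16-bit devices keep the marker at byte 0,
	 * small-page x8 devices at byte 5 */
	return (writesize > 512 || bus16) ? LARGE_BADBLOCK_POS
					  : SMALL_BADBLOCK_POS;
}

static int pages_to_mark(bool scan_2nd_page)
{
	/* with SCAN2NDPAGE set, the marker goes into the first two pages
	 * of the block, otherwise only into the first one */
	return scan_2nd_page ? 2 : 1;
}

int main(void)
{
	printf("2KiB x8 : marker at byte %d\n", badblockpos(2048, false));
	printf("512B x8 : marker at byte %d\n", badblockpos(512, false));
	printf("512B x16: marker at byte %d\n", badblockpos(512, true));
	printf("pages marked with SCAN2NDPAGE: %d\n", pages_to_mark(true));
	return 0;
}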
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index ad97c0ce73b..5fedf4a74f1 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -55,7 +55,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/compatmac.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
@@ -93,6 +92,28 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
return -1;
}
+ /* Check both positions 1 and 6 for pattern? */
+ if (td->options & NAND_BBT_SCANBYTE1AND6) {
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ p += td->len;
+ end += NAND_SMALL_BADBLOCK_POS - td->offs;
+ /* Check region between positions 1 and 6 */
+ for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
+ i++) {
+ if (*p++ != 0xff)
+ return -1;
+ }
+ }
+ else {
+ p += NAND_SMALL_BADBLOCK_POS - td->offs;
+ }
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ }
+
if (td->options & NAND_BBT_SCANEMPTY) {
p += td->len;
end += td->len;
@@ -124,6 +145,13 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
if (p[td->offs + i] != td->pattern[i])
return -1;
}
+ /* Need to check location 1 AND 6? */
+ if (td->options & NAND_BBT_SCANBYTE1AND6) {
+ for (i = 0; i < td->len; i++) {
+ if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
+ return -1;
+ }
+ }
return 0;
}
@@ -397,12 +425,10 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
if (bd->options & NAND_BBT_SCANALLPAGES)
len = 1 << (this->bbt_erase_shift - this->page_shift);
- else {
- if (bd->options & NAND_BBT_SCAN2NDPAGE)
- len = 2;
- else
- len = 1;
- }
+ else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ len = 2;
+ else
+ len = 1;
if (!(bd->options & NAND_BBT_SCANEMPTY)) {
/* We need only read few bytes from the OOB area */
@@ -432,7 +458,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
- if (this->options & NAND_BB_LAST_PAGE)
+ if (this->options & NAND_BBT_SCANLASTPAGE)
from += mtd->erasesize - (mtd->writesize * len);
for (i = startblock; i < numblocks;) {
@@ -1092,30 +1118,16 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
* while scanning a device for factory marked good / bad blocks. */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static struct nand_bbt_descr smallpage_memorybased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = 5,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
static struct nand_bbt_descr smallpage_flashbased = {
.options = NAND_BBT_SCAN2NDPAGE,
- .offs = 5,
+ .offs = NAND_SMALL_BADBLOCK_POS,
.len = 1,
.pattern = scan_ff_pattern
};
static struct nand_bbt_descr largepage_flashbased = {
.options = NAND_BBT_SCAN2NDPAGE,
- .offs = 0,
+ .offs = NAND_LARGE_BADBLOCK_POS,
.len = 2,
.pattern = scan_ff_pattern
};
@@ -1154,6 +1166,43 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern
};
+#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
+ NAND_BBT_SCANBYTE1AND6)
+/**
+ * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of "this". The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ *
+ * TODO: Handle other flags, replace other static structs
+ * (e.g. handle NAND_BBT_FLASH for flash-based BBT,
+ * replace smallpage_flashbased)
+ *
+ */
+static int nand_create_default_bbt_descr(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+ if (this->badblock_pattern) {
+ printk(KERN_WARNING "BBT descr already allocated; not replacing.\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd) {
+ printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
+ return -ENOMEM;
+ }
+ bd->options = this->options & BBT_SCAN_OPTIONS;
+ bd->offs = this->badblockpos;
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
/**
* nand_default_bbt - [NAND Interface] Select a default bad block table for the device
* @mtd: MTD device structure
@@ -1196,10 +1245,8 @@ int nand_default_bbt(struct mtd_info *mtd)
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
- if (!this->badblock_pattern) {
- this->badblock_pattern = (mtd->writesize > 512) ?
- &largepage_memorybased : &smallpage_memorybased;
- }
+ if (!this->badblock_pattern)
+ nand_create_default_bbt_descr(this);
}
return nand_scan_bbt(mtd, this->badblock_pattern);
}
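nand_create_default_bbt_descr() above replaces the two static memory-based descriptors with one built at runtime from the chip's own properties: the scan options are masked out of this->options, the offset comes from badblockpos, the length from the bus width, and the NAND_BBT_DYNAMICSTRUCT flag records that nand_release() must free it. A compact userspace sketch of that allocate/tag/free pattern, with malloc/free standing in for kzalloc/kfree and invented flag values:

/* Sketch only: flag values and struct layout are made up for illustration. */
#include <stdio.h>
#include <stdlib.h>

#define SCANLASTPAGE   0x1
#define SCAN2NDPAGE    0x2
#define SCANBYTE1AND6  0x4
#define DYNAMICSTRUCT  0x8
#define SCAN_MASK      (SCANLASTPAGE | SCAN2NDPAGE | SCANBYTE1AND6)

struct bbt_descr {
	unsigned int options;
	int offs;
	int len;
};

static struct bbt_descr *create_default_descr(unsigned int chip_options,
					       int badblockpos, int bus16)
{
	struct bbt_descr *bd = calloc(1, sizeof(*bd));

	if (!bd)
		return NULL;
	bd->options = (chip_options & SCAN_MASK) | DYNAMICSTRUCT;
	bd->offs = badblockpos;
	bd->len = bus16 ? 2 : 1;	/* two marker bytes on x16 parts */
	return bd;
}

int main(void)
{
	struct bbt_descr *bd = create_default_descr(SCAN2NDPAGE, 5, 0);

	if (!bd)
		return 1;
	printf("options 0x%x offs %d len %d\n", bd->options, bd->offs, bd->len);
	if (bd->options & DYNAMICSTRUCT)	/* mirrors the new nand_release() check */
		free(bd);
	return 0;
}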
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 89907ed9900..c65f19074bc 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -85,6 +85,7 @@ struct nand_flash_dev nand_flash_ids[] = {
{"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
{"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
+ {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16},
/* 2 Gigabit */
{"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
@@ -110,6 +111,9 @@ struct nand_flash_dev nand_flash_ids[] = {
{"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
{"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
+ /* 32 Gigabit */
+ {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
+
/*
* Renesas AND 1 Gigabit. Those chips do not support extended id and
* have a strange page/block layout ! The chosen minimum erasesize is
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261337efe0e..c25648bb579 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -553,8 +553,8 @@ static uint64_t divide(uint64_t n, uint32_t d)
*/
static int init_nandsim(struct mtd_info *mtd)
{
- struct nand_chip *chip = (struct nand_chip *)mtd->priv;
- struct nandsim *ns = (struct nandsim *)(chip->priv);
+ struct nand_chip *chip = mtd->priv;
+ struct nandsim *ns = chip->priv;
int i, ret = 0;
uint64_t remains;
uint64_t next_offset;
@@ -1877,7 +1877,7 @@ static void switch_state(struct nandsim *ns)
static u_char ns_nand_read_byte(struct mtd_info *mtd)
{
- struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
u_char outb = 0x00;
/* Sanity and correctness checks */
@@ -1950,7 +1950,7 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
{
- struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
/* Sanity and correctness checks */
if (!ns->lines.ce) {
@@ -2132,7 +2132,7 @@ static uint16_t ns_nand_read_word(struct mtd_info *mtd)
static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
- struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
@@ -2159,7 +2159,7 @@ static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
- struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
/* Sanity and correctness checks */
if (!ns->lines.ce) {
@@ -2352,7 +2352,7 @@ module_init(ns_init_module);
*/
static void __exit ns_cleanup_module(void)
{
- struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv);
+ struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
int i;
free_nandsim(ns); /* Free nandsim private resources */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 98fd2bdf8be..510554e6c11 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -35,7 +35,7 @@
struct ndfc_controller {
- struct of_device *ofdev;
+ struct platform_device *ofdev;
void __iomem *ndfcbase;
struct mtd_info mtd;
struct nand_chip chip;
@@ -225,7 +225,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct of_device *ofdev,
+static int __devinit ndfc_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct ndfc_controller *ndfc = &ndfc_ctrl;
@@ -277,7 +277,7 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
return 0;
}
-static int __devexit ndfc_remove(struct of_device *ofdev)
+static int __devexit ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ee87325c771..513e0a76a4a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define CONFIG_MTD_NAND_OMAP_HWECC
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -23,20 +24,8 @@
#include <plat/gpmc.h>
#include <plat/nand.h>
-#define GPMC_IRQ_STATUS 0x18
-#define GPMC_ECC_CONFIG 0x1F4
-#define GPMC_ECC_CONTROL 0x1F8
-#define GPMC_ECC_SIZE_CONFIG 0x1FC
-#define GPMC_ECC1_RESULT 0x200
-
#define DRIVER_NAME "omap2-nand"
-#define NAND_WP_OFF 0
-#define NAND_WP_BIT 0x00000010
-
-#define GPMC_BUF_FULL 0x00000001
-#define GPMC_BUF_EMPTY 0x00000000
-
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
@@ -139,34 +128,11 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
- void __iomem *gpmc_cs_baseaddr;
- void __iomem *gpmc_baseaddr;
- void __iomem *nand_pref_fifo_add;
struct completion comp;
int dma_ch;
};
/**
- * omap_nand_wp - This function enable or disable the Write Protect feature
- * @mtd: MTD device structure
- * @mode: WP ON/OFF
- */
-static void omap_nand_wp(struct mtd_info *mtd, int mode)
-{
- struct omap_nand_info *info = container_of(mtd,
- struct omap_nand_info, mtd);
-
- unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
-
- if (mode)
- config &= ~(NAND_WP_BIT); /* WP is ON */
- else
- config |= (NAND_WP_BIT); /* WP is OFF */
-
- __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
-}
-
-/**
* omap_hwcontrol - hardware specific access to control-lines
* @mtd: MTD device structure
* @cmd: command to device
@@ -181,31 +147,17 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- switch (ctrl) {
- case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
- info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_COMMAND;
- info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_DATA;
- break;
-
- case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
- info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_ADDRESS;
- info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_DATA;
- break;
-
- case NAND_CTRL_CHANGE | NAND_NCE:
- info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_DATA;
- info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_DATA;
- break;
- }
- if (cmd != NAND_CMD_NONE)
- __raw_writeb(cmd, info->nand.IO_ADDR_W);
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+
+ else if (ctrl & NAND_ALE)
+ gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+
+ else /* NAND_NCE */
+ gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+ }
}
/**
@@ -232,11 +184,14 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
u_char *p = (u_char *)buf;
+ u32 status = 0;
while (len--) {
iowrite8(*p++, info->nand.IO_ADDR_W);
- while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
- GPMC_STATUS) & GPMC_BUF_FULL));
+ /* wait until buffer is available for write */
+ do {
+ status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ } while (!status);
}
}
@@ -264,16 +219,16 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
u16 *p = (u16 *) buf;
-
+ u32 status = 0;
/* FIXME try bursts of writesw() or DMA ... */
len >>= 1;
while (len--) {
iowrite16(*p++, info->nand.IO_ADDR_W);
-
- while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
- GPMC_STATUS) & GPMC_BUF_FULL))
- ;
+ /* wait until buffer is available for write */
+ do {
+ status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ } while (!status);
}
}
@@ -287,7 +242,7 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t pfpw_status = 0, r_count = 0;
+ uint32_t r_count = 0;
int ret = 0;
u32 *p = (u32 *)buf;
@@ -310,16 +265,16 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
else
omap_read_buf8(mtd, buf, len);
} else {
+ p = (u32 *) buf;
do {
- pfpw_status = gpmc_prefetch_status();
- r_count = ((pfpw_status >> 24) & 0x7F) >> 2;
- ioread32_rep(info->nand_pref_fifo_add, p, r_count);
+ r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = r_count >> 2;
+ ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
-
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset();
+ gpmc_prefetch_reset(info->gpmc_cs);
}
}
@@ -334,13 +289,13 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t pfpw_status = 0, w_count = 0;
+ uint32_t pref_count = 0, w_count = 0;
int i = 0, ret = 0;
- u16 *p = (u16 *) buf;
+ u16 *p;
/* take care of subpage writes */
if (len % 2 != 0) {
- writeb(*buf, info->nand.IO_ADDR_R);
+ writeb(*buf, info->nand.IO_ADDR_W);
p = (u16 *)(buf + 1);
len--;
}
@@ -354,16 +309,19 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
else
omap_write_buf8(mtd, buf, len);
} else {
- pfpw_status = gpmc_prefetch_status();
- while (pfpw_status & 0x3FFF) {
- w_count = ((pfpw_status >> 24) & 0x7F) >> 1;
+ p = (u16 *) buf;
+ while (len) {
+ w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
- iowrite16(*p++, info->nand_pref_fifo_add);
- pfpw_status = gpmc_prefetch_status();
+ iowrite16(*p++, info->nand.IO_ADDR_W);
}
-
+ /* wait for the data to be flushed out before resetting the prefetch */
+ do {
+ pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
+ } while (pref_count);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset();
+ gpmc_prefetch_reset(info->gpmc_cs);
}
}
@@ -451,10 +409,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
- while (0x3fff & (prefetch_status = gpmc_prefetch_status()))
- ;
+ do {
+ prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
+ } while (prefetch_status);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset();
+ gpmc_prefetch_reset(info->gpmc_cs);
dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
return 0;
@@ -530,29 +489,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
}
#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-/**
- * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
- * @mtd: MTD device structure
- */
-static void omap_hwecc_init(struct mtd_info *mtd)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- struct nand_chip *chip = mtd->priv;
- unsigned long val = 0x0;
-
- /* Read from ECC Control Register */
- val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
- /* Clear all ECC | Enable Reg1 */
- val = ((0x00000001<<8) | 0x00000001);
- __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
-
- /* Read from ECC Size Config Register */
- val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
- /* ECCSIZE1=512 | Select eccResultsize[0-3] */
- val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
- __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
-}
/**
* gen_true_ecc - This function will generate true ECC value
@@ -755,19 +691,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- unsigned long val = 0x0;
- unsigned long reg;
-
- /* Start Reading from HW ECC1_Result = 0x200 */
- reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
- val = __raw_readl(reg);
- *ecc_code++ = val; /* P128e, ..., P1e */
- *ecc_code++ = val >> 16; /* P128o, ..., P1o */
- /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
- *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
- reg += 4;
-
- return 0;
+ return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}
/**
@@ -781,32 +705,10 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
mtd);
struct nand_chip *chip = mtd->priv;
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
- unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
-
- switch (mode) {
- case NAND_ECC_READ:
- __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
- /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
- val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
- break;
- case NAND_ECC_READSYN:
- __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
- /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
- val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
- break;
- case NAND_ECC_WRITE:
- __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
- /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
- val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
- break;
- default:
- DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
- mode);
- break;
- }
- __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+ gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}
+
#endif
/**
@@ -834,14 +736,10 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
timeo += (HZ * 20) / 1000;
- this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
- GPMC_CS_NAND_COMMAND;
- this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
-
- __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
-
+ gpmc_nand_write(info->gpmc_cs,
+ GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
while (time_before(jiffies, timeo)) {
- status = __raw_readb(this->IO_ADDR_R);
+ status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
if (status & NAND_STATUS_READY)
break;
cond_resched();
@@ -855,22 +753,22 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
static int omap_dev_ready(struct mtd_info *mtd)
{
+ unsigned int val = 0;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
if ((val & 0x100) == 0x100) {
/* Clear IRQ Interrupt */
val |= 0x100;
val &= ~(0x0);
- __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
} else {
unsigned int cnt = 0;
while (cnt++ < 0x1FF) {
if ((val & 0x100) == 0x100)
return 0;
- val = __raw_readl(info->gpmc_baseaddr +
- GPMC_IRQ_STATUS);
+ val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
}
}
@@ -901,8 +799,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
info->gpmc_cs = pdata->cs;
- info->gpmc_baseaddr = pdata->gpmc_baseaddr;
- info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
info->phys_base = pdata->phys_base;
info->mtd.priv = &info->nand;
@@ -913,7 +809,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.options |= NAND_SKIP_BBTSCAN;
/* NAND write protect off */
- omap_nand_wp(&info->mtd, NAND_WP_OFF);
+ gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
pdev->dev.driver->name)) {
@@ -948,8 +844,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
}
if (use_prefetch) {
- /* copy the virtual address of nand base for fifo access */
- info->nand_pref_fifo_add = info->nand.IO_ADDR_R;
info->nand.read_buf = omap_read_buf_pref;
info->nand.write_buf = omap_write_buf_pref;
@@ -989,8 +883,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.ecc.correct = omap_correct_data;
info->nand.ecc.mode = NAND_ECC_HW;
- /* init HW ECC */
- omap_hwecc_init(&info->mtd);
#else
info->nand.ecc.mode = NAND_ECC_SOFT;
#endif
@@ -1040,7 +932,7 @@ static int omap_nand_remove(struct platform_device *pdev)
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
- iounmap(info->nand_pref_fifo_add);
+ iounmap(info->nand.IO_ADDR_R);
kfree(&info->mtd);
return 0;
}
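The omap2.c conversion above replaces raw GPMC register pokes with the gpmc_* helpers, and its wait loops come in two shapes: spin until gpmc_read_status(GPMC_STATUS_BUFFER) reports the write buffer ready (non-zero), and spin until gpmc_read_status(GPMC_PREFETCH_COUNT) drains to zero before resetting the prefetch engine. A tiny standalone sketch of those two loop shapes, using an invented fake_read_status() in place of the real helper:

/* Demonstration only; fake_read_status() just counts down so both loops
 * terminate -- it is not a model of the GPMC hardware. */
#include <stdio.h>

static int countdown = 3;

static unsigned int fake_read_status(int what)
{
	(void)what;
	return countdown ? (unsigned int)countdown-- : 0;
}

int main(void)
{
	unsigned int status;

	/* wait-for-ready: loop while the status still reads back as zero */
	do {
		status = fake_read_status(0);
	} while (!status);
	printf("buffer ready (status %u)\n", status);

	/* wait-for-drain: loop while the count is still non-zero */
	do {
		status = fake_read_status(1);
	} while (status);
	printf("prefetch drained\n");
	return 0;
}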
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index f02af24d033..6ddb2461d74 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,7 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct of_device *ofdev,
+static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct pci_dev *pdev;
@@ -185,7 +185,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
return err;
}
-static int __devexit pasemi_nand_remove(struct of_device *ofdev)
+static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
{
struct nand_chip *chip;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 8d467315f02..317aff428e4 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
struct resource *res;
int err = 0;
+ if (pdata->chip.nr_chips < 1) {
+ dev_err(&pdev->dev, "invalid number of chips specified\n");
+ return -EINVAL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
@@ -91,7 +96,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
}
/* Scan to find existance of the device */
- if (nand_scan(&data->mtd, 1)) {
+ if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
err = -ENXIO;
goto out;
}
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e02fa4f0e3c..4d01cda6884 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
/* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
+#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_irq;
}
+#ifdef CONFIG_MTD_PARTITIONS
if (mtd_has_cmdlinepart()) {
static const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+#else
+ return 0;
+#endif
fail_free_irq:
free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
del_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(mtd);
+#endif
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, info);
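The pxa3xx_nand.c hunk above drops the "- 1" from ns2cycle(), so the nanosecond-to-clock-cycle conversion no longer shaves one cycle off the truncated result. A quick worked example of the before/after values; the 156 MHz clock is only an illustrative figure, not one taken from the driver.

/* Worked example of the ns2cycle() change; macros copied in shape from the diff. */
#include <stdio.h>

#define ns2cycle_old(ns, clk) (int)(((ns) * ((clk) / 1000000) / 1000) - 1)
#define ns2cycle_new(ns, clk) (int)((ns) * ((clk) / 1000000) / 1000)

int main(void)
{
	unsigned long clk = 156000000UL;	/* 156 MHz, illustrative only */
	int ns = 25;

	/* 25 ns at 156 MHz is 3.9 cycles: the old macro gave 2, the new gives 3 */
	printf("old: %d cycles, new: %d cycles\n",
	       ns2cycle_old(ns, clk), ns2cycle_new(ns, clk));
	return 0;
}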
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index bcfc851fe55..5169ca6a66b 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -64,8 +64,8 @@ static inline void r852_write_reg_dword(struct r852_device *dev,
/* returns pointer to our private structure */
static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
{
- struct nand_chip *chip = (struct nand_chip *)mtd->priv;
- return (struct r852_device *)chip->priv;
+ struct nand_chip *chip = mtd->priv;
+ return chip->priv;
}
@@ -380,7 +380,7 @@ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
*/
int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- struct r852_device *dev = (struct r852_device *)chip->priv;
+ struct r852_device *dev = chip->priv;
unsigned long timeout;
int status;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index a033c4cd8e1..67440b5beef 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -24,7 +24,6 @@
#include <linux/rslib.h>
#include <linux/bitrev.h>
#include <linux/module.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 239aadfd01b..33d832dddfd 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -727,15 +727,12 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
if (set == NULL)
return add_mtd_device(&mtd->mtd);
- if (set->nr_partitions == 0) {
- mtd->mtd.name = set->name;
- nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
- &part_info, 0);
- } else {
- if (set->nr_partitions > 0 && set->partitions != NULL) {
- nr_part = set->nr_partitions;
- part_info = set->partitions;
- }
+ mtd->mtd.name = set->name;
+ nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
+
+ if (nr_part <= 0 && set->nr_partitions > 0) {
+ nr_part = set->nr_partitions;
+ part_info = set->partitions;
}
if (nr_part > 0 && part_info)
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index ac80fb362e6..4a8f367c295 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -109,7 +109,7 @@ static struct nand_flash_dev nand_xd_flash_ids[] = {
int sm_register_device(struct mtd_info *mtd, int smartmedia)
{
- struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ struct nand_chip *chip = mtd->priv;
int ret;
chip->options |= NAND_SKIP_BBTSCAN;
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index cc728b12de8..a8e403eebed 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -162,7 +162,7 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct of_device *ofdev,
+static int __devinit socrates_nand_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct socrates_nand_host *host;
@@ -276,7 +276,7 @@ out:
/*
* Remove a NAND device.
*/
-static int __devexit socrates_nand_remove(struct of_device *ofdev)
+static int __devexit socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = &host->mtd;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index a4578bf903a..b155666acfb 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -1,11 +1,22 @@
-/* Linux driver for NAND Flash Translation Layer */
-/* (c) 1999 Machine Vision Holdings, Inc. */
-/* Author: David Woodhouse <dwmw2@infradead.org> */
-
/*
- The contents of this file are distributed under the GNU General
- Public License version 2. The author places no additional
- restrictions of any kind on it.
+ * Linux driver for NAND Flash Translation Layer
+ *
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define PRERELEASE
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 8b22b1836e9..e3cd1ffad2f 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -2,7 +2,8 @@
* NFTL mount code with extensive checks
*
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
+ * Copyright © 2000 Netgem S.A.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 4f0d635674f..8bf7dc6d1ce 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -1,11 +1,11 @@
/*
* Flash partitions described by the OF (or flattened) device tree
*
- * Copyright (C) 2006 MontaVista Software Inc.
+ * Copyright © 2006 MontaVista Software Inc.
* Author: Vitaly Wool <vwool@ru.mvista.com>
*
* Revised to handle newer style flash binding by:
- * Copyright (C) 2007 David Gibson, IBM Corporation.
+ * Copyright © 2007 David Gibson, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 9a49d68ba5f..3f32289fdbb 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -25,14 +25,14 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
- depends on MTD_ONENAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ depends on ARCH_OMAP2 || ARCH_OMAP3
help
Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
via the GPMC memory controller.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210)
+ depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210
help
Support for a OneNAND flash device connected to an Samsung SOC
S3C64XX/S5PC1XX controller.
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 26caf2590da..a2bb520286f 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -377,8 +377,11 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
default:
block = onenand_block(this, addr);
- page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
-
+ if (FLEXONENAND(this))
+ page = (int) (addr - onenand_addr(this, block))>>\
+ this->page_shift;
+ else
+ page = (int) (addr >> this->page_shift);
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
block &= ~1;
@@ -3730,17 +3733,16 @@ out:
}
/**
- * onenand_probe - [OneNAND Interface] Probe the OneNAND device
+ * onenand_chip_probe - [OneNAND Interface] The generic chip probe
* @param mtd MTD device structure
*
* OneNAND detection method:
* Compare the values from command with ones from register
*/
-static int onenand_probe(struct mtd_info *mtd)
+static int onenand_chip_probe(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
- int bram_maf_id, bram_dev_id, maf_id, dev_id, ver_id;
- int density;
+ int bram_maf_id, bram_dev_id, maf_id, dev_id;
int syscfg;
/* Save system configuration 1 */
@@ -3763,12 +3765,6 @@ static int onenand_probe(struct mtd_info *mtd)
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
- /* Workaround */
- if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) {
- bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
- bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
- }
-
/* Check manufacturer ID */
if (onenand_check_maf(bram_maf_id))
return -ENXIO;
@@ -3776,13 +3772,35 @@ static int onenand_probe(struct mtd_info *mtd)
/* Read manufacturer and device IDs from Register */
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
- ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
- this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
return -ENXIO;
+ return 0;
+}
+
+/**
+ * onenand_probe - [OneNAND Interface] Probe the OneNAND device
+ * @param mtd MTD device structure
+ */
+static int onenand_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int maf_id, dev_id, ver_id;
+ int density;
+ int ret;
+
+ ret = this->chip_probe(mtd);
+ if (ret)
+ return ret;
+
+ /* Read manufacturer and device IDs from Register */
+ maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
+
/* Flash device information */
onenand_print_device_info(dev_id, ver_id);
this->device_id = dev_id;
@@ -3909,6 +3927,9 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!this->unlock_all)
this->unlock_all = onenand_unlock_all;
+ if (!this->chip_probe)
+ this->chip_probe = onenand_chip_probe;
+
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
if (!this->write_bufferram)
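The onenand_base.c change above splits the ID comparison out into onenand_chip_probe() and exposes it as an overridable this->chip_probe hook, with onenand_scan() installing the generic version only when a board driver has not supplied its own (samsung.c does exactly that further down with s5pc110_chip_probe). A small sketch of that optional-hook pattern; the names are stand-ins, not the subsystem's real symbols.

/* Sketch of "fill in a default hook only if the caller left it NULL". */
#include <stdio.h>

struct fake_onenand {
	int (*chip_probe)(struct fake_onenand *this);
};

static int generic_chip_probe(struct fake_onenand *this)
{
	(void)this;
	printf("generic probe\n");
	return 0;
}

static int board_chip_probe(struct fake_onenand *this)
{
	(void)this;
	printf("board-specific probe\n");
	return 0;
}

static int fake_scan(struct fake_onenand *this)
{
	if (!this->chip_probe)			/* default only if unset */
		this->chip_probe = generic_chip_probe;
	return this->chip_probe(this);
}

int main(void)
{
	struct fake_onenand a = { .chip_probe = NULL };
	struct fake_onenand b = { .chip_probe = board_chip_probe };

	fake_scan(&a);	/* uses the generic default */
	fake_scan(&b);	/* keeps the board override */
	return 0;
}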
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index a91fcac1af0..01ab5b3c453 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
-#include <linux/mtd/compatmac.h>
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 2750317cb58..a460f1b748c 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
do {
status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC,
+ base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
} while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
- if (status & S5PC110_DMA_TRANS_STATUS_TE) {
- writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
- writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
- return -EIO;
- }
-
writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
- void __iomem *bufferram;
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
int err;
- p = bufferram = this->base + area;
+ p = this->base + area;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
- memcpy(this->page_buf, bufferram, mtd->writesize);
+ memcpy(this->page_buf, p, mtd->writesize);
p = this->page_buf + offset;
}
@@ -630,6 +628,12 @@ normal:
return 0;
}
+static int s5pc110_chip_probe(struct mtd_info *mtd)
+{
+ /* Now just return 0 */
+ return 0;
+}
+
static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
{
unsigned int flags = INT_ACT | LOAD_CMP;
@@ -757,6 +761,7 @@ static void s3c_onenand_setup(struct mtd_info *mtd)
/* Use generic onenand functions */
onenand->cmd_map = s5pc1xx_cmd_map;
this->read_bufferram = s5pc110_read_bufferram;
+ this->chip_probe = s5pc110_chip_probe;
return;
} else {
BUG();
@@ -781,7 +786,6 @@ static int s3c_onenand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct resource *r;
int size, err;
- unsigned long onenand_ctrl_cfg = 0;
pdata = pdev->dev.platform_data;
/* No need to check pdata. the platform data is optional */
@@ -900,14 +904,6 @@ static int s3c_onenand_probe(struct platform_device *pdev)
}
onenand->phys_base = onenand->base_res->start;
-
- onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100);
- if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) &&
- onenand->dma_addr)
- writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE,
- onenand->dma_addr + 0x100);
- else
- onenand_ctrl_cfg = 0;
}
if (onenand_scan(mtd, 1)) {
@@ -915,10 +911,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
goto scan_failed;
}
- if (onenand->type == TYPE_S5PC110) {
- if (onenand_ctrl_cfg && onenand->dma_addr)
- writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100);
- } else {
+ if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
this->subpagesize = mtd->writesize;
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 2d600a1bf2a..7a87d07cd79 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -1,6 +1,24 @@
/*
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
+ *
+ * Copyright © 2001 Red Hat UK Limited
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
*/
#include <linux/kernel.h>
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 63b83c0d9a1..cc4d1805b86 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -1,7 +1,7 @@
/*
* rfd_ftl.c -- resident flash disk (flash translation layer)
*
- * Copyright (C) 2005 Sean Young <sean@mess.org>
+ * Copyright © 2005 Sean Young <sean@mess.org>
*
* This type of flash translation layer (FTL) is used by the Embedded BIOS
* by General Software. It is known as the Resident Flash Disk (RFD), see:
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 81c4ecdc11f..5cd18979333 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -1,6 +1,6 @@
/*
* Linux driver for SSFDC Flash Translation Layer (Read only)
- * (c) 2005 Eptar srl
+ * © 2005 Eptar srl
* Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
*
* Based on NTFL and MTDBLOCK_RO drivers
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 6bc1b8276c6..00b937e38c1 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -310,7 +310,7 @@ static int crosstest(void)
static int erasecrosstest(void)
{
size_t read = 0, written = 0;
- int err = 0, i, ebnum, ok = 1, ebnum2;
+ int err = 0, i, ebnum, ebnum2;
loff_t addr0;
char *readbuf = twopages;
@@ -357,8 +357,7 @@ static int erasecrosstest(void)
if (memcmp(writebuf, readbuf, pgsize)) {
printk(PRINT_PREF "verify failed!\n");
errcnt += 1;
- ok = 0;
- return err;
+ return -1;
}
printk(PRINT_PREF "erasing block %d\n", ebnum);
@@ -396,10 +395,10 @@ static int erasecrosstest(void)
if (memcmp(writebuf, readbuf, pgsize)) {
printk(PRINT_PREF "verify failed!\n");
errcnt += 1;
- ok = 0;
+ return -1;
}
- if (ok && !err)
+ if (!err)
printk(PRINT_PREF "erasecrosstest ok\n");
return err;
}
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 2246f154e2f..61f6e5e4045 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
depends on SYSFS
depends on MTD_UBI
select DEBUG_FS
- select KALLSYMS_ALL
+ select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
help
This option enables UBI debugging.
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4dfa6b90c21..3d2d1a69e9a 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
goto out_free;
}
- re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
- if (!re) {
+ re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
- re->remove = 1;
- re->desc = desc;
- list_add(&re->list, &rename_list);
+ re1->remove = 1;
+ re1->desc = desc;
+ list_add(&re1->list, &rename_list);
dbg_msg("will remove volume %d, name \"%s\"",
- re->desc->vol->vol_id, re->desc->vol->name);
+ re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 372a15ac999..69b52e9c948 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->erase);
if (err)
return err;
return 0;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ee7b1d8fbb9..97a435672ea 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
- if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
spin_unlock(&ubi->wl_lock);
return 0;
}
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index baac246561b..4777a1cbcd8 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -337,10 +337,10 @@ el2_probe1(struct net_device *dev, int ioaddr)
/* Finish setting the board's parameters. */
ei_status.stop_page = EL2_MB1_STOP_PG;
ei_status.word16 = wordlength;
- ei_status.reset_8390 = &el2_reset_8390;
- ei_status.get_8390_hdr = &el2_get_8390_hdr;
- ei_status.block_input = &el2_block_input;
- ei_status.block_output = &el2_block_output;
+ ei_status.reset_8390 = el2_reset_8390;
+ ei_status.get_8390_hdr = el2_get_8390_hdr;
+ ei_status.block_input = el2_block_input;
+ ei_status.block_output = el2_block_output;
if (dev->irq == 2)
dev->irq = 9;
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3bba835f1a2..cdf7226a7c4 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -662,7 +662,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
{
- char *ram_split[] = { "5:3", "3:1", "1:1", "3:5" };
+ static const char * const ram_split[] = {
+ "5:3", "3:1", "1:1", "3:5"
+ };
__u32 config;
EL3WINDOW(3);
vp->available_media = inw(ioaddr + Wn3_Options);
@@ -734,7 +736,7 @@ static int corkscrew_open(struct net_device *dev)
init_timer(&vp->timer);
vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
vp->timer.data = (unsigned long) dev;
- vp->timer.function = &corkscrew_timer; /* timer handler */
+ vp->timer.function = corkscrew_timer; /* timer handler */
add_timer(&vp->timer);
} else
dev->if_port = vp->default_media;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index a7b0e5e43a5..ca00f0a1121 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -463,7 +463,7 @@ static int __init do_elmc_probe(struct net_device *dev)
/* we didn't find any 3c523 in the slots we checked for */
if (slot == MCA_NOTFOUND)
- return ((base_addr || irq) ? -ENXIO : -ENODEV);
+ return (base_addr || irq) ? -ENXIO : -ENODEV;
mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c754d88e5ec..e1da258bbfb 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,11 @@ struct vortex_private {
open:1,
medialock:1,
must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
- large_frames:1; /* accept large frames */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+ * No additional locking is required for enable_wol or acpi_set_WOL().
+ */
int drv_flags;
u16 status_enable;
u16 intr_enable;
@@ -646,7 +650,7 @@ struct vortex_private {
u16 io_size; /* Size of PCI region (for release_region) */
/* Serialises access to hardware other than MII and variables below.
- * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+ * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
spinlock_t lock;
spinlock_t mii_lock; /* Serialises access to MII */
@@ -1738,7 +1742,7 @@ vortex_open(struct net_device *dev)
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
- &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
+ boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
goto err;
}
@@ -1993,10 +1997,9 @@ vortex_error(struct net_device *dev, int status)
}
}
- if (status & RxEarly) { /* Rx early is unused. */
- vortex_rx(dev);
+ if (status & RxEarly) /* Rx early is unused. */
iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
+
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
@@ -2133,6 +2136,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, vp->cur_tx);
}
+ /*
+ * We can't allow recursion from our interrupt handler back into the
+ * tx routine, as they take the same spin lock, and that would cause a
+ * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+ * a bit.
+ */
+ if (vp->handling_irq)
+ return NETDEV_TX_BUSY;
+
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2288,7 +2300,12 @@ vortex_interrupt(int irq, void *dev_id)
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
+ if (status & RxEarly)
+ vortex_rx(dev);
+ spin_unlock(&vp->window_lock);
vortex_error(dev, status);
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
}
if (--work_done < 0) {
@@ -2335,11 +2352,13 @@ boomerang_interrupt(int irq, void *dev_id)
ioaddr = vp->ioaddr;
+
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
+ vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2447,6 +2466,7 @@ boomerang_interrupt(int irq, void *dev_id)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
+ vp->handling_irq = 0;
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
@@ -2922,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct vortex_private *vp = netdev_priv(dev);
- spin_lock_irq(&vp->lock);
+ if (!VORTEX_PCI(vp))
+ return;
+
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
if (vp->enable_wol)
wol->wolopts |= WAKE_MAGIC;
- spin_unlock_irq(&vp->lock);
}
static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct vortex_private *vp = netdev_priv(dev);
+
+ if (!VORTEX_PCI(vp))
+ return -EOPNOTSUPP;
+
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- spin_lock_irq(&vp->lock);
if (wol->wolopts & WAKE_MAGIC)
vp->enable_wol = 1;
else
vp->enable_wol = 0;
acpi_set_WOL(dev);
- spin_unlock_irq(&vp->lock);
return 0;
}
@@ -2971,7 +2994,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
pci_power_t state = 0;
if(VORTEX_PCI(vp))
@@ -2981,9 +3003,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
- spin_lock_irqsave(&vp->lock, flags);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
- spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
@@ -3188,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
return;
}
+ if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+ return;
+
/* Change the power state to D3; RxEnable doesn't take effect. */
pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}
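
The handling_irq changes in the 3c59x hunks above guard against re-entering the transmit path from the interrupt handler while the same spinlock is held. A minimal sketch of that guard pattern is shown below; the names (my_priv, my_start_xmit, my_interrupt) and the lock usage are illustrative assumptions, not the driver's actual code.

/*
 * Sketch of the re-entrancy guard: the IRQ handler sets a flag while it
 * holds the lock, and the xmit path backs off with NETDEV_TX_BUSY
 * instead of trying to take the same lock recursively.
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_priv {
        spinlock_t lock;
        unsigned int handling_irq:1;
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *vp = netdev_priv(dev);
        unsigned long flags;

        /* The IRQ handler may call back into the stack, which can call
         * us again on the same CPU; taking vp->lock here would deadlock. */
        if (vp->handling_irq)
                return NETDEV_TX_BUSY;  /* the stack retries shortly */

        spin_lock_irqsave(&vp->lock, flags);
        /* ... map skb and queue it to the hardware TX ring ... */
        spin_unlock_irqrestore(&vp->lock, flags);
        return NETDEV_TX_OK;
}

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct my_priv *vp = netdev_priv(dev);

        spin_lock(&vp->lock);
        vp->handling_irq = 1;
        /* ... complete TX descriptors, refill RX, may wake the queue ... */
        vp->handling_irq = 0;
        spin_unlock(&vp->lock);
        return IRQ_HANDLED;
}
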
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4a4f6b81e32..237d4ea5a41 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -561,7 +561,7 @@ rx_status_loop:
if (cp_rx_csum_ok(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb_put(skb, len);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ebe68395ecf..13d01f358f3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -484,7 +484,7 @@ config XTENSA_XT2000_SONIC
config MIPS_AU1X00_ENET
tristate "MIPS AU1000 Ethernet support"
- depends on SOC_AU1X00
+ depends on MIPS_ALCHEMY
select PHYLIB
select CRC32
help
@@ -914,7 +914,7 @@ config SMC91X
tristate "SMC 91C9x/91C1xxx support"
select CRC32
select MII
- depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
+ depends on ARM || M32R || SUPERH || \
MIPS || BLACKFIN || MN10300 || COLDFIRE
help
This is a driver for SMC's 91x series of Ethernet chipsets,
@@ -928,6 +928,16 @@ config SMC91X
The module will be called smc91x. If you want to compile it as a
module, say M here and read <file:Documentation/kbuild/modules.txt>.
+config PXA168_ETH
+ tristate "Marvell pxa168 ethernet support"
+ depends on CPU_PXA168
+ select PHYLIB
+ help
+ This driver supports the pxa168 Ethernet ports.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pxa168_eth.
+
config NET_NETX
tristate "NetX Ethernet support"
select MII
@@ -2418,7 +2428,7 @@ config UGETH_TX_ON_DEMAND
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on MV64X60 || PPC32 || PLAT_ORION
+ depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
select INET_LRO
select PHYLIB
help
@@ -2505,6 +2515,18 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"
+config PCH_GBE
+ tristate "PCH Gigabit Ethernet"
+ depends on PCI
+ ---help---
+ This is a gigabit ethernet driver for the Topcliff PCH.
+ Topcliff PCH is the platform controller hub used in Intel's
+ general embedded platforms.
+ Topcliff PCH has a Gigabit Ethernet interface, through which
+ the system can reach devices connected over Gigabit Ethernet.
+ This driver enables that Gigabit Ethernet function.
+
endif # NETDEV_1000
#
@@ -2793,7 +2815,7 @@ config NIU
config PASEMI_MAC
tristate "PA Semi 1/10Gbit MAC"
- depends on PPC_PASEMI && PCI
+ depends on PPC_PASEMI && PCI && INET
select PHYLIB
select INET_LRO
help
@@ -2859,6 +2881,20 @@ config QLGE
To compile this driver as a module, choose M here: the module
will be called qlge.
+config BNA
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE-capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
+
+ For general information and support, go to the Brocade support
+ website at:
+
+ <http://support.brocade.com>
+
source "drivers/net/sfc/Kconfig"
source "drivers/net/benet/Kconfig"
@@ -3192,6 +3228,17 @@ config PPPOE
which contains instruction on how to use this driver (under
the heading "Kernel mode PPPoE").
+config PPTP
+ tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
+ help
+ Support for PPP over IPv4 (Point-to-Point Tunneling Protocol).
+
+ This driver requires the pppd plugin to work in client mode, or a
+ modified pptpd (poptop) to work in server mode.
+ See http://accel-pptp.sourceforge.net/ for information on how to
+ use this module.
+
config PPPOATM
tristate "PPP over ATM"
depends on ATM && PPP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 56e8c27f77c..b8bf93d4a13 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_BE2NET) += benet/
obj-$(CONFIG_VMXNET3) += vmxnet3/
+obj-$(CONFIG_BNA) += bna/
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
@@ -162,6 +163,7 @@ obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_PPPOL2TP) += pppox.o
+obj-$(CONFIG_PPTP) += pppox.o pptp.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -244,6 +246,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_SMSC911X) += smsc911x.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
@@ -295,3 +298,4 @@ obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
+obj-$(CONFIG_PCH_GBE) += pch_gbe/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b9a591604e5..41d9911202d 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2033,7 +2033,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
skb->csum = htons(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
/* send it up */
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 585c25f4b60..58a0ab4923e 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -396,7 +396,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
event_count = coal_conf->rx_event_count;
if( timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT )
- return -EINVAL;
+ return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN, mmio+INTEN0);
@@ -409,7 +409,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
event_count = coal_conf->tx_event_count;
if( timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT )
- return -EINVAL;
+ return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
@@ -903,18 +903,18 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
}
/*
-This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
-*/
-static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
+ * This function reads the mib registers and returns the hardware statistics.
+ * It updates previous internal driver statistics with new values.
+ */
+static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
unsigned long flags;
- /* struct net_device_stats *prev_stats = &lp->prev_stats; */
- struct net_device_stats* new_stats = &lp->stats;
+ struct net_device_stats *new_stats = &dev->stats;
- if(!lp->opened)
- return &lp->stats;
+ if (!lp->opened)
+ return new_stats;
spin_lock_irqsave (&lp->lock, flags);
/* stats.rx_packets */
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index ac36eb6981e..b5926af03a7 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -787,7 +787,6 @@ struct amd8111e_priv{
struct vlan_group *vlgrp;
#endif
char opened;
- struct net_device_stats stats;
unsigned int drv_rx_errors;
struct amd8111e_coalesce_conf coal_conf;
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 0362c8d31a0..10d0dba572c 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -244,7 +244,7 @@ static int ipddp_delete(struct ipddp_route *rt)
}
spin_unlock_bh(&ipddp_route_lock);
- return (-ENOENT);
+ return -ENOENT;
}
/*
@@ -259,10 +259,10 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
if(f->ip == rt->ip &&
f->at.s_net == rt->at.s_net &&
f->at.s_node == rt->at.s_node)
- return (f);
+ return f;
}
- return (NULL);
+ return NULL;
}
static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -279,7 +279,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch(cmd)
{
case SIOCADDIPDDPRT:
- return (ipddp_create(&rcp));
+ return ipddp_create(&rcp);
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
@@ -297,7 +297,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ENOENT;
case SIOCDELIPDDPRT:
- return (ipddp_delete(&rcp));
+ return ipddp_delete(&rcp);
default:
return -EINVAL;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index adc07551739..e69eead12ec 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -727,7 +727,7 @@ static int sendup_buffer (struct net_device *dev)
if (ltc->command != LT_RCVLAP) {
printk("unknown command 0x%02x from ltpc card\n",ltc->command);
- return(-1);
+ return -1;
}
dnode = ltc->dnode;
snode = ltc->snode;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 8c496fb1ac9..62f21106efe 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -300,8 +300,6 @@ am79c961_open(struct net_device *dev)
struct dev_priv *priv = netdev_priv(dev);
int ret;
- memset (&priv->stats, 0, sizeof (priv->stats));
-
ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
if (ret)
return ret;
@@ -347,8 +345,7 @@ am79c961_close(struct net_device *dev)
*/
static struct net_device_stats *am79c961_getstats (struct net_device *dev)
{
- struct dev_priv *priv = netdev_priv(dev);
- return &priv->stats;
+ return &dev->stats;
}
static void am79c961_mc_hash(char *addr, unsigned short *hash)
@@ -510,14 +507,14 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
am_writeword (dev, hdraddr + 2, RMD_OWN);
- priv->stats.rx_errors ++;
+ dev->stats.rx_errors++;
if (status & RMD_ERR) {
if (status & RMD_FRAM)
- priv->stats.rx_frame_errors ++;
+ dev->stats.rx_frame_errors++;
if (status & RMD_CRC)
- priv->stats.rx_crc_errors ++;
+ dev->stats.rx_crc_errors++;
} else if (status & RMD_STP)
- priv->stats.rx_length_errors ++;
+ dev->stats.rx_length_errors++;
continue;
}
@@ -531,12 +528,12 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
am_writeword(dev, hdraddr + 2, RMD_OWN);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- priv->stats.rx_bytes += len;
- priv->stats.rx_packets ++;
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
} else {
am_writeword (dev, hdraddr + 2, RMD_OWN);
printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
- priv->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
break;
}
} while (1);
@@ -565,7 +562,7 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
if (status & TMD_ERR) {
u_int status2;
- priv->stats.tx_errors ++;
+ dev->stats.tx_errors++;
status2 = am_readword (dev, hdraddr + 6);
@@ -575,18 +572,18 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
am_writeword (dev, hdraddr + 6, 0);
if (status2 & TST_RTRY)
- priv->stats.collisions += 16;
+ dev->stats.collisions += 16;
if (status2 & TST_LCOL)
- priv->stats.tx_window_errors ++;
+ dev->stats.tx_window_errors++;
if (status2 & TST_LCAR)
- priv->stats.tx_carrier_errors ++;
+ dev->stats.tx_carrier_errors++;
if (status2 & TST_UFLO)
- priv->stats.tx_fifo_errors ++;
+ dev->stats.tx_fifo_errors++;
continue;
}
- priv->stats.tx_packets ++;
+ dev->stats.tx_packets++;
len = am_readword (dev, hdraddr + 4);
- priv->stats.tx_bytes += -len;
+ dev->stats.tx_bytes += -len;
} while (priv->txtail != priv->txhead);
netif_wake_queue(dev);
@@ -616,7 +613,7 @@ am79c961_interrupt(int irq, void *dev_id)
}
if (status & CSR0_MISS) {
handled = 1;
- priv->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
}
if (status & CSR0_CERR) {
handled = 1;
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
index 483009fe6ec..fd634d32756 100644
--- a/drivers/net/arm/am79c961a.h
+++ b/drivers/net/arm/am79c961a.h
@@ -130,7 +130,6 @@
#define ISALED0_LNKST 0x8000
struct dev_priv {
- struct net_device_stats stats;
unsigned long rxbuffer[RX_BUFFERS];
unsigned long txbuffer[TX_BUFFERS];
unsigned char txhead;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 4a5ec9470aa..5a77001b6d1 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -175,8 +175,6 @@ struct ep93xx_priv
struct net_device *dev;
struct napi_struct napi;
- struct net_device_stats stats;
-
struct mii_if_info mii;
u8 mdc_divisor;
};
@@ -230,12 +228,6 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
pr_info("mdio write timed out\n");
}
-static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- return &(ep->stats);
-}
-
static int ep93xx_rx(struct net_device *dev, int processed, int budget)
{
struct ep93xx_priv *ep = netdev_priv(dev);
@@ -267,15 +259,15 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
- ep->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (rstat0 & RSTAT0_OE)
- ep->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
if (rstat0 & RSTAT0_FE)
- ep->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
- ep->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (rstat0 & RSTAT0_CRCE)
- ep->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
goto err;
}
@@ -300,10 +292,10 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
netif_receive_skb(skb);
- ep->stats.rx_packets++;
- ep->stats.rx_bytes += length;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
} else {
- ep->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
err:
@@ -359,7 +351,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
int entry;
if (unlikely(skb->len > MAX_PKT_SIZE)) {
- ep->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -415,17 +407,17 @@ static void ep93xx_tx_complete(struct net_device *dev)
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
- ep->stats.tx_packets++;
- ep->stats.tx_bytes += length;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
} else {
- ep->stats.tx_errors++;
+ dev->stats.tx_errors++;
}
if (tstat0 & TSTAT0_OW)
- ep->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
if (tstat0 & TSTAT0_TXU)
- ep->stats.tx_fifo_errors++;
- ep->stats.collisions += (tstat0 >> 16) & 0x1f;
+ dev->stats.tx_fifo_errors++;
+ dev->stats.collisions += (tstat0 >> 16) & 0x1f;
ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
if (ep->tx_pending == TX_QUEUE_ENTRIES)
@@ -758,7 +750,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_open = ep93xx_open,
.ndo_stop = ep93xx_close,
.ndo_start_xmit = ep93xx_xmit,
- .ndo_get_stats = ep93xx_get_stats,
.ndo_do_ioctl = ep93xx_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
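
The am79c961 and ep93xx conversions above (and the ether1/ether3 ones that follow) all drop a per-driver copy of struct net_device_stats and count directly into dev->stats; the core reports dev->stats by default when no .ndo_get_stats callback is provided, so the private getstats helpers become redundant. A minimal sketch of the pattern, with hypothetical names and a hypothetical status bit:

/*
 * Counters are kept in the net_device itself instead of a private
 * net_device_stats copy, so the .ndo_get_stats callback (and the
 * memset of the private copy at open time) can go away.
 */
#include <linux/netdevice.h>

static void my_handle_rx_error(struct net_device *dev, u32 status)
{
        dev->stats.rx_errors++;                 /* was priv->stats.rx_errors++ */
        if (status & 0x1)                       /* hypothetical CRC-error bit */
                dev->stats.rx_crc_errors++;
}
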
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index b17ab5153f5..b00781c02d5 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -68,7 +68,6 @@ static int ether1_open(struct net_device *dev);
static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
-static struct net_device_stats *ether1_getstats(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
static void ether1_timeout(struct net_device *dev);
@@ -649,8 +648,6 @@ ether1_open (struct net_device *dev)
if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
return -EAGAIN;
- memset (&priv(dev)->stats, 0, sizeof (struct net_device_stats));
-
if (ether1_init_for_open (dev)) {
free_irq (dev->irq, dev);
return -EAGAIN;
@@ -673,7 +670,7 @@ ether1_timeout(struct net_device *dev)
if (ether1_init_for_open (dev))
printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
- priv(dev)->stats.tx_errors++;
+ dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -802,21 +799,21 @@ again:
while (nop.nop_status & STAT_COMPLETE) {
if (nop.nop_status & STAT_OK) {
- priv(dev)->stats.tx_packets ++;
- priv(dev)->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
} else {
- priv(dev)->stats.tx_errors ++;
+ dev->stats.tx_errors++;
if (nop.nop_status & STAT_COLLAFTERTX)
- priv(dev)->stats.collisions ++;
+ dev->stats.collisions++;
if (nop.nop_status & STAT_NOCARRIER)
- priv(dev)->stats.tx_carrier_errors ++;
+ dev->stats.tx_carrier_errors++;
if (nop.nop_status & STAT_TXLOSTCTS)
printk (KERN_WARNING "%s: cts lost\n", dev->name);
if (nop.nop_status & STAT_TXSLOWDMA)
- priv(dev)->stats.tx_fifo_errors ++;
+ dev->stats.tx_fifo_errors++;
if (nop.nop_status & STAT_COLLEXCESSIVE)
- priv(dev)->stats.collisions += 16;
+ dev->stats.collisions += 16;
}
if (nop.nop_link == caddr) {
@@ -879,13 +876,13 @@ ether1_recv_done (struct net_device *dev)
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
- priv(dev)->stats.rx_packets ++;
+ dev->stats.rx_packets++;
} else
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
} else {
printk(KERN_WARNING "%s: %s\n", dev->name,
(rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
}
nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
@@ -939,7 +936,7 @@ ether1_interrupt (int irq, void *dev_id)
printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
writeb(CTRL_CA, REG_CONTROL);
- priv(dev)->stats.rx_dropped ++; /* we suspended due to lack of buffer space */
+ dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */
} else
printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
@@ -962,12 +959,6 @@ ether1_close (struct net_device *dev)
return 0;
}
-static struct net_device_stats *
-ether1_getstats (struct net_device *dev)
-{
- return &priv(dev)->stats;
-}
-
/*
* Set or clear the multicast filter for this adaptor.
* num_addrs == -1 Promiscuous mode, receive all packets.
@@ -994,7 +985,6 @@ static const struct net_device_ops ether1_netdev_ops = {
.ndo_open = ether1_open,
.ndo_stop = ether1_close,
.ndo_start_xmit = ether1_sendpacket,
- .ndo_get_stats = ether1_getstats,
.ndo_set_multicast_list = ether1_setmulticastlist,
.ndo_tx_timeout = ether1_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h
index c8a4b2389d8..3a5830ab3dc 100644
--- a/drivers/net/arm/ether1.h
+++ b/drivers/net/arm/ether1.h
@@ -38,7 +38,6 @@
struct ether1_priv {
void __iomem *base;
- struct net_device_stats stats;
unsigned int tx_link;
unsigned int tx_head;
volatile unsigned int tx_tail;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1361b7367c2..44a8746f401 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -81,7 +81,6 @@ static int ether3_open (struct net_device *dev);
static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
-static struct net_device_stats *ether3_getstats (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
static void ether3_timeout(struct net_device *dev);
@@ -323,8 +322,6 @@ ether3_init_for_open(struct net_device *dev)
{
int i;
- memset(&priv(dev)->stats, 0, sizeof(struct net_device_stats));
-
/* Reset the chip */
ether3_outw(CFG2_RESET, REG_CONFIG2);
udelay(4);
@@ -442,15 +439,6 @@ ether3_close(struct net_device *dev)
}
/*
- * Get the current statistics. This may be called with the card open or
- * closed.
- */
-static struct net_device_stats *ether3_getstats(struct net_device *dev)
-{
- return &priv(dev)->stats;
-}
-
-/*
* Set or clear promiscuous/multicast mode filter for this adaptor.
*
* We don't attempt any packet filtering. The card may have a SEEQ 8004
@@ -490,7 +478,7 @@ static void ether3_timeout(struct net_device *dev)
local_irq_restore(flags);
priv(dev)->regs.config2 |= CFG2_CTRLO;
- priv(dev)->stats.tx_errors += 1;
+ dev->stats.tx_errors += 1;
ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
priv(dev)->tx_head = priv(dev)->tx_tail = 0;
@@ -509,7 +497,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (priv(dev)->broken) {
dev_kfree_skb(skb);
- priv(dev)->stats.tx_dropped ++;
+ dev->stats.tx_dropped++;
netif_start_queue(dev);
return NETDEV_TX_OK;
}
@@ -673,7 +661,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
} else
goto dropping;
} else {
- struct net_device_stats *stats = &priv(dev)->stats;
+ struct net_device_stats *stats = &dev->stats;
ether3_outw(next_ptr >> 8, REG_RECVEND);
if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++;
if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++;
@@ -685,14 +673,14 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
while (-- maxcnt);
done:
- priv(dev)->stats.rx_packets += received;
+ dev->stats.rx_packets += received;
priv(dev)->rx_head = next_ptr;
/*
* If rx went off line, then that means that the buffer may be full. We
* have dropped at least one packet.
*/
if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
ether3_outw(next_ptr, REG_RECVPTR);
ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
@@ -710,7 +698,7 @@ dropping:{
last_warned = jiffies;
printk("%s: memory squeeze, dropping packet.\n", dev->name);
}
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
goto done;
}
}
@@ -743,13 +731,13 @@ static void ether3_tx(struct net_device *dev)
* Update errors
*/
if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
- priv(dev)->stats.tx_packets++;
+ dev->stats.tx_packets++;
else {
- priv(dev)->stats.tx_errors ++;
+ dev->stats.tx_errors++;
if (status & TXSTAT_16COLLISIONS)
- priv(dev)->stats.collisions += 16;
+ dev->stats.collisions += 16;
if (status & TXSTAT_BABBLED)
- priv(dev)->stats.tx_fifo_errors ++;
+ dev->stats.tx_fifo_errors++;
}
tx_tail = (tx_tail + 1) & 15;
@@ -773,7 +761,6 @@ static const struct net_device_ops ether3_netdev_ops = {
.ndo_open = ether3_open,
.ndo_stop = ether3_close,
.ndo_start_xmit = ether3_sendpacket,
- .ndo_get_stats = ether3_getstats,
.ndo_set_multicast_list = ether3_setmulticastlist,
.ndo_tx_timeout = ether3_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether3.h b/drivers/net/arm/ether3.h
index 1921a3a07da..2db63b08bdf 100644
--- a/drivers/net/arm/ether3.h
+++ b/drivers/net/arm/ether3.h
@@ -164,7 +164,6 @@ struct dev_priv {
unsigned char tx_head; /* buffer nr to insert next packet */
unsigned char tx_tail; /* buffer nr of transmitting packet */
unsigned int rx_head; /* address to fetch next packet from */
- struct net_device_stats stats;
struct timer_list timer;
int broken; /* 0 = ok, 1 = something went wrong */
};
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 4f1cc7164ad..6028226a727 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -241,7 +241,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
-struct mii_bus *mdio_bus;
+static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index b57d7dee389..3134e532623 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -362,7 +362,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
*cto++ = *cfrom++;
MFPDELAY();
}
- return( dst );
+ return dst;
}
@@ -449,7 +449,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
vbr[2] = save_berr;
local_irq_restore(flags);
- return( ret );
+ return ret;
}
static const struct net_device_ops lance_netdev_ops = {
@@ -526,7 +526,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
goto probe_ok;
probe_fail:
- return( 0 );
+ return 0;
probe_ok:
lp = netdev_priv(dev);
@@ -556,7 +556,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
"PAM/Riebl-ST Ethernet", dev)) {
printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
- return( 0 );
+ return 0;
}
dev->irq = (unsigned short)IRQ_AUTO_5;
}
@@ -568,12 +568,12 @@ static unsigned long __init lance_probe1( struct net_device *dev,
unsigned long irq = atari_register_vme_int();
if (!irq) {
printk( "Lance: request for VME interrupt failed\n" );
- return( 0 );
+ return 0;
}
if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
"Riebl-VME Ethernet", dev)) {
printk( "Lance: request for irq %ld failed\n", irq );
- return( 0 );
+ return 0;
}
dev->irq = irq;
}
@@ -637,7 +637,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* XXX MSch */
dev->watchdog_timeo = TX_TIMEOUT;
- return( 1 );
+ return 1;
}
@@ -666,7 +666,7 @@ static int lance_open( struct net_device *dev )
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;
- return( -EIO );
+ return -EIO;
}
DREG = CSR0_IDON;
DREG = CSR0_STRT;
@@ -676,7 +676,7 @@ static int lance_open( struct net_device *dev )
DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
- return( 0 );
+ return 0;
}
@@ -1126,13 +1126,13 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
int i;
if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
- return( -EOPNOTSUPP );
+ return -EOPNOTSUPP;
if (netif_running(dev)) {
/* Only possible while card isn't started */
DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
dev->name ));
- return( -EIO );
+ return -EIO;
}
memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
@@ -1142,7 +1142,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
/* set also the magic for future sessions */
*RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
- return( 0 );
+ return 0;
}
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 52abbbdf8a0..ef4115b897b 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -559,7 +559,6 @@ struct atl1c_adapter {
struct napi_struct napi;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
- struct net_device_stats net_stats;
struct mii_if_info mii; /* MII interface info */
u16 rx_buffer_len;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index d8501f06095..919080b2c3a 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -480,7 +480,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
}
if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
- || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
+ || hw->nic_type == athr_l2c) {
atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
}
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index c7b8ef507eb..553230eb365 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1562,7 +1562,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
- struct net_device_stats *net_stats = &adapter->net_stats;
+ struct net_device_stats *net_stats = &netdev->stats;
atl1c_update_hw_stats(adapter);
net_stats->rx_packets = hw_stats->rx_ok;
@@ -1590,7 +1590,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
- return &adapter->net_stats;
+ return net_stats;
}
static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
@@ -1700,7 +1700,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
/* link event */
if (status & (ISR_GPHY | ISR_MANUAL)) {
- adapter->net_stats.tx_carrier_errors++;
+ netdev->stats.tx_carrier_errors++;
atl1c_link_chg_event(adapter);
break;
}
@@ -1719,7 +1719,7 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
* cannot figure out if the packet is fragmented or not,
* so we tell the KERNEL CHECKSUM_NONE
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 1acea5774e8..56ace3fbe40 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1331,7 +1331,7 @@ static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
u16 pkt_flags;
u16 err_flags;
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
pkt_flags = prrs->pkt_flag;
err_flags = prrs->err_flag;
if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
@@ -2316,7 +2316,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = &atl1e_phy_config;
+ adapter->phy_config_timer.function = atl1e_phy_config;
adapter->phy_config_timer.data = (unsigned long) adapter;
/* get user settings */
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0cc67..b8c053f7687 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
rrd_ring->desc = NULL;
rrd_ring->dma = 0;
+
+ adapter->cmb.dma = 0;
+ adapter->cmb.cmb = NULL;
+
+ adapter->smb.dma = 0;
+ adapter->smb.smb = NULL;
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -1805,7 +1811,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
* the higher layers and let it be sorted out there.
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
@@ -2094,9 +2100,9 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
- return ((next_to_clean > next_to_use) ?
+ return (next_to_clean > next_to_use) ?
next_to_clean - next_to_use - 1 :
- tpd_ring->count + next_to_clean - next_to_use - 1);
+ tpd_ring->count + next_to_clean - next_to_use - 1;
}
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
- adapter->cmb.cmb->int_stats = 0;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ adapter->cmb.cmb->int_stats = 0;
atl1_up(adapter);
+ }
netif_device_attach(netdev);
return 0;
@@ -3036,7 +3043,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- setup_timer(&adapter->phy_config_timer, &atl1_phy_config,
+ setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
adapter->phy_timer_pending = false;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 8da87383fb3..29c0265ccc5 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -51,10 +51,10 @@
#define ATL2_DRV_VERSION "2.2.3"
-static char atl2_driver_name[] = "atl2";
+static const char atl2_driver_name[] = "atl2";
static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
-static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
-static char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static const char atl2_driver_version[] = ATL2_DRV_VERSION;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -1444,11 +1444,11 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
atl2_check_options(adapter);
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &atl2_watchdog;
+ adapter->watchdog_timer.function = atl2_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = &atl2_phy_config;
+ adapter->phy_config_timer.function = atl2_phy_config;
adapter->phy_config_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->reset_task, atl2_reset_task);
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index bd2f9d331da..dfd96b20547 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -445,7 +445,7 @@ static int net_open(struct net_device *dev)
init_timer(&lp->timer);
lp->timer.expires = jiffies + TIMED_CHECKER;
lp->timer.data = (unsigned long)dev;
- lp->timer.function = &atp_timed_checker; /* timer handler */
+ lp->timer.function = atp_timed_checker; /* timer handler */
add_timer(&lp->timer);
netif_start_queue(dev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 386d4feec65..43489f89c14 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -13,7 +13,7 @@
* converted to use linux-2.6.x's PHY framework
*
* Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
+ * ppopov@mvista.com or source@mvista.com
*
* ########################################################################
*
@@ -34,6 +34,8 @@
*
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -56,11 +58,11 @@
#include <linux/crc32.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
-#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <asm/processor.h>
#include <au1000.h>
@@ -104,14 +106,6 @@ MODULE_VERSION(DRV_VERSION);
* complete immediately.
*/
-/* These addresses are only used if yamon doesn't tell us what
- * the mac address is, and the mac address is not passed on the
- * command line.
- */
-static unsigned char au1000_mac_addr[6] __devinitdata = {
- 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
-};
-
struct au1000_private *au_macs[NUM_ETH_INTERFACES];
/*
@@ -160,11 +154,11 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
spin_lock_irqsave(&aup->lock, flags);
- if(force_reset || (!aup->mac_enabled)) {
- *aup->enable = MAC_EN_CLOCK_ENABLE;
+ if (force_reset || (!aup->mac_enabled)) {
+ writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
au_sync_delay(2);
- *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE);
+ writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE), &aup->enable);
au_sync_delay(2);
aup->mac_enabled = 1;
@@ -179,12 +173,12 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
{
struct au1000_private *aup = netdev_priv(dev);
- volatile u32 *const mii_control_reg = &aup->mac->mii_control;
- volatile u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
u32 timedout = 20;
u32 mii_control;
- while (*mii_control_reg & MAC_MII_BUSY) {
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
netdev_err(dev, "read_MII busy timeout!!\n");
@@ -195,29 +189,29 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
mii_control = MAC_SET_MII_SELECT_REG(reg) |
MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
- *mii_control_reg = mii_control;
+ writel(mii_control, mii_control_reg);
timedout = 20;
- while (*mii_control_reg & MAC_MII_BUSY) {
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
netdev_err(dev, "mdio_read busy timeout!!\n");
return -1;
}
}
- return (int)*mii_data_reg;
+ return readl(mii_data_reg);
}
static void au1000_mdio_write(struct net_device *dev, int phy_addr,
int reg, u16 value)
{
struct au1000_private *aup = netdev_priv(dev);
- volatile u32 *const mii_control_reg = &aup->mac->mii_control;
- volatile u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
u32 timedout = 20;
u32 mii_control;
- while (*mii_control_reg & MAC_MII_BUSY) {
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
netdev_err(dev, "mdio_write busy timeout!!\n");
@@ -228,18 +222,22 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
mii_control = MAC_SET_MII_SELECT_REG(reg) |
MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
- *mii_data_reg = value;
- *mii_control_reg = mii_control;
+ writel(value, mii_data_reg);
+ writel(mii_control, mii_control_reg);
}
static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
/* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
- * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
+ * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
+ */
struct net_device *const dev = bus->priv;
- au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
- * mii_bus is enabled */
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
return au1000_mdio_read(dev, phy_addr, regnum);
}
@@ -248,8 +246,11 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
{
struct net_device *const dev = bus->priv;
- au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
- * mii_bus is enabled */
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
au1000_mdio_write(dev, phy_addr, regnum, value);
return 0;
}
@@ -258,28 +259,37 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
{
struct net_device *const dev = bus->priv;
- au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
- * mii_bus is enabled */
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
return 0;
}
static void au1000_hard_stop(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
netif_dbg(aup, drv, dev, "hard stop\n");
- aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ reg = readl(&aup->mac->control);
+ reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
au_sync_delay(10);
}
static void au1000_enable_rx_tx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
netif_dbg(aup, hw, dev, "enable_rx_tx\n");
- aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ reg = readl(&aup->mac->control);
+ reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
au_sync_delay(10);
}
@@ -289,6 +299,7 @@ au1000_adjust_link(struct net_device *dev)
struct au1000_private *aup = netdev_priv(dev);
struct phy_device *phydev = aup->phy_dev;
unsigned long flags;
+ u32 reg;
int status_change = 0;
@@ -320,14 +331,15 @@ au1000_adjust_link(struct net_device *dev)
/* switching duplex mode requires to disable rx and tx! */
au1000_hard_stop(dev);
- if (DUPLEX_FULL == phydev->duplex)
- aup->mac->control = ((aup->mac->control
- | MAC_FULL_DUPLEX)
- & ~MAC_DISABLE_RX_OWN);
- else
- aup->mac->control = ((aup->mac->control
- & ~MAC_FULL_DUPLEX)
- | MAC_DISABLE_RX_OWN);
+ reg = readl(&aup->mac->control);
+ if (DUPLEX_FULL == phydev->duplex) {
+ reg |= MAC_FULL_DUPLEX;
+ reg &= ~MAC_DISABLE_RX_OWN;
+ } else {
+ reg &= ~MAC_FULL_DUPLEX;
+ reg |= MAC_DISABLE_RX_OWN;
+ }
+ writel(reg, &aup->mac->control);
au_sync_delay(1);
au1000_enable_rx_tx(dev);
@@ -361,10 +373,11 @@ au1000_adjust_link(struct net_device *dev)
}
}
-static int au1000_mii_probe (struct net_device *dev)
+static int au1000_mii_probe(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
+ int phy_addr;
if (aup->phy_static_config) {
BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
@@ -374,42 +387,46 @@ static int au1000_mii_probe (struct net_device *dev)
else
netdev_info(dev, "using PHY-less setup\n");
return 0;
- } else {
- int phy_addr;
-
- /* find the first (lowest address) PHY on the current MAC's MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (aup->mii_bus->phy_map[phy_addr]) {
- phydev = aup->mii_bus->phy_map[phy_addr];
- if (!aup->phy_search_highest_addr)
- break; /* break out with first one found */
- }
-
- if (aup->phy1_search_mac0) {
- /* try harder to find a PHY */
- if (!phydev && (aup->mac_id == 1)) {
- /* no PHY found, maybe we have a dual PHY? */
- dev_info(&dev->dev, ": no PHY found on MAC1, "
- "let's see if it's attached to MAC0...\n");
-
- /* find the first (lowest address) non-attached PHY on
- * the MAC0 MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- struct phy_device *const tmp_phydev =
- aup->mii_bus->phy_map[phy_addr];
-
- if (aup->mac_id == 1)
- break;
-
- if (!tmp_phydev)
- continue; /* no PHY here... */
+ }
- if (tmp_phydev->attached_dev)
- continue; /* already claimed by MAC0 */
+ /* find the first (lowest address) PHY
+ * on the current MAC's MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus->phy_map[phy_addr]) {
+ phydev = aup->mii_bus->phy_map[phy_addr];
+ if (!aup->phy_search_highest_addr)
+ /* break out with first one found */
+ break;
+ }
- phydev = tmp_phydev;
- break; /* found it */
- }
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ /* find the first (lowest address) non-attached
+ * PHY on the MAC0 MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ aup->mii_bus->phy_map[phy_addr];
+
+ if (aup->mac_id == 1)
+ break;
+
+ /* no PHY here... */
+ if (!tmp_phydev)
+ continue;
+
+ /* already claimed by MAC0 */
+ if (tmp_phydev->attached_dev)
+ continue;
+
+ phydev = tmp_phydev;
+ break; /* found it */
}
}
}
@@ -460,20 +477,20 @@ static int au1000_mii_probe (struct net_device *dev)
* has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
+static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
{
- db_dest_t *pDB;
+ struct db_dest *pDB;
pDB = aup->pDBfree;
- if (pDB) {
+ if (pDB)
aup->pDBfree = pDB->pnext;
- }
+
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
- db_dest_t *pDBfree = aup->pDBfree;
+ struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
pDBfree->pnext = pDB;
aup->pDBfree = pDB;
@@ -486,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
- *aup->enable = MAC_EN_CLOCK_ENABLE;
+ writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
au_sync_delay(2);
- *aup->enable = 0;
+ writel(0, &aup->enable);
au_sync_delay(2);
aup->tx_full = 0;
@@ -515,7 +532,7 @@ static void au1000_reset_mac(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
- au1000_reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked(dev);
spin_unlock_irqrestore(&aup->lock, flags);
}
@@ -532,11 +549,13 @@ au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
for (i = 0; i < NUM_RX_DMA; i++) {
aup->rx_dma_ring[i] =
- (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
+ (struct rx_dma *)
+ (rx_base + sizeof(struct rx_dma)*i);
}
for (i = 0; i < NUM_TX_DMA; i++) {
aup->tx_dma_ring[i] =
- (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
+ (struct tx_dma *)
+ (tx_base + sizeof(struct tx_dma)*i);
}
}
@@ -624,18 +643,21 @@ static int au1000_init(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
- aup->mac->control = 0;
+ writel(0, &aup->mac->control);
aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
aup->tx_tail = aup->tx_head;
aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
- aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
- aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
- dev->dev_addr[1]<<8 | dev->dev_addr[0];
+ writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
+ &aup->mac->mac_addr_high);
+ writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+ dev->dev_addr[1]<<8 | dev->dev_addr[0],
+ &aup->mac->mac_addr_low);
- for (i = 0; i < NUM_RX_DMA; i++) {
+
+ for (i = 0; i < NUM_RX_DMA; i++)
aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
- }
+
au_sync();
control = MAC_RX_ENABLE | MAC_TX_ENABLE;
@@ -651,8 +673,8 @@ static int au1000_init(struct net_device *dev)
control |= MAC_FULL_DUPLEX;
}
- aup->mac->control = control;
- aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
+ writel(control, &aup->mac->control);
+ writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
au_sync();
spin_unlock_irqrestore(&aup->lock, flags);
@@ -689,9 +711,9 @@ static int au1000_rx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
struct sk_buff *skb;
- volatile rx_dma_t *prxd;
+ struct rx_dma *prxd;
u32 buff_stat, status;
- db_dest_t *pDB;
+ struct db_dest *pDB;
u32 frmlen;
netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
@@ -721,24 +743,26 @@ static int au1000_rx(struct net_device *dev)
netif_rx(skb); /* pass the packet to upper layers */
} else {
if (au1000_debug > 4) {
+ pr_err("rx_error(s):");
if (status & RX_MISSED_FRAME)
- printk("rx miss\n");
+ pr_cont(" miss");
if (status & RX_WDOG_TIMER)
- printk("rx wdog\n");
+ pr_cont(" wdog");
if (status & RX_RUNT)
- printk("rx runt\n");
+ pr_cont(" runt");
if (status & RX_OVERLEN)
- printk("rx overlen\n");
+ pr_cont(" overlen");
if (status & RX_COLL)
- printk("rx coll\n");
+ pr_cont(" coll");
if (status & RX_MII_ERROR)
- printk("rx mii error\n");
+ pr_cont(" mii error");
if (status & RX_CRC_ERROR)
- printk("rx crc error\n");
+ pr_cont(" crc error");
if (status & RX_LEN_ERROR)
- printk("rx len error\n");
+ pr_cont(" len error");
if (status & RX_U_CNTRL_FRAME)
- printk("rx u control frame\n");
+ pr_cont(" u control frame");
+ pr_cont("\n");
}
}
prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
@@ -761,7 +785,8 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
/* any other tx errors are only valid
- * in half duplex mode */
+ * in half duplex mode
+ */
ps->tx_errors++;
ps->tx_aborted_errors++;
}
@@ -782,7 +807,7 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
static void au1000_tx_ack(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- volatile tx_dma_t *ptxd;
+ struct tx_dma *ptxd;
ptxd = aup->tx_dma_ring[aup->tx_tail];
@@ -862,7 +887,7 @@ static int au1000_close(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
- au1000_reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked(dev);
/* stop the device */
netif_stop_queue(dev);
@@ -881,9 +906,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
- volatile tx_dma_t *ptxd;
+ struct tx_dma *ptxd;
u32 buff_stat;
- db_dest_t *pDB;
+ struct db_dest *pDB;
int i;
netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
@@ -910,9 +935,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
pDB = aup->tx_db_inuse[aup->tx_head];
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
- for (i = skb->len; i < ETH_ZLEN; i++) {
+ for (i = skb->len; i < ETH_ZLEN; i++)
((char *)pDB->vaddr)[i] = 0;
- }
+
ptxd->len = ETH_ZLEN;
} else
ptxd->len = skb->len;
@@ -943,15 +968,16 @@ static void au1000_tx_timeout(struct net_device *dev)
static void au1000_multicast_list(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
- netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
-
+ netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
+ reg = readl(&aup->mac->control);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- aup->mac->control |= MAC_PROMISCUOUS;
+ reg |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
- aup->mac->control |= MAC_PASS_ALL_MULTI;
- aup->mac->control &= ~MAC_PROMISCUOUS;
+ reg |= MAC_PASS_ALL_MULTI;
+ reg &= ~MAC_PROMISCUOUS;
netdev_info(dev, "Pass all multicast\n");
} else {
struct netdev_hw_addr *ha;
@@ -961,11 +987,12 @@ static void au1000_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
(long *)mc_filter);
- aup->mac->multi_hash_high = mc_filter[1];
- aup->mac->multi_hash_low = mc_filter[0];
- aup->mac->control &= ~MAC_PROMISCUOUS;
- aup->mac->control |= MAC_HASH_MODE;
+ writel(mc_filter[1], &aup->mac->multi_hash_high);
+ writel(mc_filter[0], &aup->mac->multi_hash_low);
+ reg &= ~MAC_PROMISCUOUS;
+ reg |= MAC_HASH_MODE;
}
+ writel(reg, &aup->mac->control);
}
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -999,10 +1026,9 @@ static int __devinit au1000_probe(struct platform_device *pdev)
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
- db_dest_t *pDB, *pDBfree;
+ struct db_dest *pDB, *pDBfree;
int irq, i, err = 0;
struct resource *base, *macen;
- char ethaddr[6];
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
@@ -1025,13 +1051,15 @@ static int __devinit au1000_probe(struct platform_device *pdev)
goto out;
}
- if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
+ if (!request_mem_region(base->start, resource_size(base),
+ pdev->name)) {
dev_err(&pdev->dev, "failed to request memory region for base registers\n");
err = -ENXIO;
goto out;
}
- if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
+ if (!request_mem_region(macen->start, resource_size(macen),
+ pdev->name)) {
dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
err = -ENXIO;
goto err_request;
@@ -1049,10 +1077,12 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
- aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
+ aup->msg_enable = (au1000_debug < 4 ?
+ AU1000_DEF_MSG_ENABLE : au1000_debug);
- /* Allocate the data buffers */
- /* Snooping works fine with eth on all au1xxx */
+ /* Allocate the data buffers
+ * Snooping works fine with eth on all au1xxx
+ */
aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
@@ -1063,15 +1093,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
/* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
+ aup->mac = (struct mac_reg *)
+ ioremap_nocache(base->start, resource_size(base));
if (!aup->mac) {
dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
goto err_remap1;
}
- /* Setup some variables for quick register address access */
- aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
+ /* Setup some variables for quick register address access */
+ aup->enable = (u32 *)ioremap_nocache(macen->start,
+ resource_size(macen));
if (!aup->enable) {
dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
err = -ENXIO;
@@ -1079,33 +1111,26 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
aup->mac_id = pdev->id;
- if (pdev->id == 0) {
- if (prom_get_ethernet_addr(ethaddr) == 0)
- memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
- else {
- netdev_info(dev, "No MAC address found\n");
- /* Use the hard coded MAC addresses */
- }
-
+ if (pdev->id == 0)
au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- } else if (pdev->id == 1)
+ else if (pdev->id == 1)
au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
- /*
- * Assign to the Ethernet ports two consecutive MAC addresses
- * to match those that are printed on their stickers
- */
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[5] += pdev->id;
+ /* set a random MAC now in case platform_data doesn't provide one */
+ random_ether_addr(dev->dev_addr);
- *aup->enable = 0;
+ writel(0, &aup->enable);
aup->mac_enabled = 0;
pd = pdev->dev.platform_data;
if (!pd) {
- dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
+ dev_info(&pdev->dev, "no platform_data passed,"
+ " PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
+ if (is_valid_ether_addr(pd->mac))
+ memcpy(dev->dev_addr, pd->mac, 6);
+
aup->phy_static_config = pd->phy_static_config;
aup->phy_search_highest_addr = pd->phy_search_highest_addr;
aup->phy1_search_mac0 = pd->phy1_search_mac0;
@@ -1115,8 +1140,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
if (aup->phy_busid && aup->phy_busid > 0) {
- dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
- "bus not supported yet\n");
+ dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
}
@@ -1168,17 +1192,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
for (i = 0; i < NUM_RX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
- if (!pDB) {
+ if (!pDB)
goto err_out;
- }
+
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_TX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
- if (!pDB) {
+ if (!pDB)
goto err_out;
- }
+
aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
@@ -1205,7 +1229,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
(unsigned long)base->start, irq);
if (version_printed++ == 0)
- printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+ pr_info("%s version %s %s\n",
+ DRV_NAME, DRV_VERSION, DRV_AUTHOR);
return 0;
@@ -1214,7 +1239,8 @@ err_out:
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
- * so we can reset the mac properly.*/
+ * so we can reset the mac properly.
+ */
au1000_reset_mac(dev);
for (i = 0; i < NUM_RX_DMA; i++) {
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index d06ec008fbf..6229c774552 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -44,34 +44,34 @@
* Data Buffer Descriptor. Data buffers must be aligned on 32 byte
* boundary for both, receive and transmit.
*/
-typedef struct db_dest {
+struct db_dest {
struct db_dest *pnext;
- volatile u32 *vaddr;
+ u32 *vaddr;
dma_addr_t dma_addr;
-} db_dest_t;
+};
/*
* The transmit and receive descriptors are memory
* mapped registers.
*/
-typedef struct tx_dma {
+struct tx_dma {
u32 status;
u32 buff_stat;
u32 len;
u32 pad;
-} tx_dma_t;
+};
-typedef struct rx_dma {
+struct rx_dma {
u32 status;
u32 buff_stat;
u32 pad[2];
-} rx_dma_t;
+};
/*
* MAC control registers, memory mapped.
*/
-typedef struct mac_reg {
+struct mac_reg {
u32 control;
u32 mac_addr_high;
u32 mac_addr_low;
@@ -82,16 +82,16 @@ typedef struct mac_reg {
u32 flow_control;
u32 vlan1_tag;
u32 vlan2_tag;
-} mac_reg_t;
+};
struct au1000_private {
- db_dest_t *pDBfree;
- db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS];
- volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA];
- volatile tx_dma_t *tx_dma_ring[NUM_TX_DMA];
- db_dest_t *rx_db_inuse[NUM_RX_DMA];
- db_dest_t *tx_db_inuse[NUM_TX_DMA];
+ struct db_dest *pDBfree;
+ struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+ struct rx_dma *rx_dma_ring[NUM_RX_DMA];
+ struct tx_dma *tx_dma_ring[NUM_TX_DMA];
+ struct db_dest *rx_db_inuse[NUM_RX_DMA];
+ struct db_dest *tx_db_inuse[NUM_TX_DMA];
u32 rx_head;
u32 tx_head;
u32 tx_tail;
@@ -99,7 +99,9 @@ struct au1000_private {
int mac_id;
- int mac_enabled; /* whether MAC is currently enabled and running (req. for mdio) */
+ int mac_enabled; /* whether MAC is currently enabled and running
+ * (req. for mdio)
+ */
int old_link; /* used by au1000_adjust_link */
int old_speed;
@@ -117,9 +119,11 @@ struct au1000_private {
int phy_busid;
int phy_irq;
- /* These variables are just for quick access to certain regs addresses. */
- volatile mac_reg_t *mac; /* mac registers */
- volatile u32 *enable; /* address of MAC Enable Register */
+ /* These variables are just for quick access
+ * to certain regs addresses.
+ */
+ struct mac_reg *mac; /* mac registers */
+ u32 *enable; /* address of MAC Enable Register */
u32 vaddr; /* virtual address of rx/tx buffers */
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
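
The au1000 hunks above replace direct loads and stores through volatile register structs with readl()/writel() accessors and fold per-bit register pokes into a single read-modify-write, as in au1000_multicast_list(). What follows is a minimal userspace sketch of that read-modify-write shape only; reg_read32()/reg_write32(), struct fake_mac and the CTRL_* bits are stand-ins invented for illustration, not the driver's or the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's readl()/writel(); real code operates on an
 * __iomem mapping obtained from ioremap_nocache(). */
static inline uint32_t reg_read32(const volatile uint32_t *addr) { return *addr; }
static inline void reg_write32(uint32_t val, volatile uint32_t *addr) { *addr = val; }

/* Hypothetical control-register bits, mirroring MAC_PROMISCUOUS etc. */
#define CTRL_PROMISC	(1u << 0)
#define CTRL_HASH_MODE	(1u << 1)

struct fake_mac {		/* stands in for struct mac_reg */
	uint32_t control;
	uint32_t multi_hash_high;
	uint32_t multi_hash_low;
};

/* Read-modify-write: fetch the register once, adjust bits in a local
 * copy, write it back once, instead of poking the device per bit. */
static void set_filter_mode(struct fake_mac *mac, int promisc,
			    uint32_t hash_hi, uint32_t hash_lo)
{
	uint32_t reg = reg_read32(&mac->control);

	if (promisc) {
		reg |= CTRL_PROMISC;
	} else {
		reg_write32(hash_hi, &mac->multi_hash_high);
		reg_write32(hash_lo, &mac->multi_hash_low);
		reg &= ~CTRL_PROMISC;
		reg |= CTRL_HASH_MODE;
	}
	reg_write32(reg, &mac->control);
}

int main(void)
{
	struct fake_mac mac = { 0 };

	set_filter_mode(&mac, 0, 0xdeadbeef, 0xcafef00d);
	printf("control=%#x hash=%#x:%#x\n", (unsigned)mac.control,
	       (unsigned)mac.multi_hash_high, (unsigned)mac.multi_hash_low);
	return 0;
}

The point is that the control register is read once, adjusted locally, and written back once, which is what the patch achieves by accumulating changes in the local reg variable before the final writel().
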
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 37617abc164..8e7c8a8e61c 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -818,7 +818,7 @@ static int b44_rx(struct b44 *bp, int budget)
copy_skb->data, len);
skb = copy_skb;
}
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bp->dev);
netif_receive_skb(skb);
received++;
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
b44_tx(bp);
/* spin_unlock(&bp->tx_lock); */
}
+ if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
+ bp->istat &= ~ISTAT_RFO;
+ b44_disable_ints(bp);
+ ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ }
+
spin_unlock_irqrestore(&bp->lock, flags);
work_done = 0;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0d2c5da0893..ecfef240a30 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -293,22 +293,22 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
- priv->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
continue;
}
/* recycle packet if it's marked as bad */
if (unlikely(len_stat & DMADESC_ERR_MASK)) {
- priv->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len_stat & DMADESC_OVSIZE_MASK)
- priv->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len_stat & DMADESC_CRC_MASK)
- priv->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
if (len_stat & DMADESC_UNDER_MASK)
- priv->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (len_stat & DMADESC_OV_MASK)
- priv->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
continue;
}
@@ -324,7 +324,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
nskb = netdev_alloc_skb_ip_align(dev, len);
if (!nskb) {
/* forget packet, just rearm desc */
- priv->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
continue;
}
@@ -342,8 +342,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
netif_receive_skb(skb);
} while (--budget > 0);
@@ -403,7 +403,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
spin_unlock(&priv->tx_lock);
if (desc->len_stat & DMADESC_UNDER_MASK)
- priv->stats.tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
released++;
@@ -563,8 +563,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!priv->tx_desc_count)
netif_stop_queue(dev);
- priv->stats.tx_bytes += skb->len;
- priv->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
ret = NETDEV_TX_OK;
out_unlock:
@@ -798,7 +798,7 @@ static int bcm_enet_open(struct net_device *dev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
priv->mac_id ? "1" : "0", priv->phy_id);
- phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
+ phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
@@ -1141,17 +1141,6 @@ static int bcm_enet_stop(struct net_device *dev)
}
/*
- * core request to return device rx/tx stats
- */
-static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
-{
- struct bcm_enet_priv *priv;
-
- priv = netdev_priv(dev);
- return &priv->stats;
-}
-
-/*
* ethtool callbacks
*/
struct bcm_enet_stats {
@@ -1163,16 +1152,18 @@ struct bcm_enet_stats {
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
offsetof(struct bcm_enet_priv, m)
+#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
+ offsetof(struct net_device_stats, m)
static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
- { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
- { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
- { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
- { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
- { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
- { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
- { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
- { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
+ { "rx_packets", DEV_STAT(rx_packets), -1 },
+ { "tx_packets", DEV_STAT(tx_packets), -1 },
+ { "rx_bytes", DEV_STAT(rx_bytes), -1 },
+ { "tx_bytes", DEV_STAT(tx_bytes), -1 },
+ { "rx_errors", DEV_STAT(rx_errors), -1 },
+ { "tx_errors", DEV_STAT(tx_errors), -1 },
+ { "rx_dropped", DEV_STAT(rx_dropped), -1 },
+ { "tx_dropped", DEV_STAT(tx_dropped), -1 },
{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
@@ -1328,7 +1319,11 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
char *p;
s = &bcm_enet_gstrings_stats[i];
- p = (char *)priv + s->stat_offset;
+ if (s->mib_reg == -1)
+ p = (char *)&netdev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
data[i] = (s->sizeof_stat == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
@@ -1605,7 +1600,6 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_open = bcm_enet_open,
.ndo_stop = bcm_enet_stop,
.ndo_start_xmit = bcm_enet_start_xmit,
- .ndo_get_stats = bcm_enet_get_stats,
.ndo_set_mac_address = bcm_enet_set_mac_address,
.ndo_set_multicast_list = bcm_enet_set_multicast_list,
.ndo_do_ioctl = bcm_enet_ioctl,
diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h
index bd3684d42d7..0e3048b788c 100644
--- a/drivers/net/bcm63xx_enet.h
+++ b/drivers/net/bcm63xx_enet.h
@@ -274,7 +274,6 @@ struct bcm_enet_priv {
int pause_tx;
/* stats */
- struct net_device_stats stats;
struct bcm_enet_mib_counters mib;
/* after mib interrupt, mib registers update is done in this
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 99197bd54da..1afabb1e662 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -78,6 +78,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
+#define BE_MAX_MSIX_VECTORS (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
@@ -157,10 +159,9 @@ struct be_mcc_obj {
bool rearm_cq;
};
-struct be_drvr_stats {
+struct be_tx_stats {
u32 be_tx_reqs; /* number of TX requests initiated */
u32 be_tx_stops; /* number of times TX Q was stopped */
- u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
u32 be_tx_wrbs; /* number of tx WRBs used */
u32 be_tx_events; /* number of tx completion events */
u32 be_tx_compl; /* number of tx completion entries processed */
@@ -169,34 +170,6 @@ struct be_drvr_stats {
u64 be_tx_bytes_prev;
u64 be_tx_pkts;
u32 be_tx_rate;
-
- u32 cache_barrier[16];
-
- u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
- u32 be_rx_polls; /* number of times NAPI called poll function */
- u32 be_rx_events; /* number of ucast rx completion events */
- u32 be_rx_compl; /* number of rx completion entries processed */
- ulong be_rx_jiffies;
- u64 be_rx_bytes;
- u64 be_rx_bytes_prev;
- u64 be_rx_pkts;
- u32 be_rx_rate;
- /* number of non ether type II frames dropped where
- * frame len > length field of Mac Hdr */
- u32 be_802_3_dropped_frames;
- /* number of non ether type II frames malformed where
- * in frame len < length field of Mac Hdr */
- u32 be_802_3_malformed_frames;
- u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
- ulong rx_fps_jiffies; /* jiffies at last FPS calc */
- u32 be_rx_frags;
- u32 be_prev_rx_frags;
- u32 be_rx_fps; /* Rx frags per second */
-};
-
-struct be_stats_obj {
- struct be_drvr_stats drvr_stats;
- struct be_dma_mem cmd;
};
struct be_tx_obj {
@@ -214,10 +187,34 @@ struct be_rx_page_info {
bool last_page_user;
};
+struct be_rx_stats {
+ u32 rx_post_fail;/* number of ethrx buffer alloc failures */
+ u32 rx_polls; /* number of times NAPI called poll function */
+ u32 rx_events; /* number of ucast rx completion events */
+ u32 rx_compl; /* number of rx completion entries processed */
+ ulong rx_jiffies;
+ u64 rx_bytes;
+ u64 rx_bytes_prev;
+ u64 rx_pkts;
+ u32 rx_rate;
+ u32 rx_mcast_pkts;
+ u32 rxcp_err; /* Num rx completion entries w/ err set. */
+ ulong rx_fps_jiffies; /* jiffies at last FPS calc */
+ u32 rx_frags;
+ u32 prev_rx_frags;
+ u32 rx_fps; /* Rx frags per second */
+};
+
struct be_rx_obj {
+ struct be_adapter *adapter;
struct be_queue_info q;
struct be_queue_info cq;
struct be_rx_page_info page_info_tbl[RX_Q_LEN];
+ struct be_eq_obj rx_eq;
+ struct be_rx_stats stats;
+ u8 rss_id;
+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
+ u32 cache_line_barrier[16];
};
struct be_vf_cfg {
@@ -228,7 +225,6 @@ struct be_vf_cfg {
u32 vf_tx_rate;
};
-#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
#define BE_INVALID_PMAC_ID 0xffffffff
struct be_adapter {
struct pci_dev *pdev;
@@ -248,21 +244,21 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
bool msix_enabled;
bool isr_registered;
/* TX Rings */
struct be_eq_obj tx_eq;
struct be_tx_obj tx_obj;
+ struct be_tx_stats tx_stats;
u32 cache_line_break[8];
/* Rx rings */
- struct be_eq_obj rx_eq;
- struct be_rx_obj rx_obj;
+ struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */
+ u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
- bool rx_post_starved; /* Zero rx frags have been posted to BE */
struct vlan_group *vlan_grp;
u16 vlans_added;
@@ -270,7 +266,7 @@ struct be_adapter {
u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
struct be_dma_mem mc_cmd_mem;
- struct be_stats_obj stats;
+ struct be_dma_mem stats_cmd;
/* Work queue used to perform periodic tasks like getting statistics */
struct delayed_work work;
@@ -286,6 +282,7 @@ struct be_adapter {
bool promiscuous;
bool wol;
u32 function_mode;
+ u32 function_caps;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool ue_detected;
@@ -312,10 +309,20 @@ struct be_adapter {
extern const struct ethtool_ops be_ethtool_ops;
-#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
+#define tx_stats(adapter) (&adapter->tx_stats)
+#define rx_stats(rxo) (&rxo->stats)
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
+#define for_all_rx_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
+ i++, rxo++)
+
+/* Just skip the first default non-rss queue */
+#define for_all_rss_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
+ i++, rxo++)
+
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
@@ -413,6 +420,20 @@ static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
adapter->is_virtfn = (data != 0xAA);
}
+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+ u32 addr;
+
+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+ mac[5] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[3] = (u8)((addr >> 16) & 0xFF);
+ mac[2] = 0xC9;
+ mac[1] = 0x00;
+ mac[0] = 0x00;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
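
The new for_all_rx_queues()/for_all_rss_queues() helpers above walk the embedded rx_obj array while keeping the index available to the body, and the RSS variant starts at slot 1 to skip the default non-RSS queue. A small standalone sketch of the same macro shape, with names invented for illustration:

#include <stdio.h>

#define MAX_QS 4

struct rx_queue { int id; unsigned long pkts; };

struct adapter {
	struct rx_queue rxq[MAX_QS];
	int num_rx_qs;
};

/* Walk every RX queue: initialise both the index and the cursor, and
 * advance both in the third clause, mirroring for_all_rx_queues(). */
#define for_each_rxq(ad, q, i) \
	for ((i) = 0, (q) = &(ad)->rxq[0]; (i) < (ad)->num_rx_qs; (i)++, (q)++)

/* Skip queue 0 (the default, non-RSS queue), like for_all_rss_queues(). */
#define for_each_rss_rxq(ad, q, i) \
	for ((i) = 0, (q) = &(ad)->rxq[1]; (i) < (ad)->num_rx_qs - 1; (i)++, (q)++)

int main(void)
{
	struct adapter ad = { .num_rx_qs = 3 };
	struct rx_queue *q;
	int i;

	for_each_rxq(&ad, q, i)
		q->id = i;

	for_each_rss_rxq(&ad, q, i)
		printf("rss slot %d -> queue id %d\n", i, q->id);

	return 0;
}

Both the index and the cursor are advanced together, which is why the macros take i and the queue pointer as separate arguments.
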
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d305494a60..bf2dc269de1 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -71,7 +71,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (compl_status == MCC_STATUS_SUCCESS) {
if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
struct be_cmd_resp_get_stats *resp =
- adapter->stats.cmd.va;
+ adapter->stats_cmd.va;
be_dws_le_to_cpu(&resp->hw_stats,
sizeof(resp->hw_stats));
netdev_stats_update(adapter);
@@ -98,9 +98,9 @@ static void be_async_link_state_process(struct be_adapter *adapter,
static inline bool is_link_state_evt(u32 trailer)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE);
+ ASYNC_EVENT_CODE_LINK_STATE;
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
-
- /* Interpret compl as a async link evt */
- be_async_link_state_process(adapter,
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
*status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- be_dump_ue(adapter);
+ be_detect_dump_ue(adapter);
return -1;
}
@@ -756,7 +754,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u16 max_frame_size, u32 if_id, u32 rss)
+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -787,6 +785,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
rxq->id = le16_to_cpu(resp->id);
rxq->created = true;
+ *rss_id = resp->rss_id;
}
spin_unlock(&adapter->mbox_lock);
@@ -1261,7 +1260,8 @@ err:
}
/* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+ u32 *mode, u32 *caps)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_query_fw_cfg *req;
@@ -1283,6 +1283,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
*port_num = le32_to_cpu(resp->phys_port);
*mode = le32_to_cpu(resp->function_mode);
+ *caps = le32_to_cpu(resp->function_caps);
}
spin_unlock(&adapter->mbox_lock);
@@ -1313,6 +1314,37 @@ int be_cmd_reset_function(struct be_adapter *adapter)
return status;
}
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_rss_config *req;
+ u32 myhash[10];
+ int status;
+
+ spin_lock(&adapter->mbox_lock);
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_RSS_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
+ memcpy(req->cpu_table, rsstable, table_size);
+ memcpy(req->hash, myhash, sizeof(myhash));
+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
+
+ status = be_mbox_notify_wait(adapter);
+
+ spin_unlock(&adapter->mbox_lock);
+ return status;
+}
+
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
u8 bcn, u8 sts, u8 state)
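
be_cmd_rss_config() above copies a caller-built table of per-queue RSS ids into the command and encodes the table length in cpu_table_size_log2 as fls(table_size) - 1. A userspace sketch of preparing those two pieces; fls_compat() is a portable stand-in for the kernel's fls(), and the queue ids are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit,
 * 0 when no bits are set. */
static int fls_compat(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

#define MAX_RSS_QS 4

int main(void)
{
	uint8_t rss_ids[MAX_RSS_QS] = { 3, 5, 6, 9 };	/* ids returned at queue creation */
	uint8_t cpu_table[128];				/* matches the command's cpu_table[] */
	unsigned int table_size = MAX_RSS_QS;
	unsigned int size_log2;
	unsigned int i;

	/* One slot per RSS queue, as be_rx_queues_create() does with
	 * rsstable[i] = rxo->rss_id before calling be_cmd_rss_config(). */
	memset(cpu_table, 0, sizeof(cpu_table));
	for (i = 0; i < table_size; i++)
		cpu_table[i] = rss_ids[i];

	/* The size travels as an exponent, matching fls(table_size) - 1. */
	size_log2 = fls_compat(table_size) - 1;

	printf("table_size=%u log2=%u ids: %u %u %u %u\n",
	       table_size, size_log2, (unsigned)cpu_table[0],
	       (unsigned)cpu_table[1], (unsigned)cpu_table[2],
	       (unsigned)cpu_table[3]);
	return 0;
}
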
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a28cfd..b7a40b172d1 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -147,6 +147,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PHY_DETAILS 102
+#define OPCODE_ETH_RSS_CONFIG 1
#define OPCODE_ETH_ACPI_CONFIG 2
#define OPCODE_ETH_PROMISCUOUS 3
#define OPCODE_ETH_GET_STATISTICS 4
@@ -409,7 +410,7 @@ struct be_cmd_req_eth_rx_create {
struct be_cmd_resp_eth_rx_create {
struct be_cmd_resp_hdr hdr;
u16 id;
- u8 cpu_id;
+ u8 rss_id;
u8 rsvd0;
} __packed;
@@ -739,9 +740,10 @@ struct be_cmd_resp_modify_eq_delay {
} __packed;
/******************** Get FW Config *******************/
+#define BE_FUNCTION_CAPS_RSS 0x2
struct be_cmd_req_query_fw_cfg {
struct be_cmd_req_hdr hdr;
- u32 rsvd[30];
+ u32 rsvd[31];
};
struct be_cmd_resp_query_fw_cfg {
@@ -751,6 +753,26 @@ struct be_cmd_resp_query_fw_cfg {
u32 phys_port;
u32 function_mode;
u32 rsvd[26];
+ u32 function_caps;
+};
+
+/******************** RSS Config *******************/
+/* RSS types */
+#define RSS_ENABLE_NONE 0x0
+#define RSS_ENABLE_IPV4 0x1
+#define RSS_ENABLE_TCP_IPV4 0x2
+#define RSS_ENABLE_IPV6 0x4
+#define RSS_ENABLE_TCP_IPV6 0x8
+
+struct be_cmd_req_rss_config {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u16 enable_rss;
+ u16 cpu_table_size_log2;
+ u32 hash[10];
+ u8 cpu_table[128];
+ u8 flush;
+ u8 rsvd0[3];
};
/******************** Port Beacon ***************************/
@@ -937,7 +959,7 @@ extern int be_cmd_txq_create(struct be_adapter *adapter,
extern int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id,
u16 frag_size, u16 max_frame_size, u32 if_id,
- u32 rss);
+ u32 rss, u8 *rss_id);
extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_link_status_query(struct be_adapter *adapter,
@@ -960,8 +982,10 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
extern int be_cmd_get_flow_control(struct be_adapter *adapter,
u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
- u32 *port_num, u32 *cap);
+ u32 *port_num, u32 *function_mode, u32 *function_caps);
extern int be_cmd_reset_function(struct be_adapter *adapter);
+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u16 table_size);
extern int be_process_mcc(struct be_adapter *adapter, int *status);
extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
@@ -992,5 +1016,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cd16243c7c3..0f46366ecc4 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,14 +26,16 @@ struct be_ethtool_stat {
int offset;
};
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
offsetof(_struct, field)
#define NETSTAT_INFO(field) #field, NETSTAT,\
FIELDINFO(struct net_device_stats,\
field)
-#define DRVSTAT_INFO(field) #field, DRVSTAT,\
- FIELDINFO(struct be_drvr_stats, field)
+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
+ FIELDINFO(struct be_tx_stats, field)
+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
+ FIELDINFO(struct be_rx_stats, field)
#define MISCSTAT_INFO(field) #field, MISCSTAT,\
FIELDINFO(struct be_rxf_stats, field)
#define PORTSTAT_INFO(field) #field, PORTSTAT,\
@@ -51,20 +53,12 @@ static const struct be_ethtool_stat et_stats[] = {
{NETSTAT_INFO(tx_errors)},
{NETSTAT_INFO(rx_dropped)},
{NETSTAT_INFO(tx_dropped)},
- {DRVSTAT_INFO(be_tx_reqs)},
- {DRVSTAT_INFO(be_tx_stops)},
- {DRVSTAT_INFO(be_fwd_reqs)},
- {DRVSTAT_INFO(be_tx_wrbs)},
- {DRVSTAT_INFO(be_rx_polls)},
- {DRVSTAT_INFO(be_tx_events)},
- {DRVSTAT_INFO(be_rx_events)},
- {DRVSTAT_INFO(be_tx_compl)},
- {DRVSTAT_INFO(be_rx_compl)},
- {DRVSTAT_INFO(be_ethrx_post_fail)},
- {DRVSTAT_INFO(be_802_3_dropped_frames)},
- {DRVSTAT_INFO(be_802_3_malformed_frames)},
- {DRVSTAT_INFO(be_tx_rate)},
- {DRVSTAT_INFO(be_rx_rate)},
+ {DRVSTAT_TX_INFO(be_tx_rate)},
+ {DRVSTAT_TX_INFO(be_tx_reqs)},
+ {DRVSTAT_TX_INFO(be_tx_wrbs)},
+ {DRVSTAT_TX_INFO(be_tx_stops)},
+ {DRVSTAT_TX_INFO(be_tx_events)},
+ {DRVSTAT_TX_INFO(be_tx_compl)},
{PORTSTAT_INFO(rx_unicast_frames)},
{PORTSTAT_INFO(rx_multicast_frames)},
{PORTSTAT_INFO(rx_broadcast_frames)},
@@ -90,6 +84,9 @@ static const struct be_ethtool_stat et_stats[] = {
{PORTSTAT_INFO(rx_non_rss_packets)},
{PORTSTAT_INFO(rx_ipv4_packets)},
{PORTSTAT_INFO(rx_ipv6_packets)},
+ {PORTSTAT_INFO(rx_switched_unicast_packets)},
+ {PORTSTAT_INFO(rx_switched_multicast_packets)},
+ {PORTSTAT_INFO(rx_switched_broadcast_packets)},
{PORTSTAT_INFO(tx_unicastframes)},
{PORTSTAT_INFO(tx_multicastframes)},
{PORTSTAT_INFO(tx_broadcastframes)},
@@ -102,11 +99,24 @@ static const struct be_ethtool_stat et_stats[] = {
{MISCSTAT_INFO(rx_drops_too_many_frags)},
{MISCSTAT_INFO(rx_drops_invalid_ring)},
{MISCSTAT_INFO(forwarded_packets)},
- {MISCSTAT_INFO(rx_drops_mtu)},
- {ERXSTAT_INFO(rx_drops_no_fragments)},
+ {MISCSTAT_INFO(rx_drops_mtu)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
+/* Stats related to multi RX queues */
+static const struct be_ethtool_stat et_rx_stats[] = {
+ {DRVSTAT_RX_INFO(rx_bytes)},
+ {DRVSTAT_RX_INFO(rx_pkts)},
+ {DRVSTAT_RX_INFO(rx_rate)},
+ {DRVSTAT_RX_INFO(rx_polls)},
+ {DRVSTAT_RX_INFO(rx_events)},
+ {DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
+ {DRVSTAT_RX_INFO(rx_post_fail)},
+ {ERXSTAT_INFO(rx_drops_no_fragments)}
+};
+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
+
static const char et_self_tests[][ETH_GSTRING_LEN] = {
"MAC Loopback test",
"PHY Loopback test",
@@ -139,7 +149,7 @@ static int
be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
@@ -163,25 +173,49 @@ static int
be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
+ struct be_rx_obj *rxo;
+ struct be_eq_obj *rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
u32 tx_max, tx_min, tx_cur;
u32 rx_max, rx_min, rx_cur;
- int status = 0;
+ int status = 0, i;
if (coalesce->use_adaptive_tx_coalesce == 1)
return -EINVAL;
- /* if AIC is being turned on now, start with an EQD of 0 */
- if (rx_eq->enable_aic == 0 &&
- coalesce->use_adaptive_rx_coalesce == 1) {
- rx_eq->cur_eqd = 0;
+ for_all_rx_queues(adapter, rxo, i) {
+ rx_eq = &rxo->rx_eq;
+
+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
+ rx_eq->cur_eqd = 0;
+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
+
+ rx_max = coalesce->rx_coalesce_usecs_high;
+ rx_min = coalesce->rx_coalesce_usecs_low;
+ rx_cur = coalesce->rx_coalesce_usecs;
+
+ if (rx_eq->enable_aic) {
+ if (rx_max > BE_MAX_EQD)
+ rx_max = BE_MAX_EQD;
+ if (rx_min > rx_max)
+ rx_min = rx_max;
+ rx_eq->max_eqd = rx_max;
+ rx_eq->min_eqd = rx_min;
+ if (rx_eq->cur_eqd > rx_max)
+ rx_eq->cur_eqd = rx_max;
+ if (rx_eq->cur_eqd < rx_min)
+ rx_eq->cur_eqd = rx_min;
+ } else {
+ if (rx_cur > BE_MAX_EQD)
+ rx_cur = BE_MAX_EQD;
+ if (rx_eq->cur_eqd != rx_cur) {
+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
+ rx_cur);
+ if (!status)
+ rx_eq->cur_eqd = rx_cur;
+ }
+ }
}
- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
-
- rx_max = coalesce->rx_coalesce_usecs_high;
- rx_min = coalesce->rx_coalesce_usecs_low;
- rx_cur = coalesce->rx_coalesce_usecs;
tx_max = coalesce->tx_coalesce_usecs_high;
tx_min = coalesce->tx_coalesce_usecs_low;
@@ -195,27 +229,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
tx_eq->cur_eqd = tx_cur;
}
- if (rx_eq->enable_aic) {
- if (rx_max > BE_MAX_EQD)
- rx_max = BE_MAX_EQD;
- if (rx_min > rx_max)
- rx_min = rx_max;
- rx_eq->max_eqd = rx_max;
- rx_eq->min_eqd = rx_min;
- if (rx_eq->cur_eqd > rx_max)
- rx_eq->cur_eqd = rx_max;
- if (rx_eq->cur_eqd < rx_min)
- rx_eq->cur_eqd = rx_min;
- } else {
- if (rx_cur > BE_MAX_EQD)
- rx_cur = BE_MAX_EQD;
- if (rx_eq->cur_eqd != rx_cur) {
- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
- rx_cur);
- if (!status)
- rx_eq->cur_eqd = rx_cur;
- }
- }
return 0;
}
@@ -243,32 +256,25 @@ be_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
- struct be_port_rxf_stats *port_stats =
- &rxf_stats->port[adapter->port_num];
- struct net_device_stats *net_stats = &netdev->stats;
+ struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
struct be_erx_stats *erx_stats = &hw_stats->erx;
+ struct be_rx_obj *rxo;
void *p = NULL;
- int i;
+ int i, j;
for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
switch (et_stats[i].type) {
case NETSTAT:
- p = net_stats;
+ p = &netdev->stats;
break;
- case DRVSTAT:
- p = drvr_stats;
+ case DRVSTAT_TX:
+ p = &adapter->tx_stats;
break;
case PORTSTAT:
- p = port_stats;
+ p = &hw_stats->rxf.port[adapter->port_num];
break;
case MISCSTAT:
- p = rxf_stats;
- break;
- case ERXSTAT: /* Currently only one ERX stat is provided */
- p = (u32 *)erx_stats + adapter->rx_obj.q.id;
+ p = &hw_stats->rxf;
break;
}
@@ -276,19 +282,44 @@ be_get_ethtool_stats(struct net_device *netdev,
data[i] = (et_stats[i].size == sizeof(u64)) ?
*(u64 *)p: *(u32 *)p;
}
+
+ for_all_rx_queues(adapter, rxo, j) {
+ for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
+ switch (et_rx_stats[i].type) {
+ case DRVSTAT_RX:
+ p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
+ break;
+ case ERXSTAT:
+ p = (u32 *)erx_stats + rxo->q.id;
+ break;
+ }
+ data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] =
+ (et_rx_stats[i].size == sizeof(u64)) ?
+ *(u64 *)p: *(u32 *)p;
+ }
+ }
}
static void
be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
- int i;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < adapter->num_rx_qs; i++) {
+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
+ sprintf(data, "rxq%d: %s", i,
+ et_rx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
break;
case ETH_SS_TEST:
for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
@@ -301,11 +332,14 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
static int be_get_sset_count(struct net_device *netdev, int stringset)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
switch (stringset) {
case ETH_SS_TEST:
return ETHTOOL_TESTS_NUM;
case ETH_SS_STATS:
- return ETHTOOL_STATS_NUM;
+ return ETHTOOL_STATS_NUM +
+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
default:
return -EINVAL;
}
@@ -420,10 +454,10 @@ be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct be_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->rx_obj.q.len;
+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
ring->tx_max_pending = adapter->tx_obj.q.len;
- ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
}
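
The be_ethtool.c changes above report per-RX-queue driver stats by returning ETHTOOL_STATS_NUM + num_rx_qs * ETHTOOL_RXSTATS_NUM from get_sset_count(), prefixing each per-queue string with "rxqN:", and writing each queue's block at base + queue * block_size in the data array. A compact userspace sketch of that layout convention, with invented counts and names:

#include <stdio.h>

#define NUM_BASE_STATS 2
#define NUM_RXQ_STATS  3
#define NUM_RX_QS      2

static const char *base_names[NUM_BASE_STATS] = { "tx_packets", "tx_errors" };
static const char *rxq_names[NUM_RXQ_STATS]   = { "rx_pkts", "rx_bytes", "rx_drops" };

static int sset_count(void)
{
	/* Fixed stats first, then one block per RX queue. */
	return NUM_BASE_STATS + NUM_RX_QS * NUM_RXQ_STATS;
}

int main(void)
{
	unsigned long long data[NUM_BASE_STATS + NUM_RX_QS * NUM_RXQ_STATS];
	int i, j, n = sset_count();

	/* Lay the values out exactly as the strings are emitted. */
	for (i = 0; i < NUM_BASE_STATS; i++)
		data[i] = 100 + i;
	for (j = 0; j < NUM_RX_QS; j++)
		for (i = 0; i < NUM_RXQ_STATS; i++)
			data[NUM_BASE_STATS + j * NUM_RXQ_STATS + i] =
				1000 * (j + 1) + i;

	for (i = 0; i < NUM_BASE_STATS; i++)
		printf("%-16s %llu\n", base_names[i], data[i]);
	for (j = 0; j < NUM_RX_QS; j++)
		for (i = 0; i < NUM_RXQ_STATS; i++)
			printf("rxq%d: %-10s %llu\n", j, rxq_names[i],
			       data[NUM_BASE_STATS + j * NUM_RXQ_STATS + i]);

	printf("total strings/values: %d\n", n);
	return 0;
}

Keeping the string order and the data order in lockstep is what lets ethtool -S pair each value with its label.
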
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046402b..a2ec5df0d73 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
#define FLASH_FCoE_BIOS_START_g3 (13631488)
#define FLASH_REDBOOT_START_g3 (262144)
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
/*
* BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 74e146f470c..9a1cd28b426 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -32,6 +32,10 @@ module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+static bool multi_rxq = true;
+module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
+
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -111,6 +115,11 @@ static char *ue_status_hi_desc[] = {
"Unknown"
};
+static inline bool be_multi_rxq(struct be_adapter *adapter)
+{
+ return (adapter->num_rx_qs > 1);
+}
+
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -236,17 +245,27 @@ netdev_addr:
void netdev_stats_update(struct be_adapter *adapter)
{
- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
+ struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
struct be_port_rxf_stats *port_stats =
&rxf_stats->port[adapter->port_num];
struct net_device_stats *dev_stats = &adapter->netdev->stats;
struct be_erx_stats *erx_stats = &hw_stats->erx;
+ struct be_rx_obj *rxo;
+ int i;
+
+ memset(dev_stats, 0, sizeof(*dev_stats));
+ for_all_rx_queues(adapter, rxo, i) {
+ dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
+ dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
+ dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
+ /* no space in linux buffers: best possible approximation */
+ dev_stats->rx_dropped +=
+ erx_stats->rx_drops_no_fragments[rxo->q.id];
+ }
- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+ dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
+ dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -263,18 +282,11 @@ void netdev_stats_update(struct be_adapter *adapter)
port_stats->rx_ip_checksum_errs +
port_stats->rx_udp_checksum_errs;
- /* no space in linux buffers: best possible approximation */
- dev_stats->rx_dropped =
- erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
-
/* detailed rx errors */
dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
port_stats->rx_out_range_errors +
port_stats->rx_frame_too_long;
- /* receive ring buffer overflow */
- dev_stats->rx_over_errors = 0;
-
dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
/* frame alignment errors */
@@ -285,24 +297,6 @@ void netdev_stats_update(struct be_adapter *adapter)
dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
port_stats->rx_input_fifo_overflow +
rxf_stats->rx_drops_no_pbuf;
- /* receiver missed packetd */
- dev_stats->rx_missed_errors = 0;
-
- /* packet transmit problems */
- dev_stats->tx_errors = 0;
-
- /* no space available in linux */
- dev_stats->tx_dropped = 0;
-
- dev_stats->multicast = port_stats->rx_multicast_frames;
- dev_stats->collisions = 0;
-
- /* detailed tx_errors */
- dev_stats->tx_aborted_errors = 0;
- dev_stats->tx_carrier_errors = 0;
- dev_stats->tx_fifo_errors = 0;
- dev_stats->tx_heartbeat_errors = 0;
- dev_stats->tx_window_errors = 0;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
@@ -326,10 +320,10 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
}
/* Update the EQ delay on BE based on the RX frags consumed / sec */
-static void be_rx_eqd_update(struct be_adapter *adapter)
+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
- struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
+ struct be_rx_stats *stats = &rxo->stats;
ulong now = jiffies;
u32 eqd;
@@ -346,12 +340,12 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
if ((now - stats->rx_fps_jiffies) < HZ)
return;
- stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
+ stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
((now - stats->rx_fps_jiffies) / HZ);
stats->rx_fps_jiffies = now;
- stats->be_prev_rx_frags = stats->be_rx_frags;
- eqd = stats->be_rx_fps / 110000;
+ stats->prev_rx_frags = stats->rx_frags;
+ eqd = stats->rx_fps / 110000;
eqd = eqd << 3;
if (eqd > rx_eq->max_eqd)
eqd = rx_eq->max_eqd;
@@ -365,11 +359,6 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
rx_eq->cur_eqd = eqd;
}
-static struct net_device_stats *be_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
u64 rate = bytes;
@@ -383,7 +372,7 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
static void be_tx_rate_update(struct be_adapter *adapter)
{
- struct be_drvr_stats *stats = drvr_stats(adapter);
+ struct be_tx_stats *stats = tx_stats(adapter);
ulong now = jiffies;
/* Wrapped around? */
@@ -405,7 +394,7 @@ static void be_tx_rate_update(struct be_adapter *adapter)
static void be_tx_stats_update(struct be_adapter *adapter,
u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
- struct be_drvr_stats *stats = drvr_stats(adapter);
+ struct be_tx_stats *stats = tx_stats(adapter);
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
@@ -656,14 +645,8 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
- be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
adapter->vlan_grp = grp;
- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
@@ -825,37 +808,38 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_rx_rate_update(struct be_adapter *adapter)
+static void be_rx_rate_update(struct be_rx_obj *rxo)
{
- struct be_drvr_stats *stats = drvr_stats(adapter);
+ struct be_rx_stats *stats = &rxo->stats;
ulong now = jiffies;
/* Wrapped around */
- if (time_before(now, stats->be_rx_jiffies)) {
- stats->be_rx_jiffies = now;
+ if (time_before(now, stats->rx_jiffies)) {
+ stats->rx_jiffies = now;
return;
}
/* Update the rate once in two seconds */
- if ((now - stats->be_rx_jiffies) < 2 * HZ)
+ if ((now - stats->rx_jiffies) < 2 * HZ)
return;
- stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
- - stats->be_rx_bytes_prev,
- now - stats->be_rx_jiffies);
- stats->be_rx_jiffies = now;
- stats->be_rx_bytes_prev = stats->be_rx_bytes;
+ stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
+ now - stats->rx_jiffies);
+ stats->rx_jiffies = now;
+ stats->rx_bytes_prev = stats->rx_bytes;
}
-static void be_rx_stats_update(struct be_adapter *adapter,
- u32 pktsize, u16 numfrags)
+static void be_rx_stats_update(struct be_rx_obj *rxo,
+ u32 pktsize, u16 numfrags, u8 pkt_type)
{
- struct be_drvr_stats *stats = drvr_stats(adapter);
+ struct be_rx_stats *stats = &rxo->stats;
- stats->be_rx_compl++;
- stats->be_rx_frags += numfrags;
- stats->be_rx_bytes += pktsize;
- stats->be_rx_pkts++;
+ stats->rx_compl++;
+ stats->rx_frags += numfrags;
+ stats->rx_bytes += pktsize;
+ stats->rx_pkts++;
+ if (pkt_type == BE_MULTICAST_PACKET)
+ stats->rx_mcast_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -875,12 +859,14 @@ static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
}
static struct be_rx_page_info *
-get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
+get_rx_page_info(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ u16 frag_idx)
{
struct be_rx_page_info *rx_page_info;
- struct be_queue_info *rxq = &adapter->rx_obj.q;
+ struct be_queue_info *rxq = &rxo->q;
- rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
+ rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
@@ -895,9 +881,10 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
- struct be_eth_rx_compl *rxcp)
+ struct be_rx_obj *rxo,
+ struct be_eth_rx_compl *rxcp)
{
- struct be_queue_info *rxq = &adapter->rx_obj.q;
+ struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 rxq_idx, i, num_rcvd;
@@ -905,7 +892,7 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxq_idx);
+ page_info = get_rx_page_info(adapter, rxo, rxq_idx);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
index_inc(&rxq_idx, rxq->len);
@@ -916,20 +903,22 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* skb_fill_rx_data forms a complete skb for an ether frame
* indicated by rxcp.
*/
-static void skb_fill_rx_data(struct be_adapter *adapter,
+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
u16 num_rcvd)
{
- struct be_queue_info *rxq = &adapter->rx_obj.q;
+ struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
+ u8 pkt_type;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
- page_info = get_rx_page_info(adapter, rxq_idx);
+ page_info = get_rx_page_info(adapter, rxo, rxq_idx);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
@@ -967,7 +956,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
for (i = 1, j = 0; i < num_rcvd; i++) {
size -= curr_frag_len;
index_inc(&rxq_idx, rxq->len);
- page_info = get_rx_page_info(adapter, rxq_idx);
+ page_info = get_rx_page_info(adapter, rxo, rxq_idx);
curr_frag_len = min(size, rx_frag_size);
@@ -993,11 +982,12 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
BUG_ON(j > MAX_SKB_FRAGS);
done:
- be_rx_stats_update(adapter, pktsize, num_rcvd);
+ be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
struct be_eth_rx_compl *rxcp)
{
struct sk_buff *skb;
@@ -1014,14 +1004,14 @@ static void be_rx_compl_process(struct be_adapter *adapter,
if (unlikely(!skb)) {
if (net_ratelimit())
dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
- be_rx_compl_discard(adapter, rxcp);
+ be_rx_compl_discard(adapter, rxo, rxcp);
return;
}
- skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
+ skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1051,15 +1041,17 @@ static void be_rx_compl_process(struct be_adapter *adapter,
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
- struct be_eth_rx_compl *rxcp)
+ struct be_rx_obj *rxo,
+ struct be_eth_rx_compl *rxcp)
{
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
- struct be_queue_info *rxq = &adapter->rx_obj.q;
- struct be_eq_obj *eq_obj = &adapter->rx_eq;
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
u16 i, rxq_idx = 0, vid, j;
u8 vtm;
+ u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
@@ -1070,6 +1062,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
@@ -1078,13 +1071,13 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
skb = napi_get_frags(&eq_obj->napi);
if (!skb) {
- be_rx_compl_discard(adapter, rxcp);
+ be_rx_compl_discard(adapter, rxo, rxcp);
return;
}
remaining = pkt_size;
for (i = 0, j = -1; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxq_idx);
+ page_info = get_rx_page_info(adapter, rxo, rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1125,12 +1118,12 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
}
- be_rx_stats_update(adapter, pkt_size, num_rcvd);
+ be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
-static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
+static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
- struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
+ struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
return NULL;
@@ -1138,7 +1131,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
rmb();
be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
- queue_tail_inc(&adapter->rx_obj.cq);
+ queue_tail_inc(&rxo->cq);
return rxcp;
}
@@ -1164,22 +1157,23 @@ static inline struct page *be_alloc_pages(u32 size)
* Allocate a page, split it to fragments of size rx_frag_size and post as
* receive buffers to BE
*/
-static void be_post_rx_frags(struct be_adapter *adapter)
+static void be_post_rx_frags(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
- struct be_queue_info *rxq = &adapter->rx_obj.q;
+ struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL;
struct be_eth_rx_d *rxd;
u64 page_dmaaddr = 0, frag_dmaaddr;
u32 posted, page_offset = 0;
- page_info = &page_info_tbl[rxq->head];
+ page_info = &rxo->page_info_tbl[rxq->head];
for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
if (!pagep) {
pagep = be_alloc_pages(adapter->big_page_size);
if (unlikely(!pagep)) {
- drvr_stats(adapter)->be_ethrx_post_fail++;
+ rxo->stats.rx_post_fail++;
break;
}
page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
@@ -1218,7 +1212,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
be_rxq_notify(adapter, rxq->id, posted);
} else if (atomic_read(&rxq->used) == 0) {
/* Let be_worker replenish when memory is available */
- adapter->rx_post_starved = true;
+ rxo->rx_post_starved = true;
}
}
@@ -1321,17 +1315,17 @@ static void be_eq_clean(struct be_adapter *adapter,
be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
-static void be_rx_q_clean(struct be_adapter *adapter)
+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
struct be_rx_page_info *page_info;
- struct be_queue_info *rxq = &adapter->rx_obj.q;
- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_queue_info *rx_cq = &rxo->cq;
struct be_eth_rx_compl *rxcp;
u16 tail;
/* First cleanup pending rx completions */
- while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
- be_rx_compl_discard(adapter, rxcp);
+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
be_rx_compl_reset(rxcp);
be_cq_notify(adapter, rx_cq->id, true, 1);
}
@@ -1339,7 +1333,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
/* Then free posted rx buffer that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(adapter, tail);
+ page_info = get_rx_page_info(adapter, rxo, tail);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
@@ -1517,92 +1511,101 @@ tx_eq_free:
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
-
- q = &adapter->rx_obj.q;
- if (q->created) {
- be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
-
- /* After the rxq is invalidated, wait for a grace time
- * of 1ms for all dma to end and the flush compl to arrive
- */
- mdelay(1);
- be_rx_q_clean(adapter);
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ q = &rxo->q;
+ if (q->created) {
+ be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
+ /* After the rxq is invalidated, wait for a grace time
+ * of 1ms for all dma to end and the flush compl to
+ * arrive
+ */
+ mdelay(1);
+ be_rx_q_clean(adapter, rxo);
+ }
+ be_queue_free(adapter, q);
+
+ q = &rxo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+
+ /* Clear any residual events */
+ q = &rxo->rx_eq.q;
+ if (q->created) {
+ be_eq_clean(adapter, &rxo->rx_eq);
+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
+ }
+ be_queue_free(adapter, q);
}
- be_queue_free(adapter, q);
-
- q = &adapter->rx_obj.cq;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
- be_queue_free(adapter, q);
-
- /* Clear any residual events */
- be_eq_clean(adapter, &adapter->rx_eq);
-
- q = &adapter->rx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq, *q, *cq;
- int rc;
+ struct be_rx_obj *rxo;
+ int rc, i;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
- adapter->rx_eq.max_eqd = BE_MAX_EQD;
- adapter->rx_eq.min_eqd = 0;
- adapter->rx_eq.cur_eqd = 0;
- adapter->rx_eq.enable_aic = true;
-
- /* Alloc Rx Event queue */
- eq = &adapter->rx_eq.q;
- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
- if (rc)
- return rc;
-
- /* Ask BE to create Rx Event queue */
- rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
- if (rc)
- goto rx_eq_free;
-
- /* Alloc RX eth compl queue */
- cq = &adapter->rx_obj.cq;
- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
- if (rc)
- goto rx_eq_destroy;
-
- /* Ask BE to create Rx eth compl queue */
- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
- if (rc)
- goto rx_cq_free;
-
- /* Alloc RX eth queue */
- q = &adapter->rx_obj.q;
- rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
- if (rc)
- goto rx_cq_destroy;
-
- /* Ask BE to create Rx eth queue */
- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
- if (rc)
- goto rx_q_free;
+ for_all_rx_queues(adapter, rxo, i) {
+ rxo->adapter = adapter;
+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
+ rxo->rx_eq.enable_aic = true;
+
+ /* EQ */
+ eq = &rxo->rx_eq.q;
+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
+ if (rc)
+ goto err;
+
+ /* CQ */
+ cq = &rxo->cq;
+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
+ sizeof(struct be_eth_rx_compl));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
+ if (rc)
+ goto err;
+
+ /* Rx Q */
+ q = &rxo->q;
+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
+ sizeof(struct be_eth_rx_d));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
+ BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
+ if (rc)
+ goto err;
+ }
+
+ if (be_multi_rxq(adapter)) {
+ u8 rsstable[MAX_RSS_QS];
+
+ for_all_rss_queues(adapter, rxo, i)
+ rsstable[i] = rxo->rss_id;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->num_rx_qs - 1);
+ if (rc)
+ goto err;
+ }
return 0;
-rx_q_free:
- be_queue_free(adapter, q);
-rx_cq_destroy:
- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
-rx_cq_free:
- be_queue_free(adapter, cq);
-rx_eq_destroy:
- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
-rx_eq_free:
- be_queue_free(adapter, eq);
- return rc;
+err:
+ be_rx_queues_destroy(adapter);
+ return -1;
}
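
The rewritten be_rx_queues_create() above replaces the per-resource error labels with a single err: label that simply calls be_rx_queues_destroy(), relying on the q->created checks to skip anything not yet set up. A minimal userspace sketch of that create-loop-with-single-unwind pattern (the ring_alloc()/ring_free() helpers are hypothetical stand-ins, not driver APIs):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	#define NUM_QS 4

	struct ring {
		void *mem;
		bool created;
	};

	/* Hypothetical helpers standing in for be_queue_alloc()/be_cmd_q_destroy(). */
	static int ring_alloc(struct ring *r, size_t len)
	{
		r->mem = malloc(len);
		if (!r->mem)
			return -1;
		r->created = true;
		return 0;
	}

	static void ring_free(struct ring *r)
	{
		if (r->created) {	/* tolerate partially created sets */
			free(r->mem);
			r->created = false;
		}
	}

	static void rings_destroy(struct ring *rings, int n)
	{
		for (int i = 0; i < n; i++)
			ring_free(&rings[i]);
	}

	/* Create all rings; any failure unwinds everything via one error label. */
	static int rings_create(struct ring *rings, int n)
	{
		for (int i = 0; i < n; i++) {
			if (ring_alloc(&rings[i], 4096))
				goto err;
		}
		return 0;
	err:
		rings_destroy(rings, n);
		return -1;
	}

	int main(void)
	{
		struct ring rings[NUM_QS] = { 0 };

		if (rings_create(rings, NUM_QS))
			return 1;
		rings_destroy(rings, NUM_QS);
		return 0;
	}
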
/* There are 8 evt ids per func. Returns the evt id's bit number */
@@ -1614,24 +1617,31 @@ static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
- int isr;
+ struct be_rx_obj *rxo;
+ int isr, i;
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
(adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
- event_handle(adapter, &adapter->tx_eq);
- event_handle(adapter, &adapter->rx_eq);
+ if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
+ event_handle(adapter, &adapter->tx_eq);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
+ event_handle(adapter, &rxo->rx_eq);
+ }
return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
- struct be_adapter *adapter = dev;
+ struct be_rx_obj *rxo = dev;
+ struct be_adapter *adapter = rxo->adapter;
- event_handle(adapter, &adapter->rx_eq);
+ event_handle(adapter, &rxo->rx_eq);
return IRQ_HANDLED;
}
@@ -1645,14 +1655,14 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
return IRQ_HANDLED;
}
-static inline bool do_gro(struct be_adapter *adapter,
+static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
struct be_eth_rx_compl *rxcp)
{
int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
if (err)
- drvr_stats(adapter)->be_rxcp_err++;
+ rxo->stats.rxcp_err++;
return (tcp_frame && !err) ? true : false;
}
@@ -1660,29 +1670,29 @@ static inline bool do_gro(struct be_adapter *adapter,
int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(rx_eq, struct be_adapter, rx_eq);
- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_queue_info *rx_cq = &rxo->cq;
struct be_eth_rx_compl *rxcp;
u32 work_done;
- adapter->stats.drvr_stats.be_rx_polls++;
+ rxo->stats.rx_polls++;
for (work_done = 0; work_done < budget; work_done++) {
- rxcp = be_rx_compl_get(adapter);
+ rxcp = be_rx_compl_get(rxo);
if (!rxcp)
break;
- if (do_gro(adapter, rxcp))
- be_rx_compl_process_gro(adapter, rxcp);
+ if (do_gro(adapter, rxo, rxcp))
+ be_rx_compl_process_gro(adapter, rxo, rxcp);
else
- be_rx_compl_process(adapter, rxcp);
+ be_rx_compl_process(adapter, rxo, rxcp);
be_rx_compl_reset(rxcp);
}
/* Refill the queue */
- if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
- be_post_rx_frags(adapter);
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ be_post_rx_frags(rxo);
/* All consumed */
if (work_done < budget) {
@@ -1736,33 +1746,14 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
netif_wake_queue(adapter->netdev);
}
- drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += tx_compl;
+ tx_stats(adapter)->be_tx_events++;
+ tx_stats(adapter)->be_tx_compl += tx_compl;
}
return 1;
}
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
- u32 online0 = 0, online1 = 0;
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
- if (!online0 || !online1) {
- adapter->ue_detected = true;
- dev_err(&adapter->pdev->dev,
- "UE Detected!! online0=%d online1=%d\n",
- online0, online1);
- return true;
- }
-
- return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
{
u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
u32 i;
@@ -1779,6 +1770,11 @@ void be_dump_ue(struct be_adapter *adapter)
ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
if (ue_status_lo) {
for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
if (ue_status_lo & 1)
@@ -1800,25 +1796,27 @@ static void be_worker(struct work_struct *work)
{
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
if (!adapter->stats_ioctl_sent)
- be_cmd_get_stats(adapter, &adapter->stats.cmd);
-
- /* Set EQ delay */
- be_rx_eqd_update(adapter);
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
be_tx_rate_update(adapter);
- be_rx_rate_update(adapter);
- if (adapter->rx_post_starved) {
- adapter->rx_post_starved = false;
- be_post_rx_frags(adapter);
- }
- if (!adapter->ue_detected) {
- if (be_detect_ue(adapter))
- be_dump_ue(adapter);
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_rate_update(rxo);
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo);
+ }
}
+ if (!adapter->ue_detected)
+ be_detect_dump_ue(adapter);
+
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
@@ -1830,17 +1828,45 @@ static void be_msix_disable(struct be_adapter *adapter)
}
}
+static int be_num_rxqs_get(struct be_adapter *adapter)
+{
+ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "No support for multiple RX queues\n");
+ return 1;
+ }
+}
+
static void be_msix_enable(struct be_adapter *adapter)
{
+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
int i, status;
- for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
+ adapter->num_rx_qs = be_num_rxqs_get(adapter);
+
+ for (i = 0; i < (adapter->num_rx_qs + 1); i++)
adapter->msix_entries[i].entry = i;
status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- BE_NUM_MSIX_VECTORS);
- if (status == 0)
- adapter->msix_enabled = true;
+ adapter->num_rx_qs + 1);
+ if (status == 0) {
+ goto done;
+ } else if (status >= BE_MIN_MSIX_VECTORS) {
+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ status) == 0) {
+ adapter->num_rx_qs = status - 1;
+ dev_warn(&adapter->pdev->dev,
+ "Could alloc only %d MSIx vectors. "
+ "Using %d RX Qs\n", status, adapter->num_rx_qs);
+ goto done;
+ }
+ }
+ return;
+done:
+ adapter->msix_enabled = true;
}
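
be_msix_enable() above leans on the old pci_enable_msix() contract: 0 on success, a negative errno on failure, or a positive count of vectors the platform could actually provide, which the driver retries with and uses to shrink num_rx_qs. A rough standalone sketch of that fallback logic, with a fake_enable_msix() stub standing in for the real PCI call:

	#include <stdio.h>

	/*
	 * Stand-in for pci_enable_msix() as used here: returns 0 on success,
	 * or the number of vectors that could be allocated when the request
	 * was too large.  Purely illustrative.
	 */
	static int fake_enable_msix(int requested, int available)
	{
		if (requested <= available)
			return 0;
		return available;
	}

	#define MIN_VECS 2	/* 1 Tx/MCC + at least 1 Rx */

	static int msix_enable(int wanted_rx_qs, int hw_vectors, int *rx_qs)
	{
		int status = fake_enable_msix(wanted_rx_qs + 1, hw_vectors);

		if (status == 0) {
			*rx_qs = wanted_rx_qs;
			return 0;
		}
		if (status >= MIN_VECS &&
		    fake_enable_msix(status, hw_vectors) == 0) {
			*rx_qs = status - 1;	/* one vector reserved for Tx/MCC */
			printf("only %d vectors, using %d RX queues\n",
			       status, *rx_qs);
			return 0;
		}
		return -1;			/* caller falls back to INTx */
	}

	int main(void)
	{
		int rx_qs;

		if (msix_enable(5, 4, &rx_qs) == 0)
			printf("enabled with %d RX queues\n", rx_qs);
		return 0;
	}
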
static void be_sriov_enable(struct be_adapter *adapter)
@@ -1874,38 +1900,50 @@ static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
static int be_request_irq(struct be_adapter *adapter,
struct be_eq_obj *eq_obj,
- void *handler, char *desc)
+ void *handler, char *desc, void *context)
{
struct net_device *netdev = adapter->netdev;
int vec;
sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
vec = be_msix_vec_get(adapter, eq_obj->q.id);
- return request_irq(vec, handler, 0, eq_obj->desc, adapter);
+ return request_irq(vec, handler, 0, eq_obj->desc, context);
}
-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
+ void *context)
{
int vec = be_msix_vec_get(adapter, eq_obj->q.id);
- free_irq(vec, adapter);
+ free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
- int status;
+ struct be_rx_obj *rxo;
+ int status, i;
+ char qname[10];
- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
+ adapter);
if (status)
goto err;
- status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
- if (status)
- goto free_tx_irq;
+ for_all_rx_queues(adapter, rxo, i) {
+ sprintf(qname, "rxq%d", i);
+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
+ qname, rxo);
+ if (status)
+ goto err_msix;
+ }
return 0;
-free_tx_irq:
- be_free_irq(adapter, &adapter->tx_eq);
+err_msix:
+ be_free_irq(adapter, &adapter->tx_eq, adapter);
+
+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
+ be_free_irq(adapter, &rxo->rx_eq, rxo);
+
err:
dev_warn(&adapter->pdev->dev,
"MSIX Request IRQ failed - err %d\n", status);
@@ -1945,6 +1983,8 @@ done:
static void be_irq_unregister(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct be_rx_obj *rxo;
+ int i;
if (!adapter->isr_registered)
return;
@@ -1956,8 +1996,11 @@ static void be_irq_unregister(struct be_adapter *adapter)
}
/* MSIx */
- be_free_irq(adapter, &adapter->tx_eq);
- be_free_irq(adapter, &adapter->rx_eq);
+ be_free_irq(adapter, &adapter->tx_eq, adapter);
+
+ for_all_rx_queues(adapter, rxo, i)
+ be_free_irq(adapter, &rxo->rx_eq, rxo);
+
done:
adapter->isr_registered = false;
}
@@ -1965,9 +2008,9 @@ done:
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
+ struct be_rx_obj *rxo;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
- int vec;
+ int vec, i;
cancel_delayed_work_sync(&adapter->work);
@@ -1982,14 +2025,19 @@ static int be_close(struct net_device *netdev)
if (adapter->msix_enabled) {
vec = be_msix_vec_get(adapter, tx_eq->q.id);
synchronize_irq(vec);
- vec = be_msix_vec_get(adapter, rx_eq->q.id);
- synchronize_irq(vec);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
+ synchronize_irq(vec);
+ }
} else {
synchronize_irq(netdev->irq);
}
be_irq_unregister(adapter);
- napi_disable(&rx_eq->napi);
+ for_all_rx_queues(adapter, rxo, i)
+ napi_disable(&rxo->rx_eq.napi);
+
napi_disable(&tx_eq->napi);
/* Wait for all pending tx completions to arrive so that
@@ -2003,17 +2051,17 @@ static int be_close(struct net_device *netdev)
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ struct be_rx_obj *rxo;
bool link_up;
- int status;
+ int status, i;
u8 mac_speed;
u16 link_speed;
- /* First time posting */
- be_post_rx_frags(adapter);
-
- napi_enable(&rx_eq->napi);
+ for_all_rx_queues(adapter, rxo, i) {
+ be_post_rx_frags(rxo);
+ napi_enable(&rxo->rx_eq.napi);
+ }
napi_enable(&tx_eq->napi);
be_irq_register(adapter);
@@ -2021,12 +2069,12 @@ static int be_open(struct net_device *netdev)
be_intr_set(adapter, true);
/* The evt queues are created in unarmed state; arm them */
- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
+ for_all_rx_queues(adapter, rxo, i) {
+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
+ be_cq_notify(adapter, rxo->cq.id, true, 0);
+ }
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
- /* Rx compl queue may be in unarmed state; rearm it */
- be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
-
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
@@ -2093,6 +2141,47 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
+/*
+ * Generate a seed MAC address from the PF MAC address using jhash.
+ * MAC addresses for VFs are assigned incrementally starting from the seed.
+ * These addresses are programmed in the ASIC by the PF and the VF driver
+ * queries for the MAC address during its probe.
+ */
+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+{
+ u32 vf = 0;
+ int status = 0;
+ u8 mac[ETH_ALEN];
+
+ be_vf_eth_addr_generate(adapter, mac);
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->vf_cfg[vf].vf_if_handle,
+ &adapter->vf_cfg[vf].vf_pmac_id);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Mac address add failed for VF %d\n", vf);
+ else
+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+ mac[5] += 1;
+ }
+ return status;
+}
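
As the comment above notes, VF MAC addresses are handed out incrementally from a jhash-derived seed; only the last octet is bumped (mac[5] += 1), so the scheme implicitly assumes fewer than 256 VFs per seed. A small illustration with a made-up seed address:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define ETH_ALEN 6

	int main(void)
	{
		/* Hypothetical seed address; the driver derives it with jhash. */
		uint8_t seed[ETH_ALEN] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x00 };
		uint8_t mac[ETH_ALEN];
		int num_vfs = 4;

		memcpy(mac, seed, ETH_ALEN);
		for (int vf = 0; vf < num_vfs; vf++) {
			printf("VF %d -> %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
			       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
			mac[5] += 1;	/* next VF gets the next address */
		}
		return 0;
	}
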
+
+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+{
+ u32 vf;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+ be_cmd_pmac_del(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ adapter->vf_cfg[vf].vf_pmac_id);
+ }
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -2107,6 +2196,11 @@ static int be_setup(struct be_adapter *adapter)
BE_IF_FLAGS_PROMISCUOUS |
BE_IF_FLAGS_PASS_L3L4_ERRORS;
en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+ if (be_multi_rxq(adapter)) {
+ cap_flags |= BE_IF_FLAGS_RSS;
+ en_flags |= BE_IF_FLAGS_RSS;
+ }
}
status = be_cmd_if_create(adapter, cap_flags, en_flags,
@@ -2152,10 +2246,20 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto rx_qs_destroy;
+ if (be_physfn(adapter)) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto mcc_q_destroy;
+ }
+
adapter->link_speed = -1;
return 0;
+mcc_q_destroy:
+ if (be_physfn(adapter))
+ be_vf_eth_addr_rem(adapter);
+ be_mcc_queues_destroy(adapter);
rx_qs_destroy:
be_rx_queues_destroy(adapter);
tx_qs_destroy:
@@ -2172,6 +2276,9 @@ do_none:
static int be_clear(struct be_adapter *adapter)
{
+ if (be_physfn(adapter))
+ be_vf_eth_addr_rem(adapter);
+
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
@@ -2399,7 +2506,6 @@ static struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
- .ndo_get_stats = be_get_stats,
.ndo_set_rx_mode = be_set_multicast_list,
.ndo_set_mac_address = be_mac_addr_set,
.ndo_change_mtu = be_change_mtu,
@@ -2416,6 +2522,8 @@ static struct net_device_ops be_netdev_ops = {
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
@@ -2437,8 +2545,10 @@ static void be_netdev_init(struct net_device *netdev)
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
- netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
- BE_NAPI_WEIGHT);
+ for_all_rx_queues(adapter, rxo, i)
+ netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
+ BE_NAPI_WEIGHT);
+
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
@@ -2572,8 +2682,7 @@ done:
static void be_stats_cleanup(struct be_adapter *adapter)
{
- struct be_stats_obj *stats = &adapter->stats;
- struct be_dma_mem *cmd = &stats->cmd;
+ struct be_dma_mem *cmd = &adapter->stats_cmd;
if (cmd->va)
pci_free_consistent(adapter->pdev, cmd->size,
@@ -2582,8 +2691,7 @@ static void be_stats_cleanup(struct be_adapter *adapter)
static int be_stats_init(struct be_adapter *adapter)
{
- struct be_stats_obj *stats = &adapter->stats;
- struct be_dma_mem *cmd = &stats->cmd;
+ struct be_dma_mem *cmd = &adapter->stats_cmd;
cmd->size = sizeof(struct be_cmd_req_get_stats);
cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
@@ -2628,8 +2736,8 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- status = be_cmd_query_fw_cfg(adapter,
- &adapter->port_num, &adapter->function_mode);
+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
+ &adapter->function_mode, &adapter->function_caps);
if (status)
return status;
@@ -2664,7 +2772,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
struct be_adapter *adapter;
struct net_device *netdev;
-
status = pci_enable_device(pdev);
if (status)
goto do_none;
@@ -2697,11 +2804,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
- be_netdev_init(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
- be_msix_enable(adapter);
-
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!status) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -2745,12 +2849,15 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto stats_clean;
+ be_msix_enable(adapter);
+
INIT_DELAYED_WORK(&adapter->work, be_worker);
status = be_setup(adapter);
if (status)
- goto stats_clean;
+ goto msix_disable;
+ be_netdev_init(netdev);
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
@@ -2760,12 +2867,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
unsetup:
be_clear(adapter);
+msix_disable:
+ be_msix_disable(adapter);
stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
- be_msix_disable(adapter);
be_sriov_disable(adapter);
free_netdev(adapter->netdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 012613fde3f..7a0e4156fad 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -803,15 +803,14 @@ static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompa
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
- union skb_shared_tx *shtx = skb_tx(skb);
- if (shtx->hardware) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
int timeout_cnt = MAX_TIMEOUT_CNT;
/* When doing time stamping, keep the connection to the socket
* a while longer
*/
- shtx->in_progress = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/*
* The timestamping is done at the EMAC module's MII/RMII interface
@@ -991,7 +990,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct bfin_mac_local *lp = netdev_priv(dev);
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
- union skb_shared_tx *shtx = skb_tx(skb);
current_tx_ptr->skb = skb;
@@ -1005,7 +1003,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
* of this field are the length of the packet payload in bytes and the higher
* 4 bits are the timestamping enable field.
*/
- if (shtx->hardware)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
*data |= 0x1000;
current_tx_ptr->desc_a.start_addr = (u32)data;
@@ -1015,7 +1013,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
} else {
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
/* enable timestamping for the sent packet */
- if (shtx->hardware)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
skb->len);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 959add2410b..a1b8c8b8010 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1233,15 +1233,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
}
spin_unlock_irqrestore(&bp->lock, flags);
}
-static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct bmac_data *bp = netdev_priv(dev);
- strcpy(info->driver, "bmac");
- strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
-}
static const struct ethtool_ops bmac_ethtool_ops = {
- .get_drvinfo = bmac_get_drvinfo,
.get_link = ethtool_op_get_link,
};
@@ -1588,7 +1581,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
int i;
if (bmac_devs == NULL)
- return (-ENOSYS);
+ return -ENOSYS;
len += sprintf(buffer, "BMAC counters & registers\n");
diff --git a/drivers/net/bna/Makefile b/drivers/net/bna/Makefile
new file mode 100644
index 00000000000..a5d604de7fe
--- /dev/null
+++ b/drivers/net/bna/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# All rights reserved.
+#
+
+obj-$(CONFIG_BNA) += bna.o
+
+bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
+bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
+
+EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
new file mode 100644
index 00000000000..f7b789a3b21
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.c
@@ -0,0 +1,291 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_defs_cna.h"
+#include "cna.h"
+#include "bfa_cee.h"
+#include "bfi_cna.h"
+#include "bfa_ioc.h"
+
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
+
+static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
+static void bfa_cee_format_cee_cfg(void *buffer);
+
+static void
+bfa_cee_format_cee_cfg(void *buffer)
+{
+ struct bfa_cee_attr *cee_cfg = buffer;
+ bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
+}
+
+static void
+bfa_cee_stats_swap(struct bfa_cee_stats *stats)
+{
+ u32 *buffer = (u32 *)stats;
+ int i;
+
+ for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
+ i++) {
+ buffer[i] = ntohl(buffer[i]);
+ }
+}
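
bfa_cee_stats_swap() converts the whole stats block by treating it as an array of big-endian u32s, which only works because every field in bfa_cee_stats is a u32. A self-contained analogue of the same trick on a toy struct:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	/* Toy stats block made only of 32-bit counters, like bfa_cee_stats. */
	struct toy_stats {
		uint32_t tx_frames;
		uint32_t rx_frames;
		uint32_t rx_invalid;
	};

	static void toy_stats_swap(struct toy_stats *stats)
	{
		uint32_t *buf = (uint32_t *)stats;

		for (size_t i = 0; i < sizeof(*stats) / sizeof(uint32_t); i++)
			buf[i] = ntohl(buf[i]);	/* firmware data is big-endian */
	}

	int main(void)
	{
		struct toy_stats s = { htonl(10), htonl(20), htonl(3) };

		toy_stats_swap(&s);
		printf("tx=%u rx=%u invalid=%u\n", s.tx_frames, s.rx_frames,
		       s.rx_invalid);
		return 0;
	}
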
+
+static void
+bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
+{
+ lldp_cfg->time_to_live =
+ ntohs(lldp_cfg->time_to_live);
+ lldp_cfg->enabled_system_cap =
+ ntohs(lldp_cfg->enabled_system_cap);
+}
+
+/**
+ * bfa_cee_attr_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE attributes
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_attr_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
+}
+/**
+ * bfa_cee_stats_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE stats
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_stats_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_attr_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr));
+ bfa_cee_format_cee_cfg(cee->attr);
+ }
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn)
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+}
+
+/**
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_stats_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats));
+ bfa_cee_stats_swap(cee->stats);
+ }
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn)
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+}
+
+/**
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+/**
+ * bfa_nw_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_nw_cee_meminfo(void)
+{
+ return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
+}
+
+/**
+ * bfa_nw_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+{
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
+ cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
+ cee->attr = (struct bfa_cee_attr *) dma_kva;
+ cee->stats = (struct bfa_cee_stats *)
+ (dma_kva + bfa_cee_attr_meminfo());
+}
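
bfa_nw_cee_mem_claim() carves one coherent DMA allocation into an attributes region followed by a stats region, each padded to the DMA alignment by the meminfo helpers. A sketch of the same carve-out with malloc() standing in for the DMA allocation (the sizes and alignment here are illustrative, not the firmware's):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	#define DMA_ALIGN 256

	/* Round a region up to the DMA alignment, like bfa_cee_attr_meminfo(). */
	static size_t region_size(size_t payload)
	{
		return (payload + DMA_ALIGN - 1) & ~(size_t)(DMA_ALIGN - 1);
	}

	struct attr { char data[300]; };
	struct stats { char data[700]; };

	int main(void)
	{
		size_t total = region_size(sizeof(struct attr)) +
			       region_size(sizeof(struct stats));
		/* malloc() stands in for the coherent DMA allocation. */
		uint8_t *kva = malloc(total);
		struct attr *attr;
		struct stats *stats;

		if (!kva)
			return 1;
		attr = (struct attr *)kva;
		stats = (struct stats *)(kva + region_size(sizeof(struct attr)));
		printf("total=%zu attr@%p stats@%p\n", total, (void *)attr,
		       (void *)stats);
		free(kva);
		return 0;
	}
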
+
+/**
+ * bfa_cee_isr()
+ *
+ * @brief Handles Mail-box interrupts for CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+static void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp *get_rsp;
+ struct bfa_cee *cee = (struct bfa_cee *) cbarg;
+ msg = (union bfi_cee_i2h_msg_u *) m;
+ get_rsp = (struct bfi_cee_get_rsp *) m;
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * bfa_cee_hbfail()
+ *
+ * @brief CEE module heart-beat failure handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+static void
+bfa_cee_hbfail(void *arg)
+{
+ struct bfa_cee *cee;
+ cee = (struct bfa_cee *) arg;
+
+ if (cee->get_attr_pending == true) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->get_stats_pending == true) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->reset_stats_pending == true) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+}
+
+/**
+ * bfa_nw_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+ void *dev)
+{
+ BUG_ON(cee == NULL);
+ cee->dev = dev;
+ cee->ioc = ioc;
+
+ bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
+ bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+}
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
new file mode 100644
index 00000000000..20543d15b64
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.h
@@ -0,0 +1,64 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_CEE_H__
+#define __BFA_CEE_H__
+
+#include "bfa_defs_cna.h"
+#include "bfa_ioc.h"
+
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
+
+struct bfa_cee_cbfn {
+ bfa_cee_get_attr_cbfn_t get_attr_cbfn;
+ void *get_attr_cbarg;
+ bfa_cee_get_stats_cbfn_t get_stats_cbfn;
+ void *get_stats_cbarg;
+ bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
+ void *reset_stats_cbarg;
+};
+
+struct bfa_cee {
+ void *dev;
+ bool get_attr_pending;
+ bool get_stats_pending;
+ bool reset_stats_pending;
+ enum bfa_status get_attr_status;
+ enum bfa_status get_stats_status;
+ enum bfa_status reset_stats_status;
+ struct bfa_cee_cbfn cbfn;
+ struct bfa_ioc_hbfail_notify hbfail;
+ struct bfa_cee_attr *attr;
+ struct bfa_cee_stats *stats;
+ struct bfa_dma attr_dma;
+ struct bfa_dma stats_dma;
+ struct bfa_ioc *ioc;
+ struct bfa_mbox_cmd get_cfg_mb;
+ struct bfa_mbox_cmd get_stats_mb;
+ struct bfa_mbox_cmd reset_stats_mb;
+};
+
+u32 bfa_nw_cee_meminfo(void);
+void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+ u64 dma_pa);
+void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+
+#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
new file mode 100644
index 00000000000..29c1b8de2c2
--- /dev/null
+++ b/drivers/net/bna/bfa_defs.h
@@ -0,0 +1,243 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "cna.h"
+#include "bfa_defs_status.h"
+#include "bfa_defs_mfg_comm.h"
+
+#define BFA_STRING_32 32
+#define BFA_VERSION_LEN 64
+
+/**
+ * ---------------------- adapter definitions ------------
+ */
+
+/**
+ * BFA adapter level attributes.
+ */
+enum {
+ BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+ /*!< adapter serial num length */
+ BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
+ BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
+ BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
+ BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
+ BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
+};
+
+struct bfa_adapter_attr {
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 card_type;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ u64 pwwn;
+ char node_symname[FC_SYMNAME_MAX];
+ char hw_ver[BFA_VERSION_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+ char os_type[BFA_ADAPTER_OS_TYPE_LEN];
+ struct bfa_mfg_vpd vpd;
+ struct mac mac;
+
+ u8 nports;
+ u8 max_speed;
+ u8 prototype;
+ char asic_rev;
+
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 cna_capable;
+
+ u8 is_mezz;
+ u8 trunk_capable;
+};
+
+/**
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+ BFA_IOC_DRIVER_LEN = 16,
+ BFA_IOC_CHIP_REV_LEN = 8,
+};
+
+/**
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr {
+ char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
+ char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
+ char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
+ char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
+ char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
+ char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
+};
+
+/**
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr {
+ u16 vendor_id; /*!< PCI vendor ID */
+ u16 device_id; /*!< PCI device ID */
+ u16 ssid; /*!< subsystem ID */
+ u16 ssvid; /*!< subsystem vendor ID */
+ u32 pcifn; /*!< PCI device function */
+ u32 rsvd; /* padding */
+ char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
+};
+
+/**
+ * IOC states
+ */
+enum bfa_ioc_state {
+ BFA_IOC_RESET = 1, /*!< IOC is in reset state */
+ BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
+ BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
+ BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
+ BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
+ BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
+ BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
+ BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
+ BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */
+};
+
+/**
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats {
+ u32 enable_reqs;
+ u32 disable_reqs;
+ u32 get_attr_reqs;
+ u32 dbg_sync;
+ u32 dbg_dump;
+ u32 unknown_reqs;
+};
+
+/**
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats {
+ u32 ioc_isrs;
+ u32 ioc_enables;
+ u32 ioc_disables;
+ u32 ioc_hbfails;
+ u32 ioc_boots;
+ u32 stats_tmos;
+ u32 hb_count;
+ u32 disable_reqs;
+ u32 enable_reqs;
+ u32 disable_replies;
+ u32 enable_replies;
+};
+
+/**
+ * IOC statistics
+ */
+struct bfa_ioc_stats {
+ struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
+ struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
+};
+
+enum bfa_ioc_type {
+ BFA_IOC_TYPE_FC = 1,
+ BFA_IOC_TYPE_FCoE = 2,
+ BFA_IOC_TYPE_LL = 3,
+};
+
+/**
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr {
+ enum bfa_ioc_type ioc_type;
+ enum bfa_ioc_state state; /*!< IOC state */
+ struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
+ struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
+ struct bfa_ioc_pci_attr pci_attr;
+ u8 port_id; /*!< port number */
+ u8 rsvd[7]; /*!< 64bit align */
+};
+
+/**
+ * ---------------------- mfg definitions ------------
+ */
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
+
+#define BFA_MFG_PARTNUM_SIZE 14
+#define BFA_MFG_SUPPLIER_ID_SIZE 10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+
+#pragma pack(1)
+
+/**
+ * @brief BFA adapter manufacturing block definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block {
+ u8 version; /*!< manufacturing block version */
+ u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+ u16 mfgsize; /*!< mfg block size */
+ u16 u16_chksum; /*!< old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /*!< manufacturing day */
+ u8 mfg_month; /*!< manufacturing month */
+ u16 mfg_year; /*!< manufacturing year */
+ u64 mfg_wwn; /*!< wwn base for this adapter */
+ u8 num_wwn; /*!< number of wwns assigned */
+ u8 mfg_speeds; /*!< speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char
+ supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char
+ supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /*!< mac address */
+ u8 num_mac; /*!< number of mac addresses */
+ u8 rsv2;
+ u32 mfg_type; /*!< card type */
+ u8 rsv3[108];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+};
+
+#pragma pack()
+
+/**
+ * ---------------------- pci definitions ------------
+ */
+
+#define bfa_asic_id_ct(devid) \
+ ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
+ (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+
+#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/bna/bfa_defs_cna.h b/drivers/net/bna/bfa_defs_cna.h
new file mode 100644
index 00000000000..7e0a9187bdd
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_cna.h
@@ -0,0 +1,223 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_CNA_H__
+#define __BFA_DEFS_CNA_H__
+
+#include "bfa_defs.h"
+
+/**
+ * @brief
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats {
+ u64 secs_reset; /*!< Seconds since stats is reset */
+ u64 tx_frames; /*!< Tx frames */
+ u64 tx_words; /*!< Tx words */
+ u64 tx_lip; /*!< Tx LIP */
+ u64 tx_nos; /*!< Tx NOS */
+ u64 tx_ols; /*!< Tx OLS */
+ u64 tx_lr; /*!< Tx LR */
+ u64 tx_lrr; /*!< Tx LRR */
+ u64 rx_frames; /*!< Rx frames */
+ u64 rx_words; /*!< Rx words */
+ u64 lip_count; /*!< Rx LIP */
+ u64 nos_count; /*!< Rx NOS */
+ u64 ols_count; /*!< Rx OLS */
+ u64 lr_count; /*!< Rx LR */
+ u64 lrr_count; /*!< Rx LRR */
+ u64 invalid_crcs; /*!< Rx CRC err frames */
+ u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
+ u64 undersized_frm; /*!< Rx undersized frames */
+ u64 oversized_frm; /*!< Rx oversized frames */
+ u64 bad_eof_frm; /*!< Rx frames with bad EOF */
+ u64 error_frames; /*!< Errored frames */
+ u64 dropped_frames; /*!< Dropped frames */
+ u64 link_failures; /*!< Link Failure (LF) count */
+ u64 loss_of_syncs; /*!< Loss of sync count */
+ u64 loss_of_signals; /*!< Loss of signal count */
+ u64 primseq_errs; /*!< Primitive sequence protocol err. */
+ u64 bad_os_count; /*!< Invalid ordered sets */
+ u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
+ u64 err_enc; /*!< Encoding err frame_8b10b */
+};
+
+/**
+ * @brief
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats {
+ u64 secs_reset; /*!< Seconds since stats is reset */
+ u64 frame_64; /*!< Frames 64 bytes */
+ u64 frame_65_127; /*!< Frames 65-127 bytes */
+ u64 frame_128_255; /*!< Frames 128-255 bytes */
+ u64 frame_256_511; /*!< Frames 256-511 bytes */
+ u64 frame_512_1023; /*!< Frames 512-1023 bytes */
+ u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
+ u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
+ u64 tx_bytes; /*!< Tx bytes */
+ u64 tx_packets; /*!< Tx packets */
+ u64 tx_mcast_packets; /*!< Tx multicast packets */
+ u64 tx_bcast_packets; /*!< Tx broadcast packets */
+ u64 tx_control_frame; /*!< Tx control frame */
+ u64 tx_drop; /*!< Tx drops */
+ u64 tx_jabber; /*!< Tx jabber */
+ u64 tx_fcs_error; /*!< Tx FCS errors */
+ u64 tx_fragments; /*!< Tx fragments */
+ u64 rx_bytes; /*!< Rx bytes */
+ u64 rx_packets; /*!< Rx packets */
+ u64 rx_mcast_packets; /*!< Rx multicast packets */
+ u64 rx_bcast_packets; /*!< Rx broadcast packets */
+ u64 rx_control_frames; /*!< Rx control frames */
+ u64 rx_unknown_opcode; /*!< Rx unknown opcode */
+ u64 rx_drop; /*!< Rx drops */
+ u64 rx_jabber; /*!< Rx jabber */
+ u64 rx_fcs_error; /*!< Rx FCS errors */
+ u64 rx_alignment_error; /*!< Rx alignment errors */
+ u64 rx_frame_length_error; /*!< Rx frame len errors */
+ u64 rx_code_error; /*!< Rx code errors */
+ u64 rx_fragments; /*!< Rx fragments */
+ u64 rx_pause; /*!< Rx pause */
+ u64 rx_zero_pause; /*!< Rx zero pause */
+ u64 tx_pause; /*!< Tx pause */
+ u64 tx_zero_pause; /*!< Tx zero pause */
+ u64 rx_fcoe_pause; /*!< Rx FCoE pause */
+ u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
+ u64 tx_fcoe_pause; /*!< Tx FCoE pause */
+ u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
+};
+
+/**
+ * @brief
+ * Port statistics.
+ */
+union bfa_port_stats_u {
+ struct bfa_port_fc_stats fc;
+ struct bfa_port_eth_stats eth;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY (8)
+#define BFA_CEE_DCBX_MAX_PGID (8)
+
+#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
+#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
+#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
+#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
+#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
+#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
+#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
+#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
+#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
+#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
+#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
+
+/* LLDP string type */
+struct bfa_cee_lldp_str {
+ u8 sub_type;
+ u8 len;
+ u8 rsvd[2];
+ u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+/* LLDP parameters */
+struct bfa_cee_lldp_cfg {
+ struct bfa_cee_lldp_str chassis_id;
+ struct bfa_cee_lldp_str port_id;
+ struct bfa_cee_lldp_str port_desc;
+ struct bfa_cee_lldp_str sys_name;
+ struct bfa_cee_lldp_str sys_desc;
+ struct bfa_cee_lldp_str mgmt_addr;
+ u16 time_to_live;
+ u16 enabled_system_cap;
+};
+
+enum bfa_cee_dcbx_version {
+ DCBX_PROTOCOL_PRECEE = 1,
+ DCBX_PROTOCOL_CEE = 2,
+};
+
+enum bfa_cee_lls {
+ /* LLS is down because the TLV is not sent by the peer */
+ CEE_LLS_DOWN_NO_TLV = 0,
+ /* LLS is down as advertised by the peer */
+ CEE_LLS_DOWN = 1,
+ CEE_LLS_UP = 2,
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg {
+ u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+ u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+ u8 pfc_primap; /* bitmap of priorities with PFC enabled */
+ u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+ u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+ u8 dcbx_version; /* operating version: CEE or preCEE */
+ u8 lls_fcoe; /* FCoE Logical Link Status */
+ u8 lls_lan; /* LAN Logical Link Status */
+ u8 rsvd[2];
+};
+
+/* CEE status */
+/* Tri-state for the benefit of the port list command */
+enum bfa_cee_status {
+ CEE_UP = 0,
+ CEE_PHY_UP = 1,
+ CEE_LOOPBACK = 2,
+ CEE_PHY_DOWN = 3,
+};
+
+/* CEE Query */
+struct bfa_cee_attr {
+ u8 cee_status;
+ u8 error_reason;
+ struct bfa_cee_lldp_cfg lldp_remote;
+ struct bfa_cee_dcbx_cfg dcbx_remote;
+ mac_t src_mac;
+ u8 link_speed;
+ u8 nw_priority;
+ u8 filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats {
+ u32 lldp_tx_frames; /*!< LLDP Tx Frames */
+ u32 lldp_rx_frames; /*!< LLDP Rx Frames */
+ u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
+ u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
+ u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
+ u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
+ u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
+ u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
+ u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
+ u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
+ u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
+ u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
+ u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
+ u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
+ u32 cee_status_down; /*!< CEE status down */
+ u32 cee_status_up; /*!< CEE status up */
+ u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
+ u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 00000000000..987978fcb3f
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,244 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_MFG_COMM_H__
+#define __BFA_DEFS_MFG_COMM_H__
+
+#include "cna.h"
+
+/**
+ * Manufacturing block version
+ */
+#define BFA_MFG_VERSION 2
+#define BFA_MFG_VERSION_UNINIT 0xFF
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+#define BFA_MFG_SERIALNUM_SIZE 11
+#define STRSZ(_n) (((_n) + 4) & ~3)
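+
STRSZ(n) = (n + 4) & ~3 rounds a string field up so that n characters plus a terminating NUL fit in a 4-byte-aligned buffer, e.g. STRSZ(11) = 12 for the 11-character serial number. A quick check:

	#include <stdio.h>

	#define STRSZ(_n) (((_n) + 4) & ~3)

	int main(void)
	{
		/* An 11-char serial number needs 12 bytes (11 + NUL, padded to 4). */
		printf("STRSZ(11) = %d\n", STRSZ(11));	/* 12 */
		printf("STRSZ(12) = %d\n", STRSZ(12));	/* 16: 12 chars + NUL, padded */
		printf("STRSZ(14) = %d\n", STRSZ(14));	/* 16 */
		return 0;
	}
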
+
+/**
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
+ BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
+ BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
+ BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
+ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
+ BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
+ BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
+};
+
+#pragma pack(1)
+
+/**
+ * Check if 1-port card
+ */
+#define bfa_mfg_is_1port(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P1))
+
+/**
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE || \
+ (type) == BFA_MFG_TYPE_ASTRA || \
+ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+ (type) == BFA_MFG_TYPE_LIGHTNING))
+
+/**
+ * Check if card type valid
+ */
+#define bfa_mfg_is_card_type_valid(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ bfa_mfg_is_mezz(type)))
+
+/**
+ * Check if the card has old wwn/mac handling
+ */
+#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE))
+
+#define bfa_mfg_increment_wwn_mac(m, i) \
+do { \
+ u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
+ t += (i); \
+ (m)[0] = (t >> 16) & 0xFF; \
+ (m)[1] = (t >> 8) & 0xFF; \
+ (m)[2] = t & 0xFF; \
+} while (0)
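
Unlike the benet VF code, which bumps only mac[5], this macro packs the low three octets into a 24-bit value before adding, so carries propagate across octets. An illustrative copy of the same arithmetic (renamed to make clear it is not the driver macro itself):

	#include <stdio.h>
	#include <stdint.h>

	/* Same shape as bfa_mfg_increment_wwn_mac: add i to the low 3 octets. */
	#define mfg_increment_mac(m, i)					\
	do {								\
		uint32_t t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2];	\
		t += (i);						\
		(m)[0] = (t >> 16) & 0xFF;				\
		(m)[1] = (t >> 8) & 0xFF;				\
		(m)[2] = t & 0xFF;					\
	} while (0)

	int main(void)
	{
		/* Low three octets of a base MAC; 0x12:0x34:0xff + 1 carries. */
		uint8_t low[3] = { 0x12, 0x34, 0xff };

		mfg_increment_mac(low, 1);
		printf("%02x:%02x:%02x\n", low[0], low[1], low[2]); /* 12:35:00 */
		return 0;
	}
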
+
+#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
+do { \
+ switch ((card_type)) { \
+ case BFA_MFG_TYPE_FC8P2: \
+ case BFA_MFG_TYPE_JAYHAWK: \
+ case BFA_MFG_TYPE_ASTRA: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
+ BFI_ADAPTER_SETP(SPEED, 8); \
+ break; \
+ case BFA_MFG_TYPE_FC8P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
+ BFI_ADAPTER_SETP(SPEED, 8); \
+ break; \
+ case BFA_MFG_TYPE_FC4P2: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
+ BFI_ADAPTER_SETP(SPEED, 4); \
+ break; \
+ case BFA_MFG_TYPE_FC4P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
+ BFI_ADAPTER_SETP(SPEED, 4); \
+ break; \
+ case BFA_MFG_TYPE_CNA10P2: \
+ case BFA_MFG_TYPE_WANCHESE: \
+ case BFA_MFG_TYPE_LIGHTNING_P0: \
+ case BFA_MFG_TYPE_LIGHTNING: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
+ break; \
+ case BFA_MFG_TYPE_CNA10P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
+ break; \
+ default: \
+ (prop) = BFI_ADAPTER_UNSUPP; \
+ } \
+} while (0)
+
+enum {
+ CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
+ CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
+ CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
+ CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
+ CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
+ CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
+ CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+};
+
+#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
+do { \
+ if ((gpio) & CB_GPIO_PROTO) { \
+ (prop) |= BFI_ADAPTER_PROTO; \
+ (gpio) &= ~CB_GPIO_PROTO; \
+ } \
+ switch ((gpio)) { \
+ case CB_GPIO_TTV: \
+ (prop) |= BFI_ADAPTER_TTV; \
+ case CB_GPIO_DFLY: \
+ case CB_GPIO_FC8P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P2; \
+ break; \
+ case CB_GPIO_FC8P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P1; \
+ break; \
+ case CB_GPIO_FC4P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P2; \
+ break; \
+ case CB_GPIO_FC4P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P1; \
+ break; \
+ default: \
+ (prop) |= BFI_ADAPTER_UNSUPP; \
+ (card_type) = BFA_MFG_TYPE_INVALID; \
+ } \
+} while (0)
+
+/**
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN 512
+#define BFA_MFG_VPD_LEN_INVALID 0
+
+#define BFA_MFG_VPD_PCI_HDR_OFF 137
+#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+};
+
+/**
+ * @brief BFA adapter flash vpd data definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd {
+ u8 version; /*!< vpd data version */
+ u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
+ u8 chksum; /*!< u8 checksum */
+ u8 vendor; /*!< vendor */
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_MFG_COMM_H__ */
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/bna/bfa_defs_status.h
new file mode 100644
index 00000000000..af951126375
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_status.h
@@ -0,0 +1,216 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_STATUS_H__
+#define __BFA_DEFS_STATUS_H__
+
+/**
+ * API status return values
+ *
+ * NOTE: The error msgs are auto-generated from the comments. Only single-line
+ * comments are supported
+ */
+enum bfa_status {
+ BFA_STATUS_OK = 0,
+ BFA_STATUS_FAILED = 1,
+ BFA_STATUS_EINVAL = 2,
+ BFA_STATUS_ENOMEM = 3,
+ BFA_STATUS_ENOSYS = 4,
+ BFA_STATUS_ETIMER = 5,
+ BFA_STATUS_EPROTOCOL = 6,
+ BFA_STATUS_ENOFCPORTS = 7,
+ BFA_STATUS_NOFLASH = 8,
+ BFA_STATUS_BADFLASH = 9,
+ BFA_STATUS_SFP_UNSUPP = 10,
+ BFA_STATUS_UNKNOWN_VFID = 11,
+ BFA_STATUS_DATACORRUPTED = 12,
+ BFA_STATUS_DEVBUSY = 13,
+ BFA_STATUS_ABORTED = 14,
+ BFA_STATUS_NODEV = 15,
+ BFA_STATUS_HDMA_FAILED = 16,
+ BFA_STATUS_FLASH_BAD_LEN = 17,
+ BFA_STATUS_UNKNOWN_LWWN = 18,
+ BFA_STATUS_UNKNOWN_RWWN = 19,
+ BFA_STATUS_FCPT_LS_RJT = 20,
+ BFA_STATUS_VPORT_EXISTS = 21,
+ BFA_STATUS_VPORT_MAX = 22,
+ BFA_STATUS_UNSUPP_SPEED = 23,
+ BFA_STATUS_INVLD_DFSZ = 24,
+ BFA_STATUS_CNFG_FAILED = 25,
+ BFA_STATUS_CMD_NOTSUPP = 26,
+ BFA_STATUS_NO_ADAPTER = 27,
+ BFA_STATUS_LINKDOWN = 28,
+ BFA_STATUS_FABRIC_RJT = 29,
+ BFA_STATUS_UNKNOWN_VWWN = 30,
+ BFA_STATUS_NSLOGIN_FAILED = 31,
+ BFA_STATUS_NO_RPORTS = 32,
+ BFA_STATUS_NSQUERY_FAILED = 33,
+ BFA_STATUS_PORT_OFFLINE = 34,
+ BFA_STATUS_RPORT_OFFLINE = 35,
+ BFA_STATUS_TGTOPEN_FAILED = 36,
+ BFA_STATUS_BAD_LUNS = 37,
+ BFA_STATUS_IO_FAILURE = 38,
+ BFA_STATUS_NO_FABRIC = 39,
+ BFA_STATUS_EBADF = 40,
+ BFA_STATUS_EINTR = 41,
+ BFA_STATUS_EIO = 42,
+ BFA_STATUS_ENOTTY = 43,
+ BFA_STATUS_ENXIO = 44,
+ BFA_STATUS_EFOPEN = 45,
+ BFA_STATUS_VPORT_WWN_BP = 46,
+ BFA_STATUS_PORT_NOT_DISABLED = 47,
+ BFA_STATUS_BADFRMHDR = 48,
+ BFA_STATUS_BADFRMSZ = 49,
+ BFA_STATUS_MISSINGFRM = 50,
+ BFA_STATUS_LINKTIMEOUT = 51,
+ BFA_STATUS_NO_FCPIM_NEXUS = 52,
+ BFA_STATUS_CHECKSUM_FAIL = 53,
+ BFA_STATUS_GZME_FAILED = 54,
+ BFA_STATUS_SCSISTART_REQD = 55,
+ BFA_STATUS_IOC_FAILURE = 56,
+ BFA_STATUS_INVALID_WWN = 57,
+ BFA_STATUS_MISMATCH = 58,
+ BFA_STATUS_IOC_ENABLED = 59,
+ BFA_STATUS_ADAPTER_ENABLED = 60,
+ BFA_STATUS_IOC_NON_OP = 61,
+ BFA_STATUS_ADDR_MAP_FAILURE = 62,
+ BFA_STATUS_SAME_NAME = 63,
+ BFA_STATUS_PENDING = 64,
+ BFA_STATUS_8G_SPD = 65,
+ BFA_STATUS_4G_SPD = 66,
+ BFA_STATUS_AD_IS_ENABLE = 67,
+ BFA_STATUS_EINVAL_TOV = 68,
+ BFA_STATUS_EINVAL_QDEPTH = 69,
+ BFA_STATUS_VERSION_FAIL = 70,
+ BFA_STATUS_DIAG_BUSY = 71,
+ BFA_STATUS_BEACON_ON = 72,
+ BFA_STATUS_BEACON_OFF = 73,
+ BFA_STATUS_LBEACON_ON = 74,
+ BFA_STATUS_LBEACON_OFF = 75,
+ BFA_STATUS_PORT_NOT_INITED = 76,
+ BFA_STATUS_RPSC_ENABLED = 77,
+ BFA_STATUS_ENOFSAVE = 78,
+ BFA_STATUS_BAD_FILE = 79,
+ BFA_STATUS_RLIM_EN = 80,
+ BFA_STATUS_RLIM_DIS = 81,
+ BFA_STATUS_IOC_DISABLED = 82,
+ BFA_STATUS_ADAPTER_DISABLED = 83,
+ BFA_STATUS_BIOS_DISABLED = 84,
+ BFA_STATUS_AUTH_ENABLED = 85,
+ BFA_STATUS_AUTH_DISABLED = 86,
+ BFA_STATUS_ERROR_TRL_ENABLED = 87,
+ BFA_STATUS_ERROR_QOS_ENABLED = 88,
+ BFA_STATUS_NO_SFP_DEV = 89,
+ BFA_STATUS_MEMTEST_FAILED = 90,
+ BFA_STATUS_INVALID_DEVID = 91,
+ BFA_STATUS_QOS_ENABLED = 92,
+ BFA_STATUS_QOS_DISABLED = 93,
+ BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
+ BFA_STATUS_REG_FAIL = 95,
+ BFA_STATUS_IM_INV_CODE = 96,
+ BFA_STATUS_IM_INV_VLAN = 97,
+ BFA_STATUS_IM_INV_ADAPT_NAME = 98,
+ BFA_STATUS_IM_LOW_RESOURCES = 99,
+ BFA_STATUS_IM_VLANID_IS_PVID = 100,
+ BFA_STATUS_IM_VLANID_EXISTS = 101,
+ BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
+ BFA_STATUS_PORTLOG_ENABLED = 103,
+ BFA_STATUS_PORTLOG_DISABLED = 104,
+ BFA_STATUS_FILE_NOT_FOUND = 105,
+ BFA_STATUS_QOS_FC_ONLY = 106,
+ BFA_STATUS_RLIM_FC_ONLY = 107,
+ BFA_STATUS_CT_SPD = 108,
+ BFA_STATUS_LEDTEST_OP = 109,
+ BFA_STATUS_CEE_NOT_DN = 110,
+ BFA_STATUS_10G_SPD = 111,
+ BFA_STATUS_IM_INV_TEAM_NAME = 112,
+ BFA_STATUS_IM_DUP_TEAM_NAME = 113,
+ BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
+ BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
+ BFA_STATUS_IM_PVID_MISMATCH = 116,
+ BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
+ BFA_STATUS_IM_MTU_MISMATCH = 118,
+ BFA_STATUS_IM_RSS_MISMATCH = 119,
+ BFA_STATUS_IM_HDS_MISMATCH = 120,
+ BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
+ BFA_STATUS_IM_PORT_PARAMS = 122,
+ BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
+ BFA_STATUS_IM_CANNOT_REM_PRI = 124,
+ BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
+ BFA_STATUS_IM_LAST_PORT_DELETE = 126,
+ BFA_STATUS_IM_NO_DRIVER = 127,
+ BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
+ BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
+ BFA_STATUS_NO_MINPORT_DRIVER = 130,
+ BFA_STATUS_CARD_TYPE_MISMATCH = 131,
+ BFA_STATUS_BAD_ASICBLK = 132,
+ BFA_STATUS_NO_DRIVER = 133,
+ BFA_STATUS_INVALID_MAC = 134,
+ BFA_STATUS_IM_NO_VLAN = 135,
+ BFA_STATUS_IM_ETH_LB_FAILED = 136,
+ BFA_STATUS_IM_PVID_REMOVE = 137,
+ BFA_STATUS_IM_PVID_EDIT = 138,
+ BFA_STATUS_CNA_NO_BOOT = 139,
+ BFA_STATUS_IM_PVID_NON_ZERO = 140,
+ BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
+ BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
+ BFA_STATUS_IM_NOT_BOUND = 143,
+ BFA_STATUS_INSUFFICIENT_PERMS = 144,
+ BFA_STATUS_IM_INV_VLAN_NAME = 145,
+ BFA_STATUS_CMD_NOTSUPP_CNA = 146,
+ BFA_STATUS_IM_PASSTHRU_EDIT = 147,
+ BFA_STATUS_IM_BIND_FAILED = 148,
+ BFA_STATUS_IM_UNBIND_FAILED = 149,
+ BFA_STATUS_IM_PORT_IN_TEAM = 150,
+ BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
+ BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
+ BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
+ BFA_STATUS_PBC = 154,
+ BFA_STATUS_DEVID_MISSING = 155,
+ BFA_STATUS_BAD_FWCFG = 156,
+ BFA_STATUS_CREATE_FILE = 157,
+ BFA_STATUS_INVALID_VENDOR = 158,
+ BFA_STATUS_SFP_NOT_READY = 159,
+ BFA_STATUS_FLASH_UNINIT = 160,
+ BFA_STATUS_FLASH_EMPTY = 161,
+ BFA_STATUS_FLASH_CKFAIL = 162,
+ BFA_STATUS_TRUNK_UNSUPP = 163,
+ BFA_STATUS_TRUNK_ENABLED = 164,
+ BFA_STATUS_TRUNK_DISABLED = 165,
+ BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
+ BFA_STATUS_BOOT_CODE_UPDATED = 167,
+ BFA_STATUS_BOOT_VERSION = 168,
+ BFA_STATUS_CARDTYPE_MISSING = 169,
+ BFA_STATUS_INVALID_CARDTYPE = 170,
+ BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
+ BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
+ BFA_STATUS_ETHBOOT_ENABLED = 173,
+ BFA_STATUS_ETHBOOT_DISABLED = 174,
+ BFA_STATUS_IOPROFILE_OFF = 175,
+ BFA_STATUS_NO_PORT_INSTANCE = 176,
+ BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
+ BFA_STATUS_NO_VPORT_LOCK = 178,
+ BFA_STATUS_VPORT_NO_CNFG = 179,
+ BFA_STATUS_MAX_VAL
+};
+
+enum bfa_eproto_status {
+ BFA_EPROTO_BAD_ACCEPT = 0,
+ BFA_EPROTO_UNKNOWN_RSP = 1
+};
+
+#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
new file mode 100644
index 00000000000..e94e5aa9751
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.c
@@ -0,0 +1,1732 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+
+/**
+ * IOC local definitions
+ */
+
+#define bfa_ioc_timer_start(__ioc) \
+ mod_timer(&(__ioc)->ioc_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_TOV))
+#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
+
+#define bfa_ioc_recovery_timer_start(__ioc) \
+ mod_timer(&(__ioc)->ioc_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
+
+#define bfa_sem_timer_start(__ioc) \
+ mod_timer(&(__ioc)->sem_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
+#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
+
+#define bfa_hb_timer_start(__ioc) \
+ mod_timer(&(__ioc)->hb_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_HB_TOV))
+#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
+
+/**
+ * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_hbfail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
+
+#define bfa_ioc_is_optrom(__ioc) \
+ (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+
+#define bfa_ioc_mbox_cmd_pending(__ioc) \
+ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+static bool bfa_nw_auto_recover = true;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_recover(struct bfa_ioc *ioc);
+static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param);
+static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
+static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
+ char *serial_num);
+static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
+ char *fw_ver);
+static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
+ char *chip_rev);
+static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
+ char *optrom_ver);
+static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+ char *manufacturer);
+static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
+
+/**
+ * IOC state machine events
+ */
+enum ioc_event {
+ IOC_E_ENABLE = 1, /*!< IOC enable request */
+ IOC_E_DISABLE = 2, /*!< IOC disable request */
+ IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
+ IOC_E_FWREADY = 4, /*!< f/w initialization done */
+ IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
+ IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
+ IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
+ IOC_E_HBFAIL = 8, /*!< heartbeat failure */
+ IOC_E_HWERROR = 9, /*!< hardware error interrupt */
+ IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
+ IOC_E_DETACH = 11, /*!< driver detach cleanup */
+};
+
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+
+static struct bfa_sm_table ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+ {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
+ {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+ {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+ {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+};
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+{
+ ioc->retry_count = 0;
+ ioc->auto_recover = bfa_nw_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ case IOC_E_DETACH:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ } else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
+ }
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /* fall through */
+
+ case IOC_E_DETACH:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Notify enable completion callback and generate mismatch AEN.
+ */
+static void
+bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
+{
+ /**
+ * Provide enable completion callback and AEN notification only once.
+ */
+ if (ioc->retry_count == 0)
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ ioc->retry_count++;
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /* fall through */
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
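+/**
+ * Arm the IOC timer and start hardware initialization.
+ */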
+static void
+bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, false);
+}
+
+/**
+ * @brief
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWREADY:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, true);
+ break;
+ }
+
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
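+/**
+ * Arm the IOC timer and send an enable request to firmware.
+ */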
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_enable(ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_ENABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ writel(BFI_IOC_UNINIT,
+ ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+ }
+
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_FWREADY:
+ bfa_ioc_send_enable(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
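+/**
+ * Arm the IOC timer and request IOC attributes from firmware.
+ */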
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_getattr(ioc);
+}
+
+/**
+ * @brief
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
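+/**
+ * IOC enable is complete: notify the driver and start heartbeat monitoring.
+ */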
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_hb_monitor(ioc);
+}
+
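+/**
+ * IOC is operational. Heartbeat is being monitored.
+ */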
+static void
+bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_HWERROR:
+ case IOC_E_FWREADY:
+ /**
+		 * Hard error or IOC recovery by another function.
+		 * Treat it the same as a heartbeat failure.
+ */
+ bfa_ioc_hb_stop(ioc);
+ /* !!! fall through !!! */
+
+ case IOC_E_HBFAIL:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_disable(ioc);
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOC_E_TIMEOUT:
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
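+/**
+ * IOC is disabled. Awaiting a re-enable request or driver detach.
+ */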
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * @brief
+ * Hardware initialization failed.
+ */
+static void
+bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ /**
+ * Mark IOC as failed in hardware and stop firmware.
+ */
+ bfa_ioc_lpu_stop(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+
+ /**
+ * Notify other functions on HB failure.
+ */
+ bfa_ioc_notify_hbfail(ioc);
+
+ /**
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+
+ /**
+ * Flush any queued up mailbox requests.
+ */
+ bfa_ioc_mbox_hbfail(ioc);
+
+ /**
+ * Trigger auto-recovery after a delay.
+ */
+ if (ioc->auto_recover)
+ mod_timer(&ioc->ioc_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
+}
+
+/**
+ * @brief
+ * IOC heartbeat failure.
+ */
+static void
+bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ if (ioc->auto_recover)
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_FWREADY:
+ /**
+		 * Recovery is already initiated by another function.
+ */
+ break;
+
+ case IOC_E_HWERROR:
+ /*
+ * HB failure notification, ignore.
+ */
+ break;
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * BFA IOC private functions
+ */
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+
+ /**
+ * Notify common modules registered for notification.
+ */
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+}
+
+void
+bfa_nw_ioc_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+bool
+bfa_nw_ioc_sem_get(void __iomem *sem_reg)
+{
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
+
+ r32 = readl(sem_reg);
+
+ while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+ cnt++;
+ udelay(2);
+ r32 = readl(sem_reg);
+ }
+
+ if (r32 == 0)
+ return true;
+
+ BUG_ON(!(cnt < BFA_SEM_SPINCNT));
+ return false;
+}
+
+void
+bfa_nw_ioc_sem_release(void __iomem *sem_reg)
+{
+ writel(1, sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ /**
+ * First read to the semaphore register will return 0, subsequent reads
+	 * will return 1. The semaphore is released by writing 1 to the register.
+ */
+ r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+ if (r32 == 0) {
+ bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+ return;
+ }
+
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+}
+
+void
+bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
+{
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->sem_timer);
+}
+
+/**
+ * @brief
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+ int i;
+#define PSS_LMEM_INIT_TIME 10000
+
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LMEM_RESET;
+ pss_ctl |= __PSS_LMEM_INIT_EN;
+
+ /*
+ * i2c workaround 12.5khz clock
+ */
+ pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+
+ /**
+ * wait for memory initialization to be complete
+ */
+ i = 0;
+ do {
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ i++;
+ } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+ /**
+ * If memory initialization is not successful, IOC timeout will catch
+ * such failures.
+ */
+ BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
+
+ pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Take processor out of reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LPU0_RESET;
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Put processors in reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+/**
+ * Read the running firmware's image header (including version) from SMEM.
+ */
+void
+bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ int i;
+ u32 *fwsig = (u32 *) fwhdr;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
+ i++) {
+ fwsig[i] =
+ swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ loff += sizeof(u32);
+ }
+}
+
+/**
+ * Returns true if the given firmware header matches the driver's firmware image.
+ */
+bool
+bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ int i;
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Return true if the currently running firmware version is valid. The firmware
+ * signature and execution context (driver/bios) must match.
+ */
+static bool
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+
+ /**
+ * If bios/efi boot (flash based) -- return true
+ */
+ if (bfa_ioc_is_optrom(ioc))
+ return true;
+
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+
+ if (fwhdr.signature != drv_fwhdr->signature)
+ return false;
+
+ if (fwhdr.exec != drv_fwhdr->exec)
+ return false;
+
+ return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+/**
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if (r32)
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+/**
+ * @img ioc_init_logic.jpg
+ */
+static void
+bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ bool fwvalid;
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (force)
+ ioc_fwstate = BFI_IOC_UNINIT;
+
+ /**
+ * check if firmware is valid
+ */
+ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+ false : bfa_ioc_fwver_valid(ioc);
+
+ if (!fwvalid) {
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ return;
+ }
+
+ /**
+	 * If hardware initialization is in progress (initiated by another IOC),
+ * just wait for an initialization completion interrupt.
+ */
+ if (ioc_fwstate == BFI_IOC_INITING) {
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ return;
+ }
+
+ /**
+ * If IOC function is disabled and firmware version is same,
+ * just re-enable IOC.
+ *
+	 * If option ROM, the IOC must not be in the operational state. With
+	 * convergence, the IOC will be in the operational state when the 2nd
+	 * driver is loaded.
+ */
+ if (ioc_fwstate == BFI_IOC_DISABLED ||
+ (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
+ /**
+ * When using MSI-X any pending firmware ready event should
+ * be flushed. Otherwise MSI-X interrupts are not delivered.
+ */
+ bfa_ioc_msgflush(ioc);
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ return;
+ }
+
+ /**
+ * Initialize the h/w for any other states.
+ */
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+}
+
+void
+bfa_nw_ioc_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+static void
+bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
+{
+ u32 *msgp = (u32 *) ioc_msg;
+ u32 i;
+
+ BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
+
+ /*
+ * first write msg to mailbox registers
+ */
+ for (i = 0; i < len / sizeof(u32); i++)
+ writel(cpu_to_le32(msgp[i]),
+ ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+ writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ /*
+ * write 1 to mailbox CMD to trigger LPU event
+ */
+ writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+ (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req enable_req;
+ struct timeval tv;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.ioc_class = ioc->ioc_mc;
+ do_gettimeofday(&tv);
+ enable_req.tv_sec = ntohl(tv.tv_sec);
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req disable_req;
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_getattr_req attr_req;
+
+ bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
+void
+bfa_nw_ioc_hb_check(void *cbarg)
+{
+ struct bfa_ioc *ioc = cbarg;
+ u32 hb_count;
+
+ hb_count = readl(ioc->ioc_regs.heartbeat);
+	if (ioc->hb_count == hb_count) {
+		pr_crit("Firmware heartbeat failure at %d\n", hb_count);
+		bfa_ioc_recover(ioc);
+		return;
+	}
+	ioc->hb_count = hb_count;
+
+ bfa_ioc_mbox_poll(ioc);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
+{
+ ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_stop(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->hb_timer);
+}
+
+/**
+ * @brief
+ * Initiate a full firmware download.
+ */
+static void
+bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param)
+{
+ u32 *fwimg;
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ u32 chunkno = 0;
+ u32 i;
+
+ /**
+ * Initialize LMEM first before code download
+ */
+ bfa_ioc_lmem_init(ioc);
+
+ /**
+ * Flash based firmware boot
+ */
+ if (bfa_ioc_is_optrom(ioc))
+ boot_type = BFI_BOOT_TYPE_FLASH;
+ fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+ if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+ fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ /**
+ * write smem
+ */
+ writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
+ ((ioc->ioc_regs.smem_page_start) + (loff)));
+
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum,
+ ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(bfa_ioc_smem_pgnum(ioc, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * Set boot type and boot param at the end.
+ */
+	writel(boot_type, ioc->ioc_regs.smem_page_start + BFI_BOOT_TYPE_OFF);
+	writel(boot_param, ioc->ioc_regs.smem_page_start + BFI_BOOT_PARAM_OFF);
+}
+
+static void
+bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
+{
+ bfa_ioc_hwinit(ioc, force);
+}
+
+/**
+ * @brief
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_attr *attr = ioc->attr;
+
+ attr->adapter_prop = ntohl(attr->adapter_prop);
+ attr->card_type = ntohl(attr->card_type);
+ attr->maxfrsize = ntohs(attr->maxfrsize);
+
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/**
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ int mc;
+
+ INIT_LIST_HEAD(&mod->cmd_q);
+ for (mc = 0; mc < BFI_MC_MAX; mc++) {
+ mod->mbhdlr[mc].cbfn = NULL;
+ mod->mbhdlr[mc].cbarg = ioc->bfa;
+ }
+}
+
+/**
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+ u32 stat;
+
+ /**
+ * If no command pending, do nothing
+ */
+ if (list_empty(&mod->cmd_q))
+ return;
+
+ /**
+ * If previous command is not yet fetched by firmware, do nothing
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat)
+ return;
+
+ /**
+	 * Send the next queued command to firmware.
+ */
+ bfa_q_deq(&mod->cmd_q, &cmd);
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/**
+ * Cleanup any pending requests.
+ */
+static void
+bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+
+ while (!list_empty(&mod->cmd_q))
+ bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+/**
+ * IOC public
+ */
+static enum bfa_status
+bfa_ioc_pll_init(struct bfa_ioc *ioc)
+{
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ bfa_ioc_pll_init_asic(ioc);
+
+ ioc->pllinit = true;
+ /*
+ * release semaphore.
+ */
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+static void
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+{
+ void __iomem *rb;
+
+ bfa_ioc_stats(ioc, ioc_boots);
+
+ if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+ return;
+
+ /**
+ * Initialize IOC state of all functions on a chip reset.
+ */
+ rb = ioc->pcidev.pci_bar_kva;
+ if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+ } else {
+ writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+ }
+
+ bfa_ioc_msgflush(ioc);
+ bfa_ioc_download_fw(ioc, boot_type, boot_param);
+
+ /**
+ * Enable interrupts just before starting LPU
+ */
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_ioc_lpu_start(ioc);
+}
+
+/**
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_nw_ioc_auto_recover(bool auto_recover)
+{
+ bfa_nw_auto_recover = auto_recover;
+}
+
+static void
+bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+{
+ u32 *msgp = mbmsg;
+ u32 r32;
+ int i;
+
+ /**
+ * read the MBOX msg
+ */
+ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+ i++) {
+ r32 = readl(ioc->ioc_regs.lpu_mbox +
+ i * sizeof(u32));
+ msgp[i] = htonl(r32);
+ }
+
+ /**
+ * turn off mailbox interrupt by clearing mailbox status
+ */
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+ readl(ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+static void
+bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+{
+ union bfi_ioc_i2h_msg_u *msg;
+
+ msg = (union bfi_ioc_i2h_msg_u *) m;
+
+ bfa_ioc_stats(ioc, ioc_isrs);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOC_I2H_HBEAT:
+ break;
+
+ case BFI_IOC_I2H_READY_EVENT:
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ break;
+
+ case BFI_IOC_I2H_ENABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+ break;
+
+ case BFI_IOC_I2H_DISABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+ break;
+
+ case BFI_IOC_I2H_GETATTR_REPLY:
+ bfa_ioc_getattr_reply(ioc);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * IOC attach time initialization and setup.
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] bfa driver instance structure
+ */
+void
+bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+{
+ ioc->bfa = bfa;
+ ioc->cbfn = cbfn;
+ ioc->fcmode = false;
+ ioc->pllinit = false;
+ ioc->dbg_fwsave_once = true;
+
+ bfa_ioc_mbox_attach(ioc);
+ INIT_LIST_HEAD(&ioc->hb_notify_q);
+
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+}
+
+/**
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_nw_ioc_detach(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DETACH);
+}
+
+/**
+ * Setup IOC PCI properties.
+ *
+ * @param[in] pcidev PCI device information for this IOC
+ */
+void
+bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_mclass mc)
+{
+ ioc->ioc_mc = mc;
+ ioc->pcidev = *pcidev;
+ ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
+ ioc->cna = ioc->ctdev && !ioc->fcmode;
+
+ bfa_nw_ioc_set_ct_hwif(ioc);
+
+ bfa_ioc_map_port(ioc);
+ bfa_ioc_reg_init(ioc);
+}
+
+/**
+ * Initialize IOC dma memory
+ *
+ * @param[in] dm_kva kernel virtual address of IOC dma memory
+ * @param[in] dm_pa physical address of IOC dma memory
+ */
+void
+bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
+{
+ /**
+ * dma memory for firmware attribute
+ */
+ ioc->attr_dma.kva = dm_kva;
+ ioc->attr_dma.pa = dm_pa;
+ ioc->attr = (struct bfi_ioc_attr *) dm_kva;
+}
+
+/**
+ * Return size of dma memory required.
+ */
+u32
+bfa_nw_ioc_meminfo(void)
+{
+ return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_nw_ioc_enable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_enables);
+ ioc->dbg_fwsave_once = true;
+
+ bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_nw_ioc_disable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_disables);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+static u32
+bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
+}
+
+static u32
+bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGOFF(fmaddr);
+}
+
+/**
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+
+ mod->mbhdlr[mc].cbfn = cbfn;
+ mod->mbhdlr[mc].cbarg = cbarg;
+}
+
+/**
+ * Queue a mailbox command request to firmware. The request is queued
+ * internally if the mailbox is busy. It is the caller's responsibility to
+ * serialize requests.
+ *
+ * @param[in] ioc IOC instance
+ * @param[in]	cmd	Mailbox command
+ */
+void
+bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ u32 stat;
+
+ /**
+ * If a previous command is pending, queue new command
+ */
+ if (!list_empty(&mod->cmd_q)) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * If mailbox is busy, queue command for poll timer
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * mailbox is free -- queue command to firmware
+ */
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/**
+ * Handle mailbox interrupts
+ */
+void
+bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfi_mbmsg m;
+ int mc;
+
+ bfa_ioc_msgget(ioc, &m);
+
+ /**
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
+
+ if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+}
+
+void
+bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/**
+ * Add to IOC heartbeat failure notification queue. To be used by common
+ * modules such as cee, port, diag.
+ */
+void
+bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_hbfail_notify *notify)
+{
+ list_add_tail(&notify->qe, &ioc->hb_notify_q);
+}
+
+#define BFA_MFG_NAME "Brocade"
+static void
+bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ struct bfa_adapter_attr *ad_attr)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+ ioc_attr = ioc->attr;
+
+ bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+ bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+ bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+ bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
+ memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ sizeof(struct bfa_mfg_vpd));
+
+ ad_attr->nports = bfa_ioc_get_nports(ioc);
+ ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
+
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+ /* For now, model descr uses same model string */
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
+
+ ad_attr->card_type = ioc_attr->card_type;
+ ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
+
+ if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+ ad_attr->prototype = 1;
+ else
+ ad_attr->prototype = 0;
+
+ ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+ ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
+
+ ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+ ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+ ad_attr->asic_rev = ioc_attr->asic_rev;
+
+ bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
+
+ ad_attr->cna_capable = ioc->cna;
+ ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
+}
+
+static enum bfa_ioc_type
+bfa_ioc_get_type(struct bfa_ioc *ioc)
+{
+ if (!ioc->ctdev || ioc->fcmode)
+ return BFA_IOC_TYPE_FC;
+ else if (ioc->ioc_mc == BFI_MC_IOCFC)
+ return BFA_IOC_TYPE_FCoE;
+ else if (ioc->ioc_mc == BFI_MC_LL)
+ return BFA_IOC_TYPE_LL;
+ else {
+ BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
+ return BFA_IOC_TYPE_LL;
+ }
+}
+
+static void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
+{
+ memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ memcpy(serial_num,
+ (void *)ioc->attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
+{
+ memset(fw_ver, 0, BFA_VERSION_LEN);
+ memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+static void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
+{
+ BUG_ON(!(chip_rev));
+
+ memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+ chip_rev[0] = 'R';
+ chip_rev[1] = 'e';
+ chip_rev[2] = 'v';
+ chip_rev[3] = '-';
+ chip_rev[4] = ioc->attr->asic_rev;
+ chip_rev[5] = '\0';
+}
+
+static void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+{
+ memset(optrom_ver, 0, BFA_VERSION_LEN);
+ memcpy(optrom_ver, ioc->attr->optrom_version,
+ BFA_VERSION_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
+{
+ memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+ BUG_ON(!(model));
+ memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ioc_attr = ioc->attr;
+
+ /**
+ * model name
+ */
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+ BFA_MFG_NAME, ioc_attr->card_type);
+}
+
+static enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc *ioc)
+{
+ return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+}
+
+void
+bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+{
+ memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
+
+ ioc_attr->state = bfa_ioc_get_state(ioc);
+ ioc_attr->port_id = ioc->port_id;
+
+ ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
+
+ bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+ ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
+ ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+ bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
+}
+
+/**
+ * WWN public
+ */
+static u64
+bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->pwwn;
+}
+
+mac_t
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+{
+ /*
+ * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
+ */
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
+ return bfa_ioc_get_mfg_mac(ioc);
+ else
+ return ioc->attr->mac;
+}
+
+static mac_t
+bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
+{
+ mac_t m;
+
+ m = ioc->attr->mfg_mac;
+ if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+ m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+ else
+ bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+ bfa_ioc_pcifn(ioc));
+
+ return m;
+}
+
+/**
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
+static void
+bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
+{
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
+ return;
+
+}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
new file mode 100644
index 00000000000..a73d84ec808
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.h
@@ -0,0 +1,300 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include "bfa_sm.h"
+#include "bfi.h"
+#include "cna.h"
+
+#define BFA_IOC_TOV 3000 /* msecs */
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_HWINIT_MAX 2
+#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
+
+/**
+ * Generic Scatter Gather Element used by driver
+ */
+struct bfa_sge {
+ u32 sg_len;
+ void *sg_addr;
+};
+
+/**
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev {
+ int pci_slot;
+ u8 pci_func;
+ u16 device_id;
+ void __iomem *pci_bar_kva;
+};
+
+/**
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address
+ */
+struct bfa_dma {
+ void *kva; /* ! Kernel virtual address */
+ u64 pa; /* ! Physical address */
+};
+
+#define BFA_DMA_ALIGN_SZ 256
+
+/**
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
+#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
+
+/**
+ * @brief BFA dma address assignment macro
+ */
+#define bfa_dma_addr_set(dma_addr, pa) \
+ __bfa_dma_addr_set(&dma_addr, (u64)pa)
+
+static inline void
+__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) pa;
+ dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
+}
+
+/**
+ * @brief BFA dma address assignment macro. (big endian format)
+ */
+#define bfa_dma_be_addr_set(dma_addr, pa) \
+ __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) htonl(pa);
+ dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
+}
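+
+/*
+ * Example (as used by bfa_ioc_send_getattr() in bfa_ioc.c):
+ *	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ */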
+
+struct bfa_ioc_regs {
+ void __iomem *hfn_mbox_cmd;
+ void __iomem *hfn_mbox;
+ void __iomem *lpu_mbox_cmd;
+ void __iomem *lpu_mbox;
+ void __iomem *pss_ctl_reg;
+ void __iomem *pss_err_status_reg;
+ void __iomem *app_pll_fast_ctl_reg;
+ void __iomem *app_pll_slow_ctl_reg;
+ void __iomem *ioc_sem_reg;
+ void __iomem *ioc_usage_sem_reg;
+ void __iomem *ioc_init_sem_reg;
+ void __iomem *ioc_usage_reg;
+ void __iomem *host_page_num_fn;
+ void __iomem *heartbeat;
+ void __iomem *ioc_fwstate;
+ void __iomem *ll_halt;
+ void __iomem *err_set;
+ void __iomem *shirq_isr_next;
+ void __iomem *shirq_msk_next;
+ void __iomem *smem_page_start;
+ u32 smem_pg0;
+};
+
+/**
+ * IOC Mailbox structures
+ */
+struct bfa_mbox_cmd {
+ struct list_head qe;
+ u32 msg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
+struct bfa_ioc_mbox_mod {
+ struct list_head cmd_q; /*!< pending mbox queue */
+ int nmclass; /*!< number of handlers */
+ struct {
+ bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
+ void *cbarg;
+ } mbhdlr[BFI_MC_MAX];
+};
+
+/**
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn {
+ bfa_ioc_enable_cbfn_t enable_cbfn;
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+};
+
+/**
+ * Heartbeat failure notification queue element.
+ */
+struct bfa_ioc_hbfail_notify {
+ struct list_head qe;
+ bfa_ioc_hbfail_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
+ * Initialize a heartbeat failure notification structure
+ */
+#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+ (__notify)->cbfn = (__cbfn); \
+ (__notify)->cbarg = (__cbarg); \
+} while (0)
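+
+/*
+ * Typical usage (illustrative; the callback and argument are supplied by
+ * the registering module):
+ *	struct bfa_ioc_hbfail_notify notify;
+ *	bfa_ioc_hbfail_init(&notify, my_hbfail_cb, my_cbarg);
+ *	bfa_nw_ioc_hbfail_register(ioc, &notify);
+ */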
+
+struct bfa_ioc {
+ bfa_fsm_t fsm;
+ struct bfa *bfa;
+ struct bfa_pcidev pcidev;
+ struct bfa_timer_mod *timer_mod;
+ struct timer_list ioc_timer;
+ struct timer_list sem_timer;
+ struct timer_list hb_timer;
+ u32 hb_count;
+ u32 retry_count;
+ struct list_head hb_notify_q;
+ void *dbg_fwsave;
+ int dbg_fwsave_len;
+ bool dbg_fwsave_once;
+ enum bfi_mclass ioc_mc;
+ struct bfa_ioc_regs ioc_regs;
+ struct bfa_ioc_drv_stats stats;
+ bool auto_recover;
+ bool fcmode;
+ bool ctdev;
+ bool cna;
+ bool pllinit;
+ bool stats_busy; /*!< outstanding stats */
+ u8 port_id;
+
+ struct bfa_dma attr_dma;
+ struct bfi_ioc_attr *attr;
+ struct bfa_ioc_cbfn *cbfn;
+ struct bfa_ioc_mbox_mod mbox_mod;
+ struct bfa_ioc_hwif *ioc_hwif;
+};
+
+struct bfa_ioc_hwif {
+ enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+ bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
+ void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
+ void (*ioc_reg_init) (struct bfa_ioc *ioc);
+ void (*ioc_map_port) (struct bfa_ioc *ioc);
+ void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
+ bool msix);
+ void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
+ void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+};
+
+#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+ (((__stats)->drv_stats) = (__ioc)->stats)
+#define bfa_ioc_clr_stats(__ioc) \
+ memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
+#define bfa_ioc_speed_sup(__ioc) \
+ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+#define bfa_ioc_get_nports(__ioc) \
+ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+#define BFA_IOC_FWIMG_TYPE(__ioc) \
+ (((__ioc)->ctdev) ? \
+ (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
+ BFI_IMAGE_CB_FC)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
+ (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
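+
+/*
+ * Example (see bfa_ioc_download_fw()): for word offset 'i' into the
+ * firmware image,
+ *	chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+ *	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+ *				BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ *	word = fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)];
+ */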
+
+/**
+ * IOC mailbox interface
+ */
+void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
+void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+
+/**
+ * IOC interfaces
+ */
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ (__ioc)->fcmode))
+
+#define bfa_ioc_isr_mode_set(__ioc, __msix) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_ownership_reset(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+
+void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+ struct bfa_ioc_cbfn *cbfn);
+void bfa_nw_ioc_auto_recover(bool auto_recover);
+void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
+void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_mclass mc);
+u32 bfa_nw_ioc_meminfo(void);
+void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
+void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_hbfail_notify *notify);
+bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
+void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
+void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
+void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+
+/*
+ * Timeout APIs
+ */
+void bfa_nw_ioc_timeout(void *ioc);
+void bfa_nw_ioc_hb_check(void *ioc);
+void bfa_nw_ioc_sem_timeout(void *ioc);
+
+/*
+ * F/W Image Size & Chunk
+ */
+u32 *bfa_cb_image_get_chunk(int type, u32 off);
+u32 bfa_cb_image_get_size(int type);
+
+#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
new file mode 100644
index 00000000000..121cfd6d48b
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -0,0 +1,392 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+
+/*
+ * forward declarations
+ */
+static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+
+static struct bfa_ioc_hwif nw_hwif_ct;
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+{
+ nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+ nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+ nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+ nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+ nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+ nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+ nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+ nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+
+ ioc->ioc_hwif = &nw_hwif_ct;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bool
+bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr fwhdr;
+
+ /**
+ * Firmware match check is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return true;
+
+ /**
+ * If bios boot (flash based) -- do not increment usage count
+ */
+ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return true;
+
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+ }
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ /**
+	 * The use count cannot be non-zero while the chip is in the
+	 * uninitialized state.
+ */
+ BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return false;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+{
+ u32 usecnt;
+
+ /**
+ * Firmware lock is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return;
+
+ /**
+ * If bios boot (flash based) -- do not decrement usage count
+ */
+ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+ BUG_ON(!(usecnt > 0));
+
+ usecnt--;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
+{
+ if (ioc->cna) {
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ /* Wait for halt to take effect */
+ readl(ioc->ioc_regs.ll_halt);
+ } else {
+ writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
+ }
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
+ { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
+ { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = readl(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+}
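+
+/*
+ * Note: the personality register packs one 8-bit field group per PCI
+ * function (hence FNC_PERS_FN_SHIFT(__fn) == (__fn) * 8). After shifting
+ * right by 8 * pcifn, the function's own fields line up with the __F0_*
+ * masks from bfi_ctreg.h, e.g.
+ * __F0_PORT_MAP_MK << FNC_PERS_FN_SHIFT(1) == __F1_PORT_MAP_MK
+ * (0x30 << 8 == 0x3000).
+ */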
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = readl(rb + FNC_PERS_REG);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if (!msix && mode)
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+
+ writel(r32, rb + FNC_PERS_REG);
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+{
+ if (ioc->cna) {
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ }
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ bfa_nw_ioc_hw_sem_release(ioc);
+}
+
+static enum bfa_status
+bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+{
+ u32 pll_sclk, pll_fclk, r32;
+
+ pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
+ __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
+ __APP_PLL_312_JITLMT0_1(3U) |
+ __APP_PLL_312_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
+ __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
+ __APP_PLL_425_JITLMT0_1(3U) |
+ __APP_PLL_425_CNTLMT0_1(1U);
+ if (fcmode) {
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL |
+ __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL,
+ (rb + ETH_MAC_SER_REG));
+ } else {
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1,
+ (rb + ETH_MAC_SER_REG));
+ }
+ /* Force both IOC state machines back to uninitialized */
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ /* Mask and clear all interrupts on both host functions */
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET,
+ rb + APP_PLL_425_CTL_REG);
+ writel(pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk |
+ __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ if (!fcmode) {
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+ }
+ /* Release the local memory (LMEM) reset */
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+ if (!fcmode) {
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
+ }
+
+ /* Start the eDRAM built-in self test, read its status and stop it */
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
new file mode 100644
index 00000000000..46462c49b6f
--- /dev/null
+++ b/drivers/net/bna/bfa_sm.h
@@ -0,0 +1,88 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_sm.h State machine defines
+ */
+
+#ifndef __BFA_SM_H__
+#define __BFA_SM_H__
+
+#include "cna.h"
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/**
+ * oc - object class, e.g. bfa_ioc
+ * st - state, e.g. reset
+ * otype - object type, e.g. struct bfa_ioc
+ * etype - event type, e.g. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm) ((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+
+/**
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table {
+ bfa_sm_t sm; /*!< state machine function */
+ int state; /*!< state machine encoding */
+ char *name; /*!< state name for display */
+};
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+
+/**
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/**
+ * oc - object class, e.g. bfa_ioc
+ * st - state, e.g. reset
+ * otype - object type, e.g. struct bfa_ioc
+ * etype - event type, e.g. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
+ static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
+} while (0)
+
+#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state) \
+ ((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
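+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a hypothetical
+ * object class "xyz" with a bfa_fsm_t member named "fsm" and an event enum
+ * "enum xyz_event" could be wired up as below; all xyz_* names are made up
+ * for this sketch and the state handlers/entry functions are assumed to be
+ * defined elsewhere.
+ *
+ *	bfa_fsm_state_decl(xyz, stopped, struct xyz, enum xyz_event);
+ *	bfa_fsm_state_decl(xyz, running, struct xyz, enum xyz_event);
+ *
+ *	static void
+ *	xyz_init(struct xyz *obj)
+ *	{
+ *		bfa_fsm_set_state(obj, xyz_sm_stopped);	// also runs xyz_sm_stopped_entry()
+ *	}
+ *
+ *	static void
+ *	xyz_start(struct xyz *obj)
+ *	{
+ *		bfa_fsm_send_event(obj, XYZ_E_START);	// dispatched to the current state
+ *	}
+ */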
+#endif
diff --git a/drivers/net/bna/bfa_wc.h b/drivers/net/bna/bfa_wc.h
new file mode 100644
index 00000000000..d0e4caee67b
--- /dev/null
+++ b/drivers/net/bna/bfa_wc.h
@@ -0,0 +1,69 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_wc.h Generic wait counter.
+ */
+
+#ifndef __BFA_WC_H__
+#define __BFA_WC_H__
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc *wc)
+{
+ bfa_wc_down(wc);
+}
+
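+/*
+ * Usage sketch (illustrative only, not part of the driver): a caller that
+ * forks several asynchronous sub-operations and wants to resume only after
+ * all of them finish. xyz_cleanup_done(), xyz_sub_start(), obj and
+ * num_sub_ops are made-up names; each sub-operation is expected to call
+ * bfa_wc_down(&obj->wc) from its completion path.
+ *
+ *	bfa_wc_init(&obj->wc, xyz_cleanup_done, obj);	// count = 1
+ *	for (i = 0; i < num_sub_ops; i++) {
+ *		bfa_wc_up(&obj->wc);			// one reference per sub-op
+ *		xyz_sub_start(obj, i);
+ *	}
+ *	bfa_wc_wait(&obj->wc);	// drop the init reference; xyz_cleanup_done()
+ *				// runs once the count reaches zero
+ */
+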
+#endif
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
new file mode 100644
index 00000000000..a9739681105
--- /dev/null
+++ b/drivers/net/bna/bfi.h
@@ -0,0 +1,392 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+
+#pragma pack(1)
+
+/**
+ * BFI FW image type
+ */
+#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+enum {
+ BFI_IMAGE_CB_FC,
+ BFI_IMAGE_CT_FC,
+ BFI_IMAGE_CT_CNA,
+ BFI_IMAGE_MAX,
+};
+
+/**
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr {
+ u8 msg_class; /*!< @ref enum bfi_mclass */
+ u8 msg_id; /*!< msg opcode within the class */
+ union {
+ struct {
+ u8 rsvd;
+ u8 lpu_id; /*!< msg destination */
+ } h2i;
+ u16 i2htok; /*!< token in msgs to host */
+ } mtag;
+};
+
+#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.h2i.lpu_id = (_lpuid); \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.i2htok = (_i2htok); \
+} while (0)
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE 128
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
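+
+/*
+ * For example, an I2H opcode built with BFA_I2HM(1) evaluates to
+ * 1 + 128 = 129, so host-to-firmware (0-127) and firmware-to-host (128-255)
+ * opcodes never overlap within a message class (see e.g. enum
+ * bfi_ioc_i2h_msgs below).
+ */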
+
+/**
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+#define BFI_SGE_INLINE 1
+#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
+
+/**
+ * SG Flags
+ */
+enum {
+ BFI_SGE_DATA = 0, /*!< data address, not last */
+ BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
+ BFI_SGE_DATA_LAST = 3, /*!< data address, last */
+ BFI_SGE_LINK = 2, /*!< link address */
+ BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
+};
+
+/**
+ * DMA addresses
+ */
+union bfi_addr_u {
+ struct {
+ u32 addr_lo;
+ u32 addr_hi;
+ } a32;
+};
+
+/**
+ * Scatter Gather Element
+ */
+struct bfi_sge {
+#ifdef __BIGENDIAN
+ u32 flags:2,
+ rsvd:2,
+ sg_len:28;
+#else
+ u32 sg_len:28,
+ rsvd:2,
+ flags:2;
+#endif
+ union bfi_addr_u sga;
+};
+
+/**
+ * Scatter Gather Page
+ */
+#define BFI_SGPG_DATA_SGES 7
+#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
+#define BFI_SGPG_RSVD_WD_LEN 8
+struct bfi_sgpg {
+ struct bfi_sge sges[BFI_SGPG_SGES_MAX];
+ u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
+};
+
+/*
+ * Large Message structure - 128 Bytes size Msgs
+ */
+#define BFI_LMSG_SZ 128
+#define BFI_LMSG_PL_WSZ \
+ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
+
+struct bfi_msg {
+ struct bfi_mhdr mhdr;
+ u32 pl[BFI_LMSG_PL_WSZ];
+};
+
+/**
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ 7
+struct bfi_mbmsg {
+ struct bfi_mhdr mh;
+ u32 pl[BFI_MBMSG_SZ];
+};
+
+/**
+ * Message Classes
+ */
+enum bfi_mclass {
+ BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /*!< Flash message class */
+ BFI_MC_CEE = 4, /*!< CEE */
+ BFI_MC_FCPORT = 5, /*!< FC port */
+ BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
+ BFI_MC_LL = 7, /*!< Link Layer */
+ BFI_MC_UF = 8, /*!< Unsolicited frame receive */
+ BFI_MC_FCXP = 9, /*!< FC Transport */
+ BFI_MC_LPS = 10, /*!< lport fc login services */
+ BFI_MC_RPORT = 11, /*!< Remote port */
+ BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
+ BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
+ BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
+ BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
+ BFI_MC_TSKIM = 18, /*!< Initiator Task management */
+ BFI_MC_SBOOT = 19, /*!< SAN boot services */
+ BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
+ BFI_MC_PORT = 21, /*!< Physical port */
+ BFI_MC_SFP = 22, /*!< SFP module */
+ BFI_MC_MSGQ = 23, /*!< MSGQ */
+ BFI_MC_ENET = 24, /*!< ENET commands/responses */
+ BFI_MC_MAX = 32
+};
+
+#define BFI_IOC_MAX_CQS 4
+#define BFI_IOC_MAX_CQS_ASIC 8
+#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
+
+#define BFI_BOOT_TYPE_OFF 8
+#define BFI_BOOT_PARAM_OFF 12
+
+#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_FLASH 1
+#define BFI_BOOT_TYPE_MEMTEST 2
+
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
+
+/**
+ *----------------------------------------------------------------------
+ * IOC
+ *----------------------------------------------------------------------
+ */
+
+enum bfi_ioc_h2i_msgs {
+ BFI_IOC_H2I_ENABLE_REQ = 1,
+ BFI_IOC_H2I_DISABLE_REQ = 2,
+ BFI_IOC_H2I_GETATTR_REQ = 3,
+ BFI_IOC_H2I_DBG_SYNC = 4,
+ BFI_IOC_H2I_DBG_DUMP = 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+ BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
+};
+
+/**
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u attr_addr;
+};
+
+struct bfi_ioc_attr {
+ u64 mfg_pwwn; /*!< Mfg port wwn */
+ u64 mfg_nwwn; /*!< Mfg node wwn */
+ mac_t mfg_mac; /*!< Mfg mac */
+ u16 rsvd_a;
+ u64 pwwn;
+ u64 nwwn;
+ mac_t mac; /*!< PBC or Mfg mac */
+ u16 rsvd_b;
+ mac_t fcoe_mac;
+ u16 rsvd_c;
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 rx_bbcredit; /*!< receive buffer credits */
+ u32 adapter_prop; /*!< adapter properties */
+ u16 maxfrsize; /*!< max receive frame size */
+ char asic_rev;
+ u8 rsvd_d;
+ char fw_version[BFA_VERSION_LEN];
+ char optrom_version[BFA_VERSION_LEN];
+ struct bfa_mfg_vpd vpd;
+ u32 card_type; /*!< card type */
+};
+
+/**
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< cfg reply status */
+ u8 rsvd[3];
+};
+
+/**
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB (0x40)
+#define BFI_IOC_SMEM_PG0_CT (0x180)
+
+/**
+ * Firmware statistic offset
+ */
+#define BFI_IOC_FWSTATS_OFF (0x6B40)
+#define BFI_IOC_FWSTATS_SZ (4096)
+
+/**
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF (0x4b00)
+#define BFI_IOC_TRC_ENTS 256
+
+#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_MD5SUM_SZ 4
+struct bfi_ioc_image_hdr {
+ u32 signature; /*!< constant signature */
+ u32 rsvd_a;
+ u32 exec; /*!< exec vector */
+ u32 param; /*!< parameters */
+ u32 rsvd_b[4];
+ u32 md5sum[BFI_IOC_MD5SUM_SZ];
+};
+
+/**
+ * BFI_IOC_I2H_READY_EVENT message
+ */
+struct bfi_ioc_rdy_event {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 init_status; /*!< init event status */
+ u8 rsvd[3];
+};
+
+struct bfi_ioc_hbeat {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 hb_count; /*!< current heart beat count */
+};
+
+/**
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+ BFI_IOC_UNINIT = 0, /*!< not initialized */
+ BFI_IOC_INITING = 1, /*!< h/w is being initialized */
+ BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
+ BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
+ BFI_IOC_OP = 4, /*!< IOC is operational */
+ BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
+ BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
+ BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */
+ BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
+};
+
+#define BFI_IOC_ENDIAN_SIG 0x12345678
+
+enum {
+ BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
+ BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
+ BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
+ BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
+ BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
+ BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
+ BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
+ BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapters */
+ BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
+ BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
+ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
+ BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val) \
+ ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
+ ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
+ BFI_ADAPTER_UNSUPP))
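+
+/*
+ * Usage sketch (illustrative only): decoding the adapter_prop word of
+ * struct bfi_ioc_attr with the accessors above; "attr" is a hypothetical
+ * pointer to an attribute block already filled in by the firmware.
+ *
+ *	u32 nports = BFI_ADAPTER_GETP(NPORTS, attr->adapter_prop);
+ *	u32 speed = BFI_ADAPTER_GETP(SPEED, attr->adapter_prop);
+ *
+ *	if (BFI_ADAPTER_IS_SPECIAL(attr->adapter_prop)) {
+ *		// prototype, TTV-only or unsupported part
+ *	}
+ */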
+
+/**
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req {
+ struct bfi_mhdr mh;
+ u8 ioc_class;
+ u8 rsvd[3];
+ u32 tv_sec;
+};
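+
+/*
+ * Usage sketch (illustrative only): composing the header of an enable
+ * request with bfi_h2i_set() before posting it over the mailbox; "req" and
+ * the destination LPU id of 0 are assumptions made for this sketch.
+ *
+ *	struct bfi_ioc_ctrl_req req;
+ *
+ *	bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 0);
+ *	// req.mh.msg_class = BFI_MC_IOC
+ *	// req.mh.msg_id = BFI_IOC_H2I_ENABLE_REQ
+ *	// req.mh.mtag.h2i.lpu_id = 0
+ */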
+
+/**
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< enable/disable status */
+ u8 rsvd[3];
+};
+
+#define BFI_IOC_MSGSZ 8
+/**
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_req enable_req;
+ struct bfi_ioc_ctrl_req disable_req;
+ struct bfi_ioc_getattr_req getattr_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_rdy_event rdy_event;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+#pragma pack()
+
+#endif /* __BFI_H__ */
diff --git a/drivers/net/bna/bfi_cna.h b/drivers/net/bna/bfi_cna.h
new file mode 100644
index 00000000000..4eecabea397
--- /dev/null
+++ b/drivers/net/bna/bfi_cna.h
@@ -0,0 +1,199 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_CNA_H__
+#define __BFI_CNA_H__
+
+#include "bfi.h"
+#include "bfa_defs_cna.h"
+
+#pragma pack(1)
+
+enum bfi_port_h2i {
+ BFI_PORT_H2I_ENABLE_REQ = (1),
+ BFI_PORT_H2I_DISABLE_REQ = (2),
+ BFI_PORT_H2I_GET_STATS_REQ = (3),
+ BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
+};
+
+enum bfi_port_i2h {
+ BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
+ BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
+};
+
+/**
+ * Generic REQ type
+ */
+struct bfi_port_generic_req {
+ struct bfi_mhdr mh; /*!< msg header */
+ u32 msgtag; /*!< msgtag for reply */
+ u32 rsvd;
+};
+
+/**
+ * Generic RSP type
+ */
+struct bfi_port_generic_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 status; /*!< port enable status */
+ u8 rsvd[3];
+ u32 msgtag; /*!< msgtag for reply */
+};
+
+/**
+ * @todo
+ * BFI_PORT_H2I_ENABLE_REQ
+ */
+
+/**
+ * @todo
+ * BFI_PORT_I2H_ENABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_DISABLE_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_DISABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_GET_STATS_REQ
+ */
+struct bfi_port_get_stats_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ union bfi_addr_u dma_addr;
+};
+
+/**
+ * BFI_PORT_I2H_GET_STATS_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_CLEAR_STATS_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_CLEAR_STATS_RSP
+ */
+
+union bfi_port_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_req enable_req;
+ struct bfi_port_generic_req disable_req;
+ struct bfi_port_get_stats_req getstats_req;
+ struct bfi_port_generic_req clearstats_req;
+};
+
+union bfi_port_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_rsp enable_rsp;
+ struct bfi_port_generic_rsp disable_rsp;
+ struct bfi_port_generic_rsp getstats_rsp;
+ struct bfi_port_generic_rsp clearstats_rsp;
+};
+
+/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
+enum bfi_cee_h2i_msgs {
+ BFI_CEE_H2I_GET_CFG_REQ = 1,
+ BFI_CEE_H2I_RESET_STATS = 2,
+ BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
+enum bfi_cee_i2h_msgs {
+ BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+ BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+ BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/* Data structures */
+
+/*
+ * @brief H2I command structure for resetting the stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_lldp_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief H2I command structure for resetting the stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_cee_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_CFG_REQ
+ */
+struct bfi_cee_get_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_CFG_RSP
+ */
+struct bfi_cee_get_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_STATS_REQ
+ */
+struct bfi_cee_stats_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_STATS_RSP
+ */
+struct bfi_cee_stats_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* @brief mailbox command structures from host to firmware */
+union bfi_cee_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_req get_req;
+ struct bfi_cee_stats_req stats_req;
+};
+
+/* @brief mailbox message structures from firmware to host */
+union bfi_cee_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_rsp get_rsp;
+ struct bfi_cee_stats_rsp stats_rsp;
+};
+
+#pragma pack()
+
+#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
new file mode 100644
index 00000000000..404ea351d4a
--- /dev/null
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -0,0 +1,637 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/*
+ * bfi_ctreg.h catapult host block register definitions
+ *
+ * !!! Do not edit. Auto generated. !!!
+ */
+
+#ifndef __BFI_CTREG_H__
+#define __BFI_CTREG_H__
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200
+#define HOSTFN1_LPU_MBOX0_8 0x00019260
+#define LPU_HOSTFN0_MBOX0_0 0x00019280
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0
+#define HOSTFN2_LPU_MBOX0_0 0x00019400
+#define HOSTFN3_LPU_MBOX0_8 0x00019460
+#define LPU_HOSTFN2_MBOX0_0 0x00019480
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0
+#define HOSTFN0_INT_STATUS 0x00014000
+#define __HOSTFN0_HALT_OCCURRED 0x01000000
+#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN0_INT_STATUS_LVL_SH 20
+#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
+#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN0_INT_STATUS_P_SH 16
+#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
+#define __HOSTFN0_INT_STATUS_F 0x0000ffff
+#define HOSTFN0_INT_MSK 0x00014004
+#define HOST_PAGE_NUM_FN0 0x00014008
+#define __HOST_PAGE_NUM_FN 0x000001ff
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
+#define __MSIX_ERR_INDEX_FN 0x000001ff
+#define HOSTFN1_INT_STATUS 0x00014100
+#define __HOSTFN1_HALT_OCCURRED 0x01000000
+#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN1_INT_STATUS_LVL_SH 20
+#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
+#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN1_INT_STATUS_P_SH 16
+#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
+#define __HOSTFN1_INT_STATUS_F 0x0000ffff
+#define HOSTFN1_INT_MSK 0x00014104
+#define HOST_PAGE_NUM_FN1 0x00014108
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
+#define APP_PLL_425_CTL_REG 0x00014204
+#define __P_425_PLL_LOCK 0x80000000
+#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_425_RESET_TIMER_SH 17
+#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
+#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_425_CNTLMT0_1_SH 14
+#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
+#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_425_JITLMT0_1_SH 12
+#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
+#define __APP_PLL_425_HREF 0x00000800
+#define __APP_PLL_425_HDIV 0x00000400
+#define __APP_PLL_425_P0_1_MK 0x00000300
+#define __APP_PLL_425_P0_1_SH 8
+#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
+#define __APP_PLL_425_Z0_2_MK 0x000000e0
+#define __APP_PLL_425_Z0_2_SH 5
+#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
+#define __APP_PLL_425_RSEL200500 0x00000010
+#define __APP_PLL_425_ENARST 0x00000008
+#define __APP_PLL_425_BYPASS 0x00000004
+#define __APP_PLL_425_LRESETN 0x00000002
+#define __APP_PLL_425_ENABLE 0x00000001
+#define APP_PLL_312_CTL_REG 0x00014208
+#define __P_312_PLL_LOCK 0x80000000
+#define __ENABLE_MAC_AHB_1 0x00800000
+#define __ENABLE_MAC_AHB_0 0x00400000
+#define __ENABLE_MAC_1 0x00200000
+#define __ENABLE_MAC_0 0x00100000
+#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_312_RESET_TIMER_SH 17
+#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
+#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_312_CNTLMT0_1_SH 14
+#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
+#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_312_JITLMT0_1_SH 12
+#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
+#define __APP_PLL_312_HREF 0x00000800
+#define __APP_PLL_312_HDIV 0x00000400
+#define __APP_PLL_312_P0_1_MK 0x00000300
+#define __APP_PLL_312_P0_1_SH 8
+#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
+#define __APP_PLL_312_Z0_2_MK 0x000000e0
+#define __APP_PLL_312_Z0_2_SH 5
+#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
+#define __APP_PLL_312_RSEL200500 0x00000010
+#define __APP_PLL_312_ENARST 0x00000008
+#define __APP_PLL_312_BYPASS 0x00000004
+#define __APP_PLL_312_LRESETN 0x00000002
+#define __APP_PLL_312_ENABLE 0x00000001
+#define MBIST_CTL_REG 0x00014220
+#define __EDRAM_BISTR_START 0x00000004
+#define __MBIST_RESET 0x00000002
+#define __MBIST_START 0x00000001
+#define MBIST_STAT_REG 0x00014224
+#define __EDRAM_BISTR_STATUS 0x00000008
+#define __EDRAM_BISTR_DONE 0x00000004
+#define __MEM_BIT_STATUS 0x00000002
+#define __MBIST_DONE 0x00000001
+#define HOST_SEM0_REG 0x00014230
+#define __HOST_SEMAPHORE 0x00000001
+#define HOST_SEM1_REG 0x00014234
+#define HOST_SEM2_REG 0x00014238
+#define HOST_SEM3_REG 0x0001423c
+#define HOST_SEM0_INFO_REG 0x00014240
+#define HOST_SEM1_INFO_REG 0x00014244
+#define HOST_SEM2_INFO_REG 0x00014248
+#define HOST_SEM3_INFO_REG 0x0001424c
+#define ETH_MAC_SER_REG 0x00014288
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define HOSTFN2_INT_STATUS 0x00014300
+#define __HOSTFN2_HALT_OCCURRED 0x01000000
+#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN2_INT_STATUS_LVL_SH 20
+#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
+#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN2_INT_STATUS_P_SH 16
+#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
+#define __HOSTFN2_INT_STATUS_F 0x0000ffff
+#define HOSTFN2_INT_MSK 0x00014304
+#define HOST_PAGE_NUM_FN2 0x00014308
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
+#define HOSTFN3_INT_STATUS 0x00014400
+#define __HALT_OCCURRED 0x01000000
+#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN3_INT_STATUS_LVL_SH 20
+#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
+#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN3_INT_STATUS_P_SH 16
+#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
+#define __HOSTFN3_INT_STATUS_F 0x0000ffff
+#define HOSTFN3_INT_MSK 0x00014404
+#define HOST_PAGE_NUM_FN3 0x00014408
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
+#define FNC_ID_REG 0x00014600
+#define __FUNCTION_NUMBER 0x00000007
+#define FNC_PERS_REG 0x00014604
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+#define OP_MODE 0x0001460c
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define HOST_SEM4_REG 0x00014610
+#define HOST_SEM5_REG 0x00014614
+#define HOST_SEM6_REG 0x00014618
+#define HOST_SEM7_REG 0x0001461c
+#define HOST_SEM4_INFO_REG 0x00014620
+#define HOST_SEM5_INFO_REG 0x00014624
+#define HOST_SEM6_INFO_REG 0x00014628
+#define HOST_SEM7_INFO_REG 0x0001462c
+#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
+#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
+#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
+#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
+#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
+#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
+#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
+#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
+#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
+#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
+#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
+#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
+#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
+#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
+#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
+#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
+#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc
+#define CPE_PI_PTR_Q0 0x00038000
+#define __CPE_PI_UNUSED_MK 0xffff0000
+#define __CPE_PI_UNUSED_SH 16
+#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
+#define __CPE_PI_PTR 0x0000ffff
+#define CPE_PI_PTR_Q1 0x00038040
+#define CPE_CI_PTR_Q0 0x00038004
+#define __CPE_CI_UNUSED_MK 0xffff0000
+#define __CPE_CI_UNUSED_SH 16
+#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
+#define __CPE_CI_PTR 0x0000ffff
+#define CPE_CI_PTR_Q1 0x00038044
+#define CPE_DEPTH_Q0 0x00038008
+#define __CPE_DEPTH_UNUSED_MK 0xf8000000
+#define __CPE_DEPTH_UNUSED_SH 27
+#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
+#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
+#define __CPE_MSIX_VEC_INDEX_SH 16
+#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
+#define __CPE_DEPTH 0x0000ffff
+#define CPE_DEPTH_Q1 0x00038048
+#define CPE_QCTRL_Q0 0x0003800c
+#define __CPE_CTRL_UNUSED30_MK 0xfc000000
+#define __CPE_CTRL_UNUSED30_SH 26
+#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
+#define __CPE_FUNC_INT_CTRL_MK 0x03000000
+#define __CPE_FUNC_INT_CTRL_SH 24
+#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
+enum {
+ __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
+ __CPE_FUNC_INT_CTRL_F2NF = 0x1,
+ __CPE_FUNC_INT_CTRL_3QUART = 0x2,
+ __CPE_FUNC_INT_CTRL_HALF = 0x3,
+};
+#define __CPE_CTRL_UNUSED20_MK 0x00f00000
+#define __CPE_CTRL_UNUSED20_SH 20
+#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
+#define __CPE_SCI_TH_MK 0x000f0000
+#define __CPE_SCI_TH_SH 16
+#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
+#define __CPE_CTRL_UNUSED10_MK 0x0000c000
+#define __CPE_CTRL_UNUSED10_SH 14
+#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
+#define __CPE_ACK_PENDING 0x00002000
+#define __CPE_CTRL_UNUSED40_MK 0x00001c00
+#define __CPE_CTRL_UNUSED40_SH 10
+#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
+#define __CPE_PCIEID_MK 0x00000300
+#define __CPE_PCIEID_SH 8
+#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
+#define __CPE_CTRL_UNUSED00_MK 0x000000fe
+#define __CPE_CTRL_UNUSED00_SH 1
+#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
+#define __CPE_ESIZE 0x00000001
+#define CPE_QCTRL_Q1 0x0003804c
+#define __CPE_CTRL_UNUSED31_MK 0xfc000000
+#define __CPE_CTRL_UNUSED31_SH 26
+#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
+#define __CPE_CTRL_UNUSED21_MK 0x00f00000
+#define __CPE_CTRL_UNUSED21_SH 20
+#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
+#define __CPE_CTRL_UNUSED11_MK 0x0000c000
+#define __CPE_CTRL_UNUSED11_SH 14
+#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
+#define __CPE_CTRL_UNUSED41_MK 0x00001c00
+#define __CPE_CTRL_UNUSED41_SH 10
+#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
+#define __CPE_CTRL_UNUSED01_MK 0x000000fe
+#define __CPE_CTRL_UNUSED01_SH 1
+#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
+#define RME_PI_PTR_Q0 0x00038020
+#define __LATENCY_TIME_STAMP_MK 0xffff0000
+#define __LATENCY_TIME_STAMP_SH 16
+#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
+#define __RME_PI_PTR 0x0000ffff
+#define RME_PI_PTR_Q1 0x00038060
+#define RME_CI_PTR_Q0 0x00038024
+#define __DELAY_TIME_STAMP_MK 0xffff0000
+#define __DELAY_TIME_STAMP_SH 16
+#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
+#define __RME_CI_PTR 0x0000ffff
+#define RME_CI_PTR_Q1 0x00038064
+#define RME_DEPTH_Q0 0x00038028
+#define __RME_DEPTH_UNUSED_MK 0xf8000000
+#define __RME_DEPTH_UNUSED_SH 27
+#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
+#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
+#define __RME_MSIX_VEC_INDEX_SH 16
+#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
+#define __RME_DEPTH 0x0000ffff
+#define RME_DEPTH_Q1 0x00038068
+#define RME_QCTRL_Q0 0x0003802c
+#define __RME_INT_LATENCY_TIMER_MK 0xff000000
+#define __RME_INT_LATENCY_TIMER_SH 24
+#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
+#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
+#define __RME_INT_DELAY_TIMER_SH 16
+#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
+#define __RME_INT_DELAY_DISABLE 0x00008000
+#define __RME_DLY_DELAY_DISABLE 0x00004000
+#define __RME_ACK_PENDING 0x00002000
+#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
+#define __RME_CTRL_UNUSED10_MK 0x00000c00
+#define __RME_CTRL_UNUSED10_SH 10
+#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
+#define __RME_PCIEID_MK 0x00000300
+#define __RME_PCIEID_SH 8
+#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
+#define __RME_CTRL_UNUSED00_MK 0x000000fe
+#define __RME_CTRL_UNUSED00_SH 1
+#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
+#define __RME_ESIZE 0x00000001
+#define RME_QCTRL_Q1 0x0003806c
+#define __RME_CTRL_UNUSED11_MK 0x00000c00
+#define __RME_CTRL_UNUSED11_SH 10
+#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
+#define __RME_CTRL_UNUSED01_MK 0x000000fe
+#define __RME_CTRL_UNUSED01_SH 1
+#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
+#define PSS_CTL_REG 0x00018800
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810
+#define __PSS_LPU1_TCM_READ_ERR 0x00200000
+#define __PSS_LPU0_TCM_READ_ERR 0x00100000
+#define __PSS_LMEM5_CORR_ERR 0x00080000
+#define __PSS_LMEM4_CORR_ERR 0x00040000
+#define __PSS_LMEM3_CORR_ERR 0x00020000
+#define __PSS_LMEM2_CORR_ERR 0x00010000
+#define __PSS_LMEM1_CORR_ERR 0x00008000
+#define __PSS_LMEM0_CORR_ERR 0x00004000
+#define __PSS_LMEM5_UNCORR_ERR 0x00002000
+#define __PSS_LMEM4_UNCORR_ERR 0x00001000
+#define __PSS_LMEM3_UNCORR_ERR 0x00000800
+#define __PSS_LMEM2_UNCORR_ERR 0x00000400
+#define __PSS_LMEM1_UNCORR_ERR 0x00000200
+#define __PSS_LMEM0_UNCORR_ERR 0x00000100
+#define __PSS_BAL_PERR 0x00000080
+#define __PSS_DIP_IF_ERR 0x00000040
+#define __PSS_IOH_IF_ERR 0x00000020
+#define __PSS_TDS_IF_ERR 0x00000010
+#define __PSS_RDS_IF_ERR 0x00000008
+#define __PSS_SGM_IF_ERR 0x00000004
+#define __PSS_LPU1_RAM_ERR 0x00000002
+#define __PSS_LPU0_RAM_ERR 0x00000001
+#define ERR_SET_REG 0x00018818
+#define __PSS_ERR_STATUS_SET 0x003fffff
+#define PMM_1T_RESET_REG_P0 0x0002381c
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c
+#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
+#define __RXQ0_ADD_VECTORS_P 0x80000000
+#define __RXQ0_STOP_P 0x40000000
+#define __RXQ0_PRD_PTR_P 0x0000ffff
+#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
+#define __RXQ1_ADD_VECTORS_P 0x80000000
+#define __RXQ1_STOP_P 0x40000000
+#define __RXQ1_PRD_PTR_P 0x0000ffff
+#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
+#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
+#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
+#define __TXQ0_ADD_VECTORS_P 0x80000000
+#define __TXQ0_STOP_P 0x40000000
+#define __TXQ0_PRD_PTR_P 0x0000ffff
+#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
+#define __TXQ1_ADD_VECTORS_P 0x80000000
+#define __TXQ1_STOP_P 0x40000000
+#define __TXQ1_PRD_PTR_P 0x0000ffff
+#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
+#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
+#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
+#define __IB1_0_ACK_P 0x80000000
+#define __IB1_0_DISABLE_P 0x40000000
+#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB1_0_COALESCING_CFG_P_SH 16
+#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
+#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
+#define __IB1_1_ACK_P 0x80000000
+#define __IB1_1_DISABLE_P 0x40000000
+#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB1_1_COALESCING_CFG_P_SH 16
+#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
+#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
+#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
+#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
+#define __IB2_0_ACK_P 0x80000000
+#define __IB2_0_DISABLE_P 0x40000000
+#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB2_0_COALESCING_CFG_P_SH 16
+#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
+#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
+#define __IB2_1_ACK_P 0x80000000
+#define __IB2_1_DISABLE_P 0x40000000
+#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB2_1_COALESCING_CFG_P_SH 16
+#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
+#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
+#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
+
+/*
+ * These definitions are either in error or missing in the spec. They are
+ * auto-generated from hard-coded values in regparse.pl.
+ */
+#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
+#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
+#define __EMPHPRE_AT_4G_FIX 0x00000003
+#define __SFP_TXRATE_EN_FIX 0x00000100
+#define __SFP_RXRATE_EN_FIX 0x00000080
+
+/*
+ * These register definitions are auto-generated from hard coded values
+ * in regparse.pl.
+ */
+
+/*
+ * These register mapping definitions are auto-generated from mapping tables
+ * in regparse.pl.
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+
+#define CPE_DEPTH_Q(__n) \
+ (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
+#define CPE_QCTRL_Q(__n) \
+ (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
+#define CPE_PI_PTR_Q(__n) \
+ (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
+#define CPE_CI_PTR_Q(__n) \
+ (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
+#define RME_DEPTH_Q(__n) \
+ (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
+#define RME_QCTRL_Q(__n) \
+ (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
+#define RME_PI_PTR_Q(__n) \
+ (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
+#define RME_CI_PTR_Q(__n) \
+ (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
+#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
+ * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
+ * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
+ * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
+ * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
+ * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
+ * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
+ * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
+ * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define CPE_Q_MASK(__q) ((__q) & 0x3)
+#define RME_Q_MASK(__q) ((__q) & 0x3)
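+
+/*
+ * For example, CPE_DEPTH_Q(1) expands to
+ * CPE_DEPTH_Q0 + 1 * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0) == CPE_DEPTH_Q1
+ * (0x00038048), i.e. each *_Q(__n) macro steps from the queue-0 register by
+ * a fixed per-queue stride. CPE_Q_NUM(2, 1) packs PCI function 2, local
+ * queue 1 into global queue number (2 << 2) + 1 == 9, and CPE_Q_MASK(9) == 1
+ * recovers the local queue index again.
+ */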
+
+/*
+ * PCI MSI-X vector defines
+ */
+enum {
+ BFA_MSIX_CPE_Q0 = 0,
+ BFA_MSIX_CPE_Q1 = 1,
+ BFA_MSIX_CPE_Q2 = 2,
+ BFA_MSIX_CPE_Q3 = 3,
+ BFA_MSIX_RME_Q0 = 4,
+ BFA_MSIX_RME_Q1 = 5,
+ BFA_MSIX_RME_Q2 = 6,
+ BFA_MSIX_RME_Q3 = 7,
+ BFA_MSIX_LPU_ERR = 8,
+ BFA_MSIX_CT_MAX = 9,
+};
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+
+/*
+ * catapult memory map.
+ */
+#define LL_PGN_HQM0 0x0096
+#define LL_PGN_HQM1 0x0097
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
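+
+/*
+ * For example, an smem offset of 0x12345 from page _pg0 lands in page
+ * PSS_SMEM_PGNUM(_pg0, 0x12345) == _pg0 + 2 (0x12345 >> 15 == 2), at byte
+ * offset PSS_SMEM_PGOFF(0x12345) == 0x2345 within that 32KB page window.
+ */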
+
+/*
+ * End of catapult memory map
+ */
+
+#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/net/bna/bfi_ll.h b/drivers/net/bna/bfi_ll.h
new file mode 100644
index 00000000000..bee4d054066
--- /dev/null
+++ b/drivers/net/bna/bfi_ll.h
@@ -0,0 +1,438 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_LL_H__
+#define __BFI_LL_H__
+
+#include "bfi.h"
+
+#pragma pack(1)
+
+/**
+ * @brief
+ * "enums" for all LL mailbox messages other than IOC
+ */
+enum {
+ BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
+ BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
+ BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
+
+ BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
+ BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
+ BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
+ BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
+
+ BFI_LL_H2I_PORT_ADMIN_REQ = 8,
+ BFI_LL_H2I_STATS_GET_REQ = 9,
+ BFI_LL_H2I_STATS_CLEAR_REQ = 10,
+
+ BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
+ BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
+
+ BFI_LL_H2I_TXQ_STOP_REQ = 13,
+ BFI_LL_H2I_RXQ_STOP_REQ = 14,
+
+ BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
+
+ BFI_LL_H2I_SET_PAUSE_REQ = 16,
+ BFI_LL_H2I_MTU_INFO_REQ = 17,
+
+ BFI_LL_H2I_RX_REQ = 18,
+};
+
+enum {
+ BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
+ BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
+ BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
+
+ BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
+ BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
+ BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
+ BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
+
+ BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
+ BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
+ BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
+
+ BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
+ BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
+
+ BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
+ BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
+
+ BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
+
+ BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
+
+ BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
+ BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
+
+ BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
+ BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
+
+ BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
+ BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
+};
+
+/**
+ * @brief bfi_ll_mac_addr_req is used by:
+ * BFI_LL_H2I_MAC_UCAST_SET_REQ
+ * BFI_LL_H2I_MAC_UCAST_ADD_REQ
+ * BFI_LL_H2I_MAC_UCAST_DEL_REQ
+ * BFI_LL_H2I_MAC_MCAST_ADD_REQ
+ * BFI_LL_H2I_MAC_MCAST_DEL_REQ
+ */
+struct bfi_ll_mac_addr_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 rsvd1[3];
+ mac_t mac_addr;
+ u8 rsvd2[2];
+};
+
+/**
+ * @brief bfi_ll_mcast_filter_req is used by:
+ * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
+ */
+struct bfi_ll_mcast_filter_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 enable;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_mcast_del_all is used by:
+ * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
+ */
+struct bfi_ll_mcast_del_all_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_q_stop_req is used by:
+ * BFI_LL_H2I_TXQ_STOP_REQ
+ * BFI_LL_H2I_RXQ_STOP_REQ
+ */
+struct bfi_ll_q_stop_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 q_id_mask[2]; /* !< bit-mask for queue ids */
+};
+
+/**
+ * @brief bfi_ll_stats_req is used by:
+ * BFI_LL_I2H_STATS_GET_REQ
+ * BFI_LL_I2H_STATS_CLEAR_REQ
+ */
+struct bfi_ll_stats_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u16 stats_mask; /* !< bit-mask for non-function statistics */
+ u8 rsvd[2];
+ u32 rxf_id_mask[2]; /* !< bit-mask for RxF Statistics */
+ u32 txf_id_mask[2]; /* !< bit-mask for TxF Statistics */
+ union bfi_addr_u host_buffer; /* !< where statistics are returned */
+};
+
+/**
+ * @brief defines for "stats_mask" above.
+ */
+#define BFI_LL_STATS_MAC (1 << 0) /* !< MAC Statistics */
+#define BFI_LL_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
+#define BFI_LL_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
+#define BFI_LL_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
+#define BFI_LL_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
+
+#define BFI_LL_STATS_ALL 0x1f
+
+/**
+ * @brief bfi_ll_port_admin_req
+ */
+struct bfi_ll_port_admin_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 up;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_rxf_req is used by:
+ * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
+ * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
+ */
+struct bfi_ll_rxf_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 enable;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_rxf_multi_req is used by:
+ * BFI_LL_H2I_RX_REQ
+ */
+struct bfi_ll_rxf_multi_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 rxf_id_mask[2];
+ u8 enable;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief enum for Loopback opmodes
+ */
+enum {
+ BFI_LL_DIAG_LB_OPMODE_EXT = 0,
+ BFI_LL_DIAG_LB_OPMODE_CBL = 1,
+};
+
+/**
+ * @brief bfi_ll_set_pause_req is used by:
+ * BFI_LL_H2I_SET_PAUSE_REQ
+ */
+struct bfi_ll_set_pause_req {
+ struct bfi_mhdr mh;
+ u8 tx_pause; /* 1 = enable, 0 = disable */
+ u8 rx_pause; /* 1 = enable, 0 = disable */
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_mtu_info_req is used by:
+ * BFI_LL_H2I_MTU_INFO_REQ
+ */
+struct bfi_ll_mtu_info_req {
+ struct bfi_mhdr mh;
+ u16 mtu;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief
+ * Response header format used by all responses
+ * For both responses and asynchronous notifications
+ */
+struct bfi_ll_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 error;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_aen is used by:
+ * BFI_LL_I2H_LINK_DOWN_AEN
+ * BFI_LL_I2H_LINK_UP_AEN
+ */
+struct bfi_ll_aen {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 reason;
+ u8 cee_linkup;
+ u8 prio_map; /*!< LL priority bit-map */
+ u8 rsvd[2];
+};
+
+/**
+ * @brief
+ * The following error codes can be returned
+ * by the mbox commands
+ */
+enum {
+ BFI_LL_CMD_OK = 0,
+ BFI_LL_CMD_FAIL = 1,
+ BFI_LL_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */
+ BFI_LL_CMD_CAM_FULL = 3, /* !< CAM is full */
+ BFI_LL_CMD_NOT_OWNER = 4, /* !< Not permitted because not the owner */
+ BFI_LL_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */
+ BFI_LL_CMD_WAITING = 6, /* !< Waiting for completion (VMware) */
+ BFI_LL_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
+};
+
+/* Statistics */
+#define BFI_LL_TXF_ID_MAX 64
+#define BFI_LL_RXF_ID_MAX 64
+
+/* TxF Frame Statistics */
+struct bfi_ll_stats_txf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+
+ u64 errors;
+ u64 filter_vlan; /* frames filtered due to VLAN */
+ u64 filter_mac_sa; /* frames filtered due to SA check */
+};
+
+/* RxF Frame Statistics */
+struct bfi_ll_stats_rxf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+ u64 frame_drops;
+};
+
+/* FC Tx Frame Statistics */
+struct bfi_ll_stats_fc_tx {
+ u64 txf_ucast_octets;
+ u64 txf_ucast;
+ u64 txf_ucast_vlan;
+
+ u64 txf_mcast_octets;
+ u64 txf_mcast;
+ u64 txf_mcast_vlan;
+
+ u64 txf_bcast_octets;
+ u64 txf_bcast;
+ u64 txf_bcast_vlan;
+
+ u64 txf_parity_errors;
+ u64 txf_timeout;
+ u64 txf_fid_parity_errors;
+};
+
+/* FC Rx Frame Statistics */
+struct bfi_ll_stats_fc_rx {
+ u64 rxf_ucast_octets;
+ u64 rxf_ucast;
+ u64 rxf_ucast_vlan;
+
+ u64 rxf_mcast_octets;
+ u64 rxf_mcast;
+ u64 rxf_mcast_vlan;
+
+ u64 rxf_bcast_octets;
+ u64 rxf_bcast;
+ u64 rxf_bcast_vlan;
+};
+
+/* RAD Frame Statistics */
+struct bfi_ll_stats_rad {
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_vlan_frames;
+
+ u64 rx_ucast;
+ u64 rx_ucast_octets;
+ u64 rx_ucast_vlan;
+
+ u64 rx_mcast;
+ u64 rx_mcast_octets;
+ u64 rx_mcast_vlan;
+
+ u64 rx_bcast;
+ u64 rx_bcast_octets;
+ u64 rx_bcast_vlan;
+
+ u64 rx_drops;
+};
+
+/* BPC Tx and Rx Pause Statistics */
+struct bfi_ll_stats_bpc {
+ /* transmit stats */
+ u64 tx_pause[8];
+ u64 tx_zero_pause[8]; /*!< Pause cancellation */
+ u64 tx_first_pause[8]; /*!< Pause initiation rather than retention */
+
+ /* receive stats */
+ u64 rx_pause[8];
+ u64 rx_zero_pause[8]; /*!< Pause cancellation */
+ u64 rx_first_pause[8]; /*!< Pause initiation rather than retention */
+};
+
+/* MAC Statistics (Rx and Tx) */
+struct bfi_ll_stats_mac {
+ u64 frame_64; /* both rx and tx counter */
+ u64 frame_65_127; /* both rx and tx counter */
+ u64 frame_128_255; /* both rx and tx counter */
+ u64 frame_256_511; /* both rx and tx counter */
+ u64 frame_512_1023; /* both rx and tx counter */
+ u64 frame_1024_1518; /* both rx and tx counter */
+ u64 frame_1519_1522; /* both rx and tx counter */
+
+ /* receive stats */
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_fcs_error;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_control_frames;
+ u64 rx_pause;
+ u64 rx_unknown_opcode;
+ u64 rx_alignment_error;
+ u64 rx_frame_length_error;
+ u64 rx_code_error;
+ u64 rx_carrier_sense_error;
+ u64 rx_undersize;
+ u64 rx_oversize;
+ u64 rx_fragments;
+ u64 rx_jabber;
+ u64 rx_drop;
+
+ /* transmit stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_deferral;
+ u64 tx_excessive_deferral;
+ u64 tx_single_collision;
+ u64 tx_muliple_collision;
+ u64 tx_late_collision;
+ u64 tx_excessive_collision;
+ u64 tx_total_collision;
+ u64 tx_pause_honored;
+ u64 tx_drop;
+ u64 tx_jabber;
+ u64 tx_fcs_error;
+ u64 tx_control_frame;
+ u64 tx_oversize;
+ u64 tx_undersize;
+ u64 tx_fragments;
+};
+
+/* Complete statistics */
+struct bfi_ll_stats {
+ struct bfi_ll_stats_mac mac_stats;
+ struct bfi_ll_stats_bpc bpc_stats;
+ struct bfi_ll_stats_rad rad_stats;
+ struct bfi_ll_stats_fc_rx fc_rx_stats;
+ struct bfi_ll_stats_fc_tx fc_tx_stats;
+ struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
+ struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
+};
+
+#pragma pack()
+
+#endif /* __BFI_LL_H__ */
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
new file mode 100644
index 00000000000..df6676bbc84
--- /dev/null
+++ b/drivers/net/bna/bna.h
@@ -0,0 +1,550 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BNA_H__
+#define __BNA_H__
+
+#include "bfa_wc.h"
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi_ll.h"
+#include "bna_types.h"
+
+extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+
+/**
+ *
+ * Macros and constants
+ *
+ */
+
+#define BNA_IOC_TIMER_FREQ 200
+
+/* Log string size */
+#define BNA_MESSAGE_SIZE 256
+
+#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
+
+/* MBOX API for PORT, TX, RX */
+#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
+do { \
+ memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
+ (_qe)->cbfn = (_cbfn); \
+ (_qe)->cbarg = (_cbarg); \
+} while (0)
+
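+/*
+ * Typical use of the MBOX helpers above (illustrative sketch; see the
+ * firmware request helpers in bna_ctrl.c, e.g. bna_fw_llport_admin_up()).
+ * A request is built on the stack, copied into the object's persistent
+ * bna_mbox_qe with bna_mbox_qe_fill(), and queued with bna_mbox_send();
+ * the callback runs when the matching response arrives. "my_cbfn" is a
+ * hypothetical completion callback.
+ *
+ *	struct bfi_ll_port_admin_req ll_req;
+ *
+ *	memset(&ll_req, 0, sizeof(ll_req));
+ *	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_PORT_ADMIN_REQ, 0);
+ *	ll_req.up = BNA_STATUS_T_ENABLED;
+ *	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ *			my_cbfn, llport);
+ *	bna_mbox_send(llport->bna, &llport->mbox_qe);
+ */
+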
+#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
+
+#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
+ (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
+
+#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
+
+#define BNA_TO_POWER_OF_2(x) \
+do { \
+ int _shift = 0; \
+ while ((x) && (x) != 1) { \
+ (x) >>= 1; \
+ _shift++; \
+ } \
+ (x) <<= _shift; \
+} while (0)
+
+#define BNA_TO_POWER_OF_2_HIGH(x) \
+do { \
+ int n = 1; \
+ while (n < (x)) \
+ n <<= 1; \
+ (x) = n; \
+} while (0)
+
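+/*
+ * Illustrative behaviour of the two helpers above: BNA_TO_POWER_OF_2()
+ * rounds its argument down to a power of 2, while BNA_TO_POWER_OF_2_HIGH()
+ * rounds it up. For example:
+ *
+ *	int depth = 1000;
+ *	BNA_TO_POWER_OF_2(depth);	(depth is now 512)
+ *	depth = 1000;
+ *	BNA_TO_POWER_OF_2_HIGH(depth);	(depth is now 1024)
+ */
+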
+/*
+ * input : _addr-> os dma addr in host endian format,
+ * output : _bna_dma_addr-> pointer to hw dma addr
+ */
+#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
+do { \
+ u64 tmp_addr = \
+ cpu_to_be64((u64)(_addr)); \
+ (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
+ (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
+} while (0)
+
+/*
+ * input : _bna_dma_addr-> pointer to hw dma addr
+ * output : _addr-> os dma addr in host endian format
+ */
+#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
+do { \
+ (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
+ | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
+} while (0)
+
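+/*
+ * Illustrative round trip through the two helpers above, where "paddr"
+ * stands in for a dma_addr_t obtained from the DMA API:
+ *
+ *	struct bna_dma_addr hw_addr;
+ *	u64 readback;
+ *
+ *	BNA_SET_DMA_ADDR(paddr, &hw_addr);
+ *	BNA_GET_DMA_ADDR(&hw_addr, readback);
+ *
+ * hw_addr now holds the address as big-endian msb/lsb halves for the
+ * hardware, and readback equals (u64)paddr again.
+ */
+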
+#define containing_rec(addr, type, field) \
+ ((type *)((unsigned char *)(addr) - \
+ (unsigned char *)(&((type *)0)->field)))
+
+#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
+
+/* TxQ element is 64 bytes */
+#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
+#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
+
+#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
+ (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
+}
+
+/* RxQ element is 8 bytes */
+#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
+#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
+
+#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
+}
+
+/* CQ element is 16 bytes */
+#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
+#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
+
+#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ \
+ page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
+}
+
+#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
+ (&((_cast *)(_q_base))[(_qe_idx)])
+
+#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
+
+#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
+ ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
+
+#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
+ (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
+ (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
+ ((_q_depth) - 1))
+
+#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
+ ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
+ (_q_depth - 1))
+
+#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
+
+#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
+
+#define BNA_Q_PI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.producer_index = \
+ (((_q_ptr)->q.producer_index + (_num)) & \
+ ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.consumer_index = \
+ (((_q_ptr)->q.consumer_index + (_num)) \
+ & ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_FREE_COUNT(_q_ptr) \
+ (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
+
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+ (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
+
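+/*
+ * The index helpers above rely on q_depth being a power of 2, so that
+ * wrap-around is a mask with (q_depth - 1); one element is always left
+ * unused to tell a full queue from an empty one. Illustrative numbers
+ * for a depth-64 queue with producer_index == 62, consumer_index == 10:
+ *
+ *	BNA_QE_IN_USE_CNT(q, 64)	= (62 - 10) & 63	= 52
+ *	BNA_QE_FREE_CNT(q, 64)		= (10 - 62 - 1) & 63	= 11
+ *	BNA_QE_INDX_ADD(pi, 4, 64)	moves pi from 62 to 2 (wrap)
+ */
+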
+/* These macros build the data portion of the TxQ/RxQ doorbell */
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_STOP (0x40000000)
+
+/* These macros build the data portion of the IB doorbell */
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
+
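+/*
+ * Example doorbell encodings (illustrative): ringing a TxQ/RxQ doorbell
+ * with producer index 100 writes 0x80000064, and acknowledging 5 events
+ * on an IB with coalescing timeout 6 writes 0x80060005:
+ *
+ *	BNA_DOORBELL_Q_PRD_IDX(100)	= 0x80000000 | 100	= 0x80000064
+ *	BNA_DOORBELL_IB_INT_ACK(6, 5)	= 0x80000000 | (6 << 16) | 5
+ *					= 0x80060005
+ */
+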
+/* Set the coalescing timer for the given ib */
+#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
+ ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
+
+/* Acknowledges 'events' number of events for a given ib */
+#define bna_ib_ack(_i_dbell, _events) \
+ (writel(((_i_dbell)->doorbell_ack | (_events)), \
+ (_i_dbell)->doorbell_addr))
+
+#define bna_txq_prod_indx_doorbell(_tcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
+ (_tcb)->q_dbell))
+
+#define bna_rxq_prod_indx_doorbell(_rcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
+ (_rcb)->q_dbell))
+
+#define BNA_LARGE_PKT_SIZE 1000
+
+#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
+do { \
+ if ((_len) > BNA_LARGE_PKT_SIZE) { \
+ (_pkt)->large_pkt_cnt++; \
+ } else { \
+ (_pkt)->small_pkt_cnt++; \
+ } \
+} while (0)
+
+#define call_rxf_stop_cbfn(rxf, status) \
+ if ((rxf)->stop_cbfn) { \
+ (*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
+ (rxf)->stop_cbfn = NULL; \
+ (rxf)->stop_cbarg = NULL; \
+ }
+
+#define call_rxf_start_cbfn(rxf, status) \
+ if ((rxf)->start_cbfn) { \
+ (*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
+ (rxf)->start_cbfn = NULL; \
+ (rxf)->start_cbarg = NULL; \
+ }
+
+#define call_rxf_cam_fltr_cbfn(rxf, status) \
+ if ((rxf)->cam_fltr_cbfn) { \
+ (*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx, \
+ (status)); \
+ (rxf)->cam_fltr_cbfn = NULL; \
+ (rxf)->cam_fltr_cbarg = NULL; \
+ }
+
+#define call_rxf_pause_cbfn(rxf, status) \
+ if ((rxf)->oper_state_cbfn) { \
+ (*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
+ (status)); \
+ (rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
+ (rxf)->oper_state_cbfn = NULL; \
+ (rxf)->oper_state_cbarg = NULL; \
+ }
+
+#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
+
+#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
+
+#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
+
+#define xxx_enable(mode, bitmask, xxx) \
+do { \
+ bitmask |= xxx; \
+ mode |= xxx; \
+} while (0)
+
+#define xxx_disable(mode, bitmask, xxx) \
+do { \
+ bitmask |= xxx; \
+ mode &= ~xxx; \
+} while (0)
+
+#define xxx_inactive(mode, bitmask, xxx) \
+do { \
+ bitmask &= ~xxx; \
+ mode &= ~xxx; \
+} while (0)
+
+#define is_promisc_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_promisc_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_default_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_default_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_allmulti_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define is_allmulti_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define GET_RXQS(rxp, q0, q1) do { \
+ switch ((rxp)->type) { \
+ case BNA_RXP_SINGLE: \
+ (q0) = rxp->rxq.single.only; \
+ (q1) = NULL; \
+ break; \
+ case BNA_RXP_SLR: \
+ (q0) = rxp->rxq.slr.large; \
+ (q1) = rxp->rxq.slr.small; \
+ break; \
+ case BNA_RXP_HDS: \
+ (q0) = rxp->rxq.hds.data; \
+ (q1) = rxp->rxq.hds.hdr; \
+ break; \
+ } \
+} while (0)
+
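+/*
+ * Illustrative use of GET_RXQS() (not part of this patch): q1 comes back
+ * NULL for a single-queue RxP, so callers check it before use. setup_rxq()
+ * below is a hypothetical per-queue helper.
+ *
+ *	struct bna_rxq *q0 = NULL, *q1 = NULL;
+ *
+ *	GET_RXQS(rxp, q0, q1);
+ *	setup_rxq(q0);
+ *	if (q1)
+ *		setup_rxq(q1);
+ */
+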
+/**
+ *
+ * Function prototypes
+ *
+ */
+
+/**
+ * BNA
+ */
+
+/* APIs for BNAD */
+void bna_res_req(struct bna_res_info *res_info);
+void bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info);
+void bna_uninit(struct bna *bna);
+void bna_stats_get(struct bna *bna);
+void bna_get_perm_mac(struct bna *bna, u8 *mac);
+
+/* APIs for Rx */
+int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
+
+/* APIs for RxF */
+struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
+void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
+ struct bna_mac *mac);
+struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
+void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mac *mac);
+struct bna_rit_segment *
+bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
+void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+ struct bna_rit_segment *seg);
+
+/**
+ * DEVICE
+ */
+
+/* APIs for BNAD */
+void bna_device_enable(struct bna_device *device);
+void bna_device_disable(struct bna_device *device,
+ enum bna_cleanup_type type);
+
+/**
+ * MBOX
+ */
+
+/* APIs for PORT, TX, RX */
+void bna_mbox_handler(struct bna *bna, u32 intr_status);
+void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
+
+/**
+ * PORT
+ */
+
+/* API for RX */
+int bna_port_mtu_get(struct bna_port *port);
+void bna_llport_admin_up(struct bna_llport *llport);
+void bna_llport_admin_down(struct bna_llport *llport);
+
+/* API for BNAD */
+void bna_port_enable(struct bna_port *port);
+void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
+ void (*cbfn)(void *, enum bna_cb_status));
+void bna_port_pause_config(struct bna_port *port,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *, enum bna_cb_status));
+void bna_port_mtu_set(struct bna_port *port, int mtu,
+ void (*cbfn)(struct bnad *, enum bna_cb_status));
+void bna_port_mac_get(struct bna_port *port, mac_t *mac);
+
+/* Callbacks for TX, RX */
+void bna_port_cb_tx_stopped(struct bna_port *port,
+ enum bna_cb_status status);
+void bna_port_cb_rx_stopped(struct bna_port *port,
+ enum bna_cb_status status);
+
+/**
+ * IB
+ */
+
+/* APIs for BNA */
+void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
+
+/**
+ * TX MODULE AND TX
+ */
+
+/* APIs for BNA */
+void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
+int bna_tx_state_get(struct bna_tx *tx);
+
+/* APIs for PORT */
+void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
+void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
+void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
+
+/* APIs for BNAD */
+void bna_tx_res_req(int num_txq, int txq_depth,
+ struct bna_res_info *res_info);
+struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_tx_destroy(struct bna_tx *tx);
+void bna_tx_enable(struct bna_tx *tx);
+void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *,
+ enum bna_cb_status));
+void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
+
+/**
+ * RX MODULE, RX, RXF
+ */
+
+/* Internal APIs */
+void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
+void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
+ const struct bna_mac *mac_addr);
+void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
+void bna_rxf_adv_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config);
+int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
+int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
+int rxf_process_packet_filter_default(struct bna_rxf *rxf);
+int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
+
+/* APIs for BNA */
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
+int bna_rx_state_get(struct bna_rx *rx);
+int bna_rxf_state_get(struct bna_rxf *rxf);
+
+/* APIs for PORT */
+void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
+
+/* APIs for BNAD */
+void bna_rx_res_req(struct bna_rx_config *rx_config,
+ struct bna_res_info *res_info);
+struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_rx_destroy(struct bna_rx *rx);
+void bna_rx_enable(struct bna_rx *rx);
+void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
+void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_update(struct bna_ccb *ccb);
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_hds_disable(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+
+/**
+ * BNAD
+ */
+
+/* Callbacks for BNA */
+void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats);
+
+/* Callbacks for DEVICE */
+void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
+void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
+void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
+void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
+
+/* Callbacks for port */
+void bnad_cb_port_link_status(struct bnad *bnad,
+ enum bna_link_status status);
+
+#endif /* __BNA_H__ */
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
new file mode 100644
index 00000000000..07b26598546
--- /dev/null
+++ b/drivers/net/bna/bna_ctrl.c
@@ -0,0 +1,3261 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfa_sm.h"
+#include "bfa_wc.h"
+
+static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+
+static void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+ int status)
+{
+ int i;
+ u8 prio_map;
+
+ port->llport.link_status = BNA_LINK_UP;
+ if (aen->cee_linkup)
+ port->llport.link_status = BNA_CEE_UP;
+
+ /* Compute the priority */
+ prio_map = aen->prio_map;
+ if (prio_map) {
+ for (i = 0; i < 8; i++) {
+ if ((prio_map >> i) & 0x1)
+ break;
+ }
+ port->priority = i;
+ } else
+ port->priority = 0;
+
+ /* Dispatch events */
+ bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+ bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+ port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+static void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+ port->llport.link_status = BNA_LINK_DOWN;
+
+ /* Dispatch events */
+ bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+ port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
+/**
+ * MBOX
+ */
+static int
+bna_is_aen(u8 msg_id)
+{
+ return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
+ msg_id == BFI_LL_I2H_LINK_UP_AEN;
+}
+
+static void
+bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
+{
+ struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
+
+ switch (aen->mh.msg_id) {
+ case BFI_LL_I2H_LINK_UP_AEN:
+ bna_port_cb_link_up(&bna->port, aen, aen->reason);
+ break;
+ case BFI_LL_I2H_LINK_DOWN_AEN:
+ bna_port_cb_link_down(&bna->port, aen->reason);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
+{
+ struct bna *bna = (struct bna *)(llarg);
+ struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
+ struct bfi_mhdr *cmd_h, *rsp_h;
+ struct bna_mbox_qe *mb_qe = NULL;
+ int to_post = 0;
+ u8 aen = 0;
+ char message[BNA_MESSAGE_SIZE];
+
+ aen = bna_is_aen(mb_rsp->mh.msg_id);
+
+ if (!aen) {
+ mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+ cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
+ rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
+
+ if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
+ (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
+ /* Remove the request from posted_q, update state */
+ list_del(&mb_qe->qe);
+ bna->mbox_mod.msg_pending--;
+ if (list_empty(&bna->mbox_mod.posted_q))
+ bna->mbox_mod.state = BNA_MBOX_FREE;
+ else
+ to_post = 1;
+
+ /* Dispatch the cbfn */
+ if (mb_qe->cbfn)
+ mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
+
+ /* Post the next entry, if needed */
+ if (to_post) {
+ mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+ bfa_nw_ioc_mbox_queue(&bna->device.ioc,
+ &mb_qe->cmd);
+ }
+ } else {
+ snprintf(message, BNA_MESSAGE_SIZE,
+ "No matching rsp for [%d:%d:%d]\n",
+ mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
+ mb_rsp->mh.mtag.i2htok);
+ pr_info("%s", message);
+ }
+
+ } else
+ bna_mbox_aen_callback(bna, msg);
+}
+
+static void
+bna_err_handler(struct bna *bna, u32 intr_status)
+{
+ u32 init_halt;
+
+ if (intr_status & __HALT_STATUS_BITS) {
+ init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
+ init_halt &= ~__FW_INIT_HALT_P;
+ writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
+ }
+
+ bfa_nw_ioc_error_isr(&bna->device.ioc);
+}
+
+void
+bna_mbox_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_ERR_INTR(intr_status)) {
+ bna_err_handler(bna, intr_status);
+ return;
+ }
+ if (BNA_IS_MBOX_INTR(intr_status))
+ bfa_nw_ioc_mbox_isr(&bna->device.ioc);
+}
+
+void
+bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
+{
+ struct bfi_mhdr *mh;
+
+ mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
+
+ mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
+ bna->mbox_mod.msg_ctr++;
+ bna->mbox_mod.msg_pending++;
+ if (bna->mbox_mod.state == BNA_MBOX_FREE) {
+ list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+ bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
+ bna->mbox_mod.state = BNA_MBOX_POSTED;
+ } else {
+ list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+ }
+}
+
+static void
+bna_mbox_flush_q(struct bna *bna, struct list_head *q)
+{
+ struct bna_mbox_qe *mb_qe = NULL;
+ struct bfi_mhdr *cmd_h;
+ struct list_head *mb_q;
+ void (*cbfn)(void *arg, int status);
+ void *cbarg;
+
+ mb_q = &bna->mbox_mod.posted_q;
+
+ while (!list_empty(mb_q)) {
+ bfa_q_deq(mb_q, &mb_qe);
+ cbfn = mb_qe->cbfn;
+ cbarg = mb_qe->cbarg;
+ bfa_q_qe_init(mb_qe);
+ bna->mbox_mod.msg_pending--;
+
+ cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
+ if (cbfn)
+ cbfn(cbarg, BNA_CB_NOT_EXEC);
+ }
+
+ bna->mbox_mod.state = BNA_MBOX_FREE;
+}
+
+static void
+bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
+{
+}
+
+static void
+bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
+{
+ bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
+}
+
+static void
+bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
+{
+ bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+ mbox_mod->state = BNA_MBOX_FREE;
+ mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
+ INIT_LIST_HEAD(&mbox_mod->posted_q);
+ mbox_mod->bna = bna;
+}
+
+static void
+bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
+{
+ mbox_mod->bna = NULL;
+}
+
+/**
+ * LLPORT
+ */
+#define call_llport_stop_cbfn(llport, status)\
+do {\
+ if ((llport)->stop_cbfn)\
+ (llport)->stop_cbfn(&(llport)->bna->port, status);\
+ (llport)->stop_cbfn = NULL;\
+} while (0)
+
+static void bna_fw_llport_up(struct bna_llport *llport);
+static void bna_fw_cb_llport_up(void *arg, int status);
+static void bna_fw_llport_down(struct bna_llport *llport);
+static void bna_fw_cb_llport_down(void *arg, int status);
+static void bna_llport_start(struct bna_llport *llport);
+static void bna_llport_stop(struct bna_llport *llport);
+static void bna_llport_fail(struct bna_llport *llport);
+
+enum bna_llport_event {
+ LLPORT_E_START = 1,
+ LLPORT_E_STOP = 2,
+ LLPORT_E_FAIL = 3,
+ LLPORT_E_UP = 4,
+ LLPORT_E_DOWN = 5,
+ LLPORT_E_FWRESP_UP = 6,
+ LLPORT_E_FWRESP_DOWN = 7
+};
+
+enum bna_llport_state {
+ BNA_LLPORT_STOPPED = 1,
+ BNA_LLPORT_DOWN = 2,
+ BNA_LLPORT_UP_RESP_WAIT = 3,
+ BNA_LLPORT_DOWN_RESP_WAIT = 4,
+ BNA_LLPORT_UP = 5,
+ BNA_LLPORT_LAST_RESP_WAIT = 6
+};
+
+bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+
+static struct bfa_sm_table llport_sm_table[] = {
+ {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
+ {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
+ {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
+ {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
+ {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
+ {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
+};
+
+static void
+bna_llport_sm_stopped_entry(struct bna_llport *llport)
+{
+ llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
+ call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
+}
+
+static void
+bna_llport_sm_stopped(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_START:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
+ case LLPORT_E_STOP:
+ call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
+ break;
+
+ case LLPORT_E_FAIL:
+ break;
+
+ case LLPORT_E_DOWN:
+ /* This event is received due to Rx objects failing */
+ /* No-op */
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_DOWN:
+ /**
+ * These events are received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_down_entry(struct bna_llport *llport)
+{
+ bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
+}
+
+static void
+bna_llport_sm_down(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ bna_fw_llport_up(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
+{
+ /**
+ * NOTE: Do not call bna_fw_llport_up() here. It would overrun the
+ * mbox queue element, because the down_resp_wait -> up_resp_wait
+ * transition on event LLPORT_E_UP leaves a request outstanding
+ */
+}
+
+static void
+bna_llport_sm_up_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
+ bna_fw_llport_up(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
+{
+ /**
+ * NOTE: Do not call bna_fw_llport_down() here. It would overrun the
+ * mbox queue element, because the up_resp_wait -> down_resp_wait
+ * transition on event LLPORT_E_DOWN leaves a request outstanding
+ */
+}
+
+static void
+bna_llport_sm_down_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_up_entry(struct bna_llport *llport)
+{
+}
+
+static void
+bna_llport_sm_up(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ bna_fw_llport_down(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
+{
+}
+
+static void
+bna_llport_sm_last_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ /**
+ * This event is received due to Rx objects stopping in
+ * parallel to llport
+ */
+ /* No-op */
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ /* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_fw_llport_admin_up(struct bna_llport *llport)
+{
+ struct bfi_ll_port_admin_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.up = BNA_STATUS_T_ENABLED;
+
+ bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_llport_up, llport);
+
+ bna_mbox_send(llport->bna, &llport->mbox_qe);
+}
+
+static void
+bna_fw_llport_up(struct bna_llport *llport)
+{
+ if (llport->type == BNA_PORT_T_REGULAR)
+ bna_fw_llport_admin_up(llport);
+}
+
+static void
+bna_fw_cb_llport_up(void *arg, int status)
+{
+ struct bna_llport *llport = (struct bna_llport *)arg;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
+}
+
+static void
+bna_fw_llport_admin_down(struct bna_llport *llport)
+{
+ struct bfi_ll_port_admin_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.up = BNA_STATUS_T_DISABLED;
+
+ bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_llport_down, llport);
+
+ bna_mbox_send(llport->bna, &llport->mbox_qe);
+}
+
+static void
+bna_fw_llport_down(struct bna_llport *llport)
+{
+ if (llport->type == BNA_PORT_T_REGULAR)
+ bna_fw_llport_admin_down(llport);
+}
+
+static void
+bna_fw_cb_llport_down(void *arg, int status)
+{
+ struct bna_llport *llport = (struct bna_llport *)arg;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
+}
+
+static void
+bna_port_cb_llport_stopped(struct bna_port *port,
+ enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+static void
+bna_llport_init(struct bna_llport *llport, struct bna *bna)
+{
+ llport->flags |= BNA_LLPORT_F_ENABLED;
+ llport->type = BNA_PORT_T_REGULAR;
+ llport->bna = bna;
+
+ llport->link_status = BNA_LINK_DOWN;
+
+ llport->admin_up_count = 0;
+
+ llport->stop_cbfn = NULL;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+}
+
+static void
+bna_llport_uninit(struct bna_llport *llport)
+{
+ llport->flags &= ~BNA_LLPORT_F_ENABLED;
+
+ llport->bna = NULL;
+}
+
+static void
+bna_llport_start(struct bna_llport *llport)
+{
+ bfa_fsm_send_event(llport, LLPORT_E_START);
+}
+
+static void
+bna_llport_stop(struct bna_llport *llport)
+{
+ llport->stop_cbfn = bna_port_cb_llport_stopped;
+
+ bfa_fsm_send_event(llport, LLPORT_E_STOP);
+}
+
+static void
+bna_llport_fail(struct bna_llport *llport)
+{
+ bfa_fsm_send_event(llport, LLPORT_E_FAIL);
+}
+
+static int
+bna_llport_state_get(struct bna_llport *llport)
+{
+ return bfa_sm_to_state(llport_sm_table, llport->fsm);
+}
+
+void
+bna_llport_admin_up(struct bna_llport *llport)
+{
+ llport->admin_up_count++;
+
+ if (llport->admin_up_count == 1) {
+ llport->flags |= BNA_LLPORT_F_RX_ENABLED;
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_UP);
+ }
+}
+
+void
+bna_llport_admin_down(struct bna_llport *llport)
+{
+ llport->admin_up_count--;
+
+ if (llport->admin_up_count == 0) {
+ llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+ }
+}
+
+/**
+ * PORT
+ */
+#define bna_port_chld_start(port)\
+do {\
+ enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bna_llport_start(&(port)->llport);\
+ bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
+ bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_chld_stop(port)\
+do {\
+ enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bna_llport_stop(&(port)->llport);\
+ bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
+ bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_chld_fail(port)\
+do {\
+ bna_llport_fail(&(port)->llport);\
+ bna_tx_mod_fail(&(port)->bna->tx_mod);\
+ bna_rx_mod_fail(&(port)->bna->rx_mod);\
+} while (0)
+
+#define bna_port_rx_start(port)\
+do {\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_rx_stop(port)\
+do {\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define call_port_stop_cbfn(port, status)\
+do {\
+ if ((port)->stop_cbfn)\
+ (port)->stop_cbfn((port)->stop_cbarg, status);\
+ (port)->stop_cbfn = NULL;\
+ (port)->stop_cbarg = NULL;\
+} while (0)
+
+#define call_port_pause_cbfn(port, status)\
+do {\
+ if ((port)->pause_cbfn)\
+ (port)->pause_cbfn((port)->bna->bnad, status);\
+ (port)->pause_cbfn = NULL;\
+} while (0)
+
+#define call_port_mtu_cbfn(port, status)\
+do {\
+ if ((port)->mtu_cbfn)\
+ (port)->mtu_cbfn((port)->bna->bnad, status);\
+ (port)->mtu_cbfn = NULL;\
+} while (0)
+
+static void bna_fw_pause_set(struct bna_port *port);
+static void bna_fw_cb_pause_set(void *arg, int status);
+static void bna_fw_mtu_set(struct bna_port *port);
+static void bna_fw_cb_mtu_set(void *arg, int status);
+
+enum bna_port_event {
+ PORT_E_START = 1,
+ PORT_E_STOP = 2,
+ PORT_E_FAIL = 3,
+ PORT_E_PAUSE_CFG = 4,
+ PORT_E_MTU_CFG = 5,
+ PORT_E_CHLD_STOPPED = 6,
+ PORT_E_FWRESP_PAUSE = 7,
+ PORT_E_FWRESP_MTU = 8
+};
+
+enum bna_port_state {
+ BNA_PORT_STOPPED = 1,
+ BNA_PORT_MTU_INIT_WAIT = 2,
+ BNA_PORT_PAUSE_INIT_WAIT = 3,
+ BNA_PORT_LAST_RESP_WAIT = 4,
+ BNA_PORT_STARTED = 5,
+ BNA_PORT_PAUSE_CFG_WAIT = 6,
+ BNA_PORT_RX_STOP_WAIT = 7,
+ BNA_PORT_MTU_CFG_WAIT = 8,
+ BNA_PORT_CHLD_STOP_WAIT = 9
+};
+
+bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, started, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
+ enum bna_port_event);
+
+static struct bfa_sm_table port_sm_table[] = {
+ {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
+ {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
+ {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
+ {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
+ {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
+ {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
+ {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
+ {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
+ {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
+};
+
+static void
+bna_port_sm_stopped_entry(struct bna_port *port)
+{
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+ call_port_stop_cbfn(port, BNA_CB_SUCCESS);
+}
+
+static void
+bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_START:
+ bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
+ break;
+
+ case PORT_E_STOP:
+ call_port_stop_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_FAIL:
+ /* No-op */
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_MTU_CFG:
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ /**
+ * This event is received due to LLPort, Tx and Rx objects
+ * failing
+ */
+ /* No-op */
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ case PORT_E_FWRESP_MTU:
+ /**
+ * These events are received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
+{
+ bna_fw_mtu_set(port);
+}
+
+static void
+bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ /* No-op */
+ break;
+
+ case PORT_E_MTU_CFG:
+ port->flags |= BNA_PORT_F_MTU_CHANGED;
+ break;
+
+ case PORT_E_FWRESP_MTU:
+ if (port->flags & BNA_PORT_F_MTU_CHANGED) {
+ port->flags &= ~BNA_PORT_F_MTU_CHANGED;
+ bna_fw_mtu_set(port);
+ } else {
+ bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_pause_init_wait_entry(struct bna_port *port)
+{
+ bna_fw_pause_set(port);
+}
+
+static void
+bna_port_sm_pause_init_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ port->flags |= BNA_PORT_F_PAUSE_CHANGED;
+ break;
+
+ case PORT_E_MTU_CFG:
+ port->flags |= BNA_PORT_F_MTU_CHANGED;
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
+ port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
+ bna_fw_pause_set(port);
+ } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
+ port->flags &= ~BNA_PORT_F_MTU_CHANGED;
+ bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
+ } else {
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ bna_port_chld_start(port);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_last_resp_wait_entry(struct bna_port *port)
+{
+}
+
+static void
+bna_port_sm_last_resp_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ case PORT_E_FWRESP_PAUSE:
+ case PORT_E_FWRESP_MTU:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_started_entry(struct bna_port *port)
+{
+ /**
+ * NOTE: Do not call bna_port_chld_start() here, since it will be
+ * inadvertently called during pause_cfg_wait->started transition
+ * as well
+ */
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+}
+
+static void
+bna_port_sm_started(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
+ break;
+
+ case PORT_E_MTU_CFG:
+ bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
+{
+ bna_fw_pause_set(port);
+}
+
+static void
+bna_port_sm_pause_cfg_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
+{
+ bna_port_rx_stop(port);
+}
+
+static void
+bna_port_sm_rx_stop_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
+{
+ bna_fw_mtu_set(port);
+}
+
+static void
+bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_FWRESP_MTU:
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ bna_port_rx_start(port);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
+{
+ bna_port_chld_stop(port);
+}
+
+static void
+bna_port_sm_chld_stop_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_fw_pause_set(struct bna_port *port)
+{
+ struct bfi_ll_set_pause_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.tx_pause = port->pause_config.tx_pause;
+ ll_req.rx_pause = port->pause_config.rx_pause;
+
+ bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_pause_set, port);
+
+ bna_mbox_send(port->bna, &port->mbox_qe);
+}
+
+static void
+bna_fw_cb_pause_set(void *arg, int status)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+ bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
+}
+
+static void
+bna_fw_mtu_set(struct bna_port *port)
+{
+ struct bfi_ll_mtu_info_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
+ ll_req.mtu = htons((u16)port->mtu);
+
+ bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_mtu_set, port);
+ bna_mbox_send(port->bna, &port->mbox_qe);
+}
+
+static void
+bna_fw_cb_mtu_set(void *arg, int status)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+ bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
+}
+
+static void
+bna_port_cb_chld_stopped(void *arg)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
+}
+
+static void
+bna_port_init(struct bna_port *port, struct bna *bna)
+{
+ port->bna = bna;
+ port->flags = 0;
+ port->mtu = 0;
+ port->type = BNA_PORT_T_REGULAR;
+
+ port->link_cbfn = bnad_cb_port_link_status;
+
+ port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
+ port->chld_stop_wc.wc_cbarg = port;
+ port->chld_stop_wc.wc_count = 0;
+
+ port->stop_cbfn = NULL;
+ port->stop_cbarg = NULL;
+
+ port->pause_cbfn = NULL;
+
+ port->mtu_cbfn = NULL;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+
+ bna_llport_init(&port->llport, bna);
+}
+
+static void
+bna_port_uninit(struct bna_port *port)
+{
+ bna_llport_uninit(&port->llport);
+
+ port->flags = 0;
+
+ port->bna = NULL;
+}
+
+static int
+bna_port_state_get(struct bna_port *port)
+{
+ return bfa_sm_to_state(port_sm_table, port->fsm);
+}
+
+static void
+bna_port_start(struct bna_port *port)
+{
+ port->flags |= BNA_PORT_F_DEVICE_READY;
+ if (port->flags & BNA_PORT_F_ENABLED)
+ bfa_fsm_send_event(port, PORT_E_START);
+}
+
+static void
+bna_port_stop(struct bna_port *port)
+{
+ port->stop_cbfn = bna_device_cb_port_stopped;
+ port->stop_cbarg = &port->bna->device;
+
+ port->flags &= ~BNA_PORT_F_DEVICE_READY;
+ bfa_fsm_send_event(port, PORT_E_STOP);
+}
+
+static void
+bna_port_fail(struct bna_port *port)
+{
+ port->flags &= ~BNA_PORT_F_DEVICE_READY;
+ bfa_fsm_send_event(port, PORT_E_FAIL);
+}
+
+void
+bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+void
+bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+int
+bna_port_mtu_get(struct bna_port *port)
+{
+ return port->mtu;
+}
+
+void
+bna_port_enable(struct bna_port *port)
+{
+ if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
+ return;
+
+ port->flags |= BNA_PORT_F_ENABLED;
+
+ if (port->flags & BNA_PORT_F_DEVICE_READY)
+ bfa_fsm_send_event(port, PORT_E_START);
+}
+
+void
+bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
+ void (*cbfn)(void *, enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
+ return;
+ }
+
+ port->stop_cbfn = cbfn;
+ port->stop_cbarg = port->bna->bnad;
+
+ port->flags &= ~BNA_PORT_F_ENABLED;
+
+ bfa_fsm_send_event(port, PORT_E_STOP);
+}
+
+void
+bna_port_pause_config(struct bna_port *port,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *, enum bna_cb_status))
+{
+ port->pause_config = *pause_config;
+
+ port->pause_cbfn = cbfn;
+
+ bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
+}
+
+void
+bna_port_mtu_set(struct bna_port *port, int mtu,
+ void (*cbfn)(struct bnad *, enum bna_cb_status))
+{
+ port->mtu = mtu;
+
+ port->mtu_cbfn = cbfn;
+
+ bfa_fsm_send_event(port, PORT_E_MTU_CFG);
+}
+
+void
+bna_port_mac_get(struct bna_port *port, mac_t *mac)
+{
+ *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
+}
+
+/**
+ * DEVICE
+ */
+#define enable_mbox_intr(_device)\
+do {\
+ u32 intr_status;\
+ bna_intr_status_get((_device)->bna, intr_status);\
+ bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
+ bna_mbox_intr_enable((_device)->bna);\
+} while (0)
+
+#define disable_mbox_intr(_device)\
+do {\
+ bna_mbox_intr_disable((_device)->bna);\
+ bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
+} while (0)
+
+static const struct bna_chip_regs_offset reg_offset[] =
+{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
+ HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
+{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
+ HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
+{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
+ HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
+{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
+ HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
+};
+
+enum bna_device_event {
+ DEVICE_E_ENABLE = 1,
+ DEVICE_E_DISABLE = 2,
+ DEVICE_E_IOC_READY = 3,
+ DEVICE_E_IOC_FAILED = 4,
+ DEVICE_E_IOC_DISABLED = 5,
+ DEVICE_E_IOC_RESET = 6,
+ DEVICE_E_PORT_STOPPED = 7,
+};
+
+enum bna_device_state {
+ BNA_DEVICE_STOPPED = 1,
+ BNA_DEVICE_IOC_READY_WAIT = 2,
+ BNA_DEVICE_READY = 3,
+ BNA_DEVICE_PORT_STOP_WAIT = 4,
+ BNA_DEVICE_IOC_DISABLE_WAIT = 5,
+ BNA_DEVICE_FAILED = 6
+};
+
+bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ready, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, failed, struct bna_device,
+ enum bna_device_event);
+
+static struct bfa_sm_table device_sm_table[] = {
+ {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
+ {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
+ {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
+ {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
+ {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
+ {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
+};
+
+static void
+bna_device_sm_stopped_entry(struct bna_device *device)
+{
+ if (device->stop_cbfn)
+ device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
+
+ device->stop_cbfn = NULL;
+ device->stop_cbarg = NULL;
+}
+
+static void
+bna_device_sm_stopped(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_ENABLE:
+ if (device->intr_type == BNA_INTR_T_MSIX)
+ bna_mbox_msix_idx_set(device);
+ bfa_nw_ioc_enable(&device->ioc);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ break;
+
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
+{
+ /**
+ * Do not call bfa_ioc_enable() here. It must be called in the
+ * previous state due to failed -> ioc_ready_wait transition.
+ */
+}
+
+static void
+bna_device_sm_ioc_ready_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_INTERRUPT);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_READY:
+ bfa_fsm_set_state(device, bna_device_sm_ready);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ready_entry(struct bna_device *device)
+{
+ bna_mbox_mod_start(&device->bna->mbox_mod);
+ bna_port_start(&device->bna->port);
+
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_SUCCESS);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+}
+
+static void
+bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_port_stop_wait_entry(struct bna_device *device)
+{
+ bna_port_stop(&device->bna->port);
+}
+
+static void
+bna_device_sm_port_stop_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_PORT_STOPPED:
+ bna_mbox_mod_stop(&device->bna->mbox_mod);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ disable_mbox_intr(device);
+ bna_port_fail(&device->bna->port);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
+{
+ bfa_nw_ioc_disable(&device->ioc);
+}
+
+static void
+bna_device_sm_ioc_disable_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_IOC_DISABLED:
+ disable_mbox_intr(device);
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_failed_entry(struct bna_device *device)
+{
+ disable_mbox_intr(device);
+ bna_port_fail(&device->bna->port);
+ bna_mbox_mod_stop(&device->bna->mbox_mod);
+
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_FAIL);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+}
+
+static void
+bna_device_sm_failed(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+/* IOC callback functions */
+
+static void
+bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ if (error)
+ bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
+ else
+ bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
+}
+
+static void
+bna_device_cb_iocll_disabled(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
+}
+
+static void
+bna_device_cb_iocll_failed(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
+}
+
+static void
+bna_device_cb_iocll_reset(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
+}
+
+static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
+ bna_device_cb_iocll_ready,
+ bna_device_cb_iocll_disabled,
+ bna_device_cb_iocll_failed,
+ bna_device_cb_iocll_reset
+};
+
+/* device */
+static void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u8 *kva;
+ u64 dma;
+
+ device->bna = bna;
+
+ kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+ /**
+ * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+ * DMA memory.
+ */
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+ bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+ bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+ kva += bfa_nw_cee_meminfo();
+ dma += bfa_nw_cee_meminfo();
+
+}
+
+static void
+bna_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u64 dma;
+
+ device->bna = bna;
+
+ /**
+ * Attach IOC and claim:
+ * 1. DMA memory for IOC attributes
+ * 2. Kernel memory for FW trace
+ */
+ bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
+ bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
+
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
+ bfa_nw_ioc_mem_claim(&device->ioc,
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
+ dma);
+
+ bna_adv_device_init(device, bna, res_info);
+ /*
+ * Initialize mbox_mod only after IOC, so that mbox handler
+ * registration goes through
+ */
+ device->intr_type =
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
+ device->vector =
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
+ bna_mbox_mod_init(&bna->mbox_mod, bna);
+
+ device->ready_cbfn = device->stop_cbfn = NULL;
+ device->ready_cbarg = device->stop_cbarg = NULL;
+
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+}
+
+static void
+bna_device_uninit(struct bna_device *device)
+{
+ bna_mbox_mod_uninit(&device->bna->mbox_mod);
+
+ bfa_nw_ioc_detach(&device->ioc);
+
+ device->bna = NULL;
+}
+
+static void
+bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
+{
+ struct bna_device *device = (struct bna_device *)arg;
+
+ bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
+}
+
+static int
+bna_device_status_get(struct bna_device *device)
+{
+ return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
+}
+
+void
+bna_device_enable(struct bna_device *device)
+{
+ if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
+ bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
+ return;
+ }
+
+ device->ready_cbfn = bnad_cb_device_enabled;
+ device->ready_cbarg = device->bna->bnad;
+
+ bfa_fsm_send_event(device, DEVICE_E_ENABLE);
+}
+
+void
+bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
+ return;
+ }
+
+ device->stop_cbfn = bnad_cb_device_disabled;
+ device->stop_cbarg = device->bna->bnad;
+
+ bfa_fsm_send_event(device, DEVICE_E_DISABLE);
+}
+
+static int
+bna_device_state_get(struct bna_device *device)
+{
+ return bfa_sm_to_state(device_sm_table, device->fsm);
+}
+
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 12},
+ {6, 10},
+ {5, 10},
+ {4, 8},
+ {3, 6},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+/* utils */
+
+static void
+bna_adv_res_req(struct bna_res_info *res_info)
+{
+ /* DMA memory for COMMON_MODULE */
+ res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
+ bfa_nw_cee_meminfo(), PAGE_SIZE);
+
+ /* Virtual memory for retrieving fw_trc */
+ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+
+ /* DMA memory for retrieving stats */
+ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
+ ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
+
+ /* Virtual memory for soft stats */
+ res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
+ sizeof(struct bna_sw_stats);
+}
+
+static void
+bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
+{
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+ struct list_head *txq_qe;
+ struct list_head *rxp_qe;
+ struct list_head *mac_qe;
+ int i;
+
+ sw_stats->device_state = bna_device_state_get(&bna->device);
+ sw_stats->port_state = bna_port_state_get(&bna->port);
+ sw_stats->port_flags = bna->port.flags;
+ sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
+ sw_stats->priority = bna->port.priority;
+
+ i = 0;
+ list_for_each(qe, &bna->tx_mod.tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
+ sw_stats->tx_stats[i].tx_flags = tx->flags;
+
+ sw_stats->tx_stats[i].num_txqs = 0;
+ sw_stats->tx_stats[i].txq_bmap[0] = 0;
+ sw_stats->tx_stats[i].txq_bmap[1] = 0;
+ list_for_each(txq_qe, &tx->txq_q) {
+ txq = (struct bna_txq *)txq_qe;
+ if (txq->txq_id < 32)
+ sw_stats->tx_stats[i].txq_bmap[0] |=
+ ((u32)1 << txq->txq_id);
+ else
+ sw_stats->tx_stats[i].txq_bmap[1] |=
+ ((u32)
+ 1 << (txq->txq_id - 32));
+ sw_stats->tx_stats[i].num_txqs++;
+ }
+
+ sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
+
+ i++;
+ }
+ sw_stats->num_active_tx = i;
+
+ i = 0;
+ list_for_each(qe, &bna->rx_mod.rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
+ sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
+
+ sw_stats->rx_stats[i].num_rxps = 0;
+ sw_stats->rx_stats[i].num_rxqs = 0;
+ sw_stats->rx_stats[i].rxq_bmap[0] = 0;
+ sw_stats->rx_stats[i].rxq_bmap[1] = 0;
+ sw_stats->rx_stats[i].cq_bmap[0] = 0;
+ sw_stats->rx_stats[i].cq_bmap[1] = 0;
+ list_for_each(rxp_qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)rxp_qe;
+
+ sw_stats->rx_stats[i].num_rxqs += 1;
+
+ if (rxp->type == BNA_RXP_SINGLE) {
+ if (rxp->rxq.single.only->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.single.only->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.single.only->rxq_id - 32));
+ }
+ } else {
+ if (rxp->rxq.slr.large->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.slr.large->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.slr.large->rxq_id - 32));
+ }
+
+ if (rxp->rxq.slr.small->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.slr.small->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.slr.small->rxq_id - 32));
+ }
+ sw_stats->rx_stats[i].num_rxqs += 1;
+ }
+
+ if (rxp->cq.cq_id < 32)
+ sw_stats->rx_stats[i].cq_bmap[0] |=
+ (1 << rxp->cq.cq_id);
+ else
+ sw_stats->rx_stats[i].cq_bmap[1] |=
+ (1 << (rxp->cq.cq_id - 32));
+
+ sw_stats->rx_stats[i].num_rxps++;
+ }
+
+ sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
+ sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
+ sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
+
+ sw_stats->rx_stats[i].num_active_ucast = 0;
+ if (rx->rxf.ucast_active_mac)
+ sw_stats->rx_stats[i].num_active_ucast++;
+ list_for_each(mac_qe, &rx->rxf.ucast_active_q)
+ sw_stats->rx_stats[i].num_active_ucast++;
+
+ sw_stats->rx_stats[i].num_active_mcast = 0;
+ list_for_each(mac_qe, &rx->rxf.mcast_active_q)
+ sw_stats->rx_stats[i].num_active_mcast++;
+
+ sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
+ sw_stats->rx_stats[i].vlan_filter_status =
+ rx->rxf.vlan_filter_status;
+ memcpy(sw_stats->rx_stats[i].vlan_filter_table,
+ rx->rxf.vlan_filter_table,
+ sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
+
+ sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
+ sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
+
+ i++;
+ }
+ sw_stats->num_active_rx = i;
+}
+
+static void
+bna_fw_cb_stats_get(void *arg, int status)
+{
+ struct bna *bna = (struct bna *)arg;
+ u64 *p_stats;
+ int i, count;
+ int rxf_count, txf_count;
+ u64 rxf_bmap, txf_bmap;
+
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+
+ if (status == 0) {
+ p_stats = (u64 *)bna->stats.hw_stats;
+ count = sizeof(struct bfi_ll_stats) / sizeof(u64);
+ for (i = 0; i < count; i++)
+ p_stats[i] = cpu_to_be64(p_stats[i]);
+
+ rxf_count = 0;
+ rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
+ ((u64)bna->stats.rxf_bmap[1] << 32);
+ for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
+ if (rxf_bmap & ((u64)1 << i))
+ rxf_count++;
+
+ txf_count = 0;
+ txf_bmap = (u64)bna->stats.txf_bmap[0] |
+ ((u64)bna->stats.txf_bmap[1] << 32);
+ for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
+ if (txf_bmap & ((u64)1 << i))
+ txf_count++;
+
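+ /*
+ * The DMAed buffer packs the per-RxF stat blocks followed by the
+ * per-TxF stat blocks for only the enabled functions, starting at
+ * rxf_stats[0]. Advance p_stats to the end of that packed region;
+ * the loops below walk backwards from there, scattering each block
+ * into its per-index slot.
+ */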
+ p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
+ ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
+ txf_count * sizeof(struct bfi_ll_stats_txf))/
+ sizeof(u64));
+
+ /* Populate the TXF stats from the firmware DMAed copy */
+ for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
+ if (txf_bmap & ((u64)1 << i)) {
+ p_stats -= sizeof(struct bfi_ll_stats_txf)/
+ sizeof(u64);
+ memcpy(&bna->stats.hw_stats->txf_stats[i],
+ p_stats,
+ sizeof(struct bfi_ll_stats_txf));
+ }
+
+ /* Populate the RXF stats from the firmware DMAed copy */
+ for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
+ if (rxf_bmap & ((u64)1 << i)) {
+ p_stats -= sizeof(struct bfi_ll_stats_rxf)/
+ sizeof(u64);
+ memcpy(&bna->stats.hw_stats->rxf_stats[i],
+ p_stats,
+ sizeof(struct bfi_ll_stats_rxf));
+ }
+
+ bna_sw_stats_get(bna, bna->stats.sw_stats);
+ bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
+ } else
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+}
+
+static void
+bna_fw_stats_get(struct bna *bna)
+{
+ struct bfi_ll_stats_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
+ ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
+
+ ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
+ ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
+ ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
+ ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);
+
+ ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
+ ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
+
+ bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_stats_get, bna);
+ bna_mbox_send(bna, &bna->mbox_qe);
+
+ bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
+ bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
+ bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
+ bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
+}
+
+void
+bna_stats_get(struct bna *bna)
+{
+ if (bna_device_status_get(&bna->device))
+ bna_fw_stats_get(bna);
+ else
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+}
+
+/* IB */
+static void
+bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
+{
+ ib->ib_config.coalescing_timeo = coalescing_timeo;
+
+ if (ib->start_count)
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->ib_config.coalescing_timeo, 0);
+}
+
+/* RxF */
+void
+bna_rxf_adv_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config)
+{
+ switch (q_config->rxp_type) {
+ case BNA_RXP_SINGLE:
+ /* No-op */
+ break;
+ case BNA_RXP_SLR:
+ rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
+ break;
+ case BNA_RXP_HDS:
+ rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
+ rxf->hds_cfg.header_size =
+ q_config->hds_config.header_size;
+ rxf->forced_offset = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
+ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
+ rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
+ rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
+ memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
+ &q_config->rss_config.toeplitz_hash_key[0],
+ sizeof(rxf->rss_cfg.toeplitz_hash_key));
+ }
+}
+
+static void
+rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
+{
+ struct bfi_ll_rxf_req req;
+
+ bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+ req.rxf_id = rxf->rxf_id;
+ req.enable = status;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
+ rxf_cb_cam_fltr_mbox_cmd, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bna_rx_fndb_ram *rx_fndb_ram;
+ u32 ctrl_flags;
+ int i;
+
+ rx_fndb_ram = (struct bna_rx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
+ RX_FNDB_RAM_BASE_OFFSET);
+
+ for (i = 0; i < BFI_MAX_RXF; i++) {
+ if (status == BNA_STATUS_T_ENABLED) {
+ if (i == rxf->rxf_id)
+ continue;
+
+ ctrl_flags =
+ readl(&rx_fndb_ram[i].control_flags);
+ ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+ writel(ctrl_flags,
+ &rx_fndb_ram[i].control_flags);
+ } else {
+ ctrl_flags =
+ readl(&rx_fndb_ram[i].control_flags);
+ ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+ writel(ctrl_flags,
+ &rx_fndb_ram[i].control_flags);
+ }
+ }
+}
+
+int
+rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Add additional MAC entries */
+ if (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ return 1;
+ }
+
+ /* Delete MAC addresses previously added */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable promiscuous mode */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_PROMISC;
+
+ /* Disable VLAN filter to allow all VLANs */
+ __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable default mode */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
+
+ /* Disable VLAN filter to allow all VLANs */
+ __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+ /* Redirect all other RxF vlan filtering to this one */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* Enable/disable allmulti mode */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
+
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* 1. delete pending ucast entries */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ /* 2. clear active ucast entries; move them to pending_add_q */
+ if (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 6. Execute pending promisc mode disable command */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 7. Clear active promisc mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* move promisc configuration from active -> pending */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 8. Execute pending default mode disable command */
+ if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 9. Clear active default mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ /* move default configuration from active -> pending */
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* 10. Execute pending allmulti mode disable command */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 11. Clear active allmulti mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ /* move allmulti configuration from active -> pending */
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* 1. Move active ucast entries to pending_add_q */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_add_q);
+ }
+
+ /* 2. Throw away delete pending ucast entries */
+ while (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+}
+
+void
+rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 6. Clear pending promisc mode disable */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+ }
+
+ /* 7. Move promisc mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ }
+
+}
+
+void
+rxf_reset_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 8. Clear pending default mode disable */
+ if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+ }
+
+ /* 9. Move default mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ }
+}
+
+void
+rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* 10. Clear pending allmulti mode disable */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ }
+
+ /* 11. Move allmulti mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ }
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+static int
+rxf_promisc_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There can not be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
+ /* Schedule enable */
+ } else {
+ /* Promisc mode should not be active in the system */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_promisc_id = rxf->rxf_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+static int
+rxf_promisc_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There can not be any pending disable */
+
+ /* Turn off pending enable command, if any */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Promisc mode should not be active */
+ /* system promisc state should be pending */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ /* Remove the promisc state from the system */
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* Promisc mode should be active in the system */
+ promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+
+ } else {
+ /* Do nothing if already disabled */
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+static int
+rxf_default_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There can not be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
+ /* Schedule enable */
+ } else {
+ /* Default mode should not be active in the system */
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_default_id = rxf->rxf_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+static int
+rxf_default_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There can not be any pending disable */
+
+ /* Turn off pending enable command, if any */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Default mode should not be active */
+ /* system default state should be pending */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ /* Remove the default state from the system */
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ /* Default mode should be active in the system */
+ default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+
+ } else {
+ /* Do nothing if already disabled */
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+static int
+rxf_allmulti_enable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ /* There can not be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
+ /* Schedule enable */
+ } else {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+static int
+rxf_allmulti_disable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ /* There can not be any pending disable */
+
+ /* Turn off pending enable command, if any */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Allmulti mode should not be active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int need_hw_config = 0;
+
+ /* Error checks */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ /* If promisc mode is already enabled elsewhere in the system */
+ if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
+ (rx->bna->rxf_promisc_id != rxf->rxf_id))
+ goto err_return;
+
+ /* If default mode is already enabled in the system */
+ if (rx->bna->rxf_default_id != BFI_MAX_RXF)
+ goto err_return;
+
+ /* Trying to enable promiscuous and default mode together */
+ if (is_default_enable(new_mode, bitmask))
+ goto err_return;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ /* If default mode is already enabled elsewhere in the system */
+ if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
+ (rx->bna->rxf_default_id != rxf->rxf_id)) {
+ goto err_return;
+ }
+
+ /* If promiscuous mode is already enabled in the system */
+ if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
+ goto err_return;
+ }
+
+ /* Process the commands */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ if (rxf_promisc_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_promisc_disable(new_mode, bitmask)) {
+ if (rxf_promisc_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ if (rxf_default_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_default_disable(new_mode, bitmask)) {
+ if (rxf_default_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_allmulti_enable(new_mode, bitmask)) {
+ if (rxf_allmulti_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_allmulti_disable(new_mode, bitmask)) {
+ if (rxf_allmulti_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ /* Trigger h/w if needed */
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ return BNA_CB_FAIL;
+}
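+
+/*
+ * Illustrative sketch (not part of the driver build): how a caller such
+ * as bnad might request promiscuous mode on an Rx object. The callback
+ * bnad_cb_rx_mode_set() is hypothetical, and the assumption here is that
+ * the bitmask argument selects which mode bits are being changed, as the
+ * is_*_enable()/is_*_disable() helpers suggest.
+ */
+#if 0
+static void
+bnad_cb_rx_mode_set(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ /* Called once the CAM filter programming completes */
+ pr_info("rx mode set completed: %d\n", status);
+}
+
+static void
+example_enable_promisc(struct bna_rx *rx)
+{
+ if (bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC,
+ bnad_cb_rx_mode_set) != BNA_CB_SUCCESS)
+ pr_err("promisc enable rejected\n");
+}
+#endif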
+
+/* RxF <- bnad */
+void
+bna_rx_vlanfilter_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/* Rx */
+
+/* Rx <- bnad */
+void
+bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
+ bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
+ }
+}
+
+/* Rx <- bnad */
+void
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
+{
+ int i, j;
+
+ for (i = 0; i < BNA_LOAD_T_MAX; i++)
+ for (j = 0; j < BNA_BIAS_T_MAX; j++)
+ bna->rx_mod.dim_vector[i][j] = vector[i][j];
+}
+
+/* Rx <- bnad */
+void
+bna_rx_dim_update(struct bna_ccb *ccb)
+{
+ struct bna *bna = ccb->cq->rx->bna;
+ u32 load, bias;
+ u32 pkt_rt, small_rt, large_rt;
+ u8 coalescing_timeo;
+
+ if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
+ (ccb->pkt_rate.large_pkt_cnt == 0))
+ return;
+
+ /* Arrive at preconfigured coalescing timeo value based on pkt rate */
+
+ small_rt = ccb->pkt_rate.small_pkt_cnt;
+ large_rt = ccb->pkt_rate.large_pkt_cnt;
+
+ pkt_rt = small_rt + large_rt;
+
+ if (pkt_rt < BNA_PKT_RATE_10K)
+ load = BNA_LOAD_T_LOW_4;
+ else if (pkt_rt < BNA_PKT_RATE_20K)
+ load = BNA_LOAD_T_LOW_3;
+ else if (pkt_rt < BNA_PKT_RATE_30K)
+ load = BNA_LOAD_T_LOW_2;
+ else if (pkt_rt < BNA_PKT_RATE_40K)
+ load = BNA_LOAD_T_LOW_1;
+ else if (pkt_rt < BNA_PKT_RATE_50K)
+ load = BNA_LOAD_T_HIGH_1;
+ else if (pkt_rt < BNA_PKT_RATE_60K)
+ load = BNA_LOAD_T_HIGH_2;
+ else if (pkt_rt < BNA_PKT_RATE_80K)
+ load = BNA_LOAD_T_HIGH_3;
+ else
+ load = BNA_LOAD_T_HIGH_4;
+
+ if (small_rt > (large_rt << 1))
+ bias = 0;
+ else
+ bias = 1;
+
+ ccb->pkt_rate.small_pkt_cnt = 0;
+ ccb->pkt_rate.large_pkt_cnt = 0;
+
+ coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
+ ccb->rx_coalescing_timeo = coalescing_timeo;
+
+ /* Set it to IB */
+ bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
+}
+
+/* Tx */
+/* TX <- bnad */
+void
+bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
+ }
+}
+
+/*
+ * Private data
+ */
+
+struct bna_ritseg_pool_cfg {
+ u32 pool_size;
+ u32 pool_entry_size;
+};
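+/*
+ * Instantiates the static ritseg_pool_cfg[] table; the init_ritseg_pool()
+ * macro and the per-pool sizes are defined in bna_hw.h.
+ */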
+init_ritseg_pool(ritseg_pool_cfg);
+
+/*
+ * Private functions
+ */
+static void
+bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ ucam_mod->ucmac = (struct bna_mac *)
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ucam_mod->free_q);
+ for (i = 0; i < BFI_MAX_UCMAC; i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
+ }
+
+ ucam_mod->bna = bna;
+}
+
+static void
+bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &ucam_mod->free_q)
+ i++;
+
+ ucam_mod->bna = NULL;
+}
+
+static void
+bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ mcam_mod->mcmac = (struct bna_mac *)
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_q);
+ for (i = 0; i < BFI_MAX_MCMAC; i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
+ }
+
+ mcam_mod->bna = bna;
+}
+
+static void
+bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &mcam_mod->free_q)
+ i++;
+
+ mcam_mod->bna = NULL;
+}
+
+static void
+bna_rit_mod_init(struct bna_rit_mod *rit_mod,
+ struct bna_res_info *res_info)
+{
+ int i;
+ int j;
+ int count;
+ int offset;
+
+ rit_mod->rit = (struct bna_rit_entry *)
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
+ rit_mod->rit_segment = (struct bna_rit_segment *)
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
+
+ count = 0;
+ offset = 0;
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
+ for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
+ bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
+ rit_mod->rit_segment[count].max_rit_size =
+ ritseg_pool_cfg[i].pool_entry_size;
+ rit_mod->rit_segment[count].rit_offset = offset;
+ rit_mod->rit_segment[count].rit =
+ &rit_mod->rit[offset];
+ list_add_tail(&rit_mod->rit_segment[count].qe,
+ &rit_mod->rit_seg_pool[i]);
+ count++;
+ offset += ritseg_pool_cfg[i].pool_entry_size;
+ }
+ }
+}
+
+static void
+bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
+{
+ struct bna_rit_segment *rit_segment;
+ struct list_head *qe;
+ int i;
+ int j;
+
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ j = 0;
+ list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
+ rit_segment = (struct bna_rit_segment *)qe;
+ j++;
+ }
+ }
+}
+
+/*
+ * Public functions
+ */
+
+/* Called during probe(), before calling bna_init() */
+void
+bna_res_req(struct bna_res_info *res_info)
+{
+ bna_adv_res_req(res_info);
+
+ /* DMA memory for retrieving IOC attributes */
+ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
+ ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
+
+ /* DMA memory for index segment of an IB */
+ res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
+ BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
+
+ /* Virtual memory for IB objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
+ BFI_MAX_IB * sizeof(struct bna_ib);
+
+ /* Virtual memory for intr objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
+ BFI_MAX_IB * sizeof(struct bna_intr);
+
+ /* Virtual memory for idx_seg objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
+ BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
+
+ /* Virtual memory for Tx objects - stored by Tx module */
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
+ BFI_MAX_TXQ * sizeof(struct bna_tx);
+
+ /* Virtual memory for TxQ - stored by Tx module */
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
+ BFI_MAX_TXQ * sizeof(struct bna_txq);
+
+ /* Virtual memory for Rx objects - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rx);
+
+ /* Virtual memory for RxPath - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rxp);
+
+ /* Virtual memory for RxQ - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rxq);
+
+ /* Virtual memory for Unicast MAC address - stored by ucam module */
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
+ BFI_MAX_UCMAC * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast MAC address - stored by mcam module */
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
+ BFI_MAX_MCMAC * sizeof(struct bna_mac);
+
+ /* Virtual memory for RIT entries */
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
+ BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
+
+ /* Virtual memory for RIT segment table */
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
+ BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
+
+ /* Interrupt resource for mailbox interrupt */
+ res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
+}
+
+/* Called during probe() */
+void
+bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info)
+{
+ bna->bnad = bnad;
+ bna->pcidev = *pcidev;
+
+ bna->stats.hw_stats = (struct bfi_ll_stats *)
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
+ bna->hw_stats_dma.msb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
+ bna->hw_stats_dma.lsb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
+ bna->stats.sw_stats = (struct bna_sw_stats *)
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
+
+ bna->regs.page_addr = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].page_addr;
+ bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].fn_int_status;
+ bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].fn_int_mask;
+
+ if (bna->pcidev.pci_func < 3)
+ bna->port_num = 0;
+ else
+ bna->port_num = 1;
+
+ /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
+ bna_device_init(&bna->device, bna, res_info);
+
+ bna_port_init(&bna->port, bna);
+
+ bna_tx_mod_init(&bna->tx_mod, bna, res_info);
+
+ bna_rx_mod_init(&bna->rx_mod, bna, res_info);
+
+ bna_ib_mod_init(&bna->ib_mod, bna, res_info);
+
+ bna_rit_mod_init(&bna->rit_mod, res_info);
+
+ bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
+
+ bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
+
+ bna->rxf_default_id = BFI_MAX_RXF;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Mbox q element for posting stat request to f/w */
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+}
+
+void
+bna_uninit(struct bna *bna)
+{
+ bna_mcam_mod_uninit(&bna->mcam_mod);
+
+ bna_ucam_mod_uninit(&bna->ucam_mod);
+
+ bna_rit_mod_uninit(&bna->rit_mod);
+
+ bna_ib_mod_uninit(&bna->ib_mod);
+
+ bna_rx_mod_uninit(&bna->rx_mod);
+
+ bna_tx_mod_uninit(&bna->tx_mod);
+
+ bna_port_uninit(&bna->port);
+
+ bna_device_uninit(&bna->device);
+
+ bna->bnad = NULL;
+}
+
+struct bna_mac *
+bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&ucam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&ucam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &ucam_mod->free_q);
+}
+
+struct bna_mac *
+bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &mcam_mod->free_q);
+}
+
+/**
+ * Note: This should be called in the same locking context as the call to
+ * bna_rit_mod_seg_get()
+ */
+int
+bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
+{
+ int i;
+
+ /* Select the pool for seg_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ if (i == BFI_RIT_SEG_TOTAL_POOLS)
+ return 0;
+
+ if (list_empty(&rit_mod->rit_seg_pool[i]))
+ return 0;
+
+ return 1;
+}
+
+struct bna_rit_segment *
+bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
+{
+ struct bna_rit_segment *seg;
+ struct list_head *qe;
+ int i;
+
+ /* Select the pool for seg_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ if (i == BFI_RIT_SEG_TOTAL_POOLS)
+ return NULL;
+
+ if (list_empty(&rit_mod->rit_seg_pool[i]))
+ return NULL;
+
+ bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
+ seg = (struct bna_rit_segment *)qe;
+ bfa_q_qe_init(&seg->qe);
+ seg->rit_size = seg_size;
+
+ return seg;
+}
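+
+/*
+ * Illustrative sketch (not part of the driver build): checking for and
+ * allocating a RIT segment in a single locking context, as the note
+ * above bna_rit_mod_can_satisfy() requires. The lock passed in is
+ * hypothetical; the real driver supplies its own.
+ */
+#if 0
+static struct bna_rit_segment *
+example_rit_seg_alloc(struct bna_rit_mod *rit_mod, int seg_size,
+ spinlock_t *lock)
+{
+ struct bna_rit_segment *seg = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ if (bna_rit_mod_can_satisfy(rit_mod, seg_size))
+ seg = bna_rit_mod_seg_get(rit_mod, seg_size);
+ spin_unlock_irqrestore(lock, flags);
+
+ return seg;
+}
+#endif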
+
+void
+bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+ struct bna_rit_segment *seg)
+{
+ int i;
+
+ /* Select the pool for seg->max_rit_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ seg->rit_size = 0;
+ list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
+}
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
new file mode 100644
index 00000000000..806b224a4c6
--- /dev/null
+++ b/drivers/net/bna/bna_hw.h
@@ -0,0 +1,1490 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * File for interrupt macros and functions
+ */
+
+#ifndef __BNA_HW_H__
+#define __BNA_HW_H__
+
+#include "bfi_ctreg.h"
+
+/**
+ *
+ * SW imposed limits
+ *
+ */
+
+#ifndef BNA_BIOS_BUILD
+
+#define BFI_MAX_TXQ 64
+#define BFI_MAX_RXQ 64
+#define BFI_MAX_RXF 64
+#define BFI_MAX_IB 128
+#define BFI_MAX_RIT_SIZE 256
+#define BFI_RSS_RIT_SIZE 64
+#define BFI_NONRSS_RIT_SIZE 1
+#define BFI_MAX_UCMAC 256
+#define BFI_MAX_MCMAC 512
+#define BFI_IBIDX_SIZE 4
+#define BFI_MAX_VLAN 4095
+
+/**
+ * There are 3 free IB index pools:
+ * pool1: 116 segments of 1 index each
+ * pool2: 2 segments of 2 indexes each
+ * pool8: 1 segment of 8 indexes
+ */
+#define BFI_IBIDX_POOL1_SIZE 116
+#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
+#define BFI_IBIDX_POOL2_SIZE 2
+#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
+#define BFI_IBIDX_POOL8_SIZE 1
+#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
+#define BFI_IBIDX_TOTAL_POOLS 3
+#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
+#define BFI_IBIDX_MAX_SEGSIZE 8
+#define init_ibidx_pool(name) \
+static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
+{ \
+ { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
+ { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
+ { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
+}
+
+/**
+ * There are 2 free RIT segment pools:
+ * Pool1: 192 segments of 1 RIT entry each
+ * PoolRSS: 1 segment of 64 RIT entries
+ */
+#define BFI_RIT_SEG_POOL1_SIZE 192
+#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
+#define BFI_RIT_SEG_POOLRSS_SIZE 1
+#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
+#define BFI_RIT_SEG_TOTAL_POOLS 2
+#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
+#define init_ritseg_pool(name) \
+static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
+{ \
+ { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
+ { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
+}
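+
+/*
+ * For reference, the invocation init_ritseg_pool(ritseg_pool_cfg) in
+ * bna_ctrl.c expands, with the sizes above, to:
+ *
+ * static struct bna_ritseg_pool_cfg ritseg_pool_cfg[2] = {
+ * { 192, 1 }, 192 segments of 1 RIT entry each
+ * { 1, 64 } one RSS segment of 64 RIT entries
+ * };
+ */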
+
+#else /* BNA_BIOS_BUILD */
+
+#define BFI_MAX_TXQ 1
+#define BFI_MAX_RXQ 1
+#define BFI_MAX_RXF 1
+#define BFI_MAX_IB 2
+#define BFI_MAX_RIT_SIZE 2
+#define BFI_RSS_RIT_SIZE 64
+#define BFI_NONRSS_RIT_SIZE 1
+#define BFI_MAX_UCMAC 1
+#define BFI_MAX_MCMAC 8
+#define BFI_IBIDX_SIZE 4
+#define BFI_MAX_VLAN 4095
+/* There is one free pool: 2 segments of 1 index each */
+#define BFI_IBIDX_POOL1_SIZE 2
+#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
+#define BFI_IBIDX_TOTAL_POOLS 1
+#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
+#define BFI_IBIDX_MAX_SEGSIZE 1
+#define init_ibidx_pool(name) \
+static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
+{ \
+ { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
+}
+
+#define BFI_RIT_SEG_POOL1_SIZE 1
+#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
+#define BFI_RIT_SEG_TOTAL_POOLS 1
+#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
+#define init_ritseg_pool(name) \
+static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
+{ \
+ { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
+}
+
+#endif /* BNA_BIOS_BUILD */
+
+#define BFI_RSS_HASH_KEY_LEN 10
+
+#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
+#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
+#define BFI_MAX_INTERPKT_COUNT 0xFF
+#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
+#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
+#define BFI_TX_INTERPKT_COUNT 32
+#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
+#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
+#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
+
+#define BFI_TXQ_WI_SIZE 64 /* bytes */
+#define BFI_RXQ_WI_SIZE 8 /* bytes */
+#define BFI_CQ_WI_SIZE 16 /* bytes */
+#define BFI_TX_MAX_WRR_QUOTA 0xFFF
+
+#define BFI_TX_MAX_VECTORS_PER_WI 4
+#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
+#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
+#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
+
+/* Small Q buffer size */
+#define BFI_SMALL_RXBUF_SIZE 128
+
+/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
+#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
+#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
+
+/**
+ *
+ * HW register offsets, macros
+ *
+ */
+
+/* DMA Block Register Host Window Start Address */
+#define DMA_BLK_REG_ADDR 0x00013000
+
+/* DMA Block Internal Registers */
+#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
+#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
+#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
+#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
+#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
+
+/* APP Block Register Address Offset from BAR0 */
+#define APP_BLK_REG_ADDR 0x00014000
+
+/* Host Function Interrupt Mask Registers */
+#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
+#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
+#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
+#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
+
+/**
+ * Host Function PCIe Error Registers
+ * Duplicates "Correctable" & "Uncorrectable"
+ * registers in PCIe Config space.
+ */
+#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
+#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
+#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
+#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
+
+/* Host Function Error Type Status Registers */
+#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
+#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
+#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
+#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
+
+/* Host Function Error Type Mask Registers */
+#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
+#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
+#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
+#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
+
+/* Catapult Host Semaphore Status Registers (App block) */
+#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
+#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
+#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
+#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
+#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
+#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
+#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
+#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
+
+/* PCIe Misc Register */
+#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
+
+/* Temp Sensor Control Registers */
+#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
+#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
+
+/* APP Block local error registers */
+#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
+#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
+
+/* PCIe Link Error registers */
+#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
+#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
+
+/**
+ * FCoE/FIP Ethertype Register
+ * 31:16 -- Chip wide value for FIP type
+ * 15:0 -- Chip wide value for FCoE type
+ */
+#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
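+
+/*
+ * Illustrative sketch (not part of the driver build): composing a value
+ * for FCOE_FIP_ETH_TYPE from the two chip-wide ethertypes. The numeric
+ * ethertypes below (0x8914 for FIP, 0x8906 for FCoE) are the standard
+ * assignments, not constants defined in this header.
+ */
+#if 0
+static u32
+example_fcoe_fip_eth_type_val(void)
+{
+ u32 fip_type = 0x8914; /* FIP ethertype */
+ u32 fcoe_type = 0x8906; /* FCoE ethertype */
+
+ /* 31:16 carries the FIP type, 15:0 the FCoE type */
+ return (fip_type << 16) | fcoe_type;
+}
+#endif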
+
+/**
+ * Reserved Ethertype Register
+ * 31:16 -- Reserved
+ * 15:0 -- Other ethertype
+ */
+#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
+
+/**
+ * Host Command Status Registers
+ * Each set consists of 3 registers :
+ * clear, set, cmd
+ * 16 such register sets in all
+ * See catapult_spec.pdf for detailed functionality
+ * Could each type be put in a single macro accessed by _num?
+ * (A possible parameterized form is sketched after this list.)
+ */
+#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
+#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
+#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
+#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
+#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
+#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
+#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
+#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
+#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
+#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
+#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
+#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
+#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
+#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
+#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
+#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
+#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
+#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
+#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
+#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
+#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
+#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
+#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
+#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
+#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
+#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
+#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
+#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
+#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
+#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
+#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
+#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
+#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
+#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
+#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
+#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
+#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
+#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
+#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
+#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
+#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
+#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
+#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
+#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
+#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
+#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
+#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
+#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
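+
+/*
+ * Illustrative sketch only, answering the question above; these
+ * parameterized forms are not part of the original register list.
+ * They rely on the 0x10 stride between consecutive sets (0-15) that
+ * is visible in the explicit definitions above.
+ */
+#define HOST_CMDSTS_CLR_REG_N(_num) \
+ (APP_BLK_REG_ADDR + 0x500 + ((_num) << 4))
+#define HOST_CMDSTS_SET_REG_N(_num) \
+ (APP_BLK_REG_ADDR + 0x504 + ((_num) << 4))
+#define HOST_CMDSTS_REG_N(_num) \
+ (APP_BLK_REG_ADDR + 0x508 + ((_num) << 4))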
+
+/**
+ * LPU0 Block Register Address Offset from BAR0
+ * Range 0x18000 - 0x18033
+ */
+#define LPU0_BLK_REG_ADDR 0x00018000
+
+/**
+ * LPU0 Registers
+ * Should they be used directly from the host,
+ * except for diagnostics?
+ * CTL_REG : Control register
+ * CMD_REG : Triggers execution of the command in
+ * Mailbox memory
+ */
+#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
+#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
+#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
+#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
+#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
+#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
+#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
+#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
+
+/**
+ * LPU1 Block Register Address Offset from BAR0
+ * Range 0x18400 - 0x18433
+ */
+#define LPU1_BLK_REG_ADDR 0x00018400
+
+/**
+ * LPU1 Registers
+ * Same as LPU0 registers above
+ */
+#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
+#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
+#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
+#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
+#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
+#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
+#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
+#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
+
+/**
+ * PSS Block Register Address Offset from BAR0
+ * Range 0x18800 - 0x188DB
+ */
+#define PSS_BLK_REG_ADDR 0x00018800
+
+/**
+ * PSS Registers
+ * For details, see catapult_spec.pdf
+ * ERR_STATUS_REG : Indicates error in PSS module
+ * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
+ */
+#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
+#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
+
+/**
+ * PSS Semaphore Lock Registers, total 16
+ * The first read while unlocked returns 0
+ * and atomically sets the register to 1.
+ * Subsequent reads return 1.
+ * To release the lock, write 0 to the register.
+ * Range : 0x20 to 0x5c
+ */
+#define PSS_SEM_LOCK_REG(_num) \
+ (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
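+
+/*
+ * Usage sketch (illustrative only, not a driver API): acquiring and
+ * releasing a PSS hardware semaphore through an ioremapped BAR0, per
+ * the read-to-lock / write-0-to-unlock behaviour described above.
+ * The bar0 pointer and semaphore number are assumed to come from the
+ * caller's context.
+ *
+ * if (readl(bar0 + PSS_SEM_LOCK_REG(num)) == 0) {
+ * ... critical section, lock is held ...
+ * writel(0, bar0 + PSS_SEM_LOCK_REG(num));
+ * }
+ */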
+
+/**
+ * PSS Semaphore Status Registers,
+ * corresponding to the lock registers above
+ */
+#define PSS_SEM_STATUS_REG(_num) \
+ (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
+
+/**
+ * Catapult CPQ Registers
+ * Defines for Mailbox Registers
+ * Used to send mailbox commands to firmware from
+ * host. The data part is written to the MBox
+ * memory, registers are used to indicate that
+ * a command is resident in memory.
+ *
+ * Note : LPU0<->LPU1 mailboxes are not listed here
+ */
+#define CPQ_BLK_REG_ADDR 0x00019000
+
+#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
+#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
+#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
+#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
+
+#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
+#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
+#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
+#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
+
+#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
+#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
+#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
+#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
+
+#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
+#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
+#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
+#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
+
+/* Host Function Force Parity Error Registers */
+#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
+#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
+#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
+#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
+
+/* LL Port[0|1] Halt Mask Registers */
+#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
+#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
+
+/* LL Port[0|1] Error Mask Registers */
+#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
+#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
+
+/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
+#define FLI_BLK_REG_ADDR 0x0001D000
+
+/* EMC FLI Registers */
+#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
+#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
+#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
+#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
+#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
+#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
+#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
+
+/**
+ * RO register
+ * 31:16 -- Vendor Id
+ * 15:0 -- Device Id
+ */
+#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
+#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
+
+/**
+ * RAD (RxAdm) Block Register Address Offset from BAR0
+ * RAD0 Range : 0x20000 - 0x203FF
+ * RAD1 Range : 0x20400 - 0x207FF
+ */
+#define RAD0_BLK_REG_ADDR 0x00020000
+#define RAD1_BLK_REG_ADDR 0x00020400
+
+/* RAD0 Registers */
+#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
+#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
+#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
+
+/* Default function ID register */
+#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
+
+/* Promiscuous function ID register */
+#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
+
+#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
+
+/*
+ * This register selects 1 of 8 PM Q's using
+ * VLAN pri, for non-BCN packets without a VLAN tag
+ */
+#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
+
+#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
+#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
+#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
+#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
+#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
+
+#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
+#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
+#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
+#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
+#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
+#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
+#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
+#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
+#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
+#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
+#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
+#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
+#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
+#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
+#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
+#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
+#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
+
+#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
+#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
+#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
+#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
+#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
+#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
+#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
+#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
+
+#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
+
+/* RAD1 Registers */
+#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
+#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
+#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
+
+/* Default function ID register */
+#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
+
+/* Promiscuous function ID register */
+#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
+
+#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
+
+/*
+ * This register selects 1 of 8 PM Q's using
+ * VLAN pri, for non-BCN packets without a VLAN tag
+ */
+#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
+
+#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
+#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
+#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
+
+/**
+ * TXA Block Register Address Offset from BAR0
+ * TXA0 Range : 0x21000 - 0x213FF
+ * TXA1 Range : 0x21400 - 0x217FF
+ */
+#define TXA0_BLK_REG_ADDR 0x00021000
+#define TXA1_BLK_REG_ADDR 0x00021400
+
+/* TXA Registers */
+#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
+#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
+
+/**
+ * TSO Sequence # Registers (RO)
+ * Total 8 (for 8 queues)
+ * Holds the last seq.# for TSO frames
+ * See catapult_spec.pdf for more details
+ */
+#define TXA0_TSO_TCP_SEQ_REG(_num) \
+ (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+#define TXA1_TSO_TCP_SEQ_REG(_num) \
+ (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+/**
+ * TSO IP ID # Registers (RO)
+ * Total 8 (for 8 queues)
+ * Holds the last IP ID for TSO frames
+ * See catapult_spec.pdf for more details
+ */
+#define TXA0_TSO_IP_INFO_REG(_num) \
+ (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+#define TXA1_TSO_IP_INFO_REG(_num) \
+ (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+/**
+ * RXA Block Register Address Offset from BAR0
+ * RXA0 Range : 0x21800 - 0x21BFF
+ * RXA1 Range : 0x21C00 - 0x21FFF
+ */
+#define RXA0_BLK_REG_ADDR 0x00021800
+#define RXA1_BLK_REG_ADDR 0x00021C00
+
+/* RXA Registers */
+#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
+#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
+
+/**
+ * PPLB Block Register Address Offset from BAR0
+ * PPLB0 Range : 0x22000 - 0x223FF
+ * PPLB1 Range : 0x22400 - 0x227FF
+ */
+#define PLB0_BLK_REG_ADDR 0x00022000
+#define PLB1_BLK_REG_ADDR 0x00022400
+
+/**
+ * PLB Registers
+ * Holds the RL timer timestamps used in RLT-tagged frames
+ */
+#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
+#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
+
+/* Controls the rate-limiter for each of the priority classes */
+#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
+#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
+
+/**
+ * Max byte register, total 8, 0-7
+ * see catapult_spec.pdf for details
+ */
+#define PLB0_RL_MAX_BC(_num) \
+ (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
+#define PLB1_RL_MAX_BC(_num) \
+ (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
+
+/**
+ * RL Time Unit Register for priority 0-7
+ * 4 bits per priority
+ * (2^rl_unit)*1us is the actual time period
+ */
+#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
+#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
+
+/**
+ * RL byte count register,
+ * bytes transmitted in (rl_unit*1)us time period
+ * 1 per priority, 8 in all, 0-7.
+ */
+#define PLB0_RL_BYTE_CNT(_num) \
+ (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
+#define PLB1_RL_BYTE_CNT(_num) \
+ (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
+
+/**
+ * RL Min factor register
+ * 2 bits per priority,
+ * 4 factors possible: 1, 0.5, 0.25, 0
+ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
+ */
+#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
+#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
+
+/**
+ * RL Max factor register
+ * 2 bits per priority,
+ * 4 factors possible: 1, 0.5, 0.25, 0
+ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
+ */
+#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
+#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
+
+/* MAC SERDES Address Paging register */
+#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
+#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
+
+/* LL EMS Registers */
+#define LL_EMS0_BLK_REG_ADDR 0x00026800
+#define LL_EMS1_BLK_REG_ADDR 0x00026C00
+
+/**
+ * BPC Block Register Address Offset from BAR0
+ * BPC0 Range : 0x23000 - 0x233FF
+ * BPC1 Range : 0x23400 - 0x237FF
+ */
+#define BPC0_BLK_REG_ADDR 0x00023000
+#define BPC1_BLK_REG_ADDR 0x00023400
+
+/**
+ * PMM Block Register Address Offset from BAR0
+ * PMM0 Range : 0x23800 - 0x23BFF
+ * PMM1 Range : 0x23C00 - 0x23FFF
+ */
+#define PMM0_BLK_REG_ADDR 0x00023800
+#define PMM1_BLK_REG_ADDR 0x00023C00
+
+/**
+ * HQM Block Register Address Offset from BAR0
+ * HQM0 Range : 0x24000 - 0x243FF
+ * HQM1 Range : 0x24400 - 0x247FF
+ */
+#define HQM0_BLK_REG_ADDR 0x00024000
+#define HQM1_BLK_REG_ADDR 0x00024400
+
+/**
+ * HQM Control Register
+ * Controls some aspects of IB
+ * See catapult_spec.pdf for details
+ */
+#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
+#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
+
+/**
+ * HQM Stop Q Semaphore Registers.
+ * Only one Queue resource can be stopped at
+ * any given time. This register controls access
+ * to the single stop Q resource.
+ * See catapult_spec.pdf for details
+ */
+#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
+#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
+#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
+#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
+
+/**
+ * LUT Block Register Address Offset from BAR0
+ * LUT0 Range : 0x25800 - 0x25BFF
+ * LUT1 Range : 0x25C00 - 0x25FFF
+ */
+#define LUT0_BLK_REG_ADDR 0x00025800
+#define LUT1_BLK_REG_ADDR 0x00025C00
+
+/**
+ * LUT Registers
+ * See catapult_spec.pdf for details
+ */
+#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
+#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
+#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
+#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
+
+/**
+ * TRC (Debug/Trace) Register Offset from BAR0
+ * Range : 0x26000 - 0x263FF
+ */
+#define TRC_BLK_REG_ADDR 0x00026000
+
+/**
+ * TRC Registers
+ * See catapult_spec.pdf for details of each
+ */
+#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
+#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
+#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
+#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
+#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
+#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
+#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
+
+/**
+ * TRC Trigger match filters, total 10
+ * Determines the trigger condition
+ */
+#define TRC_TRGM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+/**
+ * TRC Next State filters, total 10
+ * Determines the next state conditions
+ */
+#define TRC_NXTM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
+
+/**
+ * TRC Store Match filters, total 10
+ * Determines the store conditions
+ */
+#define TRC_STRM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
+
+/* DOORBELLS ACCESS */
+
+/**
+ * Catapult doorbells
+ * Each doorbell-queue set has
+ * 1 RxQ, 1 TxQ, 2 IBs in that order
+ * The size of each entry is 32 bytes, even though only 1 word
+ * is used. In the non-VM case each doorbell-q set is
+ * separated by 128 bytes; in the VM case it is separated
+ * by 4K bytes.
+ * Non VM case Range : 0x38000 - 0x39FFF
+ * VM case Range : 0x100000 - 0x11FFFF
+ * The range applies to both HQMs
+ */
+#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
+#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
+
+/* MEMORY ACCESS */
+
+/**
+ * Catapult H/W Block Memory Access Address
+ * To the host a memory space of 32K (page) is visible
+ * at a time. The address range is from 0x08000 to 0x0FFFF
+ */
+#define HW_BLK_HOST_MEM_ADDR 0x08000
+
+/**
+ * Catapult LUT Memory Access Page Numbers
+ * Range : LUT0 0xa0-0xa1
+ * LUT1 0xa2-0xa3
+ */
+#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
+#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
+
+/**
+ * Catapult RxFn Database Memory Block Base Offset
+ *
+ * The Rx function database exists in LUT block.
+ * In PCIe space this is accessible as a 256x32
+ * bit block. Each entry in this database is 4
+ * (4 byte) words. Max. entries is 64.
+ * Address of an entry corresponding to a function
+ * = base_addr + (function_no. * 16)
+ */
+#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
+
+/**
+ * Catapult TxFn Database Memory Block Base Offset Address
+ *
+ * The Tx function database exists in LUT block.
+ * In PCIe space this is accessible as a 64x32
+ * bit block. Each entry in this database is 1
+ * (4 byte) word. Max. entries is 64.
+ * Address of an entry corresponding to a function
+ * = base_addr + (function_no. * 4)
+ */
+#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
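+
+/*
+ * Illustrative helpers only (not part of the hardware interface):
+ * per-function entry offsets inside the Rx/Tx function databases,
+ * following the "base + function_no * entry_size" rule described
+ * above (16 bytes per RxFn entry, 4 bytes per TxFn entry).
+ */
+#define RX_FNDB_ENTRY_OFFSET(_fn_no) \
+ (RX_FNDB_RAM_BASE_OFFSET + ((_fn_no) * 16))
+#define TX_FNDB_ENTRY_OFFSET(_fn_no) \
+ (TX_FNDB_RAM_BASE_OFFSET + ((_fn_no) * 4))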
+
+/**
+ * Catapult Unicast CAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x48 bits; mapped to PCIe space
+ * 512x32 bit blocks. For each address, bits
+ * are written in the order : [47:32] and then
+ * [31:0].
+ */
+#define UCAST_CAM_BASE_OFFSET 0x0000A800
+
+/**
+ * Catapult Unicast RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x9 bits.
+ */
+#define UCAST_RAM_BASE_OFFSET 0x0000B000
+
+/**
+ * Catapult Multicast CAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x48 bits; mapped to PCIe space
+ * 512x32 bit blocks. For each address, bits
+ * are written in the order : [47:32] and then
+ * [31:0].
+ */
+#define MCAST_CAM_BASE_OFFSET 0x0000A000
+
+/**
+ * Catapult VLAN RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 4096x66 bits; mapped to PCIe space as
+ * 8192x32 bit blocks.
+ * All the 4K entries are within the address range
+ * 0x0000 to 0x8000, so in the first LUT page.
+ */
+#define VLAN_RAM_BASE_OFFSET 0x00000000
+
+/**
+ * Catapult Tx Stats RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 1024x33 bits;
+ * Each Tx function has 64 bytes of space
+ */
+#define TX_STATS_RAM_BASE_OFFSET 0x00009000
+
+/**
+ * Catapult Rx Stats RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 1024x33 bits;
+ * Each Rx function has 64 bytes of space
+ */
+#define RX_STATS_RAM_BASE_OFFSET 0x00008000
+
+/* Catapult RXA Memory Access Page Numbers */
+#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
+#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
+
+/**
+ * Catapult Multicast Vector Table Base Offset Address
+ *
+ * Exists in RxA memory space.
+ * Organized as 512x65 bit block.
+ * However, 16 bytes (a power of 2) are allocated for each entry.
+ * Total size 512*16 bytes.
+ * There are two logical divisions, 256 entries each :
+ * a) Entries 0x00 to 0xff (256) -- Approx. MVT
+ * Offset 0x000 to 0xFFF
+ * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
+ * Offsets 0x1000 to 0x1FFF
+ */
+#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
+#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
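+
+/*
+ * Illustrative helper only (not part of the hardware interface):
+ * offset of an MVT entry, following the 16-bytes-per-entry layout
+ * described above.  Entry indices 0x000-0x0ff address the approximate
+ * MVT and 0x100-0x1ff the exact MVT.
+ */
+#define MCAST_MVT_ENTRY_OFFSET(_entry) ((_entry) << 4)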
+
+/**
+ * Catapult RxQ Translate Table (RIT) Base Offset Address
+ *
+ * Exists in RxA memory space
+ * Total no. of entries 64
+ * Each entry is 1 (4 byte) word.
+ * 31:12 -- Reserved
+ * 11:0 -- Two 6 bit RxQ Ids
+ */
+#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
+
+/* Catapult RxAdm (RAD) Memory Access Page Numbers */
+#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
+#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
+
+/**
+ * Catapult RSS Table Base Offset Address
+ *
+ * Exists in RAD memory space.
+ * Each entry is 352 bits, but aligned on a
+ * 64 byte (512 bit) boundary. Accessed as
+ * 4 byte words, the whole entry can be
+ * broken into 11 word accesses.
+ */
+#define RSS_TABLE_BASE_OFFSET 0x00000800
+
+/**
+ * Catapult CPQ Block Page Number
+ * This value is written to the page number registers
+ * to access the memory associated with the mailboxes.
+ */
+#define CPQ_BLK_PG_NUM 0x00000005
+
+/**
+ * Clarification :
+ * LL functions are 2 & 3; can HostFn0/HostFn1
+ * <-> LPU0/LPU1 memories be used ?
+ */
+/**
+ * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
+ * Per catapult_spec.pdf, the offset of the mbox
+ * memory is in the register space at an offset of 0x200
+ */
+#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
+
+#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
+
+/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
+#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
+
+/**
+ * Catapult HQM Block Page Number
+ * This is written to the page number register for
+ * the appropriate function to access the memory
+ * associated with HQM
+ */
+#define HQM0_BLK_PG_NUM 0x00000096
+#define HQM1_BLK_PG_NUM 0x00000097
+
+/**
+ * Note that TxQ and RxQ entries are interleaved in
+ * the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
+ */
+
+#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
+
+/**
+ * CQ Memory
+ * Exists in HQM Memory space
+ * Each entry is 16 (4 byte) words of which
+ * only 12 words are used for configuration
+ * Total 64 entries per HQM memory space
+ */
+#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
+
+/**
+ * Interrupt Block (IB) Memory
+ * Exists in HQM Memory space
+ * Each entry is 8 (4 byte) words of which
+ * only 5 words are used for configuration
+ * Total 128 entries per HQM memory space
+ */
+#define HQM_IB_RAM_BASE_OFFSET 0x00001000
+
+/**
+ * Index Table (IT) Memory
+ * Exists in HQM Memory space
+ * Each entry is 1 (4 byte) word which
+ * is used for configuration
+ * Total 128 entries per HQM memory space
+ */
+#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
+
+/**
+ * PSS Block Memory Page Number
+ * This is written to the appropriate page number
+ * register to access the CPU memory.
+ * Also known as the PSS secondary memory (SMEM).
+ * Range : 0x180 to 0x1CF
+ * See catapult_spec.pdf for details
+ */
+#define PSS_BLK_PG_NUM 0x00000180
+
+/**
+ * Offsets of different instances of PSS SMEM
+ * 2.5M of contiguous 1T memory space : 2 blocks
+ * of 1M each (32 pages each, page=32KB) and 4 smaller
+ * blocks of 128K each (4 pages each, page=32KB)
+ * PSS_LMEM_INST0 is used for firmware download
+ */
+#define PSS_LMEM_INST0 0x00000000
+#define PSS_LMEM_INST1 0x00100000
+#define PSS_LMEM_INST2 0x00200000
+#define PSS_LMEM_INST3 0x00220000
+#define PSS_LMEM_INST4 0x00240000
+#define PSS_LMEM_INST5 0x00260000
+
+#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
+
+#define BNA_GET_PAGE_NUM(_base_page, _offset) \
+ ((_base_page) + ((_offset) >> 15))
+
+#define BNA_GET_PAGE_OFFSET(_offset) \
+ ((_offset) & 0x7fff)
+
+#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
+ ((_bar0) + HW_BLK_HOST_MEM_ADDR \
+ + BNA_GET_PAGE_OFFSET((_base_offset)))
+
+#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
+ (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
+ + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
+ + (((_fn_id) & 0x3f) << 9) \
+ + (((_vlan_id) & 0xfe0) >> 3))
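+
+/*
+ * Usage sketch (illustrative only): host access to H/W block memory is
+ * a two-step sequence -- select the 32KB page through the function's
+ * page_addr register, then read/write within the window via BAR0.
+ * The bna pointer below is an assumption about the caller's context;
+ * the same pattern is used by the queue setup code in bna_txrx.c.
+ *
+ * pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + port_num,
+ * HQM_IB_RAM_BASE_OFFSET);
+ * writel(pg_num, bna->regs.page_addr);
+ * base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ * HQM_IB_RAM_BASE_OFFSET);
+ */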
+
+/**
+ *
+ * Interrupt related bits, flags and macros
+ *
+ */
+
+#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
+#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
+#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
+#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
+
+#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
+#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
+#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
+#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
+
+#define __LPU2HOST_MBOX_MASK_BITS \
+ (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
+ __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
+
+#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
+
+#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
+ ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS))
+
+#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
+ ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS))
+
+#define BNA_IS_MBOX_INTR(_intr_status) \
+ ((_intr_status) & \
+ (__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS))
+
+#define __EMC_ERROR_STATUS_BITS 0x00010000
+#define __LPU0_ERROR_STATUS_BITS 0x00020000
+#define __LPU1_ERROR_STATUS_BITS 0x00040000
+#define __PSS_ERROR_STATUS_BITS 0x00080000
+
+#define __HALT_STATUS_BITS 0x01000000
+
+#define __EMC_ERROR_MASK_BITS 0x00010000
+#define __LPU0_ERROR_MASK_BITS 0x00020000
+#define __LPU1_ERROR_MASK_BITS 0x00040000
+#define __PSS_ERROR_MASK_BITS 0x00080000
+
+#define __HALT_MASK_BITS 0x01000000
+
+#define __ERROR_MASK_BITS \
+ (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
+ __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
+ __HALT_MASK_BITS)
+
+#define BNA_IS_ERR_INTR(_intr_status) \
+ ((_intr_status) & \
+ (__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
+ __HALT_STATUS_BITS))
+
+#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
+ (BNA_IS_MBOX_INTR((_intr_status)) | \
+ BNA_IS_ERR_INTR((_intr_status)))
+
+#define BNA_IS_INTX_DATA_INTR(_intr_status) \
+ ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
+
+#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
+do { \
+ (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS); \
+} while (0)
+
+#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
+do { \
+ (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
+ __HALT_STATUS_BITS); \
+} while (0)
+
+#define bna_intx_disable(_bna, _cur_mask) \
+{ \
+ (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
+ writel(0xffffffff, (_bna)->regs.fn_int_mask);\
+}
+
+#define bna_intx_enable(bna, new_mask) \
+ writel((new_mask), (bna)->regs.fn_int_mask)
+
+#define bna_mbox_intr_disable(bna) \
+ writel((readl((bna)->regs.fn_int_mask) | \
+ (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_mbox_intr_enable(bna) \
+ writel((readl((bna)->regs.fn_int_mask) & \
+ ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_intr_status_get(_bna, _status) \
+{ \
+ (_status) = readl((_bna)->regs.fn_int_status); \
+ if ((_status)) { \
+ writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
+ __LPU02HOST_MBOX1_STATUS_BITS |\
+ __LPU12HOST_MBOX0_STATUS_BITS |\
+ __LPU12HOST_MBOX1_STATUS_BITS), \
+ (_bna)->regs.fn_int_status);\
+ } \
+}
+
+#define bna_intr_status_get_no_clr(_bna, _status) \
+ (_status) = readl((_bna)->regs.fn_int_status)
+
+#define bna_intr_mask_get(bna, mask) \
+ (*mask) = readl((bna)->regs.fn_int_mask)
+
+#define bna_intr_ack(bna, intr_bmap) \
+ writel((intr_bmap), (bna)->regs.fn_int_status)
+
+#define bna_ib_intx_disable(bna, ib_id) \
+ writel(readl((bna)->regs.fn_int_mask) | \
+ (1 << (ib_id)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_ib_intx_enable(bna, ib_id) \
+ writel(readl((bna)->regs.fn_int_mask) & \
+ ~(1 << (ib_id)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_mbox_msix_idx_set(_device) \
+do {\
+ writel(((_device)->vector & 0x000001FF), \
+ (_device)->bna->pcidev.pci_bar_kva + \
+ reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
+} while (0)
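+
+/*
+ * Usage sketch (illustrative only, not the driver's actual ISR): the
+ * interrupt macros above are typically combined as follows.  The
+ * handler steps are placeholders.
+ *
+ * u32 intr_status;
+ *
+ * bna_intr_status_get(bna, intr_status);
+ * if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ * ... service mailbox / error interrupt ...
+ * if (BNA_IS_INTX_DATA_INTR(intr_status))
+ * ... process the IBs flagged in the low 16 bits
+ * (__LPU2HOST_IB_STATUS_BITS) ...
+ */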
+
+/**
+ *
+ * TxQ, RxQ, CQ related bits, offsets, macros
+ *
+ */
+
+#define BNA_Q_IDLE_STATE 0x00008001
+
+#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
+ ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
+
+#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
+ ((HQM_DOORBELL_BLK_BASE_ADDR) \
+ + (_entry << 7))
+
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
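+
+/*
+ * Usage sketch (illustrative only): re-arming an IB through its
+ * doorbell.  The doorbell_addr below is assumed to have been computed
+ * from BNA_GET_DOORBELL_BASE_ADDR()/BNA_GET_DOORBELL_ENTRY_OFFSET();
+ * the ack word encodes the coalescing timeout and the number of
+ * events being acknowledged.
+ *
+ * writel(BNA_DOORBELL_IB_INT_ACK(coalescing_timeo, acked_events),
+ * doorbell_addr);
+ */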
+
+/* TxQ Entry Opcodes */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
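+
+/*
+ * Worked example (illustrative, values assumed): a 20 byte L4 header
+ * at byte offset 34 would be encoded as
+ * BNA_TXQ_WI_L4_HDR_N_OFFSET(20, 34) == (20 << 10) | 34 == 0x5022.
+ */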
+
+/*
+ * Completion Q defines
+ */
+/* CQ Entry Flags */
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BNA_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
+#define BNA_CQ_EF_IPV6 (1 << 11)
+
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
+
+#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
+
+#define BNA_CQ_EF_LOCAL (1 << 20)
+
+/**
+ *
+ * Data structures
+ *
+ */
+
+enum txf_flags {
+ BFI_TXF_CF_ENABLE = 1 << 0,
+ BFI_TXF_CF_VLAN_FILTER = 1 << 8,
+ BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
+ BFI_TXF_CF_VLAN_INSERT = 1 << 10,
+ BFI_TXF_CF_RSVD1 = 1 << 11,
+ BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
+ BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
+ BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
+ BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
+ BFI_TXF_CF_RSVD2 = 0x7F << 1
+};
+
+enum ib_flags {
+ BFI_IB_CF_MASTER_ENABLE = (1 << 0),
+ BFI_IB_CF_MSIX_MODE = (1 << 1),
+ BFI_IB_CF_COALESCING_MODE = (1 << 2),
+ BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
+ BFI_IB_CF_INT_ENABLE = (1 << 4),
+ BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
+ BFI_IB_CF_ACK_PENDING = (1 << 6),
+ BFI_IB_CF_RESERVED1 = (1 << 7)
+};
+
+enum rss_hash_type {
+ BFI_RSS_T_V4_TCP = (1 << 11),
+ BFI_RSS_T_V4_IP = (1 << 10),
+ BFI_RSS_T_V6_TCP = (1 << 9),
+ BFI_RSS_T_V6_IP = (1 << 8)
+};
+enum hds_header_type {
+ BNA_HDS_T_V4_TCP = (1 << 11),
+ BNA_HDS_T_V4_UDP = (1 << 10),
+ BNA_HDS_T_V6_TCP = (1 << 9),
+ BNA_HDS_T_V6_UDP = (1 << 8),
+ BNA_HDS_FORCED = (1 << 7),
+};
+enum rxf_flags {
+ BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
+ BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
+ BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
+ BNA_RXF_CF_VLAN_STRIP = (1 << 12),
+ BNA_RXF_CF_RSS_ENABLE = (1 << 8)
+};
+struct bna_chip_regs_offset {
+ u32 page_addr;
+ u32 fn_int_status;
+ u32 fn_int_mask;
+ u32 msix_idx;
+};
+
+struct bna_chip_regs {
+ void __iomem *page_addr;
+ void __iomem *fn_int_status;
+ void __iomem *fn_int_mask;
+};
+
+struct bna_txq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
+ /* 23:16->Int Blk Offset */
+ /* 15:0 ->consumer pointer(index?) */
+ u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
+ u32 nxt_qid_n_fid_n_pri; /* 17:10->next */
+ /* QId;9:3->FID;2:0->Priority */
+ u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
+ /* 23:12->Cfg Quota; */
+ /* 11:0 ->Run Quota */
+ u32 reserved3[4];
+};
+
+struct bna_rxq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
+ /* 23:16->CQ; */
+ /* 15:0->consumer pointer(index?) */
+ u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
+ u32 next_qid; /* 17:10->next QId */
+ u32 reserved3;
+ u32 reserved4[4];
+};
+
+struct bna_rxtx_q_mem {
+ struct bna_rxq_mem rxq;
+ struct bna_txq_mem txq;
+};
+
+struct bna_cq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
+ /* 23:16->Int Blk Offset */
+ /* 15:0 ->consumer pointer(index?) */
+ u32 q_state; /* 31:16->reserved; 15:0-> Q state */
+ u32 reserved3[2];
+ u32 reserved4[4];
+};
+
+struct bna_ib_blk_mem {
+ u32 host_addr_lo;
+ u32 host_addr_hi;
+ u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
+ /* 23:16->coalescing cfg; */
+ /* 15:8 ->control; */
+ /* 7:0 ->msix; */
+ u32 ipkt_n_ent_n_idxof;
+ u32 ipkt_cnt_cfg_n_unacked;
+
+ u32 reserved[3];
+};
+
+struct bna_idx_tbl_mem {
+ u32 idx; /* !< 31:16->res;15:0->idx; */
+};
+
+struct bna_doorbell_qset {
+ u32 rxq[0x20 >> 2];
+ u32 txq[0x20 >> 2];
+ u32 ib0[0x20 >> 2];
+ u32 ib1[0x20 >> 2];
+};
+
+struct bna_rx_fndb_ram {
+ u32 rss_prop;
+ u32 size_routing_props;
+ u32 rit_hds_mcastq;
+ u32 control_flags;
+};
+
+struct bna_tx_fndb_ram {
+ u32 vlan_n_ctrl_flags;
+};
+
+/**
+ * @brief
+ * Structure which maps to RxFn Indirection Table (RIT)
+ * Size : 1 word
+ * See catapult_spec.pdf, RxA for details
+ */
+struct bna_rit_mem {
+ u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
+};
+
+/**
+ * @brief
+ * Structure which maps to RSS Table entry
+ * Size : 16 words
+ * See catapult_spec.pdf, RAD for details
+ */
+struct bna_rss_mem {
+ /*
+ * 31:12-> res
+ * 11:8 -> protocol type
+ * 7:0 -> hash index
+ */
+ u32 type_n_hash;
+ u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
+ u32 reserved[5];
+};
+
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+struct bna_dma_addr {
+ u32 msb;
+ u32 lsb;
+};
+
+struct bna_txq_wi_vector {
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
+};
+
+typedef u16 bna_txq_wi_opcode_t;
+
+typedef u16 bna_txq_wi_ctrl_flag_t;
+
+/**
+ * TxQ Entry Structure
+ *
+ * BEWARE: Load values into this structure with correct endianness.
+ */
+struct bna_txq_entry {
+ union {
+ struct {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ bna_txq_wi_opcode_t opcode; /* Either */
+ /* BNA_TXQ_WI_SEND or */
+ /* BNA_TXQ_WI_SEND_LSO */
+ bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+ } wi;
+
+ struct {
+ u16 reserved;
+ bna_txq_wi_opcode_t opcode; /* Must be */
+ /* BNA_TXQ_WI_EXTENSION */
+ u32 reserved2[3]; /* Place holder for */
+ /* removed vector (12 bytes) */
+ } wi_ext;
+ } hdr;
+ struct bna_txq_wi_vector vector[4];
+};
+#define wi_hdr hdr.wi
+#define wi_ext_hdr hdr.wi_ext
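+
+/*
+ * Usage sketch (illustrative only, field values are placeholders): a
+ * single-frame send work item would be populated roughly as below.
+ * Multi-byte fields are converted with htons()/htonl() on the
+ * assumption that the hardware expects big-endian values (see the
+ * BEWARE note above).
+ *
+ * txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
+ * txqent->hdr.wi.num_vectors = 1;
+ * txqent->hdr.wi.flags = htons(BNA_TXQ_WI_CF_IP_CKSUM |
+ * BNA_TXQ_WI_CF_TCP_CKSUM);
+ * txqent->hdr.wi.frame_length = htonl(frame_len);
+ * txqent->vector[0].length = htons(frame_len);
+ * txqent->vector[0].host_addr.msb = htonl(upper_32_bits(dma_addr));
+ * txqent->vector[0].host_addr.lsb = htonl(lower_32_bits(dma_addr));
+ */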
+
+/* RxQ Entry Structure */
+struct bna_rxq_entry { /* Rx-Buffer */
+ struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
+};
+
+typedef u32 bna_cq_e_flag_t;
+
+/* CQ Entry Structure */
+struct bna_cq_entry {
+ bna_cq_e_flag_t flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+#endif /* __BNA_HW_H__ */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
new file mode 100644
index 00000000000..ad93fdb0f42
--- /dev/null
+++ b/drivers/net/bna/bna_txrx.c
@@ -0,0 +1,4172 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfa_sm.h"
+#include "bfi.h"
+
+/**
+ * IB
+ */
+#define bna_ib_find_free_ibidx(_mask, _pos)\
+do {\
+ (_pos) = 0;\
+ while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
+ ((1 << (_pos)) & (_mask)))\
+ (_pos)++;\
+} while (0)
+
+#define bna_ib_count_ibidx(_mask, _count)\
+do {\
+ int pos = 0;\
+ (_count) = 0;\
+ while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
+ if ((1 << pos) & (_mask))\
+ (_count) = pos + 1;\
+ pos++;\
+ } \
+} while (0)
+
+#define bna_ib_select_segpool(_count, _q_idx)\
+do {\
+ int i;\
+ (_q_idx) = -1;\
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
+ if ((_count <= ibidx_pool[i].pool_entry_size)) {\
+ (_q_idx) = i;\
+ break;\
+ } \
+ } \
+} while (0)
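+
+/*
+ * Worked example (illustrative): with _mask == 0x0b (indexes 0, 1 and
+ * 3 in use), bna_ib_find_free_ibidx() yields _pos == 2 (the first
+ * clear bit) and bna_ib_count_ibidx() yields _count == 4 (one past the
+ * highest bit set), which is the segment size needed to cover all
+ * indexes currently in use.
+ */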
+
+struct bna_ibidx_pool {
+ int pool_size;
+ int pool_entry_size;
+};
+init_ibidx_pool(ibidx_pool);
+
+static struct bna_intr *
+bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
+ int vector)
+{
+ struct bna_intr *intr;
+ struct list_head *qe;
+
+ list_for_each(qe, &ib_mod->intr_active_q) {
+ intr = (struct bna_intr *)qe;
+
+ if ((intr->intr_type == intr_type) &&
+ (intr->vector == vector)) {
+ intr->ref_count++;
+ return intr;
+ }
+ }
+
+ if (list_empty(&ib_mod->intr_free_q))
+ return NULL;
+
+ bfa_q_deq(&ib_mod->intr_free_q, &intr);
+ bfa_q_qe_init(&intr->qe);
+
+ intr->ref_count = 1;
+ intr->intr_type = intr_type;
+ intr->vector = vector;
+
+ list_add_tail(&intr->qe, &ib_mod->intr_active_q);
+
+ return intr;
+}
+
+static void
+bna_intr_put(struct bna_ib_mod *ib_mod,
+ struct bna_intr *intr)
+{
+ intr->ref_count--;
+
+ if (intr->ref_count == 0) {
+ intr->ib = NULL;
+ list_del(&intr->qe);
+ bfa_q_qe_init(&intr->qe);
+ list_add_tail(&intr->qe, &ib_mod->intr_free_q);
+ }
+}
+
+void
+bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+ int j;
+ int count;
+ u8 offset;
+ struct bna_doorbell_qset *qset;
+ unsigned long off;
+
+ ib_mod->bna = bna;
+
+ ib_mod->ib = (struct bna_ib *)
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
+ ib_mod->intr = (struct bna_intr *)
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
+ ib_mod->idx_seg = (struct bna_ibidx_seg *)
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ib_mod->ib_free_q);
+ INIT_LIST_HEAD(&ib_mod->intr_free_q);
+ INIT_LIST_HEAD(&ib_mod->intr_active_q);
+
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
+ INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
+
+ for (i = 0; i < BFI_MAX_IB; i++) {
+ ib_mod->ib[i].ib_id = i;
+
+ ib_mod->ib[i].ib_seg_host_addr_kva =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ ib_mod->ib[i].ib_seg_host_addr.lsb =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ ib_mod->ib[i].ib_seg_host_addr.msb =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+
+ qset = (struct bna_doorbell_qset *)0;
+ off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
+ * (0x20 >> 2)]);
+ ib_mod->ib[i].door_bell.doorbell_addr = off +
+ BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
+
+ bfa_q_qe_init(&ib_mod->ib[i].qe);
+ list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
+
+ bfa_q_qe_init(&ib_mod->intr[i].qe);
+ list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
+ }
+
+ count = 0;
+ offset = 0;
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
+ for (j = 0; j < ibidx_pool[i].pool_size; j++) {
+ bfa_q_qe_init(&ib_mod->idx_seg[count]);
+ ib_mod->idx_seg[count].ib_seg_size =
+ ibidx_pool[i].pool_entry_size;
+ ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
+ list_add_tail(&ib_mod->idx_seg[count].qe,
+ &ib_mod->ibidx_seg_pool[i]);
+ count++;
+ offset += ibidx_pool[i].pool_entry_size;
+ }
+ }
+}
+
+void
+bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
+{
+ int i;
+ int j;
+ struct list_head *qe;
+
+ i = 0;
+ list_for_each(qe, &ib_mod->ib_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &ib_mod->intr_free_q)
+ i++;
+
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
+ j = 0;
+ list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
+ j++;
+ }
+
+ ib_mod->bna = NULL;
+}
+
+static struct bna_ib *
+bna_ib_get(struct bna_ib_mod *ib_mod,
+ enum bna_intr_type intr_type,
+ int vector)
+{
+ struct bna_ib *ib;
+ struct bna_intr *intr;
+
+ if (intr_type == BNA_INTR_T_INTX)
+ vector = (1 << vector);
+
+ intr = bna_intr_get(ib_mod, intr_type, vector);
+ if (intr == NULL)
+ return NULL;
+
+ if (intr->ib) {
+ if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
+ bna_intr_put(ib_mod, intr);
+ return NULL;
+ }
+ intr->ib->ref_count++;
+ return intr->ib;
+ }
+
+ if (list_empty(&ib_mod->ib_free_q)) {
+ bna_intr_put(ib_mod, intr);
+ return NULL;
+ }
+
+ bfa_q_deq(&ib_mod->ib_free_q, &ib);
+ bfa_q_qe_init(&ib->qe);
+
+ ib->ref_count = 1;
+ ib->start_count = 0;
+ ib->idx_mask = 0;
+
+ ib->intr = intr;
+ ib->idx_seg = NULL;
+ intr->ib = ib;
+
+ ib->bna = ib_mod->bna;
+
+ return ib;
+}
+
+static void
+bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
+{
+ bna_intr_put(ib_mod, ib->intr);
+
+ ib->ref_count--;
+
+ if (ib->ref_count == 0) {
+ ib->intr = NULL;
+ ib->bna = NULL;
+ list_add_tail(&ib->qe, &ib_mod->ib_free_q);
+ }
+}
+
+/* Returns index offset - starting from 0 */
+static int
+bna_ib_reserve_idx(struct bna_ib *ib)
+{
+ struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+ struct bna_ibidx_seg *idx_seg;
+ int idx;
+ int num_idx;
+ int q_idx;
+
+ /* Find the first free index position */
+ bna_ib_find_free_ibidx(ib->idx_mask, idx);
+ if (idx == BFI_IBIDX_MAX_SEGSIZE)
+ return -1;
+
+ /*
+ * Calculate the total number of indexes held by this IB,
+ * including the index newly reserved above.
+ */
+ bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
+
+ /* See if there is a free space in the index segment held by this IB */
+ if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
+ ib->idx_mask |= (1 << idx);
+ return idx;
+ }
+
+ if (ib->start_count)
+ return -1;
+
+ /* Allocate a new segment */
+ bna_ib_select_segpool(num_idx, q_idx);
+ while (1) {
+ if (q_idx == BFI_IBIDX_TOTAL_POOLS)
+ return -1;
+ if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
+ break;
+ q_idx++;
+ }
+ bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
+ bfa_q_qe_init(&idx_seg->qe);
+
+ /* Free the old segment */
+ if (ib->idx_seg) {
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
+ list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
+ }
+
+ ib->idx_seg = idx_seg;
+
+ ib->idx_mask |= (1 << idx);
+
+ return idx;
+}
+
+static void
+bna_ib_release_idx(struct bna_ib *ib, int idx)
+{
+ struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+ struct bna_ibidx_seg *idx_seg;
+ int num_idx;
+ int cur_q_idx;
+ int new_q_idx;
+
+ ib->idx_mask &= ~(1 << idx);
+
+ if (ib->start_count)
+ return;
+
+ bna_ib_count_ibidx(ib->idx_mask, num_idx);
+
+ /*
+ * Free the segment, if there are no more indexes in the segment
+ * held by this IB
+ */
+ if (!num_idx) {
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
+ list_add_tail(&ib->idx_seg->qe,
+ &ib_mod->ibidx_seg_pool[cur_q_idx]);
+ ib->idx_seg = NULL;
+ return;
+ }
+
+ /* See if we can move to a smaller segment */
+ bna_ib_select_segpool(num_idx, new_q_idx);
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
+ while (new_q_idx < cur_q_idx) {
+ if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
+ break;
+ new_q_idx++;
+ }
+ if (new_q_idx < cur_q_idx) {
+ /* Select the new smaller segment */
+ bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
+ bfa_q_qe_init(&idx_seg->qe);
+ /* Free the old segment */
+ list_add_tail(&ib->idx_seg->qe,
+ &ib_mod->ibidx_seg_pool[cur_q_idx]);
+ ib->idx_seg = idx_seg;
+ }
+}
+
+static int
+bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
+{
+ if (ib->start_count)
+ return -1;
+
+ ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
+ ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
+ ib->ib_config.interpkt_count = ib_config->interpkt_count;
+ ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
+
+ ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
+ if (ib->intr->intr_type == BNA_INTR_T_MSIX)
+ ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
+
+ return 0;
+}
+
+static void
+bna_ib_start(struct bna_ib *ib)
+{
+ struct bna_ib_blk_mem ib_cfg;
+ struct bna_ib_blk_mem *ib_mem;
+ u32 pg_num;
+ u32 intx_mask;
+ int i;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ ib->start_count++;
+
+ if (ib->start_count > 1)
+ return;
+
+ ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
+ ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
+
+ ib_cfg.clsc_n_ctrl_n_msix = (((u32)
+ ib->ib_config.coalescing_timeo << 16) |
+ ((u32)ib->ib_config.ctrl_flags << 8) |
+ (ib->intr->vector));
+ ib_cfg.ipkt_n_ent_n_idxof =
+ ((u32)
+ (ib->ib_config.interpkt_timeo & 0xf) << 16) |
+ ((u32)ib->idx_seg->ib_seg_size << 8) |
+ (ib->idx_seg->ib_idx_tbl_offset);
+ ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
+ ib->ib_config.interpkt_count << 24);
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
+ HQM_IB_RAM_BASE_OFFSET);
+ writel(pg_num, ib->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
+ HQM_IB_RAM_BASE_OFFSET);
+
+ ib_mem = (struct bna_ib_blk_mem *)0;
+ off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
+ writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
+ writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
+ writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
+ writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
+ writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
+
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->ib_config.coalescing_timeo, 0);
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ writel(pg_num, ib->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
+ off = (unsigned long)
+ ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
+ writel(0, base_addr + off);
+ }
+
+ if (ib->intr->intr_type == BNA_INTR_T_INTX) {
+ bna_intx_disable(ib->bna, intx_mask);
+ intx_mask &= ~(ib->intr->vector);
+ bna_intx_enable(ib->bna, intx_mask);
+ }
+}
+
+static void
+bna_ib_stop(struct bna_ib *ib)
+{
+ u32 intx_mask;
+
+ ib->start_count--;
+
+ if (ib->start_count == 0) {
+ writel(BNA_DOORBELL_IB_INT_DISABLE,
+ ib->door_bell.doorbell_addr);
+ if (ib->intr->intr_type == BNA_INTR_T_INTX) {
+ bna_intx_disable(ib->bna, intx_mask);
+ intx_mask |= (ib->intr->vector);
+ bna_intx_enable(ib->bna, intx_mask);
+ }
+ }
+}
+
+static void
+bna_ib_fail(struct bna_ib *ib)
+{
+ ib->start_count = 0;
+}
+
+/**
+ * RXF
+ */
+static void rxf_enable(struct bna_rxf *rxf);
+static void rxf_disable(struct bna_rxf *rxf);
+static void __rxf_config_set(struct bna_rxf *rxf);
+static void __rxf_rit_set(struct bna_rxf *rxf);
+static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
+static int rxf_process_packet_filter(struct bna_rxf *rxf);
+static int rxf_clear_packet_filter(struct bna_rxf *rxf);
+static void rxf_reset_packet_filter(struct bna_rxf *rxf);
+static void rxf_cb_enabled(void *arg, int status);
+static void rxf_cb_disabled(void *arg, int status);
+static void bna_rxf_cb_stats_cleared(void *arg, int status);
+static void __rxf_enable(struct bna_rxf *rxf);
+static void __rxf_disable(struct bna_rxf *rxf);
+
+bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+
+static struct bfa_sm_table rxf_sm_table[] = {
+ {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
+ {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
+ {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
+ {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
+ {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
+ {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
+ {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
+ {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
+ {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
+};
+
+static void
+bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
+{
+ call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
+}
+
+static void
+bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_START:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
+ break;
+
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_FAIL:
+ /* No-op */
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ case RXF_E_STARTED:
+ case RXF_E_STOPPED:
+ case RXF_E_CAM_FLTR_RESP:
+ /**
+ * These events are received due to flushing of mbox
+ * when device fails
+ */
+ /* No-op */
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
+{
+ __rxf_config_set(rxf);
+ __rxf_rit_set(rxf);
+ rxf_enable(rxf);
+}
+
+static void
+bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ /**
+ * STOP originates from bnad. When this happens,
+ * it cannot be waiting for a filter update
+ */
+ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
+ break;
+
+ case RXF_E_FAIL:
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ /* No-op */
+ break;
+
+ case RXF_E_STARTED:
+ /**
+ * Force rxf_process_filter() to go through initial
+ * config
+ */
+ if ((rxf->ucast_active_mac != NULL) &&
+ (rxf->ucast_pending_set == 0))
+ rxf->ucast_pending_set = 1;
+
+ if (rxf->rss_status == BNA_STATUS_T_ENABLED)
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ case RXF_E_RESUME:
+ rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
+{
+ if (!rxf_process_packet_filter(rxf)) {
+ /* No more pending CAM entries to update */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ /**
+ * STOP originates from bnad. When this happens,
+ * it cannot be waiting for a filter update
+ */
+ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
+ break;
+
+ case RXF_E_FAIL:
+ rxf_reset_packet_filter(rxf);
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ /* No-op */
+ break;
+
+ case RXF_E_CAM_FLTR_RESP:
+ if (!rxf_process_packet_filter(rxf)) {
+ /* No more pending CAM entries to update */
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+ break;
+
+ case RXF_E_PAUSE:
+ case RXF_E_RESUME:
+ rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_started_entry(struct bna_rxf *rxf)
+{
+ call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
+
+ if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_PAUSE);
+ else
+ bfa_fsm_send_event(rxf, RXF_E_RESUME);
+ }
+
+}
+
+static void
+bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
+ /* Hack to get the FSM to start clearing CAM entries */
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
+ break;
+
+ case RXF_E_FAIL:
+ rxf_reset_packet_filter(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
+ break;
+
+ case RXF_E_RESUME:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
+{
+ /**
+ * Note: Do not add rxf_clear_packet_filter here.
+ * It will overstep mbox when this transition happens:
+ * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
+ */
+}
+
+static void
+bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ rxf_reset_packet_filter(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_RESP:
+ if (!rxf_clear_packet_filter(rxf)) {
+ /* No more pending CAM entries to clear */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
+ rxf_disable(rxf);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
+{
+ /**
+ * NOTE: Do not add rxf_disable here.
+ * It will overstep mbox when this transition happens:
+ * start_wait -> stop_wait on RXF_E_STOP event
+ */
+}
+
+static void
+bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STARTED:
+ /**
+ * This event is received due to abrupt transition from
+ * bna_rxf_sm_start_wait state on receiving
+ * RXF_E_STOP event
+ */
+ rxf_disable(rxf);
+ break;
+
+ case RXF_E_STOPPED:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ break;
+
+ case RXF_E_RESUME:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags &=
+ ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
+ __rxf_disable(rxf);
+}
+
+static void
+bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of disabling rxf, initiated by
+ * bnad.
+ */
+ call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STOPPED:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ break;
+
+ /*
+ * Since PAUSE/RESUME can only be sent by bnad, we don't expect
+ * any other event during these states
+ */
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
+ rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
+ __rxf_enable(rxf);
+}
+
+static void
+bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of disabling rxf, initiated by
+ * bnad.
+ */
+ call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STARTED:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ break;
+
+ /*
+ * Since PAUSE/RESUME can only be sent by bnad, we don't expect
+ * any other event during these states
+ */
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
+{
+ __bna_rxf_stat_clr(rxf);
+}
+
+static void
+bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ case RXF_E_STAT_CLEARED:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+__rxf_enable(struct bna_rxf *rxf)
+{
+ struct bfi_ll_rxf_multi_req ll_req;
+ u32 bm[2] = {0, 0};
+
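+ /* RxF ids are addressed through a 64-bit mask split into two 32-bit words */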
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+ ll_req.enable = 1;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ rxf_cb_enabled, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_disable(struct bna_rxf *rxf)
+{
+ struct bfi_ll_rxf_multi_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+ ll_req.enable = 0;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ rxf_cb_disabled, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_config_set(struct bna_rxf *rxf)
+{
+ u32 i;
+ struct bna_rss_mem *rss_mem;
+ struct bna_rx_fndb_ram *rx_fndb_ram;
+ struct bna *bna = rxf->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ RSS_TABLE_BASE_OFFSET);
+
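+ /* NULL-based struct pointer is used only to compute register offsets (offsetof-style) */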
+ rss_mem = (struct bna_rss_mem *)0;
+
+ /* Configure RSS if required */
+ if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
+ /* configure RSS Table */
+ writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
+ bna->port_num, RSS_TABLE_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ /* temporarily disable RSS while the hash key is written */
+ off = (unsigned long)&rss_mem[0].type_n_hash;
+ writel(0, base_addr + off);
+
+ for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
+ off = (unsigned long)
+ &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
+ writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
+ base_addr + off);
+ }
+
+ off = (unsigned long)&rss_mem[0].type_n_hash;
+ writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
+ base_addr + off);
+ }
+
+ /* Configure RxF */
+ writel(BNA_GET_PAGE_NUM(
+ LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
+ RX_FNDB_RAM_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ RX_FNDB_RAM_BASE_OFFSET);
+
+ rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
+
+ /* We always use RSS table 0 */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
+ writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
+ base_addr + off);
+
+ /* small large buffer enable/disable */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
+ writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
+ base_addr + off);
+
+ /* RIT offset, HDS forced offset, multicast RxQ Id */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
+ writel((rxf->rit_segment->rit_offset << 16) |
+ (rxf->forced_offset << 8) |
+ (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
+ base_addr + off);
+
+ /*
+ * default vlan tag, default function enable, strip vlan bytes,
+ * HDS type, header size
+ */
+
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
+ writel(((u32)rxf->default_vlan_tag << 16) |
+ (rxf->ctrl_flags &
+ (BNA_RXF_CF_DEFAULT_VLAN |
+ BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
+ BNA_RXF_CF_VLAN_STRIP)) |
+ (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
+ rxf->hds_cfg.header_size,
+ base_addr + off);
+}
+
+void
+__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bna *bna = rxf->rx->bna;
+ int i;
+
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ if (status == BNA_STATUS_T_ENABLED) {
+ /* enable VLAN filtering on this function */
+ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
+ writel(rxf->vlan_filter_table[i],
+ BNA_GET_VLAN_MEM_ENTRY_ADDR
+ (bna->pcidev.pci_bar_kva, rxf->rxf_id,
+ i * 32));
+ }
+ } else {
+ /* disable VLAN filtering on this function */
+ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
+ writel(0xffffffff,
+ BNA_GET_VLAN_MEM_ENTRY_ADDR
+ (bna->pcidev.pci_bar_kva, rxf->rxf_id,
+ i * 32));
+ }
+ }
+}
+
+static void
+__rxf_rit_set(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ struct bna_rit_mem *rit_mem;
+ int i;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ FUNCTION_TO_RXQ_TRANSLATE);
+
+ rit_mem = (struct bna_rit_mem *)0;
+
+ writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
+ FUNCTION_TO_RXQ_TRANSLATE),
+ bna->regs.page_addr);
+
+ for (i = 0; i < rxf->rit_segment->rit_size; i++) {
+ off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
+ writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
+ rxf->rit_segment->rit[i].small_rxq_id,
+ base_addr + off);
+ }
+}
+
+static void
+__bna_rxf_stat_clr(struct bna_rxf *rxf)
+{
+ struct bfi_ll_stats_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = 0;
+ ll_req.txf_id_mask[0] = 0;
+ ll_req.txf_id_mask[1] = 0;
+
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_rxf_cb_stats_cleared, rxf);
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+rxf_enable(struct bna_rxf *rxf)
+{
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_STARTED);
+ else {
+ rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
+ __rxf_enable(rxf);
+ }
+}
+
+static void
+rxf_cb_enabled(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STARTED);
+}
+
+static void
+rxf_disable(struct bna_rxf *rxf)
+{
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_STOPPED);
+ else {
+ rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
+ __rxf_disable(rxf);
+ }
+}
+
+static void
+rxf_cb_disabled(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STOPPED);
+}
+
+void
+rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
+}
+
+static void
+bna_rxf_cb_stats_cleared(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
+}
+
+void
+rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
+ const struct bna_mac *mac_addr)
+{
+ struct bfi_ll_mac_addr_req req;
+
+ bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+ req.rxf_id = rxf->rxf_id;
+ memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
+ rxf_cb_cam_fltr_mbox_cmd, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static int
+rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Add multicast entries */
+ if (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ return 1;
+ }
+
+ /* Delete multicast entries previously added */
+ if (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
+{
+ /* Apply the VLAN filter */
+ if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
+ rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
+ !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ }
+
+ /* Apply RSS configuration */
+ if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
+ rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
+ if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
+ /* RSS is being disabled */
+ rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
+ __rxf_rit_set(rxf);
+ __rxf_config_set(rxf);
+ } else {
+ /* RSS is being enabled or reconfigured */
+ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
+ __rxf_rit_set(rxf);
+ __rxf_config_set(rxf);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Processes pending ucast, mcast entry addition/deletion and issues the
+ * mailbox command. Also processes pending filter configuration - promiscuous
+ * mode, default mode, allmulti mode - and issues the mailbox command or
+ * directly applies it to h/w
+ */
+static int
+rxf_process_packet_filter(struct bna_rxf *rxf)
+{
+ /* Set the default MAC first */
+ if (rxf->ucast_pending_set > 0) {
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
+ rxf->ucast_active_mac);
+ rxf->ucast_pending_set--;
+ return 1;
+ }
+
+ if (rxf_process_packet_filter_ucast(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_mcast(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_promisc(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_default(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_allmulti(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_vlan(rxf))
+ return 1;
+
+ return 0;
+}
+
+static int
+rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* 3. delete pending mcast entries */
+ if (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ return 1;
+ }
+
+ /* 4. clear active mcast entries; move them to pending_add_q */
+ if (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * In the rxf stop path, processes pending ucast/mcast delete queue and issues
+ * the mailbox command. Moves the active ucast/mcast entries to pending add q,
+ * so that they are added to CAM again in the rxf start path. Moves the current
+ * filter settings - promiscuous, default, allmulti - to pending filter
+ * configuration
+ */
+static int
+rxf_clear_packet_filter(struct bna_rxf *rxf)
+{
+ if (rxf_clear_packet_filter_ucast(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_mcast(rxf))
+ return 1;
+
+ /* 5. clear active default MAC in the CAM */
+ if (rxf->ucast_pending_set > 0)
+ rxf->ucast_pending_set = 0;
+
+ if (rxf_clear_packet_filter_promisc(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_default(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_allmulti(rxf))
+ return 1;
+
+ return 0;
+}
+
+static void
+rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* 3. Move active mcast entries to pending_add_q */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_add_q);
+ }
+
+ /* 4. Throw away delete pending mcast entries */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+}
+
+/**
+ * In the rxf fail path, throws away the ucast/mcast entries pending for
+ * deletion, moves all active ucast/mcast entries to the pending queue so that
+ * they are added back to CAM in the rxf start path. Also moves the current
+ * filter configuration to pending filter configuration.
+ */
+static void
+rxf_reset_packet_filter(struct bna_rxf *rxf)
+{
+ rxf_reset_packet_filter_ucast(rxf);
+
+ rxf_reset_packet_filter_mcast(rxf);
+
+ /* 5. Turn off ucast set flag */
+ rxf->ucast_pending_set = 0;
+
+ rxf_reset_packet_filter_promisc(rxf);
+
+ rxf_reset_packet_filter_default(rxf);
+
+ rxf_reset_packet_filter_allmulti(rxf);
+}
+
+static void
+bna_rxf_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config)
+{
+ struct list_head *qe;
+ struct bna_rxp *rxp;
+
+ /* rxf_id is initialized during rx_mod init */
+ rxf->rx = rx;
+
+ INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
+ rxf->ucast_pending_set = 0;
+ INIT_LIST_HEAD(&rxf->ucast_active_q);
+ rxf->ucast_active_mac = NULL;
+
+ INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
+ INIT_LIST_HEAD(&rxf->mcast_active_q);
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+
+ if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
+ rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
+
+ rxf->rxf_oper_state = (q_config->paused) ?
+ BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
+
+ bna_rxf_adv_init(rxf, rx, q_config);
+
+ rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
+ q_config->num_paths);
+
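+ /* Multicast packets are delivered on the first rx-path's (large/only) RxQ */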
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ if (q_config->rxp_type == BNA_RXP_SINGLE)
+ rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
+ else
+ rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
+ break;
+ }
+
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ memset(rxf->vlan_filter_table, 0,
+ (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+}
+
+static void
+bna_rxf_uninit(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac;
+
+ bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
+ rxf->rit_segment = NULL;
+
+ rxf->ucast_pending_set = 0;
+
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+
+ if (rxf->ucast_active_mac) {
+ bfa_q_qe_init(&rxf->ucast_active_mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
+ rxf->ucast_active_mac);
+ rxf->ucast_active_mac = NULL;
+ }
+
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ rxf->rx = NULL;
+}
+
+static void
+bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+ if (rx->rxf.rxf_id < 32)
+ rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+ else
+ rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
+ 1 << (rx->rxf.rxf_id - 32));
+}
+
+static void
+bna_rxf_start(struct bna_rxf *rxf)
+{
+ rxf->start_cbfn = bna_rx_cb_rxf_started;
+ rxf->start_cbarg = rxf->rx;
+ rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
+ bfa_fsm_send_event(rxf, RXF_E_START);
+}
+
+static void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+ if (rx->rxf.rxf_id < 32)
+ rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
+ else
+ rx->bna->rx_mod.rxf_bmap[1] &=
+ ~((u32)1 << (rx->rxf.rxf_id - 32));
+}
+
+static void
+bna_rxf_stop(struct bna_rxf *rxf)
+{
+ rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
+ rxf->stop_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_STOP);
+}
+
+static void
+bna_rxf_fail(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags |= BNA_RXF_FL_FAILED;
+ bfa_fsm_send_event(rxf, RXF_E_FAIL);
+}
+
+int
+bna_rxf_state_get(struct bna_rxf *rxf)
+{
+ return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
+}
+
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->ucast_active_mac == NULL) {
+ rxf->ucast_active_mac =
+ bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (rxf->ucast_active_mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&rxf->ucast_active_mac->qe);
+ }
+
+ memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
+ rxf->ucast_pending_set++;
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Check if already added */
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ /* Check if pending addition */
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ return BNA_CB_MCAST_LIST_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac;
+ struct bna_mac *mac1;
+ int skip;
+ int delete;
+ int need_hw_config = 0;
+ int i;
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
+ }
+
+ /* Schedule for addition */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ skip = 0;
+
+ /* Skip if already added */
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac1 = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
+ mac);
+ skip = 1;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ /* Skip if pending addition */
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac1 = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
+ mac);
+ skip = 1;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ need_hw_config = 1;
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ /**
+ * Delete the entries that are in the pending_add_q but not
+ * in the new list
+ */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
+ if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
+ delete = 0;
+ break;
+ }
+ mcaddr += ETH_ALEN;
+ }
+ if (delete)
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ else
+ list_add_tail(&mac->qe, &list_head);
+ }
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ /**
+ * Schedule entries for deletion that are in the active_q but not
+ * in the new list
+ */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
+ if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
+ delete = 0;
+ break;
+ }
+ mcaddr += ETH_ALEN;
+ }
+ if (delete) {
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ need_hw_config = 1;
+ } else {
+ list_add_tail(&mac->qe, &list_head);
+ }
+ }
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ return BNA_CB_MCAST_LIST_FULL;
+}
+
+void
+bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> 5);
+ int bit = (1 << (vlan_id & 0x1F));
+
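+ /* vlan_filter_table is a bitmap: one bit per VLAN id, 32 ids per u32 word */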
+ rxf->vlan_filter_table[index] |= bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+void
+bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> 5);
+ int bit = (1 << (vlan_id & 0x1F));
+
+ rxf->vlan_filter_table[index] &= ~bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/**
+ * RX
+ */
+#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
+ struct bna_doorbell_qset *_qset; \
+ unsigned long off; \
+ (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
+ (q)->rcb->q_depth = (qdepth); \
+ (q)->rcb->unmap_q = unmapq_mem; \
+ (q)->rcb->rxq = (q); \
+ (q)->rcb->cq = &(rxp)->cq; \
+ (q)->rcb->bnad = (bna)->bnad; \
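+ /* NULL-based pointer arithmetic yields this RxQ's doorbell byte offset */ \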
+ _qset = (struct bna_doorbell_qset *)0; \
+ off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
+ (q)->rcb->q_dbell = off + \
+ BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
+ (q)->rcb->id = _id; \
+} while (0)
+
+#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
+ (qcfg)->num_paths : ((qcfg)->num_paths * 2))
+
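+ /* Number of pages needed to hold 'size' bytes, rounded up */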
+#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
+ (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
+
+#define call_rx_stop_callback(rx, status) \
+ if ((rx)->stop_cbfn) { \
+ (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
+ (rx)->stop_cbfn = NULL; \
+ (rx)->stop_cbarg = NULL; \
+ }
+
+/*
+ * Since rx_enable is a synchronous callback, there is no start_cbfn required.
+ * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
+ * for each rxpath.
+ */
+
+#define call_rx_disable_cbfn(rx, status) \
+ if ((rx)->disable_cbfn) { \
+ (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
+ status); \
+ (rx)->disable_cbfn = NULL; \
+ (rx)->disable_cbarg = NULL; \
+ }
+
+#define rxqs_reqd(type, num_rxqs) \
+ (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
+
+#define rx_ib_fail(rx) \
+do { \
+ struct bna_rxp *rxp; \
+ struct list_head *qe; \
+ list_for_each(qe, &(rx)->rxp_q) { \
+ rxp = (struct bna_rxp *)qe; \
+ bna_ib_fail(rxp->cq.ib); \
+ } \
+} while (0)
+
+static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
+static void __bna_rxq_start(struct bna_rxq *rxq);
+static void __bna_cq_start(struct bna_cq *cq);
+static void bna_rit_create(struct bna_rx *rx);
+static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
+static void bna_rx_cb_rxq_stopped_all(void *arg);
+
+bfa_fsm_state_decl(bna_rx, stopped,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, started,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+
+static const struct bfa_sm_table rx_sm_table[] = {
+ {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
+ {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
+ {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
+ {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
+ {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
+};
+
+static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ }
+
+ call_rx_stop_callback(rx, BNA_CB_SUCCESS);
+}
+
+static void bna_rx_sm_stopped(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
+ break;
+ case RX_E_STOP:
+ call_rx_stop_callback(rx, BNA_CB_SUCCESS);
+ break;
+ case RX_E_FAIL:
+ /* no-op */
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+
+}
+
+static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+
+ /* Setup the RIT */
+ bna_rit_create(rx);
+
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_start(rxp->cq.ib);
+ GET_RXQS(rxp, q0, q1);
+ q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
+ __bna_rxq_start(q0);
+ rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
+ if (q1) {
+ __bna_rxq_start(q1);
+ rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
+ }
+ __bna_cq_start(&rxp->cq);
+ }
+
+ bna_rxf_start(&rx->rxf);
+}
+
+static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ case RX_E_RXF_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_started);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_started_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ /* Start IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_ack(&rxp->cq.ib->door_bell, 0);
+ }
+
+ bna_llport_admin_up(&rx->bna->port.llport);
+}
+
+void
+bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ bna_llport_admin_down(&rx->bna->port.llport);
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ case RX_E_STOP:
+ bna_llport_admin_down(&rx->bna->port.llport);
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
+{
+ bna_rxf_stop(&rx->rxf);
+}
+
+void
+bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_RXF_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
+ break;
+ case RX_E_RXF_STARTED:
+ /**
+ * RxF was in the process of starting up when
+ * RXF_E_STOP was issued. Ignore this event
+ */
+ break;
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+
+}
+
+void
+bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct list_head *qe;
+ u32 rxq_mask[2] = {0, 0};
+
+ /* Only one call to multi-rxq-stop for all RXPs in this RX */
+ bfa_wc_up(&rx->rxq_stop_wc);
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ GET_RXQS(rxp, q0, q1);
+ if (q0->rxq_id < 32)
+ rxq_mask[0] |= ((u32)1 << q0->rxq_id);
+ else
+ rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
+ if (q1) {
+ if (q1->rxq_id < 32)
+ rxq_mask[0] |= ((u32)1 << q1->rxq_id);
+ else
+ rxq_mask[1] |= ((u32)
+ 1 << (q1->rxq_id - 32));
+ }
+ }
+
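+ /* The last rx-path's mbox element carries the single stop request for all RxQs */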
+ __bna_multi_rxq_stop(rxp, rxq_mask);
+}
+
+void
+bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ struct bna_rxp *rxp = NULL;
+ struct list_head *qe;
+
+ switch (event) {
+ case RX_E_RXQ_STOPPED:
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ bna_ib_stop(rxp->cq.ib);
+ }
+ /* Fall through */
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
+{
+ struct bfi_ll_q_stop_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
+ ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
+ ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
+ bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_rx_cb_multi_rxq_stopped, rxp);
+ bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
+}
+
+void
+__bna_rxq_start(struct bna_rxq *rxq)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_rxq_mem rxq_cfg, *rxq_mem;
+ struct bna_dma_addr cur_q_addr;
+ /* struct bna_doorbell_qset *qset; */
+ struct bna_qpt *qpt;
+ u32 pg_num;
+ struct bna *bna = rxq->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ qpt = &rxq->qpt;
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
+ rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
+ (qpt->page_size >> 2);
+ rxq_cfg.sg_n_cq_n_cns_ptr =
+ ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
+ rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
+ BNA_Q_IDLE_STATE;
+ rxq_cfg.next_qid = 0x0 | (0x3 << 8);
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, bna->regs.page_addr);
+
+ /* Write to h/w */
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+
+ q_mem = (struct bna_rxtx_q_mem *)0;
+ rxq_mem = &q_mem[rxq->rxq_id].rxq;
+
+ off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
+ writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
+ writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->cur_q_entry_lo;
+ writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->cur_q_entry_hi;
+ writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
+ writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->entry_n_pg_size;
+ writel(rxq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
+ writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
+ writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->next_qid;
+ writel(rxq_cfg.next_qid, base_addr + off);
+
+ rxq->rcb->producer_index = 0;
+ rxq->rcb->consumer_index = 0;
+}
+
+void
+__bna_cq_start(struct bna_cq *cq)
+{
+ struct bna_cq_mem cq_cfg, *cq_mem;
+ const struct bna_qpt *qpt;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+ struct bna *bna = cq->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ qpt = &cq->qpt;
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ /*
+ * Fill out structure, to be subsequently written
+ * to hardware
+ */
+ cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
+ cq_cfg.entry_n_pg_size =
+ ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
+ cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
+ ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
+ cq_cfg.q_state = BNA_Q_IDLE_STATE;
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
+ HQM_CQ_RAM_BASE_OFFSET);
+
+ writel(pg_num, bna->regs.page_addr);
+
+ /* H/W write */
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ HQM_CQ_RAM_BASE_OFFSET);
+
+ cq_mem = (struct bna_cq_mem *)0;
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
+ writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
+ writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
+ writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
+ writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
+ writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
+ writel(cq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
+ writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].q_state;
+ writel(cq_cfg.q_state, base_addr + off);
+
+ cq->ccb->producer_index = 0;
+ *(cq->ccb->hw_producer_index) = 0;
+}
+
+void
+bna_rit_create(struct bna_rx *rx)
+{
+ struct list_head *qe_rxp;
+ struct bna *bna;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ int offset;
+
+ bna = rx->bna;
+
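+ /* One RIT entry per rx-path: large RxQ id plus the small RxQ id (0 if absent) */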
+ offset = 0;
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ GET_RXQS(rxp, q0, q1);
+ rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
+ rx->rxf.rit_segment->rit[offset].small_rxq_id =
+ (q1 ? q1->rxq_id : 0);
+ offset++;
+ }
+}
+
+static int
+_rx_can_satisfy(struct bna_rx_mod *rx_mod,
+ struct bna_rx_config *rx_cfg)
+{
+ if ((rx_mod->rx_free_count == 0) ||
+ (rx_mod->rxp_free_count == 0) ||
+ (rx_mod->rxq_free_count == 0))
+ return 0;
+
+ if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < rx_cfg->num_paths))
+ return 0;
+ } else {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
+ return 0;
+ }
+
+ if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
+ return 0;
+
+ return 1;
+}
+
+static struct bna_rxq *
+_get_free_rxq(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rxq *rxq = NULL;
+ struct list_head *qe = NULL;
+
+ bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ if (qe) {
+ rx_mod->rxq_free_count--;
+ rxq = (struct bna_rxq *)qe;
+ }
+ return rxq;
+}
+
+static void
+_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+{
+ bfa_q_qe_init(&rxq->qe);
+ list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+}
+
+static struct bna_rxp *
+_get_free_rxp(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rxp *rxp = NULL;
+
+ bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ if (qe) {
+ rx_mod->rxp_free_count--;
+
+ rxp = (struct bna_rxp *)qe;
+ }
+
+ return rxp;
+}
+
+static void
+_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+{
+ bfa_q_qe_init(&rxp->qe);
+ list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+}
+
+static struct bna_rx *
+_get_free_rx(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rx *rx = NULL;
+
+ bfa_q_deq(&rx_mod->rx_free_q, &qe);
+ if (qe) {
+ rx_mod->rx_free_count--;
+
+ rx = (struct bna_rx *)qe;
+ bfa_q_qe_init(qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ }
+
+ return rx;
+}
+
+static void
+_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+{
+ bfa_q_qe_init(&rx->qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+}
+
+static void
+_rx_init(struct bna_rx *rx, struct bna *bna)
+{
+ rx->bna = bna;
+ rx->rx_flags = 0;
+
+ INIT_LIST_HEAD(&rx->rxp_q);
+
+ rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
+ rx->rxq_stop_wc.wc_cbarg = rx;
+ rx->rxq_stop_wc.wc_count = 0;
+
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+}
+
+static void
+_rxp_add_rxqs(struct bna_rxp *rxp,
+ struct bna_rxq *q0,
+ struct bna_rxq *q1)
+{
+ switch (rxp->type) {
+ case BNA_RXP_SINGLE:
+ rxp->rxq.single.only = q0;
+ rxp->rxq.single.reserved = NULL;
+ break;
+ case BNA_RXP_SLR:
+ rxp->rxq.slr.large = q0;
+ rxp->rxq.slr.small = q1;
+ break;
+ case BNA_RXP_HDS:
+ rxp->rxq.hds.data = q0;
+ rxp->rxq.hds.hdr = q1;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+_rxq_qpt_init(struct bna_rxq *rxq,
+ struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxq->qpt.page_count = page_count;
+ rxq->qpt.page_size = page_size;
+
+ rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
+
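+ /* Fill the s/w page table (kva) and the h/w QPT (dma addresses) for every page */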
+ for (i = 0; i < rxq->qpt.page_count; i++) {
+ rxq->rcb->sw_qpt[i] = page_mem[i].kva;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+
+ }
+}
+
+static void
+_rxp_cqpt_setup(struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxp->cq.qpt.page_count = page_count;
+ rxp->cq.qpt.page_size = page_size;
+
+ rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxp->cq.qpt.page_count; i++) {
+ rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+
+ }
+}
+
+static void
+_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
+{
+ list_add_tail(&rxp->qe, &rx->rxp_q);
+}
+
+static void
+_init_rxmod_queues(struct bna_rx_mod *rx_mod)
+{
+ INIT_LIST_HEAD(&rx_mod->rx_free_q);
+ INIT_LIST_HEAD(&rx_mod->rxq_free_q);
+ INIT_LIST_HEAD(&rx_mod->rxp_free_q);
+ INIT_LIST_HEAD(&rx_mod->rx_active_q);
+
+ rx_mod->rx_free_count = 0;
+ rx_mod->rxq_free_count = 0;
+ rx_mod->rxp_free_count = 0;
+}
+
+static void
+_rx_ctor(struct bna_rx *rx, int id)
+{
+ bfa_q_qe_init(&rx->qe);
+ INIT_LIST_HEAD(&rx->rxp_q);
+ rx->bna = NULL;
+
+ rx->rxf.rxf_id = id;
+
+ /* FIXME: mbox_qe ctor()?? */
+ bfa_q_qe_init(&rx->mbox_qe.qe);
+
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+}
+
+void
+bna_rx_cb_multi_rxq_stopped(void *arg, int status)
+{
+ struct bna_rxp *rxp = (struct bna_rxp *)arg;
+
+ bfa_wc_down(&rxp->rx->rxq_stop_wc);
+}
+
+void
+bna_rx_cb_rxq_stopped_all(void *arg)
+{
+ struct bna_rx *rx = (struct bna_rx *)arg;
+
+ bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
+}
+
+static void
+bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ bfa_wc_down(&rx_mod->rx_stop_wc);
+}
+
+static void
+bna_rx_mod_cb_rx_stopped_all(void *arg)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ if (rx_mod->stop_cbfn)
+ rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
+ rx_mod->stop_cbfn = NULL;
+}
+
+static void
+bna_rx_start(struct bna_rx *rx)
+{
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ if (rx->rx_flags & BNA_RX_F_ENABLE)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+static void
+bna_rx_stop(struct bna_rx *rx)
+{
+ rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+ if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
+ else {
+ rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
+ rx->stop_cbarg = &rx->bna->rx_mod;
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+static void
+bna_rx_fail(struct bna_rx *rx)
+{
+ /* Indicate port is not enabled, and failed */
+ rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+ rx->rx_flags |= BNA_RX_F_PORT_FAILED;
+ bfa_fsm_send_event(rx, RX_E_FAIL);
+}
+
+void
+bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
+ if (type == BNA_RX_T_LOOPBACK)
+ rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_start(rx);
+ }
+}
+
+void
+bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
+
+ /**
+ * Before calling bna_rx_stop(), increment rx_stop_wc as many times
+ * as we are going to call bna_rx_stop
+ */
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bfa_wc_up(&rx_mod->rx_stop_wc);
+ }
+
+ if (rx_mod->rx_stop_wc.wc_count == 0) {
+ rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
+ rx_mod->stop_cbfn = NULL;
+ return;
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_stop(rx);
+ }
+}
+
+void
+bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ bna_rx_fail(rx);
+ }
+}
+
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int index;
+ struct bna_rx *rx_ptr;
+ struct bna_rxp *rxp_ptr;
+ struct bna_rxq *rxq_ptr;
+
+ rx_mod->bna = bna;
+ rx_mod->flags = 0;
+
+ rx_mod->rx = (struct bna_rx *)
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxp = (struct bna_rxp *)
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxq = (struct bna_rxq *)
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ /* Initialize the queues */
+ _init_rxmod_queues(rx_mod);
+
+ /* Build RX queues */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rx_ptr = &rx_mod->rx[index];
+ _rx_ctor(rx_ptr, index);
+ list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+ }
+
+ /* build RX-path queue */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rxp_ptr = &rx_mod->rxp[index];
+ rxp_ptr->cq.cq_id = index;
+ bfa_q_qe_init(&rxp_ptr->qe);
+ list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+ }
+
+ /* build RXQ queue */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rxq_ptr = &rx_mod->rxq[index];
+ rxq_ptr->rxq_id = index;
+
+ bfa_q_qe_init(&rxq_ptr->qe);
+ list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+ }
+
+ rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
+ rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
+ rx_mod->rx_stop_wc.wc_count = 0;
+}
+
+void
+bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxp_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxq_free_q)
+ i++;
+
+ rx_mod->bna = NULL;
+}
+
+int
+bna_rx_state_get(struct bna_rx *rx)
+{
+ return bfa_sm_to_state(rx_sm_table, rx->fsm);
+}
+
+void
+bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
+{
+ u32 cq_size, hq_size, dq_size;
+ u32 cpage_count, hpage_count, dpage_count;
+ struct bna_mem_info *mem_info;
+ u32 cq_depth;
+ u32 hq_depth;
+ u32 dq_depth;
+
+ dq_depth = q_cfg->q_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ cq_depth = dq_depth + hq_depth;
+
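+ /* Round each queue depth up to a power of 2 before computing ring sizes */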
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_size = cq_depth * BFI_CQ_WI_SIZE;
+ cq_size = ALIGN(cq_size, PAGE_SIZE);
+ cpage_count = SIZE_TO_PAGES(cq_size);
+
+ BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_size = dq_depth * BFI_RXQ_WI_SIZE;
+ dq_size = ALIGN(dq_size, PAGE_SIZE);
+ dpage_count = SIZE_TO_PAGES(dq_size);
+
+ if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
+ BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_size = hq_depth * BFI_RXQ_WI_SIZE;
+ hq_size = ALIGN(hq_size, PAGE_SIZE);
+ hpage_count = SIZE_TO_PAGES(hq_size);
+ } else {
+ hpage_count = 0;
+ }
+
+ /* CCB structures */
+ res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_ccb);
+ mem_info->num = q_cfg->num_paths;
+
+ /* RCB structures */
+ res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_rcb);
+ mem_info->num = BNA_GET_RXQS(q_cfg);
+
+ /* Completion QPT */
+ res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Completion s/w QPT */
+ res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = cpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Completion QPT pages */
+ res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = cpage_count * q_cfg->num_paths;
+
+ /* Data QPTs */
+ res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Data s/w QPTs */
+ res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = dpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Data QPT pages */
+ res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = dpage_count * q_cfg->num_paths;
+
+ /* Hdr QPTs */
+ res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ /* Hdr s/w QPTs */
+ res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = hpage_count * sizeof(void *);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ /* Hdr QPT pages */
+ res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = (hpage_count ? PAGE_SIZE : 0);
+ mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
+
+ /* RX Interrupts */
+ res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
+}
+
+struct bna_rx *
+bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info,
+ void *priv)
+{
+ struct bna_rx_mod *rx_mod = &bna->rx_mod;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0;
+ struct bna_rxq *q1;
+ struct bna_intr_info *intr_info;
+ u32 page_count;
+ struct bna_mem_descr *ccb_mem;
+ struct bna_mem_descr *rcb_mem;
+ struct bna_mem_descr *unmapq_mem;
+ struct bna_mem_descr *cqpt_mem;
+ struct bna_mem_descr *cswqpt_mem;
+ struct bna_mem_descr *cpage_mem;
+ struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
+ struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
+ struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
+ struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
+ struct bna_mem_descr *hpage_mem; /* hdr page mem */
+ struct bna_mem_descr *dpage_mem; /* data page mem */
+ int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
+ int dpage_count, hpage_count, rcb_idx;
+ struct bna_ib_config ibcfg;
+ /* Fail if we don't have enough RXPs, RXQs */
+ if (!_rx_can_satisfy(rx_mod, rx_cfg))
+ return NULL;
+
+ /* Initialize resource pointers */
+ intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
+ rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
+ unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
+ cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
+ cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
+ hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
+ dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
+ hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
+ dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
+ hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
+ dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
+
+ /* Compute q depth & page count */
+ page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+ /* Get RX pointer */
+ rx = _get_free_rx(rx_mod);
+ _rx_init(rx, bna);
+ rx->priv = priv;
+ rx->type = rx_cfg->rx_type;
+
+ rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
+ rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
+ rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
+ rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
+ rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
+
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
+ switch (rx->type) {
+ case BNA_RX_T_REGULAR:
+ if (!(rx->bna->rx_mod.flags &
+ BNA_RX_MOD_F_PORT_LOOPBACK))
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ break;
+ case BNA_RX_T_LOOPBACK:
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ break;
+ }
+ }
+
+ for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
+ rxp = _get_free_rxp(rx_mod);
+ rxp->type = rx_cfg->rxp_type;
+ rxp->rx = rx;
+ rxp->cq.rx = rx;
+
+ /* Get required RXQs, and queue them to rx-path */
+ q0 = _get_free_rxq(rx_mod);
+ if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
+ q1 = NULL;
+ else
+ q1 = _get_free_rxq(rx_mod);
+
+ /* Initialize IB */
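+ /* A single vector is shared by all rx-paths; otherwise each path uses its own vector */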
+ if (1 == intr_info->num) {
+ rxp->cq.ib = bna_ib_get(&bna->ib_mod,
+ intr_info->intr_type,
+ intr_info->idl[0].vector);
+ rxp->vector = intr_info->idl[0].vector;
+ } else {
+ rxp->cq.ib = bna_ib_get(&bna->ib_mod,
+ intr_info->intr_type,
+ intr_info->idl[i].vector);
+
+ /* Map the MSI-x vector used for this RXP */
+ rxp->vector = intr_info->idl[i].vector;
+ }
+
+ rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
+
+ ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+ ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
+ ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
+ ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
+
+ ret = bna_ib_config(rxp->cq.ib, &ibcfg);
+
+ /* Link rxqs to rxp */
+ _rxp_add_rxqs(rxp, q0, q1);
+
+ /* Link rxp to rx */
+ _rx_add_rxp(rx, rxp);
+
+ q0->rx = rx;
+ q0->rxp = rxp;
+
+ /* Initialize RCB for the large / data q */
+ q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
+ (void *)unmapq_mem[rcb_idx].kva);
+ rcb_idx++;
+ (q0)->rx_packets = (q0)->rx_bytes = 0;
+ (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
+
+ /* Initialize RXQs */
+ _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
+ &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
+ q0->rcb->page_idx = dpage_idx;
+ q0->rcb->page_count = dpage_count;
+ dpage_idx += dpage_count;
+
+ /* Call bnad to complete rcb setup */
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q0->rcb);
+
+ if (q1) {
+ q1->rx = rx;
+ q1->rxp = rxp;
+
+ q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
+ (void *)unmapq_mem[rcb_idx].kva);
+ rcb_idx++;
+ (q1)->buffer_size = (rx_cfg)->small_buff_size;
+ (q1)->rx_packets = (q1)->rx_bytes = 0;
+ (q1)->rx_packets_with_error =
+ (q1)->rxbuf_alloc_failed = 0;
+
+ _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
+ &hqpt_mem[i], &hsqpt_mem[i],
+ &hpage_mem[hpage_idx]);
+ q1->rcb->page_idx = hpage_idx;
+ q1->rcb->page_count = hpage_count;
+ hpage_idx += hpage_count;
+
+ /* Call bnad to complete rcb setup */
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q1->rcb);
+ }
+ /* Setup RXP::CQ */
+ rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
+ _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
+ &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
+ rxp->cq.ccb->page_idx = cpage_idx;
+ rxp->cq.ccb->page_count = page_count;
+ cpage_idx += page_count;
+
+ rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
+ rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
+
+ rxp->cq.ccb->producer_index = 0;
+ rxp->cq.ccb->q_depth = rx_cfg->q_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q_depth);
+ rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
+ rxp->cq.ccb->rcb[0] = q0->rcb;
+ if (q1)
+ rxp->cq.ccb->rcb[1] = q1->rcb;
+ rxp->cq.ccb->cq = &rxp->cq;
+ rxp->cq.ccb->bnad = bna->bnad;
+ rxp->cq.ccb->hw_producer_index =
+ ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
+ (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
+ *(rxp->cq.ccb->hw_producer_index) = 0;
+ rxp->cq.ccb->intr_type = intr_info->intr_type;
+ rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ rxp->cq.ccb->rx_coalescing_timeo =
+ rxp->cq.ib->ib_config.coalescing_timeo;
+ rxp->cq.ccb->id = i;
+
+ /* Call bnad to complete CCB setup */
+ if (rx->ccb_setup_cbfn)
+ rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
+
+ } /* for each rx-path */
+
+ bna_rxf_init(&rx->rxf, rx, rx_cfg);
+
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+
+ return rx;
+}
+
+void
+bna_rx_destroy(struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
+ struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ bna_rxf_uninit(&rx->rxf);
+
+ while (!list_empty(&rx->rxp_q)) {
+ bfa_q_deq(&rx->rxp_q, &rxp);
+ GET_RXQS(rxp, q0, q1);
+ /* Callback to bnad for destroying RCB */
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
+ q0->rcb = NULL;
+ q0->rxp = NULL;
+ q0->rx = NULL;
+ _put_free_rxq(rx_mod, q0);
+ if (q1) {
+ /* Callback to bnad for destroying RCB */
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
+ q1->rcb = NULL;
+ q1->rxp = NULL;
+ q1->rx = NULL;
+ _put_free_rxq(rx_mod, q1);
+ }
+ rxp->rxq.slr.large = NULL;
+ rxp->rxq.slr.small = NULL;
+ if (rxp->cq.ib) {
+ if (rxp->cq.ib_seg_offset != 0xff)
+ bna_ib_release_idx(rxp->cq.ib,
+ rxp->cq.ib_seg_offset);
+ bna_ib_put(ib_mod, rxp->cq.ib);
+ rxp->cq.ib = NULL;
+ }
+ /* Callback to bnad for destroying CCB */
+ if (rx->ccb_destroy_cbfn)
+ rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ rxp->cq.ccb = NULL;
+ rxp->rx = NULL;
+ _put_free_rxp(rx_mod, rxp);
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ if (qe == &rx->qe) {
+ list_del(&rx->qe);
+ bfa_q_qe_init(&rx->qe);
+ break;
+ }
+ }
+
+ rx->bna = NULL;
+ rx->priv = NULL;
+ _put_free_rx(rx_mod, rx);
+}
+
+void
+bna_rx_enable(struct bna_rx *rx)
+{
+ if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ return;
+
+ rx->rx_flags |= BNA_RX_F_ENABLE;
+ if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+		/* h/w should not be accessed. Treat it as if we are stopped */
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ } else {
+ rx->stop_cbfn = cbfn;
+ rx->stop_cbarg = rx->bna->bnad;
+
+ rx->rx_flags &= ~BNA_RX_F_ENABLE;
+
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+/**
+ * TX
+ */
+#define call_tx_stop_cbfn(tx, status)\
+do {\
+ if ((tx)->stop_cbfn)\
+ (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
+ (tx)->stop_cbfn = NULL;\
+ (tx)->stop_cbarg = NULL;\
+} while (0)
+
+#define call_tx_prio_change_cbfn(tx, status)\
+do {\
+ if ((tx)->prio_change_cbfn)\
+ (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
+ (tx)->prio_change_cbfn = NULL;\
+} while (0)
+
+static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
+ enum bna_cb_status status);
+static void bna_tx_cb_txq_stopped(void *arg, int status);
+static void bna_tx_cb_stats_cleared(void *arg, int status);
+static void __bna_tx_stop(struct bna_tx *tx);
+static void __bna_tx_start(struct bna_tx *tx);
+static void __bna_txf_stat_clr(struct bna_tx *tx);
+
+enum bna_tx_event {
+ TX_E_START = 1,
+ TX_E_STOP = 2,
+ TX_E_FAIL = 3,
+ TX_E_TXQ_STOPPED = 4,
+ TX_E_PRIO_CHANGE = 5,
+ TX_E_STAT_CLEARED = 6,
+};
+
+enum bna_tx_state {
+ BNA_TX_STOPPED = 1,
+ BNA_TX_STARTED = 2,
+ BNA_TX_TXQ_STOP_WAIT = 3,
+ BNA_TX_PRIO_STOP_WAIT = 4,
+ BNA_TX_STAT_CLR_WAIT = 5,
+};
+
+bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
+ enum bna_tx_event);
+
+static struct bfa_sm_table tx_sm_table[] = {
+ {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
+ {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
+ {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
+ {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
+ {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
+};
+
+static void
+bna_tx_sm_stopped_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
+}
+
+static void
+bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ /**
+		 * This event is received due to flushing of the mbox
+		 * when the device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_started_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ __bna_tx_start(tx);
+
+ /* Start IB */
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_ack(&txq->ib->door_bell, 0);
+ }
+}
+
+static void
+bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
+ __bna_tx_stop(tx);
+ break;
+
+ case TX_E_FAIL:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_fail(txq->ib);
+ (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(txq->ib);
+ }
+ bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
+{
+ __bna_tx_stop(tx);
+}
+
+static void
+bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(txq->ib);
+ (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
+{
+ __bna_txf_stat_clr(tx);
+}
+
+static void
+bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_STAT_CLEARED:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_txq_mem txq_cfg;
+ struct bna_txq_mem *txq_mem;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ /* Fill out structure, to be subsequently written to hardware */
+ txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
+ txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
+ cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
+ txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
+
+ txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
+ (txq->qpt.page_size >> 2);
+ txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
+ ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
+
+ txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
+ txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
+ (txq->priority & 0x3));
+ txq_cfg.wvc_n_cquota_n_rquota =
+ ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
+ (BFI_TX_MAX_WRR_QUOTA & 0xfff));
+
+ /* Setup the page and write to H/W */
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
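+	/*
+	 * A NULL-based struct pointer is used only to compute per-queue
+	 * register offsets (an offsetof-style trick); it is never
+	 * dereferenced, the offsets are added to base_addr instead.
+	 */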
+ q_mem = (struct bna_rxtx_q_mem *)0;
+ txq_mem = &q_mem[txq->txq_id].txq;
+
+ /*
+	 * The following four writes are a workaround because the H/W
+	 * needs to read these DMA addresses as little endian
+ */
+
+ off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
+ writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
+ writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&txq_mem->cur_q_entry_lo;
+ writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&txq_mem->cur_q_entry_hi;
+ writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
+ writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&txq_mem->entry_n_pg_size;
+ writel(txq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
+ writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
+ writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
+
+ off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
+ writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
+
+ off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
+ writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
+
+ txq->tcb->producer_index = 0;
+ txq->tcb->consumer_index = 0;
+ *(txq->tcb->hw_consumer_index) = 0;
+
+}
+
+static void
+__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
+{
+ struct bfi_ll_q_stop_req ll_req;
+ u32 bit_mask[2] = {0, 0};
+ if (txq->txq_id < 32)
+ bit_mask[0] = (u32)1 << txq->txq_id;
+ else
+ bit_mask[1] = (u32)1 << (txq->txq_id - 32);
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+ ll_req.q_id_mask[0] = htonl(bit_mask[0]);
+ ll_req.q_id_mask[1] = htonl(bit_mask[1]);
+
+ bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_tx_cb_txq_stopped, tx);
+
+ bna_mbox_send(tx->bna, &tx->mbox_qe);
+}
+
+static void
+__bna_txf_start(struct bna_tx *tx)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ struct bna_txf *txf = &tx->txf;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
+ tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ TX_FNDB_RAM_BASE_OFFSET);
+
+ tx_fndb = (struct bna_tx_fndb_ram *)0;
+ off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
+
+ writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
+ base_addr + off);
+
+ if (tx->txf.txf_id < 32)
+ tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
+ else
+ tx->bna->tx_mod.txf_bmap[1] |= ((u32)
+ 1 << (tx->txf.txf_id - 32));
+}
+
+static void
+__bna_txf_stop(struct bna_tx *tx)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ u32 page_num;
+ u32 ctl_flags;
+ struct bna_txf *txf = &tx->txf;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ /* retrieve the running txf_flags & turn off enable bit */
+ page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
+ writel(page_num, tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ TX_FNDB_RAM_BASE_OFFSET);
+ tx_fndb = (struct bna_tx_fndb_ram *)0;
+ off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
+
+ ctl_flags = readl(base_addr + off);
+ ctl_flags &= ~BFI_TXF_CF_ENABLE;
+
+ writel(ctl_flags, base_addr + off);
+
+ if (tx->txf.txf_id < 32)
+ tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
+ else
+		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
+ 1 << (tx->txf.txf_id - 32));
+}
+
+static void
+__bna_txf_stat_clr(struct bna_tx *tx)
+{
+ struct bfi_ll_stats_req ll_req;
+ u32 txf_bmap[2] = {0, 0};
+ if (tx->txf.txf_id < 32)
+ txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
+ else
+ txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = 0;
+ ll_req.rxf_id_mask[0] = 0;
+ ll_req.rxf_id_mask[1] = 0;
+ ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
+ ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
+
+ bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_tx_cb_stats_cleared, tx);
+ bna_mbox_send(tx->bna, &tx->mbox_qe);
+}
+
+static void
+__bna_tx_start(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_start(txq->ib);
+ __bna_txq_start(tx, txq);
+ }
+
+ __bna_txf_start(tx);
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb->priority = txq->priority;
+ (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+}
+
+static void
+__bna_tx_stop(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ __bna_txf_stop(tx);
+
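+	/*
+	 * Bump the stop wait-counter once per TxQ before issuing the stop
+	 * requests, so the resume callback runs only after every TxQ has
+	 * reported stopped.
+	 */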
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bfa_wc_up(&tx->txq_stop_wc);
+ }
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ __bna_txq_stop(tx, txq);
+ }
+}
+
+static void
+bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ txq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ txq->qpt.page_count = page_count;
+ txq->qpt.page_size = page_size;
+
+ txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < page_count; i++) {
+ txq->tcb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+
+ }
+}
+
+static void
+bna_tx_free(struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
+ struct bna_txq *txq;
+ struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
+ struct list_head *qe;
+
+ while (!list_empty(&tx->txq_q)) {
+ bfa_q_deq(&tx->txq_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ if (txq->ib) {
+ if (txq->ib_seg_offset != -1)
+ bna_ib_release_idx(txq->ib,
+ txq->ib_seg_offset);
+ bna_ib_put(ib_mod, txq->ib);
+ txq->ib = NULL;
+ }
+ txq->tcb = NULL;
+ txq->tx = NULL;
+ list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ if (qe == &tx->qe) {
+ list_del(&tx->qe);
+ bfa_q_qe_init(&tx->qe);
+ break;
+ }
+ }
+
+ tx->bna = NULL;
+ tx->priv = NULL;
+ list_add_tail(&tx->qe, &tx_mod->tx_free_q);
+}
+
+static void
+bna_tx_cb_txq_stopped(void *arg, int status)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+ bfa_wc_down(&tx->txq_stop_wc);
+}
+
+static void
+bna_tx_cb_txq_stopped_all(void *arg)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
+}
+
+static void
+bna_tx_cb_stats_cleared(void *arg, int status)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+
+ bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
+}
+
+static void
+bna_tx_start(struct bna_tx *tx)
+{
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ if (tx->flags & BNA_TX_F_ENABLED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+static void
+bna_tx_stop(struct bna_tx *tx)
+{
+ tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
+ tx->stop_cbarg = &tx->bna->tx_mod;
+
+ tx->flags &= ~BNA_TX_F_PORT_STARTED;
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+static void
+bna_tx_fail(struct bna_tx *tx)
+{
+ tx->flags &= ~BNA_TX_F_PORT_STARTED;
+ bfa_fsm_send_event(tx, TX_E_FAIL);
+}
+
+static void
+bna_tx_prio_changed(struct bna_tx *tx, int prio)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->priority = prio;
+ }
+
+ bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
+}
+
+static void
+bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
+{
+ if (cee_link)
+ tx->flags |= BNA_TX_F_PRIO_LOCK;
+ else
+ tx->flags &= ~BNA_TX_F_PRIO_LOCK;
+}
+
+static void
+bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ bfa_wc_down(&tx_mod->tx_stop_wc);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped_all(void *arg)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ if (tx_mod->stop_cbfn)
+ tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
+ tx_mod->stop_cbfn = NULL;
+}
+
+void
+bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
+{
+ u32 q_size;
+ u32 page_count;
+ struct bna_mem_info *mem_info;
+
+ res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_tcb);
+ mem_info->num = num_txq;
+
+ q_size = txq_depth * BFI_TXQ_WI_SIZE;
+ q_size = ALIGN(q_size, PAGE_SIZE);
+ page_count = q_size >> PAGE_SHIFT;
+
+ res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = page_count * sizeof(struct bna_dma_addr);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = page_count * sizeof(void *);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = num_txq * page_count;
+
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
+}
+
+struct bna_tx *
+bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv)
+{
+ struct bna_intr_info *intr_info;
+ struct bna_tx_mod *tx_mod = &bna->tx_mod;
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct list_head *qe;
+ struct bna_ib_mod *ib_mod = &bna->ib_mod;
+ struct bna_doorbell_qset *qset;
+ struct bna_ib_config ib_config;
+ int page_count;
+ int page_size;
+ int page_idx;
+ int i;
+ unsigned long off;
+
+ intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
+ tx_cfg->num_txq;
+ page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
+
+ /**
+ * Get resources
+ */
+
+ if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
+ return NULL;
+
+ /* Tx */
+
+ if (list_empty(&tx_mod->tx_free_q))
+ return NULL;
+ bfa_q_deq(&tx_mod->tx_free_q, &tx);
+ bfa_q_qe_init(&tx->qe);
+
+ /* TxQs */
+
+ INIT_LIST_HEAD(&tx->txq_q);
+ for (i = 0; i < tx_cfg->num_txq; i++) {
+ if (list_empty(&tx_mod->txq_free_q))
+ goto err_return;
+
+ bfa_q_deq(&tx_mod->txq_free_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ list_add_tail(&txq->qe, &tx->txq_q);
+ txq->ib = NULL;
+ txq->ib_seg_offset = -1;
+ txq->tx = tx;
+ }
+
+ /* IBs */
+ i = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+
+ if (intr_info->num == 1)
+ txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
+ intr_info->idl[0].vector);
+ else
+ txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
+ intr_info->idl[i].vector);
+
+ if (txq->ib == NULL)
+ goto err_return;
+
+ txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
+ if (txq->ib_seg_offset == -1)
+ goto err_return;
+
+ i++;
+ }
+
+ /*
+ * Initialize
+ */
+
+ /* Tx */
+
+ tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
+ tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
+ tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
+ tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
+
+ list_add_tail(&tx->qe, &tx_mod->tx_active_q);
+ tx->bna = bna;
+ tx->priv = priv;
+ tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
+ tx->txq_stop_wc.wc_cbarg = tx;
+ tx->txq_stop_wc.wc_count = 0;
+
+ tx->type = tx_cfg->tx_type;
+
+ tx->flags = 0;
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
+ switch (tx->type) {
+ case BNA_TX_T_REGULAR:
+ if (!(tx->bna->tx_mod.flags &
+ BNA_TX_MOD_F_PORT_LOOPBACK))
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ break;
+ case BNA_TX_T_LOOPBACK:
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ break;
+ }
+ }
+ if (tx->bna->tx_mod.cee_link)
+ tx->flags |= BNA_TX_F_PRIO_LOCK;
+
+ /* TxQ */
+
+ i = 0;
+ page_idx = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->priority = tx_mod->priority;
+ txq->tcb = (struct bna_tcb *)
+ res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
+ txq->tx_packets = 0;
+ txq->tx_bytes = 0;
+
+ /* IB */
+
+ ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ ib_config.interpkt_timeo = 0; /* Not used */
+ ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
+ ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
+ BFI_IB_CF_INT_ENABLE |
+ BFI_IB_CF_COALESCING_MODE);
+ bna_ib_config(txq->ib, &ib_config);
+
+ /* TCB */
+
+ txq->tcb->producer_index = 0;
+ txq->tcb->consumer_index = 0;
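+		/*
+		 * The h/w consumer index lives in the IB's host address
+		 * segment, at this TxQ's reserved index slot.
+		 */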
+ txq->tcb->hw_consumer_index = (volatile u32 *)
+ ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
+ (txq->ib_seg_offset * BFI_IBIDX_SIZE));
+ *(txq->tcb->hw_consumer_index) = 0;
+ txq->tcb->q_depth = tx_cfg->txq_depth;
+ txq->tcb->unmap_q = (void *)
+ res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
+ qset = (struct bna_doorbell_qset *)0;
+ off = (unsigned long)&qset[txq->txq_id].txq[0];
+ txq->tcb->q_dbell = off +
+ BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
+ txq->tcb->i_dbell = &txq->ib->door_bell;
+ txq->tcb->intr_type = intr_info->intr_type;
+ txq->tcb->intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ txq->tcb->txq = txq;
+ txq->tcb->bnad = bnad;
+ txq->tcb->id = i;
+
+ /* QPT, SWQPT, Pages */
+ bna_txq_qpt_setup(txq, page_count, page_size,
+ &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_PAGE].
+ res_u.mem_info.mdl[page_idx]);
+ txq->tcb->page_idx = page_idx;
+ txq->tcb->page_count = page_count;
+ page_idx += page_count;
+
+ /* Callback to bnad for setting up TCB */
+ if (tx->tcb_setup_cbfn)
+ (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
+
+ i++;
+ }
+
+ /* TxF */
+
+ tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
+ tx->txf.vlan = 0;
+
+ /* Mbox element */
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+
+ return tx;
+
+err_return:
+ bna_tx_free(tx);
+ return NULL;
+}
+
+void
+bna_tx_destroy(struct bna_tx *tx)
+{
+ /* Callback to bnad for destroying TCB */
+ if (tx->tcb_destroy_cbfn) {
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ }
+
+ bna_tx_free(tx);
+}
+
+void
+bna_tx_enable(struct bna_tx *tx)
+{
+ if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ return;
+
+ tx->flags |= BNA_TX_F_ENABLED;
+
+ if (tx->flags & BNA_TX_F_PORT_STARTED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+void
+bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
+ return;
+ }
+
+ tx->stop_cbfn = cbfn;
+ tx->stop_cbarg = tx->bna->bnad;
+
+ tx->flags &= ~BNA_TX_F_ENABLED;
+
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+int
+bna_tx_state_get(struct bna_tx *tx)
+{
+ return bfa_sm_to_state(tx_sm_table, tx->fsm);
+}
+
+void
+bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ tx_mod->bna = bna;
+ tx_mod->flags = 0;
+
+ tx_mod->tx = (struct bna_tx *)
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
+ tx_mod->txq = (struct bna_txq *)
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&tx_mod->tx_free_q);
+ INIT_LIST_HEAD(&tx_mod->tx_active_q);
+
+ INIT_LIST_HEAD(&tx_mod->txq_free_q);
+
+ for (i = 0; i < BFI_MAX_TXQ; i++) {
+ tx_mod->tx[i].txf.txf_id = i;
+ bfa_q_qe_init(&tx_mod->tx[i].qe);
+ list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
+
+ tx_mod->txq[i].txq_id = i;
+ bfa_q_qe_init(&tx_mod->txq[i].qe);
+ list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
+ }
+
+ tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
+ tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
+ tx_mod->tx_stop_wc.wc_count = 0;
+}
+
+void
+bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->tx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->txq_free_q)
+ i++;
+
+ tx_mod->bna = NULL;
+}
+
+void
+bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
+ if (type == BNA_TX_T_LOOPBACK)
+ tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_start(tx);
+ }
+}
+
+void
+bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
+
+ /**
+ * Before calling bna_tx_stop(), increment tx_stop_wc as many times
+	 * as we are going to call bna_tx_stop()
+ */
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bfa_wc_up(&tx_mod->tx_stop_wc);
+ }
+
+ if (tx_mod->tx_stop_wc.wc_count == 0) {
+ tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
+ tx_mod->stop_cbfn = NULL;
+ return;
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_stop(tx);
+ }
+}
+
+void
+bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_fail(tx);
+ }
+}
+
+void
+bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ if (prio != tx_mod->priority) {
+ tx_mod->priority = prio;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_prio_changed(tx, prio);
+ }
+ }
+}
+
+void
+bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->cee_link = cee_link;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_cee_link_status(tx, cee_link);
+ }
+}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
new file mode 100644
index 00000000000..6877310f6ef
--- /dev/null
+++ b/drivers/net/bna/bna_types.h
@@ -0,0 +1,1128 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_TYPES_H__
+#define __BNA_TYPES_H__
+
+#include "cna.h"
+#include "bna_hw.h"
+#include "bfa_cee.h"
+
+/**
+ *
+ * Forward declarations
+ *
+ */
+
+struct bna_txq;
+struct bna_tx;
+struct bna_rxq;
+struct bna_cq;
+struct bna_rx;
+struct bna_rxf;
+struct bna_port;
+struct bna;
+struct bnad;
+
+/**
+ *
+ * Enums, primitive data types
+ *
+ */
+
+enum bna_status {
+ BNA_STATUS_T_DISABLED = 0,
+ BNA_STATUS_T_ENABLED = 1
+};
+
+enum bna_cleanup_type {
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
+};
+
+enum bna_cb_status {
+ BNA_CB_SUCCESS = 0,
+ BNA_CB_FAIL = 1,
+ BNA_CB_INTERRUPT = 2,
+ BNA_CB_BUSY = 3,
+ BNA_CB_INVALID_MAC = 4,
+ BNA_CB_MCAST_LIST_FULL = 5,
+ BNA_CB_UCAST_CAM_FULL = 6,
+ BNA_CB_WAITING = 7,
+ BNA_CB_NOT_EXEC = 8
+};
+
+enum bna_res_type {
+ BNA_RES_T_MEM = 1,
+ BNA_RES_T_INTR = 2
+};
+
+enum bna_mem_type {
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
+};
+
+enum bna_intr_type {
+ BNA_INTR_T_INTX = 1,
+ BNA_INTR_T_MSIX = 2
+};
+
+enum bna_res_req_type {
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_MEM_T_SWSTATS = 4,
+ BNA_RES_MEM_T_IBIDX = 5,
+ BNA_RES_MEM_T_IB_ARRAY = 6,
+ BNA_RES_MEM_T_INTR_ARRAY = 7,
+ BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
+ BNA_RES_MEM_T_TX_ARRAY = 9,
+ BNA_RES_MEM_T_TXQ_ARRAY = 10,
+ BNA_RES_MEM_T_RX_ARRAY = 11,
+ BNA_RES_MEM_T_RXP_ARRAY = 12,
+ BNA_RES_MEM_T_RXQ_ARRAY = 13,
+ BNA_RES_MEM_T_UCMAC_ARRAY = 14,
+ BNA_RES_MEM_T_MCMAC_ARRAY = 15,
+ BNA_RES_MEM_T_RIT_ENTRY = 16,
+ BNA_RES_MEM_T_RIT_SEGMENT = 17,
+ BNA_RES_INTR_T_MBOX = 18,
+ BNA_RES_T_MAX
+};
+
+enum bna_tx_res_req_type {
+ BNA_TX_RES_MEM_T_TCB = 0,
+ BNA_TX_RES_MEM_T_UNMAPQ = 1,
+ BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_SWQPT = 3,
+ BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_INTR_T_TXCMPL = 5,
+ BNA_TX_RES_T_MAX,
+};
+
+enum bna_rx_mem_type {
+ BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
+	BNA_RX_RES_MEM_T_RCB = 1,	/* Rx context */
+ BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
+ BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
+ BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
+	BNA_RX_RES_MEM_T_HQPT = 6,	/* Header/small RxQ QPT */
+	BNA_RX_RES_MEM_T_DQPT = 7,	/* Data/large RxQ QPT */
+	BNA_RX_RES_MEM_T_HSWQPT = 8,	/* Header/small RxQ s/w QPT */
+	BNA_RX_RES_MEM_T_DSWQPT = 9,	/* Data/large RxQ s/w QPT */
+	BNA_RX_RES_MEM_T_DPAGE = 10,	/* Data/large RxQ QPT pages */
+	BNA_RX_RES_MEM_T_HPAGE = 11,	/* Header/small RxQ QPT pages */
+ BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
+ BNA_RX_RES_T_MAX = 13
+};
+
+enum bna_mbox_state {
+ BNA_MBOX_FREE = 0,
+ BNA_MBOX_POSTED = 1
+};
+
+enum bna_tx_type {
+ BNA_TX_T_REGULAR = 0,
+ BNA_TX_T_LOOPBACK = 1,
+};
+
+enum bna_tx_flags {
+ BNA_TX_F_PORT_STARTED = 1,
+ BNA_TX_F_ENABLED = 2,
+ BNA_TX_F_PRIO_LOCK = 4,
+};
+
+enum bna_tx_mod_flags {
+ BNA_TX_MOD_F_PORT_STARTED = 1,
+ BNA_TX_MOD_F_PORT_LOOPBACK = 2,
+};
+
+enum bna_rx_type {
+ BNA_RX_T_REGULAR = 0,
+ BNA_RX_T_LOOPBACK = 1,
+};
+
+enum bna_rxp_type {
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
+};
+
+enum bna_rxmode {
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_DEFAULT = 2,
+ BNA_RXMODE_ALLMULTI = 4
+};
+
+enum bna_rx_event {
+ RX_E_START = 1,
+ RX_E_STOP = 2,
+ RX_E_FAIL = 3,
+ RX_E_RXF_STARTED = 4,
+ RX_E_RXF_STOPPED = 5,
+ RX_E_RXQ_STOPPED = 6,
+};
+
+enum bna_rx_state {
+ BNA_RX_STOPPED = 1,
+ BNA_RX_RXF_START_WAIT = 2,
+ BNA_RX_STARTED = 3,
+ BNA_RX_RXF_STOP_WAIT = 4,
+ BNA_RX_RXQ_STOP_WAIT = 5,
+};
+
+enum bna_rx_flags {
+ BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
+ BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
+ BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
+};
+
+enum bna_rx_mod_flags {
+ BNA_RX_MOD_F_PORT_STARTED = 1,
+ BNA_RX_MOD_F_PORT_LOOPBACK = 2,
+};
+
+enum bna_rxf_oper_state {
+ BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
+ BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
+};
+
+enum bna_rxf_flags {
+ BNA_RXF_FL_STOP_PENDING = 0x01,
+ BNA_RXF_FL_FAILED = 0x02,
+ BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
+ BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
+ BNA_RXF_FL_RXF_ENABLED = 0x10,
+ BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
+};
+
+enum bna_rxf_event {
+ RXF_E_START = 1,
+ RXF_E_STOP = 2,
+ RXF_E_FAIL = 3,
+ RXF_E_CAM_FLTR_MOD = 4,
+ RXF_E_STARTED = 5,
+ RXF_E_STOPPED = 6,
+ RXF_E_CAM_FLTR_RESP = 7,
+ RXF_E_PAUSE = 8,
+ RXF_E_RESUME = 9,
+ RXF_E_STAT_CLEARED = 10,
+};
+
+enum bna_rxf_state {
+ BNA_RXF_STOPPED = 1,
+ BNA_RXF_START_WAIT = 2,
+ BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
+ BNA_RXF_STARTED = 4,
+ BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
+ BNA_RXF_STOP_WAIT = 6,
+ BNA_RXF_PAUSE_WAIT = 7,
+ BNA_RXF_RESUME_WAIT = 8,
+ BNA_RXF_STAT_CLR_WAIT = 9,
+};
+
+enum bna_port_type {
+ BNA_PORT_T_REGULAR = 0,
+ BNA_PORT_T_LOOPBACK_INTERNAL = 1,
+ BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
+};
+
+enum bna_link_status {
+ BNA_LINK_DOWN = 0,
+ BNA_LINK_UP = 1,
+ BNA_CEE_UP = 2
+};
+
+enum bna_llport_flags {
+ BNA_LLPORT_F_ENABLED = 1,
+ BNA_LLPORT_F_RX_ENABLED = 2
+};
+
+enum bna_port_flags {
+ BNA_PORT_F_DEVICE_READY = 1,
+ BNA_PORT_F_ENABLED = 2,
+ BNA_PORT_F_PAUSE_CHANGED = 4,
+ BNA_PORT_F_MTU_CHANGED = 8
+};
+
+enum bna_pkt_rates {
+ BNA_PKT_RATE_10K = 10000,
+ BNA_PKT_RATE_20K = 20000,
+ BNA_PKT_RATE_30K = 30000,
+ BNA_PKT_RATE_40K = 40000,
+ BNA_PKT_RATE_50K = 50000,
+ BNA_PKT_RATE_60K = 60000,
+ BNA_PKT_RATE_70K = 70000,
+ BNA_PKT_RATE_80K = 80000,
+};
+
+enum bna_dim_load_types {
+ BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
+ BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
+ BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
+ BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
+ BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
+ BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
+ BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
+ BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
+ BNA_LOAD_T_MAX = 8
+};
+
+enum bna_dim_bias_types {
+ BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
+ BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
+ BNA_BIAS_T_MAX = 2
+};
+
+struct bna_mac {
+ /* This should be the first one */
+ struct list_head qe;
+ u8 addr[ETH_ALEN];
+};
+
+struct bna_mem_descr {
+ u32 len;
+ void *kva;
+ struct bna_dma_addr dma;
+};
+
+struct bna_mem_info {
+ enum bna_mem_type mem_type;
+ u32 len;
+ u32 num;
+ u32 align_sz; /* 0/1 = no alignment */
+ struct bna_mem_descr *mdl;
+ void *cookie; /* For bnad to unmap dma later */
+};
+
+struct bna_intr_descr {
+ int vector;
+};
+
+struct bna_intr_info {
+ enum bna_intr_type intr_type;
+ int num;
+ struct bna_intr_descr *idl;
+};
+
+union bna_res_u {
+ struct bna_mem_info mem_info;
+ struct bna_intr_info intr_info;
+};
+
+struct bna_res_info {
+ enum bna_res_type res_type;
+ union bna_res_u res_u;
+};
+
+/* HW QPT */
+struct bna_qpt {
+ struct bna_dma_addr hw_qpt_ptr;
+ void *kv_qpt_ptr;
+ u32 page_count;
+ u32 page_size;
+};
+
+/**
+ *
+ * Device
+ *
+ */
+
+struct bna_device {
+ bfa_fsm_t fsm;
+ struct bfa_ioc ioc;
+
+ enum bna_intr_type intr_type;
+ int vector;
+
+ void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+ struct bnad *ready_cbarg;
+
+ void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+ struct bnad *stop_cbarg;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Mail box
+ *
+ */
+
+struct bna_mbox_qe {
+ /* This should be the first one */
+ struct list_head qe;
+
+ struct bfa_mbox_cmd cmd;
+ u32 cmd_len;
+ /* Callback for port, tx, rx, rxf */
+ void (*cbfn)(void *arg, int status);
+ void *cbarg;
+};
+
+struct bna_mbox_mod {
+ enum bna_mbox_state state;
+ struct list_head posted_q;
+ u32 msg_pending;
+ u32 msg_ctr;
+ struct bna *bna;
+};
+
+/**
+ *
+ * Port
+ *
+ */
+
+/* Pause configuration */
+struct bna_pause_config {
+ enum bna_status tx_pause;
+ enum bna_status rx_pause;
+};
+
+struct bna_llport {
+ bfa_fsm_t fsm;
+ enum bna_llport_flags flags;
+
+ enum bna_port_type type;
+
+ enum bna_link_status link_status;
+
+ int admin_up_count;
+
+ void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+};
+
+struct bna_port {
+ bfa_fsm_t fsm;
+ enum bna_port_flags flags;
+
+ enum bna_port_type type;
+
+ struct bna_llport llport;
+
+ struct bna_pause_config pause_config;
+ u8 priority;
+ int mtu;
+
+ /* Callback for bna_port_disable(), port_stop() */
+ void (*stop_cbfn)(void *, enum bna_cb_status);
+ void *stop_cbarg;
+
+ /* Callback for bna_port_pause_config() */
+ void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
+
+ /* Callback for bna_port_mtu_set() */
+ void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
+
+ void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+ struct bfa_wc chld_stop_wc;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Interrupt Block
+ *
+ */
+
+/* IB index segment structure */
+struct bna_ibidx_seg {
+ /* This should be the first one */
+ struct list_head qe;
+
+ u8 ib_seg_size;
+ u8 ib_idx_tbl_offset;
+};
+
+/* Interrupt structure */
+struct bna_intr {
+ /* This should be the first one */
+ struct list_head qe;
+ int ref_count;
+
+ enum bna_intr_type intr_type;
+ int vector;
+
+ struct bna_ib *ib;
+};
+
+/* Doorbell structure */
+struct bna_ib_dbell {
+	void __iomem *doorbell_addr;
+ u32 doorbell_ack;
+};
+
+/* Interrupt timer configuration */
+struct bna_ib_config {
+ u8 coalescing_timeo; /* Unit is 5usec. */
+
+ int interpkt_count;
+ int interpkt_timeo;
+
+ enum ib_flags ctrl_flags;
+};
+
+/* IB structure */
+struct bna_ib {
+ /* This should be the first one */
+ struct list_head qe;
+
+ int ib_id;
+
+ int ref_count;
+ int start_count;
+
+ struct bna_dma_addr ib_seg_host_addr;
+ void *ib_seg_host_addr_kva;
+ u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
+
+ struct bna_ibidx_seg *idx_seg;
+
+ struct bna_ib_dbell door_bell;
+
+ struct bna_intr *intr;
+
+ struct bna_ib_config ib_config;
+
+ struct bna *bna;
+};
+
+/* IB module - keeps track of IBs and interrupts */
+struct bna_ib_mod {
+ struct bna_ib *ib; /* BFI_MAX_IB entries */
+ struct bna_intr *intr; /* BFI_MAX_IB entries */
+ struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
+
+ struct list_head ib_free_q;
+
+ struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
+
+ struct list_head intr_free_q;
+ struct list_head intr_active_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Tx object
+ *
+ */
+
+/* Tx datapath control structure */
+#define BNA_Q_NAME_SIZE 16
+struct bna_tcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ volatile u32 *hw_consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ struct bna_ib_dbell *i_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_txq *txq;
+ struct bnad *bnad;
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 priority; /* Current priority */
+ unsigned long flags; /* Used by bnad as required */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* TxQ QPT and configuration */
+struct bna_txq {
+ /* This should be the first one */
+ struct list_head qe;
+
+ int txq_id;
+
+ u8 priority;
+
+ struct bna_qpt qpt;
+ struct bna_tcb *tcb;
+ struct bna_ib *ib;
+ int ib_seg_offset;
+
+ struct bna_tx *tx;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* TxF structure (hardware Tx Function) */
+struct bna_txf {
+ int txf_id;
+ enum txf_flags ctrl_flags;
+ u16 vlan;
+};
+
+/* Tx object */
+struct bna_tx {
+ /* This should be the first one */
+ struct list_head qe;
+
+ bfa_fsm_t fsm;
+ enum bna_tx_flags flags;
+
+ enum bna_tx_type type;
+
+ struct list_head txq_q;
+ struct bna_txf txf;
+
+ /* Tx event handlers */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+
+ /* callback for bna_tx_disable(), bna_tx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status);
+ void *stop_cbarg;
+
+ /* callback for bna_tx_prio_set() */
+ void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
+ enum bna_cb_status status);
+
+ struct bfa_wc txq_stop_wc;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_tx_config {
+ int num_txq;
+ int txq_depth;
+ enum bna_tx_type tx_type;
+};
+
+struct bna_tx_event_cbfn {
+ /* Optional */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ /* Mandatory */
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+};
+
+/* Tx module - keeps track of free, active tx objects */
+struct bna_tx_mod {
+ struct bna_tx *tx; /* BFI_MAX_TXQ entries */
+ struct bna_txq *txq; /* BFI_MAX_TXQ entries */
+
+ struct list_head tx_free_q;
+ struct list_head tx_active_q;
+
+ struct list_head txq_free_q;
+
+ /* callback for bna_tx_mod_stop() */
+ void (*stop_cbfn)(struct bna_port *port,
+ enum bna_cb_status status);
+
+ struct bfa_wc tx_stop_wc;
+
+ enum bna_tx_mod_flags flags;
+
+ int priority;
+ int cee_link;
+
+ u32 txf_bmap[2];
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Receive Indirection Table
+ *
+ */
+
+/* One row of RIT table */
+struct bna_rit_entry {
+ u8 large_rxq_id; /* used for either large or data buffers */
+ u8 small_rxq_id; /* used for either small or header buffers */
+};
+
+/* RIT segment */
+struct bna_rit_segment {
+ struct list_head qe;
+
+ u32 rit_offset;
+ u32 rit_size;
+ /**
+ * max_rit_size: Varies per RIT segment depending on how RIT is
+ * partitioned
+ */
+ u32 max_rit_size;
+
+ struct bna_rit_entry *rit;
+};
+
+struct bna_rit_mod {
+ struct bna_rit_entry *rit;
+ struct bna_rit_segment *rit_segment;
+
+ struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
+};
+
+/**
+ *
+ * Rx object
+ *
+ */
+
+/* Rx datapath control structure */
+struct bna_rcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_rxq *rxq;
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ unsigned long flags;
+ int id;
+};
+
+/* RxQ structure - QPT, configuration */
+struct bna_rxq {
+ struct list_head qe;
+ int rxq_id;
+
+ int buffer_size;
+ int q_depth;
+
+ struct bna_qpt qpt;
+ struct bna_rcb *rcb;
+
+ struct bna_rxp *rxp;
+ struct bna_rx *rx;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
+};
+
+/* RxQ pair */
+union bna_rxq_u {
+ struct {
+ struct bna_rxq *hdr;
+ struct bna_rxq *data;
+ } hds;
+ struct {
+ struct bna_rxq *small;
+ struct bna_rxq *large;
+ } slr;
+ struct {
+ struct bna_rxq *only;
+ struct bna_rxq *reserved;
+ } single;
+};
+
+/* Packet rate for Dynamic Interrupt Moderation */
+struct bna_pkt_rate {
+ u32 small_pkt_cnt;
+ u32 large_pkt_cnt;
+};
+
+/* Completion control structure */
+struct bna_ccb {
+ /* Fast path */
+ void **sw_qpt;
+ u32 producer_index;
+ volatile u32 *hw_producer_index;
+ u32 q_depth;
+ struct bna_ib_dbell *i_dbell;
+ struct bna_rcb *rcb[2];
+ void *ctrl; /* For bnad */
+ struct bna_pkt_rate pkt_rate;
+ int page_idx;
+ int page_count;
+
+ /* Control path */
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 rx_coalescing_timeo; /* For NAPI */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* CQ QPT, configuration */
+struct bna_cq {
+ int cq_id;
+
+ struct bna_qpt qpt;
+ struct bna_ccb *ccb;
+
+ struct bna_ib *ib;
+ u8 ib_seg_offset;
+
+ struct bna_rx *rx;
+};
+
+struct bna_rss_config {
+ enum rss_hash_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+};
+
+struct bna_hds_config {
+ enum hds_header_type hdr_type;
+ int header_size;
+};
+
+/* This structure is used during RX creation */
+struct bna_rx_config {
+ enum bna_rx_type rx_type;
+ int num_paths;
+ enum bna_rxp_type rxp_type;
+ int paused;
+ int q_depth;
+ /*
+ * Small/Large (or Header/Data) buffer size to be configured
+ * for SLR and HDS queue type. Large buffer size comes from
+ * port->mtu.
+ */
+ int small_buff_size;
+
+ enum bna_status rss_status;
+ struct bna_rss_config rss_config;
+
+ enum bna_status hds_status;
+ struct bna_hds_config hds_config;
+
+ enum bna_status vlan_strip_status;
+};
+
+/* Rx Path structure - one per MSIX vector/CPU */
+struct bna_rxp {
+ /* This should be the first one */
+ struct list_head qe;
+
+ enum bna_rxp_type type;
+ union bna_rxq_u rxq;
+ struct bna_cq cq;
+
+ struct bna_rx *rx;
+
+ /* MSI-x vector number for configuring RSS */
+ int vector;
+
+ struct bna_mbox_qe mbox_qe;
+};
+
+/* HDS configuration structure */
+struct bna_rxf_hds {
+ enum hds_header_type hdr_type;
+ int header_size;
+};
+
+/* RSS configuration structure */
+struct bna_rxf_rss {
+ enum rss_hash_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+};
+
+/* RxF structure (hardware Rx Function) */
+struct bna_rxf {
+ bfa_fsm_t fsm;
+ int rxf_id;
+ enum rxf_flags ctrl_flags;
+ u16 default_vlan_tag;
+ enum bna_rxf_oper_state rxf_oper_state;
+ enum bna_status hds_status;
+ struct bna_rxf_hds hds_cfg;
+ enum bna_status rss_status;
+ struct bna_rxf_rss rss_cfg;
+ struct bna_rit_segment *rit_segment;
+ struct bna_rx *rx;
+ u32 forced_offset;
+ struct bna_mbox_qe mbox_qe;
+ int mcast_rxq_id;
+
+ /* callback for bna_rxf_start() */
+ void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+ struct bna_rx *start_cbarg;
+
+ /* callback for bna_rxf_stop() */
+ void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+ struct bna_rx *stop_cbarg;
+
+ /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
+ void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status);
+ struct bnad *oper_state_cbarg;
+
+ /**
+ * callback for:
+ * bna_rxf_ucast_set()
+ * bna_rxf_{ucast/mcast}_add(),
+ * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_mode_set()
+ */
+ void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status);
+ struct bnad *cam_fltr_cbarg;
+
+ enum bna_rxf_flags rxf_flags;
+
+ /* List of unicast addresses yet to be applied to h/w */
+ struct list_head ucast_pending_add_q;
+ struct list_head ucast_pending_del_q;
+ int ucast_pending_set;
+ /* ucast addresses applied to the h/w */
+ struct list_head ucast_active_q;
+ struct bna_mac *ucast_active_mac;
+
+ /* List of multicast addresses yet to be applied to h/w */
+ struct list_head mcast_pending_add_q;
+ struct list_head mcast_pending_del_q;
+ /* multicast addresses applied to the h/w */
+ struct list_head mcast_active_q;
+
+ /* Rx modes yet to be applied to h/w */
+ enum bna_rxmode rxmode_pending;
+ enum bna_rxmode rxmode_pending_bitmask;
+ /* Rx modes applied to h/w */
+ enum bna_rxmode rxmode_active;
+
+ enum bna_status vlan_filter_status;
+ u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+};
+
+/* Rx object */
+struct bna_rx {
+ /* This should be the first one */
+ struct list_head qe;
+
+ bfa_fsm_t fsm;
+
+ enum bna_rx_type type;
+
+ /* list-head for RX path objects */
+ struct list_head rxp_q;
+
+ struct bna_rxf rxf;
+
+ enum bna_rx_flags rx_flags;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bfa_wc rxq_stop_wc;
+
+ /* Rx event handlers */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+
+ /* callback for bna_rx_disable(), bna_rx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status);
+ void *stop_cbarg;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_rx_event_cbfn {
+ /* Optional */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ /* Mandatory */
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+};
+
+/* Rx module - keeps track of free, active rx objects */
+struct bna_rx_mod {
+ struct bna *bna; /* back pointer to parent */
+ struct bna_rx *rx; /* BFI_MAX_RXQ entries */
+ struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
+ struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
+
+ struct list_head rx_free_q;
+ struct list_head rx_active_q;
+ int rx_free_count;
+
+ struct list_head rxp_free_q;
+ int rxp_free_count;
+
+ struct list_head rxq_free_q;
+ int rxq_free_count;
+
+ enum bna_rx_mod_flags flags;
+
+ /* callback for bna_rx_mod_stop() */
+ void (*stop_cbfn)(struct bna_port *port,
+ enum bna_cb_status status);
+
+ struct bfa_wc rx_stop_wc;
+ u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
+ u32 rxf_bmap[2];
+};
+
+/**
+ *
+ * CAM
+ *
+ */
+
+struct bna_ucam_mod {
+ struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+struct bna_mcam_mod {
+ struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Statistics
+ *
+ */
+
+struct bna_tx_stats {
+ int tx_state;
+ int tx_flags;
+ int num_txqs;
+ u32 txq_bmap[2];
+ int txf_id;
+};
+
+struct bna_rx_stats {
+ int rx_state;
+ int rx_flags;
+ int num_rxps;
+ int num_rxqs;
+ u32 rxq_bmap[2];
+ u32 cq_bmap[2];
+ int rxf_id;
+ int rxf_state;
+ int rxf_oper_state;
+ int num_active_ucast;
+ int num_active_mcast;
+ int rxmode_active;
+ int vlan_filter_status;
+ u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+ int rss_status;
+ int hds_status;
+};
+
+struct bna_sw_stats {
+ int device_state;
+ int port_state;
+ int port_flags;
+ int llport_state;
+ int priority;
+ int num_active_tx;
+ int num_active_rx;
+ struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
+ struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
+};
+
+struct bna_stats {
+ u32 txf_bmap[2];
+ u32 rxf_bmap[2];
+ struct bfi_ll_stats *hw_stats;
+ struct bna_sw_stats *sw_stats;
+};
+
+/**
+ *
+ * BNA
+ *
+ */
+
+struct bna {
+ struct bfa_pcidev pcidev;
+
+ int port_num;
+
+ struct bna_chip_regs regs;
+
+ struct bna_dma_addr hw_stats_dma;
+ struct bna_stats stats;
+
+ struct bna_device device;
+ struct bfa_cee cee;
+
+ struct bna_mbox_mod mbox_mod;
+
+ struct bna_port port;
+
+ struct bna_tx_mod tx_mod;
+
+ struct bna_rx_mod rx_mod;
+
+ struct bna_ib_mod ib_mod;
+
+ struct bna_ucam_mod ucam_mod;
+ struct bna_mcam_mod mcam_mod;
+
+ struct bna_rit_mod rit_mod;
+
+ int rxf_default_id;
+ int rxf_promisc_id;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bnad *bnad;
+};
+
+#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
new file mode 100644
index 00000000000..74c64d6c880
--- /dev/null
+++ b/drivers/net/bna/bnad.c
@@ -0,0 +1,3264 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#include "bnad.h"
+#include "bna.h"
+#include "cna.h"
+
+static DEFINE_MUTEX(bnad_fwimg_mutex);
+
+/*
+ * Module params
+ */
+static uint bnad_msix_disable;
+module_param(bnad_msix_disable, uint, 0444);
+MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+
+/*
+ * Global variables
+ */
+u32 bnad_rxqs_per_cq = 2;
+
+static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+/*
+ * Local MACROS
+ */
+#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
+
+#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
+
+#define BNAD_GET_MBOX_IRQ(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
+ ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
+ ((_bnad)->pcidev->irq))
+
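+/*
+ * The unmap queue memory is sized as the bnad_unmap_q header plus
+ * (depth - 1) additional bnad_skb_unmap entries; presumably struct
+ * bnad_unmap_q already embeds one unmap_array element.
+ */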
+#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
+do { \
+ (_res_info)->res_type = BNA_RES_T_MEM; \
+ (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
+ (_res_info)->res_u.mem_info.num = (_num); \
+ (_res_info)->res_u.mem_info.len = \
+ sizeof(struct bnad_unmap_q) + \
+ (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
+} while (0)
+
+/*
+ * Reinitialize completions in the CQ once Rx is taken down
+ */
+static void
+bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ unsigned int wi_range, wis = 0, ccb_prod = 0;
+ int i;
+
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
+ wi_range);
+
+ for (i = 0; i < ccb->q_depth; i++) {
+ wis++;
+ if (likely(--wi_range))
+ next_cmpl = cmpl + 1;
+ else {
+ BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
+ wis = 0;
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
+ next_cmpl, wi_range);
+ }
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+}
+
+/*
+ * Frees all pending Tx Bufs
+ * At this point no activity is expected on the Q,
+ * so DMA unmap & freeing is fine.
+ */
+static void
+bnad_free_all_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u16 unmap_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb = NULL;
+ int i;
+
+ unmap_array = unmap_q->unmap_array;
+
+ unmap_cons = 0;
+ while (unmap_cons < unmap_q->q_depth) {
+ skb = unmap_array[unmap_cons].skb;
+ if (!skb) {
+ unmap_cons++;
+ continue;
+ }
+ unmap_array[unmap_cons].skb = NULL;
+
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr), skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ unmap_cons++;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_unmap_page(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 0);
+ unmap_cons++;
+ }
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Data Path Handlers */
+
+/*
+ * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * Can be called in a) Interrupt context
+ * b) Sending context
+ * c) Tasklet context
+ */
+static u32
+bnad_free_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u32 sent_packets = 0, sent_bytes = 0;
+ u16 wis, unmap_cons, updated_hw_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb;
+ int i;
+
+ /*
+	 * Just return if TX is stopped. This check is useful when
+	 * bnad_free_txbufs() runs from a tasklet that was scheduled
+	 * before bnad_cb_tx_cleanup() cleared the BNAD_RF_TX_STARTED
+	 * bit, but actually executes after the cleanup has completed.
+ */
+ if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ return 0;
+
+ updated_hw_cons = *(tcb->hw_consumer_index);
+
+ wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
+ updated_hw_cons, tcb->q_depth);
+
+ BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ while (wis) {
+ skb = unmap_array[unmap_cons].skb;
+
+ unmap_array[unmap_cons].skb = NULL;
+
+ sent_packets++;
+ sent_bytes += skb->len;
+ wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr), skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ prefetch(&unmap_array[unmap_cons + 1]);
+
+ pci_unmap_page(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 0);
+ BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Update consumer pointers. */
+ tcb->consumer_index = updated_hw_cons;
+ unmap_q->consumer_index = unmap_cons;
+
+ tcb->txq->tx_packets += sent_packets;
+ tcb->txq->tx_bytes += sent_bytes;
+
+ return sent_packets;
+}
+
+/*
+ * Tx Free Tasklet function
+ * Frees Tx buffers for all the tcbs of all the Tx objects.
+ * Scheduled from the sending context, so that the fat Tx lock
+ * is not held for too long in the sending context.
+ */
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+ struct bnad *bnad = (struct bnad *)bnad_ptr;
+ struct bna_tcb *tcb;
+ u32 acked;
+ int i, j;
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ if (!tcb)
+ continue;
+ if (((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index) &&
+ (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
+ &tcb->flags))) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ }
+ }
+ }
+}
+
+static u32
+bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 sent;
+
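+ /*
+ * BNAD_TXQ_FREE_SENT ensures that only one context (MSIX
+ * handler, NAPI poll or the free tasklet) reclaims Tx
+ * buffers for this tcb at a time.
+ */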
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return 0;
+
+ sent = bnad_free_txbufs(bnad, tcb);
+ if (sent) {
+ if (netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev) &&
+ BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ BNAD_NETIF_WAKE_THRESHOLD) {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ bna_ib_ack(tcb->i_dbell, sent);
+ } else
+ bna_ib_ack(tcb->i_dbell, 0);
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ return sent;
+}
+
+/* MSIX Tx Completion Handler */
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+ struct bna_tcb *tcb = (struct bna_tcb *)data;
+ struct bnad *bnad = tcb->bnad;
+
+ bnad_tx(bnad, tcb);
+
+ return IRQ_HANDLED;
+}
+
+static void
+bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ rcb->producer_index = 0;
+ rcb->consumer_index = 0;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+}
+
+static void
+bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q;
+ struct sk_buff *skb;
+
+ unmap_q = rcb->unmap_q;
+ while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+ skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ BUG_ON(!(skb));
+ unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+ pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
+ unmap_array[unmap_q->consumer_index],
+ dma_addr), rcb->rxq->buffer_size +
+ NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+ }
+
+ bnad_reset_rcb(bnad, rcb);
+}
+
+static void
+bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ u16 to_alloc, alloced, unmap_prod, wi_range;
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct bna_rxq_entry *rxent;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ alloced = 0;
+ to_alloc =
+ BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_prod = unmap_q->producer_index;
+
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
+
+ while (to_alloc--) {
+ if (!wi_range) {
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
+ wi_range);
+ }
+ skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ goto finishing;
+ }
+ skb->dev = bnad->netdev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ unmap_array[unmap_prod].skb = skb;
+ dma_addr = pci_map_single(bnad->pcidev, skb->data,
+ rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ rxent++;
+ wi_range--;
+ alloced++;
+ }
+
+finishing:
+ if (likely(alloced)) {
+ unmap_q->producer_index = unmap_prod;
+ rcb->producer_index = unmap_prod;
+ smp_mb();
+ bna_rxq_prod_indx_doorbell(rcb);
+ }
+}
+
+/*
+ * Locking is required in the enable path because it is
+ * called from NAPI poll context, where bna_lock is not
+ * held, unlike in the IRQ context.
+ */
+static void
+bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+ struct bna_tcb *tcb;
+ struct bna_ccb *ccb;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ bna_ib_coalescing_timer_set(tcb->i_dbell,
+ tcb->txq->ib->ib_config.coalescing_timeo);
+ bna_ib_ack(tcb->i_dbell, 0);
+ }
+ }
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
+ bnad_enable_rx_irq_unsafe(ccb);
+ }
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static inline void
+bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
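+ /*
+ * BNAD_RXQ_REFILL serializes buffer posting between the poll
+ * path and bnad_cb_rx_post(); buffers are posted only once the
+ * free count crosses the refill threshold.
+ */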
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
+
+static u32
+bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ struct bna_rcb *rcb = NULL;
+ unsigned int wi_range, packets = 0, wis = 0;
+ struct bnad_unmap_q *unmap_q;
+ struct sk_buff *skb;
+ u32 flags;
+ u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
+ struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+
+ prefetch(bnad->netdev);
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
+ wi_range);
+ BUG_ON(!(wi_range <= ccb->q_depth));
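+ /*
+ * The CQ is laid out as a chain of pages (sw_qpt); wi_range is
+ * the number of entries left in the current page before the
+ * completion pointer has to be re-fetched.
+ */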
+ while (cmpl->valid && packets < budget) {
+ packets++;
+ BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+ if (qid0 == cmpl->rxq_id)
+ rcb = ccb->rcb[0];
+ else
+ rcb = ccb->rcb[1];
+
+ unmap_q = rcb->unmap_q;
+
+ skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ BUG_ON(!(skb));
+ unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_q->
+ unmap_array[unmap_q->
+ consumer_index],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ PCI_DMA_FROMDEVICE);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+
+ /* Should be more efficient ? Performance ? */
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+
+ wis++;
+ if (likely(--wi_range))
+ next_cmpl = cmpl + 1;
+ else {
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+ wis = 0;
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
+ next_cmpl, wi_range);
+ BUG_ON(!(wi_range <= ccb->q_depth));
+ }
+ prefetch(next_cmpl);
+
+ flags = ntohl(cmpl->flags);
+ if (unlikely
+ (flags &
+ (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ dev_kfree_skb_any(skb);
+ rcb->rxq->rx_packets_with_error++;
+ goto next;
+ }
+
+ skb_put(skb, ntohs(cmpl->length));
+ if (likely
+ (bnad->rx_csum &&
+ (((flags & BNA_CQ_EF_IPV4) &&
+ (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+ (flags & BNA_CQ_EF_IPV6)) &&
+ (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+ (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+ if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
+ struct bnad_rx_ctrl *rx_ctrl =
+ (struct bnad_rx_ctrl *)ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
+ ntohs(cmpl->vlan_tag), skb);
+ else
+ vlan_hwaccel_receive_skb(skb,
+ bnad->vlan_grp,
+ ntohs(cmpl->vlan_tag));
+
+ } else { /* Not VLAN tagged/stripped */
+ struct bnad_rx_ctrl *rx_ctrl =
+ (struct bnad_rx_ctrl *)ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ctrl->napi, skb);
+ else
+ netif_receive_skb(skb);
+ }
+
+next:
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+
+ if (likely(ccb)) {
+ bna_ib_ack(ccb->i_dbell, packets);
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
+ } else
+ bna_ib_ack(ccb->i_dbell, 0);
+
+ return packets;
+}
+
+static void
+bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
+ bna_ib_ack(ccb->i_dbell, 0);
+}
+
+static void
+bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
+ bnad_enable_rx_irq_unsafe(ccb);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+ if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+ bnad_disable_rx_irq(bnad, ccb);
+ __napi_schedule((&rx_ctrl->napi));
+ }
+ BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
+}
+
+/* MSIX Rx Path Handler */
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+ struct bna_ccb *ccb = (struct bna_ccb *)data;
+ struct bnad *bnad = ccb->bnad;
+
+ bnad_netif_rx_schedule_poll(bnad, ccb);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handlers */
+
+/* Mbox Interrupt Handlers */
+static irqreturn_t
+bnad_msix_mbox_handler(int irq, void *data)
+{
+ u32 intr_status;
+ unsigned long flags;
+ struct net_device *netdev = data;
+ struct bnad *bnad;
+
+ bnad = netdev_priv(netdev);
+
+ /* BNA_ISR_GET(bnad); Inc Ref count */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+
+ if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ bna_mbox_handler(&bnad->bna, intr_status);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* BNAD_ISR_PUT(bnad); Dec Ref count */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+ int i, j;
+ u32 intr_status;
+ unsigned long flags;
+ struct net_device *netdev = data;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+ return IRQ_NONE;
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+
+ if (unlikely(!intr_status))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+ bna_mbox_handler(&bnad->bna, intr_status);
+ if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Process data interrupts */
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb)
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+done:
+ return IRQ_HANDLED;
+}
+
+/*
+ * Called in interrupt / callback context
+ * with bna_lock held, so cfg_flags access is OK
+ */
+static void
+bnad_enable_mbox_irq(struct bnad *bnad)
+{
+ int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+ if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ enable_irq(irq);
+
+ BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+}
+
+/*
+ * Called with bnad->bna_lock held because of
+ * bnad->cfg_flags access.
+ */
+static void
+bnad_disable_mbox_irq(struct bnad *bnad)
+{
+ int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+ if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ disable_irq_nosync(irq);
+
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
+
+/* Control Path Handlers */
+
+/* Callbacks */
+void
+bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+{
+ bnad_enable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+{
+ bnad_disable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+{
+ complete(&bnad->bnad_completions.ioc_comp);
+ bnad->bnad_completions.ioc_comp_status = status;
+}
+
+void
+bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+{
+ complete(&bnad->bnad_completions.ioc_comp);
+ bnad->bnad_completions.ioc_comp_status = status;
+}
+
+static void
+bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.port_comp);
+
+ netif_carrier_off(bnad->netdev);
+}
+
+void
+bnad_cb_port_link_status(struct bnad *bnad,
+ enum bna_link_status link_status)
+{
+ bool link_up = false;
+
+ link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
+
+ if (link_status == BNA_CEE_UP) {
+ set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+ BNAD_UPDATE_CTR(bnad, cee_up);
+ } else
+ clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+
+ if (link_up) {
+ if (!netif_carrier_ok(bnad->netdev)) {
+ pr_warn("bna: %s link up\n",
+ bnad->netdev->name);
+ netif_carrier_on(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+ /* Force an immediate Transmit Schedule */
+ pr_info("bna: %s TX_STARTED\n",
+ bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ } else {
+ netif_stop_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+ }
+ } else {
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_warn("bna: %s link down\n",
+ bnad->netdev->name);
+ netif_carrier_off(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ }
+ }
+}
+
+static void
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.tx_comp);
+}
+
+static void
+bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+ tx_info->tcb[tcb->id] = tcb;
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+
+ tx_info->tcb[tcb->id] = NULL;
+}
+
+static void
+bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = ccb;
+ ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
+}
+
+static void
+bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = NULL;
+}
+
+static void
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+
+ if (tx_info != &bnad->tx_info[0])
+ return;
+
+ clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+ netif_stop_queue(bnad->netdev);
+ pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+}
+
+static void
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ return;
+
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_unmap_q *unmap_q;
+
+ if (!tcb || (!tcb->unmap_q))
+ return;
+
+ unmap_q = tcb->unmap_q;
+ if (!unmap_q->unmap_array)
+ return;
+
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return;
+
+ bnad_free_all_txbufs(bnad, tcb);
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+}
+
+static void
+bnad_cb_rx_cleanup(struct bnad *bnad,
+ struct bna_ccb *ccb)
+{
+ bnad_cq_cmpl_init(bnad, ccb);
+
+ bnad_free_rxbufs(bnad, ccb->rcb[0]);
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1]) {
+ bnad_free_rxbufs(bnad, ccb->rcb[1]);
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+ }
+}
+
+static void
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+
+ /* Now allocate & post buffers for this RCB */
+ /* !!Allocation in callback context */
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
+
+static void
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.rx_comp);
+}
+
+static void
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ bnad->bnad_completions.mcast_comp_status = status;
+ complete(&bnad->bnad_completions.mcast_comp);
+}
+
+void
+bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats)
+{
+ if (status == BNA_CB_SUCCESS)
+ BNAD_UPDATE_CTR(bnad, hw_stats_updates);
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+}
+
+/* Resource allocation, free functions */
+
+static void
+bnad_mem_free(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if (mem_info->mdl == NULL)
+ return;
+
+ for (i = 0; i < mem_info->num; i++) {
+ if (mem_info->mdl[i].kva != NULL) {
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
+ dma_pa);
+ pci_free_consistent(bnad->pcidev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
+ } else
+ kfree(mem_info->mdl[i].kva);
+ }
+ }
+ kfree(mem_info->mdl);
+ mem_info->mdl = NULL;
+}
+
+static int
+bnad_mem_alloc(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if ((mem_info->num == 0) || (mem_info->len == 0)) {
+ mem_info->mdl = NULL;
+ return 0;
+ }
+
+ mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
+ GFP_KERNEL);
+ if (mem_info->mdl == NULL)
+ return -ENOMEM;
+
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva =
+ pci_alloc_consistent(bnad->pcidev,
+ mem_info->len, &dma_pa);
+
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+
+ BNA_SET_DMA_ADDR(dma_pa,
+ &(mem_info->mdl[i].dma));
+ }
+ } else {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva = kzalloc(mem_info->len,
+ GFP_KERNEL);
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+ }
+ }
+
+ return 0;
+
+err_return:
+ bnad_mem_free(bnad, mem_info);
+ return -ENOMEM;
+}
+
+/* Free IRQ for Mailbox */
+static void
+bnad_mbox_irq_free(struct bnad *bnad,
+ struct bna_intr_info *intr_info)
+{
+ int irq;
+ unsigned long flags;
+
+ if (intr_info->idl == NULL)
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_disable_mbox_irq(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ irq = BNAD_GET_MBOX_IRQ(bnad);
+ free_irq(irq, bnad->netdev);
+
+ kfree(intr_info->idl);
+}
+
+/*
+ * Allocates the IRQ for the mailbox, but keeps it disabled.
+ * It will be enabled once we get the mbox-enable callback
+ * from bna.
+ */
+static int
+bnad_mbox_irq_alloc(struct bnad *bnad,
+ struct bna_intr_info *intr_info)
+{
+ int err;
+ unsigned long irq_flags, flags;
+ u32 irq;
+ irq_handler_t irq_handler;
+
+ /* Mbox should use only 1 vector */
+
+ intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
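+ /*
+ * In MSIX mode the last vector of the MSIX table is dedicated
+ * to the mailbox; in INTx mode the single PCI IRQ is shared
+ * with the data path.
+ */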
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX) {
+ irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
+ irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ irq_flags = 0;
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl[0].vector = bnad->msix_num - 1;
+ } else {
+ irq_handler = (irq_handler_t)bnad_isr;
+ irq = bnad->pcidev->irq;
+ irq_flags = IRQF_SHARED;
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ /* intr_info->idl.vector = 0 ? */
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+
+ /*
+ * Set the Mbox IRQ disable flag, so that the IRQ handler
+ * called from request_irq() for SHARED IRQs does not execute
+ */
+ set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
+ err = request_irq(irq, irq_handler, irq_flags,
+ bnad->mbox_irq_name, bnad->netdev);
+
+ if (err) {
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+ return err;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ disable_irq_nosync(irq);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return 0;
+}
+
+static void
+bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
+{
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+}
+
+/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
+static int
+bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
+ uint txrx_id, struct bna_intr_info *intr_info)
+{
+ int i, vector_start = 0;
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
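+ /*
+ * MSIX vectors are laid out with all TxQ vectors first,
+ * followed by the Rx path vectors; the mailbox uses the
+ * last vector.
+ */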
+ switch (src) {
+ case BNAD_INTR_TX:
+ vector_start = txrx_id;
+ break;
+
+ case BNAD_INTR_RX:
+ vector_start = bnad->num_tx * bnad->num_txq_per_tx +
+ txrx_id;
+ break;
+
+ default:
+ BUG();
+ }
+
+ for (i = 0; i < intr_info->num; i++)
+ intr_info->idl[i].vector = vector_start + i;
+ } else {
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ intr_info->num = 1;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
+ break;
+
+ case BNAD_INTR_RX:
+ intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Tx MSIX vector(s) from the kernel
+ */
+static void
+bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ int num_txqs)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ if (tx_info->tcb[i] == NULL)
+ continue;
+
+ vector_num = tx_info->tcb[i]->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
+ }
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ uint tx_id, int num_txqs)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ vector_num = tx_info->tcb[i]->intr_vector;
+ sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_tx, 0,
+ tx_info->tcb[i]->name,
+ tx_info->tcb[i]);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ if (i > 0)
+ bnad_tx_msix_unregister(bnad, tx_info, i);
+ return -1;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Rx MSIX vector(s) from the kernel
+ */
+static void
+bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ int num_rxps)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ if (rx_info->rx_ctrl[i].ccb == NULL)
+ continue;
+
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector,
+ rx_info->rx_ctrl[i].ccb);
+ }
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ uint rx_id, int num_rxps)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
+ bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_rx, 0,
+ rx_info->rx_ctrl[i].ccb->name,
+ rx_info->rx_ctrl[i].ccb);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ if (i > 0)
+ bnad_rx_msix_unregister(bnad, rx_info, i);
+ return -1;
+}
+
+/* Free Tx object Resources */
+static void
+bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Tx object */
+static int
+bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint tx_id)
+{
+ int i, err = 0;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Free Rx object Resources */
+static void
+bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Rx object */
+static int
+bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint rx_id)
+{
+ int i, err = 0;
+
+ /* All memory needs to be allocated before setup_ccbs */
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_rx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Timer callbacks */
+/* a) IOC timer */
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_hb_check(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_sem_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * All timer routines use bnad->bna_lock to protect against
+ * the following race, which may occur in case of no locking:
+ * Time CPU m CPU n
+ * 0 1 = test_bit
+ * 1 clear_bit
+ * 2 del_timer_sync
+ * 3 mod_timer
+ */
+
+/* b) Dynamic Interrupt Moderation Timer */
+static void
+bnad_dim_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i, j;
+ unsigned long flags;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (!rx_ctrl->ccb)
+ continue;
+ bna_rx_dim_update(rx_ctrl->ccb);
+ }
+ }
+
+ /* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
+ if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/* c) Statistics Timer */
+static void
+bnad_stats_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_stats_get(&bnad->bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Set up timer for DIM
+ * Called with bnad->bna_lock held
+ */
+void
+bnad_dim_timer_start(struct bnad *bnad)
+{
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->dim_timer, bnad_dim_timeout,
+ (unsigned long)bnad);
+ set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ }
+}
+
+/*
+ * Set up timer for statistics
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_start(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->stats_timer, bnad_stats_timeout,
+ (unsigned long)bnad);
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Stops the stats timer
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_stop(struct bnad *bnad)
+{
+ int to_del = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ to_del = 1;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->stats_timer);
+}
+
+/* Utilities */
+
+static void
+bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
+{
+ int i = 1; /* Index 0 has broadcast address */
+ struct netdev_hw_addr *mc_addr;
+
+ netdev_for_each_mc_addr(mc_addr, netdev) {
+ memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
+ ETH_ALEN);
+ i++;
+ }
+}
+
+static int
+bnad_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ int rcvd = 0;
+
+ ccb = rx_ctrl->ccb;
+
+ bnad = ccb->bnad;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ rcvd = bnad_poll_cq(bnad, ccb, budget);
+ if (rcvd == budget)
+ return rcvd;
+
+poll_exit:
+ napi_complete((napi));
+
+ BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+
+ bnad_enable_rx_irq(bnad, ccb);
+ return rcvd;
+}
+
+static int
+bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ int rcvd = 0;
+ int i, j;
+
+ ccb = rx_ctrl->ccb;
+
+ bnad = ccb->bnad;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ /* Handle Tx Completions, if any */
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ }
+
+ /* Handle Rx Completions */
+ rcvd = bnad_poll_cq(bnad, ccb, budget);
+ if (rcvd == budget)
+ return rcvd;
+poll_exit:
+ napi_complete((napi));
+
+ BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+
+ bnad_enable_txrx_irqs(bnad);
+ return rcvd;
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ int (*napi_poll) (struct napi_struct *, int);
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+ unsigned long flags;
+
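+ /*
+ * With MSIX, Tx completions are handled by their own vectors,
+ * so NAPI polls Rx only; with INTx a single poll routine
+ * services both Tx and Rx.
+ */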
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ napi_poll = bnad_napi_poll_rx;
+ else
+ napi_poll = bnad_napi_poll_txrx;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+ netif_napi_add(bnad->netdev, &rx_ctrl->napi,
+ napi_poll, 64);
+ napi_enable(&rx_ctrl->napi);
+ }
+}
+
+static void
+bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+{
+ int i;
+
+ /* First disable and then clean up */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ }
+}
+
+/* Should be called with conf_lock held */
+void
+bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
+{
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ unsigned long flags;
+
+ if (!tx_info->tx)
+ return;
+
+ init_completion(&bnad->bnad_completions.tx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.tx_comp);
+
+ if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
+ bnad_tx_msix_unregister(bnad, tx_info,
+ bnad->num_txq_per_tx);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ tx_info->tx = NULL;
+
+ if (0 == tx_id)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
+ bnad_tx_res_free(bnad, res_info);
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_setup_tx(struct bnad *bnad, uint tx_id)
+{
+ int err;
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
+ struct bna_tx_event_cbfn tx_cbfn;
+ struct bna_tx *tx;
+ unsigned long flags;
+
+ /* Initialize the Tx object configuration */
+ tx_config->num_txq = bnad->num_txq_per_tx;
+ tx_config->txq_depth = bnad->txq_depth;
+ tx_config->tx_type = BNA_TX_T_REGULAR;
+
+ /* Initialize the tx event handlers */
+ tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
+ tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
+ tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
+ tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
+ tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
+
+ /* Get BNA's resource requirement for one tx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_res_req(bnad->num_txq_per_tx,
+ bnad->txq_depth, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
+ bnad->num_txq_per_tx,
+ BNAD_TX_UNMAPQ_DEPTH);
+
+ /* Allocate resources */
+ err = bnad_tx_res_alloc(bnad, res_info, tx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Tx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
+ tx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!tx)
+ goto err_return;
+ tx_info->tx = tx;
+
+ /* Register ISR for the Tx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_tx_msix_register(bnad, tx_info,
+ tx_id, bnad->num_txq_per_tx);
+ if (err)
+ goto err_return;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_enable(tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Setup the rx config for bna_rx_create */
+/* bnad decides the configuration */
+static void
+bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
+{
+ rx_config->rx_type = BNA_RX_T_REGULAR;
+ rx_config->num_paths = bnad->num_rxp_per_rx;
+
+ if (bnad->num_rxp_per_rx > 1) {
+ rx_config->rss_status = BNA_STATUS_T_ENABLED;
+ rx_config->rss_config.hash_type =
+ (BFI_RSS_T_V4_TCP |
+ BFI_RSS_T_V6_TCP |
+ BFI_RSS_T_V4_IP |
+ BFI_RSS_T_V6_IP);
+ rx_config->rss_config.hash_mask =
+ bnad->num_rxp_per_rx - 1;
+ get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+ sizeof(rx_config->rss_config.toeplitz_hash_key));
+ } else {
+ rx_config->rss_status = BNA_STATUS_T_DISABLED;
+ memset(&rx_config->rss_config, 0,
+ sizeof(rx_config->rss_config));
+ }
+ rx_config->rxp_type = BNA_RXP_SLR;
+ rx_config->q_depth = bnad->rxq_depth;
+
+ rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+
+ rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
+bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ unsigned long flags;
+ int dim_timer_del = 0;
+
+ if (!rx_info->rx)
+ return;
+
+ if (0 == rx_id) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ dim_timer_del = bnad_dim_timer_running(bnad);
+ if (dim_timer_del)
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (dim_timer_del)
+ del_timer_sync(&bnad->dim_timer);
+ }
+
+ bnad_napi_disable(bnad, rx_id);
+
+ init_completion(&bnad->bnad_completions.rx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.rx_comp);
+
+ if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
+ bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_destroy(rx_info->rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ rx_info->rx = NULL;
+
+ bnad_rx_res_free(bnad, res_info);
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+int
+bnad_setup_rx(struct bnad *bnad, uint rx_id)
+{
+ int err;
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_rx_event_cbfn rx_cbfn;
+ struct bna_rx *rx;
+ unsigned long flags;
+
+ /* Initialize the Rx object configuration */
+ bnad_init_rx_config(bnad, rx_config);
+
+ /* Initialize the Rx event handlers */
+ rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+ rx_cbfn.rcb_destroy_cbfn = NULL;
+ rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
+ rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
+ rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
+ rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
+
+ /* Get BNA's resource requirement for one Rx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_res_req(rx_config, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
+ rx_config->num_paths +
+ ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
+ rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
+
+ /* Allocate resource */
+ err = bnad_rx_res_alloc(bnad, res_info, rx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Rx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
+ rx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!rx)
+ goto err_return;
+ rx_info->rx = rx;
+
+ /* Register ISR for the Rx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_rx_msix_register(bnad, rx_info, rx_id,
+ rx_config->num_paths);
+ if (err)
+ goto err_return;
+ }
+
+ /* Enable NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (0 == rx_id) {
+ /* Set up Dynamic Interrupt Moderation Vector */
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
+ bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
+
+ /* Enable VLAN filtering only on the default Rx */
+ bna_rx_vlanfilter_enable(rx);
+
+ /* Start the DIM timer */
+ bnad_dim_timer_start(bnad);
+ }
+
+ bna_rx_enable(rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_cleanup_rx(bnad, rx_id);
+ return err;
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_tx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_tx_info *tx_info;
+
+ tx_info = &bnad->tx_info[0];
+ if (!tx_info->tx)
+ return;
+
+ bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_rx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info;
+ int i;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ bna_rx_coalescing_timeo_set(rx_info->rx,
+ bnad->rx_coalescing_timeo);
+ }
+}
+
+/*
+ * Called with bnad->bna_lock held
+ */
+static int
+bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+{
+ int ret;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EADDRNOTAVAIL;
+
+ /* If datapath is down, pretend everything went through */
+ if (!bnad->rx_info[0].rx)
+ return 0;
+
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ if (ret != BNA_CB_SUCCESS)
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_enable_default_bcast(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[0];
+ int ret;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.mcast_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
+ bnad_cb_rx_mcast_add);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (ret == BNA_CB_SUCCESS)
+ wait_for_completion(&bnad->bnad_completions.mcast_comp);
+ else
+ return -ENODEV;
+
+ if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Statistics utilities */
+void
+bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
+{
+ int i, j;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ stats->rx_packets += bnad->rx_info[i].
+ rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
+ stats->rx_bytes += bnad->rx_info[i].
+ rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ stats->rx_packets +=
+ bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1]->rxq->rx_packets;
+ stats->rx_bytes +=
+ bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1]->rxq->rx_bytes;
+ }
+ }
+ }
+ }
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ if (bnad->tx_info[i].tcb[j]) {
+ stats->tx_packets +=
+ bnad->tx_info[i].tcb[j]->txq->tx_packets;
+ stats->tx_bytes +=
+ bnad->tx_info[i].tcb[j]->txq->tx_bytes;
+ }
+ }
+ }
+}
+
+/*
+ * Must be called with the bna_lock held.
+ */
+void
+bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
+{
+ struct bfi_ll_stats_mac *mac_stats;
+ u64 bmap;
+ int i;
+
+ mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
+ stats->rx_errors =
+ mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
+ mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
+ mac_stats->rx_undersize;
+ stats->tx_errors = mac_stats->tx_fcs_error +
+ mac_stats->tx_undersize;
+ stats->rx_dropped = mac_stats->rx_drop;
+ stats->tx_dropped = mac_stats->tx_drop;
+ stats->multicast = mac_stats->rx_multicast;
+ stats->collisions = mac_stats->tx_total_collision;
+
+ stats->rx_length_errors = mac_stats->rx_frame_length_error;
+
+ /* receive ring buffer overflow ?? */
+
+ stats->rx_crc_errors = mac_stats->rx_fcs_error;
+ stats->rx_frame_errors = mac_stats->rx_alignment_error;
+ /* receiver FIFO overrun */
+ bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
+ ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
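+ /* Report frame drops only for the first active Rx function */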
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats->rx_fifo_errors +=
+ bnad->stats.bna_stats->
+ hw_stats->rxf_stats[i].frame_drops;
+ break;
+ }
+ bmap >>= 1;
+ }
+}
+
+static void
+bnad_mbox_irq_sync(struct bnad *bnad)
+{
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ else
+ irq = bnad->pcidev->irq;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ synchronize_irq(irq);
+}
+
+/* Utility used by bnad_start_xmit, for doing TSO */
+static int
+bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+ int err;
+
+ /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
+ BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
+ }
+ }
+
+ /*
+ * For TSO, the TCP checksum field is seeded with pseudo-header sum
+ * excluding the length field.
+ */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* Do we really need these? */
+ iph->tot_len = 0;
+ iph->check = 0;
+
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso4);
+ } else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
+ ipv6h->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso6);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize Q numbers depending on Rx Paths
+ * Called with bnad->bna_lock held, because of cfg_flags
+ * access.
+ */
+static void
+bnad_q_num_init(struct bnad *bnad)
+{
+ int rxps;
+
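+ /*
+ * One Rx path per online CPU, capped by the driver maximum;
+ * INTx mode is limited to a single Rx path.
+ */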
+ rxps = min((uint)num_online_cpus(),
+ (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ rxps = 1; /* INTx */
+
+ bnad->num_rx = 1;
+ bnad->num_tx = 1;
+ bnad->num_rxp_per_rx = rxps;
+ bnad->num_txq_per_tx = BNAD_TXQ_NUM;
+}
+
+/*
+ * Adjusts the Q numbers, given the number of MSI-X vectors.
+ * Gives preference to RSS over Tx priority queues; in that
+ * case only one Tx queue is used.
+ * Called with bnad->bna_lock held because of cfg_flags access.
+ */
+static void
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
+{
+ bnad->num_txq_per_tx = 1;
+ if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
+ bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
+ (bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bnad->num_rxp_per_rx = msix_vectors -
+ (bnad->num_tx * bnad->num_txq_per_tx) -
+ BNAD_MAILBOX_MSIX_VECTORS;
+ } else
+ bnad->num_rxp_per_rx = 1;
+}
+
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ if (is_zero_ether_addr(netdev->dev_addr))
+ memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+}
+
+/* Enable / disable device */
+static void
+bnad_device_disable(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.ioc_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.ioc_comp);
+}
+
+static int
+bnad_device_enable(struct bnad *bnad)
+{
+ int err = 0;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.ioc_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_device_enable(&bnad->bna.device);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.ioc_comp);
+
+ if (bnad->bnad_completions.ioc_comp_status)
+ err = bnad->bnad_completions.ioc_comp_status;
+
+ return err;
+}
+
+/* Free BNA resources */
+static void
+bnad_res_free(struct bnad *bnad)
+{
+ int i;
+ struct bna_res_info *res_info = &bnad->res_info[0];
+
+ for (i = 0; i < BNA_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else
+ bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for BNA */
+static int
+bnad_res_alloc(struct bnad *bnad)
+{
+ int i, err;
+ struct bna_res_info *res_info = &bnad->res_info[0];
+
+ for (i = 0; i < BNA_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
+ else
+ err = bnad_mbox_irq_alloc(bnad,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_res_free(bnad);
+ return err;
+}
+
+/* Interrupt enable / disable */
+static void
+bnad_enable_msix(struct bnad *bnad)
+{
+ int i, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (bnad->msix_table)
+ return;
+
+ bnad->msix_table =
+ kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!bnad->msix_table)
+ goto intx_mode;
+
+ for (i = 0; i < bnad->msix_num; i++)
+ bnad->msix_table[i].entry = i;
+
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+ if (ret > 0) {
+ /* Not enough MSI-X vectors. */
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ /* ret = #of vectors that we got */
+ bnad_q_num_adjust(bnad, ret);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
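+ /*
+ * Recompute the vector count from the adjusted queue
+ * configuration, plus the mailbox vector(s).
+ */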
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
+ + (bnad->num_rx
+ * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+
+ /* Try once more with adjusted numbers */
+ /* If this fails, fall back to INTx */
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+ bnad->msix_num);
+ if (ret)
+ goto intx_mode;
+
+ } else if (ret < 0)
+ goto intx_mode;
+ return;
+
+intx_mode:
+
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ bnad->msix_num = 0;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_disable_msix(struct bnad *bnad)
+{
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ pci_disable_msix(bnad->pcidev);
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ }
+}
+
+/* Netdev entry points */
+static int
+bnad_open(struct net_device *netdev)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ int mtu;
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Tx */
+ err = bnad_setup_tx(bnad, 0);
+ if (err)
+ goto err_return;
+
+ /* Rx */
+ err = bnad_setup_rx(bnad, 0);
+ if (err)
+ goto cleanup_tx;
+
+ /* Port */
+ pause_config.tx_pause = 0;
+ pause_config.rx_pause = 0;
+
+ mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
+ bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+ bna_port_enable(&bnad->bna.port);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Enable broadcast */
+ bnad_enable_default_bcast(bnad);
+
+ /* Set the UCAST address */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Start the stats timer */
+ bnad_stats_timer_start(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+
+cleanup_tx:
+ bnad_cleanup_tx(bnad, 0);
+
+err_return:
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static int
+bnad_stop(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Stop the stats timer */
+ bnad_stats_timer_stop(bnad);
+
+ init_completion(&bnad->bnad_completions.port_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
+ bnad_cb_port_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.port_comp);
+
+ bnad_cleanup_tx(bnad, 0);
+ bnad_cleanup_rx(bnad, 0);
+
+ /* Synchronize mailbox IRQ */
+ bnad_mbox_irq_sync(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+}
+
+/* TX */
+/*
+ * bnad_start_xmit : Netdev entry point for Transmit
+ * Called under lock held by net_device
+ */
+static netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ u16 txq_prod, vlan_tag = 0;
+ u32 unmap_prod, wis, wis_used, wi_range;
+ u32 vectors, vect_id, i, acked;
+ u32 tx_id;
+ int err;
+
+ struct bnad_tx_info *tx_info;
+ struct bna_tcb *tcb;
+ struct bnad_unmap_q *unmap_q;
+ dma_addr_t dma_addr;
+ struct bna_txq_entry *txqent;
+ bna_txq_wi_ctrl_flag_t flags;
+
+ if (unlikely
+ (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * Takes care of a Tx that was scheduled between the clearing of
+ * BNAD_RF_TX_STARTED and the netif_stop_queue() call.
+ */
+ if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_id = 0;
+
+ tx_info = &bnad->tx_info[tx_id];
+ tcb = tx_info->tcb[tx_id];
+ unmap_q = tcb->unmap_q;
+
+ vectors = 1 + skb_shinfo(skb)->nr_frags;
+ if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
+ acked = 0;
+ if (unlikely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ if ((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index &&
+ !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ } else {
+ netif_stop_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+
+ smp_mb();
+ /*
+ * Check again to deal with race condition between
+ * netif_stop_queue here, and netif_wake_queue in
+ * interrupt handler which is not inside netif tx lock.
+ */
+ if (likely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ return NETDEV_TX_BUSY;
+ } else {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+
+ unmap_prod = unmap_q->producer_index;
+ wis_used = 1;
+ vect_id = 0;
+ flags = 0;
+
+ txq_prod = tcb->producer_index;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
+ BUG_ON(!(wi_range <= tcb->q_depth));
+ txqent->hdr.wi.reserved = 0;
+ txqent->hdr.wi.num_vectors = vectors;
+ txqent->hdr.wi.opcode =
+ htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+ BNA_TXQ_WI_SEND));
+
+ if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
+ vlan_tag = (u16) vlan_tx_tag_get(skb);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+ if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
+ vlan_tag =
+ (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+
+ txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+ if (skb_is_gso(skb)) {
+ err = bnad_tso_prepare(bnad, skb);
+ if (err) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+ flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (tcp_hdrlen(skb) >> 2,
+ skb_transport_offset(skb)));
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
+
+ txqent->hdr.wi.lso_mss = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ BUG_ON(!(skb_headlen(skb) >=
+ skb_transport_offset(skb) + tcp_hdrlen(skb)));
+
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+
+ BUG_ON(!(skb_headlen(skb) >=
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr)));
+ } else {
+ err = skb_checksum_help(skb);
+ BNAD_UPDATE_CTR(bnad, csum_help);
+ if (err) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, csum_help_err);
+ return NETDEV_TX_OK;
+ }
+ }
+ } else {
+ txqent->hdr.wi.lso_mss = 0;
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+ }
+
+ txqent->hdr.wi.flags = htons(flags);
+
+ txqent->hdr.wi.frame_length = htonl(skb->len);
+
+ unmap_q->unmap_array[unmap_prod].skb = skb;
+ BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
+ txqent->vector[vect_id].length = htons(skb_headlen(skb));
+ dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
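+	/*
+	 * Map each paged fragment into its own Tx vector; a new (extension)
+	 * work item is chained once BFI_TX_MAX_VECTORS_PER_WI vectors are used.
+	 */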
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+
+ if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
+ vect_id = 0;
+ if (--wi_range)
+ txqent++;
+ else {
+ BNA_QE_INDX_ADD(txq_prod, wis_used,
+ tcb->q_depth);
+ wis_used = 0;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
+ txqent, wi_range);
+ BUG_ON(!(wi_range <= tcb->q_depth));
+ }
+ wis_used++;
+ txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+ }
+
+ BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
+ txqent->vector[vect_id].length = htons(size);
+ dma_addr =
+ pci_map_page(bnad->pcidev, frag->page,
+ frag->page_offset, size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ }
+
+ unmap_q->producer_index = unmap_prod;
+ BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
+ tcb->producer_index = txq_prod;
+
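+	/* Ensure the work items are written to memory before ringing the TxQ doorbell */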
+ smp_mb();
+ bna_txq_prod_indx_doorbell(tcb);
+
+ if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
+ tasklet_schedule(&bnad->tx_free_tasklet);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Use spin_lock to synchronize reading of the stats structures, which
+ * are written by BNA under the same lock.
+ */
+static struct rtnl_link_stats64 *
+bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bnad_netdev_qstats_fill(bnad, stats);
+ bnad_netdev_hwstats_fill(bnad, stats);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return stats;
+}
+
+static void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ u32 new_mask, valid_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ new_mask = valid_mask = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
+ new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_PROMISC) {
+ new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags &= ~BNAD_CF_PROMISC;
+ }
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
+ new_mask |= BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
+ new_mask &= ~BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
+ }
+ }
+
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+
+ if (!netdev_mc_empty(netdev)) {
+ u8 *mcaddr_list;
+ int mc_count = netdev_mc_count(netdev);
+
+ /* Index 0 holds the broadcast address */
+ mcaddr_list =
+ kzalloc((mc_count + 1) * ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mcaddr_list)
+ goto unlock;
+
+ memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* Copy rest of the MC addresses */
+ bnad_netdev_mc_list_get(netdev, mcaddr_list);
+
+ bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mcaddr_list, NULL);
+
+ /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+ kfree(mcaddr_list);
+ }
+unlock:
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * bna_lock is used to sync writes to netdev->addr
+ * conf_lock cannot be used since this call may be made
+ * in a non-blocking context.
+ */
+static int
+bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
+
+ if (!err)
+ memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return err;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int mtu, err = 0;
+ unsigned long flags;
+
+ struct bnad *bnad = netdev_priv(netdev);
+
+ if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ netdev->mtu = new_mtu;
+
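+	/* MTU passed to BNA accounts for the Ethernet header and FCS */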
+ mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *vlan_grp)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad->vlan_grp = vlan_grp;
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_add_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+bnad_netpoll(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ u32 curr_mask;
+ int i, j;
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bna_intx_disable(&bnad->bna, curr_mask);
+ bnad_isr(bnad->pcidev->irq, netdev);
+ bna_intx_enable(&bnad->bna, curr_mask);
+ } else {
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb) {
+ bnad_disable_rx_irq(bnad,
+ rx_ctrl->ccb);
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+ }
+ }
+}
+#endif
+
+static const struct net_device_ops bnad_netdev_ops = {
+ .ndo_open = bnad_open,
+ .ndo_stop = bnad_stop,
+ .ndo_start_xmit = bnad_start_xmit,
+ .ndo_get_stats64 = bnad_get_stats64,
+ .ndo_set_rx_mode = bnad_set_rx_mode,
+ .ndo_set_multicast_list = bnad_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnad_set_mac_address,
+ .ndo_change_mtu = bnad_change_mtu,
+ .ndo_vlan_rx_register = bnad_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnad_netpoll
+#endif
+};
+
+static void
+bnad_netdev_init(struct bnad *bnad, bool using_dac)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+
+ netdev->features |= NETIF_F_GRO;
+ pr_warn("bna: GRO enabled, using kernel stack GRO\n");
+
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+
+ if (using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |=
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features = netdev->features;
+ netdev->mem_start = bnad->mmio_start;
+ netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
+
+ netdev->netdev_ops = &bnad_netdev_ops;
+ bnad_set_ethtool_ops(netdev);
+}
+
+/*
+ * 1. Initialize the bnad structure
+ * 2. Setup netdev pointer in pci_dev
+ * 3. Initialize Tx free tasklet
+ * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ */
+static int
+bnad_init(struct bnad *bnad,
+ struct pci_dev *pdev, struct net_device *netdev)
+{
+ unsigned long flags;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ bnad->netdev = netdev;
+ bnad->pcidev = pdev;
+ bnad->mmio_start = pci_resource_start(pdev, 0);
+ bnad->mmio_len = pci_resource_len(pdev, 0);
+ bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ if (!bnad->bar0) {
+ dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+ pci_set_drvdata(pdev, NULL);
+ return -ENOMEM;
+ }
+ pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!bnad_msix_disable)
+ bnad->cfg_flags = BNAD_CF_MSIX;
+
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
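+	/* One MSI-X vector per TxQ and per RxP, plus one for the mailbox */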
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
+ (bnad->num_rx * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+
+ bnad->txq_depth = BNAD_TXQ_DEPTH;
+ bnad->rxq_depth = BNAD_RXQ_DEPTH;
+ bnad->rx_csum = true;
+
+ bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+
+ tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+ (unsigned long)bnad);
+
+ return 0;
+}
+
+/*
+ * Must be called after bnad_pci_uninit()
+ * so that iounmap() and pci_set_drvdata(NULL)
+ * happens only after PCI uninitialization.
+ */
+static void
+bnad_uninit(struct bnad *bnad)
+{
+ if (bnad->bar0)
+ iounmap(bnad->bar0);
+ pci_set_drvdata(bnad->pcidev, NULL);
+}
+
+/*
+ * Initialize locks
+ *  a) Per-device mutex used for serializing configuration
+ *     changes from the OS interface
+ *  b) spin_lock used to protect the bna state machine
+ */
+static void
+bnad_lock_init(struct bnad *bnad)
+{
+ spin_lock_init(&bnad->bna_lock);
+ mutex_init(&bnad->conf_mutex);
+}
+
+static void
+bnad_lock_uninit(struct bnad *bnad)
+{
+ mutex_destroy(&bnad->conf_mutex);
+}
+
+/* PCI Initialization */
+static int
+bnad_pci_init(struct bnad *bnad,
+ struct pci_dev *pdev, bool *using_dac)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ err = pci_request_regions(pdev, BNAD_NAME);
+ if (err)
+ goto disable_device;
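+	/* Prefer 64-bit DMA; fall back to a 32-bit mask if the platform cannot support it */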
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ *using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
+ }
+ *using_dac = 0;
+ }
+ pci_set_master(pdev);
+ return 0;
+
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void
+bnad_pci_uninit(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pcidev_id)
+{
+ bool using_dac;
+ int err;
+ struct bnad *bnad;
+ struct bna *bna;
+ struct net_device *netdev;
+ struct bfa_pcidev pcidev_info;
+ unsigned long flags;
+
+ pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+ pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&bnad_fwimg_mutex);
+ if (!cna_get_firmware_buf(pdev)) {
+ mutex_unlock(&bnad_fwimg_mutex);
+ pr_warn("Failed to load Firmware Image!\n");
+ return -ENODEV;
+ }
+ mutex_unlock(&bnad_fwimg_mutex);
+
+ /*
+	 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
+ * bnad = netdev->priv
+ */
+ netdev = alloc_etherdev(sizeof(struct bnad));
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
+ err = -ENOMEM;
+ return err;
+ }
+ bnad = netdev_priv(netdev);
+
+ /*
+ * PCI initialization
+ * Output : using_dac = 1 for 64 bit DMA
+ * = 0 for 32 bit DMA
+ */
+ err = bnad_pci_init(bnad, pdev, &using_dac);
+ if (err)
+ goto free_netdev;
+
+ bnad_lock_init(bnad);
+ /*
+ * Initialize bnad structure
+ * Setup relation between pci_dev & netdev
+ * Init Tx free tasklet
+ */
+ err = bnad_init(bnad, pdev, netdev);
+ if (err)
+ goto pci_uninit;
+ /* Initialize netdev structure, set up ethtool ops */
+ bnad_netdev_init(bnad, using_dac);
+
+ bnad_enable_msix(bnad);
+
+	/* Get resource requirement from bna */
+ bna_res_req(&bnad->res_info[0]);
+
+ /* Allocate resources from bna */
+ err = bnad_res_alloc(bnad);
+ if (err)
+ goto free_netdev;
+
+ bna = &bnad->bna;
+
+ /* Setup pcidev_info for bna_init() */
+ pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+ pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+ pcidev_info.device_id = bnad->pcidev->device;
+ pcidev_info.pci_bar_kva = bnad->bar0;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->stats.bna_stats = &bna->stats;
+
+ /* Set up timers */
+ setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
+ ((unsigned long)bnad));
+
+ /* Now start the timer before calling IOC */
+ mod_timer(&bnad->bna.device.ioc.ioc_timer,
+ jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
+
+ /*
+ * Start the chip
+ * Don't care even if err != 0, bna state machine will
+ * deal with it
+ */
+ err = bnad_device_enable(bnad);
+
+ /* Get the burnt-in mac */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mac_get(&bna->port, &bnad->perm_addr);
+ bnad_set_netdev_perm_addr(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ /*
+ * Make sure the link appears down to the stack
+ */
+ netif_carrier_off(netdev);
+
+	/* Finally, register with net_device layer */
+ err = register_netdev(netdev);
+ if (err) {
+ pr_err("BNA : Registering with netdev failed\n");
+ goto disable_device;
+ }
+
+ return 0;
+
+disable_device:
+ mutex_lock(&bnad->conf_mutex);
+ bnad_device_disable(bnad);
+ del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.device.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
+ bnad_res_free(bnad);
+ bnad_disable_msix(bnad);
+pci_uninit:
+ bnad_pci_uninit(pdev);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+free_netdev:
+ free_netdev(netdev);
+ return err;
+}
+
+static void __devexit
+bnad_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnad *bnad;
+ struct bna *bna;
+ unsigned long flags;
+
+ if (!netdev)
+ return;
+
+ pr_info("%s bnad_pci_remove\n", netdev->name);
+ bnad = netdev_priv(netdev);
+ bna = &bnad->bna;
+
+ unregister_netdev(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad_device_disable(bnad);
+ del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.device.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
+ bnad_res_free(bnad);
+ bnad_disable_msix(bnad);
+ bnad_pci_uninit(pdev);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+ free_netdev(netdev);
+}
+
+static const struct pci_device_id bnad_pci_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ PCI_DEVICE_ID_BROCADE_CT),
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+ .class_mask = 0xffff00
+ }, {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static struct pci_driver bnad_pci_driver = {
+ .name = BNAD_NAME,
+ .id_table = bnad_pci_id_table,
+ .probe = bnad_pci_probe,
+ .remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init
+bnad_module_init(void)
+{
+ int err;
+
+ pr_info("Brocade 10G Ethernet driver\n");
+
+ bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
+
+ err = pci_register_driver(&bnad_pci_driver);
+ if (err < 0) {
+ pr_err("bna : PCI registration failed in module init "
+ "(%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit
+bnad_module_exit(void)
+{
+ pci_unregister_driver(&bnad_pci_driver);
+
+ if (bfi_fw)
+ release_firmware(bfi_fw);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
+MODULE_FIRMWARE(CNA_FW_FILE_CT);
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
new file mode 100644
index 00000000000..ebc3a907864
--- /dev/null
+++ b/drivers/net/bna/bnad.h
@@ -0,0 +1,332 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNAD_H__
+#define __BNAD_H__
+
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/ipv6.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+/* Fix for IA64 */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include "bna.h"
+
+#define BNAD_TXQ_DEPTH 2048
+#define BNAD_RXQ_DEPTH 2048
+
+#define BNAD_MAX_TXS 1
+#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
+#define BNAD_TXQ_NUM 1
+
+#define BNAD_MAX_RXS 1
+#define BNAD_MAX_RXPS_PER_RX 16
+
+/*
+ * Control structure, pointed to by ccb->ctrl, which
+ * determines the NAPI / LRO behavior of the CCB.
+ * There is a 1:1 correspondence between ccb & ctrl.
+ */
+struct bnad_rx_ctrl {
+ struct bna_ccb *ccb;
+ struct napi_struct napi;
+};
+
+#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
+
+#define BNAD_GET_TX_ID(_skb) (0)
+
+/*
+ * GLOBAL #defines (CONSTANTS)
+ */
+#define BNAD_NAME "bna"
+#define BNAD_NAME_LEN 64
+
+#define BNAD_VERSION "2.3.2.0"
+
+#define BNAD_MAILBOX_MSIX_VECTORS 1
+
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+
+#define BNAD_MAX_Q_DEPTH 0x10000
+#define BNAD_MIN_Q_DEPTH 0x200
+
+#define BNAD_JUMBO_MTU 9000
+
+#define BNAD_NETIF_WAKE_THRESHOLD 8
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
+
+/* Bit positions for tcb->flags */
+#define BNAD_TXQ_FREE_SENT 0
+
+/* Bit positions for rcb->flags */
+#define BNAD_RXQ_REFILL 0
+#define BNAD_RXQ_STARTED 1
+
+/*
+ * DATA STRUCTURES
+ */
+
+/* enums */
+enum bnad_intr_source {
+ BNAD_INTR_TX = 1,
+ BNAD_INTR_RX = 2
+};
+
+enum bnad_link_state {
+ BNAD_LS_DOWN = 0,
+ BNAD_LS_UP = 1
+};
+
+struct bnad_completion {
+ struct completion ioc_comp;
+ struct completion ucast_comp;
+ struct completion mcast_comp;
+ struct completion tx_comp;
+ struct completion rx_comp;
+ struct completion stats_comp;
+ struct completion port_comp;
+
+ u8 ioc_comp_status;
+ u8 ucast_comp_status;
+ u8 mcast_comp_status;
+ u8 tx_comp_status;
+ u8 rx_comp_status;
+ u8 stats_comp_status;
+ u8 port_comp_status;
+};
+
+/* Tx Rx Control Stats */
+struct bnad_drv_stats {
+ u64 netif_queue_stop;
+ u64 netif_queue_wakeup;
+ u64 tso4;
+ u64 tso6;
+ u64 tso_err;
+ u64 tcpcsum_offload;
+ u64 udpcsum_offload;
+ u64 csum_help;
+ u64 csum_help_err;
+
+ u64 hw_stats_updates;
+ u64 netif_rx_schedule;
+ u64 netif_rx_complete;
+ u64 netif_rx_dropped;
+
+ u64 link_toggle;
+ u64 cee_up;
+
+ u64 rxp_info_alloc_failed;
+ u64 mbox_intr_disabled;
+ u64 mbox_intr_enabled;
+ u64 tx_unmap_q_alloc_failed;
+ u64 rx_unmap_q_alloc_failed;
+
+ u64 rxbuf_alloc_failed;
+};
+
+/* Complete driver stats */
+struct bnad_stats {
+ struct bnad_drv_stats drv_stats;
+ struct bna_stats *bna_stats;
+};
+
+/* Tx / Rx Resources */
+struct bnad_tx_res_info {
+ struct bna_res_info res_info[BNA_TX_RES_T_MAX];
+};
+
+struct bnad_rx_res_info {
+ struct bna_res_info res_info[BNA_RX_RES_T_MAX];
+};
+
+struct bnad_tx_info {
+ struct bna_tx *tx; /* 1:1 between tx_info & tx */
+ struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+} ____cacheline_aligned;
+
+struct bnad_rx_info {
+ struct bna_rx *rx; /* 1:1 between rx_info & rx */
+
+ struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+} ____cacheline_aligned;
+
+/* Unmap queues for Tx / Rx cleanup */
+struct bnad_skb_unmap {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+ /* This should be the last one */
+ struct bnad_skb_unmap unmap_array[1];
+};
+
+/* Bit mask values for bnad->cfg_flags */
+#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
+#define BNAD_CF_PROMISC 0x02
+#define BNAD_CF_ALLMULTI 0x04
+#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+
+/* Defines for run_flags bit-mask */
+/* Set, tested & cleared using xxx_bit() functions */
+/* Values indicate bit positions */
+#define BNAD_RF_CEE_RUNNING 1
+#define BNAD_RF_HW_ERROR 2
+#define BNAD_RF_MBOX_IRQ_DISABLED 3
+#define BNAD_RF_TX_STARTED 4
+#define BNAD_RF_RX_STARTED 5
+#define BNAD_RF_DIM_TIMER_RUNNING 6
+#define BNAD_RF_STATS_TIMER_RUNNING 7
+
+struct bnad {
+ struct net_device *netdev;
+
+ /* Data path */
+ struct bnad_tx_info tx_info[BNAD_MAX_TXS];
+ struct bnad_rx_info rx_info[BNAD_MAX_RXS];
+
+ struct vlan_group *vlan_grp;
+ /*
+ * These q numbers are global only because
+ * they are used to calculate MSIx vectors.
+	 * Actually the exact number of queues is per Tx/Rx
+ * object.
+ */
+ u32 num_tx;
+ u32 num_rx;
+ u32 num_txq_per_tx;
+ u32 num_rxp_per_rx;
+
+ u32 txq_depth;
+ u32 rxq_depth;
+
+ u8 tx_coalescing_timeo;
+ u8 rx_coalescing_timeo;
+
+ struct bna_rx_config rx_config[BNAD_MAX_RXS];
+ struct bna_tx_config tx_config[BNAD_MAX_TXS];
+
+ u32 rx_csum;
+
+ void __iomem *bar0; /* BAR0 address */
+
+ struct bna bna;
+
+ u32 cfg_flags;
+ unsigned long run_flags;
+
+ struct pci_dev *pcidev;
+ u64 mmio_start;
+ u64 mmio_len;
+
+ u32 msix_num;
+ struct msix_entry *msix_table;
+
+ struct mutex conf_mutex;
+ spinlock_t bna_lock ____cacheline_aligned;
+
+ /* Timers */
+ struct timer_list ioc_timer;
+ struct timer_list dim_timer;
+ struct timer_list stats_timer;
+
+ /* Control path resources, memory & irq */
+ struct bna_res_info res_info[BNA_RES_T_MAX];
+ struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
+ struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
+
+ struct bnad_completion bnad_completions;
+
+ /* Burnt in MAC address */
+ mac_t perm_addr;
+
+ struct tasklet_struct tx_free_tasklet;
+
+ /* Statistics */
+ struct bnad_stats stats;
+
+ struct bnad_diag *diag;
+
+ char adapter_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
+ char mbox_irq_name[BNAD_NAME_LEN];
+};
+
+/*
+ * EXTERN VARIABLES
+ */
+extern struct firmware *bfi_fw;
+extern u32 bnad_rxqs_per_cq;
+
+/*
+ * EXTERN PROTOTYPES
+ */
+extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+/* Netdev entry point prototypes */
+extern void bnad_set_ethtool_ops(struct net_device *netdev);
+
+/* Configuration & setup */
+extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+
+extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
+
+/* Timer start/stop protos */
+extern void bnad_dim_timer_start(struct bnad *bnad);
+
+/* Statistics */
+extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
+
+/**
+ * MACROS
+ */
+/* To set & get the stats counters */
+#define BNAD_UPDATE_CTR(_bnad, _ctr) \
+ (((_bnad)->stats.drv_stats._ctr)++)
+
+#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
+
+#define bnad_enable_rx_irq_unsafe(_ccb) \
+{ \
+ bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
+ (_ccb)->rx_coalescing_timeo); \
+ bna_ib_ack((_ccb)->i_dbell, 0); \
+}
+
+#define bnad_dim_timer_running(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
+ (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
+
+#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
new file mode 100644
index 00000000000..11fa2ea842c
--- /dev/null
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -0,0 +1,1277 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "cna.h"
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+
+#include "bna.h"
+
+#include "bnad.h"
+
+#define BNAD_NUM_TXF_COUNTERS 12
+#define BNAD_NUM_RXF_COUNTERS 10
+#define BNAD_NUM_CQ_COUNTERS 3
+#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_TXQ_COUNTERS 5
+
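+/*
+ * Base stats count: rtnl_link_stats64 + driver stats + HW stats
+ * up to (but not including) the per-function rxf/txf arrays.
+ */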
+#define BNAD_ETHTOOL_STATS_NUM \
+ (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
+ sizeof(struct bnad_drv_stats) / sizeof(u64) + \
+ offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
+
+static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "multicast",
+ "collisions",
+
+ "rx_length_errors",
+ "rx_over_errors",
+ "rx_crc_errors",
+ "rx_frame_errors",
+ "rx_fifo_errors",
+ "rx_missed_errors",
+
+ "tx_aborted_errors",
+ "tx_carrier_errors",
+ "tx_fifo_errors",
+ "tx_heartbeat_errors",
+ "tx_window_errors",
+
+ "rx_compressed",
+ "tx_compressed",
+
+ "netif_queue_stop",
+ "netif_queue_wakeup",
+ "tso4",
+ "tso6",
+ "tso_err",
+ "tcpcsum_offload",
+ "udpcsum_offload",
+ "csum_help",
+ "csum_help_err",
+ "hw_stats_updates",
+ "netif_rx_schedule",
+ "netif_rx_complete",
+ "netif_rx_dropped",
+
+ "link_toggle",
+ "cee_up",
+
+ "rxp_info_alloc_failed",
+ "mbox_intr_disabled",
+ "mbox_intr_enabled",
+ "tx_unmap_q_alloc_failed",
+ "rx_unmap_q_alloc_failed",
+ "rxbuf_alloc_failed",
+
+ "mac_frame_64",
+ "mac_frame_65_127",
+ "mac_frame_128_255",
+ "mac_frame_256_511",
+ "mac_frame_512_1023",
+ "mac_frame_1024_1518",
+ "mac_frame_1518_1522",
+ "mac_rx_bytes",
+ "mac_rx_packets",
+ "mac_rx_fcs_error",
+ "mac_rx_multicast",
+ "mac_rx_broadcast",
+ "mac_rx_control_frames",
+ "mac_rx_pause",
+ "mac_rx_unknown_opcode",
+ "mac_rx_alignment_error",
+ "mac_rx_frame_length_error",
+ "mac_rx_code_error",
+ "mac_rx_carrier_sense_error",
+ "mac_rx_undersize",
+ "mac_rx_oversize",
+ "mac_rx_fragments",
+ "mac_rx_jabber",
+ "mac_rx_drop",
+
+ "mac_tx_bytes",
+ "mac_tx_packets",
+ "mac_tx_multicast",
+ "mac_tx_broadcast",
+ "mac_tx_pause",
+ "mac_tx_deferral",
+ "mac_tx_excessive_deferral",
+ "mac_tx_single_collision",
+ "mac_tx_muliple_collision",
+ "mac_tx_late_collision",
+ "mac_tx_excessive_collision",
+ "mac_tx_total_collision",
+ "mac_tx_pause_honored",
+ "mac_tx_drop",
+ "mac_tx_jabber",
+ "mac_tx_fcs_error",
+ "mac_tx_control_frame",
+ "mac_tx_oversize",
+ "mac_tx_undersize",
+ "mac_tx_fragments",
+
+ "bpc_tx_pause_0",
+ "bpc_tx_pause_1",
+ "bpc_tx_pause_2",
+ "bpc_tx_pause_3",
+ "bpc_tx_pause_4",
+ "bpc_tx_pause_5",
+ "bpc_tx_pause_6",
+ "bpc_tx_pause_7",
+ "bpc_tx_zero_pause_0",
+ "bpc_tx_zero_pause_1",
+ "bpc_tx_zero_pause_2",
+ "bpc_tx_zero_pause_3",
+ "bpc_tx_zero_pause_4",
+ "bpc_tx_zero_pause_5",
+ "bpc_tx_zero_pause_6",
+ "bpc_tx_zero_pause_7",
+ "bpc_tx_first_pause_0",
+ "bpc_tx_first_pause_1",
+ "bpc_tx_first_pause_2",
+ "bpc_tx_first_pause_3",
+ "bpc_tx_first_pause_4",
+ "bpc_tx_first_pause_5",
+ "bpc_tx_first_pause_6",
+ "bpc_tx_first_pause_7",
+
+ "bpc_rx_pause_0",
+ "bpc_rx_pause_1",
+ "bpc_rx_pause_2",
+ "bpc_rx_pause_3",
+ "bpc_rx_pause_4",
+ "bpc_rx_pause_5",
+ "bpc_rx_pause_6",
+ "bpc_rx_pause_7",
+ "bpc_rx_zero_pause_0",
+ "bpc_rx_zero_pause_1",
+ "bpc_rx_zero_pause_2",
+ "bpc_rx_zero_pause_3",
+ "bpc_rx_zero_pause_4",
+ "bpc_rx_zero_pause_5",
+ "bpc_rx_zero_pause_6",
+ "bpc_rx_zero_pause_7",
+ "bpc_rx_first_pause_0",
+ "bpc_rx_first_pause_1",
+ "bpc_rx_first_pause_2",
+ "bpc_rx_first_pause_3",
+ "bpc_rx_first_pause_4",
+ "bpc_rx_first_pause_5",
+ "bpc_rx_first_pause_6",
+ "bpc_rx_first_pause_7",
+
+ "rad_rx_frames",
+ "rad_rx_octets",
+ "rad_rx_vlan_frames",
+ "rad_rx_ucast",
+ "rad_rx_ucast_octets",
+ "rad_rx_ucast_vlan",
+ "rad_rx_mcast",
+ "rad_rx_mcast_octets",
+ "rad_rx_mcast_vlan",
+ "rad_rx_bcast",
+ "rad_rx_bcast_octets",
+ "rad_rx_bcast_vlan",
+ "rad_rx_drops",
+
+ "fc_rx_ucast_octets",
+ "fc_rx_ucast",
+ "fc_rx_ucast_vlan",
+ "fc_rx_mcast_octets",
+ "fc_rx_mcast",
+ "fc_rx_mcast_vlan",
+ "fc_rx_bcast_octets",
+ "fc_rx_bcast",
+ "fc_rx_bcast_vlan",
+
+ "fc_tx_ucast_octets",
+ "fc_tx_ucast",
+ "fc_tx_ucast_vlan",
+ "fc_tx_mcast_octets",
+ "fc_tx_mcast",
+ "fc_tx_mcast_vlan",
+ "fc_tx_bcast_octets",
+ "fc_tx_bcast",
+ "fc_tx_bcast_vlan",
+ "fc_tx_parity_errors",
+ "fc_tx_timeout",
+ "fc_tx_fid_parity_errors",
+};
+
+static int
+bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int
+bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+	/* Only the fixed 10G full-duplex setting is supported */
+	if (cmd->autoneg == AUTONEG_ENABLE)
+		return -EOPNOTSUPP;
+	else {
+ if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL))
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void
+bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bfa_ioc_attr *ioc_attr;
+ unsigned long flags;
+
+ strcpy(drvinfo->driver, BNAD_NAME);
+ strcpy(drvinfo->version, BNAD_VERSION);
+
+ ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+ if (ioc_attr) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version) - 1);
+ kfree(ioc_attr);
+ }
+
+ strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+}
+
+static int
+get_regs(struct bnad *bnad, u32 * regs)
+{
+ int num = 0, i;
+ u32 reg_addr;
+ unsigned long flags;
+
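+/*
+ * When 'regs' is NULL only count the registers;
+ * otherwise read each one into the buffer.
+ */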
+#define BNAD_GET_REG(addr) \
+do { \
+ if (regs) \
+ regs[num++] = readl(bnad->bar0 + (addr)); \
+ else \
+ num++; \
+} while (0)
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ /* DMA Block Internal Registers */
+ BNAD_GET_REG(DMA_CTRL_REG0);
+ BNAD_GET_REG(DMA_CTRL_REG1);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS);
+ BNAD_GET_REG(DMA_ERR_INT_ENABLE);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
+
+ /* APP Block Register Address Offset from BAR0 */
+ BNAD_GET_REG(HOSTFN0_INT_STATUS);
+ BNAD_GET_REG(HOSTFN0_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN0);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
+ BNAD_GET_REG(FN0_PCIE_ERR_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN1_INT_STATUS);
+ BNAD_GET_REG(HOSTFN1_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN1);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
+ BNAD_GET_REG(FN1_PCIE_ERR_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(PCIE_MISC_REG);
+
+ BNAD_GET_REG(HOST_SEM0_REG);
+ BNAD_GET_REG(HOST_SEM1_REG);
+ BNAD_GET_REG(HOST_SEM2_REG);
+ BNAD_GET_REG(HOST_SEM3_REG);
+ BNAD_GET_REG(HOST_SEM0_INFO_REG);
+ BNAD_GET_REG(HOST_SEM1_INFO_REG);
+ BNAD_GET_REG(HOST_SEM2_INFO_REG);
+ BNAD_GET_REG(HOST_SEM3_INFO_REG);
+
+ BNAD_GET_REG(TEMPSENSE_CNTL_REG);
+ BNAD_GET_REG(TEMPSENSE_STAT_REG);
+
+ BNAD_GET_REG(APP_LOCAL_ERR_STAT);
+ BNAD_GET_REG(APP_LOCAL_ERR_MSK);
+
+ BNAD_GET_REG(PCIE_LNK_ERR_STAT);
+ BNAD_GET_REG(PCIE_LNK_ERR_MSK);
+
+ BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
+ BNAD_GET_REG(RESV_ETH_TYPE);
+
+ BNAD_GET_REG(HOSTFN2_INT_STATUS);
+ BNAD_GET_REG(HOSTFN2_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN2);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
+ BNAD_GET_REG(FN2_PCIE_ERR_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN3_INT_STATUS);
+ BNAD_GET_REG(HOSTFN3_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN3);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
+ BNAD_GET_REG(FN3_PCIE_ERR_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
+
+ /* Host Command Status Registers */
+ reg_addr = HOST_CMDSTS0_CLR_REG;
+ for (i = 0; i < 16; i++) {
+ BNAD_GET_REG(reg_addr);
+ BNAD_GET_REG(reg_addr + 4);
+ BNAD_GET_REG(reg_addr + 8);
+ reg_addr += 0x10;
+ }
+
+ /* Function ID register */
+ BNAD_GET_REG(FNC_ID_REG);
+
+ /* Function personality register */
+ BNAD_GET_REG(FNC_PERS_REG);
+
+ /* Operation mode register */
+ BNAD_GET_REG(OP_MODE);
+
+ /* LPU0 Registers */
+ BNAD_GET_REG(LPU0_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU0_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU0_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU0_ERR_SET_REG);
+
+ /* LPU1 Registers */
+ BNAD_GET_REG(LPU1_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU1_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU1_ERR_SET_REG);
+
+ /* PSS Registers */
+ BNAD_GET_REG(PSS_CTL_REG);
+ BNAD_GET_REG(PSS_ERR_STATUS_REG);
+ BNAD_GET_REG(ERR_STATUS_SET);
+ BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
+
+ /* Catapult CPQ Registers */
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
+
+ /* Host Function Force Parity Error Registers */
+ BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
+
+ /* LL Port[0|1] Halt Mask Registers */
+ BNAD_GET_REG(LL_HALT_MSK_P0);
+ BNAD_GET_REG(LL_HALT_MSK_P1);
+
+ /* LL Port[0|1] Error Mask Registers */
+ BNAD_GET_REG(LL_ERR_MSK_P0);
+ BNAD_GET_REG(LL_ERR_MSK_P1);
+
+ /* EMC FLI Registers */
+ BNAD_GET_REG(FLI_CMD_REG);
+ BNAD_GET_REG(FLI_ADDR_REG);
+ BNAD_GET_REG(FLI_CTL_REG);
+ BNAD_GET_REG(FLI_WRDATA_REG);
+ BNAD_GET_REG(FLI_RDDATA_REG);
+ BNAD_GET_REG(FLI_DEV_STATUS_REG);
+ BNAD_GET_REG(FLI_SIG_WD_REG);
+
+ BNAD_GET_REG(FLI_DEV_VENDOR_REG);
+ BNAD_GET_REG(FLI_ERR_STATUS_REG);
+
+ /* RxAdm 0 Registers */
+ BNAD_GET_REG(RAD0_CTL_REG);
+ BNAD_GET_REG(RAD0_PE_PARM_REG);
+ BNAD_GET_REG(RAD0_BCN_REG);
+ BNAD_GET_REG(RAD0_DEFAULT_REG);
+ BNAD_GET_REG(RAD0_PROMISC_REG);
+ BNAD_GET_REG(RAD0_BCNQ_REG);
+ BNAD_GET_REG(RAD0_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD0_ERR_STS);
+ BNAD_GET_REG(RAD0_SET_ERR_STS);
+ BNAD_GET_REG(RAD0_ERR_INT_EN);
+ BNAD_GET_REG(RAD0_FIRST_ERR);
+ BNAD_GET_REG(RAD0_FORCE_ERR);
+
+ BNAD_GET_REG(RAD0_MAC_MAN_1H);
+ BNAD_GET_REG(RAD0_MAC_MAN_1L);
+ BNAD_GET_REG(RAD0_MAC_MAN_2H);
+ BNAD_GET_REG(RAD0_MAC_MAN_2L);
+ BNAD_GET_REG(RAD0_MAC_MAN_3H);
+ BNAD_GET_REG(RAD0_MAC_MAN_3L);
+ BNAD_GET_REG(RAD0_MAC_MAN_4H);
+ BNAD_GET_REG(RAD0_MAC_MAN_4L);
+
+ BNAD_GET_REG(RAD0_LAST4_IP);
+
+ /* RxAdm 1 Registers */
+ BNAD_GET_REG(RAD1_CTL_REG);
+ BNAD_GET_REG(RAD1_PE_PARM_REG);
+ BNAD_GET_REG(RAD1_BCN_REG);
+ BNAD_GET_REG(RAD1_DEFAULT_REG);
+ BNAD_GET_REG(RAD1_PROMISC_REG);
+ BNAD_GET_REG(RAD1_BCNQ_REG);
+ BNAD_GET_REG(RAD1_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD1_ERR_STS);
+ BNAD_GET_REG(RAD1_SET_ERR_STS);
+ BNAD_GET_REG(RAD1_ERR_INT_EN);
+
+ /* TxA0 Registers */
+ BNAD_GET_REG(TXA0_CTRL_REG);
+ /* TxA0 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
+ }
+
+ /* TxA1 Registers */
+ BNAD_GET_REG(TXA1_CTRL_REG);
+ /* TxA1 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
+ }
+
+ /* RxA Registers */
+ BNAD_GET_REG(RXA0_CTL_REG);
+ BNAD_GET_REG(RXA1_CTL_REG);
+
+ /* PLB0 Registers */
+ BNAD_GET_REG(PLB0_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB0_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB0_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB0_RL_MIN_REG);
+ BNAD_GET_REG(PLB0_RL_MAX_REG);
+ BNAD_GET_REG(PLB0_EMS_ADD_REG);
+
+ /* PLB1 Registers */
+ BNAD_GET_REG(PLB1_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB1_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB1_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB1_RL_MIN_REG);
+ BNAD_GET_REG(PLB1_RL_MAX_REG);
+ BNAD_GET_REG(PLB1_EMS_ADD_REG);
+
+ /* HQM Control Register */
+ BNAD_GET_REG(HQM0_CTL_REG);
+ BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_CTL_REG);
+ BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
+
+ /* LUT Registers */
+ BNAD_GET_REG(LUT0_ERR_STS);
+ BNAD_GET_REG(LUT0_SET_ERR_STS);
+ BNAD_GET_REG(LUT1_ERR_STS);
+ BNAD_GET_REG(LUT1_SET_ERR_STS);
+
+ /* TRC Registers */
+ BNAD_GET_REG(TRC_CTL_REG);
+ BNAD_GET_REG(TRC_MODS_REG);
+ BNAD_GET_REG(TRC_TRGC_REG);
+ BNAD_GET_REG(TRC_CNT1_REG);
+ BNAD_GET_REG(TRC_CNT2_REG);
+ BNAD_GET_REG(TRC_NXTS_REG);
+ BNAD_GET_REG(TRC_DIRR_REG);
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_TRGM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_NXTM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_STRM_REG(i));
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+#undef BNAD_GET_REG
+ return num;
+}
+static int
+bnad_get_regs_len(struct net_device *netdev)
+{
+ int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
+ return ret;
+}
+
+static void
+bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
+{
+ memset(buf, 0, bnad_get_regs_len(netdev));
+ get_regs(netdev_priv(netdev), buf);
+}
+
+static void
+bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
+{
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+}
+
+static int
+bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+	/* bna_lock required to access bnad->cfg_flags */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ coalesce->use_adaptive_rx_coalesce =
+ (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
+
+ return 0;
+}
+
+static int
+bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+ int dim_timer_del = 0;
+
+ if (coalesce->rx_coalesce_usecs == 0 ||
+ coalesce->rx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ if (coalesce->tx_coalesce_usecs == 0 ||
+ coalesce->tx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ /*
+ * Do not need to store rx_coalesce_usecs here
+ * Every time DIM is disabled, we can get it from the
+ * stack.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (coalesce->use_adaptive_rx_coalesce) {
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+ bnad_dim_timer_start(bnad);
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
+ bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
+ dim_timer_del = bnad_dim_timer_running(bnad);
+ if (dim_timer_del) {
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags);
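+				/*
+				 * Drop bna_lock around del_timer_sync() so it
+				 * does not deadlock with the DIM timer callback.
+				 */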
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ del_timer_sync(&bnad->dim_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ }
+ bnad_rx_coalescing_timeo_set(bnad);
+ }
+ }
+ if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+ bnad_tx_coalescing_timeo_set(bnad);
+ }
+
+ if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
+ bnad_rx_coalescing_timeo_set(bnad);
+
+ }
+
+ /* Add Tx Inter-pkt DMA count? */
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
+ ringparam->rx_mini_max_pending = 0;
+ ringparam->rx_jumbo_max_pending = 0;
+ ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
+
+ ringparam->rx_pending = bnad->rxq_depth;
+	ringparam->rx_mini_pending = 0;
+	ringparam->rx_jumbo_pending = 0;
+ ringparam->tx_pending = bnad->txq_depth;
+}
+
+static int
+bnad_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ int i, current_err, err = 0;
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (ringparam->rx_pending == bnad->rxq_depth &&
+ ringparam->tx_pending == bnad->txq_depth) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
+ !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+ if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+
+ if (ringparam->rx_pending != bnad->rxq_depth) {
+ bnad->rxq_depth = ringparam->rx_pending;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ bnad_cleanup_rx(bnad, i);
+ current_err = bnad_setup_rx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+ if (ringparam->tx_pending != bnad->txq_depth) {
+ bnad->txq_depth = ringparam->tx_pending;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ bnad_cleanup_tx(bnad, i);
+ current_err = bnad_setup_tx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ pauseparam->autoneg = 0;
+ pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
+ pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
+}
+
+static int
+bnad_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ unsigned long flags;
+
+ if (pauseparam->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
+ pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
+ pause_config.rx_pause = pauseparam->rx_pause;
+ pause_config.tx_pause = pauseparam->tx_pause;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static u32
+bnad_get_rx_csum(struct net_device *netdev)
+{
+ u32 rx_csum;
+ struct bnad *bnad = netdev_priv(netdev);
+
+ rx_csum = bnad->rx_csum;
+ return rx_csum;
+}
+
+static int
+bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad->rx_csum = rx_csum;
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static int
+bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (tx_csum) {
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ } else {
+ netdev->features &= ~NETIF_F_IP_CSUM;
+ netdev->features &= ~NETIF_F_IPV6_CSUM;
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static int
+bnad_set_tso(struct net_device *netdev, u32 tso)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (tso) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, q_num;
+ u64 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
+ ETH_GSTRING_LEN));
+ memcpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ sprintf(string, "txf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_errors", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_mac_sa", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ sprintf(string, "rxf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_frame_drops", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "cq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_hw_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_packets_with_error",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string,
+ "rxq%d_packets_with_error", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ sprintf(string, "txq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_hw_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_stats_count_locked(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, count, rxf_active_num = 0, txf_active_num = 0;
+ u64 bmap;
+
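+	/* Count the active Tx and Rx functions from the enable bitmaps */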
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1)
+ txf_active_num++;
+ bmap >>= 1;
+ }
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1)
+ rxf_active_num++;
+ bmap >>= 1;
+ }
+ count = BNAD_ETHTOOL_STATS_NUM +
+ txf_active_num * BNAD_NUM_TXF_COUNTERS +
+ rxf_active_num * BNAD_NUM_RXF_COUNTERS;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
+ count += BNAD_NUM_RXQ_COUNTERS;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
+ }
+ return count;
+}
+
+static int
+bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
+{
+ int i, j;
+ struct bna_rcb *rcb = NULL;
+ struct bna_tcb *tcb = NULL;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
+ buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
+ ccb->producer_index;
+ buf[bi++] = 0; /* ccb->consumer_index */
+ buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
+ ccb->hw_producer_index);
+ }
+ }
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[0]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[0];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ }
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ if (bnad->tx_info[i].tcb[j] &&
+ bnad->tx_info[i].tcb[j]->txq) {
+ tcb = bnad->tx_info[i].tcb[j];
+ buf[bi++] = tcb->txq->tx_packets;
+ buf[bi++] = tcb->txq->tx_bytes;
+ buf[bi++] = tcb->producer_index;
+ buf[bi++] = tcb->consumer_index;
+ buf[bi++] = *(tcb->hw_consumer_index);
+ }
+ }
+
+ return bi;
+}
+
+static void
+bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ u64 *buf)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, bi;
+ unsigned long flags;
+ struct rtnl_link_stats64 *net_stats64;
+ u64 *stats64;
+ u64 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
+ mutex_unlock(&bnad->conf_mutex);
+ return;
+ }
+
+ /*
+	 * Use bna_lock to sync reads from bna_stats, which is written
+	 * under the same lock.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bi = 0;
+ memset(buf, 0, stats->n_stats * sizeof(u64));
+
+ net_stats64 = (struct rtnl_link_stats64 *)buf;
+ bnad_netdev_qstats_fill(bnad, net_stats64);
+ bnad_netdev_hwstats_fill(bnad, net_stats64);
+
+ bi = sizeof(*net_stats64) / sizeof(u64);
+
+ /* Fill driver stats into ethtool buffers */
+ stats64 = (u64 *)&bnad->stats.drv_stats;
+ for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
+ stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
+ for (i = 0;
+ i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
+ i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill txf stats into ethtool buffers */
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats->txf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill rxf stats into ethtool buffers */
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats->rxf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill per Q stats into ethtool buffers */
+ bi = bnad_per_q_stats_fill(bnad, buf, bi);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return bnad_get_stats_count_locked(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+ .get_regs_len = bnad_get_regs_len,
+ .get_regs = bnad_get_regs,
+ .get_wol = bnad_get_wol,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bnad_get_coalesce,
+ .set_coalesce = bnad_set_coalesce,
+ .get_ringparam = bnad_get_ringparam,
+ .set_ringparam = bnad_set_ringparam,
+ .get_pauseparam = bnad_get_pauseparam,
+ .set_pauseparam = bnad_set_pauseparam,
+ .get_rx_csum = bnad_get_rx_csum,
+ .set_rx_csum = bnad_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = bnad_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = bnad_set_tso,
+ .get_strings = bnad_get_strings,
+ .get_ethtool_stats = bnad_get_ethtool_stats,
+ .get_sset_count = bnad_get_sset_count
+};
+
+void
+bnad_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+}
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
new file mode 100644
index 00000000000..bbd39dc6597
--- /dev/null
+++ b/drivers/net/bna/cna.h
@@ -0,0 +1,81 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __CNA_H__
+#define __CNA_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/string.h>
+
+#include <linux/list.h>
+
+#define bfa_sm_fault(__mod, __event) do { \
+ pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
+ __event); \
+} while (0)
+
+extern char bfa_version[];
+
+#define CNA_FW_FILE_CT "ctfw_cna.bin"
+#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN (6)
+typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+
+#pragma pack()
+
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) { \
+ bfa_q_next(_qe) = (struct list_head *) NULL; \
+ bfa_q_prev(_qe) = (struct list_head *) NULL; \
+}
+
+/*
+ * bfa_q_deq - dequeue an element from the head of the queue
+ * (a usage sketch follows after this header)
+ */
+#define bfa_q_deq(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
+ bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
+ bfa_q_qe_init(*((struct list_head **) _qe)); \
+ } else { \
+ *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+ } \
+}
+
+#endif /* __CNA_H__ */
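The bfa_q_* macros above treat a plain struct list_head as a FIFO whose elements embed their own list_head. The sketch below is illustrative only: the element type, queue, and drain helper are made up here, and it assumes the element's list_head is its first member (which is what the pointer cast in bfa_q_deq() relies on) and that "cna.h" (and thus <linux/list.h>) is included.

/* assumes "cna.h" is included; names here are hypothetical */
struct cna_example_elem {
	struct list_head qe;	/* must be first: bfa_q_deq() casts &elem */
	int payload;
};

static void cna_example_drain(struct list_head *q)
{
	struct cna_example_elem *e;

	bfa_q_deq(q, &e);	/* sets e = NULL when the queue is empty */
	while (e) {
		/* ... consume e->payload ... */
		bfa_q_deq(q, &e);
	}
}

Elements would be queued with list_add_tail(&elem->qe, q); bfa_q_deq() then unlinks the head and re-initializes its pointers via bfa_q_qe_init().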
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
new file mode 100644
index 00000000000..e8f4ecd9ebb
--- /dev/null
+++ b/drivers/net/bna/cna_fwimg.c
@@ -0,0 +1,64 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/firmware.h>
+#include "cna.h"
+
+const struct firmware *bfi_fw;
+static u32 *bfi_image_ct_cna;
+static u32 bfi_image_ct_cna_size;
+
+static u32 *
+cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name)
+{
+ const struct firmware *fw;
+
+ if (request_firmware(&fw, fw_name, &pdev->dev)) {
+ pr_alert("Can't locate firmware %s\n", fw_name);
+ goto error;
+ }
+
+ *bfi_image = (u32 *)fw->data;
+ *bfi_image_size = fw->size/sizeof(u32);
+ bfi_fw = fw;
+
+ return *bfi_image;
+error:
+ return NULL;
+}
+
+u32 *
+cna_get_firmware_buf(struct pci_dev *pdev)
+{
+ if (bfi_image_ct_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct_cna,
+ &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
+ return bfi_image_ct_cna;
+}
+
+u32 *
+bfa_cb_image_get_chunk(int type, u32 off)
+{
+ return (u32 *)(bfi_image_ct_cna + off);
+}
+
+u32
+bfa_cb_image_get_size(int type)
+{
+ return bfi_image_ct_cna_size;
+}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index e6a803f1c50..ae894bca4af 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,7 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
+#include <linux/aer.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -58,13 +59,13 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.17"
-#define DRV_MODULE_RELDATE "July 18, 2010"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
-#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
-#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
-#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
+#define DRV_MODULE_VERSION "2.0.18"
+#define DRV_MODULE_RELDATE "Oct 7, 2010"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
+#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
+#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
+#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
#define RUN_AT(x) (jiffies + (x))
@@ -265,7 +266,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
if (diff == TX_DESC_CNT)
diff = MAX_TX_DESC_CNT;
}
- return (bp->tx_ring_size - diff);
+ return bp->tx_ring_size - diff;
}
static u32
@@ -298,7 +299,7 @@ bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
- return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
+ return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
static void
@@ -976,9 +977,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
- return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
+ return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
- "Copper"));
+ "Copper");
}
static void
@@ -1268,30 +1269,9 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
val |= 0x02 << 8;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- u32 lo_water, hi_water;
-
- if (bp->flow_ctrl & FLOW_CTRL_TX)
- lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
- else
- lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
- if (lo_water >= bp->rx_ring_size)
- lo_water = 0;
-
- hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
-
- if (hi_water <= lo_water)
- lo_water = 0;
-
- hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
- lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
+ if (bp->flow_ctrl & FLOW_CTRL_TX)
+ val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
- if (hi_water > 0xf)
- hi_water = 0xf;
- else if (hi_water == 0)
- lo_water = 0;
- val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
- }
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
@@ -1372,8 +1352,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
/* Acknowledge the interrupt. */
REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
- bnx2_init_all_rx_contexts(bp);
+ bnx2_init_all_rx_contexts(bp);
}
static void
@@ -1757,7 +1736,7 @@ __acquires(&bp->phy_lock)
u32 new_adv = 0;
if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
- return (bnx2_setup_remote_phy(bp, port));
+ return bnx2_setup_remote_phy(bp, port);
if (!(bp->autoneg & AUTONEG_SPEED)) {
u32 new_bmcr;
@@ -2170,10 +2149,10 @@ __acquires(&bp->phy_lock)
return 0;
if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- return (bnx2_setup_serdes_phy(bp, port));
+ return bnx2_setup_serdes_phy(bp, port);
}
else {
- return (bnx2_setup_copper_phy(bp));
+ return bnx2_setup_copper_phy(bp);
}
}
@@ -3217,7 +3196,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (bp->rx_csum &&
(status & (L2_FHDR_STATUS_TCP_SEGMENT |
L2_FHDR_STATUS_UDP_DATAGRAM))) {
@@ -4973,6 +4952,11 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, BNX2_HC_CONFIG, val);
+ if (bp->rx_ticks < 25)
+ bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
+ else
+ bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
+
for (i = 1; i < bp->irq_nvecs; i++) {
u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
BNX2_HC_SB_CONFIG_1;
@@ -5241,18 +5225,20 @@ bnx2_init_all_rings(struct bnx2 *bp)
bnx2_init_rx_ring(bp, i);
if (bp->num_rx_rings > 1) {
- u32 tbl_32;
- u8 *tbl = (u8 *) &tbl_32;
-
- bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
- BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
+ u32 tbl_32 = 0;
for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
- tbl[i % 4] = i % (bp->num_rx_rings - 1);
- if ((i % 4) == 3)
- bnx2_reg_wr_ind(bp,
- BNX2_RXP_SCRATCH_RSS_TBL + i,
- cpu_to_be32(tbl_32));
+ int shift = (i % 8) << 2;
+
+ tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
+ if ((i % 8) == 7) {
+ REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
+ REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
+ BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
+ BNX2_RLUP_RSS_COMMAND_WRITE |
+ BNX2_RLUP_RSS_COMMAND_HASH_MASK);
+ tbl_32 = 0;
+ }
}
val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
@@ -6201,7 +6187,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
}
}
-static void
+static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
int cpus = num_online_cpus();
@@ -6230,9 +6216,10 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
}
bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
- bp->dev->real_num_tx_queues = bp->num_tx_rings;
+ netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
bp->num_rx_rings = bp->irq_nvecs;
+ return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
/* Called with rtnl_lock */
@@ -6247,7 +6234,9 @@ bnx2_open(struct net_device *dev)
bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp);
- bnx2_setup_int_mode(bp, disable_msi);
+ rc = bnx2_setup_int_mode(bp, disable_msi);
+ if (rc)
+ goto open_err;
bnx2_init_napi(bp);
bnx2_napi_enable(bp);
rc = bnx2_alloc_mem(bp);
@@ -7581,9 +7570,9 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
struct bnx2 *bp = netdev_priv(dev);
if (CHIP_NUM(bp) == CHIP_NUM_5709)
- return (ethtool_op_set_tx_ipv6_csum(dev, data));
+ return ethtool_op_set_tx_ipv6_csum(dev, data);
else
- return (ethtool_op_set_tx_csum(dev, data));
+ return ethtool_op_set_tx_csum(dev, data);
}
static int
@@ -7704,7 +7693,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
dev->mtu = new_mtu;
- return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
+ return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -7890,6 +7879,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
+ int err;
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@@ -7926,7 +7916,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
}
pci_set_master(pdev);
- pci_save_state(pdev);
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
@@ -7981,6 +7970,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags |= BNX2_FLAG_PCIE;
if (CHIP_REV(bp) == CHIP_REV_Ax)
bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
+
+ /* AER (Advanced Error Reporting) hooks */
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
+ "failed 0x%x\n", err);
+ /* non-fatal, continue */
+ }
+
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
@@ -8237,9 +8235,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->timer.data = (unsigned long) bp;
bp->timer.function = bnx2_timer;
+ pci_save_state(pdev);
+
return 0;
err_out_unmap:
+ if (bp->flags & BNX2_FLAG_PCIE)
+ pci_disable_pcie_error_reporting(pdev);
+
if (bp->regview) {
iounmap(bp->regview);
bp->regview = NULL;
@@ -8435,7 +8438,11 @@ bnx2_remove_one(struct pci_dev *pdev)
kfree(bp->temp_stats_blk);
+ if (bp->flags & BNX2_FLAG_PCIE)
+ pci_disable_pcie_error_reporting(pdev);
+
free_netdev(dev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -8527,25 +8534,38 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
+ pci_ers_result_t result;
+ int err;
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
- rtnl_unlock();
- return PCI_ERS_RESULT_DISCONNECT;
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (netif_running(dev)) {
+ bnx2_set_power_state(bp, PCI_D0);
+ bnx2_init_nic(bp, 1);
+ }
+ result = PCI_ERS_RESULT_RECOVERED;
}
- pci_set_master(pdev);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+ rtnl_unlock();
- if (netif_running(dev)) {
- bnx2_set_power_state(bp, PCI_D0);
- bnx2_init_nic(bp, 1);
+ if (!(bp->flags & BNX2_FLAG_PCIE))
+ return result;
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err); /* non-fatal, continue */
}
- rtnl_unlock();
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 2104c1005d0..efdfbc2a9e3 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -352,12 +352,7 @@ struct l2_fhdr {
#define BNX2_L2CTX_BD_PRE_READ 0x00000000
#define BNX2_L2CTX_CTX_SIZE 0x00000000
#define BNX2_L2CTX_CTX_TYPE 0x00000000
-#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 4
-#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4
-#define BNX2_L2CTX_LO_WATER_MARK_DIS 0
-#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4
-#define BNX2_L2CTX_HI_WATER_MARK_SCALE 16
-#define BNX2_L2CTX_WATER_MARKS_MSK 0x000000ff
+#define BNX2_L2CTX_FLOW_CTRL_ENABLE 0x000000ff
#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16)
#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28)
#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28)
@@ -4185,6 +4180,15 @@ struct l2_fhdr {
#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2)
#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2)
+#define BNX2_RLUP_RSS_COMMAND 0x00002048
+#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR (0xfUL<<0)
+#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK (0xffUL<<4)
+#define BNX2_RLUP_RSS_COMMAND_WRITE (1UL<<12)
+#define BNX2_RLUP_RSS_COMMAND_READ (1UL<<13)
+#define BNX2_RLUP_RSS_COMMAND_HASH_MASK (0x7UL<<14)
+
+#define BNX2_RLUP_RSS_DATA 0x0000204c
+
/*
* rbuf_reg definition
@@ -6077,6 +6081,7 @@ struct l2_fhdr {
#define BNX2_COM_SCRATCH 0x00120000
+#define BNX2_FW_RX_LOW_LATENCY 0x00120058
#define BNX2_FW_RX_DROP_COUNT 0x00120084
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 53af9c93e75..c49b643e009 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.52.53-3"
-#define DRV_MODULE_RELDATE "2010/18/04"
+#define DRV_MODULE_VERSION "1.60.00-1"
+#define DRV_MODULE_RELDATE "2010/10/06"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -33,13 +33,11 @@
#define BNX2X_NEW_NAPI
-
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "../cnic_if.h"
#endif
-
#ifdef BCM_CNIC
#define BNX2X_MIN_MSIX_VEC_CNT 3
#define BNX2X_MSIX_VEC_FP_START 2
@@ -129,16 +127,18 @@ void bnx2x_panic_dump(struct bnx2x *bp);
} while (0)
#endif
+#define bnx2x_mc_addr(ha) ((ha)->addr)
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32)
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
-#define REG_ADDR(bp, offset) (bp->regview + offset)
+#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
@@ -160,6 +160,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
offset, len32); \
} while (0)
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+ REG_WR_DMAE(bp, offset, valp, len32)
+
#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
do { \
memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
@@ -175,16 +178,59 @@ void bnx2x_panic_dump(struct bnx2x *bp);
offsetof(struct shmem2_region, field))
#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
+ offsetof(struct mf_cfg, field))
+#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
+ offsetof(struct mf2_cfg, field))
-#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
-#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
+#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
+ MF_CFG_ADDR(bp, field), (val))
+#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
+#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
+ (SHMEM2_RD((bp), size) > \
+ offsetof(struct shmem2_region, field)))
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+/**
+ * CIDs and CLIDs:
+ * The CLIDs below are for func 0; the CLID for any other
+ * function is calculated by the formula:
+ *
+ * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+/* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CL_ID 17
+#define BNX2X_ISCSI_ETH_CID 17
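As a worked instance of the formula above (illustrative only): NUM_SPECIAL_CLIENTS is not defined in this header, so the value used below is an assumption, and the helper itself is not part of the driver.

/* Hypothetical: NUM_SPECIAL_CLIENTS assumed for illustration only */
#define EXAMPLE_NUM_SPECIAL_CLIENTS	18

/* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X */
static inline u8 example_iscsi_eth_cl_id(int func)
{
	return func * EXAMPLE_NUM_SPECIAL_CLIENTS + BNX2X_ISCSI_ETH_CL_ID;
}

For func 0 this reduces to BNX2X_ISCSI_ETH_CL_ID (17), matching the define above.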
+
+/** Additional rings budgeting */
+#ifdef BCM_CNIC
+#define CNIC_CONTEXT_USE 1
+#else
+#define CNIC_CONTEXT_USE 0
+#endif /* BCM_CNIC */
+
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+#define SM_RX_ID 0
+#define SM_TX_ID 1
/* fast path */
@@ -254,11 +300,24 @@ union db_prod {
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
+union host_hc_status_block {
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+ /* pointer to fp status block e2 */
+ struct host_hc_status_block_e2 *e2_sb;
+};
struct bnx2x_fastpath {
+#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
- struct host_status_block *status_blk;
+ union host_hc_status_block status_blk;
+ /* chip independed shortcuts into sb structure */
+ __le16 *sb_index_values;
+ __le16 *sb_running_index;
+ /* chip independed shortcut into rx_prods_offset memory */
+ u32 ustorm_rx_prods_offset;
+
dma_addr_t status_blk_mapping;
struct sw_tx_bd *tx_buf_ring;
@@ -288,10 +347,15 @@ struct bnx2x_fastpath {
#define BNX2X_FP_STATE_OPEN 0xa0000
#define BNX2X_FP_STATE_HALTING 0xb0000
#define BNX2X_FP_STATE_HALTED 0xc0000
+#define BNX2X_FP_STATE_TERMINATING 0xd0000
+#define BNX2X_FP_STATE_TERMINATED 0xe0000
- u8 index; /* number in fp array */
- u8 cl_id; /* eth client id */
- u8 sb_id; /* status block number in HW */
+ u8 index; /* number in fp array */
+ u8 cl_id; /* eth client id */
+ u8 cl_qzone_id;
+ u8 fw_sb_id; /* status block number in FW */
+ u8 igu_sb_id; /* status block number in HW */
+ u32 cid;
union db_prod tx_db;
@@ -301,8 +365,7 @@ struct bnx2x_fastpath {
u16 tx_bd_cons;
__le16 *tx_cons_sb;
- __le16 fp_c_idx;
- __le16 fp_u_idx;
+ __le16 fp_hc_idx;
u16 rx_bd_prod;
u16 rx_bd_cons;
@@ -312,8 +375,6 @@ struct bnx2x_fastpath {
/* The last maximal completed SGE */
u16 last_max_sge;
__le16 *rx_cons_sb;
- __le16 *rx_bd_cons_sb;
-
unsigned long tx_pkt,
rx_pkt,
@@ -356,6 +417,8 @@ struct bnx2x_fastpath {
#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD (NUM_TX_BD - 1)
#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
+#define INIT_TX_RING_SIZE MAX_TX_AVAIL
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
(MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
#define TX_BD(x) ((x) & MAX_TX_BD)
@@ -369,6 +432,9 @@ struct bnx2x_fastpath {
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define MIN_RX_AVAIL 128
+#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
+#define INIT_RX_RING_SIZE MAX_RX_AVAIL
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
(MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD)
@@ -419,11 +485,12 @@ struct bnx2x_fastpath {
le32_to_cpu((bd)->addr_lo))
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
-
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
DPM_TRIGER_TYPE); \
} while (0)
@@ -481,31 +548,15 @@ struct bnx2x_fastpath {
#define BNX2X_RX_SUM_FIX(cqe) \
BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
-
-#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
-#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
-
-#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
-#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
-#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
+#define U_SB_ETH_RX_CQ_INDEX 1
+#define U_SB_ETH_RX_BD_INDEX 2
+#define C_SB_ETH_TX_CQ_INDEX 5
#define BNX2X_RX_SB_INDEX \
- (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])
-
-#define BNX2X_RX_SB_BD_INDEX \
- (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
-
-#define BNX2X_RX_SB_INDEX_NUM \
- (((U_SB_ETH_RX_CQ_INDEX << \
- USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
- USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
- ((U_SB_ETH_RX_BD_INDEX << \
- USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
- USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
+ (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
#define BNX2X_TX_SB_INDEX \
- (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
-
+ (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
/* end of fast path */
@@ -521,12 +572,19 @@ struct bnx2x_common {
#define CHIP_NUM_57710 0x164e
#define CHIP_NUM_57711 0x164f
#define CHIP_NUM_57711E 0x1650
+#define CHIP_NUM_57712 0x1662
+#define CHIP_NUM_57712E 0x1663
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
+#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
-#define IS_E1H_OFFSET CHIP_IS_E1H(bp)
+#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
+ CHIP_IS_57712E(bp))
+#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
+#define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
#define CHIP_REV_Ax 0x00000000
@@ -552,12 +610,34 @@ struct bnx2x_common {
u32 shmem_base;
u32 shmem2_base;
+ u32 mf_cfg_base;
+ u32 mf2_cfg_base;
u32 hw_config;
u32 bc_ver;
+
+ u8 int_block;
+#define INT_BLOCK_HC 0
+#define INT_BLOCK_IGU 1
+#define INT_BLOCK_MODE_NORMAL 0
+#define INT_BLOCK_MODE_BW_COMP 2
+#define CHIP_INT_MODE_IS_NBC(bp) \
+ (CHIP_IS_E2(bp) && \
+ !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
+
+ u8 chip_port_mode;
+#define CHIP_4_PORT_MODE 0x0
+#define CHIP_2_PORT_MODE 0x1
+#define CHIP_PORT_MODE_NONE 0x2
+#define CHIP_MODE(bp) (bp->common.chip_port_mode)
+#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
};
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
/* end of common */
@@ -566,13 +646,13 @@ struct bnx2x_common {
struct bnx2x_port {
u32 pmf;
- u32 link_config;
+ u32 link_config[LINK_CONFIG_SIZE];
- u32 supported;
+ u32 supported[LINK_CONFIG_SIZE];
/* link settings - missing defines */
#define SUPPORTED_2500baseX_Full (1 << 15)
- u32 advertising;
+ u32 advertising[LINK_CONFIG_SIZE];
/* link settings - missing defines */
#define ADVERTISED_2500baseX_Full (1 << 15)
@@ -589,27 +669,98 @@ struct bnx2x_port {
/* end of port */
+/* e1h Classification CAM line allocations */
+enum {
+ CAM_ETH_LINE = 0,
+ CAM_ISCSI_ETH_LINE,
+ CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
+};
+#define BNX2X_VF_ID_INVALID 0xFF
-#ifdef BCM_CNIC
-#define MAX_CONTEXT 15
-#else
-#define MAX_CONTEXT 16
-#endif
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
+ * status block represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require a FP-SB, and other components like
+ * the CNIC may consume FP-SBs, reducing the number of possible L2 queues
+ *
+ * If the maximum number of FP-SBs available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
+ * regular L2 queues is Y = X - 1
+ * b. In MF mode the actual number of L2 queues is Y = (X - 1)/MF_factor
+ * c. If the FCoE L2 queue is supported the actual number of L2 queues
+ * is Y + 1
+ * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
+ * slow-path interrupts) or Y + 2 if CNIC is supported (one additional
+ * FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X, or X + 1 if the
+ * FCoE L2 queue is supported. The CID for the FCoE L2 queue is always X.
+ */
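A small worked instance of rules (a), (d) and (e) above, with X = 16 FP-SBs, CNIC enabled and no FCoE or MF mode; the enum is illustrative only and not part of the driver.

/* Illustration only: applies rules (a), (d), (e) from the comment above */
enum {
	EX_FP_SB	= 16,			/* X */
	EX_L2_QUEUES	= EX_FP_SB - 1,		/* (a) Y = X - 1 = 15 */
	EX_MSIX_VECS	= EX_L2_QUEUES + 2,	/* (d) Y + 2 = 17 (slow-path + CNIC) */
	EX_CID_COUNT	= EX_FP_SB		/* (e) X = 16 (X + 1 if FCoE) */
};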
+
+#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */
+
+/*
+ * The cid_cnt parameter below refers to the value returned by
+ * the 'bnx2x_get_l2_cid_count()' routine
+ */
+
+/*
+ * The number of FP contexts allocated by the driver == max number of regular
+ * L2 queues + 1 for the FCoE L2 queue
+ */
+#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
union cdu_context {
struct eth_context eth;
char pad[1024];
};
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 3
+#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#ifdef BCM_CNIC
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+#endif
+
+#define QM_ILT_PAGE_SZ_HW 3
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
+#define QM_CID_ROUND 1024
+
+#ifdef BCM_CNIC
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 2
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
+/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM 1024
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 3
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+#endif
+
#define MAX_DMAE_C 8
/* DMA memory not used in fastpath */
struct bnx2x_slowpath {
- union cdu_context context[MAX_CONTEXT];
struct eth_stats_query fw_stats;
struct mac_configuration_cmd mac_config;
struct mac_configuration_cmd mcast_config;
+ struct client_init_ramrod_data client_init_data;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -634,40 +785,74 @@ struct bnx2x_slowpath {
#define MAX_DYNAMIC_ATTN_GRPS 8
struct attn_route {
- u32 sig[4];
+ u32 sig[5];
+};
+
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct hw_context {
+ union cdu_context *vcxt;
+ dma_addr_t cxt_mapping;
+ size_t size;
};
+/* forward */
+struct bnx2x_ilt;
+
typedef enum {
BNX2X_RECOVERY_DONE,
BNX2X_RECOVERY_INIT,
BNX2X_RECOVERY_WAIT,
} bnx2x_recovery_state_t;
+/**
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be powers of 2
+ */
+#define NUM_EQ_PAGES 1
+#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
+ (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_EQ_CONS])
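To make the ring arithmetic above concrete, here is a worked instance as a comment; the element size is an assumption for illustration, not taken from the HSI headers.

/*
 * Illustration (assumed sizes): with BCM_PAGE_SIZE = 4096 and
 * sizeof(union event_ring_elem) = 32, EQ_DESC_CNT_PAGE = 128 and
 * EQ_DESC_MAX_PAGE = 127.  The last slot of each page holds the
 * "next page" pointer, so the index skips it:
 *
 *   NEXT_EQ_IDX(125) == 126   (plain advance by 1)
 *   NEXT_EQ_IDX(126) == 128   (advance by 2, slot 127 is skipped)
 *   EQ_DESC(128)     == 0     (wraps, since NUM_EQ_PAGES == 1)
 */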
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
*/
- struct bnx2x_fastpath fp[MAX_CONTEXT];
+ struct bnx2x_fastpath *fp;
void __iomem *regview;
void __iomem *doorbells;
-#ifdef BCM_CNIC
-#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
-#else
-#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
-#endif
+ u16 db_size;
struct net_device *dev;
struct pci_dev *pdev;
+ struct iro *iro_arr;
+#define IRO (bp->iro_arr)
+
atomic_t intr_sem;
bnx2x_recovery_state_t recovery_state;
int is_leader;
-#ifdef BCM_CNIC
- struct msix_entry msix_table[MAX_CONTEXT+2];
-#else
- struct msix_entry msix_table[MAX_CONTEXT+1];
-#endif
+ struct msix_entry *msix_table;
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
@@ -679,7 +864,8 @@ struct bnx2x {
u32 rx_csum;
u32 rx_buf_size;
-#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
@@ -688,13 +874,12 @@ struct bnx2x {
#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
L1_CACHE_SHIFT : 8)
#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
- struct host_def_status_block *def_status_blk;
-#define DEF_SB_ID 16
- __le16 def_c_idx;
- __le16 def_u_idx;
- __le16 def_x_idx;
- __le16 def_t_idx;
+ struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+ __le16 def_idx;
__le16 def_att_idx;
u32 attn_state;
struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
@@ -706,10 +891,17 @@ struct bnx2x {
struct eth_spe *spq_prod_bd;
struct eth_spe *spq_last_bd;
__le16 *dsb_sp_prod;
- u16 spq_left; /* serialize spq */
+ atomic_t spq_left; /* serialize spq */
/* used to synchronize spq accesses */
spinlock_t spq_lock;
+ /* event queue */
+ union event_ring_elem *eq_ring;
+ dma_addr_t eq_mapping;
+ u16 eq_prod;
+ u16 eq_cons;
+ __le16 *eq_cons_sb;
+
/* Flags for marking that there is a STAT_QUERY or
SET_MAC ramrod pending */
int stats_pending;
@@ -728,18 +920,29 @@ struct bnx2x {
#define USING_DAC_FLAG 0x10
#define USING_MSIX_FLAG 0x20
#define USING_MSI_FLAG 0x40
+
#define TPA_ENABLE_FLAG 0x80
#define NO_MCP_FLAG 0x100
+#define DISABLE_MSI_FLAG 0x200
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define HW_VLAN_TX_FLAG 0x400
#define HW_VLAN_RX_FLAG 0x800
#define MF_FUNC_DIS 0x1000
- int func;
-#define BP_PORT(bp) (bp->func % PORT_MAX)
-#define BP_FUNC(bp) (bp->func)
-#define BP_E1HVN(bp) (bp->func >> 1)
+ int pf_num; /* absolute PF number */
+ int pfid; /* per-path PF number */
+ int base_fw_ndsb;
+#define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \
+ 0 : (bp->pf_num & 1))
+#define BP_PORT(bp) (bp->pfid & 1)
+#define BP_FUNC(bp) (bp->pfid)
+#define BP_ABS_FUNC(bp) (bp->pf_num)
+#define BP_E1HVN(bp) (bp->pfid >> 1)
+#define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \
+ 0 : BP_E1HVN(bp))
#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
+#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
+ BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
#ifdef BCM_CNIC
#define BCM_CNIC_CID_START 16
@@ -769,10 +972,11 @@ struct bnx2x {
struct cmng_struct_per_port cmng;
u32 vn_weight_sum;
- u32 mf_config;
- u16 e1hov;
- u8 e1hmf;
-#define IS_E1HMF(bp) (bp->e1hmf != 0)
+ u32 mf_config[E1HVN_MAX];
+ u32 mf2_config[E2_FUNC_MAX];
+ u16 mf_ov;
+ u8 mf_mode;
+#define IS_MF(bp) (bp->mf_mode != 0)
u8 wol;
@@ -800,6 +1004,7 @@ struct bnx2x {
#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
+#define BNX2X_STATE_FUNC_STARTED 0x7000
#define BNX2X_STATE_DIAG 0xe000
#define BNX2X_STATE_ERROR 0xf000
@@ -808,6 +1013,15 @@ struct bnx2x {
int disable_tpa;
int int_mode;
+ struct tstorm_eth_mac_filter_config mac_filters;
+#define BNX2X_ACCEPT_NONE 0x0000
+#define BNX2X_ACCEPT_UNICAST 0x0001
+#define BNX2X_ACCEPT_MULTICAST 0x0002
+#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
+#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
+#define BNX2X_ACCEPT_BROADCAST 0x0010
+#define BNX2X_PROMISCUOUS_MODE 0x10000
+
u32 rx_mode;
#define BNX2X_RX_MODE_NONE 0
#define BNX2X_RX_MODE_NORMAL 1
@@ -816,34 +1030,41 @@ struct bnx2x {
#define BNX2X_MAX_MULTICAST 64
#define BNX2X_MAX_EMUL_MULTI 16
- u32 rx_mode_cl_mask;
-
+ u8 igu_dsb_id;
+ u8 igu_base_sb;
+ u8 igu_sb_cnt;
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+ struct hw_context context;
+
+ struct bnx2x_ilt *ilt;
+#define BP_ILT(bp) ((bp)->ilt)
+#define ILT_MAX_LINES 128
+
+ int l2_cid_count;
+#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
+ ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
+
+ int qm_cid_count;
int dropless_fc;
#ifdef BCM_CNIC
u32 cnic_flags;
#define BNX2X_CNIC_FLAG_MAC_SET 1
-
- void *t1;
- dma_addr_t t1_mapping;
void *t2;
dma_addr_t t2_mapping;
- void *timers;
- dma_addr_t timers_mapping;
- void *qm;
- dma_addr_t qm_mapping;
struct cnic_ops *cnic_ops;
void *cnic_data;
u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev;
- struct host_status_block *cnic_sb;
+ union host_hc_status_block cnic_sb;
dma_addr_t cnic_sb_mapping;
-#define CNIC_SB_ID(bp) BP_L_ID(bp)
+#define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
+#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
struct eth_spe *cnic_kwq;
struct eth_spe *cnic_kwq_prod;
struct eth_spe *cnic_kwq_cons;
@@ -913,32 +1134,201 @@ struct bnx2x {
const struct firmware *firmware;
};
+/**
+ * Init queue/func interface
+ */
+/* queue init flags */
+#define QUEUE_FLG_TPA 0x0001
+#define QUEUE_FLG_CACHE_ALIGN 0x0002
+#define QUEUE_FLG_STATS 0x0004
+#define QUEUE_FLG_OV 0x0008
+#define QUEUE_FLG_VLAN 0x0010
+#define QUEUE_FLG_COS 0x0020
+#define QUEUE_FLG_HC 0x0040
+#define QUEUE_FLG_DHC 0x0080
+#define QUEUE_FLG_OOO 0x0100
+
+#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
+#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
+#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
+#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
+
+
+
+/* rss capabilities */
+#define RSS_IPV4_CAP 0x0001
+#define RSS_IPV4_TCP_CAP 0x0002
+#define RSS_IPV6_CAP 0x0004
+#define RSS_IPV6_TCP_CAP 0x0008
-#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
- : MAX_CONTEXT)
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
+#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
+#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
+
+#define RSS_IPV4_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+/* func init flags */
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* removed FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_TPA 0x0008
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
+
+#define FUNC_CONFIG(flgs) ((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \
+ FUNC_FLG_LEADING))
+
+struct rxq_pause_params {
+ u16 bd_th_lo;
+ u16 bd_th_hi;
+ u16 rcq_th_lo;
+ u16 rcq_th_hi;
+ u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
+ u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
+ u16 pri_map;
+};
+
+struct bnx2x_rxq_init_params {
+ /* cxt*/
+ struct eth_context *cxt;
+
+ /* dma */
+ dma_addr_t dscr_map;
+ dma_addr_t sge_map;
+ dma_addr_t rcq_map;
+ dma_addr_t rcq_np_map;
+
+ u16 flags;
+ u16 drop_flags;
+ u16 mtu;
+ u16 buf_sz;
+ u16 fw_sb_id;
+ u16 cl_id;
+ u16 spcl_id;
+ u16 cl_qzone_id;
+
+ /* valid iff QUEUE_FLG_STATS */
+ u16 stat_id;
+
+ /* valid iff QUEUE_FLG_TPA */
+ u16 tpa_agg_sz;
+ u16 sge_buf_sz;
+ u16 max_sges_pkt;
+
+ /* valid iff QUEUE_FLG_CACHE_ALIGN */
+ u8 cache_line_log;
+
+ u8 sb_cq_index;
+ u32 cid;
+
+ /* desired interrupts per sec. valid iff QUEUE_FLG_HC */
+ u32 hc_rate;
+};
+
+struct bnx2x_txq_init_params {
+ /* cxt*/
+ struct eth_context *cxt;
+
+ /* dma */
+ dma_addr_t dscr_map;
+
+ u16 flags;
+ u16 fw_sb_id;
+ u8 sb_cq_index;
+ u8 cos; /* valid iff QUEUE_FLG_COS */
+ u16 stat_id; /* valid iff QUEUE_FLG_STATS */
+ u16 traffic_type;
+ u32 cid;
+ u16 hc_rate; /* desired interrupts per sec.*/
+ /* valid iff QUEUE_FLG_HC */
+
+};
+
+struct bnx2x_client_ramrod_params {
+ int *pstate;
+ int state;
+ u16 index;
+ u16 cl_id;
+ u32 cid;
+ u8 poll;
+#define CLIENT_IS_LEADING_RSS 0x02
+ u8 flags;
+};
+
+struct bnx2x_client_init_params {
+ struct rxq_pause_params pause;
+ struct bnx2x_rxq_init_params rxq_params;
+ struct bnx2x_txq_init_params txq_params;
+ struct bnx2x_client_ramrod_params ramrod_params;
+};
+
+struct bnx2x_rss_params {
+ int mode;
+ u16 cap;
+ u16 result_mask;
+};
+
+struct bnx2x_func_init_params {
+
+ /* rss */
+ struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
+
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
+ dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+
+ u16 func_flgs;
+ u16 func_id; /* abs fid */
+ u16 pf_id;
+ u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
+};
+
#define for_each_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
#define for_each_nondefault_queue(bp, var) \
for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
+#define WAIT_RAMROD_POLL 0x01
+#define WAIT_RAMROD_COMMON 0x02
+int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags);
+
+/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
+void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type);
+
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len);
+
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
void bnx2x_update_coalesce(struct bnx2x *bp);
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
@@ -957,6 +1347,40 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
return val;
}
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ do { \
+ x = pci_alloc_consistent(bp->pdev, size, y); \
+ if (x) \
+ memset(x, 0, size); \
+ } while (0)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ pci_free_consistent(bp->pdev, size, x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define ILOG2(x) (ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES (3072)
+/* In 57710/11 we use the whole table since we have 8 funcs.
+ * In 57712 we have only 4 funcs, but use the same size per func, so only
+ * half of the table is in use
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * the phys address is shifted right 12 bits and has a valid bit (1)
+ * added in the 53rd bit;
+ * then, since this is a wide register(TM),
+ * we split it into two 32 bit writes
+ */
+#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
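A hedged sketch of how the two halves might be written to a pair of consecutive 32-bit registers; the reg_lo/reg_hi parameters are placeholders here, only the ONCHIP_ADDR1/ONCHIP_ADDR2 split comes from the macros above.

/* Sketch only: reg_lo/reg_hi stand in for a real ILT register pair */
static inline void example_ilt_line_wr(struct bnx2x *bp, u32 reg_lo,
					u32 reg_hi, dma_addr_t page)
{
	/* low 32 bits of (page >> 12) */
	REG_WR(bp, reg_lo, ONCHIP_ADDR1(page));
	/* remaining high bits of (page >> 12) plus the valid bit */
	REG_WR(bp, reg_hi, ONCHIP_ADDR2(page));
}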
/* load/unload mode */
#define LOAD_NORMAL 0
@@ -964,18 +1388,44 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_DIAG 2
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
-#define UNLOAD_RECOVERY 2
+#define UNLOAD_RECOVERY 2
/* DMAE command defines */
-#define DMAE_CMD_SRC_PCI 0
-#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
+#define DMAE_TIMEOUT -1
+#define DMAE_PCI_ERROR -2 /* E2 and onward */
+#define DMAE_NOT_RDY -3
+#define DMAE_PCI_ERR_FLAG 0x80000000
+
+#define DMAE_SRC_PCI 0
+#define DMAE_SRC_GRC 1
+
+#define DMAE_DST_NONE 0
+#define DMAE_DST_PCI 1
+#define DMAE_DST_GRC 2
+
+#define DMAE_COMP_PCI 0
+#define DMAE_COMP_GRC 1
+
+/* E2 and onward - PCI error handling in the completion */
+
+#define DMAE_COMP_REGULAR 0
+#define DMAE_COM_SET_ERR 1
-#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
-#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
+ DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
+ DMAE_COMMAND_SRC_SHIFT)
-#define DMAE_CMD_C_DST_PCI 0
-#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
+ DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
+ DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
+ DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
+ DMAE_COMMAND_C_DST_SHIFT)
#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
@@ -991,10 +1441,20 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
+#define DMAE_SRC_PF 0
+#define DMAE_SRC_VF 1
+
+#define DMAE_DST_PF 0
+#define DMAE_DST_VF 1
+
+#define DMAE_C_SRC 0
+#define DMAE_C_DST 1
+
#define DMAE_LEN32_RD_MAX 0x80
#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
-#define DMAE_COMP_VAL 0xe0d0d0ae
+#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
+ indicates error */
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -1002,7 +1462,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)
-
/* PCIE link and speed */
#define PCICFG_LINK_WIDTH 0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT 20
@@ -1031,7 +1490,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
-#define BNX2X_BTR 1
+#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
@@ -1148,20 +1607,26 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f
+#define BNX2X_SP_DSB_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_DEF_CONS])
-#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
-#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
-#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
-#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
-
-#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
-#define BNX2X_SP_DSB_INDEX \
-(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
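A brief usage sketch for the helpers above; the EXAMPLE_* names are hypothetical and exist only to show the token-pasting convention (SET_FLAG/GET_FLAG expect a FOO/FOO_SHIFT pair, GET_FIELD expects FOO_MASK/FOO_SHIFT).

/* Hypothetical field layout, for illustration only */
#define EXAMPLE_PRIO		(0x7 << 5)
#define EXAMPLE_PRIO_SHIFT	5

static inline u32 example_flags(void)
{
	u32 v = 0;

	SET_FLAG(v, EXAMPLE_PRIO, 4);		/* v == (4 << 5) == 0x80 */
	return GET_FLAG(v, EXAMPLE_PRIO);	/* == 4 */
}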
#define CAM_IS_INVALID(x) \
-(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
+ (GET_FLAG(x.flags, \
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+ (T_ETH_MAC_COMMAND_INVALIDATE))
#define CAM_INVALIDATE(x) \
(x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
@@ -1177,21 +1642,29 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
+
#ifdef BNX2X_MAIN
#define BNX2X_EXTERN
#else
#define BNX2X_EXTERN extern
#endif
-BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
-
-/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
+BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
-void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
-
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 02bf710629a..97ef674dcc3 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -15,18 +15,19 @@
*
*/
-
#include <linux/etherdevice.h>
#include <linux/ip.h>
-#include <linux/ipv6.h>
+#include <net/ipv6.h>
#include <net/ip6_checksum.h>
+#include <linux/firmware.h>
#include "bnx2x_cmn.h"
#ifdef BCM_VLAN
#include <linux/if_vlan.h>
#endif
-static int bnx2x_poll(struct napi_struct *napi, int budget);
+#include "bnx2x_init.h"
+
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -51,7 +52,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+ BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
@@ -115,16 +116,10 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
pkt_cons = TX_BD(sw_cons);
- /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
-
- DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
- hw_cons, sw_cons, pkt_cons);
+ DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
+ " pkt_cons %u\n",
+ fp->index, hw_cons, sw_cons, pkt_cons);
-/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
- rmb();
- prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
- }
-*/
bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
sw_cons++;
}
@@ -140,7 +135,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
*/
smp_mb();
- /* TBD need a thresh? */
if (unlikely(netif_tx_queue_stopped(txq))) {
/* Taking tx_lock() is needed to prevent reenabling the queue
* while it's empty. This could have happen if rx_action() gets
@@ -189,14 +183,16 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
- SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
+ SGE_MASK_CLEAR_BIT(fp,
+ RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
- sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+ sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
/* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask));
- bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+ bnx2x_update_last_max_sge(fp,
+ le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
@@ -297,7 +293,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Run through the SGL and compose the fragmented skb */
for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
- u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
+ u16 sge_idx =
+ RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
@@ -358,7 +355,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
#endif
prefetch(skb);
- prefetch(((char *)(skb)) + 128);
+ prefetch(((char *)(skb)) + L1_CACHE_BYTES);
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > bp->rx_buf_size) {
@@ -393,8 +390,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, skb,
&cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
- if ((bp->vlgrp != NULL) && is_vlan_cqe &&
- (!is_not_hwaccel_vlan_cqe))
+ if ((bp->vlgrp != NULL) &&
+ (le16_to_cpu(cqe->fast_path_cqe.
+ pars_flags.flags) & PARSING_FLAGS_VLAN))
vlan_gro_receive(&fp->napi, bp->vlgrp,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag), skb);
@@ -560,7 +558,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
dma_unmap_addr(rx_buf, mapping),
pad + RX_COPY_THRESH,
DMA_FROM_DEVICE);
- prefetch(((char *)(skb)) + 128);
+ prefetch(((char *)(skb)) + L1_CACHE_BYTES);
/* is this an error packet? */
if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
@@ -594,7 +592,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
skb_reserve(new_skb, pad);
skb_put(new_skb, len);
- bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+ bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
skb = new_skb;
@@ -613,7 +611,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
"of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
- bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+ bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
goto next_rx;
}
@@ -622,7 +620,8 @@ reuse_rx:
/* Set Toeplitz hash for a none-LRO skb */
bnx2x_set_skb_rxhash(bp, cqe, skb);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -685,9 +684,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
return IRQ_HANDLED;
}
- DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
- fp->index, fp->sb_id);
- bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+ DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+ "[fp %d fw_sd %d igusb %d]\n",
+ fp->index, fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -697,14 +697,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
/* Handle Rx and Tx according to MSI-X vector */
prefetch(fp->rx_cons_sb);
prefetch(fp->tx_cons_sb);
- prefetch(&fp->status_blk->u_status_block.status_block_index);
- prefetch(&fp->status_blk->c_status_block.status_block_index);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
return IRQ_HANDLED;
}
-
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
@@ -738,12 +736,13 @@ void bnx2x_link_report(struct bnx2x *bp)
netdev_info(bp->dev, "NIC Link is Up, ");
line_speed = bp->link_vars.line_speed;
- if (IS_E1HMF(bp)) {
+ if (IS_MF(bp)) {
u16 vn_max_rate;
vn_max_rate =
- ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+ ((bp->mf_config[BP_VN(bp)] &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
}
@@ -773,23 +772,73 @@ void bnx2x_link_report(struct bnx2x *bp)
}
}
+/* Returns the number of actually allocated BDs */
+static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+ int rx_ring_size)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 ring_prod, cqe_ring_prod;
+ int i;
+
+ fp->rx_comp_cons = 0;
+ cqe_ring_prod = ring_prod = 0;
+ for (i = 0; i < rx_ring_size; i++) {
+ if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ BNX2X_ERR("was only able to allocate "
+ "%d rx skbs on queue[%d]\n", i, fp->index);
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ break;
+ }
+ ring_prod = NEXT_RX_IDX(ring_prod);
+ cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+ WARN_ON(ring_prod <= i);
+ }
+
+ fp->rx_bd_prod = ring_prod;
+ /* Limit the CQE producer by the CQE ring size */
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
+ fp->rx_pkt = fp->rx_calls = 0;
+
+ return i;
+}
+
+static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+ MAX_RX_AVAIL/bp->num_queues;
+
+ rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
+
+ bnx2x_alloc_rx_bds(fp, rx_ring_size);
+
+	/* Warning!
+	 * This will generate an interrupt (to the TSTORM);
+	 * it must only be done after the chip is initialized.
+	 */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+}
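Editor's note: the helper above sizes each queue's RX ring from either the user-configured value or an even split of the available buffers, floored at a minimum. A minimal stand-alone sketch of that sizing rule, with placeholder constants standing in for the driver's MAX_RX_AVAIL/MIN_RX_AVAIL:

/* Editor's illustrative sketch, not part of the patch.  The constants
 * are placeholders, not the driver's real values. */
#define SKETCH_MAX_RX_AVAIL 4096
#define SKETCH_MIN_RX_AVAIL 128

static int sketch_rx_ring_size(int requested, int num_queues)
{
	/* a user-supplied size wins; otherwise split the pool evenly */
	int size = requested ? requested : SKETCH_MAX_RX_AVAIL / num_queues;

	/* never go below the minimum a queue needs to operate */
	return size > SKETCH_MIN_RX_AVAIL ? size : SKETCH_MIN_RX_AVAIL;
}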
+
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H;
- u16 ring_prod, cqe_ring_prod;
+ u16 ring_prod;
int i, j;
- bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
+ bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+ BNX2X_FW_IP_HDR_ALIGN_PAD;
+
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
- if (bp->flags & TPA_ENABLE_FLAG) {
-
- for_each_queue(bp, j) {
- struct bnx2x_fastpath *fp = &bp->fp[j];
+ for_each_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+ if (!fp->disable_tpa) {
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -807,6 +856,35 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
mapping, 0);
fp->tpa_state[i] = BNX2X_TPA_STOP;
}
+
+ /* "next page" elements initialization */
+ bnx2x_set_next_page_sgl(fp);
+
+ /* set SGEs bit mask */
+ bnx2x_init_sge_ring_bit_mask(fp);
+
+ /* Allocate SGEs and initialize the ring elements */
+ for (i = 0, ring_prod = 0;
+ i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ BNX2X_ERR("was only able to allocate "
+ "%d rx sges\n", i);
+ BNX2X_ERR("disabling TPA for"
+ " queue[%d]\n", j);
+ /* Cleanup already allocated elements */
+ bnx2x_free_rx_sge_range(bp,
+ fp, ring_prod);
+ bnx2x_free_tpa_pool(bp,
+ fp, max_agg_queues);
+ fp->disable_tpa = 1;
+ ring_prod = 0;
+ break;
+ }
+ ring_prod = NEXT_SGE_IDX(ring_prod);
+ }
+
+ fp->rx_sge_prod = ring_prod;
}
}
@@ -814,109 +892,29 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
- fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
- fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
-
- /* "next page" elements initialization */
- /* SGE ring */
- for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
- struct eth_rx_sge *sge;
-
- sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
- sge->addr_hi =
- cpu_to_le32(U64_HI(fp->rx_sge_mapping +
- BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
- sge->addr_lo =
- cpu_to_le32(U64_LO(fp->rx_sge_mapping +
- BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
- }
- bnx2x_init_sge_ring_bit_mask(fp);
-
- /* RX BD ring */
- for (i = 1; i <= NUM_RX_RINGS; i++) {
- struct eth_rx_bd *rx_bd;
-
- rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
- rx_bd->addr_hi =
- cpu_to_le32(U64_HI(fp->rx_desc_mapping +
- BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
- rx_bd->addr_lo =
- cpu_to_le32(U64_LO(fp->rx_desc_mapping +
- BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
- }
+ bnx2x_set_next_page_rx_bd(fp);
/* CQ ring */
- for (i = 1; i <= NUM_RCQ_RINGS; i++) {
- struct eth_rx_cqe_next_page *nextpg;
-
- nextpg = (struct eth_rx_cqe_next_page *)
- &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
- nextpg->addr_hi =
- cpu_to_le32(U64_HI(fp->rx_comp_mapping +
- BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
- nextpg->addr_lo =
- cpu_to_le32(U64_LO(fp->rx_comp_mapping +
- BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
- }
-
- /* Allocate SGEs and initialize the ring elements */
- for (i = 0, ring_prod = 0;
- i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
-
- if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx sges\n", i);
- BNX2X_ERR("disabling TPA for queue[%d]\n", j);
- /* Cleanup already allocated elements */
- bnx2x_free_rx_sge_range(bp, fp, ring_prod);
- bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
- fp->disable_tpa = 1;
- ring_prod = 0;
- break;
- }
- ring_prod = NEXT_SGE_IDX(ring_prod);
- }
- fp->rx_sge_prod = ring_prod;
+ bnx2x_set_next_page_rx_cq(fp);
/* Allocate BDs and initialize BD ring */
- fp->rx_comp_cons = 0;
- cqe_ring_prod = ring_prod = 0;
- for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx skbs on queue[%d]\n", i, j);
- fp->eth_q_stats.rx_skb_alloc_failed++;
- break;
- }
- ring_prod = NEXT_RX_IDX(ring_prod);
- cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- WARN_ON(ring_prod <= i);
- }
-
- fp->rx_bd_prod = ring_prod;
- /* must not have more available CQEs than BDs */
- fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
- cqe_ring_prod);
- fp->rx_pkt = fp->rx_calls = 0;
+ bnx2x_alloc_rx_bd_ring(fp);
- /* Warning!
- * this will generate an interrupt (to the TSTORM)
- * must only be done after chip is initialized
- */
- bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
- fp->rx_sge_prod);
if (j != 0)
continue;
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
- U64_LO(fp->rx_comp_mapping));
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
- U64_HI(fp->rx_comp_mapping));
+ if (!CHIP_IS_E2(bp)) {
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+ U64_LO(fp->rx_comp_mapping));
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+ U64_HI(fp->rx_comp_mapping));
+ }
}
}
+
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
@@ -989,55 +987,49 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
}
}
-void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
+void bnx2x_free_irq(struct bnx2x *bp)
{
- if (bp->flags & USING_MSIX_FLAG) {
- if (!disable_only)
- bnx2x_free_msix_irqs(bp);
- pci_disable_msix(bp->pdev);
- bp->flags &= ~USING_MSIX_FLAG;
-
- } else if (bp->flags & USING_MSI_FLAG) {
- if (!disable_only)
- free_irq(bp->pdev->irq, bp->dev);
- pci_disable_msi(bp->pdev);
- bp->flags &= ~USING_MSI_FLAG;
-
- } else if (!disable_only)
+ if (bp->flags & USING_MSIX_FLAG)
+ bnx2x_free_msix_irqs(bp);
+ else if (bp->flags & USING_MSI_FLAG)
+ free_irq(bp->pdev->irq, bp->dev);
+ else
free_irq(bp->pdev->irq, bp->dev);
}
-static int bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
{
- int i, rc, offset = 1;
- int igu_vec = 0;
+ int msix_vec = 0, i, rc, req_cnt;
- bp->msix_table[0].entry = igu_vec;
- DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
#ifdef BCM_CNIC
- igu_vec = BP_L_ID(bp) + offset;
- bp->msix_table[1].entry = igu_vec;
- DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
- offset++;
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+ bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
+ msix_vec++;
#endif
for_each_queue(bp, i) {
- igu_vec = BP_L_ID(bp) + offset + i;
- bp->msix_table[i + offset].entry = igu_vec;
+ bp->msix_table[msix_vec].entry = msix_vec;
DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
- "(fastpath #%u)\n", i + offset, igu_vec, i);
+ "(fastpath #%u)\n", msix_vec, msix_vec, i);
+ msix_vec++;
}
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
- BNX2X_NUM_QUEUES(bp) + offset);
+ req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
- /* vectors available for FP */
- int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+		/* how many fewer vectors did we get than requested? */
+ int diff = req_cnt - rc;
DP(NETIF_MSG_IFUP,
"Trying to use less MSI-X vectors: %d\n", rc);
@@ -1049,12 +1041,17 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
"MSI-X is not attainable rc %d\n", rc);
return rc;
}
-
- bp->num_queues = min(bp->num_queues, fp_vec);
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_queues -= diff;
DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
bp->num_queues);
} else if (rc) {
+		/* fall back to INTx if there is not enough memory */
+ if (rc == -ENOMEM)
+ bp->flags |= DISABLE_MSI_FLAG;
DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
return rc;
}
@@ -1083,7 +1080,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
bp->dev->name, i);
- rc = request_irq(bp->msix_table[i + offset].vector,
+ rc = request_irq(bp->msix_table[offset].vector,
bnx2x_msix_fp_int, 0, fp->name, fp);
if (rc) {
BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
@@ -1091,10 +1088,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
return -EBUSY;
}
+ offset++;
fp->state = BNX2X_FP_STATE_IRQ;
}
i = BNX2X_NUM_QUEUES(bp);
+ offset = 1 + CNIC_CONTEXT_USE;
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
" ... fp[%d] %d\n",
bp->msix_table[0].vector,
@@ -1104,7 +1103,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
return 0;
}
-static int bnx2x_enable_msi(struct bnx2x *bp)
+int bnx2x_enable_msi(struct bnx2x *bp)
{
int rc;
@@ -1175,35 +1174,29 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
bnx2x_napi_disable(bp);
netif_tx_disable(bp->dev);
}
-static int bnx2x_set_num_queues(struct bnx2x *bp)
-{
- int rc = 0;
- switch (bp->int_mode) {
- case INT_MODE_INTx:
- case INT_MODE_MSI:
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
bp->num_queues = 1;
- DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
- default:
- /* Set number of queues according to bp->multi_mode value */
- bnx2x_set_num_queues_msix(bp);
-
- DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
- bp->num_queues);
+ case ETH_RSS_MODE_REGULAR:
+ bp->num_queues = bnx2x_calc_num_queues(bp);
+ break;
- /* if we can't use MSI-X we only need one fp,
- * so try to enable MSI-X with the requested number of fp's
- * and fallback to MSI or legacy INTx with one fp
- */
- rc = bnx2x_enable_msix(bp);
- if (rc)
- /* failed to enable MSI-X */
- bp->num_queues = 1;
+ default:
+ bp->num_queues = 1;
break;
}
- bp->dev->real_num_tx_queues = bp->num_queues;
- return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
}
/* must be called with rtnl_lock */
@@ -1212,6 +1205,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
u32 load_code;
int i, rc;
+ /* Set init arrays */
+ rc = bnx2x_init_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Error loading firmware\n");
+ return rc;
+ }
+
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EPERM;
@@ -1219,83 +1219,64 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
- rc = bnx2x_set_num_queues(bp);
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
- if (bnx2x_alloc_mem(bp)) {
- bnx2x_free_irq(bp, true);
+ if (bnx2x_alloc_mem(bp))
return -ENOMEM;
+
+ netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
+ rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+ if (rc) {
+ BNX2X_ERR("Unable to update real_num_rx_queues\n");
+ goto load_error0;
}
for_each_queue(bp, i)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
- for_each_queue(bp, i)
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, 128);
-
bnx2x_napi_enable(bp);
- if (bp->flags & USING_MSIX_FLAG) {
- rc = bnx2x_req_msix_irqs(bp);
- if (rc) {
- bnx2x_free_irq(bp, true);
- goto load_error1;
- }
- } else {
- /* Fall to INTx if failed to enable MSI-X due to lack of
- memory (in bnx2x_set_num_queues()) */
- if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
- bnx2x_enable_msi(bp);
- bnx2x_ack_int(bp);
- rc = bnx2x_req_irq(bp);
- if (rc) {
- BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
- bnx2x_free_irq(bp, true);
- goto load_error1;
- }
- if (bp->flags & USING_MSI_FLAG) {
- bp->dev->irq = bp->pdev->irq;
- netdev_info(bp->dev, "using MSI IRQ %d\n",
- bp->pdev->irq);
- }
- }
-
/* Send LOAD_REQUEST command to MCP
Returns the type of LOAD command:
if it is the first port to be initialized
common blocks should be initialized, otherwise - not
*/
if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
- goto load_error2;
+ goto load_error1;
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
rc = -EBUSY; /* other port in diagnostic mode */
- goto load_error2;
+ goto load_error1;
}
} else {
+ int path = BP_PATH(bp);
int port = BP_PORT(bp);
- DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
- load_count[0], load_count[1], load_count[2]);
- load_count[0]++;
- load_count[1 + port]++;
- DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
- load_count[0], load_count[1], load_count[2]);
- if (load_count[0] == 1)
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[1 + port] == 1)
+ else if (load_count[path][1 + port] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_PORT;
else
load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
(load_code == FW_MSG_CODE_DRV_LOAD_PORT))
bp->port.pmf = 1;
else
@@ -1306,16 +1287,24 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ goto load_error2;
+ }
+
+ /* Connect to IRQs */
+ rc = bnx2x_setup_irqs(bp);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
goto load_error2;
}
/* Setup NIC internals and enable interrupts */
bnx2x_nic_init(bp, load_code);
- if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
(bp->common.shmem2_base))
SHMEM2_WR(bp, dcc_support,
(SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
@@ -1323,7 +1312,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Send LOAD_DONE command to MCP */
if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
@@ -1333,7 +1322,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
- rc = bnx2x_setup_leading(bp);
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+#ifndef BNX2X_STOP_ON_ERROR
+ goto load_error3;
+#else
+ bp->panic = 1;
+ return -EBUSY;
+#endif
+ }
+
+ rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
if (rc) {
BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
@@ -1344,62 +1344,47 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#endif
}
- if (CHIP_IS_E1H(bp))
- if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
- DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
- bp->flags |= MF_FUNC_DIS;
- }
+ if (!CHIP_IS_E1(bp) &&
+ (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
+ DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+ bp->flags |= MF_FUNC_DIS;
+ }
- if (bp->state == BNX2X_STATE_OPEN) {
-#ifdef BCM_CNIC
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
-#endif
- for_each_nondefault_queue(bp, i) {
- rc = bnx2x_setup_multi(bp, i);
- if (rc)
#ifdef BCM_CNIC
- goto load_error4;
-#else
- goto load_error3;
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
- }
- if (CHIP_IS_E1(bp))
- bnx2x_set_eth_mac_addr_e1(bp, 1);
- else
- bnx2x_set_eth_mac_addr_e1h(bp, 1);
+ for_each_nondefault_queue(bp, i) {
+ rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
+ if (rc)
#ifdef BCM_CNIC
- /* Set iSCSI L2 MAC */
- mutex_lock(&bp->cnic_mutex);
- if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
- bnx2x_set_iscsi_eth_mac_addr(bp, 1);
- bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
- bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
- CNIC_SB_ID(bp));
- }
- mutex_unlock(&bp->cnic_mutex);
+ goto load_error4;
+#else
+ goto load_error3;
#endif
}
+	/* Now that the Clients are configured we are ready to work */
+ bp->state = BNX2X_STATE_OPEN;
+
+ bnx2x_set_eth_mac(bp, 1);
+
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
/* Start fast path */
switch (load_mode) {
case LOAD_NORMAL:
- if (bp->state == BNX2X_STATE_OPEN) {
- /* Tx queue should be only reenabled */
- netif_tx_wake_all_queues(bp->dev);
- }
+ /* Tx queue should be only reenabled */
+ netif_tx_wake_all_queues(bp->dev);
/* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
- if (bp->state != BNX2X_STATE_OPEN)
- netif_tx_disable(bp->dev);
+ smp_mb__after_clear_bit();
/* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev);
break;
@@ -1427,6 +1412,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#endif
bnx2x_inc_load_cnt(bp);
+ bnx2x_release_firmware(bp);
+
return 0;
#ifdef BCM_CNIC
@@ -1436,24 +1423,28 @@ load_error4:
#endif
load_error3:
bnx2x_int_disable_sync(bp, 1);
- if (!BP_NOMCP(bp)) {
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
- }
- bp->port.pmf = 0;
+
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-load_error2:
+
/* Release IRQs */
- bnx2x_free_irq(bp, false);
+ bnx2x_free_irq(bp);
+load_error2:
+ if (!BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+
+ bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
- for_each_queue(bp, i)
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+load_error0:
bnx2x_free_mem(bp);
+ bnx2x_release_firmware(bp);
+
return rc;
}
@@ -1481,21 +1472,26 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_set_storm_rx_mode(bp);
- /* Disable HW interrupts, NAPI and Tx */
- bnx2x_netif_stop(bp, 1);
- netif_carrier_off(bp->dev);
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
del_timer_sync(&bp->timer);
- SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+
+ SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- /* Release IRQs */
- bnx2x_free_irq(bp, false);
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
bnx2x_chip_cleanup(bp, unload_mode);
+ else {
+ /* Disable HW interrupts, NAPI and Tx */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ }
bp->port.pmf = 0;
@@ -1503,8 +1499,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- for_each_queue(bp, i)
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
@@ -1522,6 +1517,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
return 0;
}
+
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
u16 pmcsr;
@@ -1568,13 +1564,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
return 0;
}
-
-
/*
* net_device service functions
*/
-
-static int bnx2x_poll(struct napi_struct *napi, int budget)
+int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
@@ -1603,27 +1596,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
bnx2x_update_fpsb_idx(fp);
- /* bnx2x_has_rx_work() reads the status block, thus we need
- * to ensure that status block indices have been actually read
- * (bnx2x_update_fpsb_idx) prior to this check
- * (bnx2x_has_rx_work) so that we won't write the "newer"
- * value of the status block to IGU (if there was a DMA right
- * after bnx2x_has_rx_work and if there is no rmb, the memory
- * reading (bnx2x_update_fpsb_idx) may be postponed to right
- * before bnx2x_ack_sb). In this case there will never be
- * another interrupt until there is another update of the
- * status block, while there is still unhandled work.
- */
+ /* bnx2x_has_rx_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (bnx2x_update_fpsb_idx)
+ * prior to this check (bnx2x_has_rx_work) so that
+ * we won't write the "newer" value of the status block
+ * to IGU (if there was a DMA right after
+ * bnx2x_has_rx_work and if there is no rmb, the memory
+ * reading (bnx2x_update_fpsb_idx) may be postponed
+ * to right before bnx2x_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
rmb();
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
napi_complete(napi);
/* Re-enable interrupts */
- bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
- le16_to_cpu(fp->fp_c_idx),
- IGU_INT_NOP, 1);
- bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
- le16_to_cpu(fp->fp_u_idx),
+ DP(NETIF_MSG_HW,
+ "Update index to %d\n", fp->fp_hc_idx);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+ le16_to_cpu(fp->fp_hc_idx),
IGU_INT_ENABLE, 1);
break;
}
@@ -1633,7 +1627,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}
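Editor's note: the reworked comment in bnx2x_poll() above describes an ordering rule: the status-block index must actually be read before the "any work left?" check, otherwise a stale index could be acked back to the hardware and no further interrupt would arrive while work is still pending. A user-space analogue of that read-then-check pattern, using an acquire load where the driver uses rmb(); names and types are illustrative, not the driver's:

/* Editor's illustrative sketch, not part of the patch. */
#include <stdatomic.h>
#include <stdbool.h>

struct sketch_ring {
	_Atomic unsigned short hw_prod;	/* advanced by "hardware" */
	unsigned short sw_cons;		/* advanced by the poller */
	unsigned short cached_prod;	/* last index the poller read */
};

static bool sketch_try_complete(struct sketch_ring *r)
{
	/* acquire load plays the role of the read + rmb() in the driver */
	r->cached_prod = atomic_load_explicit(&r->hw_prod,
					      memory_order_acquire);
	if (r->cached_prod == r->sw_cons) {
		/* safe to re-enable interrupts: the index we would ack
		 * is at least as new as the one used in this check */
		return true;
	}
	return false;	/* more work pending, keep polling */
}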
-
/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
@@ -1807,6 +1800,122 @@ exit_lbl:
}
#endif
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd,
+ u32 xmit_type)
+{
+ pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+ if ((xmit_type & XMIT_GSO_V6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
+/**
+ * Update PBD in GSO case.
+ *
+ * @param skb
+ * @param pbd
+ * @param xmit_type
+ */
+static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+ pbd->tcp_flags = pbd_tcp_flags(skb);
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->tcp_pseudo_csum =
+ swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ } else
+ pbd->tcp_pseudo_csum =
+ swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+}
+
+/**
+ * Update the parsing BD (E2) with checksum offload data.
+ *
+ * @param bp
+ * @param skb
+ * @param pbd
+ * @param xmit_type
+ *
+ * @return header len
+ */
+static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd,
+ u32 xmit_type)
+{
+ pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+
+ pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
+ skb->data) / 2) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+
+ return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+}
+
+/**
+ * Update the parsing BD (E1x) with checksum offload data.
+ *
+ * @param bp
+ * @param skb
+ * @param pbd
+ * @param xmit_type
+ *
+ * @return Header length
+ */
+static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ u8 hlen = (skb_network_header(skb) - skb->data) / 2;
+
+ /* for now NS flag is not used in Linux */
+ pbd->global_data =
+ (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+
+ pbd->ip_hlen_w = (skb_transport_header(skb) -
+ skb_network_header(skb)) / 2;
+
+ hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
+
+ pbd->total_hlen_w = cpu_to_le16(hlen);
+ hlen = hlen*2;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+ } else {
+ s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "hlen %d fix %d csum before fix %x\n",
+ le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
+
+ /* HW bug: fixup the CSUM */
+ pbd->tcp_pseudo_csum =
+ bnx2x_csum_fix(skb_transport_header(skb),
+ SKB_CS(skb), fix);
+
+ DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+ pbd->tcp_pseudo_csum);
+ }
+
+ return hlen;
+}
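Editor's note: the E1x parsing BD above stores header lengths in 16-bit words, so the helper halves the byte distances between skb header pointers and converts the total back to bytes before returning it. A trivial arithmetic sketch of that words-vs-bytes bookkeeping, with made-up header sizes:

/* Editor's illustrative sketch, not part of the patch. */
#include <assert.h>

static unsigned int sketch_hlen_bytes(void)
{
	unsigned int l2_w  = 14 / 2;	/* MAC header length in 16-bit words */
	unsigned int ip_w  = 20 / 2;	/* IPv4 header length in words */
	unsigned int tcp_w = 20 / 2;	/* TCP header length in words */
	unsigned int total_w = l2_w + ip_w + tcp_w;

	return total_w * 2;		/* back to bytes, as the helper returns */
}

int main(void)
{
	assert(sketch_hlen_bytes() == 54);
	return 0;
}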
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -1819,7 +1928,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
- struct eth_tx_parse_bd *pbd = NULL;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
@@ -1847,9 +1957,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
- " gso type %x xmit_type %x\n",
- skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+ DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
+ "protocol(%x,%x) gso type %x xmit_type %x\n",
+ fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
eth = (struct ethhdr *)skb->data;
@@ -1895,10 +2005,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data = (mac_type <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+ mac_type);
+
/* header nbd */
- tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
/* remember the first BD of the packet */
tx_buf->first_bd = fp->tx_bd_prod;
@@ -1912,34 +2023,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
(bp->flags & HW_VLAN_TX_FLAG)) {
- tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
- tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tx_tag_get(skb));
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
} else
#endif
- tx_start_bd->vlan = cpu_to_le16(pkt_prod);
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
-
- memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
if (xmit_type & XMIT_CSUM) {
- hlen = (skb_network_header(skb) - skb->data) / 2;
-
- /* for now NS flag is not used in Linux */
- pbd->global_data =
- (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
- ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
-
- pbd->ip_hlen = (skb_transport_header(skb) -
- skb_network_header(skb)) / 2;
-
- hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
-
- pbd->total_hlen = cpu_to_le16(hlen);
- hlen = hlen*2;
-
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
if (xmit_type & XMIT_CSUM_V4)
@@ -1949,31 +2044,32 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |=
ETH_TX_BD_FLAGS_IPV6;
- if (xmit_type & XMIT_CSUM_TCP) {
- pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
-
- } else {
- s8 fix = SKB_CS_OFF(skb); /* signed! */
-
- pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
-
- DP(NETIF_MSG_TX_QUEUED,
- "hlen %d fix %d csum before fix %x\n",
- le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
+ if (!(xmit_type & XMIT_CSUM_TCP))
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IS_UDP;
+ }
- /* HW bug: fixup the CSUM */
- pbd->tcp_pseudo_csum =
- bnx2x_csum_fix(skb_transport_header(skb),
- SKB_CS(skb), fix);
+ if (CHIP_IS_E2(bp)) {
+ pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum_e2(bp,
+ skb, pbd_e2, xmit_type);
+ } else {
+ pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
- DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
- pbd->tcp_pseudo_csum);
- }
}
+ /* Map skb linear data for DMA */
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
+ /* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
@@ -1985,7 +2081,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
" nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
- tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
+ tx_start_bd->bd_flags.as_bitfield,
+ le16_to_cpu(tx_start_bd->vlan_or_ethertype));
if (xmit_type & XMIT_GSO) {
@@ -1999,28 +2096,14 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_headlen(skb) > hlen))
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
-
- pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
-
- if (xmit_type & XMIT_GSO_V4) {
- pbd->ip_id = swab16(ip_hdr(skb)->id);
- pbd->tcp_pseudo_csum =
- swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
-
- } else
- pbd->tcp_pseudo_csum =
- swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
-
- pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+ if (CHIP_IS_E2(bp))
+ bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+ else
+ bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+ /* Handle fragmented skb */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2057,14 +2140,21 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (total_pkt_bd != NULL)
total_pkt_bd->total_pkt_bytes = pkt_size;
- if (pbd)
+ if (pbd_e1x)
DP(NETIF_MSG_TX_QUEUED,
- "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
" tcp_flags %x xsum %x seq %u hlen %u\n",
- pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
- pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
- pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
-
+ pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
+ pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
+ pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
+ le16_to_cpu(pbd_e1x->total_hlen_w));
+ if (pbd_e2)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
+ pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
+ pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
+ pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
/*
@@ -2078,7 +2168,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
fp->tx_db.data.prod += nbd;
barrier();
- DOORBELL(bp, fp->index, fp->tx_db.raw);
+
+ DOORBELL(bp, fp->cid, fp->tx_db.raw);
mmiowb();
@@ -2100,6 +2191,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
@@ -2110,16 +2202,76 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- if (netif_running(dev)) {
- if (CHIP_IS_E1(bp))
- bnx2x_set_eth_mac_addr_e1(bp, 1);
- else
- bnx2x_set_eth_mac_addr_e1h(bp, 1);
+ if (netif_running(dev))
+ bnx2x_set_eth_mac(bp, 1);
+
+ return 0;
+}
+
+
+int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+ int rc = 0;
+ if (bp->flags & USING_MSIX_FLAG) {
+ rc = bnx2x_req_msix_irqs(bp);
+ if (rc)
+ return rc;
+ } else {
+ bnx2x_ack_int(bp);
+ rc = bnx2x_req_irq(bp);
+ if (rc) {
+ BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+ return rc;
+ }
+ if (bp->flags & USING_MSI_FLAG) {
+ bp->dev->irq = bp->pdev->irq;
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->pdev->irq);
+ }
}
return 0;
}
+void bnx2x_free_mem_bp(struct bnx2x *bp)
+{
+ kfree(bp->fp);
+ kfree(bp->msix_table);
+ kfree(bp->ilt);
+}
+
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp;
+ struct msix_entry *tbl;
+ struct bnx2x_ilt *ilt;
+
+ /* fp array */
+ fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ goto alloc_err;
+ bp->fp = fp;
+
+ /* msix table */
+ tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
+ GFP_KERNEL);
+ if (!tbl)
+ goto alloc_err;
+ bp->msix_table = tbl;
+
+ /* ilt */
+ ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+ if (!ilt)
+ goto alloc_err;
+ bp->ilt = ilt;
+
+ return 0;
+alloc_err:
+ bnx2x_free_mem_bp(bp);
+ return -ENOMEM;
+
+}
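Editor's note: bnx2x_alloc_mem_bp() above follows the usual allocate-or-unwind idiom: every allocation failure jumps to one error label that releases whatever was already obtained (kfree() on NULL is a no-op, so bnx2x_free_mem_bp() covers all partial-failure cases). A generic user-space sketch of the same shape; the structure and sizes are made up for illustration:

/* Editor's illustrative sketch, not part of the patch. */
#include <stdlib.h>

struct sketch_ctx {
	void *fp;
	void *msix_tbl;
	void *ilt;
};

static int sketch_alloc(struct sketch_ctx *ctx)
{
	ctx->fp = calloc(1, 64);
	if (!ctx->fp)
		goto alloc_err;
	ctx->msix_tbl = calloc(1, 64);
	if (!ctx->msix_tbl)
		goto alloc_err;
	ctx->ilt = calloc(1, 64);
	if (!ctx->ilt)
		goto alloc_err;
	return 0;

alloc_err:
	/* free() on NULL is a no-op, so one cleanup path handles every
	 * partial-failure case */
	free(ctx->fp);
	free(ctx->msix_tbl);
	free(ctx->ilt);
	return -1;
}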
+
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -2169,21 +2321,10 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
bp->vlgrp = vlgrp;
-
- /* Set flags according to the required capabilities */
- bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
-
- if (dev->features & NETIF_F_HW_VLAN_TX)
- bp->flags |= HW_VLAN_TX_FLAG;
-
- if (dev->features & NETIF_F_HW_VLAN_RX)
- bp->flags |= HW_VLAN_RX_FLAG;
-
- if (netif_running(dev))
- bnx2x_set_client_config(bp);
}
#endif
+
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -2244,6 +2385,8 @@ int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
+ /* Since the chip was reset, clear the FW sequence number */
+ bp->fw_seq = 0;
rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index d1979b1a7ed..7f52cec9bb9 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -23,6 +23,7 @@
#include "bnx2x.h"
+extern int num_queues;
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
@@ -49,10 +50,11 @@ void bnx2x_link_set(struct bnx2x *bp);
* Query link status
*
* @param bp
+ * @param is_serdes
*
* @return 0 - link is UP
*/
-u8 bnx2x_link_test(struct bnx2x *bp);
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
/**
* Handles link status change
@@ -62,6 +64,15 @@ u8 bnx2x_link_test(struct bnx2x *bp);
void bnx2x__link_status_update(struct bnx2x *bp);
/**
+ * Report link status to upper layer
+ *
+ * @param bp
+ */
+void bnx2x_link_report(struct bnx2x *bp);
+
+/**
* MSI-X slowpath interrupt handler
*
* @param irq
@@ -106,6 +117,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
void bnx2x_int_enable(struct bnx2x *bp);
/**
+ * Disable HW interrupts.
+ *
+ * @param bp
+ */
+void bnx2x_int_disable(struct bnx2x *bp);
+
+/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
*
@@ -115,6 +133,15 @@ void bnx2x_int_enable(struct bnx2x *bp);
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
+ * Loads device firmware
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_init_firmware(struct bnx2x *bp);
+
+/**
* Init HW blocks according to current initialization stage:
* COMMON, PORT or FUNCTION.
*
@@ -153,32 +180,35 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
void bnx2x_free_mem(struct bnx2x *bp);
/**
- * Bring up a leading (the first) eth Client.
+ * Setup eth Client.
*
* @param bp
+ * @param fp
+ * @param is_leading
*
* @return int
*/
-int bnx2x_setup_leading(struct bnx2x *bp);
+int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ int is_leading);
/**
- * Setup non-leading eth Client.
+ * Bring down an eth client.
*
* @param bp
- * @param fp
+ * @param p
*
* @return int
*/
-int bnx2x_setup_multi(struct bnx2x *bp, int index);
+int bnx2x_stop_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_ramrod_params *p);
/**
- * Set number of quueus according to mode and number of available
- * msi-x vectors
+ * Set number of queues according to mode
*
* @param bp
*
*/
-void bnx2x_set_num_queues_msix(struct bnx2x *bp);
+void bnx2x_set_num_queues(struct bnx2x *bp);
/**
* Cleanup chip internals:
@@ -213,21 +243,12 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
/**
* Configure eth MAC address in the HW according to the value in
- * netdev->dev_addr for 57711
- *
- * @param bp driver handle
- * @param set
- */
-void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
-
-/**
- * Configure eth MAC address in the HW according to the value in
- * netdev->dev_addr for 57710
+ * netdev->dev_addr.
*
* @param bp driver handle
* @param set
*/
-void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
+void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
#ifdef BCM_CNIC
/**
@@ -247,18 +268,22 @@ int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
* Initialize status block in FW and HW
*
* @param bp driver handle
- * @param sb host_status_block
* @param dma_addr_t mapping
* @param int sb_id
+ * @param int vfid
+ * @param u8 vf_valid
+ * @param int fw_sb_id
+ * @param int igu_sb_id
*/
-void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
- dma_addr_t mapping, int sb_id);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id);
/**
- * Reconfigure FW/HW according to dev->flags rx mode
+ * Set MAC filtering configurations.
*
- * @param dev net_device
+ * @remarks called with netif_tx_lock from dev_mcast.c
*
+ * @param dev net_device
*/
void bnx2x_set_rx_mode(struct net_device *dev);
@@ -280,34 +305,162 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
* Perform statistics handling according to event
*
* @param bp driver handle
- * @param even tbnx2x_stats_event
+ * @param event bnx2x_stats_event
*/
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
- * Configures FW with client paramteres (like HW VLAN removal)
- * for each active client.
+ * Handle ramrods completion
+ *
+ * @param fp fastpath handle for the event
+ * @param rr_cqe eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
+
+/**
+ * Init/halt function before/after sending
+ * CLIENT_SETUP/CFC_DEL for the first/last client.
*
* @param bp
+ *
+ * @return int
*/
-void bnx2x_set_client_config(struct bnx2x *bp);
+int bnx2x_func_start(struct bnx2x *bp);
+int bnx2x_func_stop(struct bnx2x *bp);
/**
- * Handle sp events
+ * Prepare ILT configurations according to current driver
+ * parameters.
*
- * @param fp fastpath handle for the event
- * @param rr_cqe eth_rx_cqe
+ * @param bp
+ */
+void bnx2x_ilt_set_info(struct bnx2x *bp);
+
+/**
+ * Set power state to the requested value. Currently only D0 and
+ * D3hot are supported.
+ *
+ * @param bp
+ * @param state D0 or D3hot
+ *
+ * @return int
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/* dev_close main block */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+
+/* dev_open main block */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+/* hard_xmit callback */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+
+/* NAPI poll Rx part */
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+
+/* NAPI poll Tx part */
+int bnx2x_tx_int(struct bnx2x_fastpath *fp);
+
+/* suspend/resume callbacks */
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+
+/* Release IRQ vectors */
+void bnx2x_free_irq(struct bnx2x *bp);
+
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_free_skbs(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_netif_start(struct bnx2x *bp);
+
+/**
+ * Fill msix_table, request vectors, update num_queues according
+ * to number of available vectors
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_enable_msix(struct bnx2x *bp);
+
+/**
+ * Request MSI mode from the OS and update internals accordingly
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_enable_msi(struct bnx2x *bp);
+
+/**
+ * Request IRQ vectors from OS.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_setup_irqs(struct bnx2x *bp);
+/**
+ * NAPI callback
+ *
+ * @param napi
+ * @param budget
+ *
+ * @return int
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget);
+
+/**
+ * Allocate/release memory outside the main driver structure
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
+void bnx2x_free_mem_bp(struct bnx2x *bp);
+
+/**
+ * Change mtu netdev callback
+ *
+ * @param dev
+ * @param new_mtu
+ *
+ * @return int
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+/**
+ * tx timeout netdev callback
+ *
+ * @param dev
+ */
+void bnx2x_tx_timeout(struct net_device *dev);
+
+#ifdef BCM_VLAN
+/**
+ * vlan rx register netdev callback
+ *
+ * @param dev
+ * @param vlgrp
*/
-void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
+void bnx2x_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *vlgrp);
+#endif
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
- struct host_status_block *fpsb = fp->status_blk;
-
barrier(); /* status block is written to by the chip */
- fp->fp_c_idx = fpsb->c_status_block.status_block_index;
- fp->fp_u_idx = fpsb->u_status_block.status_block_index;
+ fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
@@ -334,8 +487,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
wmb();
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
+ REG_WR(bp,
+ BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
((u32 *)&rx_prods)[i]);
mmiowb(); /* keep prod updates ordered */
@@ -345,10 +498,77 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
+static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
+ u8 segment, u16 index, u8 op,
+ u8 update, u32 igu_addr)
+{
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr);
+ REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
+
+ /* Make sure that ACK is written */
+ mmiowb();
+ barrier();
+}
+
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
+ u8 idu_sb_id, bool is_Pf)
+{
+ u32 data, ctl, cnt = 100;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+ u32 sb_bit = 1 << (idu_sb_id%32);
+ u32 func_encode = BP_FUNC(bp) |
+ ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+ /* Not supported in BC mode */
+ if (CHIP_INT_MODE_IS_BC(bp))
+ return;
+
+ data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+ << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
+ IGU_REGULAR_CLEANUP_SET |
+ IGU_REGULAR_BCLEANUP;
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ data, igu_addr_data);
+ REG_WR(bp, igu_addr_data, data);
+ mmiowb();
+ barrier();
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ mmiowb();
+ barrier();
+
+ /* wait for clean up to finish */
+ while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+ msleep(20);
-static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
- u8 storm, u16 index, u8 op, u8 update)
+ if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+ DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
+ "idu_sb_id %d offset %d bit %d (cnt %d)\n",
+ idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+ }
+}
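Editor's note: the IGU cleanup above waits for the ack bit with a bounded retry loop (a counter plus msleep) rather than spinning forever. A stand-alone sketch of that poll-with-budget pattern; sketch_read_ack() is a hypothetical stand-in for the MMIO read of igu_addr_ack:

/* Editor's illustrative sketch, not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

extern uint32_t sketch_read_ack(void);	/* hypothetical MMIO read */
#define SKETCH_ACK_BIT 0x1u

static int sketch_wait_for_ack(unsigned int tries)
{
	while (!(sketch_read_ack() & SKETCH_ACK_BIT) && --tries)
		usleep(20 * 1000);	/* 20 ms between polls */

	return (sketch_read_ack() & SKETCH_ACK_BIT) ? 0 : -ETIMEDOUT;
}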
+
+static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
+ u8 storm, u16 index, u8 op, u8 update)
{
u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
COMMAND_REG_INT_ACK);
@@ -369,7 +589,37 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
mmiowb();
barrier();
}
-static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+
+static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+ bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+ igu_addr);
+}
+
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
+ u16 index, u8 op, u8 update)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
+ else {
+ u8 segment;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ segment = storm;
+ else if (igu_sb_id != bp->igu_dsb_id)
+ segment = IGU_SEG_ACCESS_DEF;
+ else if (storm == ATTENTION_ID)
+ segment = IGU_SEG_ACCESS_ATTN;
+ else
+ segment = IGU_SEG_ACCESS_DEF;
+ bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
+ }
+}
+
+static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
COMMAND_REG_SIMD_MASK);
@@ -378,18 +628,36 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
result, hc_addr);
+ barrier();
return result;
}
-/*
- * fast path service functions
- */
+static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
+{
+ u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
+ u32 result = REG_RD(bp, igu_addr);
+
+ DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
+ result, igu_addr);
+
+ barrier();
+ return result;
+}
+
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+ barrier();
+ if (bp->common.int_block == INT_BLOCK_HC)
+ return bnx2x_hc_ack_int(bp);
+ else
+ return bnx2x_igu_ack_int(bp);
+}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
/* Tell compiler that consumer and producer can change */
barrier();
- return (fp->tx_pkt_prod != fp->tx_pkt_cons);
+ return fp->tx_pkt_prod != fp->tx_pkt_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
@@ -424,6 +692,29 @@ static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
return hw_cons != fp->tx_pkt_cons;
}
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+ u16 rx_cons_sb;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+ rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+ if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+ rx_cons_sb++;
+ return (fp->rx_comp_cons != rx_cons_sb);
+}
+
+/**
+ * disables tx from stack point of view
+ *
+ * @param bp
+ */
+static inline void bnx2x_tx_disable(struct bnx2x *bp)
+{
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
struct bnx2x_fastpath *fp, u16 index)
{
@@ -436,7 +727,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
return;
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
sw_buf->page = NULL;
@@ -444,13 +735,67 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, int last)
+static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
int i;
- for (i = 0; i < last; i++)
- bnx2x_free_rx_sge(bp, fp, i);
+ /* Add NAPI objects */
+ for_each_queue(bp, i)
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
+
+static inline void bnx2x_del_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i)
+ netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
+
+static inline void bnx2x_disable_msi(struct bnx2x *bp)
+{
+ if (bp->flags & USING_MSIX_FLAG) {
+ pci_disable_msix(bp->pdev);
+ bp->flags &= ~USING_MSIX_FLAG;
+ } else if (bp->flags & USING_MSI_FLAG) {
+ pci_disable_msi(bp->pdev);
+ bp->flags &= ~USING_MSI_FLAG;
+ }
+}
+
+static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ return num_queues ?
+ min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
+ min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+}
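Editor's note: the queue-count helper above prefers the num_queues module parameter when set, otherwise the number of online CPUs, and caps either by the hardware maximum. A minimal sketch of that selection rule:

/* Editor's illustrative sketch, not part of the patch. */
static int sketch_calc_num_queues(int module_param, int online_cpus,
				  int hw_max)
{
	int want = module_param ? module_param : online_cpus;

	return want < hw_max ? want : hw_max;	/* never exceed the HW limit */
}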
+
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+ int i, j;
+
+ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+ int idx = RX_SGE_CNT * i - 1;
+
+ for (j = 0; j < 2; j++) {
+ SGE_MASK_CLEAR_BIT(fp, idx);
+ idx--;
+ }
+ }
+}
+
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+ /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+ memset(fp->sge_mask, 0xff,
+ (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+
+	/* Clear the last two indices in each page: these are the indices
+	   that correspond to the "next page" element, hence will never be
+	   indicated by the hardware and must be excluded from the
+	   calculations. */
+ bnx2x_clear_sge_mask_next_elems(fp);
}
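Editor's note: bnx2x_init_sge_ring_bit_mask() above seeds the SGE mask with all 1s and then knocks out the two trailing slots of every page, since those hold the next-page pointer and never carry a completed SGE. A compact user-space sketch of the same bookkeeping; 64 slots per page and 4 pages are arbitrary illustration values:

/* Editor's illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <string.h>

#define SKETCH_SLOTS_PER_PAGE	64
#define SKETCH_PAGES		4
#define SKETCH_MASK_WORDS	((SKETCH_SLOTS_PER_PAGE * SKETCH_PAGES) / 64)

static void sketch_init_sge_mask(uint64_t mask[SKETCH_MASK_WORDS])
{
	int page, j;

	/* start with every slot marked set (all 1s) ... */
	memset(mask, 0xff, SKETCH_MASK_WORDS * sizeof(uint64_t));

	/* ... then clear the last two slots of each page: they hold the
	 * next-page pointer and never report a completed SGE */
	for (page = 1; page <= SKETCH_PAGES; page++) {
		int idx = SKETCH_SLOTS_PER_PAGE * page - 1;

		for (j = 0; j < 2; j++, idx--)
			mask[idx / 64] &= ~(1ULL << (idx % 64));
	}
}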
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -479,6 +824,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
return 0;
}
+
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
struct bnx2x_fastpath *fp, u16 index)
{
@@ -513,7 +859,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
* so there is no need to check for dma_mapping_error().
*/
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
- struct sk_buff *skb, u16 cons, u16 prod)
+ u16 cons, u16 prod)
{
struct bnx2x *bp = fp->bp;
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -531,32 +877,15 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
*prod_bd = *cons_bd;
}
-static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, int last)
{
- int i, j;
-
- for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
- int idx = RX_SGE_CNT * i - 1;
+ int i;
- for (j = 0; j < 2; j++) {
- SGE_MASK_CLEAR_BIT(fp, idx);
- idx--;
- }
- }
+ for (i = 0; i < last; i++)
+ bnx2x_free_rx_sge(bp, fp, i);
}
-static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
-{
- /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
- memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
-
- /* Clear the two last indices in the page to 1:
- these are the indices that correspond to the "next" element,
- hence will never be indicated and should be removed from
- the calculations. */
- bnx2x_clear_sge_mask_next_elems(fp);
-}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -582,7 +911,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
-static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
+static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i, j;
@@ -601,7 +930,7 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
}
- fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
+ SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
fp->tx_db.data.zero_fill1 = 0;
fp->tx_db.data.prod = 0;
@@ -609,44 +938,100 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
fp->tx_pkt_cons = 0;
fp->tx_bd_prod = 0;
fp->tx_bd_cons = 0;
- fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
fp->tx_pkt = 0;
}
}
-static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+
+static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
- u16 rx_cons_sb;
+ int i;
- /* Tell compiler that status block fields can change */
- barrier();
- rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
- if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
- rx_cons_sb++;
- return (fp->rx_comp_cons != rx_cons_sb);
+ for (i = 1; i <= NUM_RX_RINGS; i++) {
+ struct eth_rx_bd *rx_bd;
+
+ rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+ rx_bd->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+ rx_bd->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+ }
+}
+
+static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+ int i;
+
+ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+ struct eth_rx_sge *sge;
+
+ sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+ sge->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+ sge->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+ }
+}
+
+static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+ int i;
+ for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+ struct eth_rx_cqe_next_page *nextpg;
+
+ nextpg = (struct eth_rx_cqe_next_page *)
+ &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+ nextpg->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+ nextpg->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+ }
+}
+
+
+
+static inline void __storm_memset_struct(struct bnx2x *bp,
+ u32 addr, size_t size, u32 *data)
+{
+ int i;
+ for (i = 0; i < size/4; i++)
+ REG_WR(bp, addr + (i * 4), data[i]);
+}
+
+static inline void storm_memset_mac_filters(struct bnx2x *bp,
+ struct tstorm_eth_mac_filter_config *mac_filters,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static inline void storm_memset_cmng(struct bnx2x *bp,
+ struct cmng_struct_per_port *cmng,
+ u8 port)
+{
+ size_t size = sizeof(struct cmng_struct_per_port);
+
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)cmng);
}
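Editor's note: the storm_memset_* helpers above push a host structure into the STORM internal memory regions one 32-bit register write at a time, since that memory is only reachable through REG_WR(). A minimal sketch of the word-copy idea; sketch_reg_write() is a hypothetical stand-in for the MMIO write:

/* Editor's illustrative sketch, not part of the patch. */
#include <stddef.h>
#include <stdint.h>

extern void sketch_reg_write(uint32_t addr, uint32_t val);	/* hypothetical */

static void sketch_memset_to_device(uint32_t addr, const void *data,
				    size_t size)
{
	const uint32_t *words = data;
	size_t i;

	/* the target is only word-addressable, so copy 4 bytes at a time */
	for (i = 0; i < size / 4; i++)
		sketch_reg_write(addr + i * 4, words[i]);
}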
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
-void bnx2x_link_report(struct bnx2x *bp);
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-int bnx2x_tx_int(struct bnx2x_fastpath *fp);
-void bnx2x_init_rx_rings(struct bnx2x *bp);
-netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
-
-int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-void bnx2x_tx_timeout(struct net_device *dev);
-void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
-void bnx2x_netif_start(struct bnx2x *bp);
-void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
-void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
-int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
-int bnx2x_resume(struct pci_dev *pdev);
-void bnx2x_free_skbs(struct bnx2x *bp);
-int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
-int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
-int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+#define BNX2X_FW_IP_HDR_ALIGN_PAD 2 /* FW places hdr with this padding */
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index 3bb9a91bb3f..dc18c25ca9e 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -31,14 +31,24 @@ struct dump_sign {
#define RI_E1 0x1
#define RI_E1H 0x2
+#define RI_E2 0x4
#define RI_ONLINE 0x100
-
+#define RI_PATH0_DUMP 0x200
+#define RI_PATH1_DUMP 0x400
#define RI_E1_OFFLINE (RI_E1)
#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
#define RI_E1H_OFFLINE (RI_E1H)
#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_ALL_OFFLINE (RI_E1 | RI_E1H)
-#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE (RI_E2)
+#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
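
The info word of each dump table entry is simply an OR of the chip bits it applies to plus RI_ONLINE, and the IS_*_ONLINE() helpers added to bnx2x_ethtool.c later in this patch test it with a mask-and-compare. The standalone check below (values copied from the macros above) shows how an RI_E1E1H_ONLINE entry is picked up in E1 and E1H dumps but skipped in an E2 dump.

#include <assert.h>

#define RI_E1		0x1
#define RI_E1H		0x2
#define RI_E2		0x4
#define RI_ONLINE	0x100
#define RI_E1_ONLINE	(RI_E1 | RI_ONLINE)
#define RI_E1H_ONLINE	(RI_E1H | RI_ONLINE)
#define RI_E2_ONLINE	(RI_E2 | RI_ONLINE)
#define RI_E1E1H_ONLINE	(RI_E1 | RI_E1H | RI_ONLINE)

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
#define IS_E2_ONLINE(info)	(((info) & RI_E2_ONLINE) == RI_E2_ONLINE)

int main(void)
{
	/* A register tagged for E1 and E1H online dumps (info = 0x103)... */
	assert(IS_E1_ONLINE(RI_E1E1H_ONLINE));   /* included in an E1 dump  */
	assert(IS_E1H_ONLINE(RI_E1E1H_ONLINE));  /* included in an E1H dump */
	assert(!IS_E2_ONLINE(RI_E1E1H_ONLINE));  /* skipped in an E2 dump   */
	return 0;
}
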
#define MAX_TIMER_PENDING 200
#define TIMER_SCAN_DONT_CARE 0xFF
@@ -513,6 +523,12 @@ static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
{ 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
};
+#define WREGS_COUNT_E2 1
+static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
+
+static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
+ { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
+};
static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
@@ -531,4 +547,17 @@ static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
{ 0x1640d0, 0x1640d4 };
+#define PAGE_MODE_VALUES_E2 2
+
+#define PAGE_READ_REGS_E2 1
+
+#define PAGE_WRITE_REGS_E2 1
+
+static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
+
+static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
+
+static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
+ { 0x58000, 4608, RI_E2_ONLINE } };
+
#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8b75b05e34c..54fe0615a8b 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,70 +25,46 @@
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
-
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
-
- cmd->supported = bp->port.supported;
- cmd->advertising = bp->port.advertising;
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Dual Media boards present all available port types */
+ cmd->supported = bp->port.supported[cfg_idx] |
+ (bp->port.supported[cfg_idx ^ 1] &
+ (SUPPORTED_TP | SUPPORTED_FIBRE));
+ cmd->advertising = bp->port.advertising[cfg_idx];
if ((bp->state == BNX2X_STATE_OPEN) &&
!(bp->flags & MF_FUNC_DIS) &&
(bp->link_vars.link_up)) {
cmd->speed = bp->link_vars.line_speed;
cmd->duplex = bp->link_vars.duplex;
- if (IS_E1HMF(bp)) {
- u16 vn_max_rate;
-
- vn_max_rate =
- ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
- if (vn_max_rate < cmd->speed)
- cmd->speed = vn_max_rate;
- }
} else {
- cmd->speed = -1;
- cmd->duplex = -1;
- }
- if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
- u32 ext_phy_type =
- XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- cmd->port = PORT_FIBRE;
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- cmd->port = PORT_TP;
- break;
+ cmd->speed = bp->link_params.req_line_speed[cfg_idx];
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ }
+ if (IS_MF(bp)) {
+ u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
+ FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
+ 100;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
- bp->link_params.ext_phy_config);
- break;
+ if (vn_max_rate < cmd->speed)
+ cmd->speed = vn_max_rate;
+ }
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- bp->link_params.ext_phy_config);
- break;
- }
- } else
+ if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
+ else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+ cmd->port = PORT_FIBRE;
+ else
+ BNX2X_ERR("XGXS PHY Failure detected\n");
cmd->phy_address = bp->mdio.prtad;
cmd->transceiver = XCVR_INTERNAL;
- if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
cmd->autoneg = AUTONEG_ENABLE;
else
cmd->autoneg = AUTONEG_DISABLE;
@@ -110,9 +86,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 advertising;
+ u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
- if (IS_E1HMF(bp))
+ if (IS_MF(bp))
return 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
@@ -123,26 +99,81 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ old_multi_phy_config = bp->link_params.multi_phy_config;
+ switch (cmd->port) {
+ case PORT_TP:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ /* Save new config in case command completes successfully */
+ new_multi_phy_config = bp->link_params.multi_phy_config;
+ /* Get the new cfg_idx */
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Restore old config in case command failed */
+ bp->link_params.multi_phy_config = old_multi_phy_config;
+ DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+
if (cmd->autoneg == AUTONEG_ENABLE) {
- if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
- cmd->advertising &= bp->port.supported;
+ cmd->advertising &= bp->port.supported[cfg_idx];
- bp->link_params.req_line_speed = SPEED_AUTO_NEG;
- bp->link_params.req_duplex = DUPLEX_FULL;
- bp->port.advertising |= (ADVERTISED_Autoneg |
+ bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+ bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
cmd->advertising);
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
- switch (cmd->speed) {
+ u32 speed = cmd->speed;
+ speed |= (cmd->speed_hi << 16);
+ switch (speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL) {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
DP(NETIF_MSG_LINK,
"10M full not supported\n");
@@ -152,7 +183,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
advertising = (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Half)) {
DP(NETIF_MSG_LINK,
"10M half not supported\n");
@@ -166,7 +197,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_100:
if (cmd->duplex == DUPLEX_FULL) {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
DP(NETIF_MSG_LINK,
"100M full not supported\n");
@@ -176,7 +207,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
advertising = (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Half)) {
DP(NETIF_MSG_LINK,
"100M half not supported\n");
@@ -194,7 +225,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseT_Full)) {
DP(NETIF_MSG_LINK, "1G full not supported\n");
return -EINVAL;
}
@@ -210,7 +242,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_2500baseX_Full)) {
DP(NETIF_MSG_LINK,
"2.5G full not supported\n");
return -EINVAL;
@@ -226,7 +259,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_10000baseT_Full)) {
DP(NETIF_MSG_LINK, "10G full not supported\n");
return -EINVAL;
}
@@ -236,20 +270,23 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported speed\n");
+ DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
return -EINVAL;
}
- bp->link_params.req_line_speed = cmd->speed;
- bp->link_params.req_duplex = cmd->duplex;
- bp->port.advertising = advertising;
+ bp->link_params.req_line_speed[cfg_idx] = speed;
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = advertising;
}
DP(NETIF_MSG_LINK, "req_line_speed %d\n"
DP_LEVEL " req_duplex %d advertising 0x%x\n",
- bp->link_params.req_line_speed, bp->link_params.req_duplex,
- bp->port.advertising);
+ bp->link_params.req_line_speed[cfg_idx],
+ bp->link_params.req_duplex[cfg_idx],
+ bp->port.advertising[cfg_idx]);
+ /* Set new config */
+ bp->link_params.multi_phy_config = new_multi_phy_config;
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
bnx2x_link_set(bp);
@@ -260,6 +297,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
+#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
static int bnx2x_get_regs_len(struct net_device *dev)
{
@@ -277,7 +315,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
regdump_len += wreg_addrs_e1[i].size *
(1 + wreg_addrs_e1[i].read_regs_count);
- } else { /* E1H */
+ } else if (CHIP_IS_E1H(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E1H_ONLINE(reg_addrs[i].info))
regdump_len += reg_addrs[i].size;
@@ -286,6 +324,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
regdump_len += wreg_addrs_e1h[i].size *
(1 + wreg_addrs_e1h[i].read_regs_count);
+ } else if (CHIP_IS_E2(bp)) {
+ for (i = 0; i < REGS_COUNT; i++)
+ if (IS_E2_ONLINE(reg_addrs[i].info))
+ regdump_len += reg_addrs[i].size;
+
+ for (i = 0; i < WREGS_COUNT_E2; i++)
+ if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
+ regdump_len += wreg_addrs_e2[i].size *
+ (1 + wreg_addrs_e2[i].read_regs_count);
}
regdump_len *= 4;
regdump_len += sizeof(struct dump_hdr);
@@ -293,6 +340,23 @@ static int bnx2x_get_regs_len(struct net_device *dev)
return regdump_len;
}
+static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
+{
+ u32 i, j, k, n;
+
+ for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
+ for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+ REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
+ for (k = 0; k < PAGE_READ_REGS_E2; k++)
+ if (IS_E2_ONLINE(page_read_regs_e2[k].info))
+ for (n = 0; n <
+ page_read_regs_e2[k].size; n++)
+ *p++ = REG_RD(bp,
+ page_read_regs_e2[k].addr + n*4);
+ }
+ }
+}
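
bnx2x_read_pages_regs_e2() walks the page-mode tables from bnx2x_dump.h: for each of the PAGE_MODE_VALUES_E2 selector values it programs every page_write_regs_e2 register and then copies each online page_read_regs_e2 window. With the tables added in this patch that is 2 page values times one 4608-dword window, i.e. 9216 dwords. The snippet below is only a restatement of that arithmetic with the numbers taken from bnx2x_dump.h above, not driver code.

#include <stdio.h>

/* Values copied from the bnx2x_dump.h hunk above. */
#define PAGE_MODE_VALUES_E2	2
#define PAGE_READ_REGS_E2	1
static const unsigned int page_read_sizes[PAGE_READ_REGS_E2] = { 4608 };

int main(void)
{
	unsigned int i, k, dwords = 0;

	for (i = 0; i < PAGE_MODE_VALUES_E2; i++)	/* per page selector value */
		for (k = 0; k < PAGE_READ_REGS_E2; k++)	/* per readable window    */
			dwords += page_read_sizes[k];

	printf("E2 page-mode dump: %u dwords (%u bytes)\n", dwords, dwords * 4);
	return 0;
}
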
+
static void bnx2x_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *_p)
{
@@ -312,7 +376,14 @@ static void bnx2x_get_regs(struct net_device *dev,
dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
- dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
+
+ if (CHIP_IS_E1(bp))
+ dump_hdr.info = RI_E1_ONLINE;
+ else if (CHIP_IS_E1H(bp))
+ dump_hdr.info = RI_E1H_ONLINE;
+ else if (CHIP_IS_E2(bp))
+ dump_hdr.info = RI_E2_ONLINE |
+ (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
p += dump_hdr.hdr_size + 1;
@@ -324,16 +395,25 @@ static void bnx2x_get_regs(struct net_device *dev,
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
- } else { /* E1H */
+ } else if (CHIP_IS_E1H(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E1H_ONLINE(reg_addrs[i].info))
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
+
+ } else if (CHIP_IS_E2(bp)) {
+ for (i = 0; i < REGS_COUNT; i++)
+ if (IS_E2_ONLINE(reg_addrs[i].info))
+ for (j = 0; j < reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp,
+ reg_addrs[i].addr + j*4);
+
+ bnx2x_read_pages_regs_e2(bp, p);
}
}
-#define PHY_FW_VER_LEN 10
+#define PHY_FW_VER_LEN 20
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -436,7 +516,7 @@ static u32 bnx2x_get_link(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- if (bp->flags & MF_FUNC_DIS)
+ if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
return 0;
return bp->link_vars.link_up;
@@ -811,7 +891,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int port = BP_PORT(bp);
int rc = 0;
-
+ u32 ext_phy_config;
if (!netif_running(dev))
return -EAGAIN;
@@ -827,6 +907,10 @@ static int bnx2x_set_eeprom(struct net_device *dev,
!bp->port.pmf)
return -EINVAL;
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
if (eeprom->magic == 0x50485950) {
/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -834,7 +918,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
bnx2x_acquire_phy_lock(bp);
rc |= bnx2x_link_reset(&bp->link_params,
&bp->link_vars, 0);
- if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
MISC_REGISTERS_GPIO_HIGH, port);
@@ -855,10 +939,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
}
} else if (eeprom->magic == 0x53985943) {
/* 'PHYC' (0x53985943): PHY FW upgrade completed */
- if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
/* DSP Remove Download Mode */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
@@ -866,7 +948,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
bnx2x_acquire_phy_lock(bp);
- bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
+ bnx2x_sfx7101_sp_sw_reset(bp,
+ &bp->link_params.phy[EXT_PHY1]);
/* wait 0.5 sec to allow it to run */
msleep(500);
@@ -879,6 +962,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
return rc;
}
+
static int bnx2x_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
@@ -920,7 +1004,14 @@ static void bnx2x_get_ringparam(struct net_device *dev,
ering->rx_mini_max_pending = 0;
ering->rx_jumbo_max_pending = 0;
- ering->rx_pending = bp->rx_ring_size;
+ if (bp->rx_ring_size)
+ ering->rx_pending = bp->rx_ring_size;
+ else
+ if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
+ ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
+ else
+ ering->rx_pending = MAX_RX_AVAIL;
+
ering->rx_mini_pending = 0;
ering->rx_jumbo_pending = 0;
@@ -940,6 +1031,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
}
if ((ering->rx_pending > MAX_RX_AVAIL) ||
+ (ering->rx_pending < MIN_RX_AVAIL) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
return -EINVAL;
@@ -959,10 +1051,9 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct bnx2x *bp = netdev_priv(dev);
-
- epause->autoneg = (bp->link_params.req_flow_ctrl ==
- BNX2X_FLOW_CTRL_AUTO) &&
- (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+ BNX2X_FLOW_CTRL_AUTO);
epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
BNX2X_FLOW_CTRL_RX);
@@ -978,37 +1069,39 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct bnx2x *bp = netdev_priv(dev);
-
- if (IS_E1HMF(bp))
+ u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (IS_MF(bp))
return 0;
DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
if (epause->rx_pause)
- bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
if (epause->tx_pause)
- bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
- if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
if (epause->autoneg) {
- if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "autoneg not supported\n");
return -EINVAL;
}
- if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+ bp->link_params.req_flow_ctrl[cfg_idx] =
+ BNX2X_FLOW_CTRL_AUTO;
+ }
}
DP(NETIF_MSG_LINK,
- "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
+ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -1185,6 +1278,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
u32 offset, mask, save_val, val;
+ if (CHIP_IS_E2(bp) &&
+ reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
+ continue;
offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
mask = reg_tbl[i].mask;
@@ -1192,6 +1288,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
REG_WR(bp, offset, (wr_val & mask));
+
val = REG_RD(bp, offset);
/* Restore the original register's value */
@@ -1236,20 +1333,33 @@ static int bnx2x_test_memory(struct bnx2x *bp)
u32 offset;
u32 e1_mask;
u32 e1h_mask;
+ u32 e2_mask;
} prty_tbl[] = {
- { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
- { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
- { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
- { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
- { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
- { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
-
- { NULL, 0xffffffff, 0, 0 }
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
+
+ { NULL, 0xffffffff, 0, 0, 0 }
};
if (!netif_running(bp->dev))
return rc;
+ /* Pre-check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+ (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
+ (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+ DP(NETIF_MSG_HW,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
/* Go through all the memories */
for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
for (j = 0; j < mem_tbl[i].size; j++)
@@ -1259,7 +1369,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
- (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
+ (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
+ (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@@ -1272,12 +1383,12 @@ test_mem_exit:
return rc;
}
-static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
- int cnt = 1000;
+ int cnt = 1400;
if (link_up)
- while (bnx2x_link_test(bp) && cnt--)
+ while (bnx2x_link_test(bp, is_serdes) && cnt--)
msleep(10);
}
@@ -1293,7 +1404,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
u16 pkt_prod, bd_prod;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
- struct eth_tx_parse_bd *pbd = NULL;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
dma_addr_t mapping;
union eth_rx_cqe *cqe;
u8 cqe_fp_flags;
@@ -1304,7 +1416,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
@@ -1349,16 +1461,23 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
- tx_start_bd->vlan = cpu_to_le16(pkt_prod);
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data = ((UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_ETH_ADDR_TYPE,
+ UNICAST_ADDRESS);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_HDR_NBDS,
+ 1);
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
- memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+ pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
+ pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
+
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
wmb();
@@ -1377,6 +1496,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
if (tx_idx != tx_start_idx + num_pkts)
goto test_loopback_exit;
+ /* Unlike the HC, the IGU won't generate an interrupt for status block
+ * updates that have been performed while interrupts were
+ * disabled.
+ */
+ if (bp->common.int_block == INT_BLOCK_IGU)
+ bnx2x_tx_int(fp_tx);
+
rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
if (rx_idx != rx_start_idx + num_pkts)
goto test_loopback_exit;
@@ -1519,8 +1645,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.length = 0;
if (CHIP_IS_E1(bp))
- /* use last unicast entries */
- config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
+ config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
else
config->hdr.offset = BP_FUNC(bp);
config->hdr.client_id = bp->fp->cl_id;
@@ -1528,9 +1653,9 @@ static int bnx2x_test_intr(struct bnx2x *bp)
bp->set_mac_pending++;
smp_wmb();
- rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
- U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+ U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
if (rc == 0) {
for (i = 0; i < 10; i++) {
if (!bp->set_mac_pending)
@@ -1549,7 +1674,7 @@ static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
-
+ u8 is_serdes;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
printk(KERN_ERR "Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
@@ -1562,8 +1687,9 @@ static void bnx2x_self_test(struct net_device *dev,
return;
/* offline tests are not supported in MF mode */
- if (IS_E1HMF(bp))
+ if (IS_MF(bp))
etest->flags &= ~ETH_TEST_FL_OFFLINE;
+ is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int port = BP_PORT(bp);
@@ -1575,11 +1701,12 @@ static void bnx2x_self_test(struct net_device *dev,
/* disable input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
- link_up = (bnx2x_link_test(bp) == 0);
+ link_up = bp->link_vars.link_up;
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_DIAG);
/* wait until link state is restored */
- bnx2x_wait_for_link(bp, link_up);
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
if (bnx2x_test_registers(bp) != 0) {
buf[0] = 1;
@@ -1589,6 +1716,7 @@ static void bnx2x_self_test(struct net_device *dev,
buf[1] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
+
buf[2] = bnx2x_test_loopback(bp, link_up);
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
@@ -1600,7 +1728,7 @@ static void bnx2x_self_test(struct net_device *dev,
bnx2x_nic_load(bp, LOAD_NORMAL);
/* wait until link state is restored */
- bnx2x_wait_for_link(bp, link_up);
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
buf[3] = 1;
@@ -1611,7 +1739,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bp->port.pmf)
- if (bnx2x_link_test(bp) != 0) {
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
buf[5] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
@@ -1752,8 +1880,8 @@ static const struct {
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_E1HMF_MODE_STAT(bp) \
- (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define IS_MF_MODE_STAT(bp) \
+ (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
@@ -1764,10 +1892,10 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
- if (!IS_E1HMF_MODE_STAT(bp))
+ if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
} else {
- if (IS_E1HMF_MODE_STAT(bp)) {
+ if (IS_MF_MODE_STAT(bp)) {
num_stats = 0;
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
@@ -1800,14 +1928,14 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
bnx2x_q_stats_arr[j].string, i);
k += BNX2X_NUM_Q_STATS;
}
- if (IS_E1HMF_MODE_STAT(bp))
+ if (IS_MF_MODE_STAT(bp))
break;
for (j = 0; j < BNX2X_NUM_STATS; j++)
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[j].string);
} else {
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + j*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -1851,7 +1979,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
}
k += BNX2X_NUM_Q_STATS;
}
- if (IS_E1HMF_MODE_STAT(bp))
+ if (IS_MF_MODE_STAT(bp))
return;
hw_stats = (u32 *)&bp->eth_stats;
for (j = 0; j < BNX2X_NUM_STATS; j++) {
@@ -1872,7 +2000,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
} else {
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
@@ -1910,10 +2038,11 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
- bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
- SPEED_1000);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER, SPEED_1000);
else
- bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OFF, 0);
msleep_interruptible(500);
if (signal_pending(current))
@@ -1921,7 +2050,7 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
}
if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+ bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
bp->link_vars.line_speed);
return 0;
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 08d71bf438d..f4e5b1ce814 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -7,369 +7,272 @@
* the Free Software Foundation.
*/
-
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET ? 0x7000 : 0x1000)
-#define CSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
-#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \
- (IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \
- ((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \
- 0x40) + (index * 0x4)))
-#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \
- (IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \
- ((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \
- 0x80) + (index * 0x4)))
-#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \
- ((function&1) * 0x100)) : (0x3540 + (function * 0x40)))
-#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \
- ((function&1) * 0x200)) : (0x35c0 + (function * 0x80)))
-#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \
- ((function&1) * 0x100)) : (0x3548 + (function * 0x40)))
-#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \
- ((function&1) * 0x200)) : (0x35c8 + (function * 0x80)))
-#define CSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
-#define CSTORM_HC_BTR_C_OFFSET(port) \
- (IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0)))
-#define CSTORM_HC_BTR_U_OFFSET(port) \
- (IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0)))
-#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \
- (function * 0x8)))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \
- (function * 0x8)))
-#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x2410 + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x2414 + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x241c + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x2427 + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x2412 + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x2426 + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \
- (IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \
- (0x2424 + (function * 0xc0) + (eqIdx * 0x18)))
-#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \
- (function * 0x8)))
-#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \
- (function * 0x8)))
-#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \
- (function * 0x8)))
-#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \
- (function * 0x8)))
-#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \
- (index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \
- (index * 0x4)))
-#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \
- (index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \
- (index * 0x4)))
-#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \
- (index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \
- (index * 0x4)))
-#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \
- (index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \
- (index * 0x4)))
-#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \
- (0x3040 + (port * 0x280) + (cpu_id * 0x28)))
-#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
- (0x4000 + (port * 0x800) + (cpu_id * 0x80)))
-#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
- (0x3048 + (port * 0x280) + (cpu_id * 0x28)))
-#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
- (0x4008 + (port * 0x800) + (cpu_id * 0x80)))
-#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
-#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
-#define CSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
- (function * 0x8)))
-#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
-#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET ? 0xa000 : 0x1000)
-#define TSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
-#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
- (IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
- : (0x9c0 + (port * 0x120) + (client_id * 0x10)))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
- (IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
+#ifndef BNX2X_FW_DEFS_H
+#define BNX2X_FW_DEFS_H
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[141].base + ((assertListEntry) * IRO[141].m1))
+#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
+ (IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+ (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
+ IRO[149].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+ (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
+ IRO[150].m2))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+ (IRO[156].base + ((funcId) * IRO[156].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[146].base + ((funcId) * IRO[146].m1))
+#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
+#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[311].base + ((pfId) * IRO[311].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1))
+ #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+ (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
+ IRO[304].m2))
+ #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
+ IRO[306].m2))
+ #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
+ IRO[305].m2))
+ #define \
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+ (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
+ IRO[307].m2))
+ #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+ (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
+ IRO[303].m2))
+ #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
+ IRO[309].m2))
+ #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+ (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
+ IRO[308].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[310].base + ((pfId) * IRO[310].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[302].base + ((pfId) * IRO[302].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[301].base + ((pfId) * IRO[301].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[300].base + ((pfId) * IRO[300].m1))
+#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+ (IRO[137].base + ((pfId) * IRO[137].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+ (IRO[136].base + ((pfId) * IRO[136].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+ (IRO[138].base + ((pfId) * IRO[138].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
+#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
+ (IRO[143].base + ((pfId) * IRO[143].m1))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+ (IRO[129].base + ((sbId) * IRO[129].m1))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+ (IRO[128].base + ((sbId) * IRO[128].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+ (IRO[132].base + ((sbId) * IRO[132].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+ (IRO[151].base + ((vfId) * IRO[151].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+ (IRO[152].base + ((vfId) * IRO[152].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[147].base + ((funcId) * IRO[147].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+ (IRO[198].base + ((pfId) * IRO[198].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[98].base + ((assertListEntry) * IRO[98].m1))
+ #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
+ (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
+ IRO[197].m2))
+#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
- (IS_E1H_OFFSET ? 0x1eda : 0xffffffff)
-#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
- ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
- 0x28) + (index * 0x4)))
-#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
- ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
-#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
- ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
-#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \
- (function * 0x8)))
-#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \
- (function * 0x40)))
-#define TSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff)
-#define TSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
-#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
- (function * 0x80)))
-#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
-#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \
- (IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \
- : (0x4c30 + (function * 0x40) + (pblEntry * 0x8)))
-#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \
- (function * 0x8)))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \
- (function * 0x8)))
-#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \
- (function * 0x40)))
-#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
- (IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \
- 0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40)))
-#define TSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \
- (function * 0x8)))
-#define TSTORM_TCP_MAX_CWND_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \
- (function * 0x8)))
-#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000)
-#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET ? 0x8000 : 0x1000)
-#define USTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
-#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
- (IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \
- (0x4010 + (port * 0x360) + (clientId * 0x30)))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \
- (IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \
- (0x4028 + (port * 0x360) + (clientId * 0x30)))
-#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \
- (IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff)
-#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \
- (IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \
- 0xffffffff)
-#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \
- (function * 0x8)))
-#define USTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
-#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \
- (function * 0x8)))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \
- (function * 0x8)))
-#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \
- (function * 0x8)))
-#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \
- (function * 0x8)))
-#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \
- (function * 0x8)))
-#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \
- (function * 0x8)))
-#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \
- (function * 0x8)))
-#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \
- (function * 0x8)))
-#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \
- (function * 0x8)))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \
- (function * 0x8)))
-#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
- (IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \
- (0x4018 + (port * 0x360) + (clientId * 0x30)))
-#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \
- (function * 0x8)))
-#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
- (IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \
- 0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28)))
-#define USTORM_RX_PRODS_OFFSET(port, client_id) \
- (IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \
- : (0x4000 + (port * 0x360) + (client_id * 0x30)))
-#define USTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \
- (function * 0x8)))
-#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095)
-#define USTORM_TPA_BTR_SIZE 0x1
-#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET ? 0x9000 : 0x1000)
-#define XSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
-#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
- (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50)))
-#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
- ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
- 0x28) + (index * 0x4)))
-#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
- ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
-#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
- ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
-#define XSTORM_E1HOV_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff)
-#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \
- (function * 0x8)))
-#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \
- (function * 0x90)))
-#define XSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET ? 0x2c50 : 0xffffffff)
-#define XSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
-#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
- (function * 0x8)))
-#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
- (function * 0x8)))
-#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
- (IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
- 0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
-#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
- (function * 0x90)))
-#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
- (function * 0x10)))
-#define XSTORM_SPQ_PROD_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
- (function * 0x10)))
-#define XSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
- (function * 0x8)))
-#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
- (IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
-#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
- (IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
-#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
- (IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
- * 0x4)) : (0x1978 + (function * 0x4)))
+ (IRO[105].base)
+#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
+ (IRO[96].base + ((pfId) * IRO[96].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[101].base + ((funcId) * IRO[101].m1))
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+ (IRO[195].base + ((pfId) * IRO[195].m1))
+#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
+#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
+ (IRO[91].base + ((pfId) * IRO[91].m1))
+#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
+ #define \
+ TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
+ (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
+ * IRO[260].m2))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
+ (IRO[265].base + ((pfId) * IRO[265].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+ (IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[267].base + ((pfId) * IRO[267].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[263].base + ((pfId) * IRO[263].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[262].base + ((pfId) * IRO[262].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[261].base + ((pfId) * IRO[261].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[259].base + ((pfId) * IRO[259].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+ (IRO[269].base + ((pfId) * IRO[269].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[256].base + ((pfId) * IRO[256].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[257].base + ((pfId) * IRO[257].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[258].base + ((pfId) * IRO[258].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+ (IRO[196].base + ((pfId) * IRO[196].m1))
+ #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
+ (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
+ IRO[100].m2))
+#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
+ (IRO[95].base + ((pfId) * IRO[95].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+ (IRO[211].base + ((pfId) * IRO[211].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[102].base + ((funcId) * IRO[102].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
+#define USTORM_AGG_DATA_SIZE (IRO[201].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[169].base + ((assertListEntry) * IRO[169].m1))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+ (IRO[178].base + ((portId) * IRO[178].m1))
+#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
+ (IRO[172].base + ((pfId) * IRO[172].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+ (IRO[313].base + ((pfId) * IRO[313].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[174].base + ((funcId) * IRO[174].m1))
+#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[277].base + ((pfId) * IRO[277].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[278].base + ((pfId) * IRO[278].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+ (IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[275].base + ((pfId) * IRO[275].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[273].base + ((pfId) * IRO[273].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[276].base + ((pfId) * IRO[276].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+ (IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[281].base + ((pfId) * IRO[281].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+ (IRO[176].base + ((pfId) * IRO[176].m1))
+ #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
+ (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
+ IRO[173].m2))
+ #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+ (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
+ IRO[204].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+ (IRO[205].base + ((qzoneId) * IRO[205].m1))
+#define USTORM_STATS_FLAGS_OFFSET(pfId) \
+ (IRO[171].base + ((pfId) * IRO[171].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
+#define USTORM_TPA_BTR_SIZE (IRO[202].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[175].base + ((funcId) * IRO[175].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[53].base + ((assertListEntry) * IRO[53].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+ (IRO[47].base + ((portId) * IRO[47].m1))
+#define XSTORM_E1HOV_OFFSET(pfId) \
+ (IRO[55].base + ((pfId) * IRO[55].m1))
+#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
+ (IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[49].base + ((pfId) * IRO[49].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[51].base + ((funcId) * IRO[51].m1))
+#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[290].base + ((pfId) * IRO[290].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+ (IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+ (IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+ (IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+ (IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+ (IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[289].base + ((pfId) * IRO[289].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[288].base + ((pfId) * IRO[288].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+ (IRO[286].base + ((pfId) * IRO[286].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[285].base + ((pfId) * IRO[285].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+ (IRO[283].base + ((pfId) * IRO[283].m1))
+#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
+ #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
+ (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
+ IRO[50].m2))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[48].base + ((pfId) * IRO[48].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+ (IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+ (IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+ (IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
+ (IRO[43].base + ((pfId) * IRO[43].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+ (IRO[206].base + ((portId) * IRO[206].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[207].base + ((portId) * IRO[207].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+ (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
+ IRO[209].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[52].base + ((funcId) * IRO[52].m1))
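For orientation, every *_OFFSET macro above resolves through the firmware's IRO table that now ships inside the firmware file (see the new iro_arr section added to bnx2x_fw_file_hdr further down in this patch): each entry supplies a base offset plus per-argument strides, so the macros stay valid across firmware builds. A minimal sketch of the idea, assuming a descriptor shaped like the fields the macros reference (base/m1/m2/size); the exact layout the firmware ships may differ:

struct iro {			/* hypothetical descriptor, one per IRO[] slot */
	u32 base;		/* base offset inside the storm RAM */
	u16 m1;			/* stride for the first macro argument */
	u16 m2;			/* stride for the second macro argument */
	u16 size;		/* size of the addressed element */
};

/* TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) then expands to
 * a plain 2-D index into a per-port, per-counter array in TSTORM RAM:
 */
static inline u32 tstorm_per_counter_stats_off(const struct iro *iro,
					       u32 portId, u32 tStatCntId)
{
	return iro[100].base + portId * iro[100].m1 + tStatCntId * iro[100].m2;
}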
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
-/**
-* This file defines HSI constants for the ETH flow
-*/
-#ifdef _EVEREST_MICROCODE
-#include "microcode_constants.h"
-#include "eth_rx_bd.h"
-#include "eth_tx_bd.h"
-#include "eth_rx_cqe.h"
-#include "eth_rx_sge.h"
-#include "eth_rx_cqe_next_page.h"
-#endif
-
/* RSS hash types */
#define DEFAULT_HASH_TYPE 0
#define IPV4_HASH_TYPE 1
@@ -389,11 +292,17 @@
#define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3
+/*Tx params*/
+#define X_ETH_NO_VLAN 0
+#define X_ETH_OUTBAND_VLAN 1
+#define X_ETH_INBAND_VLAN 2
/* Rx ring params */
#define U_ETH_LOCAL_BD_RING_SIZE 8
#define U_ETH_LOCAL_SGE_RING_SIZE 10
#define U_ETH_SGL_SIZE 8
-
+ /* The fw will pad the buffer with this value, so the IP header \
+ will be aligned to 4 bytes */
+#define IP_HEADER_ALIGNMENT_PADDING 2
#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
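The IP_HEADER_ALIGNMENT_PADDING constant a few lines up deserves a note: with a 14-byte Ethernet header, padding each Rx buffer by 2 bytes puts the IP header that follows on a 4-byte boundary. A hedged sketch of how a driver could honour it when allocating Rx skbs; the helper below is illustrative and assumes the usual <linux/skbuff.h>/<linux/netdevice.h> API, it is not code from this patch:

/* Illustrative only: reserve the firmware's 2-byte pad up front so the
 * IP header ends up 4-byte aligned after the 14-byte Ethernet header.
 */
static struct sk_buff *rx_alloc_aligned_skb(struct net_device *dev,
					    unsigned int buf_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev,
				buf_len + IP_HEADER_ALIGNMENT_PADDING);

	if (skb)
		skb_reserve(skb, IP_HEADER_ALIGNMENT_PADDING);
	return skb;
}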
@@ -409,16 +318,15 @@
#define U_ETH_UNDEFINED_Q 0xFF
/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_ETH_PORT_SETUP 80
-#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85
-#define RAMROD_CMD_ID_ETH_STAT_QUERY 90
-#define RAMROD_CMD_ID_ETH_UPDATE 100
-#define RAMROD_CMD_ID_ETH_HALT 105
-#define RAMROD_CMD_ID_ETH_SET_MAC 110
-#define RAMROD_CMD_ID_ETH_CFC_DEL 115
-#define RAMROD_CMD_ID_ETH_PORT_DEL 120
-#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125
-
+#define RAMROD_CMD_ID_ETH_UNUSED 0
+#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
+#define RAMROD_CMD_ID_ETH_UPDATE 2
+#define RAMROD_CMD_ID_ETH_HALT 3
+#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
+#define RAMROD_CMD_ID_ETH_ACTIVATE 5
+#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
+#define RAMROD_CMD_ID_ETH_EMPTY 7
+#define RAMROD_CMD_ID_ETH_TERMINATE 8
/* command values for set mac command */
#define T_ETH_MAC_COMMAND_SET 0
@@ -431,7 +339,9 @@
/* Maximal L2 clients supported */
#define ETH_MAX_RX_CLIENTS_E1 18
-#define ETH_MAX_RX_CLIENTS_E1H 26
+#define ETH_MAX_RX_CLIENTS_E1H 28
+
+#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
@@ -443,6 +353,20 @@
#define ETH_RSS_MODE_VLAN_PRI 2
#define ETH_RSS_MODE_E1HOV_PRI 3
#define ETH_RSS_MODE_IP_DSCP 4
+#define ETH_RSS_MODE_E2_INTEG 5
+
+
+/* ETH vlan filtering modes */
+#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
+#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
+ 1 /* Only the vlan_id is allowed */
+#define ETH_VLAN_FILTER_CLASSIFY \
+ 2 /* vlan will be added to CAM for classification */
+
+/* Fast path CQE selection */
+#define ETH_FP_CQE_REGULAR 0
+#define ETH_FP_CQE_SGL 1
+#define ETH_FP_CQE_RAW 2
/**
@@ -458,6 +382,7 @@
#define RESERVED_CONNECTION_TYPE_0 5
#define RESERVED_CONNECTION_TYPE_1 6
#define RESERVED_CONNECTION_TYPE_2 7
+#define NONE_CONNECTION_TYPE 8
#define PROTOCOL_STATE_BIT_OFFSET 6
@@ -466,6 +391,16 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+/* values of command IDs in the ramrod message */
+#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
+#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
+#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
+#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
+#define RAMROD_CMD_ID_COMMON_SET_MAC 5
+#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
+#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
+#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
+
/* microcode fixed page page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
@@ -473,46 +408,26 @@
/* Host coalescing constants */
#define HC_IGU_BC_MODE 0
#define HC_IGU_NBC_MODE 1
+/* Host coalescing constants. E1 includes E1H as well */
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8
+#define HC_SB_MAX_INDICES_E2 8
+
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2 136
+
+#define HC_SP_SB_ID 0xde
#define HC_REGULAR_SEGMENT 0
#define HC_DEFAULT_SEGMENT 1
+#define HC_SB_MAX_SM 2
-/* index numbers */
-#define HC_USTORM_DEF_SB_NUM_INDICES 8
-#define HC_CSTORM_DEF_SB_NUM_INDICES 8
-#define HC_XSTORM_DEF_SB_NUM_INDICES 4
-#define HC_TSTORM_DEF_SB_NUM_INDICES 4
-#define HC_USTORM_SB_NUM_INDICES 4
-#define HC_CSTORM_SB_NUM_INDICES 4
-
-/* index values - which counter to update */
-
-#define HC_INDEX_U_TOE_RX_CQ_CONS 0
-#define HC_INDEX_U_ETH_RX_CQ_CONS 1
-#define HC_INDEX_U_ETH_RX_BD_CONS 2
-#define HC_INDEX_U_FCOE_EQ_CONS 3
-
-#define HC_INDEX_C_TOE_TX_CQ_CONS 0
-#define HC_INDEX_C_ETH_TX_CQ_CONS 1
-#define HC_INDEX_C_ISCSI_EQ_CONS 2
-
-#define HC_INDEX_DEF_X_SPQ_CONS 0
-
-#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
-#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
-#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
-#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
-#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
-#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
-#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
-
-#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
-#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
-#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
-#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
-#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
-#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
-
+#define HC_SB_MAX_DYNAMIC_INDICES 4
+#define HC_FUNCTION_DISABLED 0xff
/* used by the driver to get the SB offset */
#define USTORM_ID 0
#define CSTORM_ID 1
@@ -529,45 +444,17 @@
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
-#define EMULATION_FREQUENCY_FACTOR 1600
-#define FPGA_FREQUENCY_FACTOR 100
#define TIMERS_TICK_SIZE_CHIP (1e-3)
-#define TIMERS_TICK_SIZE_EMUL \
- ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
-#define TIMERS_TICK_SIZE_FPGA \
- ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
#define TSEMI_CLK1_RESUL_CHIP (1e-3)
-#define TSEMI_CLK1_RESUL_EMUL \
- ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
-#define TSEMI_CLK1_RESUL_FPGA \
- ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
-
-#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
-#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
-#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
-#define XSEMI_CLK1_RESUL_EMUL \
- ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
-#define XSEMI_CLK1_RESUL_FPGA \
- ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
-
-#define XSEMI_CLK2_RESUL_CHIP (1e-6)
-#define XSEMI_CLK2_RESUL_EMUL \
- ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
-#define XSEMI_CLK2_RESUL_FPGA \
- ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
-#define SDM_TIMER_TICK_RESUL_EMUL \
- ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
-#define SDM_TIMER_TICK_RESUL_FPGA \
- ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
-
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
#define XSTORM_IP_ID_ROLL_HALF 0x8000
#define XSTORM_IP_ID_ROLL_ALL 0
@@ -576,10 +463,36 @@
#define NUM_OF_PROTOCOLS 4
#define NUM_OF_SAFC_BITS 16
#define MAX_COS_NUMBER 4
-#define MAX_T_STAT_COUNTER_ID 18
-#define MAX_X_STAT_COUNTER_ID 18
-#define MAX_U_STAT_COUNTER_ID 18
+#define FAIRNESS_COS_WRR_MODE 0
+#define FAIRNESS_COS_ETS_MODE 1
+
+
+/* Priority Flow Control (PFC) */
+#define MAX_PFC_PRIORITIES 8
+#define MAX_PFC_TRAFFIC_TYPES 8
+
+/* Available Traffic Types for Link Layer Flow Control */
+#define LLFC_TRAFFIC_TYPE_NW 0
+#define LLFC_TRAFFIC_TYPE_FCOE 1
+#define LLFC_TRAFFIC_TYPE_ISCSI 2
+ /***************** START OF E2 INTEGRATION \
+ CODE***************************************/
+#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
+ /***************** END OF E2 INTEGRATION \
+ CODE***************************************/
+#define LLFC_TRAFFIC_TYPE_MAX 4
+
+ /* used by the traffic_type_to_priority[] array to mark a traffic type \
+ that is not mapped to any priority */
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+#define LLFC_MODE_NONE 0
+#define LLFC_MODE_PFC 1
+#define LLFC_MODE_SAFC 2
+
+#define DCB_DISABLED 0
+#define DCB_ENABLED 1
#define UNKNOWN_ADDRESS 0
#define UNICAST_ADDRESS 1
@@ -587,8 +500,32 @@
#define BROADCAST_ADDRESS 3
#define SINGLE_FUNCTION 0
-#define MULTI_FUNCTION 1
+#define MULTI_FUNCTION_SD 1
+#define MULTI_FUNCTION_SI 2
#define IP_V4 0
#define IP_V6 1
+
+#define C_ERES_PER_PAGE \
+ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
+#define EVENT_RING_OPCODE_FUNCTION_START 1
+#define EVENT_RING_OPCODE_FUNCTION_STOP 2
+#define EVENT_RING_OPCODE_CFC_DEL 3
+#define EVENT_RING_OPCODE_CFC_DEL_WB 4
+#define EVENT_RING_OPCODE_SET_MAC 5
+#define EVENT_RING_OPCODE_STAT_QUERY 6
+#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
+#define EVENT_RING_OPCODE_START_TRAFFIC 8
+#define EVENT_RING_OPCODE_FORWARD_SETUP 9
+
+#define VF_PF_CHANNEL_STATE_READY 0
+#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
+
+#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
+
+
+#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index 3f5ee5d7cc2..f807262911e 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -31,6 +31,7 @@ struct bnx2x_fw_file_hdr {
struct bnx2x_fw_file_section csem_pram_data;
struct bnx2x_fw_file_section xsem_int_table_data;
struct bnx2x_fw_file_section xsem_pram_data;
+ struct bnx2x_fw_file_section iro_arr;
struct bnx2x_fw_file_section fw_version;
};
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317..18c8e23a0e8 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -6,6 +6,10 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
+#ifndef BNX2X_HSI_H
+#define BNX2X_HSI_H
+
+#include "bnx2x_fw_defs.h"
struct license_key {
u32 reserved[6];
@@ -78,6 +82,8 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_PHY11 0x000b0000
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
+#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
@@ -120,6 +126,23 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+ /* Set the MDC/MDIO access for the first external phy */
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
u32 power_dissipated; /* 0x11c */
#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
@@ -221,11 +244,93 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[64]; /* 0x1A8 */
+ u32 Reserved1[57]; /* 0x1A8 */
+ u32 speed_capability_mask2; /* 0x28C */
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800
+
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ u32 multi_phy_config; /* 0x290 */
+#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
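As a quick illustration of how the PHY_SELECTION and PHY_SWAPPED mask/shift pairs above are meant to be consumed (the helper names here are made up for the example):

/* Example only: extract which PHY the board selects as active. */
static u32 hw_cfg_phy_selection(u32 multi_phy_config)
{
	return (multi_phy_config & PORT_HW_CFG_PHY_SELECTION_MASK) >>
	       PORT_HW_CFG_PHY_SELECTION_SHIFT;
}

/* Example only: whether the two external PHYs' nvram params are swapped. */
static bool hw_cfg_phys_swapped(u32 multi_phy_config)
{
	return (multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_MASK) ==
	       PORT_HW_CFG_PHY_SWAPPED_ENABLED;
}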
+ /* Address of the second external phy */
+ u32 external_phy_config2; /* 0x294 */
+#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ u16 xgxs_config2_rx[4]; /* 0x296 */
+ u16 xgxs_config2_tx[4]; /* 0x2A0 */
u32 lane_config;
#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+
#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
@@ -515,10 +620,17 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
/* The default for MCP link configuration,
- uses the same defines as link_config */
+ uses the same defines as link_config */
u32 mfw_wol_link_cfg;
+ /* The default for the driver of the second external phy,
+ uses the same defines as link_config */
+ u32 link_config2; /* 0x47C */
- u32 reserved[19];
+ /* The default for MCP of the second external phy,
+ uses the same defines as link_config */
+ u32 mfw_wol_link_cfg2; /* 0x480 */
+
+ u32 Reserved2[17]; /* 0x484 */
};
@@ -551,6 +663,7 @@ struct shm_dev_info { /* size */
#define FUNC_7 7
#define E1_FUNC_MAX 2
#define E1H_FUNC_MAX 8
+#define E2_FUNC_MAX 4 /* per path */
#define VN_0 0
#define VN_1 1
@@ -686,8 +799,14 @@ struct drv_func_mb {
* The optic module verification commands require bootcode
* v5.0.6 or later
*/
-#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
-#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006
+#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ /*
+ * The specific optic module verification command requires bootcode
+ * v5.2.12 or later
+ */
+#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
@@ -703,6 +822,9 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+ /* Load common chip is supported from bootcode 6.0.0 */
+#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
+#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
@@ -903,11 +1025,22 @@ struct shmem_region { /* SharedMem Offset (size) */
struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
- struct drv_func_mb func_mb[E1H_FUNC_MAX];
+ struct drv_func_mb func_mb[]; /* 0x684
+ (44*2/4/8=0x58/0xb0/0x160) */
+
+}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
- struct mf_cfg mf_cfg;
+struct fw_flr_ack {
+ u32 pf_ack;
+ u32 vf_ack[1];
+ u32 iov_dis_ack;
+};
-}; /* 0x6dc */
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ struct fw_flr_ack ack;
+};
struct shmem2_region {
@@ -922,7 +1055,25 @@ struct shmem2_region {
#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
-
+ u32 ext_phy_fw_version2[PORT_MAX];
+ /*
+ * For backwards compatibility, if the mf_cfg_addr does not exist
+ * (the size field is smaller than 0xc) the mf_cfg resides at the
+ * end of struct shmem_region
+ */
+ u32 mf_cfg_addr;
+#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
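A hedged sketch of the backward-compatibility rule described above: use mf_cfg_addr when shmem2 is new enough to carry it, otherwise fall back to the legacy location at the end of struct shmem_region. The reg_rd() helper and the size check are illustrative assumptions, not code from this patch:

extern u32 reg_rd(u32 addr);	/* assumed register-read helper */

static u32 mf_cfg_base(u32 shmem_base, u32 shmem2_base, u32 shmem2_size)
{
	u32 addr = SHMEM_MF_CFG_ADDR_NONE;

	/* Only trust mf_cfg_addr if shmem2 is large enough to contain it */
	if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr))
		addr = reg_rd(shmem2_base +
			      offsetof(struct shmem2_region, mf_cfg_addr));

	/* Legacy layout: mf_cfg used to sit right after struct shmem_region */
	if (addr == SHMEM_MF_CFG_ADDR_NONE)
		addr = shmem_base + sizeof(struct shmem_region);

	return addr;
}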
+ struct fw_flr_mb flr_mb;
+ u32 reserved[3];
+ /*
+ * The other_shmemX_base_addr fields hold the other path's shmem
+ * addresses, needed for example for common phy init, or so that path1
+ * can find the mcp debug trace, which is located at an offset from the
+ * shmem of path0
+ */
+ u32 other_shmem_base_addr;
+ u32 other_shmem2_base_addr;
};
@@ -978,7 +1129,7 @@ struct emac_stats {
};
-struct bmac_stats {
+struct bmac1_stats {
u32 tx_stat_gtpkt_lo;
u32 tx_stat_gtpkt_hi;
u32 tx_stat_gtxpf_lo;
@@ -1082,10 +1233,126 @@ struct bmac_stats {
u32 rx_stat_gripj_hi;
};
+struct bmac2_stats {
+ u32 tx_stat_gtpk_lo; /* gtpok */
+ u32 tx_stat_gtpk_hi; /* gtpok */
+ u32 tx_stat_gtxpf_lo; /* gtpf */
+ u32 tx_stat_gtxpf_hi; /* gtpf */
+ u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
+ u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
+ u32 tx_stat_gtfcs_lo;
+ u32 tx_stat_gtfcs_hi;
+ u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
+ u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtpkt1_lo; /* gtpkt */
+ u32 tx_stat_gtpkt1_hi; /* gtpkt */
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_gruca_lo;
+ u32 rx_stat_gruca_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxpf_lo; /* grpf */
+ u32 rx_stat_grxpf_hi; /* grpf */
+ u32 rx_stat_grpp_lo;
+ u32 rx_stat_grpp_hi;
+ u32 rx_stat_grxuo_lo; /* gruo */
+ u32 rx_stat_grxuo_hi; /* gruo */
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grxcf_lo; /* grcf */
+ u32 rx_stat_grxcf_hi; /* grcf */
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grpok_lo;
+ u32 rx_stat_grpok_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo; /* grerrbyt */
+ u32 rx_stat_grerb_hi; /* grerrbyt */
+ u32 rx_stat_grfre_lo; /* grfrerr */
+ u32 rx_stat_grfre_hi; /* grfrerr */
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
+};
union mac_stats {
- struct emac_stats emac_stats;
- struct bmac_stats bmac_stats;
+ struct emac_stats emac_stats;
+ struct bmac1_stats bmac1_stats;
+ struct bmac2_stats bmac2_stats;
};
@@ -1259,17 +1526,17 @@ struct host_func_stats {
};
-#define BCM_5710_FW_MAJOR_VERSION 5
-#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 13
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_MAJOR_VERSION 6
+#define BCM_5710_FW_MINOR_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 34
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
/*
* attention bits
*/
-struct atten_def_status_block {
+struct atten_sp_status_block {
__le32 attn_bits;
__le32 attn_bits_ack;
u8 status_block_id;
@@ -1327,7 +1594,60 @@ struct doorbell_set_prod {
/*
- * IGU driver acknowledgement register
+ * 3 lines. status block
+ */
+struct hc_status_block_e1x {
+ __le16 index_values[HC_SB_MAX_INDICES_E1X];
+ __le16 running_index[HC_SB_MAX_SM];
+ u32 rsrv;
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x {
+ struct hc_status_block_e1x sb;
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e2 {
+ __le16 index_values[HC_SB_MAX_INDICES_E2];
+ __le16 running_index[HC_SB_MAX_SM];
+ u32 reserved;
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2 {
+ struct hc_status_block_e2 sb;
+};
+
+
+/*
+ * 5 lines. slow-path status block
+ */
+struct hc_sp_status_block {
+ __le16 index_values[HC_SP_SB_MAX_INDICES];
+ __le16 running_index;
+ __le16 rsrv;
+ u32 rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block {
+ struct atten_sp_status_block atten_status_block;
+ struct hc_sp_status_block sp_sb;
+};
+
+
+/*
+ * IGU driver acknowledgment register
*/
struct igu_ack_register {
#if defined(__BIG_ENDIAN)
@@ -1417,6 +1737,24 @@ union igu_consprod_reg {
/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12)
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19)
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20)
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
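The igu_ctrl_reg masks above pack one IGU access command into the single ctrl_data word; a minimal packing sketch (the helper name and the meaning attached to 'type' are assumptions):

/* Example only: compose ctrl_data from an IGU address, a function id
 * and a command type, each masked into its field.
 */
static u32 igu_ctrl_pack(u32 addr, u32 fid, u32 type)
{
	return ((addr << IGU_CTRL_REG_ADDRESS_SHIFT) & IGU_CTRL_REG_ADDRESS) |
	       ((fid  << IGU_CTRL_REG_FID_SHIFT)     & IGU_CTRL_REG_FID)     |
	       ((type << IGU_CTRL_REG_TYPE_SHIFT)    & IGU_CTRL_REG_TYPE);
}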
+/*
* Parser parsing flags field
*/
struct parsing_flags {
@@ -1485,8 +1823,14 @@ struct dmae_command {
#define DMAE_COMMAND_DST_RESET_SHIFT 14
#define DMAE_COMMAND_E1HVN (0x3<<15)
#define DMAE_COMMAND_E1HVN_SHIFT 15
-#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17)
-#define DMAE_COMMAND_RESERVED0_SHIFT 17
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
u32 src_addr_lo;
u32 src_addr_hi;
u32 dst_addr_lo;
@@ -1511,11 +1855,11 @@ struct dmae_command {
u16 crc16_c;
#endif
#if defined(__BIG_ENDIAN)
- u16 reserved2;
+ u16 reserved3;
u16 crc_t10;
#elif defined(__LITTLE_ENDIAN)
u16 crc_t10;
- u16 reserved2;
+ u16 reserved3;
#endif
#if defined(__BIG_ENDIAN)
u16 xsum8;
@@ -1536,96 +1880,20 @@ struct double_regpair {
/*
- * The eth storm context of Ustorm (configuration part)
+ * SDM operation gen command (generate aggregative interrupt)
*/
-struct ustorm_eth_st_context_config {
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
- u8 status_block_id;
- u8 clientId;
- u8 sb_index_numbers;
-#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
-#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
-#elif defined(__LITTLE_ENDIAN)
- u8 sb_index_numbers;
-#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
-#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
- u8 clientId;
- u8 status_block_id;
- u8 flags;
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
-#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
-#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
-#endif
-#if defined(__BIG_ENDIAN)
- u16 bd_buff_size;
- u8 statistics_counter_id;
- u8 mc_alignment_log_size;
-#elif defined(__LITTLE_ENDIAN)
- u8 mc_alignment_log_size;
- u8 statistics_counter_id;
- u16 bd_buff_size;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __local_sge_prod;
- u8 __local_bd_prod;
- u16 sge_buff_size;
-#elif defined(__LITTLE_ENDIAN)
- u16 sge_buff_size;
- u8 __local_bd_prod;
- u8 __local_sge_prod;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __sdm_bd_expected_counter;
- u8 cstorm_agg_int;
- u8 __expected_bds_on_ram;
-#elif defined(__LITTLE_ENDIAN)
- u8 __expected_bds_on_ram;
- u8 cstorm_agg_int;
- u16 __sdm_bd_expected_counter;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __ring_data_ram_addr;
- u16 __hc_cstorm_ram_addr;
-#elif defined(__LITTLE_ENDIAN)
- u16 __hc_cstorm_ram_addr;
- u16 __ring_data_ram_addr;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 reserved1;
- u8 max_sges_for_packet;
- u16 __bd_ring_ram_addr;
-#elif defined(__LITTLE_ENDIAN)
- u16 __bd_ring_ram_addr;
- u8 max_sges_for_packet;
- u8 reserved1;
-#endif
- u32 bd_page_base_lo;
- u32 bd_page_base_hi;
- u32 sge_page_base_lo;
- u32 sge_page_base_hi;
- struct regpair reserved2;
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
+#define SDM_OP_GEN_RESERVED_SHIFT 17
};
/*
@@ -1644,20 +1912,13 @@ struct eth_rx_sge {
__le32 addr_hi;
};
-/*
- * Local BDs and SGEs rings (in ETH)
- */
-struct eth_local_rx_rings {
- struct eth_rx_bd __local_bd_ring[8];
- struct eth_rx_sge __local_sge_ring[10];
-};
+
/*
* The eth storm context of Ustorm
*/
struct ustorm_eth_st_context {
- struct ustorm_eth_st_context_config common;
- struct eth_local_rx_rings __rings;
+ u32 reserved0[48];
};
/*
@@ -1668,337 +1929,53 @@ struct tstorm_eth_st_context {
};
/*
- * The eth aggregative context section of Xstorm
- */
-struct xstorm_eth_extra_ag_context_section {
-#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars1;
- u8 __reserved50;
- u16 __mss;
-#elif defined(__LITTLE_ENDIAN)
- u16 __mss;
- u8 __reserved50;
- u8 __tcp_agg_vars1;
-#endif
- u32 __snd_nxt;
- u32 __tx_wnd;
- u32 __snd_una;
- u32 __reserved53;
-#if defined(__BIG_ENDIAN)
- u8 __agg_val8_th;
- u8 __agg_val8;
- u16 __tcp_agg_vars2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __tcp_agg_vars2;
- u8 __agg_val8;
- u8 __agg_val8_th;
-#endif
- u32 __reserved58;
- u32 __reserved59;
- u32 __reserved60;
- u32 __reserved61;
-#if defined(__BIG_ENDIAN)
- u16 __agg_val7_th;
- u16 __agg_val7;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val7;
- u16 __agg_val7_th;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars5;
- u8 __tcp_agg_vars4;
- u8 __tcp_agg_vars3;
- u8 __reserved62;
-#elif defined(__LITTLE_ENDIAN)
- u8 __reserved62;
- u8 __tcp_agg_vars3;
- u8 __tcp_agg_vars4;
- u8 __tcp_agg_vars5;
-#endif
- u32 __tcp_agg_vars6;
-#if defined(__BIG_ENDIAN)
- u16 __agg_misc6;
- u16 __tcp_agg_vars7;
-#elif defined(__LITTLE_ENDIAN)
- u16 __tcp_agg_vars7;
- u16 __agg_misc6;
-#endif
- u32 __agg_val10;
- u32 __agg_val10_th;
-#if defined(__BIG_ENDIAN)
- u16 __reserved3;
- u8 __reserved2;
- u8 __da_only_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u8 __da_only_cnt;
- u8 __reserved2;
- u16 __reserved3;
-#endif
-};
-
-/*
* The eth aggregative context of Xstorm
*/
struct xstorm_eth_ag_context {
-#if defined(__BIG_ENDIAN)
- u16 agg_val1;
- u8 __agg_vars1;
- u8 __state;
-#elif defined(__LITTLE_ENDIAN)
- u8 __state;
- u8 __agg_vars1;
- u16 agg_val1;
-#endif
+ u32 reserved0;
#if defined(__BIG_ENDIAN)
u8 cdu_reserved;
- u8 __agg_vars4;
- u8 __agg_vars3;
- u8 __agg_vars2;
+ u8 reserved2;
+ u16 reserved1;
#elif defined(__LITTLE_ENDIAN)
- u8 __agg_vars2;
- u8 __agg_vars3;
- u8 __agg_vars4;
+ u16 reserved1;
+ u8 reserved2;
u8 cdu_reserved;
#endif
- u32 __bd_prod;
-#if defined(__BIG_ENDIAN)
- u16 __agg_vars5;
- u16 __agg_val4_th;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val4_th;
- u16 __agg_vars5;
-#endif
- struct xstorm_eth_extra_ag_context_section __extra_section;
-#if defined(__BIG_ENDIAN)
- u16 __agg_vars7;
- u8 __agg_val3_th;
- u8 __agg_vars6;
-#elif defined(__LITTLE_ENDIAN)
- u8 __agg_vars6;
- u8 __agg_val3_th;
- u16 __agg_vars7;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val11_th;
- u16 __agg_val11;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val11;
- u16 __agg_val11_th;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __reserved1;
- u8 __agg_val6_th;
- u16 __agg_val9;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val9;
- u8 __agg_val6_th;
- u8 __reserved1;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val2_th;
- u16 __agg_val2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val2;
- u16 __agg_val2_th;
-#endif
- u32 __agg_vars8;
-#if defined(__BIG_ENDIAN)
- u16 __agg_misc0;
- u16 __agg_val4;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val4;
- u16 __agg_misc0;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __agg_val3;
- u8 __agg_val6;
- u8 __agg_val5_th;
- u8 __agg_val5;
-#elif defined(__LITTLE_ENDIAN)
- u8 __agg_val5;
- u8 __agg_val5_th;
- u8 __agg_val6;
- u8 __agg_val3;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_misc1;
- u16 __bd_ind_max_val;
-#elif defined(__LITTLE_ENDIAN)
- u16 __bd_ind_max_val;
- u16 __agg_misc1;
-#endif
- u32 __reserved57;
- u32 __agg_misc4;
- u32 __agg_misc5;
-};
-
-/*
- * The eth extra aggregative context section of Tstorm
- */
-struct tstorm_eth_extra_ag_context_section {
- u32 __agg_val1;
-#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars2;
- u8 __agg_val3;
- u16 __agg_val2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val2;
- u8 __agg_val3;
- u8 __tcp_agg_vars2;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val5;
- u8 __agg_val6;
- u8 __tcp_agg_vars3;
-#elif defined(__LITTLE_ENDIAN)
- u8 __tcp_agg_vars3;
- u8 __agg_val6;
- u16 __agg_val5;
-#endif
- u32 __reserved63;
- u32 __reserved64;
- u32 __reserved65;
- u32 __reserved66;
- u32 __reserved67;
- u32 __tcp_agg_vars1;
- u32 __reserved61;
- u32 __reserved62;
- u32 __reserved2;
+ u32 reserved3[30];
};
/*
* The eth aggregative context of Tstorm
*/
struct tstorm_eth_ag_context {
-#if defined(__BIG_ENDIAN)
- u16 __reserved54;
- u8 __agg_vars1;
- u8 __state;
-#elif defined(__LITTLE_ENDIAN)
- u8 __state;
- u8 __agg_vars1;
- u16 __reserved54;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val4;
- u16 __agg_vars2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_vars2;
- u16 __agg_val4;
-#endif
- struct tstorm_eth_extra_ag_context_section __extra_section;
+ u32 __reserved0[14];
};
+
/*
* The eth aggregative context of Cstorm
*/
struct cstorm_eth_ag_context {
- u32 __agg_vars1;
-#if defined(__BIG_ENDIAN)
- u8 __aux1_th;
- u8 __aux1_val;
- u16 __agg_vars2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_vars2;
- u8 __aux1_val;
- u8 __aux1_th;
-#endif
- u32 __num_of_treated_packet;
- u32 __last_packet_treated;
-#if defined(__BIG_ENDIAN)
- u16 __reserved58;
- u16 __reserved57;
-#elif defined(__LITTLE_ENDIAN)
- u16 __reserved57;
- u16 __reserved58;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __reserved62;
- u8 __reserved61;
- u8 __reserved60;
- u8 __reserved59;
-#elif defined(__LITTLE_ENDIAN)
- u8 __reserved59;
- u8 __reserved60;
- u8 __reserved61;
- u8 __reserved62;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __reserved64;
- u16 __reserved63;
-#elif defined(__LITTLE_ENDIAN)
- u16 __reserved63;
- u16 __reserved64;
-#endif
- u32 __reserved65;
-#if defined(__BIG_ENDIAN)
- u16 __agg_vars3;
- u16 __rq_inv_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u16 __rq_inv_cnt;
- u16 __agg_vars3;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __packet_index_th;
- u16 __packet_index;
-#elif defined(__LITTLE_ENDIAN)
- u16 __packet_index;
- u16 __packet_index_th;
-#endif
+ u32 __reserved0[10];
};
+
/*
* The eth aggregative context of Ustorm
*/
struct ustorm_eth_ag_context {
-#if defined(__BIG_ENDIAN)
- u8 __aux_counter_flags;
- u8 __agg_vars2;
- u8 __agg_vars1;
- u8 __state;
-#elif defined(__LITTLE_ENDIAN)
- u8 __state;
- u8 __agg_vars1;
- u8 __agg_vars2;
- u8 __aux_counter_flags;
-#endif
+ u32 __reserved0;
#if defined(__BIG_ENDIAN)
u8 cdu_usage;
- u8 __agg_misc2;
- u16 __agg_misc1;
+ u8 __reserved2;
+ u16 __reserved1;
#elif defined(__LITTLE_ENDIAN)
- u16 __agg_misc1;
- u8 __agg_misc2;
+ u16 __reserved1;
+ u8 __reserved2;
u8 cdu_usage;
#endif
- u32 __agg_misc4;
-#if defined(__BIG_ENDIAN)
- u8 __agg_val3_th;
- u8 __agg_val3;
- u16 __agg_misc3;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_misc3;
- u8 __agg_val3;
- u8 __agg_val3_th;
-#endif
- u32 __agg_val1;
- u32 __agg_misc4_th;
-#if defined(__BIG_ENDIAN)
- u16 __agg_val2_th;
- u16 __agg_val2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val2;
- u16 __agg_val2_th;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __reserved2;
- u8 __decision_rules;
- u8 __decision_rule_enable_bits;
-#elif defined(__LITTLE_ENDIAN)
- u8 __decision_rule_enable_bits;
- u8 __decision_rules;
- u16 __reserved2;
-#endif
+ u32 __reserved3[6];
};
/*
@@ -2022,18 +1999,16 @@ struct timers_block_context {
*/
struct eth_tx_bd_flags {
u8 as_bitfield;
-#define ETH_TX_BD_FLAGS_VLAN_TAG (0x1<<0)
-#define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0
-#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1)
-#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1
-#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2)
-#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2
-#define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
-#define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
-#define ETH_TX_BD_FLAGS_HDR_POOL (0x1<<5)
-#define ETH_TX_BD_FLAGS_HDR_POOL_SHIFT 5
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
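The old single VLAN_TAG bit has become a two-bit VLAN_MODE field; the X_ETH_NO_VLAN/X_ETH_OUTBAND_VLAN/X_ETH_INBAND_VLAN Tx params defined in bnx2x_fw_defs.h above look like its intended encodings. A hedged example of setting it on a start BD (tx_start_bd is an assumed pointer to struct eth_tx_start_bd, whose renamed vlan_or_ethertype field appears in the next hunk, and vlan_tci is an assumed local):

/* Illustrative: request out-of-band VLAN insertion for this packet. */
tx_start_bd->bd_flags.as_bitfield |=
	(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT) &
	ETH_TX_BD_FLAGS_VLAN_MODE;
tx_start_bd->vlan_or_ethertype = cpu_to_le16(vlan_tci);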
@@ -2048,7 +2023,7 @@ struct eth_tx_start_bd {
__le32 addr_hi;
__le16 nbd;
__le16 nbytes;
- __le16 vlan;
+ __le16 vlan_or_ethertype;
struct eth_tx_bd_flags bd_flags;
u8 general_data;
#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
@@ -2061,48 +2036,48 @@ struct eth_tx_start_bd {
* Tx regular BD structure
*/
struct eth_tx_bd {
- u32 addr_lo;
- u32 addr_hi;
- u16 total_pkt_bytes;
- u16 nbytes;
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 total_pkt_bytes;
+ __le16 nbytes;
u8 reserved[4];
};
/*
- * Tx parsing BD structure for ETH,Relevant in START
+ * Tx parsing BD structure for ETH E1/E1h
*/
-struct eth_tx_parse_bd {
+struct eth_tx_parse_bd_e1x {
u8 global_data;
-#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0)
-#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0
-#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4)
-#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4
-#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
-#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
-#define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6)
-#define ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT 6
-#define ETH_TX_PARSE_BD_NS_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_NS_FLG_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
u8 tcp_flags;
-#define ETH_TX_PARSE_BD_FIN_FLG (0x1<<0)
-#define ETH_TX_PARSE_BD_FIN_FLG_SHIFT 0
-#define ETH_TX_PARSE_BD_SYN_FLG (0x1<<1)
-#define ETH_TX_PARSE_BD_SYN_FLG_SHIFT 1
-#define ETH_TX_PARSE_BD_RST_FLG (0x1<<2)
-#define ETH_TX_PARSE_BD_RST_FLG_SHIFT 2
-#define ETH_TX_PARSE_BD_PSH_FLG (0x1<<3)
-#define ETH_TX_PARSE_BD_PSH_FLG_SHIFT 3
-#define ETH_TX_PARSE_BD_ACK_FLG (0x1<<4)
-#define ETH_TX_PARSE_BD_ACK_FLG_SHIFT 4
-#define ETH_TX_PARSE_BD_URG_FLG (0x1<<5)
-#define ETH_TX_PARSE_BD_URG_FLG_SHIFT 5
-#define ETH_TX_PARSE_BD_ECE_FLG (0x1<<6)
-#define ETH_TX_PARSE_BD_ECE_FLG_SHIFT 6
-#define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7
- u8 ip_hlen;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+ u8 ip_hlen_w;
s8 reserved;
- __le16 total_hlen;
+ __le16 total_hlen_w;
__le16 tcp_pseudo_csum;
__le16 lso_mss;
__le16 ip_id;
@@ -2110,6 +2085,27 @@ struct eth_tx_parse_bd {
};
/*
+ * Tx parsing BD structure for ETH E2
+ */
+struct eth_tx_parse_bd_e2 {
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le32 parsing_data;
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+};
+
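The E2 parsing BD collapses into one parsing_data word what the E1x BD keeps in separate bytes; a hedged sketch of filling it for a TCP LSO frame (function and parameter names are made up, and the DA/SA fields are left out):

/* Illustrative: tcp_off_w = TCP header start in 16-bit words,
 * tcp_hlen_dw = TCP header length in 32-bit words, mss = LSO MSS.
 */
static __le32 pack_parsing_data_e2(u16 tcp_off_w, u8 tcp_hlen_dw, u16 mss)
{
	u32 pd;

	pd  = (tcp_off_w << ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
	      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
	pd |= (tcp_hlen_dw << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
	      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
	pd |= (mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
	      ETH_TX_PARSE_BD_E2_LSO_MSS;

	return cpu_to_le32(pd);
}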
+/*
* The last BD in the BD memory will hold a pointer to the next BD memory
*/
struct eth_tx_next_bd {
@@ -2124,79 +2120,24 @@ struct eth_tx_next_bd {
union eth_tx_bd_types {
struct eth_tx_start_bd start_bd;
struct eth_tx_bd reg_bd;
- struct eth_tx_parse_bd parse_bd;
+ struct eth_tx_parse_bd_e1x parse_bd_e1x;
+ struct eth_tx_parse_bd_e2 parse_bd_e2;
struct eth_tx_next_bd next_bd;
};
+
/*
* The eth storm context of Xstorm
*/
struct xstorm_eth_st_context {
- u32 tx_bd_page_base_lo;
- u32 tx_bd_page_base_hi;
-#if defined(__BIG_ENDIAN)
- u16 tx_bd_cons;
- u8 statistics_data;
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
- u8 __local_tx_bd_prod;
-#elif defined(__LITTLE_ENDIAN)
- u8 __local_tx_bd_prod;
- u8 statistics_data;
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
-#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
- u16 tx_bd_cons;
-#endif
- u32 __reserved1;
- u32 __reserved2;
-#if defined(__BIG_ENDIAN)
- u8 __ram_cache_index;
- u8 __double_buffer_client;
- u16 __pkt_cons;
-#elif defined(__LITTLE_ENDIAN)
- u16 __pkt_cons;
- u8 __double_buffer_client;
- u8 __ram_cache_index;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __statistics_address;
- u16 __gso_next;
-#elif defined(__LITTLE_ENDIAN)
- u16 __gso_next;
- u16 __statistics_address;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __local_tx_bd_cons;
- u8 safc_group_num;
- u8 safc_group_en;
- u8 __is_eth_conn;
-#elif defined(__LITTLE_ENDIAN)
- u8 __is_eth_conn;
- u8 safc_group_en;
- u8 safc_group_num;
- u8 __local_tx_bd_cons;
-#endif
- union eth_tx_bd_types __bds[13];
+ u32 reserved0[60];
};
/*
* The eth storm context of Cstorm
*/
struct cstorm_eth_st_context {
-#if defined(__BIG_ENDIAN)
- u16 __reserved0;
- u8 sb_index_number;
- u8 status_block_id;
-#elif defined(__LITTLE_ENDIAN)
- u8 status_block_id;
- u8 sb_index_number;
- u16 __reserved0;
-#endif
- u32 __reserved1[3];
+ u32 __reserved0[4];
};
/*
@@ -2244,103 +2185,114 @@ struct eth_tx_doorbell {
/*
- * cstorm default status block, generated by ustorm
- */
-struct cstorm_def_status_block_u {
- __le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
- __le16 status_block_index;
- u8 func;
- u8 status_block_id;
- __le32 __flags;
-};
-
-/*
- * cstorm default status block, generated by cstorm
- */
-struct cstorm_def_status_block_c {
- __le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
- __le16 status_block_index;
- u8 func;
- u8 status_block_id;
- __le32 __flags;
-};
-
-/*
- * xstorm status block
+ * client init fc data
*/
-struct xstorm_def_status_block {
- __le16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES];
- __le16 status_block_index;
- u8 func;
- u8 status_block_id;
- __le32 __flags;
+struct client_init_fc_data {
+ __le16 cqe_pause_thr_low;
+ __le16 cqe_pause_thr_high;
+ __le16 bd_pause_thr_low;
+ __le16 bd_pause_thr_high;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+ __le16 rx_cos_mask;
+ u8 safc_group_num;
+ u8 safc_group_en_flg;
+ u8 traffic_type;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
};
-/*
- * tstorm status block
- */
-struct tstorm_def_status_block {
- __le16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
- __le16 status_block_index;
- u8 func;
- u8 status_block_id;
- __le32 __flags;
-};
/*
- * host status block
+ * client init ramrod data
*/
-struct host_def_status_block {
- struct atten_def_status_block atten_status_block;
- struct cstorm_def_status_block_u u_def_status_block;
- struct cstorm_def_status_block_c c_def_status_block;
- struct xstorm_def_status_block x_def_status_block;
- struct tstorm_def_status_block t_def_status_block;
+struct client_init_general_data {
+ u8 client_id;
+ u8 statistics_counter_id;
+ u8 statistics_en_flg;
+ u8 is_fcoe_flg;
+ u8 activate_flg;
+ u8 sp_client_id;
+ __le16 reserved0;
+ __le32 reserved1[2];
};
/*
- * cstorm status block, generated by ustorm
+ * client init rx data
*/
-struct cstorm_status_block_u {
- __le16 index_values[HC_USTORM_SB_NUM_INDICES];
- __le16 status_block_index;
- u8 func;
+struct client_init_rx_data {
+ u8 tpa_en_flg;
+ u8 vmqueue_mode_en_flg;
+ u8 extra_data_over_sgl_en_flg;
+ u8 cache_line_alignment_log_size;
+ u8 enable_dynamic_hc;
+ u8 max_sges_for_packet;
+ u8 client_qzone_id;
+ u8 drop_ip_cs_err_flg;
+ u8 drop_tcp_cs_err_flg;
+ u8 drop_ttl0_flg;
+ u8 drop_udp_cs_err_flg;
+ u8 inner_vlan_removal_enable_flg;
+ u8 outer_vlan_removal_enable_flg;
u8 status_block_id;
- __le32 __flags;
+ u8 rx_sb_index_number;
+ u8 reserved0[3];
+ __le16 bd_buff_size;
+ __le16 sge_buff_size;
+ __le16 mtu;
+ struct regpair bd_page_base;
+ struct regpair sge_page_base;
+ struct regpair cqe_page_base;
+ u8 is_leading_rss;
+ u8 is_approx_mcast;
+ __le16 max_agg_size;
+ __le32 reserved2[3];
+};
+
+/*
+ * client init tx data
+ */
+struct client_init_tx_data {
+ u8 enforce_security_flg;
+ u8 tx_status_block_id;
+ u8 tx_sb_index_number;
+ u8 reserved0;
+ __le16 mtu;
+ __le16 reserved1;
+ struct regpair tx_bd_page_base;
+ __le32 reserved2[2];
};
/*
- * cstorm status block, generated by cstorm
+ * client init ramrod data
*/
-struct cstorm_status_block_c {
- __le16 index_values[HC_CSTORM_SB_NUM_INDICES];
- __le16 status_block_index;
- u8 func;
- u8 status_block_id;
- __le32 __flags;
+struct client_init_ramrod_data {
+ struct client_init_general_data general;
+ struct client_init_rx_data rx;
+ struct client_init_tx_data tx;
+ struct client_init_fc_data fc;
};
+
/*
- * host status block
+ * The data contains the client ID needed by the ramrod
*/
-struct host_status_block {
- struct cstorm_status_block_u u_status_block;
- struct cstorm_status_block_c c_status_block;
+struct eth_common_ramrod_data {
+ u32 client_id;
+ u32 reserved1;
};
/*
- * The data for RSS setup ramrod
+ * union for sgl and raw data.
*/
-struct eth_client_setup_ramrod_data {
- u32 client_id;
- u8 is_rdma;
- u8 is_fcoe;
- u16 reserved1;
+union eth_sgl_or_raw_data {
+ __le16 sgl[8];
+ u32 raw_data[4];
};
-
/*
* regular eth FP CQE parameters struct
*/
@@ -2358,8 +2310,8 @@ struct eth_fast_path_rx_cqe {
#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
-#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
-#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6
u8 status_flags;
#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -2380,7 +2332,7 @@ struct eth_fast_path_rx_cqe {
__le16 pkt_len;
__le16 len_on_bd;
struct parsing_flags pars_flags;
- __le16 sgl[8];
+ union eth_sgl_or_raw_data sgl_or_raw_data;
};
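Tying this back to the ETH_FP_CQE_* selectors defined near the top of bnx2x_fw_defs.h: the new SGL_RAW_SEL bits announce which member of the union is valid. A hedged sketch (assuming the flags byte carrying these bits is the CQE's type_error_flags field, which this hunk does not show):

u8 sel = (cqe->type_error_flags & ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL) >>
	 ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT;

if (sel == ETH_FP_CQE_SGL) {
	/* cqe->sgl_or_raw_data.sgl[] lists the SGE indices used */
} else if (sel == ETH_FP_CQE_RAW) {
	/* cqe->sgl_or_raw_data.raw_data[] carries opaque data */
}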
@@ -2392,11 +2344,10 @@ struct eth_halt_ramrod_data {
u32 reserved0;
};
-
/*
* The data for statistics query ramrod
*/
-struct eth_query_ramrod_data {
+struct common_query_ramrod_data {
#if defined(__BIG_ENDIAN)
u8 reserved0;
u8 collect_port;
@@ -2479,9 +2430,9 @@ struct spe_hdr {
__le16 type;
#define SPE_HDR_CONN_TYPE (0xFF<<0)
#define SPE_HDR_CONN_TYPE_SHIFT 0
-#define SPE_HDR_COMMON_RAMROD (0xFF<<8)
-#define SPE_HDR_COMMON_RAMROD_SHIFT 8
- __le16 reserved;
+#define SPE_HDR_FUNCTION_ID (0xFF<<8)
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+ __le16 reserved1;
};
/*
@@ -2489,12 +2440,10 @@ struct spe_hdr {
*/
union eth_specific_data {
u8 protocol_data[8];
- struct regpair mac_config_addr;
- struct eth_client_setup_ramrod_data client_setup_ramrod_data;
+ struct regpair client_init_ramrod_init_data;
struct eth_halt_ramrod_data halt_ramrod_data;
- struct regpair leading_cqe_addr;
struct regpair update_data_addr;
- struct eth_query_ramrod_data query_ramrod_data;
+ struct eth_common_ramrod_data common_ramrod_data;
};
/*
@@ -2519,7 +2468,7 @@ struct eth_tx_bds_array {
*/
struct tstorm_eth_function_common_config {
#if defined(__BIG_ENDIAN)
- u8 leading_client_id;
+ u8 reserved1;
u8 rss_result_mask;
u16 config_flags;
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2532,16 +2481,12 @@ struct tstorm_eth_function_common_config {
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
#elif defined(__LITTLE_ENDIAN)
u16 config_flags;
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2554,18 +2499,14 @@ struct tstorm_eth_function_common_config {
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
u8 rss_result_mask;
- u8 leading_client_id;
+ u8 reserved1;
#endif
u16 vlan_id[2];
};
@@ -2613,90 +2554,42 @@ struct mac_configuration_hdr {
u8 length;
u8 offset;
u16 client_id;
- u32 reserved1;
-};
-
-/*
- * MAC address in list for ramrod
- */
-struct tstorm_cam_entry {
- __le16 lsb_mac_addr;
- __le16 middle_mac_addr;
- __le16 msb_mac_addr;
- __le16 flags;
-#define TSTORM_CAM_ENTRY_PORT_ID (0x1<<0)
-#define TSTORM_CAM_ENTRY_PORT_ID_SHIFT 0
-#define TSTORM_CAM_ENTRY_RSRVVAL0 (0x7<<1)
-#define TSTORM_CAM_ENTRY_RSRVVAL0_SHIFT 1
-#define TSTORM_CAM_ENTRY_RESERVED0 (0xFFF<<4)
-#define TSTORM_CAM_ENTRY_RESERVED0_SHIFT 4
-};
-
-/*
- * MAC filtering: CAM target table entry
- */
-struct tstorm_cam_target_table_entry {
- u8 flags;
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST (0x1<<0)
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST_SHIFT 0
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<1)
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 1
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE (0x1<<2)
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE_SHIFT 2
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC (0x1<<3)
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
-#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
- u8 reserved1;
- u16 vlan_id;
- u32 clients_bit_vector;
+ u16 echo;
+ u16 reserved1;
};
/*
* MAC address in list for ramrod
*/
struct mac_configuration_entry {
- struct tstorm_cam_entry cam_entry;
- struct tstorm_cam_target_table_entry target_table_entry;
-};
-
-/*
- * MAC filtering configuration command
- */
-struct mac_configuration_cmd {
- struct mac_configuration_hdr hdr;
- struct mac_configuration_entry config_table[64];
-};
-
-
-/*
- * MAC address in list for ramrod
- */
-struct mac_configuration_entry_e1h {
__le16 lsb_mac_addr;
__le16 middle_mac_addr;
__le16 msb_mac_addr;
__le16 vlan_id;
- __le16 e1hov_id;
- u8 reserved0;
+ u8 pf_id;
u8 flags;
-#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0)
-#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0
-#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1)
-#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1
-#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2)
-#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2
-#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3)
-#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+ u16 reserved0;
u32 clients_bit_vector;
};
/*
* MAC filtering configuration command
*/
-struct mac_configuration_cmd_e1h {
+struct mac_configuration_cmd {
struct mac_configuration_hdr hdr;
- struct mac_configuration_entry_e1h config_table[32];
+ struct mac_configuration_entry config_table[64];
};
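
mac_configuration_entry stores the station address as three little-endian 16-bit words (msb/middle/lsb) rather than a byte array. The sketch below shows one plausible packing, assuming the first MAC byte lands in the high half of msb_mac_addr; the exact byte order used by the driver is not defined in this header, so treat that convention as an assumption.

/* Illustrative only: splitting a 6-byte MAC into three 16-bit words. */
#include <stdint.h>
#include <stdio.h>

struct mac_words {
	uint16_t lsb;
	uint16_t middle;
	uint16_t msb;
};

static struct mac_words pack_mac(const uint8_t mac[6])
{
	struct mac_words w;

	w.msb    = (uint16_t)((mac[0] << 8) | mac[1]);
	w.middle = (uint16_t)((mac[2] << 8) | mac[3]);
	w.lsb    = (uint16_t)((mac[4] << 8) | mac[5]);
	return w;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	struct mac_words w = pack_mac(mac);

	printf("msb=0x%04x middle=0x%04x lsb=0x%04x\n",
	       (unsigned)w.msb, (unsigned)w.middle, (unsigned)w.lsb);
	return 0;
}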
@@ -2709,65 +2602,6 @@ struct tstorm_eth_approximate_match_multicast_filtering {
/*
- * Configuration parameters per client in Tstorm
- */
-struct tstorm_eth_client_config {
-#if defined(__BIG_ENDIAN)
- u8 reserved0;
- u8 statistics_counter_id;
- u16 mtu;
-#elif defined(__LITTLE_ENDIAN)
- u16 mtu;
- u8 statistics_counter_id;
- u8 reserved0;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 drop_flags;
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
- u16 config_flags;
-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
-#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
-#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
-#elif defined(__LITTLE_ENDIAN)
- u16 config_flags;
-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
-#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
-#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
- u16 drop_flags;
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
-#endif
-};
-
-
-/*
* MAC filtering configuration parameters per port in Tstorm
*/
struct tstorm_eth_mac_filter_config {
@@ -2777,8 +2611,8 @@ struct tstorm_eth_mac_filter_config {
u32 mcast_accept_all;
u32 bcast_drop_all;
u32 bcast_accept_all;
- u32 strict_vlan;
u32 vlan_filter[2];
+ u32 unmatched_unicast;
u32 reserved;
};
@@ -2801,41 +2635,6 @@ struct tstorm_eth_tpa_exist {
/*
- * rx rings pause data for E1h only
- */
-struct ustorm_eth_rx_pause_data_e1h {
-#if defined(__BIG_ENDIAN)
- u16 bd_thr_low;
- u16 cqe_thr_low;
-#elif defined(__LITTLE_ENDIAN)
- u16 cqe_thr_low;
- u16 bd_thr_low;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 cos;
- u16 sge_thr_low;
-#elif defined(__LITTLE_ENDIAN)
- u16 sge_thr_low;
- u16 cos;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 bd_thr_high;
- u16 cqe_thr_high;
-#elif defined(__LITTLE_ENDIAN)
- u16 cqe_thr_high;
- u16 bd_thr_high;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u16 sge_thr_high;
-#elif defined(__LITTLE_ENDIAN)
- u16 sge_thr_high;
- u16 reserved0;
-#endif
-};
-
-
-/*
* Three RX producers for ETH
*/
struct ustorm_eth_rx_producers {
@@ -2857,6 +2656,18 @@ struct ustorm_eth_rx_producers {
/*
+ * cfc delete event data
+ */
+struct cfc_del_event_data {
+ u32 cid;
+ u8 error;
+ u8 reserved0;
+ u16 reserved1;
+ u32 reserved2;
+};
+
+
+/*
* per-port SAFC demo variables
*/
struct cmng_flags_per_port {
@@ -2872,8 +2683,10 @@ struct cmng_flags_per_port {
#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
-#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x7FFFFFF<<5)
-#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 5
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6
};
@@ -2907,30 +2720,92 @@ struct safc_struct_per_port {
u8 __reserved0;
u16 __reserved1;
#endif
+ u8 cos_to_traffic_types[MAX_COS_NUMBER];
+ u32 __reserved2;
u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
};
/*
+ * per-port PFC variables
+ */
+struct pfc_struct_per_port {
+ u8 priority_to_traffic_types[MAX_PFC_PRIORITIES];
+#if defined(__BIG_ENDIAN)
+ u16 pfc_pause_quanta_in_nanosec;
+ u8 __reserved0;
+ u8 priority_non_pausable_mask;
+#elif defined(__LITTLE_ENDIAN)
+ u8 priority_non_pausable_mask;
+ u8 __reserved0;
+ u16 pfc_pause_quanta_in_nanosec;
+#endif
+};
+
+/*
+ * Priority and cos
+ */
+struct priority_cos {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 cos;
+ u8 priority;
+#elif defined(__LITTLE_ENDIAN)
+ u8 priority;
+ u8 cos;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+};
+
+/*
* Per-port congestion management variables
*/
struct cmng_struct_per_port {
struct rate_shaping_vars_per_port rs_vars;
struct fairness_vars_per_port fair_vars;
struct safc_struct_per_port safc_vars;
+ struct pfc_struct_per_port pfc_vars;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved1;
+ u8 dcb_enabled;
+ u8 llfc_mode;
+#elif defined(__LITTLE_ENDIAN)
+ u8 llfc_mode;
+ u8 dcb_enabled;
+ u16 __reserved1;
+#endif
+ struct priority_cos
+ traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
struct cmng_flags_per_port flags;
};
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter {
+ u32 val[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data {
+ struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
+ struct regpair reserved[2];
+};
+
/*
* Dynamic host coalescing init parameters
*/
struct dynamic_hc_config {
u32 threshold[3];
- u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES];
- u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES];
- u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES];
- u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES];
- u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES];
+ u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
};
@@ -2954,7 +2829,7 @@ struct xstorm_per_client_stats {
* Common statistics collected by the Xstorm (per port)
*/
struct xstorm_common_stats {
- struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID];
+ struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
};
/*
@@ -2991,7 +2866,7 @@ struct tstorm_per_client_stats {
*/
struct tstorm_common_stats {
struct tstorm_per_port_stats port_statistics;
- struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID];
+ struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
};
/*
@@ -3012,7 +2887,7 @@ struct ustorm_per_client_stats {
* Protocol-common statistics collected by the Ustorm
*/
struct ustorm_common_stats {
- struct ustorm_per_client_stats client_statistics[MAX_U_STAT_COUNTER_ID];
+ struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
};
/*
@@ -3026,6 +2901,70 @@ struct eth_stats_query {
/*
+ * set mac event data
+ */
+struct set_mac_event_data {
+ u16 echo;
+ u16 reserved0;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+/*
+ * union for all event ring message types
+ */
+union event_data {
+ struct set_mac_event_data set_mac_event;
+ struct cfc_del_event_data cfc_del_event;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data {
+ struct regpair base_addr;
+#if defined(__BIG_ENDIAN)
+ u8 index_id;
+ u8 sb_id;
+ u16 producer;
+#elif defined(__LITTLE_ENDIAN)
+ u16 producer;
+ u8 sb_id;
+ u8 index_id;
+#endif
+ u32 reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits)
+ */
+struct event_ring_msg {
+ u8 opcode;
+ u8 reserved0;
+ u16 reserved1;
+ union event_data data;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next {
+ struct regpair addr;
+ u32 reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem {
+ struct event_ring_msg message;
+ struct event_ring_next next_page;
+};
+
+
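
The event ring carries 128-bit messages whose payload is interpreted according to the opcode, as the union above suggests. The standalone sketch below shows that dispatch; the mirrored structs and the EV_OP_*_SKETCH opcode values are assumptions made for the example, not values defined by this interface.

/* Illustrative only: dispatching on an event ring message. */
#include <stdint.h>
#include <stdio.h>

struct cfc_del_ev { uint32_t cid; uint8_t error; uint8_t r0; uint16_t r1; uint32_t r2; };
struct set_mac_ev { uint16_t echo; uint16_t r0; uint32_t r1; uint32_t r2; };

union ev_data {
	struct set_mac_ev set_mac_event;
	struct cfc_del_ev cfc_del_event;
};

struct ev_msg { uint8_t opcode; uint8_t r0; uint16_t r1; union ev_data data; };

enum { EV_OP_CFC_DEL_SKETCH = 1, EV_OP_SET_MAC_SKETCH = 2 }; /* hypothetical */

static void handle_event(const struct ev_msg *msg)
{
	switch (msg->opcode) {
	case EV_OP_CFC_DEL_SKETCH:
		printf("cfc delete completion, cid=%u error=%u\n",
		       (unsigned)msg->data.cfc_del_event.cid,
		       (unsigned)msg->data.cfc_del_event.error);
		break;
	case EV_OP_SET_MAC_SKETCH:
		printf("set mac completion, echo=%u\n",
		       (unsigned)msg->data.set_mac_event.echo);
		break;
	default:
		printf("unknown opcode %u\n", (unsigned)msg->opcode);
	}
}

int main(void)
{
	struct ev_msg msg = { .opcode = EV_OP_CFC_DEL_SKETCH };

	msg.data.cfc_del_event.cid = 17;
	handle_event(&msg);
	return 0;
}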
+/*
* per-vnic fairness variables
*/
struct fairness_vars_per_vn {
@@ -3064,6 +3003,137 @@ struct fw_version {
/*
+ * Dynamic Host-Coalescing - Driver(host) counters
+ */
+struct hc_dynamic_sb_drv_counters {
+ u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+
+/*
+ * 2 bytes. configuration/state parameters for a single protocol index
+ */
+struct hc_index_data {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+ u8 timeout;
+#elif defined(__LITTLE_ENDIAN)
+ u8 timeout;
+ u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm {
+#if defined(__BIG_ENDIAN)
+ u8 igu_seg_id;
+ u8 igu_sb_id;
+ u8 timer_value;
+ u8 __flags;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __flags;
+ u8 timer_value;
+ u8 igu_sb_id;
+ u8 igu_seg_id;
+#endif
+ u32 time_to_expire;
+};
+
+/*
+ * hold PCI identification variables- used in various places in firmware
+ */
+struct pci_entity {
+#if defined(__BIG_ENDIAN)
+ u8 vf_valid;
+ u8 vf_id;
+ u8 vnic_id;
+ u8 pf_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 pf_id;
+ u8 vnic_id;
+ u8 vf_id;
+ u8 vf_valid;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data {
+ struct regpair host_sb_addr;
+ struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
+ struct pci_entity p_func;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv0;
+ u8 dhc_qzone_id;
+ u8 __dynamic_hc_level;
+ u8 same_igu_sb_1b;
+#elif defined(__LITTLE_ENDIAN)
+ u8 same_igu_sb_1b;
+ u8 __dynamic_hc_level;
+ u8 dhc_qzone_id;
+ u8 rsrv0;
+#endif
+ struct regpair rsrv1[2];
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_sp_status_block_data {
+ struct regpair host_sb_addr;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv;
+ u8 igu_seg_id;
+ u8 igu_sb_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 igu_sb_id;
+ u8 igu_seg_id;
+ u16 rsrv;
+#endif
+ struct pci_entity p_func;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e1x {
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
+ struct hc_sb_data common;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e2 {
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
+ struct hc_sb_data common;
+};
+
+
+/*
* FW version stored in first line of pram
*/
struct pram_fw_version {
@@ -3086,11 +3156,21 @@ struct pram_fw_version {
/*
+ * Ethernet slow path element
+ */
+union protocol_common_specific_data {
+ u8 protocol_data[8];
+ struct regpair phy_address;
+ struct regpair mac_config_addr;
+ struct common_query_ramrod_data query_ramrod_data;
+};
+
+/*
* The send queue element
*/
struct protocol_common_spe {
struct spe_hdr hdr;
- struct regpair phy_address;
+ union protocol_common_specific_data data;
};
@@ -3123,7 +3203,7 @@ struct rate_shaping_vars_per_vn {
*/
struct slow_path_element {
struct spe_hdr hdr;
- u8 protocol_data[8];
+ struct regpair protocol_data;
};
@@ -3136,3 +3216,97 @@ struct stats_indication_flags {
};
+/*
+ * per-port PFC variables
+ */
+struct storm_pfc_struct_per_port {
+#if defined(__BIG_ENDIAN)
+ u16 mid_mac_addr;
+ u16 msb_mac_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 msb_mac_addr;
+ u16 mid_mac_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 pfc_pause_quanta_in_nanosec;
+ u16 lsb_mac_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 lsb_mac_addr;
+ u16 pfc_pause_quanta_in_nanosec;
+#endif
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct storm_cmng_struct_per_port {
+ struct storm_pfc_struct_per_port pfc_vars;
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data {
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct ustorm_queue_zone_data {
+ struct ustorm_eth_rx_producers eth_rx_producers;
+ struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data {
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u8 valid;
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 valid;
+ u16 reserved0;
+#endif
+ u32 reserved1;
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data {
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 65b26cbfe3e..a9d54874a55 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -97,6 +97,9 @@
#define MISC_AEU_BLOCK 35
#define PGLUE_B_BLOCK 36
#define IGU_BLOCK 37
+#define ATC_BLOCK 38
+#define QM_4PORT_BLOCK 39
+#define XSEM_4PORT_BLOCK 40
/* Returns the index of start or end of a specific block stage in ops array*/
@@ -148,5 +151,46 @@ union init_op {
struct raw_op raw;
};
+#define INITOP_SET 0 /* set the HW directly */
+#define INITOP_CLEAR 1 /* clear the HW directly */
+#define INITOP_INIT 2 /* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+ dma_addr_t page_mapping;
+ void *page;
+ u32 size;
+};
+
+struct ilt_client_info {
+ u32 page_size;
+ u16 start;
+ u16 end;
+ u16 client_num;
+ u16 flags;
+#define ILT_CLIENT_SKIP_INIT 0x1
+#define ILT_CLIENT_SKIP_MEM 0x2
+};
+
+struct bnx2x_ilt {
+ u32 start_line;
+ struct ilt_line *lines;
+ struct ilt_client_info clients[4];
+#define ILT_CLIENT_CDU 0
+#define ILT_CLIENT_QM 1
+#define ILT_CLIENT_SRC 2
+#define ILT_CLIENT_TM 3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
#endif /* BNX2X_INIT_H */
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 2b1363a6fe7..e65de784182 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -151,6 +151,15 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
bnx2x_init_ind_wr(bp, addr, data, len);
}
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
+{
+ u32 wb_write[2];
+
+ wb_write[0] = val_lo;
+ wb_write[1] = val_hi;
+ REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
+}
+
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
{
const u8 *data = NULL;
@@ -477,18 +486,30 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
- if (r_order == MAX_RD_ORD)
+ if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
- REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+ else
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
- if (CHIP_IS_E1H(bp)) {
+ if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
/* MPS w_order optimal TH presently TH
* 128 0 0 2
* 256 1 1 3
* >=512 2 2 3
*/
- val = ((w_order == 0) ? 2 : 3);
+ /* DMAE is special */
+ if (CHIP_IS_E2(bp)) {
+ /* E2 can use optimal TH */
+ val = w_order;
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
+ } else {
+ val = ((w_order == 0) ? 2 : 3);
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
+ }
+
REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
@@ -498,9 +519,344 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
- REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
}
+
+ /* Validate number of tags supported by device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
+ val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
+ val &= 0xFF;
+ if (val <= 0x20)
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This code hides the low-level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC 0
+#define ILT_MEMOP_FREE 1
+
+/* the phys address is shifted right 12 bits and has a valid bit (1)
+ * added at the 53rd bit
+ * then since this is a wide register (TM)
+ * we split it into two 32 bit writes
+ */
+#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
+#define ILT_RANGE(f, l) (((l) << 10) | f)
+
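
The two ILT_ADDR macros split a 64-bit page mapping so that the low ILT word carries address bits 12..43 and the high word carries bits 44..63 plus the valid bit (bit 20 of the high word, i.e. the 53rd bit overall). A standalone rendering of that split with a sample page-aligned address:

/* Illustrative only: reproducing the ILT_ADDR1/ILT_ADDR2 split. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ilt_addr1(uint64_t x)
{
	return (uint32_t)((x >> 12) & 0xFFFFFFFF);   /* address bits 12..43 */
}

static uint32_t ilt_addr2(uint64_t x)
{
	return (uint32_t)((1u << 20) | (x >> 44));   /* valid bit + bits 44..63 */
}

int main(void)
{
	uint64_t page_mapping = 0x0000001234567000ULL; /* page-aligned DMA addr */

	printf("ILT word1 = 0x%08x, word2 = 0x%08x\n",
	       (unsigned)ilt_addr1(page_mapping),
	       (unsigned)ilt_addr2(page_mapping));
	/* word1 = 0x01234567, word2 = 0x00100000 (valid bit only) */
	return 0;
}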
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
+ u32 size, u8 memop)
+{
+ if (memop == ILT_MEMOP_FREE) {
+ BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
+ return 0;
+ }
+ BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
+ if (!line->page)
+ return -1;
+ line->size = size;
+ return 0;
+}
+
+
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
+{
+ int i, rc;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (!ilt || !ilt->lines)
+ return -1;
+
+ if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+ return 0;
+
+ for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+ rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
+ ilt_cli->page_size, memop);
+ }
+ return rc;
+}
+
+int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+{
+ int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
+static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
+ dma_addr_t page_mapping)
+{
+ u32 reg;
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+ bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
+ int idx, u8 initop)
+{
+ dma_addr_t null_mapping;
+ int abs_idx = ilt->start_line + idx;
+
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
+ break;
+ case INITOP_CLEAR:
+ null_mapping = 0;
+ bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
+ break;
+ }
+}
+
+void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli,
+ u32 ilt_start, u8 initop)
+{
+ u32 start_reg = 0;
+ u32 end_reg = 0;
+
+ /* The boundary is either SET or INIT; CLEAR is treated as SET,
+ and for now SET behaves the same as INIT */
+
+ /* find the appropriate regs */
+ if (CHIP_IS_E1(bp)) {
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_PSWRQ_QM0_L2P;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_PSWRQ_TM0_L2P;
+ break;
+ }
+ REG_WR(bp, start_reg + BP_FUNC(bp)*4,
+ ILT_RANGE((ilt_start + ilt_cli->start),
+ (ilt_start + ilt_cli->end)));
+ } else {
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+ break;
+ }
+ REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
+ REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
+ }
+}
+
+void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli, u8 initop)
+{
+ int i;
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+ bnx2x_ilt_line_init_op(bp, ilt, i, initop);
+
+ /* init/clear the ILT boundaries */
+ bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
+}
+
+void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
+ int cli_num, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
+}
+
+void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+{
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
+static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
+ u32 psz_reg, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/*
+ * called during init common stage; ILT clients should be initialized
+ * prior to calling this function
+ */
+void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+{
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
+ PXP2_REG_RQ_CDU_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
+ PXP2_REG_RQ_QM_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
+ PXP2_REG_RQ_SRC_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
+ PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
+#define QM_INIT_MIN_CID_COUNT 31
+#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
+
+/* called during init port stage */
+void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
+{
+ int port = BP_PORT(bp);
+
+ if (QM_INIT(qm_cid_count)) {
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
+ qm_cid_count/16 - 1);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+ }
+}
+
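
The connection-number register programmed above takes qm_cid_count/16 - 1. A tiny standalone check of that arithmetic for a few sample CID counts:

/* Illustrative only: the value written to QM_REG_CONNNUM_0 above. */
#include <stdio.h>

int main(void)
{
	int counts[] = { 32, 64, 1024 };
	int i;

	for (i = 0; i < 3; i++)
		printf("qm_cid_count=%4d -> CONNNUM=%d\n",
		       counts[i], counts[i] / 16 - 1);
	/* 32 -> 1, 64 -> 3, 1024 -> 63 */
	return 0;
}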
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
+{
+ int i;
+ u32 wb_data[2];
+
+ wb_data[0] = wb_data[1] = 0;
+
+ for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+ REG_WR(bp, QM_REG_BASEADDR + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
+ wb_data, 2);
+
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
+ wb_data, 2);
+ }
+ }
+}
+
+/* called during init common stage */
+void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
+{
+ if (!QM_INIT(qm_cid_count))
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+
+/* called during init func stage */
+void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
+{
+ int i;
+ int port = BP_PORT(bp);
+
+ /* Initialize T2 */
+ for (i = 0; i < src_cid_count-1; i++)
+ t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
+
+ /* tell the searcher where the T2 table is */
+ REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+ bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
+ U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+ bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
+ U64_LO((u64)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)),
+ U64_HI((u64)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)));
}
#endif /* BNX2X_INIT_OPS_H */
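
bnx2x_src_init_t2() above chains the T2 table into a free list by writing, into each entry's next field, the bus address of the entry that follows it. Below is a minimal standalone sketch of the same chaining, using a mirrored struct and a made-up base address; t2_mapping here is hypothetical and only stands in for the real DMA mapping.

/* Illustrative only: chaining a T2-style table into a free list. */
#include <stdint.h>
#include <stdio.h>

struct src_ent_sketch {
	uint8_t  opaque[56];
	uint64_t next;          /* bus address of the next entry */
};

int main(void)
{
	struct src_ent_sketch t2[4];
	uint64_t t2_mapping = 0x10000000ULL;  /* hypothetical DMA base */
	int i, count = 4;

	for (i = 0; i < count - 1; i++)
		t2[i].next = t2_mapping + (uint64_t)(i + 1) * sizeof(*t2);

	for (i = 0; i < count - 1; i++)
		printf("t2[%d].next = 0x%llx\n", i,
		       (unsigned long long)t2[i].next);
	return 0;
}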
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 0383e306631..3e99bf9c42b 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -28,7 +28,7 @@
/********************************************************/
#define ETH_HLEN 14
-#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) /* 16 for CRC + VLAN + LLC */
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
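
The BigMAC rx/tx maximum-size registers written further down are loaded with ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD, which with the constants above works out to 9600 + 14 + 8 + 8 = 9630 bytes. A trivial standalone check; the *_SKETCH names are local to the example.

/* Illustrative only: max frame size as programmed into the BigMAC. */
#include <stdio.h>

#define ETH_HLEN_SKETCH               14
#define ETH_OVREHEAD_SKETCH           (ETH_HLEN_SKETCH + 8 + 8)
#define ETH_MAX_JUMBO_PKT_SIZE_SKETCH 9600

int main(void)
{
	printf("BigMAC max frame = %d bytes\n",
	       ETH_MAX_JUMBO_PKT_SIZE_SKETCH + ETH_OVREHEAD_SKETCH); /* 9630 */
	return 0;
}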
@@ -168,50 +168,19 @@
/**********************************************************/
/* INTERFACE */
/**********************************************************/
-#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
- bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \
- DEFAULT_PHY_DEV_ADDR, \
+
+#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_write(_bp, _phy, \
+ (_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
- bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \
- DEFAULT_PHY_DEV_ADDR, \
+#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_read(_bp, _phy, \
+ (_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
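
Both macros above reach a clause-45 register through a clause-22 access by adding the low nibble of the register offset to the bank base. A quick standalone demonstration of that address composition; the bank and offset values are made up for the example.

/* Illustrative only: bank + low nibble of the register offset. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t bank = 0x8340;  /* hypothetical MDIO register bank */
	uint16_t addr = 0x0013;  /* hypothetical register offset */

	printf("effective reg = 0x%04x\n", bank + (addr & 0xf)); /* 0x8343 */
	return 0;
}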
-static void bnx2x_set_serdes_access(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
- /* Set Clause 22 */
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
- udelay(500);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
- udelay(500);
- /* Set Clause 45 */
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0);
-}
-static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags)
-{
- struct bnx2x *bp = params->bp;
-
- if (phy_flags & PHY_XGXS_FLAG) {
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
- params->port*0x18, 0);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
- DEFAULT_PHY_DEV_ADDR);
- } else {
- bnx2x_set_serdes_access(params);
-
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
- params->port*0x10,
- DEFAULT_PHY_DEV_ADDR);
- }
-}
-
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -408,9 +377,60 @@ static u8 bnx2x_emac_enable(struct link_params *params,
return 0;
}
+static void bnx2x_update_bmac2(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ /*
+ * Set rx control: Strip CRC and enable BigMAC to relay
+ * control packets to the system as well
+ */
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 val = 0x14;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ /* Enable BigMAC to react to received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
+ wb_data, 2);
+ udelay(30);
+
+ /* Tx control */
+ val = 0xc0;
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
+ wb_data, 2);
+
+ val = 0x8000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+ wb_data, 2);
+
+ /* mac control */
+ val = 0x3; /* Enable RX and TX */
+ if (is_lb) {
+ val |= 0x4; /* Local loopback */
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+}
-static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
+static u8 bnx2x_bmac1_enable(struct link_params *params,
+ struct link_vars *vars,
u8 is_lb)
{
struct bnx2x *bp = params->bp;
@@ -420,17 +440,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
u32 wb_data[2];
u32 val;
- DP(NETIF_MSG_LINK, "Enabling BigMAC\n");
- /* reset and unreset the BigMac */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- msleep(1);
-
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
- /* enable access for bmac registers */
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+ DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
/* XGXS control */
wb_data[0] = 0x3c;
@@ -510,180 +520,121 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
wb_data, 2);
}
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
- REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
- val = 0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- val = 1;
- REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
- REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
- REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
- REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
- REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
- REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
- vars->mac_type = MAC_TYPE_BMAC;
return 0;
}
-static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
-{
- struct bnx2x *bp = params->bp;
- u32 val;
-
- if (phy_flags & PHY_XGXS_FLAG) {
- DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
- val = XGXS_RESET_BITS;
-
- } else { /* SerDes */
- DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
- val = SERDES_RESET_BITS;
- }
-
- val = val << (params->port*16);
-
- /* reset and unreset the SerDes/XGXS */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
- val);
- udelay(500);
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
- val);
- bnx2x_set_phy_mdio(params, phy_flags);
-}
-
-void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_bmac2_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
- u8 link_10g;
u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
- if (params->switch_cfg == SWITCH_CFG_1G)
- vars->phy_flags = PHY_SERDES_FLAG;
- else
- vars->phy_flags = PHY_XGXS_FLAG;
- vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
-
- vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
-
- if (vars->link_up) {
- DP(NETIF_MSG_LINK, "phy link up\n");
-
- vars->phy_link_up = 1;
- vars->duplex = DUPLEX_FULL;
- switch (vars->link_status &
- LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
- case LINK_10THD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_10TFD:
- vars->line_speed = SPEED_10;
- break;
-
- case LINK_100TXHD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_100T4:
- case LINK_100TXFD:
- vars->line_speed = SPEED_100;
- break;
-
- case LINK_1000THD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_1000TFD:
- vars->line_speed = SPEED_1000;
- break;
-
- case LINK_2500THD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_2500TFD:
- vars->line_speed = SPEED_2500;
- break;
-
- case LINK_10GTFD:
- vars->line_speed = SPEED_10000;
- break;
-
- case LINK_12GTFD:
- vars->line_speed = SPEED_12000;
- break;
+ DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
- case LINK_12_5GTFD:
- vars->line_speed = SPEED_12500;
- break;
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ udelay(30);
- case LINK_13GTFD:
- vars->line_speed = SPEED_13000;
- break;
+ /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
- case LINK_15GTFD:
- vars->line_speed = SPEED_15000;
- break;
+ udelay(30);
- case LINK_16GTFD:
- vars->line_speed = SPEED_16000;
- break;
+ /* tx MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+ wb_data, 2);
- default:
- break;
- }
+ udelay(30);
- if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
- vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
- else
- vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+ udelay(30);
- if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
- vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
- else
- vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
+ /* set rx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
+ wb_data, 2);
+ udelay(30);
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- if (vars->line_speed &&
- ((vars->line_speed == SPEED_10) ||
- (vars->line_speed == SPEED_100))) {
- vars->phy_flags |= PHY_SGMII_FLAG;
- } else {
- vars->phy_flags &= ~PHY_SGMII_FLAG;
- }
- }
+ /* set tx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
+ wb_data, 2);
+ udelay(30);
+ /* set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
+ wb_data, 2);
+ udelay(30);
+ bnx2x_update_bmac2(params, vars, is_lb);
- /* anything 10 and over uses the bmac */
- link_10g = ((vars->line_speed == SPEED_10000) ||
- (vars->line_speed == SPEED_12000) ||
- (vars->line_speed == SPEED_12500) ||
- (vars->line_speed == SPEED_13000) ||
- (vars->line_speed == SPEED_15000) ||
- (vars->line_speed == SPEED_16000));
- if (link_10g)
- vars->mac_type = MAC_TYPE_BMAC;
- else
- vars->mac_type = MAC_TYPE_EMAC;
+ return 0;
+}
- } else { /* link down */
- DP(NETIF_MSG_LINK, "phy link down\n");
+u8 bnx2x_bmac_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ u8 rc, port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 val;
+ /* reset and unreset the BigMac */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ udelay(10);
- vars->phy_link_up = 0;
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- vars->line_speed = 0;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ /* enable access for bmac registers */
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
- /* indicate no mac active */
- vars->mac_type = MAC_TYPE_NONE;
- }
+ /* Enable BMAC according to BMAC type*/
+ if (CHIP_IS_E2(bp))
+ rc = bnx2x_bmac2_enable(params, vars, is_lb);
+ else
+ rc = bnx2x_bmac1_enable(params, vars, is_lb);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+ val = 0;
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
- DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
- vars->link_status, vars->phy_link_up);
- DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
- vars->line_speed, vars->duplex, vars->flow_ctrl);
+ vars->mac_type = MAC_TYPE_BMAC;
+ return rc;
}
+
static void bnx2x_update_mng(struct link_params *params, u32 link_status)
{
struct bnx2x *bp = params->bp;
@@ -706,13 +657,25 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
nig_bmac_enable) {
- /* Clear Rx Enable bit in BMAC_CONTROL register */
- REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
-
+ if (CHIP_IS_E2(bp)) {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ } else {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr +
+ BIGMAC_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ }
msleep(1);
}
}
@@ -800,62 +763,69 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
-static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port)
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+ u32 mdc_mdio_access, u8 port)
{
- u32 emac_base;
-
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- /* All MDC/MDIO is directed through single EMAC */
+ u32 emac_base = 0;
+ switch (mdc_mdio_access) {
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC1;
+ else
+ emac_base = GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
if (REG_RD(bp, NIG_REG_PORT_SWAP))
emac_base = GRCBASE_EMAC0;
else
emac_base = GRCBASE_EMAC1;
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
break;
default:
- emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
break;
}
return emac_base;
}
-u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 val)
+u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
{
u32 tmp, saved_mode;
u8 i, rc = 0;
- u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
- saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
EMAC_MDIO_MODE_CLOCK_CNT);
tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
- REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
+ REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
udelay(40);
/* address */
- tmp = ((phy_addr << 21) | (devad << 16) | reg |
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ tmp = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -866,15 +836,15 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
rc = -EFAULT;
} else {
/* data */
- tmp = ((phy_addr << 21) | (devad << 16) | val |
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, mdio_ctrl +
+ tmp = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
@@ -888,42 +858,41 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
}
/* Restore the saved mode */
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
return rc;
}
-u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 *ret_val)
+u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
u32 val, saved_mode;
u16 i;
u8 rc = 0;
- u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
- saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
- val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL |
+ saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
EMAC_MDIO_MODE_CLOCK_CNT));
val |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
- REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
+ REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
udelay(40);
/* address */
- val = ((phy_addr << 21) | (devad << 16) | reg |
+ val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
- val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -937,15 +906,15 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
} else {
/* data */
- val = ((phy_addr << 21) | (devad << 16) |
+ val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
- val = REG_RD(bp, mdio_ctrl +
+ val = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
@@ -961,32 +930,262 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
}
/* Restore the saved mode */
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
return rc;
}
-static void bnx2x_set_aer_mmd(struct link_params *params,
- struct link_vars *vars)
+u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val)
{
- struct bnx2x *bp = params->bp;
- u32 ser_lane;
- u16 offset;
+ u8 phy_index;
+ /**
+ * Probe for the phy according to the given phy_addr, and execute
+ * the read request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_read(params->bp,
+ &params->phy[phy_index], devad,
+ reg, ret_val);
+ }
+ }
+ return -EINVAL;
+}
+u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val)
+{
+ u8 phy_index;
+ /**
+ * Probe for the phy according to the given phy_addr, and execute
+ * the write request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_write(params->bp,
+ &params->phy[phy_index], devad,
+ reg, val);
+ }
+ }
+ return -EINVAL;
+}
+
+static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ u32 ser_lane;
+ u16 offset, aer_val;
+ struct bnx2x *bp = params->bp;
ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- offset = (vars->phy_flags & PHY_XGXS_FLAG) ?
- (params->phy_addr + ser_lane) : 0;
+ offset = phy->addr + ser_lane;
+ if (CHIP_IS_E2(bp))
+ aer_val = 0x2800 + offset - 1;
+ else
+ aer_val = 0x3800 + offset;
+ CL45_WR_OVER_CL22(bp, phy,
+ MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
+}
+static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ CL45_WR_OVER_CL22(bp, phy,
+ MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0x3800);
+}
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
+/******************************************************************/
+/* Internal phy section */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ /* Set Clause 22 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+ udelay(500);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+ udelay(500);
+ /* Set Clause 45 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+ val = SERDES_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ bnx2x_set_serdes_access(bp, port);
+
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
+ port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
}
-static void bnx2x_set_master_ln(struct link_params *params)
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u32 val;
+ DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+ port = params->port;
+
+ val = XGXS_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
+ port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ params->phy[INT_PHY].def_md_devad);
+}
+
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_10g;
+ u8 port = params->port;
+
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+
+ if (vars->link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->duplex = DUPLEX_FULL;
+ switch (vars->link_status &
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case LINK_10THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_10TFD:
+ vars->line_speed = SPEED_10;
+ break;
+
+ case LINK_100TXHD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_100T4:
+ case LINK_100TXFD:
+ vars->line_speed = SPEED_100;
+ break;
+
+ case LINK_1000THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_1000TFD:
+ vars->line_speed = SPEED_1000;
+ break;
+
+ case LINK_2500THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_2500TFD:
+ vars->line_speed = SPEED_2500;
+ break;
+
+ case LINK_10GTFD:
+ vars->line_speed = SPEED_10000;
+ break;
+
+ case LINK_12GTFD:
+ vars->line_speed = SPEED_12000;
+ break;
+
+ case LINK_12_5GTFD:
+ vars->line_speed = SPEED_12500;
+ break;
+
+ case LINK_13GTFD:
+ vars->line_speed = SPEED_13000;
+ break;
+
+ case LINK_15GTFD:
+ vars->line_speed = SPEED_15000;
+ break;
+
+ case LINK_16GTFD:
+ vars->line_speed = SPEED_16000;
+ break;
+
+ default:
+ break;
+ }
+ vars->flow_ctrl = 0;
+ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+ if (!vars->flow_ctrl)
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ } else {
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ }
+
+ /* anything 10 and over uses the bmac */
+ link_10g = ((vars->line_speed == SPEED_10000) ||
+ (vars->line_speed == SPEED_12000) ||
+ (vars->line_speed == SPEED_12500) ||
+ (vars->line_speed == SPEED_13000) ||
+ (vars->line_speed == SPEED_15000) ||
+ (vars->line_speed == SPEED_16000));
+ if (link_10g)
+ vars->mac_type = MAC_TYPE_BMAC;
+ else
+ vars->mac_type = MAC_TYPE_EMAC;
+
+ } else { /* link down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+ }
+
+ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
+ vars->link_status, vars->phy_link_up);
+ DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+
+static void bnx2x_set_master_ln(struct link_params *params,
+ struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
@@ -995,47 +1194,44 @@ static void bnx2x_set_master_ln(struct link_params *params)
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
/* set the master_ln for AN */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
&new_master_ln);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2 ,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
(new_master_ln | ser_lane));
}
-static u8 bnx2x_reset_unicore(struct link_params *params)
+static u8 bnx2x_reset_unicore(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 set_serdes)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
/* reset the unicore */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
(mii_control |
MDIO_COMBO_IEEO_MII_CONTROL_RESET));
- if (params->switch_cfg == SWITCH_CFG_1G)
- bnx2x_set_serdes_access(params);
+ if (set_serdes)
+ bnx2x_set_serdes_access(bp, params->port);
/* wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
/* the reset erased the previous bank value */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
@@ -1051,7 +1247,8 @@ static u8 bnx2x_reset_unicore(struct link_params *params)
}
-static void bnx2x_set_swap_lanes(struct link_params *params)
+static void bnx2x_set_swap_lanes(struct link_params *params,
+ struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
/* Each two bits represents a lane number:
@@ -1069,71 +1266,62 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_RX_LN_SWAP,
(rx_lane_swap |
MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TX_LN_SWAP,
(tx_lane_swap |
MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
-static void bnx2x_set_parallel_detection(struct link_params *params,
- u8 phy_flags)
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 control2;
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
&control2);
- if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
- DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
- params->speed_cap_mask, control2);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+ phy->speed_cap_mask, control2);
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
control2);
- if ((phy_flags & PHY_XGXS_FLAG) &&
- (params->speed_cap_mask &
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
&control2);
@@ -1142,15 +1330,13 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
control2);
/* Disable parallel detection of HiG */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
@@ -1158,7 +1344,8 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
}
}
-static void bnx2x_set_autoneg(struct link_params *params,
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
struct link_vars *vars,
u8 enable_cl73)
{
@@ -1166,9 +1353,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
u16 reg_val;
/* CL37 Autoneg */
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -1179,15 +1364,13 @@ static void bnx2x_set_autoneg(struct link_params *params,
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* Enable/Disable Autodetection */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
@@ -1198,14 +1381,12 @@ static void bnx2x_set_autoneg(struct link_params *params,
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
/* Enable TetonII and BAM autoneg */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
@@ -1218,23 +1399,20 @@ static void bnx2x_set_autoneg(struct link_params *params,
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_UCTRL,
0xe);
/* Enable BAM Station Manager*/
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -1242,20 +1420,18 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV2,
&reg_val);
- if (params->speed_cap_mask &
+ if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
- if (params->speed_cap_mask &
+ if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV2,
reg_val);
@@ -1266,38 +1442,35 @@ static void bnx2x_set_autoneg(struct link_params *params,
} else /* CL73 Autoneg Disabled */
reg_val = 0;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
/* program SerDes, forced speed */
-static void bnx2x_program_serdes(struct link_params *params,
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* program duplex, disable autoneg and sgmii*/
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
- if (params->req_duplex == DUPLEX_FULL)
+ if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* program speed
- needed only if the speed is greater than 1G (2.5G or 10G) */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* clearing the speed value before setting the right speed */
@@ -1320,14 +1493,14 @@ static void bnx2x_program_serdes(struct link_params *params,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
-static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
+static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -1335,29 +1508,28 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
/* configure the 48 bits for BAM AN */
/* set extended capabilities */
- if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
- if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_UP1, val);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_UP3, 0x400);
}
-static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+ struct link_params *params, u16 *ieee_fc)
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
/* resolve pause mode and advertisement
* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
- switch (params->req_flow_ctrl) {
+ switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
*ieee_fc |=
@@ -1385,30 +1557,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
}
-static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
+static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
/* for AN, we are always publishing full duplex */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1, val);
}
-static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
@@ -1417,14 +1589,12 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
&mii_control);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
(mii_control |
@@ -1432,16 +1602,14 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
(mii_control |
@@ -1450,7 +1618,8 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
}
}
-static void bnx2x_initialize_sgmii_process(struct link_params *params,
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+ struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -1458,8 +1627,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
/* in SGMII mode, the unicore is always slave */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
@@ -1468,8 +1636,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
@@ -1479,8 +1646,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
/* set speed, disable autoneg */
u16 mii_control;
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
@@ -1508,18 +1674,17 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
}
/* setting the full duplex */
- if (params->req_duplex == DUPLEX_FULL)
+ if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
mii_control);
} else { /* AN mode */
/* enable and restart AN */
- bnx2x_restart_autoneg(params, 0);
+ bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -1549,91 +1714,24 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
default:
break;
}
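+	/* pause_result bit 0 holds the link partner's symmetric pause
+	 * ability and bit 1 its asymmetric pause ability; mirror both
+	 * into the reported link_status
+	 */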
+ if (pause_result & (1<<0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1<<1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
}
-static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
- struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u8 ext_phy_addr;
- u16 ld_pause; /* local */
- u16 lp_pause; /* link partner */
- u16 an_complete; /* AN complete */
- u16 pause_result;
- u8 ret = 0;
- u32 ext_phy_type;
- u8 port = params->port;
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* read twice */
-
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_STATUS, &an_complete);
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_STATUS, &an_complete);
-
- if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
- ret = 1;
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
- pause_result = (ld_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
- pause_result |= (lp_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
- pause_result);
- bnx2x_pause_resolve(vars, pause_result);
- if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
- ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, &ld_pause);
-
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LP, &lp_pause);
- pause_result = (ld_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
- pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
-
- bnx2x_pause_resolve(vars, pause_result);
- DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
- pause_result);
- }
- }
- return ret;
-}
-
-static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
+static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 pd_10g, status2_1000x;
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ if (phy->req_line_speed != SPEED_AUTO_NEG)
+ return 0;
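+	/* the 1000X status register is read twice; the first read may
+	 * return stale latched bits
+	 */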
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
&status2_1000x);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
&status2_1000x);
@@ -1643,8 +1741,7 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
return 1;
}
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
&pd_10g);
@@ -1657,9 +1754,10 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
return 0;
}
-static void bnx2x_flow_ctrl_resolve(struct link_params *params,
- struct link_vars *vars,
- u32 gp_status)
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
{
struct bnx2x *bp = params->bp;
u16 ld_pause; /* local driver */
@@ -1669,12 +1767,13 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
/* resolve from gp_status in case of AN complete and not sgmii */
- if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
- (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
- (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
- if (bnx2x_direct_parallel_detect_used(params)) {
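+	/* precedence: a forced flow control setting wins; for a forced
+	 * speed the auto-advertised default (req_fc_auto_adv) is used;
+	 * only when autoneg completed and the phy is not in SGMII mode
+	 * is the AN result resolved below
+	 */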
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+ (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+ if (bnx2x_direct_parallel_detect_used(phy, params)) {
vars->flow_ctrl = params->req_fc_auto_adv;
return;
}
@@ -1684,13 +1783,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1,
&ld_pause);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_LP_ADV1,
&lp_pause);
@@ -1703,14 +1800,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
pause_result);
} else {
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
&ld_pause);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
&lp_pause);
@@ -1722,26 +1816,18 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
pause_result);
}
bnx2x_pause_resolve(vars, pause_result);
- } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
- (bnx2x_ext_phy_resolve_fc(params, vars))) {
- return;
- } else {
- if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
- vars->flow_ctrl = params->req_fc_auto_adv;
- else
- vars->flow_ctrl = params->req_flow_ctrl;
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
-static void bnx2x_check_fallback_to_cl37(struct link_params *params)
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 rx_status, ustat_val, cl37_fsm_recieved;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_RX0,
MDIO_RX0_RX_STATUS,
&rx_status);
@@ -1749,16 +1835,14 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
/* Step 2: Check CL73 state machine */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_USTAT1,
&ustat_val);
@@ -1773,8 +1857,7 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
}
/* Step 3: Check CL37 Message Pages received to indicate LP
supports only CL37 */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_REMOTE_PHY,
MDIO_REMOTE_PHY_MISC_RX_STATUS,
&cl37_fsm_recieved);
@@ -1792,25 +1875,45 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
connected to a device which does not support cl73, but does support
cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
/* Disable CL73 */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
0);
/* Restart CL37 autoneg */
- bnx2x_restart_autoneg(params, 0);
+ bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
}
-static u8 bnx2x_link_settings_status(struct link_params *params,
- struct link_vars *vars,
- u32 gp_status,
- u8 ext_phy_link_up)
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ if (bnx2x_direct_parallel_detect_used(phy, params))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed;
+ u16 new_line_speed, gp_status;
u8 rc = 0;
- vars->link_status = 0;
+ /* Read gp_status */
+ CL45_RD_OVER_CL22(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
gp_status);
@@ -1823,7 +1926,12 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
else
vars->duplex = DUPLEX_HALF;
- bnx2x_flow_ctrl_resolve(params, vars, gp_status);
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
switch (gp_status & GP_STATUS_SPEED_MASK) {
case GP_STATUS_10M:
@@ -1905,56 +2013,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
return -EINVAL;
}
- /* Upon link speed change set the NIG into drain mode.
- Comes to deals with possible FIFO glitch due to clk change
- when speed is decreased without link down indicator */
- if (new_line_speed != vars->line_speed) {
- if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
- ext_phy_link_up) {
- DP(NETIF_MSG_LINK, "Internal link speed %d is"
- " different than the external"
- " link speed %d\n", new_line_speed,
- vars->line_speed);
- vars->phy_link_up = 0;
- return 0;
- }
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
- msleep(1);
- }
vars->line_speed = new_line_speed;
- vars->link_status |= LINK_STATUS_SERDES_LINK;
-
- if ((params->req_line_speed == SPEED_AUTO_NEG) &&
- ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
- vars->autoneg = AUTO_NEG_ENABLED;
-
- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
- vars->autoneg |= AUTO_NEG_COMPLETE;
- vars->link_status |=
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
- }
-
- vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
- vars->link_status |=
- LINK_STATUS_PARALLEL_DETECTION_USED;
-
- }
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- vars->link_status |=
- LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
-
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- vars->link_status |=
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
} else { /* link_down */
DP(NETIF_MSG_LINK, "phy link down\n");
@@ -1963,38 +2022,32 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->autoneg = AUTO_NEG_DISABLED;
vars->mac_type = MAC_TYPE_NONE;
- if ((params->req_line_speed == SPEED_AUTO_NEG) &&
- ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
- bnx2x_check_fallback_to_cl37(params);
+ bnx2x_check_fallback_to_cl37(phy, params);
}
}
DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
gp_status, vars->phy_link_up, vars->line_speed);
- DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
- " autoneg 0x%x\n",
- vars->duplex,
- vars->flow_ctrl, vars->autoneg);
- DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
-
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
static void bnx2x_set_gmii_tx_driver(struct link_params *params)
{
struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
u16 lp_up2;
u16 tx_driver;
u16 bank;
/* read precomp */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
@@ -2008,8 +2061,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
@@ -2018,8 +2070,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
bank,
MDIO_TX0_TX_DRIVER, tx_driver);
}
@@ -2027,7 +2078,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
}
static u8 bnx2x_emac_program(struct link_params *params,
- u32 line_speed, u32 duplex)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -2039,7 +2090,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
(EMAC_MODE_25G_MODE |
EMAC_MODE_PORT_MII_10M |
EMAC_MODE_HALF_DUPLEX));
- switch (line_speed) {
+ switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
break;
@@ -2058,371 +2109,1373 @@ static u8 bnx2x_emac_program(struct link_params *params,
default:
/* 10G not valid for EMAC */
- DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed);
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
return -EINVAL;
}
- if (duplex == DUPLEX_HALF)
+ if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
mode);
- bnx2x_set_led(params, LED_MODE_OPER, line_speed);
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
}
-/*****************************************************************************/
-/* External Phy section */
-/*****************************************************************************/
-void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+ struct link_params *params)
{
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+
+ u16 bank, i = 0;
+ struct bnx2x *bp = params->bp;
+
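+	/* write the per-lane RX equalizer boost and TX driver preemphasis
+	 * values taken from the phy configuration
+	 */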
+ for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
+ CL45_WR_OVER_CL22(bp, phy,
+ bank,
+ MDIO_RX0_RX_EQ_BOOST,
+ phy->rx_preemphasis[i]);
+ }
+
+ for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+ CL45_WR_OVER_CL22(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER,
+ phy->tx_preemphasis[i]);
+ }
}
-static void bnx2x_ext_phy_reset(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u32 ext_phy_type;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+ u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+ bnx2x_set_preemphasis(phy, params);
- DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port);
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* The PHY reset is controled by GPIO 1
- * Give it 1ms of reset pulse
- */
- if (vars->phy_flags & PHY_XGXS_FLAG) {
+ /* forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG ||
+ (SINGLE_MEDIA_DIRECT(params) &&
+ params->loopback_mode == LOOPBACK_EXT)) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "XGXS Direct\n");
- break;
+ /* disable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, 0);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
+ /* program speed and duplex */
+ bnx2x_program_serdes(phy, params, vars);
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, params->port);
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisment(phy, params);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0xa040);
- break;
+ /* program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisment(phy, params,
+ vars->ieee_fc);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- break;
+ /* enable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, enable_cl73);
+ }
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ bnx2x_initialize_sgmii_process(phy, params, vars);
+ }
+}
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
- break;
+static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 rc;
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd_serdes(params->bp, phy);
+ rc = bnx2x_reset_unicore(params, phy, 1);
+ /* reset the SerDes and wait for the reset bit to return low */
+ if (rc != 0)
+ return rc;
+ bnx2x_set_aer_mmd_serdes(params->bp, phy);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- DP(NETIF_MSG_LINK, "XGXS 8072\n");
+ return rc;
+}
- /* Unset Low Power Mode and SW reset */
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 rc;
+ vars->phy_flags = PHY_XGXS_FLAG;
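+	/* select SGMII mode for forced 10/100, or when no speed is forced
+	 * and the speed capability mask covers only speeds below 1G
+	 */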
+ if ((phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10))) ||
+ (!phy->req_line_speed &&
+ (phy->speed_cap_mask >=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->speed_cap_mask <
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ ))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
- break;
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd_xgxs(params, phy);
+ bnx2x_set_master_ln(params, phy);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- DP(NETIF_MSG_LINK, "XGXS 8073\n");
+ rc = bnx2x_reset_unicore(params, phy, 0);
+ /* reset the SerDes and wait for the reset bit to return low */
+ if (rc != 0)
+ return rc;
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ bnx2x_set_aer_mmd_xgxs(params, phy);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ /* setting the masterLn_def again after the reset */
+ bnx2x_set_master_ln(params, phy);
+ bnx2x_set_swap_lanes(params, phy);
+
+ return rc;
+}
+
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 cnt, ctrl;
+ /* Wait for soft reset to get cleared up to 1 sec */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
+ if (!(ctrl & (1<<15)))
break;
+ msleep(1);
+ }
+ DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+ return cnt;
+}
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+ u8 port = params->port;
+ u32 mask;
+ struct bnx2x *bp = params->bp;
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ /* setting the status to report on link up
+ for either XGXS or SerDes */
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, params->port);
- break;
+ if (params->switch_cfg == SWITCH_CFG_10G) {
+ mask = (NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_XGXS0_LINK_STATUS);
+ DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ } else { /* SerDes */
+ mask = NIG_MASK_SERDES0_LINK_STATUS;
+ DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+ }
+ bnx2x_bits_en(bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ mask);
+
+ DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+ (params->switch_cfg == SWITCH_CFG_10G),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+ DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, params->port);
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+ u8 exp_mi_int)
+{
+ u32 latch_status = 0;
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
- break;
+ /**
+ * Disable the MI INT (external phy int) by writing 1 to the
+ * status register. The link down indication is an active-high
+ * signal, so in this case we need to write the status to clear the XOR
+ */
+ /* Read Latched signals */
+ latch_status = REG_RD(bp,
+ NIG_REG_LATCH_STATUS_0 + port*8);
+ DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+ /* Handle only those with latched-signal=up.*/
+ if (exp_mi_int)
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+ else
+ bnx2x_bits_dis(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- params->ext_phy_config);
- break;
+ if (latch_status & 1) {
+
+ /* For all latched-signal=up : Re-Arm Latch signals */
+ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+ (latch_status & 0xfffe) | (latch_status & 1));
+ }
+ /* For all latched-signal=up,Write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+ struct link_vars *vars, u8 is_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ /* first reset all status
+ * we assume only one line will change at a time */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
+ if (vars->phy_link_up) {
+ if (is_10g) {
+ /* Disable the 10G link interrupt
+ * by writing 1 to the status register
+ */
+ DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ NIG_STATUS_XGXS0_LINK10G);
+
+ } else if (params->switch_cfg == SWITCH_CFG_10G) {
+ /* Disable the link interrupt
+ * by writing 1 to the relevant lane
+ * in the status register
+ */
+ u32 ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
+ vars->line_speed);
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ ((1 << ser_lane) <<
+ NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
+
+ } else { /* SerDes */
+ DP(NETIF_MSG_LINK, "SerDes phy link up\n");
+ /* Disable the link interrupt
+ * by writing 1 to the status register
+ */
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ NIG_STATUS_SERDES0_LINK_STATUS);
}
- } else { /* SerDes */
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "SerDes Direct\n");
- break;
+ }
+}
+
+static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u8 *str_ptr = str;
+ u32 mask = 0xf0000000;
+ u8 shift = 8*4;
+ u8 digit;
+ u8 remove_leading_zeros = 1;
+ if (*len < 10) {
+ /* Need more than 10 chars for this format */
+ *str_ptr = '\0';
+ (*len)--;
+ return -EINVAL;
+ }
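+	/* print 'num' as two 16-bit hex fields separated by '.',
+	 * dropping leading zeros within each field
+	 */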
+ while (shift > 0) {
+
+ shift -= 4;
+ digit = ((num & mask) >> shift);
+ if (digit == 0 && remove_leading_zeros) {
+ mask = mask >> 4;
+ continue;
+ } else if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ mask = mask >> 4;
+ if (shift == 4*4) {
+ *str_ptr = '.';
+ str_ptr++;
+ (*len)--;
+ remove_leading_zeros = 1;
+ }
+ }
+ return 0;
+}
+
+
+static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
+u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len)
+{
+ struct bnx2x *bp;
+ u32 spirom_ver = 0;
+ u8 status = 0;
+ u8 *ver_p = version;
+ u16 remain_len = len;
+ if (version == NULL || params == NULL)
+ return -EINVAL;
+ bp = params->bp;
+
+ /* Extract first external phy*/
+ version[0] = '\0';
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+ if (params->phy[EXT_PHY1].format_fw_ver) {
+ status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p += (len - remain_len);
+ }
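+	/* when two external phys are present (dual media), append the
+	 * second external phy version separated by a '/'
+	 */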
+ if ((params->num_phys == MAX_PHYS) &&
+ (params->phy[EXT_PHY2].ver_addr != 0)) {
+ spirom_ver = REG_RD(bp,
+ params->phy[EXT_PHY2].ver_addr);
+ if (params->phy[EXT_PHY2].format_fw_ver) {
+ *ver_p = '/';
+ ver_p++;
+ remain_len--;
+ status |= params->phy[EXT_PHY2].format_fw_ver(
+ spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p = version + (len - remain_len);
+ }
+ }
+ *ver_p = '\0';
+ return status;
+}
+
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+
+ if (phy->req_line_speed != SPEED_1000) {
+ u32 md_devad;
+
+ DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+ /* change the uni_phy_addr in the nig */
+ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port*0x18));
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
+ msleep(200);
+ /* set aer mmd back */
+ bnx2x_set_aer_mmd_xgxs(params, phy);
+
+ /* and md_devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ md_devad);
+
+ } else {
+ u16 mii_ctrl;
+ DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ bnx2x_cl45_read(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ &mii_ctrl);
+ bnx2x_cl45_write(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ mii_ctrl |
+ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+ }
+}
+
+/*
+ *------------------------------------------------------------------------
+ * bnx2x_override_led_value -
+ *
+ * Override the led value of the requested led
+ *
+ *------------------------------------------------------------------------
+ */
+u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
+ u32 led_idx, u32 value)
+{
+ u32 reg_val;
+
+ /* If port 0 then use EMAC0, else use EMAC1*/
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ DP(NETIF_MSG_LINK,
+ "bnx2x_override_led_value() port %x led_idx %d value %d\n",
+ port, led_idx, value);
+
+ switch (led_idx) {
+ case 0: /* 10MB led */
+ /* Read the current value of the LED register in
+ the EMAC block */
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 10M_OVERRIDE bit,
+ otherwise reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_10MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+ case 1: /*100MB led */
+ /*Read the current value of the LED register in
+ the EMAC block */
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 100M_OVERRIDE bit,
+ otherwise reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_100MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+ case 2: /* 1000MB led */
+ /* Read the current value of the LED register in the
+ EMAC block */
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
+ reset it. */
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+ case 3: /* 2500MB led */
+ /* Read the current value of the LED register in the
+ EMAC block*/
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
+ reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+ case 4: /*10G led */
+ if (port == 0) {
+ REG_WR(bp, NIG_REG_LED_10G_P0,
+ value);
+ } else {
+ REG_WR(bp, NIG_REG_LED_10G_P1,
+ value);
+ }
+ break;
+ case 5: /* TRAFFIC led */
+ /* Find if the traffic control is via BMAC or EMAC */
+ if (port == 0)
+ reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
+ else
+ reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
+
+ /* Override the traffic led in the EMAC:*/
+ if (reg_val == 1) {
+ /* Read the current value of the LED register in
+ the EMAC block */
+ reg_val = REG_RD(bp, emac_base +
+ EMAC_REG_EMAC_LED);
+ /* Set the TRAFFIC_OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the TRAFFIC bit, otherwise
+ reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
+ (reg_val & ~EMAC_LED_TRAFFIC);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ } else { /* Override the traffic led in the BMAC: */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
+ value);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "bnx2x_override_led_value() unknown led index %d "
+ "(should be 0-5)\n", led_idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- DP(NETIF_MSG_LINK, "SerDes 5482\n");
- bnx2x_ext_phy_hw_reset(bp, params->port);
+u8 bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed)
+{
+ u8 port = params->port;
+ u16 hw_led_mode = params->hw_led_mode;
+ u8 rc = 0, phy_idx;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+ DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+ speed, hw_led_mode);
+ /* In case an external phy implements set_link_led, let it handle its LED */
+ for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].set_link_led) {
+ params->phy[phy_idx].set_link_led(
+ &params->phy[phy_idx], params, mode);
+ }
+ }
+
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+ break;
+
+ case LED_MODE_OPER:
+ /**
+ * For all other phys, OPER mode is the same as ON, so when the
+ * link is down, do nothing
+ **/
+ if (!vars->link_up)
break;
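+		/* fall through: with the link up, OPER behaves the same as ON */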
+ case LED_MODE_ON:
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ /**
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+ } else {
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ }
- default:
- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
- params->ext_phy_config);
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
+ port*4, 0);
+ /* Set blinking rate to ~15.9Hz */
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+ LED_BLINK_RATE_VAL);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+ port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp & (~EMAC_LED_OVERRIDE)));
+
+ if (CHIP_IS_E1(bp) &&
+ ((speed == SPEED_2500) ||
+ (speed == SPEED_1000) ||
+ (speed == SPEED_100) ||
+ (speed == SPEED_10))) {
+ /* On Everest 1 Ax chip versions for speeds less than
+ 10G LED scheme is different */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+ port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+ port*4, 1);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+ mode);
+ break;
+ }
+ return rc;
+
+}
+
+/**
+ * This function reflects the actual link state, as read DIRECTLY from
+ * the HW
+ */
+u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 gp_status = 0, phy_index = 0;
+ u8 ext_phy_link_up = 0, serdes_phy_type;
+ struct link_vars temp_vars;
+
+ CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ /* link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
+
+ switch (params->num_phys) {
+ case 1:
+ /* No external PHY */
+ return 0;
+ case 2:
+ ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+ &params->phy[EXT_PHY1],
+ params, &temp_vars);
+ break;
+ case 3: /* Dual Media */
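+		/* poll only the external phys whose media type (fiber or
+		 * not) matches the requested is_serdes interface
+		 */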
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ serdes_phy_type = ((params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_XFP_FIBER));
+
+ if (is_serdes != serdes_phy_type)
+ continue;
+ if (params->phy[phy_index].read_status) {
+ ext_phy_link_up |=
+ params->phy[phy_index].read_status(
+ &params->phy[phy_index],
+ params, &temp_vars);
+ }
+ }
+ break;
+ }
+ if (ext_phy_link_up)
+ return 0;
+ return -ESRCH;
+}
+
+static u8 bnx2x_link_initialize(struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 rc = 0;
+ u8 phy_index, non_ext_phy;
+ struct bnx2x *bp = params->bp;
+ /**
+ * When an external phy exists, the line speed is the speed the
+ * external phy links up at. For a direct-only board, the
+ * line_speed during initialization is equal to the
+ * req_line_speed
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+ /**
+ * Initialize the internal phy in case this is a direct board
+ * (no external phys), or this board has an external phy which
+ * requires the internal XGXS to be initialized first.
+ */
+
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(
+ &params->phy[INT_PHY],
+ params, vars);
+
+ /* init ext phy and enable link state int */
+ non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+
+ if (non_ext_phy ||
+ (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ bnx2x_set_parallel_detection(phy, params);
+ bnx2x_init_internal_phy(phy, params, vars);
+ }
+
+ /* Init external phy*/
+ if (!non_ext_phy)
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ /**
+ * No need to initialize second phy in case of first
+ * phy only selection. In case of second phy, we do
+ * need to initialize the first phy, since they are
+ * connected.
+ **/
+ if (phy_index == EXT_PHY2 &&
+ (bnx2x_phy_selection(params) ==
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+ DP(NETIF_MSG_LINK, "Not initializing"
+ "second phy\n");
+ continue;
+ }
+ params->phy[phy_index].config_init(
+ &params->phy[phy_index],
+ params, vars);
+ }
+
+ /* Reset the interrupt indication after phy was initialized */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+ params->port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+ return rc;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* reset the SerDes/XGXS */
+ REG_WR(params->bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ /* HW reset */
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
+static u8 bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* update shared memory */
+ vars->link_status = 0;
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* disable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ msleep(10);
+
+ /* reset BigMac */
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ return 0;
+}
+
+static u8 bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u8 rc = 0;
+
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+
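+	/* 10G links run through the BMAC; lower speeds use the EMAC */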
+ if (link_10g) {
+ bnx2x_bmac_enable(params, vars, 0);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, SPEED_10000);
+ } else {
+ rc = bnx2x_emac_program(params, vars);
+
+ bnx2x_emac_enable(params, vars, 0);
+
+ /* AN complete? */
+ if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ SINGLE_MEDIA_DIRECT(params))
+ bnx2x_set_gmii_tx_driver(params);
+ }
+
+ /* PBF - link up */
+ if (!(CHIP_IS_E2(bp)))
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ msleep(20);
+ return rc;
+}
+/**
+ * The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ * to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ * phy (XGXS) needs to be up, as well as the external link of the
+ * phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ * external phy needs to be up, and at least one of the 2
+ * external phy links must be up.
+ */
+u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ struct link_vars phy_vars[MAX_PHYS];
+ u8 port = params->port;
+ u8 link_10g, phy_index;
+ u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
+ u8 is_mi_int = 0;
+ u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+ u8 active_external_phy = INT_PHY;
+ vars->link_status = 0;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ phy_vars[phy_index].flow_ctrl = 0;
+ phy_vars[phy_index].link_status = 0;
+ phy_vars[phy_index].line_speed = 0;
+ phy_vars[phy_index].duplex = DUPLEX_FULL;
+ phy_vars[phy_index].phy_link_up = 0;
+ phy_vars[phy_index].link_up = 0;
+ }
+
+ DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+ is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+ port*0x18) > 0);
+ DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ is_mi_int,
+ REG_RD(bp,
+ NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+ /* disable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ /**
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ struct bnx2x_phy *phy = &params->phy[phy_index];
+ if (!phy->read_status)
+ continue;
+ /* Read link status and params of this ext phy */
+ cur_link_up = phy->read_status(phy, params,
+ &phy_vars[phy_index]);
+ if (cur_link_up) {
+ DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+ phy_index);
+ } else {
+ DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+ phy_index);
+ continue;
+ }
+
+ if (!ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ active_external_phy = phy_index;
+ } else {
+ switch (bnx2x_phy_selection(params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ /**
+ * In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+ * It's not clear how to reset the link on the second phy
+ **/
+ active_external_phy = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ /**
+ * In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ **/
+ active_external_phy = EXT_PHY2;
+ break;
+ default:
+ /**
+ * Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+ * - DEFAULT should be overridden during initialization
+ **/
+ DP(NETIF_MSG_LINK, "Invalid link indication"
+ " mpc=0x%x. DISABLING LINK !!!\n",
+ params->multi_phy_config);
+ ext_phy_link_up = 0;
+ break;
+ }
+ }
+ }
+ prev_line_speed = vars->line_speed;
+ /**
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
+ if (params->phy[INT_PHY].read_status)
+ params->phy[INT_PHY].read_status(
+ &params->phy[INT_PHY],
+ params, vars);
+ /**
+ * The INT_PHY flow control result resides in the vars. This includes
+ * the case where the speed or flow control are not set to AUTO.
+ * Otherwise, the active external phy flow control result is set
+ * to the vars. The ext_phy_line_speed is needed to check if the
+ * speed is different between the internal phy and the external phy.
+ * Such a case may be the result of an intermediate link speed change.
+ */
+ if (active_external_phy > INT_PHY) {
+ vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+ /**
+ * Link speed is taken from the XGXS. AN and FC result from
+ * the external phy.
+ */
+ vars->link_status |= phy_vars[active_external_phy].link_status;
+
+ /**
+ * if active_external_phy is the first PHY and its link is up,
+ * disable TX on the second external PHY
+ */
+ if (active_external_phy == EXT_PHY1) {
+ if (params->phy[EXT_PHY2].phy_specific_func) {
+ DP(NETIF_MSG_LINK, "Disabling TX on"
+ " EXT_PHY2\n");
+ params->phy[EXT_PHY2].phy_specific_func(
+ &params->phy[EXT_PHY2],
+ params, DISABLE_TX);
+ }
+ }
+
+ ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+ vars->duplex = phy_vars[active_external_phy].duplex;
+ if (params->phy[active_external_phy].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+ active_external_phy);
+ }
+
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL) {
+ bnx2x_rearm_latch_signal(bp, port,
+ phy_index ==
+ active_external_phy);
break;
}
}
+ DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
+ /**
+ * Upon link speed change set the NIG into drain mode. This deals
+ * with a possible FIFO glitch due to the clock change when the
+ * speed is decreased without a link down indicator.
+ */
+
+ if (vars->phy_link_up) {
+ if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+ (ext_phy_line_speed != vars->line_speed)) {
+ DP(NETIF_MSG_LINK, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d\n", vars->line_speed,
+ ext_phy_line_speed);
+ vars->phy_link_up = 0;
+ } else if (prev_line_speed != vars->line_speed) {
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
+ + params->port*4, 0);
+ msleep(1);
+ }
+ }
+
+ /* anything 10 and over uses the bmac */
+ link_10g = ((vars->line_speed == SPEED_10000) ||
+ (vars->line_speed == SPEED_12000) ||
+ (vars->line_speed == SPEED_12500) ||
+ (vars->line_speed == SPEED_13000) ||
+ (vars->line_speed == SPEED_15000) ||
+ (vars->line_speed == SPEED_16000));
+
+ bnx2x_link_int_ack(params, vars, link_10g);
+
+ /**
+ * In case the external phy link is up and the internal link is down
+ * (probably not initialized yet, right after link initialization),
+ * it needs to be initialized.
+ * Note that after a link down-up as a result of a cable plug, the xgxs
+ * link would probably come up again without the need to
+ * initialize it.
+ */
+ if (!(SINGLE_MEDIA_DIRECT(params))) {
+ DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d\n", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST);
+ if (!(params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST)
+ && ext_phy_link_up && !vars->phy_link_up) {
+ vars->line_speed = ext_phy_line_speed;
+ if (vars->line_speed < SPEED_1000)
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ bnx2x_init_internal_phy(&params->phy[INT_PHY],
+ params,
+ vars);
+ }
+ }
+ /**
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up
+ */
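+ /*
+ * Worked out from the expression below (illustrative only): with
+ * P = vars->phy_link_up and E = ext_phy_link_up,
+ * - SINGLE_MEDIA_DIRECT: link_up = P (the external result is not needed)
+ * - SINGLE_MEDIA / DUAL_MEDIA: link_up = P && E, where E was set in
+ *   step 1 if at least one external phy reported link up
+ */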
+ vars->link_up = (vars->phy_link_up &&
+ (ext_phy_link_up ||
+ SINGLE_MEDIA_DIRECT(params)));
+
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ return rc;
+}
+
+
+/*****************************************************************************/
+/* External Phy section */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ msleep(1);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
- u32 shmem_base, u32 spirom_ver)
+ u32 spirom_ver, u32 ver_addr)
{
DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
(u16)(spirom_ver>>16), (u16)spirom_ver, port);
- REG_WR(bp, shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].ext_phy_fw_version),
- spirom_ver);
+
+ if (ver_addr)
+ REG_WR(bp, ver_addr, spirom_ver);
}
-static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port,
- u32 ext_phy_type, u8 ext_phy_addr,
- u32 shmem_base)
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
{
u16 fw_ver1, fw_ver2;
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2, &fw_ver2);
- bnx2x_save_spirom_version(bp, port, shmem_base,
- (u32)(fw_ver1<<16 | fw_ver2));
+ bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+ phy->ver_addr);
}
-
-static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr, u32 shmem_base)
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
{
- u16 val, fw_ver1, fw_ver2, cnt;
- /* For the 32 bits registers in 8481, access via MDIO2ARM interface.*/
- /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA819, 0x0014);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81A,
- 0xc200);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81B,
- 0x0000);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81C,
- 0x0300);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA817,
- 0x0009);
+ u16 val;
+ struct bnx2x *bp = params->bp;
+ /* read-modify-write pause advertising */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA818,
- &val);
- if (val & 1)
- break;
- udelay(5);
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n");
- bnx2x_save_spirom_version(bp, port,
- shmem_base, 0);
- return;
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
}
+ DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ u8 ret = 0;
+ /* read twice */
- /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA819, 0x0000);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA817, 0x000A);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA818,
- &val);
- if (val & 1)
- break;
- udelay(5);
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
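+ /*
+ * Illustrative layout, assuming MDIO_AN_REG_ADV_PAUSE_MASK covers
+ * the Pause/Asym-Pause advertisement bits (10:11): the shifts above
+ * pack the local {asym, pause} bits into bits 3:2 of pause_result
+ * and the link-partner bits into bits 1:0, so e.g. both sides
+ * advertising symmetric+asymmetric pause gives pause_result = 0xf
+ * before bnx2x_pause_resolve() maps it to a flow-control setting.
+ */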
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+ pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
}
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n");
- bnx2x_save_spirom_version(bp, port,
- shmem_base, 0);
+ return ret;
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ if (val & (1<<5))
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ if ((val & (1<<0)) == 0)
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/* common BCM8073/BCM8727 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_10 ||
+ phy->req_line_speed == SPEED_100) {
+ vars->flow_ctrl = phy->req_flow_ctrl;
return;
}
- /* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81B,
- &fw_ver1);
- /* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81C,
- &fw_ver2);
+ if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+ (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+ u16 pause_result;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
- bnx2x_save_spirom_version(bp, port,
- shmem_base, (fw_ver2<<16) | fw_ver1);
+ bnx2x_pause_resolve(vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+ pause_result);
+ }
}
-static void bnx2x_bcm8072_external_rom_boot(struct link_params *params)
+static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+ /* Boot port from external ROM */
+ /* EDC grst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
- /* Need to wait 200ms after reset */
- msleep(200);
- /* Boot port from external ROM
- * Set ser_boot_ctl bit in the MISC_CTRL1 register
- */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ /* ucode reboot and rst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* set micro reset = 0 */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
- /* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 100ms for code download via SPI port */
- msleep(100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ /* Release srst bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* wait for 120ms for code download via SPI port */
+ msleep(120);
/* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- /* Wait 100ms */
- msleep(100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
+}
+
+static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val == 0) {
+ /* Mustn't set low power mode in 8073 A0 */
+ return;
+ }
+
+ /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
+ val &= ~(1<<13);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
- bnx2x_save_bcm_spirom_ver(bp, port,
- ext_phy_type,
- ext_phy_addr,
- params->shmem_base);
+ /* PLL controls */
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
+
+ /* Tx Controls */
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
+
+ /* Rx Controls */
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
+
+ /* Enable PLL sequencer (use read-modify-write to set bit 13) */
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
+ val |= (1<<13);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
}
-static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
+/******************************************************************/
+/* BCM8073 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
{
/* This is only required for 8073A1, version 102 only */
-
- struct bnx2x *bp = params->bp;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
u16 val;
/* Read 8073 HW revision*/
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_CHIP_REV, &val);
@@ -2431,9 +3484,7 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
return 0;
}
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2, &val);
@@ -2444,15 +3495,11 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
return 1;
}
-static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
+static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
{
- struct bnx2x *bp = params->bp;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
u16 val, cnt, cnt1 ;
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_CHIP_REV, &val);
@@ -2466,9 +3513,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
poll Dev1, Reg $C820: */
for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
&val);
@@ -2485,9 +3530,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
XAUI workaround has completed),
then continue on with system initialization.*/
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_XAUI_WA, &val);
if (val & (1<<15)) {
@@ -2505,143 +3548,391 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
return -EINVAL;
}
-static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr,
- u32 ext_phy_type,
- u32 shmem_base)
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
{
- /* Boot port from external ROM */
- /* EDC grst */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x0001);
+ /* Force KR or KX */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
- /* ucode reboot and rst */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x008c);
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 cl37_val;
+ struct bnx2x *bp = params->bp;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
- /* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
+}
- /* Release srst bit */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0, tmp1;
+ u8 gpio_port;
+ DP(NETIF_MSG_LINK, "Init 8073\n");
- /* wait for 100ms for code download via SPI port */
- msleep(100);
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- bnx2x_save_bcm_spirom_ver(bp, port,
- ext_phy_type,
- ext_phy_addr,
- shmem_base);
-}
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004);
-static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr,
- u32 shmem_base)
-{
- bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- shmem_base);
-}
+ bnx2x_8073_set_pause_cl37(params, phy, vars);
-static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr,
- u32 shmem_base)
-{
- bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- shmem_base);
+ bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-}
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
-static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
- /* Need to wait 100ms after reset */
- msleep(100);
+ DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
- /* Micro controller re-boot */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x018B);
+ /* Enable CL37 BAM */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
- /* Set soft reset */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_807x_force_10G(bp, phy);
+ DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+ return 0;
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+ }
+ if (phy->req_line_speed != SPEED_AUTO_NEG) {
+ if (phy->req_line_speed == SPEED_10000) {
+ val = (1<<7);
+ } else if (phy->req_line_speed == SPEED_2500) {
+ val = (1<<5);
+ /* Note that 2.5G works only
+ when used with 1G advertisement */
+ } else
+ val = (1<<5);
+ } else {
+ val = 0;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= (1<<7);
+
+ /* Note that 2.5G works only when
+ used with 1G advertisement */
+ if (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ val |= (1<<5);
+ DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+ }
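+ /*
+ * Example (derived from the bits used above and from the Autoneg
+ * Restart debug print at the end of this function): auto-negotiation
+ * with both 1G/2.5G and 10G enabled in speed_cap_mask gives
+ * val = (1<<5) | (1<<7) = 0xa0.
+ */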
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+ if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+ (phy->req_line_speed == SPEED_2500)) {
+ u16 phy_ver;
+ /* Allow 2.5G for A1 and above */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+ &phy_ver);
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
+ if (phy_ver > 0)
+ tmp1 |= 1;
+ else
+ tmp1 &= 0xfffe;
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+ tmp1 &= 0xfffe;
+ }
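+ /* bit 0 of MDIO_AN_REG_8073_2_5G (tmp1) gates the 2.5G advertisement;
+ it is set above only for chip rev A1 and above */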
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+ /* Add support for CL37 (passive mode) II */
- /* wait for 150ms for microcode load */
- msleep(150);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+ (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
- /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ /* Add support for CL37 (passive mode) III */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
- msleep(200);
- bnx2x_save_bcm_spirom_ver(bp, port,
- ext_phy_type,
- ext_phy_addr,
- params->shmem_base);
+ /* The SNR will improve by about 2dB by changing the
+ BW and FFE main tap. The rest of the commands are
+ executed after the link is up */
+ if (bnx2x_8073_is_snr_needed(bp, phy))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+ 0xFB0C);
+
+ /* Enable FEC (Forward Error Correction) Request in the AN */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+ tmp1 |= (1<<15);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Restart autoneg */
+ msleep(500);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+ ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+ return 0;
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0;
+ u16 val1, val2;
+ u16 link_status = 0;
+ u16 an1000_status = 0;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+
+ DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1);
+
+ /* clear the interrupt LASI status register */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+ /* Check the link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ link_up = ((val1 & 4) == 4);
+ DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+ if (link_up &&
+ ((phy->req_line_speed != SPEED_10000))) {
+ if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+ return 0;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+ /* Check the link status on 1.1.2 */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+ "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+ link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+ if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+ /* The SNR will improve by about 2dB by
+ changing the BW and FFE main tap. */
+ /* The 1st write to change the FFE main
+ tap is done before restarting AN */
+ /* Change PLL Bandwidth in EDC
+ register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+ 0x26BC);
+
+ /* Change CDR Bandwidth in EDC register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+ 0x0333);
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &link_status);
+
+ /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ if (link_up) {
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_8073_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+}
+
+/******************************************************************/
+/* BCM8705 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "init 8705\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ /* BCM8705 doesn't have microcode, hence the 0 */
+ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+ return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, rx_sd;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "read status 8705\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+
+ DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+ if (link_up) {
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
}
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
- u32 ext_phy_type, u8 ext_phy_addr,
- u8 tx_en)
+/******************************************************************/
+/* SFP+ module Section */
+/******************************************************************/
+static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port,
+ u8 tx_en)
{
u16 val;
DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
tx_en, port);
/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
&val);
@@ -2651,58 +3942,42 @@ static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
else
val |= (1<<15);
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
val);
}
-static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
+static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
if (byte_cnt > 16) {
DP(NETIF_MSG_LINK, "Reading from eeprom is"
" is limited to 0xf\n");
return -EINVAL;
}
/* Set the read command byte count */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
(byte_cnt | 0xa000));
/* Set the read command address */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
addr);
/* Activate read command */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
0x2c0f);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2721,18 +3996,14 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2743,14 +4014,12 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
return -EINVAL;
}
-static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
+static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
if (byte_cnt > 16) {
DP(NETIF_MSG_LINK, "Reading from eeprom is"
@@ -2759,40 +4028,30 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
}
/* Need to read from 1.8000 to clear it */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
&val);
/* Set the read command byte count */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
((byte_cnt < 2) ? 2 : byte_cnt));
/* Set the read command address */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
addr);
/* Set the destination address */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
0x8004,
MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
0x8002);
@@ -2802,9 +4061,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2823,18 +4080,14 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2846,21 +4099,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
- return bnx2x_8726_read_sfp_module_eeprom(params, addr,
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
+ return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
byte_cnt, o_buf);
- else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
- return bnx2x_8727_read_sfp_module_eeprom(params, addr,
+ else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
+ return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
byte_cnt, o_buf);
return -EINVAL;
}
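+/*
+ * Minimal usage sketch (mirrors the call made in bnx2x_get_edc_mode()
+ * below; SFP_EEPROM_CON_TYPE_ADDR is the connector-type byte):
+ *
+ *	u8 con_type;
+ *	if (bnx2x_read_sfp_module_eeprom(phy, params,
+ *					 SFP_EEPROM_CON_TYPE_ADDR, 1,
+ *					 &con_type) == 0)
+ *		DP(NETIF_MSG_LINK, "connector type 0x%x\n", con_type);
+ */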
-static u8 bnx2x_get_edc_mode(struct link_params *params,
+static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
@@ -2868,10 +4121,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
*edc_mode = EDC_MODE_LIMITING;
/* First check for copper cable */
- if (bnx2x_read_sfp_module_eeprom(params,
- SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_CON_TYPE_ADDR,
+ 1,
+ &val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
@@ -2883,7 +4137,8 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
/* Check if its active cable( includes SFP+ module)
of passive cable*/
- if (bnx2x_read_sfp_module_eeprom(params,
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) !=
@@ -2923,10 +4178,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
if (check_limiting_mode) {
u8 options[SFP_EEPROM_OPTIONS_SIZE];
- if (bnx2x_read_sfp_module_eeprom(params,
- SFP_EEPROM_OPTIONS_ADDR,
- SFP_EEPROM_OPTIONS_SIZE,
- options) != 0) {
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_OPTIONS_ADDR,
+ SFP_EEPROM_OPTIONS_SIZE,
+ options) != 0) {
DP(NETIF_MSG_LINK, "Failed to read Option"
" field from module EEPROM\n");
return -EINVAL;
@@ -2939,17 +4195,17 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
return 0;
}
-
/* This function read the relevant field from the module ( SFP+ ),
and verify it is compliant with this board */
-static u8 bnx2x_verify_sfp_module(struct link_params *params)
+static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u32 val;
- u32 fw_resp;
+ u32 val, cmd;
+ u32 fw_resp, fw_cmd_param;
char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
-
+ phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].config));
@@ -2959,29 +4215,44 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
return 0;
}
- /* Ask the FW to validate the module */
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+ /* Use specific phy request */
+ cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+ } else if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+ /* Use first phy request only in case of non-dual media*/
+ if (DUAL_MEDIA(params)) {
+ DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+ "verification\n");
+ return -EINVAL;
+ }
+ cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+ } else {
+ /* No support for OPT MDL detection */
DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
- "verification\n");
+ "verification\n");
return -EINVAL;
}
- fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL);
+ fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+ fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
DP(NETIF_MSG_LINK, "Approved module\n");
return 0;
}
/* format the warning message */
- if (bnx2x_read_sfp_module_eeprom(params,
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
- if (bnx2x_read_sfp_module_eeprom(params,
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -2989,22 +4260,78 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
+ netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
params->port, vendor_name, vendor_pn);
+ phy->flags |= FLAGS_SFP_NOT_APPROVED;
return -EINVAL;
}
-static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
- u16 edc_mode)
+static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+ struct link_params *params)
+
{
+ u8 val;
struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+ u16 timeout;
+ /* Initialization time after hot-plug may take up to 300ms for
+ some phy types (e.g. JDSU) */
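+ /* 60 polls x 5 ms sleep gives a ~300 ms worst-case wait */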
+ for (timeout = 0; timeout < 60; timeout++) {
+ if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+ == 0) {
+ DP(NETIF_MSG_LINK, "SFP+ module initialization "
+ "took %d ms\n", timeout * 5);
+ return 0;
+ }
+ msleep(5);
+ }
+ return -EINVAL;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 is_power_up) {
+ /* Make sure the GPIOs are not being used for LED mode */
+ u16 val;
+ /*
+ * In the GPIO register, bit 4 is used to determine if the GPIOs are
+ * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
+ * output
+ * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
+ * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
+ * where the 1st bit is the over-current(only input), and 2nd bit is
+ * for power( only output )
+ */
+
+ /*
+ * In case the NOC feature is disabled and power is up, set the GPIO
+ * control as input to enable listening to the over-current indication
+ */
+ if (phy->flags & FLAGS_NOC)
+ return;
+ if (!(phy->flags &
+ FLAGS_NOC) && is_power_up)
+ val = (1<<4);
+ else
+ /*
+ * Set GPIO control to OUTPUT, and set the power bit
+ * according to is_power_up
+ */
+ val = ((!(is_power_up)) << 1);
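+ /*
+ * Resulting values (NOC disabled, per the early return above):
+ * power-up -> val = 1<<4 = 0x10 (GPIOs as input, watch over-current),
+ * power-down -> val = (!0)<<1 = 0x02 (GPIOs as output, power bit = 1).
+ */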
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
u16 cur_limiting_mode;
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
&cur_limiting_mode);
@@ -3014,12 +4341,10 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
if (edc_mode == EDC_MODE_LIMITING) {
DP(NETIF_MSG_LINK,
"Setting LIMITING MODE\n");
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- EDC_MODE_LIMITING);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ EDC_MODE_LIMITING);
} else { /* LRM mode ( default )*/
DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
@@ -3030,27 +4355,19 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
if (cur_limiting_mode != EDC_MODE_LIMITING)
return 0;
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_LRM_MODE,
0);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
0x128);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL0,
0x4008);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_LRM_MODE,
0xaaaa);
@@ -3058,46 +4375,33 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
return 0;
}
-static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
+static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
u16 edc_mode)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
u16 phy_identifier;
u16 rom_ver2_val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
&phy_identifier);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
(phy_identifier & ~(1<<9)));
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
&rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
(rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
(phy_identifier | (1<<9)));
@@ -3105,72 +4409,34 @@ static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
return 0;
}
-
-static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
{
- u8 val;
struct bnx2x *bp = params->bp;
- u16 timeout;
- /* Initialization time after hot-plug may take up to 300ms for some
- phys type ( e.g. JDSU ) */
- for (timeout = 0; timeout < 60; timeout++) {
- if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val)
- == 0) {
- DP(NETIF_MSG_LINK, "SFP+ module initialization "
- "took %d ms\n", timeout * 5);
- return 0;
- }
- msleep(5);
- }
- return -EINVAL;
-}
-static void bnx2x_8727_power_module(struct bnx2x *bp,
- struct link_params *params,
- u8 ext_phy_addr, u8 is_power_up) {
- /* Make sure GPIOs are not using for LED mode */
- u16 val;
- u8 port = params->port;
- /*
- * In the GPIO register, bit 4 is use to detemine if the GPIOs are
- * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
- * output
- * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
- * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
- * where the 1st bit is the over-current(only input), and 2nd bit is
- * for power( only output )
- */
-
- /*
- * In case of NOC feature is disabled and power is up, set GPIO control
- * as input to enable listening of over-current indication
- */
-
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_BCM8727_NOC) && is_power_up)
- val = (1<<4);
- else
- /*
- * Set GPIO control to OUTPUT, and set the power bit
- * to according to the is_power_up
- */
- val = ((!(is_power_up)) << 1);
-
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_GPIO_CTRL,
- val);
+ switch (action) {
+ case DISABLE_TX:
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ break;
+ case ENABLE_TX:
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+ action);
+ return;
+ }
}
-static u8 bnx2x_sfp_module_detection(struct link_params *params)
+static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
u8 rc = 0;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].config));
@@ -3178,10 +4444,10 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
params->port);
- if (bnx2x_get_edc_mode(params, &edc_mode) != 0) {
+ if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
- } else if (bnx2x_verify_sfp_module(params) !=
+ } else if (bnx2x_verify_sfp_module(phy, params) !=
0) {
/* check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
@@ -3190,13 +4456,12 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
MISC_REGISTERS_GPIO_HIGH,
params->port);
- if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
/* Shutdown SFP+ module */
DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
- bnx2x_8727_power_module(bp, params,
- ext_phy_addr, 0);
+ bnx2x_8727_power_module(bp, phy, 0);
return rc;
}
} else {
@@ -3208,15 +4473,15 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
}
/* power up the SFP module */
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
- bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
+ bnx2x_8727_power_module(bp, phy, 1);
/* Check and set limiting mode / LRM mode on 8726.
On 8727 it is done automatically */
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
- bnx2x_bcm8726_set_limiting_mode(params, edc_mode);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
+ bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
else
- bnx2x_bcm8727_set_limiting_mode(params, edc_mode);
+ bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
/*
* Enable transmit for this module if the module is approved, or
* if unapproved modules should also enable the Tx laser
@@ -3224,11 +4489,9 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
if (rc == 0 ||
(val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, params->port,
- ext_phy_type, ext_phy_addr, 1);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
else
- bnx2x_sfp_set_transmitter(bp, params->port,
- ext_phy_type, ext_phy_addr, 0);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
return rc;
}
@@ -3236,6 +4499,7 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
void bnx2x_handle_module_detect_int(struct link_params *params)
{
struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
u32 gpio_val;
u8 port = params->port;
@@ -3245,1349 +4509,587 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
params->port);
/* Get current gpio val refelecting module plugged in / out*/
- gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
+ gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
- port);
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ port);
- if (bnx2x_wait_for_sfp_module_initialized(params) ==
- 0)
- bnx2x_sfp_module_detection(params);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
} else {
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
- u32 ext_phy_type =
- XGXS_EXT_PHY_TYPE(params->ext_phy_config);
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
- port);
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ port);
/* Module was plugged out. */
/* Disable transmit for this module */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, params->port,
- ext_phy_type, ext_phy_addr, 0);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
}
}
-static void bnx2x_bcm807x_force_10G(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* Force KR or KX */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 0x2040);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2,
- 0x000b);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_BCM_CTRL,
- 0x0000);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL,
- 0x0000);
-}
-
-static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
+/******************************************************************/
+/* common BCM8706/BCM8726 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u8 link_up = 0;
+ u16 val1, val2, rx_sd, pcs_status;
struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u16 val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
-
- if (val == 0) {
- /* Mustn't set low power mode in 8073 A0 */
- return;
+ DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+ /* Clear RX Alarm*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+ /* clear LASI indication*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
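+ /* Note: the 1G link status below is read twice, presumably because
+ the register is latched and the second read returns the current value */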
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+ DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x\n", rx_sd, pcs_status, val2);
+ /* link is up if both bit 0 of pmd_rx_sd and
+ * bit 0 of pcs_status are set, or if the autoneg bit
+ * 1 is set
+ */
+ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+ if (link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
}
-
- /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD,
- MDIO_XS_PLL_SEQUENCER, &val);
- val &= ~(1<<13);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
- /* PLL controls */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805E, 0x1077);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805D, 0x0000);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805C, 0x030B);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805B, 0x1240);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805A, 0x2490);
-
- /* Tx Controls */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80A7, 0x0C74);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80A6, 0x9041);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
- /* Rx Controls */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80FE, 0x01C4);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80FD, 0x9249);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
- /* Enable PLL sequencer (use read-modify-write to set bit 13) */
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD,
- MDIO_XS_PLL_SEQUENCER, &val);
- val |= (1<<13);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+ return link_up;
}
-static void bnx2x_8073_set_pause_cl37(struct link_params *params,
- struct link_vars *vars)
+/******************************************************************/
+/* BCM8706 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u16 cnt, val;
struct bnx2x *bp = params->bp;
- u16 cl37_val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, &cl37_val);
-
- cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy);
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ /* Wait until fw is loaded */
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+ if (val)
+ break;
+ msleep(10);
}
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ u8 i;
+ u16 reg;
+ for (i = 0; i < 4; i++) {
+ reg = MDIO_XS_8706_REG_BANK_RX0 +
+ i*(MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+ /* Clear first 3 bits of the control */
+ val &= ~0x7;
+ /* Set control bits according to configuration */
+ val |= (phy->rx_preemphasis[i] & 0x7);
+ DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+ " reg 0x%x <-- val 0x%x\n", reg, val);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+ }
}
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Force speed */
+ if (phy->req_line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+ } else {
+ /* Force 1Gbps using autoneg with 1G advertisement */
+
+ /* Allow CL37 through CL73 */
+ DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+ /* Enable Full-Duplex advertisement on CL37 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+ /* Enable CL37 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ /* 1G support */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+ /* Enable clause 73 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ 0x0400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+ 0x0004);
}
- DP(NETIF_MSG_LINK,
- "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+ return 0;
+}
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, cl37_val);
- msleep(500);
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ return bnx2x_8706_8726_read_status(phy, params, vars);
}
-static void bnx2x_ext_phy_set_pause(struct link_params *params,
- struct link_vars *vars)
+/******************************************************************/
+/* BCM8726 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u16 val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* read modify write pause advertizing */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &val);
-
- val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
-
- /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
-
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
- }
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
- val |=
- MDIO_AN_REG_ADV_PAUSE_PAUSE;
- }
- DP(NETIF_MSG_LINK,
- "Ext phy AN advertize 0x%x\n", val);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, val);
+ DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
}
-static void bnx2x_set_preemphasis(struct link_params *params)
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+ struct link_params *params)
{
- u16 bank, i = 0;
struct bnx2x *bp = params->bp;
+ /* Need to wait 100ms after reset */
+ msleep(100);
- for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
- bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
- bank,
- MDIO_RX0_RX_EQ_BOOST,
- params->xgxs_config_rx[i]);
- }
-
- for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
- bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
- bank,
- MDIO_TX0_TX_DRIVER,
- params->xgxs_config_tx[i]);
- }
-}
+ /* Micro controller re-boot */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+ /* Set soft reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
-static void bnx2x_8481_set_led4(struct link_params *params,
- u32 ext_phy_type, u8 ext_phy_addr)
-{
- struct bnx2x *bp = params->bp;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
- /* PHYC_CTL_LED_CTL */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482);
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* wait for 150ms for microcode load */
+ msleep(150);
- /* Unmask LED4 for 10G link */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
+ /* Disable serial boot control; tristate pins SS_N, SCK, MOSI, MISO */
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6));
- /* 'Interrupt Mask' */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- 0xFFFB, 0xFFFD);
-}
-static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
- u32 ext_phy_type, u8 ext_phy_addr)
-{
- struct bnx2x *bp = params->bp;
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- /* LED1 (10G Link): Disable LED1 when 10/100/1000 link */
- /* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link) */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_SHADOW,
- (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
+ msleep(200);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
}
-static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
- u32 ext_phy_type, u8 ext_phy_addr)
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val1;
-
- /* LED1 (10G Link) */
- /* Enable continuse based on source 7(10G-link) */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- &val1);
- /* Set bit 2 to 0, and bits [1:0] to 10 */
- val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
- val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- val1);
-
- /* Unmask LED1 for 10G link */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- &val1);
- /* Set bit 2 to 0, and bits [1:0] to 10 */
- val1 |= (1<<7);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- val1);
-
- /* LED2 (1G/100/10G Link) */
- /* Mask LED2 for 10G link */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0);
-
- /* Unmask LED3 for 10G link */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x6);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_BLINK,
- 0);
+ u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ link_up = 0;
+ vars->line_speed = 0;
+ }
+ }
+ return link_up;
}
-static void bnx2x_init_internal_phy(struct link_params *params,
- struct link_vars *vars,
- u8 enable_cl73)
+static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
+ u32 val;
+ u32 swap_val, swap_override, aeu_gpio_mask, offset;
+ DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
- if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
- if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
- bnx2x_set_preemphasis(params);
-
- /* forced speed requested? */
- if (vars->line_speed != SPEED_AUTO_NEG ||
- ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- params->loopback_mode == LOOPBACK_EXT)) {
- DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
-
- /* disable autoneg */
- bnx2x_set_autoneg(params, vars, 0);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
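+ /* Soft-reset the PHY via bit 15 of the PMA control register, then wait
+ for the reset to complete */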
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ bnx2x_wait_reset_complete(bp, phy);
+
+ bnx2x_8726_external_rom_boot(phy, params);
+
+ /* Need to call module detection on initialization, since
+ the module detection triggered by actual module
+ insertion might occur before the driver is loaded; when
+ the driver is loaded, it resets all registers, including the
+ transmitter */
+ bnx2x_sfp_module_detection(phy, params);
+
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ 0x400);
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ /* Set Flow control */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ /* Enable RX-ALARM control to receive an
+ interrupt on 1G speed change */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ 0x400);
+
+ } else { /* Default 10G. Set only LASI control */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+ }
- /* program speed and duplex */
- bnx2x_program_serdes(params, vars);
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
+ "TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
- } else { /* AN_mode */
- DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+ /* Set GPIO3 to trigger SFP+ module insertion/removal */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
- /* AN enabled */
- bnx2x_set_brcm_cl37_advertisment(params);
+ /* The GPIO should be swapped if the swap register is set and active */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- /* program duplex & pause advertisement (for aneg) */
- bnx2x_set_ieee_aneg_advertisment(params,
- vars->ieee_fc);
+ /* Select function upon port-swap configuration */
+ if (params->port == 0) {
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ aeu_gpio_mask = (swap_val && swap_override) ?
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
+ } else {
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+ aeu_gpio_mask = (swap_val && swap_override) ?
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
+ }
+ val = REG_RD(bp, offset);
+ /* add GPIO3 to group */
+ val |= aeu_gpio_mask;
+ REG_WR(bp, offset, val);
+ return 0;
- /* enable autoneg */
- bnx2x_set_autoneg(params, vars, enable_cl73);
+}
- /* enable and restart AN */
- bnx2x_restart_autoneg(params, enable_cl73);
- }
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+ /* Set serial boot control for external load */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
- } else { /* SGMII mode */
- DP(NETIF_MSG_LINK, "SGMII\n");
+/******************************************************************/
+/* BCM8727 PHY SECTION */
+/******************************************************************/
- bnx2x_initialize_sgmii_process(params, vars);
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 led_mode_bitmask = 0;
+ u16 gpio_pins_bitmask = 0;
+ u16 val;
+ /* Only the NOC flavor requires setting the LED specifically */
+ if (!(phy->flags & FLAGS_NOC))
+ return;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x03;
+ break;
+ case LED_MODE_ON:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x02;
+ break;
+ case LED_MODE_OPER:
+ led_mode_bitmask = 0x60;
+ gpio_pins_bitmask = 0x11;
+ break;
}
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val &= 0xff8f;
+ val |= led_mode_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val);
+ val &= 0xffe0;
+ val |= gpio_pins_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params) {
+ u32 swap_val, swap_override;
+ u8 port;
+ /**
+ * The PHY reset is controlled by GPIO 1. Fake the port number
+ * to cancel the swap done in set_gpio()
+ */
+ struct bnx2x *bp = params->bp;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port = (swap_val && swap_override) ^ 1;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
-static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
+static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u16 tmp1, val, mod_abs;
+ u16 rx_alarm_ctrl_val;
+ u16 lasi_ctrl_val;
struct bnx2x *bp = params->bp;
- u32 ext_phy_type;
- u8 ext_phy_addr;
- u16 cnt;
- u16 ctrl = 0;
- u16 val = 0;
- u8 rc = 0;
-
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* Make sure that the soft reset is off (expect for the 8072:
- * due to the lock, it will be done inside the specific
- * handling)
+ /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+ bnx2x_wait_reset_complete(bp, phy);
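+ /* RX_ALARM control bits used here: (1<<2) - link alarm,
+ (1<<5) - module absent (MOD_ABS) */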
+ rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
+ lasi_ctrl_val = 0x0004;
+
+ DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ rx_alarm_ctrl_val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
+
+ /* Initially configure MOD_ABS to interrupt when
+ a module is present (bit 8) */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ /* Set EDC off by setting OPTXLOS signal input to low
+ (bit 9).
+ When the EDC is off it locks onto a reference clock and
+ avoids becoming 'lost'.*/
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+
+ /**
+ * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+ * status which reflects SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+
+ bnx2x_8727_power_module(bp, phy, 1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+
+ /* Set option 1G speed */
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /**
+ * Power down the XAUI until link is up in case of dual-media
+ * and 1G
*/
- if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
- /* Wait for soft reset to get cleared upto 1 sec */
- for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, &ctrl);
- if (!(ctrl & (1<<15)))
- break;
- msleep(1);
- }
- DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
- ctrl, cnt);
- }
-
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- DP(NETIF_MSG_LINK, "XGXS 8705\n");
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL,
- 0x8288);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- 0x7fbf);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CMU_PLL_BYPASS,
- 0x0100);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_WIS_DEVAD,
- MDIO_WIS_REG_LASI_CNTL, 0x1);
-
- /* BCM8705 doesn't have microcode, hence the 0 */
- bnx2x_save_spirom_version(bp, params->port,
- params->shmem_base, 0);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- /* Wait until fw is loaded */
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &val);
- if (val)
- break;
- msleep(10);
- }
- DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
- "after %d ms\n", cnt);
- if ((params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- u8 i;
- u16 reg;
- for (i = 0; i < 4; i++) {
- reg = MDIO_XS_8706_REG_BANK_RX0 +
- i*(MDIO_XS_8706_REG_BANK_RX1 -
- MDIO_XS_8706_REG_BANK_RX0);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_XS_DEVAD,
- reg, &val);
- /* Clear first 3 bits of the control */
- val &= ~0x7;
- /* Set control bits according to
- configuation */
- val |= (params->xgxs_config_rx[i] &
- 0x7);
- DP(NETIF_MSG_LINK, "Setting RX"
- "Equalizer to BCM8706 reg 0x%x"
- " <-- val 0x%x\n", reg, val);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_XS_DEVAD,
- reg, val);
- }
- }
- /* Force speed */
- if (params->req_line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_DIGITAL_CTRL,
- 0x400);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 1);
- } else {
- /* Force 1Gbps using autoneg with 1G
- advertisment */
-
- /* Allow CL37 through CL73 */
- DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_CL73,
- 0x040c);
-
- /* Enable Full-Duplex advertisment on CL37 */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LP,
- 0x0020);
- /* Enable CL37 AN */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN,
- 0x1000);
- /* 1G support */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV, (1<<5));
-
- /* Enable clause 73 AN */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL,
- 0x1200);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- 0x0400);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x0004);
-
- }
- bnx2x_save_bcm_spirom_ver(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- params->shmem_base);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- bnx2x_bcm8726_external_rom_boot(params);
-
- /* Need to call module detected on initialization since
- the module detection triggered by actual module
- insertion might occur before driver is loaded, and when
- driver is loaded, it reset all registers, including the
- transmitter */
- bnx2x_sfp_module_detection(params);
-
- /* Set Flow control */
- bnx2x_ext_phy_set_pause(params, vars);
- if (params->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x5);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- 0x400);
- } else if ((params->req_line_speed ==
- SPEED_AUTO_NEG) &&
- ((params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV, 0x20);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_CL73, 0x040c);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, 0x0020);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN, 0x1000);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x1200);
-
- /* Enable RX-ALARM control to receive
- interrupt for 1G speed change */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x4);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- 0x400);
-
- } else { /* Default 10G. Set only LASI control */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 1);
- }
-
- /* Set TX PreEmphasis if needed */
- if ((params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
- "TX_CTRL2 0x%x\n",
- params->xgxs_config_tx[0],
- params->xgxs_config_tx[1]);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TX_CTRL1,
- params->xgxs_config_tx[0]);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TX_CTRL2,
- params->xgxs_config_tx[1]);
- }
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- {
- u16 tmp1;
- u16 rx_alarm_ctrl_val;
- u16 lasi_ctrl_val;
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
- rx_alarm_ctrl_val = 0x400;
- lasi_ctrl_val = 0x0004;
- } else {
- rx_alarm_ctrl_val = (1<<2);
- lasi_ctrl_val = 0x0004;
- }
-
- /* enable LASI */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- rx_alarm_ctrl_val);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL,
- lasi_ctrl_val);
-
- bnx2x_8073_set_pause_cl37(params, vars);
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
- bnx2x_bcm8072_external_rom_boot(params);
- else
- /* In case of 8073 with long xaui lines,
- don't set the 8073 xaui low power*/
- bnx2x_bcm8073_set_xaui_low_power_mode(params);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &tmp1);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &tmp1);
-
- DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
- "0x%x\n", tmp1);
-
- /* If this is forced speed, set to KR or KX
- * (all other are not supported)
- */
- if (params->loopback_mode == LOOPBACK_EXT) {
- bnx2x_bcm807x_force_10G(params);
- DP(NETIF_MSG_LINK,
- "Forced speed 10G on 807X\n");
- break;
- } else {
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_BCM_CTRL,
- 0x0002);
- }
- if (params->req_line_speed != SPEED_AUTO_NEG) {
- if (params->req_line_speed == SPEED_10000) {
- val = (1<<7);
- } else if (params->req_line_speed ==
- SPEED_2500) {
- val = (1<<5);
- /* Note that 2.5G works only
- when used with 1G advertisment */
- } else
- val = (1<<5);
- } else {
-
- val = 0;
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
- val |= (1<<7);
-
- /* Note that 2.5G works only when
- used with 1G advertisment */
- if (params->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
- val |= (1<<5);
- DP(NETIF_MSG_LINK,
- "807x autoneg val = 0x%x\n", val);
- }
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV, val);
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_2_5G, &tmp1);
-
- if (((params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
- (params->req_line_speed ==
- SPEED_AUTO_NEG)) ||
- (params->req_line_speed ==
- SPEED_2500)) {
- u16 phy_ver;
- /* Allow 2.5G for A1 and above */
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
- DP(NETIF_MSG_LINK, "Add 2.5G\n");
- if (phy_ver > 0)
- tmp1 |= 1;
- else
- tmp1 &= 0xfffe;
- } else {
- DP(NETIF_MSG_LINK, "Disable 2.5G\n");
- tmp1 &= 0xfffe;
- }
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_2_5G, tmp1);
- }
-
- /* Add support for CL37 (passive mode) II */
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD,
- &tmp1);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, (tmp1 |
- ((params->req_duplex == DUPLEX_FULL) ?
- 0x20 : 0x40)));
-
- /* Add support for CL37 (passive mode) III */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN, 0x1000);
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- /* The SNR will improve about 2db by changing
- BW and FEE main tap. Rest commands are executed
- after link is up*/
- /*Change FFE main cursor to 5 in EDC register*/
- if (bnx2x_8073_is_snr_needed(params))
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN,
- 0xFB0C);
-
- /* Enable FEC (Forware Error Correction)
- Request in the AN */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, &tmp1);
-
- tmp1 |= (1<<15);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, tmp1);
-
- }
-
- bnx2x_ext_phy_set_pause(params, vars);
-
- /* Restart autoneg */
- msleep(500);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x1200);
- DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
- "Advertise 1G=%x, 10G=%x\n",
- ((val & (1<<5)) > 0),
- ((val & (1<<7)) > 0));
- break;
+ MDIO_PMA_REG_8727_PCS_GP, val);
}
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /**
+ * Since the 8727 has only a single reset pin, the 10G
+ * registers need to be set even though they are the default
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
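+ /* 0x2040 in the PMA control register is the 10G speed selection
+ (bits 13 and 6 set) */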
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- {
- u16 tmp1;
- u16 rx_alarm_ctrl_val;
- u16 lasi_ctrl_val;
-
- /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
-
- u16 mod_abs;
- rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
- lasi_ctrl_val = 0x0004;
-
- DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- rx_alarm_ctrl_val);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL,
- lasi_ctrl_val);
-
- /* Initially configure MOD_ABS to interrupt when
- module is presence( bit 8) */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
- /* Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
- mod_abs &= ~((1<<8) | (1<<9));
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
-
- /* Make MOD_ABS give interrupt on change */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- &val);
- val |= (1<<12);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- val);
-
- /* Set 8727 GPIOs to input to allow reading from the
- 8727 GPIO0 status which reflect SFP+ module
- over-current */
-
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- &val);
- val &= 0xff8f; /* Reset bits 4-6 */
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- val);
-
- bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
- bnx2x_bcm8073_set_xaui_low_power_mode(params);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &tmp1);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &tmp1);
-
- /* Set option 1G speed */
- if (params->req_line_speed == SPEED_1000) {
-
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-
- } else if ((params->req_line_speed ==
- SPEED_AUTO_NEG) &&
- ((params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_PMA_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin,
- need to set the 10G registers although it is
- default */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x0020);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- 0x7, 0x0100);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, 0x0008);
- }
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
-
- /* Set TX PreEmphasis if needed */
- if ((params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
- "TX_CTRL2 0x%x\n",
- params->xgxs_config_tx[0],
- params->xgxs_config_tx[1]);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TX_CTRL1,
- params->xgxs_config_tx[0]);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TX_CTRL2,
- params->xgxs_config_tx[1]);
- }
-
- break;
- }
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- {
- u16 fw_ver1, fw_ver2;
- DP(NETIF_MSG_LINK,
- "Setting the SFX7101 LASI indication\n");
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x1);
- DP(NETIF_MSG_LINK,
- "Setting the SFX7101 LED to blink on traffic\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
-
- bnx2x_ext_phy_set_pause(params, vars);
- /* Restart autoneg */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, &val);
- val |= 0x200;
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, val);
-
- /* Save spirom version */
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_VER1, &fw_ver1);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_VER2, &fw_ver2);
-
- bnx2x_save_spirom_version(params->bp, params->port,
- params->shmem_base,
- (u32)(fw_ver1<<16 | fw_ver2));
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- /* This phy uses the NIG latch mechanism since link
- indication arrives through its LED4 and not via
- its LASI signal, so we get steady signal
- instead of clear on read */
- bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
- 1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x0000);
-
- bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
- if (params->req_line_speed == SPEED_AUTO_NEG) {
-
- u16 autoneg_val, an_1000_val, an_10_100_val;
- /* set 1000 speed advertisement */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_1000T_CTRL,
- &an_1000_val);
-
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
- an_1000_val |= (1<<8);
- if (params->req_duplex == DUPLEX_FULL)
- an_1000_val |= (1<<9);
- DP(NETIF_MSG_LINK, "Advertising 1G\n");
- } else
- an_1000_val &= ~((1<<8) | (1<<9));
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_1000T_CTRL,
- an_1000_val);
-
- /* set 100 speed advertisement */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_AN_ADV,
- &an_10_100_val);
-
- if (params->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
- an_10_100_val |= (1<<7);
- if (params->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK,
- "Advertising 100M\n");
- } else
- an_10_100_val &= ~((1<<7) | (1<<8));
-
- /* set 10 speed advertisement */
- if (params->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
- an_10_100_val |= (1<<5);
- if (params->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
- }
- else
- an_10_100_val &= ~((1<<5) | (1<<6));
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_AN_ADV,
- an_10_100_val);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- &autoneg_val);
-
- /* Disable forced speed */
- autoneg_val &= ~(1<<6|1<<13);
-
- /* Enable autoneg and restart autoneg
- for legacy speeds */
- autoneg_val |= (1<<9|1<<12);
-
- if (params->req_duplex == DUPLEX_FULL)
- autoneg_val |= (1<<8);
- else
- autoneg_val &= ~(1<<8);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- autoneg_val);
-
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
- DP(NETIF_MSG_LINK, "Advertising 10G\n");
- /* Restart autoneg for 10G*/
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x3200);
- }
- } else {
- /* Force speed */
- u16 autoneg_ctrl, pma_ctrl;
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- &autoneg_ctrl);
-
- /* Disable autoneg */
- autoneg_ctrl &= ~(1<<12);
-
- /* Set 1000 force */
- switch (params->req_line_speed) {
- case SPEED_10000:
- DP(NETIF_MSG_LINK,
- "Unable to set 10G force !\n");
- break;
- case SPEED_1000:
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- &pma_ctrl);
- autoneg_ctrl &= ~(1<<13);
- autoneg_ctrl |= (1<<6);
- pma_ctrl &= ~(1<<13);
- pma_ctrl |= (1<<6);
- DP(NETIF_MSG_LINK,
- "Setting 1000M force\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- pma_ctrl);
- break;
- case SPEED_100:
- autoneg_ctrl |= (1<<13);
- autoneg_ctrl &= ~(1<<6);
- DP(NETIF_MSG_LINK,
- "Setting 100M force\n");
- break;
- case SPEED_10:
- autoneg_ctrl &= ~(1<<13);
- autoneg_ctrl &= ~(1<<6);
- DP(NETIF_MSG_LINK,
- "Setting 10M force\n");
- break;
- }
-
- /* Duplex mode */
- if (params->req_duplex == DUPLEX_FULL) {
- autoneg_ctrl |= (1<<8);
- DP(NETIF_MSG_LINK,
- "Setting full duplex\n");
- } else
- autoneg_ctrl &= ~(1<<8);
-
- /* Update autoneg ctrl and pma ctrl */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- autoneg_ctrl);
- }
-
- /* Save spirom version */
- bnx2x_save_8481_spirom_version(bp, params->port,
- ext_phy_addr,
- params->shmem_base);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- DP(NETIF_MSG_LINK,
- "XGXS PHY Failure detected 0x%x\n",
- params->ext_phy_config);
- rc = -EINVAL;
- break;
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- params->ext_phy_config);
- rc = -EINVAL;
- break;
- }
-
- } else { /* SerDes */
-
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "SerDes Direct\n");
- break;
-
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- DP(NETIF_MSG_LINK, "SerDes 5482\n");
- break;
-
- default:
- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
- params->ext_phy_config);
- break;
- }
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100kHz since some DACs (direct attached cables) do
+ * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ 0xa001);
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+ phy->tx_preemphasis[1]);
}
- return rc;
+
+ return 0;
}
-static void bnx2x_8727_handle_mod_abs(struct link_params *params)
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 mod_abs, rx_alarm_status;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1<<8)) {
@@ -4602,18 +5104,16 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
(bit 9).
When the EDC is off it locks onto a reference clock and
avoids becoming 'lost'.*/
- mod_abs &= ~((1<<8)|(1<<9));
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
/* Clear RX alarm since it stays up as long as
the mod_abs wasn't changed */
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -4630,33 +5130,28 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
2. Restore the default polarity of the OPRXLOS signal and
this signal will then correctly indicate the presence or
absence of the Rx signal. (bit 9) */
- mod_abs |= ((1<<8)|(1<<9));
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ mod_abs |= (1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs |= (1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
/* Clear RX alarm since it stays up as long as
the mod_abs wasn't changed. This needs to be done
before calling the module detection, otherwise it will clear
the link update alarm */
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr, 0);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
- if (bnx2x_wait_for_sfp_module_initialized(params)
- == 0)
- bnx2x_sfp_module_detection(params);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
}
@@ -4667,1298 +5162,1714 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
module plugged in/out */
}
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
-static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
- struct link_vars *vars,
- u8 is_mi_int)
{
struct bnx2x *bp = params->bp;
- u32 ext_phy_type;
- u8 ext_phy_addr;
- u16 val1 = 0, val2;
- u16 rx_sd, pcs_status;
- u8 ext_phy_link_up = 0;
- u8 port = params->port;
-
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "XGXS Direct\n");
- ext_phy_link_up = 1;
- break;
+ u8 link_up = 0;
+ u16 link_status = 0;
+ u16 rx_alarm_status, lasi_ctrl, val1;
+
+ /* If PHY is not initialized, do not check link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+ &lasi_ctrl);
+ if (!lasi_ctrl)
+ return 0;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- DP(NETIF_MSG_LINK, "XGXS 8705\n");
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_WIS_DEVAD,
- MDIO_WIS_REG_LASI_STATUS, &val1);
- DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_WIS_DEVAD,
- MDIO_WIS_REG_LASI_STATUS, &val1);
- DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_SD, &rx_sd);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- 1,
- 0xc809, &val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- 1,
- 0xc809, &val1);
-
- DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
- ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
- ((val1 & (1<<8)) == 0));
- if (ext_phy_link_up)
- vars->line_speed = SPEED_10000;
- break;
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
+ &rx_alarm_status);
+ vars->line_speed = 0;
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
- /* Clear RX Alarm*/
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
- &val2);
- /* clear LASI indication*/
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
- &val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
- &val2);
- DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
- "0x%x\n", val1, val2);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
- &rx_sd);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
- &pcs_status);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
- &val2);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
- &val2);
-
- DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
- " pcs_status 0x%x 1Gbps link_status 0x%x\n",
- rx_sd, pcs_status, val2);
- /* link is up if both bit 0 of pmd_rx_sd and
- * bit 0 of pcs_status are set, or if the autoneg bit
- 1 is set
- */
- ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
- (val2 & (1<<1)));
- if (ext_phy_link_up) {
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
- /* If transmitter is disabled,
- ignore false link up indication */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val1);
- if (val1 & (1<<15)) {
- DP(NETIF_MSG_LINK, "Tx is "
- "disabled\n");
- ext_phy_link_up = 0;
- break;
- }
- }
- if (val2 & (1<<1))
- vars->line_speed = SPEED_1000;
- else
- vars->line_speed = SPEED_10000;
- }
- break;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- {
- u16 link_status = 0;
- u16 rx_alarm_status;
- /* Check the LASI */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
-
- DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
- rx_alarm_status);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
- DP(NETIF_MSG_LINK,
- "8727 LASI status 0x%x\n",
- val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
- /* Clear MSG-OUT */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &val1);
+ /**
+ * If a module is present and there is a need to check
+ * for over-current
+ */
+ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+ /* Check over-current using 8727 GPIO0 input*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val1);
+
+ if ((val1 & (1<<8)) == 0) {
+ DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
+ " on port %d\n", params->port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has"
+ " been detected and the power to "
+ "that SFP+ module has been removed"
+ " to prevent failure of the card."
+ " Please remove the SFP+ module and"
+ " restart the system to clear this"
+ " error.\n",
+ params->port);
/*
- * If a module is present and there is need to check
- * for over current
+ * Disable all RX_ALARMs except for
+ * mod_abs
*/
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_BCM8727_NOC) &&
- !(rx_alarm_status & (1<<5))) {
- /* Check over-current using 8727 GPIO0 input*/
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_GPIO_CTRL,
- &val1);
-
- if ((val1 & (1<<8)) == 0) {
- DP(NETIF_MSG_LINK, "8727 Power fault"
- " has been detected on "
- "port %d\n",
- params->port);
- netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
- params->port);
- /*
- * Disable all RX_ALARMs except for
- * mod_abs
- */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- (1<<5));
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val1);
- /* Wait for module_absent_event */
- val1 |= (1<<8);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- val1);
- /* Clear RX alarm */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM,
- &rx_alarm_status);
- break;
- }
- } /* Over current check */
-
- /* When module absent bit is set, check module */
- if (rx_alarm_status & (1<<5)) {
- bnx2x_8727_handle_mod_abs(params);
- /* Enable all mod_abs and link detection bits */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- ((1<<5) | (1<<2)));
- }
-
- /* If transmitter is disabled,
- ignore false link up indication */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val1);
- if (val1 & (1<<15)) {
- DP(NETIF_MSG_LINK, "Tx is disabled\n");
- ext_phy_link_up = 0;
- break;
- }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &link_status);
-
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
- if ((link_status & (1<<2)) &&
- (!(link_status & (1<<15)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_10000;
- } else if ((link_status & (1<<0)) &&
- (!(link_status & (1<<13)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_1000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 1G\n", params->port);
- } else {
- ext_phy_link_up = 0;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " is down\n", params->port);
- }
- break;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ /* Wait for module_absent_event */
+ val1 |= (1<<8);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+ /* Clear RX alarm */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ return 0;
}
+ } /* Over current check */
+
+ /* When module absent bit is set, check module */
+ if (rx_alarm_status & (1<<5)) {
+ bnx2x_8727_handle_mod_abs(phy, params);
+ /* Enable all mod_abs and link detection bits */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ ((1<<5) | (1<<2)));
+ }
+ DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
+ bnx2x_8727_specific_func(phy, params, ENABLE_TX);
+ /* If transmitter is disabled, ignore false link up indication */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ return 0;
+ }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- {
- u16 link_status = 0;
- u16 an1000_status = 0;
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_LASI_STATUS, &val1);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_LASI_STATUS, &val2);
- DP(NETIF_MSG_LINK,
- "870x LASI status 0x%x->0x%x\n",
- val1, val2);
- } else {
- /* In 8073, port1 is directed through emac0 and
- * port0 is directed through emac1
- */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val1);
-
- DP(NETIF_MSG_LINK,
- "8703 LASI status 0x%x\n",
- val1);
- }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
- /* clear the interrupt LASI status register */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_STATUS, &val1);
- DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
- val2, val1);
- /* Clear MSG-OUT */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &val1);
-
- /* Check the LASI */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &val2);
-
- DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
-
- /* Check the link status */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_STATUS, &val2);
- DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val1);
- ext_phy_link_up = ((val1 & 4) == 4);
- DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-
- if (ext_phy_link_up &&
- ((params->req_line_speed !=
- SPEED_10000))) {
- if (bnx2x_bcm8073_xaui_wa(params)
- != 0) {
- ext_phy_link_up = 0;
- break;
- }
- }
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
-
- /* Check the link status on 1.1.2 */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val1);
- DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
- "an_link_status=0x%x\n",
- val2, val1, an1000_status);
-
- ext_phy_link_up = (((val1 & 4) == 4) ||
- (an1000_status & (1<<1)));
- if (ext_phy_link_up &&
- bnx2x_8073_is_snr_needed(params)) {
- /* The SNR will improve about 2dbby
- changing the BW and FEE main tap.*/
-
- /* The 1st write to change FFE main
- tap is set before restart AN */
- /* Change PLL Bandwidth in EDC
- register */
- bnx2x_cl45_write(bp, port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PLL_BANDWIDTH,
- 0x26BC);
-
- /* Change CDR Bandwidth in EDC
- register */
- bnx2x_cl45_write(bp, port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CDR_BANDWIDTH,
- 0x0333);
- }
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &link_status);
-
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
- if ((link_status & (1<<2)) &&
- (!(link_status & (1<<15)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_10000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 10G\n", params->port);
- } else if ((link_status & (1<<1)) &&
- (!(link_status & (1<<14)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_2500;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 2.5G\n", params->port);
- } else if ((link_status & (1<<0)) &&
- (!(link_status & (1<<13)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_1000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 1G\n", params->port);
- } else {
- ext_phy_link_up = 0;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " is down\n", params->port);
- }
- } else {
- /* See if 1G link is up for the 8072 */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
- if (an1000_status & (1<<1)) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_1000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 1G\n", params->port);
- } else if (ext_phy_link_up) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_10000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 10G\n", params->port);
- }
- }
+	/* Bits 0..2 --> speed detected,
+	   bits 13..15 --> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+ if (link_up)
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ if ((DUAL_MEDIA(params)) &&
+ (phy->req_line_speed == SPEED_1000)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val1);
+ /**
+ * In case of dual-media board and 1G, power up the XAUI side,
+ * otherwise power it down. For 10G it is done automatically
+ */
+ if (link_up)
+ val1 &= ~(3<<10);
+ else
+ val1 |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val1);
+ }
+ return link_up;
+}
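/*
 * A minimal, illustrative decode of the 8073/8727 speed/link-status word
 * read above, assuming only the bit layout stated in the comment
 * (bits 0..2 = speed detected, bits 13..15 = the matching link-down
 * indications). The helper name is hypothetical and not part of the patch.
 */
static u8 bnx2x_sketch_decode_speed_link(u16 link_status, u16 *line_speed)
{
	if ((link_status & (1 << 2)) && !(link_status & (1 << 15))) {
		*line_speed = SPEED_10000;	/* 10G detected and up */
		return 1;
	}
	if ((link_status & (1 << 0)) && !(link_status & (1 << 13))) {
		*line_speed = SPEED_1000;	/* 1G detected and up */
		return 1;
	}
	return 0;				/* link is down */
}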
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* Disable Transmitter */
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ /* Clear LASI */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val1);
- DP(NETIF_MSG_LINK,
- "10G-base-T LASI status 0x%x->0x%x\n",
- val2, val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val1);
- DP(NETIF_MSG_LINK,
- "10G-base-T PMA status 0x%x->0x%x\n",
- val2, val1);
- ext_phy_link_up = ((val1 & 4) == 4);
- /* if link is up
- * print the AN outcome of the SFX7101 PHY
- */
- if (ext_phy_link_up) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_MASTER_STATUS,
- &val2);
- vars->line_speed = SPEED_10000;
- DP(NETIF_MSG_LINK,
- "SFX7101 AN status 0x%x->Master=%x\n",
- val2,
- (val2 & (1<<14)));
- }
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- /* Check 10G-BaseT link status */
- /* Check PMD signal ok */
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- 0xFFFA,
- &val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_PMD_SIGNAL,
- &val2);
- DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
-
- /* Check link 10G */
- if (val2 & (1<<11)) {
- vars->line_speed = SPEED_10000;
- ext_phy_link_up = 1;
- bnx2x_8481_set_10G_led_mode(params,
- ext_phy_type,
- ext_phy_addr);
- } else { /* Check Legacy speed link */
- u16 legacy_status, legacy_speed;
-
- /* Enable expansion register 0x42
- (Operation mode status) */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
- 0xf42);
-
- /* Get legacy speed operation status */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
- &legacy_status);
-
- DP(NETIF_MSG_LINK, "Legacy speed status"
- " = 0x%x\n", legacy_status);
- ext_phy_link_up = ((legacy_status & (1<<11))
- == (1<<11));
- if (ext_phy_link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed =
- SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed =
- SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
-
- if (legacy_status & (1<<8))
- vars->duplex = DUPLEX_FULL;
- else
- vars->duplex = DUPLEX_HALF;
-
- DP(NETIF_MSG_LINK, "Link is up "
- "in %dMbps, is_duplex_full"
- "= %d\n",
- vars->line_speed,
- (vars->duplex == DUPLEX_FULL));
- bnx2x_8481_set_legacy_led_mode(params,
- ext_phy_type,
- ext_phy_addr);
- }
- }
- break;
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- params->ext_phy_config);
- ext_phy_link_up = 0;
- break;
- }
- /* Set SGMII mode for external phy */
- if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
- if (vars->line_speed < SPEED_1000)
- vars->phy_flags |= PHY_SGMII_FLAG;
- else
- vars->phy_flags &= ~PHY_SGMII_FLAG;
- }
+}
- } else { /* SerDes */
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "SerDes Direct\n");
- ext_phy_link_up = 1;
- break;
+/******************************************************************/
+/* BCM8481/BCM84823/BCM84833 PHY SECTION */
+/******************************************************************/
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 val, fw_ver1, fw_ver2, cnt;
+ struct bnx2x *bp = params->bp;
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- DP(NETIF_MSG_LINK, "SerDes 5482\n");
- ext_phy_link_up = 1;
+	/* For the 32-bit registers in 848xx, access via the MDIO2ARM interface. */
+	/* (1) set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, params->port, 0,
+ phy->ver_addr);
+ return;
+ }
- default:
- DP(NETIF_MSG_LINK,
- "BAD SerDes ext_phy_config 0x%x\n",
- params->ext_phy_config);
- ext_phy_link_up = 0;
+
+	/* (2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
break;
- }
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
+ bnx2x_save_spirom_version(bp, params->port, 0,
+ phy->ver_addr);
+ return;
}
- return ext_phy_link_up;
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+ bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
+ phy->ver_addr);
}
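/*
 * A minimal sketch of the MDIO2ARM indirect 32-bit read pattern used
 * above: the 32-bit address goes into 0xA819 (low half) / 0xA81A (high
 * half), a read command (0x000A here) is issued via 0xA817, bit 0 of
 * 0xA818 is polled for completion, and the data comes back in 0xA81B
 * (low 16 bits) and 0xA81C (high 16 bits). The helper name and return
 * convention are hypothetical, not part of the patch.
 */
static int bnx2x_sketch_mdio2arm_read32(struct bnx2x *bp,
					struct bnx2x_phy *phy,
					u32 addr, u32 *val)
{
	u16 lo, hi, status, cnt;

	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, (u16)addr);
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, (u16)(addr >> 16));
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
	for (cnt = 0; cnt < 100; cnt++) {
		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &status);
		if (status & 1)
			break;
		udelay(5);
	}
	if (cnt == 100)
		return -1;	/* timed out waiting for completion */
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &lo);
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &hi);
	*val = ((u32)hi << 16) | lo;
	return 0;
}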
-static void bnx2x_link_int_enable(struct link_params *params)
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
{
- u8 port = params->port;
- u32 ext_phy_type;
- u32 mask;
- struct bnx2x *bp = params->bp;
+ u16 val;
- /* setting the status to report on link up
- for either XGXS or SerDes */
+ /* PHYC_CTL_LED_CTL */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0xFE00;
+ val |= 0x0092;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x18);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0040);
- if (params->switch_cfg == SWITCH_CFG_10G) {
- mask = (NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_XGXS0_LINK_STATUS);
- DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
- (ext_phy_type !=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
- mask |= NIG_MASK_MI_INT;
- DP(NETIF_MSG_LINK, "enabled external phy int\n");
- }
+ /* 'Interrupt Mask' */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ 0xFFFB, 0xFFFD);
+}
- } else { /* SerDes */
- mask = NIG_MASK_SERDES0_LINK_STATUS;
- DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- if ((ext_phy_type !=
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type !=
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
- mask |= NIG_MASK_MI_INT;
- DP(NETIF_MSG_LINK, "enabled external phy int\n");
- }
+static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 autoneg_val, an_1000_val, an_10_100_val;
+ bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+ bnx2x_848xx_set_led(bp, phy);
+
+ /* set 1000 speed advertisement */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ &an_1000_val);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ &an_10_100_val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &autoneg_val);
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ an_1000_val);
+
+	/* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
}
- bnx2x_bits_en(bp,
- NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- mask);
- DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
- (params->switch_cfg == SWITCH_CFG_10G),
- REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
- DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
- REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
- REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
- REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
- DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
-}
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == SPEED_100) {
+ autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
-static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port,
- u8 is_mi_int)
-{
- u32 latch_status = 0, is_mi_int_status;
- /* Disable the MI INT ( external phy int )
- * by writing 1 to the status register. Link down indication
- * is high-active-signal, so in this case we need to write the
- * status to clear the XOR
- */
- /* Read Latched signals */
- latch_status = REG_RD(bp,
- NIG_REG_LATCH_STATUS_0 + port*8);
- is_mi_int_status = REG_RD(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4);
- DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x,"
- "latch_status = 0x%x\n",
- is_mi_int, is_mi_int_status, latch_status);
- /* Handle only those with latched-signal=up.*/
- if (latch_status & 1) {
- /* For all latched-signal=up,Write original_signal to status */
- if (is_mi_int)
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0
- + port*4,
- NIG_STATUS_EMAC0_MI_INT);
- else
- bnx2x_bits_dis(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0
- + port*4,
- NIG_STATUS_EMAC0_MI_INT);
- /* For all latched-signal=up : Re-Arm Latch signals */
- REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
- (latch_status & 0xfffe) | (latch_status & 1));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ an_10_100_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (phy->req_line_speed == SPEED_10000)) {
+ DP(NETIF_MSG_LINK, "Advertising 10G\n");
+ /* Restart autoneg for 10G*/
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
+ } else if (phy->req_line_speed != SPEED_10 &&
+ phy->req_line_speed != SPEED_100) {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 1);
}
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, params);
+
+ return 0;
}
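/*
 * For reference (standard IEEE MII control-register layout, assumed here
 * to apply to MDIO_AN_REG_8481_LEGACY_MII_CTRL): bits 13/6 select a
 * forced speed, bit 12 enables autoneg, bit 9 restarts it and bit 8
 * selects full duplex. Clearing bits 6, 8, 9, 12 and 13 above therefore
 * drops any forced speed, while OR-ing in (1<<9 | 1<<12) enables and
 * restarts autoneg for the advertised legacy speeds.
 */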
-/*
- * link management
- */
-static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g,
- u8 is_mi_int)
+
+static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
- /* first reset all status
- * we assume only one line will be change at a time */
- bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
- if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
- == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
- == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
- bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
- }
- if (vars->phy_link_up) {
- if (is_10g) {
- /* Disable the 10G link interrupt
- * by writing 1 to the status register
- */
- DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- NIG_STATUS_XGXS0_LINK10G);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
- } else if (params->switch_cfg == SWITCH_CFG_10G) {
- /* Disable the link interrupt
- * by writing 1 to the relevant lane
- * in the status register
- */
- u32 ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
- DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
- vars->line_speed);
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- ((1 << ser_lane) <<
- NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
+static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port, initialize = 1;
+ u16 val;
+ u16 temp;
+ u32 actual_phy_selection;
+ u8 rc = 0;
- } else { /* SerDes */
- DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /* Disable the link interrupt
- * by writing 1 to the status register
- */
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- NIG_STATUS_SERDES0_LINK_STATUS);
- }
+	/* This is just for the MDIO_CTL_REG_84823_MEDIA register. */
- } else { /* link_down */
+ msleep(1);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+ msleep(200); /* 100 is not enough */
+
+	/* BCM84823 requires the XGXS link to come up first at 10G for
+	   normal behavior */
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+
+ /* Set dual-media configuration according to configuration */
+	/* Set dual-media configuration according to the multi_phy configuration */
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+ val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
+
+ actual_phy_selection = bnx2x_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		/* Do nothing. Essentially this is like copper priority */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
}
+ if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+ params->multi_phy_config, val);
+
+ if (initialize)
+ rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+ else
+ bnx2x_save_848xx_spirom_version(phy, params);
+ return rc;
}
-static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
- u8 *str_ptr = str;
- u32 mask = 0xf0000000;
- u8 shift = 8*4;
- u8 digit;
- if (len < 10) {
- /* Need more than 10chars for this format */
- *str_ptr = '\0';
- return -EINVAL;
- }
- while (shift > 0) {
+ struct bnx2x *bp = params->bp;
+ u16 val, val1, val2;
+ u8 link_up = 0;
+
+ /* Check 10G-BaseT link status */
+ /* Check PMD signal ok */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, 0xFFFA, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ &val2);
+ DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+ /* Check link 10G */
+ if (val2 & (1<<11)) {
+ vars->line_speed = SPEED_10000;
+ link_up = 1;
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ } else { /* Check Legacy speed link */
+ u16 legacy_status, legacy_speed;
+
+ /* Enable expansion register 0x42 (Operation mode status) */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+ /* Get legacy speed operation status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+ &legacy_status);
+
+ DP(NETIF_MSG_LINK, "Legacy speed status"
+ " = 0x%x\n", legacy_status);
+ link_up = ((legacy_status & (1<<11)) == (1<<11));
+ if (link_up) {
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else /* Should not happen */
+ vars->line_speed = 0;
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
- str_ptr++;
- mask = mask >> 4;
- if (shift == 4*4) {
- *str_ptr = ':';
- str_ptr++;
+ if (legacy_status & (1<<8))
+ vars->duplex = DUPLEX_FULL;
+ else
+ vars->duplex = DUPLEX_HALF;
+
+ DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+ " is_duplex_full= %d\n", vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+ /* Check legacy speed AN resolution */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
}
}
- *str_ptr = '\0';
- return 0;
+ if (link_up) {
+ DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ vars->line_speed);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+
+ return link_up;
}
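/*
 * Illustrative decode of the legacy (expansion register 0x42) status
 * word used above, assuming only the bit layout implied by the code:
 * bit 11 = link up, bits 9..10 = speed (0 = 10M, 1 = 100M, 2 = 1000M),
 * bit 8 = full duplex. The helper name is hypothetical.
 */
static u16 bnx2x_sketch_legacy_speed(u16 legacy_status)
{
	switch ((legacy_status >> 9) & 0x3) {
	case 0: return SPEED_10;
	case 1: return SPEED_100;
	case 2: return SPEED_1000;
	default: return 0;	/* should not happen */
	}
}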
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len)
+static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- struct bnx2x *bp;
- u32 ext_phy_type = 0;
- u32 spirom_ver = 0;
- u8 status;
+ u8 status = 0;
+ u32 spirom_ver;
+ spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+ status = bnx2x_format_ver(spirom_ver, str, len);
+ return status;
+}
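/*
 * A worked example of the repacking above (value chosen for
 * illustration): with raw_ver = 0x0185, bits 7..11 give
 * (0x0185 & 0xF80) >> 7 = 0x0003 and bits 0..6 give
 * 0x0185 & 0x7F = 0x0005, so spirom_ver = 0x00030005, which
 * bnx2x_format_ver() then renders as the string "0003:0005".
 */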
- if (version == NULL || params == NULL)
- return -EINVAL;
- bp = params->bp;
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
- spirom_ver = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].ext_phy_fw_version));
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
- status = 0;
- /* reset the returned value to zero */
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+}
- if (len < 5)
- return -EINVAL;
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
- version[0] = (spirom_ver & 0xFF);
- version[1] = (spirom_ver & 0xFF00) >> 8;
- version[2] = (spirom_ver & 0xFF0000) >> 16;
- version[3] = (spirom_ver & 0xFF000000) >> 24;
- version[4] = '\0';
+ switch (mode) {
+ case LED_MODE_OFF:
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- status = bnx2x_format_ver(spirom_ver, version, len);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
- (spirom_ver & 0x7F);
- status = bnx2x_format_ver(spirom_ver, version, len);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- version[0] = '\0';
- break;
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:"
- " type is FAILURE!\n");
- status = -EINVAL;
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
break;
+ case LED_MODE_FRONT_PANEL_OFF:
- default:
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+ params->port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
break;
- }
- return status;
-}
+ case LED_MODE_ON:
-static void bnx2x_set_xgxs_loopback(struct link_params *params,
- struct link_vars *vars,
- u8 is_10g)
-{
- u8 port = params->port;
- struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
- if (is_10g) {
- u32 md_devad;
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= 0x8000;
+ val |= 0x2492;
- DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
- /* change the uni_phy_addr in the nig */
- md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
- bnx2x_cl45_write(bp, port, 0,
- params->phy_addr,
- 5,
- (MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)),
- 0x2800);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
- bnx2x_cl45_write(bp, port, 0,
- params->phy_addr,
- 5,
- (MDIO_REG_BANK_CL73_IEEEB0 +
- (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
- 0x6041);
- msleep(200);
- /* set aer mmd back */
- bnx2x_set_aer_mmd(params, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
+ }
+ break;
- /* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- md_devad);
+ case LED_MODE_OPER:
- } else {
- u16 mii_control;
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
- DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
- CL45_RD_OVER_CL22(bp, port,
- params->phy_addr,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+
+ if (!((val &
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
+				DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 0xa492);
+ }
- CL45_WR_OVER_CL22(bp, port,
- params->phy_addr,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+ }
+ break;
}
}
+/******************************************************************/
+/* SFX7101 PHY SECTION */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* SFX7101_XGXS_TEST1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
-
-static void bnx2x_ext_phy_loopback(struct link_params *params)
+static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u16 fw_ver1, fw_ver2, val;
struct bnx2x *bp = params->bp;
- u8 ext_phy_addr;
- u32 ext_phy_type;
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
- if (params->switch_cfg == SWITCH_CFG_10G) {
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- /* CL37 Autoneg Enabled */
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
- DP(NETIF_MSG_LINK,
- "ext_phy_loopback: We should not get here\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 0x0001);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- /* SFX7101_XGXS_TEST1 */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_XS_DEVAD,
- MDIO_XS_SFX7101_XGXS_TEST1,
- 0x100);
- DP(NETIF_MSG_LINK,
- "ext_phy_loopback: set ext phy loopback\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ /* Restart autoneg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ val |= 0x200;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+ /* Save spirom version */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, params->port,
+ (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+ return 0;
+}
- break;
- } /* switch external PHY type */
- } else {
- /* serdes */
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- ext_phy_addr = (params->ext_phy_config &
- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK)
- >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT;
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up;
+ u16 val1, val2;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+ val2, val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+ val2, val1);
+ link_up = ((val1 & 4) == 4);
+ /* if link is up
+ * print the AN outcome of the SFX7101 PHY
+ */
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+ &val2);
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+ val2, (val2 & (1<<14)));
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
}
+ return link_up;
}
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requsted led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
- u32 led_idx, u32 value)
+static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- u32 reg_val;
+ if (*len < 5)
+ return -EINVAL;
+ str[0] = (spirom_ver & 0xFF);
+ str[1] = (spirom_ver & 0xFF00) >> 8;
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+ *len -= 5;
+ return 0;
+}
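/*
 * Usage sketch (value chosen for illustration): with
 * spirom_ver = 0x01020304 the helper above emits the raw bytes
 * 0x04, 0x03, 0x02, 0x01 into str[0..3], terminates the buffer and
 * decrements *len by 5; callers must therefore pass a buffer of at
 * least five bytes.
 */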
- /* If port 0 then use EMAC0, else use EMAC1*/
- u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() port %x led_idx %d value %d\n",
- port, led_idx, value);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
- switch (led_idx) {
- case 0: /* 10MB led */
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 10M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_10MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 1: /*100MB led */
- /*Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 100M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_100MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ for (cnt = 0; cnt < 10; cnt++) {
+ msleep(50);
+		/* Write a self-clearing reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
+ /* Wait for clear */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ if ((val & (1<<15)) == 0)
+ break;
+ }
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+ /* Low power mode is controlled by GPIO 2 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ /* The PHY reset is controlled by GPIO 1 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ u16 val = 0;
+ struct bnx2x *bp = params->bp;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ val = 2;
break;
- case 2: /* 1000MB led */
- /* Read the current value of the LED register in the
- EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
- reset it. */
- reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ case LED_MODE_ON:
+ val = 1;
break;
- case 3: /* 2500MB led */
- /* Read the current value of the LED register in the
- EMAC block*/
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ case LED_MODE_OPER:
+ val = 0;
break;
- case 4: /*10G led */
- if (port == 0) {
- REG_WR(bp, NIG_REG_LED_10G_P0,
- value);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7107_LINK_LED_CNTL,
+ val);
+}
+
+/******************************************************************/
+/* STATIC PHY DECLARATION */
+/******************************************************************/
+
+static struct bnx2x_phy phy_null = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)NULL,
+ .read_status = (read_status_t)NULL,
+ .link_reset = (link_reset_t)NULL,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_serdes = {
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .flags = 0,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_init_serdes,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_xgxs = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .flags = 0,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_init_xgxs,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_7101 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_7101_config_init,
+ .read_status = (read_status_t)bnx2x_7101_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8073 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ .addr = 0xff,
+ .flags = FLAGS_HW_LOCK_REQUIRED,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8073_config_init,
+ .read_status = (read_status_t)bnx2x_8073_read_status,
+ .link_reset = (link_reset_t)bnx2x_8073_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8705 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+ .addr = 0xff,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8705_config_init,
+ .read_status = (read_status_t)bnx2x_8705_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8706 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+ .addr = 0xff,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8706_config_init,
+ .read_status = (read_status_t)bnx2x_8706_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8726 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+ .addr = 0xff,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_INIT_XGXS_FIRST),
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8726_config_init,
+ .read_status = (read_status_t)bnx2x_8726_read_status,
+ .link_reset = (link_reset_t)bnx2x_8726_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8727 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8727_config_init,
+ .read_status = (read_status_t)bnx2x_8727_read_status,
+ .link_reset = (link_reset_t)bnx2x_8727_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+static struct bnx2x_phy phy_8481 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8481_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_8481_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84823 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+/*****************************************************************/
+/* */
+/* Populate the phy accordingly. Main function: bnx2x_populate_phy    */
+/* */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+ struct bnx2x_phy *phy, u8 port,
+ u8 phy_index)
+{
+ /* Get the 4 lanes xgxs config rx and tx */
+ u32 rx = 0, tx = 0, i;
+ for (i = 0; i < 2; i++) {
+ /**
+ * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, this value
+ * applies only to EXT_PHY1
+ */
+ if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
} else {
- REG_WR(bp, NIG_REG_LED_10G_P1,
- value);
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
}
+
+ phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+ phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+ phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+ phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ }
+}
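Note on the layout read above: each 32-bit xgxs_config word pulled from the shmem carries two 16-bit preemphasis values, and the code places the high half-word at index i<<1 and the low half-word at the next index, spreading two reads over four lanes. The snippet below is a minimal standalone sketch of that unpacking with made-up word values; it illustrates the packing only and is not driver code.

#include <stdint.h>
#include <stdio.h>

/* Split two 32-bit words into four 16-bit preemphasis entries,
 * mirroring the i<<1 indexing used in bnx2x_populate_preemphasis(). */
static void unpack_preemphasis(uint16_t out[4], const uint32_t words[2])
{
	int i;

	for (i = 0; i < 2; i++) {
		out[i << 1]       = (words[i] >> 16) & 0xffff; /* high half-word */
		out[(i << 1) + 1] = words[i] & 0xffff;         /* low half-word */
	}
}

int main(void)
{
	const uint32_t rx_words[2] = { 0xaaaabbbb, 0xccccdddd };
	uint16_t rx_pre[4];
	int i;

	unpack_preemphasis(rx_pre, rx_words);
	for (i = 0; i < 4; i++)
		printf("rx_preemphasis[%d] = 0x%04x\n", i, (unsigned)rx_pre[i]);
	return 0;
}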
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+ u8 phy_index, u8 port)
+{
+ u32 ext_phy_config = 0;
+ switch (phy_index) {
+ case EXT_PHY1:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
break;
- case 5: /* TRAFFIC led */
- /* Find if the traffic control is via BMAC or EMAC */
- if (port == 0)
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
- else
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
+ case EXT_PHY2:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config2));
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+ return -EINVAL;
+ }
- /* Override the traffic led in the EMAC:*/
- if (reg_val == 1) {
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base +
- EMAC_REG_EMAC_LED);
- /* Set the TRAFFIC_OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the TRAFFIC bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
- (reg_val & ~EMAC_LED_TRAFFIC);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- } else { /* Override the traffic led in the BMAC: */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
- value);
- }
+ return ext_phy_config;
+}
+static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 phy_addr;
+ u32 chip_id;
+ u32 switch_cfg = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[port].link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case SWITCH_CFG_10G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
break;
default:
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() unknown led index %d "
- "(should be 0-5)\n", led_idx);
+ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
return -EINVAL;
}
+ phy->addr = (u8)phy_addr;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
+ if (CHIP_IS_E2(bp))
+ phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
+ else
+ phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+ DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+ port, phy->addr, phy->mdio_ctrl);
+
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
return 0;
}
-
-u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
+static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
+ u8 phy_index,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port,
+ struct bnx2x_phy *phy)
{
- u8 port = params->port;
- u16 hw_led_mode = params->hw_led_mode;
- u8 rc = 0;
- u32 tmp;
- u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- struct bnx2x *bp = params->bp;
- DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
- DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
- speed, hw_led_mode);
- switch (mode) {
- case LED_MODE_OFF:
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- SHARED_HW_CFG_LED_MAC1);
-
- tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+ u32 ext_phy_config, phy_type, config2;
+ u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+ ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+ phy_index, port);
+ phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ /* Select the phy type */
+ switch (phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+ *phy = phy_8073;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+ *phy = phy_8705;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+ *phy = phy_8706;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8726;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ /* BCM8727_NOC => BCM8727 without over-current detection */
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ phy->flags |= FLAGS_NOC;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+ *phy = phy_8481;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ *phy = phy_84823;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ *phy = phy_7101;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ *phy = phy_null;
+ return -EINVAL;
+ default:
+ *phy = phy_null;
+ return 0;
+ }
- case LED_MODE_OPER:
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
- } else {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
- }
+ phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
- port*4, 0);
- /* Set blinking rate to ~15.9Hz */
- REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
- LED_BLINK_RATE_VAL);
- REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
- port*4, 1);
- tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ /**
+ * The shmem address of the phy version is located in different
+ * structures. In case this structure is too old, do not set
+ * the address.
+ */
+ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+ dev_info.shared_hw_config.config2));
+ if (phy_index == EXT_PHY1) {
+ phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+ port_mb[port].ext_phy_fw_version);
+
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ } else {
+ u32 size = REG_RD(bp, shmem2_base);
- if (CHIP_IS_E1(bp) &&
- ((speed == SPEED_2500) ||
- (speed == SPEED_1000) ||
- (speed == SPEED_100) ||
- (speed == SPEED_10))) {
- /* On Everest 1 Ax chip versions for speeds less than
- 10G LED scheme is different */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
- port*4, 0);
- REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
- port*4, 1);
+ if (size >
+ offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ phy->ver_addr = shmem2_base +
+ offsetof(struct shmem2_region,
+ ext_phy_fw_version2[port]);
}
- break;
-
- default:
- rc = -EINVAL;
- DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
- mode);
- break;
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ mdc_mdio_access = (config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+ (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
}
- return rc;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+ /**
+ * In case the mdc/mdio access of the external phy is different from the
+ * mdc/mdio access of the XGXS, a HW lock must be taken in each access
+ * to prevent one port from interfering with another port's CL45 operations.
+ */
+ if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+ phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+ DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+ phy_type, port, phy_index);
+ DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+ phy->addr, phy->mdio_ctrl);
+ return 0;
}
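The shift by (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT - SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT) above re-aligns the second phy's MDC/MDIO field with the first phy's field position, so the single decoder (bnx2x_get_emac_base) can interpret either value. Below is a minimal standalone sketch of that re-alignment using illustrative mask and shift values, not the real SHARED_HW_CFG_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions only. */
#define ACCESS1_SHIFT	0
#define ACCESS1_MASK	(0x7u << ACCESS1_SHIFT)
#define ACCESS2_SHIFT	4
#define ACCESS2_MASK	(0x7u << ACCESS2_SHIFT)

int main(void)
{
	uint32_t config2 = (0x5u << ACCESS2_SHIFT) | (0x3u << ACCESS1_SHIFT);

	/* EXT_PHY1: field already sits at the ACCESS1 position. */
	uint32_t acc1 = config2 & ACCESS1_MASK;

	/* EXT_PHY2: move the field down so it occupies the same bits,
	 * and therefore the same encoded values, as ACCESS1. */
	uint32_t acc2 = (config2 & ACCESS2_MASK) >>
			(ACCESS2_SHIFT - ACCESS1_SHIFT);

	printf("access1 = 0x%x, access2 (re-aligned) = 0x%x\n",
	       (unsigned)acc1, (unsigned)acc2);
	return 0;
}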
-u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
+static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+ u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
{
- struct bnx2x *bp = params->bp;
- u16 gp_status = 0;
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
- /* link is up only if both local phy and external phy are up */
- if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
- bnx2x_ext_phy_is_link_up(params, vars, 1))
- return 0;
-
- return -ESRCH;
+ u8 status = 0;
+ phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+ if (phy_index == INT_PHY)
+ return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+ status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, phy);
+ return status;
}
-static u8 bnx2x_link_initialize(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_phy_def_cfg(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 phy_index)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 rc = 0;
- u8 non_ext_phy;
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* Activate the external PHY */
- bnx2x_ext_phy_reset(params, vars);
-
- bnx2x_set_aer_mmd(params, vars);
+ u32 link_config;
+ /* Populate the default phy configuration for MF mode */
+ if (phy_index == EXT_PHY2) {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config2));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].speed_capability_mask2));
+ } else {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].speed_capability_mask));
+ }
+ DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
+ " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
+
+ phy->req_duplex = DUPLEX_FULL;
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ phy->req_line_speed = SPEED_10;
+ break;
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ phy->req_line_speed = SPEED_100;
+ break;
+ case PORT_FEATURE_LINK_SPEED_1G:
+ phy->req_line_speed = SPEED_1000;
+ break;
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ phy->req_line_speed = SPEED_2500;
+ break;
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ phy->req_line_speed = SPEED_10000;
+ break;
+ default:
+ phy->req_line_speed = SPEED_AUTO_NEG;
+ break;
+ }
- if (vars->phy_flags & PHY_XGXS_FLAG)
- bnx2x_set_master_ln(params);
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ case PORT_FEATURE_FLOW_CONTROL_AUTO:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_TX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_RX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_BOTH:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+ default:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ break;
+ }
+}
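In bnx2x_phy_def_cfg() above, the *_HALF cases rely on falling through to the matching *_FULL case: they only override the duplex before the shared speed assignment. The sketch below restates that decode as a standalone program with the fall-through made explicit; the enum values are placeholders, not the real PORT_FEATURE_* encodings.

#include <stdio.h>

enum cfg_speed { CFG_10M_HALF, CFG_10M_FULL, CFG_100M_HALF, CFG_100M_FULL, CFG_AUTO };
enum duplex { HALF, FULL };

static void decode(enum cfg_speed cfg, int *speed, enum duplex *duplex)
{
	*duplex = FULL;
	switch (cfg) {
	case CFG_10M_HALF:
		*duplex = HALF;
		/* fall through */
	case CFG_10M_FULL:
		*speed = 10;
		break;
	case CFG_100M_HALF:
		*duplex = HALF;
		/* fall through */
	case CFG_100M_FULL:
		*speed = 100;
		break;
	default:
		*speed = 0;	/* stands in for SPEED_AUTO_NEG */
		break;
	}
}

int main(void)
{
	int speed;
	enum duplex duplex;

	decode(CFG_10M_HALF, &speed, &duplex);
	printf("10M_HALF -> speed %d, duplex %s\n",
	       speed, duplex == HALF ? "half" : "full");
	return 0;
}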
- rc = bnx2x_reset_unicore(params);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
- return rc;
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+ u32 phy_config_swapped, prio_cfg;
+ u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ prio_cfg = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SELECTION_MASK;
+
+ if (phy_config_swapped) {
+ switch (prio_cfg) {
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ }
+ } else
+ return_cfg = prio_cfg;
- bnx2x_set_aer_mmd(params, vars);
+ return return_cfg;
+}
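bnx2x_phy_selection() mirrors the NVRAM priority value when the two external phys are physically swapped, so that "first" and "second" keep referring to the logical phys. A minimal standalone sketch of that mirroring with placeholder constants:

#include <stdio.h>

enum phy_sel { HW_DEFAULT, FIRST_PRIO, SECOND_PRIO, FIRST_ONLY, SECOND_ONLY };

static enum phy_sel mirror_selection(enum phy_sel prio_cfg, int swapped)
{
	if (!swapped)
		return prio_cfg;

	switch (prio_cfg) {
	case FIRST_PRIO:	return SECOND_PRIO;
	case SECOND_PRIO:	return FIRST_PRIO;
	case FIRST_ONLY:	return SECOND_ONLY;
	case SECOND_ONLY:	return FIRST_ONLY;
	default:		return HW_DEFAULT;
	}
}

int main(void)
{
	printf("swapped FIRST_PRIO -> %d (SECOND_PRIO)\n",
	       mirror_selection(FIRST_PRIO, 1));
	return 0;
}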
- /* setting the masterLn_def again after the reset */
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- bnx2x_set_master_ln(params);
- bnx2x_set_swap_lanes(params);
- }
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- if ((params->req_line_speed &&
- ((params->req_line_speed == SPEED_100) ||
- (params->req_line_speed == SPEED_10))) ||
- (!params->req_line_speed &&
- (params->speed_cap_mask >=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
- (params->speed_cap_mask <
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
- )) {
- vars->phy_flags |= PHY_SGMII_FLAG;
- } else {
- vars->phy_flags &= ~PHY_SGMII_FLAG;
+u8 bnx2x_phy_probe(struct link_params *params)
+{
+ u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u32 phy_config_swapped;
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "Begin phy probe\n");
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+ " actual_phy_idx %x\n", phy_config_swapped,
+ phy_index, actual_phy_idx);
+ phy = &params->phy[actual_phy_idx];
+ if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+ params->shmem2_base, params->port,
+ phy) != 0) {
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+ phy_index);
+ for (phy_index = INT_PHY;
+ phy_index < MAX_PHYS;
+ phy_index++)
+ params->phy[phy_index] = phy_null;
+ return -EINVAL;
}
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+ break;
+
+ bnx2x_phy_def_cfg(params, phy, phy_index);
+ params->num_phys++;
}
- /* In case of external phy existance, the line speed would be the
- line speed linked up by the external phy. In case it is direct only,
- then the line_speed during initialization will be equal to the
- req_line_speed*/
- vars->line_speed = params->req_line_speed;
- bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
+ DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+ return 0;
+}
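The swap applied in bnx2x_phy_probe() above (and again in set_phy_vars() just below) means that with PORT_HW_CFG_PHY_SWAPPED_ENABLED the EXT_PHY1 and EXT_PHY2 configurations land in the opposite params->phy[] slots, while INT_PHY always stays in slot 0. A standalone sketch of that index mapping:

#include <stdio.h>

#define INT_PHY		0
#define EXT_PHY1	1
#define EXT_PHY2	2

static int actual_phy_idx(int phy_index, int swapped)
{
	if (!swapped)
		return phy_index;
	if (phy_index == EXT_PHY1)
		return EXT_PHY2;
	if (phy_index == EXT_PHY2)
		return EXT_PHY1;
	return phy_index;	/* INT_PHY is never swapped */
}

int main(void)
{
	int idx;

	for (idx = INT_PHY; idx <= EXT_PHY2; idx++)
		printf("phy_index %d -> slot %d (swapped)\n",
		       idx, actual_phy_idx(idx, 1));
	return 0;
}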
- /* init ext phy and enable link state int */
- non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
- (params->loopback_mode == LOOPBACK_XGXS_10));
+u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
+{
+ if (phy_idx < params->num_phys)
+ return params->phy[phy_idx].supported;
+ return 0;
+}
- if (non_ext_phy ||
- (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
- (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
- (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
- (params->loopback_mode == LOOPBACK_EXT_PHY)) {
- if (params->req_line_speed == SPEED_AUTO_NEG)
- bnx2x_set_parallel_detection(params, vars->phy_flags);
- bnx2x_init_internal_phy(params, vars, non_ext_phy);
- }
+static void set_phy_vars(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 actual_phy_idx, phy_index, link_cfg_idx;
+ u8 phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
- if (!non_ext_phy)
- rc |= bnx2x_ext_phy_init(params, vars);
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
- bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
- return rc;
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+ DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x\n",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
}
-
u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u32 val;
-
DP(NETIF_MSG_LINK, "Phy Initialization started\n");
- DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n",
- params->req_line_speed, params->req_flow_ctrl);
+ DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -5966,11 +6877,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
-
- if (params->switch_cfg == SWITCH_CFG_1G)
- vars->phy_flags = PHY_SERDES_FLAG;
- else
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = 0;
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -5981,6 +6888,13 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_init(params, vars);
+ if (params->num_phys == 0) {
+ DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+ return -EINVAL;
+ }
+ set_phy_vars(params);
+
+ DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
if (CHIP_REV_IS_FPGA(bp)) {
vars->link_up = 1;
@@ -5999,7 +6913,9 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
bnx2x_emac_enable(params, vars, 0);
- bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
+ if (!(CHIP_IS_E2(bp)))
+ bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
/* disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -6040,7 +6956,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_phy_deassert(params, vars->phy_flags);
+ bnx2x_xgxs_deassert(params);
+
/* set bmac loopback */
bnx2x_bmac_enable(params, vars, 1);
@@ -6057,80 +6974,66 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_phy_deassert(params, vars->phy_flags);
+ bnx2x_xgxs_deassert(params);
/* set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
- bnx2x_emac_program(params, vars->line_speed,
- vars->duplex);
+ bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
params->port*4, 0);
- } else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
+ } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
+ if (params->req_line_speed[0] == SPEED_1000) {
+ vars->line_speed = SPEED_1000;
+ vars->mac_type = MAC_TYPE_EMAC;
+ } else {
+ vars->line_speed = SPEED_10000;
+ vars->mac_type = MAC_TYPE_BMAC;
+ }
- vars->phy_flags = PHY_XGXS_FLAG;
-
- val = REG_RD(bp,
- NIG_REG_XGXS0_CTRL_PHY_ADDR+
- params->port*0x18);
- params->phy_addr = (u8)val;
-
- bnx2x_phy_deassert(params, vars->phy_flags);
+ bnx2x_xgxs_deassert(params);
bnx2x_link_initialize(params, vars);
- vars->mac_type = MAC_TYPE_BMAC;
-
+ if (params->req_line_speed[0] == SPEED_1000) {
+ bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+ } else
bnx2x_bmac_enable(params, vars, 0);
- if (params->loopback_mode == LOOPBACK_XGXS_10) {
+ if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
- bnx2x_set_xgxs_loopback(params, vars, 1);
+ params->phy[INT_PHY].config_loopback(
+ &params->phy[INT_PHY],
+ params);
+
} else {
/* set external phy loopback */
- bnx2x_ext_phy_loopback(params);
+ u8 phy_index;
+ for (phy_index = EXT_PHY1;
+ phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].config_loopback)
+ params->phy[phy_index].config_loopback(
+ &params->phy[phy_index],
+ params);
+ }
}
+
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
params->port*4, 0);
- bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, vars->line_speed);
} else
/* No loopback */
{
- bnx2x_phy_deassert(params, vars->phy_flags);
- switch (params->switch_cfg) {
- case SWITCH_CFG_1G:
- vars->phy_flags |= PHY_SERDES_FLAG;
- if ((params->ext_phy_config &
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
- vars->phy_flags |= PHY_SGMII_FLAG;
- }
-
- val = REG_RD(bp,
- NIG_REG_SERDES0_CTRL_PHY_ADDR+
- params->port*0x10);
-
- params->phy_addr = (u8)val;
-
- break;
- case SWITCH_CFG_10G:
- vars->phy_flags |= PHY_XGXS_FLAG;
- val = REG_RD(bp,
- NIG_REG_XGXS0_CTRL_PHY_ADDR+
- params->port*0x18);
- params->phy_addr = (u8)val;
-
- break;
- default:
- DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
- return -EINVAL;
- }
- DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
+ if (params->switch_cfg == SWITCH_CFG_10G)
+ bnx2x_xgxs_deassert(params);
+ else
+ bnx2x_serdes_deassert(bp, params->port);
bnx2x_link_initialize(params, vars);
msleep(30);
@@ -6138,29 +7041,11 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
return 0;
}
-
-static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
-{
- DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
-
- /* Set serial boot control for external load */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL, 0x0001);
-}
-
u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u32 ext_phy_config = params->ext_phy_config;
- u8 port = params->port;
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
- u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
+ u8 phy_index, port = params->port;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -6189,73 +7074,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
* Hold it as vars low
*/
/* clear link led */
- bnx2x_set_led(params, LED_MODE_OFF, 0);
- if (reset_ext_phy) {
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- break;
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- {
-
- /* Disable Transmitter */
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr, 0);
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
- "low power mode\n",
- port);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- {
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- /* Set soft reset */
- bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- {
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x0000);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 1);
- break;
- }
- default:
- /* HW reset */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
- DP(NETIF_MSG_LINK, "reset external PHY\n");
+ if (reset_ext_phy) {
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].link_reset)
+ params->phy[phy_index].link_reset(
+ &params->phy[phy_index],
+ params);
}
}
- /* reset the SerDes/XGXS */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
- (0x1ff << (port*16)));
+ if (params->phy[INT_PHY].link_reset)
+ params->phy[INT_PHY].link_reset(
+ &params->phy[INT_PHY], params);
/* reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -6269,183 +7102,41 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
return 0;
}
-static u8 bnx2x_update_link_down(struct link_params *params,
- struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
-
- DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
- bnx2x_set_led(params, LED_MODE_OFF, 0);
-
- /* indicate no mac active */
- vars->mac_type = MAC_TYPE_NONE;
-
- /* update shared memory */
- vars->link_status = 0;
- vars->line_speed = 0;
- bnx2x_update_mng(params, vars->link_status);
-
- /* activate nig drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
-
- /* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
-
- msleep(10);
-
- /* reset BigMac */
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- return 0;
-}
-
-static u8 bnx2x_update_link_up(struct link_params *params,
- struct link_vars *vars,
- u8 link_10g, u32 gp_status)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 rc = 0;
-
- vars->link_status |= LINK_STATUS_LINK_UP;
- if (link_10g) {
- bnx2x_bmac_enable(params, vars, 0);
- bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
- } else {
- rc = bnx2x_emac_program(params, vars->line_speed,
- vars->duplex);
-
- bnx2x_emac_enable(params, vars, 0);
-
- /* AN complete? */
- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
- if (!(vars->phy_flags &
- PHY_SGMII_FLAG))
- bnx2x_set_gmii_tx_driver(params);
- }
- }
-
- /* PBF - link up */
- rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
-
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
- msleep(20);
- return rc;
-}
-/* This function should called upon link interrupt */
-/* In case vars->link_up, driver needs to
- 1. Update the pbf
- 2. Disable drain
- 3. Update the shared memory
- 4. Indicate link up
- 5. Set LEDs
- Otherwise,
- 1. Update shared memory
- 2. Reset BigMac
- 3. Report link down
- 4. Unset LEDs
-*/
-u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u16 gp_status;
- u8 link_10g;
- u8 ext_phy_link_up, rc = 0;
- u32 ext_phy_type;
- u8 is_mi_int = 0;
-
- DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
- port, (vars->phy_flags & PHY_XGXS_FLAG),
- REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
-
- is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port*0x18) > 0);
- DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
- REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
- is_mi_int,
- REG_RD(bp,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
-
- DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
-
- /* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
-
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* Check external link change only for non-direct */
- ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
-
- /* Read gp_status */
- CL45_RD_OVER_CL22(bp, port, params->phy_addr,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
-
- rc = bnx2x_link_settings_status(params, vars, gp_status,
- ext_phy_link_up);
- if (rc != 0)
- return rc;
-
- /* anything 10 and over uses the bmac */
- link_10g = ((vars->line_speed == SPEED_10000) ||
- (vars->line_speed == SPEED_12000) ||
- (vars->line_speed == SPEED_12500) ||
- (vars->line_speed == SPEED_13000) ||
- (vars->line_speed == SPEED_15000) ||
- (vars->line_speed == SPEED_16000));
-
- bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
-
- /* In case external phy link is up, and internal link is down
- ( not initialized yet probably after link initialization, it needs
- to be initialized.
- Note that after link down-up as result of cable plug,
- the xgxs link would probably become up again without the need to
- initialize it*/
-
- if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
- (ext_phy_link_up && !vars->phy_link_up))
- bnx2x_init_internal_phy(params, vars, 0);
-
- /* link is up only if both local phy and external phy are up */
- vars->link_up = (ext_phy_link_up && vars->phy_link_up);
-
- if (vars->link_up)
- rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
- else
- rc = bnx2x_update_link_down(params, vars);
-
- return rc;
-}
-
-static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
+static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
{
- u8 ext_phy_addr[PORT_MAX];
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
s8 port;
+ s8 port_of_path = 0;
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- /* Extract the ext phy address for the port */
- u32 ext_phy_config = REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].external_phy_config));
+ u32 shmem_base, shmem2_base;
+ /* In E2, the same phy is used for port0 of the two paths */
+ if (CHIP_IS_E2(bp)) {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ } else {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ }
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port_of_path, &phy[port]) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -6453,17 +7144,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
-
/* Need to take the phy out of low power mode in order
to write to access its registers */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
/* Reset the phy */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, &phy[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL,
1<<15);
@@ -6472,15 +7159,28 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
/* Add delay of 150ms after reset */
msleep(150);
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u16 fw_ver1;
+ if (CHIP_IS_E2(bp))
+ port_of_path = 0;
+ else
+ port_of_path = port;
- bnx2x_bcm8073_external_rom_boot(bp, port,
- ext_phy_addr[port], shmem_base);
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path);
- bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6492,16 +7192,12 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
}
/* Only set bit 10 = 1 (Tx power down) */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, &val);
/* Phase1 of TX_POWER_DOWN reset */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN,
(val | 1<<10));
@@ -6515,28 +7211,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
/* Phase2 of POWER_DOWN_RESET */
/* Release bit 10 (Release Tx power down) */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, &val);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
msleep(15);
/* Read modify write the SPI-ROM version select register */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_EDC_FFE_MAIN, &val);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
@@ -6545,46 +7233,111 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return 0;
-
}
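The E2/E1x branch in bnx2x_8073_common_init_phy() above, repeated in the 8726/8727 helpers below, selects which shmem copy and which port index describe the dual-port phy: on E2 the loop index selects the path (each path has its own shmem) and the phy is addressed as port 0 of that path, while on E1x both ports share shmem_base_path[0] and keep their own port index. A minimal standalone sketch of that selection, with is_e2 standing in for CHIP_IS_E2(bp) and made-up base addresses:

#include <stdint.h>
#include <stdio.h>

struct shmem_sel {
	uint32_t shmem_base;
	int port_of_path;
};

static struct shmem_sel select_shmem(const uint32_t shmem_base_path[2],
				     int port, int is_e2)
{
	struct shmem_sel sel;

	if (is_e2) {
		sel.shmem_base = shmem_base_path[port];	/* per-path shmem */
		sel.port_of_path = 0;			/* port 0 of that path */
	} else {
		sel.shmem_base = shmem_base_path[0];	/* single shmem */
		sel.port_of_path = port;
	}
	return sel;
}

int main(void)
{
	const uint32_t bases[2] = { 0xa0000, 0xb0000 };
	struct shmem_sel sel = select_shmem(bases, 1, 1);

	printf("E2 port1: shmem 0x%x, port_of_path %d\n",
	       (unsigned)sel.shmem_base, sel.port_of_path);
	return 0;
}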
+static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ u32 val;
+ s8 port;
+ struct bnx2x_phy phy;
+ /* Use port1 because of the static port-swap */
+ /* Enable the module detection interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= ((1<<MISC_REGISTERS_GPIO_3)|
+ (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+ bnx2x_ext_phy_hw_reset(bp, 1);
+ msleep(5);
+ for (port = 0; port < PORT_MAX; port++) {
+ u32 shmem_base, shmem2_base;
+
+ /* In E2, the same phy is used for port0 of the two paths */
+ if (CHIP_IS_E2(bp)) {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ } else {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ }
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+
+ /* Reset phy*/
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+
+ /* Set fault module detected LED on */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
+ }
-static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+ return 0;
+}
+static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
{
- u8 ext_phy_addr[PORT_MAX];
- s8 port, first_port, i;
+ s8 port;
u32 swap_val, swap_override;
- DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ s8 port_of_path;
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override));
- msleep(5);
+ port = 1;
- if (swap_val && swap_override)
- first_port = PORT_0;
- else
- first_port = PORT_1;
+ bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+
+ /* Calculate the port based on port swap */
+ port ^= (swap_val && swap_override);
+
+ msleep(5);
/* PART1 - Reset both phys */
- for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
- /* Extract the ext phy address for the port */
- u32 ext_phy_config = REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].external_phy_config));
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+
+ /* In E2, the same phy is used for port0 of the two paths */
+ if (CHIP_IS_E2(bp)) {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ } else {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ }
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port_of_path, &phy[port]) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
- ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
/* Reset the phy */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, &phy[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL,
1<<15);
@@ -6592,16 +7345,25 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
/* Add delay of 150ms after reset */
msleep(150);
-
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
/* PART2 - Download firmware to both phys */
- for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u16 fw_ver1;
-
- bnx2x_bcm8727_external_rom_boot(bp, port,
- ext_phy_addr[port], shmem_base);
-
- bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr[port],
+ if (CHIP_IS_E2(bp))
+ port_of_path = 0;
+ else
+ port_of_path = port;
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path);
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6616,82 +7378,35 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
return 0;
}
-
-static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
- u8 ext_phy_addr;
- u32 val;
- s8 port;
-
- /* Use port1 because of the static port-swap */
- /* Enable the module detection interrupt */
- val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
- val |= ((1<<MISC_REGISTERS_GPIO_3)|
- (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
- REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
-
- bnx2x_ext_phy_hw_reset(bp, 1);
- msleep(5);
- for (port = 0; port < PORT_MAX; port++) {
- /* Extract the ext phy address for the port */
- u32 ext_phy_config = REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].external_phy_config));
-
- ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
- DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
- ext_phy_addr);
-
- bnx2x_8726_reset_phy(bp, port, ext_phy_addr);
-
- /* Set fault module detected LED on */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- port);
- }
-
- return 0;
-}
-
-
-static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, 1);
- return 0;
-}
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 ext_phy_type, u32 chip_id)
{
u8 rc = 0;
- u32 ext_phy_type;
-
- DP(NETIF_MSG_LINK, "Begin common phy init\n");
-
- /* Read the ext_phy_type for arbitrary port(0) */
- ext_phy_type = XGXS_EXT_PHY_TYPE(
- REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[0].external_phy_config)));
switch (ext_phy_type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- {
- rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
- }
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
- rc = bnx2x_8727_common_init_phy(bp, shmem_base);
+ rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
/* GPIO1 affects both ports, so there's need to pull
it for single port alone */
- rc = bnx2x_8726_common_init_phy(bp, shmem_base);
+ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- rc = bnx2x_84823_common_init_phy(bp, shmem_base);
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ rc = -EINVAL;
break;
default:
DP(NETIF_MSG_LINK,
@@ -6703,33 +7418,81 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
return rc;
}
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id)
{
- u16 val, cnt;
+ u8 rc = 0;
+ u8 phy_index;
+ u32 ext_phy_type, ext_phy_config;
+ DP(NETIF_MSG_LINK, "Begin common phy init\n");
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ if (CHIP_REV_IS_EMUL(bp))
+ return 0;
- for (cnt = 0; cnt < 10; cnt++) {
- msleep(50);
- /* Writes a self-clearing reset */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET,
- (val | (1<<15)));
- /* Wait for clear */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ /* Read the ext_phy_type for arbitrary port(0) */
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ ext_phy_config = bnx2x_get_ext_phy_config(bp,
+ shmem_base_path[0],
+ phy_index, 0);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, ext_phy_type,
+ chip_id);
+ }
+ return rc;
+}
- if ((val & (1<<15)) == 0)
- break;
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+ u8 phy_index;
+ struct bnx2x_phy phy;
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ 0, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+
+ if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+ return 1;
+ }
+ return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port)
+{
+ u8 phy_index, fan_failure_det_req = 0;
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy)
+ != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+ fan_failure_det_req |= (phy.flags &
+ FLAGS_FAN_FAILURE_DET_REQ);
+ }
+ return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+ u8 phy_index;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (params->phy[phy_index].hw_reset) {
+ params->phy[phy_index].hw_reset(
+ &params->phy[phy_index],
+ params);
+ params->phy[phy_index] = phy_null;
+ }
}
}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8e..58a4c719927 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2010 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -22,7 +22,8 @@
/***********************************************************/
/* Defines */
/***********************************************************/
-#define DEFAULT_PHY_DEV_ADDR 3
+#define DEFAULT_PHY_DEV_ADDR 3
+#define E2_DEFAULT_PHY_DEV_ADDR 5
@@ -46,9 +47,137 @@
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+ (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
+/* Single Media board contains a single external phy */
+#define SINGLE_MEDIA(params) (params->num_phys == 2)
+/* Dual Media board contains two external phys with different media */
+#define DUAL_MEDIA(params) (params->num_phys == 3)
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
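FW_PARAM_SET() packs the phy address, the phy type bits and the MDC/MDIO access selector into one 32-bit firmware parameter, with the selector shifted up by FW_PARAM_MDIO_CTRL_OFFSET (16). A standalone sketch with placeholder values (argument parentheses are added here for the standalone macro):

#include <stdint.h>
#include <stdio.h>

#define FW_PARAM_MDIO_CTRL_OFFSET	16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
	((phy_addr) | (phy_type) | ((mdio_access) << FW_PARAM_MDIO_CTRL_OFFSET))

int main(void)
{
	uint32_t phy_addr = 0x1;	/* placeholder MDIO address */
	uint32_t phy_type = 0x100;	/* placeholder ext phy type bits */
	uint32_t mdio_access = 0x2;	/* placeholder MDC/MDIO selector */
	uint32_t param = FW_PARAM_SET(phy_addr, phy_type, mdio_access);

	printf("fw param = 0x%08x\n", (unsigned)param);	/* prints 0x00020101 */
	return 0;
}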
/***********************************************************/
/* Structs */
/***********************************************************/
+#define INT_PHY 0
+#define EXT_PHY1 1
+#define EXT_PHY2 2
+#define MAX_PHYS 3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
+ 0 : (_phy_idx - 1))
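LINK_CONFIG_IDX() encodes the comment above it: the XGXS and the first external phy share link-configuration slot 0 and only the second external phy gets its own slot, which is why the req_* arrays further down are sized LINK_CONFIG_SIZE = MAX_PHYS - 1. A standalone sketch of the mapping:

#include <stdio.h>

#define INT_PHY		0
#define EXT_PHY1	1
#define EXT_PHY2	2
#define MAX_PHYS	3

#define LINK_CONFIG_SIZE	(MAX_PHYS - 1)
#define LINK_CONFIG_IDX(_phy_idx) \
	((_phy_idx) == INT_PHY ? 0 : ((_phy_idx) - 1))

int main(void)
{
	int idx;

	/* Prints: phy 0 -> 0, phy 1 -> 0, phy 2 -> 1 */
	for (idx = INT_PHY; idx < MAX_PHYS; idx++)
		printf("phy %d -> link config %d\n", idx, LINK_CONFIG_IDX(idx));
	return 0;
}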
+/***********************************************************/
+/* bnx2x_phy struct */
+/* Defines the required arguments and function per phy */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u32 action);
+
+struct bnx2x_phy {
+ u32 type;
+
+ /* Loaded during init */
+ u8 addr;
+
+ u8 flags;
+ /* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED (1<<0)
+ /* No Over-Current detection */
+#define FLAGS_NOC (1<<1)
+ /* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+ /* Initialize the XGXS first and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST (1<<3)
+#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
+#define FLAGS_SFP_NOT_APPROVED (1<<7)
+
+ u8 def_md_devad;
+ u8 reserved;
+ /* preemphasis values for the rx side */
+ u16 rx_preemphasis[4];
+
+ /* preemphasis values for the tx side */
+ u16 tx_preemphasis[4];
+
+ /* EMAC base address used for MDIO access */
+ u32 mdio_ctrl;
+
+ u32 supported;
+
+ u32 media_type;
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFP_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_NOT_PRESENT 0xff
+
+ /* The address at which the phy version is located */
+ u32 ver_addr;
+
+ u16 req_flow_ctrl;
+
+ u16 req_line_speed;
+
+ u32 speed_cap_mask;
+
+ u16 req_duplex;
+ u16 rsrv;
+ /* Called per phy/port init, and it configures LASI, speed, autoneg,
+ duplex, flow control negotiation, etc. */
+ config_init_t config_init;
+
+ /* Called upon link interrupt. It determines the link and speed */
+ read_status_t read_status;
+
+ /* Called when the driver is unloading. Should reset the phy */
+ link_reset_t link_reset;
+
+ /* Set the loopback configuration for the phy */
+ config_loopback_t config_loopback;
+
+ /* Format the given raw number into str up to len */
+ format_fw_ver_t format_fw_ver;
+
+ /* Reset the phy (both ports) */
+ hw_reset_t hw_reset;
+
+ /* Set link led mode (on/off/oper)*/
+ set_link_led_t set_link_led;
+
+ /* PHY Specific tasks */
+ phy_specific_func_t phy_specific_func;
+#define DISABLE_TX 1
+#define ENABLE_TX 2
+};
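struct bnx2x_phy turns each phy into a small ops table: the per-phy callbacks are filled in during probe and invoked through the struct after a NULL check, as bnx2x_link_reset() and bnx2x_hw_reset_phy() do earlier in this patch. The sketch below shows that dispatch pattern with simplified stand-in types, not the driver's real ones:

#include <stdio.h>

struct demo_phy;

typedef void (*link_reset_t)(struct demo_phy *phy);

struct demo_phy {
	const char *name;
	link_reset_t link_reset;	/* may be NULL for phys without one */
};

static void demo_8727_link_reset(struct demo_phy *phy)
{
	printf("resetting %s\n", phy->name);
}

int main(void)
{
	struct demo_phy phys[2] = {
		{ "ext phy 1", demo_8727_link_reset },
		{ "ext phy 2", NULL },			/* no reset hook */
	};
	int i;

	for (i = 0; i < 2; i++)
		if (phys[i].link_reset)
			phys[i].link_reset(&phys[i]);
	return 0;
}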
+
/* Inputs parameters to the CLC */
struct link_params {
@@ -59,56 +188,50 @@ struct link_params {
#define LOOPBACK_NONE 0
#define LOOPBACK_EMAC 1
#define LOOPBACK_BMAC 2
-#define LOOPBACK_XGXS_10 3
+#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
#define LOOPBACK_EXT 5
- u16 req_duplex;
- u16 req_flow_ctrl;
- u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
- req_flow_ctrl is set to AUTO */
- u16 req_line_speed; /* Also determine AutoNeg */
-
/* Device parameters */
u8 mac_addr[6];
+ u16 req_duplex[LINK_CONFIG_SIZE];
+ u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+ u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
/* shmem parameters */
u32 shmem_base;
- u32 speed_cap_mask;
+ u32 shmem2_base;
+ u32 speed_cap_mask[LINK_CONFIG_SIZE];
u32 switch_cfg;
#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
- u16 hw_led_mode; /* part of the hw_config read from the shmem */
-
- /* phy_addr populated by the phy_init function */
- u8 phy_addr;
- /*u8 reserved1;*/
-
u32 lane_config;
- u32 ext_phy_config;
-#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
- ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
-#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
- (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
-#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
- ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
/* Phy register parameter */
u32 chip_id;
- u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */
- u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
-
u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
-#define FEATURE_CONFIG_BCM8727_NOC (1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+ /* Will be populated during common init */
+ struct bnx2x_phy phy[MAX_PHYS];
+
+ /* Will be populated during common init */
+ u8 num_phys;
+
+ u8 rsrv;
+ u16 hw_led_mode; /* part of the hw_config read from the shmem */
+ u32 multi_phy_config;
/* Device pointer passed to all callback functions */
struct bnx2x *bp;
+ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
};
/* Output parameters */
@@ -129,12 +252,6 @@ struct link_vars {
u16 flow_ctrl;
u16 ieee_fc;
- u32 autoneg;
-#define AUTO_NEG_DISABLED 0x0
-#define AUTO_NEG_ENABLED 0x1
-#define AUTO_NEG_COMPLETE 0x2
-#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
-
/* The same definitions as the shmem parameter */
u32 link_status;
};
@@ -142,8 +259,6 @@ struct link_vars {
/***********************************************************/
/* Functions */
/***********************************************************/
-
-/* Initialize the phy */
u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
/* Reset the link. Should be called when driver or interface goes down
@@ -155,17 +270,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
/* bnx2x_link_update should be called upon link interrupt */
u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
-/* use the following cl45 functions to read/write from external_phy
+/* use the following phy functions to read/write from external_phy
In order to use it to read/write internal phy registers, use
DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
- Use ext_phy_type of 0 in case of cl22 over cl45
the register */
-u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 *ret_val);
+u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val);
+
+u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val);
-u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 val);
+u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val);
+u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@@ -178,9 +297,12 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
Basically, the CLC takes care of the led for the link, but in case one needs
to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off.*/
-u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
-#define LED_MODE_OFF 0
-#define LED_MODE_OPER 2
+u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
+ u8 mode, u32 speed);
+#define LED_MODE_OFF 0
+#define LED_MODE_ON 1
+#define LED_MODE_OPER 2
+#define LED_MODE_FRONT_PANEL_OFF 3
u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
@@ -190,17 +312,39 @@ void bnx2x_handle_module_detect_int(struct link_params *params);
/* Get the actual link status. In case it returns 0, link is up,
otherwise link is down*/
-u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
+u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
+ u8 is_serdes);
/* One-time initialization for external phy after power up */
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id);
/* Reset the external PHY using GPIO */
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr);
+/* Reset the external of SFX7101 */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
-u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf);
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Checks if HW lock is required for this phy/board type */
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base);
+
+/* Returns the aggregative supported attributes of the phys on board */
+u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+u8 bnx2x_phy_probe(struct link_params *params);
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 port);
+
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index b4ec2b02a46..ead524bca8f 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -23,7 +23,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -57,7 +56,6 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
-
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
@@ -66,8 +64,9 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
-#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
-#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -77,18 +76,20 @@ static char version[] __devinitdata =
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II "
+ "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
"(0 Disable; 1 Enable (default))");
-static int num_queues;
+int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
" (default is as a number of CPUs)");
@@ -124,6 +125,8 @@ enum bnx2x_board_type {
BCM57710 = 0,
BCM57711 = 1,
BCM57711E = 2,
+ BCM57712 = 3,
+ BCM57712E = 4
};
/* indexed by board_type, above */
@@ -132,14 +135,24 @@ static struct {
} board_info[] __devinitdata = {
{ "Broadcom NetXtreme II BCM57710 XGb" },
{ "Broadcom NetXtreme II BCM57711 XGb" },
- { "Broadcom NetXtreme II BCM57711E XGb" }
+ { "Broadcom NetXtreme II BCM57711E XGb" },
+ { "Broadcom NetXtreme II BCM57712 XGb" },
+ { "Broadcom NetXtreme II BCM57712E XGb" }
};
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 0x1662
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712E
+#define PCI_DEVICE_ID_NX2_57712E 0x1663
+#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
{ 0 }
};
@@ -149,6 +162,244 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
* General service functions
****************************************************************************/
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+ u32 addr, dma_addr_t mapping)
+{
+ REG_WR(bp, addr, U64_LO(mapping));
+ REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static inline void __storm_memset_fill(struct bnx2x *bp,
+ u32 addr, size_t size, u32 val)
+{
+ int i;
+ for (i = 0; i < size/4; i++)
+ REG_WR(bp, addr + (i * 4), val);
+}
+
+static inline void storm_memset_ustats_zero(struct bnx2x *bp,
+ u8 port, u16 stat_id)
+{
+ size_t size = sizeof(struct ustorm_per_client_stats);
+
+ u32 addr = BAR_USTRORM_INTMEM +
+ USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+ __storm_memset_fill(bp, addr, size, 0);
+}
+
+static inline void storm_memset_tstats_zero(struct bnx2x *bp,
+ u8 port, u16 stat_id)
+{
+ size_t size = sizeof(struct tstorm_per_client_stats);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+ __storm_memset_fill(bp, addr, size, 0);
+}
+
+static inline void storm_memset_xstats_zero(struct bnx2x *bp,
+ u8 port, u16 stat_id)
+{
+ size_t size = sizeof(struct xstorm_per_client_stats);
+
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+ __storm_memset_fill(bp, addr, size, 0);
+}
+
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
+{
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
+}
+
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+ struct tstorm_eth_function_common_config *tcfg,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+static inline void storm_memset_xstats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_tstats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_ustats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_cstats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_xstats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_tstats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_ustats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_USTRORM_INTMEM +
+ USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_cstats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+ struct event_ring_data *eq_data,
+ u16 pfid)
+{
+ size_t size = sizeof(struct event_ring_data);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+ u16 pfid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+ REG_WR16(bp, addr, eq_prod);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 ticks)
+{
+
+ int index_offset = CHIP_IS_E2(bp) ?
+ offsetof(struct hc_status_block_data_e2, index_data) :
+ offsetof(struct hc_status_block_data_e1x, index_data);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ index_offset +
+ sizeof(struct hc_index_data)*sb_index +
+ offsetof(struct hc_index_data, timeout);
+ REG_WR8(bp, addr, ticks);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
+}
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ int index_offset = CHIP_IS_E2(bp) ?
+ offsetof(struct hc_status_block_data_e2, index_data) :
+ offsetof(struct hc_status_block_data_e1x, index_data);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ index_offset +
+ sizeof(struct hc_index_data)*sb_index +
+ offsetof(struct hc_index_data, flags);
+ u16 flags = REG_RD16(bp, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR16(bp, addr, flags);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
+}
+
/* used only at init
* locking is done by mcp
*/
@@ -172,6 +423,75 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
return val;
}
+#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE "dst_addr [none]"
+
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
+ "dst_addr [none]\n"
+ DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ DP_LEVEL "src_addr [%08x] len [%d * 4] "
+ "dst_addr [none]\n"
+ DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+}
+
const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
@@ -195,85 +515,137 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
REG_WR(bp, dmae_reg_go_c[idx], 1);
}
-void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
- u32 len32)
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
- struct dmae_command dmae;
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
- int cnt = 200;
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_CMD_C_ENABLE);
+}
- if (!bp->dmae_ready) {
- u32 *data = bnx2x_sp(bp, wb_data[0]);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+ return opcode & ~DMAE_CMD_SRC_RESET;
+}
- DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
- " using indirect\n", dst_addr, len32);
- bnx2x_init_ind_wr(bp, dst_addr, data, len32);
- return;
- }
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type)
+{
+ u32 opcode = 0;
+
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+ opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
- memset(&dmae, 0, sizeof(struct dmae_command));
+ opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+ opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
- dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
- dmae.src_addr_lo = U64_LO(dma_addr);
- dmae.src_addr_hi = U64_HI(dma_addr);
- dmae.dst_addr_lo = dst_addr >> 2;
- dmae.dst_addr_hi = 0;
- dmae.len = len32;
- dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
- dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
- dmae.comp_val = DMAE_COMP_VAL;
-
- DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
- DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
- "dst_addr [%x:%08x (%08x)]\n"
- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
- dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
- dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
- dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
- DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ if (with_comp)
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+ return opcode;
+}
+
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
+{
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ /* set the opcode */
+ dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+ true, DMAE_COMP_PCI);
+
+ /* fill in the completion parameters */
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a dmae command over the init channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+{
+ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+ int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
+ int rc = 0;
+
+ DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+ /* lock the dmae channel */
mutex_lock(&bp->dmae_mutex);
+ /* reset completion */
*wb_comp = 0;
- bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
+ /* post the command on the channel used for initializations */
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ /* wait for completion */
udelay(5);
-
- while (*wb_comp != DMAE_COMP_VAL) {
+ while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
if (!cnt) {
BNX2X_ERR("DMAE timeout!\n");
- break;
+ rc = DMAE_TIMEOUT;
+ goto unlock;
}
cnt--;
- /* adjust delay for emulation/FPGA */
- if (CHIP_REV_IS_SLOW(bp))
- msleep(100);
- else
- udelay(5);
+ udelay(50);
+ }
+ if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ BNX2X_ERR("DMAE PCI error!\n");
+ rc = DMAE_PCI_ERROR;
}
+ DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+unlock:
mutex_unlock(&bp->dmae_mutex);
+ return rc;
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+ DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
+ " using indirect\n", dst_addr, len32);
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = U64_LO(dma_addr);
+ dmae.src_addr_hi = U64_HI(dma_addr);
+ dmae.dst_addr_lo = dst_addr >> 2;
+ dmae.dst_addr_hi = 0;
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
struct dmae_command dmae;
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
- int cnt = 200;
if (!bp->dmae_ready) {
u32 *data = bnx2x_sp(bp, wb_data[0]);
@@ -286,62 +658,20 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
return;
}
- memset(&dmae, 0, sizeof(struct dmae_command));
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
- dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ /* fill in addresses and len */
dmae.src_addr_lo = src_addr >> 2;
dmae.src_addr_hi = 0;
dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
dmae.len = len32;
- dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
- dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
- dmae.comp_val = DMAE_COMP_VAL;
-
- DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
- DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
- "dst_addr [%x:%08x (%08x)]\n"
- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
- dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
- dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
- dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
- mutex_lock(&bp->dmae_mutex);
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
- memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
- *wb_comp = 0;
-
- bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
-
- udelay(5);
-
- while (*wb_comp != DMAE_COMP_VAL) {
-
- if (!cnt) {
- BNX2X_ERR("DMAE timeout!\n");
- break;
- }
- cnt--;
- /* adjust delay for emulation/FPGA */
- if (CHIP_REV_IS_SLOW(bp))
- msleep(100);
- else
- udelay(5);
- }
- DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
- mutex_unlock(&bp->dmae_mutex);
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -508,19 +838,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
u32 mark, offset;
__be32 data[9];
int word;
-
+ u32 trace_shmem_base;
if (BP_NOMCP(bp)) {
BNX2X_ERR("NO MCP - can not dump\n");
return;
}
- addr = bp->common.shmem_base - 0x0800 + 4;
+ if (BP_PATH(bp) == 0)
+ trace_shmem_base = bp->common.shmem_base;
+ else
+ trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
+ addr = trace_shmem_base - 0x0800 + 4;
mark = REG_RD(bp, addr);
- mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
+ mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ + ((mark + 0x3) & ~0x3) - 0x08000000;
pr_err("begin fw dump (mark 0x%x)\n", mark);
pr_err("");
- for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
+ for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
@@ -538,7 +873,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
void bnx2x_panic_dump(struct bnx2x *bp)
{
int i;
- u16 j, start, end;
+ u16 j;
+ struct hc_sp_status_block_data sp_sb_data;
+ int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+ u16 start = 0, end = 0;
+#endif
bp->stats_state = STATS_STATE_DISABLED;
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
@@ -547,44 +887,143 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
- " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
" spq_prod_idx(0x%x)\n",
- bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
- bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+ bp->def_idx, bp->def_att_idx,
+ bp->attn_state, bp->spq_prod_idx);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ bp->def_status_blk->atten_status_block.attn_bits,
+ bp->def_status_blk->atten_status_block.attn_bits_ack,
+ bp->def_status_blk->atten_status_block.status_block_id,
+ bp->def_status_blk->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ bp->def_status_blk->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
+ "pf_id(0x%x) vnic_id(0x%x) "
+ "vf_id(0x%x) vf_valid (0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid);
+
- /* Rx */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
-
+ int loop;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p =
+ CHIP_IS_E2(bp) ?
+ sb_data_e2.common.state_machine :
+ sb_data_e1x.common.state_machine;
+ struct hc_index_data *hc_index_p =
+ CHIP_IS_E2(bp) ?
+ sb_data_e2.index_data :
+ sb_data_e1x.index_data;
+ int data_size;
+ u32 *sb_data_p;
+
+ /* Rx */
BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
- " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+ " rx_comp_prod(0x%x)"
" rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
- le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+ fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
- " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
+ " fp_hc_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
- le16_to_cpu(fp->fp_u_idx),
- fp->status_blk->u_status_block.status_block_index);
- }
-
- /* Tx */
- for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
+ le16_to_cpu(fp->fp_hc_idx));
+ /* Tx */
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
" tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
" *tx_cons_sb(0x%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
- " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
- fp->status_blk->c_status_block.status_block_index,
- fp->tx_db.data.prod);
+
+ loop = CHIP_IS_E2(bp) ?
+ HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
+
+ /* host sb data */
+
+ BNX2X_ERR(" run indexes (");
+ for (j = 0; j < HC_SB_MAX_SM; j++)
+ pr_cont("0x%x%s",
+ fp->sb_running_index[j],
+ (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+ BNX2X_ERR(" indexes (");
+ for (j = 0; j < loop; j++)
+ pr_cont("0x%x%s",
+ fp->sb_index_values[j],
+ (j == loop - 1) ? ")" : " ");
+ /* fw sb data */
+ data_size = CHIP_IS_E2(bp) ?
+ sizeof(struct hc_status_block_data_e2) :
+ sizeof(struct hc_status_block_data_e1x);
+ data_size /= sizeof(u32);
+ sb_data_p = CHIP_IS_E2(bp) ?
+ (u32 *)&sb_data_e2 :
+ (u32 *)&sb_data_e1x;
+ /* copy sb data in here */
+ for (j = 0; j < data_size; j++)
+ *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+ j * sizeof(u32));
+
+ if (CHIP_IS_E2(bp)) {
+ pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
+ sb_data_e2.common.p_func.pf_id,
+ sb_data_e2.common.p_func.vf_id,
+ sb_data_e2.common.p_func.vf_valid,
+ sb_data_e2.common.p_func.vnic_id,
+ sb_data_e2.common.same_igu_sb_1b);
+ } else {
+ pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
+ sb_data_e1x.common.p_func.pf_id,
+ sb_data_e1x.common.p_func.vf_id,
+ sb_data_e1x.common.p_func.vf_valid,
+ sb_data_e1x.common.p_func.vnic_id,
+ sb_data_e1x.common.same_igu_sb_1b);
+ }
+
+ /* SB_SMs data */
+ for (j = 0; j < HC_SB_MAX_SM; j++) {
+ pr_cont("SM[%d] __flags (0x%x) "
+ "igu_sb_id (0x%x) igu_seg_id(0x%x) "
+ "time_to_expire (0x%x) "
+ "timer_value(0x%x)\n", j,
+ hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
+ }
+
+		/* Indices data */
+ for (j = 0; j < loop; j++) {
+ pr_cont("INDEX[%d] flags (0x%x) "
+ "timeout (0x%x)\n", j,
+ hc_index_p[j].flags,
+ hc_index_p[j].timeout);
+ }
}
+#ifdef BNX2X_STOP_ON_ERROR
/* Rings */
/* Rx */
for_each_queue(bp, i) {
@@ -642,13 +1081,13 @@ void bnx2x_panic_dump(struct bnx2x *bp)
i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
}
}
-
+#endif
bnx2x_fw_dump(bp);
bnx2x_mc_assert(bp);
BNX2X_ERR("end crash dump -----------------\n");
}
-void bnx2x_int_enable(struct bnx2x *bp)
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -690,9 +1129,9 @@ void bnx2x_int_enable(struct bnx2x *bp)
mmiowb();
barrier();
- if (CHIP_IS_E1H(bp)) {
+ if (!CHIP_IS_E1(bp)) {
/* init leading/trailing edge */
- if (IS_E1HMF(bp)) {
+ if (IS_MF(bp)) {
val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
@@ -708,7 +1147,66 @@ void bnx2x_int_enable(struct bnx2x *bp)
mmiowb();
}
-static void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+ u32 val;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ if (msix) {
+ val &= ~(IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+ } else if (msi) {
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ } else {
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ }
+
+ DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
+ val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+ barrier();
+
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_enable(bp);
+ else
+ bnx2x_igu_int_enable(bp);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -730,6 +1228,32 @@ static void bnx2x_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -781,7 +1305,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
DP(NETIF_MSG_HW,
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
- return -EINVAL;
+ return false;
}
if (func <= 5)
@@ -800,7 +1324,6 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
return false;
}
-
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
@@ -817,76 +1340,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
- bp->spq_left++;
-
- if (fp->index) {
- switch (command | fp->state) {
- case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
- BNX2X_FP_STATE_OPENING):
- DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
- cid);
- fp->state = BNX2X_FP_STATE_OPEN;
- break;
-
- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
- DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
- cid);
- fp->state = BNX2X_FP_STATE_HALTED;
- break;
-
- default:
- BNX2X_ERR("unexpected MC reply (%d) "
- "fp[%d] state is %x\n",
- command, fp->index, fp->state);
- break;
- }
- mb(); /* force bnx2x_wait_ramrod() to see the change */
- return;
- }
-
- switch (command | bp->state) {
- case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
- DP(NETIF_MSG_IFUP, "got setup ramrod\n");
- bp->state = BNX2X_STATE_OPEN;
+ switch (command | fp->state) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
+ DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
+ fp->state = BNX2X_FP_STATE_OPEN;
break;
- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
- bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
+ case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
+ DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
fp->state = BNX2X_FP_STATE_HALTED;
break;
- case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
- bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
- break;
-
-#ifdef BCM_CNIC
- case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
- DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
- bnx2x_cnic_cfc_comp(bp, cid);
- break;
-#endif
-
- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
- DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
- bp->set_mac_pending--;
- smp_wmb();
- break;
-
- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
- bp->set_mac_pending--;
- smp_wmb();
+ case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
+		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
+ fp->state = BNX2X_FP_STATE_TERMINATED;
break;
default:
- BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
- command, bp->state);
+ BNX2X_ERR("unexpected MC reply (%d) "
+ "fp[%d] state is %x\n",
+ command, fp->index, fp->state);
break;
}
- mb(); /* force bnx2x_wait_ramrod() to see the change */
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(&bp->spq_left);
+	/* push the change in fp->state towards the memory */
+ smp_wmb();
+
+ return;
}
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
@@ -914,25 +1396,22 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
#endif
- for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
+ for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- mask = 0x2 << fp->sb_id;
+ mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
if (status & mask) {
/* Handle Rx and Tx according to SB id */
prefetch(fp->rx_cons_sb);
- prefetch(&fp->status_blk->u_status_block.
- status_block_index);
prefetch(fp->tx_cons_sb);
- prefetch(&fp->status_blk->c_status_block.
- status_block_index);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
}
}
#ifdef BCM_CNIC
- mask = 0x2 << CNIC_SB_ID(bp);
+ mask = 0x2;
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
@@ -1227,49 +1706,91 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
return 0;
}
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = 0;
+ if (bp->link_vars.link_up) {
+ sel_phy_idx = EXT_PHY1;
+ /* In case link is SERDES, check if the EXT_PHY2 is the one */
+ if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+ sel_phy_idx = EXT_PHY2;
+ } else {
+
+ switch (bnx2x_phy_selection(&bp->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY2;
+ break;
+ }
+ }
+ /*
+	 * The selected active PHY is always the one after swapping (in case PHY
+	 * swapping is enabled). So when swapping is enabled, we need to reverse
+	 * the configuration.
+ */
+
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == EXT_PHY1)
+ sel_phy_idx = EXT_PHY2;
+ else if (sel_phy_idx == EXT_PHY2)
+ sel_phy_idx = EXT_PHY1;
+ }
+ return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
+ u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
switch (bp->link_vars.ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
- bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
- ADVERTISED_Pause);
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
break;
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
- bp->port.advertising |= (ADVERTISED_Asym_Pause |
- ADVERTISED_Pause);
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
break;
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
- bp->port.advertising |= ADVERTISED_Asym_Pause;
+ bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
break;
default:
- bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
- ADVERTISED_Pause);
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
break;
}
}
-
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
if (!BP_NOMCP(bp)) {
u8 rc;
-
+ int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
/* Initialize link parameters structure variables */
/* It is recommended to turn off RX FC for jumbo frames
for better performance */
- if (bp->dev->mtu > 5000)
+ if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
else
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
bnx2x_acquire_phy_lock(bp);
- if (load_mode == LOAD_DIAG)
- bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
+ if (load_mode == LOAD_DIAG) {
+ bp->link_params.loopback_mode = LOOPBACK_XGXS;
+ bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
+ }
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
@@ -1281,7 +1802,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
bnx2x_link_report(bp);
}
-
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
return rc;
}
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -1292,6 +1813,7 @@ void bnx2x_link_set(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -1310,13 +1832,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
-u8 bnx2x_link_test(struct bnx2x *bp)
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
u8 rc = 0;
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
bnx2x_release_phy_lock(bp);
} else
BNX2X_ERR("Bootcode is missing - can not test link\n");
@@ -1371,13 +1894,11 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
int all_zero = 1;
- int port = BP_PORT(bp);
int vn;
bp->vn_weight_sum = 0;
for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- int func = 2*vn + port;
- u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+ u32 vn_cfg = bp->mf_config[vn];
u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -1405,11 +1926,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
struct fairness_vars_per_vn m_fair_vn;
- u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+ u32 vn_cfg = bp->mf_config[vn];
+ int func = 2*vn + BP_PORT(bp);
u16 vn_min_rate, vn_max_rate;
int i;
@@ -1422,11 +1944,12 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
/* If min rate is zero - set it to 1 */
- if (!vn_min_rate)
+ if (bp->vn_weight_sum && (vn_min_rate == 0))
vn_min_rate = DEF_MIN_RATE;
vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
}
+
DP(NETIF_MSG_IFUP,
"func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
@@ -1467,6 +1990,83 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
((u32 *)(&m_fair_vn))[i]);
}
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+ if (CHIP_REV_IS_SLOW(bp))
+ return CMNG_FNS_NONE;
+ if (IS_MF(bp))
+ return CMNG_FNS_MINMAX;
+
+ return CMNG_FNS_NONE;
+}
+
+static void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+ int vn;
+
+ if (BP_NOMCP(bp))
+		return; /* what should be the default value in this case? */
+
+ for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ int /*abs*/func = 2*vn + BP_PORT(bp);
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ }
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+
+ if (cmng_type == CMNG_FNS_MINMAX) {
+ int vn;
+
+ /* clear cmng_enables */
+ bp->cmng.flags.cmng_enables = 0;
+
+ /* read mf conf from shmem */
+ if (read_cfg)
+ bnx2x_read_mf_cfg(bp);
+
+ /* Init rate shaping and fairness contexts */
+ bnx2x_init_port_minmax(bp);
+
+ /* vn_weight_sum and enable fairness if not 0 */
+ bnx2x_calc_vn_weight_sum(bp);
+
+ /* calculate and set min-max rate for each vn */
+ for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ bnx2x_init_vn_minmax(bp, vn);
+
+ /* always enable rate shaping and fairness */
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+ if (!bp->vn_weight_sum)
+			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
+ " fairness will be disabled\n");
+ return;
+ }
+
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "rate shaping and fairness are disabled\n");
+}
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ if (vn == BP_E1HVN(bp))
+ continue;
+
+ func = ((vn << 1) | port);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
@@ -1480,7 +2080,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
if (bp->link_vars.link_up) {
/* dropless flow control */
- if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
int port = BP_PORT(bp);
u32 pause_enabled = 0;
@@ -1508,37 +2108,19 @@ static void bnx2x_link_attn(struct bnx2x *bp)
if (prev_link_status != bp->link_vars.link_status)
bnx2x_link_report(bp);
- if (IS_E1HMF(bp)) {
- int port = BP_PORT(bp);
- int func;
- int vn;
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
- continue;
+ if (bp->link_vars.link_up && bp->link_vars.line_speed) {
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
- func = ((vn << 1) | port);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-
- if (bp->link_vars.link_up) {
- int i;
-
- /* Init rate shaping and fairness contexts */
- bnx2x_init_port_minmax(bp);
-
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
- bnx2x_init_vn_minmax(bp, 2*vn + port);
-
- /* Store it to internal memory */
- for (i = 0;
- i < sizeof(struct cmng_struct_per_port) / 4; i++)
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
- ((u32 *)(&bp->cmng))[i]);
- }
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
}
}
@@ -1554,7 +2136,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
else
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_calc_vn_weight_sum(bp);
+	/* the link status update could be the result of a DCC event,
+ hence re-read the shmem mf configuration */
+ bnx2x_read_mf_cfg(bp);
/* indicate link status */
bnx2x_link_report(bp);
@@ -1570,8 +2154,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
/* enable nig attention */
val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
- REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
- REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ } else if (CHIP_IS_E2(bp)) {
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
@@ -1585,23 +2174,25 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
*/
/* send the MCP a request, block until there is a reply */
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
- int func = BP_FUNC(bp);
+ int mb_idx = BP_FW_MB_IDX(bp);
u32 seq = ++bp->fw_seq;
u32 rc = 0;
u32 cnt = 1;
u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
mutex_lock(&bp->fw_mb_mutex);
- SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
do {
/* let the FW do it's magic ... */
msleep(delay);
- rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
/* Give the FW up to 5 second (500*10ms) */
} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
@@ -1623,6 +2214,327 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
return rc;
}
+/* must be called under rtnl_lock */
+void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+{
+ u32 mask = (1 << cl_id);
+
+	/* initial setting is BNX2X_ACCEPT_NONE */
+ u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
+ u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+ u8 unmatched_unicast = 0;
+
+ if (filters & BNX2X_PROMISCUOUS_MODE) {
+		/* promiscuous - accept all, drop none */
+ drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
+ accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
+ }
+ if (filters & BNX2X_ACCEPT_UNICAST) {
+ /* accept matched ucast */
+ drop_all_ucast = 0;
+ }
+ if (filters & BNX2X_ACCEPT_MULTICAST) {
+ /* accept matched mcast */
+ drop_all_mcast = 0;
+ }
+ if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
+		/* accept all ucast */
+ drop_all_ucast = 0;
+ accp_all_ucast = 1;
+ }
+ if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
+ /* accept all mcast */
+ drop_all_mcast = 0;
+ accp_all_mcast = 1;
+ }
+ if (filters & BNX2X_ACCEPT_BROADCAST) {
+ /* accept (all) bcast */
+ drop_all_bcast = 0;
+ accp_all_bcast = 1;
+ }
+
+ bp->mac_filters.ucast_drop_all = drop_all_ucast ?
+ bp->mac_filters.ucast_drop_all | mask :
+ bp->mac_filters.ucast_drop_all & ~mask;
+
+ bp->mac_filters.mcast_drop_all = drop_all_mcast ?
+ bp->mac_filters.mcast_drop_all | mask :
+ bp->mac_filters.mcast_drop_all & ~mask;
+
+ bp->mac_filters.bcast_drop_all = drop_all_bcast ?
+ bp->mac_filters.bcast_drop_all | mask :
+ bp->mac_filters.bcast_drop_all & ~mask;
+
+ bp->mac_filters.ucast_accept_all = accp_all_ucast ?
+ bp->mac_filters.ucast_accept_all | mask :
+ bp->mac_filters.ucast_accept_all & ~mask;
+
+ bp->mac_filters.mcast_accept_all = accp_all_mcast ?
+ bp->mac_filters.mcast_accept_all | mask :
+ bp->mac_filters.mcast_accept_all & ~mask;
+
+ bp->mac_filters.bcast_accept_all = accp_all_bcast ?
+ bp->mac_filters.bcast_accept_all | mask :
+ bp->mac_filters.bcast_accept_all & ~mask;
+
+ bp->mac_filters.unmatched_unicast = unmatched_unicast ?
+ bp->mac_filters.unmatched_unicast | mask :
+ bp->mac_filters.unmatched_unicast & ~mask;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (FUNC_CONFIG(p->func_flgs)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ /* tpa */
+ if (p->func_flgs & FUNC_FLG_TPA)
+ tcfg.config_flags |=
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
+
+ /* set rss flags */
+ if (p->func_flgs & FUNC_FLG_RSS) {
+ u16 rss_flgs = (p->rss->mode <<
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
+
+ if (p->rss->cap & RSS_IPV4_CAP)
+ rss_flgs |= RSS_IPV4_CAP_MASK;
+ if (p->rss->cap & RSS_IPV4_TCP_CAP)
+ rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
+ if (p->rss->cap & RSS_IPV6_CAP)
+ rss_flgs |= RSS_IPV6_CAP_MASK;
+ if (p->rss->cap & RSS_IPV6_TCP_CAP)
+ rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
+
+ tcfg.config_flags |= rss_flgs;
+ tcfg.rss_result_mask = p->rss->result_mask;
+
+ }
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* statistics */
+ if (p->func_flgs & FUNC_FLG_STATS) {
+ struct stats_indication_flags stats_flags = {0};
+ stats_flags.collect_eth = 1;
+
+ storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
+
+ storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
+
+ storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
+
+ storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
+ }
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ u16 flags = 0;
+
+ /* calculate queue flags */
+ flags |= QUEUE_FLG_CACHE_ALIGN;
+ flags |= QUEUE_FLG_HC;
+ flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
+
+#ifdef BCM_VLAN
+ flags |= QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+#endif
+
+ if (!fp->disable_tpa)
+ flags |= QUEUE_FLG_TPA;
+
+ flags |= QUEUE_FLG_STATS;
+
+ return flags;
+}
+
+static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_init_params *rxq_init)
+{
+ u16 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ /* calculate queue flags */
+ u16 flags = bnx2x_get_cl_flags(bp, fp);
+
+ if (!fp->disable_tpa) {
+ pause->sge_th_hi = 250;
+ pause->sge_th_lo = 150;
+ tpa_agg_size = min_t(u32,
+ (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+ 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
+ pause->bd_th_hi = 350;
+ pause->bd_th_lo = 250;
+ pause->rcq_th_hi = 350;
+ pause->rcq_th_lo = 250;
+ pause->sge_th_hi = 0;
+ pause->sge_th_lo = 0;
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->flags = flags;
+ rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ rxq_init->mtu = bp->dev->mtu;
+ rxq_init->buf_sz = bp->rx_buf_size;
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->cl_id = fp->cl_id;
+ rxq_init->spcl_id = fp->cl_id;
+ rxq_init->stat_id = fp->cl_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
+
+ rxq_init->cid = HW_CID(bp, fp->cid);
+
+ rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
+}
+
+static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
+{
+ u16 flags = bnx2x_get_cl_flags(bp, fp);
+
+ txq_init->flags = flags;
+ txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
+ txq_init->dscr_map = fp->tx_desc_mapping;
+ txq_init->stat_id = fp->cl_id;
+ txq_init->cid = HW_CID(bp, fp->cid);
+ txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+ txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
+}
+
+void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct bnx2x_rss_params rss = {0};
+ struct event_ring_data eq_data = { {0} };
+ u16 flags;
+
+ /* pf specific setups */
+ if (!CHIP_IS_E1(bp))
+ storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
+
+ if (CHIP_IS_E2(bp)) {
+ /* reset IGU PF statistics: MSIX + ATTN */
+ /* PF */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ /* ATTN */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ }
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ if (CHIP_IS_E1x(bp))
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+ else
+ flags |= FUNC_FLG_TPA;
+
+ /**
+ * Although RSS is meaningless when there is a single HW queue we
+ * still need it enabled in order to have HW Rx hash generated.
+ *
+ * if (is_eth_multi(bp))
+ * flags |= FUNC_FLG_RSS;
+ */
+ flags |= FUNC_FLG_RSS;
+
+ /* function setup */
+ if (flags & FUNC_FLG_RSS) {
+ rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
+ RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
+ rss.mode = bp->multi_mode;
+ rss.result_mask = MULTI_MASK;
+ func_init.rss = &rss;
+ }
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+	 Congestion management values depend on the link rate.
+	 There is no active link, so the initial link rate is set to 10 Gbps.
+	 When the link comes up, the congestion management values are
+ re-calculated according to the actual link rate.
+ */
+ bp->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+ /* Only the PMF sets the HW */
+ if (bp->port.pmf)
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+ /* no rx until link is up */
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* init Event Queue */
+ eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+ eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+ eq_data.producer = bp->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -1649,40 +2561,6 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
-static void bnx2x_update_min_max(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- int vn, i;
-
- /* Init rate shaping and fairness contexts */
- bnx2x_init_port_minmax(bp);
-
- bnx2x_calc_vn_weight_sum(bp);
-
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
- bnx2x_init_vn_minmax(bp, 2*vn + port);
-
- if (bp->port.pmf) {
- int func;
-
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
- continue;
-
- func = ((vn << 1) | port);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-
- /* Store it to internal memory */
- for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
- ((u32 *)(&bp->cmng))[i]);
- }
-}
-
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -1694,7 +2572,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
* where the bp->flags can change so it is done without any
* locks
*/
- if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
bp->flags |= MF_FUNC_DIS;
@@ -1709,15 +2587,17 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
}
if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
- bnx2x_update_min_max(bp);
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
}
/* Report results to MCP */
if (dcc_event)
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
else
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
/* must be called under the spq lock */
@@ -1744,16 +2624,17 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
/* Make sure that BD data is updated before writing the producer */
wmb();
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
- bp->spq_prod_idx);
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
mmiowb();
}
/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
- u32 data_hi, u32 data_lo, int common)
+ u32 data_hi, u32 data_lo, int common)
{
struct eth_spe *spe;
+ u16 type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -1762,7 +2643,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spin_lock_bh(&bp->spq_lock);
- if (!bp->spq_left) {
+ if (!atomic_read(&bp->spq_left)) {
BNX2X_ERR("BUG! SPQ ring full!\n");
spin_unlock_bh(&bp->spq_lock);
bnx2x_panic();
@@ -1775,22 +2656,42 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->hdr.conn_and_cmd_data =
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+
if (common)
- spe->hdr.type |=
- cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
+ /* Common ramrods:
+ * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
+ * TRAFFIC_STOP, TRAFFIC_START
+ */
+ type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+ else
+ /* ETH ramrods: SETUP, HALT */
+ type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
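+
+ /* 'type' now carries the connection type (SPE_HDR_CONN_TYPE) together
+ * with the issuing function ID (SPE_HDR_FUNCTION_ID). */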
- spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
- spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+ spe->hdr.type = cpu_to_le16(type);
- bp->spq_left--;
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /* stats ramrod has its own slot on the spq */
+ if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+ /* It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * more explicit memory barrier is needed.
+ */
+ atomic_dec(&bp->spq_left);
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+ "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
+ "type(0x%x) left %x\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+ HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
@@ -1827,32 +2728,27 @@ static void bnx2x_release_alr(struct bnx2x *bp)
REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
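+/* Flags returned by bnx2x_update_dsb_idx() and checked against the slowpath
+ * status in bnx2x_sp_task(). */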
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
- struct host_def_status_block *def_sb = bp->def_status_blk;
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
u16 rc = 0;
barrier(); /* status block is written to by the chip */
if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
- rc |= 1;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
}
- if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
- bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
- rc |= 2;
- }
- if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
- bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
- rc |= 4;
- }
- if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
- bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
- rc |= 8;
- }
- if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
- bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
- rc |= 16;
+
+ if (bp->def_idx != def_sb->sp_sb.running_index) {
+ bp->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
}
+
+ /* Do not reorder: reading the indices should complete before handling */
+ barrier();
return rc;
}
@@ -1863,14 +2759,13 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
int port = BP_PORT(bp);
- u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
- COMMAND_REG_ATTN_BITS_SET);
u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
MISC_REG_AEU_MASK_ATTN_FUNC_0;
u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
NIG_REG_MASK_INTERRUPT_PORT0;
u32 aeu_mask;
u32 nig_mask = 0;
+ u32 reg_addr;
if (bp->attn_state & asserted)
BNX2X_ERR("IGU ERROR\n");
@@ -1945,9 +2840,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
} /* if hardwired */
- DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
- asserted, hc_addr);
- REG_WR(bp, hc_addr, asserted);
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, asserted);
/* now set back the mask */
if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -1959,12 +2860,16 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
int port = BP_PORT(bp);
-
+ u32 ext_phy_config;
/* mark the failure */
- bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
- bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
- bp->link_params.ext_phy_config);
+ ext_phy_config);
/* log the failure */
netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
@@ -1976,7 +2881,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
int port = BP_PORT(bp);
int reg_offset;
- u32 val, swap_val, swap_override;
+ u32 val;
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -1990,30 +2895,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
BNX2X_ERR("SPIO5 hw attention\n");
/* Fan failure attention */
- switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- /* Low power mode is controlled by GPIO 2 */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- /* The PHY reset is controlled by GPIO 1 */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- /* The PHY reset is controlled by GPIO 1 */
- /* fake the port number to cancel the swap done in
- set_gpio() */
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- port = (swap_val && swap_override) ^ 1;
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- break;
-
- default:
- break;
- }
+ bnx2x_hw_reset_phy(&bp->link_params);
bnx2x_fan_failure(bp);
}
@@ -2087,6 +2969,10 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
/* RQ_USDMDP_FIFO_OVERFLOW */
if (val & 0x18000)
BNX2X_ERR("FATAL error from PXP\n");
+ if (CHIP_IS_E2(bp)) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+ BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+ }
}
if (attn & HW_INTERRUT_ASSERT_SET_2) {
@@ -2117,9 +3003,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
int func = BP_FUNC(bp);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
- bp->mf_config = SHMEM_RD(bp,
- mf_cfg.func_mf_config[func].config);
- val = SHMEM_RD(bp, func_mb[func].drv_status);
+ bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+ func_mf_config[BP_ABS_FUNC(bp)].config);
+ val = SHMEM_RD(bp,
+ func_mb[BP_FW_MB_IDX(bp)].drv_status);
if (val & DRV_STATUS_DCC_EVENT_MASK)
bnx2x_dcc_event(bp,
(val & DRV_STATUS_DCC_EVENT_MASK));
@@ -2149,13 +3036,13 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
if (attn & BNX2X_GRC_TIMEOUT) {
- val = CHIP_IS_E1H(bp) ?
- REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
BNX2X_ERR("GRC time-out 0x%08x\n", val);
}
if (attn & BNX2X_GRC_RSV) {
- val = CHIP_IS_E1H(bp) ?
- REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
BNX2X_ERR("GRC reserved 0x%08x\n", val);
}
REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
@@ -2168,6 +3055,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+
/*
* should be run under rtnl lock
*/
@@ -2460,6 +3348,74 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
attn.sig[3]);
}
+
+static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+ val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+ BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "ADDRESS_ERROR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "INCORRECT_RCV_BEHAVIOR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "WAS_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_LENGTH_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_GRC_SPACE_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_MSIX_BAR_VIOLATION_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_IN_TWO_RCBS_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "CSSNOOP_FIFO_OVERFLOW\n");
+ }
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+ val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+ BNX2X_ERR("ATC hw attention 0x%x\n", val);
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG"
+ "_ATC_TCPL_TO_NOT_PEND\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_GPA_MULTIPLE_HITS\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_RCPL_TO_EMPTY_CNT\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_IREQ_LESS_THAN_STU\n");
+ }
+
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+ BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+ (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+ }
+
+}
+
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
struct attn_route attn, *group_mask;
@@ -2490,17 +3446,28 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
- DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
- attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
+ if (CHIP_IS_E2(bp))
+ attn.sig[4] =
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+ else
+ attn.sig[4] = 0;
+
+ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
if (deasserted & (1 << index)) {
group_mask = &bp->attn_group[index];
- DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
- index, group_mask->sig[0], group_mask->sig[1],
- group_mask->sig[2], group_mask->sig[3]);
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
+ "%08x %08x %08x\n",
+ index,
+ group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3],
+ group_mask->sig[4]);
+ bnx2x_attn_int_deasserted4(bp,
+ attn.sig[4] & group_mask->sig[4]);
bnx2x_attn_int_deasserted3(bp,
attn.sig[3] & group_mask->sig[3]);
bnx2x_attn_int_deasserted1(bp,
@@ -2514,11 +3481,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
bnx2x_release_alr(bp);
- reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_CLR);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
val = ~deasserted;
- DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
- val, reg_addr);
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
REG_WR(bp, reg_addr, val);
if (~bp->attn_state & deasserted)
@@ -2571,6 +3542,141 @@ static void bnx2x_attn_int(struct bnx2x *bp)
bnx2x_attn_int_deasserted(bp, deasserted);
}
+static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+ /* No memory barriers */
+ storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+ mmiowb(); /* keep prod updates ordered */
+}
+
+#ifdef BCM_CNIC
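+/* Returns 1 if the CID is outside the CNIC range (the caller handles the
+ * completion as a regular ETH CID), 0 if it was consumed here for CNIC. */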
+static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+ union event_ring_elem *elem)
+{
+ if (!bp->cnic_eth_dev.starting_cid ||
+ cid < bp->cnic_eth_dev.starting_cid)
+ return 1;
+
+ DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+ if (unlikely(elem->message.data.cfc_del_event.error)) {
+ BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+ cid);
+ bnx2x_panic_dump(bp);
+ }
+ bnx2x_cnic_cfc_comp(bp, cid);
+ return 0;
+}
+#endif
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+ u16 hw_cons, sw_cons, sw_prod;
+ union event_ring_elem *elem;
+ u32 cid;
+ u8 opcode;
+ int spqe_cnt = 0;
+
+ hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+ /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+ * When we get the next-page we need to adjust so the loop
+ * condition below will be met. The next element is the size of a
+ * regular element and hence incrementing by 1.
+ */
+ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+ hw_cons++;
+
+ /* This function may never run in parallel with itself for a
+ * specific bp, thus there is no need in "paired" read memory
+ * barrier here.
+ */
+ sw_cons = bp->eq_cons;
+ sw_prod = bp->eq_prod;
+
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
+ hw_cons, sw_cons, atomic_read(&bp->spq_left));
+
+ for (; sw_cons != hw_cons;
+ sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+
+ elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
+
+
+ /* handle eq element */
+ switch (opcode) {
+ case EVENT_RING_OPCODE_STAT_QUERY:
+ DP(NETIF_MSG_TIMER, "got statistics comp event\n");
+ /* nothing to do with stats comp */
+ continue;
+
+ case EVENT_RING_OPCODE_CFC_DEL:
+ /* handle according to cid range */
+ /*
+ * we may want to verify here that the bp state is
+ * HALTING
+ */
+ DP(NETIF_MSG_IFDOWN,
+ "got delete ramrod for MULTI[%d]\n", cid);
+#ifdef BCM_CNIC
+ if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+ goto next_spqe;
+#endif
+ bnx2x_fp(bp, cid, state) =
+ BNX2X_FP_STATE_CLOSED;
+
+ goto next_spqe;
+ }
+
+ switch (opcode | bp->state) {
+ case (EVENT_RING_OPCODE_FUNCTION_START |
+ BNX2X_STATE_OPENING_WAIT4_PORT):
+ DP(NETIF_MSG_IFUP, "got setup ramrod\n");
+ bp->state = BNX2X_STATE_FUNC_STARTED;
+ break;
+
+ case (EVENT_RING_OPCODE_FUNCTION_STOP |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
+ bp->set_mac_pending = 0;
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
+ bp->set_mac_pending = 0;
+ break;
+ default:
+ /* unknown event - log the error and continue */
+ BNX2X_ERR("Unknown EQ event %d\n",
+ elem->message.opcode);
+ }
+next_spqe:
+ spqe_cnt++;
+ } /* for */
+
+ smp_mb__before_atomic_inc();
+ atomic_add(spqe_cnt, &bp->spq_left);
+
+ bp->eq_cons = sw_cons;
+ bp->eq_prod = sw_prod;
+ /* Make sure that above mem writes were issued towards the memory */
+ smp_wmb();
+
+ /* update producer */
+ bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
static void bnx2x_sp_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
@@ -2589,31 +3695,29 @@ static void bnx2x_sp_task(struct work_struct *work)
DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
- if (status & 0x1) {
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
bnx2x_attn_int(bp);
- status &= ~0x1;
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
}
- /* CStorm events: STAT_QUERY */
- if (status & 0x2) {
- DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
- status &= ~0x2;
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
}
if (unlikely(status))
DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
status);
- bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
- IGU_INT_NOP, 1);
- bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
- IGU_INT_NOP, 1);
- bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
- IGU_INT_NOP, 1);
- bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
- IGU_INT_NOP, 1);
- bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
- IGU_INT_ENABLE, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -2627,7 +3731,8 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
return IRQ_HANDLED;
}
- bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -2671,7 +3776,7 @@ static void bnx2x_timer(unsigned long data)
}
if (!BP_NOMCP(bp)) {
- int func = BP_FUNC(bp);
+ int mb_idx = BP_FW_MB_IDX(bp);
u32 drv_pulse;
u32 mcp_pulse;
@@ -2679,9 +3784,9 @@ static void bnx2x_timer(unsigned long data)
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
/* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
- SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
- mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
+ mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
/* The delta between driver pulse and mcp response
* should be 1 (before mcp response) or 0 (after mcp response)
@@ -2709,324 +3814,310 @@ timer_restart:
* nic init service functions
*/
-static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
+static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
- int port = BP_PORT(bp);
+ u32 i;
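+
+ /* Use dword writes when both address and length are dword-aligned,
+ * otherwise fall back to byte-wide writes. */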
+ if (!(len%4) && !(addr%4))
+ for (i = 0; i < len; i += 4)
+ REG_WR(bp, addr + i, fill);
+ else
+ for (i = 0; i < len; i++)
+ REG_WR8(bp, addr + i, fill);
- /* "CSTORM" */
- bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
- CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
- CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
- bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
- CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
- CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
-void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
- dma_addr_t mapping, int sb_id)
+/* helper: writes FP SP data to FW - data_size in dwords */
+static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+ int fw_sb_id,
+ u32 *sb_data_p,
+ u32 data_size)
{
- int port = BP_PORT(bp);
- int func = BP_FUNC(bp);
int index;
- u64 section;
+ for (index = 0; index < data_size; index++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ sizeof(u32)*index,
+ *(sb_data_p + index));
+}
+
+static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+ u32 *sb_data_p;
+ u32 data_size = 0;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+
+ /* disable the function first */
+ if (CHIP_IS_E2(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
+ sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
+ sb_data_e2.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
+ sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ }
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
- /* USTORM */
- section = ((u64)mapping) + offsetof(struct host_status_block,
- u_status_block);
- sb->u_status_block.status_block_id = sb_id;
-
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
- U64_HI(section));
- REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
- CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
-
- for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
- REG_WR16(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_SYNC_BLOCK_SIZE);
+}
- /* CSTORM */
- section = ((u64)mapping) + offsetof(struct host_status_block,
- c_status_block);
- sb->c_status_block.status_block_id = sb_id;
+/* helper: writes SP SB data to FW */
+static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+ struct hc_sp_status_block_data *sp_sb_data)
+{
+ int func = BP_FUNC(bp);
+ int i;
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32),
+ *((u32 *)sp_sb_data + i));
+}
+
+static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
+ sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
+ sp_sb_data.p_func.vf_valid = false;
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
- U64_HI(section));
- REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
- CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
- for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
- REG_WR16(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_SYNC_BLOCK_SIZE);
- bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
-static void bnx2x_zero_def_sb(struct bnx2x *bp)
+
+static inline
+void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+ int igu_sb_id, int igu_seg_id)
{
- int func = BP_FUNC(bp);
+ hc_sm->igu_sb_id = igu_sb_id;
+ hc_sm->igu_seg_id = igu_seg_id;
+ hc_sm->timer_value = 0xFF;
+ hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+ int igu_seg_id;
+
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p;
+ struct hc_index_data *hc_index_p;
+ int data_size;
+ u32 *sb_data_p;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ igu_seg_id = HC_SEG_ACCESS_NORM;
+ else
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+ bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+ if (CHIP_IS_E2(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e2.common.p_func.vf_id = vfid;
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
+ sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e2.common.same_igu_sb_1b = true;
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e2.common.state_machine;
+ hc_index_p = sb_data_e2.index_data;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e1x.common.p_func.vf_id = 0xff;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e1x.common.same_igu_sb_1b = true;
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e1x.common.state_machine;
+ hc_index_p = sb_data_e1x.index_data;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ }
+
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+ igu_sb_id, igu_seg_id);
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+ igu_sb_id, igu_seg_id);
+
+ DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
- bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
- TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
- sizeof(struct tstorm_def_status_block)/4);
- bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
- CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
- sizeof(struct cstorm_def_status_block_u)/4);
- bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
- CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
- sizeof(struct cstorm_def_status_block_c)/4);
- bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
- XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
- sizeof(struct xstorm_def_status_block)/4);
+ /* write indices to HW */
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
-static void bnx2x_init_def_sb(struct bnx2x *bp,
- struct host_def_status_block *def_sb,
- dma_addr_t mapping, int sb_id)
+static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec)
{
int port = BP_PORT(bp);
+ u8 ticks = usec / BNX2X_BTR;
+
+ storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
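+ /* Disable the index when explicitly requested or when a zero coalescing
+ * interval is configured. */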
+ disable = disable ? 1 : (usec ? 0 : 1);
+ storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
+
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
+ u16 tx_usec, u16 rx_usec)
+{
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
+ false, rx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
+ false, tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ dma_addr_t mapping = bp->def_status_blk_mapping;
+ int igu_sp_sb_index;
+ int igu_seg_id;
+ int port = BP_PORT(bp);
int func = BP_FUNC(bp);
- int index, val, reg_offset;
+ int reg_offset;
u64 section;
+ int index;
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ igu_sp_sb_index = DEF_SB_IGU_ID;
+ igu_seg_id = HC_SEG_ACCESS_DEF;
+ } else {
+ igu_sp_sb_index = bp->igu_dsb_id;
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
+ }
/* ATTN */
- section = ((u64)mapping) + offsetof(struct host_def_status_block,
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
atten_status_block);
- def_sb->atten_status_block.status_block_id = sb_id;
+ def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
bp->attn_state = 0;
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
-
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
- bp->attn_group[index].sig[0] = REG_RD(bp,
- reg_offset + 0x10*index);
- bp->attn_group[index].sig[1] = REG_RD(bp,
- reg_offset + 0x4 + 0x10*index);
- bp->attn_group[index].sig[2] = REG_RD(bp,
- reg_offset + 0x8 + 0x10*index);
- bp->attn_group[index].sig[3] = REG_RD(bp,
- reg_offset + 0xc + 0x10*index);
+ int sindex;
+ /* take care of sig[0]..sig[4] */
+ for (sindex = 0; sindex < 4; sindex++)
+ bp->attn_group[index].sig[sindex] =
+ REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+ if (CHIP_IS_E2(bp))
+ /*
+ * enable5 is separate from the rest of the registers,
+ * and therefore the address skip is 4
+ * and not 16 between the different groups
+ */
+ bp->attn_group[index].sig[4] = REG_RD(bp,
+ reg_offset + 0x10 + 0x4*index);
+ else
+ bp->attn_group[index].sig[4] = 0;
}
- reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
- HC_REG_ATTN_MSG0_ADDR_L);
-
- REG_WR(bp, reg_offset, U64_LO(section));
- REG_WR(bp, reg_offset + 4, U64_HI(section));
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+ HC_REG_ATTN_MSG0_ADDR_L);
- reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
-
- val = REG_RD(bp, reg_offset);
- val |= sb_id;
- REG_WR(bp, reg_offset, val);
+ REG_WR(bp, reg_offset, U64_LO(section));
+ REG_WR(bp, reg_offset + 4, U64_HI(section));
+ } else if (CHIP_IS_E2(bp)) {
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+ }
- /* USTORM */
- section = ((u64)mapping) + offsetof(struct host_def_status_block,
- u_def_status_block);
- def_sb->u_def_status_block.status_block_id = sb_id;
-
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
- U64_HI(section));
- REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
- CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
-
- for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
- REG_WR16(bp, BAR_CSTRORM_INTMEM +
- CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ sp_sb);
- /* CSTORM */
- section = ((u64)mapping) + offsetof(struct host_def_status_block,
- c_def_status_block);
- def_sb->c_def_status_block.status_block_id = sb_id;
-
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
- U64_HI(section));
- REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
- CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
-
- for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
- REG_WR16(bp, BAR_CSTRORM_INTMEM +
- CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
+ bnx2x_zero_sp_sb(bp);
- /* TSTORM */
- section = ((u64)mapping) + offsetof(struct host_def_status_block,
- t_def_status_block);
- def_sb->t_def_status_block.status_block_id = sb_id;
-
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
- U64_HI(section));
- REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
- TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-
- for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
- REG_WR16(bp, BAR_TSTRORM_INTMEM +
- TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+ sp_sb_data.host_sb_addr.lo = U64_LO(section);
+ sp_sb_data.host_sb_addr.hi = U64_HI(section);
+ sp_sb_data.igu_sb_id = igu_sp_sb_index;
+ sp_sb_data.igu_seg_id = igu_seg_id;
+ sp_sb_data.p_func.pf_id = func;
+ sp_sb_data.p_func.vnic_id = BP_VN(bp);
+ sp_sb_data.p_func.vf_id = 0xff;
- /* XSTORM */
- section = ((u64)mapping) + offsetof(struct host_def_status_block,
- x_def_status_block);
- def_sb->x_def_status_block.status_block_id = sb_id;
-
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
- U64_HI(section));
- REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
- XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-
- for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
- REG_WR16(bp, BAR_XSTRORM_INTMEM +
- XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
bp->stats_pending = 0;
bp->set_mac_pending = 0;
- bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
void bnx2x_update_coalesce(struct bnx2x *bp)
{
- int port = BP_PORT(bp);
int i;
- for_each_queue(bp, i) {
- int sb_id = bp->fp[i].sb_id;
-
- /* HC_INDEX_U_ETH_RX_CQ_CONS */
- REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
- U_SB_ETH_RX_CQ_INDEX),
- bp->rx_ticks/(4 * BNX2X_BTR));
- REG_WR16(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
- U_SB_ETH_RX_CQ_INDEX),
- (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
-
- /* HC_INDEX_C_ETH_TX_CQ_CONS */
- REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
- C_SB_ETH_TX_CQ_INDEX),
- bp->tx_ticks/(4 * BNX2X_BTR));
- REG_WR16(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
- C_SB_ETH_TX_CQ_INDEX),
- (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
- }
+ for_each_queue(bp, i)
+ bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+ bp->rx_ticks, bp->tx_ticks);
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
- int func = BP_FUNC(bp);
-
spin_lock_init(&bp->spq_lock);
+ atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
- bp->spq_left = MAX_SPQ_PENDING;
bp->spq_prod_idx = 0;
bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
bp->spq_prod_bd = bp->spq;
bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
-
- REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
- U64_LO(bp->spq_mapping));
- REG_WR(bp,
- XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
- U64_HI(bp->spq_mapping));
-
- REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
- bp->spq_prod_idx);
}
-static void bnx2x_init_context(struct bnx2x *bp)
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
int i;
+ for (i = 1; i <= NUM_EQ_PAGES; i++) {
+ union event_ring_elem *elem =
+ &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
- /* Rx */
- for_each_queue(bp, i) {
- struct eth_context *context = bnx2x_sp(bp, context[i].eth);
- struct bnx2x_fastpath *fp = &bp->fp[i];
- u8 cl_id = fp->cl_id;
-
- context->ustorm_st_context.common.sb_index_numbers =
- BNX2X_RX_SB_INDEX_NUM;
- context->ustorm_st_context.common.clientId = cl_id;
- context->ustorm_st_context.common.status_block_id = fp->sb_id;
- context->ustorm_st_context.common.flags =
- (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
- USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
- context->ustorm_st_context.common.statistics_counter_id =
- cl_id;
- context->ustorm_st_context.common.mc_alignment_log_size =
- BNX2X_RX_ALIGN_SHIFT;
- context->ustorm_st_context.common.bd_buff_size =
- bp->rx_buf_size;
- context->ustorm_st_context.common.bd_page_base_hi =
- U64_HI(fp->rx_desc_mapping);
- context->ustorm_st_context.common.bd_page_base_lo =
- U64_LO(fp->rx_desc_mapping);
- if (!fp->disable_tpa) {
- context->ustorm_st_context.common.flags |=
- USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
- context->ustorm_st_context.common.sge_buff_size =
- (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
- 0xffff);
- context->ustorm_st_context.common.sge_page_base_hi =
- U64_HI(fp->rx_sge_mapping);
- context->ustorm_st_context.common.sge_page_base_lo =
- U64_LO(fp->rx_sge_mapping);
-
- context->ustorm_st_context.common.max_sges_for_packet =
- SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
- context->ustorm_st_context.common.max_sges_for_packet =
- ((context->ustorm_st_context.common.
- max_sges_for_packet + PAGES_PER_SGE - 1) &
- (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
- }
-
- context->ustorm_ag_context.cdu_usage =
- CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
- CDU_REGION_NUMBER_UCM_AG,
- ETH_CONNECTION_TYPE);
-
- context->xstorm_ag_context.cdu_reserved =
- CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
- CDU_REGION_NUMBER_XCM_AG,
- ETH_CONNECTION_TYPE);
- }
-
- /* Tx */
- for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct eth_context *context =
- bnx2x_sp(bp, context[i].eth);
-
- context->cstorm_st_context.sb_index_number =
- C_SB_ETH_TX_CQ_INDEX;
- context->cstorm_st_context.status_block_id = fp->sb_id;
-
- context->xstorm_st_context.tx_bd_page_base_hi =
- U64_HI(fp->tx_desc_mapping);
- context->xstorm_st_context.tx_bd_page_base_lo =
- U64_LO(fp->tx_desc_mapping);
- context->xstorm_st_context.statistics_data = (fp->cl_id |
- XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
+ elem->next_page.addr.hi =
+ cpu_to_le32(U64_HI(bp->eq_mapping +
+ BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+ elem->next_page.addr.lo =
+ cpu_to_le32(U64_LO(bp->eq_mapping +
+ BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
}
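+
+ /* Each EQ page's last element now points to the start of the next page,
+ * wrapping back to the first page after the last one. */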
+ bp->eq_cons = 0;
+ bp->eq_prod = NUM_EQ_DESC;
+ bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
@@ -3045,47 +4136,11 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
bp->fp->cl_id + (i % bp->num_queues));
}
-void bnx2x_set_client_config(struct bnx2x *bp)
-{
- struct tstorm_eth_client_config tstorm_client = {0};
- int port = BP_PORT(bp);
- int i;
-
- tstorm_client.mtu = bp->dev->mtu;
- tstorm_client.config_flags =
- (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
- TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
-#ifdef BCM_VLAN
- if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
- tstorm_client.config_flags |=
- TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
- DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
- }
-#endif
-
- for_each_queue(bp, i) {
- tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
-
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
- ((u32 *)&tstorm_client)[0]);
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
- ((u32 *)&tstorm_client)[1]);
- }
-
- DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
- ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
-}
-
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
- struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
int mode = bp->rx_mode;
- int mask = bp->rx_mode_cl_mask;
- int func = BP_FUNC(bp);
- int port = BP_PORT(bp);
- int i;
+ u16 cl_id;
+
/* All but management unicast packets should pass to the host as well */
u32 llh_mask =
NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
@@ -3093,28 +4148,32 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
- DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
-
switch (mode) {
case BNX2X_RX_MODE_NONE: /* no Rx */
- tstorm_mac_filter.ucast_drop_all = mask;
- tstorm_mac_filter.mcast_drop_all = mask;
- tstorm_mac_filter.bcast_drop_all = mask;
+ cl_id = BP_L_ID(bp);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
break;
case BNX2X_RX_MODE_NORMAL:
- tstorm_mac_filter.bcast_accept_all = mask;
+ cl_id = BP_L_ID(bp);
+ bnx2x_rxq_set_mac_filters(bp, cl_id,
+ BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_MULTICAST);
break;
case BNX2X_RX_MODE_ALLMULTI:
- tstorm_mac_filter.mcast_accept_all = mask;
- tstorm_mac_filter.bcast_accept_all = mask;
+ cl_id = BP_L_ID(bp);
+ bnx2x_rxq_set_mac_filters(bp, cl_id,
+ BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_ALL_MULTICAST);
break;
case BNX2X_RX_MODE_PROMISC:
- tstorm_mac_filter.ucast_accept_all = mask;
- tstorm_mac_filter.mcast_accept_all = mask;
- tstorm_mac_filter.bcast_accept_all = mask;
+ cl_id = BP_L_ID(bp);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
+
/* pass management unicast packets as well */
llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
break;
@@ -3125,262 +4184,64 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
}
REG_WR(bp,
- (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
+ BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK,
llh_mask);
- for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
- ((u32 *)&tstorm_mac_filter)[i]);
-
-/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
- ((u32 *)&tstorm_mac_filter)[i]); */
- }
+ DP(NETIF_MSG_IFUP, "rx mode %d\n"
+ "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
+ "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
+ bp->mac_filters.ucast_drop_all,
+ bp->mac_filters.mcast_drop_all,
+ bp->mac_filters.bcast_drop_all,
+ bp->mac_filters.ucast_accept_all,
+ bp->mac_filters.mcast_accept_all,
+ bp->mac_filters.bcast_accept_all
+ );
- if (mode != BNX2X_RX_MODE_NONE)
- bnx2x_set_client_config(bp);
+ storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- /* Zero this manually as its initialization is
- currently missing in the initTool */
- for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_AGG_DATA_OFFSET + i * 4, 0);
-}
-
-static void bnx2x_init_internal_port(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
-
- REG_WR(bp,
- BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
- REG_WR(bp,
- BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
-}
-
-static void bnx2x_init_internal_func(struct bnx2x *bp)
-{
- struct tstorm_eth_function_common_config tstorm_config = {0};
- struct stats_indication_flags stats_flags = {0};
- int port = BP_PORT(bp);
- int func = BP_FUNC(bp);
- int i, j;
- u32 offset;
- u16 max_agg_size;
-
- tstorm_config.config_flags = RSS_FLAGS(bp);
-
- if (is_multi(bp))
- tstorm_config.rss_result_mask = MULTI_MASK;
-
- /* Enable TPA if needed */
- if (bp->flags & TPA_ENABLE_FLAG)
- tstorm_config.config_flags |=
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
-
- if (IS_E1HMF(bp))
- tstorm_config.config_flags |=
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
-
- tstorm_config.leading_client_id = BP_L_ID(bp);
-
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
- (*(u32 *)&tstorm_config));
-
- bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
- bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
- bnx2x_set_storm_rx_mode(bp);
+ if (!CHIP_IS_E1(bp)) {
- for_each_queue(bp, i) {
- u8 cl_id = bp->fp[i].cl_id;
-
- /* reset xstorm per client statistics */
- offset = BAR_XSTRORM_INTMEM +
- XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
- for (j = 0;
- j < sizeof(struct xstorm_per_client_stats) / 4; j++)
- REG_WR(bp, offset + j*4, 0);
-
- /* reset tstorm per client statistics */
- offset = BAR_TSTRORM_INTMEM +
- TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
- for (j = 0;
- j < sizeof(struct tstorm_per_client_stats) / 4; j++)
- REG_WR(bp, offset + j*4, 0);
-
- /* reset ustorm per client statistics */
- offset = BAR_USTRORM_INTMEM +
- USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
- for (j = 0;
- j < sizeof(struct ustorm_per_client_stats) / 4; j++)
- REG_WR(bp, offset + j*4, 0);
- }
-
- /* Init statistics related context */
- stats_flags.collect_eth = 1;
-
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
- ((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
- ((u32 *)&stats_flags)[1]);
-
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
- ((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
- ((u32 *)&stats_flags)[1]);
-
- REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
- ((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
- ((u32 *)&stats_flags)[1]);
-
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
- ((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
- ((u32 *)&stats_flags)[1]);
-
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
- if (CHIP_IS_E1H(bp)) {
+ /* xstorm needs to know whether to add ovlan to packets or not;
+ * in switch-independent mode we'll write 0 here... */
REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
- IS_E1HMF(bp));
+ bp->mf_mode);
REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
- IS_E1HMF(bp));
+ bp->mf_mode);
REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
- IS_E1HMF(bp));
+ bp->mf_mode);
REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
- IS_E1HMF(bp));
-
- REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
- bp->e1hov);
+ bp->mf_mode);
}
- /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
- max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
- for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
- U64_LO(fp->rx_comp_mapping));
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
- U64_HI(fp->rx_comp_mapping));
-
- /* Next page */
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
- U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
+ /* Zero this manually as its initialization is
+ currently missing in the initTool */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
- U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
-
- REG_WR16(bp, BAR_USTRORM_INTMEM +
- USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
- max_agg_size);
- }
-
- /* dropless flow control */
- if (CHIP_IS_E1H(bp)) {
- struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
-
- rx_pause.bd_thr_low = 250;
- rx_pause.cqe_thr_low = 250;
- rx_pause.cos = 1;
- rx_pause.sge_thr_low = 0;
- rx_pause.bd_thr_high = 350;
- rx_pause.cqe_thr_high = 350;
- rx_pause.sge_thr_high = 0;
-
- for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- if (!fp->disable_tpa) {
- rx_pause.sge_thr_low = 150;
- rx_pause.sge_thr_high = 250;
- }
-
-
- offset = BAR_USTRORM_INTMEM +
- USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
- fp->cl_id);
- for (j = 0;
- j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
- j++)
- REG_WR(bp, offset + j*4,
- ((u32 *)&rx_pause)[j]);
- }
- }
-
- memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
-
- /* Init rate shaping and fairness contexts */
- if (IS_E1HMF(bp)) {
- int vn;
-
- /* During init there is no active link
- Until link is up, set link rate to 10Gbps */
- bp->link_vars.line_speed = SPEED_10000;
- bnx2x_init_port_minmax(bp);
-
- if (!BP_NOMCP(bp))
- bp->mf_config =
- SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
- bnx2x_calc_vn_weight_sum(bp);
-
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
- bnx2x_init_vn_minmax(bp, 2*vn + port);
-
- /* Enable rate shaping and fairness */
- bp->cmng.flags.cmng_enables |=
- CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-
- } else {
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode minmax will be disabled\n");
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+ if (CHIP_IS_E2(bp)) {
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+ CHIP_INT_MODE_IS_BC(bp) ?
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
}
+}
-
- /* Store cmng structures to internal memory */
- if (bp->port.pmf)
- for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
- ((u32 *)(&bp->cmng))[i]);
+static void bnx2x_init_internal_port(struct bnx2x *bp)
+{
+ /* port */
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_COMMON:
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
bnx2x_init_internal_common(bp);
/* no break */
@@ -3389,7 +4250,8 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
/* no break */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
- bnx2x_init_internal_func(bp);
+ /* internal memory per function is
+ initialized inside bnx2x_pf_init */
break;
default:
@@ -3398,43 +4260,63 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
}
}
+static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+
+ fp->state = BNX2X_FP_STATE_CLOSED;
+
+ fp->index = fp->cid = fp_idx;
+ fp->cl_id = BP_L_ID(bp) + fp_idx;
+ fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
+ fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
+ /* qZone id equals to FW (per path) client id */
+ fp->cl_qzone_id = fp->cl_id +
+ BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
+ ETH_MAX_RX_CLIENTS_E1H);
+ /* init shortcut */
+ fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
+ USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
+ USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+ /* Setup SB indices */
+ fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+ fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
+
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
+ "cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+
+ bnx2x_update_fpsb_idx(fp);
+}
+
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
int i;
- for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- fp->bp = bp;
- fp->state = BNX2X_FP_STATE_CLOSED;
- fp->index = i;
- fp->cl_id = BP_L_ID(bp) + i;
+ for_each_queue(bp, i)
+ bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
- fp->sb_id = fp->cl_id + 1;
-#else
- fp->sb_id = fp->cl_id;
+
+ bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+ BNX2X_VF_ID_INVALID, false,
+ CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
+
#endif
- DP(NETIF_MSG_IFUP,
- "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
- i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
- bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
- fp->sb_id);
- bnx2x_update_fpsb_idx(fp);
- }
/* ensure status block indices were read */
rmb();
-
- bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
- DEF_SB_ID);
+ bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
- bnx2x_update_coalesce(bp);
bnx2x_init_rx_rings(bp);
- bnx2x_init_tx_ring(bp);
+ bnx2x_init_tx_rings(bp);
bnx2x_init_sp_ring(bp);
- bnx2x_init_context(bp);
+ bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
+ bnx2x_pf_init(bp);
bnx2x_init_ind_table(bp);
bnx2x_stats_init(bp);
@@ -3495,7 +4377,6 @@ gunzip_nomem1:
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
kfree(bp->strm->workspace);
-
kfree(bp->strm);
bp->strm = NULL;
@@ -3593,8 +4474,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
else
factor = 1;
- DP(NETIF_MSG_HW, "start part1\n");
-
/* Disable inputs of parser neighbor blocks */
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
@@ -3731,9 +4610,19 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
static void enable_blocks_attention(struct bnx2x *bp)
{
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
- REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+ else
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+ /*
+ * mask read length error interrupts in brb for parser
+ * (parsing unit and 'checksum and crc' unit)
+ * these errors are legal (PU reads fixed length and CAC can cause
+ * read length error on truncated packets)
+ */
+ REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
REG_WR(bp, QM_REG_QM_INT_MASK, 0);
REG_WR(bp, TM_REG_TM_INT_MASK, 0);
REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
@@ -3752,8 +4641,16 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
if (CHIP_REV_IS_FPGA(bp))
REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+ else if (CHIP_IS_E2(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
+ (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
else
REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
@@ -3771,42 +4668,41 @@ static const struct {
u32 addr;
u32 mask;
} bnx2x_parity_mask[] = {
- {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
- {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
- {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
- {HC_REG_HC_PRTY_MASK, 0xffffffff},
- {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
- {QM_REG_QM_PRTY_MASK, 0x0},
- {DORQ_REG_DORQ_PRTY_MASK, 0x0},
+ {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
+ {HC_REG_HC_PRTY_MASK, 0x7},
+ {MISC_REG_MISC_PRTY_MASK, 0x1},
+ {QM_REG_QM_PRTY_MASK, 0x0},
+ {DORQ_REG_DORQ_PRTY_MASK, 0x0},
{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
- {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
- {CDU_REG_CDU_PRTY_MASK, 0x0},
- {CFC_REG_CFC_PRTY_MASK, 0x0},
- {DBG_REG_DBG_PRTY_MASK, 0x0},
- {DMAE_REG_DMAE_PRTY_MASK, 0x0},
- {BRB1_REG_BRB1_PRTY_MASK, 0x0},
- {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
- {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
- {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
- {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
- {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
- {USEM_REG_USEM_PRTY_MASK_0, 0x0},
- {USEM_REG_USEM_PRTY_MASK_1, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+ {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+ {CDU_REG_CDU_PRTY_MASK, 0x0},
+ {CFC_REG_CFC_PRTY_MASK, 0x0},
+ {DBG_REG_DBG_PRTY_MASK, 0x0},
+ {DMAE_REG_DMAE_PRTY_MASK, 0x0},
+ {BRB1_REG_BRB1_PRTY_MASK, 0x0},
+ {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+ {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
+ {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
+ {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+ {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_0, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_1, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
static void enable_blocks_parity(struct bnx2x *bp)
{
- int i, mask_arr_len =
- sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+ int i;
- for (i = 0; i < mask_arr_len; i++)
+ for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
REG_WR(bp, bnx2x_parity_mask[i].addr,
bnx2x_parity_mask[i].mask);
}
@@ -3862,17 +4758,12 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
*/
else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
for (port = PORT_0; port < PORT_MAX; port++) {
- u32 phy_type =
- SHMEM_RD(bp, dev_info.port_hw_config[port].
- external_phy_config) &
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
is_required |=
- ((phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
- (phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
- (phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
+ bnx2x_fan_failure_det_req(
+ bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base,
+ port);
}
DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
@@ -3896,26 +4787,97 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
-static int bnx2x_init_common(struct bnx2x *bp)
+static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
+{
+ u32 offset = 0;
+
+ if (CHIP_IS_E1(bp))
+ return;
+ if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
+ return;
+
+ switch (BP_ABS_FUNC(bp)) {
+ case 0:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ break;
+ case 1:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
+ break;
+ case 2:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
+ break;
+ case 3:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
+ break;
+ case 4:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
+ break;
+ case 5:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
+ break;
+ case 6:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
+ break;
+ case 7:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
+ break;
+ default:
+ return;
+ }
+
+ REG_WR(bp, offset, pretend_func_num);
+ REG_RD(bp, offset);
+ DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
+}
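/*
 * A minimal usage sketch of the pretend mechanism added above; the names
 * other_fid, some_split_reg and val are placeholders, not identifiers from
 * this driver:
 *
 *	bnx2x_pretend_func(bp, other_fid);	  // GRC now decodes as other_fid
 *	REG_WR(bp, some_split_reg, val);	  // lands in other_fid's copy
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); // restore our own identity
 *
 * The master-enable and timers-workaround code later in this patch follows
 * exactly this pattern.
 */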
+
+static void bnx2x_pf_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+ val &= ~IGU_PF_CONF_FUNC_EN;
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
u32 val, i;
-#ifdef BCM_CNIC
- u32 wb_write[2];
-#endif
- DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
+ DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
- if (CHIP_IS_E1H(bp))
- REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
- REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
- msleep(30);
- REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
+ if (CHIP_IS_E2(bp)) {
+ u8 fid;
+
+ /**
+ * In 4-port or 2-port mode we need to turn off master-enable
+ * for everyone; after that, turn it back on for self. So we
+ * disregard whether this is multi-function or not, and always
+ * disable for all functions on the given path. This means
+ * 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
+ */
+ for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
+ if (fid == BP_ABS_FUNC(bp)) {
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+ 1);
+ continue;
+ }
+
+ bnx2x_pretend_func(bp, fid);
+ /* clear pf enable */
+ bnx2x_pf_disable(bp);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+ }
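/*
 * A worked instance of the loop bounds used above, assuming E2_FUNC_MAX is 4
 * (the constant is not visible in this hunk): fid runs from BP_PATH(bp) in
 * steps of 2 while fid < 8, i.e. over absolute functions 0, 2, 4, 6 on path 0
 * and 1, 3, 5, 7 on path 1, matching the comment that precedes the loop.
 */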
bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
if (CHIP_IS_E1(bp)) {
@@ -3943,12 +4905,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif
- REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
-#ifdef BCM_CNIC
- REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
- REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
- REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
-#endif
+ bnx2x_ilt_init_page_size(bp, INITOP_SET);
if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
@@ -3967,9 +4924,65 @@ static int bnx2x_init_common(struct bnx2x *bp)
return -EBUSY;
}
+ /* Timers bug workaround E2 only. We need to set the entire ILT to
+ * have entries with value "0" and valid bit on.
+ * This needs to be done by the first PF that is loaded in a path
+ * (i.e. common phase)
+ */
+ if (CHIP_IS_E2(bp)) {
+ struct ilt_client_info ilt_cli;
+ struct bnx2x_ilt ilt;
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+ /* initialize dummy TM client */
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ /* Step 1: set zeroes to all ILT page entries with the valid bit on
+ * Step 2: set the timers first/last ILT entry to point
+ * to the entire range to prevent an ILT range error for the 3rd/4th
+ * vnic (this code assumes the vnic exists)
+ *
+ * Both steps are performed by the call to bnx2x_ilt_client_init_op()
+ * with the dummy TM client.
+ *
+ * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+ * and its LAST_ILT counterpart are split registers.
+ */
+ bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+ bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+ }
+
+
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+ if (CHIP_IS_E2(bp)) {
+ int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+ (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+ bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
+
+ bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
+
+ /* let the HW do its magic ... */
+ do {
+ msleep(200);
+ val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+ } while (factor-- && (val != 1));
+
+ if (val != 1) {
+ BNX2X_ERR("ATC_INIT failed\n");
+ return -EBUSY;
+ }
+ }
+
bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
/* clean the DMAE memory */
@@ -3988,20 +5001,12 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
-#ifdef BCM_CNIC
- wb_write[0] = 0;
- wb_write[1] = 0;
- for (i = 0; i < 64; i++) {
- REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
- bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
+
+ /* QM queues pointers table */
+ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
- if (CHIP_IS_E1H(bp)) {
- REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
- bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
- wb_write, 2);
- }
- }
-#endif
/* soft reset pulse */
REG_WR(bp, QM_REG_SOFT_RESET, 1);
REG_WR(bp, QM_REG_SOFT_RESET, 0);
@@ -4011,21 +5016,35 @@ static int bnx2x_init_common(struct bnx2x *bp)
#endif
bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
+ REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
if (!CHIP_REV_IS_SLOW(bp)) {
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
}
bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+ if (CHIP_MODE_IS_4_PORT(bp)) {
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
+ }
+
bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
/* set NIC mode */
REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
- if (CHIP_IS_E1H(bp))
- REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
+
+ if (CHIP_IS_E2(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ basic Ethernet header */
+ int has_ovlan = IS_MF(bp);
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
+ REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
+ }
bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
@@ -4042,6 +5061,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
+
/* sync semi rtc */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
0x80000000);
@@ -4052,9 +5074,16 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
+ if (CHIP_IS_E2(bp)) {
+ int has_ovlan = IS_MF(bp);
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
+ REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
+ }
+
REG_WR(bp, SRC_REG_SOFT_RST, 1);
for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
REG_WR(bp, i, random32());
+
bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -4089,6 +5118,11 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
+
+ if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+ bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
@@ -4096,15 +5130,34 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, 0x2814, 0xffffffff);
REG_WR(bp, 0x3820, 0xffffffff);
+ if (CHIP_IS_E2(bp)) {
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+ }
+
bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
- if (CHIP_IS_E1H(bp)) {
- REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
- REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
+ }
+ if (CHIP_IS_E2(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ basic Ethernet header */
+ REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
}
if (CHIP_REV_IS_SLOW(bp))
@@ -4128,28 +5181,22 @@ static int bnx2x_init_common(struct bnx2x *bp)
}
REG_WR(bp, CFC_REG_DEBUG0, 0);
- /* read NIG statistic
- to see if this is our first up since powerup */
- bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
- val = *bnx2x_sp(bp, wb_data[0]);
+ if (CHIP_IS_E1(bp)) {
+ /* read NIG statistic
+ to see if this is our first up since powerup */
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
- /* do internal memory self test */
- if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
- BNX2X_ERR("internal mem self test failed\n");
- return -EBUSY;
+ /* do internal memory self test */
+ if ((val == 0) && bnx2x_int_mem_test(bp)) {
+ BNX2X_ERR("internal mem self test failed\n");
+ return -EBUSY;
+ }
}
- switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- bp->port.need_hw_lock = 1;
- break;
-
- default:
- break;
- }
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
bnx2x_setup_fan_failure_detection(bp);
@@ -4161,16 +5208,30 @@ static int bnx2x_init_common(struct bnx2x *bp)
enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
- bnx2x_acquire_phy_lock(bp);
- bnx2x_common_init_phy(bp, bp->common.shmem_base);
- bnx2x_release_phy_lock(bp);
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ CHIP_IS_E1x(bp)) {
+ u32 shmem_base[2], shmem2_base[2];
+ shmem_base[0] = bp->common.shmem_base;
+ shmem2_base[0] = bp->common.shmem2_base;
+ if (CHIP_IS_E2(bp)) {
+ shmem_base[1] =
+ SHMEM2_RD(bp, other_shmem_base_addr);
+ shmem2_base[1] =
+ SHMEM2_RD(bp, other_shmem2_base_addr);
+ }
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+ bp->common.chip_id);
+ bnx2x_release_phy_lock(bp);
+ }
} else
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
return 0;
}
-static int bnx2x_init_port(struct bnx2x *bp)
+static int bnx2x_init_hw_port(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
@@ -4184,14 +5245,23 @@ static int bnx2x_init_port(struct bnx2x *bp)
bnx2x_init_block(bp, PXP_BLOCK, init_stage);
bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
+ /* Timers bug workaround: the pf_master bit in pglue is disabled in the
+ * common phase; we need to enable it here before any DMAE accesses are
+ * attempted. Therefore we manually add the enable-master to the
+ * port phase (it also happens in the function phase).
+ */
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
bnx2x_init_block(bp, TCM_BLOCK, init_stage);
bnx2x_init_block(bp, UCM_BLOCK, init_stage);
bnx2x_init_block(bp, CCM_BLOCK, init_stage);
bnx2x_init_block(bp, XCM_BLOCK, init_stage);
-#ifdef BCM_CNIC
- REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
+ /* QM cid (connection) count */
+ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+#ifdef BCM_CNIC
bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
@@ -4199,29 +5269,41 @@ static int bnx2x_init_port(struct bnx2x *bp)
bnx2x_init_block(bp, DQ_BLOCK, init_stage);
- bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
- if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
- /* no pause for emulation and FPGA */
- low = 0;
- high = 513;
- } else {
- if (IS_E1HMF(bp))
- low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
- else if (bp->dev->mtu > 4096) {
- if (bp->flags & ONE_PORT_FLAG)
- low = 160;
- else {
- val = bp->dev->mtu;
- /* (24*1024 + val*4)/256 */
- low = 96 + (val/64) + ((val % 64) ? 1 : 0);
- }
- } else
- low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
- high = low + 56; /* 14*1024/256 */
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
+
+ if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+ bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
+ if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
+ /* no pause for emulation and FPGA */
+ low = 0;
+ high = 513;
+ } else {
+ if (IS_MF(bp))
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+ else if (bp->dev->mtu > 4096) {
+ if (bp->flags & ONE_PORT_FLAG)
+ low = 160;
+ else {
+ val = bp->dev->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = 96 + (val/64) +
+ ((val % 64) ? 1 : 0);
+ }
+ } else
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+ high = low + 56; /* 14*1024/256 */
+ }
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
}
- REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
- REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+ if (CHIP_MODE_IS_4_PORT(bp)) {
+ REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
+ REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
+ REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0), 40);
+ }
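/*
 * A worked instance of the low-threshold formula kept in the comment above,
 * (24*1024 + val*4)/256 with val = MTU: 24*1024/256 = 96 and val*4/256 =
 * val/64, so the code computes 96 + val/64 and the (val % 64) term rounds
 * the division up.  For example, with an MTU of 9000:
 *
 *	low  = 96 + 9000/64 + 1 = 96 + 140 + 1 = 237
 *	high = low + 56 = 293
 *
 * which matches ceil((24*1024 + 9000*4)/256) = 237 from the original
 * expression.
 */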
bnx2x_init_block(bp, PRS_BLOCK, init_stage);
@@ -4234,24 +5316,28 @@ static int bnx2x_init_port(struct bnx2x *bp)
bnx2x_init_block(bp, USEM_BLOCK, init_stage);
bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
bnx2x_init_block(bp, UPB_BLOCK, init_stage);
bnx2x_init_block(bp, XPB_BLOCK, init_stage);
bnx2x_init_block(bp, PBF_BLOCK, init_stage);
- /* configure PBF to work without PAUSE mtu 9000 */
- REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+ if (!CHIP_IS_E2(bp)) {
+ /* configure PBF to work without PAUSE mtu 9000 */
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
- /* update threshold */
- REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
- /* update init credit */
- REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+ /* update init credit */
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
- /* probe changes */
- REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
- msleep(5);
- REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+ /* probe changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+ udelay(50);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+ }
#ifdef BCM_CNIC
bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
@@ -4265,13 +5351,15 @@ static int bnx2x_init_port(struct bnx2x *bp)
}
bnx2x_init_block(bp, HC_BLOCK, init_stage);
+ bnx2x_init_block(bp, IGU_BLOCK, init_stage);
+
bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
/* init aeu_mask_attn_func_0/1:
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
* - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
* bits 4-7 are used for "per vn group attention" */
REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
- (IS_E1HMF(bp) ? 0xF7 : 0x7));
+ (IS_MF(bp) ? 0xF7 : 0x7));
bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
@@ -4283,11 +5371,25 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
- if (CHIP_IS_E1H(bp)) {
- /* 0x2 disable e1hov, 0x1 enable */
+ if (!CHIP_IS_E1(bp)) {
+ /* 0x2 disable mf_ov, 0x1 enable */
REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
- (IS_E1HMF(bp) ? 0x1 : 0x2));
+ (IS_MF(bp) ? 0x1 : 0x2));
+ if (CHIP_IS_E2(bp)) {
+ val = 0;
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = 1;
+ break;
+ case MULTI_FUNCTION_SI:
+ val = 2;
+ break;
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+ NIG_REG_LLH0_CLS_TYPE), val);
+ }
{
REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
@@ -4297,194 +5399,313 @@ static int bnx2x_init_port(struct bnx2x *bp)
bnx2x_init_block(bp, MCP_BLOCK, init_stage);
bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-
- switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- {
- u32 swap_val, swap_override, aeu_gpio_mask, offset;
-
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
-
- /* The GPIO should be swapped if the swap register is
- set and active */
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
- /* Select function upon port-swap configuration */
- if (port == 0) {
- offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
- aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
- } else {
- offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
- aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
- }
- val = REG_RD(bp, offset);
- /* add GPIO3 to group */
- val |= aeu_gpio_mask;
- REG_WR(bp, offset, val);
- }
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- /* add SPIO 5 to group 0 */
- {
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
+ if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
+ bp->common.shmem2_base, port)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
val = REG_RD(bp, reg_addr);
val |= AEU_INPUTS_ATTN_BITS_SPIO5;
REG_WR(bp, reg_addr, val);
- }
- break;
-
- default:
- break;
}
-
bnx2x__link_reset(bp);
return 0;
}
-#define ILT_PER_FUNC (768/2)
-#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
-/* the phys address is shifted right 12 bits and has an added
- 1=valid bit added to the 53rd bit
- then since this is a wide register(TM)
- we split it into two 32 bit writes
- */
-#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
-#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
-#define PXP_ONE_ILT(x) (((x) << 10) | x)
-#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
-
-#ifdef BCM_CNIC
-#define CNIC_ILT_LINES 127
-#define CNIC_CTX_PER_ILT 16
-#else
-#define CNIC_ILT_LINES 0
-#endif
-
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
int reg;
- if (CHIP_IS_E1H(bp))
- reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
- else /* E1 */
+ if (CHIP_IS_E1(bp))
reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
-static int bnx2x_init_func(struct bnx2x *bp)
+static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+ bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
+}
+
+static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+ u32 i, base = FUNC_ILT_BASE(func);
+ for (i = base; i < base + ILT_PER_FUNC; i++)
+ bnx2x_ilt_wr(bp, i, 0);
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 cdu_ilt_start;
u32 addr, val;
int i;
DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
/* set MSI reconfigure capability */
- addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
- val = REG_RD(bp, addr);
- val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
- REG_WR(bp, addr, val);
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+ val = REG_RD(bp, addr);
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+ REG_WR(bp, addr, val);
+ }
- i = FUNC_ILT_BASE(func);
+ ilt = BP_ILT(bp);
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
- bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
- if (CHIP_IS_E1H(bp)) {
- REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
- REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
- } else /* E1 */
- REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
- PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
+ for (i = 0; i < L2_ILT_LINES(bp); i++) {
+ ilt->lines[cdu_ilt_start + i].page =
+ bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page_mapping =
+ bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
+ /* cdu ilt pages are allocated manually so there's no need to
+ set the size */
+ }
+ bnx2x_ilt_init_op(bp, INITOP_SET);
#ifdef BCM_CNIC
- i += 1 + CNIC_ILT_LINES;
- bnx2x_ilt_wr(bp, i, bp->timers_mapping);
- if (CHIP_IS_E1(bp))
- REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
- else {
- REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
- REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
- }
+ bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
- i++;
- bnx2x_ilt_wr(bp, i, bp->qm_mapping);
- if (CHIP_IS_E1(bp))
- REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
- else {
- REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
- REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+#endif
+
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif /* BCM_CNIC */
+
+ if (CHIP_IS_E2(bp)) {
+ u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ /* Turn on single ISR mode in the IGU if the driver is going to use
+ * INT#x or MSI
+ */
+ if (!(bp->flags & USING_MSIX_FLAG))
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ /*
+ * Timers bug workaround: function init part.
+ * We need to wait 20 msec after initializing the ILT to make
+ * sure there are no requests in any of the PXP internal queues
+ * with "old" ILT addresses.
+ */
+ msleep(20);
+ /*
+ * Master enable - needed because WB DMAE writes are performed
+ * before this register is re-initialized as part of the regular
+ * function init.
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+ /* Enable the function in IGU */
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
}
- i++;
- bnx2x_ilt_wr(bp, i, bp->t1_mapping);
- if (CHIP_IS_E1(bp))
- REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
- else {
- REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
- REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
+ bp->dmae_ready = 1;
+
+ bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
+
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+
+ bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
+
+ if (CHIP_IS_E2(bp)) {
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
+ BP_PATH(bp));
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
+ BP_PATH(bp));
}
- /* tell the searcher where the T2 table is */
- REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
- bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
- U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, QM_REG_PF_EN, 1);
- bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
- U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
- U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
+ bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
- REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
-#endif
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
+
+ bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+ bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
- if (CHIP_IS_E1H(bp)) {
- bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
+ if (CHIP_IS_E2(bp))
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+ if (IS_MF(bp)) {
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
- REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
}
+ bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
+
/* HC init per function */
- if (CHIP_IS_E1H(bp)) {
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
+
+ } else {
+ int num_segs, sb_idx, prod_offset;
+
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
- REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
- REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ if (CHIP_IS_E2(bp)) {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
+
+ if (CHIP_IS_E2(bp)) {
+ int dsb_idx = 0;
+ /**
+ * Producer memory:
+ * E2 mode: addresses 0-135 map to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+ * E1.5 mode - in backward compatible mode:
+ * for a non-default SB, each even line in the memory
+ * holds the U producer and each odd line holds
+ * the C producer. The first 128 producers are for
+ * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+ * producers are for the per-PF DSBs.
+ * Each PF has five segments (the order inside each
+ * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+ * 144-147 attn prods.
+ */
+ /* non-default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+ for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+ prod_offset = (bp->igu_base_sb + sb_idx) *
+ num_segs;
+
+ for (i = 0; i < num_segs; i++) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with value 0 */
+ bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_igu_clear_sb(bp,
+ bp->igu_base_sb + sb_idx);
+ }
+
+ /* default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ dsb_idx = BP_FUNC(bp);
+ else
+ dsb_idx = BP_E1HVN(bp);
+
+ prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+ for (i = 0; i < (num_segs * E1HVN_MAX);
+ i += E1HVN_MAX) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i)*4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with 0 */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ } else {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ }
+ bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+ /* !!! these should become driver const once
+ rf-tool supports split-68 const */
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+ }
}
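/*
 * A hedged worked example of the producer-clearing loops above: for a
 * non-default status block in E2 "normal" mode, the driver zeroes num_segs
 * consecutive 32-bit producer slots starting at
 *
 *	IGU_REG_PROD_CONS_MEMORY + (igu_base_sb + sb_idx) * num_segs * 4
 *
 * so, assuming num_segs is 2 and igu_base_sb + sb_idx is 16 (both values are
 * illustrative, not taken from this hunk), slots 32 and 33 of the mapping
 * memory described in the layout comment are cleared before the consumer
 * update and bnx2x_igu_clear_sb() are issued.
 */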
- bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
/* Reset PCIE errors for debug */
REG_WR(bp, 0x2114, 0xffffffff);
REG_WR(bp, 0x2120, 0xffffffff);
+ bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
+
+ bnx2x_phy_probe(&bp->link_params);
+
return 0;
}
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
- int i, rc = 0;
+ int rc = 0;
DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
- BP_FUNC(bp), load_code);
+ BP_ABS_FUNC(bp), load_code);
bp->dmae_ready = 0;
mutex_init(&bp->dmae_mutex);
@@ -4494,21 +5715,20 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_COMMON:
- rc = bnx2x_init_common(bp);
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ rc = bnx2x_init_hw_common(bp, load_code);
if (rc)
goto init_hw_err;
/* no break */
case FW_MSG_CODE_DRV_LOAD_PORT:
- bp->dmae_ready = 1;
- rc = bnx2x_init_port(bp);
+ rc = bnx2x_init_hw_port(bp);
if (rc)
goto init_hw_err;
/* no break */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
- bp->dmae_ready = 1;
- rc = bnx2x_init_func(bp);
+ rc = bnx2x_init_hw_func(bp);
if (rc)
goto init_hw_err;
break;
@@ -4519,22 +5739,14 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
}
if (!BP_NOMCP(bp)) {
- int func = BP_FUNC(bp);
+ int mb_idx = BP_FW_MB_IDX(bp);
bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
+ (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
DRV_PULSE_SEQ_MASK);
DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
}
- /* this needs to be done before gunzip end */
- bnx2x_zero_def_sb(bp);
- for_each_queue(bp, i)
- bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
-#ifdef BCM_CNIC
- bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
-#endif
-
init_hw_err:
bnx2x_gunzip_end(bp);
@@ -4547,7 +5759,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_PCI_FREE(x, y, size) \
do { \
if (x) { \
- dma_free_coherent(&bp->pdev->dev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
x = NULL; \
y = 0; \
} \
@@ -4556,7 +5768,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_FREE(x) \
do { \
if (x) { \
- vfree(x); \
+ kfree((void *)x); \
x = NULL; \
} \
} while (0)
@@ -4566,11 +5778,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
/* fastpath */
/* Common */
for_each_queue(bp, i) {
-
/* status blocks */
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
- bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_status_block));
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
+ bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
+ bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
}
/* Rx */
for_each_queue(bp, i) {
@@ -4604,28 +5820,56 @@ void bnx2x_free_mem(struct bnx2x *bp)
/* end of fastpath */
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
- sizeof(struct host_def_status_block));
+ sizeof(struct host_sp_status_block));
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
+ BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
+ bp->context.size);
+
+ bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+ BNX2X_FREE(bp->ilt->lines);
+
#ifdef BCM_CNIC
- BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
- BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
- BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
- BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
- BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
- sizeof(struct host_status_block));
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif
+
BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+ BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
#undef BNX2X_PCI_FREE
#undef BNX2X_KFREE
}
-int bnx2x_alloc_mem(struct bnx2x *bp)
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (CHIP_IS_E2(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
@@ -4636,10 +5880,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
#define BNX2X_ALLOC(x, size) \
do { \
- x = vmalloc(size); \
+ x = kzalloc(size, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
- memset(x, 0, size); \
} while (0)
int i;
@@ -4647,12 +5890,19 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
/* fastpath */
/* Common */
for_each_queue(bp, i) {
+ union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
bnx2x_fp(bp, i, bp) = bp;
-
/* status blocks */
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
&bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_status_block));
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+
+ set_sb_shortcuts(bp, i);
}
/* Rx */
for_each_queue(bp, i) {
@@ -4688,37 +5938,41 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
}
/* end of fastpath */
+#ifdef BCM_CNIC
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ /* allocate searcher T2 table */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+
BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
- sizeof(struct host_def_status_block));
+ sizeof(struct host_sp_status_block));
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
-#ifdef BCM_CNIC
- BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
-
- /* allocate searcher T2 table
- we allocate 1/4 of alloc num for T2
- (which is not entered into the ILT) */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
+ bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
- /* Initialize T2 (for 1024 connections) */
- for (i = 0; i < 16*1024; i += 64)
- *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
+ BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
+ bp->context.size);
- /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
- BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
+ BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
- /* QM queues (128*MAX_CONN) */
- BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
-
- BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_status_block));
-#endif
+ if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
/* Slow path ring */
BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+ /* EQ */
+ BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
return 0;
alloc_mem_err:
@@ -4729,97 +5983,47 @@ alloc_mem_err:
#undef BNX2X_ALLOC
}
-
/*
* Init service functions
*/
-
-/**
- * Sets a MAC in a CAM for a few L2 Clients for E1 chip
- *
- * @param bp driver descriptor
- * @param set set or clear an entry (1 or 0)
- * @param mac pointer to a buffer containing a MAC
- * @param cl_bit_vec bit vector of clients to register a MAC for
- * @param cam_offset offset in a CAM to use
- * @param with_bcast set broadcast MAC as well
- */
-static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
- u32 cl_bit_vec, u8 cam_offset,
- u8 with_bcast)
+int bnx2x_func_start(struct bnx2x *bp)
{
- struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
- int port = BP_PORT(bp);
-
- /* CAM allocation
- * unicasts 0-31:port0 32-63:port1
- * multicast 64-127:port0 128-191:port1
- */
- config->hdr.length = 1 + (with_bcast ? 1 : 0);
- config->hdr.offset = cam_offset;
- config->hdr.client_id = 0xff;
- config->hdr.reserved1 = 0;
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
- /* primary MAC */
- config->config_table[0].cam_entry.msb_mac_addr =
- swab16(*(u16 *)&mac[0]);
- config->config_table[0].cam_entry.middle_mac_addr =
- swab16(*(u16 *)&mac[2]);
- config->config_table[0].cam_entry.lsb_mac_addr =
- swab16(*(u16 *)&mac[4]);
- config->config_table[0].cam_entry.flags = cpu_to_le16(port);
- if (set)
- config->config_table[0].target_table_entry.flags = 0;
- else
- CAM_INVALIDATE(config->config_table[0]);
- config->config_table[0].target_table_entry.clients_bit_vector =
- cpu_to_le32(cl_bit_vec);
- config->config_table[0].target_table_entry.vlan_id = 0;
+ /* Wait for completion */
+ return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
+ WAIT_RAMROD_COMMON);
+}
- DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
- (set ? "setting" : "clearing"),
- config->config_table[0].cam_entry.msb_mac_addr,
- config->config_table[0].cam_entry.middle_mac_addr,
- config->config_table[0].cam_entry.lsb_mac_addr);
-
- /* broadcast */
- if (with_bcast) {
- config->config_table[1].cam_entry.msb_mac_addr =
- cpu_to_le16(0xffff);
- config->config_table[1].cam_entry.middle_mac_addr =
- cpu_to_le16(0xffff);
- config->config_table[1].cam_entry.lsb_mac_addr =
- cpu_to_le16(0xffff);
- config->config_table[1].cam_entry.flags = cpu_to_le16(port);
- if (set)
- config->config_table[1].target_table_entry.flags =
- TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
- else
- CAM_INVALIDATE(config->config_table[1]);
- config->config_table[1].target_table_entry.clients_bit_vector =
- cpu_to_le32(cl_bit_vec);
- config->config_table[1].target_table_entry.vlan_id = 0;
- }
+int bnx2x_func_stop(struct bnx2x *bp)
+{
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
- U64_HI(bnx2x_sp_mapping(bp, mac_config)),
- U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+ /* Wait for completion */
+ return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
+ 0, &(bp->state), WAIT_RAMROD_COMMON);
}
/**
- * Sets a MAC in a CAM for a few L2 Clients for E1H chip
+ * Sets a MAC in a CAM for a few L2 Clients for E1x chips
*
* @param bp driver descriptor
* @param set set or clear an entry (1 or 0)
* @param mac pointer to a buffer containing a MAC
* @param cl_bit_vec bit vector of clients to register a MAC for
* @param cam_offset offset in a CAM to use
+ * @param is_bcast whether the MAC being set is a broadcast address (E1 only)
*/
-static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
- u32 cl_bit_vec, u8 cam_offset)
+static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
+ u32 cl_bit_vec, u8 cam_offset,
+ u8 is_bcast)
{
- struct mac_configuration_cmd_e1h *config =
- (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
+ int ramrod_flags = WAIT_RAMROD_COMMON;
+
+ bp->set_mac_pending = 1;
+ smp_wmb();
config->hdr.length = 1;
config->hdr.offset = cam_offset;
@@ -4836,29 +6040,41 @@ static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
config->config_table[0].clients_bit_vector =
cpu_to_le32(cl_bit_vec);
config->config_table[0].vlan_id = 0;
- config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
+ config->config_table[0].pf_id = BP_FUNC(bp);
if (set)
- config->config_table[0].flags = BP_PORT(bp);
+ SET_FLAG(config->config_table[0].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
else
- config->config_table[0].flags =
- MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
+ SET_FLAG(config->config_table[0].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+
+ if (is_bcast)
+ SET_FLAG(config->config_table[0].flags,
+ MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
- DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
+ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
(set ? "setting" : "clearing"),
config->config_table[0].msb_mac_addr,
config->config_table[0].middle_mac_addr,
- config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
+ config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
- U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+ U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
+
+ /* Wait for a completion */
+ bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int poll)
+int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
+ u8 poll = flags & WAIT_RAMROD_POLL;
+ u8 common = flags & WAIT_RAMROD_COMMON;
DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
poll ? "polling" : "waiting", state, idx);
@@ -4866,13 +6082,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
might_sleep();
while (cnt--) {
if (poll) {
- bnx2x_rx_int(bp->fp, 10);
- /* if index is different from 0
- * the reply for some commands will
- * be on the non default queue
- */
- if (idx)
- bnx2x_rx_int(&bp->fp[idx], 10);
+ if (common)
+ bnx2x_eq_int(bp);
+ else {
+ bnx2x_rx_int(bp->fp, 10);
+ /* if index is different from 0
+ * the reply for some commands will
+ * be on the non default queue
+ */
+ if (idx)
+ bnx2x_rx_int(&bp->fp[idx], 10);
+ }
}
mb(); /* state is changed by bnx2x_sp_event() */
@@ -4899,29 +6119,112 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
return -EBUSY;
}
-void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
+u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
- bp->set_mac_pending++;
- smp_wmb();
+ if (CHIP_IS_E1H(bp))
+ return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
+ else if (CHIP_MODE_IS_4_PORT(bp))
+ return BP_FUNC(bp) * 32 + rel_offset;
+ else
+ return BP_VN(bp) * 32 + rel_offset;
+}
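/*
 * A small worked example of the CAM offset scheme above, assuming
 * E1H_FUNC_MAX is 8 (the constant is not shown in this hunk): on E1H every
 * relative line is strided across the functions, so function 3 asking for
 * relative line 1 gets 8 * 1 + 3 = CAM entry 11, while on newer chips each
 * function (4-port mode) or vn instead owns a fixed 32-entry window and the
 * relative line is an offset inside it.
 */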
+
+void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
+{
+ u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
+ bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
- bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
- (1 << bp->fp->cl_id), BP_FUNC(bp));
+ /* networking MAC */
+ bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
+ (1 << bp->fp->cl_id), cam_offset, 0);
- /* Wait for a completion */
- bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+ if (CHIP_IS_E1(bp)) {
+ /* broadcast MAC */
+ u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
+ }
}
+static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+{
+ int i = 0, old;
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* copy mac */
+ config_cmd->config_table[i].msb_mac_addr =
+ swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
+ config_cmd->config_table[i].middle_mac_addr =
+ swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
+ config_cmd->config_table[i].lsb_mac_addr =
+ swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
+
+ config_cmd->config_table[i].vlan_id = 0;
+ config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+ config_cmd->config_table[i].clients_bit_vector =
+ cpu_to_le32(1 << BP_L_ID(bp));
+
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
-void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
+ DP(NETIF_MSG_IFUP,
+ "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
+ config_cmd->config_table[i].msb_mac_addr,
+ config_cmd->config_table[i].middle_mac_addr,
+ config_cmd->config_table[i].lsb_mac_addr);
+ i++;
+ }
+ old = config_cmd->hdr.length;
+ if (old > i) {
+ for (; i < old; i++) {
+ if (CAM_IS_INVALID(config_cmd->
+ config_table[i])) {
+ /* already invalidated */
+ break;
+ }
+ /* invalidate */
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ }
+ }
+
+ config_cmd->hdr.length = i;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ config_cmd->hdr.reserved1 = 0;
+
+ bp->set_mac_pending = 1;
+ smp_wmb();
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+}
+static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
- bp->set_mac_pending++;
+ int i;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
+ int ramrod_flags = WAIT_RAMROD_COMMON;
+
+ bp->set_mac_pending = 1;
smp_wmb();
- bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
- (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
- 1);
+ for (i = 0; i < config_cmd->hdr.length; i++)
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
/* Wait for a completion */
- bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+ bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+ ramrod_flags);
+
}
#ifdef BCM_CNIC
@@ -4937,174 +6240,463 @@ void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
*/
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
- u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
-
- bp->set_mac_pending++;
- smp_wmb();
+ u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
+ bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
+ u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
+ u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
/* Send a SET_MAC ramrod */
- if (CHIP_IS_E1(bp))
- bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
- cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
- 1);
- else
- /* CAM allocation for E1H
- * unicasts: by func number
- * multicast: 20+FUNC*20, 20 each
- */
- bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
- cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
-
- /* Wait for a completion when setting */
- bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-
+ bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+ cam_offset, 0);
return 0;
}
#endif
-int bnx2x_setup_leading(struct bnx2x *bp)
-{
- int rc;
+static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
+ struct bnx2x_client_init_params *params,
+ u8 activate,
+ struct client_init_ramrod_data *data)
+{
+ /* Clear the buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* general */
+ data->general.client_id = params->rxq_params.cl_id;
+ data->general.statistics_counter_id = params->rxq_params.stat_id;
+ data->general.statistics_en_flg =
+ (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
+ data->general.activate_flg = activate;
+ data->general.sp_client_id = params->rxq_params.spcl_id;
+
+ /* Rx data */
+ data->rx.tpa_en_flg =
+ (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
+ data->rx.vmqueue_mode_en_flg = 0;
+ data->rx.cache_line_alignment_log_size =
+ params->rxq_params.cache_line_log;
+ data->rx.enable_dynamic_hc =
+ (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
+ data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
+ data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
+ data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
+
+ /* We don't set drop flags */
+ data->rx.drop_ip_cs_err_flg = 0;
+ data->rx.drop_tcp_cs_err_flg = 0;
+ data->rx.drop_ttl0_flg = 0;
+ data->rx.drop_udp_cs_err_flg = 0;
+
+ data->rx.inner_vlan_removal_enable_flg =
+ (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
+ data->rx.outer_vlan_removal_enable_flg =
+ (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
+ data->rx.status_block_id = params->rxq_params.fw_sb_id;
+ data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
+ data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
+ data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
+ data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
+ data->rx.bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
+ data->rx.bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
+ data->rx.sge_page_base.lo =
+ cpu_to_le32(U64_LO(params->rxq_params.sge_map));
+ data->rx.sge_page_base.hi =
+ cpu_to_le32(U64_HI(params->rxq_params.sge_map));
+ data->rx.cqe_page_base.lo =
+ cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
+ data->rx.cqe_page_base.hi =
+ cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
+ data->rx.is_leading_rss =
+ (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
+ data->rx.is_approx_mcast = data->rx.is_leading_rss;
+
+ /* Tx data */
+ data->tx.enforce_security_flg = 0; /* VF specific */
+ data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
+ data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
+ data->tx.mtu = 0; /* VF specific */
+ data->tx.tx_bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->txq_params.dscr_map));
+ data->tx.tx_bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->txq_params.dscr_map));
+
+ /* flow control data */
+ data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
+ data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
+ data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
+ data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
+ data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
+ data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
+ data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
+
+ data->fc.safc_group_num = params->txq_params.cos;
+ data->fc.safc_group_en_flg =
+ (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
+ data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+}
+
+static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
+{
+ /* ustorm cxt validation */
+ cxt->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
+ ETH_CONNECTION_TYPE);
+ /* xcontext validation */
+ cxt->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
+ ETH_CONNECTION_TYPE);
+}
+
+int bnx2x_setup_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_init_params *params,
+ u8 activate,
+ struct client_init_ramrod_data *data,
+ dma_addr_t data_mapping)
+{
+ u16 hc_usec;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+ int ramrod_flags = 0, rc;
+
+ /* HC and context validation values */
+ hc_usec = params->txq_params.hc_rate ?
+ 1000000 / params->txq_params.hc_rate : 0;
+ bnx2x_update_coalesce_sb_index(bp,
+ params->txq_params.fw_sb_id,
+ params->txq_params.sb_cq_index,
+ !(params->txq_params.flags & QUEUE_FLG_HC),
+ hc_usec);
+
+ *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
+
+ hc_usec = params->rxq_params.hc_rate ?
+ 1000000 / params->rxq_params.hc_rate : 0;
+ bnx2x_update_coalesce_sb_index(bp,
+ params->rxq_params.fw_sb_id,
+ params->rxq_params.sb_cq_index,
+ !(params->rxq_params.flags & QUEUE_FLG_HC),
+ hc_usec);
+
+ bnx2x_set_ctx_validation(params->rxq_params.cxt,
+ params->rxq_params.cid);
+
+ /* zero stats */
+ if (params->txq_params.flags & QUEUE_FLG_STATS)
+ storm_memset_xstats_zero(bp, BP_PORT(bp),
+ params->txq_params.stat_id);
+
+ if (params->rxq_params.flags & QUEUE_FLG_STATS) {
+ storm_memset_ustats_zero(bp, BP_PORT(bp),
+ params->rxq_params.stat_id);
+ storm_memset_tstats_zero(bp, BP_PORT(bp),
+ params->rxq_params.stat_id);
+ }
+
+ /* Fill the ramrod data */
+ bnx2x_fill_cl_init_data(bp, params, activate, data);
+
+ /* SETUP ramrod.
+ *
+ * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
+ * barrier other than mmiowb() is needed to impose a
+ * proper ordering of memory operations.
+ */
+ mmiowb();
- /* reset IGU state */
- bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
- /* SETUP ramrod */
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
+ bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
+ U64_HI(data_mapping), U64_LO(data_mapping), 0);
/* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
-
+ rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
+ params->ramrod_params.index,
+ params->ramrod_params.pstate,
+ ramrod_flags);
return rc;
}
-int bnx2x_setup_multi(struct bnx2x *bp, int index)
+/**
+ * Configure interrupt mode according to current configuration.
+ * In case of MSI-X it will also try to enable MSI-X.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
- struct bnx2x_fastpath *fp = &bp->fp[index];
+ int rc = 0;
- /* reset IGU state */
- bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+ switch (bp->int_mode) {
+ case INT_MODE_MSI:
+ bnx2x_enable_msi(bp);
+ /* falling through... */
+ case INT_MODE_INTx:
+ bp->num_queues = 1;
+ DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+ break;
+ default:
+ /* Set number of queues according to bp->multi_mode value */
+ bnx2x_set_num_queues(bp);
- /* SETUP ramrod */
- fp->state = BNX2X_FP_STATE_OPENING;
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
- fp->cl_id, 0);
+ DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+ bp->num_queues);
- /* Wait for completion */
- return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
- &(fp->state), 0);
-}
+ /* if we can't use MSI-X we only need one fp,
+ * so try to enable MSI-X with the requested number of fp's
+ * and fall back to MSI or legacy INTx with one fp
+ */
+ rc = bnx2x_enable_msix(bp);
+ if (rc) {
+ /* failed to enable MSI-X */
+ if (bp->multi_mode)
+ DP(NETIF_MSG_IFUP,
+ "Multi requested but failed to "
+ "enable MSI-X (%d), "
+ "set number of queues to %d\n",
+ bp->num_queues,
+ 1);
+ bp->num_queues = 1;
+
+ if (!(bp->flags & DISABLE_MSI_FLAG))
+ bnx2x_enable_msi(bp);
+ }
+ break;
+ }
-void bnx2x_set_num_queues_msix(struct bnx2x *bp)
+ return rc;
+}
+
+/* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
+ return L2_ILT_LINES(bp);
+}
- switch (bp->multi_mode) {
- case ETH_RSS_MODE_DISABLED:
- bp->num_queues = 1;
- break;
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+ struct ilt_client_info *ilt_client;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 line = 0;
- case ETH_RSS_MODE_REGULAR:
- if (num_queues)
- bp->num_queues = min_t(u32, num_queues,
- BNX2X_MAX_QUEUES(bp));
- else
- bp->num_queues = min_t(u32, num_online_cpus(),
- BNX2X_MAX_QUEUES(bp));
- break;
+ ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+ DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+ /* CDU */
+ ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+ ilt_client->client_num = ILT_CLIENT_CDU;
+ ilt_client->page_size = CDU_ILT_PAGE_SZ;
+ ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+ ilt_client->start = line;
+ line += L2_ILT_LINES(bp);
+#ifdef BCM_CNIC
+ line += CNIC_ILT_LINES;
+#endif
+ ilt_client->end = line - 1;
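+ /* the CDU client covers the L2 context lines (plus the CNIC lines when
+ * BCM_CNIC is set); the "hw psz" printed below is log2 of the page size
+ * in 4KB units
+ */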
+
+ DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ /* QM */
+ if (QM_INIT(bp->qm_cid_count)) {
+ ilt_client = &ilt->clients[ILT_CLIENT_QM];
+ ilt_client->client_num = ILT_CLIENT_QM;
+ ilt_client->page_size = QM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+
+ /* 4 bytes for each cid */
+ line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+ QM_ILT_PAGE_SZ);
+
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ }
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
- default:
- bp->num_queues = 1;
- break;
- }
-}
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+}
-static int bnx2x_stop_multi(struct bnx2x *bp, int index)
+int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ int is_leading)
{
- struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_client_init_params params = { {0} };
int rc;
- /* halt the connection */
- fp->state = BNX2X_FP_STATE_HALTING;
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ IGU_INT_ENABLE, 0);
- /* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
- &(fp->state), 1);
- if (rc) /* timeout */
- return rc;
+ params.ramrod_params.pstate = &fp->state;
+ params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
+ params.ramrod_params.index = fp->index;
+ params.ramrod_params.cid = fp->cid;
- /* delete cfc entry */
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
+ if (is_leading)
+ params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
- /* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
- &(fp->state), 1);
+ bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
+
+ bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
+
+ rc = bnx2x_setup_fw_client(bp, &params, 1,
+ bnx2x_sp(bp, client_init_data),
+ bnx2x_sp_mapping(bp, client_init_data));
return rc;
}
-static int bnx2x_stop_leading(struct bnx2x *bp)
+int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
{
- __le16 dsb_sp_prod_idx;
- /* if the other port is handling traffic,
- this can take a lot of time */
- int cnt = 500;
int rc;
- might_sleep();
+ int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
- /* Send HALT ramrod */
- bp->fp[0].state = BNX2X_FP_STATE_HALTING;
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
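+ /* tear the client down in stages: HALT, then TERMINATE, then CFC
+ * delete, waiting for each ramrod to complete before moving on
+ */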
+ /* halt the connection */
+ *p->pstate = BNX2X_FP_STATE_HALTING;
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
+ p->cl_id, 0);
/* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
- &(bp->fp[0].state), 1);
+ rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
+ p->pstate, poll_flag);
if (rc) /* timeout */
return rc;
- dsb_sp_prod_idx = *bp->dsb_sp_prod;
+ *p->pstate = BNX2X_FP_STATE_TERMINATING;
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
+ p->cl_id, 0);
+ /* Wait for completion */
+ rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
+ p->pstate, poll_flag);
+ if (rc) /* timeout */
+ return rc;
- /* Send PORT_DELETE ramrod */
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
- /* Wait for completion to arrive on default status block
- we are going to reset the chip anyway
- so there is not much to do if this times out
- */
- while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
- if (!cnt) {
- DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
- "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
- *bp->dsb_sp_prod, dsb_sp_prod_idx);
-#ifdef BNX2X_STOP_ON_ERROR
- bnx2x_panic();
-#endif
- rc = -EBUSY;
- break;
- }
- cnt--;
- msleep(1);
- rmb(); /* Refresh the dsb_sp_prod */
- }
- bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
- bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+ /* delete cfc entry */
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
+ /* Wait for completion */
+ rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
+ p->pstate, WAIT_RAMROD_COMMON);
return rc;
}
+static int bnx2x_stop_client(struct bnx2x *bp, int index)
+{
+ struct bnx2x_client_ramrod_params client_stop = {0};
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+
+ client_stop.index = index;
+ client_stop.cid = fp->cid;
+ client_stop.cl_id = fp->cl_id;
+ client_stop.pstate = &(fp->state);
+ client_stop.poll = 0;
+
+ return bnx2x_stop_fw_client(bp, &client_stop);
+}
+
+
static void bnx2x_reset_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
- int base, i;
+ int i;
+ int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
+ (CHIP_IS_E2(bp) ?
+ offsetof(struct hc_status_block_data_e2, common) :
+ offsetof(struct hc_status_block_data_e1x, common));
+ int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
+ int pfid_offset = offsetof(struct pci_entity, pf_id);
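+ /* these offsets locate the pf_id field inside each status block's
+ * data, and are used below to mark the function as disabled
+ */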
+
+ /* Disable the function in the FW */
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+ /* FP SBs */
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ REG_WR8(bp,
+ BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
+ + pfunc_offset_fp + pfid_offset,
+ HC_FUNCTION_DISABLED);
+ }
+
+ /* SP SB */
+ REG_WR8(bp,
+ BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ pfunc_offset_sp + pfid_offset,
+ HC_FUNCTION_DISABLED);
+
+
+ for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+ 0);
/* Configure IGU */
- REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
- REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ } else {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
#ifdef BCM_CNIC
/* Disable Timer scan */
@@ -5120,9 +6712,27 @@ static void bnx2x_reset_func(struct bnx2x *bp)
}
#endif
/* Clear ILT */
- base = FUNC_ILT_BASE(func);
- for (i = base; i < base + ILT_PER_FUNC; i++)
- bnx2x_ilt_wr(bp, i, 0);
+ bnx2x_clear_func_ilt(bp, func);
+
+ /* Timers workaround for an E2 bug: if this is vnic-3,
+ * we need to set the entire ILT range for these timers.
+ */
+ if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
+ struct ilt_client_info ilt_cli;
+ /* use dummy TM client */
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+ }
+
+ /* this assumes that reset_port() is called before reset_func() */
+ if (CHIP_IS_E2(bp))
+ bnx2x_pf_disable(bp);
+
+ bp->dmae_ready = 0;
}
static void bnx2x_reset_port(struct bnx2x *bp)
@@ -5154,7 +6764,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
- BP_FUNC(bp), reset_code);
+ BP_ABS_FUNC(bp), reset_code);
switch (reset_code) {
case FW_MSG_CODE_DRV_UNLOAD_COMMON:
@@ -5191,7 +6801,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
cnt = 1000;
while (bnx2x_has_tx_work_unload(fp)) {
- bnx2x_tx_int(fp);
if (!cnt) {
BNX2X_ERR("timeout waiting for queue[%d]\n",
i);
@@ -5210,39 +6819,21 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
msleep(1);
if (CHIP_IS_E1(bp)) {
- struct mac_configuration_cmd *config =
- bnx2x_sp(bp, mcast_config);
-
- bnx2x_set_eth_mac_addr_e1(bp, 0);
-
- for (i = 0; i < config->hdr.length; i++)
- CAM_INVALIDATE(config->config_table[i]);
-
- config->hdr.length = i;
- if (CHIP_REV_IS_SLOW(bp))
- config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
- else
- config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
- config->hdr.client_id = bp->fp->cl_id;
- config->hdr.reserved1 = 0;
-
- bp->set_mac_pending++;
- smp_wmb();
-
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
- U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
- U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+ /* invalidate mc list,
+ * wait and poll (interrupts are off)
+ */
+ bnx2x_invlidate_e1_mc_list(bp);
+ bnx2x_set_eth_mac(bp, 0);
- } else { /* E1H */
+ } else {
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
- bnx2x_set_eth_mac_addr_e1h(bp, 0);
+ bnx2x_set_eth_mac(bp, 0);
for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-
- REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
}
+
#ifdef BCM_CNIC
/* Clear iSCSI L2 MAC */
mutex_lock(&bp->cnic_mutex);
@@ -5281,33 +6872,44 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
/* Close multi and leading connections
Completions for ramrods are collected in a synchronous way */
- for_each_nondefault_queue(bp, i)
- if (bnx2x_stop_multi(bp, i))
+ for_each_queue(bp, i)
+
+ if (bnx2x_stop_client(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
goto unload_error;
+#endif
- rc = bnx2x_stop_leading(bp);
+ rc = bnx2x_func_stop(bp);
if (rc) {
- BNX2X_ERR("Stop leading failed!\n");
+ BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
- return -EBUSY;
+ return;
#else
goto unload_error;
#endif
}
-
+#ifndef BNX2X_STOP_ON_ERROR
unload_error:
+#endif
if (!BP_NOMCP(bp))
- reset_code = bnx2x_fw_command(bp, reset_code);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
else {
- DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
- load_count[0], load_count[1], load_count[2]);
- load_count[0]--;
- load_count[1 + port]--;
- DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
- load_count[0], load_count[1], load_count[2]);
- if (load_count[0] == 0)
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
+ "%d, %d, %d\n", BP_PATH(bp),
+ load_count[BP_PATH(bp)][0],
+ load_count[BP_PATH(bp)][1],
+ load_count[BP_PATH(bp)][2]);
+ load_count[BP_PATH(bp)][0]--;
+ load_count[BP_PATH(bp)][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
+ "%d, %d, %d\n", BP_PATH(bp),
+ load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
+ load_count[BP_PATH(bp)][2]);
+ if (load_count[BP_PATH(bp)][0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[1 + port] == 0)
+ else if (load_count[BP_PATH(bp)][1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -5317,12 +6919,18 @@ unload_error:
(reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
bnx2x__link_reset(bp);
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
/* Reset the chip */
bnx2x_reset_chip(bp, reset_code);
/* Report UNLOAD_DONE to MCP */
if (!BP_NOMCP(bp))
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
@@ -5348,7 +6956,6 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
}
}
-
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
@@ -5394,15 +7001,13 @@ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
/* Restore the `magic' bit value... */
- /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
- SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
- (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
MF_CFG_WR(bp, shared_mf_config.clp_mb,
(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
-/* Prepares for MCP reset: takes care of CLP configurations.
+/**
+ * Prepares for MCP reset: takes care of CLP configurations.
*
* @param bp
* @param magic_val Old value of 'magic' bit.
@@ -5800,39 +7405,23 @@ reset_task_exit:
* Init service functions
*/
-static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
-{
- switch (func) {
- case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
- case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
- case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
- case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
- case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
- case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
- case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
- case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
- default:
- BNX2X_ERR("Unsupported function index: %d\n", func);
- return (u32)(-1);
- }
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
+ u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
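+ /* the per-function pretend registers are assumed to be evenly spaced,
+ * so e.g. absolute function 2 resolves to PXP2_REG_PGL_PRETEND_FUNC_F2
+ */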
+ return base + (BP_ABS_FUNC(bp)) * stride;
}
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
- u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
+ u32 reg = bnx2x_get_pretend_reg(bp);
/* Flush all outstanding writes */
mmiowb();
/* Pretend to be function 0 */
REG_WR(bp, reg, 0);
- /* Flush the GRC transaction (in the chip) */
- new_val = REG_RD(bp, reg);
- if (new_val != 0) {
- BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
- new_val);
- BUG();
- }
+ REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
/* From now we are in the "like-E1" mode */
bnx2x_int_disable(bp);
@@ -5840,22 +7429,17 @@ static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
/* Flush all outstanding writes */
mmiowb();
- /* Restore the original funtion settings */
- REG_WR(bp, reg, orig_func);
- new_val = REG_RD(bp, reg);
- if (new_val != orig_func) {
- BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
- orig_func, new_val);
- BUG();
- }
+ /* Restore the original function */
+ REG_WR(bp, reg, BP_ABS_FUNC(bp));
+ REG_RD(bp, reg);
}
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
- if (CHIP_IS_E1H(bp))
- bnx2x_undi_int_disable_e1h(bp, func);
- else
+ if (CHIP_IS_E1(bp))
bnx2x_int_disable(bp);
+ else
+ bnx2x_undi_int_disable_e1h(bp);
}
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
@@ -5872,8 +7456,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- /* save our func */
- int func = BP_FUNC(bp);
+ /* save our pf_num */
+ int orig_pf_num = bp->pf_num;
u32 swap_en;
u32 swap_val;
@@ -5883,32 +7467,33 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
BNX2X_DEV_INFO("UNDI is active! reset device\n");
/* try unload UNDI on port 0 */
- bp->func = 0;
+ bp->pf_num = 0;
bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- reset_code = bnx2x_fw_command(bp, reset_code);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
/* if UNDI is loaded on the other port */
if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
/* send "DONE" for previous unload */
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_UNLOAD_DONE, 0);
/* unload UNDI on port 1 */
- bp->func = 1;
+ bp->pf_num = 1;
bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- bnx2x_fw_command(bp, reset_code);
+ bnx2x_fw_command(bp, reset_code, 0);
}
/* now it's safe to release the lock */
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
- bnx2x_undi_int_disable(bp, func);
+ bnx2x_undi_int_disable(bp);
/* close input traffic and wait for it */
/* Do not rcv packets to BRB */
@@ -5944,14 +7529,13 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
/* send unload done to the MCP */
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
/* restore our func and fw_seq */
- bp->func = func;
+ bp->pf_num = orig_pf_num;
bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
-
} else
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
}
@@ -5973,6 +7557,40 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
+
+ /* Set doorbell size */
+ bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+ if (CHIP_IS_E2(bp)) {
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if ((val & 1) == 0)
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+ BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+ "2_PORT_MODE");
+ bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+ CHIP_2_PORT_MODE;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bp->pfid = (bp->pf_num >> 1); /* 0..3 */
+ else
+ bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
+ } else {
+ bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+ bp->pfid = bp->pf_num; /* 0..7 */
+ }
+
+ /*
+ * Set the base FW non-default (fast path) status block id; this value
+ * is used to initialize the fw_sb_id saved in the fp/queue structure,
+ * which determines the id used by the FW.
+ */
+ if (CHIP_IS_E1x(bp))
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
+ else /* E2 */
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
+
bp->link_params.chip_id = bp->common.chip_id;
BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
@@ -5990,14 +7608,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->common.flash_size, bp->common.flash_size);
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
- bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
+ bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+ MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
bp->link_params.shmem_base = bp->common.shmem_base;
+ bp->link_params.shmem2_base = bp->common.shmem2_base;
BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
bp->common.shmem_base, bp->common.shmem2_base);
- if (!bp->common.shmem_base ||
- (bp->common.shmem_base < 0xA0000) ||
- (bp->common.shmem_base >= 0xC0000)) {
+ if (!bp->common.shmem_base) {
BNX2X_DEV_INFO("MCP not active\n");
bp->flags |= NO_MCP_FLAG;
return;
@@ -6006,7 +7625,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- BNX2X_ERROR("BAD MCP validity signature\n");
+ BNX2X_ERR("BAD MCP validity signature\n");
bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -6030,12 +7649,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
- "please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
- (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
- FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
if (BP_E1HVN(bp) == 0) {
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
@@ -6056,404 +7679,348 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val, val2, val3, val4);
}
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
+static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+ int pfid = BP_FUNC(bp);
+ int vn = BP_E1HVN(bp);
+ int igu_sb_id;
+ u32 val;
+ u8 fid;
+
+ bp->igu_base_sb = 0xff;
+ bp->igu_sb_cnt = 0;
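+ /* in IGU backward-compatible mode the SB layout is fixed per
+ * function/vn, so it is derived directly instead of scanning the
+ * CAM below
+ */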
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
+ bp->l2_cid_count);
+
+ bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+ FP_SB_MAX_E1x;
+
+ bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
+ (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+ return;
+ }
+
+ /* IGU in normal mode - read CAM */
+ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+ igu_sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = IGU_FID(val);
+ if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+ continue;
+ if (IGU_VEC(val) == 0)
+ /* default status block */
+ bp->igu_dsb_id = igu_sb_id;
+ else {
+ if (bp->igu_base_sb == 0xff)
+ bp->igu_base_sb = igu_sb_id;
+ bp->igu_sb_cnt++;
+ }
+ }
+ }
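+ /* clamp to the number of L2 clients in use and sanity-check that the
+ * CAM provided at least one status block
+ */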
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
+ if (bp->igu_sb_cnt == 0)
+ BNX2X_ERR("CAM configuration error\n");
+}
+
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
u32 switch_cfg)
{
- int port = BP_PORT(bp);
- u32 ext_phy_type;
-
- switch (switch_cfg) {
- case SWITCH_CFG_1G:
- BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
-
- ext_phy_type =
- SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_2500baseX_Full |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
+ int cfg_size = 0, idx, port = BP_PORT(bp);
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
+ /* Aggregation of supported attributes of all external phys */
+ bp->port.supported[0] = 0;
+ bp->port.supported[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ } else {
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
- default:
- BNX2X_ERR("NVRAM config error. "
- "BAD SerDes ext_phy_config 0x%x\n",
- bp->link_params.ext_phy_config);
+ if (!(bp->port.supported[0] || bp->port.supported[1])) {
+ BNX2X_ERR("NVRAM config error. BAD phy config."
+ "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
return;
- }
+ }
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
port*0x10);
BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
break;
case SWITCH_CFG_10G:
- BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
-
- ext_phy_type =
- XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_2500baseX_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_2500baseX_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
- bp->link_params.ext_phy_config);
- break;
-
- default:
- BNX2X_ERR("NVRAM config error. "
- "BAD XGXS ext_phy_config 0x%x\n",
- bp->link_params.ext_phy_config);
- return;
- }
-
bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
port*0x18);
BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-
break;
default:
BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
- bp->port.link_config);
+ bp->port.link_config[0]);
return;
}
- bp->link_params.phy_addr = bp->port.phy_addr;
-
- /* mask what we support according to speed_cap_mask */
- if (!(bp->link_params.speed_cap_mask &
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
- bp->port.supported &= ~SUPPORTED_10baseT_Half;
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
- bp->port.supported &= ~SUPPORTED_10baseT_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
- bp->port.supported &= ~SUPPORTED_100baseT_Half;
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
- bp->port.supported &= ~SUPPORTED_100baseT_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
- bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
- bp->port.supported &= ~SUPPORTED_2500baseX_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
- bp->port.supported &= ~SUPPORTED_10000baseT_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
- BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
+ }
+
+ BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+ bp->port.supported[1]);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
- bp->link_params.req_duplex = DUPLEX_FULL;
+ u32 link_config, idx, cfg_size = 0;
+ bp->port.advertising[0] = 0;
+ bp->port.advertising[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
+ for (idx = 0; idx < cfg_size; idx++) {
+ bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = bp->port.link_config[idx];
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_AUTO:
+ if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] |=
+ bp->port.supported[idx];
+ } else {
+ /* force 10G, no AN */
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ continue;
+ }
+ break;
- switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
- case PORT_FEATURE_LINK_SPEED_AUTO:
- if (bp->port.supported & SUPPORTED_Autoneg) {
- bp->link_params.req_line_speed = SPEED_AUTO_NEG;
- bp->port.advertising = bp->port.supported;
- } else {
- u32 ext_phy_type =
- XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
- if ((ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
- (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
- /* force 10G, no AN */
- bp->link_params.req_line_speed = SPEED_10000;
- bp->port.advertising =
- (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- break;
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
}
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " Autoneg not supported\n",
- bp->port.link_config);
- return;
- }
- break;
+ break;
- case PORT_FEATURE_LINK_SPEED_10M_FULL:
- if (bp->port.supported & SUPPORTED_10baseT_Full) {
- bp->link_params.req_line_speed = SPEED_10;
- bp->port.advertising = (ADVERTISED_10baseT_Full |
- ADVERTISED_TP);
- } else {
- BNX2X_ERROR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
- }
- break;
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
- case PORT_FEATURE_LINK_SPEED_10M_HALF:
- if (bp->port.supported & SUPPORTED_10baseT_Half) {
- bp->link_params.req_line_speed = SPEED_10;
- bp->link_params.req_duplex = DUPLEX_HALF;
- bp->port.advertising = (ADVERTISED_10baseT_Half |
- ADVERTISED_TP);
- } else {
- BNX2X_ERROR("NVRAM config error. "
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
- }
- break;
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
- case PORT_FEATURE_LINK_SPEED_100M_FULL:
- if (bp->port.supported & SUPPORTED_100baseT_Full) {
- bp->link_params.req_line_speed = SPEED_100;
- bp->port.advertising = (ADVERTISED_100baseT_Full |
- ADVERTISED_TP);
- } else {
- BNX2X_ERROR("NVRAM config error. "
+ case PORT_FEATURE_LINK_SPEED_1G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_1000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
- }
- break;
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
- case PORT_FEATURE_LINK_SPEED_100M_HALF:
- if (bp->port.supported & SUPPORTED_100baseT_Half) {
- bp->link_params.req_line_speed = SPEED_100;
- bp->link_params.req_duplex = DUPLEX_HALF;
- bp->port.advertising = (ADVERTISED_100baseT_Half |
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_2500baseX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_2500;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
- } else {
- BNX2X_ERROR("NVRAM config error. "
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
- }
- break;
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
- case PORT_FEATURE_LINK_SPEED_1G:
- if (bp->port.supported & SUPPORTED_1000baseT_Full) {
- bp->link_params.req_line_speed = SPEED_1000;
- bp->port.advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
- } else {
- BNX2X_ERROR("NVRAM config error. "
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ case PORT_FEATURE_LINK_SPEED_10G_KX4:
+ case PORT_FEATURE_LINK_SPEED_10G_KR:
+ if (bp->port.supported[idx] &
+ SUPPORTED_10000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
- }
- break;
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
- case PORT_FEATURE_LINK_SPEED_2_5G:
- if (bp->port.supported & SUPPORTED_2500baseX_Full) {
- bp->link_params.req_line_speed = SPEED_2500;
- bp->port.advertising = (ADVERTISED_2500baseX_Full |
- ADVERTISED_TP);
- } else {
+ default:
BNX2X_ERROR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
+ "BAD link speed link_config 0x%x\n",
+ link_config);
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] =
+ bp->port.supported[idx];
+ break;
}
- break;
- case PORT_FEATURE_LINK_SPEED_10G_CX4:
- case PORT_FEATURE_LINK_SPEED_10G_KX4:
- case PORT_FEATURE_LINK_SPEED_10G_KR:
- if (bp->port.supported & SUPPORTED_10000baseT_Full) {
- bp->link_params.req_line_speed = SPEED_10000;
- bp->port.advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- } else {
- BNX2X_ERROR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
- return;
+ bp->link_params.req_flow_ctrl[idx] = (link_config &
+ PORT_FEATURE_FLOW_CONTROL_MASK);
+ if ((bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) &&
+ !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
}
- break;
- default:
- BNX2X_ERROR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
- bp->port.link_config);
- bp->link_params.req_line_speed = SPEED_AUTO_NEG;
- bp->port.advertising = bp->port.supported;
- break;
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
+ " 0x%x advertising 0x%x\n",
+ bp->link_params.req_line_speed[idx],
+ bp->link_params.req_duplex[idx],
+ bp->link_params.req_flow_ctrl[idx],
+ bp->port.advertising[idx]);
}
-
- bp->link_params.req_flow_ctrl = (bp->port.link_config &
- PORT_FEATURE_FLOW_CONTROL_MASK);
- if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
- !(bp->port.supported & SUPPORTED_Autoneg))
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
- BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
- " advertising 0x%x\n",
- bp->link_params.req_line_speed,
- bp->link_params.req_duplex,
- bp->link_params.req_flow_ctrl, bp->port.advertising);
}
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
@@ -6469,48 +8036,28 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
int port = BP_PORT(bp);
u32 val, val2;
u32 config;
- u16 i;
- u32 ext_phy_type;
+ u32 ext_phy_type, ext_phy_config;
bp->link_params.bp = bp;
bp->link_params.port = port;
bp->link_params.lane_config =
SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
- bp->link_params.ext_phy_config =
- SHMEM_RD(bp,
- dev_info.port_hw_config[port].external_phy_config);
- /* BCM8727_NOC => BCM8727 no over current */
- if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
- bp->link_params.ext_phy_config &=
- ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
- bp->link_params.ext_phy_config |=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
- bp->link_params.feature_config_flags |=
- FEATURE_CONFIG_BCM8727_NOC;
- }
- bp->link_params.speed_cap_mask =
+ bp->link_params.speed_cap_mask[0] =
SHMEM_RD(bp,
dev_info.port_hw_config[port].speed_capability_mask);
-
- bp->port.link_config =
+ bp->link_params.speed_cap_mask[1] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask2);
+ bp->port.link_config[0] =
SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
- /* Get the 4 lanes xgxs config rx and tx */
- for (i = 0; i < 2; i++) {
- val = SHMEM_RD(bp,
- dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
- bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
- bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
-
- val = SHMEM_RD(bp,
- dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
- bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
- bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
- }
+ bp->port.link_config[1] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+ bp->link_params.multi_phy_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
/* If the device is capable of WoL, set the default state according
* to the HW
*/
@@ -6518,14 +8065,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
- BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
- " speed_cap_mask 0x%08x link_config 0x%08x\n",
+ BNX2X_DEV_INFO("lane_config 0x%08x "
+ "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
- bp->link_params.ext_phy_config,
- bp->link_params.speed_cap_mask, bp->port.link_config);
+ bp->link_params.speed_cap_mask[0],
+ bp->port.link_config[0]);
- bp->link_params.switch_cfg |= (bp->port.link_config &
- PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bp->link_params.switch_cfg = (bp->port.link_config[0] &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bnx2x_phy_probe(&bp->link_params);
bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
bnx2x_link_settings_requested(bp);
@@ -6534,14 +8082,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
* If connected directly, work with the internal PHY, otherwise, work
* with the external PHY
*/
- ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- bp->mdio.prtad = bp->link_params.phy_addr;
+ bp->mdio.prtad = bp->port.phy_addr;
else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
(ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
bp->mdio.prtad =
- XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+ XGXS_EXT_PHY_ADDR(ext_phy_config);
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
@@ -6558,41 +8109,74 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
- int func = BP_FUNC(bp);
+ int func = BP_ABS_FUNC(bp);
+ int vn;
u32 val, val2;
int rc = 0;
bnx2x_get_common_hwinfo(bp);
- bp->e1hov = 0;
- bp->e1hmf = 0;
- if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
- bp->mf_config =
- SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+ if (CHIP_IS_E1x(bp)) {
+ bp->common.int_block = INT_BLOCK_HC;
+
+ bp->igu_dsb_id = DEF_SB_IGU_ID;
+ bp->igu_base_sb = 0;
+ bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
+ } else {
+ bp->common.int_block = INT_BLOCK_IGU;
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
+ bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+ } else
+ DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
+
+ bnx2x_get_igu_cam_info(bp);
- val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
+ }
+ DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
+ bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
+
+ /*
+ * Initialize MF configuration
+ */
+
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ vn = BP_E1HVN(bp);
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ if (SHMEM2_HAS(bp, mf_cfg_addr))
+ bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+ else
+ bp->common.mf_cfg_base = bp->common.shmem_base +
+ offsetof(struct shmem_region, func_mb) +
+ E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+
+ val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
FUNC_MF_CFG_E1HOV_TAG_MASK);
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
- bp->e1hmf = 1;
+ bp->mf_mode = 1;
BNX2X_DEV_INFO("%s function mode\n",
- IS_E1HMF(bp) ? "multi" : "single");
+ IS_MF(bp) ? "multi" : "single");
- if (IS_E1HMF(bp)) {
- val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
+ if (IS_MF(bp)) {
+ val = (MF_CFG_RD(bp, func_mf_config[func].
e1hov_tag) &
FUNC_MF_CFG_E1HOV_TAG_MASK);
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
- bp->e1hov = val;
- BNX2X_DEV_INFO("E1HOV for func %d is %d "
+ bp->mf_ov = val;
+ BNX2X_DEV_INFO("MF OV for func %d is %d "
"(0x%04x)\n",
- func, bp->e1hov, bp->e1hov);
+ func, bp->mf_ov, bp->mf_ov);
} else {
- BNX2X_ERROR("No valid E1HOV for func %d,"
+ BNX2X_ERROR("No valid MF OV for func %d,"
" aborting\n", func);
rc = -EPERM;
}
} else {
- if (BP_E1HVN(bp)) {
+ if (BP_VN(bp)) {
BNX2X_ERROR("VN %d in single function mode,"
" aborting\n", BP_E1HVN(bp));
rc = -EPERM;
@@ -6600,17 +8184,31 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
+ /* adjust igu_sb_cnt to MF for E1x */
+ if (CHIP_IS_E1x(bp) && IS_MF(bp))
+ bp->igu_sb_cnt /= E1HVN_MAX;
+
+ /*
+ * adjust E2 sb count: to be removed once the FW supports
+ * more than 16 L2 clients
+ */
+#define MAX_L2_CLIENTS 16
+ if (CHIP_IS_E2(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
+ MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
+
if (!BP_NOMCP(bp)) {
bnx2x_get_port_hwinfo(bp);
- bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
}
- if (IS_E1HMF(bp)) {
- val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
- val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
+ if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
(val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
@@ -6704,7 +8302,7 @@ out_not_found:
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
- int func = BP_FUNC(bp);
+ int func;
int timer_interval;
int rc;
@@ -6724,7 +8322,13 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_get_hwinfo(bp);
+ if (!rc)
+ rc = bnx2x_alloc_mem_bp(bp);
+
bnx2x_read_fwinfo(bp);
+
+ func = BP_FUNC(bp);
+
/* need to reset chip if undi was active */
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
@@ -6766,13 +8370,12 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = MAX_TX_AVAIL;
- bp->rx_ring_size = MAX_RX_AVAIL;
bp->rx_csum = 1;
/* make sure that the numbers are in the right granularity */
- bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
- bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
+ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+ bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
bp->current_interval = (poll ? poll : timer_interval);
@@ -6864,81 +8467,22 @@ void bnx2x_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
-
else if ((dev->flags & IFF_ALLMULTI) ||
((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
-
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
- int i, old, offset;
- struct netdev_hw_addr *ha;
- struct mac_configuration_cmd *config =
- bnx2x_sp(bp, mcast_config);
-
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- config->config_table[i].
- cam_entry.msb_mac_addr =
- swab16(*(u16 *)&ha->addr[0]);
- config->config_table[i].
- cam_entry.middle_mac_addr =
- swab16(*(u16 *)&ha->addr[2]);
- config->config_table[i].
- cam_entry.lsb_mac_addr =
- swab16(*(u16 *)&ha->addr[4]);
- config->config_table[i].cam_entry.flags =
- cpu_to_le16(port);
- config->config_table[i].
- target_table_entry.flags = 0;
- config->config_table[i].target_table_entry.
- clients_bit_vector =
- cpu_to_le32(1 << BP_L_ID(bp));
- config->config_table[i].
- target_table_entry.vlan_id = 0;
-
- DP(NETIF_MSG_IFUP,
- "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
- config->config_table[i].
- cam_entry.msb_mac_addr,
- config->config_table[i].
- cam_entry.middle_mac_addr,
- config->config_table[i].
- cam_entry.lsb_mac_addr);
- i++;
- }
- old = config->hdr.length;
- if (old > i) {
- for (; i < old; i++) {
- if (CAM_IS_INVALID(config->
- config_table[i])) {
- /* already invalidated */
- break;
- }
- /* invalidate */
- CAM_INVALIDATE(config->
- config_table[i]);
- }
- }
-
- if (CHIP_REV_IS_SLOW(bp))
- offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
- else
- offset = BNX2X_MAX_MULTICAST*(1 + port);
-
- config->hdr.length = i;
- config->hdr.offset = offset;
- config->hdr.client_id = bp->fp->cl_id;
- config->hdr.reserved1 = 0;
-
- bp->set_mac_pending++;
- smp_wmb();
+ /*
+ * set the mc list; do not wait, since waiting implies sleeping
+ * and set_rx_mode can be invoked from a non-sleepable
+ * context
+ */
+ u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+ BNX2X_MAX_EMUL_MULTI*(1 + port) :
+ BNX2X_MAX_MULTICAST*(1 + port));
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
- U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
- U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
- 0);
+ bnx2x_set_e1_mc_list(bp, offset);
} else { /* E1H */
/* Accept one or more multicasts */
struct netdev_hw_addr *ha;
@@ -6950,9 +8494,10 @@ void bnx2x_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- ha->addr);
+ bnx2x_mc_addr(ha));
- crc = crc32c_le(0, ha->addr, ETH_ALEN);
+ crc = crc32c_le(0, bnx2x_mc_addr(ha),
+ ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
@@ -6969,7 +8514,6 @@ void bnx2x_set_rx_mode(struct net_device *dev)
bnx2x_set_storm_rx_mode(bp);
}
-
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
int devad, u16 addr)
@@ -6977,23 +8521,15 @@ static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
struct bnx2x *bp = netdev_priv(netdev);
u16 value;
int rc;
- u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
prtad, devad, addr);
- if (prtad != bp->mdio.prtad) {
- DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
- prtad, bp->mdio.prtad);
- return -EINVAL;
- }
-
/* The HW expects different devad if CL22 is used */
devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
- devad, addr, &value);
+ rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
bnx2x_release_phy_lock(bp);
DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
@@ -7007,24 +8543,16 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
struct bnx2x *bp = netdev_priv(netdev);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
int rc;
DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
" value 0x%x\n", prtad, devad, addr, value);
- if (prtad != bp->mdio.prtad) {
- DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
- prtad, bp->mdio.prtad);
- return -EINVAL;
- }
-
/* The HW expects different devad if CL22 is used */
devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
- devad, addr, value);
+ rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
bnx2x_release_phy_lock(bp);
return rc;
}
@@ -7085,7 +8613,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->dev = dev;
bp->pdev = pdev;
bp->flags = 0;
- bp->func = PCI_FUNC(pdev->devfn);
+ bp->pf_num = PCI_FUNC(pdev->devfn);
rc = pci_enable_device(pdev);
if (rc) {
@@ -7167,7 +8695,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE,
+ min_t(u64, BNX2X_DB_SIZE(bp),
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
@@ -7254,7 +8782,7 @@ static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
-static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
+static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7343,6 +8871,30 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
}
}
+/**
+ * IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
+ */
+static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct iro *target = (struct iro *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+ target[i].base = be32_to_cpu(source[j]);
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m1 = (tmp >> 16) & 0xffff;
+ target[i].m2 = tmp & 0xffff;
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m3 = (tmp >> 16) & 0xffff;
+ target[i].size = tmp & 0xffff;
+ j++;
+ }
+}
+
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
const __be16 *source = (const __be16 *)_source;
@@ -7365,7 +8917,7 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
+int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7375,22 +8927,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
fw_file_name = FW_FILE_NAME_E1;
else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
+ else if (CHIP_IS_E2(bp))
+ fw_file_name = FW_FILE_NAME_E2;
else {
- dev_err(dev, "Unsupported chip revision\n");
+ BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
- dev_info(dev, "Loading %s\n", fw_file_name);
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
- rc = request_firmware(&bp->firmware, fw_file_name, dev);
+ rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
+ BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -7424,9 +8978,13 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
be32_to_cpu(fw_hdr->csem_int_table_data.offset);
INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
be32_to_cpu(fw_hdr->csem_pram_data.offset);
+ /* IRO */
+ BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
return 0;
+iro_alloc_err:
+ kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
kfree(bp->init_ops);
init_ops_alloc_err:
@@ -7437,6 +8995,15 @@ request_firmware_exit:
return rc;
}
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
+{
+ int cid_count = L2_FP_COUNT(l2_cid_count);
+
+#ifdef BCM_CNIC
+ cid_count += CNIC_CID_MAX;
+#endif
+ return roundup(cid_count, QM_CID_ROUND);
+}
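As a worked example of the helper above (the constants are illustrative assumptions, not values read from the headers): with L2_FP_COUNT(l2_cid_count) = 20, CNIC_CID_MAX = 256 and QM_CID_ROUND = 1024, the function returns roundup(276, 1024) = 1024 QM CIDs.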
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -7444,10 +9011,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
struct net_device *dev = NULL;
struct bnx2x *bp;
int pcie_width, pcie_speed;
- int rc;
+ int rc, cid_count;
+
+ switch (ent->driver_data) {
+ case BCM57710:
+ case BCM57711:
+ case BCM57711E:
+ cid_count = FP_SB_MAX_E1x;
+ break;
+
+ case BCM57712:
+ case BCM57712E:
+ cid_count = FP_SB_MAX_E2;
+ break;
+
+ default:
+ pr_err("Unknown board_type (%ld), aborting\n",
+ ent->driver_data);
+ return -ENODEV;
+ }
+
+ cid_count += CNIC_CONTEXT_USE;
/* dev zeroed in init_etherdev */
- dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
+ dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
if (!dev) {
dev_err(&pdev->dev, "Cannot allocate net device\n");
return -ENOMEM;
@@ -7458,6 +9045,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ bp->l2_cid_count = cid_count;
+
rc = bnx2x_init_dev(pdev, dev);
if (rc < 0) {
free_netdev(dev);
@@ -7468,12 +9057,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
if (rc)
goto init_one_exit;
- /* Set init arrays */
- rc = bnx2x_init_firmware(bp, &pdev->dev);
- if (rc) {
- dev_err(&pdev->dev, "Error loading firmware\n");
- goto init_one_exit;
- }
+ /* calc qm_cid_count */
+ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
rc = register_netdev(dev);
if (rc) {
@@ -7481,11 +9066,23 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
+ /* Configure interrupt mode: try to enable MSI-X/MSI if
+ * needed, set bp->num_queues appropriately.
+ */
+ bnx2x_set_int_mode(bp);
+
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
+
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+
netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
" IRQ %d, ", board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ pcie_width,
+ ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
+ (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+ "5GHz (Gen2)" : "2.5GHz",
dev->base_addr, bp->pdev->irq);
pr_cont("node addr %pM\n", dev->dev_addr);
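The Gen2 test in the message above differs per chip family: on E1x parts the reported link-speed value 2 denotes 5GT/s, while on E2 the value 1 does. A condensed sketch of that check (the helper name is hypothetical, not part of the driver):

/* Hypothetical helper mirroring the conditional used in the netdev_info()
 * call above: map the raw pcie_speed value to a printable generation.
 */
static const char *bnx2x_pcie_speed_str(struct bnx2x *bp, int pcie_speed)
{
	bool gen2 = (!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		    (CHIP_IS_E2(bp) && pcie_speed == 1);

	return gen2 ? "5GHz (Gen2)" : "2.5GHz";
}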
@@ -7522,20 +9119,23 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Disable MSI/MSI-X */
+ bnx2x_disable_msi(bp);
+
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->reset_task);
- kfree(bp->init_ops_offsets);
- kfree(bp->init_ops);
- kfree(bp->init_data);
- release_firmware(bp->firmware);
-
if (bp->regview)
iounmap(bp->regview);
if (bp->doorbells)
iounmap(bp->doorbells);
+ bnx2x_free_mem_bp(bp);
+
free_netdev(dev);
if (atomic_read(&pdev->enable_cnt) == 1)
@@ -7561,22 +9161,14 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
/* Release IRQs */
- bnx2x_free_irq(bp, false);
-
- if (CHIP_IS_E1(bp)) {
- struct mac_configuration_cmd *config =
- bnx2x_sp(bp, mcast_config);
-
- for (i = 0; i < config->hdr.length; i++)
- CAM_INVALIDATE(config->config_table[i]);
- }
+ bnx2x_free_irq(bp);
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
+
for_each_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- for_each_queue(bp, i)
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
@@ -7608,8 +9200,9 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
BNX2X_ERR("BAD MCP validity signature\n");
if (!BP_NOMCP(bp)) {
- bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
- & DRV_MSG_SEQ_NUMBER_MASK);
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
}
}
@@ -7692,7 +9285,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ printk(KERN_ERR "Handling parity error recovery. "
+ "Try again later\n");
return;
}
@@ -7767,19 +9361,53 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
#endif
spin_lock_bh(&bp->spq_lock);
+ BUG_ON(bp->cnic_spq_pending < count);
bp->cnic_spq_pending -= count;
- for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
- bp->cnic_spq_pending++) {
- if (!bp->cnic_kwq_pending)
+ for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+ u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+ & SPE_HDR_CONN_TYPE) >>
+ SPE_HDR_CONN_TYPE_SHIFT;
+
+ /* Set validation for iSCSI L2 client before sending SETUP
+ * ramrod
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
+ hdr.conn_and_cmd_data) >>
+ SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
+ bnx2x_set_ctx_validation(&bp->context.
+ vcxt[BNX2X_ISCSI_ETH_CID].eth,
+ HW_CID(bp, BNX2X_ISCSI_ETH_CID));
+ }
+
+ /* There may be no more than 8 L2 and COMMON SPEs and no more
+ * than 8 L5 SPEs in flight.
+ */
+ if ((type == NONE_CONNECTION_TYPE) ||
+ (type == ETH_CONNECTION_TYPE)) {
+ if (!atomic_read(&bp->spq_left))
+ break;
+ else
+ atomic_dec(&bp->spq_left);
+ } else if (type == ISCSI_CONNECTION_TYPE) {
+ if (bp->cnic_spq_pending >=
+ bp->cnic_eth_dev.max_kwqe_pending)
+ break;
+ else
+ bp->cnic_spq_pending++;
+ } else {
+ BNX2X_ERR("Unknown SPE type: %d\n", type);
+ bnx2x_panic();
break;
+ }
spe = bnx2x_sp_get_next(bp);
*spe = *bp->cnic_kwq_cons;
- bp->cnic_kwq_pending--;
-
DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
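The drain loop above enforces two independent budgets: a shared spq_left counter for L2 and COMMON entries, and the CNIC max_kwqe_pending limit for iSCSI (L5) entries. A condensed sketch of that per-entry decision, using the same type constants (the helper itself is hypothetical and only restates the logic in the hunk above):

/* Hypothetical helper: may a pending KWQE of the given connection type be
 * posted to the SPQ right now? Mirrors the budget checks in the loop above.
 */
static bool bnx2x_cnic_spe_has_credit(struct bnx2x *bp, u16 type)
{
	if (type == NONE_CONNECTION_TYPE || type == ETH_CONNECTION_TYPE)
		return atomic_read(&bp->spq_left) > 0;	/* shared L2/COMMON budget */
	if (type == ISCSI_CONNECTION_TYPE)
		return bp->cnic_spq_pending <
		       bp->cnic_eth_dev.max_kwqe_pending;
	return false;	/* unknown type: the caller panics */
}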
@@ -7817,8 +9445,8 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
spe->hdr.conn_and_cmd_data, spe->hdr.type,
- spe->data.mac_config_addr.hi,
- spe->data.mac_config_addr.lo,
+ spe->data.update_data_addr.hi,
+ spe->data.update_data_addr.lo,
bp->cnic_kwq_pending);
if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
@@ -7884,7 +9512,7 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
ctl.data.comp.cid = cid;
bnx2x_cnic_ctl_send_bh(bp, &ctl);
- bnx2x_cnic_sp_post(bp, 1);
+ bnx2x_cnic_sp_post(bp, 0);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
@@ -7901,8 +9529,8 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
- case DRV_CTL_COMPLETION_CMD: {
- int count = ctl->data.comp.comp_count;
+ case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
bnx2x_cnic_sp_post(bp, count);
break;
@@ -7912,8 +9540,24 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
case DRV_CTL_START_L2_CMD: {
u32 cli = ctl->data.ring.client_id;
- bp->rx_mode_cl_mask |= (1 << cli);
- bnx2x_set_storm_rx_mode(bp);
+ /* Set iSCSI MAC address */
+ bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+
+ mmiowb();
+ barrier();
+
+ /* Start accepting on the iSCSI L2 ring. Accept all multicasts
+ * because that is the only way for the UIO client to receive
+ * them: in non-promiscuous mode only one client per function
+ * (the leading one, in our case) receives multicast packets.
+ */
+ bnx2x_rxq_set_mac_filters(bp, cli,
+ BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_ALL_MULTICAST);
+ storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+
break;
}
@@ -7921,8 +9565,23 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
case DRV_CTL_STOP_L2_CMD: {
u32 cli = ctl->data.ring.client_id;
- bp->rx_mode_cl_mask &= ~(1 << cli);
- bnx2x_set_storm_rx_mode(bp);
+ /* Stop accepting on iSCSI L2 ring */
+ bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
+ storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+
+ mmiowb();
+ barrier();
+
+ /* Unset iSCSI L2 MAC */
+ bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+ break;
+ }
+ case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_add(count, &bp->spq_left);
+ smp_mb__after_atomic_inc();
break;
}
@@ -7946,10 +9605,16 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
}
- cp->irq_arr[0].status_blk = bp->cnic_sb;
+ if (CHIP_IS_E2(bp))
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+ else
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
+ cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+ cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
cp->num_irq = 2;
}
@@ -7981,12 +9646,10 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
cp->num_irq = 0;
cp->drv_state = CNIC_DRV_STATE_REGD;
-
- bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
+ cp->iro_arr = bp->iro_arr;
bnx2x_setup_cnic_irq_info(bp);
- bnx2x_set_iscsi_eth_mac_addr(bp, 1);
- bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+
rcu_assign_pointer(bp->cnic_ops, ops);
return 0;
@@ -8023,15 +9686,24 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->io_base = bp->regview;
cp->io_base2 = bp->doorbells;
cp->max_kwqe_pending = 8;
- cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
- cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
+ cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
cp->ctx_tbl_len = CNIC_ILT_LINES;
- cp->starting_cid = BCM_CNIC_CID_START;
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
-
+ cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+
+ DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
+ "starting cid %d\n",
+ cp->ctx_blk_size,
+ cp->ctx_tbl_offset,
+ cp->ctx_tbl_len,
+ cp->starting_cid);
return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index a1f3bf0cd63..18a86284ebc 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +19,20 @@
*
*/
-
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK 0x60128
/* [R 19] Interrupt register #0 read */
#define BRB1_REG_BRB1_INT_STS 0x6011c
/* [RW 4] Parity mask register #0 read/write */
@@ -27,9 +40,31 @@
/* [R 4] Parity register #0 read */
#define BRB1_REG_BRB1_PRTY_STS 0x6012c
/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
- address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
- BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset the first rbc access to this reg must be write; there can
+ * be no more rbc writes after the first one; there can be any number of rbc
+ * reads following the first write; rbc accesses not following these rules
+ * will result in a hang condition. */
#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
+/* [RW 10] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
+/* [RW 10] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
+/* [RW 10] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
+/* [RW 10] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
/* [RW 10] The number of free blocks above which the High_llfc signal to
interface #n is de-asserted. */
#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
@@ -44,6 +79,9 @@
/* [RW 10] The number of free blocks below which the Low_llfc signal to
interface #n is asserted. */
#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
+/* [RW 10] The number of blocks guaranteed for the MAC port */
+#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1 0x60240
/* [R 24] The number of full blocks. */
#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
/* [ST 32] The number of cycles that the write_full signal towards MAC #0
@@ -55,7 +93,19 @@
asserted. */
#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
-/* [RW 10] Write client 0: De-assert pause threshold. */
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
+/* [RW 10] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
+/* [RW 10] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
+/* [RW 10] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
/* [RW 10] Write client 0: Assert pause threshold. */
@@ -362,6 +412,7 @@
#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
/* [R 9] Number of Leaving LCIDs in Link List Block */
#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
+#define CFC_REG_WEAK_ENABLE_PF 0x104124
/* [RW 8] The event id for aggregated interrupt 0 */
#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
@@ -590,10 +641,17 @@
#define CSEM_REG_TS_8_AS 0x200058
/* [RW 3] The arbitration scheme of time_slot 9 */
#define CSEM_REG_TS_9_AS 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM 0x200380
/* [RW 1] Parity mask register #0 read/write */
#define DBG_REG_DBG_PRTY_MASK 0xc0a8
/* [R 1] Parity register #0 read */
#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
+ * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
+ * 4.Completion function=0; 5.Error handling=0 */
+#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
/* [RW 32] Commands memory. The address to command X; row Y is to calculated
as 14*X+Y. */
#define DMAE_REG_CMD_MEM 0x102400
@@ -758,6 +816,92 @@
#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
#define HC_REG_VQID_0 0x108008
#define HC_REG_VQID_1 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
+#define IGU_REG_ATTENTION_ACK_BITS 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means attention message was sent; but
+ * write done didnt receive. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
+#define IGU_REG_COMMAND_REG_CTRL 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data valid only in addresses 0-4. All the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM 0x130064
+/* [R 1] Data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE 136
+#define IGU_REG_PBA_STATUS_LSB 0x130138
+#define IGU_REG_PBA_STATUS_MSB 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
+/* [WB_R 32] Each bit represents the pending bit status for that SB. 0 = no
+ * pending; 1 = pending. Pending means the interrupt was asserted and the
+ * write done was not received. Data valid only in addresses 0-4. All the
+ * rest are zero. */
+#define IGU_REG_PENDING_BITS_STATUS 0x130300
+#define IGU_REG_PF_CONFIGURATION 0x130154
+/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line hold the C producer. The
+ * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY 0x132000
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM 0x130068
+/* [RW 6] Writing one to a bit will reset the corresponding memory. When the
+ * memory reset finished the appropriate bit will be clear. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES 0x130158
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
+#define IGU_REG_SB_MASK_LSB 0x130164
+#define IGU_REG_SB_MASK_MSB 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access for WO BAR address; or write access for RO BAR
+ * address or any access for reserved address or PCI function error is set
+ * and address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
+/* [RW 32] Number of cycles the timer mask masks the IGU interrupt when a
+ * timer mask command arrives. Value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
+#define IGU_REG_VF_CONFIGURATION 0x130170
+/* [WB_R 32] Each bit represents the write done pending bit status for that
+ * SB (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4. All the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING 0x130480
+#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
#define MCP_REG_MCPR_NVM_ADDR 0x8640c
#define MCP_REG_MCPR_NVM_CFG4 0x8642c
@@ -880,6 +1024,11 @@
rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
ump_tx_parity; [31] MCP Latched scpad_parity; */
#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
/* [W 14] write to this register results with the clear of the latched
signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
@@ -1251,6 +1400,7 @@
#define MISC_REG_E1HMF_MODE 0xa5f8
/* [RW 32] Debug only: spare RW register reset by core reset */
#define MISC_REG_GENERIC_CR_0 0xa460
+#define MISC_REG_GENERIC_CR_1 0xa464
/* [RW 32] Debug only: spare RW register reset by por reset */
#define MISC_REG_GENERIC_POR_1 0xa474
/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
@@ -1373,6 +1523,14 @@
#define MISC_REG_PLL_STORM_CTRL_2 0xa298
#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN 0xa750
+/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
+ * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
+ * the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output. */
+#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset;
write/read zero = the specific block is in reset; addr 0-wr- the write
value will be written to the register; addr 1-set - one will be written
@@ -1656,8 +1814,91 @@
/* [R 32] Interrupt register #0 read */
#define NIG_REG_NIG_INT_STS_0 0x103b0
#define NIG_REG_NIG_INT_STS_1 0x103c0
-/* [R 32] Parity register #0 read */
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
#define NIG_REG_NIG_PRTY_STS 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
+#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
/* [RW 1] Pause enable for port0. This register may get 1 only when
~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
port */
@@ -1742,6 +1983,10 @@
/* [RW 1] Disable processing further tasks from port 4 (after ending the
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
+#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
#define PBF_REG_IF_ENABLE_REG 0x140044
/* [RW 1] Init bit. When set the initial credits are copied to the credit
registers (except the port credits). Should be set and then reset after
@@ -1765,6 +2010,8 @@
#define PBF_REG_MAC_IF1_ENABLE 0x140034
/* [RW 1] Enable for the loopback interface. */
#define PBF_REG_MAC_LB_ENABLE 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
not suppoterd. */
#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -1804,6 +2051,259 @@
#define PB_REG_PB_PRTY_MASK 0x38
/* [R 4] Parity register #0 read */
#define PB_REG_PB_PRTY_STS 0x2c
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
+ * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion did not return yet. 1 - tag is unused. Same functionality as
+ * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
#define PRS_REG_A_PRSU_20 0x40134
/* [R 8] debug only: CFC load request current credit. Transaction based. */
#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
@@ -1866,9 +2366,13 @@
#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC 0x40238
/* [RW 4] The increment value to send in the CFC load request message */
#define PRS_REG_INC_VALUE 0x40048
-/* [RW 1] If set indicates not to send messages to CFC on received packets */
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS 0x40254
#define PRS_REG_NIC_MODE 0x40138
/* [RW 8] The 8-bit event ID for cases where there is no match on the
connection. Used in packet start message to TCM. */
@@ -1919,6 +2423,13 @@
#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
/* [R 8] debug only: TSDM current credit. Transaction based. */
#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
/* [R 6] Debug only: Number of used entries in the data FIFO */
#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
/* [R 7] Debug only: Number of used entries in the header FIFO */
@@ -2244,8 +2755,17 @@
/* [RW 1] When '1'; requests will enter input buffers but wont get out
towards the glue */
#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
-/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when reset
+ * the original alignment method (E1 E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will
be asserted */
#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
@@ -2436,7 +2956,8 @@
#define PXP_REG_PXP_INT_STS_1 0x103078
/* [RC 32] Interrupt register #0 read clear */
#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
-/* [RW 26] Parity mask register #0 read/write */
+#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
#define PXP_REG_PXP_PRTY_MASK 0x103094
/* [R 26] Parity register #0 read */
#define PXP_REG_PXP_PRTY_STS 0x103088
@@ -2566,6 +3087,7 @@
#define QM_REG_PAUSESTATE7 0x16e698
/* [RW 2] The PCI attributes field used in the PCI request. */
#define QM_REG_PCIREQAT 0x168054
+#define QM_REG_PF_EN 0x16e70c
/* [R 16] The byte credit of port 0 */
#define QM_REG_PORT0BYTECRD 0x168300
/* [R 16] The byte credit of port 1 */
@@ -3402,6 +3924,14 @@
/* [R 32] Parity register #0 read */
#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define TSEM_REG_VFPF_ERR_NUM 0x180380
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * LCID100. The RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX 0xe2000
/* [R 5] Used to read the XX protection CAM occupancy counter. */
#define UCM_REG_CAM_OCCUP 0xe0170
/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3851,6 +4381,17 @@
/* [R 32] Parity register #0 read */
#define USEM_REG_USEM_PRTY_STS_0 0x300124
#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define USEM_REG_VFPF_ERR_NUM 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
+#define VFC_REG_MEMORIES_RST 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * LCID100. The RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX 0x28000
/* [RW 2] The queue index for registration on Aux1 counter flag. */
#define XCM_REG_AUX1_Q 0x20134
/* [RW 2] Per each decision rule the queue index to register to. */
@@ -4333,6 +4874,9 @@
#define XSEM_REG_TS_8_AS 0x280058
/* [RW 3] The arbitration scheme of time_slot 9 */
#define XSEM_REG_TS_9_AS 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define XSEM_REG_VFPF_ERR_NUM 0x280380
/* [RW 32] Interrupt mask register #0 read/write */
#define XSEM_REG_XSEM_INT_MASK_0 0x280110
#define XSEM_REG_XSEM_INT_MASK_1 0x280120
@@ -4371,6 +4915,23 @@
#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
#define EMAC_LED_100MB_OVERRIDE (1L<<2)
#define EMAC_LED_10MB_OVERRIDE (1L<<3)
@@ -4478,6 +5039,8 @@
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
#define PRS_FLAG_OVERETH_IPV4 1
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -4504,6 +5067,8 @@
#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
@@ -4796,6 +5361,253 @@
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+ (1 << 28) /* Unsupported Request Error Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+ (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+ (1 << 26) /* Malformed TLP Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+ (1 << 25) /* Receiver Overflow Status Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+ (1 << 24) /* Unexpected Completion Status Status in function 4, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+ (1 << 23) /* Receive UR Status in function 4. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+ (1 << 22) /* Completer Timeout Status Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+ (1 << 21) /* Flow Control Protocol Error Status Status in \
+ function 4, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+ (1 << 20) /* Poisoned Error Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+ (1 << 18) /* Unsupported Request Error Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+ (1 << 17) /* ECRC Error TLP Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+ (1 << 16) /* Malformed TLP Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+ (1 << 15) /* Receiver Overflow Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+ (1 << 14) /* Unexpected Completion Status in function 3, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+ (1 << 13) /* Receive UR Status in function 3. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+ (1 << 12) /* Completer Timeout Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+ (1 << 11) /* Flow Control Protocol Error Status in \
+ function 3, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+ (1 << 10) /* Poisoned Error Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+ (1 << 8) /* Unsupported Request Error Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+ (1 << 7) /* ECRC Error TLP Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+ (1 << 6) /* Malformed TLP Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+ (1 << 5) /* Receiver Overflow Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+ (1 << 4) /* Unexpected Completion Status for Function 2, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+ (1 << 3) /* Receive UR Status for Function 2. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+ (1 << 2) /* Completer Timeout Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+ (1 << 1) /* Flow Control Protocol Error Status for \
+ Function 2, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+ (1 << 0) /* Poisoned Error Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+ (1 << 28) /* Unsupported Request Error Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+ (1 << 27) /* ECRC Error TLP Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+ (1 << 26) /* Malformed TLP Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+ (1 << 25) /* Receiver Overflow Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+ (1 << 24) /* Unexpected Completion Status in function 7, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+ (1 << 23) /* Receive UR Status in function 7. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+ (1 << 22) /* Completer Timeout Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+ (1 << 21) /* Flow Control Protocol Error Status in \
+ function 7, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+ (1 << 20) /* Poisoned Error Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+ (1 << 18) /* Unsupported Request Error Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+ (1 << 17) /* ECRC Error TLP Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+ (1 << 16) /* Malformed TLP Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+ (1 << 15) /* Receiver Overflow Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+ (1 << 14) /* Unexpected Completion Status in function 6, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+ (1 << 13) /* Receive UR Status in function 6. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+ (1 << 12) /* Completer Timeout Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+ (1 << 11) /* Flow Control Protocol Error Status in \
+ function 6, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+ (1 << 10) /* Poisoned Error Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+ (1 << 8) /* Unsupported Request Error Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+ (1 << 7) /* ECRC Error TLP Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+ (1 << 6) /* Malformed TLP Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+ (1 << 5) /* Receiver Overflow Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+ (1 << 4) /* Unexpected Completion Status for Function 5, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+ (1 << 3) /* Receive UR Status for Function 5. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+ (1 << 2) /* Completer Timeout Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+ (1 << 1) /* Flow Control Protocol Error Status for \
+ Function 5, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+ (1 << 0) /* Poisoned Error Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM\
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM\
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -4964,6 +5776,8 @@
#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
@@ -5135,28 +5949,35 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
-#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
#define MDIO_PMA_REG_7101_RESET 0xc000
#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
#define MDIO_PMA_REG_7101_VER1 0xc026
#define MDIO_PMA_REG_7101_VER2 0xc027
-#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
-#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
-#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
-#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
-#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
-#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
-#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
#define MDIO_WIS_DEVAD 0x2
@@ -5188,6 +6009,8 @@ The other bits are reserved and should be zero*/
#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
#define MDIO_AN_DEVAD 0x7
/*ieee*/
#define MDIO_AN_REG_CTRL 0x0000
@@ -5210,14 +6033,40 @@ The other bits are reserved and should be zero*/
#define MDIO_AN_REG_CL37_FC_LP 0xffe5
#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+ * powered down; when it is active low, the 10GBASE-T core is powered up.
+ */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+
+
#define IGU_FUNC_BASE 0x0400
#define IGU_ADDR_MSIX 0x0000
@@ -5239,6 +6088,11 @@ The other bits are reserved and should be zero*/
#define IGU_INT_NOP 2
#define IGU_INT_NOP2 3
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
#define COMMAND_REG_INT_ACK 0x0
#define COMMAND_REG_PROD_UPD 0x4
#define COMMAND_REG_ATTN_BITS_UPD 0x8
@@ -5281,6 +6135,50 @@ The other bits are reserved and should be zero*/
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
#define IGU_REG_RESERVED_UPPER 0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+#define IGU_CTRL_CMD_TYPE_WR\
+ 1
+#define IGU_CTRL_CMD_TYPE_RD\
+ 0
+
+#define IGU_SEG_ACCESS_NORM 0
+#define IGU_SEG_ACCESS_DEF 1
+#define IGU_SEG_ACCESS_ATTN 2
+
+ /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
+ [5:2] = 0; [1:0] = PF number) */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
#define CDU_REGION_NUMBER_XCM_AG 2
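The IGU_FID_* and ME_REG_* masks added in this file encode which function an interrupt FID or ME register value refers to. The helpers below are an illustrative sketch only (the example_* names are invented, not part of the patch), decoding those layouts with nothing but the masks defined above:

#include <linux/types.h>

/* Decode the IGU FID layout documented above: bit 6 set means PF with
 * [1:0] = PF number, bit 6 clear means VF with [5:0] = VF number. */
static inline bool example_igu_fid_is_pf(u32 fid)
{
        return (fid & IGU_FID_ENCODE_IS_PF) != 0;
}

static inline u32 example_igu_fid_func_num(u32 fid)
{
        return example_igu_fid_is_pf(fid) ? (fid & IGU_FID_PF_NUM_MASK)
                                          : (fid & IGU_FID_VF_NUM_MASK);
}

/* Extract the relative VF number from a BAR_ME_REGISTER read. */
static inline u32 example_me_reg_vf_num(u32 me_reg)
{
        return (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
}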
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c7472446102..4733c835dad 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -14,8 +14,8 @@
* Statistics and Link management by Yitchak Gertner
*
*/
- #include "bnx2x_cmn.h"
- #include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_stats.h"
/* Statistics */
@@ -153,7 +153,7 @@ static inline long bnx2x_hilo(u32 *hiref)
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
if (!bp->stats_pending) {
- struct eth_query_ramrod_data ramrod_data = {0};
+ struct common_query_ramrod_data ramrod_data = {0};
int i, rc;
spin_lock_bh(&bp->stats_lock);
@@ -163,14 +163,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
for_each_queue(bp, i)
ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
- rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
((u32 *)&ramrod_data)[1],
- ((u32 *)&ramrod_data)[0], 0);
- if (rc == 0) {
- /* stats ramrod has it's own slot on the spq */
- bp->spq_left++;
+ ((u32 *)&ramrod_data)[0], 1);
+ if (rc == 0)
bp->stats_pending = 1;
- }
spin_unlock_bh(&bp->stats_lock);
}
@@ -188,20 +185,12 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
/* loader */
if (bp->executer_idx) {
int loader_idx = PMF_DMAE_C(bp);
+ u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+ opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
memset(dmae, 0, sizeof(struct dmae_command));
-
- dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
- DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
- DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
- DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ dmae->opcode = opcode;
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
@@ -253,26 +242,17 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* sanity */
- if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+ if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
BNX2X_ERR("BUG!\n");
return;
}
bp->executer_idx = 0;
- opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
- DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
dmae->src_addr_lo = bp->port.port_stx >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
@@ -283,7 +263,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
dmae->comp_val = 1;
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
@@ -304,7 +284,6 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
{
struct dmae_command *dmae;
int port = BP_PORT(bp);
- int vn = BP_E1HVN(bp);
u32 opcode;
int loader_idx = PMF_DMAE_C(bp);
u32 mac_addr;
@@ -319,16 +298,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
bp->executer_idx = 0;
/* MCP */
- opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
- DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (vn << DMAE_CMD_E1HVN_SHIFT));
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
if (bp->port.port_stx) {
@@ -359,16 +330,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
}
/* MAC */
- opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
- DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (vn << DMAE_CMD_E1HVN_SHIFT));
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_GRC);
if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
@@ -379,13 +342,21 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
BIGMAC_REGISTER_TX_STAT_GTBYT */
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = opcode;
- dmae->src_addr_lo = (mac_addr +
+ if (CHIP_IS_E1x(bp)) {
+ dmae->src_addr_lo = (mac_addr +
BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ } else {
+ dmae->src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ }
+
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
- dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
- BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@@ -394,15 +365,31 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
BIGMAC_REGISTER_RX_STAT_GRIPJ */
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = opcode;
- dmae->src_addr_lo = (mac_addr +
- BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
dmae->src_addr_hi = 0;
- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
- offsetof(struct bmac_stats, rx_stat_gr64_lo));
- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
- offsetof(struct bmac_stats, rx_stat_gr64_lo));
- dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
- BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ if (CHIP_IS_E1x(bp)) {
+ dmae->src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ dmae->dst_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct bmac1_stats, rx_stat_gr64_lo));
+ dmae->dst_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct bmac1_stats, rx_stat_gr64_lo));
+ dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ } else {
+ dmae->src_addr_lo =
+ (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ dmae->dst_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct bmac2_stats, rx_stat_gr64_lo));
+ dmae->dst_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct bmac2_stats, rx_stat_gr64_lo));
+ dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ }
+
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@@ -483,16 +470,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->comp_val = 1;
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (vn << DMAE_CMD_E1HVN_SHIFT));
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
dmae->src_addr_hi = 0;
@@ -522,16 +501,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
bp->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
- dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -571,7 +542,6 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
- struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct {
@@ -579,35 +549,74 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
u32 hi;
} diff;
- UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
- UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
- UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
- UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
- UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
- UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
- UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
- UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
- UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
- UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
- UPDATE_STAT64(tx_stat_gt127,
+ if (CHIP_IS_E1x(bp)) {
+ struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
+
+ /* the macros below will use "bmac1_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+
+ } else {
+ struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
+
+ /* the macros below will use "bmac2_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
tx_stat_etherstatspkts65octetsto127octets);
- UPDATE_STAT64(tx_stat_gt255,
+ UPDATE_STAT64(tx_stat_gt255,
tx_stat_etherstatspkts128octetsto255octets);
- UPDATE_STAT64(tx_stat_gt511,
+ UPDATE_STAT64(tx_stat_gt511,
tx_stat_etherstatspkts256octetsto511octets);
- UPDATE_STAT64(tx_stat_gt1023,
+ UPDATE_STAT64(tx_stat_gt1023,
tx_stat_etherstatspkts512octetsto1023octets);
- UPDATE_STAT64(tx_stat_gt1518,
+ UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
- UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
- UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
- UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
- UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
- UPDATE_STAT64(tx_stat_gterr,
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
- UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+ }
estats->pause_frames_received_hi =
pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
@@ -969,6 +978,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
{
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct net_device_stats *nstats = &bp->dev->stats;
+ unsigned long tmp;
int i;
nstats->rx_packets =
@@ -985,10 +995,10 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
- nstats->rx_dropped = estats->mac_discard;
+ tmp = estats->mac_discard;
for_each_queue(bp, i)
- nstats->rx_dropped +=
- le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ nstats->rx_dropped = tmp;
nstats->tx_dropped = 0;
@@ -1123,24 +1133,17 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
bp->executer_idx = 0;
- opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
- DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
if (bp->port.port_stx) {
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
if (bp->func_stx)
- dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_GRC);
else
- dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1164,7 +1167,8 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
if (bp->func_stx) {
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+ dmae->opcode =
+ bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -1257,16 +1261,8 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bp->executer_idx = 0;
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1283,9 +1279,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
- int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
- int port = BP_PORT(bp);
- int func;
+ int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
u32 func_stx;
/* sanity */
@@ -1298,9 +1292,9 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
func_stx = bp->func_stx;
for (vn = VN_0; vn < vn_max; vn++) {
- func = 2*vn + port;
+ int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
- bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
bnx2x_func_stats_init(bp);
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
@@ -1324,16 +1318,8 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
bp->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
- dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
- DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
dmae->src_addr_lo = bp->func_stx >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
@@ -1351,8 +1337,9 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
void bnx2x_stats_init(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- int func = BP_FUNC(bp);
+ int mb_idx = BP_FW_MB_IDX(bp);
int i;
+ struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1361,7 +1348,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* port and func stats for management */
if (!BP_NOMCP(bp)) {
bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
- bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
} else {
bp->port.port_stx = 0;
@@ -1394,6 +1381,18 @@ void bnx2x_stats_init(struct bnx2x *bp)
memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
}
+ for_each_queue(bp, i) {
+ /* Set initial stats counter in the stats ramrod data to -1 */
+ int cl_id = bp->fp[i].cl_id;
+
+ stats->xstorm_common.client_statistics[cl_id].
+ stats_counter = 0xffff;
+ stats->ustorm_common.client_statistics[cl_id].
+ stats_counter = 0xffff;
+ stats->tstorm_common.client_statistics[cl_id].
+ stats_counter = 0xffff;
+ }
+
memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
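The statistics changes above repeatedly replace hand-assembled DMAE opcode words with calls to bnx2x_dmae_opcode() and bnx2x_dmae_opcode_add_comp(), which are defined elsewhere in the driver. The sketch below is only an approximation of the flag composition those helpers centralize, reconstructed from the DMAE_CMD_* flags visible in the removed lines; it ignores the new E2 chip handling and is not the driver's actual implementation:

/* Rough, illustrative DMAE opcode builder; flag names are the DMAE_CMD_*
 * constants seen in the code removed above. */
static u32 example_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                               bool with_comp, u8 comp_type)
{
        u32 opcode;

        opcode = (src_type == DMAE_SRC_PCI ? DMAE_CMD_SRC_PCI :
                                             DMAE_CMD_SRC_GRC) |
                 (dst_type == DMAE_DST_GRC ? DMAE_CMD_DST_GRC :
                                             DMAE_CMD_DST_PCI) |
                 DMAE_CMD_C_ENABLE |
                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                 DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT);

        /* The completion destination is optional at build time; the real
         * helpers also expose it through an "add_comp" variant. */
        if (with_comp)
                opcode |= (comp_type == DMAE_COMP_GRC ? DMAE_CMD_C_DST_GRC :
                                                        DMAE_CMD_C_DST_PCI);
        return opcode;
}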
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 38a4e908f4f..afd15efa429 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -9,6 +9,10 @@
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
*/
#ifndef BNX2X_STATS_H
@@ -228,12 +232,8 @@ struct bnx2x_eth_stats {
/* Forward declaration */
struct bnx2x;
-
void bnx2x_stats_init(struct bnx2x *bp);
extern const u32 dmae_reg_go_c[];
-extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
- u32 data_hi, u32 data_lo, int common);
-
#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 822f586d72a..079b9d1eead 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -252,7 +252,7 @@ static inline void __enable_port(struct port *port)
*/
static inline int __port_is_enabled(struct port *port)
{
- return(port->slave->state == BOND_STATE_ACTIVE);
+ return port->slave->state == BOND_STATE_ACTIVE;
}
/**
@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
if (!(dev->flags & IFF_MASTER))
goto out;
+ if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ goto out;
+
read_lock(&bond->lock);
slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
orig_dev);
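The pskb_may_pull() check added to bond_3ad_lacpdu_recv() (and the matching one in bond_alb below) guarantees that the full protocol header sits in the skb's linear data before it is cast and parsed. A minimal sketch of that pattern follows; struct hypothetical_hdr and example_pkt_recv() are illustrative names, not code from this patch:

#include <linux/skbuff.h>

/* hypothetical_hdr stands in for whatever header the handler parses. */
struct hypothetical_hdr {
        u8 subtype;
        u8 version;
};

static int example_pkt_recv(struct sk_buff *skb)
{
        const struct hypothetical_hdr *hdr;

        /* Bail out unless at least sizeof(*hdr) bytes are available in the
         * linear data area; pskb_may_pull() pulls them in if necessary. */
        if (!pskb_may_pull(skb, sizeof(*hdr)))
                return -EINVAL;

        hdr = (const struct hypothetical_hdr *)skb->data;
        /* ... safe to dereference hdr fields from here on ... */
        return hdr->subtype;
}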
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c746b331771..26bb118c453 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
goto out;
}
+ if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+ goto out;
+
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
goto out;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc3189..7703d35de65 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -109,6 +109,7 @@ static char *arp_validate;
static char *fail_over_mac;
static int all_slaves_active = 0;
static struct bond_params bonding_defaults;
+static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -163,6 +164,8 @@ module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
"by setting active flag for all slaves. "
"0 for never (default), 1 for always.");
+module_param(resend_igmp, int, 0);
+MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
/*----------------------------- Global variables ----------------------------*/
@@ -865,18 +868,13 @@ static void bond_mc_del(struct bonding *bond, void *addr)
}
-/*
- * Retrieve the list of registered multicast addresses for the bonding
- * device and retransmit an IGMP JOIN request to the current active
- * slave.
- */
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
struct ip_mc_list *im;
rcu_read_lock();
- in_dev = __in_dev_get_rcu(bond->dev);
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
for (im = in_dev->mc_list; im; im = im->next)
ip_mc_rejoin_group(im);
@@ -886,6 +884,44 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
}
/*
+ * Retrieve the list of registered multicast addresses for the bonding
+ * device and retransmit an IGMP JOIN request to the current active
+ * slave.
+ */
+static void bond_resend_igmp_join_requests(struct bonding *bond)
+{
+ struct net_device *vlan_dev;
+ struct vlan_entry *vlan;
+
+ read_lock(&bond->lock);
+
+ /* rejoin all groups on bond device */
+ __bond_resend_igmp_join_requests(bond->dev);
+
+ /* rejoin all groups on vlan devices */
+ if (bond->vlgrp) {
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ vlan_dev = vlan_group_get_device(bond->vlgrp,
+ vlan->vlan_id);
+ if (vlan_dev)
+ __bond_resend_igmp_join_requests(vlan_dev);
+ }
+ }
+
+ if (--bond->igmp_retrans > 0)
+ queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
+
+ read_unlock(&bond->lock);
+}
+
+void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
+{
+ struct bonding *bond = container_of(work, struct bonding,
+ mcast_work.work);
+ bond_resend_igmp_join_requests(bond);
+}
+
+/*
* flush all members of flush->mc_list from device dev->mc_list
*/
static void bond_mc_list_flush(struct net_device *bond_dev,
@@ -944,7 +980,6 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_add(new_active->dev, ha->addr);
- bond_resend_igmp_join_requests(bond);
}
}
@@ -1180,9 +1215,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
}
- /* resend IGMP joins since all were sent on curr_active_slave */
- if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
- bond_resend_igmp_join_requests(bond);
+ /* resend IGMP joins since active slave has changed or
+ * all were sent on curr_active_slave */
+ if ((USES_PRIMARY(bond->params.mode) && new_active) ||
+ bond->params.mode == BOND_MODE_ROUNDROBIN) {
+ bond->igmp_retrans = bond->params.resend_igmp;
+ queue_delayed_work(bond->wq, &bond->mcast_work, 0);
}
}
@@ -2368,8 +2406,11 @@ static void bond_miimon_commit(struct bonding *bond)
slave->state = BOND_STATE_BACKUP;
}
- pr_info("%s: link status definitely up for interface %s.\n",
- bond->dev->name, slave->dev->name);
+ bond_update_speed_duplex(slave);
+
+ pr_info("%s: link status definitely up for interface %s, %d Mbps %s duplex.\n",
+ bond->dev->name, slave->dev->name,
+ slave->speed, slave->duplex ? "full" : "half");
/* notify ad that the link status has changed */
if (bond->params.mode == BOND_MODE_8023AD)
@@ -2797,9 +2838,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* so it can wait
*/
bond_for_each_slave(bond, slave, i) {
+ unsigned long trans_start = dev_trans_start(slave->dev);
+
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
- time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks) &&
+ time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2874,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
- (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+ if (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2934,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
{
struct slave *slave;
int i, commit = 0;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
- delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + delta_in_ticks)) {
+
slave->new_link = BOND_LINK_UP;
commit++;
}
@@ -2902,8 +2956,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (!time_after_eq(jiffies, slave->jiffies +
- 2 * delta_in_ticks))
+ if (time_in_range(jiffies,
+ slave->jiffies - delta_in_ticks,
+ slave->jiffies + 2 * delta_in_ticks))
continue;
/*
@@ -2921,8 +2976,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (slave->state == BOND_STATE_BACKUP &&
!bond->current_arp_slave &&
- time_after(jiffies, slave_last_rx(bond, slave) +
- 3 * delta_in_ticks)) {
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2933,11 +2990,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* - (more than 2*delta since receive AND
* the bond has an IP address)
*/
+ trans_start = dev_trans_start(slave->dev);
if ((slave->state == BOND_STATE_ACTIVE) &&
- (time_after_eq(jiffies, dev_trans_start(slave->dev) +
- 2 * delta_in_ticks) ||
- (time_after_eq(jiffies, slave_last_rx(bond, slave)
- + 2 * delta_in_ticks)))) {
+ (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2956,6 +3017,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
{
struct slave *slave;
int i;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
switch (slave->new_link) {
@@ -2963,10 +3025,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
continue;
case BOND_LINK_UP:
+ trans_start = dev_trans_start(slave->dev);
if ((!bond->curr_active_slave &&
- time_before_eq(jiffies,
- dev_trans_start(slave->dev) +
- delta_in_ticks)) ||
+ time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
@@ -3290,6 +3353,8 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n",
(slave->link == BOND_LINK_UP) ? "up" : "down");
+ seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
+ seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
seq_printf(seq, "Link Failure Count: %u\n",
slave->link_failure_count);
@@ -3722,6 +3787,8 @@ static int bond_open(struct net_device *bond_dev)
bond->kill_timers = 0;
+ INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+
if (bond_is_lb(bond)) {
/* bond_alb_initialize must be called before the timer
* is started.
@@ -3806,6 +3873,8 @@ static int bond_close(struct net_device *bond_dev)
break;
}
+ if (delayed_work_pending(&bond->mcast_work))
+ cancel_delayed_work(&bond->mcast_work);
if (bond_is_lb(bond)) {
/* Must be called only after all
@@ -4656,6 +4725,10 @@ static void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER);
+ /* By default, we enable GRO on bonding devices.
+ * Actual support requires that low-level drivers be GRO ready.
+ */
+ bond_dev->features |= NETIF_F_GRO;
}
static void bond_work_cancel_all(struct bonding *bond)
@@ -4677,6 +4750,9 @@ static void bond_work_cancel_all(struct bonding *bond)
if (bond->params.mode == BOND_MODE_8023AD &&
delayed_work_pending(&bond->ad_work))
cancel_delayed_work(&bond->ad_work);
+
+ if (delayed_work_pending(&bond->mcast_work))
+ cancel_delayed_work(&bond->mcast_work);
}
/*
@@ -4869,6 +4945,13 @@ static int bond_check_params(struct bond_params *params)
all_slaves_active = 0;
}
+ if (resend_igmp < 0 || resend_igmp > 255) {
+ pr_warning("Warning: resend_igmp (%d) should be between "
+ "0 and 255, resetting to %d\n",
+ resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+ resend_igmp = BOND_DEFAULT_RESEND_IGMP;
+ }
+
/* reset values for TLB/ALB */
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
@@ -5041,6 +5124,7 @@ static int bond_check_params(struct bond_params *params)
params->fail_over_mac = fail_over_mac_value;
params->tx_queues = tx_queues;
params->all_slaves_active = all_slaves_active;
+ params->resend_igmp = resend_igmp;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
@@ -5142,6 +5226,15 @@ int bond_create(struct net *net, const char *name)
res = dev_alloc_name(bond_dev, "bond%d");
if (res < 0)
goto out;
+ } else {
+ /*
+ * If we're given a name to register,
+ * we need to ensure that it's not already
+ * registered.
+ */
+ res = -EEXIST;
+ if (__dev_get_by_name(net, name) != NULL)
+ goto out;
}
res = register_netdevice(bond_dev);
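The ARP monitor changes above turn one-sided time_before_eq()/time_after_eq() tests into bounded time_in_range() windows around dev_trans_start() and last_rx, so a timestamp that is stale, or apparently in the future, no longer counts as recent activity. A minimal sketch of the test, with an illustrative helper name and timestamp:

#include <linux/jiffies.h>
#include <linux/types.h>

/* last_event is an illustrative jiffies timestamp (e.g. a slave's last
 * transmit or receive time); delta_in_ticks is the monitor interval. */
static bool example_event_is_recent(unsigned long last_event,
                                    unsigned long delta_in_ticks)
{
        /* True only if "now" falls inside [last_event - delta,
         * last_event + delta]; anything outside the window fails. */
        return time_in_range(jiffies,
                             last_event - delta_in_ticks,
                             last_event + delta_in_ticks);
}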
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c311aed9bd0..01b4c3f5d9e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1592,6 +1592,49 @@ out:
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
bonding_show_slaves_active, bonding_store_slaves_active);
+/*
+ * Show and set the number of IGMP membership reports to send on link failure
+ */
+static ssize_t bonding_show_resend_igmp(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sprintf(buf, "%d\n", bond->params.resend_igmp);
+}
+
+static ssize_t bonding_store_resend_igmp(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no resend_igmp value specified.\n",
+ bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (new_value < 0 || new_value > 255) {
+ pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
+ bond->dev->name, new_value);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_info("%s: Setting resend_igmp to %d.\n",
+ bond->dev->name, new_value);
+ bond->params.resend_igmp = new_value;
+out:
+ return ret;
+}
+
+static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
+ bonding_show_resend_igmp, bonding_store_resend_igmp);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -1619,6 +1662,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_ad_partner_mac.attr,
&dev_attr_queue_id.attr,
&dev_attr_all_slaves_active.attr,
+ &dev_attr_resend_igmp.attr,
NULL,
};
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index c6fdd851579..c15f2134748 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -136,6 +136,7 @@ struct bond_params {
__be32 arp_targets[BOND_MAX_ARP_TARGETS];
int tx_queues;
int all_slaves_active;
+ int resend_igmp;
};
struct bond_parm_tbl {
@@ -202,6 +203,7 @@ struct bonding {
s8 send_grat_arp;
s8 send_unsol_na;
s8 setup_by_slave;
+ s8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
@@ -223,6 +225,7 @@ struct bonding {
struct delayed_work arp_work;
struct delayed_work alb_work;
struct delayed_work ad_work;
+ struct delayed_work mcast_work;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr master_ipv6;
#endif
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 88edb986691..6e99d80ec40 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -429,7 +429,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
if (!db->lens)
{
bsd_free (db);
- return (NULL);
+ return NULL;
}
}
/*
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a6242b01..75bfc3a9d95 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF
+ depends on CAIF && HAS_DMA
default n
---help---
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 077ccf840ed..2111dbfea6f 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -22,13 +22,13 @@
#include <net/caif/caif_spi.h>
#ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS SPI_CMD_SZ
+#define SPI_DATA_POS 0
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
return cfspi->rx_cpck_len;
}
#else
-#define SPI_DATA_POS 0
+#define SPI_DATA_POS SPI_CMD_SZ
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
return 0;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index af753936e83..312b9c8f4f3 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -38,7 +38,7 @@
struct mpc5xxx_can_data {
unsigned int type;
- u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name,
int *mscan_clksrc);
};
@@ -48,7 +48,7 @@ static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{}
};
-static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -101,7 +101,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
return freq;
}
#else /* !CONFIG_PPC_MPC52xx */
-static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -129,7 +129,7 @@ static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
{}
};
-static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -143,12 +143,12 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
if (!np_clock) {
dev_err(&ofdev->dev, "couldn't find clock node\n");
- return -ENODEV;
+ return 0;
}
clockctl = of_iomap(np_clock, 0);
if (!clockctl) {
dev_err(&ofdev->dev, "couldn't map clock registers\n");
- return 0;
+ goto exit_put;
}
/* Determine the MSCAN device index from the physical address */
@@ -233,13 +233,13 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
exit_unmap:
- of_node_put(np_clock);
iounmap(clockctl);
-
+exit_put:
+ of_node_put(np_clock);
return freq;
}
#else /* !CONFIG_PPC_MPC512x */
-static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -247,7 +247,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
}
#endif /* CONFIG_PPC_MPC512x */
-static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
+static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
const struct of_device_id *id)
{
struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
@@ -317,7 +317,7 @@ exit_unmap_mem:
return err;
}
-static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
+static int __devexit mpc5xxx_can_remove(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
@@ -334,7 +334,7 @@ static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
#ifdef CONFIG_PM
static struct mscan_regs saved_regs;
-static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
+static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
@@ -345,7 +345,7 @@ static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
return 0;
}
-static int mpc5xxx_can_resume(struct of_device *ofdev)
+static int mpc5xxx_can_resume(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index ac1a83d7c20..5bfccfdf3bb 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -67,7 +67,7 @@ static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
out_8(priv->reg_base + reg, val);
}
-static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
+static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
@@ -87,7 +87,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
return 0;
}
-static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
+static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
const struct of_device_id *id)
{
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 28c88eeec75..d6b6d6aa565 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -419,7 +419,7 @@ static u16 cas_phy_read(struct cas *cp, int reg)
udelay(10);
cmd = readl(cp->regs + REG_MIF_FRAME);
if (cmd & MIF_FRAME_TURN_AROUND_LSB)
- return (cmd & MIF_FRAME_DATA_MASK);
+ return cmd & MIF_FRAME_DATA_MASK;
}
return 0xFFFF; /* -1 */
}
@@ -804,7 +804,7 @@ static int cas_reset_mii_phy(struct cas *cp)
break;
udelay(10);
}
- return (limit <= 0);
+ return limit <= 0;
}
static int cas_saturn_firmware_init(struct cas *cp)
@@ -2149,7 +2149,7 @@ end_copy_pkt:
skb->csum = csum_unfold(~csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
return len;
}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f01cfdb995d..340b537dc97 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1388,7 +1388,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
++st->rx_cso_good;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
st->vlan_xtract++;
@@ -1551,7 +1551,7 @@ static inline int responses_pending(const struct adapter *adapter)
const struct respQ *Q = &adapter->sge->respQ;
const struct respQ_e *e = &Q->entries[Q->cidx];
- return (e->GenerationBit == Q->genbit);
+ return e->GenerationBit == Q->genbit;
}
/*
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 599d178df62..63ebf76d239 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -314,14 +314,12 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
return 0;
}
-#if defined(CONFIG_CHELSIO_T1_1G)
static const struct mdio_ops mi1_mdio_ops = {
.init = mi1_mdio_init,
.read = mi1_mdio_read,
.write = mi1_mdio_write,
.mode_support = MDIO_SUPPORTS_C22
};
-#endif
#endif
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index c844111cffe..106a590f0d9 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -255,7 +255,7 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
else if ((result & (1 << 8)) != 0x0)
pr_err("bist read error: 0x%x\n", result);
- return (result & 0xff);
+ return result & 0xff;
}
static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 09610323a94..92bac19ad60 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -60,6 +60,7 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
static LIST_HEAD(cnic_dev_list);
+static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
@@ -81,29 +82,34 @@ static struct cnic_ops cnic_bnx2x_ops = {
.cnic_ctl = cnic_ctl,
};
+static struct workqueue_struct *cnic_wq;
+
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
- struct cnic_dev *dev = uinfo->priv;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = uinfo->priv;
+ struct cnic_dev *dev;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (cp->uio_dev != -1)
+ if (udev->uio_dev != -1)
return -EBUSY;
rtnl_lock();
- if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ dev = udev->dev;
+
+ if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
rtnl_unlock();
return -ENODEV;
}
- cp->uio_dev = iminor(inode);
+ udev->uio_dev = iminor(inode);
+ cnic_shutdown_rings(dev);
cnic_init_rings(dev);
rtnl_unlock();
@@ -112,12 +118,9 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
- struct cnic_dev *dev = uinfo->priv;
- struct cnic_local *cp = dev->cnic_priv;
-
- cnic_shutdown_rings(dev);
+ struct cnic_uio_dev *udev = uinfo->priv;
- cp->uio_dev = -1;
+ udev->uio_dev = -1;
return 0;
}
@@ -242,14 +245,14 @@ static int cnic_in_use(struct cnic_sock *csk)
return test_bit(SK_F_INUSE, &csk->flags);
}
-static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
+static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
- info.cmd = DRV_CTL_COMPLETION_CMD;
- info.data.comp.comp_count = count;
+ info.cmd = cmd;
+ info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
}
@@ -274,8 +277,9 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
u16 len = 0;
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
struct cnic_ulp_ops *ulp_ops;
+ struct cnic_uio_dev *udev = cp->udev;
- if (cp->uio_dev == -1)
+ if (!udev || udev->uio_dev == -1)
return -ENODEV;
if (csk) {
@@ -406,8 +410,7 @@ static void cnic_uio_stop(void)
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
- if (cp->cnic_uinfo)
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
}
read_unlock(&cnic_dev_lock);
}
@@ -768,31 +771,45 @@ static void cnic_free_context(struct cnic_dev *dev)
}
}
-static void cnic_free_resc(struct cnic_dev *dev)
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
- struct cnic_local *cp = dev->cnic_priv;
- int i = 0;
+ uio_unregister_device(&udev->cnic_uinfo);
- if (cp->cnic_uinfo) {
- while (cp->uio_dev != -1 && i < 15) {
- msleep(100);
- i++;
- }
- uio_unregister_device(cp->cnic_uinfo);
- kfree(cp->cnic_uinfo);
- cp->cnic_uinfo = NULL;
+ if (udev->l2_buf) {
+ dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
+ udev->l2_buf, udev->l2_buf_map);
+ udev->l2_buf = NULL;
}
- if (cp->l2_buf) {
- dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
- cp->l2_buf, cp->l2_buf_map);
- cp->l2_buf = NULL;
+ if (udev->l2_ring) {
+ dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ udev->l2_ring, udev->l2_ring_map);
+ udev->l2_ring = NULL;
}
- if (cp->l2_ring) {
- dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
- cp->l2_ring, cp->l2_ring_map);
- cp->l2_ring = NULL;
+ pci_dev_put(udev->pdev);
+ kfree(udev);
+}
+
+static void cnic_free_uio(struct cnic_uio_dev *udev)
+{
+ if (!udev)
+ return;
+
+ write_lock(&cnic_dev_lock);
+ list_del_init(&udev->list);
+ write_unlock(&cnic_dev_lock);
+ __cnic_free_uio(udev);
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+
+ if (udev) {
+ udev->dev = NULL;
+ cp->udev = NULL;
}
cnic_free_context(dev);
@@ -894,37 +911,68 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
return 0;
}
-static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
+static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev;
+
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(udev, &cnic_udev_list, list) {
+ if (udev->pdev == dev->pcidev) {
+ udev->dev = dev;
+ cp->udev = udev;
+ read_unlock(&cnic_dev_lock);
+ return 0;
+ }
+ }
+ read_unlock(&cnic_dev_lock);
+
+ udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
+ if (!udev)
+ return -ENOMEM;
- cp->l2_ring_size = pages * BCM_PAGE_SIZE;
- cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
- &cp->l2_ring_map,
- GFP_KERNEL | __GFP_COMP);
- if (!cp->l2_ring)
+ udev->uio_dev = -1;
+
+ udev->dev = dev;
+ udev->pdev = dev->pcidev;
+ udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+ udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ &udev->l2_ring_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_ring)
return -ENOMEM;
- cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
- cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
- &cp->l2_buf_map,
- GFP_KERNEL | __GFP_COMP);
- if (!cp->l2_buf)
+ udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+ udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+ &udev->l2_buf_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_buf)
return -ENOMEM;
+ write_lock(&cnic_dev_lock);
+ list_add(&udev->list, &cnic_udev_list);
+ write_unlock(&cnic_dev_lock);
+
+ pci_dev_get(udev->pdev);
+
+ cp->udev = udev;
+
return 0;
}
-static int cnic_alloc_uio(struct cnic_dev *dev) {
+static int cnic_init_uio(struct cnic_dev *dev)
+{
struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
struct uio_info *uinfo;
- int ret;
+ int ret = 0;
- uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
- if (!uinfo)
+ if (!udev)
return -ENOMEM;
+ uinfo = &udev->cnic_uinfo;
+
uinfo->mem[0].addr = dev->netdev->base_addr;
uinfo->mem[0].internal_addr = dev->regview;
uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
@@ -932,7 +980,7 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
- PAGE_MASK;
+ PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -942,19 +990,19 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
- uinfo->mem[1].size = sizeof(struct host_def_status_block);
+ uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
}
uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
- uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
- uinfo->mem[2].size = cp->l2_ring_size;
+ uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
+ uinfo->mem[2].size = udev->l2_ring_size;
uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
- uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
- uinfo->mem[3].size = cp->l2_buf_size;
+ uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
+ uinfo->mem[3].size = udev->l2_buf_size;
uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
uinfo->version = CNIC_MODULE_VERSION;
@@ -963,16 +1011,17 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
uinfo->open = cnic_uio_open;
uinfo->release = cnic_uio_close;
- uinfo->priv = dev;
+ if (udev->uio_dev == -1) {
+ if (!uinfo->priv) {
+ uinfo->priv = udev;
- ret = uio_register_device(&dev->pcidev->dev, uinfo);
- if (ret) {
- kfree(uinfo);
- return ret;
+ ret = uio_register_device(&udev->pdev->dev, uinfo);
+ }
+ } else {
+ cnic_init_rings(dev);
}
- cp->cnic_uinfo = uinfo;
- return 0;
+ return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
@@ -993,11 +1042,11 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
if (ret)
goto error;
- ret = cnic_alloc_l2_rings(dev, 2);
+ ret = cnic_alloc_uio_rings(dev, 2);
if (ret)
goto error;
- ret = cnic_alloc_uio(dev);
+ ret = cnic_init_uio(dev);
if (ret)
goto error;
@@ -1022,13 +1071,13 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
if (blks > cp->ethdev->ctx_tbl_len)
return -ENOMEM;
- cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
+ cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
- if (BNX2X_CHIP_IS_E1H(cp->chip_id))
+ if (!BNX2X_CHIP_IS_57710(cp->chip_id))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
@@ -1063,6 +1112,8 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
+ cp->iro_arr = ethdev->iro_arr;
+
cp->max_cid_space = MAX_ISCSI_TBL_SZ;
cp->iscsi_start_cid = start_cid;
if (start_cid < BNX2X_ISCSI_START_CID) {
@@ -1127,15 +1178,13 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
- memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
-
cp->l2_rx_ring_size = 15;
- ret = cnic_alloc_l2_rings(dev, 4);
+ ret = cnic_alloc_uio_rings(dev, 4);
if (ret)
goto error;
- ret = cnic_alloc_uio(dev);
+ ret = cnic_init_uio(dev);
if (ret)
goto error;
@@ -1209,9 +1258,9 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
- BNX2X_HW_CID(cid, cp->func)));
+ BNX2X_HW_CID(cp, cid)));
kwqe.hdr.type = cpu_to_le16(type);
- kwqe.hdr.reserved = 0;
+ kwqe.hdr.reserved1 = 0;
kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
@@ -1246,8 +1295,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
- int func = cp->func, pages;
- int hq_bds;
+ int hq_bds, pages;
+ u32 pfid = cp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
@@ -1264,60 +1313,60 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
return 0;
/* init Tstorm RAM */
- CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
- CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+ TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+ TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
/* init Ustorm RAM */
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
+ USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
- CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+ USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+ USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
- CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
- CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
- CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Xstorm RAM */
- CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+ XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+ XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
- CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
- CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
req1->num_tasks_per_conn);
- CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Cstorm RAM */
- CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
+ CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
+ CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
- CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
- CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
return 0;
@@ -1327,7 +1376,7 @@ static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
struct cnic_local *cp = dev->cnic_priv;
- int func = cp->func;
+ u32 pfid = cp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1339,21 +1388,21 @@ static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
}
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
+ TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
+ TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
req2->error_bit_map[1]);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
+ USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
+ USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
+ USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
req2->error_bit_map[1]);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
+ CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
@@ -1461,7 +1510,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
u32 cid = ctx->cid;
- u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
+ u32 hw_cid = BNX2X_HW_CID(cp, cid);
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
@@ -1527,8 +1576,10 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
ictx->tstorm_st_context.tcp.flags2 |=
TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
+ ictx->tstorm_st_context.tcp.ooo_support_mode =
+ TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
- ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
+ ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
ictx->ustorm_st_context.ring.rq.pbl_base.lo =
req2->rq_page_table_addr_lo;
@@ -1627,10 +1678,11 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
struct iscsi_kwqe_conn_offload1 *req1;
struct iscsi_kwqe_conn_offload2 *req2;
struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
u32 l5_cid;
- int ret;
+ int ret = 0;
if (num < 2) {
*work = num;
@@ -1654,9 +1706,15 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
kcqe.iscsi_conn_id = l5_cid;
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+ ctx = &cp->ctx_tbl[l5_cid];
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
+ kcqe.completion_status =
+ ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
+ goto done;
+ }
+
if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
atomic_dec(&cp->iscsi_conn);
- ret = 0;
goto done;
}
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
@@ -1673,8 +1731,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
}
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
- kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
- cp->func);
+ kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
done:
cqes[0] = (struct kcqe *) &kcqe;
@@ -1707,40 +1764,66 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
return ret;
}
+static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 hw_cid, type;
+
+ init_waitqueue_head(&ctx->waitq);
+ ctx->wait_cond = 0;
+ memset(&l5_data, 0, sizeof(l5_data));
+ hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+ type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+ type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ hw_cid, type, &l5_data);
+
+ if (ret == 0)
+ wait_event(ctx->waitq, ctx->wait_cond);
+
+ return ret;
+}
+
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_conn_destroy *req =
(struct iscsi_kwqe_conn_destroy *) kwqe;
- union l5cm_specific_data l5_data;
u32 l5_cid = req->reserved0;
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
int ret = 0;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
- if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
+ if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
goto skip_cfc_delete;
- while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
- msleep(250);
+ if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+ unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
- init_waitqueue_head(&ctx->waitq);
- ctx->wait_cond = 0;
- memset(&l5_data, 0, sizeof(l5_data));
- ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
- req->context_id,
- ETH_CONNECTION_TYPE |
- (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
- &l5_data);
- if (ret == 0)
- wait_event(ctx->waitq, ctx->wait_cond);
+ if (delta > (2 * HZ))
+ delta = 0;
+
+ set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task, delta);
+ goto destroy_reply;
+ }
+
+ ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
skip_cfc_delete:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
atomic_dec(&cp->iscsi_conn);
+ clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+destroy_reply:
memset(&kcqe, 0, sizeof(kcqe));
kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
kcqe.iscsi_conn_id = l5_cid;
@@ -1805,37 +1888,37 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- int func = CNIC_FUNC(cp);
+ u32 pfid = cp->pfid;
u8 *mac = dev->mac_addr;
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
+ XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
+ XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
+ XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
+ XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
+ XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);
+ XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
+ TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
+ TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[4]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[2]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
mac[1]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
mac[0]);
}
@@ -1851,10 +1934,10 @@ static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
}
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);
+ XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
+ TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
@@ -1929,7 +2012,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
cnic_bnx2x_set_tcp_timestamp(dev,
kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
@@ -1937,7 +2020,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
if (!ret)
- ctx->ctx_flags |= CTX_FL_OFFLD_START;
+ set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
return ret;
}
@@ -2063,7 +2146,7 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
- int i, j;
+ int i, j, comp = 0;
i = 0;
j = 1;
@@ -2074,7 +2157,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
- cnic_kwq_completion(dev, 1);
+ comp++;
while (j < num_cqes) {
u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
@@ -2083,7 +2166,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
break;
if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
- cnic_kwq_completion(dev, 1);
+ comp++;
j++;
}
@@ -2113,6 +2196,8 @@ end:
i += j;
j = 1;
}
+ if (unlikely(comp))
+ cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
static u16 cnic_bnx2_next_idx(u16 idx)
@@ -2171,8 +2256,9 @@ static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
static int cnic_l2_completion(struct cnic_local *cp)
{
u16 hw_cons, sw_cons;
+ struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
- (cp->l2_ring + (2 * BCM_PAGE_SIZE));
+ (udev->l2_ring + (2 * BCM_PAGE_SIZE));
u32 cmd;
int comp = 0;
@@ -2203,13 +2289,14 @@ static int cnic_l2_completion(struct cnic_local *cp)
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
- u16 rx_cons = *cp->rx_cons_ptr;
- u16 tx_cons = *cp->tx_cons_ptr;
+ u16 rx_cons, tx_cons;
int comp = 0;
- if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags))
+ if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
+ rx_cons = *cp->rx_cons_ptr;
+ tx_cons = *cp->tx_cons_ptr;
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
comp = cnic_l2_completion(cp);
@@ -2217,7 +2304,8 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
cp->tx_cons = tx_cons;
cp->rx_cons = rx_cons;
- uio_event_notify(cp->cnic_uinfo);
+ if (cp->udev)
+ uio_event_notify(&cp->udev->cnic_uinfo);
}
if (comp)
clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
@@ -2318,14 +2406,38 @@ static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
+static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ struct igu_regular cmd_data;
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
+
+ cmd_data.sb_id_and_flags =
+ (index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT);
+
+
+ CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
+}
+
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
IGU_INT_DISABLE, 0);
}
+static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
+ IGU_INT_DISABLE, 0);
+}
+
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
u32 last_status = *info->status_idx_ptr;
@@ -2357,8 +2469,12 @@ static void cnic_service_bnx2x_bh(unsigned long data)
status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+ if (BNX2X_CHIP_IS_E2(cp->chip_id))
+ cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
+ status_idx, IGU_INT_ENABLE, 1);
+ else
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
}
static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -2379,8 +2495,7 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int if_type;
- if (cp->cnic_uinfo)
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
@@ -2728,6 +2843,13 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
if (l5_cid >= MAX_CM_SK_TBL_SZ)
return -EINVAL;
+ if (cp->ctx_tbl) {
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ return -EAGAIN;
+ }
+
csk1 = &cp->csk_tbl[l5_cid];
if (atomic_read(&csk1->ref_count))
return -EAGAIN;
@@ -3279,39 +3401,106 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i;
+
+ if (!cp->ctx_tbl)
+ return;
+
+ if (!netif_running(dev->netdev))
+ return;
+
+ for (i = 0; i < cp->max_cid_space; i++) {
+ struct cnic_context *ctx = &cp->ctx_tbl[i];
+
+ while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ msleep(10);
+
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ netdev_warn(dev->netdev, "CID %x not deleted\n",
+ ctx->cid);
+ }
+
+ cancel_delayed_work(&cp->delete_task);
+ flush_workqueue(cnic_wq);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+ atomic_read(&cp->iscsi_conn));
}
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- int func = CNIC_FUNC(cp);
+ u32 pfid = cp->pfid;
+ u32 port = CNIC_PORT(cp);
cnic_init_bnx2x_mac(dev);
cnic_bnx2x_set_tcp_timestamp(dev, 1);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
- XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
+ XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
- XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
+ XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
DEF_MAX_DA_COUNT);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
+ XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
+ XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
+ XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
CNIC_WR(dev, BAR_XSTRORM_INTMEM +
- XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);
+ XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
- CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
+ CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
DEF_MAX_CWND);
return 0;
}
+static void cnic_delete_task(struct work_struct *work)
+{
+ struct cnic_local *cp;
+ struct cnic_dev *dev;
+ u32 i;
+ int need_resched = 0;
+
+ cp = container_of(work, struct cnic_local, delete_task.work);
+ dev = cp->dev;
+
+ for (i = 0; i < cp->max_cid_space; i++) {
+ struct cnic_context *ctx = &cp->ctx_tbl[i];
+
+ if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
+ !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ continue;
+
+ if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+ need_resched = 1;
+ continue;
+ }
+
+ if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ continue;
+
+ cnic_bnx2x_destroy_ramrod(dev, i);
+
+ cnic_free_bnx2x_conn_resc(dev, i);
+ if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+ atomic_dec(&cp->iscsi_conn);
+
+ clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ }
+
+ if (need_resched)
+ queue_delayed_work(cnic_wq, &cp->delete_task,
+ msecs_to_jiffies(10));
+
+}
+
static int cnic_cm_open(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -3326,6 +3515,8 @@ static int cnic_cm_open(struct cnic_dev *dev)
if (err)
goto err_out;
+ INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
+
dev->cm_create = cnic_cm_create;
dev->cm_destroy = cnic_cm_destroy;
dev->cm_connect = cnic_cm_connect;
@@ -3418,11 +3609,24 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_disable(&cp->cnic_irq_task);
+ tasklet_kill(&cp->cnic_irq_task);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
+static int cnic_request_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
+ if (err)
+ tasklet_disable(&cp->cnic_irq_task);
+
+ return err;
+}
+
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -3443,12 +3647,10 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
(unsigned long) dev);
- err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
- "cnic", dev);
- if (err) {
- tasklet_disable(&cp->cnic_irq_task);
+ err = cnic_request_irq(dev);
+ if (err)
return err;
- }
+
while (cp->status_blk.bnx2->status_completion_producer_index &&
i < 10) {
CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
@@ -3515,11 +3717,12 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, tx_cid, sb_id;
u32 val, offset0, offset1, offset2, offset3;
int i;
struct tx_bd *txbd;
- dma_addr_t buf_map;
+ dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
@@ -3561,18 +3764,18 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
cnic_ctx_wr(dev, cid_addr, offset1, val);
- txbd = (struct tx_bd *) cp->l2_ring;
+ txbd = (struct tx_bd *) udev->l2_ring;
- buf_map = cp->l2_buf_map;
+ buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) cp->l2_ring_map >> 32;
+ val = (u64) ring_map >> 32;
cnic_ctx_wr(dev, cid_addr, offset2, val);
txbd->tx_bd_haddr_hi = val;
- val = (u64) cp->l2_ring_map & 0xffffffff;
+ val = (u64) ring_map & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, offset3, val);
txbd->tx_bd_haddr_lo = val;
}
@@ -3581,10 +3784,12 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, sb_id, val, coal_reg, coal_val;
int i;
struct rx_bd *rxbd;
struct status_block *s_blk = cp->status_blk.gen;
+ dma_addr_t ring_map = udev->l2_ring_map;
sb_id = cp->status_blk_num;
cnic_init_context(dev, 2);
@@ -3618,22 +3823,22 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+ rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
- buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+ buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
rxbd->rx_bd_len = cp->l2_single_buf_size;
rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
- val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@@ -3850,42 +4055,55 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
(unsigned long) dev);
- if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
- err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
- "cnic", dev);
- if (err)
- tasklet_disable(&cp->cnic_irq_task);
- }
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+ err = cnic_request_irq(dev);
+
return err;
}
+static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
+ u16 sb_id, u8 sb_index,
+ u8 disable)
+{
+
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+ offsetof(struct hc_status_block_data_e1x, index_data) +
+ sizeof(struct hc_index_data)*sb_index +
+ offsetof(struct hc_index_data, flags);
+ u16 flags = CNIC_RD16(dev, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
+ HC_INDEX_DATA_HC_ENABLED);
+ CNIC_WR16(dev, addr, flags);
+}
+
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u8 sb_id = cp->status_blk_num;
- int port = CNIC_PORT(cp);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
- HC_INDEX_C_ISCSI_EQ_CONS),
- 64 / 12);
- CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
- HC_INDEX_C_ISCSI_EQ_CONS), 0);
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+ offsetof(struct hc_status_block_data_e1x, index_data) +
+ sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
+ offsetof(struct hc_index_data, timeout), 64 / 12);
+ cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
-static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
+static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
+ struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
- union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
- struct eth_context *context;
- struct regpair context_addr;
- dma_addr_t buf_map;
- int func = CNIC_FUNC(cp);
+ struct cnic_uio_dev *udev = cp->udev;
+ union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
+ dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+ struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int port = CNIC_PORT(cp);
int i;
int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
@@ -3893,7 +4111,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
memset(txbd, 0, BCM_PAGE_SIZE);
- buf_map = cp->l2_buf_map;
+ buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
struct eth_tx_start_bd *start_bd = &txbd->start_bd;
struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
@@ -3910,33 +4128,23 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
}
- context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);
- val = (u64) cp->l2_ring_map >> 32;
+ val = (u64) ring_map >> 32;
txbd->next_bd.addr_hi = cpu_to_le32(val);
- context->xstorm_st_context.tx_bd_page_base_hi = val;
+ data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
- val = (u64) cp->l2_ring_map & 0xffffffff;
+ val = (u64) ring_map & 0xffffffff;
txbd->next_bd.addr_lo = cpu_to_le32(val);
- context->xstorm_st_context.tx_bd_page_base_lo = val;
-
- context->cstorm_st_context.sb_index_number =
- HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
- context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;
+ data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
- if (cli < MAX_X_STAT_COUNTER_ID)
- context->xstorm_st_context.statistics_data = cli |
- XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE;
-
- context->xstorm_ag_context.cdu_reserved =
- CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
- CDU_REGION_NUMBER_XCM_AG,
- ETH_CONNECTION_TYPE);
+ /* Other ramrod params */
+ data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
+ data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
/* reset xstorm per client statistics */
- if (cli < MAX_X_STAT_COUNTER_ID) {
+ if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_XSTRORM_INTMEM +
XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
@@ -3944,111 +4152,77 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
}
cp->tx_cons_ptr =
- &cp->bnx2x_def_status_blk->c_def_status_block.index_values[
- HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
+ &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
-static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
+static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
+ struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
- struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
+ struct cnic_uio_dev *udev = cp->udev;
+ struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BCM_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
- (cp->l2_ring + (2 * BCM_PAGE_SIZE));
- struct eth_context *context;
- struct regpair context_addr;
+ (udev->l2_ring + (2 * BCM_PAGE_SIZE));
+ struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
int port = CNIC_PORT(cp);
- int func = CNIC_FUNC(cp);
int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+ int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val;
- struct tstorm_eth_client_config tstorm_client = {0};
+ dma_addr_t ring_map = udev->l2_ring_map;
+
+ /* General data */
+ data->general.client_id = cli;
+ data->general.statistics_en_flg = 1;
+ data->general.statistics_counter_id = cli;
+ data->general.activate_flg = 1;
+ data->general.sp_client_id = cli;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
- buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+ buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
- context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);
- val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
+ data->rx.bd_page_base.hi = cpu_to_le32(val);
- context->ustorm_st_context.common.bd_page_base_hi = val;
-
- val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
-
- context->ustorm_st_context.common.bd_page_base_lo = val;
-
- context->ustorm_st_context.common.sb_index_numbers =
- BNX2X_ISCSI_RX_SB_INDEX_NUM;
- context->ustorm_st_context.common.clientId = cli;
- context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
- if (cli < MAX_U_STAT_COUNTER_ID) {
- context->ustorm_st_context.common.flags =
- USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
- context->ustorm_st_context.common.statistics_counter_id = cli;
- }
- context->ustorm_st_context.common.mc_alignment_log_size = 0;
- context->ustorm_st_context.common.bd_buff_size =
- cp->l2_single_buf_size;
-
- context->ustorm_ag_context.cdu_usage =
- CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
- CDU_REGION_NUMBER_UCM_AG,
- ETH_CONNECTION_TYPE);
+ data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
- val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
+ val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
+ data->rx.cqe_page_base.hi = cpu_to_le32(val);
- CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);
-
- CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);
-
- val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
+ val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
+ data->rx.cqe_page_base.lo = cpu_to_le32(val);
- CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);
-
- CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);
-
- /* client tstorm info */
- tstorm_client.mtu = cp->l2_single_buf_size - 14;
- tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE;
-
- if (cli < MAX_T_STAT_COUNTER_ID) {
- tstorm_client.config_flags |=
- TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
- tstorm_client.statistics_counter_id = cli;
- }
+ /* Other ramrod params */
+ data->rx.client_qzone_id = cl_qzone_id;
+ data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
+ data->rx.status_block_id = BNX2X_DEF_SB_ID;
- CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
- ((u32 *)&tstorm_client)[0]);
- CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
- ((u32 *)&tstorm_client)[1]);
+ data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
+ data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
- /* reset tstorm per client statistics */
- if (cli < MAX_T_STAT_COUNTER_ID) {
+ data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+ data->rx.outer_vlan_removal_enable_flg = 1;
+ /* reset tstorm and ustorm per client statistics */
+ if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_TSTRORM_INTMEM +
TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
- }
- /* reset ustorm per client statistics */
- if (cli < MAX_U_STAT_COUNTER_ID) {
val = BAR_USTRORM_INTMEM +
USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
@@ -4056,21 +4230,22 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
}
cp->rx_cons_ptr =
- &cp->bnx2x_def_status_blk->u_def_status_block.index_values[
- HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
+ &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
}
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- u32 base, addr, val;
+ u32 base, base2, addr, val;
int port = CNIC_PORT(cp);
dev->max_iscsi_conn = 0;
base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
- if (base < 0xa0000 || base >= 0xc0000)
+ if (base == 0)
return;
+ base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
addr = BNX2X_SHMEM_ADDR(base,
dev_info.port_hw_config[port].iscsi_mac_upper);
@@ -4103,16 +4278,25 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
val16 ^= 0x1e1e;
dev->max_iscsi_conn = val16;
}
- if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
int func = CNIC_FUNC(cp);
+ u32 mf_cfg_addr;
+
+ if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
+ mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
+ mf_cfg_addr));
+ else
+ mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
+
+ addr = mf_cfg_addr +
+ offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
- addr = BNX2X_SHMEM_ADDR(base,
- mf_cfg.func_mf_config[func].e1hov_tag);
val = CNIC_RD(dev, addr);
val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
- addr = BNX2X_SHMEM_ADDR(base,
- mf_cfg.func_mf_config[func].config);
+ addr = mf_cfg_addr +
+ offsetof(struct mf_cfg,
+ func_mf_config[func].config);
val = CNIC_RD(dev, addr);
val &= FUNC_MF_CFG_PROTOCOL_MASK;
if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
@@ -4124,10 +4308,26 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
int func = CNIC_FUNC(cp), ret, i;
- int port = CNIC_PORT(cp);
- u16 eq_idx;
- u8 sb_id = cp->status_blk_num;
+ u32 pfid;
+
+ if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+
+ if (!(val & 1))
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+
+ if (val)
+ cp->pfid = func >> 1;
+ else
+ cp->pfid = func & 0x6;
+ } else {
+ cp->pfid = func;
+ }
+ pfid = cp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
cp->iscsi_start_cid);
@@ -4135,86 +4335,98 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return -ENOMEM;
+ cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
+ CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
- cp->kcq1.hw_prod_idx_ptr =
- &cp->status_blk.bnx2x->c_status_block.index_values[
- HC_INDEX_C_ISCSI_EQ_CONS];
- cp->kcq1.status_idx_ptr =
- &cp->status_blk.bnx2x->c_status_block.status_block_index;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+ cp->kcq1.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
+ } else {
+ struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+
+ cp->kcq1.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
+ }
cnic_get_bnx2x_iscsi_info(dev);
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
+ CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
+ CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
+ CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
+ CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
- HC_INDEX_C_ISCSI_EQ_CONS);
+ CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
+ HC_INDEX_ISCSI_EQ_CONS);
for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
+ TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
cp->conn_buf_info.pgtbl[2 * i]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
+ TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
cp->conn_buf_info.pgtbl[(2 * i) + 1]);
}
CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
+ USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
+ USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
+ CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
+
cnic_setup_bnx2x_context(dev);
- eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
- offsetof(struct cstorm_status_block_c,
- index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
- if (eq_idx != 0) {
- netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
- return -EBUSY;
- }
ret = cnic_init_bnx2x_irq(dev);
if (ret)
return ret;
- cnic_init_bnx2x_tx_ring(dev);
- cnic_init_bnx2x_rx_ring(dev);
-
return 0;
}
static void cnic_init_rings(struct cnic_dev *dev)
{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+
+ if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+ return;
+
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
+ set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
- struct cnic_local *cp = dev->cnic_priv;
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+ u32 cl_qzone_id, type;
+ struct client_init_ramrod_data *data;
union l5cm_specific_data l5_data;
struct ustorm_eth_rx_producers rx_prods = {0};
u32 off, i;
@@ -4223,21 +4435,38 @@ static void cnic_init_rings(struct cnic_dev *dev)
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier();
+ cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+
off = BAR_USTRORM_INTMEM +
- USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);
+ (BNX2X_CHIP_IS_E2(cp->chip_id) ?
+ USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
+ USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
- cnic_init_bnx2x_tx_ring(dev);
- cnic_init_bnx2x_rx_ring(dev);
+ data = udev->l2_buf;
+
+ memset(data, 0, sizeof(*data));
+
+ cnic_init_bnx2x_tx_ring(dev, data);
+ cnic_init_bnx2x_rx_ring(dev, data);
+
+ l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
+ l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
+
+ type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+ type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
- l5_data.phy_address.lo = cli;
- l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
- BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+ BNX2X_ISCSI_L2_CID, type, &l5_data);
+
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10)
@@ -4246,13 +4475,18 @@ static void cnic_init_rings(struct cnic_dev *dev)
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
netdev_err(dev->netdev,
"iSCSI CLIENT_SETUP did not complete\n");
- cnic_kwq_completion(dev, 1);
+ cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
}
}
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+ return;
+
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_shutdown_bnx2_rx_ring(dev);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
@@ -4260,6 +4494,7 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
union l5cm_specific_data l5_data;
int i;
+ u32 type;
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
@@ -4277,14 +4512,18 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
netdev_err(dev->netdev,
"iSCSI CLIENT_HALT did not complete\n");
- cnic_kwq_completion(dev, 1);
+ cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
memset(&l5_data, 0, sizeof(l5_data));
- cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
- BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE |
- (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data);
+ type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+ type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ BNX2X_ISCSI_L2_CID, type, &l5_data);
msleep(10);
}
+ clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}
static int cnic_register_netdev(struct cnic_dev *dev)
@@ -4327,7 +4566,6 @@ static int cnic_start_hw(struct cnic_dev *dev)
return -EALREADY;
dev->regview = ethdev->io_base;
- cp->chip_id = ethdev->chip_id;
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
@@ -4379,17 +4617,11 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- u8 sb_id = cp->status_blk_num;
- int port = CNIC_PORT(cp);
cnic_free_irq(dev);
- CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
- CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
- offsetof(struct cstorm_status_block_c,
- index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
- 0);
+ *cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
+ CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
@@ -4403,10 +4635,11 @@ static void cnic_stop_hw(struct cnic_dev *dev)
/* Need to wait for the ring shutdown event to complete
* before clearing the CNIC_UP flag.
*/
- while (cp->uio_dev != -1 && i < 15) {
+ while (cp->udev->uio_dev != -1 && i < 15) {
msleep(100);
i++;
}
+ cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -4455,7 +4688,6 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cp = cdev->cnic_priv;
cp->dev = cdev;
- cp->uio_dev = -1;
cp->l2_single_buf_size = 0x400;
cp->l2_rx_ring_size = 3;
@@ -4510,6 +4742,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
cp = cdev->cnic_priv;
cp->ethdev = ethdev;
cdev->pcidev = pdev;
+ cp->chip_id = ethdev->chip_id;
cp->cnic_ops = &cnic_bnx2_ops;
cp->start_hw = cnic_start_bnx2_hw;
@@ -4564,6 +4797,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp = cdev->cnic_priv;
cp->ethdev = ethdev;
cdev->pcidev = pdev;
+ cp->chip_id = ethdev->chip_id;
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
@@ -4575,7 +4809,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- cp->ack_int = cnic_ack_bnx2x_msix;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id))
+ cp->ack_int = cnic_ack_bnx2x_e2_msix;
+ else
+ cp->ack_int = cnic_ack_bnx2x_msix;
cp->close_conn = cnic_close_bnx2x_conn;
cp->next_idx = cnic_bnx2x_next_idx;
cp->hw_idx = cnic_bnx2x_hw_idx;
@@ -4683,6 +4920,7 @@ static struct notifier_block cnic_netdev_notifier = {
static void cnic_release(void)
{
struct cnic_dev *dev;
+ struct cnic_uio_dev *udev;
while (!list_empty(&cnic_dev_list)) {
dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
@@ -4696,6 +4934,11 @@ static void cnic_release(void)
list_del_init(&dev->list);
cnic_free_dev(dev);
}
+ while (!list_empty(&cnic_udev_list)) {
+ udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
+ list);
+ cnic_free_uio(udev);
+ }
}
static int __init cnic_init(void)
@@ -4710,6 +4953,13 @@ static int __init cnic_init(void)
return rc;
}
+ cnic_wq = create_singlethread_workqueue("cnic_wq");
+ if (!cnic_wq) {
+ cnic_release();
+ unregister_netdevice_notifier(&cnic_netdev_notifier);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -4717,6 +4967,7 @@ static void __exit cnic_exit(void)
{
unregister_netdevice_notifier(&cnic_netdev_notifier);
cnic_release();
+ destroy_workqueue(cnic_wq);
}
module_init(cnic_init);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 275c36114d8..6a4a0ae5cfe 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -12,6 +12,13 @@
#ifndef CNIC_H
#define CNIC_H
+#define HC_INDEX_ISCSI_EQ_CONS 6
+
+#define HC_INDEX_FCOE_EQ_CONS 3
+
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
#define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16
@@ -161,8 +168,9 @@ struct cnic_context {
wait_queue_head_t waitq;
int wait_cond;
unsigned long timestamp;
- u32 ctx_flags;
-#define CTX_FL_OFFLD_START 0x00000001
+ unsigned long ctx_flags;
+#define CTX_FL_OFFLD_START 0
+#define CTX_FL_DELETE_WAIT 1
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
@@ -179,6 +187,31 @@ struct kcq_info {
u32 io_addr;
};
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct cnic_uio_dev {
+ struct uio_info cnic_uinfo;
+ u32 uio_dev;
+
+ int l2_ring_size;
+ void *l2_ring;
+ dma_addr_t l2_ring_map;
+
+ int l2_buf_size;
+ void *l2_buf;
+ dma_addr_t l2_buf_map;
+
+ struct cnic_dev *dev;
+ struct pci_dev *pdev;
+ struct list_head list;
+};
+
struct cnic_local {
spinlock_t cnic_ulp_lock;
@@ -192,19 +225,15 @@ struct cnic_local {
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
#define CNIC_LCL_FL_L2_WAIT 0x1
+#define CNIC_LCL_FL_RINGS_INITED 0x2
struct cnic_dev *dev;
struct cnic_eth_dev *ethdev;
- void *l2_ring;
- dma_addr_t l2_ring_map;
- int l2_ring_size;
- int l2_rx_ring_size;
+ struct cnic_uio_dev *udev;
- void *l2_buf;
- dma_addr_t l2_buf_map;
- int l2_buf_size;
+ int l2_rx_ring_size;
int l2_single_buf_size;
u16 *rx_cons_ptr;
@@ -212,6 +241,9 @@ struct cnic_local {
u16 rx_cons;
u16 tx_cons;
+ struct iro *iro_arr;
+#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
+
struct cnic_dma kwq_info;
struct kwqe **kwq;
@@ -230,12 +262,16 @@ struct cnic_local {
union {
void *gen;
struct status_block_msix *bnx2;
- struct host_status_block *bnx2x;
+ struct host_hc_status_block_e1x *bnx2x_e1x;
+ /* index values - which counter to update */
+ #define SM_RX_ID 0
+ #define SM_TX_ID 1
} status_blk;
- struct host_def_status_block *bnx2x_def_status_blk;
+ struct host_sp_status_block *bnx2x_def_status_blk;
u32 status_blk_num;
+ u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
@@ -264,6 +300,8 @@ struct cnic_local {
int hq_size;
int num_cqs;
+ struct delayed_work delete_task;
+
struct cnic_ctx *ctx_arr;
int ctx_blks;
int ctx_blk_size;
@@ -272,11 +310,9 @@ struct cnic_local {
u32 chip_id;
int func;
+ u32 pfid;
u32 shmem_base;
- u32 uio_dev;
- struct uio_info *cnic_uinfo;
-
struct cnic_ops *cnic_ops;
int (*start_hw)(struct cnic_dev *);
void (*stop_hw)(struct cnic_dev *);
@@ -335,18 +371,36 @@ struct bnx2x_bd_chain_next {
#define BNX2X_ISCSI_GLB_BUF_SIZE 64
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
-#define BNX2X_HW_CID(x, func) ((x) | (((func) % PORT_MAX) << 23) | \
- (((func) >> 1) << 17))
-#define BNX2X_SW_CID(x) (x & 0x1ffff)
+
+#define BNX2X_CHIP_NUM_57710 0x164e
#define BNX2X_CHIP_NUM_57711 0x164f
#define BNX2X_CHIP_NUM_57711E 0x1650
+#define BNX2X_CHIP_NUM_57712 0x1662
+#define BNX2X_CHIP_NUM_57712E 0x1663
+#define BNX2X_CHIP_NUM_57713 0x1651
+#define BNX2X_CHIP_NUM_57713E 0x1652
+
#define BNX2X_CHIP_NUM(x) (x >> 16)
+#define BNX2X_CHIP_IS_57710(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
#define BNX2X_CHIP_IS_57711(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
#define BNX2X_CHIP_IS_57711E(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
#define BNX2X_CHIP_IS_E1H(x) \
(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
+#define BNX2X_CHIP_IS_57712(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
+#define BNX2X_CHIP_IS_57712E(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
+#define BNX2X_CHIP_IS_57713(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
+#define BNX2X_CHIP_IS_57713E(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
+#define BNX2X_CHIP_IS_E2(x) \
+ (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
+ BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+
#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -358,19 +412,35 @@ struct bnx2x_bd_chain_next {
(BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
((x) + 2) : ((x) + 1)
-#define BNX2X_DEF_SB_ID 16
+#define BNX2X_DEF_SB_ID HC_SP_SB_ID
-#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
- ((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
- USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
- USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
+#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
#define BNX2X_SHMEM_ADDR(base, field) (base + \
offsetof(struct shmem_region, field))
-#define CNIC_PORT(cp) ((cp)->func % PORT_MAX)
+#define BNX2X_SHMEM2_ADDR(base, field) (base + \
+ offsetof(struct shmem2_region, field))
+
+#define BNX2X_SHMEM2_HAS(base, field) \
+ ((base) && \
+ (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
+ offsetof(struct shmem2_region, field)))
+
+#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
-#define CNIC_E1HVN(cp) ((cp)->func >> 1)
+#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
+ (CNIC_FUNC(cp) & 1))
+#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
+
+#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
+ (CNIC_E1HVN(cp) << 17) | (x))
+
+#define BNX2X_SW_CID(x) (x & 0x1ffff)
+
+#define BNX2X_CL_QZONE_ID(cp, cli) \
+ (cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
#endif
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 7ce694d41b6..328e8b2765a 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -14,6 +14,7 @@
/* KWQ (kernel work queue) request op codes */
#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
+#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8)
#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
@@ -48,11 +49,14 @@
#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
/* KCQ (kernel completion queue) completion status */
-#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
-#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
-#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
-#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
+#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
+#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
+
+#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
+#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1)
#define L4_LAYER_CODE (4)
#define L2_LAYER_CODE (2)
@@ -585,6 +589,100 @@ struct l4_kwq_upload {
*/
/*
+ * The iscsi aggregative context of Cstorm
+ */
+struct cstorm_iscsi_ag_context {
+ u32 agg_vars1;
+#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
+#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
+#if defined(__BIG_ENDIAN)
+ u8 __aux1_th;
+ u8 __aux1_val;
+ u16 __agg_vars2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_vars2;
+ u8 __aux1_val;
+ u8 __aux1_th;
+#endif
+ u32 rel_seq;
+ u32 rel_seq_th;
+#if defined(__BIG_ENDIAN)
+ u16 hq_cons;
+ u16 hq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_prod;
+ u16 hq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved62;
+ u8 __reserved61;
+ u8 __reserved60;
+ u8 __reserved59;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __reserved59;
+ u8 __reserved60;
+ u8 __reserved61;
+ u8 __reserved62;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __reserved64;
+ u16 __cq_u_prod0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __cq_u_prod0;
+ u16 __reserved64;
+#endif
+ u32 __cq_u_prod1;
+#if defined(__BIG_ENDIAN)
+ u16 __agg_vars3;
+ u16 __cq_u_prod2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __cq_u_prod2;
+ u16 __agg_vars3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __aux2_th;
+ u16 __cq_u_prod3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __cq_u_prod3;
+ u16 __aux2_th;
+#endif
+};
+
+/*
* iSCSI context region, used only in iSCSI
*/
struct ustorm_iscsi_rq_db {
@@ -696,7 +794,7 @@ struct ustorm_iscsi_st_context {
struct regpair task_pbl_base;
struct regpair tce_phy_addr;
struct ustorm_iscsi_placement_db place_db;
- u32 data_rcv_seq;
+ u32 reserved8;
u32 rem_rcv_len;
#if defined(__BIG_ENDIAN)
u16 hdr_itt;
@@ -713,8 +811,10 @@ struct ustorm_iscsi_st_context {
#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
u8 task_pdu_cache_index;
u8 task_pbe_cache_index;
#elif defined(__LITTLE_ENDIAN)
@@ -725,8 +825,10 @@ struct ustorm_iscsi_st_context {
#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
u8 hdr_second_byte_union;
#endif
#if defined(__BIG_ENDIAN)
@@ -777,14 +879,14 @@ struct ustorm_iscsi_st_context {
*/
struct tstorm_tcp_st_context_section {
u32 flags1;
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
-#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
@@ -793,11 +895,11 @@ struct tstorm_tcp_st_context_section {
#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
u32 flags2;
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
@@ -810,18 +912,18 @@ struct tstorm_tcp_st_context_section {
#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30)
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31)
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
#if defined(__BIG_ENDIAN)
- u16 reserved_slowpath;
- u8 tcp_sm_state_3b;
- u8 rto_exp_3b;
+ u16 mss;
+ u8 tcp_sm_state;
+ u8 rto_exp;
#elif defined(__LITTLE_ENDIAN)
- u8 rto_exp_3b;
- u8 tcp_sm_state_3b;
- u16 reserved_slowpath;
+ u8 rto_exp;
+ u8 tcp_sm_state;
+ u16 mss;
#endif
u32 rcv_nxt;
u32 timestamp_recent;
@@ -846,11 +948,11 @@ struct tstorm_tcp_st_context_section {
#if defined(__BIG_ENDIAN)
u8 statistics_counter_id;
u8 ooo_support_mode;
- u8 snd_wnd_scale_4b;
+ u8 snd_wnd_scale;
u8 dup_ack_count;
#elif defined(__LITTLE_ENDIAN)
u8 dup_ack_count;
- u8 snd_wnd_scale_4b;
+ u8 snd_wnd_scale;
u8 ooo_support_mode;
u8 statistics_counter_id;
#endif
@@ -860,13 +962,21 @@ struct tstorm_tcp_st_context_section {
u32 isle_start_seq;
u32 isle_end_seq;
#if defined(__BIG_ENDIAN)
- u16 mss;
+ u16 second_isle_address;
u16 recent_seg_wnd;
#elif defined(__LITTLE_ENDIAN)
u16 recent_seg_wnd;
- u16 mss;
+ u16 second_isle_address;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 max_isles_ever_happened;
+ u8 isles_number;
+ u16 last_isle_address;
+#elif defined(__LITTLE_ENDIAN)
+ u16 last_isle_address;
+ u8 isles_number;
+ u8 max_isles_ever_happened;
#endif
- u32 reserved4;
u32 max_rt_time;
#if defined(__BIG_ENDIAN)
u16 lsb_mac_address;
@@ -876,7 +986,7 @@ struct tstorm_tcp_st_context_section {
u16 lsb_mac_address;
#endif
u32 msb_mac_address;
- u32 reserved2;
+ u32 rightmost_received_seq;
};
/*
@@ -951,7 +1061,7 @@ struct tstorm_iscsi_st_context_section {
u8 scratchpad_idx;
struct iscsi_term_vars term_vars;
#endif
- u32 reserved2;
+ u32 process_nxt;
};
/*
@@ -1174,24 +1284,12 @@ struct xstorm_iscsi_ag_context {
#endif
#if defined(__BIG_ENDIAN)
u8 cdu_reserved;
- u8 agg_vars4;
-#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
-#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
+ u8 __agg_vars4;
u8 agg_vars3;
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
u8 agg_vars2;
#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
@@ -1222,21 +1320,9 @@ struct xstorm_iscsi_ag_context {
u8 agg_vars3;
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
- u8 agg_vars4;
-#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
-#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 __agg_vars4;
u8 cdu_reserved;
#endif
u32 more_to_send;
@@ -1270,8 +1356,8 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
-#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
-#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@@ -1286,8 +1372,8 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
u8 agg_val3_th;
u8 agg_vars6;
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
@@ -1310,8 +1396,8 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
-#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
-#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@@ -1326,14 +1412,14 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
#endif
#if defined(__BIG_ENDIAN)
u16 __agg_val11_th;
- u16 __agg_val11;
+ u16 __gen_data;
#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val11;
+ u16 __gen_data;
u16 __agg_val11_th;
#endif
#if defined(__BIG_ENDIAN)
@@ -1384,7 +1470,7 @@ struct xstorm_iscsi_ag_context {
#endif
u32 hq_cons_tcp_seq;
u32 exp_stat_sn;
- u32 agg_misc5;
+ u32 rst_seq_num;
};
/*
@@ -1478,12 +1564,12 @@ struct tstorm_iscsi_ag_context {
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
u8 state;
#elif defined(__LITTLE_ENDIAN)
u8 state;
@@ -1496,63 +1582,63 @@ struct tstorm_iscsi_ag_context {
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
u16 ulp_credit;
#endif
#if defined(__BIG_ENDIAN)
u16 __agg_val4;
u16 agg_vars2;
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
-#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
-#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
#elif defined(__LITTLE_ENDIAN)
u16 agg_vars2;
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
-#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
-#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
@@ -1563,100 +1649,6 @@ struct tstorm_iscsi_ag_context {
};
/*
- * The iscsi aggregative context of Cstorm
- */
-struct cstorm_iscsi_ag_context {
- u32 agg_vars1;
-#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
-#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
-#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
-#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
-#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
-#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
-#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
-#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
-#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
-#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
-#if defined(__BIG_ENDIAN)
- u8 __aux1_th;
- u8 __aux1_val;
- u16 __agg_vars2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_vars2;
- u8 __aux1_val;
- u8 __aux1_th;
-#endif
- u32 rel_seq;
- u32 rel_seq_th;
-#if defined(__BIG_ENDIAN)
- u16 hq_cons;
- u16 hq_prod;
-#elif defined(__LITTLE_ENDIAN)
- u16 hq_prod;
- u16 hq_cons;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __reserved62;
- u8 __reserved61;
- u8 __reserved60;
- u8 __reserved59;
-#elif defined(__LITTLE_ENDIAN)
- u8 __reserved59;
- u8 __reserved60;
- u8 __reserved61;
- u8 __reserved62;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __reserved64;
- u16 __cq_u_prod0;
-#elif defined(__LITTLE_ENDIAN)
- u16 __cq_u_prod0;
- u16 __reserved64;
-#endif
- u32 __cq_u_prod1;
-#if defined(__BIG_ENDIAN)
- u16 __agg_vars3;
- u16 __cq_u_prod2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __cq_u_prod2;
- u16 __agg_vars3;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __aux2_th;
- u16 __cq_u_prod3;
-#elif defined(__LITTLE_ENDIAN)
- u16 __cq_u_prod3;
- u16 __aux2_th;
-#endif
-};
-
-/*
* The iscsi aggregative context of Ustorm
*/
struct ustorm_iscsi_ag_context {
@@ -1746,8 +1738,8 @@ struct ustorm_iscsi_ag_context {
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
u8 decision_rule_enable_bits;
@@ -1790,8 +1782,8 @@ struct ustorm_iscsi_ag_context {
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
u16 __reserved2;
@@ -1799,22 +1791,6 @@ struct ustorm_iscsi_ag_context {
};
/*
- * Timers connection context
- */
-struct iscsi_timers_block_context {
- u32 __reserved_0;
- u32 __reserved_1;
- u32 __reserved_2;
- u32 flags;
-#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
-#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
-#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
-#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
-#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
-#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
-};
-
-/*
* Ethernet context section, shared in TOE, RDMA and ISCSI
*/
struct xstorm_eth_context_section {
@@ -1963,7 +1939,7 @@ struct xstorm_tcp_context_section {
#endif
#if defined(__BIG_ENDIAN)
u8 original_nagle_1b;
- u8 ts_enabled_1b;
+ u8 ts_enabled;
u16 tcp_params;
#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
@@ -1973,8 +1949,8 @@ struct xstorm_tcp_context_section {
#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
-#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
-#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
@@ -1991,15 +1967,15 @@ struct xstorm_tcp_context_section {
#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
-#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
-#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
- u8 ts_enabled_1b;
+ u8 ts_enabled;
u8 original_nagle_1b;
#endif
#if defined(__BIG_ENDIAN)
@@ -2030,8 +2006,8 @@ struct xstorm_common_context_section {
#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
-#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
-#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
+#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
+#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
u8 ip_version_1b;
#elif defined(__LITTLE_ENDIAN)
u8 ip_version_1b;
@@ -2042,8 +2018,8 @@ struct xstorm_common_context_section {
#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
-#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
-#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
+#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
+#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
u16 reserved;
#endif
};
@@ -2284,7 +2260,7 @@ struct iscsi_context {
struct tstorm_iscsi_ag_context tstorm_ag_context;
struct cstorm_iscsi_ag_context cstorm_ag_context;
struct ustorm_iscsi_ag_context ustorm_ag_context;
- struct iscsi_timers_block_context timers_context;
+ struct timers_block_context timers_context;
struct regpair upb_context;
struct xstorm_iscsi_st_context xstorm_st_context;
struct regpair xpb_context;
@@ -2434,16 +2410,16 @@ struct l5cm_packet_size {
* l5cm connection parameters
*/
union l5cm_reduce_param_union {
- u32 passive_side_scramble_key;
- u32 pcs_id;
+ u32 opaque1;
+ u32 opaque2;
};
/*
* l5cm connection parameters
*/
struct l5cm_reduce_conn {
- union l5cm_reduce_param_union param;
- u32 isn;
+ union l5cm_reduce_param_union opaque1;
+ u32 opaque2;
};
/*
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 344c842d55a..0dbeaec4f03 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.1.3"
-#define CNIC_MODULE_RELDATE "June 24, 2010"
+#define CNIC_MODULE_VERSION "2.2.6"
+#define CNIC_MODULE_RELDATE "Oct 12, 2010"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -80,18 +80,15 @@ struct kcqe {
#define DRV_CTL_IO_RD_CMD 0x102
#define DRV_CTL_CTX_WR_CMD 0x103
#define DRV_CTL_CTXTBL_WR_CMD 0x104
-#define DRV_CTL_COMPLETION_CMD 0x105
+#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105
#define DRV_CTL_START_L2_CMD 0x106
#define DRV_CTL_STOP_L2_CMD 0x107
+#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
struct cnic_ctl_completion {
u32 cid;
};
-struct drv_ctl_completion {
- u32 comp_count;
-};
-
struct cnic_ctl_info {
int cmd;
union {
@@ -100,6 +97,10 @@ struct cnic_ctl_info {
} data;
};
+struct drv_ctl_spq_credit {
+ u32 credit_count;
+};
+
struct drv_ctl_io {
u32 cid_addr;
u32 offset;
@@ -115,7 +116,7 @@ struct drv_ctl_l2_ring {
struct drv_ctl_info {
int cmd;
union {
- struct drv_ctl_completion comp;
+ struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
char bytes[MAX_DRV_CTL_DATA];
@@ -138,6 +139,7 @@ struct cnic_irq {
unsigned int vector;
void *status_blk;
u32 status_blk_num;
+ u32 status_blk_num2;
u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001
};
@@ -152,6 +154,7 @@ struct cnic_eth_dev {
struct pci_dev *pdev;
void __iomem *io_base;
void __iomem *io_base2;
+ void *iro_arr;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;
@@ -160,7 +163,9 @@ struct cnic_eth_dev {
u32 max_iscsi_conn;
u32 max_fcoe_conn;
u32 max_rdma_conn;
- u32 reserved0[2];
+ u32 fcoe_init_cid;
+ u16 iscsi_l2_client_id;
+ u16 iscsi_l2_cid;
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index e1f6156b371..fec939f8f65 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,7 +38,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#include <asm/atomic.h>
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
-#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
@@ -227,7 +227,7 @@ static void cpmac_dump_regs(struct net_device *dev)
for (i = 0; i < CPMAC_REG_END; i += 4) {
if (i % 16 == 0) {
if (i)
- printk("\n");
+ pr_cont("\n");
printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
priv->regs + i);
}
@@ -262,7 +262,7 @@ static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
for (i = 0; i < skb->len; i++) {
if (i % 16 == 0) {
if (i)
- printk("\n");
+ pr_cont("\n");
printk(KERN_DEBUG "%s: data[%p]:", dev->name,
skb->data + i);
}
@@ -391,7 +391,7 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
if (likely(skb)) {
skb_put(desc->skb, desc->datalen);
desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
- desc->skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(desc->skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += desc->datalen;
result = desc->skb;
@@ -506,7 +506,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
"restart rx from a descriptor that's "
"not free: %p\n",
priv->dev->name, restart);
- goto fatal_error;
+ goto fatal_error;
}
cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
@@ -873,7 +873,8 @@ static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
-static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static void cpmac_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -888,7 +889,8 @@ static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam
ring->tx_pending = 1;
}
-static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static int cpmac_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -1012,8 +1014,8 @@ static int cpmac_open(struct net_device *dev)
priv->rx_head->prev->hw_next = (u32)0;
- if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
- dev->name, dev))) {
+ res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
+ if (res) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to obtain irq\n",
dev->name);
@@ -1133,7 +1135,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
}
if (phy_id == PHY_MAX_ADDR) {
- dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n");
+ dev_err(&pdev->dev, "no PHY present, falling back "
+ "to switch on MDIO bus 0\n");
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
}
@@ -1169,9 +1172,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
- snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
@@ -1182,7 +1186,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
goto fail;
}
- if ((rc = register_netdev(dev))) {
+ rc = register_netdev(dev);
+ if (rc) {
printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
dev->name);
goto fail;
@@ -1248,11 +1253,13 @@ int __devinit cpmac_init(void)
cpmac_mii->reset(cpmac_mii);
- for (i = 0; i < 300; i++)
- if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE)))
+ for (i = 0; i < 300; i++) {
+ mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
+ if (mask)
break;
else
msleep(10);
+ }
mask &= 0x7fffffff;
if (mask & (mask - 1)) {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585d960..a04ce6a5f63 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,7 +1286,7 @@ irq_err:
/*
* Release resources when all the ports and offloading have been stopped.
*/
-static void cxgb_down(struct adapter *adapter)
+static void cxgb_down(struct adapter *adapter, int on_wq)
{
t3_sge_stop(adapter);
spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
@@ -1296,7 +1296,8 @@ static void cxgb_down(struct adapter *adapter)
free_irq_resources(adapter);
quiesce_rx(adapter);
t3_sge_stop(adapter);
- flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
+ if (!on_wq)
+ flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
@@ -1374,7 +1375,7 @@ static int offload_close(struct t3cdev *tdev)
clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
if (!adapter->open_device_map)
- cxgb_down(adapter);
+ cxgb_down(adapter, 0);
cxgb3_offload_deactivate(adapter);
return 0;
@@ -1398,7 +1399,10 @@ static int cxgb_open(struct net_device *dev)
"Could not initialize offload capabilities\n");
}
- dev->real_num_tx_queues = pi->nqsets;
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ return err;
link_start(dev);
t3_port_intr_enable(adapter, pi->port_id);
netif_tx_start_all_queues(dev);
@@ -1409,7 +1413,7 @@ static int cxgb_open(struct net_device *dev)
return 0;
}
-static int cxgb_close(struct net_device *dev)
+static int __cxgb_close(struct net_device *dev, int on_wq)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1436,12 +1440,17 @@ static int cxgb_close(struct net_device *dev)
cancel_delayed_work_sync(&adapter->adap_check_task);
if (!adapter->open_device_map)
- cxgb_down(adapter);
+ cxgb_down(adapter, on_wq);
cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
return 0;
}
+static int cxgb_close(struct net_device *dev)
+{
+ return __cxgb_close(dev, 0);
+}
+
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -2296,6 +2305,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
case CHELSIO_GET_QSET_NUM:{
struct ch_reg edata;
+ memset(&edata, 0, sizeof(struct ch_reg));
+
edata.cmd = CHELSIO_GET_QSET_NUM;
edata.val = pi->nqsets;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
@@ -2862,7 +2873,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
spin_unlock(&adapter->work_lock);
}
-static int t3_adapter_error(struct adapter *adapter, int reset)
+static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
int i, ret = 0;
@@ -2877,7 +2888,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
struct net_device *netdev = adapter->port[i];
if (netif_running(netdev))
- cxgb_close(netdev);
+ __cxgb_close(netdev, on_wq);
}
/* Stop SGE timers */
@@ -2948,7 +2959,7 @@ static void fatal_error_task(struct work_struct *work)
int err = 0;
rtnl_lock();
- err = t3_adapter_error(adapter, 1);
+ err = t3_adapter_error(adapter, 1, 1);
if (!err)
err = t3_reenable_adapter(adapter);
if (!err)
@@ -2998,7 +3009,7 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- ret = t3_adapter_error(adapter, 0);
+ ret = t3_adapter_error(adapter, 0, 0);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c6485b39eb0..21db7491f61 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -64,7 +64,7 @@ static inline int offload_activated(struct t3cdev *tdev)
{
const struct adapter *adapter = tdev2adap(tdev);
- return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
+ return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
/**
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index cb42353c9fd..6990f6c6522 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1997,6 +1997,10 @@
#define A_PL_RST 0x6f0
+#define S_FATALPERREN 4
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN V_FATALPERREN(1U)
+
#define S_CRSTWRM 1
#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
#define F_CRSTWRM V_CRSTWRM(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 8ff96c6f6de..c5a142bea5e 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2022,7 +2022,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
if (unlikely(p->vlan_valid)) {
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 427c451be1a..421d5589cec 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1408,6 +1408,7 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
fatal++;
CH_ALERT(adapter, "%s (0x%x)\n",
acts->msg, status & acts->mask);
+ status &= ~acts->mask;
} else if (acts->msg)
CH_WARN(adapter, "%s (0x%x)\n",
acts->msg, status & acts->mask);
@@ -1843,11 +1844,10 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
t3_os_link_fault_handler(adap, idx);
}
- t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
-
if (cause & XGM_INTR_FATAL)
t3_fatal_err(adap);
+ t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
return cause != 0;
}
@@ -3569,6 +3569,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
t3_write_reg(adapter, A_PM1_TX_MODE, 0);
chan_init_hw(adapter, adapter->params.chan_map);
t3_sge_init(adapter, &adapter->params.sge);
+ t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3682,7 +3683,7 @@ static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
mc7->name = name;
mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
- mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
+ mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
mc7->width = G_WIDTH(cfg);
}
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 6e562c0dad7..3ece9f5069f 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -463,6 +463,8 @@ struct sge {
u8 counter_val[SGE_NCOUNTERS];
unsigned int starve_thres;
u8 idma_state[2];
+ unsigned int egr_start;
+ unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c327527fbbc..22169a73b7f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0xa000, 0), /* PE10K */
- CH_DEVICE(0x4001, 0),
- CH_DEVICE(0x4002, 0),
- CH_DEVICE(0x4003, 0),
- CH_DEVICE(0x4004, 0),
- CH_DEVICE(0x4005, 0),
- CH_DEVICE(0x4006, 0),
- CH_DEVICE(0x4007, 0),
- CH_DEVICE(0x4008, 0),
- CH_DEVICE(0x4009, 0),
- CH_DEVICE(0x400a, 0),
+ CH_DEVICE(0x4001, -1),
+ CH_DEVICE(0x4002, -1),
+ CH_DEVICE(0x4003, -1),
+ CH_DEVICE(0x4004, -1),
+ CH_DEVICE(0x4005, -1),
+ CH_DEVICE(0x4006, -1),
+ CH_DEVICE(0x4007, -1),
+ CH_DEVICE(0x4008, -1),
+ CH_DEVICE(0x4009, -1),
+ CH_DEVICE(0x400a, -1),
+ CH_DEVICE(0x4401, 4),
+ CH_DEVICE(0x4402, 4),
+ CH_DEVICE(0x4403, 4),
+ CH_DEVICE(0x4404, 4),
+ CH_DEVICE(0x4405, 4),
+ CH_DEVICE(0x4406, 4),
+ CH_DEVICE(0x4407, 4),
+ CH_DEVICE(0x4408, 4),
+ CH_DEVICE(0x4409, 4),
+ CH_DEVICE(0x440a, 4),
{ 0, }
};
@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
- struct sge_txq *txq = q->adap->sge.egr_map[qid];
+ struct sge_txq *txq;
+ txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
txq->restarts++;
- if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+ if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
@@ -658,6 +669,15 @@ static int setup_rss(struct adapter *adap)
}
/*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+ qid -= p->ingr_start;
+ return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
* Wait until all NAPI handlers are descheduled.
*/
static void quiesce_rx(struct adapter *adap)
@@ -1671,27 +1691,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
return 0;
}
-/*
- * Translate a physical EEPROM address to virtual. The first 1K is accessed
- * through virtual addresses starting at 31K, the rest is accessed through
- * virtual addresses starting at 0. This mapping is correct only for PF0.
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
*/
-static int eeprom_ptov(unsigned int phys_addr)
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
+ fn *= sz;
if (phys_addr < 1024)
return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
if (phys_addr < EEPROMSIZE)
- return phys_addr - 1024;
+ return phys_addr - 1024 - fn;
return -EINVAL;
}
/*
* The next two routines implement eeprom read/write from physical addresses.
- * The physical->virtual translation is correct only for PF0.
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr);
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1700,7 +1734,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr);
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -1743,6 +1777,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+ if (adapter->fn > 0) {
+ u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/*
* RMW possibly needed for first or last words.
@@ -2304,7 +2346,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->peer_port = htons(0);
req->local_ip = sip;
req->peer_ip = htonl(0);
- chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2346,7 +2388,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
req->peer_ip_hi = cpu_to_be64(0);
req->peer_ip_lo = cpu_to_be64(0);
- chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2721,7 +2763,10 @@ static int cxgb_open(struct net_device *dev)
return err;
}
- dev->real_num_tx_queues = pi->nqsets;
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ return err;
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -3061,12 +3106,16 @@ static int adap_init0(struct adapter *adap)
params[2] = FW_PARAM_PFVF(L2T_END);
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
+ params[5] = FW_PARAM_PFVF(IQFLINT_START);
+ params[6] = FW_PARAM_PFVF(EQ_START);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
if (ret < 0)
goto bye;
port_vec = val[0];
adap->tids.ftid_base = val[3];
adap->tids.nftids = val[4] - val[3] + 1;
+ adap->sge.ingr_start = val[5];
+ adap->sge.egr_start = val[6];
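IQFLINT_START and EQ_START appear to be the absolute hardware IDs of this function's first ingress and egress queues; the sge.c hunks below consistently subtract them before indexing the per-adapter maps, e.g. (pattern copied from those hunks, shown here only for orientation):
adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
adap->sge.egr_map[q->cntxt_id - adap->sge.egr_start] = q;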
if (c.ofldcaps) {
/* query offload-related parameters */
@@ -3814,7 +3863,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- } else if (PCI_FUNC(pdev->devfn) > 0)
+ } else
pci_release_regions(pdev);
}
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index bf38cfc5756..9967f3debce 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -557,7 +557,8 @@ out: cred = q->avail - cred;
if (unlikely(fl_starving(q))) {
smp_wmb();
- set_bit(q->cntxt_id, adap->sge.starving_fl);
+ set_bit(q->cntxt_id - adap->sge.egr_start,
+ adap->sge.starving_fl);
}
return cred;
@@ -974,7 +975,7 @@ out_free: dev_kfree_skb(skb);
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+ TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1213,7 +1214,8 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
{
q->mapping_err++;
q->q.stops++;
- set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+ set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+ q->adap->sge.txq_maperr);
}
/**
@@ -1603,7 +1605,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.rx_cso++;
}
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -1835,6 +1837,7 @@ static unsigned int process_intrq(struct adapter *adap)
if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
+ qid -= adap->sge.ingr_start;
napi_schedule(&adap->sge.ingr_map[qid]->napi);
}
@@ -2050,14 +2053,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
/* set offset to -1 to distinguish ingress queues without FL */
iq->offset = fl ? 0 : -1;
- adap->sge.ingr_map[iq->cntxt_id] = iq;
+ adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
if (fl) {
fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
- adap->sge.egr_map[fl->cntxt_id] = fl;
+ adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
@@ -2087,7 +2090,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
q->cntxt_id = id;
- adap->sge.egr_map[id] = q;
+ adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
@@ -2259,7 +2262,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
{
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
- adap->sge.ingr_map[rq->cntxt_id] = NULL;
+ adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 10a05556577..c26b455f37d 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -42,6 +42,7 @@ enum {
MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
EEPROMSIZE = 17408, /* Serial EEPROM physical size */
EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 0969f2fbc1b..940584a8a64 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -487,6 +487,11 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
+ FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+ FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
+ FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
};
/*
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 7b6d07f50c7..555ecc5a2e9 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -748,7 +748,10 @@ static int cxgb4vf_open(struct net_device *dev)
/*
* Note that this interface is up and start everything up ...
*/
- dev->real_num_tx_queues = pi->nqsets;
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ return err;
set_bit(pi->port_id, &adapter->open_device_map);
link_start(dev);
netif_tx_start_all_queues(dev);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index eb5a1c9cb2d..f10864ddafb 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1520,7 +1520,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
- skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
@@ -1535,7 +1534,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
rxq->stats.rx_cso++;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index f3650fd096f..1c51a757611 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -676,7 +676,7 @@ static int de620_rx_intr(struct net_device *dev)
de620_set_register(dev, W_NPRF, next_rx_page);
pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
- return (next_rx_page != curr_page); /* That was slightly tricky... */
+ return next_rx_page != curr_page; /* That was slightly tricky... */
}
/*********************************************
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index d7de376d717..219eb5ad5c1 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1255,7 +1255,7 @@ static int __devinit dec_lance_probe(struct device *bdev, const int type)
*/
init_timer(&lp->multicast_timer);
lp->multicast_timer.data = (unsigned long) dev;
- lp->multicast_timer.function = &lance_set_multicast_retry;
+ lp->multicast_timer.function = lance_set_multicast_retry;
ret = register_netdev(dev);
if (ret) {
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index e5667c55844..417e1438562 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1024,7 +1024,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
&data) != DFX_K_SUCCESS) {
printk("%s: Could not read adapter factory MAC address!\n",
print_name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
le32 = cpu_to_le32(data);
memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
@@ -1033,7 +1033,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
&data) != DFX_K_SUCCESS) {
printk("%s: Could not read adapter factory MAC address!\n",
print_name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
le32 = cpu_to_le32(data);
memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
@@ -1075,7 +1075,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
if (top_v == NULL) {
printk("%s: Could not allocate memory for host buffers "
"and structures!\n", print_name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
memset(top_v, 0, alloc_size); /* zero out memory before continuing */
top_p = bp->kmalloced_dma; /* get physical address of buffer */
@@ -1145,7 +1145,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
- return(DFX_K_SUCCESS);
+ return DFX_K_SUCCESS;
}
@@ -1195,7 +1195,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
{
printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/*
@@ -1229,7 +1229,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
NULL) != DFX_K_SUCCESS)
{
printk("%s: Could not set adapter burst size!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/*
@@ -1246,7 +1246,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
NULL) != DFX_K_SUCCESS)
{
printk("%s: Could not set consumer block address!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/*
@@ -1278,7 +1278,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
{
printk("%s: DMA command request failed!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/* Set the initial values for eFDXEnable and MACTReq MIB objects */
@@ -1294,7 +1294,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
{
printk("%s: DMA command request failed!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/* Initialize adapter CAM */
@@ -1302,7 +1302,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
printk("%s: Adapter CAM update failed!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/* Initialize adapter filters */
@@ -1310,7 +1310,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
{
printk("%s: Adapter filters update failed!\n", bp->dev->name);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/*
@@ -1328,7 +1328,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
printk("%s: Receive buffer allocation failed\n", bp->dev->name);
if (get_buffers)
dfx_rcv_flush(bp);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
@@ -1339,13 +1339,13 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
printk("%s: Start command failed\n", bp->dev->name);
if (get_buffers)
dfx_rcv_flush(bp);
- return(DFX_K_FAILURE);
+ return DFX_K_FAILURE;
}
/* Initialization succeeded, reenable PDQ interrupts */
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
- return(DFX_K_SUCCESS);
+ return DFX_K_SUCCESS;
}
@@ -1434,7 +1434,7 @@ static int dfx_open(struct net_device *dev)
/* Set device structure info */
netif_start_queue(dev);
- return(0);
+ return 0;
}
@@ -1526,7 +1526,7 @@ static int dfx_close(struct net_device *dev)
free_irq(dev->irq, dev);
- return(0);
+ return 0;
}
@@ -2027,7 +2027,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
- return((struct net_device_stats *) &bp->stats);
+ return (struct net_device_stats *)&bp->stats;
/* Fill the bp->stats structure with the SMT MIB object values */
@@ -2128,7 +2128,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
- return((struct net_device_stats *) &bp->stats);
+ return (struct net_device_stats *)&bp->stats;
/* Fill the bp->stats structure with the FDDI counter values */
@@ -2144,7 +2144,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
- return((struct net_device_stats *) &bp->stats);
+ return (struct net_device_stats *)&bp->stats;
}
@@ -2354,7 +2354,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
{
DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
}
- return(0); /* always return zero */
+ return 0; /* always return zero */
}
@@ -2438,8 +2438,8 @@ static int dfx_ctl_update_cam(DFX_board_t *bp)
/* Issue command to update adapter CAM, then return */
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
- return(DFX_K_FAILURE);
- return(DFX_K_SUCCESS);
+ return DFX_K_FAILURE;
+ return DFX_K_SUCCESS;
}
@@ -2504,8 +2504,8 @@ static int dfx_ctl_update_filters(DFX_board_t *bp)
/* Issue command to update adapter filters, then return */
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
- return(DFX_K_FAILURE);
- return(DFX_K_SUCCESS);
+ return DFX_K_FAILURE;
+ return DFX_K_SUCCESS;
}
@@ -2561,7 +2561,7 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
(status == PI_STATE_K_HALTED) ||
(status == PI_STATE_K_DMA_UNAVAIL) ||
(status == PI_STATE_K_UPGRADE))
- return(DFX_K_OUTSTATE);
+ return DFX_K_OUTSTATE;
/* Put response buffer on the command response queue */
@@ -2599,7 +2599,7 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
- return(DFX_K_HW_TIMEOUT);
+ return DFX_K_HW_TIMEOUT;
/* Bump (and wrap) the completion index and write out to register */
@@ -2619,14 +2619,14 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
- return(DFX_K_HW_TIMEOUT);
+ return DFX_K_HW_TIMEOUT;
/* Bump (and wrap) the completion index and write out to register */
bp->cmd_rsp_reg.index.comp += 1;
bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
- return(DFX_K_SUCCESS);
+ return DFX_K_SUCCESS;
}
@@ -2700,7 +2700,7 @@ static int dfx_hw_port_ctrl_req(
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
- return(DFX_K_HW_TIMEOUT);
+ return DFX_K_HW_TIMEOUT;
/*
* If the address of host_data is non-zero, assume caller has supplied a
@@ -2710,7 +2710,7 @@ static int dfx_hw_port_ctrl_req(
if (host_data != NULL)
dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
- return(DFX_K_SUCCESS);
+ return DFX_K_SUCCESS;
}
@@ -2800,7 +2800,7 @@ static int dfx_hw_adap_state_rd(DFX_board_t *bp)
PI_UINT32 port_status; /* Port Status register value */
dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
- return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE);
+ return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
}
@@ -2852,8 +2852,8 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
- return(DFX_K_HW_TIMEOUT);
- return(DFX_K_SUCCESS);
+ return DFX_K_HW_TIMEOUT;
+ return DFX_K_SUCCESS;
}
/*
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index a2f238d20ca..e1a8216ff69 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -465,7 +465,7 @@ rio_open (struct net_device *dev)
init_timer (&np->timer);
np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long) dev;
- np->timer.function = &rio_timer;
+ np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 4fd6b2b4554..9f6aeefa06b 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1056,7 +1056,7 @@ dm9000_rx(struct net_device *dev)
if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
netif_rx(skb);
dev->stats.rx_packets++;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 37dcfdc6345..ff2d29b1785 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -36,6 +36,7 @@
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
+#include <linux/u64_stats_sync.h>
static int numdummies = 1;
@@ -55,21 +56,69 @@ static void set_multicast_list(struct net_device *dev)
{
}
+struct pcpu_dstats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_dstats *dstats;
+ u64 tbytes, tpackets;
+ unsigned int start;
+
+ dstats = per_cpu_ptr(dev->dstats, i);
+ do {
+ start = u64_stats_fetch_begin(&dstats->syncp);
+ tbytes = dstats->tx_bytes;
+ tpackets = dstats->tx_packets;
+ } while (u64_stats_fetch_retry(&dstats->syncp, start));
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpackets;
+ }
+ return stats;
+}
static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->tx_packets++;
+ dstats->tx_bytes += skb->len;
+ u64_stats_update_end(&dstats->syncp);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
+static int dummy_dev_init(struct net_device *dev)
+{
+ dev->dstats = alloc_percpu(struct pcpu_dstats);
+ if (!dev->dstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void dummy_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->dstats);
+ free_netdev(dev);
+}
+
static const struct net_device_ops dummy_netdev_ops = {
+ .ndo_init = dummy_dev_init,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = set_multicast_list,
.ndo_set_mac_address = dummy_set_address,
+ .ndo_get_stats64 = dummy_get_stats64,
};
static void dummy_setup(struct net_device *dev)
@@ -78,14 +127,17 @@ static void dummy_setup(struct net_device *dev)
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = dummy_dev_free;
/* Fill in device structure with ethernet-generic values. */
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
+ dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
+ dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
random_ether_addr(dev->dev_addr);
}
+
static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
{
if (tb[IFLA_ADDRESS]) {
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 8e2eab4e7c7..b0aa9e68990 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2215,10 +2215,10 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
- return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
+ return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
- ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
+ ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}
static int e100_up(struct nic *nic)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 99288b95aea..a881dd0093b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -310,6 +310,9 @@ struct e1000_adapter {
int need_ioport;
bool discarding;
+
+ struct work_struct fifo_stall_task;
+ struct work_struct phy_info_task;
};
enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5cc39ed289c..cb3f84b8179 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -123,8 +123,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
+static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -519,8 +521,21 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
+void e1000_reinit_safe(struct e1000_adapter *adapter)
+{
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ rtnl_lock();
+ e1000_down(adapter);
+ e1000_up(adapter);
+ rtnl_unlock();
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
+ /* if rtnl_lock is not held, the call path is bogus */
+ ASSERT_RTNL();
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -790,6 +805,70 @@ static const struct net_device_ops e1000_netdev_ops = {
};
/**
+ * e1000_init_hw_struct - initialize members of hw struct
+ * @adapter: board private struct
+ * @hw: structure used by e1000_hw.c
+ *
+ * Factors out initialization of the e1000_hw struct to its own function
+ * that can be called very early at init (just after struct allocation).
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ * Returns negative error codes if MAC type setup fails.
+ */
+static int e1000_init_hw_struct(struct e1000_adapter *adapter,
+ struct e1000_hw *hw)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ hw->max_frame_size = adapter->netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+ if (e1000_set_mac_type(hw)) {
+ e_err(probe, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+ e1000_get_bus_info(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ return 0;
+}
+
+/**
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in e1000_pci_tbl
@@ -826,22 +905,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA config, aborting\n");
- goto err_dma;
- }
- }
- pci_using_dac = 0;
- }
-
err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
if (err)
goto err_pci_reg;
@@ -885,6 +948,32 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
}
+ /* make the hw struct ready for any "if (hw->...)" checks below */
+ err = e1000_init_hw_struct(adapter, hw);
+ if (err)
+ goto err_sw_init;
+
+ /*
+ * the workaround applied below limits 64-bit DMA addressing to
+ * 64-bit (PCI-X) hardware, because some 32-bit adapters hang on
+ * Tx when handed 64-bit DMA addresses
+ */
+ pci_using_dac = 0;
+ if ((hw->bus_type == e1000_bus_type_pcix) &&
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /*
+ * according to DMA-API-HOWTO, coherent calls will always
+ * succeed if the set call did
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 1;
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ pr_err("No usable DMA config, aborting\n");
+ goto err_dma;
+ }
+
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -914,8 +1003,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
- if (pci_using_dac)
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -959,21 +1050,21 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(netdev->perm_addr))
e_err(probe, "Invalid MAC Address\n");
- e1000_get_bus_info(hw);
-
init_timer(&adapter->tx_fifo_stall_timer);
- adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+ adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.function = e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.function = e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long)adapter;
+ INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
e1000_check_options(adapter);
@@ -1072,6 +1163,7 @@ err_eeprom:
iounmap(hw->flash_address);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
+err_dma:
err_sw_init:
iounmap(hw->hw_addr);
err_ioremap:
@@ -1079,7 +1171,6 @@ err_ioremap:
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
-err_dma:
pci_disable_device(pdev);
return err;
}
@@ -1131,62 +1222,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* @adapter: board private structure to initialize
*
* e1000_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * e1000_init_hw_struct MUST be called before this function
**/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_id = pdev->subsystem_device;
- hw->revision_id = pdev->revision;
-
- pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
-
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- hw->max_frame_size = netdev->mtu +
- ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
-
- /* identify the MAC */
-
- if (e1000_set_mac_type(hw)) {
- e_err(probe, "Unknown MAC Type\n");
- return -EIO;
- }
-
- switch (hw->mac_type) {
- default:
- break;
- case e1000_82541:
- case e1000_82547:
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- hw->phy_init_script = 1;
- break;
- }
-
- e1000_set_media_type(hw);
-
- hw->wait_autoneg_complete = false;
- hw->tbi_compatibility_en = true;
- hw->adaptive_ifs = true;
-
- /* Copper options */
-
- if (hw->media_type == e1000_media_type_copper) {
- hw->mdix = AUTO_ALL_MODES;
- hw->disable_polarity_correction = false;
- hw->master_slave = E1000_MASTER_SLAVE;
- }
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
@@ -2210,22 +2251,45 @@ static void e1000_set_rx_mode(struct net_device *netdev)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ schedule_work(&adapter->phy_info_task);
+}
+
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ phy_info_task);
struct e1000_hw *hw = &adapter->hw;
+
+ rtnl_lock();
e1000_phy_get_info(hw, &adapter->phy_info);
+ rtnl_unlock();
}
/**
* e1000_82547_tx_fifo_stall - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-
static void e1000_82547_tx_fifo_stall(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ schedule_work(&adapter->fifo_stall_task);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - task to complete work
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ fifo_stall_task);
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 tctl;
+ rtnl_lock();
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((er32(TDT) == er32(TDH)) &&
(er32(TDFT) == er32(TDFH)) &&
@@ -2246,6 +2310,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
}
}
+ rtnl_unlock();
}
bool e1000_has_link(struct e1000_adapter *adapter)
@@ -3113,7 +3178,7 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
- e1000_reinit_locked(adapter);
+ e1000_reinit_safe(adapter);
}
/**
@@ -3535,7 +3600,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
adapter->total_tx_packets += total_tx_packets;
netdev->stats.tx_bytes += total_tx_bytes;
netdev->stats.tx_packets += total_tx_packets;
- return (count < tx_ring->count);
+ return count < tx_ring->count;
}
/**
@@ -3552,7 +3617,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct e1000_hw *hw = &adapter->hw;
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
- skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
/* 82543 or newer only */
if (unlikely(hw->mac_type < e1000_82543)) return;
@@ -3598,13 +3664,14 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
__le16 vlan, struct sk_buff *skb)
{
- if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(vlan) &
- E1000_RXD_SPC_VLAN_MASK);
- } else {
- netif_receive_skb(skb);
- }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
+ vlan_gro_receive(&adapter->napi, adapter->vlgrp,
+ le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
+ skb);
+ else
+ napi_gro_receive(&adapter->napi, skb);
}
/**
@@ -3762,8 +3829,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
goto next_desc;
}
- skb->protocol = eth_type_trans(skb, netdev);
-
e1000_receive_skb(adapter, status, rx_desc->special, skb);
next_desc:
@@ -3926,8 +3991,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
((u32)(rx_desc->errors) << 24),
le16_to_cpu(rx_desc->csum), skb);
- skb->protocol = eth_type_trans(skb, netdev);
-
e1000_receive_skb(adapter, status, rx_desc->special, skb);
next_desc:
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index a4a0d2b6eb1..ca663f19d7d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- return ret_val;
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
- e1000e_set_laa_state_82571(hw, true);
+ e1000e_set_laa_state_82571(hw, true);
+ }
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
+ if (hw->mac.type == e1000_82571) {
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+ }
ret_val = e1000_read_mac_addr_generic(hw);
@@ -1797,7 +1801,8 @@ struct e1000_info e1000_82571_info = {
| FLAG_RESET_OVERWRITES_LAA /* errata */
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
- .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
+ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+ | FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1815,7 +1820,8 @@ struct e1000_info e1000_82572_info = {
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
- .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
+ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+ | FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -1833,6 +1839,7 @@ struct e1000_info e1000_82573_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L1,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 307a72f483e..d3f7a9c3f97 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -446,7 +446,9 @@
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
/* Enable the counting of desc. still to be processed. */
@@ -621,6 +623,7 @@
#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
@@ -643,6 +646,9 @@
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index f9a31c82f87..cee882dd67b 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -153,6 +153,33 @@ struct e1000_info;
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
+#define DEFAULT_RDTR 0
+#define DEFAULT_RADV 8
+#define BURST_RDTR 0x20
+#define BURST_RADV 0x20
+
+/*
+ * in the case of WTHRESH, the 82571/2 hardware appears to write back
+ * 4 descriptors when WTHRESH=5 and 3 descriptors when WTHRESH=4;
+ * since we want 64 bytes written back at a time, set it to 5
+ */
+#define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+ (5 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+#define E1000_RXDCTL_DMA_BURST_ENABLE \
+ (0x01000000 | /* set descriptor granularity */ \
+ (4 << 16) | /* set writeback threshold */ \
+ (4 << 8) | /* set prefetch threshold */ \
+ 0x20) /* set hthresh */
+
+#define E1000_TIDV_FPD (1 << 31)
+#define E1000_RDTR_FPD (1 << 31)
+
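For reference, the TXDCTL value built above decomposes as follows against the field masks added in the defines.h hunk earlier in this patch (E1000_TXDCTL_COUNT_DESC is defined elsewhere and is not broken out here):
/* pthresh = 0x1f -> bits 5:0   (E1000_TXDCTL_PTHRESH 0x0000003F)            */
/* hthresh = 1    -> 1 << 8     (E1000_TXDCTL_HTHRESH 0x00003F00)            */
/* wthresh = 5    -> 5 << 16    (E1000_TXDCTL_WTHRESH 0x003F0000)            */
/* GRAN           -> 0x01000000 (E1000_TXDCTL_GRAN)                          */
/* 0x01000000 | 0x00050000 | 0x00000100 | 0x1f = 0x0105011f, plus COUNT_DESC */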
enum e1000_boards {
board_82571,
board_82572,
@@ -425,6 +452,8 @@ struct e1000_info {
#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
#define FLAG2_HAS_PHY_STATS (1 << 4)
#define FLAG2_HAS_EEE (1 << 5)
+#define FLAG2_DMA_BURST (1 << 6)
+#define FLAG2_DISABLE_AIM (1 << 8)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 45aebb4a6fe..24f8ac9cf70 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1494,6 +1494,7 @@ struct e1000_info e1000_es2_info = {
| FLAG_APME_CHECK_PORT_B
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
| FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
+ .flags2 = FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_80003es2lan,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 6355a1b779d..b7f15b3f0e0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -368,7 +368,7 @@ out:
static u32 e1000_get_rx_csum(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return (adapter->flags & FLAG_RX_CSUM_ENABLED);
+ return adapter->flags & FLAG_RX_CSUM_ENABLED;
}
static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
@@ -389,7 +389,7 @@ static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
static u32 e1000_get_tx_csum(struct net_device *netdev)
{
- return ((netdev->features & NETIF_F_HW_CSUM) != 0);
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f726f..ba302a5c2c3 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@ enum e1e_registers {
E1000_SCTL = 0x00024, /* SerDes Control - RW */
E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
+ E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
E1000_FCT = 0x00030, /* Flow Control Type - RW */
E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d12711..e3374d9a247 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
@@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- u32 ctrl;
+ u32 ctrl, fwsm;
s32 ret_val = 0;
phy->addr = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
* disabled, then toggle the LANPHYPC Value bit to force
* the interconnect to PCIe mode.
*/
- if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ fwsm = er32(FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, ctrl);
msleep(50);
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
}
/*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
if (ret_val)
goto out;
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /* Disable PHY configuration by hardware, config by software */
- if (mac->type == e1000_pch2lan) {
- u32 extcnf_ctrl = er32(EXTCNF_CTRL);
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- }
+ /* Gate automatic PHY configuration by hardware on managed 82579 */
+ if ((mac->type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
return 0;
}
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
goto out;
}
+ if (hw->mac.type == e1000_pch2lan) {
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ goto out;
+ }
+
/*
* Check if there was DownShift, must be checked
* immediately after link-up
@@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = er32(STRAP);
+ s32 ret_val = 0;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
* @hw: pointer to the HW structure
*
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
- struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
if (phy->type != e1000_phy_igp_3)
return ret_val;
- if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
break;
}
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- ((hw->mac.type == e1000_pchlan) ||
- (hw->mac.type == e1000_pch2lan))) {
+ if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) ||
+ (hw->mac.type == e1000_pch2lan)) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
*/
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
+ ret_val = e1000_write_smbus_addr(hw);
if (ret_val)
goto out;
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
goto out;
/* Enable jumbo frame workaround in the PHY */
- e1e_rphy(hw, PHY_REG(769, 20), &data);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
- if (ret_val)
- goto out;
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
goto out;
e1e_rphy(hw, PHY_REG(769, 16), &data);
data &= ~(1 << 13);
- data |= (1 << 12);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
mac_reg = er32(RCTL);
mac_reg &= ~E1000_RCTL_SECRC;
- ew32(FFLT_DBG, mac_reg);
+ ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
goto out;
/* Write PHY register values back to h/w defaults */
- e1e_rphy(hw, PHY_REG(769, 20), &data);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
- if (ret_val)
- goto out;
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
if (ret_val)
goto out;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 12);
data |= (1 << 13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
@@ -1559,6 +1597,69 @@ out:
}
/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ u32 mac_reg;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ goto out;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000)
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ else
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+ ew32(FEXTNVM4, mac_reg);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ return;
+}
+
+/**
* e1000_lan_init_done_ich8lan - Check for PHY config completion
* @hw: pointer to the HW structure
*
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
if (e1000_check_reset_block(hw))
goto out;
+ /* Allow time for h/w to get to quiescent state after reset */
+ msleep(10);
+
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
out:
return ret_val;
}
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
* external PHY is reset.
*/
ctrl |= E1000_CTRL_PHY_RST;
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
+ s32 ret_val;
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
- if (hw->mac.type >= e1000_pchlan)
- e1000_phy_hw_reset_ich8lan(hw);
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
}
/**
@@ -3855,7 +3986,7 @@ struct e1000_info e1000_pch2_info = {
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
- .pba = 18,
+ .pba = 26,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index df4a2792293..0fd4eb5ac5f 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
+ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+ if (!((nvm_data & NVM_COMPAT_LOM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+ goto out;
+
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c3dd590d87b..992b622fe20 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -475,7 +475,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
- skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM)
@@ -1052,7 +1053,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
adapter->total_tx_packets += total_tx_packets;
netdev->stats.tx_bytes += total_tx_bytes;
netdev->stats.tx_packets += total_tx_packets;
- return (count < tx_ring->count);
+ return count < tx_ring->count;
}
/**
@@ -2289,6 +2290,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
+ if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ new_itr = 0;
+ goto set_itr_now;
+ }
+
adapter->tx_itr = e1000_update_itr(adapter,
adapter->tx_itr,
adapter->total_tx_packets,
@@ -2337,7 +2343,10 @@ set_itr_now:
if (adapter->msix_entries)
adapter->rx_ring->set_itr = 1;
else
- ew32(ITR, 1000000000 / (new_itr * 256));
+ if (new_itr)
+ ew32(ITR, 1000000000 / (new_itr * 256));
+ else
+ ew32(ITR, 0);
}
}
@@ -2649,6 +2658,26 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* Tx irq moderation */
ew32(TADV, adapter->tx_abs_int_delay);
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ u32 txdctl = er32(TXDCTL(0));
+ txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
+ E1000_TXDCTL_WTHRESH);
+ /*
+ * set up some performance-related parameters to encourage the
+ * hardware to use the bus more efficiently in bursts; this
+ * depends on tx_int_delay being enabled.
+ * wthresh = 5 ==> burst-write a cacheline (64 bytes) at a time
+ * hthresh = 1 ==> prefetch when one or more descriptors are available
+ * pthresh = 0x1f ==> prefetch if 31 or fewer descriptors are cached
+ * BEWARE: this seems to work, but it should be the first suspect
+ * if there are tx hangs or other tx-related bugs
+ */
+ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+ /* erratum work around: set txdctl the same for both queues */
+ ew32(TXDCTL(1), txdctl);
+ }
+
/* Program the Transmit Control Register */
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_CT;
@@ -2704,6 +2733,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 psrctl = 0;
u32 pages = 0;
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+ }
+
/* Program MC offset vector base */
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2783,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_wphy(hw, 22, phy_data);
}
- /* Workaround Si errata on 82579 - configure jumbo frame flow */
- if (hw->mac.type == e1000_pch2lan) {
- s32 ret_val;
-
- if (rctl & E1000_RCTL_LPE)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
- }
-
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@@ -2871,12 +2900,35 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
e1e_flush();
msleep(10);
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ /*
+ * set the writeback threshold (only takes effect if the RDTR
+ * is set).  set GRAN=1, write back up to 4 descriptors' worth,
+ * and enable prefetching of 0x20 rx descriptors:
+ * granularity = 01
+ * wthresh = 04
+ * hthresh = 04
+ * pthresh = 0x20
+ */
+ ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
+ ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
+
+ /*
+ * override the delay timers for enabling bursting, only if
+ * the value was not set by the user via module options
+ */
+ if (adapter->rx_int_delay == DEFAULT_RDTR)
+ adapter->rx_int_delay = BURST_RDTR;
+ if (adapter->rx_abs_int_delay == DEFAULT_RADV)
+ adapter->rx_abs_int_delay = BURST_RADV;
+ }
+
/* set the Receive Delay Timer Register */
ew32(RDTR, adapter->rx_int_delay);
/* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay);
- if (adapter->itr_setting != 0)
+ if ((adapter->itr_setting != 0) && (adapter->itr != 0))
ew32(ITR, 1000000000 / (adapter->itr * 256));
ctrl_ext = er32(CTRL_EXT);
@@ -2921,11 +2973,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* packet size is equal or larger than the specified value (in 8 byte
* units), e.g. using jumbo frames when setting to E1000_ERT_2048
*/
- if (adapter->flags & FLAG_HAS_ERT) {
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan)) {
if (adapter->netdev->mtu > ETH_DATA_LEN) {
u32 rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl | 0x3);
- ew32(ERT, E1000_ERT_2048 | (1 << 13));
+ if (adapter->flags & FLAG_HAS_ERT)
+ ew32(ERT, E1000_ERT_2048 | (1 << 13));
/*
* With jumbo frames and early-receive enabled,
* excessive C-state transition latencies result in
@@ -3188,9 +3242,35 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->low_water = 0x05048;
fc->pause_time = 0x0650;
fc->refresh_time = 0x0400;
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ pba = 14;
+ ew32(PBA, pba);
+ }
break;
}
+ /*
+ * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ * fit in receive buffer and early-receive not supported.
+ */
+ if (adapter->itr_setting & 0x3) {
+ if (((adapter->max_frame_size * 2) > (pba << 10)) &&
+ !(adapter->flags & FLAG_HAS_ERT)) {
+ if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate turned off\n");
+ adapter->flags2 |= FLAG2_DISABLE_AIM;
+ ew32(ITR, 0);
+ }
+ } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate turned on\n");
+ adapter->flags2 &= ~FLAG2_DISABLE_AIM;
+ adapter->itr = 20000;
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ }
+ }
+
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
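The moderation check added above turns throttling off when two maximum-sized frames no longer fit in the receive packet buffer; PBA is expressed in kilobytes, hence the << 10. A hedged standalone sketch of just that decision, with names of our choosing:

#include <stdbool.h>

/* Keep adaptive interrupt moderation only while two full frames fit in
 * the Rx packet buffer (pba_kb is in kilobytes), or while early-receive
 * can drain the buffer sooner. */
static bool keep_interrupt_moderation(unsigned int max_frame_size,
                                      unsigned int pba_kb,
                                      bool has_early_receive)
{
        if (has_early_receive)
                return true;
        return (max_frame_size * 2) <= (pba_kb << 10);
}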
@@ -3411,22 +3491,16 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_MSI_TEST_FAILED) {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
- err = -EIO;
- e_info("MSI interrupt test failed!\n");
- }
+ e_info("MSI interrupt test failed, using legacy interrupt.\n");
+ } else
+ e_dbg("MSI interrupt test succeeded!\n");
free_irq(adapter->pdev->irq, netdev);
pci_disable_msi(adapter->pdev);
- if (err == -EIO)
- goto msi_test_failed;
-
- /* okay so the test worked, restore settings */
- e_dbg("MSI interrupt test succeeded!\n");
msi_test_failed:
e1000e_set_interrupt_capability(adapter);
- e1000_request_irq(adapter);
- return err;
+ return e1000_request_irq(adapter);
}
/**
@@ -3458,21 +3532,6 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
}
- /* success ! */
- if (!err)
- return 0;
-
- /* EIO means MSI test failed */
- if (err != -EIO)
- return err;
-
- /* back to INTx mode */
- e_warn("MSI interrupt test failed, using legacy interrupt.\n");
-
- e1000_free_irq(adapter);
-
- err = e1000_request_irq(adapter);
-
return err;
}
@@ -3530,7 +3589,8 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround early-receive/jumbo issue */
- if (adapter->flags & FLAG_HAS_ERT)
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan))
pm_qos_add_request(&adapter->netdev->pm_qos_req,
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -3639,7 +3699,8 @@ static int e1000_close(struct net_device *netdev)
if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
- if (adapter->flags & FLAG_HAS_ERT)
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan))
pm_qos_remove_request(&adapter->netdev->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4255,6 +4316,16 @@ link_up:
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = 1;
+ /* flush partial descriptors to memory before detecting tx hang */
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+ /*
+ * no need to flush the writes because the timeout code does
+ * an er32 first thing
+ */
+ }
+
/*
* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4833,6 +4904,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
+ /* Jumbo frame workaround on 82579 requires CRC be stripped */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+ (new_mtu > ETH_DATA_LEN)) {
+ e_err("Jumbo Frames not supported on 82579 when CRC "
+ "stripping is disabled.\n");
+ return -EINVAL;
+ }
+
/* 82573 Errata 17 */
if (((adapter->hw.mac.type == e1000_82573) ||
(adapter->hw.mac.type == e1000_82574)) &&
@@ -5703,8 +5783,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_SG;
- if (pci_using_dac)
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
@@ -5745,11 +5827,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.function = e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.function = e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
@@ -5829,11 +5911,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
- pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -5879,8 +5958,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
- pm_runtime_get_sync(&pdev->dev);
-
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
@@ -5905,11 +5982,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- }
- pm_runtime_put_noidle(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 34aeec13bb1..3d36911f77f 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -91,7 +91,6 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
-#define DEFAULT_RDTR 0
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
@@ -101,7 +100,6 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
-#define DEFAULT_RADV 8
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 8d97f168f01..7c826319ee5 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1457,11 +1457,11 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
if (net_debug > 5)
printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
- /* determine how much of the transmit buffer space is available */
- if (lp->tx_end > lp->tx_start)
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
- else if (lp->tx_end < lp->tx_start)
- tx_available = lp->tx_start - lp->tx_end;
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
else tx_available = lp->xmt_ram;
if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 0060e422f17..1321cb6401c 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0105"
+#define DRV_VERSION "EHEA_0106"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+ int sq_restart_flag;
};
@@ -413,7 +414,7 @@ struct ehea_port_res {
struct ehea_adapter {
u64 handle;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct ehea_port *port[EHEA_MAX_PORTS];
struct ehea_eq *neq; /* notification event queue */
struct tasklet_struct neq_tasklet;
@@ -465,7 +466,7 @@ struct ehea_port {
struct net_device *netdev;
struct net_device_stats stats;
struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
- struct of_device ofdev; /* Open Firmware Device */
+ struct platform_device ofdev; /* Open Firmware Device */
struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
struct vlan_group *vgrp;
struct ehea_eq *qp_eq;
@@ -490,6 +491,8 @@ struct ehea_port {
u8 full_duplex;
u8 autoneg;
u8 num_def_qps;
+ wait_queue_head_t swqe_avail_wq;
+ wait_queue_head_t restart_wq;
};
struct port_res_cfg {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3beba70b7de..6932578816d 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -107,10 +107,10 @@ struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
-static int __devinit ehea_probe_adapter(struct of_device *dev,
+static int __devinit ehea_probe_adapter(struct platform_device *dev,
const struct of_device_id *id);
-static int __devexit ehea_remove(struct of_device *dev);
+static int __devexit ehea_remove(struct platform_device *dev);
static struct of_device_id ehea_device_table[] = {
{
@@ -180,7 +180,7 @@ static void ehea_update_firmware_handles(void)
num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
if (num_fw_handles) {
- arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
+ arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
if (!arr)
goto out; /* Keep the existing array */
} else
@@ -265,7 +265,7 @@ static void ehea_update_bcmc_registrations(void)
}
if (num_registrations) {
- arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
+ arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
if (!arr)
goto out; /* Keep the existing array */
} else
@@ -776,6 +776,54 @@ static int ehea_proc_rwqes(struct net_device *dev,
return processed;
}
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ pr->sq_restart_flag = 0;
+ }
+ wake_up(&port->restart_wq);
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+ struct ehea_swqe *swqe;
+ int swqe_index;
+ int i, k;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ int ret;
+ k = 0;
+ swqe = ehea_get_swqe(pr->qp, &swqe_index);
+ memset(swqe, 0, SWQE_HEADER_SIZE);
+ atomic_dec(&pr->swqe_avail);
+
+ swqe->tx_control |= EHEA_SWQE_PURGE;
+ swqe->wr_id = SWQE_RESTART_CHECK;
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+ swqe->immediate_data_length = 80;
+
+ ehea_post_swqe(pr->qp, swqe);
+
+ ret = wait_event_timeout(port->restart_wq,
+ pr->sq_restart_flag == 0,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ ehea_error("HW/SW queues out of sync");
+ ehea_schedule_port_reset(pr->port);
+ return;
+ }
+ }
+}
+
+
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
struct sk_buff *skb;
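check_sqs() above probes whether hardware and software queues are still in sync by posting a marker work request with a magic wr_id and then waiting, with a timeout, for that marker's completion to be observed. A generic sketch of that handshake using a flag plus a wait queue; the names and the exact flag polarity here are illustrative, not ehea's:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define SKETCH_MARKER_ID 0xdeadbeaff00d0000ull

struct sketch_queue {
        wait_queue_head_t done_wq;      /* initialised with init_waitqueue_head() */
        int               marker_pending;
};

/* Submit side: flag a marker as outstanding, post it, then wait for the
 * completion handler to clear the flag, or time out and report failure. */
static int sketch_check_queue(struct sketch_queue *q,
                              void (*post_marker)(struct sketch_queue *))
{
        q->marker_pending = 1;
        post_marker(q);
        return wait_event_timeout(q->done_wq, q->marker_pending == 0,
                                  msecs_to_jiffies(100)) ? 0 : -ETIMEDOUT;
}

/* Completion side: called when a completion carrying SKETCH_MARKER_ID
 * as its work-request id is seen. */
static void sketch_marker_seen(struct sketch_queue *q)
{
        q->marker_pending = 0;
        wake_up(&q->done_wq);
}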
@@ -793,6 +841,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
+
+ if (cqe->wr_id == SWQE_RESTART_CHECK) {
+ pr->sq_restart_flag = 1;
+ swqe_av++;
+ break;
+ }
+
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
ehea_error("Bad send completion status=0x%04X",
cqe->status);
@@ -836,6 +891,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
pr->queue_stopped = 0;
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
+ wake_up(&pr->port->swqe_avail_wq);
return cqe;
}
@@ -1862,7 +1918,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
struct hcp_ehea_port_cb7 *cb7;
u64 hret;
- if ((enable && port->promisc) || (!enable && !port->promisc))
+ if (enable == port->promisc)
return;
cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
@@ -2600,6 +2656,9 @@ static int ehea_open(struct net_device *dev)
netif_start_queue(dev);
}
+ init_waitqueue_head(&port->swqe_avail_wq);
+ init_waitqueue_head(&port->restart_wq);
+
mutex_unlock(&port->port_lock);
return ret;
@@ -2672,11 +2731,15 @@ static void ehea_flush_sq(struct ehea_port *port)
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
- int k = 0;
- while (atomic_read(&pr->swqe_avail) < swqe_max) {
- msleep(5);
- if (++k == 20)
- break;
+ int ret;
+
+ ret = wait_event_timeout(port->swqe_avail_wq,
+ atomic_read(&pr->swqe_avail) >= swqe_max,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ ehea_error("WARNING: sq not flushed completely");
+ break;
}
}
}
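The ehea_flush_sq() rework above replaces a fixed msleep()/retry loop with wait_event_timeout(), so the flush returns as soon as send-queue space is back instead of polling on a 5 ms grid. A minimal sketch of the same substitution; the condition and names are placeholders:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

/* Before: poll every 5 ms, up to 20 times.
 * After:  sleep on a wait queue and let the completion path wake us. */
static bool sketch_wait_for_space(wait_queue_head_t *wq,
                                  atomic_t *avail, int needed)
{
        /* Non-zero return: the condition became true within the budget.
         * Zero return: the 100 ms timeout expired first. */
        return wait_event_timeout(*wq, atomic_read(avail) >= needed,
                                  msecs_to_jiffies(100)) != 0;
}

/* Whoever updates 'avail' must call wake_up(wq) afterwards, mirroring the
 * wake_up() added to ehea_proc_cqes() above. */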
@@ -2917,6 +2980,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
port_napi_disable(port);
mutex_unlock(&port->port_lock);
}
+ reset_sq_restart_flag(port);
}
/* Unregister old memory region */
@@ -2951,6 +3015,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
mutex_lock(&port->port_lock);
port_napi_enable(port);
ret = ehea_restart_qps(dev);
+ check_sqs(port);
if (!ret)
netif_wake_queue(dev);
mutex_unlock(&port->port_lock);
@@ -3376,7 +3441,7 @@ static ssize_t ehea_remove_port(struct device *dev,
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
-int ehea_create_device_sysfs(struct of_device *dev)
+int ehea_create_device_sysfs(struct platform_device *dev)
{
int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
if (ret)
@@ -3387,13 +3452,13 @@ out:
return ret;
}
-void ehea_remove_device_sysfs(struct of_device *dev)
+void ehea_remove_device_sysfs(struct platform_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_probe_port);
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
-static int __devinit ehea_probe_adapter(struct of_device *dev,
+static int __devinit ehea_probe_adapter(struct platform_device *dev,
const struct of_device_id *id)
{
struct ehea_adapter *adapter;
@@ -3492,7 +3557,7 @@ out:
return ret;
}
-static int __devexit ehea_remove(struct of_device *dev)
+static int __devexit ehea_remove(struct platform_device *dev)
{
struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
int i;
@@ -3663,7 +3728,7 @@ int __init ehea_module_init(void)
if (ret)
ehea_info("failed registering memory remove notifier");
- ret = crash_shutdown_register(&ehea_crash_handler);
+ ret = crash_shutdown_register(ehea_crash_handler);
if (ret)
ehea_info("failed registering crash handler");
@@ -3688,7 +3753,7 @@ out3:
out2:
unregister_memory_notifier(&ehea_mem_nb);
unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(&ehea_crash_handler);
+ crash_shutdown_unregister(ehea_crash_handler);
out:
return ret;
}
@@ -3701,7 +3766,7 @@ static void __exit ehea_module_exit(void)
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(&ehea_crash_handler);
+ ret = crash_shutdown_unregister(ehea_crash_handler);
if (ret)
ehea_info("failed unregistering crash handler");
unregister_memory_notifier(&ehea_mem_nb);
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index f239aa8c6f4..ae623206f18 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -28,11 +28,10 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
-#include "vnic_rss.h"
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.1"
+#define DRV_VERSION "1.4.1.2a"
#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9aab85366d2..a1f92f19397 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -911,7 +911,9 @@ static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
static int enic_set_mac_address(struct net_device *netdev, void *p)
{
- return -EOPNOTSUPP;
+ struct sockaddr *saddr = p;
+
+ return enic_set_mac_addr(netdev, (char *)saddr->sa_data);
}
static int enic_dev_packet_filter(struct enic *enic, int directed,
@@ -1970,7 +1972,7 @@ static int enic_dev_hang_notify(struct enic *enic)
return err;
}
-int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
{
int err;
@@ -2145,25 +2147,14 @@ static const struct net_device_ops enic_netdev_ops = {
#endif
};
-void enic_dev_deinit(struct enic *enic)
+static void enic_dev_deinit(struct enic *enic)
{
netif_napi_del(&enic->napi);
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
}
-static int enic_dev_stats_clear(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_stats_clear(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-int enic_dev_init(struct enic *enic)
+static int enic_dev_init(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
struct net_device *netdev = enic->netdev;
@@ -2205,10 +2196,6 @@ int enic_dev_init(struct enic *enic)
enic_init_vnic_resources(enic);
- /* Clear LIF stats
- */
- enic_dev_stats_clear(enic);
-
err = enic_set_rq_alloc_buf(enic);
if (err) {
dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 29ede8a17a2..19a276cf768 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -35,7 +35,6 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
-#include "vnic_rss.h"
#include "enic_res.h"
#include "enic.h"
@@ -149,22 +148,6 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
}
-int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
-{
- u64 a0 = (u64)key_pa, a1 = len;
- int wait = 1000;
-
- return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
-}
-
-int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
-{
- u64 a0 = (u64)cpu_pa, a1 = len;
- int wait = 1000;
-
- return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
-}
-
void enic_free_vnic_resources(struct enic *enic)
{
unsigned int i;
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 83bd172c356..3c59f541cb5 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -137,8 +137,6 @@ int enic_del_vlan(struct enic *enic, u16 vlanid);
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en);
-int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);
-int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);
void enic_get_res_counts(struct enic *enic);
void enic_init_vnic_resources(struct enic *enic);
int enic_alloc_vnic_resources(struct enic *);
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 6a5b578a69e..11dc8f73e4b 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -74,6 +74,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
struct vnic_dev_bar *bar, unsigned int num_bars)
{
struct vnic_resource_header __iomem *rh;
+ struct mgmt_barmap_hdr __iomem *mrh;
struct vnic_resource __iomem *r;
u8 type;
@@ -85,22 +86,32 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
return -EINVAL;
}
- rh = bar->vaddr;
+ rh = bar->vaddr;
+ mrh = bar->vaddr;
if (!rh) {
pr_err("vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
- if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
- ioread32(&rh->version) != VNIC_RES_VERSION) {
- pr_err("vNIC BAR0 res magic/version error "
- "exp (%lx/%lx) curr (%x/%x)\n",
+ /* Check for mgmt vnic in addition to normal vnic */
+ if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+ (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+ if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+ (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+ pr_err("vNIC BAR0 res magic/version error "
+ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
- return -EINVAL;
+ return -EINVAL;
+ }
}
- r = (struct vnic_resource __iomem *)(rh + 1);
+ if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+ r = (struct vnic_resource __iomem *)(mrh + 1);
+ else
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
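The discovery change above accepts either the normal vNIC resource header or the management-vNIC header at BAR0 by comparing two magic/version pairs read through ioread32(). A condensed sketch of that dual-magic probe; the constants reuse the values added to vnic_resource.h further down, but the helper itself is ours:

#include <linux/types.h>
#include <linux/io.h>

/* Values from vnic_resource.h: 'vnic' and 'MGMT' in ASCII. */
#define SKETCH_VNIC_MAGIC      0x766E6963u
#define SKETCH_MGMTVNIC_MAGIC  0x544d474du

enum sketch_hdr_kind { SKETCH_HDR_NONE, SKETCH_HDR_VNIC, SKETCH_HDR_MGMT };

/* Both header layouts begin with a 32-bit magic, so one read of the first
 * word of BAR0 is enough to tell which (if either) is mapped there. */
static enum sketch_hdr_kind sketch_identify_bar0(void __iomem *bar0)
{
        u32 magic = ioread32(bar0);

        if (magic == SKETCH_VNIC_MAGIC)
                return SKETCH_HDR_VNIC;
        if (magic == SKETCH_MGMTVNIC_MAGIC)
                return SKETCH_HDR_MGMT;
        return SKETCH_HDR_NONE;
}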
@@ -175,22 +186,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
}
}
-dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
- enum vnic_res_type type, unsigned int index)
-{
- switch (type) {
- case RES_TYPE_WQ:
- case RES_TYPE_RQ:
- case RES_TYPE_CQ:
- case RES_TYPE_INTR_CTRL:
- return vdev->res[type].bus_addr +
- index * VNIC_RES_STRIDE;
- default:
- return vdev->res[type].bus_addr;
- }
-}
-
-unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
/* The base address of the desc rings must be 512 byte aligned.
@@ -373,18 +369,6 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
return err;
}
-void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
-{
- vdev->proxy = PROXY_BY_BDF;
- vdev->proxy_index = bdf;
-}
-
-void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
-{
- vdev->proxy = PROXY_NONE;
- vdev->proxy_index = 0;
-}
-
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
@@ -477,13 +461,6 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
return err;
}
-int vnic_dev_stats_clear(struct vnic_dev *vdev)
-{
- u64 a0 = 0, a1 = 0;
- int wait = 1000;
- return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
-}
-
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
@@ -517,19 +494,6 @@ int vnic_dev_enable(struct vnic_dev *vdev)
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
-int vnic_dev_enable_wait(struct vnic_dev *vdev)
-{
- u64 a0 = 0, a1 = 0;
- int wait = 1000;
- int err;
-
- err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
- if (err == ERR_ECMDUNKNOWN)
- return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
-
- return err;
-}
-
int vnic_dev_disable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
@@ -561,14 +525,14 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
return 0;
}
-int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
-int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -669,26 +633,6 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
return err;
}
-int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
- int multicast, int broadcast, int promisc, int allmulti)
-{
- u64 a0, a1 = 0;
- int wait = 1000;
- int err;
-
- a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
- (multicast ? CMD_PFILTER_MULTICAST : 0) |
- (broadcast ? CMD_PFILTER_BROADCAST : 0) |
- (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
- (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
-
- err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
- if (err)
- pr_err("Can't set packet filter\n");
-
- return err;
-}
-
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
@@ -737,20 +681,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
return err;
}
-int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
-{
- u64 a0 = intr, a1 = 0;
- int wait = 1000;
- int err;
-
- err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
- if (err)
- pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
-
- return err;
-}
-
-int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
u64 a0, a1;
@@ -789,7 +720,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
-int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
@@ -943,30 +874,6 @@ u32 vnic_dev_mtu(struct vnic_dev *vdev)
return vdev->notify_copy.mtu;
}
-u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
-{
- if (!vnic_dev_notify_ready(vdev))
- return 0;
-
- return vdev->notify_copy.link_down_cnt;
-}
-
-u32 vnic_dev_notify_status(struct vnic_dev *vdev)
-{
- if (!vnic_dev_notify_ready(vdev))
- return 0;
-
- return vdev->notify_copy.status;
-}
-
-u32 vnic_dev_uif(struct vnic_dev *vdev)
-{
- if (!vnic_dev_notify_ready(vdev))
- return 0;
-
- return vdev->notify_copy.uif;
-}
-
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode)
{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 3a61873138b..3f001431208 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -84,10 +84,6 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index);
-dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
- enum vnic_res_type type, unsigned int index);
-unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
- unsigned int desc_count, unsigned int desc_size);
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
@@ -95,40 +91,27 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
-void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
-void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_hw_version(struct vnic_dev *vdev,
enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
-int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
- int multicast, int broadcast, int promisc, int allmulti);
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
-int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
-int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
- void *notify_addr, dma_addr_t notify_pa, u16 intr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
-int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
int vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
u32 vnic_dev_mtu(struct vnic_dev *vdev);
-u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
-u32 vnic_dev_notify_status(struct vnic_dev *vdev);
-u32 vnic_dev_uif(struct vnic_dev *vdev);
int vnic_dev_close(struct vnic_dev *vdev);
int vnic_dev_enable(struct vnic_dev *vdev);
-int vnic_dev_enable_wait(struct vnic_dev *vdev);
int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
@@ -136,8 +119,6 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg);
int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_deinit(struct vnic_dev *vdev);
-int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
-int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 20661755df6..9abb3d51dea 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -238,6 +238,18 @@ enum vnic_devcmd_cmd {
* out: (u32)a0=status of proxied cmd
* a1-a15=out args of proxied cmd */
CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+ * or SR-IOV virtual vnic */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * in: (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in
+ * (u32)a1=length of buffer in a0
+ * out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV
+ * (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
};
/* flags for CMD_OPEN */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 3b329124895..e8740e3704e 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -30,7 +30,7 @@ struct vnic_enet_config {
u32 wq_desc_count;
u32 rq_desc_count;
u16 mtu;
- u16 intr_timer;
+ u16 intr_timer_deprecated;
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 52ab61af275..3873771d75c 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -65,8 +65,3 @@ void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
}
-
-void vnic_intr_raise(struct vnic_intr *intr)
-{
- vnic_dev_raise_intr(intr->vdev, (u16)intr->index);
-}
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index 810287beff1..e0a73f1ca6f 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -22,6 +22,11 @@
#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
#define VNIC_RES_VERSION 0x00000000L
+#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
+#define MGMTVNIC_VERSION 0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
/* vNIC resource types */
enum vnic_res_type {
@@ -52,6 +57,14 @@ struct vnic_resource_header {
u32 version;
};
+struct mgmt_barmap_hdr {
+ u32 magic; /* magic number */
+ u32 version; /* header format version */
+ u16 lif; /* loopback lif for mgmt frames */
+ u16 pci_slot; /* installed pci slot */
+ char serial[16]; /* card serial number */
+};
+
struct vnic_resource {
u8 type;
u8 bar;
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index dbb2aca258b..34105e0951a 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -77,8 +77,10 @@ void vnic_rq_free(struct vnic_rq *rq)
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
- kfree(rq->bufs[i]);
- rq->bufs[i] = NULL;
+ if (rq->bufs[i]) {
+ kfree(rq->bufs[i]);
+ rq->bufs[i] = NULL;
+ }
}
rq->ctrl = NULL;
@@ -113,7 +115,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
return 0;
}
-void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 2dc48f91abf..37f08de2454 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -143,7 +143,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
- return ((rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0);
+ return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
}
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
@@ -202,10 +202,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
-void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
- unsigned int fetch_index, unsigned int posted_index,
- unsigned int error_interrupt_enable,
- unsigned int error_interrupt_offset);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
deleted file mode 100644
index f62d1871962..00000000000
--- a/drivers/net/enic/vnic_rss.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VNIC_RSS_H_
-#define _VNIC_RSS_H_
-
-/* RSS key array */
-union vnic_rss_key {
- struct {
- u8 b[10];
- u8 b_pad[6];
- } key[4];
- u64 raw[8];
-};
-
-/* RSS cpu array */
-union vnic_rss_cpu {
- struct {
- u8 b[4] ;
- u8 b_pad[4];
- } cpu[32];
- u64 raw[32];
-};
-
-void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
-void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
-void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
-void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
-
-#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 197c9d24af8..4725b79de0e 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -54,8 +54,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
if (!vp || !value)
return -EINVAL;
- if (ntohl(vp->length) + sizeof(*tlv) + length >
- VIC_PROVINFO_MAX_TLV_DATA)
+ if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) +
+ length > VIC_PROVINFO_MAX_TLV_DATA)
return -ENOMEM;
tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
@@ -66,7 +66,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
memcpy(tlv->value, value, length);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
- vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+ vp->length = htonl(ntohl(vp->length) +
+ offsetof(struct vic_provinfo_tlv, value) + length);
return 0;
}
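The vic_provinfo_add_tlv() change above accounts for each TLV as offsetof(struct vic_provinfo_tlv, value) + length rather than sizeof(*tlv) + length, i.e. only the fixed header bytes in front of the flexible payload. A self-contained sketch of why the two expressions can differ for a structure that ends in a flexible array member (the layout below is generic, not the exact vic_provinfo_tlv layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A TLV header with a flexible payload. */
struct sketch_tlv {
        uint32_t type;
        uint16_t length;
        uint8_t  value[];        /* payload follows the fixed header */
};

int main(void)
{
        /* sizeof() rounds the struct up to its alignment, so it can exceed
         * the real header size; offsetof() of the flexible member is the
         * exact number of header bytes ahead of the payload.  On common
         * ABIs this prints 8 for sizeof and 6 for offsetof. */
        printf("sizeof          = %zu\n", sizeof(struct sketch_tlv));
        printf("offsetof(value) = %zu\n", offsetof(struct sketch_tlv, value));
        return 0;
}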
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 122e33bcc57..df61bd932ea 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -77,8 +77,10 @@ void vnic_wq_free(struct vnic_wq *wq)
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
- kfree(wq->bufs[i]);
- wq->bufs[i] = NULL;
+ if (wq->bufs[i]) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
}
wq->ctrl = NULL;
@@ -113,7 +115,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
return 0;
}
-void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 94ac4621acc..7dd937ac11c 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -153,10 +153,6 @@ static inline void vnic_wq_service(struct vnic_wq *wq,
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
-void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
- unsigned int fetch_index, unsigned int posted_index,
- unsigned int error_interrupt_enable,
- unsigned int error_interrupt_offset);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 57c8ac0ef3f..32543a300b8 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -758,7 +758,7 @@ static int epic_open(struct net_device *dev)
init_timer(&ep->timer);
ep->timer.expires = jiffies + 3*HZ;
ep->timer.data = (unsigned long)dev;
- ep->timer.function = &epic_timer; /* timer handler */
+ ep->timer.function = epic_timer; /* timer handler */
add_timer(&ep->timer);
return 0;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index dda2c7944da..0cb1cf9cf4b 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
equalizer_t *eql;
master_config_t mc;
+ memset(&mc, 0, sizeof(master_config_t));
+
if (eql_is_master(dev)) {
eql = netdev_priv(dev);
mc.max_slaves = eql->max_slaves;
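The eql change above zeroes the on-stack master_config_t before it is later copied back to user space, so struct padding and any fields left unset on early-return paths cannot leak kernel stack contents. A generic sketch of that pattern, with a made-up structure:

#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct sketch_cfg {
        int max_slaves;
        int min_slaves;
        /* the compiler may add padding here; memset() covers it as well */
};

static int sketch_get_cfg(struct sketch_cfg __user *ucfg, int max, int min)
{
        struct sketch_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));   /* no uninitialised bytes reach userspace */
        cfg.max_slaves = max;
        cfg.min_slaves = min;

        return copy_to_user(ucfg, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}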
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 10e39f2b31c..fb717be511f 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -637,7 +637,9 @@ static void eth16i_initialize(struct net_device *dev, int boot)
/* Set interface port type */
if(boot) {
- char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" };
+ static const char * const porttype[] = {
+ "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
+ };
switch(dev->if_port)
{
@@ -794,7 +796,7 @@ static int eth16i_receive_probe_packet(int ioaddr)
if(eth16i_debug > 1)
printk(KERN_DEBUG "RECEIVE_PACKET\n");
- return(0); /* Found receive packet */
+ return 0; /* Found receive packet */
}
}
@@ -803,7 +805,7 @@ static int eth16i_receive_probe_packet(int ioaddr)
printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
}
- return(0); /* Return success */
+ return 0; /* Return success */
}
#if 0
@@ -839,7 +841,7 @@ static int __init eth16i_get_irq(int ioaddr)
if( ioaddr < 0x1000) {
cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] );
+ return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
} else { /* Oh..the card is EISA so method getting IRQ different */
unsigned short index = 0;
cbyte = inb(ioaddr + EISA_IRQ_REG);
@@ -847,7 +849,7 @@ static int __init eth16i_get_irq(int ioaddr)
cbyte = cbyte >> 1;
index++;
}
- return( eth32i_irqmap[ index ] );
+ return eth32i_irqmap[index];
}
}
@@ -907,7 +909,7 @@ static int eth16i_read_eeprom(int ioaddr, int offset)
data = eth16i_read_eeprom_word(ioaddr);
outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
- return(data);
+ return data;
}
static int eth16i_read_eeprom_word(int ioaddr)
@@ -926,7 +928,7 @@ static int eth16i_read_eeprom_word(int ioaddr)
eeprom_slow_io();
}
- return(data);
+ return data;
}
static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6d653c459c1..c5a2fe099a8 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -806,11 +806,6 @@ static void ethoc_tx_timeout(struct net_device *dev)
ethoc_interrupt(dev->irq, dev);
}
-static struct net_device_stats *ethoc_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -863,7 +858,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_set_multicast_list = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
.ndo_tx_timeout = ethoc_tx_timeout,
- .ndo_get_stats = ethoc_stats,
.ndo_start_xmit = ethoc_start_xmit,
};
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d7e8f6b8f4c..dd54abe2f71 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -915,14 +915,14 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = RUN_AT(3 * HZ);
np->timer.data = (unsigned long) dev;
- np->timer.function = &netdev_timer;
+ np->timer.function = netdev_timer;
/* timer handler */
add_timer(&np->timer);
init_timer(&np->reset_timer);
np->reset_timer.data = (unsigned long) dev;
- np->reset_timer.function = &reset_timer;
+ np->reset_timer.function = reset_timer;
np->reset_timer_armed = 0;
return 0;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 768b840aeb6..e83f67d22fe 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1311,6 +1311,9 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_mii_init;
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index d1a5b17b2a9..e9f5d030bc2 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -771,11 +771,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
/* ethtool interface */
-static void mpc52xx_fec_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRIVER_NAME);
-}
static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -810,7 +805,6 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
}
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
- .get_drvinfo = mpc52xx_fec_get_drvinfo,
.get_settings = mpc52xx_fec_get_settings,
.set_settings = mpc52xx_fec_set_settings,
.get_link = ethtool_op_get_link,
@@ -850,7 +844,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
/* ======================================================================== */
static int __devinit
-mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
+mpc52xx_fec_probe(struct platform_device *op, const struct of_device_id *match)
{
int rv;
struct net_device *ndev;
@@ -995,7 +989,7 @@ err_netdev:
}
static int
-mpc52xx_fec_remove(struct of_device *op)
+mpc52xx_fec_remove(struct platform_device *op)
{
struct net_device *ndev;
struct mpc52xx_fec_priv *priv;
@@ -1025,7 +1019,7 @@ mpc52xx_fec_remove(struct of_device *op)
}
#ifdef CONFIG_PM
-static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state)
+static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
@@ -1035,7 +1029,7 @@ static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state)
return 0;
}
-static int mpc52xx_fec_of_resume(struct of_device *op)
+static int mpc52xx_fec_of_resume(struct platform_device *op)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index dbaf72cbb23..0b4cb6f1598 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -61,7 +61,7 @@ static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
data | FEC_MII_WRITE_FRAME);
}
-static int mpc52xx_fec_mdio_probe(struct of_device *of,
+static int mpc52xx_fec_mdio_probe(struct platform_device *of,
const struct of_device_id *match)
{
struct device *dev = &of->dev;
@@ -122,7 +122,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
return err;
}
-static int mpc52xx_fec_mdio_remove(struct of_device *of)
+static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct device *dev = &of->dev;
struct mii_bus *bus = dev_get_drvdata(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4da05b1b445..ddac63cefba 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -4620,7 +4620,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
static u32 nv_get_rx_csum(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- return (np->rx_csum) != 0;
+ return np->rx_csum != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -5440,13 +5440,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->oom_kick);
np->oom_kick.data = (unsigned long) dev;
- np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
+ np->oom_kick.function = nv_do_rx_refill; /* timer handler */
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
- np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
+ np->nic_poll.function = nv_do_nic_poll; /* timer handler */
init_timer(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
- np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
+ np->stats_poll.function = nv_do_stats_poll; /* timer handler */
err = pci_enable_device(pci_dev);
if (err)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f08cff9020b..d684f187de5 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -997,7 +997,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
-static int __devinit fs_enet_probe(struct of_device *ofdev,
+static int __devinit fs_enet_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct net_device *ndev;
@@ -1036,7 +1036,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
ndev = alloc_etherdev(privsize);
if (!ndev) {
ret = -ENOMEM;
- goto out_free_fpi;
+ goto out_put;
}
SET_NETDEV_DEV(ndev, &ofdev->dev);
@@ -1099,13 +1099,14 @@ out_cleanup_data:
out_free_dev:
free_netdev(ndev);
dev_set_drvdata(&ofdev->dev, NULL);
+out_put:
of_node_put(fpi->phy_node);
out_free_fpi:
kfree(fpi);
return ret;
}
-static int fs_enet_remove(struct of_device *ofdev)
+static int fs_enet_remove(struct platform_device *ofdev)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct fs_enet_private *fep = netdev_priv(ndev);
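The fs_enet probe fix above reroutes the alloc_etherdev() failure to a new out_put label so the phy_node reference taken earlier is still dropped with of_node_put() before fpi is freed. A compact sketch of that kernel error-unwind idiom, with labels ordered as the reverse of acquisition; all names here are illustrative:

/* Illustrative resources; real code would take devices, IRQs, memory, ... */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }   /* pretend the last step fails */
static void release_a(void) { }
static void release_b(void) { }

/* Each failure jumps to the label that undoes only what was successfully
 * acquired before it. */
static int sketch_probe(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                goto out;

        ret = acquire_b();
        if (ret)
                goto out_release_a;

        ret = acquire_c();
        if (ret)
                goto out_release_b;

        return 0;

out_release_b:
        release_b();
out_release_a:
        release_a();
out:
        return ret;
}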
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 48e91b6242c..7a84e45487e 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -84,7 +84,7 @@ static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
static int do_pd_setup(struct fs_enet_private *fep)
{
- struct of_device *ofdev = to_of_device(fep->dev);
+ struct platform_device *ofdev = to_platform_device(fep->dev);
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 7ca1642276d..61035fc5599 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -96,7 +96,7 @@ static int whack_reset(struct fec __iomem *fecp)
static int do_pd_setup(struct fs_enet_private *fep)
{
- struct of_device *ofdev = to_of_device(fep->dev);
+ struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index a3c44544846..22a02a76706 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -96,7 +96,7 @@ static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
static int do_pd_setup(struct fs_enet_private *fep)
{
- struct of_device *ofdev = to_of_device(fep->dev);
+ struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 3607340f3da..3cda2b51547 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -150,7 +150,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct mii_bus *new_bus;
@@ -200,7 +200,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct of_device *ofdev)
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
struct bb_info *bitbang = bus->priv;
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index bddffd169b9..dbb9c48623d 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,7 +101,7 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct resource res;
@@ -192,7 +192,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct of_device *ofdev)
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
struct fec_info *fec = bus->priv;
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index b4c41d72c42..8d3a2ccbc95 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -35,6 +35,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
@@ -124,7 +125,7 @@ int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
/* Write to the local MII regs */
- return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
+ return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
}
/*
@@ -136,7 +137,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
/* Read the local MII regs */
- return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
+ return fsl_pq_local_mdio_read(regs, mii_id, regnum);
}
/* Reset the MIIM registers, and wait for the bus to free */
@@ -264,7 +265,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
#endif
-static int fsl_pq_mdio_probe(struct of_device *ofdev,
+static int fsl_pq_mdio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -424,7 +425,7 @@ err_free_priv:
}
-static int fsl_pq_mdio_remove(struct of_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index a1b6301bc67..6180089bf67 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -122,9 +122,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
-static int gfar_probe(struct of_device *ofdev,
+static int gfar_probe(struct platform_device *ofdev,
const struct of_device_id *match);
-static int gfar_remove(struct of_device *ofdev);
+static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
@@ -605,7 +605,7 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
+static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
const char *ctype;
@@ -654,9 +654,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
priv->node = ofdev->dev.of_node;
priv->ndev = dev;
- dev->num_tx_queues = num_tx_qs;
- dev->real_num_tx_queues = num_tx_qs;
priv->num_tx_queues = num_tx_qs;
+ netif_set_real_num_rx_queues(dev, num_rx_qs);
priv->num_rx_queues = num_rx_qs;
priv->num_grps = 0x0;
@@ -959,7 +958,7 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
-static int gfar_probe(struct of_device *ofdev,
+static int gfar_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
u32 tempval;
@@ -1238,7 +1237,7 @@ register_fail:
return err;
}
-static int gfar_remove(struct of_device *ofdev)
+static int gfar_remove(struct platform_device *ofdev)
{
struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
@@ -1859,7 +1858,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, grp->interruptError);
- goto err_irq_fail;
+ goto err_irq_fail;
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
@@ -2048,7 +2047,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 bufaddr;
unsigned long flags;
unsigned int nr_frags, nr_txbds, length;
- union skb_shared_tx *shtx;
/*
* TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2069,10 +2067,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
- shtx = skb_tx(skb);
/* check if time stamp should be generated */
- if (unlikely(shtx->hardware && priv->hwts_tx_en))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->hwts_tx_en))
do_tstamp = 1;
/* make space for additional header when fcb is needed */
@@ -2174,7 +2172,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup tx hardware time stamping if requested */
if (unlikely(do_tstamp)) {
- shtx->in_progress = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
if (fcb == NULL)
fcb = gfar_add_fcb(skb);
fcb->ptp = 1;
@@ -2446,7 +2444,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
int howmany = 0;
u32 lstatus;
size_t buflen;
- union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2461,8 +2458,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
- shtx = skb_tx(skb);
- if (unlikely(shtx->in_progress))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@@ -2476,7 +2472,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- if (unlikely(shtx->in_progress)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = next->length + GMAC_FCB_LEN;
} else
@@ -2485,7 +2481,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
buflen, DMA_TO_DEVICE);
- if (unlikely(shtx->in_progress)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -2657,7 +2653,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
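The gianfar changes above move from the old skb_tx(skb) union to skb_shinfo(skb)->tx_flags, testing SKBTX_HW_TSTAMP on transmit and SKBTX_IN_PROGRESS on completion before reporting the hardware timestamp. A trimmed sketch of that request/report pairing, with the queue handling left out:

#include <linux/skbuff.h>
#include <linux/ktime.h>
#include <linux/string.h>

/* Transmit side: note whether the stack asked for a hardware timestamp and
 * mark the skb so the completion path knows one is coming. */
static bool sketch_tx_wants_tstamp(struct sk_buff *skb, bool hw_tstamp_enabled)
{
        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && hw_tstamp_enabled) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                return true;
        }
        return false;
}

/* Completion side: convert the hardware's nanosecond counter and hand the
 * timestamp back to the stack. */
static void sketch_tx_report_tstamp(struct sk_buff *skb, u64 hw_ns)
{
        struct skb_shared_hwtstamps shhwtstamps;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                return;

        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(hw_ns);
        skb_tstamp_tx(skb, &shhwtstamps);
}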
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 710810e2adb..68984eb88ae 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1054,7 +1054,7 @@ struct gfar_private {
struct device_node *node;
struct net_device *ndev;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
enum gfar_errata errata;
struct gfar_priv_grp gfargrp[MAXGROUPS];
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 9bda023c023..ae8e5d3c6c1 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -254,7 +254,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
/* Make sure we return a number greater than 0
* if usecs > 0 */
- return ((usecs * 1000 + count - 1) / count);
+ return (usecs * 1000 + count - 1) / count;
}
/* Convert ethernet clock ticks to microseconds */
@@ -278,7 +278,7 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
/* Make sure we return a number greater than 0 */
/* if ticks is > 0 */
- return ((ticks * count) / 1000);
+ return (ticks * count) / 1000;
}
/* Get the coalescing parameters, and put them in the cvals
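
The two conversion helpers above only shed redundant parentheses around their return expressions; the arithmetic is untouched. The usecs-to-ticks direction is a plain round-up division, which could equally be written with the DIV_ROUND_UP() helper (a sketch, not part of the patch; the parameter name is illustrative):

#include <linux/kernel.h>

/* usecs -> ticks, rounded up so a non-zero request never truncates to 0 */
static unsigned int example_usecs2ticks(unsigned int usecs,
					unsigned int count /* ns per tick */)
{
	return DIV_ROUND_UP(usecs * 1000, count);
}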
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 4d09eab3548..27d6960ce09 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -893,7 +893,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
@@ -1373,7 +1373,7 @@ error:
}
/* Initialize the GRETH MAC */
-static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
{
struct net_device *dev;
struct greth_private *greth;
@@ -1412,7 +1412,7 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
}
regs = (struct greth_regs *) greth->regs;
- greth->irq = ofdev->irqs[0];
+ greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
SET_NETDEV_DEV(dev, greth->dev);
@@ -1547,10 +1547,10 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
dev->netdev_ops = &greth_netdev_ops;
dev->ethtool_ops = &greth_ethtool_ops;
- if (register_netdev(dev)) {
+ err = register_netdev(dev);
+ if (err) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "netdevice registration failed.\n");
- err = -ENOMEM;
goto error5;
}
@@ -1572,7 +1572,7 @@ error1:
return err;
}
-static int __devexit greth_of_remove(struct of_device *of_dev)
+static int __devexit greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
struct greth_private *greth = netdev_priv(ndev);
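
The greth_of_probe() hunk above also stops overwriting register_netdev()'s return value with a blanket -ENOMEM and propagates the real error instead. The pattern, sketched with an illustrative helper:

#include <linux/netdevice.h>

/* Sketch: keep whatever errno register_netdev() actually returned */
static int example_register(struct net_device *dev)
{
	int err = register_netdev(dev);

	if (err)
		netdev_err(dev, "netdevice registration failed (%d)\n", err);
	return err;
}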
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 973388d6abc..03ad903cd67 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -118,7 +118,7 @@ struct greth_private {
int irq;
- struct device *dev; /* Pointer to of_device->dev */
+ struct device *dev; /* Pointer to platform_device->dev */
struct net_device *netdev;
struct napi_struct napi;
spinlock_t devlock;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 49aac7027fb..9a6485892b3 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1004,7 +1004,7 @@ static int hamachi_open(struct net_device *dev)
init_timer(&hmp->timer);
hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
hmp->timer.data = (unsigned long)dev;
- hmp->timer.function = &hamachi_timer; /* timer handler */
+ hmp->timer.function = hamachi_timer; /* timer handler */
add_timer(&hmp->timer);
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 14f01d156db..ac1d323c5eb 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -168,7 +168,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
static inline int dev_is_ethdev(struct net_device *dev)
{
- return (dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5));
+ return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index b8bdf9d51cd..5b37579e84b 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -110,7 +110,7 @@ static int calc_crc_ccitt(const unsigned char *buf, int cnt)
for (; cnt > 0; cnt--)
crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff];
crc ^= 0xffff;
- return (crc & 0xffff);
+ return crc & 0xffff;
}
#endif
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 9f64c863720..33655814448 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1069,7 +1069,8 @@ static void scc_tx_done(struct scc_channel *scc)
case KISS_DUPLEX_LINK:
scc->stat.tx_state = TXS_IDLE2;
if (scc->kiss.idletime != TIMER_OFF)
- scc_start_tx_timer(scc, t_idle, scc->kiss.idletime*100);
+ scc_start_tx_timer(scc, t_idle,
+ scc->kiss.idletime*100);
break;
case KISS_DUPLEX_OPTIMA:
scc_notify(scc, HWEV_ALL_SENT);
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 86ececd3c65..d15d2f2ba78 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -204,10 +204,10 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
ei_status.rx_start_page = HP_START_PG + TX_PAGES;
ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
- ei_status.reset_8390 = &hp_reset_8390;
- ei_status.get_8390_hdr = &hp_get_8390_hdr;
- ei_status.block_input = &hp_block_input;
- ei_status.block_output = &hp_block_output;
+ ei_status.reset_8390 = hp_reset_8390;
+ ei_status.get_8390_hdr = hp_get_8390_hdr;
+ ei_status.block_input = hp_block_input;
+ ei_status.block_output = hp_block_output;
hp_init_card(dev);
retval = register_netdev(dev);
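
The hp.c change above (and the hydra.c one further down) only drops the redundant address-of operator: in C a function designator already decays to a pointer, so both spellings store the same value. For example:

static void example_reset(void *dev)
{
	/* ... */
}

static void (*example_reset_cb)(void *) = example_reset;	/* same as &example_reset */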
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 095b17ecf60..8e2c4601b5f 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1312,7 +1312,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
#endif
- return (1);
+ return 1;
}
/* else: */
/* alloc_skb failed (no memory) -> still can receive the header
@@ -1325,7 +1325,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */
- return (0);
+ return 0;
}
/*
@@ -2752,7 +2752,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
if (val & HP100_LINK_UP_ST)
- return (0); /* login was ok */
+ return 0; /* login was ok */
else {
printk("hp100: %s: Training failed.\n", dev->name);
hp100_down_vg_link(dev);
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 07d8e5b634f..c5ef62ceb84 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -155,10 +155,10 @@ static int __devinit hydra_init(struct zorro_dev *z)
ei_status.rx_start_page = start_page + TX_PAGES;
- ei_status.reset_8390 = &hydra_reset_8390;
- ei_status.block_input = &hydra_block_input;
- ei_status.block_output = &hydra_block_output;
- ei_status.get_8390_hdr = &hydra_get_8390_hdr;
+ ei_status.reset_8390 = hydra_reset_8390;
+ ei_status.block_input = hydra_block_input;
+ ei_status.block_output = hydra_block_output;
+ ei_status.get_8390_hdr = hydra_get_8390_hdr;
ei_status.reg_offset = hydra_offsets;
dev->netdev_ops = &hydra_netdev_ops;
@@ -173,9 +173,8 @@ static int __devinit hydra_init(struct zorro_dev *z)
zorro_set_drvdata(z, dev);
- printk(KERN_INFO "%s: Hydra at 0x%08llx, address "
- "%pM (hydra.c " HYDRA_VERSION ")\n",
- dev->name, (unsigned long long)z->resource.start, dev->dev_addr);
+ pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n",
+ dev->name, &z->resource, dev->dev_addr);
return 0;
}
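
The hydra.c probe message above moves to pr_info() with the kernel's %pR and %pM extensions, which format a struct resource range and a MAC address directly. A self-contained sketch with illustrative arguments:

#include <linux/kernel.h>
#include <linux/ioport.h>

static void example_announce(const char *name, const struct resource *res,
			     const u8 *mac)
{
	/* %pR prints the resource range, %pM the 6-byte MAC address */
	pr_info("%s: device at %pR, address %pM\n", name, res, mac);
}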
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 0f1d4e96cf8..385dc3204cb 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2095,11 +2095,11 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
hdr->version = EMAC4_ETHTOOL_REGS_VER;
memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
- return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
+ return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
} else {
hdr->version = EMAC_ETHTOOL_REGS_VER;
memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
- return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
+ return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
}
}
@@ -2245,7 +2245,7 @@ static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
struct emac_depentry {
u32 phandle;
struct device_node *node;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
void *drvdata;
};
@@ -2293,7 +2293,7 @@ static int __devinit emac_check_deps(struct emac_instance *dev,
if (deps[i].drvdata != NULL)
there++;
}
- return (there == EMAC_DEP_COUNT);
+ return there == EMAC_DEP_COUNT;
}
static void emac_put_deps(struct emac_instance *dev)
@@ -2339,11 +2339,11 @@ static int __devinit emac_wait_deps(struct emac_instance *dev)
deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
if (dev->blist && dev->blist > emac_boot_list)
deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
- bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
+ bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
wait_event_timeout(emac_probe_wait,
emac_check_deps(dev, deps),
EMAC_PROBE_DEP_TIMEOUT);
- bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
+ bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
for (i = 0; i < EMAC_DEP_COUNT; i++) {
if (deps[i].node)
@@ -2719,7 +2719,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_change_mtu = emac_change_mtu,
};
-static int __devinit emac_probe(struct of_device *ofdev,
+static int __devinit emac_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct net_device *ndev;
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
err_free:
- kfree(ndev);
+ free_netdev(ndev);
err_gone:
/* if we were on the bootlist, remove us as we won't show up and
* wake up all waiters to notify them in case they were waiting
@@ -2941,7 +2941,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
return err;
}
-static int __devexit emac_remove(struct of_device *ofdev)
+static int __devexit emac_remove(struct platform_device *ofdev)
{
struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct of_device *ofdev)
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
- kfree(dev->ndev);
+ free_netdev(dev->ndev);
return 0;
}
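
The emac_probe()/emac_remove() error paths above switch from kfree() to free_netdev(): a net_device obtained from alloc_etherdev()/alloc_netdev() has to be released through free_netdev() so its private area and associated state are torn down properly. A sketch with an illustrative private struct:

#include <linux/etherdevice.h>

struct example_priv {
	int dummy;
};

static int example_probe_fail(void)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct example_priv));

	if (!ndev)
		return -ENOMEM;
	/* ... later probe steps fail ... */
	free_netdev(ndev);	/* not kfree(ndev) */
	return -ENODEV;
}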
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index b1cbe6fdfc7..4fec0844d59 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -170,12 +170,12 @@ struct emac_instance {
struct net_device *ndev;
struct resource rsrc_regs;
struct emac_regs __iomem *emacp;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct device_node **blist; /* bootlist entry */
/* MAL linkage */
u32 mal_ph;
- struct of_device *mal_dev;
+ struct platform_device *mal_dev;
u32 mal_rx_chan;
u32 mal_tx_chan;
struct mal_instance *mal;
@@ -196,24 +196,24 @@ struct emac_instance {
/* Shared MDIO if any */
u32 mdio_ph;
- struct of_device *mdio_dev;
+ struct platform_device *mdio_dev;
struct emac_instance *mdio_instance;
struct mutex mdio_lock;
/* ZMII infos if any */
u32 zmii_ph;
u32 zmii_port;
- struct of_device *zmii_dev;
+ struct platform_device *zmii_dev;
/* RGMII infos if any */
u32 rgmii_ph;
u32 rgmii_port;
- struct of_device *rgmii_dev;
+ struct platform_device *rgmii_dev;
/* TAH infos if any */
u32 tah_ph;
u32 tah_port;
- struct of_device *tah_dev;
+ struct platform_device *tah_dev;
/* IRQs */
int wol_irq;
@@ -410,7 +410,7 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
else
offset = offsetof(struct emac_regs, u0.emac4.iaht1);
- return ((u32 *)((ptrdiff_t)p + offset));
+ return (u32 *)((ptrdiff_t)p + offset);
}
static inline u32 *emac_gaht_base(struct emac_instance *dev)
@@ -418,7 +418,7 @@ static inline u32 *emac_gaht_base(struct emac_instance *dev)
/* GAHT registers always come after an identical number of
* IAHT registers.
*/
- return (emac_xaht_base(dev) + EMAC_XAHT_REGS(dev));
+ return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
}
static inline u32 *emac_iaht_base(struct emac_instance *dev)
@@ -426,7 +426,7 @@ static inline u32 *emac_iaht_base(struct emac_instance *dev)
/* IAHT registers always come before an identical number of
* GAHT registers.
*/
- return (emac_xaht_base(dev));
+ return emac_xaht_base(dev);
}
/* Ethtool get_regs complex data.
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995fafc1e0..8c6c1e2a875 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
}
#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key, struct tty_struct *tty)
+static void emac_sysrq_handler(int key)
{
emac_dbg_dump_all();
}
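
The debug.c hunk tracks the sysrq API change that dropped the tty argument from key handlers. A sketch of a handler in the new form, with illustrative names and messages (see <linux/sysrq.h> for the real structure):

#include <linux/sysrq.h>

static void example_sysrq_handler(int key)
{
	/* dump driver state; no tty argument any more */
}

static struct sysrq_key_op example_sysrq_op = {
	.handler	= example_sysrq_handler,
	.help_msg	= "dump-example(X)",
	.action_msg	= "Dumping example driver state",
};

/* registered with register_sysrq_key('x', &example_sysrq_op) */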
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index fcff9e0bd38..d5717e2123e 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -517,7 +517,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
-static int __devinit mal_probe(struct of_device *ofdev,
+static int __devinit mal_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct mal_instance *mal;
@@ -730,7 +730,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
return err;
}
-static int __devexit mal_remove(struct of_device *ofdev)
+static int __devexit mal_remove(struct platform_device *ofdev)
{
struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index 9ededfbf072..66084214bf4 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -210,7 +210,7 @@ struct mal_instance {
dma_addr_t bd_dma;
struct mal_descriptor *bd_virt;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
int index;
spinlock_t lock;
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 108919bcdf1..dd61798897a 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -93,7 +93,7 @@ static inline u32 rgmii_mode_mask(int mode, int input)
}
}
-int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
+int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -122,7 +122,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
return 0;
}
-void rgmii_set_speed(struct of_device *ofdev, int input, int speed)
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -144,7 +144,7 @@ void rgmii_set_speed(struct of_device *ofdev, int input, int speed)
mutex_unlock(&dev->lock);
}
-void rgmii_get_mdio(struct of_device *ofdev, int input)
+void rgmii_get_mdio(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -165,7 +165,7 @@ void rgmii_get_mdio(struct of_device *ofdev, int input)
DBG2(dev, " fer = 0x%08x\n", fer);
}
-void rgmii_put_mdio(struct of_device *ofdev, int input)
+void rgmii_put_mdio(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -186,7 +186,7 @@ void rgmii_put_mdio(struct of_device *ofdev, int input)
mutex_unlock(&dev->lock);
}
-void rgmii_detach(struct of_device *ofdev, int input)
+void rgmii_detach(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p;
@@ -206,13 +206,13 @@ void rgmii_detach(struct of_device *ofdev, int input)
mutex_unlock(&dev->lock);
}
-int rgmii_get_regs_len(struct of_device *ofdev)
+int rgmii_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct rgmii_regs);
}
-void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
@@ -228,7 +228,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
}
-static int __devinit rgmii_probe(struct of_device *ofdev,
+static int __devinit rgmii_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -293,7 +293,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
return rc;
}
-static int __devexit rgmii_remove(struct of_device *ofdev)
+static int __devexit rgmii_remove(struct platform_device *ofdev)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/rgmii.h b/drivers/net/ibm_newemac/rgmii.h
index c4a4b358a27..d6979904986 100644
--- a/drivers/net/ibm_newemac/rgmii.h
+++ b/drivers/net/ibm_newemac/rgmii.h
@@ -51,20 +51,20 @@ struct rgmii_instance {
int users;
/* OF device instance */
- struct of_device *ofdev;
+ struct platform_device *ofdev;
};
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
extern int rgmii_init(void);
extern void rgmii_exit(void);
-extern int rgmii_attach(struct of_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct of_device *ofdev, int input);
-extern void rgmii_get_mdio(struct of_device *ofdev, int input);
-extern void rgmii_put_mdio(struct of_device *ofdev, int input);
-extern void rgmii_set_speed(struct of_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct of_device *ofdev);
-extern void *rgmii_dump_regs(struct of_device *ofdev, void *buf);
+extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+extern void rgmii_detach(struct platform_device *ofdev, int input);
+extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
+extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
+extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+extern int rgmii_get_regs_len(struct platform_device *ofdev);
+extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 044637144c4..299aa49490c 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -23,7 +23,7 @@
#include "emac.h"
#include "core.h"
-int __devinit tah_attach(struct of_device *ofdev, int channel)
+int __devinit tah_attach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -35,7 +35,7 @@ int __devinit tah_attach(struct of_device *ofdev, int channel)
return 0;
}
-void tah_detach(struct of_device *ofdev, int channel)
+void tah_detach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -44,7 +44,7 @@ void tah_detach(struct of_device *ofdev, int channel)
mutex_unlock(&dev->lock);
}
-void tah_reset(struct of_device *ofdev)
+void tah_reset(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct tah_regs __iomem *p = dev->base;
@@ -66,13 +66,13 @@ void tah_reset(struct of_device *ofdev)
TAH_MR_DIG);
}
-int tah_get_regs_len(struct of_device *ofdev)
+int tah_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct tah_regs);
}
-void *tah_dump_regs(struct of_device *ofdev, void *buf)
+void *tah_dump_regs(struct platform_device *ofdev, void *buf)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
@@ -87,7 +87,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit tah_probe(struct of_device *ofdev,
+static int __devinit tah_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -139,7 +139,7 @@ static int __devinit tah_probe(struct of_device *ofdev,
return rc;
}
-static int __devexit tah_remove(struct of_device *ofdev)
+static int __devexit tah_remove(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ibm_newemac/tah.h
index a068b5658da..61dbeca006d 100644
--- a/drivers/net/ibm_newemac/tah.h
+++ b/drivers/net/ibm_newemac/tah.h
@@ -48,7 +48,7 @@ struct tah_instance {
int users;
/* OF device instance */
- struct of_device *ofdev;
+ struct platform_device *ofdev;
};
@@ -74,11 +74,11 @@ struct tah_instance {
extern int tah_init(void);
extern void tah_exit(void);
-extern int tah_attach(struct of_device *ofdev, int channel);
-extern void tah_detach(struct of_device *ofdev, int channel);
-extern void tah_reset(struct of_device *ofdev);
-extern int tah_get_regs_len(struct of_device *ofdev);
-extern void *tah_dump_regs(struct of_device *ofdev, void *buf);
+extern int tah_attach(struct platform_device *ofdev, int channel);
+extern void tah_detach(struct platform_device *ofdev, int channel);
+extern void tah_reset(struct platform_device *ofdev);
+extern int tah_get_regs_len(struct platform_device *ofdev);
+extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 046dcd069c4..34ed6ee8ca8 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -82,7 +82,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
+int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct zmii_regs __iomem *p = dev->base;
@@ -148,7 +148,7 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
return 0;
}
-void zmii_get_mdio(struct of_device *ofdev, int input)
+void zmii_get_mdio(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
u32 fer;
@@ -161,7 +161,7 @@ void zmii_get_mdio(struct of_device *ofdev, int input)
out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
}
-void zmii_put_mdio(struct of_device *ofdev, int input)
+void zmii_put_mdio(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -170,7 +170,7 @@ void zmii_put_mdio(struct of_device *ofdev, int input)
}
-void zmii_set_speed(struct of_device *ofdev, int input, int speed)
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
u32 ssr;
@@ -191,7 +191,7 @@ void zmii_set_speed(struct of_device *ofdev, int input, int speed)
mutex_unlock(&dev->lock);
}
-void zmii_detach(struct of_device *ofdev, int input)
+void zmii_detach(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -210,13 +210,13 @@ void zmii_detach(struct of_device *ofdev, int input)
mutex_unlock(&dev->lock);
}
-int zmii_get_regs_len(struct of_device *ofdev)
+int zmii_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct zmii_regs);
}
-void *zmii_dump_regs(struct of_device *ofdev, void *buf)
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
@@ -231,7 +231,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit zmii_probe(struct of_device *ofdev,
+static int __devinit zmii_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -286,7 +286,7 @@ static int __devinit zmii_probe(struct of_device *ofdev,
return rc;
}
-static int __devexit zmii_remove(struct of_device *ofdev)
+static int __devexit zmii_remove(struct platform_device *ofdev)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/zmii.h b/drivers/net/ibm_newemac/zmii.h
index 6c9beba0c4b..1333fa2b278 100644
--- a/drivers/net/ibm_newemac/zmii.h
+++ b/drivers/net/ibm_newemac/zmii.h
@@ -48,20 +48,20 @@ struct zmii_instance {
u32 fer_save;
/* OF device instance */
- struct of_device *ofdev;
+ struct platform_device *ofdev;
};
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
extern int zmii_init(void);
extern void zmii_exit(void);
-extern int zmii_attach(struct of_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct of_device *ofdev, int input);
-extern void zmii_get_mdio(struct of_device *ofdev, int input);
-extern void zmii_put_mdio(struct of_device *ofdev, int input);
-extern void zmii_set_speed(struct of_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct of_device *ocpdev);
-extern void *zmii_dump_regs(struct of_device *ofdev, void *buf);
+extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+extern void zmii_detach(struct platform_device *ofdev, int input);
+extern void zmii_get_mdio(struct platform_device *ofdev, int input);
+extern void zmii_put_mdio(struct platform_device *ofdev, int input);
+extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+extern int zmii_get_regs_len(struct platform_device *ocpdev);
+extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define zmii_init() 0
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 294ccfb427c..0037a696cd0 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -602,7 +602,7 @@ static void irqrx_handler(struct net_device *dev)
/* set up skb fields */
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* bookkeeping */
dev->stats.rx_packets++;
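
Several receive paths in this series (gianfar, greth and ibmlana above) replace the explicit skb->ip_summed = CHECKSUM_NONE assignment with skb_checksum_none_assert(). A freshly allocated skb already starts out as CHECKSUM_NONE, so the helper only asserts that invariant when debug checking is compiled in; roughly (sketch of the intent, see <linux/skbuff.h> for the real helper):

#include <linux/skbuff.h>

static inline void example_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}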
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 2602852cc55..b3e157ed677 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,122 +1,84 @@
-/**************************************************************************/
-/* */
-/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
-/* Copyright (C) 2003 IBM Corp. */
-/* Originally written by Dave Larson (larson1@us.ibm.com) */
-/* Maintained by Santiago Leon (santil@us.ibm.com) */
-/* */
-/* This program is free software; you can redistribute it and/or modify */
-/* it under the terms of the GNU General Public License as published by */
-/* the Free Software Foundation; either version 2 of the License, or */
-/* (at your option) any later version. */
-/* */
-/* This program is distributed in the hope that it will be useful, */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
-/* GNU General Public License for more details. */
-/* */
-/* You should have received a copy of the GNU General Public License */
-/* along with this program; if not, write to the Free Software */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
-/* USA */
-/* */
-/* This module contains the implementation of a virtual ethernet device */
-/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
-/* option of the RS/6000 Platform Architechture to interface with virtual */
-/* ethernet NICs that are presented to the partition by the hypervisor. */
-/* */
-/**************************************************************************/
/*
- TODO:
- - add support for sysfs
- - possibly remove procfs support
-*/
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
-#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/slab.h>
-#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
-#include <asm/uaccess.h>
#include <asm/firmware.h>
-#include <linux/seq_file.h>
#include "ibmveth.h"
-#undef DEBUG
-
-#define ibmveth_printk(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
-
-#define ibmveth_error_printk(fmt, args...) \
- printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-
-#ifdef DEBUG
-#define ibmveth_debug_printk_no_adapter(fmt, args...) \
- printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
-#define ibmveth_debug_printk(fmt, args...) \
- printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-#define ibmveth_assert(expr) \
- if(!(expr)) { \
- printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
- BUG(); \
- }
-#else
-#define ibmveth_debug_printk_no_adapter(fmt, args...)
-#define ibmveth_debug_printk(fmt, args...)
-#define ibmveth_assert(expr)
-#endif
-
-static int ibmveth_open(struct net_device *dev);
-static int ibmveth_close(struct net_device *dev);
-static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-static int ibmveth_poll(struct napi_struct *napi, int budget);
-static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void ibmveth_set_multicast_list(struct net_device *dev);
-static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
-static void ibmveth_proc_register_driver(void);
-static void ibmveth_proc_unregister_driver(void);
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
-static struct kobj_type ktype_veth_pool;
+static struct kobj_type ktype_veth_pool;
-#ifdef CONFIG_PROC_FS
-#define IBMVETH_PROC_DIR "ibmveth"
-static struct proc_dir_entry *ibmveth_proc_dir;
-#endif
static const char ibmveth_driver_name[] = "ibmveth";
-static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.03"
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
-MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
-MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
+static unsigned int tx_copybreak __read_mostly = 128;
+module_param(tx_copybreak, uint, 0644);
+MODULE_PARM_DESC(tx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on transmit");
+
+static unsigned int rx_copybreak __read_mostly = 128;
+module_param(rx_copybreak, uint, 0644);
+MODULE_PARM_DESC(rx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
struct ibmveth_stat {
char name[ETH_GSTRING_LEN];
int offset;
@@ -128,12 +90,16 @@ struct ibmveth_stat {
struct ibmveth_stat ibmveth_stats[] = {
{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
- { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
- { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
+ { "replenish_add_buff_failure",
+ IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+ { "replenish_add_buff_success",
+ IBMVETH_STAT_OFF(replenish_add_buff_success) },
{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+ { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
+ { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};
/* simple methods of getting data from the current rxq entry */
@@ -144,41 +110,44 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
+ return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+ IBMVETH_RXQ_TOGGLE_SHIFT;
}
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
+ return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}
static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}
static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
- return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}
static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
/* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+ u32 pool_index, u32 pool_size,
+ u32 buff_size, u32 pool_active)
{
pool->size = pool_size;
pool->index = pool_index;
pool->buff_size = buff_size;
- pool->threshold = pool_size / 2;
+ pool->threshold = pool_size * 7 / 8;
pool->active = pool_active;
}
@@ -189,12 +158,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
- if(!pool->free_map) {
+ if (!pool->free_map)
return -1;
- }
pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
- if(!pool->dma_addr) {
+ if (!pool->dma_addr) {
kfree(pool->free_map);
pool->free_map = NULL;
return -1;
@@ -202,7 +170,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
- if(!pool->skbuff) {
+ if (!pool->skbuff) {
kfree(pool->dma_addr);
pool->dma_addr = NULL;
@@ -213,9 +181,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
- for(i = 0; i < pool->size; ++i) {
+ for (i = 0; i < pool->size; ++i)
pool->free_map[i] = i;
- }
atomic_set(&pool->available, 0);
pool->producer_index = 0;
@@ -224,10 +191,19 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return 0;
}
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+ asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
/* replenish the buffers for a pool. note that we don't need to
* skb_reserve these since they are used for incoming...
*/
-static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
{
u32 i;
u32 count = pool->size - atomic_read(&pool->available);
@@ -240,23 +216,26 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
mb();
- for(i = 0; i < count; ++i) {
+ for (i = 0; i < count; ++i) {
union ibmveth_buf_desc desc;
- skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
- if(!skb) {
- ibmveth_debug_printk("replenish: unable to allocate skb\n");
+ if (!skb) {
+ netdev_dbg(adapter->netdev,
+ "replenish: unable to allocate skb\n");
adapter->replenish_no_mem++;
break;
}
free_index = pool->consumer_index;
- pool->consumer_index = (pool->consumer_index + 1) % pool->size;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
index = pool->free_map[free_index];
- ibmveth_assert(index != IBM_VETH_INVALID_MAP);
- ibmveth_assert(pool->skbuff[index] == NULL);
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+ BUG_ON(pool->skbuff[index] != NULL);
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
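
The replenish loop above (and the producer-index update in ibmveth_remove_buffer_from_pool() below) replaces the modulo-based ring advance with an increment-and-wrap, avoiding an integer division on every buffer. The equivalent pattern, sketched:

#include <linux/types.h>

/* Sketch: advance a ring index without a divide in the hot path;
 * equivalent to (idx + 1) % ring_size */
static inline u32 example_ring_next(u32 idx, u32 ring_size)
{
	if (++idx >= ring_size)
		idx = 0;
	return idx;
}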
@@ -269,16 +248,23 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
pool->skbuff[index] = skb;
correlator = ((u64)pool->index << 32) | index;
- *(u64*)skb->data = correlator;
+ *(u64 *)skb->data = correlator;
desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
desc.fields.address = dma_addr;
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+ if (rx_flush) {
+ unsigned int len = min(pool->buff_size,
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
+ ibmveth_flush_buffer(skb->data, len);
+ }
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+ desc.desc);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc != H_SUCCESS) {
goto failure;
- else {
+ } else {
buffers_added++;
adapter->replenish_add_buff_success++;
}
@@ -313,26 +299,31 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
adapter->replenish_task_cycles++;
- for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
- if(adapter->rx_buff_pool[i].active)
- ibmveth_replenish_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
+ for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
+ struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
- adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+ if (pool->active &&
+ (atomic_read(&pool->available) < pool->threshold))
+ ibmveth_replenish_buffer_pool(adapter, pool);
+ }
+
+ adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+ 4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
-static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
{
int i;
kfree(pool->free_map);
pool->free_map = NULL;
- if(pool->skbuff && pool->dma_addr) {
- for(i = 0; i < pool->size; ++i) {
+ if (pool->skbuff && pool->dma_addr) {
+ for (i = 0; i < pool->size; ++i) {
struct sk_buff *skb = pool->skbuff[i];
- if(skb) {
+ if (skb) {
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[i],
pool->buff_size,
@@ -343,31 +334,32 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
}
}
- if(pool->dma_addr) {
+ if (pool->dma_addr) {
kfree(pool->dma_addr);
pool->dma_addr = NULL;
}
- if(pool->skbuff) {
+ if (pool->skbuff) {
kfree(pool->skbuff);
pool->skbuff = NULL;
}
}
/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- ibmveth_assert(pool < IbmVethNumBufferPools);
- ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
skb = adapter->rx_buff_pool[pool].skbuff[index];
- ibmveth_assert(skb != NULL);
+ BUG_ON(skb == NULL);
adapter->rx_buff_pool[pool].skbuff[index] = NULL;
@@ -377,9 +369,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
DMA_FROM_DEVICE);
free_index = adapter->rx_buff_pool[pool].producer_index;
- adapter->rx_buff_pool[pool].producer_index
- = (adapter->rx_buff_pool[pool].producer_index + 1)
- % adapter->rx_buff_pool[pool].size;
+ adapter->rx_buff_pool[pool].producer_index++;
+ if (adapter->rx_buff_pool[pool].producer_index >=
+ adapter->rx_buff_pool[pool].size)
+ adapter->rx_buff_pool[pool].producer_index = 0;
adapter->rx_buff_pool[pool].free_map[free_index] = index;
mb();
@@ -394,8 +387,8 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- ibmveth_assert(pool < IbmVethNumBufferPools);
- ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
return adapter->rx_buff_pool[pool].skbuff[index];
}
@@ -410,10 +403,10 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
union ibmveth_buf_desc desc;
unsigned long lpar_rc;
- ibmveth_assert(pool < IbmVethNumBufferPools);
- ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
- if(!adapter->rx_buff_pool[pool].active) {
+ if (!adapter->rx_buff_pool[pool].active) {
ibmveth_rxq_harvest_buffer(adapter);
ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
return;
@@ -425,12 +418,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
+ "during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
}
- if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
@@ -440,7 +434,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
- if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
@@ -451,7 +445,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
int i;
struct device *dev = &adapter->vdev->dev;
- if(adapter->buffer_list_addr != NULL) {
+ if (adapter->buffer_list_addr != NULL) {
if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -461,7 +455,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->buffer_list_addr = NULL;
}
- if(adapter->filter_list_addr != NULL) {
+ if (adapter->filter_list_addr != NULL) {
if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -471,7 +465,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->filter_list_addr = NULL;
}
- if(adapter->rx_queue.queue_addr != NULL) {
+ if (adapter->rx_queue.queue_addr != NULL) {
if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
dma_unmap_single(dev,
adapter->rx_queue.queue_dma,
@@ -483,7 +477,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->rx_queue.queue_addr = NULL;
}
- for(i = 0; i<IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
if (adapter->rx_buff_pool[i].active)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
@@ -506,9 +500,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
{
int rc, try_again = 1;
- /* After a kexec the adapter will still be open, so our attempt to
- * open it will fail. So if we get a failure we free the adapter and
- * try again, but only once. */
+ /*
+ * After a kexec the adapter will still be open, so our attempt to
+ * open it will fail. So if we get a failure we free the adapter and
+ * try again, but only once.
+ */
retry:
rc = h_register_logical_lan(adapter->vdev->unit_address,
adapter->buffer_list_dma, rxq_desc.desc,
@@ -537,28 +533,31 @@ static int ibmveth_open(struct net_device *netdev)
int i;
struct device *dev;
- ibmveth_debug_printk("open starting\n");
+ netdev_dbg(netdev, "open starting\n");
napi_enable(&adapter->napi);
- for(i = 0; i<IbmVethNumBufferPools; i++)
+ for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
- if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
- ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
+ if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter or buffer list "
+ "pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
}
- adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
- adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+ adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+ GFP_KERNEL);
- if(!adapter->rx_queue.queue_addr) {
- ibmveth_error_printk("unable to allocate rx queue pages\n");
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
@@ -577,7 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
(dma_mapping_error(dev, adapter->filter_list_dma)) ||
(dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
- ibmveth_error_printk("unable to map filter or buffer list pages\n");
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
@@ -590,20 +590,23 @@ static int ibmveth_open(struct net_device *netdev)
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
- rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
+ rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_queue.queue_len;
rxq_desc.fields.address = adapter->rx_queue.queue_dma;
- ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
- ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
- ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+ netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
+ netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
+ netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
- ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+ lpar_rc);
+ netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+ "desc:0x%llx MAC:0x%llx\n",
adapter->buffer_list_dma,
adapter->filter_list_dma,
rxq_desc.desc,
@@ -613,11 +616,11 @@ static int ibmveth_open(struct net_device *netdev)
return -ENONET;
}
- for(i = 0; i<IbmVethNumBufferPools; i++) {
- if(!adapter->rx_buff_pool[i].active)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (!adapter->rx_buff_pool[i].active)
continue;
if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
- ibmveth_error_printk("unable to alloc pool\n");
+ netdev_err(netdev, "unable to alloc pool\n");
adapter->rx_buff_pool[i].active = 0;
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -625,9 +628,12 @@ static int ibmveth_open(struct net_device *netdev)
}
}
- ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
- if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
- ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
+ netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
+ rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+ netdev);
+ if (rc != 0) {
+ netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+ netdev->irq, rc);
do {
rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
@@ -640,7 +646,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
if (!adapter->bounce_buffer) {
- ibmveth_error_printk("unable to allocate bounce buffer\n");
+ netdev_err(netdev, "unable to allocate bounce buffer\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
@@ -649,18 +655,18 @@ static int ibmveth_open(struct net_device *netdev)
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- ibmveth_error_printk("unable to map bounce buffer\n");
+ netdev_err(netdev, "unable to map bounce buffer\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
}
- ibmveth_debug_printk("initial replenish cycle\n");
+ netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
netif_start_queue(netdev);
- ibmveth_debug_printk("open complete\n");
+ netdev_dbg(netdev, "open complete\n");
return 0;
}
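
The open path above completes the move from the driver's private ibmveth_*_printk macros to the generic netdev_dbg()/netdev_err() helpers, which tag each message with the driver and interface name automatically. A sketch with illustrative messages:

#include <linux/netdevice.h>

static void example_report(struct net_device *netdev, int rc)
{
	netdev_dbg(netdev, "open starting\n");
	if (rc)
		netdev_err(netdev, "unable to request irq, rc %d\n", rc);
}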
@@ -670,7 +676,7 @@ static int ibmveth_close(struct net_device *netdev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long lpar_rc;
- ibmveth_debug_printk("close starting\n");
+ netdev_dbg(netdev, "close starting\n");
napi_disable(&adapter->napi);
@@ -683,26 +689,29 @@ static int ibmveth_close(struct net_device *netdev)
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- if(lpar_rc != H_SUCCESS)
- {
- ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
- lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+ "continuing with close\n", lpar_rc);
}
free_irq(netdev->irq, netdev);
- adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+ adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+ 4096 - 8);
ibmveth_cleanup(adapter);
- ibmveth_debug_printk("close complete\n");
+ netdev_dbg(netdev, "close complete\n");
return 0;
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
cmd->speed = SPEED_1000;
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_FIBRE;
@@ -714,12 +723,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
+ strncpy(info->version, ibmveth_driver_version,
+ sizeof(info->version) - 1);
}
-static u32 netdev_get_link(struct net_device *dev) {
+static u32 netdev_get_link(struct net_device *dev)
+{
return 1;
}
@@ -727,18 +740,20 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
- if (data)
+ if (data) {
adapter->rx_csum = 1;
- else {
+ } else {
/*
- * Since the ibmveth firmware interface does not have the concept of
- * separate tx/rx checksum offload enable, if rx checksum is disabled
- * we also have to disable tx checksum offload. Once we disable rx
- * checksum offload, we are no longer allowed to send tx buffers that
- * are not properly checksummed.
+ * Since the ibmveth firmware interface does not have the
+ * concept of separate tx/rx checksum offload enable, if rx
+ * checksum is disabled we also have to disable tx checksum
+ * offload. Once we disable rx checksum offload, we are no
+ * longer allowed to send tx buffers that are not properly
+ * checksummed.
*/
adapter->rx_csum = 0;
dev->features &= ~NETIF_F_IP_CSUM;
+ dev->features &= ~NETIF_F_IPV6_CSUM;
}
}
@@ -747,10 +762,15 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
if (data) {
- dev->features |= NETIF_F_IP_CSUM;
+ if (adapter->fw_ipv4_csum_support)
+ dev->features |= NETIF_F_IP_CSUM;
+ if (adapter->fw_ipv6_csum_support)
+ dev->features |= NETIF_F_IPV6_CSUM;
adapter->rx_csum = 1;
- } else
+ } else {
dev->features &= ~NETIF_F_IP_CSUM;
+ dev->features &= ~NETIF_F_IPV6_CSUM;
+ }
}
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
@@ -758,7 +778,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
unsigned long set_attr, clr_attr, ret_attr;
- long ret;
+ unsigned long set_attr6, clr_attr6;
+ long ret, ret6;
int rc1 = 0, rc2 = 0;
int restart = 0;
@@ -772,10 +793,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
set_attr = 0;
clr_attr = 0;
- if (data)
+ if (data) {
set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
- else
+ set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ } else {
clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ }
ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
@@ -786,18 +810,39 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
set_attr, &ret_attr);
if (ret != H_SUCCESS) {
- rc1 = -EIO;
- ibmveth_error_printk("unable to change checksum offload settings."
- " %d rc=%ld\n", data, ret);
+ netdev_err(dev, "unable to change IPv4 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret);
ret = h_illan_attributes(adapter->vdev->unit_address,
set_attr, clr_attr, &ret_attr);
+ } else {
+ adapter->fw_ipv4_csum_support = data;
+ }
+
+ ret6 = h_illan_attributes(adapter->vdev->unit_address,
+ clr_attr6, set_attr6, &ret_attr);
+
+ if (ret6 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv6 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret);
+
+ ret = h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6,
+ &ret_attr);
} else
+ adapter->fw_ipv6_csum_support = data;
+
+ if (ret == H_SUCCESS || ret6 == H_SUCCESS)
done(dev, data);
+ else
+ rc1 = -EIO;
} else {
rc1 = -EIO;
- ibmveth_error_printk("unable to change checksum offload settings."
- " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
+ netdev_err(dev, "unable to change checksum offload settings."
+ " %d rc=%ld ret_attr=%lx\n", data, ret,
+ ret_attr);
}
if (restart)
@@ -821,13 +866,14 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rc = 0;
- if (data && (dev->features & NETIF_F_IP_CSUM))
+ if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
return 0;
- if (!data && !(dev->features & NETIF_F_IP_CSUM))
+ if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
return 0;
if (data && !adapter->rx_csum)
- rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
+ rc = ibmveth_set_csum_offload(dev, data,
+ ibmveth_set_tx_csum_flags);
else
ibmveth_set_tx_csum_flags(dev, data);
@@ -881,6 +927,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_strings = ibmveth_get_strings,
.get_sset_count = ibmveth_get_sset_count,
.get_ethtool_stats = ibmveth_get_ethtool_stats,
+ .set_sg = ethtool_op_set_sg,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -890,129 +937,216 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
-static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+static int ibmveth_send(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc *descs)
{
- struct ibmveth_adapter *adapter = netdev_priv(netdev);
- union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
unsigned long correlator;
- unsigned long flags;
unsigned int retry_count;
- unsigned int tx_dropped = 0;
- unsigned int tx_bytes = 0;
- unsigned int tx_packets = 0;
- unsigned int tx_send_failed = 0;
- unsigned int tx_map_failed = 0;
- int used_bounce = 0;
- unsigned long data_dma_addr;
+ unsigned long ret;
+
+ /*
+ * The retry count sets a maximum for the number of broadcast and
+ * multicast destinations within the system.
+ */
+ retry_count = 1024;
+ correlator = 0;
+ do {
+ ret = h_send_logical_lan(adapter->vdev->unit_address,
+ descs[0].desc, descs[1].desc,
+ descs[2].desc, descs[3].desc,
+ descs[4].desc, descs[5].desc,
+ correlator, &correlator);
+ } while ((ret == H_BUSY) && (retry_count--));
- desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
+ if (ret != H_SUCCESS && ret != H_DROPPED) {
+ netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+ "with rc=%ld\n", ret);
+ return 1;
+ }
+ return 0;
+}
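
ibmveth_send() hands h_send_logical_lan() up to six buffer descriptors, so the transmit path below builds a descs[6] array. A sketch of the descriptor layout this code assumes, a 64-bit word overlaying a flags/length half and a DMA-address half; the field widths are an assumption based on the IBMVETH_BUF_* masks and the flags_len | length usage, not a quote from ibmveth.h:

	/* Assumed shape of the descriptor; see ibmveth.h for the authoritative one. */
	struct example_buf_desc_fields {
		u32 flags_len;	/* IBMVETH_BUF_VALID, checksum bits, buffer length */
		u32 address;	/* DMA address of the buffer */
	};

	union example_buf_desc {
		u64 desc;	/* the raw value passed to h_send_logical_lan() */
		struct example_buf_desc_fields fields;
	};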
+
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int desc_flags;
+ union ibmveth_buf_desc descs[6];
+ int last, i;
+ int force_bounce = 0;
+
+ /*
+ * veth handles a maximum of 6 segments including the header, so
+ * we have to linearize the skb if there are more than this.
+ */
+ if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ /* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
- ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
- ibmveth_error_printk("tx: failed to checksum packet\n");
- tx_dropped++;
+ ((skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol != IPPROTO_TCP) ||
+ (skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
+ skb_checksum_help(skb)) {
+
+ netdev_err(netdev, "tx: failed to checksum packet\n");
+ netdev->stats.tx_dropped++;
goto out;
}
+ desc_flags = IBMVETH_BUF_VALID;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
+ unsigned char *buf = skb_transport_header(skb) +
+ skb->csum_offset;
- desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
+ desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
/* Need to zero out the checksum */
buf[0] = 0;
buf[1] = 0;
}
- data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- ibmveth_error_printk("tx: unable to map xmit buffer\n");
+retry_bounce:
+ memset(descs, 0, sizeof(descs));
+
+ /*
+	 * If a linear packet is below the tx copybreak threshold then
+ * copy it into the static bounce buffer. This avoids the
+ * cost of a TCE insert and remove.
+ */
+ if (force_bounce || (!skb_is_nonlinear(skb) &&
+ (skb->len < tx_copybreak))) {
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
skb->len);
- desc.fields.address = adapter->bounce_buffer_dma;
- tx_map_failed++;
- used_bounce = 1;
- wmb();
- } else
- desc.fields.address = data_dma_addr;
-
- /* send the frame. Arbitrarily set retrycount to 1024 */
- correlator = 0;
- retry_count = 1024;
- do {
- lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
- desc.desc, 0, 0, 0, 0, 0,
- correlator, &correlator);
- } while ((lpar_rc == H_BUSY) && (retry_count--));
-
- if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
- ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
- ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
- (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
- skb->len, desc.fields.address);
- tx_send_failed++;
- tx_dropped++;
- } else {
- tx_packets++;
- tx_bytes += skb->len;
- netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+
+ descs[0].fields.flags_len = desc_flags | skb->len;
+ descs[0].fields.address = adapter->bounce_buffer_dma;
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ goto out;
}
- if (!used_bounce)
- dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
- skb->len, DMA_TO_DEVICE);
+ /* Map the header */
+ descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+ goto map_failed;
-out: spin_lock_irqsave(&adapter->stats_lock, flags);
- netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_packets;
- adapter->tx_send_failed += tx_send_failed;
- adapter->tx_map_failed += tx_map_failed;
- spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ /* Map the frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned long dma_addr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
+ frag->page_offset, frag->size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed_frags;
+
+ descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.address = dma_addr;
+ }
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+out:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
+
+map_failed_frags:
+ last = i+1;
+ for (i = 0; i < last; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+map_failed:
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ netdev_err(netdev, "tx: unable to map xmit buffer\n");
+ adapter->tx_map_failed++;
+ skb_linearize(skb);
+ force_bounce = 1;
+ goto retry_bounce;
}
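
The rewritten transmit path chooses between two strategies: small linear skbs are copied into a permanently mapped bounce buffer (avoiding a TCE map/unmap per packet), while everything else is scatter-gather mapped with one descriptor per fragment. A condensed illustration of that decision, assuming the tx_copybreak module parameter declared elsewhere in the patch; the helper name is hypothetical:

	/* Condensed illustration of the copybreak decision in ibmveth_start_xmit(). */
	static bool example_use_bounce(struct ibmveth_adapter *adapter,
				       struct sk_buff *skb)
	{
		if (skb_is_nonlinear(skb) || skb->len >= tx_copybreak)
			return false;	/* map the header and each fragment instead */

		/* one memcpy into the pre-mapped bounce buffer, no TCE churn */
		skb_copy_from_linear_data(skb, adapter->bounce_buffer, skb->len);
		return true;
	}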
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
- struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
+ struct ibmveth_adapter *adapter =
+ container_of(napi, struct ibmveth_adapter, napi);
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
- restart_poll:
+restart_poll:
do {
- struct sk_buff *skb;
-
if (!ibmveth_rxq_pending_buffer(adapter))
break;
- rmb();
+ smp_rmb();
if (!ibmveth_rxq_buffer_valid(adapter)) {
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
- ibmveth_debug_printk("recycling invalid buffer\n");
+ netdev_dbg(netdev, "recycling invalid buffer\n");
ibmveth_rxq_recycle_buffer(adapter);
} else {
+ struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
- if (csum_good)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rxq_harvest_buffer(adapter);
+ new_skb = NULL;
+ if (length < rx_copybreak)
+ new_skb = netdev_alloc_skb(netdev, length);
+
+ if (new_skb) {
+ skb_copy_to_linear_data(new_skb,
+ skb->data + offset,
+ length);
+ if (rx_flush)
+ ibmveth_flush_buffer(skb->data,
+ length + offset);
+ skb = new_skb;
+ ibmveth_rxq_recycle_buffer(adapter);
+ } else {
+ ibmveth_rxq_harvest_buffer(adapter);
+ skb_reserve(skb, offset);
+ }
- skb_reserve(skb, offset);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
+ if (csum_good)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
netif_receive_skb(skb); /* send it up */
netdev->stats.rx_packets++;
@@ -1030,7 +1164,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_ENABLE);
- ibmveth_assert(lpar_rc == H_SUCCESS);
+ BUG_ON(lpar_rc != H_SUCCESS);
napi_complete(napi);
@@ -1054,7 +1188,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- ibmveth_assert(lpar_rc == H_SUCCESS);
+ BUG_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1071,8 +1205,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
0);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "entering promisc mode\n", lpar_rc);
}
} else {
struct netdev_hw_addr *ha;
@@ -1082,19 +1217,23 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastDisableFiltering |
IbmVethMcastClearFilterTable,
0);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "attempting to clear filter table\n",
+ lpar_rc);
}
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
- // add the multicast address to the filter table
+ /* add the multicast address to the filter table */
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld "
+ "when adding an entry to the filter "
+ "table\n", lpar_rc);
}
}
@@ -1102,8 +1241,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
0);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "enabling filtering\n", lpar_rc);
}
}
}
@@ -1113,49 +1253,47 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
struct ibmveth_adapter *adapter = netdev_priv(dev);
struct vio_dev *viodev = adapter->vdev;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
- int i;
+ int i, rc;
+ int need_restart = 0;
- if (new_mtu < IBMVETH_MAX_MTU)
+ if (new_mtu < IBMVETH_MIN_MTU)
return -EINVAL;
- for (i = 0; i < IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
break;
- if (i == IbmVethNumBufferPools)
+ if (i == IBMVETH_NUM_BUFF_POOLS)
return -EINVAL;
/* Deactivate all the buffer pools so that the next loop can activate
only the buffer pools necessary to hold the new MTU */
- for (i = 0; i < IbmVethNumBufferPools; i++)
- if (adapter->rx_buff_pool[i].active) {
- ibmveth_free_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
- adapter->rx_buff_pool[i].active = 0;
- }
+ if (netif_running(adapter->netdev)) {
+ need_restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ }
/* Look for an active buffer pool that can hold the new MTU */
- for(i = 0; i<IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
- if (netif_running(adapter->netdev)) {
- adapter->pool_config = 1;
- ibmveth_close(adapter->netdev);
- adapter->pool_config = 0;
- dev->mtu = new_mtu;
- vio_cmo_set_dev_desired(viodev,
- ibmveth_get_desired_dma
- (viodev));
- return ibmveth_open(adapter->netdev);
- }
dev->mtu = new_mtu;
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
+			if (need_restart)
+				return ibmveth_open(adapter->netdev);
return 0;
}
}
+
+ if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+ return rc;
+
return -EINVAL;
}
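
Changing the MTU (and, further down, resizing a buffer pool through sysfs) relies on the same restart idiom: mark the close as a pool reconfiguration, close the device, apply the new configuration, then reopen. A hedged sketch of that idiom; the exact effect of pool_config inside ibmveth_close() is not visible in these hunks:

	/* Sketch of the close/reconfigure/reopen idiom shared by MTU and pool changes. */
	static int example_reconfigure(struct ibmveth_adapter *adapter,
				       void (*apply)(struct ibmveth_adapter *))
	{
		struct net_device *netdev = adapter->netdev;
		int restart = netif_running(netdev);

		if (restart) {
			adapter->pool_config = 1;	/* tell close this is a reconfig */
			ibmveth_close(netdev);
			adapter->pool_config = 0;
		}

		apply(adapter);		/* e.g. update dev->mtu or the pool sizes */

		return restart ? ibmveth_open(netdev) : 0;
	}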
@@ -1192,7 +1330,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu);
- for (i = 0; i < IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
if (adapter->rx_buff_pool[i].active)
ret +=
@@ -1221,41 +1359,36 @@ static const struct net_device_ops ibmveth_netdev_ops = {
#endif
};
-static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
{
int rc, i;
- long ret;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
- unsigned long set_attr, ret_attr;
-
unsigned char *mac_addr_p;
unsigned int *mcastFilterSize_p;
+ dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
+ dev->unit_address);
- ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
- dev->unit_address);
-
- mac_addr_p = (unsigned char *) vio_get_attribute(dev,
- VETH_MAC_ADDR, NULL);
- if(!mac_addr_p) {
- printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
- "attribute\n", __FILE__, __LINE__);
- return 0;
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+ NULL);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
}
- mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
- if(!mcastFilterSize_p) {
- printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
- "VETH_MCAST_FILTER_SIZE attribute\n",
- __FILE__, __LINE__);
- return 0;
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+ return -EINVAL;
}
netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
- if(!netdev)
+ if (!netdev)
return -ENOMEM;
adapter = netdev_priv(netdev);
@@ -1263,19 +1396,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->mcastFilterSize= *mcastFilterSize_p;
+ adapter->mcastFilterSize = *mcastFilterSize_p;
adapter->pool_config = 0;
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- /* Some older boxes running PHYP non-natively have an OF that
- returns a 8-byte local-mac-address field (and the first
- 2 bytes have to be ignored) while newer boxes' OF return
- a 6-byte field. Note that IEEE 1275 specifies that
- local-mac-address must be a 6-byte field.
- The RPA doc specifies that the first byte must be 10b, so
- we'll just look for it to solve this 8 vs. 6 byte field issue */
-
+ /*
+ * Some older boxes running PHYP non-natively have an OF that returns
+	 * an 8-byte local-mac-address field (and the first 2 bytes have to be
+ * ignored) while newer boxes' OF return a 6-byte field. Note that
+ * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+ * The RPA doc specifies that the first byte must be 10b, so we'll
+ * just look for it to solve this 8 vs. 6 byte field issue
+ */
if ((*mac_addr_p & 0x3) != 0x02)
mac_addr_p += 2;
@@ -1286,12 +1419,11 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->features |= NETIF_F_LLTX;
- spin_lock_init(&adapter->stats_lock);
+ netdev->features |= NETIF_F_SG;
memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
- for(i = 0; i<IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
int error;
@@ -1304,41 +1436,25 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
kobject_uevent(kobj, KOBJ_ADD);
}
- ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
+ netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
adapter->buffer_list_dma = DMA_ERROR_CODE;
adapter->filter_list_dma = DMA_ERROR_CODE;
adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
- ibmveth_debug_printk("registering netdev...\n");
-
- ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
+ netdev_dbg(netdev, "registering netdev...\n");
- if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
- !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
- (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
- set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
-
- ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
-
- if (ret == H_SUCCESS) {
- adapter->rx_csum = 1;
- netdev->features |= NETIF_F_IP_CSUM;
- } else
- ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
- }
+ ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);
rc = register_netdev(netdev);
- if(rc) {
- ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
+ if (rc) {
+ netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
free_netdev(netdev);
return rc;
}
- ibmveth_debug_printk("registered\n");
-
- ibmveth_proc_register_adapter(adapter);
+ netdev_dbg(netdev, "registered\n");
return 0;
}
@@ -1349,114 +1465,23 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
- for(i = 0; i<IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
unregister_netdev(netdev);
- ibmveth_proc_unregister_adapter(adapter);
-
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return 0;
}
-#ifdef CONFIG_PROC_FS
-static void ibmveth_proc_register_driver(void)
-{
- ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
- if (ibmveth_proc_dir) {
- }
-}
-
-static void ibmveth_proc_unregister_driver(void)
-{
- remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
-}
-
-static int ibmveth_show(struct seq_file *seq, void *v)
-{
- struct ibmveth_adapter *adapter = seq->private;
- char *current_mac = (char *) adapter->netdev->dev_addr;
- char *firmware_mac = (char *) &adapter->mac_addr;
-
- seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
-
- seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
- seq_printf(seq, "Current MAC: %pM\n", current_mac);
- seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac);
-
- seq_printf(seq, "\nAdapter Statistics:\n");
- seq_printf(seq, " TX: vio_map_single failres: %lld\n", adapter->tx_map_failed);
- seq_printf(seq, " send failures: %lld\n", adapter->tx_send_failed);
- seq_printf(seq, " RX: replenish task cycles: %lld\n", adapter->replenish_task_cycles);
- seq_printf(seq, " alloc_skb_failures: %lld\n", adapter->replenish_no_mem);
- seq_printf(seq, " add buffer failures: %lld\n", adapter->replenish_add_buff_failure);
- seq_printf(seq, " invalid buffers: %lld\n", adapter->rx_invalid_buffer);
- seq_printf(seq, " no buffers: %lld\n", adapter->rx_no_buffer);
-
- return 0;
-}
-
-static int ibmveth_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ibmveth_show, PDE(inode)->data);
-}
-
-static const struct file_operations ibmveth_proc_fops = {
- .owner = THIS_MODULE,
- .open = ibmveth_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
-{
- struct proc_dir_entry *entry;
- if (ibmveth_proc_dir) {
- char u_addr[10];
- sprintf(u_addr, "%x", adapter->vdev->unit_address);
- entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
- &ibmveth_proc_fops, adapter);
- if (!entry)
- ibmveth_error_printk("Cannot create adapter proc entry");
- }
-}
-
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
-{
- if (ibmveth_proc_dir) {
- char u_addr[10];
- sprintf(u_addr, "%x", adapter->vdev->unit_address);
- remove_proc_entry(u_addr, ibmveth_proc_dir);
- }
-}
-
-#else /* CONFIG_PROC_FS */
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
-{
-}
-
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
-{
-}
-static void ibmveth_proc_register_driver(void)
-{
-}
-
-static void ibmveth_proc_unregister_driver(void)
-{
-}
-#endif /* CONFIG_PROC_FS */
-
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;
-static ssize_t veth_pool_show(struct kobject * kobj,
- struct attribute * attr, char * buf)
+static ssize_t veth_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
@@ -1471,8 +1496,8 @@ static ssize_t veth_pool_show(struct kobject * kobj,
return 0;
}
-static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
-const char * buf, size_t count)
+static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
@@ -1486,8 +1511,9 @@ const char * buf, size_t count)
if (attr == &veth_active_attr) {
if (value && !pool->active) {
if (netif_running(netdev)) {
- if(ibmveth_alloc_buffer_pool(pool)) {
- ibmveth_error_printk("unable to alloc pool\n");
+ if (ibmveth_alloc_buffer_pool(pool)) {
+ netdev_err(netdev,
+ "unable to alloc pool\n");
return -ENOMEM;
}
pool->active = 1;
@@ -1496,14 +1522,15 @@ const char * buf, size_t count)
adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
- } else
+ } else {
pool->active = 1;
+ }
} else if (!value && pool->active) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
can hold a packet of the size of the MTU */
- for (i = 0; i < IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
if (pool == &adapter->rx_buff_pool[i])
continue;
if (!adapter->rx_buff_pool[i].active)
@@ -1512,8 +1539,8 @@ const char * buf, size_t count)
break;
}
- if (i == IbmVethNumBufferPools) {
- ibmveth_error_printk("no active pool >= MTU\n");
+ if (i == IBMVETH_NUM_BUFF_POOLS) {
+ netdev_err(netdev, "no active pool >= MTU\n");
return -EPERM;
}
@@ -1528,9 +1555,9 @@ const char * buf, size_t count)
pool->active = 0;
}
} else if (attr == &veth_num_attr) {
- if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
return -EINVAL;
- else {
+ } else {
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
@@ -1538,13 +1565,14 @@ const char * buf, size_t count)
pool->size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
- } else
+ } else {
pool->size = value;
+ }
}
} else if (attr == &veth_size_attr) {
- if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
return -EINVAL;
- else {
+ } else {
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
@@ -1552,8 +1580,9 @@ const char * buf, size_t count)
pool->buff_size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
- } else
+ } else {
pool->buff_size = value;
+ }
}
}
@@ -1563,16 +1592,16 @@ const char * buf, size_t count)
}
-#define ATTR(_name, _mode) \
- struct attribute veth_##_name##_attr = { \
- .name = __stringify(_name), .mode = _mode, \
- };
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, \
+ };
static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);
-static struct attribute * veth_pool_attrs[] = {
+static struct attribute *veth_pool_attrs[] = {
&veth_active_attr,
&veth_num_attr,
&veth_size_attr,
@@ -1597,7 +1626,7 @@ static int ibmveth_resume(struct device *dev)
return 0;
}
-static struct vio_device_id ibmveth_device_table[] __devinitdata= {
+static struct vio_device_id ibmveth_device_table[] __devinitdata = {
{ "network", "IBM,l-lan"},
{ "", "" }
};
@@ -1621,9 +1650,8 @@ static struct vio_driver ibmveth_driver = {
static int __init ibmveth_module_init(void)
{
- ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
-
- ibmveth_proc_register_driver();
+ printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
+ ibmveth_driver_string, ibmveth_driver_version);
return vio_register_driver(&ibmveth_driver);
}
@@ -1631,7 +1659,6 @@ static int __init ibmveth_module_init(void)
static void __exit ibmveth_module_exit(void)
{
vio_unregister_driver(&ibmveth_driver);
- ibmveth_proc_unregister_driver();
}
module_init(ibmveth_module_init);
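
The new fast paths refer to tx_copybreak, rx_copybreak and rx_flush, none of which are declared in the hunks shown here; they are presumably module parameters introduced earlier in ibmveth.c by this patch. A sketch of how such knobs are typically declared; the names come from the call sites above, but the types, defaults and descriptions are assumptions:

	/* Assumed declarations; the real ones live earlier in ibmveth.c. */
	static unsigned int tx_copybreak __read_mostly = 128;
	module_param(tx_copybreak, uint, 0644);
	MODULE_PARM_DESC(tx_copybreak,
		"Maximum size of packet that is copied to a new buffer on transmit");

	static unsigned int rx_copybreak __read_mostly = 128;
	module_param(rx_copybreak, uint, 0644);
	MODULE_PARM_DESC(rx_copybreak,
		"Maximum size of packet that is copied to a new buffer on receive");

	static unsigned int rx_flush __read_mostly;
	module_param(rx_flush, uint, 0644);
	MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");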
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index ec76ace66c6..43a794fab9f 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -1,26 +1,28 @@
-/**************************************************************************/
-/* */
-/* IBM eServer i/[Series Virtual Ethernet Device Driver */
-/* Copyright (C) 2003 IBM Corp. */
-/* Dave Larson (larson1@us.ibm.com) */
-/* Santiago Leon (santil@us.ibm.com) */
-/* */
-/* This program is free software; you can redistribute it and/or modify */
-/* it under the terms of the GNU General Public License as published by */
-/* the Free Software Foundation; either version 2 of the License, or */
-/* (at your option) any later version. */
-/* */
-/* This program is distributed in the hope that it will be useful, */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
-/* GNU General Public License for more details. */
-/* */
-/* You should have received a copy of the GNU General Public License */
-/* along with this program; if not, write to the Free Software */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
-/* USA */
-/* */
-/**************************************************************************/
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
#ifndef _IBMVETH_H
#define _IBMVETH_H
@@ -92,17 +94,17 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define h_change_logical_lan_mac(ua, mac) \
plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
-#define IbmVethNumBufferPools 5
+#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
-#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
-static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_active[] = { 1, 1, 0, 0, 0};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -142,13 +144,15 @@ struct ibmveth_adapter {
void * filter_list_addr;
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
/* adapter specific stats */
u64 replenish_task_cycles;
u64 replenish_no_mem;
@@ -158,7 +162,6 @@ struct ibmveth_adapter {
u64 rx_no_buffer;
u64 tx_map_failed;
u64 tx_send_failed;
- spinlock_t stats_lock;
};
struct ibmveth_buf_desc_fields {
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 187622f1c81..bc183f5487c 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -132,6 +132,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
mac->type = e1000_82580;
break;
case E1000_DEV_ID_I350_COPPER:
@@ -282,10 +284,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
phy->type = e1000_phy_m88;
phy->ops.get_phy_info = igb_get_phy_info_m88;
- phy->ops.get_cable_length = igb_get_cable_length_m88;
+
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID)
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = igb_get_cable_length_m88;
+
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
case IGP03E1000_E_PHY_ID:
@@ -1058,7 +1068,11 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
}
switch (hw->phy.type) {
case e1000_phy_m88:
- ret_val = igb_copper_link_setup_m88(hw);
+ if (hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ ret_val = igb_copper_link_setup_m88_gen2(hw);
+ else
+ ret_val = igb_copper_link_setup_m88(hw);
break;
case e1000_phy_igp_3:
ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index bbd2ec308eb..62222796a8b 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -634,6 +634,8 @@
* E = External
*/
#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1112_E_PHY_ID 0x01410C90
+#define I347AT4_E_PHY_ID 0x01410DC0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define I82580_I_PHY_ID 0x015403A0
#define I350_I_PHY_ID 0x015403B0
@@ -702,6 +704,35 @@
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+/* Intel i347-AT4 Registers */
+
+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
+
+/* i347-AT4 Extended PHY Specific Control Register */
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+/* i347-AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* Marvell 1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
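
The new I347AT4_PSCR_DOWNSHIFT_* values describe a three-bit retry count plus an enable bit, so programming a downshift of N attempts is a read-modify-write: clear the mask, or in the count, set the enable bit. A minimal sketch of the 6X case that igb_copper_link_setup_m88_gen2() uses; the helper is hypothetical and the caller is assumed to read and write the register through phy->ops:

	/* Sketch: set an i347-AT4 downshift of 6 attempts in a PHY control word. */
	static u16 example_downshift_6x(u16 phy_ctrl)
	{
		phy_ctrl &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
		phy_ctrl |= I347AT4_PSCR_DOWNSHIFT_6X | I347AT4_PSCR_DOWNSHIFT_ENABLE;
		return phy_ctrl;	/* caller writes this back via phy->ops.write_reg() */
	}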
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index cb8db78b1a0..c0b017f8d78 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,8 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0436
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x0438
#define E1000_DEV_ID_I350_COPPER 0x1521
#define E1000_DEV_ID_I350_FIBER 0x1522
#define E1000_DEV_ID_I350_SERDES 0x1523
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index cf1f3230092..ddd036a7899 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -570,6 +570,89 @@ out:
}
/**
+ * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
+ * Also enables and sets the downshift parameters.
+ **/
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ if (phy->reset_disable) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+		/* M88E1112 does not support this mode */
+ if (phy->id != M88E1112_E_PHY_ID) {
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ }
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	/* Enable downshift and set it to 6X */
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Commit the changes. */
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
* igb_copper_link_setup_igp - Setup igp PHY's for copper link
* @hw: pointer to the HW structure
*
@@ -1124,18 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
goto out;
if (!link) {
- /*
- * We didn't get link.
- * Reset the DSP and cross our fingers.
- */
- ret_val = phy->ops.write_reg(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
- if (ret_val)
- goto out;
- ret_val = igb_phy_reset_dsp(hw);
- if (ret_val)
- goto out;
+ if (hw->phy.type != e1000_phy_m88 ||
+ hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID) {
+ hw_dbg("Link taking longer than expected.\n");
+ } else {
+
+ /*
+ * We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = phy->ops.write_reg(hw,
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ goto out;
+ ret_val = igb_phy_reset_dsp(hw);
+ if (ret_val)
+ goto out;
+ }
}
/* Try once more */
@@ -1145,6 +1235,11 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
goto out;
}
+ if (hw->phy.type != e1000_phy_m88 ||
+ hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ goto out;
+
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
@@ -1557,6 +1652,93 @@ out:
return ret_val;
}
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, phy_data2, index, default_page, is_cm;
+
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ /* Remember the original page select and set it to 7 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+ if (ret_val)
+ goto out;
+
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ goto out;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+		/* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ goto out;
+ break;
+ case M88E1112_E_PHY_ID:
+ /* Remember the original page select and set it to 5 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length +
+ phy->max_cable_length) / 2;
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ goto out;
+
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
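+
+/*
+ * Both branches above follow the same paged-register access pattern: save the
+ * current I347AT4_PAGE_SELECT value, switch to the diagnostics page, read the
+ * measurement, then restore the original page. A compact sketch of that
+ * pattern with a hypothetical helper, assuming the page can always be
+ * restored unconditionally:
+ *
+ *	static s32 example_read_paged(struct e1000_hw *hw, u16 page,
+ *				      u32 reg, u16 *val)
+ *	{
+ *		struct e1000_phy_info *phy = &hw->phy;
+ *		u16 saved_page;
+ *		s32 ret_val;
+ *
+ *		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, &saved_page);
+ *		if (ret_val)
+ *			return ret_val;
+ *
+ *		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, page);
+ *		if (!ret_val)
+ *			ret_val = phy->ops.read_reg(hw, reg, val);
+ *
+ *		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, saved_page);
+ *		return ret_val;
+ *	}
+ */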
+
/**
* igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
* @hw: pointer to the HW structure
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 565a6dbb371..2cc117705a3 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -45,9 +45,11 @@ s32 igb_check_downshift(struct e1000_hw *hw);
s32 igb_check_reset_block(struct e1000_hw *hw);
s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
s32 igb_get_cable_length_m88(struct e1000_hw *hw);
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
s32 igb_get_phy_id(struct e1000_hw *hw);
s32 igb_get_phy_info_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 6e63d9a7fc7..44e0ff1494e 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -143,7 +143,7 @@ struct igb_buffer {
u16 next_to_watch;
unsigned int bytecount;
u16 gso_segs;
- union skb_shared_tx shtx;
+ u8 tx_flags;
u8 mapped_as_page;
};
/* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b4e5895f5f..5b04eff2fd2 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -71,6 +71,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -986,7 +988,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
int err;
int numvecs, i;
@@ -1052,8 +1054,10 @@ msi_only:
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
out:
- /* Notify the stack of the (possibly) reduced Tx Queue count. */
- adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+ /* Notify the stack of the (possibly) reduced queue counts. */
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ return netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
}
/**
@@ -1152,7 +1156,9 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err;
- igb_set_interrupt_capability(adapter);
+ err = igb_set_interrupt_capability(adapter);
+ if (err)
+ return err;
err = igb_alloc_q_vectors(adapter);
if (err) {
@@ -1856,8 +1862,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
- if (pci_using_dac)
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CSUM;
@@ -1888,9 +1896,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
- setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+ setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
- setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+ setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
(unsigned long) adapter);
INIT_WORK(&adapter->reset_task, igb_reset_task);
@@ -3954,7 +3962,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+ tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
/* multiply data chunks by size of headers */
tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4088,7 +4096,6 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
u32 tx_flags = 0;
u16 first;
u8 hdr_len = 0;
- union skb_shared_tx *shtx = skb_tx(skb);
/* need: 1 descriptor per page,
* + 2 desc gap to keep tail from touching head,
@@ -4100,8 +4107,8 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (unlikely(shtx->hardware)) {
- shtx->in_progress = 1;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
}
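
This hunk is part of the tree-wide move from union skb_shared_tx to bit flags carried in skb_shinfo(skb)->tx_flags: the driver now tests SKBTX_HW_TSTAMP and sets SKBTX_IN_PROGRESS directly, and (per the igb_tx_map_adv() hunk above) copies the flags into buffer_info->tx_flags so TX cleanup can check them. A minimal sketch of that pairing; the helper name is hypothetical:

	/* Illustration of the new tx_flags handling across xmit and TX cleanup. */
	static void example_mark_tx_tstamp(struct sk_buff *skb,
					   struct igb_buffer *buffer_info)
	{
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		/* carried to igb_tx_hwtstamp(), which bails out unless the bit is set */
		buffer_info->tx_flags = skb_shinfo(skb)->tx_flags;
	}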
@@ -4660,12 +4667,13 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
u32 vmolr = rd32(E1000_VMOLR(vf));
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
+ vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
IGB_VF_FLAG_MULTI_PROMISC);
vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
vmolr |= E1000_VMOLR_MPME;
+ vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
} else {
/*
@@ -5319,7 +5327,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!buffer_info->shtx.hardware) ||
+ if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -5431,7 +5439,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_ring->total_packets += total_packets;
tx_ring->tx_stats.bytes += total_bytes;
tx_ring->tx_stats.packets += total_packets;
- return (count < tx_ring->count);
+ return count < tx_ring->count;
}
/**
@@ -5456,7 +5464,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
u32 status_err, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
@@ -5500,7 +5508,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* values must belong to this one here and therefore we don't need to
* compare any of the additional attributes stored for it.
*
- * If nothing went wrong, then it should have a skb_shared_tx that we
+ * If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
if (staterr & E1000_RXDADV_STAT_TSIP) {
@@ -6107,6 +6115,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
mac->autoneg = 0;
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+ if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
+ spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+ dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+
switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_10_HALF;
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 103b3aa1afc..33add708bcb 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -153,7 +153,7 @@ static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
static u32 igbvf_get_tx_csum(struct net_device *netdev)
{
- return ((netdev->features & NETIF_F_IP_CSUM) != 0);
+ return (netdev->features & NETIF_F_IP_CSUM) != 0;
}
static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index c539f7c9c3e..265501348f3 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -103,7 +103,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
u32 status_err, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
if ((status_err & E1000_RXD_STAT_IXSM) ||
@@ -845,7 +845,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
}
adapter->net_stats.tx_bytes += total_bytes;
adapter->net_stats.tx_packets += total_packets;
- return (count < tx_ring->count);
+ return count < tx_ring->count;
}
static irqreturn_t igbvf_msix_other(int irq, void *data)
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 0b3f6df5cff..c8ee8d28767 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -827,7 +827,7 @@ static void ioc3_mii_start(struct ioc3_private *ip)
{
ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
ip->ioc3_timer.data = (unsigned long) ip;
- ip->ioc3_timer.function = &ioc3_timer;
+ ip->ioc3_timer.function = ioc3_timer;
add_timer(&ip->ioc3_timer);
}
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 72e3d2da9e9..dc019809234 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1213,7 +1213,7 @@ static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
skb_put(skb, framelen);
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_rx(skb);
sp->rx_buff[entry] = NULL;
}
@@ -1278,7 +1278,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
jumbo->skb->protocol =
eth_type_trans(jumbo->skb, dev);
- jumbo->skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(jumbo->skb);
netif_rx(jumbo->skb);
}
}
@@ -1476,7 +1476,7 @@ static int ipg_nic_rx(struct net_device *dev)
* IP/TCP/UDP frame was received. Let the
* upper layer decide.
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Hand off frame for higher layer processing.
* The function netif_rx() releases the sk_buff
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 48bd5ec9f29..b626cccbccd 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -217,7 +217,7 @@ toshoboe_checkfcs (unsigned char *buf, int len)
for (i = 0; i < len; ++i)
fcs.value = irda_fcs (fcs.value, *(buf++));
- return (fcs.value == GOOD_FCS);
+ return fcs.value == GOOD_FCS;
}
/***********************************************************************/
@@ -759,7 +759,7 @@ toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
if (fir)
{
memset (buf, 0, TT_LEN);
- return (TT_LEN);
+ return TT_LEN;
}
fcs.value = INIT_FCS;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 4441fa3389c..e4ea61944c2 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1124,11 +1124,11 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
* The actual image starts after the "STMP" keyword
* so forward to the firmware header tag
*/
- for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG) &&
- (i < fw->size); i++) ;
+ for (i = 0; i < fw->size && fw->data[i] !=
+ STIR421X_PATCH_END_OF_HDR_TAG; i++) ;
/* here we check for the out of buffer case */
- if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) &&
- (i < STIR421X_PATCH_CODE_OFFSET)) {
+ if (i < STIR421X_PATCH_CODE_OFFSET && i < fw->size &&
+ STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) {
if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
@@ -1514,7 +1514,7 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
__func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
- return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
+ return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
}
#ifdef IU_DUMP_CLASS_DESC
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 5b1036ac38d..74b20f179ce 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -734,7 +734,7 @@ static int mcs_net_open(struct net_device *netdev)
}
if (!mcs_setup_urbs(mcs))
- goto error3;
+ goto error3;
ret = mcs_receive_start(mcs);
if (ret)
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index e30cdbb1474..559fe854d76 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1348,7 +1348,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
outb(bank, iobase+BSR);
/* Make sure interrupt handlers keep the proper interrupt mask */
- return(ier);
+ return ier;
}
/*
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index edd5666f0ff..9e3f4f54281 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -748,7 +748,6 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
struct net_device *ndev;
struct sh_irda_self *self;
struct resource *res;
- char clk_name[8];
int irq;
int err = -ENOMEM;
@@ -775,10 +774,9 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
if (err)
goto err_mem_2;
- snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
- self->clk = clk_get(&pdev->dev, clk_name);
+ self->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(self->clk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ dev_err(&pdev->dev, "cannot get irda clock\n");
goto err_mem_3;
}
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 1b051dab7b2..39d6e6f15d4 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -336,7 +336,7 @@ static int sirdev_is_receiving(struct sir_dev *dev)
if (!atomic_read(&dev->enable_rx))
return 0;
- return (dev->rx_buff.state != OUTSIDE_FRAME);
+ return dev->rx_buff.state != OUTSIDE_FRAME;
}
int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 850ca1c5ee1..8c57bfb5f09 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2051,7 +2051,7 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
*/
static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
{
- return (self->rx_buff.state != OUTSIDE_FRAME);
+ return self->rx_buff.state != OUTSIDE_FRAME;
}
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index e5698fa30a4..41c96b3d815 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -219,7 +219,7 @@ static inline int read_reg(struct stir_cb *stir, __u16 reg,
static inline int isfir(u32 speed)
{
- return (speed == 4000000);
+ return speed == 4000000;
}
/*
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index b0a6cd815be..67c0ad42d81 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1182,12 +1182,13 @@ F01_E */
skb = dev_alloc_skb(len + 1 - 4);
/*
- * if frame size,data ptr,or skb ptr are wrong ,the get next
+ * if frame size, data ptr, or skb ptr are wrong, then get next
* entry.
*/
if ((skb == NULL) || (skb->data == NULL) ||
(self->rx_buff.data == NULL) || (len < 6)) {
self->netdev->stats.rx_dropped++;
+ kfree_skb(skb);
return TRUE;
}
skb_reserve(skb, 1);
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 5a84822b5a4..c6f58482b76 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -238,7 +238,7 @@ static void WriteLPCReg(int iRegNum, unsigned char iVal)
static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
{
- return ((__u8) inb(BaseAddr + iRegNum));
+ return (__u8) inb(BaseAddr + iRegNum);
}
static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3f24a1f3302..d66fab854bf 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -595,7 +595,7 @@ struct ring_descr {
static inline int rd_is_active(struct ring_descr *rd)
{
- return ((rd->hw->rd_status & RD_ACTIVE) != 0);
+ return (rd->hw->rd_status & RD_ACTIVE) != 0;
}
static inline void rd_activate(struct ring_descr *rd)
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index ba1de5973fb..8df645e78f2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1524,7 +1524,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_rx(skb); /* send it up */
dev->stats.rx_packets++;
dev->stats.rx_bytes += length;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 813993f9c65..c982ab9f900 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -296,12 +296,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
eecd_reg = IXGB_READ_REG(hw, EECD);
if (eecd_reg & IXGB_EECD_DO)
- return (true);
+ return true;
udelay(50);
}
ASSERT(0);
- return (false);
+ return false;
}
/******************************************************************************
@@ -327,9 +327,9 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
checksum += ixgb_read_eeprom(hw, i);
if (checksum == (u16) EEPROM_SUM)
- return (true);
+ return true;
else
- return (false);
+ return false;
}
/******************************************************************************
@@ -439,7 +439,7 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
/* End this read operation */
ixgb_standby_eeprom(hw);
- return (data);
+ return data;
}
/******************************************************************************
@@ -476,16 +476,16 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
- return (false);
+ return false;
}
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
pr_debug("Signature invalid\n");
- return(false);
+ return false;
}
- return(true);
+ return true;
}
/******************************************************************************
@@ -505,7 +505,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
== cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- return (true);
+ return true;
} else {
return ixgb_get_eeprom_data(hw);
}
@@ -526,10 +526,10 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
if ((index < IXGB_EEPROM_SIZE) &&
(ixgb_check_and_get_eeprom_data(hw) == true)) {
- return(hw->eeprom[index]);
+ return hw->eeprom[index];
}
- return(0);
+ return 0;
}
/******************************************************************************
@@ -570,10 +570,10 @@ u32
ixgb_get_ee_pba_number(struct ixgb_hw *hw)
{
if (ixgb_check_and_get_eeprom_data(hw) == true)
- return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
- | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
+ return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
+ | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
- return(0);
+ return 0;
}
@@ -591,8 +591,8 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == true)
- return (le16_to_cpu(ee_map->device_id));
+ return le16_to_cpu(ee_map->device_id);
- return (0);
+ return 0;
}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index a4ed96caae6..43994c19999 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -410,7 +410,7 @@ static int
ixgb_get_eeprom_len(struct net_device *netdev)
{
/* return size in bytes */
- return (IXGB_EEPROM_SIZE << 1);
+ return IXGB_EEPROM_SIZE << 1;
}
static int
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 397acabccab..6cb2e42ff4c 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -167,7 +167,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
/* Clear any pending interrupt events. */
icr_reg = IXGB_READ_REG(hw, ICR);
- return (ctrl_reg & IXGB_CTRL0_RST);
+ return ctrl_reg & IXGB_CTRL0_RST;
}
@@ -209,7 +209,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
xpak_vendor = ixgb_xpak_vendor_infineon;
}
- return (xpak_vendor);
+ return xpak_vendor;
}
/******************************************************************************
@@ -273,7 +273,7 @@ ixgb_identify_phy(struct ixgb_hw *hw)
if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
phy_type = ixgb_phy_type_bcm;
- return (phy_type);
+ return phy_type;
}
/******************************************************************************
@@ -366,7 +366,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
/* 82597EX errata: Call check-for-link in case lane deskew is locked */
ixgb_check_for_link(hw);
- return (status);
+ return status;
}
/******************************************************************************
@@ -531,7 +531,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
}
hash_value &= 0xFFF;
- return (hash_value);
+ return hash_value;
}
/******************************************************************************
@@ -715,7 +715,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
}
IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
}
- return (status);
+ return status;
}
/******************************************************************************
@@ -1140,7 +1140,7 @@ mac_addr_valid(u8 *mac_addr)
pr_debug("MAC address is all zeros\n");
is_valid = false;
}
- return (is_valid);
+ return is_valid;
}
/******************************************************************************
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 45fc89b9ba6..80e62578ffa 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -446,8 +446,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_FILTER;
netdev->features |= NETIF_F_TSO;
- if (pci_using_dac)
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
/* make sure the EEPROM is good */
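The ixgb_probe() change mirrors NETIF_F_HIGHDMA into vlan_features. VLAN devices stacked on a real device advertise, roughly, the intersection of their own flags and the lower device's vlan_features, so without this the high-DMA capability would be lost as soon as a VLAN was configured on the port. A sketch of the intent (illustrative, not the driver's exact surrounding code):

    if (pci_using_dac) {
    	netdev->features      |= NETIF_F_HIGHDMA;
    	netdev->vlan_features |= NETIF_F_HIGHDMA;	/* let 8021q devices inherit it */
    }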
@@ -470,7 +472,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &ixgb_watchdog;
+ adapter->watchdog_timer.function = ixgb_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
@@ -1905,7 +1907,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
*/
if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
return;
}
@@ -1913,7 +1915,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
/* now look at the TCP checksum error bit */
if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
/* let the stack verify checksum errors */
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
adapter->hw_csum_rx_error++;
} else {
/* TCP checksum is good */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 9e15eb93860..a8c47b01a6f 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/cpumask.h>
#include <linux/aer.h>
#include "ixgbe_type.h"
@@ -69,15 +70,20 @@
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_64 64 /* Used for packet split */
-#define IXGBE_RXBUFFER_128 128 /* Used for packet split */
-#define IXGBE_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
#define IXGBE_RXBUFFER_2048 2048
#define IXGBE_RXBUFFER_4096 4096
#define IXGBE_RXBUFFER_8192 8192
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
+ * this adds up to 512 bytes of extra data meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
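The arithmetic behind the new IXGBE_RX_HDR_SIZE value, spelled out with the numbers the comment itself assumes (exact overheads vary slightly by kernel config):

    /*
     *   512  header buffer (IXGBE_RXBUFFER_512)
     * +  64  worst-case headroom reserved by netdev_alloc_skb()
     * +   2  NET_IP_ALIGN
     * + ~384 struct skb_shared_info at the end of the data area
     * ------
     * ~ 962  -> rounds up to the size-1024 kmalloc slab
     *
     * A 256-byte header buffer would already land in the same 1 KB slab,
     * so growing the header space to 512 bytes costs nothing per packet.
     */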
@@ -236,6 +242,7 @@ struct ixgbe_q_vector {
u8 tx_itr;
u8 rx_itr;
u32 eitr;
+ cpumask_var_t affinity_mask;
};
/* Helper macros to switch between ints/sec and what the register uses.
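Each q_vector also gains a cpumask_var_t (hence the new <linux/cpumask.h> include above), which suggests per-vector IRQ affinity handling elsewhere in the patch. cpumask_var_t needs an explicit allocate/free cycle when CONFIG_CPUMASK_OFFSTACK is enabled; the general API, shown for context only and not as this driver's exact call sites:

    if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
    	return -ENOMEM;				/* a real allocation with CPUMASK_OFFSTACK */
    cpumask_set_cpu(cpu, q_vector->affinity_mask);	/* e.g. pin the vector to one CPU */
    /* ... */
    free_cpumask_var(q_vector->affinity_mask);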
@@ -251,11 +258,11 @@ struct ixgbe_q_vector {
(R)->next_to_clean - (R)->next_to_use - 1)
#define IXGBE_RX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#ifdef IXGBE_FCOE
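The descriptor-access macros now take a ring pointer instead of a ring struct, so call sites drop the dereference at the call. The reworked ethtool self-test later in this patch already uses the new form; at a call site the change looks like this (illustrative):

    union ixgbe_adv_rx_desc *rx_desc;

    rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);	/* new: pass the ring pointer */
    /* was:  IXGBE_RX_DESC_ADV(*rx_ring, i);	   old: dereference at the call */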
@@ -448,9 +455,20 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
+ struct net_device *,
+ struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+ struct ixgbe_tx_buffer *);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cleaned_count);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3e06a61da92..0bd8fbb5bfd 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,20 +39,20 @@
#define IXGBE_82599_MC_TBL_SIZE 128
#define IXGBE_82599_VFT_TBL_SIZE 128
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
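This file follows a pattern repeated in ixgbe_common.c and the DCB sources below: functions with no users outside their own file get internal linkage, the forward declarations at the top of the file gain the same static keyword, and the now-unneeded prototypes are deleted from the shared headers. Schematically (illustrative only):

    /* ixgbe_82599.c: local forward declaration, now static */
    static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
    				      bool autoneg_wait_to_complete);
    /* ...and the matching extern prototype disappears from the header. */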
@@ -369,7 +369,7 @@ out:
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
u32 autoc_reg;
@@ -418,7 +418,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
* PHY states. This includes selectively shutting down the Tx
* laser on the PHY, effectively halting physical link.
**/
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -437,7 +437,7 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* PHY states. This includes selectively turning on the Tx
* laser on the PHY, effectively starting physical link.
**/
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -460,7 +460,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* end. This is consistent with true clause 37 autoneg, which also
* involves a loss of signal.
**/
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
@@ -729,7 +729,7 @@ out:
*
* Set the link speed in the AUTOC register and restarts link.
**/
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
@@ -1415,92 +1415,6 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
}
/**
- * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
- * @input: input stream to modify
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
-{
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
- (src_addr_4 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
- (src_addr_4 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
- (src_addr_3 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
- (src_addr_3 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
- (src_addr_2 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
- (src_addr_2 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
- (src_addr_1 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
- (src_addr_1 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
- * @input: input stream to modify
- * @dst_addr_1: the first 4 bytes of the IP address to load
- * @dst_addr_2: the second 4 bytes of the IP address to load
- * @dst_addr_3: the third 4 bytes of the IP address to load
- * @dst_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
-{
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
- (dst_addr_4 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
- (dst_addr_4 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
- (dst_addr_3 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
- (dst_addr_3 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
- (dst_addr_2 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
- (dst_addr_2 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
- (dst_addr_1 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
- (dst_addr_1 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
-
- return 0;
-}
-
-/**
* ixgbe_atr_set_src_port_82599 - Sets the source port
* @input: input stream to modify
* @src_port: the source port to load
@@ -1540,19 +1454,6 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
}
/**
- * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
- * @input: input stream to modify
- * @vm_pool: the Virtual Machine pool to load
- **/
-s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 vm_pool)
-{
- input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
-
- return 0;
-}
-
-/**
* ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
* @input: input stream to modify
* @l4type: the layer 4 type value to load
@@ -1645,41 +1546,6 @@ static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
}
/**
- * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
- * @input: input stream to search
- * @dst_addr_1: the first 4 bytes of the IP address to load
- * @dst_addr_2: the second 4 bytes of the IP address to load
- * @dst_addr_3: the third 4 bytes of the IP address to load
- * @dst_addr_4: the fourth 4 bytes of the IP address to load
- **/
-s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 *dst_addr_1, u32 *dst_addr_2,
- u32 *dst_addr_3, u32 *dst_addr_4)
-{
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
-
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
-
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
-
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
-
- return 0;
-}
-
-/**
* ixgbe_atr_get_src_port_82599 - Gets the source port
* @input: input stream to modify
* @src_port: the source port to load
@@ -1732,19 +1598,6 @@ static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
}
/**
- * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
- * @input: input stream to modify
- * @vm_pool: the Virtual Machine pool to load
- **/
-s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 *vm_pool)
-{
- *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
-
- return 0;
-}
-
-/**
* ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
* @input: input stream to modify
* @l4type: the layer 4 type value to load
@@ -1910,56 +1763,27 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
/*
- * Program the relevant mask registers. If src/dst_port or src/dst_addr
- * are zero, then assume a full mask for that field. Also assume that
- * a VLAN of 0 is unspecified, so mask that out as well. L4type
- * cannot be masked out in this implementation.
+ * Program the relevant mask registers. L4type cannot be
+ * masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
- if (src_ipv4 == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-
- if (dst_ipv4 == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
case IXGBE_ATR_L4TYPE_TCP:
- if (src_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- input_masks->src_port_mask);
-
- if (dst_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (0xffff << 16)));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (input_masks->dst_port_mask << 16)));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
break;
case IXGBE_ATR_L4TYPE_UDP:
- if (src_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- input_masks->src_port_mask);
-
- if (dst_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (0xffff << 16)));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (input_masks->src_port_mask << 16)));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (input_masks->src_port_mask << 16)));
break;
default:
/* this already would have failed above */
@@ -1967,11 +1791,11 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
}
/* Program the last mask register, FDIRM */
- if (input_masks->vlan_id_mask || !vlan_id)
+ if (input_masks->vlan_id_mask)
/* Mask both VLAN and VLANP - bits 0 and 1 */
fdirm |= 0x3;
- if (input_masks->data_mask || !flex_bytes)
+ if (input_masks->data_mask)
/* Flex bytes need masking, so mask the whole thing - bit 4 */
fdirm |= 0x10;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 9595b1bfb8d..e3eca131638 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -52,6 +52,7 @@ static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -637,7 +638,7 @@ out:
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -2449,7 +2450,7 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
u32 bits = 0;
u32 first_empty_slot = 0;
@@ -2704,48 +2705,3 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return 0;
}
-
-/**
- * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
- * the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function will read the EEPROM from the alternative SAN MAC address
- * block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
-{
- u16 offset, caps;
- u16 alt_san_mac_blk_offset;
-
- /* clear output first */
- *wwnn_prefix = 0xFFFF;
- *wwpn_prefix = 0xFFFF;
-
- /* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
-
- if ((alt_san_mac_blk_offset == 0) ||
- (alt_san_mac_blk_offset == 0xFFFF))
- goto wwn_prefix_out;
-
- /* check capability in alternative san mac address block */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
- if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
- goto wwn_prefix_out;
-
- /* get the corresponding prefix for WWNN/WWPN */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
- return 0;
-}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 5cf15aa11ca..424c223437d 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -52,7 +52,6 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 9aea4f04bbd..8bb9ddb6dff 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -34,98 +34,6 @@
#include "ixgbe_dcb_82599.h"
/**
- * ixgbe_dcb_config - Struct containing DCB settings.
- * @dcb_config: Pointer to DCB config structure
- *
- * This function checks DCB rules for DCB settings.
- * The following rules are checked:
- * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
- * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
- * Group must total 100.
- * 3. A Traffic Class should not be set to both Link Strict Priority
- * and Group Strict Priority.
- * 4. Link strict Bandwidth Groups can only have link strict traffic classes
- * with zero bandwidth.
- */
-s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
-{
- struct tc_bw_alloc *p;
- s32 ret_val = 0;
- u8 i, j, bw = 0, bw_id;
- u8 bw_sum[2][MAX_BW_GROUP];
- bool link_strict[2][MAX_BW_GROUP];
-
- memset(bw_sum, 0, sizeof(bw_sum));
- memset(link_strict, 0, sizeof(link_strict));
-
- /* First Tx, then Rx */
- for (i = 0; i < 2; i++) {
- /* Check each traffic class for rule violation */
- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
- p = &dcb_config->tc_config[j].path[i];
-
- bw = p->bwg_percent;
- bw_id = p->bwg_id;
-
- if (bw_id >= MAX_BW_GROUP) {
- ret_val = DCB_ERR_CONFIG;
- goto err_config;
- }
- if (p->prio_type == prio_link) {
- link_strict[i][bw_id] = true;
- /* Link strict should have zero bandwidth */
- if (bw) {
- ret_val = DCB_ERR_LS_BW_NONZERO;
- goto err_config;
- }
- } else if (!bw) {
- /*
- * Traffic classes without link strict
- * should have non-zero bandwidth.
- */
- ret_val = DCB_ERR_TC_BW_ZERO;
- goto err_config;
- }
- bw_sum[i][bw_id] += bw;
- }
-
- bw = 0;
-
- /* Check each bandwidth group for rule violation */
- for (j = 0; j < MAX_BW_GROUP; j++) {
- bw += dcb_config->bw_percentage[i][j];
- /*
- * Sum of bandwidth percentages of all traffic classes
- * within a Bandwidth Group must total 100 except for
- * link strict group (zero bandwidth).
- */
- if (link_strict[i][j]) {
- if (bw_sum[i][j]) {
- /*
- * Link strict group should have zero
- * bandwidth.
- */
- ret_val = DCB_ERR_LS_BWG_NONZERO;
- goto err_config;
- }
- } else if (bw_sum[i][j] != BW_PERCENT &&
- bw_sum[i][j] != 0) {
- ret_val = DCB_ERR_TC_BW;
- goto err_config;
- }
- }
-
- if (bw != BW_PERCENT) {
- ret_val = DCB_ERR_BW_GROUP;
- goto err_config;
- }
- }
-
-err_config:
- return ret_val;
-}
-
-/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
* @ixgbe_dcb_config: Struct containing DCB settings.
* @direction: Configuring either Tx or Rx.
@@ -203,133 +111,6 @@ out:
}
/**
- * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of elements in bwg_array.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
- u8 tc_count)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
- return ret;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
- * hw - pointer to hardware structure
- * stats - pointer to statistics structure
- * tc_count - Number of elements in bwg_array.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
- u8 tc_count)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
- return ret;
-}
-
-/**
- * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Rx Data Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
- return ret;
-}
-
-/**
- * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Descriptor Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
- return ret;
-}
-
-/**
- * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Data Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
- return ret;
-}
-
-/**
- * ixgbe_dcb_config_pfc - Config priority flow control
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Priority Flow Control for each traffic class.
- */
-s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config);
- return ret;
-}
-
-/**
- * ixgbe_dcb_config_tc_stats - Config traffic class statistics
- * @hw: pointer to hardware structure
- *
- * Configure queue statistics registers, all queues belonging to same traffic
- * class uses a single set of queue statistics counters.
- */
-s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
-{
- s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_config_tc_stats_82598(hw);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_config_tc_stats_82599(hw);
- return ret;
-}
-
-/**
* ixgbe_dcb_hw_config - Config and enable DCB
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 5caafd4afbc..eb1059f09da 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -149,27 +149,9 @@ struct ixgbe_dcb_config {
/* DCB driver APIs */
-/* DCB rule checking function.*/
-s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
-
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
-/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g);
-s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
-
-/* DCB traffic class stats */
-s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
-s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f0e9279d466..50288bcadc5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -32,65 +32,6 @@
#include "ixgbe_dcb_82598.h"
/**
- * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of elements in bwg_array.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count)
-{
- int tc;
-
- if (tc_count > MAX_TRAFFIC_CLASS)
- return DCB_ERR_PARAM;
-
- /* Statistics pertaining to each traffic class */
- for (tc = 0; tc < tc_count; tc++) {
- /* Transmitted Packets */
- stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
- /* Transmitted Bytes */
- stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
- /* Received Packets */
- stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
- /* Received Bytes */
- stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
- }
-
- return 0;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of elements in bwg_array.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count)
-{
- int tc;
-
- if (tc_count > MAX_TRAFFIC_CLASS)
- return DCB_ERR_PARAM;
-
- for (tc = 0; tc < tc_count; tc++) {
- /* Priority XOFF Transmitted */
- stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
- /* Priority XOFF Received */
- stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
- }
-
- return 0;
-}
-
-/**
* ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
@@ -137,7 +78,7 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
@@ -194,7 +135,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
@@ -242,7 +183,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
@@ -355,7 +296,7 @@ out:
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index cc728fa092e..abc03ccfa08 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -72,21 +72,6 @@
/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
- u8);
-
-/* DCB traffic class stats */
-s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
-s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
- u8);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 25b02fb425a..67c219f86c3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -31,70 +31,13 @@
#include "ixgbe_dcb_82599.h"
/**
- * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of elements in bwg_array.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count)
-{
- int tc;
-
- if (tc_count > MAX_TRAFFIC_CLASS)
- return DCB_ERR_PARAM;
- /* Statistics pertaining to each traffic class */
- for (tc = 0; tc < tc_count; tc++) {
- /* Transmitted Packets */
- stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
- /* Transmitted Bytes */
- stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
- /* Received Packets */
- stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
- /* Received Bytes */
- stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
- }
-
- return 0;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of elements in bwg_array.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count)
-{
- int tc;
-
- if (tc_count > MAX_TRAFFIC_CLASS)
- return DCB_ERR_PARAM;
- for (tc = 0; tc < tc_count; tc++) {
- /* Priority XOFF Transmitted */
- stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
- /* Priority XOFF Received */
- stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
- }
-
- return 0;
-}
-
-/**
* ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure packet buffers for DCB mode.
*/
-s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret_val = 0;
@@ -136,7 +79,7 @@ s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
@@ -191,7 +134,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
@@ -238,7 +181,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
@@ -359,7 +302,7 @@ out:
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -412,7 +355,7 @@ s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
*
* Configure general DCB parameters.
*/
-s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
{
u32 reg;
u32 q;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 0f3f791e1e1..18d7fbf6c29 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -101,24 +101,6 @@
/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config);
-s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count);
-
-/* DCB traffic class stats */
-s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw);
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
-
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dcebc82c6f4..d4ac94324fa 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -401,7 +401,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
static u32 ixgbe_get_rx_csum(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
+ return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
}
static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
@@ -820,16 +820,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, ixgbe_driver_name, 32);
- strncpy(drvinfo->version, ixgbe_driver_version, 32);
+ strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
- sprintf(firmware_version, "%d.%d-%d",
- (adapter->eeprom_version & 0xF000) >> 12,
- (adapter->eeprom_version & 0x0FF0) >> 4,
- adapter->eeprom_version & 0x000F);
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ (adapter->eeprom_version & 0xF000) >> 12,
+ (adapter->eeprom_version & 0x0FF0) >> 4,
+ adapter->eeprom_version & 0x000F);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version));
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -985,8 +988,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
case ETH_SS_NTUPLE_FILTERS:
- return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
- ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
+ return ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+ ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY;
default:
return -EOPNOTSUPP;
}
@@ -1435,9 +1438,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
u32 reg_ctl;
- int i;
/* shut down the DMA engines now so they can be reinitialized later */
@@ -1445,14 +1446,15 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
/* now Tx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
+
if (hw->mac.type == ixgbe_mac_82599EB) {
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
@@ -1461,221 +1463,57 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
ixgbe_reset(adapter);
- if (tx_ring->desc && tx_ring->tx_buffer_info) {
- for (i = 0; i < tx_ring->count; i++) {
- struct ixgbe_tx_buffer *buf =
- &(tx_ring->tx_buffer_info[i]);
- if (buf->dma)
- dma_unmap_single(&pdev->dev, buf->dma,
- buf->length, DMA_TO_DEVICE);
- if (buf->skb)
- dev_kfree_skb(buf->skb);
- }
- }
-
- if (rx_ring->desc && rx_ring->rx_buffer_info) {
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *buf =
- &(rx_ring->rx_buffer_info[i]);
- if (buf->dma)
- dma_unmap_single(&pdev->dev, buf->dma,
- IXGBE_RXBUFFER_2048,
- DMA_FROM_DEVICE);
- if (buf->skb)
- dev_kfree_skb(buf->skb);
- }
- }
-
- if (tx_ring->desc) {
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
- tx_ring->desc = NULL;
- }
- if (rx_ring->desc) {
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
- rx_ring->desc = NULL;
- }
-
- kfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
- kfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
+ ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
}
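ixgbe_free_desc_rings() and ixgbe_setup_desc_rings() now delegate to the driver's regular ring helpers instead of carrying private copies of the allocation and map/unmap code, which is why ixgbe_configure_rx_ring(), ixgbe_configure_tx_ring() and friends were exported in ixgbe.h earlier in this patch. Condensed lifecycle of the test rings after the rework:

    /*
     * setup:    ixgbe_setup_tx_resources()  ->  ixgbe_configure_tx_ring()
     *           ixgbe_setup_rx_resources()  ->  ixgbe_configure_rx_ring()
     * teardown: ixgbe_reset(), then ixgbe_free_tx_resources() /
     *           ixgbe_free_rx_resources()
     *
     * The loopback self-test therefore exercises the same ring code the
     * driver uses at runtime rather than a hand-rolled duplicate.
     */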
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
u32 rctl, reg_data;
- int i, ret_val;
+ int ret_val;
+ int err;
/* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IXGBE_DEFAULT_TXD;
+ tx_ring->queue_index = 0;
+ tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+ tx_ring->numa_node = adapter->node;
- if (!tx_ring->count)
- tx_ring->count = IXGBE_DEFAULT_TXD;
-
- tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
- sizeof(struct ixgbe_tx_buffer),
- GFP_KERNEL);
- if (!(tx_ring->tx_buffer_info)) {
- ret_val = 1;
- goto err_nomem;
- }
-
- tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
- if (!(tx_ring->desc)) {
- ret_val = 2;
- goto err_nomem;
- }
- tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
- ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
- ((u64) tx_ring->dma >> 32));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
- tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
- reg_data |= IXGBE_HLREG0_TXPADEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+ err = ixgbe_setup_tx_resources(adapter, tx_ring);
+ if (err)
+ return 1;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
}
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
- reg_data |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
-
- for (i = 0; i < tx_ring->count; i++) {
- union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
- struct sk_buff *skb;
- unsigned int size = 1024;
-
- skb = alloc_skb(size, GFP_KERNEL);
- if (!skb) {
- ret_val = 3;
- goto err_nomem;
- }
- skb_put(skb, size);
- tx_ring->tx_buffer_info[i].skb = skb;
- tx_ring->tx_buffer_info[i].length = skb->len;
- tx_ring->tx_buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- desc->read.buffer_addr =
- cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
- desc->read.cmd_type_len = cpu_to_le32(skb->len);
- desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
- IXGBE_TXD_CMD_IFCS |
- IXGBE_TXD_CMD_RS);
- desc->read.olinfo_status = 0;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- desc->read.olinfo_status |=
- (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
- }
+ ixgbe_configure_tx_ring(adapter, tx_ring);
/* Setup Rx Descriptor ring and Rx buffers */
-
- if (!rx_ring->count)
- rx_ring->count = IXGBE_DEFAULT_RXD;
-
- rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
- sizeof(struct ixgbe_rx_buffer),
- GFP_KERNEL);
- if (!(rx_ring->rx_buffer_info)) {
+ rx_ring->count = IXGBE_DEFAULT_RXD;
+ rx_ring->queue_index = 0;
+ rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+ rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+ rx_ring->numa_node = adapter->node;
+
+ err = ixgbe_setup_rx_resources(adapter, rx_ring);
+ if (err) {
ret_val = 4;
goto err_nomem;
}
- rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
- if (!(rx_ring->desc)) {
- ret_val = 5;
- goto err_nomem;
- }
- rx_ring->next_to_use = rx_ring->next_to_clean = 0;
-
rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
- ((u64)rx_ring->dma & 0xFFFFFFFF));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
- ((u64) rx_ring->dma >> 32));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
- reg_data &= ~IXGBE_HLREG0_LPBK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
-#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
- Threshold Size mask */
- reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
-#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
- reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
- reg_data |= adapter->hw.mac.mc_filter_type;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
- reg_data |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0]->reg_idx;
- u32 k;
- for (k = 0; k < 10; k++) {
- if (IXGBE_READ_REG(&adapter->hw,
- IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- }
+ ixgbe_configure_rx_ring(adapter, rx_ring);
rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
- for (i = 0; i < rx_ring->count; i++) {
- union ixgbe_adv_rx_desc *rx_desc =
- IXGBE_RX_DESC_ADV(*rx_ring, i);
- struct sk_buff *skb;
-
- skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
- if (!skb) {
- ret_val = 6;
- goto err_nomem;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- rx_ring->rx_buffer_info[i].skb = skb;
- rx_ring->rx_buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data,
- IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
- rx_desc->read.pkt_addr =
- cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
- memset(skb->data, 0x00, skb->len);
- }
-
return 0;
err_nomem:
@@ -1689,16 +1527,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
u32 reg_data;
/* right now we only support MAC loopback in the driver */
-
- /* Setup MAC loopback */
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ /* Setup MAC loopback */
reg_data |= IXGBE_HLREG0_LPBK;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
reg_data &= ~IXGBE_AUTOC_LMS_MASK;
reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ msleep(10);
/* Disable Atlas Tx lanes; re-enabled in reset path */
if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1756,15 +1599,81 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
return 13;
}
+static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct ixgbe_ring *tx_ring,
+ unsigned int size)
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ const int bufsz = rx_ring->rx_buf_len;
+ u32 staterr;
+ u16 rx_ntc, tx_ntc, count = 0;
+
+ /* initialize next to clean and descriptor values */
+ rx_ntc = rx_ring->next_to_clean;
+ tx_ntc = tx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ /* check Rx buffer */
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
+ dma_unmap_single(&adapter->pdev->dev,
+ rx_buffer_info->dma,
+ bufsz,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+
+ /* verify contents of skb */
+ if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+ count++;
+
+ /* unmap buffer on Tx side */
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+
+ /* increment Rx/Tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+
+ /* fetch next descriptor */
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ /* re-map buffers to ring, store next to clean values */
+ ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+ tx_ring->next_to_clean = tx_ntc;
+
+ return count;
+}
+
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int i, j, k, l, lc, good_cnt, ret_val = 0;
- unsigned long time;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size = 1024;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return 11;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+ /* place data into test skb */
+ ixgbe_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
/*
* Calculate the loop count based on the largest descriptor ring
@@ -1777,54 +1686,40 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
else
lc = ((rx_ring->count / 64) * 2) + 1;
- k = l = 0;
for (j = 0; j <= lc; j++) {
- for (i = 0; i < 64; i++) {
- ixgbe_create_lbtest_frame(
- tx_ring->tx_buffer_info[k].skb,
- 1024);
- dma_sync_single_for_device(&pdev->dev,
- tx_ring->tx_buffer_info[k].dma,
- tx_ring->tx_buffer_info[k].length,
- DMA_TO_DEVICE);
- if (unlikely(++k == tx_ring->count))
- k = 0;
- }
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
- msleep(200);
- /* set the start time for the receive */
- time = jiffies;
+ /* reset count of good packets */
good_cnt = 0;
- do {
- /* receive the sent packets */
- dma_sync_single_for_cpu(&pdev->dev,
- rx_ring->rx_buffer_info[l].dma,
- IXGBE_RXBUFFER_2048,
- DMA_FROM_DEVICE);
- ret_val = ixgbe_check_lbtest_frame(
- rx_ring->rx_buffer_info[l].skb, 1024);
- if (!ret_val)
+
+ /* place 64 packets on the transmit queue */
+ for (i = 0; i < 64; i++) {
+ skb_get(skb);
+ tx_ret_val = ixgbe_xmit_frame_ring(skb,
+ adapter->netdev,
+ adapter,
+ tx_ring);
+ if (tx_ret_val == NETDEV_TX_OK)
good_cnt++;
- if (++l == rx_ring->count)
- l = 0;
- /*
- * time + 20 msecs (200 msecs on 2.4) is more than
- * enough time to complete the receives, if it's
- * exceeded, break and error off
- */
- } while (good_cnt < 64 && jiffies < (time + 20));
+ }
+
if (good_cnt != 64) {
- /* ret_val is the same as mis-compare */
- ret_val = 13;
+ ret_val = 12;
break;
}
- if (jiffies >= (time + 20)) {
- /* Error code for time out error */
- ret_val = 14;
+
+ /* allow 200 milliseconds for packets to go from Tx to Rx */
+ msleep(200);
+
+ good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
+ tx_ring, size);
+ if (good_cnt != 64) {
+ ret_val = 13;
break;
}
}
+ /* free the original skb */
+ kfree_skb(skb);
+
return ret_val;
}
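The loop-count formula near the top of ixgbe_run_loopback_test(), lc = ((count / 64) * 2) + 1 on the larger of the two rings, can be checked with a standalone sketch; the 512-entry ring is only an assumed example size:

#include <stdio.h>

/* Loop-count rule from the test above: enough batches of 64 frames
 * to cycle the larger descriptor ring more than twice. */
static int loopback_loop_count(int larger_ring_count)
{
        return ((larger_ring_count / 64) * 2) + 1;
}

int main(void)
{
        int count = 512; /* assumed example ring size */
        int lc = loopback_loop_count(count);

        printf("lc = %d, frames sent = %d, 2 * ring = %d\n",
               lc, lc * 64, 2 * count);
        /* lc = 17, frames sent = 1088, 2 * ring = 1024 */
        return 0;
}

Sending lc batches of 64 frames therefore pushes more frames than twice the ring size, so every descriptor is exercised at least twice during the test.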
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 072327c5e41..2f1de8b90f9 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -304,12 +304,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (!ixgbe_rx_is_fcoe(rx_desc))
goto ddp_out;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC)
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
fh = (struct fc_frame_header *)(skb->data +
@@ -471,7 +472,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
/* write context desc */
i = tx_ring->next_to_use;
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
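The reordered checksum handling in ixgbe_fcoe_ddp() above can be modelled as a small decision function. The enum values and the assert below are userspace stand-ins for the skb fields and for skb_checksum_none_assert(), not the kernel API itself:

#include <assert.h>
#include <stdio.h>

/* Userspace stand-ins for the skb checksum states. */
enum rx_summed { SUM_NONE = 0, SUM_UNNECESSARY = 1 };

/* Mirror of the patched flow: leave the buffer in its default
 * "no checksum" state when the FC CRC is bad, otherwise report the
 * checksum as already verified by hardware. */
static enum rx_summed fcoe_rx_summed(int bad_crc)
{
        enum rx_summed summed = SUM_NONE; /* default after allocation */

        if (bad_crc)
                assert(summed == SUM_NONE); /* like skb_checksum_none_assert */
        else
                summed = SUM_UNNECESSARY;
        return summed;
}

int main(void)
{
        printf("bad crc  -> %d\n", fcoe_rx_summed(1)); /* 0: left untouched */
        printf("good crc -> %d\n", fcoe_rx_summed(0)); /* 1: hw verified */
        return 0;
}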
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9..790a0dae124 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
- "Intel(R) 10 Gigabit PCI Express Network Driver";
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
- void *p);
+ void *p);
static struct notifier_block dca_notifier = {
.notifier_call = ixgbe_notify_dca,
.next = NULL,
@@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = {
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs,
+ "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,8 +169,8 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
/* take a breather then clean up driver data */
msleep(100);
- if (adapter->vfinfo)
- kfree(adapter->vfinfo);
+
+ kfree(adapter->vfinfo);
adapter->vfinfo = NULL;
adapter->num_vfs = 0;
@@ -282,17 +282,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
- printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+ pr_info("%-15s %08x\n", reginfo->name,
IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
for (i = 0; i < 8; i++) {
snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
- printk(KERN_ERR "%-15s ", rname);
+ pr_err("%-15s", rname);
for (j = 0; j < 8; j++)
- printk(KERN_CONT "%08x ", regs[i*8+j]);
- printk(KERN_CONT "\n");
+ pr_cont(" %08x", regs[i*8+j]);
+ pr_cont("\n");
}
}
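The printk() to pr_info()/pr_cont() conversions in ixgbe_regdump() only change how log lines are assembled. A hedged userspace model of that behaviour (a prefix on each new line, continuations appended without one; this is not the real printk machinery):

#include <stdio.h>

/* Userspace stand-ins for the printk helpers used above: pr_info()
 * starts a new log line with a level prefix, pr_cont() appends to the
 * current line without one. */
#define pr_info(...) printf("[info] " __VA_ARGS__)
#define pr_cont(...) printf(__VA_ARGS__)

int main(void)
{
        unsigned int regs[4] = { 0x1, 0x2, 0x3, 0x4 };
        int j;

        pr_info("%-15s", "EXAMPLE[0-3]");
        for (j = 0; j < 4; j++)
                pr_cont(" %08x", regs[j]);
        pr_cont("\n");
        return 0;
}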
@@ -322,18 +322,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
+ pr_info("Device Name state "
"trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
reginfo->name; reginfo++) {
ixgbe_regdump(hw, reginfo);
@@ -344,13 +344,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
- "leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
tx_buffer_info =
&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)tx_buffer_info->dma,
tx_buffer_info->length,
@@ -377,18 +376,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "T [desc] [address 63:0 ] "
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] "
"[PlPOIdStDDt Ln] [bi->dma ] "
"leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
" %04X %3X %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -399,13 +398,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
tx_buffer_info->skb);
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
+ pr_cont(" NTC/U\n");
else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ pr_cont(" NTU\n");
else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ pr_cont(" NTC\n");
else
- printk(KERN_CONT "\n");
+ pr_cont("\n");
if (netif_msg_pktdata(adapter) &&
tx_buffer_info->dma != 0)
@@ -419,11 +418,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "%5d %5X %5X\n", n,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info("%5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
@@ -454,30 +453,30 @@ rx_ring_summary:
*/
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] "
"[ HeadBuf DD] [bi->dma ] [bi->skb] "
"<-- Adv Rx Read format\n");
- printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] "
"[vl er S cks ln] ---------------- [bi->skb] "
"<-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
rx_buffer_info = &rx_ring->rx_buffer_info[i];
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (staterr & IXGBE_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
+ pr_info("RWB[0x%03X] %016llX "
"%016llX ---------------- %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
rx_buffer_info->skb);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
+ pr_info("R [0x%03X] %016llX "
"%016llX %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -503,11 +502,11 @@ rx_ring_summary:
}
if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ pr_cont(" NTU\n");
else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ pr_cont(" NTC\n");
else
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
}
@@ -523,7 +522,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
/* Let firmware take over control of h/w */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -533,7 +532,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
/* Let firmware know the driver has taken over */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
/*
@@ -545,7 +544,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
*
*/
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
- u8 queue, u8 msix_vector)
+ u8 queue, u8 msix_vector)
{
u32 ivar, index;
struct ixgbe_hw *hw = &adapter->hw;
@@ -586,7 +585,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
}
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+ u64 qmask)
{
u32 mask;
@@ -601,9 +600,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
+ struct ixgbe_tx_buffer
+ *tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
@@ -637,7 +636,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
* Returns : true if in xon state (currently not paused)
*/
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
u32 txoff = IXGBE_TFCS_TXOFF;
@@ -682,8 +681,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- unsigned int eop)
+ struct ixgbe_ring *tx_ring,
+ unsigned int eop)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -695,7 +694,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
ixgbe_tx_xon_state(adapter, tx_ring)) {
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
@@ -732,7 +731,7 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
* @tx_ring: tx ring to clean
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct net_device *netdev = adapter->netdev;
@@ -743,7 +742,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
(count < tx_ring->work_limit)) {
@@ -751,7 +750,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
struct sk_buff *skb;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
skb = tx_buffer_info->skb;
@@ -781,7 +780,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
ixgbe_unmap_and_free_tx_resource(adapter,
- tx_buffer_info);
+ tx_buffer_info);
tx_desc->wb.status = 0;
@@ -791,14 +790,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -827,12 +826,12 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
tx_ring->total_packets += total_packets;
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
- return (count < tx_ring->work_limit);
+ return count < tx_ring->work_limit;
}
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
u32 rxctrl;
int cpu = get_cpu();
@@ -846,13 +845,13 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
}
rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
rx_ring->cpu = cpu;
}
@@ -860,7 +859,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
u32 txctrl;
int cpu = get_cpu();
@@ -878,7 +877,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
}
@@ -946,16 +945,15 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
* @rx_desc: rx descriptor
**/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- struct ixgbe_ring *ring,
- union ixgbe_adv_rx_desc *rx_desc)
+ struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct napi_struct *napi = &q_vector->napi;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb_record_rx_queue(skb, ring->queue_index);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -981,7 +979,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
{
u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1017,7 +1015,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
}
static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbe_ring *rx_ring, u32 val)
+ struct ixgbe_ring *rx_ring, u32 val)
{
/*
* Force memory writes to complete before letting h/w
@@ -1033,25 +1031,27 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
* ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
**/
-static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cleaned_count)
{
+ struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
if (!bi->page_dma &&
(rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
+ bi->page = netdev_alloc_page(netdev);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
@@ -1063,29 +1063,28 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
}
bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
+ bi->page_offset,
+ (PAGE_SIZE / 2),
DMA_FROM_DEVICE);
}
if (!bi->skb) {
- struct sk_buff *skb;
- /* netdev_alloc_skb reserves 32 bytes up front!! */
- uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
+ bufsz);
+ bi->skb = skb;
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ }
- /* advance the data pointer to the next cache line */
- skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
- - skb->data));
-
- bi->skb = skb;
- bi->dma = dma_map_single(&pdev->dev, skb->data,
- rx_ring->rx_buf_len,
+ if (!bi->dma) {
+ bi->dma = dma_map_single(&pdev->dev,
+ bi->skb->data,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -1095,6 +1094,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
}
i++;
@@ -1126,8 +1126,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >>
- IXGBE_RXDADV_RSCCNT_SHIFT;
+ IXGBE_RXDADV_RSCCNT_MASK) >>
+ IXGBE_RXDADV_RSCCNT_SHIFT;
}
/**
@@ -1140,7 +1140,7 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
* turns it into the frag list owner.
**/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
- u64 *count)
+ u64 *count)
{
unsigned int frag_list_size = 0;
@@ -1168,8 +1168,8 @@ struct ixgbe_rsc_cb {
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
- struct ixgbe_ring *rx_ring,
- int *work_done, int work_to_do)
+ struct ixgbe_ring *rx_ring,
+ int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct net_device *netdev = adapter->netdev;
@@ -1188,7 +1188,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
@@ -1231,9 +1231,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
dma_unmap_single(&pdev->dev,
- rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
skb_put(skb, len);
@@ -1244,9 +1244,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
(page_count(rx_buffer_info->page) != 1))
@@ -1263,7 +1263,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (i == rx_ring->count)
i = 0;
- next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
prefetch(next_rxd);
cleaned_count++;
@@ -1280,18 +1280,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (staterr & IXGBE_RXD_STAT_EOP) {
if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+ skb = ixgbe_transform_rsc_queue(skb,
+ &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->delay_unmap) {
dma_unmap_single(&pdev->dev,
IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
IXGBE_RSC_CB(skb)->delay_unmap = false;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+ rx_ring->rsc_count +=
+ skb_shinfo(skb)->nr_frags;
else
rx_ring->rsc_count++;
rx_ring->rsc_flush++;
@@ -1403,24 +1405,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
q_vector = adapter->q_vector[v_idx];
/* XXX for_each_set_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
+ adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
+ adapter->num_rx_queues,
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
+ adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+ adapter->num_tx_queues,
+ r_idx + 1);
}
if (q_vector->txr_count && !q_vector->rxr_count)
@@ -1431,11 +1433,26 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
+ /* If Flow Director is enabled, set interrupt affinity */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /*
+ * Allocate the affinity_hint cpumask, assign the mask
+ * for this vector, and set our affinity_hint for
+ * this irq.
+ */
+ if (!alloc_cpumask_var(&q_vector->affinity_mask,
+ GFP_KERNEL))
+ return;
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
+ q_vector->affinity_mask);
+ }
}
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- v_idx);
+ v_idx);
else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
ixgbe_set_ivar(adapter, -1, 1, v_idx);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
@@ -1477,8 +1494,8 @@ enum latency_range {
* parameter (see ixgbe_param.c)
**/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
{
unsigned int retval = itr_setting;
u32 timepassed_us;
@@ -1567,30 +1584,30 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
for (i = 0; i < q_vector->txr_count; i++) {
tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
+ q_vector->tx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
+ q_vector->rx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
@@ -1627,39 +1644,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
static void ixgbe_check_overtemp_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- check_overtemp_task);
+ struct ixgbe_adapter,
+ check_overtemp_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_T3_LOM: {
- u32 autoneg;
- bool link_up = false;
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+ return;
- if (hw->mac.ops.check_link)
- hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM: {
+ u32 autoneg;
+ bool link_up = false;
- if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
- (eicr & IXGBE_EICR_LSC))
- /* Check if this is due to overtemp */
- if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
- break;
- }
+ if (hw->mac.ops.check_link)
+ hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
+ (eicr & IXGBE_EICR_LSC))
+ /* Check if this is due to overtemp */
+ if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
+ break;
+ return;
+ }
+ default:
+ if (!(eicr & IXGBE_EICR_GPI_SDP0))
return;
- default:
- if (!(eicr & IXGBE_EICR_GPI_SDP0))
- return;
- break;
- }
- e_crit(drv, "Network adapter has been stopped because it has "
- "over heated. Restart the computer. If the problem "
- "persists, power off the system and replace the "
- "adapter\n");
- /* write to clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+ break;
}
+ e_crit(drv,
+ "Network adapter has been stopped because it has over heated. "
+ "Restart the computer. If the problem persists, "
+ "power off the system and replace the adapter\n");
+ /* write to clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1746,9 +1764,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state))
+ &tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
}
}
@@ -1777,7 +1795,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
}
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+ u64 qmask)
{
u32 mask;
@@ -1809,7 +1827,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
/* EIAM disabled interrupts (on this vector) for us */
@@ -1837,7 +1855,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
if (!q_vector->rxr_count)
@@ -1867,7 +1885,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1876,7 +1894,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
/* EIAM disabled interrupts (on this vector) for us */
@@ -1896,7 +1914,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *rx_ring = NULL;
int work_done = 0;
@@ -1918,7 +1936,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
+ ((u64)1 << q_vector->v_idx));
}
return work_done;
@@ -1935,7 +1953,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring = NULL;
int work_done = 0, i;
@@ -1951,7 +1969,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
#endif
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -1967,7 +1985,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
#endif
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1979,7 +1997,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
+ ((u64)1 << q_vector->v_idx));
return 0;
}
@@ -1997,7 +2015,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *tx_ring = NULL;
int work_done = 0;
@@ -2019,14 +2037,15 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (adapter->tx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ ixgbe_irq_enable_queues(adapter,
+ ((u64)1 << q_vector->v_idx));
}
return work_done;
}
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
+ int r_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
@@ -2035,7 +2054,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int t_idx)
+ int t_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
@@ -2055,7 +2074,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* mapping configurations in here.
**/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+ int vectors)
{
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
@@ -2122,7 +2141,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
irqreturn_t (*handler)(int, void *);
int i, vector, q_vectors, err;
- int ri=0, ti=0;
+ int ri = 0, ti = 0;
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -2133,26 +2152,24 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
goto out;
#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+ (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+ &ixgbe_msix_clean_many)
for (vector = 0; vector < q_vectors; vector++) {
handler = SET_HANDLER(adapter->q_vector[vector]);
- if(handler == &ixgbe_msix_clean_rx) {
+ if (handler == &ixgbe_msix_clean_rx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "rx", ri++);
- }
- else if(handler == &ixgbe_msix_clean_tx) {
+ } else if (handler == &ixgbe_msix_clean_tx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "tx", ti++);
- }
- else
+ } else
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "TxRx", vector);
err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
@@ -2162,7 +2179,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
sprintf(adapter->name[vector], "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2173,7 +2190,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
for (i = vector - 1; i >= 0; i--)
free_irq(adapter->msix_entries[--vector].vector,
- adapter->q_vector[i]);
+ adapter->q_vector[i]);
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
@@ -2191,13 +2208,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
@@ -2231,7 +2248,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+ bool flush)
{
u32 mask;
@@ -2252,8 +2270,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_FLOW_DIR;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- ixgbe_irq_enable_queues(adapter, ~0);
- IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (queues)
+ ixgbe_irq_enable_queues(adapter, ~0);
+ if (flush)
+ IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->num_vfs > 32) {
u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -2275,7 +2295,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
u32 eicr;
/*
- * Workaround for silicon errata. Mask the interrupts
+ * Workaround for silicon errata on 82598. Mask the interrupts
* before the read of EICR.
*/
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2284,10 +2304,15 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
* therefore no explicit interrupt disable is necessary */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
- /* shared interrupt alert!
+ /*
+ * shared interrupt alert!
* make sure interrupts are enabled because the read will
- * have disabled interrupts due to EIAM */
- ixgbe_irq_enable(adapter);
+ * have disabled interrupts due to EIAM.
+ * Finish the workaround of silicon errata on 82598. Unmask
+ * the interrupt that we masked before the EICR read.
+ */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, true, true);
return IRQ_NONE; /* Not our interrupt */
}
@@ -2311,6 +2336,14 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
__napi_schedule(&(q_vector->napi));
}
+ /*
+ * re-enable link (maybe) and non-queue interrupts, no flush.
+ * ixgbe_poll will re-enable the queue interrupts
+ */
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, false, false);
+
return IRQ_HANDLED;
}
@@ -2343,10 +2376,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
- netdev->name, netdev);
+ netdev->name, netdev);
} else {
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
- netdev->name, netdev);
+ netdev->name, netdev);
}
if (err)
@@ -2370,7 +2403,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
i--;
for (; i >= 0; i--) {
free_irq(adapter->msix_entries[i].vector,
- adapter->q_vector[i]);
+ adapter->q_vector[i]);
}
ixgbe_reset_q_vectors(adapter);
@@ -2413,7 +2446,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
- EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
+ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -2425,95 +2458,140 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
* @adapter: board private structure
+ * @ring: structure containing ring specific data
*
- * Configure the Tx unit of the MAC after a reset.
+ * Configure the Tx descriptor ring after a reset.
**/
-static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
- u64 tdba;
struct ixgbe_hw *hw = &adapter->hw;
- u32 i, j, tdlen, txctrl;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl;
+ u16 reg_idx = ring->reg_idx;
- /* Setup the HW Tx Head and Tail descriptor pointers */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- j = ring->reg_idx;
- tdba = ring->dma;
- tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
- IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
- (tdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
- IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i]->head = IXGBE_TDH(j);
- adapter->tx_ring[i]->tail = IXGBE_TDT(j);
- /*
- * Disable Tx Head Writeback RO bit, since this hoses
- * bookkeeping if things aren't delivered in order.
- */
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
- break;
- case ixgbe_mac_82599EB:
- default:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
- break;
- }
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
- break;
- case ixgbe_mac_82599EB:
- default:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
- break;
- }
+ /* disable queue to avoid issues while updating state */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
+ txdctl & ~IXGBE_TXDCTL_ENABLE);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
+ ring->head = IXGBE_TDH(reg_idx);
+ ring->tail = IXGBE_TDT(reg_idx);
+
+ /* configure fetching thresholds */
+ if (adapter->rx_itr_setting == 0) {
+ /* cannot set wthresh when itr==0 */
+ txdctl &= ~0x007F0000;
+ } else {
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ }
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ /* PThresh workaround for Tx hang with DFP enabled. */
+ txdctl |= 32;
}
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u32 rttdcs;
- u32 mask;
+ /* reinitialize flowdirector state */
+ set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
- /* disable the arbiter while setting MTQC */
- rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- rttdcs |= IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+ /* enable queue */
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
- /* set transmit pool layout */
- mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
- switch (adapter->flags & mask) {
+ /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
- break;
+ /* poll to verify queue is enabled */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
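The enable-and-poll sequence at the end of ixgbe_configure_tx_ring() follows a common bounded-polling pattern. A standalone sketch, with usleep() standing in for msleep() and a fake register read standing in for IXGBE_READ_REG(); the bit value is illustrative:

#include <stdio.h>
#include <unistd.h>

#define TXDCTL_ENABLE 0x02000000u /* illustrative enable bit */

/* Pretend hardware: the enable bit becomes visible after a few reads. */
static unsigned int read_txdctl(void)
{
        static int reads;
        return (++reads >= 3) ? TXDCTL_ENABLE : 0;
}

int main(void)
{
        int wait_loop = 10;
        unsigned int txdctl;

        /* Same shape as the driver loop: sleep, re-read, and give up
         * after a fixed number of attempts rather than spin forever. */
        do {
                usleep(1000); /* stand-in for msleep(1) */
                txdctl = read_txdctl();
        } while (--wait_loop && !(txdctl & TXDCTL_ENABLE));

        if (!wait_loop)
                printf("queue did not enable\n");
        else
                printf("queue enabled after %d polls\n", 10 - wait_loop);
        return 0;
}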
- case (IXGBE_FLAG_DCB_ENABLED):
- /* We enable 8 traffic classes, DCB only */
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
- break;
+static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rttdcs;
+ u32 mask;
- default:
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
- break;
- }
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ /* disable the arbiter while setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* re-eable the arbiter */
- rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
+
+ /* re-enable the arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+}
+
+/**
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 dmatxctl;
+ u32 i;
+
+ ixgbe_setup_mtqc(adapter);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* DMATXCTL.EN must be before Tx queues are enabled */
+ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ dmatxctl |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
u32 srrctl;
int index;
@@ -2529,6 +2607,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ if (adapter->num_vfs)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -2549,20 +2629,46 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
-static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
- u32 mrqc = 0;
+ struct ixgbe_hw *hw = &adapter->hw;
+ static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+ 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+ 0x6A3E67EA, 0x14364D17, 0x3BED200D};
+ u32 mrqc = 0, reta = 0;
+ u32 rxcsum;
+ int i, j;
int mask;
- if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
- return mrqc;
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
- mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ /* Fill out redirection table */
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == adapter->ring_feature[RING_F_RSS].indices)
+ j = 0;
+ /* reta = 4-byte sliding window of
+ * 0x00..(indices-1)(indices-1)00..etc. */
+ reta = (reta << 8) | (j * 0x11);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
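The redirection-table fill above packs 128 one-byte entries, four per RETA register, cycling through the RSS queue indices. A standalone sketch with an assumed RSS queue count of 4 reproduces the values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int indices = 4; /* assumed RSS queue count */
        uint32_t reta = 0;
        unsigned int i, j;

        for (i = 0, j = 0; i < 128; i++, j++) {
                if (j == indices)
                        j = 0;
                /* one byte per entry: queue index repeated in both nibbles */
                reta = (reta << 8) | (j * 0x11);
                if ((i & 3) == 3)
                        printf("RETA[%2u] = 0x%08x\n", i >> 2,
                               (unsigned int)reta);
        }
        return 0;
}

With four queues every register prints 0x00112233, i.e. entry i maps to queue i % indices, with the index duplicated in both nibbles of each byte.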
+
+ /* Disable indicating checksum in descriptor, enables RSS hash */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
+ else
+ mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
#ifdef CONFIG_IXGBE_DCB
- | IXGBE_FLAG_DCB_ENABLED
+ | IXGBE_FLAG_DCB_ENABLED
#endif
- | IXGBE_FLAG_SRIOV_ENABLED
- );
+ | IXGBE_FLAG_SRIOV_ENABLED
+ );
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
@@ -2580,7 +2686,13 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
break;
}
- return mrqc;
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
/**
@@ -2588,25 +2700,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
* @adapter: address of board private structure
* @index: index of ring to set
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
- struct ixgbe_ring *rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
- int j;
u32 rscctrl;
int rx_buf_len;
+ u16 reg_idx = ring->reg_idx;
- rx_ring = adapter->rx_ring[index];
- j = rx_ring->reg_idx;
- rx_buf_len = rx_ring->rx_buf_len;
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ return;
+
+ rx_buf_len = ring->rx_buf_len;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2624,31 +2737,181 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
}
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
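The 65535-byte limit that the comment above refers to (maximum aggregated RSC size = descriptor count times buffer length) is easy to check numerically. The buffer lengths and descriptor counts below are illustrative; the driver's actual MAXDESC choice also depends on MAX_SKB_FRAGS and packet-split mode:

#include <stdio.h>

int main(void)
{
        const long bufs[]  = { 2048, 4096, 8192 }; /* illustrative rx_buf_len */
        const long descs[] = { 16, 8, 4 };         /* RSC descriptor counts */
        int i, j;

        for (i = 0; i < 3; i++)
                for (j = 0; j < 3; j++) {
                        long total = descs[j] * bufs[i];
                        printf("%2ld descs x %4ld bytes = %6ld -> %s\n",
                               descs[j], bufs[i], total,
                               total <= 65535 ? "fits" : "too large");
                }
        return 0;
}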
/**
- * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
- * @adapter: board private structure
+ * ixgbe_set_uta - Set unicast filter table address
+ * @adapter: board private structure
*
- * Configure the Rx unit of the MAC after a reset.
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used;
+ * however, due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow vlan tag stripping when promiscuous mode is enabled
**/
-static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+
+ /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ do {
+ msleep(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
+ "the polling period\n", reg_idx);
+ }
+}
+
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u16 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
+ rxdctl & ~IXGBE_RXDCTL_ENABLE);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
+ ring->head = IXGBE_RDH(reg_idx);
+ ring->tail = IXGBE_RDT(reg_idx);
+
+ ixgbe_configure_srrctl(adapter, ring);
+ ixgbe_configure_rscctl(adapter, ring);
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /*
+ * enable cache line friendly hardware writes:
+ * PTHRESH=32 descriptors (half the internal cache),
+ * this also removes ugly rx_no_buffer_count increment
+ * HTHRESH=4 descriptors (to minimize latency on fetch)
+ * WTHRESH=8 burst writeback up to two cache lines
+ */
+ rxdctl &= ~0x3FFFFF;
+ rxdctl |= 0x080420;
+ }
+
+ /* enable receive descriptor ring */
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ ixgbe_rx_desc_queue_enable(adapter, ring);
+ ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+}
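ixgbe_configure_rx_ring() programs the ring base as two 32-bit halves and the ring length in bytes. A standalone sketch of that arithmetic; the DMA address, ring size and the 16-byte descriptor size are assumptions used for illustration:

#include <stdio.h>
#include <stdint.h>

#define DESC_SIZE 16u /* assumed size of one advanced descriptor */

int main(void)
{
        uint64_t ring_dma = 0x0000000123456000ULL; /* example DMA address */
        uint32_t count = 512;                      /* example ring size */

        uint32_t bal = (uint32_t)(ring_dma & 0xffffffffULL); /* low half  */
        uint32_t bah = (uint32_t)(ring_dma >> 32);           /* high half */
        uint32_t len = count * DESC_SIZE;                    /* ring bytes */

        printf("RDBAL=0x%08x RDBAH=0x%08x RDLEN=%u\n",
               (unsigned int)bal, (unsigned int)bah, (unsigned int)len);
        /* RDBAL=0x23456000 RDBAH=0x00000001 RDLEN=8192 */
        return 0;
}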
+
+static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int p;
+
+ /* PSRTYPE must be initialized in non 82598 adapters */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
+ psrtype |= (adapter->num_rx_queues_per_pool << 29);
+
+ for (p = 0; p < adapter->num_rx_pools; p++)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ psrtype);
+}
+
+static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr_ext;
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+ /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /*
+ * Set up VF register offsets for selected VT Mode,
+ * i.e. 32 or 64 VFs for SR-IOV
+ */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+
+ /* enable Tx loopback for VF/PF communication */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+}
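The VFRE/VFTE writes in ixgbe_configure_virtualization() select one bit out of a pair of 32-bit pool-enable registers. A standalone sketch of the vf_shift / reg_offset arithmetic; treating the PF's pool index as num_vfs is an assumption made for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int examples[] = { 1, 7, 31, 40, 63 }; /* sample VF counts */
        unsigned int i;

        for (i = 0; i < 5; i++) {
                unsigned int num_vfs = examples[i];
                unsigned int vf_shift = num_vfs % 32;
                unsigned int reg_offset = (num_vfs > 32) ? 1 : 0;

                /* same arithmetic as the VFRE/VFTE writes above */
                printf("num_vfs=%2u -> register %u, bit %2u (0x%08x)\n",
                       num_vfs, reg_offset, vf_shift, 1u << vf_shift);
        }
        return 0;
}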
+
+static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
- u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_ring *rx_ring;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int i, j;
- u32 rdlen, rxctrl, rxcsum;
- static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
- 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
- 0x6A3E67EA, 0x14364D17, 0x3BED200D};
- u32 fctrl, hlreg0;
- u32 reta = 0, mrqc = 0;
- u32 rdrxctl;
int rx_buf_len;
+ struct ixgbe_ring *rx_ring;
+ int i;
+ u32 mhadd, hlreg0;
/* Decide whether to use packet split mode or not */
/* Do not use packet split if we're in SR-IOV Mode */
@@ -2658,62 +2921,40 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* PSRTYPE must be initialized in 82599 */
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR |
- IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw,
- IXGBE_PSRTYPE(adapter->num_vfs),
- psrtype);
- }
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
- rx_buf_len = ALIGN(max_frame, 1024);
+ rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
}
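The new rx_buf_len calculation rounds the worst-case frame (MTU plus Ethernet header, FCS and a VLAN tag) up to the next 1 KiB boundary. A standalone sketch using a round-up macro in the spirit of the kernel's ALIGN(); the MTU values are examples:

#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        int eth_hlen = 14, fcs_len = 4, vlan_hlen = 4;
        int mtus[] = { 1500, 4000, 9000 }; /* example MTUs */
        int i;

        for (i = 0; i < 3; i++) {
                int max_frame = mtus[i] + eth_hlen + fcs_len;
                int buf = ALIGN_UP(max_frame + vlan_hlen, 1024);

                printf("mtu %4d -> max_frame %4d -> rx_buf_len %5d\n",
                       mtus[i], max_frame, buf);
        }
        /* 1500 -> 1518 -> 2048, 4000 -> 4018 -> 4096, 9000 -> 9018 -> 9216 */
        return 0;
}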
- fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
- fctrl |= IXGBE_FCTRL_PMCF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+#ifdef IXGBE_FCOE
+ /* adjust max frame to be able to do baby jumbo for FCoE */
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+ max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
+ mhadd &= ~IXGBE_MHADD_MFS_MASK;
+ mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
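The MHADD update just above is a read-modify-write of the maximum-frame-size field, skipped when the programmed value already matches. A standalone sketch, assuming a 16-bit MFS field in the upper half of the register (the shift and mask stand in for the driver's IXGBE_MHADD_* defines):

#include <stdint.h>

#define MFS_SHIFT 16              /* assumed field position */
#define MFS_MASK  0xFFFF0000u     /* assumed field mask */

/* Rewrite MHADD only when the stored max frame size differs. */
static uint32_t update_mfs(uint32_t mhadd, uint32_t max_frame)
{
	if (max_frame != (mhadd >> MFS_SHIFT)) {
		mhadd &= ~MFS_MASK;
		mhadd |= max_frame << MFS_SHIFT;
	}
	return mhadd;
}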
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (adapter->netdev->mtu <= ETH_DATA_LEN)
- hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
- else
- hlreg0 |= IXGBE_HLREG0_JUMBOEN;
-#ifdef IXGBE_FCOE
- if (netdev->features & NETIF_F_FCOE_MTU)
- hlreg0 |= IXGBE_HLREG0_JUMBOEN;
-#endif
+ /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
- /* disable receives while setting up the descriptors */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
-
/*
* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
- rdba = rx_ring->dma;
- j = rx_ring->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
- IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
- rx_ring->head = IXGBE_RDH(j);
- rx_ring->tail = IXGBE_RDT(j);
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
@@ -2729,15 +2970,21 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
- IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
-
#endif /* IXGBE_FCOE */
- ixgbe_configure_srrctl(adapter, rx_ring);
}
- if (hw->mac.type == ixgbe_mac_82598EB) {
+}
+
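To summarize the buffer-length choice at the top of ixgbe_set_rx_buffer_len: header-split mode uses a small header buffer, small MTUs without RSC use the standard VLAN-tagged frame size, and everything else rounds the frame plus a VLAN tag up to the next kilobyte. A standalone sketch with illustrative sizes standing in for the driver's constants:

#include <stdint.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

#define HDR_BUF_SIZE    256     /* stand-in for IXGBE_RX_HDR_SIZE */
#define VLAN_FRAME_SIZE 1522    /* stand-in for MAXIMUM_ETHERNET_VLAN_SIZE */
#define VLAN_TAG_LEN    4       /* stand-in for VLAN_HLEN */

static unsigned int pick_rx_buf_len(int packet_split, int rsc_enabled,
				    unsigned int mtu, unsigned int max_frame)
{
	if (packet_split)
		return HDR_BUF_SIZE;
	if (!rsc_enabled && mtu <= 1500)
		return VLAN_FRAME_SIZE;
	return ALIGN_UP(max_frame + VLAN_TAG_LEN, 1024u);
}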
+static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
/*
* For VMDq support of different descriptor types or
* buffer sizes through the use of multiple SRRCTL
@@ -2748,110 +2995,66 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* effects of setting this bit are only that SRRCTL must be
* fully programmed [0..15]
*/
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+ break;
+ case ixgbe_mac_82599EB:
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ /* hardware requires some bits to be set by default */
+ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ break;
+ default:
+ /* We should do nothing since we don't know this hardware */
+ return;
}
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- u32 vt_reg_bits;
- u32 reg_offset, vf_shift;
- u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
- | IXGBE_VT_CTL_REPLEN;
- vt_reg_bits |= (adapter->num_vfs <<
- IXGBE_VT_CTL_POOL_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
-
- vf_shift = adapter->num_vfs % 32;
- reg_offset = adapter->num_vfs / 32;
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
- /* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- ixgbe_set_vmolr(hw, adapter->num_vfs, true);
- }
-
- /* Program MRQC for the distribution of queues */
- mrqc = ixgbe_setup_mrqc(adapter);
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- /* Fill out redirection table */
- for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == adapter->ring_feature[RING_F_RSS].indices)
- j = 0;
- /* reta = 4-byte sliding window of
- * 0x00..(indices-1)(indices-1)00..etc. */
- reta = (reta << 8) | (j * 0x11);
- if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
- }
-
- /* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- mrqc |= IXGBE_MRQC_RSSEN;
- /* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
- }
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
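For the removed redirection-table fill: each 32-bit RETA register holds four one-byte entries, and `j * 0x11` replicates the queue index into both nibbles of its byte, which is what the "4-byte sliding window" comment is describing. A standalone sketch of the same packing; with 4 RSS queues the first register comes out as 0x00112233:

#include <stdint.h>

/* Fill a 128-entry RSS redirection table: one queue index per byte
 * (duplicated in both nibbles via * 0x11), four entries per register. */
static void fill_reta(uint32_t reta_regs[32], unsigned int indices)
{
	uint32_t reta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == indices)
			j = 0;
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)
			reta_regs[i >> 2] = reta;
	}
}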
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+}
- if (adapter->num_vfs) {
- u32 reg;
+/**
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ u32 rxctrl;
- /* Map PF MAC address in RAR Entry 0 to first pool
- * following VFs */
- hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+ /* disable receives while setting up the descriptors */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- /* Set up VF register offsets for selected VT Mode, i.e.
- * 64 VFs for SR-IOV */
- reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- reg |= IXGBE_GCR_EXT_SRIOV;
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
- }
+ ixgbe_setup_psrtype(adapter);
+ ixgbe_setup_rdrxctl(adapter);
- rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ /* Program registers for the distribution of queues */
+ ixgbe_setup_mrqc(adapter);
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
- adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
- /* Disable indicating checksum in descriptor, enables
- * RSS hash */
- rxcsum |= IXGBE_RXCSUM_PCSD;
- }
- if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
- /* Enable IPv4 payload checksum for UDP fragments
- * if PCSD is not set */
- rxcsum |= IXGBE_RXCSUM_IPPCSE;
- }
+ ixgbe_set_uta(adapter);
- IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+ /* set_rx_buffer_len must be called before ring initialization */
+ ixgbe_set_rx_buffer_len(adapter);
- if (hw->mac.type == ixgbe_mac_82599EB) {
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
- }
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- /* Enable 82599 HW-RSC */
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_configure_rscctl(adapter, i);
+ /* disable drop enable for 82598 parts */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
- /* Disable RSC for ACK packets */
- IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
- (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
- }
+ /* enable all receives */
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2876,7 +3079,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vlan_group_set_device(adapter->vlgrp, vid, NULL);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable(adapter);
+ ixgbe_irq_enable(adapter, true, true);
/* remove VID from filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
@@ -2955,7 +3158,7 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+ struct vlan_group *grp)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2973,7 +3176,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
ixgbe_vlan_rx_add_vid(netdev, 0);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable(adapter);
+ ixgbe_irq_enable(adapter, true, true);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -3052,6 +3255,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ /* set all bits that we expect to always be set */
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
+ fctrl |= IXGBE_FCTRL_PMCF;
+
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
@@ -3157,7 +3365,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
u32 txdctl;
int i, j;
- ixgbe_dcb_check_config(&adapter->dcb_cfg);
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 65536);
+ return;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 32768);
+
ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
@@ -3188,17 +3404,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_restore_vlan(adapter);
#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (hw->mac.type == ixgbe_mac_82598EB)
- netif_set_gso_max_size(netdev, 32768);
- else
- netif_set_gso_max_size(netdev, 65536);
- ixgbe_configure_dcb(adapter);
- } else {
- netif_set_gso_max_size(netdev, 65536);
- }
-#else
- netif_set_gso_max_size(netdev, 65536);
+ ixgbe_configure_dcb(adapter);
#endif
#ifdef IXGBE_FCOE
@@ -3209,17 +3415,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->atr_sample_rate =
- adapter->atr_sample_rate;
+ adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
}
+ ixgbe_configure_virtualization(adapter);
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
- (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -3290,7 +3494,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
goto link_cfg_out;
if (hw->mac.ops.get_link_capabilities)
- ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+ &negotiation);
if (ret)
goto link_cfg_out;
@@ -3300,62 +3505,15 @@ link_cfg_out:
return ret;
}
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
- int rxr)
+static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
- int j = adapter->rx_ring[rxr]->reg_idx;
- int k;
-
- for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
- if (IXGBE_READ_REG(&adapter->hw,
- IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- if (k >= IXGBE_MAX_RX_DESC_POLL) {
- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
- "the polling period\n", rxr);
- }
- ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr]->count - 1));
-}
-
-static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- int i, j = 0;
- int num_rx_rings = adapter->num_rx_queues;
- int err;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 txdctl, rxdctl, mhadd;
- u32 dmatxctl;
- u32 gpie;
- u32 ctrl_ext;
-
- ixgbe_get_hw_control(adapter);
-
- if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
- (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
- IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
- } else {
- /* MSI only */
- gpie = 0;
- }
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- gpie |= IXGBE_GPIE_VTMODE_64;
- }
- /* XXX: to interrupt immediately for EICS writes, enable this */
- /* gpie |= IXGBE_GPIE_EIMEN; */
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- }
+ u32 gpie = 0;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_EIAME;
/*
* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
@@ -3376,98 +3534,33 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
- /* Enable Thermal over heat sensor interrupt */
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie |= IXGBE_SDP0_GPIEN;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ /* XXX: to interrupt immediately for EICS writes, enable this */
+ /* gpie |= IXGBE_GPIE_EIMEN; */
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
}
- /* Enable fan failure interrupt if media type is copper */
- if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ /* Enable fan failure interrupt */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- }
- if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (hw->mac.type == ixgbe_mac_82599EB)
gpie |= IXGBE_SDP1_GPIEN;
gpie |= IXGBE_SDP2_GPIEN;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- }
-
-#ifdef IXGBE_FCOE
- /* adjust max frame to be able to do baby jumbo for FCoE */
- if ((netdev->features & NETIF_F_FCOE_MTU) &&
- (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
- max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-#endif /* IXGBE_FCOE */
- mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
- if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
- mhadd &= ~IXGBE_MHADD_MFS_MASK;
- mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- if (adapter->rx_itr_setting == 0) {
- /* cannot set wthresh when itr==0 */
- txdctl &= ~0x007F0000;
- } else {
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- }
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+}
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* DMATXCTL.EN must be set after all Tx queue config is done */
- dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- dmatxctl |= IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
- }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- if (hw->mac.type == ixgbe_mac_82599EB) {
- int wait_loop = 10;
- /* poll for Tx Enable ready */
- do {
- msleep(1);
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- } while (--wait_loop &&
- !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!wait_loop)
- e_err(drv, "Could not enable Tx Queue %d\n", j);
- }
- }
+static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+ u32 ctrl_ext;
- for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i]->reg_idx;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
- /* enable PTHRESH=32 descriptors (half the internal cache)
- * and HTHRESH=0 descriptors (to minimize latency on fetch),
- * this also removes a pesky rx_no_buffer_count increment */
- rxdctl |= 0x0020;
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_rx_desc_queue_enable(adapter, i);
- }
- /* enable all receives */
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB)
- rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
- else
- rxdctl |= IXGBE_RXCTRL_RXEN;
- hw->mac.ops.enable_rx_dma(hw, rxdctl);
+ ixgbe_get_hw_control(adapter);
+ ixgbe_setup_gpie(adapter);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
ixgbe_configure_msix(adapter);
@@ -3483,8 +3576,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
-
- ixgbe_irq_enable(adapter);
+ ixgbe_irq_enable(adapter, true, true);
/*
* If this adapter has a fan, check to see if we had a failure
@@ -3525,12 +3617,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
e_err(probe, "link_config FAILED %d\n", err);
}
- for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
-
/* enable transmits */
- netif_tx_start_all_queues(netdev);
+ netif_tx_start_all_queues(adapter->netdev);
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
@@ -3609,21 +3697,24 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
* @rx_ring: ring to free buffers from
**/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
- /* Free all the Rx ring sk_buffs */
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+ /* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ixgbe_rx_buffer *rx_buffer_info;
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
@@ -3635,7 +3726,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (IXGBE_RSC_CB(this)->delay_unmap) {
dma_unmap_single(&pdev->dev,
IXGBE_RSC_CB(this)->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
IXGBE_RSC_CB(skb)->delay_unmap = false;
@@ -3677,14 +3768,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
* @tx_ring: ring to be cleaned
**/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
unsigned int i;
- /* Free all the Tx ring sk_buffs */
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_buffer_info)
+ return;
+ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
@@ -3736,6 +3830,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
u32 rxctrl;
u32 txdctl;
int i, j;
+ int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
@@ -3774,6 +3869,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
+ /* Cleanup the affinity_hint CPU mask memory and callback */
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
+ /* release the CPU mask memory */
+ free_cpumask_var(q_vector->affinity_mask);
+ }
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3786,13 +3890,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
}
/* Disable the Tx DMA engine on 82599 */
if (hw->mac.type == ixgbe_mac_82599EB)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
/* power down the optics */
if (hw->phy.multispeed_fiber)
@@ -3822,7 +3926,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
int tx_clean_complete, work_done = 0;
@@ -3932,7 +4036,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
* Rx load across CPUs using RSS.
*
**/
-static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -4024,7 +4128,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
* fallthrough conditions.
*
**/
-static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
@@ -4033,7 +4137,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues_per_pool = 1;
if (ixgbe_set_sriov_queues(adapter))
- return;
+ goto done;
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
@@ -4056,12 +4160,14 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
done:
- /* Notify the stack of the (possibly) reduced Tx Queue count. */
+ /* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ return netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+ int vectors)
{
int err, vector_threshold;
@@ -4080,7 +4186,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
*/
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
+ vectors);
if (!err) /* Success in acquiring all requested vectors. */
break;
else if (err < 0)
@@ -4107,7 +4213,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* vectors we were allocated.
*/
adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ adapter->max_msix_q_vectors + NON_Q_VECTORS);
}
}
@@ -4178,12 +4284,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
}
for ( ; i < 5; i++) {
adapter->tx_ring[i]->reg_idx =
- ((i + 2) << 4);
+ ((i + 2) << 4);
adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
adapter->tx_ring[i]->reg_idx =
- ((i + 8) << 3);
+ ((i + 8) << 3);
adapter->rx_ring[i]->reg_idx = i << 4;
}
@@ -4226,7 +4332,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Cache the descriptor ring offsets for Flow Director to the assigned rings.
*
**/
-static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
int i;
bool ret = false;
@@ -4383,7 +4489,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
adapter->node = cur_node;
}
ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ adapter->node);
if (!ring)
ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!ring)
@@ -4407,7 +4513,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
adapter->node = cur_node;
}
ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ adapter->node);
if (!ring)
ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!ring)
@@ -4453,7 +4559,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
* (roughly) the same number of vectors as there are CPU's.
*/
v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)num_online_cpus()) + NON_Q_VECTORS;
+ (int)num_online_cpus()) + NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
@@ -4467,7 +4573,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
+ sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
@@ -4486,7 +4592,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
- ixgbe_set_num_queues(adapter);
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
@@ -4529,10 +4637,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL, adapter->node);
+ GFP_KERNEL, adapter->node);
if (!q_vector)
q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -4611,7 +4719,9 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
int err;
/* Number of supported queues */
- ixgbe_set_num_queues(adapter);
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
@@ -4693,8 +4803,8 @@ static void ixgbe_sfp_timer(unsigned long data)
static void ixgbe_sfp_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_task);
+ struct ixgbe_adapter,
+ sfp_task);
struct ixgbe_hw *hw = &adapter->hw;
if ((hw->phy.type == ixgbe_phy_nl) &&
@@ -4719,7 +4829,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
reschedule:
if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
+ round_jiffies(jiffies + (2 * HZ)));
}
/**
@@ -4775,7 +4885,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->atr_sample_rate = 20;
}
adapter->ring_feature[RING_F_FDIR].indices =
- IXGBE_MAX_FDIR_INDICES;
+ IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4806,7 +4916,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
+ adapter->ring_feature[RING_F_DCB].indices);
#endif
@@ -4861,7 +4971,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
* Return 0 on success, negative on failure
**/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -4928,7 +5038,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
* Returns 0 on success, negative on failure
**/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -5001,7 +5111,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
* Free all transmit software resources
**/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
@@ -5039,7 +5149,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
* Free all receive software resources
**/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
@@ -5333,6 +5443,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
u64 non_eop_descs = 0, restart_queue = 0;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5343,7 +5454,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 rsc_flush = 0;
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
- IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
rsc_count += adapter->rx_ring[i]->rsc_count;
rsc_flush += adapter->rx_ring[i]->rsc_flush;
@@ -5361,119 +5472,118 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
- adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
missed_rx += mpc;
- adapter->stats.mpc[i] += mpc;
- total_mpc += adapter->stats.mpc[i];
+ hwstats->mpc[i] += mpc;
+ total_mpc += hwstats->mpc[i];
if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
if (hw->mac.type == ixgbe_mac_82599EB) {
- adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONRXCNT(i));
- adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFRXCNT(i));
- adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ hwstats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
} else {
- adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONRXC(i));
- adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFRXC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ hwstats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
}
- adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONTXC(i));
- adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFTXC(i));
+ hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
}
- adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
/* work around hardware counting issue */
- adapter->stats.gprc -= missed_rx;
+ hwstats->gprc -= missed_rx;
/* 82598 hardware only has a 32 bit counter in the high register */
if (hw->mac.type == ixgbe_mac_82599EB) {
u64 tmp;
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
- adapter->stats.gorc += (tmp << 32);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
- adapter->stats.gotc += (tmp << 32);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
- adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
+ /* 4 high bits of GORC */
+ hwstats->gorc += (tmp << 32);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
+ /* 4 high bits of GOTC */
+ hwstats->gotc += (tmp << 32);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
- adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
- adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
- adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
- adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
- adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
- adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
} else {
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
}
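In the 82599 branch above, the good-octet counters are 36 bits wide: 32 bits in the low register plus the 4 valid bits of the high register. A standalone sketch of stitching such a counter back into a 64-bit value (register reads are passed in as plain integers here):

#include <stdint.h>

/* Combine a 36-bit hardware counter from its low and high register halves;
 * only the 4 low bits of the high half are implemented. */
static uint64_t counter_36bit(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)(hi & 0xF) << 32) | lo;
}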
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
- adapter->stats.bprc += bprc;
- adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ hwstats->bprc += bprc;
+ hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.mprc -= bprc;
- adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
- adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
- adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
- adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
- adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
- adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
- adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
- adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+ hwstats->mprc -= bprc;
+ hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- adapter->stats.lxontxc += lxon;
+ hwstats->lxontxc += lxon;
lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- adapter->stats.lxofftxc += lxoff;
- adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ hwstats->lxofftxc += lxoff;
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
/*
* 82598 errata - tx of flow control packets is included in tx counters
*/
xon_off_tot = lxon + lxoff;
- adapter->stats.gptc -= xon_off_tot;
- adapter->stats.mptc -= xon_off_tot;
- adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
- adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
- adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
- adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
- adapter->stats.ptc64 -= xon_off_tot;
- adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
- adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
- adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
- adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
- adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+ hwstats->gptc -= xon_off_tot;
+ hwstats->mptc -= xon_off_tot;
+ hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hwstats->ptc64 -= xon_off_tot;
+ hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Fill out the OS statistics structure */
- netdev->stats.multicast = adapter->stats.mprc;
+ netdev->stats.multicast = hwstats->mprc;
/* Rx Errors */
- netdev->stats.rx_errors = adapter->stats.crcerrs +
- adapter->stats.rlec;
+ netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
netdev->stats.rx_dropped = 0;
- netdev->stats.rx_length_errors = adapter->stats.rlec;
- netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_length_errors = hwstats->rlec;
+ netdev->stats.rx_crc_errors = hwstats->crcerrs;
netdev->stats.rx_missed_errors = total_mpc;
}
@@ -5532,8 +5642,8 @@ watchdog_short_circuit:
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- multispeed_fiber_task);
+ struct ixgbe_adapter,
+ multispeed_fiber_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 autoneg;
bool negotiation;
@@ -5556,8 +5666,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_config_module_task);
+ struct ixgbe_adapter,
+ sfp_config_module_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 err;
@@ -5590,15 +5700,15 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- fdir_reinit_task);
+ struct ixgbe_adapter,
+ fdir_reinit_task);
struct ixgbe_hw *hw = &adapter->hw;
int i;
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
@@ -5616,8 +5726,8 @@ static DEFINE_MUTEX(ixgbe_watchdog_lock);
static void ixgbe_watchdog_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- watchdog_task);
+ struct ixgbe_adapter,
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed;
@@ -5648,7 +5758,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (link_up ||
time_after(jiffies, (adapter->link_check_timeout +
- IXGBE_TRY_LINK_TIMEOUT))) {
+ IXGBE_TRY_LINK_TIMEOUT))) {
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
}
@@ -5719,8 +5829,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u8 *hdr_len)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5743,28 +5853,28 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
i = tx_ring->next_to_use;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
/* VLAN MACLEN IPLEN */
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
vlan_macip_lens |=
(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
+ IXGBE_ADVTXD_MACLEN_SHIFT);
*hdr_len += skb_network_offset(skb);
vlan_macip_lens |=
(skb_transport_header(skb) - skb_network_header(skb));
@@ -5775,7 +5885,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ IXGBE_ADVTXD_DTYP_CTXT);
if (skb->protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -5803,9 +5913,53 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+{
+ u32 rtn = 0;
+ __be16 protocol;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+ protocol = ((const struct vlan_ethhdr *)skb->data)->
+ h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_SCTP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+ }
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ switch (ipv6_hdr(skb)->nexthdr) {
+ case IPPROTO_TCP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_SCTP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+ }
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ e_warn(probe, "partial checksum but proto=%x!\n",
+ skb->protocol);
+ break;
+ }
+
+ return rtn;
+}
+
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5816,63 +5970,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
(tx_flags & IXGBE_TX_FLAGS_VLAN)) {
i = tx_ring->next_to_use;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
vlan_macip_lens |=
(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
+ IXGBE_ADVTXD_MACLEN_SHIFT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
+ skb_network_header(skb));
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = 0;
type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
- const struct vlan_ethhdr *vhdr =
- (const struct vlan_ethhdr *)skb->data;
-
- protocol = vhdr->h_vlan_encapsulated_proto;
- } else {
- protocol = skb->protocol;
- }
+ IXGBE_ADVTXD_DTYP_CTXT);
- switch (protocol) {
- case cpu_to_be16(ETH_P_IP):
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
- case cpu_to_be16(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- e_warn(probe, "partial checksum "
- "but proto=%x!\n",
- skb->protocol);
- }
- break;
- }
- }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -5893,9 +6009,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
{
struct pci_dev *pdev = adapter->pdev;
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -5990,7 +6106,7 @@ dma_error:
/* clear timestamp and dma mappings for remaining portion of packet */
while (count--) {
- if (i==0)
+ if (i == 0)
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
@@ -6001,8 +6117,8 @@ dma_error:
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- int tx_flags, int count, u32 paylen, u8 hdr_len)
+ struct ixgbe_ring *tx_ring,
+ int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6021,17 +6137,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ IXGBE_ADVTXD_POPTS_SHIFT;
/* use index 1 context for tso */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ IXGBE_ADVTXD_POPTS_SHIFT;
} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ IXGBE_ADVTXD_POPTS_SHIFT;
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
olinfo_status |= IXGBE_ADVTXD_CC;
@@ -6045,10 +6161,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
i = tx_ring->next_to_use;
while (count--) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
i++;
if (i == tx_ring->count)
@@ -6070,7 +6186,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ int queue, u32 tx_flags)
{
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
@@ -6098,7 +6214,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
- IXGBE_TX_FLAGS_VLAN_SHIFT;
+ IXGBE_TX_FLAGS_VLAN_SHIFT;
src_ipv4_addr = iph->saddr;
dst_ipv4_addr = iph->daddr;
flex_bytes = eth->h_proto;
@@ -6117,7 +6233,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
}
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+ struct ixgbe_ring *tx_ring, int size)
{
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
@@ -6137,7 +6253,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
}
static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+ struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -6183,11 +6299,10 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring;
struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
@@ -6211,8 +6326,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
-
#ifdef IXGBE_FCOE
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
@@ -6283,10 +6396,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
if (tx_ring->atr_sample_rate) {
++tx_ring->atr_count;
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
+ test_bit(__IXGBE_FDIR_INIT_DONE,
+ &tx_ring->reinit_state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags);
tx_ring->atr_count = 0;
}
}
@@ -6294,7 +6407,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
txq->tx_bytes += skb->len;
txq->tx_packets++;
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
+ hdr_len);
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
} else {
@@ -6306,6 +6419,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring;
+
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
+ return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+}
+
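With the split above, ixgbe_xmit_frame stays a thin wrapper and other transmit paths can hand a specific ring to ixgbe_xmit_frame_ring directly. A hedged fragment in the driver's own style showing such a caller; the ring choice here is purely hypothetical and the snippet is not standalone code:

/* Hypothetical caller that pins transmission to ring 0; mirrors the
 * wrapper added above but selects the ring itself. */
static netdev_tx_t xmit_on_ring0(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_xmit_frame_ring(skb, netdev, adapter, adapter->tx_ring[0]);
}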
/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -6437,7 +6559,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
#endif
static const struct net_device_ops ixgbe_netdev_ops = {
- .ndo_open = ixgbe_open,
+ .ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue,
@@ -6532,7 +6654,7 @@ err_novfs:
* and a hardware reset occur.
**/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct ixgbe_adapter *adapter = NULL;
@@ -6577,7 +6699,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), ixgbe_driver_name);
+ IORESOURCE_MEM), ixgbe_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
@@ -6617,7 +6739,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ pci_resource_len(pdev, 0));
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -6661,7 +6783,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
* which might start the timer
*/
init_timer(&adapter->sfp_timer);
- adapter->sfp_timer.function = &ixgbe_sfp_timer;
+ adapter->sfp_timer.function = ixgbe_sfp_timer;
adapter->sfp_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
@@ -6671,7 +6793,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* a new SFP+ module arrival, called from GPI SDP2 context */
INIT_WORK(&adapter->sfp_config_module_task,
- ixgbe_sfp_config_module_task);
+ ixgbe_sfp_config_module_task);
ii->get_invariants(hw);
@@ -6723,10 +6845,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_probe_vf(adapter, ii);
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
netdev->features |= NETIF_F_IPV6_CSUM;
netdev->features |= NETIF_F_TSO;
@@ -6766,8 +6888,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
- if (pci_using_dac)
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
@@ -6793,7 +6917,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->mac.ops.disable_tx_laser(hw);
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &ixgbe_watchdog;
+ adapter->watchdog_timer.function = ixgbe_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
@@ -6806,7 +6930,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
switch (pdev->device) {
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
break;
default:
adapter->wol = 0;
@@ -6819,13 +6943,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* print bus type/speed/width info */
e_dev_info("(PCI Express:%s:%s) %pM\n",
- ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
- (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
- "Unknown"),
- netdev->dev_addr);
+ (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
+ hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
+ "Unknown"),
+ (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"),
+ netdev->dev_addr);
ixgbe_read_pba_num_generic(hw, &part_num);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
@@ -6872,7 +6997,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
- INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
+ INIT_WORK(&adapter->check_overtemp_task,
+ ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6908,8 +7034,8 @@ err_eeprom:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -6976,7 +7102,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
iounmap(adapter->hw.hw_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ IORESOURCE_MEM));
e_dev_info("complete\n");
@@ -6996,7 +7122,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
* this device has been detected.
*/
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7102,8 +7228,7 @@ static struct pci_driver ixgbe_driver = {
static int __init ixgbe_init_module(void)
{
int ret;
- pr_info("%s - version %s\n", ixgbe_driver_string,
- ixgbe_driver_version);
+ pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
pr_info("%s\n", ixgbe_copyright);
#ifdef CONFIG_IXGBE_DCA
@@ -7132,12 +7257,12 @@ static void __exit ixgbe_exit_module(void)
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
+ void *p)
{
int ret_val;
ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
- __ixgbe_notify_dca);
+ __ixgbe_notify_dca);
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index d75f9148eb1..435e0281e1f 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -200,7 +200,8 @@ out:
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
@@ -227,7 +228,7 @@ out:
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -247,20 +248,6 @@ out:
return ret_val;
}
-/**
- * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
- * @hw: pointer to the HW structure
- *
- * Setup the mailbox read and write message function pointers
- **/
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
-
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
-}
-
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index be7ab3309ab..c5ae4b4da83 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -83,12 +83,9 @@
s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
extern struct ixgbe_mbx_operations mbx_ops_82599;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 49661a138e2..a6b720ae7fe 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -43,8 +43,8 @@
#include "ixgbe_sriov.h"
-int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf)
+static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
{
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
@@ -104,13 +104,14 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
}
}
-int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
+ u32 vf)
{
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
+static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
vmolr |= (IXGBE_VMOLR_ROMPE |
@@ -134,7 +135,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}
-inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -162,8 +163,8 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
hw->mac.ops.clear_rar(hw, rar_entry);
}
-int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
- int vf, unsigned char *mac_addr)
+static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
{
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -197,7 +198,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
-inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 reg;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 184730ecdfb..9a424bb688c 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -28,16 +28,8 @@
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
-int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf);
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
-int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
-void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
-void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
-int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
- int vf, unsigned char *mac_addr);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9587d975d66..d3cc6ce7c97 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -871,6 +871,8 @@
#define IXGBE_RDRXCTL_MVMEN 0x00000020
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must be set to 1 when RSC is enabled */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must be set to 1 when RSC is enabled */
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4680b069b84..4cc817acfb6 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -330,10 +330,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
- int i, err;
+ int i, err = 0;
u32 new_rx_count, new_tx_count;
- bool need_tx_update = false;
- bool need_rx_update = false;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -355,89 +353,96 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
- if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!tx_ring) {
- err = -ENOMEM;
- goto err_setup;
- }
- memcpy(tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter,
- &tx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_tx_resources(adapter,
- &tx_ring[i]);
- }
- kfree(tx_ring);
- goto err_setup;
- }
- tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
- }
- need_tx_update = true;
+ /*
+ * If the adapter isn't up and running then just set the
+ * new parameters and scurry for the exits.
+ */
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
}
- if (new_rx_count != adapter->rx_ring_count) {
- rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if ((!rx_ring) && (need_tx_update)) {
- err = -ENOMEM;
- goto err_rx_setup;
- }
- memcpy(rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter,
- &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter,
- &rx_ring[i]);
- }
- kfree(rx_ring);
- goto err_rx_setup;
- }
- rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
- }
- need_rx_update = true;
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
}
-err_rx_setup:
- /* if rings need to be updated, here's the place to do it in one shot */
- if (need_tx_update || need_rx_update) {
- if (netif_running(netdev))
- ixgbevf_down(adapter);
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto err_rx_setup;
}
- /* tx */
- if (need_tx_update) {
- kfree(adapter->tx_ring);
- adapter->tx_ring = tx_ring;
- tx_ring = NULL;
- adapter->tx_ring_count = new_tx_count;
+ ixgbevf_down(adapter);
+
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ goto err_tx_ring_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
- /* rx */
- if (need_rx_update) {
- kfree(adapter->rx_ring);
- adapter->rx_ring = rx_ring;
- rx_ring = NULL;
- adapter->rx_ring_count = new_rx_count;
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ goto err_rx_ring_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
+ /*
+ * Only switch to new rings if all the prior allocations
+ * and ring setups have succeeded.
+ */
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ adapter->tx_ring_count = new_tx_count;
+
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ adapter->rx_ring_count = new_rx_count;
+
/* success! */
- err = 0;
- if (netif_running(netdev))
- ixgbevf_up(adapter);
+ ixgbevf_up(adapter);
+
+ goto clear_reset;
+
+err_rx_ring_setup:
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+
+err_tx_ring_setup:
+ kfree(rx_ring);
+
+err_rx_setup:
+ kfree(tx_ring);
-err_setup:
+clear_reset:
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
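The reworked ixgbevf_set_ringparam() above is an instance of a common commit-on-success pattern: build every replacement resource first, switch the adapter over only once everything has succeeded, and unwind in reverse order on any failure. A minimal, driver-agnostic sketch of that pattern follows; it is illustrative only, and the ring/setup_ring/resize_rings names are invented, not part of the patch:

#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative only: "build new, commit on success, roll back on error". */
struct ring {
	u64 *buf;
	int count;
};

static int setup_ring(struct ring *r, int count)
{
	r->buf = kcalloc(count, sizeof(*r->buf), GFP_KERNEL);
	if (!r->buf)
		return -ENOMEM;
	r->count = count;
	return 0;
}

static int resize_rings(struct ring **cur, int num, int new_count)
{
	struct ring *tmp;
	int i, err;

	tmp = kcalloc(num, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		err = setup_ring(&tmp[i], new_count);
		if (err)
			goto err_unwind;	/* free only what was built */
	}

	/* Everything succeeded: only now tear down the old rings and swap. */
	for (i = 0; i < num; i++)
		kfree((*cur)[i].buf);
	kfree(*cur);
	*cur = tmp;
	return 0;

err_unwind:
	while (i--)
		kfree(tmp[i].buf);
	kfree(tmp);
	return err;
}

The property the patch's own comment calls out is the same: the adapter's ring pointers are only switched over after every allocation and setup has succeeded.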
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index f7015efbff0..da4033c6efa 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -243,7 +243,6 @@ struct ixgbevf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 918c00359b0..0866a1cf4d7 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -308,10 +308,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
+ netdev->stats.tx_bytes += total_bytes;
+ netdev->stats.tx_packets += total_packets;
- return (count < tx_ring->work_limit);
+ return count < tx_ring->work_limit;
}
/**
@@ -356,7 +356,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
u32 status_err, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -639,8 +639,8 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
- adapter->net_stats.rx_bytes += total_rx_bytes;
- adapter->net_stats.rx_packets += total_rx_packets;
+ adapter->netdev->stats.rx_bytes += total_rx_bytes;
+ adapter->netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -2297,7 +2297,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfmprc);
/* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->netdev->stats.multicast = adapter->stats.vfmprc -
adapter->stats.base_vfmprc;
}
@@ -3181,21 +3181,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
/**
- * ixgbevf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* ixgbevf_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -3272,7 +3257,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = &ixgbevf_open,
.ndo_stop = &ixgbevf_close,
.ndo_start_xmit = &ixgbevf_xmit_frame,
- .ndo_get_stats = &ixgbevf_get_stats,
.ndo_set_rx_mode = &ixgbevf_set_rx_mode,
.ndo_set_multicast_list = &ixgbevf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
@@ -3426,7 +3410,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
}
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.function = ixgbevf_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index b8143501e6f..84ac486f4a6 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -308,7 +308,7 @@ out_no_read:
*
* Initializes the hw->mbx struct to correct values for vf mailbox
*/
-s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 1b0e0bf4c0f..8c063bebee7 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -95,6 +95,4 @@
/* forward declaration of the HW struct */
struct ixgbe_hw;
-s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
-
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index f6f929958ba..bfe42c1fcfa 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -368,7 +368,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
return 0;
}
-struct ixgbe_mac_operations ixgbevf_mac_ops = {
+static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
.start_hw = ixgbevf_start_hw_vf,
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 94b750b8874..61f9dc83142 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -124,8 +124,6 @@ struct ixgbe_hw {
void *back;
u8 __iomem *hw_addr;
- u8 *flash_address;
- unsigned long io_base;
struct ixgbe_mac_info mac;
struct ixgbe_mbx_info mbx;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 99f24f5cac5..c04c096bc6a 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -73,7 +75,7 @@ read_again:
}
if (i == 0) {
- jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
+ pr_err("phy(%d) read timeout : %d\n", phy, reg);
return 0;
}
@@ -102,7 +104,7 @@ jme_mdio_write(struct net_device *netdev,
}
if (i == 0)
- jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
+ pr_err("phy(%d) write timeout : %d\n", phy, reg);
}
static inline void
@@ -227,7 +229,7 @@ jme_reload_eeprom(struct jme_adapter *jme)
}
if (i == 0) {
- jeprintk(jme->pdev, "eeprom reload timeout\n");
+ pr_err("eeprom reload timeout\n");
return -EIO;
}
}
@@ -397,8 +399,7 @@ jme_check_link(struct net_device *netdev, int testonly)
phylink = jread32(jme, JME_PHY_LINK);
}
if (!cnt)
- jeprintk(jme->pdev,
- "Waiting speed resolve timeout.\n");
+ pr_err("Waiting speed resolve timeout\n");
strcat(linkmsg, "ANed: ");
}
@@ -480,13 +481,13 @@ jme_check_link(struct net_device *netdev, int testonly)
strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
"MDI-X" :
"MDI");
- netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
+ netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
netif_carrier_on(netdev);
} else {
if (testonly)
goto out;
- netif_info(jme, link, jme->dev, "Link is down.\n");
+ netif_info(jme, link, jme->dev, "Link is down\n");
jme->phylink = 0;
netif_carrier_off(netdev);
}
@@ -648,7 +649,7 @@ jme_disable_tx_engine(struct jme_adapter *jme)
}
if (!i)
- jeprintk(jme->pdev, "Disable TX engine timeout.\n");
+ pr_err("Disable TX engine timeout\n");
}
static void
@@ -867,7 +868,7 @@ jme_disable_rx_engine(struct jme_adapter *jme)
}
if (!i)
- jeprintk(jme->pdev, "Disable RX engine timeout.\n");
+ pr_err("Disable RX engine timeout\n");
}
@@ -887,13 +888,13 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
== RXWBFLAG_UDPON)) {
if (flags & RXWBFLAG_IPV4)
- netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
== RXWBFLAG_IPV4)) {
- netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
return false;
}
@@ -936,7 +937,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
if (jme->vlgrp) {
@@ -1185,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
while (!atomic_dec_and_test(&jme->link_changing)) {
atomic_inc(&jme->link_changing);
- netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
+ netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
while (atomic_read(&jme->link_changing) != 1)
- netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
+ netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
}
if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1221,15 +1222,13 @@ jme_link_change_tasklet(unsigned long arg)
if (netif_carrier_ok(netdev)) {
rc = jme_setup_rx_resources(jme);
if (rc) {
- jeprintk(jme->pdev, "Allocating resources for RX error"
- ", Device STOPPED!\n");
+ pr_err("Allocating resources for RX error, Device STOPPED!\n");
goto out_enable_tasklet;
}
rc = jme_setup_tx_resources(jme);
if (rc) {
- jeprintk(jme->pdev, "Allocating resources for TX error"
- ", Device STOPPED!\n");
+ pr_err("Allocating resources for TX error, Device STOPPED!\n");
goto err_out_free_rx_resources;
}
@@ -1324,7 +1323,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
- netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
+ netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
netif_wake_queue(jme->dev);
}
@@ -1339,7 +1338,7 @@ jme_tx_clean_tasklet(unsigned long arg)
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
int i, j, cnt = 0, max, err, mask;
- tx_dbg(jme, "Into txclean.\n");
+ tx_dbg(jme, "Into txclean\n");
if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
goto out;
@@ -1361,7 +1360,7 @@ jme_tx_clean_tasklet(unsigned long arg)
!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
tx_dbg(jme, "txclean: %d+%d@%lu\n",
- i, ctxbi->nr_desc, jiffies);
+ i, ctxbi->nr_desc, jiffies);
err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
@@ -1402,7 +1401,7 @@ jme_tx_clean_tasklet(unsigned long arg)
ctxbi->nr_desc = 0;
}
- tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
+ tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
atomic_set(&txring->next_to_clean, i);
atomic_add(cnt, &txring->nr_free);
@@ -1548,10 +1547,10 @@ jme_request_irq(struct jme_adapter *jme)
rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (rc) {
- jeprintk(jme->pdev,
- "Unable to request %s interrupt (return: %d)\n",
- test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
- rc);
+ netdev_err(netdev,
+ "Unable to request %s interrupt (return: %d)\n",
+ test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
+ rc);
if (test_bit(JME_FLAG_MSI, &jme->flags)) {
pci_disable_msi(jme->pdev);
@@ -1834,7 +1833,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
*flags |= TXFLAG_UDPCS;
break;
default:
- netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
+ netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
break;
}
}
@@ -1909,12 +1908,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
smp_wmb();
if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
netif_stop_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
smp_wmb();
if (atomic_read(&txring->nr_free)
>= (jme->tx_wake_threshold)) {
netif_wake_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
}
}
@@ -1922,7 +1921,8 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev,
+ "TX Queue Stopped %d@%lu\n", idx, jiffies);
}
}
@@ -1945,7 +1945,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(idx < 0)) {
netif_stop_queue(netdev);
- netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
+ netif_err(jme, tx_err, jme->dev,
+ "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1957,9 +1958,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
TXCS_QUEUE0S |
TXCS_ENABLE);
- tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
- skb_shinfo(skb)->nr_frags + 2,
- jiffies);
+ tx_dbg(jme, "xmit: %d+%d@%lu\n",
+ idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
jme_stop_queue_if_full(jme);
return NETDEV_TX_OK;
@@ -2501,7 +2501,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return 0xFF;
}
@@ -2517,7 +2517,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return 0xFF;
}
@@ -2537,7 +2537,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return;
}
@@ -2554,7 +2554,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return;
}
@@ -2699,26 +2699,26 @@ jme_init_one(struct pci_dev *pdev,
*/
rc = pci_enable_device(pdev);
if (rc) {
- jeprintk(pdev, "Cannot enable PCI device.\n");
+ pr_err("Cannot enable PCI device\n");
goto err_out;
}
using_dac = jme_pci_dma64(pdev);
if (using_dac < 0) {
- jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
+ pr_err("Cannot set PCI DMA Mask\n");
rc = -EIO;
goto err_out_disable_pdev;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- jeprintk(pdev, "No PCI resource region found.\n");
+ pr_err("No PCI resource region found\n");
rc = -ENOMEM;
goto err_out_disable_pdev;
}
rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
- jeprintk(pdev, "Cannot obtain PCI resource region.\n");
+ pr_err("Cannot obtain PCI resource region\n");
goto err_out_disable_pdev;
}
@@ -2729,7 +2729,7 @@ jme_init_one(struct pci_dev *pdev,
*/
netdev = alloc_etherdev(sizeof(*jme));
if (!netdev) {
- jeprintk(pdev, "Cannot allocate netdev structure.\n");
+ pr_err("Cannot allocate netdev structure\n");
rc = -ENOMEM;
goto err_out_release_regions;
}
@@ -2767,7 +2767,7 @@ jme_init_one(struct pci_dev *pdev,
jme->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!(jme->regs)) {
- jeprintk(pdev, "Mapping PCI resource region error.\n");
+ pr_err("Mapping PCI resource region error\n");
rc = -ENOMEM;
goto err_out_free_netdev;
}
@@ -2855,8 +2855,8 @@ jme_init_one(struct pci_dev *pdev,
if (!jme->mii_if.phy_id) {
rc = -EIO;
- jeprintk(pdev, "Can not find phy_id.\n");
- goto err_out_unmap;
+ pr_err("Can not find phy_id\n");
+ goto err_out_unmap;
}
jme->reg_ghc |= GHC_LINK_POLL;
@@ -2883,8 +2883,7 @@ jme_init_one(struct pci_dev *pdev,
jme_reset_mac_processor(jme);
rc = jme_reload_eeprom(jme);
if (rc) {
- jeprintk(pdev,
- "Reload eeprom for reading MAC Address error.\n");
+ pr_err("Reload eeprom for reading MAC Address error\n");
goto err_out_unmap;
}
jme_load_macaddr(netdev);
@@ -2900,7 +2899,7 @@ jme_init_one(struct pci_dev *pdev,
*/
rc = register_netdev(netdev);
if (rc) {
- jeprintk(pdev, "Cannot register net device.\n");
+ pr_err("Cannot register net device\n");
goto err_out_unmap;
}
@@ -3042,8 +3041,7 @@ static struct pci_driver jme_driver = {
static int __init
jme_init_module(void)
{
- printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
- "driver version %s\n", DRV_VERSION);
+ pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
return pci_register_driver(&jme_driver);
}
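The jeprintk() removals above rely on the kernel's pr_fmt() convention: defining pr_fmt() before the first include makes every pr_err()/pr_info() in the file prepend the module name automatically, which is what made the driver-private wrapper redundant. A minimal sketch of the pattern (demo_init is a placeholder, not jme code):

/* Must be defined before the first include so the pr_*() helpers pick it up. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* Printed as "<modulename>: eeprom reload timeout" at KERN_ERR. */
	pr_err("eeprom reload timeout\n");
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");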
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 07ad3a45718..1360f68861b 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -41,9 +41,6 @@
NETIF_MSG_TX_ERR | \
NETIF_MSG_HW)
-#define jeprintk(pdev, fmt, args...) \
- printk(KERN_ERR PFX fmt, ## args)
-
#ifdef TX_DEBUG
#define tx_dbg(priv, fmt, args...) \
printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a6f13..51919fcd50c 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
- if (rxlen > 0) {
- skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
- if (!skb) {
- /* todo - dump frame and move on */
- }
+ if (rxlen > 4) {
+ unsigned int rxalign;
+
+ rxlen -= 4;
+ rxalign = ALIGN(rxlen, 4);
+ skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+ if (skb) {
- /* two bytes to ensure ip is aligned, and four bytes
- * for the status header and 4 bytes of garbage */
- skb_reserve(skb, 2 + 4 + 4);
+ /* 4 bytes of status header + 4 bytes of
+ * garbage: we put them before ethernet
+ * header, so that they are copied,
+ * but ignored.
+ */
- rxpkt = skb_put(skb, rxlen - 4) - 8;
+ rxpkt = skb_put(skb, rxlen) - 8;
- /* align the packet length to 4 bytes, and add 4 bytes
- * as we're getting the rx status header as well */
- ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+ ks8851_rdfifo(ks, rxpkt, rxalign + 8);
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
+ if (netif_msg_pktdata(ks))
+ ks8851_dbg_dumpkkt(ks, rxpkt);
- skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx(skb);
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+ netif_rx(skb);
- ks->netdev->stats.rx_packets++;
- ks->netdev->stats.rx_bytes += rxlen - 4;
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
+ }
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
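For reference, the rxalign value used above comes from the kernel's ALIGN() helper, which rounds a length up to the next multiple of a power-of-two boundary. The sketch below shows just that arithmetic; rx_fifo_len is a made-up name, and the +8 mirrors the 4-byte status header plus 4 bytes of garbage mentioned in the new comment:

#include <linux/kernel.h>	/* ALIGN() */

/* ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1) for power-of-two a,
 * so rxlen values 61..64 all align to 64.
 */
static unsigned int rx_fifo_len(unsigned int rxlen)
{
	unsigned int rxalign = ALIGN(rxlen, 4);

	return rxalign + 8;
}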
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 4eea3f70c5c..9f8e7027b0b 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
@@ -159,7 +160,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
* temac_dcr_setup - If the DMA is DCR based, then setup the address and
* I/O functions
*/
-static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
struct device_node *np)
{
unsigned int dcrs;
@@ -184,7 +185,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
* temac_dcr_setup - This is a stub for when DCR is not supported,
* such as with MicroBlaze
*/
-static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
struct device_node *np)
{
return -1;
@@ -494,7 +495,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
mutex_unlock(&lp->indirect_mutex);
- return (0);
+ return 0;
}
/* Initialize temac */
@@ -760,7 +761,7 @@ static void ll_temac_recv(struct net_device *ndev)
skb_put(skb, length);
skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* if we're doing rx csum offload, set it up */
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
@@ -902,8 +903,8 @@ temac_poll_controller(struct net_device *ndev)
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
- ll_temac_rx_irq(lp->tx_irq, lp);
- ll_temac_tx_irq(lp->rx_irq, lp);
+ ll_temac_rx_irq(lp->tx_irq, ndev);
+ ll_temac_tx_irq(lp->rx_irq, ndev);
enable_irq(lp->tx_irq);
enable_irq(lp->rx_irq);
@@ -952,7 +953,7 @@ static const struct attribute_group temac_attr_group = {
};
static int __init
-temac_of_probe(struct of_device *op, const struct of_device_id *match)
+temac_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *np;
struct temac_local *lp;
@@ -1094,7 +1095,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
return rc;
}
-static int __devexit temac_of_remove(struct of_device *op)
+static int __devexit temac_of_remove(struct platform_device *op)
{
struct net_device *ndev = dev_get_drvdata(&op->dev);
struct temac_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c975b3..8cf9d4f56bb 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 9a099679532..2d9663a1c54 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -64,7 +64,6 @@ struct pcpu_lstats {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
- unsigned long drops;
};
/*
@@ -74,7 +73,6 @@ struct pcpu_lstats {
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct pcpu_lstats __percpu *pcpu_lstats;
struct pcpu_lstats *lb_stats;
int len;
@@ -83,8 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
- pcpu_lstats = (void __percpu __force *)dev->ml_priv;
- lb_stats = this_cpu_ptr(pcpu_lstats);
+ lb_stats = this_cpu_ptr(dev->lstats);
len = skb->len;
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
@@ -92,8 +89,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
lb_stats->bytes += len;
lb_stats->packets++;
u64_stats_update_end(&lb_stats->syncp);
- } else
- lb_stats->drops++;
+ }
return NETDEV_TX_OK;
}
@@ -101,32 +97,26 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- const struct pcpu_lstats __percpu *pcpu_lstats;
u64 bytes = 0;
u64 packets = 0;
- u64 drops = 0;
int i;
- pcpu_lstats = (void __percpu __force *)dev->ml_priv;
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
u64 tbytes, tpackets;
unsigned int start;
- lb_stats = per_cpu_ptr(pcpu_lstats, i);
+ lb_stats = per_cpu_ptr(dev->lstats, i);
do {
start = u64_stats_fetch_begin(&lb_stats->syncp);
tbytes = lb_stats->bytes;
tpackets = lb_stats->packets;
} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
- drops += lb_stats->drops;
bytes += tbytes;
packets += tpackets;
}
stats->rx_packets = packets;
stats->tx_packets = packets;
- stats->rx_dropped = drops;
- stats->rx_errors = drops;
stats->rx_bytes = bytes;
stats->tx_bytes = bytes;
return stats;
@@ -147,22 +137,16 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- struct pcpu_lstats __percpu *lstats;
-
- lstats = alloc_percpu(struct pcpu_lstats);
- if (!lstats)
+ dev->lstats = alloc_percpu(struct pcpu_lstats);
+ if (!dev->lstats)
return -ENOMEM;
- dev->ml_priv = (void __force *)lstats;
return 0;
}
static void loopback_dev_free(struct net_device *dev)
{
- struct pcpu_lstats __percpu *lstats =
- (void __percpu __force *)dev->ml_priv;
-
- free_percpu(lstats);
+ free_percpu(dev->lstats);
free_netdev(dev);
}
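The loopback rework above is the stock per-cpu u64_stats pattern: each CPU bumps its own counters inside update_begin()/update_end(), and the reader sums all CPUs, retrying any counter pair whose sequence changed mid-read (which only matters on 32-bit, where the seqcount is real). A condensed sketch of just that pattern, with illustrative struct and function names:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct pcpu_counters {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* Writer: runs on the local CPU with BHs off, as in loopback_xmit(). */
static void count_packet(struct pcpu_counters __percpu *stats, unsigned int len)
{
	struct pcpu_counters *c = this_cpu_ptr(stats);

	u64_stats_update_begin(&c->syncp);
	c->packets++;
	c->bytes += len;
	u64_stats_update_end(&c->syncp);
}

/* Reader: sum every CPU, retrying torn reads. */
static void sum_counters(struct pcpu_counters __percpu *stats,
			 u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct pcpu_counters *c = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin(&c->syncp);
			p = c->packets;
			b = c->bytes;
		} while (u64_stats_fetch_retry(&c->syncp, start));

		*packets += p;
		*bytes += b;
	}
}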
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3df046a58b1..3698824744c 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -460,7 +460,7 @@ init_rx_bufs(struct net_device *dev, int num) {
}
lp->rbd_tail->next = rfd->rbd;
#endif
- return (i);
+ return i;
}
static inline void
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 3832fa4961d..f84f5e6eded 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -562,19 +562,19 @@ static int __init mac8390_initdev(struct net_device *dev,
case ACCESS_16:
/* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = back4_offsets;
break;
case ACCESS_32:
/* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = sane_block_input;
+ ei_status.block_output = sane_block_output;
+ ei_status.get_8390_hdr = sane_get_8390_hdr;
ei_status.reg_offset = back4_offsets;
access_bitmode = 1;
break;
@@ -586,19 +586,19 @@ static int __init mac8390_initdev(struct net_device *dev,
* but overwrite system memory when run at 32 bit.
* so we run them all at 16 bit.
*/
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = back4_offsets;
break;
case MAC8390_CABLETRON:
/* 16 bit card, register map is short forward */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = fwrd2_offsets;
break;
@@ -606,19 +606,19 @@ static int __init mac8390_initdev(struct net_device *dev,
case MAC8390_KINETICS:
/* 16 bit memory, register map is forward */
/* dayna and similar */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &dayna_block_input;
- ei_status.block_output = &dayna_block_output;
- ei_status.get_8390_hdr = &dayna_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = dayna_block_input;
+ ei_status.block_output = dayna_block_output;
+ ei_status.get_8390_hdr = dayna_get_8390_hdr;
ei_status.reg_offset = fwrd4_offsets;
break;
case MAC8390_INTERLAN:
/* 16 bit memory, register map is forward */
- ei_status.reset_8390 = &interlan_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = interlan_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = fwrd4_offsets;
break;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index ff2f158ab0b..4297f6e8c4b 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -407,7 +407,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
}
skb_reserve(skb, RX_OFFSET);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb_put(skb, len);
for (frag = first_frag; ; frag = NEXT_RX(frag)) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0ef0eb0db94..0fc9dc7f20d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -788,6 +788,10 @@ static int macvlan_device_event(struct notifier_block *unused,
}
break;
case NETDEV_UNREGISTER:
+ /* twiddle thumbs on netns device moves */
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b1c54a9c6e..42567279843 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -84,26 +84,45 @@ static const struct proto_ops macvtap_socket_ops;
static DEFINE_SPINLOCK(macvtap_lock);
/*
- * Choose the next free queue, for now there is only one
+ * get_slot: return a [unused/occupied] slot in vlan->taps[]:
+ * - if 'q' is NULL, return the first empty slot;
+ * - otherwise, return the slot this pointer occupies.
*/
+static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
+{
+ int i;
+
+ for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
+ if (rcu_dereference(vlan->taps[i]) == q)
+ return i;
+ }
+
+ /* Should never happen */
+ BUG_ON(1);
+}
+
static int macvtap_set_queue(struct net_device *dev, struct file *file,
struct macvtap_queue *q)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ int index;
int err = -EBUSY;
spin_lock(&macvtap_lock);
- if (rcu_dereference(vlan->tap))
+ if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
goto out;
err = 0;
+ index = get_slot(vlan, NULL);
rcu_assign_pointer(q->vlan, vlan);
- rcu_assign_pointer(vlan->tap, q);
+ rcu_assign_pointer(vlan->taps[index], q);
sock_hold(&q->sk);
q->file = file;
file->private_data = q;
+ vlan->numvtaps++;
+
out:
spin_unlock(&macvtap_lock);
return err;
@@ -124,9 +143,12 @@ static void macvtap_put_queue(struct macvtap_queue *q)
spin_lock(&macvtap_lock);
vlan = rcu_dereference(q->vlan);
if (vlan) {
- rcu_assign_pointer(vlan->tap, NULL);
+ int index = get_slot(vlan, q);
+
+ rcu_assign_pointer(vlan->taps[index], NULL);
rcu_assign_pointer(q->vlan, NULL);
sock_put(&q->sk);
+ --vlan->numvtaps;
}
spin_unlock(&macvtap_lock);
@@ -136,39 +158,82 @@ static void macvtap_put_queue(struct macvtap_queue *q)
}
/*
- * Since we only support one queue, just dereference the pointer.
+ * Select a queue based on the rxq of the device on which this packet
+ * arrived. If the incoming device is not mq, calculate a flow hash
+ * to select a queue. If all fails, find the first available queue.
+ * Cache vlan->numvtaps since it can become zero during the execution
+ * of this function.
*/
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
struct sk_buff *skb)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvtap_queue *tap = NULL;
+ int numvtaps = vlan->numvtaps;
+ __u32 rxq;
+
+ if (!numvtaps)
+ goto out;
+
+ if (likely(skb_rx_queue_recorded(skb))) {
+ rxq = skb_get_rx_queue(skb);
+
+ while (unlikely(rxq >= numvtaps))
+ rxq -= numvtaps;
+
+ tap = rcu_dereference(vlan->taps[rxq]);
+ if (tap)
+ goto out;
+ }
+
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
- return rcu_dereference(vlan->tap);
+ /* Everything failed - find first available queue */
+ for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
+ tap = rcu_dereference(vlan->taps[rxq]);
+ if (tap)
+ break;
+ }
+
+out:
+ return tap;
}
/*
* The net_device is going away, give up the reference
- * that it holds on the queue (all the queues one day)
- * and safely set the pointer from the queues to NULL.
+ * that it holds on all queues and safely set the pointer
+ * from the queues to NULL.
*/
static void macvtap_del_queues(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q;
+ struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
+ int i, j = 0;
+ /* macvtap_put_queue can free some slots, so go through all slots */
spin_lock(&macvtap_lock);
- q = rcu_dereference(vlan->tap);
- if (!q) {
- spin_unlock(&macvtap_lock);
- return;
+ for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
+ q = rcu_dereference(vlan->taps[i]);
+ if (q) {
+ qlist[j++] = q;
+ rcu_assign_pointer(vlan->taps[i], NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ vlan->numvtaps--;
+ }
}
-
- rcu_assign_pointer(vlan->tap, NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ BUG_ON(vlan->numvtaps != 0);
spin_unlock(&macvtap_lock);
synchronize_rcu();
- sock_put(&q->sk);
+
+ for (--j; j >= 0; j--)
+ sock_put(&qlist[j]->sk);
}
/*
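The macvtap queue-selection comment above describes a three-stage fallback (recorded rx queue, then flow hash, then linear scan); the logic is easier to see stripped of the RCU and driver types. A generic sketch, with invented names and a plain pointer array standing in for vlan->taps[]:

#define MAX_QUEUES	16

/* Returns a queue index, or -1 if no queue is attached. */
static int pick_queue(void *taps[], unsigned int numtaps,
		      int have_rxq, unsigned int rxq, unsigned int rxhash)
{
	unsigned int i;

	if (!numtaps)
		return -1;

	/* 1) the rx queue recorded on the skb, wrapped into range */
	if (have_rxq && taps[rxq % numtaps])
		return rxq % numtaps;

	/* 2) a flow hash, if one is available */
	if (rxhash && taps[rxhash % numtaps])
		return rxhash % numtaps;

	/* 3) last resort: first populated slot */
	for (i = 0; i < MAX_QUEUES; i++)
		if (taps[i])
			return i;

	return -1;
}

In the real code each lookup goes through rcu_dereference(), and numvtaps is sampled once up front because it can drop to zero while the function runs.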
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 42e3294671d..60135aa5580 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -461,7 +461,7 @@ static int meth_tx_full(struct net_device *dev)
{
struct meth_private *priv = netdev_priv(dev);
- return (priv->tx_count >= TX_RING_ENTRIES - 1);
+ return priv->tx_count >= TX_RING_ENTRIES - 1;
}
static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 1fd068e1d93..d1aa45a1585 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -6,4 +6,4 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
- en_resources.o en_netdev.o
+ en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8c8515619b8..8f4bf1f07c1 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -74,7 +74,7 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
- u32 obj, i;
+ u32 obj;
if (likely(cnt == 1 && align == 1))
return mlx4_bitmap_alloc(bitmap);
@@ -91,8 +91,7 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
}
if (obj < bitmap->max) {
- for (i = 0; i < cnt; i++)
- set_bit(obj + i, bitmap->table);
+ bitmap_set(bitmap->table, obj, cnt);
if (obj == bitmap->last) {
bitmap->last = (obj + cnt);
if (bitmap->last >= bitmap->max)
@@ -109,13 +108,10 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
- u32 i;
-
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
- for (i = 0; i < cnt; i++)
- clear_bit(obj + i, bitmap->table);
+ bitmap_clear(bitmap->table, obj, cnt);
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
@@ -125,8 +121,6 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
- int i;
-
/* num must be a power of 2 */
if (num != roundup_pow_of_two(num))
return -EINVAL;
@@ -142,8 +136,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
if (!bitmap->table)
return -ENOMEM;
- for (i = 0; i < reserved_bot; ++i)
- set_bit(i, bitmap->table);
+ bitmap_set(bitmap->table, 0, reserved_bot);
return 0;
}
@@ -188,7 +181,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
- buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list,
+ buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
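The allocator change above swaps open-coded set_bit()/clear_bit() loops for the range helpers from <linux/bitmap.h>; the helpers are non-atomic, which is safe here only because the caller already holds bitmap->lock. A small equivalence sketch (mark_range/release_range are illustrative wrappers, not driver functions):

#include <linux/bitmap.h>
#include <linux/bitops.h>

static void mark_range(unsigned long *table, unsigned int obj, unsigned int cnt)
{
	/* Same effect as: for (i = 0; i < cnt; i++) set_bit(obj + i, table); */
	bitmap_set(table, obj, cnt);
}

static void release_range(unsigned long *table, unsigned int obj, unsigned int cnt)
{
	/* Same effect as: for (i = 0; i < cnt; i++) clear_bit(obj + i, table); */
	bitmap_clear(table, obj, cnt);
}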
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index b275238fe70..056152b3ff5 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -39,21 +39,6 @@
#include "en_port.h"
-static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
-{
- int i;
-
- priv->port_stats.lro_aggregated = 0;
- priv->port_stats.lro_flushed = 0;
- priv->port_stats.lro_no_desc = 0;
-
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
- priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
- priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
- }
-}
-
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
@@ -112,7 +97,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tx_heartbeat_errors", "tx_window_errors",
/* port statistics */
- "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
+ "tso_packets",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
@@ -125,6 +110,14 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
#define NUM_MAIN_STATS 21
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
+ "Interrupt Test",
+ "Link Test",
+ "Speed Test",
+ "Register Test",
+ "Loopback Test",
+};
+
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
@@ -146,10 +139,15 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- if (sset != ETH_SS_STATS)
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_ALL_STATS +
+ (priv->tx_ring_num + priv->rx_ring_num) * 2;
+ case ETH_SS_TEST:
+ return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+ default:
return -EOPNOTSUPP;
-
- return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+ }
}
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
@@ -161,8 +159,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
spin_lock_bh(&priv->stats_lock);
- mlx4_en_update_lro_stats(priv);
-
for (i = 0; i < NUM_MAIN_STATS; i++)
data[index++] = ((unsigned long *) &priv->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++)
@@ -181,6 +177,12 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
}
+static void mlx4_en_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ mlx4_en_ex_selftest(dev, &etest->flags, buf);
+}
+
static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
@@ -188,44 +190,76 @@ static void mlx4_en_get_strings(struct net_device *dev,
int index = 0;
int i;
- if (stringset != ETH_SS_STATS)
- return;
-
- /* Add main counters */
- for (i = 0; i < NUM_MAIN_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
- for (i = 0; i < NUM_PORT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
+ switch (stringset) {
+ case ETH_SS_TEST:
+ for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ if (priv->mdev->dev->caps.loopback_support)
+ for (; i < MLX4_EN_NUM_SELF_TEST; i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ break;
+
+ case ETH_SS_STATS:
+ /* Add main counters */
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS]);
- for (i = 0; i < priv->tx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
- }
- for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
- }
- for (i = 0; i < NUM_PKT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_bytes", i);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_bytes", i);
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+ break;
+ }
}
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int trans_type;
+
cmd->autoneg = AUTONEG_DISABLE;
cmd->supported = SUPPORTED_10000baseT_Full;
- cmd->advertising = ADVERTISED_1000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ trans_type = priv->port_state.transciver;
if (netif_carrier_ok(dev)) {
- cmd->speed = SPEED_10000;
+ cmd->speed = priv->port_state.link_speed;
cmd->duplex = DUPLEX_FULL;
} else {
cmd->speed = -1;
cmd->duplex = -1;
}
+
+ if (trans_type > 0 && trans_type <= 0xC) {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ } else if (trans_type == 0x80 || trans_type == 0) {
+ cmd->port = PORT_TP;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->port = -1;
+ cmd->transceiver = -1;
+ }
return 0;
}
@@ -343,8 +377,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
- if (rx_size == priv->prof->rx_ring_size &&
- tx_size == priv->prof->tx_ring_size)
+ if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
+ priv->rx_ring[0].size) &&
+ tx_size == priv->tx_ring[0].size)
return 0;
mutex_lock(&mdev->state_lock);
@@ -378,49 +413,13 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
memset(param, 0, sizeof(*param));
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
- param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
- param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
-}
-
-static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int rc = 0;
- int changed = 0;
-
- if (data & ~ETH_FLAG_LRO)
- return -EOPNOTSUPP;
-
- if (data & ETH_FLAG_LRO) {
- if (mdev->profile.num_lro == 0)
- return -EOPNOTSUPP;
- if (!(dev->features & NETIF_F_LRO))
- changed = 1;
- } else if (dev->features & NETIF_F_LRO) {
- changed = 1;
- }
-
- if (changed) {
- if (netif_running(dev)) {
- mutex_lock(&mdev->state_lock);
- mlx4_en_stop_port(dev);
- }
- dev->features ^= NETIF_F_LRO;
- if (netif_running(dev)) {
- rc = mlx4_en_start_port(dev);
- if (rc)
- en_err(priv, "Failed to restart port\n");
- mutex_unlock(&mdev->state_lock);
- }
- }
-
- return rc;
+ param->rx_pending = priv->port_up ?
+ priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
+ param->tx_pending = priv->tx_ring[0].size;
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
@@ -441,6 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_strings = mlx4_en_get_strings,
.get_sset_count = mlx4_en_get_sset_count,
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
+ .self_test = mlx4_en_self_test,
.get_wol = mlx4_en_get_wol,
.get_msglevel = mlx4_en_get_msglevel,
.set_msglevel = mlx4_en_set_msglevel,
@@ -451,7 +451,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_flags = ethtool_op_get_flags,
- .set_flags = mlx4_ethtool_op_set_flags,
};
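The .self_test hook added above plugs into the generic ethtool selftest interface: get_sset_count() reports how many tests exist for ETH_SS_TEST, get_strings() names them, and self_test() writes one u64 result per test, raising ETH_TEST_FL_FAILED when anything goes wrong. A bare-bones sketch of that plumbing; the demo_* names and the single stub test are invented for illustration:

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static const char demo_test_names[][ETH_GSTRING_LEN] = {
	"Register Test",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_TEST)
		return ARRAY_SIZE(demo_test_names);
	return -EOPNOTSUPP;
}

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_TEST)
		memcpy(data, demo_test_names, sizeof(demo_test_names));
}

/* Stub: a real test would exercise the hardware and return nonzero on failure. */
static u64 demo_run_register_test(struct net_device *dev)
{
	return 0;
}

static void demo_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	buf[0] = demo_run_register_test(dev);
	if (buf[0])
		etest->flags |= ETH_TEST_FL_FAILED;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_sset_count	= demo_get_sset_count,
	.get_strings	= demo_get_strings,
	.self_test	= demo_self_test,
};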
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 97934f1ec53..14390641704 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -63,15 +63,12 @@ static const char mlx4_en_version[] =
*/
-/* Use a XOR rathern than Toeplitz hash function for RSS */
-MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
-
-/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
-
-/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
-MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
- "Number of LRO sessions per ring or disabled (0)");
+/* Enable RSS TCP traffic */
+MLX4_EN_PARM_INT(tcp_rss, 1,
+ "Enable RSS for incomming TCP traffic or disabled (0)");
+/* Enable RSS UDP traffic */
+MLX4_EN_PARM_INT(udp_rss, 1,
+ "Enable RSS for incomming UDP traffic or disabled (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -107,9 +104,12 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
struct mlx4_en_profile *params = &mdev->profile;
int i;
- params->rss_xor = (rss_xor != 0);
- params->rss_mask = rss_mask & 0x1f;
- params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
+ params->tcp_rss = tcp_rss;
+ params->udp_rss = udp_rss;
+ if (params->udp_rss && !mdev->dev->caps.udp_rss) {
+ mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ params->udp_rss = 0;
+ }
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
params->prof[i].rx_pause = 1;
params->prof[i].rx_ppp = pfcrx;
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a0..79478bd4211 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -109,7 +109,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
mutex_unlock(&mdev->state_lock);
}
-static u64 mlx4_en_mac_to_u64(u8 *addr)
+u64 mlx4_en_mac_to_u64(u8 *addr)
{
u64 mac = 0;
int i;
@@ -513,6 +513,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
}
+ if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
+ queue_work(mdev->workqueue, &priv->mac_task);
+ mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
+ }
mutex_unlock(&mdev->state_lock);
}
@@ -528,10 +532,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
* report to system log */
if (priv->last_link_state != linkstate) {
if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- en_dbg(LINK, priv, "Link Down\n");
+ en_info(priv, "Link Down\n");
netif_carrier_off(priv->dev);
} else {
- en_dbg(LINK, priv, "Link Up\n");
+ en_info(priv, "Link Up\n");
netif_carrier_on(priv->dev);
}
}
@@ -653,6 +657,7 @@ int mlx4_en_start_port(struct net_device *dev)
en_err(priv, "Failed setting port mac\n");
goto tx_err;
}
+ mdev->mac_removed[priv->port] = 0;
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
@@ -704,12 +709,12 @@ void mlx4_en_stop_port(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
- /* close port*/
+ /* Set port as not active */
priv->port_up = false;
- mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Unregister Mac address for the port */
mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+ mdev->mac_removed[priv->port] = 1;
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
@@ -731,6 +736,9 @@ void mlx4_en_stop_port(struct net_device *dev)
msleep(1);
mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
}
+
+ /* close port*/
+ mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
static void mlx4_en_restart(struct work_struct *work)
@@ -1017,15 +1025,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
*/
dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
- dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
/* Set defualt MAC */
dev->addr_len = ETH_ALEN;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[ETH_ALEN - 1 - i] =
- (u8) (priv->mac >> (8 * i));
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+ dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+ }
/*
* Set driver features
@@ -1038,8 +1048,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- if (mdev->profile.num_lro)
- dev->features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_GRO;
if (mdev->LSO_support) {
dev->features |= NETIF_F_TSO;
dev->features |= NETIF_F_TSO6;
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index a29abe845d2..aa3ef2aee5b 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -142,6 +142,38 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
return err;
}
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
+{
+ struct mlx4_en_query_port_context *qport_context;
+ struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+ struct mlx4_en_port_state *state = &priv->port_state;
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, sizeof(*qport_context));
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+ qport_context = mailbox->buf;
+
+ /* This command is always accessed from Ethtool context
+ * already synchronized, no need for locking */
+ state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
+ if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
+ MLX4_EN_1G_SPEED)
+ state->link_speed = 1000;
+ else
+ state->link_speed = 10000;
+ state->transciver = qport_context->transceiver;
+
+out:
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return err;
+}
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index e6477f12beb..f6511aa2b7d 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -84,6 +84,20 @@ enum {
MLX4_MCAST_ENABLE = 2,
};
+struct mlx4_en_query_port_context {
+ u8 link_up;
+#define MLX4_EN_LINK_UP_MASK 0x80
+ u8 reserved;
+ __be16 mtu;
+ u8 reserved2;
+ u8 link_speed;
+#define MLX4_EN_SPEED_MASK 0x3
+#define MLX4_EN_1G_SPEED 0x2
+ u16 reserved3[5];
+ __be64 mac;
+ u8 transceiver;
+};
+
struct mlx4_en_stat_out_mbox {
/* Received frames with a length of 64 octets */
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 8e2fcb7103c..570f2508fb3 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -42,18 +42,6 @@
#include "mlx4_en.h"
-static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr,
- u64 *hdr_flags, void *priv)
-{
- *mac_hdr = page_address(frags->page) + frags->page_offset;
- *ip_hdr = *mac_hdr + ETH_HLEN;
- *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- return 0;
-}
-
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct skb_frag_struct *skb_frags,
@@ -251,7 +239,6 @@ reduce_rings:
ring->prod--;
mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
}
- ring->size_mask = ring->actual_size - 1;
}
return 0;
@@ -313,28 +300,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
}
ring->buf = ring->wqres.buf.direct.buf;
- /* Configure lro mngr */
- memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
- ring->lro.dev = priv->dev;
- ring->lro.features = LRO_F_NAPI;
- ring->lro.frag_align_pad = NET_IP_ALIGN;
- ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
- ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
- ring->lro.max_desc = mdev->profile.num_lro;
- ring->lro.max_aggr = MAX_SKB_FRAGS;
- ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
- sizeof(struct net_lro_desc),
- GFP_KERNEL);
- if (!ring->lro.lro_arr) {
- en_err(priv, "Failed to allocate lro array\n");
- goto err_map;
- }
- ring->lro.get_frag_header = mlx4_en_get_frag_header;
-
return 0;
-err_map:
- mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
@@ -389,6 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
+ ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
}
@@ -412,7 +380,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
- kfree(ring->lro.lro_arr);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
vfree(ring->rx_info);
@@ -459,7 +426,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
goto fail;
/* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
@@ -541,6 +508,21 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
return skb;
}
+static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+{
+ int i;
+ int offset = ETH_HLEN;
+
+ for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
+ if (*(skb->data + offset) != (unsigned char) (i & 0xff))
+ goto out_loopback;
+ }
+ /* Loopback found */
+ priv->loopback_ok = 1;
+
+out_loopback:
+ dev_kfree_skb_any(skb);
+}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
@@ -548,7 +530,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct skb_frag_struct *skb_frags;
- struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -608,37 +589,35 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* - TCP/IP (v4)
* - without IP options
* - not an IP fragment */
- if (mlx4_en_can_lro(cqe->status) &&
- dev->features & NETIF_F_LRO) {
+ if (dev->features & NETIF_F_GRO) {
+ struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+ if (!gro_skb)
+ goto next;
nr = mlx4_en_complete_rx_desc(
priv, rx_desc,
- skb_frags, lro_frags,
+ skb_frags, skb_shinfo(gro_skb)->frags,
ring->page_alloc, length);
if (!nr)
goto next;
+ skb_shinfo(gro_skb)->nr_frags = nr;
+ gro_skb->len = length;
+ gro_skb->data_len = length;
+ gro_skb->truesize += length;
+ gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
if (priv->vlgrp && (cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
- lro_vlan_hwaccel_receive_frags(
- &ring->lro, lro_frags,
- length, length,
- priv->vlgrp,
- be16_to_cpu(cqe->sl_vid),
- NULL, 0);
- } else
- lro_receive_frags(&ring->lro,
- lro_frags,
- length,
- length,
- NULL, 0);
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
+ vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
+ else
+ napi_gro_frags(&cq->napi);
goto next;
}
/* LRO not possible, complete processing here */
ip_summed = CHECKSUM_UNNECESSARY;
- INC_PERF_COUNTER(priv->pstats.lro_misses);
} else {
ip_summed = CHECKSUM_NONE;
priv->port_stats.rx_chksum_none++;
@@ -655,6 +634,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ if (unlikely(priv->validate_loopback)) {
+ validate_loopback(priv, skb);
+ goto next;
+ }
+
skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
@@ -674,14 +658,10 @@ next:
if (++polled == budget) {
/* We are here because we reached the NAPI budget -
* flush only pending LRO sessions */
- lro_flush_all(&ring->lro);
goto out;
}
}
- /* If CQ is empty flush all LRO sessions unconditionally */
- lro_flush_all(&ring->lro);
-
out:
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_cq_set_ci(&cq->mcq);
@@ -816,7 +796,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
qp->event = mlx4_en_sqp_event;
memset(context, 0, sizeof *context);
- mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0,
+ mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
@@ -839,8 +819,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_qp_context context;
struct mlx4_en_rss_context *rss_context;
void *ptr;
- int rss_xor = mdev->profile.rss_xor;
- u8 rss_mask = mdev->profile.rss_mask;
+ u8 rss_mask = 0x3f;
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -886,9 +865,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
- rss_context->hash_fn = rss_xor & 0x3;
- rss_context->flags = rss_mask << 2;
+ rss_context->flags = rss_mask;
+ if (priv->mdev->profile.udp_rss)
+ rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
new file mode 100644
index 00000000000..9c91a92da70
--- /dev/null
+++ b/drivers/net/mlx4/en_selftest.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/mlx4/driver.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
+{
+ return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethh;
+ unsigned char *packet;
+ unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
+ unsigned int i;
+ int err;
+
+
+ /* build the pkt before xmit */
+ skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
+ if (!skb) {
+ en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
+ packet = (unsigned char *)skb_put(skb, packet_size);
+ memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
+ memset(ethh->h_source, 0, ETH_ALEN);
+ ethh->h_proto = htons(ETH_P_ARP);
+ skb_set_mac_header(skb, 0);
+ for (i = 0; i < packet_size; ++i) /* fill our packet */
+ packet[i] = (unsigned char)(i & 0xff);
+
+ /* xmit the pkt */
+ err = mlx4_en_xmit(skb, priv->dev);
+ return err;
+}
+
+static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
+{
+ u32 loopback_ok = 0;
+ int i;
+
+
+ priv->loopback_ok = 0;
+ priv->validate_loopback = 1;
+
+ /* xmit */
+ if (mlx4_en_test_loopback_xmit(priv)) {
+ en_err(priv, "Transmitting loopback packet failed\n");
+ goto mlx4_en_test_loopback_exit;
+ }
+
+ /* polling for result */
+ for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
+ msleep(MLX4_EN_LOOPBACK_TIMEOUT);
+ if (priv->loopback_ok) {
+ loopback_ok = 1;
+ break;
+ }
+ }
+ if (!loopback_ok)
+ en_err(priv, "Loopback packet didn't arrive\n");
+
+mlx4_en_test_loopback_exit:
+
+ priv->validate_loopback = 0;
+ return !loopback_ok;
+}
+
+
+static int mlx4_en_test_link(struct mlx4_en_priv *priv)
+{
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+ if (priv->port_state.link_state == 1)
+ return 0;
+ else
+ return 1;
+}
+
+static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
+{
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ /* The device currently only supports 10G speed */
+ if (priv->port_state.link_speed != SPEED_10000)
+ return priv->port_state.link_speed;
+ return 0;
+}
+
+
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *tx_ring;
+ int i, carrier_ok;
+
+ memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
+
+ if (*flags & ETH_TEST_FL_OFFLINE) {
+ /* disable the interface */
+ carrier_ok = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+retry_tx:
+ /* Wait until all tx queues are empty.
+ * There should not be any additional incoming traffic
+ * since we turned the carrier off */
+ msleep(200);
+ for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
+ tx_ring = &priv->tx_ring[i];
+ if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
+ goto retry_tx;
+ }
+
+ if (priv->mdev->dev->caps.loopback_support) {
+ buf[3] = mlx4_en_test_registers(priv);
+ buf[4] = mlx4_en_test_loopback(priv);
+ }
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+
+ }
+ buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[1] = mlx4_en_test_link(priv);
+ buf[2] = mlx4_en_test_speed(priv);
+
+ for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
+ if (buf[i])
+ *flags |= ETH_TEST_FL_FAILED;
+ }
+}
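A minimal usage sketch, not part of this patch, showing only how the five result slots filled by mlx4_en_ex_selftest() are laid out (the caller below is hypothetical):

	u64 results[MLX4_EN_NUM_SELF_TEST];
	u32 flags = ETH_TEST_FL_OFFLINE;	/* also request the offline register/loopback checks */

	mlx4_en_ex_selftest(netdev, &flags, results);
	/* results[0]: interrupt test, results[1]: link state, results[2]: link speed,
	 * results[3]: registers, results[4]: loopback (the last two run offline only);
	 * ETH_TEST_FL_FAILED is set in flags if any slot is non-zero. */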
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 580968f304e..98dd620042a 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -38,6 +38,7 @@
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/tcp.h>
#include "mlx4_en.h"
@@ -600,6 +601,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx4_wqe_data_seg *data;
struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
+ struct ethhdr *ethh;
+ u64 mac;
+ u32 mac_l, mac_h;
int tx_ind = 0;
int nr_txbb;
int desc_size;
@@ -612,6 +616,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int lso_header_size;
void *fragptr;
+ if (!priv->port_up)
+ goto tx_drop;
+
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
goto tx_drop;
@@ -676,6 +683,19 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
priv->port_stats.tx_chksum_offload++;
}
+ if (unlikely(priv->validate_loopback)) {
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
+ }
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 6d7b2bf210c..552d0fce6f6 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -699,3 +699,47 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
+
+/* A test that verifies that we can accept interrupts on all
+ * the irq vectors of the device.
+ * Interrupts are checked using the NOP command.
+ */
+int mlx4_test_interrupts(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int err;
+
+ err = mlx4_NOP(dev);
+ /* When not in MSI_X, there is only one irq to check */
+ if (!(dev->flags & MLX4_FLAG_MSI_X))
+ return err;
+
+ /* Loop over all completion vectors: for each vector, check
+ * whether it works by mapping command completions to that vector
+ * and performing a NOP command.
+ */
+ for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
+
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[i].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ mlx4_cmd_use_events(dev);
+ break;
+ }
+
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+ }
+
+ /* Return to default */
+ mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_test_interrupts);
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 04f42ae1eda..b716e1a1b29 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -141,6 +141,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
+ u32 field32;
u16 size;
u16 stat_rate;
int err;
@@ -178,6 +179,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
+#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
+#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -268,6 +271,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
+ dev_cap->udp_rss = field & 0x1;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
+ dev_cap->loopback_support = field & 0x1;
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
@@ -365,6 +372,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
#define QUERY_PORT_MAC_OFFSET 0x10
+#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
+#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
+#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -388,6 +398,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->log_max_vlans[i] = field >> 4;
MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
+ dev_cap->trans_type[i] = field32 >> 24;
+ dev_cap->vendor_oui[i] = field32 & 0xffffff;
+ MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
+ MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
}
}
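A worked decode of the new transceiver fields above, under an assumed raw value (illustration only, not taken from the patch): if the dword read at QUERY_PORT_TRANS_VENDOR_OFFSET were 0x0d0002c9, the assignments would yield

	dev_cap->trans_type[i] = 0x0d0002c9 >> 24;	/* 0x0d -- transceiver type byte */
	dev_cap->vendor_oui[i] = 0x0d0002c9 & 0xffffff;	/* 0x0002c9 -- IEEE OUI (Mellanox) */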
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 526d7f30c04..65cc72eb899 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -73,7 +73,13 @@ struct mlx4_dev_cap {
int max_pkeys[MLX4_MAX_PORTS + 1];
u64 def_mac[MLX4_MAX_PORTS + 1];
u16 eth_mtu[MLX4_MAX_PORTS + 1];
+ int trans_type[MLX4_MAX_PORTS + 1];
+ int vendor_oui[MLX4_MAX_PORTS + 1];
+ u16 wavelength[MLX4_MAX_PORTS + 1];
+ u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
+ int udp_rss;
+ int loopback_support;
u32 flags;
int reserved_uars;
int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5102ab1ac56..569fa3df381 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -184,6 +184,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.trans_type[i] = dev_cap->trans_type[i];
+ dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
+ dev->caps.wavelength[i] = dev_cap->wavelength[i];
+ dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -221,6 +225,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.bmme_flags = dev_cap->bmme_flags;
dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+ dev->caps.udp_rss = dev_cap->udp_rss;
+ dev->caps.loopback_support = dev_cap->loopback_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.log_num_macs = log_num_mac;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 449210994ee..1fc16ab7ad2 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -38,19 +38,19 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
-#include <linux/inet_lro.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.4.1.1"
-#define DRV_RELDATE "June 2009"
+#define DRV_VERSION "1.5.1.6"
+#define DRV_RELDATE "August 2010"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -61,7 +61,6 @@
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
-#define MAX_TX_RINGS 16
#define MAX_RX_RINGS 16
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
@@ -107,6 +106,7 @@ enum {
#define MLX4_EN_SMALL_PKT_SIZE 64
#define MLX4_EN_NUM_TX_RINGS 8
#define MLX4_EN_NUM_PPP_RINGS 8
+#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
@@ -139,10 +139,14 @@ enum {
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
+#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
#define MLX4_EN_MIN_MTU 46
#define ETH_BCAST 0xffffffffffffULL
+#define MLX4_EN_LOOPBACK_RETRIES 5
+#define MLX4_EN_LOOPBACK_TIMEOUT 100
+
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE 128
@@ -249,7 +253,6 @@ struct mlx4_en_rx_desc {
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
- struct net_lro_mgr lro;
u32 size ; /* number of Rx descs*/
u32 actual_size;
u32 size_mask;
@@ -313,7 +316,8 @@ struct mlx4_en_port_profile {
struct mlx4_en_profile {
int rss_xor;
- int num_lro;
+ int tcp_rss;
+ int udp_rss;
u8 rss_mask;
u32 active_ports;
u32 small_pkt_int;
@@ -337,6 +341,7 @@ struct mlx4_en_dev {
struct mlx4_mr mr;
u32 priv_pdn;
spinlock_t uar_lock;
+ u8 mac_removed[MLX4_MAX_PORTS + 1];
};
@@ -355,6 +360,13 @@ struct mlx4_en_rss_context {
u8 hash_fn;
u8 flags;
__be32 rss_key[10];
+ __be32 base_qpn_udp;
+};
+
+struct mlx4_en_port_state {
+ int link_state;
+ int link_speed;
+ int transciver;
};
struct mlx4_en_pkt_stats {
@@ -365,9 +377,6 @@ struct mlx4_en_pkt_stats {
};
struct mlx4_en_port_stats {
- unsigned long lro_aggregated;
- unsigned long lro_flushed;
- unsigned long lro_no_desc;
unsigned long tso_packets;
unsigned long queue_stopped;
unsigned long wake_queue;
@@ -376,7 +385,7 @@ struct mlx4_en_port_stats {
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS 11
+#define NUM_PORT_STATS 8
};
struct mlx4_en_perf_stats {
@@ -405,6 +414,7 @@ struct mlx4_en_priv {
struct vlan_group *vlgrp;
struct net_device_stats stats;
struct net_device_stats ret_stats;
+ struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
unsigned long last_moder_packets;
@@ -423,6 +433,8 @@ struct mlx4_en_priv {
u16 sample_interval;
u16 adaptive_rx_coal;
u32 msg_enable;
+ u32 loopback_ok;
+ u32 validate_loopback;
struct mlx4_hwq_resources res;
int link_state;
@@ -531,6 +543,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
+
+#define MLX4_EN_NUM_SELF_TEST 5
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
+u64 mlx4_en_mac_to_u64(u8 *addr);
/*
* Globals
@@ -555,6 +572,8 @@ do { \
en_print(KERN_WARNING, priv, format, ##arg)
#define en_err(priv, format, arg...) \
en_print(KERN_ERR, priv, format, ##arg)
+#define en_info(priv, format, arg...) \
+ en_print(KERN_INFO, priv, format, ## arg)
#define mlx4_err(mdev, format, arg...) \
pr_err("%s %s: " format, DRV_NAME, \
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 5caf0115fa5..e749f82865f 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -85,7 +85,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_resource tmp;
int i, j;
- profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
+ profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
if (!profile)
return -ENOMEM;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2d488abcf62..dd2b6a71c6d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2901,7 +2901,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
set_params(mp, pd);
- dev->real_num_tx_queues = mp->txq_count;
+ netif_set_real_num_tx_queues(dev, mp->txq_count);
+ netif_set_real_num_rx_queues(dev, mp->rxq_count);
if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
mp->phy = phy_scan(mp, pd->phy_addr);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d771d1650d6..8524cc40ec5 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -225,6 +225,7 @@ struct myri10ge_priv {
struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
int dca_enabled;
+ int relaxed_order;
#endif
u32 link_state;
unsigned int rdma_tags_available;
@@ -239,6 +240,7 @@ struct myri10ge_priv {
int watchdog_resets;
int watchdog_pause;
int pause;
+ bool fw_name_allocated;
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
char *product_code_string;
@@ -271,6 +273,7 @@ MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
+/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
@@ -376,6 +379,14 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
+static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
+{
+ if (mgp->fw_name_allocated)
+ kfree(mgp->fw_name);
+ mgp->fw_name = name;
+ mgp->fw_name_allocated = allocated;
+}
+
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct myri10ge_cmd *data, int atomic)
@@ -747,7 +758,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
dev_warn(&mgp->pdev->dev, "via hotplug\n");
}
- mgp->fw_name = "adopted";
+ set_fw_name(mgp, "adopted", false);
mgp->tx_boundary = 2048;
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
@@ -980,7 +991,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
* RX queues, so if we get an error, first retry using a
* single TX queue before giving up */
if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
- mgp->dev->real_num_tx_queues = 1;
+ netif_set_real_num_tx_queues(mgp->dev, 1);
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
status = myri10ge_send_cmd(mgp,
@@ -1064,10 +1075,28 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
}
#ifdef CONFIG_MYRI10GE_DCA
+static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
+{
+ int ret, cap, err;
+ u16 ctl;
+
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!cap)
+ return 0;
+
+ err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
+ if (ret != on) {
+ ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ ctl |= (on << 4);
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+ return ret;
+}
+
static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
- ss->cpu = cpu;
ss->cached_dca_tag = tag;
put_be32(htonl(tag), ss->dca_tag);
}
@@ -1078,9 +1107,10 @@ static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
int tag;
if (cpu != ss->cpu) {
- tag = dca_get_tag(cpu);
+ tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
if (ss->cached_dca_tag != tag)
myri10ge_write_dca(ss, cpu, tag);
+ ss->cpu = cpu;
}
put_cpu();
}
@@ -1103,9 +1133,13 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
"dca_add_requester() failed, err=%d\n", err);
return;
}
+ mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
mgp->dca_enabled = 1;
- for (i = 0; i < mgp->num_slices; i++)
- myri10ge_write_dca(&mgp->ss[i], -1, 0);
+ for (i = 0; i < mgp->num_slices; i++) {
+ mgp->ss[i].cpu = -1;
+ mgp->ss[i].cached_dca_tag = -1;
+ myri10ge_update_dca(&mgp->ss[i]);
+ }
}
static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
@@ -1116,6 +1150,8 @@ static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
if (!mgp->dca_enabled)
return;
mgp->dca_enabled = 0;
+ if (mgp->relaxed_order)
+ myri10ge_toggle_relaxed(pdev, 1);
err = dca_remove_requester(&pdev->dev);
}
@@ -1545,12 +1581,12 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
* valid since MSI-X irqs are not shared */
if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
napi_schedule(&ss->napi);
- return (IRQ_HANDLED);
+ return IRQ_HANDLED;
}
/* make sure it is our IRQ, and that the DMA has finished */
if (unlikely(!stats->valid))
- return (IRQ_NONE);
+ return IRQ_NONE;
/* low bit indicates receives are present, so schedule
* napi poll handler */
@@ -1589,7 +1625,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
myri10ge_check_statblock(mgp);
put_be32(htonl(3), ss->irq_claim + 1);
- return (IRQ_HANDLED);
+ return IRQ_HANDLED;
}
static int
@@ -3233,7 +3269,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
* load the optimized firmware (which assumes aligned PCIe
* completions) in order to see if it works on this host.
*/
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
goto abort;
@@ -3261,7 +3297,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
abort:
/* fall back to using the unaligned firmware */
mgp->tx_boundary = 2048;
- mgp->fw_name = myri10ge_fw_unaligned;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
@@ -3284,7 +3320,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
link_width);
mgp->tx_boundary = 4096;
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
myri10ge_firmware_probe(mgp);
}
@@ -3293,22 +3329,29 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (forced)\n");
mgp->tx_boundary = 4096;
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
dev_info(&mgp->pdev->dev,
"Assuming unaligned completions (forced)\n");
mgp->tx_boundary = 2048;
- mgp->fw_name = myri10ge_fw_unaligned;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
}
+
+ kparam_block_sysfs_write(myri10ge_fw_name);
if (myri10ge_fw_name != NULL) {
- overridden = 1;
- mgp->fw_name = myri10ge_fw_name;
+ char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
+ if (fw_name) {
+ overridden = 1;
+ set_fw_name(mgp, fw_name, true);
+ }
}
+ kparam_unblock_sysfs_write(myri10ge_fw_name);
+
if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
myri10ge_fw_names[mgp->board_number] != NULL &&
strlen(myri10ge_fw_names[mgp->board_number])) {
- mgp->fw_name = myri10ge_fw_names[mgp->board_number];
+ set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
overridden = 1;
}
if (overridden)
@@ -3660,6 +3703,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
struct myri10ge_cmd cmd;
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
+ bool old_allocated;
int i, status, ncpus, msix_cap;
mgp->num_slices = 1;
@@ -3672,17 +3716,23 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
/* try to load the slice aware rss firmware */
old_fw = mgp->fw_name;
+ old_allocated = mgp->fw_name_allocated;
+ /* don't free old_fw if we override it. */
+ mgp->fw_name_allocated = false;
+
if (myri10ge_fw_name != NULL) {
dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
myri10ge_fw_name);
- mgp->fw_name = myri10ge_fw_name;
+ set_fw_name(mgp, myri10ge_fw_name, false);
} else if (old_fw == myri10ge_fw_aligned)
- mgp->fw_name = myri10ge_fw_rss_aligned;
+ set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
else
- mgp->fw_name = myri10ge_fw_rss_unaligned;
+ set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
status = myri10ge_load_firmware(mgp, 0);
if (status != 0) {
dev_info(&pdev->dev, "Rss firmware not found\n");
+ if (old_allocated)
+ kfree(old_fw);
return;
}
@@ -3729,8 +3779,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
* slices. We give up on MSI-X if we can only get a single
* vector. */
- mgp->msix_vectors = kzalloc(mgp->num_slices *
- sizeof(*mgp->msix_vectors), GFP_KERNEL);
+ mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
+ GFP_KERNEL);
if (mgp->msix_vectors == NULL)
goto disable_msix;
for (i = 0; i < mgp->num_slices; i++) {
@@ -3747,6 +3797,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices);
if (status == 0) {
pci_disable_msix(pdev);
+ if (old_allocated)
+ kfree(old_fw);
return;
}
if (status > 0)
@@ -3763,7 +3815,7 @@ disable_msix:
abort_with_fw:
mgp->num_slices = 1;
- mgp->fw_name = old_fw;
+ set_fw_name(mgp, old_fw, old_allocated);
myri10ge_load_firmware(mgp, 0);
}
@@ -3897,7 +3949,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "failed to alloc slice state\n");
goto abort_with_firmware;
}
- netdev->real_num_tx_queues = mgp->num_slices;
+ netif_set_real_num_tx_queues(netdev, mgp->num_slices);
+ netif_set_real_num_rx_queues(netdev, mgp->num_slices);
status = myri10ge_reset(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed reset\n");
@@ -3993,6 +4046,7 @@ abort_with_enabled:
pci_disable_device(pdev);
abort_with_netdev:
+ set_fw_name(mgp, NULL, false);
free_netdev(netdev);
return status;
}
@@ -4037,6 +4091,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
+ set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 1a57c3da1f4..4846e131a04 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -735,7 +735,7 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
int i;
for (i = 0; i < dev->addr_len; i++)
eth->h_dest[i] = 0;
- return(dev->hard_header_len);
+ return dev->hard_header_len;
}
if (daddr) {
@@ -926,7 +926,7 @@ static const struct net_device_ops myri_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit myri_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1079,7 +1079,7 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
mp->dev = dev;
dev->watchdog_timeo = 5*HZ;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
dev->netdev_ops = &myri_ops;
/* Register interrupt handler now. */
@@ -1124,7 +1124,7 @@ err:
return -ENODEV;
}
-static int __devexit myri_sbus_remove(struct of_device *op)
+static int __devexit myri_sbus_remove(struct platform_device *op)
{
struct myri_eth *mp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = mp->dev;
@@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = {
static int __init myri_sbus_init(void)
{
- return of_register_driver(&myri_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&myri_sbus_driver);
}
static void __exit myri_sbus_exit(void)
{
- of_unregister_driver(&myri_sbus_driver);
+ of_unregister_platform_driver(&myri_sbus_driver);
}
module_init(myri_sbus_init);
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
index ff363e95d9c..80a2fa5cf75 100644
--- a/drivers/net/myri_sbus.h
+++ b/drivers/net/myri_sbus.h
@@ -288,7 +288,7 @@ struct myri_eth {
struct myri_eeprom eeprom; /* Local copy of EEPROM. */
unsigned int reg_size; /* Size of register space. */
unsigned int shmem_base; /* Offset to shared ram. */
- struct of_device *myri_op; /* Our OF device struct. */
+ struct platform_device *myri_op; /* Our OF device struct. */
};
/* We use this to acquire receive skb's that we can DMA directly into. */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index a6033d48b5c..2fd39630b1e 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1570,7 +1570,7 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
+ np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
return 0;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index ffa1b9ce1cc..6dca3574e35 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 73
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
+#define _NETXEN_NIC_LINUX_SUBVERSION 74
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.74"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index c865dda2adf..a2d805aa75c 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -346,7 +346,7 @@ static u32 netxen_decode_crb_addr(u32 addr)
if (pci_base == NETXEN_ADDR_ERROR)
return pci_base;
else
- return (pci_base + offset);
+ return pci_base + offset;
}
#define NETXEN_MAX_ROM_WAIT_USEC 100
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1792,7 +1789,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
done = (sw_consumer == hw_consumer);
spin_unlock(&adapter->tx_clean_lock);
- return (done);
+ return done;
}
void
@@ -1805,8 +1802,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
netxen_ctx_msg msg = 0;
struct list_head *head;
- spin_lock(&rds_ring->lock);
-
producer = rds_ring->producer;
head = &rds_ring->free_list;
@@ -1853,8 +1848,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
-
- spin_unlock(&rds_ring->lock);
}
static void
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index fd86e18604e..2c6ceeb592b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -177,7 +177,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
- return (recv_ctx->sds_rings == NULL);
+ return recv_ctx->sds_rings == NULL;
}
static void
@@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
struct netxen_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
- memset(stats, 0, sizeof(*stats));
-
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes;
@@ -2133,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- netxen_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index b9b950845b0..c0437fd8d3f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -28,10 +28,7 @@
#include <linux/slab.h>
#include <linux/io.h>
-
-#ifdef CONFIG_SPARC64
#include <linux/of_device.h>
-#endif
#include "niu.h"
@@ -286,7 +283,7 @@ static int niu_enable_interrupts(struct niu *np, int on)
static u32 phy_encode(u32 type, int port)
{
- return (type << (port * 2));
+ return type << (port * 2);
}
static u32 phy_decode(u32 val, int port)
@@ -3046,8 +3043,7 @@ static int tcam_flush_all(struct niu *np)
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
- return ((u64)index | (num_entries == 1 ?
- HASH_TBL_ADDR_AUTOINC : 0));
+ return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
}
#if 0
@@ -3279,7 +3275,7 @@ static u16 tcam_get_index(struct niu *np, u16 idx)
/* One entry reserved for IP fragment rule */
if (idx >= (np->clas.tcam_sz - 1))
idx = 0;
- return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports));
+ return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
}
static u16 tcam_get_size(struct niu *np)
@@ -3316,7 +3312,7 @@ static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
a >>= PAGE_SHIFT;
a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
- return (a & (MAX_RBR_RING_SIZE - 1));
+ return a & (MAX_RBR_RING_SIZE - 1);
}
static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
@@ -3487,7 +3483,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_ERROR)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
@@ -4505,9 +4501,10 @@ static int niu_alloc_channels(struct niu *np)
np->num_rx_rings = parent->rxchan_per_port[port];
np->num_tx_rings = parent->txchan_per_port[port];
- np->dev->real_num_tx_queues = np->num_tx_rings;
+ netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
+ netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
- np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
+ np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
GFP_KERNEL);
err = -ENOMEM;
if (!np->rx_rings)
@@ -4541,7 +4538,7 @@ static int niu_alloc_channels(struct niu *np)
return err;
}
- np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
+ np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
GFP_KERNEL);
err = -ENOMEM;
if (!np->tx_rings)
@@ -7272,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
struct niu_parent *parent = np->parent;
struct niu_tcam_entry *tp;
int i, idx, cnt;
- u16 n_entries;
unsigned long flags;
-
+ int ret = 0;
/* put the tcam size here */
nfc->data = tcam_get_size(np);
niu_lock_parent(np, flags);
- n_entries = nfc->rule_cnt;
for (cnt = 0, i = 0; i < nfc->data; i++) {
idx = tcam_get_index(np, i);
tp = &parent->tcam[idx];
if (!tp->valid)
continue;
+ if (cnt == nfc->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
rule_locs[cnt] = i;
cnt++;
}
niu_unlock_parent(np, flags);
- if (n_entries != cnt) {
- /* print warning, this should not happen */
- netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
- np->parent->index, __func__, n_entries, cnt);
- }
-
- return 0;
+ return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
@@ -7469,10 +7462,12 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
if (fsp->flow_type == IP_USER_FLOW) {
int i;
int add_usr_cls = 0;
- int ipv6 = 0;
struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+ if (uspec->ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+
niu_lock_parent(np, flags);
for (i = 0; i < NIU_L3_PROG_CLS; i++) {
@@ -7501,9 +7496,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
default:
break;
}
- if (uspec->ip_ver == ETH_RX_NFC_IP6)
- ipv6 = 1;
- ret = tcam_user_ip_class_set(np, class, ipv6,
+ ret = tcam_user_ip_class_set(np, class, 0,
uspec->proto,
uspec->tos,
umask->tos);
@@ -7560,16 +7553,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
ret = -EINVAL;
goto out;
case IP_USER_FLOW:
- if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) {
- niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
- class);
- } else {
- /* Not yet implemented */
- netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
- parent->index, __func__);
- ret = -EINVAL;
- goto out;
- }
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
break;
default:
netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
@@ -7812,11 +7796,11 @@ static int niu_get_sset_count(struct net_device *dev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ((np->flags & NIU_FLAGS_XMAC ?
+ return (np->flags & NIU_FLAGS_XMAC ?
NUM_XMAC_STAT_KEYS :
NUM_BMAC_STAT_KEYS) +
(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
- (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS));
+ (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
}
static void niu_get_ethtool_stats(struct net_device *dev,
@@ -9106,7 +9090,7 @@ retry:
static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
- struct of_device *op = np->op;
+ struct platform_device *op = np->op;
const u32 *int_prop;
int i;
@@ -9114,12 +9098,12 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
if (!int_prop)
return -ENODEV;
- for (i = 0; i < op->num_irqs; i++) {
+ for (i = 0; i < op->archdata.num_irqs; i++) {
ldg_num_map[i] = int_prop[i];
- np->ldg[i].irq = op->irqs[i];
+ np->ldg[i].irq = op->archdata.irqs[i];
}
- np->num_ldg = op->num_irqs;
+ np->num_ldg = op->archdata.num_irqs;
return 0;
#else
@@ -9691,7 +9675,7 @@ static void __devinit niu_driver_version(void)
static struct net_device * __devinit niu_alloc_and_init(
struct device *gen_dev, struct pci_dev *pdev,
- struct of_device *op, const struct niu_ops *ops,
+ struct platform_device *op, const struct niu_ops *ops,
u8 port)
{
struct net_device *dev;
@@ -10067,7 +10051,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_single = niu_phys_unmap_single,
};
-static int __devinit niu_of_probe(struct of_device *op,
+static int __devinit niu_of_probe(struct platform_device *op,
const struct of_device_id *match)
{
union niu_parent_id parent_id;
@@ -10182,7 +10166,7 @@ err_out:
return err;
}
-static int __devexit niu_of_remove(struct of_device *op)
+static int __devexit niu_of_remove(struct platform_device *op)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
@@ -10249,14 +10233,14 @@ static int __init niu_init(void)
niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
#ifdef CONFIG_SPARC64
- err = of_register_driver(&niu_of_driver, &of_bus_type);
+ err = of_register_platform_driver(&niu_of_driver);
#endif
if (!err) {
err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
if (err)
- of_unregister_driver(&niu_of_driver);
+ of_unregister_platform_driver(&niu_of_driver);
#endif
}
@@ -10267,7 +10251,7 @@ static void __exit niu_exit(void)
{
pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
- of_unregister_driver(&niu_of_driver);
+ of_unregister_platform_driver(&niu_of_driver);
#endif
}
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index d6715465f35..a41fa8ebe05 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3236,7 +3236,7 @@ struct niu_phy_ops {
int (*link_status)(struct niu *np, int *);
};
-struct of_device;
+struct platform_device;
struct niu {
void __iomem *regs;
struct net_device *dev;
@@ -3297,7 +3297,7 @@ struct niu {
struct niu_vpd vpd;
u32 eeprom_len;
- struct of_device *op;
+ struct platform_device *op;
void __iomem *vir_regs_1;
void __iomem *vir_regs_2;
};
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 5a3488f76b3..3bbd0aab17e 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -923,7 +923,7 @@ static void rx_irq(struct net_device *ndev)
if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
skb->protocol = eth_type_trans(skb, ndev);
#ifdef NS83820_VLAN_ACCEL_SUPPORT
@@ -1246,7 +1246,6 @@ static int ns83820_get_settings(struct net_device *ndev,
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg, tanar, tbicr;
- int have_optical = 0;
int fullduplex = 0;
/*
@@ -1267,25 +1266,25 @@ static int ns83820_get_settings(struct net_device *ndev,
tanar = readl(dev->base + TANAR);
tbicr = readl(dev->base + TBICR);
- if (dev->CFG_cache & CFG_TBI_EN) {
- /* we have an optical interface */
- have_optical = 1;
- fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
-
- } else {
- /* We have copper */
- fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
- }
+ fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
cmd->supported = SUPPORTED_Autoneg;
- /* we have optical interface */
if (dev->CFG_cache & CFG_TBI_EN) {
+ /* we have optical interface */
cmd->supported |= SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
cmd->port = PORT_FIBRE;
- } /* TODO: else copper related support */
+ } else {
+ /* we have copper */
+ cmd->supported |= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_MII;
+ cmd->port = PORT_MII;
+ }
cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
switch (cfg / CFG_SPDSTS0 & 3) {
@@ -1299,7 +1298,8 @@ static int ns83820_get_settings(struct net_device *ndev,
cmd->speed = SPEED_10;
break;
}
- cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0;
+ cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@@ -1405,6 +1405,13 @@ static const struct ethtool_ops ops = {
.get_link = ns83820_get_link
};
+static inline void ns83820_disable_interrupts(struct ns83820 *dev)
+{
+ writel(0, dev->base + IMR);
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+}
+
/* this function is called in irq context from the ISR */
static void ns83820_mib_isr(struct ns83820 *dev)
{
@@ -1557,10 +1564,7 @@ static int ns83820_stop(struct net_device *ndev)
/* FIXME: protect against interrupt handler? */
del_timer_sync(&dev->tx_watchdog);
- /* disable interrupts */
- writel(0, dev->base + IMR);
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev);
dev->rx_info.up = 0;
synchronize_irq(dev->pci_dev->irq);
@@ -2023,10 +2027,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
dev->tx_descs, (long)dev->tx_phy_descs,
dev->rx_info.descs, (long)dev->rx_info.phy_descs);
- /* disable interrupts */
- writel(0, dev->base + IMR);
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev);
dev->IMR_cache = 0;
@@ -2250,9 +2251,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
return 0;
out_cleanup:
- writel(0, dev->base + IMR); /* paranoia */
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev); /* paranoia */
out_free_irq:
rtnl_unlock();
free_irq(pci_dev->irq, ndev);
@@ -2277,9 +2276,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
if (!ndev) /* paranoia */
return;
- writel(0, dev->base + IMR); /* paranoia */
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev); /* paranoia */
unregister_netdev(ndev);
free_irq(dev->pci_dev->irq, ndev);
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8ab6ae0a610..828e97cacdb 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -808,7 +808,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
XCT_MACRX_CSUM_S;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
packets++;
tot_bytes += len;
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index fefa79e34b9..4825959a0ef 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -90,21 +90,6 @@ pasemi_mac_ethtool_set_settings(struct net_device *netdev,
return phy_ethtool_sset(phydev, cmd);
}
-static void
-pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct pasemi_mac *mac;
- mac = netdev_priv(netdev);
-
- /* clear and fill out info */
- memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strncpy(drvinfo->driver, "pasemi_mac", 12);
- strcpy(drvinfo->version, "N/A");
- strcpy(drvinfo->fw_version, "N/A");
- strncpy(drvinfo->bus_info, pci_name(mac->pdev), 32);
-}
-
static u32
pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
{
@@ -164,7 +149,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
const struct ethtool_ops pasemi_mac_ethtool_ops = {
.get_settings = pasemi_mac_ethtool_get_settings,
.set_settings = pasemi_mac_ethtool_set_settings,
- .get_drvinfo = pasemi_mac_ethtool_get_drvinfo,
.get_msglevel = pasemi_mac_ethtool_get_msglevel,
.set_msglevel = pasemi_mac_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/pch_gbe/Makefile b/drivers/net/pch_gbe/Makefile
new file mode 100644
index 00000000000..31288d4ad24
--- /dev/null
+++ b/drivers/net/pch_gbe/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_PCH_GBE) += pch_gbe.o
+
+pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
+pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
new file mode 100644
index 00000000000..9a940a940a4
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -0,0 +1,661 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _PCH_GBE_H_
+#define _PCH_GBE_H_
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+/**
+ * pch_gbe_regs_mac_adr - Structure holding values of MAC address registers
+ * @high: First to fourth bytes of the MAC address
+ * @low: Fifth and sixth bytes of the MAC address
+ */
+struct pch_gbe_regs_mac_adr {
+ u32 high;
+ u32 low;
+};
+/**
+ * pch_gbe_regs - Structure holding values of MAC registers
+ */
+struct pch_gbe_regs {
+ u32 INT_ST;
+ u32 INT_EN;
+ u32 MODE;
+ u32 RESET;
+ u32 TCPIP_ACC;
+ u32 EX_LIST;
+ u32 INT_ST_HOLD;
+ u32 PHY_INT_CTRL;
+ u32 MAC_RX_EN;
+ u32 RX_FCTRL;
+ u32 PAUSE_REQ;
+ u32 RX_MODE;
+ u32 TX_MODE;
+ u32 RX_FIFO_ST;
+ u32 TX_FIFO_ST;
+ u32 TX_FID;
+ u32 TX_RESULT;
+ u32 PAUSE_PKT1;
+ u32 PAUSE_PKT2;
+ u32 PAUSE_PKT3;
+ u32 PAUSE_PKT4;
+ u32 PAUSE_PKT5;
+ u32 reserve[2];
+ struct pch_gbe_regs_mac_adr mac_adr[16];
+ u32 ADDR_MASK;
+ u32 MIIM;
+ u32 reserve2;
+ u32 RGMII_ST;
+ u32 RGMII_CTRL;
+ u32 reserve3[3];
+ u32 DMA_CTRL;
+ u32 reserve4[3];
+ u32 RX_DSC_BASE;
+ u32 RX_DSC_SIZE;
+ u32 RX_DSC_HW_P;
+ u32 RX_DSC_HW_P_HLD;
+ u32 RX_DSC_SW_P;
+ u32 reserve5[3];
+ u32 TX_DSC_BASE;
+ u32 TX_DSC_SIZE;
+ u32 TX_DSC_HW_P;
+ u32 TX_DSC_HW_P_HLD;
+ u32 TX_DSC_SW_P;
+ u32 reserve6[3];
+ u32 RX_DMA_ST;
+ u32 TX_DMA_ST;
+ u32 reserve7[2];
+ u32 WOL_ST;
+ u32 WOL_CTRL;
+ u32 WOL_ADDR_MASK;
+};
+
+/* Interrupt Status */
+/* Interrupt Status Hold */
+/* Interrupt Enable */
+#define PCH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */
+#define PCH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */
+#define PCH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */
+#define PCH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */
+#define PCH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */
+#define PCH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */
+#define PCH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */
+#define PCH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */
+#define PCH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */
+#define PCH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */
+#define PCH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */
+#define PCH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */
+#define PCH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */
+#define PCH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. */
+#define PCH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */
+
+/* Mode */
+#define PCH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */
+#define PCH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */
+#define PCH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */
+#define PCH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */
+#define PCH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */
+
+/* Reset */
+#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
+#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+
+/* TCP/IP Accelerator Control */
+#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
+#define PCH_GBE_RX_TCPIPACC_OFF 0x00000004 /* RX TCP/IP ACC Disabled */
+#define PCH_GBE_TX_TCPIPACC_EN 0x00000002 /* TX TCP/IP ACC Enable */
+#define PCH_GBE_RX_TCPIPACC_EN 0x00000001 /* RX TCP/IP ACC Enable */
+
+/* MAC RX Enable */
+#define PCH_GBE_MRE_MAC_RX_EN 0x00000001 /* MAC Receive Enable */
+
+/* RX Flow Control */
+#define PCH_GBE_FL_CTRL_EN 0x80000000 /* Pause packet is enabled */
+
+/* Pause Packet Request */
+#define PCH_GBE_PS_PKT_RQ 0x80000000 /* Pause packet Request */
+
+/* RX Mode */
+#define PCH_GBE_ADD_FIL_EN 0x80000000 /* Address Filtering Enable */
+/* Multicast Filtering Enable */
+#define PCH_GBE_MLT_FIL_EN 0x40000000
+/* Receive Almost Empty Threshold */
+#define PCH_GBE_RH_ALM_EMP_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_ALM_EMP_8 0x00004000 /* 8 words */
+#define PCH_GBE_RH_ALM_EMP_16 0x00008000 /* 16 words */
+#define PCH_GBE_RH_ALM_EMP_32 0x0000C000 /* 32 words */
+/* Receive Almost Full Threshold */
+#define PCH_GBE_RH_ALM_FULL_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
+#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
+#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
+/* RX FIFO Read Trigger Threshold */
+#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
+#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
+#define PCH_GBE_RH_RD_TRG_32 0x00000600 /* 32 words */
+#define PCH_GBE_RH_RD_TRG_64 0x00000800 /* 64 words */
+#define PCH_GBE_RH_RD_TRG_128 0x00000A00 /* 128 words */
+#define PCH_GBE_RH_RD_TRG_256 0x00000C00 /* 256 words */
+#define PCH_GBE_RH_RD_TRG_512 0x00000E00 /* 512 words */
+
+/* Receive Descriptor bit definitions */
+#define PCH_GBE_RXD_ACC_STAT_BCAST 0x00000400
+#define PCH_GBE_RXD_ACC_STAT_MCAST 0x00000200
+#define PCH_GBE_RXD_ACC_STAT_UCAST 0x00000100
+#define PCH_GBE_RXD_ACC_STAT_TCPIPOK 0x000000C0
+#define PCH_GBE_RXD_ACC_STAT_IPOK 0x00000080
+#define PCH_GBE_RXD_ACC_STAT_TCPOK 0x00000040
+#define PCH_GBE_RXD_ACC_STAT_IP6ERR 0x00000020
+#define PCH_GBE_RXD_ACC_STAT_OFLIST 0x00000010
+#define PCH_GBE_RXD_ACC_STAT_TYPEIP 0x00000008
+#define PCH_GBE_RXD_ACC_STAT_MACL 0x00000004
+#define PCH_GBE_RXD_ACC_STAT_PPPOE 0x00000002
+#define PCH_GBE_RXD_ACC_STAT_VTAGT 0x00000001
+#define PCH_GBE_RXD_GMAC_STAT_PAUSE 0x0200
+#define PCH_GBE_RXD_GMAC_STAT_MARBR 0x0100
+#define PCH_GBE_RXD_GMAC_STAT_MARMLT 0x0080
+#define PCH_GBE_RXD_GMAC_STAT_MARIND 0x0040
+#define PCH_GBE_RXD_GMAC_STAT_MARNOTMT 0x0020
+#define PCH_GBE_RXD_GMAC_STAT_TLONG 0x0010
+#define PCH_GBE_RXD_GMAC_STAT_TSHRT 0x0008
+#define PCH_GBE_RXD_GMAC_STAT_NOTOCTAL 0x0004
+#define PCH_GBE_RXD_GMAC_STAT_NBLERR 0x0002
+#define PCH_GBE_RXD_GMAC_STAT_CRCERR 0x0001
+
+/* Transmit Descriptor bit definitions */
+#define PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF 0x0008
+#define PCH_GBE_TXD_CTRL_ITAG 0x0004
+#define PCH_GBE_TXD_CTRL_ICRC 0x0002
+#define PCH_GBE_TXD_CTRL_APAD 0x0001
+#define PCH_GBE_TXD_WORDS_SHIFT 2
+#define PCH_GBE_TXD_GMAC_STAT_CMPLT 0x2000
+#define PCH_GBE_TXD_GMAC_STAT_ABT 0x1000
+#define PCH_GBE_TXD_GMAC_STAT_EXCOL 0x0800
+#define PCH_GBE_TXD_GMAC_STAT_SNGCOL 0x0400
+#define PCH_GBE_TXD_GMAC_STAT_MLTCOL 0x0200
+#define PCH_GBE_TXD_GMAC_STAT_CRSER 0x0100
+#define PCH_GBE_TXD_GMAC_STAT_TLNG 0x0080
+#define PCH_GBE_TXD_GMAC_STAT_TSHRT 0x0040
+#define PCH_GBE_TXD_GMAC_STAT_LTCOL 0x0020
+#define PCH_GBE_TXD_GMAC_STAT_TFUNDFLW 0x0010
+#define PCH_GBE_TXD_GMAC_STAT_RTYCNT_MASK 0x000F
+
+/* TX Mode */
+#define PCH_GBE_TM_NO_RTRY 0x80000000 /* No Retransmission */
+#define PCH_GBE_TM_LONG_PKT 0x40000000 /* Long Packet TX Enable */
+#define PCH_GBE_TM_ST_AND_FD 0x20000000 /* Store and Forward */
+#define PCH_GBE_TM_SHORT_PKT 0x10000000 /* Short Packet TX Enable */
+#define PCH_GBE_TM_LTCOL_RETX 0x08000000 /* Retransmission at Late Collision */
+/* Frame Start Threshold */
+#define PCH_GBE_TM_TH_TX_STRT_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_TX_STRT_8 0x00004000 /* 8 words */
+#define PCH_GBE_TM_TH_TX_STRT_16 0x00008000 /* 16 words */
+#define PCH_GBE_TM_TH_TX_STRT_32 0x0000C000 /* 32 words */
+/* Transmit Almost Empty Threshold */
+#define PCH_GBE_TM_TH_ALM_EMP_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_ALM_EMP_8 0x00000800 /* 8 words */
+#define PCH_GBE_TM_TH_ALM_EMP_16 0x00001000 /* 16 words */
+#define PCH_GBE_TM_TH_ALM_EMP_32 0x00001800 /* 32 words */
+#define PCH_GBE_TM_TH_ALM_EMP_64 0x00002000 /* 64 words */
+#define PCH_GBE_TM_TH_ALM_EMP_128 0x00002800 /* 128 words */
+#define PCH_GBE_TM_TH_ALM_EMP_256 0x00003000 /* 256 words */
+#define PCH_GBE_TM_TH_ALM_EMP_512 0x00003800 /* 512 words */
+/* Transmit Almost Full Threshold */
+#define PCH_GBE_TM_TH_ALM_FULL_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_ALM_FULL_8 0x00000200 /* 8 words */
+#define PCH_GBE_TM_TH_ALM_FULL_16 0x00000400 /* 16 words */
+#define PCH_GBE_TM_TH_ALM_FULL_32 0x00000600 /* 32 words */
+
+/* RX FIFO Status */
+#define PCH_GBE_RF_ALM_FULL 0x80000000 /* RX FIFO is almost full. */
+#define PCH_GBE_RF_ALM_EMP 0x40000000 /* RX FIFO is almost empty. */
+#define PCH_GBE_RF_RD_TRG 0x20000000 /* FIFO level has reached RH_RD_TRG. */
+#define PCH_GBE_RF_STRWD 0x1FFE0000 /* The word count of RX FIFO. */
+#define PCH_GBE_RF_RCVING 0x00010000 /* Stored in RX FIFO. */
+
+/* MAC Address Mask */
+#define PCH_GBE_BUSY 0x80000000
+
+/* MIIM */
+#define PCH_GBE_MIIM_OPER_WRITE 0x04000000
+#define PCH_GBE_MIIM_OPER_READ 0x00000000
+#define PCH_GBE_MIIM_OPER_READY 0x04000000
+#define PCH_GBE_MIIM_PHY_ADDR_SHIFT 21
+#define PCH_GBE_MIIM_REG_ADDR_SHIFT 16
+
+/* RGMII Status */
+#define PCH_GBE_LINK_UP 0x80000008
+#define PCH_GBE_RXC_SPEED_MSK 0x00000006
+#define PCH_GBE_RXC_SPEED_2_5M 0x00000000 /* 2.5MHz */
+#define PCH_GBE_RXC_SPEED_25M 0x00000002 /* 25MHz */
+#define PCH_GBE_RXC_SPEED_125M 0x00000004 /* 125MHz */
+#define PCH_GBE_DUPLEX_FULL 0x00000001
+
+/* RGMII Control */
+#define PCH_GBE_CRS_SEL 0x00000010
+#define PCH_GBE_RGMII_RATE_125M 0x00000000
+#define PCH_GBE_RGMII_RATE_25M 0x00000008
+#define PCH_GBE_RGMII_RATE_2_5M 0x0000000C
+#define PCH_GBE_RGMII_MODE_GMII 0x00000000
+#define PCH_GBE_RGMII_MODE_RGMII 0x00000002
+#define PCH_GBE_CHIP_TYPE_EXTERNAL 0x00000000
+#define PCH_GBE_CHIP_TYPE_INTERNAL 0x00000001
+
+/* DMA Control */
+#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
+#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
+
+/* Wake On LAN Status */
+#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
+#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
+
+/* The Frame registered in Address Recognizer */
+#define PCH_GBE_WLS_IND 0x00000002
+#define PCH_GBE_WLS_MP 0x00000001 /* Magic packet Address */
+
+/* Wake On LAN Control */
+#define PCH_GBE_WLC_WOL_MODE 0x00010000
+#define PCH_GBE_WLC_IGN_TLONG 0x00000100
+#define PCH_GBE_WLC_IGN_TSHRT 0x00000080
+#define PCH_GBE_WLC_IGN_OCTER 0x00000040
+#define PCH_GBE_WLC_IGN_NBLER 0x00000020
+#define PCH_GBE_WLC_IGN_CRCER 0x00000010
+#define PCH_GBE_WLC_BR 0x00000008
+#define PCH_GBE_WLC_MLT 0x00000004
+#define PCH_GBE_WLC_IND 0x00000002
+#define PCH_GBE_WLC_MP 0x00000001
+
+/* Wake On LAN Address Mask */
+#define PCH_GBE_WLA_BUSY 0x80000000
+
+
+
+/* TX/RX descriptor defines */
+#define PCH_GBE_MAX_TXD 4096
+#define PCH_GBE_DEFAULT_TXD 256
+#define PCH_GBE_MIN_TXD 8
+#define PCH_GBE_MAX_RXD 4096
+#define PCH_GBE_DEFAULT_RXD 256
+#define PCH_GBE_MIN_RXD 8
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define PCH_GBE_TX_DESC_MULTIPLE 8
+#define PCH_GBE_RX_DESC_MULTIPLE 8
+
+/* Read/Write operation is done through MII Management IF */
+#define PCH_GBE_HAL_MIIM_READ ((u32)0x00000000)
+#define PCH_GBE_HAL_MIIM_WRITE ((u32)0x04000000)
+
+/* flow control values */
+#define PCH_GBE_FC_NONE 0
+#define PCH_GBE_FC_RX_PAUSE 1
+#define PCH_GBE_FC_TX_PAUSE 2
+#define PCH_GBE_FC_FULL 3
+#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
+
+
+struct pch_gbe_hw;
+/**
+ * struct pch_gbe_functions - HAL API function pointers
+ * @get_bus_info: for pch_gbe_hal_get_bus_info
+ * @init_hw: for pch_gbe_hal_init_hw
+ * @read_phy_reg: for pch_gbe_hal_read_phy_reg
+ * @write_phy_reg: for pch_gbe_hal_write_phy_reg
+ * @reset_phy: for pch_gbe_hal_phy_hw_reset
+ * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
+ * @power_up_phy: for pch_gbe_hal_power_up_phy
+ * @power_down_phy: for pch_gbe_hal_power_down_phy
+ * @read_mac_addr: for pch_gbe_hal_read_mac_addr
+ */
+struct pch_gbe_functions {
+ void (*get_bus_info) (struct pch_gbe_hw *);
+ s32 (*init_hw) (struct pch_gbe_hw *);
+ s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
+ s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
+ void (*reset_phy) (struct pch_gbe_hw *);
+ void (*sw_reset_phy) (struct pch_gbe_hw *);
+ void (*power_up_phy) (struct pch_gbe_hw *hw);
+ void (*power_down_phy) (struct pch_gbe_hw *hw);
+ s32 (*read_mac_addr) (struct pch_gbe_hw *);
+};
+
+/**
+ * struct pch_gbe_mac_info - MAC information
+ * @addr: MAC address
+ * @fc: Mode of flow control
+ * @fc_autoneg: Auto negotiation enable for flow control setting
+ * @tx_fc_enable: Enable flag of Transmit flow control
+ * @max_frame_size: Max transmit frame size
+ * @min_frame_size: Min transmit frame size
+ * @autoneg: Auto negotiation enable
+ * @link_speed: Link speed
+ * @link_duplex: Link duplex
+ */
+struct pch_gbe_mac_info {
+ u8 addr[6];
+ u8 fc;
+ u8 fc_autoneg;
+ u8 tx_fc_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u8 autoneg;
+ u16 link_speed;
+ u16 link_duplex;
+};
+
+/**
+ * struct pch_gbe_phy_info - PHY information
+ * @addr: PHY address
+ * @id: PHY's identifier
+ * @revision: PHY's revision
+ * @reset_delay_us: HW reset delay time[us]
+ * @autoneg_advertised: Autoneg advertised
+ */
+struct pch_gbe_phy_info {
+ u32 addr;
+ u32 id;
+ u32 revision;
+ u32 reset_delay_us;
+ u16 autoneg_advertised;
+};
+
+/**
+ * pch_gbe_bus_info - Bus information
+ */
+struct pch_gbe_bus_info {
+ u8 type;
+ u8 speed;
+ u8 width;
+};
+
+/**
+ * pch_gbe_hw - Hardware information
+ */
+struct pch_gbe_hw {
+ void *back;
+
+ struct pch_gbe_regs __iomem *reg;
+ spinlock_t miim_lock;
+
+ const struct pch_gbe_functions *func;
+ struct pch_gbe_mac_info mac;
+ struct pch_gbe_phy_info phy;
+ struct pch_gbe_bus_info bus;
+};
+
+/**
+ * struct pch_gbe_rx_desc - Receive Descriptor
+ * @buffer_addr: RX Frame Buffer Address
+ * @tcp_ip_status: TCP/IP Accelerator Status
+ * @rx_words_eob: RX word count and Byte position
+ * @gbec_status: GMAC Status
+ * @dma_status: DMA Status
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ */
+struct pch_gbe_rx_desc {
+ u32 buffer_addr;
+ u32 tcp_ip_status;
+ u16 rx_words_eob;
+ u16 gbec_status;
+ u8 dma_status;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+/**
+ * struct pch_gbe_tx_desc - Transmit Descriptor
+ * @buffer_addr: TX Frame Buffer Address
+ * @length: Data buffer length
+ * @reserved1: Reserved
+ * @tx_words_eob: TX word count and Byte position
+ * @tx_frame_ctrl: TX Frame Control
+ * @dma_status: DMA Status
+ * @reserved2: Reserved
+ * @gbec_status: GMAC Status
+ */
+struct pch_gbe_tx_desc {
+ u32 buffer_addr;
+ u16 length;
+ u16 reserved1;
+ u16 tx_words_eob;
+ u16 tx_frame_ctrl;
+ u8 dma_status;
+ u8 reserved2;
+ u16 gbec_status;
+};
+
+
+/**
+ * struct pch_gbe_buffer - Buffer information
+ * @skb: pointer to a socket buffer
+ * @dma: DMA address
+ * @time_stamp: time stamp
+ * @length: data size
+ * @mapped: set when the buffer is DMA mapped
+ */
+struct pch_gbe_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ bool mapped;
+};
+
+/**
+ * struct pch_gbe_tx_ring - tx ring information
+ * @tx_lock: spinlock protecting the Tx ring
+ * @desc: pointer to the descriptor ring memory
+ * @dma: physical address of the descriptor ring
+ * @size: length of descriptor ring in bytes
+ * @count: number of descriptors in the ring
+ * @next_to_use: next descriptor to associate a buffer with
+ * @next_to_clean: next descriptor to check for DD status bit
+ * @buffer_info: array of buffer information structs
+ */
+struct pch_gbe_tx_ring {
+ spinlock_t tx_lock;
+ struct pch_gbe_tx_desc *desc;
+ dma_addr_t dma;
+ unsigned int size;
+ unsigned int count;
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+ struct pch_gbe_buffer *buffer_info;
+};
+
+/**
+ * struct pch_gbe_rx_ring - rx ring information
+ * @desc: pointer to the descriptor ring memory
+ * @dma: physical address of the descriptor ring
+ * @size: length of descriptor ring in bytes
+ * @count: number of descriptors in the ring
+ * @next_to_use: next descriptor to associate a buffer with
+ * @next_to_clean: next descriptor to check for DD status bit
+ * @buffer_info: array of buffer information structs
+ */
+struct pch_gbe_rx_ring {
+ struct pch_gbe_rx_desc *desc;
+ dma_addr_t dma;
+ unsigned int size;
+ unsigned int count;
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+ struct pch_gbe_buffer *buffer_info;
+};
+
+/**
+ * struct pch_gbe_hw_stats - Statistics counters collected by the MAC
+ * @rx_packets: total packets received
+ * @tx_packets: total packets transmitted
+ * @rx_bytes: total bytes received
+ * @tx_bytes: total bytes transmitted
+ * @rx_errors: bad packets received
+ * @tx_errors: packet transmit problems
+ * @rx_dropped: no space in Linux buffers
+ * @tx_dropped: no space available in Linux
+ * @multicast: multicast packets received
+ * @collisions: collisions
+ * @rx_crc_errors: received packet with crc error
+ * @rx_frame_errors: received frame alignment error
+ * @rx_alloc_buff_failed: allocate failure of a receive buffer
+ * @tx_length_errors: transmit length error
+ * @tx_aborted_errors: transmit aborted error
+ * @tx_carrier_errors: transmit carrier error
+ * @tx_timeout_count: Number of transmit timeouts
+ * @tx_restart_count: Number of transmit restarts
+ * @intr_rx_dsc_empty_count: Interrupt count of receive descriptor empty
+ * @intr_rx_frame_err_count: Interrupt count of receive frame error
+ * @intr_rx_fifo_err_count: Interrupt count of receive FIFO error
+ * @intr_rx_dma_err_count: Interrupt count of receive DMA error
+ * @intr_tx_fifo_err_count: Interrupt count of transmit FIFO error
+ * @intr_tx_dma_err_count: Interrupt count of transmit DMA error
+ * @intr_tcpip_err_count: Interrupt count of TCP/IP Accelerator
+ */
+struct pch_gbe_hw_stats {
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 rx_bytes;
+ u32 tx_bytes;
+ u32 rx_errors;
+ u32 tx_errors;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 multicast;
+ u32 collisions;
+ u32 rx_crc_errors;
+ u32 rx_frame_errors;
+ u32 rx_alloc_buff_failed;
+ u32 tx_length_errors;
+ u32 tx_aborted_errors;
+ u32 tx_carrier_errors;
+ u32 tx_timeout_count;
+ u32 tx_restart_count;
+ u32 intr_rx_dsc_empty_count;
+ u32 intr_rx_frame_err_count;
+ u32 intr_rx_fifo_err_count;
+ u32 intr_rx_dma_err_count;
+ u32 intr_tx_fifo_err_count;
+ u32 intr_tx_dma_err_count;
+ u32 intr_tcpip_err_count;
+};
+
+/**
+ * struct pch_gbe_adapter - board specific private data structure
+ * @stats_lock: Spinlock structure for status
+ * @tx_queue_lock: Spinlock structure for transmit
+ * @ethtool_lock: Spinlock structure for ethtool
+ * @irq_sem: Semaphore for interrupt
+ * @netdev: Pointer to the network device structure
+ * @pdev: Pointer to the PCI device structure
+ * @polling_netdev: Pointer to the polling network device structure
+ * @napi: NAPI structure
+ * @hw: Pointer to the hardware structure
+ * @stats: Hardware statistics
+ * @reset_task: Reset task
+ * @mii: MII information structure
+ * @watchdog_timer: Watchdog timer list
+ * @wake_up_evt: Wake up event
+ * @config_space: Configuration space
+ * @msg_enable: Driver message level
+ * @led_status: LED status
+ * @tx_ring: Pointer to the Tx descriptor ring structure
+ * @rx_ring: Pointer to the Rx descriptor ring structure
+ * @rx_buffer_len: Receive buffer length
+ * @tx_queue_len: Transmit queue length
+ * @rx_csum: Receive TCP/IP checksum enable/disable
+ * @tx_csum: Transmit TCP/IP checksum enable/disable
+ * @have_msi: PCI MSI mode flag
+ */
+
+struct pch_gbe_adapter {
+ spinlock_t stats_lock;
+ spinlock_t tx_queue_lock;
+ spinlock_t ethtool_lock;
+ atomic_t irq_sem;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device *polling_netdev;
+ struct napi_struct napi;
+ struct pch_gbe_hw hw;
+ struct pch_gbe_hw_stats stats;
+ struct work_struct reset_task;
+ struct mii_if_info mii;
+ struct timer_list watchdog_timer;
+ u32 wake_up_evt;
+ u32 *config_space;
+ unsigned long led_status;
+ struct pch_gbe_tx_ring *tx_ring;
+ struct pch_gbe_rx_ring *rx_ring;
+ unsigned long rx_buffer_len;
+ unsigned long tx_queue_len;
+ bool rx_csum;
+ bool tx_csum;
+ bool have_msi;
+};
+
+extern const char pch_driver_version[];
+
+/* pch_gbe_main.c */
+extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *txdr);
+extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rxdr);
+extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring);
+extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring);
+extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+extern int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
+extern void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
+ int data);
+/* pch_gbe_param.c */
+extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+
+/* pch_gbe_ethtool.c */
+extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+
+/* pch_gbe_mac.c */
+extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
+ u32 addr, u32 dir, u32 reg, u16 data);
+#endif /* _PCH_GBE_H_ */
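The descriptor-count limits and multiples defined above are enforced whenever a ring size changes; a minimal sketch of that validation using only these macros follows (the helper name is invented for illustration, the real logic lives in pch_gbe_set_ringparam() later in this patch):

        /* Illustrative only: clamp a requested Rx ring size to the supported
         * range and round it up to the required multiple of 8 descriptors. */
        static unsigned int pch_gbe_valid_rxd_count(unsigned int requested)
        {
                unsigned int count;

                count = clamp_val(requested, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
                return roundup(count, PCH_GBE_RX_DESC_MULTIPLE);
        }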
diff --git a/drivers/net/pch_gbe/pch_gbe_api.c b/drivers/net/pch_gbe/pch_gbe_api.c
new file mode 100644
index 00000000000..db53d2a943e
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_api.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "pch_gbe.h"
+#include "pch_gbe_phy.h"
+
+/* bus type values */
+#define pch_gbe_bus_type_unknown 0
+#define pch_gbe_bus_type_pci 1
+#define pch_gbe_bus_type_pcix 2
+#define pch_gbe_bus_type_pci_express 3
+#define pch_gbe_bus_type_reserved 4
+
+/* bus speed values */
+#define pch_gbe_bus_speed_unknown 0
+#define pch_gbe_bus_speed_33 1
+#define pch_gbe_bus_speed_66 2
+#define pch_gbe_bus_speed_100 3
+#define pch_gbe_bus_speed_120 4
+#define pch_gbe_bus_speed_133 5
+#define pch_gbe_bus_speed_2500 6
+#define pch_gbe_bus_speed_reserved 7
+
+/* bus width values */
+#define pch_gbe_bus_width_unknown 0
+#define pch_gbe_bus_width_pcie_x1 1
+#define pch_gbe_bus_width_pcie_x2 2
+#define pch_gbe_bus_width_pcie_x4 4
+#define pch_gbe_bus_width_32 5
+#define pch_gbe_bus_width_64 6
+#define pch_gbe_bus_width_reserved 7
+
+/**
+ * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
+{
+ hw->bus.type = pch_gbe_bus_type_pci_express;
+ hw->bus.speed = pch_gbe_bus_speed_2500;
+ hw->bus.width = pch_gbe_bus_width_pcie_x1;
+}
+
+/**
+ * pch_gbe_plat_init_hw - Initialize hardware
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed-EBUSY
+ */
+static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = pch_gbe_phy_get_id(hw);
+ if (ret_val) {
+ pr_err("pch_gbe_phy_get_id error\n");
+ return ret_val;
+ }
+ pch_gbe_phy_init_setting(hw);
+ /* Setup Mac interface option RGMII */
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ pch_gbe_phy_set_rgmii(hw);
+#endif
+ return ret_val;
+}
+
+static const struct pch_gbe_functions pch_gbe_ops = {
+ .get_bus_info = pch_gbe_plat_get_bus_info,
+ .init_hw = pch_gbe_plat_init_hw,
+ .read_phy_reg = pch_gbe_phy_read_reg_miic,
+ .write_phy_reg = pch_gbe_phy_write_reg_miic,
+ .reset_phy = pch_gbe_phy_hw_reset,
+ .sw_reset_phy = pch_gbe_phy_sw_reset,
+ .power_up_phy = pch_gbe_phy_power_up,
+ .power_down_phy = pch_gbe_phy_power_down,
+ .read_mac_addr = pch_gbe_mac_read_mac_addr
+};
+
+/**
+ * pch_gbe_plat_init_function_pointers - Init func ptrs
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
+{
+ /* Set PHY parameter */
+ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
+ /* Set function pointers */
+ hw->func = &pch_gbe_ops;
+}
+
+/**
+ * pch_gbe_hal_setup_init_funcs - Initializes function pointers
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * ENOSYS: Function is not registered
+ */
+inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
+{
+ if (!hw->reg) {
+ pr_err("ERROR: Registers not mapped\n");
+ return -ENOSYS;
+ }
+ pch_gbe_plat_init_function_pointers(hw);
+ return 0;
+}
+
+/**
+ * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->get_bus_info)
+ pr_err("ERROR: configuration\n");
+ else
+ hw->func->get_bus_info(hw);
+}
+
+/**
+ * pch_gbe_hal_init_hw - Initialize hardware
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * ENOSYS: Function is not registered
+ */
+inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->init_hw) {
+ pr_err("ERROR: configuration\n");
+ return -ENOSYS;
+ }
+ return hw->func->init_hw(hw);
+}
+
+/**
+ * pch_gbe_hal_read_phy_reg - Reads PHY register
+ * @hw: Pointer to the HW structure
+ * @offset: The register to read
+ * @data: The buffer to store the 16-bit read.
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
+ u16 *data)
+{
+ if (!hw->func->read_phy_reg)
+ return 0;
+ return hw->func->read_phy_reg(hw, offset, data);
+}
+
+/**
+ * pch_gbe_hal_write_phy_reg - Writes PHY register
+ * @hw: Pointer to the HW structure
+ * @offset: The register to read
+ * @data: The value to write.
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
+ u16 data)
+{
+ if (!hw->func->write_phy_reg)
+ return 0;
+ return hw->func->write_phy_reg(hw, offset, data);
+}
+
+/**
+ * pch_gbe_hal_phy_hw_reset - Hard PHY reset
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->reset_phy)
+ pr_err("ERROR: configuration\n");
+ else
+ hw->func->reset_phy(hw);
+}
+
+/**
+ * pch_gbe_hal_phy_sw_reset - Soft PHY reset
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->sw_reset_phy)
+ pr_err("ERROR: configuration\n");
+ else
+ hw->func->sw_reset_phy(hw);
+}
+
+/**
+ * pch_gbe_hal_read_mac_addr - Reads MAC address
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * ENOSYS: Function is not registered
+ */
+inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->read_mac_addr) {
+ pr_err("ERROR: configuration\n");
+ return -ENOSYS;
+ }
+ return hw->func->read_mac_addr(hw);
+}
+
+/**
+ * pch_gbe_hal_power_up_phy - Power up PHY
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
+{
+ if (hw->func->power_up_phy)
+ hw->func->power_up_phy(hw);
+}
+
+/**
+ * pch_gbe_hal_power_down_phy - Power down PHY
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
+{
+ if (hw->func->power_down_phy)
+ hw->func->power_down_phy(hw);
+}
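The HAL layer above is a thin dispatch table; a sketch of the expected call order during hardware bring-up is shown below (illustrative, not the driver's actual probe code, and it assumes hw->reg has already been mapped):

        static int example_hw_bringup(struct pch_gbe_hw *hw)
        {
                s32 ret;

                ret = pch_gbe_hal_setup_init_funcs(hw); /* installs pch_gbe_ops */
                if (ret)
                        return ret;
                pch_gbe_hal_get_bus_info(hw);           /* fills hw->bus */
                ret = pch_gbe_hal_read_mac_addr(hw);    /* fills hw->mac.addr */
                if (ret)
                        return ret;
                return pch_gbe_hal_init_hw(hw);         /* PHY id + RGMII setup */
        }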
diff --git a/drivers/net/pch_gbe/pch_gbe_api.h b/drivers/net/pch_gbe/pch_gbe_api.h
new file mode 100644
index 00000000000..94aaac5b057
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_api.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _PCH_GBE_API_H_
+#define _PCH_GBE_API_H_
+
+#include "pch_gbe_phy.h"
+
+s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
+void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
+s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
+s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
+s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
+void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
+void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
+s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
+void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
+void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
+
+#endif
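As an illustration of the prototypes above, a PHY status read through the HAL could look like the following (MII_BMSR and BMSR_LSTATUS come from <linux/mii.h>, already pulled in by pch_gbe.h; the helper itself is hypothetical):

        /* Illustrative PHY status read through the HAL wrappers. */
        static int example_phy_link_up(struct pch_gbe_hw *hw)
        {
                u16 bmsr = 0;

                if (pch_gbe_hal_read_phy_reg(hw, MII_BMSR, &bmsr))
                        return 0;
                return !!(bmsr & BMSR_LSTATUS);
        }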
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
new file mode 100644
index 00000000000..e06c6aea452
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "pch_gbe.h"
+#include "pch_gbe_api.h"
+
+/**
+ * pch_gbe_stats - Stats item information
+ */
+struct pch_gbe_stats {
+ char string[ETH_GSTRING_LEN];
+ size_t size;
+ size_t offset;
+};
+
+#define PCH_GBE_STAT(m) \
+{ \
+ .string = #m, \
+ .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \
+ .offset = offsetof(struct pch_gbe_hw_stats, m), \
+}
+
+/**
+ * pch_gbe_gstrings_stats - ethtool stats name list
+ */
+static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
+ PCH_GBE_STAT(rx_packets),
+ PCH_GBE_STAT(tx_packets),
+ PCH_GBE_STAT(rx_bytes),
+ PCH_GBE_STAT(tx_bytes),
+ PCH_GBE_STAT(rx_errors),
+ PCH_GBE_STAT(tx_errors),
+ PCH_GBE_STAT(rx_dropped),
+ PCH_GBE_STAT(tx_dropped),
+ PCH_GBE_STAT(multicast),
+ PCH_GBE_STAT(collisions),
+ PCH_GBE_STAT(rx_crc_errors),
+ PCH_GBE_STAT(rx_frame_errors),
+ PCH_GBE_STAT(rx_alloc_buff_failed),
+ PCH_GBE_STAT(tx_length_errors),
+ PCH_GBE_STAT(tx_aborted_errors),
+ PCH_GBE_STAT(tx_carrier_errors),
+ PCH_GBE_STAT(tx_timeout_count),
+ PCH_GBE_STAT(tx_restart_count),
+ PCH_GBE_STAT(intr_rx_dsc_empty_count),
+ PCH_GBE_STAT(intr_rx_frame_err_count),
+ PCH_GBE_STAT(intr_rx_fifo_err_count),
+ PCH_GBE_STAT(intr_rx_dma_err_count),
+ PCH_GBE_STAT(intr_tx_fifo_err_count),
+ PCH_GBE_STAT(intr_tx_dma_err_count),
+ PCH_GBE_STAT(intr_tcpip_err_count)
+};
+
+#define PCH_GBE_QUEUE_STATS_LEN 0
+#define PCH_GBE_GLOBAL_STATS_LEN ARRAY_SIZE(pch_gbe_gstrings_stats)
+#define PCH_GBE_STATS_LEN (PCH_GBE_GLOBAL_STATS_LEN + PCH_GBE_QUEUE_STATS_LEN)
+
+#define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4)
+#define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN)
+/**
+ * pch_gbe_get_settings - Get device-specific settings
+ * @netdev: Network interface device structure
+ * @ecmd: Ethtool command
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = mii_ethtool_gset(&adapter->mii, ecmd);
+ ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
+ ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
+
+ if (!netif_carrier_ok(adapter->netdev))
+ ecmd->speed = -1;
+ return ret;
+}
+
+/**
+ * pch_gbe_set_settings - Set device-specific settings
+ * @netdev: Network interface device structure
+ * @ecmd: Ethtool command
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int ret;
+
+ pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+
+ if (ecmd->speed == -1)
+ ecmd->speed = SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ ret = mii_ethtool_sset(&adapter->mii, ecmd);
+ if (ret) {
+ pr_err("Error: mii_ethtool_sset\n");
+ return ret;
+ }
+ hw->mac.link_speed = ecmd->speed;
+ hw->mac.link_duplex = ecmd->duplex;
+ hw->phy.autoneg_advertised = ecmd->advertising;
+ hw->mac.autoneg = ecmd->autoneg;
+ pch_gbe_hal_phy_sw_reset(hw);
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ pch_gbe_down(adapter);
+ ret = pch_gbe_up(adapter);
+ } else {
+ pch_gbe_reset(adapter);
+ }
+ return ret;
+}
+
+/**
+ * pch_gbe_get_regs_len - Report the size of device registers
+ * @netdev: Network interface device structure
+ * Returns: the size of device registers.
+ */
+static int pch_gbe_get_regs_len(struct net_device *netdev)
+{
+ return PCH_GBE_REGS_LEN * (int)sizeof(u32);
+}
+
+/**
+ * pch_gbe_get_drvinfo - Report driver information
+ * @netdev: Network interface device structure
+ * @drvinfo: Driver information structure
+ */
+static void pch_gbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ strcpy(drvinfo->driver, KBUILD_MODNAME);
+ strcpy(drvinfo->version, pch_driver_version);
+ strcpy(drvinfo->fw_version, "N/A");
+ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
+}
+
+/**
+ * pch_gbe_get_regs - Get device registers
+ * @netdev: Network interface device structure
+ * @regs: Ethtool register structure
+ * @p: Buffer to hold the register data read
+ */
+static void pch_gbe_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 *regs_buff = p;
+ u16 i, tmp;
+
+ regs->version = 0x1000000 | (__u32)pdev->revision << 16 | pdev->device;
+ for (i = 0; i < PCH_GBE_MAC_REGS_LEN; i++)
+ *regs_buff++ = ioread32(&hw->reg->INT_ST + i);
+ /* PHY register */
+ for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
+ pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+ *regs_buff++ = tmp;
+ }
+}
+
+/**
+ * pch_gbe_get_wol - Report whether Wake-on-Lan is enabled
+ * @netdev: Network interface device structure
+ * @wol: Wake-on-Lan information
+ */
+static void pch_gbe_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_IND))
+ wol->wolopts |= WAKE_UCAST;
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_MLT))
+ wol->wolopts |= WAKE_MCAST;
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_BR))
+ wol->wolopts |= WAKE_BCAST;
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_MP))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * pch_gbe_set_wol - Turn Wake-on-Lan on or off
+ * @netdev: Network interface device structure
+ * @wol: Pointer to the Wake-on-LAN information structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ if ((wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)))
+ return -EOPNOTSUPP;
+ /* these settings will always override what we currently have */
+ adapter->wake_up_evt = 0;
+
+ if ((wol->wolopts & WAKE_UCAST))
+ adapter->wake_up_evt |= PCH_GBE_WLC_IND;
+ if ((wol->wolopts & WAKE_MCAST))
+ adapter->wake_up_evt |= PCH_GBE_WLC_MLT;
+ if ((wol->wolopts & WAKE_BCAST))
+ adapter->wake_up_evt |= PCH_GBE_WLC_BR;
+ if ((wol->wolopts & WAKE_MAGIC))
+ adapter->wake_up_evt |= PCH_GBE_WLC_MP;
+ return 0;
+}
+
+/**
+ * pch_gbe_nway_reset - Restart autonegotiation
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_nway_reset(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ return mii_nway_restart(&adapter->mii);
+}
+
+/**
+ * pch_gbe_get_ringparam - Report ring sizes
+ * @netdev: Network interface device structure
+ * @ring: Ring param structure
+ */
+static void pch_gbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
+ struct pch_gbe_rx_ring *rxdr = adapter->rx_ring;
+
+ ring->rx_max_pending = PCH_GBE_MAX_RXD;
+ ring->tx_max_pending = PCH_GBE_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+/**
+ * pch_gbe_set_ringparam - Set ring sizes
+ * @netdev: Network interface device structure
+ * @ring: Ring param structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *txdr, *tx_old;
+ struct pch_gbe_rx_ring *rxdr, *rx_old;
+ int tx_ring_size, rx_ring_size;
+ int err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+ tx_ring_size = (int)sizeof(struct pch_gbe_tx_ring);
+ rx_ring_size = (int)sizeof(struct pch_gbe_rx_ring);
+
+ if ((netif_running(adapter->netdev)))
+ pch_gbe_down(adapter);
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ txdr = kzalloc(tx_ring_size, GFP_KERNEL);
+ if (!txdr) {
+ err = -ENOMEM;
+ goto err_alloc_tx;
+ }
+ rxdr = kzalloc(rx_ring_size, GFP_KERNEL);
+ if (!rxdr) {
+ err = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ adapter->tx_ring = txdr;
+ adapter->rx_ring = rxdr;
+
+ rxdr->count =
+ clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
+ rxdr->count = roundup(rxdr->count, PCH_GBE_RX_DESC_MULTIPLE);
+
+ txdr->count =
+ clamp_val(ring->tx_pending, PCH_GBE_MIN_TXD, PCH_GBE_MAX_TXD);
+ txdr->count = roundup(txdr->count, PCH_GBE_TX_DESC_MULTIPLE);
+
+ if ((netif_running(adapter->netdev))) {
+ /* Try to get new resources before deleting old */
+ err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+ err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+#ifdef RINGFREE
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+#else
+ pch_gbe_free_rx_resources(adapter, rx_old);
+ pch_gbe_free_tx_resources(adapter, tx_old);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+#endif
+ err = pch_gbe_up(adapter);
+ }
+ return err;
+
+err_setup_tx:
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rxdr);
+err_alloc_rx:
+ kfree(txdr);
+err_alloc_tx:
+ if (netif_running(adapter->netdev))
+ pch_gbe_up(adapter);
+ return err;
+}
+
+/**
+ * pch_gbe_get_pauseparam - Report pause parameters
+ * @netdev: Network interface device structure
+ * @pause: Pause parameters structure
+ */
+static void pch_gbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ ((hw->mac.fc_autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->mac.fc == PCH_GBE_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->mac.fc == PCH_GBE_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->mac.fc == PCH_GBE_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+/**
+ * pch_gbe_set_pauseparam - Set pause parameters
+ * @netdev: Network interface device structure
+ * @pause: Pause parameters structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int ret = 0;
+
+ hw->mac.fc_autoneg = pause->autoneg;
+ if ((pause->rx_pause) && (pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_FULL;
+ else if ((pause->rx_pause) && (!pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_RX_PAUSE;
+ else if ((!pause->rx_pause) && (pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_TX_PAUSE;
+ else if ((!pause->rx_pause) && (!pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_NONE;
+
+ if (hw->mac.fc_autoneg == AUTONEG_ENABLE) {
+ if ((netif_running(adapter->netdev))) {
+ pch_gbe_down(adapter);
+ ret = pch_gbe_up(adapter);
+ } else {
+ pch_gbe_reset(adapter);
+ }
+ } else {
+ ret = pch_gbe_mac_force_mac_fc(hw);
+ }
+ return ret;
+}
+
+/**
+ * pch_gbe_get_rx_csum - Report whether receive checksums are turned on or off
+ * @netdev: Network interface device structure
+ * Returns
+ * true(1): Checksum On
+ * false(0): Checksum Off
+ */
+static u32 pch_gbe_get_rx_csum(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rx_csum;
+}
+
+/**
+ * pch_gbe_set_rx_csum - Turn receive checksum on or off
+ * @netdev: Network interface device structure
+ * @data: Checksum On[true] or Off[false]
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->rx_csum = data;
+ if ((netif_running(netdev)))
+ pch_gbe_reinit_locked(adapter);
+ else
+ pch_gbe_reset(adapter);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
+ * @netdev: Network interface device structure
+ * Returns
+ * true(1): Checksum On
+ * false(0): Checksum Off
+ */
+static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+/**
+ * pch_gbe_set_tx_csum - Turn transmit checksums on or off
+ * @netdev: Network interface device structure
+ * @data: Checksum on[true] or off[false]
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->tx_csum = data;
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+ return 0;
+}
+
+/**
+ * pch_gbe_get_strings - Return a set of strings that describe the requested
+ * objects
+ * @netdev: Network interface device structure
+ * @stringset: Select the stringset. [ETH_SS_TEST] [ETH_SS_STATS]
+ * @data: Pointer of read string data.
+ */
+static void pch_gbe_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case (u32) ETH_SS_STATS:
+ for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, pch_gbe_gstrings_stats[i].string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+/**
+ * pch_gbe_get_ethtool_stats - Return statistics about the device
+ * @netdev: Network interface device structure
+ * @stats: Ethtool stats structure
+ * @data: Pointer to the output buffer for the statistics values
+ */
+static void pch_gbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+ const struct pch_gbe_stats *gstats = pch_gbe_gstrings_stats;
+ char *hw_stats = (char *)&adapter->stats;
+
+ pch_gbe_update_stats(adapter);
+ for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
+ char *p = hw_stats + gstats->offset;
+ data[i] = gstats->size == sizeof(u64) ? *(u64 *)p : *(u32 *)p;
+ gstats++;
+ }
+}
+
+static int pch_gbe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PCH_GBE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops pch_gbe_ethtool_ops = {
+ .get_settings = pch_gbe_get_settings,
+ .set_settings = pch_gbe_set_settings,
+ .get_drvinfo = pch_gbe_get_drvinfo,
+ .get_regs_len = pch_gbe_get_regs_len,
+ .get_regs = pch_gbe_get_regs,
+ .get_wol = pch_gbe_get_wol,
+ .set_wol = pch_gbe_set_wol,
+ .nway_reset = pch_gbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = pch_gbe_get_ringparam,
+ .set_ringparam = pch_gbe_set_ringparam,
+ .get_pauseparam = pch_gbe_get_pauseparam,
+ .set_pauseparam = pch_gbe_set_pauseparam,
+ .get_rx_csum = pch_gbe_get_rx_csum,
+ .set_rx_csum = pch_gbe_set_rx_csum,
+ .get_tx_csum = pch_gbe_get_tx_csum,
+ .set_tx_csum = pch_gbe_set_tx_csum,
+ .get_strings = pch_gbe_get_strings,
+ .get_ethtool_stats = pch_gbe_get_ethtool_stats,
+ .get_sset_count = pch_gbe_get_sset_count,
+};
+
+void pch_gbe_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+}
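pch_gbe_get_ethtool_stats() above walks the PCH_GBE_STAT() table and pulls each counter out of struct pch_gbe_hw_stats by its recorded size and offset; reduced to a single entry, the lookup amounts to the sketch below (illustrative helper, not part of the patch):

        static u64 example_read_stat(struct pch_gbe_adapter *adapter,
                                     const struct pch_gbe_stats *gstat)
        {
                /* locate the counter inside the stats block by its offset */
                char *p = (char *)&adapter->stats + gstat->offset;

                return gstat->size == sizeof(u64) ? *(u64 *)p : *(u32 *)p;
        }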
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
new file mode 100644
index 00000000000..e44644f169f
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -0,0 +1,2473 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+#include "pch_gbe_api.h"
+
+#define DRV_VERSION "1.00"
+const char pch_driver_version[] = DRV_VERSION;
+
+#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
+#define PCH_GBE_MAR_ENTRIES 16
+#define PCH_GBE_SHORT_PKT 64
+#define DSC_INIT16 0xC000
+#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
+#define PCH_GBE_COPYBREAK_DEFAULT 256
+#define PCH_GBE_PCI_BAR 1
+
+#define PCH_GBE_TX_WEIGHT 64
+#define PCH_GBE_RX_WEIGHT 64
+#define PCH_GBE_RX_BUFFER_WRITE 16
+
+/* Initialize the wake-on-LAN settings */
+#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
+
+#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
+ PCH_GBE_CHIP_TYPE_INTERNAL | \
+ PCH_GBE_RGMII_MODE_RGMII | \
+ PCH_GBE_CRS_SEL \
+ )
+
+/* Ethertype field values */
+#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
+#define PCH_GBE_FRAME_SIZE_2048 2048
+#define PCH_GBE_FRAME_SIZE_4096 4096
+#define PCH_GBE_FRAME_SIZE_8192 8192
+
+#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
+#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
+#define PCH_GBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* Pause packet value */
+#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
+#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
+#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
+#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
+
+#define PCH_GBE_ETH_ALEN 6
+
+/* This defines the bits that are set in the Interrupt Enable
+ * (INT_EN) register. Each bit is documented below:
+ * o RX_DMA_CMPLT = Receive DMA Transfer Complete
+ * o RX_DSC_EMP = Receive Descriptor Empty
+ * o WOL_DET = Wake On LAN Event Detection
+ * o TX_CMPLT = MAC Transmission Complete
+ */
+#define PCH_GBE_INT_ENABLE_MASK ( \
+ PCH_GBE_INT_RX_DMA_CMPLT | \
+ PCH_GBE_INT_RX_DSC_EMP | \
+ PCH_GBE_INT_WOL_DET | \
+ PCH_GBE_INT_TX_CMPLT \
+ )
+
+
+static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
+
+/**
+ * pch_gbe_mac_read_mac_addr - Read MAC address
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ */
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+{
+ u32 adr1a, adr1b;
+
+ adr1a = ioread32(&hw->reg->mac_adr[0].high);
+ adr1b = ioread32(&hw->reg->mac_adr[0].low);
+
+ hw->mac.addr[0] = (u8)(adr1a & 0xFF);
+ hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
+ hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
+ hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
+ hw->mac.addr[4] = (u8)(adr1b & 0xFF);
+ hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
+
+ pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
+ return 0;
+}
+
+/**
+ * pch_gbe_wait_clr_bit - Wait to clear a bit
+ * @reg: Pointer to the register
+ * @bit: Busy bit to wait for
+ */
+void pch_gbe_wait_clr_bit(void *reg, u32 bit)
+{
+ u32 tmp;
+ /* wait busy */
+ tmp = 1000;
+ while ((ioread32(reg) & bit) && --tmp)
+ cpu_relax();
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+}
+
+/**
+ * pch_gbe_mac_mar_set - Set MAC address register
+ * @hw: Pointer to the HW structure
+ * @addr: Pointer to the MAC address
+ * @index: MAC address array register
+ */
+void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
+{
+ u32 mar_low, mar_high, adrmask;
+
+ pr_debug("index : 0x%x\n", index);
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
+ /* Disable the MAC address entry at this index. */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Set the MAC address to the MAC address 1A/1B register */
+ iowrite32(mar_high, &hw->reg->mac_adr[index].high);
+ iowrite32(mar_low, &hw->reg->mac_adr[index].low);
+ /* Start the MAC address of index */
+ iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+/**
+ * pch_gbe_mac_reset_hw - Reset hardware
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
+#endif
+ pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
+/**
+ * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
+ * @hw: Pointer to the HW structure
+ * @mar_count: Number of receive address registers
+ */
+void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
+{
+ u32 i;
+
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other receive addresses */
+ for (i = 1; i < mar_count; i++) {
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+
+/**
+ * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
+ * @hw: Pointer to the HW structure
+ * @mc_addr_list: Array of multicast addresses to program
+ * @mc_addr_count: Number of multicast addresses to program
+ * @mar_used_count: The first MAC Address register free to program
+ * @mar_total_num: Total number of supported MAC Address Registers
+ */
+void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 mar_used_count, u32 mar_total_num)
+{
+ u32 i, adrmask;
+
+ /* Load the first set of multicast addresses into the exact
+ * filters (RAR). If there are not enough to fill the RAR
+ * array, clear the filters.
+ */
+ for (i = mar_used_count; i < mar_total_num; i++) {
+ if (mc_addr_count) {
+ pch_gbe_mac_mar_set(hw, mc_addr_list, i);
+ mc_addr_count--;
+ mc_addr_list += PCH_GBE_ETH_ALEN;
+ } else {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << i)),
+ &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ }
+}
+
+/**
+ * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_mac_info *mac = &hw->mac;
+ u32 rx_fctrl;
+
+ pr_debug("mac->fc = %u\n", mac->fc);
+
+ rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
+
+ switch (mac->fc) {
+ case PCH_GBE_FC_NONE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_RX_PAUSE:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_TX_PAUSE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ case PCH_GBE_FC_FULL:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ default:
+ pr_err("Flow control param set incorrectly\n");
+ return -EINVAL;
+ }
+ if (mac->link_duplex == DUPLEX_HALF)
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
+ pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
+ ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
+ return 0;
+}
+
+/**
+ * pch_gbe_mac_set_wol_event - Set wake-on-lan event
+ * @hw: Pointer to the HW structure
+ * @wu_evt: Wake up event
+ */
+void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
+{
+ u32 addr_mask;
+
+ pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
+ wu_evt, ioread32(&hw->reg->ADDR_MASK));
+
+ if (wu_evt) {
+ /* Set Wake-On-Lan address mask */
+ addr_mask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
+ iowrite32(0, &hw->reg->WOL_ST);
+ iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
+ iowrite32(0x02, &hw->reg->TCPIP_ACC);
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ } else {
+ iowrite32(0, &hw->reg->WOL_CTRL);
+ iowrite32(0, &hw->reg->WOL_ST);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_mac_ctrl_miim - Control MIIM interface
+ * @hw: Pointer to the HW structure
+ * @addr: Address of PHY
+ * @dir: Operation (Write or Read)
+ * @reg: Access register of PHY
+ * @data: Write data.
+ *
+ * Returns: Read data.
+ */
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data)
+{
+ u32 data_out = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->miim_lock, flags);
+
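+ /* Wait (up to 100 x 20us) for any in-flight MIIM operation to complete */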
+ for (i = 100; i; --i) {
+ if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
+ break;
+ udelay(20);
+ }
+ if (i == 0) {
+ pr_err("pch-gbe.miim won't go Ready\n");
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+ return 0; /* No way to indicate timeout error */
+ }
+ iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
+ (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
+ dir | data), &hw->reg->MIIM);
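+ /* Poll for completion of the read/write operation just issued */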
+ for (i = 0; i < 100; i++) {
+ udelay(20);
+ data_out = ioread32(&hw->reg->MIIM);
+ if ((data_out & PCH_GBE_MIIM_OPER_READY))
+ break;
+ }
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+
+ pr_debug("PHY %s: reg=%d, data=0x%04X\n",
+ dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
+ dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
+ return (u16) data_out;
+}
+
+/**
+ * pch_gbe_mac_set_pause_packet - Set pause packet
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
+{
+ unsigned long tmp2, tmp3;
+
+ /* Set Pause packet */
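+ /* Pack the station MAC address bytes into the PAUSE_PKT2/PAUSE_PKT3 register values */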
+ tmp2 = hw->mac.addr[1];
+ tmp2 = (tmp2 << 8) | hw->mac.addr[0];
+ tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
+
+ tmp3 = hw->mac.addr[5];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[4];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[3];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[2];
+
+ iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
+ iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
+ iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
+ iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
+ iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
+
+ /* Transmit Pause Packet */
+ iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
+
+ pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
+ ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
+ ioread32(&hw->reg->PAUSE_PKT5));
+
+ return;
+}
+
+/**
+ * pch_gbe_alloc_queues - Allocate memory for all rings
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
+{
+ int size;
+
+ size = (int)sizeof(struct pch_gbe_tx_ring);
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+ size = (int)sizeof(struct pch_gbe_rx_ring);
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * pch_gbe_init_stats - Initialize status
+ * @adapter: Board private structure to initialize
+ */
+static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+ return;
+}
+
+/**
+ * pch_gbe_init_phy - Initialize PHY
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 addr;
+ u16 bmcr, stat;
+
+ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
+ bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
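+ /* BMSR status bits are latched, so read the register twice to get the current state */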
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
+ break;
+ }
+ adapter->hw.phy.addr = adapter->mii.phy_id;
+ pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
+ if (addr == PCH_GBE_PHY_REGS_LEN)
+ return -EAGAIN;
+ /* Select the PHY and isolate the rest */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ if (addr != adapter->mii.phy_id) {
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ BMCR_ISOLATE);
+ } else {
+ bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ }
+ }
+
+ /* MII setup */
+ adapter->mii.phy_id_mask = 0x1F;
+ adapter->mii.reg_num_mask = 0x1F;
+ adapter->mii.dev = adapter->netdev;
+ adapter->mii.mdio_read = pch_gbe_mdio_read;
+ adapter->mii.mdio_write = pch_gbe_mdio_write;
+ adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
+ return 0;
+}
+
+/**
+ * pch_gbe_mdio_read - The read function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID
+ * @reg: Access location
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
+ (u16) 0);
+}
+
+/**
+ * pch_gbe_mdio_write - The write function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID (not used)
+ * @reg: Access location
+ * @data: Write data
+ */
+void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
+}
+
+/**
+ * pch_gbe_reset_task - Reset processing at the time of transmission timeout
+ * @work: Pointer of board private structure
+ */
+static void pch_gbe_reset_task(struct work_struct *work)
+{
+ struct pch_gbe_adapter *adapter;
+ adapter = container_of(work, struct pch_gbe_adapter, reset_task);
+
+ pch_gbe_reinit_locked(adapter);
+}
+
+/**
+ * pch_gbe_reinit_locked - Re-initialization
+ * @adapter: Board private structure
+ */
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ pch_gbe_down(adapter);
+ pch_gbe_up(adapter);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * pch_gbe_reset - Reset GbE
+ * @adapter: Board private structure
+ */
+void pch_gbe_reset(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_mac_reset_hw(&adapter->hw);
+ /* Setup the receive address. */
+ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
+ if (pch_gbe_hal_init_hw(&adapter->hw))
+ pr_err("Hardware Error\n");
+}
+
+/**
+ * pch_gbe_free_irq - Free an interrupt
+ * @adapter: Board private structure
+ */
+static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+ if (adapter->have_msi) {
+ pci_disable_msi(adapter->pdev);
+ pr_debug("call pci_disable_msi\n");
+ }
+}
+
+/**
+ * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ atomic_inc(&adapter->irq_sem);
+ iowrite32(0, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ synchronize_irq(adapter->pdev->irq);
+
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (likely(atomic_dec_and_test(&adapter->irq_sem)))
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_setup_tctl - configure the Transmit control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tx_mode, tcpip;
+
+ tx_mode = PCH_GBE_TM_LONG_PKT |
+ PCH_GBE_TM_ST_AND_FD |
+ PCH_GBE_TM_SHORT_PKT |
+ PCH_GBE_TM_TH_TX_STRT_8 |
+ PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
+
+ iowrite32(tx_mode, &hw->reg->TX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+ tcpip |= PCH_GBE_TX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tdba, tdlen, dctrl;
+
+ pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->tx_ring->dma,
+ adapter->tx_ring->size);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = adapter->tx_ring->dma;
+ tdlen = adapter->tx_ring->size - 0x10;
+ iowrite32(tdba, &hw->reg->TX_DSC_BASE);
+ iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
+ iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
+
+ /* Enables Transmission DMA */
+ dctrl = ioread32(&hw->reg->DMA_CTRL);
+ dctrl |= PCH_GBE_TX_DMA_EN;
+ iowrite32(dctrl, &hw->reg->DMA_CTRL);
+}
+
+/**
+ * pch_gbe_setup_rctl - Configure the receive control registers
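+ /* Try MSI first; fall back to a shared legacy interrupt if MSI cannot be enabled */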
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rx_mode, tcpip;
+
+ rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
+ PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
+
+ iowrite32(rx_mode, &hw->reg->RX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+
+ if (adapter->rx_csum) {
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip |= PCH_GBE_RX_TCPIPACC_EN;
+ } else {
+ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
+ }
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_rx - Configure Receive Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rdba, rdlen, rctl, rxdma;
+
+ pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->rx_ring->dma,
+ adapter->rx_ring->size);
+
+ pch_gbe_mac_force_mac_fc(hw);
+
+ /* Disables Receive MAC */
+ rctl = ioread32(&hw->reg->MAC_RX_EN);
+ iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+
+ /* Disables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+
+ pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
+ ioread32(&hw->reg->MAC_RX_EN),
+ ioread32(&hw->reg->DMA_CTRL));
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ rdba = adapter->rx_ring->dma;
+ rdlen = adapter->rx_ring->size - 0x10;
+ iowrite32(rdba, &hw->reg->RX_DSC_BASE);
+ iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
+ iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
+
+ /* Enables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma |= PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Enables Receive */
+ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+}
+
+/**
+ * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_tx_resource(
+ struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_rx_resource(
+ struct pch_gbe_adapter *adapter,
+ struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_clean_tx_ring - Free Tx Buffers
+ * @adapter: Board private structure
+ * @tx_ring: Ring to be cleaned
+ */
+static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
+
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
+ iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
+}
+
+/**
+ * pch_gbe_clean_rx_ring - Free Rx Buffers
+ * @adapter: Board private structure
+ * @rx_ring: Ring to free buffers from
+ */
+static void
+pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
+ iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
+}
+
+static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long rgmii = 0;
+
+ /* Set the RGMII control. */
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ switch (speed) {
+ case SPEED_10:
+ rgmii = (PCH_GBE_RGMII_RATE_2_5M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_100:
+ rgmii = (PCH_GBE_RGMII_RATE_25M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_1000:
+ rgmii = (PCH_GBE_RGMII_RATE_125M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ }
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#else /* GMII */
+ rgmii = 0;
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#endif
+}
+static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long mode = 0;
+
+ /* Set the communication mode */
+ switch (speed) {
+ case SPEED_10:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 10;
+ break;
+ case SPEED_100:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 100;
+ break;
+ case SPEED_1000:
+ mode = PCH_GBE_MODE_GMII_ETHER;
+ break;
+ }
+ if (duplex == DUPLEX_FULL)
+ mode |= PCH_GBE_MODE_FULL_DUPLEX;
+ else
+ mode |= PCH_GBE_MODE_HALF_DUPLEX;
+ iowrite32(mode, &hw->reg->MODE);
+}
+
+/**
+ * pch_gbe_watchdog - Watchdog process
+ * @data: Board private structure
+ */
+static void pch_gbe_watchdog(unsigned long data)
+{
+ struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct ethtool_cmd cmd;
+
+ pr_debug("right now = %ld\n", jiffies);
+
+ pch_gbe_update_stats(adapter);
+ if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
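+ /* Link came up: restore the Tx queue length saved in pch_gbe_up() */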
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ /* mii library handles link maintenance tasks */
+ if (mii_ethtool_gset(&adapter->mii, &cmd)) {
+ pr_err("ethtool get setting Error\n");
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies +
+ PCH_GBE_WATCHDOG_PERIOD));
+ return;
+ }
+ hw->mac.link_speed = cmd.speed;
+ hw->mac.link_duplex = cmd.duplex;
+ /* Set the RGMII control. */
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ /* Set the communication mode */
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ netdev_dbg(netdev,
+ "Link is Up %d Mbps %s-Duplex\n",
+ cmd.speed,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else if ((!mii_link_ok(&adapter->mii)) &&
+ (netif_carrier_ok(netdev))) {
+ netdev_dbg(netdev, "NIC Link is Down\n");
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
+}
+
+/**
+ * pch_gbe_tx_queue - Carry out queuing of the transmission data
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring structure
+ * @skb: Socket buffer structure
+ */
+static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *tmp_skb;
+ unsigned int frame_ctrl;
+ unsigned int ring_num;
+ unsigned long flags;
+
+ /*-- Set frame control --*/
+ frame_ctrl = 0;
+ if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
+ if (unlikely(!adapter->tx_csum))
+ frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+
+ /* Perform checksum processing in software:
+ * the hardware accelerator cannot compute the checksum
+ * when the packet is shorter than 64 bytes.
+ */
+ if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) {
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
+ PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ unsigned int offset;
+ iph->check = 0;
+ iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
+ offset = skb_transport_offset(skb);
+ if (iph->protocol == IPPROTO_TCP) {
+ skb->csum = 0;
+ tcp_hdr(skb)->check = 0;
+ skb->csum = skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ tcp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ skb->csum = 0;
+ udp_hdr(skb)->check = 0;
+ skb->csum =
+ skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ udp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_UDP,
+ skb->csum);
+ }
+ }
+ }
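+ /* Reserve the next descriptor slot; next_to_use is advanced under the Tx lock */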
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+ ring_num = tx_ring->next_to_use;
+ if (unlikely((ring_num + 1) == tx_ring->count))
+ tx_ring->next_to_use = 0;
+ else
+ tx_ring->next_to_use = ring_num + 1;
+
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ buffer_info = &tx_ring->buffer_info[ring_num];
+ tmp_skb = buffer_info->skb;
+
+ /* [Header:14][payload] ---> [Header:14][padding:2][payload] */
+ memcpy(tmp_skb->data, skb->data, ETH_HLEN);
+ tmp_skb->data[ETH_HLEN] = 0x00;
+ tmp_skb->data[ETH_HLEN + 1] = 0x00;
+ tmp_skb->len = skb->len;
+ memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
+ (skb->len - ETH_HLEN));
+ /*-- Set Buffer information --*/
+ buffer_info->length = tmp_skb->len;
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ pr_err("TX DMA map failed\n");
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ tx_ring->next_to_use = ring_num;
+ return;
+ }
+ buffer_info->mapped = true;
+ buffer_info->time_stamp = jiffies;
+
+ /*-- Set Tx descriptor --*/
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
+ tx_desc->buffer_addr = (buffer_info->dma);
+ tx_desc->length = (tmp_skb->len);
+ tx_desc->tx_words_eob = ((tmp_skb->len + 3));
+ tx_desc->tx_frame_ctrl = (frame_ctrl);
+ tx_desc->gbec_status = (DSC_INIT16);
+
+ if (unlikely(++ring_num == tx_ring->count))
+ ring_num = 0;
+
+ /* Update software pointer of TX descriptor */
+ iowrite32(tx_ring->dma +
+ (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
+ &hw->reg->TX_DSC_SW_P);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * pch_gbe_update_stats - Update the board statistics counters
+ * @adapter: Board private structure
+ */
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw_stats *stats = &adapter->stats;
+ unsigned long flags;
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
+ return;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* Update device status "adapter->stats" */
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_length_errors +
+ stats->tx_aborted_errors +
+ stats->tx_carrier_errors + stats->tx_timeout_count;
+
+ /* Update network device status "adapter->net_stats" */
+ netdev->stats.rx_packets = stats->rx_packets;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.rx_dropped = stats->rx_dropped;
+ netdev->stats.tx_packets = stats->tx_packets;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_dropped = stats->tx_dropped;
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = stats->multicast;
+ netdev->stats.collisions = stats->collisions;
+ /* Rx Errors */
+ netdev->stats.rx_errors = stats->rx_errors;
+ netdev->stats.rx_crc_errors = stats->rx_crc_errors;
+ netdev->stats.rx_frame_errors = stats->rx_frame_errors;
+ /* Tx Errors */
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
+ netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+/**
+ * pch_gbe_intr - Interrupt Handler
+ * @irq: Interrupt number
+ * @data: Pointer to a network interface device structure
+ * Returns
+ * - IRQ_HANDLED: Our interrupt
+ * - IRQ_NONE: Not our interrupt
+ */
+static irqreturn_t pch_gbe_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 int_st;
+ u32 int_en;
+
+ /* Check request status */
+ int_st = ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ /* No interrupt source for this device is pending */
+ if (unlikely(!int_st))
+ return IRQ_NONE; /* Not our interrupt. End processing. */
+ pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
+ if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
+ adapter->stats.intr_rx_frame_err_count++;
+ if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
+ adapter->stats.intr_rx_fifo_err_count++;
+ if (int_st & PCH_GBE_INT_RX_DMA_ERR)
+ adapter->stats.intr_rx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
+ adapter->stats.intr_tx_fifo_err_count++;
+ if (int_st & PCH_GBE_INT_TX_DMA_ERR)
+ adapter->stats.intr_tx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TCPIP_ERR)
+ adapter->stats.intr_tcpip_err_count++;
+ /* When Rx descriptor is empty */
+ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
+ adapter->stats.intr_rx_dsc_empty_count++;
+ pr_err("Rx descriptor is empty\n");
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
+ if (hw->mac.tx_fc_enable) {
+ /* Set Pause packet */
+ pch_gbe_mac_set_pause_packet(hw);
+ }
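+ /* If Rx/Tx completion interrupts are disabled there is nothing to hand to NAPI */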
+ if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
+ == 0) {
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* Rx DMA complete or Tx complete interrupt: schedule NAPI polling */
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
+ /* Enable only Rx Descriptor empty */
+ atomic_inc(&adapter->irq_sem);
+ int_en = ioread32(&hw->reg->INT_EN);
+ int_en &=
+ ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
+ iowrite32(int_en, &hw->reg->INT_EN);
+ /* Start polling for NAPI */
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
+ IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
+ return IRQ_HANDLED;
+}
+
+/**
+ * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: Cleaned count
+ */
+static void
+pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_rx_desc *rx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+
+ bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+ i = rx_ring->next_to_use;
+
+ while ((cleaned_count--)) {
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ } else {
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->stats.rx_alloc_buff_failed++;
+ break;
+ }
+ /* 64byte align */
+ skb_reserve(skb, PCH_GBE_DMA_ALIGN);
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+ }
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->stats.rx_alloc_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ buffer_info->mapped = true;
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = (buffer_info->dma);
+ rx_desc->gbec_status = DSC_INIT16;
+
+ pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
+ i, (unsigned long long)buffer_info->dma,
+ buffer_info->length);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ }
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
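+ /* Step back to the last descriptor actually filled before updating the HW software pointer */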
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+ iowrite32(rx_ring->dma +
+ (int)sizeof(struct pch_gbe_rx_desc) * i,
+ &hw->reg->RX_DSC_SW_P);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ */
+static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+ struct pch_gbe_tx_desc *tx_desc;
+
+ bufsz =
+ adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb_reserve(skb, PCH_GBE_DMA_ALIGN);
+ buffer_info->skb = skb;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ tx_desc->gbec_status = (DSC_INIT16);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_clean_tx - Reclaim resources after transmit completes
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ * Returns
+ * true: Cleaned the descriptor
+ * false: Not cleaned the descriptor
+ */
+static bool
+pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = false;
+
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+
+ i = tx_ring->next_to_clean;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
+ tx_desc->gbec_status, tx_desc->dma_status);
+
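+ /* Descriptors whose DSC_INIT16 marker has been cleared have been processed by the hardware */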
+ while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
+ pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
+ cleaned = true;
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+
+ if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Abort Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
+ ) {
+ adapter->stats.tx_carrier_errors++;
+ pr_err("Transfer Carrier Sense Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
+ ) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Collision Abort Error\n");
+ } else if ((tx_desc->gbec_status &
+ (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
+ PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
+ adapter->stats.collisions++;
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ pr_debug("Transfer Collision\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
+ ) {
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ }
+ if (buffer_info->mapped) {
+ pr_debug("unmap buffer_info->dma : %d\n", i);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ pr_debug("trim buffer_info->skb : %d\n", i);
+ skb_trim(buffer_info->skb, 0);
+ }
+ tx_desc->gbec_status = DSC_INIT16;
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+
+ /* limit the amount of cleanup done per call to avoid an endless Tx cleanup loop */
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+ break;
+ }
+ pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
+ cleaned_count);
+ /* Recover from running out of Tx resources in xmit_frame */
+ if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
+ netif_wake_queue(adapter->netdev);
+ adapter->stats.tx_restart_count++;
+ pr_debug("Tx wake queue\n");
+ }
+ spin_lock(&adapter->tx_queue_lock);
+ tx_ring->next_to_clean = i;
+ spin_unlock(&adapter->tx_queue_lock);
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+ return cleaned;
+}
+
+/**
+ * pch_gbe_clean_rx - Send received data up the network stack; legacy
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: Completed count
+ * @work_to_do: Request count
+ * Returns
+ * true: Cleaned the descriptor
+ * false: Not cleaned the descriptor
+ */
+static bool
+pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ struct pch_gbe_rx_desc *rx_desc;
+ u32 length;
+ unsigned char tmp_packet[ETH_HLEN];
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = false;
+ struct sk_buff *skb;
+ u8 dma_status;
+ u16 gbec_status;
+ u32 tcp_ip_status;
+ u8 skb_copy_flag = 0;
+ u8 skb_padding_flag = 0;
+
+ i = rx_ring->next_to_clean;
+
+ while (*work_done < work_to_do) {
+ /* Check Rx descriptor status */
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ if (rx_desc->gbec_status == DSC_INIT16)
+ break;
+ cleaned = true;
+ cleaned_count++;
+
+ dma_status = rx_desc->dma_status;
+ gbec_status = rx_desc->gbec_status;
+ tcp_ip_status = rx_desc->tcp_ip_status;
+ rx_desc->gbec_status = DSC_INIT16;
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+
+ /* unmap dma */
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+ /* Prefetch the packet */
+ prefetch(skb->data);
+
+ pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
+ "TCP:0x%08x] BufInf = 0x%p\n",
+ i, dma_status, gbec_status, tcp_ip_status,
+ buffer_info);
+ /* Error check */
+ if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Not Octal Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Nibble Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
+ adapter->stats.rx_crc_errors++;
+ pr_err("Receive CRC Error\n");
+ } else {
+ /* get receive length */
+ /* length convert[-3], padding[-2] */
+ length = (rx_desc->rx_words_eob) - 3 - 2;
+
+ /* Decide the data conversion method */
+ if (!adapter->rx_csum) {
+ /* [Header:14][payload] */
+ skb_padding_flag = 0;
+ skb_copy_flag = 1;
+ } else {
+ /* [Header:14][padding:2][payload] */
+ skb_padding_flag = 1;
+ if (length < copybreak)
+ skb_copy_flag = 1;
+ else
+ skb_copy_flag = 0;
+ }
+
+ /* Data conversion */
+ if (skb_copy_flag) { /* recycle skb */
+ struct sk_buff *new_skb;
+ new_skb =
+ netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (new_skb) {
+ if (!skb_padding_flag) {
+ skb_reserve(new_skb,
+ NET_IP_ALIGN);
+ }
+ memcpy(new_skb->data, skb->data,
+ length);
+ /* keep the original skb in buffer_info
+ * for reuse; hand the copy up the stack */
+ skb = new_skb;
+ } else if (!skb_padding_flag) {
+ /* cannot hand the frame up without a copy: drop it */
+ pr_err("New skb allocation Error\n");
+ goto drop;
+ }
+ } else {
+ buffer_info->skb = NULL;
+ }
+ if (skb_padding_flag) {
+ memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
+ memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
+ ETH_HLEN);
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ }
+
+ /* update status of driver */
+ adapter->stats.rx_bytes += length;
+ adapter->stats.rx_packets++;
+ if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
+ adapter->stats.multicast++;
+ /* Write skb metadata */
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
+ PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ napi_gro_receive(&adapter->napi, skb);
+ (*work_done)++;
+ pr_debug("Receive skb->ip_summed: %d length: %d\n",
+ skb->ip_summed, length);
+ }
+drop:
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+ if (++i == rx_ring->count)
+ i = 0;
+ }
+ rx_ring->next_to_clean = i;
+ if (cleaned_count)
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ return cleaned;
+}
+
+/**
+ * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_tx_desc *tx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vmalloc(size);
+ if (!tx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the buffer infomation\n");
+ return -ENOMEM;
+ }
+ memset(tx_ring->buffer_info, 0, size);
+
+ tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ vfree(tx_ring->buffer_info);
+ pr_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ spin_lock_init(&tx_ring->tx_lock);
+
+ for (desNo = 0; desNo < tx_ring->count; desNo++) {
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
+ tx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ tx_ring->desc, (unsigned long long)tx_ring->dma,
+ tx_ring->next_to_clean, tx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_rx_desc *rx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vmalloc(size);
+ if (!rx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(rx_ring->buffer_info, 0, size);
+ rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ vfree(rx_ring->buffer_info);
+ return -ENOMEM;
+ }
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ for (desNo = 0; desNo < rx_ring->count; desNo++) {
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
+ rx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ rx_ring->desc, (unsigned long long)rx_ring->dma,
+ rx_ring->next_to_clean, rx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_free_tx_resources - Free Tx Resources
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ */
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_tx_ring(adapter, tx_ring);
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_free_rx_resources - Free Rx Resources
+ * @adapter: Board private structure
+ * @rx_ring: Ring to clean the resources from
+ */
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_rx_ring(adapter, rx_ring);
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_request_irq - Allocate an interrupt line
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int flags;
+
+ flags = IRQF_SHARED;
+ adapter->have_msi = false;
+ err = pci_enable_msi(adapter->pdev);
+ pr_debug("call pci_enable_msi\n");
+ if (err) {
+ pr_debug("call pci_enable_msi - Error: %d\n", err);
+ } else {
+ flags = 0;
+ adapter->have_msi = true;
+ }
+ err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
+ flags, netdev->name, netdev);
+ if (err)
+ pr_err("Unable to allocate interrupt Error: %d\n", err);
+ pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
+ adapter->have_msi, flags, err);
+ return err;
+}
+
+static void pch_gbe_set_multi(struct net_device *netdev);
+/**
+ * pch_gbe_up - Up GbE network device
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_up(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ int err;
+
+ /* hardware has been reset, we need to reload some things */
+ pch_gbe_set_multi(netdev);
+
+ pch_gbe_setup_tctl(adapter);
+ pch_gbe_configure_tx(adapter);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+
+ err = pch_gbe_request_irq(adapter);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
+ pch_gbe_alloc_tx_buffers(adapter, tx_ring);
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
+ adapter->tx_queue_len = netdev->tx_queue_len;
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ napi_enable(&adapter->napi);
+ pch_gbe_irq_enable(adapter);
+ netif_start_queue(adapter->netdev);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_down - Down GbE network device
+ * @adapter: Board private structure
+ */
+void pch_gbe_down(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ napi_disable(&adapter->napi);
+ atomic_set(&adapter->irq_sem, 0);
+
+ pch_gbe_irq_disable(adapter);
+ pch_gbe_free_irq(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ pch_gbe_reset(adapter);
+ pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
+ pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+}
+
+/**
+ * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ /* Initialize the hardware-specific values */
+ if (pch_gbe_hal_setup_init_funcs(hw)) {
+ pr_err("Hardware Initialization Failure\n");
+ return -EIO;
+ }
+ if (pch_gbe_alloc_queues(adapter)) {
+ pr_err("Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&adapter->hw.miim_lock);
+ spin_lock_init(&adapter->tx_queue_lock);
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->ethtool_lock);
+ atomic_set(&adapter->irq_sem, 0);
+ pch_gbe_irq_disable(adapter);
+
+ pch_gbe_init_stats(adapter);
+
+ pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
+ (u32) adapter->rx_buffer_len,
+ hw->mac.min_frame_size, hw->mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_open - Called when a network interface is made active
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_open(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* allocate transmit descriptors */
+ err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+ /* allocate receive descriptors */
+ err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+ pch_gbe_hal_power_up_phy(hw);
+ err = pch_gbe_up(adapter);
+ if (err)
+ goto err_up;
+ pr_debug("Success End\n");
+ return 0;
+
+err_up:
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+err_setup_rx:
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+err_setup_tx:
+ pch_gbe_reset(adapter);
+ pr_err("Error End\n");
+ return err;
+}
+
+/**
+ * pch_gbe_stop - Disables a network interface
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successfully
+ */
+static int pch_gbe_stop(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_down(adapter);
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+ return 0;
+}
+
+/**
+ * pch_gbe_xmit_frame - Packet transmitting start
+ * @skb: Socket buffer structure
+ * @netdev: Network interface device structure
+ * Returns
+ * - NETDEV_TX_OK: Normal end
+ * - NETDEV_TX_BUSY: Error end
+ */
+static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ unsigned long flags;
+
+ if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
+ pr_err("Transfer length Error: skb len: %d > max: %d\n",
+ skb->len, adapter->hw.mac.max_frame_size);
+ dev_kfree_skb_any(skb);
+ adapter->stats.tx_length_errors++;
+ return NETDEV_TX_OK;
+ }
+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
+ if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
+ tx_ring->next_to_use, tx_ring->next_to_clean);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
+ /* CRC and ITAG insertion are not supported */
+ pch_gbe_tx_queue(adapter, tx_ring, skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * pch_gbe_get_stats - Get System Network Statistics
+ * @netdev: Network interface device structure
+ * Returns: The current stats
+ */
+static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
+{
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * pch_gbe_set_multi - Multicast and Promiscuous mode set
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_set_multi(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ u32 rctl;
+ int i;
+ int mc_count;
+
+ pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(&hw->reg->RX_MODE);
+ mc_count = netdev_mc_count(netdev);
+ if ((netdev->flags & IFF_PROMISC)) {
+ rctl &= ~PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else if ((netdev->flags & IFF_ALLMULTI)) {
+ /* accept all multicast frames: disable the multicast filter */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ if (mc_count >= PCH_GBE_MAR_ENTRIES) {
+ /* too many addresses to filter exactly: accept all multicast frames */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+ }
+ }
+ iowrite32(rctl, &hw->reg->RX_MODE);
+
+ if (mc_count >= PCH_GBE_MAR_ENTRIES)
+ return;
+ mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ }
+ pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
+ PCH_GBE_MAR_ENTRIES);
+ kfree(mta_list);
+
+ pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
+ ioread32(&hw->reg->RX_MODE), mc_count);
+}
+
+/**
+ * pch_gbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: Network interface device structure
+ * @addr: Pointer to an address structure
+ * Returns
+ * 0: Successfully
+ * -EADDRNOTAVAIL: Failed
+ */
+static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *skaddr = addr;
+ int ret_val;
+
+ if (!is_valid_ether_addr(skaddr->sa_data)) {
+ ret_val = -EADDRNOTAVAIL;
+ } else {
+ memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
+ pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ ret_val = 0;
+ }
+ pr_debug("ret_val : 0x%08x\n", ret_val);
+ pr_debug("dev_addr : %pM\n", netdev->dev_addr);
+ pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
+ pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
+ ioread32(&adapter->hw.reg->mac_adr[0].high),
+ ioread32(&adapter->hw.reg->mac_adr[0].low));
+ return ret_val;
+}
+
+/**
+ * pch_gbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: Network interface device structure
+ * @new_mtu: New value for maximum frame size
+ * Returns
+ * 0: Successfully
+ * -EINVAL: Failed
+ */
+static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int max_frame;
+
+ max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
+ pr_err("Invalid MTU setting\n");
+ return -EINVAL;
+ }
+ if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
+ else
+ adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ pch_gbe_reinit_locked(adapter);
+ else
+ pch_gbe_reset(adapter);
+
+ pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
+ max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
+ adapter->hw.mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_ioctl - Controls register through a MII interface
+ * @netdev: Network interface device structure
+ * @ifr: Pointer to ifr structure
+ * @cmd: Control command
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ pr_debug("cmd : 0x%04x\n", cmd);
+
+ return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+}
+
+/**
+ * pch_gbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_tx_timeout(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->stats.tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * pch_gbe_napi_poll - NAPI receive and transfer polling callback
+ * @napi: Pointer of polling device struct
+ * @budget: The maximum number of a packet
+ * Returns
+ * false: Exit the polling mode
+ * true: Continue the polling mode
+ */
+static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct pch_gbe_adapter *adapter =
+ container_of(napi, struct pch_gbe_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ int work_done = 0;
+ bool poll_end_flag = false;
+ bool cleaned = false;
+
+ pr_debug("budget : %d\n", budget);
+
+ /* If the link is down there is no Rx/Tx work to do; end polling immediately */
+ if (!netif_carrier_ok(netdev)) {
+ poll_end_flag = true;
+ } else {
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+
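+ /* If any Tx descriptors were cleaned, claim the full budget so NAPI keeps polling */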
+ if (cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if ((work_done < budget) || !netif_running(netdev))
+ poll_end_flag = true;
+ }
+
+ if (poll_end_flag) {
+ napi_complete(napi);
+ pch_gbe_irq_enable(adapter);
+ }
+
+ pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
+ poll_end_flag, work_done, budget);
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * pch_gbe_netpoll - Used by things like netconsole to send skbs
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_netpoll(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ pch_gbe_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops pch_gbe_netdev_ops = {
+ .ndo_open = pch_gbe_open,
+ .ndo_stop = pch_gbe_stop,
+ .ndo_start_xmit = pch_gbe_xmit_frame,
+ .ndo_get_stats = pch_gbe_get_stats,
+ .ndo_set_mac_address = pch_gbe_set_mac,
+ .ndo_tx_timeout = pch_gbe_tx_timeout,
+ .ndo_change_mtu = pch_gbe_change_mtu,
+ .ndo_do_ioctl = pch_gbe_ioctl,
+ .ndo_set_multicast_list = &pch_gbe_set_multi,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pch_gbe_netpoll,
+#endif
+};
+
+static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
+ pci_disable_device(pdev);
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (pci_enable_device(pdev)) {
+ pr_err("Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake up status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void pch_gbe_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (pch_gbe_up(adapter)) {
+ pr_debug("can't bring device back up after reset\n");
+ return;
+ }
+ }
+ netif_device_attach(netdev);
+}
+
+static int __pch_gbe_suspend(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 wufc = adapter->wake_up_evt;
+ int retval = 0;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
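+ /* If wake-up events are configured, leave the receiver configured so the MAC can detect them */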
+ if (wufc) {
+ pch_gbe_set_multi(netdev);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ } else {
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ }
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int pch_gbe_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ return __pch_gbe_suspend(pdev);
+}
+
+static int pch_gbe_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake on lan control and status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ if (netif_running(netdev))
+ pch_gbe_up(adapter);
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void pch_gbe_shutdown(struct pci_dev *pdev)
+{
+ __pch_gbe_suspend(pdev);
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static void pch_gbe_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ flush_scheduled_work();
+ unregister_netdev(netdev);
+
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.reg);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static int pch_gbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct net_device *netdev;
+ struct pch_gbe_adapter *adapter;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ret = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "ERR: No usable DMA "
+ "configuration, aborting\n");
+ goto err_disable_device;
+ }
+ }
+ }
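+
+ /*
+ * Note (illustrative, not part of this driver): on kernels that provide
+ * dma_set_mask_and_coherent(), the fallback above can be written as a
+ * single step with the same intent, preferring 64-bit DMA and then
+ * falling back to 32-bit:
+ *
+ * if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ * goto err_disable_device;
+ */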
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't reserve PCI I/O and memory resources\n");
+ goto err_disable_device;
+ }
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
+ if (!netdev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "ERR: Can't allocate and set up an Ethernet device\n");
+ goto err_release_pci;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
+ if (!adapter->hw.reg) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "Can't ioremap\n");
+ goto err_free_netdev;
+ }
+
+ netdev->netdev_ops = &pch_gbe_netdev_ops;
+ netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
+ netif_napi_add(netdev, &adapter->napi,
+ pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
+ netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
+ pch_gbe_set_ethtool_ops(netdev);
+
+ pch_gbe_mac_reset_hw(&adapter->hw);
+
+ /* setup the private structure */
+ ret = pch_gbe_sw_init(adapter);
+ if (ret)
+ goto err_iounmap;
+
+ /* Initialize PHY */
+ ret = pch_gbe_init_phy(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "PHY initialize error\n");
+ goto err_free_adapter;
+ }
+ pch_gbe_hal_get_bus_info(&adapter->hw);
+
+ /* Read the MAC address and store it in the private data */
+ ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ if (ret) {
+ dev_err(&pdev->dev, "MAC address Read Error\n");
+ goto err_free_adapter;
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ ret = -EIO;
+ goto err_free_adapter;
+ }
+ setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
+ (unsigned long)adapter);
+
+ INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
+
+ pch_gbe_check_options(adapter);
+
+ if (adapter->tx_csum)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
+ dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
+
+ /* reset the hardware with the new settings */
+ pch_gbe_reset(adapter);
+
+ ret = register_netdev(netdev);
+ if (ret)
+ goto err_free_adapter;
+ /* tell the stack to leave us alone until pch_gbe_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
+
+ device_set_wakeup_enable(&pdev->dev, 1);
+ return 0;
+
+err_free_adapter:
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_iounmap:
+ iounmap(adapter->hw.reg);
+err_free_netdev:
+ free_netdev(netdev);
+err_release_pci:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static const struct pci_device_id pch_gbe_pcidev_id[] = {
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ /* required last entry */
+ {0}
+};
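+
+/*
+ * Illustrative note: MODULE_DEVICE_TABLE(pci, ...) further below exports this
+ * table so userspace can autoload the module when a device with vendor
+ * 0x8086 (Intel), device ID PCI_DEVICE_ID_INTEL_IOH1_GBE and Ethernet class
+ * code 0x0200xx is enumerated. The vendor/device part could equally be
+ * written with the PCI_DEVICE() helper, e.g.
+ * { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOH1_GBE) }.
+ */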
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops pch_gbe_pm_ops = {
+ .suspend = pch_gbe_suspend,
+ .resume = pch_gbe_resume,
+ .freeze = pch_gbe_suspend,
+ .thaw = pch_gbe_resume,
+ .poweroff = pch_gbe_suspend,
+ .restore = pch_gbe_resume,
+};
+#endif
+
+static struct pci_error_handlers pch_gbe_err_handler = {
+ .error_detected = pch_gbe_io_error_detected,
+ .slot_reset = pch_gbe_io_slot_reset,
+ .resume = pch_gbe_io_resume
+};
+
+static struct pci_driver pch_gbe_pcidev = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_gbe_pcidev_id,
+ .probe = pch_gbe_probe,
+ .remove = pch_gbe_remove,
+#ifdef CONFIG_PM_OPS
+ .driver.pm = &pch_gbe_pm_ops,
+#endif
+ .shutdown = pch_gbe_shutdown,
+ .err_handler = &pch_gbe_err_handler
+};
+
+
+static int __init pch_gbe_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&pch_gbe_pcidev);
+ if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
+ if (copybreak == 0) {
+ pr_info("copybreak disabled\n");
+ } else {
+ pr_info("copybreak enabled for packets <= %u bytes\n",
+ copybreak);
+ }
+ }
+ return ret;
+}
+
+static void __exit pch_gbe_exit_module(void)
+{
+ pci_unregister_driver(&pch_gbe_pcidev);
+}
+
+module_init(pch_gbe_init_module);
+module_exit(pch_gbe_exit_module);
+
+MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
+
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/* pch_gbe_main.c */
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
new file mode 100644
index 00000000000..2510146fc56
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/**
+ * TxDescriptors - Transmit Descriptor Count
+ * @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
+ * @Default Value: PCH_GBE_DEFAULT_TXD
+ */
+static int TxDescriptors = OPTION_UNSET;
+module_param(TxDescriptors, int, 0);
+MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
+
+/**
+ * RxDescriptors - Receive Descriptor Count
+ * @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
+ * @Default Value: PCH_GBE_DEFAULT_RXD
+ */
+static int RxDescriptors = OPTION_UNSET;
+module_param(RxDescriptors, int, 0);
+MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
+
+/**
+ * Speed - User Specified Speed Override
+ * @Valid Range: 0, 10, 100, 1000
+ * - 0: auto-negotiate at all supported speeds
+ * - 10: only link at 10 Mbps
+ * - 100: only link at 100 Mbps
+ * - 1000: only link at 1000 Mbps
+ * @Default Value: 0
+ */
+static int Speed = OPTION_UNSET;
+module_param(Speed, int, 0);
+MODULE_PARM_DESC(Speed, "Speed setting");
+
+/**
+ * Duplex - User Specified Duplex Override
+ * @Valid Range: 0-2
+ * - 0: auto-negotiate for duplex
+ * - 1: only link at half duplex
+ * - 2: only link at full duplex
+ * @Default Value: 0
+ */
+static int Duplex = OPTION_UNSET;
+module_param(Duplex, int, 0);
+MODULE_PARM_DESC(Duplex, "Duplex setting");
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/**
+ * AutoNeg - Auto-negotiation Advertisement Override
+ * @Valid Range: 0x01-0x0F, 0x20-0x2F
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit 5: 1000 Mbps, Full duplex
+ * Bit 3: 100 Mbps, Full duplex
+ * Bit 2: 100 Mbps, Half duplex
+ * Bit 1: 10 Mbps, Full duplex
+ * Bit 0: 10 Mbps, Half duplex
+ * Bits 4, 6 and 7 are not used.
+ *
+ * @Default Value: 0x2F (copper)
+ */
+static int AutoNeg = OPTION_UNSET;
+module_param(AutoNeg, int, 0);
+MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");
+
+#define PHY_ADVERTISE_10_HALF 0x0001
+#define PHY_ADVERTISE_10_FULL 0x0002
+#define PHY_ADVERTISE_100_HALF 0x0004
+#define PHY_ADVERTISE_100_FULL 0x0008
+#define PHY_ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define PHY_ADVERTISE_1000_FULL 0x0020
+#define PCH_AUTONEG_ADVERTISE_DEFAULT 0x2F
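+
+/*
+ * Example (illustrative): the default advertisement mask 0x2F is simply the
+ * OR of the capability bits defined above:
+ *
+ * PHY_ADVERTISE_1000_FULL | PHY_ADVERTISE_100_FULL | PHY_ADVERTISE_100_HALF |
+ * PHY_ADVERTISE_10_FULL | PHY_ADVERTISE_10_HALF
+ * = 0x20 | 0x08 | 0x04 | 0x02 | 0x01 = 0x2F
+ */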
+
+/**
+ * FlowControl - User Specified Flow Control Override
+ * @Valid Range: 0-3
+ * - 0: No Flow Control
+ * - 1: Rx only, respond to PAUSE frames but do not generate them
+ * - 2: Tx only, generate PAUSE frames but ignore them on receive
+ * - 3: Full Flow Control Support
+ * @Default Value: Read flow control settings from the EEPROM
+ */
+static int FlowControl = OPTION_UNSET;
+module_param(FlowControl, int, 0);
+MODULE_PARM_DESC(FlowControl, "Flow Control setting");
+
+/*
+ * XsumRX - Receive Checksum Offload Enable/Disable
+ * @Valid Range: 0, 1
+ * - 0: disables all checksum offload
+ * - 1: enables receive IP/TCP/UDP checksum offload
+ * @Default Value: PCH_GBE_DEFAULT_RX_CSUM
+ */
+static int XsumRX = OPTION_UNSET;
+module_param(XsumRX, int, 0);
+MODULE_PARM_DESC(XsumRX, "Disable or enable Receive Checksum offload");
+
+#define PCH_GBE_DEFAULT_RX_CSUM true /* true or false */
+
+/*
+ * XsumTX - Transmit Checksum Offload Enable/Disable
+ * @Valid Range: 0, 1
+ * - 0: disables all checksum offload
+ * - 1: enables transmit IP/TCP/UDP checksum offload
+ * @Default Value: PCH_GBE_DEFAULT_TX_CSUM
+ */
+static int XsumTX = OPTION_UNSET;
+module_param(XsumTX, int, 0);
+MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
+
+#define PCH_GBE_DEFAULT_TX_CSUM true /* true or false */
+
+/**
+ * struct pch_gbe_option - Descriptor for a single driver option
+ * @type: Kind of validation to perform (enable, range or list option)
+ * @name: Option name used in log messages
+ * @err: Text appended to the message printed for an invalid value
+ * @def: Default value used when the option is unset or invalid
+ * @arg: Range limits or list of accepted values, depending on @type
+ */
+struct pch_gbe_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ const struct pch_gbe_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static const struct pch_gbe_opt_list speed_list[] = {
+ { 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }
+};
+
+static const struct pch_gbe_opt_list dplx_list[] = {
+ { 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }
+};
+
+#define AA "AutoNeg advertising "
+
+static const struct pch_gbe_opt_list an_list[] = {
+ { 0x01, AA "10/HD" },
+ { 0x02, AA "10/FD" },
+ { 0x03, AA "10/FD, 10/HD" },
+ { 0x04, AA "100/HD" },
+ { 0x05, AA "100/HD, 10/HD" },
+ { 0x06, AA "100/HD, 10/FD" },
+ { 0x07, AA "100/HD, 10/FD, 10/HD" },
+ { 0x08, AA "100/FD" },
+ { 0x09, AA "100/FD, 10/HD" },
+ { 0x0a, AA "100/FD, 10/FD" },
+ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+ { 0x0c, AA "100/FD, 100/HD" },
+ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x20, AA "1000/FD" },
+ { 0x21, AA "1000/FD, 10/HD" },
+ { 0x22, AA "1000/FD, 10/FD" },
+ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+ { 0x24, AA "1000/FD, 100/HD" },
+ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x28, AA "1000/FD, 100/FD" },
+ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }
+};
+
+static const struct pch_gbe_opt_list fc_list[] = {
+ { PCH_GBE_FC_NONE, "Flow Control Disabled" },
+ { PCH_GBE_FC_RX_PAUSE, "Flow Control Receive Only" },
+ { PCH_GBE_FC_TX_PAUSE, "Flow Control Transmit Only" },
+ { PCH_GBE_FC_FULL, "Flow Control Enabled" }
+};
+
+/**
+ * pch_gbe_validate_option - Validate option
+ * @value: value
+ * @opt: option
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_validate_option(int *value,
+ const struct pch_gbe_option *opt,
+ struct pch_gbe_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ pr_debug("%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ pr_debug("%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ pr_debug("%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ const struct pch_gbe_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ pr_debug("%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ pr_debug("Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
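+
+/*
+ * Example (illustrative, hypothetical limits): for a range_option with
+ * min = 8 and max = 4096, a value of 256 is accepted unchanged and 0 is
+ * returned; a value of 8192 is reported via pr_debug(), replaced by
+ * opt->def and -1 is returned. OPTION_UNSET (-1) always falls back to the
+ * default without a warning.
+ */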
+
+/**
+ * pch_gbe_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: Board private structure
+ */
+static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int speed, dplx;
+
+ { /* Speed */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "Speed",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(speed_list),
+ .p = speed_list } }
+ };
+ speed = Speed;
+ pch_gbe_validate_option(&speed, &opt, adapter);
+ }
+ { /* Duplex */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "Duplex",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(dplx_list),
+ .p = dplx_list } }
+ };
+ dplx = Duplex;
+ pch_gbe_validate_option(&dplx, &opt, adapter);
+ }
+
+ { /* Autoneg */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "AutoNeg",
+ .err = "parameter ignored",
+ .def = PCH_AUTONEG_ADVERTISE_DEFAULT,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(an_list),
+ .p = an_list} }
+ };
+ if (speed || dplx) {
+ pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
+ hw->phy.autoneg_advertised = opt.def;
+ } else {
+ hw->phy.autoneg_advertised = AutoNeg;
+ pch_gbe_validate_option(
+ (int *)(&hw->phy.autoneg_advertised),
+ &opt, adapter);
+ }
+ }
+
+ switch (speed + dplx) {
+ case 0:
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ if ((speed || dplx))
+ pr_debug("Speed and duplex autonegotiation enabled\n");
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case HALF_DUPLEX:
+ pr_debug("Half Duplex specified without Speed\n");
+ pr_debug("Using Autonegotiation at Half Duplex only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
+ PHY_ADVERTISE_100_HALF;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case FULL_DUPLEX:
+ pr_debug("Full Duplex specified without Speed\n");
+ pr_debug("Using Autonegotiation at Full Duplex only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
+ PHY_ADVERTISE_100_FULL |
+ PHY_ADVERTISE_1000_FULL;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ case SPEED_10:
+ pr_debug("10 Mbps Speed specified without Duplex\n");
+ pr_debug("Using Autonegotiation at 10 Mbps only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
+ PHY_ADVERTISE_10_FULL;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_10 + HALF_DUPLEX:
+ pr_debug("Forcing to 10 Mbps Half Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_10 + FULL_DUPLEX:
+ pr_debug("Forcing to 10 Mbps Full Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ case SPEED_100:
+ pr_debug("100 Mbps Speed specified without Duplex\n");
+ pr_debug("Using Autonegotiation at 100 Mbps only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
+ PHY_ADVERTISE_100_FULL;
+ hw->mac.link_speed = SPEED_100;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_100 + HALF_DUPLEX:
+ pr_debug("Forcing to 100 Mbps Half Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_100;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_100 + FULL_DUPLEX:
+ pr_debug("Forcing to 100 Mbps Full Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_100;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ case SPEED_1000:
+ pr_debug("1000 Mbps Speed specified without Duplex\n");
+ goto full_duplex_only;
+ case SPEED_1000 + HALF_DUPLEX:
+ pr_debug("Half Duplex is not supported at 1000 Mbps\n");
+ /* fall through */
+ case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+ pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
+ hw->mac.link_speed = SPEED_1000;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ default:
+ BUG();
+ }
+}
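+
+/*
+ * Example (illustrative): loading with "Speed=100 Duplex=2" hits the
+ * SPEED_100 + FULL_DUPLEX case above, so autonegotiation is disabled
+ * (mac.autoneg = fc_autoneg = 0) and the link is forced to 100 Mbps full
+ * duplex; leaving both options unset (or 0) keeps autonegotiation enabled
+ * at all advertised speeds.
+ */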
+
+/**
+ * pch_gbe_check_options - Range Checking for Command Line Parameters
+ * @adapter: Board private structure
+ */
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ { /* Transmit Descriptor Count */
+ static const struct pch_gbe_option opt = {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
+ .def = PCH_GBE_DEFAULT_TXD,
+ .arg = { .r = { .min = PCH_GBE_MIN_TXD,
+ .max = PCH_GBE_MAX_TXD } }
+ };
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ tx_ring->count = TxDescriptors;
+ pch_gbe_validate_option(&tx_ring->count, &opt, adapter);
+ tx_ring->count = roundup(tx_ring->count,
+ PCH_GBE_TX_DESC_MULTIPLE);
+ }
+ { /* Receive Descriptor Count */
+ static const struct pch_gbe_option opt = {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
+ .def = PCH_GBE_DEFAULT_RXD,
+ .arg = { .r = { .min = PCH_GBE_MIN_RXD,
+ .max = PCH_GBE_MAX_RXD } }
+ };
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ rx_ring->count = RxDescriptors;
+ pch_gbe_validate_option(&rx_ring->count, &opt, adapter);
+ rx_ring->count = roundup(rx_ring->count,
+ PCH_GBE_RX_DESC_MULTIPLE);
+ }
+ { /* Receive Checksum Offload Enable/Disable */
+ static const struct pch_gbe_option opt = {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = PCH_GBE_DEFAULT_RX_CSUM
+ };
+ adapter->rx_csum = XsumRX;
+ pch_gbe_validate_option((int *)(&adapter->rx_csum),
+ &opt, adapter);
+ }
+ { /* Transmit Checksum Offload Enable/Disable */
+ static const struct pch_gbe_option opt = {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = PCH_GBE_DEFAULT_TX_CSUM
+ };
+ adapter->tx_csum = XsumTX;
+ pch_gbe_validate_option((int *)(&adapter->tx_csum),
+ &opt, adapter);
+ }
+ { /* Flow Control */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = PCH_GBE_FC_DEFAULT,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
+ .p = fc_list } }
+ };
+ hw->mac.fc = FlowControl;
+ pch_gbe_validate_option((int *)(&hw->mac.fc),
+ &opt, adapter);
+ }
+
+ pch_gbe_check_copper_options(adapter);
+}
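+
+/*
+ * Usage example (illustrative): all of the options above are plain module
+ * parameters, so a fully specified load might look like
+ *
+ * modprobe pch_gbe TxDescriptors=256 RxDescriptors=256 Speed=1000 \
+ * Duplex=2 FlowControl=3 XsumRX=1 XsumTX=1
+ *
+ * Values outside the documented ranges are reported via pr_debug() and
+ * replaced by the defaults in pch_gbe_validate_option().
+ */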
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.c b/drivers/net/pch_gbe/pch_gbe_phy.c
new file mode 100644
index 00000000000..923a687acd3
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_phy.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+#include "pch_gbe_phy.h"
+
+#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Register (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Register (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Register */
+#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */
+#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */
+#define PHY_LED_CONTROL 0x18 /* LED Control Register */
+#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
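+
+/*
+ * Example (illustrative): a BMCR value that forces 100 Mbps full duplex
+ * with autonegotiation disabled combines the bits above as
+ * MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX (0x2000 | 0x0100 = 0x2100).
+ */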
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Phy Id Register (word 2) */
+#define PHY_REVISION_MASK 0x000F
+
+/* PHY Specific Control Register */
+#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800
+
+
+/* Default value of PHY register */
+#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */
+#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */
+#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */
+#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */
+#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */
+
+/**
+ * pch_gbe_phy_get_id - Retrieve the PHY ID and revision
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_phy_info *phy = &hw->phy;
+ s32 ret;
+ u16 phy_id1;
+ u16 phy_id2;
+
+ ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1);
+ if (ret)
+ return ret;
+ ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2);
+ if (ret)
+ return ret;
+ /*
+ * PHY_ID1: [bit15-0:ID(21-6)]
+ * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision]
+ */
+ phy->id = (u32)phy_id1;
+ phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
+ phy->revision = (u32) (phy_id2 & 0x000F);
+ pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n",
+ phy->id, phy->revision);
+ return 0;
+}
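+
+/*
+ * Example (illustrative, hypothetical register values): with
+ * phy_id1 = 0x0141 and phy_id2 = 0x0CC2 the code above yields
+ * phy->id = (0x0141 << 6) | ((0x0CC2 & 0xFC00) >> 10) = 0x5040 | 0x3 = 0x5043
+ * and phy->revision = 0x0CC2 & 0x000F = 0x2.
+ */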
+
+/**
+ * pch_gbe_phy_read_reg_miic - Read MII control register
+ * @hw: Pointer to the HW structure
+ * @offset: Register offset to be read
+ * @data: Pointer to the read data
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data)
+{
+ struct pch_gbe_phy_info *phy = &hw->phy;
+
+ if (offset > PHY_MAX_REG_ADDRESS) {
+ pr_err("PHY Address %d is out of range\n", offset);
+ return -EINVAL;
+ }
+ *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
+ offset, (u16)0);
+ return 0;
+}
+
+/**
+ * pch_gbe_phy_write_reg_miic - Write MII control register
+ * @hw: Pointer to the HW structure
+ * @offset: Register offset to be written
+ * @data: data to write to register at offset
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
+{
+ struct pch_gbe_phy_info *phy = &hw->phy;
+
+ if (offset > PHY_MAX_REG_ADDRESS) {
+ pr_err("PHY Address %d is out of range\n", offset);
+ return -EINVAL;
+ }
+ pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
+ offset, data);
+ return 0;
+}
+
+/**
+ * pch_gbe_phy_sw_reset - PHY software reset
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+{
+ u16 phy_ctrl;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &phy_ctrl);
+ phy_ctrl |= MII_CR_RESET;
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, phy_ctrl);
+ udelay(1);
+}
+
+/**
+ * pch_gbe_phy_hw_reset - PHY hardware reset
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw)
+{
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV,
+ PHY_AUTONEG_ADV_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX,
+ PHY_NEXT_PAGE_TX_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL,
+ PHY_PHYSP_CONTROL_DEFAULT);
+}
+
+/**
+ * pch_gbe_phy_power_up - restore link in case the phy was powered down
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_power_up(struct pch_gbe_hw *hw)
+{
+ u16 mii_reg;
+
+ mii_reg = 0;
+ /*
+ * Just clear the power-down bit to wake the PHY back up; according to
+ * the manual, the PHY retains its settings across a power-down/up cycle.
+ */
+ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * pch_gbe_phy_power_down - Power down PHY
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_power_down(struct pch_gbe_hw *hw)
+{
+ u16 mii_reg;
+
+ mii_reg = 0;
+ /*
+ * Power down the PHY so no link is implied when the interface is down.
+ * The PHY cannot be powered down if any of the following is TRUE:
+ * (a) WoL is enabled
+ * (b) AMT is active
+ */
+ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
+ mdelay(1);
+}
+
+/**
+ * pch_gbe_phy_set_rgmii - RGMII interface setting
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
+{
+ pch_gbe_phy_sw_reset(hw);
+}
+
+/**
+ * pch_gbe_phy_init_setting - PHY initial setting
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_adapter *adapter;
+ struct ethtool_cmd cmd;
+ int ret;
+ u16 mii_reg;
+
+ adapter = container_of(hw, struct pch_gbe_adapter, hw);
+ ret = mii_ethtool_gset(&adapter->mii, &cmd);
+ if (ret)
+ pr_err("Error: mii_ethtool_gset\n");
+
+ cmd.speed = hw->mac.link_speed;
+ cmd.duplex = hw->mac.link_duplex;
+ cmd.advertising = hw->phy.autoneg_advertised;
+ cmd.autoneg = hw->mac.autoneg;
+ pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
+ ret = mii_ethtool_sset(&adapter->mii, &cmd);
+ if (ret)
+ pr_err("Error: mii_ethtool_sset\n");
+
+ pch_gbe_phy_sw_reset(hw);
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
+ mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
+ pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
+
+}
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.h b/drivers/net/pch_gbe/pch_gbe_phy.h
new file mode 100644
index 00000000000..03264dc7b5e
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_phy.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _PCH_GBE_PHY_H_
+#define _PCH_GBE_PHY_H_
+
+#define PCH_GBE_PHY_REGS_LEN 32
+#define PCH_GBE_PHY_RESET_DELAY_US 10
+#define PCH_GBE_MAC_IFOP_RGMII
+
+s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
+s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
+s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
+void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
+void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
+void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
+void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
+void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
+void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+
+#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 56f3fc45dba..8dd03439d99 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1125,7 +1125,7 @@ static int netdrv_open(struct net_device *dev)
init_timer(&tp->timer);
tp->timer.expires = jiffies + 3 * HZ;
tp->timer.data = (unsigned long) dev;
- tp->timer.function = &netdrv_timer;
+ tp->timer.function = netdrv_timer;
add_timer(&tp->timer);
DPRINTK("EXIT, returning 0\n");
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 10ee106a161..042f6777e6b 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -69,6 +69,8 @@ earlier 3Com products.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -83,11 +85,9 @@ earlier 3Com products.
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
-#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/mii.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -239,7 +239,6 @@ static int el3_rx(struct net_device *dev, int worklimit);
static int el3_close(struct net_device *dev);
static void el3_tx_timeout(struct net_device *dev);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -279,14 +278,13 @@ static int tc574_probe(struct pcmcia_device *link)
lp->p_dev = link;
spin_lock_init(&lp->window_lock);
- link->io.NumPorts1 = 32;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
dev->netdev_ops = &el3_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->watchdog_timeo = TX_TIMEOUT;
return tc574_config(link);
@@ -338,10 +336,11 @@ static int tc574_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c574_config()\n");
- link->io.IOAddrLines = 16;
+ link->io_lines = 16;
+
for (i = j = 0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
@@ -357,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ioaddr = dev->base_addr;
@@ -376,8 +375,8 @@ static int tc574_config(struct pcmcia_device *link)
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
if (phys_addr[0] == htons(0x6060)) {
- printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx"
- "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
goto failed;
}
}
@@ -391,7 +390,7 @@ static int tc574_config(struct pcmcia_device *link)
outw(2<<11, ioaddr + RunnerRdCtrl);
mcr = inb(ioaddr + 2);
outw(0<<11, ioaddr + RunnerRdCtrl);
- printk(KERN_INFO " ASIC rev %d,", mcr>>3);
+ pr_info(" ASIC rev %d,", mcr>>3);
EL3WINDOW(3);
config = inl(ioaddr + Wn3_Config);
lp->default_media = (config & Xcvr) >> Xcvr_shift;
@@ -428,7 +427,7 @@ static int tc574_config(struct pcmcia_device *link)
}
}
if (phy > 32) {
- printk(KERN_NOTICE " No MII transceivers found!\n");
+ pr_notice(" No MII transceivers found!\n");
goto failed;
}
i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
@@ -444,18 +443,16 @@ static int tc574_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
- "hw_addr %pM.\n",
- dev->name, cardname, dev->base_addr, dev->irq,
- dev->dev_addr);
- printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
- 8 << config & Ram_size,
- ram_split[(config & Ram_split) >> Ram_split_shift],
- config & Autoselect ? "autoselect " : "");
+ netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
+ cardname, dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+ 8 << config & Ram_size,
+ ram_split[(config & Ram_split) >> Ram_split_shift],
+ config & Autoselect ? "autoselect " : "");
return 0;
@@ -502,14 +499,14 @@ static void dump_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
- printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
- "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
- inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
- inw(ioaddr+TxFree));
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n",
+ inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+ inw(ioaddr+TxFree));
EL3WINDOW(4);
- printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
- " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
- inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
EL3WINDOW(1);
}
@@ -523,7 +520,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd)
while (--i > 0)
if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
if (i == 0)
- printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n", dev->name, cmd);
+ netdev_notice(dev, "command 0x%04x did not complete!\n", cmd);
}
/* Read a word from the EEPROM using the regular EEPROM access register.
@@ -710,7 +707,7 @@ static int el3_open(struct net_device *dev)
netif_start_queue(dev);
tc574_reset(dev);
- lp->media.function = &media_check;
+ lp->media.function = media_check;
lp->media.data = (unsigned long) dev;
lp->media.expires = jiffies + HZ;
add_timer(&lp->media);
@@ -725,7 +722,7 @@ static void el3_tx_timeout(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ netdev_notice(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -848,8 +845,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
EL3WINDOW(4);
fifo_diag = inw(ioaddr + Wn4_FIFODiag);
EL3WINDOW(1);
- printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
- " register %04x.\n", dev->name, fifo_diag);
+ netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n",
+ fifo_diag);
if (fifo_diag & 0x0400) {
/* Tx overrun */
tc574_wait_for_completion(dev, TxReset);
@@ -903,7 +900,7 @@ static void media_check(unsigned long arg)
this, we can limp along even if the interrupt is blocked */
if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
if (!lp->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
el3_interrupt(dev->irq, dev);
@@ -926,23 +923,21 @@ static void media_check(unsigned long arg)
if (media != lp->media_status) {
if ((media ^ lp->media_status) & 0x0004)
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (lp->media_status & 0x0004) ? "lost" : "found");
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0004) ? "lost" : "found");
if ((media ^ lp->media_status) & 0x0020) {
lp->partner = 0;
if (lp->media_status & 0x0020) {
- printk(KERN_INFO "%s: autonegotiation restarted\n",
- dev->name);
+ netdev_info(dev, "autonegotiation restarted\n");
} else if (partner) {
partner &= lp->advertising;
lp->partner = partner;
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
- ((partner & 0x0180) ? "100" : "10"),
- ((partner & 0x0140) ? 'F' : 'H'));
+ netdev_info(dev, "autonegotiation complete: "
+ "%dbaseT-%cD selected\n",
+ (partner & 0x0180) ? 100 : 10,
+ (partner & 0x0140) ? 'F' : 'H');
} else {
- printk(KERN_INFO "%s: link partner did not autonegotiate\n",
- dev->name);
+ netdev_info(dev, "link partner did not autonegotiate\n");
}
EL3WINDOW(3);
@@ -952,10 +947,9 @@ static void media_check(unsigned long arg)
}
if (media & 0x0010)
- printk(KERN_INFO "%s: remote fault detected\n",
- dev->name);
+ netdev_info(dev, "remote fault detected\n");
if (media & 0x0002)
- printk(KERN_INFO "%s: jabber detected\n", dev->name);
+ netdev_info(dev, "jabber detected\n");
lp->media_status = media;
}
spin_unlock_irqrestore(&lp->window_lock, flags);
@@ -1065,16 +1059,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
return worklimit;
}
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "3c574_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
/* Provide ioctl() calls to examine the MII xcvr state. */
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index ce63c3773b4..35562a39577 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -19,6 +19,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "3c589_cs"
#define DRV_VERSION "1.162-ac"
@@ -41,7 +43,6 @@
#include <linux/bitops.h>
#include <linux/jiffies.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -214,8 +215,8 @@ static int tc589_probe(struct pcmcia_device *link)
lp->p_dev = link;
spin_lock_init(&lp->lock);
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -265,7 +266,7 @@ static int tc589_config(struct pcmcia_device *link)
__be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
unsigned int ioaddr;
- char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
size_t len;
@@ -274,16 +275,16 @@ static int tc589_config(struct pcmcia_device *link)
phys_addr = (__be16 *)dev->dev_addr;
/* Is this a 3c562? */
if (link->manf_id != MANFID_3COM)
- printk(KERN_INFO "3c589_cs: hmmm, is this really a "
- "3Com card??\n");
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
multi = (link->card_id == PRODID_3COM_3C562);
+ link->io_lines = 16;
+
/* For the 3c562, the base address must be xx00-xx7f */
- link->io.IOAddrLines = 16;
for (i = j = 0; j < 0x400; j += 0x10) {
if (multi && (j & 0x80)) continue;
- link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
@@ -299,7 +300,7 @@ static int tc589_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ioaddr = dev->base_addr;
EL3WINDOW(0);
@@ -315,8 +316,8 @@ static int tc589_config(struct pcmcia_device *link)
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
if (phys_addr[0] == htons(0x6060)) {
- printk(KERN_ERR "3c589_cs: IO port conflict at 0x%03lx"
- "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
goto failed;
}
}
@@ -330,12 +331,12 @@ static int tc589_config(struct pcmcia_device *link)
if ((if_port >= 0) && (if_port <= 3))
dev->if_port = if_port;
else
- printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
+ dev_err(&link->dev, "invalid if_port requested\n");
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
+ dev_err(&link->dev, "register_netdev() failed\n");
goto failed;
}
@@ -537,7 +538,7 @@ static int el3_open(struct net_device *dev)
tc589_reset(dev);
init_timer(&lp->media);
- lp->media.function = &media_check;
+ lp->media.function = media_check;
lp->media.data = (unsigned long) dev;
lp->media.expires = jiffies + HZ;
add_timer(&lp->media);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 33525bf2a3d..3f61fde70d7 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -24,6 +24,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -32,14 +34,12 @@
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include "../8390.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -87,7 +87,6 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
static struct net_device_stats *get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void axnet_tx_timeout(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(u_long arg);
static void axnet_reset_8390(struct net_device *dev);
@@ -172,7 +171,6 @@ static int axnet_probe(struct pcmcia_device *link)
dev->netdev_ops = &axnet_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->watchdog_timeo = TX_TIMEOUT;
return axnet_config(link);
@@ -260,28 +258,30 @@ static int get_prom(struct pcmcia_device *link)
static int try_io_port(struct pcmcia_device *link)
{
int j, ret;
- if (link->io.NumPorts1 == 32) {
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ if (link->resource[0]->end == 32) {
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
/* for master/slave multifunction cards */
- if (link->io.NumPorts2 > 0)
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (link->resource[1]->end > 0)
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
} else {
/* This should be two 16-port windows */
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
}
- if (link->io.BasePort1 == 0) {
- link->io.IOAddrLines = 16;
+ if (link->resource[0]->start == 0) {
for (j = 0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- link->io.BasePort2 = (j ^ 0x300) + 0x10;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ link->resource[1]->start = (j ^ 0x300) + 0x10;
+ link->io_lines = 16;
+ ret = pcmcia_request_io(link);
if (ret == 0)
return ret;
}
return ret;
} else {
- return pcmcia_request_io(link, &link->io);
+ return pcmcia_request_io(link);
}
}
@@ -302,15 +302,15 @@ static int axnet_configcheck(struct pcmcia_device *p_dev,
network function with window 0, and serial with window 1 */
if (io->nwin > 1) {
i = (io->win[1].len > io->win[0].len);
- p_dev->io.BasePort2 = io->win[1-i].base;
- p_dev->io.NumPorts2 = io->win[1-i].len;
+ p_dev->resource[1]->start = io->win[1-i].base;
+ p_dev->resource[1]->end = io->win[1-i].len;
} else {
- i = p_dev->io.NumPorts2 = 0;
+ i = p_dev->resource[1]->end = 0;
}
- p_dev->io.BasePort1 = io->win[i].base;
- p_dev->io.NumPorts1 = io->win[i].len;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- if (p_dev->io.NumPorts1 + p_dev->io.NumPorts2 >= 32)
+ p_dev->resource[0]->start = io->win[i].base;
+ p_dev->resource[0]->end = io->win[i].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
return try_io_port(p_dev);
return -ENODEV;
@@ -333,7 +333,7 @@ static int axnet_config(struct pcmcia_device *link)
if (!link->irq)
goto failed;
- if (link->io.NumPorts2 == 8) {
+ if (resource_size(link->resource[1]) == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
}
@@ -343,11 +343,11 @@ static int axnet_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
if (!get_prom(link)) {
- printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n");
- printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n");
+ pr_notice("this is not an AX88190 card!\n");
+ pr_notice("use pcnet_cs instead.\n");
goto failed;
}
@@ -356,10 +356,10 @@ static int axnet_config(struct pcmcia_device *link)
ei_status.tx_start_page = AXNET_START_PG;
ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
ei_status.stop_page = AXNET_STOP_PG;
- ei_status.reset_8390 = &axnet_reset_8390;
- ei_status.get_8390_hdr = &get_8390_hdr;
- ei_status.block_input = &block_input;
- ei_status.block_output = &block_output;
+ ei_status.reset_8390 = axnet_reset_8390;
+ ei_status.get_8390_hdr = get_8390_hdr;
+ ei_status.block_input = block_input;
+ ei_status.block_output = block_output;
if (inb(dev->base_addr + AXNET_TEST) != 0)
info->flags |= IS_AX88790;
@@ -379,8 +379,7 @@ static int axnet_config(struct pcmcia_device *link)
/* Maybe PHY is in power down mode. (PPD_SET = 1)
Bit 2 of CCSR is active low. */
if (i == 32) {
- conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
- pcmcia_access_configuration_register(link, &reg);
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -393,19 +392,18 @@ static int axnet_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
- dev->base_addr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
+ ((info->flags & IS_AX88790) ? 7 : 1),
+ dev->base_addr, dev->irq, dev->dev_addr);
if (info->phy_id != -1) {
- dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j);
+ netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
+ info->phy_id, j);
} else {
- printk(KERN_NOTICE " No MII transceivers found!\n");
+ netdev_notice(dev, " No MII transceivers found!\n");
}
return 0;
@@ -532,7 +530,7 @@ static int axnet_open(struct net_device *dev)
info->link_status = 0x00;
init_timer(&info->watchdog);
- info->watchdog.function = &ei_watchdog;
+ info->watchdog.function = ei_watchdog;
info->watchdog.data = (u_long)dev;
info->watchdog.expires = jiffies + HZ;
add_timer(&info->watchdog);
@@ -585,8 +583,7 @@ static void axnet_reset_8390(struct net_device *dev)
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
if (i == 100)
- printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n",
- dev->name);
+ netdev_err(dev, "axnet_reset_8390() did not complete\n");
} /* axnet_reset_8390 */
@@ -613,7 +610,7 @@ static void ei_watchdog(u_long arg)
this, we can limp along even if the interrupt is blocked */
if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
if (!info->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
ei_irq_wrapper(dev->irq, dev);
info->fast_poll = HZ;
}
@@ -628,7 +625,7 @@ static void ei_watchdog(u_long arg)
goto reschedule;
link = mdio_read(mii_addr, info->phy_id, 1);
if (!link || (link == 0xffff)) {
- printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ netdev_info(dev, "MII is missing!\n");
info->phy_id = -1;
goto reschedule;
}
@@ -636,18 +633,14 @@ static void ei_watchdog(u_long arg)
link &= 0x0004;
if (link != info->link_status) {
u_short p = mdio_read(mii_addr, info->phy_id, 5);
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (link) ? "found" : "lost");
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
if (link) {
info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
if (p)
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
- ((p & 0x0180) ? "100" : "10"),
- ((p & 0x0140) ? 'F' : 'H'));
+ netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n",
+ (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
else
- printk(KERN_INFO "%s: link partner did not autonegotiate\n",
- dev->name);
+ netdev_info(dev, "link partner did not autonegotiate\n");
AX88190_init(dev, 1);
}
info->link_status = link;
@@ -658,16 +651,6 @@ reschedule:
add_timer(&info->watchdog);
}
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "axnet_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
/*====================================================================*/
static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -855,9 +838,6 @@ module_exit(exit_axnet_cs);
*/
-static const char version_8390[] = KERN_INFO \
- "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
-
#include <linux/bitops.h>
#include <asm/irq.h>
#include <linux/fcntl.h>
@@ -1004,9 +984,11 @@ static void axnet_tx_timeout(struct net_device *dev)
isr = inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+ netdev_printk(KERN_DEBUG, dev,
+ "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
if (!isr && !dev->stats.tx_packets)
{
@@ -1076,22 +1058,28 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
if (ei_debug && ei_local->tx2 > 0)
- printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ netdev_printk(KERN_DEBUG, dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
- printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ netdev_printk(KERN_DEBUG, dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
}
else
{ /* We should never get here. */
if (ei_debug)
- printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netdev_printk(KERN_DEBUG, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1179,23 +1167,26 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&ei_local->page_lock, flags);
- if (ei_local->irqlock)
- {
+ if (ei_local->irqlock) {
#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ const char *msg;
/* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
- dev->name, inb_p(e8390_base + EN0_ISR),
- inb_p(e8390_base + EN0_IMR));
+ if (ei_local->irqlock)
+ msg = "Interrupted while interrupts are masked!";
+ else
+ msg = "Reentering the interrupt handler!";
+ netdev_info(dev, "%s, isr=%#2x imr=%#2x\n",
+ msg,
+ inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
#endif
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return IRQ_NONE;
}
if (ei_debug > 3)
- printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
- inb_p(e8390_base + EN0_ISR));
+ netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
outb_p(0x00, e8390_base + EN0_ISR);
ei_local->irqlock = 1;
@@ -1206,7 +1197,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
{
if (!netif_running(dev) || (interrupts == 0xff)) {
if (ei_debug > 1)
- printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ netdev_warn(dev,
+ "interrupt from stopped card\n");
outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
break;
@@ -1249,11 +1241,12 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
{
/* 0xFF is valid for a card removal */
if(interrupts!=0xFF)
- printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
- dev->name, interrupts);
+ netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ interrupts);
outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
- printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ netdev_warn(dev, "unknown interrupt %#2x\n",
+ interrupts);
outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
}
}
@@ -1287,18 +1280,19 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ netdev_printk(KERN_DEBUG, dev,
+ "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
- printk("excess-collisions ");
+ pr_cont(" excess-collisions");
if (txsr & ENTSR_ND)
- printk("non-deferral ");
+ pr_cont(" non-deferral");
if (txsr & ENTSR_CRS)
- printk("lost-carrier ");
+ pr_cont(" lost-carrier");
if (txsr & ENTSR_FU)
- printk("FIFO-underrun ");
+ pr_cont(" FIFO-underrun");
if (txsr & ENTSR_CDH)
- printk("lost-heartbeat ");
- printk("\n");
+ pr_cont(" lost-heartbeat");
+ pr_cont("\n");
#endif
if (tx_was_aborted)
@@ -1335,8 +1329,9 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx1 < 0)
{
if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
- printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx1);
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx1);
ei_local->tx1 = 0;
if (ei_local->tx2 > 0)
{
@@ -1351,8 +1346,9 @@ static void ei_tx_intr(struct net_device *dev)
else if (ei_local->tx2 < 0)
{
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx2);
+ netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
ei_local->tx2 = 0;
if (ei_local->tx1 > 0)
{
@@ -1365,8 +1361,9 @@ static void ei_tx_intr(struct net_device *dev)
else
ei_local->lasttx = 10, ei_local->txing = 0;
}
-// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-// dev->name, ei_local->lasttx);
+// else
+// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
+// ei_local->lasttx);
/* Minimize Tx latency: update the statistics after we restart TXing. */
if (status & ENTSR_COL)
@@ -1429,8 +1426,8 @@ static void ei_receive(struct net_device *dev)
is that some clones crash in roughly the same way.
*/
if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
- dev->name, this_frame, ei_local->current_page);
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -1446,9 +1443,10 @@ static void ei_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1518)
{
if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
- dev->name, rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netdev_printk(KERN_DEBUG, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
@@ -1460,8 +1458,9 @@ static void ei_receive(struct net_device *dev)
if (skb == NULL)
{
if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
- dev->name, pkt_len);
+ netdev_printk(KERN_DEBUG, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
}
@@ -1481,9 +1480,10 @@ static void ei_receive(struct net_device *dev)
else
{
if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- dev->name, rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netdev_printk(KERN_DEBUG, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -1493,8 +1493,8 @@ static void ei_receive(struct net_device *dev)
/* This _should_ never happen: it's here for avoiding bad clones. */
if (next_frame >= ei_local->stop_page) {
- printk("%s: next frame inconsistency, %#2x\n", dev->name,
- next_frame);
+ netdev_info(dev, "next frame inconsistency, %#2x\n",
+ next_frame);
next_frame = ei_local->rx_start_page;
}
ei_local->current_page = next_frame;
@@ -1529,7 +1529,7 @@ static void ei_rx_overrun(struct net_device *dev)
outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -1726,7 +1726,7 @@ static void AX88190_init(struct net_device *dev, int startp)
{
outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
- printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+ netdev_err(dev, "Hw. address read/write mismap %d\n", i);
}
outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
@@ -1763,8 +1763,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
if (inb_p(e8390_base) & E8390_TRANS)
{
- printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
- dev->name);
+ netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
return;
}
outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
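For reference, a minimal sketch of the logging conversion applied throughout the axnet_cs changes above (illustrative only -- example_show_tx_error() and the 0x08 status bit are placeholders, not code from the driver):

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_show_tx_error(struct net_device *dev, int txsr)
{
	/*
	 * Before: printk(KERN_DEBUG "%s: transmitter error (%#2x): ",
	 *                dev->name, txsr);
	 * netdev_printk() adds the driver and interface name itself,
	 * so the explicit "%s"/dev->name pair and the level string in
	 * the format are dropped.
	 */
	netdev_printk(KERN_DEBUG, dev, "transmitter error (%#2x):", txsr);
	if (txsr & 0x08)		/* placeholder status bit */
		pr_cont(" lost-carrier");
	pr_cont("\n");			/* pr_cont() continues the unterminated line */
}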
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 5643f94541b..f065c35cd4b 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -43,7 +43,6 @@
#include <linux/arcdevice.h>
#include <linux/com20020.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -53,23 +52,23 @@
#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
-#ifdef DEBUG
static void regdump(struct net_device *dev)
{
+#ifdef DEBUG
int ioaddr = dev->base_addr;
int count;
- printk("com20020 register dump:\n");
+ netdev_dbg(dev, "register dump:\n");
for (count = ioaddr; count < ioaddr + 16; count++)
{
if (!(count % 16))
- printk("\n%04X: ", count);
- printk("%02X ", inb(count));
+ pr_cont("%04X:", count);
+ pr_cont(" %02X", inb(count));
}
- printk("\n");
+ pr_cont("\n");
- printk("buffer0 dump:\n");
+ netdev_dbg(dev, "buffer0 dump:\n");
/* set up the address register */
count = 0;
outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
@@ -78,19 +77,15 @@ static void regdump(struct net_device *dev)
for (count = 0; count < 256+32; count++)
{
if (!(count % 16))
- printk("\n%04X: ", count);
+ pr_cont("%04X:", count);
/* copy the data */
- printk("%02X ", inb(_MEMDATA));
+ pr_cont(" %02X", inb(_MEMDATA));
}
- printk("\n");
+ pr_cont("\n");
+#endif
}
-#else
-
-static inline void regdump(struct net_device *dev) { }
-
-#endif
/*====================================================================*/
@@ -159,9 +154,8 @@ static int com20020_probe(struct pcmcia_device *p_dev)
/* fill in our module parameters as defaults */
dev->dev_addr[0] = node;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.NumPorts1 = 16;
- p_dev->io.IOAddrLines = 16;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[0]->end = 16;
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -246,20 +240,24 @@ static int com20020_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "com20020_config\n");
- dev_dbg(&link->dev, "baseport1 is %Xh\n", link->io.BasePort1);
+ dev_dbg(&link->dev, "baseport1 is %Xh\n",
+ (unsigned int) link->resource[0]->start);
+
i = -ENODEV;
- if (!link->io.BasePort1)
+ link->io_lines = 16;
+
+ if (!link->resource[0]->start)
{
for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10)
{
- link->io.BasePort1 = ioaddr;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = ioaddr;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
}
else
- i = pcmcia_request_io(link, &link->io);
+ i = pcmcia_request_io(link);
if (i != 0)
{
@@ -267,7 +265,7 @@ static int com20020_config(struct pcmcia_device *link)
goto failed;
}
- ioaddr = dev->base_addr = link->io.BasePort1;
+ ioaddr = dev->base_addr = link->resource[0]->start;
dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
dev_dbg(&link->dev, "request IRQ %d\n",
@@ -299,13 +297,13 @@ static int com20020_config(struct pcmcia_device *link)
i = com20020_found(dev, 0); /* calls register_netdev */
if (i != 0) {
- dev_printk(KERN_NOTICE, &link->dev,
- "com20020_cs: com20020_found() failed\n");
+ dev_notice(&link->dev,
+ "com20020_found() failed\n");
goto failed;
}
- dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
- dev->name, dev->base_addr, dev->irq);
+ netdev_dbg(dev, "port %#3lx, irq %d\n",
+ dev->base_addr, dev->irq);
return 0;
failed:
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7c27c50211a..8f26d548d1b 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -28,6 +28,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "fmvj18x_cs"
#define DRV_VERSION "2.9"
@@ -49,7 +51,6 @@
#include <linux/ioport.h>
#include <linux/crc32.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -249,9 +250,8 @@ static int fmvj18x_probe(struct pcmcia_device *link)
lp->base = NULL;
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 32;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 5;
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
@@ -289,13 +289,13 @@ static int mfc_try_io_port(struct pcmcia_device *link)
{ 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
for (i = 0; i < 5; i++) {
- link->io.BasePort2 = serial_base[i];
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- if (link->io.BasePort2 == 0) {
- link->io.NumPorts2 = 0;
- printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
+ link->resource[1]->start = serial_base[i];
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ if (link->resource[1]->start == 0) {
+ link->resource[1]->end = 0;
+ pr_notice("out of resource for serial\n");
}
- ret = pcmcia_request_io(link, &link->io);
+ ret = pcmcia_request_io(link);
if (ret == 0)
return ret;
}
@@ -311,12 +311,12 @@ static int ungermann_try_io_port(struct pcmcia_device *link)
0x380,0x3c0 only for ioport.
*/
for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
- link->io.BasePort1 = ioaddr;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = ioaddr;
+ ret = pcmcia_request_io(link);
if (ret == 0) {
/* calculate ConfigIndex value */
link->conf.ConfigIndex =
- ((link->io.BasePort1 & 0x0f0) >> 3) | 0x22;
+ ((link->resource[0]->start & 0x0f0) >> 3) | 0x22;
return ret;
}
}
@@ -346,6 +346,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "fmvj18x_config\n");
+ link->io_lines = 5;
+
len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
kfree(buf);
@@ -364,20 +366,20 @@ static int fmvj18x_config(struct pcmcia_device *link)
/* MultiFunction Card */
link->conf.ConfigBase = 0x800;
link->conf.ConfigIndex = 0x47;
- link->io.NumPorts2 = 8;
+ link->resource[1]->end = 8;
}
break;
case MANFID_NEC:
cardtype = NEC; /* MultiFunction Card */
link->conf.ConfigBase = 0x800;
link->conf.ConfigIndex = 0x47;
- link->io.NumPorts2 = 8;
+ link->resource[1]->end = 8;
break;
case MANFID_KME:
cardtype = KME; /* MultiFunction Card */
link->conf.ConfigBase = 0x800;
link->conf.ConfigIndex = 0x47;
- link->io.NumPorts2 = 8;
+ link->resource[1]->end = 8;
break;
case MANFID_CONTEC:
cardtype = CONTEC;
@@ -418,14 +420,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
}
- if (link->io.NumPorts2 != 0) {
+ if (link->resource[1]->end != 0) {
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
ret = ungermann_try_io_port(link);
if (ret != 0) goto failed;
} else {
- ret = pcmcia_request_io(link, &link->io);
+ ret = pcmcia_request_io(link);
if (ret)
goto failed;
}
@@ -437,9 +439,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
- if (link->io.BasePort2 != 0) {
+ if (resource_size(link->resource[1]) != 0) {
ret = fmvj18x_setup_mfc(link);
if (ret != 0) goto failed;
}
@@ -503,7 +505,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
case XXX10304:
/* Read MACID from Buggy CIS */
if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
- printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
+ pr_notice("unable to read hardware net address\n");
goto failed;
}
for (i = 0 ; i < 6; i++) {
@@ -524,15 +526,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
/* print current configuration */
- printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
- dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n",
+ card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+ dev->base_addr, dev->irq, dev->dev_addr);
return 0;
@@ -545,7 +546,6 @@ failed:
static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
{
win_req_t req;
- memreq_t mem;
u_char __iomem *base;
int i, j;
@@ -558,9 +558,7 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
return -1;
base = ioremap(req.Base, req.Size);
- mem.Page = 0;
- mem.CardOffset = 0;
- pcmcia_map_mem_page(link, link->win, &mem);
+ pcmcia_map_mem_page(link, link->win, 0);
/*
* MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
@@ -594,7 +592,6 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
static int fmvj18x_setup_mfc(struct pcmcia_device *link)
{
win_req_t req;
- memreq_t mem;
int i;
struct net_device *dev = link->priv;
unsigned int ioaddr;
@@ -610,13 +607,11 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
lp->base = ioremap(req.Base, req.Size);
if (lp->base == NULL) {
- printk(KERN_NOTICE "fmvj18x_cs: ioremap failed\n");
+ netdev_notice(dev, "ioremap failed\n");
return -1;
}
- mem.Page = 0;
- mem.CardOffset = 0;
- i = pcmcia_map_mem_page(link, link->win, &mem);
+ i = pcmcia_map_mem_page(link, link->win, 0);
if (i != 0) {
iounmap(lp->base);
lp->base = NULL;
@@ -806,17 +801,16 @@ static void fjn_tx_timeout(struct net_device *dev)
struct local_info_t *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
- printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
- dev->name, htons(inw(ioaddr + TX_STATUS)),
- inb(ioaddr + TX_STATUS) & F_TMT_RDY
- ? "IRQ conflict" : "network cable problem");
- printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x "
- "%04x %04x %04x %04x %04x.\n",
- dev->name, htons(inw(ioaddr + 0)),
- htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
- htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
- htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
- htons(inw(ioaddr +14)));
+ netdev_notice(dev, "transmit timed out with status %04x, %s?\n",
+ htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & F_TMT_RDY
+ ? "IRQ conflict" : "network cable problem");
+ netdev_notice(dev, "timeout registers: %04x %04x %04x "
+ "%04x %04x %04x %04x %04x.\n",
+ htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)),
+ htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)),
+ htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)),
+ htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14)));
dev->stats.tx_errors++;
/* ToDo: We should try to restart the adaptor... */
local_irq_disable();
@@ -851,13 +845,13 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
unsigned char *buf = skb->data;
if (length > ETH_FRAME_LEN) {
- printk(KERN_NOTICE "%s: Attempting to send a large packet"
- " (%d bytes).\n", dev->name, length);
+ netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n",
+ length);
return NETDEV_TX_BUSY;
}
- pr_debug("%s: Transmitting a packet of length %lu.\n",
- dev->name, (unsigned long)skb->len);
+ netdev_dbg(dev, "Transmitting a packet of length %lu\n",
+ (unsigned long)skb->len);
dev->stats.tx_bytes += skb->len;
/* Disable both interrupts. */
@@ -910,7 +904,7 @@ static void fjn_reset(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
int i;
- pr_debug("fjn_reset(%s) called.\n",dev->name);
+ netdev_dbg(dev, "fjn_reset() called\n");
/* Reset controller */
if( sram_config == 0 )
@@ -994,8 +988,8 @@ static void fjn_rx(struct net_device *dev)
while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
u_short status = inw(ioaddr + DATAPORT);
- pr_debug("%s: Rxing packet mode %02x status %04x.\n",
- dev->name, inb(ioaddr + RX_MODE), status);
+ netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n",
+ inb(ioaddr + RX_MODE), status);
#ifndef final_version
if (status == 0) {
outb(F_SKP_PKT, ioaddr + RX_SKIP);
@@ -1014,16 +1008,16 @@ static void fjn_rx(struct net_device *dev)
struct sk_buff *skb;
if (pkt_len > 1550) {
- printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
- "large packet, size %d.\n", dev->name, pkt_len);
+ netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n",
+ pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_errors++;
break;
}
skb = dev_alloc_skb(pkt_len+2);
if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping "
- "packet (len %d).\n", dev->name, pkt_len);
+ netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
+ pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_dropped++;
break;
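The same io_req_t to resource[] conversion recurs in every driver in this patch; as a standalone sketch, with placeholder values rather than settings from any real CIS:

#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int example_request_ports(struct pcmcia_device *p_dev)
{
	/* was: p_dev->io.Attributes1 / NumPorts1 / IOAddrLines */
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
	p_dev->resource[0]->end = 32;	/* window length, in ports */
	p_dev->io_lines = 5;		/* was io.IOAddrLines */

	/* pcmcia_request_io() now takes only the device */
	return pcmcia_request_io(p_dev);
}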
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 67ee9851a8e..dc85282193b 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -45,6 +45,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
@@ -52,12 +54,10 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/ibmtr.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -108,16 +108,6 @@ typedef struct ibmtr_dev_t {
struct tok_info *ti;
} ibmtr_dev_t;
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "ibmtr_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
ibmtr_dev_t *info = dev_id;
struct net_device *dev = info->dev;
@@ -152,17 +142,14 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
link->priv = info;
info->ti = netdev_priv(dev);
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 4;
- link->io.IOAddrLines = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 4;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
info->dev = dev;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
-
return ibmtr_config(link);
} /* ibmtr_attach */
@@ -213,26 +200,26 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
struct net_device *dev = info->dev;
struct tok_info *ti = netdev_priv(dev);
win_req_t req;
- memreq_t mem;
int i, ret;
dev_dbg(&link->dev, "ibmtr_config\n");
link->conf.ConfigIndex = 0x61;
+ link->io_lines = 16;
/* Determine if this is PRIMARY or ALTERNATE. */
/* Try PRIMARY card at 0xA20-0xA23 */
- link->io.BasePort1 = 0xA20;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = 0xA20;
+ i = pcmcia_request_io(link);
if (i != 0) {
/* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
- link->io.BasePort1 = 0xA24;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = 0xA24;
+ ret = pcmcia_request_io(link);
if (ret)
goto failed;
}
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
if (ret)
@@ -251,9 +238,7 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
if (ret)
goto failed;
- mem.CardOffset = mmiobase;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
+ ret = pcmcia_map_mem_page(link, link->win, mmiobase);
if (ret)
goto failed;
ti->mmio = ioremap(req.Base, req.Size);
@@ -268,13 +253,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
if (ret)
goto failed;
- mem.CardOffset = srambase;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, info->sram_win_handle, &mem);
+ ret = pcmcia_map_mem_page(link, info->sram_win_handle, srambase);
if (ret)
goto failed;
- ti->sram_base = mem.CardOffset >> 12;
+ ti->sram_base = srambase >> 12;
ti->sram_virt = ioremap(req.Base, req.Size);
ti->sram_phys = req.Base;
@@ -291,15 +274,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
i = ibmtr_probe_card(dev);
if (i != 0) {
- printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO
- "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
- dev->name, dev->base_addr, dev->irq,
- (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
- dev->dev_addr);
+ netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
+ dev->base_addr, dev->irq,
+ (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
+ dev->dev_addr);
return 0;
failed:
@@ -325,7 +307,6 @@ static void ibmtr_release(struct pcmcia_device *link)
if (link->win) {
struct tok_info *ti = netdev_priv(dev);
iounmap(ti->mmio);
- pcmcia_release_window(link, info->sram_win_handle);
}
pcmcia_disable_device(link);
}
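A corresponding sketch for the memory-window change seen in fmvj18x_cs and ibmtr_cs above: memreq_t is gone and pcmcia_map_mem_page() takes the card offset directly, with page 0 implied. Illustrative only; example_map_window() is a placeholder name.

#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static int example_map_window(struct pcmcia_device *link,
			      unsigned int card_offset)
{
	/*
	 * Before:
	 *	memreq_t mem = { .CardOffset = card_offset, .Page = 0 };
	 *	pcmcia_map_mem_page(link, link->win, &mem);
	 */
	return pcmcia_map_mem_page(link, link->win, card_offset);
}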
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 9b63dec549c..c1d8ce9e4a6 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -111,6 +111,8 @@ Log: nmclan_cs.c,v
---------------------------------------------------------------------------- */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "nmclan_cs"
#define DRV_VERSION "0.16"
@@ -146,7 +148,6 @@ Include Files
#include <linux/ioport.h>
#include <linux/bitops.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
@@ -459,9 +460,8 @@ static int nmclan_probe(struct pcmcia_device *link)
link->priv = dev;
spin_lock_init(&lp->bank_lock);
- link->io.NumPorts1 = 32;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 5;
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -521,7 +521,7 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
spin_unlock_irqrestore(&lp->bank_lock, flags);
break;
}
- return (data & 0xFF);
+ return data & 0xFF;
} /* mace_read */
/* ----------------------------------------------------------------------------
@@ -565,7 +565,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
/* Wait for reset bit to be cleared automatically after <= 200ns */;
if(++ct > 500)
{
- printk(KERN_ERR "mace: reset failed, card removed ?\n");
+ pr_err("reset failed, card removed?\n");
return -1;
}
udelay(1);
@@ -612,7 +612,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
{
if(++ ct > 500)
{
- printk(KERN_ERR "mace: ADDRCHG timeout, card removed ?\n");
+ pr_err("ADDRCHG timeout, card removed?\n");
return -1;
}
}
@@ -645,7 +645,8 @@ static int nmclan_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_config\n");
- ret = pcmcia_request_io(link, &link->io);
+ link->io_lines = 5;
+ ret = pcmcia_request_io(link);
if (ret)
goto failed;
ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
@@ -656,7 +657,7 @@ static int nmclan_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ioaddr = dev->base_addr;
@@ -679,8 +680,8 @@ static int nmclan_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
sig[0], sig[1]);
} else {
- printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
- " be 0x40 0x?9\n", sig[0], sig[1]);
+ pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ sig[0], sig[1]);
return -ENODEV;
}
}
@@ -692,20 +693,18 @@ static int nmclan_config(struct pcmcia_device *link)
if (if_port <= 2)
dev->if_port = if_port;
else
- printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
+ pr_notice("invalid if_port requested\n");
SET_NETDEV_DEV(dev, &link->dev);
i = register_netdev(dev);
if (i != 0) {
- printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
- " hw_addr %pM\n",
- dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
- dev->dev_addr);
+ netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
+ dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
return 0;
failed:
@@ -758,29 +757,20 @@ static void nmclan_reset(struct net_device *dev)
#if RESET_XILINX
struct pcmcia_device *link = &lp->link;
- conf_reg_t reg;
- u_long OrigCorValue;
+ u8 OrigCorValue;
/* Save original COR value */
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- reg.Value = 0;
- pcmcia_access_configuration_register(link, &reg);
- OrigCorValue = reg.Value;
+ pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue);
/* Reset Xilinx */
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
+ dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n",
OrigCorValue);
- reg.Value = COR_SOFT_RESET;
- pcmcia_access_configuration_register(link, &reg);
+ pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
/* Need to wait for 20 ms for PCMCIA to finish reset. */
/* Restore original COR configuration index */
- reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
- pcmcia_access_configuration_register(link, &reg);
+ pcmcia_write_config_byte(link, CISREG_COR,
+ (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK)));
/* Xilinx is now completely reset along with the MACE chip. */
lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
@@ -808,8 +798,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 2) {
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n", dev->name,
- if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
} else
return -EINVAL;
}
@@ -888,12 +877,12 @@ static void mace_tx_timeout(struct net_device *dev)
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
+ netdev_notice(dev, "transmit timed out -- ");
#if RESET_ON_TIMEOUT
- printk("resetting card\n");
+ pr_cont("resetting card\n");
pcmcia_reset_card(link->socket);
#else /* #if RESET_ON_TIMEOUT */
- printk("NOT resetting card\n");
+ pr_cont("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
@@ -975,22 +964,21 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
ioaddr = dev->base_addr;
if (lp->tx_irq_disabled) {
- printk(
- (lp->tx_irq_disabled?
- KERN_NOTICE "%s: Interrupt with tx_irq_disabled "
- "[isr=%02X, imr=%02X]\n":
- KERN_NOTICE "%s: Re-entering the interrupt handler "
- "[isr=%02X, imr=%02X]\n"),
- dev->name,
- inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
- inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)
- );
+ const char *msg;
+ if (lp->tx_irq_disabled)
+ msg = "Interrupt with tx_irq_disabled";
+ else
+ msg = "Re-entering the interrupt handler";
+ netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
+ msg,
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
/* WARNING: MACE_IR has been read! */
return IRQ_NONE;
}
if (!netif_device_present(dev)) {
- pr_debug("%s: interrupt from dead card\n", dev->name);
+ netdev_dbg(dev, "interrupt from dead card\n");
return IRQ_NONE;
}
@@ -1388,8 +1376,8 @@ static void BuildLAF(int *ladrf, int *adr)
printk(KERN_DEBUG " adr =%pM\n", adr);
printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
for (i = 0; i < 8; i++)
- printk(KERN_CONT " %02X", ladrf[i]);
- printk(KERN_CONT "\n");
+ pr_cont(" %02X", ladrf[i]);
+ pr_cont("\n");
#endif
} /* BuildLAF */
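The nmclan_reset() hunk above also switches from conf_reg_t/pcmcia_access_configuration_register() to the byte accessors; a minimal sketch of that pattern (placeholder function name, and the 20 ms delay is only what the driver's comment calls for):

#include <linux/delay.h>
#include <linux/types.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>

static void example_cor_soft_reset(struct pcmcia_device *link)
{
	u8 cor;

	/* save the original COR, pulse the soft-reset bit, then restore */
	pcmcia_read_config_byte(link, CISREG_COR, &cor);
	pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
	msleep(20);	/* give the card time to finish the reset */
	pcmcia_write_config_byte(link, CISREG_COR,
				 COR_LEVEL_REQ | (cor & COR_CONFIG_MASK));
}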
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index bfdef72c5d5..c94311aed1a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -28,6 +28,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -35,14 +37,12 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
-#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include "../8390.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -101,7 +101,6 @@ static void pcnet_release(struct pcmcia_device *link);
static int pcnet_open(struct net_device *dev);
static int pcnet_close(struct net_device *dev);
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(u_long arg);
static void pcnet_reset_8390(struct net_device *dev);
@@ -113,8 +112,6 @@ static int setup_dma_config(struct pcmcia_device *link, int start_pg,
static void pcnet_detach(struct pcmcia_device *p_dev);
-static dev_info_t dev_info = "pcnet_cs";
-
/*====================================================================*/
typedef struct hw_info_t {
@@ -304,7 +301,6 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
win_req_t req;
- memreq_t mem;
u_char __iomem *base, *virt;
int i, j;
@@ -317,10 +313,8 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
return NULL;
virt = ioremap(req.Base, req.Size);
- mem.Page = 0;
for (i = 0; i < NR_INFO; i++) {
- mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
- pcmcia_map_mem_page(link, link->win, &mem);
+ pcmcia_map_mem_page(link, link->win, hw_info[i].offset & ~(req.Size-1));
base = &virt[hw_info[i].offset & (req.Size-1)];
if ((readb(base+0) == hw_info[i].a0) &&
(readb(base+2) == hw_info[i].a1) &&
@@ -440,8 +434,6 @@ static hw_info_t *get_ax88190(struct pcmcia_device *link)
dev->dev_addr[i] = j & 0xff;
dev->dev_addr[i+1] = j >> 8;
}
- printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
- printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
return NULL;
}
@@ -480,29 +472,31 @@ static hw_info_t *get_hwired(struct pcmcia_device *link)
static int try_io_port(struct pcmcia_device *link)
{
int j, ret;
- if (link->io.NumPorts1 == 32) {
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (link->io.NumPorts2 > 0) {
+ link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ if (link->resource[0]->end == 32) {
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ if (link->resource[1]->end > 0) {
/* for master/slave multifunction cards */
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
}
} else {
/* This should be two 16-port windows */
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
}
- if (link->io.BasePort1 == 0) {
- link->io.IOAddrLines = 16;
+ if (link->resource[0]->start == 0) {
for (j = 0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- link->io.BasePort2 = (j ^ 0x300) + 0x10;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ link->resource[1]->start = (j ^ 0x300) + 0x10;
+ link->io_lines = 16;
+ ret = pcmcia_request_io(link);
if (ret == 0)
return ret;
}
return ret;
} else {
- return pcmcia_request_io(link, &link->io);
+ return pcmcia_request_io(link);
}
}
@@ -512,7 +506,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- int *has_shmem = priv_data;
+ int *priv = priv_data;
+ int try = (*priv & 0x1);
int i;
cistpl_io_t *io = &cfg->io;
@@ -523,83 +518,109 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
network function with window 0, and serial with window 1 */
if (io->nwin > 1) {
i = (io->win[1].len > io->win[0].len);
- p_dev->io.BasePort2 = io->win[1-i].base;
- p_dev->io.NumPorts2 = io->win[1-i].len;
+ p_dev->resource[1]->start = io->win[1-i].base;
+ p_dev->resource[1]->end = io->win[1-i].len;
} else {
- i = p_dev->io.NumPorts2 = 0;
+ i = p_dev->resource[1]->end = 0;
}
- *has_shmem = ((cfg->mem.nwin == 1) &&
- (cfg->mem.win[0].len >= 0x4000));
- p_dev->io.BasePort1 = io->win[i].base;
- p_dev->io.NumPorts1 = io->win[i].len;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- if (p_dev->io.NumPorts1 + p_dev->io.NumPorts2 >= 32)
+ *priv &= ((cfg->mem.nwin == 1) &&
+ (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
+
+ p_dev->resource[0]->start = io->win[i].base;
+ p_dev->resource[0]->end = io->win[i].len;
+ if (!try)
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ else
+ p_dev->io_lines = 16;
+ if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
return try_io_port(p_dev);
- return 0;
+ return -EINVAL;
+}
+
+static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
+ int *has_shmem, int try)
+{
+ struct net_device *dev = link->priv;
+ hw_info_t *local_hw_info;
+ pcnet_dev_t *info = PRIV(dev);
+ int priv = try;
+ int ret;
+
+ ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
+ if (ret) {
+ dev_warn(&link->dev, "no useable port range found\n");
+ return NULL;
+ }
+ *has_shmem = (priv & 0x10);
+
+ if (!link->irq)
+ return NULL;
+
+ if (resource_size(link->resource[1]) == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+ if ((link->manf_id == MANFID_IBM) &&
+ (link->card_id == PRODID_IBM_HOME_AND_AWAY))
+ link->conf.ConfigIndex |= 0x10;
+
+ ret = pcmcia_request_configuration(link, &link->conf);
+ if (ret)
+ return NULL;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ dev_notice(&link->dev, "invalid if_port requested\n");
+ } else
+ dev->if_port = 0;
+
+ if ((link->conf.ConfigBase == 0x03c0) &&
+ (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+ dev_info(&link->dev,
+ "this is an AX88190 card - use axnet_cs instead.\n");
+ return NULL;
+ }
+
+ local_hw_info = get_hwinfo(link);
+ if (!local_hw_info)
+ local_hw_info = get_prom(link);
+ if (!local_hw_info)
+ local_hw_info = get_dl10019(link);
+ if (!local_hw_info)
+ local_hw_info = get_ax88190(link);
+ if (!local_hw_info)
+ local_hw_info = get_hwired(link);
+
+ return local_hw_info;
}
static int pcnet_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
pcnet_dev_t *info = PRIV(dev);
- int ret, start_pg, stop_pg, cm_offset;
+ int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
dev_dbg(&link->dev, "pcnet_config\n");
- ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- if (link->io.NumPorts2 == 8) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
- if ((link->manf_id == MANFID_IBM) &&
- (link->card_id == PRODID_IBM_HOME_AND_AWAY))
- link->conf.ConfigIndex |= 0x10;
-
- ret = pcmcia_request_configuration(link, &link->conf);
- if (ret)
- goto failed;
- dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
- if (info->flags & HAS_MISC_REG) {
- if ((if_port == 1) || (if_port == 2))
- dev->if_port = if_port;
- else
- printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
- } else {
- dev->if_port = 0;
- }
-
- if ((link->conf.ConfigBase == 0x03c0) &&
- (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
- printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
- printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
- goto failed;
- }
-
- local_hw_info = get_hwinfo(link);
- if (local_hw_info == NULL)
- local_hw_info = get_prom(link);
- if (local_hw_info == NULL)
- local_hw_info = get_dl10019(link);
- if (local_hw_info == NULL)
- local_hw_info = get_ax88190(link);
- if (local_hw_info == NULL)
- local_hw_info = get_hwired(link);
-
- if (local_hw_info == NULL) {
- printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
- " address for io base %#3lx\n", dev->base_addr);
- goto failed;
+ local_hw_info = pcnet_try_config(link, &has_shmem, 0);
+ if (!local_hw_info) {
+ /* check whether forcing io_lines to 16 helps... */
+ pcmcia_disable_device(link);
+ local_hw_info = pcnet_try_config(link, &has_shmem, 1);
+ if (local_hw_info == NULL) {
+ dev_notice(&link->dev, "unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ goto failed;
+ }
}
info->flags = local_hw_info->flags;
@@ -630,9 +651,7 @@ static int pcnet_config(struct pcmcia_device *link)
ei_status.name = "NE2000";
ei_status.word16 = 1;
- ei_status.reset_8390 = &pcnet_reset_8390;
-
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ ei_status.reset_8390 = pcnet_reset_8390;
if (info->flags & (IS_DL10019|IS_DL10022))
mii_phy_probe(dev);
@@ -640,25 +659,25 @@ static int pcnet_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
- printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
- dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id);
+ netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
+ (info->flags & IS_DL10022) ? 22 : 19, id);
if (info->pna_phy)
- printk("PNA, ");
+ pr_cont("PNA, ");
} else {
- printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name);
+ netdev_info(dev, "NE2000 Compatible: ");
}
- printk("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+ pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq);
if (info->flags & USE_SHMEM)
- printk (" mem %#5lx,", dev->mem_start);
+ pr_cont(" mem %#5lx,", dev->mem_start);
if (info->flags & HAS_MISC_REG)
- printk(" %s xcvr,", if_names[dev->if_port]);
- printk(" hw_addr %pM\n", dev->dev_addr);
+ pr_cont(" %s xcvr,", if_names[dev->if_port]);
+ pr_cont(" hw_addr %pM\n", dev->dev_addr);
return 0;
failed:
@@ -932,7 +951,7 @@ static void mii_phy_probe(struct net_device *dev)
phyid = tmp << 16;
phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
phyid &= MII_PHYID_REV_MASK;
- pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
+ netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
if (phyid == AM79C9XX_HOME_PHY) {
info->pna_phy = i;
} else if (phyid != AM79C9XX_ETH_PHY) {
@@ -956,7 +975,7 @@ static int pcnet_open(struct net_device *dev)
set_misc_reg(dev);
outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
- ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
+ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev);
if (ret)
return ret;
@@ -965,7 +984,7 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
init_timer(&info->watchdog);
- info->watchdog.function = &ei_watchdog;
+ info->watchdog.function = ei_watchdog;
info->watchdog.data = (u_long)dev;
info->watchdog.expires = jiffies + HZ;
add_timer(&info->watchdog);
@@ -1018,8 +1037,8 @@ static void pcnet_reset_8390(struct net_device *dev)
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
if (i == 100)
- printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
- dev->name);
+ netdev_err(dev, "pcnet_reset_8390() did not complete.\n");
+
set_misc_reg(dev);
} /* pcnet_reset_8390 */
@@ -1035,8 +1054,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
else if ((map->port < 1) || (map->port > 2))
return -EINVAL;
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
NS8390_init(dev, 1);
}
return 0;
@@ -1071,7 +1089,7 @@ static void ei_watchdog(u_long arg)
this, we can limp along even if the interrupt is blocked */
if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
if (!info->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
ei_irq_wrapper(dev->irq, dev);
info->fast_poll = HZ;
}
@@ -1091,7 +1109,7 @@ static void ei_watchdog(u_long arg)
if (info->eth_phy) {
info->phy_id = info->eth_phy = 0;
} else {
- printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ netdev_info(dev, "MII is missing!\n");
info->flags &= ~HAS_MII;
}
goto reschedule;
@@ -1100,8 +1118,7 @@ static void ei_watchdog(u_long arg)
link &= 0x0004;
if (link != info->link_status) {
u_short p = mdio_read(mii_addr, info->phy_id, 5);
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (link) ? "found" : "lost");
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
if (link && (info->flags & IS_DL10022)) {
/* Disable collision detection on full duplex links */
outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
@@ -1112,13 +1129,12 @@ static void ei_watchdog(u_long arg)
if (link) {
if (info->phy_id == info->eth_phy) {
if (p)
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
+ netdev_info(dev, "autonegotiation complete: "
+ "%sbaseT-%cD selected\n",
((p & 0x0180) ? "100" : "10"),
((p & 0x0140) ? 'F' : 'H'));
else
- printk(KERN_INFO "%s: link partner did not "
- "autonegotiate\n", dev->name);
+ netdev_info(dev, "link partner did not autonegotiate\n");
}
NS8390_init(dev, 1);
}
@@ -1131,7 +1147,7 @@ static void ei_watchdog(u_long arg)
/* isolate this MII and try flipping to the other one */
mdio_write(mii_addr, info->phy_id, 0, 0x0400);
info->phy_id ^= info->pna_phy ^ info->eth_phy;
- printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name,
+ netdev_info(dev, "switched to %s transceiver\n",
(info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
mdio_write(mii_addr, info->phy_id, 0,
(info->phy_id == info->eth_phy) ? 0x1000 : 0);
@@ -1147,18 +1163,6 @@ reschedule:
/*====================================================================*/
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "pcnet_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
-/*====================================================================*/
-
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -1191,9 +1195,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
- printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ netdev_notice(dev, "DMAing conflict in dma_block_input."
"[DMAstat:%1x][irqlock:%1x]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -1224,11 +1228,11 @@ static void dma_block_input(struct net_device *dev, int count,
char *buf = skb->data;
if ((ei_debug > 4) && (count != 4))
- pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
if (ei_status.dmaing) {
- printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ netdev_notice(dev, "DMAing conflict in dma_block_input."
"[DMAstat:%1x][irqlock:%1x]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1258,9 +1262,9 @@ static void dma_block_input(struct net_device *dev, int count,
break;
} while (--tries > 0);
if (tries <= 0)
- printk(KERN_NOTICE "%s: RX transfer address mismatch,"
+ netdev_notice(dev, "RX transfer address mismatch,"
"%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1281,7 +1285,7 @@ static void dma_block_output(struct net_device *dev, int count,
#ifdef PCMCIA_DEBUG
if (ei_debug > 4)
- printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+ netdev_dbg(dev, "[bo=%d]\n", count);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -1290,9 +1294,9 @@ static void dma_block_output(struct net_device *dev, int count,
if (count & 0x01)
count++;
if (ei_status.dmaing) {
- printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output."
+ netdev_notice(dev, "DMAing conflict in dma_block_output."
"[DMAstat:%1x][irqlock:%1x]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1329,9 +1333,9 @@ static void dma_block_output(struct net_device *dev, int count,
break;
} while (--tries > 0);
if (tries <= 0) {
- printk(KERN_NOTICE "%s: Tx packet transfer address mismatch,"
+ netdev_notice(dev, "Tx packet transfer address mismatch,"
"%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -1340,8 +1344,7 @@ static void dma_block_output(struct net_device *dev, int count,
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
- printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n",
- dev->name);
+ netdev_notice(dev, "timeout waiting for Tx RDC.\n");
pcnet_reset_8390(dev);
NS8390_init(dev, 1);
break;
@@ -1365,9 +1368,9 @@ static int setup_dma_config(struct pcmcia_device *link, int start_pg,
ei_status.stop_page = stop_pg;
/* set up block i/o functions */
- ei_status.get_8390_hdr = &dma_get_8390_hdr;
- ei_status.block_input = &dma_block_input;
- ei_status.block_output = &dma_block_output;
+ ei_status.get_8390_hdr = dma_get_8390_hdr;
+ ei_status.block_input = dma_block_input;
+ ei_status.block_output = dma_block_output;
return 0;
}
@@ -1464,7 +1467,6 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
struct net_device *dev = link->priv;
pcnet_dev_t *info = PRIV(dev);
win_req_t req;
- memreq_t mem;
int i, window_size, offset, ret;
window_size = (stop_pg - start_pg) << 8;
@@ -1483,11 +1485,9 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
if (ret)
goto failed;
- mem.CardOffset = (start_pg << 8) + cm_offset;
- offset = mem.CardOffset % window_size;
- mem.CardOffset -= offset;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
+ offset = (start_pg << 8) + cm_offset;
+ offset -= offset % window_size;
+ ret = pcmcia_map_mem_page(link, link->win, offset);
if (ret)
goto failed;
@@ -1516,9 +1516,9 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
/* set up block i/o functions */
- ei_status.get_8390_hdr = &shmem_get_8390_hdr;
- ei_status.block_input = &shmem_block_input;
- ei_status.block_output = &shmem_block_output;
+ ei_status.get_8390_hdr = shmem_get_8390_hdr;
+ ei_status.block_input = shmem_block_input;
+ ei_status.block_output = shmem_block_output;
info->flags |= USE_SHMEM;
return 0;
@@ -1644,6 +1644,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+ PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 307cd1721e9..7204a4b5529 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -25,6 +25,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -44,7 +46,6 @@
#include <linux/jiffies.h>
#include <linux/firmware.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -295,7 +296,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_tx_timeout = smc_tx_timeout,
.ndo_set_config = s9k_config,
.ndo_set_multicast_list = set_rx_mode,
- .ndo_do_ioctl = &smc_ioctl,
+ .ndo_do_ioctl = smc_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -325,9 +326,8 @@ static int smc91c92_probe(struct pcmcia_device *link)
link->priv = dev;
spin_lock_init(&smc->lock);
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 4;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -428,12 +428,13 @@ static int mhz_mfc_config_check(struct pcmcia_device *p_dev,
void *priv_data)
{
int k;
- p_dev->io.BasePort2 = cf->io.win[0].base;
+ p_dev->resource[1]->start = cf->io.win[0].base;
for (k = 0; k < 0x400; k += 0x10) {
if (k & 0x80)
continue;
- p_dev->io.BasePort1 = k ^ 0x300;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = k ^ 0x300;
+ p_dev->io_lines = 16;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -444,21 +445,20 @@ static int mhz_mfc_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
win_req_t req;
- memreq_t mem;
+ unsigned int offset;
int i;
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->io.IOAddrLines = 16;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts2 = 8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
/* The Megahertz combo cards have modem-like CIS entries, so
we have to explicitly try a bunch of port combinations. */
if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL))
return -ENODEV;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
/* Allocate a memory window, for accessing the ISR */
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
@@ -469,11 +469,8 @@ static int mhz_mfc_config(struct pcmcia_device *link)
return -ENODEV;
smc->base = ioremap(req.Base, req.Size);
- mem.CardOffset = mem.Page = 0;
- if (smc->manfid == MANFID_MOTOROLA)
- mem.CardOffset = link->conf.ConfigBase;
- i = pcmcia_map_mem_page(link, link->win, &mem);
-
+ offset = (smc->manfid == MANFID_MOTOROLA) ? link->conf.ConfigBase : 0;
+ i = pcmcia_map_mem_page(link, link->win, offset);
if ((i == 0) &&
(smc->manfid == MANFID_MEGAHERTZ) &&
(smc->cardid == PRODID_MEGAHERTZ_EM3288))
@@ -546,7 +543,7 @@ static void mot_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
- unsigned int iouart = link->io.BasePort2;
+ unsigned int iouart = link->resource[1]->start;
/* Set UART base address and force map with COR bit 1 */
writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
@@ -602,9 +599,9 @@ static int smc_configcheck(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- return pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ return pcmcia_request_io(p_dev);
}
static int smc_config(struct pcmcia_device *link)
@@ -612,10 +609,10 @@ static int smc_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
int i;
- link->io.NumPorts1 = 16;
+ link->resource[0]->end = 16;
i = pcmcia_loop_config(link, smc_configcheck, NULL);
if (!i)
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
return i;
}
@@ -647,27 +644,27 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->io.NumPorts1 = 64;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts2 = 8;
- link->io.IOAddrLines = 16;
+ link->resource[0]->end = 64;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
/* Enable Hard Decode, LAN, Modem */
link->conf.ConfigIndex = 0x23;
+ link->io_lines = 16;
for (i = j = 0; j < 4; j++) {
- link->io.BasePort2 = com[j];
- i = pcmcia_request_io(link, &link->io);
+ link->resource[1]->start = com[j];
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
if (i != 0) {
/* Fallback: turn off hard decode */
link->conf.ConfigIndex = 0x03;
- link->io.NumPorts2 = 0;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[1]->end = 0;
+ i = pcmcia_request_io(link);
}
- dev->base_addr = link->io.BasePort1 + 0x10;
+ dev->base_addr = link->resource[0]->start + 0x10;
return i;
}
@@ -684,7 +681,7 @@ static int osi_load_firmware(struct pcmcia_device *link)
/* Download the Seven of Diamonds firmware */
for (i = 0; i < fw->size; i++) {
- outb(fw->data[i], link->io.BasePort1 + 2);
+ outb(fw->data[i], link->resource[0]->start + 2);
udelay(50);
}
release_firmware(fw);
@@ -726,12 +723,12 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
return rc;
} else if (manfid == MANFID_OSITECH) {
/* Make sure both functions are powered up */
- set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
+ set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR);
/* Now, turn on the interrupt for both card functions */
- set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
+ set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR);
dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
- inw(link->io.BasePort1 + OSITECH_AUI_PWR),
- inw(link->io.BasePort1 + OSITECH_RESET_ISR));
+ inw(link->resource[0]->start + OSITECH_AUI_PWR),
+ inw(link->resource[0]->start + OSITECH_RESET_ISR));
}
return 0;
}
@@ -804,7 +801,7 @@ static int check_sig(struct pcmcia_device *link)
}
/* Try setting bus width */
- width = (link->io.Attributes1 == IO_DATA_PATH_WIDTH_AUTO);
+ width = (link->resource[0]->flags == IO_DATA_PATH_WIDTH_AUTO);
s = inb(ioaddr + CONFIG);
if (width)
s |= CFG_16BIT;
@@ -818,14 +815,14 @@ static int check_sig(struct pcmcia_device *link)
((s >> 8) != (s & 0xff))) {
SMC_SELECT_BANK(3);
s = inw(ioaddr + REVISION);
- return (s & 0xff);
+ return s & 0xff;
}
if (width) {
modconf_t mod = {
.Attributes = CONF_IO_CHANGE_WIDTH,
};
- printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
+ pr_info("using 8-bit IO window\n");
smc91c92_suspend(link);
pcmcia_modify_configuration(link, &mod);
@@ -886,7 +883,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if ((if_port >= 0) && (if_port <= 2))
dev->if_port = if_port;
else
- printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n");
+ dev_notice(&link->dev, "invalid if_port requested\n");
switch (smc->manfid) {
case MANFID_OSITECH:
@@ -904,7 +901,7 @@ static int smc91c92_config(struct pcmcia_device *link)
}
if (i != 0) {
- printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
+ dev_notice(&link->dev, "Unable to find hardware address.\n");
goto config_failed;
}
@@ -957,30 +954,28 @@ static int smc91c92_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
+ dev_err(&link->dev, "register_netdev() failed\n");
goto config_undo;
}
- printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n",
+ name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr);
if (rev > 0) {
if (mir & 0x3ff)
- printk(KERN_INFO " %lu byte", mir);
+ netdev_info(dev, " %lu byte", mir);
else
- printk(KERN_INFO " %lu kb", mir>>10);
- printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ?
- "MII" : if_names[dev->if_port]);
+ netdev_info(dev, " %lu kb", mir>>10);
+ pr_cont(" buffer, %s xcvr\n",
+ (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]);
}
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id != -1) {
- dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n",
- smc->mii_if.phy_id, j);
+ netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
+ smc->mii_if.phy_id, j);
} else {
- printk(KERN_NOTICE " No MII transceivers found!\n");
+ netdev_notice(dev, " No MII transceivers found!\n");
}
}
return 0;
@@ -1086,10 +1081,10 @@ static void smc_dump(struct net_device *dev)
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
SMC_SELECT_BANK(w);
- printk(KERN_DEBUG "bank %d: ", w);
+ netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
for (i = 0; i < 14; i += 2)
- printk(" %04x", inw(ioaddr + i));
- printk("\n");
+ pr_cont(" %04x", inw(ioaddr + i));
+ pr_cont("\n");
}
outw(save, ioaddr + BANK_SELECT);
}
@@ -1111,7 +1106,7 @@ static int smc_open(struct net_device *dev)
return -ENODEV;
/* Physical device present signature. */
if (check_sig(link) < 0) {
- printk("smc91c92_cs: Yikes! Bad chip signature!\n");
+ netdev_info(dev, "Yikes! Bad chip signature!\n");
return -ENODEV;
}
link->open++;
@@ -1122,7 +1117,7 @@ static int smc_open(struct net_device *dev)
smc_reset(dev);
init_timer(&smc->media);
- smc->media.function = &media_check;
+ smc->media.function = media_check;
smc->media.data = (u_long) dev;
smc->media.expires = jiffies + HZ;
add_timer(&smc->media);
@@ -1177,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
u_char packet_no;
if (!skb) {
- printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name);
+ netdev_err(dev, "In XMIT with no packet to send\n");
return;
}
@@ -1185,8 +1180,8 @@ static void smc_hardware_send_packet(struct net_device * dev)
packet_no = inw(ioaddr + PNR_ARR) >> 8;
if (packet_no & 0x80) {
/* If not, there is a hardware problem! Likely an ejected card. */
- printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation"
- " failed, status %#2.2x.\n", dev->name, packet_no);
+ netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n",
+ packet_no);
dev_kfree_skb_irq(skb);
smc->saved_skb = NULL;
netif_start_queue(dev);
@@ -1205,8 +1200,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
u_char *buf = skb->data;
u_int length = skb->len; /* The chip will pad to ethernet min. */
- pr_debug("%s: Trying to xmit packet of length %d.\n",
- dev->name, length);
+ netdev_dbg(dev, "Trying to xmit packet of length %d\n", length);
/* send the packet length: +6 for status word, length, and ctl */
outw(0, ioaddr + DATA_1);
@@ -1238,9 +1232,8 @@ static void smc_tx_timeout(struct net_device *dev)
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
- printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
- "Tx_status %2.2x status %4.4x.\n",
- dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
+ netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n",
+ inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1259,14 +1252,14 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
- pr_debug("%s: smc_start_xmit(length = %d) called,"
- " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
+ netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n",
+ skb->len, inw(ioaddr + 2));
if (smc->saved_skb) {
/* THIS SHOULD NEVER HAPPEN. */
dev->stats.tx_aborted_errors++;
- printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
- dev->name);
+ netdev_printk(KERN_DEBUG, dev,
+ "Internal error -- sent packet while busy\n");
return NETDEV_TX_BUSY;
}
smc->saved_skb = skb;
@@ -1274,7 +1267,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
num_pages = skb->len >> 8;
if (num_pages > 7) {
- printk(KERN_ERR "%s: Far too big packet error.\n", dev->name);
+ netdev_err(dev, "Far too big packet error: %d pages\n", num_pages);
dev_kfree_skb (skb);
smc->saved_skb = NULL;
dev->stats.tx_dropped++;
@@ -1344,8 +1337,7 @@ static void smc_tx_err(struct net_device * dev)
}
if (tx_status & TS_SUCCESS) {
- printk(KERN_NOTICE "%s: Successful packet caused error "
- "interrupt?\n", dev->name);
+ netdev_notice(dev, "Successful packet caused error interrupt?\n");
}
/* re-enable transmit */
SMC_SELECT_BANK(0);
@@ -1535,8 +1527,7 @@ static void smc_rx(struct net_device *dev)
/* Assertion: we are in Window 2. */
if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
- printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n",
- dev->name);
+ netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n");
return;
}
@@ -1651,8 +1642,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
else if (map->port > 2)
return -EINVAL;
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
smc_reset(dev);
}
return 0;
@@ -1803,7 +1793,7 @@ static void media_check(u_long arg)
this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
smc_interrupt(dev->irq, dev);
local_irq_restore(flags);
@@ -1827,7 +1817,7 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(3);
link = mdio_read(dev, smc->mii_if.phy_id, 1);
if (!link || (link == 0xffff)) {
- printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ netdev_info(dev, "MII is missing!\n");
smc->mii_if.phy_id = -1;
goto reschedule;
}
@@ -1835,15 +1825,13 @@ static void media_check(u_long arg)
link &= 0x0004;
if (link != smc->link_status) {
u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (link) ? "found" : "lost");
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
? TCR_FDUPLX : 0);
if (link) {
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
- ((p & 0x0180) ? "100" : "10"),
- (smc->duplex ? 'F' : 'H'));
+ netdev_info(dev, "autonegotiation complete: "
+ "%dbaseT-%cD selected\n",
+ (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H');
}
SMC_SELECT_BANK(0);
outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
@@ -1862,25 +1850,23 @@ static void media_check(u_long arg)
if (media != smc->media_status) {
if ((media & smc->media_status & 1) &&
((smc->media_status ^ media) & EPH_LINK_OK))
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (smc->media_status & EPH_LINK_OK ? "lost" : "found"));
+ netdev_info(dev, "%s link beat\n",
+ smc->media_status & EPH_LINK_OK ? "lost" : "found");
else if ((media & smc->media_status & 2) &&
((smc->media_status ^ media) & EPH_16COL))
- printk(KERN_INFO "%s: coax cable %s\n", dev->name,
- (media & EPH_16COL ? "problem" : "ok"));
+ netdev_info(dev, "coax cable %s\n",
+ media & EPH_16COL ? "problem" : "ok");
if (dev->if_port == 0) {
if (media & 1) {
if (media & EPH_LINK_OK)
- printk(KERN_INFO "%s: flipped to 10baseT\n",
- dev->name);
+ netdev_info(dev, "flipped to 10baseT\n");
else
smc_set_xcvr(dev, 2);
} else {
if (media & EPH_16COL)
smc_set_xcvr(dev, 1);
else
- printk(KERN_INFO "%s: flipped to 10base2\n",
- dev->name);
+ netdev_info(dev, "flipped to 10base2\n");
}
}
smc->media_status = media;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index b6c3644888c..d858b5e4c4a 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -63,6 +63,8 @@
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -82,7 +84,6 @@
#include <linux/bitops.h>
#include <linux/mii.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -211,13 +212,6 @@ enum xirc_cmd { /* Commands */
static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
-
-#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
-#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
-#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
-#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
-#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
-
/* card types */
#define XIR_UNKNOWN 0 /* unknown: not supported */
#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
@@ -351,26 +345,26 @@ PrintRegisters(struct net_device *dev)
if (pc_debug > 1) {
int i, page;
- printk(KDBG_XIRC "Register common: ");
+ printk(KERN_DEBUG pr_fmt("Register common: "));
for (i = 0; i < 8; i++)
- printk(" %2.2x", GetByte(i));
- printk("\n");
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
for (page = 0; page <= 8; page++) {
- printk(KDBG_XIRC "Register page %2x: ", page);
+ printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
SelectPage(page);
for (i = 8; i < 16; i++)
- printk(" %2.2x", GetByte(i));
- printk("\n");
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
}
for (page=0x40 ; page <= 0x5f; page++) {
if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
(page >= 0x51 && page <=0x5e))
continue;
- printk(KDBG_XIRC "Register page %2x: ", page);
+ printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
SelectPage(page);
for (i = 8; i < 16; i++)
- printk(" %2.2x", GetByte(i));
- printk("\n");
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
}
}
}
@@ -609,11 +603,11 @@ set_card_type(struct pcmcia_device *link)
local->modem = 0;
local->card_type = XIR_UNKNOWN;
if (!(prodid & 0x40)) {
- printk(KNOT_XIRC "Ooops: Not a creditcard\n");
+ pr_notice("Oops: Not a creditcard\n");
return 0;
}
if (!(mediaid & 0x01)) {
- printk(KNOT_XIRC "Not an Ethernet card\n");
+ pr_notice("Not an Ethernet card\n");
return 0;
}
if (mediaid & 0x10) {
@@ -644,12 +638,11 @@ set_card_type(struct pcmcia_device *link)
}
}
if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
- printk(KNOT_XIRC "Sorry, this is an old CE card\n");
+ pr_notice("Sorry, this is an old CE card\n");
return 0;
}
if (local->card_type == XIR_UNKNOWN)
- printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n",
- mediaid, prodid);
+ pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid);
return 1;
}
@@ -678,9 +671,9 @@ xirc2ps_config_modem(struct pcmcia_device *p_dev,
if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
- p_dev->io.BasePort2 = cf->io.win[0].base;
- p_dev->io.BasePort1 = ioaddr;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[1]->start = cf->io.win[0].base;
+ p_dev->resource[0]->start = ioaddr;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
@@ -697,11 +690,11 @@ xirc2ps_config_check(struct pcmcia_device *p_dev,
int *pass = priv_data;
if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
- p_dev->io.BasePort2 = cf->io.win[0].base;
- p_dev->io.BasePort1 = p_dev->io.BasePort2
+ p_dev->resource[1]->start = cf->io.win[0].base;
+ p_dev->resource[0]->start = p_dev->resource[1]->start
+ (*pass ? (cf->index & 0x20 ? -24:8)
: (cf->index & 0x20 ? 8:-24));
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -749,7 +742,7 @@ xirc2ps_config(struct pcmcia_device * link)
/* Is this a valid card */
if (link->has_manf_id == 0) {
- printk(KNOT_XIRC "manfid not found in CIS\n");
+ pr_notice("manfid not found in CIS\n");
goto failure;
}
@@ -771,14 +764,14 @@ xirc2ps_config(struct pcmcia_device * link)
local->manf_str = "Toshiba";
break;
default:
- printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
- (unsigned)link->manf_id);
+ pr_notice("Unknown Card Manufacturer ID: 0x%04x\n",
+ (unsigned)link->manf_id);
goto failure;
}
dev_dbg(&link->dev, "found %s card\n", local->manf_str);
if (!set_card_type(link)) {
- printk(KNOT_XIRC "this card is not supported\n");
+ pr_notice("this card is not supported\n");
goto failure;
}
@@ -804,12 +797,12 @@ xirc2ps_config(struct pcmcia_device * link)
err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
if (err) {
- printk(KNOT_XIRC "node-id not found in CIS\n");
+ pr_notice("node-id not found in CIS\n");
goto failure;
}
- link->io.IOAddrLines =10;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ link->io_lines = 10;
if (local->modem) {
int pass;
@@ -817,16 +810,16 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->io.NumPorts2 = 8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
/* Take the Modem IO port from the CIS and scan for a free
* Ethernet port */
- link->io.NumPorts1 = 16; /* no Mako stuff anymore */
+ link->resource[0]->end = 16; /* no Mako stuff anymore */
if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL))
goto port_found;
} else {
- link->io.NumPorts1 = 18;
+ link->resource[0]->end = 18;
/* We do 2 passes here: The first one uses the regular mapping and
* the second tries again, thereby considering that the 32 ports are
* mirrored every 32 bytes. Actually we use a mirrored port for
@@ -839,16 +832,16 @@ xirc2ps_config(struct pcmcia_device * link)
* try to configure as Ethernet only.
* .... */
}
- printk(KNOT_XIRC "no ports available\n");
+ pr_notice("no ports available\n");
} else {
- link->io.NumPorts1 = 16;
+ link->resource[0]->end = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
- link->io.BasePort1 = ioaddr;
- if (!(err=pcmcia_request_io(link, &link->io)))
+ link->resource[0]->start = ioaddr;
+ if (!(err = pcmcia_request_io(link)))
goto port_found;
}
- link->io.BasePort1 = 0; /* let CS decide */
- if ((err=pcmcia_request_io(link, &link->io)))
+ link->resource[0]->start = 0; /* let CS decide */
+ if ((err = pcmcia_request_io(link)))
goto config_error;
}
port_found:
@@ -870,24 +863,21 @@ xirc2ps_config(struct pcmcia_device * link)
goto config_error;
if (local->dingo) {
- conf_reg_t reg;
win_req_t req;
- memreq_t mem;
/* Reset the modem's BAR to the correct value
* This is necessary because in the RequestConfiguration call,
* the base address of the ethernet port (BasePort1) is written
* to the BAR registers of the modem.
*/
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_IOBASE_0;
- reg.Value = link->io.BasePort2 & 0xff;
- if ((err = pcmcia_access_configuration_register(link, &reg)))
+ err = pcmcia_write_config_byte(link, CISREG_IOBASE_0, (u8)
+ link->resource[1]->start & 0xff);
+ if (err)
goto config_error;
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_IOBASE_1;
- reg.Value = (link->io.BasePort2 >> 8) & 0xff;
- if ((err = pcmcia_access_configuration_register(link, &reg)))
+
+ err = pcmcia_write_config_byte(link, CISREG_IOBASE_1,
+ (link->resource[1]->start >> 8) & 0xff);
+ if (err)
goto config_error;
/* There is no config entry for the Ethernet part which
@@ -901,40 +891,38 @@ xirc2ps_config(struct pcmcia_device * link)
goto config_error;
local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
- mem.CardOffset = 0x0;
- mem.Page = 0;
- if ((err = pcmcia_map_mem_page(link, link->win, &mem)))
+ if ((err = pcmcia_map_mem_page(link, link->win, 0)))
goto config_error;
/* Setup the CCRs; there are no infos in the CIS about the Ethernet
* part.
*/
writeb(0x47, local->dingo_ccr + CISREG_COR);
- ioaddr = link->io.BasePort1;
+ ioaddr = link->resource[0]->start;
writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0);
writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1);
#if 0
{
u_char tmp;
- printk(KERN_INFO "ECOR:");
+ pr_info("ECOR:");
for (i=0; i < 7; i++) {
tmp = readb(local->dingo_ccr + i*2);
- printk(" %02x", tmp);
+ pr_cont(" %02x", tmp);
}
- printk("\n");
- printk(KERN_INFO "DCOR:");
+ pr_cont("\n");
+ pr_info("DCOR:");
for (i=0; i < 4; i++) {
tmp = readb(local->dingo_ccr + 0x20 + i*2);
- printk(" %02x", tmp);
+ pr_cont(" %02x", tmp);
}
- printk("\n");
- printk(KERN_INFO "SCOR:");
+ pr_cont("\n");
+ pr_info("SCOR:");
for (i=0; i < 10; i++) {
tmp = readb(local->dingo_ccr + 0x40 + i*2);
- printk(" %02x", tmp);
+ pr_cont(" %02x", tmp);
}
- printk("\n");
+ pr_cont("\n");
}
#endif
@@ -953,11 +941,11 @@ xirc2ps_config(struct pcmcia_device * link)
(local->mohawk && if_port==4))
dev->if_port = if_port;
else
- printk(KNOT_XIRC "invalid if_port requested\n");
+ pr_notice("invalid if_port requested\n");
/* we can now register the device with the net subsystem */
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
if (local->dingo)
do_reset(dev, 1); /* a kludge to make the cem56 work */
@@ -965,14 +953,14 @@ xirc2ps_config(struct pcmcia_device * link)
SET_NETDEV_DEV(dev, &link->dev);
if ((err=register_netdev(dev))) {
- printk(KNOT_XIRC "register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto config_error;
}
/* give some infos about the hardware */
- printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
- dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n",
+ local->manf_str, (u_long)dev->base_addr, (int)dev->irq,
+ dev->dev_addr);
return 0;
@@ -1104,8 +1092,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
if (!skb) {
- printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n",
- pktlen);
+ pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
dev->stats.rx_dropped++;
} else { /* okay get the packet */
skb_reserve(skb, 2);
@@ -1274,7 +1261,7 @@ xirc_tx_timeout(struct net_device *dev)
{
local_info_t *lp = netdev_priv(dev);
dev->stats.tx_errors++;
- printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ netdev_notice(dev, "transmit timed out\n");
schedule_work(&lp->tx_timeout_task);
}
@@ -1441,8 +1428,7 @@ do_config(struct net_device *dev, struct ifmap *map)
local->probe_port = 0;
dev->if_port = map->port;
}
- printk(KERN_INFO "%s: switching to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
do_reset(dev,1); /* not the fine way :-) */
}
return 0;
@@ -1582,7 +1568,7 @@ do_reset(struct net_device *dev, int full)
{
SelectPage(0);
value = GetByte(XIRCREG_ESR); /* read the ESR */
- printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
+ pr_debug("%s: ESR is: %#02x\n", dev->name, value);
}
#endif
@@ -1632,13 +1618,12 @@ do_reset(struct net_device *dev, int full)
if (full && local->mohawk && init_mii(dev)) {
if (dev->if_port == 4 || local->dingo || local->new_mii) {
- printk(KERN_INFO "%s: MII selected\n", dev->name);
+ netdev_info(dev, "MII selected\n");
SelectPage(2);
PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
msleep(20);
} else {
- printk(KERN_INFO "%s: MII detected; using 10mbs\n",
- dev->name);
+ netdev_info(dev, "MII detected; using 10mbs\n");
SelectPage(0x42);
if (dev->if_port == 2) /* enable 10Base2 */
PutByte(XIRCREG42_SWC1, 0xC0);
@@ -1683,8 +1668,8 @@ do_reset(struct net_device *dev, int full)
}
if (full)
- printk(KERN_INFO "%s: media %s, silicon revision %d\n",
- dev->name, if_names[dev->if_port], local->silicon);
+ netdev_info(dev, "media %s, silicon revision %d\n",
+ if_names[dev->if_port], local->silicon);
/* We should switch back to page 0 to avoid a bug in revision 0
* where regs with offset below 8 can't be read after an access
* to the MAC registers */
@@ -1726,8 +1711,7 @@ init_mii(struct net_device *dev)
control = mii_rd(ioaddr, 0, 0);
if (control & 0x0400) {
- printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n",
- dev->name);
+ netdev_notice(dev, "can't take PHY out of isolation mode\n");
local->probe_port = 0;
return 0;
}
@@ -1745,8 +1729,7 @@ init_mii(struct net_device *dev)
}
if (!(status & 0x0020)) {
- printk(KERN_INFO "%s: autonegotiation failed;"
- " using 10mbs\n", dev->name);
+ netdev_info(dev, "autonegotiation failed; using 10mbs\n");
if (!local->new_mii) {
control = 0x0000;
mii_wr(ioaddr, 0, 0, control, 16);
@@ -1756,8 +1739,7 @@ init_mii(struct net_device *dev)
}
} else {
linkpartner = mii_rd(ioaddr, 0, 5);
- printk(KERN_INFO "%s: MII link partner: %04x\n",
- dev->name, linkpartner);
+ netdev_info(dev, "MII link partner: %04x\n", linkpartner);
if (linkpartner & 0x0080) {
dev->if_port = 4;
} else
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c200c282173..aee3bb0358b 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -376,7 +376,7 @@ static void pcnet32_wio_reset(unsigned long addr)
static int pcnet32_wio_check(unsigned long addr)
{
outw(88, addr + PCNET32_WIO_RAP);
- return (inw(addr + PCNET32_WIO_RAP) == 88);
+ return inw(addr + PCNET32_WIO_RAP) == 88;
}
static struct pcnet32_access pcnet32_wio = {
@@ -431,7 +431,7 @@ static void pcnet32_dwio_reset(unsigned long addr)
static int pcnet32_dwio_check(unsigned long addr)
{
outl(88, addr + PCNET32_DWIO_RAP);
- return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
}
static struct pcnet32_access pcnet32_dwio = {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a527e37728c..cb3d13e4e07 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
depends on !S390
- depends on NET_ETHERNET
+ depends on NETDEVICES
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
@@ -58,7 +58,6 @@ config BROADCOM_PHY
config BCM63XX_PHY
tristate "Drivers for Broadcom 63xx SOCs internal PHY"
- depends on BCM63XX
---help---
Currently supports the 6348 and 6358 PHYs.
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index c1281567983..e16f98cb4f0 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -131,7 +131,7 @@ static void __exit bcm63xx_phy_exit(void)
module_init(bcm63xx_phy_init);
module_exit(bcm63xx_phy_exit);
-static struct mdio_device_id bcm63xx_tbl[] = {
+static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
{ 0x002bdc00, 0xfffffc00 },
{ }
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 4accd83d3df..d84c4224dd1 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -930,7 +930,7 @@ static void __exit broadcom_exit(void)
module_init(broadcom_init);
module_exit(broadcom_exit);
-static struct mdio_device_id broadcom_tbl[] = {
+static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
{ PHY_ID_BCM5461, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 1a325d63756..d28173161c2 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -159,7 +159,7 @@ static void __exit cicada_exit(void)
module_init(cicada_init);
module_exit(cicada_exit);
-static struct mdio_device_id cicada_tbl[] = {
+static struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
{ 0x000fc440, 0x000fffc0 },
{ }
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 29c17617a2e..2f774acdb55 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -219,7 +219,7 @@ static void __exit davicom_exit(void)
module_init(davicom_init);
module_exit(davicom_exit);
-static struct mdio_device_id davicom_tbl[] = {
+static struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
{ 0x00181b80, 0x0ffffff0 },
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 13995f52d6a..a8eb19ec318 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -111,7 +111,7 @@ static void __exit et1011c_exit(void)
module_init(et1011c_init);
module_exit(et1011c_exit);
-static struct mdio_device_id et1011c_tbl[] = {
+static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 3f2583f18a3..c1d2d251fe8 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -134,7 +134,7 @@ static void __exit ip175c_exit(void)
module_init(ip175c_init);
module_exit(ip175c_exit);
-static struct mdio_device_id icplus_tbl[] = {
+static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
{ }
};
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 29c39ff85de..6f6e8b616a6 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -223,7 +223,7 @@ static void __exit lxt_exit(void)
module_init(lxt_init);
module_exit(lxt_exit);
-static struct mdio_device_id lxt_tbl[] = {
+static struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
{ 0x001378e0, 0xfffffff0 },
{ 0x00137a10, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0101f2bdf40..ed43c0016c6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -721,7 +721,7 @@ static void __exit marvell_exit(void)
module_init(marvell_init);
module_exit(marvell_exit);
-static struct mdio_device_id marvell_tbl[] = {
+static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ 0x01410c60, 0xfffffff0 },
{ 0x01410c90, 0xfffffff0 },
{ 0x01410cc0, 0xfffffff0 },
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index fc5fef2a817..f62c7b717bc 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -188,7 +188,7 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
-static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
+static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct mdio_gpio_platform_data *pdata;
@@ -224,7 +224,7 @@ out_free:
return -ENODEV;
}
-static int __devexit mdio_ofgpio_remove(struct of_device *ofdev)
+static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
{
mdio_gpio_bus_destroy(&ofdev->dev);
kfree(ofdev->dev.platform_data);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6a6b8199a0d..6c58da2b882 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
*/
- if (phydev->attached_dev)
+ if (phydev->attached_dev && phydev->adjust_link)
phy_stop_machine(phydev);
if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
return ret;
no_resume:
- if (phydev->attached_dev)
+ if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev, NULL);
return 0;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8bb7db676a5..0fd1678bc5a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -231,7 +231,7 @@ MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
-static struct mdio_device_id micrel_tbl[] = {
+static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000fff10 },
{ PHY_ID_KS8001, 0x00fffff0 },
{ PHY_ID_KS8737, 0x00fffff0 },
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index a73ba0bcc0c..0620ba96350 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -151,7 +151,7 @@ MODULE_LICENSE("GPL");
module_init(ns_init);
module_exit(ns_exit);
-static struct mdio_device_id ns_tbl[] = {
+static struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5130db8f5c4..1bb16cb7943 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
- * @mii_data: MII ioctl data
+ * @ifr: &struct ifreq for socket ioctl's
* @cmd: ioctl cmd to execute
*
* Note that this function is currently incompatible with the
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0761197c07..16ddc77313c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->interface = interface;
+ phydev->state = PHY_READY;
+
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 6736b23f1b2..fe0d0a15d5e 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -138,7 +138,7 @@ static void __exit qs6612_exit(void)
module_init(qs6612_init);
module_exit(qs6612_exit);
-static struct mdio_device_id qs6612_tbl[] = {
+static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f567c0e1aaa..a4eae750a41 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -79,7 +79,7 @@ static void __exit realtek_exit(void)
module_init(realtek_init);
module_exit(realtek_exit);
-static struct mdio_device_id realtek_tbl[] = {
+static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
{ }
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 78fa988256f..342505c976d 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -254,7 +254,7 @@ MODULE_LICENSE("GPL");
module_init(smsc_init);
module_exit(smsc_exit);
-static struct mdio_device_id smsc_tbl[] = {
+static struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
{ 0x0007c0c0, 0xfffffff0 },
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 72290099e5e..187a2fa814f 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,7 +132,7 @@ static void __exit ste10Xp_exit(void)
module_init(ste10Xp_init);
module_exit(ste10Xp_exit);
-static struct mdio_device_id ste10Xp_tbl[] = {
+static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
{ STE100P_PHY_ID, 0xffffffff },
{ }
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 45cce50a279..5d8f6e17bd5 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -192,7 +192,7 @@ static void __exit vsc82xx_exit(void)
module_init(vsc82xx_init);
module_exit(vsc82xx_exit);
-static struct mdio_device_id vitesse_tbl[] = {
+static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
{ }
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index ec0349e84a8..ca4df7f4cf2 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -995,8 +995,10 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
- const struct in_device *in_dev = dev->ip_ptr;
+ const struct in_device *in_dev;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
/* Any address will do - we take the first */
const struct in_ifaddr *ifa = in_dev->ifa_list;
@@ -1006,6 +1008,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
}
}
+ rcu_read_unlock();
}
static int
@@ -1088,7 +1091,8 @@ plip_open(struct net_device *dev)
when the device address isn't identical to the address of a
received frame, the kernel incorrectly drops it). */
- if ((in_dev=dev->ip_ptr) != NULL) {
+ in_dev=__in_dev_get_rtnl(dev);
+ if (in_dev) {
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
@@ -1279,7 +1283,6 @@ static void plip_attach (struct parport *port)
if (!nl->pardev) {
printk(KERN_ERR "%s: parport_register failed\n", name);
goto err_free_dev;
- return;
}
plip_init_netdev(dev);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6695a51e09e..866e221643a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
i = 0;
list_for_each_entry(pch, &ppp->channels, clist) {
- navail += pch->avail = (pch->chan != NULL);
- pch->speed = pch->chan->speed;
+ if (pch->chan) {
+ pch->avail = 1;
+ navail++;
+ pch->speed = pch->chan->speed;
+ } else {
+ pch->avail = 0;
+ }
if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
!pch->had_frag) {
@@ -1542,9 +1547,11 @@ ppp_channel_push(struct channel *pch)
* Receive-side routines.
*/
-/* misuse a few fields of the skb for MP reconstruction */
-#define sequence priority
-#define BEbits cb[0]
+struct ppp_mp_skb_parm {
+ u32 sequence;
+ u8 BEbits;
+};
+#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
@@ -1873,13 +1880,13 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
mask = 0xffffff;
}
- skb->BEbits = skb->data[2];
+ PPP_MP_CB(skb)->BEbits = skb->data[2];
skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
/*
* Do protocol ID decompression on the first fragment of each packet.
*/
- if ((skb->BEbits & B) && (skb->data[0] & 1))
+ if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
*skb_push(skb, 1) = 0;
/*
@@ -1891,7 +1898,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
seq += mask + 1;
else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
seq -= mask + 1; /* should never happen */
- skb->sequence = seq;
+ PPP_MP_CB(skb)->sequence = seq;
pch->lastseq = seq;
/*
@@ -1927,8 +1934,8 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
before the start of the queue. */
if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
struct sk_buff *mskb = skb_peek(&ppp->mrq);
- if (seq_before(ppp->minseq, mskb->sequence))
- ppp->minseq = mskb->sequence;
+ if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
+ ppp->minseq = PPP_MP_CB(mskb)->sequence;
}
/* Pull completed packets off the queue and receive them. */
@@ -1958,12 +1965,12 @@ ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
struct sk_buff *p;
struct sk_buff_head *list = &ppp->mrq;
- u32 seq = skb->sequence;
+ u32 seq = PPP_MP_CB(skb)->sequence;
/* N.B. we don't need to lock the list lock because we have the
ppp unit receive-side lock. */
skb_queue_walk(list, p) {
- if (seq_before(seq, p->sequence))
+ if (seq_before(seq, PPP_MP_CB(p)->sequence))
break;
}
__skb_queue_before(list, p, skb);
@@ -1992,22 +1999,22 @@ ppp_mp_reconstruct(struct ppp *ppp)
tail = NULL;
for (p = head; p != (struct sk_buff *) list; p = next) {
next = p->next;
- if (seq_before(p->sequence, seq)) {
+ if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
/* this can't happen, anyway ignore the skb */
printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
- p->sequence, seq);
+ PPP_MP_CB(p)->sequence, seq);
head = next;
continue;
}
- if (p->sequence != seq) {
+ if (PPP_MP_CB(p)->sequence != seq) {
/* Fragment `seq' is missing. If it is after
minseq, it might arrive later, so stop here. */
if (seq_after(seq, minseq))
break;
/* Fragment `seq' is lost, keep going. */
lost = 1;
- seq = seq_before(minseq, p->sequence)?
- minseq + 1: p->sequence;
+ seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
+ minseq + 1: PPP_MP_CB(p)->sequence;
next = p;
continue;
}
@@ -2021,7 +2028,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
*/
/* B bit set indicates this fragment starts a packet */
- if (p->BEbits & B) {
+ if (PPP_MP_CB(p)->BEbits & B) {
head = p;
lost = 0;
len = 0;
@@ -2030,7 +2037,8 @@ ppp_mp_reconstruct(struct ppp *ppp)
len += p->len;
/* Got a complete packet yet? */
- if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) {
+ if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
+ (PPP_MP_CB(head)->BEbits & B)) {
if (len > ppp->mrru + 2) {
++ppp->dev->stats.rx_length_errors;
printk(KERN_DEBUG "PPP: reconstructed packet"
@@ -2056,7 +2064,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
* and we haven't found a complete valid packet yet,
* we can discard up to and including this fragment.
*/
- if (p->BEbits & E)
+ if (PPP_MP_CB(p)->BEbits & E)
head = next;
++seq;
@@ -2066,10 +2074,11 @@ ppp_mp_reconstruct(struct ppp *ppp)
if (tail != NULL) {
/* If we have discarded any fragments,
signal a receive error. */
- if (head->sequence != ppp->nextseq) {
+ if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
if (ppp->debug & 1)
printk(KERN_DEBUG " missed pkts %u..%u\n",
- ppp->nextseq, head->sequence-1);
+ ppp->nextseq,
+ PPP_MP_CB(head)->sequence-1);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
}
@@ -2078,7 +2087,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
/* copy to a single skb */
for (p = head; p != tail->next; p = p->next)
skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
- ppp->nextseq = tail->sequence + 1;
+ ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
head = tail->next;
}
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index c07de359dc0..d72fb0519a2 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1124,7 +1124,7 @@ static const struct proto_ops pppoe_ops = {
.ioctl = pppox_ioctl,
};
-static struct pppox_proto pppoe_proto = {
+static const struct pppox_proto pppoe_proto = {
.create = pppoe_create,
.ioctl = pppoe_ioctl,
.owner = THIS_MODULE,
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index d4191ef9cad..8c0d170dabc 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -36,9 +36,9 @@
#include <asm/uaccess.h>
-static struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
+static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
-int register_pppox_proto(int proto_num, struct pppox_proto *pp)
+int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
{
if (proto_num < 0 || proto_num > PX_MAX_PROTO)
return -EINVAL;
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
new file mode 100644
index 00000000000..ccbc91326bf
--- /dev/null
+++ b/drivers/net/pptp.c
@@ -0,0 +1,726 @@
+/*
+ * Point-to-Point Tunneling Protocol for Linux
+ *
+ * Authors: Dmitry Kozlov <xeb@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_pppox.h>
+#include <linux/if_ppp.h>
+#include <linux/notifier.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/version.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/gre.h>
+
+#include <linux/uaccess.h>
+
+#define PPTP_DRIVER_VERSION "0.8.5"
+
+#define MAX_CALLID 65535
+
+static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
+static struct pppox_sock **callid_sock;
+
+static DEFINE_SPINLOCK(chan_lock);
+
+static struct proto pptp_sk_proto __read_mostly;
+static const struct ppp_channel_ops pptp_chan_ops;
+static const struct proto_ops pptp_ops;
+
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define MISSING_WINDOW 20
+#define WRAPPED(curseq, lastseq)\
+ ((((curseq) & 0xffffff00) == 0) &&\
+ (((lastseq) & 0xffffff00) == 0xffffff00))
+
+#define PPTP_GRE_PROTO 0x880B
+#define PPTP_GRE_VER 0x1
+
+#define PPTP_GRE_FLAG_C 0x80
+#define PPTP_GRE_FLAG_R 0x40
+#define PPTP_GRE_FLAG_K 0x20
+#define PPTP_GRE_FLAG_S 0x10
+#define PPTP_GRE_FLAG_A 0x80
+
+#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
+#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
+#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
+#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
+#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
+
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+struct pptp_gre_header {
+ u8 flags;
+ u8 ver;
+ u16 protocol;
+ u16 payload_len;
+ u16 call_id;
+ u32 seq;
+ u32 ack;
+} __packed;
+
+static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
+{
+ struct pppox_sock *sock;
+ struct pptp_opt *opt;
+
+ rcu_read_lock();
+ sock = rcu_dereference(callid_sock[call_id]);
+ if (sock) {
+ opt = &sock->proto.pptp;
+ if (opt->dst_addr.sin_addr.s_addr != s_addr)
+ sock = NULL;
+ else
+ sock_hold(sk_pppox(sock));
+ }
+ rcu_read_unlock();
+
+ return sock;
+}
+
+static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+{
+ struct pppox_sock *sock;
+ struct pptp_opt *opt;
+ int i;
+
+ rcu_read_lock();
+ for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
+ i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+ opt = &sock->proto.pptp;
+ if (opt->dst_addr.call_id == call_id &&
+ opt->dst_addr.sin_addr.s_addr == d_addr)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i < MAX_CALLID;
+}
+
+static int add_chan(struct pppox_sock *sock)
+{
+ static int call_id;
+
+ spin_lock(&chan_lock);
+ if (!sock->proto.pptp.src_addr.call_id) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ if (call_id == MAX_CALLID) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ if (call_id == MAX_CALLID)
+ goto out_err;
+ }
+ sock->proto.pptp.src_addr.call_id = call_id;
+ } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+ goto out_err;
+
+ set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+ rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+ spin_unlock(&chan_lock);
+
+ return 0;
+
+out_err:
+ spin_unlock(&chan_lock);
+ return -1;
+}
+
+static void del_chan(struct pppox_sock *sock)
+{
+ spin_lock(&chan_lock);
+ clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+ rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+ spin_unlock(&chan_lock);
+ synchronize_rcu();
+}
+
+static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *) chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ struct pptp_gre_header *hdr;
+ unsigned int header_len = sizeof(*hdr);
+ int err = 0;
+ int islcp;
+ int len;
+ unsigned char *data;
+ __u32 seq_recv;
+
+
+ struct rtable *rt;
+ struct net_device *tdev;
+ struct iphdr *iph;
+ int max_headroom;
+
+ if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+ goto tx_error;
+
+ {
+ struct flowi fl = { .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = opt->dst_addr.sin_addr.s_addr,
+ .saddr = opt->src_addr.sin_addr.s_addr,
+ .tos = RT_TOS(0) } },
+ .proto = IPPROTO_GRE };
+ err = ip_route_output_key(&init_net, &rt, &fl);
+ if (err)
+ goto tx_error;
+ }
+ tdev = rt->dst.dev;
+
+ max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
+
+ if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+ struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (!new_skb) {
+ ip_rt_put(rt);
+ goto tx_error;
+ }
+ if (skb->sk)
+ skb_set_owner_w(new_skb, skb->sk);
+ kfree_skb(skb);
+ skb = new_skb;
+ }
+
+ data = skb->data;
+ islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+ /* compress protocol field */
+ if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
+ skb_pull(skb, 1);
+
+ /* Put in the address/control bytes if necessary */
+ if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
+ data = skb_push(skb, 2);
+ data[0] = PPP_ALLSTATIONS;
+ data[1] = PPP_UI;
+ }
+
+ len = skb->len;
+
+ seq_recv = opt->seq_recv;
+
+ if (opt->ack_sent == seq_recv)
+ header_len -= sizeof(hdr->ack);
+
+ /* Push down and install GRE header */
+ skb_push(skb, header_len);
+ hdr = (struct pptp_gre_header *)(skb->data);
+
+ hdr->flags = PPTP_GRE_FLAG_K;
+ hdr->ver = PPTP_GRE_VER;
+ hdr->protocol = htons(PPTP_GRE_PROTO);
+ hdr->call_id = htons(opt->dst_addr.call_id);
+
+ hdr->flags |= PPTP_GRE_FLAG_S;
+ hdr->seq = htonl(++opt->seq_sent);
+ if (opt->ack_sent != seq_recv) {
+ /* send ack with this message */
+ hdr->ver |= PPTP_GRE_FLAG_A;
+ hdr->ack = htonl(seq_recv);
+ opt->ack_sent = seq_recv;
+ }
+ hdr->payload_len = htons(len);
+
+ /* Push down and install the IP header. */
+
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*iph));
+ skb_reset_network_header(skb);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ if (ip_dont_fragment(sk, &rt->dst))
+ iph->frag_off = htons(IP_DF);
+ else
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_GRE;
+ iph->tos = 0;
+ iph->daddr = rt->rt_dst;
+ iph->saddr = rt->rt_src;
+ iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+ iph->tot_len = htons(skb->len);
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+
+ nf_reset(skb);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(iph, &rt->dst, NULL);
+ ip_send_check(iph);
+
+ ip_local_out(skb);
+
+tx_error:
+ return 1;
+}
+
+static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ int headersize, payload_len, seq;
+ __u8 *payload;
+ struct pptp_gre_header *header;
+
+ if (!(sk->sk_state & PPPOX_CONNECTED)) {
+ if (sock_queue_rcv_skb(sk, skb))
+ goto drop;
+ return NET_RX_SUCCESS;
+ }
+
+ header = (struct pptp_gre_header *)(skb->data);
+
+ /* test if acknowledgement present */
+ if (PPTP_GRE_IS_A(header->ver)) {
+ __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
+ header->ack : header->seq; /* ack in different place if S = 0 */
+
+ ack = ntohl(ack);
+
+ if (ack > opt->ack_recv)
+ opt->ack_recv = ack;
+ /* also handle sequence number wrap-around */
+ if (WRAPPED(ack, opt->ack_recv))
+ opt->ack_recv = ack;
+ }
+
+ /* test if payload present */
+ if (!PPTP_GRE_IS_S(header->flags))
+ goto drop;
+
+ headersize = sizeof(*header);
+ payload_len = ntohs(header->payload_len);
+ seq = ntohl(header->seq);
+
+ /* no ack present? */
+ if (!PPTP_GRE_IS_A(header->ver))
+ headersize -= sizeof(header->ack);
+ /* check for incomplete packet (length smaller than expected) */
+ if (skb->len - headersize < payload_len)
+ goto drop;
+
+ payload = skb->data + headersize;
+ /* check for expected sequence number */
+ if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
+ if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
+ (PPP_PROTOCOL(payload) == PPP_LCP) &&
+ ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
+ goto allow_packet;
+ } else {
+ opt->seq_recv = seq;
+allow_packet:
+ skb_pull(skb, headersize);
+
+ if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto drop;
+ skb_pull(skb, 2);
+ }
+
+ if ((*skb->data) & 1) {
+ /* protocol is compressed */
+ skb_push(skb, 1)[0] = 0;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_set_network_header(skb, skb->head-skb->data);
+ ppp_input(&po->chan, skb);
+
+ return NET_RX_SUCCESS;
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pptp_rcv(struct sk_buff *skb)
+{
+ struct pppox_sock *po;
+ struct pptp_gre_header *header;
+ struct iphdr *iph;
+
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ iph = ip_hdr(skb);
+
+ header = (struct pptp_gre_header *)skb->data;
+
+ if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
+ PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
+ PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
+ !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
+ (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
+ /* if invalid, discard this packet */
+ goto drop;
+
+ po = lookup_chan(htons(header->call_id), iph->saddr);
+ if (po) {
+ skb_dst_drop(skb);
+ nf_reset(skb);
+ return sk_receive_skb(sk_pppox(po), skb, 0);
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ int error = 0;
+
+ lock_sock(sk);
+
+ opt->src_addr = sp->sa_addr.pptp;
+ if (add_chan(po)) {
+ release_sock(sk);
+ error = -EBUSY;
+ }
+
+ release_sock(sk);
+ return error;
+}
+
+static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ struct rtable *rt;
+ int error = 0;
+
+ if (sp->sa_protocol != PX_PROTO_PPTP)
+ return -EINVAL;
+
+ if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
+ return -EALREADY;
+
+ lock_sock(sk);
+ /* Check for already bound sockets */
+ if (sk->sk_state & PPPOX_CONNECTED) {
+ error = -EBUSY;
+ goto end;
+ }
+
+ /* Check for already disconnected sockets, on attempts to disconnect */
+ if (sk->sk_state & PPPOX_DEAD) {
+ error = -EALREADY;
+ goto end;
+ }
+
+ if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
+ error = -EINVAL;
+ goto end;
+ }
+
+ po->chan.private = sk;
+ po->chan.ops = &pptp_chan_ops;
+
+ {
+ struct flowi fl = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = opt->dst_addr.sin_addr.s_addr,
+ .saddr = opt->src_addr.sin_addr.s_addr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .proto = IPPROTO_GRE };
+ security_sk_classify_flow(sk, &fl);
+ if (ip_route_output_key(&init_net, &rt, &fl)) {
+ error = -EHOSTUNREACH;
+ goto end;
+ }
+ sk_setup_caps(sk, &rt->dst);
+ }
+ po->chan.mtu = dst_mtu(&rt->dst);
+ if (!po->chan.mtu)
+ po->chan.mtu = PPP_MTU;
+ ip_rt_put(rt);
+ po->chan.mtu -= PPTP_HEADER_OVERHEAD;
+
+ po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+ error = ppp_register_channel(&po->chan);
+ if (error) {
+ pr_err("PPTP: failed to register PPP channel (%d)\n", error);
+ goto end;
+ }
+
+ opt->dst_addr = sp->sa_addr.pptp;
+ sk->sk_state = PPPOX_CONNECTED;
+
+ end:
+ release_sock(sk);
+ return error;
+}
+
+static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = sizeof(struct sockaddr_pppox);
+ struct sockaddr_pppox sp;
+
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_PPTP;
+ sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
+
+ memcpy(uaddr, &sp, len);
+
+ *usockaddr_len = len;
+
+ return 0;
+}
+
+static int pptp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po;
+ struct pptp_opt *opt;
+ int error = 0;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ po = pppox_sk(sk);
+ opt = &po->proto.pptp;
+ del_chan(po);
+
+ pppox_unbind_sock(sk);
+ sk->sk_state = PPPOX_DEAD;
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ release_sock(sk);
+ sock_put(sk);
+
+ return error;
+}
+
+static void pptp_sock_destruct(struct sock *sk)
+{
+ if (!(sk->sk_state & PPPOX_DEAD)) {
+ del_chan(pppox_sk(sk));
+ pppox_unbind_sock(sk);
+ }
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static int pptp_create(struct net *net, struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+ struct pppox_sock *po;
+ struct pptp_opt *opt;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pptp_ops;
+
+ sk->sk_backlog_rcv = pptp_rcv_core;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_protocol = PX_PROTO_PPTP;
+ sk->sk_destruct = pptp_sock_destruct;
+
+ po = pppox_sk(sk);
+ opt = &po->proto.pptp;
+
+ opt->seq_sent = 0; opt->seq_recv = 0;
+ opt->ack_recv = 0; opt->ack_sent = 0;
+
+ error = 0;
+out:
+ return error;
+}
+
+static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = (struct sock *) chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int err, val;
+
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGFLAGS:
+ val = opt->ppp_flags;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSFLAGS:
+ if (get_user(val, p))
+ break;
+ opt->ppp_flags = val & ~SC_RCV_BITS;
+ err = 0;
+ break;
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+static const struct ppp_channel_ops pptp_chan_ops = {
+ .start_xmit = pptp_xmit,
+ .ioctl = pptp_ppp_ioctl,
+};
+
+static struct proto pptp_sk_proto __read_mostly = {
+ .name = "PPTP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static const struct proto_ops pptp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pptp_release,
+ .bind = pptp_bind,
+ .connect = pptp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pptp_getname,
+ .poll = sock_no_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static const struct pppox_proto pppox_pptp_proto = {
+ .create = pptp_create,
+ .owner = THIS_MODULE,
+};
+
+static const struct gre_protocol gre_pptp_protocol = {
+ .handler = pptp_rcv,
+};
+
+static int __init pptp_init_module(void)
+{
+ int err = 0;
+ pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
+
+ callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ if (!callid_sock) {
+ pr_err("PPTP: cann't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+ if (err) {
+ pr_err("PPTP: can't add gre protocol\n");
+ goto out_mem_free;
+ }
+
+ err = proto_register(&pptp_sk_proto, 0);
+ if (err) {
+ pr_err("PPTP: can't register sk_proto\n");
+ goto out_gre_del_protocol;
+ }
+
+ err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
+ if (err) {
+ pr_err("PPTP: can't register pppox_proto\n");
+ goto out_unregister_sk_proto;
+ }
+
+ return 0;
+
+out_unregister_sk_proto:
+ proto_unregister(&pptp_sk_proto);
+out_gre_del_protocol:
+ gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+out_mem_free:
+ vfree(callid_sock);
+
+ return err;
+}
+
+static void __exit pptp_exit_module(void)
+{
+ unregister_pppox_proto(PX_PROTO_PPTP);
+ proto_unregister(&pptp_sk_proto);
+ gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+ vfree(callid_sock);
+}
+
+module_init(pptp_init_module);
+module_exit(pptp_exit_module);
+
+MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 87d6b8f3630..5526ab4895e 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -956,9 +956,9 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
(!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* update netdevice statistics */
netdev->stats.rx_packets++;
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 43b8d7797f0..4a624a29393 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -85,12 +85,12 @@ static const int bitrate_list[] = {
*/
static inline int wpa2_capable(void)
{
- return (0 <= ps3_compare_firmware_version(2, 0, 0));
+ return 0 <= ps3_compare_firmware_version(2, 0, 0);
}
static inline int precise_ie(void)
{
- return (0 <= ps3_compare_firmware_version(2, 2, 0));
+ return 0 <= ps3_compare_firmware_version(2, 2, 0);
}
/*
* post_eurus_cmd helpers
@@ -506,7 +506,7 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
start[1] = (buf - start - 2);
pr_debug("%s: ->\n", __func__);
- return (buf - start);
+ return buf - start;
}
struct ie_item {
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644
index 00000000000..18c0297743f
--- /dev/null
+++ b/drivers/net/pxa168_eth.c
@@ -0,0 +1,1664 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Sachin Sanap <ssanap@marvell.com>
+ * Zhangfei Gao <zgao6@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS 0x0000
+#define SMI 0x0010
+#define PORT_CONFIG 0x0400
+#define PORT_CONFIG_EXT 0x0408
+#define PORT_COMMAND 0x0410
+#define PORT_STATUS 0x0418
+#define HTPR 0x0428
+#define SDMA_CONFIG 0x0440
+#define SDMA_CMD 0x0448
+#define INT_CAUSE 0x0450
+#define INT_W_CLEAR 0x0454
+#define INT_MASK 0x0458
+#define ETH_F_RX_DESC_0 0x0480
+#define ETH_C_RX_DESC_0 0x04A0
+#define ETH_C_TX_DESC_1 0x04E4
+
+/* smi register */
+#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
+#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
+#define SMI_OP_W (0 << 26) /* Write operation */
+#define SMI_OP_R (1 << 26) /* Read operation */
+
+#define PHY_WAIT_ITERATIONS 10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT 0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT (1 << 23)
+#define RX_FIRST_DESC (1 << 17)
+#define RX_LAST_DESC (1 << 16)
+#define RX_ERROR (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT (1 << 23)
+#define TX_GEN_CRC (1 << 22)
+#define TX_ZERO_PADDING (1 << 18)
+#define TX_FIRST_DESC (1 << 17)
+#define TX_LAST_DESC (1 << 16)
+#define TX_ERROR (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT (1 << 31)
+#define SDMA_CMD_TXDL (1 << 24)
+#define SDMA_CMD_TXDH (1 << 23)
+#define SDMA_CMD_AR (1 << 15)
+#define SDMA_CMD_ERD (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS (1 << 12)
+#define PCR_EN (1 << 7)
+#define PCR_PM (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM (1 << 28)
+#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_MFL_1518 (0 << 14)
+#define PCXR_MFL_1536 (1 << 14)
+#define PCXR_MFL_2048 (2 << 14)
+#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLP (1 << 11)
+#define PCXR_PRIO_TX_OFF 3
+#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF 12
+#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR (1 << 6)
+#define SDCR_BLMT (1 << 7)
+#define SDCR_RIFB (1 << 9)
+#define SDCR_RC_OFF 2
+#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg;
+ * the Interrupt Mask Reg uses the same bit layout.
+ */
+#define ICR_RXBUF (1 << 0)
+#define ICR_TXBUF_H (1 << 2)
+#define ICR_TXBUF_L (1 << 3)
+#define ICR_TXEND_H (1 << 6)
+#define ICR_TXEND_L (1 << 7)
+#define ICR_RXERR (1 << 8)
+#define ICR_TXERR_H (1 << 10)
+#define ICR_TXERR_L (1 << 11)
+#define ICR_TX_UDR (1 << 13)
+#define ICR_MII_CH (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
+ ICR_TXERR_H | ICR_TXERR_L |\
+ ICR_TXEND_H | ICR_TXEND_L |\
+ ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+
+#define NUM_RX_DESCS 64
+#define NUM_TX_DESCS 64
+
+#define HASH_ADD 0
+#define HASH_DELETE 1
+#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER 12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100 (1 << 0)
+#define FULL_DUPLEX (1 << 1)
+#define FLOW_CONTROL_ENABLED (1 << 2)
+#define LINK_UP (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK (1 << 0)
+#define WORK_TX_DONE (1 << 1)
+
+/*
+ * Misc definitions.
+ */
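+/* Extra headroom so skb->data can be re-aligned to a cache line boundary */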
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 reserved;
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+
+struct pxa168_eth_private {
+ int port_num; /* User Ethernet port number */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+
+ struct rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ u8 work_todo;
+ int skb_size;
+
+ struct net_device_stats stats;
+ /* Size of Tx Ring per queue */
+ int tx_ring_size;
+ /* Number of tx descriptors in use */
+ int tx_desc_count;
+ /* Size of Rx Ring per queue */
+ int rx_ring_size;
+ /* Number of rx descriptors in use */
+ int rx_desc_count;
+
+ /*
+ * Used in case RX Ring is empty, which can occur when
+ * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ struct mii_bus *smi_bus;
+ struct phy_device *phy;
+
+ /* clock */
+ struct clk *clk;
+ struct pxa168_eth_platform_data *pd;
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /* Pointer to the hardware address filter table */
+ void *htpr;
+ dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ HASH_ENTRY_VALID = 1,
+ SKIP = 2,
+ HASH_ENTRY_RECEIVE_DISCARD = 4,
+ HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+ return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+ writel(data, pep->base + offset);
+}
+
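+/* Abort any in-flight Rx/Tx DMA and poll until the abort bits clear */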
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+ int delay;
+ int max_retries = 40;
+
+ do {
+ wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+ udelay(100);
+
+ delay = 10;
+ while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+ && delay-- > 0) {
+ udelay(10);
+ }
+ } while (max_retries-- > 0 && delay <= 0);
+
+ if (max_retries <= 0)
+		printk(KERN_ERR "%s: DMA stuck\n", __func__);
+}
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+ unsigned int reg_data;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+
+ return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * pep->port_num;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+ int data;
+
+ data = phy_read(pep->phy, MII_BMCR);
+ if (data < 0)
+ return;
+
+ data |= BMCR_RESET;
+ if (phy_write(pep->phy, MII_BMCR, data) < 0)
+ return;
+
+ do {
+ data = phy_read(pep->phy, MII_BMCR);
+ } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_desc *p_used_rx_desc;
+ int used_rx_desc;
+
+ while (pep->rx_desc_count < pep->rx_ring_size) {
+ int size;
+
+ skb = dev_alloc_skb(pep->skb_size);
+ if (!skb)
+ break;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ pep->rx_desc_count++;
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = pep->rx_used_desc_q;
+ p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+ size = skb->end - skb->data;
+ p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+ skb->data,
+ size,
+ DMA_FROM_DEVICE);
+ p_used_rx_desc->buf_size = size;
+ pep->rx_skb[used_rx_desc] = skb;
+
+ /* Return the descriptor to DMA ownership */
+ wmb();
+ p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+ wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ pep->rx_resource_err = 0;
+
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
+ }
+
+ /*
+	 * If the RX ring is out of SKBs, set a timer to retry the
+	 * allocation at a later time.
+ */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
+
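+/* Timer callback: schedule NAPI so the poll loop calls rxq_refill() again */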
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+ struct pxa168_eth_private *pep = (void *)data;
+ napi_schedule(&pep->napi);
+}
+
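+/* Reverse the order of the bits within each nibble of x */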
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Calculate the hash table index for the given MAC address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * return the calculated table entry index.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+	/* Make a copy of the MAC address since we are going to perform
+	 * bit operations on it
+ */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
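+	/* Fold to an 11-bit index: the 16kB table holds 2048 8-byte entries */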
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Add or delete an entry in the address table.
+ * Inputs
+ * pep - ETHERNET port private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry that is part
+ *        of a chain in the hash table. We can't simply delete the entry,
+ *        since that would break the chain; instead the tables need to be
+ *        defragmented from time to time.
+ * rd - 0 Discard packet upon match.
+ *    - 1 Receive packet upon match.
+ * del - 0 add the entry, 1 delete it.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if the table is full.
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+ unsigned char *mac_addr,
+ u32 rd, u32 skip, int del)
+{
+ struct addr_table_entry *entry, *start;
+ u32 new_high;
+ u32 new_low;
+ u32 i;
+
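+	/* Pack the MAC nibbles and the valid/skip/rd bits into the table entry */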
+ new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+ | (((mac_addr[1] >> 0) & 0xf) << 11)
+ | (((mac_addr[0] >> 4) & 0xf) << 7)
+ | (((mac_addr[0] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 4) & 0x1) << 31)
+ | (((mac_addr[3] >> 0) & 0xf) << 27)
+ | (((mac_addr[2] >> 4) & 0xf) << 23)
+ | (((mac_addr[2] >> 0) & 0xf) << 19)
+ | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+ | HASH_ENTRY_VALID;
+
+ new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+ | (((mac_addr[5] >> 0) & 0xf) << 11)
+ | (((mac_addr[4] >> 4) & 0xf) << 7)
+ | (((mac_addr[4] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+ /*
+ * Pick the appropriate table, start scanning for free/reusable
+ * entries at the index obtained by hashing the specified MAC address
+ */
+ start = (struct addr_table_entry *)(pep->htpr);
+ entry = start + hash_function(mac_addr);
+ for (i = 0; i < HOP_NUMBER; i++) {
+ if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+ break;
+ } else {
+ /* if same address put in same position */
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+ (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) == new_high)) {
+ break;
+ }
+ }
+ if (entry == start + 0x7ff)
+ entry = start;
+ else
+ entry++;
+ }
+
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) != new_high) && del)
+ return 0;
+
+ if (i == HOP_NUMBER) {
+ if (!del) {
+ printk(KERN_INFO "%s: table section is full, need to "
+ "move to 16kB implementation?\n",
+ __FILE__);
+ return -ENOSPC;
+ } else
+ return 0;
+ }
+
+ /*
+ * Update the selected entry
+ */
+ if (del) {
+ entry->hi = 0;
+ entry->lo = 0;
+ } else {
+ entry->hi = cpu_to_le32(new_high);
+ entry->lo = cpu_to_le32(new_low);
+ }
+
+ return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Create an address table entry from the MAC address info
+ * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+ unsigned char *oaddr,
+ unsigned char *addr)
+{
+ /* Delete old entry */
+ if (oaddr)
+ add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+ /* Add new entry */
+ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+ /*
+	 * The hardware expects the CPU to build a hash table based on a
+	 * predefined hash function and to populate it from the hardware
+	 * address. The location of the hash table is identified by a 32-bit
+	 * pointer stored in the HTPR internal register. Two possible sizes
+	 * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
+	 * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
+	 * currently only support the 1/2kB table.
+ */
+	/* TODO: Add support for the 8kB hash table and the alternative hash
+	 * function. The driver can dynamically switch to them if the 1/2kB
+	 * hash table is full.
+ */
+ if (pep->htpr == NULL) {
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
+ if (pep->htpr == NULL)
+ return -ENOMEM;
+ }
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ wrl(pep, HTPR, pep->htpr_dma);
+ return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 val;
+
+ val = rdl(pep, PORT_CONFIG);
+ if (dev->flags & IFF_PROMISC)
+ val |= PCR_PM;
+ else
+ val &= ~PCR_PM;
+ wrl(pep, PORT_CONFIG, val);
+
+ /*
+	 * Remove the old list of MAC addresses and add dev->dev_addr
+	 * and the multicast addresses.
+ */
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned char oldMac[ETH_ALEN];
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+ memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ netif_addr_lock_bh(dev);
+ update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+ unsigned int val = 0;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int tx_curr_desc, rx_curr_desc;
+
+ /* Perform PHY reset, if there is a PHY. */
+ if (pep->phy != NULL) {
+ struct ethtool_cmd cmd;
+
+ pxa168_get_settings(pep->dev, &cmd);
+ ethernet_phy_reset(pep);
+ pxa168_set_settings(pep->dev, &cmd);
+ }
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = pep->tx_curr_desc_q;
+ wrl(pep, ETH_C_TX_DESC_1,
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = pep->rx_curr_desc_q;
+ wrl(pep, ETH_C_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ wrl(pep, ETH_F_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Enable all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, ALL_INTS);
+
+ val = rdl(pep, PORT_CONFIG);
+ val |= PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ /* Start RX DMA engine */
+ val = rdl(pep, SDMA_CMD);
+ val |= SDMA_CMD_ERD;
+ wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int val = 0;
+
+ /* Stop all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, 0);
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Stop RX DMA */
+ val = rdl(pep, SDMA_CMD);
+ val &= ~SDMA_CMD_ERD; /* abort dma command */
+
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+
+ /* Disable port */
+ val = rdl(pep, PORT_CONFIG);
+ val &= ~PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
+
+ netif_tx_lock(dev);
+
+ pep->work_todo &= ~WORK_TX_DONE;
+ while (pep->tx_desc_count > 0) {
+ tx_index = pep->tx_used_desc_q;
+ desc = &pep->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+ if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+ if (released > 0) {
+ goto txq_reclaim_end;
+ } else {
+ released = -1;
+ goto txq_reclaim_end;
+ }
+ }
+ pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+ pep->tx_desc_count--;
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = pep->tx_skb[tx_index];
+ if (skb)
+ pep->tx_skb[tx_index] = NULL;
+
+ if (cmd_sts & TX_ERROR) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error in TX\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+ dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ released++;
+ }
+txq_reclaim_end:
+ netif_tx_unlock(dev);
+ return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout desc_count %d\n",
+ dev->name, pep->tx_desc_count);
+
+ schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+ struct pxa168_eth_private *pep = container_of(work,
+ struct pxa168_eth_private,
+ tx_timeout_task);
+ struct net_device *dev = pep->dev;
+ pxa168_eth_stop(dev);
+ pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+
+ while (budget-- > 0) {
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (pep->rx_resource_err)
+ break;
+ rx_curr_desc = pep->rx_curr_desc_q;
+ rx_used_desc = pep->rx_used_desc_q;
+ rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+ cmd_sts = rx_desc->cmd_sts;
+ rmb();
+ if (cmd_sts & (BUF_OWNED_BY_DMA))
+ break;
+ skb = pep->rx_skb[rx_curr_desc];
+ pep->rx_skb[rx_curr_desc] = NULL;
+
+ rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+ pep->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. */
+ /* Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ pep->rx_resource_err = 1;
+ pep->rx_desc_count--;
+ dma_unmap_single(NULL, rx_desc->buf_ptr,
+ rx_desc->buf_size,
+ DMA_FROM_DEVICE);
+ received_packets++;
+ /*
+ * Update statistics.
+		 * Note the byte count includes the 4-byte CRC.
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += rx_desc->byte_cnt;
+ /*
+		 * If a packet was received without the first/last bits set,
+		 * or with the error summary bit set, the packet needs to be
+		 * dropped.
+ */
+ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC))
+ || (cmd_sts & RX_ERROR)) {
+
+ stats->rx_dropped++;
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Rx pkt on multiple desc\n",
+ dev->name);
+ }
+ if (cmd_sts & RX_ERROR)
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, rx_desc->byte_cnt - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ }
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+ struct net_device *dev)
+{
+ u32 icr;
+ int ret = 0;
+
+ icr = rdl(pep, INT_CAUSE);
+ if (icr == 0)
+ return IRQ_NONE;
+
+ wrl(pep, INT_CAUSE, ~icr);
+ if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+ pep->work_todo |= WORK_TX_DONE;
+ ret = 1;
+ }
+ if (icr & ICR_RXBUF)
+ ret = 1;
+ if (icr & ICR_MII_CH) {
+ pep->work_todo |= WORK_LINK;
+ ret = 1;
+ }
+ return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+ struct net_device *dev = pep->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdl(pep, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ txq_reclaim(dev, 1);
+ }
+ return;
+ }
+ if (port_status & PORT_SPEED_100)
+ speed = 100;
+ else
+ speed = 10;
+
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+ return IRQ_NONE;
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ napi_schedule(&pep->napi);
+ return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = pep->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ pep->skb_size = (skb_size + 7) & ~7;
+
+ /*
+	 * If NET_SKB_PAD is smaller than a cache line,
+	 * netdev_alloc_skb() will leave skb->data unaligned with
+	 * respect to a cache line boundary. If this is the case,
+	 * include some extra space to allow re-aligning the data area.
+ */
+ pep->skb_size += SKB_DMA_REALIGN;
+
+}
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ pxa168_eth_recalc_skb_size(pep);
+ if (pep->skb_size <= 1518)
+ skb_size = PCXR_MFL_1518;
+ else if (pep->skb_size <= 1536)
+ skb_size = PCXR_MFL_1536;
+ else if (pep->skb_size <= 2048)
+ skb_size = PCXR_MFL_2048;
+ else
+ skb_size = PCXR_MFL_64K;
+
+ /* Extended Port Configuration */
+ wrl(pep,
+ PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ PCXR_DSCP_EN | /* Enable DSCP in IP */
+ skb_size | PCXR_FLP | /* do not force link pass */
+ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
+
+ return 0;
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+ int err = 0;
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+ /* Initialize address hash table */
+ err = init_hash_table(pep);
+ if (err)
+ return err;
+ /* SDMA configuration */
+ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
+ SDCR_RIFB | /* Rx interrupt on frame */
+ SDCR_BLMT | /* Little endian transmit */
+ SDCR_BLMR | /* Little endian receive */
+ SDCR_RC_MAX_RETRANS); /* Max retransmit count */
+ /* Port Configuration */
+ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
+ set_port_config_ext(pep);
+
+ return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct rx_desc *p_rx_desc;
+ int size = 0, i = 0;
+ int rx_desc_num = pep->rx_ring_size;
+
+ /* Allocate RX skb rings */
+ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+ GFP_KERNEL);
+ if (!pep->rx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate RX ring */
+ pep->rx_desc_count = 0;
+ size = pep->rx_ring_size * sizeof(struct rx_desc);
+ pep->rx_desc_area_size = size;
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma, GFP_KERNEL);
+ if (!pep->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_rx_desc_area, 0, size);
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+ }
+ /* Save Rx desc pointer to driver struct. */
+ pep->rx_curr_desc_q = 0;
+ pep->rx_used_desc_q = 0;
+ pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+ return 0;
+out:
+ kfree(pep->rx_skb);
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int curr;
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+ if (pep->rx_skb[curr]) {
+ dev_kfree_skb(pep->rx_skb[curr]);
+ pep->rx_desc_count--;
+ }
+ }
+ if (pep->rx_desc_count)
+ printk(KERN_ERR
+		       "Error in freeing Rx ring: %d skbs still in use\n",
+ pep->rx_desc_count);
+ /* Free RX ring */
+ if (pep->p_rx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+ pep->p_rx_desc_area, pep->rx_desc_dma);
+ kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *p_tx_desc;
+ int size = 0, i = 0;
+ int tx_desc_num = pep->tx_ring_size;
+
+ pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+ GFP_KERNEL);
+ if (!pep->tx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate TX ring */
+ pep->tx_desc_count = 0;
+ size = pep->tx_ring_size * sizeof(struct tx_desc);
+ pep->tx_desc_area_size = size;
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma, GFP_KERNEL);
+ if (!pep->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+ }
+ pep->tx_curr_desc_q = 0;
+ pep->tx_used_desc_q = 0;
+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+ return 0;
+out:
+ kfree(pep->tx_skb);
+ return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ /* Free outstanding skb's on TX ring */
+ txq_reclaim(dev, 1);
+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+ /* Free TX ring */
+ if (pep->p_tx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+ pep->p_tx_desc_area, pep->tx_desc_dma);
+ kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = request_irq(dev->irq, pxa168_eth_int_handler,
+ IRQF_DISABLED, dev->name, dev);
+ if (err) {
+ dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+ pep->rx_resource_err = 0;
+ err = rxq_init(dev);
+ if (err != 0)
+ goto out_free_irq;
+ err = txq_init(dev);
+ if (err != 0)
+ goto out_free_rx_skb;
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+ netif_carrier_off(dev);
+ eth_port_start(dev);
+ napi_enable(&pep->napi);
+ return 0;
+out_free_rx_skb:
+ rxq_deinit(dev);
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ eth_port_reset(dev);
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ napi_disable(&pep->napi);
+ del_timer_sync(&pep->timeout);
+ netif_carrier_off(dev);
+ free_irq(dev->irq, dev);
+ rxq_deinit(dev);
+ txq_deinit(dev);
+
+ return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+ int retval;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if ((mtu > 9500) || (mtu < 68))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+ retval = set_port_config_ext(pep);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs of the new MTU.
+ * There is a possible danger that the open will not succeed,
+ * due to memory being full.
+ */
+ pxa168_eth_stop(dev);
+ if (pxa168_eth_open(dev)) {
+ dev_printk(KERN_ERR, &dev->dev,
+ "fatal error on re-opening device after "
+ "MTU change\n");
+ }
+
+ return 0;
+}
+
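+/* Claim the next free Tx descriptor and advance the ring's current index */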
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+ int tx_desc_curr;
+
+ tx_desc_curr = pep->tx_curr_desc_q;
+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+ pep->tx_desc_count++;
+
+ return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pxa168_eth_private *pep =
+ container_of(napi, struct pxa168_eth_private, napi);
+ struct net_device *dev = pep->dev;
+ int work_done = 0;
+
+ if (unlikely(pep->work_todo & WORK_LINK)) {
+ pep->work_todo &= ~(WORK_LINK);
+ handle_link_event(pep);
+ }
+ /*
+	 * We call txq_reclaim on every poll since, with NAPI running,
+	 * interrupts are disabled and we would otherwise miss the TX_DONE
+	 * interrupt, which is not reflected in the interrupt status register.
+ */
+ txq_reclaim(dev, 0);
+ if (netif_queue_stopped(dev)
+ && pep->tx_ring_size - pep->tx_desc_count > 1) {
+ netif_wake_queue(dev);
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+ return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct tx_desc *desc;
+ int tx_index;
+ int length;
+
+ tx_index = eth_alloc_tx_desc_index(pep);
+ desc = &pep->p_tx_desc_area[tx_index];
+ length = skb->len;
+ pep->tx_skb[tx_index] = skb;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ wmb();
+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+ wmb();
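+	/* Kick the Tx DMA on the high-priority queue; keep Rx DMA enabled */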
+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+ /* We handled the current skb, but now we are out of space.*/
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+ int i = 0;
+
+ /* wait for the SMI register to become available */
+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+ int i = 0;
+ int val;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+ /* now wait for the data to be valid */
+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk(KERN_WARNING
+ "pxa168_eth: SMI bus read not valid\n");
+ return -ENODEV;
+ }
+ msleep(10);
+ }
+
+ return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+ SMI_OP_W | (value & 0xffff));
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ if (pep->phy != NULL)
+ return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+ struct mii_bus *bus = pep->smi_bus;
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+
+ if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+ /* Scan entire range */
+ start = ethernet_phy_get(pep);
+ num = 32;
+ } else {
+ /* Use phy addr specific to platform */
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+ phydev = NULL;
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+ if (bus->phy_map[addr] == NULL)
+ mdiobus_scan(bus, addr);
+
+ if (phydev == NULL) {
+ phydev = bus->phy_map[addr];
+ if (phydev != NULL)
+ ethernet_phy_set_addr(pep, addr);
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+ struct phy_device *phy = pep->phy;
+ ethernet_phy_reset(pep);
+
+ phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->supported &= PHY_BASIC_FEATURES;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->pd->init)
+ pep->pd->init();
+ pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+ if (pep->phy != NULL)
+ phy_init(pep, pep->pd->speed, pep->pd->duplex);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = phy_read_status(pep->phy);
+ if (err == 0)
+ err = phy_ethtool_gset(pep->phy, cmd);
+
+ return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRIVER_NAME, 32);
+ strncpy(info->version, DRIVER_VERSION, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ strncpy(info->bus_info, "N/A", 32);
+}
+
+static u32 pxa168_get_link(struct net_device *dev)
+{
+ return !!netif_carrier_ok(dev);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+ .get_settings = pxa168_get_settings,
+ .set_settings = pxa168_set_settings,
+ .get_drvinfo = pxa168_get_drvinfo,
+ .get_link = pxa168_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+ .ndo_open = pxa168_eth_open,
+ .ndo_stop = pxa168_eth_stop,
+ .ndo_start_xmit = pxa168_eth_start_xmit,
+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+ .ndo_set_mac_address = pxa168_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_change_mtu = pxa168_eth_change_mtu,
+ .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+ struct pxa168_eth_private *pep = NULL;
+ struct net_device *dev = NULL;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+
+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+ clk = clk_get(&pdev->dev, "MFUCLK");
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+ clk_enable(clk);
+
+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ pep = netdev_priv(dev);
+ pep->dev = dev;
+ pep->clk = clk;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -ENODEV;
+ goto err_netdev;
+ }
+ pep->base = ioremap(res->start, res->end - res->start + 1);
+ if (pep->base == NULL) {
+ err = -ENOMEM;
+ goto err_netdev;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+ dev->netdev_ops = &pxa168_eth_netdev_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+ SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
+ random_ether_addr(dev->dev_addr);
+
+ pep->pd = pdev->dev.platform_data;
+ pep->rx_ring_size = NUM_RX_DESCS;
+ if (pep->pd->rx_queue_size)
+ pep->rx_ring_size = pep->pd->rx_queue_size;
+
+ pep->tx_ring_size = NUM_TX_DESCS;
+ if (pep->pd->tx_queue_size)
+ pep->tx_ring_size = pep->pd->tx_queue_size;
+
+ pep->port_num = pep->pd->port_number;
+ /* Hardware supports only 3 ports */
+ BUG_ON(pep->port_num > 2);
+ netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+ memset(&pep->timeout, 0, sizeof(struct timer_list));
+ init_timer(&pep->timeout);
+ pep->timeout.function = rxq_refill_timer_wrapper;
+ pep->timeout.data = (unsigned long)pep;
+
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+ goto err_base;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+ pep->smi_bus->read = pxa168_smi_read;
+ pep->smi_bus->write = pxa168_smi_write;
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ pep->smi_bus->parent = &pdev->dev;
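+	/* Mask all PHYs from bus auto-probing; phy_scan() probes them explicitly */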
+ pep->smi_bus->phy_mask = 0xffffffff;
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
+ pxa168_init_hw(pep);
+ err = ethernet_phy_setup(dev);
+ if (err)
+ goto err_mdiobus;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = register_netdev(dev);
+ if (err)
+ goto err_mdiobus;
+ return 0;
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_base:
+ iounmap(pep->base);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable(clk);
+ clk_put(clk);
+ return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->htpr) {
+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ pep->htpr, pep->htpr_dma);
+ pep->htpr = NULL;
+ }
+ if (pep->clk) {
+ clk_disable(pep->clk);
+ clk_put(pep->clk);
+ pep->clk = NULL;
+ }
+ if (pep->phy != NULL)
+ phy_detach(pep->phy);
+
+ iounmap(pep->base);
+ pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+ flush_scheduled_work();
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+ .probe = pxa168_eth_probe,
+ .remove = pxa168_eth_remove,
+ .shutdown = pxa168_eth_shutdown,
+ .resume = pxa168_eth_resume,
+ .suspend = pxa168_eth_suspend,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init pxa168_init_module(void)
+{
+ return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+ platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 6168a130f33..7496ed2c34a 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2029,7 +2029,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb->data);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
netif_receive_skb(skb);
@@ -2076,7 +2076,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
PCI_DMA_FROMDEVICE);
prefetch(skb2->data);
- skb2->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb2);
if (qdev->device_id == QL3022_DEVICE_ID) {
/*
* Copy the ethhdr from first buffer to second. This
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 970389331bb..26c37d3a586 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,9 +51,11 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 7
-#define QLCNIC_LINUX_VERSIONID "5.0.7"
+#define _QLCNIC_LINUX_SUBVERSION 11
+#define QLCNIC_LINUX_VERSIONID "5.0.11"
#define QLCNIC_DRV_IDC_VER 0x01
+#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
+ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -92,11 +94,12 @@
#define FIRST_PAGE_GROUP_START 0
#define FIRST_PAGE_GROUP_END 0x100000
-#define P3_MAX_MTU (9600)
+#define P3P_MAX_MTU (9600)
+#define P3P_MIN_MTU (68)
#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
-#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
-#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
+#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
#define QLCNIC_LRO_BUFFER_EXTRA 2048
@@ -148,6 +151,7 @@
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
(((index) + 1) & ((length) - 1))
@@ -172,7 +176,7 @@
((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
- ((_desc)->flags_opcode = \
+ ((_desc)->flags_opcode |= \
cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
@@ -221,7 +225,8 @@ struct rcv_desc {
#define QLCNIC_LRO_DESC 0x12
/* for status field in status_desc */
-#define STATUS_CKSUM_OK (2)
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
/* owner bits of status_desc */
#define STATUS_OWNER_HOST (0x1ULL << 56)
@@ -302,20 +307,20 @@ struct uni_data_desc{
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
-#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
-#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
-#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
-#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
-#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
-#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
-#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
-#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
-#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
-#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
-#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
-#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
-#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
-#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
+#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
#define QLCNIC_MSIX_TABLE_OFFSET 0x44
@@ -555,6 +560,8 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_TIMEOUT 17
@@ -712,11 +719,13 @@ struct qlcnic_cardrsp_tx_ctx {
/* MAC */
-#define MC_COUNT_P3 38
+#define MC_COUNT_P3P 38
#define QLCNIC_MAC_NOOP 0
#define QLCNIC_MAC_ADD 1
#define QLCNIC_MAC_DEL 2
+#define QLCNIC_MAC_VLAN_ADD 3
+#define QLCNIC_MAC_VLAN_DEL 4
struct qlcnic_mac_list_s {
struct list_head list;
@@ -890,12 +899,28 @@ struct qlcnic_mac_req {
u8 mac_addr[6];
};
+struct qlcnic_vlan_req {
+ __le16 vlan_id;
+ __le16 rsvd[3];
+};
+
+struct qlcnic_ipaddr {
+ __be32 ipv4;
+ __be32 ipv6[4];
+};
+
#define QLCNIC_MSI_ENABLED 0x02
#define QLCNIC_MSIX_ENABLED 0x04
#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_DISABLED 0x00
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
#define QLCNIC_ESWITCH_ENABLED 0x40
+#define QLCNIC_ADAPTER_INITIALIZED 0x80
+#define QLCNIC_TAGGING_ENABLED 0x100
+#define QLCNIC_MACSPOOF 0x200
+#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
+#define QLCNIC_PROMISC_DISABLED 0x800
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -916,6 +941,22 @@ struct qlcnic_mac_req {
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_LB_MAX_FILTERS 64
+
+struct qlcnic_filter {
+ struct hlist_node fnode;
+ u8 faddr[ETH_ALEN];
+ __le16 vlan_id;
+ unsigned long ftime;
+};
+
+struct qlcnic_filter_hash {
+ struct hlist_head *fhead;
+ u8 fnum;
+ u8 fmax;
+};
+
struct qlcnic_adapter {
struct qlcnic_hardware_context ahw;
@@ -924,6 +965,7 @@ struct qlcnic_adapter {
struct list_head mac_list;
spinlock_t tx_clean_lock;
+ spinlock_t mac_learn_lock;
u16 num_txd;
u16 num_rxd;
@@ -931,7 +973,6 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
- u8 driver_mismatch;
u8 msix_supported;
u8 rx_csum;
u8 portnum;
@@ -961,6 +1002,7 @@ struct qlcnic_adapter {
u16 max_tx_ques;
u16 max_rx_ques;
u16 max_mtu;
+ u16 pvid;
u32 fw_hal_version;
u32 capabilities;
@@ -969,7 +1011,7 @@ struct qlcnic_adapter {
u32 temp;
u32 int_vec_bit;
- u32 heartbit;
+ u32 heartbeat;
u8 max_mac_filters;
u8 dev_state;
@@ -983,6 +1025,7 @@ struct qlcnic_adapter {
u64 dev_rst_time;
+ struct vlan_group *vlgrp;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -1003,6 +1046,8 @@ struct qlcnic_adapter {
struct qlcnic_nic_intr_coalesce coal;
+ struct qlcnic_filter_hash fhash;
+
unsigned long state;
__le32 file_prd_off; /*File fw product offset*/
u32 fw_version;
@@ -1042,7 +1087,7 @@ struct qlcnic_pci_info {
};
struct qlcnic_npar_info {
- u16 vlan_id;
+ u16 pvid;
u16 min_bw;
u16 max_bw;
u8 phy_port;
@@ -1050,11 +1095,13 @@ struct qlcnic_npar_info {
u8 active;
u8 enable_pm;
u8 dest_npar;
- u8 host_vlan_tag;
- u8 promisc_mode;
u8 discard_tagged;
- u8 mac_learning;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 promisc_mode;
+ u8 offload_flags;
};
+
struct qlcnic_eswitch {
u8 port;
u8 active_vports;
@@ -1086,7 +1133,6 @@ struct qlcnic_eswitch {
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
-#define IS_VALID_MODE(mode) (mode == 0 || mode == 1)
struct qlcnic_pci_func_cfg {
u16 func_type;
@@ -1118,12 +1164,53 @@ struct qlcnic_pm_func_cfg {
struct qlcnic_esw_func_cfg {
u16 vlan_id;
+ u8 op_mode;
+ u8 op_type;
u8 pci_func;
u8 host_vlan_tag;
u8 promisc_mode;
u8 discard_tagged;
- u8 mac_learning;
- u8 reserved;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 offload_flags;
+ u8 reserved[5];
+};
+
+#define QLCNIC_STATS_VERSION 1
+#define QLCNIC_STATS_PORT 1
+#define QLCNIC_STATS_ESWITCH 2
+#define QLCNIC_QUERY_RX_COUNTER 0
+#define QLCNIC_QUERY_TX_COUNTER 1
+#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL
+
+#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
+do { \
+ if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
+ (VAL1) = (VAL2); \
+ else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
+ (VAL1) += (VAL2); \
+} while (0)
+
+struct __qlcnic_esw_statistics {
+ __le16 context_id;
+ __le16 version;
+ __le16 size;
+ __le16 unused;
+ __le64 unicast_frames;
+ __le64 multicast_frames;
+ __le64 broadcast_frames;
+ __le64 dropped_frames;
+ __le64 errors;
+ __le64 local_frames;
+ __le64 numbytes;
+ __le64 rsvd[3];
+};
+
+struct qlcnic_esw_statistics {
+ struct __qlcnic_esw_statistics rx;
+ struct __qlcnic_esw_statistics tx;
};
int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
@@ -1171,6 +1258,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
/* Functions from qlcnic_init.c */
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
@@ -1199,7 +1288,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
-int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
struct qlcnic_host_rds_ring *rds_ring);
@@ -1209,7 +1298,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
@@ -1220,12 +1309,13 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring);
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
/* Functions from qlcnic_main.c */
+int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
+void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
int qlcnic_reset_context(struct qlcnic_adapter *);
u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
@@ -1236,22 +1326,22 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
/* Management functions */
-int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
-int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
/* eSwitch management functions */
-int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
- struct qlcnic_eswitch *);
-int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
- struct qlcnic_eswitch *);
-int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
-int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
- u8, u8, u16);
+int qlcnic_config_switch_port(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
extern int qlcnic_config_tso;
/*
@@ -1280,6 +1370,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
"3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
{0x1077, 0x8020, 0x1077, 0x20f,
"3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter"},
{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
};
@@ -1298,7 +1390,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
struct qlcnic_nic_template {
- int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
int (*start_firmware) (struct qlcnic_adapter *);
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index cc5d861d9a1..1cdc05dade6 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -556,32 +556,6 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
-/* Set MAC address of a NIC partition */
-int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
-{
- int err = 0;
- u32 arg1, arg2, arg3;
-
- arg1 = adapter->ahw.pci_func | BIT_9;
- arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
- arg3 = mac[4] | (mac[5] << 16);
-
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
- adapter->fw_hal_version,
- arg1,
- arg2,
- arg3,
- QLCNIC_CDRP_CMD_MAC_ADDRESS);
-
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to set mac address%d\n", err);
- err = -EIO;
- }
-
- return err;
-}
/* Get MAC address of a NIC partition */
int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
@@ -742,15 +716,15 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
if (err == QLCNIC_RCODE_SUCCESS) {
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
- pci_info->id = le32_to_cpu(npar->id);
- pci_info->active = le32_to_cpu(npar->active);
- pci_info->type = le32_to_cpu(npar->type);
+ pci_info->id = le16_to_cpu(npar->id);
+ pci_info->active = le16_to_cpu(npar->active);
+ pci_info->type = le16_to_cpu(npar->type);
pci_info->default_port =
- le32_to_cpu(npar->default_port);
+ le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
- le32_to_cpu(npar->tx_min_bw);
+ le16_to_cpu(npar->tx_min_bw);
pci_info->tx_max_bw =
- le32_to_cpu(npar->tx_max_bw);
+ le16_to_cpu(npar->tx_max_bw);
memcpy(pci_info->mac, npar->mac, ETH_ALEN);
}
} else {
@@ -764,222 +738,319 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
return err;
}
-/* Reset a NIC partition */
-
-int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+ u8 enable_mirroring, u8 pci_func)
{
int err = -EIO;
+ u32 arg1;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
return err;
+ arg1 = id | (enable_mirroring ? BIT_4 : 0);
+ arg1 |= pci_func << 8;
+
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
adapter->fw_hal_version,
- func_no,
+ arg1,
0,
0,
- QLCNIC_CDRP_CMD_RESET_NPAR);
+ QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
- "Failed to issue reset partition%d\n", err);
- err = -EIO;
+ "Failed to configure port mirroring%d on eswitch:%d\n",
+ pci_func, id);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "Configured eSwitch %d for port mirroring:%d\n",
+ id, pci_func);
}
return err;
}
-/* Get eSwitch Capabilities */
-int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
- struct qlcnic_eswitch *eswitch)
-{
- int err = -EIO;
- u32 arg1, arg2;
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
- return err;
+ size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
+ struct __qlcnic_esw_statistics *stats;
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ u32 arg1;
+ int err;
- err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
- adapter->fw_hal_version,
- port,
- 0,
- 0,
- QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
+ if (esw_stats == NULL)
+ return -ENOMEM;
- if (err == QLCNIC_RCODE_SUCCESS) {
- arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
-
- eswitch->port = arg1 & 0xf;
- eswitch->active_vports = LSB(arg2);
- eswitch->max_ucast_filters = MSB(arg2);
- eswitch->max_active_vlans = LSB(MSW(arg2));
- if (arg1 & BIT_6)
- eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
- if (arg1 & BIT_7)
- eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
- if (arg1 & BIT_8)
- eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
- } else {
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
+ func != adapter->ahw.pci_func) {
dev_err(&adapter->pdev->dev,
- "Failed to get eswitch capabilities%d\n", err);
+ "Not privilege to query stats for func=%d", func);
+ return -EIO;
}
- return err;
-}
-
-/* Get current status of eswitch */
-int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
- struct qlcnic_eswitch *eswitch)
-{
- int err = -EIO;
- u32 arg1, arg2;
+ stats_addr = pci_alloc_consistent(adapter->pdev, stats_size,
+ &stats_dma_t);
+ if (!stats_addr) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ memset(stats_addr, 0, stats_size);
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
- return err;
+ arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+ arg1 |= rx_tx << 15 | stats_size << 16;
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
adapter->fw_hal_version,
- port,
- 0,
- 0,
- QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
-
- if (err == QLCNIC_RCODE_SUCCESS) {
- arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
-
- eswitch->port = arg1 & 0xf;
- eswitch->active_vports = LSB(arg2);
- eswitch->active_ucast_filters = MSB(arg2);
- eswitch->active_vlans = LSB(MSW(arg2));
- if (arg1 & BIT_6)
- eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
- if (arg1 & BIT_8)
- eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
-
- } else {
- dev_err(&adapter->pdev->dev,
- "Failed to get eswitch status%d\n", err);
+ arg1,
+ MSD(stats_dma_t),
+ LSD(stats_dma_t),
+ QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+
+ if (!err) {
+ stats = (struct __qlcnic_esw_statistics *)stats_addr;
+ esw_stats->context_id = le16_to_cpu(stats->context_id);
+ esw_stats->version = le16_to_cpu(stats->version);
+ esw_stats->size = le16_to_cpu(stats->size);
+ esw_stats->multicast_frames =
+ le64_to_cpu(stats->multicast_frames);
+ esw_stats->broadcast_frames =
+ le64_to_cpu(stats->broadcast_frames);
+ esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+ esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+ esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+ esw_stats->errors = le64_to_cpu(stats->errors);
+ esw_stats->numbytes = le64_to_cpu(stats->numbytes);
}
+ pci_free_consistent(adapter->pdev, stats_size, stats_addr,
+ stats_dma_t);
return err;
}
-/* Enable/Disable eSwitch */
-int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
+
+ struct __qlcnic_esw_statistics port_stats;
+ u8 i;
+ int ret = -EIO;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+ if (adapter->npars == NULL)
+ return -EIO;
+
+ memset(esw_stats, 0, sizeof(u64));
+ esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->context_id = eswitch;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].phy_port != eswitch)
+ continue;
+
+ memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
+ continue;
+
+ esw_stats->size = port_stats.size;
+ esw_stats->version = port_stats.version;
+ QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
+ port_stats.unicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
+ port_stats.multicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
+ port_stats.broadcast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
+ port_stats.dropped_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->errors,
+ port_stats.errors);
+ QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
+ port_stats.local_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
+ port_stats.numbytes);
+ ret = 0;
+ }
+ return ret;
+}
+
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+ const u8 port, const u8 rx_tx)
{
- int err = -EIO;
- u32 arg1, arg2;
- struct qlcnic_eswitch *eswitch;
+
+ u32 arg1;
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
- return err;
+ return -EIO;
- eswitch = &adapter->eswitch[id];
- if (!eswitch)
- return err;
+ if (func_esw == QLCNIC_STATS_PORT) {
+ if (port >= QLCNIC_MAX_PCI_FUNC)
+ goto err_ret;
+ } else if (func_esw == QLCNIC_STATS_ESWITCH) {
+ if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+ goto err_ret;
+ } else {
+ goto err_ret;
+ }
- arg1 = eswitch->port | (enable ? BIT_4 : 0);
- arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
- (eswitch->max_active_vlans << 16);
- err = qlcnic_issue_cmd(adapter,
+ if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+ goto err_ret;
+
+ arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+ arg1 |= BIT_14 | rx_tx << 15;
+
+ return qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
adapter->fw_hal_version,
arg1,
- arg2,
0,
- QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
-
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to enable eswitch%d\n", eswitch->port);
- eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
- err = -EIO;
- } else {
- eswitch->flags |= QLCNIC_SWITCH_ENABLE;
- dev_info(&adapter->pdev->dev,
- "Enabled eSwitch for port %d\n", eswitch->port);
- }
+ 0,
+ QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
- return err;
+err_ret:
+ dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
+ "rx_ctx=%d\n", func_esw, port, rx_tx);
+ return -EIO;
}
-/* Configure eSwitch for port mirroring */
-int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
- u8 enable_mirroring, u8 pci_func)
+static int
+__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ u32 *arg1, u32 *arg2)
{
int err = -EIO;
- u32 arg1;
-
- if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
- return err;
-
- arg1 = id | (enable_mirroring ? BIT_4 : 0);
- arg1 |= pci_func << 8;
-
+ u8 pci_func;
+ pci_func = (*arg1 >> 8);
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
adapter->fw_hal_version,
- arg1,
+ *arg1,
0,
0,
- QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+ QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to configure port mirroring%d on eswitch:%d\n",
- pci_func, id);
- } else {
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
dev_info(&adapter->pdev->dev,
- "Configured eSwitch %d for port mirroring:%d\n",
- id, pci_func);
+ "eSwitch port config for pci func %d\n", pci_func);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get eswitch port config for pci func %d\n",
+ pci_func);
}
-
return err;
}
-
-/* Configure eSwitch port */
-int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
- int vlan_tagging, u8 discard_tagged, u8 promsc_mode,
- u8 mac_learn, u8 pci_func, u16 vlan_id)
+/* Configure eSwitch port
+ * op_mode = 0: set default port behavior
+ * op_mode = 1: set a vlan id
+ * op_mode = 2: delete a vlan id
+ * op_type = 0: vlan_id
+ * op_type = 1: port vlan_id
+ */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
{
int err = -EIO;
- u32 arg1;
- struct qlcnic_eswitch *eswitch;
+ u32 arg1, arg2 = 0;
+ u8 pci_func;
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return err;
+ pci_func = esw_cfg->pci_func;
+ arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
+ arg1 |= (pci_func << 8);
- eswitch = &adapter->eswitch[id];
- if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
return err;
-
- arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
- arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
- arg1 |= pci_func << 8;
- if (vlan_tagging)
- arg1 |= BIT_5 | (vlan_id << 16);
+ arg1 &= ~(0x0ff << 8);
+ arg1 |= (pci_func << 8);
+ arg1 &= ~(BIT_2 | BIT_3);
+ switch (esw_cfg->op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ arg1 |= (BIT_4 | BIT_6 | BIT_7);
+ arg2 |= (BIT_0 | BIT_1);
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ arg2 |= (BIT_2 | BIT_3);
+ if (!(esw_cfg->discard_tagged))
+ arg1 &= ~BIT_4;
+ if (!(esw_cfg->promisc_mode))
+ arg1 &= ~BIT_6;
+ if (!(esw_cfg->mac_override))
+ arg1 &= ~BIT_7;
+ if (!(esw_cfg->mac_anti_spoof))
+ arg2 &= ~BIT_0;
+ if (!(esw_cfg->offload_flags & BIT_0))
+ arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
+ if (!(esw_cfg->offload_flags & BIT_1))
+ arg2 &= ~BIT_2;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ arg2 &= ~BIT_3;
+ break;
+ case QLCNIC_ADD_VLAN:
+ arg1 |= (BIT_2 | BIT_5);
+ arg1 |= (esw_cfg->vlan_id << 16);
+ break;
+ case QLCNIC_DEL_VLAN:
+ arg1 |= (BIT_3 | BIT_5);
+ arg1 &= ~(0x0ffff << 16);
+ break;
+ default:
+ return err;
+ }
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
adapter->fw_hal_version,
arg1,
- 0,
+ arg2,
0,
QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
- "Failed to configure eswitch port%d\n", eswitch->port);
+ "Failed to configure eswitch pci func %d\n", pci_func);
} else {
dev_info(&adapter->pdev->dev,
- "Configured eSwitch for port %d\n", eswitch->port);
+ "Configured eSwitch for pci func %d\n", pci_func);
}
return err;
}
+
+int
+qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ u32 arg1, arg2;
+ u8 phy_port;
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
+ else
+ phy_port = adapter->physical_port;
+ arg1 = phy_port;
+ arg1 |= (esw_cfg->pci_func << 8);
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return -EIO;
+
+ esw_cfg->discard_tagged = !!(arg1 & BIT_4);
+ esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
+ esw_cfg->promisc_mode = !!(arg1 & BIT_6);
+ esw_cfg->mac_override = !!(arg1 & BIT_7);
+ esw_cfg->vlan_id = LSW(arg1 >> 16);
+ esw_cfg->mac_anti_spoof = (arg2 & 0x1);
+ esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
+
+ return 0;
+}
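
The command argument packing used by qlcnic_config_switch_port() and decoded in qlcnic_get_eswitch_port_config() keeps the PCI function in bits 8-15 of arg1, the discard-tagged/promiscuous/MAC-override flags in bits 4, 6 and 7, and the VLAN id in the upper 16 bits. A small stand-alone sketch of that decoding, for illustration only (the struct and field names are placeholders, not driver types):

#include <stdint.h>
#include <stdio.h>

struct esw_cfg {
	uint8_t  pci_func;
	uint8_t  discard_tagged;
	uint8_t  promisc_mode;
	uint8_t  mac_override;
	uint16_t vlan_id;
};

/* Unpack the arg1 layout used by the eSwitch port-config command. */
static void decode_arg1(uint32_t arg1, struct esw_cfg *cfg)
{
	cfg->pci_func       = (arg1 >> 8) & 0xff;
	cfg->discard_tagged = !!(arg1 & (1u << 4));
	cfg->promisc_mode   = !!(arg1 & (1u << 6));
	cfg->mac_override   = !!(arg1 & (1u << 7));
	cfg->vlan_id        = arg1 >> 16;
}

int main(void)
{
	struct esw_cfg cfg;
	uint32_t arg1 = (2u << 8) | (1u << 6) | (100u << 16); /* func 2, promisc, vlan 100 */

	decode_arg1(arg1, &cfg);
	printf("func=%u promisc=%u vlan=%u\n", cfg.pci_func,
	       cfg.promisc_mode, cfg.vlan_id);
	return 0;
}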
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 9328d59e21e..2568aa66502 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -96,10 +96,10 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
static const u32 diag_registers[] = {
CRB_CMDPEG_STATE,
CRB_RCVPEG_STATE,
- CRB_XG_STATE_P3,
+ CRB_XG_STATE_P3P,
CRB_FW_CAPABILITIES_1,
ISR_INT_STATE_REG,
- QLCNIC_CRB_DEV_REF_COUNT,
+ QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
QLCNIC_CRB_DRV_SCRATCH,
@@ -115,9 +115,13 @@ static const u32 diag_registers[] = {
-1
};
+#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_ETHTOOL_REGS_VER 2
static int qlcnic_get_regs_len(struct net_device *dev)
{
- return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
+ QLCNIC_DEV_INFO_SIZE + 1;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -185,9 +189,9 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
goto skip;
}
- val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
- ecmd->speed = P3_LINK_SPEED_MHZ *
- P3_LINK_SPEED_VAL(pcifn, val);
+ val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+ ecmd->speed = P3P_LINK_SPEED_MHZ *
+ P3P_LINK_SPEED_VAL(pcifn, val);
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
} else
@@ -198,42 +202,42 @@ skip:
ecmd->transceiver = XCVR_EXTERNAL;
switch (adapter->ahw.board_type) {
- case QLCNIC_BRDTYPE_P3_REF_QG:
- case QLCNIC_BRDTYPE_P3_4_GB:
- case QLCNIC_BRDTYPE_P3_4_GB_MM:
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
ecmd->supported |= SUPPORTED_Autoneg;
ecmd->advertising |= ADVERTISED_Autoneg;
- case QLCNIC_BRDTYPE_P3_10G_CX4:
- case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
- case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
ecmd->autoneg = adapter->link_autoneg;
break;
- case QLCNIC_BRDTYPE_P3_IMEZ:
- case QLCNIC_BRDTYPE_P3_XG_LOM:
- case QLCNIC_BRDTYPE_P3_HMEZ:
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
ecmd->supported |= SUPPORTED_MII;
ecmd->advertising |= ADVERTISED_MII;
ecmd->port = PORT_MII;
ecmd->autoneg = AUTONEG_DISABLE;
break;
- case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
- case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
- case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
- case QLCNIC_BRDTYPE_P3_10G_XFP:
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
break;
- case QLCNIC_BRDTYPE_P3_10G_TP:
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
if (adapter->ahw.port_type == QLCNIC_XGBE) {
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
@@ -339,14 +343,17 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
u32 *regs_buff = p;
- int ring, i = 0;
+ int ring, i = 0, j = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
- regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
- (adapter->pdev)->device;
+ regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
+ (adapter->ahw.revision_id << 16) | (adapter->pdev)->device;
- for (i = 0; diag_registers[i] != -1; i++)
- regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
+ regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
+ regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
@@ -374,9 +381,9 @@ static u32 qlcnic_test_link(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 val;
- val = QLCRD32(adapter, CRB_XG_STATE_P3);
- val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
- return (val == XG_LINK_UP_P3) ? 0 : 1;
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+ val = XG_LINK_STATE_P3P(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
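
The register dump assembled in qlcnic_get_regs() above now starts with a 0xcafe-tagged header word carrying the device-info length, followed by the management API version and then the raw diagnostic register values. A hypothetical user-space decoder for that layout (not a real tool, just a sketch of the format):

#include <stdint.h>
#include <stdio.h>

static int decode_regs(const uint32_t *buf, unsigned int words)
{
	unsigned int info_size, i;

	if (words < 2 || (buf[0] & 0xffff0000u) != 0xcafe0000u)
		return -1;			/* not a dump in this format */

	info_size = buf[0] & 0xffffu;		/* device-info word count */
	printf("mgmt API version: %u\n", buf[1]);
	for (i = info_size + 1; i < words; i++)
		printf("reg[%u] = 0x%08x\n", i - info_size - 1, buf[i]);
	return 0;
}

int main(void)
{
	uint32_t sample[] = { 0xcafe0001u, 2u, 0x12345678u, 0xdeadbeefu };

	return decode_regs(sample, sizeof(sample) / sizeof(sample[0]));
}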
static int
@@ -629,6 +636,8 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
}
#define QLC_ILB_PKT_SIZE 64
+#define QLC_NUM_ILB_PKT 16
+#define QLC_ILB_MAX_RCV_LOOP 10
static void qlcnic_create_loopback_buff(unsigned char *data)
{
@@ -650,24 +659,34 @@ static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
struct sk_buff *skb;
- int i;
+ int i, loop, cnt = 0;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data);
skb_put(skb, QLC_ILB_PKT_SIZE);
adapter->diag_cnt = 0;
-
qlcnic_xmit_frame(skb, adapter->netdev);
- msleep(5);
-
- qlcnic_process_rcv_ring_diag(sds_ring);
+ loop = 0;
+ do {
+ msleep(1);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
+ !adapter->diag_cnt);
dev_kfree_skb_any(skb);
+
if (!adapter->diag_cnt)
- return -1;
+ dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
+ " not recevied\n", i + 1);
+ else
+ cnt++;
+ }
+ if (cnt != i) {
+ dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
+ return -1;
}
return 0;
}
@@ -687,6 +706,11 @@ static int qlcnic_loopback_test(struct net_device *netdev)
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
+ if (qlcnic_request_quiscent_mode(adapter)) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+ }
+
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
if (ret)
goto clear_it;
@@ -703,6 +727,7 @@ done:
qlcnic_diag_free_res(netdev, max_sds_rings);
clear_it:
+ qlcnic_clear_quiscent_mode(adapter);
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -747,6 +772,14 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
{
memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev);
if (data[2])
@@ -757,15 +790,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
eth_test->flags |= ETH_TEST_FL_FAILED;
}
-
- data[0] = qlcnic_reg_test(dev);
- if (data[0])
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* link test */
- data[1] = (u64) qlcnic_test_link(dev);
- if (data[1])
- eth_test->flags |= ETH_TEST_FL_FAILED;
}
static void
@@ -805,6 +829,20 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
}
}
+static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return -EOPNOTSUPP;
+ if (data)
+ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ return 0;
+
+}
static u32 qlcnic_get_tx_csum(struct net_device *dev)
{
return dev->features & NETIF_F_IP_CSUM;
@@ -819,7 +857,23 @@ static u32 qlcnic_get_rx_csum(struct net_device *dev)
static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return -EOPNOTSUPP;
+ if (!!data) {
+ adapter->rx_csum = !!data;
+ return 0;
+ }
+
+ if (dev->features & NETIF_F_LRO) {
+ if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
+ return -EIO;
+
+ dev->features &= ~NETIF_F_LRO;
+ qlcnic_send_lro_cleanup(adapter);
+ }
adapter->rx_csum = !!data;
+ dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
return 0;
}
@@ -1002,6 +1056,15 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
return -EINVAL;
+ if (!adapter->rx_csum) {
+ dev_info(&adapter->pdev->dev, "rx csum is off, "
+ "cannot toggle lro\n");
+ return -EINVAL;
+ }
+
+ if ((data & ETH_FLAG_LRO) && (netdev->features & NETIF_F_LRO))
+ return 0;
+
if (data & ETH_FLAG_LRO) {
hw_lro = QLCNIC_LRO_ENABLED;
netdev->features |= NETIF_F_LRO;
@@ -1048,7 +1111,7 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_pauseparam = qlcnic_get_pauseparam,
.set_pauseparam = qlcnic_set_pauseparam,
.get_tx_csum = qlcnic_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_tx_csum = qlcnic_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = qlcnic_get_tso,
.set_tso = qlcnic_set_tso,
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 15fc32070be..4290b80cde1 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -556,18 +556,18 @@ enum {
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
-#define XG_LINK_UP_P3 0x01
-#define XG_LINK_DOWN_P3 0x02
-#define XG_LINK_STATE_P3_MASK 0xf
-#define XG_LINK_STATE_P3(pcifn, val) \
- (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
-
-#define P3_LINK_SPEED_MHZ 100
-#define P3_LINK_SPEED_MASK 0xff
-#define P3_LINK_SPEED_REG(pcifn) \
+#define XG_LINK_UP_P3P 0x01
+#define XG_LINK_DOWN_P3P 0x02
+#define XG_LINK_STATE_P3P_MASK 0xf
+#define XG_LINK_STATE_P3P(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
+
+#define P3P_LINK_SPEED_MHZ 100
+#define P3P_LINK_SPEED_MASK 0xff
+#define P3P_LINK_SPEED_REG(pcifn) \
(CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
-#define P3_LINK_SPEED_VAL(pcifn, reg) \
- (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
+#define P3P_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
@@ -592,7 +592,7 @@ enum {
#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
-#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
+#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
@@ -698,7 +698,7 @@ enum {
#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
-#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
@@ -718,8 +718,9 @@ enum {
#define QLCNIC_DEV_FAILED 0x6
#define QLCNIC_DEV_QUISCENT 0x7
-#define QLCNIC_DEV_NPAR_NOT_RDY 0
-#define QLCNIC_DEV_NPAR_RDY 1
+#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
+#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
+#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +745,15 @@ enum {
#define FW_POLL_DELAY (1 * HZ)
#define FW_FAIL_THRESH 2
+#define QLCNIC_RESET_TIMEOUT_SECS 10
+#define QLCNIC_INIT_TIMEOUT_SECS 30
+#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
+#define QLCNIC_RCVPEG_CHECK_DELAY 10
+#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
+#define QLCNIC_CMDPEG_CHECK_DELAY 500
+#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
@@ -770,6 +780,7 @@ struct qlcnic_legacy_intr_set {
#define QLCNIC_DRV_OP_MODE 0x1b2170
#define QLCNIC_MSIX_BASE 0x132110
#define QLCNIC_MAX_PCI_FUNC 8
+#define QLCNIC_MAX_VLAN_FILTERS 64
/* PCI function operational mode */
enum {
@@ -778,6 +789,12 @@ enum {
QLCNIC_NON_PRIV_FUNC = 2
};
+enum {
+ QLCNIC_PORT_DEFAULTS = 0,
+ QLCNIC_ADD_VLAN = 1,
+ QLCNIC_DEL_VLAN = 2
+};
+
#define QLC_DEV_DRV_DEFAULT 0x11111111
#define LSB(x) ((uint8_t)(x))
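
The renamed P3P link-state macros above pack one 4-bit state nibble per PCI function into the shared CRB_XG_STATE_P3P word, with 0x01 meaning link up and 0x02 link down. A quick stand-alone model of that per-function extraction:

#include <stdint.h>
#include <stdio.h>

#define LINK_UP_P3P		0x01
#define LINK_STATE(val, pcifn)	(((val) >> ((pcifn) * 4)) & 0xf)

int main(void)
{
	uint32_t crb = 0x00002121;	/* funcs 0 and 2 up, funcs 1 and 3 down */
	unsigned int fn;

	for (fn = 0; fn < 4; fn++)
		printf("func %u: %s\n", fn,
		       LINK_STATE(crb, fn) == LINK_UP_P3P ? "up" : "down");
	return 0;
}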
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e08c8b0556a..7a47a2a7ee2 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -297,8 +297,8 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
dev_err(&adapter->pdev->dev,
- "Failed to acquire sem=%d lock;reg_id=%d\n",
- sem, id_reg);
+ "Failed to acquire sem=%d lock; holdby=%d\n",
+ sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
return -EIO;
}
msleep(1);
@@ -375,10 +375,11 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- unsigned op)
+ __le16 vlan_id, unsigned op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
u64 word;
memset(&req, 0, sizeof(struct qlcnic_nic_req));
@@ -391,6 +392,9 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mac_req->op = op;
memcpy(mac_req->mac_addr, addr, 6);
+ vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
+ vlan_req->vlan_id = vlan_id;
+
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
@@ -415,7 +419,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, QLCNIC_MAC_ADD)) {
+ cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
kfree(cur);
return -EIO;
}
@@ -438,7 +442,8 @@ void qlcnic_set_multi(struct net_device *netdev)
qlcnic_nic_add_mac(adapter, bcast_addr);
if (netdev->flags & IFF_PROMISC) {
- mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
goto send_fw_cmd;
}
@@ -485,12 +490,63 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
while (!list_empty(head)) {
cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, QLCNIC_MAC_DEL);
+ cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
kfree(cur);
}
}
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < adapter->fhash.fmax; i++) {
+ head = &(adapter->fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+ {
+ if (jiffies >
+ (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr, tmp_fil->vlan_id,
+ tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+}
+
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < adapter->fhash.fmax; i++) {
+ head = &(adapter->fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
+ tmp_fil->vlan_id, tmp_fil->vlan_id ?
+ QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+}
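
qlcnic_prune_lb_filters() above ages out learned loopback filters once jiffies passes the entry's learn time plus QLCNIC_FILTER_AGE * HZ, deleting the MAC/VLAN pair from hardware and dropping it from the hash. A simplified user-space model of that aging policy, using seconds instead of jiffies and a flat table instead of the driver's hash list:

#include <stdio.h>
#include <time.h>

#define FILTER_AGE_SECS 60	/* stand-in for QLCNIC_FILTER_AGE * HZ */

struct lb_filter {
	time_t ftime;		/* time the MAC was learned */
	int    in_use;
};

/* Drop every entry whose age exceeds the limit. */
static void prune(struct lb_filter *tbl, int n, time_t now)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].in_use && now > tbl[i].ftime + FILTER_AGE_SECS)
			tbl[i].in_use = 0;	/* delete from hardware + free */
}

int main(void)
{
	time_t now = time(NULL);
	struct lb_filter tbl[2] = {
		{ .ftime = now - 120, .in_use = 1 },	/* stale */
		{ .ftime = now - 10,  .in_use = 1 },	/* fresh */
	};

	prune(tbl, 2, now);
	printf("stale kept=%d fresh kept=%d\n", tbl[0].in_use, tbl[1].in_use);
	return 0;
}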
+
#define QLCNIC_CONFIG_INTR_COALESCE 3
/*
@@ -527,9 +583,6 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
u64 word;
int rv;
- if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
- return 0;
-
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -544,8 +597,6 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
dev_err(&adapter->netdev->dev,
"Could not send configure hw lro request\n");
- adapter->flags ^= QLCNIC_LRO_ENABLED;
-
return rv;
}
@@ -623,9 +674,10 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
{
struct qlcnic_nic_req req;
+ struct qlcnic_ipaddr *ipa;
u64 word;
int rv;
@@ -636,7 +688,8 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
req.req_hdr = cpu_to_le64(word);
req.words[0] = cpu_to_le64(cmd);
- req.words[1] = cpu_to_le64(ip);
+ ipa = (struct qlcnic_ipaddr *)&req.words[1];
+ ipa->ipv4 = ip;
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
@@ -701,9 +754,9 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int rc = 0;
- if (mtu > P3_MAX_MTU) {
- dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
- P3_MAX_MTU);
+ if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
+ dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
+ " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
return -EINVAL;
}
@@ -715,19 +768,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
return rc;
}
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
-{
- u32 crbaddr;
- int pci_func = adapter->ahw.pci_func;
-
- crbaddr = CRB_MAC_BLOCK_START +
- (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
-
- qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
-
- return 0;
-}
-
/*
* Changes the CRB window to the specified window.
*/
@@ -1121,31 +1161,31 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
adapter->ahw.board_type = board_type;
- if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
+ if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
if ((gpio & 0x8000) == 0)
- board_type = QLCNIC_BRDTYPE_P3_10G_TP;
+ board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
}
switch (board_type) {
- case QLCNIC_BRDTYPE_P3_HMEZ:
- case QLCNIC_BRDTYPE_P3_XG_LOM:
- case QLCNIC_BRDTYPE_P3_10G_CX4:
- case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
- case QLCNIC_BRDTYPE_P3_IMEZ:
- case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
- case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
- case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
- case QLCNIC_BRDTYPE_P3_10G_XFP:
- case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
adapter->ahw.port_type = QLCNIC_XGBE;
break;
- case QLCNIC_BRDTYPE_P3_REF_QG:
- case QLCNIC_BRDTYPE_P3_4_GB:
- case QLCNIC_BRDTYPE_P3_4_GB_MM:
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
adapter->ahw.port_type = QLCNIC_GBE;
break;
- case QLCNIC_BRDTYPE_P3_10G_TP:
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
adapter->ahw.port_type = (adapter->portnum < 2) ?
QLCNIC_XGBE : QLCNIC_GBE;
break;
@@ -1245,4 +1285,5 @@ void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
qlcnic_nic_set_promisc(adapter, mode);
+ msleep(1000);
}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744b173..0d180c6e41f 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -25,6 +25,7 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include "qlcnic.h"
struct crb_addr_pair {
@@ -45,6 +46,9 @@ static void
qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring);
+static int
+qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
+
static void crb_addr_transform_setup(void)
{
crb_addr_transform(XDMA);
@@ -136,8 +140,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- spin_lock(&rds_ring->lock);
-
INIT_LIST_HEAD(&rds_ring->free_list);
rx_buf = rds_ring->rx_buf_arr;
@@ -146,8 +148,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
&rds_ring->free_list);
rx_buf++;
}
-
- spin_unlock(&rds_ring->lock);
}
}
@@ -259,14 +259,14 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
switch (ring) {
case RCV_RING_NORMAL:
rds_ring->num_desc = adapter->num_rxd;
- rds_ring->dma_size = QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
break;
case RCV_RING_JUMBO:
rds_ring->num_desc = adapter->num_jumbo_rxd;
rds_ring->dma_size =
- QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
+ QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
@@ -439,11 +439,14 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
u32 off;
struct pci_dev *pdev = adapter->pdev;
- /* resetall */
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+
qlcnic_rom_lock(adapter);
QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
qlcnic_rom_unlock(adapter);
+ /* Init HW CRB block */
if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
@@ -524,13 +527,10 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
}
kfree(buf);
- /* p2dn replyCount */
+ /* Initialize protocol process engine */
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
- /* disable_peg_cache 0 & 1*/
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
-
- /* peg_clr_all */
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
@@ -539,9 +539,87 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
+ msleep(1);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
return 0;
}
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(QLCNIC_RCVPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
int
qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
@@ -557,12 +635,12 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
}
adapter->physical_port = (val >> 2);
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
- timeo = 30;
+ timeo = QLCNIC_INIT_TIMEOUT_SECS;
adapter->dev_init_timeo = timeo;
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
- timeo = 10;
+ timeo = QLCNIC_RESET_TIMEOUT_SECS;
adapter->reset_ack_timeo = timeo;
@@ -906,54 +984,47 @@ qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
}
-int
-qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
{
- u32 count, old_count;
- u32 val, version, major, minor, build;
- int i, timeout;
-
- if (adapter->need_fw_reset)
- return 1;
+ if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
- /* last attempt had failed */
- if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
- return 1;
+ qlcnic_pcie_sem_unlock(adapter, 2);
+}
- old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+static int
+qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
+{
+ u32 heartbeat, ret = -EIO;
+ int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
- for (i = 0; i < 10; i++) {
+ adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- timeout = msleep_interruptible(200);
- if (timeout) {
- QLCWR32(adapter, CRB_CMDPEG_STATE,
- PHAN_INITIALIZE_FAILED);
- return -EINTR;
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
}
+ } while (--retries);
- count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- if (count != old_count)
- break;
- }
+ return ret;
+}
- /* firmware is dead */
- if (count == old_count)
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_check_fw_hearbeat(adapter)) {
+ qlcnic_rom_lock_recovery(adapter);
return 1;
+ }
- /* check if we have got newer or different file firmware */
- if (adapter->fw) {
-
- val = qlcnic_get_fw_version(adapter);
-
- version = QLCNIC_DECODE_VERSION(val);
-
- major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ if (adapter->need_fw_reset)
+ return 1;
- if (version > QLCNIC_VERSION_CODE(major, minor, build))
- return 1;
- }
+ if (adapter->fw)
+ return 1;
return 0;
}
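
The reworked qlcnic_need_fw_reset() path above now decides firmware health purely from the heartbeat counter: sample QLCNIC_PEG_ALIVE_COUNTER, sleep QLCNIC_HEARTBEAT_PERIOD_MSECS, and retry up to QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT times, declaring the firmware dead if the counter never moves. A minimal sketch of that polling pattern; the register read below is simulated, not a real driver call:

#include <stdio.h>
#include <unistd.h>

#define HEARTBEAT_PERIOD_MS	200
#define HEARTBEAT_RETRIES	45

/* Hypothetical register read; a real driver would read the alive
 * counter through the CRB window instead. */
static unsigned int read_alive_counter(void)
{
	static unsigned int counter;
	return counter++;		/* simulate a live firmware */
}

static int fw_heartbeat_ok(void)
{
	unsigned int last = read_alive_counter();
	int retries = HEARTBEAT_RETRIES;

	do {
		usleep(HEARTBEAT_PERIOD_MS * 1000);
		if (read_alive_counter() != last)
			return 1;	/* counter moved: firmware alive */
	} while (--retries);

	return 0;			/* counter stuck: needs reset */
}

int main(void)
{
	printf("firmware %s\n", fw_heartbeat_ok() ? "alive" : "dead");
	return 0;
}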
@@ -1089,18 +1160,6 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- /* check if flashed firmware is newer */
- if (qlcnic_rom_fast_read(adapter,
- QLCNIC_FW_VERSION_OFFSET, (int *)&val))
- return -EIO;
-
- val = QLCNIC_DECODE_VERSION(val);
- if (val > ver) {
- dev_info(&pdev->dev, "%s: firmware is older than flash\n",
- fw_name[fw_type]);
- return -EINVAL;
- }
-
QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
return 0;
}
@@ -1162,78 +1221,6 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
adapter->fw = NULL;
}
-static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
-{
- u32 val;
- int retries = 60;
-
- do {
- val = QLCRD32(adapter, CRB_CMDPEG_STATE);
-
- switch (val) {
- case PHAN_INITIALIZE_COMPLETE:
- case PHAN_INITIALIZE_ACK:
- return 0;
- case PHAN_INITIALIZE_FAILED:
- goto out_err;
- default:
- break;
- }
-
- msleep(500);
-
- } while (--retries);
-
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
-
-out_err:
- dev_err(&adapter->pdev->dev, "Command Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
-}
-
-static int
-qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
-{
- u32 val;
- int retries = 2000;
-
- do {
- val = QLCRD32(adapter, CRB_RCVPEG_STATE);
-
- if (val == PHAN_PEG_RCV_INITIALIZED)
- return 0;
-
- msleep(10);
-
- } while (--retries);
-
- if (!retries) {
- dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
- }
-
- return 0;
-}
-
-int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
-{
- int err;
-
- err = qlcnic_cmd_peg_ready(adapter);
- if (err)
- return err;
-
- err = qlcnic_receive_peg_ready(adapter);
- if (err)
- return err;
-
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
-
- return err;
-}
-
static void
qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
struct qlcnic_fw_msg *msg)
@@ -1316,7 +1303,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1351,11 +1338,12 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb = buffer->skb;
- if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
+ cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
skb->dev = adapter->netdev;
@@ -1365,6 +1353,31 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
return skb;
}
+static int
+qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
+ u16 *vlan_tag)
+{
+ struct ethhdr *eth_hdr;
+
+ if (!__vlan_get_tag(skb, vlan_tag)) {
+ eth_hdr = (struct ethhdr *) skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+ if (!adapter->pvid)
+ return 0;
+
+ if (*vlan_tag == adapter->pvid) {
+ /* Outer vlan tag. Packet should follow non-vlan path */
+ *vlan_tag = 0xffff;
+ return 0;
+ }
+ if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+ return 0;
+
+ return -EINVAL;
+}
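
qlcnic_check_rx_tagging() above peels an 802.1Q tag off the received frame before comparing the VLAN id against the port's pvid. For reference, a stand-alone sketch of the underlying 802.1Q parsing (EtherType 0x8100 followed by a TCI word whose low 12 bits are the VLAN id); the helper names here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100

/* Return 0 and fill *vid if the frame carries an 802.1Q tag. */
static int get_vlan_id(const uint8_t *frame, uint16_t *vid)
{
	uint16_t ethertype = (frame[12] << 8) | frame[13];

	if (ethertype != ETH_P_8021Q)
		return -1;			/* untagged frame */
	*vid = ((frame[14] << 8) | frame[15]) & 0x0fff;
	return 0;
}

int main(void)
{
	uint8_t frame[16] = { [12] = 0x81, [13] = 0x00,
			      [14] = 0x00, [15] = 0x64 };	/* VLAN 100 */
	uint16_t vid;

	if (!get_vlan_id(frame, &vid))
		printf("tagged, vlan %u\n", vid);
	return 0;
}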
+
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_host_sds_ring *sds_ring,
@@ -1376,6 +1389,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
int index, length, cksum, pkt_offset;
+ u16 vid = 0xffff;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -1404,10 +1418,18 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
skb->protocol = eth_type_trans(skb, netdev);
- napi_gro_receive(&sds_ring->napi, skb);
+ if ((vid != 0xffff) && adapter->vlgrp)
+ vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
+ else
+ napi_gro_receive(&sds_ring->napi, skb);
adapter->stats.rx_pkts++;
adapter->stats.rxbytes += length;
@@ -1436,6 +1458,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
int index;
u16 lro_length, length, data_offset;
u32 seq_number;
+ u16 vid = 0xffff;
if (unlikely(ring > adapter->max_rds_rings))
return NULL;
@@ -1466,9 +1489,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
skb->protocol = eth_type_trans(skb, netdev);
iph = (struct iphdr *)skb->data;
@@ -1483,7 +1511,10 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
- netif_receive_skb(skb);
+ if ((vid != 0xffff) && adapter->vlgrp)
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
+ else
+ netif_receive_skb(skb);
adapter->stats.lro_pkts++;
adapter->stats.lrobytes += length;
@@ -1587,8 +1618,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
int producer, count = 0;
struct list_head *head;
- spin_lock(&rds_ring->lock);
-
producer = rds_ring->producer;
head = &rds_ring->free_list;
@@ -1618,7 +1647,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
writel((producer-1) & (rds_ring->num_desc-1),
rds_ring->crb_rcv_producer);
}
- spin_unlock(&rds_ring->lock);
}
static void
@@ -1665,6 +1693,18 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
spin_unlock(&rds_ring->lock);
}
+static void dump_skb(struct sk_buff *skb)
+{
+ int i;
+ unsigned char *data = skb->data;
+
+ for (i = 0; i < skb->len; i++) {
+ printk("%02x ", data[i]);
+ if ((i & 0x0f) == 8)
+ printk("\n");
+ }
+}
+
static struct qlcnic_rx_buffer *
qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
struct qlcnic_host_sds_ring *sds_ring,
@@ -1695,15 +1735,18 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- skb_put(skb, rds_ring->skb_size);
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
-
if (!qlcnic_check_loopback_buff(skb->data))
adapter->diag_cnt++;
+ else
+ dump_skb(skb);
dev_kfree_skb_any(skb);
adapter->stats.rx_pkts++;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index b9615bd745e..4aada0b8ceb 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -28,6 +28,7 @@
#include "qlcnic.h"
+#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
@@ -45,10 +46,10 @@ char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
-static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
-
-/* Default to restricted 1G auto-neg mode */
-static int wol_port_mode = 5;
+static struct workqueue_struct *qlcnic_wq;
+static int qlcnic_mac_learn;
+module_param(qlcnic_mac_learn, int, 0644);
+MODULE_PARM_DESC(qlcnic_mac_learn, "MAC address filtering (0=disabled, 1=enabled)");
static int use_msi = 1;
module_param(use_msi, int, 0644);
@@ -94,7 +95,7 @@ static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
-static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
@@ -103,13 +104,17 @@ static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
-static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -164,7 +169,7 @@ qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
- return (recv_ctx->sds_rings == NULL);
+ return recv_ctx->sds_rings == NULL;
}
static void
@@ -255,40 +260,6 @@ static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
memset(&adapter->stats, 0, sizeof(adapter->stats));
}
-static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
-{
- u32 val, data;
-
- val = adapter->ahw.board_type;
- if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
- (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
- if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
- data = QLCNIC_PORT_MODE_802_3_AP;
- QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
- } else if (port_mode == QLCNIC_PORT_MODE_XG) {
- data = QLCNIC_PORT_MODE_XG;
- QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
- } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
- data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
- QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
- } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
- data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
- QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
- } else {
- data = QLCNIC_PORT_MODE_AUTO_NEG;
- QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
- }
-
- if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
- (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
- (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
- (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
- wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
- }
- QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
- }
-}
-
static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
@@ -320,7 +291,7 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
+ if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
return -EIO;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
@@ -341,6 +312,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
+ return -EOPNOTSUPP;
+
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
@@ -360,6 +334,13 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static void qlcnic_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ adapter->vlgrp = grp;
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -370,20 +351,19 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_set_mac_address = qlcnic_set_mac,
.ndo_change_mtu = qlcnic_change_mtu,
.ndo_tx_timeout = qlcnic_tx_timeout,
+ .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
};
static struct qlcnic_nic_template qlcnic_ops = {
- .get_mac_addr = qlcnic_get_mac_address,
.config_bridged_mode = qlcnic_config_bridged_mode,
.config_led = qlcnic_config_led,
.start_firmware = qlcnic_start_firmware
};
static struct qlcnic_nic_template qlcnic_vf_ops = {
- .get_mac_addr = qlcnic_get_mac_address,
.config_bridged_mode = qlcnicvf_config_bridged_mode,
.config_led = qlcnicvf_config_led,
.start_firmware = qlcnicvf_start_firmware
@@ -473,48 +453,57 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
- struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
- int i, ret = 0, err;
+ struct qlcnic_pci_info *pci_info;
+ int i, ret = 0;
u8 pfn;
- if (!adapter->npars)
- adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
- QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
- if (!adapter->npars)
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
return -ENOMEM;
- if (!adapter->eswitch)
- adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+ QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+ if (!adapter->npars) {
+ ret = -ENOMEM;
+ goto err_pci_info;
+ }
+
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
if (!adapter->eswitch) {
- err = -ENOMEM;
- goto err_eswitch;
+ ret = -ENOMEM;
+ goto err_npars;
}
ret = qlcnic_get_pci_info(adapter, pci_info);
- if (!ret) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
- adapter->npars[pfn].active = pci_info[i].active;
- adapter->npars[pfn].type = pci_info[i].type;
- adapter->npars[pfn].phy_port = pci_info[i].default_port;
- adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
- adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
- adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
- }
-
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ if (ret)
+ goto err_eswitch;
- return ret;
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pfn = pci_info[i].id;
+ if (pfn > QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+ adapter->npars[pfn].active = (u8)pci_info[i].active;
+ adapter->npars[pfn].type = (u8)pci_info[i].type;
+ adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
+ adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
}
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+ kfree(pci_info);
+ return 0;
+
+err_eswitch:
kfree(adapter->eswitch);
adapter->eswitch = NULL;
-err_eswitch:
+err_npars:
kfree(adapter->npars);
+ adapter->npars = NULL;
+err_pci_info:
+ kfree(pci_info);
return ret;
}
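/*
 * Minimal userspace sketch (not driver code) of the allocate-and-unwind
 * shape qlcnic_init_pci_info() is reworked into above: a heap scratch
 * buffer replaces the old on-stack pci_info array, and each error label
 * frees only what has already been allocated.
 */
#include <stdio.h>
#include <stdlib.h>

static int init_tables(void **npars, void **eswitch, size_t nfuncs)
{
	int ret = 0;
	void *scratch = calloc(nfuncs, 64);	/* stands in for pci_info */

	if (!scratch)
		return -1;

	*npars = calloc(nfuncs, 32);		/* stands in for adapter->npars */
	if (!*npars) {
		ret = -1;
		goto err_scratch;
	}

	*eswitch = calloc(nfuncs, 32);		/* stands in for adapter->eswitch */
	if (!*eswitch) {
		ret = -1;
		goto err_npars;
	}

	free(scratch);				/* scratch is released on success too */
	return 0;

err_npars:
	free(*npars);
	*npars = NULL;
err_scratch:
	free(scratch);
	return ret;
}

int main(void)
{
	void *npars = NULL, *eswitch = NULL;

	if (init_tables(&npars, &eswitch, 8) == 0)
		puts("tables allocated");
	free(npars);
	free(eswitch);
	return 0;
}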
@@ -529,12 +518,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
/* If other drivers are not in use set their privilege level */
- ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
ret = qlcnic_api_lock(adapter);
if (ret)
goto err_lock;
- if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
- goto err_npar;
if (qlcnic_config_npars) {
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
@@ -552,18 +539,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
adapter->ahw.pci_func));
}
writel(data, priv_op);
-err_npar:
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static u32
-qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
+static void
+qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
void __iomem *msix_base_addr;
void __iomem *priv_op;
- struct qlcnic_info nic_info;
u32 func;
u32 msix_base;
u32 op_mode, priv_level;
@@ -578,20 +563,6 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
adapter->ahw.pci_func = func;
- if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
- adapter->capabilities = nic_info.capabilities;
-
- if (adapter->capabilities & BIT_6)
- adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
- adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
- }
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- adapter->nic_ops = &qlcnic_ops;
- return adapter->fw_hal_version;
- }
-
/* Determine function privilege level */
priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
@@ -600,37 +571,14 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
else
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
- switch (priv_level) {
- case QLCNIC_MGMT_FUNC:
- adapter->op_mode = QLCNIC_MGMT_FUNC;
- adapter->nic_ops = &qlcnic_ops;
- qlcnic_init_pci_info(adapter);
- /* Set privilege level for other functions */
- qlcnic_set_function_modes(adapter);
- dev_info(&adapter->pdev->dev,
- "HAL Version: %d, Management function\n",
- adapter->fw_hal_version);
- break;
- case QLCNIC_PRIV_FUNC:
- adapter->op_mode = QLCNIC_PRIV_FUNC;
- dev_info(&adapter->pdev->dev,
- "HAL Version: %d, Privileged function\n",
- adapter->fw_hal_version);
- adapter->nic_ops = &qlcnic_ops;
- break;
- case QLCNIC_NON_PRIV_FUNC:
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
dev_info(&adapter->pdev->dev,
"HAL Version: %d Non Privileged function\n",
adapter->fw_hal_version);
adapter->nic_ops = &qlcnic_vf_ops;
- break;
- default:
- dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
- priv_level);
- return 0;
- }
- return adapter->fw_hal_version;
+ } else
+ adapter->nic_ops = &qlcnic_ops;
}
static int
@@ -663,10 +611,7 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
adapter->ahw.pci_base0 = mem_ptr0;
adapter->ahw.pci_len0 = pci_len0;
- if (!qlcnic_get_driver_mode(adapter)) {
- iounmap(adapter->ahw.pci_base0);
- return -EIO;
- }
+ qlcnic_check_vf(adapter);
adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
@@ -701,25 +646,7 @@ static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
u32 fw_major, fw_minor, fw_build;
- char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
- char serial_num[32];
- int i, offset, val;
- int *ptr32;
struct pci_dev *pdev = adapter->pdev;
- struct qlcnic_info nic_info;
- adapter->driver_mismatch = 0;
-
- ptr32 = (int *)&serial_num;
- offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
- for (i = 0; i < 8; i++) {
- if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
- dev_err(&pdev->dev, "error reading board info\n");
- adapter->driver_mismatch = 1;
- return;
- }
- ptr32[i] = cpu_to_le32(val);
- offset += sizeof(u32);
- }
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
@@ -727,19 +654,9 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->portnum == 0) {
- get_brd_name(adapter, brd_name);
-
- pr_info("%s: %s Board Chip rev 0x%x\n",
- module_name(THIS_MODULE),
- brd_name, adapter->ahw.revision_id);
- }
-
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
- adapter->flags &= ~QLCNIC_LRO_ENABLED;
-
if (adapter->ahw.port_type == QLCNIC_XGBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
@@ -748,136 +665,364 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
}
- if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
- adapter->physical_port = nic_info.phys_port;
- adapter->switch_mode = nic_info.switch_mode;
- adapter->max_tx_ques = nic_info.max_tx_ques;
- adapter->max_rx_ques = nic_info.max_rx_ques;
- adapter->capabilities = nic_info.capabilities;
- adapter->max_mac_filters = nic_info.max_mac_filters;
- adapter->max_mtu = nic_info.max_mtu;
- }
-
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
adapter->num_txd = MAX_CMD_DESCRIPTORS;
- adapter->max_rds_rings = 2;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int
+qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
+ if (err)
+ return err;
+
+ adapter->physical_port = (u8)nic_info.phys_port;
+ adapter->switch_mode = nic_info.switch_mode;
+ adapter->max_tx_ques = nic_info.max_tx_ques;
+ adapter->max_rx_ques = nic_info.max_rx_ques;
+ adapter->capabilities = nic_info.capabilities;
+ adapter->max_mac_filters = nic_info.max_mac_filters;
+ adapter->max_mtu = nic_info.max_mtu;
+
+ if (adapter->capabilities & BIT_6)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ return err;
+}
+
+static void
+qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ if (esw_cfg->discard_tagged)
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ else
+ adapter->flags |= QLCNIC_TAGGING_ENABLED;
+
+ if (esw_cfg->vlan_id)
+ adapter->pvid = esw_cfg->vlan_id;
+ else
+ adapter->pvid = 0;
+}
+
+static void
+qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
+ QLCNIC_PROMISC_DISABLED);
+
+ if (esw_cfg->mac_anti_spoof)
+ adapter->flags |= QLCNIC_MACSPOOF;
+
+ if (!esw_cfg->mac_override)
+ adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
+
+ if (!esw_cfg->promisc_mode)
+ adapter->flags |= QLCNIC_PROMISC_DISABLED;
+
+ qlcnic_set_netdev_features(adapter, esw_cfg);
+}
+
+static int
+qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return 0;
+
+ esw_cfg.pci_func = adapter->ahw.pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
+ return -EIO;
+ qlcnic_set_vlan_config(adapter, &esw_cfg);
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+
+ return 0;
+}
+
+static void
+qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned long features, vlan_features;
+
+ features = (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ features |= NETIF_F_LRO;
+
+ if (esw_cfg->offload_flags & BIT_0) {
+ netdev->features |= features;
+ adapter->rx_csum = 1;
+ if (!(esw_cfg->offload_flags & BIT_1))
+ netdev->features &= ~NETIF_F_TSO;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ netdev->features &= ~NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~features;
+ adapter->rx_csum = 0;
+ }
+
+ netdev->vlan_features = (features & vlan_features);
+}
+
+static int
+qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
+{
+ void __iomem *priv_op;
+ u32 op_mode, priv_level;
+ int err = 0;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
+ return 0;
+
+ priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ op_mode = readl(priv_op);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (priv_level == QLCNIC_MGMT_FUNC) {
+ adapter->op_mode = QLCNIC_MGMT_FUNC;
+ err = qlcnic_init_pci_info(adapter);
+ if (err)
+ return err;
+ /* Set privilege level for other functions */
+ qlcnic_set_function_modes(adapter);
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Management function\n",
+ adapter->fw_hal_version);
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ adapter->op_mode = QLCNIC_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->fw_hal_version);
+ }
+ }
+
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ return err;
+}
+
+static int
+qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ struct qlcnic_npar_info *npar;
+ u8 i;
+
+ if (adapter->need_fw_reset)
+ return 0;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
+ esw_cfg.pci_func = i;
+ esw_cfg.offload_flags = BIT_0;
+ esw_cfg.mac_override = BIT_0;
+ esw_cfg.promisc_mode = BIT_0;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+ npar = &adapter->npars[i];
+ npar->pvid = esw_cfg.vlan_id;
+ npar->mac_override = esw_cfg.mac_override;
+ npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
+ npar->discard_tagged = esw_cfg.discard_tagged;
+ npar->promisc_mode = esw_cfg.promisc_mode;
+ npar->offload_flags = esw_cfg.offload_flags;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_info *npar, int pci_func)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
+ esw_cfg.pci_func = pci_func;
+ esw_cfg.vlan_id = npar->pvid;
+ esw_cfg.mac_override = npar->mac_override;
+ esw_cfg.discard_tagged = npar->discard_tagged;
+ esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
+ esw_cfg.offload_flags = npar->offload_flags;
+ esw_cfg.promisc_mode = npar->promisc_mode;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ return 0;
}
static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
- int i, err = 0;
+ int i, err;
struct qlcnic_npar_info *npar;
struct qlcnic_info nic_info;
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- !adapter->need_fw_reset)
+ if (!adapter->need_fw_reset)
return 0;
- if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
- /* Set the NPAR config data after FW reset */
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- npar = &adapter->npars[i];
- if (npar->type != QLCNIC_TYPE_NIC)
- continue;
- err = qlcnic_get_nic_info(adapter, &nic_info, i);
- if (err)
- goto err_out;
- nic_info.min_tx_bw = npar->min_bw;
- nic_info.max_tx_bw = npar->max_bw;
- err = qlcnic_set_nic_info(adapter, &nic_info);
+ /* Set the NPAR config data after FW reset */
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ npar = &adapter->npars[i];
+ if (npar->type != QLCNIC_TYPE_NIC)
+ continue;
+ err = qlcnic_get_nic_info(adapter, &nic_info, i);
+ if (err)
+ return err;
+ nic_info.min_tx_bw = npar->min_bw;
+ nic_info.max_tx_bw = npar->max_bw;
+ err = qlcnic_set_nic_info(adapter, &nic_info);
+ if (err)
+ return err;
+
+ if (npar->enable_pm) {
+ err = qlcnic_config_port_mirroring(adapter,
+ npar->dest_npar, 1, i);
if (err)
- goto err_out;
+ return err;
+ }
+ err = qlcnic_reset_eswitch_config(adapter, npar, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
- if (npar->enable_pm) {
- err = qlcnic_config_port_mirroring(adapter,
- npar->dest_npar, 1, i);
- if (err)
- goto err_out;
+static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
+{
+ u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
+ u32 npar_state;
- }
- npar->mac_learning = DEFAULT_MAC_LEARN;
- npar->host_vlan_tag = 0;
- npar->promisc_mode = 0;
- npar->discard_tagged = 0;
- npar->vlan_id = 0;
- }
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
+ msleep(1000);
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
}
-err_out:
+ if (!npar_opt_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for NPAR state to opertional timeout\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return 0;
+
+ err = qlcnic_set_default_offload_settings(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_reset_npar_config(adapter);
+ if (err)
+ return err;
+
+ qlcnic_dev_set_npar_ready(adapter);
+
return err;
}
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
- int val, err, first_boot;
+ int err;
err = qlcnic_can_start_firmware(adapter);
if (err < 0)
return err;
else if (!err)
- goto wait_init;
-
- first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
- if (first_boot == 0x55555555)
- /* This is the first boot after power up */
- QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ goto check_fw_status;
if (load_fw_file)
qlcnic_request_firmware(adapter);
else {
- if (qlcnic_check_flash_fw_ver(adapter))
+ err = qlcnic_check_flash_fw_ver(adapter);
+ if (err)
goto err_out;
adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
}
err = qlcnic_need_fw_reset(adapter);
- if (err < 0)
- goto err_out;
if (err == 0)
- goto wait_init;
-
- if (first_boot != 0x55555555) {
- QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
- QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
- qlcnic_pinit_from_rom(adapter);
- msleep(1);
- }
+ goto check_fw_status;
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
-
- qlcnic_set_port_mode(adapter);
+ err = qlcnic_pinit_from_rom(adapter);
+ if (err)
+ goto err_out;
err = qlcnic_load_firmware(adapter);
if (err)
goto err_out;
qlcnic_release_firmware(adapter);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
- val = (_QLCNIC_LINUX_MAJOR << 16)
- | ((_QLCNIC_LINUX_MINOR << 8))
- | (_QLCNIC_LINUX_SUBVERSION);
- QLCWR32(adapter, CRB_DRIVER_VERSION, val);
-
-wait_init:
- /* Handshake with the card before we register the devices. */
- err = qlcnic_init_firmware(adapter);
+check_fw_status:
+ err = qlcnic_check_fw_status(adapter);
if (err)
goto err_out;
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
- qlcnic_check_options(adapter);
- if (qlcnic_reset_npar_config(adapter))
+ err = qlcnic_check_eswitch_mode(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failed for eswitch\n");
+ goto err_out;
+ }
+ err = qlcnic_set_mgmt_operations(adapter);
+ if (err)
goto err_out;
- qlcnic_dev_set_npar_ready(adapter);
+ qlcnic_check_options(adapter);
adapter->need_fw_reset = 0;
qlcnic_release_firmware(adapter);
@@ -886,6 +1031,7 @@ wait_init:
err_out:
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_err(&adapter->pdev->dev, "Device state set to failed\n");
+
qlcnic_release_firmware(adapter);
return err;
}
@@ -969,6 +1115,8 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return -EIO;
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
@@ -988,7 +1136,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_config_intr_coalesce(adapter);
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
qlcnic_napi_enable(adapter);
@@ -1031,6 +1179,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_free_mac_list(adapter);
+ if (adapter->fhash.fnum)
+ qlcnic_delete_lb_filters(adapter);
+
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
qlcnic_napi_disable(adapter);
@@ -1267,7 +1418,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM);
@@ -1286,12 +1437,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->features |= NETIF_F_LRO;
-
netdev->irq = adapter->msix_entries[0].vector;
- if (qlcnic_read_mac_addr(adapter))
- dev_warn(&pdev->dev, "failed to read mac addr\n");
-
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -1328,6 +1475,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
uint8_t revision_id;
uint8_t pci_using_dac;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
err = pci_enable_device(pdev);
if (err)
@@ -1385,10 +1533,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
- if (qlcnic_read_mac_addr(adapter))
- dev_warn(&pdev->dev, "failed to read mac addr\n");
-
- if (qlcnic_setup_idc_param(adapter))
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
goto err_out_iounmap;
err = adapter->nic_ops->start_firmware(adapter);
@@ -1397,6 +1543,17 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_decr_ref;
}
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw.revision_id);
+ }
+
qlcnic_clear_stats(adapter);
qlcnic_setup_intr(adapter);
@@ -1420,6 +1577,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
+ qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_create_diag_entries(adapter);
return 0;
@@ -1428,7 +1586,7 @@ err_out_disable_msi:
qlcnic_teardown_intr(adapter);
err_out_decr_ref:
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
err_out_iounmap:
qlcnic_cleanup_pci_map(adapter);
@@ -1467,10 +1625,12 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
if (adapter->eswitch != NULL)
kfree(adapter->eswitch);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_free_lb_filters_mem(adapter);
+
qlcnic_teardown_intr(adapter);
qlcnic_remove_diag_entries(adapter);
@@ -1499,7 +1659,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1563,7 +1723,7 @@ qlcnic_resume(struct pci_dev *pdev)
if (err)
goto done;
- qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
done:
netif_device_attach(netdev);
@@ -1577,9 +1737,6 @@ static int qlcnic_open(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err;
- if (adapter->driver_mismatch)
- return -EIO;
-
err = qlcnic_attach(adapter);
if (err)
return err;
@@ -1609,6 +1766,121 @@ static int qlcnic_close(struct net_device *netdev)
}
static void
+qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ void *head;
+ int i;
+
+ if (!qlcnic_mac_learn)
+ return;
+
+ spin_lock_init(&adapter->mac_learn_lock);
+
+ head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!head)
+ return;
+
+ adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fhead = (struct hlist_head *)head;
+
+ for (i = 0; i < adapter->fhash.fmax; i++)
+ INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+}
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
+ kfree(adapter->fhash.fhead);
+
+ adapter->fhash.fhead = NULL;
+ adapter->fhash.fmax = 0;
+}
+
+static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u32 producer;
+ u64 word;
+
+ producer = tx_ring->producer;
+ hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+ req = (struct qlcnic_nic_req *)hwdesc;
+ memset(req, 0, sizeof(struct qlcnic_nic_req));
+ req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+ req->req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ vlan_req->vlan_id = vlan_id;
+
+ tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+}
+
+#define QLCNIC_MAC_HASH(MAC)\
+ ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+
+static void
+qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ u64 src_addr = 0;
+ __le16 vlan_id = 0;
+ u8 hindex;
+
+ if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ return;
+
+ /* Only NPAR-capable devices support VLAN-based learning */
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ vlan_id = first_desc->vlan_TCI;
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ head = &(adapter->fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ spin_lock(&adapter->mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->fhash.fnum++;
+ spin_unlock(&adapter->mac_learn_lock);
+}
+
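/*
 * Userspace sketch (constants assumed, not taken from the patch) of how
 * qlcnic_send_filter() above picks a hash bucket: with the source MAC
 * copied into a u64 on a little-endian host, QLCNIC_MAC_HASH() combines
 * the low three bits of MAC bytes 2 and 5 into a 6-bit value, which is
 * then masked to the filter-table size (assumed to be a power of two,
 * e.g. 64).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define LB_MAX_FILTERS	64	/* assumed value of QLCNIC_LB_MAX_FILTERS */
#define MAC_HASH(MAC) \
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
	uint64_t src = 0;

	/* mirrors memcpy(&src_addr, phdr->h_source, ETH_ALEN) */
	memcpy(&src, mac, sizeof(mac));
	printf("bucket = %u\n",
	       (unsigned)(MAC_HASH(src) & (LB_MAX_FILTERS - 1)));
	return 0;
}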
+static void
qlcnic_tso_check(struct net_device *netdev,
struct qlcnic_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
@@ -1616,26 +1888,14 @@ qlcnic_tso_check(struct net_device *netdev,
{
u8 opcode = TX_ETHER_PKT;
__be16 protocol = skb->protocol;
- u16 flags = 0, vid = 0;
- int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ u16 flags = 0;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
u32 producer = tx_ring->producer;
-
- if (protocol == cpu_to_be16(ETH_P_8021Q)) {
-
- vh = (struct vlan_ethhdr *)skb->data;
- protocol = vh->h_vlan_encapsulated_proto;
- flags = FLAGS_VLAN_TAGGED;
-
- } else if (vlan_tx_tag_present(skb)) {
-
- flags = FLAGS_VLAN_OOB;
- vid = vlan_tx_tag_get(skb);
- qlcnic_set_tx_vlan_tci(first_desc, vid);
- vlan_oob = 1;
- }
+ __le16 vlan_oob = first_desc->flags_opcode &
+ cpu_to_le16(FLAGS_VLAN_OOB);
if (*(skb->data) & BIT_0) {
flags |= BIT_0;
@@ -1706,7 +1966,8 @@ qlcnic_tso_check(struct net_device *netdev,
vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
skb_copy_from_linear_data(skb, vh, 12);
vh->h_vlan_proto = htons(ETH_P_8021Q);
- vh->h_vlan_TCI = htons(vid);
+ vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
+
skb_copy_from_linear_data_offset(skb, 12,
(char *)vh + 16, copy_len - 16);
@@ -1786,11 +2047,47 @@ out_err:
return -ENOMEM;
}
+static int
+qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb,
+ struct cmd_desc_type0 *first_desc)
+{
+ u8 opcode = 0;
+ u16 flags = 0;
+ __be16 protocol = skb->protocol;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+ qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
+ } else if (vlan_tx_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
+ }
+ if (unlikely(adapter->pvid)) {
+ if (first_desc->vlan_TCI &&
+ !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (first_desc->vlan_TCI &&
+ (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = FLAGS_VLAN_OOB;
+ qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
+ }
+set_flags:
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+ return 0;
+}
+
static inline void
qlcnic_clear_cmddesc(u64 *desc)
{
desc[0] = 0ULL;
desc[2] = 0ULL;
+ desc[7] = 0ULL;
}
netdev_tx_t
@@ -1802,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
+ struct ethhdr *phdr;
int i, k;
u32 producer;
@@ -1813,6 +2111,13 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
+ if (adapter->flags & QLCNIC_MACSPOOF) {
+ phdr = (struct ethhdr *)skb->data;
+ if (compare_ether_addr(phdr->h_source,
+ adapter->mac_addr))
+ goto drop_packet;
+ }
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 4 fragments per cmd des */
@@ -1834,6 +2139,12 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pdev = adapter->pdev;
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
+ goto drop_packet;
+
if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
adapter->stats.tx_dma_map_error++;
goto drop_packet;
@@ -1842,9 +2153,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pbuf->skb = skb;
pbuf->frag_count = frag_count;
- first_desc = hwdesc = &tx_ring->desc_head[producer];
- qlcnic_clear_cmddesc((u64 *)hwdesc);
-
qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
qlcnic_set_tx_port(first_desc, adapter->portnum);
@@ -1883,6 +2191,9 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+ if (qlcnic_mac_learn)
+ qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+
qlcnic_update_cmd_producer(adapter, tx_ring);
adapter->stats.txbytes += skb->len;
@@ -1937,14 +2248,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
struct net_device *netdev = adapter->netdev;
if (adapter->ahw.linkup && !linkup) {
- dev_info(&netdev->dev, "NIC Link is down\n");
+ netdev_info(netdev, "NIC Link is down\n");
adapter->ahw.linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
} else if (!adapter->ahw.linkup && linkup) {
- dev_info(&netdev->dev, "NIC Link is up\n");
+ netdev_info(netdev, "NIC Link is up\n");
adapter->ahw.linkup = 1;
if (netif_running(netdev)) {
netif_carrier_on(netdev);
@@ -1973,8 +2284,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
- memset(stats, 0, sizeof(*stats));
-
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2180,9 +2489,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- qlcnic_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
@@ -2243,18 +2559,22 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
}
static void
-qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
{
u32 val;
if (qlcnic_api_lock(adapter))
goto err;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
- if (!(val & 0x11111111))
+ if (failed) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_info(&adapter->pdev->dev,
+ "Device state set to Failed. Please Reboot\n");
+ } else if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
@@ -2275,7 +2595,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
int act, state;
state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (((state & 0x11111111) == (act & 0x11111111)) ||
((act & 0x11111111) == ((state >> 1) & 0x11111111)))
@@ -2310,10 +2630,10 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
}
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
@@ -2388,13 +2708,14 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- u32 dev_state = 0xf, npar_state;
+ u32 dev_state = 0xf;
if (qlcnic_api_lock(adapter))
goto err_ret;
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (dev_state == QLCNIC_DEV_QUISCENT) {
+ if (dev_state == QLCNIC_DEV_QUISCENT ||
+ dev_state == QLCNIC_DEV_NEED_QUISCENT) {
qlcnic_api_unlock(adapter);
qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
FW_POLL_DELAY * 2);
@@ -2402,16 +2723,8 @@ qlcnic_fwinit_work(struct work_struct *work)
}
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
- if (npar_state == QLCNIC_DEV_NPAR_RDY) {
- qlcnic_api_unlock(adapter);
- goto wait_npar;
- } else {
- qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
- FW_POLL_DELAY);
- qlcnic_api_unlock(adapter);
- return;
- }
+ qlcnic_api_unlock(adapter);
+ goto wait_npar;
}
if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
@@ -2424,18 +2737,6 @@ qlcnic_fwinit_work(struct work_struct *work)
skip_ack_check:
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_QUISCENT);
- qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
- FW_POLL_DELAY * 2);
- QLCDB(adapter, DRV, "Quiscing the driver\n");
- qlcnic_idc_debug_info(adapter, 0);
-
- qlcnic_api_unlock(adapter);
- return;
- }
-
if (dev_state == QLCNIC_DEV_NEED_RESET) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
QLCNIC_DEV_INITIALIZING);
@@ -2448,6 +2749,7 @@ skip_ack_check:
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
return;
}
goto err_ret;
@@ -2460,27 +2762,25 @@ wait_npar:
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
- case QLCNIC_DEV_QUISCENT:
- case QLCNIC_DEV_NEED_QUISCENT:
- case QLCNIC_DEV_NEED_RESET:
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, FW_POLL_DELAY);
- return;
- case QLCNIC_DEV_FAILED:
- break;
-
- default:
+ case QLCNIC_DEV_READY:
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
return;
}
+ case QLCNIC_DEV_FAILED:
+ break;
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
}
err_ret:
dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
"fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
netif_device_attach(adapter->netdev);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
}
static void
@@ -2493,7 +2793,12 @@ qlcnic_detach_work(struct work_struct *work)
netif_device_detach(netdev);
- qlcnic_down(adapter, netdev);
+ /* Don't grab the rtnl lock during quiescent mode */
+ if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ } else
+ qlcnic_down(adapter, netdev);
status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
@@ -2516,8 +2821,78 @@ err_ret:
dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
status, adapter->temp);
netif_device_attach(netdev);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 1);
+}
+
+/* Transition NPAR state to non-operational */
+static void
+qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (state == QLCNIC_DEV_NPAR_NON_OPER)
+ return;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
+/* Caller should hold the RESETTING bit.
+ * This must be called in sync with qlcnic_request_quiscent_mode.
+ */
+void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
+{
+ qlcnic_clr_drv_state(adapter);
+ qlcnic_api_lock(adapter);
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ qlcnic_api_unlock(adapter);
+}
+
+/* Caller should hold the RESETTING bit.
+ */
+int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
+{
+ u8 timeo = adapter->dev_init_timeo / 2;
+ u32 state;
+
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state != QLCNIC_DEV_READY)
+ return -EIO;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
+ qlcnic_api_unlock(adapter);
+ QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
+
+ qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
+
+ do {
+ msleep(2000);
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_QUISCENT)
+ return 0;
+ if (!qlcnic_check_drv_state(adapter)) {
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_QUISCENT);
+ qlcnic_api_unlock(adapter);
+ QLCDB(adapter, DRV, "QUISCENT mode set\n");
+ return 0;
+ }
+ } while (--timeo);
+ dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
+ " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
+ QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
+ qlcnic_clear_quiscent_mode(adapter);
+ return -EIO;
}
/*Transit to RESET state from READY state only */
@@ -2538,6 +2913,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
qlcnic_idc_debug_info(adapter, 0);
}
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
@@ -2545,21 +2921,11 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
static void
qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
{
- u32 state;
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
- return;
if (qlcnic_api_lock(adapter))
return;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
-
- if (state != QLCNIC_DEV_NPAR_RDY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
- QLCNIC_DEV_NPAR_RDY);
- QLCDB(adapter, DRV, "NPAR READY state set\n");
- }
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
}
@@ -2572,7 +2938,8 @@ qlcnic_schedule_work(struct qlcnic_adapter *adapter,
return;
INIT_DELAYED_WORK(&adapter->fw_work, func);
- schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
+ queue_delayed_work(qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
}
static void
@@ -2590,12 +2957,26 @@ qlcnic_attach_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
+ u32 npar_state;
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
+ qlcnic_clr_all_drv_state(adapter, 0);
+ else if (npar_state != QLCNIC_DEV_NPAR_OPER)
+ qlcnic_schedule_work(adapter, qlcnic_attach_work,
+ FW_POLL_DELAY);
+ else
+ goto attach;
+ QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
+ return;
+ }
+attach:
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
goto done;
- qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
done:
@@ -2611,7 +2992,7 @@ done:
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
- u32 state = 0, heartbit;
+ u32 state = 0, heartbeat;
struct net_device *netdev = adapter->netdev;
if (qlcnic_check_temp(adapter))
@@ -2621,12 +3002,15 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
qlcnic_dev_request_reset(adapter);
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+ if (state == QLCNIC_DEV_NEED_RESET) {
+ qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
+ } else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ goto detach;
- heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- if (heartbit != adapter->heartbit) {
- adapter->heartbit = heartbit;
+ heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
goto detach;
@@ -2677,6 +3061,9 @@ qlcnic_fw_poll_work(struct work_struct *work)
if (qlcnic_check_health(adapter))
return;
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
reschedule:
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}
@@ -2723,7 +3110,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (qlcnic_api_lock(adapter))
return -EINVAL;
- if (first_func) {
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@ -2741,7 +3128,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (err) {
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 1);
clear_bit(__QLCNIC_AER, &adapter->state);
netif_device_attach(netdev);
return err;
@@ -2751,7 +3138,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (err)
goto done;
- qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
done:
netif_device_attach(netdev);
@@ -2807,7 +3194,6 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
FW_POLL_DELAY);
}
-
static int
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
{
@@ -2817,8 +3203,20 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
if (err)
return err;
+ err = qlcnic_check_npar_opertional(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
qlcnic_check_options(adapter);
+ err = qlcnic_set_eswitch_port_config(adapter);
+ if (err)
+ return err;
+
adapter->need_fw_reset = 0;
return err;
@@ -3078,9 +3476,6 @@ validate_pm_config(struct qlcnic_adapter *adapter,
if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
- if (!IS_VALID_MODE(pm_cfg[i].action))
- return QL_STATUS_INVALID_PARAM;
-
s_esw_id = adapter->npars[src_pci_func].phy_port;
d_esw_id = adapter->npars[dest_pci_func].phy_port;
@@ -3114,7 +3509,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
- action = pm_cfg[i].action;
+ action = !!pm_cfg[i].action;
id = adapter->npars[pci_func].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
action, pci_func);
@@ -3125,7 +3520,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
+ adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
adapter->npars[pci_func].dest_npar = id;
}
return size;
@@ -3157,30 +3552,46 @@ qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
static int
validate_esw_config(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg, int count)
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ u32 op_mode;
u8 pci_func;
int i;
+ op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
+
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- return QL_STATUS_INVALID_PARAM;
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
- if (esw_cfg->host_vlan_tag == 1)
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
+ QLCNIC_NON_PRIV_FUNC) {
+ esw_cfg[i].mac_anti_spoof = 0;
+ esw_cfg[i].mac_override = 1;
+ esw_cfg[i].promisc_mode = 1;
+ }
+ break;
+ case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
return QL_STATUS_INVALID_PARAM;
-
- if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
- || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
- || !IS_VALID_MODE(esw_cfg[i].mac_learning)
- || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ case QLCNIC_DEL_VLAN:
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ default:
return QL_STATUS_INVALID_PARAM;
+ }
}
-
return 0;
}
@@ -3191,8 +3602,9 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
+ struct qlcnic_npar_info *npar;
int count, rem, i, ret;
- u8 id, pci_func;
+ u8 pci_func, op_mode = 0;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -3205,30 +3617,55 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
return ret;
for (i = 0; i < count; i++) {
- pci_func = esw_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_switch_port(adapter, id,
- esw_cfg[i].host_vlan_tag,
- esw_cfg[i].discard_tagged,
- esw_cfg[i].promisc_mode,
- esw_cfg[i].mac_learning,
- esw_cfg[i].pci_func,
- esw_cfg[i].vlan_id);
- if (ret)
- return ret;
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
+ continue;
+
+ op_mode = esw_cfg[i].op_mode;
+ qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+ esw_cfg[i].op_mode = op_mode;
+ esw_cfg[i].pci_func = adapter->ahw.pci_func;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_ADD_VLAN:
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_DEL_VLAN:
+ esw_cfg[i].vlan_id = 0;
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ }
}
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ goto out;
+
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
- adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
- adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
- adapter->npars[pci_func].discard_tagged =
- esw_cfg[i].discard_tagged;
- adapter->npars[pci_func].host_vlan_tag =
- esw_cfg[i].host_vlan_tag;
+ npar = &adapter->npars[pci_func];
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ npar->promisc_mode = esw_cfg[i].promisc_mode;
+ npar->mac_override = esw_cfg[i].mac_override;
+ npar->offload_flags = esw_cfg[i].offload_flags;
+ npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+ npar->discard_tagged = esw_cfg[i].discard_tagged;
+ break;
+ case QLCNIC_ADD_VLAN:
+ npar->pvid = esw_cfg[i].vlan_id;
+ break;
+ case QLCNIC_DEL_VLAN:
+ npar->pvid = 0;
+ break;
+ }
}
-
+out:
return size;
}
@@ -3239,7 +3676,7 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
+ u8 i;
if (size != sizeof(esw_cfg))
return QL_STATUS_INVALID_PARAM;
@@ -3247,12 +3684,9 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
continue;
-
- esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
- esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
- esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
- esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
- esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
+ esw_cfg[i].pci_func = i;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
}
memcpy(buf, &esw_cfg, size);
@@ -3342,7 +3776,7 @@ qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
return ret;
np_cfg[i].pci_func = i;
- np_cfg[i].op_mode = nic_info.op_mode;
+ np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
np_cfg[i].fw_capab = nic_info.capabilities;
np_cfg[i].min_bw = nic_info.min_tx_bw ;
@@ -3355,21 +3789,136 @@ qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
}
static ssize_t
+qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&port_stats, 0, size);
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &port_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &port_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &port_stats, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics esw_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&esw_stats, 0, size);
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &esw_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &esw_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &esw_stats, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
- struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info *pci_info;
int i, ret;
if (size != sizeof(pci_cfg))
return QL_STATUS_INVALID_PARAM;
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
ret = qlcnic_get_pci_info(adapter, pci_info);
- if (ret)
+ if (ret) {
+ kfree(pci_info);
return ret;
+ }
for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3929,8 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
memcpy(buf, &pci_cfg, size);
+ kfree(pci_info);
return size;
-
}
static struct bin_attribute bin_attr_npar_config = {
.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
@@ -3397,6 +3946,20 @@ static struct bin_attribute bin_attr_pci_config = {
.write = NULL,
};
+static struct bin_attribute bin_attr_port_stats = {
+ .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+ .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
+};
+
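/*
 * Userspace sketch of driving the new "port_stats"/"esw_stats" nodes
 * (the device path and record size below are assumptions, not taken
 * from the patch): the sysfs file offset selects the PCI function or
 * eswitch port, the buffer must be exactly
 * sizeof(struct qlcnic_esw_statistics) as defined in the driver header,
 * and a write at the same offset clears the counters.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#define ESW_STATS_RECORD_SIZE	80	/* placeholder; use the driver's struct size */

int main(void)
{
	char buf[ESW_STATS_RECORD_SIZE];
	/* hypothetical sysfs path for one adapter function */
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats", O_RDONLY);

	if (fd < 0)
		return 1;

	/* offset 1 -> statistics for PCI function 1 */
	if (pread(fd, buf, sizeof(buf), 1) != (ssize_t)sizeof(buf))
		perror("pread");

	close(fd);
	return 0;
}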
static struct bin_attribute bin_attr_esw_config = {
.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
@@ -3436,6 +3999,9 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ if (device_create_bin_file(dev, &bin_attr_port_stats))
+ dev_info(dev, "failed to create port stats sysfs entry");
+
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
if (device_create_file(dev, &dev_attr_diag_mode))
@@ -3444,18 +4010,20 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+ dev_info(dev, "failed to create esw config sysfs entry");
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_npar_config))
dev_info(dev, "failed to create npar config sysfs entry");
- if (device_create_bin_file(dev, &bin_attr_esw_config))
- dev_info(dev, "failed to create esw config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_pm_config))
dev_info(dev, "failed to create pm config sysfs entry");
-
+ if (device_create_bin_file(dev, &bin_attr_esw_stats))
+ dev_info(dev, "failed to create eswitch stats sysfs entry");
}
static void
@@ -3463,18 +4031,22 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ device_remove_bin_file(dev, &bin_attr_port_stats);
+
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_bin_file(dev, &bin_attr_npar_config);
- device_remove_bin_file(dev, &bin_attr_esw_config);
device_remove_bin_file(dev, &bin_attr_pm_config);
+ device_remove_bin_file(dev, &bin_attr_esw_stats);
}
#ifdef CONFIG_INET
@@ -3482,10 +4054,10 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
static void
-qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
+ struct net_device *dev, unsigned long event)
{
struct in_device *indev;
- struct qlcnic_adapter *adapter = netdev_priv(dev);
indev = in_dev_get(dev);
if (!indev)
@@ -3509,6 +4081,27 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
in_dev_put(indev);
}
+static void
+qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev;
+ u16 vid;
+
+ qlcnic_config_indev_addr(adapter, netdev, event);
+
+ if (!adapter->vlgrp)
+ return;
+
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ dev = vlan_group_get_device(adapter->vlgrp, vid);
+ if (!dev)
+ continue;
+
+ qlcnic_config_indev_addr(adapter, dev, event);
+ }
+}
+
static int qlcnic_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -3535,7 +4128,7 @@ recheck:
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto done;
- qlcnic_config_indev_addr(dev, event);
+ qlcnic_config_indev_addr(adapter, dev, event);
done:
return NOTIFY_DONE;
}
@@ -3552,7 +4145,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
recheck:
- if (dev == NULL || !netif_running(dev))
+ if (dev == NULL)
goto done;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
@@ -3595,7 +4188,7 @@ static struct notifier_block qlcnic_inetaddr_cb = {
};
#else
static void
-qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
static struct pci_error_handlers qlcnic_err_handler = {
@@ -3624,6 +4217,12 @@ static int __init qlcnic_init_module(void)
printk(KERN_INFO "%s\n", qlcnic_driver_string);
+ qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (qlcnic_wq == NULL) {
+ printk(KERN_ERR "qlcnic: cannot create workqueue\n");
+ return -ENOMEM;
+ }
+
#ifdef CONFIG_INET
register_netdevice_notifier(&qlcnic_netdev_cb);
register_inetaddr_notifier(&qlcnic_inetaddr_cb);
@@ -3635,6 +4234,7 @@ static int __init qlcnic_init_module(void)
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
+ destroy_workqueue(qlcnic_wq);
}
return ret;
@@ -3651,6 +4251,7 @@ static void __exit qlcnic_exit_module(void)
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
+ destroy_workqueue(qlcnic_wq);
}
module_exit(qlcnic_exit_module);
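
[editor's note] The qlcnic_init_module()/qlcnic_exit_module() hunks create a dedicated single-threaded workqueue before anything can queue work on it, destroy it again if driver registration fails, and destroy it on module exit. A minimal sketch of that ordering for a hypothetical driver; the registration step is stubbed out.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init_module(void)
{
	int ret;

	/* Create the workqueue first, so later init code (and anything it
	 * triggers) may safely queue work on it. */
	demo_wq = create_singlethread_workqueue("demo");
	if (demo_wq == NULL) {
		pr_err("demo: cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = 0;	/* register notifiers / pci_register_driver() here */
	if (ret)
		destroy_workqueue(demo_wq);	/* unwind on failure */

	return ret;
}

static void __exit demo_exit_module(void)
{
	/* Unregister everything first, then tear the workqueue down. */
	destroy_workqueue(demo_wq);
}

module_init(demo_init_module);
module_exit(demo_exit_module);
MODULE_LICENSE("GPL");
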
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8d63f69b27d..4ffebe83d88 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1566,7 +1566,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (qdev->rx_csum &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
@@ -1676,7 +1676,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* If rx checksum is on, and there are no
* csum or frame errors.
@@ -1996,7 +1996,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* If rx checksum is on, and there are no
* csum or frame errors.
@@ -2222,10 +2222,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_update_cq(rx_ring);
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
}
+ if (!net_rsp)
+ return 0;
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
- if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
- net_rsp != NULL) {
+ if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
if (atomic_read(&tx_ring->queue_stopped) &&
(atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
@@ -3888,11 +3889,8 @@ int ql_wol(struct ql_adapter *qdev)
return status;
}
-static int ql_adapter_down(struct ql_adapter *qdev)
+static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
- int i, status = 0;
-
- ql_link_off(qdev);
/* Don't kill the reset worker thread if we
* are in the process of recovery.
@@ -3904,6 +3902,15 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_idc_work);
cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+
+ ql_link_off(qdev);
+
+ ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
napi_disable(&qdev->rx_ring[i].napi);
@@ -3919,12 +3926,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
- ql_free_rx_buffers(qdev);
-
status = ql_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
+ ql_free_rx_buffers(qdev);
+
return status;
}
@@ -4726,6 +4733,7 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
del_timer_sync(&qdev->timer);
+ ql_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
@@ -4745,13 +4753,7 @@ static void ql_eeh_close(struct net_device *ndev)
/* Disabling the timer */
del_timer_sync(&qdev->timer);
- if (test_bit(QL_ADAPTER_UP, &qdev->flags))
- cancel_delayed_work_sync(&qdev->asic_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- cancel_delayed_work_sync(&qdev->mpi_core_to_log);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+ ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
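
[editor's note] The qlge hunks fold the repeated cancel_delayed_work_sync() calls into one ql_cancel_all_work_sync() helper and call it from ql_adapter_down(), qlge_remove() and ql_eeh_close(), so every teardown path stops deferred work the same way. A condensed sketch of the pattern with hypothetical work items.

#include <linux/workqueue.h>

struct demo_adapter {
	struct delayed_work asic_reset_work;
	struct delayed_work mpi_work;
};

/* One place that synchronously stops every deferred work item. */
static void demo_cancel_all_work_sync(struct demo_adapter *adapter)
{
	cancel_delayed_work_sync(&adapter->asic_reset_work);
	cancel_delayed_work_sync(&adapter->mpi_work);
}

static void demo_adapter_down(struct demo_adapter *adapter)
{
	demo_cancel_all_work_sync(adapter);
	/* ...disable NAPI, reset the hardware, free RX buffers... */
}

static void demo_remove(struct demo_adapter *adapter)
{
	/* The remove and error-recovery paths reuse the same helper
	 * instead of open-coding the list of work items again. */
	demo_cancel_all_work_sync(adapter);
}
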
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 142c381e1d7..68a84198eb0 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -200,7 +200,7 @@ struct r6040_private {
int old_duplex;
};
-static char version[] __devinitdata = KERN_INFO DRV_NAME
+static char version[] __devinitdata = DRV_NAME
": RDC R6040 NAPI net driver,"
"version "DRV_VERSION " (" DRV_RELDATE ")";
@@ -224,7 +224,8 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
}
/* Write a word of data to the PHY chip */
-static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
+static void r6040_phy_write(void __iomem *ioaddr,
+ int phy_addr, int reg, u16 val)
{
int limit = 2048;
u16 cmd;
@@ -348,8 +349,8 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
}
desc->skb_ptr = skb;
desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
- desc->skb_ptr->data,
- MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+ desc->skb_ptr->data,
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
desc->status = DSC_OWNER_MAC;
desc = desc->vndescp;
} while (desc != lp->rx_ring);
@@ -491,12 +492,14 @@ static int r6040_close(struct net_device *dev)
/* Free Descriptor memory */
if (lp->rx_ring) {
- pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
+ pci_free_consistent(pdev,
+ RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
}
if (lp->tx_ring) {
- pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
+ pci_free_consistent(pdev,
+ TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
lp->tx_ring = NULL;
}
@@ -547,7 +550,7 @@ static int r6040_rx(struct net_device *dev, int limit)
}
goto next_descr;
}
-
+
/* Packet successfully received */
new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!new_skb) {
@@ -556,13 +559,13 @@ static int r6040_rx(struct net_device *dev, int limit)
}
skb_ptr = descptr->skb_ptr;
skb_ptr->dev = priv->dev;
-
+
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - 4);
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
-
+
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->stats.rx_packets++;
@@ -710,8 +713,10 @@ static int r6040_up(struct net_device *dev)
return ret;
/* improve performance (by RDC guys) */
- r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
- r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
+ r6040_phy_write(ioaddr, 30, 17,
+ (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
+ r6040_phy_write(ioaddr, 30, 17,
+ ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
r6040_phy_write(ioaddr, 0, 19, 0x0000);
r6040_phy_write(ioaddr, 0, 30, 0x01F0);
@@ -740,6 +745,9 @@ static void r6040_mac_address(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
+
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
}
static int r6040_open(struct net_device *dev)
@@ -751,7 +759,7 @@ static int r6040_open(struct net_device *dev)
ret = request_irq(dev->irq, r6040_interrupt,
IRQF_SHARED, dev->name, dev);
if (ret)
- return ret;
+ goto out;
/* Set MAC address */
r6040_mac_address(dev);
@@ -759,30 +767,37 @@ static int r6040_open(struct net_device *dev)
/* Allocate Descriptor memory */
lp->rx_ring =
pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
- if (!lp->rx_ring)
- return -ENOMEM;
+ if (!lp->rx_ring) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
lp->tx_ring =
pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
if (!lp->tx_ring) {
- pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
- lp->rx_ring_dma);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_rx_ring;
}
ret = r6040_up(dev);
- if (ret) {
- pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
- lp->tx_ring_dma);
- pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
- lp->rx_ring_dma);
- return ret;
- }
+ if (ret)
+ goto err_free_tx_ring;
napi_enable(&lp->napi);
netif_start_queue(dev);
return 0;
+
+err_free_tx_ring:
+ pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
+ lp->tx_ring_dma);
+err_free_rx_ring:
+ pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
+ lp->rx_ring_dma);
+err_free_irq:
+ free_irq(dev->irq, dev);
+out:
+ return ret;
}
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
@@ -946,7 +961,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_multicast_list = r6040_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = r6040_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1039,7 +1054,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
u16 *adrp;
int i;
- printk("%s\n", version);
+ pr_info("%s\n", version);
err = pci_enable_device(pdev);
if (err)
@@ -1113,7 +1128,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
- netdev_warn(dev, "MAC address not initialized, generating random\n");
+ netdev_warn(dev, "MAC address not initialized, "
+ "generating random\n");
random_ether_addr(dev->dev_addr);
}
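
[editor's note] The r6040_open() hunk replaces early returns with a single goto-based unwind so the IRQ and each descriptor ring are released in reverse order of acquisition. A self-contained sketch of that pattern; the sizes, private structure and interrupt handler are placeholders, not the driver's.

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#define DEMO_RX_DESC_SIZE 2048
#define DEMO_TX_DESC_SIZE 2048

struct demo_private {
	struct pci_dev *pdev;
	void *rx_ring, *tx_ring;
	dma_addr_t rx_ring_dma, tx_ring_dma;
};

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_open(struct net_device *dev)
{
	struct demo_private *lp = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, demo_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto out;

	lp->rx_ring = pci_alloc_consistent(lp->pdev, DEMO_RX_DESC_SIZE,
					   &lp->rx_ring_dma);
	if (!lp->rx_ring) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	lp->tx_ring = pci_alloc_consistent(lp->pdev, DEMO_TX_DESC_SIZE,
					   &lp->tx_ring_dma);
	if (!lp->tx_ring) {
		ret = -ENOMEM;
		goto err_free_rx_ring;
	}

	return 0;

	/* Unwind strictly in reverse order of acquisition. */
err_free_rx_ring:
	pci_free_consistent(lp->pdev, DEMO_RX_DESC_SIZE, lp->rx_ring,
			    lp->rx_ring_dma);
err_free_irq:
	free_irq(dev->irq, dev);
out:
	return ret;
}
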
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 35540411990..bc669a40ae9 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1076,7 +1076,12 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
int ret;
if (vlgrp && (opts2 & RxVlanTag)) {
- __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
+ u16 vtag = swab16(opts2 & 0xffff);
+
+ if (likely(polling))
+ vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
+ else
+ __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
ret = 0;
} else
ret = -1;
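
[editor's note] The rtl8169_rx_vlan_skb() hunk above sends VLAN-tagged frames through vlan_gro_receive() when running in NAPI polling context and keeps __vlan_hwaccel_rx() for the non-polling case. A small sketch of that dispatch, assuming the caller has already extracted the tag and knows whether it is in the poll handler; both helpers are the in-tree VLAN receive APIs of this kernel generation.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Deliver a VLAN-tagged frame, preferring the GRO path in NAPI context.
 * 'polling' is non-zero when called from the NAPI poll handler. */
static void demo_rx_vlan_skb(struct napi_struct *napi,
			     struct vlan_group *vlgrp,
			     struct sk_buff *skb, u16 vtag, int polling)
{
	if (likely(polling))
		vlan_gro_receive(napi, vlgrp, vtag, skb);
	else
		__vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
}
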
@@ -1212,7 +1217,8 @@ static void rtl8169_update_counters(struct net_device *dev)
if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
return;
- counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
+ counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
+ &paddr, GFP_KERNEL);
if (!counters)
return;
@@ -1233,7 +1239,8 @@ static void rtl8169_update_counters(struct net_device *dev)
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
- pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
+ dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
+ paddr);
}
static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -2934,7 +2941,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow |
+ .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3186,6 +3193,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_R8169_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
+ dev->features |= NETIF_F_GRO;
tp->intr_mask = 0xffff;
tp->align = cfg->align;
@@ -3219,11 +3227,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
- pm_runtime_idle(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
out:
return rc;
@@ -3246,17 +3251,12 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- pm_runtime_get_sync(&pdev->dev);
-
flush_scheduled_work();
unregister_netdev(dev);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- }
- pm_runtime_put_noidle(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
@@ -3300,15 +3300,15 @@ static int rtl8169_open(struct net_device *dev)
/*
* Rx and Tx descriptors need 256-byte alignment.
- * pci_alloc_consistent provides more.
+ * dma_alloc_coherent provides more.
*/
- tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
- &tp->TxPhyAddr);
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
if (!tp->TxDescArray)
goto err_pm_runtime_put;
- tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
- &tp->RxPhyAddr);
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
if (!tp->RxDescArray)
goto err_free_tx_0;
@@ -3342,12 +3342,12 @@ out:
err_release_ring_2:
rtl8169_rx_clear(tp);
err_free_rx_1:
- pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
tp->RxDescArray = NULL;
err_free_tx_0:
- pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
tp->TxDescArray = NULL;
err_pm_runtime_put:
pm_runtime_put_noidle(&pdev->dev);
@@ -3983,7 +3983,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
{
struct pci_dev *pdev = tp->pci_dev;
- pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(*sk_buff);
*sk_buff = NULL;
@@ -4008,7 +4008,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
struct net_device *dev,
struct RxDesc *desc, int rx_buf_sz,
- unsigned int align)
+ unsigned int align, gfp_t gfp)
{
struct sk_buff *skb;
dma_addr_t mapping;
@@ -4016,13 +4016,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
pad = align ? align : NET_IP_ALIGN;
- skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+ skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
if (!skb)
goto err_out;
skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
- mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+ mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
PCI_DMA_FROMDEVICE);
rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4047,7 +4047,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
}
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
- u32 start, u32 end)
+ u32 start, u32 end, gfp_t gfp)
{
u32 cur;
@@ -4062,7 +4062,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
tp->RxDescArray + i,
- tp->rx_buf_sz, tp->align);
+ tp->rx_buf_sz, tp->align, gfp);
if (!skb)
break;
@@ -4090,7 +4090,7 @@ static int rtl8169_init_ring(struct net_device *dev)
memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
- if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+ if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
goto err_out;
rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4107,7 +4107,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
{
unsigned int len = tx_skb->len;
- pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+ PCI_DMA_TODEVICE);
desc->opts1 = 0x00;
desc->opts2 = 0x00;
desc->addr = 0x00;
@@ -4251,7 +4252,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
txd = tp->TxDescArray + entry;
len = frag->size;
addr = ((void *) page_address(frag->page)) + frag->page_offset;
- mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
+ PCI_DMA_TODEVICE);
/* anti gcc 2.95.3 bugware (sic) */
status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4321,7 +4323,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->tx_skb[entry].skb = skb;
}
- mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+ PCI_DMA_TODEVICE);
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
@@ -4458,9 +4461,8 @@ static inline int rtl8169_fragmented_frame(u32 status)
return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
}
-static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
+static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 opts1 = le32_to_cpu(desc->opts1);
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
@@ -4468,7 +4470,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
((status == RxProtoIP) && !(opts1 & IPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
@@ -4485,8 +4487,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
if (!skb)
goto out;
- pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
+ PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
*sk_buff = skb;
done = true;
@@ -4554,24 +4556,23 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
continue;
}
- rtl8169_rx_csum(skb, desc);
-
if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
- pci_dma_sync_single_for_device(pdev, addr,
+ dma_sync_single_for_device(&pdev->dev, addr,
pkt_size, PCI_DMA_FROMDEVICE);
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
- pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+ dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
tp->Rx_skbuff[entry] = NULL;
}
+ rtl8169_rx_csum(skb, status);
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
if (likely(polling))
- netif_receive_skb(skb);
+ napi_gro_receive(&tp->napi, skb);
else
netif_rx(skb);
}
@@ -4591,7 +4592,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+ delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
if (!delta && count)
netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
@@ -4633,8 +4634,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ if (unlikely(status & RxFIFOOver)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
@@ -4778,10 +4778,10 @@ static int rtl8169_close(struct net_device *dev)
free_irq(dev->irq, dev);
- pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
- pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
tp->TxDescArray = NULL;
tp->RxDescArray = NULL;
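
[editor's note] Across the r8169 hunks the pci_alloc_consistent()/pci_map_single() wrappers are replaced by the generic dma_* API on &pdev->dev, mainly so descriptor rings can be allocated with GFP_KERNEL instead of the implicit GFP_ATOMIC of the PCI wrappers. A minimal sketch of the coherent-allocation half of that conversion, with an assumed ring size.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define DEMO_RING_BYTES 4096

static void *demo_alloc_ring(struct pci_dev *pdev, dma_addr_t *phys)
{
	/* pci_alloc_consistent(pdev, size, phys) always allocates with
	 * GFP_ATOMIC; the dma_* form lets the caller pick the GFP flags. */
	return dma_alloc_coherent(&pdev->dev, DEMO_RING_BYTES, phys,
				  GFP_KERNEL);
}

static void demo_free_ring(struct pci_dev *pdev, void *ring, dma_addr_t phys)
{
	dma_free_coherent(&pdev->dev, DEMO_RING_BYTES, ring, phys);
}

Streaming mappings follow the same shape in the hunks above: dma_map_single(&pdev->dev, ...) and dma_unmap_single(&pdev->dev, ...) replace their pci_* counterparts with the other arguments unchanged.
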
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884ff98..44150f2f7bf 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
__ilog2(sizeof(void *)) + 4 : 0);
unregister_netdev(ndev);
- kfree(ndev);
+ free_netdev(ndev);
list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
list_del(&peer->node);
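
[editor's note] The rionet fix above matters because a struct net_device obtained from alloc_netdev()/alloc_etherdev() carries state (private area, reference counting) that plain kfree() does not release correctly; free_netdev() is the required counterpart. A tiny sketch of the pairing with a hypothetical setup function.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static struct net_device *demo_create(void)
{
	struct net_device *ndev;

	ndev = alloc_etherdev(sizeof(unsigned long));	/* private data size */
	if (!ndev)
		return NULL;

	/* ...fill in netdev_ops, MAC address, register_netdev()... */
	return ndev;
}

static void demo_destroy(struct net_device *ndev)
{
	/* unregister_netdev(ndev) first if it was registered, then: */
	free_netdev(ndev);	/* not kfree(): pairs with alloc_etherdev() */
}
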
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index e26e107f93e..e68c941926f 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1245,7 +1245,7 @@ static int rr_open(struct net_device *dev)
init_timer(&rrpriv->timer);
rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
rrpriv->timer.data = (unsigned long)dev;
- rrpriv->timer.function = &rr_timer; /* timer handler */
+ rrpriv->timer.function = rr_timer; /* timer handler */
add_timer(&rrpriv->timer);
netif_start_queue(dev);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 18bc5b718bb..c70ad515383 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -38,8 +38,6 @@
* Tx descriptors that can be associated with each corresponding FIFO.
* intr_type: This defines the type of interrupt. The values can be 0(INTA),
* 2(MSI_X). Default value is '2(MSI_X)'
- * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
- * Possible values '1' for enable '0' for disable. Default is '0'
* lro_max_pkts: This parameter defines maximum number of packets can be
* aggregated as a single large packet
* napi: This parameter used to enable/disable NAPI (polling Rx)
@@ -90,7 +88,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.26"
+#define DRV_VERSION "2.0.26.27"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -496,8 +494,6 @@ S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
-static unsigned int lro_enable = 1;
-module_param_named(lro, lro_enable, uint, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
* aggregation happens until we hit max IP pkt size(64K)
@@ -5124,8 +5120,6 @@ static void s2io_set_multicast(struct net_device *dev)
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- memcpy(sp->usr_addrs[i].addr, ha->addr,
- ETH_ALEN);
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
mac_addr |= ha->addr[j];
@@ -6735,13 +6729,10 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
return -EINVAL;
if (data & ETH_FLAG_LRO) {
- if (lro_enable) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- changed = 1;
- }
- } else
- rc = -EINVAL;
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ changed = 1;
+ }
} else if (dev->features & NETIF_F_LRO) {
dev->features &= ~NETIF_F_LRO;
changed = 1;
@@ -6750,7 +6741,6 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
if (changed && netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
- sp->lro = !!(dev->features & NETIF_F_LRO);
rc = s2io_card_up(sp);
if (rc)
s2io_reset(sp);
@@ -7307,7 +7297,7 @@ static int s2io_card_up(struct s2io_nic *sp)
struct ring_info *ring = &mac_control->rings[i];
ring->mtu = dev->mtu;
- ring->lro = sp->lro;
+ ring->lro = !!(dev->features & NETIF_F_LRO);
ret = fill_rx_buffers(sp, ring, 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -7341,7 +7331,7 @@ static int s2io_card_up(struct s2io_nic *sp)
/* Setting its receive mode */
s2io_set_multicast(dev);
- if (sp->lro) {
+ if (dev->features & NETIF_F_LRO) {
/* Initialize max aggregatable pkts per session based on MTU */
sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
/* Check if we can use (if specified) user provided value */
@@ -7613,10 +7603,10 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
* Packet with erroneous checksum, let the
* upper layers deal with it.
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
swstats->mem_freed += skb->truesize;
send_up:
@@ -7911,7 +7901,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
else
sp->device_type = XFRAME_I_DEVICE;
- sp->lro = lro_enable;
/* Initialize some PCI/PCI-X fields of the NIC. */
s2io_init_pci(sp);
@@ -8047,8 +8036,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->netdev_ops = &s2io_netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- if (lro_enable)
- dev->features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_LRO;
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
if (sp->high_dma_flag == true)
dev->features |= NETIF_F_HIGHDMA;
@@ -8283,9 +8271,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->name);
}
- if (sp->lro)
- DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
- dev->name);
+ DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
+ dev->name);
if (ufo)
DBG_PRINT(ERR_DBG,
"%s: UDP Fragmentation Offload(UFO) enabled\n",
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0af03353390..00b8614efe4 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -816,12 +816,6 @@ struct mac_info {
struct stat_block *stats_info; /* Logical address of the stat block */
};
-/* structure representing the user defined MAC addresses */
-struct usr_addr {
- char addr[ETH_ALEN];
- int usage_cnt;
-};
-
/* Default Tunable parameters of the NIC. */
#define DEFAULT_FIFO_0_LEN 4096
#define DEFAULT_FIFO_1_7_LEN 512
@@ -894,9 +888,7 @@ struct s2io_nic {
#define ALL_MULTI 2
#define MAX_ADDRS_SUPPORTED 64
- u16 usr_addr_count;
u16 mc_addr_count;
- struct usr_addr usr_addrs[256];
u16 m_cast_flg;
u16 all_multi_pos;
@@ -971,7 +963,6 @@ struct s2io_nic {
unsigned long clubbed_frms_cnt;
unsigned long sending_both;
- u8 lro;
u16 lro_max_aggr_per_sess;
volatile unsigned long state;
u64 general_int_mask;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 8e6bd45b9f3..d8249d7653c 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1170,7 +1170,7 @@ again:
sb->ip_summed = CHECKSUM_UNNECESSARY;
/* don't need to set sb->csum */
} else {
- sb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(sb);
}
}
prefetch(sb->data);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8c4067af32b..31b92f5f32c 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1251,16 +1251,6 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
return 0;
}
-static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct sc92031_priv *priv = netdev_priv(dev);
- struct pci_dev *pdev = priv->pdev;
-
- strcpy(drvinfo->driver, SC92031_NAME);
- strcpy(drvinfo->bus_info, pci_name(pdev));
-}
-
static void sc92031_ethtool_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wolinfo)
{
@@ -1382,7 +1372,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops sc92031_ethtool_ops = {
.get_settings = sc92031_ethtool_get_settings,
.set_settings = sc92031_ethtool_set_settings,
- .get_drvinfo = sc92031_ethtool_get_drvinfo,
.get_wol = sc92031_ethtool_get_wol,
.set_wol = sc92031_ethtool_set_wol,
.nway_reset = sc92031_ethtool_nway_reset,
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 1047b19c60a..ab31c7124db 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,7 +1,8 @@
-sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o \
- falcon_gmac.o falcon_xmac.o mcdi_mac.o \
+sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
+ falcon_xmac.o mcdi_mac.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
- tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o
+ tenxpress.o txc43128_phy.o falcon_boards.o \
+ mcdi.o mcdi_phy.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ba674c5ca29..fa6e0207de1 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -114,7 +114,7 @@ static struct workqueue_struct *reset_workqueue;
* This is only used in MSI-X interrupt mode
*/
static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0644);
+module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
"Use separate channels for TX and RX");
@@ -124,8 +124,9 @@ MODULE_PARM_DESC(separate_tx_channels,
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
- * monitor, which checks for known hardware bugs and resets the
- * hardware and driver as necessary.
+ * monitor. On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
*/
unsigned int efx_monitor_interval = 1 * HZ;
@@ -201,10 +202,13 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
* Utility functions and prototypes
*
*************************************************************************/
-static void efx_remove_channel(struct efx_channel *channel);
+
+static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_channels(struct efx_nic *efx);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
@@ -248,7 +252,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_rx_strategy(channel);
- efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
return spent;
}
@@ -334,6 +338,7 @@ void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
/* Disable interrupts and wait for ISRs to complete */
@@ -347,7 +352,7 @@ void efx_process_channel_now(struct efx_channel *channel)
napi_disable(&channel->napi_str);
/* Poll the channel */
- efx_process_channel(channel, EFX_EVQ_SIZE);
+ efx_process_channel(channel, channel->eventq_mask + 1);
/* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */
@@ -364,9 +369,18 @@ void efx_process_channel_now(struct efx_channel *channel)
*/
static int efx_probe_eventq(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"chan %d create event queue\n", channel->channel);
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions. */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
return efx_nic_probe_eventq(channel);
}
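
[editor's note] efx_probe_eventq() now derives the event queue size from the actual ring sizes instead of the fixed EFX_EVQ_SIZE: one event per TX and RX descriptor plus headroom for link-state and MCDI events, rounded up to a power of two and stored as a mask. A small stand-alone sketch of that arithmetic with assumed minimum and maximum limits.

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/bug.h>

#define DEMO_MIN_EVQ_SIZE 512UL
#define DEMO_MAX_EVQ_SIZE 16384UL

/* Return the event-queue mask (entries - 1) for the given ring sizes. */
static unsigned long demo_evq_mask(unsigned long rxq_entries,
				   unsigned long txq_entries)
{
	unsigned long entries;

	/* One event per descriptor, plus slack for non-DMA events. */
	entries = roundup_pow_of_two(rxq_entries + txq_entries + 128);
	BUG_ON(entries > DEMO_MAX_EVQ_SIZE);

	return max(entries, DEMO_MIN_EVQ_SIZE) - 1;
}

With the default 1024-entry rings this gives roundup_pow_of_two(2176) = 4096 entries, i.e. a mask of 4095.
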
@@ -403,6 +417,63 @@ static void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
+/* Allocate and initialise a channel structure, optionally copying
+ * parameters (but not resources) from an old channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ if (old_channel) {
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+ } else {
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+ }
+
+ spin_lock_init(&channel->tx_stop_lock);
+ atomic_set(&channel->tx_stop_count, 1);
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
static int efx_probe_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
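
[editor's note] efx_alloc_channel() above either zero-allocates a brand-new channel or clones an existing one, copying its parameters while clearing every per-resource field so nothing is shared with the original. The same clone-or-new shape, reduced to a hypothetical structure with one tunable and one resource field:

#include <linux/slab.h>

struct demo_channel {
	int index;
	unsigned int irq_moderation;	/* tunable: preserved on clone */
	void *ring_buffer;		/* resource: must not be shared */
};

static struct demo_channel *
demo_alloc_channel(int index, const struct demo_channel *old)
{
	struct demo_channel *ch;

	if (old) {
		ch = kmalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch)
			return NULL;
		*ch = *old;		/* keep identity and tunables */
		ch->ring_buffer = NULL;	/* resources are re-probed later */
	} else {
		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch)
			return NULL;
		ch->index = index;
	}
	return ch;
}
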
@@ -459,11 +530,38 @@ static void efx_set_channel_names(struct efx_nic *efx)
number -= efx->n_rx_channels;
}
}
- snprintf(channel->name, sizeof(channel->name),
+ snprintf(efx->channel_name[channel->channel],
+ sizeof(efx->channel_name[0]),
"%s%s-%d", efx->name, type, number);
}
}
+static int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ efx_for_each_channel(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
/* Channels are shutdown and reinitialised whilst the NIC is running
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
@@ -601,6 +699,75 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_remove_eventq(channel);
}
+static void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ u32 old_rxq_entries, old_txq_entries;
+ unsigned i;
+ int rc;
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+ /* Clone channels */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx_alloc_channel(efx, i, efx->channel[i]);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto rollback;
+
+ /* Destroy old channels */
+ for (i = 0; i < efx->n_channels; i++)
+ efx_remove_channel(other_channel[i]);
+out:
+ /* Free unused channel structures */
+ for (i = 0; i < efx->n_channels; i++)
+ kfree(other_channel[i]);
+
+ efx_init_channels(efx);
+ efx_start_all(efx);
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
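
[editor's note] efx_realloc_channels() resizes the rings by cloning every channel, swapping the clones and the new entry counts in, probing them, and on failure swapping the originals straight back before freeing whichever set ended up unused. A generic, self-contained sketch of that swap-with-rollback pattern; the channel structure and probe step are placeholders (n is assumed not to exceed the array size).

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define DEMO_MAX_CHANNELS 4

struct demo_chan {
	unsigned long entries;
};

static struct demo_chan *demo_clone(const struct demo_chan *old)
{
	struct demo_chan *ch = kmalloc(sizeof(*ch), GFP_KERNEL);

	if (ch)
		*ch = *old;
	return ch;
}

static int demo_probe_channels(struct demo_chan **chan, unsigned int n)
{
	return 0;	/* allocate rings here; return non-zero on failure */
}

static int demo_realloc_channels(struct demo_chan **chan, unsigned int n,
				 unsigned long new_entries)
{
	struct demo_chan *other[DEMO_MAX_CHANNELS] = {};
	unsigned int i;
	int rc = 0;

	/* Clone first: a failure here leaves the live channels untouched. */
	for (i = 0; i < n; i++) {
		other[i] = demo_clone(chan[i]);
		if (!other[i]) {
			rc = -ENOMEM;
			goto out;
		}
		other[i]->entries = new_entries;
	}

	/* Swap the clones in, then try to bring them up. */
	for (i = 0; i < n; i++)
		swap(chan[i], other[i]);

	rc = demo_probe_channels(chan, n);
	if (rc) {
		/* Rollback: restore the originals before freeing the clones. */
		for (i = 0; i < n; i++)
			swap(chan[i], other[i]);
	}
out:
	for (i = 0; i < n; i++)
		kfree(other[i]);	/* frees whichever set is now unused */
	return rc;
}
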
@@ -761,7 +928,7 @@ static int efx_probe_port(struct efx_nic *efx)
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
- goto err;
+ return rc;
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
@@ -782,7 +949,7 @@ static int efx_probe_port(struct efx_nic *efx)
return 0;
err:
- efx_remove_port(efx);
+ efx->type->remove_port(efx);
return rc;
}
@@ -1050,7 +1217,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_rx_channels = efx->n_channels;
}
for (i = 0; i < n_channels; i++)
- efx->channel[i].irq = xentries[i].vector;
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1234,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
- efx->channel[0].irq = efx->pci_dev->irq;
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
} else {
netif_err(efx, drv, efx->net_dev,
"could not enable MSI\n");
@@ -1097,26 +1265,32 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
+struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ unsigned tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[tx_channel_offset + index]->tx_queue[type];
+}
+
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
unsigned tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+ /* Channel pointers were set in efx_init_struct() but we now
+ * need to clear them for TX queues in any RX-only channels. */
efx_for_each_channel(channel, efx) {
- if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
- channel->tx_queue = &efx->tx_queue[
- (channel->channel - tx_channel_offset) *
- EFX_TXQ_TYPES];
+ if (channel->channel - tx_channel_offset >=
+ efx->n_tx_channels) {
efx_for_each_channel_tx_queue(tx_queue, channel)
- tx_queue->channel = channel;
+ tx_queue->channel = NULL;
}
}
-
- efx_for_each_rx_queue(rx_queue, efx)
- rx_queue->channel = &efx->channel[rx_queue->queue];
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1141,7 +1315,8 @@ static int efx_probe_nic(struct efx_nic *efx)
efx->rx_indir_table[i] = i % efx->n_rx_channels;
efx_set_channels(efx);
- efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
+ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1165,40 +1340,37 @@ static void efx_remove_nic(struct efx_nic *efx)
static int efx_probe_all(struct efx_nic *efx)
{
- struct efx_channel *channel;
int rc;
- /* Create NIC */
rc = efx_probe_nic(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
- /* Create port */
rc = efx_probe_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
- /* Create channels */
- efx_for_each_channel(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail3;
- }
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto fail3;
+
+ rc = efx_probe_filters(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create filter tables\n");
+ goto fail4;
}
- efx_set_channel_names(efx);
return 0;
+ fail4:
+ efx_remove_channels(efx);
fail3:
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
efx_remove_port(efx);
fail2:
efx_remove_nic(efx);
@@ -1328,10 +1500,8 @@ static void efx_stop_all(struct efx_nic *efx)
static void efx_remove_all(struct efx_nic *efx)
{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
+ efx_remove_filters(efx);
+ efx_remove_channels(efx);
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -1355,20 +1525,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
bool rx_adaptive)
{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
EFX_ASSERT_RESET_SERIALISED(efx);
- efx_for_each_tx_queue(tx_queue, efx)
- tx_queue->channel->irq_moderation = tx_ticks;
-
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
- efx_for_each_rx_queue(rx_queue, efx)
- rx_queue->channel->irq_moderation = rx_ticks;
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_get_rx_queue(channel))
+ channel->irq_moderation = rx_ticks;
+ else if (efx_channel_get_tx_queue(channel, 0))
+ channel->irq_moderation = tx_ticks;
+ }
}
/**************************************************************************
@@ -1377,8 +1547,7 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
*
**************************************************************************/
-/* Run periodically off the general workqueue. Serialised against
- * efx_reconfigure_port via the mac_lock */
+/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
@@ -1391,16 +1560,13 @@ static void efx_monitor(struct work_struct *data)
/* If the mac_lock is already held then it is likely a port
* reconfiguration is already in place, which will likely do
- * most of the work of check_hw() anyway. */
- if (!mutex_trylock(&efx->mac_lock))
- goto out_requeue;
- if (!efx->port_enabled)
- goto out_unlock;
- efx->type->monitor(efx);
+ * most of the work of monitor() anyway. */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
-out_unlock:
- mutex_unlock(&efx->mac_lock);
-out_requeue:
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
}
@@ -1546,11 +1712,11 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
+ stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
stats->rx_length_errors = (mac_stats->rx_gtjumbo +
mac_stats->rx_length_error);
- stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
stats->rx_fifo_errors = mac_stats->rx_overflow;
@@ -1767,6 +1933,7 @@ fail_registered:
static void efx_unregister_netdev(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
@@ -1777,8 +1944,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev as running their destructors
* may be needed to get the device ref. count to 0. */
- efx_for_each_tx_queue(tx_queue, efx)
- efx_release_tx_buffers(tx_queue);
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_release_tx_buffers(tx_queue);
+ }
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -1841,6 +2010,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
efx->mac_op->reconfigure(efx);
efx_init_channels(efx);
+ efx_restore_filters(efx);
mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
@@ -2037,9 +2207,6 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
int i;
/* Initialise common structures */
@@ -2068,36 +2235,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
INIT_WORK(&efx->mac_work, efx_mac_work);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- channel = &efx->channel[i];
- channel->efx = efx;
- channel->channel = i;
- channel->work_pending = false;
- spin_lock_init(&channel->tx_stop_lock);
- atomic_set(&channel->tx_stop_count, 1);
- }
- for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
- tx_queue = &efx->tx_queue[i];
- tx_queue->efx = efx;
- tx_queue->queue = i;
- tx_queue->buffer = NULL;
- tx_queue->channel = &efx->channel[0]; /* for safety */
- tx_queue->tso_headers_free = NULL;
- }
- for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
- rx_queue = &efx->rx_queue[i];
- rx_queue->efx = efx;
- rx_queue->queue = i;
- rx_queue->channel = &efx->channel[0]; /* for safety */
- rx_queue->buffer = NULL;
- setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
- (unsigned long)rx_queue);
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ goto fail;
}
efx->type = type;
- /* As close as we can get to guaranteeing that we don't overflow */
- BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
-
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
@@ -2109,13 +2253,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
pci_name(pci_dev));
efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
if (!efx->workqueue)
- return -ENOMEM;
+ goto fail;
return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return -ENOMEM;
}
static void efx_fini_struct(struct efx_nic *efx)
{
+ int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ kfree(efx->channel[i]);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 060dc952a0f..f502b14eb22 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -12,6 +12,7 @@
#define EFX_EFX_H
#include "net_driver.h"
+#include "filter.h"
/* PCI IDs */
#define EFX_VENDID_SFC 0x1924
@@ -37,8 +38,6 @@ efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern void efx_stop_queue(struct efx_channel *channel);
extern void efx_wake_queue(struct efx_channel *channel);
-#define EFX_TXQ_SIZE 1024
-#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -53,13 +52,36 @@ extern void __efx_rx_packet(struct efx_channel *channel,
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard);
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
-#define EFX_RXQ_SIZE 1024
-#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+/* The smallest [rt]xq_entries that the driver supports. Callers of
+ * efx_wake_queue() assume that they can subsequently send at least one
+ * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
+#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+
+/* Filters */
+extern int efx_probe_filters(struct efx_nic *efx);
+extern void efx_restore_filters(struct efx_nic *efx);
+extern void efx_remove_filters(struct efx_nic *efx);
+extern int efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace);
+extern int efx_filter_remove_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+extern void efx_filter_table_clear(struct efx_nic *efx,
+ enum efx_filter_table_id table_id,
+ enum efx_filter_priority priority);
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
-#define EFX_EVQ_SIZE 4096
-#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
+extern int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
extern int efx_reconfigure_port(struct efx_nic *efx);
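
[editor's note] The EFX_MIN_RING_SIZE definition above encodes a worst case: after efx_wake_queue() a sender must be able to queue at least one fully fragmented skb, Falcon/A1 can need up to three descriptors per fragment, and the result is doubled (presumably for headroom) and rounded up to a power of two. With 4 KiB pages MAX_SKB_FRAGS is 18 in this kernel, so the expression evaluates to roundup_pow_of_two(2 * 3 * 18) = roundup_pow_of_two(108) = 128. A stand-alone restatement of that bound used as a validation helper; the maximum is an assumed value.

#include <linux/log2.h>
#include <linux/skbuff.h>

/* Smallest ring that still fits one worst-case skb after a queue wake:
 * up to 3 descriptors per fragment, doubled, rounded to a power of two. */
#define DEMO_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
#define DEMO_MAX_RING_SIZE 4096UL

static bool demo_ring_size_ok(unsigned long entries)
{
	return entries >= DEMO_MIN_RING_SIZE &&
	       entries <= DEMO_MAX_RING_SIZE;
}
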
@@ -81,8 +103,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
-extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
-extern void efx_hex_dump(const u8 *, unsigned int, const char *);
/* Dummy PHY ops for PHY drivers */
extern int efx_port_dummy_op_int(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fd19d6ab97a..c95328fa3ee 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -15,6 +15,7 @@
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
+#include "filter.h"
#include "nic.h"
#include "spi.h"
#include "mdio_10g.h"
@@ -328,9 +329,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
unsigned int test_index,
struct ethtool_string *strings, u64 *data)
{
+ struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
- efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
@@ -550,9 +552,22 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
+ u32 supported = (efx->type->offload_features &
+ (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
+ int rc;
+
+ rc = ethtool_op_set_flags(net_dev, data, supported);
+ if (rc)
+ return rc;
- return ethtool_op_set_flags(net_dev, data, supported);
+ if (!(data & ETH_FLAG_NTUPLE)) {
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
+ EFX_FILTER_PRI_MANUAL);
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_PRI_MANUAL);
+ }
+
+ return 0;
}
static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -673,15 +688,15 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
memset(coalesce, 0, sizeof(*coalesce));
/* Find lowest IRQ moderation across all used TX queues */
coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
- efx_for_each_tx_queue(tx_queue, efx) {
- channel = tx_queue->channel;
+ efx_for_each_channel(channel, efx) {
+ if (!efx_channel_get_tx_queue(channel, 0))
+ continue;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
if (channel->channel < efx->n_rx_channels)
coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +723,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
unsigned tx_usecs, rx_usecs, adaptive;
if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +739,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
adaptive = coalesce->use_adaptive_rx_coalesce;
/* If the channel is shared only allow RX parameters to be set */
- efx_for_each_tx_queue(tx_queue, efx) {
- if ((tx_queue->channel->channel < efx->n_rx_channels) &&
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_get_rx_queue(channel) &&
+ efx_channel_get_tx_queue(channel, 0) &&
tx_usecs) {
netif_err(efx, drv, efx->net_dev, "Channel is shared. "
"Only RX coalescing may be set\n");
@@ -741,6 +756,42 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
+static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+ ring->tx_pending < EFX_MIN_RING_SIZE) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX and RX queues cannot be smaller than %ld\n",
+ EFX_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+}
+
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
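
[editor's note] efx_ethtool_set_ringparam() accepts only what the hardware supports: no mini or jumbo rings, and both pending counts bounded by EFX_MIN_RING_SIZE and EFX_MAX_DMAQ_SIZE, before efx_realloc_channels() is asked to do the work. A condensed sketch of that validation with assumed limits.

#include <linux/ethtool.h>
#include <linux/errno.h>

#define DEMO_MIN_RING 128UL
#define DEMO_MAX_RING 4096UL

static int demo_check_ringparam(const struct ethtool_ringparam *ring)
{
	/* This hardware has no separate mini/jumbo RX rings. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (ring->rx_pending < DEMO_MIN_RING ||
	    ring->tx_pending < DEMO_MIN_RING ||
	    ring->rx_pending > DEMO_MAX_RING ||
	    ring->tx_pending > DEMO_MAX_RING)
		return -EINVAL;

	return 0;	/* the caller then reallocates the channels */
}
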
@@ -918,6 +969,105 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
}
}
+static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
+ struct ethtool_rx_ntuple *ntuple)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
+ struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
+ struct efx_filter_spec filter;
+
+ /* Range-check action */
+ if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
+ ntuple->fs.action >= (s32)efx->n_rx_channels)
+ return -EINVAL;
+
+ if (~ntuple->fs.data_mask)
+ return -EINVAL;
+
+ switch (ntuple->fs.flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ /* Must match all of destination, */
+ if (ip_mask->ip4dst | ip_mask->pdst)
+ return -EINVAL;
+ /* all or none of source, */
+ if ((ip_mask->ip4src | ip_mask->psrc) &&
+ ((__force u32)~ip_mask->ip4src |
+ (__force u16)~ip_mask->psrc))
+ return -EINVAL;
+ /* and nothing else */
+ if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+ return -EINVAL;
+ break;
+ case ETHER_FLOW:
+ /* Must match all of destination, */
+ if (!is_zero_ether_addr(mac_mask->h_dest))
+ return -EINVAL;
+ /* all or none of VID, */
+ if (ntuple->fs.vlan_tag_mask != 0xf000 &&
+ ntuple->fs.vlan_tag_mask != 0xffff)
+ return -EINVAL;
+ /* and nothing else */
+ if (!is_broadcast_ether_addr(mac_mask->h_source) ||
+ mac_mask->h_proto != htons(0xffff))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ filter.priority = EFX_FILTER_PRI_MANUAL;
+ filter.flags = 0;
+
+ switch (ntuple->fs.flow_type) {
+ case TCP_V4_FLOW:
+ if (!ip_mask->ip4src)
+ efx_filter_set_rx_tcp_full(&filter,
+ htonl(ip_entry->ip4src),
+ htons(ip_entry->psrc),
+ htonl(ip_entry->ip4dst),
+ htons(ip_entry->pdst));
+ else
+ efx_filter_set_rx_tcp_wild(&filter,
+ htonl(ip_entry->ip4dst),
+ htons(ip_entry->pdst));
+ break;
+ case UDP_V4_FLOW:
+ if (!ip_mask->ip4src)
+ efx_filter_set_rx_udp_full(&filter,
+ htonl(ip_entry->ip4src),
+ htons(ip_entry->psrc),
+ htonl(ip_entry->ip4dst),
+ htons(ip_entry->pdst));
+ else
+ efx_filter_set_rx_udp_wild(&filter,
+ htonl(ip_entry->ip4dst),
+ htons(ip_entry->pdst));
+ break;
+ case ETHER_FLOW:
+ if (ntuple->fs.vlan_tag_mask == 0xf000)
+ efx_filter_set_rx_mac_full(&filter,
+ ntuple->fs.vlan_tag & 0xfff,
+ mac_entry->h_dest);
+ else
+ efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
+ break;
+ }
+
+ if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
+ return efx_filter_remove_filter(efx, &filter);
+ } else {
+ if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ filter.dmaq_id = 0xfff;
+ else
+ filter.dmaq_id = ntuple->fs.action;
+ return efx_filter_insert_filter(efx, &filter, true);
+ }
+}
+
static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
struct ethtool_rxfh_indir *indir)
{
@@ -971,6 +1121,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_eeprom = efx_ethtool_set_eeprom,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
+ .get_ringparam = efx_ethtool_get_ringparam,
+ .set_ringparam = efx_ethtool_set_ringparam,
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_rx_csum = efx_ethtool_get_rx_csum,
@@ -994,6 +1146,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
+ .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
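
[editor's note] efx_ethtool_set_rx_ntuple() first checks that the user-supplied masks can be expressed by the hardware (destination always matched, source matched fully or not at all), then builds either a full four-tuple filter or a destination-only wildcard filter. The mask check is rephrased below over plain integers; in the ethtool ntuple convention used here, a zero mask means "match this field" and an all-ones mask means "ignore it".

#include <linux/types.h>
#include <linux/errno.h>

/* Returns 1 for a full (src + dst) match, 0 for a destination-only
 * wildcard match, or -EINVAL if the masks cannot be expressed. */
static int demo_classify_v4_masks(u32 ip_src_mask, u16 port_src_mask,
				  u32 ip_dst_mask, u16 port_dst_mask)
{
	/* Destination address and port must always be matched. */
	if (ip_dst_mask || port_dst_mask)
		return -EINVAL;

	/* Source either fully matched... */
	if (!ip_src_mask && !port_src_mask)
		return 1;		/* -> full four-tuple filter */

	/* ...or fully ignored. */
	if (ip_src_mask == 0xffffffff && port_src_mask == 0xffff)
		return 0;		/* -> destination-only filter */

	return -EINVAL;
}

As in the hunk above, a CLEAR action then maps to efx_filter_remove_filter(), a DROP action to an out-of-range DMA queue (0xfff), and any other action to the target RX queue index passed to efx_filter_insert_filter().
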
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 4f9d33f3cca..267019bb2b1 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -159,7 +159,6 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
- struct efx_channel *channel;
int syserr;
int queues;
@@ -194,15 +193,10 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
- /* Schedule processing of any interrupting queues */
- channel = &efx->channel[0];
- while (queues) {
- if (queues & 0x01)
- efx_schedule_channel(channel);
- channel++;
- queues >>= 1;
- }
-
+ if (queues & 1)
+ efx_schedule_channel(efx_get_channel(efx, 0));
+ if (queues & 2)
+ efx_schedule_channel(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
/**************************************************************************
@@ -452,30 +446,19 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* It's not safe to use GLB_CTL_REG to reset the
* macs, so instead use the internal MAC resets
*/
- if (!EFX_IS10G(efx)) {
- EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
- efx_writeo(efx, &reg, FR_AB_GM_CFG1);
- udelay(1000);
-
- EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
- efx_writeo(efx, &reg, FR_AB_GM_CFG1);
- udelay(1000);
- return;
- } else {
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
-
- for (count = 0; count < 10000; count++) {
- efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
- 0)
- return;
- udelay(10);
- }
-
- netif_err(efx, hw, efx->net_dev,
- "timed out waiting for XMAC core reset\n");
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ for (count = 0; count < 10000; count++) {
+ efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
+ 0)
+ return;
+ udelay(10);
}
+
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XMAC core reset\n");
}
/* Mac stats will fail whilst the TX fifo is draining */
@@ -514,7 +497,6 @@ static void falcon_reset_macs(struct efx_nic *efx)
* are re-enabled by the caller */
efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
- /* This can run even when the GMAC is selected */
falcon_setup_xaui(efx);
}
@@ -652,8 +634,6 @@ static void falcon_stats_timer_func(unsigned long context)
spin_unlock(&efx->stats_lock);
}
-static void falcon_switch_mac(struct efx_nic *efx);
-
static bool falcon_loopback_link_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
@@ -664,11 +644,7 @@ static bool falcon_loopback_link_poll(struct efx_nic *efx)
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
efx->link_state.up = true;
-
- if (efx->loopback_mode == LOOPBACK_GMAC)
- efx->link_state.speed = 1000;
- else
- efx->link_state.speed = 10000;
+ efx->link_state.speed = 10000;
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -691,7 +667,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
falcon_stop_nic_stats(efx);
falcon_deconfigure_mac_wrapper(efx);
- falcon_switch_mac(efx);
+ falcon_reset_macs(efx);
efx->phy_op->reconfigure(efx);
rc = efx->mac_op->reconfigure(efx);
@@ -841,73 +817,23 @@ out:
return rc;
}
-static void falcon_clock_mac(struct efx_nic *efx)
-{
- unsigned strap_val;
- efx_oword_t nic_stat;
-
- /* Configure the NIC generated MAC clock correctly */
- efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
- strap_val = EFX_IS10G(efx) ? 5 : 3;
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
- EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
- efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
- } else {
- /* Falcon A1 does not support 1G/10G speed switching
- * and must not be used with a PHY that does. */
- BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
- strap_val);
- }
-}
-
-static void falcon_switch_mac(struct efx_nic *efx)
-{
- struct efx_mac_operations *old_mac_op = efx->mac_op;
- struct falcon_nic_data *nic_data = efx->nic_data;
- unsigned int stats_done_offset;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
- WARN_ON(nic_data->stats_disable_count == 0);
-
- efx->mac_op = (EFX_IS10G(efx) ?
- &falcon_xmac_operations : &falcon_gmac_operations);
-
- if (EFX_IS10G(efx))
- stats_done_offset = XgDmaDone_offset;
- else
- stats_done_offset = GDmaDone_offset;
- nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
-
- if (old_mac_op == efx->mac_op)
- return;
-
- falcon_clock_mac(efx);
-
- netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
- EFX_IS10G(efx) ? 'X' : 'G');
- /* Not all macs support a mac-level link state */
- efx->xmac_poll_required = false;
- falcon_reset_macs(efx);
-}
-
/* This call is responsible for hooking in the MAC and PHY operations */
static int falcon_probe_port(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
switch (efx->phy_type) {
case PHY_TYPE_SFX7101:
efx->phy_op = &falcon_sfx7101_phy_ops;
break;
- case PHY_TYPE_SFT9001A:
- case PHY_TYPE_SFT9001B:
- efx->phy_op = &falcon_sft9001_phy_ops;
- break;
case PHY_TYPE_QT2022C2:
case PHY_TYPE_QT2025C:
efx->phy_op = &falcon_qt202x_phy_ops;
break;
+ case PHY_TYPE_TXC43128:
+ efx->phy_op = &falcon_txc_phy_ops;
+ break;
default:
netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
efx->phy_type);
@@ -943,6 +869,7 @@ static int falcon_probe_port(struct efx_nic *efx)
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
+ nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
return 0;
}
@@ -1207,7 +1134,7 @@ static void falcon_monitor(struct efx_nic *efx)
falcon_stop_nic_stats(efx);
falcon_deconfigure_mac_wrapper(efx);
- falcon_switch_mac(efx);
+ falcon_reset_macs(efx);
rc = efx->mac_op->reconfigure(efx);
BUG_ON(rc);
@@ -1216,8 +1143,7 @@ static void falcon_monitor(struct efx_nic *efx)
efx_link_status_changed(efx);
}
- if (EFX_IS10G(efx))
- falcon_poll_xmac(efx);
+ falcon_poll_xmac(efx);
}
/* Zeroes out the SRAM contents. This routine must be called in
@@ -1610,16 +1536,6 @@ static int falcon_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
efx_writeo(efx, &temp, FR_AB_NIC_STAT);
- /* Set the source of the GMAC clock */
- if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
- efx_reado(efx, &temp, FR_AB_GPIO_CTL);
- EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
- efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
- }
-
- /* Select the correct MAC */
- falcon_clock_mac(efx);
-
rc = falcon_reset_sram(efx);
if (rc)
return rc;
@@ -1880,7 +1796,7 @@ struct efx_nic_type falcon_b0_nic_type = {
* channels */
.tx_dc_base = 0x130000,
.rx_dc_base = 0x100000,
- .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
+ .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.reset_world_flags = ETH_RESET_IRQ,
};
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 3d950c2cf20..cfc6a5b5a47 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -26,7 +26,7 @@
/* Board types */
#define FALCON_BOARD_SFE4001 0x01
#define FALCON_BOARD_SFE4002 0x02
-#define FALCON_BOARD_SFN4111T 0x51
+#define FALCON_BOARD_SFE4003 0x03
#define FALCON_BOARD_SFN4112F 0x52
/* Board temperature is about 15°C above ambient when air flow is
@@ -142,17 +142,17 @@ static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
#endif /* CONFIG_SENSORS_LM87 */
/*****************************************************************************
- * Support for the SFE4001 and SFN4111T NICs.
+ * Support for the SFE4001 NIC.
*
* The SFE4001 does not power-up fully at reset due to its high power
* consumption. We control its power via a PCA9539 I/O expander.
- * Both boards have a MAX6647 temperature monitor which we expose to
+ * It also has a MAX6647 temperature monitor which we expose to
* the lm90 driver.
*
* This also provides minimal support for reflashing the PHY, which is
* initiated by resetting it with the FLASH_CFG_1 pin pulled down.
* On SFE4001 rev A2 and later this is connected to the 3V3X output of
- * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
+ * the IO-expander.
* We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
* exclusive with the network device being open.
*/
@@ -304,34 +304,6 @@ fail_on:
return rc;
}
-static int sfn4111t_reset(struct efx_nic *efx)
-{
- struct falcon_board *board = falcon_board(efx);
- efx_oword_t reg;
-
- /* GPIO 3 and the GPIO register are shared with I2C, so block that */
- i2c_lock_adapter(&board->i2c_adap);
-
- /* Pull RST_N (GPIO 2) low then let it up again, setting the
- * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
- * output enables; the output levels should always be 0 (low)
- * and we rely on external pull-ups. */
- efx_reado(efx, &reg, FR_AB_GPIO_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
- efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
- msleep(1000);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
- !!(efx->phy_mode & PHY_MODE_SPECIAL));
- efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
- msleep(1);
-
- i2c_unlock_adapter(&board->i2c_adap);
-
- ssleep(1);
- return 0;
-}
-
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -363,10 +335,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
efx->phy_mode = new_mode;
if (new_mode & PHY_MODE_SPECIAL)
falcon_stop_nic_stats(efx);
- if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001)
- err = sfe4001_poweron(efx);
- else
- err = sfn4111t_reset(efx);
+ err = sfe4001_poweron(efx);
if (!err)
err = efx_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
@@ -479,83 +448,6 @@ fail_hwmon:
return rc;
}
-static int sfn4111t_check_hw(struct efx_nic *efx)
-{
- s32 status;
-
- /* If XAUI link is up then do not monitor */
- if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
- return 0;
-
- /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
- status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
- MAX664X_REG_RSL);
- if (status < 0)
- return -EIO;
- if (status & 0x57)
- return -ERANGE;
- return 0;
-}
-
-static void sfn4111t_fini(struct efx_nic *efx)
-{
- netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
-
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
- i2c_unregister_device(falcon_board(efx)->hwmon_client);
-}
-
-static struct i2c_board_info sfn4111t_a0_hwmon_info = {
- I2C_BOARD_INFO("max6647", 0x4e),
-};
-
-static struct i2c_board_info sfn4111t_r5_hwmon_info = {
- I2C_BOARD_INFO("max6646", 0x4d),
-};
-
-static void sfn4111t_init_phy(struct efx_nic *efx)
-{
- if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
- if (sft9001_wait_boot(efx) != -EINVAL)
- return;
-
- efx->phy_mode = PHY_MODE_SPECIAL;
- falcon_stop_nic_stats(efx);
- }
-
- sfn4111t_reset(efx);
- sft9001_wait_boot(efx);
-}
-
-static int sfn4111t_init(struct efx_nic *efx)
-{
- struct falcon_board *board = falcon_board(efx);
- int rc;
-
- board->hwmon_client =
- i2c_new_device(&board->i2c_adap,
- (board->minor < 5) ?
- &sfn4111t_a0_hwmon_info :
- &sfn4111t_r5_hwmon_info);
- if (!board->hwmon_client)
- return -EIO;
-
- rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
- if (rc)
- goto fail_hwmon;
-
- if (efx->phy_mode & PHY_MODE_SPECIAL)
- /* PHY may not generate a 156.25 MHz clock and MAC
- * stats fetch will fail. */
- falcon_stop_nic_stats(efx);
-
- return 0;
-
-fail_hwmon:
- i2c_unregister_device(board->hwmon_client);
- return rc;
-}
-
/*****************************************************************************
* Support for the SFE4002
*
@@ -691,6 +583,75 @@ static int sfn4112f_init(struct efx_nic *efx)
return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
+/*****************************************************************************
+ * Support for the SFE4003
+ *
+ */
+static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4003_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
+ 0
+};
+
+static struct i2c_board_info sfe4003_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfe4003_lm87_channel,
+};
+
+/* Board-specific LED info. */
+#define SFE4003_RED_LED_GPIO 11
+#define SFE4003_LED_ON 1
+#define SFE4003_LED_OFF 0
+
+static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* The LEDs were not wired to GPIOs before A3 */
+ if (board->minor < 3 && board->major == 0)
+ return;
+
+ falcon_txc_set_gpio_val(
+ efx, SFE4003_RED_LED_GPIO,
+ (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
+}
+
+static void sfe4003_init_phy(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* The LEDs were not wired to GPIOs before A3 */
+ if (board->minor < 3 && board->major == 0)
+ return;
+
+ falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
+ falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
+}
+
+static int sfe4003_check_hw(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
+ * (bad sensor) so we mask it out. */
+ unsigned alarm_mask =
+ (board->major == 0 && board->minor <= 2) ?
+ ~LM87_ALARM_TEMP_EXT1 : ~0;
+
+ return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4003_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
+}
+
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
@@ -713,14 +674,14 @@ static const struct falcon_board_type board_types[] = {
.monitor = sfe4002_check_hw,
},
{
- .id = FALCON_BOARD_SFN4111T,
- .ref_model = "SFN4111T",
- .gen_type = "100/1000/10GBASE-T adapter",
- .init = sfn4111t_init,
- .init_phy = sfn4111t_init_phy,
- .fini = sfn4111t_fini,
- .set_id_led = tenxpress_set_id_led,
- .monitor = sfn4111t_check_hw,
+ .id = FALCON_BOARD_SFE4003,
+ .ref_model = "SFE4003",
+ .gen_type = "10GBASE-CX4 adapter",
+ .init = sfe4003_init,
+ .init_phy = sfe4003_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfe4003_set_id_led,
+ .monitor = sfe4003_check_hw,
},
{
.id = FALCON_BOARD_SFN4112F,
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
deleted file mode 100644
index 7dadfcbd6ce..00000000000
--- a/drivers/net/sfc/falcon_gmac.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/delay.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "nic.h"
-#include "mac.h"
-#include "regs.h"
-#include "io.h"
-
-/**************************************************************************
- *
- * MAC operations
- *
- *************************************************************************/
-
-static int falcon_reconfigure_gmac(struct efx_nic *efx)
-{
- struct efx_link_state *link_state = &efx->link_state;
- bool loopback, tx_fc, rx_fc, bytemode;
- int if_mode;
- unsigned int max_frame_len;
- efx_oword_t reg;
-
- /* Configuration register 1 */
- tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
- rx_fc = !!(link_state->fc & EFX_FC_RX);
- loopback = (efx->loopback_mode == LOOPBACK_GMAC);
- bytemode = (link_state->speed == 1000);
-
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_GM_LOOP, loopback,
- FRF_AB_GM_TX_EN, 1,
- FRF_AB_GM_TX_FC_EN, tx_fc,
- FRF_AB_GM_RX_EN, 1,
- FRF_AB_GM_RX_FC_EN, rx_fc);
- efx_writeo(efx, &reg, FR_AB_GM_CFG1);
- udelay(10);
-
- /* Configuration register 2 */
- if_mode = (bytemode) ? 2 : 1;
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_GM_IF_MODE, if_mode,
- FRF_AB_GM_PAD_CRC_EN, 1,
- FRF_AB_GM_LEN_CHK, 1,
- FRF_AB_GM_FD, link_state->fd,
- FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
-
- efx_writeo(efx, &reg, FR_AB_GM_CFG2);
- udelay(10);
-
- /* Max frame len register */
- max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
- EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
- efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
- udelay(10);
-
- /* FIFO configuration register 0 */
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_GMF_FTFENREQ, 1,
- FRF_AB_GMF_STFENREQ, 1,
- FRF_AB_GMF_FRFENREQ, 1,
- FRF_AB_GMF_SRFENREQ, 1,
- FRF_AB_GMF_WTMENREQ, 1);
- efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
- udelay(10);
-
- /* FIFO configuration register 1 */
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_GMF_CFGFRTH, 0x12,
- FRF_AB_GMF_CFGXOFFRTX, 0xffff);
- efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
- udelay(10);
-
- /* FIFO configuration register 2 */
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_GMF_CFGHWM, 0x3f,
- FRF_AB_GMF_CFGLWM, 0xa);
- efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
- udelay(10);
-
- /* FIFO configuration register 3 */
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_GMF_CFGHWMFT, 0x1c,
- FRF_AB_GMF_CFGFTTH, 0x08);
- efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
- udelay(10);
-
- /* FIFO configuration register 4 */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
- efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
- udelay(10);
-
- /* FIFO configuration register 5 */
- efx_reado(efx, &reg, FR_AB_GMF_CFG5);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
- efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
- udelay(10);
-
- /* MAC address */
- EFX_POPULATE_OWORD_4(reg,
- FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
- FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
- FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
- FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
- efx_writeo(efx, &reg, FR_AB_GM_ADR1);
- udelay(10);
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
- FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
- efx_writeo(efx, &reg, FR_AB_GM_ADR2);
- udelay(10);
-
- falcon_reconfigure_mac_wrapper(efx);
-
- return 0;
-}
-
-static void falcon_update_stats_gmac(struct efx_nic *efx)
-{
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
- unsigned long old_rx_pause, old_tx_pause;
- unsigned long new_rx_pause, new_tx_pause;
-
- /* Pause frames are erroneously counted as errors (SFC bug 3269) */
- old_rx_pause = mac_stats->rx_pause;
- old_tx_pause = mac_stats->tx_pause;
-
- /* Update MAC stats from DMAed values */
- FALCON_STAT(efx, GRxGoodOct, rx_good_bytes);
- FALCON_STAT(efx, GRxBadOct, rx_bad_bytes);
- FALCON_STAT(efx, GRxMissPkt, rx_missed);
- FALCON_STAT(efx, GRxFalseCRS, rx_false_carrier);
- FALCON_STAT(efx, GRxPausePkt, rx_pause);
- FALCON_STAT(efx, GRxBadPkt, rx_bad);
- FALCON_STAT(efx, GRxUcastPkt, rx_unicast);
- FALCON_STAT(efx, GRxMcastPkt, rx_multicast);
- FALCON_STAT(efx, GRxBcastPkt, rx_broadcast);
- FALCON_STAT(efx, GRxGoodLt64Pkt, rx_good_lt64);
- FALCON_STAT(efx, GRxBadLt64Pkt, rx_bad_lt64);
- FALCON_STAT(efx, GRx64Pkt, rx_64);
- FALCON_STAT(efx, GRx65to127Pkt, rx_65_to_127);
- FALCON_STAT(efx, GRx128to255Pkt, rx_128_to_255);
- FALCON_STAT(efx, GRx256to511Pkt, rx_256_to_511);
- FALCON_STAT(efx, GRx512to1023Pkt, rx_512_to_1023);
- FALCON_STAT(efx, GRx1024to15xxPkt, rx_1024_to_15xx);
- FALCON_STAT(efx, GRx15xxtoJumboPkt, rx_15xx_to_jumbo);
- FALCON_STAT(efx, GRxGtJumboPkt, rx_gtjumbo);
- FALCON_STAT(efx, GRxFcsErr64to15xxPkt, rx_bad_64_to_15xx);
- FALCON_STAT(efx, GRxFcsErr15xxtoJumboPkt, rx_bad_15xx_to_jumbo);
- FALCON_STAT(efx, GRxFcsErrGtJumboPkt, rx_bad_gtjumbo);
- FALCON_STAT(efx, GTxGoodBadOct, tx_bytes);
- FALCON_STAT(efx, GTxGoodOct, tx_good_bytes);
- FALCON_STAT(efx, GTxSglColPkt, tx_single_collision);
- FALCON_STAT(efx, GTxMultColPkt, tx_multiple_collision);
- FALCON_STAT(efx, GTxExColPkt, tx_excessive_collision);
- FALCON_STAT(efx, GTxDefPkt, tx_deferred);
- FALCON_STAT(efx, GTxLateCol, tx_late_collision);
- FALCON_STAT(efx, GTxExDefPkt, tx_excessive_deferred);
- FALCON_STAT(efx, GTxPausePkt, tx_pause);
- FALCON_STAT(efx, GTxBadPkt, tx_bad);
- FALCON_STAT(efx, GTxUcastPkt, tx_unicast);
- FALCON_STAT(efx, GTxMcastPkt, tx_multicast);
- FALCON_STAT(efx, GTxBcastPkt, tx_broadcast);
- FALCON_STAT(efx, GTxLt64Pkt, tx_lt64);
- FALCON_STAT(efx, GTx64Pkt, tx_64);
- FALCON_STAT(efx, GTx65to127Pkt, tx_65_to_127);
- FALCON_STAT(efx, GTx128to255Pkt, tx_128_to_255);
- FALCON_STAT(efx, GTx256to511Pkt, tx_256_to_511);
- FALCON_STAT(efx, GTx512to1023Pkt, tx_512_to_1023);
- FALCON_STAT(efx, GTx1024to15xxPkt, tx_1024_to_15xx);
- FALCON_STAT(efx, GTx15xxtoJumboPkt, tx_15xx_to_jumbo);
- FALCON_STAT(efx, GTxGtJumboPkt, tx_gtjumbo);
- FALCON_STAT(efx, GTxNonTcpUdpPkt, tx_non_tcpudp);
- FALCON_STAT(efx, GTxMacSrcErrPkt, tx_mac_src_error);
- FALCON_STAT(efx, GTxIpSrcErrPkt, tx_ip_src_error);
-
- /* Pause frames are erroneously counted as errors (SFC bug 3269) */
- new_rx_pause = mac_stats->rx_pause;
- new_tx_pause = mac_stats->tx_pause;
- mac_stats->rx_bad -= (new_rx_pause - old_rx_pause);
- mac_stats->tx_bad -= (new_tx_pause - old_tx_pause);
-
- /* Derive stats that the MAC doesn't provide directly */
- mac_stats->tx_bad_bytes =
- mac_stats->tx_bytes - mac_stats->tx_good_bytes;
- mac_stats->tx_packets =
- mac_stats->tx_lt64 + mac_stats->tx_64 +
- mac_stats->tx_65_to_127 + mac_stats->tx_128_to_255 +
- mac_stats->tx_256_to_511 + mac_stats->tx_512_to_1023 +
- mac_stats->tx_1024_to_15xx + mac_stats->tx_15xx_to_jumbo +
- mac_stats->tx_gtjumbo;
- mac_stats->tx_collision =
- mac_stats->tx_single_collision +
- mac_stats->tx_multiple_collision +
- mac_stats->tx_excessive_collision +
- mac_stats->tx_late_collision;
- mac_stats->rx_bytes =
- mac_stats->rx_good_bytes + mac_stats->rx_bad_bytes;
- mac_stats->rx_packets =
- mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64 +
- mac_stats->rx_64 + mac_stats->rx_65_to_127 +
- mac_stats->rx_128_to_255 + mac_stats->rx_256_to_511 +
- mac_stats->rx_512_to_1023 + mac_stats->rx_1024_to_15xx +
- mac_stats->rx_15xx_to_jumbo + mac_stats->rx_gtjumbo;
- mac_stats->rx_good = mac_stats->rx_packets - mac_stats->rx_bad;
- mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
-}
-
-static bool falcon_gmac_check_fault(struct efx_nic *efx)
-{
- return false;
-}
-
-struct efx_mac_operations falcon_gmac_operations = {
- .reconfigure = falcon_reconfigure_gmac,
- .update_stats = falcon_update_stats_gmac,
- .check_fault = falcon_gmac_check_fault,
-};
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
new file mode 100644
index 00000000000..52cb6082b91
--- /dev/null
+++ b/drivers/net/sfc/filter.c
@@ -0,0 +1,454 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "efx.h"
+#include "filter.h"
+#include "io.h"
+#include "nic.h"
+#include "regs.h"
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+struct efx_filter_table {
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_filter_spec *spec;
+};
+
+struct efx_filter_state {
+ spinlock_t lock;
+ struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+ unsigned search_depth[EFX_FILTER_TYPE_COUNT];
+};
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
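For illustration, a standalone sketch of how the hash and increment above drive the double-hashed probe sequence over a power-of-two table (the table size of 8192 is only an example, not a claim about the hardware):

#include <stdint.h>
#include <stdio.h>

/* Same 16-bit LFSR hash as efx_filter_hash() above. */
static uint16_t filter_hash(uint32_t key)
{
	uint16_t tmp;

	tmp = 0x1fff ^ (key >> 16);
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* Odd increment, as in efx_filter_increment(), so repeated probing
 * eventually visits every slot of a power-of-two table. */
static uint16_t filter_increment(uint32_t key)
{
	return key * 2 - 1;
}

int main(void)
{
	const unsigned table_size = 8192;	/* example only, power of two */
	uint32_t key = 0xdeadbeef;
	unsigned idx = filter_hash(key) & (table_size - 1);
	unsigned incr = filter_increment(key);
	int depth;

	/* The first few slots a colliding insert would try. */
	for (depth = 1; depth <= 4; depth++) {
		printf("depth %d -> index %u\n", depth, idx);
		idx = (idx + incr) & (table_size - 1);
	}
	return 0;
}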
+
+static enum efx_filter_table_id
+efx_filter_type_table_id(enum efx_filter_type type)
+{
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
+ return type >> 2;
+}
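As a worked example of the mapping above: EFX_FILTER_RX_TCP_WILD is 1 and 1 >> 2 = 0 = EFX_FILTER_TABLE_RX_IP, while EFX_FILTER_RX_MAC_WILD is 5 and 5 >> 2 = 1 = EFX_FILTER_TABLE_RX_MAC, which is exactly what the BUILD_BUG_ON checks assert at compile time.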
+
+static void
+efx_filter_table_reset_search_depth(struct efx_filter_state *state,
+ enum efx_filter_table_id table_id)
+{
+ memset(state->search_depth + (table_id << 2), 0,
+ sizeof(state->search_depth[0]) << 2);
+}
+
+static void efx_filter_push_rx_limits(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ state->search_depth[EFX_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ state->search_depth[EFX_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ state->search_depth[EFX_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ state->search_depth[EFX_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ state->search_depth[EFX_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ state->search_depth[EFX_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
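As a worked example of the fudge factors defined earlier: if the deepest TCP wildcard filter inserted so far sits at probe depth 5, the value pushed into FRF_BZ_TCP_WILD_SRCH_LIMIT is 5 + FILTER_CTL_SRCH_FUDGE_WILD = 8, whereas a full-match type at the same depth would be programmed as 5 + FILTER_CTL_SRCH_FUDGE_FULL = 6.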
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_filter_type_table_id(spec->type)) {
+ case EFX_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
+ spec->type == EFX_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
+ EFX_POPULATE_OWORD_8(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_IP_OVERRIDE,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool efx_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ return true;
+}
+
+static int efx_filter_search(struct efx_filter_table *table,
+ struct efx_filter_spec *spec, u32 key,
+ bool for_insert, int *depth_required)
+{
+ unsigned hash, incr, filter_idx, depth;
+ struct efx_filter_spec *cmp;
+
+ hash = efx_filter_hash(key);
+ incr = efx_filter_increment(key);
+
+ for (depth = 1, filter_idx = hash & (table->size - 1);
+ depth <= FILTER_CTL_SRCH_MAX &&
+ test_bit(filter_idx, table->used_bitmap);
+ ++depth) {
+ cmp = &table->spec[filter_idx];
+ if (efx_filter_equal(spec, cmp))
+ goto found;
+ filter_idx = (filter_idx + incr) & (table->size - 1);
+ }
+ if (!for_insert)
+ return -ENOENT;
+ if (depth > FILTER_CTL_SRCH_MAX)
+ return -EBUSY;
+found:
+ *depth_required = depth;
+ return filter_idx;
+}
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace: Flag for whether the specified filter may replace a filter
+ * with an identical match expression and equal or lower priority
+ *
+ * On success, return the filter index within its table.
+ * On failure, return a negative error code.
+ */
+int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id =
+ efx_filter_type_table_id(spec->type);
+ struct efx_filter_table *table = &state->table[table_id];
+ struct efx_filter_spec *saved_spec;
+ efx_oword_t filter;
+ int filter_idx, depth;
+ u32 key;
+ int rc;
+
+ if (table->size == 0)
+ return -EINVAL;
+
+ key = efx_filter_build(&filter, spec);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_depth=%d", __func__, spec->type,
+ state->search_depth[spec->type]);
+
+ spin_lock_bh(&state->lock);
+
+ rc = efx_filter_search(table, spec, key, true, &depth);
+ if (rc < 0)
+ goto out;
+ filter_idx = rc;
+ BUG_ON(filter_idx >= table->size);
+ saved_spec = &table->spec[filter_idx];
+
+ if (test_bit(filter_idx, table->used_bitmap)) {
+ /* Should we replace the existing filter? */
+ if (!replace) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto out;
+ }
+ } else {
+ __set_bit(filter_idx, table->used_bitmap);
+ ++table->used;
+ }
+ *saved_spec = *spec;
+
+ if (state->search_depth[spec->type] < depth) {
+ state->search_depth[spec->type] = depth;
+ efx_filter_push_rx_limits(efx);
+ }
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec->type, filter_idx, spec->dmaq_id);
+
+out:
+ spin_unlock_bh(&state->lock);
+ return rc;
+}
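Putting the API together, a caller such as the ethtool path earlier in this patch uses it roughly as below. This is only a sketch (it is not compilable outside the driver, and the queue number, addresses and ports are arbitrary examples):

static int example_insert_tcp_filter(struct efx_nic *efx)
{
	struct efx_filter_spec spec;

	spec.priority = EFX_FILTER_PRI_MANUAL;
	spec.flags = EFX_FILTER_FLAG_RX_SCATTER;
	spec.dmaq_id = 2;			/* deliver to RX queue 2 */

	/* Match TCP/IPv4 192.168.1.1:1000 -> 192.168.1.2:80; values are in
	 * host byte order, as documented in filter.h later in this patch. */
	efx_filter_set_rx_tcp_full(&spec,
				   0xc0a80101, 1000,	/* source */
				   0xc0a80102, 80);	/* destination */

	/* Allow an existing filter with the same match to be replaced. */
	return efx_filter_insert_filter(efx, &spec, true);
}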
+
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_filter_table *table,
+ int filter_idx)
+{
+ static efx_oword_t filter;
+
+ if (test_bit(filter_idx, table->used_bitmap)) {
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+}
+
+/**
+ * efx_filter_remove_filter - remove a filter by specification
+ * @efx: NIC from which to remove the filter
+ * @spec: Specification for the filter
+ *
+ * On success, return zero.
+ * On failure, return a negative error code.
+ */
+int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id =
+ efx_filter_type_table_id(spec->type);
+ struct efx_filter_table *table = &state->table[table_id];
+ struct efx_filter_spec *saved_spec;
+ efx_oword_t filter;
+ int filter_idx, depth;
+ u32 key;
+ int rc;
+
+ key = efx_filter_build(&filter, spec);
+
+ spin_lock_bh(&state->lock);
+
+ rc = efx_filter_search(table, spec, key, false, &depth);
+ if (rc < 0)
+ goto out;
+ filter_idx = rc;
+ saved_spec = &table->spec[filter_idx];
+
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(state, table_id);
+ rc = 0;
+
+out:
+ spin_unlock_bh(&state->lock);
+ return rc;
+}
+
+/**
+ * efx_filter_table_clear - remove filters from a table by priority
+ * @efx: NIC from which to remove the filters
+ * @table_id: Table from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+void efx_filter_table_clear(struct efx_nic *efx,
+ enum efx_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = &state->table[table_id];
+ int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+ if (table->spec[filter_idx].priority <= priority)
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(state, table_id);
+
+ spin_unlock_bh(&state->lock);
+}
+
+/* Restore filter state after reset */
+void efx_restore_filters(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ efx_oword_t filter;
+ int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_filter_push_rx_limits(efx);
+
+ spin_unlock_bh(&state->lock);
+}
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+ struct efx_filter_state *state;
+ struct efx_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ spin_lock_init(&state->lock);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vmalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ memset(table->spec, 0, table->size * sizeof(*table->spec));
+ }
+
+ return 0;
+
+fail:
+ efx_remove_filters(efx);
+ return -ENOMEM;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
new file mode 100644
index 00000000000..a53319ded79
--- /dev/null
+++ b/drivers/net/sfc/filter.h
@@ -0,0 +1,189 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+
+enum efx_filter_table_id {
+ EFX_FILTER_TABLE_RX_IP = 0,
+ EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_COUNT,
+};
+
+/**
+ * enum efx_filter_type - type of hardware filter
+ * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
+ * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
+ * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
+ * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
+ * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
+ * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
+ *
+ * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
+ */
+enum efx_filter_type {
+ EFX_FILTER_RX_TCP_FULL = 0,
+ EFX_FILTER_RX_TCP_WILD,
+ EFX_FILTER_RX_UDP_FULL,
+ EFX_FILTER_RX_UDP_WILD,
+ EFX_FILTER_RX_MAC_FULL = 4,
+ EFX_FILTER_RX_MAC_WILD,
+ EFX_FILTER_TYPE_COUNT,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
+ */
+enum efx_filter_priority {
+ EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_MANUAL,
+ EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ * By default, matching packets will be delivered only to the
+ * specified queue. If this flag is set, they will be delivered
+ * to a range of queues offset from the specified queue number
+ * according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ * queue.
+ * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
+ * any IP filter that matches the same packet. By default, IP
+ * filters take precedence.
+ *
+ * Currently, no flags are defined for TX filters.
+ */
+enum efx_filter_flags {
+ EFX_FILTER_FLAG_RX_RSS = 0x01,
+ EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @type: Type of match to be performed, from &enum efx_filter_type
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @dmaq_id: Source/target queue index
+ * @data: Match data (type-dependent)
+ *
+ * Use the efx_filter_set_*() functions to initialise the @type and
+ * @data fields.
+ */
+struct efx_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+/**
+ * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
+ * @spec: Specification to initialise
+ * @shost: Source host address (host byte order)
+ * @sport: Source port (host byte order)
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
+ u32 shost, u16 sport, u32 dhost, u16 dport)
+{
+ spec->type = EFX_FILTER_RX_TCP_FULL;
+ spec->data[0] = sport | shost << 16;
+ spec->data[1] = dport << 16 | shost >> 16;
+ spec->data[2] = dhost;
+}
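To make the packing above concrete, a standalone sketch with arbitrary host-order example values:

#include <stdint.h>
#include <stdio.h>

/* Same word layout as efx_filter_set_rx_tcp_full() above. */
int main(void)
{
	uint32_t shost = 0xc0a80101, dhost = 0xc0a80102;  /* 192.168.1.1 / .2 */
	uint16_t sport = 1000, dport = 80;
	uint32_t data[3];

	data[0] = sport | shost << 16;
	data[1] = (uint32_t)dport << 16 | shost >> 16;
	data[2] = dhost;

	printf("data[0]=0x%08x data[1]=0x%08x data[2]=0x%08x\n",
	       data[0], data[1], data[2]);
	/* Prints: data[0]=0x010103e8 data[1]=0x0050c0a8 data[2]=0xc0a80102 */
	return 0;
}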
+
+/**
+ * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
+ * @spec: Specification to initialise
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
+{
+ spec->type = EFX_FILTER_RX_TCP_WILD;
+ spec->data[0] = 0;
+ spec->data[1] = dport << 16;
+ spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
+ * @spec: Specification to initialise
+ * @shost: Source host address (host byte order)
+ * @sport: Source port (host byte order)
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
+ u32 shost, u16 sport, u32 dhost, u16 dport)
+{
+ spec->type = EFX_FILTER_RX_UDP_FULL;
+ spec->data[0] = sport | shost << 16;
+ spec->data[1] = dport << 16 | shost >> 16;
+ spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
+ * @spec: Specification to initialise
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
+{
+ spec->type = EFX_FILTER_RX_UDP_WILD;
+ spec->data[0] = dport;
+ spec->data[1] = 0;
+ spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
+ * @spec: Specification to initialise
+ * @vid: VLAN ID
+ * @addr: Destination MAC address
+ */
+static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ spec->type = EFX_FILTER_RX_MAC_FULL;
+ spec->data[0] = vid;
+ spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+ spec->data[2] = addr[0] << 8 | addr[1];
+}
+
+/**
+ * efx_filter_set_rx_mac_wild - specify RX filter with MAC wildcard match
+ * @spec: Specification to initialise
+ * @addr: Destination MAC address
+ */
+static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
+ const u8 *addr)
+{
+ spec->type = EFX_FILTER_RX_MAC_WILD;
+ spec->data[0] = 0;
+ spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+ spec->data[2] = addr[0] << 8 | addr[1];
+}
+
+#endif /* EFX_FILTER_H */
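Similarly, the MAC-address packing used by the two helpers above can be checked with a small standalone sketch (the address is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

/* Same packing as efx_filter_set_rx_mac_full()/_wild(): the low four
 * bytes of the destination MAC go in data[1], the top two in data[2]. */
int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
	uint32_t data1 = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
	uint32_t data2 = addr[0] << 8 | addr[1];

	printf("data[1]=0x%08x data[2]=0x%08x\n", data1, data2);
	/* Prints: data[1]=0x53010203 data[2]=0x0000000f */
	return 0;
}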
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index f1aa5f37489..7a6e5ca0290 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,7 +13,6 @@
#include "net_driver.h"
-extern struct efx_mac_operations falcon_gmac_operations;
extern struct efx_mac_operations falcon_xmac_operations;
extern struct efx_mac_operations efx_mcdi_mac_operations;
extern void falcon_reconfigure_xmac_core(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index eeaf0bd64bd..98d94602042 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -286,46 +286,24 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
*/
void efx_mdio_an_reconfigure(struct efx_nic *efx)
{
- bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full
- || EFX_WORKAROUND_13204(efx));
int reg;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
/* Set up the base page */
- reg = ADVERTISE_CSMA;
- if (efx->link_advertising & ADVERTISED_10baseT_Half)
- reg |= ADVERTISE_10HALF;
- if (efx->link_advertising & ADVERTISED_10baseT_Full)
- reg |= ADVERTISE_10FULL;
- if (efx->link_advertising & ADVERTISED_100baseT_Half)
- reg |= ADVERTISE_100HALF;
- if (efx->link_advertising & ADVERTISED_100baseT_Full)
- reg |= ADVERTISE_100FULL;
- if (xnp)
- reg |= ADVERTISE_RESV;
- else if (efx->link_advertising & (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full))
- reg |= ADVERTISE_NPAGE;
+ reg = ADVERTISE_CSMA | ADVERTISE_RESV;
if (efx->link_advertising & ADVERTISED_Pause)
reg |= ADVERTISE_PAUSE_CAP;
if (efx->link_advertising & ADVERTISED_Asym_Pause)
reg |= ADVERTISE_PAUSE_ASYM;
efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
- /* Set up the (extended) next page if necessary */
- if (efx->phy_op->set_npage_adv)
- efx->phy_op->set_npage_adv(efx, efx->link_advertising);
+ /* Set up the (extended) next page */
+ efx->phy_op->set_npage_adv(efx, efx->link_advertising);
/* Enable and restart AN */
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
- reg |= MDIO_AN_CTRL1_ENABLE;
- if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx)))
- reg |= MDIO_AN_CTRL1_RESTART;
- if (xnp)
- reg |= MDIO_AN_CTRL1_XNP;
- else
- reg &= ~MDIO_AN_CTRL1_XNP;
+ reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 64e7caa4bbb..44f4d58a39a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -29,6 +29,7 @@
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
+#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include "enum.h"
@@ -137,6 +138,7 @@ struct efx_tx_buffer {
* @channel: The associated channel
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
* @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -170,6 +172,7 @@ struct efx_tx_queue {
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
+ unsigned int ptr_mask;
enum efx_flush_state flushed;
/* Members used mainly on the completion path */
@@ -225,10 +228,9 @@ struct efx_rx_page_state {
/**
* struct efx_rx_queue - An Efx RX queue
* @efx: The associated Efx NIC
- * @queue: DMA queue number
- * @channel: The associated channel
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
@@ -240,9 +242,6 @@ struct efx_rx_page_state {
* @min_fill: RX descriptor minimum non-zero fill level.
* This records the minimum fill level observed when a ring
* refill was triggered.
- * @min_overfill: RX descriptor minimum overflow fill level.
- * This records the minimum fill level at which RX queue
- * overflow was observed. It should never be set.
* @alloc_page_count: RX allocation strategy counter.
* @alloc_skb_count: RX allocation strategy counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
@@ -250,10 +249,9 @@ struct efx_rx_page_state {
*/
struct efx_rx_queue {
struct efx_nic *efx;
- int queue;
- struct efx_channel *channel;
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
+ unsigned int ptr_mask;
int added_count;
int notified_count;
@@ -302,7 +300,6 @@ enum efx_rx_alloc_method {
*
* @efx: Associated Efx NIC
* @channel: Channel instance number
- * @name: Name for channel and IRQ
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -311,6 +308,7 @@ enum efx_rx_alloc_method {
* @reset_work: Scheduled reset work thread
* @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
* @magic_count: Event queue test event count
@@ -327,14 +325,14 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
- * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @rx_queue: RX queue for this channel
* @tx_stop_count: Core TX queue stop count
* @tx_stop_lock: Core TX queue stop lock
+ * @tx_queue: TX queues for this channel
*/
struct efx_channel {
struct efx_nic *efx;
int channel;
- char name[IFNAMSIZ + 6];
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -342,6 +340,7 @@ struct efx_channel {
struct napi_struct napi_str;
bool work_pending;
struct efx_special_buffer eventq;
+ unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
unsigned int magic_count;
@@ -366,9 +365,12 @@ struct efx_channel {
struct efx_rx_buffer *rx_pkt;
bool rx_pkt_csummed;
- struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue rx_queue;
+
atomic_t tx_stop_count;
spinlock_t tx_stop_lock;
+
+ struct efx_tx_queue tx_queue[2];
};
enum efx_led_mode {
@@ -404,8 +406,6 @@ enum efx_int_mode {
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
-#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
-
enum nic_state {
STATE_INIT = 0,
STATE_RUNNING = 1,
@@ -618,6 +618,8 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
+struct efx_filter_state;
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -641,6 +643,9 @@ union efx_multicast_hash {
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
+ * @channel_name: Names for channels and their IRQs
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
* @next_buffer_table: First available buffer table id
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -724,10 +729,11 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
- struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
- struct efx_channel channel[EFX_MAX_CHANNELS];
+ struct efx_channel *channel[EFX_MAX_CHANNELS];
+ char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ unsigned rxq_entries;
+ unsigned txq_entries;
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
@@ -794,6 +800,8 @@ struct efx_nic {
u64 loopback_modes;
void *loopback_selftest;
+
+ struct efx_filter_state *filter_state;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -909,39 +917,67 @@ struct efx_nic_type {
*
*************************************************************************/
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+ return efx->channel[index];
+}
+
/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx) \
- for (_channel = &((_efx)->channel[0]); \
- _channel < &((_efx)->channel[(efx)->n_channels]); \
- _channel++)
-
-/* Iterate over all used TX queues */
-#define efx_for_each_tx_queue(_tx_queue, _efx) \
- for (_tx_queue = &((_efx)->tx_queue[0]); \
- _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
- (_efx)->n_tx_channels]); \
- _tx_queue++)
+ for (_channel = (_efx)->channel[0]; \
+ _channel; \
+ _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
+ (_efx)->channel[_channel->channel + 1] : NULL)
+
+extern struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+{
+ struct efx_tx_queue *tx_queue = channel->tx_queue;
+ EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
+ return tx_queue->channel ? tx_queue + type : NULL;
+}
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = (_channel)->tx_queue; \
+ for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \
_tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
_tx_queue++)
-/* Iterate over all used RX queues */
-#define efx_for_each_rx_queue(_rx_queue, _efx) \
- for (_rx_queue = &((_efx)->rx_queue[0]); \
- _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
- _rx_queue++)
+static inline struct efx_rx_queue *
+efx_get_rx_queue(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
+ return &efx->channel[index]->rx_queue;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+ return channel->channel < channel->efx->n_rx_channels ?
+ &channel->rx_queue : NULL;
+}
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
+ for (_rx_queue = efx_channel_get_rx_queue(channel); \
_rx_queue; \
- _rx_queue = NULL) \
- if (_rx_queue->channel != (_channel)) \
- continue; \
- else
+ _rx_queue = NULL)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+ return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+ return efx_rx_queue_channel(rx_queue)->channel;
+}
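A sketch of how the new accessors and iterators are intended to be used from driver code (illustrative only, not compilable outside the driver):

static void example_walk_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;

	/* Channels are now reached through the efx->channel[] pointer
	 * array rather than an embedded array. */
	efx_for_each_channel(channel, efx) {
		/* A channel has at most one RX queue; the iterator body is
		 * skipped for channels that are not used for RX. */
		efx_for_each_channel_rx_queue(rx_queue, channel)
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d owns RX queue %d\n",
				  channel->channel,
				  efx_rx_queue_index(rx_queue));
	}
}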
/* Returns a pointer to the specified receive buffer in the RX
* descriptor queue.
@@ -949,7 +985,7 @@ struct efx_nic_type {
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
unsigned int index)
{
- return (&rx_queue->buffer[index]);
+ return &rx_queue->buffer[index];
}
/* Set bit in a little-endian bitfield */
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f595d920c7c..394dd929fee 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -104,7 +104,7 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return (((efx_qword_t *) (channel->eventq.addr)) + index);
+ return ((efx_qword_t *) (channel->eventq.addr)) + index;
}
/* See if an event is present
@@ -119,8 +119,8 @@ static inline efx_qword_t *efx_event(struct efx_channel *channel,
*/
static inline int efx_event_present(efx_qword_t *event)
{
- return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
- EFX_DWORD_IS_ALL_ONES(event->dword[1])));
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
@@ -263,8 +263,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
{
len = ALIGN(len, EFX_BUF_SIZE);
- buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
- &buffer->dma_addr);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_KERNEL);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -301,8 +301,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
(u64)buffer->dma_addr, buffer->len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
- pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
- buffer->dma_addr);
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
+ buffer->dma_addr);
buffer->addr = NULL;
buffer->entries = 0;
}
@@ -347,7 +347,7 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
- return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
+ return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
@@ -356,7 +356,7 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
unsigned write_ptr;
efx_dword_t reg;
- write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(tx_queue->efx, &reg,
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
@@ -377,7 +377,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
do {
- write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[write_ptr];
txd = efx_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count;
@@ -398,10 +398,11 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
- EFX_TXQ_SIZE & EFX_TXQ_MASK);
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
- EFX_TXQ_SIZE * sizeof(efx_qword_t));
+ entries * sizeof(efx_qword_t));
}
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
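The per-queue ptr_mask replaces the compile-time EFX_TXQ_MASK (and, in the RX hunks below, EFX_RXQ_MASK), so ring sizes become run-time choices while staying powers of two. A standalone sketch of the wrap-around arithmetic with an example ring size:

#include <stdio.h>

int main(void)
{
	unsigned entries = 1024;	/* example; must be a power of two */
	unsigned ptr_mask = entries - 1;
	unsigned write_count = 1030;	/* free-running counter */

	/* Index wraps without a modulo, exactly as
	 * write_ptr = tx_queue->write_count & tx_queue->ptr_mask above. */
	printf("write_ptr = %u\n", write_count & ptr_mask);	/* prints 6 */
	return 0;
}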
@@ -501,7 +502,7 @@ void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
- return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
+ return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}
/* This creates an entry in the RX descriptor queue */
@@ -526,30 +527,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
*/
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
+ struct efx_nic *efx = rx_queue->efx;
efx_dword_t reg;
unsigned write_ptr;
while (rx_queue->notified_count != rx_queue->added_count) {
- efx_build_rx_desc(rx_queue,
- rx_queue->notified_count &
- EFX_RXQ_MASK);
+ efx_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
++rx_queue->notified_count;
}
wmb();
- write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(rx_queue->efx, &reg,
- FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
}
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
- EFX_RXQ_SIZE & EFX_RXQ_MASK);
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &rx_queue->rxd,
- EFX_RXQ_SIZE * sizeof(efx_qword_t));
+ entries * sizeof(efx_qword_t));
}
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
@@ -561,7 +564,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
- rx_queue->queue, rx_queue->rxd.index,
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->flushed = FLUSH_NONE;
@@ -575,9 +578,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
- rx_queue->channel->channel,
+ efx_rx_queue_channel(rx_queue)->channel,
FRF_AZ_RX_DESCQ_OWNER_ID, 0,
- FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
@@ -585,7 +589,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- rx_queue->queue);
+ efx_rx_queue_index(rx_queue));
}
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -598,7 +602,8 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
/* Post a flush command */
EFX_POPULATE_OWORD_2(rx_flush_descq,
FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
@@ -613,7 +618,7 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- rx_queue->queue);
+ efx_rx_queue_index(rx_queue));
/* Unpin RX descriptor ring */
efx_fini_special_buffer(efx, &rx_queue->rxd);
@@ -680,15 +685,17 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = &efx->tx_queue[tx_ev_q_label];
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- EFX_TXQ_MASK);
+ tx_queue->ptr_mask);
channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = &efx->tx_queue[tx_ev_q_label];
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
if (efx_dev_registered(efx))
netif_tx_lock(efx->net_dev);
@@ -714,6 +721,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
bool *rx_ev_pkt_ok,
bool *discard)
{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
@@ -746,14 +754,14 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
if (rx_ev_frm_trunc)
- ++rx_queue->channel->n_rx_frm_trunc;
+ ++channel->n_rx_frm_trunc;
else if (rx_ev_tobe_disc)
- ++rx_queue->channel->n_rx_tobe_disc;
+ ++channel->n_rx_tobe_disc;
else if (!efx->loopback_selftest) {
if (rx_ev_ip_hdr_chksum_err)
- ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
+ ++channel->n_rx_ip_hdr_chksum_err;
else if (rx_ev_tcp_udp_chksum_err)
- ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+ ++channel->n_rx_tcp_udp_chksum_err;
}
/* The frame must be discarded if any of these are true. */
@@ -769,7 +777,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- rx_queue->queue, EFX_QWORD_VAL(*event),
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
" [IP_HDR_CHKSUM_ERR]" : "",
@@ -791,8 +799,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
- expected = rx_queue->removed_count & EFX_RXQ_MASK;
- dropped = (index - expected) & EFX_RXQ_MASK;
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
@@ -827,10 +835,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
- rx_queue = &efx->rx_queue[channel->channel];
+ rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
+ expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
if (unlikely(rx_ev_desc_ptr != expected_ptr))
efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
@@ -879,7 +887,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
else
netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n",
@@ -997,6 +1005,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
+ struct efx_nic *efx = channel->efx;
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
@@ -1021,7 +1030,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
EFX_SET_QWORD(*p_event);
/* Increment read pointer */
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+ read_ptr = (read_ptr + 1) & channel->eventq_mask;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1033,7 +1042,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
break;
case FSE_AZ_EV_CODE_TX_EV:
tx_packets += efx_handle_tx_event(channel, &event);
- if (tx_packets >= EFX_TXQ_SIZE) {
+ if (tx_packets > efx->txq_entries) {
spent = budget;
goto out;
}
@@ -1068,10 +1077,11 @@ out:
int efx_nic_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
- EFX_EVQ_SIZE & EFX_EVQ_MASK);
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
return efx_alloc_special_buffer(efx, &channel->eventq,
- EFX_EVQ_SIZE * sizeof(efx_qword_t));
+ entries * sizeof(efx_qword_t));
}
void efx_nic_init_eventq(struct efx_channel *channel)
@@ -1163,11 +1173,11 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
static void efx_poll_flush_events(struct efx_nic *efx)
{
- struct efx_channel *channel = &efx->channel[0];
+ struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
+ unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1185,7 +1195,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_queue = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_SUBDATA);
if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx->tx_queue + ev_queue;
+ tx_queue = efx_get_tx_queue(
+ efx, ev_queue / EFX_TXQ_TYPES,
+ ev_queue % EFX_TXQ_TYPES);
tx_queue->flushed = FLUSH_DONE;
}
} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1195,7 +1207,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_failed = EFX_QWORD_FIELD(
*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
if (ev_queue < efx->n_rx_channels) {
- rx_queue = efx->rx_queue + ev_queue;
+ rx_queue = efx_get_rx_queue(efx, ev_queue);
rx_queue->flushed =
ev_failed ? FLUSH_FAILED : FLUSH_DONE;
}
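The flush handling above now recovers a (channel, queue type) pair from the flat hardware queue number by dividing and taking the remainder by EFX_TXQ_TYPES, matching the channel * EFX_TXQ_TYPES + type layout used elsewhere in the series. A standalone sketch of that arithmetic; the value 2 for TXQ_TYPES is illustrative only.

/* Standalone sketch: recover (channel, type) from a flat TX queue number
 * laid out as channel * TXQ_TYPES + type.  TXQ_TYPES = 2 is illustrative. */
#include <stdio.h>

#define TXQ_TYPES 2

int main(void)
{
	unsigned flat;

	for (flat = 0; flat < 3 * TXQ_TYPES; flat++) {
		unsigned channel = flat / TXQ_TYPES;
		unsigned type    = flat % TXQ_TYPES;

		printf("flat queue %u -> channel %u, type %u\n",
		       flat, channel, type);
	}
	return 0;
}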
@@ -1205,7 +1217,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+ read_ptr = (read_ptr + 1) & channel->eventq_mask;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
@@ -1216,6 +1228,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int i, tx_pending, rx_pending;
@@ -1224,29 +1237,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
efx->type->prepare_flush(efx);
/* Flush all tx queues in parallel */
- efx_for_each_tx_queue(tx_queue, efx)
- efx_flush_tx_queue(tx_queue);
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_flush_tx_queue(tx_queue);
+ }
/* The hardware supports four concurrent rx flushes, each of which may
* need to be retried if there is an outstanding descriptor fetch */
for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
rx_pending = tx_pending = 0;
- efx_for_each_rx_queue(rx_queue, efx) {
- if (rx_queue->flushed == FLUSH_PENDING)
- ++rx_pending;
- }
- efx_for_each_rx_queue(rx_queue, efx) {
- if (rx_pending == EFX_RX_FLUSH_COUNT)
- break;
- if (rx_queue->flushed == FLUSH_FAILED ||
- rx_queue->flushed == FLUSH_NONE) {
- efx_flush_rx_queue(rx_queue);
- ++rx_pending;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed == FLUSH_PENDING)
+ ++rx_pending;
}
}
- efx_for_each_tx_queue(tx_queue, efx) {
- if (tx_queue->flushed != FLUSH_DONE)
- ++tx_pending;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_pending == EFX_RX_FLUSH_COUNT)
+ break;
+ if (rx_queue->flushed == FLUSH_FAILED ||
+ rx_queue->flushed == FLUSH_NONE) {
+ efx_flush_rx_queue(rx_queue);
+ ++rx_pending;
+ }
+ }
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->flushed != FLUSH_DONE)
+ ++tx_pending;
+ }
}
if (rx_pending == 0 && tx_pending == 0)
@@ -1258,19 +1277,21 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Mark the queues as all flushed. We're going to return failure
* leading to a reset, or fake up success anyway */
- efx_for_each_tx_queue(tx_queue, efx) {
- if (tx_queue->flushed != FLUSH_DONE)
- netif_err(efx, hw, efx->net_dev,
- "tx queue %d flush command timed out\n",
- tx_queue->queue);
- tx_queue->flushed = FLUSH_DONE;
- }
- efx_for_each_rx_queue(rx_queue, efx) {
- if (rx_queue->flushed != FLUSH_DONE)
- netif_err(efx, hw, efx->net_dev,
- "rx queue %d flush command timed out\n",
- rx_queue->queue);
- rx_queue->flushed = FLUSH_DONE;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "tx queue %d flush command timed out\n",
+ tx_queue->queue);
+ tx_queue->flushed = FLUSH_DONE;
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "rx queue %d flush command timed out\n",
+ efx_rx_queue_index(rx_queue));
+ rx_queue->flushed = FLUSH_DONE;
+ }
}
return -ETIMEDOUT;
@@ -1457,7 +1478,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
*/
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
- struct efx_channel *channel = dev_id;
+ struct efx_channel *channel = *(struct efx_channel **)dev_id;
struct efx_nic *efx = channel->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
@@ -1532,7 +1553,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx_msi_interrupt,
IRQF_PROBE_SHARED, /* Not shared */
- channel->name, channel);
+ efx->channel_name[channel->channel],
+ &efx->channel[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1544,7 +1566,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
fail2:
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, channel);
+ free_irq(channel->irq, &efx->channel[channel->channel]);
fail1:
return rc;
}
@@ -1557,7 +1579,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx) {
if (channel->irq)
- free_irq(channel->irq, channel);
+ free_irq(channel->irq, &efx->channel[channel->channel]);
}
/* ACK legacy interrupt */
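The MSI changes above pass &efx->channel[channel->channel] (the address of the per-channel pointer slot) as the IRQ cookie, so the handler adds one level of indirection with *(struct efx_channel **)dev_id, presumably so the channel structure can be reallocated without re-requesting the IRQ. A standalone sketch of that pointer-to-pointer cookie pattern; the names are illustrative.

/* Standalone sketch: pass the address of a pointer slot as an opaque
 * IRQ cookie, so the object it refers to can be swapped out later
 * without changing the cookie itself. */
#include <stdio.h>

struct demo_channel { int id; };

/* Stands in for the IRQ handler: dev_id is really a demo_channel ** */
static void demo_handler(void *dev_id)
{
	struct demo_channel *ch = *(struct demo_channel **)dev_id;

	printf("handler sees channel %d\n", ch->id);
}

int main(void)
{
	struct demo_channel *slot[1];           /* like efx->channel[]       */
	struct demo_channel a = { .id = 1 }, b = { .id = 2 };

	slot[0] = &a;
	demo_handler(&slot[0]);                 /* cookie = &slot[0]         */

	slot[0] = &b;                           /* "reallocate" the channel  */
	demo_handler(&slot[0]);                 /* same cookie, new channel  */
	return 0;
}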
@@ -1827,8 +1849,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
- /* The register buffer is allocated with slab, so we can't
- * reasonably read all of the buffer table (up to 8MB!).
+ /* We can't reasonably read all of the buffer table (up to 8MB!).
* However this driver will only use a few entries. Reading
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
@@ -1836,7 +1857,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
B, Z, 8, 1024),
- /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -1846,6 +1866,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
REGISTER_TABLE_CZ(MC_TREG_SMEM),
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 5bc26137257..1dab609757f 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -11,17 +11,12 @@
#define EFX_PHY_H
/****************************************************************************
- * 10Xpress (SFX7101 and SFT9001) PHYs
+ * 10Xpress (SFX7101) PHY
*/
extern struct efx_phy_operations falcon_sfx7101_phy_ops;
-extern struct efx_phy_operations falcon_sft9001_phy_ops;
extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
- * to boot due to corrupt flash, or some other negative error code. */
-extern int sft9001_wait_boot(struct efx_nic *efx);
-
/****************************************************************************
* AMCC/Quake QT202x PHYs
*/
@@ -42,6 +37,17 @@ extern struct efx_phy_operations falcon_qt202x_phy_ops;
extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
/****************************************************************************
+* Transwitch CX4 retimer
+*/
+extern struct efx_phy_operations falcon_txc_phy_ops;
+
+#define TXC_GPIO_DIR_INPUT 0
+#define TXC_GPIO_DIR_OUTPUT 1
+
+extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+
+/****************************************************************************
* Siena managed PHYs
*/
extern struct efx_phy_operations efx_mcdi_phy_ops;
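The new TXC43128 declarations above expose the retimer's GPIO block to board code. A minimal kernel-context sketch of driving one pin, using only the helpers declared in this header; the pin number 1 and the function name are illustrative, not taken from the board code.

/* Kernel-context sketch (driver-internal headers assumed): drive one
 * TXC43128 GPIO pin high.  The pin number is illustrative; the real
 * assignments belong to the Falcon board code. */
#include "phy.h"

static void demo_assert_txc_gpio(struct efx_nic *efx)
{
	falcon_txc_set_gpio_dir(efx, 1, TXC_GPIO_DIR_OUTPUT);
	falcon_txc_set_gpio_val(efx, 1, 1);
}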
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 18a3be42834..96430ed81c3 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2893,6 +2893,20 @@
#define FRF_AB_XX_FORCE_SIG_WIDTH 8
#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+
/* DRIVER_EV */
/* Sub-fields of an RX flush completion event */
#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
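The RMFT_DEST_MAC and TMFT_SRC_MAC fields added above are wider than 32 bits, so the register layout splits each into a 32-bit low part and a 16-bit high part. A standalone sketch of packing a 48-bit MAC address into such a LO/HI pair; the byte ordering chosen here is an assumption for illustration, not taken from the hardware documentation.

/* Standalone sketch: split a 48-bit MAC into a 32-bit LO and 16-bit HI
 * value, as the RMFT_DEST_MAC_{LO,HI} field widths require.  The byte
 * order used here is illustrative only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0f, 0x53, 0x12, 0x34, 0x56 };
	uint64_t mac64 = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac64 = (mac64 << 8) | mac[i];

	uint32_t lo = (uint32_t)(mac64 & 0xffffffffu);   /* 32-bit LO field */
	uint16_t hi = (uint16_t)(mac64 >> 32);           /* 16-bit HI field */

	printf("LO=%#010x HI=%#06x\n", lo, hi);
	return 0;
}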
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 799c461ce7b..6d0959b5158 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -133,7 +133,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
unsigned index, count;
for (count = 0; count < EFX_RX_BATCH; ++count) {
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
@@ -208,7 +208,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
dma_addr += sizeof(struct efx_rx_page_state);
split:
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->skb = NULL;
@@ -285,7 +285,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
* we'd like to insert an additional descriptor whilst leaving
* EFX_RXD_HEAD_ROOM for the non-recycle path */
fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
- if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+ if (unlikely(fill_level > rx_queue->max_fill)) {
/* We could place "state" on a list, and drain the list in
* efx_fast_push_rx_descriptors(). For now, this will do. */
return;
@@ -294,7 +294,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
++state->refcnt;
get_page(rx_buf->page);
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
new_buf->skb = NULL;
@@ -311,7 +311,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_rx_buffer *new_buf;
unsigned index;
@@ -319,7 +319,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
page_count(rx_buf->page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
memcpy(new_buf, rx_buf, sizeof(*new_buf));
@@ -341,13 +341,13 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
*/
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_channel *channel = rx_queue->channel;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
unsigned fill_level;
int space, rc = 0;
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
+ EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
if (fill_level >= rx_queue->fast_fill_trigger)
goto out;
@@ -364,7 +364,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
" level %d to level %d using %s allocation\n",
- rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->fast_fill_limit,
channel->rx_alloc_push_pages ? "page" : "skb");
do {
@@ -382,7 +383,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
- "to level %d\n", rx_queue->queue,
+ "to level %d\n", efx_rx_queue_index(rx_queue),
rx_queue->added_count - rx_queue->removed_count);
out:
@@ -393,7 +394,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
void efx_rx_slow_fill(unsigned long context)
{
struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
- struct efx_channel *channel = rx_queue->channel;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(channel);
@@ -421,7 +422,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d seriously overlength "
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
- rx_queue->queue, len, max_len,
+ efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
/* If this buffer was skb-allocated, then the meta
* data at the end of the skb will be trashed. So
@@ -434,10 +435,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d overlength RX event "
"(0x%x > 0x%x)\n",
- rx_queue->queue, len, max_len);
+ efx_rx_queue_index(rx_queue), len, max_len);
}
- rx_queue->channel->n_rx_overlength++;
+ efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through the generic LRO stack
@@ -507,7 +508,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
- struct efx_channel *channel = rx_queue->channel;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
bool leak_packet = false;
@@ -528,7 +529,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received id %x at %llx+%x %s%s\n",
- rx_queue->queue, index,
+ efx_rx_queue_index(rx_queue), index,
(unsigned long long)rx_buf->dma_addr, len,
(checksummed ? " [SUMMED]" : ""),
(discard ? " [DISCARD]" : ""));
@@ -560,12 +561,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
rx_buf->len = len;
out:
- if (rx_queue->channel->rx_pkt)
- __efx_rx_packet(rx_queue->channel,
- rx_queue->channel->rx_pkt,
- rx_queue->channel->rx_pkt_csummed);
- rx_queue->channel->rx_pkt = rx_buf;
- rx_queue->channel->rx_pkt_csummed = checksummed;
+ if (channel->rx_pkt)
+ __efx_rx_packet(channel,
+ channel->rx_pkt, channel->rx_pkt_csummed);
+ channel->rx_pkt = rx_buf;
+ channel->rx_pkt_csummed = checksummed;
}
/* Handle a received packet. Second half: Touches packet payload. */
@@ -615,7 +615,7 @@ void __efx_rx_packet(struct efx_channel *channel,
EFX_BUG_ON_PARANOID(!skb);
/* Set the SKB flags */
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Pass the packet up */
netif_receive_skb(skb);
@@ -650,15 +650,22 @@ void efx_rx_strategy(struct efx_channel *channel)
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- unsigned int rxq_size;
+ unsigned int entries;
int rc;
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
netif_dbg(efx, probe, efx->net_dev,
- "creating RX queue %d\n", rx_queue->queue);
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
/* Allocate RX buffers */
- rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
- rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
+ rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
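Ring sizes are now runtime parameters, so the hunk above rounds the requested entry count up to a power of two and the ring is indexed by masking free-running counters with ptr_mask, replacing the old compile-time EFX_RXQ_MASK. A standalone sketch of that pattern; roundup_pow2() and the 512-entry minimum are stand-ins for the kernel's roundup_pow_of_two() and EFX_MIN_DMAQ_SIZE.

/* Standalone sketch: size a ring to a power of two and index it by
 * masking free-running counters, as rx_queue->ptr_mask is used above. */
#include <stdio.h>

#define MIN_DMAQ_SIZE 512u   /* stand-in for EFX_MIN_DMAQ_SIZE */

static unsigned roundup_pow2(unsigned v)
{
	unsigned r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned requested = 600;                 /* e.g. efx->rxq_entries  */
	unsigned entries = roundup_pow2(requested);
	unsigned ptr_mask;
	unsigned added_count, removed_count;

	if (entries < MIN_DMAQ_SIZE)
		entries = MIN_DMAQ_SIZE;
	ptr_mask = entries - 1;

	/* Counters run freely; the mask maps them onto ring slots and the
	 * difference gives the fill level even across wrap-around. */
	added_count = 1030;
	removed_count = 1000;
	printf("entries=%u mask=%#x slot=%u fill=%u\n",
	       entries, ptr_mask,
	       added_count & ptr_mask, added_count - removed_count);
	return 0;
}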
@@ -672,20 +679,20 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
+ struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, limit;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "initialising RX queue %d\n", rx_queue->queue);
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
- rx_queue->min_overfill = -1U;
/* Initialise limit fields */
- max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
limit = max_fill * min(rx_refill_limit, 100U) / 100U;
@@ -703,14 +710,14 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
struct efx_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "shutting down RX queue %d\n", rx_queue->queue);
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers NB start at index 0 not current HW ptr */
if (rx_queue->buffer) {
- for (i = 0; i <= EFX_RXQ_MASK; i++) {
+ for (i = 0; i <= rx_queue->ptr_mask; i++) {
rx_buf = efx_rx_buffer(rx_queue, i);
efx_fini_rx_buffer(rx_queue, rx_buf);
}
@@ -720,7 +727,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "destroying RX queue %d\n", rx_queue->queue);
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
efx_nic_remove_rx(rx_queue);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 85f015f005d..da4473b7105 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -506,7 +506,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
- state->packet_count = EFX_TXQ_SIZE / 3;
+ state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
state->skbs = kzalloc(sizeof(state->skbs[0]) *
state->packet_count, GFP_KERNEL);
@@ -567,7 +567,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
} else {
- struct efx_channel *channel = &efx->channel[0];
+ struct efx_channel *channel = efx_get_channel(efx, 0);
if (channel->work_pending)
efx_process_channel_now(channel);
}
@@ -594,6 +594,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode mode;
struct efx_loopback_state *state;
+ struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
int rc = 0;
@@ -634,7 +635,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
}
/* Test both types of TX queue */
- efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 3fab030f8ab..2115f95ddc8 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -450,7 +450,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
- mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good;
+ MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
MAC_STAT(rx_pause, RX_PAUSE_PKTS);
MAC_STAT(rx_control, RX_CONTROL_PKTS);
MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
@@ -651,6 +651,6 @@ struct efx_nic_type siena_a0_nic_type = {
.tx_dc_base = 0x88000,
.rx_dc_base = 0x68000,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH),
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
};
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 6791be90c2f..1bc6c48c96e 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -19,10 +19,7 @@
#include "workarounds.h"
#include "selftest.h"
-/* We expect these MMDs to be in the package. SFT9001 also has a
- * clause 22 extension MMD, but since it doesn't have all the generic
- * MMD registers it is pointless to include it here.
- */
+/* We expect these MMDs to be in the package. */
#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PCS | \
MDIO_DEVS_PHYXS | \
@@ -33,12 +30,6 @@
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
-#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \
- (1 << LOOPBACK_PHYXS) | \
- (1 << LOOPBACK_PCS) | \
- (1 << LOOPBACK_PMAPMD) | \
- (1 << LOOPBACK_PHYXS_WS))
-
/* We complain if we fail to see the link partner as 10G capable this many
* times in a row (must be > 1 as sampling the autoneg. registers is racy)
*/
@@ -50,9 +41,8 @@
#define PMA_PMD_EXT_GMII_EN_WIDTH 1
#define PMA_PMD_EXT_CLK_OUT_LBN 2
#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
-#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 /* SFX7101 only */
+#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
-#define PMA_PMD_EXT_CLK312_LBN 8 /* SFT9001 only */
#define PMA_PMD_EXT_CLK312_WIDTH 1
#define PMA_PMD_EXT_LPOWER_LBN 12
#define PMA_PMD_EXT_LPOWER_WIDTH 1
@@ -84,7 +74,6 @@
#define PMA_PMD_LED_FLASH (3)
#define PMA_PMD_LED_MASK 3
/* All LEDs under hardware control */
-#define SFT9001_PMA_PMD_LED_DEFAULT 0
/* Green and Amber under hardware control, Red off */
#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
@@ -98,31 +87,7 @@
#define PMA_PMD_SPEED_LBN 4
#define PMA_PMD_SPEED_WIDTH 4
-/* Cable diagnostics - SFT9001 only */
-#define PMA_PMD_CDIAG_CTRL_REG 49213
-#define CDIAG_CTRL_IMMED_LBN 15
-#define CDIAG_CTRL_BRK_LINK_LBN 12
-#define CDIAG_CTRL_IN_PROG_LBN 11
-#define CDIAG_CTRL_LEN_UNIT_LBN 10
-#define CDIAG_CTRL_LEN_METRES 1
-#define PMA_PMD_CDIAG_RES_REG 49174
-#define CDIAG_RES_A_LBN 12
-#define CDIAG_RES_B_LBN 8
-#define CDIAG_RES_C_LBN 4
-#define CDIAG_RES_D_LBN 0
-#define CDIAG_RES_WIDTH 4
-#define CDIAG_RES_OPEN 2
-#define CDIAG_RES_OK 1
-#define CDIAG_RES_INVALID 0
-/* Set of 4 registers for pairs A-D */
-#define PMA_PMD_CDIAG_LEN_REG 49175
-
-/* Serdes control registers - SFT9001 only */
-#define PMA_PMD_CSERDES_CTRL_REG 64258
-/* Set the 156.25 MHz output to 312.5 MHz to drive Falcon's XMAC */
-#define PMA_PMD_CSERDES_DEFAULT 0x000f
-
-/* Misc register defines - SFX7101 only */
+/* Misc register defines */
#define PCS_CLOCK_CTRL_REG 55297
#define PLL312_RST_N_LBN 2
@@ -185,121 +150,17 @@ struct tenxpress_phy_data {
int bad_lp_tries;
};
-static ssize_t show_phy_short_reach(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
- int reg;
-
- reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR);
- return sprintf(buf, "%d\n", !!(reg & MDIO_PMA_10GBT_TXPWR_SHORT));
-}
-
-static ssize_t set_phy_short_reach(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
- int rc;
-
- rtnl_lock();
- if (efx->state != STATE_RUNNING) {
- rc = -EBUSY;
- } else {
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
- MDIO_PMA_10GBT_TXPWR_SHORT,
- count != 0 && *buf != '0');
- rc = efx_reconfigure_port(efx);
- }
- rtnl_unlock();
-
- return rc < 0 ? rc : (ssize_t)count;
-}
-
-static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
- set_phy_short_reach);
-
-int sft9001_wait_boot(struct efx_nic *efx)
-{
- unsigned long timeout = jiffies + HZ + 1;
- int boot_stat;
-
- for (;;) {
- boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
- PCS_BOOT_STATUS_REG);
- if (boot_stat >= 0) {
- netif_dbg(efx, hw, efx->net_dev,
- "PHY boot status = %#x\n", boot_stat);
- switch (boot_stat &
- ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
- (3 << PCS_BOOT_PROGRESS_LBN) |
- (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
- (1 << PCS_BOOT_CODE_STARTED_LBN))) {
- case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
- (PCS_BOOT_PROGRESS_CHECKSUM <<
- PCS_BOOT_PROGRESS_LBN)):
- case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
- (PCS_BOOT_PROGRESS_INIT <<
- PCS_BOOT_PROGRESS_LBN) |
- (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
- return -EINVAL;
- case ((PCS_BOOT_PROGRESS_WAIT_MDIO <<
- PCS_BOOT_PROGRESS_LBN) |
- (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
- return (efx->phy_mode & PHY_MODE_SPECIAL) ?
- 0 : -EIO;
- case ((PCS_BOOT_PROGRESS_JUMP <<
- PCS_BOOT_PROGRESS_LBN) |
- (1 << PCS_BOOT_CODE_STARTED_LBN)):
- case ((PCS_BOOT_PROGRESS_JUMP <<
- PCS_BOOT_PROGRESS_LBN) |
- (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
- (1 << PCS_BOOT_CODE_STARTED_LBN)):
- return (efx->phy_mode & PHY_MODE_SPECIAL) ?
- -EIO : 0;
- default:
- if (boot_stat & (1 << PCS_BOOT_FATAL_ERROR_LBN))
- return -EIO;
- break;
- }
- }
-
- if (time_after_eq(jiffies, timeout))
- return -ETIMEDOUT;
-
- msleep(50);
- }
-}
-
static int tenxpress_init(struct efx_nic *efx)
{
- int reg;
-
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- /* Enable 312.5 MHz clock */
- efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
- 1 << CLK312_EN_LBN);
- } else {
- /* Enable 312.5 MHz clock and GMII */
- reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
- reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
- (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
- (1 << PMA_PMD_EXT_CLK312_LBN) |
- (1 << PMA_PMD_EXT_ROBUST_LBN));
-
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
- efx_mdio_set_flag(efx, MDIO_MMD_C22EXT,
- GPHY_XCONTROL_REG, 1 << GPHY_ISOLATE_LBN,
- false);
- }
+ /* Enable 312.5 MHz clock */
+ efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
+ 1 << CLK312_EN_LBN);
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
- 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
- SFX7101_PMA_PMD_LED_DEFAULT);
- }
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
+ 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
+ SFX7101_PMA_PMD_LED_DEFAULT);
return 0;
}
@@ -307,7 +168,6 @@ static int tenxpress_init(struct efx_nic *efx)
static int tenxpress_phy_probe(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data;
- int rc;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
@@ -316,42 +176,15 @@ static int tenxpress_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
- /* Create any special files */
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- rc = device_create_file(&efx->pci_dev->dev,
- &dev_attr_phy_short_reach);
- if (rc)
- goto fail;
- }
-
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45;
-
- efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
- efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full);
- } else {
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
- efx->loopback_modes = (SFT9001_LOOPBACKS |
- FALCON_XMAC_LOOPBACKS |
- FALCON_GMAC_LOOPBACKS);
-
- efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full);
- }
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
return 0;
-
-fail:
- kfree(efx->phy_data);
- efx->phy_data = NULL;
- return rc;
}
static int tenxpress_phy_init(struct efx_nic *efx)
@@ -361,16 +194,6 @@ static int tenxpress_phy_init(struct efx_nic *efx)
falcon_board(efx)->type->init_phy(efx);
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
- if (efx->phy_type == PHY_TYPE_SFT9001A) {
- int reg;
- reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG);
- reg |= (1 << PMA_PMD_EXT_SSR_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG, reg);
- mdelay(200);
- }
-
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
@@ -403,7 +226,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
{
int rc, reg;
- /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
+ /* The XGMAC clock is driven from the SFX7101 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
* requests to fail. */
falcon_stop_nic_stats(efx);
@@ -484,53 +307,18 @@ static bool sfx7101_link_ok(struct efx_nic *efx)
MDIO_DEVS_PHYXS);
}
-static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
-{
- u32 reg;
-
- if (efx_phy_mode_disabled(efx->phy_mode))
- return false;
- else if (efx->loopback_mode == LOOPBACK_GPHY)
- return true;
- else if (efx->loopback_mode)
- return efx_mdio_links_ok(efx,
- MDIO_DEVS_PMAPMD |
- MDIO_DEVS_PHYXS);
-
- /* We must use the same definition of link state as LASI,
- * otherwise we can miss a link state transition
- */
- if (ecmd->speed == 10000) {
- reg = efx_mdio_read(efx, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
- return reg & MDIO_PCS_10GBRT_STAT1_BLKLK;
- } else {
- reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_STATUS_REG);
- return reg & (1 << C22EXT_STATUS_LINK_LBN);
- }
-}
-
static void tenxpress_ext_loopback(struct efx_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
1 << LOOPBACK_NEAR_LBN,
efx->loopback_mode == LOOPBACK_PHYXS);
- if (efx->phy_type != PHY_TYPE_SFX7101)
- efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, GPHY_XCONTROL_REG,
- 1 << GPHY_LOOPBACK_NEAR_LBN,
- efx->loopback_mode == LOOPBACK_GPHY);
}
static void tenxpress_low_power(struct efx_nic *efx)
{
- if (efx->phy_type == PHY_TYPE_SFX7101)
- efx_mdio_set_mmds_lpower(
- efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
- TENXPRESS_REQUIRED_DEVS);
- else
- efx_mdio_set_flag(
- efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG,
- 1 << PMA_PMD_EXT_LPOWER_LBN,
- !!(efx->phy_mode & PHY_MODE_LOW_POWER));
+ efx_mdio_set_mmds_lpower(
+ efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
+ TENXPRESS_REQUIRED_DEVS);
}
static int tenxpress_phy_reconfigure(struct efx_nic *efx)
@@ -550,12 +338,7 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
if (loop_reset || phy_mode_change) {
tenxpress_special_reset(efx);
-
- /* Reset XAUI if we were in 10G, and are staying
- * in 10G. If we're moving into and out of 10G
- * then xaui will be reset anyway */
- if (EFX_IS10G(efx))
- falcon_reset_xaui(efx);
+ falcon_reset_xaui(efx);
}
tenxpress_low_power(efx);
@@ -578,29 +361,12 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- efx->link_state.up = sfx7101_link_ok(efx);
- efx->link_state.speed = 10000;
- efx->link_state.fd = true;
- efx->link_state.fc = efx_mdio_get_pause(efx);
-
- sfx7101_check_bad_lp(efx, efx->link_state.up);
- } else {
- struct ethtool_cmd ecmd;
-
- /* Check the LASI alarm first */
- if (efx->loopback_mode == LOOPBACK_NONE &&
- !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
- MDIO_PMA_LASI_LSALARM))
- return false;
+ efx->link_state.up = sfx7101_link_ok(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx_mdio_get_pause(efx);
- tenxpress_get_settings(efx, &ecmd);
-
- efx->link_state.up = sft9001_link_ok(efx, &ecmd);
- efx->link_state.speed = ecmd.speed;
- efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
- efx->link_state.fc = efx_mdio_get_pause(efx);
- }
+ sfx7101_check_bad_lp(efx, efx->link_state.up);
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -621,10 +387,6 @@ static void sfx7101_phy_fini(struct efx_nic *efx)
static void tenxpress_phy_remove(struct efx_nic *efx)
{
- if (efx->phy_type == PHY_TYPE_SFT9001B)
- device_remove_file(&efx->pci_dev->dev,
- &dev_attr_phy_short_reach);
-
kfree(efx->phy_data);
efx->phy_data = NULL;
}
@@ -647,10 +409,7 @@ void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
break;
default:
- if (efx->phy_type == PHY_TYPE_SFX7101)
- reg = SFX7101_PMA_PMD_LED_DEFAULT;
- else
- reg = SFT9001_PMA_PMD_LED_DEFAULT;
+ reg = SFX7101_PMA_PMD_LED_DEFAULT;
break;
}
@@ -685,102 +444,12 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
return rc;
}
-static const char *const sft9001_test_names[] = {
- "bist",
- "cable.pairA.status",
- "cable.pairB.status",
- "cable.pairC.status",
- "cable.pairD.status",
- "cable.pairA.length",
- "cable.pairB.length",
- "cable.pairC.length",
- "cable.pairD.length",
-};
-
-static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index)
-{
- if (index < ARRAY_SIZE(sft9001_test_names))
- return sft9001_test_names[index];
- return NULL;
-}
-
-static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
-{
- int rc = 0, rc2, i, ctrl_reg, res_reg;
-
- /* Initialise cable diagnostic results to unknown failure */
- for (i = 1; i < 9; ++i)
- results[i] = -1;
-
- /* Run cable diagnostics; wait up to 5 seconds for them to complete.
- * A cable fault is not a self-test failure, but a timeout is. */
- ctrl_reg = ((1 << CDIAG_CTRL_IMMED_LBN) |
- (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
- if (flags & ETH_TEST_FL_OFFLINE) {
- /* Break the link in order to run full diagnostics. We
- * must reset the PHY to resume normal service. */
- ctrl_reg |= (1 << CDIAG_CTRL_BRK_LINK_LBN);
- }
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG,
- ctrl_reg);
- i = 0;
- while (efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG) &
- (1 << CDIAG_CTRL_IN_PROG_LBN)) {
- if (++i == 50) {
- rc = -ETIMEDOUT;
- goto out;
- }
- msleep(100);
- }
- res_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_RES_REG);
- for (i = 0; i < 4; i++) {
- int pair_res =
- (res_reg >> (CDIAG_RES_A_LBN - i * CDIAG_RES_WIDTH))
- & ((1 << CDIAG_RES_WIDTH) - 1);
- int len_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
- PMA_PMD_CDIAG_LEN_REG + i);
- if (pair_res == CDIAG_RES_OK)
- results[1 + i] = 1;
- else if (pair_res == CDIAG_RES_INVALID)
- results[1 + i] = -1;
- else
- results[1 + i] = -pair_res;
- if (pair_res != CDIAG_RES_INVALID &&
- pair_res != CDIAG_RES_OPEN &&
- len_reg != 0xffff)
- results[5 + i] = len_reg;
- }
-
-out:
- if (flags & ETH_TEST_FL_OFFLINE) {
- /* Reset, running the BIST and then resuming normal service. */
- rc2 = tenxpress_special_reset(efx);
- results[0] = rc2 ? -1 : 1;
- if (!rc)
- rc = rc2;
-
- efx_mdio_an_reconfigure(efx);
- }
-
- return rc;
-}
-
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
u32 adv = 0, lpa = 0;
int reg;
- if (efx->phy_type != PHY_TYPE_SFX7101) {
- reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL);
- if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
- adv |= ADVERTISED_1000baseT_Full;
- reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_STATUS);
- if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
- lpa |= ADVERTISED_1000baseT_Half;
- if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
- lpa |= ADVERTISED_1000baseT_Full;
- }
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
adv |= ADVERTISED_10000baseT_Full;
@@ -790,23 +459,9 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
- if (efx->phy_type != PHY_TYPE_SFX7101) {
- ecmd->supported |= (SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full);
- if (ecmd->speed != SPEED_10000) {
- ecmd->eth_tp_mdix =
- (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
- PMA_PMD_XSTATUS_REG) &
- (1 << PMA_PMD_XSTAT_MDIX_LBN))
- ? ETH_TP_MDI_X : ETH_TP_MDI;
- }
- }
-
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
- if (efx->loopback_mode == LOOPBACK_GPHY)
- ecmd->speed = SPEED_1000;
- else if (LOOPBACK_EXTERNAL(efx))
+ if (LOOPBACK_EXTERNAL(efx))
ecmd->speed = SPEED_10000;
}
@@ -825,16 +480,6 @@ static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
advertising & ADVERTISED_10000baseT_Full);
}
-static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
-{
- efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL,
- 1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
- advertising & ADVERTISED_1000baseT_Full);
- efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G,
- advertising & ADVERTISED_10000baseT_Full);
-}
-
struct efx_phy_operations falcon_sfx7101_phy_ops = {
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
@@ -849,18 +494,3 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};
-
-struct efx_phy_operations falcon_sft9001_phy_ops = {
- .probe = tenxpress_phy_probe,
- .init = tenxpress_phy_init,
- .reconfigure = tenxpress_phy_reconfigure,
- .poll = tenxpress_phy_poll,
- .fini = efx_port_dummy_op_void,
- .remove = tenxpress_phy_remove,
- .get_settings = tenxpress_get_settings,
- .set_settings = tenxpress_set_settings,
- .set_npage_adv = sft9001_set_npage_adv,
- .test_alive = efx_mdio_test_alive,
- .test_name = sft9001_test_name,
- .run_tests = sft9001_run_tests,
-};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index c6942da2c99..11726989fe2 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -28,7 +28,7 @@
* The tx_queue descriptor ring fill-level must fall below this value
* before we restart the netif queue
*/
-#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
+#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
/* We need to be able to nest calls to netif_tx_stop_queue(), partly
* because of the 2 hardware queues associated with each core queue,
@@ -37,8 +37,9 @@
void efx_stop_queue(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
- if (!channel->tx_queue)
+ if (!tx_queue)
return;
spin_lock_bh(&channel->tx_stop_lock);
@@ -46,9 +47,8 @@ void efx_stop_queue(struct efx_channel *channel)
atomic_inc(&channel->tx_stop_count);
netif_tx_stop_queue(
- netdev_get_tx_queue(
- efx->net_dev,
- channel->tx_queue->queue / EFX_TXQ_TYPES));
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES));
spin_unlock_bh(&channel->tx_stop_lock);
}
@@ -57,8 +57,9 @@ void efx_stop_queue(struct efx_channel *channel)
void efx_wake_queue(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
- if (!channel->tx_queue)
+ if (!tx_queue)
return;
local_bh_disable();
@@ -66,9 +67,8 @@ void efx_wake_queue(struct efx_channel *channel)
&channel->tx_stop_lock)) {
netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
netif_tx_wake_queue(
- netdev_get_tx_queue(
- efx->net_dev,
- channel->tx_queue->queue / EFX_TXQ_TYPES));
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES));
spin_unlock(&channel->tx_stop_lock);
}
local_bh_enable();
@@ -207,7 +207,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
/* Map for DMA. Use pci_map_single rather than pci_map_page
* since this is more efficient on machines with sparse
@@ -244,14 +244,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
&tx_queue->read_count;
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
if (unlikely(q_space-- <= 0))
goto stop;
smp_mb();
--tx_queue->stopped;
}
- insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -320,7 +320,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
- insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
efx_dequeue_buffer(tx_queue, buffer);
buffer->len = 0;
@@ -350,8 +350,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
- stop_index = (index + 1) & EFX_TXQ_MASK;
- read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -368,7 +368,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
buffer->len = 0;
++tx_queue->read_count;
- read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
@@ -390,9 +390,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
- tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- tx_queue += EFX_TXQ_TYPE_OFFLOAD;
+ tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
+ skb->ip_summed == CHECKSUM_PARTIAL ?
+ EFX_TXQ_TYPE_OFFLOAD : 0);
return efx_enqueue_skb(tx_queue, skb);
}
@@ -402,7 +402,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
- EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
+ EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index);
@@ -412,7 +412,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
smp_mb();
if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
- if (fill_level < EFX_TXQ_THRESHOLD) {
+ if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
/* Do this under netif_tx_lock(), to avoid racing
@@ -430,18 +430,24 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- unsigned int txq_size;
+ unsigned int entries;
int i, rc;
- netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
- tx_queue->queue);
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
- txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
- tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
+ tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
- for (i = 0; i <= EFX_TXQ_MASK; ++i)
+ for (i = 0; i <= tx_queue->ptr_mask; ++i)
tx_queue->buffer[i].continuation = true;
/* Allocate hardware ring */
@@ -481,7 +487,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
- buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer);
buffer->continuation = true;
buffer->len = 0;
@@ -741,7 +747,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
/* -1 as there is no way to represent all descriptors used */
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
while (1) {
if (unlikely(q_space-- <= 0)) {
@@ -757,7 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
*(volatile unsigned *)&tx_queue->read_count;
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
if (unlikely(q_space-- <= 0)) {
*final_buffer = NULL;
return 1;
@@ -766,13 +772,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
--tx_queue->stopped;
}
- insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
++tx_queue->insert_count;
EFX_BUG_ON_PARANOID(tx_queue->insert_count -
- tx_queue->read_count >
- EFX_TXQ_MASK);
+ tx_queue->read_count >=
+ efx->txq_entries);
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->len);
@@ -813,7 +819,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
{
struct efx_tx_buffer *buffer;
- buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
+ buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -838,7 +844,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
buffer = &tx_queue->buffer[tx_queue->insert_count &
- EFX_TXQ_MASK];
+ tx_queue->ptr_mask];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
if (buffer->unmap_len) {
@@ -1168,7 +1174,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
unsigned i;
if (tx_queue->buffer) {
- for (i = 0; i <= EFX_TXQ_MASK; ++i)
+ for (i = 0; i <= tx_queue->ptr_mask; ++i)
efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
}
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
new file mode 100644
index 00000000000..351794a7921
--- /dev/null
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -0,0 +1,560 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for Transwitch/Mysticom CX4 retimer
+ * see www.transwitch.com, part is TXC-43128
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "nic.h"
+
+/* We expect these MMDs to be in the package */
+#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
+ MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PHYXS)
+
+#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/**************************************************************************
+ *
+ * Compile-time config
+ *
+ **************************************************************************
+ */
+#define TXCNAME "TXC43128"
+/* Total length of time we'll wait for the PHY to come out of reset (ms) */
+#define TXC_MAX_RESET_TIME 500
+/* Interval between checks (ms) */
+#define TXC_RESET_WAIT 10
+/* How long to run BIST (us) */
+#define TXC_BIST_DURATION 50
+
+/**************************************************************************
+ *
+ * Register definitions
+ *
+ **************************************************************************
+ */
+
+/* Command register */
+#define TXC_GLRGS_GLCMD 0xc004
+/* Useful bits in command register */
+/* Lane power-down */
+#define TXC_GLCMD_L01PD_LBN 5
+#define TXC_GLCMD_L23PD_LBN 6
+/* Limited SW reset: preserves configuration but
+ * initiates a logic reset. Self-clearing */
+#define TXC_GLCMD_LMTSWRST_LBN 14
+
+/* Signal Quality Control */
+#define TXC_GLRGS_GSGQLCTL 0xc01a
+/* Enable bit */
+#define TXC_GSGQLCT_SGQLEN_LBN 15
+/* Lane selection */
+#define TXC_GSGQLCT_LNSL_LBN 13
+#define TXC_GSGQLCT_LNSL_WIDTH 2
+
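
The register macros here follow the driver's usual _LBN/_WIDTH convention: _LBN is the lowest bit number of a field and _WIDTH its width in bits. A hedged sketch of how such a pair is consumed; the function, the MMD chosen and the lane argument are illustrative, and this particular field is not exercised later in the file:

	/* Sketch: consuming an LBN/WIDTH pair (illustrative, not part of the patch). */
	static void example_select_lane(struct efx_nic *efx, int lane)
	{
		int reg = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_GLRGS_GSGQLCTL);

		reg &= ~(((1 << TXC_GSGQLCT_LNSL_WIDTH) - 1) << TXC_GSGQLCT_LNSL_LBN);
		reg |= lane << TXC_GSGQLCT_LNSL_LBN;
		efx_mdio_write(efx, MDIO_MMD_PCS, TXC_GLRGS_GSGQLCTL, reg);
	}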
+/* Analog TX control */
+#define TXC_ALRGS_ATXCTL 0xc040
+/* Lane power-down */
+#define TXC_ATXCTL_TXPD3_LBN 15
+#define TXC_ATXCTL_TXPD2_LBN 14
+#define TXC_ATXCTL_TXPD1_LBN 13
+#define TXC_ATXCTL_TXPD0_LBN 12
+
+/* Amplitude on lanes 0, 1 */
+#define TXC_ALRGS_ATXAMP0 0xc041
+/* Amplitude on lanes 2, 3 */
+#define TXC_ALRGS_ATXAMP1 0xc042
+/* Bit position of value for lane 0 (or 2) */
+#define TXC_ATXAMP_LANE02_LBN 3
+/* Bit position of value for lane 1 (or 3) */
+#define TXC_ATXAMP_LANE13_LBN 11
+
+#define TXC_ATXAMP_1280_mV 0
+#define TXC_ATXAMP_1200_mV 8
+#define TXC_ATXAMP_1120_mV 12
+#define TXC_ATXAMP_1060_mV 14
+#define TXC_ATXAMP_0820_mV 25
+#define TXC_ATXAMP_0720_mV 26
+#define TXC_ATXAMP_0580_mV 27
+#define TXC_ATXAMP_0440_mV 28
+
+#define TXC_ATXAMP_0820_BOTH \
+ ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
+ | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
+
+#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
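
For reference, the combined 820 mV setting defined above works out numerically as follows (plain arithmetic on the values already given; nothing new is asserted about the hardware):

	TXC_ATXAMP_0820_BOTH = (25 << 3) | (25 << 11)
	                     = 0x00c8   | 0xc800
	                     = 0xc8c8	/* 820 mV on lanes 0/2 and 1/3 alike */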
+
+/* Preemphasis on lanes 0, 1 */
+#define TXC_ALRGS_ATXPRE0 0xc043
+/* Preemphasis on lanes 2, 3 */
+#define TXC_ALRGS_ATXPRE1 0xc044
+
+#define TXC_ATXPRE_NONE 0
+#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
+
+#define TXC_ALRGS_ARXCTL 0xc045
+/* Lane power-down */
+#define TXC_ARXCTL_RXPD3_LBN 15
+#define TXC_ARXCTL_RXPD2_LBN 14
+#define TXC_ARXCTL_RXPD1_LBN 13
+#define TXC_ARXCTL_RXPD0_LBN 12
+
+/* Main control */
+#define TXC_MRGS_CTL 0xc340
+/* Bits in main control */
+#define TXC_MCTL_RESET_LBN 15 /* Self clear */
+#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
+#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
+
+/* GPIO output */
+#define TXC_GPIO_OUTPUT 0xc346
+#define TXC_GPIO_DIR 0xc348
+
+/* Vendor-specific BIST registers */
+#define TXC_BIST_CTL 0xc280
+#define TXC_BIST_TXFRMCNT 0xc281
+#define TXC_BIST_RX0FRMCNT 0xc282
+#define TXC_BIST_RX1FRMCNT 0xc283
+#define TXC_BIST_RX2FRMCNT 0xc284
+#define TXC_BIST_RX3FRMCNT 0xc285
+#define TXC_BIST_RX0ERRCNT 0xc286
+#define TXC_BIST_RX1ERRCNT 0xc287
+#define TXC_BIST_RX2ERRCNT 0xc288
+#define TXC_BIST_RX3ERRCNT 0xc289
+
+/* BIST type (controls bit pattern in test) */
+#define TXC_BIST_CTRL_TYPE_LBN 10
+#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
+#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
+#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
+#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
+/* Set this to 1 for 10 bit and 0 for 8 bit */
+#define TXC_BIST_CTRL_B10EN_LBN 12
+/* Enable BIST (write 0 to disable) */
+#define TXC_BIST_CTRL_ENAB_LBN 13
+/* Stop BIST (self-clears when stop complete) */
+#define TXC_BIST_CTRL_STOP_LBN 14
+/* Start BIST (cleared by writing 1 to STOP) */
+#define TXC_BIST_CTRL_STRT_LBN 15
+
+/* Mt. Diablo test configuration */
+#define TXC_MTDIABLO_CTRL 0xc34f
+#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
+
+struct txc43128_data {
+ unsigned long bug10934_timer;
+ enum efx_phy_mode phy_mode;
+ enum efx_loopback_mode loopback_mode;
+};
+
+/* The PHY sometimes needs a reset to bring the link back up. So long as
+ * it reports link down, we reset it every 5 seconds.
+ */
+#define BUG10934_RESET_INTERVAL (5 * HZ)
+
+/* Perform a reset that doesn't clear configuration changes */
+static void txc_reset_logic(struct efx_nic *efx);
+
+/* Set the output value of a gpio */
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
+}
+
+/* Set up the GPIO direction register */
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
+}
+
+/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
+ * global reset (it's less clear what reset of other MMDs does). */
+static int txc_reset_phy(struct efx_nic *efx)
+{
+ int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
+ TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
+ TXC_RESET_WAIT);
+ if (rc < 0)
+ goto fail;
+
+ /* Check that all the MMDs we expect are present and responding. */
+ rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
+ if (rc < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
+ return rc;
+}
+
+/* Run a single BIST on one MMD */
+static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
+{
+ int ctrl, bctl;
+ int lane;
+ int rc = 0;
+
+ /* Put the PMA into test loopback using the Mt Diablo register, per the app note */
+ ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
+ ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+
+ /* The BIST app. note lists these as 3 distinct steps. */
+ /* Set the BIST type */
+ bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* Set the BSTEN bit in the BIST Control register to enable */
+ bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* Set the BSTRT bit in the BIST Control register */
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL,
+ bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
+
+ /* Wait. */
+ udelay(TXC_BIST_DURATION);
+
+ /* Set the BSTOP bit in the BIST Control register */
+ bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* The STOP bit should go off when things have stopped */
+ while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
+ bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
+
+ /* Check all the error counts are 0 and all the frame counts are
+ * non-zero */
+ for (lane = 0; lane < 4; lane++) {
+ int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
+ if (count != 0) {
+ netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
+ "Lane %d had %d errs\n", lane, count);
+ rc = -EIO;
+ }
+ count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
+ if (count == 0) {
+ netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
+ "Lane %d got 0 frames\n", lane);
+ rc = -EIO;
+ }
+ }
+
+ if (rc == 0)
+ netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
+
+ /* Disable BIST */
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
+
+ /* Turn off loopback */
+ ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+
+ return rc;
+}
+
+static int txc_bist(struct efx_nic *efx)
+{
+ return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
+}
+
+/* Push the non-configurable defaults into the PHY. This must be
+ * done after every full reset */
+static void txc_apply_defaults(struct efx_nic *efx)
+{
+ int mctrl;
+
+ /* Turn the amplitude down and preemphasis off on the host side
+ * (PHY<->MAC), as this is believed to be less likely to upset
+ * Falcon and no adverse effects have been noted. It probably
+ * also saves a picowatt or two */
+
+ /* Turn off preemphasis */
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
+
+ /* Turn down the amplitude */
+ efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
+ efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
+
+ /* Set the line side amplitude and preemphasis to the databook
+ * defaults as an erratum causes them to be 0 on at least some
+ * PHY revisions */
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
+
+ /* Set up the LEDs */
+ mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
+
+ /* Set the Green and Red LEDs to their default modes */
+ mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
+
+ /* Databook recommends doing this after configuration changes */
+ txc_reset_logic(efx);
+
+ falcon_board(efx)->type->init_phy(efx);
+}
+
+static int txc43128_phy_probe(struct efx_nic *efx)
+{
+ struct txc43128_data *phy_data;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ efx->mdio.mmds = TXC_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ return 0;
+}
+
+/* Initialisation entry point for this PHY driver */
+static int txc43128_phy_init(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = txc_reset_phy(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = txc_bist(efx);
+ if (rc < 0)
+ return rc;
+
+ txc_apply_defaults(efx);
+
+ return 0;
+}
+
+/* Set the lane power down state in the global registers */
+static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
+{
+ int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
+ int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+
+ if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
+ ctl &= ~pd;
+ else
+ ctl |= pd;
+
+ efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
+}
+
+/* Set the lane power down state in the analog control registers */
+static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
+{
+ int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
+ | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
+ int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
+ | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
+ int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
+ int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
+
+ if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
+ txctl &= ~txpd;
+ rxctl &= ~rxpd;
+ } else {
+ txctl |= txpd;
+ rxctl |= rxpd;
+ }
+
+ efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
+ efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
+}
+
+static void txc_set_power(struct efx_nic *efx)
+{
+ /* According to the data book, all the MMDs can do low power */
+ efx_mdio_set_mmds_lpower(efx,
+ !!(efx->phy_mode & PHY_MODE_LOW_POWER),
+ TXC_REQUIRED_DEVS);
+
+ /* Global register bank is in PCS, PHY XS. These control the host
+ * side and line side settings respectively. */
+ txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
+ txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
+
+ /* Analog register bank in PMA/PMD, PHY XS */
+ txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
+ txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
+}
+
+static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
+{
+ int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ int tries = 50;
+
+ val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
+ efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
+ while (tries--) {
+ val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
+ break;
+ udelay(1);
+ }
+ if (!tries)
+ netif_info(efx, hw, efx->net_dev,
+ TXCNAME " Logic reset timed out!\n");
+}
+
+/* Perform a logic reset. This preserves the configuration registers
+ * and is needed for some configuration changes to take effect */
+static void txc_reset_logic(struct efx_nic *efx)
+{
+ /* The data sheet claims we can do the logic reset on either the
+ * PCS or the PHYXS and the result is a reset of both host- and
+ * line-side logic. */
+ txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
+}
+
+static bool txc43128_phy_read_link(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
+}
+
+static int txc43128_phy_reconfigure(struct efx_nic *efx)
+{
+ struct txc43128_data *phy_data = efx->phy_data;
+ enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
+ bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
+
+ if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
+ txc_reset_phy(efx);
+ txc_apply_defaults(efx);
+ falcon_reset_xaui(efx);
+ mode_change &= ~PHY_MODE_TX_DISABLED;
+ }
+
+ efx_mdio_transmit_disable(efx);
+ efx_mdio_phy_reconfigure(efx);
+ if (mode_change & PHY_MODE_LOW_POWER)
+ txc_set_power(efx);
+
+ /* The data sheet claims this is required after every reconfiguration
+ * (note at end of 7.1), but we mustn't do it when nothing changes as
+ * it glitches the link, and reconfigure gets called on link change,
+ * so we get an IRQ storm on link up. */
+ if (loop_change || mode_change)
+ txc_reset_logic(efx);
+
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->loopback_mode = efx->loopback_mode;
+
+ return 0;
+}
+
+static void txc43128_phy_fini(struct efx_nic *efx)
+{
+ /* Disable link events */
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+static void txc43128_phy_remove(struct efx_nic *efx)
+{
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+/* Periodic callback: this exists mainly to poll link status as we
+ * don't use LASI interrupts */
+static bool txc43128_phy_poll(struct efx_nic *efx)
+{
+ struct txc43128_data *data = efx->phy_data;
+ bool was_up = efx->link_state.up;
+
+ efx->link_state.up = txc43128_phy_read_link(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+
+ if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
+ data->bug10934_timer = jiffies;
+ } else {
+ if (time_after_eq(jiffies, (data->bug10934_timer +
+ BUG10934_RESET_INTERVAL))) {
+ data->bug10934_timer = jiffies;
+ txc_reset_logic(efx);
+ }
+ }
+
+ return efx->link_state.up != was_up;
+}
+
+static const char *txc43128_test_names[] = {
+ "bist"
+};
+
+static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
+{
+ if (index < ARRAY_SIZE(txc43128_test_names))
+ return txc43128_test_names[index];
+ return NULL;
+}
+
+static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+{
+ int rc;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return 0;
+
+ rc = txc_reset_phy(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = txc_bist(efx);
+ txc_apply_defaults(efx);
+ results[0] = rc ? -1 : 1;
+ return rc;
+}
+
+static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset(&efx->mdio, ecmd);
+}
+
+struct efx_phy_operations falcon_txc_phy_ops = {
+ .probe = txc43128_phy_probe,
+ .init = txc43128_phy_init,
+ .reconfigure = txc43128_phy_reconfigure,
+ .poll = txc43128_phy_poll,
+ .fini = txc43128_phy_fini,
+ .remove = txc43128_phy_remove,
+ .get_settings = txc43128_get_settings,
+ .set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
+ .run_tests = txc43128_run_tests,
+ .test_name = txc43128_test_name,
+};
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 782e45a613d..e0d63083c3a 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -19,9 +19,7 @@
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
-#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
-#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
- (efx)->phy_type == PHY_TYPE_SFT9001B)
+#define EFX_WORKAROUND_10G(efx) 1
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -58,9 +56,4 @@
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
-/* Need to send XNP pages for 100BaseT */
-#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
-/* Don't restart AN in near-side loopback */
-#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
-
#endif /* EFX_WORKAROUNDS_H */
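
With the SFT9001 PHY support gone, EFX_WORKAROUND_10G(efx) collapses to a constant 1, so existing call sites keep their shape but the guarded code is now unconditional. A hedged sketch of such a call site; the helper name is hypothetical, not taken from the driver:

	/* Hypothetical call site: workaround macros take the NIC pointer so
	 * that constant and per-board checks share one calling convention. */
	if (EFX_WORKAROUND_10G(efx))
		apply_10g_fixup(efx);	/* hypothetical helper */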
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c65f8..9265315baa0 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
err_out_free_page:
free_page((unsigned long) sp->srings);
err_out_free_dev:
- kfree(dev);
+ free_netdev(dev);
err_out:
return err;
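
The sgiseeq change above is a real bug fix: a struct net_device comes from the netdev allocators and carries state of its own, so error paths must release it with free_netdev() rather than kfree(). A minimal sketch of the expected pairing; the private-struct and function names are illustrative:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	struct example_priv { int dummy; };	/* illustrative private data */

	static int example_probe(void)
	{
		struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));

		if (!dev)
			return -ENOMEM;

		if (register_netdev(dev)) {
			/* Undo alloc_etherdev() with free_netdev(), never kfree(). */
			free_netdev(dev);
			return -ENODEV;
		}
		return 0;
	}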
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index f5a9eb1df59..50259dfec58 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -798,7 +798,7 @@ static int sh_eth_rx(struct net_device *ndev)
skb->dev = ndev;
sh_eth_set_receive_align(skb);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
if (entry >= RX_RING_SIZE - 1)
@@ -1031,7 +1031,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
mdp->duplex = -1;
/* Try connect to PHY */
- phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
+ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
static int sh_eth_drv_probe(struct platform_device *pdev)
{
- int ret, i, devno = 0;
+ int ret, devno = 0;
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp;
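
The sh_eth receive hunk above swaps an explicit ip_summed store for skb_checksum_none_assert(): a freshly allocated skb already carries CHECKSUM_NONE, so the helper merely documents (and, with debug checks enabled, verifies) that assumption instead of rewriting the field. Roughly, and only as a sketch of its behaviour rather than a quote of the kernel source:

	#include <linux/skbuff.h>

	/* Approximate behaviour of skb_checksum_none_assert() (sketch). */
	static inline void checksum_none_assert_sketch(const struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}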
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index bbbded76ff1..58183686709 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -832,7 +832,7 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
outl(0, ee_addr);
eeprom_delay();
- return (retval);
+ return retval;
}
/* Read and write the MII management registers using software-generated
@@ -1042,7 +1042,7 @@ sis900_open(struct net_device *net_dev)
init_timer(&sis_priv->timer);
sis_priv->timer.expires = jiffies + HZ;
sis_priv->timer.data = (unsigned long)net_dev;
- sis_priv->timer.function = &sis900_timer;
+ sis_priv->timer.function = sis900_timer;
add_timer(&sis_priv->timer);
return 0;
@@ -2247,9 +2247,9 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
/* leave 8 or 7 most significant bits */
if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
- return ((int)(crc >> 24));
+ return (int)(crc >> 24);
else
- return ((int)(crc >> 25));
+ return (int)(crc >> 25);
}
/**
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c
index 5310d39b573..e395ace3120 100644
--- a/drivers/net/skfp/cfm.c
+++ b/drivers/net/skfp/cfm.c
@@ -542,8 +542,8 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
*/
int cfm_get_mac_input(struct s_smc *smc)
{
- return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
- smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA) ;
+ return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
+ smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA;
}
/*
@@ -553,8 +553,8 @@ int cfm_get_mac_input(struct s_smc *smc)
*/
int cfm_get_mac_output(struct s_smc *smc)
{
- return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
- smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA) ;
+ return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
+ smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA;
}
static char path_iso[] = {
@@ -623,5 +623,5 @@ int cem_build_path(struct s_smc *smc, char *to, int path_index)
LINT_USE(path_index);
- return(len) ;
+ return len;
}
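
The skfp edits in this and the following files are a mechanical style cleanup: return is a statement, not a function call, so the redundant parentheses (and the stray space before the semicolon) go away. In miniature, the two forms below are identical in meaning:

	static int old_style(void) { return (0) ; }	/* before */
	static int new_style(void) { return 0; }	/* after  */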
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
index c77cc14b322..07da97c303d 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/skfp/drvfbi.c
@@ -267,7 +267,7 @@ void timer_irq(struct s_smc *smc)
int pcm_get_s_port(struct s_smc *smc)
{
SK_UNUSED(smc) ;
- return(PS) ;
+ return PS;
}
/*
@@ -366,7 +366,7 @@ void sm_pm_bypass_req(struct s_smc *smc, int mode)
*/
int sm_pm_bypass_present(struct s_smc *smc)
{
- return( (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE: FALSE) ;
+ return (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE : FALSE;
}
void plc_clear_irq(struct s_smc *smc, int p)
@@ -483,9 +483,9 @@ static int is_equal_num(char comp1[], char comp2[], int num)
for (i = 0 ; i < num ; i++) {
if (comp1[i] != comp2[i])
- return (0) ;
+ return 0;
}
- return (1) ;
+ return 1;
} /* is_equal_num */
@@ -522,18 +522,18 @@ int set_oi_id_def(struct s_smc *smc)
i++ ;
break ; /* entry ok */
default:
- return (1) ; /* invalid oi_status */
+ return 1; /* invalid oi_status */
}
}
if (i == 0)
- return (2) ;
+ return 2;
if (!act_entries)
- return (3) ;
+ return 3;
/* ok, we have a valid OEM data base with an active entry */
smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ;
- return (0) ;
+ return 0;
}
#endif /* MULT_OEM */
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index e8387d25f24..8639a0884f5 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -135,7 +135,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ;
- return(fs) ;
+ return fs;
}
msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
@@ -147,7 +147,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* error in frame: para ESS command was not found
*/
DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0);
- return(fs) ;
+ return fs;
}
DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ;
@@ -175,12 +175,12 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* local and no static allocation is used
*/
if (!local || smc->mib.fddiESSPayload)
- return(fs) ;
+ return fs;
p = (void *) sm_to_para(smc,sm,SMT_P0019) ;
for (i = 0; i < 5; i++) {
if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) {
- return(fs) ;
+ return fs;
}
}
@@ -199,10 +199,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
sm->smt_dest = smt_sba_da ;
if (smc->ess.local_sba_active)
- return(fs | I_INDICATOR) ;
+ return fs | I_INDICATOR;
if (!(db = smt_get_mbuf(smc)))
- return(fs) ;
+ return fs;
db->sm_len = mb->sm_len ;
db->sm_off = mb->sm_off ;
@@ -212,7 +212,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
(struct smt_header *)(db->sm_data+db->sm_off),
"RAF") ;
smt_send_frame(smc,db,FC_SMT_INFO,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -221,7 +221,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if (smt_check_para(smc,sm,plist_raf_alc_res)) {
DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -242,7 +242,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
(sm->smt_tid != smc->ess.alloc_trans_id)) {
DB_ESS("ESS: Allocation Responce not accepted\n",0,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -268,7 +268,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
(void)process_bw_alloc(smc,(long)payload,(long)overhead) ;
- return(fs) ;
+ return fs;
/* end of Process Allocation Request */
/*
@@ -280,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if (sm->smt_type != SMT_REQUEST) {
DB_ESS("ESS: Do not process Change Responses\n",0,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -288,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if (smt_check_para(smc,sm,plist_raf_chg_req)) {
DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -300,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
!= PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -319,14 +319,14 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* process the bandwidth allocation
*/
if(!process_bw_alloc(smc,(long)payload,(long)overhead))
- return(fs) ;
+ return fs;
/*
* send an RAF Change Reply
*/
ess_send_response(smc,sm,CHANGE_ALLOCATION) ;
- return(fs) ;
+ return fs;
/* end of Process Change Request */
/*
@@ -338,7 +338,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if (sm->smt_type != SMT_REQUEST) {
DB_ESS("ESS: Do not process a Report Reply\n",0,0) ;
- return(fs) ;
+ return fs;
}
DB_ESSN(2,"ESS: Report Request from %s\n",
@@ -349,7 +349,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if (msg_res_type != SYNC_BW) {
DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ;
- return(fs) ;
+ return fs;
}
/*
@@ -357,7 +357,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
ess_send_response(smc,sm,REPORT_ALLOCATION) ;
- return(fs) ;
+ return fs;
/* end of Process Report Request */
default:
@@ -368,7 +368,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
break ;
}
- return(fs) ;
+ return fs;
}
/*
@@ -418,17 +418,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
*/
/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ;
- return(FALSE) ;
+ return FALSE;
}
if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ;
- return(FALSE) ;
+ return FALSE;
} */
/* preliminary */
if (payload > MAX_PAYLOAD || overhead > 5000) {
DB_ESS("ESS: payload / overhead not accepted\n",0,0) ;
- return(FALSE) ;
+ return FALSE;
}
/*
@@ -468,7 +468,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
ess_config_fifo(smc) ;
set_formac_tsync(smc,smc->ess.sync_bw) ;
- return(TRUE) ;
+ return TRUE;
}
static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 9d8d1ac4817..ca4e7bb6a5a 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -112,8 +112,8 @@ static u_long mac_get_tneg(struct s_smc *smc)
u_long tneg ;
tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ;
- return((u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
- 0xffe00000L)) ;
+ return (u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
+ 0xffe00000L) ;
}
void mac_update_counter(struct s_smc *smc)
@@ -163,7 +163,7 @@ static u_long read_mdr(struct s_smc *smc, unsigned int addr)
/* is used */
p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
p += (u_long)inpw(FM_A(FM_MDRL)) ;
- return(p) ;
+ return p;
}
#endif
@@ -887,7 +887,7 @@ int init_fplus(struct s_smc *smc)
/* make sure all PCI settings are correct */
mac_do_pci_fix(smc) ;
- return(init_mac(smc,1)) ;
+ return init_mac(smc, 1);
/* enable_formac(smc) ; */
}
@@ -989,7 +989,7 @@ static int init_mac(struct s_smc *smc, int all)
}
smc->hw.hw_state = STARTED ;
- return(0) ;
+ return 0;
}
@@ -1049,7 +1049,7 @@ void sm_ma_control(struct s_smc *smc, int mode)
int sm_mac_get_tx_state(struct s_smc *smc)
{
- return((inpw(FM_A(FM_STMCHN))>>4)&7) ;
+ return (inpw(FM_A(FM_STMCHN))>>4) & 7;
}
/*
@@ -1084,9 +1084,9 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
}
if (memcmp((char *)&tb->a,(char *)own,6))
continue ;
- return(tb) ;
+ return tb;
}
- return(slot) ; /* return first free or NULL */
+ return slot; /* return first free or NULL */
}
/*
@@ -1152,12 +1152,12 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
*/
if (can & 0x80) {
if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) {
- return(1) ;
+ return 1;
}
}
else {
if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) {
- return(1) ;
+ return 1;
}
}
@@ -1165,7 +1165,7 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
* find empty slot
*/
if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
- return(1) ;
+ return 1;
tb->n++ ;
tb->a = own ;
tb->perm = (can & 0x80) ? 1 : 0 ;
@@ -1175,7 +1175,7 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
else
smc->hw.fp.os_slots_used++ ;
- return(0) ;
+ return 0;
}
/*
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index d322f1b702a..af5a755e269 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -232,16 +232,16 @@ u_int mac_drv_check_space(void)
#ifdef COMMON_MB_POOL
call_count++ ;
if (call_count == 1) {
- return(EXT_VIRT_MEM) ;
+ return EXT_VIRT_MEM;
}
else {
- return(EXT_VIRT_MEM_2) ;
+ return EXT_VIRT_MEM_2;
}
#else
- return (EXT_VIRT_MEM) ;
+ return EXT_VIRT_MEM;
#endif
#else
- return (0) ;
+ return 0;
#endif
}
@@ -271,7 +271,7 @@ int mac_drv_init(struct s_smc *smc)
if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
mac_drv_get_desc_mem(smc,(u_int)
(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
- return(1) ; /* no space the hwm modul can't work */
+ return 1; /* no space the hwm modul can't work */
}
/*
@@ -283,18 +283,18 @@ int mac_drv_init(struct s_smc *smc)
#ifndef COMMON_MB_POOL
if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
MAX_MBUF*sizeof(SMbuf)))) {
- return(1) ; /* no space the hwm modul can't work */
+ return 1; /* no space the hwm modul can't work */
}
#else
if (!mb_start) {
if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
MAX_MBUF*sizeof(SMbuf)))) {
- return(1) ; /* no space the hwm modul can't work */
+ return 1; /* no space the hwm modul can't work */
}
}
#endif
#endif
- return (0) ;
+ return 0;
}
/*
@@ -349,7 +349,7 @@ static u_long init_descr_ring(struct s_smc *smc,
DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
d1++;
}
- return(phys) ;
+ return phys;
}
static void init_txd_ring(struct s_smc *smc)
@@ -502,7 +502,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc)
mb->sm_use_count = 1 ;
}
DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
- return (mb) ; /* May be NULL */
+ return mb; /* May be NULL */
}
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
@@ -621,7 +621,7 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
t = t->txd_next ;
tx_used-- ;
}
- return(phys) ;
+ return phys;
}
/*
@@ -673,7 +673,7 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
r = r->rxd_next ;
rx_used-- ;
}
- return(phys) ;
+ return phys;
}
@@ -1595,7 +1595,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
}
DB_TX("frame_status = %x",frame_status,0,3) ;
NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
- return(frame_status) ;
+ return frame_status;
}
/*
@@ -1764,7 +1764,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
smc->os.hwm.llc_rx_pipe = mb->sm_next ;
}
DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
- return(mb) ;
+ return mb;
}
/*
@@ -1797,7 +1797,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc)
smc->os.hwm.txd_tx_pipe = mb->sm_next ;
}
DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
- return(mb) ;
+ return mb;
}
/*
diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c
index 053151468f9..e6baa53307c 100644
--- a/drivers/net/skfp/hwt.c
+++ b/drivers/net/skfp/hwt.c
@@ -179,7 +179,7 @@ u_long hwt_read(struct s_smc *smc)
else
smc->hw.t_stop = smc->hw.t_start - tr ;
}
- return (smc->hw.t_stop) ;
+ return smc->hw.t_stop;
}
#ifdef PCI
@@ -208,7 +208,7 @@ u_long hwt_quick_read(struct s_smc *smc)
outpw(ADDR(B2_TI_CRTL), TIM_START) ;
outpd(ADDR(B2_TI_INI),interval) ;
- return(time) ;
+ return time;
}
/************************
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index ba45bc794d7..112d35b1bf0 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -504,7 +504,7 @@ int sm_pm_get_ls(struct s_smc *smc, int phy)
#ifdef CONCENTRATOR
if (!plc_is_installed(smc,phy))
- return(PC_QLS) ;
+ return PC_QLS;
#endif
state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ;
@@ -528,7 +528,7 @@ int sm_pm_get_ls(struct s_smc *smc, int phy)
default :
state = PC_LS_NONE ;
}
- return(state) ;
+ return state;
}
static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
@@ -547,7 +547,7 @@ static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
#if 0
printf("PL_PCM_SIGNAL is set\n") ;
#endif
- return(1) ;
+ return 1;
}
/* write bit[n] & length = 1 to regs */
outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */
@@ -562,7 +562,7 @@ static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
#endif
#endif
- return(0) ;
+ return 0;
}
/*
@@ -1590,12 +1590,12 @@ int pcm_status_twisted(struct s_smc *smc)
{
int twist = 0 ;
if (smc->s.sas != SMT_DAS)
- return(0) ;
+ return 0;
if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 1 ;
if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 2 ;
- return(twist) ;
+ return twist;
}
/*
@@ -1636,9 +1636,9 @@ int pcm_rooted_station(struct s_smc *smc)
for (n = 0 ; n < NUMPHYS ; n++) {
if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
smc->y[n].mib->fddiPORTNeighborType == TM)
- return(0) ;
+ return 0;
}
- return(1) ;
+ return 1;
}
/*
@@ -1915,7 +1915,7 @@ int get_pcm_state(struct s_smc *smc, int np)
case PL_PC9 : pcs = PC_MAINT ; break ;
default : pcs = PC_DISABLE ; break ;
}
- return(pcs) ;
+ return pcs;
}
char *get_linestate(struct s_smc *smc, int np)
@@ -1937,7 +1937,7 @@ char *get_linestate(struct s_smc *smc, int np)
default: ls = "unknown" ; break ;
#endif
}
- return(ls) ;
+ return ls;
}
char *get_pcmstate(struct s_smc *smc, int np)
@@ -1959,7 +1959,7 @@ char *get_pcmstate(struct s_smc *smc, int np)
case PL_PC9 : pcs = "MAINT" ; break ;
default : pcs = "UNKNOWN" ; break ;
}
- return(pcs) ;
+ return pcs;
}
void list_phy(struct s_smc *smc)
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index a320fdb3727..9ac4665d741 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -328,7 +328,7 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
* build SMT header
*/
if (!(mb = smt_get_mbuf(smc)))
- return(mb) ;
+ return mb;
smt = smtod(mb, struct smt_header *) ;
smt->smt_dest = req->smt_source ; /* DA == source of request */
@@ -493,7 +493,7 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ;
smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ;
}
- return(mb) ;
+ return mb;
}
static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
@@ -511,7 +511,7 @@ static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
if (i != 8) {
if (memcmp((char *) &sm->smt_sid,
(char *) &smc->mib.fddiPRPMFStation,8))
- return(1) ;
+ return 1;
}
/*
* check authorization parameter if passwd not zero
@@ -522,13 +522,13 @@ static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
if (i != 8) {
pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ;
if (!pa)
- return(1) ;
+ return 1;
if (pa->p_len != 8)
- return(1) ;
+ return 1;
if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8))
- return(1) ;
+ return 1;
}
- return(0) ;
+ return 0;
}
static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
@@ -542,9 +542,9 @@ static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
if ((smc->mib.fddiSMTSetCount.count != sc->count) ||
memcmp((char *) smc->mib.fddiSMTSetCount.timestamp,
(char *)sc->timestamp,8))
- return(1) ;
+ return 1;
}
- return(0) ;
+ return 0;
}
void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
@@ -1109,7 +1109,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
break ;
case 0x2000 :
if (mac < 0 || mac >= NUMMACS) {
- return(SMT_RDF_NOPARAM) ;
+ return SMT_RDF_NOPARAM;
}
mib_m = &smc->mib.m[mac] ;
mib_addr = (char *) mib_m ;
@@ -1118,7 +1118,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
break ;
case 0x3000 :
if (path < 0 || path >= NUMPATHS) {
- return(SMT_RDF_NOPARAM) ;
+ return SMT_RDF_NOPARAM;
}
mib_a = &smc->mib.a[path] ;
mib_addr = (char *) mib_a ;
@@ -1127,7 +1127,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
break ;
case 0x4000 :
if (port < 0 || port >= smt_mib_phys(smc)) {
- return(SMT_RDF_NOPARAM) ;
+ return SMT_RDF_NOPARAM;
}
mib_p = &smc->mib.p[port_to_mib(smc,port)] ;
mib_addr = (char *) mib_p ;
@@ -1151,22 +1151,20 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
case SMT_P10F9 :
#endif
case SMT_P20F1 :
- if (!local) {
- return(SMT_RDF_NOPARAM) ;
- }
+ if (!local)
+ return SMT_RDF_NOPARAM;
break ;
}
pt = smt_get_ptab(pa->p_type) ;
- if (!pt) {
- return( (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM :
- SMT_RDF_ILLEGAL ) ;
- }
+ if (!pt)
+ return (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM :
+ SMT_RDF_ILLEGAL;
switch (pt->p_access) {
case AC_GR :
case AC_S :
break ;
default :
- return(SMT_RDF_ILLEGAL) ;
+ return SMT_RDF_ILLEGAL;
}
to = mib_addr + pt->p_offset ;
swap = pt->p_swap ; /* pointer to swap string */
@@ -1292,7 +1290,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
break ;
default :
SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ;
- return(SMT_RDF_ILLEGAL) ;
+ return SMT_RDF_ILLEGAL;
}
}
/*
@@ -1501,15 +1499,15 @@ change_mac_para:
default :
break ;
}
- return(0) ;
+ return 0;
val_error:
/* parameter value in frame is out of range */
- return(SMT_RDF_RANGE) ;
+ return SMT_RDF_RANGE;
len_error:
/* parameter value in frame is too short */
- return(SMT_RDF_LENGTH) ;
+ return SMT_RDF_LENGTH;
#if 0
no_author_error:
@@ -1518,7 +1516,7 @@ no_author_error:
* because SBA denied is not a valid return code in the
* PMF protocol.
*/
- return(SMT_RDF_AUTHOR) ;
+ return SMT_RDF_AUTHOR;
#endif
}
@@ -1527,7 +1525,7 @@ static const struct s_p_tab *smt_get_ptab(u_short para)
const struct s_p_tab *pt ;
for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
;
- return(pt->p_num ? pt : NULL) ;
+ return pt->p_num ? pt : NULL;
}
static int smt_mib_phys(struct s_smc *smc)
@@ -1535,11 +1533,11 @@ static int smt_mib_phys(struct s_smc *smc)
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
- return(NUMPHYS) ;
+ return NUMPHYS;
#else
if (smc->s.sas == SMT_SAS)
- return(1) ;
- return(NUMPHYS) ;
+ return 1;
+ return NUMPHYS;
#endif
}
@@ -1548,11 +1546,11 @@ static int port_to_mib(struct s_smc *smc, int p)
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
- return(p) ;
+ return p;
#else
if (smc->s.sas == SMT_SAS)
- return(PS) ;
- return(p) ;
+ return PS;
+ return p;
#endif
}
diff --git a/drivers/net/skfp/queue.c b/drivers/net/skfp/queue.c
index 09adb3d68b7..c1a0df455a5 100644
--- a/drivers/net/skfp/queue.c
+++ b/drivers/net/skfp/queue.c
@@ -128,7 +128,7 @@ u_short smt_online(struct s_smc *smc, int on)
{
queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ;
ev_dispatcher(smc) ;
- return(smc->mib.fddiSMTCF_State) ;
+ return smc->mib.fddiSMTCF_State;
}
/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 31b2dabf094..ba2e8339fe9 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -209,7 +209,7 @@ static int skfp_init_one(struct pci_dev *pdev,
void __iomem *mem;
int err;
- pr_debug(KERN_INFO "entering skfp_init_one\n");
+ pr_debug("entering skfp_init_one\n");
if (num_boards == 0)
printk("%s\n", boot_msg);
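
The skfddi cleanup that starts here drops KERN_INFO from pr_debug() calls: pr_debug() supplies its own log level (KERN_DEBUG, or the dynamic-debug machinery where enabled), so an extra level macro only ends up embedded as literal text in the format string. A hedged sketch of the intended split; the function and variable names are illustrative:

	#include <linux/kernel.h>

	static void example_log(int nboards)
	{
		/* pr_debug() adds the level itself; pass only the format. */
		pr_debug("entering example_log\n");

		/* An explicit level belongs with plain printk(): */
		printk(KERN_INFO "example: %d board(s) found\n", nboards);
	}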
@@ -385,7 +385,7 @@ static int skfp_driver_init(struct net_device *dev)
skfddi_priv *bp = &smc->os;
int err = -EIO;
- pr_debug(KERN_INFO "entering skfp_driver_init\n");
+ pr_debug("entering skfp_driver_init\n");
// set the io address in private structures
bp->base_addr = dev->base_addr;
@@ -405,7 +405,7 @@ static int skfp_driver_init(struct net_device *dev)
// Determine the required size of the 'shared' memory area.
bp->SharedMemSize = mac_drv_check_space();
- pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
+ pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
@@ -429,18 +429,18 @@ static int skfp_driver_init(struct net_device *dev)
card_stop(smc); // Reset adapter.
- pr_debug(KERN_INFO "mac_drv_init()..\n");
+ pr_debug("mac_drv_init()..\n");
if (mac_drv_init(smc) != 0) {
- pr_debug(KERN_INFO "mac_drv_init() failed.\n");
+ pr_debug("mac_drv_init() failed\n");
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
+ pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
- return (0);
+ return 0;
fail:
if (bp->SharedMemAddr) {
@@ -485,7 +485,7 @@ static int skfp_open(struct net_device *dev)
struct s_smc *smc = netdev_priv(dev);
int err;
- pr_debug(KERN_INFO "entering skfp_open\n");
+ pr_debug("entering skfp_open\n");
/* Register IRQ - support shared interrupts by passing device ptr */
err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
dev->name, dev);
@@ -516,7 +516,7 @@ static int skfp_open(struct net_device *dev)
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
netif_start_queue(dev);
- return (0);
+ return 0;
} // skfp_open
@@ -565,7 +565,7 @@ static int skfp_close(struct net_device *dev)
skb_queue_purge(&bp->SendSkbQueue);
bp->QueueSkb = MAX_TX_QUEUE_LEN;
- return (0);
+ return 0;
} // skfp_close
@@ -794,7 +794,7 @@ static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
#endif
- return ((struct net_device_stats *) &bp->os.MacStat);
+ return (struct net_device_stats *)&bp->os.MacStat;
} // ctl_get_stat
@@ -856,12 +856,12 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
- pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
+ pr_debug("PROMISCUOUS MODE ENABLED\n");
}
/* Else, update multicast address table */
else {
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
- pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
+ pr_debug("PROMISCUOUS MODE DISABLED\n");
// Reset all MC addresses
mac_clear_multicast(smc);
@@ -869,7 +869,7 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
- pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ pr_debug("ENABLE ALL MC ADDRESSES\n");
} else if (!netdev_mc_empty(dev)) {
if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
@@ -880,18 +880,18 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
(struct fddi_addr *)ha->addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
- ha->addr);
+ pr_debug("ENABLE MC ADDRESS: %pMF\n",
+ ha->addr);
}
} else { // more MC addresses than HW supports
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
- pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ pr_debug("ENABLE ALL MC ADDRESSES\n");
}
} else { // no MC addresses
- pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
+ pr_debug("DISABLE ALL MC ADDRESSES\n");
}
/* Update adapter filters */
@@ -932,7 +932,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
ResetAdapter(smc);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
- return (0); /* always return zero */
+ return 0; /* always return zero */
} // skfp_ctl_set_mac_address
@@ -1045,7 +1045,7 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
- pr_debug(KERN_INFO "skfp_send_pkt\n");
+ pr_debug("skfp_send_pkt\n");
/*
* Verify that incoming transmit request is OK
@@ -1114,13 +1114,13 @@ static void send_queued_packets(struct s_smc *smc)
int frame_status; // HWM tx frame status.
- pr_debug(KERN_INFO "send queued packets\n");
+ pr_debug("send queued packets\n");
for (;;) {
// send first buffer from queue
skb = skb_dequeue(&bp->SendSkbQueue);
if (!skb) {
- pr_debug(KERN_INFO "queue empty\n");
+ pr_debug("queue empty\n");
return;
} // queue empty !
@@ -1232,7 +1232,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
static void ResetAdapter(struct s_smc *smc)
{
- pr_debug(KERN_INFO "[fddi: ResetAdapter]\n");
+ pr_debug("[fddi: ResetAdapter]\n");
// Stop the adapter.
@@ -1278,7 +1278,7 @@ void llc_restart_tx(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
- pr_debug(KERN_INFO "[llc_restart_tx]\n");
+ pr_debug("[llc_restart_tx]\n");
// Try to send queued packets
spin_unlock(&bp->DriverLock);
@@ -1308,21 +1308,21 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
void *virt;
- pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
+ pr_debug("mac_drv_get_space (%d bytes), ", size);
virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
printk("Unexpected SMT memory size requested: %d\n", size);
- return (NULL);
+ return NULL;
}
smc->os.SharedMemHeap += size; // Move heap pointer.
- pr_debug(KERN_INFO "mac_drv_get_space end\n");
- pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt);
- pr_debug(KERN_INFO "bus addr: %lx\n", (ulong)
+ pr_debug("mac_drv_get_space end\n");
+ pr_debug("virt addr: %lx\n", (ulong) virt);
+ pr_debug("bus addr: %lx\n", (ulong)
(smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr)));
- return (virt);
+ return virt;
} // mac_drv_get_space
@@ -1349,7 +1349,7 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
char *virt;
- pr_debug(KERN_INFO "mac_drv_get_desc_mem\n");
+ pr_debug("mac_drv_get_desc_mem\n");
// Descriptor memory must be aligned on 16-byte boundary.
@@ -1363,9 +1363,9 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
if (!mac_drv_get_space(smc, size)) {
printk("fddi: Unable to align descriptor memory.\n");
- return (NULL);
+ return NULL;
}
- return (virt + size);
+ return virt + size;
} // mac_drv_get_desc_mem
@@ -1384,8 +1384,8 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
************************/
unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
{
- return (smc->os.SharedMemDMA +
- ((char *) virt - (char *)smc->os.SharedMemAddr));
+ return smc->os.SharedMemDMA +
+ ((char *) virt - (char *)smc->os.SharedMemAddr);
} // mac_drv_virt2phys
@@ -1419,8 +1419,8 @@ unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
************************/
u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
{
- return (smc->os.SharedMemDMA +
- ((char *) virt - (char *)smc->os.SharedMemAddr));
+ return smc->os.SharedMemDMA +
+ ((char *) virt - (char *)smc->os.SharedMemAddr);
} // dma_master
@@ -1493,7 +1493,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
struct sk_buff *skb;
- pr_debug(KERN_INFO "entering mac_drv_tx_complete\n");
+ pr_debug("entering mac_drv_tx_complete\n");
// Check if this TxD points to a skb
if (!(skb = txd->txd_os.skb)) {
@@ -1513,7 +1513,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
// free the skb
dev_kfree_skb_irq(skb);
- pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n");
+ pr_debug("leaving mac_drv_tx_complete\n");
} // mac_drv_tx_complete
@@ -1580,7 +1580,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
unsigned short ri;
u_int RifLength;
- pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
+ pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
if (frag_count != 1) { // This is not allowed to happen.
printk("fddi: Multi-fragment receive!\n");
@@ -1589,7 +1589,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
}
skb = rxd->rxd_os.skb;
if (!skb) {
- pr_debug(KERN_INFO "No skb in rxd\n");
+ pr_debug("No skb in rxd\n");
smc->os.MacStat.gen.rx_errors++;
goto RequeueRxd;
}
@@ -1619,7 +1619,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
else {
int n;
// goos: RIF removal has still to be tested
- pr_debug(KERN_INFO "RIF found\n");
+ pr_debug("RIF found\n");
// Get RIF length from Routing Control (RC) field.
cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
@@ -1664,7 +1664,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
return;
RequeueRxd:
- pr_debug(KERN_INFO "Rx: re-queue RXD.\n");
+ pr_debug("Rx: re-queue RXD.\n");
mac_drv_requeue_rxd(smc, rxd, frag_count);
smc->os.MacStat.gen.rx_errors++; // Count receive packets
// not indicated.
@@ -1775,7 +1775,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
struct sk_buff *skb;
volatile struct s_smt_fp_rxd *rxd;
- pr_debug(KERN_INFO "entering mac_drv_fill_rxd\n");
+ pr_debug("entering mac_drv_fill_rxd\n");
// Walk through the list of free receive buffers, passing receive
// buffers to the HWM as long as RXDs are available.
@@ -1783,7 +1783,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
MaxFrameSize = smc->os.MaxFrameSize;
// Check if there is any RXD left.
while (HWM_GET_RX_FREE(smc) > 0) {
- pr_debug(KERN_INFO ".\n");
+ pr_debug(".\n");
rxd = HWM_GET_CURR_RXD(smc);
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
@@ -1814,7 +1814,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
}
- pr_debug(KERN_INFO "leaving mac_drv_fill_rxd\n");
+ pr_debug("leaving mac_drv_fill_rxd\n");
} // mac_drv_fill_rxd
@@ -1904,12 +1904,12 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
pr_debug("fddi: Discard invalid local SMT frame\n");
pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
len, la_len, (unsigned long) look_ahead);
- return (0);
+ return 0;
}
skb = alloc_skb(len + 3, GFP_ATOMIC);
if (!skb) {
pr_debug("fddi: Local SMT: skb memory exhausted.\n");
- return (0);
+ return 0;
}
skb_reserve(skb, 3);
skb_put(skb, len);
@@ -1919,7 +1919,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
skb->protocol = fddi_type_trans(skb, smc->os.dev);
netif_rx(skb);
- return (0);
+ return 0;
} // mac_drv_rx_init
@@ -2034,17 +2034,17 @@ void smt_stat_counter(struct s_smc *smc, int stat)
{
// BOOLEAN RingIsUp ;
- pr_debug(KERN_INFO "smt_stat_counter\n");
+ pr_debug("smt_stat_counter\n");
switch (stat) {
case 0:
- pr_debug(KERN_INFO "Ring operational change.\n");
+ pr_debug("Ring operational change.\n");
break;
case 1:
- pr_debug(KERN_INFO "Receive fifo overflow.\n");
+ pr_debug("Receive fifo overflow.\n");
smc->os.MacStat.gen.rx_errors++;
break;
default:
- pr_debug(KERN_INFO "Unknown status (%d).\n", stat);
+ pr_debug("Unknown status (%d).\n", stat);
break;
}
} // smt_stat_counter
@@ -2100,10 +2100,10 @@ void cfm_state_change(struct s_smc *smc, int c_state)
s = "SC11_C_WRAP_S";
break;
default:
- pr_debug(KERN_INFO "cfm_state_change: unknown %d\n", c_state);
+ pr_debug("cfm_state_change: unknown %d\n", c_state);
return;
}
- pr_debug(KERN_INFO "cfm_state_change: %s\n", s);
+ pr_debug("cfm_state_change: %s\n", s);
#endif // DRIVERDEBUG
} // cfm_state_change
@@ -2158,7 +2158,7 @@ void ecm_state_change(struct s_smc *smc, int e_state)
s = "unknown";
break;
}
- pr_debug(KERN_INFO "ecm_state_change: %s\n", s);
+ pr_debug("ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
} // ecm_state_change
@@ -2213,7 +2213,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
s = "unknown";
break;
}
- pr_debug(KERN_INFO "[rmt_state_change: %s]\n", s);
+ pr_debug("[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
} // rmt_state_change
@@ -2233,7 +2233,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
************************/
void drv_reset_indication(struct s_smc *smc)
{
- pr_debug(KERN_INFO "entering drv_reset_indication\n");
+ pr_debug("entering drv_reset_indication\n");
smc->os.ResetRequested = TRUE; // Set flag.
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 6f35bb77595..2d9941c045b 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -127,22 +127,22 @@ static inline int is_my_addr(const struct s_smc *smc,
static inline int is_broadcast(const struct fddi_addr *addr)
{
- return(*(u_short *)(&addr->a[0]) == 0xffff &&
+ return *(u_short *)(&addr->a[0]) == 0xffff &&
*(u_short *)(&addr->a[2]) == 0xffff &&
- *(u_short *)(&addr->a[4]) == 0xffff ) ;
+ *(u_short *)(&addr->a[4]) == 0xffff;
}
static inline int is_individual(const struct fddi_addr *addr)
{
- return(!(addr->a[0] & GROUP_ADDR)) ;
+ return !(addr->a[0] & GROUP_ADDR);
}
static inline int is_equal(const struct fddi_addr *addr1,
const struct fddi_addr *addr2)
{
- return(*(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
+ return *(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
*(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) &&
- *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]) ) ;
+ *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]);
}
/*
@@ -457,8 +457,8 @@ static int div_ratio(u_long upper, u_long lower)
else
upper <<= 16L ;
if (!lower)
- return(0) ;
- return((int)(upper/lower)) ;
+ return 0;
+ return (int)(upper/lower) ;
}
#ifndef SLIM_SMT
@@ -1111,11 +1111,11 @@ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
#if 0
if (!smc->r.sm_ma_avail) {
- return(0) ;
+ return 0;
}
#endif
if (!(mb = smt_get_mbuf(smc)))
- return(mb) ;
+ return mb;
mb->sm_len = length ;
smt = smtod(mb, struct smt_header *) ;
@@ -1136,7 +1136,7 @@ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */
smt->smt_pad = 0 ;
smt->smt_len = length - sizeof(struct smt_header) ;
- return(mb) ;
+ return mb;
}
static void smt_add_frame_len(SMbuf *mb, int len)
@@ -1375,7 +1375,7 @@ static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path)
pd_mac = (struct smt_mac_rec *) phy ;
pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ;
pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ;
- return(len) ;
+ return len;
}
/*
@@ -1563,7 +1563,7 @@ u_long smt_get_tid(struct s_smc *smc)
u_long tid ;
while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0)
;
- return(tid & 0x3fffffffL) ;
+ return tid & 0x3fffffffL;
}
@@ -1654,11 +1654,11 @@ int smt_check_para(struct s_smc *smc, struct smt_header *sm,
while (*p) {
if (!sm_to_para(smc,sm,(int) *p)) {
DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0);
- return(-1) ;
+ return -1;
}
p++ ;
}
- return(0) ;
+ return 0;
}
void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
@@ -1687,7 +1687,7 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
return NULL;
}
if (found)
- return(found) ;
+ return found;
}
return NULL;
}
@@ -1732,7 +1732,7 @@ char *addr_to_string(struct fddi_addr *addr)
string[i * 3 + 2] = ':';
}
string[5 * 3 + 2] = 0;
- return(string);
+ return string;
}
#endif
@@ -1742,9 +1742,9 @@ int smt_ifconfig(int argc, char *argv[])
if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
!strcmp(argv[1],"yes")) {
smc->mib.fddiSMTBypassPresent = 1 ;
- return(0) ;
+ return 0;
}
- return(amdfddi_config(0,argc,argv)) ;
+ return amdfddi_config(0, argc, argv);
}
#endif
@@ -1756,9 +1756,9 @@ static int mac_index(struct s_smc *smc, int mac)
SK_UNUSED(mac) ;
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
- return(NUMPHYS+1) ;
+ return NUMPHYS + 1;
#else
- return((smc->s.sas == SMT_SAS) ? 2 : 3) ;
+ return (smc->s.sas == SMT_SAS) ? 2 : 3;
#endif
}
@@ -1768,7 +1768,7 @@ static int mac_index(struct s_smc *smc, int mac)
static int phy_index(struct s_smc *smc, int phy)
{
SK_UNUSED(smc) ;
- return(phy+1);
+ return phy + 1;
}
/*
@@ -1779,19 +1779,19 @@ static int mac_con_resource_index(struct s_smc *smc, int mac)
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
SK_UNUSED(mac) ;
- return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_MAC))) ;
+ return entity_to_index(smc, cem_get_downstream(smc, ENTITY_MAC));
#else
SK_UNUSED(mac) ;
switch (smc->mib.fddiSMTCF_State) {
case SC9_C_WRAP_A :
case SC5_THRU_B :
case SC11_C_WRAP_S :
- return(1) ;
+ return 1;
case SC10_C_WRAP_B :
case SC4_THRU_A :
- return(2) ;
+ return 2;
}
- return(smc->s.sas == SMT_SAS ? 2 : 3) ;
+ return smc->s.sas == SMT_SAS ? 2 : 3;
#endif
}
@@ -1801,21 +1801,21 @@ static int mac_con_resource_index(struct s_smc *smc, int mac)
static int phy_con_resource_index(struct s_smc *smc, int phy)
{
#ifdef CONCENTRATOR
- return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_PHY(phy)))) ;
+ return entity_to_index(smc, cem_get_downstream(smc, ENTITY_PHY(phy))) ;
#else
switch (smc->mib.fddiSMTCF_State) {
case SC9_C_WRAP_A :
- return(phy == PA ? 3 : 2) ;
+ return phy == PA ? 3 : 2;
case SC10_C_WRAP_B :
- return(phy == PA ? 1 : 3) ;
+ return phy == PA ? 1 : 3;
case SC4_THRU_A :
- return(phy == PA ? 3 : 1) ;
+ return phy == PA ? 3 : 1;
case SC5_THRU_B :
- return(phy == PA ? 2 : 3) ;
+ return phy == PA ? 2 : 3;
case SC11_C_WRAP_S :
- return(2) ;
+ return 2;
}
- return(phy) ;
+ return phy;
#endif
}
@@ -1823,16 +1823,16 @@ static int phy_con_resource_index(struct s_smc *smc, int phy)
static int entity_to_index(struct s_smc *smc, int e)
{
if (e == ENTITY_MAC)
- return(mac_index(smc,1)) ;
+ return mac_index(smc, 1);
else
- return(phy_index(smc,e - ENTITY_PHY(0))) ;
+ return phy_index(smc, e - ENTITY_PHY(0));
}
#endif
#ifdef LITTLE_ENDIAN
static int smt_swap_short(u_short s)
{
- return(((s>>8)&0xff)|((s&0xff)<<8)) ;
+ return ((s>>8)&0xff) | ((s&0xff)<<8);
}
void smt_swap_para(struct smt_header *sm, int len, int direction)
@@ -1996,7 +1996,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
}
break ;
default :
- return(1) ;
+ return 1;
}
break ;
case SMT_PORT_ACTION :
@@ -2017,14 +2017,14 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
event = PC_STOP ;
break ;
default :
- return(1) ;
+ return 1;
}
queue_event(smc,EVENT_PCM+index,event) ;
break ;
default :
- return(1) ;
+ return 1;
}
- return(0) ;
+ return 0;
}
/*
diff --git a/drivers/net/skfp/smtdef.c b/drivers/net/skfp/smtdef.c
index 4e07ff7073f..1acab0b368e 100644
--- a/drivers/net/skfp/smtdef.c
+++ b/drivers/net/skfp/smtdef.c
@@ -303,7 +303,7 @@ int smt_set_mac_opvalues(struct s_smc *smc)
FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ,
smt_get_event_word(smc));
}
- return(st) ;
+ return st;
}
void smt_fixup_mib(struct s_smc *smc)
@@ -350,6 +350,6 @@ static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper)
*oper = limit ;
else
*oper = mib ;
- return(old != *oper) ;
+ return old != *oper;
}
diff --git a/drivers/net/skfp/smtinit.c b/drivers/net/skfp/smtinit.c
index 3c8964ce183..e3a0c0bc223 100644
--- a/drivers/net/skfp/smtinit.c
+++ b/drivers/net/skfp/smtinit.c
@@ -120,6 +120,6 @@ int init_smt(struct s_smc *smc, u_char *mac_addr)
PNMI_INIT(smc) ; /* PNMI initialization */
- return(0) ;
+ return 0;
}
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 40882b3faba..f6f7baf9f27 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -165,7 +165,7 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
if (evc->evc_code == code && evc->evc_index == index)
- return(evc) ;
+ return evc;
}
return NULL;
}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46e757..bfec2e0f527 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,6 +43,7 @@
#include <linux/seq_file.h>
#include <linux/mii.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
#include <asm/irq.h>
#include "skge.h"
@@ -3178,8 +3179,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
if (likely(skb)) {
- netif_receive_skb(skb);
-
+ napi_gro_receive(napi, skb);
++work_done;
}
}
@@ -3192,6 +3192,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
if (work_done < to_do) {
unsigned long flags;
+ napi_gro_flush(napi);
spin_lock_irqsave(&hw->hw_lock, flags);
__napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
@@ -3849,6 +3850,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
skge->rx_csum = 1;
}
+ dev->features |= NETIF_F_GRO;
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
@@ -3868,6 +3870,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
+static int only_32bit_dma;
+
static int __devinit skge_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3889,7 +3893,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4147,8 +4151,21 @@ static struct pci_driver skge_driver = {
.shutdown = skge_shutdown,
};
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+ {
+ .ident = "Gigabyte nForce boards",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
+ {}
+};
+
static int __init skge_init_module(void)
{
+ if (dmi_check_system(skge_32bit_dma_boards))
+ only_32bit_dma = 1;
skge_debug_init();
return pci_register_driver(&skge_driver);
}
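
The skge hunk above gates the 64-bit DMA mask on a DMI board match done at module init. As a rough, self-contained sketch of that quirk pattern (all example_* names and board strings here are hypothetical, not taken from the driver):

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_quirk;

static const struct dmi_system_id example_quirk_boards[] = {
	{
		.ident = "Example board needing the 32-bit DMA quirk",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
		},
	},
	{ }	/* terminating empty entry */
};

static int __init example_init(void)
{
	/* apply the quirk only on boards matching the DMI table */
	if (dmi_check_system(example_quirk_boards))
		example_quirk = 1;	/* e.g. restrict DMA to 32 bits */
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");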
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 194e5cf8c76..3ef9b67ac6e 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4581,7 +4581,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port;
- dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
+ | NETIF_F_TSO | NETIF_F_GRO;
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index fa434fb8fb7..86cbb9ea2f2 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -271,7 +271,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
memcpy(sl->xbuff, sl->xhead, sl->xleft);
} else {
sl->xleft = 0;
- sl->tx_dropped++;
+ dev->stats.tx_dropped++;
}
}
sl->xhead = sl->xbuff;
@@ -281,7 +281,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
memcpy(sl->rbuff, rbuff, sl->rcount);
} else {
sl->rcount = 0;
- sl->rx_over_errors++;
+ dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
@@ -319,6 +319,7 @@ static inline void sl_unlock(struct slip *sl)
/* Send one completely decapsulated IP datagram to the IP layer. */
static void sl_bump(struct slip *sl)
{
+ struct net_device *dev = sl->dev;
struct sk_buff *skb;
int count;
@@ -329,13 +330,13 @@ static void sl_bump(struct slip *sl)
if (c & SL_TYPE_COMPRESSED_TCP) {
/* ignore compressed packets when CSLIP is off */
if (!(sl->mode & SL_MODE_CSLIP)) {
- printk(KERN_WARNING "%s: compressed packet ignored\n", sl->dev->name);
+ printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
return;
}
/* make sure we've reserved enough space for uncompress
to use */
if (count + 80 > sl->buffsize) {
- sl->rx_over_errors++;
+ dev->stats.rx_over_errors++;
return;
}
count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
@@ -346,7 +347,7 @@ static void sl_bump(struct slip *sl)
/* turn on header compression */
sl->mode |= SL_MODE_CSLIP;
sl->mode &= ~SL_MODE_ADAPTIVE;
- printk(KERN_INFO "%s: header compression turned on\n", sl->dev->name);
+ printk(KERN_INFO "%s: header compression turned on\n", dev->name);
}
sl->rbuff[0] &= 0x4f;
if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
@@ -355,20 +356,20 @@ static void sl_bump(struct slip *sl)
}
#endif /* SL_INCLUDE_CSLIP */
- sl->rx_bytes += count;
+ dev->stats.rx_bytes += count;
skb = dev_alloc_skb(count);
if (skb == NULL) {
- printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", sl->dev->name);
- sl->rx_dropped++;
+ printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
return;
}
- skb->dev = sl->dev;
+ skb->dev = dev;
memcpy(skb_put(skb, count), sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
- sl->rx_packets++;
+ dev->stats.rx_packets++;
}
/* Encapsulate one IP datagram and stuff into a TTY queue. */
@@ -379,7 +380,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
- sl->tx_dropped++;
+ sl->dev->stats.tx_dropped++;
sl_unlock(sl);
return;
}
@@ -433,7 +434,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
- sl->tx_packets++;
+ sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sl_unlock(sl);
return;
@@ -496,7 +497,7 @@ sl_xmit(struct sk_buff *skb, struct net_device *dev)
}
sl_lock(sl);
- sl->tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
sl_encaps(sl, skb->data, skb->len);
spin_unlock(&sl->lock);
@@ -558,39 +559,39 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
/* Netdevice get statistics request */
-static struct net_device_stats *
-sl_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- static struct net_device_stats stats;
- struct slip *sl = netdev_priv(dev);
+ struct net_device_stats *devstats = &dev->stats;
+ unsigned long c_rx_dropped = 0;
#ifdef SL_INCLUDE_CSLIP
- struct slcompress *comp;
-#endif
+ unsigned long c_rx_fifo_errors = 0;
+ unsigned long c_tx_fifo_errors = 0;
+ unsigned long c_collisions = 0;
+ struct slip *sl = netdev_priv(dev);
+ struct slcompress *comp = sl->slcomp;
- memset(&stats, 0, sizeof(struct net_device_stats));
-
- stats.rx_packets = sl->rx_packets;
- stats.tx_packets = sl->tx_packets;
- stats.rx_bytes = sl->rx_bytes;
- stats.tx_bytes = sl->tx_bytes;
- stats.rx_dropped = sl->rx_dropped;
- stats.tx_dropped = sl->tx_dropped;
- stats.tx_errors = sl->tx_errors;
- stats.rx_errors = sl->rx_errors;
- stats.rx_over_errors = sl->rx_over_errors;
-#ifdef SL_INCLUDE_CSLIP
- stats.rx_fifo_errors = sl->rx_compressed;
- stats.tx_fifo_errors = sl->tx_compressed;
- stats.collisions = sl->tx_misses;
- comp = sl->slcomp;
if (comp) {
- stats.rx_fifo_errors += comp->sls_i_compressed;
- stats.rx_dropped += comp->sls_i_tossed;
- stats.tx_fifo_errors += comp->sls_o_compressed;
- stats.collisions += comp->sls_o_misses;
+ c_rx_fifo_errors = comp->sls_i_compressed;
+ c_rx_dropped = comp->sls_i_tossed;
+ c_tx_fifo_errors = comp->sls_o_compressed;
+ c_collisions = comp->sls_o_misses;
}
-#endif /* CONFIG_INET */
- return (&stats);
+ stats->rx_fifo_errors = sl->rx_compressed + c_rx_fifo_errors;
+ stats->tx_fifo_errors = sl->tx_compressed + c_tx_fifo_errors;
+ stats->collisions = sl->tx_misses + c_collisions;
+#endif
+ stats->rx_packets = devstats->rx_packets;
+ stats->tx_packets = devstats->tx_packets;
+ stats->rx_bytes = devstats->rx_bytes;
+ stats->tx_bytes = devstats->tx_bytes;
+ stats->rx_dropped = devstats->rx_dropped + c_rx_dropped;
+ stats->tx_dropped = devstats->tx_dropped;
+ stats->tx_errors = devstats->tx_errors;
+ stats->rx_errors = devstats->rx_errors;
+ stats->rx_over_errors = devstats->rx_over_errors;
+
+ return stats;
}
/* Netdevice register callback */
@@ -633,7 +634,7 @@ static const struct net_device_ops sl_netdev_ops = {
.ndo_open = sl_open,
.ndo_stop = sl_close,
.ndo_start_xmit = sl_xmit,
- .ndo_get_stats = sl_get_stats,
+ .ndo_get_stats64 = sl_get_stats64,
.ndo_change_mtu = sl_change_mtu,
.ndo_tx_timeout = sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
@@ -681,7 +682,7 @@ static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->rx_errors++;
+ sl->dev->stats.rx_errors++;
cp++;
continue;
}
@@ -943,7 +944,7 @@ static int slip_esc(unsigned char *s, unsigned char *d, int len)
}
}
*ptr++ = END;
- return (ptr - d);
+ return ptr - d;
}
static void slip_unesc(struct slip *sl, unsigned char s)
@@ -981,7 +982,7 @@ static void slip_unesc(struct slip *sl, unsigned char s)
sl->rbuff[sl->rcount++] = s;
return;
}
- sl->rx_over_errors++;
+ sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
@@ -1057,7 +1058,7 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
sl->rbuff[sl->rcount++] = c;
return;
}
- sl->rx_over_errors++;
+ sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
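
The slip.c conversion above drops the driver-private counters in favour of dev->stats plus a 64-bit getter. A minimal sketch of the ndo_get_stats64 shape it adopts, ignoring the CSLIP-specific counters the real callback also folds in (the example_ prefix is hypothetical):

#include <linux/netdevice.h>

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	const struct net_device_stats *src = &dev->stats;

	/* counters are already accumulated in dev->stats by the rx/tx
	 * paths; just widen them into the caller-supplied 64-bit block */
	stats->rx_packets = src->rx_packets;
	stats->tx_packets = src->tx_packets;
	stats->rx_bytes   = src->rx_bytes;
	stats->tx_bytes   = src->tx_bytes;
	stats->rx_errors  = src->rx_errors;
	stats->tx_errors  = src->tx_errors;
	stats->rx_dropped = src->rx_dropped;
	stats->tx_dropped = src->tx_dropped;

	return stats;
}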
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
index 9ea5c11287d..914e958abbf 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip.h
@@ -67,15 +67,6 @@ struct slip {
int xleft; /* bytes left in XMIT queue */
/* SLIP interface statistics. */
- unsigned long rx_packets; /* inbound frames counter */
- unsigned long tx_packets; /* outbound frames counter */
- unsigned long rx_bytes; /* inbound byte counte */
- unsigned long tx_bytes; /* outbound byte counter */
- unsigned long rx_errors; /* Parity, etc. errors */
- unsigned long tx_errors; /* Planned stuff */
- unsigned long rx_dropped; /* No memory for skb */
- unsigned long tx_dropped; /* When MTU change */
- unsigned long rx_over_errors; /* Frame bigger than SLIP buf. */
#ifdef SL_INCLUDE_CSLIP
unsigned long tx_compressed;
unsigned long rx_compressed;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8d2772cc42f..ee747919a76 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -83,43 +83,6 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
}
}
-#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6)
-
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_IO_SHIFT 0
-
-#define SMC_inw(a, r) in_be16((volatile u16 *)((a) + (r)))
-#define SMC_outw(v, a, r) out_be16((volatile u16 *)((a) + (r)), v)
-#define SMC_insw(a, r, p, l) \
- do { \
- unsigned long __port = (a) + (r); \
- u16 *__p = (u16 *)(p); \
- int __l = (l); \
- insw(__port, __p, __l); \
- while (__l > 0) { \
- *__p = swab16(*__p); \
- __p++; \
- __l--; \
- } \
- } while (0)
-#define SMC_outsw(a, r, p, l) \
- do { \
- unsigned long __port = (a) + (r); \
- u16 *__p = (u16 *)(p); \
- int __l = (l); \
- while (__l > 0) { \
- /* Believe it or not, the swab isn't needed. */ \
- outw( /* swab16 */ (*__p++), __port); \
- __l--; \
- } \
- } while (0)
-#define SMC_IRQ_FLAGS (0)
-
#elif defined(CONFIG_SA1100_PLEB)
/* We can only do 16-bit reads and writes in the static memory space. */
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae934ad..a8e5856ce88 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
+MODULE_ALIAS("platform:smsc911x");
#if USE_DEBUG > 0
static int debug = 16;
@@ -1048,7 +1049,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
pktwords);
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_receive_skb(skb);
/* Update counters */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 1636a34d95d..cb6bcca9d54 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1000,9 +1000,9 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
!(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (data_status & SPIDER_NET_VLAN_PACKET) {
/* further enhancements: HW-accel VLAN
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a42b6873370..4adf1242278 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -148,7 +148,7 @@ static int full_duplex[MAX_UNITS] = {0, };
* This SUCKS.
* We need a much better method to determine if dma_addr_t is 64-bit.
*/
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
/* 64-bit dma_addr_t */
#define ADDR_64BITS /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
@@ -302,7 +302,7 @@ enum chipset {
};
static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
- { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
+ { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
@@ -2078,11 +2078,7 @@ static int __init starfire_init (void)
printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif
- /* we can do this test only at run-time... sigh */
- if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
- printk("This driver has dma_addr_t issues, please send email to maintainer\n");
- return -ENODEV;
- }
+ BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
return pci_register_driver(&starfire_driver);
}
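
The starfire change above turns a run-time sizeof() test into a compile-time assertion. A small sketch of the BUILD_BUG_ON() pattern, with a hypothetical descriptor address type standing in for the driver's own:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>

typedef __le64 example_desc_addr_t;	/* hypothetical descriptor address type */

static int __init example_init(void)
{
	/* refuses to compile if the kernel's dma_addr_t and the chip's
	 * descriptor address field ever differ in size */
	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(example_desc_addr_t));
	return 0;
}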
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index eb63d44748a..7df7df4e79c 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -3,10 +3,10 @@ config STMMAC_ETH
select MII
select PHYLIB
select CRC32
- depends on NETDEVICES && CPU_SUBTYPE_ST40
+ depends on NETDEVICES && HAS_IOMEM
help
This is the driver for the Ethernet IPs are built around a
- Synopsys IP Core and fully tested on the STMicroelectronics
+ Synopsys IP Core and only tested on the STMicroelectronics
platforms.
if STMMAC_ETH
@@ -32,6 +32,7 @@ config STMMAC_DUAL_MAC
config STMMAC_TIMER
bool "STMMAC Timer optimisation"
default n
+ depends on RTC_HCTOSYS_DEVICE
help
Use an external timer for mitigating the number of network
interrupts. Currently, for SH architectures, it is possible
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 66b9da0260f..dec7ce40c27 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -102,8 +102,6 @@ struct stmmac_extra_stats {
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
@@ -167,7 +165,7 @@ struct stmmac_desc_ops {
int (*get_tx_ls) (struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
int (*tx_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr);
+ struct dma_desc *p, void __iomem *ioaddr);
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
@@ -182,44 +180,46 @@ struct stmmac_desc_ops {
struct stmmac_dma_ops {
/* DMA core initialization */
- int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
/* Dump DMA registers */
- void (*dump_regs) (unsigned long ioaddr);
+ void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
* An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- void (*enable_dma_transmission) (unsigned long ioaddr);
- void (*enable_dma_irq) (unsigned long ioaddr);
- void (*disable_dma_irq) (unsigned long ioaddr);
- void (*start_tx) (unsigned long ioaddr);
- void (*stop_tx) (unsigned long ioaddr);
- void (*start_rx) (unsigned long ioaddr);
- void (*stop_rx) (unsigned long ioaddr);
- int (*dma_interrupt) (unsigned long ioaddr,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq) (void __iomem *ioaddr);
+ void (*disable_dma_irq) (void __iomem *ioaddr);
+ void (*start_tx) (void __iomem *ioaddr);
+ void (*stop_tx) (void __iomem *ioaddr);
+ void (*start_rx) (void __iomem *ioaddr);
+ void (*stop_rx) (void __iomem *ioaddr);
+ int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x);
};
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+ /* Support checksum offload engine */
+ int (*rx_coe) (void __iomem *ioaddr);
/* Dump MAC registers */
- void (*dump_regs) (unsigned long ioaddr);
+ void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
+ void (*host_irq_status) (void __iomem *ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
- void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
+ void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time);
/* Set power management mode (e.g. magic frame) */
- void (*pmt) (unsigned long ioaddr, unsigned long mode);
+ void (*pmt) (void __iomem *ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
- void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
};
@@ -238,16 +238,15 @@ struct mac_device_info {
struct stmmac_ops *mac;
struct stmmac_desc_ops *desc;
struct stmmac_dma_ops *dma;
- unsigned int pmt; /* support Power-Down */
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
};
-struct mac_device_info *dwmac1000_setup(unsigned long addr);
-struct mac_device_info *dwmac100_setup(unsigned long addr);
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
-extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
-extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
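
The common.h changes above retype every register-base argument from unsigned long to void __iomem *, so sparse can verify that the mapping is only touched through readl()/writel(). A generic sketch of that usage, with a made-up register offset:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/types.h>

static u32 example_read_id(struct resource *res)
{
	void __iomem *ioaddr;
	u32 id;

	ioaddr = ioremap(res->start, resource_size(res));
	if (!ioaddr)
		return 0;	/* mapping failed */

	id = readl(ioaddr + 0x20);	/* hypothetical ID register offset */
	iounmap(ioaddr);

	return id;
}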
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 8b20b19971c..81ee4fd0438 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -99,7 +99,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+ GMAC_CONTROL_JE | GMAC_CONTROL_BE)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 2b2f5c8caf1..65667b69202 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include "dwmac1000.h"
-static void dwmac1000_core_init(unsigned long ioaddr)
+static void dwmac1000_core_init(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + GMAC_CONTROL);
value |= GMAC_CORE_INIT;
@@ -50,10 +50,22 @@ static void dwmac1000_core_init(unsigned long ioaddr)
#endif
}
-static void dwmac1000_dump_regs(unsigned long ioaddr)
+static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ value |= GMAC_CONTROL_IPC;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ value = readl(ioaddr + GMAC_CONTROL);
+
+ return !!(value & GMAC_CONTROL_IPC);
+}
+
+static void dwmac1000_dump_regs(void __iomem *ioaddr)
{
int i;
- pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+ pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
for (i = 0; i < 55; i++) {
int offset = i * 4;
@@ -62,14 +74,14 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
}
}
-static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
@@ -78,7 +90,7 @@ static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
static void dwmac1000_set_filter(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = (void __iomem *) dev->base_addr;
unsigned int value = 0;
CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
@@ -139,7 +151,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
}
-static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = 0;
@@ -162,7 +174,7 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
writel(flow, ioaddr + GMAC_FLOW_CTRL);
}
-static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
{
unsigned int pmt = 0;
@@ -178,7 +190,7 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
}
-static void dwmac1000_irq_status(unsigned long ioaddr)
+static void dwmac1000_irq_status(void __iomem *ioaddr)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
@@ -202,6 +214,7 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
+ .rx_coe = dwmac1000_rx_coe_supported,
.dump_regs = dwmac1000_dump_regs,
.host_irq_status = dwmac1000_irq_status,
.set_filter = dwmac1000_set_filter,
@@ -211,7 +224,7 @@ struct stmmac_ops dwmac1000_ops = {
.get_umac_addr = dwmac1000_get_umac_addr,
};
-struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
{
struct mac_device_info *mac;
u32 uid = readl(ioaddr + GMAC_VERSION);
@@ -226,7 +239,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
mac->mac = &dwmac1000_ops;
mac->dma = &dwmac1000_dma_ops;
- mac->pmt = PMT_SUPPORTED;
mac->link.port = GMAC_CONTROL_PS;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed = GMAC_CONTROL_FES;
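
The new rx_coe callback above detects the checksum engine by setting GMAC_CONTROL_IPC and reading it back. A stripped-down sketch of that write-then-read-back feature probe (EX_* names are invented for illustration):

#include <linux/io.h>
#include <linux/types.h>

#define EX_CTRL		0x00		/* hypothetical control register */
#define EX_FEATURE	0x00000400	/* hypothetical feature-enable bit */

static bool example_feature_supported(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + EX_CTRL);

	/* try to switch the feature on ... */
	writel(value | EX_FEATURE, ioaddr + EX_CTRL);

	/* ... and check whether the bit stuck; cores built without the
	 * block keep it hard-wired to zero */
	return !!(readl(ioaddr + EX_CTRL) & EX_FEATURE);
}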
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 415805057cb..ce6163e39cd 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -29,14 +29,22 @@
#include "dwmac1000.h"
#include "dwmac_dma.h"
-static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
((pbl << DMA_BUS_MODE_PBL_SHIFT) |
@@ -58,7 +66,7 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
return 0;
}
-static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -111,12 +119,12 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
/* Not yet implemented --- no RMON module */
static void dwmac1000_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x, unsigned long ioaddr)
+ struct stmmac_extra_stats *x, void __iomem *ioaddr)
{
return;
}
-static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
{
int i;
pr_info(" DMA registers\n");
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index 2fb165fa2ba..94eeccf3a8a 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -31,7 +31,7 @@
#include <linux/crc32.h>
#include "dwmac100.h"
-static void dwmac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -42,12 +42,17 @@ static void dwmac100_core_init(unsigned long ioaddr)
#endif
}
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
+{
+ return 0;
+}
+
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t DWMAC 100 CSR (base addr = 0x%p)\n"
"\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
@@ -77,18 +82,18 @@ static void dwmac100_dump_mac_regs(unsigned long ioaddr)
MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
}
-static void dwmac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(void __iomem *ioaddr)
{
return;
}
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
@@ -96,7 +101,7 @@ static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
static void dwmac100_set_filter(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = (void __iomem *) dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
if (dev->flags & IFF_PROMISC) {
@@ -145,7 +150,7 @@ static void dwmac100_set_filter(struct net_device *dev)
readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
}
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -158,13 +163,14 @@ static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
/* No PMT module supported for this Ethernet Controller.
* Tested on ST platforms only.
*/
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
{
return;
}
struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
+ .rx_coe = dwmac100_rx_coe_supported,
.dump_regs = dwmac100_dump_mac_regs,
.host_irq_status = dwmac100_irq_status,
.set_filter = dwmac100_set_filter,
@@ -174,7 +180,7 @@ struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
{
struct mac_device_info *mac;
@@ -187,7 +193,6 @@ struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
mac->mac = &dwmac100_ops;
mac->dma = &dwmac100_dma_ops;
- mac->pmt = PMT_NOT_SUPPORTED;
mac->link.port = MAC_CONTROL_PS;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed = 0;
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index 2fece7b7272..96aac93b789 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -31,14 +31,22 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -58,7 +66,7 @@ static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -73,7 +81,7 @@ static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
writel(csr6, ioaddr + DMA_CONTROL);
}
-static void dwmac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
{
int i;
@@ -91,7 +99,7 @@ static void dwmac100_dump_dma_regs(unsigned long ioaddr)
/* DMA controller has two counters to track the number of
* the receive missed frames. */
static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+ void __iomem *ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index 7b815a1b7b8..da3f5ccf83d 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -97,12 +97,12 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
-extern void dwmac_enable_dma_irq(unsigned long ioaddr);
-extern void dwmac_disable_dma_irq(unsigned long ioaddr);
-extern void dwmac_dma_start_tx(unsigned long ioaddr);
-extern void dwmac_dma_stop_tx(unsigned long ioaddr);
-extern void dwmac_dma_start_rx(unsigned long ioaddr);
-extern void dwmac_dma_stop_rx(unsigned long ioaddr);
-extern int dwmac_dma_interrupt(unsigned long ioaddr,
+extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_dma_start_tx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
+extern void dwmac_dma_start_rx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
+extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index a85415216ef..d65fab1ba79 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -32,43 +32,43 @@
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(unsigned long ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
{
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(unsigned long ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr)
{
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(unsigned long ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr)
{
writel(0, ioaddr + DMA_INTR_ENA);
}
-void dwmac_dma_start_tx(unsigned long ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_tx(unsigned long ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_start_rx(unsigned long ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_rx(unsigned long ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
@@ -145,7 +145,7 @@ static void show_rx_process_state(unsigned int status)
}
#endif
-int dwmac_dma_interrupt(unsigned long ioaddr,
+int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x)
{
int ret = 0;
@@ -219,7 +219,7 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
return ret;
}
-void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -227,7 +227,7 @@ void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
@@ -238,7 +238,7 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
writel(data, ioaddr + low);
}
-void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low)
{
unsigned int hi_addr, lo_addr;
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index f612f986a7e..5d1471d8f8f 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -25,7 +25,7 @@
#include "common.h"
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+ struct dma_desc *p, void __iomem *ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -284,7 +284,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
- memset(p, 0, sizeof(struct dma_desc));
+ memset(p, 0, offsetof(struct dma_desc, des2));
p->des01.etx.end_ring = ter;
}
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
index 31ad5364379..0dce90cb812 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/stmmac/norm_desc.c
@@ -25,7 +25,7 @@
#include "common.h"
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+ struct dma_desc *p, void __iomem *ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -174,22 +174,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
- /* clean field used within the xmit */
- p->des01.tx.first_segment = 0;
- p->des01.tx.last_segment = 0;
- p->des01.tx.buffer1_size = 0;
-
- /* clean status reported */
- p->des01.tx.error_summary = 0;
- p->des01.tx.underflow_error = 0;
- p->des01.tx.no_carrier = 0;
- p->des01.tx.loss_carrier = 0;
- p->des01.tx.excessive_deferral = 0;
- p->des01.tx.excessive_collisions = 0;
- p->des01.tx.late_collision = 0;
- p->des01.tx.heartbeat_fail = 0;
- p->des01.tx.deferred = 0;
-
+ memset(p, 0, offsetof(struct dma_desc, des2));
/* set termination field */
p->des01.tx.end_ring = ter;
}
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ebebc644b1b..92154ff7d70 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -21,6 +21,7 @@
*******************************************************************************/
#define DRV_MODULE_VERSION "Apr_2010"
+#include <linux/platform_device.h>
#include <linux/stmmac.h>
#include "common.h"
@@ -50,10 +51,10 @@ struct stmmac_priv {
int is_gmac;
dma_addr_t dma_rx_phy;
unsigned int dma_rx_size;
- int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
struct mac_device_info *hw;
+ void __iomem *ioaddr;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -65,7 +66,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
- void (*bus_setup)(unsigned long ioaddr);
+ void (*bus_setup)(void __iomem *ioaddr);
void *bsp_priv;
int phy_irq;
@@ -76,6 +77,7 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
+ int mii_clk_csr;
u32 msg_enable;
spinlock_t lock;
@@ -89,6 +91,9 @@ struct stmmac_priv {
struct vlan_group *vlgrp;
#endif
int enh_desc;
+ int rx_coe;
+ int bugged_jumbo;
+ int no_csum_insertion;
};
#ifdef CONFIG_STM_DRIVERS
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index f080509923f..25a7e385f8e 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -177,21 +177,21 @@ void stmmac_ethtool_gregs(struct net_device *dev,
if (!priv->is_gmac) {
/* MAC registers */
for (i = 0; i < 12; i++)
- reg_space[i] = readl(dev->base_addr + (i * 4));
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
/* DMA registers */
for (i = 0; i < 9; i++)
reg_space[i + 12] =
- readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
- reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
- reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
} else {
/* MAC registers */
for (i = 0; i < 55; i++)
- reg_space[i] = readl(dev->base_addr + (i * 4));
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
/* DMA registers */
for (i = 0; i < 22; i++)
reg_space[i + 55] =
- readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
}
}
@@ -209,7 +209,7 @@ u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- return priv->rx_csum;
+ return priv->rx_coe;
}
static void
@@ -263,11 +263,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
cmd.phy_address = phy->addr;
ret = phy_ethtool_sset(phy, &cmd);
}
- } else {
- unsigned long ioaddr = netdev->base_addr;
- priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ } else
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
- }
spin_unlock(&priv->lock);
return ret;
}
@@ -276,12 +274,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
int i;
/* Update HW stats if supported */
priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
- ioaddr);
+ priv->ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -325,7 +322,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct stmmac_priv *priv = netdev_priv(dev);
spin_lock_irq(&priv->lock);
- if (priv->wolenabled == PMT_SUPPORTED) {
+ if (device_can_wakeup(priv->device)) {
wol->supported = WAKE_MAGIC;
wol->wolopts = priv->wolopts;
}
@@ -337,16 +334,20 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct stmmac_priv *priv = netdev_priv(dev);
u32 support = WAKE_MAGIC;
- if (priv->wolenabled == PMT_NOT_SUPPORTED)
+ if (!device_can_wakeup(priv->device))
return -EINVAL;
if (wol->wolopts & ~support)
return -EINVAL;
- if (wol->wolopts == 0)
- device_set_wakeup_enable(priv->device, 0);
- else
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(dev->irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(dev->irq);
+ }
spin_lock_irq(&priv->lock);
priv->wolopts = wol->wolopts;
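
The set_wol rework above pairs device_set_wakeup_enable() with enable_irq_wake()/disable_irq_wake() so the PM core and the interrupt line agree on wake capability. A minimal sketch of that pairing (example_set_wol is hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static void example_set_wol(struct device *dev, int irq, bool enable)
{
	if (enable) {
		/* tell the PM core and make the IRQ a wakeup source */
		device_set_wakeup_enable(dev, true);
		enable_irq_wake(irq);
	} else {
		device_set_wakeup_enable(dev, false);
		disable_irq_wake(irq);
	}
}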
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index bbb7951b9c4..823b9e6431d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -134,13 +134,6 @@ static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-/* In case of Giga ETH, we can enable/disable the COE for the
- * transmit HW checksum computation.
- * Note that, if tx csum is off in HW, SG will be still supported. */
-static int tx_coe = HW_CSUM;
-module_param(tx_coe, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
@@ -202,7 +195,6 @@ static void stmmac_adjust_link(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
- unsigned long ioaddr = dev->base_addr;
unsigned long flags;
int new_state = 0;
unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
@@ -215,7 +207,7 @@ static void stmmac_adjust_link(struct net_device *dev)
spin_lock_irqsave(&priv->lock, flags);
if (phydev->link) {
- u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+ u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
@@ -229,7 +221,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/* Flow Control operation */
if (phydev->pause)
- priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
fc, pause_time);
if (phydev->speed != priv->speed) {
@@ -238,6 +230,9 @@ static void stmmac_adjust_link(struct net_device *dev)
case 1000:
if (likely(priv->is_gmac))
ctrl &= ~priv->hw->link.port;
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
case 100:
case 10:
@@ -265,7 +260,7 @@ static void stmmac_adjust_link(struct net_device *dev)
priv->speed = phydev->speed;
}
- writel(ctrl, ioaddr + MAC_CTRL_REG);
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
if (!priv->oldlink) {
new_state = 1;
@@ -342,7 +337,7 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value |= MAC_RNABLE_RX;
@@ -350,7 +345,7 @@ static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
+static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value |= MAC_ENABLE_TX;
@@ -358,14 +353,14 @@ static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value &= ~MAC_RNABLE_RX;
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
+static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value &= ~MAC_ENABLE_TX;
@@ -567,29 +562,22 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
- * or Store-And-Forward capability. It also verifies the COE for the
- * transmission in case of Giga ETH.
+ * or Store-And-Forward capability.
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (!priv->is_gmac) {
- /* MAC 10/100 */
- priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
- priv->tx_coe = NO_HW_CSUM;
- } else {
- if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->hw->dma->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
- tc = SF_DMA_MODE;
- priv->tx_coe = HW_CSUM;
- } else {
- /* Checksum computation is performed in software. */
- priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
- priv->tx_coe = NO_HW_CSUM;
- }
- }
- tx_coe = priv->tx_coe;
+ if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
+ /* In case of GMAC, SF mode has to be enabled
+ * to perform the TX COE. This depends on:
+ * 1) TX COE is actually supported
+ * 2) there is no bugged Jumbo frame support
+ * that requires not inserting the csum in the TDES.
+ */
+ priv->hw->dma->dma_mode(priv->ioaddr,
+ SF_DMA_MODE, SF_DMA_MODE);
+ tc = SF_DMA_MODE;
+ } else
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
/**
@@ -600,7 +588,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static void stmmac_tx(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
- unsigned long ioaddr = priv->dev->base_addr;
while (priv->dirty_tx != priv->cur_tx) {
int last;
@@ -618,7 +605,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
int tx_error =
priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
- ioaddr);
+ priv->ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -674,7 +661,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -684,7 +671,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
+ priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -739,14 +726,15 @@ static void stmmac_no_timer_stopped(void)
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
+
netif_stop_queue(priv->dev);
- priv->hw->dma->stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
dma_free_tx_skbufs(priv);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- priv->hw->dma->start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->ioaddr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -755,11 +743,9 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
- unsigned long ioaddr = priv->dev->base_addr;
int status;
- status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
- &priv->xstats);
+ status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
if (likely(status == handle_tx_rx))
_stmmac_schedule(priv);
@@ -767,7 +753,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
/* Try to bump up the dma threshold on this failure */
if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
tc += 64;
- priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
stmmac_tx_err(priv);
@@ -787,7 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
int ret;
/* Check that the MAC address is valid. If its not, refuse
@@ -843,7 +828,8 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+ priv->dma_tx_phy,
priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
@@ -851,22 +837,28 @@ static int stmmac_open(struct net_device *dev)
}
/* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
/* If required, perform hw setup of the bus. */
if (priv->bus_setup)
- priv->bus_setup(ioaddr);
+ priv->bus_setup(priv->ioaddr);
/* Initialize the MAC Core */
- priv->hw->mac->core_init(ioaddr);
+ priv->hw->mac->core_init(priv->ioaddr);
+
+ priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+ if (priv->rx_coe)
+ pr_info("stmmac: Rx Checksum Offload Engine supported\n");
+ if (priv->tx_coe)
+ pr_info("\tTX Checksum insertion supported\n");
priv->shutdown = 0;
/* Initialise the MMC (if present) to disable all interrupts. */
- writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
- writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+ writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
+ writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
/* Enable the MAC Rx/Tx */
- stmmac_mac_enable_rx(ioaddr);
- stmmac_mac_enable_tx(ioaddr);
+ stmmac_mac_enable_rx(priv->ioaddr);
+ stmmac_mac_enable_tx(priv->ioaddr);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -877,16 +869,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- priv->hw->dma->start_tx(ioaddr);
- priv->hw->dma->start_rx(ioaddr);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->hw->mac->dump_regs(ioaddr);
- priv->hw->dma->dump_regs(ioaddr);
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
}
if (priv->phydev)
@@ -930,15 +922,15 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- priv->hw->dma->stop_tx(dev->base_addr);
- priv->hw->dma->stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
/* Disable the MAC core */
- stmmac_mac_disable_tx(dev->base_addr);
- stmmac_mac_disable_rx(dev->base_addr);
+ stmmac_mac_disable_tx(priv->ioaddr);
+ stmmac_mac_disable_rx(priv->ioaddr);
netif_carrier_off(dev);
@@ -1066,7 +1058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_sw_tso(priv, skb);
if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
- if (likely(priv->tx_coe == NO_HW_CSUM))
+ if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
skb_checksum_help(skb);
else
csum_insertion = 1;
@@ -1140,7 +1132,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- priv->hw->dma->enable_dma_transmission(dev->base_addr);
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
return NETDEV_TX_OK;
}
@@ -1256,7 +1248,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (unlikely(status == csum_none)) {
/* always for the old mac 10/100 */
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_receive_skb(skb);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1390,6 +1382,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
+ /* Some GMAC devices have bugged Jumbo frame support that
+ * needs the Tx COE disabled for oversized frames
+ * (due to limited buffer sizes). In this case we disable
+ * the TX csum insertion in the TDES and do not use SF. */
+ if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
+ priv->no_csum_insertion = 1;
+ else
+ priv->no_csum_insertion = 0;
+
dev->mtu = new_mtu;
return 0;
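
The xmit and change_mtu hunks above fall back to software checksumming whenever TX COE is absent or csum insertion has been disabled for bugged jumbo frames. A small sketch of that decision in isolation (example_prepare_tx_csum and the hw_csum_ok flag are illustrative only):

#include <linux/skbuff.h>

static int example_prepare_tx_csum(struct sk_buff *skb, bool hw_csum_ok)
{
	/* the stack asked for checksum insertion but the hardware
	 * cannot do it for this frame: compute it in software */
	if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_csum_ok)
		return skb_checksum_help(skb);

	return 0;	/* hardware inserts it, or no csum was requested */
}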
@@ -1405,11 +1406,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->is_gmac) {
- unsigned long ioaddr = dev->base_addr;
+ if (priv->is_gmac)
/* To handle GMAC own interrupts */
- priv->hw->mac->host_irq_status(ioaddr);
- }
+ priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
stmmac_dma_interrupt(priv);
@@ -1512,9 +1511,6 @@ static int stmmac_probe(struct net_device *dev)
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
- if (priv->is_gmac)
- priv->rx_csum = 1;
-
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -1522,7 +1518,8 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
+ dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1552,14 +1549,13 @@ static int stmmac_probe(struct net_device *dev)
static int stmmac_mac_device_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
struct mac_device_info *device;
if (priv->is_gmac)
- device = dwmac1000_setup(ioaddr);
+ device = dwmac1000_setup(priv->ioaddr);
else
- device = dwmac100_setup(ioaddr);
+ device = dwmac100_setup(priv->ioaddr);
if (!device)
return -ENOMEM;
@@ -1572,9 +1568,8 @@ static int stmmac_mac_device_setup(struct net_device *dev)
priv->hw = device;
- priv->wolenabled = priv->hw->pmt; /* PMT supported */
- if (priv->wolenabled == PMT_SUPPORTED)
- priv->wolopts = WAKE_MAGIC; /* Magic Frame */
+ if (device_can_wakeup(priv->device))
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
return 0;
}
@@ -1653,7 +1648,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
- unsigned int *addr = NULL;
+ void __iomem *addr = NULL;
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
struct plat_stmmacenet_data *plat_dat;
@@ -1664,7 +1659,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
ret = -ENODEV;
goto out;
}
- pr_info("done!\n");
+ pr_info("\tdone!\n");
if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
@@ -1706,8 +1701,18 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
+ priv->mii_clk_csr = plat_dat->clk_csr;
+ priv->tx_coe = plat_dat->tx_coe;
+ priv->bugged_jumbo = plat_dat->bugged_jumbo;
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
priv->enh_desc = plat_dat->enh_desc;
+ priv->ioaddr = addr;
+
+ /* PMT module is not integrated in all the MAC devices. */
+ if (plat_dat->pmt) {
+ pr_info("\tPMT module supported\n");
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
platform_set_drvdata(pdev, ndev);
@@ -1743,8 +1748,8 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
- "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
- pdev->id, ndev->irq, (unsigned int)addr);
+ "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+ pdev->id, ndev->irq, addr);
/* MDIO bus Registration */
pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
@@ -1779,11 +1784,11 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
pr_info("%s:\n\tremoving driver", __func__);
- priv->hw->dma->stop_rx(ndev->base_addr);
- priv->hw->dma->stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_mac_disable_rx(ndev->base_addr);
- stmmac_mac_disable_tx(ndev->base_addr);
+ stmmac_mac_disable_rx(priv->ioaddr);
+ stmmac_mac_disable_tx(priv->ioaddr);
netif_carrier_off(ndev);
@@ -1792,7 +1797,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
- iounmap((void *)ndev->base_addr);
+ iounmap((void *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -1827,23 +1832,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- priv->hw->dma->stop_tx(dev->base_addr);
- priv->hw->dma->stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
/* Clear the Rx/Tx descriptors */
priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
dis_ic);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
- stmmac_mac_disable_tx(dev->base_addr);
+ stmmac_mac_disable_tx(priv->ioaddr);
- if (device_may_wakeup(&(pdev->dev))) {
- /* Enable Power down mode by programming the PMT regs */
- if (priv->wolenabled == PMT_SUPPORTED)
- priv->hw->mac->pmt(dev->base_addr,
- priv->wolopts);
- } else {
- stmmac_mac_disable_rx(dev->base_addr);
- }
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_can_wakeup(priv->device))
+ priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+ else
+ stmmac_mac_disable_rx(priv->ioaddr);
} else {
priv->shutdown = 1;
/* Although this can appear slightly redundant it actually
@@ -1860,36 +1862,34 @@ static int stmmac_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
if (!netif_running(dev))
return 0;
- spin_lock(&priv->lock);
-
if (priv->shutdown) {
/* Re-open the interface and re-init the MAC/DMA
- and the rings. */
+ and the rings (i.e. on the hibernation path) */
stmmac_open(dev);
- goto out_resume;
+ return 0;
}
+ spin_lock(&priv->lock);
+
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
* this bit because it can generate problems while resuming
* from other devices (e.g. serial console). */
- if (device_may_wakeup(&(pdev->dev)))
- if (priv->wolenabled == PMT_SUPPORTED)
- priv->hw->mac->pmt(dev->base_addr, 0);
+ if (device_can_wakeup(priv->device))
+ priv->hw->mac->pmt(priv->ioaddr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
- stmmac_mac_enable_rx(ioaddr);
- stmmac_mac_enable_tx(ioaddr);
- priv->hw->dma->start_tx(ioaddr);
- priv->hw->dma->start_rx(ioaddr);
+ stmmac_mac_enable_rx(priv->ioaddr);
+ stmmac_mac_enable_tx(priv->ioaddr);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev)
netif_start_queue(dev);
-out_resume:
spin_unlock(&priv->lock);
return 0;
}
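
The suspend/resume hunks above replace the driver's private wolenabled/PMT_SUPPORTED bookkeeping with the generic device wakeup flags. A kernel-style sketch of that handshake, not taken from the patch, with hypothetical function names:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

/* probe side: advertise wakeup only when the MAC integrates a PMT block */
static void sketch_declare_wakeup(struct device *dev, int has_pmt)
{
	if (has_pmt)
		device_set_wakeup_capable(dev, 1);
}

/* suspend side: the caller arms the PMT for magic packets when this
 * returns true, otherwise it simply powers the RX MAC down */
static bool sketch_should_arm_pmt(struct device *dev)
{
	return device_can_wakeup(dev);
}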
@@ -1969,8 +1968,6 @@ static int __init stmmac_cmdline_opt(char *str)
strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
else if (!strncmp(opt, "tc:", 3))
strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
- else if (!strncmp(opt, "tx_coe:", 7))
- strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
else if (!strncmp(opt, "watchdog:", 9))
strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
else if (!strncmp(opt, "flow_ctrl:", 10))
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 40b2c792971..d7441616357 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -47,21 +47,20 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
((phyreg << 6) & (0x000007C0)));
- regValue |= MII_BUSY; /* in case of GMAC */
+ regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
- writel(regValue, ioaddr + mii_address);
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+ writel(regValue, priv->ioaddr + mii_address);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
/* Read the data from the MII data register */
- data = (int)readl(ioaddr + mii_data);
+ data = (int)readl(priv->ioaddr + mii_data);
return data;
}
@@ -79,7 +78,6 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
@@ -87,17 +85,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
| MII_WRITE;
- value |= MII_BUSY;
+ value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+
/* Wait until any existing MII operation is complete */
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
/* Set the MII address register to write */
- writel(phydata, ioaddr + mii_data);
- writel(value, ioaddr + mii_address);
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
/* Wait until any existing MII operation is complete */
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
return 0;
}
@@ -111,7 +110,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
@@ -123,7 +121,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
* It doesn't complete its reset until at least one clock cycle
* on MDC, so perform a dummy mdio read.
*/
- writel(0, ioaddr + mii_address);
+ writel(0, priv->ioaddr + mii_address);
return 0;
}
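
The mii_clk_csr change above packs a CSR clock-range field into bits 4:2 of the GMII address word, next to the busy flag. A stand-alone helper showing the resulting layout; the MII_BUSY/MII_WRITE bit values are assumed from the driver headers, the shifts and masks mirror the hunk:

#include <stdio.h>
#include <stdint.h>

#define MII_BUSY  0x00000001	/* assumed driver value: "GMII busy" bit */
#define MII_WRITE 0x00000002	/* assumed driver value: read/write select */

static uint32_t gmii_addr_word(int phyaddr, int phyreg, int clk_csr, int write)
{
	uint32_t v = ((phyaddr << 11) & 0x0000F800) |
		     ((phyreg  <<  6) & 0x000007C0);

	v |= MII_BUSY | ((clk_csr & 7) << 2);
	if (write)
		v |= MII_WRITE;
	return v;
}

int main(void)
{
	/* PHY 1, register 2, CSR clock range 4, read access */
	printf("GMII address word: 0x%08x\n", gmii_addr_word(1, 2, 4, 0));
	return 0;
}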
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 358c22f9acb..7d9ec23aabf 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -436,7 +436,7 @@ static int lance_open( struct net_device *dev )
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;
- return( -EIO );
+ return -EIO;
}
DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
@@ -445,7 +445,7 @@ static int lance_open( struct net_device *dev )
DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
- return( 0 );
+ return 0;
}
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 367e96f317d..0a6a5ced3c1 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -97,7 +97,7 @@ static int qec_global_reset(void __iomem *gregs)
static void qec_init(struct bigmac *bp)
{
- struct of_device *qec_op = bp->qec_op;
+ struct platform_device *qec_op = bp->qec_op;
void __iomem *gregs = bp->gregs;
u8 bsizes = bp->bigmac_bursts;
u32 regval;
@@ -617,7 +617,7 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
bp->timer_ticks = 0;
bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
bp->bigmac_timer.data = (unsigned long) bp;
- bp->bigmac_timer.function = &bigmac_timer;
+ bp->bigmac_timer.function = bigmac_timer;
add_timer(&bp->bigmac_timer);
}
@@ -1083,8 +1083,8 @@ static const struct net_device_ops bigmac_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit bigmac_ether_init(struct of_device *op,
- struct of_device *qec_op)
+static int __devinit bigmac_ether_init(struct platform_device *op,
+ struct platform_device *qec_op)
{
static int version_printed;
struct net_device *dev;
@@ -1201,7 +1201,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
dev->watchdog_timeo = 5*HZ;
/* Finish net device registration. */
- dev->irq = bp->bigmac_op->irqs[0];
+ dev->irq = bp->bigmac_op->archdata.irqs[0];
dev->dma = 0;
if (register_netdev(dev)) {
@@ -1242,25 +1242,25 @@ fail_and_cleanup:
/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
* the latter.
*/
-static int __devinit bigmac_sbus_probe(struct of_device *op,
+static int __devinit bigmac_sbus_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device *parent = op->dev.parent;
- struct of_device *qec_op;
+ struct platform_device *qec_op;
- qec_op = to_of_device(parent);
+ qec_op = to_platform_device(parent);
return bigmac_ether_init(op, qec_op);
}
-static int __devexit bigmac_sbus_remove(struct of_device *op)
+static int __devexit bigmac_sbus_remove(struct platform_device *op)
{
struct bigmac *bp = dev_get_drvdata(&op->dev);
struct device *parent = op->dev.parent;
struct net_device *net_dev = bp->dev;
- struct of_device *qec_op;
+ struct platform_device *qec_op;
- qec_op = to_of_device(parent);
+ qec_op = to_platform_device(parent);
unregister_netdev(net_dev);
@@ -1301,12 +1301,12 @@ static struct of_platform_driver bigmac_sbus_driver = {
static int __init bigmac_init(void)
{
- return of_register_driver(&bigmac_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&bigmac_sbus_driver);
}
static void __exit bigmac_exit(void)
{
- of_unregister_driver(&bigmac_sbus_driver);
+ of_unregister_platform_driver(&bigmac_sbus_driver);
}
module_init(bigmac_init);
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
index 8840bc0b840..8db88945b88 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/sunbmac.h
@@ -329,8 +329,8 @@ struct bigmac {
unsigned int timer_ticks;
struct net_device_stats enet_stats;
- struct of_device *qec_op;
- struct of_device *bigmac_op;
+ struct platform_device *qec_op;
+ struct platform_device *bigmac_op;
struct net_device *dev;
};
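
The sunbmac hunks above, and the sunhme/sunlance/sunqe ones further down, repeat the same of_device-to-platform_device conversion. A minimal kernel-style sketch of the two recurring idioms, assuming a sparc build (the archdata.irqs field is arch-specific) and hypothetical helper names:

#include <linux/platform_device.h>

/* the SBUS parent (e.g. the QEC) is now reached via the generic helper */
static struct platform_device *sketch_parent(struct platform_device *op)
{
	return to_platform_device(op->dev.parent);
}

/* interrupt numbers moved from op->irqs[] into the platform archdata */
static int sketch_first_irq(struct platform_device *op)
{
	return op->archdata.irqs[0];
}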
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b..4283cc52a8c 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -96,16 +96,10 @@ static char *media[MAX_UNITS];
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#ifndef _COMPAT_WITH_OLD_KERNEL
+#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
-#else
-#include "crc32.h"
-#include "ethtool.h"
-#include "mii.h"
-#include "compat.h"
-#endif
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
@@ -371,7 +365,6 @@ struct netdev_private {
struct timer_list timer; /* Media monitoring timer. */
/* Frequently used values: keep some adjacent for cache effect. */
spinlock_t lock;
- spinlock_t rx_lock; /* Group with Tx control cache line. */
int msg_enable;
int chip_id;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
@@ -396,6 +389,7 @@ struct netdev_private {
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
struct pci_dev *pci_dev;
void __iomem *base;
+ spinlock_t statlock;
};
/* The station address location in the EEPROM. */
@@ -520,16 +514,19 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
np->chip_id = chip_idx;
np->msg_enable = (1 << debug) - 1;
spin_lock_init(&np->lock);
+ spin_lock_init(&np->statlock);
tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
- ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
if (!ring_space)
goto err_out_cleardev;
np->tx_ring = (struct netdev_desc *)ring_space;
np->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = (struct netdev_desc *)ring_space;
@@ -663,9 +660,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
err_out_unregister:
unregister_netdev(dev);
err_out_unmap_rx:
- pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
@@ -874,7 +873,7 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = jiffies + 3*HZ;
np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
+ np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
/* Enable interrupts by setting the interrupt mask. */
@@ -1011,8 +1010,14 @@ static void init_ring(struct net_device *dev)
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
np->rx_ring[i].frag[0].addr = cpu_to_le32(
- pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&np->pci_dev->dev,
+ np->rx_ring[i].frag[0].addr)) {
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ break;
+ }
np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
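
The init_ring(), start_tx() and refill_rx() hunks layer the same checked-mapping pattern on top of the pci_* to dma_* API conversion. A kernel-style sketch of that pattern, not from the patch, with a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

static int sketch_map_rx_buffer(struct device *dev, struct sk_buff *skb,
				unsigned int len, dma_addr_t *addr)
{
	*addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		/* never hand a bad bus address to the NIC; drop the skb
		 * and let the caller leave the ring slot empty */
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	return 0;
}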
@@ -1063,9 +1068,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
- skb->len,
- PCI_DMA_TODEVICE));
+ txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+ skb->data, skb->len, DMA_TO_DEVICE));
+ if (dma_mapping_error(&np->pci_dev->dev,
+ txdesc->frag[0].addr))
+ goto drop_frame;
txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1094,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
dev->name, np->cur_tx, entry);
}
return NETDEV_TX_OK;
+
+drop_frame:
+ dev_kfree_skb(skb);
+ np->tx_skbuff[entry] = NULL;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
/* Reset hardware tx and free all of tx buffers */
@@ -1097,7 +1110,6 @@ reset_tx (struct net_device *dev)
void __iomem *ioaddr = np->base;
struct sk_buff *skb;
int i;
- int irq = in_interrupt();
/* Reset tx logic, TxListPtr will be cleaned */
iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1121,10 @@ reset_tx (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->tx_ring[i].frag[0].addr),
- skb->len, PCI_DMA_TODEVICE);
- if (irq)
- dev_kfree_skb_irq (skb);
- else
- dev_kfree_skb (skb);
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
np->tx_skbuff[i] = NULL;
dev->stats.tx_dropped++;
}
@@ -1233,9 +1242,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
break;
skb = np->tx_skbuff[entry];
/* Free the original skb. */
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->tx_ring[entry].frag[0].addr),
- skb->len, PCI_DMA_TODEVICE);
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1261,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
break;
skb = np->tx_skbuff[entry];
/* Free the original skb. */
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->tx_ring[entry].frag[0].addr),
- skb->len, PCI_DMA_TODEVICE);
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1343,18 @@ static void rx_poll(unsigned long data)
if (pkt_len < rx_copybreak &&
(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,
- le32_to_cpu(desc->frag[0].addr),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
-
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag[0].addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- le32_to_cpu(desc->frag[0].addr),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag[0].addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(desc->frag[0].addr),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
@@ -1396,8 +1401,14 @@ static void refill_rx (struct net_device *dev)
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
np->rx_ring[entry].frag[0].addr = cpu_to_le32(
- pci_map_single(np->pci_dev, skb->data,
- np->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&np->pci_dev->dev,
+ np->rx_ring[entry].frag[0].addr)) {
+ dev_kfree_skb_irq(skb);
+ np->rx_skbuff[entry] = NULL;
+ break;
+ }
}
/* Perhaps we need not reset this field. */
np->rx_ring[entry].frag[0].length =
@@ -1476,10 +1487,9 @@ static struct net_device_stats *get_stats(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
int i;
+ unsigned long flags;
- /* We should lock this segment of code for SMP eventually, although
- the vulnerability window is very small and statistics are
- non-critical. */
+ spin_lock_irqsave(&np->statlock, flags);
/* The chip only need report frame silently dropped. */
dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
@@ -1496,6 +1506,8 @@ static struct net_device_stats *get_stats(struct net_device *dev)
dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
+ spin_unlock_irqrestore(&np->statlock, flags);
+
return &dev->stats;
}
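
get_stats() above now serializes its pass over the MMIO counters with a dedicated spinlock rather than relying on the old "small window" comment. A kernel-style sketch of the locking shape, with assumed names:

#include <linux/spinlock.h>
#include <linux/io.h>

struct sketch_stats {
	spinlock_t statlock;	/* spin_lock_init() done at probe time */
	void __iomem *ioaddr;
	unsigned long rx_missed;
};

static void sketch_accumulate(struct sketch_stats *s, unsigned int missed_reg)
{
	unsigned long flags;

	/* accumulating hardware registers into software counters is a
	 * read-modify-write sequence, so keep concurrent readers out */
	spin_lock_irqsave(&s->statlock, flags);
	s->rx_missed += ioread8(s->ioaddr + missed_reg);
	spin_unlock_irqrestore(&s->statlock, flags);
}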
@@ -1715,9 +1727,9 @@ static int netdev_close(struct net_device *dev)
np->rx_ring[i].status = 0;
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->rx_ring[i].frag[0].addr),
- np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
@@ -1727,9 +1739,9 @@ static int netdev_close(struct net_device *dev)
np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->tx_ring[i].frag[0].addr),
- skb->len, PCI_DMA_TODEVICE);
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
}
@@ -1743,25 +1755,72 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
- struct netdev_private *np = netdev_priv(dev);
-
- unregister_netdev(dev);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
- pci_iounmap(pdev, np->base);
- pci_release_regions(pdev);
- free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
+ struct netdev_private *np = netdev_priv(dev);
+ unregister_netdev(dev);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+ pci_iounmap(pdev, np->base);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
}
}
+#ifdef CONFIG_PM
+
+static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netdev_close(dev);
+ netif_device_detach(dev);
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+ return 0;
+}
+
+static int sundance_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ int err = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ err = netdev_open(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Can't resume interface!\n",
+ dev->name);
+ goto out;
+ }
+
+ netif_device_attach(dev);
+
+out:
+ return err;
+}
+
+#endif /* CONFIG_PM */
+
static struct pci_driver sundance_driver = {
.name = DRV_NAME,
.id_table = sundance_pci_tbl,
.probe = sundance_probe1,
.remove = __devexit_p(sundance_remove1),
+#ifdef CONFIG_PM
+ .suspend = sundance_suspend,
+ .resume = sundance_resume,
+#endif /* CONFIG_PM */
};
static int __init sundance_init(void)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 434f9d73533..4ceb3cf6a9a 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -31,6 +31,8 @@
* about when we can start taking interrupts or get xmit() called...
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
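
The pr_fmt() definition added above is what lets the pr_err()/pr_warning() calls later in the file drop the old PFX prefix (the netdev_err() conversions separately pick up the device name). A small user-space illustration of the mechanism, using the GNU C variadic macros the kernel also relies on; KBUILD_MODNAME is defined here by hand as a stand-in for the build system:

#include <stdio.h>

#define KBUILD_MODNAME "sungem"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", 0x1234u);
	/* prints: sungem: RIO GEM lacks MII phy, mif_cfg[00001234] */
	return 0;
}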
@@ -105,7 +107,6 @@ MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
-#define PFX GEM_MODULE_NAME ": "
static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
@@ -262,8 +263,7 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
gp->dev->name, pcs_istat);
if (!(pcs_istat & PCS_ISTAT_LSC)) {
- printk(KERN_ERR "%s: PCS irq but no link status change???\n",
- dev->name);
+ netdev_err(dev, "PCS irq but no link status change???\n");
return 0;
}
@@ -282,20 +282,16 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
* when autoneg has completed.
*/
if (pcs_miistat & PCS_MIISTAT_RF)
- printk(KERN_INFO "%s: PCS AutoNEG complete, "
- "RemoteFault\n", dev->name);
+ netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
else
- printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
- dev->name);
+ netdev_info(dev, "PCS AutoNEG complete\n");
}
if (pcs_miistat & PCS_MIISTAT_LS) {
- printk(KERN_INFO "%s: PCS link is now up.\n",
- dev->name);
+ netdev_info(dev, "PCS link is now up\n");
netif_carrier_on(gp->dev);
} else {
- printk(KERN_INFO "%s: PCS link is now down.\n",
- dev->name);
+ netdev_info(dev, "PCS link is now down\n");
netif_carrier_off(gp->dev);
/* If this happens and the link timer is not running,
* reset so we re-negotiate.
@@ -323,14 +319,12 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
return 0;
if (txmac_stat & MAC_TXSTAT_URUN) {
- printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
- dev->name);
+ netdev_err(dev, "TX MAC xmit underrun\n");
gp->net_stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
- printk(KERN_ERR "%s: TX MAC max packet size error.\n",
- dev->name);
+ netdev_err(dev, "TX MAC max packet size error\n");
gp->net_stats.tx_errors++;
}
@@ -377,8 +371,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
return 1;
}
@@ -390,8 +383,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
@@ -403,8 +395,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
@@ -419,8 +410,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX reset command will not execute, resetting "
- "whole chip.\n", dev->name);
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
@@ -429,8 +419,7 @@ static int gem_rxmac_reset(struct gem *gp)
struct gem_rxd *rxd = &gp->init_block->rxd[i];
if (gp->rx_skbs[i] == NULL) {
- printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
- "whole chip.\n", dev->name);
+ netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
return 1;
}
@@ -479,8 +468,7 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
if (rxmac_stat & MAC_RXSTAT_OFLW) {
u32 smac = readl(gp->regs + MAC_SMACHINE);
- printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
- dev->name, smac);
+ netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
gp->net_stats.rx_over_errors++;
gp->net_stats.rx_fifo_errors++;
@@ -542,19 +530,18 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
- printk(KERN_ERR "%s: PCI error [%04x] ",
- dev->name, pci_estat);
+ netdev_err(dev, "PCI error [%04x]", pci_estat);
if (pci_estat & GREG_PCIESTAT_BADACK)
- printk("<No ACK64# during ABS64 cycle> ");
+ pr_cont(" <No ACK64# during ABS64 cycle>");
if (pci_estat & GREG_PCIESTAT_DTRTO)
- printk("<Delayed transaction timeout> ");
+ pr_cont(" <Delayed transaction timeout>");
if (pci_estat & GREG_PCIESTAT_OTHER)
- printk("<other>");
- printk("\n");
+ pr_cont(" <other>");
+ pr_cont("\n");
} else {
pci_estat |= GREG_PCIESTAT_OTHER;
- printk(KERN_ERR "%s: PCI error\n", dev->name);
+ netdev_err(dev, "PCI error\n");
}
if (pci_estat & GREG_PCIESTAT_OTHER) {
@@ -565,26 +552,20 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
*/
pci_read_config_word(gp->pdev, PCI_STATUS,
&pci_cfg_stat);
- printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
- dev->name, pci_cfg_stat);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n",
+ pci_cfg_stat);
if (pci_cfg_stat & PCI_STATUS_PARITY)
- printk(KERN_ERR "%s: PCI parity error detected.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error detected\n");
if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI target abort\n");
if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI master acks target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master acks target abort\n");
if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
- printk(KERN_ERR "%s: PCI master abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master abort\n");
if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
- printk(KERN_ERR "%s: PCI system error SERR#.\n",
- dev->name);
+ netdev_err(dev, "PCI system error SERR#\n");
if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
- printk(KERN_ERR "%s: PCI parity error.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
pci_cfg_stat &= (PCI_STATUS_PARITY |
@@ -874,8 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
gp->rx_new = entry;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- gp->dev->name);
+ netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
return work_done;
}
@@ -981,21 +961,19 @@ static void gem_tx_timeout(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
if (!gp->running) {
- printk("%s: hrm.. hw not running !\n", dev->name);
+ netdev_err(dev, "hrm.. hw not running !\n");
return;
}
- printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(gp->regs + TXDMA_CFG),
- readl(gp->regs + MAC_TXSTAT),
- readl(gp->regs + MAC_TXCFG));
- printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(gp->regs + RXDMA_CFG),
- readl(gp->regs + MAC_RXSTAT),
- readl(gp->regs + MAC_RXCFG));
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
+ readl(gp->regs + TXDMA_CFG),
+ readl(gp->regs + MAC_TXSTAT),
+ readl(gp->regs + MAC_TXCFG));
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(gp->regs + RXDMA_CFG),
+ readl(gp->regs + MAC_RXSTAT),
+ readl(gp->regs + MAC_RXCFG));
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
@@ -1048,8 +1026,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&gp->tx_lock, flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1158,8 +1135,7 @@ static void gem_pcs_reset(struct gem *gp)
break;
}
if (limit < 0)
- printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
- gp->dev->name);
+ netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}
static void gem_pcs_reinit_adv(struct gem *gp)
@@ -1230,7 +1206,7 @@ static void gem_reset(struct gem *gp)
} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
if (limit < 0)
- printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+ netdev_err(gp->dev, "SW reset is ghetto\n");
if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
gem_pcs_reinit_adv(gp);
@@ -1395,9 +1371,8 @@ static int gem_set_link_modes(struct gem *gp)
speed = SPEED_1000;
}
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
- gp->dev->name, speed, (full_duplex ? "full" : "half"));
+ netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
+ speed, (full_duplex ? "full" : "half"));
if (!gp->running)
return 0;
@@ -1451,15 +1426,13 @@ static int gem_set_link_modes(struct gem *gp)
if (netif_msg_link(gp)) {
if (pause) {
- printk(KERN_INFO "%s: Pause is enabled "
- "(rxfifo: %d off: %d on: %d)\n",
- gp->dev->name,
- gp->rx_fifo_sz,
- gp->rx_pause_off,
- gp->rx_pause_on);
+ netdev_info(gp->dev,
+ "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ gp->rx_fifo_sz,
+ gp->rx_pause_off,
+ gp->rx_pause_on);
} else {
- printk(KERN_INFO "%s: Pause is disabled\n",
- gp->dev->name);
+ netdev_info(gp->dev, "Pause is disabled\n");
}
}
@@ -1484,9 +1457,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
{
switch (gp->lstate) {
case link_force_ret:
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", gp->dev->name);
+ netif_info(gp, link, gp->dev,
+ "Autoneg failed again, keeping forced mode\n");
gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
gp->last_forced_speed, DUPLEX_HALF);
gp->timer_ticks = 5;
@@ -1499,9 +1471,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
*/
if (gp->phy_mii.def->magic_aneg)
return 1;
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: switching to forced 100bt\n",
- gp->dev->name);
+ netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
/* Try forced modes. */
gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
DUPLEX_HALF);
@@ -1517,9 +1487,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
DUPLEX_HALF);
gp->timer_ticks = 5;
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: switching to forced 10bt\n",
- gp->dev->name);
+ netif_info(gp, link, gp->dev,
+ "switching to forced 10bt\n");
return 0;
} else
return 1;
@@ -1574,8 +1543,8 @@ static void gem_link_timer(unsigned long data)
gp->last_forced_speed = gp->phy_mii.speed;
gp->timer_ticks = 5;
if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Got link after fallback, retrying"
- " autoneg once...\n", gp->dev->name);
+ netdev_info(gp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
} else if (gp->lstate != link_up) {
gp->lstate = link_up;
@@ -1589,9 +1558,7 @@ static void gem_link_timer(unsigned long data)
*/
if (gp->lstate == link_up) {
gp->lstate = link_down;
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Link down\n",
- gp->dev->name);
+ netif_info(gp, link, gp->dev, "Link down\n");
netif_carrier_off(gp->dev);
gp->reset_task_pending = 1;
schedule_work(&gp->reset_task);
@@ -1746,8 +1713,7 @@ static void gem_init_phy(struct gem *gp)
if (phy_read(gp, MII_BMCR) != 0xffff)
break;
if (i == 2)
- printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
- gp->dev->name);
+ netdev_warn(gp->dev, "GMAC PHY not responding !\n");
}
}
@@ -2038,7 +2004,7 @@ static int gem_check_invariants(struct gem *gp)
* as this chip has no gigabit PHY.
*/
if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
- printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
+ pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
mif_cfg);
return -1;
}
@@ -2078,7 +2044,7 @@ static int gem_check_invariants(struct gem *gp)
}
if (i == 32) {
if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
- printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
+ pr_err("RIO MII phy will not respond\n");
return -1;
}
gp->phy_type = phy_serdes;
@@ -2093,7 +2059,7 @@ static int gem_check_invariants(struct gem *gp)
if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
if (gp->tx_fifo_sz != (9 * 1024) ||
gp->rx_fifo_sz != (20 * 1024)) {
- printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
gp->tx_fifo_sz, gp->rx_fifo_sz);
return -1;
}
@@ -2101,7 +2067,7 @@ static int gem_check_invariants(struct gem *gp)
} else {
if (gp->tx_fifo_sz != (2 * 1024) ||
gp->rx_fifo_sz != (2 * 1024)) {
- printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
gp->tx_fifo_sz, gp->rx_fifo_sz);
return -1;
}
@@ -2239,7 +2205,7 @@ static int gem_do_start(struct net_device *dev)
if (request_irq(gp->pdev->irq, gem_interrupt,
IRQF_SHARED, dev->name, (void *)dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
+ netdev_err(dev, "failed to request irq !\n");
spin_lock_irqsave(&gp->lock, flags);
spin_lock(&gp->tx_lock);
@@ -2378,9 +2344,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
mutex_lock(&gp->pm_mutex);
- printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
- dev->name,
- (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
+ netdev_info(dev, "suspending, WakeOnLan %s\n",
+ (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
/* Keep the cell enabled during the entire operation */
spin_lock_irqsave(&gp->lock, flags);
@@ -2440,7 +2405,7 @@ static int gem_resume(struct pci_dev *pdev)
struct gem *gp = netdev_priv(dev);
unsigned long flags;
- printk(KERN_INFO "%s: resuming\n", dev->name);
+ netdev_info(dev, "resuming\n");
mutex_lock(&gp->pm_mutex);
@@ -2452,8 +2417,7 @@ static int gem_resume(struct pci_dev *pdev)
/* Make sure PCI access and bus master are enabled */
if (pci_enable_device(gp->pdev)) {
- printk(KERN_ERR "%s: Can't re-enable chip !\n",
- dev->name);
+ netdev_err(dev, "Can't re-enable chip !\n");
/* Put cell and forget it for now, it will be considered as
* still asleep, a new sleep cycle may bring it back
*/
@@ -2938,7 +2902,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
addr = idprom->id_ethaddr;
#else
printk("\n");
- printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
+ pr_err("%s: can't get mac-address\n", dev->name);
return -1;
#endif
}
@@ -3009,14 +2973,12 @@ static const struct net_device_ops gem_netdev_ops = {
static int __devinit gem_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int gem_version_printed = 0;
unsigned long gemreg_base, gemreg_len;
struct net_device *dev;
struct gem *gp;
int err, pci_using_dac;
- if (gem_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s", version);
/* Apple gmac note: during probe, the chip is powered up by
* the arch code to allow the code below to work (and to let
@@ -3026,8 +2988,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
*/
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable MMIO operation, "
- "aborting.\n");
+ pr_err("Cannot enable MMIO operation, aborting\n");
return err;
}
pci_set_master(pdev);
@@ -3048,8 +3009,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ pr_err("No usable DMA configuration, aborting\n");
goto err_disable_device;
}
pci_using_dac = 0;
@@ -3059,15 +3019,14 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gemreg_len = pci_resource_len(pdev, 0);
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
- "base address, aborting.\n");
+ pr_err("Cannot find proper PCI device base address, aborting\n");
err = -ENODEV;
goto err_disable_device;
}
dev = alloc_etherdev(sizeof(*gp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_disable_device;
}
@@ -3077,8 +3036,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
@@ -3104,8 +3062,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gp->regs = ioremap(gemreg_base, gemreg_len);
if (!gp->regs) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ pr_err("Cannot map device registers, aborting\n");
err = -EIO;
goto err_out_free_res;
}
@@ -3150,8 +3107,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
&gp->gblock_dvma);
if (!gp->init_block) {
- printk(KERN_ERR PFX "Cannot allocate init block, "
- "aborting.\n");
+ pr_err("Cannot allocate init block, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -3180,19 +3136,18 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
/* Register with kernel */
if (register_netdev(dev)) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ pr_err("Cannot register net device, aborting\n");
err = -ENOMEM;
goto err_out_free_consistent;
}
- printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
+ dev->dev_addr);
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1)
- printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
- gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+ netdev_info(dev, "Found %s PHY\n",
+ gp->phy_mii.def ? gp->phy_mii.def->name : "no");
/* GEM can do it all... */
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 78f8cee5fd7..d16880d7099 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -88,7 +88,7 @@ static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
if ((val & BMCR_ISOLATE) && limit > 0)
__phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
- return (limit <= 0);
+ return limit <= 0;
}
static int bcm5201_init(struct mii_phy* phy)
@@ -1175,7 +1175,8 @@ int mii_phy_probe(struct mii_phy *phy, int mii_id)
/* Read ID and find matching entry */
id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
- printk(KERN_DEBUG "PHY ID: %x, addr: %x\n", id, mii_id);
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
+ id, mii_id);
for (i=0; (def = mii_phy_table[i]) != NULL; i++)
if ((id & def->phy_id_mask) == def->phy_id)
break;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 3d9650b8d38..5e28c414421 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1409,7 +1409,7 @@ force_link:
hp->timer_ticks = 0;
hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
hp->happy_timer.data = (unsigned long) hp;
- hp->happy_timer.function = &happy_meal_timer;
+ hp->happy_timer.function = happy_meal_timer;
add_timer(&hp->happy_timer);
}
@@ -1591,7 +1591,7 @@ static int happy_meal_init(struct happy_meal *hp)
*/
#ifdef CONFIG_SBUS
if ((hp->happy_flags & HFLAG_PCI) == 0) {
- struct of_device *op = hp->happy_dev;
+ struct platform_device *op = hp->happy_dev;
if (sbus_can_dma_64bit()) {
sbus_set_sbus64(&op->dev,
hp->happy_bursts);
@@ -2480,7 +2480,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
#ifdef CONFIG_SBUS
else {
const struct linux_prom_registers *regs;
- struct of_device *op = hp->happy_dev;
+ struct platform_device *op = hp->happy_dev;
regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
sprintf(info->bus_info, "SBUS:%d",
@@ -2497,7 +2497,7 @@ static u32 hme_get_link(struct net_device *dev)
hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
spin_unlock_irq(&hp->happy_lock);
- return (hp->sw_bmsr & BMSR_LSTATUS);
+ return hp->sw_bmsr & BMSR_LSTATUS;
}
static const struct ethtool_ops hme_ethtool_ops = {
@@ -2515,13 +2515,13 @@ static int hme_version_printed;
*
* Return NULL on failure.
*/
-static struct quattro * __devinit quattro_sbus_find(struct of_device *child)
+static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
{
struct device *parent = child->dev.parent;
- struct of_device *op;
+ struct platform_device *op;
struct quattro *qp;
- op = to_of_device(parent);
+ op = to_platform_device(parent);
qp = dev_get_drvdata(&op->dev);
if (qp)
return qp;
@@ -2551,7 +2551,7 @@ static int __init quattro_sbus_register_irqs(void)
struct quattro *qp;
for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
- struct of_device *op = qp->quattro_dev;
+ struct platform_device *op = qp->quattro_dev;
int err, qfe_slot, skip = 0;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
@@ -2561,7 +2561,7 @@ static int __init quattro_sbus_register_irqs(void)
if (skip)
continue;
- err = request_irq(op->irqs[0],
+ err = request_irq(op->archdata.irqs[0],
quattro_sbus_interrupt,
IRQF_SHARED, "Quattro",
qp);
@@ -2580,7 +2580,7 @@ static void quattro_sbus_free_irqs(void)
struct quattro *qp;
for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
- struct of_device *op = qp->quattro_dev;
+ struct platform_device *op = qp->quattro_dev;
int qfe_slot, skip = 0;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
@@ -2590,7 +2590,7 @@ static void quattro_sbus_free_irqs(void)
if (skip)
continue;
- free_irq(op->irqs[0], qp);
+ free_irq(op->archdata.irqs[0], qp);
}
}
#endif /* CONFIG_SBUS */
@@ -2639,7 +2639,7 @@ static const struct net_device_ops hme_netdev_ops = {
};
#ifdef CONFIG_SBUS
-static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
+static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
{
struct device_node *dp = op->dev.of_node, *sbus_dp;
struct quattro *qp = NULL;
@@ -2648,7 +2648,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
int i, qfe_slot = -1;
int err = -ENODEV;
- sbus_dp = to_of_device(op->dev.parent)->dev.of_node;
+ sbus_dp = op->dev.parent->of_node;
/* We can match PCI devices too, do not accept those here. */
if (strcmp(sbus_dp->name, "sbus"))
@@ -2790,7 +2790,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
/* Happy Meal can do it all... */
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
/* Hook up SBUS register/descriptor accessors. */
@@ -2808,7 +2808,8 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- if (register_netdev(hp->dev)) {
+ err = register_netdev(hp->dev);
+ if (err) {
printk(KERN_ERR "happymeal: Cannot register net device, "
"aborting.\n");
goto err_out_free_coherent;
@@ -3130,7 +3131,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- if (register_netdev(hp->dev)) {
+ err = register_netdev(hp->dev);
+ if (err) {
printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
"aborting.\n");
goto err_out_iounmap;
@@ -3235,7 +3237,7 @@ static void happy_meal_pci_exit(void)
#endif
#ifdef CONFIG_SBUS
-static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit hme_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
@@ -3247,7 +3249,7 @@ static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device
return happy_meal_sbus_probe_one(op, is_qfe);
}
-static int __devexit hme_sbus_remove(struct of_device *op)
+static int __devexit hme_sbus_remove(struct platform_device *op)
{
struct happy_meal *hp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = hp->dev;
@@ -3304,7 +3306,7 @@ static int __init happy_meal_sbus_init(void)
{
int err;
- err = of_register_driver(&hme_sbus_driver, &of_bus_type);
+ err = of_register_platform_driver(&hme_sbus_driver);
if (!err)
err = quattro_sbus_register_irqs();
@@ -3313,7 +3315,7 @@ static int __init happy_meal_sbus_init(void)
static void happy_meal_sbus_exit(void)
{
- of_unregister_driver(&hme_sbus_driver);
+ of_unregister_platform_driver(&hme_sbus_driver);
quattro_sbus_free_irqs();
while (qfe_sbus_list) {
diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h
index efd2ca0fcad..756b5bf3aa8 100644
--- a/drivers/net/sunhme.h
+++ b/drivers/net/sunhme.h
@@ -407,7 +407,7 @@ struct happy_meal {
void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
#endif
- /* This is either an of_device or a pci_dev. */
+ /* This is either a platform_device or a pci_dev. */
void *happy_dev;
struct device *dma_dev;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 7d9c33dd9d1..2cf84e5968b 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -250,7 +250,7 @@ struct lance_private {
int rx_new, tx_new;
int rx_old, tx_old;
- struct of_device *ledma; /* If set this points to ledma */
+ struct platform_device *ledma; /* If set this points to ledma */
char tpe; /* cable-selection is TPE */
char auto_select; /* cable-selection by carrier */
char burst_sizes; /* ledma SBus burst sizes */
@@ -265,8 +265,8 @@ struct lance_private {
char *name;
dma_addr_t init_block_dvma;
struct net_device *dev; /* Backpointer */
- struct of_device *op;
- struct of_device *lebuffer;
+ struct platform_device *op;
+ struct platform_device *lebuffer;
struct timer_list multicast_timer;
};
@@ -1272,7 +1272,7 @@ static void lance_free_hwresources(struct lance_private *lp)
if (lp->lregs)
of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
if (lp->dregs) {
- struct of_device *ledma = lp->ledma;
+ struct platform_device *ledma = lp->ledma;
of_iounmap(&ledma->resource[0], lp->dregs,
resource_size(&ledma->resource[0]));
@@ -1319,9 +1319,9 @@ static const struct net_device_ops sparc_lance_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit sparc_lance_probe_one(struct of_device *op,
- struct of_device *ledma,
- struct of_device *lebuffer)
+static int __devinit sparc_lance_probe_one(struct platform_device *op,
+ struct platform_device *ledma,
+ struct platform_device *lebuffer)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1474,7 +1474,7 @@ no_link_test:
dev->ethtool_ops = &sparc_lance_ethtool_ops;
dev->netdev_ops = &sparc_lance_ops;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
/* We cannot sleep if the chip is busy during a
* multicast list update event, because such events
@@ -1483,7 +1483,7 @@ no_link_test:
*/
init_timer(&lp->multicast_timer);
lp->multicast_timer.data = (unsigned long) dev;
- lp->multicast_timer.function = &lance_set_multicast_retry;
+ lp->multicast_timer.function = lance_set_multicast_retry;
if (register_netdev(dev)) {
printk(KERN_ERR "SunLance: Cannot register device.\n");
@@ -1503,9 +1503,9 @@ fail:
return -ENODEV;
}
-static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit sunlance_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
- struct of_device *parent = to_of_device(op->dev.parent);
+ struct platform_device *parent = to_platform_device(op->dev.parent);
struct device_node *parent_dp = parent->dev.of_node;
int err;
@@ -1519,7 +1519,7 @@ static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_d
return err;
}
-static int __devexit sunlance_sbus_remove(struct of_device *op)
+static int __devexit sunlance_sbus_remove(struct platform_device *op)
{
struct lance_private *lp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = lp->dev;
@@ -1558,12 +1558,12 @@ static struct of_platform_driver sunlance_sbus_driver = {
/* Find all the lance cards on the system and initialize them */
static int __init sparc_lance_init(void)
{
- return of_register_driver(&sunlance_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&sunlance_sbus_driver);
}
static void __exit sparc_lance_exit(void)
{
- of_unregister_driver(&sunlance_sbus_driver);
+ of_unregister_platform_driver(&sunlance_sbus_driver);
}
module_init(sparc_lance_init);
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 72b579c8d81..9536b2f010b 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -689,7 +689,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
const struct linux_prom_registers *regs;
struct sunqe *qep = netdev_priv(dev);
- struct of_device *op;
+ struct platform_device *op;
strcpy(info->driver, "sunqe");
strcpy(info->version, "3.0");
@@ -711,7 +711,7 @@ static u32 qe_get_link(struct net_device *dev)
phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
spin_unlock_irq(&qep->lock);
- return (phyconfig & MREGS_PHYCONFIG_LSTAT);
+ return phyconfig & MREGS_PHYCONFIG_LSTAT;
}
static const struct ethtool_ops qe_ethtool_ops = {
@@ -720,7 +720,7 @@ static const struct ethtool_ops qe_ethtool_ops = {
};
/* This is only called once at boot time for each card probed. */
-static void qec_init_once(struct sunqec *qecp, struct of_device *op)
+static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
u8 bsizes = qecp->qec_bursts;
@@ -770,9 +770,9 @@ static u8 __devinit qec_get_burst(struct device_node *dp)
return bsizes;
}
-static struct sunqec * __devinit get_qec(struct of_device *child)
+static struct sunqec * __devinit get_qec(struct platform_device *child)
{
- struct of_device *op = to_of_device(child->dev.parent);
+ struct platform_device *op = to_platform_device(child->dev.parent);
struct sunqec *qecp;
qecp = dev_get_drvdata(&op->dev);
@@ -803,7 +803,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
qec_init_once(qecp, op);
- if (request_irq(op->irqs[0], qec_interrupt,
+ if (request_irq(op->archdata.irqs[0], qec_interrupt,
IRQF_SHARED, "qec", (void *) qecp)) {
printk(KERN_ERR "qec: Can't register irq.\n");
goto fail;
@@ -836,7 +836,7 @@ static const struct net_device_ops qec_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit qec_ether_init(struct of_device *op)
+static int __devinit qec_ether_init(struct platform_device *op)
{
static unsigned version_printed;
struct net_device *dev;
@@ -901,7 +901,7 @@ static int __devinit qec_ether_init(struct of_device *op)
SET_NETDEV_DEV(dev, &op->dev);
dev->watchdog_timeo = 5*HZ;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
dev->dma = 0;
dev->ethtool_ops = &qe_ethtool_ops;
dev->netdev_ops = &qec_ops;
@@ -941,12 +941,12 @@ fail:
return res;
}
-static int __devinit qec_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit qec_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
return qec_ether_init(op);
}
-static int __devexit qec_sbus_remove(struct of_device *op)
+static int __devexit qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = qp->dev;
@@ -988,18 +988,18 @@ static struct of_platform_driver qec_sbus_driver = {
static int __init qec_init(void)
{
- return of_register_driver(&qec_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&qec_sbus_driver);
}
static void __exit qec_exit(void)
{
- of_unregister_driver(&qec_sbus_driver);
+ of_unregister_platform_driver(&qec_sbus_driver);
while (root_qec_dev) {
struct sunqec *next = root_qec_dev->next_module;
- struct of_device *op = root_qec_dev->op;
+ struct platform_device *op = root_qec_dev->op;
- free_irq(op->irqs[0], (void *) root_qec_dev);
+ free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
of_iounmap(&op->resource[0], root_qec_dev->gregs,
GLOB_REG_SIZE);
kfree(root_qec_dev);
diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h
index 5813a7b2faa..581781b6b2f 100644
--- a/drivers/net/sunqe.h
+++ b/drivers/net/sunqe.h
@@ -314,7 +314,7 @@ struct sunqec {
void __iomem *gregs; /* QEC Global Registers */
struct sunqe *qes[4]; /* Each child MACE */
unsigned int qec_bursts; /* Support burst sizes */
- struct of_device *op; /* QEC's OF device */
+ struct platform_device *op; /* QEC's OF device */
struct sunqec *next_module; /* List of all QECs in system */
};
@@ -342,7 +342,7 @@ struct sunqe {
__u32 buffers_dvma; /* DVMA visible address. */
struct sunqec *parent;
u8 mconfig; /* Base MACE mconfig value */
- struct of_device *op; /* QE's OF device struct */
+ struct platform_device *op; /* QE's OF device struct */
struct net_device *dev; /* QE's netdevice struct */
int channel; /* Who am I? */
};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index d281a7b3470..bf3c762de62 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -3,6 +3,8 @@
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -20,7 +22,6 @@
#include "sunvnet.h"
#define DRV_MODULE_NAME "sunvnet"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "June 25, 2007"
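Editor's note, an illustrative sketch of the logging convention this hunk switches to: once pr_fmt is defined before the first include that pulls in printk.h, every pr_*() call in the file gains the module prefix automatically, so the hand-rolled PFX string becomes redundant.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example_log(void)
{
	/* emits e.g. "sunvnet: Resetting connection" when built as sunvnet */
	pr_err("Resetting connection\n");
}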
@@ -45,9 +46,9 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
struct vio_msg_tag *pkt = arg;
- printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
- printk(KERN_ERR PFX "Resetting connection.\n");
+ pr_err("Resetting connection\n");
ldc_disconnect(port->vio.lp);
@@ -400,8 +401,8 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
if (unlikely(pkt->seq != dr->rcv_nxt)) {
- printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
- "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
+ pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
+ pkt->seq, dr->rcv_nxt);
return 0;
}
@@ -464,8 +465,7 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
struct vio_net_mcast_info *pkt = msgbuf;
if (pkt->tag.stype != VIO_SUBTYPE_ACK)
- printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
- "[%02x:%02x:%04x:%08x]\n",
+ pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
port->vp->dev->name,
pkt->tag.type,
pkt->tag.stype,
@@ -520,7 +520,7 @@ static void vnet_event(void *arg, int event)
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
- printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
+ pr_warning("Unexpected LDC event %d\n", event);
spin_unlock_irqrestore(&vio->lock, flags);
return;
}
@@ -662,8 +662,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
dev->stats.tx_errors++;
}
spin_unlock_irqrestore(&port->vio.lock, flags);
@@ -696,8 +695,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
err = __vnet_tx_trigger(port);
if (unlikely(err < 0)) {
- printk(KERN_INFO PFX "%s: TX trigger error %d\n",
- dev->name, err);
+ netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
goto out_dropped_unlock;
@@ -952,12 +950,12 @@ static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
err = -ENOMEM;
if (!buf) {
- printk(KERN_ERR "TX buffer allocation failure\n");
+ pr_err("TX buffer allocation failure\n");
goto err_out;
}
err = -EFAULT;
if ((unsigned long)buf & (8UL - 1)) {
- printk(KERN_ERR "TX buffer misaligned\n");
+ pr_err("TX buffer misaligned\n");
kfree(buf);
goto err_out;
}
@@ -1030,7 +1028,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
dev = alloc_etherdev(sizeof(*vp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return ERR_PTR(-ENOMEM);
}
@@ -1056,12 +1054,11 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ pr_err("Cannot register net device, aborting\n");
goto err_out_free_dev;
}
- printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
+ netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
list_add(&vp->list, &vnet_list);
@@ -1133,10 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = {
static void __devinit print_version(void)
{
- static int version_printed;
-
- if (version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s", version);
}
const char *remote_macaddr_prop = "remote-mac-address";
@@ -1157,7 +1151,7 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
vp = vnet_find_parent(hp, vdev->mp);
if (IS_ERR(vp)) {
- printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+ pr_err("Cannot find port parent vnet\n");
err = PTR_ERR(vp);
goto err_out_put_mdesc;
}
@@ -1165,15 +1159,14 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
- printk(KERN_ERR PFX "Port lacks %s property.\n",
- remote_macaddr_prop);
+ pr_err("Port lacks %s property\n", remote_macaddr_prop);
goto err_out_put_mdesc;
}
port = kzalloc(sizeof(*port), GFP_KERNEL);
err = -ENOMEM;
if (!port) {
- printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
+ pr_err("Cannot allocate vnet_port\n");
goto err_out_put_mdesc;
}
@@ -1214,9 +1207,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
dev_set_drvdata(&vdev->dev, port);
- printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
- vp->dev->name, port->raddr,
- switch_port ? " switch-port" : "");
+ pr_info("%s: PORT ( remote-mac %pM%s )\n",
+ vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
vio_port_up(&port->vio);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 99e423a5b9f..b6eec8cea20 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1167,7 +1167,7 @@ static void print_eth(const u8 *add)
static int tc35815_tx_full(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
- return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end);
+ return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
}
static void tc35815_restart(struct net_device *dev)
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 737df6032bb..8b3dc1eb401 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -92,7 +92,7 @@ static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);
/* Definitions needed by bdx_probe */
-static void bdx_ethtool_ops(struct net_device *netdev);
+static void bdx_set_ethtool_ops(struct net_device *netdev);
/*************************************************************************
* Print Info *
@@ -927,13 +927,6 @@ static void bdx_update_stats(struct bdx_priv *priv)
BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}
-static struct net_device_stats *bdx_get_stats(struct net_device *ndev)
-{
- struct bdx_priv *priv = netdev_priv(ndev);
- struct net_device_stats *net_stat = &priv->net_stats;
- return net_stat;
-}
-
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);
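Editor's note, an illustrative sketch of the pattern behind dropping bdx_get_stats(): when counters are kept in the net_device's embedded stats block, no .ndo_get_stats hook is needed, because the core falls back to dev->stats when the driver supplies none.

#include <linux/netdevice.h>

static void example_count_rx(struct net_device *ndev, unsigned int len)
{
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += len;
}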
@@ -1220,6 +1213,7 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
+ struct net_device *ndev = priv->ndev;
struct sk_buff *skb, *skb2;
struct rxd_desc *rxdd;
struct rx_map *dm;
@@ -1273,7 +1267,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
if (unlikely(GET_RXD_ERR(rxd_val1))) {
DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
- priv->net_stats.rx_errors++;
+ ndev->stats.rx_errors++;
bdx_recycle_skb(priv, rxdd);
continue;
}
@@ -1300,15 +1294,16 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
bdx_rxdb_free_elem(db, rxdd->va_lo);
}
- priv->net_stats.rx_bytes += len;
+ ndev->stats.rx_bytes += len;
skb_put(skb, len);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->protocol = eth_type_trans(skb, priv->ndev);
+ skb->protocol = eth_type_trans(skb, ndev);
/* Non-IP packets aren't checksum-offloaded */
if (GET_RXD_PKT_ID(rxd_val1) == 0)
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
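Editor's note, an illustrative sketch of the rx-checksum pattern this hunk adopts: a freshly allocated skb already has ip_summed == CHECKSUM_NONE, so skb_checksum_none_assert() only verifies that (under DEBUG) rather than writing the field, and only the hardware-verified case needs an explicit store.

#include <linux/skbuff.h>

static void example_rx_csum(struct sk_buff *skb, bool hw_csum_ok)
{
	if (hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}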
@@ -1316,7 +1311,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
break;
}
- priv->net_stats.rx_packets += done;
+ ndev->stats.rx_packets += done;
/* FIXME: do smth to minimize pci accesses */
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
@@ -1712,8 +1707,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#ifdef BDX_LLTX
ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
- priv->net_stats.tx_packets++;
- priv->net_stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
if (priv->tx_level < BDX_MIN_TX_LEVEL) {
DBG("%s: %s: TX Q STOP level %d\n",
@@ -1888,7 +1883,6 @@ static const struct net_device_ops bdx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bdx_ioctl,
.ndo_set_multicast_list = bdx_setmulti,
- .ndo_get_stats = bdx_get_stats,
.ndo_change_mtu = bdx_change_mtu,
.ndo_set_mac_address = bdx_set_mac,
.ndo_vlan_rx_register = bdx_vlan_rx_register,
@@ -2012,7 +2006,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev->netdev_ops = &bdx_netdev_ops;
ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
- bdx_ethtool_ops(ndev); /* ethtool interface */
+ bdx_set_ethtool_ops(ndev); /* ethtool interface */
/* these fields are used for info purposes only
* so we can have them same for all ports of the board */
@@ -2417,10 +2411,10 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
}
/*
- * bdx_ethtool_ops - ethtool interface implementation
+ * bdx_set_ethtool_ops - ethtool interface implementation
* @netdev
*/
-static void bdx_ethtool_ops(struct net_device *netdev)
+static void bdx_set_ethtool_ops(struct net_device *netdev)
{
static const struct ethtool_ops bdx_ethtool_ops = {
.get_settings = bdx_get_settings,
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 67e3b71bf70..b6ba8601e2b 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -269,7 +269,6 @@ struct bdx_priv {
u32 msg_enable;
int stats_flag;
struct bdx_stats hw_stats;
- struct net_device_stats net_stats;
struct pci_dev *pdev;
struct pci_nic *nic;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78a869..943c2832544 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,10 +69,10 @@
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 113
+#define TG3_MIN_NUM 114
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "August 2, 2010"
+#define DRV_MODULE_RELDATE "September 30, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -101,9 +101,15 @@
* You can't change the ring sizes, but you can change where you place
* them in the NIC onboard memory.
*/
-#define TG3_RX_RING_SIZE 512
+#define TG3_RX_STD_RING_SIZE(tp) \
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
+ RX_STD_MAX_SIZE_5717 : 512)
#define TG3_DEF_RX_RING_PENDING 200
-#define TG3_RX_JUMBO_RING_SIZE 256
+#define TG3_RX_JMB_RING_SIZE(tp) \
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
+ 1024 : 256)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
#define TG3_RSS_INDIR_TBL_SIZE 128
@@ -113,19 +119,16 @@
* hw multiply/modulo instructions. Another solution would be to
* replace things like '% foo' with '& (foo - 1)'.
*/
-#define TG3_RX_RCB_RING_SIZE(tp) \
- (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
#define TG3_TX_RING_SIZE 512
#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
-#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
- TG3_RX_RING_SIZE)
-#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
- TG3_RX_JUMBO_RING_SIZE)
-#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
- TG3_RX_RCB_RING_SIZE(tp))
+#define TG3_RX_STD_RING_BYTES(tp) \
+ (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+ (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+ (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
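Editor's note, an illustrative sketch of the indexing trick the comment above describes and the new rx_*_ring_mask fields enable throughout this patch: with power-of-two ring sizes, the wrap can use a mask (size - 1) instead of a modulo.

#include <linux/types.h>

static inline u32 ring_advance(u32 idx, u32 ring_mask)
{
	/* same result as (idx + 1) % (ring_mask + 1) for power-of-two sizes */
	return (idx + 1) & ring_mask;
}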
@@ -143,11 +146,11 @@
#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
-#define TG3_RX_STD_BUFF_RING_SIZE \
- (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+ (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
-#define TG3_RX_JMB_BUFF_RING_SIZE \
- (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+ (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
* that are at least dword aligned when used in PCIX mode. The driver
@@ -264,7 +267,6 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
@@ -752,42 +754,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
-static void tg3_napi_disable(struct tg3 *tp)
-{
- int i;
-
- for (i = tp->irq_cnt - 1; i >= 0; i--)
- napi_disable(&tp->napi[i].napi);
-}
-
-static void tg3_napi_enable(struct tg3 *tp)
-{
- int i;
-
- for (i = 0; i < tp->irq_cnt; i++)
- napi_enable(&tp->napi[i].napi);
-}
-
-static inline void tg3_netif_stop(struct tg3 *tp)
-{
- tp->dev->trans_start = jiffies; /* prevent tx timeout */
- tg3_napi_disable(tp);
- netif_tx_disable(tp->dev);
-}
-
-static inline void tg3_netif_start(struct tg3 *tp)
-{
- /* NOTE: unconditional netif_tx_wake_all_queues is only
- * appropriate so long as all callers are assured to
- * have free tx slots (such as after tg3_init_hw)
- */
- netif_tx_wake_all_queues(tp->dev);
-
- tg3_napi_enable(tp);
- tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
- tg3_enable_ints(tp);
-}
-
static void tg3_switch_clocks(struct tg3 *tp)
{
u32 clock_ctrl;
@@ -1917,19 +1883,16 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
*/
static int tg3_phy_reset(struct tg3 *tp)
{
- u32 cpmuctrl;
- u32 phy_status;
+ u32 val, cpmuctrl;
int err;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 val;
-
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
}
- err = tg3_readphy(tp, MII_BMSR, &phy_status);
- err |= tg3_readphy(tp, MII_BMSR, &phy_status);
+ err = tg3_readphy(tp, MII_BMSR, &val);
+ err |= tg3_readphy(tp, MII_BMSR, &val);
if (err != 0)
return -EBUSY;
@@ -1961,18 +1924,14 @@ static int tg3_phy_reset(struct tg3 *tp)
return err;
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
- u32 phy;
-
- phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
- tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
+ val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
- u32 val;
-
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2028,23 +1987,19 @@ out:
/* Cannot do read-modify-write on 5401 */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
- u32 phy_reg;
-
/* Set bit 14 with read-modify-write to preserve other bits */
if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
- !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
- tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
}
/* Set phy register 0x10 bit 0 to high fifo elasticity to support
* jumbo frames transmission.
*/
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
- u32 phy_reg;
-
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
tg3_writephy(tp, MII_TG3_EXT_CTRL,
- phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3060,7 +3015,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
int current_link_up;
- u32 bmsr, dummy;
+ u32 bmsr, val;
u32 lcl_adv, rmt_adv;
u16 current_speed;
u8 current_duplex;
@@ -3140,8 +3095,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
/* Clear pending interrupts... */
- tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
- tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
@@ -3162,8 +3117,6 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_INVALID;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
- u32 val;
-
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
if (!(val & (1 << 10))) {
@@ -3238,13 +3191,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
relink:
if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
- u32 tmp;
-
tg3_phy_copper_begin(tp);
- tg3_readphy(tp, MII_BMSR, &tmp);
- if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
- (tmp & BMSR_LSTATUS))
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
current_link_up = 1;
}
@@ -4353,6 +4304,11 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
return err;
}
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+ return tp->irq_sync;
+}
+
/* This is called whenever we suspect that the system chipset is re-
* ordering the sequence of MMIO to the tx send mailbox. The symptom
* is bogus tx completions. We try to recover by setting the
@@ -4492,14 +4448,14 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
src_map = NULL;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
- dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
desc = &tpr->rx_std[dest_idx];
map = &tpr->rx_std_buffers[dest_idx];
skb_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
- dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
desc = &tpr->rx_jmb[dest_idx].std;
map = &tpr->rx_jmb_buffers[dest_idx];
skb_size = TG3_RX_JMB_MAP_SZ;
@@ -4549,12 +4505,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
- struct tg3_rx_prodring_set *spr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
int dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
- dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
dest_desc = &dpr->rx_std[dest_idx];
dest_map = &dpr->rx_std_buffers[dest_idx];
src_desc = &spr->rx_std[src_idx];
@@ -4562,7 +4518,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
break;
case RXD_OPAQUE_RING_JUMBO:
- dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
dest_desc = &dpr->rx_jmb[dest_idx].std;
dest_map = &dpr->rx_jmb_buffers[dest_idx];
src_desc = &spr->rx_jmb[src_idx].std;
@@ -4619,7 +4575,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
u32 sw_idx = tnapi->rx_rcb_ptr;
u16 hw_idx;
int received;
- struct tg3_rx_prodring_set *tpr = tnapi->prodring;
+ struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
hw_idx = *(tnapi->rx_rcb_prod_idx);
/*
@@ -4644,13 +4600,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
- ri = &tp->prodring[0].rx_std_buffers[desc_idx];
+ ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
+ ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &jmb_prod_idx;
@@ -4719,7 +4675,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
>> RXD_TCPCSUM_SHIFT) == 0xffff))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, tp->dev);
@@ -4762,7 +4718,8 @@ next_pkt:
(*post_ptr)++;
if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
- tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+ tpr->rx_std_prod_idx = std_prod_idx &
+ tp->rx_std_ring_mask;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
tpr->rx_std_prod_idx);
work_mask &= ~RXD_OPAQUE_RING_STD;
@@ -4770,7 +4727,7 @@ next_pkt:
}
next_pkt_nopost:
sw_idx++;
- sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
+ sw_idx &= tp->rx_ret_ring_mask;
/* Refresh hw_idx to see if there is new work */
if (sw_idx == hw_idx) {
@@ -4786,13 +4743,14 @@ next_pkt_nopost:
/* Refill RX ring(s). */
if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
if (work_mask & RXD_OPAQUE_RING_STD) {
- tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+ tpr->rx_std_prod_idx = std_prod_idx &
+ tp->rx_std_ring_mask;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
tpr->rx_std_prod_idx);
}
if (work_mask & RXD_OPAQUE_RING_JUMBO) {
- tpr->rx_jmb_prod_idx = jmb_prod_idx %
- TG3_RX_JUMBO_RING_SIZE;
+ tpr->rx_jmb_prod_idx = jmb_prod_idx &
+ tp->rx_jmb_ring_mask;
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
tpr->rx_jmb_prod_idx);
}
@@ -4803,8 +4761,8 @@ next_pkt_nopost:
*/
smp_wmb();
- tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
- tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+ tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+ tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
if (tnapi != &tp->napi[1])
napi_schedule(&tp->napi[1].napi);
@@ -4860,9 +4818,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
if (spr->rx_std_cons_idx < src_prod_idx)
cpycnt = src_prod_idx - spr->rx_std_cons_idx;
else
- cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+ cpycnt = tp->rx_std_ring_mask + 1 -
+ spr->rx_std_cons_idx;
- cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+ cpycnt = min(cpycnt,
+ tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
si = spr->rx_std_cons_idx;
di = dpr->rx_std_prod_idx;
@@ -4896,10 +4856,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
dbd->addr_lo = sbd->addr_lo;
}
- spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
- TG3_RX_RING_SIZE;
- dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
- TG3_RX_RING_SIZE;
+ spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+ tp->rx_std_ring_mask;
+ dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+ tp->rx_std_ring_mask;
}
while (1) {
@@ -4916,10 +4876,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
if (spr->rx_jmb_cons_idx < src_prod_idx)
cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
else
- cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+ cpycnt = tp->rx_jmb_ring_mask + 1 -
+ spr->rx_jmb_cons_idx;
cpycnt = min(cpycnt,
- TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+ tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
si = spr->rx_jmb_cons_idx;
di = dpr->rx_jmb_prod_idx;
@@ -4953,10 +4914,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
dbd->addr_lo = sbd->addr_lo;
}
- spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
- TG3_RX_JUMBO_RING_SIZE;
- dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
- TG3_RX_JUMBO_RING_SIZE;
+ spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+ tp->rx_jmb_ring_mask;
+ dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+ tp->rx_jmb_ring_mask;
}
return err;
@@ -4981,14 +4942,14 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
work_done += tg3_rx(tnapi, budget - work_done);
if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
- struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
int i, err = 0;
u32 std_prod_idx = dpr->rx_std_prod_idx;
u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
for (i = 1; i < tp->irq_cnt; i++)
err |= tg3_rx_prodring_xfer(tp, dpr,
- tp->napi[i].prodring);
+ &tp->napi[i].prodring);
wmb();
@@ -5098,6 +5059,59 @@ tx_recovery:
return work_done;
}
+static void tg3_napi_disable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--)
+ napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ napi_enable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_init(struct tg3 *tp)
+{
+ int i;
+
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ for (i = 1; i < tp->irq_cnt; i++)
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+}
+
+static void tg3_napi_fini(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ netif_napi_del(&tp->napi[i].napi);
+}
+
+static inline void tg3_netif_stop(struct tg3 *tp)
+{
+ tp->dev->trans_start = jiffies; /* prevent tx timeout */
+ tg3_napi_disable(tp);
+ netif_tx_disable(tp->dev);
+}
+
+static inline void tg3_netif_start(struct tg3 *tp)
+{
+ /* NOTE: unconditional netif_tx_wake_all_queues is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots (such as after tg3_init_hw)
+ */
+ netif_tx_wake_all_queues(tp->dev);
+
+ tg3_napi_enable(tp);
+ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
+ tg3_enable_ints(tp);
+}
+
static void tg3_irq_quiesce(struct tg3 *tp)
{
int i;
@@ -5111,11 +5125,6 @@ static void tg3_irq_quiesce(struct tg3 *tp)
synchronize_irq(tp->napi[i].irq_vec);
}
-static inline int tg3_irq_sync(struct tg3 *tp)
-{
- return tp->irq_sync;
-}
-
/* Fully shutdown all tg3 driver activity elsewhere in the system.
* If irq_sync is non-zero, then the IRQ handler must be synchronized
* with as well. Most of the time, this is not necessary except when
@@ -5404,8 +5413,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
- return ((base > 0xffffdcc0) &&
- (base + len + 8 < base));
+ return (base > 0xffffdcc0) && (base + len + 8 < base);
}
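Editor's note, an illustrative restatement of what this test does (the hunk only drops redundant parentheses): the check relies on unsigned 32-bit wraparound; if adding the frame length plus a little slack to the low 32 bits of the DMA address wraps past zero, the buffer would straddle a 4GB boundary. The 0xffffdcc0 constant appears to pre-filter addresses close enough to the boundary for a maximum-size frame to wrap. Helper name below is hypothetical.

static inline bool example_crosses_4g(u64 mapping, int len)
{
	u32 base = (u32)mapping;

	return base > 0xffffdcc0 && (u32)(base + len + 8) < base;
}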
/* Test for DMA addresses > 40-bit */
@@ -5414,7 +5422,7 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
- return (((u64) mapping + len) > DMA_BIT_MASK(40));
+ return ((u64) mapping + len) > DMA_BIT_MASK(40);
return 0;
#else
return 0;
@@ -5574,9 +5582,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
goto out_unlock;
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso_v6(skb)) {
hdrlen = skb_headlen(skb) - ETH_HLEN;
- else {
+ } else {
struct iphdr *iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
@@ -5798,7 +5806,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ if (skb_is_gso_v6(skb)) {
hdr_len = skb_headlen(skb) - ETH_HLEN;
} else {
u32 ip_tcp_len;
@@ -6057,16 +6065,16 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
{
int i;
- if (tpr != &tp->prodring[0]) {
+ if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
- i = (i + 1) % TG3_RX_RING_SIZE)
+ i = (i + 1) & tp->rx_std_ring_mask)
tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
- i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+ i = (i + 1) & tp->rx_jmb_ring_mask) {
tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
@@ -6075,12 +6083,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
return;
}
- for (i = 0; i < TG3_RX_RING_SIZE; i++)
+ for (i = 0; i <= tp->rx_std_ring_mask; i++)
tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
- for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+ for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
@@ -6103,16 +6111,17 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
tpr->rx_jmb_cons_idx = 0;
tpr->rx_jmb_prod_idx = 0;
- if (tpr != &tp->prodring[0]) {
- memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+ if (tpr != &tp->napi[0].prodring) {
+ memset(&tpr->rx_std_buffers[0], 0,
+ TG3_RX_STD_BUFF_RING_SIZE(tp));
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
memset(&tpr->rx_jmb_buffers[0], 0,
- TG3_RX_JMB_BUFF_RING_SIZE);
+ TG3_RX_JMB_BUFF_RING_SIZE(tp));
goto done;
}
/* Zero out all descriptors. */
- memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
+ memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -6124,7 +6133,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
* stuff once. This works because the card does not
* write into the rx buffer posting rings.
*/
- for (i = 0; i < TG3_RX_RING_SIZE; i++) {
+ for (i = 0; i <= tp->rx_std_ring_mask; i++) {
struct tg3_rx_buffer_desc *rxd;
rxd = &tpr->rx_std[i];
@@ -6151,12 +6160,12 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
goto done;
- memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
+ memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
goto done;
- for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
struct tg3_rx_buffer_desc *rxd;
rxd = &tpr->rx_jmb[i].std;
@@ -6196,12 +6205,12 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
kfree(tpr->rx_jmb_buffers);
tpr->rx_jmb_buffers = NULL;
if (tpr->rx_std) {
- pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
+ pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
tpr->rx_std, tpr->rx_std_mapping);
tpr->rx_std = NULL;
}
if (tpr->rx_jmb) {
- pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
+ pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
tpr->rx_jmb, tpr->rx_jmb_mapping);
tpr->rx_jmb = NULL;
}
@@ -6210,23 +6219,24 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
static int tg3_rx_prodring_init(struct tg3 *tp,
struct tg3_rx_prodring_set *tpr)
{
- tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
+ tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+ GFP_KERNEL);
if (!tpr->rx_std_buffers)
return -ENOMEM;
- tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+ tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
&tpr->rx_std_mapping);
if (!tpr->rx_std)
goto err_out;
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
- tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
+ tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
GFP_KERNEL);
if (!tpr->rx_jmb_buffers)
goto err_out;
tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
- TG3_RX_JUMBO_RING_BYTES,
+ TG3_RX_JMB_RING_BYTES(tp),
&tpr->rx_jmb_mapping);
if (!tpr->rx_jmb)
goto err_out;
@@ -6253,7 +6263,7 @@ static void tg3_free_rings(struct tg3 *tp)
for (j = 0; j < tp->irq_cnt; j++) {
struct tg3_napi *tnapi = &tp->napi[j];
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
+ tg3_rx_prodring_free(tp, &tnapi->prodring);
if (!tnapi->tx_buffers)
continue;
@@ -6325,7 +6335,7 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
+ if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
tg3_free_rings(tp);
return -ENOMEM;
}
@@ -6361,6 +6371,8 @@ static void tg3_free_consistent(struct tg3 *tp)
tnapi->rx_rcb = NULL;
}
+ tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
if (tnapi->hw_status) {
pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
tnapi->hw_status,
@@ -6374,9 +6386,6 @@ static void tg3_free_consistent(struct tg3 *tp)
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
-
- for (i = 0; i < tp->irq_cnt; i++)
- tg3_rx_prodring_fini(tp, &tp->prodring[i]);
}
/*
@@ -6387,11 +6396,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- for (i = 0; i < tp->irq_cnt; i++) {
- if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
- goto err_out;
- }
-
tp->hw_stats = pci_alloc_consistent(tp->pdev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping);
@@ -6413,6 +6417,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
+ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+ goto err_out;
+
/* If multivector TSS is enabled, vector 0 does not handle
* tx interrupts. Don't allocate any resources for it.
*/
@@ -6452,8 +6459,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
break;
}
- tnapi->prodring = &tp->prodring[i];
-
/*
* If multivector RSS is enabled, vector 0 does not handle
* rx or tx interrupts. Don't allocate any resources for it.
@@ -6596,6 +6601,10 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
int i;
u32 apedata;
+ /* NCSI does not support APE events */
+ if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
+ return;
+
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
return;
@@ -6647,6 +6656,8 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
APE_HOST_BEHAV_NO_PHYLOCK);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+ TG3_APE_HOST_DRVR_STATE_START);
event = APE_EVENT_STATUS_STATE_START;
break;
@@ -6658,6 +6669,16 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
*/
tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+ if (device_may_wakeup(&tp->pdev->dev) &&
+ (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
+ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+ TG3_APE_HOST_WOL_SPEED_AUTO);
+ apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+ } else
+ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
event = APE_EVENT_STATUS_STATE_UNLOAD;
break;
case RESET_KIND_SUSPEND:
@@ -7548,7 +7569,7 @@ static void tg3_rings_reset(struct tg3 *tp)
/* Zero mailbox registers. */
if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
- for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
+ for (i = 1; i < tp->irq_max; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -7594,8 +7615,8 @@ static void tg3_rings_reset(struct tg3 *tp)
if (tnapi->rx_rcb) {
tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
- (TG3_RX_RCB_RING_SIZE(tp) <<
- BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+ (tp->rx_ret_ring_mask + 1) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT, 0);
rxrcb += TG3_BDINFO_SIZE;
}
@@ -7618,7 +7639,7 @@ static void tg3_rings_reset(struct tg3 *tp)
}
tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
- (TG3_RX_RCB_RING_SIZE(tp) <<
+ ((tp->rx_ret_ring_mask + 1) <<
BDINFO_FLAGS_MAXLEN_SHIFT), 0);
stblk += 8;
@@ -7631,7 +7652,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
- struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
tg3_disable_ints(tp);
@@ -7845,7 +7866,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(BUFMGR_DMA_HIGH_WATER,
tp->bufmgr_config.dma_high_water);
- tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
+ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+ tw32(BUFMGR_MODE, val);
for (i = 0; i < 2000; i++) {
if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
break;
@@ -7928,10 +7952,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
BDINFO_FLAGS_DISABLED);
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
- val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
- (TG3_RX_STD_DMA_SZ << 2);
- else
+ if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = RX_STD_MAX_SIZE_5705;
+ else
+ val = RX_STD_MAX_SIZE_5717;
+ val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
+ val |= (TG3_RX_STD_DMA_SZ << 2);
+ } else
val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
} else
val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
@@ -8015,6 +8043,23 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
+ val = tr32(TG3_RDMA_RSRVCTRL_REG);
+ tw32(TG3_RDMA_RSRVCTRL_REG,
+ val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
+ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
+ }
+
/* Receive/send statistics. */
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
val = tr32(RCVLPC_STATS_ENABLE);
@@ -8197,7 +8242,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
- tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
+ val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ val |= RCVDBDI_MODE_LRG_RING_SZ;
+ tw32(RCVDBDI_MODE, val);
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
@@ -8816,16 +8865,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].irq_vec = msix_ent[i].vector;
- tp->dev->real_num_tx_queues = 1;
- if (tp->irq_cnt > 1) {
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
- tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
- }
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
+ if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+ pci_disable_msix(tp->pdev);
+ return false;
}
+ if (tp->irq_cnt > 1)
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
return true;
}
@@ -8858,7 +8905,8 @@ defcfg:
if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
tp->irq_cnt = 1;
tp->napi[0].irq_vec = tp->pdev->irq;
- tp->dev->real_num_tx_queues = 1;
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ netif_set_real_num_rx_queues(tp->dev, 1);
}
}
@@ -8917,6 +8965,8 @@ static int tg3_open(struct net_device *dev)
if (err)
goto err_out1;
+ tg3_napi_init(tp);
+
tg3_napi_enable(tp);
for (i = 0; i < tp->irq_cnt; i++) {
@@ -9004,6 +9054,7 @@ err_out3:
err_out2:
tg3_napi_disable(tp);
+ tg3_napi_fini(tp);
tg3_free_consistent(tp);
err_out1:
@@ -9051,6 +9102,8 @@ static int tg3_close(struct net_device *dev)
memcpy(&tp->estats_prev, tg3_get_estats(tp),
sizeof(tp->estats_prev));
+ tg3_napi_fini(tp);
+
tg3_free_consistent(tp);
tg3_set_power_state(tp, PCI_D3hot);
@@ -9820,10 +9873,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
{
struct tg3 *tp = netdev_priv(dev);
- ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
+ ering->rx_max_pending = tp->rx_std_ring_mask;
ering->rx_mini_max_pending = 0;
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
- ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+ ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
else
ering->rx_jumbo_max_pending = 0;
@@ -9844,8 +9897,8 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
- if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
- (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
+ if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
(ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
(ering->tx_pending <= MAX_SKB_FRAGS) ||
((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
@@ -9867,7 +9920,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tp->rx_pending = 63;
tp->rx_jumbo_pending = ering->rx_jumbo_pending;
- for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
+ for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
if (netif_running(dev)) {
@@ -9915,8 +9968,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (!(phydev->supported & SUPPORTED_Pause) ||
(!(phydev->supported & SUPPORTED_Asym_Pause) &&
- ((epause->rx_pause && !epause->tx_pause) ||
- (!epause->rx_pause && epause->tx_pause))))
+ (epause->rx_pause != epause->tx_pause)))
return -EINVAL;
tp->link_config.flowctrl = 0;
@@ -10608,12 +10660,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
struct tg3_napi *tnapi, *rnapi;
- struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
tnapi = &tp->napi[0];
rnapi = &tp->napi[0];
if (tp->irq_cnt > 1) {
- rnapi = &tp->napi[1];
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
+ rnapi = &tp->napi[1];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
tnapi = &tp->napi[1];
}
@@ -12401,14 +12454,18 @@ skip_phy_reset:
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
- u8 vpd_data[TG3_NVM_VPD_LEN];
+ u8 *vpd_data;
unsigned int block_end, rosize, len;
int j, i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
tg3_nvram_read(tp, 0x0, &magic))
- goto out_not_found;
+ goto out_no_vpd;
+
+ vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
+ if (!vpd_data)
+ goto out_no_vpd;
if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
@@ -12492,43 +12549,51 @@ partno:
memcpy(tp->board_part_number, &vpd_data[i], len);
- return;
-
out_not_found:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ kfree(vpd_data);
+ if (tp->board_part_number[0])
+ return;
+
+out_no_vpd:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+ strcpy(tp->board_part_number, "BCM5717");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
+ strcpy(tp->board_part_number, "BCM5718");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
+ strcpy(tp->board_part_number, "BCM57780");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
+ strcpy(tp->board_part_number, "BCM57760");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
+ strcpy(tp->board_part_number, "BCM57790");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
+ strcpy(tp->board_part_number, "BCM57788");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+ strcpy(tp->board_part_number, "BCM57761");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
+ strcpy(tp->board_part_number, "BCM57765");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+ strcpy(tp->board_part_number, "BCM57781");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+ strcpy(tp->board_part_number, "BCM57785");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+ strcpy(tp->board_part_number, "BCM57791");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ strcpy(tp->board_part_number, "BCM57795");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
- strcpy(tp->board_part_number, "BCM57780");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
- strcpy(tp->board_part_number, "BCM57760");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
- strcpy(tp->board_part_number, "BCM57790");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
- strcpy(tp->board_part_number, "BCM57788");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
- strcpy(tp->board_part_number, "BCM57761");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
- strcpy(tp->board_part_number, "BCM57765");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
- strcpy(tp->board_part_number, "BCM57781");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
- strcpy(tp->board_part_number, "BCM57785");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
- strcpy(tp->board_part_number, "BCM57791");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
- strcpy(tp->board_part_number, "BCM57795");
- else
+ } else {
+nomatch:
strcpy(tp->board_part_number, "none");
+ }
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
@@ -12736,10 +12801,12 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
- if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
+ tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
fwtype = "NCSI";
- else
+ } else {
fwtype = "DASH";
+ }
vlen = strlen(tp->fw_ver);
@@ -12795,6 +12862,18 @@ static void inline vlan_features_add(struct net_device *dev, unsigned long flags
#endif
}
+static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
+{
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ return 4096;
+ else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+ return 1024;
+ else
+ return 512;
+}
+
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
@@ -12839,7 +12918,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN2_PRODID_ASICREV,
@@ -13410,10 +13488,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (err)
return err;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
- return -ENOTSUPP;
-
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
val &= GRC_MODE_HOST_STACKUP;
@@ -13553,7 +13627,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
#endif
}
- tp->rx_std_max_post = TG3_RX_RING_SIZE;
+ tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+ tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+ tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
+
+ tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
/* Increment the rx prod index on the rx std ring by at most
* 8 for these chips to workaround hw errata.
@@ -14442,7 +14520,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
}
if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
dev->netdev_ops = &tg3_netdev_ops;
else
@@ -14581,7 +14659,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
- for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+ for (i = 0; i < tp->irq_max; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tnapi->tp = tp;
@@ -14596,13 +14674,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tnapi->consmbox = rcvmbx;
tnapi->prodmbox = sndmbx;
- if (i) {
+ if (i)
tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
- netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
- } else {
+ else
tnapi->coal_now = HOSTCC_MODE_NOW;
- netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
- }
if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
break;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd19096..f6b709a3ca3 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -26,6 +26,7 @@
#define TG3_RX_INTERNAL_RING_SZ_5906 32
#define RX_STD_MAX_SIZE_5705 512
+#define RX_STD_MAX_SIZE_5717 2048
#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
/* First 256 bytes are a mirror of PCI config space. */
@@ -46,7 +47,6 @@
#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
#define TG3PCI_DEVICE_TIGON3_5717 0x1655
#define TG3PCI_DEVICE_TIGON3_5718 0x1656
-#define TG3PCI_DEVICE_TIGON3_5724 0x165c
#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
#define TG3PCI_DEVICE_TIGON3_57761 0x16b0
@@ -973,6 +973,7 @@
#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004
#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008
#define RCVDBDI_MODE_INV_RING_SZ 0x00000010
+#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000
#define RCVDBDI_STATUS 0x00002404
#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004
#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008
@@ -1225,6 +1226,7 @@
#define BUFMGR_MODE_ATTN_ENABLE 0x00000004
#define BUFMGR_MODE_BM_TEST 0x00000008
#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010
+#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000
#define BUFMGR_STATUS 0x00004404
#define BUFMGR_STATUS_ERROR 0x00000004
#define BUFMGR_STATUS_MBLOW 0x00000010
@@ -1302,7 +1304,16 @@
#define RDMAC_STATUS_FIFOURUN 0x00000080
#define RDMAC_STATUS_FIFOOREAD 0x00000100
#define RDMAC_STATUS_LNGREAD 0x00000200
-/* 0x4808 --> 0x4c00 unused */
+/* 0x4808 --> 0x4900 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG 0x00004900
+#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+/* 0x4904 --> 0x4910 unused */
+
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
+/* 0x4914 --> 0x4c00 unused */
/* Write DMA control registers */
#define WDMAC_MODE 0x00004c00
@@ -2176,7 +2187,7 @@
#define TG3_APE_HOST_SEG_SIG 0x4200
#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
#define TG3_APE_HOST_SEG_LEN 0x4204
-#define APE_HOST_SEG_LEN_MAGIC 0x0000001c
+#define APE_HOST_SEG_LEN_MAGIC 0x00000020
#define TG3_APE_HOST_INIT_COUNT 0x4208
#define TG3_APE_HOST_DRIVER_ID 0x420c
#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
@@ -2188,6 +2199,12 @@
#define APE_HOST_HEARTBEAT_INT_DISABLE 0
#define APE_HOST_HEARTBEAT_INT_5SEC 5000
#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
+#define TG3_APE_HOST_DRVR_STATE 0x421c
+#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
+#define TG3_APE_HOST_WOL_SPEED 0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
#define TG3_APE_EVENT_STATUS 0x4300
@@ -2649,7 +2666,8 @@ struct tg3_rx_prodring_set {
dma_addr_t rx_jmb_mapping;
};
-#define TG3_IRQ_MAX_VECS 5
+#define TG3_IRQ_MAX_VECS_RSS 5
+#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
struct tg3_napi {
struct napi_struct napi ____cacheline_aligned;
@@ -2668,7 +2686,7 @@ struct tg3_napi {
u32 consmbox;
u32 rx_rcb_ptr;
u16 *rx_rcb_prod_idx;
- struct tg3_rx_prodring_set *prodring;
+ struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
struct tg3_tx_buffer_desc *tx_ring;
@@ -2746,6 +2764,9 @@ struct tg3 {
void (*write32_rx_mbox) (struct tg3 *, u32,
u32);
u32 rx_copy_thresh;
+ u32 rx_std_ring_mask;
+ u32 rx_jmb_ring_mask;
+ u32 rx_ret_ring_mask;
u32 rx_pending;
u32 rx_jumbo_pending;
u32 rx_std_max_post;
@@ -2755,8 +2776,6 @@ struct tg3 {
struct vlan_group *vlgrp;
#endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
-
/* begin "everything else" cacheline(s) section */
struct rtnl_link_stats64 net_stats;
@@ -2850,6 +2869,7 @@ struct tg3 {
#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
#define TG3_FLG3_L1PLLPD_EN 0x00800000
#define TG3_FLG3_5717_PLUS 0x01000000
+#define TG3_FLG3_APE_HAS_NCSI 0x02000000
struct timer_list timer;
u16 timer_counter;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ccee3eddc5f..ec8c804a795 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -393,7 +393,7 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1453,7 +1453,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
TLan_DioWrite8( dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
if ( priv->timer.function == NULL ) {
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timerSetAt = jiffies;
@@ -1601,7 +1601,7 @@ drop_and_reuse:
TLan_DioWrite8( dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
if ( priv->timer.function == NULL ) {
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timerSetAt = jiffies;
@@ -1897,7 +1897,7 @@ static void TLan_Timer( unsigned long data )
TLan_DioWrite8( dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK );
} else {
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
priv->timer.expires = priv->timerSetAt
+ TLAN_TIMER_ACT_DELAY;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -3187,7 +3187,7 @@ static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
}
- return ( err );
+ return err;
} /* TLan_EeSendByte */
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index d13ff12d750..3315ced774e 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -442,7 +442,7 @@ typedef struct tlan_private_tag {
static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
- return (inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)));
+ return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
} /* TLan_DioRead8 */
@@ -452,7 +452,7 @@ static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
- return (inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)));
+ return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
} /* TLan_DioRead16 */
@@ -462,7 +462,7 @@ static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
- return (inl(base_addr + TLAN_DIO_DATA));
+ return inl(base_addr + TLAN_DIO_DATA);
} /* TLan_DioRead32 */
@@ -537,6 +537,6 @@ static inline u32 TLan_HashFunc( const u8 *a )
hash ^= ((a[2]^a[5])<<4); /* & 060 */
hash ^= ((a[2]^a[5])>>2); /* & 077 */
- return (hash & 077);
+ return hash & 077;
}
#endif
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 16e8783ee9c..8d362e64a40 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -110,7 +110,7 @@ static int __init proteon_probe1(struct net_device *dev, int ioaddr)
}
dev->base_addr = ioaddr;
- return (0);
+ return 0;
nodev:
release_region(ioaddr, PROTEON_IO_EXTENT);
return -ENODEV;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 0929fff5982..63db5a6762a 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -435,7 +435,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
- return (0);
+ return 0;
}
/* Enter Bypass state. */
@@ -448,7 +448,7 @@ static int smctr_bypass_state(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
- return (err);
+ return err;
}
static int smctr_checksum_firmware(struct net_device *dev)
@@ -471,9 +471,9 @@ static int smctr_checksum_firmware(struct net_device *dev)
smctr_disable_adapter_ctrl_store(dev);
if(checksum)
- return (checksum);
+ return checksum;
- return (0);
+ return 0;
}
static int __init smctr_chk_mca(struct net_device *dev)
@@ -485,7 +485,7 @@ static int __init smctr_chk_mca(struct net_device *dev)
current_slot = mca_find_unused_adapter(smctr_posid, 0);
if(current_slot == MCA_NOTFOUND)
- return (-ENODEV);
+ return -ENODEV;
mca_set_adapter_name(current_slot, smctr_name);
mca_mark_as_used(current_slot);
@@ -622,9 +622,9 @@ static int __init smctr_chk_mca(struct net_device *dev)
break;
}
- return (0);
+ return 0;
#else
- return (-1);
+ return -1;
#endif /* CONFIG_MCA_LEGACY */
}
@@ -677,18 +677,18 @@ static int smctr_chg_rx_mask(struct net_device *dev)
if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
&tp->config_word0)))
{
- return (err);
+ return err;
}
if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
&tp->config_word1)))
{
- return (err);
+ return err;
}
smctr_disable_16bit(dev);
- return (0);
+ return 0;
}
static int smctr_clear_int(struct net_device *dev)
@@ -697,7 +697,7 @@ static int smctr_clear_int(struct net_device *dev)
outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
- return (0);
+ return 0;
}
static int smctr_clear_trc_reset(int ioaddr)
@@ -707,7 +707,7 @@ static int smctr_clear_trc_reset(int ioaddr)
r = inb(ioaddr + MSR);
outb(~MSR_RST & r, ioaddr + MSR);
- return (0);
+ return 0;
}
/*
@@ -725,7 +725,7 @@ static int smctr_close(struct net_device *dev)
/* Check to see if adapter is already in a closed state. */
if(tp->status != OPEN)
- return (0);
+ return 0;
smctr_enable_16bit(dev);
smctr_set_page(dev, (__u8 *)tp->ram_access);
@@ -733,7 +733,7 @@ static int smctr_close(struct net_device *dev)
if((err = smctr_issue_remove_cmd(dev)))
{
smctr_disable_16bit(dev);
- return (err);
+ return err;
}
for(;;)
@@ -746,7 +746,7 @@ static int smctr_close(struct net_device *dev)
}
- return (0);
+ return 0;
}
static int smctr_decode_firmware(struct net_device *dev,
@@ -807,12 +807,12 @@ static int smctr_decode_firmware(struct net_device *dev,
if(buff)
*(mem++) = SWAP_BYTES(buff);
- return (0);
+ return 0;
}
static int smctr_disable_16bit(struct net_device *dev)
{
- return (0);
+ return 0;
}
/*
@@ -832,7 +832,7 @@ static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
tp->trc_mask |= CSR_WCSS;
outb(tp->trc_mask, ioaddr + CSR);
- return (0);
+ return 0;
}
static int smctr_disable_bic_int(struct net_device *dev)
@@ -844,7 +844,7 @@ static int smctr_disable_bic_int(struct net_device *dev)
| CSR_MSKTINT | CSR_WCSS;
outb(tp->trc_mask, ioaddr + CSR);
- return (0);
+ return 0;
}
static int smctr_enable_16bit(struct net_device *dev)
@@ -858,7 +858,7 @@ static int smctr_enable_16bit(struct net_device *dev)
outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
}
- return (0);
+ return 0;
}
/*
@@ -881,7 +881,7 @@ static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
tp->trc_mask &= ~CSR_WCSS;
outb(tp->trc_mask, ioaddr + CSR);
- return (0);
+ return 0;
}
static int smctr_enable_adapter_ram(struct net_device *dev)
@@ -895,7 +895,7 @@ static int smctr_enable_adapter_ram(struct net_device *dev)
r = inb(ioaddr + MSR);
outb(MSR_MEMB | r, ioaddr + MSR);
- return (0);
+ return 0;
}
static int smctr_enable_bic_int(struct net_device *dev)
@@ -921,7 +921,7 @@ static int smctr_enable_bic_int(struct net_device *dev)
break;
}
- return (0);
+ return 0;
}
static int __init smctr_chk_isa(struct net_device *dev)
@@ -1145,7 +1145,7 @@ static int __init smctr_chk_isa(struct net_device *dev)
*/
}
- return (0);
+ return 0;
out2:
release_region(ioaddr, SMCTR_IO_EXTENT);
@@ -1199,7 +1199,7 @@ static int __init smctr_get_boardid(struct net_device *dev, int mca)
* return;
*/
if(IdByte & 0xF8)
- return (-1);
+ return -1;
r1 = inb(ioaddr + BID_REG_1);
r1 &= BID_ICR_MASK;
@@ -1250,21 +1250,21 @@ static int __init smctr_get_boardid(struct net_device *dev, int mca)
while(r1 & BID_RECALL_DONE_MASK)
r1 = inb(ioaddr + BID_REG_1);
- return (BoardIdMask);
+ return BoardIdMask;
}
static int smctr_get_group_address(struct net_device *dev)
{
smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
- return(smctr_wait_cmd(dev));
+ return smctr_wait_cmd(dev);
}
static int smctr_get_functional_address(struct net_device *dev)
{
smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
- return(smctr_wait_cmd(dev));
+ return smctr_wait_cmd(dev);
}
/* Calculate number of Non-MAC receive BDB's and data buffers.
@@ -1346,14 +1346,14 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
*/
mem_used += 0x100;
- return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)));
+ return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock));
}
static int smctr_get_physical_drop_number(struct net_device *dev)
{
smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
- return(smctr_wait_cmd(dev));
+ return smctr_wait_cmd(dev);
}
static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
@@ -1366,14 +1366,14 @@ static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
- return ((__u8 *)bdb->data_block_ptr);
+ return (__u8 *)bdb->data_block_ptr;
}
static int smctr_get_station_id(struct net_device *dev)
{
smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
- return(smctr_wait_cmd(dev));
+ return smctr_wait_cmd(dev);
}
/*
@@ -1384,7 +1384,7 @@ static struct net_device_stats *smctr_get_stats(struct net_device *dev)
{
struct net_local *tp = netdev_priv(dev);
- return ((struct net_device_stats *)&tp->MacStat);
+ return (struct net_device_stats *)&tp->MacStat;
}
static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
@@ -1401,14 +1401,14 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
/* check if there is enough FCB blocks */
if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
- return ((FCBlock *)(-1L));
+ return (FCBlock *)(-1L);
/* round off the input pkt size to the nearest even number */
alloc_size = (bytes_count + 1) & 0xfffe;
/* check if enough mem */
if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
- return ((FCBlock *)(-1L));
+ return (FCBlock *)(-1L);
/* check if past the end ;
* if exactly enough mem to end of ring, alloc from front.
@@ -1425,7 +1425,7 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
if((tp->tx_buff_used[queue] + alloc_size)
> tp->tx_buff_size[queue])
{
- return ((FCBlock *)(-1L));
+ return (FCBlock *)(-1L);
}
/* ring wrap */
@@ -1448,14 +1448,14 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
pFCB = tp->tx_fcb_curr[queue];
tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;
- return (pFCB);
+ return pFCB;
}
static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
{
smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
- return(smctr_wait_cmd(dev));
+ return smctr_wait_cmd(dev);
}
static int smctr_hardware_send_packet(struct net_device *dev,
@@ -1469,21 +1469,22 @@ static int smctr_hardware_send_packet(struct net_device *dev,
printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
if(tp->status != OPEN)
- return (-1);
+ return -1;
if(tp->monitor_state_ready != 1)
- return (-1);
+ return -1;
for(;;)
{
/* Send first buffer from queue */
skb = skb_dequeue(&tp->SendSkbQueue);
if(skb == NULL)
- return (-1);
+ return -1;
tp->QueueSkb++;
- if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) return (-1);
+ if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
+ return -1;
smctr_enable_16bit(dev);
smctr_set_page(dev, (__u8 *)tp->ram_access);
@@ -1492,7 +1493,7 @@ static int smctr_hardware_send_packet(struct net_device *dev,
== (FCBlock *)(-1L))
{
smctr_disable_16bit(dev);
- return (-1);
+ return -1;
}
smctr_tx_move_frame(dev, skb,
@@ -1508,7 +1509,7 @@ static int smctr_hardware_send_packet(struct net_device *dev,
smctr_disable_16bit(dev);
}
- return (0);
+ return 0;
}
static int smctr_init_acbs(struct net_device *dev)
@@ -1552,7 +1553,7 @@ static int smctr_init_acbs(struct net_device *dev)
tp->acb_curr = tp->acb_head->next_ptr;
tp->num_acbs_used = 0;
- return (0);
+ return 0;
}
static int smctr_init_adapter(struct net_device *dev)
@@ -1590,13 +1591,14 @@ static int smctr_init_adapter(struct net_device *dev)
if(smctr_checksum_firmware(dev))
{
- printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name); return (-ENOENT);
+ printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
+ return -ENOENT;
}
if((err = smctr_ram_memory_test(dev)))
{
printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
- return (-EIO);
+ return -EIO;
}
smctr_set_rx_look_ahead(dev);
@@ -1608,7 +1610,7 @@ static int smctr_init_adapter(struct net_device *dev)
{
printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
dev->name, err);
- return (-EINVAL);
+ return -EINVAL;
}
/* This routine clobbers the TRC's internal registers. */
@@ -1616,7 +1618,7 @@ static int smctr_init_adapter(struct net_device *dev)
{
printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
dev->name, err);
- return (-EINVAL);
+ return -EINVAL;
}
/* Re-Initialize adapter's internal registers */
@@ -1625,17 +1627,17 @@ static int smctr_init_adapter(struct net_device *dev)
{
printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
dev->name, err);
- return (-EINVAL);
+ return -EINVAL;
}
smctr_enable_bic_int(dev);
if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
- return (err);
+ return err;
smctr_disable_16bit(dev);
- return (0);
+ return 0;
}
static int smctr_init_card_real(struct net_device *dev)
@@ -1703,15 +1705,15 @@ static int smctr_init_card_real(struct net_device *dev)
smctr_init_shared_memory(dev);
if((err = smctr_issue_init_timers_cmd(dev)))
- return (err);
+ return err;
if((err = smctr_issue_init_txrx_cmd(dev)))
{
printk(KERN_ERR "%s: Hardware failure\n", dev->name);
- return (err);
+ return err;
}
- return (0);
+ return 0;
}
static int smctr_init_rx_bdbs(struct net_device *dev)
@@ -1763,7 +1765,7 @@ static int smctr_init_rx_bdbs(struct net_device *dev)
tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
}
- return (0);
+ return 0;
}
static int smctr_init_rx_fcbs(struct net_device *dev)
@@ -1813,7 +1815,7 @@ static int smctr_init_rx_fcbs(struct net_device *dev)
tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
}
- return(0);
+ return 0;
}
static int smctr_init_shared_memory(struct net_device *dev)
@@ -1871,7 +1873,7 @@ static int smctr_init_shared_memory(struct net_device *dev)
smctr_init_rx_bdbs(dev);
smctr_init_rx_fcbs(dev);
- return (0);
+ return 0;
}
static int smctr_init_tx_bdbs(struct net_device *dev)
@@ -1901,7 +1903,7 @@ static int smctr_init_tx_bdbs(struct net_device *dev)
tp->tx_bdb_head[i]->back_ptr = bdb;
}
- return (0);
+ return 0;
}
static int smctr_init_tx_fcbs(struct net_device *dev)
@@ -1940,7 +1942,7 @@ static int smctr_init_tx_fcbs(struct net_device *dev)
tp->num_tx_fcbs_used[i] = 0;
}
- return (0);
+ return 0;
}
static int smctr_internal_self_test(struct net_device *dev)
@@ -1949,33 +1951,33 @@ static int smctr_internal_self_test(struct net_device *dev)
int err;
if((err = smctr_issue_test_internal_rom_cmd(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
if(tp->acb_head->cmd_done_status & 0xff)
- return (-1);
+ return -1;
if((err = smctr_issue_test_hic_cmd(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
if(tp->acb_head->cmd_done_status & 0xff)
- return (-1);
+ return -1;
if((err = smctr_issue_test_mac_reg_cmd(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
if(tp->acb_head->cmd_done_status & 0xff)
- return (-1);
+ return -1;
- return (0);
+ return 0;
}
/*
@@ -2468,14 +2470,14 @@ static int smctr_issue_enable_int_cmd(struct net_device *dev,
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
@@ -2483,7 +2485,7 @@ static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ib
struct net_local *tp = netdev_priv(dev);
if(smctr_wait_while_cbusy(dev))
- return (-1);
+ return -1;
tp->sclb_ptr->int_mask_control = ibits;
tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */
tp->sclb_ptr->resume_control = 0;
@@ -2491,7 +2493,7 @@ static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ib
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_init_timers_cmd(struct net_device *dev)
@@ -2502,10 +2504,10 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
__u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
tp->config_word1 = 0;
@@ -2648,7 +2650,7 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);
- return (err);
+ return err;
}
static int smctr_issue_init_txrx_cmd(struct net_device *dev)
@@ -2659,12 +2661,12 @@ static int smctr_issue_init_txrx_cmd(struct net_device *dev)
void **txrx_ptrs = (void *)tp->misc_command_data;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
{
printk(KERN_ERR "%s: Hardware failure\n", dev->name);
- return (err);
+ return err;
}
/* Initialize Transmit Queue Pointers that are used, to point to
@@ -2695,7 +2697,7 @@ static int smctr_issue_init_txrx_cmd(struct net_device *dev)
err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
- return (err);
+ return err;
}
static int smctr_issue_insert_cmd(struct net_device *dev)
@@ -2704,7 +2706,7 @@ static int smctr_issue_insert_cmd(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
- return (err);
+ return err;
}
static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
@@ -2712,15 +2714,15 @@ static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
RW_TRC_STATUS_BLOCK);
- return (err);
+ return err;
}
static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
@@ -2728,15 +2730,15 @@ static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
aword_cnt);
- return (err);
+ return err;
}
static int smctr_issue_remove_cmd(struct net_device *dev)
@@ -2745,14 +2747,14 @@ static int smctr_issue_remove_cmd(struct net_device *dev)
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
tp->sclb_ptr->resume_control = 0;
tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_resume_acb_cmd(struct net_device *dev)
@@ -2761,7 +2763,7 @@ static int smctr_issue_resume_acb_cmd(struct net_device *dev)
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
tp->sclb_ptr->resume_control = SCLB_RC_ACB;
tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
@@ -2770,7 +2772,7 @@ static int smctr_issue_resume_acb_cmd(struct net_device *dev)
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
@@ -2779,7 +2781,7 @@ static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if(queue == MAC_QUEUE)
tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
@@ -2790,7 +2792,7 @@ static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
@@ -2801,7 +2803,7 @@ static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
if(smctr_wait_while_cbusy(dev))
- return (-1);
+ return -1;
if(queue == MAC_QUEUE)
tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
@@ -2812,7 +2814,7 @@ static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
@@ -2823,14 +2825,14 @@ static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
if(smctr_wait_while_cbusy(dev))
- return (-1);
+ return -1;
tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
smctr_set_ctrl_attention(dev);
- return (0);
+ return 0;
}
static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
@@ -2840,7 +2842,7 @@ static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
TRC_INTERNAL_ROM_TEST);
- return (err);
+ return err;
}
static int smctr_issue_test_hic_cmd(struct net_device *dev)
@@ -2850,7 +2852,7 @@ static int smctr_issue_test_hic_cmd(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
TRC_HOST_INTERFACE_REG_TEST);
- return (err);
+ return err;
}
static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
@@ -2860,7 +2862,7 @@ static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
TRC_MAC_REGISTERS_TEST);
- return (err);
+ return err;
}
static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
@@ -2870,7 +2872,7 @@ static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
TRC_INTERNAL_LOOPBACK);
- return (err);
+ return err;
}
static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
@@ -2880,7 +2882,7 @@ static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
TRC_TRI_LOOPBACK);
- return (err);
+ return err;
}
static int smctr_issue_write_byte_cmd(struct net_device *dev,
@@ -2891,10 +2893,10 @@ static int smctr_issue_write_byte_cmd(struct net_device *dev,
int err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
iword++, ibyte += 2)
@@ -2903,8 +2905,8 @@ static int smctr_issue_write_byte_cmd(struct net_device *dev,
| (*((__u8 *)byte + ibyte + 1));
}
- return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
- aword_cnt));
+ return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
+ aword_cnt);
}
static int smctr_issue_write_word_cmd(struct net_device *dev,
@@ -2914,10 +2916,10 @@ static int smctr_issue_write_word_cmd(struct net_device *dev,
unsigned int i, err;
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
tp->misc_command_data[i] = *((__u16 *)word + i);
@@ -2925,7 +2927,7 @@ static int smctr_issue_write_word_cmd(struct net_device *dev,
err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
aword_cnt);
- return (err);
+ return err;
}
static int smctr_join_complete_state(struct net_device *dev)
@@ -2935,7 +2937,7 @@ static int smctr_join_complete_state(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
JS_JOIN_COMPLETE_STATE);
- return (err);
+ return err;
}
static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
@@ -2959,7 +2961,7 @@ static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
}
}
- return (0);
+ return 0;
}
static int smctr_load_firmware(struct net_device *dev)
@@ -2974,7 +2976,7 @@ static int smctr_load_firmware(struct net_device *dev)
if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) {
printk(KERN_ERR "%s: firmware not found\n", dev->name);
- return (UCODE_NOT_PRESENT);
+ return UCODE_NOT_PRESENT;
}
tp->num_of_tx_buffs = 4;
@@ -3036,7 +3038,7 @@ static int smctr_load_firmware(struct net_device *dev)
smctr_disable_16bit(dev);
out:
release_firmware(fw);
- return (err);
+ return err;
}
static int smctr_load_node_addr(struct net_device *dev)
@@ -3052,7 +3054,7 @@ static int smctr_load_node_addr(struct net_device *dev)
}
dev->addr_len = 6;
- return (0);
+ return 0;
}
/* Lobe Media Test.
@@ -3146,14 +3148,14 @@ static int smctr_lobe_media_test_cmd(struct net_device *dev)
if(smctr_wait_cmd(dev))
{
printk(KERN_ERR "Lobe Failed test state\n");
- return (LOBE_MEDIA_TEST_FAILED);
+ return LOBE_MEDIA_TEST_FAILED;
}
}
err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
TRC_LOBE_MEDIA_TEST);
- return (err);
+ return err;
}
static int smctr_lobe_media_test_state(struct net_device *dev)
@@ -3163,7 +3165,7 @@ static int smctr_lobe_media_test_state(struct net_device *dev)
err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
JS_LOBE_TEST_STATE);
- return (err);
+ return err;
}
static int smctr_make_8025_hdr(struct net_device *dev,
@@ -3212,7 +3214,7 @@ static int smctr_make_8025_hdr(struct net_device *dev,
break;
}
- return (0);
+ return 0;
}
static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3225,7 +3227,7 @@ static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
tsv->svv[0] = MSB(tp->authorized_access_priority);
tsv->svv[1] = LSB(tp->authorized_access_priority);
- return (0);
+ return 0;
}
static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3236,7 +3238,7 @@ static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
tsv->svv[0] = 0;
tsv->svv[1] = 0;
- return (0);
+ return 0;
}
static int smctr_make_auth_funct_class(struct net_device *dev,
@@ -3250,7 +3252,7 @@ static int smctr_make_auth_funct_class(struct net_device *dev,
tsv->svv[0] = MSB(tp->authorized_function_classes);
tsv->svv[1] = LSB(tp->authorized_function_classes);
- return (0);
+ return 0;
}
static int smctr_make_corr(struct net_device *dev,
@@ -3262,7 +3264,7 @@ static int smctr_make_corr(struct net_device *dev,
tsv->svv[0] = MSB(correlator);
tsv->svv[1] = LSB(correlator);
- return (0);
+ return 0;
}
static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3280,7 +3282,7 @@ static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
tsv->svv[2] = MSB(tp->misc_command_data[1]);
tsv->svv[3] = LSB(tp->misc_command_data[1]);
- return (0);
+ return 0;
}
static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3305,7 +3307,7 @@ static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
tsv->svv[0] = 0x00;
- return (0);
+ return 0;
}
static int smctr_make_phy_drop_num(struct net_device *dev,
@@ -3324,7 +3326,7 @@ static int smctr_make_phy_drop_num(struct net_device *dev,
tsv->svv[2] = MSB(tp->misc_command_data[1]);
tsv->svv[3] = LSB(tp->misc_command_data[1]);
- return (0);
+ return 0;
}
static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3337,7 +3339,7 @@ static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
for(i = 0; i < 18; i++)
tsv->svv[i] = 0xF0;
- return (0);
+ return 0;
}
static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3358,7 +3360,7 @@ static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
tsv->svv[4] = MSB(tp->misc_command_data[2]);
tsv->svv[5] = LSB(tp->misc_command_data[2]);
- return (0);
+ return 0;
}
static int smctr_make_ring_station_status(struct net_device *dev,
@@ -3374,7 +3376,7 @@ static int smctr_make_ring_station_status(struct net_device *dev,
tsv->svv[4] = 0;
tsv->svv[5] = 0;
- return (0);
+ return 0;
}
static int smctr_make_ring_station_version(struct net_device *dev,
@@ -3400,7 +3402,7 @@ static int smctr_make_ring_station_version(struct net_device *dev,
else
tsv->svv[9] = 0xc4; /* EBCDIC - D */
- return (0);
+ return 0;
}
static int smctr_make_tx_status_code(struct net_device *dev,
@@ -3414,7 +3416,7 @@ static int smctr_make_tx_status_code(struct net_device *dev,
/* Stripped frame status of Transmitted Frame */
tsv->svv[1] = tx_fstatus & 0xff;
- return (0);
+ return 0;
}
static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
@@ -3436,7 +3438,7 @@ static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
tsv->svv[4] = MSB(tp->misc_command_data[2]);
tsv->svv[5] = LSB(tp->misc_command_data[2]);
- return (0);
+ return 0;
}
static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3444,7 +3446,7 @@ static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
tsv->svi = WRAP_DATA;
tsv->svl = S_WRAP_DATA;
- return (0);
+ return 0;
}
/*
@@ -3464,9 +3466,9 @@ static int smctr_open(struct net_device *dev)
err = smctr_init_adapter(dev);
if(err < 0)
- return (err);
+ return err;
- return (err);
+ return err;
}
/* Interrupt driven open of Token card. */
@@ -3481,9 +3483,9 @@ static int smctr_open_tr(struct net_device *dev)
/* Now we can actually open the adapter. */
if(tp->status == OPEN)
- return (0);
+ return 0;
if(tp->status != INITIALIZED)
- return (-1);
+ return -1;
/* FIXME: it would work a lot better if we masked the irq sources
on the card here, then we could skip the locking and poll nicely */
@@ -3560,7 +3562,7 @@ static int smctr_open_tr(struct net_device *dev)
out:
spin_unlock_irqrestore(&tp->lock, flags);
- return (err);
+ return err;
}
/* Check for a network adapter of this type,
@@ -3675,7 +3677,7 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &smctr_netdev_ops;
dev->watchdog_timeo = HZ;
- return (0);
+ return 0;
out:
return err;
@@ -3699,13 +3701,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
case INIT:
if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
{
- return (rcode);
+ return rcode;
}
if((err = smctr_send_rsp(dev, rmf, rcode,
correlator)))
{
- return (err);
+ return err;
}
break;
@@ -3713,13 +3715,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
if((rcode = smctr_rcv_chg_param(dev, rmf,
&correlator)) ==HARDWARE_FAILED)
{
- return (rcode);
+ return rcode;
}
if((err = smctr_send_rsp(dev, rmf, rcode,
correlator)))
{
- return (err);
+ return err;
}
break;
@@ -3728,16 +3730,16 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
rmf, &correlator)) != POSITIVE_ACK)
{
if(rcode == HARDWARE_FAILED)
- return (rcode);
+ return rcode;
else
- return (smctr_send_rsp(dev, rmf,
- rcode, correlator));
+ return smctr_send_rsp(dev, rmf,
+ rcode, correlator);
}
if((err = smctr_send_rpt_addr(dev, rmf,
correlator)))
{
- return (err);
+ return err;
}
break;
@@ -3746,17 +3748,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
rmf, &correlator)) != POSITIVE_ACK)
{
if(rcode == HARDWARE_FAILED)
- return (rcode);
+ return rcode;
else
- return (smctr_send_rsp(dev, rmf,
+ return smctr_send_rsp(dev, rmf,
rcode,
- correlator));
+ correlator);
}
if((err = smctr_send_rpt_attch(dev, rmf,
correlator)))
{
- return (err);
+ return err;
}
break;
@@ -3765,17 +3767,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
rmf, &correlator)) != POSITIVE_ACK)
{
if(rcode == HARDWARE_FAILED)
- return (rcode);
+ return rcode;
else
- return (smctr_send_rsp(dev, rmf,
+ return smctr_send_rsp(dev, rmf,
rcode,
- correlator));
+ correlator);
}
if((err = smctr_send_rpt_state(dev, rmf,
correlator)))
{
- return (err);
+ return err;
}
break;
@@ -3786,17 +3788,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
!= POSITIVE_ACK)
{
if(rcode == HARDWARE_FAILED)
- return (rcode);
+ return rcode;
else
- return (smctr_send_rsp(dev, rmf,
+ return smctr_send_rsp(dev, rmf,
rcode,
- correlator));
+ correlator);
}
if((err = smctr_send_tx_forward(dev, rmf,
&tx_fstatus)) == HARDWARE_FAILED)
{
- return (err);
+ return err;
}
if(err == A_FRAME_WAS_FORWARDED)
@@ -3805,7 +3807,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
rmf, tx_fstatus))
== HARDWARE_FAILED)
{
- return (err);
+ return err;
}
}
break;
@@ -3834,7 +3836,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
if((err = smctr_send_rsp(dev, rmf,rcode,
correlator)))
{
- return (err);
+ return err;
}
}
@@ -3899,7 +3901,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
err = 0;
}
- return (err);
+ return err;
}
/* Adapter RAM test. Incremental word ODD boundary data test. */
@@ -3942,7 +3944,7 @@ static int smctr_ram_memory_test(struct net_device *dev)
err_offset = j;
err_word = word_read;
err_pattern = word_pattern;
- return (RAM_TEST_FAILED);
+ return RAM_TEST_FAILED;
}
}
}
@@ -3966,14 +3968,14 @@ static int smctr_ram_memory_test(struct net_device *dev)
err_offset = j;
err_word = word_read;
err_pattern = word_pattern;
- return (RAM_TEST_FAILED);
+ return RAM_TEST_FAILED;
}
}
}
smctr_set_page(dev, (__u8 *)tp->ram_access);
- return (0);
+ return 0;
}
static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
@@ -3986,7 +3988,7 @@ static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
/* This Frame can only come from a CRS */
if((rmf->dc_sc & SC_MASK) != SC_CRS)
- return(E_INAPPROPRIATE_SOURCE_CLASS);
+ return E_INAPPROPRIATE_SOURCE_CLASS;
/* Remove MVID Length from total length. */
vlen = (signed short)rmf->vl - 4;
@@ -4058,7 +4060,7 @@ static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
}
}
- return (rcode);
+ return rcode;
}
static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
@@ -4071,7 +4073,7 @@ static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
/* This Frame can only come from a RPS */
if((rmf->dc_sc & SC_MASK) != SC_RPS)
- return (E_INAPPROPRIATE_SOURCE_CLASS);
+ return E_INAPPROPRIATE_SOURCE_CLASS;
/* Remove MVID Length from total length. */
vlen = (signed short)rmf->vl - 4;
@@ -4133,7 +4135,7 @@ static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
}
}
- return (rcode);
+ return rcode;
}
static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
@@ -4145,7 +4147,7 @@ static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
/* This Frame can only come from a CRS */
if((rmf->dc_sc & SC_MASK) != SC_CRS)
- return (E_INAPPROPRIATE_SOURCE_CLASS);
+ return E_INAPPROPRIATE_SOURCE_CLASS;
/* Remove MVID Length from total length */
vlen = (signed short)rmf->vl - 4;
@@ -4193,7 +4195,7 @@ static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
}
}
- return (rcode);
+ return rcode;
}
static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
@@ -4250,7 +4252,7 @@ static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
}
}
- return (rcode);
+ return rcode;
}
static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
@@ -4284,7 +4286,7 @@ static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
}
- return (E_UNRECOGNIZED_VECTOR_ID);
+ return E_UNRECOGNIZED_VECTOR_ID;
}
/*
@@ -4311,7 +4313,7 @@ static int smctr_reset_adapter(struct net_device *dev)
*/
outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
- return (0);
+ return 0;
}
static int smctr_restart_tx_chain(struct net_device *dev, short queue)
@@ -4329,7 +4331,7 @@ static int smctr_restart_tx_chain(struct net_device *dev, short queue)
err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
}
- return (err);
+ return err;
}
static int smctr_ring_status_chg(struct net_device *dev)
@@ -4371,7 +4373,7 @@ static int smctr_ring_status_chg(struct net_device *dev)
}
if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
- return (0);
+ return 0;
switch(tp->ring_status)
{
@@ -4421,7 +4423,7 @@ static int smctr_ring_status_chg(struct net_device *dev)
break;
}
- return (0);
+ return 0;
}
static int smctr_rx_frame(struct net_device *dev)
@@ -4486,7 +4488,7 @@ static int smctr_rx_frame(struct net_device *dev)
break;
}
- return (err);
+ return err;
}
static int smctr_send_dat(struct net_device *dev)
@@ -4502,7 +4504,7 @@ static int smctr_send_dat(struct net_device *dev)
if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
{
- return (OUT_OF_RESOURCES);
+ return OUT_OF_RESOURCES;
}
/* Initialize DAT Data Fields. */
@@ -4524,7 +4526,7 @@ static int smctr_send_dat(struct net_device *dev)
/* Start Transmit. */
if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
- return (err);
+ return err;
/* Wait for Transmit to Complete */
for(i = 0; i < 10000; i++)
@@ -4538,7 +4540,7 @@ static int smctr_send_dat(struct net_device *dev)
if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
{
- return (INITIALIZE_FAILED);
+ return INITIALIZE_FAILED;
}
/* De-allocated Tx FCB and Frame Buffer
@@ -4549,7 +4551,7 @@ static int smctr_send_dat(struct net_device *dev)
tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
- return (0);
+ return 0;
}
static void smctr_timeout(struct net_device *dev)
@@ -4610,7 +4612,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
+ S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
{
- return (OUT_OF_RESOURCES);
+ return OUT_OF_RESOURCES;
}
/* Initialize DAT Data Fields. */
@@ -4639,7 +4641,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
/* Start Transmit. */
tmf->vl = SWAP_BYTES(tmf->vl);
if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
- return (err);
+ return err;
/* Wait for Transmit to Complete. (10 ms). */
for(i=0; i < 10000; i++)
@@ -4653,7 +4655,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
{
- return (LOBE_MEDIA_TEST_FAILED);
+ return LOBE_MEDIA_TEST_FAILED;
}
/* De-allocated Tx FCB and Frame Buffer
@@ -4664,7 +4666,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
- return (0);
+ return 0;
}
static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
@@ -4679,7 +4681,7 @@ static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
+ S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
== (FCBlock *)(-1L))
{
- return (0);
+ return 0;
}
tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4722,7 +4724,7 @@ static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
*/
tmf->vl = SWAP_BYTES(tmf->vl);
- return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+ return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
@@ -4737,7 +4739,7 @@ static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
+ S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
== (FCBlock *)(-1L))
{
- return (0);
+ return 0;
}
tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4776,7 +4778,7 @@ static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
*/
tmf->vl = SWAP_BYTES(tmf->vl);
- return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+ return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
@@ -4791,7 +4793,7 @@ static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
+ S_RING_STATION_STATUS + S_STATION_IDENTIFER))
== (FCBlock *)(-1L))
{
- return (0);
+ return 0;
}
tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4826,7 +4828,7 @@ static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
*/
tmf->vl = SWAP_BYTES(tmf->vl);
- return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+ return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
static int smctr_send_rpt_tx_forward(struct net_device *dev,
@@ -4839,7 +4841,7 @@ static int smctr_send_rpt_tx_forward(struct net_device *dev,
if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
{
- return (0);
+ return 0;
}
tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4862,7 +4864,7 @@ static int smctr_send_rpt_tx_forward(struct net_device *dev,
*/
tmf->vl = SWAP_BYTES(tmf->vl);
- return(smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+ return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
@@ -4875,7 +4877,7 @@ static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
{
- return (0);
+ return 0;
}
tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4888,7 +4890,7 @@ static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
smctr_make_corr(dev, tsv, correlator);
- return (0);
+ return 0;
}
static int smctr_send_rq_init(struct net_device *dev)
@@ -4907,7 +4909,7 @@ static int smctr_send_rq_init(struct net_device *dev)
+ S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
== (FCBlock *)(-1L)))
{
- return (0);
+ return 0;
}
tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4943,7 +4945,7 @@ static int smctr_send_rq_init(struct net_device *dev)
tmf->vl = SWAP_BYTES(tmf->vl);
if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
- return (err);
+ return err;
/* Wait for Transmit to Complete */
for(i = 0; i < 10000; i++)
@@ -4957,7 +4959,7 @@ static int smctr_send_rq_init(struct net_device *dev)
fstatus = fcb->frame_status;
if(!(fstatus & FCB_COMMAND_DONE))
- return (HARDWARE_FAILED);
+ return HARDWARE_FAILED;
if(!(fstatus & FCB_TX_STATUS_E))
count++;
@@ -4971,7 +4973,7 @@ static int smctr_send_rq_init(struct net_device *dev)
smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
} while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));
- return (smctr_join_complete_state(dev));
+ return smctr_join_complete_state(dev);
}
static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
@@ -4984,13 +4986,13 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
/* Check if this is the END POINT of the Transmit Forward Chain. */
if(rmf->vl <= 18)
- return (0);
+ return 0;
/* Allocate Transmit FCB only by requesting 0 bytes
* of data buffer.
*/
if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
- return (0);
+ return 0;
/* Set pointer to Transmit Frame Buffer to the data
* portion of the received TX Forward frame, making
@@ -5006,7 +5008,7 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;
if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
- return (err);
+ return err;
/* Wait for Transmit to Complete */
for(i = 0; i < 10000; i++)
@@ -5020,7 +5022,7 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
if(!(fcb->frame_status & FCB_COMMAND_DONE))
{
if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
- return (err);
+ return err;
for(i = 0; i < 10000; i++)
{
@@ -5030,12 +5032,12 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
}
if(!(fcb->frame_status & FCB_COMMAND_DONE))
- return (HARDWARE_FAILED);
+ return HARDWARE_FAILED;
}
*tx_fstatus = fcb->frame_status;
- return (A_FRAME_WAS_FORWARDED);
+ return A_FRAME_WAS_FORWARDED;
}
static int smctr_set_auth_access_pri(struct net_device *dev,
@@ -5044,11 +5046,11 @@ static int smctr_set_auth_access_pri(struct net_device *dev,
struct net_local *tp = netdev_priv(dev);
if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
static int smctr_set_auth_funct_class(struct net_device *dev,
@@ -5057,22 +5059,22 @@ static int smctr_set_auth_funct_class(struct net_device *dev,
struct net_local *tp = netdev_priv(dev);
if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
__u16 *correlator)
{
if(rsv->svl != S_CORRELATOR)
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
*correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
static int smctr_set_error_timer_value(struct net_device *dev,
@@ -5082,34 +5084,34 @@ static int smctr_set_error_timer_value(struct net_device *dev,
int err;
if(rsv->svl != S_ERROR_TIMER_VALUE)
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;
smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
static int smctr_set_frame_forward(struct net_device *dev,
MAC_SUB_VECTOR *rsv, __u8 dc_sc)
{
if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
if((dc_sc & DC_MASK) != DC_CRS)
{
if(rsv->svl >= 2 && rsv->svl < 20)
- return (E_TRANSMIT_FORWARD_INVALID);
+ return E_TRANSMIT_FORWARD_INVALID;
if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
- return (E_TRANSMIT_FORWARD_INVALID);
+ return E_TRANSMIT_FORWARD_INVALID;
}
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
static int smctr_set_local_ring_num(struct net_device *dev,
@@ -5118,13 +5120,13 @@ static int smctr_set_local_ring_num(struct net_device *dev,
struct net_local *tp = netdev_priv(dev);
if(rsv->svl != S_LOCAL_RING_NUMBER)
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
if(tp->ptr_local_ring_num)
*(__u16 *)(tp->ptr_local_ring_num)
= (rsv->svv[0] << 8 | rsv->svv[1]);
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
@@ -5140,7 +5142,7 @@ static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
outb(tp->trc_mask, ioaddr + CSR);
}
- return (0);
+ return 0;
}
static void smctr_set_multicast_list(struct net_device *dev)
@@ -5159,7 +5161,7 @@ static int smctr_set_page(struct net_device *dev, __u8 *buf)
amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
outb(amask, dev->base_addr + PR);
- return (0);
+ return 0;
}
static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
@@ -5167,13 +5169,13 @@ static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
int err;
if(rsv->svl != S_PHYSICAL_DROP)
- return (E_SUB_VECTOR_LENGTH_ERROR);
+ return E_SUB_VECTOR_LENGTH_ERROR;
smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
if((err = smctr_wait_cmd(dev)))
- return (err);
+ return err;
- return (POSITIVE_ACK);
+ return POSITIVE_ACK;
}
/* Reset the ring speed to the opposite of what it was. This auto-pilot
@@ -5195,16 +5197,16 @@ static int smctr_set_ring_speed(struct net_device *dev)
smctr_reset_adapter(dev);
if((err = smctr_init_card_real(dev)))
- return (err);
+ return err;
smctr_enable_bic_int(dev);
if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
- return (err);
+ return err;
smctr_disable_16bit(dev);
- return (0);
+ return 0;
}
static int smctr_set_rx_look_ahead(struct net_device *dev)
@@ -5233,7 +5235,7 @@ static int smctr_set_rx_look_ahead(struct net_device *dev)
*((__u16 *)(tp->ram_access)) = sword;
}
- return (0);
+ return 0;
}
static int smctr_set_trc_reset(int ioaddr)
@@ -5243,7 +5245,7 @@ static int smctr_set_trc_reset(int ioaddr)
r = inb(ioaddr + MSR);
outb(MSR_RST | r, ioaddr + MSR);
- return (0);
+ return 0;
}
/*
@@ -5259,10 +5261,10 @@ static int smctr_setup_single_cmd(struct net_device *dev,
printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);
if((err = smctr_wait_while_cbusy(dev)))
- return (err);
+ return err;
if((err = (unsigned int)smctr_wait_cmd(dev)))
- return (err);
+ return err;
tp->acb_head->cmd_done_status = 0;
tp->acb_head->cmd = command;
@@ -5270,7 +5272,7 @@ static int smctr_setup_single_cmd(struct net_device *dev,
err = smctr_issue_resume_acb_cmd(dev);
- return (err);
+ return err;
}
/*
@@ -5287,7 +5289,7 @@ static int smctr_setup_single_cmd_w_data(struct net_device *dev,
tp->acb_head->data_offset_lo
= (__u16)TRC_POINTER(tp->misc_command_data);
- return(smctr_issue_resume_acb_cmd(dev));
+ return smctr_issue_resume_acb_cmd(dev);
}
static char *smctr_malloc(struct net_device *dev, __u16 size)
@@ -5298,7 +5300,7 @@ static char *smctr_malloc(struct net_device *dev, __u16 size)
m = (char *)(tp->ram_access + tp->sh_mem_used);
tp->sh_mem_used += (__u32)size;
- return (m);
+ return m;
}
static int smctr_status_chg(struct net_device *dev)
@@ -5333,7 +5335,7 @@ static int smctr_status_chg(struct net_device *dev)
break;
}
- return (0);
+ return 0;
}
static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
@@ -5355,7 +5357,7 @@ static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
}
- return (err);
+ return err;
}
static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
@@ -5409,7 +5411,7 @@ static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
break;
}
- return (err);
+ return err;
}
static unsigned short smctr_tx_move_frame(struct net_device *dev,
@@ -5450,7 +5452,7 @@ static unsigned short smctr_tx_move_frame(struct net_device *dev,
pbuff += len;
}
- return (0);
+ return 0;
}
/* Update the error statistic counters for this adapter. */
@@ -5493,7 +5495,7 @@ static int smctr_update_err_stats(struct net_device *dev)
if(tstat->token_errors)
tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
- return (0);
+ return 0;
}
static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
@@ -5530,7 +5532,7 @@ static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
tp->rx_bdb_curr[queue] = bdb;
- return (0);
+ return 0;
}
static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
@@ -5542,13 +5544,13 @@ static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
printk(KERN_DEBUG "smctr_update_tx_chain\n");
if(tp->num_tx_fcbs_used[queue] <= 0)
- return (HARDWARE_FAILED);
+ return HARDWARE_FAILED;
else
{
if(tp->tx_buff_used[queue] < fcb->memory_alloc)
{
tp->tx_buff_used[queue] = 0;
- return (HARDWARE_FAILED);
+ return HARDWARE_FAILED;
}
tp->tx_buff_used[queue] -= fcb->memory_alloc;
@@ -5566,7 +5568,7 @@ static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
fcb->frame_status = 0;
tp->tx_fcb_end[queue] = fcb->next_ptr;
netif_wake_queue(dev);
- return (0);
+ return 0;
}
}
@@ -5587,12 +5589,12 @@ static int smctr_wait_cmd(struct net_device *dev)
}
if(loop_count == 0)
- return(HARDWARE_FAILED);
+ return HARDWARE_FAILED;
if(tp->acb_head->cmd_done_status & 0xff)
- return(HARDWARE_FAILED);
+ return HARDWARE_FAILED;
- return (0);
+ return 0;
}
static int smctr_wait_while_cbusy(struct net_device *dev)
@@ -5624,9 +5626,9 @@ static int smctr_wait_while_cbusy(struct net_device *dev)
}
if(timeout)
- return (0);
+ return 0;
else
- return (HARDWARE_FAILED);
+ return HARDWARE_FAILED;
}
#ifdef MODULE
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 435ef7d5470..c83f4f6e39e 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -224,7 +224,7 @@ static int madgemc_sifprobe(struct net_device *dev)
chk2 ^= 0x0FE;
if(chk1 != chk2)
- return (-1); /* No adapter */
+ return -1; /* No adapter */
chk1 -= 2;
} while(chk1 != 0); /* Repeat 128 times (all byte values) */
@@ -232,7 +232,7 @@ static int madgemc_sifprobe(struct net_device *dev)
/* Restore the SIFADR value */
SIFWRITEB(old, SIFADR);
- return (0);
+ return 0;
}
#endif
@@ -271,7 +271,7 @@ int tms380tr_open(struct net_device *dev)
{
printk(KERN_INFO "%s: Chipset initialization error\n",
dev->name);
- return (-1);
+ return -1;
}
tp->timer.expires = jiffies + 30*HZ;
@@ -298,7 +298,7 @@ int tms380tr_open(struct net_device *dev)
if(tp->AdapterVirtOpenFlag == 0)
{
tms380tr_disable_interrupts(dev);
- return (-1);
+ return -1;
}
tp->StartTime = jiffies;
@@ -309,7 +309,7 @@ int tms380tr_open(struct net_device *dev)
tp->timer.data = (unsigned long)dev;
add_timer(&tp->timer);
- return (0);
+ return 0;
}
/*
@@ -343,23 +343,23 @@ static int tms380tr_chipset_init(struct net_device *dev)
printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
err = tms380tr_reset_adapter(dev);
if(err < 0)
- return (-1);
+ return -1;
if(tms380tr_debug > 3)
printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
err = tms380tr_bringup_diags(dev);
if(err < 0)
- return (-1);
+ return -1;
if(tms380tr_debug > 3)
printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
err = tms380tr_init_adapter(dev);
if(err < 0)
- return (-1);
+ return -1;
if(tms380tr_debug > 3)
printk(KERN_DEBUG "%s: Done!\n", dev->name);
- return (0);
+ return 0;
}
/*
@@ -877,7 +877,7 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
IrqType != STS_IRQ_COMMAND_STATUS &&
IrqType != STS_IRQ_RING_STATUS)
{
- return (1); /* SSB not involved. */
+ return 1; /* SSB not involved. */
}
/* Note: All fields of the SSB have been set to all ones (-1) after it
@@ -887,21 +887,21 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
*/
if(ssb->STS == (unsigned short) -1)
- return (0); /* Command field not yet available. */
+ return 0; /* Command field not yet available. */
if(IrqType == STS_IRQ_COMMAND_STATUS)
- return (1); /* Status fields not always affected. */
+ return 1; /* Status fields not always affected. */
if(ssb->Parm[0] == (unsigned short) -1)
- return (0); /* Status 1 field not yet available. */
+ return 0; /* Status 1 field not yet available. */
if(IrqType == STS_IRQ_RING_STATUS)
- return (1); /* Status 2 & 3 fields not affected. */
+ return 1; /* Status 2 & 3 fields not affected. */
/* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
if(ssb->Parm[1] == (unsigned short) -1)
- return (0); /* Status 2 field not yet available. */
+ return 0; /* Status 2 field not yet available. */
if(ssb->Parm[2] == (unsigned short) -1)
- return (0); /* Status 3 field not yet available. */
+ return 0; /* Status 3 field not yet available. */
- return (1); /* All SSB fields have been written by the adapter. */
+ return 1; /* All SSB fields have been written by the adapter. */
}
/*
@@ -1143,7 +1143,7 @@ int tms380tr_close(struct net_device *dev)
#endif
tms380tr_cancel_tx_queue(tp);
- return (0);
+ return 0;
}
/*
@@ -1154,7 +1154,7 @@ static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
{
struct net_local *tp = netdev_priv(dev);
- return ((struct net_device_stats *)&tp->MacStat);
+ return (struct net_device_stats *)&tp->MacStat;
}
/*
@@ -1256,7 +1256,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
dev->name, "tms380tr.bin");
- return (-1);
+ return -1;
}
fw_ptr = (unsigned short *)fw_entry->data;
@@ -1321,16 +1321,14 @@ static int tms380tr_reset_adapter(struct net_device *dev)
/* Clear CPHALT and start BUD */
SIFWRITEW(c, SIFACL);
- if (fw_entry)
- release_firmware(fw_entry);
- return (1);
+ release_firmware(fw_entry);
+ return 1;
}
} while(count == 0);
- if (fw_entry)
- release_firmware(fw_entry);
+ release_firmware(fw_entry);
printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
- return (-1);
+ return -1;
}
MODULE_FIRMWARE("tms380tr.bin");
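The tms380tr_reset_adapter() hunk above drops the 'if (fw_entry)' guards: on those paths request_firmware() has already succeeded, so fw_entry cannot be NULL (and release_firmware() is, to my knowledge, NULL-safe anyway). A user-space analogue of the same dead-guard removal, using malloc/free instead of the firmware API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf = malloc(64);
            if (!buf)                       /* the only place NULL can appear */
                    return 1;

            strcpy(buf, "payload");
            puts(buf);

            /* Before: if (buf) free(buf);  -- the guard is dead code here,
             * because the early return above already handled allocation
             * failure (and free(NULL) is defined to be a no-op anyway). */
            free(buf);
            return 0;
    }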
@@ -1365,7 +1363,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
printk(KERN_DEBUG " %04X\n", Status);
/* BUD successfully completed */
if(Status == STS_INITIALIZE)
- return (1);
+ return 1;
/* Unrecoverable hardware error, BUD not completed? */
} while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
!= (STS_ERROR | STS_TEST)));
@@ -1392,7 +1390,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
else
printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
- return (-1);
+ return -1;
}
/*
@@ -1466,7 +1464,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
{
printk(KERN_INFO "%s: DMA failed\n", dev->name);
/* DMA data error: wrong data in SCB */
- return (-1);
+ return -1;
}
i++;
} while(i < 6);
@@ -1475,11 +1473,11 @@ static int tms380tr_init_adapter(struct net_device *dev)
do { /* Test if contents of SSB is valid */
if(SSB_Test[i] != *(sb_ptr + i))
/* DMA data error: wrong data in SSB */
- return (-1);
+ return -1;
i++;
} while (i < 8);
- return (1); /* Adapter successfully initialized */
+ return 1; /* Adapter successfully initialized */
}
else
{
@@ -1490,7 +1488,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
Status &= STS_ERROR_MASK;
/* ShowInitialisationErrorCode(Status); */
printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
- return (-1); /* Unrecoverable error */
+ return -1; /* Unrecoverable error */
}
else
{
@@ -1505,7 +1503,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
} while(retry_cnt > 0);
printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
- return (-1);
+ return -1;
}
/*
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index d4c7c0c0a3d..d3e788a9cd1 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -125,18 +125,16 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
dev->irq = pci_irq_line;
dev->dma = 0;
- printk("%s: %s\n", dev->name, cardinfo->name);
- printk("%s: IO: %#4lx IRQ: %d\n",
- dev->name, dev->base_addr, dev->irq);
+ dev_info(&pdev->dev, "%s\n", cardinfo->name);
+ dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq);
tms_pci_read_eeprom(dev);
- printk("%s: Ring Station Address: %pM\n",
- dev->name, dev->dev_addr);
+ dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr);
ret = tmsdev_init(dev, &pdev->dev);
if (ret) {
- printk("%s: unable to get memory for dev->priv.\n", dev->name);
+ dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
goto err_out_region;
}
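The tmspci.c hunk above replaces hand-formatted printk("%s: ...", dev->name, ...) calls with dev_info(&pdev->dev, ...), so the device identification comes from the device object rather than being repeated at every call site. A rough user-space imitation of that pattern; 'my_dev_info' is a hypothetical stand-in, not the kernel API:

    #include <stdio.h>

    struct device { const char *name; };

    /* Hypothetical stand-in for dev_info(): the prefix is derived from the
     * device object, so call sites no longer format the name themselves. */
    #define my_dev_info(dev, fmt, ...) \
            printf("%s: " fmt, (dev)->name, ##__VA_ARGS__)

    int main(void)
    {
            struct device pdev = { .name = "0000:02:00.0" };

            my_dev_info(&pdev, "Ring Station Address: %s\n", "00:11:22:33:44:55");
            return 0;
    }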
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a03730bd1da..5c633a32eae 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -219,7 +219,7 @@ static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
if (i == 100)
return 0xffff;
else
- return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+ return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
}
static void tsi108_write_mii(struct tsi108_prv_data *data,
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 516713fa0a0..f3035951422 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -11,8 +11,8 @@ menuconfig NET_TULIP
if NET_TULIP
config DE2104X
- tristate "Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Early DECchip Tulip (dc2104x) PCI support"
+ depends on PCI
select CRC32
---help---
This driver is developed for the SMC EtherPower series Ethernet
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa57757a2..28e1ffb13db 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@ enum {
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
NWayRestart = (1 << 12),
NonselPortActive = (1 << 9),
+ SelPortActive = (1 << 8),
LinkFailStatus = (1 << 2),
NetCxnErr = (1 << 1),
};
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
@@ -945,8 +948,9 @@ static void de_set_media (struct de_private *de)
else
macmode &= ~FullDuplex;
- if (netif_msg_link(de)) {
+ if (netif_msg_link(de))
dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
+ if (netif_msg_hw(de)) {
dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
dr32(MacMode), dr32(SIAStatus),
dr32(CSR13), dr32(CSR14), dr32(CSR15));
@@ -1064,6 +1068,9 @@ static void de21041_media_timer (unsigned long data)
unsigned int carrier;
unsigned long flags;
+ /* clear port active bits */
+ dw32(SIAStatus, NonselPortActive | SelPortActive);
+
carrier = (status & NetCxnErr) ? 0 : 1;
if (carrier) {
@@ -1158,14 +1165,29 @@ no_link_yet:
static void de_media_interrupt (struct de_private *de, u32 status)
{
if (status & LinkPass) {
+ /* Ignore if current media is AUI or BNC and we can't use TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC) &&
+ (de->media_lock ||
+ !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
+ return;
+ /* If current media is not TP, change it to TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC)) {
+ de->media_type = DE_MEDIA_TP_AUTO;
+ de_stop_rxtx(de);
+ de_set_media(de);
+ de_start_rxtx(de);
+ }
de_link_up(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
return;
}
BUG_ON(!(status & LinkFail));
-
- if (netif_carrier_ok(de->dev)) {
+ /* Mark the link as down only if current media is TP */
+ if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
+ de->media_type != DE_MEDIA_BNC) {
de_link_down(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
}
@@ -1229,6 +1251,7 @@ static void de_adapter_sleep (struct de_private *de)
if (de->de21040)
return;
+ dw32(CSR13, 0); /* Reset phy */
pci_read_config_dword(de->pdev, PCIPM, &pmctl);
pmctl |= PM_Sleep;
pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1597,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
return 0; /* nothing to change */
de_link_down(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
de_stop_rxtx(de);
de->media_type = new_media;
de->media_lock = media_lock;
de->media_advertise = ecmd->advertising;
de_set_media(de);
+ if (netif_running(de->dev))
+ de_start_rxtx(de);
return 0;
}
@@ -1911,8 +1937,14 @@ fill_defaults:
for (i = 0; i < DE_MAX_MEDIA; i++) {
if (de->media[i].csr13 == 0xffff)
de->media[i].csr13 = t21041_csr13[i];
- if (de->media[i].csr14 == 0xffff)
- de->media[i].csr14 = t21041_csr14[i];
+ if (de->media[i].csr14 == 0xffff) {
+ /* autonegotiation is broken at least on some chip
+ revisions - rev. 0x21 works, 0x11 does not */
+ if (de->pdev->revision < 0x20)
+ de->media[i].csr14 = t21041_csr14_brk[i];
+ else
+ de->media[i].csr14 = t21041_csr14[i];
+ }
if (de->media[i].csr15 == 0xffff)
de->media[i].csr15 = t21041_csr15[i];
}
@@ -2158,6 +2190,8 @@ static int de_resume (struct pci_dev *pdev)
dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
+ pci_set_master(pdev);
+ de_init_rings(de);
de_init_hw(de);
out_attach:
netif_device_attach(dev);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 75a64c88cf7..4dbd493b996 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1448,7 +1448,7 @@ de4x5_sw_reset(struct net_device *dev)
status = -EIO;
}
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
lp->tx_old = lp->tx_new;
return status;
@@ -1506,7 +1506,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
lp->stats.tx_bytes += skb->len;
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
@@ -1657,7 +1657,7 @@ de4x5_rx(struct net_device *dev)
}
/* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
+ for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
barrier();
}
@@ -1668,7 +1668,7 @@ de4x5_rx(struct net_device *dev)
/*
** Update entry information
*/
- lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
}
return 0;
@@ -1726,7 +1726,7 @@ de4x5_tx(struct net_device *dev)
}
/* Update all the pointers */
- lp->tx_old = (++lp->tx_old) % lp->txRingSize;
+ lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
}
/* Any resources available? */
@@ -1801,7 +1801,7 @@ de4x5_rx_ovfc(struct net_device *dev)
for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (++lp->rx_new % lp->rxRingSize);
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
}
outl(omr, DE4X5_OMR);
@@ -1932,7 +1932,7 @@ set_multicast_list(struct net_device *dev)
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
dev->trans_start = jiffies; /* prevent tx timeout */
}
@@ -3119,7 +3119,7 @@ dc2114x_autoconf(struct net_device *dev)
if (lp->media == _100Mb) {
if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
lp->media = SPD_DET;
- return (slnk & ~TIMER_CB);
+ return slnk & ~TIMER_CB;
}
} else {
if (wait_for_link(dev) < 0) {
@@ -3484,7 +3484,7 @@ is_spd_100(struct net_device *dev)
spd = ((~gep_rd(dev)) & GEP_SLNK);
} else {
if ((lp->ibn == 2) || !lp->asBitValid)
- return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+ return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
(lp->linkOK & ~lp->asBitValid);
@@ -3502,15 +3502,15 @@ is_100_up(struct net_device *dev)
if (lp->useMII) {
/* Double read for sticky bits & temporary drops */
mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
} else if (!lp->useSROM) { /* de500-xa */
- return ((~gep_rd(dev)) & GEP_SLNK);
+ return (~gep_rd(dev)) & GEP_SLNK;
} else {
if ((lp->ibn == 2) || !lp->asBitValid)
- return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+ return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
- return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid));
+ return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
}
}
@@ -3523,17 +3523,17 @@ is_10_up(struct net_device *dev)
if (lp->useMII) {
/* Double read for sticky bits & temporary drops */
mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
} else if (!lp->useSROM) { /* de500-xa */
- return ((~gep_rd(dev)) & GEP_LNP);
+ return (~gep_rd(dev)) & GEP_LNP;
} else {
if ((lp->ibn == 2) || !lp->asBitValid)
- return (((lp->chipset & ~0x00ff) == DC2114x) ?
+ return ((lp->chipset & ~0x00ff) == DC2114x) ?
(~inl(DE4X5_SISR)&SISR_LS10):
- 0);
+ 0;
- return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid));
+ return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
}
}
@@ -3544,7 +3544,7 @@ is_anc_capable(struct net_device *dev)
u_long iobase = dev->base_addr;
if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
+ return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
} else {
@@ -3568,7 +3568,7 @@ ping_media(struct net_device *dev, int msec)
lp->tmp = lp->tx_new; /* Remember the ring position */
load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD);
}
@@ -4930,7 +4930,7 @@ getfrom_mii(u32 command, u_long ioaddr)
outl(command | MII_MDC, ioaddr);
udelay(1);
- return ((inl(ioaddr) >> 19) & 1);
+ return (inl(ioaddr) >> 19) & 1;
}
/*
@@ -4975,8 +4975,8 @@ mii_get_oui(u_char phyaddr, u_long ioaddr)
a.breg[0]=a.breg[1];
a.breg[1]=i;
- return ((a.reg<<8)|ret); */ /* SEEQ and Cypress way */
-/* return ((r2<<6)|(u_int)(r3>>10)); */ /* NATIONAL and BROADCOM way */
+ return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
+/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
return r2; /* (I did it) My way */
}
@@ -5144,7 +5144,7 @@ gep_rd(struct net_device *dev)
if (lp->chipset == DC21140) {
return inl(DE4X5_GEP);
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return (inl(DE4X5_SIGR) & 0x000fffff);
+ return inl(DE4X5_SIGR) & 0x000fffff;
}
return 0;
@@ -5417,7 +5417,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* Set up the descriptor and give ownership to the card */
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
netif_wake_queue(dev); /* Unlock the TX ring */
break;
@@ -5474,7 +5474,8 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.lval[6] = inl(DE4X5_STRR); j+=4;
tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
ioc->len = j;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ if (copy_to_user(ioc->data, tmp.lval, ioc->len))
+ return -EFAULT;
break;
#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
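
Expressions of the form lp->tx_new = (++lp->tx_new) % lp->txRingSize; modify tx_new twice without an intervening sequence point, which is undefined behaviour in C and draws a sequence-point warning from gcc; the replacement form advances the index with a single side effect. A minimal sketch of the well-defined pattern, using a hypothetical helper name (the driver simply open-codes the expression at each site):

	/* ring_advance() is a hypothetical helper illustrating the pattern:
	 * (index + 1) % size reads the old value once and writes it once. */
	static inline unsigned int ring_advance(unsigned int index, unsigned int size)
	{
		return (index + 1) % size;
	}

	/* usage: lp->tx_new = ring_advance(lp->tx_new, lp->txRingSize); */
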
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 0bc4f3030a8..a9f7d5d1a26 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -599,7 +599,7 @@ static int dmfe_open(struct DEVICE *dev)
init_timer(&db->timer);
db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
db->timer.data = (unsigned long)dev;
- db->timer.function = &dmfe_timer;
+ db->timer.function = dmfe_timer;
add_timer(&db->timer);
return 0;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 1faf7a4d720..0013642903e 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -180,21 +180,24 @@ int tulip_poll(struct napi_struct *napi, int budget)
dev_warn(&dev->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
- tp->stats.rx_length_errors++;
- }
+ dev->stats.rx_length_errors++;
+ }
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
- tp->stats.rx_errors++; /* end of a packet.*/
- if (pkt_len > 1518 ||
- (status & RxDescRunt))
- tp->stats.rx_length_errors++;
-
- if (status & 0x0004) tp->stats.rx_frame_errors++;
- if (status & 0x0002) tp->stats.rx_crc_errors++;
- if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (pkt_len > 1518 ||
+ (status & RxDescRunt))
+ dev->stats.rx_length_errors++;
+
+ if (status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x0002)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x0001)
+ dev->stats.rx_fifo_errors++;
}
} else {
struct sk_buff *skb;
@@ -244,8 +247,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
netif_receive_skb(skb);
- tp->stats.rx_packets++;
- tp->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
received++;
@@ -404,20 +407,23 @@ static int tulip_rx(struct net_device *dev)
dev_warn(&dev->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
- tp->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
- tp->stats.rx_errors++; /* end of a packet.*/
+ dev->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
- tp->stats.rx_length_errors++;
- if (status & 0x0004) tp->stats.rx_frame_errors++;
- if (status & 0x0002) tp->stats.rx_crc_errors++;
- if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ dev->stats.rx_length_errors++;
+ if (status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x0002)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x0001)
+ dev->stats.rx_fifo_errors++;
}
} else {
struct sk_buff *skb;
@@ -467,8 +473,8 @@ static int tulip_rx(struct net_device *dev)
netif_rx(skb);
- tp->stats.rx_packets++;
- tp->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
}
received++;
entry = (++tp->cur_rx) % RX_RING_SIZE;
@@ -602,18 +608,22 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
dev->name, status);
#endif
- tp->stats.tx_errors++;
- if (status & 0x4104) tp->stats.tx_aborted_errors++;
- if (status & 0x0C00) tp->stats.tx_carrier_errors++;
- if (status & 0x0200) tp->stats.tx_window_errors++;
- if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ if (status & 0x4104)
+ dev->stats.tx_aborted_errors++;
+ if (status & 0x0C00)
+ dev->stats.tx_carrier_errors++;
+ if (status & 0x0200)
+ dev->stats.tx_window_errors++;
+ if (status & 0x0002)
+ dev->stats.tx_fifo_errors++;
if ((status & 0x0080) && tp->full_duplex == 0)
- tp->stats.tx_heartbeat_errors++;
+ dev->stats.tx_heartbeat_errors++;
} else {
- tp->stats.tx_bytes +=
+ dev->stats.tx_bytes +=
tp->tx_buffers[entry].skb->len;
- tp->stats.collisions += (status >> 3) & 15;
- tp->stats.tx_packets++;
+ dev->stats.collisions += (status >> 3) & 15;
+ dev->stats.tx_packets++;
}
pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
@@ -655,7 +665,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
if (csr5 == 0xffffffff)
break;
- if (csr5 & TxJabber) tp->stats.tx_errors++;
+ if (csr5 & TxJabber)
+ dev->stats.tx_errors++;
if (csr5 & TxFIFOUnderflow) {
if ((tp->csr6 & 0xC000) != 0xC000)
tp->csr6 += 0x4000; /* Bump up the Tx threshold */
@@ -672,8 +683,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
}
if (csr5 & RxDied) { /* Missed a Rx frame. */
- tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
- tp->stats.rx_errors++;
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+ dev->stats.rx_errors++;
tulip_start_rxtx(tp);
}
/*
@@ -789,7 +800,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
- tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+ dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
}
if (tulip_debug > 4)
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index e525875ed67..ed66a16711d 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -417,7 +417,6 @@ struct tulip_private {
int revision;
int flags;
struct napi_struct napi;
- struct net_device_stats stats;
struct timer_list timer; /* Media selection timer. */
struct timer_list oom_timer; /* Out of memory timer. */
u32 mc_filter[2];
@@ -570,7 +569,7 @@ static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __io
/* Trigger an immediate transmit demand. */
iowrite32(0, ioaddr + CSR1);
- tp->stats.tx_errors++;
+ tp->dev->stats.tx_errors++;
}
#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3a8d7efa2ac..2c39f259121 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -725,7 +725,7 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
int status = le32_to_cpu(tp->tx_ring[entry].status);
if (status < 0) {
- tp->stats.tx_errors++; /* It wasn't Txed */
+ tp->dev->stats.tx_errors++; /* It wasn't Txed */
tp->tx_ring[entry].status = 0;
}
@@ -781,8 +781,8 @@ static void tulip_down (struct net_device *dev)
/* release any unconsumed transmit buffers */
tulip_clean_tx_ring(tp);
- if (ioread32 (ioaddr + CSR6) != 0xffffffff)
- tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
+ if (ioread32(ioaddr + CSR6) != 0xffffffff)
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
spin_unlock_irqrestore (&tp->lock, flags);
@@ -864,12 +864,12 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
spin_lock_irqsave (&tp->lock, flags);
- tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
spin_unlock_irqrestore(&tp->lock, flags);
}
- return &tp->stats;
+ return &dev->stats;
}
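
Every struct net_device already carries a struct net_device_stats (dev->stats) that the core returns when a driver provides no ndo_get_stats, so a driver whose counters all live there can drop its private copy and keep the callback only for folding in hardware-maintained counters. A minimal sketch, with a hypothetical private structure and register offset:

	#include <linux/netdevice.h>
	#include <linux/io.h>
	#include <linux/spinlock.h>

	struct example_priv {			/* hypothetical */
		void __iomem *ioaddr;
		spinlock_t lock;
	};

	#define EXAMPLE_MISSED_FRAMES	0x40	/* hypothetical CSR offset */

	static struct net_device_stats *example_get_stats(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		/* fold the hardware missed-frame counter into the core stats */
		dev->stats.rx_missed_errors +=
			ioread32(priv->ioaddr + EXAMPLE_MISSED_FRAMES) & 0xffff;
		spin_unlock_irqrestore(&priv->lock, flags);

		return &dev->stats;
	}
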
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 96de5829b94..74217dbf014 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -480,7 +480,7 @@ static int uli526x_open(struct net_device *dev)
init_timer(&db->timer);
db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
db->timer.data = (unsigned long)dev;
- db->timer.function = &uli526x_timer;
+ db->timer.function = uli526x_timer;
add_timer(&db->timer);
return 0;
@@ -1747,7 +1747,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
if(cr10_value&0x10000000)
break;
}
- return (cr10_value&0x0ffff);
+ return cr10_value & 0x0ffff;
}
static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 66d41cf8da2..f0b231035de 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -662,7 +662,7 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
+ np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
return 0;
out_err:
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index a439e93be22..5a73752be2c 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -29,7 +29,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
@@ -181,19 +180,6 @@ static void print_binary(unsigned int number)
}
#endif
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct xircom_private *private = netdev_priv(dev);
-
- strcpy(info->driver, "xircom_cb");
- strcpy(info->bus_info, pci_name(private->pdev));
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
static const struct net_device_ops netdev_ops = {
.ndo_open = xircom_open,
.ndo_stop = xircom_close,
@@ -279,7 +265,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
setup_descriptors(private);
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 2e50077ff45..1cc67138adb 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -541,7 +541,7 @@ cleanup:
indexes->respCleared = cpu_to_le32(cleared);
wmb();
- return (resp_save == NULL);
+ return resp_save == NULL;
}
static inline int
@@ -962,36 +962,34 @@ typhoon_do_get_stats(struct typhoon *tp)
* The extra status reported would be a good candidate for
* ethtool_ops->get_{strings,stats}()
*/
- stats->tx_packets = le32_to_cpu(s->txPackets);
- stats->tx_bytes = le64_to_cpu(s->txBytes);
- stats->tx_errors = le32_to_cpu(s->txCarrierLost);
- stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
- stats->collisions = le32_to_cpu(s->txMultipleCollisions);
- stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
- stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
- stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
+ stats->tx_packets = le32_to_cpu(s->txPackets) +
+ saved->tx_packets;
+ stats->tx_bytes = le64_to_cpu(s->txBytes) +
+ saved->tx_bytes;
+ stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
+ saved->tx_errors;
+ stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
+ saved->tx_carrier_errors;
+ stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
+ saved->collisions;
+ stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
+ saved->rx_packets;
+ stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
+ saved->rx_bytes;
+ stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
+ saved->rx_fifo_errors;
stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
- le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
- stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
- stats->rx_length_errors = le32_to_cpu(s->rxOversized);
+ le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
+ saved->rx_errors;
+ stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
+ saved->rx_crc_errors;
+ stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
+ saved->rx_length_errors;
tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
SPEED_100 : SPEED_10;
tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
- /* add in the saved statistics
- */
- stats->tx_packets += saved->tx_packets;
- stats->tx_bytes += saved->tx_bytes;
- stats->tx_errors += saved->tx_errors;
- stats->collisions += saved->collisions;
- stats->rx_packets += saved->rx_packets;
- stats->rx_bytes += saved->rx_bytes;
- stats->rx_fifo_errors += saved->rx_fifo_errors;
- stats->rx_errors += saved->rx_errors;
- stats->rx_crc_errors += saved->rx_crc_errors;
- stats->rx_length_errors += saved->rx_length_errors;
-
return 0;
}
@@ -1762,7 +1760,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
new_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
- new_skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(new_skb);
spin_lock(&tp->state_lock);
if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 8d532f9b50d..a4c3f570824 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3601,7 +3601,7 @@ static void ucc_geth_timeout(struct net_device *dev)
#ifdef CONFIG_PM
-static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
+static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
@@ -3629,7 +3629,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
return 0;
}
-static int ucc_geth_resume(struct of_device *ofdev)
+static int ucc_geth_resume(struct platform_device *ofdev)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
@@ -3732,7 +3732,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
-static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
+static int ucc_geth_probe(struct platform_device* ofdev, const struct of_device_id *match)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -3954,7 +3954,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
return 0;
}
-static int ucc_geth_remove(struct of_device* ofdev)
+static int ucc_geth_remove(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
struct net_device *dev = dev_get_drvdata(device);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d7b7018a1de..52ffabe6db0 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -358,6 +358,14 @@ config USB_NET_ZAURUS
really need this non-conformant variant of CDC Ethernet (or in
some cases CDC MDLM) protocol, not "g_ether".
+config USB_NET_CX82310_ETH
+ tristate "Conexant CX82310 USB ethernet port"
+ depends on USB_USBNET
+ help
+ Choose this option if you're using a Conexant CX82310-based ADSL
+ router with a USB ethernet port. This driver is for routers only;
+ it will not work with ADSL modems (use the cxacru driver instead).
+
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b13a279663b..a19b0259ae1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
+obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
new file mode 100644
index 00000000000..8969f124c18
--- /dev/null
+++ b/drivers/net/usb/cx82310_eth.c
@@ -0,0 +1,346 @@
+/*
+ * Driver for USB ethernet port of Conexant CX82310-based ADSL routers
+ * Copyright (C) 2010 by Ondrej Zary
+ * some parts inspired by the cxacru driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+
+enum cx82310_cmd {
+ CMD_START = 0x84, /* no effect? */
+ CMD_STOP = 0x85, /* no effect? */
+ CMD_GET_STATUS = 0x90, /* returns nothing? */
+ CMD_GET_MAC_ADDR = 0x91, /* read MAC address */
+ CMD_GET_LINK_STATUS = 0x92, /* not useful, link is always up */
+ CMD_ETHERNET_MODE = 0x99, /* unknown, needed during init */
+};
+
+enum cx82310_status {
+ STATUS_UNDEFINED,
+ STATUS_SUCCESS,
+ STATUS_ERROR,
+ STATUS_UNSUPPORTED,
+ STATUS_UNIMPLEMENTED,
+ STATUS_PARAMETER_ERROR,
+ STATUS_DBG_LOOPBACK,
+};
+
+#define CMD_PACKET_SIZE 64
+/* first command after power on can take around 8 seconds */
+#define CMD_TIMEOUT 15000
+#define CMD_REPLY_RETRY 5
+
+#define CX82310_MTU 1514
+#define CMD_EP 0x01
+
+/*
+ * execute control command
+ * - optionally send some data (command parameters)
+ * - optionally wait for the reply
+ * - optionally read some data from the reply
+ */
+static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
+ u8 *wdata, int wlen, u8 *rdata, int rlen)
+{
+ int actual_len, retries, ret;
+ struct usb_device *udev = dev->udev;
+ u8 *buf = kzalloc(CMD_PACKET_SIZE, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ /* create command packet */
+ buf[0] = cmd;
+ if (wdata)
+ memcpy(buf + 4, wdata, min_t(int, wlen, CMD_PACKET_SIZE - 4));
+
+ /* send command packet */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
+ CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+ cmd, ret);
+ goto end;
+ }
+
+ if (reply) {
+ /* wait for reply, retry if it's empty */
+ for (retries = 0; retries < CMD_REPLY_RETRY; retries++) {
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, CMD_EP),
+ buf, CMD_PACKET_SIZE, &actual_len,
+ CMD_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&dev->udev->dev,
+ "reply receive error %d\n", ret);
+ goto end;
+ }
+ if (actual_len > 0)
+ break;
+ }
+ if (actual_len == 0) {
+ dev_err(&dev->udev->dev, "no reply to command %#x\n",
+ cmd);
+ ret = -EIO;
+ goto end;
+ }
+ if (buf[0] != cmd) {
+ dev_err(&dev->udev->dev,
+ "got reply to command %#x, expected: %#x\n",
+ buf[0], cmd);
+ ret = -EIO;
+ goto end;
+ }
+ if (buf[1] != STATUS_SUCCESS) {
+ dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
+ cmd, buf[1]);
+ ret = -EIO;
+ goto end;
+ }
+ if (rdata)
+ memcpy(rdata, buf + 4,
+ min_t(int, rlen, CMD_PACKET_SIZE - 4));
+ }
+end:
+ kfree(buf);
+ return ret;
+}
+
+#define partial_len data[0] /* length of partial packet data */
+#define partial_rem data[1] /* remaining (missing) data length */
+#define partial_data data[2] /* partial packet data */
+
+static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+ char buf[15];
+ struct usb_device *udev = dev->udev;
+
+ /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
+ if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
+ && strcmp(buf, "USB NET CARD")) {
+ dev_info(&udev->dev, "ignoring: probably an ADSL modem\n");
+ return -ENODEV;
+ }
+
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
+
+ /*
+ * this must not include ethernet header as the device can send partial
+ * packets with no header (and sometimes even empty URBs)
+ */
+ dev->net->hard_header_len = 0;
+ /* we can send at most 1514 bytes of data (+ 2-byte header) per URB */
+ dev->hard_mtu = CX82310_MTU + 2;
+ /* we can receive URBs up to 4KB from the device */
+ dev->rx_urb_size = 4096;
+
+ dev->partial_data = (unsigned long) kmalloc(dev->hard_mtu, GFP_KERNEL);
+ if (!dev->partial_data)
+ return -ENOMEM;
+
+ /* enable ethernet mode (?) */
+ ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+ if (ret) {
+ dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
+ ret);
+ goto err;
+ }
+
+ /* get the MAC address */
+ ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
+ dev->net->dev_addr, ETH_ALEN);
+ if (ret) {
+ dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
+ goto err;
+ }
+
+ /* start (does not seem to have any effect?) */
+ ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ kfree((void *)dev->partial_data);
+ return ret;
+}
+
+static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ kfree((void *)dev->partial_data);
+}
+
+/*
+ * RX is NOT easy - we can receive multiple packets per skb, each having 2-byte
+ * packet length at the beginning.
+ * The last packet might be incomplete (when it crosses the 4KB URB size),
+ * continuing in the next skb (without any headers).
+ * If a packet has odd length, there is one extra byte at the end (before next
+ * packet or at the end of the URB).
+ */
+static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int len;
+ struct sk_buff *skb2;
+
+ /*
+ * If the last skb ended with an incomplete packet, this skb contains
+ * end of that packet at the beginning.
+ */
+ if (dev->partial_rem) {
+ len = dev->partial_len + dev->partial_rem;
+ skb2 = alloc_skb(len, GFP_ATOMIC);
+ if (!skb2)
+ return 0;
+ skb_put(skb2, len);
+ memcpy(skb2->data, (void *)dev->partial_data,
+ dev->partial_len);
+ memcpy(skb2->data + dev->partial_len, skb->data,
+ dev->partial_rem);
+ usbnet_skb_return(dev, skb2);
+ skb_pull(skb, (dev->partial_rem + 1) & ~1);
+ dev->partial_rem = 0;
+ if (skb->len < 2)
+ return 1;
+ }
+
+ /* a skb can contain multiple packets */
+ while (skb->len > 1) {
+ /* first two bytes are packet length */
+ len = skb->data[0] | (skb->data[1] << 8);
+ skb_pull(skb, 2);
+
+ /* if this is the last packet in the skb, let usbnet process it */
+ if (len == skb->len || len + 1 == skb->len) {
+ skb_trim(skb, len);
+ break;
+ }
+
+ if (len > CX82310_MTU) {
+ dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
+ len);
+ return 0;
+ }
+
+ /* incomplete packet, save it for the next skb */
+ if (len > skb->len) {
+ dev->partial_len = skb->len;
+ dev->partial_rem = len - skb->len;
+ memcpy((void *)dev->partial_data, skb->data,
+ dev->partial_len);
+ skb_pull(skb, skb->len);
+ break;
+ }
+
+ skb2 = alloc_skb(len, GFP_ATOMIC);
+ if (!skb2)
+ return 0;
+ skb_put(skb2, len);
+ memcpy(skb2->data, skb->data, len);
+ /* process the packet */
+ usbnet_skb_return(dev, skb2);
+
+ skb_pull(skb, (len + 1) & ~1);
+ }
+
+ /* let usbnet process the last packet */
+ return 1;
+}
+
+/* TX is easy, just add 2 bytes of length at the beginning */
+static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int len = skb->len;
+
+ if (skb_headroom(skb) < 2) {
+ struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+ skb_push(skb, 2);
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+
+ return skb;
+}
+
+
+static const struct driver_info cx82310_info = {
+ .description = "Conexant CX82310 USB ethernet",
+ .flags = FLAG_ETHER,
+ .bind = cx82310_bind,
+ .unbind = cx82310_unbind,
+ .rx_fixup = cx82310_rx_fixup,
+ .tx_fixup = cx82310_tx_fixup,
+};
+
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_DEV_INFO, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bDeviceClass = (cl), \
+ .bDeviceSubClass = (sc), \
+ .bDeviceProtocol = (pr)
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+ .driver_info = (unsigned long) &cx82310_info
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver cx82310_driver = {
+ .name = "cx82310_eth",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+};
+
+static int __init cx82310_init(void)
+{
+ return usb_register(&cx82310_driver);
+}
+module_init(cx82310_init);
+
+static void __exit cx82310_exit(void)
+{
+ usb_deregister(&cx82310_driver);
+}
+module_exit(cx82310_exit);
+
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
+MODULE_LICENSE("GPL");
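
The framing handled by cx82310_rx_fixup() is: each Ethernet frame inside a bulk URB is preceded by a 2-byte little-endian length, an odd-length frame is followed by one pad byte so the next header starts on an even offset, and a frame may be cut at the 4KB URB boundary and continued at the start of the next URB. A standalone sketch of the same walk over one received buffer, outside the usbnet framework (function and callback names hypothetical); it returns the number of trailing bytes belonging to an unfinished frame:

	#include <stdint.h>
	#include <stddef.h>

	static size_t cx_walk_frames(const uint8_t *buf, size_t len,
				     void (*frame_cb)(const uint8_t *frame, size_t flen))
	{
		while (len > 1) {
			/* 2-byte little-endian length precedes each frame */
			size_t flen = buf[0] | ((size_t)buf[1] << 8);

			buf += 2;
			len -= 2;

			if (flen > len)
				return len;	/* frame continues in the next URB */

			frame_cb(buf, flen);

			/* odd-length frames carry one pad byte before the next header */
			flen = (flen + 1) & ~(size_t)1;
			if (flen >= len)
				break;		/* that was the last frame in this URB */
			buf += flen;
			len -= flen;
		}
		return 0;
	}
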
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66b876..8110595fbbc 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -843,16 +843,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- struct hso_net *odev = netdev_priv(net);
-
- strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
- usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
-}
-
static const struct ethtool_ops ops = {
- .get_drvinfo = hso_get_drvinfo,
.get_link = ethtool_op_get_link
};
@@ -1652,6 +1643,8 @@ static int hso_get_count(struct hso_serial *serial,
struct uart_icount cnow;
struct hso_tiocmget *tiocmget = serial->tiocmget;
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
if (!tiocmget)
return -ENOENT;
spin_lock_irq(&serial->serial_lock);
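
hso_get_count() fills only some members of the on-stack serial_icounter_struct before it is copied to user space, so zeroing the whole structure first keeps uninitialised kernel stack bytes from leaking through the unused fields. The general pattern for any partially-filled structure handed to copy_to_user() (function name hypothetical, fields per struct uart_icount):

	#include <linux/serial.h>
	#include <linux/serial_core.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	static int example_get_icount(const struct uart_icount *cnow,
				      struct serial_icounter_struct __user *argp)
	{
		struct serial_icounter_struct icount;

		memset(&icount, 0, sizeof(icount));	/* no stack data in unset fields */
		icount.cts = cnow->cts;
		icount.dsr = cnow->dsr;
		icount.rng = cnow->rng;
		icount.dcd = cnow->dcd;
		icount.rx  = cnow->rx;
		icount.tx  = cnow->tx;

		return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0;
	}
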
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6abacd..b2bcf99e6f0 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
#define USB_PRODUCT_IPHONE 0x1290
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
+#define USB_PRODUCT_IPHONE_4 0x1297
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -424,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
.ndo_get_stats = &ipheth_stats,
};
-static struct device_type ipheth_type = {
- .name = "wwan",
-};
-
static int ipheth_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -445,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
netdev->netdev_ops = &ipheth_netdev_ops;
netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
- strcpy(netdev->name, "wwan%d");
+ strcpy(netdev->name, "eth%d");
dev = netdev_priv(netdev);
dev->udev = udev;
@@ -495,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
SET_NETDEV_DEV(netdev, &intf->dev);
SET_ETHTOOL_OPS(netdev, &ops);
- SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
retval = register_netdev(netdev);
if (retval) {
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2b7b39cad1c..5e98643a4a2 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -759,14 +759,6 @@ static int kaweth_close(struct net_device *net)
return 0;
}
-static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct kaweth_device *kaweth = netdev_priv(dev);
-
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
-}
-
static u32 kaweth_get_link(struct net_device *dev)
{
struct kaweth_device *kaweth = netdev_priv(dev);
@@ -775,7 +767,6 @@ static u32 kaweth_get_link(struct net_device *dev)
}
static const struct ethtool_ops ops = {
- .get_drvinfo = kaweth_get_drvinfo,
.get_link = kaweth_get_link
};
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ee85c8b9a85..d1ac15c95fa 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -203,7 +203,7 @@ static inline void sierra_net_set_private(struct usbnet *dev,
/* is packet IPv4 */
static inline int is_ip(struct sk_buff *skb)
{
- return (skb->protocol == cpu_to_be16(ETH_P_IP));
+ return skb->protocol == cpu_to_be16(ETH_P_IP);
}
/*
@@ -354,7 +354,7 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
static inline int sierra_net_is_valid_addrlen(u8 len)
{
- return (len == sizeof(struct in_addr));
+ return len == sizeof(struct in_addr);
}
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7f62e2dea28..ca7fc9df1cc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
static void rx_complete (struct urb *urb);
-static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
struct sk_buff *skb;
struct skb_data *entry;
@@ -327,7 +327,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
- return;
+ return -ENOMEM;
}
skb_reserve (skb, NET_IP_ALIGN);
@@ -357,6 +357,9 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
netif_dbg(dev, ifdown, dev->net, "device gone\n");
netif_device_detach (dev->net);
break;
+ case -EHOSTUNREACH:
+ retval = -ENOLINK;
+ break;
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
@@ -374,6 +377,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
}
+ return retval;
}
@@ -912,6 +916,7 @@ fail_halt:
/* tasklet could resubmit itself forever if memory is tight */
if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
+ int resched = 1;
if (netif_running (dev->net))
urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@ fail_halt:
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_lowmem;
- rx_submit (dev, urb, GFP_KERNEL);
+ if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+ resched = 0;
usb_autopm_put_interface(dev->intf);
fail_lowmem:
- tasklet_schedule (&dev->bh);
+ if (resched)
+ tasklet_schedule (&dev->bh);
}
}
@@ -1175,8 +1182,11 @@ static void usbnet_bh (unsigned long param)
// don't refill the queue all at once
for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
urb = usb_alloc_urb (0, GFP_ATOMIC);
- if (urb != NULL)
- rx_submit (dev, urb, GFP_ATOMIC);
+ if (urb != NULL) {
+ if (rx_submit (dev, urb, GFP_ATOMIC) ==
+ -ENOLINK)
+ return;
+ }
}
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
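
Having rx_submit() return a status lets its callers tell a dead device apart from a transient failure: -ENOLINK (device unplugged or unreachable) means there is no point rescheduling the bottom half or allocating more URBs, while -ENOMEM is handled by deferring the refill. A condensed sketch of the caller-side pattern, assuming the refill-loop shape used in usbnet_bh():

	/* refill the RX queue, but stop cleanly once the device is gone */
	for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
		struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);

		if (!urb)
			break;				/* low memory: retry later */
		if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
			return;				/* device gone: no reschedule */
	}
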
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5ec542dd5b5..0bbc0c32313 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -250,7 +250,7 @@ static int veth_close(struct net_device *dev)
static int is_valid_veth_mtu(int new_mtu)
{
- return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU);
+ return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
}
static int veth_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fd69095ef6e..6884813b809 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1954,7 +1954,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
*/
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (rd->rdesc1.CSM & CSM_IPKT) {
if (rd->rdesc1.CSM & CSM_IPOK) {
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev);
if (ret < 0)
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index f7b33ae7a70..b5e120b0074 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1504,22 +1504,25 @@ struct velocity_info {
* addresses on this chain then we use the first - multi-IP WOL is not
* supported.
*
- * CHECK ME: locking
*/
static inline int velocity_get_ip(struct velocity_info *vptr)
{
- struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr;
+ struct in_device *in_dev;
struct in_ifaddr *ifa;
+ int res = -ENOENT;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(vptr->dev);
if (in_dev != NULL) {
ifa = (struct in_ifaddr *) in_dev->ifa_list;
if (ifa != NULL) {
memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
- return 0;
+ res = 0;
}
}
- return -ENOENT;
+ rcu_read_unlock();
+ return res;
}
/**
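
The in_device hanging off a net_device is protected by RCU, so dereferencing dev->ip_ptr directly races with address removal; the reader must hold rcu_read_lock() across __in_dev_get_rcu() and copy out what it needs before unlocking. A minimal sketch of that pattern for fetching the first IPv4 address (function name hypothetical):

	#include <linux/netdevice.h>
	#include <linux/inetdevice.h>
	#include <linux/rcupdate.h>

	static int example_first_ipv4(struct net_device *dev, __be32 *addr)
	{
		struct in_device *in_dev;
		struct in_ifaddr *ifa;
		int res = -ENOENT;

		rcu_read_lock();
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			ifa = in_dev->ifa_list;		/* first address on the chain */
			if (ifa) {
				*addr = ifa->ifa_address;
				res = 0;
			}
		}
		rcu_read_unlock();
		return res;
	}
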
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4598e9d2608..bb6b67f6b0c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -705,19 +705,6 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static void virtnet_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_device *vdev = vi->vdev;
-
- strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver));
- strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version));
- strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, dev_name(&vdev->dev),
- ARRAY_SIZE(drvinfo->bus_info));
-}
-
static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -830,7 +817,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .get_drvinfo = virtnet_get_drvinfo,
.set_tx_csum = virtnet_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index abe0ff53daf..198ce92af0c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1042,11 +1042,11 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
skb->csum = htons(gdesc->rcd.csum);
skb->ip_summed = CHECKSUM_PARTIAL;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
}
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c7c5605b372..5378b849f54 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -501,7 +501,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -2159,8 +2159,8 @@ start:
/* Alarm MSIX Vectors count */
vdev->intr_cnt++;
- vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
- GFP_KERNEL);
+ vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!vdev->entries) {
vxge_debug_init(VXGE_ERR,
"%s: memory allocation failed",
@@ -2169,9 +2169,9 @@ start:
goto alloc_entries_failed;
}
- vdev->vxge_entries =
- kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
+ vdev->vxge_entries = kcalloc(vdev->intr_cnt,
+ sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
if (!vdev->vxge_entries) {
vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
VXGE_DRIVER_NAME);
@@ -2914,26 +2914,18 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
}
/**
- * vxge_get_stats
+ * vxge_get_stats64
* @dev: pointer to the device structure
+ * @stats: pointer to struct rtnl_link_stats64
*
- * Updates the device statistics structure. This function updates the device
- * statistics structure in the net_device structure and returns a pointer
- * to the same.
*/
-static struct net_device_stats *
-vxge_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
- struct vxgedev *vdev;
- struct net_device_stats *net_stats;
+ struct vxgedev *vdev = netdev_priv(dev);
int k;
- vdev = netdev_priv(dev);
-
- net_stats = &vdev->stats.net_stats;
-
- memset(net_stats, 0, sizeof(struct net_device_stats));
-
+ /* net_stats already zeroed by caller */
for (k = 0; k < vdev->no_of_vpath; k++) {
net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
@@ -3102,7 +3094,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static const struct net_device_ops vxge_netdev_ops = {
.ndo_open = vxge_open,
.ndo_stop = vxge_close,
- .ndo_get_stats = vxge_get_stats,
+ .ndo_get_stats64 = vxge_get_stats64,
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = vxge_set_multicast,
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 2e3b064b8e4..d4be07eaacd 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -172,7 +172,6 @@ struct vxge_msix_entry {
struct vxge_sw_stats {
/* Network Stats (interface stats) */
- struct net_device_stats net_stats;
/* Tx */
u64 tx_frms;
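
Unlike the old ndo_get_stats, ndo_get_stats64 receives an rtnl_link_stats64 that the caller has already zeroed and that uses 64-bit counters, so the driver only needs to accumulate its per-ring totals into it and return the same pointer. A minimal sketch with hypothetical per-ring software counters:

	#include <linux/netdevice.h>

	struct example_ring_stats {		/* hypothetical software counters */
		u64 rx_packets, rx_bytes;
		u64 tx_packets, tx_bytes;
	};

	struct example_priv {			/* hypothetical */
		int num_rings;
		struct example_ring_stats *rings;
	};

	static struct rtnl_link_stats64 *
	example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
	{
		struct example_priv *priv = netdev_priv(dev);
		int i;

		/* stats arrives pre-zeroed; just sum the per-ring counters */
		for (i = 0; i < priv->num_rings; i++) {
			stats->rx_packets += priv->rings[i].rx_packets;
			stats->rx_bytes   += priv->rings[i].rx_bytes;
			stats->tx_packets += priv->rings[i].tx_packets;
			stats->tx_bytes   += priv->rings[i].tx_bytes;
		}
		return stats;
	}

	/* wired up as: .ndo_get_stats64 = example_get_stats64, */
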
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 0bd898c9475..4ac85a09c5a 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -264,7 +264,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
- return -EINVAL; /* No such clock setting */
+ return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index a5ddc6c8963..164c3624ba8 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -73,7 +73,7 @@ static int reset_cyc2x(void __iomem *addr);
static int detect_cyc2x(void __iomem *addr);
/* Miscellaneous functions */
-static int get_option_index(long *optlist, long optval);
+static int get_option_index(const long *optlist, long optval);
static u16 checksum(u8 *buf, u32 len);
#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
@@ -81,23 +81,23 @@ static u16 checksum(u8 *buf, u32 len);
/* Global Data */
/* private data */
-static char modname[] = "cycx_drv";
-static char fullname[] = "Cyclom 2X Support Module";
-static char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+static const char modname[] = "cycx_drv";
+static const char fullname[] = "Cyclom 2X Support Module";
+static const char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
"<acme@conectiva.com.br>";
/* Hardware configuration options.
* These are arrays of configuration options used by verification routines.
* The first element of each array is its size (i.e. number of options).
*/
-static long cyc2x_dpmbase_options[] = {
+static const long cyc2x_dpmbase_options[] = {
20,
0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
};
-static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
+static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
/* Kernel Loadable Module Entry Points */
/* Module 'insert' entry point.
@@ -529,7 +529,7 @@ static int detect_cyc2x(void __iomem *addr)
/* Miscellaneous */
/* Get option's index into the options list.
* Return option's index (1 .. N) or zero if option is invalid. */
-static int get_option_index(long *optlist, long optval)
+static int get_option_index(const long *optlist, long optval)
{
int i = 1;
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index a0e8611ad8e..859dba9b972 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -81,9 +81,9 @@ static irqreturn_t cycx_isr(int irq, void *dev_id);
*/
/* private data */
-static char cycx_drvname[] = "cyclomx";
-static char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
-static char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+static const char cycx_drvname[] = "cyclomx";
+static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
+static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
"<acme@conectiva.com.br>";
static int cycx_ncards = CONFIG_CYCX_CARDS;
static struct cycx_device *cycx_card_array; /* adapter data space */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 421d0715310..1481a446fef 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -97,11 +97,11 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
dest = skb_push(skb, hlen);
if (!dest)
- return(0);
+ return 0;
memcpy(dest, &hdr, hlen);
- return(hlen);
+ return hlen;
}
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
@@ -211,14 +211,14 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
return -EFAULT;
if (config.flags & ~DLCI_VALID_FLAGS)
- return(-EINVAL);
+ return -EINVAL;
memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
dlp->configured = 1;
}
err = (*flp->dlci_conf)(dlp->slave, dev, get);
if (err)
- return(err);
+ return err;
if (get)
{
@@ -226,7 +226,7 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
return -EFAULT;
}
- return(0);
+ return 0;
}
static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -234,7 +234,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct dlci_local *dlp;
if (!capable(CAP_NET_ADMIN))
- return(-EPERM);
+ return -EPERM;
dlp = netdev_priv(dev);
@@ -242,7 +242,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
case DLCI_GET_SLAVE:
if (!*(short *)(dev->dev_addr))
- return(-EINVAL);
+ return -EINVAL;
strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
break;
@@ -250,15 +250,15 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case DLCI_GET_CONF:
case DLCI_SET_CONF:
if (!*(short *)(dev->dev_addr))
- return(-EINVAL);
+ return -EINVAL;
- return(dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF));
+ return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
break;
default:
- return(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
- return(0);
+ return 0;
}
static int dlci_change_mtu(struct net_device *dev, int new_mtu)
@@ -277,15 +277,15 @@ static int dlci_open(struct net_device *dev)
dlp = netdev_priv(dev);
if (!*(short *)(dev->dev_addr))
- return(-EINVAL);
+ return -EINVAL;
if (!netif_running(dlp->slave))
- return(-ENOTCONN);
+ return -ENOTCONN;
flp = netdev_priv(dlp->slave);
err = (*flp->activate)(dlp->slave, dev);
if (err)
- return(err);
+ return err;
netif_start_queue(dev);
@@ -365,14 +365,14 @@ static int dlci_add(struct dlci_add *dlci)
list_add(&dlp->list, &dlci_devs);
rtnl_unlock();
- return(0);
+ return 0;
err2:
rtnl_unlock();
free_netdev(master);
err1:
dev_put(slave);
- return(err);
+ return err;
}
static int dlci_del(struct dlci_add *dlci)
@@ -385,10 +385,10 @@ static int dlci_del(struct dlci_add *dlci)
/* validate slave device */
master = __dev_get_by_name(&init_net, dlci->devname);
if (!master)
- return(-ENODEV);
+ return -ENODEV;
if (netif_running(master)) {
- return(-EBUSY);
+ return -EBUSY;
}
dlp = netdev_priv(master);
@@ -406,7 +406,7 @@ static int dlci_del(struct dlci_add *dlci)
}
rtnl_unlock();
- return(err);
+ return err;
}
static int dlci_ioctl(unsigned int cmd, void __user *arg)
@@ -415,7 +415,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
int err;
if (!capable(CAP_NET_ADMIN))
- return(-EPERM);
+ return -EPERM;
if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
return -EFAULT;
@@ -438,7 +438,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
err = -EINVAL;
}
- return(err);
+ return err;
}
static const struct header_ops dlci_header_ops = {
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ad7719fe6d0..e050bd65e03 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -885,20 +885,21 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
* Receive a frame through the DMA
*/
static inline void
-fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
- unsigned char *mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+ dma_addr_t mem, int len)
{
/*
* This routine will setup the DMA and start it
*/
- dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+ dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+ (unsigned long) skb, (unsigned long) mem, len);
if (card->dmarx_in_progress) {
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
}
- outl((unsigned long) skb, card->pci_conf + DMAPADR0); /* Copy to here */
- outl((unsigned long) mem, card->pci_conf + DMALADR0); /* from here */
+ outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
+ outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -1309,8 +1310,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
card->dma_port_rx = port;
card->dma_len_rx = len;
card->dma_rxpos = rxp;
- fst_rx_dma(card, (char *) card->rx_dma_handle_card,
- (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+ fst_rx_dma(card, card->rx_dma_handle_card,
+ BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
}
if (rxp != port->rxpos) {
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index b38ffa149ab..b1e5e5b69c2 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -191,7 +191,8 @@ static int cisco_rx(struct sk_buff *skb)
switch (ntohl (cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
- in_dev = dev->ip_ptr;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
addr = 0;
mask = ~cpu_to_be32(0); /* is the mask correct? */
@@ -211,6 +212,7 @@ static int cisco_rx(struct sk_buff *skb)
cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
addr, mask);
}
+ rcu_read_unlock();
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 88e363033e2..6c571e19883 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -396,7 +396,7 @@ static void hss_config(struct port *port)
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_TX_PCR;
- msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
+ msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
if (port->clock_type == CLOCK_INT)
msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 4d4dc38c729..7f5bb913c8b 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -46,7 +46,7 @@
#include <net/x25device.h>
-static char bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/* If this number is made larger, check that the temporary string buffer
* in lapbeth_new_device is large enough to store the probe device name.*/
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index e2c6f7f4f51..70feb84df67 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1022,7 +1022,7 @@ static int lmc_open(struct net_device *dev)
if (sc->lmc_ok){
lmc_trace(dev, "lmc_open lmc_ok out");
- return (0);
+ return 0;
}
lmc_softreset (sc);
@@ -1105,12 +1105,12 @@ static int lmc_open(struct net_device *dev)
init_timer (&sc->timer);
sc->timer.expires = jiffies + HZ;
sc->timer.data = (unsigned long) dev;
- sc->timer.function = &lmc_watchdog;
+ sc->timer.function = lmc_watchdog;
add_timer (&sc->timer);
lmc_trace(dev, "lmc_open out");
- return (0);
+ return 0;
}
/* Total reset to compensate for the AdTran DSU doing bad things
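Dropping the explicit '&' when assigning lmc_watchdog is purely cosmetic (a function name already decays to a pointer); the same initialization could also be written with setup_timer(), which the timer API of this era provides. A small sketch with placeholder names:
#include <linux/timer.h>
#include <linux/jiffies.h>
static void my_watchdog(unsigned long data)     /* placeholder callback */
{
        /* ... poll the hardware, then re-arm if needed ... */
}
static void arm_watchdog(struct timer_list *t, unsigned long priv)
{
        setup_timer(t, my_watchdog, priv);      /* init_timer() + field writes */
        mod_timer(t, jiffies + HZ);             /* first expiry in one second */
}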
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5394b51bdb2..17d408fe693 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -282,7 +282,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
- return -EINVAL; /* No such clock setting */
+ return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
@@ -379,14 +379,14 @@ static int __init n2_run(unsigned long io, unsigned long irq,
if (request_irq(irq, sca_intr, 0, devname, card)) {
printk(KERN_ERR "n2: could not allocate IRQ\n");
n2_destroy_card(card);
- return(-EBUSY);
+ return -EBUSY;
}
card->irq = irq;
if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
printk(KERN_ERR "n2: could not request RAM window\n");
n2_destroy_card(card);
- return(-EBUSY);
+ return -EBUSY;
}
card->phy_winbase = winbase;
card->winbase = ioremap(winbase, USE_WINDOWSIZE);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index c6aa66e5b52..f875cfae309 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1,5 +1,5 @@
#define USE_PCI_CLOCK
-static char rcsid[] =
+static const char rcsid[] =
"Revision: 3.4.5 Date: 2002/03/07 ";
/*
@@ -451,11 +451,11 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) {
/* Return the size of a good frame or incomplete bad frame
* (dma_buf_read will clean the buffer descriptors in this case). */
- return (rcvd);
+ return rcvd;
}
ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
}
- return (-1);
+ return -1;
}
/*
@@ -557,7 +557,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
RX_BD_ADDR(ch, chan->rx_last_bd));
}
- return (rcvd);
+ return rcvd;
}
static void tx_dma_stop(pc300_t * card, int ch)
@@ -1733,7 +1733,7 @@ static u16 falc_pattern_test_error(pc300_t * card, int ch)
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
- return (pfalc->bec);
+ return pfalc->bec;
}
/**********************************/
@@ -2819,7 +2819,7 @@ static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
*br_io = 0;
if (rate == 0)
- return (0);
+ return 0;
for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
if ((tc = clock / br_pwr / rate) <= 0xff) {
@@ -2832,11 +2832,11 @@ static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
error = ((rate - (clock / br_pwr / rate)) / rate) * 1000;
/* Errors bigger than +/- 1% won't be tolerated */
if (error < -10 || error > 10)
- return (-1);
+ return -1;
else
- return (tc);
+ return tc;
} else {
- return (-1);
+ return -1;
}
}
@@ -3207,7 +3207,7 @@ static u32 detect_ram(pc300_t * card)
break;
}
}
- return (i);
+ return i;
}
static void plx_init(pc300_t * card)
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4293889e287..515d9b8af01 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -540,7 +540,7 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
return -ENODEV;
}
- return(0);
+ return 0;
}
static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index e2cff64a446..fd7375955e4 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -220,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
- return -EINVAL; /* No such clock setting */
+ return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index f4125da2762..3f4e2b5684d 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -178,7 +178,7 @@ static char sdla_byte(struct net_device *dev, int addr)
byte = *temp;
spin_unlock_irqrestore(&sdla_lock, flags);
- return(byte);
+ return byte;
}
static void sdla_stop(struct net_device *dev)
@@ -267,7 +267,7 @@ static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char r
resp = *temp;
}
}
- return(time_before(jiffies, done) ? jiffies - start : -1);
+ return time_before(jiffies, done) ? jiffies - start : -1;
}
/* constants for Z80 CPU speed */
@@ -283,13 +283,13 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
sdla_start(dev);
if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
- return(-EIO);
+ return -EIO;
data = LOADER_READY;
sdla_write(dev, 0, &data, 1);
if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
- return(-EIO);
+ return -EIO;
sdla_stop(dev);
sdla_read(dev, 0, &data, 1);
@@ -297,11 +297,11 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
if (data == Z80_SCC_BAD)
{
printk("%s: SCC bad\n", dev->name);
- return(-EIO);
+ return -EIO;
}
if (data != Z80_SCC_OK)
- return(-EINVAL);
+ return -EINVAL;
if (jiffs < 165)
ifr->ifr_mtu = SDLA_CPU_16M;
@@ -316,7 +316,7 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
else
ifr->ifr_mtu = SDLA_CPU_3M;
- return(0);
+ return 0;
}
/************************************************
@@ -493,7 +493,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
if (ret != SDLA_RET_OK)
sdla_errors(dev, cmd, dlci, ret, len, &status);
- return(ret);
+ return ret;
}
/***********************************************
@@ -516,14 +516,14 @@ static int sdla_activate(struct net_device *slave, struct net_device *master)
break;
if (i == CONFIG_DLCI_MAX)
- return(-ENODEV);
+ return -ENODEV;
flp->dlci[i] = abs(flp->dlci[i]);
if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
- return(0);
+ return 0;
}
static int sdla_deactivate(struct net_device *slave, struct net_device *master)
@@ -538,14 +538,14 @@ static int sdla_deactivate(struct net_device *slave, struct net_device *master)
break;
if (i == CONFIG_DLCI_MAX)
- return(-ENODEV);
+ return -ENODEV;
flp->dlci[i] = -abs(flp->dlci[i]);
if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
- return(0);
+ return 0;
}
static int sdla_assoc(struct net_device *slave, struct net_device *master)
@@ -554,7 +554,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
int i;
if (master->type != ARPHRD_DLCI)
- return(-EINVAL);
+ return -EINVAL;
flp = netdev_priv(slave);
@@ -563,11 +563,11 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
if (!flp->master[i])
break;
if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
- return(-EADDRINUSE);
+ return -EADDRINUSE;
}
if (i == CONFIG_DLCI_MAX)
- return(-EMLINK); /* #### Alan: Comments on this ?? */
+ return -EMLINK; /* #### Alan: Comments on this ?? */
flp->master[i] = master;
@@ -581,7 +581,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
}
- return(0);
+ return 0;
}
static int sdla_deassoc(struct net_device *slave, struct net_device *master)
@@ -596,7 +596,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
break;
if (i == CONFIG_DLCI_MAX)
- return(-ENODEV);
+ return -ENODEV;
flp->master[i] = NULL;
flp->dlci[i] = 0;
@@ -609,7 +609,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
}
- return(0);
+ return 0;
}
static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
@@ -626,7 +626,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
break;
if (i == CONFIG_DLCI_MAX)
- return(-ENODEV);
+ return -ENODEV;
dlp = netdev_priv(master);
@@ -641,7 +641,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
&dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
}
- return(ret == SDLA_RET_OK ? 0 : -EIO);
+ return ret == SDLA_RET_OK ? 0 : -EIO;
}
/**************************
@@ -986,7 +986,7 @@ static int sdla_close(struct net_device *dev)
netif_stop_queue(dev);
- return(0);
+ return 0;
}
struct conf_data {
@@ -1006,10 +1006,10 @@ static int sdla_open(struct net_device *dev)
flp = netdev_priv(dev);
if (!flp->initialized)
- return(-EPERM);
+ return -EPERM;
if (!flp->configured)
- return(-EPERM);
+ return -EPERM;
/* time to send in the configuration */
len = 0;
@@ -1087,7 +1087,7 @@ static int sdla_open(struct net_device *dev)
netif_start_queue(dev);
- return(0);
+ return 0;
}
static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
@@ -1098,48 +1098,48 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
short size;
if (dev->type == 0xFFFF)
- return(-EUNATCH);
+ return -EUNATCH;
flp = netdev_priv(dev);
if (!get)
{
if (netif_running(dev))
- return(-EBUSY);
+ return -EBUSY;
if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
return -EFAULT;
if (data.config.station & ~FRAD_STATION_NODE)
- return(-EINVAL);
+ return -EINVAL;
if (data.config.flags & ~FRAD_VALID_FLAGS)
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.kbaud < 0) ||
((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
- return(-EINVAL);
+ return -EINVAL;
if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.T391 < 5) || (data.config.T391 > 30))
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.T392 < 5) || (data.config.T392 > 30))
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.N391 < 1) || (data.config.N391 > 255))
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.N392 < 1) || (data.config.N392 > 10))
- return(-EINVAL);
+ return -EINVAL;
if ((data.config.N393 < 1) || (data.config.N393 > 10))
- return(-EINVAL);
+ return -EINVAL;
memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
flp->config.flags |= SDLA_DIRECT_RECV;
@@ -1171,7 +1171,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
{
size = sizeof(data);
if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
- return(-EIO);
+ return -EIO;
}
else
if (flp->configured)
@@ -1185,7 +1185,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
}
- return(0);
+ return 0;
}
static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
@@ -1200,7 +1200,7 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
{
temp = kzalloc(mem.len, GFP_KERNEL);
if (!temp)
- return(-ENOMEM);
+ return -ENOMEM;
sdla_read(dev, mem.addr, temp, mem.len);
if(copy_to_user(mem.data, temp, mem.len))
{
@@ -1217,7 +1217,7 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
sdla_write(dev, mem.addr, temp, mem.len);
kfree(temp);
}
- return(0);
+ return 0;
}
static int sdla_reconfig(struct net_device *dev)
@@ -1241,7 +1241,7 @@ static int sdla_reconfig(struct net_device *dev)
sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
- return(0);
+ return 0;
}
static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1254,20 +1254,20 @@ static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
flp = netdev_priv(dev);
if (!flp->initialized)
- return(-EINVAL);
+ return -EINVAL;
switch (cmd)
{
case FRAD_GET_CONF:
case FRAD_SET_CONF:
- return(sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF));
+ return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF);
case SDLA_IDENTIFY:
ifr->ifr_flags = flp->type;
break;
case SDLA_CPUSPEED:
- return(sdla_cpuspeed(dev, ifr));
+ return sdla_cpuspeed(dev, ifr);
/* ==========================================================
NOTE: This is rather a useless action right now, as the
@@ -1277,7 +1277,7 @@ NOTE: This is rather a useless action right now, as the
============================================================*/
case SDLA_PROTOCOL:
if (flp->configured)
- return(-EALREADY);
+ return -EALREADY;
switch (ifr->ifr_flags)
{
@@ -1285,7 +1285,7 @@ NOTE: This is rather a useless action right now, as the
dev->type = ifr->ifr_flags;
break;
default:
- return(-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
break;
@@ -1297,7 +1297,7 @@ NOTE: This is rather a useless action right now, as the
case SDLA_READMEM:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
- return(sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM));
+ return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM);
case SDLA_START:
sdla_start(dev);
@@ -1308,9 +1308,9 @@ NOTE: This is rather a useless action right now, as the
break;
default:
- return(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
- return(0);
+ return 0;
}
static int sdla_change_mtu(struct net_device *dev, int new_mtu)
@@ -1320,10 +1320,10 @@ static int sdla_change_mtu(struct net_device *dev, int new_mtu)
flp = netdev_priv(dev);
if (netif_running(dev))
- return(-EBUSY);
+ return -EBUSY;
/* for now, you can't change the MTU! */
- return(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static int sdla_set_config(struct net_device *dev, struct ifmap *map)
@@ -1337,18 +1337,18 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
flp = netdev_priv(dev);
if (flp->initialized)
- return(-EINVAL);
+ return -EINVAL;
for(i=0; i < ARRAY_SIZE(valid_port); i++)
if (valid_port[i] == map->base_addr)
break;
if (i == ARRAY_SIZE(valid_port))
- return(-EINVAL);
+ return -EINVAL;
if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
- return(-EINVAL);
+ return -EINVAL;
}
base = map->base_addr;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index e47f5a986b1..d81ad839788 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -648,7 +648,7 @@ static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
}
}
*ptr++ = X25_END;
- return (ptr - d);
+ return ptr - d;
}
static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index fbf5e843d48..93956861ea2 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -766,7 +766,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
EXPORT_SYMBOL(z8530_interrupt);
-static char reg_init[16]=
+static const u8 reg_init[16]=
{
0,0,0,0,
0,0,0,0,
@@ -1206,7 +1206,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_close);
* it exists...
*/
-static char *z8530_type_name[]={
+static const char *z8530_type_name[]={
"Z8530",
"Z85C30",
"Z85230"
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index eb72c67699a..f1549fff0ed 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -342,10 +342,10 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
model_name, dev->irq, dev->mem_start, dev->mem_end-1);
- ei_status.reset_8390 = &wd_reset_8390;
- ei_status.block_input = &wd_block_input;
- ei_status.block_output = &wd_block_output;
- ei_status.get_8390_hdr = &wd_get_8390_hdr;
+ ei_status.reset_8390 = wd_reset_8390;
+ ei_status.block_input = wd_block_input;
+ ei_status.block_output = wd_block_output;
+ ei_status.get_8390_hdr = wd_get_8390_hdr;
dev->netdev_ops = &wd_netdev_ops;
NS8390_init(dev, 0);
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 9fb03082153..12b84ed0e38 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(power_save_disabled,
"False by default (so the device is told to do power "
"saving).");
-int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
+static int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
MODULE_PARM_DESC(passive_mode,
"If true, the driver will not do any device setup "
@@ -558,8 +558,9 @@ void i2400m_report_hook(struct i2400m *i2400m,
* processing should be done in the function that calls the
* command. This is here for some cases where it can't happen...
*/
-void i2400m_msg_ack_hook(struct i2400m *i2400m,
- const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
+static void i2400m_msg_ack_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr,
+ size_t size)
{
int result;
struct device *dev = i2400m_dev(i2400m);
@@ -1135,7 +1136,7 @@ error_alloc:
* i2400m_report_state_hook() to parse the answer. This will set the
* carrier state, as well as the RF Kill switches state.
*/
-int i2400m_cmd_get_state(struct i2400m *i2400m)
+static int i2400m_cmd_get_state(struct i2400m *i2400m)
{
int result;
struct device *dev = i2400m_dev(i2400m);
@@ -1177,8 +1178,6 @@ error_msg_to_dev:
error_alloc:
return result;
}
-EXPORT_SYMBOL_GPL(i2400m_cmd_get_state);
-
/**
* Set basic configuration settings
@@ -1190,8 +1189,9 @@ EXPORT_SYMBOL_GPL(i2400m_cmd_get_state);
* right endianess (LE).
* @arg_size: number of pointers in the @args array
*/
-int i2400m_set_init_config(struct i2400m *i2400m,
- const struct i2400m_tlv_hdr **arg, size_t args)
+static int i2400m_set_init_config(struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr **arg,
+ size_t args)
{
int result;
struct device *dev = i2400m_dev(i2400m);
@@ -1258,8 +1258,6 @@ none:
return result;
}
-EXPORT_SYMBOL_GPL(i2400m_set_init_config);
-
/**
* i2400m_set_idle_timeout - Set the device's idle mode timeout
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c8b78d4abd..cdedab46ba2 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -122,7 +122,7 @@ struct i2400m_work *__i2400m_work_setup(
* works struct was already queued, but we have just allocated it, so
* it should not happen.
*/
-int i2400m_schedule_work(struct i2400m *i2400m,
+static int i2400m_schedule_work(struct i2400m *i2400m,
void (*fn)(struct work_struct *), gfp_t gfp_flags,
const void *pl, size_t pl_size)
{
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index 360d4fb195f..1d63ffdedfd 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -140,7 +140,6 @@ void i2400ms_init(struct i2400ms *i2400ms)
extern int i2400ms_rx_setup(struct i2400ms *);
extern void i2400ms_rx_release(struct i2400ms *);
-extern ssize_t __i2400ms_rx_get_size(struct i2400ms *);
extern int i2400ms_tx_setup(struct i2400ms *);
extern void i2400ms_tx_release(struct i2400ms *);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index fa74777fd65..59ac7705e76 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -910,28 +910,19 @@ struct i2400m_work {
u8 pl[0];
};
-extern int i2400m_schedule_work(struct i2400m *,
- void (*)(struct work_struct *), gfp_t,
- const void *, size_t);
-
extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
char *, size_t);
extern int i2400m_msg_size_check(struct i2400m *,
const struct i2400m_l3l4_hdr *, size_t);
extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_msg_ack_hook(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
extern void i2400m_report_hook(struct i2400m *,
const struct i2400m_l3l4_hdr *, size_t);
extern void i2400m_report_hook_work(struct work_struct *);
extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_get_state(struct i2400m *);
extern int i2400m_cmd_exit_idle(struct i2400m *);
extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_init_config(struct i2400m *,
- const struct i2400m_tlv_hdr **, size_t);
extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
static inline
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 8cc9e319f43..844133b44af 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -922,7 +922,7 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
* rx_roq_refcount becomes zero. This routine gets executed when
* rx_roq_refcount becomes zero.
*/
-void i2400m_rx_roq_destroy(struct kref *ref)
+static void i2400m_rx_roq_destroy(struct kref *ref)
{
unsigned itr;
struct i2400m *i2400m
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
int i, result;
struct device *dev = i2400m_dev(i2400m);
const struct i2400m_msg_hdr *msg_hdr;
- size_t pl_itr, pl_size, skb_len;
+ size_t pl_itr, pl_size;
unsigned long flags;
- unsigned num_pls, single_last;
+ unsigned num_pls, single_last, skb_len;
skb_len = skb->len;
- d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+ d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
i2400m, skb, skb_len);
result = -EIO;
msg_hdr = (void *) skb->data;
- result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+ result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
if (result < 0)
goto error_msg_hdr_check;
result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
num_pls * sizeof(msg_hdr->pld[0]);
pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
- if (pl_itr > skb->len) { /* got all the payload descriptors? */
+ if (pl_itr > skb_len) { /* got all the payload descriptors? */
dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
"%u payload descriptors (%zu each, total %zu)\n",
- skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+ skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
goto error_pl_descr_short;
}
/* Walk each payload payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
/* work around old gcc warnings */
pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
- pl_itr, skb->len);
+ pl_itr, skb_len);
if (result < 0)
goto error_pl_descr_check;
single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
if (i < i2400m->rx_pl_min)
i2400m->rx_pl_min = i;
i2400m->rx_num++;
- i2400m->rx_size_acc += skb->len;
- if (skb->len < i2400m->rx_size_min)
- i2400m->rx_size_min = skb->len;
- if (skb->len > i2400m->rx_size_max)
- i2400m->rx_size_max = skb->len;
+ i2400m->rx_size_acc += skb_len;
+ if (skb_len < i2400m->rx_size_min)
+ i2400m->rx_size_min = skb_len;
+ if (skb_len > i2400m->rx_size_max)
+ i2400m->rx_size_max = skb_len;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
- d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+ d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
i2400m, skb, skb_len, result);
return result;
}
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index 8b809c2ead6..fb6396dd115 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -87,7 +87,7 @@ static const __le32 i2400m_ACK_BARKER[4] = {
*
* sdio_readl() doesn't work.
*/
-ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
+static ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
{
int ret, cnt, val;
ssize_t rx_size;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index fdebbe7eebb..5a56502c4eb 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2721,9 +2721,8 @@ static int airo_networks_allocate(struct airo_info *ai)
if (ai->networks)
return 0;
- ai->networks =
- kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
- GFP_KERNEL);
+ ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
+ GFP_KERNEL);
if (!ai->networks) {
airo_print_warn("", "Out of memory allocating beacons");
return -ENOMEM;
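The airo change swaps an open-coded kzalloc(count * size) for kcalloc(count, size): both return a zeroed array, but kcalloc() returns NULL if the multiplication would overflow instead of quietly handing back a short buffer. A minimal sketch with a stand-in element type:
#include <linux/types.h>
#include <linux/slab.h>
struct bss_slot {               /* stand-in for BSSListElement */
        u32 id;
        u8 bssid[6];
};
static struct bss_slot *alloc_slots(size_t count)
{
        /* Overflow-checked equivalent of kzalloc(count * sizeof(...), ...). */
        return kcalloc(count, sizeof(struct bss_slot), GFP_KERNEL);
}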
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 33bdc6a84e8..9a121a5b787 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -32,7 +32,6 @@
#include <linux/timer.h>
#include <linux/netdevice.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -155,8 +154,6 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- win_req_t *req = priv_data;
-
if (cfg->index == 0)
return -ENODEV;
@@ -176,52 +173,25 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
- /*
- Now set up a common memory window, if needed. There is room
- in the struct pcmcia_device structure for one memory window handle,
- but if the base addresses need to be saved, or if multiple
- windows are needed, the info should go in the private data
- structure for this device.
-
- Note that the memory window base is a physical address, and
- needs to be mapped to virtual space with ioremap() before it
- is used.
- */
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- memreq_t map;
- req->Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
- return -ENODEV;
- }
/* If we got this far, we're cool! */
return 0;
}
@@ -230,17 +200,12 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
static int airo_config(struct pcmcia_device *link)
{
local_info_t *dev;
- win_req_t *req;
int ret;
dev = link->priv;
dev_dbg(&link->dev, "airo_config\n");
- req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
/*
* In this loop, we scan the CIS for configuration table
* entries, each of which describes a valid card
@@ -255,7 +220,7 @@ static int airo_config(struct pcmcia_device *link)
* and most client drivers will only use the CIS to fill in
* implementation-defined details.
*/
- ret = pcmcia_loop_config(link, airo_cs_config_check, req);
+ ret = pcmcia_loop_config(link, airo_cs_config_check, NULL);
if (ret)
goto failed;
@@ -272,7 +237,7 @@ static int airo_config(struct pcmcia_device *link)
goto failed;
((local_info_t *)link->priv)->eth_dev =
init_airo_card(link->irq,
- link->io.BasePort1, 1, &link->dev);
+ link->resource[0]->start, 1, &link->dev);
if (!((local_info_t *)link->priv)->eth_dev)
goto failed;
@@ -282,22 +247,15 @@ static int airo_config(struct pcmcia_device *link)
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req->Base,
- req->Base+req->Size-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
- kfree(req);
return 0;
failed:
airo_release(link);
- kfree(req);
return -ENODEV;
} /* airo_config */
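The airo_cs conversion above follows the tree-wide PCMCIA core move from the io_req_t embedded in struct pcmcia_device to the resource[] array, after which pcmcia_request_io() takes only the device. A minimal config-check callback in that style (the function name is hypothetical and the error handling is reduced to the essentials):
#include <linux/errno.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
static int my_cs_config_check(struct pcmcia_device *p_dev,
                              cistpl_cftable_entry_t *cfg,
                              cistpl_cftable_entry_t *dflt,
                              unsigned int vcc, void *priv_data)
{
        if (cfg->index == 0 || cfg->io.nwin == 0)
                return -ENODEV;
        /* Describe the first IO window through the generic resource array. */
        p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
        p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags);
        p_dev->resource[0]->start = cfg->io.win[0].base;
        p_dev->resource[0]->end = cfg->io.win[0].len;
        p_dev->resource[1]->end = 0;
        /* Reserves the window; it is only enabled by the later configuration call. */
        return pcmcia_request_io(p_dev);
}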
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index c2746fc7f2b..3b632161c10 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -42,7 +42,6 @@
#include <linux/moduleparam.h>
#include <linux/device.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -191,25 +190,23 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int atmel_config(struct pcmcia_device *link)
@@ -254,7 +251,7 @@ static int atmel_config(struct pcmcia_device *link)
((local_info_t*)link->priv)->eth_dev =
init_atmel_card(link->irq,
- link->io.BasePort1,
+ link->resource[0]->start,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&link->dev,
card_present,
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 0e99b634267..dfbc41d431f 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -26,7 +26,6 @@
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -65,7 +64,6 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
{
struct ssb_bus *ssb;
win_req_t win;
- memreq_t mem;
int err = -ENOMEM;
int res = 0;
@@ -78,12 +76,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
dev->conf.Attributes = CONF_ENABLE_IRQ;
dev->conf.IntType = INT_MEMORY_AND_IO;
- dev->io.BasePort2 = 0;
- dev->io.NumPorts2 = 0;
- dev->io.Attributes2 = 0;
-
- win.Attributes = WIN_ADDR_SPACE_MEM | WIN_MEMORY_TYPE_CM |
- WIN_ENABLE | WIN_DATA_WIDTH_16 |
+ win.Attributes = WIN_ENABLE | WIN_DATA_WIDTH_16 |
WIN_USE_WAIT;
win.Base = 0;
win.Size = SSB_CORE_SIZE;
@@ -92,9 +85,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
if (res != 0)
goto err_kfree_ssb;
- mem.CardOffset = 0;
- mem.Page = 0;
- res = pcmcia_map_mem_page(dev, dev->win, &mem);
+ res = pcmcia_map_mem_page(dev, dev->win, 0);
if (res != 0)
goto err_disable;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 1bdf20c8c43..dfec5496055 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -1176,7 +1176,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
len = bw << 1;
}
- samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL);
if (!samples) {
b43err(dev->wl, "allocation for samples generation failed\n");
return 0;
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 29b31a694b5..ba54d1b04d2 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -12,7 +12,6 @@
#include <linux/wireless.h>
#include <net/iw_handler.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -23,7 +22,7 @@
#include "hostap_wlan.h"
-static dev_info_t dev_info = "hostap_cs";
+static char *dev_info = "hostap_cs";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
@@ -225,27 +224,18 @@ static int prism2_pccard_card_present(local_info_t *local)
static void sandisk_set_iobase(local_info_t *local)
{
int res;
- conf_reg_t reg;
struct hostap_cs_priv *hw_priv = local->hw_priv;
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = 0x10; /* 0x3f0 IO base 1 */
- reg.Value = hw_priv->link->io.BasePort1 & 0x00ff;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, 0x10,
+ hw_priv->link->resource[0]->start & 0x00ff);
if (res != 0) {
printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
" res=%d\n", res);
}
udelay(10);
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = 0x12; /* 0x3f2 IO base 2 */
- reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, 0x12,
+ (hw_priv->link->resource[0]->start >> 8) & 0x00ff);
if (res != 0) {
printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
" res=%d\n", res);
@@ -271,12 +261,11 @@ static void sandisk_write_hcr(local_info_t *local, int hcr)
static int sandisk_enable_wireless(struct net_device *dev)
{
int res, ret = 0;
- conf_reg_t reg;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct hostap_cs_priv *hw_priv = local->hw_priv;
- if (hw_priv->link->io.NumPorts1 < 0x42) {
+ if (resource_size(hw_priv->link->resource[0]) < 0x42) {
/* Not enough ports to be SanDisk multi-function card */
ret = -ENODEV;
goto done;
@@ -298,12 +287,8 @@ static int sandisk_enable_wireless(struct net_device *dev)
" - using vendor-specific initialization\n", dev->name);
hw_priv->sandisk_connectplus = 1;
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
dev->name, res);
@@ -311,16 +296,13 @@ static int sandisk_enable_wireless(struct net_device *dev)
}
mdelay(5);
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
/*
* Do not enable interrupts here to avoid some bogus events. Interrupts
* will be enabled during the first cor_sreset call.
*/
- reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ (COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE |
+ COR_FUNC_ENA));
if (res != 0) {
printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
dev->name, res);
@@ -343,30 +325,23 @@ done:
static void prism2_pccard_cor_sreset(local_info_t *local)
{
int res;
- conf_reg_t reg;
+ u8 val;
struct hostap_cs_priv *hw_priv = local->hw_priv;
if (!prism2_pccard_card_present(local))
return;
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- reg.Value = 0;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &val);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
res);
return;
}
printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n",
- reg.Value);
+ val);
- reg.Action = CS_WRITE;
- reg.Value |= COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ val |= COR_SOFT_RESET;
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
res);
@@ -375,11 +350,10 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
- reg.Value &= ~COR_SOFT_RESET;
+ val &= ~COR_SOFT_RESET;
if (hw_priv->sandisk_connectplus)
- reg.Value |= COR_IREQ_ENA;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ val |= COR_IREQ_ENA;
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
res);
@@ -396,8 +370,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
{
int res;
- conf_reg_t reg;
- int old_cor;
+ u8 old_cor;
struct hostap_cs_priv *hw_priv = local->hw_priv;
if (!prism2_pccard_card_present(local))
@@ -408,25 +381,17 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
return;
}
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- reg.Value = 0;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
"(%d)\n", res);
return;
}
printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
- reg.Value);
- old_cor = reg.Value;
+ old_cor);
- reg.Action = CS_WRITE;
- reg.Value |= COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ old_cor | COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
"(%d)\n", res);
@@ -436,11 +401,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
mdelay(10);
/* Setup Genesis mode */
- reg.Action = CS_WRITE;
- reg.Value = hcr;
- reg.Offset = CISREG_CCSR;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
"(%d)\n", res);
@@ -448,11 +409,8 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
}
mdelay(10);
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = old_cor & ~COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ old_cor & ~COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
"(%d)\n", res);
@@ -561,30 +519,24 @@ static int prism2_config_check(struct pcmcia_device *p_dev,
PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
"dflt->io.nwin=%d\n",
cfg->io.nwin, dflt->io.nwin);
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- PDEBUG(DEBUG_EXTRA, "io->flags = 0x%04X, "
- "io.base=0x%04x, len=%d\n", io->flags,
- io->win[0].base, io->win[0].len);
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags &
- CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int prism2_config(struct pcmcia_device *link)
@@ -646,7 +598,7 @@ static int prism2_config(struct pcmcia_device *link)
goto failed_unlock;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
spin_unlock_irqrestore(&local->irq_init_lock, flags);
@@ -658,12 +610,10 @@ static int prism2_config(struct pcmcia_device *link)
link->conf.Vpp % 10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
local->shutdown = 0;
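The hostap_cs hunks collapse each conf_reg_t / pcmcia_access_configuration_register() sequence into a single pcmcia_read_config_byte() or pcmcia_write_config_byte() call. A minimal sketch of the resulting read-modify-write COR soft reset (the helper name and the 2 ms delay are illustrative):
#include <linux/types.h>
#include <linux/delay.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
static int my_cor_sreset(struct pcmcia_device *link)
{
        u8 cor;
        int res;
        res = pcmcia_read_config_byte(link, CISREG_COR, &cor);
        if (res)
                return res;
        res = pcmcia_write_config_byte(link, CISREG_COR, cor | COR_SOFT_RESET);
        if (res)
                return res;
        mdelay(2);
        /* Release the reset, restoring the original COR value. */
        return pcmcia_write_config_byte(link, CISREG_COR, cor & ~COR_SOFT_RESET);
}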
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index ed69e60f86a..61915f37141 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1921,9 +1921,9 @@ static int ipw2100_net_init(struct net_device *dev)
bg_band->band = IEEE80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
- bg_band->channels =
- kzalloc(geo->bg_channels *
- sizeof(struct ieee80211_channel), GFP_KERNEL);
+ bg_band->channels = kcalloc(geo->bg_channels,
+ sizeof(struct ieee80211_channel),
+ GFP_KERNEL);
if (!bg_band->channels) {
ipw2100_down(priv);
return -ENOMEM;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index d04d7608277..8d6ed5f6f46 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11467,9 +11467,9 @@ static int ipw_net_init(struct net_device *dev)
bg_band->band = IEEE80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
- bg_band->channels =
- kzalloc(geo->bg_channels *
- sizeof(struct ieee80211_channel), GFP_KERNEL);
+ bg_band->channels = kcalloc(geo->bg_channels,
+ sizeof(struct ieee80211_channel),
+ GFP_KERNEL);
if (!bg_band->channels) {
rc = -ENOMEM;
goto out;
@@ -11506,9 +11506,9 @@ static int ipw_net_init(struct net_device *dev)
a_band->band = IEEE80211_BAND_5GHZ;
a_band->n_channels = geo->a_channels;
- a_band->channels =
- kzalloc(geo->a_channels *
- sizeof(struct ieee80211_channel), GFP_KERNEL);
+ a_band->channels = kcalloc(geo->a_channels,
+ sizeof(struct ieee80211_channel),
+ GFP_KERNEL);
if (!a_band->channels) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1dc44bc6511..5046a000503 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -6,6 +6,8 @@
*
*/
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index a6fd70404c3..e213a5dc049 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -28,7 +28,6 @@
#include <linux/firmware.h>
#include <linux/netdevice.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -800,9 +799,9 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
/* Do we need to allocate an interrupt? */
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
@@ -814,7 +813,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int if_cs_probe(struct pcmcia_device *p_dev)
@@ -853,7 +852,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
goto out1;
/* Initialize io access */
- card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1);
+ card->iobase = ioport_map(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
if (!card->iobase) {
lbs_pr_err("error in ioport_map\n");
ret = -EIO;
@@ -872,9 +872,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
}
/* Finally, report what we've done */
- lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n",
- p_dev->irq, p_dev->io.BasePort1,
- p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
+ lbs_deb_cs("irq %d, io %pR", p_dev->irq, p_dev->resource[0]);
/*
* Most of the libertas cards can do unaligned register access, but some
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 0b3119d9c02..296fd00a512 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -121,6 +121,8 @@ struct if_sdio_card {
const char *helper;
const char *firmware;
+ bool helper_allocated;
+ bool firmware_allocated;
u8 buffer[65536] __attribute__((aligned(4)));
@@ -1104,6 +1106,10 @@ free:
kfree(packet);
}
+ if (card->helper_allocated)
+ kfree(card->helper);
+ if (card->firmware_allocated)
+ kfree(card->firmware);
kfree(card);
goto out;
@@ -1154,6 +1160,10 @@ static void if_sdio_remove(struct sdio_func *func)
kfree(packet);
}
+ if (card->helper_allocated)
+ kfree(card->helper);
+ if (card->firmware_allocated)
+ kfree(card->firmware);
kfree(card);
lbs_deb_leave(LBS_DEB_SDIO);
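The if_sdio change adds helper_allocated/firmware_allocated flags so that the error and remove paths kfree() the firmware-name strings only when the driver actually allocated them, and not when they alias a module parameter or a static table entry. A minimal sketch of that ownership-flag pattern (the structure, default name and helper functions are illustrative):
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
struct fw_names {
        const char *helper;
        bool helper_allocated;
};
static int set_helper_name(struct fw_names *n, const char *user_override)
{
        if (user_override) {
                n->helper = user_override;      /* caller owns it: just reference */
                n->helper_allocated = false;
        } else {
                n->helper = kstrdup("sd8686_helper.bin", GFP_KERNEL);
                if (!n->helper)
                        return -ENOMEM;
                n->helper_allocated = true;     /* we own it: free it later */
        }
        return 0;
}
static void put_helper_name(struct fw_names *n)
{
        if (n->helper_allocated)
                kfree(n->helper);
        n->helper = NULL;
        n->helper_allocated = false;
}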
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index b70f0f49011..efaf8503220 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -304,10 +304,13 @@ static int if_usb_probe(struct usb_interface *intf,
}
/* Upload firmware */
+ kparam_block_sysfs_write(fw_name);
if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
+ kparam_unblock_sysfs_write(fw_name);
lbs_deb_usbd(&udev->dev, "FW upload failed\n");
goto err_prog_firmware;
}
+ kparam_unblock_sysfs_write(fw_name);
if (!(priv = lbs_add_card(cardp, &udev->dev)))
goto err_prog_firmware;
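Both libertas if_usb hunks bracket the use of the fw_name module parameter with kparam_block_sysfs_write()/kparam_unblock_sysfs_write(), so a concurrent write through sysfs cannot free or replace the charp while the driver is still dereferencing it. A minimal sketch, assuming a hypothetical fw_name parameter of its own:
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/firmware.h>
static char *fw_name = "default_fw.bin";        /* hypothetical parameter */
module_param(fw_name, charp, 0644);
static int load_named_firmware(const struct firmware **fw, struct device *dev)
{
        int ret;
        /* Pin the charp so sysfs cannot swap it out under request_firmware(). */
        kparam_block_sysfs_write(fw_name);
        ret = request_firmware(fw, fw_name, dev);
        kparam_unblock_sysfs_write(fw_name);
        return ret;
}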
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 1cf01acef5f..ba7d96584cb 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -824,12 +824,15 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
lbtf_deb_enter(LBTF_DEB_USB);
+ kparam_block_sysfs_write(fw_name);
ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
if (ret < 0) {
pr_err("request_firmware() failed with %#x\n", ret);
pr_err("firmware %s not found\n", lbtf_fw_name);
+ kparam_unblock_sysfs_write(fw_name);
goto done;
}
+ kparam_unblock_sysfs_write(fw_name);
if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
goto release_fw;
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index b16d5db52a4..ef46a2d8853 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -192,25 +191,23 @@ static int orinoco_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
goto next_entry;
}
return 0;
@@ -258,7 +255,8 @@ orinoco_cs_config(struct pcmcia_device *link)
/* We initialize the hermes structure before completing PCMCIA
* configuration just in case the interrupt handler gets
* called. */
- mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
+ mem = ioport_map(link->resource[0]->start,
+ resource_size(link->resource[0]));
if (!mem)
goto failed;
@@ -280,7 +278,7 @@ orinoco_cs_config(struct pcmcia_device *link)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, link->io.BasePort1,
+ if (orinoco_if_add(priv, link->resource[0]->start,
link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index b51a9adc80f..873877e17e1 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -80,35 +79,27 @@ static int
spectrum_reset(struct pcmcia_device *link, int idle)
{
int ret;
- conf_reg_t reg;
- u_int save_cor;
+ u8 save_cor;
+ u8 ccsr;
/* Doing it if hardware is gone is guaranteed crash */
if (!pcmcia_dev_present(link))
return -ENODEV;
/* Save original COR value */
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_read_config_byte(link, CISREG_COR, &save_cor);
if (ret)
goto failed;
- save_cor = reg.Value;
/* Soft-Reset card */
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = (save_cor | COR_SOFT_RESET);
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_write_config_byte(link, CISREG_COR,
+ (save_cor | COR_SOFT_RESET));
if (ret)
goto failed;
udelay(1000);
/* Read CCSR */
- reg.Action = CS_READ;
- reg.Offset = CISREG_CCSR;
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_read_config_byte(link, CISREG_CCSR, &ccsr);
if (ret)
goto failed;
@@ -116,19 +107,15 @@ spectrum_reset(struct pcmcia_device *link, int idle)
* Start or stop the firmware. Memory width bit should be
* preserved from the value we've just read.
*/
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_CCSR;
- reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
- ret = pcmcia_access_configuration_register(link, &reg);
+ ccsr = (idle ? HCR_IDLE : HCR_RUN) | (ccsr & HCR_MEM16);
+ ret = pcmcia_write_config_byte(link, CISREG_CCSR, ccsr);
if (ret)
goto failed;
udelay(1000);
/* Restore original COR configuration index */
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = (save_cor & ~COR_SOFT_RESET);
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_write_config_byte(link, CISREG_COR,
+ (save_cor & ~COR_SOFT_RESET));
if (ret)
goto failed;
udelay(1000);
@@ -266,25 +253,23 @@ static int spectrum_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
goto next_entry;
}
return 0;
@@ -332,7 +317,8 @@ spectrum_cs_config(struct pcmcia_device *link)
/* We initialize the hermes structure before completing PCMCIA
* configuration just in case the interrupt handler gets
* called. */
- mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
+ mem = ioport_map(link->resource[0]->start,
+ resource_size(link->resource[0]));
if (!mem)
goto failed;
@@ -359,7 +345,7 @@ spectrum_cs_config(struct pcmcia_device *link)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, link->io.BasePort1,
+ if (orinoco_if_add(priv, link->resource[0]->start,
link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
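For reference, the conversion above collapses each read-modify-write of a PCMCIA configuration register into a single helper call. A minimal sketch of the same COR soft-reset sequence using the new helpers (identifiers taken from the hunks above; the wrapper name itself is hypothetical, error handling trimmed):

/* Hedged sketch only: toggle COR_SOFT_RESET with the new config-byte
 * helpers instead of filling in a conf_reg_t for every access. */
static int example_cor_soft_reset(struct pcmcia_device *link)
{
	u8 cor;
	int ret;

	ret = pcmcia_read_config_byte(link, CISREG_COR, &cor);
	if (ret)
		return ret;

	ret = pcmcia_write_config_byte(link, CISREG_COR, cor | COR_SOFT_RESET);
	if (ret)
		return ret;
	udelay(1000);

	return pcmcia_write_config_byte(link, CISREG_COR, cor & ~COR_SOFT_RESET);
}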
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 390ccf6e08a..5ca624a64c4 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -43,10 +43,8 @@
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
-#include <linux/ethtool.h>
#include <linux/ieee80211.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -81,8 +79,6 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map);
static struct net_device_stats *ray_get_stats(struct net_device *dev);
static int ray_dev_init(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
static int ray_open(struct net_device *dev);
static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -315,9 +311,8 @@ static int ray_probe(struct pcmcia_device *p_dev)
local->finder = p_dev;
/* The io structure describes IO port mapping. None used here */
- p_dev->io.NumPorts1 = 0;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = 5;
+ p_dev->resource[0]->end = 0;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
@@ -335,7 +330,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
/* Raylink entries in the device structure */
dev->netdev_ops = &ray_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->wireless_handlers = &ray_handler_def;
#ifdef WIRELESS_SPY
local->wireless_data.spy_data = &local->spy_data;
@@ -394,7 +388,6 @@ static int ray_config(struct pcmcia_device *link)
int ret = 0;
int i;
win_req_t req;
- memreq_t mem;
struct net_device *dev = (struct net_device *)link->priv;
ray_dev_t *local = netdev_priv(dev);
@@ -431,9 +424,7 @@ static int ray_config(struct pcmcia_device *link)
ret = pcmcia_request_window(link, &req, &link->win);
if (ret)
goto failed;
- mem.CardOffset = 0x0000;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
+ ret = pcmcia_map_mem_page(link, link->win, 0);
if (ret)
goto failed;
local->sram = ioremap(req.Base, req.Size);
@@ -447,9 +438,7 @@ static int ray_config(struct pcmcia_device *link)
ret = pcmcia_request_window(link, &req, &local->rmem_handle);
if (ret)
goto failed;
- mem.CardOffset = 0x8000;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, local->rmem_handle, &mem);
+ ret = pcmcia_map_mem_page(link, local->rmem_handle, 0x8000);
if (ret)
goto failed;
local->rmem = ioremap(req.Base, req.Size);
@@ -463,9 +452,7 @@ static int ray_config(struct pcmcia_device *link)
ret = pcmcia_request_window(link, &req, &local->amem_handle);
if (ret)
goto failed;
- mem.CardOffset = 0x0000;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, local->amem_handle, &mem);
+ ret = pcmcia_map_mem_page(link, local->amem_handle, 0);
if (ret)
goto failed;
local->amem = ioremap(req.Base, req.Size);
@@ -617,7 +604,7 @@ static int dl_startup_params(struct net_device *dev)
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
local->timer.data = (long)local;
- local->timer.function = &verify_dl_startup;
+ local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
"ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -793,7 +780,6 @@ static void ray_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
ray_dev_t *local = netdev_priv(dev);
- int i;
dev_dbg(&link->dev, "ray_release\n");
@@ -802,13 +788,6 @@ static void ray_release(struct pcmcia_device *link)
iounmap(local->sram);
iounmap(local->rmem);
iounmap(local->amem);
- /* Do bother checking to see if these succeed or not */
- i = pcmcia_release_window(link, local->amem_handle);
- if (i != 0)
- dev_dbg(&link->dev, "ReleaseWindow(local->amem) ret = %x\n", i);
- i = pcmcia_release_window(link, local->rmem_handle);
- if (i != 0)
- dev_dbg(&link->dev, "ReleaseWindow(local->rmem) ret = %x\n", i);
pcmcia_disable_device(link);
dev_dbg(&link->dev, "ray_release ending\n");
@@ -1079,18 +1058,6 @@ AP to AP 1 1 dest AP src AP dest source
}
} /* end encapsulate_frame */
-/*===========================================================================*/
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "ray_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
/*====================================================================*/
/*------------------------------------------------------------------*/
@@ -2014,12 +1981,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
local->sparm.b4.a_current_ess_id);
- local->timer.function = &start_net;
+ local->timer.function = start_net;
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" join failed\n",
local->sparm.b4.a_current_ess_id);
- local->timer.function = &join_net;
+ local->timer.function = join_net;
}
add_timer(&local->timer);
}
@@ -2487,9 +2454,9 @@ static void authenticate(ray_dev_t *local)
del_timer(&local->timer);
if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
- local->timer.function = &join_net;
+ local->timer.function = join_net;
} else {
- local->timer.function = &authenticate_timeout;
+ local->timer.function = authenticate_timeout;
}
local->timer.expires = jiffies + HZ * 2;
local->timer.data = (long)local;
@@ -2574,7 +2541,7 @@ static void associate(ray_dev_t *local)
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 2;
local->timer.data = (long)local;
- local->timer.function = &join_net;
+ local->timer.function = join_net;
add_timer(&local->timer);
local->card_status = CARD_ASSOC_FAILED;
return;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 095cb6982d7..4f420a9ec5d 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1494,7 +1494,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
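This kzalloc-to-kcalloc pattern repeats throughout the rt2x00 and wl12xx hunks below; the two calls are equivalent for in-range sizes, but kcalloc() also fails cleanly if the multiplication would overflow. A minimal sketch using the identifiers from the hunk above:

/* kcalloc() zeroes the buffer like kzalloc(), and returns NULL instead
 * of wrapping if num_channels * sizeof(*info) would overflow. */
info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
	return -ENOMEM;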
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 7d85bf9bd25..97feb7aef80 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1809,7 +1809,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 10339649506..93e44c7f3a7 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1722,7 +1722,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b9433fe2855..5f00e00789d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3297,7 +3297,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 54dc44bb415..c1710b27ba7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -333,7 +333,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
if (*offset)
return 0;
- data = kzalloc(lines * MAX_LINE_LENGTH, GFP_KERNEL);
+ data = kcalloc(lines, MAX_LINE_LENGTH, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -380,7 +380,7 @@ static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
loff_t *offset)
{
struct rt2x00debug_intf *intf = file->private_data;
- char *name[] = { "WEP64", "WEP128", "TKIP", "AES" };
+ static const char * const name[] = { "WEP64", "WEP128", "TKIP", "AES" };
char *data;
char *temp;
size_t size;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 7e30144c5cf..e360d287def 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -804,7 +804,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
* Allocate all queue entries.
*/
entry_size = sizeof(*entries) + qdesc->priv_size;
- entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
+ entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -937,7 +937,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
*/
rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
- queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
+ queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
if (!queue) {
ERROR(rt2x00dev, "Queue allocation failed.\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index ac370f11e74..af548c87f10 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2661,7 +2661,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 66939fd8689..9be8089317e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2105,7 +2105,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c
index 5c76b79a96b..909bb47995b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.c
@@ -292,7 +292,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
wl->scan.req = req;
- wl->scan.scanned_ch = kzalloc(req->n_channels *
+ wl->scan.scanned_ch = kcalloc(req->n_channels,
sizeof(*wl->scan.scanned_ch),
GFP_KERNEL);
/* we assume failure so that timeout scenarios are handled correctly */
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 376c6b964a9..420e9e986a1 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/types.h>
-#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/in.h>
@@ -48,7 +47,6 @@
#include <net/iw_handler.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -89,13 +87,6 @@
static int wl3501_config(struct pcmcia_device *link);
static void wl3501_release(struct pcmcia_device *link);
-/*
- * The dev_info variable is the "key" that is used to match up this
- * device driver with appropriate cards, through the card configuration
- * database.
- */
-static dev_info_t wl3501_dev_info = "wl3501_cs";
-
static const struct {
int reg_domain;
int min, max, deflt;
@@ -1419,15 +1410,6 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
return wstats;
}
-static void wl3501_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, wl3501_dev_info, sizeof(info->driver));
-}
-
-static const struct ethtool_ops ops = {
- .get_drvinfo = wl3501_get_drvinfo
-};
-
/**
* wl3501_detach - deletes a driver "instance"
* @link - FILL_IN
@@ -1892,9 +1874,8 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
struct wl3501_card *this;
/* The io structure describes IO port mapping */
- p_dev->io.NumPorts1 = 16;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = 5;
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags = IO_DATA_PATH_WIDTH_8;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
@@ -1914,7 +1895,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
this->p_dev = p_dev;
dev->wireless_data = &this->wireless_data;
dev->wireless_handlers = &wl3501_handler_def;
- SET_ETHTOOL_OPS(dev, &ops);
netif_stop_queue(dev);
p_dev->priv = dev;
@@ -1940,13 +1920,14 @@ static int wl3501_config(struct pcmcia_device *link)
/* Try allocating IO ports. This tries a few fixed addresses. If you
* want, you can also read the card's config table to pick addresses --
* see the serial driver for an example. */
+ link->io_lines = 5;
for (j = 0x280; j < 0x400; j += 0x20) {
/* The '^0x300' is so that we probe 0x300-0x3ff first, then
* 0x200-0x2ff, and so on, because this seems safer */
- link->io.BasePort1 = j;
- link->io.BasePort2 = link->io.BasePort1 + 0x10;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j;
+ link->resource[1]->start = link->resource[0]->start + 0x10;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
@@ -1968,7 +1949,7 @@ static int wl3501_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev)) {
printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b50fedcef8a..630fb866476 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -135,7 +135,7 @@ static void skb_entry_set_link(union skb_entry *list, unsigned short id)
static int skb_entry_is_link(const union skb_entry *list)
{
BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
- return ((unsigned long)list->skb < PAGE_OFFSET);
+ return (unsigned long)list->skb < PAGE_OFFSET;
}
/*
@@ -203,8 +203,8 @@ static void rx_refill_timeout(unsigned long data)
static int netfront_tx_slot_available(struct netfront_info *np)
{
- return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
- (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+ return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
static void xennet_maybe_wake_tx(struct net_device *dev)
@@ -1395,7 +1395,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
}
/* Common code used when first setting up, and when resuming. */
-static int talk_to_backend(struct xenbus_device *dev,
+static int talk_to_netback(struct xenbus_device *dev,
struct netfront_info *info)
{
const char *message;
@@ -1545,7 +1545,7 @@ static int xennet_connect(struct net_device *dev)
return -ENODEV;
}
- err = talk_to_backend(np->xbdev, np);
+ err = talk_to_netback(np->xbdev, np);
if (err)
return err;
@@ -1599,7 +1599,7 @@ static int xennet_connect(struct net_device *dev)
/**
* Callback received when the backend's state changes.
*/
-static void backend_changed(struct xenbus_device *dev,
+static void netback_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct netfront_info *np = dev_get_drvdata(&dev->dev);
@@ -1801,7 +1801,7 @@ static struct xenbus_driver netfront_driver = {
.probe = netfront_probe,
.remove = __devexit_p(xennet_remove),
.resume = netfront_resume,
- .otherend_changed = backend_changed,
+ .otherend_changed = netback_changed,
};
static int __init netif_init(void)
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index d04c5b26205..f3f8be5a35f 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -20,7 +20,7 @@
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/slab.h>
-
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
@@ -641,7 +641,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
skb_put(skb, len); /* Tell the skb how much data we got */
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -1086,7 +1086,7 @@ static void xemaclite_remove_ndev(struct net_device *ndev)
*
* Return: Value of the parameter if the parameter is found, or 0 otherwise
*/
-static bool get_bool(struct of_device *ofdev, const char *s)
+static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
@@ -1115,7 +1115,7 @@ static struct net_device_ops xemaclite_netdev_ops;
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
*/
-static int __devinit xemaclite_of_probe(struct of_device *ofdev,
+static int __devinit xemaclite_of_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct resource r_irq; /* Interrupt resources */
@@ -1240,7 +1240,7 @@ error2:
*
* Return: 0, always.
*/
-static int __devexit xemaclite_of_remove(struct of_device *of_dev)
+static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
{
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1269,6 +1269,16 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+xemaclite_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ xemaclite_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
@@ -1276,6 +1286,9 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
.ndo_get_stats = xemaclite_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xemaclite_poll_controller,
+#endif
};
/* Match table for OF platform binding */
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 4eb67aed68d..cd1b3dcd61d 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -646,7 +646,7 @@ static int yellowfin_open(struct net_device *dev)
init_timer(&yp->timer);
yp->timer.expires = jiffies + 3*HZ;
yp->timer.data = (unsigned long)dev;
- yp->timer.function = &yellowfin_timer; /* timer handler */
+ yp->timer.function = yellowfin_timer; /* timer handler */
add_timer(&yp->timer);
return 0;
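Dropping the & from the timer.function assignments here and in ray_cs above is purely cosmetic: a function name already decays to a function pointer in C. A short sketch based on the yellowfin hunk (no behaviour change implied):

init_timer(&yp->timer);
yp->timer.expires = jiffies + 3*HZ;
yp->timer.data = (unsigned long)dev;
yp->timer.function = yellowfin_timer;	/* identical to &yellowfin_timer */
add_timer(&yp->timer);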
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 7cecc8fea9b..6acbff389ab 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -1,35 +1,61 @@
-config OF_FLATTREE
+config DTC
+ bool
+
+config OF
bool
+
+menu "Flattened Device Tree and Open Firmware support"
depends on OF
+config PROC_DEVICETREE
+ bool "Support for device tree in /proc"
+ depends on PROC_FS && !SPARC
+ help
+ This option adds a device-tree directory under /proc which contains
+ an image of the device tree that the kernel copies from Open
+ Firmware or other boot firmware. If unsure, say Y here.
+
+config OF_FLATTREE
+ bool
+ select DTC
+
config OF_DYNAMIC
def_bool y
- depends on OF && PPC_OF
+ depends on PPC_OF
+
+config OF_ADDRESS
+ def_bool y
+ depends on !SPARC
+
+config OF_IRQ
+ def_bool y
+ depends on !SPARC
config OF_DEVICE
def_bool y
- depends on OF && (SPARC || PPC_OF || MICROBLAZE)
config OF_GPIO
def_bool y
- depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB
+ depends on GPIOLIB && !SPARC
help
OpenFirmware GPIO accessors
config OF_I2C
def_tristate I2C
- depends on (PPC_OF || MICROBLAZE) && I2C
+ depends on I2C && !SPARC
help
OpenFirmware I2C accessors
config OF_SPI
def_tristate SPI
- depends on OF && (PPC_OF || MICROBLAZE) && SPI
+ depends on SPI && !SPARC
help
OpenFirmware SPI accessors
config OF_MDIO
def_tristate PHYLIB
- depends on OF && PHYLIB
+ depends on PHYLIB
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
+
+endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index f232cc98ce0..0052c405463 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,5 +1,7 @@
obj-y = base.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
+obj-$(CONFIG_OF_ADDRESS) += address.o
+obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_DEVICE) += device.o platform.o
obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
new file mode 100644
index 00000000000..fcadb726d4f
--- /dev/null
+++ b/drivers/of/address.c
@@ -0,0 +1,595 @@
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/pci_regs.h>
+#include <linux/string.h>
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
+ (ns) > 0)
+
+static struct of_bus *of_match_bus(struct device_node *np);
+static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
+ u64 size, unsigned int flags,
+ struct resource *r);
+
+/* Debug utility */
+#ifdef DEBUG
+static void of_dump_addr(const char *s, const u32 *addr, int na)
+{
+ printk(KERN_DEBUG "%s", s);
+ while (na--)
+ printk(" %08x", be32_to_cpu(*(addr++)));
+ printk("\n");
+}
+#else
+static void of_dump_addr(const char *s, const u32 *addr, int na) { }
+#endif
+
+/* Callbacks for bus specific translators */
+struct of_bus {
+ const char *name;
+ const char *addresses;
+ int (*match)(struct device_node *parent);
+ void (*count_cells)(struct device_node *child,
+ int *addrc, int *sizec);
+ u64 (*map)(u32 *addr, const u32 *range,
+ int na, int ns, int pna);
+ int (*translate)(u32 *addr, u64 offset, int na);
+ unsigned int (*get_flags)(const u32 *addr);
+};
+
+/*
+ * Default translator (generic bus)
+ */
+
+static void of_bus_default_count_cells(struct device_node *dev,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = of_n_addr_cells(dev);
+ if (sizec)
+ *sizec = of_n_size_cells(dev);
+}
+
+static u64 of_bus_default_map(u32 *addr, const u32 *range,
+ int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ cp = of_read_number(range, na);
+ s = of_read_number(range + na + pna, ns);
+ da = of_read_number(addr, na);
+
+ pr_debug("OF: default map, cp=%llx, s=%llx, da=%llx\n",
+ (unsigned long long)cp, (unsigned long long)s,
+ (unsigned long long)da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+static int of_bus_default_translate(u32 *addr, u64 offset, int na)
+{
+ u64 a = of_read_number(addr, na);
+ memset(addr, 0, na * 4);
+ a += offset;
+ if (na > 1)
+ addr[na - 2] = cpu_to_be32(a >> 32);
+ addr[na - 1] = cpu_to_be32(a & 0xffffffffu);
+
+ return 0;
+}
+
+static unsigned int of_bus_default_get_flags(const u32 *addr)
+{
+ return IORESOURCE_MEM;
+}
+
+#ifdef CONFIG_PCI
+/*
+ * PCI bus specific translator
+ */
+
+static int of_bus_pci_match(struct device_node *np)
+{
+ /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
+}
+
+static void of_bus_pci_count_cells(struct device_node *np,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 3;
+ if (sizec)
+ *sizec = 2;
+}
+
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ switch ((w >> 24) & 0x03) {
+ case 0x01:
+ flags |= IORESOURCE_IO;
+ break;
+ case 0x02: /* 32 bits */
+ case 0x03: /* 64 bits */
+ flags |= IORESOURCE_MEM;
+ break;
+ }
+ if (w & 0x40000000)
+ flags |= IORESOURCE_PREFETCH;
+ return flags;
+}
+
+static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+ unsigned int af, rf;
+
+ af = of_bus_pci_get_flags(addr);
+ rf = of_bus_pci_get_flags(range);
+
+ /* Check address type match */
+ if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
+ return OF_BAD_ADDR;
+
+ /* Read address values, skipping high cell */
+ cp = of_read_number(range + 1, na - 1);
+ s = of_read_number(range + na + pna, ns);
+ da = of_read_number(addr + 1, na - 1);
+
+ pr_debug("OF: PCI map, cp=%llx, s=%llx, da=%llx\n",
+ (unsigned long long)cp, (unsigned long long)s,
+ (unsigned long long)da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
+{
+ return of_bus_default_translate(addr + 1, offset, na - 1);
+}
+
+const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+ unsigned int *flags)
+{
+ const u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ if (strcmp(bus->name, "pci")) {
+ of_node_put(parent);
+ return NULL;
+ }
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = of_get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
+ u32 val = be32_to_cpu(prop[0]);
+ if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+ if (size)
+ *size = of_read_number(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ const u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_pci_address(dev, bar, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+#endif /* CONFIG_PCI */
+
+/*
+ * ISA bus specific translator
+ */
+
+static int of_bus_isa_match(struct device_node *np)
+{
+ return !strcmp(np->name, "isa");
+}
+
+static void of_bus_isa_count_cells(struct device_node *child,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 2;
+ if (sizec)
+ *sizec = 1;
+}
+
+static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ /* Check address type match */
+ if ((addr[0] ^ range[0]) & 0x00000001)
+ return OF_BAD_ADDR;
+
+ /* Read address values, skipping high cell */
+ cp = of_read_number(range + 1, na - 1);
+ s = of_read_number(range + na + pna, ns);
+ da = of_read_number(addr + 1, na - 1);
+
+ pr_debug("OF: ISA map, cp=%llx, s=%llx, da=%llx\n",
+ (unsigned long long)cp, (unsigned long long)s,
+ (unsigned long long)da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+static int of_bus_isa_translate(u32 *addr, u64 offset, int na)
+{
+ return of_bus_default_translate(addr + 1, offset, na - 1);
+}
+
+static unsigned int of_bus_isa_get_flags(const u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ if (w & 1)
+ flags |= IORESOURCE_IO;
+ else
+ flags |= IORESOURCE_MEM;
+ return flags;
+}
+
+/*
+ * Array of bus specific translators
+ */
+
+static struct of_bus of_busses[] = {
+#ifdef CONFIG_PCI
+ /* PCI */
+ {
+ .name = "pci",
+ .addresses = "assigned-addresses",
+ .match = of_bus_pci_match,
+ .count_cells = of_bus_pci_count_cells,
+ .map = of_bus_pci_map,
+ .translate = of_bus_pci_translate,
+ .get_flags = of_bus_pci_get_flags,
+ },
+#endif /* CONFIG_PCI */
+ /* ISA */
+ {
+ .name = "isa",
+ .addresses = "reg",
+ .match = of_bus_isa_match,
+ .count_cells = of_bus_isa_count_cells,
+ .map = of_bus_isa_map,
+ .translate = of_bus_isa_translate,
+ .get_flags = of_bus_isa_get_flags,
+ },
+ /* Default */
+ {
+ .name = "default",
+ .addresses = "reg",
+ .match = NULL,
+ .count_cells = of_bus_default_count_cells,
+ .map = of_bus_default_map,
+ .translate = of_bus_default_translate,
+ .get_flags = of_bus_default_get_flags,
+ },
+};
+
+static struct of_bus *of_match_bus(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(of_busses); i++)
+ if (!of_busses[i].match || of_busses[i].match(np))
+ return &of_busses[i];
+ BUG();
+ return NULL;
+}
+
+static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ struct of_bus *pbus, u32 *addr,
+ int na, int ns, int pna, const char *rprop)
+{
+ const u32 *ranges;
+ unsigned int rlen;
+ int rone;
+ u64 offset = OF_BAD_ADDR;
+
+ /* Normally, an absence of a "ranges" property means we are
+ * crossing a non-translatable boundary, and thus the addresses
+ * below the current node cannot be converted to CPU physical ones.
+ * Unfortunately, while this is very clear in the spec, it's not
+ * what Apple understood, and they do have things like /uni-n or
+ * /ht nodes with no "ranges" property and a lot of perfectly
+ * usable mapped devices below them. Thus we treat the absence of
+ * "ranges" as equivalent to an empty "ranges" property which means
+ * a 1:1 translation at that level. It's up to the caller not to try
+ * to translate addresses that aren't supposed to be translated in
+ * the first place. --BenH.
+ *
+ * As far as we know, this damage only exists on Apple machines, so
+ * this code is only enabled on powerpc. --gcl
+ */
+ ranges = of_get_property(parent, rprop, &rlen);
+#if !defined(CONFIG_PPC)
+ if (ranges == NULL) {
+ pr_err("OF: no ranges; cannot translate\n");
+ return 1;
+ }
+#endif /* !defined(CONFIG_PPC) */
+ if (ranges == NULL || rlen == 0) {
+ offset = of_read_number(addr, na);
+ memset(addr, 0, pna * 4);
+ pr_debug("OF: empty ranges; 1:1 translation\n");
+ goto finish;
+ }
+
+ pr_debug("OF: walking ranges...\n");
+
+ /* Now walk through the ranges */
+ rlen /= 4;
+ rone = na + pna + ns;
+ for (; rlen >= rone; rlen -= rone, ranges += rone) {
+ offset = bus->map(addr, ranges, na, ns, pna);
+ if (offset != OF_BAD_ADDR)
+ break;
+ }
+ if (offset == OF_BAD_ADDR) {
+ pr_debug("OF: not found !\n");
+ return 1;
+ }
+ memcpy(addr, ranges + na, 4 * pna);
+
+ finish:
+ of_dump_addr("OF: parent translation for:", addr, pna);
+ pr_debug("OF: with offset: %llx\n", (unsigned long long)offset);
+
+ /* Translate it into parent bus space */
+ return pbus->translate(addr, offset, pna);
+}
+
+/*
+ * Translate an address from the device tree into a CPU physical address;
+ * this walks up the tree and applies the various bus mappings along the
+ * way.
+ *
+ * Note: We consider crossing any level with #size-cells == 0 to mean
+ * that translation is impossible (i.e. we are not dealing with a value
+ * that can be mapped to a CPU physical address). This is not really specified
+ * that way, but this is traditionally the way IBM, at least, does things.
+ */
+u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
+ const char *rprop)
+{
+ struct device_node *parent = NULL;
+ struct of_bus *bus, *pbus;
+ u32 addr[OF_MAX_ADDR_CELLS];
+ int na, ns, pna, pns;
+ u64 result = OF_BAD_ADDR;
+
+ pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+
+ /* Increase refcount at current level */
+ of_node_get(dev);
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ goto bail;
+ bus = of_match_bus(parent);
+
+ /* Count address cells & copy address locally */
+ bus->count_cells(dev, &na, &ns);
+ if (!OF_CHECK_COUNTS(na, ns)) {
+ printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
+ dev->full_name);
+ goto bail;
+ }
+ memcpy(addr, in_addr, na * 4);
+
+ pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
+ bus->name, na, ns, parent->full_name);
+ of_dump_addr("OF: translating address:", addr, na);
+
+ /* Translate */
+ for (;;) {
+ /* Switch to parent bus */
+ of_node_put(dev);
+ dev = parent;
+ parent = of_get_parent(dev);
+
+ /* If root, we have finished */
+ if (parent == NULL) {
+ pr_debug("OF: reached root node\n");
+ result = of_read_number(addr, na);
+ break;
+ }
+
+ /* Get new parent bus and counts */
+ pbus = of_match_bus(parent);
+ pbus->count_cells(dev, &pna, &pns);
+ if (!OF_CHECK_COUNTS(pna, pns)) {
+ printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
+ dev->full_name);
+ break;
+ }
+
+ pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
+ pbus->name, pna, pns, parent->full_name);
+
+ /* Apply bus translation */
+ if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
+ break;
+
+ /* Complete the move up one level */
+ na = pna;
+ ns = pns;
+ bus = pbus;
+
+ of_dump_addr("OF: one level translation:", addr, na);
+ }
+ bail:
+ of_node_put(parent);
+ of_node_put(dev);
+
+ return result;
+}
+
+u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+{
+ return __of_translate_address(dev, in_addr, "ranges");
+}
+EXPORT_SYMBOL(of_translate_address);
+
+u64 of_translate_dma_address(struct device_node *dev, const u32 *in_addr)
+{
+ return __of_translate_address(dev, in_addr, "dma-ranges");
+}
+EXPORT_SYMBOL(of_translate_dma_address);
+
+const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
+ unsigned int *flags)
+{
+ const u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = of_get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if (i == index) {
+ if (size)
+ *size = of_read_number(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_address);
+
+static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
+ u64 size, unsigned int flags,
+ struct resource *r)
+{
+ u64 taddr;
+
+ if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
+ return -EINVAL;
+ taddr = of_translate_address(dev, addrp);
+ if (taddr == OF_BAD_ADDR)
+ return -EINVAL;
+ memset(r, 0, sizeof(struct resource));
+ if (flags & IORESOURCE_IO) {
+ unsigned long port;
+ port = pci_address_to_pio(taddr);
+ if (port == (unsigned long)-1)
+ return -EINVAL;
+ r->start = port;
+ r->end = port + size - 1;
+ } else {
+ r->start = taddr;
+ r->end = taddr + size - 1;
+ }
+ r->flags = flags;
+ r->name = dev->full_name;
+ return 0;
+}
+
+/**
+ * of_address_to_resource - Translate device tree address and return as resource
+ *
+ * Note that if your address is a PIO address, the conversion will fail if
+ * the physical address can't be internally converted to an IO token with
+ * pci_address_to_pio(); that happens when it is either called too early or
+ * cannot be matched to any host bridge IO space.
+ */
+int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r)
+{
+ const u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_address(dev, index, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_address_to_resource);
+
+
+/**
+ * of_iomap - Maps the memory mapped IO for a given device_node
+ * @np: the device node whose io range will be mapped
+ * @index: index of the io range
+ *
+ * Returns a pointer to the mapped memory
+ */
+void __iomem *of_iomap(struct device_node *np, int index)
+{
+ struct resource res;
+
+ if (of_address_to_resource(np, index, &res))
+ return NULL;
+
+ return ioremap(res.start, 1 + res.end - res.start);
+}
+EXPORT_SYMBOL(of_iomap);
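A hedged usage sketch for the two helpers above, assuming a struct device_node *np handed in by a (hypothetical) caller:

struct resource res;
void __iomem *regs;

/* Translate the first "reg" entry into a CPU-physical resource... */
if (of_address_to_resource(np, 0, &res))
	return -EINVAL;
pr_debug("device registers at %pR\n", &res);

/* ...or map it directly when only the MMIO pointer is needed. */
regs = of_iomap(np, 0);
if (!regs)
	return -ENOMEM;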
diff --git a/drivers/of/base.c b/drivers/of/base.c
index b5ad9740d8b..aa805250de7 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -545,74 +545,28 @@ struct device_node *of_find_matching_node(struct device_node *from,
EXPORT_SYMBOL(of_find_matching_node);
/**
- * of_modalias_table: Table of explicit compatible ==> modalias mappings
- *
- * This table allows particulare compatible property values to be mapped
- * to modalias strings. This is useful for busses which do not directly
- * understand the OF device tree but are populated based on data contained
- * within the device tree. SPI and I2C are the two current users of this
- * table.
- *
- * In most cases, devices do not need to be listed in this table because
- * the modalias value can be derived directly from the compatible table.
- * However, if for any reason a value cannot be derived, then this table
- * provides a method to override the implicit derivation.
- *
- * At the moment, a single table is used for all bus types because it is
- * assumed that the data size is small and that the compatible values
- * should already be distinct enough to differentiate between SPI, I2C
- * and other devices.
- */
-struct of_modalias_table {
- char *of_device;
- char *modalias;
-};
-static struct of_modalias_table of_modalias_table[] = {
- { "fsl,mcu-mpc8349emitx", "mcu-mpc8349emitx" },
- { "mmc-spi-slot", "mmc_spi" },
-};
-
-/**
* of_modalias_node - Lookup appropriate modalias for a device node
* @node: pointer to a device tree node
* @modalias: Pointer to buffer that modalias value will be copied into
* @len: Length of modalias value
*
- * Based on the value of the compatible property, this routine will determine
- * an appropriate modalias value for a particular device tree node. Two
- * separate methods are attempted to derive a modalias value.
+ * Based on the value of the compatible property, this routine will attempt
+ * to choose an appropriate modalias value for a particular device tree node.
+ * It does this by stripping the manufacturer prefix (as delimited by a ',')
+ * from the first entry in the compatible list property.
*
- * First method is to lookup the compatible value in of_modalias_table.
- * Second is to strip off the manufacturer prefix from the first
- * compatible entry and use the remainder as modalias
- *
- * This routine returns 0 on success
+ * This routine returns 0 on success, <0 on failure.
*/
int of_modalias_node(struct device_node *node, char *modalias, int len)
{
- int i, cplen;
- const char *compatible;
- const char *p;
-
- /* 1. search for exception list entry */
- for (i = 0; i < ARRAY_SIZE(of_modalias_table); i++) {
- compatible = of_modalias_table[i].of_device;
- if (!of_device_is_compatible(node, compatible))
- continue;
- strlcpy(modalias, of_modalias_table[i].modalias, len);
- return 0;
- }
+ const char *compatible, *p;
+ int cplen;
compatible = of_get_property(node, "compatible", &cplen);
- if (!compatible)
+ if (!compatible || strlen(compatible) > cplen)
return -ENODEV;
-
- /* 2. take first compatible entry and strip manufacturer */
p = strchr(compatible, ',');
- if (!p)
- return -ENODEV;
- p++;
- strlcpy(modalias, p, len);
+ strlcpy(modalias, p ? p + 1 : compatible, len);
return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);
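With the exception table gone, the modalias is now always derived from the first compatible entry by stripping the vendor prefix (falling back to the whole string if no ',' is present). A small sketch, with the node pointer np assumed:

char modalias[32];

/* compatible = "fsl,mcu-mpc8349emitx" now yields "mcu-mpc8349emitx",
 * the same value the removed table used to provide explicitly. */
if (of_modalias_node(np, modalias, sizeof(modalias)) == 0)
	pr_debug("%s -> modalias %s\n", np->full_name, modalias);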
@@ -651,14 +605,14 @@ EXPORT_SYMBOL(of_find_node_by_phandle);
struct device_node *
of_parse_phandle(struct device_node *np, const char *phandle_name, int index)
{
- const phandle *phandle;
+ const __be32 *phandle;
int size;
phandle = of_get_property(np, phandle_name, &size);
if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
return NULL;
- return of_find_node_by_phandle(phandle[index]);
+ return of_find_node_by_phandle(be32_to_cpup(phandle + index));
}
EXPORT_SYMBOL(of_parse_phandle);
@@ -714,16 +668,16 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
while (list < list_end) {
const __be32 *cells;
- const phandle *phandle;
+ phandle phandle;
- phandle = list++;
+ phandle = be32_to_cpup(list++);
args = list;
/* one cell hole in the list = <>; */
- if (!*phandle)
+ if (!phandle)
goto next;
- node = of_find_node_by_phandle(*phandle);
+ node = of_find_node_by_phandle(phandle);
if (!node) {
pr_debug("%s: could not find phandle\n",
np->full_name);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 7d18f8e0b01..92de0eb74ae 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -14,19 +14,19 @@
* @ids: array of of device match structures to search in
* @dev: the of device structure to match against
*
- * Used by a driver to check whether an of_device present in the
+ * Used by a driver to check whether a platform_device present in the
* system is in its list of supported devices.
*/
const struct of_device_id *of_match_device(const struct of_device_id *matches,
const struct device *dev)
{
- if (!dev->of_node)
+ if ((!matches) || (!dev->of_node))
return NULL;
return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
-struct of_device *of_dev_get(struct of_device *dev)
+struct platform_device *of_dev_get(struct platform_device *dev)
{
struct device *tmp;
@@ -34,13 +34,13 @@ struct of_device *of_dev_get(struct of_device *dev)
return NULL;
tmp = get_device(&dev->dev);
if (tmp)
- return to_of_device(tmp);
+ return to_platform_device(tmp);
else
return NULL;
}
EXPORT_SYMBOL(of_dev_get);
-void of_dev_put(struct of_device *dev)
+void of_dev_put(struct platform_device *dev)
{
if (dev)
put_device(&dev->dev);
@@ -50,28 +50,25 @@ EXPORT_SYMBOL(of_dev_put);
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct of_device *ofdev;
+ struct platform_device *ofdev;
- ofdev = to_of_device(dev);
+ ofdev = to_platform_device(dev);
return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct of_device *ofdev;
+ struct platform_device *ofdev;
- ofdev = to_of_device(dev);
+ ofdev = to_platform_device(dev);
return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
}
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct of_device *ofdev = to_of_device(dev);
- ssize_t len = 0;
-
- len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2);
+ ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
buf[len] = '\n';
buf[len+1] = 0;
return len+1;
@@ -93,20 +90,25 @@ struct device_attribute of_platform_device_attrs[] = {
*/
void of_release_dev(struct device *dev)
{
- struct of_device *ofdev;
+ struct platform_device *ofdev;
- ofdev = to_of_device(dev);
+ ofdev = to_platform_device(dev);
of_node_put(ofdev->dev.of_node);
kfree(ofdev);
}
EXPORT_SYMBOL(of_release_dev);
-int of_device_register(struct of_device *ofdev)
+int of_device_register(struct platform_device *ofdev)
{
BUG_ON(ofdev->dev.of_node == NULL);
device_initialize(&ofdev->dev);
+ /* name and id have to be set so that the platform bus doesn't get
+ * confused on matching */
+ ofdev->name = dev_name(&ofdev->dev);
+ ofdev->id = -1;
+
/* device_add will assume that this device is on the same node as
* the parent. If there is no parent defined, set the node
* explicitly */
@@ -117,25 +119,24 @@ int of_device_register(struct of_device *ofdev)
}
EXPORT_SYMBOL(of_device_register);
-void of_device_unregister(struct of_device *ofdev)
+void of_device_unregister(struct platform_device *ofdev)
{
device_unregister(&ofdev->dev);
}
EXPORT_SYMBOL(of_device_unregister);
-ssize_t of_device_get_modalias(struct of_device *ofdev,
- char *str, ssize_t len)
+ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
{
const char *compat;
int cplen, i;
ssize_t tsize, csize, repend;
/* Name & Type */
- csize = snprintf(str, len, "of:N%sT%s", ofdev->dev.of_node->name,
- ofdev->dev.of_node->type);
+ csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
+ dev->of_node->type);
/* Get compatible property if any */
- compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen);
+ compat = of_get_property(dev->of_node, "compatible", &cplen);
if (!compat)
return csize;
@@ -170,3 +171,51 @@ ssize_t of_device_get_modalias(struct of_device *ofdev,
return tsize;
}
+
+/**
+ * of_device_uevent - Display OF related uevent information
+ */
+int of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const char *compat;
+ int seen = 0, cplen, sl;
+
+ if ((!dev) || (!dev->of_node))
+ return -ENODEV;
+
+ if (add_uevent_var(env, "OF_NAME=%s", dev->of_node->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type))
+ return -ENOMEM;
+
+ /* Since the compatible field can contain pretty much anything,
+ * it's not really legal to split it out with commas. We split it
+ * up using a number of environment variables instead. */
+
+ compat = of_get_property(dev->of_node, "compatible", &cplen);
+ while (compat && *compat && cplen > 0) {
+ if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat))
+ return -ENOMEM;
+
+ sl = strlen(compat) + 1;
+ compat += sl;
+ cplen -= sl;
+ seen++;
+ }
+
+ if (add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen))
+ return -ENOMEM;
+
+ /* modalias is trickier, we add it in 2 steps */
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+
+ sl = of_device_get_modalias(dev, &env->buf[env->buflen-1],
+ sizeof(env->buf) - env->buflen);
+ if (sl >= (sizeof(env->buf) - env->buflen))
+ return -ENOMEM;
+ env->buflen += sl;
+
+ return 0;
+}
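As an illustration only (node names hypothetical), a node called "ethernet" of type "network" with the single compatible entry "xlnx,xps-ethernetlite-1.00.a" would roughly produce the following hotplug environment from the function above:

  OF_NAME=ethernet
  OF_TYPE=network
  OF_COMPATIBLE_0=xlnx,xps-ethernetlite-1.00.a
  OF_COMPATIBLE_N=1
  MODALIAS=of:NethernetTnetworkCxlnx,xps-ethernetlite-1.00.a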
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b6987bba855..65da5aec755 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -69,9 +69,9 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
u32 sz = be32_to_cpup((__be32 *)p);
p += 8;
if (be32_to_cpu(initial_boot_params->version) < 0x10)
- p = _ALIGN(p, sz >= 8 ? 8 : 4);
+ p = ALIGN(p, sz >= 8 ? 8 : 4);
p += sz;
- p = _ALIGN(p, 4);
+ p = ALIGN(p, 4);
continue;
}
if (tag != OF_DT_BEGIN_NODE) {
@@ -80,7 +80,7 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
}
depth++;
pathp = (char *)p;
- p = _ALIGN(p + strlen(pathp) + 1, 4);
+ p = ALIGN(p + strlen(pathp) + 1, 4);
if ((*pathp) == '/') {
char *lp, *np;
for (lp = NULL, np = pathp; *np; np++)
@@ -109,7 +109,7 @@ unsigned long __init of_get_flat_dt_root(void)
p += 4;
BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE);
p += 4;
- return _ALIGN(p + strlen((char *)p) + 1, 4);
+ return ALIGN(p + strlen((char *)p) + 1, 4);
}
/**
@@ -138,7 +138,7 @@ void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
noff = be32_to_cpup((__be32 *)(p + 4));
p += 8;
if (be32_to_cpu(initial_boot_params->version) < 0x10)
- p = _ALIGN(p, sz >= 8 ? 8 : 4);
+ p = ALIGN(p, sz >= 8 ? 8 : 4);
nstr = find_flat_dt_string(noff);
if (nstr == NULL) {
@@ -151,7 +151,7 @@ void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
return (void *)p;
}
p += sz;
- p = _ALIGN(p, 4);
+ p = ALIGN(p, 4);
} while (1);
}
@@ -169,7 +169,7 @@ int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
if (cp == NULL)
return 0;
while (cplen > 0) {
- if (strncasecmp(cp, compat, strlen(compat)) == 0)
+ if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
return 1;
l = strlen(cp) + 1;
cp += l;
@@ -184,7 +184,7 @@ static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
{
void *res;
- *mem = _ALIGN(*mem, align);
+ *mem = ALIGN(*mem, align);
res = (void *)*mem;
*mem += size;
@@ -220,7 +220,7 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
*p += 4;
pathp = (char *)*p;
l = allocl = strlen(pathp) + 1;
- *p = _ALIGN(*p + l, 4);
+ *p = ALIGN(*p + l, 4);
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -299,7 +299,7 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
noff = be32_to_cpup((__be32 *)((*p) + 4));
*p += 8;
if (be32_to_cpu(initial_boot_params->version) < 0x10)
- *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
+ *p = ALIGN(*p, sz >= 8 ? 8 : 4);
pname = find_flat_dt_string(noff);
if (pname == NULL) {
@@ -320,20 +320,20 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
if ((strcmp(pname, "phandle") == 0) ||
(strcmp(pname, "linux,phandle") == 0)) {
if (np->phandle == 0)
- np->phandle = *((u32 *)*p);
+ np->phandle = be32_to_cpup((__be32 *)*p);
}
/* And we process the "ibm,phandle" property
* used in pSeries dynamic device tree
* stuff */
if (strcmp(pname, "ibm,phandle") == 0)
- np->phandle = *((u32 *)*p);
+ np->phandle = be32_to_cpup((__be32 *)*p);
pp->name = pname;
pp->length = sz;
pp->value = (void *)*p;
*prev_pp = pp;
prev_pp = &pp->next;
}
- *p = _ALIGN((*p) + sz, 4);
+ *p = ALIGN((*p) + sz, 4);
}
/* with version 0x10 we may not have the name property, recreate
* it here from the unit name if absent
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index a1b31a4abae..905960338fb 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -11,13 +11,14 @@
* (at your option) any later version.
*/
-#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_gpio.h>
-#include <asm/prom.h>
+#include <linux/slab.h>
/**
* of_get_gpio_flags - Get a GPIO number and flags to use with GPIO API
@@ -33,32 +34,32 @@ int of_get_gpio_flags(struct device_node *np, int index,
enum of_gpio_flags *flags)
{
int ret;
- struct device_node *gc;
- struct of_gpio_chip *of_gc = NULL;
+ struct device_node *gpio_np;
+ struct gpio_chip *gc;
int size;
const void *gpio_spec;
const __be32 *gpio_cells;
ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
- &gc, &gpio_spec);
+ &gpio_np, &gpio_spec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
goto err0;
}
- of_gc = gc->data;
- if (!of_gc) {
+ gc = of_node_to_gpiochip(gpio_np);
+ if (!gc) {
pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gc->full_name);
+ np->full_name, gpio_np->full_name);
ret = -ENODEV;
goto err1;
}
- gpio_cells = of_get_property(gc, "#gpio-cells", &size);
+ gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
if (!gpio_cells || size != sizeof(*gpio_cells) ||
- be32_to_cpup(gpio_cells) != of_gc->gpio_cells) {
+ be32_to_cpup(gpio_cells) != gc->of_gpio_n_cells) {
pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gc->full_name);
+ np->full_name, gpio_np->full_name);
ret = -EINVAL;
goto err1;
}
@@ -67,13 +68,13 @@ int of_get_gpio_flags(struct device_node *np, int index,
if (flags)
*flags = 0;
- ret = of_gc->xlate(of_gc, np, gpio_spec, flags);
+ ret = gc->of_xlate(gc, np, gpio_spec, flags);
if (ret < 0)
goto err1;
- ret += of_gc->gc.base;
+ ret += gc->base;
err1:
- of_node_put(gc);
+ of_node_put(gpio_np);
err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
@@ -116,7 +117,7 @@ EXPORT_SYMBOL(of_gpio_count);
/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
- * @of_gc: pointer to the of_gpio_chip structure
+ * @gc: pointer to the gpio_chip structure
* @np: device node of the GPIO chip
* @gpio_spec: gpio specifier as found in the device tree
* @flags: a flags pointer to fill in
@@ -125,8 +126,8 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np,
- const void *gpio_spec, enum of_gpio_flags *flags)
+static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags)
{
const __be32 *gpio = gpio_spec;
const u32 n = be32_to_cpup(gpio);
@@ -137,12 +138,12 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np,
* number and the flags from a single gpio cell -- this is possible,
* but not recommended).
*/
- if (of_gc->gpio_cells < 2) {
+ if (gc->of_gpio_n_cells < 2) {
WARN_ON(1);
return -EINVAL;
}
- if (n > of_gc->gc.ngpio)
+ if (n > gc->ngpio)
return -EINVAL;
if (flags)
@@ -150,7 +151,6 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np,
return n;
}
-EXPORT_SYMBOL(of_gpio_simple_xlate);
/**
* of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
@@ -161,10 +161,8 @@ EXPORT_SYMBOL(of_gpio_simple_xlate);
*
* 1) In the gpio_chip structure:
* - all the callbacks
- *
- * 2) In the of_gpio_chip structure:
- * - gpio_cells
- * - xlate callback (optional)
+ * - of_gpio_n_cells
+ * - of_xlate callback (optional)
*
* 3) In the of_mm_gpio_chip structure:
* - save_regs callback (optional)
@@ -177,8 +175,7 @@ int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc)
{
int ret = -ENOMEM;
- struct of_gpio_chip *of_gc = &mm_gc->of_gc;
- struct gpio_chip *gc = &of_gc->gc;
+ struct gpio_chip *gc = &mm_gc->gc;
gc->label = kstrdup(np->full_name, GFP_KERNEL);
if (!gc->label)
@@ -190,26 +187,19 @@ int of_mm_gpiochip_add(struct device_node *np,
gc->base = -1;
- if (!of_gc->xlate)
- of_gc->xlate = of_gpio_simple_xlate;
-
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
- np->data = of_gc;
+ mm_gc->gc.of_node = np;
ret = gpiochip_add(gc);
if (ret)
goto err2;
- /* We don't want to lose the node and its ->data */
- of_node_get(np);
-
pr_debug("%s: registered as generic GPIO chip, base is %d\n",
np->full_name, gc->base);
return 0;
err2:
- np->data = NULL;
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
@@ -219,3 +209,36 @@ err0:
return ret;
}
EXPORT_SYMBOL(of_mm_gpiochip_add);
+
+void of_gpiochip_add(struct gpio_chip *chip)
+{
+ if ((!chip->of_node) && (chip->dev))
+ chip->of_node = chip->dev->of_node;
+
+ if (!chip->of_node)
+ return;
+
+ if (!chip->of_xlate) {
+ chip->of_gpio_n_cells = 2;
+ chip->of_xlate = of_gpio_simple_xlate;
+ }
+
+ of_node_get(chip->of_node);
+}
+
+void of_gpiochip_remove(struct gpio_chip *chip)
+{
+ if (chip->of_node)
+ of_node_put(chip->of_node);
+}
+
+/* Private function for resolving node pointer to gpio_chip */
+static int of_gpiochip_is_match(struct gpio_chip *chip, void *data)
+{
+ return chip->of_node == data;
+}
+
+struct gpio_chip *of_node_to_gpiochip(struct device_node *np)
+{
+ return gpiochip_find(np, of_gpiochip_is_match);
+}
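A hedged sketch of the consumer side after this conversion, assuming a node np with a "gpios" property (the label string is hypothetical):

enum of_gpio_flags flags;
int gpio;

/* Resolves the controller via gpiochip_find() rather than np->data. */
gpio = of_get_gpio_flags(np, 0, &flags);
if (gpio_is_valid(gpio) && !gpio_request(gpio, "example"))
	gpio_direction_output(gpio, !(flags & OF_GPIO_ACTIVE_LOW));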
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
new file mode 100644
index 00000000000..6e595e5a397
--- /dev/null
+++ b/drivers/of/irq.c
@@ -0,0 +1,349 @@
+/*
+ * Derived from arch/i386/kernel/irq.c
+ * Copyright (C) 1992 Linus Torvalds
+ * Adapted from arch/i386 by Gary Thomas
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 1996-2001 Cort Dougan
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file contains the code used to make IRQ descriptions in the
+ * device tree to actual irq numbers on an interrupt controller
+ * driver.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/string.h>
+
+/**
+ * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
+ * @device: Device node of the device whose interrupt is to be mapped
+ * @index: Index of the interrupt to map
+ *
+ * This function is a wrapper that chains of_irq_map_one() and
+ * irq_create_of_mapping() to make things easier for callers.
+ */
+unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
+{
+ struct of_irq oirq;
+
+ if (of_irq_map_one(dev, index, &oirq))
+ return NO_IRQ;
+
+ return irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+}
+EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
+
+/**
+ * of_irq_find_parent - Given a device node, find its interrupt parent node
+ * @child: pointer to device node
+ *
+ * Returns a pointer to the interrupt parent node, or NULL if the interrupt
+ * parent could not be determined.
+ */
+static struct device_node *of_irq_find_parent(struct device_node *child)
+{
+ struct device_node *p;
+ const __be32 *parp;
+
+ if (!of_node_get(child))
+ return NULL;
+
+ do {
+ parp = of_get_property(child, "interrupt-parent", NULL);
+ if (parp == NULL)
+ p = of_get_parent(child);
+ else {
+ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+ p = of_node_get(of_irq_dflt_pic);
+ else
+ p = of_find_node_by_phandle(be32_to_cpup(parp));
+ }
+ of_node_put(child);
+ child = p;
+ } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
+
+ return p;
+}
+
+/**
+ * of_irq_map_raw - Low level interrupt tree parsing
+ * @parent: the device interrupt parent
+ * @intspec: interrupt specifier ("interrupts" property of the device)
+ * @ointsize: size of the passed in interrupt specifier
+ * @addr: address specifier (start of "reg" property of the device)
+ * @out_irq: structure of_irq filled by this function
+ *
+ * Returns 0 on success and a negative number on error
+ *
+ * This function is a low-level interrupt tree walking function. It
+ * can be used to do a partial walk with synthesized reg and interrupts
+ * properties, for example when resolving PCI interrupts when no device
+ * node exists for the parent.
+ */
+int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
+ u32 ointsize, const __be32 *addr, struct of_irq *out_irq)
+{
+ struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
+ const __be32 *tmp, *imap, *imask;
+ u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
+ int imaplen, match, i;
+
+ pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
+ parent->full_name, be32_to_cpup(intspec),
+ be32_to_cpup(intspec + 1), ointsize);
+
+ ipar = of_node_get(parent);
+
+ /* First get the #interrupt-cells property of the current cursor
+ * that tells us how to interpret the passed-in intspec. If there
+ * is none, we are nice and just walk up the tree
+ */
+ do {
+ tmp = of_get_property(ipar, "#interrupt-cells", NULL);
+ if (tmp != NULL) {
+ intsize = be32_to_cpu(*tmp);
+ break;
+ }
+ tnode = ipar;
+ ipar = of_irq_find_parent(ipar);
+ of_node_put(tnode);
+ } while (ipar);
+ if (ipar == NULL) {
+ pr_debug(" -> no parent found !\n");
+ goto fail;
+ }
+
+ pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
+
+ if (ointsize != intsize)
+ return -EINVAL;
+
+ /* Look for this #address-cells. We have to implement the old linux
+ * trick of looking for the parent here as some device-trees rely on it
+ */
+ old = of_node_get(ipar);
+ do {
+ tmp = of_get_property(old, "#address-cells", NULL);
+ tnode = of_get_parent(old);
+ of_node_put(old);
+ old = tnode;
+ } while (old && tmp == NULL);
+ of_node_put(old);
+ old = NULL;
+ addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp);
+
+ pr_debug(" -> addrsize=%d\n", addrsize);
+
+ /* Now start the actual "proper" walk of the interrupt tree */
+ while (ipar != NULL) {
+ /* Now check if cursor is an interrupt-controller and if it is
+ * then we are done
+ */
+ if (of_get_property(ipar, "interrupt-controller", NULL) !=
+ NULL) {
+ pr_debug(" -> got it !\n");
+ for (i = 0; i < intsize; i++)
+ out_irq->specifier[i] =
+ of_read_number(intspec +i, 1);
+ out_irq->size = intsize;
+ out_irq->controller = ipar;
+ of_node_put(old);
+ return 0;
+ }
+
+ /* Now look for an interrupt-map */
+ imap = of_get_property(ipar, "interrupt-map", &imaplen);
+ /* No interrupt map, check for an interrupt parent */
+ if (imap == NULL) {
+ pr_debug(" -> no map, getting parent\n");
+ newpar = of_irq_find_parent(ipar);
+ goto skiplevel;
+ }
+ imaplen /= sizeof(u32);
+
+ /* Look for a mask */
+ imask = of_get_property(ipar, "interrupt-map-mask", NULL);
+
+ /* If we were passed no "reg" property and we attempt to parse
+ * an interrupt-map, then #address-cells must be 0.
+ * Fail if it's not.
+ */
+ if (addr == NULL && addrsize != 0) {
+ pr_debug(" -> no reg passed in when needed !\n");
+ goto fail;
+ }
+
+ /* Parse interrupt-map */
+ match = 0;
+ while (imaplen > (addrsize + intsize + 1) && !match) {
+ /* Compare specifiers */
+ match = 1;
+ for (i = 0; i < addrsize && match; ++i) {
+ u32 mask = imask ? imask[i] : 0xffffffffu;
+ match = ((addr[i] ^ imap[i]) & mask) == 0;
+ }
+ for (; i < (addrsize + intsize) && match; ++i) {
+ u32 mask = imask ? imask[i] : 0xffffffffu;
+ match =
+ ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
+ }
+ imap += addrsize + intsize;
+ imaplen -= addrsize + intsize;
+
+ pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
+
+ /* Get the interrupt parent */
+ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+ newpar = of_node_get(of_irq_dflt_pic);
+ else
+ newpar = of_find_node_by_phandle(be32_to_cpup(imap));
+ imap++;
+ --imaplen;
+
+ /* Check if not found */
+ if (newpar == NULL) {
+ pr_debug(" -> imap parent not found !\n");
+ goto fail;
+ }
+
+ /* Get #interrupt-cells and #address-cells of new
+ * parent
+ */
+ tmp = of_get_property(newpar, "#interrupt-cells", NULL);
+ if (tmp == NULL) {
+ pr_debug(" -> parent lacks #interrupt-cells!\n");
+ goto fail;
+ }
+ newintsize = be32_to_cpu(*tmp);
+ tmp = of_get_property(newpar, "#address-cells", NULL);
+ newaddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp);
+
+ pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
+ newintsize, newaddrsize);
+
+ /* Check for malformed properties */
+ if (imaplen < (newaddrsize + newintsize))
+ goto fail;
+
+ imap += newaddrsize + newintsize;
+ imaplen -= newaddrsize + newintsize;
+
+ pr_debug(" -> imaplen=%d\n", imaplen);
+ }
+ if (!match)
+ goto fail;
+
+ of_node_put(old);
+ old = of_node_get(newpar);
+ addrsize = newaddrsize;
+ intsize = newintsize;
+ intspec = imap - intsize;
+ addr = intspec - addrsize;
+
+ skiplevel:
+ /* Iterate again with new parent */
+ pr_debug(" -> new parent: %s\n", newpar ? newpar->full_name : "<>");
+ of_node_put(ipar);
+ ipar = newpar;
+ newpar = NULL;
+ }
+ fail:
+ of_node_put(ipar);
+ of_node_put(old);
+ of_node_put(newpar);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(of_irq_map_raw);
+
+/**
+ * of_irq_map_one - Resolve an interrupt for a device
+ * @device: the device whose interrupt is to be resolved
+ * @index: index of the interrupt to resolve
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves an interrupt for a given device-tree node by
+ * walking the tree. It is the high-level counterpart to of_irq_map_raw().
+ */
+int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
+{
+ struct device_node *p;
+ const __be32 *intspec, *tmp, *addr;
+ u32 intsize, intlen;
+ int res = -EINVAL;
+
+ pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
+
+ /* OldWorld mac stuff is "special", handle out of line */
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return of_irq_map_oldworld(device, index, out_irq);
+
+ /* Get the interrupts property */
+ intspec = of_get_property(device, "interrupts", &intlen);
+ if (intspec == NULL)
+ return -EINVAL;
+ intlen /= sizeof(*intspec);
+
+ pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
+
+ /* Get the reg property (if any) */
+ addr = of_get_property(device, "reg", NULL);
+
+ /* Look for the interrupt parent. */
+ p = of_irq_find_parent(device);
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Get size of interrupt specifier */
+ tmp = of_get_property(p, "#interrupt-cells", NULL);
+ if (tmp == NULL)
+ goto out;
+ intsize = be32_to_cpu(*tmp);
+
+ pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
+
+ /* Check index */
+ if ((index + 1) * intsize > intlen)
+ goto out;
+
+ /* Get new specifier and map it */
+ res = of_irq_map_raw(p, intspec + index * intsize, intsize,
+ addr, out_irq);
+ out:
+ of_node_put(p);
+ return res;
+}
+EXPORT_SYMBOL_GPL(of_irq_map_one);
+
+/**
+ * of_irq_to_resource - Decode a node's IRQ and return it as a resource
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ * @r: pointer to resource structure to return result into.
+ */
+int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
+{
+ int irq = irq_of_parse_and_map(dev, index);
+
+ /* Only dereference the resource if both the
+ * resource and the irq are valid. */
+ if (r && irq != NO_IRQ) {
+ r->start = r->end = irq;
+ r->flags = IORESOURCE_IRQ;
+ r->name = dev->full_name;
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(of_irq_to_resource);
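A short hypothetical fragment showing of_irq_to_resource(): it combines the mapping above with a struct resource fill-in, which is also how of_device_alloc() further down builds its IRQ resources. "np" is assumed to be an already-resolved device node.

/* Hypothetical fragment ("np" is an already-resolved device node). */
struct resource r;

if (of_irq_to_resource(np, 0, &r) != NO_IRQ)
	pr_info("%s: irq starts at %llu\n", np->full_name,
		(unsigned long long)r.start);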
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index ab6522c8e4f..0a694debd22 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -14,57 +14,65 @@
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/of_i2c.h>
+#include <linux/of_irq.h>
#include <linux/module.h>
-void of_register_i2c_devices(struct i2c_adapter *adap,
- struct device_node *adap_node)
+void of_i2c_register_devices(struct i2c_adapter *adap)
{
void *result;
struct device_node *node;
- for_each_child_of_node(adap_node, node) {
+ /* Only register child devices if the adapter has a node pointer set */
+ if (!adap->dev.of_node)
+ return;
+
+ dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
+
+ for_each_child_of_node(adap->dev.of_node, node) {
struct i2c_board_info info = {};
struct dev_archdata dev_ad = {};
const __be32 *addr;
int len;
- if (of_modalias_node(node, info.type, sizeof(info.type)) < 0)
+ dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
+
+ if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+ dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
+ node->full_name);
continue;
+ }
addr = of_get_property(node, "reg", &len);
- if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) {
- printk(KERN_ERR
- "of-i2c: invalid i2c device entry\n");
+ if (!addr || (len < sizeof(int))) {
+ dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
+ node->full_name);
continue;
}
- info.irq = irq_of_parse_and_map(node, 0);
-
info.addr = be32_to_cpup(addr);
+ if (info.addr > (1 << 10) - 1) {
+ dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
+ info.addr, node->full_name);
+ continue;
+ }
- info.of_node = node;
+ info.irq = irq_of_parse_and_map(node, 0);
+ info.of_node = of_node_get(node);
info.archdata = &dev_ad;
request_module("%s", info.type);
result = i2c_new_device(adap, &info);
if (result == NULL) {
- printk(KERN_ERR
- "of-i2c: Failed to load driver for %s\n",
- info.type);
+ dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
+ node->full_name);
+ of_node_put(node);
irq_dispose_mapping(info.irq);
continue;
}
-
- /*
- * Get the node to not lose the dev_archdata->of_node.
- * Currently there is no way to put it back, as well as no
- * of_unregister_i2c_devices() call.
- */
- of_node_get(node);
}
}
-EXPORT_SYMBOL(of_register_i2c_devices);
+EXPORT_SYMBOL(of_i2c_register_devices);
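Illustrative sketch (not from this patch) of how an adapter driver is expected to use the renamed helper: set dev.of_node on the adapter, register it, then let of_i2c_register_devices() walk the child nodes. The foo_ identifiers are hypothetical.

static struct i2c_adapter foo_adap;	/* hypothetical adapter */

static int foo_i2c_probe(struct platform_device *pdev)
{
	int ret;

	/* ... controller setup elided ... */
	foo_adap.dev.parent = &pdev->dev;
	foo_adap.dev.of_node = pdev->dev.of_node;	/* enables the child scan */

	ret = i2c_add_adapter(&foo_adap);
	if (ret)
		return ret;

	of_i2c_register_devices(&foo_adap);
	return 0;
}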
static int of_dev_node_match(struct device *dev, void *data)
{
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 42a6715f8e8..1fce00eb421 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index 5fed7e3c7da..1dbce58a58b 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -9,17 +9,17 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
#include <linux/of_spi.h>
/**
* of_register_spi_devices - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
- * @np: parent node of SPI device nodes
*
- * Registers an spi_device for each child node of 'np' which has a 'reg'
+ * Registers an spi_device for each child node of the master's node that has a 'reg'
* property.
*/
-void of_register_spi_devices(struct spi_master *master, struct device_node *np)
+void of_register_spi_devices(struct spi_master *master)
{
struct spi_device *spi;
struct device_node *nc;
@@ -27,7 +27,10 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
int rc;
int len;
- for_each_child_of_node(np, nc) {
+ if (!master->dev.of_node)
+ return;
+
+ for_each_child_of_node(master->dev.of_node, nc) {
/* Alloc an spi_device */
spi = spi_alloc_device(master);
if (!spi) {
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7dacc1ebe91..bb72223c22a 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -14,8 +14,105 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_find_device_by_node - Find the platform_device associated with a node
+ * @np: Pointer to device tree node
+ *
+ * Returns platform_device pointer, or NULL if not found
+ */
+struct platform_device *of_find_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match);
+ return dev ? to_platform_device(dev) : NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
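Hypothetical sketch: resolving a phandle reference back to the platform_device created for it. "dev" is an assumed consumer device and "foo-dma" is a made-up phandle property name.

/* "foo-dma" is a made-up phandle property on the consumer's node. */
struct device_node *dn = of_parse_phandle(dev->of_node, "foo-dma", 0);
struct platform_device *dma_pdev = dn ? of_find_device_by_node(dn) : NULL;

of_node_put(dn);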
+
+static int platform_driver_probe_shim(struct platform_device *pdev)
+{
+ struct platform_driver *pdrv;
+ struct of_platform_driver *ofpdrv;
+ const struct of_device_id *match;
+
+ pdrv = container_of(pdev->dev.driver, struct platform_driver, driver);
+ ofpdrv = container_of(pdrv, struct of_platform_driver, platform_driver);
+
+ /* There is an unlikely chance that an of_platform driver might match
+ * on a non-OF platform device. If so, then of_match_device() will
+ * come up empty. Return -EINVAL in this case so other drivers get
+ * the chance to bind. */
+ match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
+ return match ? ofpdrv->probe(pdev, match) : -EINVAL;
+}
+
+static void platform_driver_shutdown_shim(struct platform_device *pdev)
+{
+ struct platform_driver *pdrv;
+ struct of_platform_driver *ofpdrv;
+
+ pdrv = container_of(pdev->dev.driver, struct platform_driver, driver);
+ ofpdrv = container_of(pdrv, struct of_platform_driver, platform_driver);
+ ofpdrv->shutdown(pdev);
+}
+
+/**
+ * of_register_platform_driver
+ */
+int of_register_platform_driver(struct of_platform_driver *drv)
+{
+ char *of_name;
+
+ /* setup of_platform_driver to platform_driver adaptors */
+ drv->platform_driver.driver = drv->driver;
+
+ /* Prefix the driver name with 'of:' to avoid namespace collisions
+ * and bogus matches. There are some drivers in the tree that
+ * register both an of_platform_driver and a platform_driver with
+ * the same name. This is a temporary measure until they are all
+ * cleaned up --gcl July 29, 2010 */
+ of_name = kmalloc(strlen(drv->driver.name) + 5, GFP_KERNEL);
+ if (!of_name)
+ return -ENOMEM;
+ sprintf(of_name, "of:%s", drv->driver.name);
+ drv->platform_driver.driver.name = of_name;
+
+ if (drv->probe)
+ drv->platform_driver.probe = platform_driver_probe_shim;
+ drv->platform_driver.remove = drv->remove;
+ if (drv->shutdown)
+ drv->platform_driver.shutdown = platform_driver_shutdown_shim;
+ drv->platform_driver.suspend = drv->suspend;
+ drv->platform_driver.resume = drv->resume;
+
+ return platform_driver_register(&drv->platform_driver);
+}
+EXPORT_SYMBOL(of_register_platform_driver);
+
+void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+ platform_driver_unregister(&drv->platform_driver);
+ kfree(drv->platform_driver.driver.name);
+ drv->platform_driver.driver.name = NULL;
+}
+EXPORT_SYMBOL(of_unregister_platform_driver);
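Illustrative sketch (hypothetical names) of an of_platform_driver registered through the adaptor: the legacy two-argument probe signature is kept, and the shim above translates it into a platform_driver probe.

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo" },	/* made-up compatible string */
	{},
};

static struct of_platform_driver foo_of_driver = {
	.probe	= foo_of_probe,	/* (struct platform_device *, const struct of_device_id *) */
	.remove	= foo_of_remove,
	.driver	= {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,
	},
};

static int __init foo_init(void)
{
	return of_register_platform_driver(&foo_of_driver);
}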
+
+#if defined(CONFIG_PPC_DCR)
+#include <asm/dcr.h>
+#endif
extern struct device_attribute of_platform_device_attrs[];
@@ -33,11 +130,11 @@ static int of_platform_device_probe(struct device *dev)
{
int error = -ENODEV;
struct of_platform_driver *drv;
- struct of_device *of_dev;
+ struct platform_device *of_dev;
const struct of_device_id *match;
drv = to_of_platform_driver(dev->driver);
- of_dev = to_of_device(dev);
+ of_dev = to_platform_device(dev);
if (!drv->probe)
return error;
@@ -55,7 +152,7 @@ static int of_platform_device_probe(struct device *dev)
static int of_platform_device_remove(struct device *dev)
{
- struct of_device *of_dev = to_of_device(dev);
+ struct platform_device *of_dev = to_platform_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
if (dev->driver && drv->remove)
@@ -65,7 +162,7 @@ static int of_platform_device_remove(struct device *dev)
static void of_platform_device_shutdown(struct device *dev)
{
- struct of_device *of_dev = to_of_device(dev);
+ struct platform_device *of_dev = to_platform_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
if (dev->driver && drv->shutdown)
@@ -76,7 +173,7 @@ static void of_platform_device_shutdown(struct device *dev)
static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
- struct of_device *of_dev = to_of_device(dev);
+ struct platform_device *of_dev = to_platform_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
int ret = 0;
@@ -87,7 +184,7 @@ static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg)
static int of_platform_legacy_resume(struct device *dev)
{
- struct of_device *of_dev = to_of_device(dev);
+ struct platform_device *of_dev = to_platform_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
int ret = 0;
@@ -384,15 +481,286 @@ int of_bus_type_init(struct bus_type *bus, const char *name)
int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
{
- drv->driver.bus = bus;
+ /*
+ * Temporary: of_platform_bus used to be distinct from the platform
+ * bus. It isn't anymore, and so drivers on the platform bus need
+ * to be registered in a special way.
+ *
+ * After all of_platform_bus_type drivers are converted to
+ * platform_drivers, this exception can be removed.
+ */
+ if (bus == &platform_bus_type)
+ return of_register_platform_driver(drv);
/* register with core */
+ drv->driver.bus = bus;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(of_register_driver);
void of_unregister_driver(struct of_platform_driver *drv)
{
- driver_unregister(&drv->driver);
+ if (drv->driver.bus == &platform_bus_type)
+ of_unregister_platform_driver(drv);
+ else
+ driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(of_unregister_driver);
+
+#if !defined(CONFIG_SPARC)
+/*
+ * The following routines scan a subtree and registers a device for
+ * each applicable node.
+ *
+ * Note: sparc doesn't use these routines because it has a different
+ * mechanism for creating devices from device tree nodes.
+ */
+
+/**
+ * of_device_make_bus_id - Use the device node data to assign a unique name
+ * @dev: pointer to device structure that is linked to a device tree node
+ *
+ * This routine will first try using either the dcr-reg or the reg property
+ * value to derive a unique name. As a last resort it will use the node
+ * name followed by a unique number.
+ */
+void of_device_make_bus_id(struct device *dev)
+{
+ static atomic_t bus_no_reg_magic;
+ struct device_node *node = dev->of_node;
+ const u32 *reg;
+ u64 addr;
+ int magic;
+
+#ifdef CONFIG_PPC_DCR
+ /*
+ * If it's a DCR based device, use 'd' for native DCRs
+ * and 'D' for MMIO DCRs.
+ */
+ reg = of_get_property(node, "dcr-reg", NULL);
+ if (reg) {
+#ifdef CONFIG_PPC_DCR_NATIVE
+ dev_set_name(dev, "d%x.%s", *reg, node->name);
+#else /* CONFIG_PPC_DCR_NATIVE */
+ u64 addr = of_translate_dcr_address(node, *reg, NULL);
+ if (addr != OF_BAD_ADDR) {
+ dev_set_name(dev, "D%llx.%s",
+ (unsigned long long)addr, node->name);
+ return;
+ }
+#endif /* !CONFIG_PPC_DCR_NATIVE */
+ }
+#endif /* CONFIG_PPC_DCR */
+
+ /*
+ * For MMIO, get the physical address
+ */
+ reg = of_get_property(node, "reg", NULL);
+ if (reg) {
+ addr = of_translate_address(node, reg);
+ if (addr != OF_BAD_ADDR) {
+ dev_set_name(dev, "%llx.%s",
+ (unsigned long long)addr, node->name);
+ return;
+ }
+ }
+
+ /*
+ * No BusID, use the node name and add a globally incremented
+ * counter (and pray...)
+ */
+ magic = atomic_add_return(1, &bus_no_reg_magic);
+ dev_set_name(dev, "%s.%d", node->name, magic - 1);
+}
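A worked example of the naming rules above, with made-up register values and assuming a 1:1 address translation:

/* Illustrative results (made-up addresses):
 *   reg = <0xe0004500 0x100> on node "i2c"  ->  dev_name() == "e0004500.i2c"
 *   dcr-reg = <0x80> on node "emac"         ->  "d80.emac" (native DCR case)
 *   no usable reg/dcr-reg on node "sound"   ->  "sound.0", "sound.1", ...
 */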
+
+/**
+ * of_device_alloc - Allocate and initialize an of_device
+ * @np: device node to assign to device
+ * @bus_id: Name to assign to the device. May be null to use default name.
+ * @parent: Parent device.
+ */
+struct platform_device *of_device_alloc(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ struct platform_device *dev;
+ int rc, i, num_reg = 0, num_irq = 0;
+ struct resource *res, temp_res;
+
+ /* First count how many resources are needed */
+ while (of_address_to_resource(np, num_reg, &temp_res) == 0)
+ num_reg++;
+ while (of_irq_to_resource(np, num_irq, &temp_res) != NO_IRQ)
+ num_irq++;
+
+ /* Allocate memory for both the struct device and the resource table */
+ dev = kzalloc(sizeof(*dev) + (sizeof(*res) * (num_reg + num_irq)),
+ GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ res = (struct resource *) &dev[1];
+
+ /* Populate the resource table */
+ if (num_irq || num_reg) {
+ dev->num_resources = num_reg + num_irq;
+ dev->resource = res;
+ for (i = 0; i < num_reg; i++, res++) {
+ rc = of_address_to_resource(np, i, res);
+ WARN_ON(rc);
+ }
+ for (i = 0; i < num_irq; i++, res++) {
+ rc = of_irq_to_resource(np, i, res);
+ WARN_ON(rc == NO_IRQ);
+ }
+ }
+
+ dev->dev.of_node = of_node_get(np);
+#if defined(CONFIG_PPC) || defined(CONFIG_MICROBLAZE)
+ dev->dev.dma_mask = &dev->archdata.dma_mask;
+#endif
+ dev->dev.parent = parent;
+ dev->dev.release = of_release_dev;
+
+ if (bus_id)
+ dev_set_name(&dev->dev, "%s", bus_id);
+ else
+ of_device_make_bus_id(&dev->dev);
+
+ return dev;
+}
+EXPORT_SYMBOL(of_device_alloc);
+
+/**
+ * of_platform_device_create - Alloc, initialize and register an of_device
+ * @np: pointer to node to create device for
+ * @bus_id: name to assign device
+ * @parent: Linux device model parent device.
+ */
+struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ struct platform_device *dev;
+
+ dev = of_device_alloc(np, bus_id, parent);
+ if (!dev)
+ return NULL;
+
+#if defined(CONFIG_PPC) || defined(CONFIG_MICROBLAZE)
+ dev->archdata.dma_mask = 0xffffffffUL;
+#endif
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.bus = &platform_bus_type;
+
+	/* We do not fill in the DMA ops for platform devices by default.
+	 * This is currently the responsibility of the platform code
+	 * to do so, possibly using a device notifier.
+	 */
+
+ if (of_device_register(dev) != 0) {
+ of_device_free(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(of_platform_device_create);
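Hypothetical sketch: creating a platform device by hand for one known node. "acme,foo" and foo_create_device are made-up names.

static int __init foo_create_device(void)
{
	struct device_node *np;
	struct platform_device *pdev;

	np = of_find_compatible_node(NULL, NULL, "acme,foo");
	if (!np)
		return -ENODEV;

	pdev = of_platform_device_create(np, NULL, NULL);
	of_node_put(np);

	return pdev ? 0 : -ENODEV;
}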
+
+/**
+ * of_platform_bus_create - Create an OF device for a bus node and all its
+ * children. Optionally recursively instantiate matching busses.
+ * @bus: device node of the bus to instantiate
+ * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+ * disallow recursive creation of child busses
+ */
+static int of_platform_bus_create(const struct device_node *bus,
+ const struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct platform_device *dev;
+ int rc = 0;
+
+ for_each_child_of_node(bus, child) {
+ pr_debug(" create child: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else if (!of_match_node(matches, child))
+ continue;
+ if (rc == 0) {
+ pr_debug(" and sub busses\n");
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ }
+ if (rc) {
+ of_node_put(child);
+ break;
+ }
+ }
+ return rc;
+}
+
+/**
+ * of_platform_bus_probe - Probe the device-tree for platform busses
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Note that children of the provided root are not instantiated as devices
+ * unless the specified root itself matches the bus list and is not NULL.
+ */
+int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct platform_device *dev;
+ int rc = 0;
+
+ if (WARN_ON(!matches || matches == OF_NO_DEEP_PROBE))
+ return -EINVAL;
+ if (root == NULL)
+ root = of_find_node_by_path("/");
+ else
+ of_node_get(root);
+ if (root == NULL)
+ return -EINVAL;
+
+ pr_debug("of_platform_bus_probe()\n");
+ pr_debug(" starting at: %s\n", root->full_name);
+
+	/* Do a self-check of the bus type; if there's a match, create
+	 * children.
+	 */
+ if (of_match_node(matches, root)) {
+ pr_debug(" root match, create all sub devices\n");
+ dev = of_platform_device_create(root, NULL, parent);
+ if (dev == NULL) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+ pr_debug(" create all sub busses\n");
+ rc = of_platform_bus_create(root, matches, &dev->dev);
+ goto bail;
+ }
+ for_each_child_of_node(root, child) {
+ if (!of_match_node(matches, child))
+ continue;
+
+ pr_debug(" match: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ if (rc) {
+ of_node_put(child);
+ break;
+ }
+ }
+ bail:
+ of_node_put(root);
+ return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
+#endif /* !CONFIG_SPARC */
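Hypothetical board-code sketch of of_platform_bus_probe(): pass NULL to start at the root of the tree and supply the bus match table explicitly (it may no longer be NULL, per the WARN_ON above). The table contents and foo_ names are made up.

static const struct of_device_id foo_bus_ids[] __initconst = {
	{ .compatible = "simple-bus" },
	{ .compatible = "acme,soc" },	/* made-up compatible string */
	{},
};

static int __init foo_publish_devices(void)
{
	return of_platform_bus_probe(NULL, foo_bus_ids, NULL);
}
device_initcall(foo_publish_devices);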
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2c7ac..b7e755f4178 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
};
-
-static void end_sync(void)
-{
- end_cpu_work();
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-}
-
-
int sync_start(void)
{
int err;
@@ -158,7 +148,7 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- start_cpu_work();
+ mutex_lock(&buffer_mutex);
err = task_handoff_register(&task_free_nb);
if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
if (err)
goto out4;
+ start_cpu_work();
+
out:
+ mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
out2:
task_handoff_unregister(&task_free_nb);
out1:
- end_sync();
free_cpumask_var(marked_cpus);
goto out;
}
@@ -190,11 +182,20 @@ out1:
void sync_stop(void)
{
+ /* flush buffers */
+ mutex_lock(&buffer_mutex);
+ end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- end_sync();
+ mutex_unlock(&buffer_mutex);
+ flush_scheduled_work();
+
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+
free_cpumask_var(marked_cpus);
}
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e2210..f179ac2ea80 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@ void end_cpu_work(void)
cancel_delayed_work(&b->work);
}
-
- flush_scheduled_work();
}
/*
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 5df60a6b677..dd87e86048b 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -135,7 +135,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
* echo 1 >/dev/oprofile/enable
*/
- return 0;
+ return nonseekable_open(inode, file);
fail:
dcookie_unregister(file->private_data);
@@ -205,4 +205,5 @@ const struct file_operations event_buffer_fops = {
.open = event_buffer_open,
.release = event_buffer_release,
.read = event_buffer_read,
+ .llseek = no_llseek,
};
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index fd8cfe95f0a..23e50f4a27c 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -48,7 +48,6 @@
#include <linux/parport.h>
#include <linux/parport_pc.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -102,8 +101,8 @@ static int parport_probe(struct pcmcia_device *link)
link->priv = info;
info->p_dev = link;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -144,16 +143,16 @@ static int parport_config_check(struct pcmcia_device *p_dev,
{
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
if (epp_mode)
p_dev->conf.ConfigIndex |= FORCE_EPP_MODE;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin == 2) {
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
return 0;
}
@@ -178,12 +177,14 @@ static int parport_config(struct pcmcia_device *link)
if (ret)
goto failed;
- p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
+ p = parport_pc_probe_port(link->resource[0]->start,
+ link->resource[1]->start,
link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
- "0x%3x, irq %u failed\n", link->io.BasePort1,
+ "0x%3x, irq %u failed\n",
+ (unsigned int) link->resource[0]->start,
link->irq);
goto failed;
}
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 40e208d130f..f01e26440f1 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -342,7 +342,6 @@ static int __devinit parport_register (struct pci_dev *dev,
dev_dbg(&dev->dev,
"PCI parallel port detected: I/O at %#lx(%#lx), IRQ %d\n",
io_lo, io_hi, irq);
- irq = PARPORT_IRQ_NONE;
}
port = parport_pc_probe_port (io_lo, io_hi, irq,
PARPORT_DMA_NONE, &dev->dev, IRQF_SHARED);
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 9a5b4b89416..55ba118f1cf 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -286,7 +286,7 @@ static struct parport_operations parport_sunbpp_ops =
.owner = THIS_MODULE,
};
-static int __devinit bpp_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit bpp_probe(struct platform_device *op, const struct of_device_id *match)
{
struct parport_operations *ops;
struct bpp_regs __iomem *regs;
@@ -295,7 +295,7 @@ static int __devinit bpp_probe(struct of_device *op, const struct of_device_id *
void __iomem *base;
struct parport *p;
- irq = op->irqs[0];
+ irq = op->archdata.irqs[0];
base = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
"sunbpp");
@@ -351,7 +351,7 @@ out_unmap:
return err;
}
-static int __devexit bpp_remove(struct of_device *op)
+static int __devexit bpp_remove(struct platform_device *op)
{
struct parport *p = dev_get_drvdata(&op->dev);
struct parport_operations *ops = p->ops;
@@ -393,12 +393,12 @@ static struct of_platform_driver bpp_sbus_driver = {
static int __init parport_sunbpp_init(void)
{
- return of_register_driver(&bpp_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&bpp_sbus_driver);
}
static void __exit parport_sunbpp_exit(void)
{
- of_unregister_driver(&bpp_sbus_driver);
+ of_unregister_platform_driver(&bpp_sbus_driver);
}
MODULE_AUTHOR("Derrick J Brashear");
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0b51857fbaf..dc1aa092286 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -55,6 +55,9 @@ obj-$(CONFIG_MICROBLAZE) += setup-bus.o
#
obj-$(CONFIG_ACPI) += pci-acpi.o
+# SMBIOS provided firmware instance and labels
+obj-$(CONFIG_DMI) += pci-label.o
+
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 628ea20a884..7f0af0e9b82 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -56,7 +56,7 @@ void pci_bus_remove_resources(struct pci_bus *bus)
int i;
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
- bus->resource[i] = 0;
+ bus->resource[i] = NULL;
list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
list_del(&bus_res->list);
@@ -240,6 +240,8 @@ void pci_enable_bridges(struct pci_bus *bus)
if (dev->subordinate) {
if (!pci_is_enabled(dev)) {
retval = pci_enable_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval);
pci_set_master(dev);
}
pci_enable_bridges(dev->subordinate);
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 45fcc1e96df..3bc72d18b12 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
- OSC_SHPC_NATIVE_HP_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ flags &= OSC_SHPC_NATIVE_HP_CONTROL;
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s\n",
(char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, flags);
+ status = acpi_pci_osc_control_set(handle, &flags, flags);
if (ACPI_SUCCESS(status))
goto got_one;
if (status == AE_SUPPORT)
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 5317e4d7d96..17d10e2e8fb 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -135,7 +135,7 @@ static int __init init_legacy(void)
struct pci_dev *pdev = NULL;
/* Add existing devices */
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
+ for_each_pci_dev(pdev)
legacy_add_slot(pdev);
/* Be alerted of any new ones */
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4ed76b47b6d..73d51398926 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
{
pciehp_acpi_slot_detection_init();
}
-
-static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
- int retval;
- u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
- if (retval)
- return retval;
- return pciehp_acpi_slot_detection_check(dev);
-}
#else
#define pciehp_firmware_init() do {} while (0)
-#define pciehp_get_hp_hw_control_from_firmware(dev) 0
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 1f4000a5a10..2574700db46 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
acpi_handle handle;
struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
- /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
- if (pciehp_get_hp_hw_control_from_firmware(pdev))
- return -ENODEV;
+
pos = pci_pcie_cap(pdev);
if (!pos)
return -ENODEV;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3588ea61b0d..aa5f3ff629f 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
dev_info(&dev->device,
"Bypassing BIOS check for pciehp use on %s\n",
pci_name(dev->port));
- else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
+ else if (pciehp_acpi_slot_detection_check(dev->port))
goto err_out_none;
ctrl = pcie_init(dev);
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 2fce726758d..a4031dfe938 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -137,7 +137,7 @@ int pciehp_unconfigure_device(struct slot *p_slot)
"Cannot remove display device %s\n",
pci_name(temp));
pci_dev_put(temp);
- rc = EINVAL;
+ rc = -EINVAL;
break;
}
}
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 5f5e8d2e355..d3985e7deab 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -113,7 +113,7 @@
#define CON_PFAULT_INTR_MASK (1 << 28)
#define MRL_CHANGE_SERR_MASK (1 << 29)
#define CON_PFAULT_SERR_MASK (1 << 30)
-#define SLOT_REG_RSVDZ_MASK (1 << 15) | (7 << 21)
+#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
/*
 * SHPC Command Code definitions
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 8c3d3219f22..a2ccfcd3c29 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -60,12 +60,6 @@ int __ref shpchp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
if (!dev)
continue;
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- ctrl_err(ctrl, "Cannot hot-add display device %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
/* Find an unused bus number for the new bridge */
@@ -114,17 +108,11 @@ int shpchp_unconfigure_device(struct slot *p_slot)
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
- for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_get_slot(parent,
+ for (j = 0; j < 8 ; j++) {
+ struct pci_dev *temp = pci_get_slot(parent,
(p_slot->device << 3) | j);
if (!temp)
continue;
- if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- ctrl_err(ctrl, "Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
- continue;
- }
if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
@@ -132,7 +120,8 @@ int shpchp_unconfigure_device(struct slot *p_slot)
"Cannot remove display device %s\n",
pci_name(temp));
pci_dev_put(temp);
- continue;
+ rc = -EINVAL;
+ break;
}
}
pci_remove_bus_device(temp);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c9171be7456..4789f8e8bf7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
+/* page table handling */
+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+ return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+ return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+ return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+ return (pfn + level_size(level) - 1) & level_mask(level);
+}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
@@ -236,7 +279,7 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
return pte->val & VTD_PAGE_MASK;
#else
/* Must have a full atomic 64-bit read */
- return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+ return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
}
-static inline int width_to_agaw(int width);
-
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
spin_unlock_irqrestore(&iommu->lock, flags);
}
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
- return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
- return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
- return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -3030,6 +3026,34 @@ static void __init iommu_exit_mempool(void)
}
+static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+ struct dmar_drhd_unit *drhd;
+ u32 vtbar;
+ int rc;
+
+ /* We know that this device on this chipset has its own IOMMU.
+ * If we find it under a different IOMMU, then the BIOS is lying
+ * to us. Hope that the IOMMU for this device is actually
+ * disabled, and it needs no translation...
+ */
+ rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+ if (rc) {
+ /* "can't" happen */
+ dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+ return;
+ }
+ vtbar &= 0xffff0000;
+
+	/* we know that this iommu should be at offset 0xa000 from vtbar */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
+ TAINT_FIRMWARE_WORKAROUND,
+ "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+ pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
+
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -3698,6 +3722,8 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
if (cap == IOMMU_CAP_CACHE_COHERENCY)
return dmar_domain->iommu_snooping;
+ if (cap == IOMMU_CAP_INTR_REMAP)
+ return intr_remapping_enabled;
return 0;
}
@@ -3731,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+ unsigned short ggc;
+
+ if (pci_read_config_word(dev, GGC, &ggc))
+ return;
+
+ if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+ printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+ dmar_map_gfx = 0;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 1315ac688aa..fd1d2867cdc 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -21,6 +21,8 @@ static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;
static int disable_intremap;
+static int disable_sourceid_checking;
+
static __init int setup_nointremap(char *str)
{
disable_intremap = 1;
@@ -28,6 +30,22 @@ static __init int setup_nointremap(char *str)
}
early_param("nointremap", setup_nointremap);
+static __init int setup_intremap(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strncmp(str, "on", 2))
+ disable_intremap = 0;
+ else if (!strncmp(str, "off", 3))
+ disable_intremap = 1;
+ else if (!strncmp(str, "nosid", 5))
+ disable_sourceid_checking = 1;
+
+ return 0;
+}
+early_param("intremap", setup_intremap);
+
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -311,8 +329,8 @@ int modify_irte(int irq, struct irte *irte_modified)
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
- set_64bit((unsigned long *)&irte->low, irte_modified->low);
- set_64bit((unsigned long *)&irte->high, irte_modified->high);
+ set_64bit(&irte->low, irte_modified->low);
+ set_64bit(&irte->high, irte_modified->high);
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
@@ -393,8 +411,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
end = start + (1 << irq_iommu->irte_mask);
for (entry = start; entry < end; entry++) {
- set_64bit((unsigned long *)&entry->low, 0);
- set_64bit((unsigned long *)&entry->high, 0);
+ set_64bit(&entry->low, 0);
+ set_64bit(&entry->high, 0);
}
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
@@ -453,6 +471,8 @@ int free_irte(int irq)
static void set_irte_sid(struct irte *irte, unsigned int svt,
unsigned int sq, unsigned int sid)
{
+ if (disable_sourceid_checking)
+ svt = SVT_NO_VERIFY;
irte->svt = svt;
irte->sq = sq;
irte->sid = sid;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d..553d8ee55c1 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
* the VF BAR size multiplied by the number of VFs. The alignment
* is just the VF BAR size.
*/
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
struct resource tmp;
enum pci_bar_type type;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 77b68eaf021..69b7be33b3a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -196,6 +196,9 @@ void unmask_msi_irq(unsigned int irq)
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
struct msi_desc *entry = get_irq_desc_msi(desc);
+
+ BUG_ON(entry->dev->current_state != PCI_D0);
+
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
@@ -229,10 +232,32 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
read_msi_msg_desc(desc, msg);
}
+void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+{
+ struct msi_desc *entry = get_irq_desc_msi(desc);
+
+ /* Assert that the cache is valid, assuming that
+ * valid messages are not all-zeroes. */
+ BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
+ entry->msg.data));
+
+ *msg = entry->msg;
+}
+
+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ get_cached_msi_msg_desc(desc, msg);
+}
+
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
struct msi_desc *entry = get_irq_desc_msi(desc);
- if (entry->msi_attrib.is_msix) {
+
+ if (entry->dev->current_state != PCI_D0) {
+ /* Don't touch the hardware now */
+ } else if (entry->msi_attrib.is_msix) {
void __iomem *base;
base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
@@ -435,7 +460,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
unsigned nr_entries)
{
- unsigned long phys_addr;
+ resource_size_t phys_addr;
u32 table_offset;
u8 bir;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1ab98bbe58d..24e19c594e5 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -296,14 +296,12 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
if (!dev->wakeup.run_wake_count++) {
acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
acpi_enable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
+ dev->wakeup.gpe_number);
}
} else if (dev->wakeup.run_wake_count > 0) {
if (!--dev->wakeup.run_wake_count) {
acpi_disable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
+ dev->wakeup.gpe_number);
acpi_disable_wakeup_device_power(dev);
}
} else {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f9a0aec3abc..8a6f797de8e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -289,8 +289,26 @@ struct drv_dev_and_id {
static long local_pci_probe(void *_ddi)
{
struct drv_dev_and_id *ddi = _ddi;
-
- return ddi->drv->probe(ddi->dev, ddi->id);
+ struct device *dev = &ddi->dev->dev;
+ int rc;
+
+ /* Unbound PCI devices are always set to disabled and suspended.
+ * During probe, the device is set to enabled and active and the
+ * usage count is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ rc = ddi->drv->probe(ddi->dev, ddi->id);
+ if (rc) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ }
+ return rc;
}
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
@@ -369,11 +387,19 @@ static int pci_device_remove(struct device * dev)
struct pci_driver * drv = pci_dev->driver;
if (drv) {
- if (drv->remove)
+ if (drv->remove) {
+ pm_runtime_get_sync(dev);
drv->remove(pci_dev);
+ pm_runtime_put_noidle(dev);
+ }
pci_dev->driver = NULL;
}
+ /* Undo the runtime PM settings in local_pci_probe() */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
/*
* If the device is still on, set the power state as "unknown",
* since it might change by the next time we load the driver.
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
new file mode 100644
index 00000000000..90c0a729cd3
--- /dev/null
+++ b/drivers/pci/pci-label.c
@@ -0,0 +1,143 @@
+/*
+ * Purpose: Export the firmware instance and label associated with
+ * a pci device to sysfs
+ * Copyright (C) 2010 Dell Inc.
+ * by Narendra K <Narendra_K@dell.com>,
+ * Jordan Hargrave <Jordan_Hargrave@dell.com>
+ *
+ * SMBIOS defines type 41 for onboard pci devices. This code retrieves
+ * the instance number and string from the type 41 record and exports
+ * it to sysfs.
+ *
+ * Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more
+ * information.
+ */
+
+#include <linux/dmi.h>
+#include <linux/sysfs.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include "pci.h"
+
+enum smbios_attr_enum {
+ SMBIOS_ATTR_NONE = 0,
+ SMBIOS_ATTR_LABEL_SHOW,
+ SMBIOS_ATTR_INSTANCE_SHOW,
+};
+
+static mode_t
+find_smbios_instance_string(struct pci_dev *pdev, char *buf,
+ enum smbios_attr_enum attribute)
+{
+ const struct dmi_device *dmi;
+ struct dmi_dev_onboard *donboard;
+ int bus;
+ int devfn;
+
+ bus = pdev->bus->number;
+ devfn = pdev->devfn;
+
+ dmi = NULL;
+ while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
+ NULL, dmi)) != NULL) {
+ donboard = dmi->device_data;
+ if (donboard && donboard->bus == bus &&
+ donboard->devfn == devfn) {
+ if (buf) {
+ if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
+ return scnprintf(buf, PAGE_SIZE,
+ "%d\n",
+ donboard->instance);
+ else if (attribute == SMBIOS_ATTR_LABEL_SHOW)
+ return scnprintf(buf, PAGE_SIZE,
+ "%s\n",
+ dmi->name);
+ }
+ return strlen(dmi->name);
+ }
+ }
+ return 0;
+}
+
+static mode_t
+smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev;
+ struct pci_dev *pdev;
+
+ dev = container_of(kobj, struct device, kobj);
+ pdev = to_pci_dev(dev);
+
+ return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ?
+ S_IRUGO : 0;
+}
+
+static ssize_t
+smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev;
+ pdev = to_pci_dev(dev);
+
+ return find_smbios_instance_string(pdev, buf,
+ SMBIOS_ATTR_LABEL_SHOW);
+}
+
+static ssize_t
+smbiosinstance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev;
+ pdev = to_pci_dev(dev);
+
+ return find_smbios_instance_string(pdev, buf,
+ SMBIOS_ATTR_INSTANCE_SHOW);
+}
+
+static struct device_attribute smbios_attr_label = {
+ .attr = {.name = "label", .mode = 0444},
+ .show = smbioslabel_show,
+};
+
+static struct device_attribute smbios_attr_instance = {
+ .attr = {.name = "index", .mode = 0444},
+ .show = smbiosinstance_show,
+};
+
+static struct attribute *smbios_attributes[] = {
+ &smbios_attr_label.attr,
+ &smbios_attr_instance.attr,
+ NULL,
+};
+
+static struct attribute_group smbios_attr_group = {
+ .attrs = smbios_attributes,
+ .is_visible = smbios_instance_string_exist,
+};
+
+static int
+pci_create_smbiosname_file(struct pci_dev *pdev)
+{
+ if (!sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group))
+ return 0;
+ return -ENODEV;
+}
+
+static void
+pci_remove_smbiosname_file(struct pci_dev *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
+}
+
+void pci_create_firmware_label_files(struct pci_dev *pdev)
+{
+ if (!pci_create_smbiosname_file(pdev))
+ ;
+}
+
+void pci_remove_firmware_label_files(struct pci_dev *pdev)
+{
+ pci_remove_smbiosname_file(pdev);
+}
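For context (not part of the patch), the visible result is two read-only sysfs attributes per matching device; the bus address below is made up.

/* Resulting sysfs layout (illustrative):
 *   /sys/bus/pci/devices/0000:02:00.0/label   <- SMBIOS type 41 string
 *   /sys/bus/pci/devices/0000:02:00.0/index   <- SMBIOS instance number
 * Both files are only created when a matching onboard-device record exists,
 * per the is_visible callback above.
 */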
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c9957f68ac9..b5a7d9bfcb2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -734,7 +734,7 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
- struct resource *res = (struct resource *)attr->private;
+ struct resource *res = attr->private;
enum pci_mmap_state mmap_type;
resource_size_t start, end;
int i;
@@ -778,6 +778,70 @@ pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
return pci_mmap_resource(kobj, attr, vma, 1);
}
+static ssize_t
+pci_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, bool write)
+{
+ struct pci_dev *pdev = to_pci_dev(container_of(kobj,
+ struct device, kobj));
+ struct resource *res = attr->private;
+ unsigned long port = off;
+ int i;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ if (res == &pdev->resource[i])
+ break;
+ if (i >= PCI_ROM_RESOURCE)
+ return -ENODEV;
+
+ port += pci_resource_start(pdev, i);
+
+ if (port > pci_resource_end(pdev, i))
+ return 0;
+
+ if (port + count - 1 > pci_resource_end(pdev, i))
+ return -EINVAL;
+
+ switch (count) {
+ case 1:
+ if (write)
+ outb(*(u8 *)buf, port);
+ else
+ *(u8 *)buf = inb(port);
+ return 1;
+ case 2:
+ if (write)
+ outw(*(u16 *)buf, port);
+ else
+ *(u16 *)buf = inw(port);
+ return 2;
+ case 4:
+ if (write)
+ outl(*(u32 *)buf, port);
+ else
+ *(u32 *)buf = inl(port);
+ return 4;
+ }
+ return -EINVAL;
+}
+
+static ssize_t
+pci_read_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return pci_resource_io(filp, kobj, attr, buf, off, count, false);
+}
+
+static ssize_t
+pci_write_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return pci_resource_io(filp, kobj, attr, buf, off, count, true);
+}
+
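Hypothetical user-space sketch of the new I/O-port resource files: for a legacy I/O BAR, a pread()/pwrite() of size 1, 2, or 4 is forwarded to inb/inw/inl or outb/outw/outl by the handlers above. The device path, BAR number, and offset are made up.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int foo_read_port(void)
{
	/* made-up device path and offset; resource4 is assumed to be an I/O BAR */
	int fd = open("/sys/bus/pci/devices/0000:00:1f.3/resource4", O_RDONLY);
	uint16_t val = 0;

	if (fd < 0)
		return -1;
	if (pread(fd, &val, sizeof(val), 0x02) != sizeof(val))	/* 2-byte read -> inw() */
		val = 0;
	close(fd);
	return val;
}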
/**
* pci_remove_resource_files - cleanup resource files
* @pdev: dev to cleanup
@@ -828,6 +892,10 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
sprintf(res_attr_name, "resource%d", num);
res_attr->mmap = pci_mmap_resource_uc;
}
+ if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ res_attr->read = pci_read_resource_io;
+ res_attr->write = pci_write_resource_io;
+ }
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = S_IRUSR | S_IWUSR;
res_attr->size = pci_resource_len(pdev, num);
@@ -1097,6 +1165,8 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
if (retval)
goto err_vga_file;
+ pci_create_firmware_label_files(pdev);
+
return 0;
err_vga_file:
@@ -1164,6 +1234,9 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
kfree(pdev->rom_attr);
}
+
+ pci_remove_firmware_label_files(pdev);
+
}
static int __init pci_sysfs_init(void)
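The new pci_resource_io() handler above lets userspace perform 1-, 2- and 4-byte accesses to I/O-port BARs through the existing resourceN sysfs files; the access width is taken from the read/write count and mapped to inb/inw/inl (or outb/outw/outl). A minimal, hypothetical userspace sketch is below; the device path and BAR number are assumptions, and the access requires root since the files are created with mode S_IRUSR | S_IWUSR.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint16_t val;
	/* resource4 is assumed to be an I/O-port BAR on this example device */
	int fd = open("/sys/bus/pci/devices/0000:00:1f.3/resource4", O_RDONLY);

	if (fd < 0) {
		perror("open resource4");
		return 1;
	}
	/* a 2-byte read at offset 0 maps to inw() in pci_resource_io() */
	if (pread(fd, &val, sizeof(val), 0) == (ssize_t)sizeof(val))
		printf("port word at offset 0: 0x%04x\n", val);
	close(fd);
	return 0;
}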
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 130ed1daf0f..7fa3cbd742c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2312,21 +2312,17 @@ void pci_msi_off(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_msi_off);
-#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
-#endif
-#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
-#endif
static int pcie_flr(struct pci_dev *dev, int probe)
{
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c8b7fd056cc..6beb11b617a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -11,6 +11,15 @@
extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
+#ifndef CONFIG_DMI
+static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
+{ return; }
+static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
+{ return; }
+#else
+extern void pci_create_firmware_label_files(struct pci_dev *pdev);
+extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
+#endif
extern void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
@@ -131,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
+bool pci_aer_available(void);
#else
static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
#endif
static inline int pci_no_d1d2(struct pci_dev *dev)
@@ -253,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
@@ -309,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_IOV */
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index b8b494b3e0d..dda70981b7a 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -31,14 +31,22 @@ source "drivers/pci/pcie/aer/Kconfig"
# PCI Express ASPM
#
config PCIEASPM
- bool "PCI Express ASPM support(Experimental)"
- depends on PCI && EXPERIMENTAL && PCIEPORTBUS
- default n
+ bool "PCI Express ASPM control" if EMBEDDED
+ depends on PCI && PCIEPORTBUS
+ default y
help
- This enables PCI Express ASPM (Active State Power Management) and
- Clock Power Management. ASPM supports state L0/L0s/L1.
+ This enables OS control over PCI Express ASPM (Active State
+ Power Management) and Clock Power Management. ASPM supports
+ state L0/L0s/L1.
- When in doubt, say N.
+ ASPM is initially set up by the firmware. With this option enabled,
+ Linux can modify this state in order to disable ASPM on known-bad
+ hardware or configurations and enable it when known-safe.
+
+ ASPM can be disabled or enabled at runtime via
+ /sys/module/pcie_aspm/parameters/policy
+
+ When in doubt, say Y.
config PCIEASPM_DEBUG
bool "Debug PCI Express ASPM"
depends on PCIEASPM
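As the reworded help text notes, the active ASPM policy is exposed as a module parameter once this option is built in. A hedged userspace sketch for reading it is below; the sysfs path comes from the help text, everything else (error handling, output format) is an assumption. Writing one of the listed policy names to the same file switches the policy at runtime.

#include <stdio.h>

int main(void)
{
	char policy[128];
	FILE *f = fopen("/sys/module/pcie_aspm/parameters/policy", "r");

	if (!f) {
		perror("pcie_aspm policy");
		return 1;
	}
	/* the kernel lists the available policies and marks the active one */
	if (fgets(policy, sizeof(policy), f))
		printf("ASPM policy: %s", policy);
	fclose(f);
	return 0;
}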
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ea654545e7c..00c62df5a9f 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,10 +6,11 @@
obj-$(CONFIG_PCIEASPM) += aspm.o
pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
# Build PCI Express AER if needed
obj-$(CONFIG_PCIEAER) += aer/
-obj-$(CONFIG_PCIE_PME) += pme/
+obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 484cc55194b..f409948e1a9 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,6 +72,11 @@ void pci_no_aer(void)
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
+bool pci_aer_available(void)
+{
+ return !pcie_aer_disable && pci_msi_enabled();
+}
+
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
*/
static int __init aer_service_init(void)
{
- if (pcie_aer_disable)
- return -ENXIO;
- if (!pci_msi_enabled())
+ if (!pci_aer_available())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index f278d7b0d95..2bb9b897221 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,42 +19,6 @@
#include <acpi/apei.h>
#include "aerdrv.h"
-/**
- * aer_osc_setup - run ACPI _OSC method
- * @pciedev: pcie_device which AER is being enabled on
- *
- * @return: Zero on success. Nonzero otherwise.
- *
- * Invoked when PCIe bus loads AER service driver. To avoid conflict with
- * BIOS AER support requires BIOS to yield AER control to OS native driver.
- **/
-int aer_osc_setup(struct pcie_device *pciedev)
-{
- acpi_status status = AE_NOT_FOUND;
- struct pci_dev *pdev = pciedev->port;
- acpi_handle handle = NULL;
-
- if (acpi_pci_disabled)
- return -1;
-
- handle = acpi_find_root_bridge_handle(pdev);
- if (handle) {
- status = acpi_pci_osc_control_set(handle,
- OSC_PCI_EXPRESS_AER_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- }
-
- if (ACPI_FAILURE(status)) {
- dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
- "init device: %s\n",
- (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
- "no _OSC support" : "_OSC failed");
- return -1;
- }
-
- return 0;
-}
-
#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 8af4f619bba..29e268fadf1 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -727,20 +727,21 @@ static void aer_isr_one_error(struct pcie_device *p_device,
static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
{
unsigned long flags;
- int ret = 0;
/* Lock access to Root error producer/consumer index */
spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx != rpc->cons_idx) {
- *e_src = rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- ret = 1;
+ if (rpc->prod_idx == rpc->cons_idx) {
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+ return 0;
}
+
+ *e_src = rpc->e_sources[rpc->cons_idx];
+ rpc->cons_idx++;
+ if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
+ rpc->cons_idx = 0;
spin_unlock_irqrestore(&rpc->e_lock, flags);
- return ret;
+ return 1;
}
/**
@@ -771,22 +772,10 @@ void aer_isr(struct work_struct *work)
*/
int aer_init(struct pcie_device *dev)
{
- if (pcie_aer_get_firmware_first(dev->port)) {
- dev_printk(KERN_DEBUG, &dev->device,
- "PCIe errors handled by platform firmware.\n");
- goto out;
- }
-
- if (aer_osc_setup(dev))
- goto out;
-
- return 0;
-out:
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
pcie_aer_force_firmware_first(dev->port, 0);
- return 0;
}
- return -ENXIO;
+ return 0;
}
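The reworked get_e_source() above is a standard single-consumer ring-buffer pop: return early when the producer and consumer indices match (empty), otherwise copy one entry and advance the consumer index with wraparound, all under the e_lock spinlock. Below is a minimal sketch of the same pattern with locking left to the caller, as in the AER code; the struct and names are illustrative, not kernel API.

#define RING_MAX 16

struct ring {
	int buf[RING_MAX];
	unsigned int prod_idx;	/* advanced by the producer */
	unsigned int cons_idx;	/* advanced by the consumer */
};

/* Pop one entry; returns 1 on success, 0 if the ring is empty. */
static int ring_consume(struct ring *r, int *out)
{
	if (r->prod_idx == r->cons_idx)
		return 0;

	*out = r->buf[r->cons_idx];
	if (++r->cons_idx == RING_MAX)
		r->cons_idx = 0;
	return 1;
}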
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index be53d98fa38..71222814c1e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -588,11 +588,23 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
* update through pcie_aspm_cap_init().
*/
pcie_aspm_cap_init(link, blacklist);
- pcie_config_aspm_path(link);
/* Setup initial Clock PM state */
pcie_clkpm_cap_init(link, blacklist);
- pcie_set_clkpm(link, policy_to_clkpm_state(link));
+
+ /*
+ * At this stage drivers haven't had an opportunity to change the
+ * link policy setting. Enabling ASPM on broken hardware can cripple
+ * it even before the driver has had a chance to disable ASPM, so
+ * default to a safe level right now. If we're enabling ASPM beyond
+ * the BIOS's expectation, we'll do so once pci_enable_device() is
+ * called.
+ */
+ if (aspm_policy != POLICY_POWERSAVE) {
+ pcie_config_aspm_path(link);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ }
+
unlock:
mutex_unlock(&aspm_lock);
out:
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c
index bbdea18693d..2f3c9040722 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -23,38 +23,13 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
-#include "../../pci.h"
-#include "pcie_pme.h"
+#include "../pci.h"
+#include "portdrv.h"
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
/*
- * If set, this switch will prevent the PCIe root port PME service driver from
- * being registered. Consequently, the interrupt-based PCIe PME signaling will
- * not be used by any PCIe root ports in that case.
- */
-static bool pcie_pme_disabled = true;
-
-/*
- * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
- * "In order to maintain compatibility with non-PCI Express-aware system
- * software, system power management logic must be configured by firmware to use
- * the legacy mechanism of signaling PME by default. PCI Express-aware system
- * software must notify the firmware prior to enabling native, interrupt-based
- * PME signaling." However, if the platform doesn't provide us with a suitable
- * notification mechanism or the notification fails, it is not clear whether or
- * not we are supposed to use the interrupt-based PCIe PME signaling. The
- * switch below can be used to indicate the desired behaviour. When set, it
- * will make the kernel use the interrupt-based PCIe PME signaling regardless of
- * the platform notification status, although the kernel will attempt to notify
- * the platform anyway. When unset, it will prevent the kernel from using the
- * the interrupt-based PCIe PME signaling if the platform notification fails,
- * which is the default.
- */
-static bool pcie_pme_force_enable;
-
-/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
* that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
static int __init pcie_pme_setup(char *str)
{
- if (!strncmp(str, "auto", 4))
- pcie_pme_disabled = false;
- else if (!strncmp(str, "force", 5))
- pcie_pme_force_enable = true;
-
- str = strchr(str, ',');
- if (str) {
- str++;
- str += strspn(str, " \t");
- if (*str && !strcmp(str, "nomsi"))
- pcie_pme_msi_disabled = true;
- }
+ if (!strncmp(str, "nomsi", 5))
+ pcie_pme_msi_disabled = true;
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
-/**
- * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
- * @srv: PCIe PME root port service to use for carrying out the check.
- *
- * Notify the platform that the native PCIe PME is going to be used and return
- * 'true' if the control of the PCIe PME registers has been acquired from the
- * platform.
- */
-static bool pcie_pme_platform_setup(struct pcie_device *srv)
-{
- if (!pcie_pme_platform_notify(srv))
- return true;
- return pcie_pme_force_enable;
-}
-
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
-static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
int rtctl_pos;
u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
struct pcie_pme_service_data *data;
int ret;
- if (!pcie_pme_platform_setup(srv))
- return -EACCES;
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
*/
static int __init pcie_pme_service_init(void)
{
- return pcie_pme_disabled ?
- -ENODEV : pcie_port_service_register(&pcie_pme_driver);
+ return pcie_port_service_register(&pcie_pme_driver);
}
module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644
index 8b923805308..00000000000
--- a/drivers/pci/pcie/pme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for PCI-Express Root Port PME signaling driver
-#
-
-obj-$(CONFIG_PCIE_PME) += pmedriver.o
-
-pmedriver-objs := pcie_pme.o
-pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644
index b30d2b7c977..00000000000
--- a/drivers/pci/pcie/pme/pcie_pme.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drivers/pci/pcie/pme/pcie_pme.h
- *
- * PCI Express Root Port PME signaling support
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#ifndef _PCIE_PME_H_
-#define _PCIE_PME_H_
-
-struct pcie_device;
-
-#ifdef CONFIG_ACPI
-extern int pcie_pme_acpi_setup(struct pcie_device *srv);
-
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
- return pcie_pme_acpi_setup(srv);
-}
-#else /* !CONFIG_ACPI */
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
- return 0;
-}
-#endif /* !CONFIG_ACPI */
-
-#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644
index 83ab2287ae3..00000000000
--- a/drivers/pci/pcie/pme/pcie_pme_acpi.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * PCIe Native PME support, ACPI-related part
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License V2. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
-
-/**
- * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
- * @srv - PCIe PME service for a root port or event collector.
- *
- * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid
- * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
- * control to the kernel.
- */
-int pcie_pme_acpi_setup(struct pcie_device *srv)
-{
- acpi_status status = AE_NOT_FOUND;
- struct pci_dev *port = srv->port;
- acpi_handle handle;
- int error = 0;
-
- if (acpi_pci_disabled)
- return -ENOSYS;
-
- dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
-
- handle = acpi_find_root_bridge_handle(port);
- if (!handle)
- return -EINVAL;
-
- status = acpi_pci_osc_control_set(handle,
- OSC_PCI_EXPRESS_PME_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_FAILURE(status)) {
- dev_info(&port->dev,
- "Failed to receive control of PCIe PME service: %s\n",
- (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
- "no _OSC support" : "ACPI _OSC failed");
- error = -ENODEV;
- }
-
- return error;
-}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 813a5c3427b..7b5aba0a329 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,6 +20,9 @@
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+
extern struct bus_type pcie_port_bus_type;
extern int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
extern int __must_check pcie_port_bus_register(void);
extern void pcie_port_bus_unregister(void);
+struct pci_dev;
+
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
{
return pcie_pme_msi_disabled;
}
+
+extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
#else /* !CONFIG_PCIE_PME */
static inline void pcie_pme_disable_msi(void) {}
static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
+#ifdef CONFIG_ACPI
+extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return 0;
+}
+#endif /* !CONFIG_ACPI */
+
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 00000000000..b7c4cb1ccb2
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,77 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device. To avoid conflicts
+ * with the BIOS PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel. The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+ acpi_status status;
+ acpi_handle handle;
+ u32 flags;
+
+ if (acpi_pci_disabled)
+ return 0;
+
+ handle = acpi_find_root_bridge_handle(port);
+ if (!handle)
+ return -EINVAL;
+
+ flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+ | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+ | OSC_PCI_EXPRESS_PME_CONTROL;
+
+ if (pci_aer_available()) {
+ if (pcie_aer_get_firmware_first(port))
+ dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
+ else
+ flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+ }
+
+ status = acpi_pci_osc_control_set(handle, &flags,
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
+ status);
+ return -ENODEV;
+ }
+
+ dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+
+ *srv_mask = PCIE_PORT_SERVICE_VC;
+ if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_HP;
+ if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_PME;
+ if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_AER;
+
+ return 0;
+}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e73effbe402..a9c222d79eb 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -14,6 +14,8 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
#include "../pci.h"
#include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0, pos;
u16 reg16;
u32 reg32;
+ int cap_mask;
+ int err;
+
+ err = pcie_port_platform_notify(dev, &cap_mask);
+ if (pcie_ports_auto) {
+ if (err) {
+ pcie_no_aspm();
+ return 0;
+ }
+ } else {
+ cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+ | PCIE_PORT_SERVICE_VC;
+ if (pci_aer_available())
+ cap_mask |= PCIE_PORT_SERVICE_AER;
+ }
pos = pci_pcie_cap(dev);
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
/* Hot-Plug Capable */
- if (reg16 & PCI_EXP_FLAGS_SLOT) {
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
- if (reg32 & PCI_EXP_SLTCAP_HPC)
+ if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
+ /*
+ * Disable hot-plug interrupts in case they have been
+ * enabled by the BIOS and the hot-plug service driver
+ * is not loaded.
+ */
+ pos += PCI_EXP_SLTCTL;
+ pci_read_config_word(dev, pos, &reg16);
+ reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ pci_write_config_word(dev, pos, reg16);
+ }
}
/* AER capable */
- if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+ if ((cap_mask & PCIE_PORT_SERVICE_AER)
+ && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
services |= PCIE_PORT_SERVICE_AER;
+ /*
+ * Disable AER on this port in case it's been enabled by the
+ * BIOS (the AER service driver will enable it when necessary).
+ */
+ pci_disable_pcie_error_reporting(dev);
+ }
/* VC support */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
services |= PCIE_PORT_SERVICE_VC;
/* Root ports are capable of generating PME too */
- if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ if ((cap_mask & PCIE_PORT_SERVICE_PME)
+ && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
services |= PCIE_PORT_SERVICE_PME;
+ /*
+ * Disable PME interrupt on this port in case it's been enabled
+ * by the BIOS (the PME service driver will enable it when
+ * necessary).
+ */
+ pcie_pme_interrupt_enable(dev, false);
+ }
return services;
}
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
*/
int pcie_port_service_register(struct pcie_port_service_driver *new)
{
+ if (pcie_ports_disabled)
+ return -ENODEV;
+
new->driver.name = (char *)new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3debed25e46..f9033e190fb 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/dmi.h>
+#include <linux/pci-aspm.h>
#include "portdrv.h"
#include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+ if (!strncmp(str, "compat", 6)) {
+ pcie_ports_disabled = true;
+ } else if (!strncmp(str, "native", 6)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = false;
+ } else if (!strncmp(str, "auto", 4)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = true;
+ }
+
+ return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
/* global data */
static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
{
int retval;
+ if (pcie_ports_disabled) {
+ pcie_no_aspm();
+ return -EACCES;
+ }
+
dmi_check_system(pcie_portdrv_dmi_table);
retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
return retval;
}
-static void __exit pcie_portdrv_exit(void)
-{
- pci_unregister_driver(&pcie_portdriver);
- pcie_port_bus_unregister();
-}
-
module_init(pcie_portdrv_init);
-module_exit(pcie_portdrv_exit);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f4adba2d1dd..12625d90f8b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -163,9 +163,16 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int pos)
{
u32 l, sz, mask;
+ u16 orig_cmd;
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
+ }
+
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
@@ -173,6 +180,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
+ if (!dev->mmio_always_on)
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
/*
* All bits set in sz means the device isn't working properly.
* If the BAR isn't implemented, all bits must be 0. If it's a
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 449e890267a..01f0306525a 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -431,8 +431,6 @@ int pci_proc_detach_device(struct pci_dev *dev)
struct proc_dir_entry *e;
if ((e = dev->procent)) {
- if (atomic_read(&e->count) > 1)
- return -EBUSY;
remove_proc_entry(e->name, dev->bus->procdir);
dev->procent = NULL;
}
@@ -485,9 +483,9 @@ static int __init pci_proc_init(void)
proc_create("devices", 0, proc_bus_pci_dir,
&proc_bus_pci_dev_operations);
proc_initialized = 1;
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev)
pci_proc_attach_device(dev);
- }
+
return 0;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 477345d4164..857ae01734a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -91,6 +91,19 @@ static void __devinit quirk_resource_alignment(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
+/*
+ * Decoding should be disabled for a PCI device during BAR sizing to avoid
+ * conflict. But doing so may cause problems on the host bridge and perhaps other
+ * key system devices. For devices that need to have mmio decoding always-on,
+ * we need to set the dev->mmio_always_on bit.
+ */
+static void __devinit quirk_mmio_always_on(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+ dev->mmio_always_on = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_mmio_always_on);
+
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
@@ -150,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+ u32 pmbase;
+ u16 pm1a;
+
+ pci_read_config_dword(dev, 0x40, &pmbase);
+ pmbase = pmbase & 0xff80;
+ pm1a = inw(pmbase);
+
+ if (pm1a & 0x10) {
+ dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+ outw(0x10, pmbase);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
+/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
static void __devinit quirk_nopcipci(struct pci_dev *dev)
@@ -1459,6 +1492,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
switch (pdev->device) {
case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
+ case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */
/* The controller should be in single function ahci mode */
conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
break;
@@ -1470,6 +1504,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
/* Fall through */
case PCI_DEVICE_ID_JMICRON_JMB361:
case PCI_DEVICE_ID_JMICRON_JMB363:
+ case PCI_DEVICE_ID_JMICRON_JMB369:
/* Enable dual function mode, AHCI on fn 0, IDE fn1 */
/* Set the class codes correctly and then direct IDE 0 */
conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
@@ -1496,16 +1531,20 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, qui
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
#endif
@@ -2115,6 +2154,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disabl
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -2126,12 +2166,29 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
+/*
+ * The APC bridge device in AMD 780 family northbridges has some random
+ * OEM subsystem ID in its vendor ID register (erratum 18), so instead
+ * we use the possible vendor/device IDs of the host bridge for the
+ * declared quirk, and search for the APC bridge by slot number.
+ */
+static void __devinit quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
+{
+ struct pci_dev *apc_bridge;
+
+ apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
+ if (apc_bridge) {
+ if (apc_bridge->device == 0x9602)
+ quirk_disable_msi(apc_bridge);
+ pci_dev_put(apc_bridge);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
+
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
@@ -2390,6 +2447,9 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
int pos;
int found;
+ if (!pci_msi_enabled())
+ return;
+
/* check if there is HT MSI cap or enabled on this device */
found = ht_check_msi_mapping(dev);
@@ -2742,7 +2802,7 @@ static int __init pci_apply_final_quirks(void)
printk(KERN_DEBUG "PCI: CLS %u bytes\n",
pci_cache_line_size << 2);
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
pci_fixup_device(pci_fixup_final, dev);
/*
* If arch hasn't set it explicitly yet, use the CLS
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 20d03f77228..9d75dc8ca60 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -169,7 +169,7 @@ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
{
struct pci_dev *dev = NULL;
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
if (pci_domain_nr(dev->bus) == domain &&
(dev->bus->number == bus && dev->devfn == devfn))
return dev;
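Several hunks in this series (proc.c, quirks.c, search.c, setup-irq.c) replace the open-coded while (pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) loop with the equivalent for_each_pci_dev() helper. A short sketch of that idiom in a hypothetical piece of kernel code is below; the dev_info() body is illustrative only, and dev must start out NULL because the macro passes it back into pci_get_device().

#include <linux/pci.h>

static void list_all_pci_devices(void)
{
	struct pci_dev *dev = NULL;

	/* walks every PCI device; references are handled by the macro */
	for_each_pci_dev(dev)
		dev_info(&dev->dev, "found %04x:%04x\n",
			 dev->vendor, dev->device);
}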
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 19b111383f6..66cb8f4cc5f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -874,19 +874,16 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
again:
pci_bus_size_bridges(parent);
__pci_bridge_assign_resources(bridge, &head);
- retval = pci_reenable_device(bridge);
- pci_set_master(bridge);
- pci_enable_bridges(parent);
tried_times++;
if (!head.next)
- return;
+ goto enable_all;
if (tried_times >= 2) {
/* still fail, don't need to try more */
free_failed_list(&head);
- return;
+ goto enable_all;
}
printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
@@ -919,5 +916,10 @@ again:
free_failed_list(&head);
goto again;
+
+enable_all:
+ retval = pci_reenable_device(bridge);
+ pci_set_master(bridge);
+ pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index aa795fd428d..eec9738f349 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -59,7 +59,6 @@ pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
int (*map_irq)(struct pci_dev *, u8, u8))
{
struct pci_dev *dev = NULL;
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev)
pdev_fixup_irq(dev, swizzle, map_irq);
- }
}
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48..968cfea04f7 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
}
/* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
+static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
"66 MHz PCI-X", /* 0x02 */
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index d0f5ad30607..c80a7a6e769 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -157,11 +157,11 @@ config PCMCIA_M8XX
config PCMCIA_AU1X00
tristate "Au1x00 pcmcia support"
- depends on SOC_AU1X00 && PCMCIA
+ depends on MIPS_ALCHEMY && PCMCIA
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
- depends on SOC_AU1X00 && PCMCIA
+ depends on MIPS_ALCHEMY && PCMCIA
select 64BIT_PHYS_ADDR
help
Enable this driver if you want PCMCIA support on your Alchemy
@@ -215,7 +215,7 @@ config PCMCIA_PXA2XX
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
|| MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
|| ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
- || MACH_VPAC270)
+ || MACH_VPAC270 || MACH_BALLOON3)
select PCMCIA_SOC_COMMON
help
Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index d006e8beab9..8d9386a22eb 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -7,7 +7,6 @@ pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
obj-$(CONFIG_PCCARD) += pcmcia_core.o
pcmcia-y += ds.o pcmcia_resource.o cistpl.o pcmcia_cis.o
-pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
obj-$(CONFIG_PCMCIA) += pcmcia.o
pcmcia_rsrc-y += rsrc_mgr.o
@@ -70,6 +69,7 @@ pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
+pxa2xx-obj-$(CONFIG_MACH_BALLOON3) += pxa2xx_balloon3.o
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index a324d329dea..67530cefcf3 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -23,7 +23,6 @@
/* include the world */
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index 5a979cb8f3e..807f2d75dad 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -31,11 +31,9 @@
#include <linux/proc_fs.h>
#include <linux/types.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
-#include <pcmcia/bus_ops.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 8844bc3e311..91414a0ddc4 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -27,7 +27,6 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cisreg.h>
@@ -54,6 +53,9 @@ static const u_int exponent[] = {
/* Upper limit on reasonable # of tuples */
#define MAX_TUPLES 200
+/* Bits in IRQInfo1 field */
+#define IRQ_INFO2_VALID 0x10
+
/* 16-bit CIS? */
static int cis_width;
module_param(cis_width, int, 0444);
@@ -210,7 +212,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
* Probably only useful for writing one-byte registers. Must be called
* with ops_mutex held.
*/
-void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
+int pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
{
void __iomem *sys, *end;
@@ -232,7 +234,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
((cis_width) ? MAP_16BIT : 0));
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
- return; /* FIXME: Error */
+ return -EINVAL;
}
writeb(flags, sys+CISREG_ICTRL0);
@@ -257,7 +259,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
sys = set_cis_map(s, card_offset, flags);
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
- return; /* FIXME: error */
+ return -EINVAL;
}
end = sys + s->map_size;
@@ -271,6 +273,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
addr = 0;
}
}
+ return 0;
}
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 976d80706ea..2ec8ac97445 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -32,7 +32,6 @@
#include <asm/system.h>
#include <asm/irq.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
@@ -252,38 +251,6 @@ struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr)
}
EXPORT_SYMBOL(pcmcia_get_socket_by_nr);
-/*
- * The central event handler. Send_event() sends an event to the
- * 16-bit subsystem, which then calls the relevant device drivers.
- * Parse_events() interprets the event bits from
- * a card status change report. Do_shutdown() handles the high
- * priority stuff associated with a card removal.
- */
-
-/* NOTE: send_event needs to be called with skt->sem held. */
-
-static int send_event(struct pcmcia_socket *s, event_t event, int priority)
-{
- int ret;
-
- if ((s->state & SOCKET_CARDBUS) && (event != CS_EVENT_CARD_REMOVAL))
- return 0;
-
- dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n",
- event, priority, s->callback);
-
- if (!s->callback)
- return 0;
- if (!try_module_get(s->callback->owner))
- return 0;
-
- ret = s->callback->event(s, event, priority);
-
- module_put(s->callback->owner);
-
- return ret;
-}
-
static int socket_reset(struct pcmcia_socket *skt)
{
int status, i;
@@ -326,7 +293,8 @@ static void socket_shutdown(struct pcmcia_socket *s)
dev_dbg(&s->dev, "shutdown\n");
- send_event(s, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
+ if (s->callback)
+ s->callback->remove(s);
mutex_lock(&s->ops_mutex);
s->state &= SOCKET_INUSE | SOCKET_PRESENT;
@@ -477,7 +445,8 @@ static int socket_insert(struct pcmcia_socket *skt)
dev_dbg(&skt->dev, "insert done\n");
mutex_unlock(&skt->ops_mutex);
- send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
+ if (!(skt->state & SOCKET_CARDBUS) && (skt->callback))
+ skt->callback->add(skt);
} else {
mutex_unlock(&skt->ops_mutex);
socket_shutdown(skt);
@@ -494,7 +463,6 @@ static int socket_suspend(struct pcmcia_socket *skt)
mutex_lock(&skt->ops_mutex);
skt->suspended_state = skt->state;
- send_event(skt, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
skt->socket = dead_socket;
skt->ops->set_socket(skt, &skt->socket);
if (skt->ops->suspend)
@@ -555,8 +523,8 @@ static int socket_late_resume(struct pcmcia_socket *skt)
return 0;
}
#endif
-
- send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
+ if (!(skt->state & SOCKET_CARDBUS) && (skt->callback))
+ skt->callback->early_resume(skt);
return 0;
}
@@ -654,16 +622,8 @@ static int pccardd(void *__skt)
spin_unlock_irqrestore(&skt->thread_lock, flags);
mutex_lock(&skt->skt_mutex);
- if (events) {
- if (events & SS_DETECT)
- socket_detect_change(skt);
- if (events & SS_BATDEAD)
- send_event(skt, CS_EVENT_BATTERY_DEAD, CS_EVENT_PRI_LOW);
- if (events & SS_BATWARN)
- send_event(skt, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW);
- if (events & SS_READY)
- send_event(skt, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW);
- }
+ if (events & SS_DETECT)
+ socket_detect_change(skt);
if (sysfs_events) {
if (sysfs_events & PCMCIA_UEVENT_EJECT)
@@ -783,7 +743,7 @@ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
s->callback = c;
if ((s->state & (SOCKET_PRESENT|SOCKET_CARDBUS)) == SOCKET_PRESENT)
- send_event(s, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
+ s->callback->add(s);
} else
s->callback = NULL;
err:
@@ -823,20 +783,13 @@ int pcmcia_reset_card(struct pcmcia_socket *skt)
break;
}
- ret = send_event(skt, CS_EVENT_RESET_REQUEST, CS_EVENT_PRI_LOW);
- if (ret == 0) {
- send_event(skt, CS_EVENT_RESET_PHYSICAL, CS_EVENT_PRI_LOW);
- if (skt->callback)
- skt->callback->suspend(skt);
- mutex_lock(&skt->ops_mutex);
- ret = socket_reset(skt);
- mutex_unlock(&skt->ops_mutex);
- if (ret == 0) {
- send_event(skt, CS_EVENT_CARD_RESET, CS_EVENT_PRI_LOW);
- if (skt->callback)
- skt->callback->resume(skt);
- }
- }
+ if (skt->callback)
+ skt->callback->suspend(skt);
+ mutex_lock(&skt->ops_mutex);
+ ret = socket_reset(skt);
+ mutex_unlock(&skt->ops_mutex);
+ if ((ret == 0) && (skt->callback))
+ skt->callback->resume(skt);
ret = 0;
} while (0);
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 4126a75445e..da055dc14d9 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -10,7 +10,7 @@
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
* (C) 1999 David A. Hinds
- * (C) 2003 - 2008 Dominik Brodowski
+ * (C) 2003 - 2010 Dominik Brodowski
*
*
* This file contains definitions _only_ needed by the PCMCIA core modules.
@@ -26,6 +26,9 @@
/* Flags in client state */
#define CLIENT_WIN_REQ(i) (0x1<<(i))
+/* Flag to access all functions */
+#define BIND_FN_ALL 0xff
+
/* Each card function gets one of these guys */
typedef struct config_t {
struct kref ref;
@@ -35,7 +38,10 @@ typedef struct config_t {
unsigned int ConfigBase;
unsigned char Status, Pin, Copy, Option, ExtStatus;
unsigned int CardValues;
- io_req_t io;
+
+ struct resource io[MAX_IO_WIN]; /* io ports */
+ struct resource mem[MAX_WIN]; /* mem areas */
+
struct {
u_int Attributes;
} irq;
@@ -56,18 +62,11 @@ struct pccard_resource_ops {
unsigned int attr,
unsigned int *base,
unsigned int num,
- unsigned int align);
+ unsigned int align,
+ struct resource **parent);
struct resource* (*find_mem) (unsigned long base, unsigned long num,
unsigned long align, int low,
struct pcmcia_socket *s);
- int (*add_io) (struct pcmcia_socket *s,
- unsigned int action,
- unsigned long r_start,
- unsigned long r_end);
- int (*add_mem) (struct pcmcia_socket *s,
- unsigned int action,
- unsigned long r_start,
- unsigned long r_end);
int (*init) (struct pcmcia_socket *s);
void (*exit) (struct pcmcia_socket *s);
};
@@ -114,11 +113,12 @@ void cb_free(struct pcmcia_socket *s);
struct pcmcia_callback{
struct module *owner;
- int (*event) (struct pcmcia_socket *s,
- event_t event, int priority);
+ int (*add) (struct pcmcia_socket *s);
+ int (*remove) (struct pcmcia_socket *s);
void (*requery) (struct pcmcia_socket *s);
int (*validate) (struct pcmcia_socket *s, unsigned int *i);
int (*suspend) (struct pcmcia_socket *s);
+ int (*early_resume) (struct pcmcia_socket *s);
int (*resume) (struct pcmcia_socket *s);
};
@@ -146,6 +146,8 @@ void pcmcia_put_socket(struct pcmcia_socket *skt);
/* ds.c */
extern struct bus_type pcmcia_bus_type;
+struct pcmcia_device;
+
/* pcmcia_resource.c */
extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
extern int pcmcia_validate_mem(struct pcmcia_socket *s);
@@ -163,8 +165,8 @@ extern struct bin_attribute pccard_cis_attr;
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
u_int addr, u_int len, void *ptr);
-void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr,
- u_int addr, u_int len, void *ptr);
+int pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr,
+ u_int addr, u_int len, void *ptr);
void release_cis_mem(struct pcmcia_socket *s);
void destroy_cis_cache(struct pcmcia_socket *s);
int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
@@ -188,34 +190,4 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple);
-
-#ifdef CONFIG_PCMCIA_IOCTL
-/* ds.c */
-extern struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev);
-extern void pcmcia_put_dev(struct pcmcia_device *p_dev);
-
-struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
- unsigned int function);
-
-/* pcmcia_ioctl.c */
-extern void __init pcmcia_setup_ioctl(void);
-extern void __exit pcmcia_cleanup_ioctl(void);
-extern void handle_event(struct pcmcia_socket *s, event_t event);
-extern int handle_request(struct pcmcia_socket *s, event_t event);
-
-#else /* CONFIG_PCMCIA_IOCTL */
-
-static inline void __init pcmcia_setup_ioctl(void) { return; }
-static inline void __exit pcmcia_cleanup_ioctl(void) { return; }
-static inline void handle_event(struct pcmcia_socket *s, event_t event)
-{
- return;
-}
-static inline int handle_request(struct pcmcia_socket *s, event_t event)
-{
- return 0;
-}
-
-#endif /* CONFIG_PCMCIA_IOCTL */
-
#endif /* _LINUX_CS_INTERNAL_H */
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 0f4cc3f0002..27575e6378a 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <asm/mach-au1x00/au1000.h>
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index eac961463be..55570d9e1e4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -10,7 +10,7 @@
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
* (C) 1999 David A. Hinds
- * (C) 2003 - 2006 Dominik Brodowski
+ * (C) 2003 - 2010 Dominik Brodowski
*/
#include <linux/kernel.h>
@@ -26,7 +26,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -213,7 +212,7 @@ EXPORT_SYMBOL(pcmcia_unregister_driver);
/* pcmcia_device handling */
-struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev)
+static struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev)
{
struct device *tmp_dev;
tmp_dev = get_device(&p_dev->dev);
@@ -222,7 +221,7 @@ struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev)
return to_pcmcia_dev(tmp_dev);
}
-void pcmcia_put_dev(struct pcmcia_device *p_dev)
+static void pcmcia_put_dev(struct pcmcia_device *p_dev)
{
if (p_dev)
put_device(&p_dev->dev);
@@ -294,7 +293,7 @@ static int pcmcia_device_probe(struct device *dev)
}
mutex_lock(&s->ops_mutex);
- if ((s->pcmcia_state.has_pfc) &&
+ if ((s->pcmcia_pfc) &&
(p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY);
mutex_unlock(&s->ops_mutex);
@@ -359,7 +358,7 @@ static int pcmcia_device_remove(struct device *dev)
* pseudo multi-function card, we need to unbind
* all devices
*/
- if ((p_dev->socket->pcmcia_state.has_pfc) &&
+ if ((p_dev->socket->pcmcia_pfc) &&
(p_dev->socket->device_count > 0) &&
(p_dev->device_no == 0))
pcmcia_card_remove(p_dev->socket, p_dev);
@@ -477,7 +476,8 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
}
-struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
+static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ unsigned int function)
{
struct pcmcia_device *p_dev, *tmp_dev;
int i;
@@ -531,7 +531,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
if (p_dev->func == tmp_dev->func) {
p_dev->function_config = tmp_dev->function_config;
- p_dev->io = tmp_dev->io;
p_dev->irq = tmp_dev->irq;
kref_get(&p_dev->function_config->ref);
}
@@ -544,15 +543,29 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
"IRQ setup failed -- device might not work\n");
if (!p_dev->function_config) {
+ config_t *c;
dev_dbg(&p_dev->dev, "creating config_t\n");
- p_dev->function_config = kzalloc(sizeof(struct config_t),
- GFP_KERNEL);
- if (!p_dev->function_config) {
+ c = kzalloc(sizeof(struct config_t), GFP_KERNEL);
+ if (!c) {
mutex_unlock(&s->ops_mutex);
goto err_unreg;
}
- kref_init(&p_dev->function_config->ref);
+ p_dev->function_config = c;
+ kref_init(&c->ref);
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ c->io[i].name = p_dev->devname;
+ c->io[i].flags = IORESOURCE_IO;
+ }
+ for (i = 0; i < MAX_WIN; i++) {
+ c->mem[i].name = p_dev->devname;
+ c->mem[i].flags = IORESOURCE_MEM;
+ }
}
+ for (i = 0; i < MAX_IO_WIN; i++)
+ p_dev->resource[i] = &p_dev->function_config->io[i];
+ for (; i < (MAX_IO_WIN + MAX_WIN); i++)
+ p_dev->resource[i] = &p_dev->function_config->mem[i-MAX_IO_WIN];
+
mutex_unlock(&s->ops_mutex);
dev_printk(KERN_NOTICE, &p_dev->dev,
@@ -680,7 +693,7 @@ static void pcmcia_requery(struct pcmcia_socket *s)
* call pcmcia_device_add() -- which will fail if both
* devices are already registered. */
mutex_lock(&s->ops_mutex);
- has_pfc = s->pcmcia_state.has_pfc;
+ has_pfc = s->pcmcia_pfc;
mutex_unlock(&s->ops_mutex);
if (has_pfc)
pcmcia_device_add(s, 0);
@@ -812,7 +825,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) {
dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n");
mutex_lock(&dev->socket->ops_mutex);
- dev->socket->pcmcia_state.has_pfc = 1;
+ dev->socket->pcmcia_pfc = 1;
mutex_unlock(&dev->socket->ops_mutex);
if (dev->device_no != did->device_no)
return 0;
@@ -826,7 +839,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
/* if this is a pseudo-multi-function device,
* we need explicit matches */
- if (dev->socket->pcmcia_state.has_pfc)
+ if (dev->socket->pcmcia_pfc)
return 0;
if (dev->device_no)
return 0;
@@ -885,14 +898,6 @@ static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
}
mutex_unlock(&p_drv->dynids.lock);
-#ifdef CONFIG_PCMCIA_IOCTL
- /* matching by cardmgr */
- if (p_dev->cardmgr == p_drv) {
- dev_dbg(dev, "cardmgr matched to %s\n", drv->name);
- return 1;
- }
-#endif
-
while (did && did->match_flags) {
dev_dbg(dev, "trying to match to %s\n", drv->name);
if (pcmcia_devmatch(p_dev, did)) {
@@ -1006,6 +1011,18 @@ pcmcia_device_stringattr(prod_id2, prod_id[1]);
pcmcia_device_stringattr(prod_id3, prod_id[2]);
pcmcia_device_stringattr(prod_id4, prod_id[3]);
+static ssize_t pcmcia_show_resources(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ char *str = buf;
+ int i;
+
+ for (i = 0; i < PCMCIA_NUM_RESOURCES; i++)
+ str += sprintf(str, "%pr\n", p_dev->resource[i]);
+
+ return str - buf;
+}
static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -1076,6 +1093,7 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
static struct device_attribute pcmcia_dev_attrs[] = {
__ATTR(function, 0444, func_show, NULL),
__ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state),
+ __ATTR(resources, 0444, pcmcia_show_resources, NULL),
__ATTR_RO(func_id),
__ATTR_RO(manf_id),
__ATTR_RO(card_id),
@@ -1215,86 +1233,57 @@ static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
return 0;
}
+static int pcmcia_bus_remove(struct pcmcia_socket *skt)
+{
+ atomic_set(&skt->present, 0);
+ pcmcia_card_remove(skt, NULL);
-/*======================================================================
+ mutex_lock(&skt->ops_mutex);
+ destroy_cis_cache(skt);
+ pcmcia_cleanup_irq(skt);
+ mutex_unlock(&skt->ops_mutex);
- The card status event handler.
+ return 0;
+}
-======================================================================*/
+static int pcmcia_bus_add(struct pcmcia_socket *skt)
+{
+ atomic_set(&skt->present, 1);
-/* Normally, the event is passed to individual drivers after
- * informing userspace. Only for CS_EVENT_CARD_REMOVAL this
- * is inversed to maintain historic compatibility.
- */
+ mutex_lock(&skt->ops_mutex);
+ skt->pcmcia_pfc = 0;
+ destroy_cis_cache(skt); /* to be on the safe side... */
+ mutex_unlock(&skt->ops_mutex);
-static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
-{
- struct pcmcia_socket *s = pcmcia_get_socket(skt);
+ pcmcia_card_add(skt);
- if (!s) {
- dev_printk(KERN_ERR, &skt->dev,
- "PCMCIA obtaining reference to socket " \
- "failed, event 0x%x lost!\n", event);
- return -ENODEV;
- }
+ return 0;
+}
- dev_dbg(&skt->dev, "ds_event(0x%06x, %d, 0x%p)\n",
- event, priority, skt);
+static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
+{
+ if (!verify_cis_cache(skt)) {
+ pcmcia_put_socket(skt);
+ return 0;
+ }
- switch (event) {
- case CS_EVENT_CARD_REMOVAL:
- atomic_set(&skt->present, 0);
- pcmcia_card_remove(skt, NULL);
- handle_event(skt, event);
- mutex_lock(&s->ops_mutex);
- destroy_cis_cache(s);
- pcmcia_cleanup_irq(s);
- mutex_unlock(&s->ops_mutex);
- break;
+ dev_dbg(&skt->dev, "cis mismatch - different card\n");
- case CS_EVENT_CARD_INSERTION:
- atomic_set(&skt->present, 1);
- mutex_lock(&s->ops_mutex);
- s->pcmcia_state.has_pfc = 0;
- destroy_cis_cache(s); /* to be on the safe side... */
- mutex_unlock(&s->ops_mutex);
- pcmcia_card_add(skt);
- handle_event(skt, event);
- break;
-
- case CS_EVENT_EJECTION_REQUEST:
- break;
-
- case CS_EVENT_PM_RESUME:
- if (verify_cis_cache(skt) != 0) {
- dev_dbg(&skt->dev, "cis mismatch - different card\n");
- /* first, remove the card */
- ds_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
- mutex_lock(&s->ops_mutex);
- destroy_cis_cache(skt);
- kfree(skt->fake_cis);
- skt->fake_cis = NULL;
- s->functions = 0;
- mutex_unlock(&s->ops_mutex);
- /* now, add the new card */
- ds_event(skt, CS_EVENT_CARD_INSERTION,
- CS_EVENT_PRI_LOW);
- }
- handle_event(skt, event);
- break;
+ /* first, remove the card */
+ pcmcia_bus_remove(skt);
- case CS_EVENT_PM_SUSPEND:
- case CS_EVENT_RESET_PHYSICAL:
- case CS_EVENT_CARD_RESET:
- default:
- handle_event(skt, event);
- break;
- }
+ mutex_lock(&skt->ops_mutex);
+ destroy_cis_cache(skt);
+ kfree(skt->fake_cis);
+ skt->fake_cis = NULL;
+ skt->functions = 0;
+ mutex_unlock(&skt->ops_mutex);
- pcmcia_put_socket(s);
+ /* now, add the new card */
+ pcmcia_bus_add(skt);
+ return 0;
+}
- return 0;
-} /* ds_event */
/*
* NOTE: This is racy. There's no guarantee the card will still be
@@ -1323,10 +1312,12 @@ EXPORT_SYMBOL(pcmcia_dev_present);
static struct pcmcia_callback pcmcia_bus_callback = {
.owner = THIS_MODULE,
- .event = ds_event,
+ .add = pcmcia_bus_add,
+ .remove = pcmcia_bus_remove,
.requery = pcmcia_requery,
.validate = pccard_validate_cis,
.suspend = pcmcia_bus_suspend,
+ .early_resume = pcmcia_bus_early_resume,
.resume = pcmcia_bus_resume,
};
@@ -1350,11 +1341,8 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
return ret;
}
-#ifdef CONFIG_PCMCIA_IOCTL
- init_waitqueue_head(&socket->queue);
-#endif
INIT_LIST_HEAD(&socket->devices_list);
- memset(&socket->pcmcia_state, 0, sizeof(u8));
+ socket->pcmcia_pfc = 0;
socket->device_count = 0;
atomic_set(&socket->present, 0);
@@ -1429,8 +1417,6 @@ static int __init init_pcmcia_bus(void)
return ret;
}
- pcmcia_setup_ioctl();
-
return 0;
}
fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that
@@ -1439,8 +1425,6 @@ fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that
static void __exit exit_pcmcia_bus(void)
{
- pcmcia_cleanup_ioctl();
-
class_interface_unregister(&pcmcia_bus_interface);
bus_unregister(&pcmcia_bus_type);
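
The ds.c hunks above add a read-only "resources" sysfs attribute that prints every entry of p_dev->resource[] with the %pr format. As a purely illustrative sketch, userspace could dump it as below; the /sys/bus/pcmcia/devices/<device>/ path is an assumption about the sysfs layout, not something shown in this diff.

/*
 * Illustrative only: dump the new per-device "resources" attribute.
 * The /sys/bus/pcmcia/devices/<dev>/resources path is assumed.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	char line[128];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pcmcia-device>\n", argv[0]);
		return 1;
	}

	snprintf(path, sizeof(path),
		 "/sys/bus/pcmcia/devices/%s/resources", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}

	/* one "%pr"-formatted line per entry of p_dev->resource[] */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}
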
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index f94d8281cfb..546d3024b6f 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -44,7 +44,7 @@ struct electra_cf_socket {
unsigned present:1;
unsigned active:1;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
unsigned long mem_phys;
void __iomem * mem_base;
unsigned long mem_size;
@@ -181,7 +181,7 @@ static struct pccard_operations electra_cf_ops = {
.set_mem_map = electra_cf_set_mem_map,
};
-static int __devinit electra_cf_probe(struct of_device *ofdev,
+static int __devinit electra_cf_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device *device = &ofdev->dev;
@@ -325,7 +325,7 @@ fail1:
}
-static int __devexit electra_cf_remove(struct of_device *ofdev)
+static int __devexit electra_cf_remove(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct electra_cf_socket *cf;
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 3003bb3dfcc..05d0879ce93 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -15,7 +15,6 @@
#include <linux/interrupt.h>
#include <linux/device.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 9e2a15628de..61746bd598b 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -50,7 +50,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 7e16ed8eb0a..24de4992586 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -26,7 +26,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index 6c5c3f910d7..8e4723844ad 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -27,7 +27,6 @@
#include <asm/system.h>
#include <asm/addrspace.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 25e5e30a18a..f0ecad99ce8 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -59,7 +59,6 @@
#include <asm/irq.h>
#include <asm/fs_pd.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
@@ -1150,7 +1149,7 @@ static struct pccard_operations m8xx_services = {
.set_mem_map = m8xx_set_mem_map,
};
-static int __init m8xx_probe(struct of_device *ofdev,
+static int __init m8xx_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct pcmcia_win *w;
@@ -1250,7 +1249,7 @@ static int __init m8xx_probe(struct of_device *ofdev,
return 0;
}
-static int m8xx_remove(struct of_device *ofdev)
+static int m8xx_remove(struct platform_device *ofdev)
{
u32 m, i;
struct pcmcia_win *w;
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index 4a65eaf96b0..0ac54da1588 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
deleted file mode 100644
index d007a2a0383..00000000000
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ /dev/null
@@ -1,1077 +0,0 @@
-/*
- * pcmcia_ioctl.c -- ioctl interface for cardmgr and cardctl
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * The initial developer of the original code is David A. Hinds
- * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- *
- * (C) 1999 David A. Hinds
- * (C) 2003 - 2004 Dominik Brodowski
- */
-
-/*
- * This file will go away soon.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/ioctl.h>
-#include <linux/proc_fs.h>
-#include <linux/poll.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
-#include <linux/workqueue.h>
-
-#include <pcmcia/cs_types.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/ss.h>
-
-#include "cs_internal.h"
-
-static int major_dev = -1;
-
-
-/* Device user information */
-#define MAX_EVENTS 32
-#define USER_MAGIC 0x7ea4
-#define CHECK_USER(u) \
- (((u) == NULL) || ((u)->user_magic != USER_MAGIC))
-
-typedef struct user_info_t {
- u_int user_magic;
- int event_head, event_tail;
- event_t event[MAX_EVENTS];
- struct user_info_t *next;
- struct pcmcia_socket *socket;
-} user_info_t;
-
-
-static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s,
- unsigned int function)
-{
- struct pcmcia_device *p_dev = NULL;
-
- mutex_lock(&s->ops_mutex);
- list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
- if (p_dev->func == function) {
- mutex_unlock(&s->ops_mutex);
- return pcmcia_get_dev(p_dev);
- }
- }
- mutex_unlock(&s->ops_mutex);
- return NULL;
-}
-
-/* backwards-compatible accessing of driver --- by name! */
-
-static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info)
-{
- struct device_driver *drv;
- struct pcmcia_driver *p_drv;
-
- drv = driver_find((char *) dev_info, &pcmcia_bus_type);
- if (!drv)
- return NULL;
-
- p_drv = container_of(drv, struct pcmcia_driver, drv);
-
- return p_drv;
-}
-
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *proc_pccard;
-
-static int proc_read_drivers_callback(struct device_driver *driver, void *_m)
-{
- struct seq_file *m = _m;
- struct pcmcia_driver *p_drv = container_of(driver,
- struct pcmcia_driver, drv);
-
- seq_printf(m, "%-24.24s 1 %d\n", p_drv->drv.name,
-#ifdef CONFIG_MODULE_UNLOAD
- (p_drv->owner) ? module_refcount(p_drv->owner) : 1
-#else
- 1
-#endif
- );
- return 0;
-}
-
-static int pccard_drivers_proc_show(struct seq_file *m, void *v)
-{
- return bus_for_each_drv(&pcmcia_bus_type, NULL,
- m, proc_read_drivers_callback);
-}
-
-static int pccard_drivers_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pccard_drivers_proc_show, NULL);
-}
-
-static const struct file_operations pccard_drivers_proc_fops = {
- .owner = THIS_MODULE,
- .open = pccard_drivers_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
-
-#ifdef CONFIG_PCMCIA_PROBE
-
-static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
-{
- int irq;
- u32 mask;
-
- irq = adj->resource.irq.IRQ;
- if ((irq < 0) || (irq > 15))
- return -EINVAL;
-
- if (adj->Action != REMOVE_MANAGED_RESOURCE)
- return 0;
-
- mask = 1 << irq;
-
- if (!(s->irq_mask & mask))
- return 0;
-
- s->irq_mask &= ~mask;
-
- return 0;
-}
-
-#else
-
-static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
-{
- return 0;
-}
-
-#endif
-
-static int pcmcia_adjust_resource_info(adjust_t *adj)
-{
- struct pcmcia_socket *s;
- int ret = -ENOSYS;
-
- down_read(&pcmcia_socket_list_rwsem);
- list_for_each_entry(s, &pcmcia_socket_list, socket_list) {
-
- if (adj->Resource == RES_IRQ)
- ret = adjust_irq(s, adj);
-
- else if (s->resource_ops->add_io) {
- unsigned long begin, end;
-
- /* you can't use the old interface if the new
- * one was used before */
- mutex_lock(&s->ops_mutex);
- if ((s->resource_setup_new) &&
- !(s->resource_setup_old)) {
- mutex_unlock(&s->ops_mutex);
- continue;
- } else if (!(s->resource_setup_old))
- s->resource_setup_old = 1;
-
- switch (adj->Resource) {
- case RES_MEMORY_RANGE:
- begin = adj->resource.memory.Base;
- end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
- if (s->resource_ops->add_mem)
- ret = s->resource_ops->add_mem(s, adj->Action, begin, end);
- case RES_IO_RANGE:
- begin = adj->resource.io.BasePort;
- end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
- if (s->resource_ops->add_io)
- ret = s->resource_ops->add_io(s, adj->Action, begin, end);
- }
- if (!ret) {
- /* as there's no way we know this is the
- * last call to adjust_resource_info, we
- * always need to assume this is the latest
- * one... */
- s->resource_setup_done = 1;
- }
- mutex_unlock(&s->ops_mutex);
- }
- }
- up_read(&pcmcia_socket_list_rwsem);
-
- return ret;
-}
-
-
-/** pcmcia_get_window
- */
-static int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *wh_out,
- window_handle_t wh, win_req_t *req)
-{
- pccard_mem_map *win;
- window_handle_t w;
-
- wh--;
- if (!s || !(s->state & SOCKET_PRESENT))
- return -ENODEV;
- if (wh >= MAX_WIN)
- return -EINVAL;
- for (w = wh; w < MAX_WIN; w++)
- if (s->state & SOCKET_WIN_REQ(w))
- break;
- if (w == MAX_WIN)
- return -EINVAL;
- win = &s->win[w];
- req->Base = win->res->start;
- req->Size = win->res->end - win->res->start + 1;
- req->AccessSpeed = win->speed;
- req->Attributes = 0;
- if (win->flags & MAP_ATTRIB)
- req->Attributes |= WIN_MEMORY_TYPE_AM;
- if (win->flags & MAP_ACTIVE)
- req->Attributes |= WIN_ENABLE;
- if (win->flags & MAP_16BIT)
- req->Attributes |= WIN_DATA_WIDTH_16;
- if (win->flags & MAP_USE_WAIT)
- req->Attributes |= WIN_USE_WAIT;
-
- *wh_out = w + 1;
- return 0;
-} /* pcmcia_get_window */
-
-
-/** pcmcia_get_mem_page
- *
- * Change the card address of an already open memory window.
- */
-static int pcmcia_get_mem_page(struct pcmcia_socket *skt, window_handle_t wh,
- memreq_t *req)
-{
- wh--;
- if (wh >= MAX_WIN)
- return -EINVAL;
-
- req->Page = 0;
- req->CardOffset = skt->win[wh].card_start;
- return 0;
-} /* pcmcia_get_mem_page */
-
-
-/** pccard_get_status
- *
- * Get the current socket state bits. We don't support the latched
- * SocketState yet: I haven't seen any point for it.
- */
-
-static int pccard_get_status(struct pcmcia_socket *s,
- struct pcmcia_device *p_dev,
- cs_status_t *status)
-{
- config_t *c;
- int val;
-
- s->ops->get_status(s, &val);
- status->CardState = status->SocketState = 0;
- status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
- status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
- status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
- status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
- if (s->state & SOCKET_SUSPEND)
- status->CardState |= CS_EVENT_PM_SUSPEND;
- if (!(s->state & SOCKET_PRESENT))
- return -ENODEV;
-
- c = (p_dev) ? p_dev->function_config : NULL;
-
- if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
- (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
- u_char reg;
- if (c->CardValues & PRESENT_PIN_REPLACE) {
- mutex_lock(&s->ops_mutex);
- pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
- mutex_unlock(&s->ops_mutex);
- status->CardState |=
- (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
- status->CardState |=
- (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
- status->CardState |=
- (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
- status->CardState |=
- (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
- } else {
- /* No PRR? Then assume we're always ready */
- status->CardState |= CS_EVENT_READY_CHANGE;
- }
- if (c->CardValues & PRESENT_EXT_STATUS) {
- mutex_lock(&s->ops_mutex);
- pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
- mutex_unlock(&s->ops_mutex);
- status->CardState |=
- (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
- }
- return 0;
- }
- status->CardState |=
- (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
- status->CardState |=
- (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
- status->CardState |=
- (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
- status->CardState |=
- (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
- return 0;
-} /* pccard_get_status */
-
-static int pccard_get_configuration_info(struct pcmcia_socket *s,
- struct pcmcia_device *p_dev,
- config_info_t *config)
-{
- config_t *c;
-
- if (!(s->state & SOCKET_PRESENT))
- return -ENODEV;
-
-
-#ifdef CONFIG_CARDBUS
- if (s->state & SOCKET_CARDBUS) {
- memset(config, 0, sizeof(config_info_t));
- config->Vcc = s->socket.Vcc;
- config->Vpp1 = config->Vpp2 = s->socket.Vpp;
- config->Option = s->cb_dev->subordinate->number;
- if (s->state & SOCKET_CARDBUS_CONFIG) {
- config->Attributes = CONF_VALID_CLIENT;
- config->IntType = INT_CARDBUS;
- config->AssignedIRQ = s->pcmcia_irq;
- if (config->AssignedIRQ)
- config->Attributes |= CONF_ENABLE_IRQ;
- if (s->io[0].res) {
- config->BasePort1 = s->io[0].res->start;
- config->NumPorts1 = s->io[0].res->end -
- config->BasePort1 + 1;
- }
- }
- return 0;
- }
-#endif
-
- if (p_dev) {
- c = p_dev->function_config;
- config->Function = p_dev->func;
- } else {
- c = NULL;
- config->Function = 0;
- }
-
- if ((c == NULL) || !(c->state & CONFIG_LOCKED)) {
- config->Attributes = 0;
- config->Vcc = s->socket.Vcc;
- config->Vpp1 = config->Vpp2 = s->socket.Vpp;
- return 0;
- }
-
- config->Attributes = c->Attributes | CONF_VALID_CLIENT;
- config->Vcc = s->socket.Vcc;
- config->Vpp1 = config->Vpp2 = s->socket.Vpp;
- config->IntType = c->IntType;
- config->ConfigBase = c->ConfigBase;
- config->Status = c->Status;
- config->Pin = c->Pin;
- config->Copy = c->Copy;
- config->Option = c->Option;
- config->ExtStatus = c->ExtStatus;
- config->Present = config->CardValues = c->CardValues;
- config->IRQAttributes = c->irq.Attributes;
- config->AssignedIRQ = s->pcmcia_irq;
- config->BasePort1 = c->io.BasePort1;
- config->NumPorts1 = c->io.NumPorts1;
- config->Attributes1 = c->io.Attributes1;
- config->BasePort2 = c->io.BasePort2;
- config->NumPorts2 = c->io.NumPorts2;
- config->Attributes2 = c->io.Attributes2;
- config->IOAddrLines = c->io.IOAddrLines;
-
- return 0;
-} /* pccard_get_configuration_info */
-
-
-/*======================================================================
-
- These manage a ring buffer of events pending for one user process
-
-======================================================================*/
-
-
-static int queue_empty(user_info_t *user)
-{
- return (user->event_head == user->event_tail);
-}
-
-static event_t get_queued_event(user_info_t *user)
-{
- user->event_tail = (user->event_tail+1) % MAX_EVENTS;
- return user->event[user->event_tail];
-}
-
-static void queue_event(user_info_t *user, event_t event)
-{
- user->event_head = (user->event_head+1) % MAX_EVENTS;
- if (user->event_head == user->event_tail)
- user->event_tail = (user->event_tail+1) % MAX_EVENTS;
- user->event[user->event_head] = event;
-}
-
-void handle_event(struct pcmcia_socket *s, event_t event)
-{
- user_info_t *user;
- for (user = s->user; user; user = user->next)
- queue_event(user, event);
- wake_up_interruptible(&s->queue);
-}
-
-
-/*======================================================================
-
- bind_request() and bind_device() are merged by now. Register_client()
- is called right at the end of bind_request(), during the driver's
- ->attach() call. Individual descriptions:
-
- bind_request() connects a socket to a particular client driver.
- It looks up the specified device ID in the list of registered
- drivers, binds it to the socket, and tries to create an instance
- of the device. unbind_request() deletes a driver instance.
-
- Bind_device() associates a device driver with a particular socket.
- It is normally called by Driver Services after it has identified
- a newly inserted card. An instance of that driver will then be
- eligible to register as a client of this socket.
-
- Register_client() uses the dev_info_t handle to match the
- caller with a socket. The driver must have already been bound
- to a socket with bind_device() -- in fact, bind_device()
- allocates the client structure that will be used.
-
-======================================================================*/
-
-static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
-{
- struct pcmcia_driver *p_drv;
- struct pcmcia_device *p_dev;
- int ret = 0;
-
- s = pcmcia_get_socket(s);
- if (!s)
- return -EINVAL;
-
- pr_debug("bind_request(%d, '%s')\n", s->sock,
- (char *)bind_info->dev_info);
-
- p_drv = get_pcmcia_driver(&bind_info->dev_info);
- if (!p_drv) {
- ret = -EINVAL;
- goto err_put;
- }
-
- if (!try_module_get(p_drv->owner)) {
- ret = -EINVAL;
- goto err_put_driver;
- }
-
- mutex_lock(&s->ops_mutex);
- list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
- if (p_dev->func == bind_info->function) {
- if ((p_dev->dev.driver == &p_drv->drv)) {
- if (p_dev->cardmgr) {
- /* if there's already a device
- * registered, and it was registered
- * by userspace before, we need to
- * return the "instance". */
- mutex_unlock(&s->ops_mutex);
- bind_info->instance = p_dev;
- ret = -EBUSY;
- goto err_put_module;
- } else {
- /* the correct driver managed to bind
- * itself magically to the correct
- * device. */
- mutex_unlock(&s->ops_mutex);
- p_dev->cardmgr = p_drv;
- ret = 0;
- goto err_put_module;
- }
- } else if (!p_dev->dev.driver) {
- /* there's already a device available where
- * no device has been bound to yet. So we don't
- * need to register a device! */
- mutex_unlock(&s->ops_mutex);
- goto rescan;
- }
- }
- }
- mutex_unlock(&s->ops_mutex);
-
- p_dev = pcmcia_device_add(s, bind_info->function);
- if (!p_dev) {
- ret = -EIO;
- goto err_put_module;
- }
-
-rescan:
- p_dev->cardmgr = p_drv;
-
- /* if a driver is already running, we can abort */
- if (p_dev->dev.driver)
- goto err_put_module;
-
- /*
- * Prevent this racing with a card insertion.
- */
- mutex_lock(&s->skt_mutex);
- ret = bus_rescan_devices(&pcmcia_bus_type);
- mutex_unlock(&s->skt_mutex);
- if (ret)
- goto err_put_module;
-
- /* check whether the driver indeed matched. I don't care if this
- * is racy or not, because it can only happen on cardmgr access
- * paths...
- */
- if (!(p_dev->dev.driver == &p_drv->drv))
- p_dev->cardmgr = NULL;
-
- err_put_module:
- module_put(p_drv->owner);
- err_put_driver:
- put_driver(&p_drv->drv);
- err_put:
- pcmcia_put_socket(s);
-
- return ret;
-} /* bind_request */
-
-#ifdef CONFIG_CARDBUS
-
-static struct pci_bus *pcmcia_lookup_bus(struct pcmcia_socket *s)
-{
- if (!s || !(s->state & SOCKET_CARDBUS))
- return NULL;
-
- return s->cb_dev->subordinate;
-}
-#endif
-
-static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int first)
-{
- struct pcmcia_device *p_dev;
- struct pcmcia_driver *p_drv;
- int ret = 0;
-
-#ifdef CONFIG_CARDBUS
- /*
- * Some unbelievably ugly code to associate the PCI cardbus
- * device and its driver with the PCMCIA "bind" information.
- */
- {
- struct pci_bus *bus;
-
- bus = pcmcia_lookup_bus(s);
- if (bus) {
- struct list_head *list;
- struct pci_dev *dev = NULL;
-
- list = bus->devices.next;
- while (list != &bus->devices) {
- struct pci_dev *pdev = pci_dev_b(list);
- list = list->next;
-
- if (first) {
- dev = pdev;
- break;
- }
-
- /* Try to handle "next" here some way? */
- }
- if (dev && dev->driver) {
- strlcpy(bind_info->name, dev->driver->name, DEV_NAME_LEN);
- bind_info->major = 0;
- bind_info->minor = 0;
- bind_info->next = NULL;
- return 0;
- }
- }
- }
-#endif
-
- mutex_lock(&s->ops_mutex);
- list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
- if (p_dev->func == bind_info->function) {
- p_dev = pcmcia_get_dev(p_dev);
- if (!p_dev)
- continue;
- goto found;
- }
- }
- mutex_unlock(&s->ops_mutex);
- return -ENODEV;
-
- found:
- mutex_unlock(&s->ops_mutex);
-
- p_drv = to_pcmcia_drv(p_dev->dev.driver);
- if (p_drv && !p_dev->_locked) {
- ret = -EAGAIN;
- goto err_put;
- }
-
- if (!first) {
- ret = -ENODEV;
- goto err_put;
- }
-
- strlcpy(bind_info->name, dev_name(&p_dev->dev), DEV_NAME_LEN);
- bind_info->next = NULL;
-
- err_put:
- pcmcia_put_dev(p_dev);
- return ret;
-} /* get_device_info */
-
-
-static int ds_open(struct inode *inode, struct file *file)
-{
- socket_t i = iminor(inode);
- struct pcmcia_socket *s;
- user_info_t *user;
- static int warning_printed;
- int ret = 0;
-
- pr_debug("ds_open(socket %d)\n", i);
-
- lock_kernel();
- s = pcmcia_get_socket_by_nr(i);
- if (!s) {
- ret = -ENODEV;
- goto out;
- }
- s = pcmcia_get_socket(s);
- if (!s) {
- ret = -ENODEV;
- goto out;
- }
-
- if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
- if (s->pcmcia_state.busy) {
- pcmcia_put_socket(s);
- ret = -EBUSY;
- goto out;
- }
- else
- s->pcmcia_state.busy = 1;
- }
-
- user = kmalloc(sizeof(user_info_t), GFP_KERNEL);
- if (!user) {
- pcmcia_put_socket(s);
- ret = -ENOMEM;
- goto out;
- }
- user->event_tail = user->event_head = 0;
- user->next = s->user;
- user->user_magic = USER_MAGIC;
- user->socket = s;
- s->user = user;
- file->private_data = user;
-
- if (!warning_printed) {
- printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
- "usage from process: %s.\n", current->comm);
- printk(KERN_INFO "pcmcia: This interface will soon be removed from "
- "the kernel; please expect breakage unless you upgrade "
- "to new tools.\n");
- printk(KERN_INFO "pcmcia: see http://www.kernel.org/pub/linux/"
- "utils/kernel/pcmcia/pcmcia.html for details.\n");
- warning_printed = 1;
- }
-
- if (atomic_read(&s->present))
- queue_event(user, CS_EVENT_CARD_INSERTION);
-out:
- unlock_kernel();
- return ret;
-} /* ds_open */
-
-/*====================================================================*/
-
-static int ds_release(struct inode *inode, struct file *file)
-{
- struct pcmcia_socket *s;
- user_info_t *user, **link;
-
- pr_debug("ds_release(socket %d)\n", iminor(inode));
-
- user = file->private_data;
- if (CHECK_USER(user))
- goto out;
-
- s = user->socket;
-
- /* Unlink user data structure */
- if ((file->f_flags & O_ACCMODE) != O_RDONLY)
- s->pcmcia_state.busy = 0;
-
- file->private_data = NULL;
- for (link = &s->user; *link; link = &(*link)->next)
- if (*link == user)
- break;
- if (link == NULL)
- goto out;
- *link = user->next;
- user->user_magic = 0;
- kfree(user);
- pcmcia_put_socket(s);
-out:
- return 0;
-} /* ds_release */
-
-/*====================================================================*/
-
-static ssize_t ds_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct pcmcia_socket *s;
- user_info_t *user;
- int ret;
-
- pr_debug("ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode));
-
- if (count < 4)
- return -EINVAL;
-
- user = file->private_data;
- if (CHECK_USER(user))
- return -EIO;
-
- s = user->socket;
- ret = wait_event_interruptible(s->queue, !queue_empty(user));
- if (ret == 0)
- ret = put_user(get_queued_event(user), (int __user *)buf) ? -EFAULT : 4;
-
- return ret;
-} /* ds_read */
-
-/*====================================================================*/
-
-static ssize_t ds_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- pr_debug("ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode));
-
- if (count != 4)
- return -EINVAL;
- if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return -EBADF;
-
- return -EIO;
-} /* ds_write */
-
-/*====================================================================*/
-
-/* No kernel lock - fine */
-static u_int ds_poll(struct file *file, poll_table *wait)
-{
- struct pcmcia_socket *s;
- user_info_t *user;
-
- pr_debug("ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode));
-
- user = file->private_data;
- if (CHECK_USER(user))
- return POLLERR;
- s = user->socket;
- /*
- * We don't check for a dead socket here since that
- * will send cardmgr into an endless spin.
- */
- poll_wait(file, &s->queue, wait);
- if (!queue_empty(user))
- return POLLIN | POLLRDNORM;
- return 0;
-} /* ds_poll */
-
-/*====================================================================*/
-
-static int ds_ioctl(struct file *file, u_int cmd, u_long arg)
-{
- struct pcmcia_socket *s;
- void __user *uarg = (char __user *)arg;
- u_int size;
- int ret, err;
- ds_ioctl_arg_t *buf;
- user_info_t *user;
-
- pr_debug("ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg);
-
- user = file->private_data;
- if (CHECK_USER(user))
- return -EIO;
-
- s = user->socket;
-
- size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
- if (size > sizeof(ds_ioctl_arg_t))
- return -EINVAL;
-
- /* Permission check */
- if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (cmd & IOC_IN) {
- if (!access_ok(VERIFY_READ, uarg, size)) {
- pr_debug("ds_ioctl(): verify_read = %d\n", -EFAULT);
- return -EFAULT;
- }
- }
- if (cmd & IOC_OUT) {
- if (!access_ok(VERIFY_WRITE, uarg, size)) {
- pr_debug("ds_ioctl(): verify_write = %d\n", -EFAULT);
- return -EFAULT;
- }
- }
- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = ret = 0;
-
- if (cmd & IOC_IN) {
- if (__copy_from_user((char *)buf, uarg, size)) {
- err = -EFAULT;
- goto free_out;
- }
- }
-
- switch (cmd) {
- case DS_ADJUST_RESOURCE_INFO:
- ret = pcmcia_adjust_resource_info(&buf->adjust);
- break;
- case DS_GET_CONFIGURATION_INFO:
- if (buf->config.Function &&
- (buf->config.Function >= s->functions))
- ret = -EINVAL;
- else {
- struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
- ret = pccard_get_configuration_info(s, p_dev, &buf->config);
- pcmcia_put_dev(p_dev);
- }
- break;
- case DS_GET_FIRST_TUPLE:
- mutex_lock(&s->skt_mutex);
- pcmcia_validate_mem(s);
- mutex_unlock(&s->skt_mutex);
- ret = pccard_get_first_tuple(s, BIND_FN_ALL, &buf->tuple);
- break;
- case DS_GET_NEXT_TUPLE:
- ret = pccard_get_next_tuple(s, BIND_FN_ALL, &buf->tuple);
- break;
- case DS_GET_TUPLE_DATA:
- buf->tuple.TupleData = buf->tuple_parse.data;
- buf->tuple.TupleDataMax = sizeof(buf->tuple_parse.data);
- ret = pccard_get_tuple_data(s, &buf->tuple);
- break;
- case DS_PARSE_TUPLE:
- buf->tuple.TupleData = buf->tuple_parse.data;
- ret = pcmcia_parse_tuple(&buf->tuple, &buf->tuple_parse.parse);
- break;
- case DS_RESET_CARD:
- ret = pcmcia_reset_card(s);
- break;
- case DS_GET_STATUS:
- if (buf->status.Function &&
- (buf->status.Function >= s->functions))
- ret = -EINVAL;
- else {
- struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
- ret = pccard_get_status(s, p_dev, &buf->status);
- pcmcia_put_dev(p_dev);
- }
- break;
- case DS_VALIDATE_CIS:
- mutex_lock(&s->skt_mutex);
- pcmcia_validate_mem(s);
- mutex_unlock(&s->skt_mutex);
- ret = pccard_validate_cis(s, &buf->cisinfo.Chains);
- break;
- case DS_SUSPEND_CARD:
- pcmcia_parse_uevents(s, PCMCIA_UEVENT_SUSPEND);
- break;
- case DS_RESUME_CARD:
- pcmcia_parse_uevents(s, PCMCIA_UEVENT_RESUME);
- break;
- case DS_EJECT_CARD:
- pcmcia_parse_uevents(s, PCMCIA_UEVENT_EJECT);
- break;
- case DS_INSERT_CARD:
- pcmcia_parse_uevents(s, PCMCIA_UEVENT_INSERT);
- break;
- case DS_ACCESS_CONFIGURATION_REGISTER:
- if ((buf->conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto free_out;
- }
-
- ret = -EINVAL;
-
- if (!(buf->conf_reg.Function &&
- (buf->conf_reg.Function >= s->functions))) {
- struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
- if (p_dev) {
- ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
- pcmcia_put_dev(p_dev);
- }
- }
- break;
- case DS_GET_FIRST_REGION:
- case DS_GET_NEXT_REGION:
- case DS_BIND_MTD:
- if (!capable(CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto free_out;
- } else {
- printk_once(KERN_WARNING
- "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
- printk_once(KERN_WARNING "MTD handling any more.\n");
- }
- err = -EINVAL;
- goto free_out;
- break;
- case DS_GET_FIRST_WINDOW:
- ret = pcmcia_get_window(s, &buf->win_info.handle, 1,
- &buf->win_info.window);
- break;
- case DS_GET_NEXT_WINDOW:
- ret = pcmcia_get_window(s, &buf->win_info.handle,
- buf->win_info.handle + 1, &buf->win_info.window);
- break;
- case DS_GET_MEM_PAGE:
- ret = pcmcia_get_mem_page(s, buf->win_info.handle,
- &buf->win_info.map);
- break;
- case DS_REPLACE_CIS:
- ret = pcmcia_replace_cis(s, buf->cisdump.Data, buf->cisdump.Length);
- break;
- case DS_BIND_REQUEST:
- if (!capable(CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto free_out;
- }
- err = bind_request(s, &buf->bind_info);
- break;
- case DS_GET_DEVICE_INFO:
- err = get_device_info(s, &buf->bind_info, 1);
- break;
- case DS_GET_NEXT_DEVICE:
- err = get_device_info(s, &buf->bind_info, 0);
- break;
- case DS_UNBIND_REQUEST:
- err = 0;
- break;
- default:
- err = -EINVAL;
- }
-
- if ((err == 0) && (ret != 0)) {
- pr_debug("ds_ioctl: ret = %d\n", ret);
- switch (ret) {
- case -ENODEV:
- case -EINVAL:
- case -EBUSY:
- case -ENOSYS:
- err = ret;
- break;
- case -ENOMEM:
- err = -ENOSPC; break;
- case -ENOSPC:
- err = -ENODATA; break;
- default:
- err = -EIO; break;
- }
- }
-
- if (cmd & IOC_OUT) {
- if (__copy_to_user(uarg, (char *)buf, size))
- err = -EFAULT;
- }
-
-free_out:
- kfree(buf);
- return err;
-} /* ds_ioctl */
-
-static long ds_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- lock_kernel();
- ret = ds_ioctl(file, cmd, arg);
- unlock_kernel();
-
- return ret;
-}
-
-
-/*====================================================================*/
-
-static const struct file_operations ds_fops = {
- .owner = THIS_MODULE,
- .open = ds_open,
- .release = ds_release,
- .unlocked_ioctl = ds_unlocked_ioctl,
- .read = ds_read,
- .write = ds_write,
- .poll = ds_poll,
-};
-
-void __init pcmcia_setup_ioctl(void)
-{
- int i;
-
- /* Set up character device for user mode clients */
- i = register_chrdev(0, "pcmcia", &ds_fops);
- if (i < 0)
- printk(KERN_NOTICE "unable to find a free device # for "
- "Driver Services (error=%d)\n", i);
- else
- major_dev = i;
-
-#ifdef CONFIG_PROC_FS
- proc_pccard = proc_mkdir("bus/pccard", NULL);
- if (proc_pccard)
- proc_create("drivers", 0, proc_pccard, &pccard_drivers_proc_fops);
-#endif
-}
-
-
-void __exit pcmcia_cleanup_ioctl(void)
-{
-#ifdef CONFIG_PROC_FS
- if (proc_pccard) {
- remove_proc_entry("drivers", proc_pccard);
- remove_proc_entry("bus/pccard", NULL);
- }
-#endif
- if (major_dev != -1)
- unregister_chrdev(major_dev, "pcmcia");
-}
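
With pcmcia_ioctl.c deleted, the DS_EJECT_CARD/DS_INSERT_CARD/DS_SUSPEND_CARD commands above are gone; the removed handlers only forwarded to pcmcia_parse_uevents(). A hedged sketch of what a userspace tool might do instead is shown below — the card_eject attribute name and the /sys/class/pcmcia_socket/pcmcia_socket<N>/ path are assumptions, not established by this diff.

/*
 * Sketch of post-ioctl card ejection.  The attribute name and path
 * (/sys/class/pcmcia_socket/pcmcia_socket<N>/card_eject) are assumed,
 * not shown in this diff.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[128];
	FILE *f;
	int socket_nr = (argc > 1) ? atoi(argv[1]) : 0;

	snprintf(path, sizeof(path),
		 "/sys/class/pcmcia_socket/pcmcia_socket%d/card_eject",
		 socket_nr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/* assumed to trigger the eject path formerly reached via DS_EJECT_CARD */
	fputs("1\n", f);
	fclose(f);
	return 0;
}
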
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index a4cd9adfcbc..9ba4dade69a 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
@@ -57,128 +56,171 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
}
+static void release_io_space(struct pcmcia_socket *s, struct resource *res)
+{
+ resource_size_t num = resource_size(res);
+ int i;
+
+ dev_dbg(&s->dev, "release_io_space for %pR\n", res);
+
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+ if ((s->io[i].res->start <= res->start) &&
+ (s->io[i].res->end >= res->end)) {
+ s->io[i].InUse -= num;
+ if (res->parent)
+ release_resource(res);
+ res->start = res->end = 0;
+ res->flags = IORESOURCE_IO;
+ /* Free the window if no one else is using it */
+ if (s->io[i].InUse == 0) {
+ release_resource(s->io[i].res);
+ kfree(s->io[i].res);
+ s->io[i].res = NULL;
+ }
+ }
+ }
+} /* release_io_space */
+
/** alloc_io_space
*
* Special stuff for managing IO windows, because they are scarce
*/
-
-static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
- unsigned int *base, unsigned int num, u_int lines)
+static int alloc_io_space(struct pcmcia_socket *s, struct resource *res,
+ unsigned int lines)
{
unsigned int align;
+ unsigned int base = res->start;
+ unsigned int num = res->end;
+ int ret;
- align = (*base) ? (lines ? 1<<lines : 0) : 1;
+ res->flags |= IORESOURCE_IO;
+
+ dev_dbg(&s->dev, "alloc_io_space request for %pR, %d lines\n",
+ res, lines);
+
+ align = base ? (lines ? 1<<lines : 0) : 1;
if (align && (align < num)) {
- if (*base) {
- dev_dbg(&s->dev, "odd IO request: num %#x align %#x\n",
- num, align);
+ if (base) {
+ dev_dbg(&s->dev, "odd IO request\n");
align = 0;
} else
while (align && (align < num))
align <<= 1;
}
- if (*base & ~(align-1)) {
- dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n",
- *base, align);
+ if (base & ~(align-1)) {
+ dev_dbg(&s->dev, "odd IO request\n");
align = 0;
}
- return s->resource_ops->find_io(s, attr, base, num, align);
-} /* alloc_io_space */
-
+ ret = s->resource_ops->find_io(s, res->flags, &base, num, align,
+ &res->parent);
+ if (ret) {
+ dev_dbg(&s->dev, "alloc_io_space request failed (%d)\n", ret);
+ return -EINVAL;
+ }
-static void release_io_space(struct pcmcia_socket *s, unsigned int base,
- unsigned int num)
-{
- int i;
+ res->start = base;
+ res->end = res->start + num - 1;
- for (i = 0; i < MAX_IO_WIN; i++) {
- if (!s->io[i].res)
- continue;
- if ((s->io[i].res->start <= base) &&
- (s->io[i].res->end >= base+num-1)) {
- s->io[i].InUse -= num;
- /* Free the window if no one else is using it */
- if (s->io[i].InUse == 0) {
- release_resource(s->io[i].res);
- kfree(s->io[i].res);
- s->io[i].res = NULL;
- }
+ if (res->parent) {
+ ret = request_resource(res->parent, res);
+ if (ret) {
+ dev_warn(&s->dev,
+ "request_resource %pR failed: %d\n", res, ret);
+ res->parent = NULL;
+ release_io_space(s, res);
}
}
-} /* release_io_space */
+ dev_dbg(&s->dev, "alloc_io_space request result %d: %pR\n", ret, res);
+ return ret;
+} /* alloc_io_space */
-/** pccard_access_configuration_register
+/**
+ * pcmcia_access_config() - read or write card configuration registers
*
- * Access_configuration_register() reads and writes configuration
- * registers in attribute memory. Memory window 0 is reserved for
- * this and the tuple reading services.
+ * pcmcia_access_config() reads and writes configuration registers in
+ * attribute memory. Memory window 0 is reserved for this and the tuple
+ * reading services. Drivers must use pcmcia_read_config_byte() or
+ * pcmcia_write_config_byte().
*/
-
-int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
- conf_reg_t *reg)
+static int pcmcia_access_config(struct pcmcia_device *p_dev,
+ off_t where, u8 *val,
+ int (*accessf) (struct pcmcia_socket *s,
+ int attr, unsigned int addr,
+ unsigned int len, void *ptr))
{
struct pcmcia_socket *s;
config_t *c;
int addr;
- u_char val;
int ret = 0;
- if (!p_dev || !p_dev->function_config)
- return -EINVAL;
-
s = p_dev->socket;
mutex_lock(&s->ops_mutex);
c = p_dev->function_config;
if (!(c->state & CONFIG_LOCKED)) {
- dev_dbg(&s->dev, "Configuration isnt't locked\n");
+ dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
mutex_unlock(&s->ops_mutex);
return -EACCES;
}
- addr = (c->ConfigBase + reg->Offset) >> 1;
+ addr = (c->ConfigBase + where) >> 1;
+
+ ret = accessf(s, 1, addr, 1, val);
- switch (reg->Action) {
- case CS_READ:
- ret = pcmcia_read_cis_mem(s, 1, addr, 1, &val);
- reg->Value = val;
- break;
- case CS_WRITE:
- val = reg->Value;
- pcmcia_write_cis_mem(s, 1, addr, 1, &val);
- break;
- default:
- dev_dbg(&s->dev, "Invalid conf register request\n");
- ret = -EINVAL;
- break;
- }
mutex_unlock(&s->ops_mutex);
+
return ret;
-} /* pcmcia_access_configuration_register */
-EXPORT_SYMBOL(pcmcia_access_configuration_register);
+} /* pcmcia_access_config */
+
+
+/**
+ * pcmcia_read_config_byte() - read a byte from a card configuration register
+ *
+ * pcmcia_read_config_byte() reads a byte from a configuration register in
+ * attribute memory.
+ */
+int pcmcia_read_config_byte(struct pcmcia_device *p_dev, off_t where, u8 *val)
+{
+ return pcmcia_access_config(p_dev, where, val, pcmcia_read_cis_mem);
+}
+EXPORT_SYMBOL(pcmcia_read_config_byte);
+
+
+/**
+ * pcmcia_write_config_byte() - write a byte to a card configuration register
+ *
+ * pcmcia_write_config_byte() writes a byte to a configuration register in
+ * attribute memory.
+ */
+int pcmcia_write_config_byte(struct pcmcia_device *p_dev, off_t where, u8 val)
+{
+ return pcmcia_access_config(p_dev, where, &val, pcmcia_write_cis_mem);
+}
+EXPORT_SYMBOL(pcmcia_write_config_byte);
int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
- memreq_t *req)
+ unsigned int offset)
{
struct pcmcia_socket *s = p_dev->socket;
+ struct resource *res = wh;
+ unsigned int w;
int ret;
- wh--;
- if (wh >= MAX_WIN)
+ w = ((res->flags & IORESOURCE_BITS & WIN_FLAGS_REQ) >> 2) - 1;
+ if (w >= MAX_WIN)
return -EINVAL;
- if (req->Page != 0) {
- dev_dbg(&s->dev, "failure: requested page is zero\n");
- return -EINVAL;
- }
+
mutex_lock(&s->ops_mutex);
- s->win[wh].card_start = req->CardOffset;
- ret = s->ops->set_mem_map(s, &s->win[wh]);
+ s->win[w].card_start = offset;
+ ret = s->ops->set_mem_map(s, &s->win[w]);
if (ret)
- dev_warn(&s->dev, "failed to set_mem_map\n");
+ dev_warn(&p_dev->dev, "failed to set_mem_map\n");
mutex_unlock(&s->ops_mutex);
return ret;
} /* pcmcia_map_mem_page */
@@ -202,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
c = p_dev->function_config;
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "No card present\n");
ret = -ENODEV;
goto unlock;
}
if (!(c->state & CONFIG_LOCKED)) {
- dev_dbg(&s->dev, "Configuration isnt't locked\n");
+ dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
ret = -EACCES;
goto unlock;
}
if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
- dev_dbg(&s->dev,
+ dev_dbg(&p_dev->dev,
"changing Vcc or IRQ is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
@@ -223,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
if (mod->Vpp1 != mod->Vpp2) {
- dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
+ dev_dbg(&p_dev->dev,
+ "Vpp1 and Vpp2 must be the same\n");
ret = -EINVAL;
goto unlock;
}
s->socket.Vpp = mod->Vpp1;
if (s->ops->set_socket(s, &s->socket)) {
- dev_printk(KERN_WARNING, &s->dev,
+ dev_printk(KERN_WARNING, &p_dev->dev,
"Unable to set VPP\n");
ret = -EIO;
goto unlock;
}
} else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
- dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+ dev_dbg(&p_dev->dev,
+ "changing Vcc is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
@@ -316,31 +360,25 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
* don't bother checking the port ranges against the current socket
* values.
*/
-static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
+static int pcmcia_release_io(struct pcmcia_device *p_dev)
{
struct pcmcia_socket *s = p_dev->socket;
int ret = -EINVAL;
config_t *c;
mutex_lock(&s->ops_mutex);
- c = p_dev->function_config;
-
if (!p_dev->_io)
goto out;
- p_dev->_io = 0;
+ c = p_dev->function_config;
- if ((c->io.BasePort1 != req->BasePort1) ||
- (c->io.NumPorts1 != req->NumPorts1) ||
- (c->io.BasePort2 != req->BasePort2) ||
- (c->io.NumPorts2 != req->NumPorts2))
- goto out;
+ release_io_space(s, &c->io[0]);
- c->state &= ~CONFIG_IO_REQ;
+ if (c->io[1].end)
+ release_io_space(s, &c->io[1]);
- release_io_space(s, req->BasePort1, req->NumPorts1);
- if (req->NumPorts2)
- release_io_space(s, req->BasePort2, req->NumPorts2);
+ p_dev->_io = 0;
+ c->state &= ~CONFIG_IO_REQ;
out:
mutex_unlock(&s->ops_mutex);
@@ -349,20 +387,23 @@ out:
} /* pcmcia_release_io */
-int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
+int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
{
struct pcmcia_socket *s = p_dev->socket;
pccard_mem_map *win;
+ unsigned int w;
+
+ dev_dbg(&p_dev->dev, "releasing window %pR\n", res);
- wh--;
- if (wh >= MAX_WIN)
+ w = ((res->flags & IORESOURCE_BITS & WIN_FLAGS_REQ) >> 2) - 1;
+ if (w >= MAX_WIN)
return -EINVAL;
mutex_lock(&s->ops_mutex);
- win = &s->win[wh];
+ win = &s->win[w];
- if (!(p_dev->_win & CLIENT_WIN_REQ(wh))) {
- dev_dbg(&s->dev, "not releasing unknown window\n");
+ if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
+ dev_dbg(&p_dev->dev, "not releasing unknown window\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -370,15 +411,16 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
/* Shut down memory window */
win->flags &= ~MAP_ACTIVE;
s->ops->set_mem_map(s, win);
- s->state &= ~SOCKET_WIN_REQ(wh);
+ s->state &= ~SOCKET_WIN_REQ(w);
/* Release system memory */
if (win->res) {
+ release_resource(res);
release_resource(win->res);
kfree(win->res);
win->res = NULL;
}
- p_dev->_win &= ~CLIENT_WIN_REQ(wh);
+ p_dev->_win &= ~CLIENT_WIN_REQ(w);
mutex_unlock(&s->ops_mutex);
return 0;
@@ -399,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
return -ENODEV;
if (req->IntType & INT_CARDBUS) {
- dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
+ dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
return -EINVAL;
}
@@ -407,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
c = p_dev->function_config;
if (c->state & CONFIG_LOCKED) {
mutex_unlock(&s->ops_mutex);
- dev_dbg(&s->dev, "Configuration is locked\n");
+ dev_dbg(&p_dev->dev, "Configuration is locked\n");
return -EACCES;
}
@@ -415,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
s->socket.Vpp = req->Vpp;
if (s->ops->set_socket(s, &s->socket)) {
mutex_unlock(&s->ops_mutex);
- dev_printk(KERN_WARNING, &s->dev,
+ dev_printk(KERN_WARNING, &p_dev->dev,
"Unable to set socket state\n");
return -EINVAL;
}
@@ -473,13 +515,13 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
pcmcia_write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1, &c->ExtStatus);
}
if (req->Present & PRESENT_IOBASE_0) {
- u_char b = c->io.BasePort1 & 0xff;
+ u8 b = c->io[0].start & 0xff;
pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_0)>>1, 1, &b);
- b = (c->io.BasePort1 >> 8) & 0xff;
+ b = (c->io[0].start >> 8) & 0xff;
pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_1)>>1, 1, &b);
}
if (req->Present & PRESENT_IOSIZE) {
- u_char b = c->io.NumPorts1 + c->io.NumPorts2 - 1;
+ u8 b = resource_size(&c->io[0]) + resource_size(&c->io[1]) - 1;
pcmcia_write_cis_mem(s, 1, (base + CISREG_IOSIZE)>>1, 1, &b);
}
@@ -513,70 +555,63 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
EXPORT_SYMBOL(pcmcia_request_configuration);
-/** pcmcia_request_io
+/**
+ * pcmcia_request_io() - attempt to reserve port ranges for PCMCIA devices
*
- * Request_io() reserves ranges of port addresses for a socket.
- * I have not implemented range sharing or alias addressing.
+ * pcmcia_request_io() attempts to reserve the IO port ranges specified in
+ * &struct pcmcia_device @p_dev->resource[0] and @p_dev->resource[1]. The
+ * "start" value is the requested start of the IO port resource; "end"
+ * reflects the number of ports requested. The number of IO lines requested
+ * is specified in &struct pcmcia_device @p_dev->io_lines.
*/
-int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
+int pcmcia_request_io(struct pcmcia_device *p_dev)
{
struct pcmcia_socket *s = p_dev->socket;
- config_t *c;
+ config_t *c = p_dev->function_config;
int ret = -EINVAL;
mutex_lock(&s->ops_mutex);
+ dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
+ &c->io[0], &c->io[1]);
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
goto out;
}
- if (!req)
- goto out;
-
- c = p_dev->function_config;
if (c->state & CONFIG_LOCKED) {
- dev_dbg(&s->dev, "Configuration is locked\n");
+ dev_dbg(&p_dev->dev, "Configuration is locked\n");
goto out;
}
if (c->state & CONFIG_IO_REQ) {
- dev_dbg(&s->dev, "IO already configured\n");
- goto out;
- }
- if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) {
- dev_dbg(&s->dev, "bad attribute setting for IO region 1\n");
- goto out;
- }
- if ((req->NumPorts2 > 0) &&
- (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) {
- dev_dbg(&s->dev, "bad attribute setting for IO region 2\n");
+ dev_dbg(&p_dev->dev, "IO already configured\n");
goto out;
}
- dev_dbg(&s->dev, "trying to allocate resource 1\n");
- ret = alloc_io_space(s, req->Attributes1, &req->BasePort1,
- req->NumPorts1, req->IOAddrLines);
- if (ret) {
- dev_dbg(&s->dev, "allocation of resource 1 failed\n");
+ ret = alloc_io_space(s, &c->io[0], p_dev->io_lines);
+ if (ret)
goto out;
- }
- if (req->NumPorts2) {
- dev_dbg(&s->dev, "trying to allocate resource 2\n");
- ret = alloc_io_space(s, req->Attributes2, &req->BasePort2,
- req->NumPorts2, req->IOAddrLines);
+ if (c->io[1].end) {
+ ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
if (ret) {
- dev_dbg(&s->dev, "allocation of resource 2 failed\n");
- release_io_space(s, req->BasePort1, req->NumPorts1);
+ struct resource tmp = c->io[0];
+ /* release the previously allocated resource */
+ release_io_space(s, &c->io[0]);
+ /* but preserve the settings, for they worked... */
+ c->io[0].end = resource_size(&tmp);
+ c->io[0].start = tmp.start;
+ c->io[0].flags = tmp.flags;
goto out;
}
- }
+ } else
+ c->io[1].start = 0;
- c->io = *req;
c->state |= CONFIG_IO_REQ;
p_dev->_io = 1;
- dev_dbg(&s->dev, "allocating resources succeeded: %d\n", ret);
+ dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
+ &c->io[0], &c->io[1]);
out:
mutex_unlock(&s->ops_mutex);
@@ -651,7 +686,7 @@ EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);
#ifdef CONFIG_PCMCIA_PROBE
/* mask of IRQs already reserved by other cards, we should avoid using them */
-static u8 pcmcia_used_irq[NR_IRQS];
+static u8 pcmcia_used_irq[32];
static irqreturn_t test_action(int cpl, void *dev_id)
{
@@ -674,6 +709,9 @@ static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
for (try = 0; try < 64; try++) {
irq = try % 32;
+ if (irq > NR_IRQS)
+ continue;
+
/* marked as available by driver, not blocked by userspace? */
if (!((mask >> irq) & 1))
continue;
@@ -767,52 +805,48 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
struct pcmcia_socket *s = p_dev->socket;
pccard_mem_map *win;
u_long align;
+ struct resource *res;
int w;
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "No card present\n");
return -ENODEV;
}
- if (req->Attributes & (WIN_PAGED | WIN_SHARED)) {
- dev_dbg(&s->dev, "bad attribute setting for iomem region\n");
- return -EINVAL;
- }
/* Window size defaults to smallest available */
if (req->Size == 0)
req->Size = s->map_size;
- align = (((s->features & SS_CAP_MEM_ALIGN) ||
- (req->Attributes & WIN_STRICT_ALIGN)) ?
- req->Size : s->map_size);
+ align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
if (req->Size & (s->map_size-1)) {
- dev_dbg(&s->dev, "invalid map size\n");
+ dev_dbg(&p_dev->dev, "invalid map size\n");
return -EINVAL;
}
if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
(req->Base & (align-1))) {
- dev_dbg(&s->dev, "invalid base address\n");
+ dev_dbg(&p_dev->dev, "invalid base address\n");
return -EINVAL;
}
if (req->Base)
align = 0;
/* Allocate system memory window */
+ mutex_lock(&s->ops_mutex);
for (w = 0; w < MAX_WIN; w++)
if (!(s->state & SOCKET_WIN_REQ(w)))
break;
if (w == MAX_WIN) {
- dev_dbg(&s->dev, "all windows are used already\n");
+ dev_dbg(&p_dev->dev, "all windows are used already\n");
+ mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
- mutex_lock(&s->ops_mutex);
win = &s->win[w];
if (!(s->features & SS_CAP_STATIC_MAP)) {
win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
- (req->Attributes & WIN_MAP_BELOW_1MB), s);
+ 0, s);
if (!win->res) {
- dev_dbg(&s->dev, "allocating mem region failed\n");
+ dev_dbg(&p_dev->dev, "allocating mem region failed\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -821,20 +855,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
/* Configure the socket controller */
win->map = w+1;
- win->flags = 0;
+ win->flags = req->Attributes;
win->speed = req->AccessSpeed;
- if (req->Attributes & WIN_MEMORY_TYPE)
- win->flags |= MAP_ATTRIB;
- if (req->Attributes & WIN_ENABLE)
- win->flags |= MAP_ACTIVE;
- if (req->Attributes & WIN_DATA_WIDTH_16)
- win->flags |= MAP_16BIT;
- if (req->Attributes & WIN_USE_WAIT)
- win->flags |= MAP_USE_WAIT;
win->card_start = 0;
if (s->ops->set_mem_map(s, win) != 0) {
- dev_dbg(&s->dev, "failed to set memory mapping\n");
+ dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
mutex_unlock(&s->ops_mutex);
return -EIO;
}
@@ -846,8 +872,21 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
else
req->Base = win->res->start;
+ /* convert to new-style resources */
+ res = p_dev->resource[w + MAX_IO_WIN];
+ res->start = req->Base;
+ res->end = req->Base + req->Size - 1;
+ res->flags &= ~IORESOURCE_BITS;
+ res->flags |= (req->Attributes & WIN_FLAGS_MAP) | (win->map << 2);
+ res->flags |= IORESOURCE_MEM;
+ res->parent = win->res;
+ if (win->res)
+ request_resource(&iomem_resource, res);
+
+ dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
+
mutex_unlock(&s->ops_mutex);
- *wh = w + 1;
+ *wh = res;
return 0;
} /* pcmcia_request_window */
@@ -855,13 +894,18 @@ EXPORT_SYMBOL(pcmcia_request_window);
void pcmcia_disable_device(struct pcmcia_device *p_dev)
{
+ int i;
+ for (i = 0; i < MAX_WIN; i++) {
+ struct resource *res = p_dev->resource[MAX_IO_WIN + i];
+ if (res->flags & WIN_FLAGS_REQ)
+ pcmcia_release_window(p_dev, res);
+ }
+
pcmcia_release_configuration(p_dev);
- pcmcia_release_io(p_dev, &p_dev->io);
+ pcmcia_release_io(p_dev);
if (p_dev->_irq) {
free_irq(p_dev->irq, p_dev->priv);
p_dev->_irq = 0;
}
- if (p_dev->win)
- pcmcia_release_window(p_dev, p_dev->win);
}
EXPORT_SYMBOL(pcmcia_disable_device);
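
The pcmcia_resource.c hunks above replace the conf_reg_t accessor with pcmcia_read_config_byte()/pcmcia_write_config_byte() and turn pcmcia_request_io() into a struct resource based call driven by p_dev->resource[0]/[1] and p_dev->io_lines. Below is a minimal driver-side sketch of the new calling convention, assuming the usual <pcmcia/ds.h> and <pcmcia/cisreg.h> headers and the customary pcmcia_request_configuration(p_dev, &p_dev->conf) step; the port base, port count, IO-line count and the CISREG_CCSR offset are illustrative choices, not taken from this diff.

#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>

/* Sketch only: exercises the interfaces introduced above, not a full driver. */
static int example_set_io_and_touch_ccsr(struct pcmcia_device *p_dev)
{
	u8 ccsr;
	int ret;

	/* resource[0]: "start" is the requested base, "end" the number of ports */
	p_dev->resource[0]->start = 0x300;	/* example base */
	p_dev->resource[0]->end = 16;		/* example number of ports */
	p_dev->io_lines = 10;			/* example number of IO lines */

	ret = pcmcia_request_io(p_dev);
	if (ret)
		return ret;

	/* lock the configuration; the register helpers require CONFIG_LOCKED */
	ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
	if (ret)
		return ret;

	/* replaces pcmcia_access_configuration_register() with CS_READ ... */
	ret = pcmcia_read_config_byte(p_dev, CISREG_CCSR, &ccsr);
	if (ret)
		return ret;

	/* ... and CS_WRITE; written back unchanged here, purely illustrative */
	return pcmcia_write_config_byte(p_dev, CISREG_CCSR, ccsr);
}
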
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b61a13663a0..deef6656ab7 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
@@ -647,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
if (!pci_resource_start(dev, 0)) {
dev_warn(&dev->dev, "refusing to load the driver as the "
"io_base is NULL.\n");
- goto err_out_free_mem;
+ goto err_out_disable;
}
dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
new file mode 100644
index 00000000000..dbbdd006320
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -0,0 +1,158 @@
+/*
+ * linux/drivers/pcmcia/pxa2xx_balloon3.c
+ *
+ * Balloon3 PCMCIA specific routines.
+ *
+ * Author: Nick Bane
+ * Created: June, 2006
+ * Copyright: Toby Churchill Ltd
+ * Derived from pxa2xx_mainstone.c, by Nico Pitre
+ *
+ * Various modification by Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <mach/balloon3.h>
+
+#include "soc_common.h"
+
+/*
+ * This is the list of interrupt sources that provoke a polled
+ * check of status
+ */
+static struct pcmcia_irqs irqs[] = {
+ { 0, BALLOON3_S0_CD_IRQ, "PCMCIA0 CD" },
+ { 0, BALLOON3_BP_NSTSCHG_IRQ, "PCMCIA0 STSCHG" },
+};
+
+static int balloon3_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
+{
+ uint16_t ver;
+ int ret;
+ static void __iomem *fpga_ver;
+
+ ver = __raw_readw(BALLOON3_FPGA_VER);
+ if (ver > 0x0201)
+ pr_warn("The FPGA code, version 0x%04x, is newer than rel-0.3. "
+ "PCMCIA/CF support might be broken in this version!",
+ ver);
+
+ skt->socket.pci_irq = BALLOON3_BP_CF_NRDY_IRQ;
+ return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
+}
+
+static void balloon3_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
+{
+ soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
+}
+
+static unsigned long balloon3_pcmcia_status[2] = {
+ BALLOON3_CF_nSTSCHG_BVD1,
+ BALLOON3_CF_nSTSCHG_BVD1
+};
+
+static void balloon3_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state)
+{
+ uint16_t status;
+ int flip;
+
+ /* This actually reads the STATUS register */
+ status = __raw_readw(BALLOON3_CF_STATUS_REG);
+ flip = (status ^ balloon3_pcmcia_status[skt->nr])
+ & BALLOON3_CF_nSTSCHG_BVD1;
+ /*
+ * Workaround for STSCHG which can't be deasserted:
+ * We therefore disable/enable corresponding IRQs
+ * as needed to avoid IRQ locks.
+ */
+ if (flip) {
+ balloon3_pcmcia_status[skt->nr] = status;
+ if (status & BALLOON3_CF_nSTSCHG_BVD1)
+ enable_irq(BALLOON3_BP_NSTSCHG_IRQ);
+ else
+ disable_irq(BALLOON3_BP_NSTSCHG_IRQ);
+ }
+
+ state->detect = !gpio_get_value(BALLOON3_GPIO_S0_CD);
+ state->ready = !!(status & BALLOON3_CF_nIRQ);
+ state->bvd1 = !!(status & BALLOON3_CF_nSTSCHG_BVD1);
+ state->bvd2 = 0; /* not available */
+ state->vs_3v = 1; /* Always true, it's a CF card */
+ state->vs_Xv = 0; /* not available */
+ state->wrprot = 0; /* not available */
+}
+
+static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
+ const socket_state_t *state)
+{
+ __raw_writew((state->flags & SS_RESET) ? BALLOON3_CF_RESET : 0,
+ BALLOON3_CF_CONTROL_REG);
+ return 0;
+}
+
+static void balloon3_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
+{
+}
+
+static void balloon3_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+{
+}
+
+static struct pcmcia_low_level balloon3_pcmcia_ops = {
+ .owner = THIS_MODULE,
+ .hw_init = balloon3_pcmcia_hw_init,
+ .hw_shutdown = balloon3_pcmcia_hw_shutdown,
+ .socket_state = balloon3_pcmcia_socket_state,
+ .configure_socket = balloon3_pcmcia_configure_socket,
+ .socket_init = balloon3_pcmcia_socket_init,
+ .socket_suspend = balloon3_pcmcia_socket_suspend,
+ .first = 0,
+ .nr = 1,
+};
+
+static struct platform_device *balloon3_pcmcia_device;
+
+static int __init balloon3_pcmcia_init(void)
+{
+ int ret;
+
+ balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+ if (!balloon3_pcmcia_device)
+ return -ENOMEM;
+
+ ret = platform_device_add_data(balloon3_pcmcia_device,
+ &balloon3_pcmcia_ops, sizeof(balloon3_pcmcia_ops));
+
+ if (!ret)
+ ret = platform_device_add(balloon3_pcmcia_device);
+
+ if (ret)
+ platform_device_put(balloon3_pcmcia_device);
+
+ return ret;
+}
+
+static void __exit balloon3_pcmcia_exit(void)
+{
+ platform_device_unregister(balloon3_pcmcia_device);
+}
+
+module_init(balloon3_pcmcia_init);
+module_exit(balloon3_pcmcia_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nick Bane <nick@cecomputing.co.uk>");
+MODULE_ALIAS("platform:pxa2xx-pcmcia");
+MODULE_DESCRIPTION("Balloon3 board CF/PCMCIA driver");
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index f370476d541..ae07b4db8a6 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -32,7 +32,6 @@
#include <mach/pxa2xx-regs.h>
#include <asm/mach-types.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
index d0bf3502106..8510c35d295 100644
--- a/drivers/pcmcia/rsrc_iodyn.c
+++ b/drivers/pcmcia/rsrc_iodyn.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
@@ -88,7 +87,7 @@ static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
unsigned int *base, unsigned int num,
- unsigned int align)
+ unsigned int align, struct resource **parent)
{
int i, ret = 0;
@@ -129,6 +128,7 @@ static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
((res->flags & ~IORESOURCE_BITS) |
(attr & IORESOURCE_BITS));
s->io[i].InUse = num;
+ *parent = res;
return 0;
}
@@ -140,6 +140,7 @@ static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
continue;
*base = try;
s->io[i].InUse += num;
+ *parent = res;
return 0;
}
@@ -152,6 +153,7 @@ static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
continue;
*base = try;
s->io[i].InUse += num;
+ *parent = res;
return 0;
}
}
@@ -164,8 +166,6 @@ struct pccard_resource_ops pccard_iodyn_ops = {
.validate_mem = NULL,
.find_io = iodyn_find_io,
.find_mem = NULL,
- .add_io = NULL,
- .add_mem = NULL,
.init = static_init,
.exit = NULL,
};
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 142efac3c38..4e80421fd90 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
@@ -48,11 +47,12 @@ struct resource *pcmcia_make_resource(unsigned long start, unsigned long end,
static int static_find_io(struct pcmcia_socket *s, unsigned int attr,
unsigned int *base, unsigned int num,
- unsigned int align)
+ unsigned int align, struct resource **parent)
{
if (!s->io_offset)
return -EINVAL;
*base = s->io_offset | (*base & 0x0fff);
+ *parent = NULL;
return 0;
}
@@ -62,8 +62,6 @@ struct pccard_resource_ops pccard_static_ops = {
.validate_mem = NULL,
.find_io = static_find_io,
.find_mem = NULL,
- .add_io = NULL,
- .add_mem = NULL,
.init = static_init,
.exit = NULL,
};
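
The find_io() callbacks above gain a struct resource **parent out-parameter: dynamic socket code hands back the managed resource the I/O window was carved from (*parent = res), while static sockets report NULL. Below is a standalone model of that contract, with a simplified stand-in for struct resource and a hypothetical caller; it assumes nothing beyond what the hunks above show:

	#include <assert.h>
	#include <stdio.h>

	/* Simplified stand-in for struct resource. */
	struct resource {
		unsigned long start, end;
		const char *name;
	};

	static struct resource io_db = { 0xc000, 0xcfff, "pcmcia io window" };

	/*
	 * Model of the new contract: on success the window base is returned
	 * through *base and the managed parent resource (or NULL) through *parent.
	 */
	static int find_io(unsigned int *base, unsigned int num, struct resource **parent)
	{
		if (num > io_db.end - io_db.start + 1)
			return -1;

		*base = (unsigned int)io_db.start;
		*parent = &io_db;	/* a static socket would report NULL here */
		return 0;
	}

	int main(void)
	{
		unsigned int base = 0;
		struct resource *parent = NULL;

		assert(find_io(&base, 32, &parent) == 0);
		assert(parent != NULL);
		printf("window at %#x inside '%s'\n", base, parent->name);
		return 0;
	}
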
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index dcd1a4ad3d6..96f348b35fd 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -28,7 +28,6 @@
#include <asm/irq.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
@@ -64,6 +63,9 @@ struct socket_data {
#define MEM_PROBE_LOW (1 << 0)
#define MEM_PROBE_HIGH (1 << 1)
+/* Action field */
+#define REMOVE_MANAGED_RESOURCE 1
+#define ADD_MANAGED_RESOURCE 2
/*======================================================================
@@ -716,7 +718,7 @@ static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s,
static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
unsigned int *base, unsigned int num,
- unsigned int align)
+ unsigned int align, struct resource **parent)
{
int i, ret = 0;
@@ -758,6 +760,7 @@ static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
((res->flags & ~IORESOURCE_BITS) |
(attr & IORESOURCE_BITS));
s->io[i].InUse = num;
+ *parent = res;
return 0;
}
@@ -773,6 +776,7 @@ static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
continue;
*base = try;
s->io[i].InUse += num;
+ *parent = res;
return 0;
}
}
@@ -791,6 +795,7 @@ static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
continue;
*base = try;
s->io[i].InUse += num;
+ *parent = res;
return 0;
}
}
@@ -1055,8 +1060,6 @@ struct pccard_resource_ops pccard_nonstatic_ops = {
.validate_mem = pcmcia_nonstatic_validate_mem,
.find_io = nonstatic_find_io,
.find_mem = nonstatic_find_mem_region,
- .add_io = adjust_io,
- .add_mem = adjust_memory,
.init = nonstatic_init,
.exit = nonstatic_release_resource_db,
};
@@ -1115,8 +1118,6 @@ static ssize_t store_io_db(struct device *dev,
mutex_lock(&s->ops_mutex);
ret = adjust_io(s, add, start_addr, end_addr);
- if (!ret)
- s->resource_setup_new = 1;
mutex_unlock(&s->ops_mutex);
return ret ? ret : count;
@@ -1183,8 +1184,6 @@ static ssize_t store_mem_db(struct device *dev,
mutex_lock(&s->ops_mutex);
ret = adjust_memory(s, add, start_addr, end_addr);
- if (!ret)
- s->resource_setup_new = 1;
mutex_unlock(&s->ops_mutex);
return ret ? ret : count;
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index edbd8c47262..e0985148029 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -35,7 +35,6 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index e40824ce6b0..3fba3a67912 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -11,7 +11,6 @@
/* include the world */
#include <linux/cpufreq.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 80e36bc407d..cb0d3ace18b 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -26,7 +26,6 @@
#include <asm/system.h>
#include <asm/irq.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 56004a1b5bb..be0d841c7eb 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -49,7 +49,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include "tcic.h"
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 201ccfa1e97..fa88c360c37 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index f1d41374eea..414d9a6f9a3 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 79baa6368f7..cff7cc2c1f0 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -219,6 +219,13 @@ config SONYPI_COMPAT
---help---
Build the sonypi driver compatibility code into the sony-laptop driver.
+config IDEAPAD_ACPI
+ tristate "Lenovo IdeaPad ACPI Laptop Extras"
+ depends on ACPI
+ depends on RFKILL
+ help
+ This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
+
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
depends on ACPI
@@ -479,10 +486,12 @@ config TOPSTAR_LAPTOP
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_POLLDEV
- select BACKLIGHT_CLASS_DEVICE
---help---
This driver adds support for access to certain system settings
on "legacy free" Toshiba laptops. These laptops can be recognized by
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 4744c7744ff..85fb2b84f57 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
+obj-$(CONFIG_IDEAPAD_ACPI) += ideapad_acpi.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index e058c2ba2a1..ca05aefd03b 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -938,10 +938,11 @@ static int set_brightness(int value)
/* SPLV laptop */
if (hotk->methods->brightness_set) {
if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
- value, NULL))
+ value, NULL)) {
printk(KERN_WARNING
"Asus ACPI: Error changing brightness\n");
ret = -EIO;
+ }
goto out;
}
@@ -953,10 +954,11 @@ static int set_brightness(int value)
hotk->methods->brightness_down,
NULL, NULL);
(value > 0) ? value-- : value++;
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
printk(KERN_WARNING
"Asus ACPI: Error changing brightness\n");
ret = -EIO;
+ }
}
out:
return ret;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index d071ce05632..097083cac41 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -841,6 +841,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
.callback = dmi_check_cb
},
{
+ .ident = "Dell Mini 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
.ident = "Dell Inspiron 11z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1092,5 +1100,6 @@ MODULE_ALIAS("dmi:*:rnJHL90:rvrREFERENCE:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
+MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b41ed5cab3e..4413975912e 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -122,6 +122,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
},
},
{
+ .ident = "Dell Mini 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ },
+ {
.ident = "Dell Inspiron 11z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f1551637498..c1741142a4c 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -79,12 +79,13 @@ struct bios_args {
u32 command;
u32 commandtype;
u32 datasize;
- char *data;
+ u32 data;
};
struct bios_return {
u32 sigpass;
u32 return_code;
+ u32 value;
};
struct key_entry {
@@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = {
* buffer = kzalloc(128, GFP_KERNEL);
* ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
*/
-static int hp_wmi_perform_query(int query, int write, char *buffer,
+static int hp_wmi_perform_query(int query, int write, u32 *buffer,
int buffersize)
{
struct bios_return bios_return;
@@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
.command = write ? 0x2 : 0x1,
.commandtype = query,
.datasize = buffersize,
- .data = buffer,
+ .data = *buffer,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
bios_return = *((struct bios_return *)obj->buffer.pointer);
- if (bios_return.return_code) {
- printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
- bios_return.return_code);
- kfree(obj);
- return bios_return.return_code;
- }
- if (obj->buffer.length - sizeof(bios_return) > buffersize) {
- kfree(obj);
- return -EINVAL;
- }
-
- memset(buffer, 0, buffersize);
- memcpy(buffer,
- ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
- obj->buffer.length - sizeof(bios_return));
- kfree(obj);
+ memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
return 0;
}
static int hp_wmi_display_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -208,8 +194,8 @@ static int hp_wmi_display_state(void)
static int hp_wmi_hddtemp_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void)
static int hp_wmi_als_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -228,8 +214,8 @@ static int hp_wmi_als_state(void)
static int hp_wmi_dock_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state));
if (ret)
@@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void)
static int hp_wmi_tablet_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state));
if (ret)
return ret;
@@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
int ret;
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
- (char *)&query, sizeof(query));
+ &query, sizeof(query));
if (ret)
return -EINVAL;
return 0;
@@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
- int wireless;
+ int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- (char *)&wireless, sizeof(wireless));
+ &wireless, sizeof(wireless));
/* TBD: Pass error */
mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
- int wireless;
+ int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- (char *)&wireless, sizeof(wireless));
+ &wireless, sizeof(wireless));
/* TBD: Pass error */
mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 tmp = simple_strtoul(buf, NULL, 10);
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp,
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp));
if (ret)
return -EINVAL;
@@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context)
static struct key_entry *key;
union acpi_object *obj;
u32 event_id, event_data;
- int key_code, ret;
+ int key_code = 0, ret;
u32 *location;
acpi_status status;
@@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context)
break;
case HPWMI_BEZEL_BUTTON:
ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
- (char *)&key_code,
+ &key_code,
sizeof(key_code));
if (ret)
break;
@@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device)
static int __devinit hp_wmi_bios_setup(struct platform_device *device)
{
int err;
- int wireless;
+ int wireless = 0;
- err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless,
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
sizeof(wireless));
if (err)
return err;
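
The hp-wmi rework above stops passing a kernel pointer in bios_args and instead carries a single 32-bit value each way: args.data on the way in and bios_return.value on the way out. A standalone model of that round trip, using only the struct fields visible in the hunks (the real structures contain additional fields) and a fake BIOS call in place of the WMI method:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	/* Only the fields visible in the hunks above; the real structs have more. */
	struct bios_args {
		uint32_t command;
		uint32_t commandtype;
		uint32_t datasize;
		uint32_t data;		/* was: char *data */
	};

	struct bios_return {
		uint32_t sigpass;
		uint32_t return_code;
		uint32_t value;		/* new: the result travels back in-band */
	};

	/* Fake BIOS call standing in for the WMI method: echo the query type back. */
	static struct bios_return fake_bios_call(const struct bios_args *args)
	{
		struct bios_return ret = { 0, 0, args->commandtype };
		return ret;
	}

	static int perform_query(int query, int write, uint32_t *buffer)
	{
		struct bios_args args = {
			.command = write ? 0x2 : 0x1,
			.commandtype = (uint32_t)query,
			.datasize = sizeof(*buffer),
			.data = *buffer,	/* 32-bit payload now goes in by value */
		};
		struct bios_return ret = fake_bios_call(&args);

		if (ret.return_code)
			return -1;
		/* mirrors: memcpy(buffer, &bios_return.value, sizeof(bios_return.value)) */
		memcpy(buffer, &ret.value, sizeof(ret.value));
		return 0;
	}

	int main(void)
	{
		uint32_t state = 0;

		assert(perform_query(4, 0, &state) == 0);
		assert(state == 4);
		return 0;
	}
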
diff --git a/drivers/platform/x86/ideapad_acpi.c b/drivers/platform/x86/ideapad_acpi.c
new file mode 100644
index 00000000000..798496353e8
--- /dev/null
+++ b/drivers/platform/x86/ideapad_acpi.c
@@ -0,0 +1,306 @@
+/*
+ * ideapad_acpi.c - Lenovo IdeaPad ACPI Extras
+ *
+ * Copyright © 2010 Intel Corporation
+ * Copyright © 2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/rfkill.h>
+
+#define IDEAPAD_DEV_CAMERA 0
+#define IDEAPAD_DEV_WLAN 1
+#define IDEAPAD_DEV_BLUETOOTH 2
+#define IDEAPAD_DEV_3G 3
+#define IDEAPAD_DEV_KILLSW 4
+
+struct ideapad_private {
+ struct rfkill *rfk[5];
+};
+
+static struct {
+ char *name;
+ int type;
+} ideapad_rfk_data[] = {
+ /* camera has no rfkill */
+ { "ideapad_wlan", RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", RFKILL_TYPE_WWAN },
+ { "ideapad_killsw", RFKILL_TYPE_WLAN }
+};
+
+static int ideapad_dev_exists(int device)
+{
+ acpi_status status;
+ union acpi_object in_param;
+ struct acpi_object_list input = { 1, &in_param };
+ struct acpi_buffer output;
+ union acpi_object out_obj;
+
+ output.length = sizeof(out_obj);
+ output.pointer = &out_obj;
+
+ in_param.type = ACPI_TYPE_INTEGER;
+ in_param.integer.value = device + 1;
+
+ status = acpi_evaluate_object(NULL, "\\_SB_.DECN", &input, &output);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_WARNING "IdeaPAD \\_SB_.DECN method failed %d. Is this an IdeaPAD?\n", status);
+ return -ENODEV;
+ }
+ if (out_obj.type != ACPI_TYPE_INTEGER) {
+ printk(KERN_WARNING "IdeaPAD \\_SB_.DECN method returned unexpected type\n");
+ return -ENODEV;
+ }
+ return out_obj.integer.value;
+}
+
+static int ideapad_dev_get_state(int device)
+{
+ acpi_status status;
+ union acpi_object in_param;
+ struct acpi_object_list input = { 1, &in_param };
+ struct acpi_buffer output;
+ union acpi_object out_obj;
+
+ output.length = sizeof(out_obj);
+ output.pointer = &out_obj;
+
+ in_param.type = ACPI_TYPE_INTEGER;
+ in_param.integer.value = device + 1;
+
+ status = acpi_evaluate_object(NULL, "\\_SB_.GECN", &input, &output);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_WARNING "IdeaPAD \\_SB_.GECN method failed %d\n", status);
+ return -ENODEV;
+ }
+ if (out_obj.type != ACPI_TYPE_INTEGER) {
+ printk(KERN_WARNING "IdeaPAD \\_SB_.GECN method returned unexpected type\n");
+ return -ENODEV;
+ }
+ return out_obj.integer.value;
+}
+
+static int ideapad_dev_set_state(int device, int state)
+{
+ acpi_status status;
+ union acpi_object in_params[2];
+ struct acpi_object_list input = { 2, in_params };
+
+ in_params[0].type = ACPI_TYPE_INTEGER;
+ in_params[0].integer.value = device + 1;
+ in_params[1].type = ACPI_TYPE_INTEGER;
+ in_params[1].integer.value = state;
+
+ status = acpi_evaluate_object(NULL, "\\_SB_.SECN", &input, NULL);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_WARNING "IdeaPAD \\_SB_.SECN method failed %d\n", status);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static ssize_t show_ideapad_cam(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int state = ideapad_dev_get_state(IDEAPAD_DEV_CAMERA);
+ if (state < 0)
+ return state;
+
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t store_ideapad_cam(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, state;
+
+ if (!count)
+ return 0;
+ if (sscanf(buf, "%i", &state) != 1)
+ return -EINVAL;
+ ret = ideapad_dev_set_state(IDEAPAD_DEV_CAMERA, !!state);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
+
+static int ideapad_rfk_set(void *data, bool blocked)
+{
+ int device = (unsigned long)data;
+
+ if (device == IDEAPAD_DEV_KILLSW)
+ return -EINVAL;
+ return ideapad_dev_set_state(device, !blocked);
+}
+
+static struct rfkill_ops ideapad_rfk_ops = {
+ .set_block = ideapad_rfk_set,
+};
+
+static void ideapad_sync_rfk_state(struct acpi_device *adevice)
+{
+ struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ int hw_blocked = !ideapad_dev_get_state(IDEAPAD_DEV_KILLSW);
+ int i;
+
+ rfkill_set_hw_state(priv->rfk[IDEAPAD_DEV_KILLSW], hw_blocked);
+ for (i = IDEAPAD_DEV_WLAN; i < IDEAPAD_DEV_KILLSW; i++)
+ if (priv->rfk[i])
+ rfkill_set_hw_state(priv->rfk[i], hw_blocked);
+ if (hw_blocked)
+ return;
+
+ for (i = IDEAPAD_DEV_WLAN; i < IDEAPAD_DEV_KILLSW; i++)
+ if (priv->rfk[i])
+ rfkill_set_sw_state(priv->rfk[i], !ideapad_dev_get_state(i));
+}
+
+static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+{
+ struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ int ret;
+
+ priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev-1].name, &adevice->dev,
+ ideapad_rfk_data[dev-1].type, &ideapad_rfk_ops,
+ (void *)(long)dev);
+ if (!priv->rfk[dev])
+ return -ENOMEM;
+
+ ret = rfkill_register(priv->rfk[dev]);
+ if (ret) {
+ rfkill_destroy(priv->rfk[dev]);
+ return ret;
+ }
+ return 0;
+}
+
+static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+{
+ struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+
+ if (!priv->rfk[dev])
+ return;
+
+ rfkill_unregister(priv->rfk[dev]);
+ rfkill_destroy(priv->rfk[dev]);
+}
+
+static const struct acpi_device_id ideapad_device_ids[] = {
+ { "VPC2004", 0},
+ { "", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static int ideapad_acpi_add(struct acpi_device *adevice)
+{
+ int i;
+ int devs_present[5];
+ struct ideapad_private *priv;
+
+ for (i = IDEAPAD_DEV_CAMERA; i < IDEAPAD_DEV_KILLSW; i++) {
+ devs_present[i] = ideapad_dev_exists(i);
+ if (devs_present[i] < 0)
+ return devs_present[i];
+ }
+
+ /* The hardware switch is always present */
+ devs_present[IDEAPAD_DEV_KILLSW] = 1;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (devs_present[IDEAPAD_DEV_CAMERA]) {
+ int ret = device_create_file(&adevice->dev, &dev_attr_camera_power);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+ }
+
+ dev_set_drvdata(&adevice->dev, priv);
+ for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++) {
+ if (!devs_present[i])
+ continue;
+
+ ideapad_register_rfkill(adevice, i);
+ }
+ ideapad_sync_rfk_state(adevice);
+ return 0;
+}
+
+static int ideapad_acpi_remove(struct acpi_device *adevice, int type)
+{
+ struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ int i;
+
+ device_remove_file(&adevice->dev, &dev_attr_camera_power);
+
+ for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++)
+ ideapad_unregister_rfkill(adevice, i);
+
+ dev_set_drvdata(&adevice->dev, NULL);
+ kfree(priv);
+ return 0;
+}
+
+static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
+{
+ ideapad_sync_rfk_state(adevice);
+}
+
+static struct acpi_driver ideapad_acpi_driver = {
+ .name = "ideapad_acpi",
+ .class = "IdeaPad",
+ .ids = ideapad_device_ids,
+ .ops.add = ideapad_acpi_add,
+ .ops.remove = ideapad_acpi_remove,
+ .ops.notify = ideapad_acpi_notify,
+ .owner = THIS_MODULE,
+};
+
+static int __init ideapad_acpi_module_init(void)
+{
+ return acpi_bus_register_driver(&ideapad_acpi_driver);
+}
+
+static void __exit ideapad_acpi_module_exit(void)
+{
+ acpi_bus_unregister_driver(&ideapad_acpi_driver);
+}
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("IdeaPad ACPI Extras");
+MODULE_LICENSE("GPL");
+
+module_init(ideapad_acpi_module_init);
+module_exit(ideapad_acpi_module_exit);
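
The only sysfs knob this driver creates is camera_power, registered on the ACPI device in ideapad_acpi_add(). A hedged userspace sketch of toggling it is below; the device path is an assumption (the VPC2004 instance name varies per machine), so it should be checked against /sys/bus/acpi/devices/ before use:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Hypothetical path: the VPC2004 instance number differs between machines. */
	#define CAMERA_ATTR "/sys/bus/acpi/devices/VPC2004:00/camera_power"

	int main(int argc, char **argv)
	{
		const char *val = (argc > 1 && !strcmp(argv[1], "off")) ? "0" : "1";
		int fd = open(CAMERA_ATTR, O_WRONLY);

		if (fd < 0) {
			perror(CAMERA_ATTR);
			return 1;
		}
		if (write(fd, val, 1) != 1)	/* store_ideapad_cam() accepts 0/1 */
			perror("write");
		close(fd);
		return 0;
	}
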
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index afe82e50dfe..9024480a822 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1342,8 +1342,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
limits = &ips_lv_limits;
else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
limits = &ips_ulv_limits;
- else
+ else {
dev_info(&ips->dev->dev, "No CPUID match found.\n");
+ goto out;
+ }
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
tdp = turbo_power & TURBO_TDP_MASK;
@@ -1432,6 +1434,12 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
spin_lock_init(&ips->turbo_status_lock);
+ ret = pci_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "can't enable PCI device, aborting\n");
+ goto error_free;
+ }
+
if (!pci_resource_start(dev, 0)) {
dev_err(&dev->dev, "TBAR not assigned, aborting\n");
ret = -ENXIO;
@@ -1444,11 +1452,6 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto error_free;
}
- ret = pci_enable_device(dev);
- if (ret) {
- dev_err(&dev->dev, "can't enable PCI device, aborting\n");
- goto error_free;
- }
ips->regmap = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index 73f8e6d7266..2b11a33325e 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar)
*/
static struct rar_device *_rar_to_device(int rar, int *off)
{
- if (rar >= 0 && rar <= 3) {
+ if (rar >= 0 && rar < MRST_NUM_RAR) {
*off = rar;
return &my_rar_device;
}
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 943f9084dcb..6abe18e638e 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
mdelay(1);
*data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
} else if (cmd == IPC_I2C_WRITE) {
- writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
+ writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
mdelay(1);
writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
} else {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5d6119bed00..2d61186ad5a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
TP_ACPI_HOTKEYSCAN_MUTE,
TP_ACPI_HOTKEYSCAN_THINKPAD,
+ TP_ACPI_HOTKEYSCAN_UNK1,
+ TP_ACPI_HOTKEYSCAN_UNK2,
+ TP_ACPI_HOTKEYSCAN_UNK3,
+ TP_ACPI_HOTKEYSCAN_UNK4,
+ TP_ACPI_HOTKEYSCAN_UNK5,
+ TP_ACPI_HOTKEYSCAN_UNK6,
+ TP_ACPI_HOTKEYSCAN_UNK7,
+ TP_ACPI_HOTKEYSCAN_UNK8,
+
+ /* Hotkey keymap size */
+ TPACPI_HOTKEY_MAP_LEN
};
enum { /* Keys/events available through NVRAM polling */
@@ -3082,6 +3093,9 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
};
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
/* Requirements for changing the default keymaps:
@@ -3113,9 +3127,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* If the above is too much to ask, don't change the keymap.
* Ask the thinkpad-acpi maintainer to do it, instead.
*/
- static u16 ibm_keycode_map[] __initdata = {
+
+ enum keymap_index {
+ TPACPI_KEYMAP_IBM_GENERIC = 0,
+ TPACPI_KEYMAP_LENOVO_GENERIC,
+ };
+
+ static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {
+ /* Generic keymap for IBM ThinkPads */
+ [TPACPI_KEYMAP_IBM_GENERIC] = {
/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
- KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP,
+ KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP,
KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
@@ -3146,11 +3168,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- };
- static u16 lenovo_keycode_map[] __initdata = {
+ },
+
+ /* Generic keymap for Lenovo ThinkPads */
+ [TPACPI_KEYMAP_LENOVO_GENERIC] = {
/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
- KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
@@ -3189,11 +3213,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ },
+ };
+
+ static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {
+ /* Generic maps (fallback) */
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_KEYMAP_IBM_GENERIC,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_KEYMAP_LENOVO_GENERIC,
+ },
};
-#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0])
+#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
+#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
int res, i;
int status;
@@ -3202,6 +3240,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
bool tabletsw_state = false;
unsigned long quirks;
+ unsigned long keymap_id;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"initializing hotkey subdriver\n");
@@ -3342,7 +3381,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
goto err_exit;
/* Set up key map */
-
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
@@ -3352,17 +3390,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
goto err_exit;
}
- if (tpacpi_is_lenovo()) {
- dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "using Lenovo default hot key map\n");
- memcpy(hotkey_keycode_map, &lenovo_keycode_map,
- TPACPI_HOTKEY_MAP_SIZE);
- } else {
- dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "using IBM default hot key map\n");
- memcpy(hotkey_keycode_map, &ibm_keycode_map,
- TPACPI_HOTKEY_MAP_SIZE);
- }
+ keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
+ ARRAY_SIZE(tpacpi_keymap_qtable));
+ BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "using keymap number %lu\n", keymap_id);
+
+ memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id],
+ TPACPI_HOTKEY_MAP_SIZE);
input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
@@ -3469,7 +3504,8 @@ static bool hotkey_notify_hotkey(const u32 hkey,
*send_acpi_ev = true;
*ignore_acpi_ev = false;
- if (scancode > 0 && scancode < 0x21) {
+ /* HKEY event 0x1001 is scancode 0x00 */
+ if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) {
scancode--;
if (!(hotkey_source_mask & (1 << scancode))) {
tpacpi_input_send_key_masked(scancode);
@@ -6080,13 +6116,18 @@ static struct backlight_ops ibm_backlight_data = {
/* --------------------------------------------------------------------- */
+/*
+ * Call _BCL method of video device. On some ThinkPads this will
+ * switch the firmware to the ACPI brightness control mode.
+ */
+
static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
int rc;
- if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
+ if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
printk(TPACPI_ERR "Unknown _BCL data, "
@@ -6103,55 +6144,22 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
return rc;
}
-static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
- u32 lvl, void *context, void **rv)
-{
- char name[ACPI_PATH_SEGMENT_LENGTH];
- struct acpi_buffer buffer = { sizeof(name), &name };
-
- if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
- !strncmp("_BCL", name, sizeof(name) - 1)) {
- BUG_ON(!rv || !*rv);
- **(int **)rv = tpacpi_query_bcl_levels(handle);
- return AE_CTRL_TERMINATE;
- } else {
- return AE_OK;
- }
-}
/*
* Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
*/
static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
{
- int status;
+ acpi_handle video_device;
int bcl_levels = 0;
- void *bcl_ptr = &bcl_levels;
-
- if (!vid_handle)
- TPACPI_ACPIHANDLE_INIT(vid);
-
- if (!vid_handle)
- return 0;
-
- /*
- * Search for a _BCL method, and execute it. This is safe on all
- * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
- * BIOS in ACPI backlight control mode. We do NOT have to care
- * about calling the _BCL method in an enabled video device, any
- * will do for our purposes.
- */
- status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
- tpacpi_acpi_walk_find_bcl, NULL, NULL,
- &bcl_ptr);
+ tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+ if (video_device)
+ bcl_levels = tpacpi_query_bcl_levels(video_device);
- if (ACPI_SUCCESS(status) && bcl_levels > 2) {
- tp_features.bright_acpimode = 1;
- return bcl_levels - 2;
- }
+ tp_features.bright_acpimode = (bcl_levels > 0);
- return 0;
+ return (bcl_levels > 2) ? (bcl_levels - 2) : 0;
}
/*
@@ -6244,28 +6252,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (tp_features.bright_unkfw)
return 1;
- if (tp_features.bright_acpimode) {
- if (acpi_video_backlight_support()) {
- if (brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface "
- "available, not loading native one.\n");
- return 1;
- } else if (brightness_enable == 1) {
- printk(TPACPI_NOTICE
- "Backlight control force enabled, even if standard "
- "ACPI backlight interface is available\n");
- }
- } else {
- if (brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface not "
- "available, thinkpad_acpi native "
- "brightness control enabled\n");
- }
- }
- }
-
if (!brightness_enable) {
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness support disabled by "
@@ -6273,6 +6259,26 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 1;
}
+ if (acpi_video_backlight_support()) {
+ if (brightness_enable > 1) {
+ printk(TPACPI_INFO
+ "Standard ACPI backlight interface "
+ "available, not loading native one.\n");
+ return 1;
+ } else if (brightness_enable == 1) {
+ printk(TPACPI_WARN
+ "Cannot enable backlight brightness support, "
+ "ACPI is already handling it. Refer to the "
+ "acpi_backlight kernel parameter\n");
+ return 1;
+ }
+ } else if (tp_features.bright_acpimode && brightness_enable > 1) {
+ printk(TPACPI_NOTICE
+ "Standard ACPI backlight interface not "
+ "available, thinkpad_acpi native "
+ "brightness control enabled\n");
+ }
+
/*
* Check for module parameter bogosity, note that we
* init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
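
The hotkey_init() rework above selects a keymap by running tpacpi_check_quirks() over tpacpi_keymap_qtable[] and using the resulting .quirks value as an index into tpacpi_keymaps[]. A simplified standalone model of that selection step, with placeholder vendor strings and keycodes instead of the real PCI vendor IDs and TPACPI tables:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	#define MAP_LEN 4			/* stands in for TPACPI_HOTKEY_MAP_LEN */

	enum keymap_index { KEYMAP_IBM_GENERIC = 0, KEYMAP_LENOVO_GENERIC };

	typedef unsigned short keymap_entry_t;		/* tpacpi_keymap_entry_t */
	typedef keymap_entry_t keymap_t[MAP_LEN];	/* tpacpi_keymap_t */

	static const keymap_t keymaps[] = {
		[KEYMAP_IBM_GENERIC]    = { 0x3b, 0x3c, 0x3d, 0x3e },
		[KEYMAP_LENOVO_GENERIC] = { 0x4b, 0x4c, 0x4d, 0x4e },
	};

	struct quirk {
		const char *vendor;	/* the real table matches PCI vendor + BIOS/EC IDs */
		unsigned long quirks;	/* index into keymaps[] */
	};

	static const struct quirk keymap_qtable[] = {
		{ "IBM",    KEYMAP_IBM_GENERIC },
		{ "LENOVO", KEYMAP_LENOVO_GENERIC },
	};

	static unsigned long check_quirks(const char *vendor)
	{
		size_t i;

		for (i = 0; i < sizeof(keymap_qtable) / sizeof(keymap_qtable[0]); i++)
			if (!strcmp(keymap_qtable[i].vendor, vendor))
				return keymap_qtable[i].quirks;
		return KEYMAP_IBM_GENERIC;	/* fallback */
	}

	int main(void)
	{
		keymap_t active;
		unsigned long id = check_quirks("LENOVO");

		assert(id < sizeof(keymaps) / sizeof(keymaps[0]));
		memcpy(active, keymaps[id], sizeof(keymap_t));	/* as hotkey_init() does */
		printf("keymap %lu selected, first keycode 0x%x\n", id, active[0]);
		return 0;
	}
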
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 8e9ba177d81..07343568a12 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -136,10 +136,34 @@ config BATTERY_Z2
help
Say Y to include support for the battery on the Zipit Z2.
+config BATTERY_S3C_ADC
+ tristate "Battery driver for Samsung ADC based monitoring"
+ depends on S3C_ADC
+ help
+ Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery.
+
config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
help
Say Y to include support for NXP PCF50633 Main Battery Charger.
+config BATTERY_JZ4740
+ tristate "Ingenic JZ4740 battery"
+ depends on MACH_JZ4740
+ depends on MFD_JZ4740_ADC
+ help
+ Say Y to enable support for the battery on Ingenic JZ4740 based
+ boards.
+
+ This driver can be built as a module. If so, the module will be
+ called jz4740-battery.
+
+config BATTERY_INTEL_MID
+ tristate "Battery driver for Intel MID platforms"
+ depends on INTEL_SCU_IPC && SPI
+ help
+ Say Y here to enable the battery driver on Intel MID
+ platforms.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 00050809a6c..10143aaf4ee 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -33,4 +33,7 @@ obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
+obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
+obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
+obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae560fa..dc628cb2e76 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ break;
case SOURCE_VOLTAGE:
full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
new file mode 100644
index 00000000000..2a10cd36118
--- /dev/null
+++ b/drivers/power/intel_mid_battery.c
@@ -0,0 +1,799 @@
+/*
+ * intel_mid_battery.c - Intel MID PMIC Battery Driver
+ *
+ * Copyright (C) 2009 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Nithish Mahalingam <nithish.mahalingam@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <asm/intel_scu_ipc.h>
+
+#define DRIVER_NAME "pmic_battery"
+
+/*********************************************************************
+ * Generic defines
+ *********************************************************************/
+
+static int debug;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
+
+#define PMIC_BATT_DRV_INFO_UPDATED 1
+#define PMIC_BATT_PRESENT 1
+#define PMIC_BATT_NOT_PRESENT 0
+#define PMIC_USB_PRESENT PMIC_BATT_PRESENT
+#define PMIC_USB_NOT_PRESENT PMIC_BATT_NOT_PRESENT
+
+/* pmic battery register related */
+#define PMIC_BATT_CHR_SCHRGINT_ADDR 0xD2
+#define PMIC_BATT_CHR_SBATOVP_MASK (1 << 1)
+#define PMIC_BATT_CHR_STEMP_MASK (1 << 2)
+#define PMIC_BATT_CHR_SCOMP_MASK (1 << 3)
+#define PMIC_BATT_CHR_SUSBDET_MASK (1 << 4)
+#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
+#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
+#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
+#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
+#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
+
+/* pmic ipc related */
+#define PMIC_BATT_CHR_IPC_FCHRG_SUBID 0x4
+#define PMIC_BATT_CHR_IPC_TCHRG_SUBID 0x6
+
+/* types of battery charging */
+enum batt_charge_type {
+ BATT_USBOTG_500MA_CHARGE,
+ BATT_USBOTG_TRICKLE_CHARGE,
+};
+
+/* valid battery events */
+enum batt_event {
+ BATT_EVENT_BATOVP_EXCPT,
+ BATT_EVENT_USBOVP_EXCPT,
+ BATT_EVENT_TEMP_EXCPT,
+ BATT_EVENT_DCLMT_EXCPT,
+ BATT_EVENT_EXCPT
+};
+
+
+/*********************************************************************
+ * Battery properties
+ *********************************************************************/
+
+/*
+ * pmic battery info
+ */
+struct pmic_power_module_info {
+ bool is_dev_info_updated;
+ struct device *dev;
+ /* pmic battery data */
+ unsigned long update_time; /* jiffies when data read */
+ unsigned int usb_is_present;
+ unsigned int batt_is_present;
+ unsigned int batt_health;
+ unsigned int usb_health;
+ unsigned int batt_status;
+ unsigned int batt_charge_now; /* in mAS */
+ unsigned int batt_prev_charge_full; /* in mAS */
+ unsigned int batt_charge_rate; /* in units per second */
+
+ struct power_supply usb;
+ struct power_supply batt;
+ int irq; /* GPE_ID or IRQ# */
+ struct workqueue_struct *monitor_wqueue;
+ struct delayed_work monitor_battery;
+ struct work_struct handler;
+};
+
+static unsigned int delay_time = 2000; /* in ms */
+
+/*
+ * pmic ac properties
+ */
+static enum power_supply_property pmic_usb_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+/*
+ * pmic battery properties
+ */
+static enum power_supply_property pmic_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+};
+
+
+/*
+ * Glue functions for talking to the IPC
+ */
+
+struct battery_property {
+ u32 capacity; /* Charger capacity */
+ u8 crnt; /* Quick charge current value*/
+ u8 volt; /* Fine adjustment of constant charge voltage */
+ u8 prot; /* CHRGPROT register value */
+ u8 prot2; /* CHRGPROT1 register value */
+ u8 timer; /* Charging timer */
+};
+
+#define IPCMSG_BATTERY 0xEF
+
+/* Battery coulomb counter accumulator commands */
+#define IPC_CMD_CC_WR 0 /* Update coulomb counter value */
+#define IPC_CMD_CC_RD 1 /* Read coulomb counter value */
+#define IPC_CMD_BATTERY_PROPERTY 2 /* Read Battery property */
+
+/**
+ * pmic_scu_ipc_battery_cc_read - read battery cc
+ * @value: battery coulomb counter read
+ *
+ * Reads the battery coulomb counter value, returns 0 on success, or
+ * an error code
+ *
+ * This function may sleep. Locking for SCU accesses is handled for
+ * the caller.
+ */
+static int pmic_scu_ipc_battery_cc_read(u32 *value)
+{
+ return intel_scu_ipc_command(IPCMSG_BATTERY, IPC_CMD_CC_RD,
+ NULL, 0, value, 1);
+}
+
+/**
+ * pmic_scu_ipc_battery_property_get - fetch properties
+ * @prop: battery properties
+ *
+ * Retrieve the battery properties from the power management unit.
+ *
+ * This function may sleep. Locking for SCU accesses is handled for
+ * the caller.
+ */
+static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
+{
+ u32 data[3];
+ u8 *p = (u8 *)&data[1];
+ int err = intel_scu_ipc_command(IPCMSG_BATTERY,
+ IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
+
+ prop->capacity = data[0];
+ prop->crnt = *p++;
+ prop->volt = *p++;
+ prop->prot = *p++;
+ prop->prot2 = *p++;
+ prop->timer = *p++;
+
+ return err;
+}
+
+/**
+ * pmic_scu_ipc_set_charger - set charger
+ * @charger: charger to select
+ *
+ * Switch the charging mode for the SCU
+ */
+
+static int pmic_scu_ipc_set_charger(int charger)
+{
+ return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
+}
+
+/**
+ * pmic_battery_log_event - log battery events
+ * @event: battery event to be logged
+ * Context: can sleep
+ *
+ * There are multiple battery events which may be of interest to users;
+ * this function logs the different battery events to the kernel log.
+ */
+static void pmic_battery_log_event(enum batt_event event)
+{
+ printk(KERN_WARNING "pmic-battery: ");
+ switch (event) {
+ case BATT_EVENT_BATOVP_EXCPT:
+ printk(KERN_CONT "battery overvoltage condition\n");
+ break;
+ case BATT_EVENT_USBOVP_EXCPT:
+ printk(KERN_CONT "usb charger overvoltage condition\n");
+ break;
+ case BATT_EVENT_TEMP_EXCPT:
+ printk(KERN_CONT "high battery temperature condition\n");
+ break;
+ case BATT_EVENT_DCLMT_EXCPT:
+ printk(KERN_CONT "over battery charge current condition\n");
+ break;
+ default:
+ printk(KERN_CONT "charger/battery exception %d\n", event);
+ break;
+ }
+}
+
+/**
+ * pmic_battery_read_status - read battery status information
+ * @pbi: device info structure to update the read information
+ * Context: can sleep
+ *
+ * PMIC power source information needs to be updated based on the data read
+ * from the PMIC battery registers.
+ *
+ */
+static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
+{
+ unsigned int update_time_intrvl;
+ unsigned int chrg_val;
+ u32 ccval;
+ u8 r8;
+ struct battery_property batt_prop;
+ int batt_present = 0;
+ int usb_present = 0;
+ int batt_exception = 0;
+
+ /* make sure the last batt_status read happened at least delay_time ago */
+ if (pbi->update_time && time_before(jiffies, pbi->update_time +
+ msecs_to_jiffies(delay_time)))
+ return;
+
+ update_time_intrvl = jiffies_to_msecs(jiffies - pbi->update_time);
+ pbi->update_time = jiffies;
+
+ /* read coulomb counter registers and schrgint register */
+ if (pmic_scu_ipc_battery_cc_read(&ccval)) {
+ dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
+ __func__);
+ return;
+ }
+
+ if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
+ dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * set pmic_power_module_info members based on pmic register values
+ * read.
+ */
+
+ /* set batt_is_present */
+ if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
+ pbi->batt_is_present = PMIC_BATT_PRESENT;
+ batt_present = 1;
+ } else {
+ pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
+ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ /* set batt_health */
+ if (batt_present) {
+ if (r8 & PMIC_BATT_CHR_SBATOVP_MASK) {
+ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
+ batt_exception = 1;
+ } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ batt_exception = 1;
+ } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
+ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ pmic_battery_log_event(BATT_EVENT_TEMP_EXCPT);
+ batt_exception = 1;
+ } else {
+ pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ }
+
+ /* set usb_is_present */
+ if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
+ pbi->usb_is_present = PMIC_USB_PRESENT;
+ usb_present = 1;
+ } else {
+ pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
+ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (usb_present) {
+ if (r8 & PMIC_BATT_CHR_SUSBOVP_MASK) {
+ pbi->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ pmic_battery_log_event(BATT_EVENT_USBOVP_EXCPT);
+ } else {
+ pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ }
+
+ chrg_val = ccval & PMIC_BATT_ADC_ACCCHRGVAL_MASK;
+
+ /* set batt_prev_charge_full to battery capacity the first time */
+ if (!pbi->is_dev_info_updated) {
+ if (pmic_scu_ipc_battery_property_get(&batt_prop)) {
+ dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
+ __func__);
+ return;
+ }
+ pbi->batt_prev_charge_full = batt_prop.capacity;
+ }
+
+ /* set batt_status */
+ if (batt_present && !batt_exception) {
+ if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
+ pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
+ pbi->batt_prev_charge_full = chrg_val;
+ } else if (ccval & PMIC_BATT_ADC_ACCCHRG_MASK) {
+ pbi->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ pbi->batt_status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+ }
+
+ /* set batt_charge_rate */
+ if (pbi->is_dev_info_updated && batt_present && !batt_exception) {
+ if (pbi->batt_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (pbi->batt_charge_now - chrg_val) {
+ pbi->batt_charge_rate = ((pbi->batt_charge_now -
+ chrg_val) * 1000 * 60) /
+ update_time_intrvl;
+ }
+ } else if (pbi->batt_status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (chrg_val - pbi->batt_charge_now) {
+ pbi->batt_charge_rate = ((chrg_val -
+ pbi->batt_charge_now) * 1000 * 60) /
+ update_time_intrvl;
+ }
+ } else
+ pbi->batt_charge_rate = 0;
+ } else {
+ pbi->batt_charge_rate = -1;
+ }
+
+ /* batt_charge_now */
+ if (batt_present && !batt_exception)
+ pbi->batt_charge_now = chrg_val;
+ else
+ pbi->batt_charge_now = -1;
+
+ pbi->is_dev_info_updated = PMIC_BATT_DRV_INFO_UPDATED;
+}
+
+/**
+ * pmic_usb_get_property - usb power source get property
+ * @psy: usb power supply context
+ * @psp: usb power source property
+ * @val: usb power source property value
+ * Context: can sleep
+ *
+ * PMIC usb power source property needs to be provided to the power_supply
+ * subsystem for it to provide the information to users.
+ */
+static int pmic_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct pmic_power_module_info *pbi = container_of(psy,
+ struct pmic_power_module_info, usb);
+
+ /* update pmic_power_module_info members */
+ pmic_battery_read_status(pbi);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = pbi->usb_is_present;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = pbi->usb_health;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline unsigned long mAStouAh(unsigned long v)
+{
+ /* seconds to hours, mA to µA */
+ return (v * 1000) / 3600;
+}
+
+/**
+ * pmic_battery_get_property - battery power source get property
+ * @psy: battery power supply context
+ * @psp: battery power source property
+ * @val: battery power source property value
+ * Context: can sleep
+ *
+ * PMIC battery power source property needs to be provided to the power_supply
+ * subsystem for it to provide the information to users.
+ */
+static int pmic_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct pmic_power_module_info *pbi = container_of(psy,
+ struct pmic_power_module_info, batt);
+
+ /* update pmic_power_module_info members */
+ pmic_battery_read_status(pbi);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = pbi->batt_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = pbi->batt_health;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = pbi->batt_is_present;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = mAStouAh(pbi->batt_charge_now);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = mAStouAh(pbi->batt_prev_charge_full);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pmic_battery_monitor - monitor battery status
+ * @work: work structure
+ * Context: can sleep
+ *
+ * PMIC battery status needs to be monitored for any change
+ * and information needs to be frequently updated.
+ */
+static void pmic_battery_monitor(struct work_struct *work)
+{
+ struct pmic_power_module_info *pbi = container_of(work,
+ struct pmic_power_module_info, monitor_battery.work);
+
+ /* update pmic_power_module_info members */
+ pmic_battery_read_status(pbi);
+ queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 10);
+}
+
+/**
+ * pmic_battery_set_charger - set battery charger
+ * @pbi: device info structure
+ * @chrg: charge mode to set battery charger in
+ * Context: can sleep
+ *
+ * The PMIC battery charger needs to be enabled according to the charge
+ * capabilities of the USB supply connected to the platform.
+ */
+static int pmic_battery_set_charger(struct pmic_power_module_info *pbi,
+ enum batt_charge_type chrg)
+{
+ int retval;
+
+ /* set usblmt bits and chrgcntl register bits appropriately */
+ switch (chrg) {
+ case BATT_USBOTG_500MA_CHARGE:
+ retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_FCHRG_SUBID);
+ break;
+ case BATT_USBOTG_TRICKLE_CHARGE:
+ retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_TCHRG_SUBID);
+ break;
+ default:
+ dev_warn(pbi->dev, "%s(): out of range usb charger "
+ "charge detected\n", __func__);
+ return -EINVAL;
+ }
+
+ if (retval) {
+ dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+/**
+ * pmic_battery_interrupt_handler - pmic battery interrupt handler
+ * Context: interrupt context
+ *
+ * PMIC battery interrupt handler, called when either a battery-full
+ * condition or a usb otg & battery connect condition occurs.
+ */
+static irqreturn_t pmic_battery_interrupt_handler(int id, void *dev)
+{
+ struct pmic_power_module_info *pbi = dev;
+
+ schedule_work(&pbi->handler);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pmic_battery_handle_intrpt - pmic battery service interrupt
+ * @work: work structure
+ * Context: can sleep
+ *
+ * The PMIC battery code either updates the battery status to full, if a
+ * battery-full condition caused the interrupt, or enables the battery
+ * charger, if a usb/battery detect event was the source of the interrupt.
+ */
+static void pmic_battery_handle_intrpt(struct work_struct *work)
+{
+ struct pmic_power_module_info *pbi = container_of(work,
+ struct pmic_power_module_info, handler);
+ enum batt_charge_type chrg;
+ u8 r8;
+
+ if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
+ dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
+ __func__);
+ return;
+ }
+ /* find the cause of the interrupt */
+ if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
+ pbi->batt_is_present = PMIC_BATT_PRESENT;
+ } else {
+ pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
+ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ return;
+ }
+
+ if (r8 & PMIC_BATT_CHR_EXCPT_MASK) {
+ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ pmic_battery_log_event(BATT_EVENT_EXCPT);
+ return;
+ } else {
+ pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
+ }
+
+ if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
+ u32 ccval;
+ pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
+
+ if (pmic_scu_ipc_battery_cc_read(&ccval)) {
+ dev_warn(pbi->dev, "%s(): ipc config cmd "
+ "failed\n", __func__);
+ return;
+ }
+ pbi->batt_prev_charge_full = ccval &
+ PMIC_BATT_ADC_ACCCHRGVAL_MASK;
+ return;
+ }
+
+ if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
+ pbi->usb_is_present = PMIC_USB_PRESENT;
+ } else {
+ pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
+ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ return;
+ }
+
+ /* setup battery charging */
+
+#if 0
+ /* check usb otg power capability and set charger accordingly */
+ retval = langwell_udc_maxpower(&power);
+ if (retval) {
+ dev_warn(pbi->dev,
+ "%s(): usb otg power query failed with error code %d\n",
+ __func__, retval);
+ return;
+ }
+
+ if (power >= 500)
+ chrg = BATT_USBOTG_500MA_CHARGE;
+ else
+#endif
+ chrg = BATT_USBOTG_TRICKLE_CHARGE;
+
+ /* enable battery charging */
+ if (pmic_battery_set_charger(pbi, chrg)) {
+ dev_warn(pbi->dev,
+ "%s(): failed to set up battery charging\n", __func__);
+ return;
+ }
+
+ dev_dbg(pbi->dev,
+ "pmic-battery: %s() - setting up battery charger successful\n",
+ __func__);
+}
+
+/**
+ * pmic_battery_probe - pmic battery initialize
+ * @irq: pmic battery device irq
+ * @dev: pmic battery device structure
+ * Context: can sleep
+ *
+ * PMIC battery initializes its internal data structures and other
+ * infrastructure components for it to work as expected.
+ */
+static __devinit int probe(int irq, struct device *dev)
+{
+ int retval = 0;
+ struct pmic_power_module_info *pbi;
+
+ dev_dbg(dev, "pmic-battery: found pmic battery device\n");
+
+ pbi = kzalloc(sizeof(*pbi), GFP_KERNEL);
+ if (!pbi) {
+ dev_err(dev, "%s(): memory allocation failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ pbi->dev = dev;
+ pbi->irq = irq;
+ dev_set_drvdata(dev, pbi);
+
+ /* initialize all required framework before enabling interrupts */
+ INIT_WORK(&pbi->handler, pmic_battery_handle_intrpt);
+ INIT_DELAYED_WORK(&pbi->monitor_battery, pmic_battery_monitor);
+ pbi->monitor_wqueue =
+ create_singlethread_workqueue(dev_name(dev));
+ if (!pbi->monitor_wqueue) {
+ dev_err(dev, "%s(): wqueue init failed\n", __func__);
+ retval = -ESRCH;
+ goto wqueue_failed;
+ }
+
+ /* register interrupt */
+ retval = request_irq(pbi->irq, pmic_battery_interrupt_handler,
+ 0, DRIVER_NAME, pbi);
+ if (retval) {
+ dev_err(dev, "%s(): cannot get IRQ\n", __func__);
+ goto requestirq_failed;
+ }
+
+ /* register pmic-batt with power supply subsystem */
+ pbi->batt.name = "pmic-batt";
+ pbi->batt.type = POWER_SUPPLY_TYPE_BATTERY;
+ pbi->batt.properties = pmic_battery_props;
+ pbi->batt.num_properties = ARRAY_SIZE(pmic_battery_props);
+ pbi->batt.get_property = pmic_battery_get_property;
+ retval = power_supply_register(dev, &pbi->batt);
+ if (retval) {
+ dev_err(dev,
+ "%s(): failed to register pmic battery device with power supply subsystem\n",
+ __func__);
+ goto power_reg_failed;
+ }
+
+ dev_dbg(dev, "pmic-battery: %s() - pmic battery device "
+ "registration with power supply subsystem successful\n",
+ __func__);
+
+ queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 1);
+
+ /* register pmic-usb with power supply subsystem */
+ pbi->usb.name = "pmic-usb";
+ pbi->usb.type = POWER_SUPPLY_TYPE_USB;
+ pbi->usb.properties = pmic_usb_props;
+ pbi->usb.num_properties = ARRAY_SIZE(pmic_usb_props);
+ pbi->usb.get_property = pmic_usb_get_property;
+ retval = power_supply_register(dev, &pbi->usb);
+ if (retval) {
+ dev_err(dev,
+ "%s(): failed to register pmic usb device with power supply subsystem\n",
+ __func__);
+ goto power_reg_failed_1;
+ }
+
+ if (debug)
+ printk(KERN_INFO "pmic-battery: %s() - pmic usb device "
+ "registration with power supply subsystem successful\n",
+ __func__);
+
+ return retval;
+
+power_reg_failed_1:
+ power_supply_unregister(&pbi->batt);
+power_reg_failed:
+ cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
+ &pbi->monitor_battery);
+ free_irq(pbi->irq, pbi);
+requestirq_failed:
+ destroy_workqueue(pbi->monitor_wqueue);
+wqueue_failed:
+ kfree(pbi);
+
+ return retval;
+}
+
+static int __devinit platform_pmic_battery_probe(struct platform_device *pdev)
+{
+ return probe(pdev->id, &pdev->dev);
+}
+
+/**
+ * pmic_battery_remove - pmic battery finalize
+ * @pdev: pmic battery platform device structure
+ * Context: can sleep
+ *
+ * PMIC battery finalizes its internal data structure and other
+ * infrastructure components that it initialized in
+ * pmic_battery_probe.
+ */
+
+static int __devexit platform_pmic_battery_remove(struct platform_device *pdev)
+{
+ struct pmic_power_module_info *pbi = dev_get_drvdata(&pdev->dev);
+
+ free_irq(pbi->irq, pbi);
+ cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
+ &pbi->monitor_battery);
+ destroy_workqueue(pbi->monitor_wqueue);
+
+ power_supply_unregister(&pbi->usb);
+ power_supply_unregister(&pbi->batt);
+
+ flush_scheduled_work();
+ kfree(pbi);
+ return 0;
+}
+
+static struct platform_driver platform_pmic_battery_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = platform_pmic_battery_probe,
+ .remove = __devexit_p(platform_pmic_battery_remove),
+};
+
+static int __init platform_pmic_battery_module_init(void)
+{
+ return platform_driver_register(&platform_pmic_battery_driver);
+}
+
+static void __exit platform_pmic_battery_module_exit(void)
+{
+ platform_driver_unregister(&platform_pmic_battery_driver);
+}
+
+module_init(platform_pmic_battery_module_init);
+module_exit(platform_pmic_battery_module_exit);
+
+MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
+MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
new file mode 100644
index 00000000000..20c4b952e9b
--- /dev/null
+++ b/drivers/power/jz4740-battery.c
@@ -0,0 +1,445 @@
+/*
+ * Battery measurement code for Ingenic JZ SOC.
+ *
+ * Copyright (C) 2009 Jiejing Zhang <kzjeef@gmail.com>
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * based on tosa_battery.c
+ *
+ * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com>
+*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/power_supply.h>
+
+#include <linux/power/jz4740-battery.h>
+#include <linux/jz4740-adc.h>
+
+struct jz_battery {
+ struct jz_battery_platform_data *pdata;
+ struct platform_device *pdev;
+
+ struct resource *mem;
+ void __iomem *base;
+
+ int irq;
+ int charge_irq;
+
+ struct mfd_cell *cell;
+
+ int status;
+ long voltage;
+
+ struct completion read_completion;
+
+ struct power_supply battery;
+ struct delayed_work work;
+};
+
+static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
+{
+ return container_of(psy, struct jz_battery, battery);
+}
+
+static irqreturn_t jz_battery_irq_handler(int irq, void *devid)
+{
+ struct jz_battery *battery = devid;
+
+ complete(&battery->read_completion);
+ return IRQ_HANDLED;
+}
+
+static long jz_battery_read_voltage(struct jz_battery *battery)
+{
+ unsigned long t;
+ unsigned long val;
+ long voltage;
+
+ INIT_COMPLETION(battery->read_completion);
+
+ enable_irq(battery->irq);
+ battery->cell->enable(battery->pdev);
+
+ t = wait_for_completion_interruptible_timeout(&battery->read_completion,
+ HZ);
+
+ if (t > 0) {
+ val = readw(battery->base) & 0xfff;
+
+ if (battery->pdata->info.voltage_max_design <= 2500000)
+ val = (val * 78125UL) >> 7UL;
+ else
+ val = ((val * 924375UL) >> 9UL) + 33000;
+ voltage = (long)val;
+ } else {
+ voltage = t ? t : -ETIMEDOUT;
+ }
+
+ battery->cell->disable(battery->pdev);
+ disable_irq(battery->irq);
+
+ return voltage;
+}
+
+static int jz_battery_get_capacity(struct power_supply *psy)
+{
+ struct jz_battery *jz_battery = psy_to_jz_battery(psy);
+ struct power_supply_info *info = &jz_battery->pdata->info;
+ long voltage;
+ int ret;
+ int voltage_span;
+
+ voltage = jz_battery_read_voltage(jz_battery);
+
+ if (voltage < 0)
+ return voltage;
+
+ voltage_span = info->voltage_max_design - info->voltage_min_design;
+ ret = ((voltage - info->voltage_min_design) * 100) / voltage_span;
+
+ if (ret > 100)
+ ret = 100;
+ else if (ret < 0)
+ ret = 0;
+
+ return ret;
+}
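As a rough illustration of the two conversions above: the low-voltage scaling constant is exactly 2500000/4096 uV per ADC step (78125/128), the high-voltage one works out to about 1805 uV per step plus a 33 mV offset, and the capacity is then a linear interpolation between the design limits, clamped to 0..100. The sketch below is a standalone user-space check of that arithmetic, not driver code; the full-scale interpretation and the 3.6 V/4.2 V design limits are assumptions inferred from the constants, not stated in the patch.

	#include <stdio.h>

	/* Same scaling as jz_battery_read_voltage(), for a 12-bit sample. */
	static unsigned long raw_to_uv(unsigned long raw, int low_range)
	{
		if (low_range)
			return (raw * 78125UL) >> 7;	/* 2500000 uV / 4096 steps */
		return ((raw * 924375UL) >> 9) + 33000;
	}

	int main(void)
	{
		unsigned long uv = raw_to_uv(2048, 1);		/* mid-scale sample */
		long vmin = 3600000, vmax = 4200000;		/* assumed design limits */
		long cap = (3900000 - vmin) * 100 / (vmax - vmin);

		printf("2048 steps -> %lu uV, 3.9 V -> %ld%% capacity\n", uv, cap);
		return 0;
	}

Compiled on its own, this prints 1250000 uV for the mid-scale sample and 50% for the 3.9 V example, matching what jz_battery_get_capacity() would report for those assumed limits.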
+
+static int jz_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct jz_battery *jz_battery = psy_to_jz_battery(psy);
+ struct power_supply_info *info = &jz_battery->pdata->info;
+ long voltage;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = jz_battery->status;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = jz_battery->pdata->info.technology;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ voltage = jz_battery_read_voltage(jz_battery);
+ if (voltage < info->voltage_min_design)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = jz_battery_get_capacity(psy);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = jz_battery_read_voltage(jz_battery);
+ if (val->intval < 0)
+ return val->intval;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = info->voltage_max_design;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = info->voltage_min_design;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void jz_battery_external_power_changed(struct power_supply *psy)
+{
+ struct jz_battery *jz_battery = psy_to_jz_battery(psy);
+
+ cancel_delayed_work(&jz_battery->work);
+ schedule_delayed_work(&jz_battery->work, 0);
+}
+
+static irqreturn_t jz_battery_charge_irq(int irq, void *data)
+{
+ struct jz_battery *jz_battery = data;
+
+ cancel_delayed_work(&jz_battery->work);
+ schedule_delayed_work(&jz_battery->work, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void jz_battery_update(struct jz_battery *jz_battery)
+{
+ int status;
+ long voltage;
+ bool has_changed = false;
+ int is_charging;
+
+ if (gpio_is_valid(jz_battery->pdata->gpio_charge)) {
+ is_charging = gpio_get_value(jz_battery->pdata->gpio_charge);
+ is_charging ^= jz_battery->pdata->gpio_charge_active_low;
+ if (is_charging)
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (status != jz_battery->status) {
+ jz_battery->status = status;
+ has_changed = true;
+ }
+ }
+
+ voltage = jz_battery_read_voltage(jz_battery);
+ if (abs(voltage - jz_battery->voltage) > 50000) {
+ jz_battery->voltage = voltage;
+ has_changed = true;
+ }
+
+ if (has_changed)
+ power_supply_changed(&jz_battery->battery);
+}
+
+static enum power_supply_property jz_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+static void jz_battery_work(struct work_struct *work)
+{
+ /* Too small interval will increase system workload */
+ const int interval = HZ * 30;
+ struct jz_battery *jz_battery = container_of(work, struct jz_battery,
+ work.work);
+
+ jz_battery_update(jz_battery);
+ schedule_delayed_work(&jz_battery->work, interval);
+}
+
+static int __devinit jz_battery_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct jz_battery_platform_data *pdata = pdev->dev.parent->platform_data;
+ struct jz_battery *jz_battery;
+ struct power_supply *battery;
+
+ jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
+ if (!jz_battery) {
+ dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+ return -ENOMEM;
+ }
+
+ jz_battery->cell = pdev->dev.platform_data;
+
+ jz_battery->irq = platform_get_irq(pdev, 0);
+ if (jz_battery->irq < 0) {
+ ret = jz_battery->irq;
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
+ goto err_free;
+ }
+
+ jz_battery->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!jz_battery->mem) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
+ goto err_free;
+ }
+
+ jz_battery->mem = request_mem_region(jz_battery->mem->start,
+ resource_size(jz_battery->mem), pdev->name);
+ if (!jz_battery->mem) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to request mmio memory region\n");
+ goto err_free;
+ }
+
+ jz_battery->base = ioremap_nocache(jz_battery->mem->start,
+ resource_size(jz_battery->mem));
+ if (!jz_battery->base) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
+ goto err_release_mem_region;
+ }
+
+ battery = &jz_battery->battery;
+ battery->name = pdata->info.name;
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = jz_battery_properties;
+ battery->num_properties = ARRAY_SIZE(jz_battery_properties);
+ battery->get_property = jz_battery_get_property;
+ battery->external_power_changed = jz_battery_external_power_changed;
+ battery->use_for_apm = 1;
+
+ jz_battery->pdata = pdata;
+ jz_battery->pdev = pdev;
+
+ init_completion(&jz_battery->read_completion);
+
+ INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
+
+ ret = request_irq(jz_battery->irq, jz_battery_irq_handler, 0, pdev->name,
+ jz_battery);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n", ret);
+ goto err_iounmap;
+ }
+ disable_irq(jz_battery->irq);
+
+ if (gpio_is_valid(pdata->gpio_charge)) {
+ ret = gpio_request(pdata->gpio_charge, dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "charger state gpio request failed.\n");
+ goto err_free_irq;
+ }
+ ret = gpio_direction_input(pdata->gpio_charge);
+ if (ret) {
+ dev_err(&pdev->dev, "charger state gpio set direction failed.\n");
+ goto err_free_gpio;
+ }
+
+ jz_battery->charge_irq = gpio_to_irq(pdata->gpio_charge);
+
+ if (jz_battery->charge_irq >= 0) {
+ ret = request_irq(jz_battery->charge_irq,
+ jz_battery_charge_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(&pdev->dev), jz_battery);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request charge irq: %d\n", ret);
+ goto err_free_gpio;
+ }
+ }
+ } else {
+ jz_battery->charge_irq = -1;
+ }
+
+ if (jz_battery->pdata->info.voltage_max_design <= 2500000)
+ jz4740_adc_set_config(pdev->dev.parent, JZ_ADC_CONFIG_BAT_MB,
+ JZ_ADC_CONFIG_BAT_MB);
+ else
+ jz4740_adc_set_config(pdev->dev.parent, JZ_ADC_CONFIG_BAT_MB, 0);
+
+ ret = power_supply_register(&pdev->dev, &jz_battery->battery);
+ if (ret) {
+ dev_err(&pdev->dev, "power supply battery register failed.\n");
+ goto err_free_charge_irq;
+ }
+
+ platform_set_drvdata(pdev, jz_battery);
+ schedule_delayed_work(&jz_battery->work, 0);
+
+ return 0;
+
+err_free_charge_irq:
+ if (jz_battery->charge_irq >= 0)
+ free_irq(jz_battery->charge_irq, jz_battery);
+err_free_gpio:
+ if (gpio_is_valid(pdata->gpio_charge))
+ gpio_free(jz_battery->pdata->gpio_charge);
+err_free_irq:
+ free_irq(jz_battery->irq, jz_battery);
+err_iounmap:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(jz_battery->base);
+err_release_mem_region:
+ release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
+err_free:
+ kfree(jz_battery);
+ return ret;
+}
+
+static int __devexit jz_battery_remove(struct platform_device *pdev)
+{
+ struct jz_battery *jz_battery = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&jz_battery->work);
+
+ if (gpio_is_valid(jz_battery->pdata->gpio_charge)) {
+ if (jz_battery->charge_irq >= 0)
+ free_irq(jz_battery->charge_irq, jz_battery);
+ gpio_free(jz_battery->pdata->gpio_charge);
+ }
+
+ power_supply_unregister(&jz_battery->battery);
+
+ free_irq(jz_battery->irq, jz_battery);
+
+ iounmap(jz_battery->base);
+ release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int jz_battery_suspend(struct device *dev)
+{
+ struct jz_battery *jz_battery = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&jz_battery->work);
+ jz_battery->status = POWER_SUPPLY_STATUS_UNKNOWN;
+
+ return 0;
+}
+
+static int jz_battery_resume(struct device *dev)
+{
+ struct jz_battery *jz_battery = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&jz_battery->work, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops jz_battery_pm_ops = {
+ .suspend = jz_battery_suspend,
+ .resume = jz_battery_resume,
+};
+
+#define JZ_BATTERY_PM_OPS (&jz_battery_pm_ops)
+#else
+#define JZ_BATTERY_PM_OPS NULL
+#endif
+
+static struct platform_driver jz_battery_driver = {
+ .probe = jz_battery_probe,
+ .remove = __devexit_p(jz_battery_remove),
+ .driver = {
+ .name = "jz4740-battery",
+ .owner = THIS_MODULE,
+ .pm = JZ_BATTERY_PM_OPS,
+ },
+};
+
+static int __init jz_battery_init(void)
+{
+ return platform_driver_register(&jz_battery_driver);
+}
+module_init(jz_battery_init);
+
+static void __exit jz_battery_exit(void)
+{
+ platform_driver_unregister(&jz_battery_driver);
+}
+module_exit(jz_battery_exit);
+
+MODULE_ALIAS("platform:jz4740-battery");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("JZ4740 SoC battery driver");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index baefcf1cffc..aafc1c506ed 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -1,7 +1,7 @@
/*
* Battery driver for One Laptop Per Child board.
*
- * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2006-2010 David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -384,7 +384,6 @@ static struct bin_attribute olpc_bat_eeprom = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
- .owner = THIS_MODULE,
},
.size = 0,
.read = olpc_bat_eeprom_read,
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
new file mode 100644
index 00000000000..fe16b482e91
--- /dev/null
+++ b/drivers/power/s3c_adc_battery.c
@@ -0,0 +1,431 @@
+/*
+ * iPAQ h1930/h1940/rx1950 battery controller driver
+ * Copyright (c) Vasily Khoruzhick
+ * Based on h1940_battery.c by Arnaud Patard
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/s3c_adc_battery.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <plat/adc.h>
+
+#define BAT_POLL_INTERVAL 10000 /* ms */
+#define JITTER_DELAY 500 /* ms */
+
+struct s3c_adc_bat {
+ struct power_supply psy;
+ struct s3c_adc_client *client;
+ struct s3c_adc_bat_pdata *pdata;
+ int volt_value;
+ int cur_value;
+ unsigned int timestamp;
+ int level;
+ int status;
+ int cable_plugged:1;
+};
+
+static struct delayed_work bat_work;
+
+static void s3c_adc_bat_ext_power_changed(struct power_supply *psy)
+{
+ schedule_delayed_work(&bat_work,
+ msecs_to_jiffies(JITTER_DELAY));
+}
+
+static enum power_supply_property s3c_adc_backup_bat_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+};
+
+static int s3c_adc_backup_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct s3c_adc_bat *bat = container_of(psy, struct s3c_adc_bat, psy);
+
+ if (!bat) {
+ dev_err(psy->dev, "%s: no battery infos ?!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (bat->volt_value < 0 ||
+ jiffies_to_msecs(jiffies - bat->timestamp) >
+ BAT_POLL_INTERVAL) {
+ bat->volt_value = s3c_adc_read(bat->client,
+ bat->pdata->backup_volt_channel);
+ bat->volt_value *= bat->pdata->backup_volt_mult;
+ bat->timestamp = jiffies;
+ }
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = bat->volt_value;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ val->intval = bat->pdata->backup_volt_min;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = bat->pdata->backup_volt_max;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct s3c_adc_bat backup_bat = {
+ .psy = {
+ .name = "backup-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = s3c_adc_backup_bat_props,
+ .num_properties = ARRAY_SIZE(s3c_adc_backup_bat_props),
+ .get_property = s3c_adc_backup_bat_get_property,
+ .use_for_apm = 1,
+ },
+};
+
+static enum power_supply_property s3c_adc_main_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static int calc_full_volt(int volt_val, int cur_val, int impedance)
+{
+ return volt_val + cur_val * impedance / 1000;
+}
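To make the helper concrete: the callers pass the voltage and current readings divided by 1000, i.e. in millivolts and milliamps, so the platform-supplied internal impedance appears to be in milliohms (an inference, not something the patch states). With a 4000 mV terminal reading, a 500 mA current and an assumed 100 mohm impedance, calc_full_volt() returns 4000 + 500 * 100 / 1000 = 4050 mV, the terminal voltage compensated for the drop across the cell's internal resistance.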
+
+static int s3c_adc_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct s3c_adc_bat *bat = container_of(psy, struct s3c_adc_bat, psy);
+
+ int new_level;
+ int full_volt;
+ const struct s3c_adc_bat_thresh *lut = bat->pdata->lut_noac;
+ unsigned int lut_size = bat->pdata->lut_noac_cnt;
+
+ if (!bat) {
+ dev_err(psy->dev, "no battery infos ?!\n");
+ return -EINVAL;
+ }
+
+ if (bat->volt_value < 0 || bat->cur_value < 0 ||
+ jiffies_to_msecs(jiffies - bat->timestamp) >
+ BAT_POLL_INTERVAL) {
+ bat->volt_value = s3c_adc_read(bat->client,
+ bat->pdata->volt_channel) * bat->pdata->volt_mult;
+ bat->cur_value = s3c_adc_read(bat->client,
+ bat->pdata->current_channel) * bat->pdata->current_mult;
+ bat->timestamp = jiffies;
+ }
+
+ if (bat->cable_plugged &&
+ ((bat->pdata->gpio_charge_finished < 0) ||
+ !gpio_get_value(bat->pdata->gpio_charge_finished))) {
+ lut = bat->pdata->lut_acin;
+ lut_size = bat->pdata->lut_acin_cnt;
+ }
+
+ new_level = 100000;
+ full_volt = calc_full_volt((bat->volt_value / 1000),
+ (bat->cur_value / 1000), bat->pdata->internal_impedance);
+
+ if (full_volt < calc_full_volt(lut->volt, lut->cur,
+ bat->pdata->internal_impedance)) {
+ lut_size--;
+ while (lut_size--) {
+ int lut_volt1;
+ int lut_volt2;
+
+ lut_volt1 = calc_full_volt(lut[0].volt, lut[0].cur,
+ bat->pdata->internal_impedance);
+ lut_volt2 = calc_full_volt(lut[1].volt, lut[1].cur,
+ bat->pdata->internal_impedance);
+ if (full_volt < lut_volt1 && full_volt >= lut_volt2) {
+ new_level = (lut[1].level +
+ (lut[0].level - lut[1].level) *
+ (full_volt - lut_volt2) /
+ (lut_volt1 - lut_volt2)) * 1000;
+ break;
+ }
+ new_level = lut[1].level * 1000;
+ lut++;
+ }
+ }
+
+ bat->level = new_level;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (bat->pdata->gpio_charge_finished < 0)
+ val->intval = bat->level == 100000 ?
+ POWER_SUPPLY_STATUS_FULL : bat->status;
+ else
+ val->intval = bat->status;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = 100000;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ val->intval = 0;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = bat->level;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = bat->volt_value;
+ return 0;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = bat->cur_value;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
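A worked example of the look-up-table interpolation above, with numbers invented purely for illustration: if two adjacent LUT entries correspond to compensated voltages of 4100 mV at level 100 and 3700 mV at level 50, a compensated reading of 3900 mV falls between them and yields (50 + (100 - 50) * (3900 - 3700) / (4100 - 3700)) * 1000 = 75000, i.e. 75% on the 0..100000 charge scale reported through POWER_SUPPLY_PROP_CHARGE_NOW.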
+
+static struct s3c_adc_bat main_bat = {
+ .psy = {
+ .name = "main-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = s3c_adc_main_bat_props,
+ .num_properties = ARRAY_SIZE(s3c_adc_main_bat_props),
+ .get_property = s3c_adc_bat_get_property,
+ .external_power_changed = s3c_adc_bat_ext_power_changed,
+ .use_for_apm = 1,
+ },
+};
+
+static void s3c_adc_bat_work(struct work_struct *work)
+{
+ struct s3c_adc_bat *bat = &main_bat;
+ int is_charged;
+ int is_plugged;
+ static int was_plugged;
+
+ is_plugged = power_supply_am_i_supplied(&bat->psy);
+ bat->cable_plugged = is_plugged;
+ if (is_plugged != was_plugged) {
+ was_plugged = is_plugged;
+ if (is_plugged) {
+ if (bat->pdata->enable_charger)
+ bat->pdata->enable_charger();
+ bat->status = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ if (bat->pdata->disable_charger)
+ bat->pdata->disable_charger();
+ bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ } else {
+ if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
+ is_charged = gpio_get_value(
+ main_bat.pdata->gpio_charge_finished);
+ if (is_charged) {
+ if (bat->pdata->disable_charger)
+ bat->pdata->disable_charger();
+ bat->status = POWER_SUPPLY_STATUS_FULL;
+ } else {
+ if (bat->pdata->enable_charger)
+ bat->pdata->enable_charger();
+ bat->status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+ }
+ }
+
+ power_supply_changed(&bat->psy);
+}
+
+static irqreturn_t s3c_adc_bat_charged(int irq, void *dev_id)
+{
+ schedule_delayed_work(&bat_work,
+ msecs_to_jiffies(JITTER_DELAY));
+ return IRQ_HANDLED;
+}
+
+static int __init s3c_adc_bat_probe(struct platform_device *pdev)
+{
+ struct s3c_adc_client *client;
+ struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
+ int ret;
+
+ client = s3c_adc_register(pdev, NULL, NULL, 0);
+ if (IS_ERR(client)) {
+ dev_err(&pdev->dev, "cannot register adc\n");
+ return PTR_ERR(client);
+ }
+
+ platform_set_drvdata(pdev, client);
+
+ main_bat.client = client;
+ main_bat.pdata = pdata;
+ main_bat.volt_value = -1;
+ main_bat.cur_value = -1;
+ main_bat.cable_plugged = 0;
+ main_bat.status = POWER_SUPPLY_STATUS_DISCHARGING;
+
+ ret = power_supply_register(&pdev->dev, &main_bat.psy);
+ if (ret)
+ goto err_reg_main;
+ if (pdata->backup_volt_mult) {
+ backup_bat.client = client;
+ backup_bat.pdata = pdev->dev.platform_data;
+ backup_bat.volt_value = -1;
+ ret = power_supply_register(&pdev->dev, &backup_bat.psy);
+ if (ret)
+ goto err_reg_backup;
+ }
+
+ INIT_DELAYED_WORK(&bat_work, s3c_adc_bat_work);
+
+ if (pdata->gpio_charge_finished >= 0) {
+ ret = gpio_request(pdata->gpio_charge_finished, "charged");
+ if (ret)
+ goto err_gpio;
+
+ ret = request_irq(gpio_to_irq(pdata->gpio_charge_finished),
+ s3c_adc_bat_charged,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "battery charged", NULL);
+ if (ret)
+ goto err_irq;
+ }
+
+ if (pdata->init) {
+ ret = pdata->init();
+ if (ret)
+ goto err_platform;
+ }
+
+ dev_info(&pdev->dev, "successfully loaded\n");
+ device_init_wakeup(&pdev->dev, 1);
+
+ /* Schedule timer to check current status */
+ schedule_delayed_work(&bat_work,
+ msecs_to_jiffies(JITTER_DELAY));
+
+ return 0;
+
+err_platform:
+ if (pdata->gpio_charge_finished >= 0)
+ free_irq(gpio_to_irq(pdata->gpio_charge_finished), NULL);
+err_irq:
+ if (pdata->gpio_charge_finished >= 0)
+ gpio_free(pdata->gpio_charge_finished);
+err_gpio:
+ if (pdata->backup_volt_mult)
+ power_supply_unregister(&backup_bat.psy);
+err_reg_backup:
+ power_supply_unregister(&main_bat.psy);
+err_reg_main:
+ return ret;
+}
+
+static int s3c_adc_bat_remove(struct platform_device *pdev)
+{
+ struct s3c_adc_client *client = platform_get_drvdata(pdev);
+ struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
+
+ power_supply_unregister(&main_bat.psy);
+ if (pdata->backup_volt_mult)
+ power_supply_unregister(&backup_bat.psy);
+
+ s3c_adc_release(client);
+
+ if (pdata->gpio_charge_finished >= 0) {
+ free_irq(gpio_to_irq(pdata->gpio_charge_finished), NULL);
+ gpio_free(pdata->gpio_charge_finished);
+ }
+
+ cancel_delayed_work(&bat_work);
+
+ if (pdata->exit)
+ pdata->exit();
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3c_adc_bat_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
+
+ if (pdata->gpio_charge_finished >= 0) {
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(
+ gpio_to_irq(pdata->gpio_charge_finished));
+ else {
+ disable_irq(gpio_to_irq(pdata->gpio_charge_finished));
+ main_bat.pdata->disable_charger();
+ }
+ }
+
+ return 0;
+}
+
+static int s3c_adc_bat_resume(struct platform_device *pdev)
+{
+ struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
+
+ if (pdata->gpio_charge_finished >= 0) {
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(
+ gpio_to_irq(pdata->gpio_charge_finished));
+ else
+ enable_irq(gpio_to_irq(pdata->gpio_charge_finished));
+ }
+
+ /* Schedule timer to check current status */
+ schedule_delayed_work(&bat_work,
+ msecs_to_jiffies(JITTER_DELAY));
+
+ return 0;
+}
+#else
+#define s3c_adc_bat_suspend NULL
+#define s3c_adc_bat_resume NULL
+#endif
+
+static struct platform_driver s3c_adc_bat_driver = {
+ .driver = {
+ .name = "s3c-adc-battery",
+ },
+ .probe = s3c_adc_bat_probe,
+ .remove = s3c_adc_bat_remove,
+ .suspend = s3c_adc_bat_suspend,
+ .resume = s3c_adc_bat_resume,
+};
+
+static int __init s3c_adc_bat_init(void)
+{
+ return platform_driver_register(&s3c_adc_bat_driver);
+}
+module_init(s3c_adc_bat_init);
+
+static void __exit s3c_adc_bat_exit(void)
+{
+ platform_driver_unregister(&s3c_adc_bat_driver);
+}
+module_exit(s3c_adc_bat_exit);
+
+MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
+MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controler driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 4e8afce0c81..5071d85ec12 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -29,7 +29,6 @@ static DEFINE_MUTEX(bat_lock);
static struct work_struct bat_work;
static struct mutex work_lock;
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
-static struct wm97xx_batt_info *gpdata;
static enum power_supply_property *prop;
static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
@@ -172,12 +171,6 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
struct wm97xx_pdata *wmdata = dev->dev.platform_data;
struct wm97xx_batt_pdata *pdata;
- if (gpdata) {
- dev_err(&dev->dev, "Do not pass platform_data through "
- "wm97xx_bat_set_pdata!\n");
- return -EINVAL;
- }
-
if (!wmdata) {
dev_err(&dev->dev, "No platform data supplied\n");
return -EINVAL;
@@ -308,15 +301,6 @@ static void __exit wm97xx_bat_exit(void)
platform_driver_unregister(&wm97xx_bat_driver);
}
-/* The interface is deprecated, as well as linux/wm97xx_batt.h */
-void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data);
-
-void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data)
-{
- gpdata = data;
-}
-EXPORT_SYMBOL_GPL(wm97xx_bat_set_pdata);
-
module_init(wm97xx_bat_init);
module_exit(wm97xx_bat_exit);
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8d8d9..2ce2eb71d0f 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
int ret = -EINVAL;
- if (info->vol_table && (index < (2 << info->vol_nbits))) {
+ if (info->vol_table && (index < (1 << info->vol_nbits))) {
ret = info->vol_table[index];
if (info->slope_double)
ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
max_uV = max_uV >> 1;
}
if (info->vol_table) {
- for (i = 0; i < (2 << info->vol_nbits); i++) {
+ for (i = 0; i < (1 << info->vol_nbits); i++) {
if (!info->vol_table[i])
break;
if ((min_uV <= info->vol_table[i])
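The two hunks above correct the table bound: an n-bit voltage selector encodes 1 << n distinct values, whereas 2 << n is 2^(n+1). For a hypothetical vol_nbits of 3, 1 << 3 gives the 8 valid indices 0..7, while 2 << 3 would let list_voltage() and choose_voltage() walk 8 entries past the end of vol_table.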
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 04f2e085116..172951bf23a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -100,6 +100,14 @@ config REGULATOR_MAX8925
help
Say y here to support the voltage regulator of the Maxim MAX8925 PMIC.
+config REGULATOR_MAX8998
+ tristate "Maxim 8998 voltage regulator"
+ depends on MFD_MAX8998
+ help
+ This driver controls a Maxim 8998 voltage output regulator
+ via I2C bus. The provided regulator is suitable for S3C6410
+ and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
+
config REGULATOR_TWL4030
bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
depends on TWL4030_CORE
@@ -201,5 +209,31 @@ config REGULATOR_88PM8607
help
This driver supports 88PM8607 voltage regulator chips.
+config REGULATOR_ISL6271A
+ tristate "Intersil ISL6271A Power regulator"
+ depends on I2C
+ help
+ This driver supports the Intersil ISL6271A voltage regulator chip.
+
+config REGULATOR_AD5398
+ tristate "Analog Devices AD5398/AD5821 regulators"
+ depends on I2C
+ help
+ This driver supports AD5398 and AD5821 current regulator chips.
+ If built as a module, it will be called ad5398.ko.
+
+config REGULATOR_AB8500
+ bool "ST-Ericsson AB8500 Power Regulators"
+ depends on AB8500_CORE
+ help
+ This driver supports the regulators found on the ST-Ericsson
+ mixed-signal AB8500 PMIC.
+
+config REGULATOR_TPS6586X
+ tristate "TI TPS6586X Power regulators"
+ depends on MFD_TPS6586X
+ help
+ This driver supports TPS6586X voltage regulator chips.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 4e7feece22d..8285fd832e1 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
+obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
obj-$(CONFIG_REGULATOR_DUMMY) += dummy.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
@@ -16,12 +17,14 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
+obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
@@ -31,5 +34,7 @@ obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
+obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 11790990277..b349266a43d 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
"%s: failed to register regulator %s err %d\n",
__func__, ab3100_regulator_desc[i].name,
err);
- i--;
/* remove the already registered regulators */
- while (i > 0) {
+ while (--i >= 0)
regulator_unregister(ab3100_regulators[i].rdev);
- i--;
- }
return err;
}
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
new file mode 100644
index 00000000000..28c7ae67cec
--- /dev/null
+++ b/drivers/regulator/ab8500.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *
+ * AB8500 peripheral regulators
+ *
+ * AB8500 supports the following regulators,
+ * LDOs - VAUDIO, VANAMIC1/2, VDIGMIC, VINTCORE12, VTVOUT,
+ * VAUX1/2/3, VANA
+ *
+ * For DB8500 cut 1.0 and earlier versions of the silicon, all register
+ * accesses go through the DB8500 SPI. From cut 1.1 onwards, these
+ * accesses go through the DB8500 PRCMU I2C
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
+
+/**
+ * struct ab8500_regulator_info - ab8500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @ab8500: ab8500 parent
+ * @regulator: regulator device
+ * @max_uV: maximum voltage (for variable voltage supplies)
+ * @min_uV: minimum voltage (for variable voltage supplies)
+ * @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @update_reg: register to control on/off
+ * @mask: mask to enable/disable regulator
+ * @enable: bits to enable the regulator in normal(high power) mode
+ * @voltage_reg: register to control regulator voltage
+ * @voltage_mask: mask to control regulator voltage
+ * @supported_voltages: supported voltage table
+ * @voltages_len: number of supported voltages for the regulator
+ */
+struct ab8500_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct ab8500 *ab8500;
+ struct regulator_dev *regulator;
+ int max_uV;
+ int min_uV;
+ int fixed_uV;
+ int update_reg;
+ int mask;
+ int enable;
+ int voltage_reg;
+ int voltage_mask;
+ int const *supported_voltages;
+ int voltages_len;
+};
+
+/* voltage tables for the vauxn/vintcore supplies */
+static const int ldo_vauxn_voltages[] = {
+ 1100000,
+ 1200000,
+ 1300000,
+ 1400000,
+ 1500000,
+ 1800000,
+ 1850000,
+ 1900000,
+ 2500000,
+ 2650000,
+ 2700000,
+ 2750000,
+ 2800000,
+ 2900000,
+ 3000000,
+ 3300000,
+};
+
+static const int ldo_vintcore_voltages[] = {
+ 1200000,
+ 1225000,
+ 1250000,
+ 1275000,
+ 1300000,
+ 1325000,
+ 1350000,
+};
+
+static int ab8500_regulator_enable(struct regulator_dev *rdev)
+{
+ int regulator_id, ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ ret = ab8500_set_bits(info->ab8500, info->update_reg,
+ info->mask, info->enable);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set enable bits for regulator\n");
+ return ret;
+}
+
+static int ab8500_regulator_disable(struct regulator_dev *rdev)
+{
+ int regulator_id, ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ ret = ab8500_set_bits(info->ab8500, info->update_reg,
+ info->mask, 0x0);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set disable bits for regulator\n");
+ return ret;
+}
+
+static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ int regulator_id, ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ ret = ab8500_read(info->ab8500, info->update_reg);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read 0x%x register\n", info->update_reg);
+ return ret;
+ }
+
+ if (ret & info->mask)
+ return true;
+ else
+ return false;
+}
+
+static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ int regulator_id;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ /* return the uV for the fixed regulators */
+ if (info->fixed_uV)
+ return info->fixed_uV;
+
+ if (selector >= info->voltages_len)
+ return -EINVAL;
+
+ return info->supported_voltages[selector];
+}
+
+static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ int regulator_id, ret, val;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ ret = ab8500_read(info->ab8500, info->voltage_reg);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read voltage reg for regulator\n");
+ return ret;
+ }
+
+ /* vintcore has a different layout */
+ val = ret & info->voltage_mask;
+ if (regulator_id == AB8500_LDO_INTCORE)
+ ret = info->supported_voltages[val >> 0x3];
+ else
+ ret = info->supported_voltages[val];
+
+ return ret;
+}
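The "different layout" note refers to the INTCORE voltage field sitting at bits [5:3] (voltage_mask 0x38 in the table further down), so the masked value must be shifted right by 3 before indexing ldo_vintcore_voltages. For a hypothetical register value of 0x1c, the mask leaves 0x18 and the shift selects entry 3, i.e. 1275000 uV, whereas the VAUXn supplies use a plain 4-bit index into ldo_vauxn_voltages.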
+
+static int ab8500_get_best_voltage_index(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int i;
+
+ /* check the supported voltage */
+ for (i = 0; i < info->voltages_len; i++) {
+ if ((info->supported_voltages[i] >= min_uV) &&
+ (info->supported_voltages[i] <= max_uV))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int regulator_id, ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ /* get the appropriate voltages within the range */
+ ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't get best voltage for regulator\n");
+ return ret;
+ }
+
+ /* set the registers for the request */
+ ret = ab8500_set_bits(info->ab8500, info->voltage_reg,
+ info->voltage_mask, ret);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set voltage reg for regulator\n");
+
+ return ret;
+}
+
+static struct regulator_ops ab8500_regulator_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_voltage = ab8500_regulator_get_voltage,
+ .set_voltage = ab8500_regulator_set_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
+static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ int regulator_id;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ return info->fixed_uV;
+}
+
+static struct regulator_ops ab8500_ldo_fixed_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
+#define AB8500_LDO(_id, min, max, reg, reg_mask, reg_enable, \
+ volt_reg, volt_mask, voltages, \
+ len_volts) \
+{ \
+ .desc = { \
+ .name = "LDO-" #_id, \
+ .ops = &ab8500_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = AB8500_LDO_##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = (min) * 1000, \
+ .max_uV = (max) * 1000, \
+ .update_reg = reg, \
+ .mask = reg_mask, \
+ .enable = reg_enable, \
+ .voltage_reg = volt_reg, \
+ .voltage_mask = volt_mask, \
+ .supported_voltages = voltages, \
+ .voltages_len = len_volts, \
+ .fixed_uV = 0, \
+}
+
+#define AB8500_FIXED_LDO(_id, fixed, reg, reg_mask, \
+ reg_enable) \
+{ \
+ .desc = { \
+ .name = "LDO-" #_id, \
+ .ops = &ab8500_ldo_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = AB8500_LDO_##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .fixed_uV = fixed * 1000, \
+ .update_reg = reg, \
+ .mask = reg_mask, \
+ .enable = reg_enable, \
+}
+
+static struct ab8500_regulator_info ab8500_regulator_info[] = {
+ /*
+ * Variable Voltage LDOs
+ * name, min uV, max uV, ctrl reg, reg mask, enable mask,
+ * volt ctrl reg, volt ctrl mask, volt table, num supported volts
+ */
+ AB8500_LDO(AUX1, 1100, 3300, 0x0409, 0x3, 0x1, 0x041f, 0xf,
+ ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
+ AB8500_LDO(AUX2, 1100, 3300, 0x0409, 0xc, 0x4, 0x0420, 0xf,
+ ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
+ AB8500_LDO(AUX3, 1100, 3300, 0x040a, 0x3, 0x1, 0x0421, 0xf,
+ ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
+ AB8500_LDO(INTCORE, 1100, 3300, 0x0380, 0x4, 0x4, 0x0380, 0x38,
+ ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)),
+
+ /*
+ * Fixed Voltage LDOs
+ * name, o/p uV, ctrl reg, reg mask, enable mask
+ */
+ AB8500_FIXED_LDO(TVOUT, 2000, 0x0380, 0x2, 0x2),
+ AB8500_FIXED_LDO(AUDIO, 2000, 0x0383, 0x2, 0x2),
+ AB8500_FIXED_LDO(ANAMIC1, 2050, 0x0383, 0x4, 0x4),
+ AB8500_FIXED_LDO(ANAMIC2, 2050, 0x0383, 0x8, 0x8),
+ AB8500_FIXED_LDO(DMIC, 1800, 0x0383, 0x10, 0x10),
+ AB8500_FIXED_LDO(ANA, 1200, 0x0383, 0xc, 0x4),
+};
+
+static inline struct ab8500_regulator_info *find_regulator_info(int id)
+{
+ struct ab8500_regulator_info *info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ info = &ab8500_regulator_info[i];
+ if (info->desc.id == id)
+ return info;
+ }
+ return NULL;
+}
+
+static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *pdata;
+ int i, err;
+
+ if (!ab8500) {
+ dev_err(&pdev->dev, "null mfd parent\n");
+ return -EINVAL;
+ }
+ pdata = dev_get_platdata(ab8500->dev);
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ struct ab8500_regulator_info *info = NULL;
+
+ /* assign per-regulator data */
+ info = &ab8500_regulator_info[i];
+ info->dev = &pdev->dev;
+ info->ab8500 = ab8500;
+
+ info->regulator = regulator_register(&info->desc, &pdev->dev,
+ pdata->regulator[i], info);
+ if (IS_ERR(info->regulator)) {
+ err = PTR_ERR(info->regulator);
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ /* when we fail, un-register all earlier regulators */
+ while (--i >= 0) {
+ info = &ab8500_regulator_info[i];
+ regulator_unregister(info->regulator);
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ struct ab8500_regulator_info *info = NULL;
+ info = &ab8500_regulator_info[i];
+ regulator_unregister(info->regulator);
+ }
+
+ return 0;
+}
+
+static struct platform_driver ab8500_regulator_driver = {
+ .probe = ab8500_regulator_probe,
+ .remove = __devexit_p(ab8500_regulator_remove),
+ .driver = {
+ .name = "ab8500-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_regulator_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ab8500_regulator_driver);
+ if (ret != 0)
+ pr_err("Failed to register ab8500 regulator: %d\n", ret);
+
+ return ret;
+}
+subsys_initcall(ab8500_regulator_init);
+
+static void __exit ab8500_regulator_exit(void)
+{
+ platform_driver_unregister(&ab8500_regulator_driver);
+}
+module_exit(ab8500_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC");
+MODULE_ALIAS("platform:ab8500-regulator");
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
new file mode 100644
index 00000000000..df1fb53c09d
--- /dev/null
+++ b/drivers/regulator/ad5398.c
@@ -0,0 +1,288 @@
+/*
+ * Voltage and current regulation for AD5398 and AD5821
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#define AD5398_CURRENT_EN_MASK 0x8000
+
+struct ad5398_chip_info {
+ struct i2c_client *client;
+ int min_uA;
+ int max_uA;
+ unsigned int current_level;
+ unsigned int current_mask;
+ unsigned int current_offset;
+ struct regulator_dev *rdev;
+};
+
+static int ad5398_calc_current(struct ad5398_chip_info *chip,
+ unsigned selector)
+{
+ unsigned range_uA = chip->max_uA - chip->min_uA;
+
+ return chip->min_uA + (selector * range_uA / chip->current_level);
+}
+
+static int ad5398_read_reg(struct i2c_client *client, unsigned short *data)
+{
+ unsigned short val;
+ int ret;
+
+ ret = i2c_master_recv(client, (char *)&val, 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+ *data = be16_to_cpu(val);
+
+ return ret;
+}
+
+static int ad5398_write_reg(struct i2c_client *client, const unsigned short data)
+{
+ unsigned short val;
+ int ret;
+
+ val = cpu_to_be16(data);
+ ret = i2c_master_send(client, (char *)&val, 2);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static int ad5398_get_current_limit(struct regulator_dev *rdev)
+{
+ struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
+ struct i2c_client *client = chip->client;
+ unsigned short data;
+ int ret;
+
+ ret = ad5398_read_reg(client, &data);
+ if (ret < 0)
+ return ret;
+
+ ret = (data & chip->current_mask) >> chip->current_offset;
+
+ return ad5398_calc_current(chip, ret);
+}
+
+static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA)
+{
+ struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
+ struct i2c_client *client = chip->client;
+ unsigned range_uA = chip->max_uA - chip->min_uA;
+ unsigned selector;
+ unsigned short data;
+ int ret;
+
+ if (min_uA > chip->max_uA || min_uA < chip->min_uA)
+ return -EINVAL;
+ if (max_uA > chip->max_uA || max_uA < chip->min_uA)
+ return -EINVAL;
+
+ selector = ((min_uA - chip->min_uA) * chip->current_level +
+ range_uA - 1) / range_uA;
+ if (ad5398_calc_current(chip, selector) > max_uA)
+ return -EINVAL;
+
+ dev_dbg(&client->dev, "changing current %dmA\n",
+ ad5398_calc_current(chip, selector) / 1000);
+
+ /* read chip enable bit */
+ ret = ad5398_read_reg(client, &data);
+ if (ret < 0)
+ return ret;
+
+ /* prepare register data */
+ selector = (selector << chip->current_offset) & chip->current_mask;
+ data = (unsigned short)selector | (data & AD5398_CURRENT_EN_MASK);
+
+ /* write the new current value back as well as enable bit */
+ ret = ad5398_write_reg(client, data);
+
+ return ret;
+}
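A worked example of the selector arithmetic, using the df_10_4_120 format shared by both supported parts (10 data bits at a 4-bit offset, 0 to 120000 uA) and an arbitrarily chosen request: current_level is 1024, so asking for at least 50000 uA rounds the selector up to (50000 * 1024 + 119999) / 120000 = 427, which ad5398_calc_current() maps back to 427 * 120000 / 1024 = 50039 uA; the request is rejected only if that rounded-up value exceeds max_uA. The selector is then shifted into bits [13:4] and written back alongside the preserved enable bit.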
+
+static int ad5398_is_enabled(struct regulator_dev *rdev)
+{
+ struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
+ struct i2c_client *client = chip->client;
+ unsigned short data;
+ int ret;
+
+ ret = ad5398_read_reg(client, &data);
+ if (ret < 0)
+ return ret;
+
+ if (data & AD5398_CURRENT_EN_MASK)
+ return 1;
+ else
+ return 0;
+}
+
+static int ad5398_enable(struct regulator_dev *rdev)
+{
+ struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
+ struct i2c_client *client = chip->client;
+ unsigned short data;
+ int ret;
+
+ ret = ad5398_read_reg(client, &data);
+ if (ret < 0)
+ return ret;
+
+ if (data & AD5398_CURRENT_EN_MASK)
+ return 0;
+
+ data |= AD5398_CURRENT_EN_MASK;
+
+ ret = ad5398_write_reg(client, data);
+
+ return ret;
+}
+
+static int ad5398_disable(struct regulator_dev *rdev)
+{
+ struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
+ struct i2c_client *client = chip->client;
+ unsigned short data;
+ int ret;
+
+ ret = ad5398_read_reg(client, &data);
+ if (ret < 0)
+ return ret;
+
+ if (!(data & AD5398_CURRENT_EN_MASK))
+ return 0;
+
+ data &= ~AD5398_CURRENT_EN_MASK;
+
+ ret = ad5398_write_reg(client, data);
+
+ return ret;
+}
+
+static struct regulator_ops ad5398_ops = {
+ .get_current_limit = ad5398_get_current_limit,
+ .set_current_limit = ad5398_set_current_limit,
+ .enable = ad5398_enable,
+ .disable = ad5398_disable,
+ .is_enabled = ad5398_is_enabled,
+};
+
+static struct regulator_desc ad5398_reg = {
+ .name = "isink",
+ .id = 0,
+ .ops = &ad5398_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+};
+
+struct ad5398_current_data_format {
+ int current_bits;
+ int current_offset;
+ int min_uA;
+ int max_uA;
+};
+
+static const struct ad5398_current_data_format df_10_4_120 = {10, 4, 0, 120000};
+
+static const struct i2c_device_id ad5398_id[] = {
+ { "ad5398", (kernel_ulong_t)&df_10_4_120 },
+ { "ad5821", (kernel_ulong_t)&df_10_4_120 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad5398_id);
+
+static int __devinit ad5398_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regulator_init_data *init_data = client->dev.platform_data;
+ struct ad5398_chip_info *chip;
+ const struct ad5398_current_data_format *df =
+ (struct ad5398_current_data_format *)id->driver_data;
+ int ret;
+
+ if (!init_data)
+ return -EINVAL;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+
+ chip->min_uA = df->min_uA;
+ chip->max_uA = df->max_uA;
+ chip->current_level = 1 << df->current_bits;
+ chip->current_offset = df->current_offset;
+ chip->current_mask = (chip->current_level - 1) << chip->current_offset;
+
+ chip->rdev = regulator_register(&ad5398_reg, &client->dev,
+ init_data, chip);
+ if (IS_ERR(chip->rdev)) {
+ ret = PTR_ERR(chip->rdev);
+ dev_err(&client->dev, "failed to register %s %s\n",
+ id->name, ad5398_reg.name);
+ goto err;
+ }
+
+ i2c_set_clientdata(client, chip);
+ dev_dbg(&client->dev, "%s regulator driver is registered.\n", id->name);
+ return 0;
+
+err:
+ kfree(chip);
+ return ret;
+}
+
+static int __devexit ad5398_remove(struct i2c_client *client)
+{
+ struct ad5398_chip_info *chip = i2c_get_clientdata(client);
+
+ regulator_unregister(chip->rdev);
+ kfree(chip);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static struct i2c_driver ad5398_driver = {
+ .probe = ad5398_probe,
+ .remove = __devexit_p(ad5398_remove),
+ .driver = {
+ .name = "ad5398",
+ },
+ .id_table = ad5398_id,
+};
+
+static int __init ad5398_init(void)
+{
+ return i2c_add_driver(&ad5398_driver);
+}
+subsys_initcall(ad5398_init);
+
+static void __exit ad5398_exit(void)
+{
+ i2c_del_driver(&ad5398_driver);
+}
+module_exit(ad5398_exit);
+
+MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
+MODULE_AUTHOR("Sonic Zhang");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:ad5398-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2248087b9be..cc8b337b911 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
constraints->min_uA != constraints->max_uA) {
ret = _regulator_get_current_limit(rdev);
if (ret > 0)
- count += sprintf(buf + count, "at %d uA ", ret / 1000);
+ count += sprintf(buf + count, "at %d mA ", ret / 1000);
}
if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -1025,7 +1025,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
if (regulator->dev_attr.attr.name == NULL)
goto attr_name_err;
- regulator->dev_attr.attr.owner = THIS_MODULE;
regulator->dev_attr.attr.mode = 0444;
regulator->dev_attr.show = device_requested_uA_show;
err = device_create_file(dev, &regulator->dev_attr);
@@ -2303,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
dev_set_name(&rdev->dev, "regulator.%d",
atomic_inc_return(&regulator_no) - 1);
ret = device_register(&rdev->dev);
- if (ret != 0)
+ if (ret != 0) {
+ put_device(&rdev->dev);
goto clean;
+ }
dev_set_drvdata(&rdev->dev, rdev);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
new file mode 100644
index 00000000000..d61ecb885a8
--- /dev/null
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -0,0 +1,236 @@
+/*
+ * isl6271a-regulator.c
+ *
+ * Support for Intersil ISL6271A voltage regulator
+ *
+ * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#define ISL6271A_VOLTAGE_MIN 850000
+#define ISL6271A_VOLTAGE_MAX 1600000
+#define ISL6271A_VOLTAGE_STEP 50000
+
+/* PMIC details */
+struct isl_pmic {
+ struct i2c_client *client;
+ struct regulator_dev *rdev[3];
+ struct mutex mtx;
+};
+
+static int isl6271a_get_voltage(struct regulator_dev *dev)
+{
+ struct isl_pmic *pmic = rdev_get_drvdata(dev);
+ int idx, data;
+
+ mutex_lock(&pmic->mtx);
+
+ idx = i2c_smbus_read_byte(pmic->client);
+ if (idx < 0) {
+ dev_err(&pmic->client->dev, "Error getting voltage\n");
+ data = idx;
+ goto out;
+ }
+
+ /* Convert the data from chip to microvolts */
+ data = ISL6271A_VOLTAGE_MIN + (ISL6271A_VOLTAGE_STEP * (idx & 0xf));
+
+out:
+ mutex_unlock(&pmic->mtx);
+ return data;
+}
+
+static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV)
+{
+ struct isl_pmic *pmic = rdev_get_drvdata(dev);
+ int vsel, err, data;
+
+ if (minuV < ISL6271A_VOLTAGE_MIN || minuV > ISL6271A_VOLTAGE_MAX)
+ return -EINVAL;
+ if (maxuV < ISL6271A_VOLTAGE_MIN || maxuV > ISL6271A_VOLTAGE_MAX)
+ return -EINVAL;
+
+ /* Align to the 50000 uV (50 mV) step */
+ vsel = minuV - (minuV % ISL6271A_VOLTAGE_STEP);
+
+ /* If the result fell out of [minuV,maxuV] range, put it back */
+ if (vsel < minuV)
+ vsel += ISL6271A_VOLTAGE_STEP;
+
+ /* Convert the microvolts to data for the chip */
+ data = (vsel - ISL6271A_VOLTAGE_MIN) / ISL6271A_VOLTAGE_STEP;
+
+ mutex_lock(&pmic->mtx);
+
+ err = i2c_smbus_write_byte(pmic->client, data);
+ if (err < 0)
+ dev_err(&pmic->client->dev, "Error setting voltage\n");
+
+ mutex_unlock(&pmic->mtx);
+ return err;
+}
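For instance (request values chosen arbitrarily), asking for 1.23-1.30 V first rounds down to 1200000 uV; that falls below minuV, so one 50 mV step is added back to give vsel = 1250000 uV, and the byte written over SMBus is (1250000 - 850000) / 50000 = 8, the same selector that isl6271a_list_voltage() maps back to 1.25 V.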
+
+static int isl6271a_list_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ return ISL6271A_VOLTAGE_MIN + (ISL6271A_VOLTAGE_STEP * selector);
+}
+
+static struct regulator_ops isl_core_ops = {
+ .get_voltage = isl6271a_get_voltage,
+ .set_voltage = isl6271a_set_voltage,
+ .list_voltage = isl6271a_list_voltage,
+};
+
+static int isl6271a_get_fixed_voltage(struct regulator_dev *dev)
+{
+ int id = rdev_get_id(dev);
+ return (id == 1) ? 1100000 : 1300000;
+}
+
+static int isl6271a_list_fixed_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ int id = rdev_get_id(dev);
+ return (id == 1) ? 1100000 : 1300000;
+}
+
+static struct regulator_ops isl_fixed_ops = {
+ .get_voltage = isl6271a_get_fixed_voltage,
+ .list_voltage = isl6271a_list_fixed_voltage,
+};
+
+static struct regulator_desc isl_rd[] = {
+ {
+ .name = "Core Buck",
+ .id = 0,
+ .n_voltages = 16,
+ .ops = &isl_core_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO1",
+ .id = 1,
+ .n_voltages = 1,
+ .ops = &isl_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO2",
+ .id = 2,
+ .n_voltages = 1,
+ .ops = &isl_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __devinit isl6271a_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regulator_init_data *init_data = i2c->dev.platform_data;
+ struct isl_pmic *pmic;
+ int err, i;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ if (!init_data) {
+ dev_err(&i2c->dev, "no platform data supplied\n");
+ return -EIO;
+ }
+
+ pmic = kzalloc(sizeof(struct isl_pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ pmic->client = i2c;
+
+ mutex_init(&pmic->mtx);
+
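+ /* register the core buck converter and the two fixed LDOs */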
+ for (i = 0; i < 3; i++) {
+ pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
+ init_data, pmic);
+ if (IS_ERR(pmic->rdev[i])) {
+ dev_err(&i2c->dev, "failed to register %s\n", id->name);
+ err = PTR_ERR(pmic->rdev[i]);
+ goto error;
+ }
+ }
+
+ i2c_set_clientdata(i2c, pmic);
+
+ return 0;
+
+error:
+ while (--i >= 0)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+ return err;
+}
+
+static int __devexit isl6271a_remove(struct i2c_client *i2c)
+{
+ struct isl_pmic *pmic = i2c_get_clientdata(i2c);
+ int i;
+
+ i2c_set_clientdata(i2c, NULL);
+
+ for (i = 0; i < 3; i++)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+
+ return 0;
+}
+
+static const struct i2c_device_id isl6271a_id[] = {
+ {.name = "isl6271a", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, isl6271a_id);
+
+static struct i2c_driver isl6271a_i2c_driver = {
+ .driver = {
+ .name = "isl6271a",
+ .owner = THIS_MODULE,
+ },
+ .probe = isl6271a_probe,
+ .remove = __devexit_p(isl6271a_remove),
+ .id_table = isl6271a_id,
+};
+
+static int __init isl6271a_init(void)
+{
+ return i2c_add_driver(&isl6271a_i2c_driver);
+}
+
+static void __exit isl6271a_cleanup(void)
+{
+ i2c_del_driver(&isl6271a_i2c_driver);
+}
+
+subsys_initcall(isl6271a_init);
+module_exit(isl6271a_cleanup);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("Intersil ISL6271A voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 8ae3732eb24..3bb82b624e1 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -377,7 +377,7 @@ static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
if (count != 1)
return -EIO;
ret = i2c_smbus_read_byte_data(i2c, reg);
- if (ret < 0 || count != 1)
+ if (ret < 0)
return -EIO;
*dest = ret;
@@ -387,15 +387,9 @@ static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
static int lp3971_i2c_write(struct i2c_client *i2c, char reg, int count,
const u16 *src)
{
- int ret;
-
if (count != 1)
return -EIO;
- ret = i2c_smbus_write_byte_data(i2c, reg, *src);
- if (ret >= 0)
- return 0;
-
- return ret;
+ return i2c_smbus_write_byte_data(i2c, reg, *src);
}
static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg)
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 2b54d9d75f1..559cfa271a4 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
return -EINVAL;
- if (min_uV >= 3000000)
- selector = 3;
- if (min_uV < 3000000)
- selector = 2;
- if (min_uV < 2500000)
- selector = 1;
if (min_uV < 1800000)
selector = 0;
+ else if (min_uV < 2500000)
+ selector = 1;
+ else if (min_uV < 3000000)
+ selector = 2;
+ else if (min_uV >= 3000000)
+ selector = 3;
if (max1586_v6_calc_voltage(selector) > max_uV)
return -EINVAL;
@@ -223,7 +223,7 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
}
}
- i2c_set_clientdata(client, rdev);
+ i2c_set_clientdata(client, max1586);
dev_info(&client->dev, "Maxim 1586 regulator driver loaded\n");
return 0;
@@ -238,13 +238,13 @@ out:
static int __devexit max1586_pmic_remove(struct i2c_client *client)
{
- struct regulator_dev **rdev = i2c_get_clientdata(client);
+ struct max1586_data *max1586 = i2c_get_clientdata(client);
int i;
for (i = 0; i <= MAX1586_V6; i++)
- if (rdev[i])
- regulator_unregister(rdev[i]);
- kfree(rdev);
+ if (max1586->rdev[i])
+ regulator_unregister(max1586->rdev[i]);
+ kfree(max1586);
return 0;
}
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 4520ace3f7e..6b60a9c0366 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
/* set external clock frequency */
info->extclk_freq = pdata->extclk_freq;
max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
- info->extclk_freq);
+ info->extclk_freq << 6);
}
if (pdata->ramp_timing) {
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index d97220efae5..c570e6eb0db 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -450,7 +450,7 @@ static int __devinit max8660_probe(struct i2c_client *client,
}
}
- i2c_set_clientdata(client, rdev);
+ i2c_set_clientdata(client, max8660);
dev_info(&client->dev, "Maxim 8660/8661 regulator driver loaded\n");
return 0;
@@ -465,13 +465,13 @@ out:
static int __devexit max8660_remove(struct i2c_client *client)
{
- struct regulator_dev **rdev = i2c_get_clientdata(client);
+ struct max8660 *max8660 = i2c_get_clientdata(client);
int i;
for (i = 0; i < MAX8660_V_END; i++)
- if (rdev[i])
- regulator_unregister(rdev[i]);
- kfree(rdev);
+ if (max8660->rdev[i])
+ regulator_unregister(max8660->rdev[i]);
+ kfree(max8660);
return 0;
}
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
new file mode 100644
index 00000000000..a1baf1fbe00
--- /dev/null
+++ b/drivers/regulator/max8998.c
@@ -0,0 +1,637 @@
+/*
+ * max8998.c - Voltage regulator driver for the Maxim 8998
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+struct max8998_data {
+ struct device *dev;
+ struct max8998_dev *iodev;
+ int num_regulators;
+ struct regulator_dev **rdev;
+};
+
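+/* voltage map entries are in mV; max8998_list_voltage() converts to uV */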
+struct voltage_map_desc {
+ int min;
+ int max;
+ int step;
+};
+
+/* Voltage maps */
+static const struct voltage_map_desc ldo23_voltage_map_desc = {
+ .min = 800, .step = 50, .max = 1300,
+};
+static const struct voltage_map_desc ldo456711_voltage_map_desc = {
+ .min = 1600, .step = 100, .max = 3600,
+};
+static const struct voltage_map_desc ldo8_voltage_map_desc = {
+ .min = 3000, .step = 100, .max = 3600,
+};
+static const struct voltage_map_desc ldo9_voltage_map_desc = {
+ .min = 2800, .step = 100, .max = 3100,
+};
+static const struct voltage_map_desc ldo10_voltage_map_desc = {
+ .min = 950, .step = 50, .max = 1300,
+};
+static const struct voltage_map_desc ldo1213_voltage_map_desc = {
+ .min = 800, .step = 100, .max = 3300,
+};
+static const struct voltage_map_desc ldo1415_voltage_map_desc = {
+ .min = 1200, .step = 100, .max = 3300,
+};
+static const struct voltage_map_desc ldo1617_voltage_map_desc = {
+ .min = 1600, .step = 100, .max = 3600,
+};
+static const struct voltage_map_desc buck12_voltage_map_desc = {
+ .min = 750, .step = 25, .max = 1525,
+};
+static const struct voltage_map_desc buck3_voltage_map_desc = {
+ .min = 1600, .step = 100, .max = 3600,
+};
+static const struct voltage_map_desc buck4_voltage_map_desc = {
+ .min = 800, .step = 100, .max = 2300,
+};
+
+static const struct voltage_map_desc *ldo_voltage_map[] = {
+ NULL,
+ NULL,
+ &ldo23_voltage_map_desc, /* LDO2 */
+ &ldo23_voltage_map_desc, /* LDO3 */
+ &ldo456711_voltage_map_desc, /* LDO4 */
+ &ldo456711_voltage_map_desc, /* LDO5 */
+ &ldo456711_voltage_map_desc, /* LDO6 */
+ &ldo456711_voltage_map_desc, /* LDO7 */
+ &ldo8_voltage_map_desc, /* LDO8 */
+ &ldo9_voltage_map_desc, /* LDO9 */
+ &ldo10_voltage_map_desc, /* LDO10 */
+ &ldo456711_voltage_map_desc, /* LDO11 */
+ &ldo1213_voltage_map_desc, /* LDO12 */
+ &ldo1213_voltage_map_desc, /* LDO13 */
+ &ldo1415_voltage_map_desc, /* LDO14 */
+ &ldo1415_voltage_map_desc, /* LDO15 */
+ &ldo1617_voltage_map_desc, /* LDO16 */
+ &ldo1617_voltage_map_desc, /* LDO17 */
+ &buck12_voltage_map_desc, /* BUCK1 */
+ &buck12_voltage_map_desc, /* BUCK2 */
+ &buck3_voltage_map_desc, /* BUCK3 */
+ &buck4_voltage_map_desc, /* BUCK4 */
+};
+
+static inline int max8998_get_ldo(struct regulator_dev *rdev)
+{
+ return rdev_get_id(rdev);
+}
+
+static int max8998_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct voltage_map_desc *desc;
+ int ldo = max8998_get_ldo(rdev);
+ int val;
+
+ if (ldo >= ARRAY_SIZE(ldo_voltage_map))
+ return -EINVAL;
+
+ desc = ldo_voltage_map[ldo];
+ if (desc == NULL)
+ return -EINVAL;
+
+ val = desc->min + desc->step * selector;
+ if (val > desc->max)
+ return -EINVAL;
+
+ return val * 1000;
+}
+
+static int max8998_get_enable_register(struct regulator_dev *rdev,
+ int *reg, int *shift)
+{
+ int ldo = max8998_get_ldo(rdev);
+
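+ /* enable bits are packed in descending bit order within each ON/OFF register */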
+ switch (ldo) {
+ case MAX8998_LDO2 ... MAX8998_LDO5:
+ *reg = MAX8998_REG_ONOFF1;
+ *shift = 3 - (ldo - MAX8998_LDO2);
+ break;
+ case MAX8998_LDO6 ... MAX8998_LDO13:
+ *reg = MAX8998_REG_ONOFF2;
+ *shift = 7 - (ldo - MAX8998_LDO6);
+ break;
+ case MAX8998_LDO14 ... MAX8998_LDO17:
+ *reg = MAX8998_REG_ONOFF3;
+ *shift = 7 - (ldo - MAX8998_LDO14);
+ break;
+ case MAX8998_BUCK1 ... MAX8998_BUCK4:
+ *reg = MAX8998_REG_ONOFF1;
+ *shift = 7 - (ldo - MAX8998_BUCK1);
+ break;
+ case MAX8998_EN32KHZ_AP ... MAX8998_ENVICHG:
+ *reg = MAX8998_REG_ONOFF4;
+ *shift = 7 - (ldo - MAX8998_EN32KHZ_AP);
+ break;
+ case MAX8998_ESAFEOUT1 ... MAX8998_ESAFEOUT2:
+ *reg = MAX8998_REG_CHGR2;
+ *shift = 7 - (ldo - MAX8998_ESAFEOUT1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max8998_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ int ret, reg, shift = 8;
+ u8 val;
+
+ ret = max8998_get_enable_register(rdev, &reg, &shift);
+ if (ret)
+ return ret;
+
+ ret = max8998_read_reg(max8998->iodev, reg, &val);
+ if (ret)
+ return ret;
+
+ return val & (1 << shift);
+}
+
+static int max8998_ldo_enable(struct regulator_dev *rdev)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ int reg, shift = 8, ret;
+
+ ret = max8998_get_enable_register(rdev, &reg, &shift);
+ if (ret)
+ return ret;
+
+ return max8998_update_reg(max8998->iodev, reg, 1<<shift, 1<<shift);
+}
+
+static int max8998_ldo_disable(struct regulator_dev *rdev)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ int reg, shift = 8, ret;
+
+ ret = max8998_get_enable_register(rdev, &reg, &shift);
+ if (ret)
+ return ret;
+
+ return max8998_update_reg(max8998->iodev, reg, 0, 1<<shift);
+}
+
+static int max8998_get_voltage_register(struct regulator_dev *rdev,
+ int *_reg, int *_shift, int *_mask)
+{
+ int ldo = max8998_get_ldo(rdev);
+ int reg, shift = 0, mask = 0xff;
+
+ switch (ldo) {
+ case MAX8998_LDO2 ... MAX8998_LDO3:
+ reg = MAX8998_REG_LDO2_LDO3;
+ mask = 0xf;
+ if (ldo == MAX8998_LDO2)
+ shift = 4;
+ else
+ shift = 0;
+ break;
+ case MAX8998_LDO4 ... MAX8998_LDO7:
+ reg = MAX8998_REG_LDO4 + (ldo - MAX8998_LDO4);
+ break;
+ case MAX8998_LDO8 ... MAX8998_LDO9:
+ reg = MAX8998_REG_LDO8_LDO9;
+ mask = 0xf;
+ if (ldo == MAX8998_LDO8)
+ shift = 4;
+ else
+ shift = 0;
+ break;
+ case MAX8998_LDO10 ... MAX8998_LDO11:
+ reg = MAX8998_REG_LDO10_LDO11;
+ if (ldo == MAX8998_LDO10) {
+ shift = 5;
+ mask = 0x7;
+ } else {
+ shift = 0;
+ mask = 0x1f;
+ }
+ break;
+ case MAX8998_LDO12 ... MAX8998_LDO17:
+ reg = MAX8998_REG_LDO12 + (ldo - MAX8998_LDO12);
+ break;
+ case MAX8998_BUCK1:
+ reg = MAX8998_REG_BUCK1_DVSARM1;
+ break;
+ case MAX8998_BUCK2:
+ reg = MAX8998_REG_BUCK2_DVSINT1;
+ break;
+ case MAX8998_BUCK3:
+ reg = MAX8998_REG_BUCK3;
+ break;
+ case MAX8998_BUCK4:
+ reg = MAX8998_REG_BUCK4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *_reg = reg;
+ *_shift = shift;
+ *_mask = mask;
+
+ return 0;
+}
+
+static int max8998_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ int reg, shift = 0, mask, ret;
+ u8 val;
+
+ ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ ret = max8998_read_reg(max8998->iodev, reg, &val);
+ if (ret)
+ return ret;
+
+ val >>= shift;
+ val &= mask;
+
+ return max8998_list_voltage(rdev, val);
+}
+
+static int max8998_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ int previous_vol = 0;
+ const struct voltage_map_desc *desc;
+ int ldo = max8998_get_ldo(rdev);
+ int reg, shift = 0, mask, ret;
+ int i = 0;
+ u8 val;
+ bool en_ramp = false;
+
+ if (ldo >= ARRAY_SIZE(ldo_voltage_map))
+ return -EINVAL;
+
+ desc = ldo_voltage_map[ldo];
+ if (desc == NULL)
+ return -EINVAL;
+
+ if (max_vol < desc->min || min_vol > desc->max)
+ return -EINVAL;
+
+ while (desc->min + desc->step*i < min_vol &&
+ desc->min + desc->step*i < desc->max)
+ i++;
+
+ if (desc->min + desc->step*i > max_vol)
+ return -EINVAL;
+
+ ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ /* wait for RAMP_UP_DELAY if rdev is BUCK1/2 and
+ * ENRAMP is ON */
+ if (ldo == MAX8998_BUCK1 || ldo == MAX8998_BUCK2) {
+ max8998_read_reg(max8998->iodev, MAX8998_REG_ONOFF4, &val);
+ if (val & (1 << 4)) {
+ en_ramp = true;
+ previous_vol = max8998_get_voltage(rdev);
+ }
+ }
+
+ ret = max8998_update_reg(max8998->iodev, reg, i<<shift, mask<<shift);
+
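+ /* the low nibble of ONOFF4 encodes the ramp rate used to scale the delay */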
+ if (en_ramp == true) {
+ int difference = desc->min + desc->step*i - previous_vol/1000;
+ if (difference > 0)
+ udelay(difference / ((val & 0x0f) + 1));
+ }
+
+ return ret;
+}
+
+static struct regulator_ops max8998_ldo_ops = {
+ .list_voltage = max8998_list_voltage,
+ .is_enabled = max8998_ldo_is_enabled,
+ .enable = max8998_ldo_enable,
+ .disable = max8998_ldo_disable,
+ .get_voltage = max8998_get_voltage,
+ .set_voltage = max8998_set_voltage,
+ .set_suspend_enable = max8998_ldo_enable,
+ .set_suspend_disable = max8998_ldo_disable,
+};
+
+static struct regulator_ops max8998_buck_ops = {
+ .list_voltage = max8998_list_voltage,
+ .is_enabled = max8998_ldo_is_enabled,
+ .enable = max8998_ldo_enable,
+ .disable = max8998_ldo_disable,
+ .get_voltage = max8998_get_voltage,
+ .set_voltage = max8998_set_voltage,
+ .set_suspend_enable = max8998_ldo_enable,
+ .set_suspend_disable = max8998_ldo_disable,
+};
+
+static struct regulator_ops max8998_others_ops = {
+ .is_enabled = max8998_ldo_is_enabled,
+ .enable = max8998_ldo_enable,
+ .disable = max8998_ldo_disable,
+ .set_suspend_enable = max8998_ldo_enable,
+ .set_suspend_disable = max8998_ldo_disable,
+};
+
+static struct regulator_desc regulators[] = {
+ {
+ .name = "LDO2",
+ .id = MAX8998_LDO2,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO3",
+ .id = MAX8998_LDO3,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO4",
+ .id = MAX8998_LDO4,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO5",
+ .id = MAX8998_LDO5,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO6",
+ .id = MAX8998_LDO6,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO7",
+ .id = MAX8998_LDO7,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO8",
+ .id = MAX8998_LDO8,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO9",
+ .id = MAX8998_LDO9,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO10",
+ .id = MAX8998_LDO10,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO11",
+ .id = MAX8998_LDO11,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO12",
+ .id = MAX8998_LDO12,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO13",
+ .id = MAX8998_LDO13,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO14",
+ .id = MAX8998_LDO14,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO15",
+ .id = MAX8998_LDO15,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO16",
+ .id = MAX8998_LDO16,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO17",
+ .id = MAX8998_LDO17,
+ .ops = &max8998_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "BUCK1",
+ .id = MAX8998_BUCK1,
+ .ops = &max8998_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "BUCK2",
+ .id = MAX8998_BUCK2,
+ .ops = &max8998_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "BUCK3",
+ .id = MAX8998_BUCK3,
+ .ops = &max8998_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "BUCK4",
+ .id = MAX8998_BUCK4,
+ .ops = &max8998_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "EN32KHz AP",
+ .id = MAX8998_EN32KHZ_AP,
+ .ops = &max8998_others_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "EN32KHz CP",
+ .id = MAX8998_EN32KHZ_CP,
+ .ops = &max8998_others_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ENVICHG",
+ .id = MAX8998_ENVICHG,
+ .ops = &max8998_others_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ESAFEOUT1",
+ .id = MAX8998_ESAFEOUT1,
+ .ops = &max8998_others_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ESAFEOUT2",
+ .id = MAX8998_ESAFEOUT2,
+ .ops = &max8998_others_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }
+};
+
+static __devinit int max8998_pmic_probe(struct platform_device *pdev)
+{
+ struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_dev **rdev;
+ struct max8998_data *max8998;
+ int i, ret, size;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied\n");
+ return -ENODEV;
+ }
+
+ max8998 = kzalloc(sizeof(struct max8998_data), GFP_KERNEL);
+ if (!max8998)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * pdata->num_regulators;
+ max8998->rdev = kzalloc(size, GFP_KERNEL);
+ if (!max8998->rdev) {
+ kfree(max8998);
+ return -ENOMEM;
+ }
+
+ rdev = max8998->rdev;
+ max8998->dev = &pdev->dev;
+ max8998->iodev = iodev;
+ max8998->num_regulators = pdata->num_regulators;
+ platform_set_drvdata(pdev, max8998);
+
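+ /* fill in n_voltages from the voltage map, then register each regulator */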
+ for (i = 0; i < pdata->num_regulators; i++) {
+ const struct voltage_map_desc *desc;
+ int id = pdata->regulators[i].id;
+ int index = id - MAX8998_LDO2;
+
+ desc = ldo_voltage_map[id];
+ if (desc && regulators[index].ops != &max8998_others_ops) {
+ int count = (desc->max - desc->min) / desc->step + 1;
+ regulators[index].n_voltages = count;
+ }
+ rdev[i] = regulator_register(&regulators[index], max8998->dev,
+ pdata->regulators[i].initdata, max8998);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(max8998->dev, "regulator init failed\n");
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ for (i = 0; i < max8998->num_regulators; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ kfree(max8998->rdev);
+ kfree(max8998);
+
+ return ret;
+}
+
+static int __devexit max8998_pmic_remove(struct platform_device *pdev)
+{
+ struct max8998_data *max8998 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = max8998->rdev;
+ int i;
+
+ for (i = 0; i < max8998->num_regulators; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ kfree(max8998->rdev);
+ kfree(max8998);
+
+ return 0;
+}
+
+static struct platform_driver max8998_pmic_driver = {
+ .driver = {
+ .name = "max8998-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_pmic_probe,
+ .remove = __devexit_p(max8998_pmic_remove),
+};
+
+static int __init max8998_pmic_init(void)
+{
+ return platform_driver_register(&max8998_pmic_driver);
+}
+subsys_initcall(max8998_pmic_init);
+
+static void __exit max8998_pmic_cleanup(void)
+{
+ platform_driver_unregister(&max8998_pmic_driver);
+}
+module_exit(max8998_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8998 voltage regulator driver");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index f50afc9f287..cd6d4fc9d74 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -585,6 +585,8 @@ static const struct tps_info tps65023_regs[] = {
static const struct i2c_device_id tps_65023_id[] = {
{.name = "tps65023",
.driver_data = (unsigned long) tps65023_regs,},
+ {.name = "tps65021",
+ .driver_data = (unsigned long) tps65023_regs,},
{ },
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 8152d65220f..020f5878d7f 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -614,6 +614,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
}
tps6507x_dev->pmic = tps;
+ platform_set_drvdata(pdev, tps6507x_dev);
return 0;
@@ -625,12 +626,6 @@ fail:
return error;
}
-/**
- * tps6507x_remove - TPS6507x driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
new file mode 100644
index 00000000000..51237fbb1bb
--- /dev/null
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -0,0 +1,396 @@
+/*
+ * Regulator driver for TI TPS6586x
+ *
+ * Copyright (C) 2010 Compulab Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on da903x
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Copyright (C) 2008 Compulab Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6586x.h>
+
+/* supply control and voltage setting */
+#define TPS6586X_SUPPLYENA 0x10
+#define TPS6586X_SUPPLYENB 0x11
+#define TPS6586X_SUPPLYENC 0x12
+#define TPS6586X_SUPPLYEND 0x13
+#define TPS6586X_SUPPLYENE 0x14
+#define TPS6586X_VCC1 0x20
+#define TPS6586X_VCC2 0x21
+#define TPS6586X_SM1V1 0x23
+#define TPS6586X_SM1V2 0x24
+#define TPS6586X_SM1SL 0x25
+#define TPS6586X_SM0V1 0x26
+#define TPS6586X_SM0V2 0x27
+#define TPS6586X_SM0SL 0x28
+#define TPS6586X_LDO2AV1 0x29
+#define TPS6586X_LDO2AV2 0x2A
+#define TPS6586X_LDO2BV1 0x2F
+#define TPS6586X_LDO2BV2 0x30
+#define TPS6586X_LDO4V1 0x32
+#define TPS6586X_LDO4V2 0x33
+
+/* converter settings */
+#define TPS6586X_SUPPLYV1 0x41
+#define TPS6586X_SUPPLYV2 0x42
+#define TPS6586X_SUPPLYV3 0x43
+#define TPS6586X_SUPPLYV4 0x44
+#define TPS6586X_SUPPLYV5 0x45
+#define TPS6586X_SUPPLYV6 0x46
+#define TPS6586X_SMODE1 0x47
+#define TPS6586X_SMODE2 0x48
+
+struct tps6586x_regulator {
+ struct regulator_desc desc;
+
+ int volt_reg;
+ int volt_shift;
+ int volt_nbits;
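+ /* each supply has enable bits in two SUPPLYEN register banks */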
+ int enable_bit[2];
+ int enable_reg[2];
+
+ int *voltages;
+
+ /* for DVM regulators */
+ int go_reg;
+ int go_bit;
+};
+
+static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
+{
+ return rdev_get_dev(rdev)->parent->parent;
+}
+
+static int tps6586x_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
+
+ return info->voltages[selector] * 1000;
+}
+
+static int __tps6586x_ldo_set_voltage(struct device *parent,
+ struct tps6586x_regulator *ri,
+ int min_uV, int max_uV)
+{
+ int val, uV;
+ uint8_t mask;
+
+ for (val = 0; val < ri->desc.n_voltages; val++) {
+ uV = ri->voltages[val] * 1000;
+
+ /* LDO0 has a minimum voltage of 1.2 V rather than 1.25 V */
+ if (ri->desc.id == TPS6586X_ID_LDO_0 && val == 0)
+ uV -= 50 * 1000;
+
+ /* use the first in-range value */
+ if (min_uV <= uV && uV <= max_uV) {
+
+ val <<= ri->volt_shift;
+ mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
+
+ return tps6586x_update(parent, ri->volt_reg, val, mask);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6586x_dev(rdev);
+
+ return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+}
+
+static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6586x_dev(rdev);
+ uint8_t val, mask;
+ int ret;
+
+ ret = tps6586x_read(parent, ri->volt_reg, &val);
+ if (ret)
+ return ret;
+
+ mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
+ val = (val & mask) >> ri->volt_shift;
+
+ if (val >= ri->desc.n_voltages)
+ BUG();
+
+ return ri->voltages[val] * 1000;
+}
+
+static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6586x_dev(rdev);
+ int ret;
+
+ ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+ if (ret)
+ return ret;
+
+ return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
+}
+
+static int tps6586x_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6586x_dev(rdev);
+
+ return tps6586x_set_bits(parent, ri->enable_reg[0],
+ 1 << ri->enable_bit[0]);
+}
+
+static int tps6586x_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6586x_dev(rdev);
+
+ return tps6586x_clr_bits(parent, ri->enable_reg[0],
+ 1 << ri->enable_bit[0]);
+}
+
+static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps6586x_dev(rdev);
+ uint8_t reg_val;
+ int ret;
+
+ ret = tps6586x_read(parent, ri->enable_reg[0], &reg_val);
+ if (ret)
+ return ret;
+
+ return !!(reg_val & (1 << ri->enable_bit[0]));
+}
+
+static struct regulator_ops tps6586x_regulator_ldo_ops = {
+ .list_voltage = tps6586x_ldo_list_voltage,
+ .get_voltage = tps6586x_ldo_get_voltage,
+ .set_voltage = tps6586x_ldo_set_voltage,
+
+ .is_enabled = tps6586x_regulator_is_enabled,
+ .enable = tps6586x_regulator_enable,
+ .disable = tps6586x_regulator_disable,
+};
+
+static struct regulator_ops tps6586x_regulator_dvm_ops = {
+ .list_voltage = tps6586x_ldo_list_voltage,
+ .get_voltage = tps6586x_ldo_get_voltage,
+ .set_voltage = tps6586x_dvm_set_voltage,
+
+ .is_enabled = tps6586x_regulator_is_enabled,
+ .enable = tps6586x_regulator_enable,
+ .disable = tps6586x_regulator_disable,
+};
+
+static int tps6586x_ldo_voltages[] = {
+ 1250, 1500, 1800, 2500, 2700, 2850, 3100, 3300,
+};
+
+static int tps6586x_ldo4_voltages[] = {
+ 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
+ 1900, 1925, 1950, 1975, 2000, 2025, 2050, 2075,
+ 2100, 2125, 2150, 2175, 2200, 2225, 2250, 2275,
+ 2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
+};
+
+static int tps6586x_sm2_voltages[] = {
+ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350,
+ 3400, 3450, 3500, 3550, 3600, 3650, 3700, 3750,
+ 3800, 3850, 3900, 3950, 4000, 4050, 4100, 4150,
+ 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550,
+};
+
+static int tps6586x_dvm_voltages[] = {
+ 725, 750, 775, 800, 825, 850, 875, 900,
+ 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
+ 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
+ 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+};
+
+#define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \
+ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
+{ \
+ .desc = { \
+ .name = "REG-" #_id, \
+ .ops = &tps6586x_regulator_##_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = TPS6586X_ID_##_id, \
+ .n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \
+ .owner = THIS_MODULE, \
+ }, \
+ .volt_reg = TPS6586X_##vreg, \
+ .volt_shift = (shift), \
+ .volt_nbits = (nbits), \
+ .enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
+ .enable_bit[0] = (ebit0), \
+ .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
+ .enable_bit[1] = (ebit1), \
+ .voltages = tps6586x_##vdata##_voltages, \
+}
+
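+/* LDOs pass no "go" bit; DVM supplies latch a new voltage via goreg/gobit */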
+#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
+ ereg0, ebit0, ereg1, ebit1) \
+ TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \
+ ereg0, ebit0, ereg1, ebit1, 0, 0)
+
+#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
+ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
+ TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \
+ ereg0, ebit0, ereg1, ebit1, goreg, gobit)
+
+static struct tps6586x_regulator tps6586x_regulator[] = {
+ TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
+ TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
+ TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
+ TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
+ TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
+ TPS6586X_LDO(LDO_8, ldo, SUPPLYV1, 5, 3, ENC, 6, END, 6),
+ TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
+ TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, ENE, 7, ENE, 7),
+ TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
+ TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 1, END, 1),
+
+ TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
+ TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
+ TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
+ TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
+};
+
+/*
+ * TPS6586X has 2 enable bits that are OR'ed to determine the actual
+ * regulator state. Clearing one of these bits allows switching the
+ * regulator on and off with a single register write.
+ */
+static inline int tps6586x_regulator_preinit(struct device *parent,
+ struct tps6586x_regulator *ri)
+{
+ uint8_t val1, val2;
+ int ret;
+
+ ret = tps6586x_read(parent, ri->enable_reg[0], &val1);
+ if (ret)
+ return ret;
+
+ ret = tps6586x_read(parent, ri->enable_reg[1], &val2);
+ if (ret)
+ return ret;
+
+ if (!(val2 & (1 << ri->enable_bit[1])))
+ return 0;
+
+ /*
+ * The regulator is on, but it's enabled with the bit we don't
+ * want to use, so we switch the enable bits
+ */
+ if (!(val1 & (1 << ri->enable_bit[0]))) {
+ ret = tps6586x_set_bits(parent, ri->enable_reg[0],
+ 1 << ri->enable_bit[0]);
+ if (ret)
+ return ret;
+ }
+
+ return tps6586x_clr_bits(parent, ri->enable_reg[1],
+ 1 << ri->enable_bit[1]);
+}
+
+static inline struct tps6586x_regulator *find_regulator_info(int id)
+{
+ struct tps6586x_regulator *ri;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6586x_regulator); i++) {
+ ri = &tps6586x_regulator[i];
+ if (ri->desc.id == id)
+ return ri;
+ }
+ return NULL;
+}
+
+static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
+{
+ struct tps6586x_regulator *ri = NULL;
+ struct regulator_dev *rdev;
+ int id = pdev->id;
+ int err;
+
+ dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+
+ ri = find_regulator_info(id);
+ if (ri == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+
+ err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
+ if (err)
+ return err;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ pdev->dev.platform_data, ri);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit tps6586x_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver tps6586x_regulator_driver = {
+ .driver = {
+ .name = "tps6586x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6586x_regulator_probe,
+ .remove = __devexit_p(tps6586x_regulator_remove),
+};
+
+static int __init tps6586x_regulator_init(void)
+{
+ return platform_driver_register(&tps6586x_regulator_driver);
+}
+subsys_initcall(tps6586x_regulator_init);
+
+static void __exit tps6586x_regulator_exit(void)
+{
+ platform_driver_unregister(&tps6586x_regulator_driver);
+}
+module_exit(tps6586x_regulator_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_DESCRIPTION("Regulator Driver for TI TPS6586X PMIC");
+MODULE_ALIAS("platform:tps6586x-regulator");
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb61b9..9edf8f69234 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
case REGULATOR_MODE_IDLE:
ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO1_LP_MODE,
- WM831X_LDO1_LP_MODE);
+ WM831X_LDO1_LP_MODE, 0);
if (ret < 0)
return ret;
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
WM831X_LDO1_ON_MODE);
if (ret < 0)
return ret;
+ break;
case REGULATOR_MODE_STANDBY:
ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO1_LP_MODE, 0);
+ WM831X_LDO1_LP_MODE,
+ WM831X_LDO1_LP_MODE);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7db936..fe4b8a8a9df 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
mode = REGULATOR_MODE_NORMAL;
} else if (!active && !sleep)
mode = REGULATOR_MODE_IDLE;
- else if (!sleep)
+ else if (sleep)
mode = REGULATOR_MODE_STANDBY;
return mode;
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 5a1dc8a24d3..03713bc66e4 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -219,8 +219,6 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ldo->wm8994 = wm8994;
- ldo->is_enabled = true;
-
if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
ldo->enable = pdata->ldo[id].enable;
@@ -237,7 +235,8 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ret);
goto err_gpio;
}
- }
+ } else
+ ldo->is_enabled = true;
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
pdata->ldo[id].init_data, ldo);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 10ba12c8c5e..48ca7132cc0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -166,6 +166,16 @@ config RTC_DRV_DS1672
This driver can also be built as a module. If so, the module
will be called rtc-ds1672.
+config RTC_DRV_DS3232
+ tristate "Dallas/Maxim DS3232"
+ depends on RTC_CLASS && I2C
+ help
+ If you say yes here you get support for Dallas Semiconductor
+ DS3232 real-time clock chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ds3232.
+
config RTC_DRV_MAX6900
tristate "Maxim MAX6900"
help
@@ -203,6 +213,15 @@ config RTC_DRV_ISL1208
This driver can also be built as a module. If so, the module
will be called rtc-isl1208.
+config RTC_DRV_ISL12022
+ tristate "Intersil ISL12022"
+ help
+ If you say yes here you get support for the
+ Intersil ISL12022 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-isl12022.
+
config RTC_DRV_X1205
tristate "Xicor/Intersil X1205"
help
@@ -537,6 +556,16 @@ config RTC_DRV_MSM6242
This driver can also be built as a module. If so, the module
will be called rtc-msm6242.
+config RTC_DRV_IMXDI
+ tristate "Freescale IMX DryIce Real Time Clock"
+ depends on ARCH_MX25
+ depends on RTC_CLASS
+ help
+ Support for the Freescale IMX DryIce RTC.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-imxdi".
+
config RTC_MXC
tristate "Freescale MXC Real Time Clock"
depends on ARCH_MXC
@@ -645,9 +674,16 @@ config RTC_DRV_OMAP
DA8xx/OMAP-L13x chips. This driver can also be built as a
module called rtc-omap.
+config HAVE_S3C_RTC
+ bool
+ help
+ This enables RTC support for Samsung SoCs. To include RTC
+ support for a machine, select this symbol from the respective
+ mach-XXXX/Kconfig file.
+
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_S3C2410 || ARCH_S3C64XX
+ depends on ARCH_S3C2410 || ARCH_S3C64XX || HAVE_S3C_RTC
help
RTC (Realtime Clock) driver for the clock inbuilt into the
Samsung S3C24XX series of SoCs. This can provide periodic
@@ -774,7 +810,7 @@ config RTC_DRV_AT91SAM9_GPBR
config RTC_DRV_AU1XXX
tristate "Au1xxx Counter0 RTC support"
- depends on SOC_AU1X00
+ depends on MIPS_ALCHEMY
help
This is a driver for the Au1xxx on-chip Counter0 (Time-Of-Year
counter) to be used as a RTC.
@@ -905,4 +941,15 @@ config RTC_DRV_MPC5121
This driver can also be built as a module. If so, the module
will be called rtc-mpc5121.
+config RTC_DRV_JZ4740
+ tristate "Ingenic JZ4740 SoC"
+ depends on RTC_CLASS
+ depends on MACH_JZ4740
+ help
+ If you say yes here you get support for the Ingenic JZ4740 SoC RTC
+ controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-jz4740.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 5adbba7cf89..0f207b3b583 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -41,12 +41,16 @@ obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
+obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o
obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o
obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
+obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
+obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
+obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780ea254..261a07e0fb2 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
err = PTR_ERR(rtc);
return err;
}
+ platform_set_drvdata(pdev, rtc);
return 0;
}
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc2c22..d4fb82d85e9 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
enable_irq_wake(IRQ_RTC);
bfin_rtc_sync_pending(&pdev->dev);
} else
- bfin_rtc_int_clear(-1);
+ bfin_rtc_int_clear(0);
return 0;
}
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
{
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(IRQ_RTC);
- else
- bfin_write_RTC_ISTAT(-1);
+
+ /*
+ * Since only some of the RTC bits are maintained externally in the
+ * Vbat domain, we need to wait for the RTC MMRs to be synced into
+ * the core after waking up. This happens every RTC 1HZ. Once that
+ * has happened, we can go ahead and re-enable the important write
+ * complete interrupt event.
+ */
+ while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+ continue;
+ bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
return 0;
}
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 11b8ea29d2b..5856167a0c9 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -970,7 +970,6 @@ static inline int cmos_poweroff(struct device *dev)
#include <linux/acpi.h>
-#ifdef CONFIG_PM
static u32 rtc_handler(void *context)
{
acpi_clear_event(ACPI_EVENT_RTC);
@@ -999,11 +998,6 @@ static void rtc_wake_off(struct device *dev)
{
acpi_disable_event(ACPI_EVENT_RTC, 0);
}
-#else
-#define rtc_wake_setup() do{}while(0)
-#define rtc_wake_on NULL
-#define rtc_wake_off NULL
-#endif
/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find
* its device node and pass extra config data. This helps its driver use
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
new file mode 100644
index 00000000000..9daed8db83d
--- /dev/null
+++ b/drivers/rtc/rtc-ds3232.c
@@ -0,0 +1,326 @@
+/*
+ * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
+ *
+ * Copyright (C) 2009-2010 Freescale Semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+/*
+ * It would be more efficient to use i2c msgs/i2c_transfer directly but, as
+ * recommended in .../Documentation/i2c/writing-clients section
+ * "Sending and receiving", using SMBus level communication is preferred.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define DS3232_REG_SECONDS 0x00
+#define DS3232_REG_MINUTES 0x01
+#define DS3232_REG_HOURS 0x02
+#define DS3232_REG_AMPM 0x02
+#define DS3232_REG_DAY 0x03
+#define DS3232_REG_DATE 0x04
+#define DS3232_REG_MONTH 0x05
+#define DS3232_REG_CENTURY 0x05
+#define DS3232_REG_YEAR 0x06
+#define DS3232_REG_ALARM1 0x07 /* Alarm 1 BASE */
+#define DS3232_REG_ALARM2 0x0B /* Alarm 2 BASE */
+#define DS3232_REG_CR 0x0E /* Control register */
+# define DS3232_REG_CR_nEOSC 0x80
+# define DS3232_REG_CR_INTCN 0x04
+# define DS3232_REG_CR_A2IE 0x02
+# define DS3232_REG_CR_A1IE 0x01
+
+#define DS3232_REG_SR 0x0F /* control/status register */
+# define DS3232_REG_SR_OSF 0x80
+# define DS3232_REG_SR_BSY 0x04
+# define DS3232_REG_SR_A2F 0x02
+# define DS3232_REG_SR_A1F 0x01
+
+struct ds3232 {
+ struct i2c_client *client;
+ struct rtc_device *rtc;
+ struct work_struct work;
+
+ /* The mutex protects alarm operations, and prevents a race
+ * between the enable_irq() in the workqueue and the free_irq()
+ * in the remove function.
+ */
+ struct mutex mutex;
+ int exiting;
+};
+
+static struct i2c_driver ds3232_driver;
+
+static int ds3232_check_rtc_status(struct i2c_client *client)
+{
+ int ret = 0;
+ int control, stat;
+
+ stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+ if (stat < 0)
+ return stat;
+
+ if (stat & DS3232_REG_SR_OSF)
+ dev_warn(&client->dev,
+ "oscillator discontinuity flagged, "
+ "time unreliable\n");
+
+ stat &= ~(DS3232_REG_SR_OSF | DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
+
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+ if (ret < 0)
+ return ret;
+
+ /* If the alarm is pending, clear it before requesting
+ * the interrupt, so an interrupt event isn't reported
+ * before everything is initialized.
+ */
+
+ control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (control < 0)
+ return control;
+
+ control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
+ control |= DS3232_REG_CR_INTCN;
+
+ return i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+}
+
+static int ds3232_read_time(struct device *dev, struct rtc_time *time)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+ u8 buf[7];
+ unsigned int year, month, day, hour, minute, second;
+ unsigned int week, twelve_hr, am_pm;
+ unsigned int century, add_century = 0;
+
+ ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_SECONDS, 7, buf);
+
+ if (ret < 0)
+ return ret;
+ if (ret < 7)
+ return -EIO;
+
+ second = buf[0];
+ minute = buf[1];
+ hour = buf[2];
+ week = buf[3];
+ day = buf[4];
+ month = buf[5];
+ year = buf[6];
+
+ /* Extract additional information for AM/PM and century */
+
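+ /* the driver treats hours bit 6 as 12-hour mode and bit 5 as the PM flag */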
+ twelve_hr = hour & 0x40;
+ am_pm = hour & 0x20;
+ century = month & 0x80;
+
+ /* Write to rtc_time structure */
+
+ time->tm_sec = bcd2bin(second);
+ time->tm_min = bcd2bin(minute);
+ if (twelve_hr) {
+ /* Convert to 24 hr */
+ if (am_pm)
+ time->tm_hour = bcd2bin(hour & 0x1F) + 12;
+ else
+ time->tm_hour = bcd2bin(hour & 0x1F);
+ } else {
+ time->tm_hour = bcd2bin(hour);
+ }
+
+ time->tm_wday = bcd2bin(week);
+ time->tm_mday = bcd2bin(day);
+ time->tm_mon = bcd2bin(month & 0x7F);
+ if (century)
+ add_century = 100;
+
+ time->tm_year = bcd2bin(year) + add_century;
+
+ return rtc_valid_tm(time);
+}
+
+static int ds3232_set_time(struct device *dev, struct rtc_time *time)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 buf[7];
+
+ /* Extract time from rtc_time and load into ds3232*/
+
+ buf[0] = bin2bcd(time->tm_sec);
+ buf[1] = bin2bcd(time->tm_min);
+ buf[2] = bin2bcd(time->tm_hour);
+ buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+ buf[4] = bin2bcd(time->tm_mday); /* Date */
+ buf[5] = bin2bcd(time->tm_mon);
+ if (time->tm_year >= 100) {
+ buf[5] |= 0x80;
+ buf[6] = bin2bcd(time->tm_year - 100);
+ } else {
+ buf[6] = bin2bcd(time->tm_year);
+ }
+
+ return i2c_smbus_write_i2c_block_data(client,
+ DS3232_REG_SECONDS, 7, buf);
+}
+
+static irqreturn_t ds3232_irq(int irq, void *dev_id)
+{
+ struct i2c_client *client = dev_id;
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+ disable_irq_nosync(irq);
+ schedule_work(&ds3232->work);
+ return IRQ_HANDLED;
+}
+
+static void ds3232_work(struct work_struct *work)
+{
+ struct ds3232 *ds3232 = container_of(work, struct ds3232, work);
+ struct i2c_client *client = ds3232->client;
+ int stat, control;
+
+ mutex_lock(&ds3232->mutex);
+
+ stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+ if (stat < 0)
+ goto unlock;
+
+ if (stat & DS3232_REG_SR_A1F) {
+ control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (control < 0)
+ goto out;
+ /* disable alarm1 interrupt */
+ control &= ~(DS3232_REG_CR_A1IE);
+ i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+
+ /* clear the alarm pend flag */
+ stat &= ~DS3232_REG_SR_A1F;
+ i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+
+ rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF);
+ }
+
+out:
+ if (!ds3232->exiting)
+ enable_irq(client->irq);
+unlock:
+ mutex_unlock(&ds3232->mutex);
+}
+
+static const struct rtc_class_ops ds3232_rtc_ops = {
+ .read_time = ds3232_read_time,
+ .set_time = ds3232_set_time,
+};
+
+static int __devinit ds3232_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ds3232 *ds3232;
+ int ret;
+
+ ds3232 = kzalloc(sizeof(struct ds3232), GFP_KERNEL);
+ if (!ds3232)
+ return -ENOMEM;
+
+ ds3232->client = client;
+ i2c_set_clientdata(client, ds3232);
+
+ INIT_WORK(&ds3232->work, ds3232_work);
+ mutex_init(&ds3232->mutex);
+
+ ret = ds3232_check_rtc_status(client);
+ if (ret)
+ goto out_free;
+
+ if (client->irq >= 0) {
+ ret = request_irq(client->irq, ds3232_irq, 0,
+ "ds3232", client);
+ if (ret) {
+ dev_err(&client->dev, "unable to request IRQ\n");
+ goto out_free;
+ }
+ }
+
+ ds3232->rtc = rtc_device_register(client->name, &client->dev,
+ &ds3232_rtc_ops, THIS_MODULE);
+ if (IS_ERR(ds3232->rtc)) {
+ ret = PTR_ERR(ds3232->rtc);
+ dev_err(&client->dev, "unable to register the class device\n");
+ goto out_irq;
+ }
+
+ return 0;
+
+out_irq:
+ if (client->irq >= 0)
+ free_irq(client->irq, client);
+
+out_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(ds3232);
+ return ret;
+}
+
+static int __devexit ds3232_remove(struct i2c_client *client)
+{
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+ if (client->irq >= 0) {
+ mutex_lock(&ds3232->mutex);
+ ds3232->exiting = 1;
+ mutex_unlock(&ds3232->mutex);
+
+ free_irq(client->irq, client);
+ flush_scheduled_work();
+ }
+
+ rtc_device_unregister(ds3232->rtc);
+ i2c_set_clientdata(client, NULL);
+ kfree(ds3232);
+ return 0;
+}
+
+static const struct i2c_device_id ds3232_id[] = {
+ { "ds3232", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ds3232_id);
+
+static struct i2c_driver ds3232_driver = {
+ .driver = {
+ .name = "rtc-ds3232",
+ .owner = THIS_MODULE,
+ },
+ .probe = ds3232_probe,
+ .remove = __devexit_p(ds3232_remove),
+ .id_table = ds3232_id,
+};
+
+static int __init ds3232_init(void)
+{
+ return i2c_add_driver(&ds3232_driver);
+}
+
+static void __exit ds3232_exit(void)
+{
+ i2c_del_driver(&ds3232_driver);
+}
+
+module_init(ds3232_init);
+module_exit(ds3232_exit);
+
+MODULE_AUTHOR("Srikanth Srinivasan <srikanth.srinivasan@freescale.com>");
+MODULE_DESCRIPTION("Maxim/Dallas DS3232 RTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index e4de8f37ae4..4cf2e70c507 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -52,8 +52,8 @@ struct fm3130 {
struct i2c_msg msg[4];
struct i2c_client *client;
struct rtc_device *rtc;
+ int alarm_valid;
int data_valid;
- int alarm;
};
static const struct i2c_device_id fm3130_id[] = {
{ "fm3130", 0 },
@@ -87,11 +87,7 @@ static void fm3130_rtc_mode(struct device *dev, int mode)
dev_dbg(dev, "invalid mode %d\n", mode);
break;
}
- /* Checking for alarm */
- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_AF) {
- fm3130->alarm = 1;
- fm3130->regs[FM3130_RTC_CONTROL] &= ~FM3130_RTC_CONTROL_BIT_AF;
- }
+
i2c_smbus_write_byte_data(fm3130->client,
FM3130_RTC_CONTROL, fm3130->regs[FM3130_RTC_CONTROL]);
}
@@ -208,6 +204,17 @@ static int fm3130_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct fm3130 *fm3130 = dev_get_drvdata(dev);
int tmp;
struct rtc_time *tm = &alrm->time;
+
+ if (!fm3130->alarm_valid) {
+ /*
+ * The RTC holds an invalid alarm value, probably due to a battery
+ * fault or a similar problem. Return -EIO for now; this allows the
+ * alarm to be set later instead of failing during probe, which
+ * would disable the device.
+ */
+ return -EIO;
+ }
+
/* read the RTC alarm registers all at once */
tmp = i2c_transfer(to_i2c_adapter(fm3130->client->dev.parent),
&fm3130->msg[2], 2);
@@ -222,20 +229,31 @@ static int fm3130_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
fm3130->regs[FM3130_ALARM_DATE],
fm3130->regs[FM3130_ALARM_MONTHS]);
-
tm->tm_sec = bcd2bin(fm3130->regs[FM3130_ALARM_SECONDS] & 0x7F);
tm->tm_min = bcd2bin(fm3130->regs[FM3130_ALARM_MINUTES] & 0x7F);
tm->tm_hour = bcd2bin(fm3130->regs[FM3130_ALARM_HOURS] & 0x3F);
tm->tm_mday = bcd2bin(fm3130->regs[FM3130_ALARM_DATE] & 0x3F);
tm->tm_mon = bcd2bin(fm3130->regs[FM3130_ALARM_MONTHS] & 0x1F);
+
if (tm->tm_mon > 0)
tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
+
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read alarm", tm->tm_sec, tm->tm_min,
tm->tm_hour, tm->tm_mday,
tm->tm_mon, tm->tm_year, tm->tm_wday);
+ /* check if alarm enabled */
+ fm3130->regs[FM3130_RTC_CONTROL] =
+ i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);
+
+ if ((fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_AEN) &&
+ (~fm3130->regs[FM3130_RTC_CONTROL] &
+ FM3130_RTC_CONTROL_BIT_CAL)) {
+ alrm->enabled = 1;
+ }
+
return 0;
}
@@ -251,25 +269,20 @@ static int fm3130_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_hour, tm->tm_mday,
tm->tm_mon, tm->tm_year, tm->tm_wday);
- if (tm->tm_sec != -1)
- fm3130->regs[FM3130_ALARM_SECONDS] =
- bin2bcd(tm->tm_sec) | 0x80;
+ fm3130->regs[FM3130_ALARM_SECONDS] =
+ (tm->tm_sec != -1) ? bin2bcd(tm->tm_sec) : 0x80;
- if (tm->tm_min != -1)
- fm3130->regs[FM3130_ALARM_MINUTES] =
- bin2bcd(tm->tm_min) | 0x80;
+ fm3130->regs[FM3130_ALARM_MINUTES] =
+ (tm->tm_min != -1) ? bin2bcd(tm->tm_min) : 0x80;
- if (tm->tm_hour != -1)
- fm3130->regs[FM3130_ALARM_HOURS] =
- bin2bcd(tm->tm_hour) | 0x80;
+ fm3130->regs[FM3130_ALARM_HOURS] =
+ (tm->tm_hour != -1) ? bin2bcd(tm->tm_hour) : 0x80;
- if (tm->tm_mday != -1)
- fm3130->regs[FM3130_ALARM_DATE] =
- bin2bcd(tm->tm_mday) | 0x80;
+ fm3130->regs[FM3130_ALARM_DATE] =
+ (tm->tm_mday != -1) ? bin2bcd(tm->tm_mday) : 0x80;
- if (tm->tm_mon != -1)
- fm3130->regs[FM3130_ALARM_MONTHS] =
- bin2bcd(tm->tm_mon + 1) | 0x80;
+ fm3130->regs[FM3130_ALARM_MONTHS] =
+ (tm->tm_mon != -1) ? bin2bcd(tm->tm_mon + 1) : 0x80;
dev_dbg(dev, "alarm write %02x %02x %02x %02x %02x\n",
fm3130->regs[FM3130_ALARM_SECONDS],
@@ -285,11 +298,8 @@ static int fm3130_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
fm3130->regs[FM3130_RTC_CONTROL] =
i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);
- /* Checking for alarm */
- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_AF) {
- fm3130->alarm = 1;
- fm3130->regs[FM3130_RTC_CONTROL] &= ~FM3130_RTC_CONTROL_BIT_AF;
- }
+
+ /* enable or disable alarm */
if (alrm->enabled) {
i2c_smbus_write_byte_data(fm3130->client, FM3130_RTC_CONTROL,
(fm3130->regs[FM3130_RTC_CONTROL] &
@@ -298,16 +308,55 @@ static int fm3130_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
} else {
i2c_smbus_write_byte_data(fm3130->client, FM3130_RTC_CONTROL,
fm3130->regs[FM3130_RTC_CONTROL] &
- ~(FM3130_RTC_CONTROL_BIT_AEN));
+ ~(FM3130_RTC_CONTROL_BIT_CAL) &
+ ~(FM3130_RTC_CONTROL_BIT_AEN));
}
+
+ /* We assume here that data is valid once written */
+ if (!fm3130->alarm_valid)
+ fm3130->alarm_valid = 1;
+
return 0;
}
+static int fm3130_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct fm3130 *fm3130 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ fm3130->regs[FM3130_RTC_CONTROL] =
+ i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);
+
+ dev_dbg(dev, "alarm_irq_enable: enable=%d, FM3130_RTC_CONTROL=%02x\n",
+ enabled, fm3130->regs[FM3130_RTC_CONTROL]);
+
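+ /* the CAL bit is cleared in both branches so calibration mode stays off */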
+ switch (enabled) {
+ case 0: /* alarm off */
+ ret = i2c_smbus_write_byte_data(fm3130->client,
+ FM3130_RTC_CONTROL, fm3130->regs[FM3130_RTC_CONTROL] &
+ ~(FM3130_RTC_CONTROL_BIT_CAL) &
+ ~(FM3130_RTC_CONTROL_BIT_AEN));
+ break;
+ case 1: /* alarm on */
+ ret = i2c_smbus_write_byte_data(fm3130->client,
+ FM3130_RTC_CONTROL, (fm3130->regs[FM3130_RTC_CONTROL] &
+ ~(FM3130_RTC_CONTROL_BIT_CAL)) |
+ FM3130_RTC_CONTROL_BIT_AEN);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct rtc_class_ops fm3130_rtc_ops = {
.read_time = fm3130_get_time,
.set_time = fm3130_set_time,
.read_alarm = fm3130_read_alarm,
.set_alarm = fm3130_set_alarm,
+ .alarm_irq_enable = fm3130_alarm_irq_enable,
};
static struct i2c_driver fm3130_driver;
@@ -356,6 +405,7 @@ static int __devinit fm3130_probe(struct i2c_client *client,
fm3130->msg[3].len = FM3130_ALARM_REGS;
fm3130->msg[3].buf = &fm3130->regs[FM3130_ALARM_SECONDS];
+ fm3130->alarm_valid = 0;
fm3130->data_valid = 0;
tmp = i2c_transfer(adapter, fm3130->msg, 4);
@@ -370,12 +420,6 @@ static int __devinit fm3130_probe(struct i2c_client *client,
fm3130->regs[FM3130_CAL_CONTROL] =
i2c_smbus_read_byte_data(client, FM3130_CAL_CONTROL);
- /* Checking for alarm */
- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_AF) {
- fm3130->alarm = 1;
- fm3130->regs[FM3130_RTC_CONTROL] &= ~FM3130_RTC_CONTROL_BIT_AF;
- }
-
/* Disabling calibration mode */
if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
@@ -400,44 +444,79 @@ static int __devinit fm3130_probe(struct i2c_client *client,
fm3130->regs[FM3130_CAL_CONTROL] &
~(FM3130_CAL_CONTROL_BIT_nOSCEN));
- /* oscillator fault? clear flag, and warn */
- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_LB)
+ /* low battery? clear flag, and warn */
+ if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_LB) {
+ i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
+ fm3130->regs[FM3130_RTC_CONTROL] &
+ ~(FM3130_RTC_CONTROL_BIT_LB));
dev_warn(&client->dev, "Low battery!\n");
+ }
- /* oscillator fault? clear flag, and warn */
+ /* check if Power On Reset bit is set */
if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_POR) {
i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
fm3130->regs[FM3130_RTC_CONTROL] &
~FM3130_RTC_CONTROL_BIT_POR);
- dev_warn(&client->dev, "SET TIME!\n");
+ dev_dbg(&client->dev, "POR bit is set\n");
}
/* ACS is controlled by alarm */
i2c_smbus_write_byte_data(client, FM3130_ALARM_WP_CONTROL, 0x80);
- /* TODO */
- /* TODO need to sanity check alarm */
- tmp = fm3130->regs[FM3130_RTC_SECONDS];
- tmp = bcd2bin(tmp & 0x7f);
- if (tmp > 60)
- goto exit_bad;
+ /* alarm registers sanity check */
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
+ if (tmp > 59)
+ goto bad_alarm;
+
tmp = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
- if (tmp > 60)
- goto exit_bad;
+ if (tmp > 59)
+ goto bad_alarm;
+
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_HOURS] & 0x3f);
+ if (tmp > 23)
+ goto bad_alarm;
tmp = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
if (tmp == 0 || tmp > 31)
- goto exit_bad;
+ goto bad_alarm;
tmp = bcd2bin(fm3130->regs[FM3130_RTC_MONTHS] & 0x1f);
if (tmp == 0 || tmp > 12)
- goto exit_bad;
+ goto bad_alarm;
- tmp = fm3130->regs[FM3130_RTC_HOURS];
+ fm3130->alarm_valid = 1;
+
+bad_alarm:
+
+ /* clock registers sanity check */
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
+ if (tmp > 59)
+ goto bad_clock;
+
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
+ if (tmp > 59)
+ goto bad_clock;
+
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_HOURS] & 0x3f);
+ if (tmp > 23)
+ goto bad_clock;
+
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_DAY] & 0x7);
+ if (tmp == 0 || tmp > 7)
+ goto bad_clock;
+
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
+ if (tmp == 0 || tmp > 31)
+ goto bad_clock;
+
+ tmp = bcd2bin(fm3130->regs[FM3130_RTC_MONTHS] & 0x1f);
+ if (tmp == 0 || tmp > 12)
+ goto bad_clock;
fm3130->data_valid = 1;
-exit_bad:
- if (!fm3130->data_valid)
+bad_clock:
+
+ if (!fm3130->data_valid || !fm3130->alarm_valid)
dev_dbg(&client->dev,
"%s: %02x %02x %02x %02x %02x %02x %02x %02x"
"%02x %02x %02x %02x %02x %02x %02x\n",
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
new file mode 100644
index 00000000000..2dd3c016327
--- /dev/null
+++ b/drivers/rtc/rtc-imxdi.c
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2010 Orex Computed Radiography
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* based on rtc-mc13892.c */
+
+/*
+ * This driver uses the 47-bit 32 kHz counter in the Freescale DryIce block
+ * to implement a Linux RTC. Times and alarms are truncated to seconds.
+ * Since the RTC framework performs API locking via rtc->ops_lock the
+ * only simultaneous accesses we need to deal with is updating DryIce
+ * registers while servicing an alarm.
+ *
+ * Note that reading the DSR (DryIce Status Register) automatically clears
+ * the WCF (Write Complete Flag). All DryIce writes are synchronized to the
+ * LP (Low Power) domain and set the WCF upon completion. Writes to the
+ * DIER (DryIce Interrupt Enable Register) are the only exception. These
+ * occur at normal bus speeds and do not set WCF. Periodic interrupts are
+ * not supported by the hardware.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/workqueue.h>
+
+/* DryIce Register Definitions */
+
+#define DTCMR 0x00 /* Time Counter MSB Reg */
+#define DTCLR 0x04 /* Time Counter LSB Reg */
+
+#define DCAMR 0x08 /* Clock Alarm MSB Reg */
+#define DCALR 0x0c /* Clock Alarm LSB Reg */
+#define DCAMR_UNSET 0xFFFFFFFF /* doomsday - 1 sec */
+
+#define DCR 0x10 /* Control Reg */
+#define DCR_TCE (1 << 3) /* Time Counter Enable */
+
+#define DSR 0x14 /* Status Reg */
+#define DSR_WBF (1 << 10) /* Write Busy Flag */
+#define DSR_WNF (1 << 9) /* Write Next Flag */
+#define DSR_WCF (1 << 8) /* Write Complete Flag */
+#define DSR_WEF (1 << 7) /* Write Error Flag */
+#define DSR_CAF (1 << 4) /* Clock Alarm Flag */
+#define DSR_NVF (1 << 1) /* Non-Valid Flag */
+#define DSR_SVF (1 << 0) /* Security Violation Flag */
+
+#define DIER 0x18 /* Interrupt Enable Reg */
+#define DIER_WNIE (1 << 9) /* Write Next Interrupt Enable */
+#define DIER_WCIE (1 << 8) /* Write Complete Interrupt Enable */
+#define DIER_WEIE (1 << 7) /* Write Error Interrupt Enable */
+#define DIER_CAIE (1 << 4) /* Clock Alarm Interrupt Enable */
+
+/**
+ * struct imxdi_dev - private imxdi rtc data
+ * @pdev: pointer to platform device
+ * @rtc: pointer to rtc struct
+ * @ioaddr: IO registers pointer
+ * @irq: dryice normal interrupt
+ * @clk: input reference clock
+ * @dsr: copy of the DSR register
+ * @irq_lock: interrupt enable register (DIER) lock
+ * @write_wait: registers write complete queue
+ * @write_mutex: serialize registers write
+ * @work: schedule alarm work
+ */
+struct imxdi_dev {
+ struct platform_device *pdev;
+ struct rtc_device *rtc;
+ void __iomem *ioaddr;
+ int irq;
+ struct clk *clk;
+ u32 dsr;
+ spinlock_t irq_lock;
+ wait_queue_head_t write_wait;
+ struct mutex write_mutex;
+ struct work_struct work;
+};
+
+/*
+ * enable a dryice interrupt
+ */
+static void di_int_enable(struct imxdi_dev *imxdi, u32 intr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&imxdi->irq_lock, flags);
+ __raw_writel(__raw_readl(imxdi->ioaddr + DIER) | intr,
+ imxdi->ioaddr + DIER);
+ spin_unlock_irqrestore(&imxdi->irq_lock, flags);
+}
+
+/*
+ * disable a dryice interrupt
+ */
+static void di_int_disable(struct imxdi_dev *imxdi, u32 intr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&imxdi->irq_lock, flags);
+ __raw_writel(__raw_readl(imxdi->ioaddr + DIER) & ~intr,
+ imxdi->ioaddr + DIER);
+ spin_unlock_irqrestore(&imxdi->irq_lock, flags);
+}
+
+/*
+ * This function attempts to clear the dryice write-error flag.
+ *
+ * A dryice write error is similar to a bus fault and should not occur in
+ * normal operation. Clearing the flag requires another write, so the root
+ * cause of the problem may need to be fixed before the flag can be cleared.
+ */
+static void clear_write_error(struct imxdi_dev *imxdi)
+{
+ int cnt;
+
+ dev_warn(&imxdi->pdev->dev, "WARNING: Register write error!\n");
+
+ /* clear the write error flag */
+ __raw_writel(DSR_WEF, imxdi->ioaddr + DSR);
+
+ /* wait for it to take effect */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if ((__raw_readl(imxdi->ioaddr + DSR) & DSR_WEF) == 0)
+ return;
+ udelay(10);
+ }
+ dev_err(&imxdi->pdev->dev,
+ "ERROR: Cannot clear write-error flag!\n");
+}
+
+/*
+ * Write a dryice register and wait until it completes.
+ *
+ * This function uses interrupts to determine when the
+ * write has completed.
+ */
+static int di_write_wait(struct imxdi_dev *imxdi, u32 val, int reg)
+{
+ int ret;
+ int rc = 0;
+
+ /* serialize register writes */
+ mutex_lock(&imxdi->write_mutex);
+
+ /* enable the write-complete interrupt */
+ di_int_enable(imxdi, DIER_WCIE);
+
+ imxdi->dsr = 0;
+
+ /* do the register write */
+ __raw_writel(val, imxdi->ioaddr + reg);
+
+ /* wait for the write to finish */
+ ret = wait_event_interruptible_timeout(imxdi->write_wait,
+ imxdi->dsr & (DSR_WCF | DSR_WEF), msecs_to_jiffies(1));
+ if (ret < 0) {
+ rc = ret;
+ goto out;
+ } else if (ret == 0) {
+ dev_warn(&imxdi->pdev->dev,
+ "Write-wait timeout "
+ "val = 0x%08x reg = 0x%08x\n", val, reg);
+ }
+
+ /* check for write error */
+ if (imxdi->dsr & DSR_WEF) {
+ clear_write_error(imxdi);
+ rc = -EIO;
+ }
+
+out:
+ mutex_unlock(&imxdi->write_mutex);
+
+ return rc;
+}
+
+/*
+ * read the seconds portion of the current time from the dryice time counter
+ */
+static int dryice_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct imxdi_dev *imxdi = dev_get_drvdata(dev);
+ unsigned long now;
+
+ now = __raw_readl(imxdi->ioaddr + DTCMR);
+ rtc_time_to_tm(now, tm);
+
+ return 0;
+}
+
+/*
+ * set the seconds portion of dryice time counter and clear the
+ * fractional part.
+ */
+static int dryice_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct imxdi_dev *imxdi = dev_get_drvdata(dev);
+ int rc;
+
+ /* zero the fractional part first */
+ rc = di_write_wait(imxdi, 0, DTCLR);
+ if (rc == 0)
+ rc = di_write_wait(imxdi, secs, DTCMR);
+
+ return rc;
+}
+
+static int dryice_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct imxdi_dev *imxdi = dev_get_drvdata(dev);
+
+ if (enabled)
+ di_int_enable(imxdi, DIER_CAIE);
+ else
+ di_int_disable(imxdi, DIER_CAIE);
+
+ return 0;
+}
+
+/*
+ * read the seconds portion of the alarm register.
+ * the fractional part of the alarm register is always zero.
+ */
+static int dryice_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct imxdi_dev *imxdi = dev_get_drvdata(dev);
+ u32 dcamr;
+
+ dcamr = __raw_readl(imxdi->ioaddr + DCAMR);
+ rtc_time_to_tm(dcamr, &alarm->time);
+
+ /* alarm is enabled if the interrupt is enabled */
+ alarm->enabled = (__raw_readl(imxdi->ioaddr + DIER) & DIER_CAIE) != 0;
+
+ /* don't allow the DSR read to mess up DSR_WCF */
+ mutex_lock(&imxdi->write_mutex);
+
+ /* alarm is pending if the alarm flag is set */
+ alarm->pending = (__raw_readl(imxdi->ioaddr + DSR) & DSR_CAF) != 0;
+
+ mutex_unlock(&imxdi->write_mutex);
+
+ return 0;
+}
+
+/*
+ * set the seconds portion of dryice alarm register
+ */
+static int dryice_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct imxdi_dev *imxdi = dev_get_drvdata(dev);
+ unsigned long now;
+ unsigned long alarm_time;
+ int rc;
+
+ rc = rtc_tm_to_time(&alarm->time, &alarm_time);
+ if (rc)
+ return rc;
+
+ /* don't allow setting alarm in the past */
+ now = __raw_readl(imxdi->ioaddr + DTCMR);
+ if (alarm_time < now)
+ return -EINVAL;
+
+ /* write the new alarm time */
+ rc = di_write_wait(imxdi, (u32)alarm_time, DCAMR);
+ if (rc)
+ return rc;
+
+ if (alarm->enabled)
+ di_int_enable(imxdi, DIER_CAIE); /* enable alarm intr */
+ else
+ di_int_disable(imxdi, DIER_CAIE); /* disable alarm intr */
+
+ return 0;
+}
+
+static struct rtc_class_ops dryice_rtc_ops = {
+ .read_time = dryice_rtc_read_time,
+ .set_mmss = dryice_rtc_set_mmss,
+ .alarm_irq_enable = dryice_rtc_alarm_irq_enable,
+ .read_alarm = dryice_rtc_read_alarm,
+ .set_alarm = dryice_rtc_set_alarm,
+};
+
+/*
+ * dryice "normal" interrupt handler
+ */
+static irqreturn_t dryice_norm_irq(int irq, void *dev_id)
+{
+ struct imxdi_dev *imxdi = dev_id;
+ u32 dsr, dier;
+ irqreturn_t rc = IRQ_NONE;
+
+ dier = __raw_readl(imxdi->ioaddr + DIER);
+
+ /* handle write complete and write error cases */
+ if ((dier & DIER_WCIE)) {
+ /* If the write wait queue is empty there are no pending write
+ * operations; the interrupt is for DryIce security and must be
+ * returned as IRQ_NONE. */
+ if (list_empty_careful(&imxdi->write_wait.task_list))
+ return rc;
+
+ /* DSR_WCF clears itself on DSR read */
+ dsr = __raw_readl(imxdi->ioaddr + DSR);
+ if ((dsr & (DSR_WCF | DSR_WEF))) {
+ /* mask the interrupt */
+ di_int_disable(imxdi, DIER_WCIE);
+
+ /* save the dsr value for the wait queue */
+ imxdi->dsr |= dsr;
+
+ wake_up_interruptible(&imxdi->write_wait);
+ rc = IRQ_HANDLED;
+ }
+ }
+
+ /* handle the alarm case */
+ if ((dier & DIER_CAIE)) {
+ /* DSR_WCF clears itself on DSR read */
+ dsr = __raw_readl(imxdi->ioaddr + DSR);
+ if (dsr & DSR_CAF) {
+ /* mask the interrupt */
+ di_int_disable(imxdi, DIER_CAIE);
+
+ /* finish alarm in user context */
+ schedule_work(&imxdi->work);
+ rc = IRQ_HANDLED;
+ }
+ }
+ return rc;
+}
+
+/*
+ * post the alarm event from user context so it can sleep
+ * on the write completion.
+ */
+static void dryice_work(struct work_struct *work)
+{
+ struct imxdi_dev *imxdi = container_of(work,
+ struct imxdi_dev, work);
+
+ /* dismiss the interrupt (ignore error) */
+ di_write_wait(imxdi, DSR_CAF, DSR);
+
+ /* pass the alarm event to the rtc framework. */
+ rtc_update_irq(imxdi->rtc, 1, RTC_AF | RTC_IRQF);
+}
+
+/*
+ * probe for dryice rtc device
+ */
+static int dryice_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct imxdi_dev *imxdi;
+ int rc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL);
+ if (!imxdi)
+ return -ENOMEM;
+
+ imxdi->pdev = pdev;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name))
+ return -EBUSY;
+
+ imxdi->ioaddr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (imxdi->ioaddr == NULL)
+ return -ENOMEM;
+
+ imxdi->irq = platform_get_irq(pdev, 0);
+ if (imxdi->irq < 0)
+ return imxdi->irq;
+
+ init_waitqueue_head(&imxdi->write_wait);
+
+ INIT_WORK(&imxdi->work, dryice_work);
+
+ mutex_init(&imxdi->write_mutex);
+
+ imxdi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(imxdi->clk))
+ return PTR_ERR(imxdi->clk);
+ clk_enable(imxdi->clk);
+
+ /*
+ * Initialize dryice hardware
+ */
+
+ /* mask all interrupts */
+ __raw_writel(0, imxdi->ioaddr + DIER);
+
+ rc = devm_request_irq(&pdev->dev, imxdi->irq, dryice_norm_irq,
+ IRQF_SHARED, pdev->name, imxdi);
+ if (rc) {
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ goto err;
+ }
+
+ /* put dryice into valid state */
+ if (__raw_readl(imxdi->ioaddr + DSR) & DSR_NVF) {
+ rc = di_write_wait(imxdi, DSR_NVF | DSR_SVF, DSR);
+ if (rc)
+ goto err;
+ }
+
+ /* initialize alarm */
+ rc = di_write_wait(imxdi, DCAMR_UNSET, DCAMR);
+ if (rc)
+ goto err;
+ rc = di_write_wait(imxdi, 0, DCALR);
+ if (rc)
+ goto err;
+
+ /* clear alarm flag */
+ if (__raw_readl(imxdi->ioaddr + DSR) & DSR_CAF) {
+ rc = di_write_wait(imxdi, DSR_CAF, DSR);
+ if (rc)
+ goto err;
+ }
+
+ /* the timer won't count if it has never been written to */
+ if (__raw_readl(imxdi->ioaddr + DTCMR) == 0) {
+ rc = di_write_wait(imxdi, 0, DTCMR);
+ if (rc)
+ goto err;
+ }
+
+ /* start keeping time */
+ if (!(__raw_readl(imxdi->ioaddr + DCR) & DCR_TCE)) {
+ rc = di_write_wait(imxdi,
+ __raw_readl(imxdi->ioaddr + DCR) | DCR_TCE,
+ DCR);
+ if (rc)
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, imxdi);
+ imxdi->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &dryice_rtc_ops, THIS_MODULE);
+ if (IS_ERR(imxdi->rtc)) {
+ rc = PTR_ERR(imxdi->rtc);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable(imxdi->clk);
+ clk_put(imxdi->clk);
+
+ return rc;
+}
+
+static int __devexit dryice_rtc_remove(struct platform_device *pdev)
+{
+ struct imxdi_dev *imxdi = platform_get_drvdata(pdev);
+
+ flush_work(&imxdi->work);
+
+ /* mask all interrupts */
+ __raw_writel(0, imxdi->ioaddr + DIER);
+
+ rtc_device_unregister(imxdi->rtc);
+
+ clk_disable(imxdi->clk);
+ clk_put(imxdi->clk);
+
+ return 0;
+}
+
+static struct platform_driver dryice_rtc_driver = {
+ .driver = {
+ .name = "imxdi_rtc",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(dryice_rtc_remove),
+};
+
+static int __init dryice_rtc_init(void)
+{
+ return platform_driver_probe(&dryice_rtc_driver, dryice_rtc_probe);
+}
+
+static void __exit dryice_rtc_exit(void)
+{
+ platform_driver_unregister(&dryice_rtc_driver);
+}
+
+module_init(dryice_rtc_init);
+module_exit(dryice_rtc_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("IMX DryIce Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
new file mode 100644
index 00000000000..ddbc797ea6c
--- /dev/null
+++ b/drivers/rtc/rtc-isl12022.c
@@ -0,0 +1,327 @@
+/*
+ * An I2C driver for the Intersil ISL 12022
+ *
+ * Author: Roman Fietze <roman.fietze@telemotive.de>
+ *
+ * Based on the Philips PCF8563 RTC
+ * by Alessandro Zummo <a.zummo@towertech.it>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define DRV_VERSION "0.1"
+
+/* ISL register offsets */
+#define ISL12022_REG_SC 0x00
+#define ISL12022_REG_MN 0x01
+#define ISL12022_REG_HR 0x02
+#define ISL12022_REG_DT 0x03
+#define ISL12022_REG_MO 0x04
+#define ISL12022_REG_YR 0x05
+#define ISL12022_REG_DW 0x06
+
+#define ISL12022_REG_SR 0x07
+#define ISL12022_REG_INT 0x08
+
+/* ISL register bits */
+#define ISL12022_HR_MIL (1 << 7) /* military or 24 hour time */
+
+#define ISL12022_SR_LBAT85 (1 << 2)
+#define ISL12022_SR_LBAT75 (1 << 1)
+
+#define ISL12022_INT_WRTC (1 << 6)
+
+
+static struct i2c_driver isl12022_driver;
+
+struct isl12022 {
+ struct rtc_device *rtc;
+
+ bool write_enabled; /* true if write enable is set */
+};
+
+
+static int isl12022_read_regs(struct i2c_client *client, uint8_t reg,
+ uint8_t *data, size_t n)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = data
+ }, /* setup read ptr */
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = n,
+ .buf = data
+ }
+ };
+
+ int ret;
+
+ data[0] = reg;
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "%s: read error, ret=%d\n",
+ __func__, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int isl12022_write_reg(struct i2c_client *client,
+ uint8_t reg, uint8_t val)
+{
+ uint8_t data[2] = { reg, val };
+ int err;
+
+ err = i2c_master_send(client, data, sizeof(data));
+ if (err != sizeof(data)) {
+ dev_err(&client->dev,
+ "%s: err=%d addr=%02x, data=%02x\n",
+ __func__, err, data[0], data[1]);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+/*
+ * In the routines that deal directly with the isl12022 hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int isl12022_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ uint8_t buf[ISL12022_REG_INT + 1];
+ int ret;
+
+ ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
+ dev_warn(&client->dev,
+ "voltage dropped below %u%%, "
+ "date and time is not reliable.\n",
+ buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
+ }
+
+ dev_dbg(&client->dev,
+ "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
+ "mday=%02x, mon=%02x, year=%02x, wday=%02x, "
+ "sr=%02x, int=%02x",
+ __func__,
+ buf[ISL12022_REG_SC],
+ buf[ISL12022_REG_MN],
+ buf[ISL12022_REG_HR],
+ buf[ISL12022_REG_DT],
+ buf[ISL12022_REG_MO],
+ buf[ISL12022_REG_YR],
+ buf[ISL12022_REG_DW],
+ buf[ISL12022_REG_SR],
+ buf[ISL12022_REG_INT]);
+
+ tm->tm_sec = bcd2bin(buf[ISL12022_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(buf[ISL12022_REG_MN] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[ISL12022_REG_HR] & 0x3F);
+ tm->tm_mday = bcd2bin(buf[ISL12022_REG_DT] & 0x3F);
+ tm->tm_wday = buf[ISL12022_REG_DW] & 0x07;
+ tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100;
+
+ dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ /* The clock can report an invalid date/time, but we cannot return
+ * -EINVAL, otherwise hwclock will refuse to set the time on bootup. */
+ if (rtc_valid_tm(tm) < 0)
+ dev_err(&client->dev, "retrieved date and time is invalid.\n");
+
+ return 0;
+}
+
+static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ struct isl12022 *isl12022 = i2c_get_clientdata(client);
+ size_t i;
+ int ret;
+ uint8_t buf[ISL12022_REG_DW + 1];
+
+ dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ if (!isl12022->write_enabled) {
+
+ ret = isl12022_read_regs(client, ISL12022_REG_INT, buf, 1);
+ if (ret)
+ return ret;
+
+ /* Check if WRTC (write rtc enable) is set; the factory default
+ * is 0 (not set). */
+ if (!(buf[0] & ISL12022_INT_WRTC)) {
+ dev_info(&client->dev,
+ "init write enable and 24 hour format\n");
+
+ /* Set the write enable bit. */
+ ret = isl12022_write_reg(client,
+ ISL12022_REG_INT,
+ buf[0] | ISL12022_INT_WRTC);
+ if (ret)
+ return ret;
+
+ /* Write to any RTC register to start the RTC; we use the
+ * HR register and set the MIL bit to select the 24 hour
+ * format. */
+ ret = isl12022_read_regs(client, ISL12022_REG_HR,
+ buf, 1);
+ if (ret)
+ return ret;
+
+ ret = isl12022_write_reg(client,
+ ISL12022_REG_HR,
+ buf[0] | ISL12022_HR_MIL);
+ if (ret)
+ return ret;
+ }
+
+ isl12022->write_enabled = 1;
+ }
+
+ /* hours, minutes and seconds */
+ buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec);
+ buf[ISL12022_REG_MN] = bin2bcd(tm->tm_min);
+ buf[ISL12022_REG_HR] = bin2bcd(tm->tm_hour) | ISL12022_HR_MIL;
+
+ buf[ISL12022_REG_DT] = bin2bcd(tm->tm_mday);
+
+ /* month, 1 - 12 */
+ buf[ISL12022_REG_MO] = bin2bcd(tm->tm_mon + 1);
+
+ /* year and century */
+ buf[ISL12022_REG_YR] = bin2bcd(tm->tm_year % 100);
+
+ buf[ISL12022_REG_DW] = tm->tm_wday & 0x07;
+
+ /* write register's data */
+ for (i = 0; i < ARRAY_SIZE(buf); i++) {
+ ret = isl12022_write_reg(client, ISL12022_REG_SC + i,
+ buf[ISL12022_REG_SC + i]);
+ if (ret)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ return isl12022_get_datetime(to_i2c_client(dev), tm);
+}
+
+static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ return isl12022_set_datetime(to_i2c_client(dev), tm);
+}
+
+static const struct rtc_class_ops isl12022_rtc_ops = {
+ .read_time = isl12022_rtc_read_time,
+ .set_time = isl12022_rtc_set_time,
+};
+
+static int isl12022_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct isl12022 *isl12022;
+
+ int ret = 0;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ isl12022 = kzalloc(sizeof(struct isl12022), GFP_KERNEL);
+ if (!isl12022)
+ return -ENOMEM;
+
+ dev_dbg(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+
+ i2c_set_clientdata(client, isl12022);
+
+ isl12022->rtc = rtc_device_register(isl12022_driver.driver.name,
+ &client->dev,
+ &isl12022_rtc_ops,
+ THIS_MODULE);
+
+ if (IS_ERR(isl12022->rtc)) {
+ ret = PTR_ERR(isl12022->rtc);
+ goto exit_kfree;
+ }
+
+ return 0;
+
+exit_kfree:
+ kfree(isl12022);
+
+ return ret;
+}
+
+static int isl12022_remove(struct i2c_client *client)
+{
+ struct isl12022 *isl12022 = i2c_get_clientdata(client);
+
+ rtc_device_unregister(isl12022->rtc);
+ kfree(isl12022);
+
+ return 0;
+}
+
+static const struct i2c_device_id isl12022_id[] = {
+ { "isl12022", 0 },
+ { "rtc8564", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, isl12022_id);
+
+static struct i2c_driver isl12022_driver = {
+ .driver = {
+ .name = "rtc-isl12022",
+ },
+ .probe = isl12022_probe,
+ .remove = isl12022_remove,
+ .id_table = isl12022_id,
+};
+
+static int __init isl12022_init(void)
+{
+ return i2c_add_driver(&isl12022_driver);
+}
+
+static void __exit isl12022_exit(void)
+{
+ i2c_del_driver(&isl12022_driver);
+}
+
+module_init(isl12022_init);
+module_exit(isl12022_exit);
+
+MODULE_AUTHOR("roman.fietze@telemotive.de");
+MODULE_DESCRIPTION("ISL 12022 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
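/*
 * Worked example of the register encoding above (illustrative only):
 * for 2010-08-15 14:30:09, isl12022_set_datetime() writes SC=0x09,
 * MN=0x30, HR=0x14|MIL, DT=0x15, MO=0x08 (tm_mon 7 + 1) and YR=0x10
 * (tm_year 110 % 100); isl12022_get_datetime() then recovers tm_year
 * as bcd2bin(0x10) + 100 = 110.
 */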
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
new file mode 100644
index 00000000000..2619d57b91d
--- /dev/null
+++ b/drivers/rtc/rtc-jz4740.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC RTC driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define JZ_REG_RTC_CTRL 0x00
+#define JZ_REG_RTC_SEC 0x04
+#define JZ_REG_RTC_SEC_ALARM 0x08
+#define JZ_REG_RTC_REGULATOR 0x0C
+#define JZ_REG_RTC_HIBERNATE 0x20
+#define JZ_REG_RTC_SCRATCHPAD 0x34
+
+#define JZ_RTC_CTRL_WRDY BIT(7)
+#define JZ_RTC_CTRL_1HZ BIT(6)
+#define JZ_RTC_CTRL_1HZ_IRQ BIT(5)
+#define JZ_RTC_CTRL_AF BIT(4)
+#define JZ_RTC_CTRL_AF_IRQ BIT(3)
+#define JZ_RTC_CTRL_AE BIT(2)
+#define JZ_RTC_CTRL_ENABLE BIT(0)
+
+struct jz4740_rtc {
+ struct resource *mem;
+ void __iomem *base;
+
+ struct rtc_device *rtc;
+
+ unsigned int irq;
+
+ spinlock_t lock;
+};
+
+static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
+{
+ return readl(rtc->base + reg);
+}
+
+static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
+{
+ uint32_t ctrl;
+ int timeout = 1000;
+
+ do {
+ ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
+ } while (!(ctrl & JZ_RTC_CTRL_WRDY) && --timeout);
+
+ return timeout ? 0 : -EIO;
+}
+
+static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
+ uint32_t val)
+{
+ int ret;
+ ret = jz4740_rtc_wait_write_ready(rtc);
+ if (ret == 0)
+ writel(val, rtc->base + reg);
+
+ return ret;
+}
+
+static int jz4740_rtc_ctrl_set_bits(struct jz4740_rtc *rtc, uint32_t mask,
+ bool set)
+{
+ int ret;
+ unsigned long flags;
+ uint32_t ctrl;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+
+ ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
+
+ /* Don't clear interrupt flags by accident */
+ ctrl |= JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF;
+
+ if (set)
+ ctrl |= mask;
+ else
+ ctrl &= ~mask;
+
+ ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CTRL, ctrl);
+
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ return ret;
+}
+
+static int jz4740_rtc_read_time(struct device *dev, struct rtc_time *time)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ uint32_t secs, secs2;
+ int timeout = 5;
+
+ /* If the seconds register is read while it is being updated, it can contain a
+ * bogus value. This can be avoided by making sure that two consecutive
+ * reads have the same value.
+ */
+ secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC);
+ secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC);
+
+ while (secs != secs2 && --timeout) {
+ secs = secs2;
+ secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC);
+ }
+
+ if (timeout == 0)
+ return -EIO;
+
+ rtc_time_to_tm(secs, time);
+
+ return rtc_valid_tm(time);
+}
+
+static int jz4740_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+ return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, secs);
+}
+
+static int jz4740_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ uint32_t secs;
+ uint32_t ctrl;
+
+ secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC_ALARM);
+
+ ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
+
+ alrm->enabled = !!(ctrl & JZ_RTC_CTRL_AE);
+ alrm->pending = !!(ctrl & JZ_RTC_CTRL_AF);
+
+ rtc_time_to_tm(secs, &alrm->time);
+
+ return rtc_valid_tm(&alrm->time);
+}
+
+static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long secs;
+
+ rtc_tm_to_time(&alrm->time, &secs);
+
+ ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
+ if (!ret)
+ ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE, alrm->enabled);
+
+ return ret;
+}
+
+static int jz4740_rtc_update_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ_IRQ, enable);
+}
+
+static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
+}
+
+static struct rtc_class_ops jz4740_rtc_ops = {
+ .read_time = jz4740_rtc_read_time,
+ .set_mmss = jz4740_rtc_set_mmss,
+ .read_alarm = jz4740_rtc_read_alarm,
+ .set_alarm = jz4740_rtc_set_alarm,
+ .update_irq_enable = jz4740_rtc_update_irq_enable,
+ .alarm_irq_enable = jz4740_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t jz4740_rtc_irq(int irq, void *data)
+{
+ struct jz4740_rtc *rtc = data;
+ uint32_t ctrl;
+ unsigned long events = 0;
+
+ ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
+
+ if (ctrl & JZ_RTC_CTRL_1HZ)
+ events |= (RTC_UF | RTC_IRQF);
+
+ if (ctrl & JZ_RTC_CTRL_AF)
+ events |= (RTC_AF | RTC_IRQF);
+
+ rtc_update_irq(rtc->rtc, 1, events);
+
+ jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF, false);
+
+ return IRQ_HANDLED;
+}
+
+void jz4740_rtc_poweroff(struct device *dev)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1);
+}
+EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff);
+
+static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jz4740_rtc *rtc;
+ uint32_t scratchpad;
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get platform irq\n");
+ goto err_free;
+ }
+
+ rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rtc->mem) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev, "Failed to get platform mmio memory\n");
+ goto err_free;
+ }
+
+ rtc->mem = request_mem_region(rtc->mem->start, resource_size(rtc->mem),
+ pdev->name);
+ if (!rtc->mem) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to request mmio memory region\n");
+ goto err_free;
+ }
+
+ rtc->base = ioremap_nocache(rtc->mem->start, resource_size(rtc->mem));
+ if (!rtc->base) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
+ goto err_release_mem_region;
+ }
+
+ spin_lock_init(&rtc->lock);
+
+ platform_set_drvdata(pdev, rtc);
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ ret = PTR_ERR(rtc->rtc);
+ dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret);
+ goto err_iounmap;
+ }
+
+ ret = request_irq(rtc->irq, jz4740_rtc_irq, 0,
+ pdev->name, rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret);
+ goto err_unregister_rtc;
+ }
+
+ scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD);
+ if (scratchpad != 0x12345678) {
+ ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
+ if (!ret)
+ ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not write to RTC registers\n");
+ goto err_free_irq;
+ }
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(rtc->irq, rtc);
+err_unregister_rtc:
+ rtc_device_unregister(rtc->rtc);
+err_iounmap:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(rtc->base);
+err_release_mem_region:
+ release_mem_region(rtc->mem->start, resource_size(rtc->mem));
+err_free:
+ kfree(rtc);
+
+ return ret;
+}
+
+static int __devexit jz4740_rtc_remove(struct platform_device *pdev)
+{
+ struct jz4740_rtc *rtc = platform_get_drvdata(pdev);
+
+ free_irq(rtc->irq, rtc);
+
+ rtc_device_unregister(rtc->rtc);
+
+ iounmap(rtc->base);
+ release_mem_region(rtc->mem->start, resource_size(rtc->mem));
+
+ kfree(rtc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+struct platform_driver jz4740_rtc_driver = {
+ .probe = jz4740_rtc_probe,
+ .remove = __devexit_p(jz4740_rtc_remove),
+ .driver = {
+ .name = "jz4740-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init jz4740_rtc_init(void)
+{
+ return platform_driver_register(&jz4740_rtc_driver);
+}
+module_init(jz4740_rtc_init);
+
+static void __exit jz4740_rtc_exit(void)
+{
+ platform_driver_unregister(&jz4740_rtc_driver);
+}
+module_exit(jz4740_rtc_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
+MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 6dc4e624141..d60557cae8e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -121,7 +121,7 @@ static int m41t80_get_datetime(struct i2c_client *client,
/* assume 20YY not 19YY, and ignore the Century Bit */
tm->tm_year = bcd2bin(buf[M41T80_REG_YEAR]) + 100;
- return 0;
+ return rtc_valid_tm(tm);
}
/* Sets the given date and time to the real time clock. */
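/*
 * rtc_valid_tm() returns 0 for a sane calendar date and -EINVAL otherwise,
 * so with the change above a chip that lost backup power and reports, say,
 * month 45 or mday 0 now makes RTC_RD_TIME fail instead of handing user
 * space a bogus date. The same conversion is applied to the other drivers
 * touched below.
 */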
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index be8359fdb65..a99a0b554eb 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -105,7 +105,7 @@ static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
dev_dbg(dev, "RTC read time %04d-%02d-%02d %02d/%02d/%02d\n",
tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
- return 0;
+ return rtc_valid_tm(tm);
}
static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -196,7 +196,7 @@ static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(dev, "RTC read alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
- return 0;
+ return rtc_valid_tm(tm);
}
/*
@@ -506,7 +506,6 @@ out:
free_irq(m48t59->irq, &pdev->dev);
if (m48t59->ioaddr)
iounmap(m48t59->ioaddr);
- if (m48t59)
kfree(m48t59);
return ret;
}
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 7c045cffa9f..f981287d582 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -77,7 +77,7 @@ static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ops->readbyte(M48T86_REG_HOUR) & 0x80)
tm->tm_hour += 12;
- return 0;
+ return rtc_valid_tm(tm);
}
static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index a4f6665ab3c..486142c2637 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -159,7 +159,7 @@ static int max6900_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
bcd2bin(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
tm->tm_wday = bcd2bin(regs[MAX6900_REG_DW]);
- return 0;
+ return rtc_valid_tm(tm);
}
static int max6900_i2c_clear_write_protect(struct i2c_client *client)
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index db5d8c416d2..dfcdf0901d2 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -268,7 +268,7 @@ static const struct rtc_class_ops mpc5121_rtc_ops = {
.update_irq_enable = mpc5121_rtc_update_irq_enable,
};
-static int __devinit mpc5121_rtc_probe(struct of_device *op,
+static int __devinit mpc5121_rtc_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct mpc5121_rtc_data *rtc;
@@ -338,7 +338,7 @@ out_free:
return err;
}
-static int __devexit mpc5121_rtc_remove(struct of_device *op)
+static int __devexit mpc5121_rtc_remove(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(&op->dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 25ec921db07..0b06c1e03fd 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -83,12 +83,6 @@ struct rtc_plat_data {
void __iomem *ioaddr;
int irq;
struct clk *clk;
- unsigned int irqen;
- int alrm_sec;
- int alrm_min;
- int alrm_hour;
- int alrm_mday;
- struct timespec mxc_rtc_delta;
struct rtc_time g_rtc_alarm;
};
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index a351bd5d817..62de66af0a6 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -85,25 +85,24 @@ static irqreturn_t nuc900_rtc_interrupt(int irq, void *_rtc)
static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
{
- unsigned int i;
+ unsigned int timeout = 0x1000;
__raw_writel(INIRRESET, nuc900_rtc->rtc_reg + REG_RTC_INIR);
mdelay(10);
__raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER);
- for (i = 0; i < 1000; i++) {
- if (__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
- return 0;
- }
+ while (!(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
+ && timeout--)
+ mdelay(1);
- if ((__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) == 0x0)
- return ERR_PTR(-ENODEV);
+ if (!timeout)
+ return ERR_PTR(-EPERM);
- return ERR_PTR(-EPERM);
+ return 0;
}
-static void nuc900_rtc_bcd2bin(unsigned int timereg,
+static int nuc900_rtc_bcd2bin(unsigned int timereg,
unsigned int calreg, struct rtc_time *tm)
{
tm->tm_mday = bcd2bin(calreg >> 0);
@@ -114,15 +113,21 @@ static void nuc900_rtc_bcd2bin(unsigned int timereg,
tm->tm_min = bcd2bin(timereg >> 8);
tm->tm_hour = bcd2bin(timereg >> 16);
- rtc_valid_tm(tm);
+ return rtc_valid_tm(tm);
}
-static void nuc900_rtc_bin2bcd(struct rtc_time *settm,
+static void nuc900_rtc_bin2bcd(struct device *dev, struct rtc_time *settm,
struct nuc900_bcd_time *gettm)
{
gettm->bcd_mday = bin2bcd(settm->tm_mday) << 0;
gettm->bcd_mon = bin2bcd(settm->tm_mon) << 8;
- gettm->bcd_year = bin2bcd(settm->tm_year - 100) << 16;
+
+ if (settm->tm_year < 100) {
+ dev_warn(dev, "The year will be between 1970-1999, right?\n");
+ gettm->bcd_year = bin2bcd(settm->tm_year) << 16;
+ } else {
+ gettm->bcd_year = bin2bcd(settm->tm_year - 100) << 16;
+ }
gettm->bcd_sec = bin2bcd(settm->tm_sec) << 0;
gettm->bcd_min = bin2bcd(settm->tm_min) << 8;
@@ -165,9 +170,7 @@ static int nuc900_rtc_read_time(struct device *dev, struct rtc_time *tm)
timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TLR);
clrval = __raw_readl(rtc->rtc_reg + REG_RTC_CLR);
- nuc900_rtc_bcd2bin(timeval, clrval, tm);
-
- return 0;
+ return nuc900_rtc_bcd2bin(timeval, clrval, tm);
}
static int nuc900_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -177,7 +180,7 @@ static int nuc900_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long val;
int *err;
- nuc900_rtc_bin2bcd(tm, &gettm);
+ nuc900_rtc_bin2bcd(dev, tm, &gettm);
err = check_rtc_access_enable(rtc);
if (IS_ERR(err))
@@ -200,9 +203,7 @@ static int nuc900_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TAR);
carval = __raw_readl(rtc->rtc_reg + REG_RTC_CAR);
- nuc900_rtc_bcd2bin(timeval, carval, &alrm->time);
-
- return 0;
+ return nuc900_rtc_bcd2bin(timeval, carval, &alrm->time);
}
static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -212,7 +213,7 @@ static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned long val;
int *err;
- nuc900_rtc_bin2bcd(&alrm->time, &tm);
+ nuc900_rtc_bin2bcd(dev, &alrm->time, &tm);
err = check_rtc_access_enable(rtc);
if (IS_ERR(err))
@@ -268,29 +269,30 @@ static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
goto fail2;
}
- nuc900_rtc->irq_num = platform_get_irq(pdev, 0);
- if (request_irq(nuc900_rtc->irq_num, nuc900_rtc_interrupt,
- IRQF_DISABLED, "nuc900rtc", nuc900_rtc)) {
- dev_err(&pdev->dev, "NUC900 RTC request irq failed\n");
- err = -EBUSY;
- goto fail3;
- }
+ platform_set_drvdata(pdev, nuc900_rtc);
nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
&nuc900_rtc_ops, THIS_MODULE);
if (IS_ERR(nuc900_rtc->rtcdev)) {
dev_err(&pdev->dev, "rtc device register faild\n");
err = PTR_ERR(nuc900_rtc->rtcdev);
- goto fail4;
+ goto fail3;
}
- platform_set_drvdata(pdev, nuc900_rtc);
__raw_writel(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_TSSR) | MODE24,
nuc900_rtc->rtc_reg + REG_RTC_TSSR);
+ nuc900_rtc->irq_num = platform_get_irq(pdev, 0);
+ if (request_irq(nuc900_rtc->irq_num, nuc900_rtc_interrupt,
+ IRQF_DISABLED, "nuc900rtc", nuc900_rtc)) {
+ dev_err(&pdev->dev, "NUC900 RTC request irq failed\n");
+ err = -EBUSY;
+ goto fail4;
+ }
+
return 0;
-fail4: free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+fail4: rtc_device_unregister(nuc900_rtc->rtcdev);
fail3: iounmap(nuc900_rtc->rtc_reg);
fail2: release_mem_region(res->start, resource_size(res));
fail1: kfree(nuc900_rtc);
@@ -302,8 +304,8 @@ static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
struct nuc900_rtc *nuc900_rtc = platform_get_drvdata(pdev);
struct resource *res;
- rtc_device_unregister(nuc900_rtc->rtcdev);
free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+ rtc_device_unregister(nuc900_rtc->rtcdev);
iounmap(nuc900_rtc->rtc_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
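/*
 * Worked example of the year handling above (illustrative only): for 2010,
 * tm_year is 110, so nuc900_rtc_bin2bcd() stores bin2bcd(110 - 100) = 0x10;
 * for tm_year 85 (1985) it now warns and stores bin2bcd(85) = 0x85 instead
 * of feeding a negative value to bin2bcd() as the old "tm_year - 100" did.
 */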
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 1af42b4a6f5..b42c0c67926 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -172,14 +172,6 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
-struct pcf8563_limit
-{
- unsigned char reg;
- unsigned char mask;
- unsigned char min;
- unsigned char max;
-};
-
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return pcf8563_get_datetime(to_i2c_client(dev), tm);
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 71bbefc3544..b7a6690e5b3 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -23,7 +23,6 @@
#include <linux/io.h>
#include <linux/bcd.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/slab.h>
/*
@@ -404,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
}
if (request_irq(adev->irq[0], pl031_interrupt,
- IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+ IRQF_DISABLED, "rtc-pl031", ldata)) {
ret = -EIO;
goto out_no_irq;
}
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index e9c6fa03598..29e867a1aaa 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -87,7 +87,6 @@ struct pxa_rtc {
int irq_Alrm;
struct rtc_device *rtc;
spinlock_t lock; /* Protects this structure */
- struct rtc_time rtc_alarm;
};
static u32 ryxr_calc(struct rtc_time *tm)
@@ -236,32 +235,34 @@ static int pxa_periodic_irq_set_state(struct device *dev, int enabled)
return 0;
}
-static int pxa_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
- int ret = 0;
spin_lock_irq(&pxa_rtc->lock);
- switch (cmd) {
- case RTC_AIE_OFF:
- rtsr_clear_bits(pxa_rtc, RTSR_RDALE1);
- break;
- case RTC_AIE_ON:
+
+ if (enabled)
rtsr_set_bits(pxa_rtc, RTSR_RDALE1);
- break;
- case RTC_UIE_OFF:
- rtsr_clear_bits(pxa_rtc, RTSR_HZE);
- break;
- case RTC_UIE_ON:
+ else
+ rtsr_clear_bits(pxa_rtc, RTSR_RDALE1);
+
+ spin_unlock_irq(&pxa_rtc->lock);
+ return 0;
+}
+
+static int pxa_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&pxa_rtc->lock);
+
+ if (enabled)
rtsr_set_bits(pxa_rtc, RTSR_HZE);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
+ else
+ rtsr_clear_bits(pxa_rtc, RTSR_HZE);
spin_unlock_irq(&pxa_rtc->lock);
- return ret;
+ return 0;
}
static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -340,11 +341,12 @@ static int pxa_rtc_proc(struct device *dev, struct seq_file *seq)
static const struct rtc_class_ops pxa_rtc_ops = {
.open = pxa_rtc_open,
.release = pxa_rtc_release,
- .ioctl = pxa_rtc_ioctl,
.read_time = pxa_rtc_read_time,
.set_time = pxa_rtc_set_time,
.read_alarm = pxa_rtc_read_alarm,
.set_alarm = pxa_rtc_set_alarm,
+ .alarm_irq_enable = pxa_alarm_irq_enable,
+ .update_irq_enable = pxa_update_irq_enable,
.proc = pxa_rtc_proc,
.irq_set_state = pxa_periodic_irq_set_state,
.irq_set_freq = pxa_periodic_irq_set_freq,
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index a95f733bb15..36eb6618446 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -63,6 +63,8 @@ enum {
struct rp5c01_priv {
u32 __iomem *regs;
struct rtc_device *rtc;
+ spinlock_t lock; /* against concurrent RTC/NVRAM access */
+ struct bin_attribute nvram_attr;
};
static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
@@ -92,6 +94,7 @@ static int rp5c01_read_time(struct device *dev, struct rtc_time *tm)
{
struct rp5c01_priv *priv = dev_get_drvdata(dev);
+ spin_lock_irq(&priv->lock);
rp5c01_lock(priv);
tm->tm_sec = rp5c01_read(priv, RP5C01_10_SECOND) * 10 +
@@ -111,6 +114,7 @@ static int rp5c01_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_year += 100;
rp5c01_unlock(priv);
+ spin_unlock_irq(&priv->lock);
return rtc_valid_tm(tm);
}
@@ -119,6 +123,7 @@ static int rp5c01_set_time(struct device *dev, struct rtc_time *tm)
{
struct rp5c01_priv *priv = dev_get_drvdata(dev);
+ spin_lock_irq(&priv->lock);
rp5c01_lock(priv);
rp5c01_write(priv, tm->tm_sec / 10, RP5C01_10_SECOND);
@@ -139,6 +144,7 @@ static int rp5c01_set_time(struct device *dev, struct rtc_time *tm)
rp5c01_write(priv, tm->tm_year % 10, RP5C01_1_YEAR);
rp5c01_unlock(priv);
+ spin_unlock_irq(&priv->lock);
return 0;
}
@@ -147,6 +153,72 @@ static const struct rtc_class_ops rp5c01_rtc_ops = {
.set_time = rp5c01_set_time,
};
+
+/*
+ * The NVRAM is organized as 2 blocks of 13 nibbles of 4 bits.
+ * We provide access to them like AmigaOS does: the high nibble of each 8-bit
+ * byte is stored in BLOCK10, the low nibble in BLOCK11.
+ */
+
+static ssize_t rp5c01_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct rp5c01_priv *priv = dev_get_drvdata(dev);
+ ssize_t count;
+
+ spin_lock_irq(&priv->lock);
+
+ for (count = 0; size > 0 && pos < RP5C01_MODE; count++, size--) {
+ u8 data;
+
+ rp5c01_write(priv,
+ RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK10,
+ RP5C01_MODE);
+ data = rp5c01_read(priv, pos) << 4;
+ rp5c01_write(priv,
+ RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK11,
+ RP5C01_MODE);
+ data |= rp5c01_read(priv, pos++);
+ rp5c01_write(priv, RP5C01_MODE_TIMER_EN | RP5C01_MODE_MODE01,
+ RP5C01_MODE);
+ *buf++ = data;
+ }
+
+ spin_unlock_irq(&priv->lock);
+ return count;
+}
+
+static ssize_t rp5c01_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct rp5c01_priv *priv = dev_get_drvdata(dev);
+ ssize_t count;
+
+ spin_lock_irq(&priv->lock);
+
+ for (count = 0; size > 0 && pos < RP5C01_MODE; count++, size--) {
+ u8 data = *buf++;
+
+ rp5c01_write(priv,
+ RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK10,
+ RP5C01_MODE);
+ rp5c01_write(priv, data >> 4, pos);
+ rp5c01_write(priv,
+ RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK11,
+ RP5C01_MODE);
+ rp5c01_write(priv, data & 0xf, pos++);
+ rp5c01_write(priv, RP5C01_MODE_TIMER_EN | RP5C01_MODE_MODE01,
+ RP5C01_MODE);
+ }
+
+ spin_unlock_irq(&priv->lock);
+ return count;
+}
+
static int __init rp5c01_rtc_probe(struct platform_device *dev)
{
struct resource *res;
@@ -168,6 +240,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
goto out_free_priv;
}
+ sysfs_bin_attr_init(&priv->nvram_attr);
+ priv->nvram_attr.attr.name = "nvram";
+ priv->nvram_attr.attr.mode = S_IRUGO | S_IWUSR;
+ priv->nvram_attr.read = rp5c01_nvram_read;
+ priv->nvram_attr.write = rp5c01_nvram_write;
+ priv->nvram_attr.size = RP5C01_MODE;
+
+ spin_lock_init(&priv->lock);
+
rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -177,8 +258,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
priv->rtc = rtc;
platform_set_drvdata(dev, priv);
+
+ error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
+ if (error)
+ goto out_unregister;
+
return 0;
+out_unregister:
+ rtc_device_unregister(rtc);
out_unmap:
iounmap(priv->regs);
out_free_priv:
@@ -190,6 +278,7 @@ static int __exit rp5c01_rtc_remove(struct platform_device *dev)
{
struct rp5c01_priv *priv = platform_get_drvdata(dev);
+ sysfs_remove_bin_file(&dev->dev.kobj, &priv->nvram_attr);
rtc_device_unregister(priv->rtc);
iounmap(priv->regs);
kfree(priv);
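/*
 * Sketch of the nibble packing used by rp5c01_nvram_read/write() above
 * (illustrative only): byte 0xA5 at offset pos is stored as 0xA in RAM
 * BLOCK10[pos] and 0x5 in BLOCK11[pos], and reassembled on read as
 * (hi << 4) | lo, i.e. (0xA << 4) | 0x5 == 0xA5.
 */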
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 70b68d35f96..f57a87f4ae9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -1,5 +1,8 @@
/* drivers/rtc/rtc-s3c.c
*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
* Copyright (c) 2004,2006 Simtec Electronics
* Ben Dooks, <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
@@ -39,6 +42,7 @@ enum s3c_cpu_type {
static struct resource *s3c_rtc_mem;
+static struct clk *rtc_clk;
static void __iomem *s3c_rtc_base;
static int s3c_rtc_alarmno = NO_IRQ;
static int s3c_rtc_tickno = NO_IRQ;
@@ -53,6 +57,10 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
struct rtc_device *rdev = id;
rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
+
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+ writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
+
return IRQ_HANDLED;
}
@@ -61,6 +69,10 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
struct rtc_device *rdev = id;
rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
+
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+ writeb(S3C2410_INTP_TIC, s3c_rtc_base + S3C2410_INTP);
+
return IRQ_HANDLED;
}
@@ -94,7 +106,7 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
if (enabled)
tmp |= S3C64XX_RTCCON_TICEN;
- writeb(tmp, s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
} else {
tmp = readb(s3c_rtc_base + S3C2410_TICNT);
tmp &= ~S3C2410_TICNT_ENABLE;
@@ -128,7 +140,7 @@ static int s3c_rtc_setfreq(struct device *dev, int freq)
tmp |= (rtc_dev->max_user_freq / freq)-1;
- writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
+ writel(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
return 0;
@@ -298,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
s3c_rtc_setaie(alrm->enabled);
- if (alrm->enabled)
- enable_irq_wake(s3c_rtc_alarmno);
- else
- disable_irq_wake(s3c_rtc_alarmno);
-
return 0;
}
@@ -431,6 +438,10 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
s3c_rtc_setpie(&dev->dev, 0);
s3c_rtc_setaie(0);
+ clk_disable(rtc_clk);
+ clk_put(rtc_clk);
+ rtc_clk = NULL;
+
iounmap(s3c_rtc_base);
release_resource(s3c_rtc_mem);
kfree(s3c_rtc_mem);
@@ -442,6 +453,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
+ unsigned int tmp, i;
int ret;
pr_debug("%s: probe=%p\n", __func__, pdev);
@@ -488,6 +500,16 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nomap;
}
+ rtc_clk = clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(rtc_clk)) {
+ dev_err(&pdev->dev, "failed to find rtc clock source\n");
+ ret = PTR_ERR(rtc_clk);
+ rtc_clk = NULL;
+ goto err_clk;
+ }
+
+ clk_enable(rtc_clk);
+
/* check to see if everything is setup correctly */
s3c_rtc_enable(pdev, 1);
@@ -510,6 +532,15 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+ /* Check RTC Time */
+
+ for (i = S3C2410_RTCSEC; i <= S3C2410_RTCYEAR; i += 0x4) {
+ tmp = readb(s3c_rtc_base + i);
+
+ if ((tmp & 0xf) > 0x9 || ((tmp >> 4) & 0xf) > 0x9)
+ writeb(0, s3c_rtc_base + i);
+ }
+
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
rtc->max_user_freq = 32768;
else
@@ -523,6 +554,10 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
err_nortc:
s3c_rtc_enable(pdev, 0);
+ clk_disable(rtc_clk);
+ clk_put(rtc_clk);
+
+ err_clk:
iounmap(s3c_rtc_base);
err_nomap:
@@ -547,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
}
s3c_rtc_enable(pdev, 0);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(s3c_rtc_alarmno);
+
return 0;
}
@@ -560,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
}
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(s3c_rtc_alarmno);
+
return 0;
}
#else
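/*
 * The "Check RTC Time" loop added to s3c_rtc_probe() above resets any time
 * register that is not valid BCD. Worked example (illustrative only): a
 * seconds register reading 0x3A has a low digit of 0xA > 9, so it is
 * rewritten as 0; 0x59 passes both digit checks and is left alone.
 */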
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 33975e922d6..8373ca0de8e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -21,6 +21,7 @@
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -1324,14 +1325,14 @@ static void __dasd_device_check_expire(struct dasd_device *device)
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
- "cqr %p timed out (%is) but cannot be "
+ "cqr %p timed out (%lus) but cannot be "
"ended, retrying in 5 s\n",
cqr, (cqr->expires/HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
dev_err(&device->cdev->dev,
- "cqr %p timed out (%is), %i retries "
+ "cqr %p timed out (%lus), %i retries "
"remaining\n", cqr, (cqr->expires/HZ),
cqr->retries);
}
@@ -2196,7 +2197,7 @@ static void dasd_setup_queue(struct dasd_block *block)
*/
blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
- blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
}
/*
@@ -2235,6 +2236,7 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
if (!block)
return -ENODEV;
+ lock_kernel();
base = block->base;
atomic_inc(&block->open_count);
if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
@@ -2269,12 +2271,14 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
goto out;
}
+ unlock_kernel();
return 0;
out:
module_put(base->discipline->owner);
unlock:
atomic_dec(&block->open_count);
+ unlock_kernel();
return rc;
}
@@ -2282,8 +2286,10 @@ static int dasd_release(struct gendisk *disk, fmode_t mode)
{
struct dasd_block *block = disk->private_data;
+ lock_kernel();
atomic_dec(&block->open_count);
module_put(block->base->discipline->owner);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index bed7b4634cc..8d41f3ed38d 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1083,6 +1083,49 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
+/*
+ * expiration time for default requests
+ */
+static ssize_t
+dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_expires_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((strict_strtoul(buf, 10, &val) != 0) ||
+ (val > DASD_EXPIRES_MAX) || val == 0) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+
+ if (val)
+ device->default_expires = val;
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
+
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1094,6 +1137,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_eer_enabled.attr,
&dev_attr_erplog.attr,
&dev_attr_failfast.attr,
+ &dev_attr_expires.attr,
NULL,
};
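/*
 * The new "expires" attribute can be driven from user space. A minimal
 * sketch, assuming a DASD with bus ID 0.0.1234 and the usual ccw sysfs
 * layout (both are assumptions):
 */
#include <stdio.h>

static int set_dasd_expires(const char *busid, unsigned long secs)
{
	char path[96];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/ccw/devices/%s/expires", busid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* dasd_expires_store() rejects 0 and values above DASD_EXPIRES_MAX */
	fprintf(f, "%lu\n", secs);
	return fclose(f);
}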
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 687f323cdc3..2b3bc3ec054 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
sizeof(struct dasd_diag_req)) / \
sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES 32
-#define DIAG_TIMEOUT 50 * HZ
+#define DIAG_TIMEOUT 50
static struct dasd_discipline dasd_diag_discipline;
@@ -360,6 +360,8 @@ dasd_diag_check_device(struct dasd_device *device)
goto out;
}
+ device->default_expires = DIAG_TIMEOUT;
+
/* Figure out position of label block */
switch (private->rdc_data.vdev_class) {
case DEV_CLASS_FBA:
@@ -563,7 +565,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
- cqr->expires = DIAG_TIMEOUT;
+ cqr->expires = memdev->default_expires * HZ;
cqr->status = DASD_CQR_FILLED;
return cqr;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ab84da5592e..66360c24bd4 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -82,6 +82,14 @@ static struct ccw_driver dasd_eckd_driver; /* see below */
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
+/* emergency request for reserve/release */
+static struct {
+ struct dasd_ccw_req cqr;
+ struct ccw1 ccw;
+ char data[32];
+} *dasd_reserve_req;
+static DEFINE_MUTEX(dasd_reserve_mutex);
+
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
@@ -1107,8 +1115,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
struct dasd_eckd_private *private;
struct dasd_block *block;
struct dasd_uid temp_uid;
- int is_known, rc;
+ int is_known, rc, i;
int readonly;
+ unsigned long value;
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
@@ -1143,6 +1152,18 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
+ /* set default timeout */
+ device->default_expires = DASD_EXPIRES;
+ if (private->gneq) {
+ value = 1;
+ for (i = 0; i < private->gneq->timeout.value; i++)
+ value = 10 * value;
+ value = value * private->gneq->timeout.number;
+ /* do not accept useless values */
+ if (value != 0 && value <= DASD_EXPIRES_MAX)
+ device->default_expires = value;
+ }
+
/* Generate device unique id */
rc = dasd_eckd_generate_uid(device);
if (rc)
@@ -1973,7 +1994,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = private->path_data.ppm;
cqr->retries = 256;
cqr->buildclk = get_clock();
@@ -2150,7 +2171,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = private->path_data.ppm;
cqr->retries = 256;
cqr->buildclk = get_clock();
@@ -2398,7 +2419,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = private->path_data.ppm;
cqr->retries = 256;
cqr->buildclk = get_clock();
@@ -2645,15 +2666,23 @@ dasd_eckd_release(struct dasd_device *device)
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
+ int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "Could not allocate initialization request");
- return PTR_ERR(cqr);
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
@@ -2671,7 +2700,10 @@ dasd_eckd_release(struct dasd_device *device)
rc = dasd_sleep_on_immediatly(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -2687,15 +2719,23 @@ dasd_eckd_reserve(struct dasd_device *device)
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
+ int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "Could not allocate initialization request");
- return PTR_ERR(cqr);
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
@@ -2713,7 +2753,10 @@ dasd_eckd_reserve(struct dasd_device *device)
rc = dasd_sleep_on_immediatly(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -2728,15 +2771,23 @@ dasd_eckd_steal_lock(struct dasd_device *device)
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
+ int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "Could not allocate initialization request");
- return PTR_ERR(cqr);
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
@@ -2754,7 +2805,10 @@ dasd_eckd_steal_lock(struct dasd_device *device)
rc = dasd_sleep_on_immediatly(cqr);
- dasd_sfree_request(cqr, cqr->memdev);
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -3488,10 +3542,15 @@ dasd_eckd_init(void)
int ret;
ASCEBC(dasd_eckd_discipline.ebcname, 4);
+ dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
+ GFP_KERNEL | GFP_DMA);
+ if (!dasd_reserve_req)
+ return -ENOMEM;
ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret)
wait_for_device_probe();
-
+ else
+ kfree(dasd_reserve_req);
return ret;
}
@@ -3499,6 +3558,7 @@ static void __exit
dasd_eckd_cleanup(void)
{
ccw_driver_unregister(&dasd_eckd_driver);
+ kfree(dasd_reserve_req);
}
module_init(dasd_eckd_init);
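
Note on the hunks above: reserve, release and steal-lock now fall back to a statically allocated emergency request when dasd_smalloc_request() fails under memory pressure, serialized by dasd_reserve_mutex. A minimal sketch of that pattern, factored into a hypothetical helper (not part of the patch) purely for illustration:

/* Sketch only: the fallback pattern used above - a preallocated emergency
 * request guarded by a mutex, taken when normal allocation fails. */
static struct dasd_ccw_req *dasd_eckd_get_cqr(struct dasd_device *device,
					      int *useglobal)
{
	struct dasd_ccw_req *cqr;

	*useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (!IS_ERR(cqr))
		return cqr;

	mutex_lock(&dasd_reserve_mutex);	/* serialize emergency users */
	*useglobal = 1;
	cqr = &dasd_reserve_req->cqr;
	memset(cqr, 0, sizeof(*cqr));
	memset(&dasd_reserve_req->ccw, 0, sizeof(dasd_reserve_req->ccw));
	cqr->cpaddr = &dasd_reserve_req->ccw;
	cqr->data = &dasd_reserve_req->data;
	cqr->magic = DASD_ECKD_MAGIC;
	return cqr;
}

The caller then releases either the mutex (useglobal set) or the allocated request, exactly as the three functions above do after dasd_sleep_on_immediatly().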
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index dd6385a5af1..0eb49655a6c 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -320,7 +320,12 @@ struct dasd_gneq {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
- __u8 reserved[7];
+ __u8 reserved[5];
+ struct {
+ __u8 value:2;
+ __u8 number:6;
+ } __attribute__ ((packed)) timeout;
+ __u8 reserved3;
__u16 subsystemID;
__u8 reserved2[22];
} __attribute__ ((packed));
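
The new timeout bitfield above feeds dasd_eckd_check_characteristics(): the default expiration is decoded as number * 10^value seconds, e.g. number = 3, value = 2 gives 300 s, and is only accepted if non-zero and at most DASD_EXPIRES_MAX. A small sketch of that decode as a standalone helper (hypothetical, mirroring the loop in the eckd hunk):

/* Sketch only: decode the configuration-data timeout as the patch does. */
static unsigned long dasd_eckd_gneq_timeout(struct dasd_gneq *gneq)
{
	unsigned long secs = gneq->timeout.number;
	int i;

	for (i = 0; i < gneq->timeout.value; i++)
		secs *= 10;		/* number * 10^value seconds */
	return secs;	/* caller rejects 0 or values above DASD_EXPIRES_MAX */
}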
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index dd88803e489..7158f9528ec 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -701,7 +701,7 @@ int __init dasd_eer_init(void)
void dasd_eer_exit(void)
{
if (dasd_eer_dev) {
- WARN_ON(misc_deregister(dasd_eer_dev) != 0);
+ misc_deregister(dasd_eer_dev);
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 37282b90eec..bec5486e0e6 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -163,6 +163,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
return rc;
}
+ device->default_expires = DASD_EXPIRES;
+
readonly = dasd_device_is_ro(device);
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -370,7 +372,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
- cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+ cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = 32;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 49b431d135e..500678d7116 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -186,7 +186,7 @@ struct dasd_ccw_req {
/* ... and how */
unsigned long starttime; /* jiffies time of request start */
- int expires; /* expiration period in jiffies */
+ unsigned long expires; /* expiration period in jiffies */
char lpm; /* logical path mask */
void *data; /* pointer to data area */
@@ -224,6 +224,9 @@ struct dasd_ccw_req {
#define DASD_CQR_CLEARED 0x84 /* request was cleared */
#define DASD_CQR_SUCCESS 0x85 /* request was successful */
+/* default expiration time*/
+#define DASD_EXPIRES 300
+#define DASD_EXPIRES_MAX 40000000
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
@@ -404,6 +407,9 @@ struct dasd_device {
/* hook for alias management */
struct list_head alias_list;
+
+ /* default expiration time in s */
+ unsigned long default_expires;
};
struct dasd_block {
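
Taken together, the dasd hunks replace the hard-coded per-request expiration (formerly 5 * 60 * HZ, or 50 * HZ for DIAG) with a per-device default_expires kept in seconds and converted to jiffies when a cqr is built; DASD_EXPIRES (300 s) is the fallback set by each discipline, and the sysfs/gneq values are bounded by DASD_EXPIRES_MAX. A minimal sketch of the conversion, as a hypothetical helper not present in the patch:

/* Sketch only: seconds-per-device -> jiffies-per-request, as done inline
 * in the build_cp routines above. */
static unsigned long dasd_default_expires_jiffies(struct dasd_device *device)
{
	unsigned long secs = device->default_expires;

	if (!secs)		/* defensive; disciplines always set DASD_EXPIRES */
		secs = DASD_EXPIRES;
	return secs * HZ;	/* matches cqr->expires = ... * HZ above */
}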
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9b43ae94beb..2bd72aa34c5 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -775,6 +776,7 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
struct dcssblk_dev_info *dev_info;
int rc;
+ lock_kernel();
dev_info = bdev->bd_disk->private_data;
if (NULL == dev_info) {
rc = -ENODEV;
@@ -784,6 +786,7 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
bdev->bd_block_size = 4096;
rc = 0;
out:
+ unlock_kernel();
return rc;
}
@@ -794,6 +797,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
struct segment_info *entry;
int rc;
+ lock_kernel();
if (!dev_info) {
rc = -ENODEV;
goto out;
@@ -811,6 +815,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
up_write(&dcssblk_devices_sem);
rc = 0;
out:
+ unlock_kernel();
return rc;
}
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index c6cbcb3f925..0e9a309b966 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,12 +16,11 @@
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
-static struct tty_struct *sysrq_tty;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
- handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
+ handle_sysrq(ctrlchar_sysrq_key);
}
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
- sysrq_tty = tty;
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 18d9a497863..8cd58e412b5 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
- handle_sysrq(value, kbd->tty);
+ handle_sysrq(value);
return;
}
if (value == '-') {
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 2ed3f82e5c3..e021ec663ef 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -627,7 +627,7 @@ out_iucv:
static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
- WARN_ON(misc_deregister(&mon_dev) != 0);
+ misc_deregister(&mon_dev);
device_unregister(monreader_device);
driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 98a49dfda1d..572a1e7fd09 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -380,7 +380,7 @@ out_driver:
static void __exit mon_exit(void)
{
- WARN_ON(misc_deregister(&mon_dev) != 0);
+ misc_deregister(&mon_dev);
platform_device_unregister(monwriter_pdev);
platform_driver_unregister(&monwriter_pdrv);
}
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 097da8ce6be..85cf607fc78 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
@@ -216,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
if (!blkdat->request_queue)
return -ENOMEM;
- elevator_exit(blkdat->request_queue->elevator);
- rc = elevator_init(blkdat->request_queue, "noop");
+ rc = elevator_change(blkdat->request_queue, "noop");
if (rc)
goto cleanup_queue;
@@ -361,6 +361,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
struct tape_device * device;
int rc;
+ lock_kernel();
device = tape_get_device(disk->private_data);
if (device->required_tapemarks) {
@@ -384,12 +385,14 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
* is called.
*/
tape_state_set(device, TS_BLKUSE);
+ unlock_kernel();
return 0;
release:
tape_release(device);
put_device:
tape_put_device(device);
+ unlock_kernel();
return rc;
}
@@ -403,10 +406,12 @@ static int
tapeblock_release(struct gendisk *disk, fmode_t mode)
{
struct tape_device *device = disk->private_data;
-
+
+ lock_kernel();
tape_state_set(device, TS_IN_USE);
tape_release(device);
tape_put_device(device);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7f206ed44fd..d15f8b4d78b 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -38,9 +38,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
+ if (!req->singlepath) {
+ req->mask = 0;
+ goto out;
+ }
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask >>= 1, req->lpm);
-
+out:
return req->mask;
}
@@ -113,8 +117,12 @@ void ccw_request_start(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- /* Try all paths twice to counter link flapping. */
- req->mask = 0x8080;
+ if (req->singlepath) {
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ } else
+ req->mask = req->lpm;
+
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask, req->lpm);
req->drc = 0;
@@ -182,6 +190,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
/* Ask the driver what to do */
if (cdev->drv && cdev->drv->uc_handler) {
todo = cdev->drv->uc_handler(cdev, lcirb);
+ CIO_TRACE_EVENT(2, "uc_response");
+ CIO_HEX_EVENT(2, &todo, sizeof(todo));
switch (todo) {
case UC_TODO_RETRY:
return IO_STATUS_ERROR;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 407d0e9adfa..4cbb1a6ca33 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,6 +29,7 @@
#include "chsc.h"
static void *sei_page;
+static DEFINE_SPINLOCK(siosl_lock);
static DEFINE_SPINLOCK(sda_lock);
/**
@@ -48,6 +49,7 @@ int chsc_error_from_response(int response)
case 0x0007:
case 0x0008:
case 0x000a:
+ case 0x0104:
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
@@ -974,3 +976,49 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
+static struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
+
+int chsc_siosl(struct subchannel_id schid)
+{
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&siosl_lock, flags);
+ memset(&siosl_area, 0, sizeof(siosl_area));
+ siosl_area.request.length = 0x0010;
+ siosl_area.request.code = 0x0046;
+ siosl_area.word1 = 0x80000000;
+ siosl_area.sid = schid;
+
+ ccode = chsc(&siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area.response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area.response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&siosl_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 37aa611d4ac..5453013f094 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -80,4 +80,6 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_error_from_response(int response);
+int chsc_siosl(struct subchannel_id schid);
+
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6d229f3523a..51bd3687d16 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -36,6 +36,7 @@
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
+#include "chsc.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
@@ -486,9 +487,11 @@ static int online_store_handle_offline(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
spin_unlock_irq(cdev->ccwlock);
- } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ return 0;
+ }
+ if (cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
- return 0;
+ return -EINVAL;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
@@ -505,8 +508,8 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
return -EAGAIN;
}
if (cdev->drv && cdev->drv->set_online)
- ccw_device_set_online(cdev);
- return 0;
+ return ccw_device_set_online(cdev);
+ return -EINVAL;
}
static int online_store_handle_online(struct ccw_device *cdev, int force)
@@ -598,6 +601,25 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
}
}
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int rc;
+
+ rc = chsc_siosl(sch->schid);
+ if (rc < 0) {
+ pr_warning("Logging for subchannel 0.%x.%04x failed with "
+ "errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ return rc;
+ }
+ pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return count;
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -605,10 +627,12 @@ static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
+ &dev_attr_logging.attr,
NULL,
};
@@ -2036,6 +2060,21 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
}
}
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
+
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
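
The logging support added here is reachable two ways: a write-only "logging" sysfs attribute on the subchannel, and the ccw_device_siosl() wrapper for device drivers, both ending up in chsc_siosl(). A hedged sketch of a driver-side call site (the mydrv_* name is illustrative; the patch itself only adds the wrapper and the attribute):

/* Sketch only: a hypothetical driver requesting model-dependent logging,
 * e.g. after repeated unit checks. */
static void mydrv_request_logging(struct ccw_device *cdev)
{
	int rc = ccw_device_siosl(cdev);

	if (rc)
		dev_warn(&cdev->dev,
			 "could not trigger logging (rc=%d)\n", rc);
}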
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 6facb5499a6..82a5ad0d63f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -208,6 +208,7 @@ static void spid_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
+ req->singlepath = 1;
req->callback = spid_callback;
spid_do(cdev);
}
@@ -420,6 +421,7 @@ static void verify_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
+ req->singlepath = 1;
if (cdev->private->flags.pgroup) {
CIO_TRACE_EVENT(4, "snid");
CIO_HEX_EVENT(4, devid, sizeof(*devid));
@@ -507,6 +509,7 @@ void ccw_device_disband_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->singlepath = 1;
req->callback = disband_callback;
fn = SPID_FUNC_DISBAND;
if (cdev->private->flags.mpath)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index b9ce712a7f2..469ef93f230 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -92,11 +92,12 @@ enum io_status {
* @filter: optional callback to adjust request status based on IRB data
* @callback: final callback
* @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
* @mask: current path mask
* @retries: current number of retries
* @drc: delayed return code
- * @cancel: non-zero if request was cancelled
- * @done: non-zero if request was finished
*/
struct ccw_request {
struct ccw1 *cp;
@@ -108,12 +109,13 @@ struct ccw_request {
enum io_status);
void (*callback)(struct ccw_device *, void *, int);
void *data;
+ unsigned int singlepath:1;
/* These fields are used internally. */
+ unsigned int cancel:1;
+ unsigned int done:1;
u16 mask;
u16 retries;
int drc;
- int cancel:1;
- int done:1;
} __attribute__((packed));
/*
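
With the new singlepath flag, ccw_request_start() uses the full lpm mask in one go when the flag is clear, and ccwreq_next_path() stops retrying alternate paths; the PGID/SNID/disband requests in device_pgid.c keep the old one-path-at-a-time behaviour by setting singlepath = 1. A hedged sketch of a multipath request setup inside the cio layer (illustrative only; timeout and retry values are made up):

/* Sketch only: a request driving all indicated paths at once. */
static void mydrv_start_request(struct ccw_device *cdev, struct ccw1 *cp,
				u8 lpm,
				void (*callback)(struct ccw_device *, void *,
						 int))
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp = cp;
	req->lpm = lpm;		/* full logical path mask */
	req->singlepath = 0;	/* start mask = req->lpm, no per-path walk */
	req->timeout = 5 * HZ;	/* illustrative value */
	req->maxretries = 3;	/* illustrative value */
	req->callback = callback;
	ccw_request_start(cdev);
}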
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44..0f4ef8769a3 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat {
unsigned int eqbs_partial;
unsigned int sqbs;
unsigned int sqbs_partial;
+ unsigned int int_discarded;
} ____cacheline_aligned;
struct qdio_queue_perf_stat {
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat {
unsigned int nr_sbal_total;
};
+enum qdio_queue_irq_states {
+ QDIO_QUEUE_IRQS_DISABLED,
+};
+
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
@@ -231,6 +236,10 @@ struct qdio_input_q {
int ack_count;
/* last time of noticing incoming data */
u64 timestamp;
+ /* upper-layer polling flag */
+ unsigned long queue_irq_state;
+ /* callback to start upper-layer polling */
+ void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
struct qdio_output_q {
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q)
#define sub_buf(bufnr, dec) \
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
+#define queue_irqs_enabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+
+#define TIQDIO_SHARED_IND 63
+
+/* device state change indicators */
+struct indicator_t {
+ u32 ind; /* u32 because of compare-and-swap performance */
+ atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+extern struct indicator_t *q_indicators;
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d53..28868e7471a 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "DSCI: %d nr_used: %d\n",
*(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
- seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
- seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
- q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
+ seq_printf(m, "ftc: %d last_move: %d\n",
+ q->first_to_check, q->last_move);
+ if (q->is_input_q) {
+ seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+ q->u.in.polling, q->u.in.ack_start,
+ q->u.in.ack_count);
+ seq_printf(m, "IRQs disabled: %u\n",
+ test_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state));
+ }
seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
return 0;
}
-static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
- size_t count, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct qdio_q *q = seq->private;
-
- if (!q)
- return 0;
- if (q->is_input_q)
- xchg(q->irq_ptr->dsci, 1);
- local_bh_disable();
- tasklet_schedule(&q->tasklet);
- local_bh_enable();
- return count;
-}
-
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
.owner = THIS_MODULE,
.open = qstat_seq_open,
.read = seq_read,
- .write = qstat_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
@@ -166,7 +156,8 @@ static char *qperf_names[] = {
"QEBSM eqbs",
"QEBSM eqbs partial",
"QEBSM sqbs",
- "QEBSM sqbs partial"
+ "QEBSM sqbs partial",
+ "Discarded interrupts"
};
static int qperf_show(struct seq_file *m, void *v)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8..5fcfa7f9e9e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
- for_each_input_queue(irq_ptr, q, i)
- tasklet_schedule(&q->tasklet);
+ for_each_input_queue(irq_ptr, q, i) {
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else
+ tasklet_schedule(&q->tasklet);
+ }
if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
}
EXPORT_SYMBOL_GPL(do_QDIO);
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - success
+ * 1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ WARN_ON(queue_irqs_enabled(q));
+
+ if (!shared_ind(q->irq_ptr))
+ xchg(q->irq_ptr->dsci, 0);
+
+ qdio_stop_polling(q);
+ clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+ goto rescan;
+ if (!qdio_inbound_q_done(q))
+ goto rescan;
+ return 0;
+
+rescan:
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ * < 0 - error
+ * = 0 - no new buffers found
+ * > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+ int *error)
+{
+ struct qdio_q *q;
+ int start, end;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+ WARN_ON(queue_irqs_enabled(q));
+
+ qdio_sync_after_thinint(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return 0;
+
+ /* Note: upper-layer MUST stop processing immediately here ... */
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return -EIO;
+
+ start = q->first_to_kick;
+ end = q->first_to_check;
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_kick = end;
+ q->qdio_error = 0;
+ return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - interrupts were already disabled
+ * 1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
+
static int __init init_QDIO(void)
{
int rc;
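
The qdio_start_irq()/qdio_get_next_buffers()/qdio_stop_irq() trio added above lets an upper-layer driver take over input processing: the queue_start_poll callback is invoked from the (thin) interrupt handler with the QDIO_QUEUE_IRQS_DISABLED bit already set, the driver pulls buffers itself, and qdio_start_irq() re-arms interrupts, returning 1 when new data arrived in the meantime and polling must continue. A hedged sketch of such a callback (mydrv_* is illustrative; a real driver would defer the loop to NAPI/softirq context rather than run it in the handler):

/* Sketch only: a hypothetical queue_start_poll implementation. */
static void mydrv_queue_start_poll(struct ccw_device *cdev, int queue,
				   unsigned long int_parm)
{
	int bufnr, error, count;

	do {
		while ((count = qdio_get_next_buffers(cdev, queue,
						      &bufnr, &error)) > 0) {
			/* consume 'count' buffers starting at 'bufnr',
			 * taking 'error' into account */
		}
		if (count < 0)
			return;	/* -ENODEV or -EIO: stop processing */
		/* 0: done, interrupts re-enabled; 1: new data, poll again */
	} while (qdio_start_irq(cdev, queue) != 0);
}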
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df..a13cf7ec64b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll;
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f15..752dbee06af 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -25,24 +25,20 @@
*/
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
-#define TIQDIO_SHARED_IND 63
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);
/* adapter local summary indicator */
-static unsigned char *tiqdio_alsi;
+static u8 *tiqdio_alsi;
-/* device state change indicators */
-struct indicator_t {
- u32 ind; /* u32 because of compare-and-swap performance */
- atomic_t count; /* use count, 0 or 1 for non-shared indicators */
-};
-static struct indicator_t *q_indicators;
+struct indicator_t *q_indicators;
static int css_qdio_omit_svs;
+static u64 last_ai_time;
+
static inline unsigned long do_clear_global_summary(void)
{
register unsigned long __fn asm("1") = 3;
@@ -116,59 +112,73 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
}
}
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int shared_ind_used(void)
{
- return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+ return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
+ * @alsi: pointer to adapter local summary indicator
+ * @data: NULL
*/
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
+static void tiqdio_thinint_handler(void *alsi, void *data)
{
struct qdio_q *q;
+ last_ai_time = S390_lowcore.int_clock;
+
/*
* SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
+ * avoidance (SVS clears adapter interrupt suppression overwrite).
*/
if (!css_qdio_omit_svs)
do_clear_global_summary();
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now
- */
- xchg((u8 *)ind, 0);
+ /* reset local summary indicator */
+ if (shared_ind_used())
+ xchg(tiqdio_alsi, 0);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
/* check for work on all inbound thinint queues */
- list_for_each_entry_rcu(q, &tiq_list, entry)
+ list_for_each_entry_rcu(q, &tiq_list, entry) {
+
/* only process queues from changed sets */
- if (*q->irq_ptr->dsci) {
- qperf_inc(q, adapter_int);
+ if (!*q->irq_ptr->dsci)
+ continue;
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+
+ /* avoid dsci clear here, done after processing */
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
/* only clear it if the indicator is non-shared */
if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
/*
- * don't call inbound processing directly since
- * that could starve other thinint queues
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
*/
tasklet_schedule(&q->tasklet);
}
-
+ qperf_inc(q, adapter_int);
+ }
rcu_read_unlock();
/*
- * if we used the shared indicator clear it now after all queues
- * were processed
+ * If the shared indicator was used clear it now after all queues
+ * were processed.
*/
- if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
+ if (shared_ind_used()) {
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
/* prevent racing */
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 977bb4d4ed1..456b1874339 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -100,6 +100,6 @@ config QETH_IPV6
config CCWGROUP
tristate
- default (LCS || CTCM || QETH)
+ default (LCS || CTCM || QETH || CLAW)
endmenu
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index a75ed3083a6..8e4153d740f 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
struct chbk *p_ch;
CLAW_DBF_TEXT(4, trace, "claw_tx");
- p_ch=&privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
rc=claw_hw_tx( skb, dev, 1 );
spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -407,7 +407,7 @@ static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
struct sk_buff *new_skb,*held_skb;
- struct chbk *p_ch = &privptr->channel[WRITE];
+ struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
struct claw_env *p_env = privptr->p_env;
int pkt_cnt,pk_ind,so_far;
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev)
privptr->p_env->write_size=CLAW_FRAME_SIZE;
}
claw_set_busy(dev);
- tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
- (unsigned long) &privptr->channel[READ]);
+ tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+ (unsigned long) &privptr->channel[READ_CHANNEL]);
for ( i = 0; i < 2; i++) {
CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
init_waitqueue_head(&privptr->channel[i].wait);
/* skb_queue_head_init(&p_ch->io_queue); */
- if (i == WRITE)
+ if (i == WRITE_CHANNEL)
skb_queue_head_init(
- &privptr->channel[WRITE].collect_queue);
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
privptr->channel[i].flag_a = 0;
privptr->channel[i].IO_active = 0;
privptr->channel[i].flag &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev)
if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
del_timer(&timer);
}
- if ((((privptr->channel[READ].last_dstat |
- privptr->channel[WRITE].last_dstat) &
+ if ((((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
- (((privptr->channel[READ].flag |
- privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
- dev_info(&privptr->channel[READ].cdev->dev,
+ (((privptr->channel[READ_CHANNEL].flag |
+ privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+ dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
"%s: remote side is not ready\n", dev->name);
CLAW_DBF_TEXT(2, trace, "notrdy");
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev)
}
}
privptr->buffs_alloc = 0;
- privptr->channel[READ].flag= 0x00;
- privptr->channel[WRITE].flag = 0x00;
+ privptr->channel[READ_CHANNEL].flag = 0x00;
+ privptr->channel[WRITE_CHANNEL].flag = 0x00;
privptr->p_buff_ccw=NULL;
privptr->p_buff_read=NULL;
privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev,
}
/* Try to extract channel from driver data. */
- if (privptr->channel[READ].cdev == cdev)
- p_ch = &privptr->channel[READ];
- else if (privptr->channel[WRITE].cdev == cdev)
- p_ch = &privptr->channel[WRITE];
+ if (privptr->channel[READ_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[READ_CHANNEL];
+ else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[WRITE_CHANNEL];
else {
dev_warn(&cdev->dev, "The device is not a CLAW device\n");
CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev,
claw_clearbit_busy(TB_TX, dev);
claw_clear_busy(dev);
}
- p_ch_r = (struct chbk *)&privptr->channel[READ];
+ p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
if (test_and_set_bit(CLAW_BH_ACTIVE,
(void *)&p_ch_r->flag_a) == 0)
tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev)
for ( i = 1; i >=0 ; i--) {
spin_lock_irqsave(
get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
- /* del_timer(&privptr->channel[READ].timer); */
+ /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
privptr->channel[i].claw_state = CLAW_STOP;
privptr->channel[i].IO_active = 0;
parm = (unsigned long) &privptr->channel[i];
- if (i == WRITE)
+ if (i == WRITE_CHANNEL)
claw_purge_skb_queue(
- &privptr->channel[WRITE].collect_queue);
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
rc = ccw_device_halt (privptr->channel[i].cdev, parm);
if (privptr->system_validate_comp==0x00) /* never opened? */
init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev)
privptr->mtc_skipping = 1;
privptr->mtc_offset=0;
- if (((privptr->channel[READ].last_dstat |
- privptr->channel[WRITE].last_dstat) &
+ if (((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
- dev_warn(&privptr->channel[READ].cdev->dev,
+ dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
"Deactivating %s completed with incorrect"
" subchannel status "
"(read %02x, write %02x)\n",
dev->name,
- privptr->channel[READ].last_dstat,
- privptr->channel[WRITE].last_dstat);
+ privptr->channel[READ_CHANNEL].last_dstat,
+ privptr->channel[WRITE_CHANNEL].last_dstat);
CLAW_DBF_TEXT(2, trace, "badclose");
}
CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
CLAW_DBF_TEXT(4, trace, "hw_tx");
privptr = (struct claw_privbk *)(dev->ml_priv);
- p_ch=(struct chbk *)&privptr->channel[WRITE];
+ p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
p_env =privptr->p_env;
claw_free_wrt_buf(dev); /* Clean up free chain if posible */
/* scan the write queue to free any completed write packets */
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
claw_strt_out_IO(dev );
claw_free_wrt_buf( dev );
if (privptr->write_free_count==0) {
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
goto Done;
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
}
/* tx lock */
if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
privptr->p_write_free_chain == NULL ) {
claw_setbit_busy(TB_NOBUFFER,dev);
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
while (len_of_data > 0) {
p_this_ccw=privptr->p_write_free_chain; /* get a block */
if (p_this_ccw == NULL) { /* lost the race */
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
goto Done2;
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
*catch up to each other */
privptr = dev->ml_priv;
p_env=privptr->p_env;
- tdev = &privptr->channel[READ].cdev->dev;
+ tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
memcpy( &temp_host_name, p_env->host_name, 8);
memcpy( &temp_ws_name, p_env->adapter_name , 8);
dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
dev->name, temp_ws_name,
p_ctlbk->linkid);
privptr->active_link_ID = p_ctlbk->linkid;
- p_ch = &privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
break;
case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
"%s: Confirmed Now packing\n", dev->name);
p_env->packing = DO_PACKED;
}
- p_ch = &privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
wake_up(&p_ch->wait);
} else {
dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev )
p_packd=NULL;
privptr = dev->ml_priv;
- p_dev = &privptr->channel[READ].cdev->dev;
+ p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
p_env = privptr->p_env;
p_this_ccw=privptr->p_read_active_first;
while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock )
struct ccwbk*p_ccwbk;
struct chbk *p_ch;
struct clawh *p_clawh;
- p_ch=&privptr->channel[READ];
+ p_ch = &privptr->channel[READ_CHANNEL];
CLAW_DBF_TEXT(4, trace, "StRdNter");
p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev )
return;
}
privptr = (struct claw_privbk *)dev->ml_priv;
- p_ch=&privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
CLAW_DBF_TEXT(4, trace, "strt_io");
p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
if (dev->flags & IFF_RUNNING)
claw_release(dev);
if (privptr) {
- privptr->channel[READ].ndev = NULL; /* say it's free */
+ privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
}
dev->ml_priv = NULL;
#ifdef MODULE
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev)
struct ccw_dev_id dev_id;
dev_info(&cgdev->dev, "add for %s\n",
- dev_name(&cgdev->cdev[READ]->dev));
+ dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
CLAW_DBF_TEXT(2, setup, "new_dev");
privptr = dev_get_drvdata(&cgdev->dev);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
if (!privptr)
return -ENODEV;
p_env = privptr->p_env;
- ccw_device_get_id(cgdev->cdev[READ], &dev_id);
- p_env->devno[READ] = dev_id.devno;
- ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
- p_env->devno[WRITE] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+ p_env->devno[READ_CHANNEL] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+ p_env->devno[WRITE_CHANNEL] = dev_id.devno;
ret = add_channel(cgdev->cdev[0],0,privptr);
if (ret == 0)
ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev)
" failed with error code %d\n", ret);
goto out;
}
- ret = ccw_device_set_online(cgdev->cdev[READ]);
+ ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
if (ret != 0) {
dev_warn(&cgdev->dev,
"Setting the read subchannel online"
" failed with error code %d\n", ret);
goto out;
}
- ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+ ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
if (ret != 0) {
dev_warn(&cgdev->dev,
"Setting the write subchannel online "
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
}
dev->ml_priv = privptr;
dev_set_drvdata(&cgdev->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
/* sysfs magic */
SET_NETDEV_DEV(dev, &cgdev->dev);
if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
goto out;
}
}
- privptr->channel[READ].ndev = dev;
- privptr->channel[WRITE].ndev = dev;
+ privptr->channel[READ_CHANNEL].ndev = dev;
+ privptr->channel[WRITE_CHANNEL].ndev = dev;
privptr->p_env->ndev = dev;
dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
dev->name, p_env->read_size,
p_env->write_size, p_env->read_buffers,
- p_env->write_buffers, p_env->devno[READ],
- p_env->devno[WRITE]);
+ p_env->write_buffers, p_env->devno[READ_CHANNEL],
+ p_env->devno[WRITE_CHANNEL]);
dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
":%.8s api_type: %.8s\n",
dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return -ENODEV;
- ndev = priv->channel[READ].ndev;
+ ndev = priv->channel[READ_CHANNEL].ndev;
if (ndev) {
/* Close the device */
- dev_info(&cgdev->dev, "%s: shutting down \n",
+ dev_info(&cgdev->dev, "%s: shutting down\n",
ndev->name);
if (ndev->flags & IFF_RUNNING)
ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
unregister_netdev(ndev);
ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
claw_free_netdevice(ndev, 1);
- priv->channel[READ].ndev = NULL;
- priv->channel[WRITE].ndev = NULL;
+ priv->channel[READ_CHANNEL].ndev = NULL;
+ priv->channel[WRITE_CHANNEL].ndev = NULL;
priv->p_env->ndev = NULL;
}
ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
priv->channel[1].irb=NULL;
kfree(priv);
dev_set_drvdata(&cgdev->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
put_device(&cgdev->dev);
return;
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 46d59a13db1..1bc5904df19 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -74,8 +74,8 @@
#define MAX_ENVELOPE_SIZE 65536
#define CLAW_DEFAULT_MTU_SIZE 4096
#define DEF_PACK_BUFSIZE 32768
-#define READ 0
-#define WRITE 1
+#define READ_CHANNEL 0
+#define WRITE_CHANNEL 1
#define TB_TX 0 /* sk buffer handling in process */
#define TB_STOP 1 /* network device stop in process */
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 70eb7f13841..8c921fc3511 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
if ((fsmstate == CTC_STATE_SETUPWAIT) &&
(ch->protocol == CTCM_PROTO_OS390)) {
/* OS/390 resp. z/OS */
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
* if in compatibility mode, since VM TCP delays the initial
* frame until it has some data to send.
*/
- if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
(ch->protocol != CTCM_PROTO_S390))
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
ch->ccw[1].count = 2; /* Transfer only length */
- fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
(unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
* reply from VM TCP which brings up the RX channel to it's
* final state.
*/
- if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
(ch->protocol == CTCM_PROTO_S390)) {
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
int rc;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
- CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
if (ch->trans_skb != NULL) {
clear_normalized_cda(&ch->ccw[1]);
dev_kfree_skb(ch->trans_skb);
ch->trans_skb = NULL;
}
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ch->ccw[1].cmd_code = CCW_CMD_READ;
ch->ccw[1].flags = CCW_FLAG_SLI;
ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
"%s(%s): %s trans_skb alloc delayed "
"until first transfer",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
}
ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
ch->th_seg = 0x00;
ch->th_seq_num = 0x00;
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
skb_queue_purge(&ch->io_queue);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_STARTRETRY);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
- if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
+ if (!IS_MPC(ch) &&
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc,
@@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s(%s) : %s error during %s channel setup state=%s\n",
CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
fsm_getstate_str(fi));
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
fsm_newstate(fi, CTC_STATE_DTERM);
- ch2 = priv->channel[WRITE];
+ ch2 = priv->channel[CTCM_WRITE];
fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
ccw_device_halt(ch->cdev, (unsigned long)ch);
@@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&ch->timer);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: %s: %s unrecoverable channel error",
- CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
+ CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
if (IS_MPC(ch)) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
}
- if (rd == READ) {
+ if (rd == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
switch (fsm_getstate(fi)) {
case CTC_STATE_STARTRETRY:
case CTC_STATE_SETUPWAIT:
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ctcmpc_chx_rxidle(fi, event, arg);
} else {
fsm_newstate(fi, CTC_STATE_TXIDLE);
@@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
break;
};
- fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
done:
@@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
struct net_device *dev = ach->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *wch = priv->channel[WRITE];
- struct channel *rch = priv->channel[READ];
+ struct channel *wch = priv->channel[CTCM_WRITE];
+ struct channel *rch = priv->channel[CTCM_READ];
struct sk_buff *skb;
struct th_sweep *header;
int rc = 0;
@@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
if (IS_MPC(priv))
priv->mpcg->channels_terminating = 0;
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_START, ch);
}
@@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
ch->th_seq_num = 0x00;
@@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXUP)
- mpc_channel_action(priv->channel[READ],
- READ, MPC_CHANNEL_ADD);
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_ADD);
else
- mpc_channel_action(priv->channel[WRITE],
- WRITE, MPC_CHANNEL_ADD);
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_ADD);
}
}
@@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
}
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXDOWN)
- mpc_channel_action(priv->channel[READ],
- READ, MPC_CHANNEL_REMOVE);
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_REMOVE);
else
- mpc_channel_action(priv->channel[WRITE],
- WRITE, MPC_CHANNEL_REMOVE);
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_REMOVE);
}
}
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 4ecafbf9121..2c7d2d9be4d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type,
else {
ch->flags |= CHANNEL_FLAGS_INUSE;
ch->flags &= ~CHANNEL_FLAGS_RWMASK;
- ch->flags |= (direction == WRITE)
+ ch->flags |= (direction == CTCM_WRITE)
? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
}
@@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s trans_skb allocation error",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
return -ENOMEM;
}
@@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s set norm_cda failed",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
return -ENOMEM;
}
@@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
priv = dev->ml_priv;
grp = priv->mpcg;
- ch = priv->channel[WRITE];
+ ch = priv->channel[CTCM_WRITE];
/* sweep processing is not complete until response and request */
/* has completed for all read channels in group */
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
- grp->sweep_rsp_pend_num = grp->active_channels[READ];
- grp->sweep_req_pend_num = grp->active_channels[READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
}
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
dev->trans_start = jiffies;
- if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
+ if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
@@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
- if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
+ if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped",
CTCM_FUNTAIL, dev->name);
@@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
priv = dev->ml_priv;
- max_bufsize = priv->channel[READ]->max_bufsize;
+ max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
if (IS_MPC(priv)) {
if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@@ -1152,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
dev_fsm, dev_fsm_len, GFP_KERNEL);
if (priv->fsm == NULL) {
CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
- kfree(dev);
+ free_netdev(dev);
return NULL;
}
fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1163,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
grp = ctcmpc_init_mpc_group(priv);
if (grp == NULL) {
MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
- kfree(dev);
+ free_netdev(dev);
return NULL;
}
tasklet_init(&grp->mpc_tasklet2,
@@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
priv = dev_get_drvdata(&cgdev->dev);
/* Try to extract channel from driver data. */
- if (priv->channel[READ]->cdev == cdev)
- ch = priv->channel[READ];
- else if (priv->channel[WRITE]->cdev == cdev)
- ch = priv->channel[WRITE];
+ if (priv->channel[CTCM_READ]->cdev == cdev)
+ ch = priv->channel[CTCM_READ];
+ else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+ ch = priv->channel[CTCM_WRITE];
else {
dev_err(&cdev->dev,
"%s: Internal error: Can't determine channel for "
@@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
goto out_ccw2;
}
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
priv->channel[direction] =
- channel_get(type, direction == READ ? read_id : write_id,
- direction);
+ channel_get(type, direction == CTCM_READ ?
+ read_id : write_id, direction);
if (priv->channel[direction] == NULL) {
- if (direction == WRITE)
- channel_free(priv->channel[READ]);
+ if (direction == CTCM_WRITE)
+ channel_free(priv->channel[CTCM_READ]);
goto out_dev;
}
priv->channel[direction]->netdev = dev;
@@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
- priv->channel[READ]->id,
- priv->channel[WRITE]->id, priv->protocol);
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
- priv->channel[READ]->id,
- priv->channel[WRITE]->id, priv->protocol);
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
return 0;
out_unregister:
@@ -1635,10 +1637,10 @@ out_ccw2:
out_ccw1:
ccw_device_set_offline(cgdev->cdev[0]);
out_remove_channel2:
- readc = channel_get(type, read_id, READ);
+ readc = channel_get(type, read_id, CTCM_READ);
channel_remove(readc);
out_remove_channel1:
- writec = channel_get(type, write_id, WRITE);
+ writec = channel_get(type, write_id, CTCM_WRITE);
channel_remove(writec);
out_err_result:
return result;
@@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
if (!priv)
return -ENODEV;
- if (priv->channel[READ]) {
- dev = priv->channel[READ]->netdev;
+ if (priv->channel[CTCM_READ]) {
+ dev = priv->channel[CTCM_READ]->netdev;
CTCM_DBF_DEV(SETUP, dev, "");
/* Close the device */
ctcm_close(dev);
dev->flags &= ~IFF_RUNNING;
ctcm_remove_attributes(&cgdev->dev);
- channel_free(priv->channel[READ]);
+ channel_free(priv->channel[CTCM_READ]);
} else
dev = NULL;
- if (priv->channel[WRITE])
- channel_free(priv->channel[WRITE]);
+ if (priv->channel[CTCM_WRITE])
+ channel_free(priv->channel[CTCM_WRITE]);
if (dev) {
unregister_netdev(dev);
@@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
ccw_device_set_offline(cgdev->cdev[1]);
ccw_device_set_offline(cgdev->cdev[0]);
- if (priv->channel[READ])
- channel_remove(priv->channel[READ]);
- if (priv->channel[WRITE])
- channel_remove(priv->channel[WRITE]);
- priv->channel[READ] = priv->channel[WRITE] = NULL;
+ if (priv->channel[CTCM_READ])
+ channel_remove(priv->channel[CTCM_READ]);
+ if (priv->channel[CTCM_WRITE])
+ channel_remove(priv->channel[CTCM_WRITE]);
+ priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
return 0;
@@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- netif_device_detach(priv->channel[READ]->netdev);
- ctcm_close(priv->channel[READ]->netdev);
+ netif_device_detach(priv->channel[CTCM_READ]->netdev);
+ ctcm_close(priv->channel[CTCM_READ]->netdev);
if (!wait_event_timeout(priv->fsm->wait_q,
fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
- netif_device_attach(priv->channel[READ]->netdev);
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
return -EBUSY;
}
ccw_device_set_offline(gdev->cdev[1]);
@@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev)
rc = ccw_device_set_online(gdev->cdev[0]);
if (rc)
goto err_out;
- ctcm_open(priv->channel[READ]->netdev);
+ ctcm_open(priv->channel[CTCM_READ]->netdev);
err_out:
- netif_device_attach(priv->channel[READ]->netdev);
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
return rc;
}
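
The ctcm_init_netdevice() error paths above switch from kfree(dev) to free_netdev(dev): a net_device obtained from alloc_netdev() carries embedded private data and reference-counted state that only free_netdev() tears down correctly. A minimal sketch of that pattern, with hypothetical demo_* names standing in for the driver's own setup routine and private struct:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct demo_priv {
	int opened;			/* illustrative private state */
};

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);		/* example setup; the real driver uses its own */
}

static struct net_device *demo_init_netdevice(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct demo_priv), "demo%d", demo_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		/* Memory from alloc_netdev() must go back through
		 * free_netdev(), never plain kfree(), so the embedded
		 * state is released as well. */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}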
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d34fa14f44e..24d5215eb0c 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -111,8 +111,8 @@ enum ctcm_channel_types {
#define CTCM_INITIAL_BLOCKLEN 2
-#define READ 0
-#define WRITE 1
+#define CTCM_READ 0
+#define CTCM_WRITE 1
#define CTCM_ID_SIZE 20+3
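
The header change above replaces the driver-private READ/WRITE defines with CTCM_READ/CTCM_WRITE because the old names collide with the kernel-wide READ/WRITE macros used for I/O direction, so any file including both headers silently redefines them. A small sketch of the namespaced-constant pattern the rest of the series applies (ctcm_demo_* names are invented for illustration):

/* Driver-local direction indices; prefixed so they cannot clash with
 * the global READ/WRITE macros. */
#define CTCM_DEMO_READ	0
#define CTCM_DEMO_WRITE	1

struct ctcm_demo_channel {
	int active;
};

struct ctcm_demo_priv {
	struct ctcm_demo_channel *channel[2];	/* indexed by the defines above */
};

static void ctcm_demo_start(struct ctcm_demo_priv *priv)
{
	int dir;

	/* Same loop shape as dev_action_start() above, using the
	 * namespaced constants as array indices. */
	for (dir = CTCM_DEMO_READ; dir <= CTCM_DEMO_WRITE; dir++)
		if (priv->channel[dir])
			priv->channel[dir]->active = 1;
}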
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 87c24d2936d..b64881f33f2 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num,
return;
priv = dev->ml_priv;
grp = priv->mpcg;
- rch = priv->channel[READ];
- wch = priv->channel[WRITE];
+ rch = priv->channel[CTCM_READ];
+ wch = priv->channel[CTCM_WRITE];
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
@@ -540,7 +540,7 @@ void ctc_mpc_dealloc_ch(int port_num)
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
"%s: %s: refcount = %d\n",
- CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt));
+ CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
fsm_deltimer(&priv->restart_timer);
grp->channels_terminating = 0;
@@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
"%s: %s: flowc = %d",
CTCM_FUNTAIL, dev->name, flowc);
- rch = priv->channel[READ];
+ rch = priv->channel[CTCM_READ];
mpcg_state = fsm_getstate(grp->fsm);
switch (flowc) {
@@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
int rc = 0;
struct th_sweep *header;
struct sk_buff *sweep_skb;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
@@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
if (do_debug)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
ctcm_test_and_set_busy(dev);
- grp->sweep_req_pend_num = grp->active_channels[READ];
- grp->sweep_rsp_pend_num = grp->active_channels[READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
}
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev)
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */
- ch = priv->channel[READ];
+ ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
/* Put the write channel in idle state */
- ch = priv->channel[WRITE];
+ ch = priv->channel[CTCM_WRITE];
if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
@@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
"%s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
- grp->active_channels[READ], grp->active_channels[WRITE]);
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
grp->num_channel_paths++;
@@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
grp->xid_skb->data,
grp->xid_skb->len);
- ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
+ ch->xid->xid2_dlc_type =
+ ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? XID2_READ_SIDE : XID2_WRITE_SIDE);
- if (CHANNEL_DIRECTION(ch->flags) == WRITE)
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
ch->xid->xid2_buf_len = 0x00;
ch->xid_skb->data = ch->xid_skb_data;
@@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
fsm_newstate(ch->fsm, CH_XID0_PENDING);
- if ((grp->active_channels[READ] > 0) &&
- (grp->active_channels[WRITE] > 0) &&
+ if ((grp->active_channels[CTCM_READ] > 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0) &&
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
if (grp->channels_terminating)
goto done;
- if (((grp->active_channels[READ] == 0) &&
- (grp->active_channels[WRITE] > 0))
- || ((grp->active_channels[WRITE] == 0) &&
- (grp->active_channels[READ] > 0)))
+ if (((grp->active_channels[CTCM_READ] == 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0))
+ || ((grp->active_channels[CTCM_WRITE] == 0) &&
+ (grp->active_channels[CTCM_READ] > 0)))
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
done:
@@ -1038,7 +1040,8 @@ done:
"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
- grp->active_channels[READ], grp->active_channels[WRITE]);
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
@@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
(grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer);
- wch = priv->channel[WRITE];
- rch = priv->channel[READ];
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
switch (grp->saved_state) {
case MPCG_STATE_RESET:
@@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
priv = dev->ml_priv;
grp = priv->mpcg;
- wch = priv->channel[WRITE];
- rch = priv->channel[READ];
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
@@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
/*the received direction should be the opposite of ours */
- if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
+ if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
XID2_READ_SIDE) != xid->xid2_dlc_type) {
rc = 2;
/* XID REJECTED: r/w channel pairing mismatch */
@@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
if (grp == NULL)
return;
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
struct xid2 *thisxid = ch->xid;
ch->xid_skb->data = ch->xid_skb_data;
@@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
return -ENOMEM;
}
- *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
- priv->channel[READ]->pdu_seq++;
+ *((__u32 *)skb_push(skb, 4)) =
+ priv->channel[CTCM_READ]->pdu_seq;
+ priv->channel[CTCM_READ]->pdu_seq++;
CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
- __func__, priv->channel[READ]->pdu_seq);
+ __func__, priv->channel[CTCM_READ]->pdu_seq);
/* receipt of CC03 resets anticipated sequence number on
receiving side */
- priv->channel[READ]->pdu_seq = 0x00;
+ priv->channel[CTCM_READ]->pdu_seq = 0x00;
skb_reset_mac_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_SNAP);
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 2b24550e865..8305319b2a8 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev);
- if (!(priv && priv->channel[READ] &&
- (ndev = priv->channel[READ]->netdev))) {
+ ndev = priv->channel[CTCM_READ]->netdev;
+ if (!(priv && priv->channel[CTCM_READ] && ndev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
@@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
goto einval;
- priv->channel[READ]->max_bufsize = bs1;
- priv->channel[WRITE]->max_bufsize = bs1;
+ priv->channel[CTCM_READ]->max_bufsize = bs1;
+ priv->channel[CTCM_WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
- priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
- priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
CTCM_DBF_DEV(SETUP, ndev, buf);
return count;
@@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
p += sprintf(p, " Device FSM state: %s\n",
fsm_getstate_str(priv->fsm));
p += sprintf(p, " RX channel FSM state: %s\n",
- fsm_getstate_str(priv->channel[READ]->fsm));
+ fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
p += sprintf(p, " TX channel FSM state: %s\n",
- fsm_getstate_str(priv->channel[WRITE]->fsm));
+ fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
p += sprintf(p, " Max. TX buffer used: %ld\n",
priv->channel[WRITE]->prof.maxmulti);
p += sprintf(p, " Max. chained SKBs: %ld\n",
@@ -102,7 +102,7 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
priv->channel[WRITE]->prof.tx_time);
printk(KERN_INFO "Statistics for %s:\n%s",
- priv->channel[WRITE]->netdev->name, sbuf);
+ priv->channel[CTCM_WRITE]->netdev->name, sbuf);
kfree(sbuf);
return;
}
@@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
return -ENODEV;
/* Reset statistics */
memset(&priv->channel[WRITE]->prof, 0,
- sizeof(priv->channel[WRITE]->prof));
+ sizeof(priv->channel[CTCM_WRITE]->prof));
return count;
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d1257768be9..6be43eb126b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -676,6 +676,7 @@ enum qeth_discipline_id {
};
struct qeth_discipline {
+ void (*start_poll)(struct ccw_device *, int, unsigned long);
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
int (*recover)(void *ptr);
@@ -702,6 +703,16 @@ struct qeth_skb_data {
#define QETH_SKB_MAGIC 0x71657468
#define QETH_SIGA_CC2_RETRIES 3
+struct qeth_rx {
+ int b_count;
+ int b_index;
+ struct qdio_buffer_element *b_element;
+ int e_offset;
+ int qdio_err;
+};
+
+#define QETH_NAPI_WEIGHT 128
+
struct qeth_card {
struct list_head list;
enum qeth_card_states state;
@@ -749,6 +760,8 @@ struct qeth_card {
debug_info_t *debug;
struct mutex conf_mutex;
struct mutex discipline_mutex;
+ struct napi_struct napi;
+ struct qeth_rx rx;
};
struct qeth_card_list_struct {
@@ -831,6 +844,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
+void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
+void qeth_qdio_input_handler(struct ccw_device *,
+ unsigned int, unsigned int, int,
+ int, unsigned long);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
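
The qeth_core.h additions above give every card a napi_struct plus a small struct qeth_rx that remembers where inbound processing stopped (buffer index, element, offset), so a later poll call can resume mid-buffer once the budget is exhausted. A hedged sketch of how such per-device NAPI state is typically embedded and registered; demo_card, demo_poll and DEMO_NAPI_WEIGHT are assumptions, not qeth code:

#include <linux/netdevice.h>

#define DEMO_NAPI_WEIGHT	64	/* budget hint; qeth uses 128 */

struct demo_rx_state {
	int b_count;	/* buffers handed over by the device */
	int b_index;	/* current buffer */
	int e_offset;	/* offset inside the current buffer */
};

struct demo_card {
	struct net_device *dev;
	struct napi_struct napi;
	struct demo_rx_state rx;	/* survives across poll calls */
};

static int demo_poll(struct napi_struct *napi, int budget)
{
	/* Real processing is shown in the poll-loop sketch further down;
	 * this stub just completes immediately. */
	napi_complete(napi);
	return 0;
}

static int demo_setup_netdev(struct demo_card *card)
{
	/* Bind the poll routine to the device once at setup time,
	 * mirroring the netif_napi_add() calls added by this patch. */
	netif_napi_add(card->dev, &card->napi, demo_poll, DEMO_NAPI_WEIGHT);
	return register_netdev(card->dev);
}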
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3a5a18a0fc2..76426706260 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2911,6 +2911,27 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
}
}
+void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
+ unsigned long card_ptr)
+{
+ struct qeth_card *card = (struct qeth_card *)card_ptr;
+
+ if (card->dev)
+ napi_schedule(&card->napi);
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
+
+void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
+ unsigned int queue, int first_element, int count,
+ unsigned long card_ptr)
+{
+ struct qeth_card *card = (struct qeth_card *)card_ptr;
+
+ if (qdio_err)
+ qeth_schedule_recovery(card);
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
+
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned int qdio_error, int __queue, int first_element,
int count, unsigned long card_ptr)
@@ -3843,6 +3864,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
init_data.output_handler = card->discipline.output_handler;
+ init_data.queue_start_poll = card->discipline.start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
@@ -4513,8 +4535,8 @@ static struct {
/* 20 */{"queue 1 buffer usage"},
{"queue 2 buffer usage"},
{"queue 3 buffer usage"},
- {"rx handler time"},
- {"rx handler count"},
+ {"rx poll time"},
+ {"rx poll count"},
{"rx do_QDIO time"},
{"rx do_QDIO count"},
{"tx handler time"},
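
qeth_qdio_start_poll() above does nothing but napi_schedule(); the actual receive work moves out of the qdio interrupt path into the NAPI poll routine, and the new queue_start_poll member of qdio_initialize is how the qdio layer hands that hook to the driver. A sketch of the callback shape only, with demo_* names invented and struct ccw_device left opaque (an s390 build is assumed):

#include <linux/netdevice.h>

struct ccw_device;			/* opaque here; defined by the s390 CCW layer */

struct demo_card {
	struct net_device *dev;
	struct napi_struct napi;
};

/* Called by the lower layer when inbound data arrives while device
 * interrupts are held off for polling; just defer to NAPI. */
static void demo_start_poll(struct ccw_device *cdev, int queue,
			    unsigned long card_ptr)
{
	struct demo_card *card = (struct demo_card *)card_ptr;

	if (card->dev)
		napi_schedule(&card->napi);
}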
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 830d63524d6..847e8797073 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -310,6 +310,8 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
+ if (!vid)
+ return;
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "aidOSM");
return;
@@ -407,29 +409,25 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
return rc;
}
-static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
- struct qeth_qdio_buffer *buf, int index)
+static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
+ int budget, int *done)
{
- struct qdio_buffer_element *element;
+ int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
- int offset;
unsigned int len;
- /* get first element of current buffer */
- element = (struct qdio_buffer_element *)&buf->buffer->element[0];
- offset = 0;
- if (card->options.performance_stats)
- card->perf_stats.bufs_rec++;
- while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
- &offset, &hdr))) {
- skb->dev = card->dev;
- /* is device UP ? */
- if (!(card->dev->flags & IFF_UP)) {
- dev_kfree_skb_any(skb);
- continue;
+ *done = 0;
+ BUG_ON(!budget);
+ while (budget) {
+ skb = qeth_core_get_next_skb(card,
+ card->qdio.in_q->bufs[card->rx.b_index].buffer,
+ &card->rx.b_element, &card->rx.e_offset, &hdr);
+ if (!skb) {
+ *done = 1;
+ break;
}
-
+ skb->dev = card->dev;
switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb->pkt_type = PACKET_HOST;
@@ -441,7 +439,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
if (skb->protocol == htons(ETH_P_802_2))
*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
len = skb->len;
- netif_rx(skb);
+ netif_receive_skb(skb);
break;
case QETH_HEADER_TYPE_OSN:
if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -459,9 +457,87 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
+ work_done++;
+ budget--;
card->stats.rx_packets++;
card->stats.rx_bytes += len;
}
+ return work_done;
+}
+
+static int qeth_l2_poll(struct napi_struct *napi, int budget)
+{
+ struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+ int work_done = 0;
+ struct qeth_qdio_buffer *buffer;
+ int done;
+ int new_budget = budget;
+
+ if (card->options.performance_stats) {
+ card->perf_stats.inbound_cnt++;
+ card->perf_stats.inbound_start_time = qeth_get_micros();
+ }
+
+ while (1) {
+ if (!card->rx.b_count) {
+ card->rx.qdio_err = 0;
+ card->rx.b_count = qdio_get_next_buffers(
+ card->data.ccwdev, 0, &card->rx.b_index,
+ &card->rx.qdio_err);
+ if (card->rx.b_count <= 0) {
+ card->rx.b_count = 0;
+ break;
+ }
+ card->rx.b_element =
+ &card->qdio.in_q->bufs[card->rx.b_index]
+ .buffer->element[0];
+ card->rx.e_offset = 0;
+ }
+
+ while (card->rx.b_count) {
+ buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+ if (!(card->rx.qdio_err &&
+ qeth_check_qdio_errors(card, buffer->buffer,
+ card->rx.qdio_err, "qinerr")))
+ work_done += qeth_l2_process_inbound_buffer(
+ card, new_budget, &done);
+ else
+ done = 1;
+
+ if (done) {
+ if (card->options.performance_stats)
+ card->perf_stats.bufs_rec++;
+ qeth_put_buffer_pool_entry(card,
+ buffer->pool_entry);
+ qeth_queue_input_buffer(card, card->rx.b_index);
+ card->rx.b_count--;
+ if (card->rx.b_count) {
+ card->rx.b_index =
+ (card->rx.b_index + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ card->rx.b_element =
+ &card->qdio.in_q
+ ->bufs[card->rx.b_index]
+ .buffer->element[0];
+ card->rx.e_offset = 0;
+ }
+ }
+
+ if (work_done >= budget)
+ goto out;
+ else
+ new_budget = budget - work_done;
+ }
+ }
+
+ napi_complete(napi);
+ if (qdio_start_irq(card->data.ccwdev, 0))
+ napi_schedule(&card->napi);
+out:
+ if (card->options.performance_stats)
+ card->perf_stats.inbound_time += qeth_get_micros() -
+ card->perf_stats.inbound_start_time;
+ return work_done;
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
@@ -755,49 +831,10 @@ tx_drop:
return NETDEV_TX_OK;
}
-static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
- unsigned int qdio_err, unsigned int queue,
- int first_element, int count, unsigned long card_ptr)
-{
- struct net_device *net_dev;
- struct qeth_card *card;
- struct qeth_qdio_buffer *buffer;
- int index;
- int i;
-
- card = (struct qeth_card *) card_ptr;
- net_dev = card->dev;
- if (card->options.performance_stats) {
- card->perf_stats.inbound_cnt++;
- card->perf_stats.inbound_start_time = qeth_get_micros();
- }
- if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_CARD_TEXT(card, 1, "qdinchk");
- QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
- count);
- QETH_CARD_TEXT_(card, 1, "%04X", queue);
- qeth_schedule_recovery(card);
- return;
- }
- for (i = first_element; i < (first_element + count); ++i) {
- index = i % QDIO_MAX_BUFFERS_PER_Q;
- buffer = &card->qdio.in_q->bufs[index];
- if (!(qdio_err &&
- qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
- "qinerr")))
- qeth_l2_process_inbound_buffer(card, buffer, index);
- /* clear buffer and give back to hardware */
- qeth_put_buffer_pool_entry(card, buffer->pool_entry);
- qeth_queue_input_buffer(card, index);
- }
- if (card->options.performance_stats)
- card->perf_stats.inbound_time += qeth_get_micros() -
- card->perf_stats.inbound_start_time;
-}
-
static int qeth_l2_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ int rc = 0;
QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
@@ -814,18 +851,24 @@ static int qeth_l2_open(struct net_device *dev)
if (!card->lan_online && netif_carrier_ok(dev))
netif_carrier_off(dev);
- return 0;
+ if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+ napi_enable(&card->napi);
+ napi_schedule(&card->napi);
+ } else
+ rc = -EIO;
+ return rc;
}
-
static int qeth_l2_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP)
+ if (card->state == CARD_STATE_UP) {
card->state = CARD_STATE_SOFTSETUP;
+ napi_disable(&card->napi);
+ }
return 0;
}
@@ -836,8 +879,9 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
INIT_LIST_HEAD(&card->vid_list);
INIT_LIST_HEAD(&card->mc_list);
card->options.layer2 = 1;
+ card->discipline.start_poll = qeth_qdio_start_poll;
card->discipline.input_handler = (qdio_handler_t *)
- qeth_l2_qdio_input_handler;
+ qeth_qdio_input_handler;
card->discipline.output_handler = (qdio_handler_t *)
qeth_qdio_output_handler;
card->discipline.recover = qeth_l2_recover;
@@ -923,6 +967,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+ netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
return register_netdev(card->dev);
}
@@ -955,6 +1000,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
card->state = CARD_STATE_HARDSETUP;
+ memset(&card->rx, 0, sizeof(struct qeth_rx));
qeth_print_status_message(card);
/* softsetup */
@@ -1086,9 +1132,6 @@ static int qeth_l2_recover(void *ptr)
card->use_hard_stop = 1;
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
- /* don't run another scheduled recovery */
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
if (!rc)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -1099,6 +1142,8 @@ static int qeth_l2_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
}
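
qeth_l2_poll() above follows the standard NAPI contract: consume at most `budget` packets, and only when the queue is drained call napi_complete() and re-enable the device interrupt (qdio_start_irq() here), rescheduling if new work raced in. A stripped-down sketch of that control flow; demo_fetch_packets() and demo_rearm_irq() are hypothetical stand-ins for the buffer walking and the qdio re-arm call:

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct demo_card {
	struct napi_struct napi;
	struct net_device *dev;
};

/* Placeholder: the real routine walks qdio buffers and hands each skb
 * to netif_receive_skb()/napi_gro_receive(), returning how many. */
static int demo_fetch_packets(struct demo_card *card, int budget)
{
	return 0;
}

/* Placeholder for qdio_start_irq(); non-zero means "new data raced in,
 * poll again instead of sleeping". */
static int demo_rearm_irq(struct demo_card *card)
{
	return 0;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_card *card = container_of(napi, struct demo_card, napi);
	int work_done;

	work_done = demo_fetch_packets(card, budget);
	if (work_done < budget) {
		/* Queue drained: leave polling mode and re-enable the
		 * interrupt; if packets arrived meanwhile, reschedule
		 * so they are not lost. */
		napi_complete(napi);
		if (demo_rearm_irq(card))
			napi_schedule(&card->napi);
	}
	return work_done;
}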
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e22ae248f61..c094707fcbf 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -103,12 +103,7 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
- sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
- ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
- addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5], addr[6], addr[7],
- addr[8], addr[9], addr[10], addr[11],
- addr[12], addr[13], addr[14], addr[15]);
+ sprintf(buf, "%pI6", addr);
}
int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
@@ -2018,13 +2013,14 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
qeth_l3_set_multicast_list(card->dev);
}
-static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
- struct sk_buff *skb, struct qeth_hdr *hdr)
+static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned short *vlan_id)
{
- unsigned short vlan_id = 0;
__be16 prot;
struct iphdr *ip_hdr;
unsigned char tg_addr[MAX_ADDR_LEN];
+ int is_vlan = 0;
if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
@@ -2087,8 +2083,9 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
if (hdr->hdr.l3.ext_flags &
(QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
- vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
+ *vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+ is_vlan = 1;
}
switch (card->options.checksum_type) {
@@ -2109,54 +2106,44 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
skb->ip_summed = CHECKSUM_NONE;
}
- return vlan_id;
+ return is_vlan;
}
-static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
- struct qeth_qdio_buffer *buf, int index)
+static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
+ int budget, int *done)
{
- struct qdio_buffer_element *element;
+ int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
- int offset;
__u16 vlan_tag = 0;
+ int is_vlan;
unsigned int len;
- /* get first element of current buffer */
- element = (struct qdio_buffer_element *)&buf->buffer->element[0];
- offset = 0;
- if (card->options.performance_stats)
- card->perf_stats.bufs_rec++;
- while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
- &offset, &hdr))) {
- skb->dev = card->dev;
- /* is device UP ? */
- if (!(card->dev->flags & IFF_UP)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ *done = 0;
+ BUG_ON(!budget);
+ while (budget) {
+ skb = qeth_core_get_next_skb(card,
+ card->qdio.in_q->bufs[card->rx.b_index].buffer,
+ &card->rx.b_element, &card->rx.e_offset, &hdr);
+ if (!skb) {
+ *done = 1;
+ break;
+ }
+ skb->dev = card->dev;
switch (hdr->hdr.l3.id) {
case QETH_HEADER_TYPE_LAYER3:
- vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
+ is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
+ &vlan_tag);
len = skb->len;
- if (vlan_tag && !card->options.sniffer)
- if (card->vlangrp)
- vlan_hwaccel_rx(skb, card->vlangrp,
- vlan_tag);
- else {
- dev_kfree_skb_any(skb);
- continue;
- }
+ if (is_vlan && !card->options.sniffer)
+ vlan_gro_receive(&card->napi, card->vlangrp,
+ vlan_tag, skb);
else
- netif_rx(skb);
+ napi_gro_receive(&card->napi, skb);
break;
case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
- if (card->options.checksum_type == NO_CHECKSUMMING)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
netif_receive_skb(skb);
break;
@@ -2166,10 +2153,87 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
-
+ work_done++;
+ budget--;
card->stats.rx_packets++;
card->stats.rx_bytes += len;
}
+ return work_done;
+}
+
+static int qeth_l3_poll(struct napi_struct *napi, int budget)
+{
+ struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+ int work_done = 0;
+ struct qeth_qdio_buffer *buffer;
+ int done;
+ int new_budget = budget;
+
+ if (card->options.performance_stats) {
+ card->perf_stats.inbound_cnt++;
+ card->perf_stats.inbound_start_time = qeth_get_micros();
+ }
+
+ while (1) {
+ if (!card->rx.b_count) {
+ card->rx.qdio_err = 0;
+ card->rx.b_count = qdio_get_next_buffers(
+ card->data.ccwdev, 0, &card->rx.b_index,
+ &card->rx.qdio_err);
+ if (card->rx.b_count <= 0) {
+ card->rx.b_count = 0;
+ break;
+ }
+ card->rx.b_element =
+ &card->qdio.in_q->bufs[card->rx.b_index]
+ .buffer->element[0];
+ card->rx.e_offset = 0;
+ }
+
+ while (card->rx.b_count) {
+ buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+ if (!(card->rx.qdio_err &&
+ qeth_check_qdio_errors(card, buffer->buffer,
+ card->rx.qdio_err, "qinerr")))
+ work_done += qeth_l3_process_inbound_buffer(
+ card, new_budget, &done);
+ else
+ done = 1;
+
+ if (done) {
+ if (card->options.performance_stats)
+ card->perf_stats.bufs_rec++;
+ qeth_put_buffer_pool_entry(card,
+ buffer->pool_entry);
+ qeth_queue_input_buffer(card, card->rx.b_index);
+ card->rx.b_count--;
+ if (card->rx.b_count) {
+ card->rx.b_index =
+ (card->rx.b_index + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ card->rx.b_element =
+ &card->qdio.in_q
+ ->bufs[card->rx.b_index]
+ .buffer->element[0];
+ card->rx.e_offset = 0;
+ }
+ }
+
+ if (work_done >= budget)
+ goto out;
+ else
+ new_budget = budget - work_done;
+ }
+ }
+
+ napi_complete(napi);
+ if (qdio_start_irq(card->data.ccwdev, 0))
+ napi_schedule(&card->napi);
+out:
+ if (card->options.performance_stats)
+ card->perf_stats.inbound_time += qeth_get_micros() -
+ card->perf_stats.inbound_start_time;
+ return work_done;
}
static int qeth_l3_verify_vlan_dev(struct net_device *dev,
@@ -3103,6 +3167,7 @@ tx_drop:
static int qeth_l3_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ int rc = 0;
QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
@@ -3113,7 +3178,12 @@ static int qeth_l3_open(struct net_device *dev)
if (!card->lan_online && netif_carrier_ok(dev))
netif_carrier_off(dev);
- return 0;
+ if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+ napi_enable(&card->napi);
+ napi_schedule(&card->napi);
+ } else
+ rc = -EIO;
+ return rc;
}
static int qeth_l3_stop(struct net_device *dev)
@@ -3122,8 +3192,10 @@ static int qeth_l3_stop(struct net_device *dev)
QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP)
+ if (card->state == CARD_STATE_UP) {
card->state = CARD_STATE_SOFTSETUP;
+ napi_disable(&card->napi);
+ }
return 0;
}
@@ -3293,57 +3365,19 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->gso_max_size = 15 * PAGE_SIZE;
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+ netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
return register_netdev(card->dev);
}
-static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
- unsigned int qdio_err, unsigned int queue, int first_element,
- int count, unsigned long card_ptr)
-{
- struct net_device *net_dev;
- struct qeth_card *card;
- struct qeth_qdio_buffer *buffer;
- int index;
- int i;
-
- card = (struct qeth_card *) card_ptr;
- net_dev = card->dev;
- if (card->options.performance_stats) {
- card->perf_stats.inbound_cnt++;
- card->perf_stats.inbound_start_time = qeth_get_micros();
- }
- if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_CARD_TEXT(card, 1, "qdinchk");
- QETH_CARD_TEXT_(card, 1, "%04X%04X",
- first_element, count);
- QETH_CARD_TEXT_(card, 1, "%04X", queue);
- qeth_schedule_recovery(card);
- return;
- }
- for (i = first_element; i < (first_element + count); ++i) {
- index = i % QDIO_MAX_BUFFERS_PER_Q;
- buffer = &card->qdio.in_q->bufs[index];
- if (!(qdio_err &&
- qeth_check_qdio_errors(card, buffer->buffer,
- qdio_err, "qinerr")))
- qeth_l3_process_inbound_buffer(card, buffer, index);
- /* clear buffer and give back to hardware */
- qeth_put_buffer_pool_entry(card, buffer->pool_entry);
- qeth_queue_input_buffer(card, index);
- }
- if (card->options.performance_stats)
- card->perf_stats.inbound_time += qeth_get_micros() -
- card->perf_stats.inbound_start_time;
-}
-
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
qeth_l3_create_device_attributes(&gdev->dev);
card->options.layer2 = 0;
+ card->discipline.start_poll = qeth_qdio_start_poll;
card->discipline.input_handler = (qdio_handler_t *)
- qeth_l3_qdio_input_handler;
+ qeth_qdio_input_handler;
card->discipline.output_handler = (qdio_handler_t *)
qeth_qdio_output_handler;
card->discipline.recover = qeth_l3_recover;
@@ -3402,6 +3436,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
card->state = CARD_STATE_HARDSETUP;
+ memset(&card->rx, 0, sizeof(struct qeth_rx));
qeth_print_status_message(card);
/* softsetup */
@@ -3538,9 +3573,6 @@ static int qeth_l3_recover(void *ptr)
card->use_hard_stop = 1;
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
- /* don't run another scheduled recovery */
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
if (!rc)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -3551,6 +3583,8 @@ static int qeth_l3_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
}
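
In the layer-3 receive path the old netif_rx()/vlan_hwaccel_rx() delivery is replaced by napi_gro_receive() and vlan_gro_receive(), which are only valid from NAPI poll context but allow the stack to aggregate consecutive TCP segments. A minimal sketch of that delivery decision, assuming the 2.6.36-era VLAN API (vlan_group plus vlan_gro_receive(), i.e. CONFIG_VLAN_8021Q enabled) and hypothetical demo_* names:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct demo_card {
	struct napi_struct napi;
	struct vlan_group *vlangrp;	/* filled in by the vlan_rx_register hook */
};

static void demo_deliver(struct demo_card *card, struct sk_buff *skb,
			 int is_vlan, u16 vlan_tag)
{
	/* Both calls must run inside the poll routine of card->napi. */
	if (is_vlan && card->vlangrp)
		vlan_gro_receive(&card->napi, card->vlangrp, vlan_tag, skb);
	else
		napi_gro_receive(&card->napi, skb);
}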
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
index 13768879020..4d2ea400042 100644
--- a/drivers/s390/net/smsgiucv_app.c
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -180,6 +180,13 @@ static int __init smsgiucv_app_init(void)
goto fail_put_driver;
}
+ /* convert sender to uppercase characters */
+ if (sender) {
+ int len = strlen(sender);
+ while (len--)
+ sender[len] = toupper(sender[len]);
+ }
+
/* register with the smsgiucv device driver */
rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
if (rc) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721..da54a28a1b8 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -277,16 +277,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
struct zfcp_qdio *qdio)
{
-
+ memset(id, 0, sizeof(*id));
id->cdev = qdio->adapter->ccw_device;
id->q_format = QDIO_ZFCP_QFMT;
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
- id->qib_param_field_format = 0;
- id->qib_param_field = NULL;
- id->input_slib_elements = NULL;
- id->output_slib_elements = NULL;
id->no_input_qs = 1;
id->no_output_qs = 1;
id->input_handler = zfcp_qdio_int_resp;
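
zfcp_qdio_setup_init_data() now clears the whole qdio_initialize structure with memset() before filling in the fields it uses, instead of nulling members one by one, so newly added members (such as queue_start_poll above) start out zero by construction. The same zero-then-fill idiom, sketched on a hypothetical config struct:

#include <linux/string.h>

struct demo_init_data {
	int no_input_qs;
	int no_output_qs;
	void *input_handler;
	void *output_handler;
	/* members added later default to zero automatically */
};

static void demo_setup_init_data(struct demo_init_data *id,
				 void *in_handler, void *out_handler)
{
	memset(id, 0, sizeof(*id));	/* everything not set below is 0/NULL */
	id->no_input_qs = 1;
	id->no_output_qs = 1;
	id->input_handler = in_handler;
	id->output_handler = out_handler;
}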
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 103fdf6b0b8..160e7510aca 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -443,7 +443,7 @@ static int kenvctrld(void *__unused)
return 0;
}
-static void attach_one_temp(struct bbc_i2c_bus *bp, struct of_device *op,
+static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
int temp_idx)
{
struct bbc_cpu_temperature *tp;
@@ -488,7 +488,7 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct of_device *op,
tp->fan_todo[FAN_CPU] = FAN_SAME;
}
-static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op,
+static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
int fan_idx)
{
struct bbc_fan_control *fp;
@@ -559,7 +559,7 @@ static void destroy_all_fans(struct bbc_i2c_bus *bp)
int bbc_envctrl_init(struct bbc_i2c_bus *bp)
{
- struct of_device *op;
+ struct platform_device *op;
int temp_index = 0;
int fan_index = 0;
int devidx = 0;
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 8bfdd63a1fc..614a5e114a1 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -51,7 +51,7 @@
* The second controller also connects to the smartcard reader, if present.
*/
-static void set_device_claimage(struct bbc_i2c_bus *bp, struct of_device *op, int val)
+static void set_device_claimage(struct bbc_i2c_bus *bp, struct platform_device *op, int val)
{
int i;
@@ -66,9 +66,9 @@ static void set_device_claimage(struct bbc_i2c_bus *bp, struct of_device *op, in
#define claim_device(BP,ECHILD) set_device_claimage(BP,ECHILD,1)
#define release_device(BP,ECHILD) set_device_claimage(BP,ECHILD,0)
-struct of_device *bbc_i2c_getdev(struct bbc_i2c_bus *bp, int index)
+struct platform_device *bbc_i2c_getdev(struct bbc_i2c_bus *bp, int index)
{
- struct of_device *op = NULL;
+ struct platform_device *op = NULL;
int curidx = 0, i;
for (i = 0; i < NUM_CHILDREN; i++) {
@@ -86,7 +86,7 @@ out:
return NULL;
}
-struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct of_device *op)
+struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *op)
{
struct bbc_i2c_client *client;
const u32 *reg;
@@ -114,7 +114,7 @@ struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct of_device *
void bbc_i2c_detach(struct bbc_i2c_client *client)
{
struct bbc_i2c_bus *bp = client->bp;
- struct of_device *op = client->op;
+ struct platform_device *op = client->op;
release_device(bp, op);
kfree(client);
@@ -297,7 +297,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);
}
-static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int index)
+static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index)
{
struct bbc_i2c_bus *bp;
struct device_node *dp;
@@ -317,7 +317,7 @@ static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int inde
bp->waiting = 0;
init_waitqueue_head(&bp->wq);
- if (request_irq(op->irqs[0], bbc_i2c_interrupt,
+ if (request_irq(op->archdata.irqs[0], bbc_i2c_interrupt,
IRQF_SHARED, "bbc_i2c", bp))
goto fail;
@@ -330,7 +330,7 @@ static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int inde
for (dp = op->dev.of_node->child;
dp && entry < 8;
dp = dp->sibling, entry++) {
- struct of_device *child_op;
+ struct platform_device *child_op;
child_op = of_find_device_by_node(dp);
bp->devs[entry].device = child_op;
@@ -361,7 +361,7 @@ fail:
extern int bbc_envctrl_init(struct bbc_i2c_bus *bp);
extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp);
-static int __devinit bbc_i2c_probe(struct of_device *op,
+static int __devinit bbc_i2c_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct bbc_i2c_bus *bp;
@@ -373,7 +373,7 @@ static int __devinit bbc_i2c_probe(struct of_device *op,
err = bbc_envctrl_init(bp);
if (err) {
- free_irq(op->irqs[0], bp);
+ free_irq(op->archdata.irqs[0], bp);
if (bp->i2c_bussel_reg)
of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1);
if (bp->i2c_control_regs)
@@ -386,13 +386,13 @@ static int __devinit bbc_i2c_probe(struct of_device *op,
return err;
}
-static int __devexit bbc_i2c_remove(struct of_device *op)
+static int __devexit bbc_i2c_remove(struct platform_device *op)
{
struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev);
bbc_envctrl_cleanup(bp);
- free_irq(op->irqs[0], bp);
+ free_irq(op->archdata.irqs[0], bp);
if (bp->i2c_bussel_reg)
of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1);
@@ -425,12 +425,12 @@ static struct of_platform_driver bbc_i2c_driver = {
static int __init bbc_i2c_init(void)
{
- return of_register_driver(&bbc_i2c_driver, &of_bus_type);
+ return of_register_platform_driver(&bbc_i2c_driver);
}
static void __exit bbc_i2c_exit(void)
{
- of_unregister_driver(&bbc_i2c_driver);
+ of_unregister_platform_driver(&bbc_i2c_driver);
}
module_init(bbc_i2c_init);
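
The sbus/char conversions above follow the sparc-wide switch from struct of_device to struct platform_device: interrupt numbers now live in op->archdata.irqs[] and drivers register through of_register_platform_driver(). A hedged sketch of a probe/remove pair in that style; demo_* names are invented, and this mirrors the 2.6.36-era sparc API, which has since changed again:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __devinit demo_probe(struct platform_device *op,
				const struct of_device_id *match)
{
	/* IRQs moved from op->irqs[] into op->archdata.irqs[] with the
	 * platform_device conversion on sparc. */
	return request_irq(op->archdata.irqs[0], demo_interrupt,
			   IRQF_SHARED, "demo", op);
}

static int __devexit demo_remove(struct platform_device *op)
{
	free_irq(op->archdata.irqs[0], op);
	return 0;
}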
diff --git a/drivers/sbus/char/bbc_i2c.h b/drivers/sbus/char/bbc_i2c.h
index 83c4811b7b5..4b4531066e7 100644
--- a/drivers/sbus/char/bbc_i2c.h
+++ b/drivers/sbus/char/bbc_i2c.h
@@ -7,7 +7,7 @@
struct bbc_i2c_client {
struct bbc_i2c_bus *bp;
- struct of_device *op;
+ struct platform_device *op;
int bus;
int address;
};
@@ -64,16 +64,16 @@ struct bbc_i2c_bus {
struct list_head temps;
struct list_head fans;
- struct of_device *op;
+ struct platform_device *op;
struct {
- struct of_device *device;
+ struct platform_device *device;
int client_claimed;
} devs[NUM_CHILDREN];
};
/* Probing and attachment. */
-extern struct of_device *bbc_i2c_getdev(struct bbc_i2c_bus *, int);
-extern struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct of_device *);
+extern struct platform_device *bbc_i2c_getdev(struct bbc_i2c_bus *, int);
+extern struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *);
extern void bbc_i2c_detach(struct bbc_i2c_client *);
/* Register read/write. NOTE: Blocking! */
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 7baf1b64403..1690e53fb84 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -13,7 +13,7 @@
#include <linux/miscdevice.h>
#include <linux/ioport.h> /* request_region */
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/atomic.h>
@@ -26,6 +26,7 @@
#define DRIVER_NAME "d7s"
#define PFX DRIVER_NAME ": "
+static DEFINE_MUTEX(d7s_mutex);
static int sol_compat = 0; /* Solaris compatibility mode */
/* Solaris compatibility flag -
@@ -74,7 +75,6 @@ static int d7s_open(struct inode *inode, struct file *f)
{
if (D7S_MINOR != iminor(inode))
return -ENODEV;
- cycle_kernel_lock();
atomic_inc(&d7s_users);
return 0;
}
@@ -110,7 +110,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (D7S_MINOR != iminor(file->f_path.dentry->d_inode))
return -ENODEV;
- lock_kernel();
+ mutex_lock(&d7s_mutex);
switch (cmd) {
case D7SIOCWR:
/* assign device register values we mask-out D7S_FLIP
@@ -151,7 +151,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
writeb(regs, p->regs);
break;
};
- unlock_kernel();
+ mutex_unlock(&d7s_mutex);
return error;
}
@@ -170,7 +170,7 @@ static struct miscdevice d7s_miscdev = {
.fops = &d7s_fops
};
-static int __devinit d7s_probe(struct of_device *op,
+static int __devinit d7s_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *opts;
@@ -236,7 +236,7 @@ out_free:
goto out;
}
-static int __devexit d7s_remove(struct of_device *op)
+static int __devexit d7s_remove(struct platform_device *op)
{
struct d7s *p = dev_get_drvdata(&op->dev);
u8 regs = readb(p->regs);
@@ -277,12 +277,12 @@ static struct of_platform_driver d7s_driver = {
static int __init d7s_init(void)
{
- return of_register_driver(&d7s_driver, &of_bus_type);
+ return of_register_platform_driver(&d7s_driver);
}
static void __exit d7s_exit(void)
{
- of_unregister_driver(&d7s_driver);
+ of_unregister_platform_driver(&d7s_driver);
}
module_init(d7s_init);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index c8166ecf527..078e5f4520e 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -27,7 +27,6 @@
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -699,7 +698,6 @@ envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int
envctrl_open(struct inode *inode, struct file *file)
{
- cycle_kernel_lock();
file->private_data = NULL;
return 0;
}
@@ -1029,7 +1027,7 @@ static int kenvctrld(void *__unused)
return 0;
}
-static int __devinit envctrl_probe(struct of_device *op,
+static int __devinit envctrl_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp;
@@ -1106,7 +1104,7 @@ out_iounmap:
return err;
}
-static int __devexit envctrl_remove(struct of_device *op)
+static int __devexit envctrl_remove(struct platform_device *op)
{
int index;
@@ -1142,12 +1140,12 @@ static struct of_platform_driver envctrl_driver = {
static int __init envctrl_init(void)
{
- return of_register_driver(&envctrl_driver, &of_bus_type);
+ return of_register_platform_driver(&envctrl_driver);
}
static void __exit envctrl_exit(void)
{
- of_unregister_driver(&envctrl_driver);
+ of_unregister_platform_driver(&envctrl_driver);
}
module_init(envctrl_init);
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 368d66294d8..2b4b4b613c4 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -10,7 +10,7 @@
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of.h>
@@ -22,6 +22,7 @@
#include <asm/io.h>
#include <asm/upa.h>
+static DEFINE_MUTEX(flash_mutex);
static DEFINE_SPINLOCK(flash_lock);
static struct {
unsigned long read_base; /* Physical read address */
@@ -80,7 +81,7 @@ flash_mmap(struct file *file, struct vm_area_struct *vma)
static long long
flash_llseek(struct file *file, long long offset, int origin)
{
- lock_kernel();
+ mutex_lock(&flash_mutex);
switch (origin) {
case 0:
file->f_pos = offset;
@@ -94,10 +95,10 @@ flash_llseek(struct file *file, long long offset, int origin)
file->f_pos = flash.read_size;
break;
default:
- unlock_kernel();
+ mutex_unlock(&flash_mutex);
return -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&flash_mutex);
return file->f_pos;
}
@@ -125,13 +126,13 @@ flash_read(struct file * file, char __user * buf,
static int
flash_open(struct inode *inode, struct file *file)
{
- lock_kernel();
+ mutex_lock(&flash_mutex);
if (test_and_set_bit(0, (void *)&flash.busy) != 0) {
- unlock_kernel();
+ mutex_unlock(&flash_mutex);
return -EBUSY;
}
- unlock_kernel();
+ mutex_unlock(&flash_mutex);
return 0;
}
@@ -159,7 +160,7 @@ static const struct file_operations flash_fops = {
static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
-static int __devinit flash_probe(struct of_device *op,
+static int __devinit flash_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -191,7 +192,7 @@ static int __devinit flash_probe(struct of_device *op,
return misc_register(&flash_dev);
}
-static int __devexit flash_remove(struct of_device *op)
+static int __devexit flash_remove(struct platform_device *op)
{
misc_deregister(&flash_dev);
@@ -218,12 +219,12 @@ static struct of_platform_driver flash_driver = {
static int __init flash_init(void)
{
- return of_register_driver(&flash_driver, &of_bus_type);
+ return of_register_platform_driver(&flash_driver);
}
static void __exit flash_cleanup(void)
{
- of_unregister_driver(&flash_driver);
+ of_unregister_platform_driver(&flash_driver);
}
module_init(flash_init);
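
flash.c (like display7seg.c, openprom.c and uctrl.c in this series) drops lock_kernel()/unlock_kernel() in favour of a file-scope mutex, which serializes only this driver's open/llseek/ioctl paths instead of taking the global big kernel lock. The pattern, sketched with a hypothetical busy flag:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_mutex);	/* replaces the big kernel lock */
static int demo_busy;

static int demo_open(void)
{
	int rc = 0;

	mutex_lock(&demo_mutex);
	if (demo_busy)
		rc = -EBUSY;		/* same single-opener rule as flash_open() */
	else
		demo_busy = 1;
	mutex_unlock(&demo_mutex);
	return rc;
}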
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index aacbe14e2e7..8d6e508222b 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -33,7 +33,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
@@ -61,6 +61,7 @@ typedef struct openprom_private_data
} DATA;
/* ID of the PROM node containing all of the EEPROM options. */
+static DEFINE_MUTEX(openprom_mutex);
static struct device_node *options_node;
/*
@@ -316,7 +317,7 @@ static long openprom_sunos_ioctl(struct file * file,
if (bufsize < 0)
return bufsize;
- lock_kernel();
+ mutex_lock(&openprom_mutex);
switch (cmd) {
case OPROMGETOPT:
@@ -367,7 +368,7 @@ static long openprom_sunos_ioctl(struct file * file,
}
kfree(opp);
- unlock_kernel();
+ mutex_unlock(&openprom_mutex);
return error;
}
@@ -558,7 +559,7 @@ static int openprom_bsd_ioctl(struct file * file,
void __user *argp = (void __user *)arg;
int err;
- lock_kernel();
+ mutex_lock(&openprom_mutex);
switch (cmd) {
case OPIOCGET:
err = opiocget(argp, data);
@@ -589,7 +590,7 @@ static int openprom_bsd_ioctl(struct file * file,
err = -EINVAL;
break;
};
- unlock_kernel();
+ mutex_unlock(&openprom_mutex);
return err;
}
@@ -697,11 +698,11 @@ static int openprom_open(struct inode * inode, struct file * file)
if (!data)
return -ENOMEM;
- lock_kernel();
+ mutex_lock(&openprom_mutex);
data->current_node = of_find_node_by_path("/");
data->lastnode = data->current_node;
file->private_data = (void *) data;
- unlock_kernel();
+ mutex_unlock(&openprom_mutex);
return 0;
}
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 5f253665a1d..1b345be5cc0 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
@@ -72,6 +72,7 @@ struct ts102_regs {
#define UCTRL_STAT_RXNE_STA 0x04 /* receive FIFO not empty status */
#define UCTRL_STAT_RXO_STA 0x08 /* receive FIFO overflow status */
+static DEFINE_MUTEX(uctrl_mutex);
static const char *uctrl_extstatus[16] = {
"main power available",
"internal battery attached",
@@ -210,10 +211,10 @@ uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int
uctrl_open(struct inode *inode, struct file *file)
{
- lock_kernel();
+ mutex_lock(&uctrl_mutex);
uctrl_get_event_status(global_driver);
uctrl_get_external_status(global_driver);
- unlock_kernel();
+ mutex_unlock(&uctrl_mutex);
return 0;
}
@@ -347,7 +348,7 @@ static void uctrl_get_external_status(struct uctrl_driver *driver)
}
-static int __devinit uctrl_probe(struct of_device *op,
+static int __devinit uctrl_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct uctrl_driver *p;
@@ -367,7 +368,7 @@ static int __devinit uctrl_probe(struct of_device *op,
goto out_free;
}
- p->irq = op->irqs[0];
+ p->irq = op->archdata.irqs[0];
err = request_irq(p->irq, uctrl_interrupt, 0, "uctrl", p);
if (err) {
printk(KERN_ERR "uctrl: Unable to register irq.\n");
@@ -403,7 +404,7 @@ out_free:
goto out;
}
-static int __devexit uctrl_remove(struct of_device *op)
+static int __devexit uctrl_remove(struct platform_device *op)
{
struct uctrl_driver *p = dev_get_drvdata(&op->dev);
@@ -437,12 +438,12 @@ static struct of_platform_driver uctrl_driver = {
static int __init uctrl_init(void)
{
- return of_register_driver(&uctrl_driver, &of_bus_type);
+ return of_register_platform_driver(&uctrl_driver);
}
static void __exit uctrl_exit(void)
{
- of_unregister_driver(&uctrl_driver);
+ of_unregister_platform_driver(&uctrl_driver);
}
module_init(uctrl_init);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 80dc3ac12cd..89fc1c8af86 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -309,9 +309,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
hostdata->msgin = memory + MSGIN_OFFSET;
hostdata->msgout = memory + MSGOUT_OFFSET;
hostdata->status = memory + STATUS_OFFSET;
- /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
- * if this isn't sufficient separation to avoid dma flushing issues */
- BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
hostdata->dev = dev;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 158284f0573..bbf91aec64f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -370,6 +370,14 @@ config ISCSI_TCP
http://open-iscsi.org
+config ISCSI_BOOT_SYSFS
+ tristate "iSCSI Boot Sysfs Interface"
+ default n
+ help
+ This option enables support for exposing iSCSI boot information
+ via sysfs to userspace. If you wish to export this information,
+ say Y. Otherwise, say N.
+
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
@@ -1853,7 +1861,7 @@ config ZFCP_DIF
config SCSI_PMCRAID
tristate "PMC SIERRA Linux MaxRAID adapter support"
- depends on PCI && SCSI
+ depends on PCI && SCSI && NET
---help---
This driver supports the PMC SIERRA MaxRAID adapters.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2a3fca2eca6..2703c6ec5e3 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
+obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index f92da9fd5f2..5d2f148889a 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1857,7 +1857,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
#endif
/* KLL May need eop and parity in 53c400 */
if (hostdata->flags & FLAG_NCR53C400)
- NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_PAR_CHECK | MR_ENABLE_PAR_INTR | MR_ENABLE_EOP_INTR | MR_DMA_MODE | MR_MONITOR_BSY);
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE |
+ MR_ENABLE_PAR_CHECK | MR_ENABLE_PAR_INTR |
+ MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
else
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
#endif /* def REAL_DMA */
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 04057ab72a8..84d77fd86e5 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -352,9 +352,8 @@ static int aac_rx_check_health(struct aac_dev *dev)
pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
post, paddr);
if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
- ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
- ret <<= 4;
- ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
+ ret = (hex_to_bin(buffer[2]) << 4) +
+ hex_to_bin(buffer[3]);
}
pci_free_consistent(dev->pdev, 512, buffer, baddr);
return ret;
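
For reference, a minimal sketch of the hex-digit decoding the aacraid hunk above switches to, using only the kernel's hex_to_bin() helper; the function name below is invented and not part of the patch.

#include <linux/errno.h>
#include <linux/kernel.h>

/* Invented helper: combine two ASCII hex digits into one byte, the same
 * pattern the new aacraid code uses via hex_to_bin(). */
static int decode_hex_byte(char hi, char lo)
{
	int h = hex_to_bin(hi);	/* 0-15 on success, -1 for a non-hex character */
	int l = hex_to_bin(lo);

	if (h < 0 || l < 0)
		return -EINVAL;
	return (h << 4) | l;
}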
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 2a8cf137f60..4f785f254c1 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -52,22 +52,6 @@
#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
-static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
- struct scatterlist *sgp,
- int nseg,
- int badseg)
-{
- printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
- badseg, nseg, sg_virt(sgp),
- (unsigned long long)SCSI_SG_PA(sgp),
- sgp->length);
-
- /*
- * Not safe to continue.
- */
- panic("Buffer at physical address > 16Mb used for aha1542");
-}
-
#include<linux/stat.h>
#ifdef DEBUG
@@ -691,8 +675,6 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
}
scsi_for_each_sg(SCpnt, sg, sg_count, i) {
any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
- if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
- BAD_SG_DMA(SCpnt, scsi_sglist(SCpnt), sg_count, i);
any2scsi(cptr[i].datalen, sg->length);
};
any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
@@ -1133,16 +1115,9 @@ static int __init aha1542_detect(struct scsi_host_template * tpnt)
release_region(bases[indx], 4);
continue;
}
- /* For now we do this - until kmalloc is more intelligent
- we are resigned to stupid hacks like this */
- if (SCSI_BUF_PA(shpnt) >= ISA_DMA_THRESHOLD) {
- printk(KERN_ERR "Invalid address for shpnt with 1542.\n");
- goto unregister;
- }
if (!aha1542_test_port(bases[indx], shpnt))
goto unregister;
-
base_io = bases[indx];
/* Set the Bus on/off-times as not to ruin floppy performance */
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 24ac2315c5c..3b7e83d2dab 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -688,9 +688,9 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
{
int i;
struct asd_sas_phy **sas_phys =
- kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_phy), GFP_KERNEL);
+ kcalloc(ASD_MAX_PHYS, sizeof(*sas_phys), GFP_KERNEL);
struct asd_sas_port **sas_ports =
- kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_port), GFP_KERNEL);
+ kcalloc(ASD_MAX_PHYS, sizeof(*sas_ports), GFP_KERNEL);
if (!sas_phys || !sas_ports) {
kfree(sas_phys);
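
A brief aside on the aic94xx change above: kcalloc() both zeroes the allocation and checks the count * size multiplication for overflow, which an open-coded kmalloc(n * size) does not. A minimal sketch with an invented helper name:

#include <linux/slab.h>

/* Illustrative only: allocate a zeroed, overflow-checked array of n pointers. */
static void **alloc_ptr_array(size_t n)
{
	return kcalloc(n, sizeof(void *), GFP_KERNEL);
}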
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 07fdfe57e38..a4e04c50c43 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -192,7 +192,6 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.attr = {
.name = "mu_read",
.mode = S_IRUSR ,
- .owner = THIS_MODULE,
},
.size = 1032,
.read = arcmsr_sysfs_iop_message_read,
@@ -202,7 +201,6 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.attr = {
.name = "mu_write",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1032,
.write = arcmsr_sysfs_iop_message_write,
@@ -212,7 +210,6 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.attr = {
.name = "mu_clear",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1,
.write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 95a895dd4f1..c8dc392edd5 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -56,6 +56,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <asm/dma.h>
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
index 84c275fb9f6..ceaca32e788 100644
--- a/drivers/scsi/be2iscsi/Kconfig
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -2,6 +2,7 @@ config BE2ISCSI
tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
+ select ISCSI_BOOT_SYSFS
help
This driver implements the iSCSI functionality for ServerEngines'
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 40641d0845f..5218de4ab35 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -162,6 +162,13 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2
#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
+#define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14
+#define OPCODE_COMMON_ISCSI_NTWK_CONFIGURE_STATELESS_IP_ADDR 17
+#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21
+#define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22
+#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23
+#define OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID 24
+#define OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO 25
#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
@@ -237,11 +244,109 @@ struct be_cmd_resp_eq_create {
u16 rsvd0; /* sword */
} __packed;
+struct mgmt_chap_format {
+ u32 flags;
+ u8 intr_chap_name[256];
+ u8 intr_secret[16];
+ u8 target_chap_name[256];
+ u8 target_secret[16];
+ u16 intr_chap_name_length;
+ u16 intr_secret_length;
+ u16 target_chap_name_length;
+ u16 target_secret_length;
+} __packed;
+
+struct mgmt_auth_method_format {
+ u8 auth_method_type;
+ u8 padding[3];
+ struct mgmt_chap_format chap;
+} __packed;
+
+struct mgmt_conn_login_options {
+ u8 flags;
+ u8 header_digest;
+ u8 data_digest;
+ u8 rsvd0;
+ u32 max_recv_datasegment_len_ini;
+ u32 max_recv_datasegment_len_tgt;
+ u32 tcp_mss;
+ u32 tcp_window_size;
+ struct mgmt_auth_method_format auth_data;
+} __packed;
+
+struct ip_address_format {
+ u16 size_of_structure;
+ u8 reserved;
+ u8 ip_type;
+ u8 ip_address[16];
+ u32 rsvd0;
+} __packed;
+
+struct mgmt_conn_info {
+ u32 connection_handle;
+ u32 connection_status;
+ u16 src_port;
+ u16 dest_port;
+ u16 dest_port_redirected;
+ u16 cid;
+ u32 estimated_throughput;
+ struct ip_address_format src_ipaddr;
+ struct ip_address_format dest_ipaddr;
+ struct ip_address_format dest_ipaddr_redirected;
+ struct mgmt_conn_login_options negotiated_login_options;
+} __packed;
+
+struct mgmt_session_login_options {
+ u8 flags;
+ u8 error_recovery_level;
+ u16 rsvd0;
+ u32 first_burst_length;
+ u32 max_burst_length;
+ u16 max_connections;
+ u16 max_outstanding_r2t;
+ u16 default_time2wait;
+ u16 default_time2retain;
+} __packed;
+
+struct mgmt_session_info {
+ u32 session_handle;
+ u32 status;
+ u8 isid[6];
+ u16 tsih;
+ u32 session_flags;
+ u16 conn_count;
+ u16 pad;
+ u8 target_name[224];
+ u8 initiator_iscsiname[224];
+ struct mgmt_session_login_options negotiated_login_options;
+ struct mgmt_conn_info conn_list[1];
+} __packed;
+
+struct be_cmd_req_get_session {
+ struct be_cmd_req_hdr hdr;
+ u32 session_handle;
+} __packed;
+
+struct be_cmd_resp_get_session {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_session_info session_info;
+} __packed;
+
struct mac_addr {
u16 size_of_struct;
u8 addr[ETH_ALEN];
} __packed;
+struct be_cmd_req_get_boot_target {
+ struct be_cmd_req_hdr hdr;
+} __packed;
+
+struct be_cmd_resp_get_boot_target {
+ struct be_cmd_resp_hdr hdr;
+ u32 boot_session_count;
+ int boot_session_handle;
+};
+
struct be_cmd_req_mac_query {
struct be_cmd_req_hdr hdr;
u8 type;
@@ -426,6 +531,11 @@ int be_poll_mcc(struct be_ctrl_info *ctrl);
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
+unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba);
+unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
+ u32 boot_session_handle,
+ struct be_dma_mem *nonemb_cmd);
+
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
@@ -601,14 +711,6 @@ struct be_eq_delay_params_in {
struct eq_delay delay[8];
} __packed;
-struct ip_address_format {
- u16 size_of_structure;
- u8 reserved;
- u8 ip_type;
- u8 ip_address[16];
- u32 rsvd0;
-} __packed;
-
struct tcp_connect_and_offload_in {
struct be_cmd_req_hdr hdr;
struct ip_address_format ip_address;
@@ -688,18 +790,29 @@ struct be_fw_cfg {
u32 function_caps;
} __packed;
-#define CMD_ISCSI_COMMAND_INVALIDATE 1
-#define ISCSI_OPCODE_SCSI_DATA_OUT 5
+struct be_all_if_id {
+ struct be_cmd_req_hdr hdr;
+ u32 if_count;
+ u32 if_hndl_list[1];
+} __packed;
+
+#define ISCSI_OPCODE_SCSI_DATA_OUT 5
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_ISCSI_CLEANUP 59
+#define OPCODE_COMMON_TCP_UPLOAD 56
#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
-#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
-#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
-#define OPCODE_COMMON_ISCSI_CLEANUP 59
-#define OPCODE_COMMON_TCP_UPLOAD 56
#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
-/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
-#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
-#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
+#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
+#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
+#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
+#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
+#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
+
+/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
+#define CMD_ISCSI_COMMAND_INVALIDATE 1
+#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
+#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
#define INI_WR_CMD 1 /* Initiator write command */
#define INI_TMF_CMD 2 /* Initiator TMF command */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 6d63e7b312c..7f11f3e48e1 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,48 +300,65 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
- struct be_cmd_resp_get_mac_addr *resp;
- struct be_mcc_wrb *wrb;
- unsigned int tag, wrb_num;
- int len = 0;
- unsigned short status, extd_status;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ int status = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
- tag = be_cmd_get_mac_addr(phba);
- if (!tag) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
- return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
- SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
- } else {
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
- resp = embedded_payload(wrb);
- memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
- len = sysfs_format_mac(buf, phba->mac_address,
- ETH_ALEN);
+ status = beiscsi_get_macaddr(buf, phba);
+ if (status < 0) {
+ SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
+ return status;
}
break;
default:
return iscsi_host_get_param(shost, param, buf);
}
- return len;
+ return status;
}
+int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
+{
+ struct be_cmd_resp_get_mac_addr *resp;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag, wrb_num;
+ unsigned short status, extd_status;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ int rc;
+
+ if (phba->read_mac_address)
+ return sysfs_format_mac(buf, phba->mac_address,
+ ETH_ALEN);
+
+ tag = be_cmd_get_mac_addr(phba);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+ return -EBUSY;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "Failed to get be_cmd_get_mac_addr"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EAGAIN;
+ }
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+ resp = embedded_payload(wrb);
+ memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
+ rc = sysfs_format_mac(buf, phba->mac_address,
+ ETH_ALEN);
+ phba->read_mac_address = 1;
+ return rc;
+}
+
/**
* beiscsi_conn_get_stats - get the iscsi stats
* @cls_conn: pointer to iscsi cls conn
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 870cdb2a73e..8950a702b9f 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -54,6 +54,8 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
+int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba);
+
int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 7436c5ad569..8220bde6c04 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -26,6 +26,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
+#include <linux/iscsi_boot_sysfs.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -211,6 +212,218 @@ unlock:
return rc;
}
+static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = sprintf(buf, "%.*s\n",
+ (int)strlen(phba->boot_sess.target_name),
+ (char *)&phba->boot_sess.target_name);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1)
+ rc = sprintf(buf, "%pI4\n",
+ (char *)&phba->boot_sess.conn_list[0].
+ dest_ipaddr.ip_address);
+ else
+ rc = sprintf(str, "%pI6\n",
+ (char *)&phba->boot_sess.conn_list[0].
+ dest_ipaddr.ip_address);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0].
+ dest_port);
+ break;
+
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ target_chap_name_length,
+ (char *)&phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ target_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ target_secret_length,
+ (char *)&phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ target_secret);
+
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ intr_chap_name_length,
+ (char *)&phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ intr_chap_name);
+
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ intr_secret_length,
+ (char *)&phba->boot_sess.conn_list[0].
+ negotiated_login_options.auth_data.chap.
+ intr_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = sprintf(str, "2\n");
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = sprintf(str, "0\n");
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = sprintf(str, "2\n");
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = sprintf(str, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = beiscsi_get_macaddr(buf, phba);
+ if (rc < 0) {
+ SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
+ return rc;
+ }
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+ if (!phba->boot_kset)
+ return -ENOMEM;
+
+ /* get boot info using mgmt cmd */
+ boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_tgt_info,
+ beiscsi_tgt_get_attr_visibility);
+ if (!boot_kobj)
+ goto free_kset;
+
+ boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_ini_info,
+ beiscsi_ini_get_attr_visibility);
+ if (!boot_kobj)
+ goto free_kset;
+
+ boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_eth_info,
+ beiscsi_eth_get_attr_visibility);
+ if (!boot_kobj)
+ goto free_kset;
+ return 0;
+
+free_kset:
+ iscsi_boot_destroy_kset(phba->boot_kset);
+ return -ENOMEM;
+}
+
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -268,6 +481,15 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
if (iscsi_host_add(shost, &phba->pcidev->dev))
goto free_devices;
+
+ if (beiscsi_setup_boot_info(phba))
+ /*
+ * log error but continue, because we may not be using
+ * iscsi boot.
+ */
+ shost_printk(KERN_ERR, phba->shost, "Could not set up "
+ "iSCSI boot info.");
+
return phba;
free_devices:
@@ -3279,6 +3501,89 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
"In hwi_disable_intr, Already Disabled\n");
}
+static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
+{
+ struct be_cmd_resp_get_boot_target *boot_resp;
+ struct be_cmd_resp_get_session *session_resp;
+ struct be_mcc_wrb *wrb;
+ struct be_dma_mem nonemb_cmd;
+ unsigned int tag, wrb_num;
+ unsigned short status, extd_status;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ tag = beiscsi_get_boot_target(phba);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+ return -EAGAIN;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EBUSY;
+ }
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+ boot_resp = embedded_payload(wrb);
+
+ if (boot_resp->boot_session_handle < 0) {
+ printk(KERN_ERR "No Boot Session for this pci_func,"
+ "session Hndl = %d\n", boot_resp->boot_session_handle);
+ return -ENXIO;
+ }
+
+ nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ sizeof(*session_resp),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ SE_DEBUG(DBG_LVL_1,
+ "Failed to allocate memory for"
+ "beiscsi_get_session_info\n");
+ return -ENOMEM;
+ }
+
+ memset(nonemb_cmd.va, 0, sizeof(*session_resp));
+ tag = beiscsi_get_session_info(phba,
+ boot_resp->boot_session_handle, &nonemb_cmd);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
+ " Failed\n");
+ goto boot_freemem;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ goto boot_freemem;
+ }
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+	session_resp = nonemb_cmd.va;
+ memcpy(&phba->boot_sess, &session_resp->session_info,
+ sizeof(struct mgmt_session_info));
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return 0;
+boot_freemem:
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return -ENOMEM;
+}
+
static int beiscsi_init_port(struct beiscsi_hba *phba)
{
int ret;
@@ -3841,6 +4146,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ iscsi_boot_destroy_kset(phba->boot_kset);
}
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
@@ -3996,6 +4302,11 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_blkenbld;
}
hwi_enable_intr(phba);
+ ret = beiscsi_get_boot_info(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
+ "No Boot Devices !!!!!\n");
+ }
SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index c643bb3736f..90eb74f6bca 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -35,7 +35,7 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "2.0.527.0"
+#define BUILD_STR "2.0.549.0"
#define BE_NAME "ServerEngines BladeEngine2" \
"Linux iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -63,7 +63,7 @@
#define BEISCSI_SGLIST_ELEMENTS 30
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */
+#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@@ -312,6 +312,7 @@ struct beiscsi_hba {
struct list_head hba_queue;
unsigned short *cid_array;
struct iscsi_endpoint **ep_array;
+ struct iscsi_boot_kset *boot_kset;
struct Scsi_Host *shost;
struct {
/**
@@ -342,6 +343,8 @@ struct beiscsi_hba {
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
+ unsigned int read_mac_address;
+ struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
};
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 3f3fab91a7d..877324fc594 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -20,6 +20,77 @@
#include "be_mgmt.h"
#include "be_iscsi.h"
+#include <scsi/scsi_transport_iscsi.h>
+
+unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_addr *req;
+ unsigned int tag = 0;
+
+ SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
+ sizeof(*req));
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
+ u32 boot_session_handle,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag = 0;
+ struct be_cmd_req_get_session *req;
+ struct be_cmd_resp_get_session *resp;
+ struct be_sge *sge;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ nonemb_cmd->size = sizeof(*resp);
+ req = nonemb_cmd->va;
+ memset(req, 0, sizeof(*req));
+ wrb = wrb_from_mccq(phba);
+ sge = nonembedded_sgl(wrb);
+ wrb->tag0 |= tag;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
+ sizeof(*resp));
+ req->session_handle = boot_session_handle;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
@@ -297,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
memset(req, 0, sizeof(*req));
wrb->tag0 |= tag;
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
sizeof(*req));
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index f0933d8d1ed..76867b5577f 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -1310,7 +1310,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
break;
case BFI_FCPORT_I2H_DISABLE_RSP:
- if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+ if (fcport->msgtag == i2hmsg.pdisable_rsp->msgtag)
bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
break;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 915a29d6c7a..ca04cc9d332 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -788,6 +788,7 @@ bfad_drv_init(struct bfad_s *bfad)
memset(&driver_info, 0, sizeof(driver_info));
strncpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version) - 1);
+ __kernel_param_lock();
if (host_name)
strncpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name) - 1);
@@ -797,6 +798,7 @@ bfad_drv_init(struct bfad_s *bfad)
if (os_patch)
strncpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch) - 1);
+ __kernel_param_unlock();
strncpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name - 1));
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 678120b7046..6ef87f6fcdb 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -291,7 +291,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
struct bfa_tskim_s *tskim;
struct bfad_itnim_s *itnim;
struct bfa_itnim_s *bfa_itnim;
- DECLARE_WAIT_QUEUE_HEAD(wq);
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
int rc = SUCCESS;
unsigned long flags;
enum bfi_tskim_status task_status;
@@ -353,7 +353,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
struct bfad_itnim_s *itnim;
unsigned long flags;
u32 i, rc, err_cnt = 0;
- DECLARE_WAIT_QUEUE_HEAD(wq);
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
enum bfi_tskim_status task_status;
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/include/protocol/fcp.h b/drivers/scsi/bfa/include/protocol/fcp.h
index 9ade68ad285..74ea63ce84b 100644
--- a/drivers/scsi/bfa/include/protocol/fcp.h
+++ b/drivers/scsi/bfa/include/protocol/fcp.h
@@ -18,6 +18,7 @@
#ifndef __FCPPROTO_H__
#define __FCPPROTO_H__
+#include <linux/bitops.h>
#include <protocol/scsi.h>
#pragma pack(1)
@@ -102,9 +103,6 @@ enum {
/*
* Task management flags field - only one bit shall be set
*/
-#ifndef BIT
-#define BIT(_x) (1 << (_x))
-#endif
enum fcp_tm_cmnd{
FCP_TM_ABORT_TASK_SET = BIT(1),
FCP_TM_CLEAR_TASK_SET = BIT(2),
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 2fceb19eb27..1b6f86b2482 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -120,6 +120,8 @@
/* additional LOM specific iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
+#define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80)
+
/* SQ/RQ/CQ DB structure sizes */
#define ISCSI_SQ_DB_SIZE (16)
#define ISCSI_RQ_DB_SIZE (16)
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 1e9f7141102..45a6154ce97 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -1,10 +1,11 @@
config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
+ depends on NET
+ depends on PCI
select SCSI_ISCSI_ATTRS
select NETDEVICES
select NETDEV_1000
select CNIC
- depends on PCI
---help---
This driver supports iSCSI offload for the Broadcom NetXtreme II
devices.
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 00c033511cb..99568cb9ad1 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -58,6 +58,8 @@
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
+#define BNX2I_5771X_DBELL_PAGE_SIZE 128
+
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
#define BD_SPLIT_SIZE 32768
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index d23fc256d58..99c71e6d4c1 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2405,7 +2405,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
+ reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
+ DPM_TRIGER_TYPE;
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
goto arm_cq;
}
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 4799d439120..d6532187f61 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -84,10 +84,16 @@ static const char * vendor_labels[CH_TYPES-4] = {
};
// module_param_string_array(vendor_labels, NULL, 0444);
-#define dprintk(fmt, arg...) if (debug) \
- printk(KERN_DEBUG "%s: " fmt, ch->name , ## arg)
-#define vprintk(fmt, arg...) if (verbose) \
- printk(KERN_INFO "%s: " fmt, ch->name , ## arg)
+#define DPRINTK(fmt, arg...) \
+do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " fmt, ch->name, ##arg); \
+} while (0)
+#define VPRINTK(level, fmt, arg...) \
+do { \
+ if (verbose) \
+ printk(level "%s: " fmt, ch->name, ##arg); \
+} while (0)
/* ------------------------------------------------------------------- */
@@ -186,7 +192,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
retry:
errno = 0;
if (debug) {
- dprintk("command: ");
+ DPRINTK("command: ");
__scsi_print_command(cmd);
}
@@ -194,7 +200,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
- dprintk("result: 0x%x\n",result);
+ DPRINTK("result: 0x%x\n",result);
if (driver_byte(result) & DRIVER_SENSE) {
if (debug)
scsi_print_sense_hdr(ch->name, &sshdr);
@@ -250,7 +256,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
cmd[9] = 255;
if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) {
if (((buffer[16] << 8) | buffer[17]) != elem) {
- dprintk("asked for element 0x%02x, got 0x%02x\n",
+ DPRINTK("asked for element 0x%02x, got 0x%02x\n",
elem,(buffer[16] << 8) | buffer[17]);
kfree(buffer);
return -EIO;
@@ -259,10 +265,10 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
} else {
if (ch->voltags) {
ch->voltags = 0;
- vprintk("device has no volume tag support\n");
+ VPRINTK(KERN_INFO, "device has no volume tag support\n");
goto retry;
}
- dprintk("READ ELEMENT STATUS for element 0x%x failed\n",elem);
+ DPRINTK("READ ELEMENT STATUS for element 0x%x failed\n",elem);
}
kfree(buffer);
return result;
@@ -274,12 +280,12 @@ ch_init_elem(scsi_changer *ch)
int err;
u_char cmd[6];
- vprintk("INITIALIZE ELEMENT STATUS, may take some time ...\n");
+ VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n");
memset(cmd,0,sizeof(cmd));
cmd[0] = INITIALIZE_ELEMENT_STATUS;
cmd[1] = ch->device->lun << 5;
err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE);
- vprintk("... finished\n");
+ VPRINTK(KERN_INFO, "... finished\n");
return err;
}
@@ -322,20 +328,20 @@ ch_readconfig(scsi_changer *ch)
(buffer[buffer[3]+18] << 8) | buffer[buffer[3]+19];
ch->counts[CHET_DT] =
(buffer[buffer[3]+20] << 8) | buffer[buffer[3]+21];
- vprintk("type #1 (mt): 0x%x+%d [medium transport]\n",
+ VPRINTK(KERN_INFO, "type #1 (mt): 0x%x+%d [medium transport]\n",
ch->firsts[CHET_MT],
ch->counts[CHET_MT]);
- vprintk("type #2 (st): 0x%x+%d [storage]\n",
+ VPRINTK(KERN_INFO, "type #2 (st): 0x%x+%d [storage]\n",
ch->firsts[CHET_ST],
ch->counts[CHET_ST]);
- vprintk("type #3 (ie): 0x%x+%d [import/export]\n",
+ VPRINTK(KERN_INFO, "type #3 (ie): 0x%x+%d [import/export]\n",
ch->firsts[CHET_IE],
ch->counts[CHET_IE]);
- vprintk("type #4 (dt): 0x%x+%d [data transfer]\n",
+ VPRINTK(KERN_INFO, "type #4 (dt): 0x%x+%d [data transfer]\n",
ch->firsts[CHET_DT],
ch->counts[CHET_DT]);
} else {
- vprintk("reading element address assigment page failed!\n");
+ VPRINTK(KERN_INFO, "reading element address assigment page failed!\n");
}
/* vendor specific element types */
@@ -346,13 +352,13 @@ ch_readconfig(scsi_changer *ch)
continue;
ch->firsts[CHET_V1+i] = vendor_firsts[i];
ch->counts[CHET_V1+i] = vendor_counts[i];
- vprintk("type #%d (v%d): 0x%x+%d [%s, vendor specific]\n",
+ VPRINTK(KERN_INFO, "type #%d (v%d): 0x%x+%d [%s, vendor specific]\n",
i+5,i+1,vendor_firsts[i],vendor_counts[i],
vendor_labels[i]);
}
/* look up the devices of the data transfer elements */
- ch->dt = kmalloc(ch->counts[CHET_DT]*sizeof(struct scsi_device),
+ ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt),
GFP_KERNEL);
if (!ch->dt) {
@@ -366,21 +372,19 @@ ch_readconfig(scsi_changer *ch)
if (elem < CH_DT_MAX && -1 != dt_id[elem]) {
id = dt_id[elem];
lun = dt_lun[elem];
- vprintk("dt 0x%x: [insmod option] ",
+ VPRINTK(KERN_INFO, "dt 0x%x: [insmod option] ",
elem+ch->firsts[CHET_DT]);
} else if (0 != ch_read_element_status
(ch,elem+ch->firsts[CHET_DT],data)) {
- vprintk("dt 0x%x: READ ELEMENT STATUS failed\n",
+ VPRINTK(KERN_INFO, "dt 0x%x: READ ELEMENT STATUS failed\n",
elem+ch->firsts[CHET_DT]);
} else {
- vprintk("dt 0x%x: ",elem+ch->firsts[CHET_DT]);
+ VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]);
if (data[6] & 0x80) {
- if (verbose)
- printk("not this SCSI bus\n");
+ VPRINTK(KERN_CONT, "not this SCSI bus\n");
ch->dt[elem] = NULL;
} else if (0 == (data[6] & 0x30)) {
- if (verbose)
- printk("ID/LUN unknown\n");
+ VPRINTK(KERN_CONT, "ID/LUN unknown\n");
ch->dt[elem] = NULL;
} else {
id = ch->device->id;
@@ -390,22 +394,19 @@ ch_readconfig(scsi_changer *ch)
}
}
if (-1 != id) {
- if (verbose)
- printk("ID %i, LUN %i, ",id,lun);
+ VPRINTK(KERN_CONT, "ID %i, LUN %i, ",id,lun);
ch->dt[elem] =
scsi_device_lookup(ch->device->host,
ch->device->channel,
id,lun);
if (!ch->dt[elem]) {
/* should not happen */
- if (verbose)
- printk("Huh? device not found!\n");
+ VPRINTK(KERN_CONT, "Huh? device not found!\n");
} else {
- if (verbose)
- printk("name: %8.8s %16.16s %4.4s\n",
- ch->dt[elem]->vendor,
- ch->dt[elem]->model,
- ch->dt[elem]->rev);
+ VPRINTK(KERN_CONT, "name: %8.8s %16.16s %4.4s\n",
+ ch->dt[elem]->vendor,
+ ch->dt[elem]->model,
+ ch->dt[elem]->rev);
}
}
}
@@ -422,7 +423,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
{
u_char cmd[10];
- dprintk("position: 0x%x\n",elem);
+ DPRINTK("position: 0x%x\n",elem);
if (0 == trans)
trans = ch->firsts[CHET_MT];
memset(cmd,0,sizeof(cmd));
@@ -441,7 +442,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
{
u_char cmd[12];
- dprintk("move: 0x%x => 0x%x\n",src,dest);
+ DPRINTK("move: 0x%x => 0x%x\n",src,dest);
if (0 == trans)
trans = ch->firsts[CHET_MT];
memset(cmd,0,sizeof(cmd));
@@ -463,7 +464,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
{
u_char cmd[12];
- dprintk("exchange: 0x%x => 0x%x => 0x%x\n",
+ DPRINTK("exchange: 0x%x => 0x%x => 0x%x\n",
src,dest1,dest2);
if (0 == trans)
trans = ch->firsts[CHET_MT];
@@ -511,7 +512,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
if (!buffer)
return -ENOMEM;
- dprintk("%s %s voltag: 0x%x => \"%s\"\n",
+ DPRINTK("%s %s voltag: 0x%x => \"%s\"\n",
clear ? "clear" : "set",
alternate ? "alternate" : "primary",
elem, tag);
@@ -550,7 +551,7 @@ static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
}
put_user(data[2], dest+i);
if (data[2] & CESTATUS_EXCEPT)
- vprintk("element 0x%x: asc=0x%x, ascq=0x%x\n",
+ VPRINTK(KERN_INFO, "element 0x%x: asc=0x%x, ascq=0x%x\n",
ch->firsts[type]+i,
(int)data[4],(int)data[5]);
retval = ch_read_element_status
@@ -660,7 +661,7 @@ static long ch_ioctl(struct file *file,
return -EFAULT;
if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) {
- dprintk("CHIOPOSITION: invalid parameter\n");
+ DPRINTK("CHIOPOSITION: invalid parameter\n");
return -EBADSLT;
}
mutex_lock(&ch->lock);
@@ -680,7 +681,7 @@ static long ch_ioctl(struct file *file,
if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) ||
0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit )) {
- dprintk("CHIOMOVE: invalid parameter\n");
+ DPRINTK("CHIOMOVE: invalid parameter\n");
return -EBADSLT;
}
@@ -703,7 +704,7 @@ static long ch_ioctl(struct file *file,
if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) ||
0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) ||
0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) {
- dprintk("CHIOEXCHANGE: invalid parameter\n");
+ DPRINTK("CHIOEXCHANGE: invalid parameter\n");
return -EBADSLT;
}
@@ -796,7 +797,7 @@ static long ch_ioctl(struct file *file,
}
} else if (ch->voltags) {
ch->voltags = 0;
- vprintk("device has no volume tag support\n");
+ VPRINTK(KERN_INFO, "device has no volume tag support\n");
goto voltag_retry;
}
kfree(buffer);
@@ -824,7 +825,7 @@ static long ch_ioctl(struct file *file,
return -EFAULT;
if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) {
- dprintk("CHIOSVOLTAG: invalid parameter\n");
+ DPRINTK("CHIOSVOLTAG: invalid parameter\n");
return -EBADSLT;
}
elem = ch->firsts[csv.csv_type] + csv.csv_unit;
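
A short illustration of the do { } while (0) idiom the new DPRINTK/VPRINTK macros above rely on; the macro and function names here are hypothetical, not taken from the driver.

#include <linux/kernel.h>

static int debug = 1;

/* Hypothetical macro: the do { } while (0) wrapper makes it expand to exactly
 * one statement, so an unbraced if/else around the call site stays well-formed. */
#define LOG_DBG(fmt, ...)				\
do {							\
	if (debug)					\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)

static void log_example(int error)
{
	if (error)
		LOG_DBG("error %d\n", error);	/* still a single statement */
	else
		LOG_DBG("no error\n");
}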
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e049d5f..d0c82340f0e 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
{
struct scsi_sense_hdr sshdr;
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
scsi_show_sense_hdr(&sshdr);
scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
void scsi_print_result(struct scsi_cmnd *cmd)
{
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_show_result(cmd->result);
}
EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index bd977be7544..54f50b07dac 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1597,7 +1597,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
u32 tag_mask = 1;
u8 tag_number = 0;
while (tag_mask & dcb->tag_mask
- && tag_number <= dcb->max_command) {
+ && tag_number < dcb->max_command) {
tag_mask = tag_mask << 1;
tag_number++;
}
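
The dc395x change above fixes an off-by-one: with max_command tag slots, valid tag numbers run from 0 to max_command - 1, so the old "<=" comparison scanned one slot too far. A compact sketch of the same free-tag search, with illustrative names only:

#include <linux/types.h>

/* Illustrative: scan a 32-bit "in use" mask for the lowest free tag among
 * 'max' slots; a return value equal to 'max' means every slot is busy. */
static unsigned int find_free_tag(u32 used_mask, unsigned int max)
{
	u32 bit = 1;
	unsigned int tag = 0;

	while ((used_mask & bit) && tag < max) {
		bit <<= 1;
		tag++;
	}
	return tag;
}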
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 19338e0ba2c..cbb20b13b22 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
+#include <linux/bitops.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include "fnic_io.h"
@@ -49,7 +50,6 @@
/*
* Tag bits used for special requests.
*/
-#define BIT(nr) (1UL << (nr))
#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
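
Both the bfa and fnic hunks above drop driver-local BIT() macros in favor of the generic one from <linux/bitops.h>; a tiny illustration with invented flag names follows.

#include <linux/bitops.h>
#include <linux/types.h>

/* Invented flag layout, for illustration only. */
#define EXAMPLE_TAG_ABORT	BIT(30)
#define EXAMPLE_TAG_DEV_RST	BIT(29)
#define EXAMPLE_TAG_MASK	(BIT(24) - 1)

static inline u32 example_strip_flags(u32 tag)
{
	return tag & EXAMPLE_TAG_MASK;	/* drop the flag bits, keep the tag id */
}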
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 75585a52c88..427a56d3117 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -285,9 +285,12 @@ static int __init do_DTC3181E_setup(char *str)
int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
{
static int current_override = 0;
- int count, i;
+ int count;
unsigned int *ports;
+#ifndef SCSI_G_NCR5380_MEM
+ int i;
unsigned long region_size = 16;
+#endif
static unsigned int __initdata ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
@@ -296,7 +299,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
};
int flags = 0;
struct Scsi_Host *instance;
-#ifdef CONFIG_SCSI_G_NCR5380_MEM
+#ifdef SCSI_G_NCR5380_MEM
unsigned long base;
void __iomem *iomem;
#endif
@@ -315,17 +318,15 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
overrides[0].board = BOARD_NCR53C400A;
else if (dtc_3181e != NCR_NOT_SET)
overrides[0].board = BOARD_DTC3181E;
-
+#ifndef SCSI_G_NCR5380_MEM
if (!current_override && isapnp_present()) {
struct pnp_dev *dev = NULL;
count = 0;
while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) {
if (count >= NO_OVERRIDES)
break;
- if (pnp_device_attach(dev) < 0) {
- printk(KERN_ERR "dtc436e probe: attach failed\n");
+ if (pnp_device_attach(dev) < 0)
continue;
- }
if (pnp_activate_dev(dev) < 0) {
printk(KERN_ERR "dtc436e probe: activate failed\n");
pnp_device_detach(dev);
@@ -349,7 +350,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
count++;
}
}
-
+#endif
tpnt->proc_name = "g_NCR5380";
for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
@@ -374,7 +375,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
break;
}
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
if (ports) {
/* wakeup sequence for the NCR53C400A and DTC3181E */
@@ -436,7 +437,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
#endif
instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
if (instance == NULL) {
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
release_region(overrides[current_override].NCR5380_map_name, region_size);
#else
iounmap(iomem);
@@ -446,10 +447,10 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
}
instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
instance->n_io_port = region_size;
#else
- ((struct NCR5380_hostdata *)instance->hostdata).iomem = iomem;
+ ((struct NCR5380_hostdata *)instance->hostdata)->iomem = iomem;
#endif
NCR5380_init(instance, flags);
@@ -517,10 +518,10 @@ int generic_NCR5380_release_resources(struct Scsi_Host *instance)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
release_region(instance->NCR5380_instance_name, instance->n_io_port);
#else
- iounmap(((struct NCR5380_hostdata *)instance->hostdata).iomem);
+ iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem);
release_mem_region(instance->NCR5380_instance_name, NCR5380_region_size);
#endif
@@ -590,14 +591,14 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
}
while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY);
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
{
int i;
for (i = 0; i < 128; i++)
dst[start + i] = NCR5380_read(C400_HOST_BUFFER);
}
#else
- /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ /* implies SCSI_G_NCR5380_MEM */
memcpy_fromio(dst + start, iomem + NCR53C400_host_buffer, 128);
#endif
start += 128;
@@ -610,14 +611,14 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
// FIXME - no timeout
}
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
{
int i;
for (i = 0; i < 128; i++)
dst[start + i] = NCR5380_read(C400_HOST_BUFFER);
}
#else
- /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ /* implies SCSI_G_NCR5380_MEM */
memcpy_fromio(dst + start, iomem + NCR53C400_host_buffer, 128);
#endif
start += 128;
@@ -676,13 +677,13 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
}
while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - timeout
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
{
for (i = 0; i < 128; i++)
NCR5380_write(C400_HOST_BUFFER, src[start + i]);
}
#else
- /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ /* implies SCSI_G_NCR5380_MEM */
memcpy_toio(iomem + NCR53C400_host_buffer, src + start, 128);
#endif
start += 128;
@@ -692,13 +693,13 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - no timeout
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
{
for (i = 0; i < 128; i++)
NCR5380_write(C400_HOST_BUFFER, src[start + i]);
}
#else
- /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ /* implies SCSI_G_NCR5380_MEM */
memcpy_toio(iomem + NCR53C400_host_buffer, src + start, 128);
#endif
start += 128;
@@ -938,7 +939,7 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
MODULE_LICENSE("GPL");
-
+#ifndef SCSI_G_NCR5380_MEM
static struct isapnp_device_id id_table[] __devinitdata = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,
@@ -948,7 +949,7 @@ static struct isapnp_device_id id_table[] __devinitdata = {
};
MODULE_DEVICE_TABLE(isapnp, id_table);
-
+#endif
__setup("ncr5380=", do_NCR5380_setup);
__setup("ncr53c400=", do_NCR53C400_setup);
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index df0b3f69ef6..921764c9ab2 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -63,7 +63,7 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
#define __STRVAL(x) #x
#define STRVAL(x) __STRVAL(x)
-#ifndef CONFIG_SCSI_G_NCR5380_MEM
+#ifndef SCSI_G_NCR5380_MEM
#define NCR5380_map_config port
#define NCR5380_map_type int
@@ -91,7 +91,7 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
NCR5380_map_name = (NCR5380_map_type)((instance)->NCR5380_instance_name)
#else
-/* therefore CONFIG_SCSI_G_NCR5380_MEM */
+/* therefore SCSI_G_NCR5380_MEM */
#define NCR5380_map_config memory
#define NCR5380_map_type unsigned long
@@ -114,7 +114,7 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
register void __iomem *iomem
#define NCR5380_setup(instance) \
- iomem = (((struct NCR5380_hostdata *)(instance)->hostdata).iomem)
+ iomem = (((struct NCR5380_hostdata *)(instance)->hostdata)->iomem)
#endif
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index f672d6213ee..b860d650a56 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4914,7 +4914,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
error = scsi_add_host(shp, NULL);
if (error)
- goto out_free_coal_stat;
+ goto out_free_ccb_phys;
list_add_tail(&ha->list, &gdth_instances);
gdth_timer_init();
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b5fe5..c5d0606ad09 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ /* The doorbell reset seems to cause lockups on some Smart
+ * Arrays (e.g. P410, P410i, maybe others). Until this is
+ * fixed or at least isolated, avoid the doorbell reset.
+ */
+ use_doorbell = 0;
+
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index bd96cecaa61..9f75a6d519a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -433,6 +433,9 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
{
switch (tgt->action) {
case IBMVFC_TGT_ACTION_DEL_RPORT:
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ tgt->action = action;
+ case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
@@ -2036,95 +2039,108 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
}
/**
- * ibmvfc_abort_task_set - Abort outstanding commands to the device
- * @sdev: scsi device to abort commands
- *
- * This sends an Abort Task Set to the VIOS for the specified device. This does
- * NOT send any cancel to the VIOS. That must be done separately.
+ * ibmvfc_match_rport - Match function for specified remote port
+ * @evt: ibmvfc event struct
+ * @device: device to match (rport)
*
* Returns:
- * 0 on success / other on failure
+ * 1 if event matches rport / 0 if event does not match rport
**/
-static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
{
- struct ibmvfc_host *vhost = shost_priv(sdev->host);
- struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
- struct ibmvfc_cmd *tmf;
- struct ibmvfc_event *evt, *found_evt;
- union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
- int rsp_rc = -EBUSY;
- unsigned long flags;
- int rsp_code = 0;
+ struct fc_rport *cmd_rport;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (evt->cmnd && evt->cmnd->device == sdev) {
- found_evt = evt;
- break;
- }
- }
-
- if (!found_evt) {
- if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
- sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- return 0;
- }
-
- if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
-
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
- tmf->resp.len = sizeof(tmf->rsp);
- tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
- tmf->payload_len = sizeof(tmf->iu);
- tmf->resp_len = sizeof(tmf->rsp);
- tmf->cancel_key = (unsigned long)sdev->hostdata;
- tmf->tgt_scsi_id = rport->port_id;
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
- tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
- tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
- evt->sync_iu = &rsp_iu;
-
- init_completion(&evt->comp);
- rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ if (evt->cmnd) {
+ cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
+ if (cmd_rport == rport)
+ return 1;
}
+ return 0;
+}
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
+/**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt: ibmvfc event struct
+ * @device: device to match (starget)
+ *
+ * Returns:
+ * 1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+ return 1;
+ return 0;
+}
- if (rsp_rc != 0) {
- sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
- return -EIO;
- }
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt: ibmvfc event struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && evt->cmnd->device == device)
+ return 1;
+ return 0;
+}
- sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
- wait_for_completion(&evt->comp);
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost: ibmvfc host struct
+ * @device: device to match (starget or sdev)
+ * @match: match function
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+ int (*match) (struct ibmvfc_event *, void *))
+{
+ struct ibmvfc_event *evt;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int wait;
+ unsigned long flags;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
- if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ ENTER;
+ do {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
- if (rsp_code) {
- if (fc_rsp->flags & FCP_RSP_LEN_VALID)
- rsp_code = fc_rsp->data.info.rsp_code;
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
- sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
- "flags: %x fcp_rsp: %x, scsi_status: %x\n",
- ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
- fc_rsp->scsi_status);
- rsp_rc = -EIO;
- } else
- sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+ if (!timeout) {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (wait)
+ dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
- spin_lock_irqsave(vhost->host->host_lock, flags);
- ibmvfc_free_event(evt);
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- return rsp_rc;
+ LEAVE;
+ return SUCCESS;
}
/**
@@ -2212,88 +2228,130 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
/**
- * ibmvfc_match_target - Match function for specified target
+ * ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
- * @device: device to match (starget)
+ * @key: cancel key to match
*
* Returns:
- * 1 if event matches starget / 0 if event does not match starget
+ * 1 if event matches key / 0 if event does not match key
**/
-static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
{
- if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
- return 1;
- return 0;
-}
+ unsigned long cancel_key = (unsigned long)key;
-/**
- * ibmvfc_match_lun - Match function for specified LUN
- * @evt: ibmvfc event struct
- * @device: device to match (sdev)
- *
- * Returns:
- * 1 if event matches sdev / 0 if event does not match sdev
- **/
-static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
-{
- if (evt->cmnd && evt->cmnd->device == device)
+ if (evt->crq.format == IBMVFC_CMD_FORMAT &&
+ evt->iu.cmd.cancel_key == cancel_key)
return 1;
return 0;
}
/**
- * ibmvfc_wait_for_ops - Wait for ops to complete
- * @vhost: ibmvfc host struct
- * @device: device to match (starget or sdev)
- * @match: match function
+ * ibmvfc_abort_task_set - Abort outstanding commands to the device
+ * @sdev: scsi device to abort commands
+ *
+ * This sends an Abort Task Set to the VIOS for the specified device. This does
+ * NOT send any cancel to the VIOS. That must be done separately.
*
* Returns:
- * SUCCESS / FAILED
+ * 0 on success / other on failure
**/
-static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
- int (*match) (struct ibmvfc_event *, void *))
+static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
- struct ibmvfc_event *evt;
- DECLARE_COMPLETION_ONSTACK(comp);
- int wait;
- unsigned long flags;
- signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_cmd *tmf;
+ struct ibmvfc_event *evt, *found_evt;
+ union ibmvfc_iu rsp_iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ int rc, rsp_rc = -EBUSY;
+ unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
+ int rsp_code = 0;
- ENTER;
- do {
- wait = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = &comp;
- wait++;
- }
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
}
+ }
+
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
- if (wait) {
- timeout = wait_for_completion_timeout(&comp, timeout);
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
- if (!timeout) {
- wait = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = NULL;
- wait++;
- }
- }
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- if (wait)
- dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
- LEAVE;
- return wait ? FAILED : SUCCESS;
- }
+ tmf = &evt->iu.cmd;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+ tmf->resp.len = sizeof(tmf->rsp);
+ tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+ tmf->payload_len = sizeof(tmf->iu);
+ tmf->resp_len = sizeof(tmf->rsp);
+ tmf->cancel_key = (unsigned long)sdev->hostdata;
+ tmf->tgt_scsi_id = rport->port_id;
+ int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+ tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
+ evt->sync_iu = &rsp_iu;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
+ timeout = wait_for_completion_timeout(&evt->comp, timeout);
+
+ if (!timeout) {
+ rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+ if (!rc) {
+ rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+ if (rc == SUCCESS)
+ rc = 0;
}
- } while (wait);
- LEAVE;
- return SUCCESS;
+ if (rc) {
+ sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
+ ibmvfc_reset_host(vhost);
+ rsp_rc = 0;
+ goto out;
+ }
+ }
+
+ if (rsp_iu.cmd.status)
+ rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+ if (rsp_code) {
+ if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = fc_rsp->data.info.rsp_code;
+
+ sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+ ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+ rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ fc_rsp->scsi_status);
+ rsp_rc = -EIO;
+ } else
+ sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+
+out:
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rsp_rc;
}
/**
@@ -2351,18 +2409,6 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
}
/**
- * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
- * @sdev: scsi device struct
- * @data: return code
- *
- **/
-static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
-{
- unsigned long *rc = data;
- *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-}
-
-/**
* ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
* @sdev: scsi device struct
* @data: return code
@@ -2375,18 +2421,6 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
}
/**
- * ibmvfc_dev_abort_all - Device iterated abort task set function
- * @sdev: scsi device struct
- * @data: return code
- *
- **/
-static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
-{
- unsigned long *rc = data;
- *rc |= ibmvfc_abort_task_set(sdev);
-}
-
-/**
* ibmvfc_eh_target_reset_handler - Reset the target
* @cmd: scsi command struct
*
@@ -2440,19 +2474,22 @@ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
**/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
- struct scsi_target *starget = to_scsi_target(&rport->dev);
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct Scsi_Host *shost = rport_to_shost(rport);
struct ibmvfc_host *vhost = shost_priv(shost);
- unsigned long cancel_rc = 0;
- unsigned long abort_rc = 0;
- int rc = FAILED;
+ struct fc_rport *dev_rport;
+ struct scsi_device *sdev;
+ unsigned long rc;
ENTER;
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
- starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
+ shost_for_each_device(sdev, shost) {
+ dev_rport = starget_to_rport(scsi_target(sdev));
+ if (dev_rport != rport)
+ continue;
+ ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+ ibmvfc_abort_task_set(sdev);
+ }
- if (!cancel_rc && !abort_rc)
- rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+ rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
@@ -4193,11 +4230,15 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
list_del(&tgt->queue);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
}
if (rport) {
@@ -4297,6 +4338,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
rport = tgt->rport;
tgt->rport = NULL;
list_del(&tgt->queue);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index d7e8dcd9065..608af394c8c 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.8"
-#define IBMVFC_DRIVER_DATE "(June 17, 2010)"
+#define IBMVFC_DRIVER_VERSION "1.0.9"
+#define IBMVFC_DRIVER_DATE "(August 5, 2010)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -38,6 +38,7 @@
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
+#define IBMVFC_ABORT_TIMEOUT 8
#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
@@ -597,6 +598,7 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
+ IBMVFC_TGT_ACTION_DELETED_RPORT,
};
struct ibmvfc_target {
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index a7714160fbc..108797761b9 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2817,7 +2817,6 @@ static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
}
cmnd->result = cblk->tastat | (cblk->hastat << 16);
- WARN_ON(cmnd == NULL);
i91u_unmap_scb(host->pci_dev, cmnd);
cmnd->scsi_done(cmnd); /* Notify system DONE */
initio_release_scb(host, cblk); /* Release SCB for current channel */
diff --git a/drivers/firmware/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index df6bff7366c..df6bff7366c 100644
--- a/drivers/firmware/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index eac4d09314e..c797f6b48f0 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1765,14 +1765,14 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
struct fcoe_dev_stats *stats;
lport = shost_priv(sc_cmd->device->host);
- spin_unlock_irq(lport->host->host_lock);
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
done(sc_cmd);
- goto out;
+ return 0;
}
+ spin_unlock_irq(lport->host->host_lock);
if (!*(struct fc_remote_port **)rport->dd_data) {
/*
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3482d5a5aed..a50aa03b8ac 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -775,6 +775,7 @@ struct lpfc_hba {
uint8_t temp_sensor_support;
/* Fields used for heart beat. */
unsigned long last_completion_time;
+ unsigned long skipped_hb;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
enum hba_temp_state over_temp_state;
@@ -817,6 +818,8 @@ struct lpfc_hba {
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
+ uint8_t fips_spec_rev;
+ uint8_t fips_level;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 868874c28f9..23ce4570833 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/gfp.h>
+#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1239,6 +1240,44 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_fips_level_show - Return the current FIPS level for the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the FIPS level of the HBA.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+}
+
+/**
+ * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the FIPS Spec revision of the HBA.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1676,6 +1715,8 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
+static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
+static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -1795,12 +1836,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
/* Validate and store the new name */
for (i=0, j=0; i < 16; i++) {
- if ((*buf >= 'a') && (*buf <= 'f'))
- j = ((j << 4) | ((*buf++ -'a') + 10));
- else if ((*buf >= 'A') && (*buf <= 'F'))
- j = ((j << 4) | ((*buf++ -'A') + 10));
- else if ((*buf >= '0') && (*buf <= '9'))
- j = ((j << 4) | (*buf++ -'0'));
+ int value;
+
+ value = hex_to_bin(*buf++);
+ if (value >= 0)
+ j = (j << 4) | value;
else
return -EINVAL;
if (i % 2) {
@@ -1888,12 +1928,11 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
/* Validate and store the new name */
for (i=0, j=0; i < 16; i++) {
- if ((*buf >= 'a') && (*buf <= 'f'))
- j = ((j << 4) | ((*buf++ -'a') + 10));
- else if ((*buf >= 'A') && (*buf <= 'F'))
- j = ((j << 4) | ((*buf++ -'A') + 10));
- else if ((*buf >= '0') && (*buf <= '9'))
- j = ((j << 4) | (*buf++ -'0'));
+ int value;
+
+ value = hex_to_bin(*buf++);
+ if (value >= 0)
+ j = (j << 4) | value;
else
return -EINVAL;
if (i % 2) {
@@ -2778,7 +2817,6 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
.attr = {
.name = "lpfc_drvr_stat_data",
.mode = S_IRUSR,
- .owner = THIS_MODULE,
},
.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
.read = sysfs_drvr_stat_data_read,
@@ -3280,7 +3318,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
# - Default will result in registering capabilities for all profiles.
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
+unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
module_param(lpfc_prot_mask, uint, 0);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
@@ -3385,6 +3423,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_iocb_hw,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
+ &dev_attr_lpfc_fips_level,
+ &dev_attr_lpfc_fips_rev,
NULL,
};
@@ -3411,6 +3451,8 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
+ &dev_attr_lpfc_fips_level,
+ &dev_attr_lpfc_fips_rev,
NULL,
};
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d521569e662..49d0cf99c24 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2724,15 +2724,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
- mbox_req->inExtWLen *
- sizeof(uint32_t);
- pmboxq->out_ext_byte_len =
- mbox_req->outExtWLen *
- sizeof(uint32_t);
- pmboxq->mbox_offset_word =
- mbox_req->mbOffset;
- pmboxq->context2 = ext;
- pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
mbox_req->outExtWLen * sizeof(uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index a11f1ae7b98..75e2e569ded 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -82,8 +82,7 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
static inline void
lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
{
- /* actually returns 1 byte past dest */
- memcpy_toio( dest, src, bytes);
+ __iowrite32_copy(dest, src, bytes);
}
static inline void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index afbed6bc31f..8d09191c327 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -600,6 +600,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /*
+ * Driver needs to re-reg VPI in order for f/w
+ * to update the MAC address.
+ */
+ lpfc_register_new_vport(phba, vport, ndlp);
+ return 0;
}
if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -801,9 +809,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
"2611 FLOGI failed on registered "
- "FCF record fcf_index:%d, trying "
- "to perform round robin failover\n",
- phba->fcf.current_rec.fcf_indx);
+ "FCF record fcf_index(%d), status: "
+ "x%x/x%x, tmo:x%x, trying to perform "
+ "round robin failover\n",
+ phba->fcf.current_rec.fcf_indx,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
/*
@@ -841,6 +852,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
@@ -1291,6 +1308,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc, keepDID = 0;
+ int put_node;
+ int put_rport;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
@@ -1379,6 +1398,28 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ /* Since we are swapping the ndlp passed in with the new one
+ * and the did has already been swapped, copy over the
+ * state and names.
+ */
+ memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ new_ndlp->nlp_state = ndlp->nlp_state;
+ /* Fix up the rport accordingly */
+ rport = ndlp->rport;
+ if (rport) {
+ rdata = rport->dd_data;
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ }
}
return new_ndlp;
}
@@ -2880,6 +2921,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 0;
if (retry) {
+ if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
+ /* Stop retrying PLOGI and FDISC if in FCF discovery */
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2849 Stop retry ELS command "
+ "x%x to remote NPORT x%x, "
+ "Data: x%x x%x\n", cmd, did,
+ cmdiocb->retry, delay);
+ return 0;
+ }
+ }
/* Retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -6076,8 +6128,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0915 Register VPI failed: 0x%x\n",
- mb->mbxStatus);
+ "0915 Register VPI failed : Status: x%x"
+ " upd bit: x%x \n", mb->mbxStatus,
+ mb->un.varRegVpi.upd);
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ mb->un.varRegVpi.upd)
+ goto mbox_err_exit;
switch (mb->mbxStatus) {
case 0x11: /* unsupported feature */
@@ -6142,7 +6198,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else
lpfc_do_scr_ns_plogi(phba, vport);
}
-
+mbox_err_exit:
/* Now, we decrement the ndlp reference count held for this
* callback function
*/
@@ -6387,6 +6443,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
spin_unlock_irq(shost->host_lock);
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /*
+ * Driver needs to re-reg VPI in order for f/w
+ * to update the MAC address.
+ */
+ lpfc_register_new_vport(phba, vport, ndlp);
+ return;
}
if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0639c994349..1f62ea8c165 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -588,7 +588,7 @@ lpfc_work_done(struct lpfc_hba *phba)
(status &
HA_RXMASK));
}
- if (pring->txq_cnt)
+ if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
@@ -1852,8 +1852,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
/* If in fast failover, mark it's completed */
- phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
- FCF_DISCOVERY);
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2836 The new FCF record (x%x) "
@@ -2651,7 +2650,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2778 Start FCF table scan at linkup\n");
-
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
@@ -2660,6 +2658,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
spin_unlock_irq(&phba->hbalock);
goto out;
}
+ /* Reset FCF roundrobin bmask for new discovery */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
}
return;
@@ -5097,6 +5098,7 @@ static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
@@ -5104,6 +5106,9 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
}
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
@@ -5285,6 +5290,10 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
+
+ /* Reset FCF roundrobin bmask for new discovery */
+ memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index f5dbf2be3ea..1676f61291e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2291,7 +2291,8 @@ typedef struct {
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1;
- uint32_t rsvd2:8;
+ uint32_t rsvd2:7;
+ uint32_t upd:1;
uint32_t sid:24;
uint32_t wwn[2];
uint32_t rsvd5;
@@ -2300,7 +2301,8 @@ typedef struct {
#else /* __LITTLE_ENDIAN */
uint32_t rsvd1;
uint32_t sid:24;
- uint32_t rsvd2:8;
+ uint32_t upd:1;
+ uint32_t rsvd2:7;
uint32_t wwn[2];
uint32_t rsvd5;
uint16_t vpi;
@@ -2806,11 +2808,15 @@ typedef struct {
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd7 : 16; /* Reserved */
+ uint32_t fips_rev : 3; /* FIPS Spec Revision */
+ uint32_t fips_level : 4; /* FIPS Level */
+ uint32_t sec_err : 9; /* security crypto error */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
- uint32_t rsvd7 : 16; /* Reserved */
+ uint32_t sec_err : 9; /* security crypto error */
+ uint32_t fips_level : 4; /* FIPS Level */
+ uint32_t fips_rev : 3; /* FIPS Spec Revision */
#endif
} CONFIG_PORT_VAR;
@@ -3441,63 +3447,63 @@ struct sli3_bg_fields {
static inline uint32_t
lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >>
+ return (bgstat & BGS_BIDIR_BG_PROF_MASK) >>
BGS_BIDIR_BG_PROF_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
+ return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
BGS_BIDIR_ERR_COND_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_bg_prof(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >>
+ return (bgstat & BGS_BG_PROFILE_MASK) >>
BGS_BG_PROFILE_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_invalid_prof(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >>
+ return (bgstat & BGS_INVALID_PROF_MASK) >>
BGS_INVALID_PROF_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >>
+ return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >>
BGS_UNINIT_DIF_BLOCK_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >>
+ return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >>
BGS_HI_WATER_MARK_PRESENT_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_reftag_err(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >>
+ return (bgstat & BGS_REFTAG_ERR_MASK) >>
BGS_REFTAG_ERR_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_apptag_err(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >>
+ return (bgstat & BGS_APPTAG_ERR_MASK) >>
BGS_APPTAG_ERR_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_guard_err(uint32_t bgstat)
{
- return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >>
+ return (bgstat & BGS_GUARD_ERR_MASK) >>
BGS_GUARD_ERR_SHIFT;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2786ee3b605..da9ba06ad58 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1032,27 +1032,46 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If there is no heart beat outstanding, issue a heartbeat command */
if (phba->cfg_enable_hba_heartbeat) {
if (!phba->hb_outstanding) {
- pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
- if (!pmboxq) {
- mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
- return;
- }
+ if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
+ (list_empty(&psli->mboxq))) {
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!pmboxq) {
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
- lpfc_heart_beat(phba, pmboxq);
- pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
- pmboxq->vport = phba->pport;
- retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq,
+ MBX_NOWAIT);
+
+ if (retval != MBX_BUSY &&
+ retval != MBX_SUCCESS) {
+ mempool_free(pmboxq,
+ phba->mbox_mem_pool);
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
+ phba->skipped_hb = 0;
+ phba->hb_outstanding = 1;
+ } else if (time_before_eq(phba->last_completion_time,
+ phba->skipped_hb)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2857 Last completion time not "
+ " updated in %d ms\n",
+ jiffies_to_msecs(jiffies
+ - phba->last_completion_time));
+ } else
+ phba->skipped_hb = jiffies;
- if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
- mempool_free(pmboxq, phba->mbox_mem_pool);
- mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
- return;
- }
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
- phba->hb_outstanding = 1;
return;
} else {
/*
@@ -3281,10 +3300,10 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
if (!ndlp)
return 0;
}
- if (phba->pport->port_state <= LPFC_FLOGI)
+ if (phba->pport->port_state < LPFC_FLOGI)
return NULL;
/* If virtual link is not yet instantiated ignore CVL */
- if (vport->port_state <= LPFC_FDISC)
+ if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
return NULL;
shost = lpfc_shost_from_vport(vport);
if (!shost)
@@ -3357,21 +3376,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
"evt_tag:x%x, fcf_index:x%x\n",
acqe_fcoe->event_tag,
acqe_fcoe->index);
- /* If the FCF discovery is in progress, do nothing. */
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_DISC_INPROGRESS) {
- spin_unlock_irq(&phba->hbalock);
- break;
- }
- /* If fast FCF failover rescan event is pending, do nothing */
- if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
- spin_unlock_irq(&phba->hbalock);
- break;
- }
- spin_unlock_irq(&phba->hbalock);
-
- if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
- !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
/*
* During period of FCF discovery, read the FCF
* table record indexed by the event to update
@@ -3385,13 +3390,26 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
acqe_fcoe->index);
rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
}
- /* If the FCF has been in discovered state, do nothing. */
+
+ /* If the FCF discovery is in progress, do nothing. */
spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ /* If fast FCF failover rescan event is pending, do nothing */
+ if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+
+ /* If the FCF has been in discovered state, do nothing. */
if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
spin_unlock_irq(&phba->hbalock);
break;
}
spin_unlock_irq(&phba->hbalock);
+
/* Otherwise, scan the entire FCF table and re-discover SAN */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2770 Start FCF table scan due to new FCF "
@@ -3417,13 +3435,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
"2549 FCF disconnected from network index 0x%x"
" tag 0x%x\n", acqe_fcoe->index,
acqe_fcoe->event_tag);
- /* If the event is not for currently used fcf do nothing */
- if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
- break;
- /* We request port to rediscover the entire FCF table for
- * a fast recovery from case that the current FCF record
- * is no longer valid if we are not in the middle of FCF
- * failover process already.
+ /*
+ * If we are in the middle of the FCF failover process, clear
+ * the corresponding FCF bit in the roundrobin bitmap.
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
@@ -3432,9 +3446,23 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
break;
}
+ spin_unlock_irq(&phba->hbalock);
+
+ /* If the event is not for currently used fcf do nothing */
+ if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
+ break;
+
+ /*
+ * Otherwise, request the port to rediscover the entire FCF
+ * table for a fast recovery in case the current FCF record
+ * is no longer valid, as we are not already in the middle of
+ * the FCF failover process.
+ */
+ spin_lock_irq(&phba->hbalock);
/* Mark the fast failover process in progress */
phba->fcf.fcf_flag |= FCF_DEAD_DISC;
spin_unlock_irq(&phba->hbalock);
+
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2771 Start FCF fast failover process due to "
"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
@@ -3454,12 +3482,16 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
* as a link down to FCF registration.
*/
lpfc_sli4_fcf_dead_failthrough(phba);
- } else
- /* Handling fast FCF failover to a DEAD FCF event
- * is considered equalivant to receiving CVL to all
- * vports.
+ } else {
+ /* Reset FCF roundrobin bmask for new discovery */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ /*
+ * Handling fast FCF failover to a DEAD FCF event is
+ * considered equivalent to receiving CVL to all vports.
*/
lpfc_sli4_perform_all_vport_cvl(phba);
+ }
break;
case LPFC_FCOE_EVENT_TYPE_CVL:
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -3534,7 +3566,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
* the current registered FCF entry.
*/
lpfc_retry_pport_discovery(phba);
- }
+ } else
+ /*
+ * Reset FCF roundrobin bmask for new
+ * discovery.
+ */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
}
break;
default:
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 9c2c7c7140c..0dfa310cd60 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -815,9 +815,15 @@ void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_hba *phba = vport->phba;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-
+ /*
+ * Set the re-reg VPI bit for f/w to update the MAC address.
+ */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
+ mb->un.varRegVpi.upd = 1;
mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
mb->un.varRegVpi.sid = vport->fc_myDID;
mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c818a725596..2e51aa6b45b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1325,7 +1325,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
pde5->reftag = reftag;
- /* Endian convertion if necessary for PDE5 */
+ /* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
pde5->reftag = cpu_to_le32(pde5->reftag);
@@ -1347,7 +1347,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_ai, pde6, 1);
bf_set(pde6_apptagval, pde6, apptagval);
- /* Endian convertion if necessary for PDE6 */
+ /* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
pde6->word1 = cpu_to_le32(pde6->word1);
pde6->word2 = cpu_to_le32(pde6->word2);
@@ -1459,7 +1459,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
pde5->reftag = reftag;
- /* Endian convertion if necessary for PDE5 */
+ /* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
pde5->reftag = cpu_to_le32(pde5->reftag);
@@ -1479,7 +1479,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_ai, pde6, 1);
bf_set(pde6_apptagval, pde6, apptagval);
- /* Endian convertion if necessary for PDE6 */
+ /* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
pde6->word1 = cpu_to_le32(pde6->word1);
pde6->word2 = cpu_to_le32(pde6->word2);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e758eae0d0f..fb8905f893f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1046,7 +1046,7 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
psli->last_iotag);
@@ -3914,7 +3914,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
- LPFC_SLI3_BG_ENABLED);
+ LPFC_SLI3_BG_ENABLED |
+ LPFC_SLI3_DSS_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
@@ -3949,8 +3950,23 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else
phba->max_vpi = 0;
- if (pmb->u.mb.un.varCfgPort.gdss)
+ phba->fips_level = 0;
+ phba->fips_spec_rev = 0;
+ if (pmb->u.mb.un.varCfgPort.gdss) {
phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+ phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
+ phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2850 Security Crypto Active. FIPS x%d "
+ "(Spec Rev: x%d)",
+ phba->fips_level, phba->fips_spec_rev);
+ }
+ if (pmb->u.mb.un.varCfgPort.sec_err) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2856 Config Port Security Crypto "
+ "Error: x%x ",
+ pmb->u.mb.un.varCfgPort.sec_err);
+ }
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
@@ -9040,6 +9056,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
switch (bf_get(lpfc_cqe_code, &cqevt)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ/RQ complete event */
+ phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
(struct lpfc_wcqe_complete *)&cqevt);
break;
@@ -9050,11 +9067,13 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
+ phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&cqevt);
break;
case CQE_CODE_RECEIVE:
/* Process the RQ event */
+ phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_rcqe(phba,
(struct lpfc_rcqe *)&cqevt);
break;
@@ -9276,7 +9295,6 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_wcqe_release wcqe;
bool workposted = false;
- unsigned long iflag;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
@@ -9285,9 +9303,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ complete event */
- spin_lock_irqsave(&phba->hbalock, iflag);
phba->last_completion_time = jiffies;
- spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli4_fp_handle_fcp_wcqe(phba,
(struct lpfc_wcqe_complete *)&wcqe);
break;
@@ -9298,6 +9314,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
+ phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&wcqe);
break;
@@ -12278,12 +12295,9 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
- /* Reset FCF round robin index bmask for new scan */
- if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ /* Reset eligible FCF count for new scan */
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
phba->fcf.eligible_fcf_cnt = 0;
- }
error = 0;
}
fail_fcf_scan:
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index d28830af71d..61afb3420a9 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.15"
+#define LPFC_DRIVER_VERSION "8.3.16"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index ee4b6914667..e88bbdde49c 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -716,7 +716,7 @@ static int _osd_req_list_objects(struct osd_request *or,
return PTR_ERR(bio);
}
- bio->bi_rw &= ~(1 << BIO_RW);
+ bio->bi_rw &= ~REQ_WRITE;
or->in.bio = bio;
or->in.total_bytes = bio->bi_size;
return 0;
@@ -814,7 +814,7 @@ void osd_req_write(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
WARN_ON(or->out.bio || or->out.total_bytes);
- WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
+ WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
or->out.bio = bio;
or->out.total_bytes = len;
}
@@ -829,7 +829,7 @@ int osd_req_write_kern(struct osd_request *or,
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+ bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
osd_req_write(or, obj, offset, bio, len);
return 0;
}
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
+ WARN_ON(bio->bi_rw & REQ_WRITE);
or->in.bio = bio;
or->in.total_bytes = len;
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index d64b7178fa0..278b352ae78 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5868,7 +5868,8 @@ static int osst_probe(struct device *dev)
}
/* find a free minor number */
- for (i=0; os_scsi_tapes[i] && i<osst_max_dev; i++);
+ for (i = 0; i < osst_max_dev && os_scsi_tapes[i]; i++)
+ ;
if(i >= osst_max_dev) panic ("Scsi_devices corrupt (osst)");
dev_num = i;
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 9d70aef9922..61f49bdcc0c 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -49,7 +49,6 @@
#include <scsi/scsi_host.h>
#include "aha152x.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -101,9 +100,8 @@ static int aha152x_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.NumPorts1 = 0x20;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 10;
+ link->resource[0]->end = 0x20;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -131,15 +129,16 @@ static int aha152x_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
+ p_dev->io_lines = 10;
/* For New Media T&J, look for a SCSI window */
if (cfg->io.win[0].len >= 0x20)
- p_dev->io.BasePort1 = cfg->io.win[0].base;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
else if ((cfg->io.nwin > 1) &&
(cfg->io.win[1].len >= 0x20))
- p_dev->io.BasePort1 = cfg->io.win[1].base;
+ p_dev->resource[0]->start = cfg->io.win[1].base;
if ((cfg->io.nwin > 0) &&
- (p_dev->io.BasePort1 < 0xffff)) {
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ (p_dev->resource[0]->start < 0xffff)) {
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -EINVAL;
@@ -168,7 +167,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
/* Set configuration options for the aha152x driver */
memset(&s, 0, sizeof(s));
s.conf = "PCMCIA setup";
- s.io_port = link->io.BasePort1;
+ s.io_port = link->resource[0]->start;
s.irq = link->irq;
s.scsiid = host_id;
s.reconnect = reconnect;
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 21b141151df..13dbe5c4849 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -46,7 +46,6 @@
#include <scsi/scsi_host.h>
#include "fdomain.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -84,9 +83,8 @@ static int fdomain_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.NumPorts1 = 0x10;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 10;
+ link->resource[0]->end = 0x10;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -113,8 +111,9 @@ static int fdomain_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- return pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ return pcmcia_request_io(p_dev);
}
@@ -138,10 +137,10 @@ static int fdomain_config(struct pcmcia_device *link)
goto failed;
/* A bad hack... */
- release_region(link->io.BasePort1, link->io.NumPorts1);
+ release_region(link->resource[0]->start, resource_size(link->resource[0]));
/* Set configuration options for the fdomain driver */
- sprintf(str, "%d,%d", link->io.BasePort1, link->irq);
+ sprintf(str, "%d,%d", (unsigned int) link->resource[0]->start, link->irq);
fdomain_setup(str);
host = __fdomain_16x0_detect(&fdomain_driver_template);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 0f0e112c3f8..dd9b40306f3 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -47,7 +47,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -1559,9 +1558,8 @@ static int nsp_cs_probe(struct pcmcia_device *link)
nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info);
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 0x10;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 10; /* not used */
+ link->resource[0]->end = 0x10;
+ link->resource[0]->flags = IO_DATA_PATH_WIDTH_AUTO;
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
@@ -1642,29 +1640,27 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags =
+ p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
goto next_entry;
}
if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- memreq_t map;
cistpl_mem_t *mem =
(cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
cfg_mem->req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
@@ -1676,8 +1672,8 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
cfg_mem->req.AccessSpeed = 0;
if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0)
goto next_entry;
- map.Page = 0; map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
+ if (pcmcia_map_mem_page(p_dev, p_dev->win,
+ mem->win[0].card_addr) != 0)
goto next_entry;
cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size);
@@ -1720,17 +1716,19 @@ static int nsp_cs_config(struct pcmcia_device *link)
goto cs_failed;
if (free_ports) {
- if (link->io.BasePort1) {
- release_region(link->io.BasePort1, link->io.NumPorts1);
+ if (link->resource[0]) {
+ release_region(link->resource[0]->start,
+ resource_size(link->resource[0]));
}
- if (link->io.BasePort2) {
- release_region(link->io.BasePort2, link->io.NumPorts2);
+ if (link->resource[1]) {
+ release_region(link->resource[1]->start,
+ resource_size(link->resource[1]));
}
}
/* Set port and IRQ */
- data->BaseAddress = link->io.BasePort1;
- data->NumAddress = link->io.NumPorts1;
+ data->BaseAddress = link->resource[0]->start;
+ data->NumAddress = resource_size(link->resource[0]);
data->IrqNumber = link->irq;
nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
@@ -1765,13 +1763,10 @@ static int nsp_cs_config(struct pcmcia_device *link)
if (link->conf.Attributes & CONF_ENABLE_IRQ) {
printk(", irq %d", link->irq);
}
- if (link->io.NumPorts1) {
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- }
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->resource[0])
+ printk(", io %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
if (link->win)
printk(", mem 0x%06lx-0x%06lx", cfg_mem->req.Base,
cfg_mem->req.Base+cfg_mem->req.Size-1);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index f0fc6baed9f..eb775f1a523 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -48,7 +48,6 @@
#include <scsi/scsi_host.h>
#include "../qlogicfas408.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -157,9 +156,8 @@ static int qlogic_probe(struct pcmcia_device *link)
return -ENOMEM;
info->p_dev = link;
link->priv = info;
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 10;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -186,13 +184,14 @@ static int qlogic_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
- if (p_dev->io.BasePort1 == 0)
+ if (p_dev->resource[0]->start == 0)
return -ENODEV;
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int qlogic_config(struct pcmcia_device * link)
@@ -216,18 +215,18 @@ static int qlogic_config(struct pcmcia_device * link)
if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
/* set ATAcmd */
- outb(0xb4, link->io.BasePort1 + 0xd);
- outb(0x24, link->io.BasePort1 + 0x9);
- outb(0x04, link->io.BasePort1 + 0xd);
+ outb(0xb4, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
}
/* The KXL-810AN has a bigger IO port window */
- if (link->io.NumPorts1 == 32)
+ if (resource_size(link->resource[0]) == 32)
host = qlogic_detect(&qlogicfas_driver_template, link,
- link->io.BasePort1 + 16, link->irq);
+ link->resource[0]->start + 16, link->irq);
else
host = qlogic_detect(&qlogicfas_driver_template, link,
- link->io.BasePort1, link->irq);
+ link->resource[0]->start, link->irq);
if (!host) {
printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
@@ -269,9 +268,9 @@ static int qlogic_resume(struct pcmcia_device *link)
if ((info->manf_id == MANFID_MACNICA) ||
(info->manf_id == MANFID_PIONEER) ||
(info->manf_id == 0x0098)) {
- outb(0x80, link->io.BasePort1 + 0xd);
- outb(0x24, link->io.BasePort1 + 0x9);
- outb(0x04, link->io.BasePort1 + 0xd);
+ outb(0x80, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
}
/* Ugggglllyyyy!!! */
qlogicfas408_bus_reset(NULL);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index a5116417117..321e390c912 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -71,7 +71,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -691,13 +690,14 @@ static int SYM53C500_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
- if (p_dev->io.BasePort1 == 0)
+ if (p_dev->resource[0]->start == 0)
return -ENODEV;
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int
@@ -734,9 +734,9 @@ SYM53C500_config(struct pcmcia_device *link)
(info->manf_id == MANFID_PIONEER) ||
(info->manf_id == 0x0098)) {
/* set ATAcmd */
- outb(0xb4, link->io.BasePort1 + 0xd);
- outb(0x24, link->io.BasePort1 + 0x9);
- outb(0x04, link->io.BasePort1 + 0xd);
+ outb(0xb4, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
}
/*
@@ -749,7 +749,7 @@ SYM53C500_config(struct pcmcia_device *link)
* 0x130, 0x230, 0x280, 0x290,
* 0x320, 0x330, 0x340, 0x350
*/
- port_base = link->io.BasePort1;
+ port_base = link->resource[0]->start;
irq_level = link->irq;
DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n",
@@ -822,15 +822,15 @@ static int sym53c500_resume(struct pcmcia_device *link)
if ((info->manf_id == MANFID_MACNICA) ||
(info->manf_id == MANFID_PIONEER) ||
(info->manf_id == 0x0098)) {
- outb(0x80, link->io.BasePort1 + 0xd);
- outb(0x24, link->io.BasePort1 + 0x9);
- outb(0x04, link->io.BasePort1 + 0xd);
+ outb(0x80, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
}
/*
* If things don't work after a "resume",
* this is a good place to start looking.
*/
- SYM53C500_int_host_reset(link->io.BasePort1);
+ SYM53C500_int_host_reset(link->resource[0]->start);
return 0;
}
@@ -859,9 +859,8 @@ SYM53C500_probe(struct pcmcia_device *link)
return -ENOMEM;
info->p_dev = link;
link->priv = info;
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 10;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 58d1134935e..9793aa6afb1 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4199,8 +4199,10 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
+ kfree(fw_control_context);
return rc;
+ }
ccb = &pm8001_ha->ccb_info[tag];
ccb->ccb_tag = tag;
ccb->fw_control_context = fw_control_context;
@@ -4276,8 +4278,10 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
ioctl_payload->length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
+ kfree(fw_control_context);
return rc;
+ }
ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
ccb->ccb_tag = tag;
@@ -4387,6 +4391,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
fw_control->len, 0) != 0) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Mem alloc failure\n"));
+ kfree(fw_control_context);
return -ENOMEM;
}
}
@@ -4401,8 +4406,10 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
fw_control_context->virtAddr = buffer;
fw_control_context->len = fw_control->len;
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
+ kfree(fw_control_context);
return rc;
+ }
ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
ccb->ccb_tag = tag;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238cc794..114bc5a8117 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
qla24xx_disable_vp(vha);
+ vha->flags.delete_progress = 1;
+
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
- qla2x00_free_fcports(vha);
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+ " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+ }
qla24xx_deallocate_vp_id(vha);
+ /* There should be no pending activities on the vha at this point */
+ DEBUG(msleep(random32()%10)); /* Just to see if something falls on
+ * the net we have placed below */
+
+ BUG_ON(atomic_read(&vha->vref_count));
+
+ qla2x00_free_fcports(vha);
+
mutex_lock(&ha->vport_lock);
ha->cur_vport_count--;
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->timer_active) {
- qla2x00_vp_stop_timer(vha);
- DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
- "has stopped\n",
- vha->host_no, vha->vp_idx, vha));
- }
-
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a25eb..b74e6b5743d 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
/*
* Macros use for debugging the driver.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea0c7a..d2a4e153070 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
#define MBX_UPDATE_FLASH_ACTIVE 3
struct mutex vport_lock; /* Virtual port synchronization */
+ spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
uint32_t management_server_logged_in :1;
uint32_t process_response_queue :1;
uint32_t difdix_supported:1;
+ uint32_t delete_progress:1;
} flags;
atomic_t loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
+
+ atomic_t vref_count;
} scsi_qla_host_t;
/*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
+ atomic_dec(&__vha->vref_count); \
+} while (0)
+
+
#define qla_printk(level, ha, format, arg...) \
dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
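The QLA_VHA_MARK_BUSY/QLA_VHA_MARK_NOT_BUSY macros added above implement a reference-count guard: a caller first takes a reference, then bails out if vport deletion has already begun, while the delete path (qla24xx_deallocate_vp_id in the qla_mid.c hunks below) waits for the count to drain before unlinking the vport. A compact user-space analogue of the same protocol, with C11 seq_cst atomics standing in for atomic_t and the explicit mb() (illustrative only, not driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vport {
        atomic_int vref_count;          /* outstanding users */
        atomic_bool delete_progress;    /* set once teardown begins */
};

/* Mirrors QLA_VHA_MARK_BUSY: take a reference, then re-check the flag. */
static bool mark_busy(struct vport *vp)
{
        atomic_fetch_add(&vp->vref_count, 1);
        if (atomic_load(&vp->delete_progress)) {
                atomic_fetch_sub(&vp->vref_count, 1);
                return false;           /* bail: deletion already started */
        }
        return true;
}

/* Mirrors QLA_VHA_MARK_NOT_BUSY. */
static void mark_not_busy(struct vport *vp)
{
        atomic_fetch_sub(&vp->vref_count, 1);
}

int main(void)
{
        struct vport vp;

        atomic_init(&vp.vref_count, 0);
        atomic_init(&vp.delete_progress, false);

        if (mark_busy(&vp)) {
                /* ... issue work against the vport ... */
                mark_not_busy(&vp);
        }
        atomic_store(&vp.delete_progress, true);
        printf("busy after delete started: %d\n", mark_busy(&vp));
        return 0;
}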
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2619b..9c383baebe2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->fcport->vha;
del_timer_sync(&iocb->timer);
kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+ QLA_VHA_MARK_NOT_BUSY(vha);
}
inline srb_t *
qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
unsigned long tmo)
{
- srb_t *sp;
+ srb_t *sp = NULL;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
struct srb_iocb *iocb;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
iocb->timer.function = qla2x00_ctx_sp_timeout;
add_timer(&iocb->timer);
done:
+ if (!sp)
+ QLA_VHA_MARK_NOT_BUSY(vha);
return sp;
}
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
qla2x00_init_response_q_entries(rsp);
}
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Clear RSCN queue. */
list_for_each_entry(vp, &ha->vp_list, list) {
vp->rscn_in_ptr = 0;
vp->rscn_out_ptr = 0;
}
+
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
/* Bypass virtual ports of the same host. */
found = 0;
if (ha->num_vhosts) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (new_fcport->d_id.b24 == vp->d_id.b24) {
found = 1;
break;
}
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (found)
continue;
}
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
struct scsi_qla_host *tvp;
+ unsigned long flags = 0;
rval = QLA_SUCCESS;
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
/* Check for loop ID being already in use. */
found = 0;
fcport = NULL;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
list_for_each_entry(fcport, &vp->vp_fcports, list) {
if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
if (found)
break;
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
/* If not in use then it is free to use. */
if (!found) {
@@ -3791,14 +3814,27 @@ void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
fc_port_t *fcport;
- struct scsi_qla_host *tvp, *vha;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = base_vha->hw;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Go with deferred removal of rport references. */
- list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
- list_for_each_entry(fcport, &vha->vp_fcports, list)
+ list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport && fcport->drport &&
- atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_rport_del(fcport);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ }
+ atomic_dec(&vha->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
void
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ unsigned long flags;
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
- list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_mark_all_devices_lost(vp, 0);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
struct req_que *req = ha->req_q_map[0];
+ unsigned long flags;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
DEBUG(printk(KERN_INFO
"qla2x00_abort_isp(%ld): succeeded.\n",
vha->host_no));
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else {
qla_printk(KERN_INFO, ha,
"qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
+ unsigned long flags;
status = qla2x00_init_rings(vha);
if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
DEBUG(printk(KERN_INFO
"qla82xx_restart_isp(%ld): succeeded.\n",
vha->host_no));
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else {
qla_printk(KERN_INFO, ha,
"qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba70e12..28f65be19da 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_ERROR << 16;
break;
}
- } else if (!lscsi_status) {
+ } else {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp)));
- cp->result = DID_ERROR << 16;
- break;
+ cp->result = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
}
cp->result = DID_OK << 16 | lscsi_status;
logit = 0;
+check_scsi_status:
/*
* Check to see if SCSI Status is non zero. If so report SCSI
* Status.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c6948..a595ec8264f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
- scsi_qla_host_t *tvp;
+ unsigned long flags;
if (rptid_entry->entry_status != 0)
return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
if (vp_idx == vp->vp_idx)
break;
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!vp)
return;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0ca78..2b69392a71a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
uint32_t vp_id;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
/* Find an empty slot and assign an vp_id */
mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
vha->vp_idx = vp_id;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
mutex_unlock(&ha->vport_lock);
return vp_id;
}
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
mutex_lock(&ha->vport_lock);
+ /*
+ * Wait for all pending activities to finish before removing vport from
+ * the list.
+ * Lock needs to be held for safe removal from the list (it
+ * ensures no active vp_list traversal while the vport is removed
+ * from the queue)
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ while (atomic_read(&vha->vref_count)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ msleep(500);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ list_del(&vha->list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
vp_id = vha->vp_idx;
ha->num_vhosts--;
clear_bit(vp_id, ha->vp_idx_map);
- list_del(&vha->list);
+
mutex_unlock(&ha->vport_lock);
}
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
scsi_qla_host_t *vha;
struct scsi_qla_host *tvha;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Locate matching device in database. */
list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
- if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+ if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
return vha;
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
return NULL;
}
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
+ /*
+ * !!! NOTE !!!
+ * If this function is called in contexts other than vp create, disable,
+ * or delete, please make sure it is synchronized with the
+ * delete thread.
+ */
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
- atomic_set(&fcport->state, FCS_DEVICE_DEAD);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
atomic_set(&fcport->state, FCS_UNCONFIGURED);
}
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
- scsi_qla_host_t *vha, *tvha;
+ scsi_qla_host_t *vha;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
+ unsigned long flags;
- list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
if (vha->vp_idx) {
+ atomic_inc(&vha->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
switch (mb[0]) {
case MBA_LIP_OCCURRED:
case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
qla2x00_async_event(vha, rsp, mb);
break;
}
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vha->vref_count);
}
i++;
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
- struct scsi_qla_host *tvp;
+ unsigned long flags = 0;
if (vha->vp_idx)
return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
if (!(ha->current_topology & ISP_CFG_F))
return;
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
ret = qla2x00_do_dpc_vp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a6e19..0a71cc71eab 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
sufficient_dsds:
req_cnt = 1;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!sp->ctx) {
DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
qla2xxx_wake_dpc(vha);
+ ha->flags.fw_hung = 1;
if (ha->flags.mbox_busy) {
- ha->flags.fw_hung = 1;
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- complete(&ha->mbx_intr_comp);
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
}
}
- }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
}
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
"%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ ha->flags.fw_hung = 1;
if (ha->flags.mbox_busy) {
- ha->flags.fw_hung = 1;
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- complete(&ha->mbx_intr_comp);
+ "Need reset, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
}
} else {
qla82xx_check_fw_alive(vha);
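The qla82xx_start_scsi hunk above re-reads the hardware out-pointer and recomputes the free request-ring entries before allocating a command context, bailing out if fewer than req_cnt + 2 slots remain. The arithmetic is the usual circular-buffer free-space calculation; a small standalone sketch (variable names are illustrative):

#include <stdio.h>

/*
 * Free slots in a circular request ring of 'length' entries, given the
 * producer index (ring_index) and the consumer index read from hardware
 * (out). Matches the req->cnt computation in the hunk above.
 */
static unsigned int ring_free_entries(unsigned int length,
                                      unsigned int ring_index,
                                      unsigned int out)
{
        if (ring_index < out)
                return out - ring_index;
        return length - (ring_index - out);
}

int main(void)
{
        /* 128-entry ring: producer at 120, consumer at 8 -> 16 free. */
        printf("%u\n", ring_free_entries(128, 120, 8));
        /* producer at 10, consumer at 40 -> 30 free. */
        printf("%u\n", ring_free_entries(128, 10, 40));
        return 0;
}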
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ff2172da7c1..1e4bff69525 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -115,8 +115,8 @@ int ql2xmaxqueues = 1;
module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xmaxqueues,
"Enables MQ settings "
- "Default is 1 for single queue. Set it to number \
- of queues in MQ mode.");
+ "Default is 1 for single queue. Set it to number "
+ "of queues in MQ mode.");
int ql2xmultique_tag;
module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
@@ -2341,16 +2341,28 @@ probe_out:
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
- scsi_qla_host_t *base_vha, *vha, *temp;
+ scsi_qla_host_t *base_vha, *vha;
struct qla_hw_data *ha;
+ unsigned long flags;
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
- list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
- if (vha && vha->fc_vport)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+
+ if (vha && vha->fc_vport) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
fc_vport_terminate(vha->fc_vport);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+
+ atomic_dec(&vha->vref_count);
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
struct qla_work_evt *e;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
- if (!e)
+ if (!e) {
+ QLA_VHA_MARK_NOT_BUSY(vha);
return NULL;
+ }
INIT_LIST_HEAD(&e->list);
e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
+
+ /* For each work completed decrement vha ref count */
+ QLA_VHA_MARK_NOT_BUSY(vha);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb91317..8edbccb3232 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.03-k0"
+#define QLA2XXX_VERSION "8.03.04-k0"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 3
+#define QLA_DRIVER_PATCH_VER 4
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a79da8dd206..9dc0a6616ed 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -36,6 +36,24 @@
#include "ql4_dbg.h"
#include "ql4_nx.h"
+#if defined(CONFIG_PCIEAER)
+#include <linux/aer.h>
+#else
+/* AER related */
+static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
+static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
+static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
+#endif
+
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
#endif
@@ -137,6 +155,9 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
+#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+ /* recovery timeout */
+
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -249,7 +270,6 @@ struct ddb_entry {
uint32_t default_time2wait; /* Default Min time between
* relogins (+aens) */
- atomic_t port_down_timer; /* Device connection timer */
atomic_t retry_relogin_timer; /* Min Time between relogins
* (4000 only) */
atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
@@ -378,7 +398,9 @@ struct scsi_qla_host {
#define AF_MSI_ENABLED 16 /* 0x00010000 */
#define AF_MSIX_ENABLED 17 /* 0x00020000 */
#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
-
+#define AF_FW_RECOVERY 19 /* 0x00080000 */
+#define AF_EEH_BUSY 20 /* 0x00100000 */
+#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
unsigned long dpc_flags;
@@ -474,7 +496,6 @@ struct scsi_qla_host {
uint32_t timer_active;
/* Recovery Timers */
- uint32_t port_down_retry_count;
uint32_t discovery_wait;
atomic_t check_relogin_timeouts;
uint32_t retry_reset_ha_cnt;
@@ -615,6 +636,15 @@ static inline int is_qla8022(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
}
+/* Note: Currently AER/EEH is supported only for 8022 cards.
+ * This function needs to be updated when AER/EEH is enabled
+ * for other cards.
+ */
+static inline int is_aer_supported(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
+}
+
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index c94c9ddfb3a..0336c6db8cb 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -673,17 +673,17 @@ struct flash_sys_info {
}; /* 200 */
struct mbx_sys_info {
- uint8_t board_id_str[16]; /* Keep board ID string first */
- /* in this structure for GUI. */
- uint16_t board_id; /* board ID code */
- uint16_t phys_port_cnt; /* number of physical network ports */
- uint16_t port_num; /* network port for this PCI function */
+ uint8_t board_id_str[16]; /* 0-f Keep board ID string first */
+ /* in this structure for GUI. */
+ uint16_t board_id; /* 10-11 board ID code */
+ uint16_t phys_port_cnt; /* 12-13 number of physical network ports */
+ uint16_t port_num; /* 14-15 network port for this PCI function */
/* (port 0 is first port) */
- uint8_t mac_addr[6]; /* MAC address for this PCI function */
- uint32_t iscsi_pci_func_cnt; /* number of iSCSI PCI functions */
- uint32_t pci_func; /* this PCI function */
- unsigned char serial_number[16]; /* serial number string */
- uint8_t reserved[16];
+ uint8_t mac_addr[6]; /* 16-1b MAC address for this PCI function */
+ uint32_t iscsi_pci_func_cnt; /* 1c-1f number of iSCSI PCI functions */
+ uint32_t pci_func; /* 20-23 this PCI function */
+ unsigned char serial_number[16]; /* 24-33 serial number string */
+ uint8_t reserved[12]; /* 34-3f */
};
struct crash_record {
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index c9cd5d6db98..95a26fb1626 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -93,6 +93,7 @@ void qla4xxx_free_irqs(struct scsi_qla_host *ha);
void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
+void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
@@ -131,6 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
+void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 30073577c3a..4c9be77ee70 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -308,7 +308,6 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
DEBUG2(printk("scsi%ld: %s: unable to get firmware "
"state\n", ha->host_no, __func__));
break;
-
}
if (ha->firmware_state & FW_STATE_ERROR) {
@@ -445,6 +444,16 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
+ if (is_aer_supported(ha) &&
+ test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
+ return status;
+
+ /* For 82xx, stop firmware before initializing because if BIOS
+ * has previously initialized the firmware, the driver's firmware
+ * initialization will fail. */
+ if (is_qla8022(ha))
+ qla4_8xxx_stop_firmware(ha);
+
ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
@@ -669,7 +678,6 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
}
ddb_entry->fw_ddb_index = fw_ddb_index;
- atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->relogin_retry_count, 0);
@@ -1556,8 +1564,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
/* Device is back online. */
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
- atomic_set(&ddb_entry->port_down_timer,
- ha->port_down_retry_count);
atomic_set(&ddb_entry->relogin_retry_count, 0);
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index f89973deac5..4ef9ba112ee 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -19,7 +19,7 @@ qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
/* Calculate number of free request entries. */
if ((req_cnt + 2) >= ha->req_q_count) {
- cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
+ cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
if (ha->request_in < cnt)
ha->req_q_count = cnt - ha->request_in;
else
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index aa65697a86b..2a1ab63f3eb 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -816,6 +816,9 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
unsigned long flags = 0;
uint8_t reqs_count = 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
ha->isr_count++;
status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
if (!(status & ha->nx_legacy_intr.int_vec_bit))
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 940ee561ee0..90021704d8c 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -39,6 +39,22 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
"pointer\n", ha->host_no, __func__));
return status;
}
+
+ if (is_qla8022(ha) &&
+ test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely "
+ "completing mbx cmd as firmware recovery detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+
+ if ((is_aer_supported(ha)) &&
+ (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
+ "timeout MBX Exiting.\n", ha->host_no, __func__));
+ return status;
+ }
+
/* Mailbox code active */
wait_count = MBOX_TOV * 100;
@@ -150,6 +166,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;
+
/*
* Service the interrupt.
* The ISR will save the mailbox status registers
@@ -196,6 +213,14 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
/* Check for mailbox timeout. */
if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
+ if (is_qla8022(ha) &&
+ test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: prematurely completing mbx cmd as "
+ "firmware recovery detected\n",
+ ha->host_no, __func__));
+ goto mbox_exit;
+ }
DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
" Scheduling Adapter Reset\n", ha->host_no,
mbx_cmd[0]));
@@ -246,6 +271,28 @@ mbox_exit:
return status;
}
+void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
+{
+ set_bit(AF_FW_RECOVERY, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
+ ha->host_no, __func__);
+
+ if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+ if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
+ complete(&ha->mbx_intr_comp);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
+ "recovery, doing premature completion of "
+ "mbx cmd\n", ha->host_no, __func__);
+
+ } else {
+ set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
+ "recovery, doing premature completion of "
+ "polling mbx cmd\n", ha->host_no, __func__);
+ }
+ }
+}
+
static uint8_t
qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
@@ -361,7 +408,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
/* Save Command Line Parameter info */
- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
ha->discovery_wait = ql4xdiscoverywait;
if (ha->acb_version == ACB_SUPPORTED) {
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 3e119ae7839..5d4a3822382 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
-static inline void
+void
qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
{
uint32_t drv_active;
@@ -1441,11 +1441,15 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
static inline int
qla4_8xxx_need_reset(struct scsi_qla_host *ha)
{
- uint32_t drv_state;
+ uint32_t drv_state, drv_active;
int rval;
+ drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
rval = drv_state & (1 << (ha->func_num * 4));
+ if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
+ rval = 1;
+
return rval;
}
@@ -1949,7 +1953,8 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
uint16_t cnt, chksum;
uint16_t *wptr;
struct qla_fdt_layout *fdt;
- uint16_t mid, fid;
+ uint16_t mid = 0;
+ uint16_t fid = 0;
struct ql82xx_hw_data *hw = &ha->hw;
hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
@@ -2105,6 +2110,9 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
qla4_8xxx_clear_rst_ready(ha);
qla4_8xxx_idc_unlock(ha);
+ if (rval == QLA_SUCCESS)
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+
return rval;
}
@@ -2145,7 +2153,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
goto exit_validate_mac82;
}
- if (mbox_sts[4] < sizeof(*sys_info)) {
+ /* Make sure we receive the minimum required data to cache internally */
+ if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
goto exit_validate_mac82;
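The get_sys_info change above relaxes the length check: instead of demanding the full mbx_sys_info structure, it only requires the bytes up to the reserved tail, which is all the driver actually caches. A tiny standalone illustration of that offsetof-based check, with the layout mirroring the ql4_fw.h hunk earlier in this series:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Abbreviated layout mirroring struct mbx_sys_info in ql4_fw.h. */
struct mbx_sys_info {
        uint8_t  board_id_str[16];   /* 0x00-0x0f */
        uint16_t board_id;           /* 0x10-0x11 */
        uint16_t phys_port_cnt;      /* 0x12-0x13 */
        uint16_t port_num;           /* 0x14-0x15 */
        uint8_t  mac_addr[6];        /* 0x16-0x1b */
        uint32_t iscsi_pci_func_cnt; /* 0x1c-0x1f */
        uint32_t pci_func;           /* 0x20-0x23 */
        uint8_t  serial_number[16];  /* 0x24-0x33 */
        uint8_t  reserved[12];       /* 0x34-0x3f */
};

int main(void)
{
        size_t needed = offsetof(struct mbx_sys_info, reserved);
        size_t received = 0x34;      /* length reported by firmware */

        /* Accept the reply as long as everything before 'reserved' arrived. */
        printf("needed %zu, received %zu: %s\n", needed, received,
               received < needed ? "reject" : "accept");
        return 0;
}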
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5529b2a3974..370d40ff152 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -163,10 +163,10 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
- DEBUG2(printk("scsi%ld: %s: ddb [%d] port down retry count "
+ DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
"of (%d) secs exhausted, marking device DEAD.\n",
ha->host_no, __func__, ddb_entry->fw_ddb_index,
- ha->port_down_retry_count));
+ QL4_SESS_RECOVERY_TMO));
qla4xxx_wake_dpc(ha);
}
@@ -298,7 +298,8 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
{
int err;
- ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
+ ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO;
+
err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
if (err) {
DEBUG2(printk(KERN_ERR "Could not add session.\n"));
@@ -474,6 +475,14 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
struct srb *srb;
int rval;
+ if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+ if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
+ cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_REQUEUE << 16;
+ goto qc_fail_command;
+ }
+
if (!sess) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
@@ -654,6 +663,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
uint32_t fw_heartbeat_counter, halt_status;
fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+ if (fw_heartbeat_counter == 0xffffffff) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
+ "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+ ha->host_no, __func__));
+ return;
+ }
if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
ha->seconds_since_last_heartbeat++;
@@ -662,6 +678,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
ha->seconds_since_last_heartbeat = 0;
halt_status = qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
+
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
* DPC */
@@ -673,6 +690,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
qla4xxx_wake_dpc(ha);
+ qla4xxx_mailbox_premature_completion(ha);
}
}
ha->fw_heartbeat_counter = fw_heartbeat_counter;
@@ -698,6 +716,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
ha->host_no, __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
+ qla4xxx_mailbox_premature_completion(ha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
printk("scsi%ld: %s: HW State: NEED QUIES!\n",
@@ -719,6 +738,19 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
{
struct ddb_entry *ddb_entry, *dtemp;
int start_dpc = 0;
+ uint16_t w;
+
+ /* If we are in the middle of AER/EEH processing
+ * skip any processing and reschedule the timer
+ */
+ if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+ mod_timer(&ha->timer, jiffies + HZ);
+ return;
+ }
+
+ /* Hardware read to trigger an EEH error during mailbox waits. */
+ if (!pci_channel_offline(ha->pdev))
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n",
@@ -1207,7 +1239,13 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
- return;
+ goto do_dpc_exit;
+
+ if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
+ ha->host_no, __func__, ha->flags));
+ goto do_dpc_exit;
+ }
/* HBA is in the process of being permanently disabled.
* Don't process anything */
@@ -1346,6 +1384,8 @@ dpc_post_reset_ha:
}
}
}
+
+do_dpc_exit:
clear_bit(AF_DPC_SCHEDULED, &ha->flags);
}
@@ -1612,6 +1652,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host = host;
ha->host_no = host->host_no;
+ pci_enable_pcie_error_reporting(pdev);
+
/* Setup Runtime configurable options */
if (is_qla8022(ha)) {
ha->isp_ops = &qla4_8xxx_isp_ops;
@@ -1630,6 +1672,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->isp_ops = &qla4xxx_isp_ops;
}
+ /* Set EEH reset type to fundamental if required by hba */
+ if (is_qla8022(ha))
+ pdev->needs_freset = 1;
+
/* Configure PCI I/O space. */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
@@ -1726,6 +1772,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
}
}
+ pci_save_state(ha->pdev);
ha->isp_ops->enable_intrs(ha);
/* Start timer thread. */
@@ -1752,6 +1799,7 @@ probe_failed:
qla4xxx_free_adapter(ha);
probe_failed_ioconfig:
+ pci_disable_pcie_error_reporting(pdev);
scsi_host_put(ha->host);
probe_disable_device:
@@ -1781,6 +1829,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
scsi_host_put(ha->host);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -1877,6 +1926,17 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
int done = 0;
struct srb *rp;
uint32_t max_wait_time = EH_WAIT_CMD_TOV;
+ int ret = SUCCESS;
+
+ /* Don't wait on command if PCI error is being handled
+ * by PCI AER driver
+ */
+ if (unlikely(pci_channel_offline(ha->pdev)) ||
+ (test_bit(AF_EEH_BUSY, &ha->flags))) {
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
+ ha->host_no, __func__);
+ return ret;
+ }
do {
/* Checking to see if its returned to OS */
@@ -2172,6 +2232,252 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
return return_status;
}
+/* PCI AER driver recovers from all correctable errors w/o
+ * driver intervention. For uncorrectable errors PCI AER
+ * driver calls the following device driver's callbacks
+ *
+ * - Fatal Errors - link_reset
+ * - Non-Fatal Errors - driver's pci_error_detected() which
+ * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
+ *
+ * PCI AER driver calls
+ * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
+ * returns RECOVERED or NEED_RESET if fw_hung
+ * NEED_RESET - driver's slot_reset()
+ * DISCONNECT - device is dead & cannot recover
+ * RECOVERED - driver's pci_resume()
+ */
+static pci_ers_result_t
+qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
+ ha->host_no, __func__, state);
+
+ if (!is_aer_supported(ha))
+ return PCI_ERS_RESULT_NONE;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ clear_bit(AF_EEH_BUSY, &ha->flags);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ set_bit(AF_EEH_BUSY, &ha->flags);
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_free_irqs(ha);
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ set_bit(AF_EEH_BUSY, &ha->flags);
+ set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * qla4xxx_pci_mmio_enabled() gets called if
+ * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
+ * and read/write to the device still works.
+ **/
+static pci_ers_result_t
+qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+
+ if (!is_aer_supported(ha))
+ return PCI_ERS_RESULT_NONE;
+
+ if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- "
+ "mmio_enabled\n", ha->host_no, __func__);
+ return PCI_ERS_RESULT_NEED_RESET;
+ } else
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
+{
+ uint32_t rval = QLA_ERROR;
+ int fn;
+ struct pci_dev *other_pdev = NULL;
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
+
+ set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ clear_bit(AF_ONLINE, &ha->flags);
+ qla4xxx_mark_all_devices_missing(ha);
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+ }
+
+ fn = PCI_FUNC(ha->pdev->devfn);
+ while (fn > 0) {
+ fn--;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
+ "func %x\n", ha->host_no, __func__, fn);
+ /* Get the pci device given the domain, bus,
+ * slot/function number */
+ other_pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ if (!other_pdev)
+ continue;
+
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
+ "func in enabled state%x\n", ha->host_no,
+ __func__, fn);
+ pci_dev_put(other_pdev);
+ break;
+ }
+ pci_dev_put(other_pdev);
+ }
+
+ /* The first function on the card, the reset owner, will
+ * start & initialize the firmware. The other functions
+ * on the card will reset the firmware context
+ */
+ if (!fn) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
+ "0x%x is the owner\n", ha->host_no, __func__,
+ ha->pdev->devfn);
+
+ qla4_8xxx_idc_lock(ha);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_COLD);
+
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_IDC_VERSION);
+
+ qla4_8xxx_idc_unlock(ha);
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+ rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
+ qla4_8xxx_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
+ "FAILED\n", ha->host_no, __func__);
+ qla4_8xxx_clear_drv_active(ha);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ } else {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
+ "READY\n", ha->host_no, __func__);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_READY);
+ /* Clear driver state register */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+ qla4_8xxx_set_drv_active(ha);
+ ha->isp_ops->enable_intrs(ha);
+ }
+ qla4_8xxx_idc_unlock(ha);
+ } else {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
+ "the reset owner\n", ha->host_no, __func__,
+ ha->pdev->devfn);
+ if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
+ QLA82XX_DEV_READY)) {
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+ rval = qla4xxx_initialize_adapter(ha,
+ PRESERVE_DDB_LIST);
+ if (rval == QLA_SUCCESS)
+ ha->isp_ops->enable_intrs(ha);
+ qla4_8xxx_idc_lock(ha);
+ qla4_8xxx_set_drv_active(ha);
+ qla4_8xxx_idc_unlock(ha);
+ }
+ }
+ clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+ return rval;
+}
+
+static pci_ers_result_t
+qla4xxx_pci_slot_reset(struct pci_dev *pdev)
+{
+ pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+ int rc;
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
+ ha->host_no, __func__);
+
+ if (!is_aer_supported(ha))
+ return PCI_ERS_RESULT_NONE;
+
+ /* Restore the saved state of PCIe device -
+ * BAR registers, PCI Config space, PCIX, MSI,
+ * IOV states
+ */
+ pci_restore_state(pdev);
+
+ /* pci_restore_state() clears the device's saved_state flag, so
+ * call pci_save_state() again to mark the state as saved
+ */
+ pci_save_state(pdev);
+
+ /* Initialize device or resume if in suspended state */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
+ "device after reset\n", ha->host_no, __func__);
+ goto exit_slot_reset;
+ }
+
+ ret = qla4xxx_request_irqs(ha);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
+ " already in use.\n", pdev->irq);
+ goto exit_slot_reset;
+ }
+
+ if (is_qla8022(ha)) {
+ if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
+ ret = PCI_ERS_RESULT_RECOVERED;
+ goto exit_slot_reset;
+ } else
+ goto exit_slot_reset;
+ }
+
+exit_slot_reset:
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
+ ha->host_no, __func__, ret);
+ return ret;
+}
+
+static void
+qla4xxx_pci_resume(struct pci_dev *pdev)
+{
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+ int ret;
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
+ ha->host_no, __func__);
+
+ ret = qla4xxx_wait_for_hba_online(ha);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
+ "resume I/O from slot/link_reset\n", ha->host_no,
+ __func__);
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ clear_bit(AF_EEH_BUSY, &ha->flags);
+}
+
+static struct pci_error_handlers qla4xxx_err_handler = {
+ .error_detected = qla4xxx_pci_error_detected,
+ .mmio_enabled = qla4xxx_pci_mmio_enabled,
+ .slot_reset = qla4xxx_pci_slot_reset,
+ .resume = qla4xxx_pci_resume,
+};
+
static struct pci_device_id qla4xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
@@ -2206,6 +2512,7 @@ static struct pci_driver qla4xxx_pci_driver = {
.id_table = qla4xxx_pci_tbl,
.probe = qla4xxx_probe_adapter,
.remove = qla4xxx_remove_adapter,
+ .err_handler = &qla4xxx_err_handler,
};
static int __init qla4xxx_module_init(void)
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c905dbd7533..a77b973f2cb 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k2"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k3"
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index ca5c15c779c..f8c561cf751 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -704,7 +704,7 @@ static void __devexit qpti_chain_del(struct qlogicpti *qpti)
static int __devinit qpti_map_regs(struct qlogicpti *qpti)
{
- struct of_device *op = qpti->op;
+ struct platform_device *op = qpti->op;
qpti->qregs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
@@ -727,9 +727,9 @@ static int __devinit qpti_map_regs(struct qlogicpti *qpti)
static int __devinit qpti_register_irq(struct qlogicpti *qpti)
{
- struct of_device *op = qpti->op;
+ struct platform_device *op = qpti->op;
- qpti->qhost->irq = qpti->irq = op->irqs[0];
+ qpti->qhost->irq = qpti->irq = op->archdata.irqs[0];
/* We used to try various overly-clever things to
* reduce the interrupt processing overhead on
@@ -752,7 +752,7 @@ fail:
static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
{
- struct of_device *op = qpti->op;
+ struct platform_device *op = qpti->op;
struct device_node *dp;
dp = op->dev.of_node;
@@ -773,7 +773,7 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
static void qpti_get_bursts(struct qlogicpti *qpti)
{
- struct of_device *op = qpti->op;
+ struct platform_device *op = qpti->op;
u8 bursts, bmask;
bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff);
@@ -806,7 +806,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
*/
static int __devinit qpti_map_queues(struct qlogicpti *qpti)
{
- struct of_device *op = qpti->op;
+ struct platform_device *op = qpti->op;
#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
qpti->res_cpu = dma_alloc_coherent(&op->dev,
@@ -1290,7 +1290,7 @@ static struct scsi_host_template qpti_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit qpti_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
struct scsi_host_template *tpnt = match->data;
struct device_node *dp = op->dev.of_node;
@@ -1302,7 +1302,7 @@ static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_devic
/* Sometimes Antares cards come up not completely
* setup, and we get a report of a zero IRQ.
*/
- if (op->irqs[0] == 0)
+ if (op->archdata.irqs[0] == 0)
return -ENODEV;
host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
@@ -1401,7 +1401,7 @@ fail_unlink:
return -ENODEV;
}
-static int __devexit qpti_sbus_remove(struct of_device *op)
+static int __devexit qpti_sbus_remove(struct platform_device *op)
{
struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
@@ -1467,12 +1467,12 @@ static struct of_platform_driver qpti_sbus_driver = {
static int __init qpti_init(void)
{
- return of_register_driver(&qpti_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&qpti_sbus_driver);
}
static void __exit qpti_exit(void)
{
- of_unregister_driver(&qpti_sbus_driver);
+ of_unregister_platform_driver(&qpti_sbus_driver);
}
MODULE_DESCRIPTION("QlogicISP SBUS driver");
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index e3c74d1ee2d..4377e87ee79 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -342,7 +342,7 @@ struct qlogicpti {
u_int req_in_ptr; /* index of next request slot */
u_int res_out_ptr; /* index of next result slot */
long send_marker; /* must we send a marker? */
- struct of_device *op;
+ struct platform_device *op;
unsigned long __pad;
int cmd_count[MAX_TARGETS];
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2bf98469dc4..1de30eb83bb 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,7 +320,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
- if (blk_barrier_rq(scmd->request))
+ if (scmd->request->cmd_flags & REQ_HARDBARRIER)
/*
* barrier requests should always retry on UA
* otherwise block will get a spurious error
@@ -473,14 +473,17 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
*/
return SUCCESS;
case RESERVATION_CONFLICT:
- /*
- * let issuer deal with this, it could be just fine
- */
- return SUCCESS;
+ if (scmd->cmnd[0] == TEST_UNIT_READY)
+ /* it is a success, we probed the device and
+ * found it */
+ return SUCCESS;
+ /* otherwise, we failed to send the command */
+ return FAILED;
case QUEUE_FULL:
scsi_handle_queue_full(scmd->device);
/* fall through */
case BUSY:
+ return NEEDS_RETRY;
default:
return FAILED;
}
@@ -1331,16 +1334,16 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
case DID_OK:
break;
case DID_BUS_BUSY:
- return blk_failfast_transport(scmd->request);
+ return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
case DID_PARITY:
- return blk_failfast_dev(scmd->request);
+ return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
return 0;
/* fall through */
case DID_SOFT_ERROR:
- return blk_failfast_driver(scmd->request);
+ return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
}
switch (status_byte(scmd->result)) {
@@ -1349,7 +1352,9 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
* assume caller has checked sense and determined
* the check condition was retryable.
*/
- return blk_failfast_dev(scmd->request);
+ if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
+ scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
+ return 1;
}
return 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1646fe7cbd4..ee02d3838a0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -85,7 +85,7 @@ static void scsi_unprep_request(struct request *req)
{
struct scsi_cmnd *cmd = req->special;
- req->cmd_flags &= ~REQ_DONTPREP;
+ blk_unprep_request(req);
req->special = NULL;
scsi_put_command(cmd);
@@ -722,7 +722,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
- if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
+ if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
req->errors = result;
if (result) {
if (sense_valid && req->sense) {
@@ -757,7 +757,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
}
- BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
+ /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+ BUG_ON(blk_bidi_rq(req));
/*
* Next deal with any sectors which we were able to correctly
@@ -1010,11 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
err_exit:
scsi_release_buffers(cmd);
- if (error == BLKPREP_KILL)
- scsi_put_command(cmd);
- else /* BLKPREP_DEFER */
- scsi_unprep_request(cmd->request);
-
+ cmd->request->special = NULL;
+ scsi_put_command(cmd);
return error;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1372,12 +1370,6 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
blk_start_request(req);
- if (unlikely(cmd == NULL)) {
- printk(KERN_CRIT "impossible request in %s.\n",
- __func__);
- BUG();
- }
-
sdev = cmd->device;
starget = scsi_target(sdev);
shost = sdev->host;
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 026295e2c53..b4056d14f81 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -148,8 +148,6 @@ static inline void scsi_netlink_exit(void) {}
/* scsi_pm.c */
#ifdef CONFIG_PM_OPS
extern const struct dev_pm_ops scsi_bus_pm_ops;
-#else /* CONFIG_PM_OPS */
-#define scsi_bus_pm_ops (*NULL)
#endif
#ifdef CONFIG_PM_RUNTIME
extern void scsi_autopm_get_target(struct scsi_target *);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 562fb3bce26..c3f67373a4f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -381,7 +381,9 @@ struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
+#ifdef CONFIG_PM_OPS
.pm = &scsi_bus_pm_ops,
+#endif
};
EXPORT_SYMBOL_GPL(scsi_bus_type);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 66241dd525a..c399be97992 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -185,6 +185,7 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction,
rq_data_dir(cmd->request));
scsi_unmap_user_pages(tcmd);
+ tcmd->rq->bio = NULL;
scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index edb6b362a8f..d7e470a0618 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/kernel.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -1730,12 +1731,11 @@ fc_parse_wwn(const char *ns, u64 *nm)
/* Validate and store the new name */
for (i=0, j=0; i < 16; i++) {
- if ((*ns >= 'a') && (*ns <= 'f'))
- j = ((j << 4) | ((*ns++ -'a') + 10));
- else if ((*ns >= 'A') && (*ns <= 'F'))
- j = ((j << 4) | ((*ns++ -'A') + 10));
- else if ((*ns >= '0') && (*ns <= '9'))
- j = ((j << 4) | (*ns++ -'0'));
+ int value;
+
+ value = hex_to_bin(*ns++);
+ if (value >= 0)
+ j = (j << 4) | value;
else
return -EINVAL;
if (i % 2) {
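fc_parse_wwn above now relies on the kernel's hex_to_bin() helper instead of three open-coded range checks. A user-space sketch of the same nibble-accumulation loop, with a local hex_digit() standing in for hex_to_bin() and the driver's separator handling omitted:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's hex_to_bin(): returns 0-15, or -1 on error. */
static int hex_digit(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        if (c >= 'A' && c <= 'F')
                return c - 'A' + 10;
        return -1;
}

/* Parse a 16-hex-digit world wide name into a 64-bit value. */
static int parse_wwn(const char *ns, uint64_t *nm)
{
        uint64_t j = 0;
        int i;

        for (i = 0; i < 16; i++) {
                int value = hex_digit(*ns++);

                if (value < 0)
                        return -1;
                j = (j << 4) | (uint64_t)value;
        }
        *nm = j;
        return 0;
}

int main(void)
{
        uint64_t wwn;

        if (parse_wwn("500143802426baf4", &wwn) == 0)
                printf("0x%016llx\n", (unsigned long long)wwn);
        return 0;
}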
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cc8a1d1d915..ffa0689ee84 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -46,6 +46,7 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/delay.h>
+#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
@@ -118,8 +119,8 @@ static DEFINE_IDA(sd_index_ida);
* object after last put) */
static DEFINE_MUTEX(sd_ref_mutex);
-struct kmem_cache *sd_cdb_cache;
-mempool_t *sd_cdb_pool;
+static struct kmem_cache *sd_cdb_cache;
+static mempool_t *sd_cdb_pool;
static const char *sd_cache_types[] = {
"write through", "none", "write back",
@@ -146,7 +147,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
- const int len = strlen(sd_cache_types[i]);
+ len = strlen(sd_cache_types[i]);
if (strncmp(sd_cache_types[i], buf, len) == 0 &&
buf[len] == '\n') {
ct = i;
@@ -411,54 +412,85 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
}
/**
- * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * @sdp: scsi device to operate on
* @rq: Request to prepare
*
* Will issue either UNMAP or WRITE SAME(16) depending on preference
* indicated by target device.
**/
-static int sd_prepare_discard(struct request *rq)
+static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
{
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
struct bio *bio = rq->bio;
sector_t sector = bio->bi_sector;
- unsigned int num = bio_sectors(bio);
+ unsigned int nr_sectors = bio_sectors(bio);
+ unsigned int len;
+ int ret;
+ struct page *page;
if (sdkp->device->sector_size == 4096) {
sector >>= 3;
- num >>= 3;
+ nr_sectors >>= 3;
}
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = SD_TIMEOUT;
memset(rq->cmd, 0, rq->cmd_len);
+ page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!page)
+ return BLKPREP_DEFER;
+
if (sdkp->unmap) {
- char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+ char *buf = page_address(page);
+ rq->cmd_len = 10;
rq->cmd[0] = UNMAP;
rq->cmd[8] = 24;
- rq->cmd_len = 10;
-
- /* Ensure that data length matches payload */
- rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
put_unaligned_be16(6 + 16, &buf[0]);
put_unaligned_be16(16, &buf[2]);
put_unaligned_be64(sector, &buf[8]);
- put_unaligned_be32(num, &buf[16]);
+ put_unaligned_be32(nr_sectors, &buf[16]);
- kunmap_atomic(buf, KM_USER0);
+ len = 24;
} else {
+ rq->cmd_len = 16;
rq->cmd[0] = WRITE_SAME_16;
rq->cmd[1] = 0x8; /* UNMAP */
put_unaligned_be64(sector, &rq->cmd[2]);
- put_unaligned_be32(num, &rq->cmd[10]);
- rq->cmd_len = 16;
+ put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+
+ len = sdkp->device->sector_size;
+ }
+
+ blk_add_request_payload(rq, page, len);
+ ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+ rq->buffer = page_address(page);
+ if (ret != BLKPREP_OK) {
+ __free_page(page);
+ rq->buffer = NULL;
}
+ return ret;
+}
- return BLKPREP_OK;
+static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
+{
+ rq->timeout = SD_TIMEOUT;
+ rq->retries = SD_MAX_RETRIES;
+ rq->cmd[0] = SYNCHRONIZE_CACHE;
+ rq->cmd_len = 10;
+
+ return scsi_setup_blk_pc_cmnd(sdp, rq);
+}
+
+static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+{
+ if (rq->cmd_flags & REQ_DISCARD) {
+ free_page((unsigned long)rq->buffer);
+ rq->buffer = NULL;
+ }
}
/**
@@ -485,10 +517,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* Discard requests come in as REQ_TYPE_FS but we turn them into
* block PC requests to make life easier.
*/
- if (blk_discard_rq(rq))
- ret = sd_prepare_discard(rq);
-
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ if (rq->cmd_flags & REQ_DISCARD) {
+ ret = scsi_setup_discard_cmnd(sdp, rq);
+ goto out;
+ } else if (rq->cmd_flags & REQ_FLUSH) {
+ ret = scsi_setup_flush_cmnd(sdp, rq);
+ goto out;
+ } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
goto out;
} else if (rq->cmd_type != REQ_TYPE_FS) {
@@ -636,7 +671,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
SCpnt->cmnd[7] = 0x18;
SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
- SCpnt->cmnd[10] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
+ SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
/* LBA */
SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -661,7 +696,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
} else if (block > 0xffffffff) {
SCpnt->cmnd[0] += READ_16 - READ_6;
- SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
+ SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@@ -682,7 +717,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
this_count = 0xffff;
SCpnt->cmnd[0] += READ_10 - READ_6;
- SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
+ SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@@ -691,7 +726,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
} else {
- if (unlikely(blk_fua_rq(rq))) {
+ if (unlikely(rq->cmd_flags & REQ_FUA)) {
/*
* This happens only if this drive failed
* 10byte rw command with ILLEGAL_REQUEST
@@ -745,6 +780,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* or from within the kernel (e.g. as a result of a mount(1) ).
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
+ *
+ * Locking: called with bdev->bd_mutex held.
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
@@ -799,7 +836,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
if (!scsi_device_online(sdev))
goto error_out;
- if (!sdkp->openers++ && sdev->removable) {
+ if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
if (scsi_block_when_processing_errors(sdev))
scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
}
@@ -823,6 +860,8 @@ error_autopm:
*
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
+ *
+ * Locking: called with bdev->bd_mutex held.
**/
static int sd_release(struct gendisk *disk, fmode_t mode)
{
@@ -831,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
- if (!--sdkp->openers && sdev->removable) {
+ if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
if (scsi_block_when_processing_errors(sdev))
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
@@ -904,7 +943,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
error = scsi_nonblockable_ioctl(sdp, cmd, p,
(mode & FMODE_NDELAY) != 0);
if (!scsi_block_when_processing_errors(sdp) || !error)
- return error;
+ goto out;
/*
* Send SCSI addressing ioctls directly to mid level, send other
@@ -914,13 +953,17 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd) {
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
- return scsi_ioctl(sdp, cmd, p);
+ error = scsi_ioctl(sdp, cmd, p);
+ break;
default:
error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
if (error != -ENOTTY)
- return error;
+ break;
+ error = scsi_ioctl(sdp, cmd, p);
+ break;
}
- return scsi_ioctl(sdp, cmd, p);
+out:
+ return error;
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1045,15 +1088,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
return 0;
}
-static void sd_prepare_flush(struct request_queue *q, struct request *rq)
-{
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->timeout = SD_TIMEOUT;
- rq->retries = SD_MAX_RETRIES;
- rq->cmd[0] = SYNCHRONIZE_CACHE;
- rq->cmd_len = 10;
-}
-
static void sd_rescan(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
@@ -1103,7 +1137,7 @@ static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
.release = sd_release,
- .locked_ioctl = sd_ioctl,
+ .ioctl = sd_ioctl,
.getgeo = sd_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
@@ -1120,7 +1154,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
u64 bad_lba;
int info_valid;
- if (!blk_fs_request(scmd->request))
+ if (scmd->request->cmd_type != REQ_TYPE_FS)
return 0;
info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
@@ -1171,6 +1205,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)
int sense_valid = 0;
int sense_deferred = 0;
+ if (SCpnt->request->cmd_flags & REQ_DISCARD) {
+ if (!result)
+ scsi_set_resid(SCpnt, 0);
+ return good_bytes;
+ }
+
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
if (sense_valid)
@@ -1383,7 +1423,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
/*
* Determine whether disk supports Data Integrity Field.
*/
-void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+static void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdp = sdkp->device;
u8 type;
@@ -1929,7 +1969,7 @@ defaults:
* The ATO bit indicates whether the DIF application tag is available
* for use by the operating system.
*/
-void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
+static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
{
int res, offset;
struct scsi_device *sdp = sdkp->device;
@@ -2121,7 +2161,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
else
ordered = QUEUE_ORDERED_DRAIN;
- blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
+ blk_queue_ordered(sdkp->disk->queue, ordered);
set_capacity(disk, sdkp->capacity);
kfree(buffer);
@@ -2234,6 +2274,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_revalidate_disk(gd);
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
+ blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
@@ -2274,7 +2315,7 @@ static int sd_probe(struct device *dev)
struct scsi_device *sdp = to_scsi_device(dev);
struct scsi_disk *sdkp;
struct gendisk *gd;
- u32 index;
+ int index;
int error;
error = -ENODEV;
@@ -2313,7 +2354,7 @@ static int sd_probe(struct device *dev)
sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
- sdkp->openers = 0;
+ atomic_set(&sdkp->openers, 0);
sdkp->previous_state = 1;
if (!sdp->request_queue->rq_timeout) {
@@ -2372,6 +2413,7 @@ static int sd_remove(struct device *dev)
async_synchronize_full();
blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
+ blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
@@ -2583,15 +2625,15 @@ module_exit(exit_sd);
static void sd_print_sense_hdr(struct scsi_disk *sdkp,
struct scsi_sense_hdr *sshdr)
{
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_sense_hdr(sshdr);
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
}
static void sd_print_result(struct scsi_disk *sdkp, int result)
{
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_result(result);
}
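For the UNMAP path, scsi_setup_discard_cmnd() above allocates a zeroed page and fills a 24-byte parameter list: a header carrying the data length (22) and block-descriptor data length (16), followed by one descriptor with the 64-bit starting LBA and 32-bit block count. The standalone C sketch below builds the same buffer; the byte-shift helpers stand in for put_unaligned_be16/32/64() and all names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Big-endian stores standing in for put_unaligned_be16/32/64(). */
static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint32_t v, uint8_t *p) { int i; for (i = 0; i < 4; i++) p[i] = v >> (24 - 8 * i); }
static void put_be64(uint64_t v, uint8_t *p) { int i; for (i = 0; i < 8; i++) p[i] = v >> (56 - 8 * i); }

/*
 * Build the 24-byte UNMAP parameter list the way scsi_setup_discard_cmnd()
 * does: a header followed by one 16-byte block descriptor.
 */
static void build_unmap_payload(uint8_t buf[24], uint64_t lba, uint32_t nr_blocks)
{
	memset(buf, 0, 24);
	put_be16(6 + 16, &buf[0]);	/* UNMAP data length (bytes following this field) */
	put_be16(16, &buf[2]);		/* block descriptor data length */
	put_be64(lba, &buf[8]);		/* descriptor: starting LBA */
	put_be32(nr_blocks, &buf[16]);	/* descriptor: number of logical blocks */
}

int main(void)
{
	uint8_t buf[24];
	int i;

	build_unmap_payload(buf, 0x1000, 8);
	for (i = 0; i < 24; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}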
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 43d3caf268e..f81a9309e6d 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -47,7 +47,7 @@ struct scsi_disk {
struct scsi_device *device;
struct device dev;
struct gendisk *disk;
- unsigned int openers; /* protected by BKL for now, yuck */
+ atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
u32 index;
unsigned short hw_sector_size;
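The sd.h change turns the BKL-protected openers count into an atomic_t, so sd_open()/sd_release() can spot the first opener and last closer without the big kernel lock. A small C11 userspace analogue of that first/last detection pattern is sketched below; it is illustrative only and uses stdatomic rather than the kernel's atomic_t API.

#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of the atomic_t openers counter added to struct scsi_disk. */
static atomic_int openers;

static void open_disk(void)
{
	/* atomic_inc_return(&sdkp->openers) == 1 marks the first opener. */
	if (atomic_fetch_add(&openers, 1) + 1 == 1)
		printf("first opener: prevent medium removal\n");
}

static void release_disk(void)
{
	/* atomic_dec_return(&sdkp->openers) == 0 marks the last closer. */
	if (atomic_fetch_sub(&openers, 1) - 1 == 0)
		printf("last closer: allow medium removal\n");
}

int main(void)
{
	open_disk();
	open_disk();
	release_disk();
	release_disk();
	return 0;
}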
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 2968c6b83dd..78d616315d8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1686,14 +1686,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
int len, size = sizeof(struct sg_iovec) * iov_count;
struct iovec *iov;
- iov = kmalloc(size, GFP_ATOMIC);
- if (!iov)
- return -ENOMEM;
-
- if (copy_from_user(iov, hp->dxferp, size)) {
- kfree(iov);
- return -EFAULT;
- }
+ iov = memdup_user(hp->dxferp, size);
+ if (IS_ERR(iov))
+ return PTR_ERR(iov);
len = iov_length(iov, iov_count);
if (hp->dxfer_len < len) {
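The sg.c hunk replaces an open-coded kmalloc()/copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through an error pointer checked with IS_ERR()/PTR_ERR(). Below is a userspace sketch of that error-pointer pattern with local stand-ins for the kernel helpers; the helper names and the plain memcpy() are assumptions for illustration, not the real API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

/* Stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
static void *err_ptr(long error) { return (void *)error; }
static int is_err(const void *ptr) { return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO; }
static long ptr_err(const void *ptr) { return (long)ptr; }

/*
 * memdup-style helper: allocate and copy in one step, returning an
 * error pointer on failure, as memdup_user() does in sg_start_req().
 */
static void *memdup_sketch(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return err_ptr(-ENOMEM);
	memcpy(p, src, len);	/* the kernel helper copies from userspace here */
	return p;
}

int main(void)
{
	const char data[] = "iovec table";
	char *copy = memdup_sketch(data, sizeof(data));

	if (is_err(copy))
		return (int)-ptr_err(copy);
	printf("%s\n", copy);
	free(copy);
	return 0;
}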
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0a90abc7f14..ba9c3e0387c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -44,6 +44,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -466,22 +467,27 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_cd *cd = scsi_cd_get(bdev->bd_disk);
+ struct scsi_cd *cd;
int ret = -ENXIO;
+ lock_kernel();
+ cd = scsi_cd_get(bdev->bd_disk);
if (cd) {
ret = cdrom_open(&cd->cdi, bdev, mode);
if (ret)
scsi_cd_put(cd);
}
+ unlock_kernel();
return ret;
}
static int sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
+ lock_kernel();
cdrom_release(&cd->cdi, mode);
scsi_cd_put(cd);
+ unlock_kernel();
return 0;
}
@@ -493,6 +499,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
+ lock_kernel();
+
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to cdrom/block level.
@@ -500,12 +508,13 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
switch (cmd) {
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
- return scsi_ioctl(sdev, cmd, argp);
+ ret = scsi_ioctl(sdev, cmd, argp);
+ goto out;
}
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
- return ret;
+ goto out;
/*
* ENODEV means that we didn't recognise the ioctl, or that we
@@ -516,8 +525,12 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
(mode & FMODE_NDELAY) != 0);
if (ret != -ENODEV)
- return ret;
- return scsi_ioctl(sdev, cmd, argp);
+ goto out;
+ ret = scsi_ioctl(sdev, cmd, argp);
+
+out:
+ unlock_kernel();
+ return ret;
}
static int sr_block_media_changed(struct gendisk *disk)
@@ -531,7 +544,7 @@ static const struct block_device_operations sr_bdops =
.owner = THIS_MODULE,
.open = sr_block_open,
.release = sr_block_release,
- .locked_ioctl = sr_block_ioctl,
+ .ioctl = sr_block_ioctl,
.media_changed = sr_block_media_changed,
/*
* No compat_ioctl for now because sr_block_ioctl never
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index b5838d547c6..713620ed70d 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -2022,7 +2022,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
!= cmd))
{
- if(blk_fs_request(cmd->request)) {
+ if (cmd->request->cmd_type == REQ_TYPE_FS) {
sun3scsi_dma_setup(d, count,
rq_data_dir(cmd->request));
sun3_dma_setup_done = cmd;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index e606cf0a2eb..613f5880d13 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -524,7 +524,7 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
struct scsi_cmnd *cmd,
int write_flag)
{
- if(blk_fs_request(cmd->request))
+ if (cmd->request->cmd_type == REQ_TYPE_FS)
return wanted;
else
return 0;
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index aaa4fd0dd1b..7c526b8e30a 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -458,7 +458,7 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
struct scsi_cmnd *cmd,
int write_flag)
{
- if(blk_fs_request(cmd->request))
+ if (cmd->request->cmd_type == REQ_TYPE_FS)
return wanted;
else
return 0;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 386dd9d602b..193b37ba183 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -44,7 +44,7 @@ enum dvma_rev {
};
static int __devinit esp_sbus_setup_dma(struct esp *esp,
- struct of_device *dma_of)
+ struct platform_device *dma_of)
{
esp->dma = dma_of;
@@ -81,7 +81,7 @@ static int __devinit esp_sbus_setup_dma(struct esp *esp,
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
struct resource *res;
/* On HME, two reg sets exist, first is DVMA,
@@ -101,7 +101,7 @@ static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
esp->command_block = dma_alloc_coherent(&op->dev, 16,
&esp->command_block_dma,
@@ -114,15 +114,15 @@ static int __devinit esp_sbus_map_command_block(struct esp *esp)
static int __devinit esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
- host->irq = op->irqs[0];
+ host->irq = op->archdata.irqs[0];
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}
-static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
+static void __devinit esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
struct device_node *dp;
dp = op->dev.of_node;
@@ -144,7 +144,7 @@ done:
static void __devinit esp_get_differential(struct esp *esp)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
struct device_node *dp;
dp = op->dev.of_node;
@@ -156,7 +156,7 @@ static void __devinit esp_get_differential(struct esp *esp)
static void __devinit esp_get_clock_params(struct esp *esp)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
struct device_node *bus_dp, *dp;
int fmhz;
@@ -170,10 +170,10 @@ static void __devinit esp_get_clock_params(struct esp *esp)
esp->cfreq = fmhz;
}
-static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
+static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
struct device_node *dma_dp = dma_of->dev.of_node;
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
struct device_node *dp;
u8 bursts, val;
@@ -195,7 +195,7 @@ static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
esp->bursts = bursts;
}
-static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
+static void __devinit esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
esp_get_scsi_id(esp, espdma);
esp_get_differential(esp);
@@ -216,7 +216,7 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
size_t sz, int dir)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
return dma_map_single(&op->dev, buf, sz, dir);
}
@@ -224,7 +224,7 @@ static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
return dma_map_sg(&op->dev, sg, num_sg, dir);
}
@@ -232,7 +232,7 @@ static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
size_t sz, int dir)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
dma_unmap_single(&op->dev, addr, sz, dir);
}
@@ -240,7 +240,7 @@ static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
- struct of_device *op = esp->dev;
+ struct platform_device *op = esp->dev;
dma_unmap_sg(&op->dev, sg, num_sg, dir);
}
@@ -256,7 +256,7 @@ static void sbus_esp_reset_dma(struct esp *esp)
{
int can_do_burst16, can_do_burst32, can_do_burst64;
int can_do_sbus64, lim;
- struct of_device *op;
+ struct platform_device *op;
u32 val;
can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
@@ -487,8 +487,8 @@ static const struct esp_driver_ops sbus_esp_ops = {
.dma_error = sbus_esp_dma_error,
};
-static int __devinit esp_sbus_probe_one(struct of_device *op,
- struct of_device *espdma,
+static int __devinit esp_sbus_probe_one(struct platform_device *op,
+ struct platform_device *espdma,
int hme)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
@@ -562,11 +562,11 @@ fail:
return err;
}
-static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit esp_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dma_node = NULL;
struct device_node *dp = op->dev.of_node;
- struct of_device *dma_of = NULL;
+ struct platform_device *dma_of = NULL;
int hme = 0;
if (dp->parent &&
@@ -585,10 +585,10 @@ static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device
return esp_sbus_probe_one(op, dma_of, hme);
}
-static int __devexit esp_sbus_remove(struct of_device *op)
+static int __devexit esp_sbus_remove(struct platform_device *op)
{
struct esp *esp = dev_get_drvdata(&op->dev);
- struct of_device *dma_of = esp->dma;
+ struct platform_device *dma_of = esp->dma;
unsigned int irq = esp->host->irq;
bool is_hme;
u32 val;
@@ -644,12 +644,12 @@ static struct of_platform_driver esp_sbus_driver = {
static int __init sunesp_init(void)
{
- return of_register_driver(&esp_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&esp_sbus_driver);
}
static void __exit sunesp_exit(void)
{
- of_unregister_driver(&esp_sbus_driver);
+ of_unregister_platform_driver(&esp_sbus_driver);
}
MODULE_DESCRIPTION("Sun ESP SCSI driver");
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7b09a..2c3e89ddf06 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
{
- if (label)
- sym_print_addr(cp->cmd, "%s: ", label);
- else
- sym_print_addr(cp->cmd, "");
+ sym_print_addr(cp->cmd, "%s: ", label);
spi_print_msg(msg);
printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
switch (np->msgin [2]) {
case M_X_MODIFY_DP:
if (DEBUG_FLAGS & DEBUG_POINTER)
- sym_print_msg(cp, NULL, np->msgin);
+ sym_print_msg(cp, "extended msg ",
+ np->msgin);
tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
(np->msgin[5]<<8) + (np->msgin[6]);
sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
*/
case M_IGN_RESIDUE:
if (DEBUG_FLAGS & DEBUG_POINTER)
- sym_print_msg(cp, NULL, np->msgin);
+ sym_print_msg(cp, "1 or 2 byte ", np->msgin);
if (cp->host_flags & HF_SENSE)
OUTL_DSP(np, SCRIPTA_BA(np, clrack));
else
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 8681f134505..d89aa38c5cf 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -216,7 +216,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
- unsigned int baud, quot, h_lcr;
+ unsigned int baud, quot, h_lcr, b;
/*
* We don't support modem control lines.
@@ -234,12 +234,8 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
-
- if (port->state && port->state->port.tty) {
- struct tty_struct *tty = port->state->port.tty;
- unsigned int b = port->uartclk / (16 * quot);
- tty_encode_baud_rate(tty, b, b);
- }
+ b = port->uartclk / (16 * quot);
+ tty_termios_encode_baud_rate(termios, b, b);
switch (termios->c_cflag & CSIZE) {
case CS5:
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 30463862603..be0ebce36e5 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -78,10 +78,6 @@ struct m68k_serial *m68k_consinfo = 0;
#define M68K_CLOCK (16667000) /* FIXME: 16MHz is likely wrong */
-#ifdef CONFIG_CONSOLE
-extern wait_queue_head_t keypress_wait;
-#endif
-
struct tty_driver *serial_driver;
/* number of characters left in xmit buffer before we ask for more */
@@ -102,19 +98,13 @@ static void change_speed(struct m68k_serial *info);
* Setup for console. Argument comes from the boot command line.
*/
-#if defined(CONFIG_M68EZ328ADS) || defined(CONFIG_ALMA_ANS) || defined(CONFIG_DRAGONIXVZ)
-#define CONSOLE_BAUD_RATE 115200
-#define DEFAULT_CBAUD B115200
-#else
- /* (es) */
- /* note: this is messy, but it works, again, perhaps defined somewhere else?*/
- #ifdef CONFIG_M68VZ328
- #define CONSOLE_BAUD_RATE 19200
- #define DEFAULT_CBAUD B19200
- #endif
- /* (/es) */
+/* note: this is messy, but it works, again, perhaps defined somewhere else?*/
+#ifdef CONFIG_M68VZ328
+#define CONSOLE_BAUD_RATE 19200
+#define DEFAULT_CBAUD B19200
#endif
+
#ifndef CONSOLE_BAUD_RATE
#define CONSOLE_BAUD_RATE 9600
#define DEFAULT_CBAUD B9600
@@ -300,10 +290,6 @@ static void receive_chars(struct m68k_serial *info, unsigned short rx)
return;
#endif /* CONFIG_MAGIC_SYSRQ */
}
- /* It is a 'keyboard interrupt' ;-) */
-#ifdef CONFIG_CONSOLE
- wake_up(&keypress_wait);
-#endif
}
if(!tty)
@@ -883,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info,
tmp.close_delay = info->close_delay;
tmp.closing_wait = info->closing_wait;
tmp.custom_divisor = info->custom_divisor;
- copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
return 0;
}
@@ -896,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info,
if (!new_info)
return -EFAULT;
- copy_from_user(&new_serial,new_info,sizeof(new_serial));
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
old_info = *info;
if (!capable(CAP_SYS_ADMIN)) {
@@ -957,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
status = 0;
#endif
local_irq_restore(flags);
- put_user(status,value);
- return 0;
+ return put_user(status, value);
}
/*
@@ -1013,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
send_break(info, arg ? arg*(100) : 250);
return 0;
case TIOCGSERIAL:
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct)))
- return get_serial_info(info,
- (struct serial_struct *) arg);
- return -EFAULT;
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
return set_serial_info(info,
(struct serial_struct *) arg);
case TIOCSERGETLSR: /* Get line status register */
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int)))
- return get_lsr_info(info, (unsigned int *) arg);
- return -EFAULT;
+ return get_lsr_info(info, (unsigned int *) arg);
case TIOCSERGSTRUCT:
- if (!access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct m68k_serial)))
+ if (copy_to_user((struct m68k_serial *) arg,
+ info, sizeof(struct m68k_serial)))
return -EFAULT;
- copy_to_user((struct m68k_serial *) arg,
- info, sizeof(struct m68k_serial));
return 0;
-
default:
return -ENOIOCTLCMD;
}
@@ -1243,7 +1222,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
retval = -ERESTARTSYS;
break;
}
+ tty_unlock();
schedule();
+ tty_lock();
}
current->state = TASK_RUNNING;
remove_wait_queue(&info->open_wait, &wait);
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index 768612f8e41..0dff3bbddc8 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -1705,7 +1705,6 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
printk("jiff=%lu...", jiffies);
#endif
- lock_kernel();
/* We go through the loop at least once because we can't tell
* exactly when the last character exits the shifter. There can
* be at least two characters waiting to be sent after the buffers
@@ -1734,7 +1733,6 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
bdp--;
} while (bdp->status & BD_SC_READY);
current->state = TASK_RUNNING;
- unlock_kernel();
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
@@ -1862,7 +1860,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
printk("block_til_ready blocking: ttys%d, count = %d\n",
info->line, state->count);
#endif
+ tty_unlock();
schedule();
+ tty_lock();
}
current->state = TASK_RUNNING;
remove_wait_queue(&info->open_wait, &wait);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 891e1dd65f2..24110f6f61e 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -241,7 +241,7 @@ static const struct serial8250_config uart_config[] = {
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
},
[PORT_16654] = {
.name = "ST16654",
@@ -300,9 +300,16 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
+ [PORT_U6_16550A] = {
+ .name = "U6_16550A",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
};
-#if defined (CONFIG_SERIAL_8250_AU1X00)
+#if defined(CONFIG_MIPS_ALCHEMY)
/* Au1x00 UART hardware has a weird register layout */
static const u8 au_io_in_map[] = {
@@ -422,7 +429,6 @@ static unsigned int mem32_serial_in(struct uart_port *p, int offset)
return readl(p->membase + offset);
}
-#ifdef CONFIG_SERIAL_8250_AU1X00
static unsigned int au_serial_in(struct uart_port *p, int offset)
{
offset = map_8250_in_reg(p, offset) << p->regshift;
@@ -434,7 +440,6 @@ static void au_serial_out(struct uart_port *p, int offset, int value)
offset = map_8250_out_reg(p, offset) << p->regshift;
__raw_writel(value, p->membase + offset);
}
-#endif
static unsigned int tsi_serial_in(struct uart_port *p, int offset)
{
@@ -503,12 +508,11 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = mem32_serial_out;
break;
-#ifdef CONFIG_SERIAL_8250_AU1X00
case UPIO_AU:
p->serial_in = au_serial_in;
p->serial_out = au_serial_out;
break;
-#endif
+
case UPIO_TSI:
p->serial_in = tsi_serial_in;
p->serial_out = tsi_serial_out;
@@ -535,9 +539,7 @@ serial_out_sync(struct uart_8250_port *up, int offset, int value)
switch (p->iotype) {
case UPIO_MEM:
case UPIO_MEM32:
-#ifdef CONFIG_SERIAL_8250_AU1X00
case UPIO_AU:
-#endif
case UPIO_DWAPB:
p->serial_out(p, offset, value);
p->serial_in(p, UART_LCR); /* safe, no side-effects */
@@ -573,7 +575,7 @@ static inline void _serial_dl_write(struct uart_8250_port *up, int value)
serial_outp(up, UART_DLM, value >> 8 & 0xff);
}
-#if defined(CONFIG_SERIAL_8250_AU1X00)
+#if defined(CONFIG_MIPS_ALCHEMY)
/* Au1x00 doesn't have a standard divisor latch */
static int serial_dl_read(struct uart_8250_port *up)
{
@@ -1075,6 +1077,15 @@ static void autoconfig_16550a(struct uart_8250_port *up)
DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
}
serial_outp(up, UART_IER, iersave);
+
+ /*
+ * We distinguish between 16550A and U6 16550A by counting
+ * how many bytes are in the FIFO.
+ */
+ if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
+ up->port.type = PORT_U6_16550A;
+ up->capabilities |= UART_CAP_AFE;
+ }
}
/*
@@ -2229,9 +2240,9 @@ static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int
return quot;
}
-static void
-serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+void
+serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
{
struct uart_8250_port *up = (struct uart_8250_port *)port;
unsigned char cval, fcr = 0;
@@ -2407,16 +2418,22 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
}
+EXPORT_SYMBOL(serial8250_do_set_termios);
static void
-serial8250_set_ldisc(struct uart_port *port)
+serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
+ if (port->set_termios)
+ port->set_termios(port, termios, old);
+ else
+ serial8250_do_set_termios(port, termios, old);
+}
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+static void
+serial8250_set_ldisc(struct uart_port *port, int new)
+{
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
serial8250_enable_ms(port);
} else
@@ -2596,11 +2613,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (flags & UART_CONFIG_TYPE)
autoconfig(up, probeflags);
-#ifdef CONFIG_SERIAL_8250_AU1X00
/* if access method is AU, it is a 16550 with a quirk */
if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
-#endif
if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
@@ -2994,6 +3009,7 @@ static int __devinit serial8250_probe(struct platform_device *dev)
port.type = p->type;
port.serial_in = p->serial_in;
port.serial_out = p->serial_out;
+ port.set_termios = p->set_termios;
port.dev = &dev->dev;
port.irqflags |= irqflag;
ret = serial8250_register_port(&port);
@@ -3157,6 +3173,9 @@ int serial8250_register_port(struct uart_port *port)
uart->port.serial_in = port->serial_in;
if (port->serial_out)
uart->port.serial_out = port->serial_out;
+ /* Possibly override set_termios call */
+ if (port->set_termios)
+ uart->port.set_termios = port->set_termios;
ret = uart_add_one_port(&serial8250_reg, &uart->port);
if (ret == 0)
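The 8250.c changes export the generic termios code as serial8250_do_set_termios() and make serial8250_set_termios() a thin wrapper that prefers a board-supplied port->set_termios hook. A minimal C sketch of that override-with-fallback pattern follows; the stripped-down structs and names are stand-ins for illustration, not the real uart_port definitions.

#include <stdio.h>

struct ktermios { unsigned int c_cflag; };

struct uart_port {
	/* Optional per-board hook, mirroring the new port->set_termios field. */
	void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
};

static void do_set_termios_default(struct uart_port *port, struct ktermios *new,
				   struct ktermios *old)
{
	printf("generic 8250 termios setup\n");
}

/* Wrapper: prefer the board override, else fall back to the generic code. */
static void set_termios(struct uart_port *port, struct ktermios *new,
			struct ktermios *old)
{
	if (port->set_termios)
		port->set_termios(port, new, old);
	else
		do_set_termios_default(port, new, old);
}

static void board_set_termios(struct uart_port *port, struct ktermios *new,
			      struct ktermios *old)
{
	printf("board quirk first, then the generic path\n");
	do_set_termios_default(port, new, old);
}

int main(void)
{
	struct uart_port plain = { 0 };
	struct uart_port quirky = { .set_termios = board_set_termios };
	struct ktermios t = { 0 };

	set_termios(&plain, &t, NULL);
	set_termios(&quirky, &t, NULL);
	return 0;
}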
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index f279745e9fe..eaafb98debe 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -19,9 +19,11 @@
* The user can specify the device directly, e.g.,
* earlycon=uart8250,io,0x3f8,9600n8
* earlycon=uart8250,mmio,0xff5e0000,115200n8
+ * earlycon=uart8250,mmio32,0xff5e0000,115200n8
* or
* console=uart8250,io,0x3f8,9600n8
* console=uart8250,mmio,0xff5e0000,115200n8
+ * console=uart8250,mmio32,0xff5e0000,115200n8
*/
#include <linux/tty.h>
@@ -48,18 +50,31 @@ static struct early_serial8250_device early_device;
static unsigned int __init serial_in(struct uart_port *port, int offset)
{
- if (port->iotype == UPIO_MEM)
+ switch (port->iotype) {
+ case UPIO_MEM:
return readb(port->membase + offset);
- else
+ case UPIO_MEM32:
+ return readl(port->membase + (offset << 2));
+ case UPIO_PORT:
return inb(port->iobase + offset);
+ default:
+ return 0;
+ }
}
static void __init serial_out(struct uart_port *port, int offset, int value)
{
- if (port->iotype == UPIO_MEM)
+ switch (port->iotype) {
+ case UPIO_MEM:
writeb(value, port->membase + offset);
- else
+ break;
+ case UPIO_MEM32:
+ writel(value, port->membase + (offset << 2));
+ break;
+ case UPIO_PORT:
outb(value, port->iobase + offset);
+ break;
+ }
}
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
@@ -137,15 +152,21 @@ static int __init parse_options(struct early_serial8250_device *device,
char *options)
{
struct uart_port *port = &device->port;
- int mmio, length;
+ int mmio, mmio32, length;
if (!options)
return -ENODEV;
port->uartclk = BASE_BAUD * 16;
- if (!strncmp(options, "mmio,", 5)) {
- port->iotype = UPIO_MEM;
- port->mapbase = simple_strtoul(options + 5, &options, 0);
+
+ mmio = !strncmp(options, "mmio,", 5);
+ mmio32 = !strncmp(options, "mmio32,", 7);
+ if (mmio || mmio32) {
+ port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
+ port->mapbase = simple_strtoul(options + (mmio ? 5 : 7),
+ &options, 0);
+ if (mmio32)
+ port->regshift = 2;
#ifdef CONFIG_FIX_EARLYCON_MEM
set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
port->mapbase & PAGE_MASK);
@@ -157,11 +178,10 @@ static int __init parse_options(struct early_serial8250_device *device,
if (!port->membase) {
printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
__func__,
- (unsigned long long)port->mapbase);
+ (unsigned long long) port->mapbase);
return -ENOMEM;
}
#endif
- mmio = 1;
} else if (!strncmp(options, "io,", 3)) {
port->iotype = UPIO_PORT;
port->iobase = simple_strtoul(options + 3, &options, 0);
@@ -181,11 +201,18 @@ static int __init parse_options(struct early_serial8250_device *device,
device->baud);
}
- printk(KERN_INFO "Early serial console at %s 0x%llx (options '%s')\n",
- mmio ? "MMIO" : "I/O port",
- mmio ? (unsigned long long) port->mapbase
- : (unsigned long long) port->iobase,
- device->options);
+ if (mmio || mmio32)
+ printk(KERN_INFO
+ "Early serial console at MMIO%s 0x%llx (options '%s')\n",
+ mmio32 ? "32" : "",
+ (unsigned long long)port->mapbase,
+ device->options);
+ else
+ printk(KERN_INFO
+ "Early serial console at I/O port 0x%lx (options '%s')\n",
+ port->iobase,
+ device->options);
+
return 0;
}
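parse_options() above now accepts an "mmio32," prefix alongside "mmio," and "io,", selecting UPIO_MEM32 and a register shift of 2 for 32-bit accessors. The short userspace sketch below mirrors that option parsing; the function name and the printed summary are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "mmio,<addr>", "mmio32,<addr>" or "io,<addr>" the way parse_options() does. */
static int parse_earlycon_sketch(const char *options)
{
	int mmio = !strncmp(options, "mmio,", 5);
	int mmio32 = !strncmp(options, "mmio32,", 7);

	if (mmio || mmio32) {
		unsigned long long mapbase =
			strtoull(options + (mmio ? 5 : 7), NULL, 0);
		printf("MMIO%s console at 0x%llx, regshift=%d\n",
		       mmio32 ? "32" : "", mapbase, mmio32 ? 2 : 0);
	} else if (!strncmp(options, "io,", 3)) {
		unsigned long iobase = strtoul(options + 3, NULL, 0);
		printf("I/O port console at 0x%lx\n", iobase);
	} else {
		return -1;
	}
	return 0;
}

int main(void)
{
	parse_earlycon_sketch("mmio32,0xff5e0000,115200n8");
	parse_earlycon_sketch("io,0x3f8,9600n8");
	return 0;
}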
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 746a44621d9..53be4d35a0a 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -994,6 +994,7 @@ static int skip_tx_en_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -1542,6 +1543,8 @@ enum pci_board_num_t {
pbn_b2_4_921600,
pbn_b2_8_921600,
+ pbn_b2_8_1152000,
+
pbn_b2_bt_1_115200,
pbn_b2_bt_2_115200,
pbn_b2_bt_4_115200,
@@ -1960,6 +1963,13 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.uart_offset = 8,
},
+ [pbn_b2_8_1152000] = {
+ .flags = FL_BASE2,
+ .num_ports = 8,
+ .base_baud = 1152000,
+ .uart_offset = 8,
+ },
+
[pbn_b2_bt_1_115200] = {
.flags = FL_BASE2|FL_BASE_BARS,
.num_ports = 1,
@@ -2875,6 +2885,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_921600 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958,
+ PCI_ANY_ID , PCI_ANY_ID, 0, 0,
+ pbn_b2_8_1152000 },
/*
* Oxford Semiconductor Inc. Tornado PCI express device range.
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 8b23165bc5d..12900f7083b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -258,14 +258,6 @@ config SERIAL_8250_ACORN
system, say Y to this option. The driver can handle 1, 2, or 3 port
cards. If unsure, say N.
-config SERIAL_8250_AU1X00
- bool "Au1x00 serial port support"
- depends on SERIAL_8250 != n && SOC_AU1X00
- help
- If you have an Au1x00 SOC based board and want to use the serial port,
- say Y to this option. The driver can handle up to 4 serial ports,
- depending on the SOC. If unsure, say N.
-
config SERIAL_8250_RM9K
bool "Support for MIPS RM9xxx integrated serial port"
depends on SERIAL_8250 != n && SERIAL_RM9000
@@ -544,12 +536,13 @@ config SERIAL_S3C6400
config SERIAL_S5PV210
tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442)
- select SERIAL_SAMSUNG_UARTS_4 if CPU_S5PV210
+ depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_S5PV310)
+ select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_S5PV310)
default y
help
Serial port support for Samsung's S5P Family of SoC's
+
config SERIAL_MAX3100
tristate "MAX3100 support"
depends on SPI
@@ -557,6 +550,22 @@ config SERIAL_MAX3100
help
MAX3100 chip support
+config SERIAL_MAX3107
+ tristate "MAX3107 support"
+ depends on SPI
+ select SERIAL_CORE
+ help
+ MAX3107 chip support
+
+config SERIAL_MAX3107_AAVA
+ tristate "MAX3107 AAVA platform support"
+ depends on X86_MRST && SERIAL_MAX3107 && GPIOLIB
+ select SERIAL_CORE
+ help
+ Support for the MAX3107 chip configuration found on the AAVA
+ platform. Includes the extra initialisation and GPIO support
+ needed for this device.
+
config SERIAL_DZ
bool "DECstation DZ serial driver"
depends on MACH_DECSTATION && 32BIT
@@ -698,6 +707,33 @@ config SERIAL_SA1100_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_MRST_MAX3110
+ tristate "SPI UART driver for Max3110"
+ depends on SPI_DW_PCI
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ help
+ This is the UART protocol driver for the MAX3110 device on
+ the Intel Moorestown platform. On other systems use the max3100
+ driver.
+
+config MRST_MAX3110_IRQ
+ boolean "Enable GPIO IRQ for Max3110 over Moorestown"
+ default n
+ depends on SERIAL_MRST_MAX3110 && GPIO_LANGWELL
+ help
+ This has to be enabled after the Moorestown GPIO driver is loaded
+
+config SERIAL_MFD_HSU
+ tristate "Medfield High Speed UART support"
+ depends on PCI
+ select SERIAL_CORE
+
+config SERIAL_MFD_HSU_CONSOLE
+ boolean "Medfile HSU serial console support"
+ depends on SERIAL_MFD_HSU=y
+ select SERIAL_CORE_CONSOLE
+
config SERIAL_BFIN
tristate "Blackfin serial port support"
depends on BLACKFIN
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 208a85572c3..1ca4fd599ff 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -46,6 +46,8 @@ obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
+obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
+obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
obj-$(CONFIG_SERIAL_MUX) += mux.o
obj-$(CONFIG_SERIAL_68328) += 68328serial.o
@@ -84,3 +86,5 @@ obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
+obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
+obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
diff --git a/drivers/serial/altera_uart.c b/drivers/serial/altera_uart.c
index 0f1189605d2..f8d8a00554d 100644
--- a/drivers/serial/altera_uart.c
+++ b/drivers/serial/altera_uart.c
@@ -394,7 +394,7 @@ int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
static void altera_uart_console_putc(struct uart_port *port, const char c)
{
while (!(readl(port->membase + ALTERA_UART_STATUS_REG) &
- ALTERA_UART_STATUS_TRDY_MSK))
+ ALTERA_UART_STATUS_TRDY_MSK))
cpu_relax();
writel(c, port->membase + ALTERA_UART_TXDATA_REG);
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ffe8e3..2904aa04412 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
spin_unlock_irqrestore(&uap->port.lock, flags);
}
-static void pl010_set_ldisc(struct uart_port *port)
+static void pl010_set_ldisc(struct uart_port *port, int new)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
-
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
pl010_enable_ms(port);
} else
diff --git a/drivers/serial/apbuart.c b/drivers/serial/apbuart.c
index 0099b8692b6..cc01c650a14 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/serial/apbuart.c
@@ -551,7 +551,7 @@ static struct uart_driver grlib_apbuart_driver = {
/* OF Platform Driver */
/* ======================================================================== */
-static int __devinit apbuart_probe(struct of_device *op,
+static int __devinit apbuart_probe(struct platform_device *op,
const struct of_device_id *match)
{
int i = -1;
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index a182def7007..3892666b5fb 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -217,7 +217,8 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
- UART_PUT_TTGR(port, rs485conf->delay_rts_before_send);
+ if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
+ UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -292,7 +293,9 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+ if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ UART_PUT_TTGR(port,
+ atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -1211,7 +1214,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+ if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ UART_PUT_TTGR(port,
+ atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 511cbf68787..a9eff2b18ea 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -957,15 +957,12 @@ bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
* Enable the IrDA function if tty->ldisc.num is N_IRDA.
* In other cases, disable IrDA function.
*/
-static void bfin_serial_set_ldisc(struct uart_port *port)
+static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
{
int line = port->line;
unsigned short val;
- if (line >= port->state->port.tty->driver->num)
- return;
-
- switch (port->state->port.tty->termios->c_line) {
+ switch (ld) {
case N_IRDA:
val = UART_GET_GCTL(&bfin_serial_ports[line]);
val |= (IREN | RPOLC);
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index e57fb3d228e..5318dd3774a 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
unsigned int sclk = get_sclk();
/* Set TCR1 and TCR2, TFSR is not enabled for uart */
- SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+ SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
SPORT_PUT_TCR2(up, size + 1);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 6016179db53..f2b8adcc6c9 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -1340,7 +1340,7 @@ static struct uart_driver cpm_reg = {
static int probe_index;
-static int __devinit cpm_uart_probe(struct of_device *ofdev,
+static int __devinit cpm_uart_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
int index = probe_index++;
@@ -1364,7 +1364,7 @@ static int __devinit cpm_uart_probe(struct of_device *ofdev,
return uart_add_one_port(&cpm_reg, &pinfo->port);
}
-static int __devexit cpm_uart_remove(struct of_device *ofdev)
+static int __devexit cpm_uart_remove(struct platform_device *ofdev)
{
struct uart_cpm_port *pinfo = dev_get_drvdata(&ofdev->dev);
return uart_remove_one_port(&cpm_reg, &pinfo->port);
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 31f172397af..c856905bb3b 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -3724,6 +3724,17 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
return e100_enable_rs485(tty, &rs485data);
}
+ case TIOCGRS485:
+ {
+ struct serial_rs485 *rs485data =
+ &(((struct e100_serial *)tty->driver_data)->rs485);
+ /* This is the ioctl to get RS485 data from user-space */
+ if (copy_to_user((struct serial_rs485 *) arg,
+ rs485data,
+ sizeof(serial_rs485)))
+ return -EFAULT;
+ break;
+ }
case TIOCSERWRRS485:
{
@@ -3924,7 +3935,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
* Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
* R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
*/
- lock_kernel();
orig_jiffies = jiffies;
while (info->xmit.head != info->xmit.tail || /* More in send queue */
(*info->ostatusadr & 0x007f) || /* more in FIFO */
@@ -3941,7 +3951,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
curr_time_usec - info->last_tx_active_usec;
}
set_current_state(TASK_RUNNING);
- unlock_kernel();
}
/*
@@ -3981,7 +3990,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- wait_event_interruptible(info->close_wait,
+ wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4057,7 +4066,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
printk("block_til_ready blocking: ttyS%d, count = %d\n",
info->line, info->count);
#endif
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&info->open_wait, &wait);
@@ -4139,7 +4150,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- wait_event_interruptible(info->close_wait,
+ wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
@@ -4522,8 +4533,8 @@ static int __init rs_init(void)
INIT_WORK(&info->work, do_softint);
if (info->enabled) {
- printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n",
- serial_driver->name, info->line, (unsigned int)info->ioport);
+ printk(KERN_INFO "%s%d at %p is a builtin UART with DMA\n",
+ serial_driver->name, info->line, info->ioport);
}
}
#ifdef CONFIG_ETRAX_FAST_TIMER
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index eacb588a934..66ecc7ab6da 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -909,13 +909,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
rational_best_approximation(16 * div * baud, sport->port.uartclk,
1 << 16, 1 << 16, &num, &denom);
- if (port->state && port->state->port.tty) {
- tdiv64 = sport->port.uartclk;
- tdiv64 *= num;
- do_div(tdiv64, denom * 16 * div);
- tty_encode_baud_rate(sport->port.state->port.tty,
+ tdiv64 = sport->port.uartclk;
+ tdiv64 *= num;
+ do_div(tdiv64, denom * 16 * div);
+ tty_termios_encode_baud_rate(termios,
(speed_t)tdiv64, (speed_t)tdiv64);
- }
num -= 1;
denom -= 1;
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index f164ba4eba0..93de907b120 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -954,12 +954,13 @@ ioc3_change_speed(struct uart_port *the_port,
struct ktermios *new_termios, struct ktermios *old_termios)
{
struct ioc3_port *port = get_ioc3_port(the_port);
- unsigned int cflag;
+ unsigned int cflag, iflag;
int baud;
int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
struct uart_state *state = the_port->state;
cflag = new_termios->c_cflag;
+ iflag = new_termios->c_iflag;
switch (cflag & CSIZE) {
case CS5:
@@ -1000,12 +1001,12 @@ ioc3_change_speed(struct uart_port *the_port,
state->port.tty->low_latency = 1;
- if (I_IGNPAR(state->port.tty))
+ if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
| N_FRAMING_ERROR);
- if (I_IGNBRK(state->port.tty)) {
+ if (iflag & IGNBRK) {
the_port->ignore_status_mask &= ~N_BREAK;
- if (I_IGNPAR(state->port.tty))
+ if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
}
if (!(cflag & CREAD)) {
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 8ad28fc6492..fcfe82653ac 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -1685,11 +1685,12 @@ ioc4_change_speed(struct uart_port *the_port,
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
int baud, bits;
- unsigned cflag;
+ unsigned cflag, iflag;
int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
struct uart_state *state = the_port->state;
cflag = new_termios->c_cflag;
+ iflag = new_termios->c_iflag;
switch (cflag & CSIZE) {
case CS5:
@@ -1741,12 +1742,12 @@ ioc4_change_speed(struct uart_port *the_port,
state->port.tty->low_latency = 1;
- if (I_IGNPAR(state->port.tty))
+ if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
| N_FRAMING_ERROR);
- if (I_IGNBRK(state->port.tty)) {
+ if (iflag & IGNBRK) {
the_port->ignore_status_mask &= ~N_BREAK;
- if (I_IGNPAR(state->port.tty))
+ if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
}
if (!(cflag & CREAD)) {
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index a9a94ae7234..39f9a1adaa7 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -17,6 +17,7 @@
#include <linux/kdb.h>
#include <linux/tty.h>
#include <linux/console.h>
+#include <linux/vt_kern.h>
#define MAX_CONFIG_LEN 40
@@ -31,6 +32,7 @@ static struct kparam_string kps = {
.maxlen = MAX_CONFIG_LEN,
};
+static int kgdboc_use_kms; /* 1 if we use kernel mode switching */
static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
@@ -104,6 +106,12 @@ static int configure_kgdboc(void)
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;
+ kgdboc_use_kms = 0;
+ if (strncmp(cptr, "kms,", 4) == 0) {
+ cptr += 4;
+ kgdboc_use_kms = 1;
+ }
+
if (kgdboc_register_kbd(&cptr))
goto do_register;
@@ -201,8 +209,14 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
return configure_kgdboc();
}
+static int dbg_restore_graphics;
+
static void kgdboc_pre_exp_handler(void)
{
+ if (!dbg_restore_graphics && kgdboc_use_kms) {
+ dbg_restore_graphics = 1;
+ con_debug_enter(vc_cons[fg_console].d);
+ }
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
@@ -213,6 +227,10 @@ static void kgdboc_post_exp_handler(void)
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
+ if (kgdboc_use_kms && dbg_restore_graphics) {
+ dbg_restore_graphics = 0;
+ con_debug_leave();
+ }
}
static struct kgdb_io kgdboc_io_ops = {
diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c
index 3351c3bd59e..beb1afa27d8 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/serial/max3100.c
@@ -430,17 +430,14 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
int baud = 0;
unsigned cflag;
u32 param_new, param_mask, parity = 0;
- struct tty_struct *tty = s->port.state->port.tty;
dev_dbg(&s->spi->dev, "%s\n", __func__);
- if (!tty)
- return;
cflag = termios->c_cflag;
param_new = 0;
param_mask = 0;
- baud = tty_get_baud_rate(tty);
+ baud = tty_termios_baud_rate(termios);
param_new = s->conf & MAX3100_BAUD;
switch (baud) {
case 300:
@@ -485,7 +482,7 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
default:
baud = s->baud;
}
- tty_encode_baud_rate(tty, baud, baud);
+ tty_termios_encode_baud_rate(termios, baud, baud);
s->baud = baud;
param_mask |= MAX3100_BAUD;
diff --git a/drivers/serial/max3107-aava.c b/drivers/serial/max3107-aava.c
new file mode 100644
index 00000000000..a1fe304f2f5
--- /dev/null
+++ b/drivers/serial/max3107-aava.c
@@ -0,0 +1,344 @@
+/*
+ * max3107.c - spi uart protocol driver for Maxim 3107
+ * Based on max3100.c
+ * by Christian Pellegrin <chripell@evolware.org>
+ * and max3110.c
+ * by Feng Tang <feng.tang@intel.com>
+ *
+ * Copyright (C) Aavamobile 2009
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/spi/spi.h>
+#include <linux/freezer.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/sfi.h>
+#include <asm/mrst.h>
+#include "max3107.h"
+
+/* GPIO direction to input function */
+static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[1]; /* Buffer for SPI transfer */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return -EINVAL;
+ }
+
+ /* Read current GPIO configuration register */
+ buf[0] = MAX3107_GPIOCFG_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n");
+ return -EIO;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+
+ /* Set GPIO to input */
+ buf[0] &= ~(0x0001 << offset);
+
+ /* Write new GPIO configuration register value */
+ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* GPIO direction to output function */
+static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[2]; /* Buffer for SPI transfers */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return -EINVAL;
+ }
+
+ /* Read current GPIO configuration and data registers */
+ buf[0] = MAX3107_GPIOCFG_REG;
+ buf[1] = MAX3107_GPIODATA_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
+ dev_err(&s->spi->dev, "SPI transfer gpio failed\n");
+ return -EIO;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
+
+ /* Set GPIO to output */
+ buf[0] |= (0x0001 << offset);
+ /* Set value */
+ if (value)
+ buf[1] |= (0x0001 << offset);
+ else
+ buf[1] &= ~(0x0001 << offset);
+
+ /* Write new GPIO configuration and data register values */
+ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
+ buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer for GPIO conf data w failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* GPIO value query function */
+static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[1]; /* Buffer for SPI transfer */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return -EINVAL;
+ }
+
+ /* Read current GPIO data register */
+ buf[0] = MAX3107_GPIODATA_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n");
+ return -EIO;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+
+ /* Return value */
+ return buf[0] & (0x0001 << offset);
+}
+
+/* GPIO value set function */
+static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[2]; /* Buffer for SPI transfers */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return;
+ }
+
+	/* Read current GPIO data and configuration registers */
+ buf[0] = MAX3107_GPIODATA_REG;
+ buf[1] = MAX3107_GPIOCFG_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer for GPIO data and config read failed\n");
+ return;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
+
+ if (!(buf[1] & (0x0001 << offset))) {
+ /* Configured as input, can't set value */
+ dev_warn(&s->spi->dev,
+ "Trying to set value for input GPIO\n");
+ return;
+ }
+
+ /* Set value */
+ if (value)
+ buf[0] |= (0x0001 << offset);
+ else
+ buf[0] &= ~(0x0001 << offset);
+
+ /* Write new GPIO data register value */
+ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n");
+}
+
+/* GPIO chip data */
+static struct gpio_chip max3107_gpio_chip = {
+ .owner = THIS_MODULE,
+ .direction_input = max3107_gpio_direction_in,
+ .direction_output = max3107_gpio_direction_out,
+ .get = max3107_gpio_get,
+ .set = max3107_gpio_set,
+ .can_sleep = 1,
+ .base = MAX3107_GPIO_BASE,
+ .ngpio = MAX3107_GPIO_COUNT,
+};
+
+/**
+ * max3107_aava_reset - reset on AAVA systems
+ * @spi: The SPI device we are probing
+ *
+ * Reset the device ready for probing.
+ */
+
+static int max3107_aava_reset(struct spi_device *spi)
+{
+ /* Reset the chip */
+ if (gpio_request(MAX3107_RESET_GPIO, "max3107")) {
+ pr_err("Requesting RESET GPIO failed\n");
+ return -EIO;
+ }
+ if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) {
+ pr_err("Setting RESET GPIO to 0 failed\n");
+ gpio_free(MAX3107_RESET_GPIO);
+ return -EIO;
+ }
+ msleep(MAX3107_RESET_DELAY);
+ if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) {
+ pr_err("Setting RESET GPIO to 1 failed\n");
+ gpio_free(MAX3107_RESET_GPIO);
+ return -EIO;
+ }
+ gpio_free(MAX3107_RESET_GPIO);
+ msleep(MAX3107_WAKEUP_DELAY);
+ return 0;
+}
+
+static int max3107_aava_configure(struct max3107_port *s)
+{
+ int retval;
+
+ /* Initialize GPIO chip data */
+ s->chip = max3107_gpio_chip;
+ s->chip.label = s->spi->modalias;
+ s->chip.dev = &s->spi->dev;
+
+ /* Add GPIO chip */
+ retval = gpiochip_add(&s->chip);
+ if (retval) {
+ dev_err(&s->spi->dev, "Adding GPIO chip failed\n");
+ return retval;
+ }
+
+ /* Temporary fix for EV2 boot problems, set modem reset to 0 */
+ max3107_gpio_direction_out(&s->chip, 3, 0);
+ return 0;
+}
+
+#if 0
+/*
+ * This will get enabled once we have the board stuff merged for this
+ * specific case
+ */
+
+static const struct baud_table brg13_ext[] = {
+ { 300, MAX3107_BRG13_B300 },
+ { 600, MAX3107_BRG13_B600 },
+ { 1200, MAX3107_BRG13_B1200 },
+ { 2400, MAX3107_BRG13_B2400 },
+ { 4800, MAX3107_BRG13_B4800 },
+ { 9600, MAX3107_BRG13_B9600 },
+ { 19200, MAX3107_BRG13_B19200 },
+ { 57600, MAX3107_BRG13_B57600 },
+ { 115200, MAX3107_BRG13_B115200 },
+ { 230400, MAX3107_BRG13_B230400 },
+ { 460800, MAX3107_BRG13_B460800 },
+ { 921600, MAX3107_BRG13_B921600 },
+ { 0, 0 }
+};
+
+static void max3107_aava_init(struct max3107_port *s)
+{
+	/* Override for AAVA SC specific settings */
+ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
+ if (get_koski_build_id() <= KOSKI_EV2)
+ if (s->ext_clk) {
+ s->brg_cfg = MAX3107_BRG13_B9600;
+ s->baud_tbl = (struct baud_table *)brg13_ext;
+ }
+ }
+}
+#endif
+
+static int __devexit max3107_aava_remove(struct spi_device *spi)
+{
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ /* Remove GPIO chip */
+ if (gpiochip_remove(&s->chip))
+ dev_warn(&spi->dev, "Removing GPIO chip failed\n");
+
+ /* Then do the default remove */
+ return max3107_remove(spi);
+}
+
+/* Platform data */
+static struct max3107_plat aava_plat_data = {
+ .loopback = 0,
+ .ext_clk = 1,
+/* .init = max3107_aava_init, */
+ .configure = max3107_aava_configure,
+ .hw_suspend = max3107_hw_susp,
+ .polled_mode = 0,
+ .poll_time = 0,
+};
+
+
+static int __devinit max3107_probe_aava(struct spi_device *spi)
+{
+ int err = max3107_aava_reset(spi);
+ if (err < 0)
+ return err;
+ return max3107_probe(spi, &aava_plat_data);
+}
+
+/* Spi driver data */
+static struct spi_driver max3107_driver = {
+ .driver = {
+ .name = "aava-max3107",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = max3107_probe_aava,
+ .remove = __devexit_p(max3107_aava_remove),
+ .suspend = max3107_suspend,
+ .resume = max3107_resume,
+};
+
+/* Driver init function */
+static int __init max3107_init(void)
+{
+ return spi_register_driver(&max3107_driver);
+}
+
+/* Driver exit function */
+static void __exit max3107_exit(void)
+{
+ spi_unregister_driver(&max3107_driver);
+}
+
+module_init(max3107_init);
+module_exit(max3107_exit);
+
+MODULE_DESCRIPTION("MAX3107 driver");
+MODULE_AUTHOR("Aavamobile");
+MODULE_ALIAS("aava-max3107-spi");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/serial/max3107.c b/drivers/serial/max3107.c
new file mode 100644
index 00000000000..67283c1a57f
--- /dev/null
+++ b/drivers/serial/max3107.c
@@ -0,0 +1,1197 @@
+/*
+ * max3107.c - spi uart protocol driver for Maxim 3107
+ * Based on max3100.c
+ * by Christian Pellegrin <chripell@evolware.org>
+ * and max3110.c
+ * by Feng Tang <feng.tang@intel.com>
+ *
+ * Copyright (C) Aavamobile 2009
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/freezer.h>
+#include "max3107.h"
+
+static const struct baud_table brg26_ext[] = {
+ { 300, MAX3107_BRG26_B300 },
+ { 600, MAX3107_BRG26_B600 },
+ { 1200, MAX3107_BRG26_B1200 },
+ { 2400, MAX3107_BRG26_B2400 },
+ { 4800, MAX3107_BRG26_B4800 },
+ { 9600, MAX3107_BRG26_B9600 },
+ { 19200, MAX3107_BRG26_B19200 },
+ { 57600, MAX3107_BRG26_B57600 },
+ { 115200, MAX3107_BRG26_B115200 },
+ { 230400, MAX3107_BRG26_B230400 },
+ { 460800, MAX3107_BRG26_B460800 },
+ { 921600, MAX3107_BRG26_B921600 },
+ { 0, 0 }
+};
+
+static const struct baud_table brg13_int[] = {
+ { 300, MAX3107_BRG13_IB300 },
+ { 600, MAX3107_BRG13_IB600 },
+ { 1200, MAX3107_BRG13_IB1200 },
+ { 2400, MAX3107_BRG13_IB2400 },
+ { 4800, MAX3107_BRG13_IB4800 },
+ { 9600, MAX3107_BRG13_IB9600 },
+ { 19200, MAX3107_BRG13_IB19200 },
+ { 57600, MAX3107_BRG13_IB57600 },
+ { 115200, MAX3107_BRG13_IB115200 },
+ { 230400, MAX3107_BRG13_IB230400 },
+ { 460800, MAX3107_BRG13_IB460800 },
+ { 921600, MAX3107_BRG13_IB921600 },
+ { 0, 0 }
+};
+
+static u32 get_new_brg(int baud, struct max3107_port *s)
+{
+ int i;
+ const struct baud_table *baud_tbl = s->baud_tbl;
+
+	for (i = 0; baud_tbl[i].baud; i++) {
+ if (baud == baud_tbl[i].baud)
+ return baud_tbl[i].new_brg;
+ }
+
+ return 0;
+}
+
+/* Perform SPI transfer for write/read of device register(s) */
+int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len)
+{
+ struct spi_message spi_msg;
+ struct spi_transfer spi_xfer;
+
+	/* Initialize SPI message */
+ spi_message_init(&spi_msg);
+
+ /* Initialize SPI transfer */
+ memset(&spi_xfer, 0, sizeof spi_xfer);
+ spi_xfer.len = len;
+ spi_xfer.tx_buf = tx;
+ spi_xfer.rx_buf = rx;
+ spi_xfer.speed_hz = MAX3107_SPI_SPEED;
+
+ /* Add SPI transfer to SPI message */
+ spi_message_add_tail(&spi_xfer, &spi_msg);
+
+#ifdef DBG_TRACE_SPI_DATA
+ {
+ int i;
+ pr_info("tx len %d:\n", spi_xfer.len);
+ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++)
+ pr_info(" %x", ((u8 *)spi_xfer.tx_buf)[i]);
+ pr_info("\n");
+ }
+#endif
+
+ /* Perform synchronous SPI transfer */
+ if (spi_sync(s->spi, &spi_msg)) {
+ dev_err(&s->spi->dev, "spi_sync failure\n");
+ return -EIO;
+ }
+
+#ifdef DBG_TRACE_SPI_DATA
+ if (spi_xfer.rx_buf) {
+ int i;
+ pr_info("rx len %d:\n", spi_xfer.len);
+ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++)
+ pr_info(" %x", ((u8 *)spi_xfer.rx_buf)[i]);
+ pr_info("\n");
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_rw);
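+
+/*
+ * Illustrative sketch only, not part of the driver: a single register
+ * read built on max3107_rw(). Each 16-bit SPI word carries the register
+ * address in its high byte and data in its low byte; the helper name
+ * below is hypothetical.
+ */
+#if 0
+static int max3107_read_reg_example(struct max3107_port *s, u16 reg, u8 *val)
+{
+	u16 buf[1];	/* Buffer for SPI transfer */
+
+	buf[0] = reg;	/* Read: write bit clear, data byte zero */
+	if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2))
+		return -EIO;
+	*val = buf[0] & MAX3107_SPI_RX_DATA_MASK;
+	return 0;
+}
+#endif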
+
+/* Push received data to the tty flip buffer */
+static void put_data_to_circ_buf(struct max3107_port *s, unsigned char *data,
+ int len)
+{
+ struct uart_port *port = &s->port;
+ struct tty_struct *tty;
+
+ if (!port->state)
+ return;
+
+ tty = port->state->port.tty;
+ if (!tty)
+ return;
+
+ /* Insert received data */
+ tty_insert_flip_string(tty, data, len);
+ /* Update RX counter */
+ port->icount.rx += len;
+}
+
+/* Handle data receiving */
+static void max3107_handlerx(struct max3107_port *s, u16 rxlvl)
+{
+ int i;
+ int j;
+ int len; /* SPI transfer buffer length */
+ u16 *buf;
+ u8 *valid_str;
+
+ if (!s->rx_enabled)
+ /* RX is disabled */
+ return;
+
+ if (rxlvl == 0) {
+ /* RX fifo is empty */
+ return;
+ } else if (rxlvl >= MAX3107_RX_FIFO_SIZE) {
+ dev_warn(&s->spi->dev, "Possible RX FIFO overrun %d\n", rxlvl);
+ /* Ensure sanity of RX level */
+ rxlvl = MAX3107_RX_FIFO_SIZE;
+ }
+	if (!s->rxbuf || !s->rxstr) {
+ dev_warn(&s->spi->dev, "Rx buffer/str isn't ready\n");
+ return;
+ }
+ buf = s->rxbuf;
+ valid_str = s->rxstr;
+ while (rxlvl) {
+ pr_debug("rxlvl %d\n", rxlvl);
+ /* Clear buffer */
+ memset(buf, 0, sizeof(u16) * (MAX3107_RX_FIFO_SIZE + 2));
+ len = 0;
+ if (s->irqen_reg & MAX3107_IRQ_RXFIFO_BIT) {
+ /* First disable RX FIFO interrupt */
+ pr_debug("Disabling RX INT\n");
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg &= ~MAX3107_IRQ_RXFIFO_BIT;
+ buf[0] |= s->irqen_reg;
+ len++;
+ }
+		/* Just increase the length by the number of words in the
+		 * FIFO, since the buffer was zeroed and an SPI transfer of
+		 * 0x0000 means reading from the RX FIFO
+		 */
+ len += rxlvl;
+ /* Append RX level query */
+ buf[len] = MAX3107_RXFIFOLVL_REG;
+ len++;
+
+ /* Perform the SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, len * 2)) {
+ dev_err(&s->spi->dev, "SPI transfer for RX h failed\n");
+ return;
+ }
+
+ /* Skip RX FIFO interrupt disabling word if it was added */
+ j = ((len - 1) - rxlvl);
+ /* Read received words */
+ for (i = 0; i < rxlvl; i++, j++)
+ valid_str[i] = (u8)buf[j];
+ put_data_to_circ_buf(s, valid_str, rxlvl);
+ /* Get new RX level */
+ rxlvl = (buf[len - 1] & MAX3107_SPI_RX_DATA_MASK);
+ }
+
+ if (s->rx_enabled) {
+ /* RX still enabled, re-enable RX FIFO interrupt */
+ pr_debug("Enabling RX INT\n");
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
+ buf[0] |= s->irqen_reg;
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "RX FIFO INT enabling failed\n");
+ }
+
+ /* Push the received data to receivers */
+ if (s->port.state->port.tty)
+ tty_flip_buffer_push(s->port.state->port.tty);
+}
+
+
+/* Handle data sending */
+static void max3107_handletx(struct max3107_port *s)
+{
+ struct circ_buf *xmit = &s->port.state->xmit;
+ int i;
+ unsigned long flags;
+ int len; /* SPI transfer buffer length */
+ u16 *buf;
+
+ if (!s->tx_fifo_empty)
+ /* Don't send more data before previous data is sent */
+ return;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&s->port))
+ /* No data to send or TX is stopped */
+ return;
+
+ if (!s->txbuf) {
+ dev_warn(&s->spi->dev, "Txbuf isn't ready\n");
+ return;
+ }
+ buf = s->txbuf;
+ /* Get length of data pending in circular buffer */
+ len = uart_circ_chars_pending(xmit);
+ if (len) {
+ /* Limit to size of TX FIFO */
+ if (len > MAX3107_TX_FIFO_SIZE)
+ len = MAX3107_TX_FIFO_SIZE;
+
+ pr_debug("txlen %d\n", len);
+
+ /* Update TX counter */
+ s->port.icount.tx += len;
+
+ /* TX FIFO will no longer be empty */
+ s->tx_fifo_empty = 0;
+
+ i = 0;
+ if (s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT) {
+ /* First disable TX empty interrupt */
+ pr_debug("Disabling TE INT\n");
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg &= ~MAX3107_IRQ_TXEMPTY_BIT;
+ buf[i] |= s->irqen_reg;
+ i++;
+ len++;
+ }
+ /* Add data to send */
+ spin_lock_irqsave(&s->port.lock, flags);
+ for ( ; i < len ; i++) {
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_THR_REG);
+ buf[i] |= ((u16)xmit->buf[xmit->tail] &
+ MAX3107_SPI_TX_DATA_MASK);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ }
+ spin_unlock_irqrestore(&s->port.lock, flags);
+ if (!(s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT)) {
+ /* Enable TX empty interrupt */
+ pr_debug("Enabling TE INT\n");
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg |= MAX3107_IRQ_TXEMPTY_BIT;
+ buf[i] |= s->irqen_reg;
+ i++;
+ len++;
+ }
+ if (!s->tx_enabled) {
+ /* Enable TX */
+ pr_debug("Enable TX\n");
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ spin_lock_irqsave(&s->data_lock, flags);
+ s->mode1_reg &= ~MAX3107_MODE1_TXDIS_BIT;
+ buf[i] |= s->mode1_reg;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ s->tx_enabled = 1;
+ i++;
+ len++;
+ }
+
+ /* Perform the SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, len*2)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer TX handling failed\n");
+ return;
+ }
+ }
+
+ /* Indicate wake up if circular buffer is getting low on data */
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+
+}
+
+/* Handle interrupts
+ * Also reads and returns current RX FIFO level
+ */
+static u16 handle_interrupt(struct max3107_port *s)
+{
+ u16 buf[4]; /* Buffer for SPI transfers */
+ u8 irq_status;
+ u16 rx_level;
+ unsigned long flags;
+
+ /* Read IRQ status register */
+ buf[0] = MAX3107_IRQSTS_REG;
+ /* Read status IRQ status register */
+ buf[1] = MAX3107_STS_IRQSTS_REG;
+ /* Read LSR IRQ status register */
+ buf[2] = MAX3107_LSR_IRQSTS_REG;
+ /* Query RX level */
+ buf[3] = MAX3107_RXFIFOLVL_REG;
+
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 8)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer for INTR handling failed\n");
+ return 0;
+ }
+
+ irq_status = (u8)buf[0];
+ pr_debug("IRQSTS %x\n", irq_status);
+ rx_level = (buf[3] & MAX3107_SPI_RX_DATA_MASK);
+
+ if (irq_status & MAX3107_IRQ_LSR_BIT) {
+ /* LSR interrupt */
+ if (buf[2] & MAX3107_LSR_RXTO_BIT)
+ /* RX timeout interrupt,
+ * handled by normal RX handling
+ */
+ pr_debug("RX TO INT\n");
+ }
+
+ if (irq_status & MAX3107_IRQ_TXEMPTY_BIT) {
+ /* Tx empty interrupt,
+ * disable TX and set tx_fifo_empty flag
+ */
+ pr_debug("TE INT, disabling TX\n");
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ spin_lock_irqsave(&s->data_lock, flags);
+ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
+ buf[0] |= s->mode1_reg;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "SPI transfer TX dis failed\n");
+ s->tx_enabled = 0;
+ s->tx_fifo_empty = 1;
+ }
+
+ if (irq_status & MAX3107_IRQ_RXFIFO_BIT)
+ /* RX FIFO interrupt,
+ * handled by normal RX handling
+ */
+ pr_debug("RFIFO INT\n");
+
+ /* Return RX level */
+ return rx_level;
+}
+
+/* Trigger work thread*/
+static void max3107_dowork(struct max3107_port *s)
+{
+ if (!work_pending(&s->work) && !freezing(current) && !s->suspended)
+ queue_work(s->workqueue, &s->work);
+ else
+		dev_warn(&s->spi->dev, "interrupt isn't serviced normally!\n");
+}
+
+/* Work thread */
+static void max3107_work(struct work_struct *w)
+{
+ struct max3107_port *s = container_of(w, struct max3107_port, work);
+ u16 rxlvl = 0;
+ int len; /* SPI transfer buffer length */
+ u16 buf[5]; /* Buffer for SPI transfers */
+ unsigned long flags;
+
+ /* Start by reading current RX FIFO level */
+ buf[0] = MAX3107_RXFIFOLVL_REG;
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer RX lev failed\n");
+ rxlvl = 0;
+ } else {
+ rxlvl = (buf[0] & MAX3107_SPI_RX_DATA_MASK);
+ }
+
+ do {
+ pr_debug("rxlvl %d\n", rxlvl);
+
+ /* Handle RX */
+ max3107_handlerx(s, rxlvl);
+ rxlvl = 0;
+
+ if (s->handle_irq) {
+ /* Handle pending interrupts
+ * We also get new RX FIFO level since new data may
+ * have been received while pushing received data to
+ * receivers
+ */
+ s->handle_irq = 0;
+ rxlvl = handle_interrupt(s);
+ }
+
+ /* Handle TX */
+ max3107_handletx(s);
+
+ /* Handle configuration changes */
+ len = 0;
+ spin_lock_irqsave(&s->data_lock, flags);
+ if (s->mode1_commit) {
+ pr_debug("mode1_commit\n");
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ buf[len++] |= s->mode1_reg;
+ s->mode1_commit = 0;
+ }
+ if (s->lcr_commit) {
+ pr_debug("lcr_commit\n");
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG);
+ buf[len++] |= s->lcr_reg;
+ s->lcr_commit = 0;
+ }
+ if (s->brg_commit) {
+ pr_debug("brg_commit\n");
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG);
+ buf[len++] |= ((s->brg_cfg >> 16) &
+ MAX3107_SPI_TX_DATA_MASK);
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG);
+ buf[len++] |= ((s->brg_cfg >> 8) &
+ MAX3107_SPI_TX_DATA_MASK);
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG);
+ buf[len++] |= ((s->brg_cfg) & 0xff);
+ s->brg_commit = 0;
+ }
+ spin_unlock_irqrestore(&s->data_lock, flags);
+
+ if (len > 0) {
+ if (max3107_rw(s, (u8 *)buf, NULL, len * 2))
+ dev_err(&s->spi->dev,
+ "SPI transfer config failed\n");
+ }
+
+ /* Reloop if interrupt handling indicated data in RX FIFO */
+ } while (rxlvl);
+
+}
+
+/* Set sleep mode */
+static void max3107_set_sleep(struct max3107_port *s, int mode)
+{
+ u16 buf[1]; /* Buffer for SPI transfer */
+ unsigned long flags;
+ pr_debug("enter, mode %d\n", mode);
+
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ spin_lock_irqsave(&s->data_lock, flags);
+ switch (mode) {
+ case MAX3107_DISABLE_FORCED_SLEEP:
+ s->mode1_reg &= ~MAX3107_MODE1_FORCESLEEP_BIT;
+ break;
+ case MAX3107_ENABLE_FORCED_SLEEP:
+ s->mode1_reg |= MAX3107_MODE1_FORCESLEEP_BIT;
+ break;
+ case MAX3107_DISABLE_AUTOSLEEP:
+ s->mode1_reg &= ~MAX3107_MODE1_AUTOSLEEP_BIT;
+ break;
+ case MAX3107_ENABLE_AUTOSLEEP:
+ s->mode1_reg |= MAX3107_MODE1_AUTOSLEEP_BIT;
+ break;
+ default:
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ dev_warn(&s->spi->dev, "invalid sleep mode\n");
+ return;
+ }
+ buf[0] |= s->mode1_reg;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "SPI transfer sleep mode failed\n");
+
+ if (mode == MAX3107_DISABLE_AUTOSLEEP ||
+ mode == MAX3107_DISABLE_FORCED_SLEEP)
+ msleep(MAX3107_WAKEUP_DELAY);
+}
+
+/* Perform full register initialization */
+static void max3107_register_init(struct max3107_port *s)
+{
+ u16 buf[11]; /* Buffer for SPI transfers */
+
+ /* 1. Configure baud rate, 9600 as default */
+ s->baud = 9600;
+	/* The below is the default */
+ if (s->ext_clk) {
+ s->brg_cfg = MAX3107_BRG26_B9600;
+ s->baud_tbl = (struct baud_table *)brg26_ext;
+ } else {
+ s->brg_cfg = MAX3107_BRG13_IB9600;
+ s->baud_tbl = (struct baud_table *)brg13_int;
+ }
+
+ if (s->pdata->init)
+ s->pdata->init(s);
+
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG)
+ | ((s->brg_cfg >> 16) & MAX3107_SPI_TX_DATA_MASK);
+ buf[1] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG)
+ | ((s->brg_cfg >> 8) & MAX3107_SPI_TX_DATA_MASK);
+ buf[2] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG)
+ | ((s->brg_cfg) & 0xff);
+
+ /* 2. Configure LCR register, 8N1 mode by default */
+ s->lcr_reg = MAX3107_LCR_WORD_LEN_8;
+ buf[3] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG)
+ | s->lcr_reg;
+
+ /* 3. Configure MODE 1 register */
+ s->mode1_reg = 0;
+ /* Enable IRQ pin */
+ s->mode1_reg |= MAX3107_MODE1_IRQSEL_BIT;
+ /* Disable TX */
+ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
+ s->tx_enabled = 0;
+ /* RX is enabled */
+ s->rx_enabled = 1;
+ buf[4] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG)
+ | s->mode1_reg;
+
+ /* 4. Configure MODE 2 register */
+ buf[5] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
+ if (s->loopback) {
+ /* Enable loopback */
+ buf[5] |= MAX3107_MODE2_LOOPBACK_BIT;
+ }
+ /* Reset FIFOs */
+ buf[5] |= MAX3107_MODE2_FIFORST_BIT;
+ s->tx_fifo_empty = 1;
+
+ /* 5. Configure FIFO trigger level register */
+ buf[6] = (MAX3107_WRITE_BIT | MAX3107_FIFOTRIGLVL_REG);
+ /* RX FIFO trigger for 16 words, TX FIFO trigger not used */
+ buf[6] |= (MAX3107_FIFOTRIGLVL_RX(16) | MAX3107_FIFOTRIGLVL_TX(0));
+
+ /* 6. Configure flow control levels */
+ buf[7] = (MAX3107_WRITE_BIT | MAX3107_FLOWLVL_REG);
+ /* Flow control halt level 96, resume level 48 */
+ buf[7] |= (MAX3107_FLOWLVL_RES(48) | MAX3107_FLOWLVL_HALT(96));
+
+ /* 7. Configure flow control */
+ buf[8] = (MAX3107_WRITE_BIT | MAX3107_FLOWCTRL_REG);
+ /* Enable auto CTS and auto RTS flow control */
+ buf[8] |= (MAX3107_FLOWCTRL_AUTOCTS_BIT | MAX3107_FLOWCTRL_AUTORTS_BIT);
+
+ /* 8. Configure RX timeout register */
+ buf[9] = (MAX3107_WRITE_BIT | MAX3107_RXTO_REG);
+ /* Timeout after 48 character intervals */
+ buf[9] |= 0x0030;
+
+ /* 9. Configure LSR interrupt enable register */
+ buf[10] = (MAX3107_WRITE_BIT | MAX3107_LSR_IRQEN_REG);
+ /* Enable RX timeout interrupt */
+ buf[10] |= MAX3107_LSR_RXTO_BIT;
+
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 22))
+ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
+
+ /* 10. Clear IRQ status register by reading it */
+ buf[0] = MAX3107_IRQSTS_REG;
+
+ /* 11. Configure interrupt enable register */
+ /* Enable LSR interrupt */
+ s->irqen_reg = MAX3107_IRQ_LSR_BIT;
+ /* Enable RX FIFO interrupt */
+ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
+ buf[1] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG)
+ | s->irqen_reg;
+
+	/* 12. Clear FIFO reset that was set in step 4 */
+ buf[2] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
+ if (s->loopback) {
+ /* Keep loopback enabled */
+ buf[2] |= MAX3107_MODE2_LOOPBACK_BIT;
+ }
+
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 6))
+ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
+
+}
+
+/* IRQ handler */
+static irqreturn_t max3107_irq(int irqno, void *dev_id)
+{
+ struct max3107_port *s = dev_id;
+
+ if (irqno != s->spi->irq) {
+ /* Unexpected IRQ */
+ return IRQ_NONE;
+ }
+
+ /* Indicate irq */
+ s->handle_irq = 1;
+
+ /* Trigger work thread */
+ max3107_dowork(s);
+
+ return IRQ_HANDLED;
+}
+
+/* HW suspension function
+ *
+ * Currently autosleep is used to decrease current consumption, alternative
+ * approach would be to set the chip to reset mode if UART is not being
+ * used but that would mess the GPIOs
+ *
+ */
+void max3107_hw_susp(struct max3107_port *s, int suspend)
+{
+ pr_debug("enter, suspend %d\n", suspend);
+
+ if (suspend) {
+ /* Suspend requested,
+ * enable autosleep to decrease current consumption
+ */
+ s->suspended = 1;
+ max3107_set_sleep(s, MAX3107_ENABLE_AUTOSLEEP);
+ } else {
+ /* Resume requested,
+ * disable autosleep
+ */
+ s->suspended = 0;
+ max3107_set_sleep(s, MAX3107_DISABLE_AUTOSLEEP);
+ }
+}
+EXPORT_SYMBOL_GPL(max3107_hw_susp);
+
+/* Modem status IRQ enabling */
+static void max3107_enable_ms(struct uart_port *port)
+{
+ /* Modem status not supported */
+}
+
+/* Data send function */
+static void max3107_start_tx(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ /* Trigger work thread for sending data */
+ max3107_dowork(s);
+}
+
+/* Function for checking that there are no pending transfers */
+static unsigned int max3107_tx_empty(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ pr_debug("returning %d\n",
+ (s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit)));
+ return s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit);
+}
+
+/* Function for stopping RX */
+static void max3107_stop_rx(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ unsigned long flags;
+
+ /* Set RX disabled in MODE 1 register */
+ spin_lock_irqsave(&s->data_lock, flags);
+ s->mode1_reg |= MAX3107_MODE1_RXDIS_BIT;
+ s->mode1_commit = 1;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ /* Set RX disabled */
+ s->rx_enabled = 0;
+ /* Trigger work thread for doing the actual configuration change */
+ max3107_dowork(s);
+}
+
+/* Function for returning control pin states */
+static unsigned int max3107_get_mctrl(struct uart_port *port)
+{
+ /* DCD and DSR are not wired and CTS/RTS is handled automatically
+ * so just indicate DSR and CAR asserted
+ */
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+/* Function for setting control pin states */
+static void max3107_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	/* DCD and DSR are not wired and CTS/RTS is handled automatically
+ * so do nothing
+ */
+}
+
+/* Function for configuring UART parameters */
+static void max3107_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ struct tty_struct *tty;
+ int baud;
+ u16 new_lcr = 0;
+ u32 new_brg = 0;
+ unsigned long flags;
+
+ if (!port->state)
+ return;
+
+ tty = port->state->port.tty;
+ if (!tty)
+ return;
+
+ /* Get new LCR register values */
+ /* Word size */
+ if ((termios->c_cflag & CSIZE) == CS7)
+ new_lcr |= MAX3107_LCR_WORD_LEN_7;
+ else
+ new_lcr |= MAX3107_LCR_WORD_LEN_8;
+
+ /* Parity */
+ if (termios->c_cflag & PARENB) {
+ new_lcr |= MAX3107_LCR_PARITY_BIT;
+ if (!(termios->c_cflag & PARODD))
+ new_lcr |= MAX3107_LCR_EVENPARITY_BIT;
+ }
+
+ /* Stop bits */
+ if (termios->c_cflag & CSTOPB) {
+ /* 2 stop bits */
+ new_lcr |= MAX3107_LCR_STOPLEN_BIT;
+ }
+
+ /* Mask termios capabilities we don't support */
+ termios->c_cflag &= ~CMSPAR;
+
+ /* Set status ignore mask */
+ s->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ s->port.ignore_status_mask |= MAX3107_ALL_ERRORS;
+
+ /* Set low latency to immediately handle pushed data */
+ s->port.state->port.tty->low_latency = 1;
+
+ /* Get new baud rate generator configuration */
+ baud = tty_get_baud_rate(tty);
+
+ spin_lock_irqsave(&s->data_lock, flags);
+ new_brg = get_new_brg(baud, s);
+	/* If we can't find the correct config, use the previous one */
+ if (!new_brg) {
+ baud = s->baud;
+ new_brg = s->brg_cfg;
+ }
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ s->baud = baud;
+
+ /* Update timeout according to new baud rate */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_lock_irqsave(&s->data_lock, flags);
+ if (s->lcr_reg != new_lcr) {
+ s->lcr_reg = new_lcr;
+ s->lcr_commit = 1;
+ }
+ if (s->brg_cfg != new_brg) {
+ s->brg_cfg = new_brg;
+ s->brg_commit = 1;
+ }
+ spin_unlock_irqrestore(&s->data_lock, flags);
+
+ /* Trigger work thread for doing the actual configuration change */
+ max3107_dowork(s);
+}
+
+/* Port shutdown function */
+static void max3107_shutdown(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ if (s->suspended && s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 0);
+
+ /* Free the interrupt */
+ free_irq(s->spi->irq, s);
+
+ if (s->workqueue) {
+ /* Flush and destroy work queue */
+ flush_workqueue(s->workqueue);
+ destroy_workqueue(s->workqueue);
+ s->workqueue = NULL;
+ }
+
+ /* Suspend HW */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 1);
+}
+
+/* Port startup function */
+static int max3107_startup(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ /* Initialize work queue */
+ s->workqueue = create_freezeable_workqueue("max3107");
+ if (!s->workqueue) {
+ dev_err(&s->spi->dev, "Workqueue creation failed\n");
+ return -EBUSY;
+ }
+ INIT_WORK(&s->work, max3107_work);
+
+ /* Setup IRQ */
+ if (request_irq(s->spi->irq, max3107_irq, IRQF_TRIGGER_FALLING,
+ "max3107", s)) {
+		dev_err(&s->spi->dev, "IRQ request failed\n");
+ destroy_workqueue(s->workqueue);
+ s->workqueue = NULL;
+ return -EBUSY;
+ }
+
+ /* Resume HW */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 0);
+
+ /* Init registers */
+ max3107_register_init(s);
+
+ return 0;
+}
+
+/* Port type function */
+static const char *max3107_type(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ return s->spi->modalias;
+}
+
+/* Port release function */
+static void max3107_release_port(struct uart_port *port)
+{
+ /* Do nothing */
+}
+
+/* Port request function */
+static int max3107_request_port(struct uart_port *port)
+{
+ /* Do nothing */
+ return 0;
+}
+
+/* Port config function */
+static void max3107_config_port(struct uart_port *port, int flags)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ s->port.type = PORT_MAX3107;
+}
+
+/* Port verify function */
+static int max3107_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3107)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* Port stop TX function */
+static void max3107_stop_tx(struct uart_port *port)
+{
+ /* Do nothing */
+}
+
+/* Port break control function */
+static void max3107_break_ctl(struct uart_port *port, int break_state)
+{
+ /* We don't support break control, do nothing */
+}
+
+
+/* Port functions */
+static struct uart_ops max3107_ops = {
+ .tx_empty = max3107_tx_empty,
+ .set_mctrl = max3107_set_mctrl,
+ .get_mctrl = max3107_get_mctrl,
+ .stop_tx = max3107_stop_tx,
+ .start_tx = max3107_start_tx,
+ .stop_rx = max3107_stop_rx,
+ .enable_ms = max3107_enable_ms,
+ .break_ctl = max3107_break_ctl,
+ .startup = max3107_startup,
+ .shutdown = max3107_shutdown,
+ .set_termios = max3107_set_termios,
+ .type = max3107_type,
+ .release_port = max3107_release_port,
+ .request_port = max3107_request_port,
+ .config_port = max3107_config_port,
+ .verify_port = max3107_verify_port,
+};
+
+/* UART driver data */
+static struct uart_driver max3107_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyMAX",
+ .dev_name = "ttyMAX",
+ .nr = 1,
+};
+
+static int driver_registered;
+
+
+
+/* 'Generic' platform data */
+static struct max3107_plat generic_plat_data = {
+ .loopback = 0,
+ .ext_clk = 1,
+ .hw_suspend = max3107_hw_susp,
+ .polled_mode = 0,
+ .poll_time = 0,
+};
+
+
+/*******************************************************************/
+
+/**
+ * max3107_probe - SPI bus probe entry point
+ * @spi: the spi device
+ * @pdata: platform data for the port
+ *
+ * SPI wants us to probe this device and if appropriate claim it.
+ * Perform any platform specific requirements and then initialise
+ * the device.
+ */
+
+int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
+{
+ struct max3107_port *s;
+ u16 buf[2]; /* Buffer for SPI transfers */
+ int retval;
+
+ pr_info("enter max3107 probe\n");
+
+ /* Allocate port structure */
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ pr_err("Allocating port structure failed\n");
+ return -ENOMEM;
+ }
+
+ s->pdata = pdata;
+
+ /* SPI Rx buffer
+ * +2 for RX FIFO interrupt
+ * disabling and RX level query
+ */
+ s->rxbuf = kzalloc(sizeof(u16) * (MAX3107_RX_FIFO_SIZE+2), GFP_KERNEL);
+ if (!s->rxbuf) {
+ pr_err("Allocating RX buffer failed\n");
+ return -ENOMEM;
+ }
+ s->rxstr = kzalloc(sizeof(u8) * MAX3107_RX_FIFO_SIZE, GFP_KERNEL);
+ if (!s->rxstr) {
+ pr_err("Allocating RX buffer failed\n");
+ return -ENOMEM;
+ }
+	/* SPI Tx buffer
+	 * +3 words for TX FIFO empty interrupt disabling and
+	 * enabling and TX enabling
+	 */
+	s->txbuf = kzalloc(sizeof(u16) * (MAX3107_TX_FIFO_SIZE + 3), GFP_KERNEL);
+ if (!s->txbuf) {
+ pr_err("Allocating TX buffer failed\n");
+ return -ENOMEM;
+ }
+ /* Initialize shared data lock */
+ spin_lock_init(&s->data_lock);
+
+	/* SPI initialization */
+ dev_set_drvdata(&spi->dev, s);
+ spi->mode = SPI_MODE_0;
+ spi->dev.platform_data = pdata;
+ spi->bits_per_word = 16;
+ s->ext_clk = pdata->ext_clk;
+ s->loopback = pdata->loopback;
+ spi_setup(spi);
+ s->spi = spi;
+
+ /* Check REV ID to ensure we are talking to what we expect */
+ buf[0] = MAX3107_REVID_REG;
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer for REVID read failed\n");
+ return -EIO;
+ }
+ if ((buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID1 &&
+ (buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID2) {
+ dev_err(&s->spi->dev, "REVID %x does not match\n",
+ (buf[0] & MAX3107_SPI_RX_DATA_MASK));
+ return -ENODEV;
+ }
+
+ /* Disable all interrupts */
+	buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+
+ /* Configure clock source */
+ buf[1] = (MAX3107_WRITE_BIT | MAX3107_CLKSRC_REG);
+ if (s->ext_clk) {
+ /* External clock */
+ buf[1] |= MAX3107_CLKSRC_EXTCLK_BIT;
+ }
+
+ /* PLL bypass ON */
+ buf[1] |= MAX3107_CLKSRC_PLLBYP_BIT;
+
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
+ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
+ return -EIO;
+ }
+
+ /* Register UART driver */
+ if (!driver_registered) {
+ retval = uart_register_driver(&max3107_uart_driver);
+ if (retval) {
+ dev_err(&s->spi->dev, "Registering UART driver failed\n");
+ return retval;
+ }
+ driver_registered = 1;
+ }
+
+ /* Initialize UART port data */
+ s->port.fifosize = 128;
+ s->port.ops = &max3107_ops;
+ s->port.line = 0;
+ s->port.dev = &spi->dev;
+ s->port.uartclk = 9600;
+ s->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+ s->port.irq = s->spi->irq;
+ s->port.type = PORT_MAX3107;
+
+ /* Add UART port */
+ retval = uart_add_one_port(&max3107_uart_driver, &s->port);
+ if (retval < 0) {
+ dev_err(&s->spi->dev, "Adding UART port failed\n");
+ return retval;
+ }
+
+ if (pdata->configure) {
+ retval = pdata->configure(s);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Go to suspend mode */
+ if (pdata->hw_suspend)
+ pdata->hw_suspend(s, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_probe);
+
+/* Driver remove function */
+int max3107_remove(struct spi_device *spi)
+{
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ pr_info("enter max3107 remove\n");
+
+ /* Remove port */
+ if (uart_remove_one_port(&max3107_uart_driver, &s->port))
+ dev_warn(&s->spi->dev, "Removing UART port failed\n");
+
+
+ /* Free TxRx buffer */
+ kfree(s->rxbuf);
+ kfree(s->rxstr);
+ kfree(s->txbuf);
+
+ /* Free port structure */
+ kfree(s);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_remove);
+
+/* Driver suspend function */
+int max3107_suspend(struct spi_device *spi, pm_message_t state)
+{
+#ifdef CONFIG_PM
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ pr_debug("enter suspend\n");
+
+ /* Suspend UART port */
+ uart_suspend_port(&max3107_uart_driver, &s->port);
+
+ /* Go to suspend mode */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 1);
+#endif /* CONFIG_PM */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_suspend);
+
+/* Driver resume function */
+int max3107_resume(struct spi_device *spi)
+{
+#ifdef CONFIG_PM
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ pr_debug("enter resume\n");
+
+ /* Resume from suspend */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 0);
+
+ /* Resume UART port */
+ uart_resume_port(&max3107_uart_driver, &s->port);
+#endif /* CONFIG_PM */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_resume);
+
+static int max3107_probe_generic(struct spi_device *spi)
+{
+ return max3107_probe(spi, &generic_plat_data);
+}
+
+/* Spi driver data */
+static struct spi_driver max3107_driver = {
+ .driver = {
+ .name = "max3107",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = max3107_probe_generic,
+ .remove = __devexit_p(max3107_remove),
+ .suspend = max3107_suspend,
+ .resume = max3107_resume,
+};
+
+/* Driver init function */
+static int __init max3107_init(void)
+{
+ pr_info("enter max3107 init\n");
+ return spi_register_driver(&max3107_driver);
+}
+
+/* Driver exit function */
+static void __exit max3107_exit(void)
+{
+ pr_info("enter max3107 exit\n");
+ /* Unregister UART driver */
+ if (driver_registered)
+ uart_unregister_driver(&max3107_uart_driver);
+ spi_unregister_driver(&max3107_driver);
+}
+
+module_init(max3107_init);
+module_exit(max3107_exit);
+
+MODULE_DESCRIPTION("MAX3107 driver");
+MODULE_AUTHOR("Aavamobile");
+MODULE_ALIAS("max3107-spi");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/serial/max3107.h b/drivers/serial/max3107.h
new file mode 100644
index 00000000000..7ab63239250
--- /dev/null
+++ b/drivers/serial/max3107.h
@@ -0,0 +1,441 @@
+/*
+ * max3107.h - spi uart protocol driver header for Maxim 3107
+ *
+ * Copyright (C) Aavamobile 2009
+ * Based on serial_max3100.h by Christian Pellegrin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MAX3107_H
+#define _MAX3107_H
+
+/* Serial error status definitions */
+#define MAX3107_PARITY_ERROR 1
+#define MAX3107_FRAME_ERROR 2
+#define MAX3107_OVERRUN_ERROR 4
+#define MAX3107_ALL_ERRORS (MAX3107_PARITY_ERROR | \
+ MAX3107_FRAME_ERROR | \
+ MAX3107_OVERRUN_ERROR)
+
+/* GPIO definitions */
+#define MAX3107_GPIO_BASE 88
+#define MAX3107_GPIO_COUNT 4
+
+
+/* GPIO connected to chip's reset pin */
+#define MAX3107_RESET_GPIO 87
+
+
+/* Chip reset delay */
+#define MAX3107_RESET_DELAY 10
+
+/* Chip wakeup delay */
+#define MAX3107_WAKEUP_DELAY 50
+
+
+/* Sleep mode definitions */
+#define MAX3107_DISABLE_FORCED_SLEEP 0
+#define MAX3107_ENABLE_FORCED_SLEEP 1
+#define MAX3107_DISABLE_AUTOSLEEP 2
+#define MAX3107_ENABLE_AUTOSLEEP 3
+
+
+/* Definitions for register access with SPI transfers
+ *
+ * SPI transfer format:
+ *
+ * Master to slave bits xzzzzzzzyyyyyyyy
+ * Slave to master bits aaaaaaaabbbbbbbb
+ *
+ * where:
+ * x = 0 for reads, 1 for writes
+ * z = register address
+ * y = new register value if write, 0 if read
+ * a = unspecified
+ * b = register value if read, unspecified if write
+ */
+
+/* SPI speed */
+#define MAX3107_SPI_SPEED (3125000 * 2)
+
+/* Write bit */
+#define MAX3107_WRITE_BIT (1 << 15)
+
+/* SPI RX data mask */
+#define MAX3107_SPI_RX_DATA_MASK	(0x00ff)
+
+/* SPI TX data mask */
+#define MAX3107_SPI_TX_DATA_MASK	(0x00ff)
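+
+/*
+ * Illustrative sketch only, not part of the header: composing SPI words
+ * from the format described above. The driver open-codes these ORs at
+ * each call site; the helper names here are hypothetical.
+ */
+#if 0
+static inline u16 max3107_spi_write_word(u16 reg, u8 data)
+{
+	/* x = 1 (write), z = register address, y = new register value */
+	return MAX3107_WRITE_BIT | reg | data;
+}
+
+static inline u16 max3107_spi_read_word(u16 reg)
+{
+	/* x = 0 (read), z = register address, y = 0 */
+	return reg;
+}
+#endif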
+
+/* Register access masks */
+#define MAX3107_RHR_REG (0x0000) /* RX FIFO */
+#define MAX3107_THR_REG (0x0000) /* TX FIFO */
+#define MAX3107_IRQEN_REG (0x0100) /* IRQ enable */
+#define MAX3107_IRQSTS_REG (0x0200) /* IRQ status */
+#define MAX3107_LSR_IRQEN_REG (0x0300) /* LSR IRQ enable */
+#define MAX3107_LSR_IRQSTS_REG (0x0400) /* LSR IRQ status */
+#define MAX3107_SPCHR_IRQEN_REG (0x0500) /* Special char IRQ enable */
+#define MAX3107_SPCHR_IRQSTS_REG (0x0600) /* Special char IRQ status */
+#define MAX3107_STS_IRQEN_REG (0x0700) /* Status IRQ enable */
+#define MAX3107_STS_IRQSTS_REG (0x0800) /* Status IRQ status */
+#define MAX3107_MODE1_REG (0x0900) /* MODE1 */
+#define MAX3107_MODE2_REG (0x0a00) /* MODE2 */
+#define MAX3107_LCR_REG (0x0b00) /* LCR */
+#define MAX3107_RXTO_REG (0x0c00) /* RX timeout */
+#define MAX3107_HDPIXDELAY_REG (0x0d00) /* Auto transceiver delays */
+#define MAX3107_IRDA_REG (0x0e00) /* IRDA settings */
+#define MAX3107_FLOWLVL_REG (0x0f00) /* Flow control levels */
+#define MAX3107_FIFOTRIGLVL_REG (0x1000) /* FIFO IRQ trigger levels */
+#define MAX3107_TXFIFOLVL_REG (0x1100) /* TX FIFO level */
+#define MAX3107_RXFIFOLVL_REG (0x1200) /* RX FIFO level */
+#define MAX3107_FLOWCTRL_REG (0x1300) /* Flow control */
+#define MAX3107_XON1_REG (0x1400) /* XON1 character */
+#define MAX3107_XON2_REG (0x1500) /* XON2 character */
+#define MAX3107_XOFF1_REG (0x1600) /* XOFF1 character */
+#define MAX3107_XOFF2_REG (0x1700) /* XOFF2 character */
+#define MAX3107_GPIOCFG_REG (0x1800) /* GPIO config */
+#define MAX3107_GPIODATA_REG (0x1900) /* GPIO data */
+#define MAX3107_PLLCFG_REG (0x1a00) /* PLL config */
+#define MAX3107_BRGCFG_REG (0x1b00) /* Baud rate generator conf */
+#define MAX3107_BRGDIVLSB_REG (0x1c00) /* Baud rate divisor LSB */
+#define MAX3107_BRGDIVMSB_REG (0x1d00) /* Baud rate divisor MSB */
+#define MAX3107_CLKSRC_REG (0x1e00) /* Clock source */
+#define MAX3107_REVID_REG (0x1f00) /* Revision identification */
+
+/* IRQ register bits */
+#define MAX3107_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
+#define MAX3107_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
+#define MAX3107_IRQ_STS_BIT (1 << 2) /* Status interrupt */
+#define MAX3107_IRQ_RXFIFO_BIT (1 << 3) /* RX FIFO interrupt */
+#define MAX3107_IRQ_TXFIFO_BIT (1 << 4) /* TX FIFO interrupt */
+#define MAX3107_IRQ_TXEMPTY_BIT (1 << 5) /* TX FIFO empty interrupt */
+#define MAX3107_IRQ_RXEMPTY_BIT (1 << 6) /* RX FIFO empty interrupt */
+#define MAX3107_IRQ_CTS_BIT (1 << 7) /* CTS interrupt */
+
+/* LSR register bits */
+#define MAX3107_LSR_RXTO_BIT (1 << 0) /* RX timeout */
+#define MAX3107_LSR_RXOVR_BIT (1 << 1) /* RX overrun */
+#define MAX3107_LSR_RXPAR_BIT (1 << 2) /* RX parity error */
+#define MAX3107_LSR_FRERR_BIT (1 << 3) /* Frame error */
+#define MAX3107_LSR_RXBRK_BIT (1 << 4) /* RX break */
+#define MAX3107_LSR_RXNOISE_BIT (1 << 5) /* RX noise */
+#define MAX3107_LSR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_LSR_CTS_BIT (1 << 7) /* CTS pin state */
+
+/* Special character register bits */
+#define MAX3107_SPCHR_XON1_BIT (1 << 0) /* XON1 character */
+#define MAX3107_SPCHR_XON2_BIT (1 << 1) /* XON2 character */
+#define MAX3107_SPCHR_XOFF1_BIT (1 << 2) /* XOFF1 character */
+#define MAX3107_SPCHR_XOFF2_BIT (1 << 3) /* XOFF2 character */
+#define MAX3107_SPCHR_BREAK_BIT (1 << 4) /* RX break */
+#define MAX3107_SPCHR_MULTIDROP_BIT (1 << 5) /* 9-bit multidrop addr char */
+#define MAX3107_SPCHR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_SPCHR_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* Status register bits */
+#define MAX3107_STS_GPIO0_BIT (1 << 0) /* GPIO 0 interrupt */
+#define MAX3107_STS_GPIO1_BIT (1 << 1) /* GPIO 1 interrupt */
+#define MAX3107_STS_GPIO2_BIT (1 << 2) /* GPIO 2 interrupt */
+#define MAX3107_STS_GPIO3_BIT (1 << 3) /* GPIO 3 interrupt */
+#define MAX3107_STS_UNDEF4_BIT (1 << 4) /* Undefined/not used */
+#define MAX3107_STS_CLKREADY_BIT (1 << 5) /* Clock ready */
+#define MAX3107_STS_SLEEP_BIT (1 << 6) /* Sleep interrupt */
+#define MAX3107_STS_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* MODE1 register bits */
+#define MAX3107_MODE1_RXDIS_BIT (1 << 0) /* RX disable */
+#define MAX3107_MODE1_TXDIS_BIT (1 << 1) /* TX disable */
+#define MAX3107_MODE1_TXHIZ_BIT (1 << 2) /* TX pin three-state */
+#define MAX3107_MODE1_RTSHIZ_BIT (1 << 3) /* RTS pin three-state */
+#define MAX3107_MODE1_TRNSCVCTRL_BIT (1 << 4) /* Transceiver ctrl enable */
+#define MAX3107_MODE1_FORCESLEEP_BIT (1 << 5) /* Force sleep mode */
+#define MAX3107_MODE1_AUTOSLEEP_BIT (1 << 6) /* Auto sleep enable */
+#define MAX3107_MODE1_IRQSEL_BIT (1 << 7) /* IRQ pin enable */
+
+/* MODE2 register bits */
+#define MAX3107_MODE2_RST_BIT (1 << 0) /* Chip reset */
+#define MAX3107_MODE2_FIFORST_BIT (1 << 1) /* FIFO reset */
+#define MAX3107_MODE2_RXTRIGINV_BIT (1 << 2) /* RX FIFO INT invert */
+#define MAX3107_MODE2_RXEMPTINV_BIT (1 << 3) /* RX FIFO empty INT invert */
+#define MAX3107_MODE2_SPCHR_BIT (1 << 4) /* Special chr detect enable */
+#define MAX3107_MODE2_LOOPBACK_BIT (1 << 5) /* Internal loopback enable */
+#define MAX3107_MODE2_MULTIDROP_BIT (1 << 6) /* 9-bit multidrop enable */
+#define MAX3107_MODE2_ECHOSUPR_BIT (1 << 7) /* ECHO suppression enable */
+
+/* LCR register bits */
+#define MAX3107_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
+#define MAX3107_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
+ *
+ * Word length bits table:
+ * 00 -> 5 bit words
+ * 01 -> 6 bit words
+ * 10 -> 7 bit words
+ * 11 -> 8 bit words
+ */
+#define MAX3107_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
+ *
+ * STOP length bit table:
+ * 0 -> 1 stop bit
+ * 1 -> 1-1.5 stop bits if
+ * word length is 5,
+ * 2 stop bits otherwise
+ */
+#define MAX3107_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
+#define MAX3107_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
+#define MAX3107_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
+#define MAX3107_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
+#define MAX3107_LCR_RTS_BIT (1 << 7) /* RTS pin control */
+#define MAX3107_LCR_WORD_LEN_5 (0x0000)
+#define MAX3107_LCR_WORD_LEN_6 (0x0001)
+#define MAX3107_LCR_WORD_LEN_7 (0x0002)
+#define MAX3107_LCR_WORD_LEN_8 (0x0003)
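+
+/*
+ * Illustrative sketch only, not part of the header: building an LCR value
+ * for a 7E1 line (7 data bits, even parity, 1 stop bit) from the bit
+ * definitions above, mirroring what max3107_set_termios() does; the
+ * helper name is hypothetical.
+ */
+#if 0
+static inline u16 max3107_lcr_7e1_example(void)
+{
+	return MAX3107_LCR_WORD_LEN_7 |
+	       MAX3107_LCR_PARITY_BIT |
+	       MAX3107_LCR_EVENPARITY_BIT;
+}
+#endif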
+
+
+/* IRDA register bits */
+#define MAX3107_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
+#define MAX3107_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
+#define MAX3107_IRDA_SHORTIR_BIT (1 << 2) /* Short SIR mode enable */
+#define MAX3107_IRDA_MIR_BIT (1 << 3) /* MIR mode enable */
+#define MAX3107_IRDA_RXINV_BIT (1 << 4) /* RX logic inversion enable */
+#define MAX3107_IRDA_TXINV_BIT (1 << 5) /* TX logic inversion enable */
+#define MAX3107_IRDA_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_IRDA_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* Flow control trigger level register masks */
+#define MAX3107_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
+#define MAX3107_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
+#define MAX3107_FLOWLVL_HALT(words) ((words/8) & 0x000f)
+#define MAX3107_FLOWLVL_RES(words) (((words/8) & 0x000f) << 4)
+
+/* FIFO interrupt trigger level register masks */
+#define MAX3107_FIFOTRIGLVL_TX_MASK (0x000f) /* TX FIFO trigger level */
+#define MAX3107_FIFOTRIGLVL_RX_MASK (0x00f0) /* RX FIFO trigger level */
+#define MAX3107_FIFOTRIGLVL_TX(words) ((words/8) & 0x000f)
+#define MAX3107_FIFOTRIGLVL_RX(words) (((words/8) & 0x000f) << 4)
+
+/* Flow control register bits */
+#define MAX3107_FLOWCTRL_AUTORTS_BIT (1 << 0) /* Auto RTS flow ctrl enable */
+#define MAX3107_FLOWCTRL_AUTOCTS_BIT (1 << 1) /* Auto CTS flow ctrl enable */
+#define MAX3107_FLOWCTRL_GPIADDR_BIT	(1 << 2) /* Enables GPIO inputs
+						  * to be used in conjunction
+						  * with XOFF2 for definition
+						  * of special character */
+#define MAX3107_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
+#define MAX3107_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
+#define MAX3107_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
+ *
+ * SWFLOW bits 1 & 0 table:
+ * 00 -> no transmitter flow
+ * control
+ * 01 -> receiver compares
+ * XON2 and XOFF2
+ * and controls
+ * transmitter
+ * 10 -> receiver compares
+ * XON1 and XOFF1
+ * and controls
+ * transmitter
+ * 11 -> receiver compares
+ * XON1, XON2, XOFF1 and
+ * XOFF2 and controls
+ * transmitter
+ */
+#define MAX3107_FLOWCTRL_SWFLOW2_BIT (1 << 6) /* SWFLOW bit 2 */
+#define MAX3107_FLOWCTRL_SWFLOW3_BIT (1 << 7) /* SWFLOW bit 3
+ *
+ * SWFLOW bits 3 & 2 table:
+ * 00 -> no received flow
+ * control
+ * 01 -> transmitter generates
+ * XON2 and XOFF2
+ * 10 -> transmitter generates
+ * XON1 and XOFF1
+ * 11 -> transmitter generates
+ * XON1, XON2, XOFF1 and
+ * XOFF2
+ */
+
+/* GPIO configuration register bits */
+#define MAX3107_GPIOCFG_GP0OUT_BIT (1 << 0) /* GPIO 0 output enable */
+#define MAX3107_GPIOCFG_GP1OUT_BIT (1 << 1) /* GPIO 1 output enable */
+#define MAX3107_GPIOCFG_GP2OUT_BIT (1 << 2) /* GPIO 2 output enable */
+#define MAX3107_GPIOCFG_GP3OUT_BIT (1 << 3) /* GPIO 3 output enable */
+#define MAX3107_GPIOCFG_GP0OD_BIT (1 << 4) /* GPIO 0 open-drain enable */
+#define MAX3107_GPIOCFG_GP1OD_BIT (1 << 5) /* GPIO 1 open-drain enable */
+#define MAX3107_GPIOCFG_GP2OD_BIT (1 << 6) /* GPIO 2 open-drain enable */
+#define MAX3107_GPIOCFG_GP3OD_BIT (1 << 7) /* GPIO 3 open-drain enable */
+
+/* GPIO DATA register bits */
+#define MAX3107_GPIODATA_GP0OUT_BIT (1 << 0) /* GPIO 0 output value */
+#define MAX3107_GPIODATA_GP1OUT_BIT (1 << 1) /* GPIO 1 output value */
+#define MAX3107_GPIODATA_GP2OUT_BIT (1 << 2) /* GPIO 2 output value */
+#define MAX3107_GPIODATA_GP3OUT_BIT (1 << 3) /* GPIO 3 output value */
+#define MAX3107_GPIODATA_GP0IN_BIT (1 << 4) /* GPIO 0 input value */
+#define MAX3107_GPIODATA_GP1IN_BIT (1 << 5) /* GPIO 1 input value */
+#define MAX3107_GPIODATA_GP2IN_BIT (1 << 6) /* GPIO 2 input value */
+#define MAX3107_GPIODATA_GP3IN_BIT (1 << 7) /* GPIO 3 input value */
+
+/* PLL configuration register masks */
+#define MAX3107_PLLCFG_PREDIV_MASK (0x003f) /* PLL predivision value */
+#define MAX3107_PLLCFG_PLLFACTOR_MASK (0x00c0) /* PLL multiplication factor */
+
+/* Baud rate generator configuration register masks and bits */
+#define MAX3107_BRGCFG_FRACT_MASK (0x000f) /* Fractional portion of
+ * Baud rate generator divisor
+ */
+#define MAX3107_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
+#define MAX3107_BRGCFG_4XMODE_BIT (1 << 5) /* Quadruple baud rate */
+#define MAX3107_BRGCFG_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_BRGCFG_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* Clock source register bits */
+#define MAX3107_CLKSRC_INTOSC_BIT (1 << 0) /* Internal osc enable */
+#define MAX3107_CLKSRC_CRYST_BIT (1 << 1) /* Crystal osc enable */
+#define MAX3107_CLKSRC_PLL_BIT (1 << 2) /* PLL enable */
+#define MAX3107_CLKSRC_PLLBYP_BIT (1 << 3) /* PLL bypass */
+#define MAX3107_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */
+#define MAX3107_CLKSRC_UNDEF5_BIT (1 << 5) /* Undefined/not used */
+#define MAX3107_CLKSRC_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */
+
+
+/* HW definitions */
+#define MAX3107_RX_FIFO_SIZE 128
+#define MAX3107_TX_FIFO_SIZE 128
+#define MAX3107_REVID1 0x00a0
+#define MAX3107_REVID2 0x00a1
+
+
+/* Baud rate generator configuration values for external clock 13MHz */
+#define MAX3107_BRG13_B300 (0x0A9400 | 0x05)
+#define MAX3107_BRG13_B600 (0x054A00 | 0x03)
+#define MAX3107_BRG13_B1200 (0x02A500 | 0x01)
+#define MAX3107_BRG13_B2400 (0x015200 | 0x09)
+#define MAX3107_BRG13_B4800 (0x00A900 | 0x04)
+#define MAX3107_BRG13_B9600 (0x005400 | 0x0A)
+#define MAX3107_BRG13_B19200 (0x002A00 | 0x05)
+#define MAX3107_BRG13_B38400 (0x001500 | 0x03)
+#define MAX3107_BRG13_B57600 (0x000E00 | 0x02)
+#define MAX3107_BRG13_B115200 (0x000700 | 0x01)
+#define MAX3107_BRG13_B230400 (0x000300 | 0x08)
+#define MAX3107_BRG13_B460800 (0x000100 | 0x0c)
+#define MAX3107_BRG13_B921600 (0x000100 | 0x1c)
+
+/* Baud rate generator configuration values for external clock 26MHz */
+#define MAX3107_BRG26_B300 (0x152800 | 0x0A)
+#define MAX3107_BRG26_B600 (0x0A9400 | 0x05)
+#define MAX3107_BRG26_B1200 (0x054A00 | 0x03)
+#define MAX3107_BRG26_B2400 (0x02A500 | 0x01)
+#define MAX3107_BRG26_B4800 (0x015200 | 0x09)
+#define MAX3107_BRG26_B9600 (0x00A900 | 0x04)
+#define MAX3107_BRG26_B19200 (0x005400 | 0x0A)
+#define MAX3107_BRG26_B38400 (0x002A00 | 0x05)
+#define MAX3107_BRG26_B57600 (0x001C00 | 0x03)
+#define MAX3107_BRG26_B115200 (0x000E00 | 0x02)
+#define MAX3107_BRG26_B230400 (0x000700 | 0x01)
+#define MAX3107_BRG26_B460800 (0x000300 | 0x08)
+#define MAX3107_BRG26_B921600 (0x000100 | 0x0C)
+
+/* Baud rate generator configuration values for internal clock */
+#define MAX3107_BRG13_IB300 (0x008000 | 0x00)
+#define MAX3107_BRG13_IB600 (0x004000 | 0x00)
+#define MAX3107_BRG13_IB1200 (0x002000 | 0x00)
+#define MAX3107_BRG13_IB2400 (0x001000 | 0x00)
+#define MAX3107_BRG13_IB4800 (0x000800 | 0x00)
+#define MAX3107_BRG13_IB9600 (0x000400 | 0x00)
+#define MAX3107_BRG13_IB19200 (0x000200 | 0x00)
+#define MAX3107_BRG13_IB38400 (0x000100 | 0x00)
+#define MAX3107_BRG13_IB57600 (0x000000 | 0x0B)
+#define MAX3107_BRG13_IB115200 (0x000000 | 0x05)
+#define MAX3107_BRG13_IB230400 (0x000000 | 0x03)
+#define MAX3107_BRG13_IB460800 (0x000000 | 0x00)
+#define MAX3107_BRG13_IB921600 (0x000000 | 0x00)
+
+
+struct baud_table {
+ int baud;
+ u32 new_brg;
+};
+
+struct max3107_port {
+ /* UART port structure */
+ struct uart_port port;
+
+ /* SPI device structure */
+ struct spi_device *spi;
+
+#if defined(CONFIG_GPIOLIB)
+	/* GPIO chip structure */
+ struct gpio_chip chip;
+#endif
+
+ /* Workqueue that does all the magic */
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ /* Lock for shared data */
+ spinlock_t data_lock;
+
+ /* Device configuration */
+ int ext_clk; /* 1 if external clock used */
+ int loopback; /* Current loopback mode state */
+ int baud; /* Current baud rate */
+
+ /* State flags */
+ int suspended; /* Indicates suspend mode */
+ int tx_fifo_empty; /* Flag for TX FIFO state */
+ int rx_enabled; /* Flag for receiver state */
+ int tx_enabled; /* Flag for transmitter state */
+
+ u16 irqen_reg; /* Current IRQ enable register value */
+ /* Shared data */
+ u16 mode1_reg; /* Current mode1 register value*/
+ int mode1_commit; /* Flag for setting new mode1 register value */
+ u16 lcr_reg; /* Current LCR register value */
+ int lcr_commit; /* Flag for setting new LCR register value */
+ u32 brg_cfg; /* Current Baud rate generator config */
+ int brg_commit; /* Flag for setting new baud rate generator
+ * config
+ */
+ struct baud_table *baud_tbl;
+ int handle_irq; /* Indicates that IRQ should be handled */
+
+	/* Rx buffer and str */
+	u16 *rxbuf;
+	u8 *rxstr;
+	/* Tx buffer */
+ u16 *txbuf;
+
+ struct max3107_plat *pdata; /* Platform data */
+};
+
+/* Platform data structure */
+struct max3107_plat {
+ /* Loopback mode enable */
+ int loopback;
+ /* External clock enable */
+ int ext_clk;
+ /* Called during the register initialisation */
+ void (*init)(struct max3107_port *s);
+ /* Called when the port is found and configured */
+ int (*configure)(struct max3107_port *s);
+ /* HW suspend function */
+ void (*hw_suspend) (struct max3107_port *s, int suspend);
+ /* Polling mode enable */
+ int polled_mode;
+ /* Polling period if polling mode enabled */
+ int poll_time;
+};
+
+extern int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len);
+extern void max3107_hw_susp(struct max3107_port *s, int suspend);
+extern int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata);
+extern int max3107_remove(struct spi_device *spi);
+extern int max3107_suspend(struct spi_device *spi, pm_message_t state);
+extern int max3107_resume(struct spi_device *spi);
+
+#endif /* _MAX3107_H */
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index b5aaef965f2..3394b7cc172 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -70,16 +70,14 @@ static unsigned int mcf_tx_empty(struct uart_port *port)
static unsigned int mcf_get_mctrl(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- unsigned long flags;
unsigned int sigs;
- spin_lock_irqsave(&port->lock, flags);
sigs = (readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS) ?
0 : TIOCM_CTS;
sigs |= (pp->sigs & TIOCM_RTS);
sigs |= (mcf_getppdcd(port->line) ? TIOCM_CD : 0);
sigs |= (mcf_getppdtr(port->line) ? TIOCM_DTR : 0);
- spin_unlock_irqrestore(&port->lock, flags);
+
return sigs;
}
@@ -88,16 +86,13 @@ static unsigned int mcf_get_mctrl(struct uart_port *port)
static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
pp->sigs = sigs;
mcf_setppdtr(port->line, (sigs & TIOCM_DTR));
if (sigs & TIOCM_RTS)
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
else
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP0);
- spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
@@ -105,12 +100,9 @@ static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
static void mcf_start_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
pp->imr |= MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
- spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
@@ -118,12 +110,9 @@ static void mcf_start_tx(struct uart_port *port)
static void mcf_stop_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
- spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
@@ -131,12 +120,9 @@ static void mcf_stop_tx(struct uart_port *port)
static void mcf_stop_rx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
pp->imr &= ~MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
- spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
@@ -366,13 +352,22 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
struct uart_port *port = data;
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned int isr;
+ irqreturn_t ret = IRQ_NONE;
isr = readb(port->membase + MCFUART_UISR) & pp->imr;
- if (isr & MCFUART_UIR_RXREADY)
+
+ spin_lock(&port->lock);
+ if (isr & MCFUART_UIR_RXREADY) {
mcf_rx_chars(pp);
- if (isr & MCFUART_UIR_TXREADY)
+ ret = IRQ_HANDLED;
+ }
+ if (isr & MCFUART_UIR_TXREADY) {
mcf_tx_chars(pp);
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock(&port->lock);
+
+ return ret;
}
/****************************************************************************/
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
new file mode 100644
index 00000000000..5dff45c76d3
--- /dev/null
+++ b/drivers/serial/mfd.c
@@ -0,0 +1,1500 @@
+/*
+ * mfd.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * Refer to pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: channels 0/1 are assigned to port 0,
+ * 2/3 to port 1 and 4/5 to port 2. Even-numbered channels are
+ * used for TX, odd-numbered channels for RX
+ *
+ * 2. In the A0 stepping, the UART does not support the TX half empty flag
+ *
+ * 3. RI/DSR/DCD/DTR are not pinned out; DCD & DSR are always
+ * asserted, and DDCD/DDSR are only triggered when the HW is reset
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_mfd.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+
+#define MFD_HSU_A0_STEPPING 1
+
+#define HSU_DMA_BUF_SIZE 2048
+
+#define chan_readl(chan, offset) readl(chan->reg + offset)
+#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
+
+#define mfd_readl(obj, offset) readl(obj->reg + offset)
+#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
+
+#define HSU_DMA_TIMEOUT_CHECK_FREQ (HZ/10)
+
+struct hsu_dma_buffer {
+ u8 *buf;
+ dma_addr_t dma_addr;
+ u32 dma_size;
+ u32 ofs;
+};
+
+struct hsu_dma_chan {
+ u32 id;
+ enum dma_data_direction dirt;
+ struct uart_hsu_port *uport;
+ void __iomem *reg;
+ struct timer_list rx_timer; /* only needed by RX channel */
+};
+
+struct uart_hsu_port {
+ struct uart_port port;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned int lsr_break_flag;
+ char name[12];
+ int index;
+ struct device *dev;
+
+ struct hsu_dma_chan *txc;
+ struct hsu_dma_chan *rxc;
+ struct hsu_dma_buffer txbuf;
+ struct hsu_dma_buffer rxbuf;
+ int use_dma; /* flag for DMA/PIO */
+ int running;
+ int dma_tx_on;
+};
+
+/* Top level data structure of HSU */
+struct hsu_port {
+ void __iomem *reg;
+ unsigned long paddr;
+ unsigned long iolen;
+ u32 irq;
+
+ struct uart_hsu_port port[3];
+ struct hsu_dma_chan chans[10];
+
+ struct dentry *debugfs;
+};
+
+static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
+{
+ unsigned int val;
+
+ if (offset > UART_MSR) {
+ offset <<= 2;
+ val = readl(up->port.membase + offset);
+ } else
+ val = (unsigned int)readb(up->port.membase + offset);
+
+ return val;
+}
+
+static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
+{
+ if (offset > UART_MSR) {
+ offset <<= 2;
+ writel(value, up->port.membase + offset);
+ } else {
+ unsigned char val = value & 0xff;
+ writeb(val, up->port.membase + offset);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define HSU_REGS_BUFSIZE 1024
+
+static int hsu_show_regs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t port_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct uart_hsu_port *up = file->private_data;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MFD HSU port[%d] regs:\n", up->index);
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "IER: \t\t0x%08x\n", serial_in(up, UART_IER));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "PS: \t\t0x%08x\n", serial_in(up, UART_PS));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hsu_dma_chan *chan = file->private_data;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MFD HSU DMA channel [%d] regs:\n", chan->id);
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D1SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D1TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D2SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D2TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D3SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D3TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations port_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = hsu_show_regs_open,
+ .read = port_show_regs,
+};
+
+static const struct file_operations dma_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = hsu_show_regs_open,
+ .read = dma_show_regs,
+};
+
+static int hsu_debugfs_init(struct hsu_port *hsu)
+{
+ int i;
+ char name[32];
+
+ hsu->debugfs = debugfs_create_dir("hsu", NULL);
+ if (!hsu->debugfs)
+ return -ENOMEM;
+
+ for (i = 0; i < 3; i++) {
+ snprintf(name, sizeof(name), "port_%d_regs", i);
+ debugfs_create_file(name, S_IFREG | S_IRUGO,
+ hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
+ }
+
+ for (i = 0; i < 6; i++) {
+ snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
+ debugfs_create_file(name, S_IFREG | S_IRUGO,
+ hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
+ }
+
+ return 0;
+}
+
+static void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+ if (hsu->debugfs)
+ debugfs_remove_recursive(hsu->debugfs);
+}
+
+#else
+static inline int hsu_debugfs_init(struct hsu_port *hsu)
+{
+ return 0;
+}
+
+static inline void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void serial_hsu_enable_ms(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+void hsu_dma_tx(struct uart_hsu_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ struct hsu_dma_buffer *dbuf = &up->txbuf;
+ int count;
+
+ /* test_and_set_bit may be better, but we are already in spinlock-protected context anyway */
+ if (up->dma_tx_on)
+ return;
+
+ /* Update the circ buf info */
+ xmit->tail += dbuf->ofs;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ up->port.icount.tx += dbuf->ofs;
+ dbuf->ofs = 0;
+
+ /* Disable the channel */
+ chan_writel(up->txc, HSU_CH_CR, 0x0);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+ dma_sync_single_for_device(up->port.dev,
+ dbuf->dma_addr,
+ dbuf->dma_size,
+ DMA_TO_DEVICE);
+
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dbuf->ofs = count;
+
+ /* Reprogram the channel */
+ chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
+ chan_writel(up->txc, HSU_CH_D0TSR, count);
+
+ /* Reenable the channel */
+ chan_writel(up->txc, HSU_CH_DCR, 0x1
+ | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24));
+ up->dma_tx_on = 1;
+ chan_writel(up->txc, HSU_CH_CR, 0x1);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+}
+
+/* The buffer is already cache coherent */
+void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
+{
+ dbuf->ofs = 0;
+
+ chan_writel(rxc, HSU_CH_BSR, 32);
+ chan_writel(rxc, HSU_CH_MOTSR, 4);
+
+ chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
+ chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
+ chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
+ );
+ chan_writel(rxc, HSU_CH_CR, 0x3);
+
+ mod_timer(&rxc->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ);
+}
+
+/* Protected by spin_lock_irqsave(port->lock) */
+static void serial_hsu_start_tx(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+
+ if (up->use_dma) {
+ hsu_dma_tx(up);
+ } else if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_hsu_stop_tx(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ struct hsu_dma_chan *txc = up->txc;
+
+ if (up->use_dma)
+ chan_writel(txc, HSU_CH_CR, 0x0);
+ else if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+/* This is always called in spinlock protected mode, so
+ * modifying the timeout timer is safe here */
+void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
+{
+ struct hsu_dma_buffer *dbuf = &up->rxbuf;
+ struct hsu_dma_chan *chan = up->rxc;
+ struct uart_port *port = &up->port;
+ struct tty_struct *tty = port->state->port.tty;
+ int count;
+
+ if (!tty)
+ return;
+
+ /*
+ * First find out how much data has already been transferred;
+ * if this is a timeout DMA irq, take the trailing bytes out,
+ * push them up to the tty and reenable the channel
+ */
+
+ /* Timeout IRQ, need to wait some time, see Errata 2 */
+ if (int_sts & 0xf00)
+ udelay(2);
+
+ /* Stop the channel */
+ chan_writel(chan, HSU_CH_CR, 0x0);
+
+ count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
+ if (!count) {
+ /* Restart the channel before we leave */
+ chan_writel(chan, HSU_CH_CR, 0x3);
+ return;
+ }
+ del_timer(&chan->rx_timer);
+
+ dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+ dbuf->dma_size, DMA_FROM_DEVICE);
+
+ /*
+ * Head will only wrap around when we recycle
+ * the DMA buffer, and when that happens, we
+ * explicitly set tail to 0. So head will
+ * always be greater than tail.
+ */
+ tty_insert_flip_string(tty, dbuf->buf, count);
+ port->icount.rx += count;
+
+ dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+ dbuf->dma_size, DMA_FROM_DEVICE);
+
+ /* Reprogram the channel */
+ chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
+ chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
+ chan_writel(chan, HSU_CH_DCR, 0x1
+ | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
+ );
+ tty_flip_buffer_push(tty);
+
+ chan_writel(chan, HSU_CH_CR, 0x3);
+ chan->rx_timer.expires = jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ;
+ add_timer(&chan->rx_timer);
+
+}
+
+static void serial_hsu_stop_rx(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ struct hsu_dma_chan *chan = up->rxc;
+
+ if (up->use_dma)
+ chan_writel(chan, HSU_CH_CR, 0x2);
+ else {
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static inline void receive_chars(struct uart_hsu_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int ch, flag;
+ unsigned int max_count = 256;
+
+ if (!tty)
+ return;
+
+ do {
+ ch = serial_in(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+
+ dev_warn(up->dev, "We really rush into ERR/BI case, "
+ "status = 0x%02x\n", *status);
+ /* For statistics only */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (*status & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (*status & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (*status & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /* Mask off conditions which should be ignored. */
+ *status &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+ if (up->port.cons &&
+ up->port.cons->index == up->port.line) {
+ /* Recover the break flag from console xmit */
+ *status |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+#endif
+ if (*status & UART_LSR_BI) {
+ flag = TTY_BREAK;
+ } else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
+ ignore_char:
+ *status = serial_in(up, UART_LSR);
+ } while ((*status & UART_LSR_DR) && max_count--);
+ tty_flip_buffer_push(tty);
+}
+
+static void transmit_chars(struct uart_hsu_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_out(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_hsu_stop_tx(&up->port);
+ return;
+ }
+
+#ifndef MFD_HSU_A0_STEPPING
+ count = up->port.fifosize / 2;
+#else
+ /*
+ * A0 only supports the fully-empty IRQ, and the first char written
+ * into it won't clear the EMPT bit, so we need to be cautious
+ * and use a shorter buffer
+ */
+ count = up->port.fifosize - 4;
+#endif
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ serial_hsu_stop_tx(&up->port);
+}
+
+static inline void check_modem_status(struct uart_hsu_port *up)
+{
+ int status;
+
+ status = serial_in(up, UART_MSR);
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return;
+
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ /* We only get DDCD at HW init and reset */
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+ /* Will start/stop_tx accordingly */
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static irqreturn_t port_irq(int irq, void *dev_id)
+{
+ struct uart_hsu_port *up = dev_id;
+ unsigned int iir, lsr;
+ unsigned long flags;
+
+ if (unlikely(!up->running))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (up->use_dma) {
+ lsr = serial_in(up, UART_LSR);
+ if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE)))
+ dev_warn(up->dev,
+ "Got lsr irq while using DMA, lsr = 0x%2x\n",
+ lsr);
+ check_modem_status(up);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_NONE;
+ }
+
+ lsr = serial_in(up, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ check_modem_status(up);
+
+ /* lsr is refreshed by receive_chars() */
+ if (lsr & UART_LSR_THRE)
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static inline void dma_chan_irq(struct hsu_dma_chan *chan)
+{
+ struct uart_hsu_port *up = chan->uport;
+ unsigned long flags;
+ u32 int_sts;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (!up->use_dma || !up->running)
+ goto exit;
+
+ /*
+ * In any case we need to read-clear the IRQ status;
+ * there is a bug, see Errata 5, HSD 2900918
+ */
+ int_sts = chan_readl(chan, HSU_CH_SR);
+
+ /* Rx channel */
+ if (chan->dirt == DMA_FROM_DEVICE)
+ hsu_dma_rx(up, int_sts);
+
+ /* Tx channel */
+ if (chan->dirt == DMA_TO_DEVICE) {
+ chan_writel(chan, HSU_CH_CR, 0x0);
+ up->dma_tx_on = 0;
+ hsu_dma_tx(up);
+ }
+
+exit:
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return;
+}
+
+static irqreturn_t dma_irq(int irq, void *dev_id)
+{
+ struct hsu_port *hsu = dev_id;
+ u32 int_sts, i;
+
+ int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);
+
+ /* Currently only 6 channels may be used */
+ for (i = 0; i < 6; i++) {
+ if (int_sts & 0x1)
+ dma_chan_irq(&hsu->chans[i]);
+ int_sts >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial_hsu_tx_empty(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned char status;
+ unsigned int ret;
+
+ status = serial_in(up, UART_MSR);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr |= up->mcr;
+
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * What special to do:
+ * 1. choose the 64B FIFO mode
+ * 2. make sure not to select the half-empty mode on the A0 stepping
+ * 3. start DMA or PIO depending on the configuration
+ * 4. only allocate DMA memory when needed
+ */
+static int serial_hsu_startup(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+
+ /* Clear the interrupt registers. */
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ /* Now, initialize the UART, default is 8n1 */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ if (!up->use_dma)
+ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
+ else
+ up->ier = 0;
+ serial_out(up, UART_IER, up->ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /* DMA init */
+ if (up->use_dma) {
+ struct hsu_dma_buffer *dbuf;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ up->dma_tx_on = 0;
+
+ /* First allocate the RX buffer */
+ dbuf = &up->rxbuf;
+ dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
+ if (!dbuf->buf) {
+ up->use_dma = 0;
+ goto exit;
+ }
+ dbuf->dma_addr = dma_map_single(port->dev,
+ dbuf->buf,
+ HSU_DMA_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dbuf->dma_size = HSU_DMA_BUF_SIZE;
+
+ /* Start the RX channel right now */
+ hsu_dma_start_rx_chan(up->rxc, dbuf);
+
+ /* Next init the TX DMA */
+ dbuf = &up->txbuf;
+ dbuf->buf = xmit->buf;
+ dbuf->dma_addr = dma_map_single(port->dev,
+ dbuf->buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ dbuf->dma_size = UART_XMIT_SIZE;
+
+ /* These settings do not need to change at runtime */
+ chan_writel(up->txc, HSU_CH_BSR, 32);
+ chan_writel(up->txc, HSU_CH_MOTSR, 4);
+ dbuf->ofs = 0;
+ }
+
+exit:
+ /* And clear the interrupt registers again for luck. */
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ up->running = 1;
+ return 0;
+}
+
+static void serial_hsu_shutdown(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+
+ del_timer_sync(&up->rxc->rx_timer);
+
+ /* Disable interrupts from this port */
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+ up->running = 0;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /* Disable break condition and FIFOs */
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+}
+
+static void
+serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+ u32 mul = 0x3600;
+ u32 ps = 0x10;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ /* CMSPAR isn't supported by this driver */
+ if (tty)
+ tty->termios->c_cflag &= ~CMSPAR;
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ /*
+ * For the basic low baud rates the divisor can be derived directly
+ * from 2746800, e.g. 115200 = 2746800/24. The higher baud rates
+ * have to be handled case by case below, but the DIV reg is never
+ * touched and keeps its default value 0x3d09
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ quot = uart_get_divisor(port, baud);
+
+ switch (baud) {
+ case 3500000:
+ mul = 0x3345;
+ ps = 0xC;
+ quot = 1;
+ break;
+ case 2500000:
+ mul = 0x2710;
+ ps = 0x10;
+ quot = 1;
+ break;
+ case 1843200:
+ mul = 0x2400;
+ ps = 0x10;
+ quot = 1;
+ break;
+ case 1500000:
+ mul = 0x1D4C;
+ ps = 0xc;
+ quot = 1;
+ break;
+ default:
+ ;
+ }
+
+ if ((up->port.uartclk / quot) < (2400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
+ else if ((up->port.uartclk / quot) < (230400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
+ else
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
+
+ fcr |= UART_FCR_HSU_64B_FIFO;
+#ifdef MFD_HSU_A0_STEPPING
+ /* A0 doesn't support half empty IRQ */
+ fcr |= UART_FCR_FULL_EMPT_TXI;
+#endif
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /* Characters to ignore */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /* Ignore all characters if CREAD is not set */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts, disable
+ * MSI by default
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+
+ serial_out(up, UART_IER, up->ier);
+
+ if (termios->c_cflag & CRTSCTS)
+ up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
+ else
+ up->mcr &= ~UART_MCR_AFE;
+
+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
+ serial_out(up, UART_MUL, mul); /* set MUL */
+ serial_out(up, UART_PS, ps); /* set PS */
+ up->lcr = cval; /* Save LCR */
+ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
+ serial_out(up, UART_FCR, fcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void
+serial_hsu_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+}
+
+static void serial_hsu_release_port(struct uart_port *port)
+{
+}
+
+static int serial_hsu_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void serial_hsu_config_port(struct uart_port *port, int flags)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ up->port.type = PORT_MFD;
+}
+
+static int
+serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* We don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+static const char *
+serial_hsu_type(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ return up->name;
+}
+
+/* Mainly for uart console use */
+static struct uart_hsu_port *serial_hsu_ports[3];
+static struct uart_driver serial_hsu_reg;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/* Wait for transmitter & holding register to empty */
+static inline void wait_for_xmitr(struct uart_hsu_port *up)
+{
+ unsigned int status, tmout = 1000;
+
+ /* Wait up to 1ms for the character to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while (!(status & BOTH_EMPTY));
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+ udelay(1);
+ }
+}
+
+static void serial_hsu_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_hsu_port *up = serial_hsu_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+
+ /* First save the IER then disable the interrupts */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static struct console serial_hsu_console;
+
+static int __init
+serial_hsu_console_setup(struct console *co, char *options)
+{
+ struct uart_hsu_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ if (co->index == -1 || co->index >= serial_hsu_reg.nr)
+ co->index = 0;
+ up = serial_hsu_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
+
+ return ret;
+}
+
+static struct console serial_hsu_console = {
+ .name = "ttyMFD",
+ .write = serial_hsu_console_write,
+ .device = uart_console_device,
+ .setup = serial_hsu_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = 2,
+ .data = &serial_hsu_reg,
+};
+#endif
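
With CONFIG_SERIAL_MFD_HSU_CONSOLE enabled, port 2 is the one registered as console in serial_hsu_probe() below, so it would typically be selected on the kernel command line with something like the following (the baud/format string is an assumed example):

	console=ttyMFD2,115200n8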
+
+struct uart_ops serial_hsu_pops = {
+ .tx_empty = serial_hsu_tx_empty,
+ .set_mctrl = serial_hsu_set_mctrl,
+ .get_mctrl = serial_hsu_get_mctrl,
+ .stop_tx = serial_hsu_stop_tx,
+ .start_tx = serial_hsu_start_tx,
+ .stop_rx = serial_hsu_stop_rx,
+ .enable_ms = serial_hsu_enable_ms,
+ .break_ctl = serial_hsu_break_ctl,
+ .startup = serial_hsu_startup,
+ .shutdown = serial_hsu_shutdown,
+ .set_termios = serial_hsu_set_termios,
+ .pm = serial_hsu_pm,
+ .type = serial_hsu_type,
+ .release_port = serial_hsu_release_port,
+ .request_port = serial_hsu_request_port,
+ .config_port = serial_hsu_config_port,
+ .verify_port = serial_hsu_verify_port,
+};
+
+static struct uart_driver serial_hsu_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "MFD serial",
+ .dev_name = "ttyMFD",
+ .major = TTY_MAJOR,
+ .minor = 128,
+ .nr = 3,
+};
+
+#ifdef CONFIG_PM
+static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
+
+ /* Make sure this is not the internal dma controller */
+ if (priv && (pdev->device != 0x081E)) {
+ up = priv;
+ uart_suspend_port(&serial_hsu_reg, &up->port);
+ }
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int serial_hsu_resume(struct pci_dev *pdev)
+{
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "HSU: can't re-enable device, try to continue\n");
+
+ if (priv && (pdev->device != 0x081E)) {
+ up = priv;
+ uart_resume_port(&serial_hsu_reg, &up->port);
+ }
+ return 0;
+}
+#else
+#define serial_hsu_suspend NULL
+#define serial_hsu_resume NULL
+#endif
+
+/* temporary global pointer until we settle on using one or four PCI devices */
+static struct hsu_port *phsu;
+
+static int serial_hsu_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct uart_hsu_port *uport;
+ int index, ret;
+
+ printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
+ switch (pdev->device) {
+ case 0x081B:
+ index = 0;
+ break;
+ case 0x081C:
+ index = 1;
+ break;
+ case 0x081D:
+ index = 2;
+ break;
+ case 0x081E:
+ /* internal DMA controller */
+ index = 3;
+ break;
+ default:
+ dev_err(&pdev->dev, "HSU: out of index!\n");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ if (index == 3) {
+ /* DMA controller */
+ ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
+ if (ret) {
+ dev_err(&pdev->dev, "can not get IRQ\n");
+ goto err_disable;
+ }
+ pci_set_drvdata(pdev, phsu);
+ } else {
+ /* UART port 0~2 */
+ uport = &phsu->port[index];
+ uport->port.irq = pdev->irq;
+ uport->port.dev = &pdev->dev;
+ uport->dev = &pdev->dev;
+
+ ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
+ if (ret) {
+ dev_err(&pdev->dev, "can not get IRQ\n");
+ goto err_disable;
+ }
+ uart_add_one_port(&serial_hsu_reg, &uport->port);
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+ if (index == 2) {
+ register_console(&serial_hsu_console);
+ uport->port.cons = &serial_hsu_console;
+ }
+#endif
+ pci_set_drvdata(pdev, uport);
+ }
+
+ return 0;
+
+err_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void hsu_dma_rx_timeout(unsigned long data)
+{
+ struct hsu_dma_chan *chan = (void *)data;
+ struct uart_hsu_port *up = chan->uport;
+ struct hsu_dma_buffer *dbuf = &up->rxbuf;
+ int count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
+
+ if (!count) {
+ mod_timer(&chan->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ);
+ goto exit;
+ }
+
+ hsu_dma_rx(up, 0);
+exit:
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void hsu_global_init(void)
+{
+ struct hsu_port *hsu;
+ struct uart_hsu_port *uport;
+ struct hsu_dma_chan *dchan;
+ int i, ret;
+
+ hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
+ if (!hsu)
+ return;
+
+ /* Get basic io resource and map it */
+ hsu->paddr = 0xffa28000;
+ hsu->iolen = 0x1000;
+
+ if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
+ pr_warning("HSU: error in request mem region\n");
+
+ hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
+ if (!hsu->reg) {
+ pr_err("HSU: error in ioremap\n");
+ ret = -ENOMEM;
+ goto err_free_region;
+ }
+
+ /* Initialise the 3 UART ports */
+ uport = hsu->port;
+ for (i = 0; i < 3; i++) {
+ uport->port.type = PORT_MFD;
+ uport->port.iotype = UPIO_MEM;
+ uport->port.mapbase = (resource_size_t)hsu->paddr
+ + HSU_PORT_REG_OFFSET
+ + i * HSU_PORT_REG_LENGTH;
+ uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
+ + i * HSU_PORT_REG_LENGTH;
+
+ sprintf(uport->name, "hsu_port%d", i);
+ uport->port.fifosize = 64;
+ uport->port.ops = &serial_hsu_pops;
+ uport->port.line = i;
+ uport->port.flags = UPF_IOREMAP;
+ /* set the scalable maximum supported rate to 2746800 bps */
+ uport->port.uartclk = 115200 * 24 * 16;
+
+ uport->running = 0;
+ uport->txc = &hsu->chans[i * 2];
+ uport->rxc = &hsu->chans[i * 2 + 1];
+
+ serial_hsu_ports[i] = uport;
+ uport->index = i;
+ uport++;
+ }
+
+ /* Initialise 6 dma channels */
+ dchan = hsu->chans;
+ for (i = 0; i < 6; i++) {
+ dchan->id = i;
+ dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dchan->uport = &hsu->port[i/2];
+ dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
+ i * HSU_DMA_CHANS_REG_LENGTH;
+
+ /* Workaround for RX */
+ if (dchan->dirt == DMA_FROM_DEVICE) {
+ init_timer(&dchan->rx_timer);
+ dchan->rx_timer.function = hsu_dma_rx_timeout;
+ dchan->rx_timer.data = (unsigned long)dchan;
+ }
+ dchan++;
+ }
+
+ phsu = hsu;
+ hsu_debugfs_init(hsu);
+ return;
+
+err_free_region:
+ release_mem_region(hsu->paddr, hsu->iolen);
+ kfree(hsu);
+ return;
+}
+
+static void serial_hsu_remove(struct pci_dev *pdev)
+{
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
+
+ if (!priv)
+ return;
+
+ /* For port 0/1/2, priv is the address of uart_hsu_port */
+ if (pdev->device != 0x081E) {
+ up = priv;
+ uart_remove_one_port(&serial_hsu_reg, &up->port);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+ free_irq(pdev->irq, priv);
+ pci_disable_device(pdev);
+}
+
+/* First 3 are UART ports, and the 4th is the DMA */
+static const struct pci_device_id pci_ids[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
+ {},
+};
+
+static struct pci_driver hsu_pci_driver = {
+ .name = "HSU serial",
+ .id_table = pci_ids,
+ .probe = serial_hsu_probe,
+ .remove = __devexit_p(serial_hsu_remove),
+ .suspend = serial_hsu_suspend,
+ .resume = serial_hsu_resume,
+};
+
+static int __init hsu_pci_init(void)
+{
+ int ret;
+
+ hsu_global_init();
+
+ ret = uart_register_driver(&serial_hsu_reg);
+ if (ret)
+ return ret;
+
+ return pci_register_driver(&hsu_pci_driver);
+}
+
+static void __exit hsu_pci_exit(void)
+{
+ pci_unregister_driver(&hsu_pci_driver);
+ uart_unregister_driver(&serial_hsu_reg);
+
+ hsu_debugfs_remove(phsu);
+
+ kfree(phsu);
+}
+
+module_init(hsu_pci_init);
+module_exit(hsu_pci_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
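
For the standard rates, serial_hsu_set_termios() above leaves MUL/PS at their 0x3600/0x10 defaults and programs the classic 16550-style divisor, with uartclk fixed to 115200 * 24 * 16 in hsu_global_init(). The standalone sketch below only illustrates that arithmetic and is not part of the driver:

	/* Illustrative sketch of the HSU DLL/DLM divisor math (standard-rate path) */
	#include <stdio.h>

	int main(void)
	{
		unsigned int uartclk = 115200 * 24 * 16;	/* 44236800, as set in hsu_global_init() */
		unsigned int bauds[] = { 9600, 38400, 115200 };
		unsigned int i;

		for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++) {
			/* what uart_get_divisor() yields for these evenly dividing rates */
			unsigned int quot = uartclk / (16 * bauds[i]);

			printf("baud %6u -> divisor %u\n", bauds[i], quot);
		}
		return 0;	/* e.g. 115200 -> divisor 24, matching 115200 = 2746800/24 */
	}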
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 84a35f69901..c4399e23565 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -113,7 +113,9 @@ struct psc_ops {
unsigned char (*read_char)(struct uart_port *port);
void (*cw_disable_ints)(struct uart_port *port);
void (*cw_restore_ints)(struct uart_port *port);
- unsigned long (*getuartclk)(void *p);
+ unsigned int (*set_baudrate)(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old);
int (*clock)(struct uart_port *port, int enable);
int (*fifoc_init)(void);
void (*fifoc_uninit)(void);
@@ -121,6 +123,16 @@ struct psc_ops {
irqreturn_t (*handle_irq)(struct uart_port *port);
};
+/* setting the prescaler and divisor reg is common for all chips */
+static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc,
+ u16 prescaler, unsigned int divisor)
+{
+ /* select prescaler */
+ out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
+ out_8(&psc->ctur, divisor >> 8);
+ out_8(&psc->ctlr, divisor & 0xff);
+}
+
#ifdef CONFIG_PPC_MPC52xx
#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
static void mpc52xx_psc_fifo_init(struct uart_port *port)
@@ -128,9 +140,6 @@ static void mpc52xx_psc_fifo_init(struct uart_port *port)
struct mpc52xx_psc __iomem *psc = PSC(port);
struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port);
- /* /32 prescaler */
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00);
-
out_8(&fifo->rfcntl, 0x00);
out_be16(&fifo->rfalarm, 0x1ff);
out_8(&fifo->tfcntl, 0x07);
@@ -219,15 +228,47 @@ static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}
-/* Search for bus-frequency property in this node or a parent */
-static unsigned long mpc52xx_getuartclk(void *p)
+static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
{
- /*
- * 5200 UARTs have a / 32 prescaler
- * but the generic serial code assumes 16
- * so return ipb freq / 2
- */
- return mpc5xxx_get_bus_frequency(p) / 2;
+ unsigned int baud;
+ unsigned int divisor;
+
+ /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (32 * 0xffff) + 1,
+ port->uartclk / 32);
+ divisor = (port->uartclk + 16 * baud) / (32 * baud);
+
+ /* enable the /32 prescaler and set the divisor */
+ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
+ return baud;
+}
+
+static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int divisor;
+ u16 prescaler;
+
+ /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the
+ * ipb freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (32 * 0xffff) + 1,
+ port->uartclk / 4);
+ divisor = (port->uartclk + 2 * baud) / (4 * baud);
+
+ /* select the proper prescaler and set the divisor */
+ if (divisor > 0xffff) {
+ divisor = (divisor + 4) / 8;
+ prescaler = 0xdd00; /* /32 */
+ } else
+ prescaler = 0xff00; /* /4 */
+ mpc52xx_set_divisor(PSC(port), prescaler, divisor);
+ return baud;
}
static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np)
@@ -258,7 +299,28 @@ static struct psc_ops mpc52xx_psc_ops = {
.read_char = mpc52xx_psc_read_char,
.cw_disable_ints = mpc52xx_psc_cw_disable_ints,
.cw_restore_ints = mpc52xx_psc_cw_restore_ints,
- .getuartclk = mpc52xx_getuartclk,
+ .set_baudrate = mpc5200_psc_set_baudrate,
+ .get_irq = mpc52xx_psc_get_irq,
+ .handle_irq = mpc52xx_psc_handle_irq,
+};
+
+static struct psc_ops mpc5200b_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+ .rx_rdy = mpc52xx_psc_rx_rdy,
+ .tx_rdy = mpc52xx_psc_tx_rdy,
+ .tx_empty = mpc52xx_psc_tx_empty,
+ .stop_rx = mpc52xx_psc_stop_rx,
+ .start_tx = mpc52xx_psc_start_tx,
+ .stop_tx = mpc52xx_psc_stop_tx,
+ .rx_clr_irq = mpc52xx_psc_rx_clr_irq,
+ .tx_clr_irq = mpc52xx_psc_tx_clr_irq,
+ .write_char = mpc52xx_psc_write_char,
+ .read_char = mpc52xx_psc_read_char,
+ .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
+ .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
+ .set_baudrate = mpc5200b_psc_set_baudrate,
.get_irq = mpc52xx_psc_get_irq,
.handle_irq = mpc52xx_psc_handle_irq,
};
@@ -392,9 +454,35 @@ static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f);
}
-static unsigned long mpc512x_getuartclk(void *p)
+static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
{
- return mpc5xxx_get_bus_frequency(p);
+ unsigned int baud;
+ unsigned int divisor;
+
+ /*
+ * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on
+ * pg. 30-10 that the chip supports a /32 and a /10 prescaler.
+ * Furthermore, it states that "After reset, the prescaler by 10
+ * for the UART mode is selected", but the reset register value is
+ * 0x0000 which means a /32 prescaler. This is wrong.
+ *
+ * In reality using /32 prescaler doesn't work, as it is not supported!
+ * Use /16 or /10 prescaler, see "MPC5121e Hardware Design Guide",
+ * Chapter 4.1 PSC in UART Mode.
+ * Calculate with a /16 prescaler here.
+ */
+
+ /* uartclk contains the ips freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (16 * 0xffff) + 1,
+ port->uartclk / 16);
+ divisor = (port->uartclk + 8 * baud) / (16 * baud);
+
+ /* enable the /16 prescaler and set the divisor */
+ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
+ return baud;
}
/* Init PSC FIFO Controller */
@@ -412,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
pr_err("%s: Can't map FIFOC\n", __func__);
+ of_node_put(np);
return -ENODEV;
}
@@ -498,7 +587,7 @@ static struct psc_ops mpc512x_psc_ops = {
.read_char = mpc512x_psc_read_char,
.cw_disable_ints = mpc512x_psc_cw_disable_ints,
.cw_restore_ints = mpc512x_psc_cw_restore_ints,
- .getuartclk = mpc512x_getuartclk,
+ .set_baudrate = mpc512x_psc_set_baudrate,
.clock = mpc512x_psc_clock,
.fifoc_init = mpc512x_psc_fifoc_init,
.fifoc_uninit = mpc512x_psc_fifoc_uninit,
@@ -666,8 +755,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
struct mpc52xx_psc __iomem *psc = PSC(port);
unsigned long flags;
unsigned char mr1, mr2;
- unsigned short ctr;
- unsigned int j, baud, quot;
+ unsigned int j;
+ unsigned int baud;
/* Prepare what we're gonna write */
mr1 = 0;
@@ -704,16 +793,9 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
mr2 |= MPC52xx_PSC_MODE_TXCTS;
}
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
- ctr = quot & 0xffff;
-
/* Get the lock */
spin_lock_irqsave(&port->lock, flags);
- /* Update the per-port timeout */
- uart_update_timeout(port, new->c_cflag, baud);
-
/* Do our best to flush TX & RX, so we don't lose anything */
/* But we don't wait indefinitely ! */
j = 5000000; /* Maximum wait */
@@ -737,8 +819,10 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
out_8(&psc->mode, mr1);
out_8(&psc->mode, mr2);
- out_8(&psc->ctur, ctr >> 8);
- out_8(&psc->ctlr, ctr & 0xff);
+ baud = psc_ops->set_baudrate(port, new, old);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, new->c_cflag, baud);
if (UART_ENABLE_MS(port, new->c_cflag))
mpc52xx_uart_enable_ms(port);
@@ -1118,7 +1202,7 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- uartclk = psc_ops->getuartclk(np);
+ uartclk = mpc5xxx_get_bus_frequency(np);
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1201,6 +1285,7 @@ static struct uart_driver mpc52xx_uart_driver = {
static struct of_device_id mpc52xx_uart_of_match[] = {
#ifdef CONFIG_PPC_MPC52xx
+ { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, },
{ .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
/* binding used by old lite5200 device trees: */
{ .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
@@ -1214,7 +1299,7 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
};
static int __devinit
-mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
+mpc52xx_uart_of_probe(struct platform_device *op, const struct of_device_id *match)
{
int idx = -1;
unsigned int uartclk;
@@ -1233,7 +1318,10 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
pr_debug("Found %s assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[idx]->full_name, idx);
- uartclk = psc_ops->getuartclk(op->dev.of_node);
+ /* set the uart clock to the input clock of the psc, the different
+ * prescalers are taken into account in the set_baudrate() methods
+ * of the respective chip */
+ uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1282,7 +1370,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
}
static int
-mpc52xx_uart_of_remove(struct of_device *op)
+mpc52xx_uart_of_remove(struct platform_device *op)
{
struct uart_port *port = dev_get_drvdata(&op->dev);
dev_set_drvdata(&op->dev, NULL);
@@ -1295,7 +1383,7 @@ mpc52xx_uart_of_remove(struct of_device *op)
#ifdef CONFIG_PM
static int
-mpc52xx_uart_of_suspend(struct of_device *op, pm_message_t state)
+mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
{
struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
@@ -1306,7 +1394,7 @@ mpc52xx_uart_of_suspend(struct of_device *op, pm_message_t state)
}
static int
-mpc52xx_uart_of_resume(struct of_device *op)
+mpc52xx_uart_of_resume(struct platform_device *op)
{
struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
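
The mpc5200b_psc_set_baudrate() hunk earlier in this file selects the /4 prescaler when its divisor still fits in 16 bits and falls back to /32 otherwise. The standalone sketch below only illustrates that selection, assuming a 132 MHz IPB clock (an example value, not taken from this patch):

	/* Illustrative sketch of the MPC5200B prescaler/divisor selection */
	#include <stdio.h>

	int main(void)
	{
		unsigned int uartclk = 132000000;	/* assumed IPB frequency */
		unsigned int bauds[] = { 115200, 9600, 300 };
		unsigned int i;

		for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++) {
			unsigned int baud = bauds[i];
			unsigned int divisor = (uartclk + 2 * baud) / (4 * baud);
			unsigned int prescaler = 0xff00;	/* /4 */

			if (divisor > 0xffff) {			/* too big, fall back to /32 */
				divisor = (divisor + 4) / 8;
				prescaler = 0xdd00;
			}
			printf("baud %6u -> prescaler 0x%04x, divisor %u\n",
			       baud, prescaler, divisor);
		}
		return 0;
	}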
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c
new file mode 100644
index 00000000000..51c15f58e01
--- /dev/null
+++ b/drivers/serial/mrst_max3110.c
@@ -0,0 +1,845 @@
+/*
+ * max3110.c - spi uart protocol driver for Maxim 3110 on Moorestown
+ *
+ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Note:
+ * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
+ * 1 word. If the SPI master controller doesn't support sclk frequency
+ * changes, the chars need to be sent out one by one with some delay
+ *
+ * 2. Currently only the RX available interrupt is used; there is no need
+ * to wait for the TXE interrupt on a low speed UART device
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <asm/atomic.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/dw_spi.h>
+
+#include "mrst_max3110.h"
+
+#define PR_FMT "mrst_max3110: "
+
+#define UART_TX_NEEDED 1
+#define CON_TX_NEEDED 2
+#define BIT_IRQ_PENDING 3
+
+struct uart_max3110 {
+ struct uart_port port;
+ struct spi_device *spi;
+ char *name;
+
+ wait_queue_head_t wq;
+ struct task_struct *main_thread;
+ struct task_struct *read_thread;
+ struct mutex thread_mutex;
+
+ u32 baud;
+ u16 cur_conf;
+ u8 clock;
+ u8 parity, word_7bits;
+
+ unsigned long uart_flags;
+
+ /* console related */
+ struct circ_buf con_xmit;
+
+ /* irq related */
+ u16 irq;
+};
+
+/* global data structure, may need to be removed */
+struct uart_max3110 *pmax;
+static inline void receive_char(struct uart_max3110 *max, u8 ch);
+static void receive_chars(struct uart_max3110 *max,
+ unsigned char *str, int len);
+static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf);
+static void max3110_console_receive(struct uart_max3110 *max);
+
+int max3110_write_then_read(struct uart_max3110 *max,
+ const u8 *txbuf, u8 *rxbuf, unsigned len, int always_fast)
+{
+ struct spi_device *spi = max->spi;
+ struct spi_message message;
+ struct spi_transfer x;
+ int ret;
+
+ if (!txbuf || !rxbuf)
+ return -EINVAL;
+
+ spi_message_init(&message);
+ memset(&x, 0, sizeof x);
+ x.len = len;
+ x.tx_buf = txbuf;
+ x.rx_buf = rxbuf;
+ spi_message_add_tail(&x, &message);
+
+ if (always_fast)
+ x.speed_hz = 3125000;
+ else if (max->baud)
+ x.speed_hz = max->baud;
+
+ /* Do the i/o */
+ ret = spi_sync(spi, &message);
+ return ret;
+}
+
+/* Write a u16 to the device, and return one u16 read back */
+int max3110_out(struct uart_max3110 *max, const u16 out)
+{
+ u16 tmp;
+ int ret;
+
+ ret = max3110_write_then_read(max, (u8 *)&out, (u8 *)&tmp, 2, 1);
+ if (ret)
+ return ret;
+
+ /* If some valid data is read back */
+ if (tmp & MAX3110_READ_DATA_AVAILABLE)
+ receive_char(max, (tmp & 0xff));
+
+ return ret;
+}
+
+#define MAX_READ_LEN 20
+/*
+ * This is usually used to read data from the SPI RX FIFO, which doesn't
+ * need any delay like flushing a character out does. It returns how many
+ * valid bytes are read back
+ */
+static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf)
+{
+ u16 out[MAX_READ_LEN], in[MAX_READ_LEN];
+ u8 *pbuf, valid_str[MAX_READ_LEN];
+ int i, j, bytelen;
+
+ if (len > MAX_READ_LEN) {
+ pr_err(PR_FMT "read len %d is too large\n", len);
+ return 0;
+ }
+
+ bytelen = len * 2;
+ memset(out, 0, bytelen);
+ memset(in, 0, bytelen);
+
+ if (max3110_write_then_read(max, (u8 *)out, (u8 *)in, bytelen, 1))
+ return 0;
+
+ /* If the caller doesn't provide a buffer, handle the received chars here */
+ pbuf = buf ? buf : valid_str;
+
+ for (i = 0, j = 0; i < len; i++) {
+ if (in[i] & MAX3110_READ_DATA_AVAILABLE)
+ pbuf[j++] = (u8)(in[i] & 0xff);
+ }
+
+ if (j && (pbuf == valid_str))
+ receive_chars(max, valid_str, j);
+
+ return j;
+}
+
+static void serial_m3110_con_putchar(struct uart_port *port, int ch)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ struct circ_buf *xmit = &max->con_xmit;
+
+ if (uart_circ_chars_free(xmit)) {
+ xmit->buf[xmit->head] = (char)ch;
+ xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
+ }
+
+
+ if (!test_and_set_bit(CON_TX_NEEDED, &max->uart_flags))
+ wake_up_process(max->main_thread);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void serial_m3110_con_write(struct console *co,
+ const char *s, unsigned int count)
+{
+ if (!pmax)
+ return;
+
+ uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
+}
+
+static int __init
+serial_m3110_con_setup(struct console *co, char *options)
+{
+ struct uart_max3110 *max = pmax;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ pr_info(PR_FMT "setting up console\n");
+
+ if (!max) {
+ pr_err(PR_FMT "pmax is NULL, return");
+ return -ENODEV;
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&max->port, co, baud, parity, bits, flow);
+}
+
+static struct tty_driver *serial_m3110_con_device(struct console *co,
+ int *index)
+{
+ struct uart_driver *p = co->data;
+ *index = co->index;
+ return p->tty_driver;
+}
+
+static struct uart_driver serial_m3110_reg;
+static struct console serial_m3110_console = {
+ .name = "ttyS",
+ .write = serial_m3110_con_write,
+ .device = serial_m3110_con_device,
+ .setup = serial_m3110_con_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_m3110_reg,
+};
+
+#define MRST_CONSOLE (&serial_m3110_console)
+
+static unsigned int serial_m3110_tx_empty(struct uart_port *port)
+{
+ return 1;
+}
+
+static void serial_m3110_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+/* stop_rx will be called in spin_lock env */
+static void serial_m3110_stop_rx(struct uart_port *port)
+{
+ return;
+}
+
+#define WORDS_PER_XFER 128
+static inline void send_circ_buf(struct uart_max3110 *max,
+ struct circ_buf *xmit)
+{
+ int len, left = 0;
+ u16 obuf[WORDS_PER_XFER], ibuf[WORDS_PER_XFER];
+ u8 valid_str[WORDS_PER_XFER];
+ int i, j;
+
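+	/*
+	 * Push the circ buffer out in chunks of up to WORDS_PER_XFER words,
+	 * tagging each TX word with WD_TAG; any RX data returned by the
+	 * full-duplex transfer is forwarded to the tty as well.
+	 */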
+ while (!uart_circ_empty(xmit)) {
+ left = uart_circ_chars_pending(xmit);
+ while (left) {
+ len = (left >= WORDS_PER_XFER) ? WORDS_PER_XFER : left;
+
+ memset(obuf, 0, len * 2);
+ memset(ibuf, 0, len * 2);
+ for (i = 0; i < len; i++) {
+ obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
+ xmit->tail = (xmit->tail + 1) &
+ (UART_XMIT_SIZE - 1);
+ }
+ max3110_write_then_read(max, (u8 *)obuf,
+ (u8 *)ibuf, len * 2, 0);
+
+ for (i = 0, j = 0; i < len; i++) {
+ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
+ valid_str[j++] = (u8)(ibuf[i] & 0xff);
+ }
+
+ if (j)
+ receive_chars(max, valid_str, j);
+
+ max->port.icount.tx += len;
+ left -= len;
+ }
+ }
+}
+
+static void transmit_char(struct uart_max3110 *max)
+{
+ struct uart_port *port = &max->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ send_circ_buf(max, xmit);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ serial_m3110_stop_tx(port);
+}
+
+/* This will be called by uart_write() and tty_write(); it
+ * must not sleep */
+static void serial_m3110_start_tx(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+
+ if (!test_and_set_bit(UART_TX_NEEDED, &max->uart_flags))
+ wake_up_process(max->main_thread);
+}
+
+static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
+{
+ struct uart_port *port = &max->port;
+ struct tty_struct *tty;
+ int usable;
+
+	/* If the uart is not opened, just return */
+ if (!port->state)
+ return;
+
+ tty = port->state->port.tty;
+ if (!tty)
+		return;	/* chars received before the tty is opened are dropped */
+
+	while (len) {
+		usable = tty_buffer_request_room(tty, len);
+		if (!usable)
+			break;	/* no room left in the flip buffer */
+		tty_insert_flip_string(tty, str, usable);
+		str += usable;
+		port->icount.rx += usable;
+		tty_flip_buffer_push(tty);
+		len -= usable;
+	}
+}
+
+static inline void receive_char(struct uart_max3110 *max, u8 ch)
+{
+ receive_chars(max, &ch, 1);
+}
+
+static void max3110_console_receive(struct uart_max3110 *max)
+{
+ int loop = 1, num, total = 0;
+ u8 recv_buf[512], *pbuf;
+
+ pbuf = recv_buf;
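+	/*
+	 * Poll the FIFO in 8-word chunks; whenever data arrives, reset the
+	 * retry count to 10, and flush to the tty before recv_buf overflows.
+	 */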
+ do {
+ num = max3110_read_multi(max, 8, pbuf);
+
+ if (num) {
+ loop = 10;
+ pbuf += num;
+ total += num;
+
+ if (total >= 500) {
+ receive_chars(max, recv_buf, total);
+ pbuf = recv_buf;
+ total = 0;
+ }
+ }
+ } while (--loop);
+
+ if (total)
+ receive_chars(max, recv_buf, total);
+}
+
+static int max3110_main_thread(void *_max)
+{
+ struct uart_max3110 *max = _max;
+ wait_queue_head_t *wq = &max->wq;
+ int ret = 0;
+ struct circ_buf *xmit = &max->con_xmit;
+
+ init_waitqueue_head(wq);
+ pr_info(PR_FMT "start main thread\n");
+
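+	/*
+	 * Sleep until the IRQ handler or a TX path sets a flag bit, then
+	 * service pending RX first, followed by console and uart TX.
+	 */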
+ do {
+ wait_event_interruptible(*wq, max->uart_flags || kthread_should_stop());
+
+ mutex_lock(&max->thread_mutex);
+
+ if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
+ max3110_console_receive(max);
+
+ /* first handle console output */
+ if (test_and_clear_bit(CON_TX_NEEDED, &max->uart_flags))
+ send_circ_buf(max, xmit);
+
+ /* handle uart output */
+ if (test_and_clear_bit(UART_TX_NEEDED, &max->uart_flags))
+ transmit_char(max);
+
+ mutex_unlock(&max->thread_mutex);
+
+ } while (!kthread_should_stop());
+
+ return ret;
+}
+
+#ifdef CONFIG_MRST_MAX3110_IRQ
+static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
+{
+ struct uart_max3110 *max = dev_id;
+
+	/* The max3110's interrupt is falling-edge triggered, not level
+	 * triggered, so there is no need to disable the irq here */
+ if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
+ wake_up_process(max->main_thread);
+
+ return IRQ_HANDLED;
+}
+#else
+/* If the RX IRQ is not used, a thread is needed to poll for received data */
+static int max3110_read_thread(void *_max)
+{
+ struct uart_max3110 *max = _max;
+
+ pr_info(PR_FMT "start read thread\n");
+ do {
+ mutex_lock(&max->thread_mutex);
+ max3110_console_receive(max);
+ mutex_unlock(&max->thread_mutex);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 20);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+#endif
+
+static int serial_m3110_startup(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ u16 config = 0;
+ int ret = 0;
+
+ if (port->line != 0)
+ pr_err(PR_FMT "uart port startup failed\n");
+
+	/* First disable all IRQs and configure the port for 115200, 8n1 */
+ config = WC_TAG | WC_FIFO_ENABLE
+ | WC_1_STOPBITS
+ | WC_8BIT_WORD
+ | WC_BAUD_DR2;
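+	/* WC_BAUD_DR2 gives 115200 with the 3.6864 MHz clock selected in probe */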
+ ret = max3110_out(max, config);
+
+	/* as we use a thread to handle tx/rx, we need to set low latency */
+ port->state->port.tty->low_latency = 1;
+
+#ifdef CONFIG_MRST_MAX3110_IRQ
+ ret = request_irq(max->irq, serial_m3110_irq,
+ IRQ_TYPE_EDGE_FALLING, "max3110", max);
+ if (ret)
+ return ret;
+
+ /* enable RX IRQ only */
+ config |= WC_RXA_IRQ_ENABLE;
+ max3110_out(max, config);
+#else
+	/* when the RX IRQ is not used, start a read thread to poll for input data */
+ max->read_thread =
+ kthread_run(max3110_read_thread, max, "max3110_read");
+#endif
+
+ max->cur_conf = config;
+ return 0;
+}
+
+static void serial_m3110_shutdown(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ u16 config;
+
+ if (max->read_thread) {
+ kthread_stop(max->read_thread);
+ max->read_thread = NULL;
+ }
+
+#ifdef CONFIG_MRST_MAX3110_IRQ
+ free_irq(max->irq, max);
+#endif
+
+ /* Disable interrupts from this port */
+ config = WC_TAG | WC_SW_SHDI;
+ max3110_out(max, config);
+}
+
+static void serial_m3110_release_port(struct uart_port *port)
+{
+}
+
+static int serial_m3110_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void serial_m3110_config_port(struct uart_port *port, int flags)
+{
+	/* give it a fake type */
+ port->type = PORT_PXA;
+}
+
+static int
+serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+
+static const char *serial_m3110_type(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ return max->name;
+}
+
+static void
+serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ unsigned char cval;
+ unsigned int baud, parity = 0;
+ int clk_div = -1;
+ u16 new_conf = max->cur_conf;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ new_conf |= WC_7BIT_WORD;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ new_conf |= WC_8BIT_WORD;
+ break;
+ }
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
+
+	/* first calculate the divisor for the 1.8432 MHz clock case */
+ switch (baud) {
+ case 300:
+ clk_div = WC_BAUD_DR384;
+ break;
+ case 600:
+ clk_div = WC_BAUD_DR192;
+ break;
+ case 1200:
+ clk_div = WC_BAUD_DR96;
+ break;
+ case 2400:
+ clk_div = WC_BAUD_DR48;
+ break;
+ case 4800:
+ clk_div = WC_BAUD_DR24;
+ break;
+ case 9600:
+ clk_div = WC_BAUD_DR12;
+ break;
+ case 19200:
+ clk_div = WC_BAUD_DR6;
+ break;
+ case 38400:
+ clk_div = WC_BAUD_DR3;
+ break;
+ case 57600:
+ clk_div = WC_BAUD_DR2;
+ break;
+ case 115200:
+ clk_div = WC_BAUD_DR1;
+ break;
+ case 230400:
+ if (max->clock & MAX3110_HIGH_CLK)
+ break;
+ default:
+ /* pick the previous baud rate */
+ baud = max->baud;
+ clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
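+	/*
+	 * The 3.6864 MHz crystal is twice the reference clock, so the
+	 * divide ratio must be doubled to keep the requested baud rate.
+	 */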
+ if (max->clock & MAX3110_HIGH_CLK) {
+ clk_div += 1;
+		/* the high-clock version of the max3110 doesn't support B300 */
+ if (baud == 300)
+ baud = 600;
+ if (baud == 230400)
+ clk_div = WC_BAUD_DR1;
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
+ new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
+ if (termios->c_cflag & CSTOPB)
+ new_conf |= WC_2_STOPBITS;
+ else
+ new_conf &= ~WC_2_STOPBITS;
+
+ if (termios->c_cflag & PARENB) {
+ new_conf |= WC_PARITY_ENABLE;
+ parity |= UART_LCR_PARITY;
+ } else
+ new_conf &= ~WC_PARITY_ENABLE;
+
+ if (!(termios->c_cflag & PARODD))
+ parity |= UART_LCR_EPAR;
+ max->parity = parity;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ new_conf |= WC_TAG;
+ if (new_conf != max->cur_conf) {
+ max3110_out(max, new_conf);
+ max->cur_conf = new_conf;
+ max->baud = baud;
+ }
+}
+
+/* don't handle hw handshaking */
+static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
+{
+	return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void serial_m3110_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+}
+
+static void serial_m3110_enable_ms(struct uart_port *port)
+{
+}
+
+struct uart_ops serial_m3110_ops = {
+ .tx_empty = serial_m3110_tx_empty,
+ .set_mctrl = serial_m3110_set_mctrl,
+ .get_mctrl = serial_m3110_get_mctrl,
+ .stop_tx = serial_m3110_stop_tx,
+ .start_tx = serial_m3110_start_tx,
+ .stop_rx = serial_m3110_stop_rx,
+ .enable_ms = serial_m3110_enable_ms,
+ .break_ctl = serial_m3110_break_ctl,
+ .startup = serial_m3110_startup,
+ .shutdown = serial_m3110_shutdown,
+ .set_termios = serial_m3110_set_termios, /* must have */
+ .pm = serial_m3110_pm,
+ .type = serial_m3110_type,
+ .release_port = serial_m3110_release_port,
+ .request_port = serial_m3110_request_port,
+ .config_port = serial_m3110_config_port,
+ .verify_port = serial_m3110_verify_port,
+};
+
+static struct uart_driver serial_m3110_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "MRST serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = 1,
+ .cons = MRST_CONSOLE,
+};
+
+static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
+{
+ return 0;
+}
+
+static int serial_m3110_resume(struct spi_device *spi)
+{
+ return 0;
+}
+
+static struct dw_spi_chip spi0_uart = {
+ .poll_mode = 1,
+ .enable_dma = 0,
+ .type = SPI_FRF_SPI,
+};
+
+static int serial_m3110_probe(struct spi_device *spi)
+{
+ struct uart_max3110 *max;
+ int ret;
+ unsigned char *buffer;
+ u16 res;
+ max = kzalloc(sizeof(*max), GFP_KERNEL);
+ if (!max)
+ return -ENOMEM;
+
+ /* set spi info */
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 16;
+ max->clock = MAX3110_HIGH_CLK;
+ spi->controller_data = &spi0_uart;
+
+ spi_setup(spi);
+
+	max->port.type = PORT_PXA;	/* should apply for a dedicated max3110 type */
+	max->port.fifosize = 2;		/* only has a 16-bit buffer */
+ max->port.ops = &serial_m3110_ops;
+ max->port.line = 0;
+ max->port.dev = &spi->dev;
+ max->port.uartclk = 115200;
+
+ max->spi = spi;
+ max->name = spi->modalias; /* use spi name as the name */
+ max->irq = (u16)spi->irq;
+
+ mutex_init(&max->thread_mutex);
+
+ max->word_7bits = 0;
+ max->parity = 0;
+ max->baud = 0;
+
+ max->cur_conf = 0;
+ max->uart_flags = 0;
+
+ /* Check if reading configuration register returns something sane */
+
+ res = RC_TAG;
+ ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
+ if (ret < 0 || res == 0 || res == 0xffff) {
+		printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)\n",
+			res);
+ ret = -ENODEV;
+ goto err_get_page;
+ }
+ buffer = (unsigned char *)__get_free_page(GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto err_get_page;
+ }
+ max->con_xmit.buf = (unsigned char *)buffer;
+ max->con_xmit.head = max->con_xmit.tail = 0;
+
+ max->main_thread = kthread_run(max3110_main_thread,
+ max, "max3110_main");
+ if (IS_ERR(max->main_thread)) {
+ ret = PTR_ERR(max->main_thread);
+ goto err_kthread;
+ }
+
+ pmax = max;
+	/* give membase a pseudo value to pass serial_core's check */
+ max->port.membase = (void *)0xff110000;
+ uart_add_one_port(&serial_m3110_reg, &max->port);
+
+ return 0;
+
+err_kthread:
+ free_page((unsigned long)buffer);
+err_get_page:
+ pmax = NULL;
+ kfree(max);
+ return ret;
+}
+
+static int max3110_remove(struct spi_device *dev)
+{
+ struct uart_max3110 *max = pmax;
+
+ if (!pmax)
+ return 0;
+
+ pmax = NULL;
+ uart_remove_one_port(&serial_m3110_reg, &max->port);
+
+ free_page((unsigned long)max->con_xmit.buf);
+
+ if (max->main_thread)
+ kthread_stop(max->main_thread);
+
+ kfree(max);
+ return 0;
+}
+
+static struct spi_driver uart_max3110_driver = {
+ .driver = {
+ .name = "spi_max3111",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = serial_m3110_probe,
+ .remove = __devexit_p(max3110_remove),
+ .suspend = serial_m3110_suspend,
+ .resume = serial_m3110_resume,
+};
+
+
+int __init serial_m3110_init(void)
+{
+ int ret = 0;
+
+ ret = uart_register_driver(&serial_m3110_reg);
+ if (ret)
+ return ret;
+
+ ret = spi_register_driver(&uart_max3110_driver);
+ if (ret)
+ uart_unregister_driver(&serial_m3110_reg);
+
+ return ret;
+}
+
+void __exit serial_m3110_exit(void)
+{
+ spi_unregister_driver(&uart_max3110_driver);
+ uart_unregister_driver(&serial_m3110_reg);
+}
+
+module_init(serial_m3110_init);
+module_exit(serial_m3110_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("max3110-uart");
diff --git a/drivers/serial/mrst_max3110.h b/drivers/serial/mrst_max3110.h
new file mode 100644
index 00000000000..363478acb2c
--- /dev/null
+++ b/drivers/serial/mrst_max3110.h
@@ -0,0 +1,59 @@
+#ifndef _MRST_MAX3110_H
+#define _MRST_MAX3110_H
+
+#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHz */
+#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHz */
+
+/* status bits for all 4 MAX3110 operate modes */
+#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
+#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
+
+#define WC_TAG (3 << 14)
+#define RC_TAG (1 << 14)
+#define WD_TAG (2 << 14)
+#define RD_TAG (0 << 14)
+
+/* bits def for write configuration */
+#define WC_FIFO_ENABLE_MASK (1 << 13)
+#define WC_FIFO_ENABLE (0 << 13)
+
+#define WC_SW_SHDI (1 << 12)
+
+#define WC_IRQ_MASK (0xF << 8)
+#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
+#define WC_RXA_IRQ_ENABLE (1 << 10) /* RX available irq */
+#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
+#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
+
+#define WC_IRDA_ENABLE (1 << 7)
+
+#define WC_STOPBITS_MASK (1 << 6)
+#define WC_2_STOPBITS (1 << 6)
+#define WC_1_STOPBITS (0 << 6)
+
+#define WC_PARITY_ENABLE_MASK (1 << 5)
+#define WC_PARITY_ENABLE (1 << 5)
+
+#define WC_WORDLEN_MASK (1 << 4)
+#define WC_7BIT_WORD (1 << 4)
+#define WC_8BIT_WORD (0 << 4)
+
+#define WC_BAUD_DIV_MASK (0xF)
+#define WC_BAUD_DR1 (0x0)
+#define WC_BAUD_DR2 (0x1)
+#define WC_BAUD_DR4 (0x2)
+#define WC_BAUD_DR8 (0x3)
+#define WC_BAUD_DR16 (0x4)
+#define WC_BAUD_DR32 (0x5)
+#define WC_BAUD_DR64 (0x6)
+#define WC_BAUD_DR128 (0x7)
+#define WC_BAUD_DR3 (0x8)
+#define WC_BAUD_DR6 (0x9)
+#define WC_BAUD_DR12 (0xA)
+#define WC_BAUD_DR24 (0xB)
+#define WC_BAUD_DR48 (0xC)
+#define WC_BAUD_DR96 (0xD)
+#define WC_BAUD_DR192 (0xE)
+#define WC_BAUD_DR384 (0xF)
+
+#endif
diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c
index e65b0d9202a..de173671e3d 100644
--- a/drivers/serial/nwpserial.c
+++ b/drivers/serial/nwpserial.c
@@ -344,7 +344,7 @@ int nwpserial_register_port(struct uart_port *port)
mutex_lock(&nwpserial_mutex);
- dn = to_of_device(port->dev)->dev.of_node;
+ dn = port->dev->of_node;
if (dn == NULL)
goto out;
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index a48d9080f55..2af8fd11312 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,11 +14,10 @@
#include <linux/slab.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
-#include <asm/prom.h>
-
struct of_serial_info {
int type;
int line;
@@ -27,7 +26,7 @@ struct of_serial_info {
/*
* Fill a struct uart_port for a given device node
*/
-static int __devinit of_platform_serial_setup(struct of_device *ofdev,
+static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
int type, struct uart_port *port)
{
struct resource resource;
@@ -80,7 +79,7 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev,
/*
* Try to register a serial port
*/
-static int __devinit of_platform_serial_probe(struct of_device *ofdev,
+static int __devinit of_platform_serial_probe(struct platform_device *ofdev,
const struct of_device_id *id)
{
struct of_serial_info *info;
@@ -134,7 +133,7 @@ out:
/*
* Release a line
*/
-static int of_platform_serial_remove(struct of_device *ofdev)
+static int of_platform_serial_remove(struct platform_device *ofdev)
{
struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
switch (info->type) {
diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c
index 4a789e5361a..6ebccd70a70 100644
--- a/drivers/serial/s5pv210.c
+++ b/drivers/serial/s5pv210.c
@@ -28,8 +28,12 @@
static int s5pv210_serial_setsource(struct uart_port *port,
struct s3c24xx_uart_clksrc *clk)
{
+ struct s3c2410_uartcfg *cfg = port->dev->platform_data;
unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ if ((cfg->clocks_size) == 1)
+ return 0;
+
if (strcmp(clk->name, "pclk") == 0)
ucon &= ~S5PV210_UCON_CLKMASK;
else if (strcmp(clk->name, "uclk1") == 0)
@@ -47,10 +51,14 @@ static int s5pv210_serial_setsource(struct uart_port *port,
static int s5pv210_serial_getsource(struct uart_port *port,
struct s3c24xx_uart_clksrc *clk)
{
+ struct s3c2410_uartcfg *cfg = port->dev->platform_data;
u32 ucon = rd_regl(port, S3C2410_UCON);
clk->divisor = 1;
+ if ((cfg->clocks_size) == 1)
+ return 0;
+
switch (ucon & S5PV210_UCON_CLKMASK) {
case S5PV210_UCON_PCLK:
clk->name = "pclk";
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index a9d6c5626a0..b1156ba8ad1 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -705,8 +705,13 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (ourport->info->has_divslot) {
unsigned int div = ourport->baudclk_rate / baud;
- udivslot = udivslot_table[div & 15];
- dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
+ if (cfg->has_fracval) {
+ udivslot = (div & 15);
+ dbg("fracval = %04x\n", udivslot);
+ } else {
+ udivslot = udivslot_table[div & 15];
+ dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
+ }
}
switch (termios->c_cflag & CSIZE) {
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 7f283070951..cd8511298bc 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -58,9 +58,9 @@ static struct lock_class_key port_lock_key;
#define uart_console(port) (0)
#endif
-static void uart_change_speed(struct uart_state *state,
+static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
-static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
+static void __uart_wait_until_sent(struct uart_port *port, int timeout);
static void uart_change_pm(struct uart_state *state, int pm_state);
/*
@@ -137,7 +137,7 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
* Startup the port. This will be called once per open. All calls
* will be serialised by the per-port mutex.
*/
-static int uart_startup(struct uart_state *state, int init_hw)
+static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
@@ -152,7 +152,7 @@ static int uart_startup(struct uart_state *state, int init_hw)
* once we have successfully opened the port. Also set
* up the tty->alt_speed kludge
*/
- set_bit(TTY_IO_ERROR, &port->tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
if (uport->type == PORT_UNKNOWN)
return 0;
@@ -177,26 +177,26 @@ static int uart_startup(struct uart_state *state, int init_hw)
/*
* Initialise the hardware port settings.
*/
- uart_change_speed(state, NULL);
+ uart_change_speed(tty, state, NULL);
/*
* Setup the RTS and DTR signals once the
* port is open and ready to respond.
*/
- if (port->tty->termios->c_cflag & CBAUD)
+ if (tty->termios->c_cflag & CBAUD)
uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
}
if (port->flags & ASYNC_CTS_FLOW) {
spin_lock_irq(&uport->lock);
if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS))
- port->tty->hw_stopped = 1;
+ tty->hw_stopped = 1;
spin_unlock_irq(&uport->lock);
}
set_bit(ASYNCB_INITIALIZED, &port->flags);
- clear_bit(TTY_IO_ERROR, &port->tty->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
}
if (retval && capable(CAP_SYS_ADMIN))
@@ -210,11 +210,10 @@ static int uart_startup(struct uart_state *state, int init_hw)
* DTR is dropped if the hangup on close termio flag is on. Calls to
* uart_shutdown are serialised by the per-port semaphore.
*/
-static void uart_shutdown(struct uart_state *state)
+static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
- struct tty_struct *tty = port->tty;
/*
* Set the TTY IO error marker
@@ -430,11 +429,10 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
EXPORT_SYMBOL(uart_get_divisor);
/* FIXME: Consistent locking policy */
-static void
-uart_change_speed(struct uart_state *state, struct ktermios *old_termios)
+static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
+ struct ktermios *old_termios)
{
struct tty_port *port = &state->port;
- struct tty_struct *tty = port->tty;
struct uart_port *uport = state->uart_port;
struct ktermios *termios;
@@ -463,8 +461,8 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios)
uport->ops->set_termios(uport, termios, old_termios);
}
-static inline int
-__uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c)
+static inline int __uart_put_char(struct uart_port *port,
+ struct circ_buf *circ, unsigned char c)
{
unsigned long flags;
int ret = 0;
@@ -494,8 +492,8 @@ static void uart_flush_chars(struct tty_struct *tty)
uart_start(tty);
}
-static int
-uart_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static int uart_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
@@ -675,7 +673,7 @@ static int uart_get_info(struct uart_state *state,
return 0;
}
-static int uart_set_info(struct uart_state *state,
+static int uart_set_info(struct tty_struct *tty, struct uart_state *state,
struct serial_struct __user *newinfo)
{
struct serial_struct new_serial;
@@ -770,7 +768,7 @@ static int uart_set_info(struct uart_state *state,
* We need to shutdown the serial port at the old
* port/type/irq combination.
*/
- uart_shutdown(state);
+ uart_shutdown(tty, state);
}
if (change_port) {
@@ -869,25 +867,27 @@ static int uart_set_info(struct uart_state *state,
"is deprecated.\n", current->comm,
tty_name(port->tty, buf));
}
- uart_change_speed(state, NULL);
+ uart_change_speed(tty, state, NULL);
}
} else
- retval = uart_startup(state, 1);
+ retval = uart_startup(tty, state, 1);
exit:
mutex_unlock(&port->mutex);
return retval;
}
-
-/*
- * uart_get_lsr_info - get line status register info.
- * Note: uart_ioctl protects us against hangups.
+/**
+ * uart_get_lsr_info - get line status register info
+ * @tty: tty associated with the UART
+ * @state: UART being queried
+ * @value: returned modem value
+ *
+ * Note: uart_ioctl protects us against hangups.
*/
-static int uart_get_lsr_info(struct uart_state *state,
- unsigned int __user *value)
+static int uart_get_lsr_info(struct tty_struct *tty,
+ struct uart_state *state, unsigned int __user *value)
{
struct uart_port *uport = state->uart_port;
- struct tty_port *port = &state->port;
unsigned int result;
result = uport->ops->tx_empty(uport);
@@ -900,7 +900,7 @@ static int uart_get_lsr_info(struct uart_state *state,
*/
if (uport->x_char ||
((uart_circ_chars_pending(&state->xmit) > 0) &&
- !port->tty->stopped && !port->tty->hw_stopped))
+ !tty->stopped && !tty->hw_stopped))
result &= ~TIOCSER_TEMT;
return put_user(result, value);
@@ -961,7 +961,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
return 0;
}
-static int uart_do_autoconfig(struct uart_state *state)
+static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
@@ -980,7 +980,7 @@ static int uart_do_autoconfig(struct uart_state *state)
ret = -EBUSY;
if (tty_port_users(port) == 1) {
- uart_shutdown(state);
+ uart_shutdown(tty, state);
/*
* If we already have a port type configured,
@@ -999,7 +999,7 @@ static int uart_do_autoconfig(struct uart_state *state)
*/
uport->ops->config_port(uport, flags);
- ret = uart_startup(state, 1);
+ ret = uart_startup(tty, state, 1);
}
mutex_unlock(&port->mutex);
return ret;
@@ -1122,11 +1122,11 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
break;
case TIOCSSERIAL:
- ret = uart_set_info(state, uarg);
+ ret = uart_set_info(tty, state, uarg);
break;
case TIOCSERCONFIG:
- ret = uart_do_autoconfig(state);
+ ret = uart_do_autoconfig(tty, state);
break;
case TIOCSERGWILD: /* obsolete */
@@ -1172,7 +1172,7 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
*/
switch (cmd) {
case TIOCSERGETLSR: /* Get line status register */
- ret = uart_get_lsr_info(state, uarg);
+ ret = uart_get_lsr_info(tty, state, uarg);
break;
default: {
@@ -1194,7 +1194,7 @@ static void uart_set_ldisc(struct tty_struct *tty)
struct uart_port *uport = state->uart_port;
if (uport->ops->set_ldisc)
- uport->ops->set_ldisc(uport);
+ uport->ops->set_ldisc(uport, tty->termios->c_line);
}
static void uart_set_termios(struct tty_struct *tty,
@@ -1219,7 +1219,7 @@ static void uart_set_termios(struct tty_struct *tty,
return;
}
- uart_change_speed(state, old_termios);
+ uart_change_speed(tty, state, old_termios);
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
@@ -1272,8 +1272,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
struct uart_state *state = tty->driver_data;
struct tty_port *port;
struct uart_port *uport;
+ unsigned long flags;
- BUG_ON(!kernel_locked());
+ BUG_ON(!tty_locked());
if (!state)
return;
@@ -1284,9 +1285,12 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
pr_debug("uart_close(%d) called\n", uport->line);
mutex_lock(&port->mutex);
+ spin_lock_irqsave(&port->lock, flags);
- if (tty_hung_up_p(filp))
+ if (tty_hung_up_p(filp)) {
+ spin_unlock_irqrestore(&port->lock, flags);
goto done;
+ }
if ((tty->count == 1) && (port->count != 1)) {
/*
@@ -1305,8 +1309,10 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
tty->name, port->count);
port->count = 0;
}
- if (port->count)
+ if (port->count) {
+ spin_unlock_irqrestore(&port->lock, flags);
goto done;
+ }
/*
* Now we wait for the transmit buffer to clear; and we notify
@@ -1314,9 +1320,18 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* setting tty->closing.
*/
tty->closing = 1;
+ spin_unlock_irqrestore(&port->lock, flags);
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, msecs_to_jiffies(port->closing_wait));
+ if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
+ /*
+ * hack: open-coded tty_wait_until_sent to avoid
+ * recursive tty_lock
+ */
+ long timeout = msecs_to_jiffies(port->closing_wait);
+ if (wait_event_interruptible_timeout(tty->write_wait,
+ !tty_chars_in_buffer(tty), timeout) >= 0)
+ __uart_wait_until_sent(uport, timeout);
+ }
/*
* At this point, we stop accepting input. To do this, we
@@ -1332,45 +1347,47 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
- uart_wait_until_sent(tty, uport->timeout);
+ __uart_wait_until_sent(uport, uport->timeout);
}
- uart_shutdown(state);
+ uart_shutdown(tty, state);
uart_flush_buffer(tty);
tty_ldisc_flush(tty);
- tty->closing = 0;
tty_port_tty_set(port, NULL);
+ spin_lock_irqsave(&port->lock, flags);
+ tty->closing = 0;
if (port->blocked_open) {
+ spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
msleep_interruptible(port->close_delay);
+ spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
+ spin_unlock_irqrestore(&port->lock, flags);
uart_change_pm(state, 3);
+ spin_lock_irqsave(&port->lock, flags);
}
/*
* Wake up anyone trying to open this port.
*/
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+ spin_unlock_irqrestore(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
done:
mutex_unlock(&port->mutex);
}
-static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
+static void __uart_wait_until_sent(struct uart_port *port, int timeout)
{
- struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
unsigned long char_time, expire;
if (port->type == PORT_UNKNOWN || port->fifosize == 0)
return;
- lock_kernel();
-
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
@@ -1416,7 +1433,16 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
set_current_state(TASK_RUNNING); /* might not be needed */
- unlock_kernel();
+}
+
+static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+
+ tty_lock();
+ __uart_wait_until_sent(port, timeout);
+ tty_unlock();
}
/*
@@ -1429,16 +1455,19 @@ static void uart_hangup(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
+ unsigned long flags;
- BUG_ON(!kernel_locked());
+ BUG_ON(!tty_locked());
pr_debug("uart_hangup(%d)\n", state->uart_port->line);
mutex_lock(&port->mutex);
if (port->flags & ASYNC_NORMAL_ACTIVE) {
uart_flush_buffer(tty);
- uart_shutdown(state);
+ uart_shutdown(tty, state);
+ spin_lock_irqsave(&port->lock, flags);
port->count = 0;
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+ spin_unlock_irqrestore(&port->lock, flags);
tty_port_tty_set(port, NULL);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->delta_msr_wait);
@@ -1446,15 +1475,19 @@ static void uart_hangup(struct tty_struct *tty)
mutex_unlock(&port->mutex);
}
-/*
- * Copy across the serial console cflag setting into the termios settings
- * for the initial open of the port. This allows continuity between the
- * kernel settings, and the settings init adopts when it opens the port
- * for the first time.
+/**
+ * uart_update_termios - update the terminal hw settings
+ * @tty: tty associated with UART
+ * @state: UART to update
+ *
+ * Copy across the serial console cflag setting into the termios settings
+ * for the initial open of the port. This allows continuity between the
+ * kernel settings, and the settings init adopts when it opens the port
+ * for the first time.
*/
-static void uart_update_termios(struct uart_state *state)
+static void uart_update_termios(struct tty_struct *tty,
+ struct uart_state *state)
{
- struct tty_struct *tty = state->port.tty;
struct uart_port *port = state->uart_port;
if (uart_console(port) && port->cons->cflag) {
@@ -1471,7 +1504,7 @@ static void uart_update_termios(struct uart_state *state)
/*
* Make termios settings take effect.
*/
- uart_change_speed(state, NULL);
+ uart_change_speed(tty, state, NULL);
/*
* And finally enable the RTS and DTR signals.
@@ -1481,90 +1514,37 @@ static void uart_update_termios(struct uart_state *state)
}
}
-/*
- * Block the open until the port is ready. We must be called with
- * the per-port semaphore held.
- */
-static int
-uart_block_til_ready(struct file *filp, struct uart_state *state)
+static int uart_carrier_raised(struct tty_port *port)
{
- DECLARE_WAITQUEUE(wait, current);
+ struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
- struct tty_port *port = &state->port;
- unsigned int mctrl;
-
- port->blocked_open++;
- port->count--;
-
- add_wait_queue(&port->open_wait, &wait);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- /*
- * If we have been hung up, tell userspace/restart open.
- */
- if (tty_hung_up_p(filp) || port->tty == NULL)
- break;
-
- /*
- * If the port has been closed, tell userspace/restart open.
- */
- if (!(port->flags & ASYNC_INITIALIZED))
- break;
+ int mctrl;
+ spin_lock_irq(&uport->lock);
+ uport->ops->enable_ms(uport);
+ mctrl = uport->ops->get_mctrl(uport);
+ spin_unlock_irq(&uport->lock);
+ if (mctrl & TIOCM_CAR)
+ return 1;
+ return 0;
+}
- /*
- * If non-blocking mode is set, or CLOCAL mode is set,
- * we don't want to wait for the modem status lines to
- * indicate that the port is ready.
- *
- * Also, if the port is not enabled/configured, we want
- * to allow the open to succeed here. Note that we will
- * have set TTY_IO_ERROR for a non-existant port.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (port->tty->termios->c_cflag & CLOCAL) ||
- (port->tty->flags & (1 << TTY_IO_ERROR)))
- break;
+static void uart_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
- /*
- * Set DTR to allow modem to know we're waiting. Do
- * not set RTS here - we want to make sure we catch
- * the data from the modem.
- */
- if (port->tty->termios->c_cflag & CBAUD)
- uart_set_mctrl(uport, TIOCM_DTR);
+ if (onoff) {
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
/*
- * and wait for the carrier to indicate that the
- * modem is ready for us.
+ * If this is the first open to succeed,
+ * adjust things to suit.
*/
- spin_lock_irq(&uport->lock);
- uport->ops->enable_ms(uport);
- mctrl = uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
- if (mctrl & TIOCM_CAR)
- break;
-
- mutex_unlock(&port->mutex);
- schedule();
- mutex_lock(&port->mutex);
-
- if (signal_pending(current))
- break;
+ if (!test_and_set_bit(ASYNCB_NORMAL_ACTIVE, &port->flags))
+ uart_update_termios(port->tty, state);
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
-
- port->count++;
- port->blocked_open--;
-
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (!port->tty || tty_hung_up_p(filp))
- return -EAGAIN;
-
- return 0;
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
static struct uart_state *uart_get(struct uart_driver *drv, int line)
@@ -1611,7 +1591,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
struct tty_port *port;
int retval, line = tty->index;
- BUG_ON(!kernel_locked());
+ BUG_ON(!tty_locked());
pr_debug("uart_open(%d) called\n", line);
/*
@@ -1668,23 +1648,14 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
/*
* Start up the serial port.
*/
- retval = uart_startup(state, 0);
+ retval = uart_startup(tty, state, 0);
/*
* If we succeeded, wait until the port is ready.
*/
- if (retval == 0)
- retval = uart_block_til_ready(filp, state);
mutex_unlock(&port->mutex);
-
- /*
- * If this is the first open to succeed, adjust things to suit.
- */
- if (retval == 0 && !(port->flags & ASYNC_NORMAL_ACTIVE)) {
- set_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
-
- uart_update_termios(state);
- }
+ if (retval == 0)
+ retval = tty_port_block_til_ready(port, tty, filp);
fail:
return retval;
@@ -2010,9 +1981,13 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
struct tty_port *port = &state->port;
struct device *tty_dev;
struct uart_match match = {uport, drv};
+ struct tty_struct *tty;
mutex_lock(&port->mutex);
+ /* Must be inside the mutex lock until we convert to tty_port */
+ tty = port->tty;
+
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (device_may_wakeup(tty_dev)) {
enable_irq_wake(uport->irq);
@@ -2105,9 +2080,12 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
if (console_suspend_enabled || !uart_console(uport)) {
+ /* Protected by port mutex for now */
+ struct tty_struct *tty = port->tty;
ret = ops->startup(uport);
if (ret == 0) {
- uart_change_speed(state, NULL);
+ if (tty)
+ uart_change_speed(tty, state, NULL);
spin_lock_irq(&uport->lock);
ops->set_mctrl(uport, uport->mctrl);
ops->start_tx(uport);
@@ -2119,7 +2097,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
* Clear the "initialized" flag so we won't try
* to call the low level drivers shutdown method.
*/
- uart_shutdown(state);
+ uart_shutdown(tty, state);
}
}
@@ -2312,6 +2290,11 @@ static const struct tty_operations uart_ops = {
#endif
};
+static const struct tty_port_operations uart_port_ops = {
+ .carrier_raised = uart_carrier_raised,
+ .dtr_rts = uart_dtr_rts,
+};
+
/**
* uart_register_driver - register a driver with the uart core layer
* @drv: low level driver structure
@@ -2368,6 +2351,7 @@ int uart_register_driver(struct uart_driver *drv)
struct tty_port *port = &state->port;
tty_port_init(port);
+ port->ops = &uart_port_ops;
port->close_delay = 500; /* .5 seconds */
port->closing_wait = 30000; /* 30 seconds */
tasklet_init(&state->tlet, uart_tasklet_action,
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index ab17c08ddc0..7d475b2a79e 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -45,7 +45,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -115,16 +114,14 @@ static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_
static int quirk_post_ibm(struct pcmcia_device *link)
{
- conf_reg_t reg = { 0, CS_READ, 0x800, 0 };
+ u8 val;
int ret;
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_read_config_byte(link, 0x800, &val);
if (ret)
goto failed;
- reg.Action = CS_WRITE;
- reg.Value = reg.Value | 1;
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_write_config_byte(link, 0x800, val | 1);
if (ret)
goto failed;
return 0;
@@ -338,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 8;
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -414,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
/*====================================================================*/
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+ unsigned int port = 0;
+ struct serial_info *info = p_dev->priv;
+
+ if ((p_dev->resource[1]->end != 0) &&
+ (resource_size(p_dev->resource[1]) == 8)) {
+ port = p_dev->resource[1]->start;
+ info->slave = 1;
+ } else if ((info->manfid == MANFID_OSITECH) &&
+ (resource_size(p_dev->resource[0]) == 0x40)) {
+ port = p_dev->resource[0]->start + 0x28;
+ info->slave = 1;
+ }
+ if (info->slave)
+ return setup_serial(p_dev, info, port, p_dev->irq);
+
+ dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+ return -ENODEV;
+}
+
static int simple_config_check(struct pcmcia_device *p_dev,
cistpl_cftable_entry_t *cf,
cistpl_cftable_entry_t *dflt,
@@ -427,12 +443,13 @@ static int simple_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Vpp =
cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ p_dev->io_lines = ((*try & 0x1) == 0) ?
+ 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
+
if ((cf->io.nwin > 0) && (cf->io.win[0].len == size_table[(*try >> 1)])
&& (cf->io.win[0].base != 0)) {
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.IOAddrLines = ((*try & 0x1) == 0) ?
- 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -EINVAL;
@@ -449,9 +466,9 @@ static int simple_config_check_notpicky(struct pcmcia_device *p_dev,
if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
for (j = 0; j < 5; j++) {
- p_dev->io.BasePort1 = base[j];
- p_dev->io.IOAddrLines = base[j] ? 16 : 3;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = base[j];
+ p_dev->io_lines = base[j] ? 16 : 3;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
@@ -463,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
struct serial_info *info = link->priv;
int i = -ENODEV, try;
- /* If the card is already configured, look up the port and irq */
- if (link->function_config) {
- unsigned int port = 0;
- if ((link->io.BasePort2 != 0) &&
- (link->io.NumPorts2 == 8)) {
- port = link->io.BasePort2;
- info->slave = 1;
- } else if ((info->manfid == MANFID_OSITECH) &&
- (link->io.NumPorts1 == 0x40)) {
- port = link->io.BasePort1 + 0x28;
- info->slave = 1;
- }
- if (info->slave) {
- return setup_serial(link, info, port,
- link->irq);
- }
- }
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 8;
/* First pass: look for a config entry that looks normal.
* Two tries: without IO aliases, then with aliases */
@@ -493,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
goto found_port;
- printk(KERN_NOTICE
- "serial_cs: no usable port range found, giving up\n");
+ dev_warn(&link->dev, "no usable port range found, giving up\n");
return -1;
found_port:
@@ -510,7 +511,7 @@ found_port:
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
return -1;
- return setup_serial(link, info, link->io.BasePort1, link->irq);
+ return setup_serial(link, info, link->resource[0]->start, link->irq);
}
static int multi_config_check(struct pcmcia_device *p_dev,
@@ -524,10 +525,10 @@ static int multi_config_check(struct pcmcia_device *p_dev,
/* The quad port cards have bad CIS's, so just look for a
window larger than 8 ports and assume it will be right */
if ((cf->io.nwin == 1) && (cf->io.win[0].len > 8)) {
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- if (!pcmcia_request_io(p_dev, &p_dev->io)) {
- *base2 = p_dev->io.BasePort1 + 8;
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ if (!pcmcia_request_io(p_dev)) {
+ *base2 = p_dev->resource[0]->start + 8;
return 0;
}
}
@@ -543,11 +544,11 @@ static int multi_config_check_notpicky(struct pcmcia_device *p_dev,
int *base2 = priv_data;
if (cf->io.nwin == 2) {
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.BasePort2 = cf->io.win[1].base;
- p_dev->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- if (!pcmcia_request_io(p_dev, &p_dev->io)) {
- *base2 = p_dev->io.BasePort2;
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->resource[1]->start = cf->io.win[1].base;
+ p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ if (!pcmcia_request_io(p_dev)) {
+ *base2 = p_dev->resource[1]->start;
return 0;
}
}
@@ -560,22 +561,22 @@ static int multi_config(struct pcmcia_device *link)
int i, base2 = 0;
/* First, look for a generic full-sized window */
- link->io.NumPorts1 = info->multi * 8;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = info->multi * 8;
if (pcmcia_loop_config(link, multi_config_check, &base2)) {
/* If that didn't work, look for two windows */
- link->io.NumPorts1 = link->io.NumPorts2 = 8;
+ link->resource[0]->end = link->resource[1]->end = 8;
info->multi = 2;
if (pcmcia_loop_config(link, multi_config_check_notpicky,
&base2)) {
- printk(KERN_NOTICE "serial_cs: no usable port range"
+ dev_warn(&link->dev, "no usable port range "
"found, giving up\n");
return -ENODEV;
}
}
if (!link->irq)
- dev_warn(&link->dev,
- "serial_cs: no usable IRQ found, continuing...\n");
+ dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
/*
* Apply any configuration quirks.
@@ -599,9 +600,9 @@ static int multi_config(struct pcmcia_device *link)
link->conf.ConfigIndex == 3) {
err = setup_serial(link, info, base2,
link->irq);
- base2 = link->io.BasePort1;
+ base2 = link->resource[0]->start;
} else {
- err = setup_serial(link, info, link->io.BasePort1,
+ err = setup_serial(link, info, link->resource[0]->start,
link->irq);
}
info->c950ctrl = base2;
@@ -616,7 +617,7 @@ static int multi_config(struct pcmcia_device *link)
return 0;
}
- setup_serial(link, info, link->io.BasePort1, link->irq);
+ setup_serial(link, info, link->resource[0]->start, link->irq);
for (i = 0; i < info->multi - 1; i++)
setup_serial(link, info, base2 + (8 * i),
link->irq);
@@ -677,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
multifunction cards that ask for appropriate IO port ranges */
if ((info->multi == 0) &&
(link->has_func_id) &&
+ (link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
(link->func_id == CISTPL_FUNCID_SERIAL)))
pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -687,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
if (info->quirk && info->quirk->multi != -1)
info->multi = info->quirk->multi;
- if (info->multi > 1)
+ dev_info(&link->dev,
+ "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+ link->manf_id, link->card_id,
+ link->socket->pcmcia_pfc, info->multi, info->quirk);
+ if (link->socket->pcmcia_pfc)
+ i = pfc_config(link);
+ else if (info->multi > 1)
i = multi_config(link);
else
i = simple_config(link);
@@ -706,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
return 0;
failed:
- dev_warn(&link->dev, "serial_cs: failed to initialize\n");
+ dev_warn(&link->dev, "failed to initialize\n");
serial_remove(link);
return -ENODEV;
}
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 5f90fcd7d10..c291b3add1d 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -346,6 +346,27 @@ static int scif_rxfill(struct uart_port *port)
return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
}
}
+#elif defined(CONFIG_ARCH_SH7372)
+static int scif_txfill(struct uart_port *port)
+{
+ if (port->type == PORT_SCIFA)
+ return sci_in(port, SCFDR) >> 8;
+ else
+ return sci_in(port, SCTFDR);
+}
+
+static int scif_txroom(struct uart_port *port)
+{
+ return port->fifosize - scif_txfill(port);
+}
+
+static int scif_rxfill(struct uart_port *port)
+{
+ if (port->type == PORT_SCIFA)
+ return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
+ else
+ return sci_in(port, SCRFDR);
+}
#else
static int scif_txfill(struct uart_port *port)
{
@@ -683,7 +704,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
u16 ssr = sci_in(port, SCxSR);
/* Disable future Rx interrupts */
- if (port->type == PORT_SCIFA) {
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
disable_irq_nosync(irq);
scr |= 0x4000;
} else {
@@ -928,7 +949,7 @@ static void sci_dma_tx_complete(void *arg)
if (!uart_circ_empty(xmit)) {
schedule_work(&s->work_tx);
- } else if (port->type == PORT_SCIFA) {
+ } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
u16 ctrl = sci_in(port, SCSCR);
sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
}
@@ -1184,7 +1205,7 @@ static void sci_start_tx(struct uart_port *port)
unsigned short ctrl;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- if (port->type == PORT_SCIFA) {
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
u16 new, scr = sci_in(port, SCSCR);
if (s->chan_tx)
new = scr | 0x8000;
@@ -1197,7 +1218,7 @@ static void sci_start_tx(struct uart_port *port)
s->cookie_tx < 0)
schedule_work(&s->work_tx);
#endif
- if (!s->chan_tx || port->type == PORT_SCIFA) {
+ if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
@@ -1210,7 +1231,7 @@ static void sci_stop_tx(struct uart_port *port)
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
- if (port->type == PORT_SCIFA)
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~0x8000;
ctrl &= ~SCI_CTRL_FLAGS_TIE;
sci_out(port, SCSCR, ctrl);
@@ -1222,7 +1243,7 @@ static void sci_start_rx(struct uart_port *port)
/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl |= sci_in(port, SCSCR);
- if (port->type == PORT_SCIFA)
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~0x4000;
sci_out(port, SCSCR, ctrl);
}
@@ -1233,7 +1254,7 @@ static void sci_stop_rx(struct uart_port *port)
/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
- if (port->type == PORT_SCIFA)
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~0x4000;
ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
sci_out(port, SCSCR, ctrl);
@@ -1271,7 +1292,7 @@ static void rx_timer_fn(unsigned long arg)
struct uart_port *port = &s->port;
u16 scr = sci_in(port, SCSCR);
- if (port->type == PORT_SCIFA) {
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
scr &= ~0x4000;
enable_irq(s->irqs[1]);
}
@@ -1524,6 +1545,8 @@ static const char *sci_type(struct uart_port *port)
return "scif";
case PORT_SCIFA:
return "scifa";
+ case PORT_SCIFB:
+ return "scifb";
}
return NULL;
@@ -1612,6 +1635,9 @@ static int __devinit sci_init_single(struct platform_device *dev,
port->line = index;
switch (p->type) {
+ case PORT_SCIFB:
+ port->fifosize = 256;
+ break;
case PORT_SCIFA:
port->fifosize = 64;
break;
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index f70c49f915f..9b52f77a930 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -322,7 +322,7 @@
#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
static inline unsigned int sci_##name##_in(struct uart_port *port) \
{ \
- if (port->type == PORT_SCIF) { \
+ if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
SCI_IN(scif_size, scif_offset) \
} else { /* PORT_SCI or PORT_SCIFA */ \
SCI_IN(sci_size, sci_offset); \
@@ -330,7 +330,7 @@
} \
static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
{ \
- if (port->type == PORT_SCIF) { \
+ if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
SCI_OUT(scif_size, scif_offset, value) \
} else { /* PORT_SCI or PORT_SCIFA */ \
SCI_OUT(sci_size, sci_offset, value); \
@@ -384,8 +384,12 @@
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
+ defined(CONFIG_ARCH_SH7377)
+#define SCIF_FNS(name, scif_offset, scif_size) \
+ CPU_SCIF_FNS(name, scif_offset, scif_size)
+#elif defined(CONFIG_ARCH_SH7372)
+#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
+ CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
#define SCIF_FNS(name, scif_offset, scif_size) \
CPU_SCIF_FNS(name, scif_offset, scif_size)
#else
@@ -422,8 +426,7 @@
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
+ defined(CONFIG_ARCH_SH7377)
SCIF_FNS(SCSMR, 0x00, 16)
SCIF_FNS(SCBRR, 0x04, 8)
@@ -436,6 +439,20 @@ SCIF_FNS(SCFDR, 0x1c, 16)
SCIF_FNS(SCxTDR, 0x20, 8)
SCIF_FNS(SCxRDR, 0x24, 8)
SCIF_FNS(SCLSR, 0x00, 0)
+#elif defined(CONFIG_ARCH_SH7372)
+SCIF_FNS(SCSMR, 0x00, 16)
+SCIF_FNS(SCBRR, 0x04, 8)
+SCIF_FNS(SCSCR, 0x08, 16)
+SCIF_FNS(SCTDSR, 0x0c, 16)
+SCIF_FNS(SCFER, 0x10, 16)
+SCIF_FNS(SCxSR, 0x14, 16)
+SCIF_FNS(SCFCR, 0x18, 16)
+SCIF_FNS(SCFDR, 0x1c, 16)
+SCIF_FNS(SCTFDR, 0x38, 16)
+SCIF_FNS(SCRFDR, 0x3c, 16)
+SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8)
+SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8)
+SCIF_FNS(SCLSR, 0x00, 0)
#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
defined(CONFIG_CPU_SUBTYPE_SH7724)
SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 7e5e5efea4e..cff9a306660 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
sysrq_requested = 0;
if (ch && time_before(jiffies, sysrq_timeout)) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
- handle_sysrq(ch, NULL);
+ handle_sysrq(ch);
spin_lock_irqsave(&port->sc_port.lock, flags);
/* ignore actual sysrq command char */
continue;
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 544f2e25d0e..6381a0282ee 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors);
int sunserial_console_match(struct console *con, struct device_node *dp,
struct uart_driver *drv, int line, bool ignore_line)
{
- if (!con || of_console_device != dp)
+ if (!con)
+ return 0;
+
+ drv->cons = con;
+
+ if (of_console_device != dp)
return 0;
if (!ignore_line) {
@@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
return 0;
}
- con->index = line;
- drv->cons = con;
-
- if (!console_set_on_cmdline)
+ if (!console_set_on_cmdline) {
+ con->index = line;
add_preferred_console(con->name, line, NULL);
-
+ }
return 1;
}
EXPORT_SYMBOL(sunserial_console_match);
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index 890f9174296..c9014868297 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -519,13 +519,13 @@ static struct console sunhv_console = {
.data = &sunhv_reg,
};
-static int __devinit hv_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit hv_probe(struct platform_device *op, const struct of_device_id *match)
{
struct uart_port *port;
unsigned long minor;
int err;
- if (op->irqs[0] == 0xffffffff)
+ if (op->archdata.irqs[0] == 0xffffffff)
return -ENODEV;
port = kzalloc(sizeof(struct uart_port), GFP_KERNEL);
@@ -557,7 +557,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
port->membase = (unsigned char __iomem *) __pa(port);
- port->irq = op->irqs[0];
+ port->irq = op->archdata.irqs[0];
port->dev = &op->dev;
@@ -598,7 +598,7 @@ out_free_port:
return err;
}
-static int __devexit hv_remove(struct of_device *dev)
+static int __devexit hv_remove(struct platform_device *dev)
{
struct uart_port *port = dev_get_drvdata(&dev->dev);
@@ -644,12 +644,12 @@ static int __init sunhv_init(void)
if (tlb_type != hypervisor)
return -ENODEV;
- return of_register_driver(&hv_driver, &of_bus_type);
+ return of_register_platform_driver(&hv_driver);
}
static void __exit sunhv_exit(void)
{
- of_unregister_driver(&hv_driver);
+ of_unregister_platform_driver(&hv_driver);
}
module_init(sunhv_init);
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 5e81bc6b48b..5b246b18f42 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options)
printk("Console: ttyS%d (SAB82532)\n",
(sunsab_reg.minor - 64) + con->index);
- sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node);
+ sunserial_console_termios(con, up->port.dev->of_node);
switch (con->cflag & CBAUD) {
case B150: baud = 150; break;
@@ -954,7 +954,7 @@ static inline struct console *SUNSAB_CONSOLE(void)
#endif
static int __devinit sunsab_init_one(struct uart_sunsab_port *up,
- struct of_device *op,
+ struct platform_device *op,
unsigned long offset,
int line)
{
@@ -969,7 +969,7 @@ static int __devinit sunsab_init_one(struct uart_sunsab_port *up,
return -ENOMEM;
up->regs = (union sab82532_async_regs __iomem *) up->port.membase;
- up->port.irq = op->irqs[0];
+ up->port.irq = op->archdata.irqs[0];
up->port.fifosize = SAB82532_XMIT_FIFO_SIZE;
up->port.iotype = UPIO_MEM;
@@ -1006,7 +1006,7 @@ static int __devinit sunsab_init_one(struct uart_sunsab_port *up,
return 0;
}
-static int __devinit sab_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit sab_probe(struct platform_device *op, const struct of_device_id *match)
{
static int inst;
struct uart_sunsab_port *up;
@@ -1062,7 +1062,7 @@ out:
return err;
}
-static int __devexit sab_remove(struct of_device *op)
+static int __devexit sab_remove(struct platform_device *op)
{
struct uart_sunsab_port *up = dev_get_drvdata(&op->dev);
@@ -1130,12 +1130,12 @@ static int __init sunsab_init(void)
}
}
- return of_register_driver(&sab_driver, &of_bus_type);
+ return of_register_platform_driver(&sab_driver);
}
static void __exit sunsab_exit(void)
{
- of_unregister_driver(&sab_driver);
+ of_unregister_platform_driver(&sab_driver);
if (sunsab_reg.nr) {
sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr);
}
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index ffbf4553f66..551ebfe3ccb 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1200,7 +1200,7 @@ static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up)
return -ENODEV;
printk("%s: %s port at %llx, irq %u\n",
- to_of_device(up->port.dev)->dev.of_node->full_name,
+ up->port.dev->of_node->full_name,
(up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse",
(unsigned long long) up->port.mapbase,
up->port.irq);
@@ -1352,7 +1352,7 @@ static int __init sunsu_console_setup(struct console *co, char *options)
spin_lock_init(&port->lock);
/* Get firmware console settings. */
- sunserial_console_termios(co, to_of_device(port->dev)->dev.of_node);
+ sunserial_console_termios(co, port->dev->of_node);
memset(&termios, 0, sizeof(struct ktermios));
termios.c_cflag = co->cflag;
@@ -1406,7 +1406,7 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
return SU_PORT_PORT;
}
-static int __devinit su_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit su_probe(struct platform_device *op, const struct of_device_id *match)
{
static int inst;
struct device_node *dp = op->dev.of_node;
@@ -1443,7 +1443,7 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
return -ENOMEM;
}
- up->port.irq = op->irqs[0];
+ up->port.irq = op->archdata.irqs[0];
up->port.dev = &op->dev;
@@ -1497,7 +1497,7 @@ out_unmap:
return err;
}
-static int __devexit su_remove(struct of_device *op)
+static int __devexit su_remove(struct platform_device *op)
{
struct uart_sunsu_port *up = dev_get_drvdata(&op->dev);
bool kbdms = false;
@@ -1586,7 +1586,7 @@ static int __init sunsu_init(void)
return err;
}
- err = of_register_driver(&su_driver, &of_bus_type);
+ err = of_register_platform_driver(&su_driver);
if (err && num_uart)
sunserial_unregister_minors(&sunsu_reg, num_uart);
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index f9a24f4ebb3..c1967ac1c07 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1230,7 +1230,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
(sunzilog_reg.minor - 64) + con->index, con->index);
/* Get firmware console settings. */
- sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node);
+ sunserial_console_termios(con, up->port.dev->of_node);
/* Firmware console speed is limited to 150-->38400 baud so
* this hackish cflag thing is OK.
@@ -1399,7 +1399,7 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
static int zilog_irq = -1;
-static int __devinit zs_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit zs_probe(struct platform_device *op, const struct of_device_id *match)
{
static int kbm_inst, uart_inst;
int inst;
@@ -1426,7 +1426,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
rp = sunzilog_chip_regs[inst];
if (zilog_irq == -1)
- zilog_irq = op->irqs[0];
+ zilog_irq = op->archdata.irqs[0];
up = &sunzilog_port_table[inst * 2];
@@ -1434,7 +1434,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
up[0].port.mapbase = op->resource[0].start + 0x00;
up[0].port.membase = (void __iomem *) &rp->channelA;
up[0].port.iotype = UPIO_MEM;
- up[0].port.irq = op->irqs[0];
+ up[0].port.irq = op->archdata.irqs[0];
up[0].port.uartclk = ZS_CLOCK;
up[0].port.fifosize = 1;
up[0].port.ops = &sunzilog_pops;
@@ -1451,7 +1451,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
up[1].port.mapbase = op->resource[0].start + 0x04;
up[1].port.membase = (void __iomem *) &rp->channelB;
up[1].port.iotype = UPIO_MEM;
- up[1].port.irq = op->irqs[0];
+ up[1].port.irq = op->archdata.irqs[0];
up[1].port.uartclk = ZS_CLOCK;
up[1].port.fifosize = 1;
up[1].port.ops = &sunzilog_pops;
@@ -1492,12 +1492,12 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
"is a %s\n",
dev_name(&op->dev),
(unsigned long long) up[0].port.mapbase,
- op->irqs[0], sunzilog_type(&up[0].port));
+ op->archdata.irqs[0], sunzilog_type(&up[0].port));
printk(KERN_INFO "%s: Mouse at MMIO 0x%llx (irq = %d) "
"is a %s\n",
dev_name(&op->dev),
(unsigned long long) up[1].port.mapbase,
- op->irqs[0], sunzilog_type(&up[1].port));
+ op->archdata.irqs[0], sunzilog_type(&up[1].port));
kbm_inst++;
}
@@ -1516,7 +1516,7 @@ static void __devexit zs_remove_one(struct uart_sunzilog_port *up)
uart_remove_one_port(&sunzilog_reg, &up->port);
}
-static int __devexit zs_remove(struct of_device *op)
+static int __devexit zs_remove(struct platform_device *op)
{
struct uart_sunzilog_port *up = dev_get_drvdata(&op->dev);
struct zilog_layout __iomem *regs;
@@ -1576,7 +1576,7 @@ static int __init sunzilog_init(void)
goto out_free_tables;
}
- err = of_register_driver(&zs_driver, &of_bus_type);
+ err = of_register_platform_driver(&zs_driver);
if (err)
goto out_unregister_uart;
@@ -1604,7 +1604,7 @@ out:
return err;
out_unregister_driver:
- of_unregister_driver(&zs_driver);
+ of_unregister_platform_driver(&zs_driver);
out_unregister_uart:
if (num_sunzilog) {
@@ -1619,7 +1619,7 @@ out_free_tables:
static void __exit sunzilog_exit(void)
{
- of_unregister_driver(&zs_driver);
+ of_unregister_platform_driver(&zs_driver);
if (zilog_irq != -1) {
struct uart_sunzilog_port *up = sunzilog_irq_chain;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 67ca642713b..1f36b7eb735 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -423,7 +423,7 @@ static struct uart_driver timbuart_driver = {
.nr = 1
};
-static int timbuart_probe(struct platform_device *dev)
+static int __devinit timbuart_probe(struct platform_device *dev)
{
int err, irq;
struct timbuart_port *uart;
@@ -489,7 +489,7 @@ err_mem:
return err;
}
-static int timbuart_remove(struct platform_device *dev)
+static int __devexit timbuart_remove(struct platform_device *dev)
{
struct timbuart_port *uart = platform_get_drvdata(dev);
@@ -507,7 +507,7 @@ static struct platform_driver timbuart_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timbuart_probe,
- .remove = timbuart_remove,
+ .remove = __devexit_p(timbuart_remove),
};
/*--------------------------------------------------------------------------*/
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 8acccd56437..9b03d7b3e45 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -21,6 +21,7 @@
#include <asm/io.h>
#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
@@ -583,7 +584,7 @@ static struct platform_driver ulite_platform_driver = {
*/
#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
static int __devinit
-ulite_of_probe(struct of_device *op, const struct of_device_id *match)
+ulite_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct resource res;
const unsigned int *id;
@@ -604,7 +605,7 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match)
return ulite_assign(&op->dev, id ? *id : -1, res.start, irq);
}
-static int __devexit ulite_of_remove(struct of_device *op)
+static int __devexit ulite_of_remove(struct platform_device *op)
{
return ulite_release(&op->dev);
}
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
index 907b06f5c44..3f4848e2174 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/serial/ucc_uart.c
@@ -1194,7 +1194,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
release_firmware(fw);
}
-static int ucc_uart_probe(struct of_device *ofdev,
+static int ucc_uart_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -1462,7 +1462,7 @@ static int ucc_uart_probe(struct of_device *ofdev,
return 0;
}
-static int ucc_uart_remove(struct of_device *ofdev)
+static int ucc_uart_remove(struct platform_device *ofdev)
{
struct uart_qe_port *qe_port = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 78bb5127abd..08fc653a825 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,9 +1,10 @@
#
# Makefile for the SuperH specific drivers.
#
+obj-y := clk.o intc.o
+
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_MAPLE) += maple/
+
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
-obj-$(CONFIG_SUPERH) += clk.o
obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o
-obj-y += intc.o
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c
index f5c80ba9ab1..8c024b984ed 100644
--- a/drivers/sh/clk-cpg.c
+++ b/drivers/sh/clk-cpg.c
@@ -68,6 +68,39 @@ static unsigned long sh_clk_div6_recalc(struct clk *clk)
return clk->freq_table[idx].frequency;
}
+static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct clk_div_mult_table *table = &sh_clk_div6_table;
+ u32 value;
+ int ret, i;
+
+ if (!clk->parent_table || !clk->parent_num)
+ return -EINVAL;
+
+ /* Search the parent */
+ for (i = 0; i < clk->parent_num; i++)
+ if (clk->parent_table[i] == parent)
+ break;
+
+ if (i == clk->parent_num)
+ return -ENODEV;
+
+ ret = clk_reparent(clk, parent);
+ if (ret < 0)
+ return ret;
+
+ value = __raw_readl(clk->enable_reg) &
+ ~(((1 << clk->src_width) - 1) << clk->src_shift);
+
+ __raw_writel(value | (i << clk->src_shift), clk->enable_reg);
+
+ /* Rebuild the frequency table */
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, &clk->arch_flags);
+
+ return 0;
+}
+
static int sh_clk_div6_set_rate(struct clk *clk,
unsigned long rate, int algo_id)
{
@@ -117,7 +150,17 @@ static struct clk_ops sh_clk_div6_clk_ops = {
.disable = sh_clk_div6_disable,
};
-int __init sh_clk_div6_register(struct clk *clks, int nr)
+static struct clk_ops sh_clk_div6_reparent_clk_ops = {
+ .recalc = sh_clk_div6_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div6_set_rate,
+ .enable = sh_clk_div6_enable,
+ .disable = sh_clk_div6_disable,
+ .set_parent = sh_clk_div6_set_parent,
+};
+
+static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
+ struct clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
@@ -136,7 +179,7 @@ int __init sh_clk_div6_register(struct clk *clks, int nr)
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
- clkp->ops = &sh_clk_div6_clk_ops;
+ clkp->ops = ops;
clkp->id = -1;
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
@@ -147,6 +190,17 @@ int __init sh_clk_div6_register(struct clk *clks, int nr)
return ret;
}
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+ return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
+}
+
+int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
+{
+ return sh_clk_div6_register_ops(clks, nr,
+ &sh_clk_div6_reparent_clk_ops);
+}
+
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
struct clk_div4_table *d4t = clk->priv;
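/*
 * A minimal usage sketch, not part of the patch above: how an SoC clock
 * table might register a DIV6 clock with the new reparent-capable ops and
 * later switch its parent.  The register address, bit positions and the
 * pll_clk/extal_div_clk parents are illustrative assumptions; the struct
 * clk fields are the ones consumed by sh_clk_div6_set_parent().
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sh_clk.h>

extern struct clk pll_clk;		/* assumed SoC clocks, defined elsewhere */
extern struct clk extal_div_clk;

static struct clk *div6_parents[] = {
	&pll_clk,
	&extal_div_clk,
};

static struct clk example_div6_clk = {
	.enable_reg	= (void __iomem *)0xa4150080,	/* hypothetical DIVxCR */
	.parent_table	= div6_parents,
	.parent_num	= ARRAY_SIZE(div6_parents),
	.src_shift	= 6,		/* parent-select field in the register */
	.src_width	= 2,
};

static int __init example_clk_setup(void)
{
	int ret;

	/* installs sh_clk_div6_reparent_clk_ops on the clock */
	ret = sh_clk_div6_reparent_register(&example_div6_clk, 1);
	if (ret)
		return ret;

	/* routes through sh_clk_div6_set_parent() and rebuilds the
	 * frequency table for the new parent */
	return clk_set_parent(&example_div6_clk, &extal_div_clk);
}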
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index f0a1418ce66..4c37c4e2864 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clock! */
+ /* This message is completed, so let's turn off the clocks! */
clk_disable(pl022->clk);
+ amba_pclk_disable(pl022->adev);
}
/**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
/*
- * We enable the clock here, then the clock will be disabled when
+ * We enable the clocks here, then the clocks will be disabled when
* giveback() is called in each method (poll/interrupt/DMA)
*/
+ amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
restore_state(pl022);
flush(pl022);
@@ -1723,7 +1725,7 @@ static void pl022_cleanup(struct spi_device *spi)
}
-static int __init
+static int __devinit
pl022_probe(struct amba_device *adev, struct amba_id *id)
{
struct device *dev = &adev->dev;
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
}
/* Disable SSP */
- clk_enable(pl022->clk);
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
load_ssp_default_config(pl022);
- clk_disable(pl022->clk);
status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
goto err_spi_register;
}
dev_dbg(dev, "probe succeded\n");
+ /* Disable the silicon block pclk and clock it when needed */
+ amba_pclk_disable(adev);
return 0;
err_spi_register:
@@ -1838,7 +1840,7 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
return status;
}
-static int __exit
+static int __devexit
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
return status;
}
- clk_enable(pl022->clk);
+ amba_pclk_enable(adev);
load_ssp_default_config(pl022);
- clk_disable(pl022->clk);
+ amba_pclk_disable(adev);
dev_dbg(&adev->dev, "suspended\n");
return 0;
}
@@ -1970,7 +1972,7 @@ static struct amba_driver pl022_driver = {
},
.id_table = pl022_ids,
.probe = pl022_probe,
- .remove = __exit_p(pl022_remove),
+ .remove = __devexit_p(pl022_remove),
.suspend = pl022_suspend,
.resume = pl022_resume,
};
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
return amba_driver_register(&pl022_driver);
}
-module_init(pl022_init);
+subsys_initcall(pl022_init);
static void __exit pl022_exit(void)
{
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
index 59be3efe063..052b3c7fa6a 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/coldfire_qspi.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/io.h>
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb00604..56247853c29 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
wait_till_not_busy(dws);
}
-static void null_cs_control(u32 command)
-{
-}
-
static int null_writer(struct dw_spi *dws)
{
u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
struct spi_transfer,
transfer_list);
- if (!last_transfer->cs_change)
+ if (!last_transfer->cs_change && dws->cs_control)
dws->cs_control(MRST_SPI_DEASSERT);
msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct dw_spi *dws = dev_id;
+ u16 irq_status, irq_mask = 0x3f;
+
+ irq_status = dw_readw(dws, isr) & irq_mask;
+ if (!irq_status)
+ return IRQ_NONE;
if (!dws->cur_msg) {
spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
*/
if (dws->cs_control) {
if (dws->rx && dws->tx)
- chip->tmode = 0x00;
+ chip->tmode = SPI_TMOD_TR;
else if (dws->rx)
- chip->tmode = 0x02;
+ chip->tmode = SPI_TMOD_RO;
else
- chip->tmode = 0x01;
+ chip->tmode = SPI_TMOD_TO;
- cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+ cr0 &= ~SPI_TMOD_MASK;
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
}
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
-
- chip->cs_control = null_cs_control;
- chip->enable_dma = 0;
}
/*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
- ret = request_irq(dws->irq, dw_spi_irq, 0,
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
"dw_spi", dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
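/*
 * A generic sketch, not part of the patch above, of the pattern the dw_spi
 * change adopts: a handler registered with IRQF_SHARED must check whether
 * its own device actually raised the interrupt and return IRQ_NONE
 * otherwise, so the other handlers sharing the line get a chance to run.
 * The register offset, mask and structure below are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/io.h>

#define MYDEV_ISR	0x2c	/* hypothetical interrupt status register */
#define MYDEV_INT_MASK	0x3f

struct mydev {
	void __iomem *regs;
};

static irqreturn_t mydev_irq(int irq, void *dev_id)
{
	struct mydev *md = dev_id;
	u32 status = readl(md->regs + MYDEV_ISR) & MYDEV_INT_MASK;

	if (!status)
		return IRQ_NONE;	/* not ours; let the other sharers look */

	/* ... acknowledge and service the reported conditions here ... */

	return IRQ_HANDLED;
}

/* requested with: request_irq(irq, mydev_irq, IRQF_SHARED, "mydev", md); */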
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c
index 2534b1ec3ed..77d9e7ee8b2 100644
--- a/drivers/spi/mpc512x_psc_spi.c
+++ b/drivers/spi/mpc512x_psc_spi.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -405,9 +406,9 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
}
/* bus_num is used only for the case dev->platform_data == NULL */
-static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
- u32 size, unsigned int irq,
- s16 bus_num)
+static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq,
+ s16 bus_num)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct mpc512x_psc_spi *mps;
@@ -440,6 +441,7 @@ static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->setup = mpc512x_psc_spi_setup;
master->transfer = mpc512x_psc_spi_transfer;
master->cleanup = mpc512x_psc_spi_cleanup;
+ master->dev.of_node = dev->of_node;
tempp = ioremap(regaddr, size);
if (!tempp) {
@@ -490,7 +492,7 @@ free_master:
return ret;
}
-static int __exit mpc512x_psc_spi_do_remove(struct device *dev)
+static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
@@ -505,8 +507,8 @@ static int __exit mpc512x_psc_spi_do_remove(struct device *dev)
return 0;
}
-static int __init mpc512x_psc_spi_of_probe(struct of_device *op,
- const struct of_device_id *match)
+static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op,
+ const struct of_device_id *match)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -537,7 +539,7 @@ static int __init mpc512x_psc_spi_of_probe(struct of_device *op,
irq_of_parse_and_map(op->dev.of_node, 0), id);
}
-static int __exit mpc512x_psc_spi_of_remove(struct of_device *op)
+static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op)
{
return mpc512x_psc_spi_do_remove(&op->dev);
}
@@ -551,7 +553,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
static struct of_platform_driver mpc512x_psc_spi_of_driver = {
.probe = mpc512x_psc_spi_of_probe,
- .remove = __exit_p(mpc512x_psc_spi_of_remove),
+ .remove = __devexit_p(mpc512x_psc_spi_of_remove),
.driver = {
.name = "mpc512x-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 7104cb739da..983fbbfce76 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -16,8 +16,8 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/io.h>
@@ -398,6 +398,7 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->setup = mpc52xx_psc_spi_setup;
master->transfer = mpc52xx_psc_spi_transfer;
master->cleanup = mpc52xx_psc_spi_cleanup;
+ master->dev.of_node = dev->of_node;
mps->psc = ioremap(regaddr, size);
if (!mps->psc) {
@@ -464,13 +465,12 @@ static int __exit mpc52xx_psc_spi_do_remove(struct device *dev)
return 0;
}
-static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
+static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op,
const struct of_device_id *match)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
s16 id = -1;
- int rc;
regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
if (!regaddr_p) {
@@ -491,16 +491,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
id = *psc_nump + 1;
}
- rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
+ return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
irq_of_parse_and_map(op->dev.of_node, 0), id);
- if (rc == 0)
- of_register_spi_devices(dev_get_drvdata(&op->dev),
- op->dev.of_node);
-
- return rc;
}
-static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
+static int __exit mpc52xx_psc_spi_of_remove(struct platform_device *op)
{
return mpc52xx_psc_spi_do_remove(&op->dev);
}
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index b1a76bff775..ec9f0b1bf86 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/of_spi.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
@@ -391,7 +390,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
/*
* OF Platform Bus Binding
*/
-static int __devinit mpc52xx_spi_probe(struct of_device *op,
+static int __devinit mpc52xx_spi_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct spi_master *master;
@@ -439,6 +438,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->dev.of_node = op->dev.of_node;
dev_set_drvdata(&op->dev, master);
@@ -512,7 +512,6 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
if (rc)
goto err_register;
- of_register_spi_devices(master, op->dev.of_node);
dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
return rc;
@@ -531,7 +530,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
return rc;
}
-static int __devexit mpc52xx_spi_remove(struct of_device *op)
+static int __devexit mpc52xx_spi_remove(struct platform_device *op)
{
struct spi_master *master = dev_get_drvdata(&op->dev);
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c
index 24668b30a52..9bd1c92ad96 100644
--- a/drivers/spi/omap_spi_100k.c
+++ b/drivers/spi/omap_spi_100k.c
@@ -141,7 +141,12 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
{
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- /* write 16-bit word */
+ /* write 16-bit word, shifting 8-bit data if necessary */
+ if (len <= 8) {
+ data <<= 8;
+ len = 16;
+ }
+
spi100k_enable_clock(master);
writew( data , spi100k->base + SPI_TX_MSB);
@@ -162,6 +167,10 @@ static int spi100k_read_data(struct spi_master *master, int len)
int dataH,dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ /* Always do at least 16 bits */
+ if (len <= 8)
+ len = 16;
+
spi100k_enable_clock(master);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
@@ -214,10 +223,6 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
c = count;
word_len = cs->word_len;
- /* RX_ONLY mode needs dummy data in TX reg */
- if (xfer->tx_buf == NULL)
- spi100k_write_data(spi->master,word_len, 0);
-
if (word_len <= 8) {
u8 *rx;
const u8 *tx;
@@ -227,9 +232,9 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
do {
c-=1;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx);
+ spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
- *rx = spi100k_read_data(spi->master,word_len);
+ *rx++ = spi100k_read_data(spi->master, word_len);
} while(c);
} else if (word_len <= 16) {
u16 *rx;
@@ -380,10 +385,6 @@ static void omap1_spi100k_work(struct work_struct *work)
if (t->len) {
unsigned count;
- /* RX_ONLY mode needs dummy data in TX reg */
- if (t->tx_buf == NULL)
- spi100k_write_data(spi->master, 8, 0);
-
count = omap1_spi100k_txrx_pio(spi, t);
m->actual_length += count;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b3a1f9259b6..b5a78a1f442 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,9 +23,11 @@
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
+#include <linux/of_spi.h>
/* SPI bustype and spi_master class are registered after board init code
@@ -85,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
+ /* Attempt an OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
if (sdrv->id_table)
return !!spi_match_id(sdrv->id_table, spi);
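/*
 * A sketch, not part of the patch above: with the OF-style match now tried
 * first, an SPI protocol driver can bind against a slave node registered by
 * of_register_spi_devices() simply by listing compatible strings.  The
 * driver name, compatible value and empty probe/remove bodies are
 * illustrative.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static int __devinit mysensor_probe(struct spi_device *spi)
{
	return 0;	/* device setup would go here */
}

static int __devexit mysensor_remove(struct spi_device *spi)
{
	return 0;
}

static const struct of_device_id mysensor_of_match[] = {
	{ .compatible = "acme,mysensor" },
	{}
};

static struct spi_driver mysensor_driver = {
	.driver = {
		.name		= "mysensor",
		.owner		= THIS_MODULE,
		.of_match_table	= mysensor_of_match,
	},
	.probe	= mysensor_probe,
	.remove	= __devexit_p(mysensor_remove),
};

/* registered as usual with spi_register_driver(&mysensor_driver) */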
@@ -527,6 +533,10 @@ int spi_register_master(struct spi_master *master)
dynamic = 1;
}
+ spin_lock_init(&master->bus_lock_spinlock);
+ mutex_init(&master->bus_lock_mutex);
+ master->bus_lock_flag = 0;
+
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
*/
@@ -540,17 +550,18 @@ int spi_register_master(struct spi_master *master)
/* populate children from any spi device tables */
scan_boardinfo(master);
status = 0;
+
+ /* Register devices from the device tree */
+ of_register_spi_devices(master);
done:
return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
-static int __unregister(struct device *dev, void *master_dev)
+static int __unregister(struct device *dev, void *null)
{
- /* note: before about 2.6.14-rc1 this would corrupt memory: */
- if (dev != master_dev)
- spi_unregister_device(to_spi_device(dev));
+ spi_unregister_device(to_spi_device(dev));
return 0;
}
@@ -568,8 +579,7 @@ void spi_unregister_master(struct spi_master *master)
{
int dummy;
- dummy = device_for_each_child(master->dev.parent, &master->dev,
- __unregister);
+ dummy = device_for_each_child(&master->dev, NULL, __unregister);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -666,6 +676,35 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+
+ /* Half-duplex links include original MicroWire, and ones with
+ * only one data pin like SPI_3WIRE (switches direction) or where
+ * either MOSI or MISO is missing. They can also be caused by
+ * software limitations.
+ */
+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
+ || (spi->mode & SPI_3WIRE)) {
+ struct spi_transfer *xfer;
+ unsigned flags = master->flags;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ if (xfer->rx_buf && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+ return -EINVAL;
+ }
+ }
+
+ message->spi = spi;
+ message->status = -EINPROGRESS;
+ return master->transfer(spi, message);
+}
+
/**
* spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
@@ -698,33 +737,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
int spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
+ int ret;
+ unsigned long flags;
- /* Half-duplex links include original MicroWire, and ones with
- * only one data pin like SPI_3WIRE (switches direction) or where
- * either MOSI or MISO is missing. They can also be caused by
- * software limitations.
- */
- if ((master->flags & SPI_MASTER_HALF_DUPLEX)
- || (spi->mode & SPI_3WIRE)) {
- struct spi_transfer *xfer;
- unsigned flags = master->flags;
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
- list_for_each_entry(xfer, &message->transfers, transfer_list) {
- if (xfer->rx_buf && xfer->tx_buf)
- return -EINVAL;
- if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
- return -EINVAL;
- if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
- return -EINVAL;
- }
- }
+ if (master->bus_lock_flag)
+ ret = -EBUSY;
+ else
+ ret = __spi_async(spi, message);
- message->spi = spi;
- message->status = -EINPROGRESS;
- return master->transfer(spi, message);
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
+/**
+ * spi_async_locked - version of spi_async with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some device might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+ ret = __spi_async(spi, message);
+
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(spi_async_locked);
+
/*-------------------------------------------------------------------------*/
@@ -738,6 +812,32 @@ static void spi_complete(void *arg)
complete(arg);
}
+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
+ int bus_locked)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int status;
+ struct spi_master *master = spi->master;
+
+ message->complete = spi_complete;
+ message->context = &done;
+
+ if (!bus_locked)
+ mutex_lock(&master->bus_lock_mutex);
+
+ status = spi_async_locked(spi, message);
+
+ if (!bus_locked)
+ mutex_unlock(&master->bus_lock_mutex);
+
+ if (status == 0) {
+ wait_for_completion(&done);
+ status = message->status;
+ }
+ message->context = NULL;
+ return status;
+}
+
/**
* spi_sync - blocking/synchronous SPI data transfers
* @spi: device with which data will be exchanged
@@ -761,21 +861,86 @@ static void spi_complete(void *arg)
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
- DECLARE_COMPLETION_ONSTACK(done);
- int status;
-
- message->complete = spi_complete;
- message->context = &done;
- status = spi_async(spi, message);
- if (status == 0) {
- wait_for_completion(&done);
- status = message->status;
- }
- message->context = NULL;
- return status;
+ return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
+/**
+ * spi_sync_locked - version of spi_sync with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout. Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
+ * be released by a spi_bus_unlock call when the exclusive access is over.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+ return __spi_sync(spi, message, 1);
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
+/**
+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
+ * @master: SPI bus master that should be locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
+ * exclusive access is over. Data transfer must be done by spi_sync_locked
+ * and spi_async_locked calls when the SPI bus lock is held.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_lock(struct spi_master *master)
+{
+ unsigned long flags;
+
+ mutex_lock(&master->bus_lock_mutex);
+
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+ master->bus_lock_flag = 1;
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+ /* mutex remains locked until spi_bus_unlock is called */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
+ * @master: SPI bus master that was locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
+ * call.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_unlock(struct spi_master *master)
+{
+ master->bus_lock_flag = 0;
+
+ mutex_unlock(&master->bus_lock_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
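/*
 * A minimal usage sketch, not part of the patch above, of the new bus
 * locking API: a driver that must issue two messages back to back with no
 * other device's traffic in between.  The function name and buffers are
 * illustrative; the calls are the ones exported above.
 */
#include <linux/spi/spi.h>

static int my_locked_exchange(struct spi_device *spi, u8 *cmd, size_t cmd_len,
			      u8 *resp, size_t resp_len)
{
	struct spi_message m1, m2;
	struct spi_transfer t1 = { .tx_buf = cmd,  .len = cmd_len };
	struct spi_transfer t2 = { .rx_buf = resp, .len = resp_len };
	int ret;

	spi_message_init(&m1);
	spi_message_add_tail(&t1, &m1);
	spi_message_init(&m2);
	spi_message_add_tail(&t2, &m2);

	/* Claim the bus: until spi_bus_unlock(), other spi_async() callers
	 * get -EBUSY and other spi_sync() callers block on the bus mutex. */
	spi_bus_lock(spi->master);

	ret = spi_sync_locked(spi, &m1);
	if (ret == 0)
		ret = spi_sync_locked(spi, &m2);

	spi_bus_unlock(spi->master);
	return ret;
}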
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 5265330a528..8b55724d5f3 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -259,7 +259,6 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
- int do_setup = -1;
int (*setup_transfer)(struct spi_device *,
struct spi_transfer *);
@@ -275,6 +274,7 @@ static void bitbang_work(struct work_struct *work)
unsigned tmp;
unsigned cs_change;
int status;
+ int do_setup = -1;
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -307,6 +307,8 @@ static void bitbang_work(struct work_struct *work)
status = setup_transfer(spi, t);
if (status < 0)
break;
+ if (do_setup == -1)
+ do_setup = 0;
}
/* set up default clock polarity, and activate chip;
@@ -367,11 +369,6 @@ static void bitbang_work(struct work_struct *work)
m->status = status;
m->complete(m->context);
- /* restore speed and wordsize if it was overridden */
- if (do_setup == 1)
- setup_transfer(spi, NULL);
- do_setup = 0;
-
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
* be for this chip too.
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h
index fc033bbf918..c16bf853c3e 100644
--- a/drivers/spi/spi_bitbang_txrx.h
+++ b/drivers/spi/spi_bitbang_txrx.h
@@ -44,7 +44,7 @@
static inline u32
bitbang_txrx_be_cpha0(struct spi_device *spi,
- unsigned nsecs, unsigned cpol,
+ unsigned nsecs, unsigned cpol, unsigned flags,
u32 word, u8 bits)
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
@@ -53,7 +53,8 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
for (word <<= (32 - bits); likely(bits); bits--) {
/* setup MSB (to slave) on trailing edge */
- setmosi(spi, word & (1 << 31));
+ if ((flags & SPI_MASTER_NO_TX) == 0)
+ setmosi(spi, word & (1 << 31));
spidelay(nsecs); /* T(setup) */
setsck(spi, !cpol);
@@ -61,7 +62,8 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
/* sample MSB (from slave) on leading edge */
word <<= 1;
- word |= getmiso(spi);
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi);
setsck(spi, cpol);
}
return word;
@@ -69,7 +71,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
static inline u32
bitbang_txrx_be_cpha1(struct spi_device *spi,
- unsigned nsecs, unsigned cpol,
+ unsigned nsecs, unsigned cpol, unsigned flags,
u32 word, u8 bits)
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
@@ -79,7 +81,8 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
/* setup MSB (to slave) on leading edge */
setsck(spi, !cpol);
- setmosi(spi, word & (1 << 31));
+ if ((flags & SPI_MASTER_NO_TX) == 0)
+ setmosi(spi, word & (1 << 31));
spidelay(nsecs); /* T(setup) */
setsck(spi, cpol);
@@ -87,7 +90,8 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
/* sample MSB (from slave) on trailing edge */
word <<= 1;
- word |= getmiso(spi);
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi);
}
return word;
}
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index 8b528128111..0d4ceba3b59 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -156,7 +156,7 @@ butterfly_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs,
u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index 7edbd5807e0..63e51b011d5 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -146,25 +146,63 @@ static inline int getmiso(const struct spi_device *spi)
static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}
static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
}
static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
}
static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+}
+
+/*
+ * These functions do not call setmosi or getmiso if the respective flag
+ * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to
+ * call when such a pin is not present or not defined in the controller.
+ * A separate set of callbacks is defined to get the highest possible
+ * speed in the generic case (when both MISO and MOSI lines are
+ * available), as the optimiser will remove the checks when the argument
+ * is constant.
+ */
+
+static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*----------------------------------------------------------------------*/
@@ -232,19 +270,30 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
}
static int __init
-spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label)
+spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
+ u16 *res_flags)
{
int value;
/* NOTE: SPI_*_GPIO symbols may reference "pdata" */
- value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false);
- if (value)
- goto done;
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) {
+ value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false);
+ if (value)
+ goto done;
+ } else {
+ /* HW configuration without MOSI pin */
+ *res_flags |= SPI_MASTER_NO_TX;
+ }
- value = spi_gpio_alloc(SPI_MISO_GPIO, label, true);
- if (value)
- goto free_mosi;
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) {
+ value = spi_gpio_alloc(SPI_MISO_GPIO, label, true);
+ if (value)
+ goto free_mosi;
+ } else {
+ /* HW configuration without MISO pin */
+ *res_flags |= SPI_MASTER_NO_RX;
+ }
value = spi_gpio_alloc(SPI_SCK_GPIO, label, false);
if (value)
@@ -253,9 +302,11 @@ spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label)
goto done;
free_miso:
- gpio_free(SPI_MISO_GPIO);
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
+ gpio_free(SPI_MISO_GPIO);
free_mosi:
- gpio_free(SPI_MOSI_GPIO);
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
+ gpio_free(SPI_MOSI_GPIO);
done:
return value;
}
@@ -266,6 +317,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
struct spi_master *master;
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
+ u16 master_flags = 0;
pdata = pdev->dev.platform_data;
#ifdef GENERIC_BITBANG
@@ -273,7 +325,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
return -ENODEV;
#endif
- status = spi_gpio_request(pdata, dev_name(&pdev->dev));
+ status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags);
if (status < 0)
return status;
@@ -289,6 +341,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
if (pdata)
spi_gpio->pdata = *pdata;
+ master->flags = master_flags;
master->bus_num = pdev->id;
master->num_chipselect = SPI_N_CHIPSEL;
master->setup = spi_gpio_setup;
@@ -296,10 +349,18 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.master = spi_master_get(master);
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
- spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
- spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
- spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
- spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
+
+ if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
+ spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
+ } else {
+ spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
+ }
spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
spi_gpio->bitbang.flags = SPI_CS_HIGH;
@@ -307,8 +368,10 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
if (status < 0) {
spi_master_put(spi_gpio->bitbang.master);
gpio_free:
- gpio_free(SPI_MISO_GPIO);
- gpio_free(SPI_MOSI_GPIO);
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
+ gpio_free(SPI_MISO_GPIO);
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
+ gpio_free(SPI_MOSI_GPIO);
gpio_free(SPI_SCK_GPIO);
spi_master_put(master);
}
@@ -331,8 +394,10 @@ static int __exit spi_gpio_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- gpio_free(SPI_MISO_GPIO);
- gpio_free(SPI_MOSI_GPIO);
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
+ gpio_free(SPI_MISO_GPIO);
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
+ gpio_free(SPI_MOSI_GPIO);
gpio_free(SPI_SCK_GPIO);
return status;
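/*
 * A board-code sketch, not part of the patch above: describing a write-only
 * device on a GPIO-bitbanged bus with no MISO line.  The GPIO numbers and
 * bus id are illustrative; SPI_GPIO_NO_MISO is the marker the driver now
 * understands.
 */
#include <linux/platform_device.h>
#include <linux/spi/spi_gpio.h>

static struct spi_gpio_platform_data write_only_spi_pdata = {
	.sck		= 10,			/* example GPIO numbers */
	.mosi		= 11,
	.miso		= SPI_GPIO_NO_MISO,	/* no input line wired */
	.num_chipselect	= 1,
};

static struct platform_device write_only_spi_device = {
	.name	= "spi_gpio",
	.id	= 2,				/* becomes master->bus_num */
	.dev	= {
		.platform_data	= &write_only_spi_pdata,
	},
};

/* spi_gpio_probe() then sets SPI_MASTER_NO_RX, selects the *_spec_* txrx
 * callbacks, and __spi_async() rejects any transfer with an rx_buf. */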
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
index 86fb7b5993d..7746a41ab6d 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi_lm70llp.c
@@ -191,7 +191,7 @@ static void lm70_chipselect(struct spi_device *spi, int value)
*/
static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}
static void spi_lm70llp_attach(struct parport *p)
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 97ab0a81338..1dd86b835cd 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -38,7 +38,6 @@
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/of_spi.h>
#include <linux/slab.h>
#include <sysdev/fsl_soc.h>
@@ -409,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ if (mspi->rx_dma == mspi->dma_dummy_rx)
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ else
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
out_be16(&rx_bd->cbd_datlen, 0);
out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ if (mspi->tx_dma == mspi->dma_dummy_tx)
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ else
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
out_be16(&tx_bd->cbd_datlen, xfer_len);
out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
BD_SC_LAST);
@@ -1009,6 +1014,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
master->setup = mpc8xxx_spi_setup;
master->transfer = mpc8xxx_spi_transfer;
master->cleanup = mpc8xxx_spi_cleanup;
+ master->dev.of_node = dev->of_node;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->dev = dev;
@@ -1236,7 +1242,7 @@ static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
+static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
@@ -1299,8 +1305,6 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
goto err;
}
- of_register_spi_devices(master, np);
-
return 0;
err:
@@ -1310,7 +1314,7 @@ err_clk:
return ret;
}
-static int __devexit of_mpc8xxx_spi_remove(struct of_device *ofdev)
+static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev)
{
int ret;
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index d53466a249d..80e172d3e72 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -388,9 +388,9 @@ static void free_gpios(struct ppc4xx_spi *hw)
}
/*
- * of_device layer stuff...
+ * platform_device layer stuff...
*/
-static int __init spi_ppc4xx_of_probe(struct of_device *op,
+static int __init spi_ppc4xx_of_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct ppc4xx_spi *hw;
@@ -407,6 +407,7 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op,
master = spi_alloc_master(dev, sizeof *hw);
if (master == NULL)
return -ENOMEM;
+ master->dev.of_node = np;
dev_set_drvdata(dev, master);
hw = spi_master_get_devdata(master);
hw->master = spi_master_get(master);
@@ -545,7 +546,6 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op,
}
dev_info(dev, "driver initialized\n");
- of_register_spi_devices(master, np);
return 0;
@@ -565,7 +565,7 @@ free_master:
return ret;
}
-static int __exit spi_ppc4xx_of_remove(struct of_device *op)
+static int __exit spi_ppc4xx_of_remove(struct platform_device *op)
{
struct spi_master *master = dev_get_drvdata(&op->dev);
struct ppc4xx_spi *hw = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index 8979a75dbd7..be991359bf9 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -64,25 +64,25 @@ static inline u32 getmiso(struct spi_device *dev)
static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}
static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
}
static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
}
static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
}
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 97365815a72..c3038da2648 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
val = readl(regs + S3C64XX_SPI_STATUS);
} while (TX_FIFO_LVL(val, sci) && loops--);
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
/* Flush RxFIFO*/
loops = msecs_to_loops(1);
do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
break;
} while (loops--);
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~S3C64XX_SPI_CH_SW_RST;
writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
- ms += 5; /* some tolerance */
+ ms += 10; /* some tolerance */
if (dma_mode) {
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
} else {
+ u32 status;
val = msecs_to_loops(ms);
do {
- val = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
}
if (!val)
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
-void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
spin_unlock_irqrestore(&sdd->lock, flags);
}
-void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
- xfer->len, DMA_TO_DEVICE);
+ xfer->tx_dma = dma_map_single(dev,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
dev_err(dev, "dma_map_single Tx failed\n");
xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
return -ENODEV;
}
+ sci = pdev->dev.platform_data;
+ if (!sci->src_clk_name) {
+ dev_err(&pdev->dev,
+ "Board init must call s3c64xx_spi_set_info()\n");
+ return -EINVAL;
+ }
+
/* Check for availability of necessary resource */
dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
return -ENOMEM;
}
- sci = pdev->dev.platform_data;
-
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
{
return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
-module_init(s3c64xx_spi_init);
+subsys_initcall(s3c64xx_spi_init);
static void __exit s3c64xx_spi_exit(void)
{
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index a511be7961a..5c643916119 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -83,25 +83,25 @@ static inline u32 getmiso(struct spi_device *dev)
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}
static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
}
static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
}
static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
}
static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 1b47363cb73..80f2db5bcfd 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -390,6 +390,9 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
master->bus_num = bus_num;
master->num_chipselect = pdata->num_chipselect;
+#ifdef CONFIG_OF
+ master->dev.of_node = dev->of_node;
+#endif
xspi->mem = *mem;
xspi->irq = irq;
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
index 4654805b08d..b66c2dbf20a 100644
--- a/drivers/spi/xilinx_spi_of.c
+++ b/drivers/spi/xilinx_spi_of.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of_spi.h>
@@ -37,7 +38,7 @@
#include "xilinx_spi.h"
-static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
+static int __devinit xilinx_spi_of_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct spi_master *master;
@@ -80,13 +81,10 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, master);
- /* Add any subnodes on the SPI bus */
- of_register_spi_devices(master, ofdev->dev.of_node);
-
return 0;
}
-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
+static int __devexit xilinx_spi_remove(struct platform_device *ofdev)
{
xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
dev_set_drvdata(&ofdev->dev, 0);
@@ -95,7 +93,7 @@ static int __devexit xilinx_spi_remove(struct of_device *ofdev)
return 0;
}
-static int __exit xilinx_spi_of_remove(struct of_device *op)
+static int __exit xilinx_spi_of_remove(struct platform_device *op)
{
return xilinx_spi_remove(op);
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 7cee7f4eb60..7892ac16352 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -20,7 +20,6 @@
#include <linux/mmc/sdio_func.h>
#include <linux/slab.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index e72f4046a5e..526682d68de 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/etherdevice.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -72,14 +71,9 @@
/* Write to a PCMCIA configuration register. */
static int ssb_pcmcia_cfg_write(struct ssb_bus *bus, u8 offset, u8 value)
{
- conf_reg_t reg;
int res;
- memset(&reg, 0, sizeof(reg));
- reg.Offset = offset;
- reg.Action = CS_WRITE;
- reg.Value = value;
- res = pcmcia_access_configuration_register(bus->host_pcmcia, &reg);
+ res = pcmcia_write_config_byte(bus->host_pcmcia, offset, value);
if (unlikely(res != 0))
return -EBUSY;
@@ -89,16 +83,11 @@ static int ssb_pcmcia_cfg_write(struct ssb_bus *bus, u8 offset, u8 value)
/* Read from a PCMCIA configuration register. */
static int ssb_pcmcia_cfg_read(struct ssb_bus *bus, u8 offset, u8 *value)
{
- conf_reg_t reg;
int res;
- memset(&reg, 0, sizeof(reg));
- reg.Offset = offset;
- reg.Action = CS_READ;
- res = pcmcia_access_configuration_register(bus->host_pcmcia, &reg);
+ res = pcmcia_read_config_byte(bus->host_pcmcia, offset, value);
if (unlikely(res != 0))
return -EBUSY;
- *value = reg.Value;
return 0;
}
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 0d6c0280eb3..9738cad4ba1 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -17,7 +17,6 @@
#include <linux/pci.h>
#include <linux/io.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0e4122ed1b3..335311a98fd 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -97,6 +97,8 @@ source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
+source "drivers/staging/spectra/Kconfig"
+
source "drivers/staging/quatech_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -111,11 +113,9 @@ source "drivers/staging/vme/Kconfig"
source "drivers/staging/memrar/Kconfig"
-source "drivers/staging/sep/Kconfig"
-
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/ramzswap/Kconfig"
+source "drivers/staging/zram/Kconfig"
source "drivers/staging/wlags49_h2/Kconfig"
@@ -127,8 +127,6 @@ source "drivers/staging/samsung-laptop/Kconfig"
source "drivers/staging/sm7xx/Kconfig"
-source "drivers/staging/dt3155/Kconfig"
-
source "drivers/staging/dt3155v4l/Kconfig"
source "drivers/staging/crystalhd/Kconfig"
@@ -147,5 +145,13 @@ source "drivers/staging/msm/Kconfig"
source "drivers/staging/lirc/Kconfig"
+source "drivers/staging/easycap/Kconfig"
+
+source "drivers/staging/solo6x10/Kconfig"
+
+source "drivers/staging/tidspbridge/Kconfig"
+
+source "drivers/staging/quickstart/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ecfb0bb990b..e3f1e1b6095 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_R8187SE) += rtl8187se/
obj-$(CONFIG_RTL8192SU) += rtl8192su/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
+obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_DREAM) += dream/
obj-$(CONFIG_POHMELFS) += pohmelfs/
@@ -37,15 +38,13 @@ obj-$(CONFIG_FB_UDL) += udlfb/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
-obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_RAMZSWAP) += ramzswap/
+obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
-obj-$(CONFIG_DT3155) += dt3155/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
obj-$(CONFIG_CXT1E1) += cxt1e1/
@@ -54,3 +53,7 @@ obj-$(CONFIG_ADIS16255) += adis16255/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_TOUCHSCREEN_MRSTOUCH) += mrst-touchscreen/
obj-$(CONFIG_MSM_STAGING) += msm/
+obj-$(CONFIG_EASYCAP) += easycap/
+obj-$(CONFIG_SOLO6X10) += solo6x10/
+obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
+obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/
diff --git a/drivers/staging/adis16255/adis16255.c b/drivers/staging/adis16255/adis16255.c
index 55d66e290f7..c3e6a4d5f33 100644
--- a/drivers/staging/adis16255/adis16255.c
+++ b/drivers/staging/adis16255/adis16255.c
@@ -303,7 +303,7 @@ static int spi_adis16255_bringup(struct spi_adis16255_data *spiadis)
if (status != 0)
goto err;
if (value != 0x0800) {
- dev_warn(&spiadis->spi->dev, "Scale factor is none default"
+ dev_warn(&spiadis->spi->dev, "Scale factor is none default "
"value (%.4x)\n", value);
}
@@ -338,7 +338,7 @@ static int spi_adis16255_bringup(struct spi_adis16255_data *spiadis)
status = -ENODEV;
goto err;
} else if (value & 0x3) {
- dev_warn(&spiadis->spi->dev, "Sensor voltage"
+ dev_warn(&spiadis->spi->dev, "Sensor voltage "
"out of range.\n");
status = -ENODEV;
goto err;
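The two one-character fixes above matter because adjacent C string literals are concatenated by the compiler: without the trailing space the user would see "voltageout of range". A tiny user-space illustration of the same rule:

    #include <stdio.h>

    int main(void)
    {
    	printf("Sensor voltage"		/* prints "Sensor voltageout of range." */
    	       "out of range.\n");
    	printf("Sensor voltage "	/* prints "Sensor voltage out of range." */
    	       "out of range.\n");
    	return 0;
    }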
diff --git a/drivers/staging/batman-adv/CHANGELOG b/drivers/staging/batman-adv/CHANGELOG
index c8f9d9e06bb..86450b4f7d7 100644
--- a/drivers/staging/batman-adv/CHANGELOG
+++ b/drivers/staging/batman-adv/CHANGELOG
@@ -1,3 +1,15 @@
+batman-adv 2010.0.0:
+
+* support latest kernels (2.6.21 - 2.6.35)
+* further code refactoring and cleaning for coding style
+* move from procfs based configuration to sysfs
+* reorganized sequence number handling
+* limit queue lengths for batman and broadcast packets
+* many bugs (endless loop and rogue packets on shutdown, wrong tcpdump output,
+ missing frees in error situations, sleeps in atomic contexts) squashed
+
+ -- Fri, 18 Jun 2010 21:34:26 +0200
+
batman-adv 0.2.1:
* support latest kernels (2.6.20 - 2.6.33)
diff --git a/drivers/staging/batman-adv/Kconfig b/drivers/staging/batman-adv/Kconfig
index 1e7e0a8dbc8..8553f351745 100644
--- a/drivers/staging/batman-adv/Kconfig
+++ b/drivers/staging/batman-adv/Kconfig
@@ -4,7 +4,7 @@
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
- depends on PROC_FS && NET
+ depends on NET
default n
---help---
diff --git a/drivers/staging/batman-adv/Makefile b/drivers/staging/batman-adv/Makefile
index f25068c0fae..e9817b5a614 100644
--- a/drivers/staging/batman-adv/Makefile
+++ b/drivers/staging/batman-adv/Makefile
@@ -18,5 +18,5 @@
# 02110-1301, USA
#
-obj-m += batman-adv.o
-batman-adv-objs := main.o send.o routing.o soft-interface.o device.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o bat_sysfs.o
+obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
+batman-adv-objs := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o
diff --git a/drivers/staging/batman-adv/README b/drivers/staging/batman-adv/README
index 14244a2c4e4..7192b7fa218 100644
--- a/drivers/staging/batman-adv/README
+++ b/drivers/staging/batman-adv/README
@@ -1,4 +1,4 @@
-[state: 03-05-2010]
+[state: 12-06-2010]
BATMAN-ADV
----------
diff --git a/drivers/staging/batman-adv/TODO b/drivers/staging/batman-adv/TODO
index 518db7fd41b..9c5aea20be1 100644
--- a/drivers/staging/batman-adv/TODO
+++ b/drivers/staging/batman-adv/TODO
@@ -1,6 +1,9 @@
-Request a review.
-Process the comments from the review.
-Move into mainline proper.
+ * Use hweight* for hamming weight calculation
+ * Save/cache packets directly as skb instead of using a normal memory region
+ and copying it in a skb using send_raw_packet and similar functions
+ * Request a new review
+ * Process the comments from the review
+ * Move into mainline proper
Please send all patches to:
Marek Lindner <lindner_marek@yahoo.de>
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c
index ce8b8a6e5ae..9862d16bbdc 100644
--- a/drivers/staging/batman-adv/aggregation.c
+++ b/drivers/staging/batman-adv/aggregation.c
@@ -106,11 +106,14 @@ static void new_aggregated_packet(unsigned char *packet_buff,
{
struct forw_packet *forw_packet_aggr;
unsigned long flags;
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
/* own packet should always be scheduled */
if (!own_packet) {
if (!atomic_dec_not_zero(&batman_queue_left)) {
- bat_dbg(DBG_BATMAN, "batman packet queue full\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "batman packet queue full\n");
return;
}
}
@@ -252,9 +255,9 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
while (aggregated_packet(buff_pos, packet_len,
batman_packet->num_hna)) {
- /* network to host order for our 16bit seqno, and the
+ /* network to host order for our 32bit seqno, and the
orig_interval. */
- batman_packet->seqno = ntohs(batman_packet->seqno);
+ batman_packet->seqno = ntohl(batman_packet->seqno);
hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
receive_bat_packet(ethhdr, batman_packet,
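The aggregation change above follows the protocol's move to 32-bit sequence numbers, so the byte-order conversion switches from ntohs() to ntohl(). A minimal sketch of the conversion (the packet struct is illustrative, not the real batman_packet layout):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_packet {
    	__be32 seqno;			/* was a __be16 field in the old protocol */
    };

    static u32 demo_seqno_to_host(const struct demo_packet *p)
    {
    	return ntohl(p->seqno);		/* was ntohs() on the old 16 bit field */
    }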
diff --git a/drivers/staging/batman-adv/aggregation.h b/drivers/staging/batman-adv/aggregation.h
index 84401ca24c3..71a91b3da91 100644
--- a/drivers/staging/batman-adv/aggregation.h
+++ b/drivers/staging/batman-adv/aggregation.h
@@ -19,6 +19,9 @@
*
*/
+#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
+#define _NET_BATMAN_ADV_AGGREGATION_H_
+
#include "main.h"
/* is there another aggregated packet here? */
@@ -36,3 +39,5 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
unsigned long send_time);
void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
int packet_len, struct batman_if *if_incoming);
+
+#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/drivers/staging/batman-adv/bat_debugfs.c b/drivers/staging/batman-adv/bat_debugfs.c
new file mode 100644
index 00000000000..507da68054c
--- /dev/null
+++ b/drivers/staging/batman-adv/bat_debugfs.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+
+#include <linux/debugfs.h>
+
+#include "bat_debugfs.h"
+#include "translation-table.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "vis.h"
+#include "icmp_socket.h"
+
+static struct dentry *bat_debugfs;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+#define LOG_BUFF_MASK (log_buff_len-1)
+#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
+
+static int log_buff_len = LOG_BUF_LEN;
+
+static void emit_log_char(struct debug_log *debug_log, char c)
+{
+ LOG_BUFF(debug_log->log_end) = c;
+ debug_log->log_end++;
+
+ if (debug_log->log_end - debug_log->log_start > log_buff_len)
+ debug_log->log_start = debug_log->log_end - log_buff_len;
+}
+
+static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
+{
+ int printed_len;
+ va_list args;
+ static char debug_log_buf[256];
+ char *p;
+ unsigned long flags;
+
+ if (!debug_log)
+ return 0;
+
+ spin_lock_irqsave(&debug_log->lock, flags);
+ va_start(args, fmt);
+ printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
+ fmt, args);
+ va_end(args);
+
+ for (p = debug_log_buf; *p != 0; p++)
+ emit_log_char(debug_log, *p);
+
+ spin_unlock_irqrestore(&debug_log->lock, flags);
+
+ wake_up(&debug_log->queue_wait);
+
+ return 0;
+}
+
+int debug_log(struct bat_priv *bat_priv, char *fmt, ...)
+{
+ va_list args;
+ char tmp_log_buf[256];
+
+ va_start(args, fmt);
+ vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
+ fdebug_log(bat_priv->debug_log, "[%10u] %s",
+ (jiffies / HZ), tmp_log_buf);
+ va_end(args);
+
+ return 0;
+}
+
+static int log_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ inc_module_count();
+ return 0;
+}
+
+static int log_release(struct inode *inode, struct file *file)
+{
+ dec_module_count();
+ return 0;
+}
+
+static ssize_t log_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bat_priv *bat_priv = file->private_data;
+ struct debug_log *debug_log = bat_priv->debug_log;
+ int error, i = 0;
+ char c;
+ unsigned long flags;
+
+ if ((file->f_flags & O_NONBLOCK) &&
+ !(debug_log->log_end - debug_log->log_start))
+ return -EAGAIN;
+
+ if ((!buf) || (count < 0))
+ return -EINVAL;
+
+ if (count == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ error = wait_event_interruptible(debug_log->queue_wait,
+ (debug_log->log_start - debug_log->log_end));
+
+ if (error)
+ return error;
+
+ spin_lock_irqsave(&debug_log->lock, flags);
+
+ while ((!error) && (i < count) &&
+ (debug_log->log_start != debug_log->log_end)) {
+ c = LOG_BUFF(debug_log->log_start);
+
+ debug_log->log_start++;
+
+ spin_unlock_irqrestore(&debug_log->lock, flags);
+
+ error = __put_user(c, buf);
+
+ spin_lock_irqsave(&debug_log->lock, flags);
+
+ buf++;
+ i++;
+
+ }
+
+ spin_unlock_irqrestore(&debug_log->lock, flags);
+
+ if (!error)
+ return i;
+
+ return error;
+}
+
+static unsigned int log_poll(struct file *file, poll_table *wait)
+{
+ struct bat_priv *bat_priv = file->private_data;
+ struct debug_log *debug_log = bat_priv->debug_log;
+
+ poll_wait(file, &debug_log->queue_wait, wait);
+
+ if (debug_log->log_end - debug_log->log_start)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations log_fops = {
+ .open = log_open,
+ .release = log_release,
+ .read = log_read,
+ .poll = log_poll,
+};
+
+static int debug_log_setup(struct bat_priv *bat_priv)
+{
+ struct dentry *d;
+
+ if (!bat_priv->debug_dir)
+ goto err;
+
+ bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC);
+ if (!bat_priv->debug_log)
+ goto err;
+
+ spin_lock_init(&bat_priv->debug_log->lock);
+ init_waitqueue_head(&bat_priv->debug_log->queue_wait);
+
+ d = debugfs_create_file("log", S_IFREG | S_IRUSR,
+ bat_priv->debug_dir, bat_priv, &log_fops);
+ if (d)
+ goto err;
+
+ return 0;
+
+err:
+ return 1;
+}
+
+static void debug_log_cleanup(struct bat_priv *bat_priv)
+{
+ kfree(bat_priv->debug_log);
+ bat_priv->debug_log = NULL;
+}
+#else /* CONFIG_BATMAN_ADV_DEBUG */
+static int debug_log_setup(struct bat_priv *bat_priv)
+{
+ bat_priv->debug_log = NULL;
+ return 0;
+}
+
+static void debug_log_cleanup(struct bat_priv *bat_priv)
+{
+ return;
+}
+#endif
+
+static int originators_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, orig_seq_print_text, net_dev);
+}
+
+static int transtable_global_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, hna_global_seq_print_text, net_dev);
+}
+
+static int transtable_local_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, hna_local_seq_print_text, net_dev);
+}
+
+static int vis_data_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, vis_seq_print_text, net_dev);
+}
+
+struct bat_debuginfo {
+ struct attribute attr;
+ const struct file_operations fops;
+};
+
+#define BAT_DEBUGINFO(_name, _mode, _open) \
+struct bat_debuginfo bat_debuginfo_##_name = { \
+ .attr = { .name = __stringify(_name), \
+ .mode = _mode, }, \
+ .fops = { .owner = THIS_MODULE, \
+ .open = _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ } \
+};
+
+static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
+static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
+static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
+
+static struct bat_debuginfo *mesh_debuginfos[] = {
+ &bat_debuginfo_originators,
+ &bat_debuginfo_transtable_global,
+ &bat_debuginfo_transtable_local,
+ &bat_debuginfo_vis_data,
+ NULL,
+};
+
+void debugfs_init(void)
+{
+ bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
+ if (bat_debugfs == ERR_PTR(-ENODEV))
+ bat_debugfs = NULL;
+}
+
+void debugfs_destroy(void)
+{
+ if (bat_debugfs) {
+ debugfs_remove_recursive(bat_debugfs);
+ bat_debugfs = NULL;
+ }
+}
+
+int debugfs_add_meshif(struct net_device *dev)
+{
+ struct bat_priv *bat_priv = netdev_priv(dev);
+ struct bat_debuginfo **bat_debug;
+ struct dentry *file;
+
+ if (!bat_debugfs)
+ goto out;
+
+ bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
+ if (!bat_priv->debug_dir)
+ goto out;
+
+ bat_socket_setup(bat_priv);
+ debug_log_setup(bat_priv);
+
+ for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
+ file = debugfs_create_file(((*bat_debug)->attr).name,
+ S_IFREG | ((*bat_debug)->attr).mode,
+ bat_priv->debug_dir,
+ dev, &(*bat_debug)->fops);
+ if (!file) {
+ bat_err(dev, "Can't add debugfs file: %s/%s\n",
+ dev->name, ((*bat_debug)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+rem_attr:
+ debugfs_remove_recursive(bat_priv->debug_dir);
+ bat_priv->debug_dir = NULL;
+out:
+#ifdef CONFIG_DEBUG_FS
+ return -ENOMEM;
+#else
+ return 0;
+#endif /* CONFIG_DEBUG_FS */
+}
+
+void debugfs_del_meshif(struct net_device *dev)
+{
+ struct bat_priv *bat_priv = netdev_priv(dev);
+
+ debug_log_cleanup(bat_priv);
+
+ if (bat_debugfs) {
+ debugfs_remove_recursive(bat_priv->debug_dir);
+ bat_priv->debug_dir = NULL;
+ }
+}
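The debug log added above is a power-of-two ring buffer: log_start and log_end are free-running counters, LOG_BUFF() reduces them with a mask, and old data is silently overwritten once the writer laps the reader. A condensed sketch of that core (buffer length must be a power of two; names are illustrative):

    #define DEMO_BUF_LEN	4096			/* must be a power of two */
    #define DEMO_BUF_MASK	(DEMO_BUF_LEN - 1)

    static char demo_buf[DEMO_BUF_LEN];
    static unsigned long demo_start, demo_end;	/* free-running counters */

    static void demo_emit(char c)
    {
    	demo_buf[demo_end & DEMO_BUF_MASK] = c;
    	demo_end++;

    	/* once the writer laps the reader, drop the oldest data */
    	if (demo_end - demo_start > DEMO_BUF_LEN)
    		demo_start = demo_end - DEMO_BUF_LEN;
    }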
diff --git a/drivers/staging/batman-adv/bat_debugfs.h b/drivers/staging/batman-adv/bat_debugfs.h
new file mode 100644
index 00000000000..72df532b7d5
--- /dev/null
+++ b/drivers/staging/batman-adv/bat_debugfs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+
+#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
+#define _NET_BATMAN_ADV_DEBUGFS_H_
+
+#define DEBUGFS_BAT_SUBDIR "batman_adv"
+
+void debugfs_init(void);
+void debugfs_destroy(void);
+int debugfs_add_meshif(struct net_device *dev);
+void debugfs_del_meshif(struct net_device *dev);
+
+#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index 212bc21e6d6..05ca15a6c9f 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -28,22 +28,6 @@
#define to_dev(obj) container_of(obj, struct device, kobj)
-struct bat_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- char *buf, size_t count);
-};
-
-struct hardif_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- char *buf, size_t count);
-};
-
#define BAT_ATTR(_name, _mode, _show, _store) \
struct bat_attribute bat_attr_##_name = { \
.attr = {.name = __stringify(_name), \
@@ -52,34 +36,18 @@ struct bat_attribute bat_attr_##_name = { \
.store = _store, \
};
-#define BAT_BIN_ATTR(_name, _mode, _read, _write) \
-struct bin_attribute bat_attr_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = _mode, }, \
- .read = _read, \
- .write = _write, \
-};
-
-#define HARDIF_ATTR(_name, _mode, _show, _store) \
-struct hardif_attribute hardif_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-static ssize_t show_aggr_ogm(struct kobject *kobj, struct attribute *attr,
+static ssize_t show_aggr_ogms(struct kobject *kobj, struct attribute *attr,
char *buff)
{
struct device *dev = to_dev(kobj->parent);
struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
int aggr_status = atomic_read(&bat_priv->aggregation_enabled);
- return sprintf(buff, "status: %s\ncommands: enable, disable, 0, 1\n",
+ return sprintf(buff, "%s\n",
aggr_status == 0 ? "disabled" : "enabled");
}
-static ssize_t store_aggr_ogm(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_aggr_ogms(struct kobject *kobj, struct attribute *attr,
char *buff, size_t count)
{
struct device *dev = to_dev(kobj->parent);
@@ -99,23 +67,73 @@ static ssize_t store_aggr_ogm(struct kobject *kobj, struct attribute *attr,
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
- printk(KERN_INFO "batman-adv:Invalid parameter for 'aggregate OGM' setting on mesh %s received: %s\n",
- net_dev->name, buff);
+ bat_info(net_dev,
+ "Invalid parameter for 'aggregate OGM' setting"
+ "received: %s\n", buff);
return -EINVAL;
}
if (atomic_read(&bat_priv->aggregation_enabled) == aggr_tmp)
return count;
- printk(KERN_INFO "batman-adv:Changing aggregation from: %s to: %s on mesh: %s\n",
- atomic_read(&bat_priv->aggregation_enabled) == 1 ?
- "enabled" : "disabled", aggr_tmp == 1 ? "enabled" : "disabled",
- net_dev->name);
+ bat_info(net_dev, "Changing aggregation from: %s to: %s\n",
+ atomic_read(&bat_priv->aggregation_enabled) == 1 ?
+ "enabled" : "disabled", aggr_tmp == 1 ? "enabled" :
+ "disabled");
atomic_set(&bat_priv->aggregation_enabled, (unsigned)aggr_tmp);
return count;
}
+static ssize_t show_bond(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+ int bond_status = atomic_read(&bat_priv->bonding_enabled);
+
+ return sprintf(buff, "%s\n",
+ bond_status == 0 ? "disabled" : "enabled");
+}
+
+static ssize_t store_bond(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ int bonding_enabled_tmp = -1;
+
+ if (((count == 2) && (buff[0] == '1')) ||
+ (strncmp(buff, "enable", 6) == 0))
+ bonding_enabled_tmp = 1;
+
+ if (((count == 2) && (buff[0] == '0')) ||
+ (strncmp(buff, "disable", 7) == 0))
+ bonding_enabled_tmp = 0;
+
+ if (bonding_enabled_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ bat_err(net_dev,
+ "Invalid parameter for 'bonding' setting received: "
+ "%s\n", buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->bonding_enabled) == bonding_enabled_tmp)
+ return count;
+
+ bat_info(net_dev, "Changing bonding from: %s to: %s\n",
+ atomic_read(&bat_priv->bonding_enabled) == 1 ?
+ "enabled" : "disabled",
+ bonding_enabled_tmp == 1 ? "enabled" : "disabled");
+
+ atomic_set(&bat_priv->bonding_enabled, (unsigned)bonding_enabled_tmp);
+ return count;
+}
+
static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
char *buff)
{
@@ -123,10 +141,9 @@ static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
int vis_mode = atomic_read(&bat_priv->vis_mode);
- return sprintf(buff, "status: %s\ncommands: client, server, %d, %d\n",
+ return sprintf(buff, "%s\n",
vis_mode == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server",
- VIS_TYPE_SERVER_SYNC, VIS_TYPE_CLIENT_UPDATE);
+ "client" : "server");
}
static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
@@ -141,7 +158,8 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
ret = strict_strtoul(buff, 10, &val);
if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
- (strncmp(buff, "client", 6) == 0))
+ (strncmp(buff, "client", 6) == 0) ||
+ (strncmp(buff, "off", 3) == 0))
vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
@@ -152,18 +170,19 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
- printk(KERN_INFO "batman-adv:Invalid parameter for 'vis mode' setting on mesh %s received: %s\n",
- net_dev->name, buff);
+ bat_info(net_dev,
+ "Invalid parameter for 'vis mode' setting received: "
+ "%s\n", buff);
return -EINVAL;
}
if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
return count;
- printk(KERN_INFO "batman-adv:Changing vis mode from: %s to: %s on mesh: %s\n",
- atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server", net_dev->name);
+ bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
+ atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
+ "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
+ "client" : "server");
atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
return count;
@@ -175,7 +194,7 @@ static ssize_t show_orig_interval(struct kobject *kobj, struct attribute *attr,
struct device *dev = to_dev(kobj->parent);
struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- return sprintf(buff, "status: %i\n",
+ return sprintf(buff, "%i\n",
atomic_read(&bat_priv->orig_interval));
}
@@ -190,91 +209,91 @@ static ssize_t store_orig_interval(struct kobject *kobj, struct attribute *attr,
ret = strict_strtoul(buff, 10, &orig_interval_tmp);
if (ret) {
- printk(KERN_INFO "batman-adv:Invalid parameter for 'orig_interval' setting on mesh %s received: %s\n",
- net_dev->name, buff);
+ bat_info(net_dev, "Invalid parameter for 'orig_interval' "
+ "setting received: %s\n", buff);
return -EINVAL;
}
- if (orig_interval_tmp <= JITTER * 2) {
- printk(KERN_INFO "batman-adv:New originator interval too small: %li (min: %i)\n",
- orig_interval_tmp, JITTER * 2);
+ if (orig_interval_tmp < JITTER * 2) {
+ bat_info(net_dev, "New originator interval too small: %li "
+ "(min: %i)\n", orig_interval_tmp, JITTER * 2);
return -EINVAL;
}
if (atomic_read(&bat_priv->orig_interval) == orig_interval_tmp)
return count;
- printk(KERN_INFO "batman-adv:Changing originator interval from: %i to: %li on mesh: %s\n",
- atomic_read(&bat_priv->orig_interval),
- orig_interval_tmp, net_dev->name);
+ bat_info(net_dev, "Changing originator interval from: %i to: %li\n",
+ atomic_read(&bat_priv->orig_interval),
+ orig_interval_tmp);
atomic_set(&bat_priv->orig_interval, orig_interval_tmp);
return count;
}
-static BAT_ATTR(aggregate_ogm, S_IRUGO | S_IWUSR,
- show_aggr_ogm, store_aggr_ogm);
-static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
-static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
- show_orig_interval, store_orig_interval);
-
-static struct bat_attribute *mesh_attrs[] = {
- &bat_attr_aggregate_ogm,
- &bat_attr_vis_mode,
- &bat_attr_orig_interval,
- NULL,
-};
-
-static ssize_t transtable_local_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buff, loff_t off, size_t count)
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+static ssize_t show_log_level(struct kobject *kobj, struct attribute *attr,
+ char *buff)
{
struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+ int log_level = atomic_read(&bat_priv->log_level);
- return hna_local_fill_buffer_text(net_dev, buff, count, off);
+ return sprintf(buff, "%d\n", log_level);
}
-static ssize_t transtable_global_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buff, loff_t off, size_t count)
+static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
{
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ unsigned long log_level_tmp;
+ int ret;
- return hna_global_fill_buffer_text(net_dev, buff, count, off);
-}
+ ret = strict_strtoul(buff, 10, &log_level_tmp);
+ if (ret) {
+ bat_info(net_dev, "Invalid parameter for 'log_level' "
+ "setting received: %s\n", buff);
+ return -EINVAL;
+ }
-static ssize_t originators_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buff, loff_t off, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
+ if (log_level_tmp > 3) {
+ bat_info(net_dev, "New log level too big: %li "
+ "(max: %i)\n", log_level_tmp, 3);
+ return -EINVAL;
+ }
- return orig_fill_buffer_text(net_dev, buff, count, off);
-}
+ if (atomic_read(&bat_priv->log_level) == log_level_tmp)
+ return count;
-static ssize_t vis_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buff, loff_t off, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
+ bat_info(net_dev, "Changing log level from: %i to: %li\n",
+ atomic_read(&bat_priv->log_level),
+ log_level_tmp);
- return vis_fill_buffer_text(net_dev, buff, count, off);
+ atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
+ return count;
}
+#endif
-static BAT_BIN_ATTR(transtable_local, S_IRUGO, transtable_local_read, NULL);
-static BAT_BIN_ATTR(transtable_global, S_IRUGO, transtable_global_read, NULL);
-static BAT_BIN_ATTR(originators, S_IRUGO, originators_read, NULL);
-static BAT_BIN_ATTR(vis_data, S_IRUGO, vis_data_read, NULL);
+static BAT_ATTR(aggregated_ogms, S_IRUGO | S_IWUSR,
+ show_aggr_ogms, store_aggr_ogms);
+static BAT_ATTR(bonding, S_IRUGO | S_IWUSR, show_bond, store_bond);
+static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
+static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
+ show_orig_interval, store_orig_interval);
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+static BAT_ATTR(log_level, S_IRUGO | S_IWUSR, show_log_level, store_log_level);
+#endif
-static struct bin_attribute *mesh_bin_attrs[] = {
- &bat_attr_transtable_local,
- &bat_attr_transtable_global,
- &bat_attr_originators,
- &bat_attr_vis_data,
+static struct bat_attribute *mesh_attrs[] = {
+ &bat_attr_aggregated_ogms,
+ &bat_attr_bonding,
+ &bat_attr_vis_mode,
+ &bat_attr_orig_interval,
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ &bat_attr_log_level,
+#endif
NULL,
};
@@ -283,22 +302,24 @@ int sysfs_add_meshif(struct net_device *dev)
struct kobject *batif_kobject = &dev->dev.kobj;
struct bat_priv *bat_priv = netdev_priv(dev);
struct bat_attribute **bat_attr;
- struct bin_attribute **bin_attr;
int err;
/* FIXME: should be done in the general mesh setup
routine as soon as we have it */
atomic_set(&bat_priv->aggregation_enabled, 1);
+ atomic_set(&bat_priv->bonding_enabled, 0);
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->orig_interval, 1000);
+ atomic_set(&bat_priv->log_level, 0);
+
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
batif_kobject);
if (!bat_priv->mesh_obj) {
- printk(KERN_ERR "batman-adv:Can't add sysfs directory: %s/%s\n",
- dev->name, SYSFS_IF_MESH_SUBDIR);
+ bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
+ SYSFS_IF_MESH_SUBDIR);
goto out;
}
@@ -306,28 +327,15 @@ int sysfs_add_meshif(struct net_device *dev)
err = sysfs_create_file(bat_priv->mesh_obj,
&((*bat_attr)->attr));
if (err) {
- printk(KERN_ERR "batman-adv:Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_MESH_SUBDIR,
- ((*bat_attr)->attr).name);
+ bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, SYSFS_IF_MESH_SUBDIR,
+ ((*bat_attr)->attr).name);
goto rem_attr;
}
}
- for (bin_attr = mesh_bin_attrs; *bin_attr; ++bin_attr) {
- err = sysfs_create_bin_file(bat_priv->mesh_obj, (*bin_attr));
- if (err) {
- printk(KERN_ERR "batman-adv:Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_MESH_SUBDIR,
- ((*bin_attr)->attr).name);
- goto rem_bin_attr;
- }
- }
-
return 0;
-rem_bin_attr:
- for (bin_attr = mesh_bin_attrs; *bin_attr; ++bin_attr)
- sysfs_remove_bin_file(bat_priv->mesh_obj, (*bin_attr));
rem_attr:
for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
@@ -342,10 +350,6 @@ void sysfs_del_meshif(struct net_device *dev)
{
struct bat_priv *bat_priv = netdev_priv(dev);
struct bat_attribute **bat_attr;
- struct bin_attribute **bin_attr;
-
- for (bin_attr = mesh_bin_attrs; *bin_attr; ++bin_attr)
- sysfs_remove_bin_file(bat_priv->mesh_obj, (*bin_attr));
for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
@@ -364,7 +368,7 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
if (!batman_if)
return 0;
- return sprintf(buff, "status: %s\ncommands: none, bat0\n",
+ return sprintf(buff, "%s\n",
batman_if->if_status == IF_NOT_IN_USE ?
"none" : "bat0");
}
@@ -390,8 +394,8 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
- printk(KERN_ERR "batman-adv:Invalid parameter for 'mesh_iface' setting received: %s\n",
- buff);
+ pr_err("Invalid parameter for 'mesh_iface' setting received: "
+ "%s\n", buff);
return -EINVAL;
}
@@ -433,37 +437,37 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
}
}
-static HARDIF_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
- show_mesh_iface, store_mesh_iface);
-static HARDIF_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
+static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
+ show_mesh_iface, store_mesh_iface);
+static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
-static struct hardif_attribute *batman_attrs[] = {
- &hardif_attr_mesh_iface,
- &hardif_attr_iface_status,
+static struct bat_attribute *batman_attrs[] = {
+ &bat_attr_mesh_iface,
+ &bat_attr_iface_status,
NULL,
};
int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
{
struct kobject *hardif_kobject = &dev->dev.kobj;
- struct hardif_attribute **hardif_attr;
+ struct bat_attribute **bat_attr;
int err;
*hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
hardif_kobject);
if (!*hardif_obj) {
- printk(KERN_ERR "batman-adv:Can't add sysfs directory: %s/%s\n",
- dev->name, SYSFS_IF_BAT_SUBDIR);
+ bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
+ SYSFS_IF_BAT_SUBDIR);
goto out;
}
- for (hardif_attr = batman_attrs; *hardif_attr; ++hardif_attr) {
- err = sysfs_create_file(*hardif_obj, &((*hardif_attr)->attr));
+ for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
if (err) {
- printk(KERN_ERR "batman-adv:Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_BAT_SUBDIR,
- ((*hardif_attr)->attr).name);
+ bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, SYSFS_IF_BAT_SUBDIR,
+ ((*bat_attr)->attr).name);
goto rem_attr;
}
}
@@ -471,8 +475,8 @@ int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
return 0;
rem_attr:
- for (hardif_attr = batman_attrs; *hardif_attr; ++hardif_attr)
- sysfs_remove_file(*hardif_obj, &((*hardif_attr)->attr));
+ for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
out:
return -ENOMEM;
}
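The sysfs rework above drops the bin_attribute tables and the separate hardif_attribute type in favour of a single bat_attribute with plain show/store callbacks. A simplified sketch of that pattern, using the generic kobj_attribute type as a stand-in (attribute name and returned value are illustrative):

    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include <linux/stat.h>

    static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
    			 char *buff)
    {
    	return sprintf(buff, "%s\n", "enabled");
    }

    static struct kobj_attribute demo_attr =
    	__ATTR(demo_setting, S_IRUGO, demo_show, NULL);

    static int demo_register(struct kobject *mesh_obj)
    {
    	/* mesh_obj would come from kobject_create_and_add("mesh", ...) */
    	return sysfs_create_file(mesh_obj, &demo_attr.attr);
    }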
diff --git a/drivers/staging/batman-adv/bat_sysfs.h b/drivers/staging/batman-adv/bat_sysfs.h
index e1893411871..7f186c007b4 100644
--- a/drivers/staging/batman-adv/bat_sysfs.h
+++ b/drivers/staging/batman-adv/bat_sysfs.h
@@ -20,10 +20,23 @@
*/
+#ifndef _NET_BATMAN_ADV_SYSFS_H_
+#define _NET_BATMAN_ADV_SYSFS_H_
+
#define SYSFS_IF_MESH_SUBDIR "mesh"
#define SYSFS_IF_BAT_SUBDIR "batman_adv"
+struct bat_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ char *buf, size_t count);
+};
+
int sysfs_add_meshif(struct net_device *dev);
void sysfs_del_meshif(struct net_device *dev);
int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
void sysfs_del_hardif(struct kobject **hardif_obj);
+
+#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/drivers/staging/batman-adv/bitarray.c b/drivers/staging/batman-adv/bitarray.c
index 2fef6e35f8c..dd4193c99d4 100644
--- a/drivers/staging/batman-adv/bitarray.c
+++ b/drivers/staging/batman-adv/bitarray.c
@@ -24,10 +24,10 @@
/* returns true if the corresponding bit in the given seq_bits indicates true
* and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint16_t last_seqno,
- uint16_t curr_seqno)
+uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
+ uint32_t curr_seqno)
{
- int16_t diff, word_offset, word_num;
+ int32_t diff, word_offset, word_num;
diff = last_seqno - curr_seqno;
if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
@@ -63,7 +63,7 @@ void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n)
}
/* shift the packet array by n places. */
-void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n)
+static void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n)
{
int32_t word_offset, word_num;
int32_t i;
@@ -125,9 +125,12 @@ static void bit_reset_window(TYPE_OF_WORD *seq_bits)
* 1 if the window was moved (either new or very old)
* 0 if the window was not moved/shifted.
*/
-char bit_get_packet(TYPE_OF_WORD *seq_bits, int16_t seq_num_diff,
+char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
int8_t set_mark)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+
/* sequence number is slightly older. We already got a sequence number
* higher than this one, so we just mark it. */
@@ -152,7 +155,7 @@ char bit_get_packet(TYPE_OF_WORD *seq_bits, int16_t seq_num_diff,
if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"We missed a lot of packets (%i) !\n",
seq_num_diff - 1);
bit_reset_window(seq_bits);
@@ -169,7 +172,7 @@ char bit_get_packet(TYPE_OF_WORD *seq_bits, int16_t seq_num_diff,
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Other host probably restarted!\n");
bit_reset_window(seq_bits);
diff --git a/drivers/staging/batman-adv/bitarray.h b/drivers/staging/batman-adv/bitarray.h
index 76ad24c9f3d..01897d6962d 100644
--- a/drivers/staging/batman-adv/bitarray.h
+++ b/drivers/staging/batman-adv/bitarray.h
@@ -19,6 +19,8 @@
*
*/
+#ifndef _NET_BATMAN_ADV_BITARRAY_H_
+#define _NET_BATMAN_ADV_BITARRAY_H_
/* you should choose something big, if you don't want to waste cpu */
#define TYPE_OF_WORD unsigned long
@@ -26,20 +28,19 @@
/* returns true if the corresponding bit in the given seq_bits indicates true
* and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint16_t last_seqno,
- uint16_t curr_seqno);
+uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
+ uint32_t curr_seqno);
/* turn corresponding bit on, so we can remember that we got the packet */
void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n);
-/* shift the packet array by n places. */
-void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n);
-
/* receive and process one packet, returns 1 if received seq_num is considered
* new, 0 if old */
-char bit_get_packet(TYPE_OF_WORD *seq_bits, int16_t seq_num_diff,
+char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
int8_t set_mark);
/* count the hamming weight, how many good packets did we receive? */
int bit_packet_count(TYPE_OF_WORD *seq_bits);
+
+#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
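The bitarray API above is widened to 32-bit sequence numbers, and the distance between two seqnos has to be evaluated in a signed 32-bit type so that wrap-around is classified correctly. A small sketch of that window check (window size and names are illustrative):

    #include <linux/types.h>

    #define DEMO_WINDOW_SIZE	64

    static bool demo_seqno_in_window(u32 last_seqno, u32 curr_seqno)
    {
    	/* unsigned subtraction wraps; the cast recovers the signed distance */
    	s32 diff = last_seqno - curr_seqno;

    	return diff >= 0 && diff < DEMO_WINDOW_SIZE;
    }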
diff --git a/drivers/staging/batman-adv/device.c b/drivers/staging/batman-adv/device.c
deleted file mode 100644
index 32204b5572d..00000000000
--- a/drivers/staging/batman-adv/device.c
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include <linux/device.h>
-#include <linux/slab.h>
-#include "main.h"
-#include "device.h"
-#include "send.h"
-#include "types.h"
-#include "hash.h"
-#include "hard-interface.h"
-
-static struct class *batman_class;
-
-static int Major; /* Major number assigned to our device driver */
-
-static const struct file_operations fops = {
- .open = bat_device_open,
- .release = bat_device_release,
- .read = bat_device_read,
- .write = bat_device_write,
- .poll = bat_device_poll,
-};
-
-static struct device_client *device_client_hash[256];
-
-void bat_device_init(void)
-{
- memset(device_client_hash, 0, sizeof(device_client_hash));
-}
-
-int bat_device_setup(void)
-{
- int tmp_major;
-
- if (Major)
- return 1;
-
- /* register our device - kernel assigns a free major number */
- tmp_major = register_chrdev(0, DRIVER_DEVICE, &fops);
- if (tmp_major < 0) {
- printk(KERN_ERR "batman-adv:"
- "Registering the character device failed with %d\n",
- tmp_major);
- return 0;
- }
-
- batman_class = class_create(THIS_MODULE, "batman-adv");
-
- if (IS_ERR(batman_class)) {
- printk(KERN_ERR "batman-adv:"
- "Could not register class 'batman-adv'\n");
- return 0;
- }
-
- device_create(batman_class, NULL, MKDEV(tmp_major, 0), NULL,
- "batman-adv");
-
- Major = tmp_major;
- return 1;
-}
-
-void bat_device_destroy(void)
-{
- if (!Major)
- return;
-
- device_destroy(batman_class, MKDEV(Major, 0));
- class_destroy(batman_class);
-
- /* Unregister the device */
- unregister_chrdev(Major, DRIVER_DEVICE);
-
- Major = 0;
-}
-
-int bat_device_open(struct inode *inode, struct file *file)
-{
- unsigned int i;
- struct device_client *device_client;
-
- device_client = kmalloc(sizeof(struct device_client), GFP_KERNEL);
-
- if (!device_client)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(device_client_hash); i++) {
- if (!device_client_hash[i]) {
- device_client_hash[i] = device_client;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(device_client_hash)) {
- printk(KERN_ERR "batman-adv:"
- "Error - can't add another packet client: "
- "maximum number of clients reached\n");
- kfree(device_client);
- return -EXFULL;
- }
-
- INIT_LIST_HEAD(&device_client->queue_list);
- device_client->queue_len = 0;
- device_client->index = i;
- spin_lock_init(&device_client->lock);
- init_waitqueue_head(&device_client->queue_wait);
-
- file->private_data = device_client;
-
- inc_module_count();
- return 0;
-}
-
-int bat_device_release(struct inode *inode, struct file *file)
-{
- struct device_client *device_client =
- (struct device_client *)file->private_data;
- struct device_packet *device_packet;
- struct list_head *list_pos, *list_pos_tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&device_client->lock, flags);
-
- /* for all packets in the queue ... */
- list_for_each_safe(list_pos, list_pos_tmp, &device_client->queue_list) {
- device_packet = list_entry(list_pos,
- struct device_packet, list);
-
- list_del(list_pos);
- kfree(device_packet);
- }
-
- device_client_hash[device_client->index] = NULL;
- spin_unlock_irqrestore(&device_client->lock, flags);
-
- kfree(device_client);
- dec_module_count();
-
- return 0;
-}
-
-ssize_t bat_device_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct device_client *device_client =
- (struct device_client *)file->private_data;
- struct device_packet *device_packet;
- int error;
- unsigned long flags;
-
- if ((file->f_flags & O_NONBLOCK) && (device_client->queue_len == 0))
- return -EAGAIN;
-
- if ((!buf) || (count < sizeof(struct icmp_packet)))
- return -EINVAL;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- error = wait_event_interruptible(device_client->queue_wait,
- device_client->queue_len);
-
- if (error)
- return error;
-
- spin_lock_irqsave(&device_client->lock, flags);
-
- device_packet = list_first_entry(&device_client->queue_list,
- struct device_packet, list);
- list_del(&device_packet->list);
- device_client->queue_len--;
-
- spin_unlock_irqrestore(&device_client->lock, flags);
-
- error = __copy_to_user(buf, &device_packet->icmp_packet,
- sizeof(struct icmp_packet));
-
- kfree(device_packet);
-
- if (error)
- return -EFAULT;
-
- return sizeof(struct icmp_packet);
-}
-
-ssize_t bat_device_write(struct file *file, const char __user *buff,
- size_t len, loff_t *off)
-{
- struct device_client *device_client =
- (struct device_client *)file->private_data;
- struct icmp_packet icmp_packet;
- struct orig_node *orig_node;
- struct batman_if *batman_if;
- uint8_t dstaddr[ETH_ALEN];
- unsigned long flags;
-
- if (len < sizeof(struct icmp_packet)) {
- bat_dbg(DBG_BATMAN, "batman-adv:"
- "Error - can't send packet from char device: "
- "invalid packet size\n");
- return -EINVAL;
- }
-
- if (!access_ok(VERIFY_READ, buff, sizeof(struct icmp_packet)))
- return -EFAULT;
-
- if (__copy_from_user(&icmp_packet, buff, sizeof(icmp_packet)))
- return -EFAULT;
-
- if (icmp_packet.packet_type != BAT_ICMP) {
- bat_dbg(DBG_BATMAN, "batman-adv:"
- "Error - can't send packet from char device: "
- "got bogus packet type (expected: BAT_ICMP)\n");
- return -EINVAL;
- }
-
- if (icmp_packet.msg_type != ECHO_REQUEST) {
- bat_dbg(DBG_BATMAN, "batman-adv:"
- "Error - can't send packet from char device: "
- "got bogus message type (expected: ECHO_REQUEST)\n");
- return -EINVAL;
- }
-
- icmp_packet.uid = device_client->index;
-
- if (icmp_packet.version != COMPAT_VERSION) {
- icmp_packet.msg_type = PARAMETER_PROBLEM;
- icmp_packet.ttl = COMPAT_VERSION;
- bat_device_add_packet(device_client, &icmp_packet);
- goto out;
- }
-
- if (atomic_read(&module_state) != MODULE_ACTIVE)
- goto dst_unreach;
-
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
-
- if (!orig_node)
- goto unlock;
-
- if (!orig_node->router)
- goto unlock;
-
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-
- spin_unlock_irqrestore(&orig_hash_lock, flags);
-
- if (!batman_if)
- goto dst_unreach;
-
- if (batman_if->if_status != IF_ACTIVE)
- goto dst_unreach;
-
- memcpy(icmp_packet.orig,
- batman_if->net_dev->dev_addr,
- ETH_ALEN);
-
- send_raw_packet((unsigned char *)&icmp_packet,
- sizeof(struct icmp_packet),
- batman_if, dstaddr);
-
- goto out;
-
-unlock:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
-dst_unreach:
- icmp_packet.msg_type = DESTINATION_UNREACHABLE;
- bat_device_add_packet(device_client, &icmp_packet);
-out:
- return len;
-}
-
-unsigned int bat_device_poll(struct file *file, poll_table *wait)
-{
- struct device_client *device_client =
- (struct device_client *)file->private_data;
-
- poll_wait(file, &device_client->queue_wait, wait);
-
- if (device_client->queue_len > 0)
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-void bat_device_add_packet(struct device_client *device_client,
- struct icmp_packet *icmp_packet)
-{
- struct device_packet *device_packet;
- unsigned long flags;
-
- device_packet = kmalloc(sizeof(struct device_packet), GFP_ATOMIC);
-
- if (!device_packet)
- return;
-
- INIT_LIST_HEAD(&device_packet->list);
- memcpy(&device_packet->icmp_packet, icmp_packet,
- sizeof(struct icmp_packet));
-
- spin_lock_irqsave(&device_client->lock, flags);
-
- /* while waiting for the lock the device_client could have been
- * deleted */
- if (!device_client_hash[icmp_packet->uid]) {
- spin_unlock_irqrestore(&device_client->lock, flags);
- kfree(device_packet);
- return;
- }
-
- list_add_tail(&device_packet->list, &device_client->queue_list);
- device_client->queue_len++;
-
- if (device_client->queue_len > 100) {
- device_packet = list_first_entry(&device_client->queue_list,
- struct device_packet, list);
-
- list_del(&device_packet->list);
- kfree(device_packet);
- device_client->queue_len--;
- }
-
- spin_unlock_irqrestore(&device_client->lock, flags);
-
- wake_up(&device_client->queue_wait);
-}
-
-void bat_device_receive_packet(struct icmp_packet *icmp_packet)
-{
- struct device_client *hash = device_client_hash[icmp_packet->uid];
-
- if (hash)
- bat_device_add_packet(hash, icmp_packet);
-}
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 96c86c87301..6e973a79aa2 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -108,7 +108,7 @@ static void set_primary_if(struct bat_priv *bat_priv,
set_main_if_addr(batman_if->net_dev->dev_addr);
batman_packet = (struct batman_packet *)(batman_if->packet_buff);
- batman_packet->flags = 0;
+ batman_packet->flags = PRIMARIES_FIRST_HOP;
batman_packet->ttl = TTL;
/***
@@ -128,6 +128,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
static void update_mac_addresses(struct batman_if *batman_if)
{
+ if (!batman_if || !batman_if->packet_buff)
+ return;
+
addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -149,12 +152,10 @@ static void check_known_mac_addr(uint8_t *addr)
if (!compare_orig(batman_if->net_dev->dev_addr, addr))
continue;
- printk(KERN_WARNING "batman-adv:"
- "The newly added mac address (%pM) already exists on: %s\n",
- addr, batman_if->dev);
- printk(KERN_WARNING "batman-adv:"
- "It is strongly recommended to keep mac addresses unique"
- "to avoid problems!\n");
+ pr_warning("The newly added mac address (%pM) already exists "
+ "on: %s\n", addr, batman_if->dev);
+ pr_warning("It is strongly recommended to keep mac addresses "
+ "unique to avoid problems!\n");
}
rcu_read_unlock();
}
@@ -188,14 +189,13 @@ void update_min_mtu(void)
soft_device->mtu = min_mtu;
}
-static void hardif_activate_interface(struct bat_priv *bat_priv,
+static void hardif_activate_interface(struct net_device *net_dev,
+ struct bat_priv *bat_priv,
struct batman_if *batman_if)
{
if (batman_if->if_status != IF_INACTIVE)
return;
- dev_hold(batman_if->net_dev);
-
update_mac_addresses(batman_if);
batman_if->if_status = IF_TO_BE_ACTIVATED;
@@ -206,8 +206,7 @@ static void hardif_activate_interface(struct bat_priv *bat_priv,
if (!bat_priv->primary_if)
set_primary_if(bat_priv, batman_if);
- printk(KERN_INFO "batman-adv:Interface activated: %s\n",
- batman_if->dev);
+ bat_info(net_dev, "Interface activated: %s\n", batman_if->dev);
if (atomic_read(&module_state) == MODULE_INACTIVE)
activate_module();
@@ -216,18 +215,16 @@ static void hardif_activate_interface(struct bat_priv *bat_priv,
return;
}
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct net_device *net_dev,
+ struct batman_if *batman_if)
{
if ((batman_if->if_status != IF_ACTIVE) &&
(batman_if->if_status != IF_TO_BE_ACTIVATED))
return;
- dev_put(batman_if->net_dev);
-
batman_if->if_status = IF_INACTIVE;
- printk(KERN_INFO "batman-adv:Interface deactivated: %s\n",
- batman_if->dev);
+ bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
update_min_mtu();
}
@@ -245,9 +242,8 @@ int hardif_enable_interface(struct batman_if *batman_if)
batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
if (!batman_if->packet_buff) {
- printk(KERN_ERR "batman-adv:"
- "Can't add interface packet (%s): out of memory\n",
- batman_if->dev);
+ bat_err(soft_device, "Can't add interface packet (%s): "
+ "out of memory\n", batman_if->dev);
goto err;
}
@@ -265,15 +261,14 @@ int hardif_enable_interface(struct batman_if *batman_if)
orig_hash_add_if(batman_if, bat_priv->num_ifaces);
atomic_set(&batman_if->seqno, 1);
- printk(KERN_INFO "batman-adv:Adding interface: %s\n", batman_if->dev);
+ bat_info(soft_device, "Adding interface: %s\n", batman_if->dev);
if (hardif_is_iface_up(batman_if))
- hardif_activate_interface(bat_priv, batman_if);
+ hardif_activate_interface(soft_device, bat_priv, batman_if);
else
- printk(KERN_ERR "batman-adv:"
- "Not using interface %s "
- "(retrying later): interface not active\n",
- batman_if->dev);
+ bat_err(soft_device, "Not using interface %s "
+ "(retrying later): interface not active\n",
+ batman_if->dev);
/* begin scheduling originator messages on that interface */
schedule_own_packet(batman_if);
@@ -291,12 +286,12 @@ void hardif_disable_interface(struct batman_if *batman_if)
struct bat_priv *bat_priv = netdev_priv(soft_device);
if (batman_if->if_status == IF_ACTIVE)
- hardif_deactivate_interface(batman_if);
+ hardif_deactivate_interface(soft_device, batman_if);
if (batman_if->if_status != IF_INACTIVE)
return;
- printk(KERN_INFO "batman-adv:Removing interface: %s\n", batman_if->dev);
+ bat_info(soft_device, "Removing interface: %s\n", batman_if->dev);
bat_priv->num_ifaces--;
orig_hash_del_if(batman_if, bat_priv->num_ifaces);
@@ -321,12 +316,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
if (ret != 1)
goto out;
+ dev_hold(net_dev);
+
batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
if (!batman_if) {
- printk(KERN_ERR "batman-adv:"
- "Can't add interface (%s): out of memory\n",
+ pr_err("Can't add interface (%s): out of memory\n",
net_dev->name);
- goto out;
+ goto release_dev;
}
batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -340,6 +336,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
batman_if->if_num = -1;
batman_if->net_dev = net_dev;
batman_if->if_status = IF_NOT_IN_USE;
+ batman_if->packet_buff = NULL;
INIT_LIST_HEAD(&batman_if->list);
check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -350,6 +347,8 @@ free_dev:
kfree(batman_if->dev);
free_if:
kfree(batman_if);
+release_dev:
+ dev_put(net_dev);
out:
return NULL;
}
@@ -378,6 +377,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
batman_if->if_status = IF_TO_BE_REMOVED;
list_del_rcu(&batman_if->list);
sysfs_del_hardif(&batman_if->hardif_obj);
+ dev_put(batman_if->net_dev);
call_rcu(&batman_if->rcu, hardif_free_interface);
}
@@ -397,21 +397,19 @@ static int hard_if_event(struct notifier_block *this,
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
- if (!batman_if)
- batman_if = hardif_add_interface(net_dev);
+ if (!batman_if && event == NETDEV_REGISTER)
+ batman_if = hardif_add_interface(net_dev);
if (!batman_if)
goto out;
switch (event) {
- case NETDEV_REGISTER:
- break;
case NETDEV_UP:
- hardif_activate_interface(bat_priv, batman_if);
+ hardif_activate_interface(soft_device, bat_priv, batman_if);
break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
- hardif_deactivate_interface(batman_if);
+ hardif_deactivate_interface(soft_device, batman_if);
break;
case NETDEV_UNREGISTER:
hardif_remove_interface(batman_if);
@@ -437,10 +435,10 @@ out:
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_packet *batman_packet;
struct batman_if *batman_if;
- struct net_device_stats *stats;
- struct rtnl_link_stats64 temp;
int ret;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -469,16 +467,10 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
if (batman_if->if_status != IF_ACTIVE)
goto err_free;
- stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
- if (stats) {
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- }
-
batman_packet = (struct batman_packet *)skb->data;
if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
batman_packet->version);
goto err_free;
@@ -500,7 +492,7 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
/* unicast packet */
case BAT_UNICAST:
- ret = recv_unicast_packet(skb);
+ ret = recv_unicast_packet(skb, batman_if);
break;
/* broadcast packet */
@@ -531,7 +523,6 @@ err_out:
return NET_RX_DROP;
}
-
struct notifier_block hard_if_notifier = {
.notifier_call = hard_if_event,
};
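The hard-interface changes above move the net_device reference count to add/remove time: dev_hold() is taken once in hardif_add_interface() and the matching dev_put() happens in hardif_remove_interface(), instead of holding and dropping across every activate/deactivate cycle. A sketch of that ownership pattern (struct demo_if stands in for batman_if):

    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct demo_if {
    	struct net_device *net_dev;
    };

    static struct demo_if *demo_if_add(struct net_device *net_dev)
    {
    	struct demo_if *dif;

    	dev_hold(net_dev);		/* pin the device for our whole lifetime */

    	dif = kzalloc(sizeof(*dif), GFP_ATOMIC);
    	if (!dif) {
    		dev_put(net_dev);	/* error path must drop the reference */
    		return NULL;
    	}

    	dif->net_dev = net_dev;
    	return dif;
    }

    static void demo_if_remove(struct demo_if *dif)
    {
    	dev_put(dif->net_dev);		/* balanced with demo_if_add() */
    	kfree(dif);
    }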
diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h
index 1e5fc3e720f..d5640b045cb 100644
--- a/drivers/staging/batman-adv/hard-interface.h
+++ b/drivers/staging/batman-adv/hard-interface.h
@@ -19,6 +19,9 @@
*
*/
+#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
+#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
+
#define IF_NOT_IN_USE 0
#define IF_TO_BE_REMOVED 1
#define IF_INACTIVE 2
@@ -38,3 +41,5 @@ int batman_skb_recv(struct sk_buff *skb,
struct net_device *orig_dev);
int hardif_min_mtu(void);
void update_min_mtu(void);
+
+#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
diff --git a/drivers/staging/batman-adv/hash.c b/drivers/staging/batman-adv/hash.c
index d4a4adc5704..1286f8ff44f 100644
--- a/drivers/staging/batman-adv/hash.c
+++ b/drivers/staging/batman-adv/hash.c
@@ -23,7 +23,7 @@
#include "hash.h"
/* clears the hash */
-void hash_init(struct hashtable_t *hash)
+static void hash_init(struct hashtable_t *hash)
{
int i;
diff --git a/drivers/staging/batman-adv/hash.h b/drivers/staging/batman-adv/hash.h
index ea6d21e0125..c483e1129fc 100644
--- a/drivers/staging/batman-adv/hash.h
+++ b/drivers/staging/batman-adv/hash.h
@@ -19,8 +19,9 @@
*
*/
-#ifndef _BATMAN_HASH_H
-#define _BATMAN_HASH_H
+#ifndef _NET_BATMAN_ADV_HASH_H_
+#define _NET_BATMAN_ADV_HASH_H_
+
#define HASHIT(name) struct hash_it_t name = { \
.index = -1, .bucket = NULL, \
.prev_bucket = NULL, \
@@ -56,9 +57,6 @@ struct hashtable_t {
* argument and the size the second */
};
-/* clears the hash */
-void hash_init(struct hashtable_t *hash);
-
/* allocates and clears the hash */
struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
hashdata_choose_cb choose);
@@ -99,6 +97,4 @@ struct hashtable_t *hash_resize(struct hashtable_t *hash, int size);
struct hash_it_t *hash_iterate(struct hashtable_t *hash,
struct hash_it_t *iter_in);
-/* print the hash table for debugging */
-void hash_debug(struct hashtable_t *hash);
-#endif
+#endif /* _NET_BATMAN_ADV_HASH_H_ */
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
new file mode 100644
index 00000000000..3ae7dd2d2d4
--- /dev/null
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include "icmp_socket.h"
+#include "send.h"
+#include "types.h"
+#include "hash.h"
+#include "hard-interface.h"
+
+
+static struct socket_client *socket_client_hash[256];
+
+static void bat_socket_add_packet(struct socket_client *socket_client,
+ struct icmp_packet_rr *icmp_packet,
+ size_t icmp_len);
+
+void bat_socket_init(void)
+{
+ memset(socket_client_hash, 0, sizeof(socket_client_hash));
+}
+
+static int bat_socket_open(struct inode *inode, struct file *file)
+{
+ unsigned int i;
+ struct socket_client *socket_client;
+
+ socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
+
+ if (!socket_client)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) {
+ if (!socket_client_hash[i]) {
+ socket_client_hash[i] = socket_client;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(socket_client_hash)) {
+ pr_err("Error - can't add another packet client: "
+ "maximum number of clients reached\n");
+ kfree(socket_client);
+ return -EXFULL;
+ }
+
+ INIT_LIST_HEAD(&socket_client->queue_list);
+ socket_client->queue_len = 0;
+ socket_client->index = i;
+ socket_client->bat_priv = inode->i_private;
+ spin_lock_init(&socket_client->lock);
+ init_waitqueue_head(&socket_client->queue_wait);
+
+ file->private_data = socket_client;
+
+ inc_module_count();
+ return 0;
+}
+
+static int bat_socket_release(struct inode *inode, struct file *file)
+{
+ struct socket_client *socket_client = file->private_data;
+ struct socket_packet *socket_packet;
+ struct list_head *list_pos, *list_pos_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&socket_client->lock, flags);
+
+ /* for all packets in the queue ... */
+ list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
+ socket_packet = list_entry(list_pos,
+ struct socket_packet, list);
+
+ list_del(list_pos);
+ kfree(socket_packet);
+ }
+
+ socket_client_hash[socket_client->index] = NULL;
+ spin_unlock_irqrestore(&socket_client->lock, flags);
+
+ kfree(socket_client);
+ dec_module_count();
+
+ return 0;
+}
+
+static ssize_t bat_socket_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct socket_client *socket_client = file->private_data;
+ struct socket_packet *socket_packet;
+ size_t packet_len;
+ int error;
+ unsigned long flags;
+
+ if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
+ return -EAGAIN;
+
+ if ((!buf) || (count < sizeof(struct icmp_packet)))
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ error = wait_event_interruptible(socket_client->queue_wait,
+ socket_client->queue_len);
+
+ if (error)
+ return error;
+
+ spin_lock_irqsave(&socket_client->lock, flags);
+
+ socket_packet = list_first_entry(&socket_client->queue_list,
+ struct socket_packet, list);
+ list_del(&socket_packet->list);
+ socket_client->queue_len--;
+
+ spin_unlock_irqrestore(&socket_client->lock, flags);
+
+ error = __copy_to_user(buf, &socket_packet->icmp_packet,
+ socket_packet->icmp_len);
+
+ packet_len = socket_packet->icmp_len;
+ kfree(socket_packet);
+
+ if (error)
+ return -EFAULT;
+
+ return packet_len;
+}
+
+static ssize_t bat_socket_write(struct file *file, const char __user *buff,
+ size_t len, loff_t *off)
+{
+ struct socket_client *socket_client = file->private_data;
+ struct bat_priv *bat_priv = socket_client->bat_priv;
+ struct icmp_packet_rr icmp_packet;
+ struct orig_node *orig_node;
+ struct batman_if *batman_if;
+ size_t packet_len = sizeof(struct icmp_packet);
+ uint8_t dstaddr[ETH_ALEN];
+ unsigned long flags;
+
+ if (len < sizeof(struct icmp_packet)) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: "
+ "invalid packet size\n");
+ return -EINVAL;
+ }
+
+ if (!bat_priv->primary_if)
+ return -EFAULT;
+
+ if (len >= sizeof(struct icmp_packet_rr))
+ packet_len = sizeof(struct icmp_packet_rr);
+
+ if (!access_ok(VERIFY_READ, buff, packet_len))
+ return -EFAULT;
+
+ if (__copy_from_user(&icmp_packet, buff, packet_len))
+ return -EFAULT;
+
+ if (icmp_packet.packet_type != BAT_ICMP) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: "
+ "got bogus packet type (expected: BAT_ICMP)\n");
+ return -EINVAL;
+ }
+
+ if (icmp_packet.msg_type != ECHO_REQUEST) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: "
+ "got bogus message type (expected: ECHO_REQUEST)\n");
+ return -EINVAL;
+ }
+
+ icmp_packet.uid = socket_client->index;
+
+ if (icmp_packet.version != COMPAT_VERSION) {
+ icmp_packet.msg_type = PARAMETER_PROBLEM;
+ icmp_packet.ttl = COMPAT_VERSION;
+ bat_socket_add_packet(socket_client, &icmp_packet, packet_len);
+ goto out;
+ }
+
+ if (atomic_read(&module_state) != MODULE_ACTIVE)
+ goto dst_unreach;
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
+
+ if (!orig_node)
+ goto unlock;
+
+ if (!orig_node->router)
+ goto unlock;
+
+ batman_if = orig_node->router->if_incoming;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ if (!batman_if)
+ goto dst_unreach;
+
+ if (batman_if->if_status != IF_ACTIVE)
+ goto dst_unreach;
+
+ memcpy(icmp_packet.orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+
+ if (packet_len == sizeof(struct icmp_packet_rr))
+ memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
+
+ send_raw_packet((unsigned char *)&icmp_packet,
+ packet_len, batman_if, dstaddr);
+
+ goto out;
+
+unlock:
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+dst_unreach:
+ icmp_packet.msg_type = DESTINATION_UNREACHABLE;
+ bat_socket_add_packet(socket_client, &icmp_packet, packet_len);
+out:
+ return len;
+}
+
+static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
+{
+ struct socket_client *socket_client = file->private_data;
+
+ poll_wait(file, &socket_client->queue_wait, wait);
+
+ if (socket_client->queue_len > 0)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = bat_socket_open,
+ .release = bat_socket_release,
+ .read = bat_socket_read,
+ .write = bat_socket_write,
+ .poll = bat_socket_poll,
+};
+
+int bat_socket_setup(struct bat_priv *bat_priv)
+{
+ struct dentry *d;
+
+ if (!bat_priv->debug_dir)
+ goto err;
+
+ d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
+ bat_priv->debug_dir, bat_priv, &fops);
+ if (!d)
+ goto err;
+
+ return 0;
+
+err:
+ return 1;
+}
+
+static void bat_socket_add_packet(struct socket_client *socket_client,
+ struct icmp_packet_rr *icmp_packet,
+ size_t icmp_len)
+{
+ struct socket_packet *socket_packet;
+ unsigned long flags;
+
+ socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
+
+ if (!socket_packet)
+ return;
+
+ INIT_LIST_HEAD(&socket_packet->list);
+ memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
+ socket_packet->icmp_len = icmp_len;
+
+ spin_lock_irqsave(&socket_client->lock, flags);
+
+ /* while waiting for the lock the socket_client could have been
+ * deleted */
+ if (!socket_client_hash[icmp_packet->uid]) {
+ spin_unlock_irqrestore(&socket_client->lock, flags);
+ kfree(socket_packet);
+ return;
+ }
+
+ list_add_tail(&socket_packet->list, &socket_client->queue_list);
+ socket_client->queue_len++;
+
+ if (socket_client->queue_len > 100) {
+ socket_packet = list_first_entry(&socket_client->queue_list,
+ struct socket_packet, list);
+
+ list_del(&socket_packet->list);
+ kfree(socket_packet);
+ socket_client->queue_len--;
+ }
+
+ spin_unlock_irqrestore(&socket_client->lock, flags);
+
+ wake_up(&socket_client->queue_wait);
+}
+
+void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
+ size_t icmp_len)
+{
+ struct socket_client *hash = socket_client_hash[icmp_packet->uid];
+
+ if (hash)
+ bat_socket_add_packet(hash, icmp_packet, icmp_len);
+}
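bat_socket_add_packet() and bat_socket_read() above form a bounded producer/consumer: packets are appended to a per-client list under a spinlock, the backlog is capped at 100 entries by discarding the oldest one, and readers sleep on a waitqueue until queue_len becomes non-zero. The fragment below is a stripped-down sketch of the same pattern; the demo_* types and helpers are illustrative assumptions, not batman-adv symbols.

/* Illustrative sketch only - spinlock-protected FIFO with a hard cap. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_item {
	struct list_head list;
	int payload;
};

struct demo_queue {
	struct list_head head;
	int len;
	spinlock_t lock;
	wait_queue_head_t wait;
};

static void demo_queue_init(struct demo_queue *q)
{
	INIT_LIST_HEAD(&q->head);
	q->len = 0;
	spin_lock_init(&q->lock);
	init_waitqueue_head(&q->wait);
}

/* producer side: may run in atomic context, hence GFP_ATOMIC */
static void demo_queue_add(struct demo_queue *q, int payload)
{
	struct demo_item *item, *oldest;
	unsigned long flags;

	item = kmalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;
	item->payload = payload;

	spin_lock_irqsave(&q->lock, flags);
	list_add_tail(&item->list, &q->head);
	q->len++;

	if (q->len > 100) {	/* cap the backlog: drop the oldest entry */
		oldest = list_first_entry(&q->head, struct demo_item, list);
		list_del(&oldest->list);
		kfree(oldest);
		q->len--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	wake_up(&q->wait);
}

/* consumer side: blocks until at least one item is queued */
static int demo_queue_take(struct demo_queue *q, int *payload)
{
	struct demo_item *item;
	unsigned long flags;
	int err;

	err = wait_event_interruptible(q->wait, q->len);
	if (err)
		return err;	/* interrupted by a signal */

	spin_lock_irqsave(&q->lock, flags);
	item = list_first_entry(&q->head, struct demo_item, list);
	list_del(&item->list);
	q->len--;
	spin_unlock_irqrestore(&q->lock, flags);

	*payload = item->payload;
	kfree(item);
	return 0;
}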
diff --git a/drivers/staging/batman-adv/device.h b/drivers/staging/batman-adv/icmp_socket.h
index eb14b371cea..bf9b348cde2 100644
--- a/drivers/staging/batman-adv/device.h
+++ b/drivers/staging/batman-adv/icmp_socket.h
@@ -19,18 +19,16 @@
*
*/
+#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
+#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
+
#include "types.h"
-void bat_device_init(void);
-int bat_device_setup(void);
-void bat_device_destroy(void);
-int bat_device_open(struct inode *inode, struct file *file);
-int bat_device_release(struct inode *inode, struct file *file);
-ssize_t bat_device_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos);
-ssize_t bat_device_write(struct file *file, const char __user *buff,
- size_t len, loff_t *off);
-unsigned int bat_device_poll(struct file *file, poll_table *wait);
-void bat_device_add_packet(struct device_client *device_client,
- struct icmp_packet *icmp_packet);
-void bat_device_receive_packet(struct icmp_packet *icmp_packet);
+#define ICMP_SOCKET "socket"
+
+void bat_socket_init(void);
+int bat_socket_setup(struct bat_priv *bat_priv);
+void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
+ size_t icmp_len);
+
+#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 74c70d589a9..ef7c20ae797 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -21,11 +21,12 @@
#include "main.h"
#include "bat_sysfs.h"
+#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
-#include "device.h"
+#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "types.h"
@@ -41,7 +42,6 @@ DEFINE_SPINLOCK(orig_hash_lock);
DEFINE_SPINLOCK(forw_bat_list_lock);
DEFINE_SPINLOCK(forw_bcast_list_lock);
-atomic_t vis_interval;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
@@ -49,7 +49,7 @@ int16_t num_hna;
struct net_device *soft_device;
-unsigned char broadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
atomic_t module_state;
static struct packet_type batman_adv_packet_type __read_mostly = {
@@ -59,18 +59,7 @@ static struct packet_type batman_adv_packet_type __read_mostly = {
struct workqueue_struct *bat_event_workqueue;
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug;
-
-module_param(debug, int, 0644);
-
-int bat_debug_type(int type)
-{
- return debug & type;
-}
-#endif
-
-int init_module(void)
+static int __init batman_init(void)
{
int retval;
@@ -80,8 +69,6 @@ int init_module(void)
atomic_set(&module_state, MODULE_INACTIVE);
- atomic_set(&vis_interval, 1000);/* TODO: raise this later, this is only
- * for debugging now. */
atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
@@ -92,23 +79,22 @@ int init_module(void)
if (!bat_event_workqueue)
return -ENOMEM;
- bat_device_init();
+ bat_socket_init();
+ debugfs_init();
/* initialize layer 2 interface */
soft_device = alloc_netdev(sizeof(struct bat_priv) , "bat%d",
interface_setup);
if (!soft_device) {
- printk(KERN_ERR "batman-adv:"
- "Unable to allocate the batman interface\n");
+ pr_err("Unable to allocate the batman interface\n");
goto end;
}
retval = register_netdev(soft_device);
if (retval < 0) {
- printk(KERN_ERR "batman-adv:"
- "Unable to register the batman interface: %i\n", retval);
+ pr_err("Unable to register the batman interface: %i\n", retval);
goto free_soft_device;
}
@@ -117,15 +103,22 @@ int init_module(void)
if (retval < 0)
goto unreg_soft_device;
+ retval = debugfs_add_meshif(soft_device);
+
+ if (retval < 0)
+ goto unreg_sysfs;
+
register_netdevice_notifier(&hard_if_notifier);
dev_add_pack(&batman_adv_packet_type);
- printk(KERN_INFO "batman-adv:"
- "B.A.T.M.A.N. advanced %s%s (compatibility version %i) loaded\n",
- SOURCE_VERSION, REVISION_VERSION_STR, COMPAT_VERSION);
+ pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) "
+ "loaded\n", SOURCE_VERSION, REVISION_VERSION_STR,
+ COMPAT_VERSION);
return 0;
+unreg_sysfs:
+ sysfs_del_meshif(soft_device);
unreg_soft_device:
unregister_netdev(soft_device);
soft_device = NULL;
@@ -138,14 +131,16 @@ end:
return -ENOMEM;
}
-void cleanup_module(void)
+static void __exit batman_exit(void)
{
deactivate_module();
+ debugfs_destroy();
unregister_netdevice_notifier(&hard_if_notifier);
hardif_remove_interfaces();
if (soft_device) {
+ debugfs_del_meshif(soft_device);
sysfs_del_meshif(soft_device);
unregister_netdev(soft_device);
soft_device = NULL;
@@ -157,7 +152,7 @@ void cleanup_module(void)
bat_event_workqueue = NULL;
}
-/* activates the module, creates bat device, starts timer ... */
+/* activates the module, starts timer ... */
void activate_module(void)
{
if (originator_init() < 1)
@@ -171,9 +166,6 @@ void activate_module(void)
hna_local_add(soft_device->dev_addr);
- if (bat_device_setup() < 1)
- goto end;
-
if (vis_init() < 1)
goto err;
@@ -182,8 +174,7 @@ void activate_module(void)
goto end;
err:
- printk(KERN_ERR "batman-adv:"
- "Unable to allocate memory for mesh information structures: "
+ pr_err("Unable to allocate memory for mesh information structures: "
"out of mem ?\n");
deactivate_module();
end:
@@ -208,7 +199,6 @@ void deactivate_module(void)
hna_global_free();
synchronize_net();
- bat_device_destroy();
synchronize_rcu();
atomic_set(&module_state, MODULE_INACTIVE);
@@ -226,8 +216,7 @@ void dec_module_count(void)
int addr_to_string(char *buff, uint8_t *addr)
{
- return sprintf(buff, MAC_FMT,
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ return sprintf(buff, "%pM", addr);
}
/* returns 1 if they are the same originator */
@@ -261,10 +250,13 @@ int choose_orig(void *data, int32_t size)
int is_my_mac(uint8_t *addr)
{
struct batman_if *batman_if;
+
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->net_dev) &&
- (compare_orig(batman_if->net_dev->dev_addr, addr))) {
+ if (batman_if->if_status != IF_ACTIVE)
+ continue;
+
+ if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
@@ -284,6 +276,9 @@ int is_mcast(uint8_t *addr)
return *addr & 0x01;
}
+module_init(batman_init);
+module_exit(batman_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
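The main.c hunk above replaces the legacy init_module()/cleanup_module() entry points with static __init/__exit functions registered through module_init()/module_exit(), the preferred form since it lets the init code be discarded after loading. A minimal skeleton of that shape, using a hypothetical "demo" module, looks like this:

/* Illustrative sketch only - module_init()/module_exit() skeleton. */
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("demo loaded\n");
	return 0;	/* returning non-zero aborts the module load */
}

static void __exit demo_exit(void)
{
	pr_info("demo unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("module_init/module_exit skeleton");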
diff --git a/drivers/staging/batman-adv/main.h b/drivers/staging/batman-adv/main.h
index 5f8343d360f..8513261b8a7 100644
--- a/drivers/staging/batman-adv/main.h
+++ b/drivers/staging/batman-adv/main.h
@@ -19,6 +19,9 @@
*
*/
+#ifndef _NET_BATMAN_ADV_MAIN_H_
+#define _NET_BATMAN_ADV_MAIN_H_
+
/* Kernel Programming */
#define LINUX
@@ -27,7 +30,7 @@
#define DRIVER_DESC "B.A.T.M.A.N. advanced"
#define DRIVER_DEVICE "batman-adv"
-#define SOURCE_VERSION "0.2.2-beta"
+#define SOURCE_VERSION "maint"
/* B.A.T.M.A.N. parameters */
@@ -36,10 +39,10 @@
#define JITTER 20
#define TTL 50 /* Time To Live of broadcast messages */
-#define PURGE_TIMEOUT 200000 /* purge originators after time in ms if no
+#define PURGE_TIMEOUT 200 /* purge originators after time in seconds if no
* valid packet comes in -> TODO: check
* influence on TQ_LOCAL_WINDOW_SIZE */
-#define LOCAL_HNA_TIMEOUT 3600000
+#define LOCAL_HNA_TIMEOUT 3600 /* in seconds */
#define TQ_LOCAL_WINDOW_SIZE 64 /* sliding packet range of received originator
* messages in sequence numbers (should be a
@@ -57,44 +60,42 @@
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
#define ETH_STR_LEN 20
+#define VIS_INTERVAL 5000 /* 5 seconds */
+
+/* how much worse secondary interfaces may be
+ * to be considered as bonding candidates */
+
+#define BONDING_TQ_THRESHOLD 50
+
#define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or
* change the size of
* forw_packet->direct_link_flags */
#define MAX_AGGREGATION_MS 100
#define RESET_PROTECTION_MS 30000
-#define EXPECTED_SEQNO_RANGE 4096
+#define EXPECTED_SEQNO_RANGE 65536
/* don't reset again within 30 seconds */
#define MODULE_INACTIVE 0
#define MODULE_ACTIVE 1
#define MODULE_DEACTIVATING 2
-#define BCAST_QUEUE_LEN 256
+#define BCAST_QUEUE_LEN 256
#define BATMAN_QUEUE_LEN 256
/*
* Debug Messages
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Prepend 'batman-adv: ' to
+ * kernel messages */
#define DBG_BATMAN 1 /* all messages related to routing / flooding /
* broadcasting / etc */
#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
+#define DBG_ALL 3
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-extern int debug;
+#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
-extern int bat_debug_type(int type);
-#define bat_dbg(type, fmt, arg...) do { \
- if (bat_debug_type(type)) \
- printk(KERN_DEBUG "batman-adv:" fmt, ## arg); \
- } \
- while (0)
-#else /* !CONFIG_BATMAN_ADV_DEBUG */
-#define bat_dbg(type, fmt, arg...) do { \
- } \
- while (0)
-#endif
/*
* Vis
@@ -117,6 +118,7 @@ extern int bat_debug_type(int type);
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/jiffies.h>
+#include <linux/seq_file.h>
#include "types.h"
#ifndef REVISION_VERSION
@@ -134,14 +136,13 @@ extern spinlock_t orig_hash_lock;
extern spinlock_t forw_bat_list_lock;
extern spinlock_t forw_bcast_list_lock;
-extern atomic_t vis_interval;
extern atomic_t bcast_queue_left;
extern atomic_t batman_queue_left;
extern int16_t num_hna;
extern struct net_device *soft_device;
-extern unsigned char broadcastAddr[];
+extern unsigned char broadcast_addr[];
extern atomic_t module_state;
extern struct workqueue_struct *bat_event_workqueue;
@@ -155,3 +156,44 @@ int choose_orig(void *data, int32_t size);
int is_my_mac(uint8_t *addr);
int is_bcast(uint8_t *addr);
int is_mcast(uint8_t *addr);
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+extern int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
+
+#define bat_dbg(type, bat_priv, fmt, arg...) \
+ do { \
+ if (atomic_read(&bat_priv->log_level) & type) \
+ debug_log(bat_priv, fmt, ## arg); \
+ } \
+ while (0)
+#else /* !CONFIG_BATMAN_ADV_DEBUG */
+static inline void bat_dbg(char type __attribute__((unused)),
+ struct bat_priv *bat_priv __attribute__((unused)),
+ char *fmt __attribute__((unused)), ...)
+{
+}
+#endif
+
+#define bat_warning(net_dev, fmt, arg...) \
+ do { \
+ struct net_device *_netdev = (net_dev); \
+ struct bat_priv *_batpriv = netdev_priv(_netdev); \
+ bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ pr_warning("%s: " fmt, _netdev->name, ## arg); \
+ } while (0)
+#define bat_info(net_dev, fmt, arg...) \
+ do { \
+ struct net_device *_netdev = (net_dev); \
+ struct bat_priv *_batpriv = netdev_priv(_netdev); \
+ bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ pr_info("%s: " fmt, _netdev->name, ## arg); \
+ } while (0)
+#define bat_err(net_dev, fmt, arg...) \
+ do { \
+ struct net_device *_netdev = (net_dev); \
+ struct bat_priv *_batpriv = netdev_priv(_netdev); \
+ bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ pr_err("%s: " fmt, _netdev->name, ## arg); \
+ } while (0)
+
+#endif /* _NET_BATMAN_ADV_MAIN_H_ */
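The new bat_dbg() macro gates debug output on a per-mesh log_level bitmask read at runtime, replacing the removed compile-time 'debug' module parameter. The sketch below mirrors that shape with hypothetical demo_* names: each message class is one bit, and a message is only emitted if its bit is currently set in the mask.

/* Illustrative sketch only - runtime log mask, one bit per message class. */
#include <linux/atomic.h>
#include <linux/kernel.h>

#define DEMO_DBG_ROUTES	1
#define DEMO_DBG_TT	2

static atomic_t demo_log_level = ATOMIC_INIT(DEMO_DBG_ROUTES);

#define demo_dbg(type, fmt, arg...)				\
	do {							\
		if (atomic_read(&demo_log_level) & (type))	\
			pr_debug(fmt, ## arg);			\
	} while (0)

static void __maybe_unused demo_use(void)
{
	/* printed: DEMO_DBG_ROUTES is set in the mask */
	demo_dbg(DEMO_DBG_ROUTES, "route changed\n");

	/* suppressed: DEMO_DBG_TT is not set */
	demo_dbg(DEMO_DBG_TT, "translation table changed\n");
}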
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 568aef8371b..de5a8c1a810 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -56,28 +56,16 @@ err:
return 0;
}
-void originator_free(void)
-{
- unsigned long flags;
-
- if (!orig_hash)
- return;
-
- cancel_delayed_work_sync(&purge_orig_wq);
-
- spin_lock_irqsave(&orig_hash_lock, flags);
- hash_delete(orig_hash, free_orig_node);
- orig_hash = NULL;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
-}
-
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
uint8_t *neigh, struct batman_if *if_incoming)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct neigh_node *neigh_node;
- bat_dbg(DBG_BATMAN, "Creating new last-hop neighbor of originator\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Creating new last-hop neighbor of originator\n");
neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
if (!neigh_node)
@@ -93,7 +81,7 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
return neigh_node;
}
-void free_orig_node(void *data)
+static void free_orig_node(void *data)
{
struct list_head *list_pos, *list_pos_tmp;
struct neigh_node *neigh_node;
@@ -114,6 +102,21 @@ void free_orig_node(void *data)
kfree(orig_node);
}
+void originator_free(void)
+{
+ unsigned long flags;
+
+ if (!orig_hash)
+ return;
+
+ cancel_delayed_work_sync(&purge_orig_wq);
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ hash_delete(orig_hash, free_orig_node);
+ orig_hash = NULL;
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+}
+
/* this function finds or creates an originator entry for the given
* address if it does not exist */
struct orig_node *get_orig_node(uint8_t *addr)
@@ -129,7 +132,8 @@ struct orig_node *get_orig_node(uint8_t *addr)
if (orig_node != NULL)
return orig_node;
- bat_dbg(DBG_BATMAN, "Creating new originator: %pM\n", addr);
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Creating new originator: %pM\n", addr);
orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
if (!orig_node)
@@ -163,8 +167,8 @@ struct orig_node *get_orig_node(uint8_t *addr)
swaphash = hash_resize(orig_hash, orig_hash->size * 2);
if (swaphash == NULL)
- printk(KERN_ERR
- "batman-adv:Couldn't resize orig hash table\n");
+ bat_err(soft_device,
+ "Couldn't resize orig hash table\n");
else
orig_hash = swaphash;
}
@@ -182,6 +186,8 @@ free_orig_node:
static bool purge_orig_neighbors(struct orig_node *orig_node,
struct neigh_node **best_neigh_node)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct list_head *list_pos, *list_pos_tmp;
struct neigh_node *neigh_node;
bool neigh_purged = false;
@@ -193,20 +199,19 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
neigh_node = list_entry(list_pos, struct neigh_node, list);
if ((time_after(jiffies,
- (neigh_node->last_valid +
- ((PURGE_TIMEOUT * HZ) / 1000)))) ||
+ neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
(neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)) {
if (neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"neighbor purge: originator %pM, "
"neighbor: %pM, iface: %s\n",
orig_node->orig, neigh_node->addr,
neigh_node->if_incoming->dev);
else
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"neighbor timeout: originator %pM, "
"neighbor: %pM, last_valid: %lu\n",
orig_node->orig, neigh_node->addr,
@@ -226,21 +231,26 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
static bool purge_orig_node(struct orig_node *orig_node)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct neigh_node *best_neigh_node;
if (time_after(jiffies,
- (orig_node->last_valid +
- ((2 * PURGE_TIMEOUT * HZ) / 1000)))) {
+ orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Originator timeout: originator %pM, last_valid %lu\n",
orig_node->orig, (orig_node->last_valid / HZ));
return true;
} else {
- if (purge_orig_neighbors(orig_node, &best_neigh_node))
+ if (purge_orig_neighbors(orig_node, &best_neigh_node)) {
update_routes(orig_node, best_neigh_node,
orig_node->hna_buff,
orig_node->hna_buff_len);
+ /* update bonding candidates, we could have lost
+ * some candidates. */
+ update_bonding_candidates(bat_priv, orig_node);
+ }
}
return false;
@@ -271,49 +281,41 @@ void purge_orig(struct work_struct *work)
start_purge_timer();
}
-ssize_t orig_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off)
+int orig_seq_print_text(struct seq_file *seq, void *offset)
{
HASHIT(hashit);
+ struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct orig_node *orig_node;
struct neigh_node *neigh_node;
- size_t hdr_len, tmp_len;
- int batman_count = 0, bytes_written = 0;
+ int batman_count = 0;
+ int last_seen_secs;
+ int last_seen_msecs;
unsigned long flags;
char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
- if (!bat_priv->primary_if) {
- if (off == 0)
- return sprintf(buff,
- "BATMAN mesh %s disabled - "
+ if ((!bat_priv->primary_if) ||
+ (bat_priv->primary_if->if_status != IF_ACTIVE)) {
+ if (!bat_priv->primary_if)
+ return seq_printf(seq, "BATMAN mesh %s disabled - "
"please specify interfaces to enable it\n",
net_dev->name);
- return 0;
+ return seq_printf(seq, "BATMAN mesh %s "
+ "disabled - primary interface not active\n",
+ net_dev->name);
}
- if (bat_priv->primary_if->if_status != IF_ACTIVE && off == 0)
- return sprintf(buff,
- "BATMAN mesh %s "
- "disabled - primary interface not active\n",
- net_dev->name);
- else if (bat_priv->primary_if->if_status != IF_ACTIVE)
- return 0;
-
rcu_read_lock();
- hdr_len = sprintf(buff,
- " %-14s (%s/%i) %17s [%10s]: %20s "
- "... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s (%s)]\n",
- "Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
- "Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR,
+ seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s (%s)]\n",
+ SOURCE_VERSION, REVISION_VERSION_STR,
bat_priv->primary_if->dev, bat_priv->primary_if->addr_str,
net_dev->name);
+ seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
+ "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
+ "outgoingIF", "Potential nexthops");
rcu_read_unlock();
- if (off < hdr_len)
- bytes_written = hdr_len;
-
spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
@@ -326,44 +328,34 @@ ssize_t orig_fill_buffer_text(struct net_device *net_dev, char *buff,
if (orig_node->router->tq_avg == 0)
continue;
- /* estimated line length */
- if (count < bytes_written + 200)
- break;
-
addr_to_string(orig_str, orig_node->orig);
addr_to_string(router_str, orig_node->router->addr);
+ last_seen_secs = jiffies_to_msecs(jiffies -
+ orig_node->last_valid) / 1000;
+ last_seen_msecs = jiffies_to_msecs(jiffies -
+ orig_node->last_valid) % 1000;
- tmp_len = sprintf(buff + bytes_written,
- "%-17s (%3i) %17s [%10s]:",
- orig_str, orig_node->router->tq_avg,
- router_str,
- orig_node->router->if_incoming->dev);
+ seq_printf(seq, "%-17s %4i.%03is (%3i) %17s [%10s]:",
+ orig_str, last_seen_secs, last_seen_msecs,
+ orig_node->router->tq_avg, router_str,
+ orig_node->router->if_incoming->dev);
list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
addr_to_string(orig_str, neigh_node->addr);
- tmp_len += sprintf(buff + bytes_written + tmp_len,
- " %17s (%3i)", orig_str,
+ seq_printf(seq, " %17s (%3i)", orig_str,
neigh_node->tq_avg);
}
- tmp_len += sprintf(buff + bytes_written + tmp_len, "\n");
-
+ seq_printf(seq, "\n");
batman_count++;
- hdr_len += tmp_len;
-
- if (off >= hdr_len)
- continue;
-
- bytes_written += tmp_len;
}
spin_unlock_irqrestore(&orig_hash_lock, flags);
- if ((batman_count == 0) && (off == 0))
- bytes_written += sprintf(buff + bytes_written,
- "No batman nodes in range ...\n");
+ if ((batman_count == 0))
+ seq_printf(seq, "No batman nodes in range ...\n");
- return bytes_written;
+ return 0;
}
static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
@@ -373,8 +365,7 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS,
GFP_ATOMIC);
if (!data_ptr) {
- printk(KERN_ERR
- "batman-adv:Can't resize orig: out of memory\n");
+ pr_err("Can't resize orig: out of memory\n");
return -1;
}
@@ -385,8 +376,7 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr) {
- printk(KERN_ERR
- "batman-adv:Can't resize orig: out of memory\n");
+ pr_err("Can't resize orig: out of memory\n");
return -1;
}
@@ -401,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -414,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
goto err;
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
@@ -435,8 +426,7 @@ static int orig_node_del_if(struct orig_node *orig_node,
chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS;
data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
if (!data_ptr) {
- printk(KERN_ERR
- "batman-adv:Can't resize orig: out of memory\n");
+ pr_err("Can't resize orig: out of memory\n");
return -1;
}
@@ -457,8 +447,7 @@ free_bcast_own:
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr) {
- printk(KERN_ERR
- "batman-adv:Can't resize orig: out of memory\n");
+ pr_err("Can't resize orig: out of memory\n");
return -1;
}
@@ -480,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -512,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
rcu_read_unlock();
batman_if->if_num = -1;
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
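originator.c above moves from the hand-rolled offset/length bookkeeping of orig_fill_buffer_text() to a seq_file show routine, orig_seq_print_text(), which simply seq_printf()s every line and lets the seq_file core handle buffering and partial reads. A minimal debugfs + seq_file sketch of that pattern follows; the demo_* names are hypothetical, and the NULL-return check on debugfs_create_file() matches the debugfs behaviour of this kernel generation.

/* Illustrative sketch only - read-only debugfs file backed by seq_file. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static char demo_tag[] = "originators";

static int demo_show(struct seq_file *seq, void *offset)
{
	/* seq->private carries whatever was handed to single_open() */
	char *tag = seq->private;

	seq_printf(seq, "hello from %s\n", tag);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *demo_dentry;

static int __init demo_init(void)
{
	demo_dentry = debugfs_create_file("demo", S_IRUGO, NULL,
					  demo_tag, &demo_fops);
	if (!demo_dentry)	/* debugfs of this era returns NULL on failure */
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_dentry);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");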
diff --git a/drivers/staging/batman-adv/originator.h b/drivers/staging/batman-adv/originator.h
index afbc7c0e8aa..e88411d9db7 100644
--- a/drivers/staging/batman-adv/originator.h
+++ b/drivers/staging/batman-adv/originator.h
@@ -19,16 +19,18 @@
*
*/
+#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
+#define _NET_BATMAN_ADV_ORIGINATOR_H_
+
int originator_init(void);
-void free_orig_node(void *data);
void originator_free(void);
void purge_orig(struct work_struct *work);
-struct orig_node *orig_find(char *mac);
struct orig_node *get_orig_node(uint8_t *addr);
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
uint8_t *neigh, struct batman_if *if_incoming);
-ssize_t orig_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off);
+int orig_seq_print_text(struct seq_file *seq, void *offset);
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
+
+#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/drivers/staging/batman-adv/packet.h b/drivers/staging/batman-adv/packet.h
index 152f57b1c6c..abb5e460f23 100644
--- a/drivers/staging/batman-adv/packet.h
+++ b/drivers/staging/batman-adv/packet.h
@@ -19,6 +19,9 @@
*
*/
+#ifndef _NET_BATMAN_ADV_PACKET_H_
+#define _NET_BATMAN_ADV_PACKET_H_
+
#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
#define BAT_PACKET 0x01
@@ -28,9 +31,10 @@
#define BAT_VIS 0x05
/* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 8
+#define COMPAT_VERSION 11
#define DIRECTLINK 0x40
#define VIS_SERVER 0x20
+#define PRIMARIES_FIRST_HOP 0x10
/* ICMP message types */
#define ECHO_REPLY 0
@@ -48,7 +52,7 @@ struct batman_packet {
uint8_t version; /* batman version field */
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
uint8_t tq;
- uint16_t seqno;
+ uint32_t seqno;
uint8_t orig[6];
uint8_t prev_sender[6];
uint8_t ttl;
@@ -68,6 +72,23 @@ struct icmp_packet {
uint8_t uid;
} __attribute__((packed));
+#define BAT_RR_LEN 16
+
+/* icmp_packet_rr must start with all fields from icmp_packet
+ as this is assumed by code that handles ICMP packets */
+struct icmp_packet_rr {
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t ttl;
+ uint8_t dst[6];
+ uint8_t orig[6];
+ uint16_t seqno;
+ uint8_t uid;
+ uint8_t rr_cur;
+ uint8_t rr[BAT_RR_LEN][ETH_ALEN];
+} __attribute__((packed));
+
struct unicast_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
@@ -79,18 +100,21 @@ struct bcast_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t orig[6];
- uint16_t seqno;
+ uint8_t ttl;
+ uint32_t seqno;
} __attribute__((packed));
struct vis_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t vis_type; /* which type of vis-participant sent this? */
- uint8_t seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
+ uint32_t seqno; /* sequence number */
uint8_t ttl; /* TTL */
uint8_t vis_orig[6]; /* originator that informs about its
* neighbors */
uint8_t target_orig[6]; /* who should receive this packet */
uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
} __attribute__((packed));
+
+#endif /* _NET_BATMAN_ADV_PACKET_H_ */
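packet.h above widens the OGM and broadcast sequence numbers to uint32_t and introduces icmp_packet_rr with a fixed record-route array, all declared __attribute__((packed)) so the on-wire layout contains no compiler-inserted padding. The sketch below uses a hypothetical demo_hdr, not the batman-adv structs, to show how such a packed header can be size-checked at compile time:

/* Illustrative sketch only - compile-time layout checks for a packed header. */
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_hdr {
	uint8_t  packet_type;
	uint8_t  version;
	uint8_t  ttl;
	uint32_t seqno;		/* would start at offset 4 if the struct were not packed */
	uint8_t  orig[6];
} __attribute__((packed));

static inline void demo_hdr_checks(void)
{
	/* packed: 1 + 1 + 1 + 4 + 6 = 13 bytes, no holes */
	BUILD_BUG_ON(sizeof(struct demo_hdr) != 13);
	BUILD_BUG_ON(offsetof(struct demo_hdr, seqno) != 3);
}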
diff --git a/drivers/staging/batman-adv/ring_buffer.h b/drivers/staging/batman-adv/ring_buffer.h
index b8c9456558b..6b0cb9aaeba 100644
--- a/drivers/staging/batman-adv/ring_buffer.h
+++ b/drivers/staging/batman-adv/ring_buffer.h
@@ -19,5 +19,10 @@
*
*/
+#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
+#define _NET_BATMAN_ADV_RING_BUFFER_H_
+
void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
uint8_t ring_buffer_avg(uint8_t lq_recv[]);
+
+#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 066dc8b3881..032195e6de9 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -25,7 +25,7 @@
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
-#include "device.h"
+#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "types.h"
@@ -33,7 +33,7 @@
#include "vis.h"
#include "aggregation.h"
-DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
void slide_own_bcast_window(struct batman_if *batman_if)
{
@@ -77,24 +77,27 @@ static void update_route(struct orig_node *orig_node,
struct neigh_node *neigh_node,
unsigned char *hna_buff, int hna_buff_len)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+
/* route deleted */
if ((orig_node->router != NULL) && (neigh_node == NULL)) {
- bat_dbg(DBG_ROUTES, "Deleting route towards: %pM\n",
+ bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
orig_node->orig);
hna_global_del_orig(orig_node, "originator timed out");
/* route added */
} else if ((orig_node->router == NULL) && (neigh_node != NULL)) {
- bat_dbg(DBG_ROUTES,
+ bat_dbg(DBG_ROUTES, bat_priv,
"Adding route towards: %pM (via %pM)\n",
orig_node->orig, neigh_node->addr);
hna_global_add_orig(orig_node, hna_buff, hna_buff_len);
/* route changed */
} else {
- bat_dbg(DBG_ROUTES,
+ bat_dbg(DBG_ROUTES, bat_priv,
"Changing route towards: %pM "
"(now via %pM - was via %pM)\n",
orig_node->orig, neigh_node->addr,
@@ -120,11 +123,13 @@ void update_routes(struct orig_node *orig_node,
update_HNA(orig_node, hna_buff, hna_buff_len);
}
-static int isBidirectionalNeigh(struct orig_node *orig_node,
+static int is_bidirectional_neigh(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
struct batman_packet *batman_packet,
struct batman_if *if_incoming)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
unsigned char total_count;
@@ -211,7 +216,7 @@ static int isBidirectionalNeigh(struct orig_node *orig_node,
orig_neigh_node->tq_asym_penalty) /
(TQ_MAX_VALUE * TQ_MAX_VALUE));
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"bidirectional: "
"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
@@ -234,10 +239,12 @@ static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
unsigned char *hna_buff, int hna_buff_len,
char is_duplicate)
{
+ /* FIXME: get bat_priv */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
int tmp_hna_buff_len;
- bat_dbg(DBG_BATMAN, "update_originator(): "
+ bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
"Searching and updating originator entry of received packet\n");
list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
@@ -269,7 +276,7 @@ static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
if (!neigh_node)
return;
} else
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Updating existing last-hop neighbor of originator\n");
orig_node->flags = batman_packet->flags;
@@ -318,16 +325,19 @@ update_hna:
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-static int window_protected(int16_t seq_num_diff,
+static int window_protected(int32_t seq_num_diff,
unsigned long *last_reset)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
if (time_after(jiffies, *last_reset +
msecs_to_jiffies(RESET_PROTECTION_MS))) {
*last_reset = jiffies;
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"old packet received, start protection\n");
return 0;
@@ -349,10 +359,12 @@ static char count_real_packets(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
struct batman_if *if_incoming)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct neigh_node *tmp_neigh_node;
char is_duplicate = 0;
- int16_t seq_diff;
+ int32_t seq_diff;
int need_update = 0;
int set_mark;
@@ -387,7 +399,8 @@ static char count_real_packets(struct ethhdr *ethhdr,
}
if (need_update) {
- bat_dbg(DBG_BATMAN, "updating last_seqno: old %d, new %d\n",
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "updating last_seqno: old %d, new %d\n",
orig_node->last_real_seqno, batman_packet->seqno);
orig_node->last_real_seqno = batman_packet->seqno;
}
@@ -395,18 +408,127 @@ static char count_real_packets(struct ethhdr *ethhdr,
return is_duplicate;
}
+/* copy primary address for bonding */
+static void mark_bonding_address(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ struct batman_packet *batman_packet)
+
+{
+ if (batman_packet->flags & PRIMARIES_FIRST_HOP)
+ memcpy(orig_neigh_node->primary_addr,
+ orig_node->orig, ETH_ALEN);
+
+ return;
+}
+
+/* mark possible bond.candidates in the neighbor list */
+void update_bonding_candidates(struct bat_priv *bat_priv,
+ struct orig_node *orig_node)
+{
+ int candidates;
+ int interference_candidate;
+ int best_tq;
+ struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
+ struct neigh_node *first_candidate, *last_candidate;
+
+ /* update the candidates for this originator */
+ if (!orig_node->router) {
+ orig_node->bond.candidates = 0;
+ return;
+ }
+
+ best_tq = orig_node->router->tq_avg;
+
+ /* update bond.candidates */
+
+ candidates = 0;
+
+ /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
+ * as "bonding partner" */
+
+ /* first, zero the list */
+ list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
+ tmp_neigh_node->next_bond_candidate = NULL;
+ }
+
+ first_candidate = NULL;
+ last_candidate = NULL;
+ list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
+
+ /* only consider if it has the same primary address ... */
+ if (memcmp(orig_node->orig,
+ tmp_neigh_node->orig_node->primary_addr,
+ ETH_ALEN) != 0)
+ continue;
+
+ /* ... and is good enough to be considered */
+ if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
+ continue;
+
+ /* check if we have another candidate with the same
+ * mac address or interface. If we do, we won't
+ * select this candidate because of possible interference. */
+
+ interference_candidate = 0;
+ list_for_each_entry(tmp_neigh_node2,
+ &orig_node->neigh_list, list) {
+
+ if (tmp_neigh_node2 == tmp_neigh_node)
+ continue;
+
+ /* we only care if the other candidate is even
+ * considered as a candidate. */
+ if (tmp_neigh_node2->next_bond_candidate == NULL)
+ continue;
+
+
+ if ((tmp_neigh_node->if_incoming ==
+ tmp_neigh_node2->if_incoming)
+ || (memcmp(tmp_neigh_node->addr,
+ tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
+
+ interference_candidate = 1;
+ break;
+ }
+ }
+ /* don't care further if it is an interference candidate */
+ if (interference_candidate)
+ continue;
+
+ if (first_candidate == NULL) {
+ first_candidate = tmp_neigh_node;
+ tmp_neigh_node->next_bond_candidate = first_candidate;
+ } else
+ tmp_neigh_node->next_bond_candidate = last_candidate;
+
+ last_candidate = tmp_neigh_node;
+
+ candidates++;
+ }
+
+ if (candidates > 0) {
+ first_candidate->next_bond_candidate = last_candidate;
+ orig_node->bond.selected = first_candidate;
+ }
+
+ orig_node->bond.candidates = candidates;
+}
+
void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
unsigned char *hna_buff, int hna_buff_len,
struct batman_if *if_incoming)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_if *batman_if;
struct orig_node *orig_neigh_node, *orig_node;
char has_directlink_flag;
char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
char is_duplicate;
- unsigned short if_incoming_seqno;
+ uint32_t if_incoming_seqno;
/* Silently drop when the batman packet is actually not a
* correct packet.
@@ -431,7 +553,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
is_single_hop_neigh = (compare_orig(ethhdr->h_source,
batman_packet->orig) ? 1 : 0);
- bat_dbg(DBG_BATMAN, "Received BATMAN packet via NB: %pM, IF: %s [%s] "
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Received BATMAN packet via NB: %pM, IF: %s [%s] "
"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
"TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->dev, if_incoming->addr_str,
@@ -455,19 +578,19 @@ void receive_bat_packet(struct ethhdr *ethhdr,
batman_if->net_dev->dev_addr))
is_my_oldorig = 1;
- if (compare_orig(ethhdr->h_source, broadcastAddr))
+ if (compare_orig(ethhdr->h_source, broadcast_addr))
is_broadcast = 1;
}
if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
batman_packet->version);
return;
}
if (is_my_addr) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: received my own broadcast (sender: %pM"
")\n",
ethhdr->h_source);
@@ -475,7 +598,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
}
if (is_broadcast) {
- bat_dbg(DBG_BATMAN, "Drop packet: "
+ bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
"ignoring all packets with broadcast source addr (sender: %pM"
")\n", ethhdr->h_source);
return;
@@ -505,13 +628,13 @@ void receive_bat_packet(struct ethhdr *ethhdr,
bit_packet_count(word);
}
- bat_dbg(DBG_BATMAN, "Drop packet: "
+ bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
"originator packet from myself (via neighbor)\n");
return;
}
if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: ignoring all rebroadcast echos (sender: "
"%pM)\n", ethhdr->h_source);
return;
@@ -524,14 +647,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
if (is_duplicate == -1) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: packet within seqno protection time "
"(sender: %pM)\n", ethhdr->h_source);
return;
}
if (batman_packet->tq == 0) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: originator packet with tq equal 0\n");
return;
}
@@ -544,7 +667,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
!(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
(compare_orig(orig_node->router->addr,
orig_node->router->orig_node->router->addr))) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: ignoring all rebroadcast packets that "
"may make me loop (sender: %pM)\n", ethhdr->h_source);
return;
@@ -561,11 +684,12 @@ void receive_bat_packet(struct ethhdr *ethhdr,
* don't route towards it */
if (!is_single_hop_neigh &&
(orig_neigh_node->router == NULL)) {
- bat_dbg(DBG_BATMAN, "Drop packet: OGM via unknown neighbor!\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
return;
}
- is_bidirectional = isBidirectionalNeigh(orig_node, orig_neigh_node,
+ is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
batman_packet, if_incoming);
/* update ranking if it is not a duplicate or has the same
@@ -577,6 +701,10 @@ void receive_bat_packet(struct ethhdr *ethhdr,
update_orig(orig_node, ethhdr, batman_packet,
if_incoming, hna_buff, hna_buff_len, is_duplicate);
+ mark_bonding_address(bat_priv, orig_node,
+ orig_neigh_node, batman_packet);
+ update_bonding_candidates(bat_priv, orig_node);
+
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
@@ -584,24 +712,25 @@ void receive_bat_packet(struct ethhdr *ethhdr,
schedule_forward_packet(orig_node, ethhdr, batman_packet,
1, hna_buff_len, if_incoming);
- bat_dbg(DBG_BATMAN, "Forwarding packet: "
+ bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
"rebroadcast neighbor packet with direct link flag\n");
return;
}
/* multihop originator */
if (!is_bidirectional) {
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: not received via bidirectional link\n");
return;
}
if (is_duplicate) {
- bat_dbg(DBG_BATMAN, "Drop packet: duplicate packet received\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: duplicate packet received\n");
return;
}
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
schedule_forward_packet(orig_node, ethhdr, batman_packet,
0, hna_buff_len, if_incoming);
@@ -652,10 +781,12 @@ int recv_bat_packet(struct sk_buff *skb,
return NET_RX_SUCCESS;
}
-static int recv_my_icmp_packet(struct sk_buff *skb)
+static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
- struct icmp_packet *icmp_packet;
+ struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
struct sk_buff *skb_old;
struct batman_if *batman_if;
@@ -663,15 +794,18 @@ static int recv_my_icmp_packet(struct sk_buff *skb)
unsigned long flags;
uint8_t dstaddr[ETH_ALEN];
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_device_receive_packet(icmp_packet);
+ bat_socket_receive_packet(icmp_packet, icmp_len);
return NET_RX_DROP;
}
+ if (!bat_priv->primary_if)
+ return NET_RX_DROP;
+
/* answer echo request (ping) */
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
@@ -690,19 +824,19 @@ static int recv_my_icmp_packet(struct sk_buff *skb)
/* create a copy of the skb, if needed, to modify it. */
skb_old = NULL;
- if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
+ if (!skb_clone_writable(skb, icmp_len)) {
skb_old = skb;
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
-
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
kfree_skb(skb_old);
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+ memcpy(icmp_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->ttl = TTL;
@@ -715,8 +849,10 @@ static int recv_my_icmp_packet(struct sk_buff *skb)
return ret;
}
-static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
+static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
struct ethhdr *ethhdr;
@@ -731,13 +867,15 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
- printk(KERN_WARNING "batman-adv:"
- "Warning - can't forward icmp packet from %pM to %pM: "
- "ttl exceeded\n",
- icmp_packet->orig, icmp_packet->dst);
+ pr_warning("Warning - can't forward icmp packet from %pM to "
+ "%pM: ttl exceeded\n", icmp_packet->orig,
+ icmp_packet->dst);
return NET_RX_DROP;
}
+ if (!bat_priv->primary_if)
+ return NET_RX_DROP;
+
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
@@ -754,7 +892,7 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
spin_unlock_irqrestore(&orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
+ if (!skb_clone_writable(skb, icmp_len)) {
skb_old = skb;
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
@@ -765,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+ memcpy(icmp_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->ttl = TTL;
@@ -781,7 +920,7 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
int recv_icmp_packet(struct sk_buff *skb)
{
- struct icmp_packet *icmp_packet;
+ struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
struct orig_node *orig_node;
struct sk_buff *skb_old;
@@ -791,6 +930,12 @@ int recv_icmp_packet(struct sk_buff *skb)
unsigned long flags;
uint8_t dstaddr[ETH_ALEN];
+ /**
+ * we truncate all incoming icmp packets if they don't match our size
+ */
+ if (skb_headlen(skb) >= sizeof(struct icmp_packet_rr))
+ hdr_size = sizeof(struct icmp_packet_rr);
+
/* drop packet if it has not necessary minimum size */
if (skb_headlen(skb) < hdr_size)
return NET_RX_DROP;
@@ -809,15 +954,23 @@ int recv_icmp_packet(struct sk_buff *skb)
if (!is_my_mac(ethhdr->h_dest))
return NET_RX_DROP;
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+
+ /* add record route information if not full */
+ if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
+ (icmp_packet->rr_cur < BAT_RR_LEN)) {
+ memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
+ ethhdr->h_dest, ETH_ALEN);
+ icmp_packet->rr_cur++;
+ }
/* packet for me */
if (is_my_mac(icmp_packet->dst))
- return recv_my_icmp_packet(skb);
+ return recv_my_icmp_packet(skb, hdr_size);
/* TTL exceeded */
if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(skb);
+ return recv_icmp_ttl_exceeded(skb, hdr_size);
ret = NET_RX_DROP;
@@ -836,12 +989,12 @@ int recv_icmp_packet(struct sk_buff *skb)
spin_unlock_irqrestore(&orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
+ if (!skb_clone_writable(skb, hdr_size)) {
skb_old = skb;
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
kfree_skb(skb_old);
}
@@ -859,16 +1012,109 @@ int recv_icmp_packet(struct sk_buff *skb)
return ret;
}
-int recv_unicast_packet(struct sk_buff *skb)
+/* find a suitable router for this originator, and use
+ * bonding if possible. */
+struct neigh_node *find_router(struct orig_node *orig_node,
+ struct batman_if *recv_if)
+{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct orig_node *primary_orig_node;
+ struct orig_node *router_orig;
+ struct neigh_node *router, *first_candidate, *best_router;
+ static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+ int bonding_enabled;
+
+ if (!orig_node)
+ return NULL;
+
+ if (!orig_node->router)
+ return NULL;
+
+ /* without bonding, the first node should
+ * always choose the default router. */
+
+ bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
+ if (!bonding_enabled && (recv_if == NULL))
+ return orig_node->router;
+
+ router_orig = orig_node->router->orig_node;
+
+ /* if we have something in the primary_addr, we can search
+ * for a potential bonding candidate. */
+ if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
+ return orig_node->router;
+
+ /* find the orig_node which has the primary interface. might
+ * even be the same as our router_orig in many cases */
+
+ if (memcmp(router_orig->primary_addr,
+ router_orig->orig, ETH_ALEN) == 0) {
+ primary_orig_node = router_orig;
+ } else {
+ primary_orig_node = hash_find(orig_hash,
+ router_orig->primary_addr);
+ if (!primary_orig_node)
+ return orig_node->router;
+ }
+
+ /* with less than 2 candidates, we can't do any
+ * bonding and prefer the original router. */
+
+ if (primary_orig_node->bond.candidates < 2)
+ return orig_node->router;
+
+
+ /* all nodes in between should choose a candidate which
+ * is not on the interface where the packet came
+ * in. */
+ first_candidate = primary_orig_node->bond.selected;
+ router = first_candidate;
+
+ if (bonding_enabled) {
+ /* in the bonding case, send the packets in a round
+ * robin fashion over the remaining interfaces. */
+ do {
+ /* recv_if == NULL on the first node. */
+ if (router->if_incoming != recv_if)
+ break;
+
+ router = router->next_bond_candidate;
+ } while (router != first_candidate);
+
+ primary_orig_node->bond.selected = router->next_bond_candidate;
+
+ } else {
+ /* if bonding is disabled, use the best of the
+ * remaining candidates which are not using
+ * this interface. */
+ best_router = first_candidate;
+
+ do {
+ /* recv_if == NULL on the first node. */
+ if ((router->if_incoming != recv_if) &&
+ (router->tq_avg > best_router->tq_avg))
+ best_router = router;
+
+ router = router->next_bond_candidate;
+ } while (router != first_candidate);
+
+ router = best_router;
+ }
+
+ return router;
+}
+
+int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
struct unicast_packet *unicast_packet;
struct orig_node *orig_node;
+ struct neigh_node *router;
struct ethhdr *ethhdr;
struct batman_if *batman_if;
struct sk_buff *skb_old;
uint8_t dstaddr[ETH_ALEN];
int hdr_size = sizeof(struct unicast_packet);
- int ret;
unsigned long flags;
/* drop packet if it has not necessary minimum size */
@@ -899,49 +1145,50 @@ int recv_unicast_packet(struct sk_buff *skb)
/* TTL exceeded */
if (unicast_packet->ttl < 2) {
- printk(KERN_WARNING "batman-adv:Warning - "
- "can't forward unicast packet from %pM to %pM: "
- "ttl exceeded\n",
- ethhdr->h_source, unicast_packet->dest);
+ pr_warning("Warning - can't forward unicast packet from %pM to "
+ "%pM: ttl exceeded\n", ethhdr->h_source,
+ unicast_packet->dest);
return NET_RX_DROP;
}
- ret = NET_RX_DROP;
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, unicast_packet->dest));
- if ((orig_node != NULL) &&
- (orig_node->router != NULL)) {
+ router = find_router(orig_node, recv_if);
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ if (!router) {
spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
+ }
- /* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
- skb_old = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
- kfree_skb(skb_old);
- }
- /* decrement ttl */
- unicast_packet->ttl--;
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
- /* route it */
- send_skb_packet(skb, batman_if, dstaddr);
- ret = NET_RX_SUCCESS;
+ batman_if = router->if_incoming;
+ memcpy(dstaddr, router->addr, ETH_ALEN);
- } else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
- return ret;
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ unicast_packet = (struct unicast_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ /* decrement ttl */
+ unicast_packet->ttl--;
+
+ /* route it */
+ send_skb_packet(skb, batman_if, dstaddr);
+
+ return NET_RX_SUCCESS;
}
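find_router() above is now the single place where a next hop is picked: without bonding (and for locally generated packets, where recv_if is NULL) it returns the default router, while with bonding it walks the circular next_bond_candidate ring and skips candidates sitting on the interface the packet arrived on. A hedged illustration of how two candidates would be wired up; the actual linking is done by update_bonding_candidates(), and the variable names here exist only for the example:

	/* two neighbours leading to the same primary originator */
	neigh_a->next_bond_candidate = neigh_b;
	neigh_b->next_bond_candidate = neigh_a;
	primary_orig_node->bond.candidates = 2;
	primary_orig_node->bond.selected = neigh_a;

	/* with bonding enabled, successive calls alternate between
	 * neigh_a and neigh_b, because bond.selected is advanced after
	 * every selection */
	router = find_router(orig_node, NULL);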
int recv_bcast_packet(struct sk_buff *skb)
@@ -950,7 +1197,7 @@ int recv_bcast_packet(struct sk_buff *skb)
struct bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(struct bcast_packet);
- int16_t seq_diff;
+ int32_t seq_diff;
unsigned long flags;
/* drop packet if it has not necessary minimum size */
@@ -977,6 +1224,9 @@ int recv_bcast_packet(struct sk_buff *skb)
if (is_my_mac(bcast_packet->orig))
return NET_RX_DROP;
+ if (bcast_packet->ttl < 2)
+ return NET_RX_DROP;
+
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, bcast_packet->orig));
@@ -989,12 +1239,12 @@ int recv_bcast_packet(struct sk_buff *skb)
/* check whether the packet is a duplicate */
if (get_bit_status(orig_node->bcast_bits,
orig_node->last_bcast_seqno,
- ntohs(bcast_packet->seqno))) {
+ ntohl(bcast_packet->seqno))) {
spin_unlock_irqrestore(&orig_hash_lock, flags);
return NET_RX_DROP;
}
- seq_diff = ntohs(bcast_packet->seqno) - orig_node->last_bcast_seqno;
+ seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */
if (window_protected(seq_diff, &orig_node->bcast_seqno_reset)) {
@@ -1005,7 +1255,7 @@ int recv_bcast_packet(struct sk_buff *skb)
/* mark broadcast in flood history, update window position
* if required. */
if (bit_get_packet(orig_node->bcast_bits, seq_diff, 1))
- orig_node->last_bcast_seqno = ntohs(bcast_packet->seqno);
+ orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
spin_unlock_irqrestore(&orig_hash_lock, flags);
/* rebroadcast packet */
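Broadcast sequence numbers are widened to 32 bit here, and seq_diff becomes an int32_t so the duplicate/window check keeps working across a counter wrap. A small user-space style illustration of why the signed difference is the right tool; the function name and headers are only for the example:

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

static int32_t bcast_seq_diff(uint32_t wire_seqno, uint32_t last_seqno)
{
	/* the unsigned subtraction wraps modulo 2^32; read as int32_t it
	 * is small and positive for newer packets and small and negative
	 * for older ones, even when the counter itself has wrapped */
	return (int32_t)(ntohl(wire_seqno) - last_seqno);
}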
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 8288decea37..3eac64e3cf9 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -19,9 +19,10 @@
*
*/
-#include "types.h"
+#ifndef _NET_BATMAN_ADV_ROUTING_H_
+#define _NET_BATMAN_ADV_ROUTING_H_
-extern wait_queue_head_t thread_wait;
+#include "types.h"
void slide_own_bcast_window(struct batman_if *batman_if);
void receive_bat_packet(struct ethhdr *ethhdr,
@@ -32,8 +33,14 @@ void update_routes(struct orig_node *orig_node,
struct neigh_node *neigh_node,
unsigned char *hna_buff, int hna_buff_len);
int recv_icmp_packet(struct sk_buff *skb);
-int recv_unicast_packet(struct sk_buff *skb);
+int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bcast_packet(struct sk_buff *skb);
int recv_vis_packet(struct sk_buff *skb);
int recv_bat_packet(struct sk_buff *skb,
struct batman_if *batman_if);
+struct neigh_node *find_router(struct orig_node *orig_node,
+ struct batman_if *recv_if);
+void update_bonding_candidates(struct bat_priv *bat_priv,
+ struct orig_node *orig_node);
+
+#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index ac69ed871a7..da3c82e47bb 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,6 +29,9 @@
#include "vis.h"
#include "aggregation.h"
+
+static void send_outstanding_bcast_packet(struct work_struct *work);
+
/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq)
{
@@ -38,15 +41,15 @@ static uint8_t hop_penalty(const uint8_t tq)
/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
- return jiffies +
- (((atomic_read(&bat_priv->orig_interval) - JITTER +
- (random32() % 2*JITTER)) * HZ) / 1000);
+ return jiffies + msecs_to_jiffies(
+ atomic_read(&bat_priv->orig_interval) -
+ JITTER + (random32() % 2*JITTER));
}
/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(struct bat_priv *bat_priv)
{
- return jiffies + (((random32() % (JITTER/2)) * HZ) / 1000);
+ return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
/* send out an already prepared packet to the given address via the
@@ -64,10 +67,8 @@ int send_skb_packet(struct sk_buff *skb,
goto send_skb_err;
if (!(batman_if->net_dev->flags & IFF_UP)) {
- printk(KERN_WARNING
- "batman-adv:Interface %s "
- "is not up - can't send packet via that interface!\n",
- batman_if->dev);
+ pr_warning("Interface %s is not up - can't send packet via "
+ "that interface!\n", batman_if->dev);
goto send_skb_err;
}
@@ -119,6 +120,8 @@ void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
static void send_packet_to_if(struct forw_packet *forw_packet,
struct batman_if *batman_if)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
char *fwd_str;
uint8_t packet_num;
int16_t buff_pos;
@@ -148,11 +151,11 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
"Sending own" :
"Forwarding"));
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
" IDF %s) on interface %s [%s]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
- batman_packet->orig, ntohs(batman_packet->seqno),
+ batman_packet->orig, ntohl(batman_packet->seqno),
batman_packet->tq, batman_packet->ttl,
(batman_packet->flags & DIRECTLINK ?
"on" : "off"),
@@ -167,20 +170,22 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
send_raw_packet(forw_packet->packet_buff,
forw_packet->packet_len,
- batman_if, broadcastAddr);
+ batman_if, broadcast_addr);
}
/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_if *batman_if;
struct batman_packet *batman_packet =
(struct batman_packet *)(forw_packet->packet_buff);
unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
- printk(KERN_ERR "batman-adv: Error - can't forward packet: "
- "incoming iface not specified\n");
+ pr_err("Error - can't forward packet: incoming iface not "
+ "specified\n");
return;
}
@@ -193,18 +198,18 @@ static void send_packet(struct forw_packet *forw_packet)
(forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
/* FIXME: what about aggregated packets ? */
- bat_dbg(DBG_BATMAN,
+ bat_dbg(DBG_BATMAN, bat_priv,
"%s packet (originator %pM, seqno %d, TTL %d) "
"on interface %s [%s]\n",
(forw_packet->own ? "Sending own" : "Forwarding"),
- batman_packet->orig, ntohs(batman_packet->seqno),
+ batman_packet->orig, ntohl(batman_packet->seqno),
batman_packet->ttl, forw_packet->if_incoming->dev,
forw_packet->if_incoming->addr_str);
send_raw_packet(forw_packet->packet_buff,
forw_packet->packet_len,
forw_packet->if_incoming,
- broadcastAddr);
+ broadcast_addr);
return;
}
@@ -276,14 +281,14 @@ void schedule_own_packet(struct batman_if *batman_if)
batman_packet = (struct batman_packet *)batman_if->packet_buff;
/* change sequence number to network order */
- batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));
+ batman_packet->seqno =
+ htonl((uint32_t)atomic_read(&batman_if->seqno));
if (vis_server == VIS_TYPE_SERVER_SYNC)
- batman_packet->flags = VIS_SERVER;
+ batman_packet->flags |= VIS_SERVER;
else
batman_packet->flags &= ~VIS_SERVER;
- /* could be read by receive_bat_packet() */
atomic_inc(&batman_if->seqno);
slide_own_bcast_window(batman_if);
@@ -306,7 +311,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
unsigned long send_time;
if (batman_packet->ttl <= 1) {
- bat_dbg(DBG_BATMAN, "ttl exceeded\n");
+ bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -335,13 +340,16 @@ void schedule_forward_packet(struct orig_node *orig_node,
/* apply hop penalty */
batman_packet->tq = hop_penalty(batman_packet->tq);
- bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, "
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Forwarding packet: tq_orig: %i, tq_avg: %i, "
"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
batman_packet->ttl);
- batman_packet->seqno = htons(batman_packet->seqno);
+ batman_packet->seqno = htonl(batman_packet->seqno);
+ /* switch off the primaries first hop flag when forwarding */
+ batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
if (directlink)
batman_packet->flags |= DIRECTLINK;
else
@@ -392,9 +400,12 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
int add_bcast_packet_to_list(struct sk_buff *skb)
{
struct forw_packet *forw_packet;
+ struct bcast_packet *bcast_packet;
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
if (!atomic_dec_not_zero(&bcast_queue_left)) {
- bat_dbg(DBG_BATMAN, "bcast packet queue full\n");
+ bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
goto out;
}
@@ -407,6 +418,10 @@ int add_bcast_packet_to_list(struct sk_buff *skb)
if (!skb)
goto packet_free;
+ /* as we have a copy now, it is safe to decrease the TTL */
+ bcast_packet = (struct bcast_packet *)skb->data;
+ bcast_packet->ttl--;
+
skb_reset_mac_header(skb);
forw_packet->skb = skb;
@@ -426,7 +441,7 @@ out:
return NETDEV_TX_BUSY;
}
-void send_outstanding_bcast_packet(struct work_struct *work)
+static void send_outstanding_bcast_packet(struct work_struct *work)
{
struct batman_if *batman_if;
struct delayed_work *delayed_work =
@@ -450,7 +465,7 @@ void send_outstanding_bcast_packet(struct work_struct *work)
skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
if (skb1)
send_skb_packet(skb1,
- batman_if, broadcastAddr);
+ batman_if, broadcast_addr);
}
rcu_read_unlock();
@@ -502,15 +517,19 @@ out:
void purge_outstanding_packets(struct batman_if *batman_if)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
unsigned long flags;
if (batman_if)
- bat_dbg(DBG_BATMAN, "purge_outstanding_packets(): %s\n",
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets(): %s\n",
batman_if->dev);
else
- bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets()\n");
/* free bcast list */
spin_lock_irqsave(&forw_bcast_list_lock, flags);
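The scheduling helpers in send.c now go through msecs_to_jiffies() instead of the open-coded "* HZ / 1000". Besides being easier to read, msecs_to_jiffies() rounds up, so a small jitter value can no longer truncate to zero jiffies. A side-by-side sketch; EXAMPLE_JITTER is an illustrative constant, not the driver's JITTER definition:

	#define EXAMPLE_JITTER 20	/* milliseconds, illustrative only */

	/* old style: integer division truncates */
	unsigned long t_old = jiffies +
		((random32() % (EXAMPLE_JITTER / 2)) * HZ) / 1000;

	/* new style: converts and rounds up in one step */
	unsigned long t_new = jiffies +
		msecs_to_jiffies(random32() % (EXAMPLE_JITTER / 2));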
diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h
index feaa2fc7f9a..b64c62783fe 100644
--- a/drivers/staging/batman-adv/send.h
+++ b/drivers/staging/batman-adv/send.h
@@ -19,9 +19,11 @@
*
*/
+#ifndef _NET_BATMAN_ADV_SEND_H_
+#define _NET_BATMAN_ADV_SEND_H_
+
#include "types.h"
-void send_own_packet_work(struct work_struct *work);
int send_skb_packet(struct sk_buff *skb,
struct batman_if *batman_if,
uint8_t *dst_addr);
@@ -34,6 +36,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_outgoing);
int add_bcast_packet_to_list(struct sk_buff *skb);
-void send_outstanding_bcast_packet(struct work_struct *work);
void send_outstanding_bat_packet(struct work_struct *work);
void purge_outstanding_packets(struct batman_if *batman_if);
+
+#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
index 51c40b77c8d..2ea97de435c 100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@ -22,6 +22,7 @@
#include "main.h"
#include "soft-interface.h"
#include "hard-interface.h"
+#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "types.h"
@@ -30,13 +31,12 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
-static uint16_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
+static uint32_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
* broadcast storms */
static int32_t skb_packets;
static int32_t skb_bad_packets;
-unsigned char mainIfAddr[ETH_ALEN];
-static unsigned char mainIfAddr_default[ETH_ALEN];
+unsigned char main_if_addr[ETH_ALEN];
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static void bat_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info);
@@ -58,12 +58,7 @@ static const struct ethtool_ops bat_ethtool_ops = {
void set_main_if_addr(uint8_t *addr)
{
- memcpy(mainIfAddr, addr, ETH_ALEN);
-}
-
-int main_if_was_up(void)
-{
- return (memcmp(mainIfAddr, mainIfAddr_default, ETH_ALEN) != 0 ? 1 : 0);
+ memcpy(main_if_addr, addr, ETH_ALEN);
}
int my_skb_push(struct sk_buff *skb, unsigned int len)
@@ -83,69 +78,25 @@ int my_skb_push(struct sk_buff *skb, unsigned int len)
return 0;
}
-#ifdef HAVE_NET_DEVICE_OPS
-static const struct net_device_ops bat_netdev_ops = {
- .ndo_open = interface_open,
- .ndo_stop = interface_release,
- .ndo_get_stats = interface_stats,
- .ndo_set_mac_address = interface_set_mac_addr,
- .ndo_change_mtu = interface_change_mtu,
- .ndo_start_xmit = interface_tx,
- .ndo_validate_addr = eth_validate_addr
-};
-#endif
-
-void interface_setup(struct net_device *dev)
-{
- struct bat_priv *priv = netdev_priv(dev);
- char dev_addr[ETH_ALEN];
-
- ether_setup(dev);
-
-#ifdef HAVE_NET_DEVICE_OPS
- dev->netdev_ops = &bat_netdev_ops;
-#else
- dev->open = interface_open;
- dev->stop = interface_release;
- dev->get_stats = interface_stats;
- dev->set_mac_address = interface_set_mac_addr;
- dev->change_mtu = interface_change_mtu;
- dev->hard_start_xmit = interface_tx;
-#endif
- dev->destructor = free_netdev;
-
- dev->mtu = hardif_min_mtu();
- dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
- * skbuff for our header */
-
- /* generate random address */
- random_ether_addr(dev_addr);
- memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
-
- SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
-
- memset(priv, 0, sizeof(struct bat_priv));
-}
-
-int interface_open(struct net_device *dev)
+static int interface_open(struct net_device *dev)
{
netif_start_queue(dev);
return 0;
}
-int interface_release(struct net_device *dev)
+static int interface_release(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
-struct net_device_stats *interface_stats(struct net_device *dev)
+static struct net_device_stats *interface_stats(struct net_device *dev)
{
struct bat_priv *priv = netdev_priv(dev);
return &priv->stats;
}
-int interface_set_mac_addr(struct net_device *dev, void *p)
+static int interface_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
@@ -163,7 +114,7 @@ int interface_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-int interface_change_mtu(struct net_device *dev, int new_mtu)
+static int interface_change_mtu(struct net_device *dev, int new_mtu)
{
/* check ranges */
if ((new_mtu < 68) || (new_mtu > hardif_min_mtu()))
@@ -179,6 +130,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
struct unicast_packet *unicast_packet;
struct bcast_packet *bcast_packet;
struct orig_node *orig_node;
+ struct neigh_node *router;
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct bat_priv *priv = netdev_priv(dev);
struct batman_if *batman_if;
@@ -205,16 +157,17 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
bcast_packet = (struct bcast_packet *)skb->data;
bcast_packet->version = COMPAT_VERSION;
+ bcast_packet->ttl = TTL;
/* batman packet type: broadcast */
bcast_packet->packet_type = BAT_BCAST;
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
- memcpy(bcast_packet->orig, mainIfAddr, ETH_ALEN);
+ memcpy(bcast_packet->orig, main_if_addr, ETH_ALEN);
/* set broadcast sequence number */
- bcast_packet->seqno = htons(bcast_seqno);
+ bcast_packet->seqno = htonl(bcast_seqno);
/* broadcast packet. on success, increase seqno. */
if (add_bcast_packet_to_list(skb) == NETDEV_TX_OK)
@@ -235,38 +188,36 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
if (!orig_node)
orig_node = transtable_search(ethhdr->h_dest);
- if ((orig_node) &&
- (orig_node->router)) {
- struct neigh_node *router = orig_node->router;
+ router = find_router(orig_node, NULL);
- if (my_skb_push(skb, sizeof(struct unicast_packet)) < 0)
- goto unlock;
+ if (!router)
+ goto unlock;
- unicast_packet = (struct unicast_packet *)skb->data;
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
- unicast_packet->version = COMPAT_VERSION;
- /* batman packet type: unicast */
- unicast_packet->packet_type = BAT_UNICAST;
- /* set unicast ttl */
- unicast_packet->ttl = TTL;
- /* copy the destination for faster routing */
- memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+ batman_if = router->if_incoming;
+ memcpy(dstaddr, router->addr, ETH_ALEN);
- /* net_dev won't be available when not active */
- if (router->if_incoming->if_status != IF_ACTIVE)
- goto unlock;
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
+ if (batman_if->if_status != IF_ACTIVE)
+ goto dropped;
- batman_if = router->if_incoming;
- memcpy(dstaddr, router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ if (my_skb_push(skb, sizeof(struct unicast_packet)) < 0)
+ goto dropped;
- send_skb_packet(skb, batman_if, dstaddr);
- } else {
- goto unlock;
- }
+ unicast_packet = (struct unicast_packet *)skb->data;
+
+ unicast_packet->version = COMPAT_VERSION;
+ /* batman packet type: unicast */
+ unicast_packet->packet_type = BAT_UNICAST;
+ /* set unicast ttl */
+ unicast_packet->ttl = TTL;
+ /* copy the destination for faster routing */
+ memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+
+ send_skb_packet(skb, batman_if, dstaddr);
}
priv->stats.tx_packets++;
@@ -315,6 +266,50 @@ void interface_rx(struct sk_buff *skb, int hdr_size)
netif_rx(skb);
}
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops bat_netdev_ops = {
+ .ndo_open = interface_open,
+ .ndo_stop = interface_release,
+ .ndo_get_stats = interface_stats,
+ .ndo_set_mac_address = interface_set_mac_addr,
+ .ndo_change_mtu = interface_change_mtu,
+ .ndo_start_xmit = interface_tx,
+ .ndo_validate_addr = eth_validate_addr
+};
+#endif
+
+void interface_setup(struct net_device *dev)
+{
+ struct bat_priv *priv = netdev_priv(dev);
+ char dev_addr[ETH_ALEN];
+
+ ether_setup(dev);
+
+#ifdef HAVE_NET_DEVICE_OPS
+ dev->netdev_ops = &bat_netdev_ops;
+#else
+ dev->open = interface_open;
+ dev->stop = interface_release;
+ dev->get_stats = interface_stats;
+ dev->set_mac_address = interface_set_mac_addr;
+ dev->change_mtu = interface_change_mtu;
+ dev->hard_start_xmit = interface_tx;
+#endif
+ dev->destructor = free_netdev;
+
+ dev->mtu = hardif_min_mtu();
+ dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
+ * skbuff for our header */
+
+ /* generate random address */
+ random_ether_addr(dev_addr);
+ memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
+
+ SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
+
+ memset(priv, 0, sizeof(struct bat_priv));
+}
+
/* ethtool */
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h
index e7f59af7df3..63648543900 100644
--- a/drivers/staging/batman-adv/soft-interface.h
+++ b/drivers/staging/batman-adv/soft-interface.h
@@ -19,16 +19,15 @@
*
*/
+#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
+#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
+
void set_main_if_addr(uint8_t *addr);
-int main_if_was_up(void);
void interface_setup(struct net_device *dev);
-int interface_open(struct net_device *dev);
-int interface_release(struct net_device *dev);
-struct net_device_stats *interface_stats(struct net_device *dev);
-int interface_set_mac_addr(struct net_device *dev, void *addr);
-int interface_change_mtu(struct net_device *dev, int new_mtu);
int interface_tx(struct sk_buff *skb, struct net_device *dev);
void interface_rx(struct sk_buff *skb, int hdr_size);
int my_skb_push(struct sk_buff *skb, unsigned int len);
-extern unsigned char mainIfAddr[];
+extern unsigned char main_if_addr[];
+
+#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/drivers/staging/batman-adv/sysfs-class-net-batman-adv b/drivers/staging/batman-adv/sysfs-class-net-batman-adv
new file mode 100644
index 00000000000..38dd762def4
--- /dev/null
+++ b/drivers/staging/batman-adv/sysfs-class-net-batman-adv
@@ -0,0 +1,14 @@
+
+What: /sys/class/net/<iface>/batman-adv/mesh_iface
+Date: May 2010
+Contact: Marek Lindner <lindner_marek@yahoo.de>
+Description:
+ The /sys/class/net/<iface>/batman-adv/mesh_iface file
+ displays the batman mesh interface this <iface>
+ is currently associated with.
+
+What: /sys/class/net/<iface>/batman-adv/iface_status
+Date: May 2010
+Contact: Marek Lindner <lindner_marek@yahoo.de>
+Description:
+ Indicates the status of <iface> as it is seen by batman.
diff --git a/drivers/staging/batman-adv/sysfs-class-net-mesh b/drivers/staging/batman-adv/sysfs-class-net-mesh
new file mode 100644
index 00000000000..5aa1912e455
--- /dev/null
+++ b/drivers/staging/batman-adv/sysfs-class-net-mesh
@@ -0,0 +1,33 @@
+
+What: /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
+Date: May 2010
+Contact: Marek Lindner <lindner_marek@yahoo.de>
+Description:
+ Indicates whether the batman protocol messages of the
+ mesh <mesh_iface> shall be aggregated or not.
+
+What: /sys/class/net/<mesh_iface>/mesh/bonding
+Date: June 2010
+Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Description:
+ Indicates whether the data traffic going through the
+ mesh will be sent using multiple interfaces at the
+ same time (if available).
+
+What: /sys/class/net/<mesh_iface>/mesh/orig_interval
+Date: May 2010
+Contact: Marek Lindner <lindner_marek@yahoo.de>
+Description:
+ Defines the interval in milliseconds in which batman
+ sends its protocol messages.
+
+What: /sys/class/net/<mesh_iface>/mesh/vis_mode
+Date: May 2010
+Contact: Marek Lindner <lindner_marek@yahoo.de>
+Description:
+ Each batman node only maintains information about its
+ own local neighborhood, therefore generating graphs
+ showing the topology of the entire mesh is not easily
+ feasible without having a central instance to collect
+ the local topologies from all nodes. This file allows
+ one to activate the collecting (server) mode.
diff --git a/drivers/staging/batman-adv/translation-table.c b/drivers/staging/batman-adv/translation-table.c
index e01ff2151f7..b233377d756 100644
--- a/drivers/staging/batman-adv/translation-table.c
+++ b/drivers/staging/batman-adv/translation-table.c
@@ -32,7 +32,10 @@ atomic_t hna_local_changed;
DEFINE_SPINLOCK(hna_local_hash_lock);
static DEFINE_SPINLOCK(hna_global_hash_lock);
+static void hna_local_purge(struct work_struct *work);
static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
+static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
+ char *message);
static void hna_local_start_timer(void)
{
@@ -57,6 +60,8 @@ int hna_local_init(void)
void hna_local_add(uint8_t *addr)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct hna_local_entry *hna_local_entry;
struct hna_global_entry *hna_global_entry;
struct hashtable_t *swaphash;
@@ -77,15 +82,15 @@ void hna_local_add(uint8_t *addr)
MAC-flooding. */
if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
(num_hna + 1 > 255)) {
- bat_dbg(DBG_ROUTES,
+ bat_dbg(DBG_ROUTES, bat_priv,
"Can't add new local hna entry (%pM): "
"number of local hna entries exceeds packet size\n",
addr);
return;
}
- bat_dbg(DBG_ROUTES, "Creating new local hna entry: %pM\n",
- addr);
+ bat_dbg(DBG_ROUTES, bat_priv,
+ "Creating new local hna entry: %pM\n", addr);
hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
if (!hna_local_entry)
@@ -111,8 +116,7 @@ void hna_local_add(uint8_t *addr)
hna_local_hash->size * 2);
if (swaphash == NULL)
- printk(KERN_ERR "batman-adv:"
- "Couldn't resize local hna hash table\n");
+ pr_err("Couldn't resize local hna hash table\n");
else
hna_local_hash = swaphash;
}
@@ -160,59 +164,54 @@ int hna_local_fill_buffer(unsigned char *buff, int buff_len)
return i;
}
-int hna_local_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off)
+int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
+ struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
- int bytes_written = 0;
+ HASHIT(hashit_count);
unsigned long flags;
- size_t hdr_len;
+ size_t buf_size, pos;
+ char *buff;
if (!bat_priv->primary_if) {
- if (off == 0)
- return sprintf(buff,
- "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
- net_dev->name);
-
- return 0;
+ return seq_printf(seq, "BATMAN mesh %s disabled - "
+ "please specify interfaces to enable it\n",
+ net_dev->name);
}
- hdr_len = sprintf(buff,
- "Locally retrieved addresses (from %s) "
- "announced via HNA:\n",
- net_dev->name);
-
- if (off < hdr_len)
- bytes_written = hdr_len;
+ seq_printf(seq, "Locally retrieved addresses (from %s) "
+ "announced via HNA:\n",
+ net_dev->name);
spin_lock_irqsave(&hna_local_hash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit)) {
- hdr_len += 21;
-
- if (count < bytes_written + 22)
- break;
+ buf_size = 1;
+ /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
+ while (hash_iterate(hna_local_hash, &hashit_count))
+ buf_size += 21;
- if (off >= hdr_len)
- continue;
+ buff = kmalloc(buf_size, GFP_ATOMIC);
+ if (!buff) {
+ spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ return -ENOMEM;
+ }
+ buff[0] = '\0';
+ pos = 0;
+ while (hash_iterate(hna_local_hash, &hashit)) {
hna_local_entry = hashit.bucket->data;
- bytes_written += snprintf(buff + bytes_written, 22,
- " * " MAC_FMT "\n",
- hna_local_entry->addr[0],
- hna_local_entry->addr[1],
- hna_local_entry->addr[2],
- hna_local_entry->addr[3],
- hna_local_entry->addr[4],
- hna_local_entry->addr[5]);
+ pos += snprintf(buff + pos, 22, " * %pM\n",
+ hna_local_entry->addr);
}
spin_unlock_irqrestore(&hna_local_hash_lock, flags);
- return bytes_written;
+
+ seq_printf(seq, "%s", buff);
+ kfree(buff);
+ return 0;
}
static void _hna_local_del(void *data)
@@ -225,7 +224,9 @@ static void _hna_local_del(void *data)
static void hna_local_del(struct hna_local_entry *hna_local_entry,
char *message)
{
- bat_dbg(DBG_ROUTES, "Deleting local hna entry (%pM): %s\n",
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
hna_local_entry->addr, message);
hash_remove(hna_local_hash, hna_local_entry->addr);
@@ -247,7 +248,7 @@ void hna_local_remove(uint8_t *addr, char *message)
spin_unlock_irqrestore(&hna_local_hash_lock, flags);
}
-void hna_local_purge(struct work_struct *work)
+static void hna_local_purge(struct work_struct *work)
{
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
@@ -259,8 +260,7 @@ void hna_local_purge(struct work_struct *work)
while (hash_iterate(hna_local_hash, &hashit)) {
hna_local_entry = hashit.bucket->data;
- timeout = hna_local_entry->last_seen +
- ((LOCAL_HNA_TIMEOUT / 1000) * HZ);
+ timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
if ((!hna_local_entry->never_purge) &&
time_after(jiffies, timeout))
hna_local_del(hna_local_entry, "address timed out");
@@ -296,6 +296,8 @@ int hna_global_init(void)
void hna_global_add_orig(struct orig_node *orig_node,
unsigned char *hna_buff, int hna_buff_len)
{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct hna_global_entry *hna_global_entry;
struct hna_local_entry *hna_local_entry;
struct hashtable_t *swaphash;
@@ -322,7 +324,7 @@ void hna_global_add_orig(struct orig_node *orig_node,
memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
- bat_dbg(DBG_ROUTES,
+ bat_dbg(DBG_ROUTES, bat_priv,
"Creating new global hna entry: "
"%pM (via %pM)\n",
hna_global_entry->addr, orig_node->orig);
@@ -369,8 +371,7 @@ void hna_global_add_orig(struct orig_node *orig_node,
hna_global_hash->size * 2);
if (swaphash == NULL)
- printk(KERN_ERR "batman-adv:"
- "Couldn't resize global hna hash table\n");
+ pr_err("Couldn't resize global hna hash table\n");
else
hna_global_hash = swaphash;
}
@@ -378,71 +379,63 @@ void hna_global_add_orig(struct orig_node *orig_node,
spin_unlock_irqrestore(&hna_global_hash_lock, flags);
}
-int hna_global_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off)
+int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
+ struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hna_global_entry *hna_global_entry;
HASHIT(hashit);
- int bytes_written = 0;
+ HASHIT(hashit_count);
unsigned long flags;
- size_t hdr_len;
+ size_t buf_size, pos;
+ char *buff;
if (!bat_priv->primary_if) {
- if (off == 0)
- return sprintf(buff,
- "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
- net_dev->name);
-
- return 0;
+ return seq_printf(seq, "BATMAN mesh %s disabled - "
+ "please specify interfaces to enable it\n",
+ net_dev->name);
}
- hdr_len = sprintf(buff,
- "Globally announced HNAs received via the mesh %s "
- "(translation table):\n",
- net_dev->name);
-
- if (off < hdr_len)
- bytes_written = hdr_len;
+ seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
+ net_dev->name);
spin_lock_irqsave(&hna_global_hash_lock, flags);
- while (hash_iterate(hna_global_hash, &hashit)) {
- hdr_len += 43;
-
- if (count < bytes_written + 44)
- break;
+ buf_size = 1;
+ /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
+ while (hash_iterate(hna_global_hash, &hashit_count))
+ buf_size += 43;
- if (off >= hdr_len)
- continue;
+ buff = kmalloc(buf_size, GFP_ATOMIC);
+ if (!buff) {
+ spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ return -ENOMEM;
+ }
+ buff[0] = '\0';
+ pos = 0;
+ while (hash_iterate(hna_global_hash, &hashit)) {
hna_global_entry = hashit.bucket->data;
- bytes_written += snprintf(buff + bytes_written, 44,
- " * " MAC_FMT " via " MAC_FMT "\n",
- hna_global_entry->addr[0],
- hna_global_entry->addr[1],
- hna_global_entry->addr[2],
- hna_global_entry->addr[3],
- hna_global_entry->addr[4],
- hna_global_entry->addr[5],
- hna_global_entry->orig_node->orig[0],
- hna_global_entry->orig_node->orig[1],
- hna_global_entry->orig_node->orig[2],
- hna_global_entry->orig_node->orig[3],
- hna_global_entry->orig_node->orig[4],
- hna_global_entry->orig_node->orig[5]);
+ pos += snprintf(buff + pos, 44,
+ " * %pM via %pM\n", hna_global_entry->addr,
+ hna_global_entry->orig_node->orig);
}
spin_unlock_irqrestore(&hna_global_hash_lock, flags);
- return bytes_written;
+
+ seq_printf(seq, "%s", buff);
+ kfree(buff);
+ return 0;
}
-void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
- char *message)
+static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
+ char *message)
{
- bat_dbg(DBG_ROUTES, "Deleting global hna entry %pM (via %pM): %s\n",
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ bat_dbg(DBG_ROUTES, bat_priv,
+ "Deleting global hna entry %pM (via %pM): %s\n",
hna_global_entry->addr, hna_global_entry->orig_node->orig,
message);
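Both HNA table readers above are converted from the old offset-based text fillers to seq_file show functions: a first pass over the hash sizes a scratch buffer, a second pass fills it while the spinlock is still held, and the finished text is handed to seq_printf() after the lock is dropped. A condensed sketch of that pattern using the driver's own hash iterator macros; example_hash, example_hash_lock, struct example_entry and EXAMPLE_ENTRY_LEN are placeholders for this illustration:

#define EXAMPLE_ENTRY_LEN 21	/* " * xx:xx:xx:xx:xx:xx\n" */

static int example_seq_print(struct seq_file *seq, void *offset)
{
	HASHIT(hashit);
	HASHIT(hashit_count);
	struct example_entry *entry;
	unsigned long flags;
	size_t buf_size = 1, pos = 0;	/* 1 byte for the trailing '\0' */
	char *buff;

	spin_lock_irqsave(&example_hash_lock, flags);

	/* pass 1: size the scratch buffer while holding the lock */
	while (hash_iterate(example_hash, &hashit_count))
		buf_size += EXAMPLE_ENTRY_LEN;

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_irqrestore(&example_hash_lock, flags);
		return -ENOMEM;
	}
	buff[0] = '\0';

	/* pass 2: format every entry into the scratch buffer */
	while (hash_iterate(example_hash, &hashit)) {
		entry = hashit.bucket->data;
		pos += snprintf(buff + pos, EXAMPLE_ENTRY_LEN + 1,
				" * %pM\n", entry->addr);
	}

	spin_unlock_irqrestore(&example_hash_lock, flags);

	/* emit the finished text in one go, outside the lock */
	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}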
diff --git a/drivers/staging/batman-adv/translation-table.h b/drivers/staging/batman-adv/translation-table.h
index 8f412fca87f..fa93e37d095 100644
--- a/drivers/staging/batman-adv/translation-table.h
+++ b/drivers/staging/batman-adv/translation-table.h
@@ -19,23 +19,21 @@
*
*/
+#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
+#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
+
#include "types.h"
int hna_local_init(void);
void hna_local_add(uint8_t *addr);
void hna_local_remove(uint8_t *addr, char *message);
int hna_local_fill_buffer(unsigned char *buff, int buff_len);
-int hna_local_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off);
-void hna_local_purge(struct work_struct *work);
+int hna_local_seq_print_text(struct seq_file *seq, void *offset);
void hna_local_free(void);
int hna_global_init(void);
void hna_global_add_orig(struct orig_node *orig_node, unsigned char *hna_buff,
int hna_buff_len);
-int hna_global_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off);
-void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
- char *orig_str);
+int hna_global_seq_print_text(struct seq_file *seq, void *offset);
void hna_global_del_orig(struct orig_node *orig_node, char *message);
void hna_global_free(void);
struct orig_node *transtable_search(uint8_t *addr);
@@ -43,3 +41,5 @@ struct orig_node *transtable_search(uint8_t *addr);
extern spinlock_t hna_local_hash_lock;
extern struct hashtable_t *hna_local_hash;
extern atomic_t hna_local_changed;
+
+#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 86007c7eb44..9aa9d369c75 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -21,10 +21,8 @@
-
-
-#ifndef TYPES_H
-#define TYPES_H
+#ifndef _NET_BATMAN_ADV_TYPES_H_
+#define _NET_BATMAN_ADV_TYPES_H_
#include "packet.h"
#include "bitarray.h"
@@ -52,6 +50,7 @@ struct batman_if {
/**
* orig_node - structure for orig_list maintaining nodes of mesh
+ * @primary_addr: host's primary interface address
* @last_valid: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
* @batman_seqno_reset: time when the batman seqno window was reset
@@ -59,9 +58,13 @@ struct batman_if {
* @last_real_seqno: last and best known sequence number
* @last_ttl: ttl of last received packet
* @last_bcast_seqno: last broadcast sequence number received by this host
+ *
+ * @candidates: how many candidates are available
+ * @selected: next bonding candidate
*/
struct orig_node {
uint8_t orig[ETH_ALEN];
+ uint8_t primary_addr[ETH_ALEN];
struct neigh_node *router;
TYPE_OF_WORD *bcast_own;
uint8_t *bcast_own_sum;
@@ -72,12 +75,16 @@ struct orig_node {
unsigned long batman_seqno_reset;
uint8_t flags;
unsigned char *hna_buff;
- int16_t hna_buff_len;
- uint16_t last_real_seqno;
+ int16_t hna_buff_len;
+ uint32_t last_real_seqno;
uint8_t last_ttl;
TYPE_OF_WORD bcast_bits[NUM_WORDS];
- uint16_t last_bcast_seqno;
+ uint32_t last_bcast_seqno;
struct list_head neigh_list;
+ struct {
+ uint8_t candidates;
+ struct neigh_node *selected;
+ } bond;
};
/**
@@ -92,6 +99,7 @@ struct neigh_node {
uint8_t tq_index;
uint8_t tq_avg;
uint8_t last_ttl;
+ struct neigh_node *next_bond_candidate;
unsigned long last_valid;
TYPE_OF_WORD real_bits[NUM_WORDS];
struct orig_node *orig_node;
@@ -101,24 +109,30 @@ struct neigh_node {
struct bat_priv {
struct net_device_stats stats;
atomic_t aggregation_enabled;
+ atomic_t bonding_enabled;
atomic_t vis_mode;
atomic_t orig_interval;
+ atomic_t log_level;
char num_ifaces;
+ struct debug_log *debug_log;
struct batman_if *primary_if;
struct kobject *mesh_obj;
+ struct dentry *debug_dir;
};
-struct device_client {
+struct socket_client {
struct list_head queue_list;
unsigned int queue_len;
unsigned char index;
spinlock_t lock;
wait_queue_head_t queue_wait;
+ struct bat_priv *bat_priv;
};
-struct device_packet {
+struct socket_packet {
struct list_head list;
- struct icmp_packet icmp_packet;
+ size_t icmp_len;
+ struct icmp_packet_rr icmp_packet;
};
struct hna_local_entry {
@@ -159,4 +173,12 @@ struct if_list_entry {
struct hlist_node list;
};
-#endif
+struct debug_log {
+ char log_buff[LOG_BUF_LEN];
+ unsigned long log_start;
+ unsigned long log_end;
+ spinlock_t lock;
+ wait_queue_head_t queue_wait;
+};
+
+#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
index 1d3d954847f..4b6a5045f05 100644
--- a/drivers/staging/batman-adv/vis.c
+++ b/drivers/staging/batman-adv/vis.c
@@ -43,8 +43,8 @@
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
-struct hashtable_t *vis_hash;
-DEFINE_SPINLOCK(vis_hash_lock);
+static struct hashtable_t *vis_hash;
+static DEFINE_SPINLOCK(vis_hash_lock);
static DEFINE_SPINLOCK(recv_list_lock);
static struct vis_info *my_vis_info;
static struct list_head send_list; /* always locked with vis_hash_lock */
@@ -115,7 +115,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
}
/* its a new address, add it to the list */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
memcpy(entry->addr, interface, ETH_ALEN);
@@ -142,12 +142,29 @@ static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
return len;
}
+static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
+{
+ struct if_list_entry *entry;
+ struct hlist_node *pos;
+ size_t count = 0;
+
+ hlist_for_each_entry(entry, pos, if_list, list) {
+ if (entry->primary)
+ count += 9;
+ else
+ count += 23;
+ }
+
+ return count;
+}
+
/* read an entry */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
uint8_t *src, bool primary)
{
- char to[40];
+ char to[18];
+ /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
addr_to_string(to, entry->dest);
if (primary && entry->quality == 0)
return sprintf(buff, "HNA %s, ", to);
@@ -157,38 +174,74 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
return 0;
}
-ssize_t vis_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off)
+int vis_seq_print_text(struct seq_file *seq, void *offset)
{
HASHIT(hashit);
+ HASHIT(hashit_count);
struct vis_info *info;
struct vis_info_entry *entries;
+ struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
HLIST_HEAD(vis_if_list);
struct if_list_entry *entry;
struct hlist_node *pos, *n;
- size_t hdr_len, tmp_len;
- int i, bytes_written = 0;
+ int i;
char tmp_addr_str[ETH_STR_LEN];
unsigned long flags;
int vis_server = atomic_read(&bat_priv->vis_mode);
+ size_t buff_pos, buf_size;
+ char *buff;
if ((!bat_priv->primary_if) ||
(vis_server == VIS_TYPE_CLIENT_UPDATE))
return 0;
- hdr_len = 0;
-
+ buf_size = 1;
+ /* Estimate length */
spin_lock_irqsave(&vis_hash_lock, flags);
+ while (hash_iterate(vis_hash, &hashit_count)) {
+ info = hashit_count.bucket->data;
+ entries = (struct vis_info_entry *)
+ ((char *)info + sizeof(struct vis_info));
+
+ for (i = 0; i < info->packet.entries; i++) {
+ if (entries[i].quality == 0)
+ continue;
+ vis_data_insert_interface(entries[i].src, &vis_if_list,
+ compare_orig(entries[i].src,
+ info->packet.vis_orig));
+ }
+
+ hlist_for_each_entry(entry, pos, &vis_if_list, list) {
+ buf_size += 18 + 26 * info->packet.entries;
+
+ /* add primary/secondary records */
+ if (compare_orig(entry->addr, info->packet.vis_orig))
+ buf_size +=
+ vis_data_count_prim_sec(&vis_if_list);
+
+ buf_size += 1;
+ }
+
+ hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+ hlist_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ buff = kmalloc(buf_size, GFP_ATOMIC);
+ if (!buff) {
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
+ return -ENOMEM;
+ }
+ buff[0] = '\0';
+ buff_pos = 0;
+
while (hash_iterate(vis_hash, &hashit)) {
info = hashit.bucket->data;
entries = (struct vis_info_entry *)
((char *)info + sizeof(struct vis_info));
- /* estimated line length */
- if (count < bytes_written + 200)
- break;
-
for (i = 0; i < info->packet.entries; i++) {
if (entries[i].quality == 0)
continue;
@@ -199,30 +252,22 @@ ssize_t vis_fill_buffer_text(struct net_device *net_dev, char *buff,
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
addr_to_string(tmp_addr_str, entry->addr);
- tmp_len = sprintf(buff + bytes_written,
- "%s,", tmp_addr_str);
+ buff_pos += sprintf(buff + buff_pos, "%s,",
+ tmp_addr_str);
for (i = 0; i < info->packet.entries; i++)
- tmp_len += vis_data_read_entry(
- buff + bytes_written + tmp_len,
- &entries[i], entry->addr,
- entry->primary);
+ buff_pos += vis_data_read_entry(buff + buff_pos,
+ &entries[i],
+ entry->addr,
+ entry->primary);
/* add primary/secondary records */
if (compare_orig(entry->addr, info->packet.vis_orig))
- tmp_len += vis_data_read_prim_sec(
- buff + bytes_written + tmp_len,
- &vis_if_list);
-
- tmp_len += sprintf(buff + bytes_written + tmp_len,
- "\n");
-
- hdr_len += tmp_len;
+ buff_pos +=
+ vis_data_read_prim_sec(buff + buff_pos,
+ &vis_if_list);
- if (off >= hdr_len)
- continue;
-
- bytes_written += tmp_len;
+ buff_pos += sprintf(buff + buff_pos, "\n");
}
hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
@@ -230,9 +275,13 @@ ssize_t vis_fill_buffer_text(struct net_device *net_dev, char *buff,
kfree(entry);
}
}
+
spin_unlock_irqrestore(&vis_hash_lock, flags);
- return bytes_written;
+ seq_printf(seq, "%s", buff);
+ kfree(buff);
+
+ return 0;
}
/* add the info packet to the send list, if it was not
@@ -308,7 +357,8 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
old_info = hash_find(vis_hash, &search_elem);
if (old_info != NULL) {
- if (!seq_after(vis_packet->seqno, old_info->packet.seqno)) {
+ if (!seq_after(ntohl(vis_packet->seqno),
+ ntohl(old_info->packet.seqno))) {
if (old_info->packet.seqno == vis_packet->seqno) {
recv_list_add(&old_info->recv_list,
vis_packet->sender_orig);
@@ -340,7 +390,7 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
/* Make it a broadcast packet, if required */
if (make_broadcast)
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
+ memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
/* repair if entries is longer than packet. */
if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
@@ -474,9 +524,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
info->packet.vis_type = atomic_read(&bat_priv->vis_mode);
spin_lock_irqsave(&orig_hash_lock, flags);
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
+ memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
info->packet.ttl = TTL;
- info->packet.seqno++;
+ info->packet.seqno = htonl(ntohl(info->packet.seqno) + 1);
info->packet.entries = 0;
if (info->packet.vis_type == VIS_TYPE_CLIENT_UPDATE) {
@@ -547,7 +597,7 @@ static void purge_vis_packets(void)
if (info == my_vis_info) /* never purge own data. */
continue;
if (time_after(jiffies,
- info->first_seen + (VIS_TIMEOUT*HZ)/1000)) {
+ info->first_seen + VIS_TIMEOUT * HZ)) {
hash_remove_bucket(vis_hash, &hashit);
send_list_del(info);
kref_put(&info->refcount, free_info);
@@ -591,7 +641,7 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
}
spin_unlock_irqrestore(&orig_hash_lock, flags);
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
+ memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
}
static void unicast_vis_packet(struct vis_info *info, int packet_length)
@@ -628,11 +678,11 @@ static void send_vis_packet(struct vis_info *info)
int packet_length;
if (info->packet.ttl < 2) {
- printk(KERN_WARNING "batman-adv: Error - can't send vis packet: ttl exceeded\n");
+ pr_warning("Error - can't send vis packet: ttl exceeded\n");
return;
}
- memcpy(info->packet.sender_orig, mainIfAddr, ETH_ALEN);
+ memcpy(info->packet.sender_orig, main_if_addr, ETH_ALEN);
info->packet.ttl--;
packet_length = sizeof(struct vis_packet) +
@@ -690,18 +740,18 @@ int vis_init(void)
vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
if (!vis_hash) {
- printk(KERN_ERR "batman-adv:Can't initialize vis_hash\n");
+ pr_err("Can't initialize vis_hash\n");
goto err;
}
my_vis_info = kmalloc(1000, GFP_ATOMIC);
if (!my_vis_info) {
- printk(KERN_ERR "batman-adv:Can't initialize vis packet\n");
+ pr_err("Can't initialize vis packet\n");
goto err;
}
/* prefill the vis info */
- my_vis_info->first_seen = jiffies - atomic_read(&vis_interval);
+ my_vis_info->first_seen = jiffies - msecs_to_jiffies(VIS_INTERVAL);
INIT_LIST_HEAD(&my_vis_info->recv_list);
INIT_LIST_HEAD(&my_vis_info->send_list);
kref_init(&my_vis_info->refcount);
@@ -713,12 +763,11 @@ int vis_init(void)
INIT_LIST_HEAD(&send_list);
- memcpy(my_vis_info->packet.vis_orig, mainIfAddr, ETH_ALEN);
- memcpy(my_vis_info->packet.sender_orig, mainIfAddr, ETH_ALEN);
+ memcpy(my_vis_info->packet.vis_orig, main_if_addr, ETH_ALEN);
+ memcpy(my_vis_info->packet.sender_orig, main_if_addr, ETH_ALEN);
if (hash_add(vis_hash, my_vis_info) < 0) {
- printk(KERN_ERR
- "batman-adv:Can't add own vis packet into hash\n");
+ pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
kref_put(&my_vis_info->refcount, free_info);
goto err;
@@ -764,5 +813,5 @@ void vis_quit(void)
static void start_vis_timer(void)
{
queue_delayed_work(bat_event_workqueue, &vis_timer_wq,
- (atomic_read(&vis_interval) * HZ) / 1000);
+ (VIS_INTERVAL * HZ) / 1000);
}
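Visualization sequence numbers are now kept in network byte order inside the packet, so every comparison and increment converts to host order first; comparing the raw big-endian values with seq_before()/seq_after() would give wrong answers on little-endian machines. A tiny stand-alone illustration of the conversion round trip:

	uint32_t seqno_net = htonl(41);		/* as stored in the packet */
	uint32_t next = ntohl(seqno_net) + 1;	/* arithmetic in host order */

	seqno_net = htonl(next);		/* back to wire order: 42 */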
diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
index 9c1fd771cba..bb13bf1a3f4 100644
--- a/drivers/staging/batman-adv/vis.h
+++ b/drivers/staging/batman-adv/vis.h
@@ -19,7 +19,10 @@
*
*/
-#define VIS_TIMEOUT 200000
+#ifndef _NET_BATMAN_ADV_VIS_H_
+#define _NET_BATMAN_ADV_VIS_H_
+
+#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */
struct vis_info {
unsigned long first_seen;
@@ -44,11 +47,7 @@ struct recvlist_node {
uint8_t mac[ETH_ALEN];
};
-extern struct hashtable_t *vis_hash;
-extern spinlock_t vis_hash_lock;
-
-ssize_t vis_fill_buffer_text(struct net_device *net_dev, char *buff,
- size_t count, loff_t off);
+int vis_seq_print_text(struct seq_file *seq, void *offset);
void receive_server_sync_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len);
@@ -57,3 +56,5 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
int vis_info_len);
int vis_init(void);
void vis_quit(void);
+
+#endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index 15c9348fb93..b10f739b7e3 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -2,7 +2,6 @@ TODO:
- checkpatch.pl cleanups
- Lindent
- remove all wrappers
- - remove typedefs
- audit userspace interface
- reserve major number
- cleanup the individual comedi drivers as well
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index aeb2c00875c..14091313ceb 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1845,8 +1845,15 @@ ok:
}
}
- if (dev->attached && dev->use_count == 0 && dev->open)
- dev->open(dev);
+ if (dev->attached && dev->use_count == 0 && dev->open) {
+ int rc = dev->open(dev);
+ if (rc < 0) {
+ module_put(dev->driver->module);
+ module_put(THIS_MODULE);
+ mutex_unlock(&dev->mutex);
+ return rc;
+ }
+ }
dev->use_count++;
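Because the open() hook of struct comedi_device now returns an int, comedi_fops can propagate a failure and drop the module references it just took. A hedged sketch of what a low-level driver's hook could look like under the new signature; the function name and the specific check are illustrative only:

static int example_comedi_open(struct comedi_device *dev)
{
	/* refuse the open with a negative errno if the board is gone;
	 * comedi_fops then releases its module references and unlocks */
	if (!dev->board_ptr)
		return -ENODEV;

	return 0;
}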
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 4eb2b77f56d..68aa9176d24 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -53,62 +53,6 @@
COMEDI_MINORVERSION, COMEDI_MICROVERSION)
#define COMEDI_RELEASE VERSION
-#define COMEDI_INITCLEANUP_NOMODULE(x) \
- static int __init x ## _init_module(void) \
- {return comedi_driver_register(&(x)); } \
- static void __exit x ## _cleanup_module(void) \
- {comedi_driver_unregister(&(x)); } \
- module_init(x ## _init_module); \
- module_exit(x ## _cleanup_module);
-
-#define COMEDI_MODULE_MACROS \
- MODULE_AUTHOR("Comedi http://www.comedi.org"); \
- MODULE_DESCRIPTION("Comedi low-level driver"); \
- MODULE_LICENSE("GPL");
-
-#define COMEDI_INITCLEANUP(x) \
- COMEDI_MODULE_MACROS \
- COMEDI_INITCLEANUP_NOMODULE(x)
-
-#define COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table) \
- static int __devinit comedi_driver ## _pci_probe(struct pci_dev *dev, \
- const struct pci_device_id *ent) \
- { \
- return comedi_pci_auto_config(dev, comedi_driver.driver_name); \
- } \
- static void __devexit comedi_driver ## _pci_remove(\
- struct pci_dev *dev) \
- { \
- comedi_pci_auto_unconfig(dev); \
- } \
- static struct pci_driver comedi_driver ## _pci_driver = \
- { \
- .id_table = pci_id_table, \
- .probe = &comedi_driver ## _pci_probe, \
- .remove = __devexit_p(&comedi_driver ## _pci_remove) \
- }; \
- static int __init comedi_driver ## _init_module(void) \
- { \
- int retval; \
- retval = comedi_driver_register(&comedi_driver); \
- if (retval < 0) \
- return retval; \
- comedi_driver ## _pci_driver.name = \
- (char *)comedi_driver.driver_name; \
- return pci_register_driver(&comedi_driver ## _pci_driver); \
- } \
- static void __exit comedi_driver ## _cleanup_module(void) \
- { \
- pci_unregister_driver(&comedi_driver ## _pci_driver); \
- comedi_driver_unregister(&comedi_driver); \
- } \
- module_init(comedi_driver ## _init_module); \
- module_exit(comedi_driver ## _cleanup_module);
-
-#define COMEDI_PCI_INITCLEANUP(comedi_driver, pci_id_table) \
- COMEDI_MODULE_MACROS \
- COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table)
-
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_ICP 0x104c
#define PCI_VENDOR_ID_CONTEC 0x1221
@@ -285,7 +229,7 @@ struct comedi_device {
struct fasync_struct *async_queue;
- void (*open) (struct comedi_device *dev);
+ int (*open) (struct comedi_device *dev);
void (*close) (struct comedi_device *dev);
};
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index fe63830bd85..95049a8d3b3 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -117,7 +117,18 @@ static struct comedi_driver driver_8255 = {
.detach = dev_8255_detach,
};
-COMEDI_INITCLEANUP(driver_8255);
+static int __init driver_8255_init_module(void)
+{
+ return comedi_driver_register(&driver_8255);
+}
+
+static void __exit driver_8255_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_8255);
+}
+
+module_init(driver_8255_init_module);
+module_exit(driver_8255_cleanup_module);
static void do_config(struct comedi_device *dev, struct comedi_subdevice *s);
@@ -457,3 +468,7 @@ static int dev_8255_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/acl7225b.c b/drivers/staging/comedi/drivers/acl7225b.c
index e20c3542c06..9def2250bb8 100644
--- a/drivers/staging/comedi/drivers/acl7225b.c
+++ b/drivers/staging/comedi/drivers/acl7225b.c
@@ -49,7 +49,18 @@ static struct comedi_driver driver_acl7225b = {
.offset = sizeof(struct boardtype),
};
-COMEDI_INITCLEANUP(driver_acl7225b);
+static int __init driver_acl7225b_init_module(void)
+{
+ return comedi_driver_register(&driver_acl7225b);
+}
+
+static void __exit driver_acl7225b_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_acl7225b);
+}
+
+module_init(driver_acl7225b_init_module);
+module_exit(driver_acl7225b_cleanup_module);
static int acl7225b_do_insn(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -150,3 +161,7 @@ static int acl7225b_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h b/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h
index c3284eb0f0a..8ed19bcbf35 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h
@@ -247,16 +247,14 @@ int i_pci_card_data(struct pcilst_struct *amcc,
/* build list of amcc cards in this system */
void v_pci_card_list_init(unsigned short pci_vendor, char display)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct pcilst_struct *amcc, *last;
int i;
int i_Count = 0;
amcc_devices = NULL;
last = NULL;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
for (i_Count = 0; i_Count < 2; i_Count++) {
pci_vendor = i_ADDIDATADeviceID[i_Count];
if (pcidev->vendor == pci_vendor) {
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index b18e81d8cf8..5ed4b9451f2 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -2541,7 +2541,43 @@ static struct comedi_driver driver_addi = {
.offset = sizeof(struct addi_board),
};
-COMEDI_PCI_INITCLEANUP(driver_addi, addi_apci_tbl);
+static int __devinit driver_addi_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_addi.driver_name);
+}
+
+static void __devexit driver_addi_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_addi_pci_driver = {
+ .id_table = addi_apci_tbl,
+ .probe = &driver_addi_pci_probe,
+ .remove = __devexit_p(&driver_addi_pci_remove)
+};
+
+static int __init driver_addi_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_addi);
+ if (retval < 0)
+ return retval;
+
+ driver_addi_pci_driver.name = (char *)driver_addi.driver_name;
+ return pci_register_driver(&driver_addi_pci_driver);
+}
+
+static void __exit driver_addi_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_addi_pci_driver);
+ comedi_driver_unregister(&driver_addi);
+}
+
+module_init(driver_addi_init_module);
+module_exit(driver_addi_cleanup_module);
/*
+----------------------------------------------------------------------------+
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
index bea329f44d8..e0213a903af 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
@@ -101,10 +101,10 @@ struct str_TimerMainHeader {
};
-typedef struct {
+struct str_AnalogOutputHeader {
unsigned short w_Nchannel;
unsigned char b_Resolution;
-} str_AnalogOutputHeader;
+};
struct str_AnalogInputHeader {
unsigned short w_Nchannel;
@@ -136,7 +136,7 @@ int i_EepromReadTimerHeader(unsigned short w_PCIBoardEepromAddress,
int i_EepromReadAnlogOutputHeader(unsigned short w_PCIBoardEepromAddress,
char *pc_PCIChipInformation, unsigned short w_Address,
- str_AnalogOutputHeader *s_Header);
+ struct str_AnalogOutputHeader *s_Header);
int i_EepromReadAnlogInputHeader(unsigned short w_PCIBoardEepromAddress,
char *pc_PCIChipInformation, unsigned short w_Address,
@@ -635,7 +635,7 @@ void v_EepromSendCommand76(unsigned int dw_Address, unsigned int dw_EepromComman
| Input Parameters : unsigned int dw_Address : PCI eeprom base address |
-| unsigned short w_offset : Offset of the adress to read |
+| unsigned short w_offset : Offset of the address to read |
| unsigned short * pw_Value : PCI eeprom 16 bit read value. |
@@ -811,7 +811,7 @@ int i_EepromReadMainHeader(unsigned short w_PCIBoardEepromAddress,
struct str_DigitalInputHeader s_DigitalInputHeader;
struct str_DigitalOutputHeader s_DigitalOutputHeader;
/* struct str_TimerMainHeader s_TimerMainHeader,s_WatchdogMainHeader; */
- str_AnalogOutputHeader s_AnalogOutputHeader;
+ struct str_AnalogOutputHeader s_AnalogOutputHeader;
struct str_AnalogInputHeader s_AnalogInputHeader;
/* Read size */
@@ -1081,7 +1081,7 @@ int i_EepromReadTimerHeader(unsigned short w_PCIBoardEepromAddress,
int i_EepromReadAnlogOutputHeader(unsigned short w_PCIBoardEepromAddress,
char *pc_PCIChipInformation, unsigned short w_Address,
- str_AnalogOutputHeader *s_Header)
+ struct str_AnalogOutputHeader *s_Header)
{
unsigned short w_Temp;
/* No of channels for 1st hard component */
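
The typedef removal in this file follows the usual kernel rule that structures are referred to by their tag rather than hidden behind a typedef; the change is purely declaration style. A side-by-side sketch using the same fields as the hunk, with both forms shown only for comparison:

/* Old style: an anonymous struct hidden behind a CamelCase typedef. */
typedef struct {
        unsigned short w_Nchannel;
        unsigned char b_Resolution;
} str_AnalogOutputHeader;

/* New style: a tagged struct, spelled "struct str_AnalogOutputHeader"
 * at every use, matching the other *Header types in this file. */
struct str_AnalogOutputHeader {
        unsigned short w_Nchannel;
        unsigned char b_Resolution;
};
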
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index f93ddd4eb06..851f71bbf1b 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -1090,13 +1090,13 @@ int i_APCI3120_CyclicAnalogInput(int mode, struct comedi_device *dev,
* and put into into an array array used may be for differnet pages
*/
- /* DMA Start Adress Low */
+ /* DMA Start Address Low */
outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
outw((devpriv->ul_DmaBufferHw[0] & 0xFFFF),
devpriv->i_IobaseAddon + 2);
/*************************/
- /* DMA Start Adress High */
+ /* DMA Start Address High */
/*************************/
outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
outw((devpriv->ul_DmaBufferHw[0] / 65536),
@@ -1733,11 +1733,11 @@ void v_APCI3120_InterruptDma(int irq, void *d)
var = devpriv->ul_DmaBufferHw[next_dma_buf];
high_word = var / 65536;
- /* DMA Start Adress Low */
+ /* DMA Start Address Low */
outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
outw(low_word, devpriv->i_IobaseAddon + 2);
- /* DMA Start Adress High */
+ /* DMA Start Address High */
outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
outw(high_word, devpriv->i_IobaseAddon + 2);
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index 6dfcbe803f2..4c00df4bc15 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -5,3 +5,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_035"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
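
The addi_apci_* sources in this part of the series (addi_apci_035.c above and the files that follow) are thin wrappers: each defines a board-specific driver name and pulls in the shared addi-data/addi_common.c body. The MODULE_AUTHOR/DESCRIPTION/LICENSE lines are appended to every wrapper because the removed COMEDI_PCI_INITCLEANUP() macro appears to have been what previously emitted that module metadata. The resulting file shape, with a hypothetical "xxxx" board suffix and only meaningful inside the comedi tree:

#define ADDIDATA_DRIVER_NAME    "addi_apci_xxxx"
#include "addi-data/addi_common.c"

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
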
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 4722ec834f7..7831ce33b02 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_1032"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index db3dafdcf69..bfd84f66d9c 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_1500"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index f591baff6a0..a12e2f42137 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_1516"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 6f5c923ac22..1b9d598fb6c 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_1564"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 1d926add9e6..d54218d59c5 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_16xx"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2016.c b/drivers/staging/comedi/drivers/addi_apci_2016.c
index 7266e412f0a..fa50c7bb7ad 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2016.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2016.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_2016"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index f67da94119e..073a8a56dbe 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_2032"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index bc7f7d65350..adfbb5d410e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_2200"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3001.c b/drivers/staging/comedi/drivers/addi_apci_3001.c
index d86c4209cb9..00ac762965c 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3001.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3001.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_3001"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 0b22cf10415..c35515845cf 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_3120"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index d8a01b154e3..dd2c1d3bc18 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_3501"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 942bc9e259a..03161c88eac 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -3,3 +3,7 @@
#define ADDIDATA_DRIVER_NAME "addi_apci_3xxx"
#include "addi-data/addi_common.c"
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 712b9e0788b..073d0242c28 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -119,7 +119,43 @@ static struct comedi_driver driver_pci6208 = {
.detach = pci6208_detach,
};
-COMEDI_PCI_INITCLEANUP(driver_pci6208, pci6208_pci_table);
+static int __devinit driver_pci6208_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pci6208.driver_name);
+}
+
+static void __devexit driver_pci6208_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pci6208_pci_driver = {
+ .id_table = pci6208_pci_table,
+ .probe = &driver_pci6208_pci_probe,
+ .remove = __devexit_p(&driver_pci6208_pci_remove)
+};
+
+static int __init driver_pci6208_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pci6208);
+ if (retval < 0)
+ return retval;
+
+ driver_pci6208_pci_driver.name = (char *)driver_pci6208.driver_name;
+ return pci_register_driver(&driver_pci6208_pci_driver);
+}
+
+static void __exit driver_pci6208_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pci6208_pci_driver);
+ comedi_driver_unregister(&driver_pci6208);
+}
+
+module_init(driver_pci6208_init_module);
+module_exit(driver_pci6208_cleanup_module);
static int pci6208_find_device(struct comedi_device *dev, int bus, int slot);
static int
@@ -315,12 +351,10 @@ static int pci6208_ao_rinsn(struct comedi_device *dev,
static int pci6208_find_device(struct comedi_device *dev, int bus, int slot)
{
- struct pci_dev *pci_dev;
+ struct pci_dev *pci_dev = NULL;
int i;
- for (pci_dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pci_dev != NULL;
- pci_dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) {
+ for_each_pci_dev(pci_dev) {
if (pci_dev->vendor == PCI_VENDOR_ID_ADLINK) {
for (i = 0; i < ARRAY_SIZE(pci6208_boards); i++) {
if (pci6208_boards[i].dev_id ==
@@ -408,3 +442,7 @@ pci6208_pci_setup(struct pci_dev *pci_dev, unsigned long *io_base_ptr,
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci7230.c b/drivers/staging/comedi/drivers/adl_pci7230.c
index 24a82eb9d15..72a7258b5b9 100644
--- a/drivers/staging/comedi/drivers/adl_pci7230.c
+++ b/drivers/staging/comedi/drivers/adl_pci7230.c
@@ -90,7 +90,7 @@ static int adl_pci7230_do_insn_bits(struct comedi_device *dev,
static int adl_pci7230_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
int bus, slot;
@@ -106,10 +106,7 @@ static int adl_pci7230_attach(struct comedi_device *dev,
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
-
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
pcidev->device == PCI_DEVICE_ID_PCI7230) {
if (bus || slot) {
@@ -203,4 +200,46 @@ static int adl_pci7230_di_insn_bits(struct comedi_device *dev,
return 2;
}
-COMEDI_PCI_INITCLEANUP(driver_adl_pci7230, adl_pci7230_pci_table);
+static int __devinit driver_adl_pci7230_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_adl_pci7230.driver_name);
+}
+
+static void __devexit driver_adl_pci7230_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_adl_pci7230_pci_driver = {
+ .id_table = adl_pci7230_pci_table,
+ .probe = &driver_adl_pci7230_pci_probe,
+ .remove = __devexit_p(&driver_adl_pci7230_pci_remove)
+};
+
+static int __init driver_adl_pci7230_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_adl_pci7230);
+ if (retval < 0)
+ return retval;
+
+ driver_adl_pci7230_pci_driver.name =
+ (char *)driver_adl_pci7230.driver_name;
+ return pci_register_driver(&driver_adl_pci7230_pci_driver);
+}
+
+static void __exit driver_adl_pci7230_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_adl_pci7230_pci_driver);
+ comedi_driver_unregister(&driver_adl_pci7230);
+}
+
+module_init(driver_adl_pci7230_init_module);
+module_exit(driver_adl_pci7230_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci7296.c b/drivers/staging/comedi/drivers/adl_pci7296.c
index 8602865ae6b..f28fe6bec05 100644
--- a/drivers/staging/comedi/drivers/adl_pci7296.c
+++ b/drivers/staging/comedi/drivers/adl_pci7296.c
@@ -77,7 +77,7 @@ static struct comedi_driver driver_adl_pci7296 = {
static int adl_pci7296_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
int bus, slot;
int ret;
@@ -94,10 +94,7 @@ static int adl_pci7296_attach(struct comedi_device *dev,
if (alloc_subdevices(dev, 4) < 0)
return -ENOMEM;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
-
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
pcidev->device == PCI_DEVICE_ID_PCI7296) {
if (bus || slot) {
@@ -177,4 +174,46 @@ static int adl_pci7296_detach(struct comedi_device *dev)
return 0;
}
-COMEDI_PCI_INITCLEANUP(driver_adl_pci7296, adl_pci7296_pci_table);
+static int __devinit driver_adl_pci7296_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_adl_pci7296.driver_name);
+}
+
+static void __devexit driver_adl_pci7296_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_adl_pci7296_pci_driver = {
+ .id_table = adl_pci7296_pci_table,
+ .probe = &driver_adl_pci7296_pci_probe,
+ .remove = __devexit_p(&driver_adl_pci7296_pci_remove)
+};
+
+static int __init driver_adl_pci7296_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_adl_pci7296);
+ if (retval < 0)
+ return retval;
+
+ driver_adl_pci7296_pci_driver.name =
+ (char *)driver_adl_pci7296.driver_name;
+ return pci_register_driver(&driver_adl_pci7296_pci_driver);
+}
+
+static void __exit driver_adl_pci7296_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_adl_pci7296_pci_driver);
+ comedi_driver_unregister(&driver_adl_pci7296);
+}
+
+module_init(driver_adl_pci7296_init_module);
+module_exit(driver_adl_pci7296_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci7432.c b/drivers/staging/comedi/drivers/adl_pci7432.c
index b5a9499e438..262da7b29b2 100644
--- a/drivers/staging/comedi/drivers/adl_pci7432.c
+++ b/drivers/staging/comedi/drivers/adl_pci7432.c
@@ -86,7 +86,7 @@ static int adl_pci7432_do_insn_bits(struct comedi_device *dev,
static int adl_pci7432_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
int bus, slot;
@@ -102,10 +102,7 @@ static int adl_pci7432_attach(struct comedi_device *dev,
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
-
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
pcidev->device == PCI_DEVICE_ID_PCI7432) {
if (bus || slot) {
@@ -210,4 +207,46 @@ static int adl_pci7432_di_insn_bits(struct comedi_device *dev,
return 2;
}
-COMEDI_PCI_INITCLEANUP(driver_adl_pci7432, adl_pci7432_pci_table);
+static int __devinit driver_adl_pci7432_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_adl_pci7432.driver_name);
+}
+
+static void __devexit driver_adl_pci7432_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_adl_pci7432_pci_driver = {
+ .id_table = adl_pci7432_pci_table,
+ .probe = &driver_adl_pci7432_pci_probe,
+ .remove = __devexit_p(&driver_adl_pci7432_pci_remove)
+};
+
+static int __init driver_adl_pci7432_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_adl_pci7432);
+ if (retval < 0)
+ return retval;
+
+ driver_adl_pci7432_pci_driver.name =
+ (char *)driver_adl_pci7432.driver_name;
+ return pci_register_driver(&driver_adl_pci7432_pci_driver);
+}
+
+static void __exit driver_adl_pci7432_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_adl_pci7432_pci_driver);
+ comedi_driver_unregister(&driver_adl_pci7432);
+}
+
+module_init(driver_adl_pci7432_init_module);
+module_exit(driver_adl_pci7432_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index da256a1e0b4..767a594935c 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -125,7 +125,7 @@ static int adl_pci8164_insn_write_buf1(struct comedi_device *dev,
static int adl_pci8164_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
int bus, slot;
@@ -142,10 +142,7 @@ static int adl_pci8164_attach(struct comedi_device *dev,
if (alloc_subdevices(dev, 4) < 0)
return -ENOMEM;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
-
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
pcidev->device == PCI_DEVICE_ID_PCI8164) {
if (bus || slot) {
@@ -389,4 +386,46 @@ static int adl_pci8164_insn_write_buf1(struct comedi_device *dev,
return 2;
}
-COMEDI_PCI_INITCLEANUP(driver_adl_pci8164, adl_pci8164_pci_table);
+static int __devinit driver_adl_pci8164_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_adl_pci8164.driver_name);
+}
+
+static void __devexit driver_adl_pci8164_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_adl_pci8164_pci_driver = {
+ .id_table = adl_pci8164_pci_table,
+ .probe = &driver_adl_pci8164_pci_probe,
+ .remove = __devexit_p(&driver_adl_pci8164_pci_remove)
+};
+
+static int __init driver_adl_pci8164_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_adl_pci8164);
+ if (retval < 0)
+ return retval;
+
+ driver_adl_pci8164_pci_driver.name =
+ (char *)driver_adl_pci8164.driver_name;
+ return pci_register_driver(&driver_adl_pci8164_pci_driver);
+}
+
+static void __exit driver_adl_pci8164_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_adl_pci8164_pci_driver);
+ comedi_driver_unregister(&driver_adl_pci8164);
+}
+
+module_init(driver_adl_pci8164_init_module);
+module_exit(driver_adl_pci8164_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 39d112b708e..b2a02b0f569 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -38,8 +38,8 @@ Supports:
- do_insn read/write
- ai_do_cmd mode with the following sources:
- - start_src TRIG_NOW
- - scan_begin_src TRIG_FOLLOW TRIG_TIMER TRIG_EXT
+ - start_src TRIG_NOW
+ - scan_begin_src TRIG_FOLLOW TRIG_TIMER TRIG_EXT
- convert_src TRIG_TIMER TRIG_EXT
- scan_end_src TRIG_COUNT
- stop_src TRIG_COUNT TRIG_NONE
@@ -68,8 +68,9 @@ CHANGELOG:
TODO:
- Really test implemented functionality.
- - Add support for the PCI-9111DG with a probe routine to identify the card type
- (perhaps with the help of the channel number readback of the A/D Data register).
+ - Add support for the PCI-9111DG with a probe routine to identify the card
+ type (perhaps with the help of the channel number readback of the A/D Data
+ register).
- Add external multiplexer support.
*/
@@ -83,12 +84,12 @@ TODO:
#include "comedi_pci.h"
#include "comedi_fc.h"
-#define PCI9111_DRIVER_NAME "adl_pci9111"
-#define PCI9111_HR_DEVICE_ID 0x9111
+#define PCI9111_DRIVER_NAME "adl_pci9111"
+#define PCI9111_HR_DEVICE_ID 0x9111
/* TODO: Add other pci9111 board id */
-#define PCI9111_IO_RANGE 0x0100
+#define PCI9111_IO_RANGE 0x0100
#define PCI9111_FIFO_HALF_SIZE 512
@@ -134,27 +135,29 @@ TODO:
/* IO address map */
-#define PCI9111_REGISTER_AD_FIFO_VALUE 0x00 /* AD Data stored in FIFO */
-#define PCI9111_REGISTER_DA_OUTPUT 0x00
-#define PCI9111_REGISTER_DIGITAL_IO 0x02
-#define PCI9111_REGISTER_EXTENDED_IO_PORTS 0x04
-#define PCI9111_REGISTER_AD_CHANNEL_CONTROL 0x06 /* Channel selection */
-#define PCI9111_REGISTER_AD_CHANNEL_READBACK 0x06
-#define PCI9111_REGISTER_INPUT_SIGNAL_RANGE 0x08
-#define PCI9111_REGISTER_RANGE_STATUS_READBACK 0x08
-#define PCI9111_REGISTER_TRIGGER_MODE_CONTROL 0x0A
-#define PCI9111_REGISTER_AD_MODE_INTERRUPT_READBACK 0x0A
-#define PCI9111_REGISTER_SOFTWARE_TRIGGER 0x0E
-#define PCI9111_REGISTER_INTERRUPT_CONTROL 0x0C
+#define PCI9111_REGISTER_AD_FIFO_VALUE 0x00 /* AD Data stored
+ in FIFO */
+#define PCI9111_REGISTER_DA_OUTPUT 0x00
+#define PCI9111_REGISTER_DIGITAL_IO 0x02
+#define PCI9111_REGISTER_EXTENDED_IO_PORTS 0x04
+#define PCI9111_REGISTER_AD_CHANNEL_CONTROL 0x06 /* Channel
+ selection */
+#define PCI9111_REGISTER_AD_CHANNEL_READBACK 0x06
+#define PCI9111_REGISTER_INPUT_SIGNAL_RANGE 0x08
+#define PCI9111_REGISTER_RANGE_STATUS_READBACK 0x08
+#define PCI9111_REGISTER_TRIGGER_MODE_CONTROL 0x0A
+#define PCI9111_REGISTER_AD_MODE_INTERRUPT_READBACK 0x0A
+#define PCI9111_REGISTER_SOFTWARE_TRIGGER 0x0E
+#define PCI9111_REGISTER_INTERRUPT_CONTROL 0x0C
#define PCI9111_REGISTER_8254_COUNTER_0 0x40
#define PCI9111_REGISTER_8254_COUNTER_1 0x42
-#define PCI9111_REGISTER_8254_COUNTER_2 0X44
+#define PCI9111_REGISTER_8254_COUNTER_2 0X44
#define PCI9111_REGISTER_8254_CONTROL 0x46
-#define PCI9111_REGISTER_INTERRUPT_CLEAR 0x48
+#define PCI9111_REGISTER_INTERRUPT_CLEAR 0x48
-#define PCI9111_TRIGGER_MASK 0x0F
-#define PCI9111_PTRG_OFF (0 << 3)
-#define PCI9111_PTRG_ON (1 << 3)
+#define PCI9111_TRIGGER_MASK 0x0F
+#define PCI9111_PTRG_OFF (0 << 3)
+#define PCI9111_PTRG_ON (1 << 3)
#define PCI9111_EITS_EXTERNAL (1 << 2)
#define PCI9111_EITS_INTERNAL (0 << 2)
#define PCI9111_TPST_SOFTWARE_TRIGGER (0 << 1)
@@ -164,9 +167,9 @@ TODO:
#define PCI9111_ISC0_SET_IRQ_ON_ENDING_OF_AD_CONVERSION (0 << 0)
#define PCI9111_ISC0_SET_IRQ_ON_FIFO_HALF_FULL (1 << 0)
-#define PCI9111_ISC1_SET_IRQ_ON_TIMER_TICK (0 << 1)
-#define PCI9111_ISC1_SET_IRQ_ON_EXT_TRG (1 << 1)
-#define PCI9111_FFEN_SET_FIFO_ENABLE (0 << 2)
+#define PCI9111_ISC1_SET_IRQ_ON_TIMER_TICK (0 << 1)
+#define PCI9111_ISC1_SET_IRQ_ON_EXT_TRG (1 << 1)
+#define PCI9111_FFEN_SET_FIFO_ENABLE (0 << 2)
#define PCI9111_FFEN_SET_FIFO_DISABLE (1 << 2)
#define PCI9111_CHANNEL_MASK 0x0F
@@ -177,7 +180,7 @@ TODO:
#define PCI9111_FIFO_FULL_MASK 0x40
#define PCI9111_AD_BUSY_MASK 0x80
-#define PCI9111_IO_BASE dev->iobase
+#define PCI9111_IO_BASE (dev->iobase)
/*
* Define inlined function
@@ -189,8 +192,9 @@ TODO:
#define pci9111_trigger_and_autoscan_set(flags) \
outb(flags, PCI9111_IO_BASE+PCI9111_REGISTER_TRIGGER_MODE_CONTROL)
-#define pci9111_interrupt_and_fifo_get() \
- ((inb(PCI9111_IO_BASE+PCI9111_REGISTER_AD_MODE_INTERRUPT_READBACK) >> 4) &0x03)
+#define pci9111_interrupt_and_fifo_get() \
+ ((inb(PCI9111_IO_BASE+PCI9111_REGISTER_AD_MODE_INTERRUPT_READBACK) >> 4) \
+ &0x03)
#define pci9111_interrupt_and_fifo_set(flags) \
outb(flags, PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL)
@@ -201,45 +205,56 @@ TODO:
#define pci9111_software_trigger() \
outb(0, PCI9111_IO_BASE+PCI9111_REGISTER_SOFTWARE_TRIGGER)
-#define pci9111_fifo_reset() \
- outb(PCI9111_FFEN_SET_FIFO_ENABLE, PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL); \
- outb(PCI9111_FFEN_SET_FIFO_DISABLE, PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL); \
- outb(PCI9111_FFEN_SET_FIFO_ENABLE, PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL)
+#define pci9111_fifo_reset() do { \
+ outb(PCI9111_FFEN_SET_FIFO_ENABLE, \
+ PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL); \
+ outb(PCI9111_FFEN_SET_FIFO_DISABLE, \
+ PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL); \
+ outb(PCI9111_FFEN_SET_FIFO_ENABLE, \
+ PCI9111_IO_BASE+PCI9111_REGISTER_INTERRUPT_CONTROL); \
+ } while (0)
#define pci9111_is_fifo_full() \
((inb(PCI9111_IO_BASE+PCI9111_REGISTER_RANGE_STATUS_READBACK)& \
- PCI9111_FIFO_FULL_MASK)==0)
+ PCI9111_FIFO_FULL_MASK) == 0)
#define pci9111_is_fifo_half_full() \
((inb(PCI9111_IO_BASE+PCI9111_REGISTER_RANGE_STATUS_READBACK)& \
- PCI9111_FIFO_HALF_FULL_MASK)==0)
+ PCI9111_FIFO_HALF_FULL_MASK) == 0)
#define pci9111_is_fifo_empty() \
((inb(PCI9111_IO_BASE+PCI9111_REGISTER_RANGE_STATUS_READBACK)& \
- PCI9111_FIFO_EMPTY_MASK)==0)
+ PCI9111_FIFO_EMPTY_MASK) == 0)
-#define pci9111_ai_channel_set(channel) \
- outb((channel)&PCI9111_CHANNEL_MASK, PCI9111_IO_BASE+PCI9111_REGISTER_AD_CHANNEL_CONTROL)
+#define pci9111_ai_channel_set(channel) \
+ outb((channel)&PCI9111_CHANNEL_MASK, \
+ PCI9111_IO_BASE+PCI9111_REGISTER_AD_CHANNEL_CONTROL)
-#define pci9111_ai_channel_get() \
- inb(PCI9111_IO_BASE+PCI9111_REGISTER_AD_CHANNEL_READBACK)&PCI9111_CHANNEL_MASK
+#define pci9111_ai_channel_get() \
+ (inb(PCI9111_IO_BASE+PCI9111_REGISTER_AD_CHANNEL_READBACK) \
+ &PCI9111_CHANNEL_MASK)
-#define pci9111_ai_range_set(range) \
- outb((range)&PCI9111_RANGE_MASK, PCI9111_IO_BASE+PCI9111_REGISTER_INPUT_SIGNAL_RANGE)
+#define pci9111_ai_range_set(range) \
+ outb((range)&PCI9111_RANGE_MASK, \
+ PCI9111_IO_BASE+PCI9111_REGISTER_INPUT_SIGNAL_RANGE)
-#define pci9111_ai_range_get() \
- inb(PCI9111_IO_BASE+PCI9111_REGISTER_RANGE_STATUS_READBACK)&PCI9111_RANGE_MASK
+#define pci9111_ai_range_get() \
+ (inb(PCI9111_IO_BASE+PCI9111_REGISTER_RANGE_STATUS_READBACK) \
+ &PCI9111_RANGE_MASK)
-#define pci9111_ai_get_data() \
- ((inw(PCI9111_IO_BASE+PCI9111_REGISTER_AD_FIFO_VALUE)>>4)&PCI9111_AI_RESOLUTION_MASK) \
- ^ PCI9111_AI_RESOLUTION_2_CMP_BIT
+#define pci9111_ai_get_data() \
+ (((inw(PCI9111_IO_BASE+PCI9111_REGISTER_AD_FIFO_VALUE)>>4) \
+ &PCI9111_AI_RESOLUTION_MASK) \
+ ^ PCI9111_AI_RESOLUTION_2_CMP_BIT)
-#define pci9111_hr_ai_get_data() \
- (inw(PCI9111_IO_BASE+PCI9111_REGISTER_AD_FIFO_VALUE) & PCI9111_HR_AI_RESOLUTION_MASK) \
- ^ PCI9111_HR_AI_RESOLUTION_2_CMP_BIT
+#define pci9111_hr_ai_get_data() \
+ ((inw(PCI9111_IO_BASE+PCI9111_REGISTER_AD_FIFO_VALUE) \
+ & PCI9111_HR_AI_RESOLUTION_MASK) \
+ ^ PCI9111_HR_AI_RESOLUTION_2_CMP_BIT)
-#define pci9111_ao_set_data(data) \
- outw(data&PCI9111_AO_RESOLUTION_MASK, PCI9111_IO_BASE+PCI9111_REGISTER_DA_OUTPUT)
+#define pci9111_ao_set_data(data) \
+ outw(data&PCI9111_AO_RESOLUTION_MASK, \
+ PCI9111_IO_BASE+PCI9111_REGISTER_DA_OUTPUT)
#define pci9111_di_get_bits() \
inw(PCI9111_IO_BASE+PCI9111_REGISTER_DIGITAL_IO)
@@ -284,12 +299,11 @@ static const struct comedi_lrange pci9111_hr_ai_range = {
};
static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0},
- /* { PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, */
- {
- 0}
+ { PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0 },
+ /* { PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ * 0, 0, 0 }, */
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, pci9111_pci_table);
@@ -337,7 +351,43 @@ static struct comedi_driver pci9111_driver = {
.detach = pci9111_detach,
};
-COMEDI_PCI_INITCLEANUP(pci9111_driver, pci9111_pci_table);
+static int __devinit pci9111_driver_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, pci9111_driver.driver_name);
+}
+
+static void __devexit pci9111_driver_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver pci9111_driver_pci_driver = {
+ .id_table = pci9111_pci_table,
+ .probe = &pci9111_driver_pci_probe,
+ .remove = __devexit_p(&pci9111_driver_pci_remove)
+};
+
+static int __init pci9111_driver_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&pci9111_driver);
+ if (retval < 0)
+ return retval;
+
+ pci9111_driver_pci_driver.name = (char *)pci9111_driver.driver_name;
+ return pci_register_driver(&pci9111_driver_pci_driver);
+}
+
+static void __exit pci9111_driver_cleanup_module(void)
+{
+ pci_unregister_driver(&pci9111_driver_pci_driver);
+ comedi_driver_unregister(&pci9111_driver);
+}
+
+module_init(pci9111_driver_init_module);
+module_exit(pci9111_driver_cleanup_module);
/* Private data structure */
@@ -345,7 +395,8 @@ struct pci9111_private_data {
struct pci_dev *pci_device;
unsigned long io_range; /* PCI6503 io range */
- unsigned long lcr_io_base; /* Local configuration register base address */
+ unsigned long lcr_io_base; /* Local configuration register base
+ * address */
unsigned long lcr_io_range;
int stop_counter;
@@ -358,7 +409,8 @@ struct pci9111_private_data {
int ao_readback; /* Last written analog output data */
- unsigned int timer_divisor_1; /* Divisor values for the 8254 timer pacer */
+ unsigned int timer_divisor_1; /* Divisor values for the 8254 timer
+ * pacer */
unsigned int timer_divisor_2;
int is_valid; /* Is device valid */
@@ -366,7 +418,7 @@ struct pci9111_private_data {
short ai_bounce_buffer[2 * PCI9111_FIFO_HALF_SIZE];
};
-#define dev_private ((struct pci9111_private_data *)dev->private)
+#define dev_private ((struct pci9111_private_data *)dev->private)
/* ------------------------------------------------------------------ */
/* PLX9050 SECTION */
@@ -548,10 +600,12 @@ static int pci9111_ai_cancel(struct comedi_device *dev,
/* Test analog input command */
-#define pci9111_check_trigger_src(src, flags) \
- tmp = src; \
- src &= flags; \
- if (!src || tmp != src) error++
+#define pci9111_check_trigger_src(src, flags) do { \
+ tmp = src; \
+ src &= flags; \
+ if (!src || tmp != src) \
+ error++; \
+ } while (false);
static int
pci9111_ai_do_cmd_test(struct comedi_device *dev,
@@ -575,7 +629,8 @@ pci9111_ai_do_cmd_test(struct comedi_device *dev,
if (error)
return 1;
- /* step 2 : make sure trigger sources are unique and mutually compatible */
+ /* step 2 : make sure trigger sources are unique and mutually
+ * compatible */
if (cmd->start_src != TRIG_NOW)
error++;
@@ -637,7 +692,8 @@ pci9111_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_arg = board->ai_acquisition_period_min_ns;
error++;
}
- if ((cmd->scan_begin_src == TRIG_FOLLOW) && (cmd->scan_begin_arg != 0)) {
+ if ((cmd->scan_begin_src == TRIG_FOLLOW)
+ && (cmd->scan_begin_arg != 0)) {
cmd->scan_begin_arg = 0;
error++;
}
@@ -1216,7 +1272,7 @@ static int pci9111_attach(struct comedi_device *dev,
{
struct comedi_subdevice *subdevice;
unsigned long io_base, io_range, lcr_io_base, lcr_io_range;
- struct pci_dev *pci_device;
+ struct pci_dev *pci_device = NULL;
int error, i;
const struct pci9111_board *board;
@@ -1226,17 +1282,17 @@ static int pci9111_attach(struct comedi_device *dev,
printk("comedi%d: " PCI9111_DRIVER_NAME " driver\n", dev->minor);
- for (pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pci_device != NULL;
- pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_device)) {
+ for_each_pci_dev(pci_device) {
if (pci_device->vendor == PCI_VENDOR_ID_ADLINK) {
for (i = 0; i < pci9111_board_nbr; i++) {
if (pci9111_boards[i].device_id ==
pci_device->device) {
- /* was a particular bus/slot requested? */
+ /* was a particular bus/slot
+ * requested? */
if ((it->options[0] != 0)
|| (it->options[1] != 0)) {
- /* are we on the wrong bus/slot? */
+ /* are we on the wrong
+ * bus/slot? */
if (pci_device->bus->number !=
it->options[0]
||
@@ -1272,7 +1328,8 @@ found:
/* TODO: Warn about non-tested boards. */
- /* Read local configuration register base address [PCI_BASE_ADDRESS #1]. */
+ /* Read local configuration register base address
+ * [PCI_BASE_ADDRESS #1]. */
lcr_io_base = pci_resource_start(pci_device, 1);
lcr_io_range = pci_resource_len(pci_device, 1);
@@ -1399,3 +1456,7 @@ static int pci9111_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
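
Several macros in adl_pci9111.c above (pci9111_fifo_reset(), pci9111_check_trigger_src()) are rewrapped in do { ... } while (0) so that a multi-statement expansion behaves as a single statement at the call site. A small stand-alone illustration of why that matters, written as plain user-space C with hypothetical names, not comedi code:

#include <stdio.h>

struct dev_state {
        int count;
        int error;
};

/* Unsafe form: expands to two statements, so only the first one would be
 * governed by a surrounding if (). Left unused here on purpose. */
#define RESET_UNSAFE(dev)       \
        (dev)->count = 0;       \
        (dev)->error = 0

/* Safe form: the do { } while (0) wrapper turns the expansion into one
 * statement and still demands the usual trailing semicolon. */
#define RESET_SAFE(dev) do {    \
        (dev)->count = 0;       \
        (dev)->error = 0;       \
} while (0)

int main(void)
{
        struct dev_state s = { 5, 1 };

        if (s.error)
                RESET_SAFE(&s);         /* both fields are cleared */
        else
                printf("nothing to reset\n");

        printf("count=%d error=%d\n", s.count, s.error);
        return 0;
}
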
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index ccef549778e..b0e39cb7477 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -289,7 +289,43 @@ static struct comedi_driver driver_pci9118 = {
.offset = sizeof(struct boardtype),
};
-COMEDI_PCI_INITCLEANUP(driver_pci9118, pci9118_pci_table);
+static int __devinit driver_pci9118_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pci9118.driver_name);
+}
+
+static void __devexit driver_pci9118_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pci9118_pci_driver = {
+ .id_table = pci9118_pci_table,
+ .probe = &driver_pci9118_pci_probe,
+ .remove = __devexit_p(&driver_pci9118_pci_remove)
+};
+
+static int __init driver_pci9118_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pci9118);
+ if (retval < 0)
+ return retval;
+
+ driver_pci9118_pci_driver.name = (char *)driver_pci9118.driver_name;
+ return pci_register_driver(&driver_pci9118_pci_driver);
+}
+
+static void __exit driver_pci9118_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pci9118_pci_driver);
+ comedi_driver_unregister(&driver_pci9118);
+}
+
+module_init(driver_pci9118_init_module);
+module_exit(driver_pci9118_cleanup_module);
struct pci9118_private {
unsigned long iobase_a; /* base+size for AMCC chip */
@@ -2432,3 +2468,7 @@ static int pci9118_detach(struct comedi_device *dev)
/*
==============================================================================
*/
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index f3ba645bf63..4b470000b69 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -402,4 +402,19 @@ static int adq12b_do_insn_bits(struct comedi_device *dev,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver_adq12b);
+static int __init driver_adq12b_init_module(void)
+{
+ return comedi_driver_register(&driver_adq12b);
+}
+
+static void __exit driver_adq12b_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_adq12b);
+}
+
+module_init(driver_adq12b_init_module);
+module_exit(driver_adq12b_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 67c4f11a36a..bdd6954cad9 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -19,7 +19,7 @@
/*
Driver: adv_pci1710
Description: Advantech PCI-1710, PCI-1710HG, PCI-1711, PCI-1713,
- Advantech PCI-1720, PCI-1731
+ Advantech PCI-1720, PCI-1731
Author: Michal Dobes <dobes@tesnet.cz>
Devices: [Advantech] PCI-1710 (adv_pci1710), PCI-1710HG (pci1710hg),
PCI-1711 (adv_pci1710), PCI-1713, PCI-1720,
@@ -37,8 +37,8 @@ PCI driver.
Configuration options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
- If bus/slot is not specified, the first available PCI
- device will be used.
+ If bus/slot is not specified, the first available PCI
+ device will be used.
*/
#include <linux/interrupt.h>
@@ -50,7 +50,9 @@ Configuration options:
#include "8253.h"
#include "amcc_s5933.h"
-#define PCI171x_PARANOIDCHECK /* if defined, then is used code which control correct channel number on every 12 bit sample */
+#define PCI171x_PARANOIDCHECK /* if defined, then is used code which control
+ * correct channel number on every 12 bit
+ * sample */
#undef PCI171X_EXTDEBUG
@@ -70,8 +72,8 @@ Configuration options:
#define TYPE_PCI1713 2
#define TYPE_PCI1720 3
-#define IORANGE_171x 32
-#define IORANGE_1720 16
+#define IORANGE_171x 32
+#define IORANGE_1720 16
#define PCI171x_AD_DATA 0 /* R: A/D data */
#define PCI171x_SOFTTRG 0 /* W: soft trigger for A/D */
@@ -91,13 +93,15 @@ Configuration options:
#define PCI171x_CNT2 28 /* R/W: 8254 counter 2 */
#define PCI171x_CNTCTRL 30 /* W: 8254 counter control */
-/* upper bits from status register (PCI171x_STATUS) (lower is same woth control reg) */
+/* upper bits from status register (PCI171x_STATUS) (lower is same with control
+ * reg) */
#define Status_FE 0x0100 /* 1=FIFO is empty */
#define Status_FH 0x0200 /* 1=FIFO is half full */
#define Status_FF 0x0400 /* 1=FIFO is full, fatal error */
#define Status_IRQ 0x0800 /* 1=IRQ occurred */
/* bits from control register (PCI171x_CONTROL) */
-#define Control_CNT0 0x0040 /* 1=CNT0 have external source, 0=have internal 100kHz source */
+#define Control_CNT0 0x0040 /* 1=CNT0 have external source,
+ * 0=have internal 100kHz source */
#define Control_ONEFH 0x0020 /* 1=IRQ on FIFO is half full, 0=every sample */
#define Control_IRQEN 0x0010 /* 1=enable IRQ */
#define Control_GATE 0x0008 /* 1=enable external trigger GATE (8254?) */
@@ -112,7 +116,8 @@ Configuration options:
#define Counter_RW0 0x0010 /* RW0/RW1 select read/write mode */
#define Counter_RW1 0x0020
#define Counter_SC0 0x0040 /* Select Counter. Only 00 or 11 may */
-#define Counter_SC1 0x0080 /* be used, 00 for CNT0, 11 for read-back command */
+#define Counter_SC1 0x0080 /* be used, 00 for CNT0,
+ * 11 for read-back command */
#define PCI1720_DA0 0 /* W: D/A register 0 */
#define PCI1720_DA1 2 /* W: D/A register 1 */
@@ -138,8 +143,8 @@ static const struct comedi_lrange range_pci1710_3 = { 9, {
}
};
-static const char range_codes_pci1710_3[] =
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x10, 0x11, 0x12, 0x13 };
+static const char range_codes_pci1710_3[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
+ 0x10, 0x11, 0x12, 0x13 };
static const struct comedi_lrange range_pci1710hg = { 12, {
BIP_RANGE(5),
@@ -157,10 +162,9 @@ static const struct comedi_lrange range_pci1710hg = { 12, {
}
};
-static const char range_codes_pci1710hg[] =
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12,
- 0x13
-};
+static const char range_codes_pci1710hg[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x10, 0x11,
+ 0x12, 0x13 };
static const struct comedi_lrange range_pci17x1 = { 5, {
BIP_RANGE(10),
@@ -301,7 +305,8 @@ struct pci1710_private {
unsigned int ai_timer1; /* timers */
unsigned int ai_timer2;
short ao_data[4]; /* data output buffer */
- unsigned int cnt0_write_wait; /* after a write, wait for update of the internal state */
+ unsigned int cnt0_write_wait; /* after a write, wait for update of the
+ * internal state */
};
#define devpriv ((struct pci1710_private *)dev->private)
@@ -324,7 +329,9 @@ static int pci1710_reset(struct comedi_device *dev);
static int pci171x_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
-static const unsigned int muxonechan[] = { 0x0000, 0x0101, 0x0202, 0x0303, 0x0404, 0x0505, 0x0606, 0x0707, /* used for gain list programming */
+/* used for gain list programming */
+static const unsigned int muxonechan[] = {
+ 0x0000, 0x0101, 0x0202, 0x0303, 0x0404, 0x0505, 0x0606, 0x0707,
0x0808, 0x0909, 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d, 0x0e0e, 0x0f0f,
0x1010, 0x1111, 0x1212, 0x1313, 0x1414, 0x1515, 0x1616, 0x1717,
0x1818, 0x1919, 0x1a1a, 0x1b1b, 0x1c1c, 0x1d1d, 0x1e1e, 0x1f1f
@@ -774,7 +781,8 @@ static void interrupt_pci1710_half_fifo(void *d)
}
if (!devpriv->neverending_ai)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data
+ sampled */
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
@@ -1559,7 +1567,8 @@ static int pci1710_attach(struct comedi_device *dev,
s->maxdata = 1;
s->len_chanlist = this_board->n_dochan;
s->range_table = &range_digital;
- s->io_bits = (1 << this_board->n_dochan) - 1; /* all bits output */
+ /* all bits output */
+ s->io_bits = (1 << this_board->n_dochan) - 1;
s->state = 0;
s->insn_bits = pci171x_insn_bits_do;
subdev++;
@@ -1609,7 +1618,47 @@ static int pci1710_detach(struct comedi_device *dev)
/*
==============================================================================
*/
-COMEDI_PCI_INITCLEANUP(driver_pci1710, pci1710_pci_table);
+static int __devinit driver_pci1710_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pci1710.driver_name);
+}
+
+static void __devexit driver_pci1710_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pci1710_pci_driver = {
+ .id_table = pci1710_pci_table,
+ .probe = &driver_pci1710_pci_probe,
+ .remove = __devexit_p(&driver_pci1710_pci_remove)
+};
+
+static int __init driver_pci1710_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pci1710);
+ if (retval < 0)
+ return retval;
+
+ driver_pci1710_pci_driver.name = (char *)driver_pci1710.driver_name;
+ return pci_register_driver(&driver_pci1710_pci_driver);
+}
+
+static void __exit driver_pci1710_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pci1710_pci_driver);
+ comedi_driver_unregister(&driver_pci1710);
+}
+
+module_init(driver_pci1710_init_module);
+module_exit(driver_pci1710_cleanup_module);
/*
==============================================================================
*/
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 9fe8fcc7f1d..b133bb84c4f 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -496,4 +496,44 @@ static int pci1723_detach(struct comedi_device *dev)
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_PCI_INITCLEANUP(driver_pci1723, pci1723_pci_table);
+static int __devinit driver_pci1723_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pci1723.driver_name);
+}
+
+static void __devexit driver_pci1723_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pci1723_pci_driver = {
+ .id_table = pci1723_pci_table,
+ .probe = &driver_pci1723_pci_probe,
+ .remove = __devexit_p(&driver_pci1723_pci_remove)
+};
+
+static int __init driver_pci1723_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pci1723);
+ if (retval < 0)
+ return retval;
+
+ driver_pci1723_pci_driver.name = (char *)driver_pci1723.driver_name;
+ return pci_register_driver(&driver_pci1723_pci_driver);
+}
+
+static void __exit driver_pci1723_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pci1723_pci_driver);
+ comedi_driver_unregister(&driver_pci1723);
+}
+
+module_init(driver_pci1723_init_module);
+module_exit(driver_pci1723_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index e424a0c7d34..d018bb4e289 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -8,8 +8,8 @@
/*
Driver: adv_pci_dio
Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U,
- PCI-1736UP, PCI-1750, PCI-1751, PCI-1752, PCI-1753/E,
- PCI-1754, PCI-1756, PCI-1762
+ PCI-1736UP, PCI-1750, PCI-1751, PCI-1752, PCI-1753/E,
+ PCI-1754, PCI-1756, PCI-1762
Author: Michal Dobes <dobes@tesnet.cz>
Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
PCI-1734, PCI-1735U, PCI-1736UP, PCI-1750,
@@ -24,8 +24,8 @@ This driver supports now only insn interface for DI/DO/DIO.
Configuration options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
- If bus/slot is not specified, the first available PCI
- device will be used.
+ If bus/slot is not specified, the first available PCI
+ device will be used.
*/
@@ -67,9 +67,12 @@ enum hw_io_access {
#define MAX_DI_SUBDEVS 2 /* max number of DI subdevices per card */
#define MAX_DO_SUBDEVS 2 /* max number of DO subdevices per card */
-#define MAX_DIO_SUBDEVG 2 /* max number of DIO subdevices group per card */
-#define MAX_8254_SUBDEVS 1 /* max number of 8254 counter subdevs per card */
- /* (could be more than one 8254 per subdevice) */
+#define MAX_DIO_SUBDEVG 2 /* max number of DIO subdevices group per
+ * card */
+#define MAX_8254_SUBDEVS 1 /* max number of 8254 counter subdevs per
+ * card */
+ /* (could be more than one 8254 per
+ * subdevice) */
#define SIZE_8254 4 /* 8254 IO space length */
#define SIZE_8255 4 /* 8255 IO space length */
@@ -84,7 +87,8 @@ enum hw_io_access {
#define PCI1730_DO 2 /* W: Digital output 0-15 */
#define PCI1733_IDI 0 /* R: Isolated digital input 0-31 */
#define PCI1730_3_INT_EN 0x08 /* R/W: enable/disable interrupts */
-#define PCI1730_3_INT_RF 0x0c /* R/W: set falling/raising edge for interrupts */
+#define PCI1730_3_INT_RF 0x0c /* R/W: set falling/raising edge for
+ * interrupts */
#define PCI1730_3_INT_CLR 0x10 /* R/W: clear interrupts */
#define PCI1734_IDO 0 /* W: Isolated digital output 0-31 */
#define PCI173x_BOARDID 4 /* R: Board I/D switch for 1730/3/4 */
@@ -99,7 +103,8 @@ enum hw_io_access {
#define PCI1736_IDI 0 /* R: Isolated digital input 0-15 */
#define PCI1736_IDO 0 /* W: Isolated digital output 0-15 */
#define PCI1736_3_INT_EN 0x08 /* R/W: enable/disable interrupts */
-#define PCI1736_3_INT_RF 0x0c /* R/W: set falling/raising edge for interrupts */
+#define PCI1736_3_INT_RF 0x0c /* R/W: set falling/raising edge for
+ * interrupts */
#define PCI1736_3_INT_CLR 0x10 /* R/W: clear interrupts */
#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */
#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */
@@ -161,37 +166,66 @@ enum hw_io_access {
#define INTCSR3 0x3b
/* PCI-1760 mailbox commands */
-#define CMD_ClearIMB2 0x00 /* Clear IMB2 status and return actaul DI status in IMB3 */
+#define CMD_ClearIMB2 0x00 /* Clear IMB2 status and return actual
+ * DI status in IMB3 */
#define CMD_SetRelaysOutput 0x01 /* Set relay output from OMB0 */
#define CMD_GetRelaysStatus 0x02 /* Get relay status to IMB0 */
-#define CMD_ReadCurrentStatus 0x07 /* Read the current status of the register in OMB0, result in IMB0 */
-#define CMD_ReadFirmwareVersion 0x0e /* Read the firmware ver., result in IMB1.IMB0 */
-#define CMD_ReadHardwareVersion 0x0f /* Read the hardware ver., result in IMB1.IMB0 */
-#define CMD_EnableIDIFilters 0x20 /* Enable IDI filters based on bits in OMB0 */
-#define CMD_EnableIDIPatternMatch 0x21 /* Enable IDI pattern match based on bits in OMB0 */
-#define CMD_SetIDIPatternMatch 0x22 /* Enable IDI pattern match based on bits in OMB0 */
-#define CMD_EnableIDICounters 0x28 /* Enable IDI counters based on bits in OMB0 */
-#define CMD_ResetIDICounters 0x29 /* Reset IDI counters based on bits in OMB0 to its reset values */
-#define CMD_OverflowIDICounters 0x2a /* Enable IDI counters overflow interrupts based on bits in OMB0 */
-#define CMD_MatchIntIDICounters 0x2b /* Enable IDI counters match value interrupts based on bits in OMB0 */
-#define CMD_EdgeIDICounters 0x2c /* Set IDI up counters count edge (bit=0 - rising, =1 - falling) */
-#define CMD_GetIDICntCurValue 0x2f /* Read IDI{OMB0} up counter current value */
-#define CMD_SetIDI0CntResetValue 0x40 /* Set IDI0 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI1CntResetValue 0x41 /* Set IDI1 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI2CntResetValue 0x42 /* Set IDI2 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI3CntResetValue 0x43 /* Set IDI3 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI4CntResetValue 0x44 /* Set IDI4 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI5CntResetValue 0x45 /* Set IDI5 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI6CntResetValue 0x46 /* Set IDI6 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI7CntResetValue 0x47 /* Set IDI7 Counter Reset Value 256*OMB1+OMB0 */
-#define CMD_SetIDI0CntMatchValue 0x48 /* Set IDI0 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI1CntMatchValue 0x49 /* Set IDI1 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI2CntMatchValue 0x4a /* Set IDI2 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI3CntMatchValue 0x4b /* Set IDI3 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI4CntMatchValue 0x4c /* Set IDI4 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI5CntMatchValue 0x4d /* Set IDI5 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI6CntMatchValue 0x4e /* Set IDI6 Counter Match Value 256*OMB1+OMB0 */
-#define CMD_SetIDI7CntMatchValue 0x4f /* Set IDI7 Counter Match Value 256*OMB1+OMB0 */
+#define CMD_ReadCurrentStatus 0x07 /* Read the current status of the
+ * register in OMB0, result in IMB0 */
+#define CMD_ReadFirmwareVersion 0x0e /* Read the firmware ver., result in
+ * IMB1.IMB0 */
+#define CMD_ReadHardwareVersion 0x0f /* Read the hardware ver., result in
+ * IMB1.IMB0 */
+#define CMD_EnableIDIFilters 0x20 /* Enable IDI filters based on bits in
+ * OMB0 */
+#define CMD_EnableIDIPatternMatch 0x21 /* Enable IDI pattern match based on
+ * bits in OMB0 */
+#define CMD_SetIDIPatternMatch 0x22 /* Enable IDI pattern match based on
+ * bits in OMB0 */
+#define CMD_EnableIDICounters 0x28 /* Enable IDI counters based on bits in
+ * OMB0 */
+#define CMD_ResetIDICounters 0x29 /* Reset IDI counters based on bits in
+ * OMB0 to its reset values */
+#define CMD_OverflowIDICounters 0x2a /* Enable IDI counters overflow
+ * interrupts based on bits in OMB0 */
+#define CMD_MatchIntIDICounters 0x2b /* Enable IDI counters match value
+ * interrupts based on bits in OMB0 */
+#define CMD_EdgeIDICounters 0x2c /* Set IDI up counters count edge (bit=0
+ * - rising, =1 - falling) */
+#define CMD_GetIDICntCurValue 0x2f /* Read IDI{OMB0} up counter current
+ * value */
+#define CMD_SetIDI0CntResetValue 0x40 /* Set IDI0 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI1CntResetValue 0x41 /* Set IDI1 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI2CntResetValue 0x42 /* Set IDI2 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI3CntResetValue 0x43 /* Set IDI3 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI4CntResetValue 0x44 /* Set IDI4 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI5CntResetValue 0x45 /* Set IDI5 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI6CntResetValue 0x46 /* Set IDI6 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI7CntResetValue 0x47 /* Set IDI7 Counter Reset Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI0CntMatchValue 0x48 /* Set IDI0 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI1CntMatchValue 0x49 /* Set IDI1 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI2CntMatchValue 0x4a /* Set IDI2 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI3CntMatchValue 0x4b /* Set IDI3 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI4CntMatchValue 0x4c /* Set IDI4 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI5CntMatchValue 0x4d /* Set IDI5 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI6CntMatchValue 0x4e /* Set IDI6 Counter Match Value
+ * 256*OMB1+OMB0 */
+#define CMD_SetIDI7CntMatchValue 0x4f /* Set IDI7 Counter Match Value
+ * 256*OMB1+OMB0 */
#define OMBCMD_RETRY 0x03 /* 3 times try request before error */
@@ -244,115 +278,115 @@ MODULE_DEVICE_TABLE(pci, pci_dio_pci_table);
static const struct dio_boardtype boardtypes[] = {
{"pci1730", PCI_VENDOR_ID_ADVANTECH, 0x1730, PCIDIO_MAINREG,
TYPE_PCI1730,
- {{16, PCI1730_DI, 2, 0}, {16, PCI1730_IDI, 2, 0}},
- {{16, PCI1730_DO, 2, 0}, {16, PCI1730_IDO, 2, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {16, PCI1730_DI, 2, 0}, {16, PCI1730_IDI, 2, 0} },
+ { {16, PCI1730_DO, 2, 0}, {16, PCI1730_IDO, 2, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI173x_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1733", PCI_VENDOR_ID_ADVANTECH, 0x1733, PCIDIO_MAINREG,
TYPE_PCI1733,
- {{0, 0, 0, 0}, {32, PCI1733_IDI, 4, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {32, PCI1733_IDI, 4, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI173x_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1734", PCI_VENDOR_ID_ADVANTECH, 0x1734, PCIDIO_MAINREG,
TYPE_PCI1734,
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {32, PCI1734_IDO, 4, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {32, PCI1734_IDO, 4, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI173x_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1735", PCI_VENDOR_ID_ADVANTECH, 0x1735, PCIDIO_MAINREG,
TYPE_PCI1735,
- {{32, PCI1735_DI, 4, 0}, {0, 0, 0, 0}},
- {{32, PCI1735_DO, 4, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {32, PCI1735_DI, 4, 0}, {0, 0, 0, 0} },
+ { {32, PCI1735_DO, 4, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{ 4, PCI1735_BOARDID, 1, SDF_INTERNAL},
- {{3, PCI1735_C8254, 1, 0}},
+ { {3, PCI1735_C8254, 1, 0} },
IO_8b},
{"pci1736", PCI_VENDOR_ID_ADVANTECH, 0x1736, PCI1736_MAINREG,
TYPE_PCI1736,
- {{0, 0, 0, 0}, {16, PCI1736_IDI, 2, 0}},
- {{0, 0, 0, 0}, {16, PCI1736_IDO, 2, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {16, PCI1736_IDI, 2, 0} },
+ { {0, 0, 0, 0}, {16, PCI1736_IDO, 2, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI1736_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1750", PCI_VENDOR_ID_ADVANTECH, 0x1750, PCIDIO_MAINREG,
TYPE_PCI1750,
- {{0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0}},
- {{0, 0, 0, 0}, {16, PCI1750_IDO, 2, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0} },
+ { {0, 0, 0, 0}, {16, PCI1750_IDO, 2, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{0, 0, 0, 0},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1751", PCI_VENDOR_ID_ADVANTECH, 0x1751, PCIDIO_MAINREG,
TYPE_PCI1751,
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{48, PCI1751_DIO, 2, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {48, PCI1751_DIO, 2, 0}, {0, 0, 0, 0} },
{0, 0, 0, 0},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1752", PCI_VENDOR_ID_ADVANTECH, 0x1752, PCIDIO_MAINREG,
TYPE_PCI1752,
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{32, PCI1752_IDO, 2, 0}, {32, PCI1752_IDO2, 2, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {32, PCI1752_IDO, 2, 0}, {32, PCI1752_IDO2, 2, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI175x_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_16b},
{"pci1753", PCI_VENDOR_ID_ADVANTECH, 0x1753, PCIDIO_MAINREG,
TYPE_PCI1753,
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{96, PCI1753_DIO, 4, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {96, PCI1753_DIO, 4, 0}, {0, 0, 0, 0} },
{0, 0, 0, 0},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1753e", PCI_VENDOR_ID_ADVANTECH, 0x1753, PCIDIO_MAINREG,
TYPE_PCI1753E,
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{96, PCI1753_DIO, 4, 0}, {96, PCI1753E_DIO, 4, 0}},
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {96, PCI1753_DIO, 4, 0}, {96, PCI1753E_DIO, 4, 0} },
{0, 0, 0, 0},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1754", PCI_VENDOR_ID_ADVANTECH, 0x1754, PCIDIO_MAINREG,
TYPE_PCI1754,
- {{32, PCI1754_IDI, 2, 0}, {32, PCI1754_IDI2, 2, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {32, PCI1754_IDI, 2, 0}, {32, PCI1754_IDI2, 2, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI175x_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_16b},
{"pci1756", PCI_VENDOR_ID_ADVANTECH, 0x1756, PCIDIO_MAINREG,
TYPE_PCI1756,
- {{0, 0, 0, 0}, {32, PCI1756_IDI, 2, 0}},
- {{0, 0, 0, 0}, {32, PCI1756_IDO, 2, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {32, PCI1756_IDI, 2, 0} },
+ { {0, 0, 0, 0}, {32, PCI1756_IDO, 2, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI175x_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_16b},
{"pci1760", PCI_VENDOR_ID_ADVANTECH, 0x1760, 0,
TYPE_PCI1760,
- {{0, 0, 0, 0}, {0, 0, 0, 0}}, /* This card have own setup work */
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {0, 0, 0, 0} }, /* This card has its own setup work */
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{0, 0, 0, 0},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_8b},
{"pci1762", PCI_VENDOR_ID_ADVANTECH, 0x1762, PCIDIO_MAINREG,
TYPE_PCI1762,
- {{0, 0, 0, 0}, {16, PCI1762_IDI, 1, 0}},
- {{0, 0, 0, 0}, {16, PCI1762_RO, 1, 0}},
- {{0, 0, 0, 0}, {0, 0, 0, 0}},
+ { {0, 0, 0, 0}, {16, PCI1762_IDI, 1, 0} },
+ { {0, 0, 0, 0}, {16, PCI1762_RO, 1, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
{4, PCI1762_BOARDID, 1, SDF_INTERNAL},
- {{0, 0, 0, 0}},
+ { {0, 0, 0, 0} },
IO_16b}
};
@@ -372,13 +406,16 @@ struct pci_dio_private {
char valid; /* card is usable */
char GlobalIrqEnabled; /* 1= any IRQ source is enabled */
/* PCI-1760 specific data */
- unsigned char IDICntEnable; /* counter's counting enable status */
- unsigned char IDICntOverEnable; /* counter's overflow interrupts enable status */
- unsigned char IDICntMatchEnable; /* counter's match interrupts enable status */
- unsigned char IDICntEdge; /* counter's count edge value (bit=0 - rising, =1 - falling) */
+ unsigned char IDICntEnable; /* counter's counting enable status */
+ unsigned char IDICntOverEnable; /* counter's overflow interrupts enable
+ * status */
+ unsigned char IDICntMatchEnable; /* counter's match interrupts
+ * enable status */
+ unsigned char IDICntEdge; /* counter's count edge value
+ * (bit=0 - rising, =1 - falling) */
unsigned short CntResValue[8]; /* counters' reset value */
- unsigned short CntMatchValue[8]; /* counters' match interrupt value */
- unsigned char IDIFiltersEn; /* IDI's digital filters enable status */
+ unsigned short CntMatchValue[8]; /* counters' match interrupt value */
+ unsigned char IDIFiltersEn; /* IDI's digital filters enable status */
unsigned char IDIPatMatchEn; /* IDI's pattern match enable status */
unsigned char IDIPatMatchValue; /* IDI's pattern match value */
unsigned short IDIFiltrLow[8]; /* IDI's filter value low signal */
@@ -691,7 +728,8 @@ static int pci1760_insn_cnt_write(struct comedi_device *dev,
};
unsigned char imb[4];
- if (devpriv->CntResValue[chan] != (data[0] & 0xffff)) { /* Set reset value if different */
+ /* Set reset value if different */
+ if (devpriv->CntResValue[chan] != (data[0] & 0xffff)) {
ret = pci1760_mbxrequest(dev, omb, imb);
if (!ret)
return ret;
@@ -704,7 +742,8 @@ static int pci1760_insn_cnt_write(struct comedi_device *dev,
if (!ret)
return ret;
- if (!(bitmask & devpriv->IDICntEnable)) { /* start counter if it don't run */
+ /* start counter if it isn't running */
+ if (!(bitmask & devpriv->IDICntEnable)) {
omb[0] = bitmask;
omb[2] = CMD_EnableIDICounters;
ret = pci1760_mbxrequest(dev, omb, imb);
@@ -740,12 +779,14 @@ static int pci1760_reset(struct comedi_device *dev)
devpriv->IDICntEnable = 0;
omb[0] = 0x00;
- omb[2] = CMD_OverflowIDICounters; /* disable counters overflow interrupts */
+ omb[2] = CMD_OverflowIDICounters; /* disable counters overflow
+ * interrupts */
pci1760_mbxrequest(dev, omb, imb);
devpriv->IDICntOverEnable = 0;
omb[0] = 0x00;
- omb[2] = CMD_MatchIntIDICounters; /* disable counters match value interrupts */
+ omb[2] = CMD_MatchIntIDICounters; /* disable counters match value
+ * interrupts */
pci1760_mbxrequest(dev, omb, imb);
devpriv->IDICntMatchEnable = 0;
@@ -766,7 +807,8 @@ static int pci1760_reset(struct comedi_device *dev)
}
omb[0] = 0xff;
- omb[2] = CMD_ResetIDICounters; /* reset IDI up counters to reset values */
+ omb[2] = CMD_ResetIDICounters; /* reset IDI up counters to reset
+ * values */
pci1760_mbxrequest(dev, omb, imb);
omb[0] = 0x00;
@@ -807,9 +849,12 @@ static int pci_dio_reset(struct comedi_device *dev)
outb(0, dev->iobase + PCI1730_IDO + 1);
/* NO break there! */
case TYPE_PCI1733:
- outb(0, dev->iobase + PCI1730_3_INT_EN); /* disable interrupts */
- outb(0x0f, dev->iobase + PCI1730_3_INT_CLR); /* clear interrupts */
- outb(0, dev->iobase + PCI1730_3_INT_RF); /* set rising edge trigger */
+ /* disable interrupts */
+ outb(0, dev->iobase + PCI1730_3_INT_EN);
+ /* clear interrupts */
+ outb(0x0f, dev->iobase + PCI1730_3_INT_CLR);
+ /* set rising edge trigger */
+ outb(0, dev->iobase + PCI1730_3_INT_RF);
break;
case TYPE_PCI1734:
outb(0, dev->iobase + PCI1734_IDO); /* clear outputs */
@@ -830,43 +875,53 @@ static int pci_dio_reset(struct comedi_device *dev)
case TYPE_PCI1736:
outb(0, dev->iobase + PCI1736_IDO);
outb(0, dev->iobase + PCI1736_IDO + 1);
- outb(0, dev->iobase + PCI1736_3_INT_EN); /* disable interrupts */
- outb(0x0f, dev->iobase + PCI1736_3_INT_CLR); /* clear interrupts */
- outb(0, dev->iobase + PCI1736_3_INT_RF); /* set rising edge trigger */
+ /* disable interrupts */
+ outb(0, dev->iobase + PCI1736_3_INT_EN);
+ /* clear interrupts */
+ outb(0x0f, dev->iobase + PCI1736_3_INT_CLR);
+ /* set rising edge trigger */
+ outb(0, dev->iobase + PCI1736_3_INT_RF);
break;
case TYPE_PCI1750:
case TYPE_PCI1751:
- outb(0x88, dev->iobase + PCI1750_ICR); /* disable & clear interrupts */
+ /* disable & clear interrupts */
+ outb(0x88, dev->iobase + PCI1750_ICR);
break;
case TYPE_PCI1752:
- outw(0, dev->iobase + PCI1752_6_CFC); /* disable channel freeze function */
+ outw(0, dev->iobase + PCI1752_6_CFC); /* disable channel freeze
+ * function */
outw(0, dev->iobase + PCI1752_IDO); /* clear outputs */
outw(0, dev->iobase + PCI1752_IDO + 2);
outw(0, dev->iobase + PCI1752_IDO2);
outw(0, dev->iobase + PCI1752_IDO2 + 2);
break;
case TYPE_PCI1753E:
- outb(0x88, dev->iobase + PCI1753E_ICR0); /* disable & clear interrupts */
+ outb(0x88, dev->iobase + PCI1753E_ICR0); /* disable & clear
+ * interrupts */
outb(0x80, dev->iobase + PCI1753E_ICR1);
outb(0x80, dev->iobase + PCI1753E_ICR2);
outb(0x80, dev->iobase + PCI1753E_ICR3);
/* NO break there! */
case TYPE_PCI1753:
- outb(0x88, dev->iobase + PCI1753_ICR0); /* disable & clear interrupts */
+ outb(0x88, dev->iobase + PCI1753_ICR0); /* disable & clear
+ * interrupts */
outb(0x80, dev->iobase + PCI1753_ICR1);
outb(0x80, dev->iobase + PCI1753_ICR2);
outb(0x80, dev->iobase + PCI1753_ICR3);
break;
case TYPE_PCI1754:
- outw(0x08, dev->iobase + PCI1754_6_ICR0); /* disable and clear interrupts */
+ outw(0x08, dev->iobase + PCI1754_6_ICR0); /* disable and clear
+ * interrupts */
outw(0x08, dev->iobase + PCI1754_6_ICR1);
outw(0x08, dev->iobase + PCI1754_ICR2);
outw(0x08, dev->iobase + PCI1754_ICR3);
break;
case TYPE_PCI1756:
- outw(0, dev->iobase + PCI1752_6_CFC); /* disable channel freeze function */
- outw(0x08, dev->iobase + PCI1754_6_ICR0); /* disable and clear interrupts */
+ outw(0, dev->iobase + PCI1752_6_CFC); /* disable channel freeze
+ * function */
+ outw(0x08, dev->iobase + PCI1754_6_ICR0); /* disable and clear
+ * interrupts */
outw(0x08, dev->iobase + PCI1754_6_ICR1);
outw(0, dev->iobase + PCI1756_IDO); /* clear outputs */
outw(0, dev->iobase + PCI1756_IDO + 2);
@@ -875,7 +930,8 @@ static int pci_dio_reset(struct comedi_device *dev)
pci1760_reset(dev);
break;
case TYPE_PCI1762:
- outw(0x0101, dev->iobase + PCI1762_ICR); /* disable & clear interrupts */
+ outw(0x0101, dev->iobase + PCI1762_ICR); /* disable & clear
+ * interrupts */
break;
}
@@ -996,7 +1052,7 @@ static int pci_dio_add_do(struct comedi_device *dev, struct comedi_subdevice *s,
==============================================================================
*/
static int pci_dio_add_8254(struct comedi_device *dev,
- struct comedi_subdevice * s,
+ struct comedi_subdevice *s,
const struct diosubd_data *d, int subdev)
{
s->type = COMEDI_SUBD_COUNTER;
@@ -1023,7 +1079,7 @@ static int CheckAndAllocCard(struct comedi_device *dev,
for (pr = pci_priv, prev = NULL; pr != NULL; prev = pr, pr = pr->next) {
if (pr->pcidev == pcidev)
- return 0; /* this card is used, look for another */
+ return 0; /* this card is used, look for another */
}
@@ -1048,7 +1104,7 @@ static int pci_dio_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret, subdev, n_subdevices, i, j;
unsigned long iobase;
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
printk("comedi%d: adv_pci_dio: ", dev->minor);
@@ -1058,9 +1114,7 @@ static int pci_dio_attach(struct comedi_device *dev,
return -ENOMEM;
}
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
/* loop through cards supported by this driver */
for (i = 0; i < n_boardtypes; ++i) {
if (boardtypes[i].vendor_id != pcidev->vendor)
@@ -1215,15 +1269,12 @@ static int pci_dio_detach(struct comedi_device *dev)
}
}
- if (this_board->boardid.chans) {
+ if (this_board->boardid.chans)
subdev++;
- }
- for (i = 0; i < MAX_8254_SUBDEVS; i++) {
- if (this_board->s8254[i].chans) {
+ for (i = 0; i < MAX_8254_SUBDEVS; i++)
+ if (this_board->s8254[i].chans)
subdev++;
- }
- }
for (i = 0; i < dev->n_subdevices; i++) {
s = dev->subdevices + i;
@@ -1253,7 +1304,47 @@ static int pci_dio_detach(struct comedi_device *dev)
/*
==============================================================================
*/
-COMEDI_PCI_INITCLEANUP(driver_pci_dio, pci_dio_pci_table);
+static int __devinit driver_pci_dio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pci_dio.driver_name);
+}
+
+static void __devexit driver_pci_dio_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pci_dio_pci_driver = {
+ .id_table = pci_dio_pci_table,
+ .probe = &driver_pci_dio_pci_probe,
+ .remove = __devexit_p(&driver_pci_dio_pci_remove)
+};
+
+static int __init driver_pci_dio_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pci_dio);
+ if (retval < 0)
+ return retval;
+
+ driver_pci_dio_pci_driver.name = (char *)driver_pci_dio.driver_name;
+ return pci_register_driver(&driver_pci_dio_pci_driver);
+}
+
+static void __exit driver_pci_dio_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pci_dio_pci_driver);
+ comedi_driver_unregister(&driver_pci_dio);
+}
+
+module_init(driver_pci_dio_init_module);
+module_exit(driver_pci_dio_cleanup_module);
/*
==============================================================================
*/
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 7a1c636df5b..1728cc013d1 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -227,4 +227,19 @@ static struct comedi_driver driver_aio_aio12_8 = {
.offset = sizeof(struct aio12_8_boardtype),
};
-COMEDI_INITCLEANUP(driver_aio_aio12_8);
+static int __init driver_aio_aio12_8_init_module(void)
+{
+ return comedi_driver_register(&driver_aio_aio12_8);
+}
+
+static void __exit driver_aio_aio12_8_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_aio_aio12_8);
+}
+
+module_init(driver_aio_aio12_8_init_module);
+module_exit(driver_aio_aio12_8_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 4baef9ff932..487599531fe 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -184,4 +184,19 @@ static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev,
return 2;
}
-COMEDI_INITCLEANUP(driver_aio_iiro_16);
+static int __init driver_aio_iiro_16_init_module(void)
+{
+ return comedi_driver_register(&driver_aio_iiro_16);
+}
+
+static void __exit driver_aio_iiro_16_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_aio_iiro_16);
+}
+
+module_init(driver_aio_iiro_16_init_module);
+module_exit(driver_aio_iiro_16_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/am9513.h b/drivers/staging/comedi/drivers/am9513.h
index 73367d6afff..0bb839e5149 100644
--- a/drivers/staging/comedi/drivers/am9513.h
+++ b/drivers/staging/comedi/drivers/am9513.h
@@ -47,32 +47,32 @@
#ifdef Am9513_8BITBUS
#define Am9513_write_register(reg, val) \
- do{ \
+ do { \
Am9513_output_control(reg); \
Am9513_output_data(val>>8); \
Am9513_output_data(val&0xff); \
- }while (0)
+ } while (0)
#define Am9513_read_register(reg, val) \
- do{ \
+ do { \
Am9513_output_control(reg); \
- val=Am9513_input_data()<<8; \
- val|=Am9513_input_data(); \
- }while (0)
+ val = Am9513_input_data()<<8; \
+ val |= Am9513_input_data(); \
+ } while (0)
#else /* Am9513_16BITBUS */
#define Am9513_write_register(reg, val) \
- do{ \
+ do { \
Am9513_output_control(reg); \
Am9513_output_data(val); \
- }while (0)
+ } while (0)
#define Am9513_read_register(reg, val) \
- do{ \
+ do { \
Am9513_output_control(reg); \
- val=Am9513_input_data(); \
- }while (0)
+ val = Am9513_input_data(); \
+ } while (0)
#endif
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index bf27617aa62..93bbe4ec318 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -494,9 +494,58 @@ static struct comedi_driver driver_amplc_dio200 = {
};
#ifdef CONFIG_COMEDI_PCI
-COMEDI_PCI_INITCLEANUP(driver_amplc_dio200, dio200_pci_table);
+static int __devinit driver_amplc_dio200_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_amplc_dio200.driver_name);
+}
+
+static void __devexit driver_amplc_dio200_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_amplc_dio200_pci_driver = {
+ .id_table = dio200_pci_table,
+ .probe = &driver_amplc_dio200_pci_probe,
+ .remove = __devexit_p(&driver_amplc_dio200_pci_remove)
+};
+
+static int __init driver_amplc_dio200_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_amplc_dio200);
+ if (retval < 0)
+ return retval;
+
+ driver_amplc_dio200_pci_driver.name =
+ (char *)driver_amplc_dio200.driver_name;
+ return pci_register_driver(&driver_amplc_dio200_pci_driver);
+}
+
+static void __exit driver_amplc_dio200_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_amplc_dio200_pci_driver);
+ comedi_driver_unregister(&driver_amplc_dio200);
+}
+
+module_init(driver_amplc_dio200_init_module);
+module_exit(driver_amplc_dio200_cleanup_module);
#else
-COMEDI_INITCLEANUP(driver_amplc_dio200);
+static int __init driver_amplc_dio200_init_module(void)
+{
+ return comedi_driver_register(&driver_amplc_dio200);
+}
+
+static void __exit driver_amplc_dio200_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_amplc_dio200);
+}
+
+module_init(driver_amplc_dio200_init_module);
+module_exit(driver_amplc_dio200_cleanup_module);
#endif
/*
@@ -1501,3 +1550,7 @@ static int dio200_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index a307d68d79c..48246cd50d4 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -182,9 +182,58 @@ static struct comedi_driver driver_amplc_pc236 = {
};
#ifdef CONFIG_COMEDI_PCI
-COMEDI_PCI_INITCLEANUP(driver_amplc_pc236, pc236_pci_table);
+static int __devinit driver_amplc_pc236_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_amplc_pc236.driver_name);
+}
+
+static void __devexit driver_amplc_pc236_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_amplc_pc236_pci_driver = {
+ .id_table = pc236_pci_table,
+ .probe = &driver_amplc_pc236_pci_probe,
+ .remove = __devexit_p(&driver_amplc_pc236_pci_remove)
+};
+
+static int __init driver_amplc_pc236_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_amplc_pc236);
+ if (retval < 0)
+ return retval;
+
+ driver_amplc_pc236_pci_driver.name =
+ (char *)driver_amplc_pc236.driver_name;
+ return pci_register_driver(&driver_amplc_pc236_pci_driver);
+}
+
+static void __exit driver_amplc_pc236_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_amplc_pc236_pci_driver);
+ comedi_driver_unregister(&driver_amplc_pc236);
+}
+
+module_init(driver_amplc_pc236_init_module);
+module_exit(driver_amplc_pc236_cleanup_module);
#else
-COMEDI_INITCLEANUP(driver_amplc_pc236);
+static int __init driver_amplc_pc236_init_module(void)
+{
+ return comedi_driver_register(&driver_amplc_pc236);
+}
+
+static void __exit driver_amplc_pc236_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_amplc_pc236);
+}
+
+module_init(driver_amplc_pc236_init_module);
+module_exit(driver_amplc_pc236_cleanup_module);
#endif
static int pc236_request_region(unsigned minor, unsigned long from,
@@ -664,3 +713,7 @@ static irqreturn_t pc236_interrupt(int irq, void *d)
}
return IRQ_RETVAL(handled);
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 15808e95cea..8a338807909 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -432,7 +432,60 @@ static int pc263_dio_insn_config(struct comedi_device *dev,
* as necessary.
*/
#ifdef CONFIG_COMEDI_PCI
-COMEDI_PCI_INITCLEANUP(driver_amplc_pc263, pc263_pci_table);
+static int __devinit driver_amplc_pc263_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_amplc_pc263.driver_name);
+}
+
+static void __devexit driver_amplc_pc263_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_amplc_pc263_pci_driver = {
+ .id_table = pc263_pci_table,
+ .probe = &driver_amplc_pc263_pci_probe,
+ .remove = __devexit_p(&driver_amplc_pc263_pci_remove)
+};
+
+static int __init driver_amplc_pc263_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_amplc_pc263);
+ if (retval < 0)
+ return retval;
+
+ driver_amplc_pc263_pci_driver.name =
+ (char *)driver_amplc_pc263.driver_name;
+ return pci_register_driver(&driver_amplc_pc263_pci_driver);
+}
+
+static void __exit driver_amplc_pc263_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_amplc_pc263_pci_driver);
+ comedi_driver_unregister(&driver_amplc_pc263);
+}
+
+module_init(driver_amplc_pc263_init_module);
+module_exit(driver_amplc_pc263_cleanup_module);
#else
-COMEDI_INITCLEANUP(driver_amplc_pc263);
+static int __init driver_amplc_pc263_init_module(void)
+{
+ return comedi_driver_register(&driver_amplc_pc263);
+}
+
+static void __exit driver_amplc_pc263_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_amplc_pc263);
+}
+
+module_init(driver_amplc_pc263_init_module);
+module_exit(driver_amplc_pc263_cleanup_module);
#endif
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index c486a878e18..1b5ba1c2725 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -443,7 +443,45 @@ static struct comedi_driver driver_amplc_pci224 = {
.num_names = ARRAY_SIZE(pci224_boards),
};
-COMEDI_PCI_INITCLEANUP(driver_amplc_pci224, pci224_pci_table);
+static int __devinit driver_amplc_pci224_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_amplc_pci224.driver_name);
+}
+
+static void __devexit driver_amplc_pci224_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_amplc_pci224_pci_driver = {
+ .id_table = pci224_pci_table,
+ .probe = &driver_amplc_pci224_pci_probe,
+ .remove = __devexit_p(&driver_amplc_pci224_pci_remove)
+};
+
+static int __init driver_amplc_pci224_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_amplc_pci224);
+ if (retval < 0)
+ return retval;
+
+ driver_amplc_pci224_pci_driver.name =
+ (char *)driver_amplc_pci224.driver_name;
+ return pci_register_driver(&driver_amplc_pci224_pci_driver);
+}
+
+static void __exit driver_amplc_pci224_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_amplc_pci224_pci_driver);
+ comedi_driver_unregister(&driver_amplc_pci224);
+}
+
+module_init(driver_amplc_pci224_init_module);
+module_exit(driver_amplc_pci224_cleanup_module);
/*
* Called from the 'insn_write' function to perform a single write.
@@ -1557,3 +1595,7 @@ static int pci224_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 7fffd967d47..5d064577b2f 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -617,7 +617,45 @@ static struct comedi_driver driver_amplc_pci230 = {
.num_names = ARRAY_SIZE(pci230_boards),
};
-COMEDI_PCI_INITCLEANUP(driver_amplc_pci230, pci230_pci_table);
+static int __devinit driver_amplc_pci230_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_amplc_pci230.driver_name);
+}
+
+static void __devexit driver_amplc_pci230_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_amplc_pci230_pci_driver = {
+ .id_table = pci230_pci_table,
+ .probe = &driver_amplc_pci230_pci_probe,
+ .remove = __devexit_p(&driver_amplc_pci230_pci_remove)
+};
+
+static int __init driver_amplc_pci230_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_amplc_pci230);
+ if (retval < 0)
+ return retval;
+
+ driver_amplc_pci230_pci_driver.name =
+ (char *)driver_amplc_pci230.driver_name;
+ return pci_register_driver(&driver_amplc_pci230_pci_driver);
+}
+
+static void __exit driver_amplc_pci230_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_amplc_pci230_pci_driver);
+ comedi_driver_unregister(&driver_amplc_pci230);
+}
+
+module_init(driver_amplc_pci230_init_module);
+module_exit(driver_amplc_pci230_cleanup_module);
static int pci230_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
@@ -726,7 +764,7 @@ static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_subdevice *s;
unsigned long iobase1, iobase2;
/* PCI230's I/O spaces 1 and 2 respectively. */
- struct pci_dev *pci_dev;
+ struct pci_dev *pci_dev = NULL;
int i = 0, irq_hdl, rc;
printk("comedi%d: amplc_pci230: attach %s %d,%d\n", dev->minor,
@@ -742,9 +780,7 @@ static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
spin_lock_init(&devpriv->ai_stop_spinlock);
spin_lock_init(&devpriv->ao_stop_spinlock);
/* Find card */
- for (pci_dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pci_dev != NULL;
- pci_dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) {
+ for_each_pci_dev(pci_dev) {
if (it->options[0] || it->options[1]) {
/* Match against bus/slot options. */
if (it->options[0] != pci_dev->bus->number ||
@@ -3014,3 +3050,7 @@ static int pci230_ai_cancel(struct comedi_device *dev,
pci230_ai_stop(dev, s);
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index fb0d5fa7176..e0ac825ea58 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -517,4 +517,19 @@ static int c6xdigio_detach(struct comedi_device *dev)
return 0;
}
-COMEDI_INITCLEANUP(driver_c6xdigio);
+static int __init driver_c6xdigio_init_module(void)
+{
+ return comedi_driver_register(&driver_c6xdigio);
+}
+
+static void __exit driver_c6xdigio_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_c6xdigio);
+}
+
+module_init(driver_c6xdigio_init_module);
+module_exit(driver_c6xdigio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index cfeb11f443e..f8ede1182cc 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -37,7 +37,6 @@ Status: experimental
#include <linux/delay.h>
#include <linux/pci.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -171,7 +170,7 @@ static int das16cs_attach(struct comedi_device *dev,
if (!link)
return -EIO;
- dev->iobase = link->io.BasePort1;
+ dev->iobase = link->resource[0]->start;
printk("I/O base=0x%04lx ", dev->iobase);
printk("fingerprint:\n");
@@ -662,14 +661,6 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *);
less on other parts of the kernel.
*/
-/*
- The dev_info variable is the "key" that is used to match up this
- device driver with appropriate cards, through the card configuration
- database.
-*/
-
-static dev_info_t dev_info = "cb_das16_cs";
-
struct local_info_t {
struct pcmcia_device *link;
int stop;
@@ -719,8 +710,7 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link)
((struct local_info_t *)link->priv)->stop = 1;
das16cs_pcmcia_release(link);
/* This points to the parent struct local_info_t struct */
- if (link->priv)
- kfree(link->priv);
+ kfree(link->priv);
} /* das16cs_pcmcia_detach */
@@ -737,24 +727,22 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
return 0;
@@ -788,12 +776,10 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %u", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
+ if (link->resource[0])
+ printk(", io %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(", io %pR", link->resource[1]);
printk("\n");
return;
@@ -847,7 +833,7 @@ struct pcmcia_driver das16cs_driver = {
.id_table = das16cs_id_table,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "cb_das16_cs",
},
};
@@ -881,5 +867,16 @@ void __exit cleanup_module(void)
}
#else
-COMEDI_INITCLEANUP(driver_das16cs);
+static int __init driver_das16cs_init_module(void)
+{
+ return comedi_driver_register(&driver_das16cs);
+}
+
+static void __exit driver_das16cs_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das16cs);
+}
+
+module_init(driver_das16cs_init_module);
+module_exit(driver_das16cs_cleanup_module);
#endif /* CONFIG_PCMCIA */
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 434591de37c..6530b6c9d98 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -533,7 +533,7 @@ static int cb_pcidas_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
int index;
int i;
@@ -550,9 +550,7 @@ static int cb_pcidas_attach(struct comedi_device *dev,
*/
printk("\n");
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
if (pcidev->vendor != PCI_VENDOR_ID_CB)
continue;
@@ -1871,4 +1869,44 @@ static int nvram_read(struct comedi_device *dev, unsigned int address,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_PCI_INITCLEANUP(driver_cb_pcidas, cb_pcidas_pci_table);
+static int __devinit driver_cb_pcidas_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_cb_pcidas.driver_name);
+}
+
+static void __devexit driver_cb_pcidas_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_cb_pcidas_pci_driver = {
+ .id_table = cb_pcidas_pci_table,
+ .probe = &driver_cb_pcidas_pci_probe,
+ .remove = __devexit_p(&driver_cb_pcidas_pci_remove)
+};
+
+static int __init driver_cb_pcidas_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_cb_pcidas);
+ if (retval < 0)
+ return retval;
+
+ driver_cb_pcidas_pci_driver.name = (char *)driver_cb_pcidas.driver_name;
+ return pci_register_driver(&driver_cb_pcidas_pci_driver);
+}
+
+static void __exit driver_cb_pcidas_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_cb_pcidas_pci_driver);
+ comedi_driver_unregister(&driver_cb_pcidas);
+}
+
+module_init(driver_cb_pcidas_init_module);
+module_exit(driver_cb_pcidas_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 79aa286e9bb..53e7015869f 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1237,7 +1237,43 @@ static unsigned int get_ao_divisor(unsigned int ns, unsigned int flags);
static void load_ao_dma(struct comedi_device *dev,
const struct comedi_cmd *cmd);
-COMEDI_PCI_INITCLEANUP(driver_cb_pcidas, pcidas64_pci_table);
+static int __devinit driver_cb_pcidas_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_cb_pcidas.driver_name);
+}
+
+static void __devexit driver_cb_pcidas_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_cb_pcidas_pci_driver = {
+ .id_table = pcidas64_pci_table,
+ .probe = &driver_cb_pcidas_pci_probe,
+ .remove = __devexit_p(&driver_cb_pcidas_pci_remove)
+};
+
+static int __init driver_cb_pcidas_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_cb_pcidas);
+ if (retval < 0)
+ return retval;
+
+ driver_cb_pcidas_pci_driver.name = (char *)driver_cb_pcidas.driver_name;
+ return pci_register_driver(&driver_cb_pcidas_pci_driver);
+}
+
+static void __exit driver_cb_pcidas_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_cb_pcidas_pci_driver);
+ comedi_driver_unregister(&driver_cb_pcidas);
+}
+
+module_init(driver_cb_pcidas_init_module);
+module_exit(driver_cb_pcidas_cleanup_module);
static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
unsigned int range_index)
@@ -1718,7 +1754,7 @@ static inline void warn_external_queue(struct comedi_device *dev)
*/
static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
int index;
uint32_t local_range, local_decode;
int retval;
@@ -1735,9 +1771,7 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
* Probe the device to determine what device in the series it is.
*/
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
if (pcidev->vendor != PCI_VENDOR_ID_COMPUTERBOARDS)
continue;
@@ -4303,3 +4337,7 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
}
i2c_stop(dev);
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index c374bee2506..2d35143b8e5 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -280,7 +280,7 @@ static int cb_pcidda_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
int index;
printk("comedi%d: cb_pcidda: ", dev->minor);
@@ -296,9 +296,7 @@ static int cb_pcidda_attach(struct comedi_device *dev,
*/
printk("\n");
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_CB) {
if (it->options[0] || it->options[1]) {
if (pcidev->bus->number != it->options[0] ||
@@ -856,4 +854,44 @@ static void cb_pcidda_calibrate(struct comedi_device *dev, unsigned int channel,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_PCI_INITCLEANUP(driver_cb_pcidda, cb_pcidda_pci_table);
+static int __devinit driver_cb_pcidda_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_cb_pcidda.driver_name);
+}
+
+static void __devexit driver_cb_pcidda_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_cb_pcidda_pci_driver = {
+ .id_table = cb_pcidda_pci_table,
+ .probe = &driver_cb_pcidda_pci_probe,
+ .remove = __devexit_p(&driver_cb_pcidda_pci_remove)
+};
+
+static int __init driver_cb_pcidda_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_cb_pcidda);
+ if (retval < 0)
+ return retval;
+
+ driver_cb_pcidda_pci_driver.name = (char *)driver_cb_pcidda.driver_name;
+ return pci_register_driver(&driver_cb_pcidda_pci_driver);
+}
+
+static void __exit driver_cb_pcidda_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_cb_pcidda_pci_driver);
+ comedi_driver_unregister(&driver_cb_pcidda);
+}
+
+module_init(driver_cb_pcidda_init_module);
+module_exit(driver_cb_pcidda_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
index 38ccd105fa3..c1693c91a6d 100644
--- a/drivers/staging/comedi/drivers/cb_pcidio.c
+++ b/drivers/staging/comedi/drivers/cb_pcidio.c
@@ -202,9 +202,7 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* Probe the device to determine what device in the series it is.
*/
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
if (pcidev->vendor != PCI_VENDOR_ID_CB)
continue;
@@ -300,4 +298,44 @@ static int pcidio_detach(struct comedi_device *dev)
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_PCI_INITCLEANUP(driver_cb_pcidio, pcidio_pci_table);
+static int __devinit driver_cb_pcidio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_cb_pcidio.driver_name);
+}
+
+static void __devexit driver_cb_pcidio_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_cb_pcidio_pci_driver = {
+ .id_table = pcidio_pci_table,
+ .probe = &driver_cb_pcidio_pci_probe,
+ .remove = __devexit_p(&driver_cb_pcidio_pci_remove)
+};
+
+static int __init driver_cb_pcidio_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_cb_pcidio);
+ if (retval < 0)
+ return retval;
+
+ driver_cb_pcidio_pci_driver.name = (char *)driver_cb_pcidio.driver_name;
+ return pci_register_driver(&driver_cb_pcidio_pci_driver);
+}
+
+static void __exit driver_cb_pcidio_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_cb_pcidio_pci_driver);
+ comedi_driver_unregister(&driver_cb_pcidio);
+}
+
+module_init(driver_cb_pcidio_init_module);
+module_exit(driver_cb_pcidio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 49dccbbd713..ced346a7cae 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -210,7 +210,7 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
int index;
/* int i; */
@@ -227,9 +227,7 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
*/
printk("\n");
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
if (pcidev->vendor != PCI_VENDOR_ID_COMPUTERBOARDS)
continue;
@@ -491,4 +489,46 @@ static int cb_pcimdas_ao_rinsn(struct comedi_device *dev,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_PCI_INITCLEANUP(driver_cb_pcimdas, cb_pcimdas_pci_table);
+static int __devinit driver_cb_pcimdas_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_cb_pcimdas.driver_name);
+}
+
+static void __devexit driver_cb_pcimdas_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_cb_pcimdas_pci_driver = {
+ .id_table = cb_pcimdas_pci_table,
+ .probe = &driver_cb_pcimdas_pci_probe,
+ .remove = __devexit_p(&driver_cb_pcimdas_pci_remove)
+};
+
+static int __init driver_cb_pcimdas_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_cb_pcimdas);
+ if (retval < 0)
+ return retval;
+
+ driver_cb_pcimdas_pci_driver.name =
+ (char *)driver_cb_pcimdas.driver_name;
+ return pci_register_driver(&driver_cb_pcimdas_pci_driver);
+}
+
+static void __exit driver_cb_pcimdas_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_cb_pcimdas_pci_driver);
+ comedi_driver_unregister(&driver_cb_pcimdas);
+}
+
+module_init(driver_cb_pcimdas_init_module);
+module_exit(driver_cb_pcimdas_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index f404ec7723e..8c981a89ab6 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -195,7 +195,45 @@ MODULE_DESCRIPTION("Comedi low-level driver for the Computerboards PCIM-DDA "
"series. Currently only supports PCIM-DDA06-16 (which "
"also happens to be the only board in this series. :) ) ");
MODULE_LICENSE("GPL");
-COMEDI_PCI_INITCLEANUP_NOMODULE(cb_pcimdda_driver, pci_table);
+static int __devinit cb_pcimdda_driver_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, cb_pcimdda_driver.driver_name);
+}
+
+static void __devexit cb_pcimdda_driver_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver cb_pcimdda_driver_pci_driver = {
+ .id_table = pci_table,
+ .probe = &cb_pcimdda_driver_pci_probe,
+ .remove = __devexit_p(&cb_pcimdda_driver_pci_remove)
+};
+
+static int __init cb_pcimdda_driver_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&cb_pcimdda_driver);
+ if (retval < 0)
+ return retval;
+
+ cb_pcimdda_driver_pci_driver.name =
+ (char *)cb_pcimdda_driver.driver_name;
+ return pci_register_driver(&cb_pcimdda_driver_pci_driver);
+}
+
+static void __exit cb_pcimdda_driver_cleanup_module(void)
+{
+ pci_unregister_driver(&cb_pcimdda_driver_pci_driver);
+ comedi_driver_unregister(&cb_pcimdda_driver);
+}
+
+module_init(cb_pcimdda_driver_init_module);
+module_exit(cb_pcimdda_driver_cleanup_module);
static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
@@ -426,13 +464,11 @@ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
*/
static int probe(struct comedi_device *dev, const struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
int index;
unsigned long registers;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
if (pcidev->vendor != PCI_VENDOR_ID_COMPUTERBOARDS)
continue;
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 701622280ff..cfcbd9b8f39 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -50,43 +50,6 @@ Configuration Options:
within each minor will be concatenated together in the order given here.
*/
-/*
- * The previous block comment is used to automatically generate
- * documentation in Comedi and Comedilib. The fields:
- *
- * Driver: the name of the driver
- * Description: a short phrase describing the driver. Don't list boards.
- * Devices: a full list of the boards that attempt to be supported by
- * the driver. Format is "(manufacturer) board name [comedi name]",
- * where comedi_name is the name that is used to configure the board.
- * See the comment near board_name: in the struct comedi_driver structure
- * below. If (manufacturer) or [comedi name] is missing, the previous
- * value is used.
- * Author: you
- * Updated: date when the _documentation_ was last updated. Use 'date -R'
- * to get a value for this.
- * Status: a one-word description of the status. Valid values are:
- * works - driver works correctly on most boards supported, and
- * passes comedi_test.
- * unknown - unknown. Usually put there by ds.
- * experimental - may not work in any particular release. Author
- * probably wants assistance testing it.
- * bitrotten - driver has not been update in a long time, probably
- * doesn't work, and probably is missing support for significant
- * Comedi interface features.
- * untested - author probably wrote it "blind", and is believed to
- * work, but no confirmation.
- *
- * These headers should be followed by a blank line, and any comments
- * you wish to say about the driver. The comment area is the place
- * to put any known bugs, limitations, unsupported features, supported
- * command triggers, whether or not commands are supported on particular
- * subdevices, etc.
- *
- * Somewhere in the comment should be information about configuration
- * options that are used with comedi_config.
- */
-
#include <linux/string.h>
#include <linux/slab.h>
#include "../comedi.h"
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index fcd7721c553..21d834dd92b 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -101,7 +101,18 @@ static struct comedi_driver driver_parport = {
.detach = parport_detach,
};
-COMEDI_INITCLEANUP(driver_parport);
+static int __init driver_parport_init_module(void)
+{
+ return comedi_driver_register(&driver_parport);
+}
+
+static void __exit driver_parport_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_parport);
+}
+
+module_init(driver_parport_init_module);
+module_exit(driver_parport_cleanup_module);
struct parport_private {
unsigned int a_data;
@@ -396,3 +407,7 @@ static int parport_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index ef83a1a445b..b220b305541 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -107,7 +107,18 @@ static struct comedi_driver driver_waveform = {
.num_names = ARRAY_SIZE(waveform_boards),
};
-COMEDI_INITCLEANUP(driver_waveform);
+static int __init driver_waveform_init_module(void)
+{
+ return comedi_driver_register(&driver_waveform);
+}
+
+static void __exit driver_waveform_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_waveform);
+}
+
+module_init(driver_waveform_init_module);
+module_exit(driver_waveform_cleanup_module);
static int waveform_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -549,3 +560,7 @@ static int waveform_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 9511814e641..871f109bcfa 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -103,7 +103,7 @@ static int contec_ns_to_timer(unsigned int *ns, int round);
static int contec_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
printk("comedi%d: contec: ", dev->minor);
@@ -116,10 +116,7 @@ static int contec_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
-
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_CONTEC &&
pcidev->device == PCI_DEVICE_ID_PIO1616L) {
if (it->options[0] || it->options[1]) {
@@ -232,4 +229,44 @@ static int contec_di_insn_bits(struct comedi_device *dev,
return 2;
}
-COMEDI_PCI_INITCLEANUP(driver_contec, contec_pci_table);
+static int __devinit driver_contec_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_contec.driver_name);
+}
+
+static void __devexit driver_contec_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_contec_pci_driver = {
+ .id_table = contec_pci_table,
+ .probe = &driver_contec_pci_probe,
+ .remove = __devexit_p(&driver_contec_pci_remove)
+};
+
+static int __init driver_contec_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_contec);
+ if (retval < 0)
+ return retval;
+
+ driver_contec_pci_driver.name = (char *)driver_contec.driver_name;
+ return pci_register_driver(&driver_contec_pci_driver);
+}
+
+static void __exit driver_contec_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_contec_pci_driver);
+ comedi_driver_unregister(&driver_contec);
+}
+
+module_init(driver_contec_init_module);
+module_exit(driver_contec_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 078ec273b27..6af6c8323d5 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -887,4 +887,46 @@ static int daqboard2000_detach(struct comedi_device *dev)
return 0;
}
-COMEDI_PCI_INITCLEANUP(driver_daqboard2000, daqboard2000_pci_table);
+static int __devinit driver_daqboard2000_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id
+ *ent)
+{
+ return comedi_pci_auto_config(dev, driver_daqboard2000.driver_name);
+}
+
+static void __devexit driver_daqboard2000_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_daqboard2000_pci_driver = {
+ .id_table = daqboard2000_pci_table,
+ .probe = &driver_daqboard2000_pci_probe,
+ .remove = __devexit_p(&driver_daqboard2000_pci_remove)
+};
+
+static int __init driver_daqboard2000_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_daqboard2000);
+ if (retval < 0)
+ return retval;
+
+ driver_daqboard2000_pci_driver.name =
+ (char *)driver_daqboard2000.driver_name;
+ return pci_register_driver(&driver_daqboard2000_pci_driver);
+}
+
+static void __exit driver_daqboard2000_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_daqboard2000_pci_driver);
+ comedi_driver_unregister(&driver_daqboard2000);
+}
+
+module_init(driver_daqboard2000_init_module);
+module_exit(driver_daqboard2000_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 9cb144f7e70..3141dc80fe7 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -29,11 +29,11 @@
* Description: DAS-08 compatible boards
* Author: Warren Jasper, ds, Frank Hess
* Devices: [Keithley Metrabyte] DAS08 (isa-das08),
- * [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
- * DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
- * DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
- * DAS08/JR-16-AO (das08jr-16-ao), PCI-DAS08 (das08),
- * PC104-DAS08 (pc104-das08), DAS08/JR/16 (das08jr/16)
+ * [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
+ * DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
+ * DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
+ * DAS08/JR-16-AO (das08jr-16-ao), PCI-DAS08 (das08),
+ * PC104-DAS08 (pc104-das08), DAS08/JR/16 (das08jr/16)
* Status: works
*
* This is a rewrite of the das08 and das08jr drivers.
@@ -980,7 +980,7 @@ static int das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long iobase;
#ifdef CONFIG_COMEDI_PCI
unsigned long pci_iobase = 0;
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
#endif
ret = alloc_private(dev, sizeof(struct das08_private_struct));
@@ -997,9 +997,7 @@ static int das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
printk("\n");
/* find card */
- for (pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pdev != NULL;
- pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) {
+ for_each_pci_dev(pdev) {
if (pdev->vendor == PCI_VENDOR_ID_COMPUTERBOARDS
&& pdev->device == PCI_DEVICE_ID_PCIDAS08) {
if (it->options[0] || it->options[1]) {
@@ -1082,11 +1080,62 @@ int das08_common_detach(struct comedi_device *dev)
EXPORT_SYMBOL_GPL(das08_common_detach);
#ifdef CONFIG_COMEDI_PCI
-COMEDI_PCI_INITCLEANUP(driver_das08, das08_pci_table);
+static int __devinit driver_das08_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_das08.driver_name);
+}
+
+static void __devexit driver_das08_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_das08_pci_driver = {
+ .id_table = das08_pci_table,
+ .probe = &driver_das08_pci_probe,
+ .remove = __devexit_p(&driver_das08_pci_remove)
+};
+
+static int __init driver_das08_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_das08);
+ if (retval < 0)
+ return retval;
+
+ driver_das08_pci_driver.name = (char *)driver_das08.driver_name;
+ return pci_register_driver(&driver_das08_pci_driver);
+}
+
+static void __exit driver_das08_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_das08_pci_driver);
+ comedi_driver_unregister(&driver_das08);
+}
+
+module_init(driver_das08_init_module);
+module_exit(driver_das08_cleanup_module);
#else
-COMEDI_INITCLEANUP(driver_das08);
+static int __init driver_das08_init_module(void)
+{
+ return comedi_driver_register(&driver_das08);
+}
+
+static void __exit driver_das08_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das08);
+}
+
+module_init(driver_das08_init_module);
+module_exit(driver_das08_cleanup_module);
#endif
#ifdef CONFIG_COMEDI_PCMCIA
EXPORT_SYMBOL_GPL(das08_cs_boards);
#endif
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 8761a6d285d..48d9fb1227d 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -48,7 +48,6 @@ Command support does not exist, but could be added for this board.
#include "das08.h"
/* pcmcia includes */
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -89,7 +88,7 @@ static int das08_cs_attach(struct comedi_device *dev,
printk(" no pcmcia cards found\n");
return -EIO;
}
- iobase = link->io.BasePort1;
+ iobase = link->resource[0]->start;
} else {
printk(" bug! board does not have PCMCIA bustype\n");
return -EINVAL;
@@ -132,14 +131,6 @@ static void das08_pcmcia_detach(struct pcmcia_device *);
less on other parts of the kernel.
*/
-/*
- The dev_info variable is the "key" that is used to match up this
- device driver with appropriate cards, through the card configuration
- database.
-*/
-
-static const dev_info_t dev_info = "pcm-das08";
-
struct local_info_t {
struct pcmcia_device *link;
int stop;
@@ -206,8 +197,7 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
das08_pcmcia_release(link);
/* This points to the parent struct local_info_t struct */
- if (link->priv)
- kfree(link->priv);
+ kfree(link->priv);
} /* das08_pcmcia_detach */
@@ -225,24 +215,22 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
return 0;
}
@@ -284,12 +272,10 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %u", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
+ if (link->resource[0])
+ printk(", io %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
return;
@@ -363,7 +349,7 @@ struct pcmcia_driver das08_cs_driver = {
.id_table = das08_cs_id_table,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "pcm-das08",
},
};
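
The das08_cs hunks track the PCMCIA core's move from the old io window fields (io.BasePort1/NumPorts1/Attributes1) to the generic resource[] array. A minimal sketch of the new-style window setup, mirroring das08_pcmcia_config_loop() above and assuming the helpers shown there (pcmcia_io_cfg_data_width(), pcmcia_request_io()); the function name is illustrative:

#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Fill resource[0] from the first CIS IO window and reserve it. */
static int example_request_first_io_window(struct pcmcia_device *p_dev,
					   cistpl_io_t *io)
{
	p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(io->flags);
	p_dev->resource[0]->start = io->win[0].base;
	p_dev->resource[0]->end = io->win[0].len;	/* window length, as in the hunk above */

	/* reserves the IO range; it is only enabled later with the configuration */
	return pcmcia_request_io(p_dev);
}
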
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index ccee4f1802d..0af1b465908 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -1717,7 +1717,18 @@ static int das16_detach(struct comedi_device *dev)
return 0;
}
-COMEDI_INITCLEANUP(driver_das16);
+static int __init driver_das16_init_module(void)
+{
+ return comedi_driver_register(&driver_das16);
+}
+
+static void __exit driver_das16_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das16);
+}
+
+module_init(driver_das16_init_module);
+module_exit(driver_das16_cleanup_module);
/* utility function that suggests a dma transfer size in bytes */
static unsigned int das16_suggest_transfer_size(struct comedi_device *dev,
@@ -1776,3 +1787,7 @@ static void das16_ai_munge(struct comedi_device *dev,
}
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index c403d882743..a5ce3b2abe4 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -198,7 +198,18 @@ struct das16m1_private_struct {
#define devpriv ((struct das16m1_private_struct *)(dev->private))
#define thisboard ((const struct das16m1_board *)(dev->board_ptr))
-COMEDI_INITCLEANUP(driver_das16m1);
+static int __init driver_das16m1_init_module(void)
+{
+ return comedi_driver_register(&driver_das16m1);
+}
+
+static void __exit driver_das16m1_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das16m1);
+}
+
+module_init(driver_das16m1_init_module);
+module_exit(driver_das16m1_cleanup_module);
static inline short munge_sample(short data)
{
@@ -777,3 +788,7 @@ static int das16m1_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index de5e82fec87..6ea93f9c0b4 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -531,7 +531,18 @@ static struct comedi_driver driver_das1800 = {
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver_das1800);
+static int __init driver_das1800_init_module(void)
+{
+ return comedi_driver_register(&driver_das1800);
+}
+
+static void __exit driver_das1800_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das1800);
+}
+
+module_init(driver_das1800_init_module);
+module_exit(driver_das1800_cleanup_module);
static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0,
unsigned int dma1)
@@ -1800,3 +1811,7 @@ static unsigned int suggest_transfer_size(struct comedi_cmd *cmd)
return size;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index a404a183191..6328f5280b6 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -109,7 +109,18 @@ static struct comedi_driver driver_das6402 = {
.detach = das6402_detach,
};
-COMEDI_INITCLEANUP(driver_das6402);
+static int __init driver_das6402_init_module(void)
+{
+ return comedi_driver_register(&driver_das6402);
+}
+
+static void __exit driver_das6402_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das6402);
+}
+
+module_init(driver_das6402_init_module);
+module_exit(driver_das6402_cleanup_module);
struct das6402_private {
int ai_bytes_to_read;
@@ -360,3 +371,7 @@ static int das6402_attach(struct comedi_device *dev,
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index aadc4971c90..aecaedc5027 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -347,7 +347,18 @@ static int das800_probe(struct comedi_device *dev)
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver_das800);
+static int __init driver_das800_init_module(void)
+{
+ return comedi_driver_register(&driver_das800);
+}
+
+static void __exit driver_das800_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_das800);
+}
+
+module_init(driver_das800_init_module);
+module_exit(driver_das800_cleanup_module);
/* interrupt service routine */
static irqreturn_t das800_interrupt(int irq, void *d)
@@ -905,3 +916,7 @@ static int das800_set_frequency(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index d5cbd515c37..693728e14bd 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -37,43 +37,6 @@ Configuration Options:
comedi_config /dev/comedi0 dmm32at baseaddr,irq
*/
-/*
- * The previous block comment is used to automatically generate
- * documentation in Comedi and Comedilib. The fields:
- *
- * Driver: the name of the driver
- * Description: a short phrase describing the driver. Don't list boards.
- * Devices: a full list of the boards that attempt to be supported by
- * the driver. Format is "(manufacturer) board name [comedi name]",
- * where comedi_name is the name that is used to configure the board.
- * See the comment near board_name: in the struct comedi_driver structure
- * below. If (manufacturer) or [comedi name] is missing, the previous
- * value is used.
- * Author: you
- * Updated: date when the _documentation_ was last updated. Use 'date -R'
- * to get a value for this.
- * Status: a one-word description of the status. Valid values are:
- * works - driver works correctly on most boards supported, and
- * passes comedi_test.
- * unknown - unknown. Usually put there by ds.
- * experimental - may not work in any particular release. Author
- * probably wants assistance testing it.
- * bitrotten - driver has not been update in a long time, probably
- * doesn't work, and probably is missing support for significant
- * Comedi interface features.
- * untested - author probably wrote it "blind", and is believed to
- * work, but no confirmation.
- *
- * These headers should be followed by a blank line, and any comments
- * you wish to say about the driver. The comment area is the place
- * to put any known bugs, limitations, unsupported features, supported
- * command triggers, whether or not commands are supported on particular
- * subdevices, etc.
- *
- * Somewhere in the comment should be information about configuration
- * options that are used with comedi_config.
- */
-
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/ioport.h>
@@ -336,12 +299,14 @@ static int dmm32at_attach(struct comedi_device *dev,
iobase = it->options[0];
irq = it->options[1];
- printk("comedi%d: dmm32at: attaching\n", dev->minor);
- printk("dmm32at: probing at address 0x%04lx, irq %u\n", iobase, irq);
+ printk(KERN_INFO "comedi%d: dmm32at: attaching\n", dev->minor);
+ printk(KERN_DEBUG "dmm32at: probing at address 0x%04lx, irq %u\n",
+ iobase, irq);
/* register address space */
if (!request_region(iobase, DMM32AT_MEMSIZE, thisboard->name)) {
- printk("I/O port conflict\n");
+ printk(KERN_ERR "comedi%d: dmm32at: I/O port conflict\n",
+ dev->minor);
return -EIO;
}
dev->iobase = iobase;
@@ -379,14 +344,15 @@ static int dmm32at_attach(struct comedi_device *dev,
intstat = dmm_inb(dev, DMM32AT_INTCLOCK);
airback = dmm_inb(dev, DMM32AT_AIRBACK);
- printk("dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n",
+ printk(KERN_DEBUG "dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n",
ailo, aihi, fifostat);
- printk("dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n",
+ printk(KERN_DEBUG
+ "dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n",
aistat, intstat, airback);
if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) ||
(aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) {
- printk("dmmat32: board detection failed\n");
+ printk(KERN_ERR "dmmat32: board detection failed\n");
return -EIO;
}
@@ -394,7 +360,7 @@ static int dmm32at_attach(struct comedi_device *dev,
if (irq) {
ret = request_irq(irq, dmm32at_isr, 0, thisboard->name, dev);
if (ret < 0) {
- printk("irq conflict\n");
+ printk(KERN_ERR "dmm32at: irq conflict\n");
return ret;
}
dev->irq = irq;
@@ -478,7 +444,7 @@ static int dmm32at_attach(struct comedi_device *dev,
}
/* success */
- printk("comedi%d: dmm32at: attached\n", dev->minor);
+ printk(KERN_INFO "comedi%d: dmm32at: attached\n", dev->minor);
return 1;
@@ -494,7 +460,7 @@ static int dmm32at_attach(struct comedi_device *dev,
*/
static int dmm32at_detach(struct comedi_device *dev)
{
- printk("comedi%d: dmm32at: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: dmm32at: remove\n", dev->minor);
if (dev->irq)
free_irq(dev->irq, dev);
if (dev->iobase)
@@ -542,7 +508,7 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
break;
}
if (i == 40000) {
- printk("timeout\n");
+ printk(KERN_WARNING "dmm32at: timeout\n");
return -ETIMEDOUT;
}
@@ -557,7 +523,7 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
break;
}
if (i == 40000) {
- printk("timeout\n");
+ printk(KERN_WARNING "dmm32at: timeout\n");
return -ETIMEDOUT;
}
@@ -627,7 +593,8 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /* step 2: make sure trigger sources are unique and mutually
+ * compatible */
/* note that mutual compatibility is not an issue here */
if (cmd->scan_begin_src != TRIG_TIMER &&
@@ -800,7 +767,8 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->stop_src == TRIG_COUNT)
devpriv->ai_scans_left = cmd->stop_arg;
else { /* TRIG_NONE */
- devpriv->ai_scans_left = 0xffffffff; /* indicates TRIG_NONE to isr */
+ devpriv->ai_scans_left = 0xffffffff; /* indicates TRIG_NONE to
+ * isr */
}
/* wait for circuit to settle */
@@ -810,7 +778,7 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
if (i == 40000) {
- printk("timeout\n");
+ printk(KERN_WARNING "dmm32at: timeout\n");
return -ETIMEDOUT;
}
@@ -823,13 +791,13 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
dmm_outb(dev, DMM32AT_CONV, 0xff);
}
-/* printk("dmmat32 in command\n"); */
+/* printk("dmmat32 in command\n"); */
-/* for(i=0;i<cmd->chanlist_len;i++) */
-/* comedi_buf_put(s->async,i*100); */
+/* for(i=0;i<cmd->chanlist_len;i++) */
+/* comedi_buf_put(s->async,i*100); */
-/* s->async->events |= COMEDI_CB_EOA; */
-/* comedi_event(dev, s); */
+/* s->async->events |= COMEDI_CB_EOA; */
+/* comedi_event(dev, s); */
return 0;
@@ -937,7 +905,7 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
break;
}
if (i == 40000) {
- printk("timeout\n");
+ printk(KERN_WARNING "dmm32at: timeout\n");
return -ETIMEDOUT;
}
/* dummy read to update trigger the output */
@@ -1095,4 +1063,19 @@ void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver_dmm32at);
+static int __init driver_dmm32at_init_module(void)
+{
+ return comedi_driver_register(&driver_dmm32at);
+}
+
+static void __exit driver_dmm32at_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dmm32at);
+}
+
+module_init(driver_dmm32at_init_module);
+module_exit(driver_dmm32at_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 83fb6e56c3e..5cce1b5f448 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -98,7 +98,18 @@ static struct comedi_driver driver_dt2801 = {
.detach = dt2801_detach,
};
-COMEDI_INITCLEANUP(driver_dt2801);
+static int __init driver_dt2801_init_module(void)
+{
+ return comedi_driver_register(&driver_dt2801);
+}
+
+static void __exit driver_dt2801_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dt2801);
+}
+
+module_init(driver_dt2801_init_module);
+module_exit(driver_dt2801_cleanup_module);
#if 0
/* ignore 'defined but not used' warning */
@@ -720,3 +731,7 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
return 1;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index ea9bfb7fd88..a1664caa1d9 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -239,7 +239,18 @@ static struct comedi_driver driver_dt2811 = {
.offset = sizeof(struct dt2811_board),
};
-COMEDI_INITCLEANUP(driver_dt2811);
+static int __init driver_dt2811_init_module(void)
+{
+ return comedi_driver_register(&driver_dt2811);
+}
+
+static void __exit driver_dt2811_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dt2811);
+}
+
+module_init(driver_dt2811_init_module);
+module_exit(driver_dt2811_cleanup_module);
static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
@@ -625,3 +636,7 @@ static int dt2811_do_insn_bits(struct comedi_device *dev,
return 2;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 16fde066d26..1c6248cf592 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -70,7 +70,18 @@ static struct comedi_driver driver_dt2814 = {
.detach = dt2814_detach,
};
-COMEDI_INITCLEANUP(driver_dt2814);
+static int __init driver_dt2814_init_module(void)
+{
+ return comedi_driver_register(&driver_dt2814);
+}
+
+static void __exit driver_dt2814_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dt2814);
+}
+
+module_init(driver_dt2814_init_module);
+module_exit(driver_dt2814_cleanup_module);
static irqreturn_t dt2814_interrupt(int irq, void *dev);
@@ -387,3 +398,7 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
comedi_event(dev, s);
return IRQ_HANDLED;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index d1a4f782243..4155da43fd5 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -82,7 +82,18 @@ static struct comedi_driver driver_dt2815 = {
.detach = dt2815_detach,
};
-COMEDI_INITCLEANUP(driver_dt2815);
+static int __init driver_dt2815_init_module(void)
+{
+ return comedi_driver_register(&driver_dt2815);
+}
+
+static void __exit driver_dt2815_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dt2815);
+}
+
+module_init(driver_dt2815_init_module);
+module_exit(driver_dt2815_cleanup_module);
static void dt2815_free_resources(struct comedi_device *dev);
@@ -255,3 +266,7 @@ static int dt2815_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index 54e0dea0fc5..651fe050d02 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -57,7 +57,18 @@ static struct comedi_driver driver_dt2817 = {
.detach = dt2817_detach,
};
-COMEDI_INITCLEANUP(driver_dt2817);
+static int __init driver_dt2817_init_module(void)
+{
+ return comedi_driver_register(&driver_dt2817);
+}
+
+static void __exit driver_dt2817_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dt2817);
+}
+
+module_init(driver_dt2817_init_module);
+module_exit(driver_dt2817_cleanup_module);
static int dt2817_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -180,3 +191,7 @@ static int dt2817_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index fd8728c8366..8cea9dca3d7 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -423,7 +423,18 @@ static struct comedi_driver driver_dt282x = {
.offset = sizeof(struct dt282x_board),
};
-COMEDI_INITCLEANUP(driver_dt282x);
+static int __init driver_dt282x_init_module(void)
+{
+ return comedi_driver_register(&driver_dt282x);
+}
+
+static void __exit driver_dt282x_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dt282x);
+}
+
+module_init(driver_dt282x_init_module);
+module_exit(driver_dt282x_cleanup_module);
static void free_resources(struct comedi_device *dev);
static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
@@ -1502,3 +1513,7 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index ca687890fc1..656e7bbf2fc 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -287,7 +287,43 @@ static struct comedi_driver driver_dt3000 = {
.detach = dt3000_detach,
};
-COMEDI_PCI_INITCLEANUP(driver_dt3000, dt3k_pci_table);
+static int __devinit driver_dt3000_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_dt3000.driver_name);
+}
+
+static void __devexit driver_dt3000_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_dt3000_pci_driver = {
+ .id_table = dt3k_pci_table,
+ .probe = &driver_dt3000_pci_probe,
+ .remove = __devexit_p(&driver_dt3000_pci_remove)
+};
+
+static int __init driver_dt3000_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_dt3000);
+ if (retval < 0)
+ return retval;
+
+ driver_dt3000_pci_driver.name = (char *)driver_dt3000.driver_name;
+ return pci_register_driver(&driver_dt3000_pci_driver);
+}
+
+static void __exit driver_dt3000_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_dt3000_pci_driver);
+ comedi_driver_unregister(&driver_dt3000);
+}
+
+module_init(driver_dt3000_init_module);
+module_exit(driver_dt3000_cleanup_module);
static void dt3k_ai_empty_fifo(struct comedi_device *dev,
struct comedi_subdevice *s);
@@ -991,3 +1027,7 @@ static struct pci_dev *dt_pci_find_device(struct pci_dev *from, int *board)
*board = -1;
return from;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 96caae36279..d01d2dc7911 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -890,8 +890,10 @@ static struct usb_driver dt9812_usb_driver = {
* Comedi functions
*/
-static void dt9812_comedi_open(struct comedi_device *dev)
+static int dt9812_comedi_open(struct comedi_device *dev)
{
+ int result = -ENODEV;
+
down(&devpriv->slot->mutex);
if (devpriv->slot->usb) {
/* We have an attached device, fill in current range info */
@@ -934,8 +936,10 @@ static void dt9812_comedi_open(struct comedi_device *dev)
}
break;
}
+ result = 0;
}
up(&devpriv->slot->mutex);
+ return result;
}
static int dt9812_di_rinsn(struct comedi_device *dev,
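
The dt9812 change above converts the comedi "open" hook from void to int so that opening a subdevice with no USB device attached can fail cleanly: assume -ENODEV, take the slot mutex, and only report success once the attached device has been seen. A minimal sketch of that guarded-open shape (the struct and function names are illustrative, not the driver's real helpers):

#include <linux/semaphore.h>
#include <linux/errno.h>

struct example_slot {
	struct semaphore mutex;
	void *usb;		/* non-NULL while a USB device is attached */
};

static int example_open(struct example_slot *slot)
{
	int result = -ENODEV;	/* pessimistic default */

	down(&slot->mutex);
	if (slot->usb) {
		/* ... set up range tables as dt9812_comedi_open() does ... */
		result = 0;
	}
	up(&slot->mutex);
	return result;
}
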
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index a10a2b070a2..7f49add60b2 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -52,7 +52,18 @@ static struct comedi_driver driver_fl512 = {
.detach = fl512_detach,
};
-COMEDI_INITCLEANUP(driver_fl512);
+static int __init driver_fl512_init_module(void)
+{
+ return comedi_driver_register(&driver_fl512);
+}
+
+static void __exit driver_fl512_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_fl512);
+}
+
+module_init(driver_fl512_init_module);
+module_exit(driver_fl512_cleanup_module);
static int fl512_ai_insn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
@@ -205,3 +216,7 @@ static int fl512_detach(struct comedi_device *dev)
printk(KERN_INFO "comedi%d: fl512: dummy i detach\n", dev->minor);
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 51f12bf45cf..1661b57ca2a 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -311,17 +311,25 @@ struct hpdi_private {
void *plx9080_iobase;
void *hpdi_iobase;
uint32_t *dio_buffer[NUM_DMA_BUFFERS]; /* dma buffers */
- dma_addr_t dio_buffer_phys_addr[NUM_DMA_BUFFERS]; /* physical addresses of dma buffers */
- struct plx_dma_desc *dma_desc; /* array of dma descriptors read by plx9080, allocated to get proper alignment */
- dma_addr_t dma_desc_phys_addr; /* physical address of dma descriptor array */
+ /* physical addresses of dma buffers */
+ dma_addr_t dio_buffer_phys_addr[NUM_DMA_BUFFERS];
+ /* array of dma descriptors read by plx9080, allocated to get proper
+ * alignment */
+ struct plx_dma_desc *dma_desc;
+ /* physical address of dma descriptor array */
+ dma_addr_t dma_desc_phys_addr;
unsigned int num_dma_descriptors;
- uint32_t *desc_dio_buffer[NUM_DMA_DESCRIPTORS]; /* pointer to start of buffers indexed by descriptor */
- volatile unsigned int dma_desc_index; /* index of the dma descriptor that is currently being used */
+ /* pointer to start of buffers indexed by descriptor */
+ uint32_t *desc_dio_buffer[NUM_DMA_DESCRIPTORS];
+ /* index of the dma descriptor that is currently being used */
+ volatile unsigned int dma_desc_index;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
volatile unsigned long dio_count;
- volatile uint32_t bits[24]; /* software copies of values written to hpdi registers */
- volatile unsigned int block_size; /* number of bytes at which to generate COMEDI_CB_BLOCK events */
+ /* software copies of values written to hpdi registers */
+ volatile uint32_t bits[24];
+ /* number of bytes at which to generate COMEDI_CB_BLOCK events */
+ volatile unsigned int block_size;
unsigned dio_config_output:1;
};
@@ -337,7 +345,43 @@ static struct comedi_driver driver_hpdi = {
.detach = hpdi_detach,
};
-COMEDI_PCI_INITCLEANUP(driver_hpdi, hpdi_pci_table);
+static int __devinit driver_hpdi_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_hpdi.driver_name);
+}
+
+static void __devexit driver_hpdi_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_hpdi_pci_driver = {
+ .id_table = hpdi_pci_table,
+ .probe = &driver_hpdi_pci_probe,
+ .remove = __devexit_p(&driver_hpdi_pci_remove)
+};
+
+static int __init driver_hpdi_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_hpdi);
+ if (retval < 0)
+ return retval;
+
+ driver_hpdi_pci_driver.name = (char *)driver_hpdi.driver_name;
+ return pci_register_driver(&driver_hpdi_pci_driver);
+}
+
+static void __exit driver_hpdi_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_hpdi_pci_driver);
+ comedi_driver_unregister(&driver_hpdi);
+}
+
+module_init(driver_hpdi_init_module);
+module_exit(driver_hpdi_cleanup_module);
static int dio_config_insn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
@@ -570,7 +614,8 @@ static int hpdi_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -ENOMEM;
pcidev = NULL;
- for (i = 0; i < ARRAY_SIZE(hpdi_boards) && dev->board_ptr == NULL; i++) {
+ for (i = 0; i < ARRAY_SIZE(hpdi_boards) &&
+ dev->board_ptr == NULL; i++) {
do {
pcidev = pci_get_subsys(PCI_VENDOR_ID_PLX,
hpdi_boards[i].device_id,
@@ -618,7 +663,7 @@ static int hpdi_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* remap, won't work with 2.0 kernels but who cares */
priv(dev)->plx9080_iobase = ioremap(priv(dev)->plx9080_phys_iobase,
pci_resource_len(pcidev,
- PLX9080_BADDRINDEX));
+ PLX9080_BADDRINDEX));
priv(dev)->hpdi_iobase =
ioremap(priv(dev)->hpdi_phys_iobase,
pci_resource_len(pcidev, HPDI_BADDRINDEX));
@@ -769,7 +814,8 @@ static int di_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /* step 2: make sure trigger sources are unique and mutually
+ * compatible */
/* uniqueness check */
if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
@@ -1066,3 +1112,7 @@ static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index fa0e48173bd..809d17efd5b 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -185,7 +185,18 @@ board_name : &boardtypes[0].name,
offset : sizeof(struct boardtype),
};
-COMEDI_INITCLEANUP(driver_icp_multi);
+static int __init driver_icp_multi_init_module(void)
+{
+ return comedi_driver_register(&driver_icp_multi);
+}
+
+static void __exit driver_icp_multi_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_icp_multi);
+}
+
+module_init(driver_icp_multi_init_module);
+module_exit(driver_icp_multi_cleanup_module);
struct icp_multi_private {
struct pcilst_struct *card; /* pointer to card */
@@ -1125,3 +1136,7 @@ static int icp_multi_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/icp_multi.h b/drivers/staging/comedi/drivers/icp_multi.h
index 2bb96b1d21e..68acefe1688 100644
--- a/drivers/staging/comedi/drivers/icp_multi.h
+++ b/drivers/staging/comedi/drivers/icp_multi.h
@@ -62,16 +62,14 @@ static int pci_card_data(struct pcilst_struct *amcc,
/* build list of Inova cards in this system */
static void pci_card_list_init(unsigned short pci_vendor, char display)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct pcilst_struct *inova, *last;
int i;
inova_devices = NULL;
last = NULL;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == pci_vendor) {
inova = kzalloc(sizeof(*inova), GFP_KERNEL);
if (!inova) {
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index e26c1b88ebe..39a6a850d63 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -640,4 +640,19 @@ static unsigned int pci20xxx_di(struct comedi_device *dev,
}
#endif
-COMEDI_INITCLEANUP(driver_pci20xxx);
+static int __init driver_pci20xxx_init_module(void)
+{
+ return comedi_driver_register(&driver_pci20xxx);
+}
+
+static void __exit driver_pci20xxx_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pci20xxx);
+}
+
+module_init(driver_pci20xxx_init_module);
+module_exit(driver_pci20xxx_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index d330b188684..8b383ee959b 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -48,6 +48,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci)
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <linux/kernel.h>
#include "comedi_pci.h"
#include "jr3_pci.h"
@@ -123,12 +124,9 @@ struct jr3_pci_subdev_private {
};
/* Hotplug firmware loading stuff */
-
-typedef int comedi_firmware_callback(struct comedi_device *dev,
- const u8 * data, size_t size);
-
static int comedi_load_firmware(struct comedi_device *dev, char *name,
- comedi_firmware_callback cb)
+ int (*cb)(struct comedi_device *dev,
+ const u8 *data, size_t size))
{
int result = 0;
const struct firmware *fw;
@@ -373,7 +371,7 @@ static int jr3_pci_ai_insn_read(struct comedi_device *dev,
return result;
}
-static void jr3_pci_open(struct comedi_device *dev)
+static int jr3_pci_open(struct comedi_device *dev)
{
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
@@ -388,6 +386,7 @@ static void jr3_pci_open(struct comedi_device *dev)
p->channel_no);
}
}
+ return 0;
}
int read_idm_word(const u8 * data, size_t size, int *pos, unsigned int *val)
@@ -399,14 +398,14 @@ int read_idm_word(const u8 * data, size_t size, int *pos, unsigned int *val)
}
/* Collect value */
*val = 0;
- for (; *pos < size && isxdigit(data[*pos]); (*pos)++) {
- char ch = tolower(data[*pos]);
- result = 1;
- if ('0' <= ch && ch <= '9') {
- *val = (*val << 4) + (ch - '0');
- } else if ('a' <= ch && ch <= 'f') {
- *val = (*val << 4) + (ch - 'a' + 10);
- }
+ for (; *pos < size; (*pos)++) {
+ int value;
+ value = hex_to_bin(data[*pos]);
+ if (value >= 0) {
+ result = 1;
+ *val = (*val << 4) + value;
+ } else
+ break;
}
}
return result;
@@ -986,4 +985,44 @@ static int jr3_pci_detach(struct comedi_device *dev)
return 0;
}
-COMEDI_PCI_INITCLEANUP(driver_jr3_pci, jr3_pci_pci_table);
+static int __devinit driver_jr3_pci_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_jr3_pci.driver_name);
+}
+
+static void __devexit driver_jr3_pci_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_jr3_pci_pci_driver = {
+ .id_table = jr3_pci_pci_table,
+ .probe = &driver_jr3_pci_pci_probe,
+ .remove = __devexit_p(&driver_jr3_pci_pci_remove)
+};
+
+static int __init driver_jr3_pci_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_jr3_pci);
+ if (retval < 0)
+ return retval;
+
+ driver_jr3_pci_pci_driver.name = (char *)driver_jr3_pci.driver_name;
+ return pci_register_driver(&driver_jr3_pci_pci_driver);
+}
+
+static void __exit driver_jr3_pci_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_jr3_pci_pci_driver);
+ comedi_driver_unregister(&driver_jr3_pci);
+}
+
+module_init(driver_jr3_pci_init_module);
+module_exit(driver_jr3_pci_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 73b0445e310..286093bca3f 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -96,7 +96,43 @@ static struct comedi_driver cnt_driver = {
.detach = cnt_detach,
};
-COMEDI_PCI_INITCLEANUP(cnt_driver, cnt_pci_table);
+static int __devinit cnt_driver_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, cnt_driver.driver_name);
+}
+
+static void __devexit cnt_driver_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver cnt_driver_pci_driver = {
+ .id_table = cnt_pci_table,
+ .probe = &cnt_driver_pci_probe,
+ .remove = __devexit_p(&cnt_driver_pci_remove)
+};
+
+static int __init cnt_driver_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&cnt_driver);
+ if (retval < 0)
+ return retval;
+
+ cnt_driver_pci_driver.name = (char *)cnt_driver.driver_name;
+ return pci_register_driver(&cnt_driver_pci_driver);
+}
+
+static void __exit cnt_driver_cleanup_module(void)
+{
+ pci_unregister_driver(&cnt_driver_pci_driver);
+ comedi_driver_unregister(&cnt_driver);
+}
+
+module_init(cnt_driver_init_module);
+module_exit(cnt_driver_cleanup_module);
/*-- counter write ----------------------------------------------------------*/
@@ -152,7 +188,7 @@ static int cnt_rinsn(struct comedi_device *dev,
static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *subdevice;
- struct pci_dev *pci_device;
+ struct pci_dev *pci_device = NULL;
struct cnt_board_struct *board;
unsigned long io_base;
int error, i;
@@ -163,9 +199,7 @@ static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return error;
/* Probe the device to determine what device in the series it is. */
- for (pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pci_device != NULL;
- pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_device)) {
+ for_each_pci_dev(pci_device) {
if (pci_device->vendor == PCI_VENDOR_ID_KOLTER) {
for (i = 0; i < cnt_board_nbr; i++) {
if (cnt_boards[i].device_id ==
@@ -259,3 +293,7 @@ static int cnt_detach(struct comedi_device *dev)
dev->minor);
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 8b9fa0f9f1f..14713849564 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -91,22 +91,22 @@ static DEFINE_PCI_DEVICE_TABLE(me4000_pci_table) = {
MODULE_DEVICE_TABLE(pci, me4000_pci_table);
static const struct me4000_board me4000_boards[] = {
- {"ME-4650", 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}},
+ {"ME-4650", 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0} },
- {"ME-4660", 0x4660, {0, 0}, {32, 0, 16, 0}, {4}, {3}},
- {"ME-4660i", 0x4661, {0, 0}, {32, 0, 16, 0}, {4}, {3}},
- {"ME-4660s", 0x4662, {0, 0}, {32, 8, 16, 0}, {4}, {3}},
- {"ME-4660is", 0x4663, {0, 0}, {32, 8, 16, 0}, {4}, {3}},
+ {"ME-4660", 0x4660, {0, 0}, {32, 0, 16, 0}, {4}, {3} },
+ {"ME-4660i", 0x4661, {0, 0}, {32, 0, 16, 0}, {4}, {3} },
+ {"ME-4660s", 0x4662, {0, 0}, {32, 8, 16, 0}, {4}, {3} },
+ {"ME-4660is", 0x4663, {0, 0}, {32, 8, 16, 0}, {4}, {3} },
- {"ME-4670", 0x4670, {4, 0}, {32, 0, 16, 1}, {4}, {3}},
- {"ME-4670i", 0x4671, {4, 0}, {32, 0, 16, 1}, {4}, {3}},
- {"ME-4670s", 0x4672, {4, 0}, {32, 8, 16, 1}, {4}, {3}},
- {"ME-4670is", 0x4673, {4, 0}, {32, 8, 16, 1}, {4}, {3}},
+ {"ME-4670", 0x4670, {4, 0}, {32, 0, 16, 1}, {4}, {3} },
+ {"ME-4670i", 0x4671, {4, 0}, {32, 0, 16, 1}, {4}, {3} },
+ {"ME-4670s", 0x4672, {4, 0}, {32, 8, 16, 1}, {4}, {3} },
+ {"ME-4670is", 0x4673, {4, 0}, {32, 8, 16, 1}, {4}, {3} },
- {"ME-4680", 0x4680, {4, 4}, {32, 0, 16, 1}, {4}, {3}},
- {"ME-4680i", 0x4681, {4, 4}, {32, 0, 16, 1}, {4}, {3}},
- {"ME-4680s", 0x4682, {4, 4}, {32, 8, 16, 1}, {4}, {3}},
- {"ME-4680is", 0x4683, {4, 4}, {32, 8, 16, 1}, {4}, {3}},
+ {"ME-4680", 0x4680, {4, 4}, {32, 0, 16, 1}, {4}, {3} },
+ {"ME-4680i", 0x4681, {4, 4}, {32, 0, 16, 1}, {4}, {3} },
+ {"ME-4680s", 0x4682, {4, 4}, {32, 8, 16, 1}, {4}, {3} },
+ {"ME-4680is", 0x4683, {4, 4}, {32, 8, 16, 1}, {4}, {3} },
{0},
};
@@ -120,10 +120,10 @@ static int me4000_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int me4000_detach(struct comedi_device *dev);
static struct comedi_driver driver_me4000 = {
-driver_name:"me4000",
-module:THIS_MODULE,
-attach:me4000_attach,
-detach:me4000_detach,
+driver_name: "me4000",
+module : THIS_MODULE,
+attach : me4000_attach,
+detach : me4000_detach,
};
/*-----------------------------------------------------------------------------
@@ -302,8 +302,8 @@ static int me4000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (request_irq(info->irq, me4000_ai_isr,
IRQF_SHARED, "ME-4000", dev)) {
printk
- ("comedi%d: me4000: me4000_attach(): Unable to allocate irq\n",
- dev->minor);
+ ("comedi%d: me4000: me4000_attach(): "
+ "Unable to allocate irq\n", dev->minor);
} else {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
@@ -313,8 +313,8 @@ static int me4000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
} else {
printk(KERN_WARNING
- "comedi%d: me4000: me4000_attach(): No interrupt available\n",
- dev->minor);
+ "comedi%d: me4000: me4000_attach(): "
+ "No interrupt available\n", dev->minor);
}
} else {
s->type = COMEDI_SUBD_UNUSED;
@@ -389,7 +389,7 @@ static int me4000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int me4000_probe(struct comedi_device *dev, struct comedi_devconfig *it)
{
- struct pci_dev *pci_device;
+ struct pci_dev *pci_device = NULL;
int result, i;
struct me4000_board *board;
@@ -402,17 +402,21 @@ static int me4000_probe(struct comedi_device *dev, struct comedi_devconfig *it)
/*
* Probe the device to determine what device in the series it is.
*/
- for (pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pci_device != NULL;
- pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_device)) {
+ for_each_pci_dev(pci_device) {
if (pci_device->vendor == PCI_VENDOR_ID_MEILHAUS) {
for (i = 0; i < ME4000_BOARD_VERSIONS; i++) {
if (me4000_boards[i].device_id ==
pci_device->device) {
- /* Was a particular bus/slot requested? */
+ /*
+ * Was a particular
+ * bus/slot requested?
+ */
if ((it->options[0] != 0)
|| (it->options[1] != 0)) {
- /* Are we on the wrong bus/slot? */
+ /*
+ * Are we on the wrong
+ * bus/slot?
+ */
if (pci_device->bus->number !=
it->options[0]
||
@@ -433,14 +437,16 @@ static int me4000_probe(struct comedi_device *dev, struct comedi_devconfig *it)
}
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): No supported board found (req. bus/slot : %d/%d)\n",
+ "comedi%d: me4000: me4000_probe(): "
+ "No supported board found (req. bus/slot : %d/%d)\n",
dev->minor, it->options[0], it->options[1]);
return -ENODEV;
found:
printk(KERN_INFO
- "comedi%d: me4000: me4000_probe(): Found %s at PCI bus %d, slot %d\n",
+ "comedi%d: me4000: me4000_probe(): "
+ "Found %s at PCI bus %d, slot %d\n",
dev->minor, me4000_boards[i].name, pci_device->bus->number,
PCI_SLOT(pci_device->devfn));
@@ -451,8 +457,8 @@ found:
result = comedi_pci_enable(pci_device, dev->board_name);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot enable PCI device and request I/O regions\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): Cannot enable PCI "
+ "device and request I/O regions\n", dev->minor);
return result;
}
@@ -460,16 +466,16 @@ found:
result = get_registers(dev, pci_device);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot get registers\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Cannot get registers\n", dev->minor);
return result;
}
/* Initialize board info */
result = init_board_info(dev, pci_device);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot init baord info\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Cannot init baord info\n", dev->minor);
return result;
}
@@ -477,8 +483,8 @@ found:
result = init_ao_context(dev);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot init ao context\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Cannot init ao context\n", dev->minor);
return result;
}
@@ -486,8 +492,8 @@ found:
result = init_ai_context(dev);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot init ai context\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Cannot init ai context\n", dev->minor);
return result;
}
@@ -495,8 +501,8 @@ found:
result = init_dio_context(dev);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot init dio context\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Cannot init dio context\n", dev->minor);
return result;
}
@@ -504,8 +510,8 @@ found:
result = init_cnt_context(dev);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Cannot init cnt context\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Cannot init cnt context\n", dev->minor);
return result;
}
@@ -513,8 +519,8 @@ found:
result = xilinx_download(dev);
if (result) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_probe(): Can't download firmware\n",
- dev->minor);
+ "comedi%d: me4000: me4000_probe(): "
+ "Can't download firmware\n", dev->minor);
return result;
}
@@ -535,24 +541,24 @@ static int get_registers(struct comedi_device *dev, struct pci_dev *pci_dev_p)
CALL_PDEBUG("In get_registers()\n");
- /*--------------------------- plx regbase ---------------------------------*/
+ /*--------------------------- plx regbase -------------------------------*/
info->plx_regbase = pci_resource_start(pci_dev_p, 1);
if (info->plx_regbase == 0) {
printk(KERN_ERR
- "comedi%d: me4000: get_registers(): PCI base address 1 is not available\n",
- dev->minor);
+ "comedi%d: me4000: get_registers(): "
+ "PCI base address 1 is not available\n", dev->minor);
return -ENODEV;
}
info->plx_regbase_size = pci_resource_len(pci_dev_p, 1);
- /*--------------------------- me4000 regbase ------------------------------*/
+ /*--------------------------- me4000 regbase ----------------------------*/
info->me4000_regbase = pci_resource_start(pci_dev_p, 2);
if (info->me4000_regbase == 0) {
printk(KERN_ERR
- "comedi%d: me4000: get_registers(): PCI base address 2 is not available\n",
- dev->minor);
+ "comedi%d: me4000: get_registers(): "
+ "PCI base address 2 is not available\n", dev->minor);
return -ENODEV;
}
info->me4000_regbase_size = pci_resource_len(pci_dev_p, 2);
@@ -562,19 +568,19 @@ static int get_registers(struct comedi_device *dev, struct pci_dev *pci_dev_p)
info->timer_regbase = pci_resource_start(pci_dev_p, 3);
if (info->timer_regbase == 0) {
printk(KERN_ERR
- "comedi%d: me4000: get_registers(): PCI base address 3 is not available\n",
- dev->minor);
+ "comedi%d: me4000: get_registers(): "
+ "PCI base address 3 is not available\n", dev->minor);
return -ENODEV;
}
info->timer_regbase_size = pci_resource_len(pci_dev_p, 3);
- /*--------------------------- program regbase ------------------------------*/
+ /*--------------------------- program regbase ----------------------------*/
info->program_regbase = pci_resource_start(pci_dev_p, 5);
if (info->program_regbase == 0) {
printk(KERN_ERR
- "comedi%d: me4000: get_registers(): PCI base address 5 is not available\n",
- dev->minor);
+ "comedi%d: me4000: get_registers(): "
+ "PCI base address 5 is not available\n", dev->minor);
return -ENODEV;
}
info->program_regbase_size = pci_resource_len(pci_dev_p, 5);
@@ -800,8 +806,8 @@ static int xilinx_download(struct comedi_device *dev)
udelay(20);
if (!(inl(info->plx_regbase + PLX_INTCSR) & 0x20)) {
printk(KERN_ERR
- "comedi%d: me4000: xilinx_download(): Can't init Xilinx\n",
- dev->minor);
+ "comedi%d: me4000: xilinx_download(): "
+ "Can't init Xilinx\n", dev->minor);
return -EIO;
}
@@ -810,8 +816,8 @@ static int xilinx_download(struct comedi_device *dev)
value &= ~0x100;
outl(value, info->plx_regbase + PLX_ICR);
if (FIRMWARE_NOT_AVAILABLE) {
- comedi_error(dev,
- "xilinx firmware unavailable due to licensing, aborting");
+ comedi_error(dev, "xilinx firmware unavailable "
+ "due to licensing, aborting");
return -EIO;
} else {
/* Download Xilinx firmware */
@@ -826,7 +832,8 @@ static int xilinx_download(struct comedi_device *dev)
/* Check if BUSY flag is low */
if (inl(info->plx_regbase + PLX_ICR) & 0x20) {
printk(KERN_ERR
- "comedi%d: me4000: xilinx_download(): Xilinx is still busy (idx = %d)\n",
+ "comedi%d: me4000: xilinx_download(): "
+ "Xilinx is still busy (idx = %d)\n",
dev->minor, idx);
return -EIO;
}
@@ -837,11 +844,11 @@ static int xilinx_download(struct comedi_device *dev)
if (inl(info->plx_regbase + PLX_ICR) & 0x4) {
} else {
printk(KERN_ERR
- "comedi%d: me4000: xilinx_download(): DONE flag is not set\n",
- dev->minor);
+ "comedi%d: me4000: xilinx_download(): "
+ "DONE flag is not set\n", dev->minor);
printk(KERN_ERR
- "comedi%d: me4000: xilinx_download(): Download not successful\n",
- dev->minor);
+ "comedi%d: me4000: xilinx_download(): "
+ "Download not successful\n", dev->minor);
return -EIO;
}
@@ -902,7 +909,10 @@ static int reset_board(struct comedi_device *dev)
me4000_outl(dev, ME4000_AO_DEMUX_ADJUST_VALUE,
info->me4000_regbase + ME4000_AO_DEMUX_ADJUST_REG);
- /* Set digital I/O direction for port 0 to output on isolated versions */
+ /*
+ * Set digital I/O direction for port 0
+ * to output on isolated versions
+ */
if (!(me4000_inl(dev, info->me4000_regbase + ME4000_DIO_DIR_REG) & 0x1)) {
me4000_outl(dev, 0x1,
info->me4000_regbase + ME4000_DIO_CTRL_REG);
@@ -950,8 +960,8 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
return 0;
} else if (insn->n > 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Invalid instruction length %d\n",
- dev->minor, insn->n);
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Invalid instruction length %d\n", dev->minor, insn->n);
return -EINVAL;
}
@@ -970,8 +980,8 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Invalid range specified\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Invalid range specified\n", dev->minor);
return -EINVAL;
}
@@ -980,8 +990,8 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
case AREF_COMMON:
if (chan >= thisboard->ai.count) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Analog input is not available\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Analog input is not available\n", dev->minor);
return -EINVAL;
}
entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED | chan;
@@ -990,23 +1000,24 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
case AREF_DIFF:
if (rang == 0 || rang == 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Range must be bipolar when aref = diff\n",
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Range must be bipolar when aref = diff\n",
dev->minor);
return -EINVAL;
}
if (chan >= thisboard->ai.diff_count) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Analog input is not available\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Analog input is not available\n", dev->minor);
return -EINVAL;
}
entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL | chan;
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Invalid aref specified\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Invalid aref specified\n", dev->minor);
return -EINVAL;
}
@@ -1045,8 +1056,8 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
(me4000_inl(dev, info->ai_context.status_reg) &
ME4000_AI_STATUS_BIT_EF_DATA)) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_insn_read(): Value not available after wait\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_insn_read(): "
+ "Value not available after wait\n", dev->minor);
return -EIO;
}
@@ -1086,24 +1097,24 @@ static int ai_check_chanlist(struct comedi_device *dev,
/* Check whether a channel list is available */
if (!cmd->chanlist_len) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): No channel list available\n",
- dev->minor);
+ "comedi%d: me4000: ai_check_chanlist(): "
+ "No channel list available\n", dev->minor);
return -EINVAL;
}
/* Check the channel list size */
if (cmd->chanlist_len > ME4000_AI_CHANNEL_LIST_COUNT) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): Channel list is to large\n",
- dev->minor);
+ "comedi%d: me4000: ai_check_chanlist(): "
+ "Channel list is to large\n", dev->minor);
return -EINVAL;
}
/* Check the pointer */
if (!cmd->chanlist) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): NULL pointer to channel list\n",
- dev->minor);
+ "comedi%d: me4000: ai_check_chanlist(): "
+ "NULL pointer to channel list\n", dev->minor);
return -EFAULT;
}
@@ -1112,7 +1123,8 @@ static int ai_check_chanlist(struct comedi_device *dev,
for (i = 0; i < cmd->chanlist_len; i++) {
if (CR_AREF(cmd->chanlist[i]) != aref) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): Mode is not equal for all entries\n",
+ "comedi%d: me4000: ai_check_chanlist(): "
+ "Mode is not equal for all entries\n",
dev->minor);
return -EINVAL;
}
@@ -1124,8 +1136,8 @@ static int ai_check_chanlist(struct comedi_device *dev,
if (CR_CHAN(cmd->chanlist[i]) >=
thisboard->ai.diff_count) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): Channel number to high\n",
- dev->minor);
+ "comedi%d: me4000: ai_check_chanlist():"
+ " Channel number to high\n", dev->minor);
return -EINVAL;
}
}
@@ -1133,8 +1145,8 @@ static int ai_check_chanlist(struct comedi_device *dev,
for (i = 0; i < cmd->chanlist_len; i++) {
if (CR_CHAN(cmd->chanlist[i]) >= thisboard->ai.count) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): Channel number to high\n",
- dev->minor);
+ "comedi%d: me4000: ai_check_chanlist(): "
+ "Channel number to high\n", dev->minor);
return -EINVAL;
}
}
@@ -1146,7 +1158,9 @@ static int ai_check_chanlist(struct comedi_device *dev,
if (CR_RANGE(cmd->chanlist[i]) != 1 &&
CR_RANGE(cmd->chanlist[i]) != 2) {
printk(KERN_ERR
- "comedi%d: me4000: ai_check_chanlist(): Bipolar is not selected in differential mode\n",
+ "comedi%d: me4000: ai_check_chanlist(): "
+ "Bipolar is not selected in "
+ "differential mode\n",
dev->minor);
return -EINVAL;
}
@@ -1330,21 +1344,19 @@ static int ai_write_chanlist(struct comedi_device *dev,
entry = chan;
- if (rang == 0) {
+ if (rang == 0)
entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
- } else if (rang == 1) {
+ else if (rang == 1)
entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
- } else if (rang == 2) {
+ else if (rang == 2)
entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
- } else {
+ else
entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
- }
- if (aref == SDF_DIFF) {
+ if (aref == SDF_DIFF)
entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
- } else {
+ else
entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED;
- }
me4000_outl(dev, entry, info->ai_context.channel_list_reg);
}
@@ -1454,8 +1466,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start source\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start source\n", dev->minor);
cmd->start_src = TRIG_NOW;
err++;
}
@@ -1470,8 +1482,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid scan begin source\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid scan begin source\n", dev->minor);
cmd->scan_begin_src = TRIG_FOLLOW;
err++;
}
@@ -1485,8 +1497,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid convert source\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid convert source\n", dev->minor);
cmd->convert_src = TRIG_TIMER;
err++;
}
@@ -1500,8 +1512,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid scan end source\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid scan end source\n", dev->minor);
cmd->scan_end_src = TRIG_NONE;
err++;
}
@@ -1515,8 +1527,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid stop source\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid stop source\n", dev->minor);
cmd->stop_src = TRIG_NONE;
err++;
}
@@ -1546,8 +1558,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->convert_src == TRIG_EXT) {
} else {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start trigger combination\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start trigger combination\n", dev->minor);
cmd->start_src = TRIG_NOW;
cmd->scan_begin_src = TRIG_FOLLOW;
cmd->convert_src = TRIG_TIMER;
@@ -1563,8 +1575,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_end_src == TRIG_COUNT) {
} else {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid stop trigger combination\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid stop trigger combination\n", dev->minor);
cmd->stop_src = TRIG_NONE;
cmd->scan_end_src = TRIG_NONE;
err++;
@@ -1577,29 +1589,29 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
*/
if (cmd->chanlist_len < 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): No channel list\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "No channel list\n", dev->minor);
cmd->chanlist_len = 1;
err++;
}
if (init_ticks < 66) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Start arg to low\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Start arg to low\n", dev->minor);
cmd->start_arg = 2000;
err++;
}
if (scan_ticks && scan_ticks < 67) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Scan begin arg to low\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Scan begin arg to low\n", dev->minor);
cmd->scan_begin_arg = 2031;
err++;
}
if (chan_ticks < 66) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Convert arg to low\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Convert arg to low\n", dev->minor);
cmd->convert_arg = 2000;
err++;
}
@@ -1617,23 +1629,25 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start arg\n", dev->minor);
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid convert arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid convert arg\n", dev->minor);
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid scan end arg\n",
- dev->minor);
- cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31; /* At least one tick more */
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid scan end arg\n", dev->minor);
+
+ /* At least one tick more */
+ cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
err++;
}
} else if (cmd->start_src == TRIG_NOW &&
@@ -1643,15 +1657,15 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start arg\n", dev->minor);
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid convert arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid convert arg\n", dev->minor);
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
@@ -1662,23 +1676,25 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start arg\n", dev->minor);
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid convert arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid convert arg\n", dev->minor);
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid scan end arg\n",
- dev->minor);
- cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31; /* At least one tick more */
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid scan end arg\n", dev->minor);
+
+ /* At least one tick more */
+ cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
err++;
}
} else if (cmd->start_src == TRIG_EXT &&
@@ -1688,15 +1704,15 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start arg\n", dev->minor);
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid convert arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid convert arg\n", dev->minor);
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
@@ -1707,15 +1723,15 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start arg\n", dev->minor);
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid convert arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid convert arg\n", dev->minor);
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
@@ -1726,8 +1742,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid start arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid start arg\n", dev->minor);
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
@@ -1735,8 +1751,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
if (cmd->stop_src == TRIG_COUNT) {
if (cmd->stop_arg == 0) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid stop arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid stop arg\n", dev->minor);
cmd->stop_arg = 1;
err++;
}
@@ -1744,8 +1760,8 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
if (cmd->scan_end_src == TRIG_COUNT) {
if (cmd->scan_end_arg == 0) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_do_cmd_test(): Invalid scan end arg\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_do_cmd_test(): "
+ "Invalid scan end arg\n", dev->minor);
cmd->scan_end_arg = 1;
err++;
}
@@ -1786,8 +1802,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
/* Check if irq number is right */
if (irq != ai_context->irq) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_isr(): Incorrect interrupt num: %d\n",
- dev->minor, irq);
+ "comedi%d: me4000: me4000_ai_isr(): "
+ "Incorrect interrupt num: %d\n", dev->minor, irq);
return IRQ_HANDLED;
}
@@ -1806,7 +1822,10 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
ISR_PDEBUG("me4000_ai_isr(): Fifo full\n");
c = ME4000_AI_FIFO_COUNT;
- /* FIFO overflow, so stop conversion and disable all interrupts */
+ /*
+ * FIFO overflow, so stop conversion
+ * and disable all interrupts
+ */
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
@@ -1815,8 +1834,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_isr(): FIFO overflow\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_isr(): "
+ "FIFO overflow\n", dev->minor);
} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
&& !(tmp & ME4000_AI_STATUS_BIT_HF_DATA)
&& (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
@@ -1827,11 +1846,14 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
c = ME4000_AI_FIFO_COUNT / 2;
} else {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_isr(): Can't determine state of fifo\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_isr(): "
+ "Can't determine state of fifo\n", dev->minor);
c = 0;
- /* Undefined state, so stop conversion and disable all interrupts */
+ /*
+ * Undefined state, so stop conversion
+ * and disable all interrupts
+ */
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
@@ -1840,8 +1862,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_isr(): Undefined FIFO state\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_isr(): "
+ "Undefined FIFO state\n", dev->minor);
}
ISR_PDEBUG("me4000_ai_isr(): Try to read %d values\n", c);
@@ -1852,7 +1874,10 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
lval ^= 0x8000;
if (!comedi_buf_put(s->async, lval)) {
- /* Buffer overflow, so stop conversion and disable all interrupts */
+ /*
+ * Buffer overflow, so stop conversion
+ * and disable all interrupts
+ */
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
@@ -1861,8 +1886,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
s->async->events |= COMEDI_CB_OVERFLOW;
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_isr(): Buffer overflow\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_isr(): "
+ "Buffer overflow\n", dev->minor);
break;
}
@@ -1883,7 +1908,10 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOA;
- /* Acquisition is complete, so stop conversion and disable all interrupts */
+ /*
+ * Acquisition is complete, so stop
+ * conversion and disable all interrupts
+ */
tmp = me4000_inl(dev, ai_context->ctrl_reg);
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ);
@@ -1897,8 +1925,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
if (!comedi_buf_put(s->async, lval)) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ai_isr(): Buffer overflow\n",
- dev->minor);
+ "comedi%d: me4000: me4000_ai_isr(): "
+ "Buffer overflow\n", dev->minor);
s->async->events |= COMEDI_CB_OVERFLOW;
break;
}
@@ -1941,29 +1969,29 @@ static int me4000_ao_insn_write(struct comedi_device *dev,
return 0;
} else if (insn->n > 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ao_insn_write(): Invalid instruction length %d\n",
- dev->minor, insn->n);
+ "comedi%d: me4000: me4000_ao_insn_write(): "
+ "Invalid instruction length %d\n", dev->minor, insn->n);
return -EINVAL;
}
if (chan >= thisboard->ao.count) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ao_insn_write(): Invalid channel %d\n",
- dev->minor, insn->n);
+ "comedi%d: me4000: me4000_ao_insn_write(): "
+ "Invalid channel %d\n", dev->minor, insn->n);
return -EINVAL;
}
if (rang != 0) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ao_insn_write(): Invalid range %d\n",
- dev->minor, insn->n);
+ "comedi%d: me4000: me4000_ao_insn_write(): "
+ "Invalid range %d\n", dev->minor, insn->n);
return -EINVAL;
}
if (aref != AREF_GROUND && aref != AREF_COMMON) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_ao_insn_write(): Invalid aref %d\n",
- dev->minor, insn->n);
+ "comedi%d: me4000: me4000_ao_insn_write(): "
+ "Invalid aref %d\n", dev->minor, insn->n);
return -EINVAL;
}
@@ -1994,8 +2022,8 @@ static int me4000_ao_insn_read(struct comedi_device *dev,
return 0;
} else if (insn->n > 1) {
printk
- ("comedi%d: me4000: me4000_ao_insn_read(): Invalid instruction length\n",
- dev->minor);
+ ("comedi%d: me4000: me4000_ao_insn_read(): "
+ "Invalid instruction length\n", dev->minor);
return -EINVAL;
}
@@ -2021,8 +2049,8 @@ static int me4000_dio_insn_bits(struct comedi_device *dev,
if (insn->n != 2) {
printk
- ("comedi%d: me4000: me4000_dio_insn_bits(): Invalid instruction length\n",
- dev->minor);
+ ("comedi%d: me4000: me4000_dio_insn_bits(): "
+ "Invalid instruction length\n", dev->minor);
return -EINVAL;
}
@@ -2095,8 +2123,9 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
} else if (chan < 16) {
/*
- * Chech for optoisolated ME-4000 version. If one the first
- * port is a fixed output port and the second is a fixed input port.
+ * Check for the optoisolated ME-4000 version.
+ * If so, the first port is a fixed output
+ * port and the second is a fixed input port.
*/
if (!me4000_inl(dev, info->dio_context.dir_reg))
return -ENODEV;
@@ -2121,8 +2150,9 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
} else {
if (chan < 8) {
/*
- * Chech for optoisolated ME-4000 version. If one the first
- * port is a fixed output port and the second is a fixed input port.
+ * Check for the optoisolated ME-4000 version.
+ * If so, the first port is a fixed output
+ * port and the second is a fixed input port.
*/
if (!me4000_inl(dev, info->dio_context.dir_reg))
return -ENODEV;
@@ -2257,7 +2287,8 @@ static int me4000_cnt_insn_config(struct comedi_device *dev,
case GPCT_RESET:
if (insn->n != 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_config(): Invalid instruction length%d\n",
+ "comedi%d: me4000: me4000_cnt_insn_config(): "
+ "Invalid instruction length%d\n",
dev->minor, insn->n);
return -EINVAL;
}
@@ -2269,7 +2300,8 @@ static int me4000_cnt_insn_config(struct comedi_device *dev,
case GPCT_SET_OPERATION:
if (insn->n != 2) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_config(): Invalid instruction length%d\n",
+ "comedi%d: me4000: me4000_cnt_insn_config(): "
+ "Invalid instruction length%d\n",
dev->minor, insn->n);
return -EINVAL;
}
@@ -2280,8 +2312,8 @@ static int me4000_cnt_insn_config(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_config(): Invalid instruction\n",
- dev->minor);
+ "comedi%d: me4000: me4000_cnt_insn_config(): "
+ "Invalid instruction\n", dev->minor);
return -EINVAL;
}
@@ -2302,7 +2334,8 @@ static int me4000_cnt_insn_read(struct comedi_device *dev,
if (insn->n > 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_read(): Invalid instruction length %d\n",
+ "comedi%d: me4000: me4000_cnt_insn_read(): "
+ "Invalid instruction length %d\n",
dev->minor, insn->n);
return -EINVAL;
}
@@ -2328,7 +2361,8 @@ static int me4000_cnt_insn_read(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_read(): Invalid channel %d\n",
+ "comedi%d: me4000: me4000_cnt_insn_read(): "
+ "Invalid channel %d\n",
dev->minor, insn->chanspec);
return -EINVAL;
}
@@ -2349,7 +2383,8 @@ static int me4000_cnt_insn_write(struct comedi_device *dev,
return 0;
} else if (insn->n > 1) {
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_write(): Invalid instruction length %d\n",
+ "comedi%d: me4000: me4000_cnt_insn_write(): "
+ "Invalid instruction length %d\n",
dev->minor, insn->n);
return -EINVAL;
}
@@ -2375,7 +2410,8 @@ static int me4000_cnt_insn_write(struct comedi_device *dev,
break;
default:
printk(KERN_ERR
- "comedi%d: me4000: me4000_cnt_insn_write(): Invalid channel %d\n",
+ "comedi%d: me4000: me4000_cnt_insn_write(): "
+ "Invalid channel %d\n",
dev->minor, insn->chanspec);
return -EINVAL;
}
@@ -2383,4 +2419,44 @@ static int me4000_cnt_insn_write(struct comedi_device *dev,
return 1;
}
-COMEDI_PCI_INITCLEANUP(driver_me4000, me4000_pci_table);
+static int __devinit driver_me4000_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_me4000.driver_name);
+}
+
+static void __devexit driver_me4000_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_me4000_pci_driver = {
+ .id_table = me4000_pci_table,
+ .probe = &driver_me4000_pci_probe,
+ .remove = __devexit_p(&driver_me4000_pci_remove)
+};
+
+static int __init driver_me4000_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_me4000);
+ if (retval < 0)
+ return retval;
+
+ driver_me4000_pci_driver.name = (char *)driver_me4000.driver_name;
+ return pci_register_driver(&driver_me4000_pci_driver);
+}
+
+static void __exit driver_me4000_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_me4000_pci_driver);
+ comedi_driver_unregister(&driver_me4000);
+}
+
+module_init(driver_me4000_init_module);
+module_exit(driver_me4000_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index c8484aec657..cda4b224b30 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -257,7 +257,43 @@ static struct comedi_driver me_driver = {
.detach = me_detach,
};
-COMEDI_PCI_INITCLEANUP(me_driver, me_pci_table);
+static int __devinit me_driver_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, me_driver.driver_name);
+}
+
+static void __devexit me_driver_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver me_driver_pci_driver = {
+ .id_table = me_pci_table,
+ .probe = &me_driver_pci_probe,
+ .remove = __devexit_p(&me_driver_pci_remove)
+};
+
+static int __init me_driver_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&me_driver);
+ if (retval < 0)
+ return retval;
+
+ me_driver_pci_driver.name = (char *)me_driver.driver_name;
+ return pci_register_driver(&me_driver_pci_driver);
+}
+
+static void __exit me_driver_cleanup_module(void)
+{
+ pci_unregister_driver(&me_driver_pci_driver);
+ comedi_driver_unregister(&me_driver);
+}
+
+module_init(me_driver_init_module);
+module_exit(me_driver_cleanup_module);
/* Private data structure */
struct me_private_data {
@@ -644,7 +680,7 @@ static int me_reset(struct comedi_device *dev)
*/
static int me_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- struct pci_dev *pci_device;
+ struct pci_dev *pci_device = NULL;
struct comedi_subdevice *subdevice;
struct me_board *board;
resource_size_t plx_regbase_tmp;
@@ -661,9 +697,7 @@ static int me_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -ENOMEM;
/* Probe the device to determine what device in the series it is. */
- for (pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pci_device != NULL;
- pci_device = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_device)) {
+ for_each_pci_dev(pci_device) {
if (pci_device->vendor == PCI_VENDOR_ID_MEILHAUS) {
for (i = 0; i < me_board_nbr; i++) {
if (me_boards[i].device_id ==
@@ -857,3 +891,7 @@ static int me_detach(struct comedi_device *dev)
}
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 99d9985c5b3..cd25b241cc1 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -70,12 +70,10 @@ EXPORT_SYMBOL(mite_devices);
void mite_init(void)
{
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev = NULL;
struct mite_struct *mite;
- for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
- pcidev != NULL;
- pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+ for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_NI) {
unsigned i;
@@ -829,3 +827,7 @@ void __exit cleanup_module(void)
mite_cleanup();
}
#endif
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 9874ac3749c..a89eebd23f6 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -406,4 +406,19 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
return n;
}
-COMEDI_INITCLEANUP(driver_mpc624);
+static int __init driver_mpc624_init_module(void)
+{
+ return comedi_driver_register(&driver_mpc624);
+}
+
+static void __exit driver_mpc624_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_mpc624);
+}
+
+module_init(driver_mpc624_init_module);
+module_exit(driver_mpc624_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/mpc8260cpm.c b/drivers/staging/comedi/drivers/mpc8260cpm.c
index 440a144a037..5f6816a3fe8 100644
--- a/drivers/staging/comedi/drivers/mpc8260cpm.c
+++ b/drivers/staging/comedi/drivers/mpc8260cpm.c
@@ -56,7 +56,18 @@ static struct comedi_driver driver_mpc8260cpm = {
.detach = mpc8260cpm_detach,
};
-COMEDI_INITCLEANUP(driver_mpc8260cpm);
+static int __init driver_mpc8260cpm_init_module(void)
+{
+ return comedi_driver_register(&driver_mpc8260cpm);
+}
+
+static void __exit driver_mpc8260cpm_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_mpc8260cpm);
+}
+
+module_init(driver_mpc8260cpm_init_module);
+module_exit(driver_mpc8260cpm_cleanup_module);
static int mpc8260cpm_dio_config(struct comedi_device *dev,
struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index 6b22f0f8f06..dace902d3bc 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -93,7 +93,18 @@ static struct comedi_driver driver_multiq3 = {
.detach = multiq3_detach,
};
-COMEDI_INITCLEANUP(driver_multiq3);
+static int __init driver_multiq3_init_module(void)
+{
+ return comedi_driver_register(&driver_multiq3);
+}
+
+static void __exit driver_multiq3_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_multiq3);
+}
+
+module_init(driver_multiq3_init_module);
+module_exit(driver_multiq3_cleanup_module);
struct multiq3_private {
unsigned int ao_readback[2];
@@ -338,3 +349,7 @@ static int multiq3_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 1fc76cc6a28..14e716e99a5 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -490,4 +490,40 @@ static int ni6527_find_device(struct comedi_device *dev, int bus, int slot)
return -EIO;
}
-COMEDI_PCI_INITCLEANUP(driver_ni6527, ni6527_pci_table);
+static int __devinit driver_ni6527_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_ni6527.driver_name);
+}
+
+static void __devexit driver_ni6527_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_ni6527_pci_driver = {
+ .id_table = ni6527_pci_table,
+ .probe = &driver_ni6527_pci_probe,
+ .remove = __devexit_p(&driver_ni6527_pci_remove)
+};
+
+static int __init driver_ni6527_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_ni6527);
+ if (retval < 0)
+ return retval;
+
+ driver_ni6527_pci_driver.name = (char *)driver_ni6527.driver_name;
+ return pci_register_driver(&driver_ni6527_pci_driver);
+}
+
+static void __exit driver_ni6527_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_ni6527_pci_driver);
+ comedi_driver_unregister(&driver_ni6527);
+}
+
+module_init(driver_ni6527_init_module);
+module_exit(driver_ni6527_cleanup_module);
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index d793f5a4ac9..8b8e2aaf77f 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -834,4 +834,40 @@ static int ni_65xx_find_device(struct comedi_device *dev, int bus, int slot)
return -EIO;
}
-COMEDI_PCI_INITCLEANUP(driver_ni_65xx, ni_65xx_pci_table);
+static int __devinit driver_ni_65xx_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_ni_65xx.driver_name);
+}
+
+static void __devexit driver_ni_65xx_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_ni_65xx_pci_driver = {
+ .id_table = ni_65xx_pci_table,
+ .probe = &driver_ni_65xx_pci_probe,
+ .remove = __devexit_p(&driver_ni_65xx_pci_remove)
+};
+
+static int __init driver_ni_65xx_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_ni_65xx);
+ if (retval < 0)
+ return retval;
+
+ driver_ni_65xx_pci_driver.name = (char *)driver_ni_65xx.driver_name;
+ return pci_register_driver(&driver_ni_65xx_pci_driver);
+}
+
+static void __exit driver_ni_65xx_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_ni_65xx_pci_driver);
+ comedi_driver_unregister(&driver_ni_65xx);
+}
+
+module_init(driver_ni_65xx_init_module);
+module_exit(driver_ni_65xx_cleanup_module);
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6a6fae53ea0..6612b085c4e 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -382,7 +382,7 @@ enum global_interrupt_config_register_bits {
Global_Int_Enable_Bit = 0x80000000
};
-/* Offset of the GPCT chips from the base-adress of the card */
+/* Offset of the GPCT chips from the base-address of the card */
/* First chip is at base-address + 0x00, etc. */
static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
@@ -471,7 +471,43 @@ static struct comedi_driver driver_ni_660x = {
.detach = ni_660x_detach,
};
-COMEDI_PCI_INITCLEANUP(driver_ni_660x, ni_660x_pci_table);
+static int __devinit driver_ni_660x_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_ni_660x.driver_name);
+}
+
+static void __devexit driver_ni_660x_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_ni_660x_pci_driver = {
+ .id_table = ni_660x_pci_table,
+ .probe = &driver_ni_660x_pci_probe,
+ .remove = __devexit_p(&driver_ni_660x_pci_remove)
+};
+
+static int __init driver_ni_660x_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_ni_660x);
+ if (retval < 0)
+ return retval;
+
+ driver_ni_660x_pci_driver.name = (char *)driver_ni_660x.driver_name;
+ return pci_register_driver(&driver_ni_660x_pci_driver);
+}
+
+static void __exit driver_ni_660x_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_ni_660x_pci_driver);
+ comedi_driver_unregister(&driver_ni_660x);
+}
+
+module_init(driver_ni_660x_init_module);
+module_exit(driver_ni_660x_cleanup_module);
static int ni_660x_find_device(struct comedi_device *dev, int bus, int slot);
static int ni_660x_set_pfi_routing(struct comedi_device *dev, unsigned chan,
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 44ae8368454..e9f034efdc6 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -120,7 +120,43 @@ static struct comedi_driver driver_ni_670x = {
.detach = ni_670x_detach,
};
-COMEDI_PCI_INITCLEANUP(driver_ni_670x, ni_670x_pci_table);
+static int __devinit driver_ni_670x_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_ni_670x.driver_name);
+}
+
+static void __devexit driver_ni_670x_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_ni_670x_pci_driver = {
+ .id_table = ni_670x_pci_table,
+ .probe = &driver_ni_670x_pci_probe,
+ .remove = __devexit_p(&driver_ni_670x_pci_remove)
+};
+
+static int __init driver_ni_670x_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_ni_670x);
+ if (retval < 0)
+ return retval;
+
+ driver_ni_670x_pci_driver.name = (char *)driver_ni_670x.driver_name;
+ return pci_register_driver(&driver_ni_670x_pci_driver);
+}
+
+static void __exit driver_ni_670x_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_ni_670x_pci_driver);
+ comedi_driver_unregister(&driver_ni_670x);
+}
+
+module_init(driver_ni_670x_init_module);
+module_exit(driver_ni_670x_cleanup_module);
static struct comedi_lrange range_0_20mA = { 1, {RANGE_mA(0, 20)} };
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 9bff34cf06d..e46d62b75fc 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -197,7 +197,18 @@ static int a2150_set_chanlist(struct comedi_device *dev,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver_a2150);
+static int __init driver_a2150_init_module(void)
+{
+ return comedi_driver_register(&driver_a2150);
+}
+
+static void __exit driver_a2150_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_a2150);
+}
+
+module_init(driver_a2150_init_module);
+module_exit(driver_a2150_cleanup_module);
#ifdef A2150_DEBUG
@@ -910,3 +921,7 @@ static int a2150_set_chanlist(struct comedi_device *dev,
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index ce60224bb7b..138dcc2275a 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -194,7 +194,18 @@ static struct comedi_driver driver_atao = {
.num_names = ARRAY_SIZE(atao_boards),
};
-COMEDI_INITCLEANUP(driver_atao);
+static int __init driver_atao_init_module(void)
+{
+ return comedi_driver_register(&driver_atao);
+}
+
+static void __exit driver_atao_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_atao);
+}
+
+module_init(driver_atao_init_module);
+module_exit(driver_atao_cleanup_module);
static void atao_reset(struct comedi_device *dev);
@@ -459,3 +470,7 @@ static int atao_calib_insn_write(struct comedi_device *dev,
return insn->n;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 003d00b595b..3330b3d53e8 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -349,7 +349,18 @@ static struct comedi_driver driver_atmio = {
.detach = ni_atmio_detach,
};
-COMEDI_INITCLEANUP(driver_atmio);
+static int __init driver_atmio_init_module(void)
+{
+ return comedi_driver_register(&driver_atmio);
+}
+
+static void __exit driver_atmio_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_atmio);
+}
+
+module_init(driver_atmio_init_module);
+module_exit(driver_atmio_cleanup_module);
#include "ni_mio_common.c"
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index cf4f241f210..285b933551a 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -151,7 +151,18 @@ static struct comedi_driver driver_atmio16d = {
.offset = sizeof(struct atmio16_board_t),
};
-COMEDI_INITCLEANUP(driver_atmio16d);
+static int __init driver_atmio16d_init_module(void)
+{
+ return comedi_driver_register(&driver_atmio16d);
+}
+
+static void __exit driver_atmio16d_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_atmio16d);
+}
+
+module_init(driver_atmio16d_init_module);
+module_exit(driver_atmio16d_cleanup_module);
/* range structs */
static const struct comedi_lrange range_atmio16d_ai_10_bipolar = { 4, {
@@ -887,3 +898,7 @@ static int atmio16d_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 6ec77bf88c6..cc15666e5cc 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -47,7 +47,6 @@ IRQ is assigned but not used.
#include <linux/ioport.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -377,7 +376,7 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
link = pcmcia_cur_dev; /* XXX hack */
if (!link)
return -EIO;
- iobase = link->io.BasePort1;
+ iobase = link->resource[0]->start;
#ifdef incomplete
irq = link->irq;
#endif
@@ -459,14 +458,6 @@ static void dio700_cs_detach(struct pcmcia_device *);
less on other parts of the kernel.
*/
-/*
- The dev_info variable is the "key" that is used to match up this
- device driver with appropriate cards, through the card configuration
- database.
-*/
-
-static const dev_info_t dev_info = "ni_daq_700";
-
struct local_info_t {
struct pcmcia_device *link;
int stop;
@@ -537,8 +528,7 @@ static void dio700_cs_detach(struct pcmcia_device *link)
dio700_release(link);
/* This points to the parent struct local_info_t struct */
- if (link->priv)
- kfree(link->priv);
+ kfree(link->priv);
} /* dio700_cs_detach */
@@ -556,9 +546,6 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- win_req_t *req = priv_data;
- memreq_t map;
-
if (cfg->index == 0)
return -ENODEV;
@@ -572,44 +559,25 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
}
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem =
- (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
- req->Attributes |= WIN_ENABLE;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- if (req->Size < 0x1000)
- req->Size = 0x1000;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(p_dev, req, &p_dev->win))
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
- return -ENODEV;
- }
/* If we got this far, we're cool! */
return 0;
}
@@ -623,7 +591,7 @@ static void dio700_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "dio700_config\n");
- ret = pcmcia_loop_config(link, dio700_pcmcia_config_loop, &req);
+ ret = pcmcia_loop_config(link, dio700_pcmcia_config_loop, NULL);
if (ret) {
dev_warn(&link->dev, "no configuration found\n");
goto failed;
@@ -645,15 +613,10 @@ static void dio700_config(struct pcmcia_device *link)
dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req.Base,
- req.Base + req.Size - 1);
+ if (link->resource[0])
+ printk(", io %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
return;
@@ -723,7 +686,7 @@ struct pcmcia_driver dio700_cs_driver = {
.id_table = dio700_cs_ids,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "ni_daq_700",
},
};
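
The PCMCIA hunks above (and the matching ones for ni_daq_dio24, ni_labpc_cs and ni_mio_cs further down) move from the old link->io window descriptor to the generic link->resource[] pointers: BasePort1/NumPorts1 become resource[0]->start/end, IOAddrLines becomes p_dev->io_lines, pcmcia_request_io() now takes only the device, and the resources are printed with the %pR format specifier. A condensed sketch of the new-style I/O-window setup, assuming the 2.6.36-era PCMCIA API used in these hunks (the helper name is hypothetical):

	/* Sketch of the converted I/O-window setup; error handling trimmed */
	static int example_set_io_window(struct pcmcia_device *p_dev,
					 const cistpl_io_t *io)
	{
		p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;

		p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
		p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(io->flags);
		p_dev->resource[0]->start = io->win[0].base;
		/* end carries the window length until the request is made */
		p_dev->resource[0]->end = io->win[0].len;

		/* reserves the window; it is enabled later during config */
		return pcmcia_request_io(p_dev);
	}
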
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index e4865b1c231..773ae2044e0 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -48,7 +48,6 @@ the PCMCIA interface.
#include "8255.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -129,7 +128,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
link = pcmcia_cur_dev; /* XXX hack */
if (!link)
return -EIO;
- iobase = link->io.BasePort1;
+ iobase = link->resource[0]->start;
#ifdef incomplete
irq = link->irq;
#endif
@@ -211,14 +210,6 @@ static void dio24_cs_detach(struct pcmcia_device *);
less on other parts of the kernel.
*/
-/*
- The dev_info variable is the "key" that is used to match up this
- device driver with appropriate cards, through the card configuration
- database.
-*/
-
-static const dev_info_t dev_info = "ni_daq_dio24";
-
struct local_info_t {
struct pcmcia_device *link;
int stop;
@@ -289,8 +280,7 @@ static void dio24_cs_detach(struct pcmcia_device *link)
dio24_release(link);
/* This points to the parent local_info_t struct */
- if (link->priv)
- kfree(link->priv);
+ kfree(link->priv);
} /* dio24_cs_detach */
@@ -308,9 +298,6 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- win_req_t *req = priv_data;
- memreq_t map;
-
if (cfg->index == 0)
return -ENODEV;
@@ -324,44 +311,25 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
}
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem =
- (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
- req->Attributes |= WIN_ENABLE;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- if (req->Size < 0x1000)
- req->Size = 0x1000;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(p_dev, req, &p_dev->win))
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
- return -ENODEV;
- }
/* If we got this far, we're cool! */
return 0;
}
@@ -369,13 +337,12 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void dio24_config(struct pcmcia_device *link)
{
int ret;
- win_req_t req;
printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n");
dev_dbg(&link->dev, "dio24_config\n");
- ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, &req);
+ ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, NULL);
if (ret) {
dev_warn(&link->dev, "no configuration found\n");
goto failed;
@@ -397,15 +364,10 @@ static void dio24_config(struct pcmcia_device *link)
dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req.Base,
- req.Base + req.Size - 1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
return;
@@ -474,7 +436,7 @@ struct pcmcia_driver dio24_cs_driver = {
.id_table = dio24_cs_ids,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "ni_daq_dio24",
},
};
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 67c8a538802..3acf7e62bec 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -526,7 +526,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
unsigned long dma_flags, isr_flags;
short lsb, msb;
- printk("comedi%d: ni_labpc: %s, io 0x%lx", dev->minor, thisboard->name,
+ printk(KERN_ERR "comedi%d: ni_labpc: %s, io 0x%lx", dev->minor,
+ thisboard->name,
iobase);
if (irq)
printk(", irq %u", irq);
@@ -543,7 +544,7 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
/* check if io addresses are available */
if (!request_region(iobase, LABPC_SIZE,
driver_labpc.driver_name)) {
- printk("I/O port conflict\n");
+ printk(KERN_ERR "I/O port conflict\n");
return -EIO;
}
}
@@ -575,7 +576,7 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
isr_flags |= IRQF_SHARED;
if (request_irq(irq, labpc_interrupt, isr_flags,
driver_labpc.driver_name, dev)) {
- printk("unable to allocate irq %u\n", irq);
+ printk(KERN_ERR "unable to allocate irq %u\n", irq);
return -EINVAL;
}
}
@@ -583,18 +584,18 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
/* grab dma channel */
if (dma_chan > 3) {
- printk(" invalid dma channel %u\n", dma_chan);
+ printk(KERN_ERR " invalid dma channel %u\n", dma_chan);
return -EINVAL;
} else if (dma_chan) {
/* allocate dma buffer */
devpriv->dma_buffer =
kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
if (devpriv->dma_buffer == NULL) {
- printk(" failed to allocate dma buffer\n");
+ printk(KERN_ERR " failed to allocate dma buffer\n");
return -ENOMEM;
}
if (request_dma(dma_chan, driver_labpc.driver_name)) {
- printk(" failed to allocate dma channel %u\n",
+ printk(KERN_ERR " failed to allocate dma channel %u\n",
dma_chan);
return -EINVAL;
}
@@ -690,7 +691,7 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
for (i = 0; i < EEPROM_SIZE; i++)
devpriv->eeprom_data[i] = labpc_eeprom_read(dev, i);
#ifdef LABPC_DEBUG
- printk(" eeprom:");
+ printk(KERN_ERR " eeprom:");
for (i = 0; i < EEPROM_SIZE; i++)
printk(" %i:0x%x ", i, devpriv->eeprom_data[i]);
printk("\n");
@@ -732,7 +733,8 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = (unsigned long)devpriv->mite->daq_io_addr;
irq = mite_irq(devpriv->mite);
#else
- printk(" this driver has not been built with PCI support.\n");
+ printk(KERN_ERR " this driver has not been built with PCI "
+ "support.\n");
return -EINVAL;
#endif
break;
@@ -742,7 +744,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EINVAL;
break;
default:
- printk("bug! couldn't determine board type\n");
+ printk(KERN_ERR "bug! couldn't determine board type\n");
return -EINVAL;
break;
}
@@ -776,7 +778,7 @@ static int labpc_find_device(struct comedi_device *dev, int bus, int slot)
}
}
}
- printk("no device found\n");
+ printk(KERN_ERR "no device found\n");
mite_list_devices();
return -EIO;
}
@@ -784,7 +786,7 @@ static int labpc_find_device(struct comedi_device *dev, int bus, int slot)
int labpc_common_detach(struct comedi_device *dev)
{
- printk("comedi%d: ni_labpc: detach\n", dev->minor);
+ printk(KERN_ERR "comedi%d: ni_labpc: detach\n", dev->minor);
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 2);
@@ -846,7 +848,7 @@ static enum scan_mode labpc_ai_scan_mode(const struct comedi_cmd *cmd)
if (CR_CHAN(cmd->chanlist[0]) > CR_CHAN(cmd->chanlist[1]))
return MODE_MULT_CHAN_DOWN;
- printk("ni_labpc: bug! this should never happen\n");
+ printk(KERN_ERR "ni_labpc: bug! this should never happen\n");
return 0;
}
@@ -902,7 +904,7 @@ static int labpc_ai_chanlist_invalid(const struct comedi_device *dev,
}
break;
default:
- printk("ni_labpc: bug! in chanlist check\n");
+ printk(KERN_ERR "ni_labpc: bug! in chanlist check\n");
return 1;
break;
}
@@ -1096,7 +1098,10 @@ static int labpc_ai_cmdtest(struct comedi_device *dev,
err++;
}
break;
- /* TRIG_EXT doesn't care since it doesn't trigger off a numbered channel */
+ /*
+ * TRIG_EXT doesn't care since it doesn't
+ * trigger off a numbered channel
+ */
default:
break;
}
@@ -1154,25 +1159,35 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* setup hardware conversion counter */
if (cmd->stop_src == TRIG_EXT) {
- /* load counter a1 with count of 3 (pc+ manual says this is minimum allowed) using mode 0 */
+ /*
+ * load counter a1 with count of 3
+ * (pc+ manual says this is minimum allowed) using mode 0
+ */
ret = labpc_counter_load(dev, dev->iobase + COUNTER_A_BASE_REG,
1, 3, 0);
if (ret < 0) {
comedi_error(dev, "error loading counter a1");
return -1;
}
- } else /* otherwise, just put a1 in mode 0 with no count to set its output low */
+ } else /*
+ * otherwise, just put a1 in mode 0
+ * with no count to set its output low
+ */
devpriv->write_byte(INIT_A1_BITS,
dev->iobase + COUNTER_A_CONTROL_REG);
/* figure out what method we will use to transfer data */
if (devpriv->dma_chan && /* need a dma channel allocated */
- /* dma unsafe at RT priority, and too much setup time for TRIG_WAKE_EOS for */
+ /*
+ * dma unsafe at RT priority,
+ * and too much setup time for TRIG_WAKE_EOS for
+ */
(cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) == 0 &&
/* only available on the isa boards */
thisboard->bustype == isa_bustype) {
xfer = isa_dma_transfer;
- } else if (thisboard->register_layout == labpc_1200_layout && /* pc-plus has no fifo-half full interrupt */
+ /* pc-plus has no fifo-half full interrupt */
+ } else if (thisboard->register_layout == labpc_1200_layout &&
/* wake-end-of-scan should interrupt on fifo not empty */
(cmd->flags & TRIG_WAKE_EOS) == 0 &&
/* make sure we are taking more than just a few points */
@@ -1619,7 +1634,10 @@ static int labpc_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->command4_bits |= ADC_DIFF_BIT;
devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
- /* initialize pacer counter output to make sure it doesn't cause any problems */
+ /*
+ * initialize pacer counter output to make sure it doesn't
+ * cause any problems
+ */
devpriv->write_byte(INIT_A0_BITS, dev->iobase + COUNTER_A_CONTROL_REG);
labpc_clear_adc_fifo(dev);
@@ -1844,7 +1862,10 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd)
unsigned int scan_period;
scan_period = labpc_ai_scan_period(cmd);
- /* calculate cascaded counter values that give desired scan timing */
+ /*
+ * calculate cascaded counter values
+ * that give desired scan timing
+ */
i8253_cascade_ns_to_timer_2div(LABPC_TIMER_BASE,
&(devpriv->divisor_b1),
&(devpriv->divisor_b0),
@@ -1855,7 +1876,10 @@ static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd)
unsigned int convert_period;
convert_period = labpc_ai_convert_period(cmd);
- /* calculate cascaded counter values that give desired conversion timing */
+ /*
+ * calculate cascaded counter values
+ * that give desired conversion timing
+ */
i8253_cascade_ns_to_timer_2div(LABPC_TIMER_BASE,
&(devpriv->divisor_a0),
&(devpriv->divisor_b0),
@@ -2076,9 +2100,56 @@ static void write_caldac(struct comedi_device *dev, unsigned int channel,
}
#ifdef CONFIG_COMEDI_PCI
-COMEDI_PCI_INITCLEANUP(driver_labpc, labpc_pci_table);
+static int __devinit driver_labpc_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_labpc.driver_name);
+}
+
+static void __devexit driver_labpc_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_labpc_pci_driver = {
+ .id_table = labpc_pci_table,
+ .probe = &driver_labpc_pci_probe,
+ .remove = __devexit_p(&driver_labpc_pci_remove)
+};
+
+static int __init driver_labpc_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_labpc);
+ if (retval < 0)
+ return retval;
+
+ driver_labpc_pci_driver.name = (char *)driver_labpc.driver_name;
+ return pci_register_driver(&driver_labpc_pci_driver);
+}
+
+static void __exit driver_labpc_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_labpc_pci_driver);
+ comedi_driver_unregister(&driver_labpc);
+}
+
+module_init(driver_labpc_init_module);
+module_exit(driver_labpc_cleanup_module);
#else
-COMEDI_INITCLEANUP(driver_labpc);
+static int __init driver_labpc_init_module(void)
+{
+ return comedi_driver_register(&driver_labpc);
+}
+
+static void __exit driver_labpc_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_labpc);
+}
+
+module_init(driver_labpc_init_module);
+module_exit(driver_labpc_cleanup_module);
#endif
EXPORT_SYMBOL_GPL(labpc_common_attach);
@@ -2086,3 +2157,7 @@ EXPORT_SYMBOL_GPL(labpc_common_detach);
EXPORT_SYMBOL_GPL(range_labpc_1200_ai);
EXPORT_SYMBOL_GPL(labpc_1200_ai_gain_bits);
EXPORT_SYMBOL_GPL(labpc_1200_is_unipolar);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 163245ebb31..68c4ecbd93a 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -71,7 +71,6 @@ NI manuals:
#include "comedi_fc.h"
#include "ni_labpc.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -143,7 +142,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
link = pcmcia_cur_dev; /* XXX hack */
if (!link)
return -EIO;
- iobase = link->io.BasePort1;
+ iobase = link->resource[0]->start;
irq = link->irq;
break;
default:
@@ -189,14 +188,6 @@ static void labpc_cs_detach(struct pcmcia_device *);
less on other parts of the kernel.
*/
-/*
- The dev_info variable is the "key" that is used to match up this
- device driver with appropriate cards, through the card configuration
- database.
-*/
-
-static const dev_info_t dev_info = "daqcard-1200";
-
struct local_info_t {
struct pcmcia_device *link;
int stop;
@@ -286,9 +277,6 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- win_req_t *req = priv_data;
- memreq_t map;
-
if (cfg->index == 0)
return -ENODEV;
@@ -302,44 +290,25 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
}
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem =
- (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
- req->Attributes |= WIN_ENABLE;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- if (req->Size < 0x1000)
- req->Size = 0x1000;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(p_dev, req, &p_dev->win))
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
- return -ENODEV;
- }
/* If we got this far, we're cool! */
return 0;
}
@@ -348,11 +317,10 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void labpc_config(struct pcmcia_device *link)
{
int ret;
- win_req_t req;
dev_dbg(&link->dev, "labpc_config\n");
- ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, &req);
+ ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, NULL);
if (ret) {
dev_warn(&link->dev, "no configuration found\n");
goto failed;
@@ -374,15 +342,10 @@ static void labpc_config(struct pcmcia_device *link)
dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req.Base,
- req.Base + req.Size - 1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
return;
@@ -449,7 +412,7 @@ struct pcmcia_driver labpc_cs_driver = {
.id_table = labpc_cs_ids,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "daqcard-1200",
},
};
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 3a46f0c0bff..1f2426352eb 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -48,7 +48,6 @@ See the notes in the ni_atmio.o driver.
#include "ni_stc.h"
#include "8255.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -261,12 +260,11 @@ static void cs_release(struct pcmcia_device *link);
static void cs_detach(struct pcmcia_device *);
static struct pcmcia_device *cur_dev = NULL;
-static const dev_info_t dev_info = "ni_mio_cs";
static int cs_attach(struct pcmcia_device *link)
{
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->io.NumPorts1 = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->end = 16;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -311,13 +309,12 @@ static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev,
{
int base, ret;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
- p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
- p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
+ p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK;
for (base = 0x000; base < 0x400; base += 0x20) {
- p_dev->io.BasePort1 = base;
- ret = pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = base;
+ ret = pcmcia_request_io(p_dev);
if (!ret)
return 0;
}
@@ -356,7 +353,7 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
dev->driver = &driver_ni_mio_cs;
- dev->iobase = link->io.BasePort1;
+ dev->iobase = link->resource[0]->start;
irq = link->irq;
@@ -450,7 +447,7 @@ struct pcmcia_driver ni_mio_cs_driver = {
.id_table = ni_mio_cs_ids,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "ni_mio_cs",
},
};
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index b126638d33b..84a15c34e48 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1317,4 +1317,40 @@ static int nidio_find_device(struct comedi_device *dev, int bus, int slot)
return -EIO;
}
-COMEDI_PCI_INITCLEANUP(driver_pcidio, ni_pcidio_pci_table);
+static int __devinit driver_pcidio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pcidio.driver_name);
+}
+
+static void __devexit driver_pcidio_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pcidio_pci_driver = {
+ .id_table = ni_pcidio_pci_table,
+ .probe = &driver_pcidio_pci_probe,
+ .remove = __devexit_p(&driver_pcidio_pci_remove)
+};
+
+static int __init driver_pcidio_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pcidio);
+ if (retval < 0)
+ return retval;
+
+ driver_pcidio_pci_driver.name = (char *)driver_pcidio.driver_name;
+ return pci_register_driver(&driver_pcidio_pci_driver);
+}
+
+static void __exit driver_pcidio_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pcidio_pci_driver);
+ comedi_driver_unregister(&driver_pcidio);
+}
+
+module_init(driver_pcidio_init_module);
+module_exit(driver_pcidio_cleanup_module);
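
For reference, the COMEDI_PCI_INITCLEANUP() helper removed here expanded to essentially the boilerplate now spelled out by hand. A hedged reconstruction, mirroring the open-coded expansion in this hunk (the real definition lived in comedidev.h and may differ in detail):

/* Reconstruction inferred from the expansion above; not the literal header. */
#define COMEDI_PCI_INITCLEANUP(comedi_driver, pci_id_table)		\
	static int __devinit comedi_driver ## _pci_probe(struct pci_dev *dev, \
			const struct pci_device_id *ent)		\
	{								\
		return comedi_pci_auto_config(dev, comedi_driver.driver_name); \
	}								\
	static void __devexit comedi_driver ## _pci_remove(struct pci_dev *dev) \
	{								\
		comedi_pci_auto_unconfig(dev);				\
	}								\
	static struct pci_driver comedi_driver ## _pci_driver = {	\
		.id_table = pci_id_table,				\
		.probe = &comedi_driver ## _pci_probe,			\
		.remove = __devexit_p(&comedi_driver ## _pci_remove)	\
	};								\
	static int __init comedi_driver ## _init_module(void)		\
	{								\
		int retval;						\
		retval = comedi_driver_register(&comedi_driver);	\
		if (retval < 0)						\
			return retval;					\
		comedi_driver ## _pci_driver.name =			\
			(char *)comedi_driver.driver_name;		\
		return pci_register_driver(&comedi_driver ## _pci_driver); \
	}								\
	static void __exit comedi_driver ## _cleanup_module(void)	\
	{								\
		pci_unregister_driver(&comedi_driver ## _pci_driver);	\
		comedi_driver_unregister(&comedi_driver);		\
	}								\
	module_init(comedi_driver ## _init_module);			\
	module_exit(comedi_driver ## _cleanup_module);

Open-coding this per driver, as done for driver_pcidio above and driver_pcimio below, keeps the generated identifiers visible and greppable in each source file.
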
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 577fda84190..23a38124728 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1239,7 +1239,43 @@ static struct comedi_driver driver_pcimio = {
.detach = pcimio_detach,
};
-COMEDI_PCI_INITCLEANUP(driver_pcimio, ni_pci_table)
+static int __devinit driver_pcimio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_pcimio.driver_name);
+}
+
+static void __devexit driver_pcimio_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_pcimio_pci_driver = {
+ .id_table = ni_pci_table,
+ .probe = &driver_pcimio_pci_probe,
+ .remove = __devexit_p(&driver_pcimio_pci_remove)
+};
+
+static int __init driver_pcimio_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_pcimio);
+ if (retval < 0)
+ return retval;
+
+ driver_pcimio_pci_driver.name = (char *)driver_pcimio.driver_name;
+ return pci_register_driver(&driver_pcimio_pci_driver);
+}
+
+static void __exit driver_pcimio_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_pcimio_pci_driver);
+ comedi_driver_unregister(&driver_pcimio);
+}
+
+module_init(driver_pcimio_init_module);
+module_exit(driver_pcimio_cleanup_module);
struct ni_private {
NI_PRIVATE_COMMON};
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 13e5b264ff0..a9bb6b13dfc 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -302,7 +302,7 @@ struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device *dev,
ni_gpct_register
reg),
unsigned (*read_register)
- (struct ni_gpct * counter,
+ (struct ni_gpct *counter,
enum ni_gpct_register reg),
enum ni_gpct_variant variant,
unsigned num_counters)
@@ -332,6 +332,7 @@ struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device *dev,
counter_dev->num_counters = num_counters;
return counter_dev;
}
+EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
{
@@ -340,6 +341,7 @@ void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
kfree(counter_dev->counters);
kfree(counter_dev);
}
+EXPORT_SYMBOL_GPL(ni_gpct_device_destroy);
static int ni_tio_second_gate_registers_present(const struct ni_gpct_device
*counter_dev)
@@ -418,6 +420,7 @@ void ni_tio_init_counter(struct ni_gpct *counter)
NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
~0, 0x0);
}
+EXPORT_SYMBOL_GPL(ni_tio_init_counter);
static unsigned int ni_tio_counter_status(struct ni_gpct *counter)
{
@@ -446,9 +449,7 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
return;
- switch (ni_tio_get_soft_copy(counter,
- counting_mode_reg) & Gi_Counting_Mode_Mask)
- {
+ switch (ni_tio_get_soft_copy(counter, counting_mode_reg) & Gi_Counting_Mode_Mask) {
case Gi_Counting_Mode_QuadratureX1_Bits:
case Gi_Counting_Mode_QuadratureX2_Bits:
case Gi_Counting_Mode_QuadratureX4_Bits:
@@ -513,9 +514,8 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
counting_mode_bits |=
((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT) <<
Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask;
- if (mode & NI_GPCT_INDEX_ENABLE_BIT) {
+ if (mode & NI_GPCT_INDEX_ENABLE_BIT)
counting_mode_bits |= Gi_Index_Mode_Bit;
- }
ni_tio_set_bits(counter,
NITIO_Gi_Counting_Mode_Reg(counter->
counter_index),
@@ -529,12 +529,10 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) <<
Gi_Up_Down_Shift);
- if (mode & NI_GPCT_OR_GATE_BIT) {
+ if (mode & NI_GPCT_OR_GATE_BIT)
input_select_bits |= Gi_Or_Gate_Bit;
- }
- if (mode & NI_GPCT_INVERT_OUTPUT_BIT) {
+ if (mode & NI_GPCT_INVERT_OUTPUT_BIT)
input_select_bits |= Gi_Output_Polarity_Bit;
- }
ni_tio_set_bits(counter,
NITIO_Gi_Input_Select_Reg(counter->counter_index),
Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit |
@@ -600,6 +598,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
0, 0, command_transient_bits);
return 0;
}
+EXPORT_SYMBOL_GPL(ni_tio_arm);
static unsigned ni_660x_source_select_bits(unsigned int clock_source)
{
@@ -706,7 +705,7 @@ static unsigned ni_m_series_source_select_bits(unsigned int clock_source)
}
if (i <= ni_m_series_max_pfi_channel)
break;
- printk("invalid clock source 0x%lx\n",
+ printk(KERN_ERR "invalid clock source 0x%lx\n",
(unsigned long)clock_source);
BUG();
ni_m_series_clock = 0;
@@ -1026,14 +1025,12 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
const unsigned mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask;
unsigned mode_values = 0;
- if (gate_source & CR_INVERT) {
+ if (gate_source & CR_INVERT)
mode_values |= Gi_Gate_Polarity_Bit;
- }
- if (gate_source & CR_EDGE) {
+ if (gate_source & CR_EDGE)
mode_values |= Gi_Rising_Edge_Gating_Bits;
- } else {
+ else
mode_values |= Gi_Level_Gating_Bits;
- }
ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
mode_mask, mode_values);
}
@@ -1290,6 +1287,7 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
}
return 0;
}
+EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
unsigned int source)
@@ -1531,12 +1529,10 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
BUG();
break;
}
- if (mode_bits & Gi_Gate_Polarity_Bit) {
+ if (mode_bits & Gi_Gate_Polarity_Bit)
*gate_source |= CR_INVERT;
- }
- if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+ if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits)
*gate_source |= CR_EDGE;
- }
break;
case 1:
if ((mode_bits & Gi_Gating_Mode_Mask) == Gi_Gating_Disabled_Bits
@@ -1572,9 +1568,8 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
*gate_source |= CR_INVERT;
}
/* second gate can't have edge/level mode set independently */
- if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+ if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits)
*gate_source |= CR_EDGE;
- }
break;
default:
return -EINVAL;
@@ -1627,6 +1622,7 @@ int ni_tio_insn_config(struct ni_gpct *counter,
}
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(ni_tio_insn_config);
int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn,
unsigned int *data)
@@ -1681,6 +1677,7 @@ int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn,
};
return 0;
}
+EXPORT_SYMBOL_GPL(ni_tio_rinsn);
static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
{
@@ -1688,11 +1685,10 @@ static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
NITIO_Gxx_Status_Reg(counter->
counter_index));
- if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) {
+ if (bits & Gi_Next_Load_Source_Bit(counter->counter_index))
return NITIO_Gi_LoadB_Reg(counter->counter_index);
- } else {
+ else
return NITIO_Gi_LoadA_Reg(counter->counter_index);
- }
}
int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
@@ -1735,12 +1731,4 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
}
return 0;
}
-
-EXPORT_SYMBOL_GPL(ni_tio_rinsn);
EXPORT_SYMBOL_GPL(ni_tio_winsn);
-EXPORT_SYMBOL_GPL(ni_tio_insn_config);
-EXPORT_SYMBOL_GPL(ni_tio_init_counter);
-EXPORT_SYMBOL_GPL(ni_tio_arm);
-EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
-EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
-EXPORT_SYMBOL_GPL(ni_gpct_device_destroy);
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index a499f7070f7..b44386a6b63 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -171,7 +171,18 @@ static struct comedi_driver driver_pcl711 = {
.offset = sizeof(struct pcl711_board),
};
-COMEDI_INITCLEANUP(driver_pcl711);
+static int __init driver_pcl711_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl711);
+}
+
+static void __exit driver_pcl711_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl711);
+}
+
+module_init(driver_pcl711_init_module);
+module_exit(driver_pcl711_cleanup_module);
struct pcl711_private {
@@ -270,7 +281,7 @@ static int pcl711_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
goto ok;
udelay(1);
}
- printk("comedi%d: pcl711: A/D timeout\n", dev->minor);
+ printk(KERN_ERR "comedi%d: pcl711: A/D timeout\n", dev->minor);
return -ETIME;
ok:
@@ -505,7 +516,7 @@ static int pcl711_do_insn_bits(struct comedi_device *dev,
/* Free any resources that we have claimed */
static int pcl711_detach(struct comedi_device *dev)
{
- printk("comedi%d: pcl711: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: pcl711: remove\n", dev->minor);
if (dev->irq)
free_irq(dev->irq, dev);
@@ -527,7 +538,7 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* claim our I/O space */
iobase = it->options[0];
- printk("comedi%d: pcl711: 0x%04lx ", dev->minor, iobase);
+ printk(KERN_INFO "comedi%d: pcl711: 0x%04lx ", dev->minor, iobase);
if (!request_region(iobase, PCL711_SIZE, "pcl711")) {
printk("I/O port conflict\n");
return -EIO;
@@ -542,15 +553,15 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* grab our IRQ */
irq = it->options[1];
if (irq > this_board->maxirq) {
- printk("irq out of range\n");
+ printk(KERN_ERR "irq out of range\n");
return -EINVAL;
}
if (irq) {
if (request_irq(irq, pcl711_interrupt, 0, "pcl711", dev)) {
- printk("unable to allocate irq %u\n", irq);
+ printk(KERN_ERR "unable to allocate irq %u\n", irq);
return -EINVAL;
} else {
- printk("( irq = %u )\n", irq);
+ printk(KERN_INFO "( irq = %u )\n", irq);
}
}
dev->irq = irq;
@@ -624,7 +635,11 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outb(0, dev->iobase + PCL711_DA1_LO);
outb(0, dev->iobase + PCL711_DA1_HI);
- printk("\n");
+ printk(KERN_INFO "\n");
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl724.c b/drivers/staging/comedi/drivers/pcl724.c
index 0f103c32806..396a058bb67 100644
--- a/drivers/staging/comedi/drivers/pcl724.c
+++ b/drivers/staging/comedi/drivers/pcl724.c
@@ -93,7 +93,18 @@ static struct comedi_driver driver_pcl724 = {
.offset = sizeof(struct pcl724_board),
};
-COMEDI_INITCLEANUP(driver_pcl724);
+static int __init driver_pcl724_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl724);
+}
+
+static void __exit driver_pcl724_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl724);
+}
+
+module_init(driver_pcl724_init_module);
+module_exit(driver_pcl724_cleanup_module);
static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
{
@@ -221,3 +232,7 @@ static int pcl724_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl725.c b/drivers/staging/comedi/drivers/pcl725.c
index 60261f4ba5b..24b223ca439 100644
--- a/drivers/staging/comedi/drivers/pcl725.c
+++ b/drivers/staging/comedi/drivers/pcl725.c
@@ -30,7 +30,18 @@ static struct comedi_driver driver_pcl725 = {
.detach = pcl725_detach,
};
-COMEDI_INITCLEANUP(driver_pcl725);
+static int __init driver_pcl725_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl725);
+}
+
+static void __exit driver_pcl725_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl725);
+}
+
+module_init(driver_pcl725_init_module);
+module_exit(driver_pcl725_cleanup_module);
static int pcl725_do_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -110,3 +121,7 @@ static int pcl725_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index 6a1a9790a90..897cd808eeb 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -162,7 +162,18 @@ static struct comedi_driver driver_pcl726 = {
.offset = sizeof(struct pcl726_board),
};
-COMEDI_INITCLEANUP(driver_pcl726);
+static int __init driver_pcl726_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl726);
+}
+
+static void __exit driver_pcl726_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl726);
+}
+
+module_init(driver_pcl726_init_module);
+module_exit(driver_pcl726_cleanup_module);
struct pcl726_private {
@@ -381,3 +392,7 @@ static int pcl726_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index e5e7bed21de..c9682d614e0 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -55,7 +55,18 @@ static struct comedi_driver driver_pcl730 = {
.offset = sizeof(struct pcl730_board),
};
-COMEDI_INITCLEANUP(driver_pcl730);
+static int __init driver_pcl730_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl730);
+}
+
+static void __exit driver_pcl730_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl730);
+}
+
+module_init(driver_pcl730_init_module);
+module_exit(driver_pcl730_cleanup_module);
static int pcl730_do_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -166,3 +177,7 @@ static int pcl730_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 1ddc19c705a..c6dce4a1425 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -15,97 +15,98 @@
* card: A-823PGH, A-823PGL, A-826PG
* driver: a823pgh, a823pgl, a826pg
*/
+
/*
-Driver: pcl812
-Description: Advantech PCL-812/PG, PCL-813/B,
- ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216,
- ICP DAS A-821PGH/PGL/PGL-NDA, A-822PGH/PGL, A-823PGH/PGL, A-826PG,
- ICP DAS ISO-813
-Author: Michal Dobes <dobes@tesnet.cz>
-Devices: [Advantech] PCL-812 (pcl812), PCL-812PG (pcl812pg),
- PCL-813 (pcl813), PCL-813B (pcl813b), [ADLink] ACL-8112DG (acl8112dg),
- ACL-8112HG (acl8112hg), ACL-8113 (acl-8113), ACL-8216 (acl8216),
- [ICP] ISO-813 (iso813), A-821PGH (a821pgh), A-821PGL (a821pgl),
- A-821PGL-NDA (a821pclnda), A-822PGH (a822pgh), A-822PGL (a822pgl),
- A-823PGH (a823pgh), A-823PGL (a823pgl), A-826PG (a826pg)
-Updated: Mon, 06 Aug 2007 12:03:15 +0100
-Status: works (I hope. My board fire up under my hands
- and I cann't test all features.)
-
-This driver supports insn and cmd interfaces. Some boards support only insn
-becouse their hardware don't allow more (PCL-813/B, ACL-8113, ISO-813).
-Data transfer over DMA is supported only when you measure only one
-channel, this is too hardware limitation of these boards.
-
-Options for PCL-812:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0=trigger source is internal 8253 with 2MHz clock
- 1=trigger source is external
- [4] - 0=A/D input range is +/-10V
- 1=A/D input range is +/-5V
- 2=A/D input range is +/-2.5V
- 3=A/D input range is +/-1.25V
- 4=A/D input range is +/-0.625V
- 5=A/D input range is +/-0.3125V
- [5] - 0=D/A outputs 0-5V (internal reference -5V)
- 1=D/A outputs 0-10V (internal reference -10V)
- 2=D/A outputs unknown (external reference)
-
-Options for PCL-812PG, ACL-8112PG:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0=trigger source is internal 8253 with 2MHz clock
- 1=trigger source is external
- [4] - 0=A/D have max +/-5V input
- 1=A/D have max +/-10V input
- [5] - 0=D/A outputs 0-5V (internal reference -5V)
- 1=D/A outputs 0-10V (internal reference -10V)
- 2=D/A outputs unknown (external reference)
-
-Options for ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH, ACL-8216, A-826PG:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0=trigger source is internal 8253 with 2MHz clock
- 1=trigger source is external
- [4] - 0=A/D channels are S.E.
- 1=A/D channels are DIFF
- [5] - 0=D/A outputs 0-5V (internal reference -5V)
- 1=D/A outputs 0-10V (internal reference -10V)
- 2=D/A outputs unknown (external reference)
-
-Options for A-821PGL/PGH:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - 0=A/D channels are S.E.
- 1=A/D channels are DIFF
- [3] - 0=D/A output 0-5V (internal reference -5V)
- 1=D/A output 0-10V (internal reference -10V)
-
-Options for A-821PGL-NDA:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - 0=A/D channels are S.E.
- 1=A/D channels are DIFF
-
-Options for PCL-813:
- [0] - IO Base
-
-Options for PCL-813B:
- [0] - IO Base
- [1] - 0= bipolar inputs
- 1= unipolar inputs
-
-Options for ACL-8113, ISO-813:
- [0] - IO Base
- [1] - 0= 10V bipolar inputs
- 1= 10V unipolar inputs
- 2= 20V bipolar inputs
- 3= 20V unipolar inputs
-*/
+ * Driver: pcl812
+ * Description: Advantech PCL-812/PG, PCL-813/B,
+ * ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216,
+ * ICP DAS A-821PGH/PGL/PGL-NDA, A-822PGH/PGL, A-823PGH/PGL, A-826PG,
+ * ICP DAS ISO-813
+ * Author: Michal Dobes <dobes@tesnet.cz>
+ * Devices: [Advantech] PCL-812 (pcl812), PCL-812PG (pcl812pg),
+ * PCL-813 (pcl813), PCL-813B (pcl813b), [ADLink] ACL-8112DG (acl8112dg),
+ * ACL-8112HG (acl8112hg), ACL-8113 (acl-8113), ACL-8216 (acl8216),
+ * [ICP] ISO-813 (iso813), A-821PGH (a821pgh), A-821PGL (a821pgl),
+ * A-821PGL-NDA (a821pclnda), A-822PGH (a822pgh), A-822PGL (a822pgl),
+ * A-823PGH (a823pgh), A-823PGL (a823pgl), A-826PG (a826pg)
+ * Updated: Mon, 06 Aug 2007 12:03:15 +0100
+ * Status: works (I hope. My board fired up under my hands
+ * and I can't test all features.)
+ *
+ * This driver supports insn and cmd interfaces. Some boards support only insn
+ * because their hardware doesn't allow more (PCL-813/B, ACL-8113, ISO-813).
+ * Data transfer over DMA is supported only when you measure only one
+ * channel; this is a hardware limitation of these boards.
+ *
+ * Options for PCL-812:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
+ * [2] - DMA (0=disable, 1, 3)
+ * [3] - 0=trigger source is internal 8253 with 2MHz clock
+ * 1=trigger source is external
+ * [4] - 0=A/D input range is +/-10V
+ * 1=A/D input range is +/-5V
+ * 2=A/D input range is +/-2.5V
+ * 3=A/D input range is +/-1.25V
+ * 4=A/D input range is +/-0.625V
+ * 5=A/D input range is +/-0.3125V
+ * [5] - 0=D/A outputs 0-5V (internal reference -5V)
+ * 1=D/A outputs 0-10V (internal reference -10V)
+ * 2=D/A outputs unknown (external reference)
+ *
+ * Options for PCL-812PG, ACL-8112PG:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
+ * [2] - DMA (0=disable, 1, 3)
+ * [3] - 0=trigger source is internal 8253 with 2MHz clock
+ * 1=trigger source is external
+ * [4] - 0=A/D have max +/-5V input
+ * 1=A/D have max +/-10V input
+ * [5] - 0=D/A outputs 0-5V (internal reference -5V)
+ * 1=D/A outputs 0-10V (internal reference -10V)
+ * 2=D/A outputs unknown (external reference)
+ *
+ * Options for ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH, ACL-8216, A-826PG:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
+ * [2] - DMA (0=disable, 1, 3)
+ * [3] - 0=trigger source is internal 8253 with 2MHz clock
+ * 1=trigger source is external
+ * [4] - 0=A/D channels are S.E.
+ * 1=A/D channels are DIFF
+ * [5] - 0=D/A outputs 0-5V (internal reference -5V)
+ * 1=D/A outputs 0-10V (internal reference -10V)
+ * 2=D/A outputs unknown (external reference)
+ *
+ * Options for A-821PGL/PGH:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
+ * [2] - 0=A/D channels are S.E.
+ * 1=A/D channels are DIFF
+ * [3] - 0=D/A output 0-5V (internal reference -5V)
+ * 1=D/A output 0-10V (internal reference -10V)
+ *
+ * Options for A-821PGL-NDA:
+ * [0] - IO Base
+ * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
+ * [2] - 0=A/D channels are S.E.
+ * 1=A/D channels are DIFF
+ *
+ * Options for PCL-813:
+ * [0] - IO Base
+ *
+ * Options for PCL-813B:
+ * [0] - IO Base
+ * [1] - 0= bipolar inputs
+ * 1= unipolar inputs
+ *
+ * Options for ACL-8113, ISO-813:
+ * [0] - IO Base
+ * [1] - 0= 10V bipolar inputs
+ * 1= 10V unipolar inputs
+ * 2= 20V bipolar inputs
+ * 3= 20V unipolar inputs
+ */
#include <linux/interrupt.h>
#include <linux/gfp.h>
@@ -117,49 +118,50 @@ Options for ACL-8113, ISO-813:
#include "8253.h"
-#undef PCL812_EXTDEBUG /* if this is defined then a lot of messages is printed */
+/* if this is defined then a lot of messages is printed */
+#undef PCL812_EXTDEBUG
/* hardware types of the cards */
-#define boardPCL812PG 0 /* and ACL-8112PG */
-#define boardPCL813B 1
-#define boardPCL812 2
-#define boardPCL813 3
-#define boardISO813 5
-#define boardACL8113 6
-#define boardACL8112 7 /* ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH */
-#define boardACL8216 8 /* and ICP DAS A-826PG */
-#define boardA821 9 /* PGH, PGL, PGL/NDA versions */
-
-#define PCLx1x_IORANGE 16
-
-#define PCL812_CTR0 0
-#define PCL812_CTR1 1
-#define PCL812_CTR2 2
-#define PCL812_CTRCTL 3
-#define PCL812_AD_LO 4
-#define PCL812_DA1_LO 4
-#define PCL812_AD_HI 5
-#define PCL812_DA1_HI 5
-#define PCL812_DA2_LO 6
-#define PCL812_DI_LO 6
-#define PCL812_DA2_HI 7
-#define PCL812_DI_HI 7
-#define PCL812_CLRINT 8
-#define PCL812_GAIN 9
-#define PCL812_MUX 10
-#define PCL812_MODE 11
-#define PCL812_CNTENABLE 10
-#define PCL812_SOFTTRIG 12
-#define PCL812_DO_LO 13
-#define PCL812_DO_HI 14
-
-#define PCL812_DRDY 0x10 /* =0 data ready */
-
-#define ACL8216_STATUS 8 /* 5. bit signalize data ready */
-
-#define ACL8216_DRDY 0x20 /* =0 data ready */
-
-#define MAX_CHANLIST_LEN 256 /* length of scan list */
+#define boardPCL812PG 0 /* and ACL-8112PG */
+#define boardPCL813B 1
+#define boardPCL812 2
+#define boardPCL813 3
+#define boardISO813 5
+#define boardACL8113 6
+#define boardACL8112 7 /* ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH */
+#define boardACL8216 8 /* and ICP DAS A-826PG */
+#define boardA821 9 /* PGH, PGL, PGL/NDA versions */
+
+#define PCLx1x_IORANGE 16
+
+#define PCL812_CTR0 0
+#define PCL812_CTR1 1
+#define PCL812_CTR2 2
+#define PCL812_CTRCTL 3
+#define PCL812_AD_LO 4
+#define PCL812_DA1_LO 4
+#define PCL812_AD_HI 5
+#define PCL812_DA1_HI 5
+#define PCL812_DA2_LO 6
+#define PCL812_DI_LO 6
+#define PCL812_DA2_HI 7
+#define PCL812_DI_HI 7
+#define PCL812_CLRINT 8
+#define PCL812_GAIN 9
+#define PCL812_MUX 10
+#define PCL812_MODE 11
+#define PCL812_CNTENABLE 10
+#define PCL812_SOFTTRIG 12
+#define PCL812_DO_LO 13
+#define PCL812_DO_HI 14
+
+#define PCL812_DRDY 0x10 /* =0 data ready */
+
+#define ACL8216_STATUS 8 /* 5. bit signalize data ready */
+
+#define ACL8216_DRDY 0x20 /* =0 data ready */
+
+#define MAX_CHANLIST_LEN 256 /* length of scan list */
static const struct comedi_lrange range_pcl812pg_ai = { 5, {
BIP_RANGE(5),
@@ -407,7 +409,18 @@ static struct comedi_driver driver_pcl812 = {
.offset = sizeof(struct pcl812_board),
};
-COMEDI_INITCLEANUP(driver_pcl812);
+static int __init driver_pcl812_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl812);
+}
+
+static void __exit driver_pcl812_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl812);
+}
+
+module_init(driver_pcl812_init_module);
+module_exit(driver_pcl812_cleanup_module);
struct pcl812_private {
@@ -466,10 +479,13 @@ static int pcl812_ai_insn_read(struct comedi_device *dev,
int n;
int timeout, hi;
- outb(devpriv->mode_reg_int | 1, dev->iobase + PCL812_MODE); /* select software trigger */
- setup_range_channel(dev, s, insn->chanspec, 1); /* select channel and renge */
+ /* select software trigger */
+ outb(devpriv->mode_reg_int | 1, dev->iobase + PCL812_MODE);
+ /* select channel and range */
+ setup_range_channel(dev, s, insn->chanspec, 1);
for (n = 0; n < insn->n; n++) {
- outb(255, dev->iobase + PCL812_SOFTTRIG); /* start conversion */
+ /* start conversion */
+ outb(255, dev->iobase + PCL812_SOFTTRIG);
udelay(5);
timeout = 50; /* wait max 50us, it must finish under 33us */
while (timeout--) {
@@ -501,10 +517,13 @@ static int acl8216_ai_insn_read(struct comedi_device *dev,
int n;
int timeout;
- outb(1, dev->iobase + PCL812_MODE); /* select software trigger */
- setup_range_channel(dev, s, insn->chanspec, 1); /* select channel and renge */
+ /* select software trigger */
+ outb(1, dev->iobase + PCL812_MODE);
+ /* select channel and range */
+ setup_range_channel(dev, s, insn->chanspec, 1);
for (n = 0; n < insn->n; n++) {
- outb(255, dev->iobase + PCL812_SOFTTRIG); /* start conversion */
+ /* start conversion */
+ outb(255, dev->iobase + PCL812_SOFTTRIG);
udelay(5);
timeout = 50; /* wait max 50us, it must finish under 33us */
while (timeout--) {
@@ -558,9 +577,8 @@ static int pcl812_ao_insn_read(struct comedi_device *dev,
int chan = CR_CHAN(insn->chanspec);
int i;
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
return i;
}
@@ -608,14 +626,15 @@ static int pcl812_do_insn_bits(struct comedi_device *dev,
*/
static void pcl812_cmdtest_out(int e, struct comedi_cmd *cmd)
{
- printk("pcl812 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
+ printk(KERN_INFO "pcl812 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
- printk("pcl812 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
+ printk(KERN_INFO "pcl812 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
- printk("pcl812 e=%d stopsrc=%x scanend=%x\n", e, cmd->stop_src,
- cmd->scan_end_src);
- printk("pcl812 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n", e,
- cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
+ printk(KERN_INFO "pcl812 e=%d stopsrc=%x scanend=%x\n", e,
+ cmd->stop_src, cmd->scan_end_src);
+ printk(KERN_INFO "pcl812 e=%d stoparg=%d scanendarg=%d "
+ "chanlistlen=%d\n", e, cmd->stop_arg, cmd->scan_end_arg,
+ cmd->chanlist_len);
}
#endif
@@ -645,11 +664,11 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
err++;
tmp = cmd->convert_src;
- if (devpriv->use_ext_trg) {
+ if (devpriv->use_ext_trg)
cmd->convert_src &= TRIG_EXT;
- } else {
+ else
cmd->convert_src &= TRIG_TIMER;
- }
+
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
@@ -673,7 +692,10 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
return 1;
}
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /*
+ * step 2: make sure trigger sources are
+ * unique and mutually compatible
+ */
if (cmd->start_src != TRIG_NOW) {
cmd->start_src = TRIG_NOW;
@@ -807,7 +829,7 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: BGN: pcl812_ai_cmd(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: BGN: pcl812_ai_cmd(...)\n");
#endif
if (cmd->start_src != TRIG_NOW)
@@ -842,13 +864,15 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_n_chan = cmd->chanlist_len;
memcpy(devpriv->ai_chanlist, cmd->chanlist,
sizeof(unsigned int) * cmd->scan_end_arg);
- setup_range_channel(dev, s, devpriv->ai_chanlist[0], 1); /* select first channel and range */
+ /* select first channel and range */
+ setup_range_channel(dev, s, devpriv->ai_chanlist[0], 1);
if (devpriv->dma) { /* check if we can use DMA transfer */
devpriv->ai_dma = 1;
for (i = 1; i < devpriv->ai_n_chan; i++)
if (devpriv->ai_chanlist[0] != devpriv->ai_chanlist[i]) {
- devpriv->ai_dma = 0; /* we cann't use DMA :-( */
+ /* we can't use DMA :-( */
+ devpriv->ai_dma = 0;
break;
}
} else
@@ -869,14 +893,18 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_poll_ptr = 0;
s->async->cur_chan = 0;
- if ((devpriv->ai_flags & TRIG_WAKE_EOS)) { /* don't we want wake up every scan? */
+ /* does the user want a wakeup on every scan? */
+ if ((devpriv->ai_flags & TRIG_WAKE_EOS)) {
devpriv->ai_eos = 1;
+
+ /* DMA is useless for this situation */
if (devpriv->ai_n_chan == 1)
- devpriv->ai_dma = 0; /* DMA is useless for this situation */
+ devpriv->ai_dma = 0;
}
if (devpriv->ai_dma) {
- if (devpriv->ai_eos) { /* we use EOS, so adapt DMA buffer to one scan */
+ /* we use EOS, so adapt DMA buffer to one scan */
+ if (devpriv->ai_eos) {
devpriv->dmabytestomove[0] =
devpriv->ai_n_chan * sizeof(short);
devpriv->dmabytestomove[1] =
@@ -894,9 +922,17 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ai_neverending) {
devpriv->dma_runs_to_end = 1;
} else {
- bytes = devpriv->ai_n_chan * devpriv->ai_scans * sizeof(short); /* how many samples we must transfer? */
- devpriv->dma_runs_to_end = bytes / devpriv->dmabytestomove[0]; /* how many DMA pages we must fill */
- devpriv->last_dma_run = bytes % devpriv->dmabytestomove[0]; /* on last dma transfer must be moved */
+ /* how many samples we must transfer? */
+ bytes = devpriv->ai_n_chan *
+ devpriv->ai_scans * sizeof(short);
+
+ /* how many DMA pages we must fill */
+ devpriv->dma_runs_to_end =
+ bytes / devpriv->dmabytestomove[0];
+
+ /* on last dma transfer must be moved */
+ devpriv->last_dma_run =
+ bytes % devpriv->dmabytestomove[0];
if (devpriv->dma_runs_to_end == 0)
devpriv->dmabytestomove[0] =
devpriv->last_dma_run;
@@ -934,14 +970,13 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- if (devpriv->ai_dma) {
- outb(devpriv->mode_reg_int | 2, dev->iobase + PCL812_MODE); /* let's go! */
- } else {
- outb(devpriv->mode_reg_int | 6, dev->iobase + PCL812_MODE); /* let's go! */
- }
+ if (devpriv->ai_dma) /* let's go! */
+ outb(devpriv->mode_reg_int | 2, dev->iobase + PCL812_MODE);
+ else /* let's go! */
+ outb(devpriv->mode_reg_int | 6, dev->iobase + PCL812_MODE);
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: END: pcl812_ai_cmd(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: END: pcl812_ai_cmd(...)\n");
#endif
return 0;
@@ -983,7 +1018,8 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
if (err) {
printk
- ("comedi%d: pcl812: (%s at 0x%lx) A/D cmd IRQ without DRDY!\n",
+ ("comedi%d: pcl812: (%s at 0x%lx) "
+ "A/D cmd IRQ without DRDY!\n",
dev->minor, dev->board_name, dev->iobase);
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
@@ -1009,7 +1045,8 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
if (next_chan == 0) { /* one scan done */
devpriv->ai_act_scan++;
if (!(devpriv->ai_neverending))
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) {
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
}
@@ -1030,14 +1067,16 @@ static void transfer_from_dma_buf(struct comedi_device *dev,
s->async->events = 0;
for (i = len; i; i--) {
- comedi_buf_put(s->async, ptr[bufptr++]); /* get one sample */
+ /* get one sample */
+ comedi_buf_put(s->async, ptr[bufptr++]);
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
if (!devpriv->ai_neverending)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) {
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
break;
@@ -1060,7 +1099,7 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
short *ptr;
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: BGN: interrupt_pcl812_ai_dma(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: BGN: interrupt_pcl812_ai_dma(...)\n");
#endif
ptr = (short *)devpriv->dmabuf[devpriv->next_dma_buf];
len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
@@ -1095,7 +1134,7 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: END: interrupt_pcl812_ai_dma(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: END: interrupt_pcl812_ai_dma(...)\n");
#endif
return IRQ_HANDLED;
}
@@ -1111,11 +1150,10 @@ static irqreturn_t interrupt_pcl812(int irq, void *d)
comedi_error(dev, "spurious interrupt");
return IRQ_HANDLED;
}
- if (devpriv->ai_dma) {
+ if (devpriv->ai_dma)
return interrupt_pcl812_ai_dma(irq, d);
- } else {
+ else
return interrupt_pcl812_ai_int(irq, d);
- };
}
/*
@@ -1132,7 +1170,8 @@ static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&dev->spinlock, flags);
for (i = 0; i < 10; i++) {
- top1 = get_dma_residue(devpriv->ai_dma); /* where is now DMA */
+ /* where the DMA is now */
+ top1 = get_dma_residue(devpriv->ai_dma);
top2 = get_dma_residue(devpriv->ai_dma);
if (top1 == top2)
break;
@@ -1142,8 +1181,8 @@ static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
-
- top1 = devpriv->dmabytestomove[1 - devpriv->next_dma_buf] - top1; /* where is now DMA in buffer */
+ /* where the DMA is now in the buffer */
+ top1 = devpriv->dmabytestomove[1 - devpriv->next_dma_buf] - top1;
top1 >>= 1; /* sample position */
top2 = top1 - devpriv->ai_poll_ptr;
if (top2 < 1) { /* no new samples */
@@ -1171,7 +1210,9 @@ static void setup_range_channel(struct comedi_device *dev,
unsigned int rangechan, char wait)
{
unsigned char chan_reg = CR_CHAN(rangechan); /* normal board */
- unsigned char gain_reg = CR_RANGE(rangechan) + devpriv->range_correction; /* gain index */
+ /* gain index */
+ unsigned char gain_reg = CR_RANGE(rangechan) +
+ devpriv->range_correction;
if ((chan_reg == devpriv->old_chan_reg)
&& (gain_reg == devpriv->old_gain_reg))
@@ -1184,20 +1225,25 @@ static void setup_range_channel(struct comedi_device *dev,
if (devpriv->use_diff) {
chan_reg = chan_reg | 0x30; /* DIFF inputs */
} else {
- if (chan_reg & 0x80) {
- chan_reg = chan_reg | 0x20; /* SE inputs 8-15 */
- } else {
- chan_reg = chan_reg | 0x10; /* SE inputs 0-7 */
- }
+ if (chan_reg & 0x80)
+ /* SE inputs 8-15 */
+ chan_reg = chan_reg | 0x20;
+ else
+ /* SE inputs 0-7 */
+ chan_reg = chan_reg | 0x10;
}
}
outb(chan_reg, dev->iobase + PCL812_MUX); /* select channel */
outb(gain_reg, dev->iobase + PCL812_GAIN); /* select gain */
- if (wait) {
- udelay(devpriv->max_812_ai_mode0_rangewait); /* XXX this depends on selected range and can be very long for some high gain ranges! */
- }
+
+ if (wait)
+ /*
+ * XXX this depends on selected range and can be very long for
+ * some high gain ranges!
+ */
+ udelay(devpriv->max_812_ai_mode0_rangewait);
}
/*
@@ -1207,8 +1253,8 @@ static void start_pacer(struct comedi_device *dev, int mode,
unsigned int divisor1, unsigned int divisor2)
{
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: BGN: start_pacer(%d,%u,%u)\n", mode, divisor1,
- divisor2);
+ printk(KERN_DEBUG "pcl812 EDBG: BGN: start_pacer(%d,%u,%u)\n", mode,
+ divisor1, divisor2);
#endif
outb(0xb4, dev->iobase + PCL812_CTRCTL);
outb(0x74, dev->iobase + PCL812_CTRCTL);
@@ -1221,7 +1267,7 @@ static void start_pacer(struct comedi_device *dev, int mode,
outb((divisor1 >> 8) & 0xff, dev->iobase + PCL812_CTR1);
}
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: END: start_pacer(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: END: start_pacer(...)\n");
#endif
}
@@ -1252,16 +1298,17 @@ static int pcl812_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: BGN: pcl812_ai_cancel(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: BGN: pcl812_ai_cancel(...)\n");
#endif
if (devpriv->ai_dma)
disable_dma(devpriv->dma);
outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE); /* Stop A/D */
+ /* Stop A/D */
+ outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
start_pacer(dev, -1, 0, 0); /* stop 8254 */
outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: END: pcl812_ai_cancel(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: END: pcl812_ai_cancel(...)\n");
#endif
return 0;
}
@@ -1272,7 +1319,7 @@ static int pcl812_ai_cancel(struct comedi_device *dev,
static void pcl812_reset(struct comedi_device *dev)
{
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: BGN: pcl812_reset(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: BGN: pcl812_reset(...)\n");
#endif
outb(0, dev->iobase + PCL812_MUX);
outb(0 + devpriv->range_correction, dev->iobase + PCL812_GAIN);
@@ -1304,7 +1351,7 @@ static void pcl812_reset(struct comedi_device *dev)
}
udelay(5);
#ifdef PCL812_EXTDEBUG
- printk("pcl812 EDBG: END: pcl812_reset(...)\n");
+ printk(KERN_DEBUG "pcl812 EDBG: END: pcl812_reset(...)\n");
#endif
}
@@ -1322,8 +1369,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int n_subdevices;
iobase = it->options[0];
- printk("comedi%d: pcl812: board=%s, ioport=0x%03lx", dev->minor,
- this_board->name, iobase);
+ printk(KERN_INFO "comedi%d: pcl812: board=%s, ioport=0x%03lx",
+ dev->minor, this_board->name, iobase);
if (!request_region(iobase, this_board->io_range, "pcl812")) {
printk("I/O port conflict\n");
@@ -1345,18 +1392,18 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (irq) { /* we want to use IRQ */
if (((1 << irq) & this_board->IRQbits) == 0) {
printk
- (", IRQ %u is out of allowed range, DISABLING IT",
- irq);
+ (", IRQ %u is out of allowed range, "
+ "DISABLING IT", irq);
irq = 0; /* Bad IRQ */
} else {
if (request_irq
(irq, interrupt_pcl812, 0, "pcl812", dev)) {
printk
- (", unable to allocate IRQ %u, DISABLING IT",
- irq);
+ (", unable to allocate IRQ %u, "
+ "DISABLING IT", irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ printk(KERN_INFO ", irq=%u", irq);
}
}
}
@@ -1376,16 +1423,20 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
ret = request_dma(dma, "pcl812");
if (ret) {
- printk(", unable to allocate DMA %u, FAIL!\n", dma);
+ printk(KERN_ERR ", unable to allocate DMA %u, FAIL!\n",
+ dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
- printk(", dma=%u", dma);
+ printk(KERN_INFO ", dma=%u", dma);
pages = 1; /* we want 8KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
printk(", unable to allocate DMA buffer, FAIL!\n");
- /* maybe experiment with try_to_free_pages() will help .... */
+ /*
+ * maybe experiment with try_to_free_pages()
+ * will help ....
+ */
free_resources(dev);
return -EBUSY; /* no buffer :-( */
}
@@ -1394,7 +1445,7 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages);
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ printk(KERN_ERR ", unable to allocate DMA buffer, FAIL!\n");
free_resources(dev);
return -EBUSY;
}
@@ -1457,11 +1508,11 @@ no_dma:
s->maxdata = this_board->ai_maxdata;
s->len_chanlist = MAX_CHANLIST_LEN;
s->range_table = this_board->rangelist_ai;
- if (this_board->board_type == boardACL8216) {
+ if (this_board->board_type == boardACL8216)
s->insn_read = acl8216_ai_insn_read;
- } else {
+ else
s->insn_read = pcl812_ai_insn_read;
- }
+
devpriv->use_MPC = this_board->haveMPC508;
s->cancel = pcl812_ai_cancel;
if (dev->irq) {
@@ -1500,8 +1551,8 @@ no_dma:
s->range_table = &range_bipolar10;
break;
printk
- (", incorrect range number %d, changing to 0 (+/-10V)",
- it->options[4]);
+ (", incorrect range number %d, changing "
+ "to 0 (+/-10V)", it->options[4]);
break;
}
break;
@@ -1530,8 +1581,8 @@ no_dma:
s->range_table = &range_iso813_1_ai;
break;
printk
- (", incorrect range number %d, changing to 0 ",
- it->options[1]);
+ (", incorrect range number %d, "
+ "changing to 0 ", it->options[1]);
break;
}
break;
@@ -1555,8 +1606,8 @@ no_dma:
s->range_table = &range_acl8113_1_ai;
break;
printk
- (", incorrect range number %d, changing to 0 ",
- it->options[1]);
+ (", incorrect range number %d, "
+ "changing to 0 ", it->options[1]);
break;
}
break;
@@ -1627,7 +1678,8 @@ no_dma:
case boardACL8112:
devpriv->max_812_ai_mode0_rangewait = 1;
if (it->options[3] > 0)
- devpriv->use_ext_trg = 1; /* we use external trigger */
+ /* we use external trigger */
+ devpriv->use_ext_trg = 1;
case boardA821:
devpriv->max_812_ai_mode0_rangewait = 1;
devpriv->mode_reg_int = (irq << 4) & 0xf0;
@@ -1636,11 +1688,12 @@ no_dma:
case boardPCL813:
case boardISO813:
case boardACL8113:
- devpriv->max_812_ai_mode0_rangewait = 5; /* maybe there must by greatest timeout */
+ /* maybe a greater timeout is needed here */
+ devpriv->max_812_ai_mode0_rangewait = 5;
break;
}
- printk("\n");
+ printk(KERN_INFO "\n");
devpriv->valid = 1;
pcl812_reset(dev);
@@ -1655,8 +1708,12 @@ static int pcl812_detach(struct comedi_device *dev)
{
#ifdef PCL812_EXTDEBUG
- printk("comedi%d: pcl812: remove\n", dev->minor);
+ printk(KERN_DEBUG "comedi%d: pcl812: remove\n", dev->minor);
#endif
free_resources(dev);
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 71c2a3aa379..3d0f018faa6 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -2,7 +2,7 @@
comedi/drivers/pcl816.c
Author: Juan Grigera <juan@grigera.com.ar>
- based on pcl818 by Michal Dobes <dobes@tesnet.cz> and bits of pcl812
+ based on pcl818 by Michal Dobes <dobes@tesnet.cz> and bits of pcl812
hardware driver for Advantech cards:
card: PCL-816, PCL814B
@@ -28,7 +28,7 @@ Configuration Options:
[1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
[2] - DMA (0=disable, 1, 3)
[3] - 0, 10=10MHz clock for 8254
- 1= 1MHz clock for 8254
+ 1= 1MHz clock for 8254
*/
@@ -85,7 +85,7 @@ Configuration Options:
#define INT_TYPE_AI3_DMA_RTC 10
/* RTC stuff... */
-#define RTC_IRQ 8
+#define RTC_IRQ 8
#define RTC_IO_EXTENT 0x10
#endif
@@ -168,7 +168,18 @@ static struct comedi_driver driver_pcl816 = {
.offset = sizeof(struct pcl816_board),
};
-COMEDI_INITCLEANUP(driver_pcl816);
+static int __init driver_pcl816_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl816);
+}
+
+static void __exit driver_pcl816_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl816);
+}
+
+module_init(driver_pcl816_init_module);
+module_exit(driver_pcl816_cleanup_module);
struct pcl816_private {
@@ -253,7 +264,8 @@ static int pcl816_ai_insn_read(struct comedi_device *dev,
/* Set the input channel */
outb(CR_CHAN(insn->chanspec) & 0xf, dev->iobase + PCL816_MUX);
- outb(CR_RANGE(insn->chanspec), dev->iobase + PCL816_RANGE); /* select gain */
+ /* select gain */
+ outb(CR_RANGE(insn->chanspec), dev->iobase + PCL816_RANGE);
for (n = 0; n < insn->n; n++) {
@@ -268,8 +280,8 @@ static int pcl816_ai_insn_read(struct comedi_device *dev,
((inb(dev->iobase +
PCL816_AD_HI) << 8) |
(inb(dev->iobase + PCL816_AD_LO)));
-
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT (conversion end) flag */
+ /* clear INT (conversion end) flag */
+ outb(0, dev->iobase + PCL816_CLRINT);
break;
}
udelay(1);
@@ -278,7 +290,8 @@ static int pcl816_ai_insn_read(struct comedi_device *dev,
if (!timeout) {
comedi_error(dev, "A/D insn timeout\n");
data[0] = 0;
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT (conversion end) flag */
+ /* clear INT (conversion end) flag */
+ outb(0, dev->iobase + PCL816_CLRINT);
return -EIO;
}
@@ -332,7 +345,8 @@ static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
}
if (!devpriv->ai_neverending)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) {
/* all data sampled */
pcl816_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
@@ -369,7 +383,8 @@ static void transfer_from_dma_buf(struct comedi_device *dev,
}
if (!devpriv->ai_neverending)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) {
pcl816_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_BLOCK;
@@ -391,7 +406,8 @@ static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
disable_dma(devpriv->dma);
this_dma_buf = devpriv->next_dma_buf;
- if ((devpriv->dma_runs_to_end > -1) || devpriv->ai_neverending) { /* switch dma bufs */
+ /* switch dma bufs */
+ if ((devpriv->dma_runs_to_end > -1) || devpriv->ai_neverending) {
devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
set_dma_mode(devpriv->dma, DMA_MODE_READ);
@@ -467,14 +483,14 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
*/
static void pcl816_cmdtest_out(int e, struct comedi_cmd *cmd)
{
- printk("pcl816 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
+ printk(KERN_INFO "pcl816 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
- printk("pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
+ printk(KERN_INFO "pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
- printk("pcl816 e=%d stopsrc=%x scanend=%x\n", e, cmd->stop_src,
- cmd->scan_end_src);
- printk("pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n", e,
- cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
+ printk(KERN_INFO "pcl816 e=%d stopsrc=%x scanend=%x\n", e,
+ cmd->stop_src, cmd->scan_end_src);
+ printk(KERN_INFO "pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n",
+ e, cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
}
/*
@@ -486,8 +502,9 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
int err = 0;
int tmp, divisor1 = 0, divisor2 = 0;
- DEBUG(printk("pcl816 pcl812_ai_cmdtest\n"); pcl816_cmdtest_out(-1, cmd);
- );
+ DEBUG(printk(KERN_INFO "pcl816 pcl812_ai_cmdtest\n");
+ pcl816_cmdtest_out(-1, cmd);
+ );
/* step 1: make sure trigger sources are trivially valid */
tmp = cmd->start_src;
@@ -515,11 +532,14 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
- if (err) {
+ if (err)
return 1;
- }
- /* step 2: make sure trigger sources are unique and mutually compatible */
+
+ /*
+ * step 2: make sure trigger sources
+ * are unique and mutually compatible
+ */
if (cmd->start_src != TRIG_NOW) {
cmd->start_src = TRIG_NOW;
@@ -544,9 +564,9 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
err++;
- if (err) {
+ if (err)
return 2;
- }
+
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_arg != 0) {
@@ -586,9 +606,9 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
}
}
- if (err) {
+ if (err)
return 3;
- }
+
/* step 4: fix up any arguments */
if (cmd->convert_src == TRIG_TIMER) {
@@ -603,9 +623,9 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
err++;
}
- if (err) {
+ if (err)
return 4;
- }
+
/* step 5: complain about special chanlist considerations */
@@ -643,7 +663,9 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
i8253_cascade_ns_to_timer(this_board->i8254_osc_base, &divisor1,
&divisor2, &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
- if (divisor1 == 1) { /* PCL816 crash if any divisor is set to 1 */
+
+ /* PCL816 crash if any divisor is set to 1 */
+ if (divisor1 == 1) {
divisor1 = 2;
divisor2 /= 2;
}
@@ -676,8 +698,10 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_neverending = 1;
}
- if ((cmd->flags & TRIG_WAKE_EOS)) { /* don't we want wake up every scan? */
- printk("pl816: You wankt WAKE_EOS but I dont want handle it");
+ /* does the user want a wakeup on every scan? */
+ if ((cmd->flags & TRIG_WAKE_EOS)) {
+ printk(KERN_INFO
+ "pl816: You wankt WAKE_EOS but I dont want handle it");
/* devpriv->ai_eos=1; */
/* if (devpriv->ai_n_chan==1) */
/* devpriv->dma=0; // DMA is useless for this situation */
@@ -686,9 +710,17 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->dma) {
bytes = devpriv->hwdmasize[0];
if (!devpriv->ai_neverending) {
- bytes = s->async->cmd.chanlist_len * s->async->cmd.chanlist_len * sizeof(short); /* how many */
- devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize[0]; /* how many DMA pages we must fill */
- devpriv->last_dma_run = bytes % devpriv->hwdmasize[0]; /* on last dma transfer must be moved */
+ /* how many */
+ bytes = s->async->cmd.chanlist_len *
+ s->async->cmd.chanlist_len *
+ sizeof(short);
+
+ /* how many DMA pages we must fill */
+ devpriv->dma_runs_to_end = bytes /
+ devpriv->hwdmasize[0];
+
+ /* on last dma transfer must be moved */
+ devpriv->last_dma_run = bytes % devpriv->hwdmasize[0];
devpriv->dma_runs_to_end--;
if (devpriv->dma_runs_to_end >= 0)
bytes = devpriv->hwdmasize[0];
@@ -711,14 +743,22 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
switch (cmd->convert_src) {
case TRIG_TIMER:
devpriv->int816_mode = INT_TYPE_AI1_DMA;
- outb(0x32, dev->iobase + PCL816_CONTROL); /* Pacer+IRQ+DMA */
- outb(dmairq, dev->iobase + PCL816_STATUS); /* write irq and DMA to card */
+
+ /* Pacer+IRQ+DMA */
+ outb(0x32, dev->iobase + PCL816_CONTROL);
+
+ /* write irq and DMA to card */
+ outb(dmairq, dev->iobase + PCL816_STATUS);
break;
default:
devpriv->int816_mode = INT_TYPE_AI3_DMA;
- outb(0x34, dev->iobase + PCL816_CONTROL); /* Ext trig+IRQ+DMA */
- outb(dmairq, dev->iobase + PCL816_STATUS); /* write irq to card */
+
+ /* Ext trig+IRQ+DMA */
+ outb(0x34, dev->iobase + PCL816_CONTROL);
+
+ /* write irq to card */
+ outb(dmairq, dev->iobase + PCL816_STATUS);
break;
}
@@ -747,7 +787,8 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
- top1 = devpriv->hwdmasize[0] - top1; /* where is now DMA in buffer */
+ /* where the DMA is now in the buffer */
+ top1 = devpriv->hwdmasize[0] - top1;
top1 >>= 1; /* sample position */
top2 = top1 - devpriv->ai_poll_ptr;
if (top2 < 1) { /* no new samples */
@@ -787,16 +828,23 @@ static int pcl816_ai_cancel(struct comedi_device *dev,
disable_dma(devpriv->dma);
case INT_TYPE_AI1_INT:
case INT_TYPE_AI3_INT:
- outb(inb(dev->iobase + PCL816_CONTROL) & 0x73, dev->iobase + PCL816_CONTROL); /* Stop A/D */
+ outb(inb(dev->iobase + PCL816_CONTROL) & 0x73,
+ dev->iobase + PCL816_CONTROL); /* Stop A/D */
udelay(1);
outb(0, dev->iobase + PCL816_CONTROL); /* Stop A/D */
- outb(0xb0, dev->iobase + PCL816_CTRCTL); /* Stop pacer */
+
+ /* Stop pacer */
+ outb(0xb0, dev->iobase + PCL816_CTRCTL);
outb(0x70, dev->iobase + PCL816_CTRCTL);
outb(0, dev->iobase + PCL816_AD_LO);
inb(dev->iobase + PCL816_AD_LO);
inb(dev->iobase + PCL816_AD_HI);
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- outb(0, dev->iobase + PCL816_CONTROL); /* Stop A/D */
+
+ /* clear INT request */
+ outb(0, dev->iobase + PCL816_CLRINT);
+
+ /* Stop A/D */
+ outb(0, dev->iobase + PCL816_CONTROL);
devpriv->irq_blocked = 0;
devpriv->irq_was_now_closed = devpriv->int816_mode;
devpriv->int816_mode = 0;
@@ -866,8 +914,11 @@ start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
outb(0xff, dev->iobase + PCL816_CTR0);
outb(0x00, dev->iobase + PCL816_CTR0);
udelay(1);
- outb(0xb4, dev->iobase + PCL816_CTRCTL); /* set counter 2 as mode 3 */
- outb(0x74, dev->iobase + PCL816_CTRCTL); /* set counter 1 as mode 3 */
+
+ /* set counter 2 as mode 3 */
+ outb(0xb4, dev->iobase + PCL816_CTRCTL);
+ /* set counter 1 as mode 3 */
+ outb(0x74, dev->iobase + PCL816_CTRCTL);
udelay(1);
if (mode == 1) {
@@ -903,41 +954,51 @@ check_channel_list(struct comedi_device *dev,
}
if (chanlen > 1) {
- chansegment[0] = chanlist[0]; /* first channel is everytime ok */
+ /* the first channel is always ok */
+ chansegment[0] = chanlist[0];
for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
/* build part of chanlist */
- DEBUG(printk("%d. %d %d\n", i, CR_CHAN(chanlist[i]),
+ DEBUG(printk(KERN_INFO "%d. %d %d\n", i,
+ CR_CHAN(chanlist[i]),
CR_RANGE(chanlist[i]));)
+
+ /* if we detect a loop, the list must end here */
if (chanlist[0] == chanlist[i])
- break; /* we detect loop, this must by finish */
+ break;
nowmustbechan =
(CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
if (nowmustbechan != CR_CHAN(chanlist[i])) {
/* channel list isn't continous :-( */
- printk
- ("comedi%d: pcl816: channel list must be continous! chanlist[%i]=%d but must be %d or %d!\n",
- dev->minor, i, CR_CHAN(chanlist[i]),
- nowmustbechan, CR_CHAN(chanlist[0]));
+ printk(KERN_WARNING
+ "comedi%d: pcl816: channel list must "
+ "be continous! chanlist[%i]=%d but "
+ "must be %d or %d!\n", dev->minor,
+ i, CR_CHAN(chanlist[i]), nowmustbechan,
+ CR_CHAN(chanlist[0]));
return 0;
}
- chansegment[i] = chanlist[i]; /* well, this is next correct channel in list */
+ /* well, this is next correct channel in list */
+ chansegment[i] = chanlist[i];
}
- for (i = 0, segpos = 0; i < chanlen; i++) { /* check whole chanlist */
+ /* check whole chanlist */
+ for (i = 0, segpos = 0; i < chanlen; i++) {
DEBUG(printk("%d %d=%d %d\n",
CR_CHAN(chansegment[i % seglen]),
CR_RANGE(chansegment[i % seglen]),
CR_CHAN(chanlist[i]),
CR_RANGE(chanlist[i]));)
if (chanlist[i] != chansegment[i % seglen]) {
- printk
- ("comedi%d: pcl816: bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
- dev->minor, i, CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(chanlist[i % seglen]),
- CR_RANGE(chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
+ printk(KERN_WARNING
+ "comedi%d: pcl816: bad channel or range"
+ " number! chanlist[%i]=%d,%d,%d and not"
+ " %d,%d,%d!\n", dev->minor, i,
+ CR_CHAN(chansegment[i]),
+ CR_RANGE(chansegment[i]),
+ CR_AREF(chansegment[i]),
+ CR_CHAN(chanlist[i % seglen]),
+ CR_RANGE(chanlist[i % seglen]),
+ CR_AREF(chansegment[i % seglen]));
return 0; /* chan/gain list is strange */
}
}
@@ -965,12 +1026,15 @@ setup_channel_list(struct comedi_device *dev,
for (i = 0; i < seglen; i++) { /* store range list to card */
devpriv->ai_act_chanlist[i] = CR_CHAN(chanlist[i]);
outb(CR_CHAN(chanlist[0]) & 0xf, dev->iobase + PCL816_MUX);
- outb(CR_RANGE(chanlist[0]), dev->iobase + PCL816_RANGE); /* select gain */
+ /* select gain */
+ outb(CR_RANGE(chanlist[0]), dev->iobase + PCL816_RANGE);
}
udelay(1);
-
- outb(devpriv->ai_act_chanlist[0] | (devpriv->ai_act_chanlist[seglen - 1] << 4), dev->iobase + PCL816_MUX); /* select channel interval to scan */
+ /* select channel interval to scan */
+ outb(devpriv->ai_act_chanlist[0] |
+ (devpriv->ai_act_chanlist[seglen - 1] << 4),
+ dev->iobase + PCL816_MUX);
}
#ifdef unused
@@ -998,11 +1062,11 @@ static int set_rtc_irq_bit(unsigned char bit)
save_flags(flags);
cli();
val = CMOS_READ(RTC_CONTROL);
- if (bit) {
+ if (bit)
val |= RTC_PIE;
- } else {
+ else
val &= ~RTC_PIE;
- }
+
CMOS_WRITE(val, RTC_CONTROL);
CMOS_READ(RTC_INTR_FLAGS);
restore_flags(flags);
@@ -1072,7 +1136,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->iobase = iobase;
if (pcl816_check(iobase)) {
- printk(", I cann't detect board. FAIL!\n");
+ printk(KERN_ERR ", I cann't detect board. FAIL!\n");
return -EIO;
}
@@ -1090,30 +1154,29 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (irq) { /* we want to use IRQ */
if (((1 << irq) & this_board->IRQbits) == 0) {
printk
- (", IRQ %u is out of allowed range, DISABLING IT",
- irq);
+ (", IRQ %u is out of allowed range, "
+ "DISABLING IT", irq);
irq = 0; /* Bad IRQ */
} else {
if (request_irq
(irq, interrupt_pcl816, 0, "pcl816", dev)) {
printk
- (", unable to allocate IRQ %u, DISABLING IT",
- irq);
+ (", unable to allocate IRQ %u, "
+ "DISABLING IT", irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ printk(KERN_INFO ", irq=%u", irq);
}
}
}
}
dev->irq = irq;
- if (irq) {
+ if (irq) /* 1=we have allocated irq */
devpriv->irq_free = 1;
- } /* 1=we have allocated irq */
- else {
+ else
devpriv->irq_free = 0;
- }
+
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->int816_mode = 0; /* mode of irq */
@@ -1170,18 +1233,22 @@ no_rtc:
}
ret = request_dma(dma, "pcl816");
if (ret) {
- printk(", unable to allocate DMA %u, FAIL!\n", dma);
+ printk(KERN_ERR
+ ", unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
- printk(", dma=%u", dma);
+ printk(KERN_INFO ", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
printk(", unable to allocate DMA buffer, FAIL!\n");
- /* maybe experiment with try_to_free_pages() will help .... */
+ /*
+ * maybe experiment with try_to_free_pages()
+ * will help ....
+ */
return -EBUSY; /* no buffer :-( */
}
devpriv->dmapages[0] = pages;
@@ -1192,8 +1259,9 @@ no_rtc:
if (devpriv->dma_rtc == 0) { /* we must do duble buff :-( */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
- printk
- (", unable to allocate DMA buffer, FAIL!\n");
+ printk(KERN_ERR
+ ", unable to allocate DMA buffer, "
+ "FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
@@ -1277,7 +1345,7 @@ case COMEDI_SUBD_DO:
*/
static int pcl816_detach(struct comedi_device *dev)
{
- DEBUG(printk("comedi%d: pcl816: remove\n", dev->minor);)
+ DEBUG(printk(KERN_INFO "comedi%d: pcl816: remove\n", dev->minor);)
free_resources(dev);
#ifdef unused
if (devpriv->dma_rtc)
@@ -1285,3 +1353,7 @@ static int pcl816_detach(struct comedi_device *dev)
#endif
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 9d6aa393ef1..d2bd6f82b83 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -313,7 +313,18 @@ static struct comedi_driver driver_pcl818 = {
.offset = sizeof(struct pcl818_board),
};
-COMEDI_INITCLEANUP(driver_pcl818);
+static int __init driver_pcl818_init_module(void)
+{
+ return comedi_driver_register(&driver_pcl818);
+}
+
+static void __exit driver_pcl818_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcl818);
+}
+
+module_init(driver_pcl818_init_module);
+module_exit(driver_pcl818_cleanup_module);
struct pcl818_private {
@@ -2036,3 +2047,7 @@ static int pcl818_detach(struct comedi_device *dev)
free_resources(dev);
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
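All of the COMEDI_INITCLEANUP() removals in this series follow the same shape: the macro is expanded into explicit module init/exit hooks plus the MODULE_* tags. A minimal sketch of the resulting boilerplate for an ISA-style driver, using a hypothetical driver_foo comedi_driver instance (the name is illustrative only; each hunk below substitutes its own driver):

static int __init driver_foo_init_module(void)
{
        /* hand the driver to the comedi core */
        return comedi_driver_register(&driver_foo);
}

static void __exit driver_foo_cleanup_module(void)
{
        comedi_driver_unregister(&driver_foo);
}

module_init(driver_foo_init_module);
module_exit(driver_foo_cleanup_module);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");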
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index ed610307923..7fb3c27e597 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -97,7 +97,18 @@ static struct comedi_driver driver_pcm3724 = {
.offset = sizeof(struct pcm3724_board),
};
-COMEDI_INITCLEANUP(driver_pcm3724);
+static int __init driver_pcm3724_init_module(void)
+{
+ return comedi_driver_register(&driver_pcm3724);
+}
+
+static void __exit driver_pcm3724_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcm3724);
+}
+
+module_init(driver_pcm3724_init_module);
+module_exit(driver_pcm3724_cleanup_module);
/* (setq c-basic-offset 8) */
@@ -184,7 +195,7 @@ static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s,
struct priv_pcm3724 *priv;
gatecfg = 0;
- priv = (struct priv_pcm3724 *)(dev->private);
+ priv = dev->private;
mask = 1 << CR_CHAN(chanspec);
if (s == dev->subdevices) /* subdev 0 */
@@ -307,3 +318,7 @@ static int pcm3724_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcm3730.c b/drivers/staging/comedi/drivers/pcm3730.c
index 22b7aae63ad..bada6b236ff 100644
--- a/drivers/staging/comedi/drivers/pcm3730.c
+++ b/drivers/staging/comedi/drivers/pcm3730.c
@@ -38,7 +38,18 @@ static struct comedi_driver driver_pcm3730 = {
.detach = pcm3730_detach,
};
-COMEDI_INITCLEANUP(driver_pcm3730);
+static int __init driver_pcm3730_init_module(void)
+{
+ return comedi_driver_register(&driver_pcm3730);
+}
+
+static void __exit driver_pcm3730_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcm3730);
+}
+
+module_init(driver_pcm3730_init_module);
+module_exit(driver_pcm3730_cleanup_module);
static int pcm3730_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -154,3 +165,7 @@ static int pcm3730_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcm_common.c b/drivers/staging/comedi/drivers/pcm_common.c
index 52c2a669821..474af7bc6c8 100644
--- a/drivers/staging/comedi/drivers/pcm_common.c
+++ b/drivers/staging/comedi/drivers/pcm_common.c
@@ -109,3 +109,7 @@ int comedi_pcm_cmdtest(struct comedi_device *dev,
return 0;
}
EXPORT_SYMBOL(comedi_pcm_cmdtest);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index fab8092bd7a..23b3d777340 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -89,7 +89,18 @@ static struct comedi_driver driver_pcmad = {
.offset = sizeof(pcmad_boards[0]),
};
-COMEDI_INITCLEANUP(driver_pcmad);
+static int __init driver_pcmad_init_module(void)
+{
+ return comedi_driver_register(&driver_pcmad);
+}
+
+static void __exit driver_pcmad_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_pcmad);
+}
+
+module_init(driver_pcmad_init_module);
+module_exit(driver_pcmad_cleanup_module);
#define TIMEOUT 100
@@ -176,3 +187,7 @@ static int pcmad_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
index 7133eb0352b..0e9ffa28d74 100644
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ b/drivers/staging/comedi/drivers/pcmda12.c
@@ -157,7 +157,8 @@ static int pcmda12_attach(struct comedi_device *dev,
unsigned long iobase;
iobase = it->options[0];
- printk("comedi%d: %s: io: %lx %s ", dev->minor, driver.driver_name,
+ printk(KERN_INFO
+ "comedi%d: %s: io: %lx %s ", dev->minor, driver.driver_name,
iobase, it->options[1] ? "simultaneous xfer mode enabled" : "");
if (!request_region(iobase, IOSIZE, driver.driver_name)) {
@@ -177,7 +178,7 @@ static int pcmda12_attach(struct comedi_device *dev,
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmda12_private)) < 0) {
- printk("cannot allocate private data structure\n");
+ printk(KERN_ERR "cannot allocate private data structure\n");
return -ENOMEM;
}
@@ -191,7 +192,7 @@ static int pcmda12_attach(struct comedi_device *dev,
* 96-channel version of the board.
*/
if (alloc_subdevices(dev, 1) < 0) {
- printk("cannot allocate subdevice data structures\n");
+ printk(KERN_ERR "cannot allocate subdevice data structures\n");
return -ENOMEM;
}
@@ -207,7 +208,7 @@ static int pcmda12_attach(struct comedi_device *dev,
zero_chans(dev); /* clear out all the registers, basically */
- printk("attached\n");
+ printk(KERN_INFO "attached\n");
return 1;
}
@@ -222,7 +223,8 @@ static int pcmda12_attach(struct comedi_device *dev,
*/
static int pcmda12_detach(struct comedi_device *dev)
{
- printk("comedi%d: %s: remove\n", dev->minor, driver.driver_name);
+ printk(KERN_INFO
+ "comedi%d: %s: remove\n", dev->minor, driver.driver_name);
if (dev->iobase)
release_region(dev->iobase, IOSIZE);
return 0;
@@ -303,4 +305,19 @@ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver);
+static int __init driver_init_module(void)
+{
+ return comedi_driver_register(&driver);
+}
+
+static void __exit driver_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver);
+}
+
+module_init(driver_init_module);
+module_exit(driver_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 025a52e8981..5c832d7ed45 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -145,10 +145,6 @@ Configuration Options:
#define PAGE_ENAB 2
#define PAGE_INT_ID 3
-typedef int (*comedi_insn_fn_t) (struct comedi_device *,
- struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
static int ai_rinsn(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
static int ao_rinsn(struct comedi_device *, struct comedi_subdevice *,
@@ -171,7 +167,18 @@ struct pcmmio_board {
const int n_ai_chans;
const int n_ao_chans;
const struct comedi_lrange *ai_range_table, *ao_range_table;
- comedi_insn_fn_t ai_rinsn, ao_rinsn, ao_winsn;
+ int (*ai_rinsn) (struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+ int (*ao_rinsn) (struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+ int (*ao_winsn) (struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
};
static const struct comedi_lrange ranges_ai = {
@@ -1333,4 +1340,19 @@ static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver);
+static int __init driver_init_module(void)
+{
+ return comedi_driver_register(&driver);
+}
+
+static void __exit driver_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver);
+}
+
+module_init(driver_init_module);
+module_exit(driver_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 5af4c8448a3..7a9287433b2 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -1018,4 +1018,19 @@ pcmuio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver);
+static int __init driver_init_module(void)
+{
+ return comedi_driver_register(&driver);
+}
+
+static void __exit driver_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver);
+}
+
+module_init(driver_init_module);
+module_exit(driver_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/poc.c b/drivers/staging/comedi/drivers/poc.c
index 1ebc356ce40..831a576c24a 100644
--- a/drivers/staging/comedi/drivers/poc.c
+++ b/drivers/staging/comedi/drivers/poc.c
@@ -248,4 +248,19 @@ static int pcl734_insn_bits(struct comedi_device *dev,
return 2;
}
-COMEDI_INITCLEANUP(driver_poc);
+static int __init driver_poc_init_module(void)
+{
+ return comedi_driver_register(&driver_poc);
+}
+
+static void __exit driver_poc_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_poc);
+}
+
+module_init(driver_poc_init_module);
+module_exit(driver_poc_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index a91db6c4202..bf489d7f499 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -14,7 +14,7 @@
Documentation for the DAQP PCMCIA cards can be found on Quatech's site:
- ftp://ftp.quatech.com/Manuals/daqp-208.pdf
+ ftp://ftp.quatech.com/Manuals/daqp-208.pdf
This manual is for both the DAQP-208 and the DAQP-308.
@@ -50,7 +50,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
#include "../comedidev.h"
#include <linux/semaphore.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -195,7 +194,7 @@ static struct comedi_driver driver_daqp = {
static void daqp_dump(struct comedi_device *dev)
{
- printk("DAQP: status %02x; aux status %02x\n",
+ printk(KERN_INFO "DAQP: status %02x; aux status %02x\n",
inb(dev->iobase + DAQP_STATUS), inb(dev->iobase + DAQP_AUX));
}
@@ -207,9 +206,9 @@ static void hex_dump(char *str, void *ptr, int len)
printk(str);
for (i = 0; i < len; i++) {
- if (i % 16 == 0) {
- printk("\n0x%08x:", (unsigned int)cptr);
- }
+ if (i % 16 == 0)
+ printk("\n%p:", cptr);
+
printk(" %02x", *(cptr++));
}
printk("\n");
@@ -223,9 +222,9 @@ static int daqp_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct local_info_t *local = (struct local_info_t *)s->private;
- if (local->stop) {
+ if (local->stop)
return -EIO;
- }
+
outb(DAQP_COMMAND_STOP, dev->iobase + DAQP_COMMAND);
@@ -355,9 +354,9 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
int v;
int counter = 10000;
- if (local->stop) {
+ if (local->stop)
return -EIO;
- }
+
/* Stop any running conversion */
daqp_ai_cancel(dev, s);
@@ -372,9 +371,9 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
v = DAQP_SCANLIST_CHANNEL(CR_CHAN(insn->chanspec))
| DAQP_SCANLIST_GAIN(CR_RANGE(insn->chanspec));
- if (CR_AREF(insn->chanspec) == AREF_DIFF) {
+ if (CR_AREF(insn->chanspec) == AREF_DIFF)
v |= DAQP_SCANLIST_DIFFERENTIAL;
- }
+
v |= DAQP_SCANLIST_START;
@@ -488,7 +487,10 @@ static int daqp_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /*
+ * step 2: make sure trigger sources
+ * are unique and mutually compatible
+ */
/* note that mutual compatibility is not an issue here */
if (cmd->scan_begin_src != TRIG_TIMER &&
@@ -588,9 +590,9 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int i;
int v;
- if (local->stop) {
+ if (local->stop)
return -EIO;
- }
+
/* Stop any running conversion */
daqp_ai_cancel(dev, s);
@@ -640,13 +642,11 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
v = DAQP_SCANLIST_CHANNEL(CR_CHAN(chanspec))
| DAQP_SCANLIST_GAIN(CR_RANGE(chanspec));
- if (CR_AREF(chanspec) == AREF_DIFF) {
+ if (CR_AREF(chanspec) == AREF_DIFF)
v |= DAQP_SCANLIST_DIFFERENTIAL;
- }
- if (i == 0 || scanlist_start_on_every_entry) {
+ if (i == 0 || scanlist_start_on_every_entry)
v |= DAQP_SCANLIST_START;
- }
outb(v & 0xff, dev->iobase + DAQP_SCANLIST);
outb(v >> 8, dev->iobase + DAQP_SCANLIST);
@@ -760,7 +760,8 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
while (--counter
&& (inb(dev->iobase + DAQP_STATUS) & DAQP_STATUS_EVENTS)) ;
if (!counter) {
- printk("daqp: couldn't clear interrupts in status register\n");
+ printk(KERN_ERR
+ "daqp: couldn't clear interrupts in status register\n");
return -1;
}
@@ -785,9 +786,8 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
int d;
unsigned int chan;
- if (local->stop) {
+ if (local->stop)
return -EIO;
- }
chan = CR_CHAN(insn->chanspec);
d = data[0];
@@ -811,9 +811,8 @@ static int daqp_di_insn_read(struct comedi_device *dev,
{
struct local_info_t *local = (struct local_info_t *)s->private;
- if (local->stop) {
+ if (local->stop)
return -EIO;
- }
data[0] = inb(dev->iobase + DAQP_DIGITAL_IO);
@@ -828,9 +827,8 @@ static int daqp_do_insn_write(struct comedi_device *dev,
{
struct local_info_t *local = (struct local_info_t *)s->private;
- if (local->stop) {
+ if (local->stop)
return -EIO;
- }
outw(data[0] & 0xf, dev->iobase + DAQP_DIGITAL_IO);
@@ -872,13 +870,13 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
}
- dev->iobase = local->link->io.BasePort1;
+ dev->iobase = local->link->resource[0]->start;
ret = alloc_subdevices(dev, 4);
if (ret < 0)
return ret;
- printk("comedi%d: attaching daqp%d (io 0x%04lx)\n",
+ printk(KERN_INFO "comedi%d: attaching daqp%d (io 0x%04lx)\n",
dev->minor, it->options[0], dev->iobase);
s = dev->subdevices + 0;
@@ -931,7 +929,7 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int daqp_detach(struct comedi_device *dev)
{
- printk("comedi%d: detaching daqp\n", dev->minor);
+ printk(KERN_INFO "comedi%d: detaching daqp\n", dev->minor);
return 0;
}
@@ -996,14 +994,6 @@ static int daqp_cs_resume(struct pcmcia_device *p_dev);
static int daqp_cs_attach(struct pcmcia_device *);
static void daqp_cs_detach(struct pcmcia_device *);
-/*
- The dev_info variable is the "key" that is used to match up this
- device driver with appropriate cards, through the card configuration
- database.
-*/
-
-static const dev_info_t dev_info = "quatech_daqp_cs";
-
/*======================================================================
daqp_cs_attach() creates an "instance" of the driver, allocating
@@ -1076,8 +1066,7 @@ static void daqp_cs_detach(struct pcmcia_device *link)
/* Unlink device structure, and free it */
dev_table[dev->table_index] = NULL;
- if (dev)
- kfree(dev);
+ kfree(dev);
} /* daqp_cs_detach */
@@ -1103,26 +1092,24 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static void daqp_cs_config(struct pcmcia_device *link)
@@ -1154,12 +1141,10 @@ static void daqp_cs_config(struct pcmcia_device *link)
dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %u", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
return;
@@ -1228,7 +1213,7 @@ static struct pcmcia_driver daqp_cs_driver = {
.id_table = daqp_cs_id_table,
.owner = THIS_MODULE,
.drv = {
- .name = dev_info,
+ .name = "quatech_daqp_cs",
},
};
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 8626658e778..0367d2b9e2f 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -2356,4 +2356,44 @@ static int rtd_dio_insn_config(struct comedi_device *dev,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_PCI_INITCLEANUP(rtd520Driver, rtd520_pci_table);
+static int __devinit rtd520Driver_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, rtd520Driver.driver_name);
+}
+
+static void __devexit rtd520Driver_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver rtd520Driver_pci_driver = {
+ .id_table = rtd520_pci_table,
+ .probe = &rtd520Driver_pci_probe,
+ .remove = __devexit_p(&rtd520Driver_pci_remove)
+};
+
+static int __init rtd520Driver_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&rtd520Driver);
+ if (retval < 0)
+ return retval;
+
+ rtd520Driver_pci_driver.name = (char *)rtd520Driver.driver_name;
+ return pci_register_driver(&rtd520Driver_pci_driver);
+}
+
+static void __exit rtd520Driver_cleanup_module(void)
+{
+ pci_unregister_driver(&rtd520Driver_pci_driver);
+ comedi_driver_unregister(&rtd520Driver);
+}
+
+module_init(rtd520Driver_init_module);
+module_exit(rtd520Driver_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
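PCI-based drivers get the equivalent treatment: COMEDI_PCI_INITCLEANUP() is expanded into a small pci_driver whose probe/remove callbacks defer to comedi_pci_auto_config()/comedi_pci_auto_unconfig(), registered after the comedi driver itself. A condensed sketch with hypothetical foo names (the id table and comedi_driver are assumed to exist, as in the rtd520 hunk above):

static int __devinit foo_pci_probe(struct pci_dev *dev,
                                   const struct pci_device_id *ent)
{
        /* let the comedi core auto-configure the board */
        return comedi_pci_auto_config(dev, driver_foo.driver_name);
}

static void __devexit foo_pci_remove(struct pci_dev *dev)
{
        comedi_pci_auto_unconfig(dev);
}

static struct pci_driver foo_pci_driver = {
        .id_table = foo_pci_table,
        .probe = &foo_pci_probe,
        .remove = __devexit_p(&foo_pci_remove),
};

static int __init foo_init_module(void)
{
        int ret;

        ret = comedi_driver_register(&driver_foo);
        if (ret < 0)
                return ret;

        /* reuse the comedi driver name for the PCI driver */
        foo_pci_driver.name = (char *)driver_foo.driver_name;
        return pci_register_driver(&foo_pci_driver);
}

static void __exit foo_cleanup_module(void)
{
        pci_unregister_driver(&foo_pci_driver);
        comedi_driver_unregister(&driver_foo);
}

module_init(foo_init_module);
module_exit(foo_cleanup_module);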
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index 028ed6f89c4..72042b81831 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -158,7 +158,18 @@ static struct comedi_driver driver_rti800 = {
.offset = sizeof(struct rti800_board),
};
-COMEDI_INITCLEANUP(driver_rti800);
+static int __init driver_rti800_init_module(void)
+{
+ return comedi_driver_register(&driver_rti800);
+}
+
+static void __exit driver_rti800_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_rti800);
+}
+
+module_init(driver_rti800_init_module);
+module_exit(driver_rti800_cleanup_module);
static irqreturn_t rti800_interrupt(int irq, void *dev);
@@ -475,3 +486,7 @@ static int rti800_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index 2157edcf799..f59cb11590f 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -57,7 +57,18 @@ static struct comedi_driver driver_rti802 = {
.detach = rti802_detach,
};
-COMEDI_INITCLEANUP(driver_rti802);
+static int __init driver_rti802_init_module(void)
+{
+ return comedi_driver_register(&driver_rti802);
+}
+
+static void __exit driver_rti802_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_rti802);
+}
+
+module_init(driver_rti802_init_module);
+module_exit(driver_rti802_cleanup_module);
struct rti802_private {
enum {
@@ -150,3 +161,7 @@ static int rti802_detach(struct comedi_device *dev)
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 07c21e686f2..3607aaee4af 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -1002,4 +1002,19 @@ static int s526_dio_insn_config(struct comedi_device *dev,
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
-COMEDI_INITCLEANUP(driver_s526);
+static int __init driver_s526_init_module(void)
+{
+ return comedi_driver_register(&driver_s526);
+}
+
+static void __exit driver_s526_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_s526);
+}
+
+module_init(driver_s526_init_module);
+module_exit(driver_s526_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index a3cc93362ec..d5ba3ab357a 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -60,10 +60,10 @@ INSN_CONFIG instructions:
insn.insn=INSN_CONFIG; //configuration instruction
insn.n=1; //number of operation (must be 1)
insn.data=&initialvalue; //initial value loaded into encoder
- //during configuration
+ //during configuration
insn.subdev=5; //encoder subdevice
insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); //encoder_channel
- //to configure
+ //to configure
comedi_do_insn(cf,&insn); //executing configuration
*/
@@ -224,7 +224,43 @@ static struct dio_private *dio_private_word[]={
#define devpriv ((struct s626_private *)dev->private)
#define diopriv ((struct dio_private *)s->private)
-COMEDI_PCI_INITCLEANUP_NOMODULE(driver_s626, s626_pci_table);
+static int __devinit driver_s626_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_s626.driver_name);
+}
+
+static void __devexit driver_s626_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_s626_pci_driver = {
+ .id_table = s626_pci_table,
+ .probe = &driver_s626_pci_probe,
+ .remove = __devexit_p(&driver_s626_pci_remove)
+};
+
+static int __init driver_s626_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_s626);
+ if (retval < 0)
+ return retval;
+
+ driver_s626_pci_driver.name = (char *)driver_s626.driver_name;
+ return pci_register_driver(&driver_s626_pci_driver);
+}
+
+static void __exit driver_s626_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_s626_pci_driver);
+ comedi_driver_unregister(&driver_s626);
+}
+
+module_init(driver_s626_init_module);
+module_exit(driver_s626_cleanup_module);
/* ioctl routines */
static int s626_ai_insn_config(struct comedi_device *dev,
@@ -263,7 +299,7 @@ static int s626_enc_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int s626_ns_to_timer(int *nanosec, int round_mode);
-static int s626_ai_load_polllist(uint8_t * ppl, struct comedi_cmd *cmd);
+static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd);
static int s626_ai_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int trignum);
static irqreturn_t s626_irq_handler(int irq, void *d);
@@ -294,16 +330,16 @@ static void CloseDMAB(struct comedi_device *dev, struct bufferDMA *pdma,
/* COUNTER OBJECT ------------------------------------------------ */
struct enc_private {
/* Pointers to functions that differ for A and B counters: */
- uint16_t(*GetEnable) (struct comedi_device * dev, struct enc_private *); /* Return clock enable. */
- uint16_t(*GetIntSrc) (struct comedi_device * dev, struct enc_private *); /* Return interrupt source. */
- uint16_t(*GetLoadTrig) (struct comedi_device * dev, struct enc_private *); /* Return preload trigger source. */
- uint16_t(*GetMode) (struct comedi_device * dev, struct enc_private *); /* Return standardized operating mode. */
- void (*PulseIndex) (struct comedi_device * dev, struct enc_private *); /* Generate soft index strobe. */
- void (*SetEnable) (struct comedi_device * dev, struct enc_private *, uint16_t enab); /* Program clock enable. */
- void (*SetIntSrc) (struct comedi_device * dev, struct enc_private *, uint16_t IntSource); /* Program interrupt source. */
- void (*SetLoadTrig) (struct comedi_device * dev, struct enc_private *, uint16_t Trig); /* Program preload trigger source. */
- void (*SetMode) (struct comedi_device * dev, struct enc_private *, uint16_t Setup, uint16_t DisableIntSrc); /* Program standardized operating mode. */
- void (*ResetCapFlags) (struct comedi_device * dev, struct enc_private *); /* Reset event capture flags. */
+ uint16_t(*GetEnable) (struct comedi_device *dev, struct enc_private *); /* Return clock enable. */
+ uint16_t(*GetIntSrc) (struct comedi_device *dev, struct enc_private *); /* Return interrupt source. */
+ uint16_t(*GetLoadTrig) (struct comedi_device *dev, struct enc_private *); /* Return preload trigger source. */
+ uint16_t(*GetMode) (struct comedi_device *dev, struct enc_private *); /* Return standardized operating mode. */
+ void (*PulseIndex) (struct comedi_device *dev, struct enc_private *); /* Generate soft index strobe. */
+ void (*SetEnable) (struct comedi_device *dev, struct enc_private *, uint16_t enab); /* Program clock enable. */
+ void (*SetIntSrc) (struct comedi_device *dev, struct enc_private *, uint16_t IntSource); /* Program interrupt source. */
+ void (*SetLoadTrig) (struct comedi_device *dev, struct enc_private *, uint16_t Trig); /* Program preload trigger source. */
+ void (*SetMode) (struct comedi_device *dev, struct enc_private *, uint16_t Setup, uint16_t DisableIntSrc); /* Program standardized operating mode. */
+ void (*ResetCapFlags) (struct comedi_device *dev, struct enc_private *); /* Reset event capture flags. */
uint16_t MyCRA; /* Address of CRA register. */
uint16_t MyCRB; /* Address of CRB register. */
@@ -543,13 +579,13 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->pdev = pdev;
if (pdev == NULL) {
- printk("s626_attach: Board not present!!!\n");
+ printk(KERN_ERR "s626_attach: Board not present!!!\n");
return -ENODEV;
}
result = comedi_pci_enable(pdev, "s626");
if (result < 0) {
- printk("s626_attach: comedi_pci_enable fails\n");
+ printk(KERN_ERR "s626_attach: comedi_pci_enable fails\n");
return -ENODEV;
}
devpriv->got_regions = 1;
@@ -558,7 +594,7 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->base_addr = ioremap(resourceStart, SIZEOF_ADDRESS_SPACE);
if (devpriv->base_addr == NULL) {
- printk("s626_attach: IOREMAP failed\n");
+ printk(KERN_ERR "s626_attach: IOREMAP failed\n");
return -ENODEV;
}
@@ -579,7 +615,7 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
pci_alloc_consistent(devpriv->pdev, DMABUF_SIZE, &appdma);
if (devpriv->ANABuf.LogicalBase == NULL) {
- printk("s626_attach: DMA Memory mapping error\n");
+ printk(KERN_ERR "s626_attach: DMA Memory mapping error\n");
return -ENOMEM;
}
@@ -596,7 +632,7 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
pci_alloc_consistent(devpriv->pdev, DMABUF_SIZE, &appdma);
if (devpriv->RPSBuf.LogicalBase == NULL) {
- printk("s626_attach: DMA Memory mapping error\n");
+ printk(KERN_ERR "s626_attach: DMA Memory mapping error\n");
return -ENOMEM;
}
@@ -622,18 +658,18 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* set up interrupt handler */
if (dev->irq == 0) {
- printk(" unknown irq (bad)\n");
+ printk(KERN_ERR " unknown irq (bad)\n");
} else {
ret = request_irq(dev->irq, s626_irq_handler, IRQF_SHARED,
"s626", dev);
if (ret < 0) {
- printk(" irq not available\n");
+ printk(KERN_ERR " irq not available\n");
dev->irq = 0;
}
}
- DEBUG("s626_attach: -- it opts %d,%d -- \n",
+ DEBUG("s626_attach: -- it opts %d,%d --\n",
it->options[0], it->options[1]);
s = dev->subdevices + 0;
@@ -779,7 +815,8 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* Write I2C control: abort any I2C activity. */
MC_ENABLE(P_MC2, MC2_UPLD_IIC);
/* Invoke command upload */
- while ((RR7146(P_MC2) & MC2_UPLD_IIC) == 0) ;
+ while ((RR7146(P_MC2) & MC2_UPLD_IIC) == 0)
+ ;
/* and wait for upload to complete. */
/* Per SAA7146 data sheet, write to STATUS reg twice to
@@ -788,7 +825,8 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
WR7146(P_I2CSTAT, I2C_CLKSEL);
/* Write I2C control: reset error flags. */
MC_ENABLE(P_MC2, MC2_UPLD_IIC); /* Invoke command upload */
- while (!MC_TEST(P_MC2, MC2_UPLD_IIC)) ;
+ while (!MC_TEST(P_MC2, MC2_UPLD_IIC))
+ ;
/* and wait for upload to complete. */
}
@@ -828,14 +866,14 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* not start up in a defined state after a PCI reset.
*/
-/* PollList = EOPL; // Create a simple polling */
-/* // list for analog input */
-/* // channel 0. */
+/* PollList = EOPL; // Create a simple polling */
+/* // list for analog input */
+/* // channel 0. */
/* ResetADC( dev, &PollList ); */
/* s626_ai_rinsn(dev,dev->subdevices,NULL,data); //( &AdcData ); // */
-/* //Get initial ADC */
-/* //value. */
+/* //Get initial ADC */
+/* //value. */
/* StartVal = data[0]; */
@@ -848,10 +886,10 @@ static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* for ( index = 0; index < 500; index++ ) */
/* { */
-/* s626_ai_rinsn(dev,dev->subdevices,NULL,data); */
-/* AdcData = data[0]; //ReadADC( &AdcData ); */
-/* if ( AdcData != StartVal ) */
-/* break; */
+/* s626_ai_rinsn(dev,dev->subdevices,NULL,data); */
+/* AdcData = data[0]; //ReadADC( &AdcData ); */
+/* if ( AdcData != StartVal ) */
+/* break; */
/* } */
/* end initADC */
@@ -1513,7 +1551,7 @@ void ResetADC(struct comedi_device *dev, uint8_t * ppl)
break; /* Exit poll list processing loop. */
}
}
- DEBUG("ResetADC: ADC items %d \n", devpriv->AdcItems);
+ DEBUG("ResetADC: ADC items %d\n", devpriv->AdcItems);
/* VERSION 2.01 CHANGE: DELAY CHANGED FROM 250NS to 2US. Allow the
* ADC to stabilize for 2 microseconds before starting the final
@@ -1574,7 +1612,7 @@ static int s626_ai_insn_config(struct comedi_device *dev,
/* register uint8_t i; */
/* register int32_t *readaddr; */
-/* DEBUG("as626_ai_rinsn: ai_rinsn enter \n"); */
+/* DEBUG("as626_ai_rinsn: ai_rinsn enter\n"); */
/* Trigger ADC scan loop start by setting RPS Signal 0. */
/* MC_ENABLE( P_MC2, MC2_ADC_RPS ); */
@@ -1591,11 +1629,11 @@ static int s626_ai_insn_config(struct comedi_device *dev,
/* Convert ADC data to 16-bit integer values and copy to application buffer. */
/* for ( i = 0; i < devpriv->AdcItems; i++ ) { */
/* *data = s626_ai_reg_to_uint( *readaddr++ ); */
-/* DEBUG("s626_ai_rinsn: data %d \n",*data); */
+/* DEBUG("s626_ai_rinsn: data %d\n",*data); */
/* data++; */
/* } */
-/* DEBUG("s626_ai_rinsn: ai_rinsn escape \n"); */
+/* DEBUG("s626_ai_rinsn: ai_rinsn escape\n"); */
/* return i; */
/* } */
@@ -1651,7 +1689,8 @@ static int s626_ai_insn_read(struct comedi_device *dev,
/* shift into FB BUFFER 1 register. */
/* Wait for ADC done. */
- while (!(RR7146(P_PSR) & PSR_GPIO2)) ;
+ while (!(RR7146(P_PSR) & PSR_GPIO2))
+ ;
/* Fetch ADC data. */
if (n != 0)
@@ -1683,7 +1722,8 @@ static int s626_ai_insn_read(struct comedi_device *dev,
/* Wait for the data to arrive in FB BUFFER 1 register. */
/* Wait for ADC done. */
- while (!(RR7146(P_PSR) & PSR_GPIO2)) ;
+ while (!(RR7146(P_PSR) & PSR_GPIO2))
+ ;
/* Fetch ADC data from audio interface's input shift register. */
@@ -1696,7 +1736,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
return n;
}
-static int s626_ai_load_polllist(uint8_t * ppl, struct comedi_cmd *cmd)
+static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd)
{
int n;
@@ -1743,7 +1783,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
DEBUG("s626_ai_cmd: entering command function\n");
if (devpriv->ai_cmd_running) {
- printk("s626_ai_cmd: Another ai_cmd is running %d\n",
+ printk(KERN_ERR "s626_ai_cmd: Another ai_cmd is running %d\n",
dev->minor);
return -EBUSY;
}
@@ -2147,7 +2187,7 @@ static void s626_dio_init(struct comedi_device *dev)
DEBIwrite(dev, diopriv->WRDOut, 0); /* Program all outputs */
/* to inactive state. */
}
- DEBUG("s626_dio_init: DIO initialized \n");
+ DEBUG("s626_dio_init: DIO initialized\n");
}
/* DIO devices are slightly special. Although it is possible to
@@ -2346,7 +2386,7 @@ static int s626_enc_insn_read(struct comedi_device *dev,
int n;
struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];
- DEBUG("s626_enc_insn_read: encoder read channel %d \n",
+ DEBUG("s626_enc_insn_read: encoder read channel %d\n",
CR_CHAN(insn->chanspec));
for (n = 0; n < insn->n; n++)
@@ -2364,7 +2404,7 @@ static int s626_enc_insn_write(struct comedi_device *dev,
struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];
- DEBUG("s626_enc_insn_write: encoder write channel %d \n",
+ DEBUG("s626_enc_insn_write: encoder write channel %d\n",
CR_CHAN(insn->chanspec));
/* Set the preload register */
@@ -2425,8 +2465,7 @@ static void s626_timer_load(struct comedi_device *dev, struct enc_private *k,
static uint8_t trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
-static uint8_t trimadrs[] =
- { 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63 };
+static uint8_t trimadrs[] = { 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63 };
static void LoadTrimDACs(struct comedi_device *dev)
{
@@ -2524,10 +2563,12 @@ static uint32_t I2Chandshake(struct comedi_device *dev, uint32_t val)
/* upload confirmation. */
MC_ENABLE(P_MC2, MC2_UPLD_IIC);
- while (!MC_TEST(P_MC2, MC2_UPLD_IIC)) ;
+ while (!MC_TEST(P_MC2, MC2_UPLD_IIC))
+ ;
/* Wait until I2C bus transfer is finished or an error occurs. */
- while ((RR7146(P_I2CCTRL) & (I2C_BUSY | I2C_ERR)) == I2C_BUSY) ;
+ while ((RR7146(P_I2CCTRL) & (I2C_BUSY | I2C_ERR)) == I2C_BUSY)
+ ;
/* Return non-zero if I2C error occured. */
return RR7146(P_I2CCTRL) & I2C_ERR;
@@ -2641,7 +2682,8 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* Done by polling the DMAC enable flag; this flag is automatically
* cleared when the transfer has finished.
*/
- while ((RR7146(P_MC1) & MC1_A2OUT) != 0) ;
+ while ((RR7146(P_MC1) & MC1_A2OUT) != 0)
+ ;
/* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */
@@ -2658,7 +2700,8 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* finished transferring the DAC's data DWORD from the output FIFO
* to the output buffer register.
*/
- while ((RR7146(P_SSR) & SSR_AF2_OUT) == 0) ;
+ while ((RR7146(P_SSR) & SSR_AF2_OUT) == 0)
+ ;
/* Set up to trap execution at slot 0 when the TSL sequencer cycles
* back to slot 0 after executing the EOS in slot 5. Also,
@@ -2694,7 +2737,8 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* from 0xFF to 0x00, which slot 0 causes to happen by shifting
* out/in on SD2 the 0x00 that is always referenced by slot 5.
*/
- while ((RR7146(P_FB_BUFFER2) & 0xFF000000) != 0) ;
+ while ((RR7146(P_FB_BUFFER2) & 0xFF000000) != 0)
+ ;
}
/* Either (1) we were too late setting the slot 0 trap; the TSL
* sequencer restarted slot 0 before we could set the EOS trap flag,
@@ -2710,7 +2754,8 @@ static void SendDAC(struct comedi_device *dev, uint32_t val)
* the next DAC write. This is detected when FB_BUFFER2 MSB changes
* from 0x00 to 0xFF.
*/
- while ((RR7146(P_FB_BUFFER2) & 0xFF000000) == 0) ;
+ while ((RR7146(P_FB_BUFFER2) & 0xFF000000) == 0)
+ ;
}
static void WriteMISC2(struct comedi_device *dev, uint16_t NewImage)
@@ -2749,10 +2794,12 @@ static void DEBItransfer(struct comedi_device *dev)
/* Wait for completion of upload from shadow RAM to DEBI control */
/* register. */
- while (!MC_TEST(P_MC2, MC2_UPLD_DEBI)) ;
+ while (!MC_TEST(P_MC2, MC2_UPLD_DEBI))
+ ;
/* Wait until DEBI transfer is done. */
- while (RR7146(P_PSR) & PSR_DEBI_S) ;
+ while (RR7146(P_PSR) & PSR_DEBI_S)
+ ;
}
/* Write a value to a gate array register. */
@@ -3099,18 +3146,18 @@ static uint16_t GetEnable_B(struct comedi_device *dev, struct enc_private *k)
static void SetLatchSource(struct comedi_device *dev, struct enc_private *k,
uint16_t value)
{
- DEBUG("SetLatchSource: SetLatchSource enter 3550 \n");
+ DEBUG("SetLatchSource: SetLatchSource enter 3550\n");
DEBIreplace(dev, k->MyCRB,
(uint16_t) (~(CRBMSK_INTCTRL | CRBMSK_LATCHSRC)),
(uint16_t) (value << CRBBIT_LATCHSRC));
- DEBUG("SetLatchSource: SetLatchSource exit \n");
+ DEBUG("SetLatchSource: SetLatchSource exit\n");
}
/*
* static uint16_t GetLatchSource(struct comedi_device *dev, struct enc_private *k )
* {
- * return ( DEBIread( dev, k->MyCRB) >> CRBBIT_LATCHSRC ) & 3;
+ * return ( DEBIread( dev, k->MyCRB) >> CRBBIT_LATCHSRC ) & 3;
* }
*/
@@ -3317,6 +3364,6 @@ static void CountersInit(struct comedi_device *dev)
k->ResetCapFlags(dev, k);
k->SetEnable(dev, k, CLKENAB_ALWAYS);
}
- DEBUG("CountersInit: counters initialized \n");
+ DEBUG("CountersInit: counters initialized\n");
}
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index d02742a9529..2d1afecbbb6 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -720,15 +720,6 @@
#define STDMSK_CLKMULT ((uint16_t)(3 << STDBIT_CLKMULT))
#define STDMSK_CLKENAB ((uint16_t)(1 << STDBIT_CLKENAB))
-/* typedef struct indexCounter */
-/* { */
-/* unsigned int ao; */
-/* unsigned int ai; */
-/* unsigned int digout; */
-/* unsigned int digin; */
-/* unsigned int enc; */
-/* }CallCounter; */
-
struct bufferDMA {
dma_addr_t PhysicalBase;
void *LogicalBase;
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 0792617ebc3..c9be9e05f02 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -393,15 +393,16 @@ static void serial_write(struct file *f, struct serial_data data)
}
}
-static void serial_2002_open(struct comedi_device *dev)
+static int serial_2002_open(struct comedi_device *dev)
{
+ int result;
char port[20];
sprintf(port, "/dev/ttyS%d", devpriv->port);
devpriv->tty = filp_open(port, O_RDWR, 0);
if (IS_ERR(devpriv->tty)) {
- printk("serial_2002: file open error = %ld\n",
- PTR_ERR(devpriv->tty));
+ result = (int)PTR_ERR(devpriv->tty);
+ printk("serial_2002: file open error = %d\n", result);
} else {
struct config_t {
@@ -411,29 +412,25 @@ static void serial_2002_open(struct comedi_device *dev)
int max;
};
- struct config_t dig_in_config[32];
- struct config_t dig_out_config[32];
- struct config_t chan_in_config[32];
- struct config_t chan_out_config[32];
+ struct config_t *dig_in_config;
+ struct config_t *dig_out_config;
+ struct config_t *chan_in_config;
+ struct config_t *chan_out_config;
int i;
- for (i = 0; i < 32; i++) {
- dig_in_config[i].kind = 0;
- dig_in_config[i].bits = 0;
- dig_in_config[i].min = 0;
- dig_in_config[i].max = 0;
- dig_out_config[i].kind = 0;
- dig_out_config[i].bits = 0;
- dig_out_config[i].min = 0;
- dig_out_config[i].max = 0;
- chan_in_config[i].kind = 0;
- chan_in_config[i].bits = 0;
- chan_in_config[i].min = 0;
- chan_in_config[i].max = 0;
- chan_out_config[i].kind = 0;
- chan_out_config[i].bits = 0;
- chan_out_config[i].min = 0;
- chan_out_config[i].max = 0;
+ result = 0;
+ dig_in_config = kcalloc(32, sizeof(struct config_t),
+ GFP_KERNEL);
+ dig_out_config = kcalloc(32, sizeof(struct config_t),
+ GFP_KERNEL);
+ chan_in_config = kcalloc(32, sizeof(struct config_t),
+ GFP_KERNEL);
+ chan_out_config = kcalloc(32, sizeof(struct config_t),
+ GFP_KERNEL);
+ if (!dig_in_config || !dig_out_config
+ || !chan_in_config || !chan_out_config) {
+ result = -ENOMEM;
+ goto err_alloc_configs;
}
tty_setspeed(devpriv->tty, devpriv->speed);
@@ -447,7 +444,7 @@ static void serial_2002_open(struct comedi_device *dev)
break;
} else {
int command, channel, kind;
- struct config_t *cur_config = 0;
+ struct config_t *cur_config = NULL;
channel = data.value & 0x1f;
kind = (data.value >> 5) & 0x7;
@@ -574,8 +571,8 @@ static void serial_2002_open(struct comedi_device *dev)
for (i = 0; i <= 4; i++) {
/* Fill in subdev data */
struct config_t *c;
- unsigned char *mapping = 0;
- struct serial2002_range_table_t *range = 0;
+ unsigned char *mapping = NULL;
+ struct serial2002_range_table_t *range = NULL;
int kind = 0;
switch (i) {
@@ -613,7 +610,7 @@ static void serial_2002_open(struct comedi_device *dev)
}
break;
default:{
- c = 0;
+ c = NULL;
}
break;
}
@@ -632,22 +629,23 @@ static void serial_2002_open(struct comedi_device *dev)
s = &dev->subdevices[i];
s->n_chan = chan;
s->maxdata = 0;
- if (s->maxdata_list) {
- kfree(s->maxdata_list);
- }
+ kfree(s->maxdata_list);
s->maxdata_list = maxdata_list =
kmalloc(sizeof(unsigned int) * s->n_chan,
GFP_KERNEL);
- if (s->range_table_list) {
- kfree(s->range_table_list);
- }
+ if (!s->maxdata_list)
+ break; /* error handled below */
+ kfree(s->range_table_list);
+ s->range_table = NULL;
+ s->range_table_list = NULL;
if (range) {
- s->range_table = 0;
s->range_table_list = range_table_list =
kmalloc(sizeof
(struct
serial2002_range_table_t) *
s->n_chan, GFP_KERNEL);
+ if (!s->range_table_list)
+ break; /* err handled below */
}
for (chan = 0, j = 0; j < 32; j++) {
if (c[j].kind == kind) {
@@ -673,7 +671,35 @@ static void serial_2002_open(struct comedi_device *dev)
}
}
}
+ if (i <= 4) {
+ /* Failed to allocate maxdata_list or range_table_list
+ * for a subdevice that needed it. */
+ result = -ENOMEM;
+ for (i = 0; i <= 4; i++) {
+ struct comedi_subdevice *s;
+
+ s = &dev->subdevices[i];
+ kfree(s->maxdata_list);
+ s->maxdata_list = NULL;
+ kfree(s->range_table_list);
+ s->range_table_list = NULL;
+ }
+ }
+
+err_alloc_configs:
+ kfree(dig_in_config);
+ kfree(dig_out_config);
+ kfree(chan_in_config);
+ kfree(chan_out_config);
+
+ if (result) {
+ if (devpriv->tty) {
+ filp_close(devpriv->tty, 0);
+ devpriv->tty = NULL;
+ }
+ }
}
+ return result;
}
static void serial_2002_close(struct comedi_device *dev)
@@ -879,7 +905,7 @@ static int serial2002_detach(struct comedi_device *dev)
int i;
printk("comedi%d: serial2002: remove\n", dev->minor);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 5; i++) {
s = &dev->subdevices[i];
if (s->maxdata_list) {
kfree(s->maxdata_list);
@@ -891,4 +917,19 @@ static int serial2002_detach(struct comedi_device *dev)
return 0;
}
-COMEDI_INITCLEANUP(driver_serial2002);
+static int __init driver_serial2002_init_module(void)
+{
+ return comedi_driver_register(&driver_serial2002);
+}
+
+static void __exit driver_serial2002_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_serial2002);
+}
+
+module_init(driver_serial2002_init_module);
+module_exit(driver_serial2002_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
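The serial_2002_open() rework above replaces four 32-entry config_t arrays on the kernel stack with kcalloc() allocations, which in turn requires checking each allocation and freeing everything on every exit path. A condensed sketch of that idiom, assuming the locally defined struct config_t from the patch (the helper name is hypothetical):

static int serial2002_alloc_configs_sketch(void)
{
        struct config_t *dig_in_config, *dig_out_config;
        int result = 0;

        dig_in_config = kcalloc(32, sizeof(struct config_t), GFP_KERNEL);
        dig_out_config = kcalloc(32, sizeof(struct config_t), GFP_KERNEL);
        if (!dig_in_config || !dig_out_config) {
                result = -ENOMEM;
                goto err_alloc_configs;
        }

        /* ... probe the device and fill the zero-initialised tables ... */

err_alloc_configs:
        /* kfree(NULL) is a no-op, so a partial allocation is safe to free */
        kfree(dig_in_config);
        kfree(dig_out_config);
        return result;
}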
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index 490753b3d90..0b9ecb19511 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -39,28 +39,28 @@ Configuration Options:
* The previous block comment is used to automatically generate
* documentation in Comedi and Comedilib. The fields:
*
- * Driver: the name of the driver
- * Description: a short phrase describing the driver. Don't list boards.
- * Devices: a full list of the boards that attempt to be supported by
- * the driver. Format is "(manufacturer) board name [comedi name]",
- * where comedi_name is the name that is used to configure the board.
- * See the comment near board_name: in the struct comedi_driver structure
- * below. If (manufacturer) or [comedi name] is missing, the previous
- * value is used.
- * Author: you
- * Updated: date when the _documentation_ was last updated. Use 'date -R'
- * to get a value for this.
- * Status: a one-word description of the status. Valid values are:
- * works - driver works correctly on most boards supported, and
- * passes comedi_test.
- * unknown - unknown. Usually put there by ds.
- * experimental - may not work in any particular release. Author
- * probably wants assistance testing it.
- * bitrotten - driver has not been update in a long time, probably
- * doesn't work, and probably is missing support for significant
- * Comedi interface features.
- * untested - author probably wrote it "blind", and is believed to
- * work, but no confirmation.
+ * Driver: the name of the driver
+ * Description: a short phrase describing the driver. Don't list boards.
+ * Devices: a full list of the boards that attempt to be supported by
+ * the driver. Format is "(manufacturer) board name [comedi name]",
+ * where comedi_name is the name that is used to configure the board.
+ * See the comment near board_name: in the struct comedi_driver structure
+ * below. If (manufacturer) or [comedi name] is missing, the previous
+ * value is used.
+ * Author: you
+ * Updated: date when the _documentation_ was last updated. Use 'date -R'
+ * to get a value for this.
+ * Status: a one-word description of the status. Valid values are:
+ * works - driver works correctly on most boards supported, and
+ * passes comedi_test.
+ * unknown - unknown. Usually put there by ds.
+ * experimental - may not work in any particular release. Author
+ * probably wants assistance testing it.
+ * bitrotten - driver has not been update in a long time, probably
+ * doesn't work, and probably is missing support for significant
+ * Comedi interface features.
+ * untested - author probably wrote it "blind", and is believed to
+ * work, but no confirmation.
*
* These headers should be followed by a blank line, and any comments
* you wish to say about the driver. The comment area is the place
@@ -620,12 +620,59 @@ static int skel_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
-/*
- * A convenient macro that defines init_module() and cleanup_module(),
- * as necessary.
- */
-COMEDI_INITCLEANUP(driver_skel);
-/* If you are writing a PCI driver you should use COMEDI_PCI_INITCLEANUP
- * instead.
- */
-/* COMEDI_PCI_INITCLEANUP(driver_skel, skel_pci_table) */
+#ifdef CONFIG_COMEDI_PCI
+static int __devinit driver_skel_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_skel.driver_name);
+}
+
+static void __devexit driver_skel_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_skel_pci_driver = {
+ .id_table = skel_pci_table,
+ .probe = &driver_skel_pci_probe,
+ .remove = __devexit_p(&driver_skel_pci_remove)
+};
+
+static int __init driver_skel_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_skel);
+ if (retval < 0)
+ return retval;
+
+ driver_skel_pci_driver.name = (char *)driver_skel.driver_name;
+ return pci_register_driver(&driver_skel_pci_driver);
+}
+
+static void __exit driver_skel_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_skel_pci_driver);
+ comedi_driver_unregister(&driver_skel);
+}
+
+module_init(driver_skel_init_module);
+module_exit(driver_skel_cleanup_module);
+#else
+static int __init driver_skel_init_module(void)
+{
+ return comedi_driver_register(&driver_skel);
+}
+
+static void __exit driver_skel_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_skel);
+}
+
+module_init(driver_skel_init_module);
+module_exit(driver_skel_cleanup_module);
+#endif
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index 18b0a83c4bb..526de2efa12 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -102,7 +102,18 @@ static struct comedi_driver driver_dnp = {
.num_names = ARRAY_SIZE(dnp_boards),
};
-COMEDI_INITCLEANUP(driver_dnp);
+static int __init driver_dnp_init_module(void)
+{
+ return comedi_driver_register(&driver_dnp);
+}
+
+static void __exit driver_dnp_cleanup_module(void)
+{
+ comedi_driver_unregister(&driver_dnp);
+}
+
+module_init(driver_dnp_init_module);
+module_exit(driver_dnp_cleanup_module);
static int dnp_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -314,3 +325,7 @@ static int dnp_dio_insn_config(struct comedi_device *dev,
return 1;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 16d4c9f6916..598884ec3ed 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -114,7 +114,18 @@ static struct comedi_driver unioxx5_driver = {
.detach = unioxx5_detach
};
-COMEDI_INITCLEANUP(unioxx5_driver);
+static int __init unioxx5_driver_init_module(void)
+{
+ return comedi_driver_register(&unioxx5_driver);
+}
+
+static void __exit unioxx5_driver_cleanup_module(void)
+{
+ comedi_driver_unregister(&unioxx5_driver);
+}
+
+module_init(unioxx5_driver_init_module);
+module_exit(unioxx5_driver_cleanup_module);
static int unioxx5_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
@@ -302,7 +313,8 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
__unioxx5_analog_config(usp, i * 2);
outb(i + 1, subdev_iobase + 5); /* sends channel number to card */
outb('H', subdev_iobase + 6); /* requests EEPROM world */
- while (!(inb(subdev_iobase + 0) & TxBE)) ; /* waits while writting will be allowed */
+ while (!(inb(subdev_iobase + 0) & TxBE))
+ ; /* wait until writing is allowed */
outb(0, subdev_iobase + 6);
/* waits while reading of two bytes will be allowed */
@@ -437,7 +449,8 @@ static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
/* sending for bytes to module(one byte per cycle iteration) */
for (i = 0; i < 4; i++) {
- while (!((inb(usp->usp_iobase + 0)) & TxBE)) ; /* waits while writting will be allowed */
+ while (!((inb(usp->usp_iobase + 0)) & TxBE))
+ ; /* wait until writing is allowed */
outb(usp->usp_extra_data[module][i], usp->usp_iobase + 6);
}
@@ -467,7 +480,8 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
control = inb(usp->usp_iobase); /* get control register byte */
/* waits while reading four bytes will be allowed */
- while (!((control = inb(usp->usp_iobase + 0)) & Rx4CA)) ;
+ while (!((control = inb(usp->usp_iobase + 0)) & Rx4CA))
+ ;
/* if four bytes readding error occurs - return 0(false) */
if ((control & Rx4CA_ERR_MASK)) {
@@ -526,3 +540,7 @@ static int __unioxx5_define_chan_offset(int chan_num)
return (chan_num >> 3) + 1;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 27b4cb2e2ec..4b320b1ff82 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -111,7 +111,7 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#define VENDOR_DIR_IN 0xC0
#define VENDOR_DIR_OUT 0x40
-/* internal adresses of the 8051 processor */
+/* internal addresses of the 8051 processor */
#define USBDUXSUB_CPUCS 0xE600
/*
@@ -2085,7 +2085,7 @@ static int usbdux_pwm_start(struct comedi_device *dev,
if (ret < 0)
return ret;
- /* initalise the buffer */
+ /* initialise the buffer */
for (i = 0; i < this_usbduxsub->sizePwmBuf; i++)
((char *)(this_usbduxsub->urbPwm->transfer_buffer))[i] = 0;
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 29c3c016b93..0a164a9a66c 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -67,7 +67,7 @@
#define VENDOR_DIR_OUT 0x40
/*
- * internal adresses of the 8051 processor
+ * internal addresses of the 8051 processor
*/
#define USBDUXFASTSUB_CPUCS 0xE600
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 863aae40ede..0252b440885 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -93,7 +93,7 @@ static int comedi_do_insn(struct comedi_device *dev, struct comedi_insn *insn)
s = dev->subdevices + insn->subdev;
if (s->type == COMEDI_SUBD_UNUSED) {
- printk("%d not useable subdevice\n", insn->subdev);
+ printk(KERN_ERR "%d not useable subdevice\n", insn->subdev);
ret = -EIO;
goto error;
}
@@ -102,7 +102,7 @@ static int comedi_do_insn(struct comedi_device *dev, struct comedi_insn *insn)
ret = comedi_check_chanlist(s, 1, &insn->chanspec);
if (ret < 0) {
- printk("bad chanspec\n");
+ printk(KERN_ERR "bad chanspec\n");
ret = -EINVAL;
goto error;
}
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index a4ec891328c..fbb80f09a3d 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -15,11 +15,12 @@
along with this driver. If not, see <http://www.gnu.org/licenses/>.
***************************************************************************/
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include "crystalhd_lnx.h"
+static DEFINE_MUTEX(chd_dec_mutex);
static struct class *crystalhd_class;
static struct crystalhd_adp *g_adp_info;
@@ -152,10 +153,8 @@ static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, struct crystalhd_ioctl
if (rc) {
BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n",
io->add_cdata_sz, (unsigned int)ua_off);
- if (io->add_cdata) {
- kfree(io->add_cdata);
- io->add_cdata = NULL;
- }
+ kfree(io->add_cdata);
+ io->add_cdata = NULL;
return -ENODATA;
}
@@ -273,22 +272,22 @@ static long chd_dec_ioctl(struct file *fd, unsigned int cmd, unsigned long ua)
return -EINVAL;
}
- uc = (struct crystalhd_user *)fd->private_data;
+ uc = fd->private_data;
if (!uc) {
BCMLOG_ERR("Failed to get uc\n");
return -ENODATA;
}
- lock_kernel();
+ mutex_lock(&chd_dec_mutex);
cproc = crystalhd_get_cmd_proc(&adp->cmds, cmd, uc);
if (!cproc) {
BCMLOG_ERR("Unhandled command: %d\n", cmd);
- unlock_kernel();
+ mutex_unlock(&chd_dec_mutex);
return -EINVAL;
}
ret = chd_dec_api_cmd(adp, ua, uc->uid, cmd, cproc);
- unlock_kernel();
+ mutex_unlock(&chd_dec_mutex);
return ret;
}
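
This hunk is the standard big-kernel-lock removal pattern: the file-scope DEFINE_MUTEX() added near the top of the file takes over from lock_kernel()/unlock_kernel() around the ioctl body. Reduced to its shape (all names below are placeholders, not the crystalhd symbols; handle_cmd is an assumed driver-specific helper):

#include <linux/mutex.h>
#include <linux/fs.h>

static DEFINE_MUTEX(drv_mutex);			/* one lock per driver, file scope */

static long drv_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&drv_mutex);			/* was: lock_kernel() */
	ret = handle_cmd(filp, cmd, arg);	/* assumed driver-specific helper */
	mutex_unlock(&drv_mutex);		/* was: unlock_kernel() */
	return ret;
}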
@@ -334,7 +333,7 @@ static int chd_dec_close(struct inode *in, struct file *fd)
return -EINVAL;
}
- uc = (struct crystalhd_user *)fd->private_data;
+ uc = fd->private_data;
if (!uc) {
BCMLOG_ERR("Failed to get uc\n");
return -ENODATA;
@@ -435,8 +434,7 @@ static void __devexit chd_dec_release_chdev(struct crystalhd_adp *adp)
/* Clear iodata pool.. */
do {
temp = chd_dec_alloc_iodata(adp, 0);
- if (temp)
- kfree(temp);
+ kfree(temp);
} while (temp);
crystalhd_delete_elem_pool(adp);
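
Both kfree() cleanups in this file rely on the same guarantee: kfree(NULL) is defined to be a no-op, so guarding the call with an if adds nothing. A minimal sketch of the pattern (generic names, not crystalhd's):

/* Sketch: releasing an optionally-allocated buffer. */
#include <linux/slab.h>

static void release_buf(void **bufp)
{
	/* before: if (*bufp) { kfree(*bufp); *bufp = NULL; } */
	kfree(*bufp);		/* kfree(NULL) is a no-op, no guard needed */
	*bufp = NULL;
}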
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index a43b18816fa..bbe36437ac1 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -698,7 +698,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
/* Card "creation" */
card->private_free = snd_cx25821_dev_free;
- chip = (struct cx25821_audio_dev *) card->private_data;
+ chip = card->private_data;
spin_lock_init(&chip->reg_lock);
chip->dev = dev;
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index 86b49809026..23ea101d7a8 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -122,19 +122,7 @@ watchdog_func (unsigned long arg)
pr_warning("%s: drvr not available (%x)\n", __func__, drvr_state);
return;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- /* Initialize the tq entry only the first time */
- if (wd->init_tq)
- {
- wd->init_tq = 0;
- wd->tq.routine = wd->func;
- wd->tq.sync = 0;
- wd->tq.data = wd->softc;
- }
- schedule_task (&wd->tq);
-#else
schedule_work (&wd->work);
-#endif
mod_timer (&wd->h, jiffies + wd->ticks);
}
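
With the 2.4 task-queue branch removed, the watchdog reduces to the usual timer-plus-workqueue pattern: the timer callback only queues deferred work and re-arms itself, and the heavy lifting runs later in process context. A minimal sketch under the 2.6-era API this driver now assumes (struct and field names are illustrative, not cxt1e1's struct watchdog):

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

struct wd_ctx {				/* illustrative context structure */
	struct timer_list timer;
	struct work_struct work;
	unsigned long ticks;
};

static void wd_work(struct work_struct *work)
{
	struct wd_ctx *wd = container_of(work, struct wd_ctx, work);
	/* real watchdog servicing happens here, in process context */
	(void)wd;
}

static void wd_timer_fn(unsigned long arg)	/* timer signature of this era */
{
	struct wd_ctx *wd = (struct wd_ctx *)arg;

	schedule_work(&wd->work);		/* defer out of timer context */
	mod_timer(&wd->timer, jiffies + wd->ticks);	/* re-arm */
}

static void wd_start(struct wd_ctx *wd)
{
	INIT_WORK(&wd->work, wd_work);
	setup_timer(&wd->timer, wd_timer_fn, (unsigned long)wd);
	mod_timer(&wd->timer, jiffies + wd->ticks);
}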
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 4c8610293fc..89200e7af26 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -305,15 +305,9 @@ c4hw_attach_all (void)
error_flag = 0;
prep_hdw_info ();
/*** scan PCI bus for all possible boards */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
while ((pdev = pci_get_device (PCI_VENDOR_ID_CONEXANT,
- PCI_DEVICE_ID_CN8474,
- pdev)))
-#else
- while ((pdev = pci_find_device (PCI_VENDOR_ID_CONEXANT,
PCI_DEVICE_ID_CN8474,
pdev)))
-#endif
{
if (c4_hdw_init (pdev, found))
found++;
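
pci_find_device() was the unrefcounted 2.4-era interface; pci_get_device() both iterates and holds a reference on each device it returns, dropping the reference on the previous one as it advances. The surviving loop looks like this in isolation (PCI_DEVICE_ID_CN8474 is the driver's own constant; the body is simplified):

#include <linux/pci.h>

static int scan_boards(void)
{
	struct pci_dev *pdev = NULL;
	int found = 0;

	/* pci_get_device() releases the previous pdev and references the
	 * next match, so a loop run to completion is refcount-correct. */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_CONEXANT,
				      PCI_DEVICE_ID_CN8474, pdev)) != NULL)
		found++;

	/* breaking out of the loop early would require pci_dev_put(pdev) */
	return found;
}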
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 134e7568024..eb0f4bdf627 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -142,10 +142,6 @@ getuserbychan (int channum)
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DEV_TO_PRIV(dev) ( * (struct c4_priv **) ((hdlc_device*)(dev)+1))
-#else
-
char *
get_hdlc_name (hdlc_device * hdlc)
{
@@ -154,7 +150,6 @@ get_hdlc_name (hdlc_device * hdlc)
return dev->name;
}
-#endif
static status_t
@@ -167,7 +162,6 @@ mkret (int bsd)
}
/***************************************************************************/
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
#include <linux/workqueue.h>
/***
@@ -259,7 +253,6 @@ c4_wq_port_cleanup (mpi_t * pi)
pi->wq_port = 0;
}
}
-#endif
/***************************************************************************/
@@ -291,48 +284,6 @@ void_open (struct net_device * ndev)
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
-
-/** Linux 2.4.18-19 **/
-STATIC int
-chan_open (hdlc_device * hdlc)
-{
- status_t ret;
-
- if ((ret = c4_chan_up (DEV_TO_PRIV (hdlc)->ci, DEV_TO_PRIV (hdlc)->channum)))
- return -ret;
- MOD_INC_USE_COUNT;
- netif_start_queue (hdlc_to_dev (hdlc));
- return 0; /* no error = success */
-}
-
-#else
-
-/** Linux 2.4.20 and higher **/
-STATIC int
-chan_open (struct net_device * ndev)
-{
- hdlc_device *hdlc = dev_to_hdlc (ndev);
- status_t ret;
-
- hdlc->proto = IF_PROTO_HDLC;
- if ((ret = hdlc_open (hdlc)))
- {
- pr_info("hdlc_open failure, err %d.\n", ret);
- return ret;
- }
- if ((ret = c4_chan_up (DEV_TO_PRIV (hdlc)->ci, DEV_TO_PRIV (hdlc)->channum)))
- return -ret;
- MOD_INC_USE_COUNT;
- netif_start_queue (hdlc_to_dev (hdlc));
- return 0; /* no error = success */
-}
-#endif
-
-#else
-
-/** Linux 2.6 **/
STATIC int
chan_open (struct net_device * ndev)
{
@@ -351,39 +302,8 @@ chan_open (struct net_device * ndev)
netif_start_queue (ndev);
return 0; /* no error = success */
}
-#endif
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
-
-/** Linux 2.4.18-19 **/
-STATIC void
-chan_close (hdlc_device * hdlc)
-{
- netif_stop_queue (hdlc_to_dev (hdlc));
- musycc_chan_down ((ci_t *) 0, DEV_TO_PRIV (hdlc)->channum);
- MOD_DEC_USE_COUNT;
-}
-#else
-
-/** Linux 2.4.20 and higher **/
-STATIC int
-chan_close (struct net_device * ndev)
-{
- hdlc_device *hdlc = dev_to_hdlc (ndev);
-
- netif_stop_queue (hdlc_to_dev (hdlc));
- musycc_chan_down ((ci_t *) 0, DEV_TO_PRIV (hdlc)->channum);
- hdlc_close (hdlc);
- MOD_DEC_USE_COUNT;
- return 0;
-}
-#endif
-#else
-/** Linux 2.6 **/
STATIC int
chan_close (struct net_device * ndev)
{
@@ -396,37 +316,8 @@ chan_close (struct net_device * ndev)
module_put (THIS_MODULE);
return 0;
}
-#endif
-
-
-#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
-
-/** Linux 2.4.18-19 **/
-STATIC int
-chan_ioctl (hdlc_device * hdlc, struct ifreq * ifr, int cmd)
-{
- if (cmd == HDLCSCLOCK)
- {
- ifr->ifr_ifru.ifru_ivalue = LINE_DEFAULT;
- return 0;
- }
- return -EINVAL;
-}
-#endif
-#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
-STATIC int
-chan_dev_ioctl (struct net_device * hdlc, struct ifreq * ifr, int cmd)
-{
- if (cmd == HDLCSCLOCK)
- {
- ifr->ifr_ifru.ifru_ivalue = LINE_DEFAULT;
- return 0;
- }
- return -EINVAL;
-}
-#else
STATIC int
chan_dev_ioctl (struct net_device * dev, struct ifreq * ifr, int cmd)
{
@@ -435,16 +326,11 @@ chan_dev_ioctl (struct net_device * dev, struct ifreq * ifr, int cmd)
STATIC int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-chan_attach_noop (hdlc_device * hdlc, unsigned short foo_1, unsigned short foo_2)
-#else
chan_attach_noop (struct net_device * ndev, unsigned short foo_1, unsigned short foo_2)
-#endif
{
return 0; /* our driver has nothing to do here, show's
* over, go home */
}
-#endif
STATIC struct net_device_stats *
@@ -455,16 +341,12 @@ chan_get_stats (struct net_device * ndev)
struct sbecom_chan_stats *stats;
int channum;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- channum = DEV_TO_PRIV (ndev)->channum;
-#else
{
struct c4_priv *priv;
priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
channum = priv->channum;
}
-#endif
ch = c4_find_chan (channum);
if (ch == NULL)
@@ -511,34 +393,19 @@ get_ci_by_dev (struct net_device * ndev)
}
-#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
-STATIC int
-c4_linux_xmit (hdlc_device * hdlc, struct sk_buff * skb)
-{
- int rval;
-
- rval = musycc_start_xmit (DEV_TO_PRIV (hdlc)->ci, DEV_TO_PRIV (hdlc)->channum, skb);
- return -rval;
-}
-#else /* new */
STATIC int
c4_linux_xmit (struct sk_buff * skb, struct net_device * ndev)
{
const struct c4_priv *priv;
int rval;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- priv = DEV_TO_PRIV (ndev);
-#else
hdlc_device *hdlc = dev_to_hdlc (ndev);
priv = hdlc->priv;
-#endif
rval = musycc_start_xmit (priv->ci, priv->channum, skb);
return -rval;
}
-#endif /* GENERIC_HDLC_VERSION */
static const struct net_device_ops chan_ops = {
.ndo_open = chan_open,
@@ -823,18 +690,10 @@ do_create_chan (struct net_device * ndev, void *data)
ret = mkret (c4_new_chan (ci, cp.port, cp.channum, dev));
if (ret)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- rtnl_unlock (); /* needed due to Ioctl calling sequence */
- V7 (unregister_hdlc_device) (dev_to_hdlc (dev));
- rtnl_lock (); /* needed due to Ioctl calling sequence */
- OS_kfree (DEV_TO_PRIV (dev));
- OS_kfree (dev);
-#else
rtnl_unlock (); /* needed due to Ioctl calling sequence */
unregister_hdlc_device (dev);
rtnl_lock (); /* needed due to Ioctl calling sequence */
free_netdev (dev);
-#endif
}
return ret;
}
@@ -883,11 +742,7 @@ do_deluser (struct net_device * ndev, int lockit)
const struct c4_priv *priv;
int channum;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- priv = DEV_TO_PRIV (ndev);
-#else
priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
-#endif
ci = priv->ci;
channum = priv->channum;
@@ -897,22 +752,12 @@ do_deluser (struct net_device * ndev, int lockit)
ch->user = 0; /* will be freed, below */
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- if (lockit)
- rtnl_unlock (); /* needed if Ioctl calling sequence */
- V7 (unregister_hdlc_device) (dev_to_hdlc (ndev));
- if (lockit)
- rtnl_lock (); /* needed if Ioctl calling sequence */
- OS_kfree (DEV_TO_PRIV (ndev));
- OS_kfree (ndev);
-#else
if (lockit)
rtnl_unlock (); /* needed if Ioctl calling sequence */
unregister_hdlc_device (ndev);
if (lockit)
rtnl_lock (); /* needed if Ioctl calling sequence */
free_netdev (ndev);
-#endif
return 0;
}
@@ -1339,14 +1184,6 @@ c4_mod_remove (void)
module_init (c4_mod_init);
module_exit (c4_mod_remove);
-#ifndef SBE_INCLUDE_SYMBOLS
-#ifndef CONFIG_SBE_WANC24_NCOMM
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-EXPORT_NO_SYMBOLS;
-#endif
-#endif
-#endif
-
MODULE_AUTHOR ("SBE Technical Services <support@sbei.com>");
MODULE_DESCRIPTION ("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
#ifdef MODULE_LICENSE
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index d3f5a5b52dc..12c76a553e0 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -405,7 +405,6 @@ musycc_update_tx_thp (mch_t * ch)
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
/*
* This is the workq task executed by the OS when our queue_work() is
* scheduled and run. It can fire off either RX or TX ACTIVATION depending
@@ -515,7 +514,6 @@ musycc_wq_chan_restart (void *arg) /* channel private structure */
#endif
}
}
-#endif
/*
@@ -531,7 +529,6 @@ musycc_chan_restart (mch_t * ch)
ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
#endif
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
/* 2.6 - find next unprocessed message, then set TX thp to it */
#ifdef RLD_RESTART_DEBUG
pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work);
@@ -539,51 +536,9 @@ musycc_chan_restart (mch_t * ch)
c4_wk_chan_restart (ch); /* work queue mechanism fires off: Ref:
* musycc_wq_chan_restart () */
-#else
-
-
- /* 2.4 - find next unprocessed message, then set TX thp to it */
-#ifdef RLD_RESTART_DEBUG
- pr_info(">> musycc_chan_restart: scheduling Chan %x start_tx %x\n", ch->channum, ch->ch_start_tx);
-#endif
- /* restart transmission from background loop */
- ch->up->up->wd_notify = WD_NOTIFY_1TX;
-#endif
}
-#if 0
-void
-musycc_cleanup (ci_t * ci)
-{
- mpi_t *pi;
- int i, j;
-
- /* free up driver resources */
- ci->state = C_INIT; /* mark as hardware not available */
-
- for (i = 0; i < ci->max_ports; i++)
- {
- pi = &ci->port[i];
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
- c4_wq_port_cleanup (pi);
-#endif
- for (j = 0; j < MUSYCC_NCHANS; j++)
- {
- if (pi->chan[j])
- OS_kfree (pi->chan[j]); /* free mch_t struct */
- }
- OS_kfree (pi->regram_saved);
- }
-#if 0
- /* obsolete - watchdog is now static w/in ci_t */
- OS_free_watchdog (ci->wd);
-#endif
- OS_kfree (ci->iqd_p_saved);
- OS_kfree (ci);
-}
-#endif
-
void
rld_put_led (mpi_t * pi, u_int32_t ledval)
{
@@ -2008,37 +1963,13 @@ musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
atomic_add (len, &ci->tx_pending);
ch->s.tx_packets++;
ch->s.tx_bytes += len;
-#if 0
- spin_unlock_irqrestore (&ch->ch_txlock, flags); /* allow pending
- * interrupt to sneak
- * thru */
-#endif
-
/*
* If an ONR was seen, then channel requires poking to restart
* transmission.
*/
if (ch->ch_start_tx)
{
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,41)
- SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
- * board */
- if ((ch->ch_start_tx == CH_START_TX_ONR) && (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
- {
- /* ONR restart transmission from background loop */
- ci->wd_notify = WD_NOTIFY_ONR; /* enabled global watchdog
- * scan-thru */
- } else
- {
- /* start first transmission from background loop */
- ci->wd_notify = WD_NOTIFY_1TX; /* enabled global watchdog
- * scan-thru */
- }
musycc_chan_restart (ch);
- SD_SEM_GIVE (&ci->sem_wdbusy);
-#else
- musycc_chan_restart (ch);
-#endif
}
#ifdef SBE_WAN256T3_ENABLE
wan256t3_led (ci, LED_TX, LEDV_G);
@@ -2047,139 +1978,4 @@ musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
}
-#if 0
-int
-musycc_set_chan (ci_t * ci, int channum, struct sbecom_chan_param * p)
-{
- mch_t *ch;
- int rok = 0;
- int n = 0;
-
- if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
- return ECHRNG;
- if (!(ch = sd_find_chan (ci, channum)))
- return ENOENT;
- if (ch->channum != p->channum)
- return EINVAL;
- if (sd_line_is_ok (ch->user))
- {
- rok = 1;
- sd_line_is_down (ch->user);
- }
- if (ch->state == UP && /* bring down in current configuration */
- (ch->p.status != p->status ||
- ch->p.chan_mode != p->chan_mode ||
- ch->p.intr_mask != p->intr_mask ||
- ch->txd_free < ch->txd_num))
- {
- if ((n = musycc_chan_down (ci, channum)))
- return n;
- if (ch->p.mode_56k != p->mode_56k)
- {
- ch->p = *p; /* copy in new parameters */
- musycc_update_timeslots (&ci->port[ch->channum / MUSYCC_NCHANS]);
- } else
- ch->p = *p; /* copy in new parameters */
- if ((n = musycc_chan_up (ci, channum)))
- return n;
- sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
- * channel */
- } else
- {
- if (ch->p.mode_56k != p->mode_56k)
- {
- ch->p = *p; /* copy in new parameters */
- musycc_update_timeslots (&ci->port[ch->channum / MUSYCC_NCHANS]);
- } else
- ch->p = *p; /* copy in new parameters */
- }
-
- if (rok)
- sd_line_is_up (ch->user);
- return 0;
-}
-#endif
-
-
-int
-musycc_get_chan (ci_t * ci, int channum, struct sbecom_chan_param * p)
-{
- mch_t *ch;
-
-#if 0
- if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
- return ECHRNG;
-#endif
- if (!(ch = sd_find_chan (ci, channum)))
- return ENOENT;
- *p = ch->p;
- return 0;
-}
-
-
-int
-musycc_get_chan_stats (ci_t * ci, int channum, struct sbecom_chan_stats * p)
-{
- mch_t *ch;
-
- if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
- return ECHRNG;
- if (!(ch = sd_find_chan (ci, channum)))
- return ENOENT;
- *p = ch->s;
- p->tx_pending = atomic_read (&ch->tx_pending);
- return 0;
-}
-
-
-
-#ifdef SBE_WAN256T3_ENABLE
-int
-musycc_chan_down (ci_t * ci, int channum)
-{
- mch_t *ch;
- mpi_t *pi;
- int i, gchan;
-
- if (!(ch = sd_find_chan (ci, channum)))
- return EINVAL;
- pi = ch->up;
- gchan = ch->gchan;
-
- /* Deactivate the channel */
- musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan);
- ch->ch_start_rx = 0;
- musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan);
- ch->ch_start_tx = 0;
-
- if (ch->state == DOWN)
- return 0;
- ch->state = DOWN;
-
- pi->regram->thp[gchan] = 0;
- pi->regram->tmp[gchan] = 0;
- pi->regram->rhp[gchan] = 0;
- pi->regram->rmp[gchan] = 0;
- FLUSH_MEM_WRITE ();
- for (i = 0; i < ch->txd_num; i++)
- {
- if (ch->mdt[i].mem_token != 0)
- OS_mem_token_free (ch->mdt[i].mem_token);
- }
-
- for (i = 0; i < ch->rxd_num; i++)
- {
- if (ch->mdr[i].mem_token != 0)
- OS_mem_token_free (ch->mdr[i].mem_token);
- }
-
- OS_kfree (ch->mdt);
- ch->mdt = 0;
- OS_kfree (ch->mdr);
- ch->mdr = 0;
-
- return 0;
-}
-#endif
-
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.c b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
index 1c8dfb80e7d..62b12fb45fc 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.c
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
@@ -500,11 +500,7 @@ pmc_init_seeprom (u_int32_t addr, u_int32_t serialNum)
time_t createTime;
int i;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- createTime = CURRENT_TIME;
-#else
createTime = get_seconds ();
-#endif
/* use template data */
for (i = 0; i < sizeof (FLD_TYPE2); ++i)
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index 26c1f0ea72e..ef6ac7fe7dd 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -117,12 +117,8 @@ extern "C"
#include "pmcc4_private.h"
-#if !(LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
char *get_hdlc_name (hdlc_device *);
-#endif
-
-
/*
* external interface
*/
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 333cf2687dd..9f730e68526 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -119,12 +119,10 @@ char OSSIid_pmcc4_drvc[] =
#define KERN_WARN KERN_WARNING
/* forward references */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
status_t c4_wk_chan_init (mpi_t *, mch_t *);
void c4_wq_port_cleanup (mpi_t *);
status_t c4_wq_port_init (mpi_t *);
-#endif
int c4_loop_port (ci_t *, int, u_int8_t);
status_t c4_set_port (ci_t *, int);
status_t musycc_chan_down (ci_t *, int);
@@ -533,145 +531,15 @@ checkPorts (ci_t * ci)
STATIC void
c4_watchdog (ci_t * ci)
{
-#if 0
- //unsigned long flags;
-#endif
-
if (drvr_state != SBE_DRVR_AVAILABLE)
{
if (log_level >= LOG_MONITOR)
pr_info("drvr not available (%x)\n", drvr_state);
return;
}
-#if 0
- SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
- * board */
-#endif
-
ci->wdcount++;
checkPorts (ci);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,41)
- if (ci->wd_notify)
- { /* is there a state change to search for */
- int port, gchan;
-
- ci->wd_notify = 0; /* reset notification */
- for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++)
- {
- for (port = 0; port < ci->max_port; port++)
- {
- mch_t *ch = ci->port[port].chan[gchan];
-
- if (!ch || ci->state != C_RUNNING) /* state changed while
- * acquiring semaphore */
- break;
- if (ch->state == UP)/* channel must be set up */
- {
-#if 0
-#ifdef RLD_TRANS_DEBUG
- if (1 || log_level >= LOG_MONITOR)
-#else
- if (log_level >= LOG_MONITOR)
-#endif
- pr_info("%s: watchdog reviving Port %d Channel %d [%d] sts %x/%x, start_TX %x free %x start_RX %x\n",
- ci->devname, ch->channum, port, gchan, ch->channum,
- ch->p.status, ch->status,
- ch->ch_start_tx, ch->txd_free, ch->ch_start_rx);
-#endif
-
- /**********************************/
- /** check for RX restart request **/
- /**********************************/
-
- if (ch->ch_start_rx &&
- (ch->status & RX_ENABLED)) /* requires start on
- * enabled RX */
- {
- ch->ch_start_rx = 0; /* we are restarting RX... */
-#ifdef RLD_TRANS_DEBUG
- pr_info("++ c4_watchdog() CHAN RX ACTIVATE: chan %d\n",
- ch->channum);
-#endif
-#ifdef RLD_RXACT_DEBUG
- {
- struct mdesc *md;
- static int hereb4 = 7;
-
- if (hereb4)
- {
- hereb4--;
- md = &ch->mdr[ch->rxix_irq_srv];
- pr_info("++ c4_watchdog[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
- ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status), ch->s.rx_packets);
- musycc_dump_rxbuffer_ring (ch, 1); /* RLD DEBUG */
- }
- }
-#endif
- musycc_serv_req (ch->up, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | gchan);
- }
- /**********************************/
- /** check for TX restart request **/
- /**********************************/
-
- if (ch->ch_start_tx &&
- (ch->status & TX_ENABLED)) /* requires start on
- * enabled TX */
- {
- struct mdesc *md;
-
- /*
- * find next unprocessed message, then set TX thp to
- * it
- */
- musycc_update_tx_thp (ch);
-
-#if 0
- spin_lock_irqsave (&ch->ch_txlock, flags);
-#endif
- md = ch->txd_irq_srv;
- if (!md)
- {
- pr_info("-- c4_watchdog[%d]: WARNING, starting NULL md\n",
- ch->channum);
- pr_info("-- chan %d txd_irq_srv %p sts %x usr_add %p sts %x, txpkt %lu\n",
- ch->channum, ch->txd_irq_srv, le32_to_cpu ((struct mdesc *) (ch->txd_irq_srv)->status),
- ch->txd_usr_add, le32_to_cpu ((struct mdesc *) (ch->txd_usr_add)->status),
- ch->s.tx_packets);
-#if 0
- spin_unlock_irqrestore (&ch->ch_txlock, flags);
-#endif
- } else if (md->data && ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED))
- {
-#ifdef RLD_TRANS_DEBUG
- pr_info("++ c4_watchdog[%d] CHAN TX ACTIVATE: start_tx %x\n",
- ch->channum, ch->ch_start_tx);
-#endif
- ch->ch_start_tx = 0; /* we are restarting
- * TX... */
-#if 0
- spin_unlock_irqrestore (&ch->ch_txlock, flags); /* allow interrupts for
- * service request */
-#endif
- musycc_serv_req (ch->up, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | gchan);
-#ifdef RLD_TRANS_DEBUG
- if (1 || log_level >= LOG_MONITOR)
-#else
- if (log_level >= LOG_MONITOR)
-#endif
- pr_info("++ SACK[P%d/C%d] ack'd, continuing...\n",
- ch->up->portnum, ch->channum);
- }
- }
- }
- }
- }
- }
-#else
ci->wd_notify = 0;
-#endif
-#if 0
- SD_SEM_GIVE (&ci->sem_wdbusy);/* release per-board hold */
-#endif
}
@@ -690,9 +558,7 @@ c4_cleanup (void)
for (portnum = 0; portnum < ci->max_port; portnum++)
{
pi = &ci->port[portnum];
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
c4_wq_port_cleanup (pi);
-#endif
for (j = 0; j < MUSYCC_NCHANS; j++)
{
if (pi->chan[j])
@@ -700,10 +566,6 @@ c4_cleanup (void)
}
OS_kfree (pi->regram_saved);
}
-#if 0
- /* obsolete - watchdog is now static w/in ci_t */
- OS_free_watchdog (ci->wd);
-#endif
OS_kfree (ci->iqd_p_saved);
OS_kfree (ci);
ci = next; /* cleanup next board, if any */
@@ -1145,7 +1007,6 @@ c4_set_port (ci_t * ci, int portnum)
return EBUSY; /* group needs initialization only for
* first channel of a group */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
{
status_t ret;
@@ -1153,7 +1014,6 @@ c4_set_port (ci_t * ci, int portnum)
* workqueue_struct */
return (ret);
}
-#endif
init_comet (ci, pi->cometbase, pp->port_mode, 1 /* clockmaster == true */ , pp->portP);
clck = pci_read_32 ((u_int32_t *) &ci->cpldbase->mclk) & PMCC4_CPLD_MCLK_MASK;
@@ -1269,14 +1129,12 @@ c4_new_chan (ci_t * ci, int portnum, int channum, void *user)
spin_lock_init (&ch->ch_rxlock);
spin_lock_init (&ch->ch_txlock);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
{
status_t ret;
if ((ret = c4_wk_chan_init (pi, ch)))
return ret;
}
-#endif
/* save off interface assignments which bound a board */
if (ci->first_if == 0) /* first channel registered is assumed to
@@ -1705,31 +1563,23 @@ sbecom_get_brdinfo (ci_t * ci, struct sbe_brd_info * bip, u_int8_t *bsn)
if (ci->first_if)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- np = (char *) hdlc_to_name (ci->first_if);
-#else
{
struct net_device *dev;
dev = (struct net_device *) ci->first_if;
np = (char *) dev->name;
}
-#endif
strncpy (bip->first_iname, np, CHNM_STRLEN - 1);
} else
strcpy (bip->first_iname, "<NULL>");
if (ci->last_if)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- np = (char *) hdlc_to_name (ci->last_if);
-#else
{
struct net_device *dev;
dev = (struct net_device *) ci->last_if;
np = (char *) dev->name;
}
-#endif
strncpy (bip->last_iname, np, CHNM_STRLEN - 1);
} else
strcpy (bip->last_iname, "<NULL>");
@@ -1763,11 +1613,7 @@ c4_get_iidinfo (ci_t * ci, struct sbe_iid_info * iip)
if (!(dev = getuserbychan (iip->channum)))
return ENOENT;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- np = (char *) hdlc_to_name (dev_to_hdlc (dev));
-#else
np = dev->name;
-#endif
strncpy (iip->iname, np, CHNM_STRLEN - 1);
return 0;
}
@@ -1826,11 +1672,7 @@ c4_ebus_intr_th_handler (void *devp)
pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
#endif
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,20)
- return;
-#else
return IRQ_RETVAL (handled);
-#endif
}
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index c65172db2ad..5a72cb5cff4 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -48,9 +48,6 @@
#else
#include <linux/types.h>
#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-#include <linux/config.h>
-#endif
#if defined(CONFIG_SMP) && ! defined(__SMP__)
#define __SMP__
#endif
@@ -60,12 +57,8 @@
#ifdef MODULE
#ifdef MODVERSIONS
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#include <linux/modversions.h>
-#else
#include <config/modversions.h>
#endif
-#endif
#include <linux/module.h>
#endif
#endif
@@ -260,11 +253,7 @@ OS_sem_free (void *sem)
struct watchdog
{
struct timer_list h;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- struct tq_struct tq;
-#else
struct work_struct work;
-#endif
void *softc;
void (*func) (void *softc);
int ticks;
diff --git a/drivers/staging/dream/camera/msm_vfe8x.c b/drivers/staging/dream/camera/msm_vfe8x.c
index e61fdba6283..d87d56f914d 100644
--- a/drivers/staging/dream/camera/msm_vfe8x.c
+++ b/drivers/staging/dream/camera/msm_vfe8x.c
@@ -644,17 +644,10 @@ static int vfe_config(struct msm_vfe_cfg_cmd *cmd, void *data)
if (!axid)
return -EFAULT;
- axio =
- kmalloc(sizeof(struct vfe_cmd_axi_output_config),
- GFP_ATOMIC);
- if (!axio)
- return -ENOMEM;
-
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
- sizeof(struct vfe_cmd_axi_output_config))) {
- kfree(axio);
- return -EFAULT;
- }
+ axio = memdup_user((void __user *)(vfecmd.value),
+ sizeof(struct vfe_cmd_axi_output_config));
+ if (IS_ERR(axio))
+ return PTR_ERR(axio);
vfe_config_axi(OUTPUT_1, axid, axio);
vfe_axi_output_config(axio);
@@ -669,17 +662,10 @@ static int vfe_config(struct msm_vfe_cfg_cmd *cmd, void *data)
if (!axid)
return -EFAULT;
- axio =
- kmalloc(sizeof(struct vfe_cmd_axi_output_config),
- GFP_ATOMIC);
- if (!axio)
- return -ENOMEM;
-
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
- sizeof(struct vfe_cmd_axi_output_config))) {
- kfree(axio);
- return -EFAULT;
- }
+ axio = memdup_user((void __user *)(vfecmd.value),
+ sizeof(struct vfe_cmd_axi_output_config));
+ if (IS_ERR(axio))
+ return PTR_ERR(axio);
vfe_config_axi(OUTPUT_2, axid, axio);
@@ -694,17 +680,10 @@ static int vfe_config(struct msm_vfe_cfg_cmd *cmd, void *data)
if (!axid)
return -EFAULT;
- axio =
- kmalloc(sizeof(struct vfe_cmd_axi_output_config),
- GFP_ATOMIC);
- if (!axio)
- return -ENOMEM;
-
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
- sizeof(struct vfe_cmd_axi_output_config))) {
- kfree(axio);
- return -EFAULT;
- }
+ axio = memdup_user((void __user *)(vfecmd.value),
+ sizeof(struct vfe_cmd_axi_output_config));
+ if (IS_ERR(axio))
+ return PTR_ERR(axio);
vfe_config_axi(OUTPUT_1_AND_2,
axid, axio);
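
All three hunks in this file collapse the same open-coded sequence — kmalloc(), copy_from_user(), and a kfree() on failure — into memdup_user(), which returns either a kernel copy of the user buffer or an ERR_PTR() value. The shape of the conversion (cfg_type is a stand-in for the vfe config struct):

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>

struct cfg_type { int dummy; };		/* stand-in for struct vfe_cmd_axi_output_config */

static int apply_cfg(const void __user *uptr)
{
	struct cfg_type *cfg;

	/* before: kmalloc() + copy_from_user() + kfree() on error */
	cfg = memdup_user(uptr, sizeof(*cfg));
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);	/* -ENOMEM or -EFAULT */

	/* ... use cfg ... */
	kfree(cfg);
	return 0;
}

One behavioural nuance: memdup_user() performs a sleeping allocation, so the conversion only fits callers that may sleep — the code it replaces here allocated with GFP_ATOMIC.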
diff --git a/drivers/staging/dream/pmem.c b/drivers/staging/dream/pmem.c
index 6387365a833..7d6bbadd7fc 100644
--- a/drivers/staging/dream/pmem.c
+++ b/drivers/staging/dream/pmem.c
@@ -24,8 +24,8 @@
#include <linux/mempolicy.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#define PMEM_MAX_DEVICES 10
@@ -175,7 +175,7 @@ static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);
-struct file_operations pmem_fops = {
+const struct file_operations pmem_fops = {
.release = pmem_release,
.mmap = pmem_mmap,
.open = pmem_open,
@@ -209,7 +209,7 @@ static int has_allocation(struct file *file)
if (unlikely(!file->private_data))
return 0;
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
if (unlikely(data->index < 0))
return 0;
return 1;
@@ -223,7 +223,7 @@ static int is_master_owner(struct file *file)
if (!is_pmem_file(file) || !has_allocation(file))
return 0;
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
if (PMEM_FLAGS_MASTERMAP & data->flags)
return 1;
master_file = fget_light(data->master_fd, &put_needed);
@@ -268,7 +268,7 @@ static void pmem_revoke(struct file *file, struct pmem_data *data);
static int pmem_release(struct inode *inode, struct file *file)
{
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *data = file->private_data;
struct pmem_region_node *region_node;
struct list_head *elt, *elt2;
int id = get_id(file), ret = 0;
@@ -399,8 +399,8 @@ static int pmem_allocate(int id, unsigned long len)
DLOG("order %lx\n", order);
/* look through the bitmap:
- * if you find a free slot of the correct order use it
- * otherwise, use the best fit (smallest with size > order) slot
+ * if you find a free slot of the correct order use it
+ * otherwise, use the best fit (smallest with size > order) slot
*/
while (curr < end) {
if (PMEM_IS_FREE(id, curr)) {
@@ -426,8 +426,8 @@ static int pmem_allocate(int id, unsigned long len)
}
/* now partition the best fit:
- * split the slot into 2 buddies of order - 1
- * repeat until the slot is of the correct order
+ * split the slot into 2 buddies of order - 1
+ * repeat until the slot is of the correct order
*/
while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
int buddy;
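
The comments re-indented here describe pmem's buddy-style carve-up: scan the bitmap for a free slot of the requested order, otherwise take the smallest larger slot and split it in half repeatedly until it reaches that order. The splitting step, reduced to arithmetic on orders and page indices (standalone illustration, not pmem's actual macros or data structures):

/* A slot of order k covers 2^k pages; halving it frees a buddy slot
 * that starts 2^(k-1) pages further on. order_of[] maps a slot's start
 * index to its current order. */
static void split_to_order(unsigned char *order_of, unsigned long slot,
			   unsigned char order)
{
	while (order_of[slot] > order) {
		unsigned long buddy;

		order_of[slot]--;			/* halve the slot */
		buddy = slot + (1UL << order_of[slot]);	/* its upper half ... */
		order_of[buddy] = order_of[slot];	/* ... becomes a free buddy */
	}
}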
@@ -591,7 +591,7 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
}
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
down_write(&data->sem);
/* check this file isn't already mmaped, for submaps check this file
* has never been mmaped */
@@ -690,7 +690,7 @@ int get_pmem_user_addr(struct file *file, unsigned long *start,
#endif
return -1;
}
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
down_read(&data->sem);
if (data->vma) {
*start = data->vma->vm_start;
@@ -712,7 +712,7 @@ int get_pmem_addr(struct file *file, unsigned long *start,
if (!is_pmem_file(file) || !has_allocation(file))
return -1;
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
if (data->index == -1) {
#if PMEM_DEBUG
printk(KERN_INFO "pmem: requested pmem data from file with no "
@@ -766,7 +766,7 @@ void put_pmem_file(struct file *file)
if (!is_pmem_file(file))
return;
id = get_id(file);
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
#if PMEM_DEBUG
down_write(&data->sem);
if (data->ref == 0) {
@@ -793,7 +793,7 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
return;
id = get_id(file);
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
if (!pmem[id].cached)
return;
@@ -822,7 +822,7 @@ end:
static int pmem_connect(unsigned long connect, struct file *file)
{
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *data = file->private_data;
struct pmem_data *src_data;
struct file *src_file;
int ret = 0, put_needed;
@@ -842,7 +842,7 @@ static int pmem_connect(unsigned long connect, struct file *file)
ret = -EINVAL;
goto err_bad_file;
}
- src_data = (struct pmem_data *)src_file->private_data;
+ src_data = src_file->private_data;
if (has_allocation(file) && (data->index != src_data->index)) {
printk(KERN_INFO "pmem: file is already mapped but doesn't "
@@ -929,7 +929,7 @@ int pmem_remap(struct pmem_region *region, struct file *file,
struct mm_struct *mm = NULL;
struct list_head *elt, *elt2;
int id = get_id(file);
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *data = file->private_data;
/* pmem region must be aligned on a page boundry */
if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
@@ -1053,7 +1053,7 @@ static void pmem_revoke(struct file *file, struct pmem_data *data)
static void pmem_get_size(struct pmem_region *region, struct file *file)
{
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *data = file->private_data;
int id = get_id(file);
if (!has_allocation(file)) {
@@ -1082,7 +1082,7 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
region.offset = 0;
region.len = 0;
} else {
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
region.offset = pmem_start_addr(id, data);
region.len = pmem_len(id, data);
}
@@ -1099,7 +1099,7 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&region, (void __user *)arg,
sizeof(struct pmem_region)))
return -EFAULT;
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
return pmem_remap(&region, file, PMEM_MAP);
}
break;
@@ -1109,7 +1109,7 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&region, (void __user *)arg,
sizeof(struct pmem_region)))
return -EFAULT;
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
return pmem_remap(&region, file, PMEM_UNMAP);
break;
}
@@ -1139,7 +1139,7 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
if (has_allocation(file))
return -EINVAL;
- data = (struct pmem_data *)file->private_data;
+ data = file->private_data;
data->index = pmem_allocate(id, arg);
break;
}
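
Most of the remaining pmem.c churn is the same mechanical cleanup: file->private_data is a void *, and C converts void * to any object pointer type implicitly, so the (struct pmem_data *) casts are noise; the fops table also becomes const so it can be placed in read-only data. In isolation (priv_data is a stand-in type):

#include <linux/fs.h>
#include <linux/module.h>

struct priv_data { int id; };			/* stand-in for struct pmem_data */

static int has_priv(struct file *file)
{
	struct priv_data *data = file->private_data;	/* no cast needed from void * */

	return data != NULL;
}

static const struct file_operations example_fops = {	/* const => read-only data */
	.owner = THIS_MODULE,
};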
diff --git a/drivers/staging/dt3155/Kconfig b/drivers/staging/dt3155/Kconfig
deleted file mode 100644
index 4a3293c721b..00000000000
--- a/drivers/staging/dt3155/Kconfig
+++ /dev/null
@@ -1,4 +0,0 @@
-config DT3155
- tristate "DT3155 Digitizer support"
- depends on PCI
-
diff --git a/drivers/staging/dt3155/Makefile b/drivers/staging/dt3155/Makefile
deleted file mode 100644
index 136f21fdbbe..00000000000
--- a/drivers/staging/dt3155/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_DT3155) += dt3155.o
-dt3155-objs := \
- dt3155_drv.o \
- dt3155_isr.o \
- dt3155_io.o \
- allocator.o
diff --git a/drivers/staging/dt3155/TODO b/drivers/staging/dt3155/TODO
deleted file mode 100644
index 3baa3b6294c..00000000000
--- a/drivers/staging/dt3155/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - fix checkpatch.pl issues
- - remove old kernel support, it is not needed
- - convert to proper PCI device API
- - fix sparse warnings
- - audit for correct subsystem interaction
- - review review review!
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com>
-and Scott Smedley <ss@aao.gov.au>
diff --git a/drivers/staging/dt3155/allocator.README b/drivers/staging/dt3155/allocator.README
deleted file mode 100644
index 05700b6c926..00000000000
--- a/drivers/staging/dt3155/allocator.README
+++ /dev/null
@@ -1,98 +0,0 @@
-
-The allocator shown here exploits high memory. This document explains
-how a user can deal with drivers uses this allocator and how a
-programmer can link in the module.
-
-The module is being used by my pxc and pxdrv device drivers (as well as
-other ones), available from ftp.systemy.it/pub/develop and
-ftp.linux.it/pub/People/Rubini
-
- User's manual
- =============
-
-
-One of the most compelling problems with any DMA-capable device is the
-allocation of a suitable memory buffer. The "allocator" module tries
-to deal with the problem in a clean way. The module is able to use
-high memory (above the one used in normal operation) for DMA
-allocation.
-
-To prevent the kernel for using high memory, so that it remains
-available for DMA, you should pass a command line argument to the
-kernel. Command line arguments can be passed to Lilo, to Loadlin or
-to whichever loader you are using (unless it's very poor in design).
-For Lilo, either use "append=" in /etc/lilo.conf or add commandline
-arguments to the interactive prompt. For example, I have a 32MB box
-and reserve two megs for DMA:
-
-In lilo.conf:
- image = /zImage
- label = linux
- append = "mem=30M"
-
-Or, interactively:
- LILO: linux mem=30M
-
-Once the kernel is booted with the right command-line argument, any
-driver linked with the allocator module will be able to get
-DMA-capable memory without much trouble (unless the various drivers
-need more memory than available).
-
-The module implements an alloc/free mechanism, so that it can serve
-multiple drivers at the same time. Note however that the allocator
-uses all of high memory and assumes to be the only piece of software
-using such memory.
-
-
- Programmer's manual
- ===================
-
-The allocator, as released, is designed to be linked to a device
-driver. In this case, the driver must call allocator_init() before
-using the allocator and must call allocator_cleanup() before
-unloading. This is usually done from within init_module() and
-cleanup_module(). If the allocator is linked to a driver, it won't be
-possible for several drivers to allocate high DMA memory, as explained
-above.
-
-It is possible, on the other hand, to compile the module as a standalone
-module, so that several modules can rely on the allocator for they DMA
-buffers. To compile the allocator as a standalone module, do the
-following in this directory (or provide a suitable Makefile, or edit
-the source code):
-
- make allocator.o CC="gcc -Dallocator_init=init_module -Dallocator_cleanup=cleanup_module -include /usr/include/linux/module.h"
-
-The previous commandline tells to include <linux/module.h> in the
-first place, and to rename the init and cleanup function to the ones
-needed for module loading and unloading. Drivers using a standalone
-allocator won't need to call allocator_init() nor allocator_cleanup().
-
-The allocator exports the following functions (declared in allocator.h):
-
- unsigned long allocator_allocate_dma (unsigned long kilobytes,
- int priority);
-
- This function returns a physical address, over high_memory,
- which corresponds to an area of at least "kilobytes" kilobytes.
- The area will be owned by the module calling the function.
- The returned address can be passed to device boards, to instruct
- their DMA controllers, via phys_to_bus(). The address can be used
- by C code after vremap()/ioremap(). The "priority" argument should
- be GFP_KERNEL or GFP_ATOMIC, according to the context of the
- caller; it is used to call kmalloc(), as the allocator must keep
- track of any region it gives away. In case of error the function
- returns 0, and the caller is expected to issue a -ENOMEM error.
-
-
- void allocator_free_dma (unsigned long address);
-
- This function is the reverse of the previous one. If a driver
- doesn't free the DMA memory it allocated, the allocator will
- consider such memory as busy. Note, however, that
- allocator_cleanup() calls kfree() on every region it reclaimed,
- so that a driver with the allocator linked in can avoid calling
- allocator_free_dma() at unload time.
-
-
-
diff --git a/drivers/staging/dt3155/allocator.c b/drivers/staging/dt3155/allocator.c
deleted file mode 100644
index d33947b0378..00000000000
--- a/drivers/staging/dt3155/allocator.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * allocator.c -- allocate after high_memory, if available
- *
- * NOTE: this is different from my previous allocator, the one that
- * assembles pages, which revealed itself both slow and unreliable.
- *
- * Copyright (C) 1998 rubini@linux.it (Alessandro Rubini)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
-
--- Changes --
-
- Date Programmer Description of changes made
- -------------------------------------------------------------------
- 02-Aug-2002 NJC allocator now steps in 1MB increments, rather
- than doubling its size each time.
- Also, allocator_init(u32 *) now returns
- (in the first arg) the size of the free
- space. This is no longer consistent with
- using the allocator as a module, and some changes
- may be necessary for that purpose. This was
- designed to work with the DT3155 driver, in
- stand alone mode only!!!
- 26-Oct-2009 SS Port to 2.6.30 kernel.
- */
-
-
-#ifndef __KERNEL__
-# define __KERNEL__
-#endif
-#ifndef MODULE
-# define MODULE
-#endif
-
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/mm.h> /* PAGE_ALIGN() */
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <asm/page.h>
-
-#include "allocator.h"
-
-/*#define ALL_DEBUG*/
-#define ALL_MSG "allocator: "
-
-#undef PDEBUG /* undef it, just in case */
-#ifdef ALL_DEBUG
-# define __static
-# define DUMP_LIST() dump_list()
-# ifdef __KERNEL__
- /* This one if debugging is on, and kernel space */
-# define PDEBUG(fmt, args...) printk(KERN_DEBUG ALL_MSG fmt, ## args)
-# else
- /* This one for user space */
-# define PDEBUG(fmt, args...) fprintf(stderr, fmt, ## args)
-# endif
-#else
-# define PDEBUG(fmt, args...) /* not debugging: nothing */
-# define DUMP_LIST()
-# define __static static
-#endif
-
-#undef PDEBUGG
-#define PDEBUGG(fmt, args...)
-/*#define PDEBUGG(fmt, args...) printk( KERN_DEBUG ALL_MSG fmt, ## args)*/
-
-
-static int allocator_himem = 1; /* 0 = probe, pos. = megs, neg. = disable */
-static int allocator_step = 1; /* This is the step size in MB */
-static int allocator_probe = 1; /* This is a flag -- 1=probe, 0=don't probe */
-
-static unsigned long allocator_buffer; /* physical address */
-static unsigned long allocator_buffer_size; /* kilobytes */
-
-/*
- * The allocator keeps a list of DMA areas, so multiple devices
- * can coexist. The list is kept sorted by address
- */
-
-struct allocator_struct {
- unsigned long address;
- unsigned long size;
- struct allocator_struct *next;
-};
-
-static struct allocator_struct *allocator_list;
-
-#ifdef ALL_DEBUG
-static int dump_list(void)
-{
- struct allocator_struct *ptr;
-
- PDEBUG("Current list:\n");
- for (ptr = allocator_list; ptr; ptr = ptr->next)
- PDEBUG("0x%08lx (size %likB)\n", ptr->address, ptr->size>>10);
- return 0;
-}
-#endif
-
-/* ========================================================================
- * This function is the actual allocator.
- *
- * If space is available in high memory (as detected at load time), that
- * one is returned. The return value is a physical address (i.e., it can
- * be used straight ahead for DMA, but needs remapping for program use).
- */
-
-unsigned long allocator_allocate_dma(unsigned long kilobytes, gfp_t flags)
-{
- struct allocator_struct *ptr = allocator_list, *newptr;
- unsigned long bytes = kilobytes << 10;
-
- /* check if high memory is available */
- if (!allocator_buffer)
- return 0;
-
- /* Round it to a multiple of the pagesize */
- bytes = PAGE_ALIGN(bytes);
- PDEBUG("request for %li bytes\n", bytes);
-
- while (ptr && ptr->next) {
- if (ptr->next->address - (ptr->address + ptr->size) >= bytes)
- break; /* enough space */
- ptr = ptr->next;
- }
- if (!ptr->next) {
- DUMP_LIST();
- PDEBUG("alloc failed\n");
- return 0; /* end of list */
- }
- newptr = kmalloc(sizeof(struct allocator_struct), flags);
- if (!newptr)
- return 0;
-
- /* ok, now stick it after ptr */
- newptr->address = ptr->address + ptr->size;
- newptr->size = bytes;
- newptr->next = ptr->next;
- ptr->next = newptr;
-
- DUMP_LIST();
- PDEBUG("returning 0x%08lx\n", newptr->address);
- return newptr->address;
-}
-
-int allocator_free_dma(unsigned long address)
-{
- struct allocator_struct *ptr = allocator_list, *prev;
-
- while (ptr && ptr->next) {
- if (ptr->next->address == address)
- break;
- ptr = ptr->next;
- }
- /* the one being freed is ptr->next */
- prev = ptr; ptr = ptr->next;
-
- if (!ptr) {
- pr_err(ALL_MSG "free_dma but add. not allocated\n");
- return -EINVAL;
- }
- PDEBUGG("freeing: %08lx (%li) next %08lx\n", ptr->address, ptr->size,
- ptr->next->address);
- prev->next = ptr->next;
- kfree(ptr);
-
- /* dump_list(); */
- return 0;
-}
-
-/* ========================================================================
- * Init and cleanup
- *
- * On cleanup everything is released. If the list is not empty, that a
- * problem of our clients
- */
-int allocator_init(u32 *allocator_max)
-{
- /* check how much free memory is there */
- void *remapped;
- unsigned long max;
- unsigned long trial_size = allocator_himem<<20;
- unsigned long last_trial = 0;
- unsigned long step = allocator_step<<20;
- unsigned long i = 0;
- struct allocator_struct *head, *tail;
- char test_string[] = "0123456789abcde"; /* 16 bytes */
-
- PDEBUGG("himem = %i\n", allocator_himem);
- if (allocator_himem < 0) /* don't even try */
- return -EINVAL;
-
- if (!trial_size)
- trial_size = 1<<20; /* not specified: try one meg */
-
- while (1) {
- remapped = ioremap(__pa(high_memory), trial_size);
- if (!remapped) {
- PDEBUGG("%li megs failed!\n", trial_size>>20);
- break;
- }
- PDEBUGG("Trying %li megs (at %p, %p)\n", trial_size>>20,
- (void *)__pa(high_memory), remapped);
- for (i = last_trial; i < trial_size; i += 16) {
- strcpy((char *)(remapped)+i, test_string);
- if (strcmp((char *)(remapped)+i, test_string))
- break;
- }
- iounmap((void *)remapped);
- schedule();
- last_trial = trial_size;
- if (i == trial_size)
- trial_size += step; /* increment, if all went well */
- else {
- PDEBUGG("%li megs copy test failed!\n", trial_size>>20);
- break;
- }
- if (!allocator_probe)
- break;
- }
- PDEBUG("%li megs (%li k, %li b)\n", i>>20, i>>10, i);
- allocator_buffer_size = i>>10; /* kilobytes */
- allocator_buffer = __pa(high_memory);
- if (!allocator_buffer_size) {
- printk(KERN_WARNING ALL_MSG "no free high memory to use\n");
- return -ENOMEM;
- }
-
- /*
- * to simplify things, always have two cells in the list:
- * the first and the last. This avoids some conditionals and
- * extra code when allocating and deallocating: we only play
- * in the middle of the list
- */
- head = kmalloc(sizeof(struct allocator_struct), GFP_KERNEL);
- if (!head)
- return -ENOMEM;
- tail = kmalloc(sizeof(struct allocator_struct), GFP_KERNEL);
- if (!tail) {
- kfree(head);
- return -ENOMEM;
- }
-
- max = allocator_buffer_size<<10;
-
- head->size = tail->size = 0;
- head->address = allocator_buffer;
- tail->address = allocator_buffer + max;
- head->next = tail;
- tail->next = NULL;
- allocator_list = head;
-
- /* Back to the user code, in KB */
- *allocator_max = allocator_buffer_size;
-
- return 0; /* ok, ready */
-}
-
-void allocator_cleanup(void)
-{
- struct allocator_struct *ptr, *next;
-
- for (ptr = allocator_list; ptr; ptr = next) {
- next = ptr->next;
- PDEBUG("freeing list: 0x%08lx\n", ptr->address);
- kfree(ptr);
- }
-
- allocator_buffer = 0;
- allocator_buffer_size = 0;
- allocator_list = NULL;
-}
-
-
diff --git a/drivers/staging/dt3155/allocator.h b/drivers/staging/dt3155/allocator.h
deleted file mode 100644
index 425b70fcd50..00000000000
--- a/drivers/staging/dt3155/allocator.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * allocator.h -- prototypes for allocating high memory
- *
- * NOTE: this is different from my previous allocator, the one that
- * assembles pages, which revealed itself both slow and unreliable.
- *
- * Copyright (C) 1998 rubini@linux.it (Alessandro Rubini)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-int allocator_free_dma(unsigned long address);
-unsigned long allocator_allocate_dma(unsigned long kilobytes, gfp_t flags);
-int allocator_init(u32 *);
-void allocator_cleanup(void);
diff --git a/drivers/staging/dt3155/dt3155.h b/drivers/staging/dt3155/dt3155.h
deleted file mode 100644
index 793e2fcf446..00000000000
--- a/drivers/staging/dt3155/dt3155.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
-
-Copyright 1996,2002,2005 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Jason Lapenta, Scott Smedley
-
-This file is part of the DT3155 Device Driver.
-
-The DT3155 Device Driver is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-The DT3155 Device Driver is distributed in the hope that it will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with the DT3155 Device Driver; if not, write to the Free
-Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-MA 02111-1307 USA
-
--- Changes --
-
- Date Programmer Description of changes made
- -------------------------------------------------------------------
- 03-Jul-2000 JML n/a
- 10-Oct-2001 SS port to 2.4 kernel.
- 24-Jul-2002 SS remove unused code & added GPL licence.
- 05-Aug-2005 SS port to 2.6 kernel; make CCIR mode default.
-
-*/
-
-#ifndef _DT3155_INC
-#define _DT3155_INC
-
-#include <linux/types.h>
-#include <linux/time.h> /* struct timeval */
-
-
-/* Uncomment this for 50Hz CCIR */
-#define CCIR 1
-
-/* Can be 1 or 2 */
-#define MAXBOARDS 1
-
-#define BOARD_MAX_BUFFS 3
-#define MAXBUFFERS (BOARD_MAX_BUFFS*MAXBOARDS)
-
-#define PCI_PAGE_SIZE (1 << 12)
-
-#ifdef CCIR
-#define DT3155_MAX_ROWS 576
-#define DT3155_MAX_COLS 768
-#define FORMAT50HZ 1
-#else
-#define DT3155_MAX_ROWS 480
-#define DT3155_MAX_COLS 640
-#define FORMAT50HZ 0
-#endif
-
-/* Configuration structure */
-struct dt3155_config {
- u32 acq_mode;
- u32 cols, rows;
- u32 continuous;
-};
-
-
-/* hold data for each frame */
-struct frame_info {
- u32 addr; /* address of the buffer with the frame */
- u32 tag; /* unique number for the frame */
- struct timeval time; /* time that capture took place */
-};
-
-/*
- * Structure for interrupt and buffer handling.
- * This is the setup for 1 card
- */
-struct dt3155_fbuffer {
- int nbuffers;
-
- struct frame_info frame_info[BOARD_MAX_BUFFS];
-
- int empty_buffers[BOARD_MAX_BUFFS]; /* indexes empty frames */
- int empty_len; /* Number of empty buffers */
- /* Zero means empty */
-
- int active_buf; /* Where data is currently dma'ing */
- int locked_buf; /* Buffers used by user */
-
- int ready_que[BOARD_MAX_BUFFS];
- u32 ready_head; /* The most recent buffer located here */
- u32 ready_len; /* The number of ready buffers */
-
- int even_happened;
- int even_stopped;
-
- int stop_acquire; /* Flag to stop interrupts */
- u32 frame_count; /* Counter for frames acquired by this card */
-};
-
-
-
-#define DT3155_MODE_FRAME 1
-#define DT3155_MODE_FIELD 2
-
-#define DT3155_SNAP 1
-#define DT3155_ACQ 2
-
-/* There is one status structure for each card. */
-struct dt3155_status {
- int fixed_mode; /* if 1, we are in fixed frame mode */
- u32 reg_addr; /* Register address for a single card */
- u32 mem_addr; /* Buffer start addr for this card */
- u32 mem_size; /* This is the amount of mem available */
- u32 irq; /* this card's irq */
- struct dt3155_config config; /* configuration struct */
- struct dt3155_fbuffer fbuffer; /* frame buffer state struct */
- u32 state; /* this card's state */
- u32 device_installed; /* Flag if installed. 1=installed */
-};
-
-/* Reference to global status structure */
-extern struct dt3155_status dt3155_status[MAXBOARDS];
-
-#define DT3155_STATE_IDLE 0x00
-#define DT3155_STATE_FRAME 0x01
-#define DT3155_STATE_FLD 0x02
-#define DT3155_STATE_STOP 0x100
-#define DT3155_STATE_ERROR 0x200
-#define DT3155_STATE_MODE 0x0ff
-
-#define DT3155_IOC_MAGIC '!'
-
-#define DT3155_SET_CONFIG _IOW(DT3155_IOC_MAGIC, 1, struct dt3155_config)
-#define DT3155_GET_CONFIG _IOR(DT3155_IOC_MAGIC, 2, struct dt3155_status)
-#define DT3155_STOP _IO(DT3155_IOC_MAGIC, 3)
-#define DT3155_START _IO(DT3155_IOC_MAGIC, 4)
-#define DT3155_FLUSH _IO(DT3155_IOC_MAGIC, 5)
-#define DT3155_IOC_MAXNR 5
-
-/* Error codes */
-
-#define DT_ERR_NO_BUFFERS 0x10000 /* not used but it might be one day */
-#define DT_ERR_CORRUPT 0x20000
-#define DT_ERR_OVERRUN 0x30000
-#define DT_ERR_I2C_TIMEOUT 0x40000
-#define DT_ERR_MASK 0xff0000/* not used but it might be one day */
-
-/* User code will probably want to declare one of these for each card */
-struct dt3155_read {
- u32 offset;
- u32 frame_seq;
- u32 state;
-
- struct frame_info frame_info;
-};
-
-#endif /* _DT3155_inc */
diff --git a/drivers/staging/dt3155/dt3155.sysvinit b/drivers/staging/dt3155/dt3155.sysvinit
deleted file mode 100644
index 92ec0939cb7..00000000000
--- a/drivers/staging/dt3155/dt3155.sysvinit
+++ /dev/null
@@ -1,60 +0,0 @@
-#! /bin/sh
-#
-# Module load/unload script for use with SysV-style /etc/init.d/ systems.
-# On a Debian system, copy this to /etc/init.d/dt3155 and then run
-# /usr/sbin/update-rc.d dt3155 defaults 55
-# to create the appropriate /etc/rc?.d/[SK]55dt3155 start/stop links.
-# (The "55" is arbitrary but is what I use to load this rather late.)
-#
-# Andy Dougherty Feb 22 2000 doughera@lafayette.edu
-# Dept. of Physics
-# Lafayette College, Easton PA 18042
-#
-
-PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
-
-# Edit to point to your local copy.
-FILE=/usr/local/lib/modules/dt3155/dt3155.o
-NAME="dt3155"
-DESC="dt3155 Frame Grabber module"
-DEV="dt3155"
-
-if test ! -f $FILE; then
- echo "Unable to locate $FILE"
- exit 0
-fi
-
-set -e
-
-case "$1" in
- start)
- echo -n "Loading $DESC "
- if /sbin/insmod -v -f $FILE; then
- major=`grep $DEV /proc/devices | awk "{print \\$1}"`
- rm -f /dev/dt3155?
- mknod /dev/dt3155a c $major 0
- mknod /dev/dt3155b c $major 1
- chmod go+rw /dev/dt3155?
- echo
- else
- echo "$FILE not loaded."
- fi
- ;;
- stop)
- echo -n "Unloading $DESC: "
- if /sbin/rmmod $NAME ; then
- echo
- else
- echo "$DEV not removed"
- exit 0
- fi
- rm -f /dev/dt3155?
- ;;
- *)
- echo "Usage: /etc/init.d/$NAME {start|stop}"
- exit 1
- ;;
-esac
-
-exit 0
-
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c
deleted file mode 100644
index 40ef97f3feb..00000000000
--- a/drivers/staging/dt3155/dt3155_drv.c
+++ /dev/null
@@ -1,1099 +0,0 @@
-/*
-
-Copyright 1996,2002,2005 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Jason Lapenta, Scott Smedley, Greg Sharp
-
-This file is part of the DT3155 Device Driver.
-
-The DT3155 Device Driver is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-The DT3155 Device Driver is distributed in the hope that it will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with the DT3155 Device Driver; if not, write to the Free
-Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-MA 02111-1307 USA
-
--- Changes --
-
- Date Programmer Description of changes made
- -------------------------------------------------------------------
- 03-Jul-2000 JML n/a
- 10-Oct-2001 SS port to 2.4 kernel
- 02-Apr-2002 SS Mods to use allocator as a standalone module;
- Merged John Roll's changes (john@cfa.harvard.edu)
- to make work with multiple boards.
- 02-Jul-2002 SS Merged James Rose's chages (rosejr@purdue.edu) to:
- * fix successive interrupt-driven captures
- * add select/poll support.
- 10-Jul-2002 GCS Add error check when ndevices > MAXBOARDS.
- 02-Aug-2002 GCS Fix field mode so that odd (lower) field is stored
- in lower half of buffer.
- 05-Aug-2005 SS port to 2.6 kernel.
- 26-Oct-2009 SS port to 2.6.30 kernel.
-
--- Notes --
-
-** appended "mem=124" to lilo.conf to allow for 4 MB free on my 128 MB system.
- * using allocator.c and allocator.h from o'reilly book (alessandro rubini)
- ftp://ftp.systemy.it/pub/develop (see README.allocator)
-
- + might want to get rid of MAXBOARDS for allocating the initial buffer.
- confusing and not necessary
-
- + in cleanup_module the MOD_IN_USE looks like it is checked after it should be
-
- * GFP_DMA should not be set with a PCI system (pg 291)
-
- - NJC why are only two buffers allowed? (see isr, approx line 358)
-
-*/
-
-extern void printques(int);
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/smp_lock.h>
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#include "dt3155.h"
-#include "dt3155_drv.h"
-#include "dt3155_isr.h"
-#include "dt3155_io.h"
-#include "allocator.h"
-
-
-MODULE_LICENSE("GPL");
-
-/* Error variable. Zero means no error. */
-int dt3155_errno = 0;
-
-#ifndef PCI_DEVICE_ID_INTEL_7116
-#define PCI_DEVICE_ID_INTEL_7116 0x1223
-#endif
-
-#define DT3155_VENDORID PCI_VENDOR_ID_INTEL
-#define DT3155_DEVICEID PCI_DEVICE_ID_INTEL_7116
-#define MAXPCI 16
-
-#ifdef DT_DEBUG
-#define DT_3155_DEBUG_MSG(x,y) printk(x,y)
-#else
-#define DT_3155_DEBUG_MSG(x,y)
-#endif
-
-/* wait queue for interrupts */
-wait_queue_head_t dt3155_read_wait_queue[MAXBOARDS];
-
-#define DT_3155_SUCCESS 0
-#define DT_3155_FAILURE -EIO
-
-/* set to 0 to allocate dynamically, but it is tunable: */
-/* insmod dt3155 dt3155_major=XX */
-int dt3155_major = 0;
-
-/* The minor numbers are 0 and 1 ... they are not tunable.
- * They are used as the indices for the structure vectors,
- * and register address vectors
- */
-
-/* Global structures and variables */
-
-/* Status of each device */
-struct dt3155_status dt3155_status[MAXBOARDS];
-
-/* kernel logical address of the board */
-u8 *dt3155_lbase[MAXBOARDS] = { NULL
-#if MAXBOARDS == 2
- , NULL
-#endif
-};
-/* DT3155 registers */
-u8 *dt3155_bbase = NULL; /* kernel logical address of the *
- * buffer region */
-u32 dt3155_dev_open[MAXBOARDS] = {0
-#if MAXBOARDS == 2
- , 0
-#endif
-};
-
-u32 ndevices = 0;
-u32 unique_tag = 0;
-
-
-/*
- * Stops interrupt generation right away and resets the status
- * to idle. I don't know why this works and the other way doesn't.
- * (James Rose)
- */
-static void quick_stop (int minor)
-{
- // TODO: scott was here
-#if 1
- ReadMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
- /* disable interrupts */
- int_csr_r.fld.FLD_END_EVE_EN = 0;
- int_csr_r.fld.FLD_END_ODD_EN = 0;
- WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
-
- dt3155_status[minor].state &= ~(DT3155_STATE_STOP|0xff);
- /* mark the system stopped: */
- dt3155_status[minor].state |= DT3155_STATE_IDLE;
- dt3155_fbuffer[minor]->stop_acquire = 0;
- dt3155_fbuffer[minor]->even_stopped = 0;
-#else
- dt3155_status[minor].state |= DT3155_STATE_STOP;
- dt3155_status[minor].fbuffer.stop_acquire = 1;
-#endif
-
-}
-
-
-/*****************************************************
- * dt3155_isr() Interrupt service routine
- *
- * - looks like this isr supports IRQ sharing (or could) JML
- * - Assumes IRQs are disabled, via the SA_INTERRUPT flag
- * being set in request_irq() call from init_module()
- *****************************************************/
-static void dt3155_isr(int irq, void *dev_id, struct pt_regs *regs)
-{
- int minor = -1;
- int index;
- unsigned long flags;
- u32 buffer_addr;
-
- /* find out who issued the interrupt */
- for (index = 0; index < ndevices; index++) {
- if(dev_id == (void*) &dt3155_status[index])
- {
- minor = index;
- break;
- }
- }
-
- /* hopefully we should not get here */
- if (minor < 0 || minor >= MAXBOARDS) {
- printk(KERN_ERR "dt3155_isr called with invalid dev_id\n");
- return;
- }
-
- /* Check for corruption and set a flag if so */
- ReadMReg((dt3155_lbase[minor] + CSR1), csr1_r.reg);
-
- if ((csr1_r.fld.FLD_CRPT_EVE) || (csr1_r.fld.FLD_CRPT_ODD))
- {
- /* TODO: this should probably stop acquisition */
- /* and set some flags so that dt3155_read */
- /* returns an error next time it is called */
- dt3155_errno = DT_ERR_CORRUPT;
- printk("dt3155: corrupt field\n");
- return;
- }
-
- ReadMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
-
- /* Handle the even field ... */
- if (int_csr_r.fld.FLD_END_EVE)
- {
- if ((dt3155_status[minor].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD)
- {
- dt3155_fbuffer[minor]->frame_count++;
- }
-
- ReadI2C(dt3155_lbase[minor], EVEN_CSR, &i2c_even_csr.reg);
-
- /* Clear the interrupt? */
- int_csr_r.fld.FLD_END_EVE = 1;
-
- /* disable the interrupt if last field */
- if (dt3155_fbuffer[minor]->stop_acquire)
- {
- printk("dt3155: even stopped.\n");
- dt3155_fbuffer[minor]->even_stopped = 1;
- if (i2c_even_csr.fld.SNGL_EVE)
- {
- int_csr_r.fld.FLD_END_EVE_EN = 0;
- }
- else
- {
- i2c_even_csr.fld.SNGL_EVE = 1;
- }
- }
-
- WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
-
- /* Set up next DMA if we are doing FIELDS */
- if ((dt3155_status[minor].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD)
- {
- /* GCS (Aug 2, 2002) -- In field mode, dma the odd field
- into the lower half of the buffer */
- const u32 stride = dt3155_status[minor].config.cols;
- buffer_addr = dt3155_fbuffer[minor]->
- frame_info[dt3155_fbuffer[minor]->active_buf].addr
- + (DT3155_MAX_ROWS / 2) * stride;
- local_save_flags(flags);
- local_irq_disable();
- wake_up_interruptible(&dt3155_read_wait_queue[minor]);
-
- /* Set up the DMA address for the next field */
- local_irq_restore(flags);
- WriteMReg((dt3155_lbase[minor] + ODD_DMA_START), buffer_addr);
- }
-
- /* Check for errors. */
- i2c_even_csr.fld.DONE_EVE = 1;
- if (i2c_even_csr.fld.ERROR_EVE)
- dt3155_errno = DT_ERR_OVERRUN;
-
- WriteI2C(dt3155_lbase[minor], EVEN_CSR, i2c_even_csr.reg);
-
- /* Note that we actually saw an even field, meaning */
- /* that the subsequent odd field completes the frame */
- dt3155_fbuffer[minor]->even_happened = 1;
-
- /* record the time that the even field finished; this should be */
- /* about the time of the middle of the frame */
- do_gettimeofday(&(dt3155_fbuffer[minor]->
- frame_info[dt3155_fbuffer[minor]->
- active_buf].time));
- return;
- }
-
- /* ... now handle the odd field */
- if (int_csr_r.fld.FLD_END_ODD)
- {
- ReadI2C(dt3155_lbase[minor], ODD_CSR, &i2c_odd_csr.reg);
-
- /* Clear the interrupt? */
- int_csr_r.fld.FLD_END_ODD = 1;
-
- if (dt3155_fbuffer[minor]->even_happened ||
- (dt3155_status[minor].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD)
- {
- dt3155_fbuffer[minor]->frame_count++;
- }
-
- if (dt3155_fbuffer[minor]->stop_acquire &&
- dt3155_fbuffer[minor]->even_stopped)
- {
- printk(KERN_DEBUG "dt3155: stopping odd..\n");
- if (i2c_odd_csr.fld.SNGL_ODD)
- {
- /* disable interrupts */
- int_csr_r.fld.FLD_END_ODD_EN = 0;
- dt3155_status[minor].state &= ~(DT3155_STATE_STOP|0xff);
-
- /* mark the system stopped: */
- dt3155_status[minor].state |= DT3155_STATE_IDLE;
- dt3155_fbuffer[minor]->stop_acquire = 0;
- dt3155_fbuffer[minor]->even_stopped = 0;
-
- printk(KERN_DEBUG "dt3155: state is now %x\n",
- dt3155_status[minor].state);
- }
- else
- {
- i2c_odd_csr.fld.SNGL_ODD = 1;
- }
- }
-
- WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
-
- /* if the odd field has been acquired, then */
- /* change the next dma location for both fields */
- /* and wake up the process if sleeping */
- if (dt3155_fbuffer[minor]->even_happened ||
- (dt3155_status[minor].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD)
- {
-
- local_save_flags(flags);
- local_irq_disable();
-
-#ifdef DEBUG_QUES_B
- printques(minor);
-#endif
- if (dt3155_fbuffer[minor]->nbuffers > 2)
- {
- if (!are_empty_buffers(minor))
- {
- /* The number of active + locked buffers is
- * at most 2, and since there are none empty, there
- * must be at least nbuffers-2 ready buffers.
- * This is where we 'drop frames', oldest first. */
- push_empty(pop_ready(minor), minor);
- }
-
- /* The ready_que can't be full, since we know
- * there is one active buffer right now, so it's safe
- * to push the active buf on the ready_que. */
- push_ready(minor, dt3155_fbuffer[minor]->active_buf);
- /* There's at least 1 empty -- make it active */
- dt3155_fbuffer[minor]->active_buf = pop_empty(minor);
- dt3155_fbuffer[minor]->
- frame_info[dt3155_fbuffer[minor]->
- active_buf].tag = ++unique_tag;
- }
- else /* nbuffers == 2, special case */
- { /* There is 1 active buffer.
- * If there is a locked buffer, keep the active buffer
- * the same -- that means we drop a frame.
- */
- if (dt3155_fbuffer[minor]->locked_buf < 0)
- {
- push_ready(minor,
- dt3155_fbuffer[minor]->active_buf);
- if (are_empty_buffers(minor))
- {
- dt3155_fbuffer[minor]->active_buf =
- pop_empty(minor);
- }
- else
- { /* no empty or locked buffers, so use a ready buffer */
- dt3155_fbuffer[minor]->active_buf =
- pop_ready(minor);
- }
- }
- }
-
-#ifdef DEBUG_QUES_B
- printques(minor);
-#endif
-
- dt3155_fbuffer[minor]->even_happened = 0;
-
- wake_up_interruptible(&dt3155_read_wait_queue[minor]);
-
- local_irq_restore(flags);
- }
-
-
- /* Set up the DMA address for the next frame/field */
- buffer_addr = dt3155_fbuffer[minor]->
- frame_info[dt3155_fbuffer[minor]->active_buf].addr;
- if ((dt3155_status[minor].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD)
- {
- WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START), buffer_addr);
- }
- else
- {
- WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START), buffer_addr);
-
- WriteMReg((dt3155_lbase[minor] + ODD_DMA_START), buffer_addr
- + dt3155_status[minor].config.cols);
- }
-
- /* Do error checking */
- i2c_odd_csr.fld.DONE_ODD = 1;
- if (i2c_odd_csr.fld.ERROR_ODD)
- dt3155_errno = DT_ERR_OVERRUN;
-
- WriteI2C(dt3155_lbase[minor], ODD_CSR, i2c_odd_csr.reg);
-
- return;
- }
- /* If we get here, the Odd Field wasn't it either... */
- printk("neither even nor odd. shared perhaps?\n");
-}
-
-/*****************************************************
- * init_isr(int minor)
- * turns on interrupt generation for the card
- * designated by "minor".
- * It is called *only* from inside ioctl().
- *****************************************************/
-static void dt3155_init_isr(int minor)
-{
- const u32 stride = dt3155_status[minor].config.cols;
-
- switch (dt3155_status[minor].state & DT3155_STATE_MODE)
- {
- case DT3155_STATE_FLD:
- {
- even_dma_start_r = dt3155_status[minor].
- fbuffer.frame_info[dt3155_status[minor].fbuffer.active_buf].addr;
- even_dma_stride_r = 0;
- odd_dma_stride_r = 0;
-
- WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START),
- even_dma_start_r);
- WriteMReg((dt3155_lbase[minor] + EVEN_DMA_STRIDE),
- even_dma_stride_r);
- WriteMReg((dt3155_lbase[minor] + ODD_DMA_STRIDE),
- odd_dma_stride_r);
- break;
- }
-
- case DT3155_STATE_FRAME:
- default:
- {
- even_dma_start_r = dt3155_status[minor].
- fbuffer.frame_info[dt3155_status[minor].fbuffer.active_buf].addr;
- odd_dma_start_r = even_dma_start_r + stride;
- even_dma_stride_r = stride;
- odd_dma_stride_r = stride;
-
- WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START),
- even_dma_start_r);
- WriteMReg((dt3155_lbase[minor] + ODD_DMA_START),
- odd_dma_start_r);
- WriteMReg((dt3155_lbase[minor] + EVEN_DMA_STRIDE),
- even_dma_stride_r);
- WriteMReg((dt3155_lbase[minor] + ODD_DMA_STRIDE),
- odd_dma_stride_r);
- break;
- }
- }
-
- /* 50/60 Hz should be set before this point but let's make sure it is */
- /* right anyway */
-
- ReadI2C(dt3155_lbase[minor], CSR2, &i2c_csr2.reg);
- i2c_csr2.fld.HZ50 = FORMAT50HZ;
- WriteI2C(dt3155_lbase[minor], CSR2, i2c_csr2.reg);
-
- /* enable busmaster chip, clear flags */
-
- /*
- * TODO:
- * shouldn't we be concerned with continuous values of
- * DT3155_SNAP & DT3155_ACQ here? (SS)
- */
-
- csr1_r.reg = 0;
- csr1_r.fld.CAP_CONT_EVE = 1; /* use continuous capture bits to */
- csr1_r.fld.CAP_CONT_ODD = 1; /* enable */
- csr1_r.fld.FLD_DN_EVE = 1; /* writing a 1 clears flags */
- csr1_r.fld.FLD_DN_ODD = 1;
- csr1_r.fld.SRST = 1; /* reset - must be 1 */
- csr1_r.fld.FIFO_EN = 1; /* fifo control - must be 1 */
- csr1_r.fld.FLD_CRPT_EVE = 1; /* writing a 1 clears flags */
- csr1_r.fld.FLD_CRPT_ODD = 1;
-
- WriteMReg((dt3155_lbase[minor] + CSR1),csr1_r.reg);
-
- /* Enable interrupts at the end of each field */
-
- int_csr_r.reg = 0;
- int_csr_r.fld.FLD_END_EVE_EN = 1;
- int_csr_r.fld.FLD_END_ODD_EN = 1;
- int_csr_r.fld.FLD_START_EN = 0;
-
- WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
-
- /* start internal BUSY bits */
-
- ReadI2C(dt3155_lbase[minor], CSR2, &i2c_csr2.reg);
- i2c_csr2.fld.BUSY_ODD = 1;
- i2c_csr2.fld.BUSY_EVE = 1;
- WriteI2C(dt3155_lbase[minor], CSR2, i2c_csr2.reg);
-
- /* Now it's up to the interrupt routine! */
-
- return;
-}
-
-
-/*****************************************************
- * ioctl()
- *
- *****************************************************/
-static int dt3155_ioctl(struct inode *inode,
- struct file *file,
- unsigned int cmd,
- unsigned long arg)
-{
- int minor = MINOR(inode->i_rdev); /* What device are we ioctl()'ing? */
-
- if (minor >= MAXBOARDS || minor < 0)
- return -ENODEV;
-
- /* make sure it is valid command */
- if (_IOC_NR(cmd) > DT3155_IOC_MAXNR)
- {
- printk("DT3155: invalid IOCTL(0x%x)\n",cmd);
- printk("DT3155: Valid commands (0x%x), (0x%x), (0x%x), (0x%x), (0x%x)\n",
- (unsigned int)DT3155_GET_CONFIG,
- (unsigned int)DT3155_SET_CONFIG,
- (unsigned int)DT3155_START,
- (unsigned int)DT3155_STOP,
- (unsigned int)DT3155_FLUSH);
- return -EINVAL;
- }
-
- switch (cmd)
- {
- case DT3155_SET_CONFIG:
- {
- if (dt3155_status[minor].state != DT3155_STATE_IDLE)
- return -EBUSY;
-
- {
- struct dt3155_config tmp;
- if (copy_from_user((void *)&tmp, (void *) arg, sizeof(tmp)))
- return -EFAULT;
- /* check for valid settings */
- if (tmp.rows > DT3155_MAX_ROWS ||
- tmp.cols > DT3155_MAX_COLS ||
- (tmp.acq_mode != DT3155_MODE_FRAME &&
- tmp.acq_mode != DT3155_MODE_FIELD) ||
- (tmp.continuous != DT3155_SNAP &&
- tmp.continuous != DT3155_ACQ))
- {
- return -EINVAL;
- }
- dt3155_status[minor].config = tmp;
- }
- return 0;
- }
- case DT3155_GET_CONFIG:
- {
- if (copy_to_user((void *) arg, (void *) &dt3155_status[minor],
- sizeof(struct dt3155_status)))
- return -EFAULT;
- return 0;
- }
- case DT3155_FLUSH: /* Flushes the buffers -- ensures fresh data */
- {
- if (dt3155_status[minor].state != DT3155_STATE_IDLE)
- return -EBUSY;
- return dt3155_flush(minor);
- }
- case DT3155_STOP:
- {
- if (dt3155_status[minor].state & DT3155_STATE_STOP ||
- dt3155_status[minor].fbuffer.stop_acquire)
- return -EBUSY;
-
- if (dt3155_status[minor].state == DT3155_STATE_IDLE)
- return 0;
-
- quick_stop(minor);
- if (copy_to_user((void *) arg, (void *) &dt3155_status[minor],
- sizeof(struct dt3155_status)))
- return -EFAULT;
- return 0;
- }
- case DT3155_START:
- {
- if (dt3155_status[minor].state != DT3155_STATE_IDLE)
- return -EBUSY;
-
- dt3155_status[minor].fbuffer.stop_acquire = 0;
- dt3155_status[minor].fbuffer.frame_count = 0;
-
- /* Set the MODE in the status -- we default to FRAME */
- if (dt3155_status[minor].config.acq_mode == DT3155_MODE_FIELD)
- {
- dt3155_status[minor].state = DT3155_STATE_FLD;
- }
- else
- {
- dt3155_status[minor].state = DT3155_STATE_FRAME;
- }
-
- dt3155_init_isr(minor);
- if (copy_to_user((void *) arg, (void *) &dt3155_status[minor],
- sizeof(struct dt3155_status)))
- return -EFAULT;
- return 0;
- }
- default:
- {
- printk("DT3155: invalid IOCTL(0x%x)\n",cmd);
- printk("DT3155: Valid commands (0x%x), (0x%x), (0x%x), (0x%x), (0x%x)\n",
- (unsigned int)DT3155_GET_CONFIG,
- (unsigned int)DT3155_SET_CONFIG,
- DT3155_START, DT3155_STOP, DT3155_FLUSH);
- return -ENOSYS;
- }
- }
- return -ENOSYS;
-}
-
-/*****************************************************
- * mmap()
- *
- * only allow the user to mmap the registers and buffer.
- * It is quite possible that this is broken, since the
- * addition of the capacity for two cards!
- * It *looks* like it should work, but since I'm not
- * sure how to use it, I'm not actually sure. (NJC? ditto by SS)
- *****************************************************/
-static int dt3155_mmap (struct file * file, struct vm_area_struct * vma)
-{
- /* which device are we mmapping? */
- int minor = MINOR(file->f_dentry->d_inode->i_rdev);
- unsigned long offset;
- offset = vma->vm_pgoff << PAGE_SHIFT;
-
- if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
- vma->vm_flags |= VM_IO;
-
- /* Don't try to swap out physical pages.. */
- vma->vm_flags |= VM_RESERVED;
-
- /* they are mapping the registers or the buffer */
- if ((offset == dt3155_status[minor].reg_addr &&
- vma->vm_end - vma->vm_start == PCI_PAGE_SIZE) ||
- (offset == dt3155_status[minor].mem_addr &&
- vma->vm_end - vma->vm_start == dt3155_status[minor].mem_size))
- {
- if (remap_pfn_range(vma,
- vma->vm_start,
- offset >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- printk("DT3155: remap_page_range() failed.\n");
- return -EAGAIN;
- }
- }
- else
- {
- printk("DT3155: dt3155_mmap() bad call.\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-
-/*****************************************************
- * open()
- *
- * Our special open code.
- * MOD_INC_USE_COUNT makes sure that the driver memory is not freed
- * while the device is in use.
- *****************************************************/
-static int dt3155_open(struct inode* inode, struct file* filep)
-{
- int minor = MINOR(inode->i_rdev); /* what device are we opening? */
- if (dt3155_dev_open[minor]) {
- printk ("DT3155: Already opened by another process.\n");
- return -EBUSY;
- }
-
- if (dt3155_status[minor].device_installed==0)
- {
- printk("DT3155 Open Error: No such device dt3155 minor number %d\n",
- minor);
- return -EIO;
- }
-
- if (dt3155_status[minor].state != DT3155_STATE_IDLE) {
- printk ("DT3155: Not in idle state (state = %x)\n",
- dt3155_status[minor].state);
- return -EBUSY;
- }
-
- printk("DT3155: Device opened.\n");
-
- dt3155_dev_open[minor] = 1 ;
-
- dt3155_flush(minor);
-
- /* Disable ALL interrupts */
- int_csr_r.reg = 0;
- WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
-
- init_waitqueue_head(&(dt3155_read_wait_queue[minor]));
-
- return 0;
-}
-
-
-/*****************************************************
- * close()
- *
- * Now decrement the use count.
- *
- *****************************************************/
-static int dt3155_close(struct inode *inode, struct file *filep)
-{
- int minor;
-
- minor = MINOR(inode->i_rdev); /* which device are we closing */
- if (!dt3155_dev_open[minor])
- {
- printk("DT3155: attempt to CLOSE a not OPEN device\n");
- }
- else
- {
- dt3155_dev_open[minor] = 0;
-
- if (dt3155_status[minor].state != DT3155_STATE_IDLE)
- {
- quick_stop(minor);
- }
- }
- return 0;
-}
-
-/*****************************************************
- * read()
- *
- *****************************************************/
-static ssize_t dt3155_read(struct file *filep, char __user *buf,
- size_t count, loff_t *ppos)
-{
- /* which device are we reading from? */
- int minor = MINOR(filep->f_dentry->d_inode->i_rdev);
- u32 offset;
- int frame_index;
- struct frame_info *frame_info;
-
- /* TODO: this should check the error flag and */
- /* return an error on hardware failures */
- if (count != sizeof(struct dt3155_read))
- {
- printk("DT3155 ERROR (NJC): count is not right\n");
- return -EINVAL;
- }
-
-
- /* Hack here -- I'm going to allow reading even when idle.
- * this is so that the frames can be read after STOP has
- * been called. Leaving it here, commented out, as a reminder
- * for a short while to make sure there are no problems.
- * Note that if the driver is not opened in non-blocking mode,
- * and the device is idle, then it could sit here forever! */
-
- /* if (dt3155_status[minor].state == DT3155_STATE_IDLE)*/
- /* return -EBUSY;*/
-
- /* non-blocking reads should return if no data */
- if (filep->f_flags & O_NDELAY)
- {
- if ((frame_index = dt3155_get_ready_buffer(minor)) < 0) {
- /*printk("dt3155: no buffers available (?)\n");*/
- /* printques(minor); */
- return -EAGAIN;
- }
- }
- else
- {
- /*
- * sleep till data arrives, or we get interrupted.
- * Note that wait_event_interruptible() does not actually
- * sleep/wait if its condition evaluates to true upon entry.
- */
- wait_event_interruptible(dt3155_read_wait_queue[minor],
- (frame_index = dt3155_get_ready_buffer(minor))
- >= 0);
-
- if (frame_index < 0)
- {
- printk ("DT3155: read: interrupted\n");
- quick_stop (minor);
- printques(minor);
- return -EINTR;
- }
- }
-
- frame_info = &dt3155_status[minor].fbuffer.frame_info[frame_index];
-
- /* make this an offset */
- offset = frame_info->addr - dt3155_status[minor].mem_addr;
-
- put_user(offset, (unsigned int *) buf);
- buf += sizeof(u32);
- put_user(dt3155_status[minor].fbuffer.frame_count, (unsigned int *) buf);
- buf += sizeof(u32);
- put_user(dt3155_status[minor].state, (unsigned int *) buf);
- buf += sizeof(u32);
- if (copy_to_user(buf, frame_info, sizeof(*frame_info)))
- return -EFAULT;
-
- return sizeof(struct dt3155_read);
-}
-
-static unsigned int dt3155_poll (struct file * filp, poll_table *wait)
-{
- int minor = MINOR(filp->f_dentry->d_inode->i_rdev);
-
- if (!is_ready_buf_empty(minor))
- return POLLIN | POLLRDNORM;
-
- poll_wait (filp, &dt3155_read_wait_queue[minor], wait);
-
- return 0;
-}
-
-static long
-dt3155_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- lock_kernel();
- ret = dt3155_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- unlock_kernel();
-
- return ret;
-}
-
-/*****************************************************
- * file operations supported by DT3155 driver
- * needed by init_module
- * register_chrdev
- *****************************************************/
-static struct file_operations dt3155_fops = {
- .read = dt3155_read,
- .unlocked_ioctl = dt3155_unlocked_ioctl,
- .mmap = dt3155_mmap,
- .poll = dt3155_poll,
- .open = dt3155_open,
- .release = dt3155_close
-};
-
-
-/*****************************************************
- * find_PCI();
- *
- * PCI has been totally reworked in 2.1..
- *****************************************************/
-static int find_PCI (void)
-{
- struct pci_dev *pci_dev = NULL;
- int error, pci_index = 0;
- unsigned short rev_device;
- unsigned long base;
- unsigned char irq;
-
- while ((pci_dev = pci_get_device
- (DT3155_VENDORID, DT3155_DEVICEID, pci_dev)) != NULL)
- {
- pci_index ++;
-
- /* Is it really there? */
- if ((error =
- pci_read_config_word(pci_dev, PCI_CLASS_DEVICE, &rev_device)))
- continue;
-
- /* Found a board */
- DT_3155_DEBUG_MSG("DT3155: Device number %d \n", pci_index);
-
- /* Make sure the driver was compiled with enough buffers to handle
- this many boards */
- if (pci_index > MAXBOARDS) {
- printk("DT3155: ERROR - found %d devices, but driver only configured "
- "for %d devices\n"
- "DT3155: Please change MAXBOARDS in dt3155.h\n",
- pci_index, MAXBOARDS);
- goto err;
- }
-
- /* Now, just go out and make sure that this/these device(s) is/are
- actually mapped into the kernel address space */
- if ((error = pci_read_config_dword(pci_dev, PCI_BASE_ADDRESS_0,
- (u32 *) &base)))
- {
- printk("DT3155: Was not able to find device \n");
- goto err;
- }
-
- DT_3155_DEBUG_MSG("DT3155: Base address 0 for device is %lx \n", base);
- dt3155_status[pci_index-1].reg_addr = base;
-
- /* Remap the base address to a logical address through which we
- * can access it. */
- dt3155_lbase[pci_index - 1] = ioremap(base,PCI_PAGE_SIZE);
- dt3155_status[pci_index - 1].reg_addr = base;
- DT_3155_DEBUG_MSG("DT3155: New logical address is %p \n",
- dt3155_lbase[pci_index-1]);
- if (!dt3155_lbase[pci_index-1])
- {
- printk("DT3155: Unable to remap control registers\n");
- goto err;
- }
-
- if ((error = pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &irq)))
- {
- printk("DT3155: Was not able to find device \n");
- goto err;
- }
-
- DT_3155_DEBUG_MSG("DT3155: IRQ is %d \n",irq);
- dt3155_status[pci_index-1].irq = irq;
- /* Set flag: kth device found! */
- dt3155_status[pci_index-1].device_installed = 1;
- printk("DT3155: Installing device %d w/irq %d and address %p\n",
- pci_index,
- dt3155_status[pci_index-1].irq,
- dt3155_lbase[pci_index-1]);
-
- }
- ndevices = pci_index;
-
- return DT_3155_SUCCESS;
-
-err:
- pci_dev_put(pci_dev);
- return DT_3155_FAILURE;
-}
-
-u32 allocatorAddr = 0;
-
-/*****************************************************
- * init_module()
- *****************************************************/
-int init_module(void)
-{
- int index;
- int rcode = 0;
- char *devname[MAXBOARDS];
-
- devname[0] = "dt3155a";
-#if MAXBOARDS == 2
- devname[1] = "dt3155b";
-#endif
-
- printk("DT3155: Loading module...\n");
-
- /* Register the device driver */
- rcode = register_chrdev(dt3155_major, "dt3155", &dt3155_fops);
- if(rcode < 0)
- {
- printk(KERN_INFO "DT3155: register_chrdev failed \n");
- return rcode;
- }
-
- if(dt3155_major == 0)
- dt3155_major = rcode; /* dynamic */
-
-
- /* init the status variables. */
- /* DMA memory is taken care of in setup_buffers() */
- for (index = 0; index < MAXBOARDS; index++)
- {
- dt3155_status[index].config.acq_mode = DT3155_MODE_FRAME;
- dt3155_status[index].config.continuous = DT3155_ACQ;
- dt3155_status[index].config.cols = DT3155_MAX_COLS;
- dt3155_status[index].config.rows = DT3155_MAX_ROWS;
- dt3155_status[index].state = DT3155_STATE_IDLE;
-
- /* find_PCI() will check if devices are installed; */
- /* first assume they're not: */
- dt3155_status[index].mem_addr = 0;
- dt3155_status[index].mem_size = 0;
- dt3155_status[index].state = DT3155_STATE_IDLE;
- dt3155_status[index].device_installed = 0;
- }
-
- /* Now let's find the hardware. find_PCI() will set ndevices to the
- * number of cards found in this machine. */
- {
- if ((rcode = find_PCI()) != DT_3155_SUCCESS)
- {
- printk("DT3155 error: find_PCI() failed to find dt3155 board(s)\n");
- unregister_chrdev(dt3155_major, "dt3155");
- return rcode;
- }
- }
-
- /* Ok, time to setup the frame buffers */
- if((rcode = dt3155_setup_buffers(&allocatorAddr)) < 0)
- {
- printk("DT3155: Error: setting up buffer not large enough.");
- unregister_chrdev(dt3155_major, "dt3155");
- return rcode;
- }
-
- /* If we are this far, then there is enough RAM */
- /* for the buffers: Print the configuration. */
- for( index = 0; index < ndevices; index++)
- {
- printk("DT3155: Device = %d; acq_mode = %d; "
- "continuous = %d; cols = %d; rows = %d;\n",
- index ,
- dt3155_status[index].config.acq_mode,
- dt3155_status[index].config.continuous,
- dt3155_status[index].config.cols,
- dt3155_status[index].config.rows);
- printk("DT3155: m_addr = 0x%x; m_size = %ld; "
- "state = %d; device_installed = %d\n",
- dt3155_status[index].mem_addr,
- (long int)dt3155_status[index].mem_size,
- dt3155_status[index].state,
- dt3155_status[index].device_installed);
- }
-
- /* Disable ALL interrupts */
- int_csr_r.reg = 0;
- for( index = 0; index < ndevices; index++)
- {
- WriteMReg((dt3155_lbase[index] + INT_CSR), int_csr_r.reg);
- if(dt3155_status[index].device_installed)
- {
- /*
- * This driver *looks* like it can handle shared interrupts,
- * but I can't actually test that myself. I've had reports that it
- * DOES work, so I'll enable it for now. This comment will remain
- * as a reminder in case any problems arise. (SS)
- */
- /* in older kernels flags are: SA_SHIRQ | SA_INTERRUPT */
- rcode = request_irq(dt3155_status[index].irq, (void *)dt3155_isr,
- IRQF_SHARED | IRQF_DISABLED, devname[index],
- (void*) &dt3155_status[index]);
- if(rcode < 0)
- {
- printk("DT3155: minor %d request_irq failed for IRQ %d\n",
- index, dt3155_status[index].irq);
- unregister_chrdev(dt3155_major, "dt3155");
- return rcode;
- }
- }
- }
-
- printk("DT3155: finished loading\n");
-
- return 0;
-}
-
-/*****************************************************
- * cleanup_module(void)
- *
- *****************************************************/
-void cleanup_module(void)
-{
- int index;
-
- printk("DT3155: cleanup_module called\n");
-
- /* remove DMA memory allocated with the allocator */
-#ifdef STANDALONE_ALLOCATOR
- if (allocatorAddr != 0)
- allocator_free_dma(allocatorAddr);
-#else
- allocator_cleanup();
-#endif
-
- unregister_chrdev(dt3155_major, "dt3155");
-
- for(index = 0; index < ndevices; index++)
- {
- if(dt3155_status[index].device_installed == 1)
- {
- printk("DT3155: Freeing irq %d for device %d\n",
- dt3155_status[index].irq, index);
- free_irq(dt3155_status[index].irq, (void*)&dt3155_status[index]);
- }
- }
-}
-
diff --git a/drivers/staging/dt3155/dt3155_drv.h b/drivers/staging/dt3155/dt3155_drv.h
deleted file mode 100644
index 95e68c3388a..00000000000
--- a/drivers/staging/dt3155/dt3155_drv.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
-
-Copyright 1996,2002 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Scott Smedley
-
-This file is part of the DT3155 Device Driver.
-
-The DT3155 Device Driver is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-The DT3155 Device Driver is distributed in the hope that it will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with the DT3155 Device Driver; if not, write to the Free
-Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-MA 02111-1307 USA
-*/
-
-#ifndef DT3155_DRV_INC
-#define DT3155_DRV_INC
-
-/* kernel logical address of the frame grabbers */
-extern u8 *dt3155_lbase[MAXBOARDS];
-
-/* kernel logical address of ram buffer */
-extern u8 *dt3155_bbase;
-
-#ifdef __KERNEL__
-#include <linux/wait.h>
-
-/* wait queue for reads */
-extern wait_queue_head_t dt3155_read_wait_queue[MAXBOARDS];
-#endif
-
-/* number of devices */
-extern u32 ndevices;
-
-extern int dt3155_errno;
-
-#endif
diff --git a/drivers/staging/dt3155/dt3155_io.c b/drivers/staging/dt3155/dt3155_io.c
deleted file mode 100644
index 7792e712d16..00000000000
--- a/drivers/staging/dt3155/dt3155_io.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 1996,2002,2005 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- * Jason Lapenta, Scott Smedley
- *
- * This file is part of the DT3155 Device Driver.
- *
- * The DT3155 Device Driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The DT3155 Device Driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
- * Public License for more details.
- */
-
-/*
- * This file provides some basic register I/O routines. It is modified from
- * demo code provided by Data Translations.
- */
-
-#include <linux/delay.h>
-#include "dt3155.h"
-#include "dt3155_io.h"
-#include "dt3155_drv.h"
-
-
-/****** local copies of board's 32 bit registers ******/
-u32 even_dma_start_r; /* bit 0 should always be 0 */
-u32 odd_dma_start_r; /* .. */
-u32 even_dma_stride_r; /* bits 0&1 should always be 0 */
-u32 odd_dma_stride_r; /* .. */
-u32 even_pixel_fmt_r;
-u32 odd_pixel_fmt_r;
-
-FIFO_TRIGGER_R fifo_trigger_r;
-XFER_MODE_R xfer_mode_r;
-CSR1_R csr1_r;
-RETRY_WAIT_CNT_R retry_wait_cnt_r;
-INT_CSR_R int_csr_r;
-
-u32 even_fld_mask_r;
-u32 odd_fld_mask_r;
-
-MASK_LENGTH_R mask_length_r;
-FIFO_FLAG_CNT_R fifo_flag_cnt_r;
-IIC_CLK_DUR_R iic_clk_dur_r;
-IIC_CSR1_R iic_csr1_r;
-IIC_CSR2_R iic_csr2_r;
-DMA_UPPER_LMT_R even_dma_upper_lmt_r;
-DMA_UPPER_LMT_R odd_dma_upper_lmt_r;
-
-
-
-/******** local copies of board's 8 bit I2C registers ******/
-I2C_CSR2 i2c_csr2;
-I2C_EVEN_CSR i2c_even_csr;
-I2C_ODD_CSR i2c_odd_csr;
-I2C_CONFIG i2c_config;
-u8 i2c_dt_id;
-u8 i2c_x_clip_start;
-u8 i2c_y_clip_start;
-u8 i2c_x_clip_end;
-u8 i2c_y_clip_end;
-u8 i2c_ad_addr;
-u8 i2c_ad_lut;
-I2C_AD_CMD i2c_ad_cmd;
-u8 i2c_dig_out;
-u8 i2c_pm_lut_addr;
-u8 i2c_pm_lut_data;
-
-/*
- * wait_ibsyclr()
- *
- * This function handles read/write timing and r/w timeout errors
- */
-static int wait_ibsyclr(u8 *lpReg)
-{
- /* wait 100 microseconds */
- udelay(100L);
- /* __delay(loops_per_sec/10000); */
-
- ReadMReg(lpReg + IIC_CSR2, iic_csr2_r.reg);
- if (iic_csr2_r.fld.NEW_CYCLE) {
- /* if NEW_CYCLE didn't clear */
- /* TIMEOUT ERROR */
- dt3155_errno = DT_ERR_I2C_TIMEOUT;
- return -ETIMEDOUT;
- }
-
- return 0; /* no error */
-}
-
-/*
- * WriteI2C()
- *
- * This function handles writing to 8-bit DT3155 registers
- *
- * 1st parameter is pointer to 32-bit register base address
- * 2nd parameter is reg. index;
- * 3rd is value to be written
- */
-int WriteI2C(u8 *lpReg, u_short wIregIndex, u8 byVal)
-{
- /* read 32 bit IIC_CSR2 register data into union */
-
- ReadMReg((lpReg + IIC_CSR2), iic_csr2_r.reg);
-
- /* for write operation */
- iic_csr2_r.fld.DIR_RD = 0;
- /* I2C address of I2C register: */
- iic_csr2_r.fld.DIR_ADDR = wIregIndex;
- /* 8 bit data to be written to I2C reg */
- iic_csr2_r.fld.DIR_WR_DATA = byVal;
- /* will start a direct I2C cycle: */
- iic_csr2_r.fld.NEW_CYCLE = 1;
-
- /* xfer union data into 32 bit IIC_CSR2 register */
- WriteMReg((lpReg + IIC_CSR2), iic_csr2_r.reg);
-
- /* wait for IIC cycle to finish */
- return wait_ibsyclr(lpReg);
-}
-
-/*
- * ReadI2C()
- *
- * This function handles reading from 8-bit DT3155 registers
- *
- * 1st parameter is pointer to 32-bit register base address
- * 2nd parameter is reg. index;
- * 3rd is adrs of value to be read
- */
-int ReadI2C(u8 *lpReg, u_short wIregIndex, u8 *byVal)
-{
- int writestat; /* status for return */
-
- /* read 32 bit IIC_CSR2 register data into union */
- ReadMReg((lpReg + IIC_CSR2), iic_csr2_r.reg);
-
- /* for read operation */
- iic_csr2_r.fld.DIR_RD = 1;
-
- /* I2C address of I2C register: */
- iic_csr2_r.fld.DIR_ADDR = wIregIndex;
-
- /* will start a direct I2C cycle: */
- iic_csr2_r.fld.NEW_CYCLE = 1;
-
- /* xfer union's data into 32 bit IIC_CSR2 register */
- WriteMReg((lpReg + IIC_CSR2), iic_csr2_r.reg);
-
- /* wait for IIC cycle to finish */
- writestat = wait_ibsyclr(lpReg);
-
- /* Next 2 commands read 32 bit IIC_CSR1 register's data into union */
- /* first read data is in IIC_CSR1 */
- ReadMReg((lpReg + IIC_CSR1), iic_csr1_r.reg);
-
- /* now get data u8 out of register */
- *byVal = (u8) iic_csr1_r.fld.RD_DATA;
-
- return writestat;
-}
diff --git a/drivers/staging/dt3155/dt3155_io.h b/drivers/staging/dt3155/dt3155_io.h
deleted file mode 100644
index d1a25100169..00000000000
--- a/drivers/staging/dt3155/dt3155_io.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
-
-Copyright 1996,2002 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Jason Lapenta, Scott Smedley
-
-This file is part of the DT3155 Device Driver.
-
-The DT3155 Device Driver is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-The DT3155 Device Driver is distributed in the hope that it will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with the DT3155 Device Driver; if not, write to the Free
-Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-MA 02111-1307 USA
-
-
--- Changes --
-
- Date Programmer Description of changes made
- -------------------------------------------------------------------
- 24-Jul-2002 SS GPL licence.
-
-*/
-
-/* This code is a modified version of examples provided by Data Translations. */
-
-#ifndef DT3155_IO_INC
-#define DT3155_IO_INC
-
-/* macros to access registers */
-
-#define WriteMReg(Address, Data) (*((u32 *)(Address)) = Data)
-#define ReadMReg(Address, Data) (Data = *((u32 *)(Address)))
-
-/***************** 32 bit register globals **************/
-
-/* offsets for 32-bit memory mapped registers */
-
-#define EVEN_DMA_START 0x000
-#define ODD_DMA_START 0x00C
-#define EVEN_DMA_STRIDE 0x018
-#define ODD_DMA_STRIDE 0x024
-#define EVEN_PIXEL_FMT 0x030
-#define ODD_PIXEL_FMT 0x034
-#define FIFO_TRIGGER 0x038
-#define XFER_MODE 0x03C
-#define CSR1 0x040
-#define RETRY_WAIT_CNT 0x044
-#define INT_CSR 0x048
-#define EVEN_FLD_MASK 0x04C
-#define ODD_FLD_MASK 0x050
-#define MASK_LENGTH 0x054
-#define FIFO_FLAG_CNT 0x058
-#define IIC_CLK_DUR 0x05C
-#define IIC_CSR1 0x060
-#define IIC_CSR2 0x064
-#define EVEN_DMA_UPPR_LMT 0x08C
-#define ODD_DMA_UPPR_LMT 0x090
-
-#define CLK_DUR_VAL 0x01010101
-
-
-
-/******** Assignments and Typedefs for 32 bit Memory Mapped Registers ********/
-
-typedef union fifo_trigger_tag {
- u32 reg;
- struct {
- u32 PACKED:6;
- u32 :9;
- u32 PLANER:7;
- u32 :9;
- } fld;
-} FIFO_TRIGGER_R;
-
-typedef union xfer_mode_tag {
- u32 reg;
- struct {
- u32 :2;
- u32 FIELD_TOGGLE:1;
- u32 :5;
- u32 :2;
- u32 :22;
- } fld;
-} XFER_MODE_R;
-
-typedef union csr1_tag {
- u32 reg;
- struct {
- u32 CAP_CONT_EVE:1;
- u32 CAP_CONT_ODD:1;
- u32 CAP_SNGL_EVE:1;
- u32 CAP_SNGL_ODD:1;
- u32 FLD_DN_EVE :1;
- u32 FLD_DN_ODD :1;
- u32 SRST :1;
- u32 FIFO_EN :1;
- u32 FLD_CRPT_EVE:1;
- u32 FLD_CRPT_ODD:1;
- u32 ADDR_ERR_EVE:1;
- u32 ADDR_ERR_ODD:1;
- u32 CRPT_DIS :1;
- u32 RANGE_EN :1;
- u32 :16;
- } fld;
-} CSR1_R;
-
-typedef union retry_wait_cnt_tag {
- u32 reg;
- struct {
- u32 RTRY_WAIT_CNT:8;
- u32 :24;
- } fld;
-} RETRY_WAIT_CNT_R;
-
-typedef union int_csr_tag {
- u32 reg;
- struct {
- u32 FLD_END_EVE :1;
- u32 FLD_END_ODD :1;
- u32 FLD_START :1;
- u32 :5;
- u32 FLD_END_EVE_EN:1;
- u32 FLD_END_ODD_EN:1;
- u32 FLD_START_EN :1;
- u32 :21;
- } fld;
-} INT_CSR_R;
-
-typedef union mask_length_tag {
- u32 reg;
- struct {
- u32 MASK_LEN_EVE:5;
- u32 :11;
- u32 MASK_LEN_ODD:5;
- u32 :11;
- } fld;
-} MASK_LENGTH_R;
-
-typedef union fifo_flag_cnt_tag {
- u32 reg;
- struct {
- u32 AF_COUNT:7;
- u32 :9;
- u32 AE_COUNT:7;
- u32 :9;
- } fld;
-} FIFO_FLAG_CNT_R;
-
-typedef union iic_clk_dur {
- u32 reg;
- struct {
- u32 PHASE_1:8;
- u32 PHASE_2:8;
- u32 PHASE_3:8;
- u32 PHASE_4:8;
- } fld;
-} IIC_CLK_DUR_R;
-
-typedef union iic_csr1_tag {
- u32 reg;
- struct {
- u32 AUTO_EN :1;
- u32 BYPASS :1;
- u32 SDA_OUT :1;
- u32 SCL_OUT :1;
- u32 :4;
- u32 AUTO_ABORT :1;
- u32 DIRECT_ABORT:1;
- u32 SDA_IN :1;
- u32 SCL_IN :1;
- u32 :4;
- u32 AUTO_ADDR :8;
- u32 RD_DATA :8;
- } fld;
-} IIC_CSR1_R;
-
-/**********************************
- * iic_csr2_tag
- */
-typedef union iic_csr2_tag {
- u32 reg;
- struct {
- u32 DIR_WR_DATA :8;
- u32 DIR_SUB_ADDR:8;
- u32 DIR_RD :1;
- u32 DIR_ADDR :7;
- u32 NEW_CYCLE :1;
- u32 :7;
- } fld;
-} IIC_CSR2_R;
-
-/* use for both EVEN and ODD DMA UPPER LIMITS */
-
-/*
- * dma_upper_lmt_tag
- */
-typedef union dma_upper_lmt_tag {
- u32 reg;
- struct {
- u32 DMA_UPPER_LMT_VAL:24;
- u32 :8;
- } fld;
-} DMA_UPPER_LMT_R;
-
-
-/*
- * Global declarations of local copies of boards' 32 bit registers
- */
-extern u32 even_dma_start_r; /* bit 0 should always be 0 */
-extern u32 odd_dma_start_r; /* .. */
-extern u32 even_dma_stride_r; /* bits 0&1 should always be 0 */
-extern u32 odd_dma_stride_r; /* .. */
-extern u32 even_pixel_fmt_r;
-extern u32 odd_pixel_fmt_r;
-
-extern FIFO_TRIGGER_R fifo_trigger_r;
-extern XFER_MODE_R xfer_mode_r;
-extern CSR1_R csr1_r;
-extern RETRY_WAIT_CNT_R retry_wait_cnt_r;
-extern INT_CSR_R int_csr_r;
-
-extern u32 even_fld_mask_r;
-extern u32 odd_fld_mask_r;
-
-extern MASK_LENGTH_R mask_length_r;
-extern FIFO_FLAG_CNT_R fifo_flag_cnt_r;
-extern IIC_CLK_DUR_R iic_clk_dur_r;
-extern IIC_CSR1_R iic_csr1_r;
-extern IIC_CSR2_R iic_csr2_r;
-extern DMA_UPPER_LMT_R even_dma_upper_lmt_r;
-extern DMA_UPPER_LMT_R odd_dma_upper_lmt_r;
-
-
-
-/***************** 8 bit I2C register globals ***********/
-#define CSR2 0x010 /* indices of 8-bit I2C mapped registers */
-#define EVEN_CSR 0x011
-#define ODD_CSR 0x012
-#define CONFIG 0x013
-#define DT_ID 0x01F
-#define X_CLIP_START 0x020
-#define Y_CLIP_START 0x022
-#define X_CLIP_END 0x024
-#define Y_CLIP_END 0x026
-#define AD_ADDR 0x030
-#define AD_LUT 0x031
-#define AD_CMD 0x032
-#define DIG_OUT 0x040
-#define PM_LUT_ADDR 0x050
-#define PM_LUT_DATA 0x051
-
-
-/******** Assignments and Typedefs for 8 bit I2C Registers********************/
-
-typedef union i2c_csr2_tag {
- u8 reg;
- struct {
- u8 CHROM_FIL:1;
- u8 SYNC_SNTL:1;
- u8 HZ50:1;
- u8 SYNC_PRESENT:1;
- u8 BUSY_EVE:1;
- u8 BUSY_ODD:1;
- u8 DISP_PASS:1;
- } fld;
-} I2C_CSR2;
-
-typedef union i2c_even_csr_tag {
- u8 reg;
- struct {
- u8 DONE_EVE :1;
- u8 SNGL_EVE :1;
- u8 ERROR_EVE:1;
- u8 :5;
- } fld;
-} I2C_EVEN_CSR;
-
-typedef union i2c_odd_csr_tag {
- u8 reg;
- struct {
- u8 DONE_ODD:1;
- u8 SNGL_ODD:1;
- u8 ERROR_ODD:1;
- u8 :5;
- } fld;
-} I2C_ODD_CSR;
-
-typedef union i2c_config_tag {
- u8 reg;
- struct {
- u8 ACQ_MODE:2;
- u8 EXT_TRIG_EN:1;
- u8 EXT_TRIG_POL:1;
- u8 H_SCALE:1;
- u8 CLIP:1;
- u8 PM_LUT_SEL:1;
- u8 PM_LUT_PGM:1;
- } fld;
-} I2C_CONFIG;
-
-
-typedef union i2c_ad_cmd_tag {
- /* bits can have 3 different meanings depending on value of AD_ADDR */
- u8 reg;
- /* Bt252 Command Register if AD_ADDR = 00h */
- struct {
- u8 :2;
- u8 SYNC_LVL_SEL:2;
- u8 SYNC_CNL_SEL:2;
- u8 DIGITIZE_CNL_SEL1:2;
- } bt252_command;
-
- /* Bt252 IOUT0 register if AD_ADDR = 01h */
- struct {
- u8 IOUT_DATA:8;
- } bt252_iout0;
-
- /* BT252 IOUT1 register if AD_ADDR = 02h */
- struct {
- u8 IOUT_DATA:8;
- } bt252_iout1;
-} I2C_AD_CMD;
-
-
-/***** Global declarations of local copies of boards' 8 bit I2C registers ***/
-
-extern I2C_CSR2 i2c_csr2;
-extern I2C_EVEN_CSR i2c_even_csr;
-extern I2C_ODD_CSR i2c_odd_csr;
-extern I2C_CONFIG i2c_config;
-extern u8 i2c_dt_id;
-extern u8 i2c_x_clip_start;
-extern u8 i2c_y_clip_start;
-extern u8 i2c_x_clip_end;
-extern u8 i2c_y_clip_end;
-extern u8 i2c_ad_addr;
-extern u8 i2c_ad_lut;
-extern I2C_AD_CMD i2c_ad_cmd;
-extern u8 i2c_dig_out;
-extern u8 i2c_pm_lut_addr;
-extern u8 i2c_pm_lut_data;
-
-/* Functions for Global use */
-
-/* access 8-bit IIC registers */
-
-extern int ReadI2C(u8 *lpReg, u_short wIregIndex, u8 *byVal);
-extern int WriteI2C(u8 *lpReg, u_short wIregIndex, u8 byVal);
-
-#endif
diff --git a/drivers/staging/dt3155/dt3155_isr.c b/drivers/staging/dt3155/dt3155_isr.c
deleted file mode 100644
index 33ddc9c057f..00000000000
--- a/drivers/staging/dt3155/dt3155_isr.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
-
-Copyright 1996,2002,2005 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Jason Lapenta, Scott Smedley, Greg Sharp
-
-This file is part of the DT3155 Device Driver.
-
-The DT3155 Device Driver is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-The DT3155 Device Driver is distributed in the hope that it will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with the DT3155 Device Driver; if not, write to the Free
-Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-MA 02111-1307 USA
-
- File: dt3155_isr.c
-Purpose: Buffer management routines, and other routines for the ISR
- (the actual isr is in dt3155_drv.c)
-
--- Changes --
-
- Date Programmer Description of changes made
- -------------------------------------------------------------------
- 03-Jul-2000 JML n/a
- 02-Apr-2002 SS Mods to make work with separate allocator
- module; Merged John Roll's mods to make work with
- multiple boards.
- 10-Jul-2002 GCS Complete rewrite of setup_buffers to disallow
- buffers which span a 4MB boundary.
- 24-Jul-2002 SS GPL licence.
- 30-Jul-2002 NJC Added support for buffer loop.
- 31-Jul-2002 NJC Complete rewrite of buffer management
- 02-Aug-2002 NJC Including slab.h instead of malloc.h (no warning).
- Also, allocator_init() now returns allocator_max
- so cleaned up allocate_buffers() accordingly.
- 08-Aug-2005 SS port to 2.6 kernel.
-
-*/
-
-#include <asm/system.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-
-#include "dt3155.h"
-#include "dt3155_drv.h"
-#include "dt3155_io.h"
-#include "dt3155_isr.h"
-#include "allocator.h"
-
-#define FOUR_MB (0x0400000) /* Can't DMA across a 4MB boundary! */
-#define UPPER_10_BITS (0x3FF<<22) /* Can't DMA across a 4MB boundary! */
-
-
-/* Pointer into global structure for handling buffers */
-struct dt3155_fbuffer *dt3155_fbuffer[MAXBOARDS] = {NULL
-#if MAXBOARDS == 2
- , NULL
-#endif
-};
-
-/******************************************************************************
- * Simple array-based queue struct
- *
- * Some handy functions using the buffering structure.
- *****************************************************************************/
-
-
-/***************************
- * are_empty_buffers
- * m is minor # of device
- ***************************/
-bool are_empty_buffers(int m)
-{
- return dt3155_fbuffer[m]->empty_len;
-}
-
-/**************************
- * push_empty
- * m is minor # of device
- *
- * This is slightly confusing. The number empty_len is the literal #
- * of empty buffers. After calling, empty_len-1 is the index into the
- * empty buffer stack. So, if empty_len == 1, there is one empty buffer,
- * given by dt3155_fbuffer[m]->empty_buffers[0].
- * empty_buffers should never fill up, though this is not checked.
- **************************/
-void push_empty(int index, int m)
-{
- dt3155_fbuffer[m]->empty_buffers[dt3155_fbuffer[m]->empty_len] = index;
- dt3155_fbuffer[m]->empty_len++;
-}
-
-/**************************
- * pop_empty(m)
- * m is minor # of device
- **************************/
-int pop_empty(int m)
-{
- dt3155_fbuffer[m]->empty_len--;
- return dt3155_fbuffer[m]->empty_buffers[dt3155_fbuffer[m]->empty_len];
-}
-
-/*************************
- * is_ready_buf_empty(m)
- * m is minor # of device
- *************************/
-bool is_ready_buf_empty(int m)
-{
- return ((dt3155_fbuffer[m]->ready_len) == 0);
-}
-
-/*************************
- * is_ready_buf_full(m)
- * m is minor # of device
- * this should *never* be true if there are any active, locked or empty
- * buffers, since it corresponds to nbuffers ready buffers!!
- * 7/31/02: total rewrite. --NJC
- *************************/
-bool is_ready_buf_full(int m)
-{
- return dt3155_fbuffer[m]->ready_len == dt3155_fbuffer[m]->nbuffers;
-}
-
-/*****************************************************
- * push_ready(m, buffer)
- * m is minor # of device
- *
- *****************************************************/
-void push_ready(int m, int index)
-{
- int head = dt3155_fbuffer[m]->ready_head;
-
- dt3155_fbuffer[m]->ready_que[head] = index;
- dt3155_fbuffer[m]->ready_head = ((head + 1) %
- (dt3155_fbuffer[m]->nbuffers));
- dt3155_fbuffer[m]->ready_len++;
-
-}
-
-/*****************************************************
- * get_tail()
- * m is minor # of device
- *
- * Simply computes the tail given the head and the length.
- *****************************************************/
-static int get_tail(int m)
-{
- return (dt3155_fbuffer[m]->ready_head -
- dt3155_fbuffer[m]->ready_len +
- dt3155_fbuffer[m]->nbuffers)%
- (dt3155_fbuffer[m]->nbuffers);
-}
-
-
-
-/*****************************************************
- * pop_ready()
- * m is minor # of device
- *
- * This assumes that there is a ready buffer available... this should
- * be checked (e.g. with is_ready_buf_empty()) prior to the call.
- *****************************************************/
-int pop_ready(int m)
-{
- int tail;
- tail = get_tail(m);
- dt3155_fbuffer[m]->ready_len--;
- return dt3155_fbuffer[m]->ready_que[tail];
-}
-
-
-/*****************************************************
- * printques
- * m is minor # of device
- *****************************************************/
-void printques(int m)
-{
- int head = dt3155_fbuffer[m]->ready_head;
- int tail;
- int num = dt3155_fbuffer[m]->nbuffers;
- int frame_index;
- int index;
-
- tail = get_tail(m);
-
- printk("\n R:");
- for (index = tail; index != head; index++, index = index % (num)) {
- frame_index = dt3155_fbuffer[m]->ready_que[index];
- printk(" %d ", frame_index);
- }
-
- printk("\n E:");
- for (index = 0; index < dt3155_fbuffer[m]->empty_len; index++) {
- frame_index = dt3155_fbuffer[m]->empty_buffers[index];
- printk(" %d ", frame_index);
- }
-
- frame_index = dt3155_fbuffer[m]->active_buf;
- printk("\n A: %d", frame_index);
-
- frame_index = dt3155_fbuffer[m]->locked_buf;
- printk("\n L: %d\n", frame_index);
-
-}
-
-/*****************************************************
- * adjust_4MB
- *
- * If a buffer intersects the 4MB boundary, push
- * the start address up to the beginning of the
- * next 4MB chunk (assuming bufsize < 4MB).
- *****************************************************/
-u32 adjust_4MB(u32 buf_addr, u32 bufsize)
-{
- if (((buf_addr+bufsize) & UPPER_10_BITS) != (buf_addr & UPPER_10_BITS))
- return (buf_addr+bufsize) & UPPER_10_BITS;
- else
- return buf_addr;
-}
-
-
-/*****************************************************
- * allocate_buffers
- *
- * Try to allocate enough memory for all requested
- * buffers. If there is not enough free space
- * try for less memory.
- *****************************************************/
-void allocate_buffers(u32 *buf_addr, u32* total_size_kbs,
- u32 bufsize)
-{
- /* Compute the minimum amount of memory guaranteed to hold all
- MAXBUFFERS such that no buffer crosses the 4MB boundary.
- Store this value in the variable "full_size" */
-
- u32 allocator_max;
- u32 bufs_per_chunk = (FOUR_MB / bufsize);
- u32 filled_chunks = (MAXBUFFERS-1) / bufs_per_chunk;
- u32 leftover_bufs = MAXBUFFERS - filled_chunks * bufs_per_chunk;
-
- u32 full_size = bufsize /* possibly unusable part of 1st chunk */
- + filled_chunks * FOUR_MB /* max # of completely filled 4mb chunks */
- + leftover_bufs * bufsize; /* these buffs will be in a partly filled
- chunk at beginning or end */
-
- u32 full_size_kbs = 1 + (full_size-1) / 1024;
- u32 min_size_kbs = 2*ndevices*bufsize / 1024;
- u32 size_kbs;
-
- /* Now, try to allocate full_size. If this fails, keep trying for
- less & less memory until it succeeds. */
-#ifndef STANDALONE_ALLOCATOR
- /* initialize the allocator */
- allocator_init(&allocator_max);
-#endif
- size_kbs = full_size_kbs;
- *buf_addr = 0;
- printk("DT3155: We would like to get: %d KB\n", full_size_kbs);
- printk("DT3155: ...but need at least: %d KB\n", min_size_kbs);
- printk("DT3155: ...the allocator has: %d KB\n", allocator_max);
- size_kbs = (full_size_kbs <= allocator_max ? full_size_kbs : allocator_max);
- if (size_kbs > min_size_kbs) {
- if ((*buf_addr = allocator_allocate_dma(size_kbs, GFP_KERNEL)) != 0) {
- printk("DT3155: Managed to allocate: %d KB\n", size_kbs);
- *total_size_kbs = size_kbs;
- return;
- }
- }
- /* If we got here, the allocation failed */
- printk("DT3155: Allocator failed!\n");
- *buf_addr = 0;
- *total_size_kbs = 0;
- return;
-
-}
-
-
-/*****************************************************
- * dt3155_setup_buffers
- *
- * setup_buffers just puts the buffering system into
- * a consistent state before the start of interrupts
- *
- * JML : it looks like all the buffers need to be
- * contiguous. So I'm going to try to allocate one
- * contiguous buffer.
- *
- * GCS : Fix DMA problems when buffer spans
- * 4MB boundary. Also, add error checking. This
- * function will return -ENOMEM when there is not enough memory.
- *****************************************************/
-u32 dt3155_setup_buffers(u32 *allocatorAddr)
-
-{
- u32 index;
- u32 rambuff_addr; /* start of allocation */
- u32 rambuff_size; /* total size allocated to driver */
- u32 rambuff_acm; /* accumulator, keeps track of how much
- is left after being split up */
- u32 rambuff_end; /* end of rambuff */
- u32 numbufs; /* number of useful buffers allocated (per device) */
- u32 bufsize = DT3155_MAX_ROWS * DT3155_MAX_COLS;
- int m; /* minor # of device, looped for all devs */
-
- /* zero the fbuffer status and address structure */
- for (m = 0; m < ndevices; m++) {
- dt3155_fbuffer[m] = &(dt3155_status[m].fbuffer);
-
- /* Make sure the buffering variables are consistent */
- {
- u8 *ptr = (u8 *) dt3155_fbuffer[m];
- for (index = 0; index < sizeof(struct dt3155_fbuffer); index++)
- *(ptr++) = 0;
- }
- }
-
- /* allocate a large contiguous chunk of RAM */
- allocate_buffers(&rambuff_addr, &rambuff_size, bufsize);
- printk("DT3155: mem info\n");
- printk(" - rambuf_addr = 0x%x\n", rambuff_addr);
- printk(" - length (kb) = %u\n", rambuff_size);
- if (rambuff_addr == 0) {
- printk(KERN_INFO
- "DT3155: Error setup_buffers() allocator dma failed\n");
- return -ENOMEM;
- }
- *allocatorAddr = rambuff_addr;
- rambuff_end = rambuff_addr + 1024 * rambuff_size;
-
- /* after allocation, we need to count how many useful buffers there
- are so we can give an equal number to each device */
- rambuff_acm = rambuff_addr;
- for (index = 0; index < MAXBUFFERS; index++) {
- rambuff_acm = adjust_4MB(rambuff_acm, bufsize); /* avoid spanning 4MB boundary */
- if (rambuff_acm + bufsize > rambuff_end)
- break;
- rambuff_acm += bufsize;
- }
- /* Following line is OK, will waste buffers if index
- * not evenly divisible by ndevices -NJC*/
- numbufs = index / ndevices;
- printk(" - numbufs = %u\n", numbufs);
- if (numbufs < 2) {
- printk(KERN_INFO
- "DT3155: Error setup_buffers() couldn't allocate 2 bufs/board\n");
- return -ENOMEM;
- }
-
- /* now that we have board memory we split it up */
- /* between the boards and the buffers */
- rambuff_acm = rambuff_addr;
- for (m = 0; m < ndevices; m++) {
- rambuff_acm = adjust_4MB(rambuff_acm, bufsize);
-
- /* Save the start of this board's buffer space (for mmap). */
- dt3155_status[m].mem_addr = rambuff_acm;
-
- for (index = 0; index < numbufs; index++) {
- rambuff_acm = adjust_4MB(rambuff_acm, bufsize);
- if (rambuff_acm + bufsize > rambuff_end) {
- /* Should never happen */
- printk("DT3155 PROGRAM ERROR (GCS)\n"
- "Error distributing allocated buffers\n");
- return -ENOMEM;
- }
-
- dt3155_fbuffer[m]->frame_info[index].addr = rambuff_acm;
- push_empty(index, m);
- /* printk(" - Buffer : %lx\n",
- * dt3155_fbuffer[m]->frame_info[index].addr);
- */
- dt3155_fbuffer[m]->nbuffers += 1;
- rambuff_acm += bufsize;
- }
-
- /* Make sure there is an active buffer there. */
- dt3155_fbuffer[m]->active_buf = pop_empty(m);
- dt3155_fbuffer[m]->even_happened = 0;
- dt3155_fbuffer[m]->even_stopped = 0;
-
- /* make sure there is no locked_buf JML 2/28/00 */
- dt3155_fbuffer[m]->locked_buf = -1;
-
- dt3155_status[m].mem_size =
- rambuff_acm - dt3155_status[m].mem_addr;
-
- /* setup the ready queue */
- dt3155_fbuffer[m]->ready_head = 0;
- dt3155_fbuffer[m]->ready_len = 0;
- printk("Available buffers for device %d: %d\n",
- m, dt3155_fbuffer[m]->nbuffers);
- }
-
- return 1;
-}
-
-/*****************************************************
- * internal_release_locked_buffer
- *
- * The internal function for releasing a locked buffer.
- * It assumes interrupts are turned off.
- *
- * m is minor number of device
- *****************************************************/
-static void internal_release_locked_buffer(int m)
-{
- /* Pointer into global structure for handling buffers */
- if (dt3155_fbuffer[m]->locked_buf >= 0) {
- push_empty(dt3155_fbuffer[m]->locked_buf, m);
- dt3155_fbuffer[m]->locked_buf = -1;
- }
-}
-
-
-/*****************************************************
- * dt3155_release_locked_buffer()
- * m is minor # of device
- *
- * The user-callable version of the above.
- *
- *****************************************************/
-void dt3155_release_locked_buffer(int m)
-{
- unsigned long int flags;
- local_save_flags(flags);
- local_irq_disable();
- internal_release_locked_buffer(m);
- local_irq_restore(flags);
-}
-
-
-/*****************************************************
- * dt3155_flush()
- * m is minor # of device
- *
- *****************************************************/
-int dt3155_flush(int m)
-{
- int index;
- unsigned long int flags;
- local_save_flags(flags);
- local_irq_disable();
-
- internal_release_locked_buffer(m);
- dt3155_fbuffer[m]->empty_len = 0;
-
- for (index = 0; index < dt3155_fbuffer[m]->nbuffers; index++)
- push_empty(index, m);
-
- /* Make sure there is an active buffer there. */
- dt3155_fbuffer[m]->active_buf = pop_empty(m);
-
- dt3155_fbuffer[m]->even_happened = 0;
- dt3155_fbuffer[m]->even_stopped = 0;
-
- /* setup the ready queue */
- dt3155_fbuffer[m]->ready_head = 0;
- dt3155_fbuffer[m]->ready_len = 0;
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-/*****************************************************
- * dt3155_get_ready_buffer()
- * m is minor # of device
- *
- * get_ready_buffer will grab the next chunk of data
- * if it is already there, otherwise it returns 0.
- * If the user has a buffer locked it will unlock
- * that buffer before returning the new one.
- *****************************************************/
-int dt3155_get_ready_buffer(int m)
-{
- int frame_index;
- unsigned long int flags;
- local_save_flags(flags);
- local_irq_disable();
-
-#ifdef DEBUG_QUES_A
- printques(m);
-#endif
-
- internal_release_locked_buffer(m);
-
- if (is_ready_buf_empty(m))
- frame_index = -1;
- else {
- frame_index = pop_ready(m);
- dt3155_fbuffer[m]->locked_buf = frame_index;
- }
-
-#ifdef DEBUG_QUES_B
- printques(m);
-#endif
-
- local_irq_restore(flags);
-
- return frame_index;
-}
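
A minimal, userspace-style sketch of the partitioning scheme used by the removed setup_buffers() code above: count how many buffers of size bufsize fit in a region without any buffer spanning a 4 MB boundary, then give an equal share to each board. adjust_4MB() itself is not visible in this hunk, so the helper below is only an assumption about its behaviour, and the region, buffer size and board count in main() are hypothetical.

#include <stdio.h>

#define FOUR_MB (4UL * 1024 * 1024)

/* Assumed behaviour of adjust_4MB(): bump addr to the next 4 MB boundary
 * if a buffer starting at addr would cross one. */
static unsigned long adjust_4mb(unsigned long addr, unsigned long bufsize)
{
	unsigned long next = (addr + FOUR_MB) & ~(FOUR_MB - 1);

	return (addr + bufsize > next) ? next : addr;
}

static unsigned long bufs_per_board(unsigned long start, unsigned long end,
				    unsigned long bufsize, unsigned int nboards)
{
	unsigned long addr = start;
	unsigned long count = 0;

	while (1) {
		addr = adjust_4mb(addr, bufsize);
		if (addr + bufsize > end)
			break;
		addr += bufsize;
		count++;
	}
	/* As in the original, buffers left over after the division are wasted. */
	return count / nboards;
}

int main(void)
{
	/* Hypothetical 12 MB region, 1 MB buffers, two boards. */
	printf("%lu buffers per board\n",
	       bufs_per_board(0x100000, 0xD00000, 0x100000, 2));
	return 0;
}
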
diff --git a/drivers/staging/dt3155/dt3155_isr.h b/drivers/staging/dt3155/dt3155_isr.h
deleted file mode 100644
index 7d474cf743d..00000000000
--- a/drivers/staging/dt3155/dt3155_isr.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-
-Copyright 1996,2002 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Jason Lapenta, Scott Smedley
-
-This file is part of the DT3155 Device Driver.
-
-The DT3155 Device Driver is free software; you can redistribute it
-and/or modify it under the terms of the GNU General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
-
-The DT3155 Device Driver is distributed in the hope that it will be
-useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with the DT3155 Device Driver; if not, write to the Free
-Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-MA 02111-1307 USA
-
-
--- Changes --
-
- Date Programmer Description of changes made
- -------------------------------------------------------------------
- 03-Jul-2000 JML n/a
- 24-Jul-2002 SS GPL licence.
- 26-Oct-2009 SS Porting to 2.6.30 kernel.
-
--- notes --
-
-*/
-
-#ifndef DT3155_ISR_H
-#define DT3155_ISR_H
-
-extern struct dt3155_fbuffer *dt3155_fbuffer[MAXBOARDS];
-
-/* User functions for buffering */
-/* Initialize the buffering system. This should */
-/* be called prior to enabling interrupts */
-
-u32 dt3155_setup_buffers(u32 *allocatorAddr);
-
-/* Get the next frame of data if it is ready. Returns */
-/* zero if no data is ready. If there is data but */
-/* the user has a locked buffer, it will unlock that */
-/* buffer and return it to the free list. */
-
-int dt3155_get_ready_buffer(int minor);
-
-/* Return a locked buffer to the free list */
-
-void dt3155_release_locked_buffer(int minor);
-
-/* Flush the buffer system */
-int dt3155_flush(int minor);
-
-/**********************************
- * Simple array based que struct
- **********************************/
-
-bool are_empty_buffers(int minor);
-void push_empty(int index, int minor);
-
-int pop_empty(int minor);
-
-bool is_ready_buf_empty(int minor);
-bool is_ready_buf_full(int minor);
-
-void push_ready(int minor, int index);
-int pop_ready(int minor);
-
-
-#endif
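
The removed header declares a "simple array based que" API (push_empty/pop_empty, push_ready/pop_ready, plus emptiness and fullness tests). The sketch below is a generic fixed-size ring buffer with the same shape, written for clarity; it is not the dt3155 driver's actual implementation, which lives in dt3155_isr.c.

#include <stdbool.h>

#define NBUF 8

struct frame_queue {
	int buf[NBUF];       /* stored frame indices           */
	unsigned int head;   /* next slot to pop               */
	unsigned int len;    /* number of entries in the queue */
};

static bool queue_empty(const struct frame_queue *q)
{
	return q->len == 0;
}

static bool queue_full(const struct frame_queue *q)
{
	return q->len == NBUF;
}

static void queue_push(struct frame_queue *q, int index)
{
	if (queue_full(q))
		return;                       /* caller must check first */
	q->buf[(q->head + q->len) % NBUF] = index;
	q->len++;
}

static int queue_pop(struct frame_queue *q)
{
	int index;

	if (queue_empty(q))
		return -1;                    /* mirrors the driver's -1 "no buffer" */
	index = q->buf[q->head];
	q->head = (q->head + 1) % NBUF;
	q->len--;
	return index;
}
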
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index 6dc3af62284..fd48b38e797 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -1008,6 +1008,8 @@ struct dma_coherent_mem {
static int __devinit
dt3155_alloc_coherent(struct device *dev, size_t size, int flags)
{
+ struct dma_coherent_mem *mem;
+ dma_addr_t dev_base;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
@@ -1018,25 +1020,28 @@ dt3155_alloc_coherent(struct device *dev, size_t size, int flags)
if (dev->dma_mem)
goto out;
- dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
goto out;
- dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
+ mem->virt_base = dma_alloc_coherent(dev, size, &dev_base,
+ DT3155_COH_FLAGS);
+ if (!mem->virt_base)
+ goto err_alloc_coherent;
+ mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mem->bitmap)
goto err_bitmap;
- dev->dma_mem->virt_base = dma_alloc_coherent(dev, size,
- &dev->dma_mem->device_base, DT3155_COH_FLAGS);
- if (!dev->dma_mem->virt_base)
- goto err_coherent;
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
+ /* coherent_dma_mask is already set to 32 bits */
+ mem->device_base = dev_base;
+ mem->size = pages;
+ mem->flags = flags;
+ dev->dma_mem = mem;
return DMA_MEMORY_MAP;
-err_coherent:
- kfree(dev->dma_mem->bitmap);
err_bitmap:
- kfree(dev->dma_mem);
+ dma_free_coherent(dev, size, mem->virt_base, dev_base);
+err_alloc_coherent:
+ kfree(mem);
out:
return 0;
}
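
The hunk above reworks dt3155_alloc_coherent() so that nothing is attached to dev->dma_mem until every allocation has succeeded, and so that the error path releases resources in the reverse order of acquisition. The sketch below shows that pattern in isolation; the struct is a simplified stand-in for the driver's dma_coherent_mem and the GFP flags are illustrative rather than the driver's DT3155_COH_FLAGS.

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct coherent_pool {
	void *virt_base;
	dma_addr_t device_base;
	unsigned long *bitmap;
	int size;		/* in pages */
	int flags;
};

static struct coherent_pool *coherent_pool_create(struct device *dev,
						  size_t size, int flags)
{
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	struct coherent_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	pool->virt_base = dma_alloc_coherent(dev, size, &pool->device_base,
					     GFP_KERNEL);
	if (!pool->virt_base)
		goto err_coherent;
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto err_bitmap;
	pool->size = pages;
	pool->flags = flags;
	return pool;			/* publish to the caller only now */

err_bitmap:
	dma_free_coherent(dev, size, pool->virt_base, pool->device_base);
err_coherent:
	kfree(pool);
	return NULL;
}
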
diff --git a/drivers/staging/easycap/Kconfig b/drivers/staging/easycap/Kconfig
new file mode 100644
index 00000000000..bd96f39f273
--- /dev/null
+++ b/drivers/staging/easycap/Kconfig
@@ -0,0 +1,17 @@
+config EASYCAP
+ tristate "EasyCAP USB ID 05e1:0408 support"
+ depends on USB && VIDEO_DEV
+
+ ---help---
+ This is an integrated audio/video driver for EasyCAP cards with
+ USB ID 05e1:0408. It supports two hardware variants:
+
+ * EasyCAP USB 2.0 Video Adapter with Audio, Model DC60,
+ having input cables labelled CVBS, S-VIDEO, AUDIO(L), AUDIO(R)
+
+ * EasyCAP002 4-Channel USB 2.0 DVR, having input cables labelled
+ 1, 2, 3, 4 and an unlabelled input cable for a microphone.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called easycap.
+
diff --git a/drivers/staging/easycap/Makefile b/drivers/staging/easycap/Makefile
new file mode 100644
index 00000000000..d93bd6b70a4
--- /dev/null
+++ b/drivers/staging/easycap/Makefile
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_EASYCAP) += easycap.o
+
+easycap-objs := easycap_main.o easycap_low.o easycap_sound.o
+easycap-objs += easycap_ioctl.o easycap_settings.o
+easycap-objs += easycap_testcard.o
+
+EXTRA_CFLAGS += -Wall
+# Impose all or none of the following:
+EXTRA_CFLAGS += -DEASYCAP_IS_VIDEODEV_CLIENT
+EXTRA_CFLAGS += -DEASYCAP_NEEDS_V4L2_DEVICE_H
+EXTRA_CFLAGS += -DEASYCAP_NEEDS_V4L2_FOPS
+
diff --git a/drivers/staging/easycap/README b/drivers/staging/easycap/README
new file mode 100644
index 00000000000..3775481f05e
--- /dev/null
+++ b/drivers/staging/easycap/README
@@ -0,0 +1,130 @@
+
+ ***********************************************************
+ * EasyCAP USB 2.0 Video Adapter with Audio, Model DC60 *
+ * and *
+ * EasyCAP002 4-Channel USB 2.0 DVR *
+ ***********************************************************
+ Mike Thomas <rmthomas@sciolus.org>
+
+
+
+SUPPORTED HARDWARE
+------------------
+
+This driver is intended for use with hardware having USB ID 05e1:0408.
+Two kinds of EasyCAP have this USB ID, namely:
+
+ * EasyCAP USB 2.0 Video Adapter with Audio, Model DC60,
+ having input cables labelled CVBS, S-VIDEO, AUDIO(L), AUDIO(R)
+
+ * EasyCAP002 4-Channel USB 2.0 DVR, having input cables labelled
+ 1, 2, 3, 4 and an unlabelled input cable for a microphone.
+
+
+BUILD OPTIONS AND DEPENDENCIES
+------------------------------
+
+If the parameter EASYCAP_IS_VIDEODEV_CLIENT is undefined during compilation
+the built module is entirely independent of the videodev module, and when
+the EasyCAP is physically plugged into a USB port the special files
+/dev/easycap0 and /dev/easysnd1 are created as video and sound sources
+respectively.
+
+If the parameter EASYCAP_IS_VIDEODEV_CLIENT is defined during compilation
+the built easycap module is configured to register with the videodev module,
+in which case the special files created when the EasyCAP is plugged in are
+/dev/video0 and /dev/easysnd0. Use of the easycap module as a client of
+the videodev module has received very little testing as of June 2010.
+
+
+KNOWN BUILD PROBLEMS
+--------------------
+
+(1) Recent gcc versions may generate the message:
+
+ warning: the frame size of .... bytes is larger than 1024 bytes
+
+This warning can be suppressed by specifying in the Makefile:
+
+ EXTRA_CFLAGS += -Wframe-larger-than=8192
+
+but it would be preferable to remove the cause of the warning.
+
+
+KNOWN RUNTIME ISSUES
+--------------------
+
+(1) Randomly (maybe 5 to 10% of occasions) the driver fails to produce any
+output at start-up. Closing mplayer (or whatever the user program is) and
+restarting it restores normal performance without any other remedial action
+being necessary. The reason for this is not known.
+
+(2) Intentionally, this driver will not stream material which is unambiguously
+identified by the hardware as copy-protected. The video output will freeze
+within about a minute when this situation arises.
+
+(3) The controls for luminance, contrast, saturation, hue and volume may not
+always work properly.
+
+(4) Reduced-resolution S-Video seems to suffer from moire artefacts. No
+attempt has yet been made to remedy this.
+
+
+SUPPORTED TV STANDARDS AND RESOLUTIONS
+--------------------------------------
+
+The following TV standards are natively supported by the hardware and are
+usable as (for example) the "norm=" parameter in the mplayer command:
+
+ PAL_BGHIN, NTSC_N_443,
+ PAL_Nc, NTSC_N,
+ SECAM, NTSC_M, NTSC_M_JP,
+ PAL_60, NTSC_443,
+ PAL_M.
+
+The available picture sizes are:
+
+ at 25 frames per second: 720x576, 704x576, 640x480, 360x288, 320x240;
+ at 30 frames per second: 720x480, 640x480, 360x240, 320x240;
+
+
+WHAT'S TESTED AND WHAT'S NOT
+----------------------------
+
+This driver is known to work with mplayer, mencoder, tvtime and sufficiently
+recent versions of vlc. An interface to ffmpeg is implemented, but serious
+audio-video synchronization problems remain.
+
+The driver is designed to support all the TV standards accepted by the
+hardware, but as yet it has actually been tested on only a few of these.
+
+I have been unable to test and calibrate the S-video input myself because I
+do not possess any equipment with S-video output.
+
+This driver does not understand the V4L1 IOCTL commands, so programs such
+as camorama are not compatible. There are reports that the driver does
+work with sufficiently recent (V4L2) versions of zoneminder, but I have not
+attempted to confirm this myself.
+
+
+UDEV RULES
+----------
+
+In order that the special files /dev/easycap0 and /dev/easysnd1 are created
+with conveniently relaxed permissions when the EasyCAP is plugged in, a file
+should preferably be provided in the directory /etc/udev/rules.d with content:
+
+ACTION!="add|change", GOTO="easycap_rules_end"
+ATTRS{idVendor}=="05e1", ATTRS{idProduct}=="0408", \
+ MODE="0666", OWNER="root", GROUP="root"
+LABEL="easycap_rules_end"
+
+
+ACKNOWLEDGEMENTS AND REFERENCES
+------------------------------
+This driver makes use of information contained in the Syntek Semicon DC-1125
+Driver, presently maintained at http://sourceforge.net/projects/syntekdriver/
+by Nicolas Vivien. Particularly useful has been a patch to the latter driver
+provided by Ivor Hewitt in January 2009. The NTSC implementation is taken
+from the work of Ben Trask.
+
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h
new file mode 100644
index 00000000000..f3c827eb0ab
--- /dev/null
+++ b/drivers/staging/easycap/easycap.h
@@ -0,0 +1,638 @@
+/*****************************************************************************
+* *
+* easycap.h *
+* *
+*****************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * THE FOLLOWING PARAMETERS ARE UNDEFINED:
+ *
+ * EASYCAP_DEBUG
+ * EASYCAP_IS_VIDEODEV_CLIENT
+ * EASYCAP_NEEDS_USBVIDEO_H
+ * EASYCAP_NEEDS_V4L2_DEVICE_H
+ * EASYCAP_NEEDS_V4L2_FOPS
+ *
+ * IF REQUIRED THEY MUST BE EXTERNALLY DEFINED, FOR EXAMPLE AS COMPILER
+ * OPTIONS.
+ */
+/*---------------------------------------------------------------------------*/
+
+#if (!defined(EASYCAP_H))
+#define EASYCAP_H
+
+#if defined(EASYCAP_DEBUG)
+#if (9 < EASYCAP_DEBUG)
+#error Debug levels 0 to 9 are okay.\
+ To achieve higher levels, remove this trap manually from easycap.h
+#endif
+#endif /*EASYCAP_DEBUG*/
+/*---------------------------------------------------------------------------*/
+/*
+ * THESE ARE FOR MAINTENANCE ONLY - NORMALLY UNDEFINED:
+ */
+/*---------------------------------------------------------------------------*/
+#undef PREFER_NTSC
+#undef EASYCAP_TESTCARD
+#undef EASYCAP_TESTTONE
+#undef LOCKFRAME
+#undef NOREADBACK
+#undef AUDIOTIME
+/*---------------------------------------------------------------------------*/
+/*
+ *
+ * DEFINE BRIDGER TO ACTIVATE THE ROUTINE FOR BRIDGING VIDEOTAPE DROPOUTS.
+ *
+ * *** UNDER DEVELOPMENT/TESTING - NOT READY YET!***
+ *
+ */
+/*---------------------------------------------------------------------------*/
+#undef BRIDGER
+/*---------------------------------------------------------------------------*/
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/smp_lock.h>
+#include <linux/usb.h>
+#include <linux/uaccess.h>
+
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+#if (!defined(__OLD_VIDIOC_))
+#define __OLD_VIDIOC_
+#endif /* !defined(__OLD_VIDIOC_) */
+
+#include <media/v4l2-dev.h>
+
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+#include <media/v4l2-device.h>
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+#if (!defined(__OLD_VIDIOC_))
+#define __OLD_VIDIOC_
+#endif /* !defined(__OLD_VIDIOC_) */
+#include <linux/videodev2.h>
+
+#include <linux/soundcard.h>
+
+#if defined(EASYCAP_NEEDS_USBVIDEO_H)
+#include <config/video/usbvideo.h>
+#endif /*EASYCAP_NEEDS_USBVIDEO_H*/
+
+#if (!defined(PAGE_SIZE))
+#error "PAGE_SIZE not defined"
+#endif
+
+#define STRINGIZE_AGAIN(x) #x
+#define STRINGIZE(x) STRINGIZE_AGAIN(x)
+
+/*---------------------------------------------------------------------------*/
+/* VENDOR, PRODUCT: Syntek Semiconductor Co., Ltd
+ *
+ * EITHER EasyCAP USB 2.0 Video Adapter with Audio, Model No. DC60
+ * with input cabling: AUDIO(L), AUDIO(R), CVBS, S-VIDEO.
+ *
+ * OR EasyCAP 4CHANNEL USB 2.0 DVR, Model No. EasyCAP002
+ * with input cabling: MICROPHONE, CVBS1, CVBS2, CVBS3, CVBS4.
+ */
+/*---------------------------------------------------------------------------*/
+#define USB_EASYCAP_VENDOR_ID 0x05e1
+#define USB_EASYCAP_PRODUCT_ID 0x0408
+
+#define EASYCAP_DRIVER_VERSION "0.8.21"
+#define EASYCAP_DRIVER_DESCRIPTION "easycapdc60"
+
+#define USB_SKEL_MINOR_BASE 192
+#define VIDEO_DEVICE_MANY 8
+
+/*---------------------------------------------------------------------------*/
+/*
+ * DEFAULT LUMINANCE, CONTRAST, SATURATION AND HUE
+ */
+/*---------------------------------------------------------------------------*/
+#define SAA_0A_DEFAULT 0x7F
+#define SAA_0B_DEFAULT 0x3F
+#define SAA_0C_DEFAULT 0x2F
+#define SAA_0D_DEFAULT 0x00
+/*---------------------------------------------------------------------------*/
+/*
+ * VIDEO STREAMING PARAMETERS:
+ * USB 2.0 PROVIDES FOR HIGH-BANDWIDTH ENDPOINTS WITH AN UPPER LIMIT
+ * OF 3072 BYTES PER MICROFRAME for wMaxPacketSize.
+ */
+/*---------------------------------------------------------------------------*/
+#define VIDEO_ISOC_BUFFER_MANY 16
+#define VIDEO_ISOC_ORDER 3
+#define VIDEO_ISOC_FRAMESPERDESC ((unsigned int) 1 << VIDEO_ISOC_ORDER)
+#define USB_2_0_MAXPACKETSIZE 3072
+#if (USB_2_0_MAXPACKETSIZE > PAGE_SIZE)
+#error video_isoc_buffer[.] will not be big enough
+#endif
+/*---------------------------------------------------------------------------*/
+/*
+ * VIDEO BUFFERS
+ */
+/*---------------------------------------------------------------------------*/
+#define FIELD_BUFFER_SIZE (203 * PAGE_SIZE)
+#define FRAME_BUFFER_SIZE (405 * PAGE_SIZE)
+#define FIELD_BUFFER_MANY 4
+#define FRAME_BUFFER_MANY 6
+/*---------------------------------------------------------------------------*/
+/*
+ * AUDIO STREAMING PARAMETERS
+ */
+/*---------------------------------------------------------------------------*/
+#define AUDIO_ISOC_BUFFER_MANY 16
+#define AUDIO_ISOC_ORDER 3
+#define AUDIO_ISOC_BUFFER_SIZE (PAGE_SIZE << AUDIO_ISOC_ORDER)
+/*---------------------------------------------------------------------------*/
+/*
+ * AUDIO BUFFERS
+ */
+/*---------------------------------------------------------------------------*/
+#define AUDIO_FRAGMENT_MANY 32
+/*---------------------------------------------------------------------------*/
+/*
+ * IT IS ESSENTIAL THAT EVEN-NUMBERED STANDARDS ARE 25 FRAMES PER SECOND,
+ * ODD-NUMBERED STANDARDS ARE 30 FRAMES PER SECOND.
+ * THE NUMBERING OF STANDARDS MUST NOT BE CHANGED WITHOUT DUE CARE. NOT
+ * ONLY MUST THE PARAMETER
+ * STANDARD_MANY
+ * BE CHANGED TO CORRESPOND TO THE NEW NUMBER OF STANDARDS, BUT ALSO THE
+ * NUMBERING MUST REMAIN AN UNBROKEN ASCENDING SEQUENCE: DUMMY STANDARDS
+ * MAY NEED TO BE ADDED. APPROPRIATE CHANGES WILL ALWAYS BE REQUIRED IN
+ * ROUTINE fillin_formats() AND POSSIBLY ELSEWHERE. BEWARE.
+ */
+/*---------------------------------------------------------------------------*/
+#define PAL_BGHIN 0
+#define PAL_Nc 2
+#define SECAM 4
+#define NTSC_N 6
+#define NTSC_N_443 8
+#define NTSC_M 1
+#define NTSC_443 3
+#define NTSC_M_JP 5
+#define PAL_60 7
+#define PAL_M 9
+#define STANDARD_MANY 10
+/*---------------------------------------------------------------------------*/
+/*
+ * ENUMS
+ */
+/*---------------------------------------------------------------------------*/
+enum {
+AT_720x576,
+AT_704x576,
+AT_640x480,
+AT_720x480,
+AT_360x288,
+AT_320x240,
+AT_360x240,
+RESOLUTION_MANY
+};
+enum {
+FMT_UYVY,
+FMT_YUY2,
+FMT_RGB24,
+FMT_RGB32,
+FMT_BGR24,
+FMT_BGR32,
+PIXELFORMAT_MANY
+};
+enum {
+FIELD_NONE,
+FIELD_INTERLACED,
+FIELD_ALTERNATE,
+INTERLACE_MANY
+};
+#define SETTINGS_MANY (STANDARD_MANY * \
+ RESOLUTION_MANY * \
+ 2 * \
+ PIXELFORMAT_MANY * \
+ INTERLACE_MANY)
+/*---------------------------------------------------------------------------*/
+/*
+ * STRUCTURE DEFINITIONS
+ */
+/*---------------------------------------------------------------------------*/
+struct data_buffer {
+struct list_head list_head;
+void *pgo;
+void *pto;
+__u16 kount;
+};
+/*---------------------------------------------------------------------------*/
+struct data_urb {
+struct list_head list_head;
+struct urb *purb;
+int isbuf;
+int length;
+};
+/*---------------------------------------------------------------------------*/
+struct easycap_standard {
+__u16 mask;
+struct v4l2_standard v4l2_standard;
+};
+struct easycap_format {
+__u16 mask;
+char name[128];
+struct v4l2_format v4l2_format;
+};
+/*---------------------------------------------------------------------------*/
+/*
+ * easycap.ilk == 0 => CVBS+S-VIDEO HARDWARE, AUDIO wMaxPacketSize=256
+ * easycap.ilk == 2 => CVBS+S-VIDEO HARDWARE, AUDIO wMaxPacketSize=9
+ * easycap.ilk == 3 => FOUR-CVBS HARDWARE, AUDIO wMaxPacketSize=9
+ */
+/*---------------------------------------------------------------------------*/
+struct easycap {
+unsigned int audio_pages_per_fragment;
+unsigned int audio_bytes_per_fragment;
+unsigned int audio_buffer_page_many;
+
+#define UPSAMPLE
+#if defined(UPSAMPLE)
+__s16 oldaudio;
+#endif /*UPSAMPLE*/
+
+struct easycap_format easycap_format[1 + SETTINGS_MANY];
+
+int ilk;
+bool microphone;
+
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+struct video_device *pvideo_device;
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+struct usb_device *pusb_device;
+struct usb_interface *pusb_interface;
+
+struct kref kref;
+
+struct mutex mutex_mmap_video[FRAME_BUFFER_MANY];
+struct mutex mutex_timeval0;
+struct mutex mutex_timeval1;
+
+int queued[FRAME_BUFFER_MANY];
+int done[FRAME_BUFFER_MANY];
+
+wait_queue_head_t wq_video;
+wait_queue_head_t wq_audio;
+
+int input;
+int polled;
+int standard_offset;
+int format_offset;
+
+int fps;
+int usec;
+int tolerate;
+int merit[180];
+
+struct timeval timeval0;
+struct timeval timeval1;
+struct timeval timeval2;
+struct timeval timeval7;
+long long int dnbydt;
+
+int video_interface;
+int video_altsetting_on;
+int video_altsetting_off;
+int video_endpointnumber;
+int video_isoc_maxframesize;
+int video_isoc_buffer_size;
+int video_isoc_framesperdesc;
+
+int video_isoc_streaming;
+int video_isoc_sequence;
+int video_idle;
+int video_eof;
+int video_junk;
+
+int fudge;
+
+struct data_buffer video_isoc_buffer[VIDEO_ISOC_BUFFER_MANY];
+struct data_buffer \
+ field_buffer[FIELD_BUFFER_MANY][(FIELD_BUFFER_SIZE/PAGE_SIZE)];
+struct data_buffer \
+ frame_buffer[FRAME_BUFFER_MANY][(FRAME_BUFFER_SIZE/PAGE_SIZE)];
+
+struct list_head urb_video_head;
+struct list_head *purb_video_head;
+
+int vma_many;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * BUFFER INDICATORS
+ */
+/*---------------------------------------------------------------------------*/
+int field_fill; /* Field buffer being filled by easycap_complete(). */
+ /* Bumped only by easycap_complete(). */
+int field_page; /* Page of field buffer page being filled by */
+ /* easycap_complete(). */
+int field_read; /* Field buffer to be read by field2frame(). */
+ /* Bumped only by easycap_complete(). */
+int frame_fill; /* Frame buffer being filled by field2frame(). */
+ /* Bumped only by easycap_dqbuf() when */
+ /* field2frame() has created a complete frame. */
+int frame_read; /* Frame buffer offered to user by DQBUF. */
+ /* Set only by easycap_dqbuf() to trail frame_fill.*/
+int frame_lock; /* Flag set to 1 by DQBUF and cleared by QBUF */
+/*---------------------------------------------------------------------------*/
+/*
+ * IMAGE PROPERTIES
+ */
+/*---------------------------------------------------------------------------*/
+__u32 pixelformat;
+__u32 field;
+int width;
+int height;
+int bytesperpixel;
+bool byteswaporder;
+bool decimatepixel;
+bool offerfields;
+int frame_buffer_used;
+int frame_buffer_many;
+int videofieldamount;
+
+int brightness;
+int contrast;
+int saturation;
+int hue;
+
+int allocation_video_urb;
+int allocation_video_page;
+int allocation_video_struct;
+int registered_video;
+/*---------------------------------------------------------------------------*/
+/*
+ * SOUND PROPERTIES
+ */
+/*---------------------------------------------------------------------------*/
+int audio_interface;
+int audio_altsetting_on;
+int audio_altsetting_off;
+int audio_endpointnumber;
+int audio_isoc_maxframesize;
+int audio_isoc_buffer_size;
+int audio_isoc_framesperdesc;
+
+int audio_isoc_streaming;
+int audio_idle;
+int audio_eof;
+int volume;
+int mute;
+
+struct data_buffer audio_isoc_buffer[AUDIO_ISOC_BUFFER_MANY];
+
+struct list_head urb_audio_head;
+struct list_head *purb_audio_head;
+/*---------------------------------------------------------------------------*/
+/*
+ * BUFFER INDICATORS
+ */
+/*---------------------------------------------------------------------------*/
+int audio_fill; /* Audio buffer being filled by easysnd_complete(). */
+ /* Bumped only by easysnd_complete(). */
+int audio_read; /* Audio buffer page being read by easysnd_read(). */
+ /* Set by easysnd_read() to trail audio_fill by */
+ /* one fragment. */
+/*---------------------------------------------------------------------------*/
+/*
+ * SOUND PROPERTIES
+ */
+/*---------------------------------------------------------------------------*/
+
+int audio_buffer_many;
+
+int allocation_audio_urb;
+int allocation_audio_page;
+int allocation_audio_struct;
+int registered_audio;
+
+long long int audio_sample;
+long long int audio_niveau;
+long long int audio_square;
+
+struct data_buffer audio_buffer[];
+};
+/*---------------------------------------------------------------------------*/
+/*
+ * VIDEO FUNCTION PROTOTYPES
+ */
+/*---------------------------------------------------------------------------*/
+void easycap_complete(struct urb *);
+int easycap_open(struct inode *, struct file *);
+int easycap_release(struct inode *, struct file *);
+long easycap_ioctl(struct file *, unsigned int, unsigned long);
+
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+int easycap_open_noinode(struct file *);
+int easycap_release_noinode(struct file *);
+int videodev_release(struct video_device *);
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+unsigned int easycap_poll(struct file *, poll_table *);
+int easycap_mmap(struct file *, struct vm_area_struct *);
+int easycap_usb_probe(struct usb_interface *, \
+ const struct usb_device_id *);
+void easycap_usb_disconnect(struct usb_interface *);
+void easycap_delete(struct kref *);
+
+void easycap_vma_open(struct vm_area_struct *);
+void easycap_vma_close(struct vm_area_struct *);
+int easycap_vma_fault(struct vm_area_struct *, struct vm_fault *);
+int easycap_dqbuf(struct easycap *, int);
+int submit_video_urbs(struct easycap *);
+int kill_video_urbs(struct easycap *);
+int field2frame(struct easycap *);
+int redaub(struct easycap *, void *, void *, \
+ int, int, __u8, __u8, bool);
+void debrief(struct easycap *);
+void sayreadonly(struct easycap *);
+void easycap_testcard(struct easycap *, int);
+int explain_ioctl(__u32);
+int explain_cid(__u32);
+int fillin_formats(void);
+int adjust_standard(struct easycap *, v4l2_std_id);
+int adjust_format(struct easycap *, __u32, __u32, __u32, \
+ int, bool);
+int adjust_brightness(struct easycap *, int);
+int adjust_contrast(struct easycap *, int);
+int adjust_saturation(struct easycap *, int);
+int adjust_hue(struct easycap *, int);
+int adjust_volume(struct easycap *, int);
+/*---------------------------------------------------------------------------*/
+/*
+ * AUDIO FUNCTION PROTOTYPES
+ */
+/*---------------------------------------------------------------------------*/
+void easysnd_complete(struct urb *);
+ssize_t easysnd_read(struct file *, char __user *, size_t, loff_t *);
+int easysnd_open(struct inode *, struct file *);
+int easysnd_release(struct inode *, struct file *);
+long easysnd_ioctl(struct file *, unsigned int, unsigned long);
+unsigned int easysnd_poll(struct file *, poll_table *);
+void easysnd_delete(struct kref *);
+int submit_audio_urbs(struct easycap *);
+int kill_audio_urbs(struct easycap *);
+void easysnd_testtone(struct easycap *, int);
+int audio_setup(struct easycap *);
+/*---------------------------------------------------------------------------*/
+/*
+ * LOW-LEVEL FUNCTION PROTOTYPES
+ */
+/*---------------------------------------------------------------------------*/
+int audio_gainget(struct usb_device *);
+int audio_gainset(struct usb_device *, __s8);
+
+int set_interface(struct usb_device *, __u16);
+int wakeup_device(struct usb_device *);
+int confirm_resolution(struct usb_device *);
+int confirm_stream(struct usb_device *);
+
+int setup_stk(struct usb_device *);
+int setup_saa(struct usb_device *);
+int setup_vt(struct usb_device *);
+int check_stk(struct usb_device *);
+int check_saa(struct usb_device *);
+int ready_saa(struct usb_device *);
+int merit_saa(struct usb_device *);
+int check_vt(struct usb_device *);
+int select_input(struct usb_device *, int, int);
+int set_resolution(struct usb_device *, \
+ __u16, __u16, __u16, __u16);
+
+int read_saa(struct usb_device *, __u16);
+int read_stk(struct usb_device *, __u32);
+int write_saa(struct usb_device *, __u16, __u16);
+int wait_i2c(struct usb_device *);
+int write_000(struct usb_device *, __u16, __u16);
+int start_100(struct usb_device *);
+int stop_100(struct usb_device *);
+int write_300(struct usb_device *);
+int read_vt(struct usb_device *, __u16);
+int write_vt(struct usb_device *, __u16, __u16);
+
+int set2to78(struct usb_device *);
+int set2to93(struct usb_device *);
+
+int regset(struct usb_device *, __u16, __u16);
+int regget(struct usb_device *, __u16, void *);
+/*---------------------------------------------------------------------------*/
+struct signed_div_result {
+long long int quotient;
+unsigned long long int remainder;
+} signed_div(long long int, long long int);
+/*---------------------------------------------------------------------------*/
+/*
+ * MACROS
+ */
+/*---------------------------------------------------------------------------*/
+#define GET(X, Y, Z) do { \
+ int rc; \
+ *(Z) = (__u16)0; \
+ rc = regget(X, Y, Z); \
+ if (0 > rc) { \
+ JOT(8, ":-(%i\n", __LINE__); return(rc); \
+ } \
+} while (0)
+
+#define SET(X, Y, Z) do { \
+ int rc; \
+ rc = regset(X, Y, Z); \
+ if (0 > rc) { \
+ JOT(8, ":-(%i\n", __LINE__); return(rc); \
+ } \
+} while (0)
+/*---------------------------------------------------------------------------*/
+
+#define SAY(format, args...) do { \
+ printk(KERN_DEBUG "easycap: %s: " format, __func__, ##args); \
+} while (0)
+
+
+#if defined(EASYCAP_DEBUG)
+#define JOT(n, format, args...) do { \
+ if (n <= easycap_debug) { \
+ printk(KERN_DEBUG "easycap: %s: " format, __func__, ##args); \
+ } \
+} while (0)
+#else
+#define JOT(n, format, args...) do {} while (0)
+#endif /*EASYCAP_DEBUG*/
+
+#define POUT JOT(8, ":-(in file %s line %4i\n", __FILE__, __LINE__)
+
+#define MICROSECONDS(X, Y) \
+ ((1000000*((long long int)(X.tv_sec - Y.tv_sec))) + \
+ (long long int)(X.tv_usec - Y.tv_usec))
+
+/*---------------------------------------------------------------------------*/
+/*
+ * (unsigned char *)P pointer to next byte pair
+ * (long int *)X pointer to accumulating count
+ * (long int *)Y pointer to accumulating sum
+ * (long long int *)Z pointer to accumulating sum of squares
+ */
+/*---------------------------------------------------------------------------*/
+#define SUMMER(P, X, Y, Z) do { \
+ unsigned char *p; \
+ unsigned int u0, u1, u2; \
+ long int s; \
+ p = (unsigned char *)(P); \
+ u0 = (unsigned int) (*p); \
+ u1 = (unsigned int) (*(p + 1)); \
+ u2 = (unsigned int) ((u1 << 8) | u0); \
+ if (0x8000 & u2) \
+ s = -(long int)(0x7FFF & (~u2)); \
+ else \
+ s = (long int)(0x7FFF & u2); \
+ *((X)) += (long int) 1; \
+ *((Y)) += (long int) s; \
+ *((Z)) += ((long long int)(s) * (long long int)(s)); \
+} while (0)
+/*---------------------------------------------------------------------------*/
+
+#endif /*EASYCAP_H*/
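
A userspace sketch of what the SUMMER() macro above computes for one little-endian byte pair: it assembles the 16-bit value, applies the macro's own sign handling, and accumulates a count, a sum and a sum of squares. The arithmetic is copied from the macro; the function itself is not part of the driver.

static void summer(const unsigned char *p, long *count, long *sum,
		   long long *sumsq)
{
	unsigned int u0 = p[0];
	unsigned int u1 = p[1];
	unsigned int u2 = (u1 << 8) | u0;
	long s;

	if (u2 & 0x8000)
		s = -(long)(0x7FFF & ~u2);	/* the macro's negative branch */
	else
		s = (long)(0x7FFF & u2);

	*count += 1;
	*sum   += s;
	*sumsq += (long long)s * (long long)s;
}
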
diff --git a/drivers/staging/easycap/easycap_debug.h b/drivers/staging/easycap/easycap_debug.h
new file mode 100644
index 00000000000..1d10d7ea7d6
--- /dev/null
+++ b/drivers/staging/easycap/easycap_debug.h
@@ -0,0 +1,27 @@
+/*****************************************************************************
+* *
+* easycap_debug.h *
+* *
+*****************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+extern int easycap_debug;
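
easycap_debug.h only declares the debug level consumed by the JOT() macro in easycap.h; the definition lives elsewhere (easycap_main.c, not shown in this section). A typical way to expose such a level is as a module parameter, as sketched below. This is an illustration of the usual idiom, not a copy of the driver's code.

#include <linux/module.h>
#include <linux/moduleparam.h>

int easycap_debug;			/* 0 = quiet, larger = chattier */
module_param_named(debug, easycap_debug, int, 0644);
MODULE_PARM_DESC(debug, "debug verbosity consumed by the JOT() macro");
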
diff --git a/drivers/staging/easycap/easycap_ioctl.c b/drivers/staging/easycap/easycap_ioctl.c
new file mode 100644
index 00000000000..9a42ae02cd5
--- /dev/null
+++ b/drivers/staging/easycap/easycap_ioctl.c
@@ -0,0 +1,2695 @@
+/******************************************************************************
+* *
+* easycap_ioctl.c *
+* *
+******************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+
+#include <linux/smp_lock.h>
+#include "easycap.h"
+#include "easycap_debug.h"
+#include "easycap_standard.h"
+#include "easycap_ioctl.h"
+
+/*--------------------------------------------------------------------------*/
+/*
+ * UNLESS THERE IS A PREMATURE ERROR RETURN THIS ROUTINE UPDATES THE
+ * FOLLOWING:
+ * peasycap->standard_offset
+ * peasycap->fps
+ * peasycap->usec
+ * peasycap->tolerate
+ */
+/*---------------------------------------------------------------------------*/
+int adjust_standard(struct easycap *peasycap, v4l2_std_id std_id)
+{
+struct easycap_standard const *peasycap_standard;
+__u16 reg, set;
+int ir, rc, need;
+unsigned int itwas, isnow;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+peasycap_standard = &easycap_standard[0];
+while (0xFFFF != peasycap_standard->mask) {
+ if (std_id & peasycap_standard->v4l2_standard.id)
+ break;
+ peasycap_standard++;
+}
+if (0xFFFF == peasycap_standard->mask) {
+ SAY("ERROR: 0x%08X=std_id: standard not found\n", \
+ (unsigned int)std_id);
+ return -EINVAL;
+}
+SAY("user requests standard: %s\n", \
+ &(peasycap_standard->v4l2_standard.name[0]));
+if (peasycap->standard_offset == \
+ (int)(peasycap_standard - &easycap_standard[0])) {
+ SAY("requested standard already in effect\n");
+ return 0;
+}
+peasycap->standard_offset = (int)(peasycap_standard - &easycap_standard[0]);
+peasycap->fps = peasycap_standard->v4l2_standard.frameperiod.denominator / \
+ peasycap_standard->v4l2_standard.frameperiod.numerator;
+if (!peasycap->fps) {
+ SAY("MISTAKE: frames-per-second is zero\n");
+ return -EFAULT;
+}
+JOT(8, "%i frames-per-second\n", peasycap->fps);
+peasycap->usec = 1000000 / (2 * peasycap->fps);
+peasycap->tolerate = 1000 * (25 / peasycap->fps);
+
+kill_video_urbs(peasycap);
+
+/*--------------------------------------------------------------------------*/
+/*
+ * SAA7113H DATASHEET PAGE 44, TABLE 42
+ */
+/*--------------------------------------------------------------------------*/
+need = 0; itwas = 0; reg = 0x00; set = 0x00;
+switch (peasycap_standard->mask & 0x000F) {
+case NTSC_M_JP: {
+ reg = 0x0A; set = 0x95;
+ ir = read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ SAY("ERROR: cannot read SAA register 0x%02X\n", reg);
+ else
+ itwas = (unsigned int)ir;
+
+
+ set2to78(peasycap->pusb_device);
+
+
+ rc = write_saa(peasycap->pusb_device, reg, set);
+ if (0 != rc)
+ SAY("ERROR: failed to set SAA register " \
+ "0x%02X to 0x%02X for JP standard\n", reg, set);
+ else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOT(8, "SAA register 0x%02X changed " \
+ "to 0x%02X\n", reg, isnow);
+ else
+ JOT(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+
+ set2to78(peasycap->pusb_device);
+
+ }
+
+ reg = 0x0B; set = 0x48;
+ ir = read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ SAY("ERROR: cannot read SAA register 0x%02X\n", reg);
+ else
+ itwas = (unsigned int)ir;
+
+ set2to78(peasycap->pusb_device);
+
+ rc = write_saa(peasycap->pusb_device, reg, set);
+ if (0 != rc)
+ SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X " \
+ "for JP standard\n", reg, set);
+ else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOT(8, "SAA register 0x%02X changed " \
+ "to 0x%02X\n", reg, isnow);
+ else
+ JOT(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+
+ set2to78(peasycap->pusb_device);
+
+ }
+/*--------------------------------------------------------------------------*/
+/*
+ * NOTE: NO break HERE: RUN ON TO NEXT CASE
+ */
+/*--------------------------------------------------------------------------*/
+}
+case NTSC_M:
+case PAL_BGHIN: {
+ reg = 0x0E; set = 0x01; need = 1; break;
+}
+case NTSC_N_443:
+case PAL_60: {
+ reg = 0x0E; set = 0x11; need = 1; break;
+}
+case NTSC_443:
+case PAL_Nc: {
+ reg = 0x0E; set = 0x21; need = 1; break;
+}
+case NTSC_N:
+case PAL_M: {
+ reg = 0x0E; set = 0x31; need = 1; break;
+}
+case SECAM: {
+ reg = 0x0E; set = 0x51; need = 1; break;
+}
+default:
+ break;
+}
+/*--------------------------------------------------------------------------*/
+if (need) {
+ ir = read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ SAY("ERROR: failed to read SAA register 0x%02X\n", reg);
+ else
+ itwas = (unsigned int)ir;
+
+ set2to78(peasycap->pusb_device);
+
+ rc = write_saa(peasycap->pusb_device, reg, set);
+	if (0 != rc) {
+ SAY("ERROR: failed to set SAA register " \
+ "0x%02X to 0x%02X for table 42\n", reg, set);
+ } else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOT(8, "SAA register 0x%02X changed " \
+ "to 0x%02X\n", reg, isnow);
+ else
+ JOT(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+ }
+}
+/*--------------------------------------------------------------------------*/
+/*
+ * SAA7113H DATASHEET PAGE 41
+ */
+/*--------------------------------------------------------------------------*/
+reg = 0x08;
+ir = read_saa(peasycap->pusb_device, reg);
+if (0 > ir)
+ SAY("ERROR: failed to read SAA register 0x%02X " \
+ "so cannot reset\n", reg);
+else {
+ itwas = (unsigned int)ir;
+ if (peasycap_standard->mask & 0x0001)
+ set = itwas | 0x40 ;
+ else
+ set = itwas & ~0x40 ;
+
+set2to78(peasycap->pusb_device);
+
+rc = write_saa(peasycap->pusb_device, reg, set);
+if (0 != rc)
+ SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set);
+else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOT(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow);
+ else
+ JOT(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+ }
+}
+/*--------------------------------------------------------------------------*/
+/*
+ * SAA7113H DATASHEET PAGE 51, TABLE 57
+ */
+/*---------------------------------------------------------------------------*/
+reg = 0x40;
+ir = read_saa(peasycap->pusb_device, reg);
+if (0 > ir)
+ SAY("ERROR: failed to read SAA register 0x%02X " \
+ "so cannot reset\n", reg);
+else {
+ itwas = (unsigned int)ir;
+ if (peasycap_standard->mask & 0x0001)
+ set = itwas | 0x80 ;
+ else
+ set = itwas & ~0x80 ;
+
+set2to78(peasycap->pusb_device);
+
+rc = write_saa(peasycap->pusb_device, reg, set);
+if (0 != rc)
+ SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set);
+else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOT(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow);
+ else
+ JOT(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+ }
+}
+/*--------------------------------------------------------------------------*/
+/*
+ * SAA7113H DATASHEET PAGE 53, TABLE 66
+ */
+/*--------------------------------------------------------------------------*/
+reg = 0x5A;
+ir = read_saa(peasycap->pusb_device, reg);
+if (0 > ir)
+ SAY("ERROR: failed to read SAA register 0x%02X but continuing\n", reg);
+ itwas = (unsigned int)ir;
+ if (peasycap_standard->mask & 0x0001)
+ set = 0x0A ;
+ else
+ set = 0x07 ;
+
+ set2to78(peasycap->pusb_device);
+
+ if (0 != write_saa(peasycap->pusb_device, reg, set))
+ SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+ reg, set);
+ else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOT(8, "SAA register 0x%02X changed "
+ "to 0x%02X\n", reg, isnow);
+ else
+ JOT(8, "SAA register 0x%02X changed "
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+ }
+ if (0 != check_saa(peasycap->pusb_device))
+ SAY("ERROR: check_saa() failed\n");
+return 0;
+}
+/*****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * THE ALGORITHM FOR RESPONDING TO THE VIDIO_S_FMT IOCTL DEPENDS ON THE
+ * CURRENT VALUE OF peasycap->standard_offset.
+ * PROVIDED THE ARGUMENT try IS false AND THERE IS NO PREMATURE ERROR RETURN
+ * THIS ROUTINE UPDATES THE FOLLOWING:
+ * peasycap->format_offset
+ * peasycap->pixelformat
+ * peasycap->field
+ * peasycap->height
+ * peasycap->width
+ * peasycap->bytesperpixel
+ * peasycap->byteswaporder
+ * peasycap->decimatepixel
+ * peasycap->frame_buffer_used
+ * peasycap->videofieldamount
+ * peasycap->offerfields
+ *
+ * IF SUCCESSFUL THE FUNCTION RETURNS THE OFFSET IN easycap_format[]
+ *  IDENTIFYING THE FORMAT WHICH IS TO BE RETURNED TO THE USER.
+ * ERRORS RETURN A NEGATIVE NUMBER.
+ */
+/*--------------------------------------------------------------------------*/
+int adjust_format(struct easycap *peasycap, \
+ __u32 width, __u32 height, __u32 pixelformat, int field, bool try)
+{
+struct easycap_format *peasycap_format, *peasycap_best_format;
+__u16 mask;
+struct usb_device *p;
+int miss, multiplier, best;
+char bf[5], *pc;
+__u32 uc;
+
+if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
+p = peasycap->pusb_device;
+if ((struct usb_device *)NULL == p) {
+ SAY("ERROR: peaycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+pc = &bf[0];
+uc = pixelformat; memcpy((void *)pc, (void *)(&uc), 4); bf[4] = 0;
+mask = easycap_standard[peasycap->standard_offset].mask;
+SAY("sought: %ix%i,%s(0x%08X),%i=field,0x%02X=std mask\n", \
+ width, height, pc, pixelformat, field, mask);
+if (V4L2_FIELD_ANY == field) {
+ field = V4L2_FIELD_INTERLACED;
+ SAY("prefer: V4L2_FIELD_INTERLACED=field, was V4L2_FIELD_ANY\n");
+}
+peasycap_best_format = (struct easycap_format *)NULL;
+peasycap_format = &easycap_format[0];
+while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
+ JOT(16, ".> %i %i 0x%08X %ix%i\n", \
+ peasycap_format->mask & 0x01,
+ peasycap_format->v4l2_format.fmt.pix.field,
+ peasycap_format->v4l2_format.fmt.pix.pixelformat,
+ peasycap_format->v4l2_format.fmt.pix.width,
+ peasycap_format->v4l2_format.fmt.pix.height);
+
+ if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+ (peasycap_format->v4l2_format.fmt.pix.field == field) && \
+ (peasycap_format->v4l2_format.fmt.pix.pixelformat == \
+ pixelformat) && \
+ (peasycap_format->v4l2_format.fmt.pix.width == width) && \
+ (peasycap_format->v4l2_format.fmt.pix.height == height)) {
+ peasycap_best_format = peasycap_format;
+ break;
+ }
+ peasycap_format++;
+}
+if (0 == peasycap_format->v4l2_format.fmt.pix.width) {
+ SAY("cannot do: %ix%i with standard mask 0x%02X\n", \
+ width, height, mask);
+ peasycap_format = &easycap_format[0]; best = -1;
+ while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
+ if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+ (peasycap_format->v4l2_format.fmt.pix\
+ .field == field) && \
+ (peasycap_format->v4l2_format.fmt.pix\
+ .pixelformat == pixelformat)) {
+ miss = abs(peasycap_format->\
+ v4l2_format.fmt.pix.width - width);
+ if ((best > miss) || (best < 0)) {
+ best = miss;
+ peasycap_best_format = peasycap_format;
+ if (!miss)
+ break;
+ }
+ }
+ peasycap_format++;
+ }
+ if (-1 == best) {
+ SAY("cannot do %ix... with standard mask 0x%02X\n", \
+ width, mask);
+ SAY("cannot do ...x%i with standard mask 0x%02X\n", \
+ height, mask);
+ SAY(" %ix%i unmatched\n", width, height);
+ return peasycap->format_offset;
+ }
+}
+if ((struct easycap_format *)NULL == peasycap_best_format) {
+ SAY("MISTAKE: peasycap_best_format is NULL");
+ return -EINVAL;
+}
+peasycap_format = peasycap_best_format;
+
+/*...........................................................................*/
+if (true == try)
+ return (int)(peasycap_best_format - &easycap_format[0]);
+/*...........................................................................*/
+
+if (false != try) {
+ SAY("MISTAKE: true==try where is should be false\n");
+ return -EINVAL;
+}
+SAY("actioning: %ix%i %s\n", \
+ peasycap_format->v4l2_format.fmt.pix.width, \
+ peasycap_format->v4l2_format.fmt.pix.height,
+ &peasycap_format->name[0]);
+peasycap->height = peasycap_format->v4l2_format.fmt.pix.height;
+peasycap->width = peasycap_format->v4l2_format.fmt.pix.width;
+peasycap->pixelformat = peasycap_format->v4l2_format.fmt.pix.pixelformat;
+peasycap->field = peasycap_format->v4l2_format.fmt.pix.field;
+peasycap->format_offset = (int)(peasycap_format - &easycap_format[0]);
+peasycap->bytesperpixel = (0x00F0 & peasycap_format->mask) >> 4 ;
+if (0x0100 & peasycap_format->mask)
+ peasycap->byteswaporder = true;
+else
+ peasycap->byteswaporder = false;
+if (0x0800 & peasycap_format->mask)
+ peasycap->decimatepixel = true;
+else
+ peasycap->decimatepixel = false;
+if (0x1000 & peasycap_format->mask)
+ peasycap->offerfields = true;
+else
+ peasycap->offerfields = false;
+if (true == peasycap->decimatepixel)
+ multiplier = 2;
+else
+ multiplier = 1;
+peasycap->videofieldamount = multiplier * peasycap->width * \
+ multiplier * peasycap->height;
+peasycap->frame_buffer_used = peasycap->bytesperpixel * \
+ peasycap->width * peasycap->height;
+
+if (true == peasycap->offerfields) {
+ SAY("WARNING: %i=peasycap->field is untested: " \
+ "please report problems\n", peasycap->field);
+
+
+/*
+ * FIXME ---- THIS IS UNTESTED, MAY BE (AND PROBABLY IS) INCORRECT:
+ *
+ * peasycap->frame_buffer_used = peasycap->frame_buffer_used / 2;
+ *
+ * SO DO NOT RISK IT YET.
+ *
+ */
+
+
+
+}
+
+kill_video_urbs(peasycap);
+
+/*---------------------------------------------------------------------------*/
+/*
+ * PAL
+ */
+/*---------------------------------------------------------------------------*/
+if (0 == (0x01 & peasycap_format->mask)) {
+ if (((720 == peasycap_format->v4l2_format.fmt.pix.width) && \
+ (576 == \
+ peasycap_format->v4l2_format.fmt.pix.height)) || \
+ ((360 == \
+ peasycap_format->v4l2_format.fmt.pix.width) && \
+ (288 == \
+ peasycap_format->v4l2_format.fmt.pix.height))) {
+ if (0 != set_resolution(p, 0x0000, 0x0001, 0x05A0, 0x0121)) {
+ SAY("ERROR: set_resolution() failed\n");
+ return -EINVAL;
+ }
+ } else if ((704 == peasycap_format->v4l2_format.fmt.pix.width) && \
+ (576 == peasycap_format->v4l2_format.fmt.pix.height)) {
+ if (0 != set_resolution(p, 0x0004, 0x0001, 0x0584, 0x0121)) {
+ SAY("ERROR: set_resolution() failed\n");
+ return -EINVAL;
+ }
+ } else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && \
+ (480 == \
+ peasycap_format->v4l2_format.fmt.pix.height)) || \
+ ((320 == \
+ peasycap_format->v4l2_format.fmt.pix.width) && \
+ (240 == \
+ peasycap_format->v4l2_format.fmt.pix.height))) {
+ if (0 != set_resolution(p, 0x0014, 0x0020, 0x0514, 0x0110)) {
+ SAY("ERROR: set_resolution() failed\n");
+ return -EINVAL;
+ }
+ } else {
+ SAY("MISTAKE: bad format, cannot set resolution\n");
+ return -EINVAL;
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * NTSC
+ */
+/*---------------------------------------------------------------------------*/
+} else {
+ if (((720 == peasycap_format->v4l2_format.fmt.pix.width) && \
+ (480 == \
+ peasycap_format->v4l2_format.fmt.pix.height)) || \
+ ((360 == \
+ peasycap_format->v4l2_format.fmt.pix.width) && \
+ (240 == \
+ peasycap_format->v4l2_format.fmt.pix.height))) {
+ if (0 != set_resolution(p, 0x0000, 0x0003, 0x05A0, 0x00F3)) {
+ SAY("ERROR: set_resolution() failed\n");
+ return -EINVAL;
+ }
+ } else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && \
+ (480 == \
+ peasycap_format->v4l2_format.fmt.pix.height)) || \
+ ((320 == \
+ peasycap_format->v4l2_format.fmt.pix.width) && \
+ (240 == \
+ peasycap_format->v4l2_format.fmt.pix.height))) {
+ if (0 != set_resolution(p, 0x0014, 0x0003, 0x0514, 0x00F3)) {
+ SAY("ERROR: set_resolution() failed\n");
+ return -EINVAL;
+ }
+ } else {
+ SAY("MISTAKE: bad format, cannot set resolution\n");
+ return -EINVAL;
+ }
+}
+/*---------------------------------------------------------------------------*/
+
+check_stk(peasycap->pusb_device);
+
+return (int)(peasycap_best_format - &easycap_format[0]);
+}
+/*****************************************************************************/
+int adjust_brightness(struct easycap *peasycap, int value)
+{
+unsigned int mood;
+int i1;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+i1 = 0;
+while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (V4L2_CID_BRIGHTNESS == easycap_control[i1].id) {
+ if ((easycap_control[i1].minimum > value) || \
+ (easycap_control[i1].maximum < value))
+ value = easycap_control[i1].default_value;
+ peasycap->brightness = value;
+ mood = 0x00FF & (unsigned int)peasycap->brightness;
+
+ set2to78(peasycap->pusb_device);
+
+ if (!write_saa(peasycap->pusb_device, 0x0A, mood)) {
+ SAY("adjusting brightness to 0x%02X\n", mood);
+ return 0;
+ } else {
+ SAY("WARNING: failed to adjust brightness " \
+ "to 0x%02X\n", mood);
+ return -ENOENT;
+ }
+
+ set2to78(peasycap->pusb_device);
+
+ break;
+ }
+ i1++;
+}
+SAY("WARNING: failed to adjust brightness: control not found\n");
+return -ENOENT;
+}
+/*****************************************************************************/
+int adjust_contrast(struct easycap *peasycap, int value)
+{
+unsigned int mood;
+int i1;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+i1 = 0;
+while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (V4L2_CID_CONTRAST == easycap_control[i1].id) {
+ if ((easycap_control[i1].minimum > value) || \
+ (easycap_control[i1].maximum < value))
+ value = easycap_control[i1].default_value;
+ peasycap->contrast = value;
+ mood = 0x00FF & (unsigned int) (peasycap->contrast - 128);
+
+ set2to78(peasycap->pusb_device);
+
+ if (!write_saa(peasycap->pusb_device, 0x0B, mood)) {
+ SAY("adjusting contrast to 0x%02X\n", mood);
+ return 0;
+ } else {
+ SAY("WARNING: failed to adjust contrast to " \
+ "0x%02X\n", mood);
+ return -ENOENT;
+ }
+
+ set2to78(peasycap->pusb_device);
+
+ break;
+ }
+ i1++;
+}
+SAY("WARNING: failed to adjust contrast: control not found\n");
+return -ENOENT;
+}
+/*****************************************************************************/
+int adjust_saturation(struct easycap *peasycap, int value)
+{
+unsigned int mood;
+int i1;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+i1 = 0;
+while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (V4L2_CID_SATURATION == easycap_control[i1].id) {
+ if ((easycap_control[i1].minimum > value) || \
+ (easycap_control[i1].maximum < value))
+ value = easycap_control[i1].default_value;
+ peasycap->saturation = value;
+ mood = 0x00FF & (unsigned int) (peasycap->saturation - 128);
+
+ set2to78(peasycap->pusb_device);
+
+ if (!write_saa(peasycap->pusb_device, 0x0C, mood)) {
+ SAY("adjusting saturation to 0x%02X\n", mood);
+ return 0;
+ } else {
+ SAY("WARNING: failed to adjust saturation to " \
+ "0x%02X\n", mood);
+ return -ENOENT;
+ }
+ break;
+
+ set2to78(peasycap->pusb_device);
+
+ }
+ i1++;
+}
+SAY("WARNING: failed to adjust saturation: control not found\n");
+return -ENOENT;
+}
+/*****************************************************************************/
+int adjust_hue(struct easycap *peasycap, int value)
+{
+unsigned int mood;
+int i1, i2;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+i1 = 0;
+while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (V4L2_CID_HUE == easycap_control[i1].id) {
+ if ((easycap_control[i1].minimum > value) || \
+ (easycap_control[i1].maximum < value))
+ value = easycap_control[i1].default_value;
+ peasycap->hue = value;
+ i2 = peasycap->hue - 128;
+ mood = 0x00FF & ((int) i2);
+
+ set2to78(peasycap->pusb_device);
+
+ if (!write_saa(peasycap->pusb_device, 0x0D, mood)) {
+ SAY("adjusting hue to 0x%02X\n", mood);
+ return 0;
+ } else {
+ SAY("WARNING: failed to adjust hue to 0x%02X\n", mood);
+ return -ENOENT;
+ }
+
+ set2to78(peasycap->pusb_device);
+
+ break;
+ }
+ i1++;
+}
+SAY("WARNING: failed to adjust hue: control not found\n");
+return -ENOENT;
+}
+/*****************************************************************************/
+int adjust_volume(struct easycap *peasycap, int value)
+{
+__s8 mood;
+int i1;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+i1 = 0;
+while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (V4L2_CID_AUDIO_VOLUME == easycap_control[i1].id) {
+ if ((easycap_control[i1].minimum > value) || \
+ (easycap_control[i1].maximum < value))
+ value = easycap_control[i1].default_value;
+ peasycap->volume = value;
+ mood = (16 > peasycap->volume) ? 16 : \
+ ((31 < peasycap->volume) ? 31 : \
+ (__s8) peasycap->volume);
+ if (!audio_gainset(peasycap->pusb_device, mood)) {
+ SAY("adjusting volume to 0x%01X\n", mood);
+ return 0;
+ } else {
+ SAY("WARNING: failed to adjust volume to " \
+ "0x%1X\n", mood);
+ return -ENOENT;
+ }
+ break;
+ }
+i1++;
+}
+SAY("WARNING: failed to adjust volume: control not found\n");
+return -ENOENT;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * AN ALTERNATIVE METHOD OF MUTING MIGHT SEEM TO BE:
+ * usb_set_interface(peasycap->pusb_device, \
+ * peasycap->audio_interface, \
+ * peasycap->audio_altsetting_off);
+ * HOWEVER, AFTER THIS COMMAND IS ISSUED ALL SUBSEQUENT URBS RECEIVE STATUS
+ * -ESHUTDOWN. THE HANDLER ROUTINE easysnd_complete() DECLINES TO RESUBMIT
+ * THE URB AND THE PIPELINE COLLAPSES IRRETRIEVABLY. BEWARE.
+ */
+/*---------------------------------------------------------------------------*/
+int adjust_mute(struct easycap *peasycap, int value)
+{
+int i1;
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+i1 = 0;
+while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (V4L2_CID_AUDIO_MUTE == easycap_control[i1].id) {
+ peasycap->mute = value;
+ switch (peasycap->mute) {
+ case 1: {
+ peasycap->audio_idle = 1;
+ peasycap->timeval0.tv_sec = 0;
+ SAY("adjusting mute: %i=peasycap->audio_idle\n", \
+ peasycap->audio_idle);
+ return 0;
+ }
+ default: {
+ peasycap->audio_idle = 0;
+ SAY("adjusting mute: %i=peasycap->audio_idle\n", \
+ peasycap->audio_idle);
+ return 0;
+ }
+ }
+ break;
+ }
+ i1++;
+}
+SAY("WARNING: failed to adjust mute: control not found\n");
+return -ENOENT;
+}
+
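+/*---------------------------------------------------------------------------*/
+/*
+ * THE adjust_*() HELPERS ABOVE ARE REACHED FROM USERSPACE THROUGH THE
+ * VIDIOC_S_CTRL CASE OF THE IOCTL HANDLER BELOW.  A MINIMAL USERSPACE
+ * SKETCH, FOR ILLUSTRATION ONLY AND NOT PART OF THIS DRIVER (THE DEVICE
+ * NODE /dev/video0 IS AN ASSUMPTION), MIGHT BE:
+ *
+ *     struct v4l2_control ctrl;
+ *     int fd = open("/dev/video0", O_RDWR);
+ *
+ *     ctrl.id = V4L2_CID_AUDIO_MUTE;
+ *     ctrl.value = 1;
+ *     ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ *
+ *     ctrl.id = V4L2_CID_BRIGHTNESS;
+ *     ctrl.value = 200;
+ *     ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ *
+ * VALUES OUTSIDE THE RANGE ADVERTISED BY VIDIOC_QUERYCTRL ARE REPLACED BY
+ * THE CONTROL'S DEFAULT (SEE adjust_saturation() AND adjust_hue() ABOVE).
+ */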
+/*--------------------------------------------------------------------------*/
+static int easycap_ioctl_bkl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+static struct easycap *peasycap;
+static struct usb_device *p;
+static __u32 isequence;
+
+peasycap = file->private_data;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -1;
+}
+p = peasycap->pusb_device;
+if ((struct usb_device *)NULL == p) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * MOST OF THE VARIABLES DECLARED static IN THE case{} BLOCKS BELOW ARE SO
+ * DECLARED SIMPLY TO AVOID A COMPILER WARNING OF THE KIND:
+ * easycap_ioctl.c: warning:
+ * the frame size of ... bytes is larger than 1024 bytes
+ */
+/*---------------------------------------------------------------------------*/
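+/*
+ * AN ALTERNATIVE TO THOSE static DECLARATIONS WOULD BE HEAP ALLOCATION,
+ * FOR EXAMPLE (A SKETCH ONLY, NOT USED HERE):
+ *
+ *     struct v4l2_capability *pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
+ *     if (!pcap)
+ *             return -ENOMEM;
+ *     ...
+ *     kfree(pcap);
+ *
+ * AT THE COST OF AN ALLOCATION AND A FREE ON EVERY IOCTL CALL.
+ */
+/*---------------------------------------------------------------------------*/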
+switch (cmd) {
+case VIDIOC_QUERYCAP: {
+ static struct v4l2_capability v4l2_capability;
+ static char version[16], *p1, *p2;
+ static int i, rc, k[3];
+ static long lng;
+
+ JOT(8, "VIDIOC_QUERYCAP\n");
+
+ if (16 <= strlen(EASYCAP_DRIVER_VERSION)) {
+ SAY("ERROR: bad driver version string\n"); return -EINVAL;
+ }
+ strcpy(&version[0], EASYCAP_DRIVER_VERSION);
+ for (i = 0; i < 3; i++)
+ k[i] = 0;
+ p2 = &version[0]; i = 0;
+ while (*p2) {
+ p1 = p2;
+ while (*p2 && ('.' != *p2))
+ p2++;
+ if (*p2)
+ *p2++ = 0;
+ if (3 > i) {
+ rc = (int) strict_strtol(p1, 10, &lng);
+ if (0 != rc) {
+ SAY("ERROR: %i=strict_strtol(%s,.,,)\n", \
+ rc, p1);
+ return -EINVAL;
+ }
+ k[i] = (int)lng;
+ }
+ i++;
+ }
+
+ memset(&v4l2_capability, 0, sizeof(struct v4l2_capability));
+ strlcpy(&v4l2_capability.driver[0], "easycap", \
+ sizeof(v4l2_capability.driver));
+
+ v4l2_capability.capabilities = \
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \
+ V4L2_CAP_AUDIO | V4L2_CAP_READWRITE;
+
+ v4l2_capability.version = KERNEL_VERSION(k[0], k[1], k[2]);
+ JOT(8, "v4l2_capability.version=(%i,%i,%i)\n", k[0], k[1], k[2]);
+
+ strlcpy(&v4l2_capability.card[0], "EasyCAP DC60", \
+ sizeof(v4l2_capability.card));
+
+ if (usb_make_path(peasycap->pusb_device, &v4l2_capability.bus_info[0],\
+ sizeof(v4l2_capability.bus_info)) < 0) {
+ strlcpy(&v4l2_capability.bus_info[0], "EasyCAP bus_info", \
+ sizeof(v4l2_capability.bus_info));
+ JOT(8, "%s=v4l2_capability.bus_info\n", \
+ &v4l2_capability.bus_info[0]);
+ }
+ if (0 != copy_to_user((void __user *)arg, &v4l2_capability, \
+ sizeof(struct v4l2_capability))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_ENUMINPUT: {
+ static struct v4l2_input v4l2_input;
+ static __u32 index;
+
+ JOT(8, "VIDIOC_ENUMINPUT\n");
+
+ if (0 != copy_from_user(&v4l2_input, (void __user *)arg, \
+ sizeof(struct v4l2_input))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ index = v4l2_input.index;
+ memset(&v4l2_input, 0, sizeof(struct v4l2_input));
+
+ switch (index) {
+ case 0: {
+ v4l2_input.index = index;
+ strcpy(&v4l2_input.name[0], "CVBS0");
+ v4l2_input.type = V4L2_INPUT_TYPE_CAMERA;
+ v4l2_input.audioset = 0x01;
+ v4l2_input.tuner = 0;
+ v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_NTSC ;
+ v4l2_input.status = 0;
+ JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ break;
+ }
+ case 1: {
+ v4l2_input.index = index;
+ strcpy(&v4l2_input.name[0], "CVBS1");
+ v4l2_input.type = V4L2_INPUT_TYPE_CAMERA;
+ v4l2_input.audioset = 0x01;
+ v4l2_input.tuner = 0;
+ v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_NTSC ;
+ v4l2_input.status = 0;
+ JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ break;
+ }
+ case 2: {
+ v4l2_input.index = index;
+ strcpy(&v4l2_input.name[0], "CVBS2");
+ v4l2_input.type = V4L2_INPUT_TYPE_CAMERA;
+ v4l2_input.audioset = 0x01;
+ v4l2_input.tuner = 0;
+ v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_NTSC ;
+ v4l2_input.status = 0;
+ JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ break;
+ }
+ case 3: {
+ v4l2_input.index = index;
+ strcpy(&v4l2_input.name[0], "CVBS3");
+ v4l2_input.type = V4L2_INPUT_TYPE_CAMERA;
+ v4l2_input.audioset = 0x01;
+ v4l2_input.tuner = 0;
+ v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_NTSC ;
+ v4l2_input.status = 0;
+ JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ break;
+ }
+ case 4: {
+ v4l2_input.index = index;
+ strcpy(&v4l2_input.name[0], "CVBS4");
+ v4l2_input.type = V4L2_INPUT_TYPE_CAMERA;
+ v4l2_input.audioset = 0x01;
+ v4l2_input.tuner = 0;
+ v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_NTSC ;
+ v4l2_input.status = 0;
+ JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ break;
+ }
+ case 5: {
+ v4l2_input.index = index;
+ strcpy(&v4l2_input.name[0], "S-VIDEO");
+ v4l2_input.type = V4L2_INPUT_TYPE_CAMERA;
+ v4l2_input.audioset = 0x01;
+ v4l2_input.tuner = 0;
+ v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_NTSC ;
+ v4l2_input.status = 0;
+ JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ break;
+ }
+ default: {
+ JOT(8, "%i=index: exhausts inputs\n", index);
+ return -EINVAL;
+ }
+ }
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_input, \
+ sizeof(struct v4l2_input))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_INPUT: {
+ static __u32 index;
+
+ JOT(8, "VIDIOC_G_INPUT\n");
+ index = (__u32)peasycap->input;
+ JOT(8, "user is told: %i\n", index);
+ if (0 != copy_to_user((void __user *)arg, &index, sizeof(__u32))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_S_INPUT:
+ {
+ static __u32 index;
+
+ JOT(8, "VIDIOC_S_INPUT\n");
+
+ if (0 != copy_from_user(&index, (void __user *)arg, sizeof(__u32))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ JOT(8, "user requests input %i\n", index);
+
+ if ((int)index == peasycap->input) {
+ SAY("requested input already in effect\n");
+ break;
+ }
+
+ if ((0 > index) || (5 < index)) {
+ JOT(8, "ERROR: bad requested input: %i\n", index);
+ return -EINVAL;
+ }
+ peasycap->input = (int)index;
+
+ select_input(peasycap->pusb_device, peasycap->input, 9);
+
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_ENUMAUDIO: {
+ JOT(8, "VIDIOC_ENUMAUDIO\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_ENUMAUDOUT: {
+ static struct v4l2_audioout v4l2_audioout;
+
+ JOT(8, "VIDIOC_ENUMAUDOUT\n");
+
+ if (0 != copy_from_user(&v4l2_audioout, (void __user *)arg, \
+ sizeof(struct v4l2_audioout))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (0 != v4l2_audioout.index)
+ return -EINVAL;
+ memset(&v4l2_audioout, 0, sizeof(struct v4l2_audioout));
+ v4l2_audioout.index = 0;
+ strcpy(&v4l2_audioout.name[0], "Soundtrack");
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_audioout, \
+ sizeof(struct v4l2_audioout))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_QUERYCTRL: {
+ static int i1;
+ static struct v4l2_queryctrl v4l2_queryctrl;
+
+ JOT(8, "VIDIOC_QUERYCTRL\n");
+
+ if (0 != copy_from_user(&v4l2_queryctrl, (void __user *)arg, \
+ sizeof(struct v4l2_queryctrl))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ i1 = 0;
+ while (0xFFFFFFFF != easycap_control[i1].id) {
+ if (easycap_control[i1].id == v4l2_queryctrl.id) {
+ JOT(8, "VIDIOC_QUERYCTRL %s=easycap_control[%i]" \
+ ".name\n", &easycap_control[i1].name[0], i1);
+ memcpy(&v4l2_queryctrl, &easycap_control[i1], \
+ sizeof(struct v4l2_queryctrl));
+ break;
+ }
+ i1++;
+ }
+ if (0xFFFFFFFF == easycap_control[i1].id) {
+ JOT(8, "%i=index: exhausts controls\n", i1);
+ return -EINVAL;
+ }
+ if (0 != copy_to_user((void __user *)arg, &v4l2_queryctrl, \
+ sizeof(struct v4l2_queryctrl))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_QUERYMENU: {
+ JOT(8, "VIDIOC_QUERYMENU unsupported\n");
+ return -EINVAL;
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_CTRL: {
+ static struct v4l2_control v4l2_control;
+
+ JOT(8, "VIDIOC_G_CTRL\n");
+
+ if (0 != copy_from_user(&v4l2_control, (void __user *)arg, \
+ sizeof(struct v4l2_control))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ switch (v4l2_control.id) {
+ case V4L2_CID_BRIGHTNESS: {
+ v4l2_control.value = peasycap->brightness;
+ JOT(8, "user enquires brightness: %i\n", v4l2_control.value);
+ break;
+ }
+ case V4L2_CID_CONTRAST: {
+ v4l2_control.value = peasycap->contrast;
+ JOT(8, "user enquires contrast: %i\n", v4l2_control.value);
+ break;
+ }
+ case V4L2_CID_SATURATION: {
+ v4l2_control.value = peasycap->saturation;
+ JOT(8, "user enquires saturation: %i\n", v4l2_control.value);
+ break;
+ }
+ case V4L2_CID_HUE: {
+ v4l2_control.value = peasycap->hue;
+ JOT(8, "user enquires hue: %i\n", v4l2_control.value);
+ break;
+ }
+ case V4L2_CID_AUDIO_VOLUME: {
+ v4l2_control.value = peasycap->volume;
+ JOT(8, "user enquires volume: %i\n", v4l2_control.value);
+ break;
+ }
+ case V4L2_CID_AUDIO_MUTE: {
+ if (1 == peasycap->mute)
+ v4l2_control.value = true;
+ else
+ v4l2_control.value = false;
+ JOT(8, "user enquires mute: %i\n", v4l2_control.value);
+ break;
+ }
+ default: {
+ SAY("ERROR: unknown V4L2 control: 0x%08X=id\n", \
+ v4l2_control.id);
+ explain_cid(v4l2_control.id);
+ return -EINVAL;
+ }
+ }
+ if (0 != copy_to_user((void __user *)arg, &v4l2_control, \
+ sizeof(struct v4l2_control))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+#if defined(VIDIOC_S_CTRL_OLD)
+case VIDIOC_S_CTRL_OLD: {
+ JOT(8, "VIDIOC_S_CTRL_OLD required at least for xawtv\n");
+}
+#endif /*VIDIOC_S_CTRL_OLD*/
+case VIDIOC_S_CTRL:
+ {
+ static struct v4l2_control v4l2_control;
+
+ JOT(8, "VIDIOC_S_CTRL\n");
+
+ if (0 != copy_from_user(&v4l2_control, (void __user *)arg, \
+ sizeof(struct v4l2_control))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ switch (v4l2_control.id) {
+ case V4L2_CID_BRIGHTNESS: {
+ JOT(8, "user requests brightness %i\n", v4l2_control.value);
+ if (0 != adjust_brightness(peasycap, v4l2_control.value))
+ ;
+ break;
+ }
+ case V4L2_CID_CONTRAST: {
+ JOT(8, "user requests contrast %i\n", v4l2_control.value);
+ if (0 != adjust_contrast(peasycap, v4l2_control.value))
+ ;
+ break;
+ }
+ case V4L2_CID_SATURATION: {
+ JOT(8, "user requests saturation %i\n", v4l2_control.value);
+ if (0 != adjust_saturation(peasycap, v4l2_control.value))
+ ;
+ break;
+ }
+ case V4L2_CID_HUE: {
+ JOT(8, "user requests hue %i\n", v4l2_control.value);
+ if (0 != adjust_hue(peasycap, v4l2_control.value))
+ ;
+ break;
+ }
+ case V4L2_CID_AUDIO_VOLUME: {
+ JOT(8, "user requests volume %i\n", v4l2_control.value);
+ if (0 != adjust_volume(peasycap, v4l2_control.value))
+ ;
+ break;
+ }
+ case V4L2_CID_AUDIO_MUTE: {
+ int mute;
+
+ JOT(8, "user requests mute %i\n", v4l2_control.value);
+ if (true == v4l2_control.value)
+ mute = 1;
+ else
+ mute = 0;
+
+ if (0 != adjust_mute(peasycap, mute))
+ SAY("WARNING: failed to adjust mute to %i\n", mute);
+ break;
+ }
+ default: {
+ SAY("ERROR: unknown V4L2 control: 0x%08X=id\n", \
+ v4l2_control.id);
+ explain_cid(v4l2_control.id);
+ return -EINVAL;
+ }
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_S_EXT_CTRLS: {
+ JOT(8, "VIDIOC_S_EXT_CTRLS unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_ENUM_FMT: {
+ static __u32 index;
+ static struct v4l2_fmtdesc v4l2_fmtdesc;
+
+ JOT(8, "VIDIOC_ENUM_FMT\n");
+
+ if (0 != copy_from_user(&v4l2_fmtdesc, (void __user *)arg, \
+ sizeof(struct v4l2_fmtdesc))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ index = v4l2_fmtdesc.index;
+ memset(&v4l2_fmtdesc, 0, sizeof(struct v4l2_fmtdesc));
+
+ v4l2_fmtdesc.index = index;
+ v4l2_fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ switch (index) {
+ case 0: {
+ v4l2_fmtdesc.flags = 0;
+ strcpy(&v4l2_fmtdesc.description[0], "uyvy");
+ v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_UYVY;
+ JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ break;
+ }
+ case 1: {
+ v4l2_fmtdesc.flags = 0;
+ strcpy(&v4l2_fmtdesc.description[0], "yuy2");
+ v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_YUYV;
+ JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ break;
+ }
+ case 2: {
+ v4l2_fmtdesc.flags = 0;
+ strcpy(&v4l2_fmtdesc.description[0], "rgb24");
+ v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB24;
+ JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ break;
+ }
+ case 3: {
+ v4l2_fmtdesc.flags = 0;
+ strcpy(&v4l2_fmtdesc.description[0], "rgb32");
+ v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB32;
+ JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ break;
+ }
+ case 4: {
+ v4l2_fmtdesc.flags = 0;
+ strcpy(&v4l2_fmtdesc.description[0], "bgr24");
+ v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR24;
+ JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ break;
+ }
+ case 5: {
+ v4l2_fmtdesc.flags = 0;
+ strcpy(&v4l2_fmtdesc.description[0], "bgr32");
+ v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR32;
+ JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ break;
+ }
+ default: {
+ JOT(8, "%i=index: exhausts formats\n", index);
+ return -EINVAL;
+ }
+ }
+ if (0 != copy_to_user((void __user *)arg, &v4l2_fmtdesc, \
+ sizeof(struct v4l2_fmtdesc))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_ENUM_FRAMESIZES: {
+ JOT(8, "VIDIOC_ENUM_FRAMESIZES unsupported\n");
+ return -EINVAL;
+}
+case VIDIOC_ENUM_FRAMEINTERVALS: {
+	JOT(8, "VIDIOC_ENUM_FRAMEINTERVALS unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_FMT: {
+ static struct v4l2_format v4l2_format;
+ static struct v4l2_pix_format v4l2_pix_format;
+
+ JOT(8, "VIDIOC_G_FMT\n");
+
+ if (0 != copy_from_user(&v4l2_format, (void __user *)arg, \
+ sizeof(struct v4l2_format))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ POUT;
+ return -EINVAL;
+ }
+
+ memset(&v4l2_pix_format, 0, sizeof(struct v4l2_pix_format));
+ v4l2_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ memcpy(&(v4l2_format.fmt.pix), \
+ &(easycap_format[peasycap->format_offset]\
+ .v4l2_format.fmt.pix), sizeof(v4l2_pix_format));
+ JOT(8, "user is told: %s\n", \
+ &easycap_format[peasycap->format_offset].name[0]);
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_format, \
+ sizeof(struct v4l2_format))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_TRY_FMT:
+case VIDIOC_S_FMT: {
+ static struct v4l2_format v4l2_format;
+ static struct v4l2_pix_format v4l2_pix_format;
+ static bool try;
+ static int best_format;
+
+ if (VIDIOC_TRY_FMT == cmd) {
+ JOT(8, "VIDIOC_TRY_FMT\n");
+ try = true;
+ } else {
+ JOT(8, "VIDIOC_S_FMT\n");
+ try = false;
+ }
+
+ if (0 != copy_from_user(&v4l2_format, (void __user *)arg, \
+ sizeof(struct v4l2_format))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ best_format = adjust_format(peasycap, \
+ v4l2_format.fmt.pix.width, \
+ v4l2_format.fmt.pix.height, \
+ v4l2_format.fmt.pix.pixelformat, \
+ v4l2_format.fmt.pix.field, \
+ try);
+ if (0 > best_format) {
+ JOT(8, "WARNING: adjust_format() returned %i\n", best_format);
+ return -ENOENT;
+ }
+/*...........................................................................*/
+ memset(&v4l2_pix_format, 0, sizeof(struct v4l2_pix_format));
+ v4l2_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ memcpy(&(v4l2_format.fmt.pix), &(easycap_format[best_format]\
+ .v4l2_format.fmt.pix), sizeof(v4l2_pix_format));
+ JOT(8, "user is told: %s\n", &easycap_format[best_format].name[0]);
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_format, \
+ sizeof(struct v4l2_format))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_CROPCAP: {
+ static struct v4l2_cropcap v4l2_cropcap;
+
+ JOT(8, "VIDIOC_CROPCAP\n");
+
+ if (0 != copy_from_user(&v4l2_cropcap, (void __user *)arg, \
+ sizeof(struct v4l2_cropcap))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ JOT(8, "v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+
+ memset(&v4l2_cropcap, 0, sizeof(struct v4l2_cropcap));
+ v4l2_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ v4l2_cropcap.bounds.left = 0;
+ v4l2_cropcap.bounds.top = 0;
+ v4l2_cropcap.bounds.width = peasycap->width;
+ v4l2_cropcap.bounds.height = peasycap->height;
+ v4l2_cropcap.defrect.left = 0;
+ v4l2_cropcap.defrect.top = 0;
+ v4l2_cropcap.defrect.width = peasycap->width;
+ v4l2_cropcap.defrect.height = peasycap->height;
+ v4l2_cropcap.pixelaspect.numerator = 1;
+ v4l2_cropcap.pixelaspect.denominator = 1;
+
+ JOT(8, "user is told: %ix%i\n", peasycap->width, peasycap->height);
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_cropcap, \
+ sizeof(struct v4l2_cropcap))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_CROP:
+case VIDIOC_S_CROP: {
+ JOT(8, "VIDIOC_G_CROP|VIDIOC_S_CROP unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_QUERYSTD: {
+ JOT(8, "VIDIOC_QUERYSTD: " \
+ "EasyCAP is incapable of detecting standard\n");
+ return -EINVAL;
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*---------------------------------------------------------------------------*/
+/*
+ * THE MANIPULATIONS INVOLVING last0,last1,last2,last3 CONSTITUTE A WORKAROUND
+ * FOR WHAT APPEARS TO BE A BUG IN 64-BIT mplayer.
+ * NOT NEEDED, BUT HOPEFULLY HARMLESS, FOR 32-BIT mplayer.
+ */
+/*---------------------------------------------------------------------------*/
+case VIDIOC_ENUMSTD: {
+ static int last0 = -1, last1 = -1, last2 = -1, last3 = -1;
+ static struct v4l2_standard v4l2_standard;
+ static __u32 index;
+ static struct easycap_standard const *peasycap_standard;
+
+ JOT(8, "VIDIOC_ENUMSTD\n");
+
+ if (0 != copy_from_user(&v4l2_standard, (void __user *)arg, \
+ sizeof(struct v4l2_standard))) {
+ POUT;
+ return -EFAULT;
+ }
+ index = v4l2_standard.index;
+
+ last3 = last2; last2 = last1; last1 = last0; last0 = index;
+ if ((index == last3) && (index == last2) && \
+ (index == last1) && (index == last0)) {
+ index++;
+ last3 = last2; last2 = last1; last1 = last0; last0 = index;
+ }
+
+ memset(&v4l2_standard, 0, sizeof(struct v4l2_standard));
+
+ peasycap_standard = &easycap_standard[0];
+ while (0xFFFF != peasycap_standard->mask) {
+ if ((int)(peasycap_standard - &easycap_standard[0]) == index)
+ break;
+ peasycap_standard++;
+ }
+ if (0xFFFF == peasycap_standard->mask) {
+ JOT(8, "%i=index: exhausts standards\n", index);
+ return -EINVAL;
+ }
+ JOT(8, "%i=index: %s\n", index, \
+ &(peasycap_standard->v4l2_standard.name[0]));
+ memcpy(&v4l2_standard, &(peasycap_standard->v4l2_standard), \
+ sizeof(struct v4l2_standard));
+
+ v4l2_standard.index = index;
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_standard, \
+ sizeof(struct v4l2_standard))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_STD: {
+ static v4l2_std_id std_id;
+ static struct easycap_standard const *peasycap_standard;
+
+ JOT(8, "VIDIOC_G_STD\n");
+
+ if (0 != copy_from_user(&std_id, (void __user *)arg, \
+ sizeof(v4l2_std_id))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ peasycap_standard = &easycap_standard[peasycap->standard_offset];
+ std_id = peasycap_standard->v4l2_standard.id;
+
+ JOT(8, "user is told: %s\n", \
+ &peasycap_standard->v4l2_standard.name[0]);
+
+ if (0 != copy_to_user((void __user *)arg, &std_id, \
+ sizeof(v4l2_std_id))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_S_STD: {
+ static v4l2_std_id std_id;
+ static int rc;
+
+ JOT(8, "VIDIOC_S_STD\n");
+
+ if (0 != copy_from_user(&std_id, (void __user *)arg, \
+ sizeof(v4l2_std_id))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ rc = adjust_standard(peasycap, std_id);
+ if (0 > rc) {
+ JOT(8, "WARNING: adjust_standard() returned %i\n", rc);
+ return -ENOENT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_REQBUFS: {
+ static int nbuffers;
+ static struct v4l2_requestbuffers v4l2_requestbuffers;
+
+ JOT(8, "VIDIOC_REQBUFS\n");
+
+ if (0 != copy_from_user(&v4l2_requestbuffers, (void __user *)arg, \
+ sizeof(struct v4l2_requestbuffers))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_requestbuffers.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (v4l2_requestbuffers.memory != V4L2_MEMORY_MMAP) {
+ POUT;
+ return -EINVAL;
+ }
+ nbuffers = v4l2_requestbuffers.count;
+ JOT(8, " User requests %i buffers ...\n", nbuffers);
+ if (nbuffers < 2)
+ nbuffers = 2;
+ if (nbuffers > FRAME_BUFFER_MANY)
+ nbuffers = FRAME_BUFFER_MANY;
+ if (v4l2_requestbuffers.count == nbuffers) {
+ JOT(8, " ... agree to %i buffers\n", \
+ nbuffers);
+ } else {
+ JOT(8, " ... insist on %i buffers\n", \
+ nbuffers);
+ v4l2_requestbuffers.count = nbuffers;
+ }
+ peasycap->frame_buffer_many = nbuffers;
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_requestbuffers, \
+ sizeof(struct v4l2_requestbuffers))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_QUERYBUF: {
+ static __u32 index;
+ static struct v4l2_buffer v4l2_buffer;
+
+ JOT(8, "VIDIOC_QUERYBUF\n");
+
+ if (peasycap->video_eof) {
+ JOT(8, "returning -1 because %i=video_eof\n", \
+ peasycap->video_eof);
+ return -1;
+ }
+
+ if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
+ sizeof(struct v4l2_buffer))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ index = v4l2_buffer.index;
+ if (index < 0 || index >= peasycap->frame_buffer_many)
+ return -EINVAL;
+ memset(&v4l2_buffer, 0, sizeof(struct v4l2_buffer));
+ v4l2_buffer.index = index;
+ v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ v4l2_buffer.bytesused = peasycap->frame_buffer_used;
+ v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | \
+ peasycap->done[index] | \
+ peasycap->queued[index];
+ v4l2_buffer.field = peasycap->field;
+ v4l2_buffer.memory = V4L2_MEMORY_MMAP;
+ v4l2_buffer.m.offset = index * FRAME_BUFFER_SIZE;
+ v4l2_buffer.length = FRAME_BUFFER_SIZE;
+
+ JOT(16, " %10i=index\n", v4l2_buffer.index);
+ JOT(16, " 0x%08X=type\n", v4l2_buffer.type);
+ JOT(16, " %10i=bytesused\n", v4l2_buffer.bytesused);
+ JOT(16, " 0x%08X=flags\n", v4l2_buffer.flags);
+ JOT(16, " %10i=field\n", v4l2_buffer.field);
+ JOT(16, " %10li=timestamp.tv_usec\n", \
+ (long)v4l2_buffer.timestamp.tv_usec);
+ JOT(16, " %10i=sequence\n", v4l2_buffer.sequence);
+ JOT(16, " 0x%08X=memory\n", v4l2_buffer.memory);
+ JOT(16, " %10i=m.offset\n", v4l2_buffer.m.offset);
+ JOT(16, " %10i=length\n", v4l2_buffer.length);
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
+ sizeof(struct v4l2_buffer))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_QBUF: {
+ static struct v4l2_buffer v4l2_buffer;
+
+ JOT(8, "VIDIOC_QBUF\n");
+
+ if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
+ sizeof(struct v4l2_buffer))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (v4l2_buffer.memory != V4L2_MEMORY_MMAP)
+ return -EINVAL;
+ if (v4l2_buffer.index < 0 || \
+ (v4l2_buffer.index >= peasycap->frame_buffer_many))
+ return -EINVAL;
+ v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED;
+
+ peasycap->done[v4l2_buffer.index] = 0;
+ peasycap->queued[v4l2_buffer.index] = V4L2_BUF_FLAG_QUEUED;
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
+ sizeof(struct v4l2_buffer))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ JOT(8, "..... user queueing frame buffer %i\n", \
+ (int)v4l2_buffer.index);
+
+ peasycap->frame_lock = 0;
+
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_DQBUF:
+ {
+#if defined(AUDIOTIME)
+ static struct signed_div_result sdr;
+ static long long int above, below, dnbydt, fudge, sll;
+ static unsigned long long int ull;
+ static struct timeval timeval0;
+ struct timeval timeval1;
+#endif /*AUDIOTIME*/
+ static struct timeval timeval, timeval2;
+ static int i, j;
+ static struct v4l2_buffer v4l2_buffer;
+
+ JOT(8, "VIDIOC_DQBUF\n");
+
+ if ((peasycap->video_idle) || (peasycap->video_eof)) {
+ JOT(8, "returning -EIO because " \
+ "%i=video_idle %i=video_eof\n", \
+ peasycap->video_idle, peasycap->video_eof);
+ return -EIO;
+ }
+
+ if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
+ sizeof(struct v4l2_buffer))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (!peasycap->video_isoc_streaming) {
+ JOT(16, "returning -EIO because video urbs not streaming\n");
+ return -EIO;
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * IF THE USER HAS PREVIOUSLY CALLED easycap_poll(), AS DETERMINED BY FINDING
+ * THE FLAG peasycap->polled SET, THERE MUST BE NO FURTHER WAIT HERE. IN THIS
+ * CASE, JUST CHOOSE THE FRAME INDICATED BY peasycap->frame_read
+ */
+/*---------------------------------------------------------------------------*/
+
+ if (!peasycap->polled) {
+ if (-EIO == easycap_dqbuf(peasycap, 0))
+ return -EIO;
+ } else {
+ if (peasycap->video_eof)
+ return -EIO;
+ }
+ if (V4L2_BUF_FLAG_DONE != peasycap->done[peasycap->frame_read]) {
+ SAY("ERROR: V4L2_BUF_FLAG_DONE != 0x%08X\n", \
+ peasycap->done[peasycap->frame_read]);
+ }
+ peasycap->polled = 0;
+
+ if (!(isequence % 10)) {
+ for (i = 0; i < 179; i++)
+ peasycap->merit[i] = peasycap->merit[i+1];
+ peasycap->merit[179] = merit_saa(peasycap->pusb_device);
+ j = 0;
+ for (i = 0; i < 180; i++)
+ j += peasycap->merit[i];
+ if (90 < j) {
+ SAY("easycap driver shutting down " \
+ "on condition blue\n");
+ peasycap->video_eof = 1; peasycap->audio_eof = 1;
+ }
+ }
+
+ v4l2_buffer.index = peasycap->frame_read;
+ v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ v4l2_buffer.bytesused = peasycap->frame_buffer_used;
+ v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
+ v4l2_buffer.field = peasycap->field;
+ if (V4L2_FIELD_ALTERNATE == v4l2_buffer.field)
+ v4l2_buffer.field = \
+ 0x000F & (peasycap->\
+ frame_buffer[peasycap->frame_read][0].kount);
+ do_gettimeofday(&timeval);
+ timeval2 = timeval;
+
+#if defined(AUDIOTIME)
+ if (!peasycap->timeval0.tv_sec) {
+ timeval0 = timeval;
+ timeval1 = timeval;
+ timeval2 = timeval;
+ dnbydt = 192000;
+
+ if (mutex_lock_interruptible(&(peasycap->mutex_timeval0)))
+ return -ERESTARTSYS;
+ peasycap->timeval0 = timeval0;
+ mutex_unlock(&(peasycap->mutex_timeval0));
+ } else {
+ if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
+ return -ERESTARTSYS;
+ dnbydt = peasycap->dnbydt;
+ timeval1 = peasycap->timeval1;
+ mutex_unlock(&(peasycap->mutex_timeval1));
+ above = dnbydt * MICROSECONDS(timeval, timeval1);
+ below = 192000;
+ sdr = signed_div(above, below);
+
+ above = sdr.quotient + timeval1.tv_usec - 350000;
+
+ below = 1000000;
+ sdr = signed_div(above, below);
+ timeval2.tv_usec = sdr.remainder;
+ timeval2.tv_sec = timeval1.tv_sec + sdr.quotient;
+ }
+ if (!(isequence % 500)) {
+ fudge = ((long long int)(1000000)) * \
+ ((long long int)(timeval.tv_sec - \
+ timeval2.tv_sec)) + \
+ (long long int)(timeval.tv_usec - \
+ timeval2.tv_usec);
+ sdr = signed_div(fudge, 1000);
+ sll = sdr.quotient;
+ ull = sdr.remainder;
+
+ SAY("%5lli.%-3lli=ms timestamp fudge\n", sll, ull);
+ }
+#endif /*AUDIOTIME*/
+
+ v4l2_buffer.timestamp = timeval2;
+ v4l2_buffer.sequence = isequence++;
+ v4l2_buffer.memory = V4L2_MEMORY_MMAP;
+ v4l2_buffer.m.offset = v4l2_buffer.index * FRAME_BUFFER_SIZE;
+ v4l2_buffer.length = FRAME_BUFFER_SIZE;
+
+ JOT(16, " %10i=index\n", v4l2_buffer.index);
+ JOT(16, " 0x%08X=type\n", v4l2_buffer.type);
+ JOT(16, " %10i=bytesused\n", v4l2_buffer.bytesused);
+ JOT(16, " 0x%08X=flags\n", v4l2_buffer.flags);
+ JOT(16, " %10i=field\n", v4l2_buffer.field);
+ JOT(16, " %10li=timestamp.tv_usec\n", \
+ (long)v4l2_buffer.timestamp.tv_usec);
+ JOT(16, " %10i=sequence\n", v4l2_buffer.sequence);
+ JOT(16, " 0x%08X=memory\n", v4l2_buffer.memory);
+ JOT(16, " %10i=m.offset\n", v4l2_buffer.m.offset);
+ JOT(16, " %10i=length\n", v4l2_buffer.length);
+
+ if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
+ sizeof(struct v4l2_buffer))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ JOT(8, "..... user is offered frame buffer %i\n", \
+ peasycap->frame_read);
+ peasycap->frame_lock = 1;
+ if (peasycap->frame_read == peasycap->frame_fill) {
+ if (peasycap->frame_lock) {
+ JOT(8, "ERROR: filling frame buffer " \
+ "while offered to user\n");
+ }
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*---------------------------------------------------------------------------*/
+/*
+ * AUDIO URBS HAVE ALREADY BEEN SUBMITTED WHEN THIS COMMAND IS RECEIVED;
+ * VIDEO URBS HAVE NOT.
+ */
+/*---------------------------------------------------------------------------*/
+case VIDIOC_STREAMON: {
+ static int i;
+
+ JOT(8, "VIDIOC_STREAMON\n");
+
+ isequence = 0;
+ for (i = 0; i < 180; i++)
+ peasycap->merit[i] = 0;
+ if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+ }
+ submit_video_urbs(peasycap);
+ peasycap->video_idle = 0;
+ peasycap->audio_idle = 0;
+ peasycap->video_eof = 0;
+ peasycap->audio_eof = 0;
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_STREAMOFF: {
+ JOT(8, "VIDIOC_STREAMOFF\n");
+
+ if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+ }
+
+ peasycap->video_idle = 1;
+ peasycap->audio_idle = 1; peasycap->timeval0.tv_sec = 0;
+/*---------------------------------------------------------------------------*/
+/*
+ * IF THE WAIT QUEUES ARE NOT CLEARED IN RESPONSE TO THE STREAMOFF COMMAND
+ * THE USERSPACE PROGRAM, E.G. mplayer, MAY HANG ON EXIT. BEWARE.
+ */
+/*---------------------------------------------------------------------------*/
+ JOT(8, "calling wake_up on wq_video and wq_audio\n");
+ wake_up_interruptible(&(peasycap->wq_video));
+ wake_up_interruptible(&(peasycap->wq_audio));
+/*---------------------------------------------------------------------------*/
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_PARM: {
+ static struct v4l2_streamparm v4l2_streamparm;
+
+ JOT(8, "VIDIOC_G_PARM\n");
+
+ if (0 != copy_from_user(&v4l2_streamparm, (void __user *)arg, \
+ sizeof(struct v4l2_streamparm))) {
+ POUT;
+ return -EFAULT;
+ }
+
+ if (v4l2_streamparm.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ POUT;
+ return -EINVAL;
+ }
+ v4l2_streamparm.parm.capture.capability = 0;
+ v4l2_streamparm.parm.capture.capturemode = 0;
+ v4l2_streamparm.parm.capture.timeperframe.numerator = 1;
+ v4l2_streamparm.parm.capture.timeperframe.denominator = 30;
+ v4l2_streamparm.parm.capture.readbuffers = peasycap->frame_buffer_many;
+ v4l2_streamparm.parm.capture.extendedmode = 0;
+ if (0 != copy_to_user((void __user *)arg, &v4l2_streamparm, \
+ sizeof(struct v4l2_streamparm))) {
+ POUT;
+ return -EFAULT;
+ }
+ break;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_S_PARM: {
+ JOT(8, "VIDIOC_S_PARM unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_AUDIO: {
+ JOT(8, "VIDIOC_G_AUDIO unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_S_AUDIO: {
+ JOT(8, "VIDIOC_S_AUDIO unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_S_TUNER: {
+ JOT(8, "VIDIOC_S_TUNER unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_FBUF:
+case VIDIOC_S_FBUF:
+case VIDIOC_OVERLAY: {
+ JOT(8, "VIDIOC_G_FBUF|VIDIOC_S_FBUF|VIDIOC_OVERLAY unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+case VIDIOC_G_TUNER: {
+ JOT(8, "VIDIOC_G_TUNER unsupported\n");
+ return -EINVAL;
+}
+case VIDIOC_G_FREQUENCY:
+case VIDIOC_S_FREQUENCY: {
+ JOT(8, "VIDIOC_G_FREQUENCY|VIDIOC_S_FREQUENCY unsupported\n");
+ return -EINVAL;
+}
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+default: {
+ JOT(8, "ERROR: unrecognized V4L2 IOCTL command: 0x%08X\n", cmd);
+ explain_ioctl(cmd);
+ POUT;
+ return -ENOIOCTLCMD;
+}
+}
+return 0;
+}
+
+long easycap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ long ret;
+
+ lock_kernel();
+ ret = easycap_ioctl_bkl(inode, file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
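+/*---------------------------------------------------------------------------*/
+/*
+ * FOR ORIENTATION: A TYPICAL V4L2 CLIENT DRIVES THE CASES HANDLED BY
+ * easycap_ioctl_bkl() IN ROUGHLY THE ORDER SKETCHED BELOW.  THIS IS AN
+ * ILLUSTRATIVE USERSPACE SKETCH ONLY, NOT PART OF THE DRIVER; THE DEVICE
+ * NODE NAME IS AN ASSUMPTION.
+ *
+ *     int fd = open("/dev/video0", O_RDWR);
+ *     ioctl(fd, VIDIOC_QUERYCAP, &v4l2_capability);
+ *     ioctl(fd, VIDIOC_S_FMT, &v4l2_format);
+ *     ioctl(fd, VIDIOC_REQBUFS, &v4l2_requestbuffers);   (V4L2_MEMORY_MMAP)
+ *     for (i = 0; i < v4l2_requestbuffers.count; i++) {
+ *             ioctl(fd, VIDIOC_QUERYBUF, &v4l2_buffer);
+ *             mmap(NULL, v4l2_buffer.length, PROT_READ, MAP_SHARED,
+ *                     fd, v4l2_buffer.m.offset);
+ *             ioctl(fd, VIDIOC_QBUF, &v4l2_buffer);
+ *     }
+ *     ioctl(fd, VIDIOC_STREAMON, &type);
+ *     while (capturing) {
+ *             ioctl(fd, VIDIOC_DQBUF, &v4l2_buffer);
+ *             (process the mapped buffer, then VIDIOC_QBUF it again)
+ *     }
+ *     ioctl(fd, VIDIOC_STREAMOFF, &type);
+ */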
+/*--------------------------------------------------------------------------*/
+static int easysnd_ioctl_bkl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+struct easycap *peasycap;
+struct usb_device *p;
+
+peasycap = file->private_data;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL.\n");
+ return -1;
+}
+p = peasycap->pusb_device;
+/*---------------------------------------------------------------------------*/
+switch (cmd) {
+case SNDCTL_DSP_GETCAPS: {
+ int caps;
+ JOT(8, "SNDCTL_DSP_GETCAPS\n");
+
+#if defined(UPSAMPLE)
+ if (true == peasycap->microphone)
+ caps = 0x04400000;
+ else
+ caps = 0x04400000;
+#else
+ if (true == peasycap->microphone)
+ caps = 0x02400000;
+ else
+ caps = 0x04400000;
+#endif /*UPSAMPLE*/
+
+ if (0 != copy_to_user((void __user *)arg, &caps, sizeof(int)))
+ return -EFAULT;
+ break;
+}
+case SNDCTL_DSP_GETFMTS: {
+ int incoming;
+ JOT(8, "SNDCTL_DSP_GETFMTS\n");
+
+#if defined(UPSAMPLE)
+ if (true == peasycap->microphone)
+ incoming = AFMT_S16_LE;
+ else
+ incoming = AFMT_S16_LE;
+#else
+ if (true == peasycap->microphone)
+ incoming = AFMT_S16_LE;
+ else
+ incoming = AFMT_S16_LE;
+#endif /*UPSAMPLE*/
+
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ return -EFAULT;
+ break;
+}
+case SNDCTL_DSP_SETFMT: {
+ int incoming, outgoing;
+ JOT(8, "SNDCTL_DSP_SETFMT\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ return -EFAULT;
+ JOT(8, "........... %i=incoming\n", incoming);
+
+#if defined(UPSAMPLE)
+ if (true == peasycap->microphone)
+ outgoing = AFMT_S16_LE;
+ else
+ outgoing = AFMT_S16_LE;
+#else
+ if (true == peasycap->microphone)
+ outgoing = AFMT_S16_LE;
+ else
+ outgoing = AFMT_S16_LE;
+#endif /*UPSAMPLE*/
+
+ if (incoming != outgoing) {
+ JOT(8, "........... %i=outgoing\n", outgoing);
+ JOT(8, " cf. %i=AFMT_S16_LE\n", AFMT_S16_LE);
+ JOT(8, " cf. %i=AFMT_U8\n", AFMT_U8);
+ if (0 != copy_to_user((void __user *)arg, &outgoing, \
+ sizeof(int)))
+ return -EFAULT;
+ return -EINVAL ;
+ }
+ break;
+}
+case SNDCTL_DSP_STEREO: {
+ int incoming;
+ JOT(8, "SNDCTL_DSP_STEREO\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ return -EFAULT;
+ JOT(8, "........... %i=incoming\n", incoming);
+
+#if defined(UPSAMPLE)
+ if (true == peasycap->microphone)
+ incoming = 1;
+ else
+ incoming = 1;
+#else
+ if (true == peasycap->microphone)
+ incoming = 0;
+ else
+ incoming = 1;
+#endif /*UPSAMPLE*/
+
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ return -EFAULT;
+ break;
+}
+case SNDCTL_DSP_SPEED: {
+ int incoming;
+ JOT(8, "SNDCTL_DSP_SPEED\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ return -EFAULT;
+ JOT(8, "........... %i=incoming\n", incoming);
+
+#if defined(UPSAMPLE)
+ if (true == peasycap->microphone)
+ incoming = 32000;
+ else
+ incoming = 48000;
+#else
+ if (true == peasycap->microphone)
+ incoming = 8000;
+ else
+ incoming = 48000;
+#endif /*UPSAMPLE*/
+
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ return -EFAULT;
+ break;
+}
+case SNDCTL_DSP_GETTRIGGER: {
+ int incoming;
+ JOT(8, "SNDCTL_DSP_GETTRIGGER\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ return -EFAULT;
+ JOT(8, "........... %i=incoming\n", incoming);
+
+ incoming = PCM_ENABLE_INPUT;
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ return -EFAULT;
+ break;
+}
+case SNDCTL_DSP_SETTRIGGER: {
+ int incoming;
+ JOT(8, "SNDCTL_DSP_SETTRIGGER\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ return -EFAULT;
+ JOT(8, "........... %i=incoming\n", incoming);
+ JOT(8, "........... cf 0x%x=PCM_ENABLE_INPUT " \
+ "0x%x=PCM_ENABLE_OUTPUT\n", \
+ PCM_ENABLE_INPUT, PCM_ENABLE_OUTPUT);
+ ;
+ ;
+ ;
+ ;
+ break;
+}
+case SNDCTL_DSP_GETBLKSIZE: {
+ int incoming;
+ JOT(8, "SNDCTL_DSP_GETBLKSIZE\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ return -EFAULT;
+ JOT(8, "........... %i=incoming\n", incoming);
+ incoming = peasycap->audio_bytes_per_fragment;
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ return -EFAULT;
+ break;
+}
+case SNDCTL_DSP_GETISPACE: {
+ struct audio_buf_info audio_buf_info;
+
+ JOT(8, "SNDCTL_DSP_GETISPACE\n");
+
+ audio_buf_info.bytes = peasycap->audio_bytes_per_fragment;
+ audio_buf_info.fragments = 1;
+ audio_buf_info.fragsize = 0;
+ audio_buf_info.fragstotal = 0;
+
+	if (0 != copy_to_user((void __user *)arg, &audio_buf_info, \
+				sizeof(struct audio_buf_info)))
+ return -EFAULT;
+ break;
+}
+default: {
+ JOT(8, "ERROR: unrecognized DSP IOCTL command: 0x%08X\n", cmd);
+ POUT;
+ return -ENOIOCTLCMD;
+}
+}
+return 0;
+}
+
+long easysnd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ long ret;
+
+ lock_kernel();
+ ret = easysnd_ioctl_bkl(inode, file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+/*****************************************************************************/
+int explain_ioctl(__u32 wot)
+{
+int k;
+/*---------------------------------------------------------------------------*/
+/*
+ * THE DATA FOR THE ARRAY mess BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
+ * SHELL SCRIPT:
+ * #
+ * cat /usr/src/linux-headers-`uname -r`/include/linux/videodev2.h | \
+ * grep "^#define VIDIOC_" - | grep -v "_OLD" - | \
+ * sed -e "s,_IO.*$,,;p" | sed -e "N;s,\n,, " | \
+ * sed -e "s/^#define / {/;s/#define /, \"/;s/$/\"},/" | \
+ * sed -e "s, ,,g;s, ,,g" >ioctl.tmp
+ * echo "{0xFFFFFFFF,\"\"}" >>ioctl.tmp
+ * exit 0
+ * #
+ * THE EXCISED "_OLD" CASES WERE LATER REINSTATED MANUALLY.
+ *
+ * THE DATA FOR THE ARRAY mess1 BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
+ * SHELL SCRIPT:
+ * cat /usr/src/linux-headers-`uname -r`/include/linux/videodev.h | \
+ * grep "^#define VIDIOC" - | grep -v "_OLD" - | \
+ * sed -e "s,_IO.*$,,;p" | sed -e "N;s,\n,, " | \
+ * sed -e "s/^#define / {/;s/#define /, \"/;s/$/\"},/" | \
+ * sed -e "s, ,,g;s, ,,g" >ioctl.tmp
+ * echo "{0xFFFFFFFF,\"\"}" >>ioctl.tmp
+ * exit 0
+ * #
+ */
+/*---------------------------------------------------------------------------*/
+static struct mess {
+ __u32 command;
+ char name[64];
+} mess[] = {
+#if defined(VIDIOC_QUERYCAP)
+{VIDIOC_QUERYCAP, "VIDIOC_QUERYCAP"},
+#endif
+#if defined(VIDIOC_RESERVED)
+{VIDIOC_RESERVED, "VIDIOC_RESERVED"},
+#endif
+#if defined(VIDIOC_ENUM_FMT)
+{VIDIOC_ENUM_FMT, "VIDIOC_ENUM_FMT"},
+#endif
+#if defined(VIDIOC_G_FMT)
+{VIDIOC_G_FMT, "VIDIOC_G_FMT"},
+#endif
+#if defined(VIDIOC_S_FMT)
+{VIDIOC_S_FMT, "VIDIOC_S_FMT"},
+#endif
+#if defined(VIDIOC_REQBUFS)
+{VIDIOC_REQBUFS, "VIDIOC_REQBUFS"},
+#endif
+#if defined(VIDIOC_QUERYBUF)
+{VIDIOC_QUERYBUF, "VIDIOC_QUERYBUF"},
+#endif
+#if defined(VIDIOC_G_FBUF)
+{VIDIOC_G_FBUF, "VIDIOC_G_FBUF"},
+#endif
+#if defined(VIDIOC_S_FBUF)
+{VIDIOC_S_FBUF, "VIDIOC_S_FBUF"},
+#endif
+#if defined(VIDIOC_OVERLAY)
+{VIDIOC_OVERLAY, "VIDIOC_OVERLAY"},
+#endif
+#if defined(VIDIOC_QBUF)
+{VIDIOC_QBUF, "VIDIOC_QBUF"},
+#endif
+#if defined(VIDIOC_DQBUF)
+{VIDIOC_DQBUF, "VIDIOC_DQBUF"},
+#endif
+#if defined(VIDIOC_STREAMON)
+{VIDIOC_STREAMON, "VIDIOC_STREAMON"},
+#endif
+#if defined(VIDIOC_STREAMOFF)
+{VIDIOC_STREAMOFF, "VIDIOC_STREAMOFF"},
+#endif
+#if defined(VIDIOC_G_PARM)
+{VIDIOC_G_PARM, "VIDIOC_G_PARM"},
+#endif
+#if defined(VIDIOC_S_PARM)
+{VIDIOC_S_PARM, "VIDIOC_S_PARM"},
+#endif
+#if defined(VIDIOC_G_STD)
+{VIDIOC_G_STD, "VIDIOC_G_STD"},
+#endif
+#if defined(VIDIOC_S_STD)
+{VIDIOC_S_STD, "VIDIOC_S_STD"},
+#endif
+#if defined(VIDIOC_ENUMSTD)
+{VIDIOC_ENUMSTD, "VIDIOC_ENUMSTD"},
+#endif
+#if defined(VIDIOC_ENUMINPUT)
+{VIDIOC_ENUMINPUT, "VIDIOC_ENUMINPUT"},
+#endif
+#if defined(VIDIOC_G_CTRL)
+{VIDIOC_G_CTRL, "VIDIOC_G_CTRL"},
+#endif
+#if defined(VIDIOC_S_CTRL)
+{VIDIOC_S_CTRL, "VIDIOC_S_CTRL"},
+#endif
+#if defined(VIDIOC_G_TUNER)
+{VIDIOC_G_TUNER, "VIDIOC_G_TUNER"},
+#endif
+#if defined(VIDIOC_S_TUNER)
+{VIDIOC_S_TUNER, "VIDIOC_S_TUNER"},
+#endif
+#if defined(VIDIOC_G_AUDIO)
+{VIDIOC_G_AUDIO, "VIDIOC_G_AUDIO"},
+#endif
+#if defined(VIDIOC_S_AUDIO)
+{VIDIOC_S_AUDIO, "VIDIOC_S_AUDIO"},
+#endif
+#if defined(VIDIOC_QUERYCTRL)
+{VIDIOC_QUERYCTRL, "VIDIOC_QUERYCTRL"},
+#endif
+#if defined(VIDIOC_QUERYMENU)
+{VIDIOC_QUERYMENU, "VIDIOC_QUERYMENU"},
+#endif
+#if defined(VIDIOC_G_INPUT)
+{VIDIOC_G_INPUT, "VIDIOC_G_INPUT"},
+#endif
+#if defined(VIDIOC_S_INPUT)
+{VIDIOC_S_INPUT, "VIDIOC_S_INPUT"},
+#endif
+#if defined(VIDIOC_G_OUTPUT)
+{VIDIOC_G_OUTPUT, "VIDIOC_G_OUTPUT"},
+#endif
+#if defined(VIDIOC_S_OUTPUT)
+{VIDIOC_S_OUTPUT, "VIDIOC_S_OUTPUT"},
+#endif
+#if defined(VIDIOC_ENUMOUTPUT)
+{VIDIOC_ENUMOUTPUT, "VIDIOC_ENUMOUTPUT"},
+#endif
+#if defined(VIDIOC_G_AUDOUT)
+{VIDIOC_G_AUDOUT, "VIDIOC_G_AUDOUT"},
+#endif
+#if defined(VIDIOC_S_AUDOUT)
+{VIDIOC_S_AUDOUT, "VIDIOC_S_AUDOUT"},
+#endif
+#if defined(VIDIOC_G_MODULATOR)
+{VIDIOC_G_MODULATOR, "VIDIOC_G_MODULATOR"},
+#endif
+#if defined(VIDIOC_S_MODULATOR)
+{VIDIOC_S_MODULATOR, "VIDIOC_S_MODULATOR"},
+#endif
+#if defined(VIDIOC_G_FREQUENCY)
+{VIDIOC_G_FREQUENCY, "VIDIOC_G_FREQUENCY"},
+#endif
+#if defined(VIDIOC_S_FREQUENCY)
+{VIDIOC_S_FREQUENCY, "VIDIOC_S_FREQUENCY"},
+#endif
+#if defined(VIDIOC_CROPCAP)
+{VIDIOC_CROPCAP, "VIDIOC_CROPCAP"},
+#endif
+#if defined(VIDIOC_G_CROP)
+{VIDIOC_G_CROP, "VIDIOC_G_CROP"},
+#endif
+#if defined(VIDIOC_S_CROP)
+{VIDIOC_S_CROP, "VIDIOC_S_CROP"},
+#endif
+#if defined(VIDIOC_G_JPEGCOMP)
+{VIDIOC_G_JPEGCOMP, "VIDIOC_G_JPEGCOMP"},
+#endif
+#if defined(VIDIOC_S_JPEGCOMP)
+{VIDIOC_S_JPEGCOMP, "VIDIOC_S_JPEGCOMP"},
+#endif
+#if defined(VIDIOC_QUERYSTD)
+{VIDIOC_QUERYSTD, "VIDIOC_QUERYSTD"},
+#endif
+#if defined(VIDIOC_TRY_FMT)
+{VIDIOC_TRY_FMT, "VIDIOC_TRY_FMT"},
+#endif
+#if defined(VIDIOC_ENUMAUDIO)
+{VIDIOC_ENUMAUDIO, "VIDIOC_ENUMAUDIO"},
+#endif
+#if defined(VIDIOC_ENUMAUDOUT)
+{VIDIOC_ENUMAUDOUT, "VIDIOC_ENUMAUDOUT"},
+#endif
+#if defined(VIDIOC_G_PRIORITY)
+{VIDIOC_G_PRIORITY, "VIDIOC_G_PRIORITY"},
+#endif
+#if defined(VIDIOC_S_PRIORITY)
+{VIDIOC_S_PRIORITY, "VIDIOC_S_PRIORITY"},
+#endif
+#if defined(VIDIOC_G_SLICED_VBI_CAP)
+{VIDIOC_G_SLICED_VBI_CAP, "VIDIOC_G_SLICED_VBI_CAP"},
+#endif
+#if defined(VIDIOC_LOG_STATUS)
+{VIDIOC_LOG_STATUS, "VIDIOC_LOG_STATUS"},
+#endif
+#if defined(VIDIOC_G_EXT_CTRLS)
+{VIDIOC_G_EXT_CTRLS, "VIDIOC_G_EXT_CTRLS"},
+#endif
+#if defined(VIDIOC_S_EXT_CTRLS)
+{VIDIOC_S_EXT_CTRLS, "VIDIOC_S_EXT_CTRLS"},
+#endif
+#if defined(VIDIOC_TRY_EXT_CTRLS)
+{VIDIOC_TRY_EXT_CTRLS, "VIDIOC_TRY_EXT_CTRLS"},
+#endif
+#if defined(VIDIOC_ENUM_FRAMESIZES)
+{VIDIOC_ENUM_FRAMESIZES, "VIDIOC_ENUM_FRAMESIZES"},
+#endif
+#if defined(VIDIOC_ENUM_FRAMEINTERVALS)
+{VIDIOC_ENUM_FRAMEINTERVALS, "VIDIOC_ENUM_FRAMEINTERVALS"},
+#endif
+#if defined(VIDIOC_G_ENC_INDEX)
+{VIDIOC_G_ENC_INDEX, "VIDIOC_G_ENC_INDEX"},
+#endif
+#if defined(VIDIOC_ENCODER_CMD)
+{VIDIOC_ENCODER_CMD, "VIDIOC_ENCODER_CMD"},
+#endif
+#if defined(VIDIOC_TRY_ENCODER_CMD)
+{VIDIOC_TRY_ENCODER_CMD, "VIDIOC_TRY_ENCODER_CMD"},
+#endif
+#if defined(VIDIOC_G_CHIP_IDENT)
+{VIDIOC_G_CHIP_IDENT, "VIDIOC_G_CHIP_IDENT"},
+#endif
+
+#if defined(VIDIOC_OVERLAY_OLD)
+{VIDIOC_OVERLAY_OLD, "VIDIOC_OVERLAY_OLD"},
+#endif
+#if defined(VIDIOC_S_PARM_OLD)
+{VIDIOC_S_PARM_OLD, "VIDIOC_S_PARM_OLD"},
+#endif
+#if defined(VIDIOC_S_CTRL_OLD)
+{VIDIOC_S_CTRL_OLD, "VIDIOC_S_CTRL_OLD"},
+#endif
+#if defined(VIDIOC_G_AUDIO_OLD)
+{VIDIOC_G_AUDIO_OLD, "VIDIOC_G_AUDIO_OLD"},
+#endif
+#if defined(VIDIOC_G_AUDOUT_OLD)
+{VIDIOC_G_AUDOUT_OLD, "VIDIOC_G_AUDOUT_OLD"},
+#endif
+#if defined(VIDIOC_CROPCAP_OLD)
+{VIDIOC_CROPCAP_OLD, "VIDIOC_CROPCAP_OLD"},
+#endif
+{0xFFFFFFFF, ""}
+};
+
+static struct mess mess1[] = \
+{
+#if defined(VIDIOCGCAP)
+{VIDIOCGCAP, "VIDIOCGCAP"},
+#endif
+#if defined(VIDIOCGCHAN)
+{VIDIOCGCHAN, "VIDIOCGCHAN"},
+#endif
+#if defined(VIDIOCSCHAN)
+{VIDIOCSCHAN, "VIDIOCSCHAN"},
+#endif
+#if defined(VIDIOCGTUNER)
+{VIDIOCGTUNER, "VIDIOCGTUNER"},
+#endif
+#if defined(VIDIOCSTUNER)
+{VIDIOCSTUNER, "VIDIOCSTUNER"},
+#endif
+#if defined(VIDIOCGPICT)
+{VIDIOCGPICT, "VIDIOCGPICT"},
+#endif
+#if defined(VIDIOCSPICT)
+{VIDIOCSPICT, "VIDIOCSPICT"},
+#endif
+#if defined(VIDIOCCAPTURE)
+{VIDIOCCAPTURE, "VIDIOCCAPTURE"},
+#endif
+#if defined(VIDIOCGWIN)
+{VIDIOCGWIN, "VIDIOCGWIN"},
+#endif
+#if defined(VIDIOCSWIN)
+{VIDIOCSWIN, "VIDIOCSWIN"},
+#endif
+#if defined(VIDIOCGFBUF)
+{VIDIOCGFBUF, "VIDIOCGFBUF"},
+#endif
+#if defined(VIDIOCSFBUF)
+{VIDIOCSFBUF, "VIDIOCSFBUF"},
+#endif
+#if defined(VIDIOCKEY)
+{VIDIOCKEY, "VIDIOCKEY"},
+#endif
+#if defined(VIDIOCGFREQ)
+{VIDIOCGFREQ, "VIDIOCGFREQ"},
+#endif
+#if defined(VIDIOCSFREQ)
+{VIDIOCSFREQ, "VIDIOCSFREQ"},
+#endif
+#if defined(VIDIOCGAUDIO)
+{VIDIOCGAUDIO, "VIDIOCGAUDIO"},
+#endif
+#if defined(VIDIOCSAUDIO)
+{VIDIOCSAUDIO, "VIDIOCSAUDIO"},
+#endif
+#if defined(VIDIOCSYNC)
+{VIDIOCSYNC, "VIDIOCSYNC"},
+#endif
+#if defined(VIDIOCMCAPTURE)
+{VIDIOCMCAPTURE, "VIDIOCMCAPTURE"},
+#endif
+#if defined(VIDIOCGMBUF)
+{VIDIOCGMBUF, "VIDIOCGMBUF"},
+#endif
+#if defined(VIDIOCGUNIT)
+{VIDIOCGUNIT, "VIDIOCGUNIT"},
+#endif
+#if defined(VIDIOCGCAPTURE)
+{VIDIOCGCAPTURE, "VIDIOCGCAPTURE"},
+#endif
+#if defined(VIDIOCSCAPTURE)
+{VIDIOCSCAPTURE, "VIDIOCSCAPTURE"},
+#endif
+#if defined(VIDIOCSPLAYMODE)
+{VIDIOCSPLAYMODE, "VIDIOCSPLAYMODE"},
+#endif
+#if defined(VIDIOCSWRITEMODE)
+{VIDIOCSWRITEMODE, "VIDIOCSWRITEMODE"},
+#endif
+#if defined(VIDIOCGPLAYINFO)
+{VIDIOCGPLAYINFO, "VIDIOCGPLAYINFO"},
+#endif
+#if defined(VIDIOCSMICROCODE)
+{VIDIOCSMICROCODE, "VIDIOCSMICROCODE"},
+#endif
+{0xFFFFFFFF, ""}
+};
+
+k = 0;
+while (mess[k].name[0]) {
+ if (wot == mess[k].command) {
+ JOT(8, "ioctl 0x%08X is %s\n", \
+ mess[k].command, &mess[k].name[0]);
+ return 0;
+ }
+ k++;
+}
+JOT(8, "ioctl 0x%08X is not in videodev2.h\n", wot);
+
+k = 0;
+while (mess1[k].name[0]) {
+ if (wot == mess1[k].command) {
+ JOT(8, "ioctl 0x%08X is %s (V4L1)\n", \
+ mess1[k].command, &mess1[k].name[0]);
+ return 0;
+ }
+ k++;
+}
+JOT(8, "ioctl 0x%08X is not in videodev.h\n", wot);
+return -1;
+}
+/*****************************************************************************/
+int explain_cid(__u32 wot)
+{
+int k;
+/*---------------------------------------------------------------------------*/
+/*
+ * THE DATA FOR THE ARRAY mess BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
+ * SHELL SCRIPT:
+ * #
+ * cat /usr/src/linux-headers-`uname -r`/include/linux/videodev2.h | \
+ * grep "^#define V4L2_CID_" | \
+ * sed -e "s,(.*$,,;p" | sed -e "N;s,\n,, " | \
+ * sed -e "s/^#define / {/;s/#define /, \"/;s/$/\"},/" | \
+ * sed -e "s, ,,g;s, ,,g" | grep -v "_BASE" | grep -v "MPEG" >cid.tmp
+ * echo "{0xFFFFFFFF,\"\"}" >>cid.tmp
+ * exit 0
+ * #
+ */
+/*---------------------------------------------------------------------------*/
+static struct mess
+{
+__u32 command;
+char name[64];
+} mess[] = {
+#if defined(V4L2_CID_USER_CLASS)
+{V4L2_CID_USER_CLASS, "V4L2_CID_USER_CLASS"},
+#endif
+#if defined(V4L2_CID_BRIGHTNESS)
+{V4L2_CID_BRIGHTNESS, "V4L2_CID_BRIGHTNESS"},
+#endif
+#if defined(V4L2_CID_CONTRAST)
+{V4L2_CID_CONTRAST, "V4L2_CID_CONTRAST"},
+#endif
+#if defined(V4L2_CID_SATURATION)
+{V4L2_CID_SATURATION, "V4L2_CID_SATURATION"},
+#endif
+#if defined(V4L2_CID_HUE)
+{V4L2_CID_HUE, "V4L2_CID_HUE"},
+#endif
+#if defined(V4L2_CID_AUDIO_VOLUME)
+{V4L2_CID_AUDIO_VOLUME, "V4L2_CID_AUDIO_VOLUME"},
+#endif
+#if defined(V4L2_CID_AUDIO_BALANCE)
+{V4L2_CID_AUDIO_BALANCE, "V4L2_CID_AUDIO_BALANCE"},
+#endif
+#if defined(V4L2_CID_AUDIO_BASS)
+{V4L2_CID_AUDIO_BASS, "V4L2_CID_AUDIO_BASS"},
+#endif
+#if defined(V4L2_CID_AUDIO_TREBLE)
+{V4L2_CID_AUDIO_TREBLE, "V4L2_CID_AUDIO_TREBLE"},
+#endif
+#if defined(V4L2_CID_AUDIO_MUTE)
+{V4L2_CID_AUDIO_MUTE, "V4L2_CID_AUDIO_MUTE"},
+#endif
+#if defined(V4L2_CID_AUDIO_LOUDNESS)
+{V4L2_CID_AUDIO_LOUDNESS, "V4L2_CID_AUDIO_LOUDNESS"},
+#endif
+#if defined(V4L2_CID_BLACK_LEVEL)
+{V4L2_CID_BLACK_LEVEL, "V4L2_CID_BLACK_LEVEL"},
+#endif
+#if defined(V4L2_CID_AUTO_WHITE_BALANCE)
+{V4L2_CID_AUTO_WHITE_BALANCE, "V4L2_CID_AUTO_WHITE_BALANCE"},
+#endif
+#if defined(V4L2_CID_DO_WHITE_BALANCE)
+{V4L2_CID_DO_WHITE_BALANCE, "V4L2_CID_DO_WHITE_BALANCE"},
+#endif
+#if defined(V4L2_CID_RED_BALANCE)
+{V4L2_CID_RED_BALANCE, "V4L2_CID_RED_BALANCE"},
+#endif
+#if defined(V4L2_CID_BLUE_BALANCE)
+{V4L2_CID_BLUE_BALANCE, "V4L2_CID_BLUE_BALANCE"},
+#endif
+#if defined(V4L2_CID_GAMMA)
+{V4L2_CID_GAMMA, "V4L2_CID_GAMMA"},
+#endif
+#if defined(V4L2_CID_WHITENESS)
+{V4L2_CID_WHITENESS, "V4L2_CID_WHITENESS"},
+#endif
+#if defined(V4L2_CID_EXPOSURE)
+{V4L2_CID_EXPOSURE, "V4L2_CID_EXPOSURE"},
+#endif
+#if defined(V4L2_CID_AUTOGAIN)
+{V4L2_CID_AUTOGAIN, "V4L2_CID_AUTOGAIN"},
+#endif
+#if defined(V4L2_CID_GAIN)
+{V4L2_CID_GAIN, "V4L2_CID_GAIN"},
+#endif
+#if defined(V4L2_CID_HFLIP)
+{V4L2_CID_HFLIP, "V4L2_CID_HFLIP"},
+#endif
+#if defined(V4L2_CID_VFLIP)
+{V4L2_CID_VFLIP, "V4L2_CID_VFLIP"},
+#endif
+#if defined(V4L2_CID_HCENTER)
+{V4L2_CID_HCENTER, "V4L2_CID_HCENTER"},
+#endif
+#if defined(V4L2_CID_VCENTER)
+{V4L2_CID_VCENTER, "V4L2_CID_VCENTER"},
+#endif
+#if defined(V4L2_CID_POWER_LINE_FREQUENCY)
+{V4L2_CID_POWER_LINE_FREQUENCY, "V4L2_CID_POWER_LINE_FREQUENCY"},
+#endif
+#if defined(V4L2_CID_HUE_AUTO)
+{V4L2_CID_HUE_AUTO, "V4L2_CID_HUE_AUTO"},
+#endif
+#if defined(V4L2_CID_WHITE_BALANCE_TEMPERATURE)
+{V4L2_CID_WHITE_BALANCE_TEMPERATURE, "V4L2_CID_WHITE_BALANCE_TEMPERATURE"},
+#endif
+#if defined(V4L2_CID_SHARPNESS)
+{V4L2_CID_SHARPNESS, "V4L2_CID_SHARPNESS"},
+#endif
+#if defined(V4L2_CID_BACKLIGHT_COMPENSATION)
+{V4L2_CID_BACKLIGHT_COMPENSATION, "V4L2_CID_BACKLIGHT_COMPENSATION"},
+#endif
+#if defined(V4L2_CID_CHROMA_AGC)
+{V4L2_CID_CHROMA_AGC, "V4L2_CID_CHROMA_AGC"},
+#endif
+#if defined(V4L2_CID_COLOR_KILLER)
+{V4L2_CID_COLOR_KILLER, "V4L2_CID_COLOR_KILLER"},
+#endif
+#if defined(V4L2_CID_LASTP1)
+{V4L2_CID_LASTP1, "V4L2_CID_LASTP1"},
+#endif
+#if defined(V4L2_CID_CAMERA_CLASS)
+{V4L2_CID_CAMERA_CLASS, "V4L2_CID_CAMERA_CLASS"},
+#endif
+#if defined(V4L2_CID_EXPOSURE_AUTO)
+{V4L2_CID_EXPOSURE_AUTO, "V4L2_CID_EXPOSURE_AUTO"},
+#endif
+#if defined(V4L2_CID_EXPOSURE_ABSOLUTE)
+{V4L2_CID_EXPOSURE_ABSOLUTE, "V4L2_CID_EXPOSURE_ABSOLUTE"},
+#endif
+#if defined(V4L2_CID_EXPOSURE_AUTO_PRIORITY)
+{V4L2_CID_EXPOSURE_AUTO_PRIORITY, "V4L2_CID_EXPOSURE_AUTO_PRIORITY"},
+#endif
+#if defined(V4L2_CID_PAN_RELATIVE)
+{V4L2_CID_PAN_RELATIVE, "V4L2_CID_PAN_RELATIVE"},
+#endif
+#if defined(V4L2_CID_TILT_RELATIVE)
+{V4L2_CID_TILT_RELATIVE, "V4L2_CID_TILT_RELATIVE"},
+#endif
+#if defined(V4L2_CID_PAN_RESET)
+{V4L2_CID_PAN_RESET, "V4L2_CID_PAN_RESET"},
+#endif
+#if defined(V4L2_CID_TILT_RESET)
+{V4L2_CID_TILT_RESET, "V4L2_CID_TILT_RESET"},
+#endif
+#if defined(V4L2_CID_PAN_ABSOLUTE)
+{V4L2_CID_PAN_ABSOLUTE, "V4L2_CID_PAN_ABSOLUTE"},
+#endif
+#if defined(V4L2_CID_TILT_ABSOLUTE)
+{V4L2_CID_TILT_ABSOLUTE, "V4L2_CID_TILT_ABSOLUTE"},
+#endif
+#if defined(V4L2_CID_FOCUS_ABSOLUTE)
+{V4L2_CID_FOCUS_ABSOLUTE, "V4L2_CID_FOCUS_ABSOLUTE"},
+#endif
+#if defined(V4L2_CID_FOCUS_RELATIVE)
+{V4L2_CID_FOCUS_RELATIVE, "V4L2_CID_FOCUS_RELATIVE"},
+#endif
+#if defined(V4L2_CID_FOCUS_AUTO)
+{V4L2_CID_FOCUS_AUTO, "V4L2_CID_FOCUS_AUTO"},
+#endif
+{0xFFFFFFFF, ""}
+};
+
+k = 0;
+while (mess[k].name[0]) {
+ if (wot == mess[k].command) {
+ JOT(8, "ioctl 0x%08X is %s\n", \
+ mess[k].command, &mess[k].name[0]);
+ return 0;
+ }
+ k++;
+}
+JOT(8, "cid 0x%08X is not in videodev2.h\n", wot);
+return -1;
+}
+/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_ioctl.h b/drivers/staging/easycap/easycap_ioctl.h
new file mode 100644
index 00000000000..210cd627235
--- /dev/null
+++ b/drivers/staging/easycap/easycap_ioctl.h
@@ -0,0 +1,28 @@
+/*****************************************************************************
+* *
+* easycap_ioctl.h *
+* *
+*****************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+extern struct easycap_format easycap_format[];
+extern struct v4l2_queryctrl easycap_control[];
diff --git a/drivers/staging/easycap/easycap_low.c b/drivers/staging/easycap/easycap_low.c
new file mode 100644
index 00000000000..ad1fc4cc471
--- /dev/null
+++ b/drivers/staging/easycap/easycap_low.c
@@ -0,0 +1,1041 @@
+/*****************************************************************************
+* *
+* *
+* easycap_low.c *
+* *
+* *
+*****************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+/*
+ * ACKNOWLEDGEMENTS AND REFERENCES
+ * -------------------------------
+ * This driver makes use of register information contained in the Syntek
+ * Semicon DC-1125 driver hosted at
+ * http://sourceforge.net/projects/syntekdriver/.
+ * Particularly useful has been a patch to the latter driver provided by
+ * Ivor Hewitt in January 2009. The NTSC implementation is taken from the
+ * work of Ben Trask.
+*/
+/****************************************************************************/
+
+#include "easycap_debug.h"
+#include "easycap.h"
+
+/*--------------------------------------------------------------------------*/
+const struct stk1160config { int reg; int set; } stk1160config[256] = {
+ {0x000, 0x0098},
+ {0x002, 0x0093},
+
+ {0x001, 0x0003},
+ {0x003, 0x0080},
+ {0x00D, 0x0000},
+ {0x00F, 0x0002},
+ {0x018, 0x0010},
+ {0x019, 0x0000},
+ {0x01A, 0x0014},
+ {0x01B, 0x000E},
+ {0x01C, 0x0046},
+
+ {0x100, 0x0033},
+ {0x103, 0x0000},
+ {0x104, 0x0000},
+ {0x105, 0x0000},
+ {0x106, 0x0000},
+
+#if defined(PREFER_NTSC)
+
+#undef OLDMARGIN
+#if defined(OLDMARGIN)
+ {0x110, 0x0008},
+#else
+ {0x110, 0x0014},
+#endif /*OLDMARGIN*/
+
+ {0x111, 0x0000},
+ {0x112, 0x0003},
+ {0x113, 0x0000},
+
+#if defined(OLDMARGIN)
+ {0x114, 0x0508},
+#else
+ {0x114, 0x0514},
+#endif /*OLDMARGIN*/
+
+ {0x115, 0x0005},
+ {0x116, 0x00F3},
+ {0x117, 0x0000},
+
+#else /* ! PREFER_NTSC*/
+
+#if defined(OLDMARGIN)
+ {0x110, 0x0008},
+#else
+ {0x110, 0x0014},
+#endif /*OLDMARGIN*/
+
+ {0x111, 0x0000},
+ {0x112, 0x0020},
+ {0x113, 0x0000},
+
+#if defined(OLDMARGIN)
+ {0x114, 0x0508},
+#else
+ {0x114, 0x0514},
+#endif /*OLDMARGIN*/
+
+ {0x115, 0x0005},
+ {0x116, 0x0110},
+ {0x117, 0x0001},
+
+#endif /* ! PREFER_NTSC*/
+
+ {0x202, 0x000F},
+ {0x203, 0x004A},
+ {0x2FF, 0x0000},
+/*---------------------------------------------------------------------------*/
+ {0xFFF, 0xFFFF}
+ };
+/*--------------------------------------------------------------------------*/
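+/*
+ * BOTH CONFIGURATION TABLES ARE TERMINATED BY A SENTINEL ENTRY: {0xFFF,
+ * 0xFFFF} FOR stk1160config[] ABOVE AND {0xFF, 0xFF} FOR saa7113config[]
+ * BELOW.  setup_stk(), check_stk(), setup_saa() AND check_saa() WALK THE
+ * TABLES UNTIL THEY MEET THE SENTINEL REGISTER VALUE, AND check_stk()
+ * ADDITIONALLY TREATS set==0xFFFF AS "DO NOT VERIFY".
+ */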
+const struct saa7113config { int reg; int set; } saa7113config[256] = {
+ {0x01, 0x08},
+ {0x02, 0x80},
+ {0x03, 0x33},
+ {0x04, 0x00},
+ {0x05, 0x00},
+ {0x06, 0xE9},
+ {0x07, 0x0D},
+#if defined(PREFER_NTSC)
+ {0x08, 0x78},
+#else
+ {0x08, 0x38},
+#endif /* ! PREFER_NTSC*/
+ {0x09, 0x00},
+ {0x0A, SAA_0A_DEFAULT},
+ {0x0B, SAA_0B_DEFAULT},
+ {0x0C, SAA_0C_DEFAULT},
+ {0x0D, SAA_0D_DEFAULT},
+ {0x0E, 0x01},
+ {0x0F, 0x36},
+ {0x10, 0x00},
+ {0x11, 0x0C},
+ {0x12, 0xE7},
+ {0x13, 0x00},
+ {0x15, 0x00},
+ {0x16, 0x00},
+#if defined(PREFER_NTSC)
+ {0x40, 0x82},
+#else
+ {0x40, 0x02},
+#endif /* ! PREFER_NTSC*/
+ {0x41, 0xFF},
+ {0x42, 0xFF},
+ {0x43, 0xFF},
+ {0x44, 0xFF},
+ {0x45, 0xFF},
+ {0x46, 0xFF},
+ {0x47, 0xFF},
+ {0x48, 0xFF},
+ {0x49, 0xFF},
+ {0x4A, 0xFF},
+ {0x4B, 0xFF},
+ {0x4C, 0xFF},
+ {0x4D, 0xFF},
+ {0x4E, 0xFF},
+ {0x4F, 0xFF},
+ {0x50, 0xFF},
+ {0x51, 0xFF},
+ {0x52, 0xFF},
+ {0x53, 0xFF},
+ {0x54, 0xFF},
+ {0x55, 0xFF},
+ {0x56, 0xFF},
+ {0x57, 0xFF},
+ {0x58, 0x40},
+ {0x59, 0x54},
+#if defined(PREFER_NTSC)
+ {0x5A, 0x0A},
+#else
+ {0x5A, 0x07},
+#endif /* ! PREFER_NTSC*/
+ {0x5B, 0x83},
+ {0xFF, 0xFF}
+ };
+/*--------------------------------------------------------------------------*/
+
+/****************************************************************************/
+int
+confirm_resolution(struct usb_device *p)
+{
+__u8 get0, get1, get2, get3, get4, get5, get6, get7;
+GET(p, 0x0110, &get0);
+GET(p, 0x0111, &get1);
+GET(p, 0x0112, &get2);
+GET(p, 0x0113, &get3);
+GET(p, 0x0114, &get4);
+GET(p, 0x0115, &get5);
+GET(p, 0x0116, &get6);
+GET(p, 0x0117, &get7);
+JOT(8, "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X\n", \
+ get0, get1, get2, get3, get4, get5, get6, get7);
+JOT(8, "....cf PAL_720x526: " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X\n", \
+ 0x000, 0x000, 0x001, 0x000, 0x5A0, 0x005, 0x121, 0x001);
+JOT(8, "....cf PAL_704x526: " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X\n", \
+ 0x004, 0x000, 0x001, 0x000, 0x584, 0x005, 0x121, 0x001);
+JOT(8, "....cf VGA_640x480: " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X, " \
+ "0x%03X, 0x%03X\n", \
+ 0x008, 0x000, 0x020, 0x000, 0x508, 0x005, 0x110, 0x001);
+return 0;
+}
+/****************************************************************************/
+int
+confirm_stream(struct usb_device *p)
+{
+__u16 get2;
+__u8 igot;
+
+GET(p, 0x0100, &igot); get2 = 0x80 & igot;
+if (0x80 == get2)
+ JOT(8, "confirm_stream: OK\n");
+else
+ JOT(8, "confirm_stream: STUCK\n");
+return 0;
+}
+/****************************************************************************/
+int
+setup_stk(struct usb_device *p)
+{
+int i0;
+
+i0 = 0;
+while (0xFFF != stk1160config[i0].reg) {
+ SET(p, stk1160config[i0].reg, stk1160config[i0].set);
+ i0++;
+ }
+
+write_300(p);
+
+return 0;
+}
+/****************************************************************************/
+int
+setup_saa(struct usb_device *p)
+{
+int i0, ir;
+
+
+set2to78(p);
+
+
+i0 = 0;
+while (0xFF != saa7113config[i0].reg) {
+ ir = write_saa(p, saa7113config[i0].reg, saa7113config[i0].set);
+ i0++;
+ }
+return 0;
+}
+/****************************************************************************/
+int
+write_000(struct usb_device *p, __u16 set2, __u16 set0)
+{
+__u8 igot0, igot2;
+
+GET(p, 0x0002, &igot2);
+GET(p, 0x0000, &igot0);
+SET(p, 0x0002, set2);
+SET(p, 0x0000, set0);
+return 0;
+}
+/****************************************************************************/
+int
+write_saa(struct usb_device *p, __u16 reg0, __u16 set0)
+{
+SET(p, 0x200, 0x00);
+SET(p, 0x204, reg0);
+SET(p, 0x205, set0);
+SET(p, 0x200, 0x01);
+return wait_i2c(p);
+}
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * REGISTER 500: SETTING VALUE TO 0x008B READS FROM VT1612A (?)
+ * REGISTER 500: SETTING VALUE TO 0x008C WRITES TO VT1612A
+ * REGISTER 502: LEAST SIGNIFICANT BYTE OF VALUE TO SET
+ * REGISTER 503: MOST SIGNIFICANT BYTE OF VALUE TO SET
+ * REGISTER 504: TARGET ADDRESS ON VT1612A
+ */
+/*--------------------------------------------------------------------------*/
+int
+write_vt(struct usb_device *p, __u16 reg0, __u16 set0)
+{
+__u8 igot;
+__u16 got502, got503;
+__u16 set502, set503;
+
+SET(p, 0x0504, reg0);
+SET(p, 0x0500, 0x008B);
+
+GET(p, 0x0502, &igot); got502 = (0xFF & igot);
+GET(p, 0x0503, &igot); got503 = (0xFF & igot);
+
+JOT(16, "write_vt(., 0x%04X, 0x%04X): was 0x%04X\n", \
+ reg0, set0, ((got503 << 8) | got502));
+
+set502 = (0x00FF & set0);
+set503 = ((0xFF00 & set0) >> 8);
+
+SET(p, 0x0504, reg0);
+SET(p, 0x0502, set502);
+SET(p, 0x0503, set503);
+SET(p, 0x0500, 0x008C);
+
+return 0;
+}
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * REGISTER 500: SETTING VALUE TO 0x008B READS FROM VT1612A (?)
+ * REGISTER 500: SETTING VALUE TO 0x008C WRITES TO VT1612A
+ * REGISTER 502: LEAST SIGNIFICANT BYTE OF VALUE TO GET
+ * REGISTER 503: MOST SIGNIFICANT BYTE OF VALUE TO GET
+ * REGISTER 504: TARGET ADDRESS ON VT1612A
+ */
+/*--------------------------------------------------------------------------*/
+int
+read_vt(struct usb_device *p, __u16 reg0)
+{
+__u8 igot;
+__u16 got502, got503;
+
+SET(p, 0x0504, reg0);
+SET(p, 0x0500, 0x008B);
+
+GET(p, 0x0502, &igot); got502 = (0xFF & igot);
+GET(p, 0x0503, &igot); got503 = (0xFF & igot);
+
+JOT(16, "read_vt(., 0x%04X): has 0x%04X\n", reg0, ((got503 << 8) | got502));
+
+return (got503 << 8) | got502;
+}
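+/*---------------------------------------------------------------------------*/
+/*
+ * A MINIMAL READ-MODIFY-WRITE SKETCH USING write_vt() AND read_vt() ABOVE,
+ * HERE SETTING THE MUTE BIT (0x8000) OF AN ARBITRARY VT1612A REGISTER reg;
+ * THE CHOICE OF REGISTER IS ILLUSTRATIVE ONLY, AND audio_setup() AND
+ * audio_gainset() BELOW SHOW THE REGISTERS THE DRIVER ACTUALLY TOUCHES:
+ *
+ *     int was = read_vt(p, reg);
+ *     if (0 <= was)
+ *             write_vt(p, reg, (__u16)(0x8000 | was));
+ */
+/*---------------------------------------------------------------------------*/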
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * THESE APPEAR TO HAVE NO EFFECT ON EITHER VIDEO OR AUDIO.
+ */
+/*--------------------------------------------------------------------------*/
+int
+write_300(struct usb_device *p)
+{
+SET(p, 0x300, 0x0012);
+SET(p, 0x350, 0x002D);
+SET(p, 0x351, 0x0001);
+SET(p, 0x352, 0x0000);
+SET(p, 0x353, 0x0000);
+SET(p, 0x300, 0x0080);
+return 0;
+}
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * NOTE: THE FOLLOWING IS NOT CHECKED:
+ * REGISTER 0x0F, WHICH IS INVOLVED IN CHROMINANCE AUTOMATIC GAIN CONTROL.
+ */
+/*--------------------------------------------------------------------------*/
+int
+check_saa(struct usb_device *p)
+{
+int i0, ir, rc;
+i0 = 0;
+
+rc = 0;
+while (0xFF != saa7113config[i0].reg) {
+ if (0x0F == saa7113config[i0].reg) {
+ i0++; continue;
+ }
+
+ ir = read_saa(p, saa7113config[i0].reg);
+ if (ir != saa7113config[i0].set) {
+ SAY("SAA register 0x%02X has 0x%02X, expected 0x%02X\n", \
+ saa7113config[i0].reg, ir, saa7113config[i0].set);
+ rc--;
+ }
+ i0++;
+}
+if (-8 > rc)
+ return rc;
+else
+ return 0;
+}
+/****************************************************************************/
+int
+merit_saa(struct usb_device *p)
+{
+int rc;
+
+rc = read_saa(p, 0x1F);
+if ((0 > rc) || (0x02 & rc))
+	return 1;
+else
+ return 0;
+}
+/****************************************************************************/
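+/*--------------------------------------------------------------------------*/
+/*
+ * ready_saa() POLLS SAA7113 STATUS REGISTER 0x1F UP TO max TIMES AT 100ms
+ * INTERVALS.  AS USED HERE, BIT 0x01 APPARENTLY SIGNALS A LOCKED INPUT AND
+ * BIT 0x40 APPARENTLY SIGNALS LOSS OF HORIZONTAL LOCK, SO THE LOOP EXITS
+ * ONLY WHEN THE FORMER IS SET AND THE LATTER IS CLEAR;  BIT 0x20
+ * DISTINGUISHES 60 Hz FROM 50 Hz AND BIT 0x80 REPORTS INTERLACING, AS
+ * LOGGED BELOW.
+ */
+/*--------------------------------------------------------------------------*/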
+int
+ready_saa(struct usb_device *p)
+{
+int j, rc;
+static int max = 10;
+
+j = 0;
+while (max > j) {
+ rc = read_saa(p, 0x1F);
+ if (0 <= rc) {
+ if ((1 == (0x01 & rc))&&(0 == (0x40 & rc)))
+ break;
+ }
+ msleep(100); j++;
+}
+if (max == j)
+ return -1;
+else {
+ if (0x20 & rc)
+ JOT(8, "hardware detects 60 Hz\n");
+ else
+ JOT(8, "hardware detects 50 Hz\n");
+ if (0x80 & rc)
+ JOT(8, "hardware detects interlacing\n");
+ else
+ JOT(8, "hardware detects no interlacing\n");
+}
+return 0;
+}
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * NOTE: THE FOLLOWING ARE NOT CHECKED:
+ * REGISTERS 0x000, 0x002: FUNCTIONALITY IS NOT KNOWN
+ * REGISTER 0x100: ACCEPT ALSO (0x80 | stk1160config[.].set)
+ */
+/*--------------------------------------------------------------------------*/
+int
+check_stk(struct usb_device *p)
+{
+int i0, ir;
+i0 = 0;
+while (0xFFF != stk1160config[i0].reg) {
+ if (0x000 == stk1160config[i0].reg) {
+ i0++; continue;
+ }
+ if (0x002 == stk1160config[i0].reg) {
+ i0++; continue;
+ }
+
+ ir = read_stk(p, stk1160config[i0].reg);
+
+ if (0x100 == stk1160config[i0].reg) {
+ if ((ir != (0xFF & stk1160config[i0].set)) && \
+ (ir != (0x80 | (0xFF & stk1160config[i0].set))) && \
+ (0xFFFF != stk1160config[i0].set)) {
+ SAY("STK register 0x%03X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ stk1160config[i0].reg, ir, \
+ stk1160config[i0].set);
+ }
+ i0++; continue;
+ }
+
+ if ((ir != (0xFF & stk1160config[i0].set)) && \
+ (0xFFFF != stk1160config[i0].set)) {
+ SAY("STK register 0x%03X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ stk1160config[i0].reg, ir, \
+ stk1160config[i0].set);
+ }
+ i0++;
+ }
+return 0;
+}
+/****************************************************************************/
+int
+read_saa(struct usb_device *p, __u16 reg0)
+{
+__u8 igot;
+
+SET(p, 0x208, reg0);
+SET(p, 0x200, 0x20);
+if (0 != wait_i2c(p))
+ return -1;
+igot = 0;
+GET(p, 0x0209, &igot);
+return igot;
+}
+/****************************************************************************/
+int
+read_stk(struct usb_device *p, __u32 reg0)
+{
+__u8 igot;
+
+igot = 0;
+GET(p, reg0, &igot);
+return igot;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * HARDWARE USERSPACE INPUT NUMBER PHYSICAL INPUT DRIVER input VALUE
+ *
+ * CVBS+S-VIDEO 0 or 1 CVBS 1
+ * FOUR-CVBS 0 or 1 CVBS1 1
+ * FOUR-CVBS 2 CVBS2 2
+ * FOUR-CVBS 3 CVBS3 3
+ * FOUR-CVBS 4 CVBS4 4
+ * CVBS+S-VIDEO 5 S-VIDEO 5
+ *
+ * WHEN 5==input THE ARGUMENT mode MUST ALSO BE SUPPLIED:
+ *
+ * mode 7 => GAIN TO BE SET EXPLICITLY USING REGISTER 0x05 (UNTESTED)
+ * mode 9 => USE AUTOMATIC GAIN CONTROL (DEFAULT)
+ *
+*/
+/*---------------------------------------------------------------------------*/
+int
+select_input(struct usb_device *p, int input, int mode)
+{
+
+stop_100(p);
+
+msleep(20);
+switch (input) {
+case 0:
+case 1: {
+ SET(p, 0x0000, 0x0098); break;
+}
+case 2: {
+ SET(p, 0x0000, 0x0090); break;
+}
+case 3: {
+ SET(p, 0x0000, 0x0088); break;
+}
+case 4: {
+ SET(p, 0x0000, 0x0080); break;
+}
+case 5: {
+ if (9 != mode)
+ mode = 7;
+ switch (mode) {
+ case 7:
+ {
+ if (0 != write_saa(p, 0x02, 0x87)) {
+ SAY("ERROR: failed to set SAA " \
+ "register 0x02 for input " \
+ "%i\n", input);
+ }
+ if (0 != write_saa(p, 0x05, 0xFF)) {
+ SAY("ERROR: failed to set SAA " \
+ "register 0x05 for input " \
+ "%i\n", input);
+ }
+ break;
+ }
+ case 9:
+ {
+ if (0 != write_saa(p, 0x02, 0x89)) {
+ SAY("ERROR: failed to set SAA " \
+ "register 0x02 for input " \
+ "%i\n", input);
+ }
+ if (0 != write_saa(p, 0x05, 0x00)) {
+ SAY("ERROR: failed to set SAA " \
+ "register 0x05 for input " \
+ "%i\n", input);
+ }
+ break;
+ }
+ default:
+ {
+ SAY("MISTAKE: bad mode: %i\n", mode);
+ return -1;
+ }
+ }
+ if (0 != write_saa(p, 0x04, 0x00)) {
+ SAY("ERROR: failed to set SAA register 0x04 " \
+ "for input %i\n", input);
+ }
+ if (0 != write_saa(p, 0x09, 0x80)) {
+ SAY("ERROR: failed to set SAA register 0x09 " \
+ "for input %i\n", input);
+ }
+ break;
+}
+default:
+ {
+ SAY("ERROR: bad input: %i\n", input);
+ return -1;
+}
+}
+msleep(20);
+SET(p, 0x0002, 0x0093);
+msleep(20);
+
+start_100(p);
+
+return 0;
+}
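+/*---------------------------------------------------------------------------*/
+/*
+ * A MINIMAL USAGE SKETCH FOR select_input(), FOLLOWING THE TABLE ABOVE
+ * (p IS THE struct usb_device POINTER).  THE mode ARGUMENT IS CONSULTED
+ * ONLY FOR input 5 (S-VIDEO);  FOR THE CVBS INPUTS IT IS IGNORED:
+ *
+ *     select_input(p, 1, 9);     CVBS (OR CVBS1 ON THE FOUR-CVBS HARDWARE)
+ *     select_input(p, 5, 9);     S-VIDEO WITH AUTOMATIC GAIN CONTROL
+ *     select_input(p, 5, 7);     S-VIDEO WITH GAIN SET VIA SAA REGISTER 0x05
+ */
+/*---------------------------------------------------------------------------*/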
+/****************************************************************************/
+int
+set_resolution(struct usb_device *p, \
+ __u16 set0, __u16 set1, __u16 set2, __u16 set3)
+{
+__u16 u0x0111, u0x0113, u0x0115, u0x0117;
+
+u0x0111 = ((0xFF00 & set0) >> 8);
+u0x0113 = ((0xFF00 & set1) >> 8);
+u0x0115 = ((0xFF00 & set2) >> 8);
+u0x0117 = ((0xFF00 & set3) >> 8);
+
+SET(p, 0x0110, (0x00FF & set0));
+SET(p, 0x0111, u0x0111);
+SET(p, 0x0112, (0x00FF & set1));
+SET(p, 0x0113, u0x0113);
+SET(p, 0x0114, (0x00FF & set2));
+SET(p, 0x0115, u0x0115);
+SET(p, 0x0116, (0x00FF & set3));
+SET(p, 0x0117, u0x0117);
+
+return 0;
+}
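+/*---------------------------------------------------------------------------*/
+/*
+ * WORKED EXAMPLE FOR set_resolution():  THE LOW BYTE OF EACH 16-BIT ARGUMENT
+ * GOES TO THE EVEN REGISTER AND THE HIGH BYTE TO THE FOLLOWING ODD REGISTER.
+ * FOR INSTANCE
+ *
+ *     set_resolution(p, 0x0000, 0x0001, 0x05A0, 0x0121);
+ *
+ * WRITES 0x110=0x00, 0x111=0x00, 0x112=0x01, 0x113=0x00, 0x114=0xA0,
+ * 0x115=0x05, 0x116=0x21, 0x117=0x01.  THESE PARTICULAR NUMBERS ARE TAKEN
+ * FROM THE PAL_720x526 LINE LOGGED BY confirm_resolution() ABOVE AND ARE
+ * ILLUSTRATIVE ONLY;  0x05A0 IS 1440, APPARENTLY TWICE THE 720-PIXEL WIDTH,
+ * I.E. TWO BYTES PER PIXEL PER LINE.
+ */
+/*---------------------------------------------------------------------------*/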
+/****************************************************************************/
+int
+start_100(struct usb_device *p)
+{
+__u16 get0;
+__u8 igot;
+
+GET(p, 0x0100, &igot); get0 = igot;
+msleep(0x1f4);
+SET(p, 0x0100, (0x80 | get0));
+msleep(0x1f4);
+return 0;
+}
+/****************************************************************************/
+int
+stop_100(struct usb_device *p)
+{
+__u16 get0;
+__u8 igot;
+
+GET(p, 0x0100, &igot); get0 = igot;
+msleep(0x1f4);
+SET(p, 0x0100, (0x7F & get0));
+msleep(0x1f4);
+return 0;
+}
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * FUNCTION wait_i2c() RETURNS 0 ON SUCCESS, -1 ON TIMEOUT, OR A POSITIVE
+ * VALUE (ONE LESS THAN THE STATUS READ FROM REGISTER 0x0201) WHEN THE
+ * TRANSACTION ENDS IN AN UNEXPECTED STATE
+*/
+/*--------------------------------------------------------------------------*/
+int
+wait_i2c(struct usb_device *p)
+{
+__u16 get0;
+__u8 igot;
+const int max = 4;
+int k;
+
+for (k = 0; k < max; k++) {
+ GET(p, 0x0201, &igot); get0 = igot;
+ switch (get0) {
+ case 0x04:
+ case 0x01: {
+ return 0;
+ }
+ case 0x00: {
+ msleep(10);
+ continue;
+ }
+ default: {
+ return get0 - 1;
+ }
+ }
+}
+return -1;
+}
+/****************************************************************************/
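+/*--------------------------------------------------------------------------*/
+/*
+ * regset() AND regget() BELOW ISSUE VENDOR-SPECIFIC CONTROL TRANSFERS TO
+ * THE STK1160 REGISTER FILE:  bRequest 0x01 WRITES THE 16-BIT value TO THE
+ * REGISTER GIVEN BY wIndex, AND bRequest 0x00 READS IT BACK.  UNLESS
+ * NOREADBACK IS DEFINED, regset() READS THE REGISTER BACK AND LOGS ANY
+ * MISMATCH.  PRESUMABLY THE SET() AND GET() MACROS USED THROUGHOUT THIS
+ * FILE, WHICH ARE DEFINED ELSEWHERE (easycap.h OR easycap_debug.h), WRAP
+ * THESE TWO FUNCTIONS.
+ */
+/*--------------------------------------------------------------------------*/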
+int
+regset(struct usb_device *pusb_device, __u16 index, __u16 value)
+{
+__u16 igot;
+int rc0, rc1;
+
+if (!pusb_device)
+ return -EFAULT;
+
+rc1 = 0; igot = 0;
+rc0 = usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
+ (__u8)0x01, \
+ (__u8)(USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE), \
+ (__u16)value, \
+ (__u16)index, \
+ (void *)NULL, \
+ (__u16)0, \
+ (int)500);
+
+#if defined(NOREADBACK)
+#
+#else
+rc1 = usb_control_msg(pusb_device, usb_rcvctrlpipe(pusb_device, 0), \
+ (__u8)0x00, \
+ (__u8)(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE), \
+ (__u16)0x00, \
+ (__u16)index, \
+ (void *)&igot, \
+ (__u16)sizeof(__u16), \
+ (int)50000);
+igot = 0xFF & igot;
+switch (index) {
+case 0x000:
+case 0x500:
+case 0x502:
+case 0x503:
+case 0x504:
+case 0x506:
+case 0x507: {
+ break;
+}
+case 0x204:
+case 0x205:
+case 0x350:
+case 0x351: {
+ if (0 != igot) {
+ JOT(8, "unexpected 0x%02X for STK register 0x%03X\n", \
+ igot, index);
+ }
+break;
+}
+case 0x114:
+case 0x116: {
+ if ((0xFF & value) != igot) {
+ JOT(8, "unexpected 0x%02X != 0x%02X " \
+ "for STK register 0x%03X\n", \
+ igot, value, index);
+ }
+break;
+}
+case 0x200: {
+ if (0 == igot)
+ break;
+}
+default: {
+ if (value != igot) {
+ JOT(8, "unexpected 0x%02X != 0x%02X " \
+ "for STK register 0x%03X\n", \
+ igot, value, index);
+ }
+break;
+}
+}
+#endif /* ! NOREADBACK*/
+
+return (0 > rc0) ? rc0 : rc1;
+}
+/*****************************************************************************/
+int
+regget(struct usb_device *pusb_device, __u16 index, void *pvoid)
+{
+int ir;
+
+if (!pusb_device)
+ return -EFAULT;
+
+ir = usb_control_msg(pusb_device, usb_rcvctrlpipe(pusb_device, 0), \
+ (__u8)0x00, \
+ (__u8)(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE), \
+ (__u16)0x00, \
+ (__u16)index, \
+ (void *)pvoid, \
+ sizeof(__u8), \
+ (int)50000);
+return 0xFF & ir;
+}
+/*****************************************************************************/
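+/*--------------------------------------------------------------------------*/
+/*
+ * wakeup_device() SENDS THE STANDARD SET_FEATURE(DEVICE_REMOTE_WAKEUP)
+ * REQUEST;  easycap_open() CALLS IT BEFORE CONFIGURING THE STK1160 AND
+ * THE SAA7113.
+ */
+/*--------------------------------------------------------------------------*/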
+int
+wakeup_device(struct usb_device *pusb_device)
+{
+return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
+ (__u8)USB_REQ_SET_FEATURE, \
+ (__u8)(USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE), \
+ USB_DEVICE_REMOTE_WAKEUP, \
+ (__u16)0, \
+ (void *) NULL, \
+ (__u16)0, \
+ (int)50000);
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * IMPORTANT:
+ * THE MESSAGE OF TYPE (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)
+ * CAUSES MUTING IF THE VALUE 0x0100 IS SENT.
+ * TO ENABLE AUDIO THE VALUE 0x0200 MUST BE SENT.
+ */
+/*---------------------------------------------------------------------------*/
+int
+audio_setup(struct easycap *peasycap)
+{
+struct usb_device *pusb_device;
+static __u8 request = 0x01;
+static __u8 requesttype = \
+ (__u8)(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+
+static __u16 value_unmute = 0x0200;
+static __u16 index = 0x0301;
+
+static unsigned char buffer[1];
+static __u16 length = 1;
+int rc, id1, id2;
+
+if (NULL == peasycap)
+ return -EFAULT;
+
+pusb_device = peasycap->pusb_device;
+if (NULL == pusb_device)
+ return -EFAULT;
+
+JOT(8, "%02X %02X %02X %02X %02X %02X %02X %02X\n", \
+ requesttype, request, \
+ (0x00FF & value_unmute), \
+ (0xFF00 & value_unmute) >> 8, \
+ (0x00FF & index), \
+ (0xFF00 & index) >> 8, \
+ (0x00FF & length), \
+ (0xFF00 & length) >> 8);
+
+buffer[0] = 0x01;
+
+rc = usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
+ (__u8)request, \
+ (__u8)requesttype, \
+ (__u16)value_unmute, \
+ (__u16)index, \
+ (void *)&buffer[0], \
+ (__u16)length, \
+ (int)50000);
+
+JOT(8, "0x%02X=buffer\n", *((__u8 *) &buffer[0]));
+if (rc != (int)length)
+ SAY("ERROR: usb_control_msg returned %i\n", rc);
+
+/*--------------------------------------------------------------------------*/
+/*
+ * REGISTER 500: SETTING VALUE TO 0x0094 RESETS AUDIO CONFIGURATION ???
+ * REGISTER 506: ANALOGUE AUDIO ATTENUATOR ???
+ * FOR THE CVBS+S-VIDEO HARDWARE:
+ * SETTING VALUE TO 0x0000 GIVES QUIET SOUND.
+ * THE UPPER BYTE SEEMS TO HAVE NO EFFECT.
+ * FOR THE FOUR-CVBS HARDWARE:
+ * SETTING VALUE TO 0x0000 SEEMS TO HAVE NO EFFECT.
+ * REGISTER 507: ANALOGUE AUDIO PREAMPLIFIER ON/OFF ???
+ * FOR THE CVBS+S-VIDEO HARDWARE:
+ * SETTING VALUE TO 0x0001 GIVES VERY LOUD, DISTORTED SOUND.
+ * THE UPPER BYTE SEEMS TO HAVE NO EFFECT.
+ */
+/*--------------------------------------------------------------------------*/
+
+SET(pusb_device, 0x0500, 0x0094);
+
+SET(pusb_device, 0x0500, 0x008C);
+
+SET(pusb_device, 0x0506, 0x0001);
+SET(pusb_device, 0x0507, 0x0000);
+
+id1 = read_vt(pusb_device, 0x007C);
+id2 = read_vt(pusb_device, 0x007E);
+SAY("0x%04X:0x%04X is audio vendor id\n", id1, id2);
+
+/*---------------------------------------------------------------------------*/
+/*
+* SELECT AUDIO SOURCE "LINE IN" AND SET DEFAULT GAIN TO 0 dB.
+*
+* THESE COMMANDS SEEM TO BE ACCEPTED (THOUGH POSSIBLY IGNORED) EVEN WHEN
+* THERE IS NO SEPARATE AUDIO CHIP PRESENT.
+*/
+/*---------------------------------------------------------------------------*/
+
+write_vt(pusb_device, 0x0002, 0x8000);
+write_vt(pusb_device, 0x001C, 0x8000);
+
+write_vt(pusb_device, 0x000E, 0x0000);
+write_vt(pusb_device, 0x0010, 0x0000);
+write_vt(pusb_device, 0x0012, 0x8000);
+write_vt(pusb_device, 0x0016, 0x0000);
+
+write_vt(pusb_device, 0x001A, 0x0404);
+write_vt(pusb_device, 0x0002, 0x0000);
+write_vt(pusb_device, 0x001C, 0x0000);
+
+check_vt(pusb_device);
+
+return 0;
+}
+/*****************************************************************************/
+int
+check_vt(struct usb_device *pusb_device)
+{
+int igot;
+
+igot = read_vt(pusb_device, 0x0002);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x02\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x02);
+
+igot = read_vt(pusb_device, 0x000E);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x0E\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x0E);
+
+igot = read_vt(pusb_device, 0x0010);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x10\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x10);
+
+igot = read_vt(pusb_device, 0x0012);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x12\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x12);
+
+igot = read_vt(pusb_device, 0x0016);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x16\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x16);
+
+igot = read_vt(pusb_device, 0x001A);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x1A\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x1A);
+
+igot = read_vt(pusb_device, 0x001C);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x1C\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x1C);
+
+return 0;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * NOTE: THIS DOES INCREASE THE VOLUME DRAMATICALLY:
+ * audio_gainset(pusb_device, 0x000F);
+ *
+ * IF 16<loud<=31 VT1612A REGISTER 0x1C IS SET FOR POSITIVE GAIN.
+ * IF loud<=16 VT1612A REGISTER 0x1C IS SET FOR ZERO GAIN.
+ * THERE IS NEVER ANY (ADDITIONAL) ATTENUATION.
+ */
+/*---------------------------------------------------------------------------*/
+int
+audio_gainset(struct usb_device *pusb_device, __s8 loud)
+{
+int igot;
+__u8 u8;
+__u16 mute;
+
+if (16 > loud)
+ loud = 16;
+u8 = 0x000F & (__u8)(loud - 16);
+
+write_vt(pusb_device, 0x0002, 0x8000);
+
+igot = read_vt(pusb_device, 0x001C);
+if (0 > igot) {
+ SAY("ERROR: failed to read VT1612A register 0x1C\n");
+ mute = 0x0000;
+} else
+ mute = 0x8000 & ((unsigned int)igot);
+
+JOT(8, "0x%04X=(mute|u8|(u8<<8))\n", mute | u8 | (u8 << 8));
+
+write_vt(pusb_device, 0x001C, 0x8000);
+write_vt(pusb_device, 0x001C, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0002, 0x0000);
+
+return 0;
+}
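+/*---------------------------------------------------------------------------*/
+/*
+ * WORKED EXAMPLE FOR audio_gainset():  loud IS CLAMPED TO A MINIMUM OF 16,
+ * AND u8 = 0x0F & (loud - 16) IS THE FOUR-BIT GAIN CODE WRITTEN TO BOTH
+ * BYTES (PRESUMABLY THE TWO CHANNELS) OF VT1612A REGISTER 0x1C, WITH THE
+ * EXISTING MUTE BIT PRESERVED:
+ *
+ *     audio_gainset(p, 16) WRITES 0x1C = (mute | 0x0000), I.E. ZERO GAIN;
+ *     audio_gainset(p, 31) WRITES 0x1C = (mute | 0x0F0F), I.E. MAXIMUM GAIN.
+ */
+/*---------------------------------------------------------------------------*/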
+/*****************************************************************************/
+int
+audio_gainget(struct usb_device *pusb_device)
+{
+int igot;
+
+igot = read_vt(pusb_device, 0x001C);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x1C\n");
+return igot;
+}
+/*****************************************************************************/
+int
+set2to78(struct usb_device *p)
+{
+int ir;
+
+msleep(20);
+ir = regset(p, 0x0002, 0x0078);
+if (0 > ir)
+ SAY("ERROR: failed to set register 0x0002 to 0x0078\n");
+msleep(20);
+return ir;
+}
+/*****************************************************************************/
+int
+set2to93(struct usb_device *p)
+{
+int ir;
+
+msleep(20);
+ir = regset(p, 0x0002, 0x0093);
+if (0 > ir)
+	SAY("ERROR: failed to set register 0x0002 to 0x0093\n");
+msleep(20);
+return ir;
+}
+/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_main.c b/drivers/staging/easycap/easycap_main.c
new file mode 100644
index 00000000000..5a4bbd9b453
--- /dev/null
+++ b/drivers/staging/easycap/easycap_main.c
@@ -0,0 +1,4354 @@
+/******************************************************************************
+* *
+* easycap_main.c *
+* *
+* Video driver for EasyCAP USB2.0 Video Capture Device DC60 *
+* *
+* *
+******************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+
+#include "easycap.h"
+#include "easycap_standard.h"
+
+int easycap_debug;
+module_param(easycap_debug, int, S_IRUGO | S_IWUSR);
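+/*---------------------------------------------------------------------------*/
+/*
+ * PRESUMABLY THE JOT(level, ...) DEBUG MACRO (SEE easycap_debug.h) EMITS ITS
+ * MESSAGE ONLY WHEN easycap_debug IS AT LEAST level;  SINCE THE PARAMETER IS
+ * DECLARED S_IWUSR IT CAN BE CHANGED AT RUNTIME THROUGH
+ * /sys/module/easycap/parameters/easycap_debug.
+ */
+/*---------------------------------------------------------------------------*/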
+
+/*---------------------------------------------------------------------------*/
+/*
+ * PARAMETERS APPLICABLE TO ENTIRE DRIVER, I.E. BOTH VIDEO AND AUDIO
+ */
+/*---------------------------------------------------------------------------*/
+struct usb_device_id easycap_usb_device_id_table[] = {
+{ USB_DEVICE(USB_EASYCAP_VENDOR_ID, USB_EASYCAP_PRODUCT_ID) },
+{ }
+};
+MODULE_DEVICE_TABLE(usb, easycap_usb_device_id_table);
+struct usb_driver easycap_usb_driver = {
+.name = "easycap",
+.id_table = easycap_usb_device_id_table,
+.probe = easycap_usb_probe,
+.disconnect = easycap_usb_disconnect,
+};
+/*---------------------------------------------------------------------------*/
+/*
+ * PARAMETERS USED WHEN REGISTERING THE VIDEO INTERFACE
+ *
+ * NOTE: SOME KERNELS IGNORE usb_class_driver.minor_base, AS MENTIONED BY
+ * CORBET ET AL. "LINUX DEVICE DRIVERS", 3rd EDITION, PAGE 253.
+ * THIS IS THE CASE FOR OpenSUSE.
+ */
+/*---------------------------------------------------------------------------*/
+const struct file_operations easycap_fops = {
+ .owner = THIS_MODULE,
+ .open = easycap_open,
+ .release = easycap_release,
+ .unlocked_ioctl = easycap_ioctl,
+ .poll = easycap_poll,
+ .mmap = easycap_mmap,
+ .llseek = no_llseek,
+};
+struct vm_operations_struct easycap_vm_ops = {
+.open = easycap_vma_open,
+.close = easycap_vma_close,
+.fault = easycap_vma_fault,
+};
+struct usb_class_driver easycap_class = {
+.name = "usb/easycap%d",
+.fops = &easycap_fops,
+.minor_base = USB_SKEL_MINOR_BASE,
+};
+
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+#if defined(EASYCAP_NEEDS_V4L2_FOPS)
+const struct v4l2_file_operations v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = easycap_open_noinode,
+ .release = easycap_release_noinode,
+ .unlocked_ioctl = easycap_ioctl,
+ .poll = easycap_poll,
+ .mmap = easycap_mmap,
+};
+#endif /*EASYCAP_NEEDS_V4L2_FOPS*/
+int video_device_many /*=0*/;
+struct video_device *pvideo_array[VIDEO_DEVICE_MANY], *pvideo_device;
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+/*--------------------------------------------------------------------------*/
+/*
+ * PARAMETERS USED WHEN REGISTERING THE AUDIO INTERFACE
+ */
+/*--------------------------------------------------------------------------*/
+const struct file_operations easysnd_fops = {
+ .owner = THIS_MODULE,
+ .open = easysnd_open,
+ .release = easysnd_release,
+ .unlocked_ioctl = easysnd_ioctl,
+ .read = easysnd_read,
+ .llseek = no_llseek,
+};
+struct usb_class_driver easysnd_class = {
+.name = "usb/easysnd%d",
+.fops = &easysnd_fops,
+.minor_base = USB_SKEL_MINOR_BASE,
+};
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * IT IS NOT APPROPRIATE FOR easycap_open() TO SUBMIT THE VIDEO URBS HERE,
+ * BECAUSE THERE WILL ALWAYS BE SUBSEQUENT NEGOTIATION OF TV STANDARD AND
+ * FORMAT BY IOCTL AND IT IS INADVISABLE TO HAVE THE URBS RUNNING WHILE
+ * REGISTERS OF THE SAA7113H ARE BEING MANIPULATED.
+ *
+ * THE SUBMISSION OF VIDEO URBS IS THEREFORE DELAYED UNTIL THE IOCTL COMMAND
+ * STREAMON IS RECEIVED.
+ */
+/*--------------------------------------------------------------------------*/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+int
+easycap_open_noinode(struct file *file)
+{
+return easycap_open((struct inode *)NULL, file);
+}
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+int
+easycap_open(struct inode *inode, struct file *file)
+{
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+struct usb_interface *pusb_interface;
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+struct usb_device *p;
+struct easycap *peasycap;
+int i, k, m, rc;
+
+JOT(4, "\n");
+SAY("==========OPEN=========\n");
+
+peasycap = (struct easycap *)NULL;
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+if ((struct inode *)NULL == inode) {
+ SAY("ERROR: inode is NULL.\n");
+ return -EFAULT;
+}
+pusb_interface = usb_find_interface(&easycap_usb_driver, iminor(inode));
+if (!pusb_interface) {
+ SAY("ERROR: pusb_interface is NULL.\n");
+ return -EFAULT;
+}
+peasycap = usb_get_intfdata(pusb_interface);
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+for (i = 0; i < video_device_many; i++) {
+ pvideo_device = pvideo_array[i];
+ if ((struct video_device *)NULL != pvideo_device) {
+ peasycap = (struct easycap *)video_get_drvdata(pvideo_device);
+ break;
+ }
+}
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+if ((struct easycap *)NULL == peasycap) {
+ SAY("MISTAKE: peasycap is NULL\n");
+ return -EFAULT;
+}
+file->private_data = peasycap;
+/*---------------------------------------------------------------------------*/
+/*
+ * INITIALIZATION
+ */
+/*---------------------------------------------------------------------------*/
+JOT(4, "starting initialization\n");
+
+for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+ for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++)
+ memset(peasycap->frame_buffer[k][m].pgo, 0, PAGE_SIZE);
+}
+p = peasycap->pusb_device;
+if ((struct usb_device *)NULL == p) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+} else {
+ JOT(16, "0x%08lX=peasycap->pusb_device\n", \
+ (long int)peasycap->pusb_device);
+}
+rc = wakeup_device(peasycap->pusb_device);
+if (0 == rc)
+ JOT(8, "wakeup_device() OK\n");
+else {
+ SAY("ERROR: wakeup_device() returned %i\n", rc);
+ return -EFAULT;
+}
+rc = setup_stk(p); peasycap->input = 0;
+if (0 == rc)
+ JOT(8, "setup_stk() OK\n");
+else {
+ SAY("ERROR: setup_stk() returned %i\n", rc);
+ return -EFAULT;
+}
+rc = setup_saa(p);
+if (0 == rc)
+ JOT(8, "setup_saa() OK\n");
+else {
+ SAY("ERROR: setup_saa() returned %i\n", rc);
+ return -EFAULT;
+}
+rc = check_saa(p);
+if (0 == rc)
+ JOT(8, "check_saa() OK\n");
+else if (-8 < rc)
+ SAY("check_saa() returned %i\n", rc);
+else {
+ SAY("ERROR: check_saa() returned %i\n", rc);
+ return -EFAULT;
+}
+peasycap->standard_offset = -1;
+/*---------------------------------------------------------------------------*/
+#if defined(PREFER_NTSC)
+
+rc = adjust_standard(peasycap, V4L2_STD_NTSC_M);
+if (0 == rc)
+ JOT(8, "adjust_standard(.,NTSC_M) OK\n");
+else {
+ SAY("ERROR: adjust_standard(.,NTSC_M) returned %i\n", rc);
+ return -EFAULT;
+}
+rc = adjust_format(peasycap, 640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, \
+ false);
+if (0 <= rc)
+ JOT(8, "adjust_format(.,640,480,UYVY) OK\n");
+else {
+ SAY("ERROR: adjust_format(.,640,480,UYVY) returned %i\n", rc);
+ return -EFAULT;
+}
+
+#else
+
+rc = adjust_standard(peasycap, \
+ (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
+ V4L2_STD_PAL_I | V4L2_STD_PAL_N));
+if (0 == rc)
+ JOT(8, "adjust_standard(.,PAL_BGHIN) OK\n");
+else {
+ SAY("ERROR: adjust_standard(.,PAL_BGHIN) returned %i\n", rc);
+ return -EFAULT;
+}
+rc = adjust_format(peasycap, 640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, \
+ false);
+if (0 <= rc)
+ JOT(8, "adjust_format(.,640,480,uyvy,false) OK\n");
+else {
+ SAY("ERROR: adjust_format(.,640,480,uyvy,false) returned %i\n", rc);
+ return -EFAULT;
+}
+
+#endif /* !PREFER_NTSC*/
+/*---------------------------------------------------------------------------*/
+rc = adjust_brightness(peasycap, -8192);
+if (0 != rc) {
+ SAY("ERROR: adjust_brightness(default) returned %i\n", rc);
+ return -EFAULT;
+}
+rc = adjust_contrast(peasycap, -8192);
+if (0 != rc) {
+ SAY("ERROR: adjust_contrast(default) returned %i\n", rc);
+ return -EFAULT;
+}
+rc = adjust_saturation(peasycap, -8192);
+if (0 != rc) {
+ SAY("ERROR: adjust_saturation(default) returned %i\n", rc);
+ return -EFAULT;
+}
+rc = adjust_hue(peasycap, -8192);
+if (0 != rc) {
+ SAY("ERROR: adjust_hue(default) returned %i\n", rc);
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+rc = usb_set_interface(peasycap->pusb_device, peasycap->video_interface, \
+ peasycap->video_altsetting_on);
+if (0 == rc)
+ JOT(8, "usb_set_interface(.,%i,%i) OK\n", peasycap->video_interface, \
+ peasycap->video_altsetting_on);
+else {
+ SAY("ERROR: usb_set_interface() returned %i\n", rc);
+ return -EFAULT;
+}
+rc = start_100(p);
+if (0 == rc)
+ JOT(8, "start_100() OK\n");
+else {
+ SAY("ERROR: start_100() returned %i\n", rc);
+ return -EFAULT;
+}
+peasycap->video_isoc_sequence = VIDEO_ISOC_BUFFER_MANY - 1;
+peasycap->video_idle = 0;
+peasycap->video_junk = 0;
+for (i = 0; i < 180; i++)
+ peasycap->merit[i] = 0;
+peasycap->video_eof = 0;
+peasycap->audio_eof = 0;
+
+do_gettimeofday(&peasycap->timeval7);
+
+peasycap->fudge = 0;
+
+JOT(4, "finished initialization\n");
+return 0;
+}
+/*****************************************************************************/
+int
+submit_video_urbs(struct easycap *peasycap)
+{
+struct data_urb *pdata_urb;
+struct urb *purb;
+struct list_head *plist_head;
+int j, isbad, m, rc;
+int isbuf;
+
+if ((struct list_head *)NULL == peasycap->purb_video_head) {
+	SAY("ERROR: peasycap->purb_video_head uninitialized\n");
+ return -EFAULT;
+}
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+if (!peasycap->video_isoc_streaming) {
+
+
+
+
+
+
+
+
+ JOT(4, "submission of all video urbs\n");
+ if (0 != ready_saa(peasycap->pusb_device)) {
+ SAY("ERROR: not ready to capture after waiting " \
+ "one second\n");
+ SAY("..... continuing anyway\n");
+ }
+ isbad = 0; m = 0;
+ list_for_each(plist_head, (peasycap->purb_video_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if (NULL != pdata_urb) {
+ purb = pdata_urb->purb;
+ if (NULL != purb) {
+ isbuf = pdata_urb->isbuf;
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = \
+ usb_rcvisocpipe(peasycap->pusb_device,\
+ peasycap->video_endpointnumber);
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = \
+ peasycap->video_isoc_buffer[isbuf].pgo;
+ purb->transfer_buffer_length = \
+ peasycap->video_isoc_buffer_size;
+ purb->complete = easycap_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = \
+ peasycap->video_isoc_framesperdesc;
+
+ for (j = 0; j < peasycap->\
+ video_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].\
+ offset = j * \
+ peasycap->\
+ video_isoc_maxframesize;
+ purb->iso_frame_desc[j].\
+ length = peasycap->\
+ video_isoc_maxframesize;
+ }
+
+ rc = usb_submit_urb(purb, GFP_KERNEL);
+ if (0 != rc) {
+ isbad++;
+ SAY("ERROR: usb_submit_urb() failed " \
+ "for urb with rc:\n");
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n");
+ break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n");
+ break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n");
+ break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n");
+ break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n");
+ break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n");
+ break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n");
+ break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n");
+ break;
+ }
+ default: {
+ SAY("unknown error code %i\n",\
+ rc);
+ break;
+ }
+ }
+ } else {
+ m++;
+ }
+ } else {
+ isbad++;
+ }
+ } else {
+ isbad++;
+ }
+ }
+ if (isbad) {
+ JOT(4, "attempting cleanup instead of submitting\n");
+ list_for_each(plist_head, (peasycap->purb_video_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, \
+ list_head);
+ if (NULL != pdata_urb) {
+ purb = pdata_urb->purb;
+ if (NULL != purb)
+ usb_kill_urb(purb);
+ }
+ }
+ peasycap->video_isoc_streaming = 0;
+ } else {
+ peasycap->video_isoc_streaming = 1;
+ JOT(4, "submitted %i video urbs\n", m);
+ }
+
+
+
+
+
+
+} else {
+ JOT(4, "already streaming video urbs\n");
+}
+return 0;
+}
+/*****************************************************************************/
+int
+kill_video_urbs(struct easycap *peasycap)
+{
+int m;
+struct list_head *plist_head;
+struct data_urb *pdata_urb;
+
+if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
+if (peasycap->video_isoc_streaming) {
+
+
+
+ if ((struct list_head *)NULL != peasycap->purb_video_head) {
+ peasycap->video_isoc_streaming = 0;
+ JOT(4, "killing video urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_video_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, \
+ list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ if ((struct urb *)NULL != pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
+ }
+ }
+ }
+ JOT(4, "%i video urbs killed\n", m);
+ } else {
+ SAY("ERROR: peasycap->purb_video_head is NULL\n");
+ return -EFAULT;
+ }
+} else {
+ JOT(8, "%i=video_isoc_streaming, no video urbs killed\n", \
+ peasycap->video_isoc_streaming);
+}
+return 0;
+}
+/****************************************************************************/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+int
+easycap_release_noinode(struct file *file)
+{
+return easycap_release((struct inode *)NULL, file);
+}
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*--------------------------------------------------------------------------*/
+int
+easycap_release(struct inode *inode, struct file *file)
+{
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+struct easycap *peasycap;
+
+JOT(4, "\n");
+
+peasycap = file->private_data;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL.\n");
+ SAY("ending unsuccessfully\n");
+ return -EFAULT;
+}
+if (0 != kill_video_urbs(peasycap)) {
+ SAY("ERROR: kill_video_urbs() failed\n");
+ return -EFAULT;
+}
+JOT(4, "ending successfully\n");
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+#
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+
+return 0;
+}
+/****************************************************************************/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+int
+videodev_release(struct video_device *pvd)
+{
+struct easycap *peasycap;
+int i, j, k;
+
+JOT(4, "\n");
+
+k = 0;
+for (i = 0; i < video_device_many; i++) {
+ pvideo_device = pvideo_array[i];
+ if ((struct video_device *)NULL != pvideo_device) {
+ if (pvd->minor == pvideo_device->minor) {
+ peasycap = (struct easycap *)\
+ video_get_drvdata(pvideo_device);
+ if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ SAY("ending unsuccessfully\n");
+ return -EFAULT;
+ }
+ if (0 != kill_video_urbs(peasycap)) {
+ SAY("ERROR: kill_video_urbs() failed\n");
+ return -EFAULT;
+ }
+ JOT(4, "freeing video_device structure: " \
+ "/dev/video%i\n", i);
+ kfree((void *)pvideo_device);
+ for (j = i; j < (VIDEO_DEVICE_MANY - 1); j++)
+ pvideo_array[j] = pvideo_array[j + 1];
+ video_device_many--; k++;
+ break;
+ }
+ }
+}
+if (!k) {
+ SAY("ERROR: lost video_device structure for %i=minor\n", pvd->minor);
+ SAY("cannot free: may cause memory leak\n");
+ SAY("ending unsuccessfully\n");
+ return -EFAULT;
+}
+
+JOT(4, "ending successfully\n");
+return 0;
+}
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
+/*
+ * THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect().
+ * BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED.
+ * peasycap->pusb_device IS NO LONGER VALID AND SHOULD HAVE BEEN SET TO NULL.
+ */
+/*---------------------------------------------------------------------------*/
+void
+easycap_delete(struct kref *pkref)
+{
+int k, m, lost;
+int allocation_video_urb, allocation_video_page, allocation_video_struct;
+int allocation_audio_urb, allocation_audio_page, allocation_audio_struct;
+int registered_video, registered_audio;
+struct easycap *peasycap;
+struct data_urb *pdata_urb;
+struct list_head *plist_head, *plist_next;
+
+JOT(4, "\n");
+
+peasycap = container_of(pkref, struct easycap, kref);
+if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL: cannot perform deletions\n");
+ return;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * FREE VIDEO.
+ */
+/*---------------------------------------------------------------------------*/
+if ((struct list_head *)NULL != peasycap->purb_video_head) {
+ JOT(4, "freeing video urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_video_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if (NULL == pdata_urb)
+ JOT(4, "ERROR: pdata_urb is NULL\n");
+ else {
+ if ((struct urb *)NULL != pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = (struct urb *)NULL;
+ peasycap->allocation_video_urb -= 1;
+ m++;
+ }
+ }
+ }
+
+ JOT(4, "%i video urbs freed\n", m);
+/*---------------------------------------------------------------------------*/
+ JOT(4, "freeing video data_urb structures.\n");
+ m = 0;
+ list_for_each_safe(plist_head, plist_next, peasycap->purb_video_head) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ kfree(pdata_urb); pdata_urb = (struct data_urb *)NULL;
+ peasycap->allocation_video_struct -= \
+ sizeof(struct data_urb);
+ m++;
+ }
+ }
+ JOT(4, "%i video data_urb structures freed\n", m);
+ JOT(4, "setting peasycap->purb_video_head=NULL\n");
+ peasycap->purb_video_head = (struct list_head *)NULL;
+ } else {
+JOT(4, "peasycap->purb_video_head is NULL\n");
+}
+/*---------------------------------------------------------------------------*/
+JOT(4, "freeing video isoc buffers.\n");
+m = 0;
+for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
+ if ((void *)NULL != peasycap->video_isoc_buffer[k].pgo) {
+ free_pages((unsigned long)\
+ (peasycap->video_isoc_buffer[k].pgo), \
+ VIDEO_ISOC_ORDER);
+ peasycap->video_isoc_buffer[k].pgo = (void *)NULL;
+ peasycap->allocation_video_page -= \
+ ((unsigned int)(0x01 << VIDEO_ISOC_ORDER));
+ m++;
+ }
+}
+JOT(4, "isoc video buffers freed: %i pages\n", m * (0x01 << VIDEO_ISOC_ORDER));
+/*---------------------------------------------------------------------------*/
+JOT(4, "freeing video field buffers.\n");
+lost = 0;
+for (k = 0; k < FIELD_BUFFER_MANY; k++) {
+ for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if ((void *)NULL != peasycap->field_buffer[k][m].pgo) {
+ free_page((unsigned long)\
+ (peasycap->field_buffer[k][m].pgo));
+ peasycap->field_buffer[k][m].pgo = (void *)NULL;
+ peasycap->allocation_video_page -= 1;
+ lost++;
+ }
+ }
+}
+JOT(4, "video field buffers freed: %i pages\n", lost);
+/*---------------------------------------------------------------------------*/
+JOT(4, "freeing video frame buffers.\n");
+lost = 0;
+for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+ for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if ((void *)NULL != peasycap->frame_buffer[k][m].pgo) {
+ free_page((unsigned long)\
+ (peasycap->frame_buffer[k][m].pgo));
+ peasycap->frame_buffer[k][m].pgo = (void *)NULL;
+ peasycap->allocation_video_page -= 1;
+ lost++;
+ }
+ }
+}
+JOT(4, "video frame buffers freed: %i pages\n", lost);
+/*---------------------------------------------------------------------------*/
+/*
+ * FREE AUDIO.
+ */
+/*---------------------------------------------------------------------------*/
+if ((struct list_head *)NULL != peasycap->purb_audio_head) {
+ JOT(4, "freeing audio urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if (NULL == pdata_urb)
+ JOT(4, "ERROR: pdata_urb is NULL\n");
+ else {
+ if ((struct urb *)NULL != pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = (struct urb *)NULL;
+ peasycap->allocation_audio_urb -= 1;
+ m++;
+ }
+ }
+ }
+ JOT(4, "%i audio urbs freed\n", m);
+/*---------------------------------------------------------------------------*/
+ JOT(4, "freeing audio data_urb structures.\n");
+ m = 0;
+ list_for_each_safe(plist_head, plist_next, peasycap->purb_audio_head) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ kfree(pdata_urb); pdata_urb = (struct data_urb *)NULL;
+ peasycap->allocation_audio_struct -= \
+ sizeof(struct data_urb);
+ m++;
+ }
+ }
+JOT(4, "%i audio data_urb structures freed\n", m);
+JOT(4, "setting peasycap->purb_audio_head=NULL\n");
+peasycap->purb_audio_head = (struct list_head *)NULL;
+} else {
+JOT(4, "peasycap->purb_audio_head is NULL\n");
+}
+/*---------------------------------------------------------------------------*/
+JOT(4, "freeing audio isoc buffers.\n");
+m = 0;
+for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ if ((void *)NULL != peasycap->audio_isoc_buffer[k].pgo) {
+ free_pages((unsigned long)\
+ (peasycap->audio_isoc_buffer[k].pgo), \
+ AUDIO_ISOC_ORDER);
+ peasycap->audio_isoc_buffer[k].pgo = (void *)NULL;
+ peasycap->allocation_audio_page -= \
+ ((unsigned int)(0x01 << AUDIO_ISOC_ORDER));
+ m++;
+ }
+}
+JOT(4, "easysnd_delete(): isoc audio buffers freed: %i pages\n", \
+ m * (0x01 << AUDIO_ISOC_ORDER));
+/*---------------------------------------------------------------------------*/
+JOT(4, "freeing audio buffers.\n");
+lost = 0;
+for (k = 0; k < peasycap->audio_buffer_page_many; k++) {
+ if ((void *)NULL != peasycap->audio_buffer[k].pgo) {
+ free_page((unsigned long)(peasycap->audio_buffer[k].pgo));
+ peasycap->audio_buffer[k].pgo = (void *)NULL;
+ peasycap->allocation_audio_page -= 1;
+ lost++;
+ }
+}
+JOT(4, "easysnd_delete(): audio buffers freed: %i pages\n", lost);
+/*---------------------------------------------------------------------------*/
+JOT(4, "freeing easycap structure.\n");
+allocation_video_urb = peasycap->allocation_video_urb;
+allocation_video_page = peasycap->allocation_video_page;
+allocation_video_struct = peasycap->allocation_video_struct;
+registered_video = peasycap->registered_video;
+allocation_audio_urb = peasycap->allocation_audio_urb;
+allocation_audio_page = peasycap->allocation_audio_page;
+allocation_audio_struct = peasycap->allocation_audio_struct;
+registered_audio = peasycap->registered_audio;
+m = 0;
+if ((struct easycap *)NULL != peasycap) {
+ kfree(peasycap); peasycap = (struct easycap *)NULL;
+ allocation_video_struct -= sizeof(struct easycap);
+ m++;
+}
+JOT(4, "%i easycap structure freed\n", m);
+/*---------------------------------------------------------------------------*/
+
+SAY("%8i= video urbs after all deletions\n", allocation_video_urb);
+SAY("%8i= video pages after all deletions\n", allocation_video_page);
+SAY("%8i= video structs after all deletions\n", allocation_video_struct);
+SAY("%8i= video devices after all deletions\n", registered_video);
+SAY("%8i= audio urbs after all deletions\n", allocation_audio_urb);
+SAY("%8i= audio pages after all deletions\n", allocation_audio_page);
+SAY("%8i= audio structs after all deletions\n", allocation_audio_struct);
+SAY("%8i= audio devices after all deletions\n", registered_audio);
+
+JOT(4, "ending.\n");
+return;
+}
+/*****************************************************************************/
+unsigned int easycap_poll(struct file *file, poll_table *wait)
+{
+struct easycap *peasycap;
+
+JOT(8, "\n");
+
+if (NULL == ((poll_table *)wait))
+ JOT(8, "WARNING: poll table pointer is NULL ... continuing\n");
+if (NULL == ((struct file *)file)) {
+ SAY("ERROR: file pointer is NULL\n");
+ return -EFAULT;
+}
+peasycap = file->private_data;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
+peasycap->polled = 1;
+
+if (0 == easycap_dqbuf(peasycap, 0))
+ return POLLIN | POLLRDNORM;
+else
+ return POLLERR;
+
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * IF mode IS NONZERO THIS ROUTINE RETURNS -EAGAIN RATHER THAN BLOCKING.
+ */
+/*---------------------------------------------------------------------------*/
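+/*---------------------------------------------------------------------------*/
+/*
+ * AS USED IN THE TWO WAIT LOOPS BELOW, THE LOW BYTE OF
+ * field_buffer[.][0].kount APPARENTLY ENCODES THE FIELD PARITY (ZERO FOR
+ * FIELD 0, NONZERO FOR FIELD 1) AND A NONZERO HIGH BYTE APPARENTLY MARKS A
+ * DAMAGED OR INCOMPLETE FIELD.  easycap_dqbuf() THEREFORE WAITS FOR A CLEAN
+ * FIELD 0, CALLS field2frame(), THEN WAITS FOR A CLEAN FIELD 1 AND CALLS
+ * field2frame() AGAIN.
+ */
+/*---------------------------------------------------------------------------*/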
+int
+easycap_dqbuf(struct easycap *peasycap, int mode)
+{
+int miss, rc;
+
+JOT(8, "\n");
+
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * WAIT FOR FIELD 0
+ */
+/*---------------------------------------------------------------------------*/
+miss = 0;
+if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
+ return -ERESTARTSYS;
+while ((peasycap->field_read == peasycap->field_fill) || \
+ (0 != (0xFF00 & peasycap->field_buffer\
+ [peasycap->field_read][0].kount)) || \
+ (0 != (0x00FF & peasycap->field_buffer\
+ [peasycap->field_read][0].kount))) {
+ mutex_unlock(&(peasycap->mutex_mmap_video[0]));
+
+ if (mode)
+ return -EAGAIN;
+
+ JOT(8, "first wait on wq_video, " \
+ "%i=field_read %i=field_fill\n", \
+ peasycap->field_read, peasycap->field_fill);
+
+ msleep(1);
+ if (0 != (wait_event_interruptible(peasycap->wq_video, \
+ (peasycap->video_idle || peasycap->video_eof || \
+ ((peasycap->field_read != peasycap->field_fill) && \
+ (0 == (0xFF00 & peasycap->field_buffer\
+ [peasycap->field_read][0].kount)) && \
+ (0 == (0x00FF & peasycap->field_buffer\
+ [peasycap->field_read][0].kount))))))){
+ SAY("aborted by signal\n");
+ return -EIO;
+ }
+ if (peasycap->video_idle) {
+ JOT(8, "%i=peasycap->video_idle\n", peasycap->video_idle);
+ return -EIO;
+ }
+ if (peasycap->video_eof) {
+ JOT(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
+ debrief(peasycap);
+ kill_video_urbs(peasycap);
+ return -EIO;
+ }
+miss++;
+if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
+ return -ERESTARTSYS;
+}
+mutex_unlock(&(peasycap->mutex_mmap_video[0]));
+JOT(8, "first awakening on wq_video after %i waits\n", miss);
+
+rc = field2frame(peasycap);
+if (0 != rc)
+ SAY("ERROR: field2frame() returned %i\n", rc);
+
+if (true == peasycap->offerfields) {
+ peasycap->frame_read = peasycap->frame_fill;
+ (peasycap->frame_fill)++;
+ if (peasycap->frame_buffer_many <= peasycap->frame_fill)
+ peasycap->frame_fill = 0;
+
+ if (0x01 & easycap_standard[peasycap->standard_offset].mask) {
+ peasycap->frame_buffer[peasycap->frame_read][0].kount = \
+ V4L2_FIELD_BOTTOM;
+ } else {
+ peasycap->frame_buffer[peasycap->frame_read][0].kount = \
+ V4L2_FIELD_TOP;
+ }
+JOT(8, "setting: %i=peasycap->frame_read\n", peasycap->frame_read);
+JOT(8, "bumped to: %i=peasycap->frame_fill\n", peasycap->frame_fill);
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * WAIT FOR FIELD 1
+ */
+/*---------------------------------------------------------------------------*/
+miss = 0;
+if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
+ return -ERESTARTSYS;
+while ((peasycap->field_read == peasycap->field_fill) || \
+ (0 != (0xFF00 & peasycap->field_buffer\
+ [peasycap->field_read][0].kount)) || \
+ (0 == (0x00FF & peasycap->field_buffer\
+ [peasycap->field_read][0].kount))) {
+ mutex_unlock(&(peasycap->mutex_mmap_video[0]));
+
+ if (mode)
+ return -EAGAIN;
+
+ JOT(8, "second wait on wq_video, " \
+ "%i=field_read %i=field_fill\n", \
+ peasycap->field_read, peasycap->field_fill);
+ msleep(1);
+ if (0 != (wait_event_interruptible(peasycap->wq_video, \
+ (peasycap->video_idle || peasycap->video_eof || \
+ ((peasycap->field_read != peasycap->field_fill) && \
+ (0 == (0xFF00 & peasycap->field_buffer\
+ [peasycap->field_read][0].kount)) && \
+ (0 != (0x00FF & peasycap->field_buffer\
+ [peasycap->field_read][0].kount))))))){
+ SAY("aborted by signal\n");
+ return -EIO;
+ }
+ if (peasycap->video_idle) {
+ JOT(8, "%i=peasycap->video_idle\n", peasycap->video_idle);
+ return -EIO;
+ }
+ if (peasycap->video_eof) {
+ JOT(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
+ debrief(peasycap);
+ kill_video_urbs(peasycap);
+ return -EIO;
+ }
+miss++;
+if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
+ return -ERESTARTSYS;
+}
+mutex_unlock(&(peasycap->mutex_mmap_video[0]));
+JOT(8, "second awakening on wq_video after %i waits\n", miss);
+
+rc = field2frame(peasycap);
+if (0 != rc)
+ SAY("ERROR: field2frame() returned %i\n", rc);
+
+peasycap->frame_read = peasycap->frame_fill;
+peasycap->queued[peasycap->frame_read] = 0;
+peasycap->done[peasycap->frame_read] = V4L2_BUF_FLAG_DONE;
+
+(peasycap->frame_fill)++;
+if (peasycap->frame_buffer_many <= peasycap->frame_fill)
+ peasycap->frame_fill = 0;
+
+if (0x01 & easycap_standard[peasycap->standard_offset].mask) {
+ peasycap->frame_buffer[peasycap->frame_read][0].kount = \
+ V4L2_FIELD_TOP;
+} else {
+ peasycap->frame_buffer[peasycap->frame_read][0].kount = \
+ V4L2_FIELD_BOTTOM;
+}
+
+JOT(8, "setting: %i=peasycap->frame_read\n", peasycap->frame_read);
+JOT(8, "bumped to: %i=peasycap->frame_fill\n", peasycap->frame_fill);
+
+return 0;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * BY DEFINITION, odd IS true FOR THE FIELD OCCUPYING LINES 1,3,5,...,479
+ * odd IS false FOR THE FIELD OCCUPYING LINES 0,2,4,...,478
+ *
+ * WHEN BOOLEAN PARAMETER decimatepixel IS true, ONLY THE FIELD FOR WHICH
+ * odd==false IS TRANSFERRED TO THE FRAME BUFFER.
+ *
+ * THE BOOLEAN PARAMETER offerfields IS true ONLY WHEN THE USER PROGRAM
+ * CHOOSES THE OPTION V4L2_FIELD_ALTERNATE. NO USERSPACE PROGRAM TESTED
+ * TO DATE HAS DONE THIS. BUGS ARE LIKELY.
+ */
+/*---------------------------------------------------------------------------*/
+int
+field2frame(struct easycap *peasycap)
+{
+static struct timeval timeval0;
+struct timeval timeval;
+long long int above, below;
+__u32 remainder;
+struct signed_div_result sdr;
+
+void *pex, *pad;
+int kex, kad, mex, mad, rex, rad, rad2;
+int c2, c3, w2, w3, cz, wz;
+int rc, bytesperpixel, multiplier, much, more, over, rump, caches;
+__u8 mask, margin;
+bool odd, isuy, decimatepixel, offerfields;
+
+JOT(8, "===== parity %i, field buffer %i --> frame buffer %i\n", \
+ peasycap->field_buffer[peasycap->field_read][0].kount,\
+ peasycap->field_read, peasycap->frame_fill);
+JOT(8, "===== %i=bytesperpixel\n", peasycap->bytesperpixel);
+if (true == peasycap->offerfields)
+ JOT(8, "===== offerfields\n");
+
+/*---------------------------------------------------------------------------*/
+/*
+ * REJECT OR CLEAN BAD FIELDS
+ */
+/*---------------------------------------------------------------------------*/
+if (peasycap->field_read == peasycap->field_fill) {
+ SAY("ERROR: on entry, still filling field buffer %i\n", \
+ peasycap->field_read);
+ return 0;
+}
+#if defined(EASYCAP_TESTCARD)
+easycap_testcard(peasycap, peasycap->field_read);
+#else
+if (0 != (0x0400 & peasycap->field_buffer[peasycap->field_read][0].kount))
+ easycap_testcard(peasycap, peasycap->field_read);
+#endif /*EASYCAP_TESTCARD*/
+/*---------------------------------------------------------------------------*/
+
+offerfields = peasycap->offerfields;
+bytesperpixel = peasycap->bytesperpixel;
+decimatepixel = peasycap->decimatepixel;
+
+if ((2 != bytesperpixel) && \
+ (3 != bytesperpixel) && \
+ (4 != bytesperpixel)) {
+ SAY("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
+ return -EFAULT;
+}
+if (true == decimatepixel)
+ multiplier = 2;
+else
+ multiplier = 1;
+
+w2 = 2 * multiplier * (peasycap->width);
+w3 = bytesperpixel * \
+ multiplier * \
+ (peasycap->width);
+wz = multiplier * \
+ (peasycap->height) * \
+ multiplier * \
+ (peasycap->width);
+
+kex = peasycap->field_read; mex = 0;
+kad = peasycap->frame_fill; mad = 0;
+
+pex = peasycap->field_buffer[kex][0].pgo; rex = PAGE_SIZE;
+pad = peasycap->frame_buffer[kad][0].pgo; rad = PAGE_SIZE;
+if (peasycap->field_buffer[kex][0].kount)
+ odd = true;
+else
+ odd = false;
+
+if ((true == odd) && (false == offerfields) && (false == decimatepixel)) {
+ JOT(8, " initial skipping %4i bytes p.%4i\n", \
+ w3/multiplier, mad);
+ pad += (w3 / multiplier); rad -= (w3 / multiplier);
+}
+isuy = true;
+mask = 0; rump = 0; caches = 0;
+
+cz = 0;
+while (cz < wz) {
+ /*-------------------------------------------------------------------*/
+ /*
+ ** PROCESS ONE LINE OF FRAME AT FULL RESOLUTION:
+ ** READ w2 BYTES FROM FIELD BUFFER,
+ ** WRITE w3 BYTES TO FRAME BUFFER
+ **/
+ /*-------------------------------------------------------------------*/
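+	/*
+	 * FOR EXAMPLE, ASSUMING A NOTIONAL 640-PIXEL-WIDE FRAME WITH
+	 * bytesperpixel 3 AND NO DECIMATION: w2 = 2*640 = 1280 BYTES OF
+	 * UYVY ARE READ AND w3 = 3*640 = 1920 BYTES OF RGB ARE WRITTEN
+	 * FOR EACH LINE.
+	 */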
+ if (false == decimatepixel) {
+ over = w2;
+ do {
+ much = over; more = 0; margin = 0; mask = 0x00;
+ if (rex < much)
+ much = rex;
+ rump = 0;
+
+ if (much % 2) {
+ SAY("MISTAKE: much is odd\n");
+ return -EFAULT;
+ }
+
+ more = (bytesperpixel * \
+ much) / 2;
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ if (1 < bytesperpixel) {
+ if ((rad * \
+ 2) < (much * \
+ bytesperpixel)) {
+ /*
+ ** INJUDICIOUS ALTERATION OF THIS
+ ** BLOCK WILL CAUSE BREAKAGE.
+ ** BEWARE.
+ **/
+ rad2 = rad + bytesperpixel - 1;
+ much = ((((2 * \
+ rad2)/bytesperpixel)/2) * 2);
+ rump = ((bytesperpixel * \
+ much) / 2) - rad;
+ more = rad;
+ }
+ mask = (__u8)rump;
+ margin = 0;
+ if (much == rex) {
+ mask |= 0x04;
+ if ((mex + 1) < FIELD_BUFFER_SIZE/ \
+ PAGE_SIZE) {
+ margin = *((__u8 *)(peasycap->\
+ field_buffer\
+ [kex][mex + 1].pgo));
+ } else
+ mask |= 0x08;
+ }
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ } else {
+ SAY("MISTAKE: %i=bytesperpixel\n", \
+ bytesperpixel);
+ return -EFAULT;
+ }
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ if (rump)
+ caches++;
+
+ rc = redaub(peasycap, pad, pex, much, more, \
+ mask, margin, isuy);
+ if (0 > rc) {
+ SAY("ERROR: redaub() failed\n");
+ return -EFAULT;
+ }
+ if (much % 4) {
+ if (isuy)
+ isuy = false;
+ else
+ isuy = true;
+ }
+ over -= much; cz += much;
+ pex += much; rex -= much;
+ if (!rex) {
+ mex++;
+ pex = peasycap->field_buffer[kex][mex].pgo;
+ rex = PAGE_SIZE;
+ }
+ pad += more;
+ rad -= more;
+ if (!rad) {
+ mad++;
+ pad = peasycap->frame_buffer[kad][mad].pgo;
+ rad = PAGE_SIZE;
+ if (rump) {
+ pad += rump;
+ rad -= rump;
+ }
+ }
+ } while (over);
+/*---------------------------------------------------------------------------*/
+/*
+ * SKIP w3 BYTES IN TARGET FRAME BUFFER,
+ * UNLESS IT IS THE LAST LINE OF AN ODD FRAME
+ */
+/*---------------------------------------------------------------------------*/
+	if (((false == odd) || (cz != wz)) && (false == offerfields)) {
+ over = w3;
+ do {
+ if (!rad) {
+ mad++;
+ pad = peasycap->frame_buffer\
+ [kad][mad].pgo;
+ rad = PAGE_SIZE;
+ }
+ more = over;
+ if (rad < more)
+ more = rad;
+ over -= more;
+ pad += more;
+ rad -= more;
+ } while (over);
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * PROCESS ONE LINE OF FRAME AT REDUCED RESOLUTION:
+ * ONLY IF false==odd,
+ * READ w2 BYTES FROM FIELD BUFFER,
+ * WRITE w3 / 2 BYTES TO FRAME BUFFER
+ */
+/*---------------------------------------------------------------------------*/
+ } else if (false == odd) {
+ over = w2;
+ do {
+ much = over; more = 0; margin = 0; mask = 0x00;
+ if (rex < much)
+ much = rex;
+ rump = 0;
+
+ if (much % 2) {
+ SAY("MISTAKE: much is odd\n");
+ return -EFAULT;
+ }
+
+ more = (bytesperpixel * \
+ much) / 4;
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ if (1 < bytesperpixel) {
+ if ((rad * 4) < (much * \
+ bytesperpixel)) {
+ /*
+ ** INJUDICIOUS ALTERATION OF THIS
+ ** BLOCK WILL CAUSE BREAKAGE.
+ ** BEWARE.
+ **/
+ rad2 = rad + bytesperpixel - 1;
+ much = ((((2 * rad2)/bytesperpixel)/2)\
+ * 4);
+ rump = ((bytesperpixel * \
+ much) / 4) - rad;
+ more = rad;
+ }
+ mask = (__u8)rump;
+ margin = 0;
+ if (much == rex) {
+ mask |= 0x04;
+ if ((mex + 1) < FIELD_BUFFER_SIZE/ \
+ PAGE_SIZE) {
+ margin = *((__u8 *)(peasycap->\
+ field_buffer\
+ [kex][mex + 1].pgo));
+ }
+ else
+ mask |= 0x08;
+ }
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ } else {
+ SAY("MISTAKE: %i=bytesperpixel\n", \
+ bytesperpixel);
+ return -EFAULT;
+ }
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ if (rump)
+ caches++;
+
+ rc = redaub(peasycap, pad, pex, much, more, \
+ mask, margin, isuy);
+ if (0 > rc) {
+ SAY("ERROR: redaub() failed\n");
+ return -EFAULT;
+ }
+ over -= much; cz += much;
+ pex += much; rex -= much;
+ if (!rex) {
+ mex++;
+ pex = peasycap->field_buffer[kex][mex].pgo;
+ rex = PAGE_SIZE;
+ }
+ pad += more;
+ rad -= more;
+ if (!rad) {
+ mad++;
+ pad = peasycap->frame_buffer[kad][mad].pgo;
+ rad = PAGE_SIZE;
+ if (rump) {
+ pad += rump;
+ rad -= rump;
+ }
+ }
+ } while (over);
+/*---------------------------------------------------------------------------*/
+/*
+ * OTHERWISE JUST
+ * READ w2 BYTES FROM FIELD BUFFER AND DISCARD THEM
+ */
+/*---------------------------------------------------------------------------*/
+ } else {
+ over = w2;
+ do {
+ if (!rex) {
+ mex++;
+ pex = peasycap->field_buffer[kex][mex].pgo;
+ rex = PAGE_SIZE;
+ }
+ much = over;
+ if (rex < much)
+ much = rex;
+ over -= much;
+ cz += much;
+ pex += much;
+ rex -= much;
+ } while (over);
+ }
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * SANITY CHECKS
+ */
+/*---------------------------------------------------------------------------*/
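+/*
+ * HERE c2 IS THE TOTAL NUMBER OF BYTES CONSUMED FROM THE FIELD BUFFER AND
+ * c3 THE TOTAL ADVANCED THROUGH THE FRAME BUFFER (WRITES PLUS SKIPS), EACH
+ * RECONSTRUCTED FROM THE FINAL PAGE INDEX AND THE RESIDUE OF THAT PAGE.
+ */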
+c2 = (mex + 1)*PAGE_SIZE - rex;
+if (cz != c2)
+ SAY("ERROR: discrepancy %i in bytes read\n", c2 - cz);
+c3 = (mad + 1)*PAGE_SIZE - rad;
+
+if (false == decimatepixel) {
+ if (bytesperpixel * \
+ cz != c3) \
+ SAY("ERROR: discrepancy %i in bytes written\n", \
+ c3 - (bytesperpixel * \
+ cz));
+} else {
+ if (false == odd) {
+ if (bytesperpixel * \
+ cz != (4 * c3))
+ SAY("ERROR: discrepancy %i in bytes written\n", \
+ (2*c3)-(bytesperpixel * \
+ cz));
+ } else {
+ if (0 != c3)
+ SAY("ERROR: discrepancy %i " \
+ "in bytes written\n", c3);
+ }
+}
+if (rump)
+ SAY("ERROR: undischarged cache at end of line in frame buffer\n");
+
+JOT(8, "===== field2frame(): %i bytes --> %i bytes (incl skip)\n", c2, c3);
+JOT(8, "===== field2frame(): %i=mad %i=rad\n", mad, rad);
+
+if (true == odd)
+ JOT(8, "+++++ field2frame(): frame buffer %i is full\n", kad);
+
+if (peasycap->field_read == peasycap->field_fill)
+ SAY("WARNING: on exit, filling field buffer %i\n", \
+ peasycap->field_read);
+/*---------------------------------------------------------------------------*/
+/*
+ * CALCULATE VIDEO STREAMING RATE
+ */
+/*---------------------------------------------------------------------------*/
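+/*
+ * FOR EXAMPLE, AN ELAPSED TIME OF 20000 MICROSECONDS BETWEEN SUCCESSIVE
+ * CALLS GIVES 1000000/20000 = 50 FIELDS PER SECOND, AS EXPECTED FOR PAL.
+ */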
+do_gettimeofday(&timeval);
+if (timeval0.tv_sec) {
+ below = ((long long int)(1000000)) * \
+ ((long long int)(timeval.tv_sec - timeval0.tv_sec)) + \
+ (long long int)(timeval.tv_usec - timeval0.tv_usec);
+ above = (long long int)1000000;
+
+ sdr = signed_div(above, below);
+ above = sdr.quotient;
+ remainder = (__u32)sdr.remainder;
+
+ JOT(8, "video streaming at %3lli.%03i fields per second\n", above, \
+ (remainder/1000));
+}
+timeval0 = timeval;
+
+if (caches)
+ JOT(8, "%i=caches\n", caches);
+return 0;
+}
+/*****************************************************************************/
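+/*---------------------------------------------------------------------------*/
+/*
+ * SIGNED WRAPPER AROUND THE KERNEL'S do_div(), WHICH EXPECTS AN UNSIGNED
+ * 64-BIT DIVIDEND (MODIFIED IN PLACE) AND A DIVISOR THAT FITS IN 32 BITS.
+ * WHEN THE OPERANDS DIFFER IN SIGN THE MAGNITUDES ARE DIVIDED AND THE
+ * QUOTIENT IS NEGATED; THE REMAINDER IS RETURNED AS A MAGNITUDE.  THE
+ * CALLER ABOVE SUPPLIES ONLY MODEST POSITIVE MICROSECOND COUNTS, WHICH
+ * SATISFY THESE ASSUMPTIONS.
+ */
+/*---------------------------------------------------------------------------*/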
+struct signed_div_result
+signed_div(long long int above, long long int below)
+{
+struct signed_div_result sdr;
+
+if (((0 <= above) && (0 <= below)) || ((0 > above) && (0 > below))) {
+ sdr.remainder = (unsigned long long int) do_div(above, below);
+ sdr.quotient = (long long int) above;
+} else {
+ if (0 > above)
+ above = -above;
+ if (0 > below)
+ below = -below;
+ sdr.remainder = (unsigned long long int) do_div(above, below);
+ sdr.quotient = -((long long int) above);
+}
+return sdr;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * DECIMATION AND COLOURSPACE CONVERSION.
+ *
+ * THIS ROUTINE REQUIRES THAT ALL THE DATA TO BE READ RESIDES ON ONE PAGE
+ * AND THAT ALL THE DATA TO BE WRITTEN RESIDES ON ONE (DIFFERENT) PAGE.
+ * THE CALLING ROUTINE MUST ENSURE THAT THIS REQUIREMENT IS MET, AND MUST
+ * ALSO ENSURE THAT much IS EVEN.
+ *
+ * much BYTES ARE READ.  AT LEAST (bytesperpixel * much)/2 BYTES ARE WRITTEN
+ * WHEN THERE IS NO DECIMATION, AND HALF THAT AMOUNT WHEN THERE IS.
+ *
+ * mask IS ZERO WHEN NO SPECIAL BEHAVIOUR IS REQUIRED.  OTHERWISE IT IS SET THUS:
+ * 0x03 & mask = number of bytes to be written to cache instead of to
+ * frame buffer
+ * 0x04 & mask => use argument margin to set the chrominance for last pixel
+ * 0x08 & mask => do not set the chrominance for last pixel
+ *
+ * YUV to RGB CONVERSION IS (OR SHOULD BE) ITU-R BT 601.
+ *
+ * THERE IS A LOT OF CODE REPETITION IN THIS ROUTINE IN ORDER TO AVOID
+ * INEFFICIENT SWITCHING INSIDE INNER LOOPS. REARRANGING THE LOGIC TO
+ * REDUCE CODE LENGTH WILL GENERALLY IMPAIR RUNTIME PERFORMANCE. BEWARE.
+ */
+/*---------------------------------------------------------------------------*/
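+/*---------------------------------------------------------------------------*/
+/*
+ * THE LOOKUP TABLES PREPARED BELOW APPEAR TO IMPLEMENT, APPROXIMATELY,
+ *     R = Y + 1.402*(V - 128)                       (359/256)
+ *     G = Y - 0.344*(U - 128) - 0.714*(V - 128)     (88/256, 183/256)
+ *     B = Y + 1.770*(U - 128)                       (453/256)
+ * WITH Y CLAMPED TO THE NOMINAL RANGE 16..235, U AND V CLAMPED TO 16..239,
+ * AND EACH RESULT CLAMPED TO 0..255, CONSISTENT WITH THE ITU-R BT 601
+ * CONVERSION CLAIMED ABOVE.
+ */
+/*---------------------------------------------------------------------------*/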
+int
+redaub(struct easycap *peasycap, void *pad, void *pex, int much, int more, \
+ __u8 mask, __u8 margin, bool isuy)
+{
+static __s32 ay[256], bu[256], rv[256], gu[256], gv[256];
+static __u8 cache[8], *pcache;
+__u8 r, g, b, y, u, v, c, *p2, *p3, *pz, *pr;
+int bytesperpixel;
+bool byteswaporder, decimatepixel, last;
+int j, rump;
+__s32 s32;
+
+if (much % 2) {
+ SAY("MISTAKE: much is odd\n");
+ return -EFAULT;
+}
+bytesperpixel = peasycap->bytesperpixel;
+byteswaporder = peasycap->byteswaporder;
+decimatepixel = peasycap->decimatepixel;
+
+/*---------------------------------------------------------------------------*/
+if (!bu[255]) {
+ for (j = 0; j < 112; j++) {
+ s32 = (0xFF00 & (453 * j)) >> 8;
+ bu[j + 128] = s32; bu[127 - j] = -s32;
+ s32 = (0xFF00 & (359 * j)) >> 8;
+ rv[j + 128] = s32; rv[127 - j] = -s32;
+ s32 = (0xFF00 & (88 * j)) >> 8;
+ gu[j + 128] = s32; gu[127 - j] = -s32;
+ s32 = (0xFF00 & (183 * j)) >> 8;
+ gv[j + 128] = s32; gv[127 - j] = -s32;
+ }
+ for (j = 0; j < 16; j++) {
+ bu[j] = bu[16]; rv[j] = rv[16];
+ gu[j] = gu[16]; gv[j] = gv[16];
+ }
+ for (j = 240; j < 256; j++) {
+ bu[j] = bu[239]; rv[j] = rv[239];
+ gu[j] = gu[239]; gv[j] = gv[239];
+ }
+ for (j = 16; j < 236; j++)
+ ay[j] = j;
+ for (j = 0; j < 16; j++)
+ ay[j] = ay[16];
+ for (j = 236; j < 256; j++)
+ ay[j] = ay[235];
+ JOT(8, "lookup tables are prepared\n");
+}
+if ((__u8 *)NULL == pcache)
+ pcache = &cache[0];
+/*---------------------------------------------------------------------------*/
+/*
+ * TRANSFER CONTENTS OF CACHE TO THE FRAME BUFFER
+ */
+/*---------------------------------------------------------------------------*/
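+/*
+ * THE STATIC cache[] HOLDS THE TRAILING BYTES OF A PIXEL (AT MOST
+ * bytesperpixel - 1 OF THEM, AS DETERMINED BY rump ON THE PREVIOUS CALL)
+ * WHICH COULD NOT BE WRITTEN BECAUSE THE PREVIOUS FRAME-BUFFER PAGE WAS
+ * EXHAUSTED.  THEY ARE COPIED NOW INTO THE BYTES RESERVED FOR THEM AT THE
+ * START OF THE CURRENT PAGE, BEFORE ANY FRESH DATA IS CONVERTED.
+ */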
+if (!pcache) {
+ SAY("MISTAKE: pcache is NULL\n");
+ return -EFAULT;
+}
+
+if (pcache != &cache[0])
+ JOT(16, "cache has %i bytes\n", (int)(pcache - &cache[0]));
+p2 = &cache[0];
+p3 = (__u8 *)pad - (int)(pcache - &cache[0]);
+while (p2 < pcache) {
+ *p3++ = *p2; p2++;
+}
+pcache = &cache[0];
+if (p3 != pad) {
+ SAY("MISTAKE: pointer misalignment\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+rump = (int)(0x03 & mask);
+u = 0; v = 0;
+p2 = (__u8 *)pex; pz = p2 + much; pr = p3 + more; last = false;
+p2++;
+
+if (true == isuy)
+ u = *(p2 - 1);
+else
+ v = *(p2 - 1);
+
+if (rump)
+ JOT(16, "%4i=much %4i=more %i=rump\n", much, more, rump);
+
+/*---------------------------------------------------------------------------*/
+switch (bytesperpixel) {
+case 2: {
+ if (false == decimatepixel) {
+ memcpy(pad, pex, (size_t)much);
+ if (false == byteswaporder)
+ /*---------------------------------------------------*/
+ /*
+ ** UYVY
+ */
+ /*---------------------------------------------------*/
+ return 0;
+ else {
+ /*---------------------------------------------------*/
+ /*
+ ** YUYV
+ */
+ /*---------------------------------------------------*/
+ p3 = (__u8 *)pad; pz = p3 + much;
+ while (pz > p3) {
+ c = *p3;
+ *p3 = *(p3 + 1);
+ *(p3 + 1) = c;
+ p3 += 2;
+ }
+ return 0;
+ }
+ } else {
+ if (false == byteswaporder) {
+ /*---------------------------------------------------*/
+ /*
+ ** UYVY DECIMATED
+ */
+ /*---------------------------------------------------*/
+ p2 = (__u8 *)pex; p3 = (__u8 *)pad; pz = p2 + much;
+ while (pz > p2) {
+ *p3 = *p2;
+ *(p3 + 1) = *(p2 + 1);
+ *(p3 + 2) = *(p2 + 2);
+ *(p3 + 3) = *(p2 + 3);
+ p3 += 4; p2 += 8;
+ }
+ return 0;
+ } else {
+ /*---------------------------------------------------*/
+ /*
+ ** YUYV DECIMATED
+ **/
+ /*---------------------------------------------------*/
+ p2 = (__u8 *)pex; p3 = (__u8 *)pad; pz = p2 + much;
+ while (pz > p2) {
+ *p3 = *(p2 + 1);
+ *(p3 + 1) = *p2;
+ *(p3 + 2) = *(p2 + 3);
+ *(p3 + 3) = *(p2 + 2);
+ p3 += 4; p2 += 8;
+ }
+ return 0;
+ }
+ }
+ break;
+ }
+case 3:
+ {
+ if (false == decimatepixel) {
+ if (false == byteswaporder) {
+ /*---------------------------------------------------*/
+ /*
+ ** RGB
+ **/
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = r;
+ *pcache++ = g;
+ *pcache++ = b;
+ break;
+ }
+ case 2: {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *pcache++ = b;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: %i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *(p3 + 2) = b;
+ }
+ p2 += 2;
+ if (true == isuy)
+ isuy = false;
+ else
+ isuy = true;
+ p3 += bytesperpixel;
+ }
+ return 0;
+ } else {
+ /*---------------------------------------------------*/
+ /*
+ ** BGR
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ }
+ else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = b;
+ *pcache++ = g;
+ *pcache++ = r;
+ break;
+ }
+ case 2: {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *pcache++ = r;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: %i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *(p3 + 2) = r;
+ }
+ p2 += 2;
+ if (true == isuy)
+ isuy = false;
+ else
+ isuy = true;
+ p3 += bytesperpixel;
+ }
+ }
+ return 0;
+ } else {
+ if (false == byteswaporder) {
+ /*---------------------------------------------------*/
+ /*
+ ** RGB DECIMATED
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ if (true == isuy) {
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - \
+ gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = r;
+ *pcache++ = g;
+ *pcache++ = b;
+ break;
+ }
+ case 2: {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *pcache++ = b;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: " \
+ "%i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *(p3 + 2) = b;
+ }
+ isuy = false;
+ p3 += bytesperpixel;
+ } else {
+ isuy = true;
+ }
+ p2 += 2;
+ }
+ return 0;
+ } else {
+ /*---------------------------------------------------*/
+ /*
+ * BGR DECIMATED
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ if (true == isuy) {
+
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - \
+ gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = b;
+ *pcache++ = g;
+ *pcache++ = r;
+ break;
+ }
+ case 2: {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *pcache++ = r;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: " \
+ "%i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *(p3 + 2) = r;
+ }
+ isuy = false;
+ p3 += bytesperpixel;
+ }
+ else
+ isuy = true;
+ p2 += 2;
+ }
+ return 0;
+ }
+ }
+ break;
+ }
+case 4:
+ {
+ if (false == decimatepixel) {
+ if (false == byteswaporder) {
+ /*---------------------------------------------------*/
+ /*
+ ** RGBA
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = r;
+ *pcache++ = g;
+ *pcache++ = b;
+ *pcache++ = 0;
+ break;
+ }
+ case 2: {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *pcache++ = b;
+ *pcache++ = 0;
+ break;
+ }
+ case 3: {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *(p3 + 2) = b;
+ *pcache++ = 0;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: %i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *(p3 + 2) = b;
+ *(p3 + 3) = 0;
+ }
+ p2 += 2;
+ if (true == isuy)
+ isuy = false;
+ else
+ isuy = true;
+ p3 += bytesperpixel;
+ }
+ return 0;
+ } else {
+ /*---------------------------------------------------*/
+ /*
+ ** BGRA
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = b;
+ *pcache++ = g;
+ *pcache++ = r;
+ *pcache++ = 0;
+ break;
+ }
+ case 2: {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *pcache++ = r;
+ *pcache++ = 0;
+ break;
+ }
+ case 3: {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *(p3 + 2) = r;
+ *pcache++ = 0;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: %i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *(p3 + 2) = r;
+ *(p3 + 3) = 0;
+ }
+ p2 += 2;
+ if (true == isuy)
+ isuy = false;
+ else
+ isuy = true;
+ p3 += bytesperpixel;
+ }
+ }
+ return 0;
+ } else {
+ if (false == byteswaporder) {
+ /*---------------------------------------------------*/
+ /*
+ ** RGBA DECIMATED
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ if (true == isuy) {
+
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - \
+ gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = r;
+ *pcache++ = g;
+ *pcache++ = b;
+ *pcache++ = 0;
+ break;
+ }
+ case 2: {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *pcache++ = b;
+ *pcache++ = 0;
+ break;
+ }
+ case 3: {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *(p3 + 2) = b;
+ *pcache++ = 0;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: " \
+ "%i=rump\n", \
+ bytesperpixel - \
+ rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = r;
+ *(p3 + 1) = g;
+ *(p3 + 2) = b;
+ *(p3 + 3) = 0;
+ }
+ isuy = false;
+ p3 += bytesperpixel;
+ } else
+ isuy = true;
+ p2 += 2;
+ }
+ return 0;
+ } else {
+ /*---------------------------------------------------*/
+ /*
+ ** BGRA DECIMATED
+ */
+ /*---------------------------------------------------*/
+ while (pz > p2) {
+ if (pr <= (p3 + bytesperpixel))
+ last = true;
+ else
+ last = false;
+ y = *p2;
+ if ((true == last) && (0x0C & mask)) {
+ if (0x04 & mask) {
+ if (true == isuy)
+ v = margin;
+ else
+ u = margin;
+ } else
+ if (0x08 & mask)
+ ;
+ } else {
+ if (true == isuy)
+ v = *(p2 + 1);
+ else
+ u = *(p2 + 1);
+ }
+
+ if (true == isuy) {
+ s32 = ay[(int)y] + rv[(int)v];
+ r = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] - gu[(int)u] - \
+ gv[(int)v];
+ g = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+ s32 = ay[(int)y] + bu[(int)u];
+ b = (255 < s32) ? 255 : ((0 > s32) ? \
+ 0 : (__u8)s32);
+
+ if ((true == last) && rump) {
+ pcache = &cache[0];
+ switch (bytesperpixel - rump) {
+ case 1: {
+ *p3 = b;
+ *pcache++ = g;
+ *pcache++ = r;
+ *pcache++ = 0;
+ break;
+ }
+ case 2: {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *pcache++ = r;
+ *pcache++ = 0;
+ break;
+ }
+ case 3: {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *(p3 + 2) = r;
+ *pcache++ = 0;
+ break;
+ }
+ default: {
+ SAY("MISTAKE: " \
+ "%i=rump\n", \
+ bytesperpixel - rump);
+ return -EFAULT;
+ }
+ }
+ } else {
+ *p3 = b;
+ *(p3 + 1) = g;
+ *(p3 + 2) = r;
+ *(p3 + 3) = 0;
+ }
+ isuy = false;
+ p3 += bytesperpixel;
+ } else
+ isuy = true;
+ p2 += 2;
+ }
+ return 0;
+ }
+ }
+ break;
+ }
+default: {
+ SAY("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
+ return -EFAULT;
+ }
+}
+return 0;
+}
+/*****************************************************************************/
+void
+debrief(struct easycap *peasycap)
+{
+if ((struct usb_device *)NULL != peasycap->pusb_device) {
+ check_stk(peasycap->pusb_device);
+ check_saa(peasycap->pusb_device);
+ sayreadonly(peasycap);
+ SAY("%i=peasycap->field_fill\n", peasycap->field_fill);
+ SAY("%i=peasycap->field_read\n", peasycap->field_read);
+ SAY("%i=peasycap->frame_fill\n", peasycap->frame_fill);
+ SAY("%i=peasycap->frame_read\n", peasycap->frame_read);
+}
+return;
+}
+/*****************************************************************************/
+void
+sayreadonly(struct easycap *peasycap)
+{
+static int done;
+int got00, got1F, got60, got61, got62;
+
+if ((!done) && ((struct usb_device *)NULL != peasycap->pusb_device)) {
+ done = 1;
+ got00 = read_saa(peasycap->pusb_device, 0x00);
+ got1F = read_saa(peasycap->pusb_device, 0x1F);
+ got60 = read_saa(peasycap->pusb_device, 0x60);
+ got61 = read_saa(peasycap->pusb_device, 0x61);
+ got62 = read_saa(peasycap->pusb_device, 0x62);
+ SAY("0x%02X=reg0x00 0x%02X=reg0x1F\n", got00, got1F);
+ SAY("0x%02X=reg0x60 0x%02X=reg0x61 0x%02X=reg0x62\n", \
+ got60, got61, got62);
+}
+return;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * SEE CORBET ET AL. "LINUX DEVICE DRIVERS", 3rd EDITION, PAGES 430-434
+ */
+/*---------------------------------------------------------------------------*/
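+/*
+ * THE FRAME BUFFERS ARE NOT MAPPED UP FRONT:  easycap_mmap() MERELY
+ * INSTALLS easycap_vm_ops, AND easycap_vma_fault() BELOW HANDS OUT THE
+ * INDIVIDUAL FRAME-BUFFER PAGES ON DEMAND AS USERSPACE TOUCHES THEM.
+ */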
+int easycap_mmap(struct file *file, struct vm_area_struct *pvma)
+{
+
+JOT(8, "\n");
+
+pvma->vm_ops = &easycap_vm_ops;
+pvma->vm_flags |= VM_RESERVED;
+if (NULL != file)
+ pvma->vm_private_data = file->private_data;
+easycap_vma_open(pvma);
+return 0;
+}
+/*****************************************************************************/
+void
+easycap_vma_open(struct vm_area_struct *pvma)
+{
+struct easycap *peasycap;
+
+peasycap = pvma->vm_private_data;
+if (NULL != peasycap) {
+	peasycap->vma_many++;
+	JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
+}
+
+return;
+}
+/*****************************************************************************/
+void
+easycap_vma_close(struct vm_area_struct *pvma)
+{
+struct easycap *peasycap;
+
+peasycap = pvma->vm_private_data;
+if (NULL != peasycap) {
+ peasycap->vma_many--;
+ JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
+}
+return;
+}
+/*****************************************************************************/
+int
+easycap_vma_fault(struct vm_area_struct *pvma, struct vm_fault *pvmf)
+{
+int k, m, retcode;
+void *pbuf;
+struct page *page;
+struct easycap *peasycap;
+
+retcode = VM_FAULT_NOPAGE;
+pbuf = (void *)NULL;
+page = (struct page *)NULL;
+
+if (NULL == pvma) {
+ SAY("pvma is NULL\n");
+ return retcode;
+}
+if (NULL == pvmf) {
+ SAY("pvmf is NULL\n");
+ return retcode;
+}
+
+k = (pvmf->pgoff) / (FRAME_BUFFER_SIZE/PAGE_SIZE);
+m = (pvmf->pgoff) % (FRAME_BUFFER_SIZE/PAGE_SIZE);
+
+if (!m)
+ JOT(4, "%4i=k, %4i=m\n", k, m);
+else
+ JOT(16, "%4i=k, %4i=m\n", k, m);
+
+if ((0 > k) || (FRAME_BUFFER_MANY <= k)) {
+ SAY("ERROR: buffer index %i out of range\n", k);
+ return retcode;
+}
+if ((0 > m) || (FRAME_BUFFER_SIZE/PAGE_SIZE <= m)) {
+ SAY("ERROR: page number %i out of range\n", m);
+ return retcode;
+}
+peasycap = pvma->vm_private_data;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return retcode;
+}
+mutex_lock(&(peasycap->mutex_mmap_video[0]));
+/*---------------------------------------------------------------------------*/
+pbuf = peasycap->frame_buffer[k][m].pgo;
+if (NULL == pbuf) {
+ SAY("ERROR: pbuf is NULL\n");
+ goto finish;
+}
+page = virt_to_page(pbuf);
+if (NULL == page) {
+ SAY("ERROR: page is NULL\n");
+ goto finish;
+}
+get_page(page);
+/*---------------------------------------------------------------------------*/
+finish:
+mutex_unlock(&(peasycap->mutex_mmap_video[0]));
+if (NULL == page) {
+ SAY("ERROR: page is NULL after get_page(page)\n");
+} else {
+ pvmf->page = page;
+ retcode = VM_FAULT_MINOR;
+}
+return retcode;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * ON COMPLETION OF A VIDEO URB ITS DATA IS COPIED TO THE FIELD BUFFERS
+ * PROVIDED peasycap->video_idle IS ZERO.  REGARDLESS OF WHETHER THAT IS THE
+ * CASE, THE URB IS RESUBMITTED PROVIDED peasycap->video_isoc_streaming IS
+ * NOT ZERO.
+ *
+ * THIS FUNCTION IS AN INTERRUPT SERVICE ROUTINE AND MUST NOT SLEEP.
+ *
+ * INFORMATION ABOUT THE VALIDITY OF THE CONTENTS OF THE FIELD BUFFER IS
+ * STORED IN THE TWO-BYTE STATUS PARAMETER
+ * peasycap->field_buffer[peasycap->field_fill][0].kount
+ * NOTICE THAT THE INFORMATION IS STORED ONLY WITH PAGE 0 OF THE FIELD BUFFER.
+ *
+ * THE LOWER BYTE CONTAINS THE FIELD PARITY BYTE FURNISHED BY THE SAA7113H
+ * CHIP.
+ *
+ * THE UPPER BYTE IS ZERO IF NO PROBLEMS, OTHERWISE:
+ * 0 != (kount & 0x8000) => AT LEAST ONE URB COMPLETED WITH ERRORS
+ * 0 != (kount & 0x4000) => BUFFER HAS TOO MUCH DATA
+ * 0 != (kount & 0x2000) => BUFFER HAS NOT ENOUGH DATA
+ * 0 != (kount & 0x0400) => FIELD WAS SUBMITTED BY BRIDGER ROUTINE
+ * 0 != (kount & 0x0200) => FIELD BUFFER NOT YET CHECKED
+ * 0 != (kount & 0x0100) => BUFFER HAS TWO EXTRA BYTES - WHY?
+ */
+/*---------------------------------------------------------------------------*/
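+/*---------------------------------------------------------------------------*/
+/*
+ * FOR EXAMPLE, A HYPOTHETICAL VALUE kount == 0x2001 WOULD DENOTE A FIELD
+ * WITH PARITY BYTE 0x01 WHOSE BUFFER RECEIVED TOO FEW BYTES (0x2000), AND
+ * WHICH IS THEREFORE DISCARDED BELOW RATHER THAN OFFERED TO dqbuf().
+ */
+/*---------------------------------------------------------------------------*/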
+void
+easycap_complete(struct urb *purb)
+{
+static int mt;
+struct easycap *peasycap;
+struct data_buffer *pfield_buffer;
+char errbuf[16];
+int i, more, much, leap, rc, last;
+int videofieldamount;
+unsigned int override;
+int framestatus, framelength, frameactual, frameoffset;
+__u8 *pu;
+#if defined(BRIDGER)
+struct timeval timeval;
+long long usec;
+#endif /*BRIDGER*/
+
+if (NULL == purb) {
+ SAY("ERROR: easycap_complete(): purb is NULL\n");
+ return;
+}
+peasycap = purb->context;
+if (NULL == peasycap) {
+ SAY("ERROR: easycap_complete(): peasycap is NULL\n");
+ return;
+}
+
+if (peasycap->video_eof)
+ return;
+
+for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++)
+ if (purb->transfer_buffer == peasycap->video_isoc_buffer[i].pgo)
+ break;
+JOT(16, "%2i=urb\n", i);
+last = peasycap->video_isoc_sequence;
+if ((((VIDEO_ISOC_BUFFER_MANY - 1) == last) && \
+ (0 != i)) || \
+ (((VIDEO_ISOC_BUFFER_MANY - 1) != last) && \
+ ((last + 1) != i))) {
+ SAY("ERROR: out-of-order urbs %i,%i ... continuing\n", last, i);
+}
+peasycap->video_isoc_sequence = i;
+
+if (peasycap->video_idle) {
+ JOT(16, "%i=video_idle %i=video_isoc_streaming\n", \
+ peasycap->video_idle, peasycap->video_isoc_streaming);
+ if (peasycap->video_isoc_streaming) {
+ rc = usb_submit_urb(purb, GFP_ATOMIC);
+ if (0 != rc) {
+ SAY("ERROR: while %i=video_idle, " \
+ "usb_submit_urb() failed with rc:\n", \
+ peasycap->video_idle);
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n");
+ break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n");
+ break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n");
+ break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n");
+ break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n");
+ break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n");
+ break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n");
+ break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n");
+ break;
+ }
+ case -ENOSPC: {
+ SAY("ENOSPC\n");
+ break;
+ }
+ default: {
+ SAY("0x%08X\n", rc);
+ break;
+ }
+ }
+ }
+ }
+return;
+}
+override = 0;
+/*---------------------------------------------------------------------------*/
+if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
+ SAY("ERROR: bad peasycap->field_fill\n");
+ return;
+}
+if (purb->status) {
+ if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
+ JOT(8, "urb status -ESHUTDOWN or -ENOENT\n");
+ return;
+ }
+
+ (peasycap->field_buffer[peasycap->field_fill][0].kount) |= 0x8000 ;
+ SAY("ERROR: bad urb status:\n");
+ switch (purb->status) {
+ case -EINPROGRESS: {
+ SAY("-EINPROGRESS\n"); break;
+ }
+ case -ENOSR: {
+ SAY("-ENOSR\n"); break;
+ }
+ case -EPIPE: {
+ SAY("-EPIPE\n"); break;
+ }
+ case -EOVERFLOW: {
+ SAY("-EOVERFLOW\n"); break;
+ }
+ case -EPROTO: {
+ SAY("-EPROTO\n"); break;
+ }
+ case -EILSEQ: {
+ SAY("-EILSEQ\n"); break;
+ }
+ case -ETIMEDOUT: {
+ SAY("-ETIMEDOUT\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("-EMSGSIZE\n"); break;
+ }
+ case -EOPNOTSUPP: {
+ SAY("-EOPNOTSUPP\n"); break;
+ }
+ case -EPFNOSUPPORT: {
+ SAY("-EPFNOSUPPORT\n"); break;
+ }
+ case -EAFNOSUPPORT: {
+ SAY("-EAFNOSUPPORT\n"); break;
+ }
+ case -EADDRINUSE: {
+ SAY("-EADDRINUSE\n"); break;
+ }
+ case -EADDRNOTAVAIL: {
+ SAY("-EADDRNOTAVAIL\n"); break;
+ }
+ case -ENOBUFS: {
+ SAY("-ENOBUFS\n"); break;
+ }
+ case -EISCONN: {
+ SAY("-EISCONN\n"); break;
+ }
+ case -ENOTCONN: {
+ SAY("-ENOTCONN\n"); break;
+ }
+ case -ESHUTDOWN: {
+ SAY("-ESHUTDOWN\n"); break;
+ }
+ case -ENOENT: {
+ SAY("-ENOENT\n"); break;
+ }
+ case -ECONNRESET: {
+ SAY("-ECONNRESET\n"); break;
+ }
+ case -ENOSPC: {
+		SAY("-ENOSPC\n"); break;
+ }
+ default: {
+ SAY("unknown error code 0x%08X\n", purb->status); break;
+ }
+ }
+/*---------------------------------------------------------------------------*/
+} else {
+ for (i = 0; i < purb->number_of_packets; i++) {
+ if (0 != purb->iso_frame_desc[i].status) {
+ (peasycap->field_buffer\
+ [peasycap->field_fill][0].kount) |= 0x8000 ;
+ switch (purb->iso_frame_desc[i].status) {
+ case 0: {
+ strcpy(&errbuf[0], "OK"); break;
+ }
+ case -ENOENT: {
+ strcpy(&errbuf[0], "-ENOENT"); break;
+ }
+ case -EINPROGRESS: {
+ strcpy(&errbuf[0], "-EINPROGRESS"); break;
+ }
+ case -EPROTO: {
+ strcpy(&errbuf[0], "-EPROTO"); break;
+ }
+ case -EILSEQ: {
+ strcpy(&errbuf[0], "-EILSEQ"); break;
+ }
+ case -ETIME: {
+ strcpy(&errbuf[0], "-ETIME"); break;
+ }
+ case -ETIMEDOUT: {
+ strcpy(&errbuf[0], "-ETIMEDOUT"); break;
+ }
+ case -EPIPE: {
+ strcpy(&errbuf[0], "-EPIPE"); break;
+ }
+ case -ECOMM: {
+ strcpy(&errbuf[0], "-ECOMM"); break;
+ }
+ case -ENOSR: {
+ strcpy(&errbuf[0], "-ENOSR"); break;
+ }
+ case -EOVERFLOW: {
+ strcpy(&errbuf[0], "-EOVERFLOW"); break;
+ }
+ case -EREMOTEIO: {
+ strcpy(&errbuf[0], "-EREMOTEIO"); break;
+ }
+ case -ENODEV: {
+ strcpy(&errbuf[0], "-ENODEV"); break;
+ }
+ case -EXDEV: {
+ strcpy(&errbuf[0], "-EXDEV"); break;
+ }
+ case -EINVAL: {
+ strcpy(&errbuf[0], "-EINVAL"); break;
+ }
+ case -ECONNRESET: {
+ strcpy(&errbuf[0], "-ECONNRESET"); break;
+ }
+ case -ENOSPC: {
+				strcpy(&errbuf[0], "-ENOSPC"); break;
+ }
+ case -ESHUTDOWN: {
+ strcpy(&errbuf[0], "-ESHUTDOWN"); break;
+ }
+ default: {
+ strcpy(&errbuf[0], "unknown error"); break;
+ }
+ }
+ }
+ framestatus = purb->iso_frame_desc[i].status;
+ framelength = purb->iso_frame_desc[i].length;
+ frameactual = purb->iso_frame_desc[i].actual_length;
+ frameoffset = purb->iso_frame_desc[i].offset;
+
+ JOT(16, "frame[%2i]:" \
+ "%4i=status " \
+ "%4i=actual " \
+ "%4i=length " \
+ "%5i=offset\n", \
+ i, framestatus, frameactual, framelength, frameoffset);
+ if (!purb->iso_frame_desc[i].status) {
+ more = purb->iso_frame_desc[i].actual_length;
+ pfield_buffer = &peasycap->field_buffer\
+ [peasycap->field_fill][peasycap->field_page];
+ videofieldamount = (peasycap->field_page * \
+ PAGE_SIZE) + \
+ (int)(pfield_buffer->pto - pfield_buffer->pgo);
+ if (4 == more)
+ mt++;
+ if (4 < more) {
+ if (mt) {
+ JOT(8, "%4i empty video urb frames\n", mt);
+ mt = 0;
+ }
+ if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
+ SAY("ERROR: bad peasycap->field_fill\n");
+ return;
+ }
+ if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
+ peasycap->field_page) {
+ SAY("ERROR: bad peasycap->field_page\n");
+ return;
+ }
+ pfield_buffer = &peasycap->field_buffer\
+ [peasycap->field_fill][peasycap->field_page];
+ pu = (__u8 *)(purb->transfer_buffer + \
+ purb->iso_frame_desc[i].offset);
+ if (0x80 & *pu)
+ leap = 8;
+ else
+ leap = 4;
+/*--------------------------------------------------------------------------*/
+/*
+ * EIGHT-BYTE END-OF-VIDEOFIELD MARKER.
+ * NOTE: A SUCCESSION OF EMPTY URB FRAMES FOLLOWS THIS,
+ * CORRESPONDING TO THE FIELD FLYBACK (VERTICAL BLANKING) PERIOD.
+ *
+ * PROVIDED THE FIELD BUFFER CONTAINS GOOD DATA AS INDICATED BY A ZERO UPPER
+ * BYTE OF
+ * peasycap->field_buffer[peasycap->field_fill][0].kount
+ * THE CONTENTS OF THE FIELD BUFFER ARE OFFERED TO dqbuf(), field_read IS
+ * UPDATED AND field_fill IS BUMPED. IF THE FIELD BUFFER CONTAINS BAD DATA
+ * NOTHING IS OFFERED TO dqbuf().
+ *
+ * THE DECISION ON WHETHER THE PARITY OF THE OFFERED FIELD BUFFER IS RIGHT
+ * RESTS WITH dqbuf().
+ */
+/*---------------------------------------------------------------------------*/
+ if ((8 == more) || override) {
+ if (videofieldamount > \
+ peasycap->videofieldamount) {
+ if (2 == videofieldamount - \
+ peasycap->\
+ videofieldamount)
+ (peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [0].kount) |= 0x0100;
+ else
+ (peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [0].kount) |= 0x4000;
+ } else if (videofieldamount < \
+ peasycap->\
+ videofieldamount) {
+ (peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [0].kount) |= 0x2000;
+ }
+ if (!(0xFF00 & peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [0].kount)) {
+ (peasycap->video_junk)--;
+ if (-16 > peasycap->video_junk)
+ peasycap->video_junk = -16;
+ peasycap->field_read = \
+ (peasycap->\
+ field_fill)++;
+
+ if (FIELD_BUFFER_MANY <= \
+ peasycap->field_fill)
+ peasycap->field_fill = 0;
+ peasycap->field_page = 0;
+ pfield_buffer = &peasycap->\
+ field_buffer\
+ [peasycap->field_fill]\
+ [peasycap->field_page];
+ pfield_buffer->pto = \
+ pfield_buffer->pgo;
+
+ JOT(8, "bumped to: %i=peasycap->" \
+ "field_fill %i=parity\n", \
+ peasycap->field_fill, \
+ 0x00FF & pfield_buffer->kount);
+ JOT(8, "field buffer %i has %i " \
+ "bytes fit to be read\n", \
+ peasycap->field_read, \
+ videofieldamount);
+ JOT(8, "wakeup call to wq_video, " \
+ "%i=field_read %i=field_fill "\
+ "%i=parity\n", \
+ peasycap->field_read, \
+ peasycap->field_fill, \
+ 0x00FF & peasycap->\
+ field_buffer[peasycap->\
+ field_read][0].kount);
+ wake_up_interruptible(&(peasycap->\
+ wq_video));
+ do_gettimeofday(&peasycap->timeval7);
+ } else {
+ peasycap->video_junk++;
+ JOT(8, "field buffer %i had %i " \
+ "bytes, now discarded\n", \
+ peasycap->field_fill, \
+ videofieldamount);
+
+ (peasycap->field_fill)++;
+
+ if (FIELD_BUFFER_MANY <= \
+ peasycap->field_fill)
+ peasycap->field_fill = 0;
+ peasycap->field_page = 0;
+ pfield_buffer = \
+ &peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [peasycap->field_page];
+ pfield_buffer->pto = \
+ pfield_buffer->pgo;
+
+ JOT(8, "bumped to: %i=peasycap->" \
+ "field_fill %i=parity\n", \
+ peasycap->field_fill, \
+ 0x00FF & pfield_buffer->kount);
+ }
+ if (8 == more) {
+ JOT(8, "end-of-field: received " \
+ "parity byte 0x%02X\n", \
+ (0xFF & *pu));
+ if (0x40 & *pu)
+ pfield_buffer->kount = 0x0000;
+ else
+ pfield_buffer->kount = 0x0001;
+ JOT(8, "end-of-field: 0x%02X=kount\n",\
+ 0xFF & pfield_buffer->kount);
+ }
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * COPY more BYTES FROM ISOC BUFFER TO FIELD BUFFER
+ */
+/*---------------------------------------------------------------------------*/
+ pu += leap;
+ more -= leap;
+
+ if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
+ SAY("ERROR: bad peasycap->field_fill\n");
+ return;
+ }
+ if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
+ peasycap->field_page) {
+ SAY("ERROR: bad peasycap->field_page\n");
+ return;
+ }
+ pfield_buffer = &peasycap->field_buffer\
+ [peasycap->field_fill][peasycap->field_page];
+ while (more) {
+ pfield_buffer = &peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [peasycap->field_page];
+ if (PAGE_SIZE < (pfield_buffer->pto - \
+ pfield_buffer->pgo)) {
+ SAY("ERROR: bad pfield_buffer->pto\n");
+ return;
+ }
+ if (PAGE_SIZE == (pfield_buffer->pto - \
+ pfield_buffer->pgo)) {
+ (peasycap->field_page)++;
+ if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
+ peasycap->field_page) {
+ JOT(16, "wrapping peasycap->" \
+ "field_page\n");
+ peasycap->field_page = 0;
+ }
+ pfield_buffer = &peasycap->\
+ field_buffer\
+ [peasycap->field_fill]\
+ [peasycap->field_page];
+ pfield_buffer->pto = \
+ pfield_buffer->pgo;
+ }
+
+ much = PAGE_SIZE - (int)(pfield_buffer->pto - \
+ pfield_buffer->pgo);
+
+ if (much > more)
+ much = more;
+ memcpy(pfield_buffer->pto, pu, much);
+ pu += much;
+ (pfield_buffer->pto) += much;
+ more -= much;
+ }
+ }
+ }
+ }
+}
+/*---------------------------------------------------------------------------*/
+/*
+ *
+ *
+ * *** UNDER DEVELOPMENT/TESTING - NOT READY YET! ***
+ *
+ *
+ *
+ * VIDEOTAPES MAY HAVE BEEN MANUALLY PAUSED AND RESTARTED DURING RECORDING.
+ * THIS CAUSES LOSS OF SYNC, CONFUSING DOWNSTREAM USERSPACE PROGRAMS WHICH
+ * MAY INTERPRET THE INTERRUPTION AS A SYMPTOM OF LATENCY. TO OVERCOME THIS
+ * THE DRIVER BRIDGES THE HIATUS BY SENDING DUMMY VIDEO FRAMES AT ROUGHLY
+ * THE RIGHT TIME INTERVALS IN THE HOPE OF PERSUADING THE DOWNSTREAM USERSPACE
+ * PROGRAM TO RESUME NORMAL SERVICE WHEN THE INTERRUPTION IS OVER.
+ */
+/*---------------------------------------------------------------------------*/
+#if defined(BRIDGER)
+do_gettimeofday(&timeval);
+if (peasycap->timeval7.tv_sec) {
+ usec = 1000000*(timeval.tv_sec - peasycap->timeval7.tv_sec) + \
+ (timeval.tv_usec - peasycap->timeval7.tv_usec);
+ if (usec > (peasycap->usec + peasycap->tolerate)) {
+ JOT(8, "bridging hiatus\n");
+ peasycap->video_junk = 0;
+ peasycap->field_buffer[peasycap->field_fill][0].kount |= 0x0400;
+
+ peasycap->field_read = (peasycap->field_fill)++;
+
+ if (FIELD_BUFFER_MANY <= peasycap->field_fill) \
+ peasycap->field_fill = 0;
+ peasycap->field_page = 0;
+ pfield_buffer = &peasycap->field_buffer\
+ [peasycap->field_fill][peasycap->field_page];
+ pfield_buffer->pto = pfield_buffer->pgo;
+
+ JOT(8, "bumped to: %i=peasycap->field_fill %i=parity\n", \
+ peasycap->field_fill, 0x00FF & pfield_buffer->kount);
+ JOT(8, "field buffer %i has %i bytes to be overwritten\n", \
+ peasycap->field_read, videofieldamount);
+ JOT(8, "wakeup call to wq_video, " \
+ "%i=field_read %i=field_fill %i=parity\n", \
+ peasycap->field_read, peasycap->field_fill, \
+ 0x00FF & \
+ peasycap->field_buffer[peasycap->field_read][0].kount);
+ wake_up_interruptible(&(peasycap->wq_video));
+ do_gettimeofday(&peasycap->timeval7);
+ }
+}
+#endif /*BRIDGER*/
+/*---------------------------------------------------------------------------*/
+/*
+ * RESUBMIT THIS URB, UNLESS A SEVERE PERSISTENT ERROR CONDITION EXISTS.
+ *
+ * IF THE WAIT QUEUES ARE NOT CLEARED IN RESPONSE TO AN ERROR CONDITION
+ * THE USERSPACE PROGRAM, E.G. mplayer, MAY HANG ON EXIT. BEWARE.
+ */
+/*---------------------------------------------------------------------------*/
+if (VIDEO_ISOC_BUFFER_MANY <= peasycap->video_junk) {
+ SAY("easycap driver shutting down on condition green\n");
+ peasycap->video_eof = 1;
+ peasycap->audio_eof = 1;
+ peasycap->video_junk = -VIDEO_ISOC_BUFFER_MANY;
+ wake_up_interruptible(&(peasycap->wq_video));
+ wake_up_interruptible(&(peasycap->wq_audio));
+ return;
+}
+if (peasycap->video_isoc_streaming) {
+ rc = usb_submit_urb(purb, GFP_ATOMIC);
+ if (0 != rc) {
+ SAY("ERROR: while %i=video_idle, usb_submit_urb() failed " \
+ "with rc:\n", peasycap->video_idle);
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n"); break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n"); break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n"); break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n"); break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n"); break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n"); break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n"); break;
+ }
+ case -ENOSPC: {
+ SAY("ENOSPC\n"); break;
+ }
+ default: {
+ SAY("0x%08X\n", rc); break;
+ }
+ }
+ }
+}
+return;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ *
+ * FIXME
+ *
+ *
+ * THIS FUNCTION ASSUMES THAT, ON EACH AND EVERY OCCASION THAT THE DEVICE IS
+ * PHYSICALLY PLUGGED IN, INTERFACE 0 IS PROBED FIRST.
+ * IF THIS IS NOT TRUE, THERE IS THE POSSIBILITY OF AN Oops.
+ *
+ * THIS HAS NEVER BEEN A PROBLEM IN PRACTICE, BUT SOMETHING SEEMS WRONG HERE.
+ */
+/*---------------------------------------------------------------------------*/
+int
+easycap_usb_probe(struct usb_interface *pusb_interface, \
+ const struct usb_device_id *id)
+{
+struct usb_device *pusb_device, *pusb_device1;
+struct usb_host_interface *pusb_host_interface;
+struct usb_endpoint_descriptor *pepd;
+struct usb_interface_descriptor *pusb_interface_descriptor;
+struct usb_interface_assoc_descriptor *pusb_interface_assoc_descriptor;
+struct urb *purb;
+static struct easycap *peasycap /*=NULL*/;
+struct data_urb *pdata_urb;
+size_t wMaxPacketSize;
+int ISOCwMaxPacketSize;
+int BULKwMaxPacketSize;
+int INTwMaxPacketSize;
+int CTRLwMaxPacketSize;
+__u8 bEndpointAddress;
+__u8 ISOCbEndpointAddress;
+__u8 INTbEndpointAddress;
+int isin, i, j, k, m;
+__u8 bInterfaceNumber;
+__u8 bInterfaceClass;
+__u8 bInterfaceSubClass;
+void *pbuf;
+int okalt[8], isokalt;
+int okepn[8], isokepn;
+int okmps[8], isokmps;
+int maxpacketsize;
+int rc;
+
+JOT(4, "\n");
+
+if ((struct usb_interface *)NULL == pusb_interface) {
+ SAY("ERROR: pusb_interface is NULL\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * GET POINTER TO STRUCTURE usb_device
+ */
+/*---------------------------------------------------------------------------*/
+pusb_device1 = container_of(pusb_interface->dev.parent, \
+ struct usb_device, dev);
+if ((struct usb_device *)NULL == pusb_device1) {
+ SAY("ERROR: pusb_device1 is NULL\n");
+ return -EFAULT;
+}
+pusb_device = usb_get_dev(pusb_device1);
+if ((struct usb_device *)NULL == pusb_device) {
+ SAY("ERROR: pusb_device is NULL\n");
+ return -EFAULT;
+}
+if ((unsigned long int)pusb_device1 != (unsigned long int)pusb_device) {
+ JOT(4, "ERROR: pusb_device1 != pusb_device\n");
+ return -EFAULT;
+}
+
+JOT(4, "bNumConfigurations=%i\n", pusb_device->descriptor.bNumConfigurations);
+
+/*---------------------------------------------------------------------------*/
+pusb_host_interface = pusb_interface->cur_altsetting;
+if (NULL == pusb_host_interface) {
+ SAY("ERROR: pusb_host_interface is NULL\n");
+ return -EFAULT;
+}
+pusb_interface_descriptor = &(pusb_host_interface->desc);
+if (NULL == pusb_interface_descriptor) {
+ SAY("ERROR: pusb_interface_descriptor is NULL\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * GET PROPERTIES OF PROBED INTERFACE
+ */
+/*---------------------------------------------------------------------------*/
+bInterfaceNumber = pusb_interface_descriptor->bInterfaceNumber;
+bInterfaceClass = pusb_interface_descriptor->bInterfaceClass;
+bInterfaceSubClass = pusb_interface_descriptor->bInterfaceSubClass;
+
+JOT(4, "intf[%i]: pusb_interface->num_altsetting=%i\n", \
+ bInterfaceNumber, pusb_interface->num_altsetting);
+JOT(4, "intf[%i]: pusb_interface->cur_altsetting - " \
+ "pusb_interface->altsetting=%li\n", bInterfaceNumber, \
+ (long int)(pusb_interface->cur_altsetting - \
+ pusb_interface->altsetting));
+switch (bInterfaceClass) {
+case USB_CLASS_AUDIO: {
+ JOT(4, "intf[%i]: bInterfaceClass=0x%02X=USB_CLASS_AUDIO\n", \
+ bInterfaceNumber, bInterfaceClass); break;
+ }
+case USB_CLASS_VIDEO: {
+ JOT(4, "intf[%i]: bInterfaceClass=0x%02X=USB_CLASS_VIDEO\n", \
+ bInterfaceNumber, bInterfaceClass); break;
+ }
+case USB_CLASS_VENDOR_SPEC: {
+ JOT(4, "intf[%i]: bInterfaceClass=0x%02X=USB_CLASS_VENDOR_SPEC\n", \
+ bInterfaceNumber, bInterfaceClass); break;
+ }
+default:
+ break;
+}
+switch (bInterfaceSubClass) {
+case 0x01: {
+ JOT(4, "intf[%i]: bInterfaceSubClass=0x%02X=AUDIOCONTROL\n", \
+ bInterfaceNumber, bInterfaceSubClass); break;
+}
+case 0x02: {
+ JOT(4, "intf[%i]: bInterfaceSubClass=0x%02X=AUDIOSTREAMING\n", \
+ bInterfaceNumber, bInterfaceSubClass); break;
+}
+case 0x03: {
+ JOT(4, "intf[%i]: bInterfaceSubClass=0x%02X=MIDISTREAMING\n", \
+ bInterfaceNumber, bInterfaceSubClass); break;
+}
+default:
+ break;
+}
+/*---------------------------------------------------------------------------*/
+pusb_interface_assoc_descriptor = pusb_interface->intf_assoc;
+if (NULL != pusb_interface_assoc_descriptor) {
+ JOT(4, "intf[%i]: bFirstInterface=0x%02X bInterfaceCount=0x%02X\n", \
+ bInterfaceNumber, \
+ pusb_interface_assoc_descriptor->bFirstInterface, \
+ pusb_interface_assoc_descriptor->bInterfaceCount);
+} else {
+JOT(4, "intf[%i]: pusb_interface_assoc_descriptor is NULL\n", \
+ bInterfaceNumber);
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * A NEW struct easycap IS ALWAYS ALLOCATED WHEN INTERFACE 0 IS PROBED.
+ * IT IS NOT POSSIBLE HERE TO FREE ANY EXISTING struct easycap. THIS
+ * SHOULD HAVE BEEN DONE BY easycap_delete() WHEN THE DEVICE WAS PHYSICALLY
+ * UNPLUGGED.
+ */
+/*---------------------------------------------------------------------------*/
+if (0 == bInterfaceNumber) {
+ peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
+ if (NULL == peasycap) {
+ SAY("ERROR: Could not allocate peasycap\n");
+ return -ENOMEM;
+ } else {
+ peasycap->allocation_video_struct = sizeof(struct easycap);
+ peasycap->allocation_video_page = 0;
+ peasycap->allocation_video_urb = 0;
+ peasycap->allocation_audio_struct = 0;
+ peasycap->allocation_audio_page = 0;
+ peasycap->allocation_audio_urb = 0;
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * INITIALIZE THE NEW easycap STRUCTURE.
+ * NO PARAMETERS ARE SPECIFIED HERE REQUIRING THE SETTING OF REGISTERS.
+ * THAT IS DONE FIRST BY easycap_open() AND LATER BY easycap_ioctl().
+ */
+/*---------------------------------------------------------------------------*/
+ peasycap->pusb_device = pusb_device;
+ peasycap->pusb_interface = pusb_interface;
+
+ kref_init(&peasycap->kref);
+ JOT(8, "intf[%i]: after kref_init(..._video) " \
+ "%i=peasycap->kref.refcount.counter\n", \
+ bInterfaceNumber, peasycap->kref.refcount.counter);
+
+ init_waitqueue_head(&(peasycap->wq_video));
+ init_waitqueue_head(&(peasycap->wq_audio));
+
+ mutex_init(&(peasycap->mutex_timeval0));
+ mutex_init(&(peasycap->mutex_timeval1));
+
+ for (k = 0; k < FRAME_BUFFER_MANY; k++)
+ mutex_init(&(peasycap->mutex_mmap_video[k]));
+
+ peasycap->ilk = 0;
+ peasycap->microphone = false;
+
+ peasycap->video_interface = -1;
+ peasycap->video_altsetting_on = -1;
+ peasycap->video_altsetting_off = -1;
+ peasycap->video_endpointnumber = -1;
+ peasycap->video_isoc_maxframesize = -1;
+ peasycap->video_isoc_buffer_size = -1;
+
+ peasycap->audio_interface = -1;
+ peasycap->audio_altsetting_on = -1;
+ peasycap->audio_altsetting_off = -1;
+ peasycap->audio_endpointnumber = -1;
+ peasycap->audio_isoc_maxframesize = -1;
+ peasycap->audio_isoc_buffer_size = -1;
+
+ peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
+
+ if ((struct mutex *)NULL == &(peasycap->mutex_mmap_video[0])) {
+ SAY("ERROR: &(peasycap->mutex_mmap_video[%i]) is NULL\n", 0);
+ return -EFAULT;
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * DYNAMICALLY FILL IN THE AVAILABLE FORMATS.
+ */
+/*---------------------------------------------------------------------------*/
+ rc = fillin_formats();
+ if (0 > rc) {
+ SAY("ERROR: fillin_formats() returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOT(4, "%i formats available\n", rc);
+ } else {
+/*---------------------------------------------------------------------------*/
+ if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL " \
+ "when probing interface %i\n", \
+ bInterfaceNumber);
+ return -EFAULT;
+ }
+
+ JOT(8, "kref_get() with %i=peasycap->kref.refcount.counter\n", \
+ (int)peasycap->kref.refcount.counter);
+ kref_get(&peasycap->kref);
+}
+/*---------------------------------------------------------------------------*/
+if ((USB_CLASS_VIDEO == bInterfaceClass) || \
+ (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
+ if (-1 == peasycap->video_interface) {
+ peasycap->video_interface = bInterfaceNumber;
+ JOT(4, "setting peasycap->video_interface=%i\n", \
+ peasycap->video_interface);
+ } else {
+ if (peasycap->video_interface != bInterfaceNumber) {
+ SAY("ERROR: attempting to reset " \
+ "peasycap->video_interface\n");
+ SAY("...... continuing with " \
+ "%i=peasycap->video_interface\n", \
+ peasycap->video_interface);
+ }
+ }
+} else if ((USB_CLASS_AUDIO == bInterfaceClass) && \
+ (0x02 == bInterfaceSubClass)) {
+ if (-1 == peasycap->audio_interface) {
+ peasycap->audio_interface = bInterfaceNumber;
+ JOT(4, "setting peasycap->audio_interface=%i\n", \
+ peasycap->audio_interface);
+ } else {
+ if (peasycap->audio_interface != bInterfaceNumber) {
+ SAY("ERROR: attempting to reset " \
+ "peasycap->audio_interface\n");
+ SAY("...... continuing with " \
+ "%i=peasycap->audio_interface\n", \
+ peasycap->audio_interface);
+ }
+ }
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * INVESTIGATE ALL ALTSETTINGS.
+ * DONE IN DETAIL BECAUSE USB DEVICE 05e1:0408 HAS DISPARATE INCARNATIONS.
+ */
+/*---------------------------------------------------------------------------*/
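+/*
+ * okalt[], okepn[] AND okmps[] ACCUMULATE, IN ORDER OF DISCOVERY, THE
+ * ALTERNATE SETTINGS, ENDPOINT NUMBERS AND MAXIMUM PACKET SIZES OF THE
+ * USABLE (NON-ZERO-BANDWIDTH) ISOCHRONOUS IN ENDPOINTS.  THE LAST ENTRY
+ * FOUND IS THE ONE ADOPTED DURING THE PER-INTERFACE INITIALIZATION BELOW.
+ */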
+isokalt = 0;
+isokepn = 0;
+isokmps = 0;
+
+for (i = 0; i < pusb_interface->num_altsetting; i++) {
+ pusb_host_interface = &(pusb_interface->altsetting[i]);
+ if ((struct usb_host_interface *)NULL == pusb_host_interface) {
+ SAY("ERROR: pusb_host_interface is NULL\n");
+ return -EFAULT;
+ }
+ pusb_interface_descriptor = &(pusb_host_interface->desc);
+ if ((struct usb_interface_descriptor *)NULL == \
+ pusb_interface_descriptor) {
+ SAY("ERROR: pusb_interface_descriptor is NULL\n");
+ return -EFAULT;
+ }
+
+ JOT(4, "intf[%i]alt[%i]: desc.bDescriptorType=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bDescriptorType);
+ JOT(4, "intf[%i]alt[%i]: desc.bInterfaceNumber=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceNumber);
+ JOT(4, "intf[%i]alt[%i]: desc.bAlternateSetting=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bAlternateSetting);
+ JOT(4, "intf[%i]alt[%i]: desc.bNumEndpoints=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bNumEndpoints);
+ JOT(4, "intf[%i]alt[%i]: desc.bInterfaceClass=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceClass);
+ JOT(4, "intf[%i]alt[%i]: desc.bInterfaceSubClass=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceSubClass);
+ JOT(4, "intf[%i]alt[%i]: desc.bInterfaceProtocol=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceProtocol);
+ JOT(4, "intf[%i]alt[%i]: desc.iInterface=0x%02X\n", \
+ bInterfaceNumber, i, pusb_interface_descriptor->iInterface);
+
+ ISOCwMaxPacketSize = -1;
+ BULKwMaxPacketSize = -1;
+ INTwMaxPacketSize = -1;
+ CTRLwMaxPacketSize = -1;
+ ISOCbEndpointAddress = 0;
+ INTbEndpointAddress = 0;
+
+ if (0 == pusb_interface_descriptor->bNumEndpoints)
+ JOT(4, "intf[%i]alt[%i] has no endpoints\n", \
+ bInterfaceNumber, i);
+/*---------------------------------------------------------------------------*/
+ for (j = 0; j < pusb_interface_descriptor->bNumEndpoints; j++) {
+ pepd = &(pusb_host_interface->endpoint[j].desc);
+ if ((struct usb_endpoint_descriptor *)NULL == pepd) {
+ SAY("ERROR: pepd is NULL.\n");
+ SAY("...... skipping\n");
+ continue;
+ }
+ wMaxPacketSize = le16_to_cpu(pepd->wMaxPacketSize);
+ bEndpointAddress = pepd->bEndpointAddress;
+
+ JOT(4, "intf[%i]alt[%i]end[%i]: bEndpointAddress=0x%X\n", \
+ bInterfaceNumber, i, j, \
+ pepd->bEndpointAddress);
+ JOT(4, "intf[%i]alt[%i]end[%i]: bmAttributes=0x%X\n", \
+ bInterfaceNumber, i, j, \
+ pepd->bmAttributes);
+ JOT(4, "intf[%i]alt[%i]end[%i]: wMaxPacketSize=%i\n", \
+ bInterfaceNumber, i, j, \
+			(int)wMaxPacketSize);
+ JOT(4, "intf[%i]alt[%i]end[%i]: bInterval=%i\n",
+ bInterfaceNumber, i, j, \
+ pepd->bInterval);
+
+ if (pepd->bEndpointAddress & USB_DIR_IN) {
+ JOT(4, "intf[%i]alt[%i]end[%i] is an IN endpoint\n",\
+ bInterfaceNumber, i, j);
+ isin = 1;
+ } else {
+ JOT(4, "intf[%i]alt[%i]end[%i] is an OUT endpoint\n",\
+ bInterfaceNumber, i, j);
+ SAY("ERROR: OUT endpoint unexpected\n");
+ SAY("...... continuing\n");
+ isin = 0;
+ }
+ if ((pepd->bmAttributes & \
+ USB_ENDPOINT_XFERTYPE_MASK) == \
+ USB_ENDPOINT_XFER_ISOC) {
+ JOT(4, "intf[%i]alt[%i]end[%i] is an ISOC endpoint\n",\
+ bInterfaceNumber, i, j);
+ if (isin) {
+ switch (bInterfaceClass) {
+ case USB_CLASS_VIDEO:
+ case USB_CLASS_VENDOR_SPEC: {
+ if (!peasycap) {
+ SAY("MISTAKE: " \
+ "peasycap is NULL\n");
+ return -EFAULT;
+ }
+ if (pepd->wMaxPacketSize) {
+ if (8 > isokalt) {
+ okalt[isokalt] = i;
+ JOT(4,\
+ "%i=okalt[%i]\n", \
+ okalt[isokalt], \
+ isokalt);
+ isokalt++;
+ }
+ if (8 > isokepn) {
+ okepn[isokepn] = \
+ pepd->\
+ bEndpointAddress & \
+ 0x0F;
+ JOT(4,\
+ "%i=okepn[%i]\n", \
+ okepn[isokepn], \
+ isokepn);
+ isokepn++;
+ }
+ if (8 > isokmps) {
+ okmps[isokmps] = \
+ le16_to_cpu(pepd->\
+ wMaxPacketSize);
+ JOT(4,\
+ "%i=okmps[%i]\n", \
+ okmps[isokmps], \
+ isokmps);
+ isokmps++;
+ }
+ } else {
+ if (-1 == peasycap->\
+ video_altsetting_off) {
+ peasycap->\
+ video_altsetting_off =\
+ i;
+ JOT(4, "%i=video_" \
+ "altsetting_off " \
+ "<====\n", \
+ peasycap->\
+ video_altsetting_off);
+ } else {
+ SAY("ERROR: peasycap" \
+ "->video_altsetting_" \
+ "off already set\n");
+ SAY("...... " \
+ "continuing with " \
+ "%i=peasycap->video_" \
+ "altsetting_off\n", \
+ peasycap->\
+ video_altsetting_off);
+ }
+ }
+ break;
+ }
+ case USB_CLASS_AUDIO: {
+ if (0x02 != bInterfaceSubClass)
+ break;
+ if (!peasycap) {
+ SAY("MISTAKE: " \
+ "peasycap is NULL\n");
+ return -EFAULT;
+ }
+ if (pepd->wMaxPacketSize) {
+ if (8 > isokalt) {
+ okalt[isokalt] = i;
+ JOT(4,\
+ "%i=okalt[%i]\n", \
+ okalt[isokalt], \
+ isokalt);
+ isokalt++;
+ }
+ if (8 > isokepn) {
+ okepn[isokepn] = \
+ pepd->\
+ bEndpointAddress & \
+ 0x0F;
+ JOT(4,\
+ "%i=okepn[%i]\n", \
+ okepn[isokepn], \
+ isokepn);
+ isokepn++;
+ }
+ if (8 > isokmps) {
+ okmps[isokmps] = \
+ le16_to_cpu(pepd->\
+ wMaxPacketSize);
+ JOT(4,\
+ "%i=okmps[%i]\n",\
+ okmps[isokmps], \
+ isokmps);
+ isokmps++;
+ }
+ } else {
+ if (-1 == peasycap->\
+ audio_altsetting_off) {
+ peasycap->\
+ audio_altsetting_off =\
+ i;
+ JOT(4, "%i=audio_" \
+ "altsetting_off " \
+ "<====\n", \
+ peasycap->\
+ audio_altsetting_off);
+ } else {
+ SAY("ERROR: peasycap" \
+ "->audio_altsetting_" \
+ "off already set\n");
+ SAY("...... " \
+ "continuing with " \
+ "%i=peasycap->audio_" \
+ "altsetting_off\n", \
+ peasycap->\
+ audio_altsetting_off);
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ } else if ((pepd->bmAttributes & \
+ USB_ENDPOINT_XFERTYPE_MASK) ==\
+ USB_ENDPOINT_XFER_BULK) {
+ JOT(4, "intf[%i]alt[%i]end[%i] is a BULK endpoint\n",\
+ bInterfaceNumber, i, j);
+ } else if ((pepd->bmAttributes & \
+ USB_ENDPOINT_XFERTYPE_MASK) ==\
+ USB_ENDPOINT_XFER_INT) {
+ JOT(4, "intf[%i]alt[%i]end[%i] is an INT endpoint\n",\
+ bInterfaceNumber, i, j);
+ } else {
+ JOT(4, "intf[%i]alt[%i]end[%i] is a CTRL endpoint\n",\
+ bInterfaceNumber, i, j);
+ }
+ if (0 == pepd->wMaxPacketSize) {
+ JOT(4, "intf[%i]alt[%i]end[%i] " \
+ "has zero packet size\n", \
+ bInterfaceNumber, i, j);
+ }
+ }
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * PERFORM INITIALIZATION OF THE PROBED INTERFACE
+ */
+/*---------------------------------------------------------------------------*/
+JOT(4, "initialization begins for interface %i\n", \
+ pusb_interface_descriptor->bInterfaceNumber);
+switch (bInterfaceNumber) {
+/*---------------------------------------------------------------------------*/
+/*
+ * INTERFACE 0 IS THE VIDEO INTERFACE
+ */
+/*---------------------------------------------------------------------------*/
+case 0: {
+ if (!peasycap) {
+ SAY("MISTAKE: peasycap is NULL\n");
+ return -EFAULT;
+ }
+ if (!isokalt) {
+ SAY("ERROR: no viable video_altsetting_on\n");
+ return -ENOENT;
+ } else {
+ peasycap->video_altsetting_on = okalt[isokalt - 1];
+ JOT(4, "%i=video_altsetting_on <====\n", \
+ peasycap->video_altsetting_on);
+ }
+ if (!isokepn) {
+ SAY("ERROR: no viable video_endpointnumber\n");
+ return -ENOENT;
+ } else {
+ peasycap->video_endpointnumber = okepn[isokepn - 1];
+ JOT(4, "%i=video_endpointnumber\n", \
+ peasycap->video_endpointnumber);
+ }
+ if (!isokmps) {
+ SAY("ERROR: no viable video_maxpacketsize\n");
+ return -ENOENT;
+/*---------------------------------------------------------------------------*/
+/*
+ * DECIDE THE VIDEO STREAMING PARAMETERS
+ */
+/*---------------------------------------------------------------------------*/
+ } else {
+ maxpacketsize = okmps[isokmps - 1];
+ if (USB_2_0_MAXPACKETSIZE > maxpacketsize) {
+ peasycap->video_isoc_maxframesize = maxpacketsize;
+ } else {
+ peasycap->video_isoc_maxframesize = \
+ USB_2_0_MAXPACKETSIZE;
+ }
+ JOT(4, "%i=video_isoc_maxframesize\n", \
+ peasycap->video_isoc_maxframesize);
+ if (0 >= peasycap->video_isoc_maxframesize) {
+ SAY("ERROR: bad video_isoc_maxframesize\n");
+ return -ENOENT;
+ }
+ peasycap->video_isoc_framesperdesc = VIDEO_ISOC_FRAMESPERDESC;
+ JOT(4, "%i=video_isoc_framesperdesc\n", \
+ peasycap->video_isoc_framesperdesc);
+ if (0 >= peasycap->video_isoc_framesperdesc) {
+ SAY("ERROR: bad video_isoc_framesperdesc\n");
+ return -ENOENT;
+ }
+ peasycap->video_isoc_buffer_size = \
+ peasycap->video_isoc_maxframesize * \
+ peasycap->video_isoc_framesperdesc;
+ JOT(4, "%i=video_isoc_buffer_size\n", \
+ peasycap->video_isoc_buffer_size);
+ if ((PAGE_SIZE << VIDEO_ISOC_ORDER) < \
+ peasycap->video_isoc_buffer_size) {
+ SAY("MISTAKE: " \
+ "peasycap->video_isoc_buffer_size too big\n");
+ return -EFAULT;
+ }
+ }
+/*---------------------------------------------------------------------------*/
+ if (-1 == peasycap->video_interface) {
+ SAY("MISTAKE: video_interface is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->video_altsetting_on) {
+ SAY("MISTAKE: video_altsetting_on is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->video_altsetting_off) {
+ SAY("MISTAKE: video_altsetting_off is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->video_endpointnumber) {
+ SAY("MISTAKE: video_endpointnumber is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->video_isoc_maxframesize) {
+ SAY("MISTAKE: video_isoc_maxframesize is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->video_isoc_buffer_size) {
+ SAY("MISTAKE: video_isoc_buffer_size is unset\n");
+ return -EFAULT;
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * ALLOCATE MEMORY FOR VIDEO BUFFERS. LISTS MUST BE INITIALIZED FIRST.
+ */
+/*---------------------------------------------------------------------------*/
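+/*
+ * Frame buffers and field buffers are assembled from individual pages
+ * obtained with __get_free_page() and tracked as {pgo, pto} pairs (page
+ * origin and working pointer), whereas each isochronous transfer buffer
+ * is a contiguous block of 2^VIDEO_ISOC_ORDER pages obtained with
+ * __get_free_pages(). Every page is counted in
+ * peasycap->allocation_video_page.
+ */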
+ INIT_LIST_HEAD(&(peasycap->urb_video_head));
+ peasycap->purb_video_head = &(peasycap->urb_video_head);
+/*---------------------------------------------------------------------------*/
+ JOT(4, "allocating %i frame buffers of size %li\n", \
+ FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
+ JOT(4, ".... each scattered over %li pages\n", \
+ FRAME_BUFFER_SIZE/PAGE_SIZE);
+
+ for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+ for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if ((void *)NULL != peasycap->frame_buffer[k][m].pgo)
+ SAY("attempting to reallocate frame " \
+ "buffers\n");
+ else {
+ pbuf = (void *)__get_free_page(GFP_KERNEL);
+ if ((void *)NULL == pbuf) {
+ SAY("ERROR: Could not allocate frame "\
+ "buffer %i page %i\n", k, m);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_video_page += 1;
+ peasycap->frame_buffer[k][m].pgo = pbuf;
+ }
+ peasycap->frame_buffer[k][m].pto = \
+ peasycap->frame_buffer[k][m].pgo;
+ }
+ }
+
+ peasycap->frame_fill = 0;
+ peasycap->frame_read = 0;
+ JOT(4, "allocation of frame buffers done: %i pages\n", k * \
+ m);
+/*---------------------------------------------------------------------------*/
+ JOT(4, "allocating %i field buffers of size %li\n", \
+ FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
+ JOT(4, ".... each scattered over %li pages\n", \
+ FIELD_BUFFER_SIZE/PAGE_SIZE);
+
+ for (k = 0; k < FIELD_BUFFER_MANY; k++) {
+ for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if ((void *)NULL != peasycap->field_buffer[k][m].pgo) {
+ SAY("ERROR: attempting to reallocate " \
+ "field buffers\n");
+ } else {
+ pbuf = (void *) __get_free_page(GFP_KERNEL);
+ if ((void *)NULL == pbuf) {
+ SAY("ERROR: Could not allocate field" \
+ " buffer %i page %i\n", k, m);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_video_page += 1;
+ peasycap->field_buffer[k][m].pgo = pbuf;
+ }
+ peasycap->field_buffer[k][m].pto = \
+ peasycap->field_buffer[k][m].pgo;
+ }
+ peasycap->field_buffer[k][0].kount = 0x0200;
+ }
+ peasycap->field_fill = 0;
+ peasycap->field_page = 0;
+ peasycap->field_read = 0;
+ JOT(4, "allocation of field buffers done: %i pages\n", k * \
+ m);
+/*---------------------------------------------------------------------------*/
+ JOT(4, "allocating %i isoc video buffers of size %i\n", \
+ VIDEO_ISOC_BUFFER_MANY, \
+ peasycap->video_isoc_buffer_size);
+ JOT(4, ".... each occupying contiguous memory pages\n");
+
+ for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
+ pbuf = (void *)__get_free_pages(GFP_KERNEL, VIDEO_ISOC_ORDER);
+ if (NULL == pbuf) {
+ SAY("ERROR: Could not allocate isoc video buffer " \
+ "%i\n", k);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_video_page += \
+ ((unsigned int)(0x01 << VIDEO_ISOC_ORDER));
+
+ peasycap->video_isoc_buffer[k].pgo = pbuf;
+ peasycap->video_isoc_buffer[k].pto = pbuf + \
+ peasycap->video_isoc_buffer_size;
+ peasycap->video_isoc_buffer[k].kount = k;
+ }
+ JOT(4, "allocation of isoc video buffers done: %i pages\n", \
+ k * (0x01 << VIDEO_ISOC_ORDER));
+/*---------------------------------------------------------------------------*/
+/*
+ * ALLOCATE AND INITIALIZE MULTIPLE struct urb ...
+ */
+/*---------------------------------------------------------------------------*/
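+/*
+ * Each URB is paired with a small struct data_urb recording the URB
+ * pointer and its buffer index, and the pair is linked onto
+ * peasycap->purb_video_head. The isochronous frame descriptors are laid
+ * out at a fixed stride: frame j starts at offset j * maxframesize, so
+ * with, say, a 1024-byte maxframesize (purely illustrative) frame 3
+ * would begin at offset 3072 within the transfer buffer.
+ */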
+ JOT(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
+ JOT(4, "using %i=peasycap->video_isoc_framesperdesc\n", \
+ peasycap->video_isoc_framesperdesc);
+ JOT(4, "using %i=peasycap->video_isoc_maxframesize\n", \
+ peasycap->video_isoc_maxframesize);
+ JOT(4, "using %i=peasycap->video_isoc_buffer_size\n", \
+ peasycap->video_isoc_buffer_size);
+
+ for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
+ purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc, \
+ GFP_KERNEL);
+ if (NULL == purb) {
+ SAY("ERROR: usb_alloc_urb returned NULL for buffer " \
+ "%i\n", k);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_video_urb += 1;
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
+ if (NULL == pdata_urb) {
+ SAY("ERROR: Could not allocate struct data_urb.\n");
+ return -ENOMEM;
+ } else
+ peasycap->allocation_video_struct += \
+ sizeof(struct data_urb);
+
+ pdata_urb->purb = purb;
+ pdata_urb->isbuf = k;
+ pdata_urb->length = 0;
+ list_add_tail(&(pdata_urb->list_head), \
+ peasycap->purb_video_head);
+/*---------------------------------------------------------------------------*/
+/*
+ * ... AND INITIALIZE THEM
+ */
+/*---------------------------------------------------------------------------*/
+ if (!k) {
+ JOT(4, "initializing video urbs thus:\n");
+ JOT(4, " purb->interval = 1;\n");
+ JOT(4, " purb->dev = peasycap->pusb_device;\n");
+ JOT(4, " purb->pipe = usb_rcvisocpipe" \
+ "(peasycap->pusb_device,%i);\n", \
+ peasycap->video_endpointnumber);
+ JOT(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOT(4, " purb->transfer_buffer = peasycap->" \
+ "video_isoc_buffer[.].pgo;\n");
+ JOT(4, " purb->transfer_buffer_length = %i;\n", \
+ peasycap->video_isoc_buffer_size);
+ JOT(4, " purb->complete = easycap_complete;\n");
+ JOT(4, " purb->context = peasycap;\n");
+ JOT(4, " purb->start_frame = 0;\n");
+ JOT(4, " purb->number_of_packets = %i;\n", \
+ peasycap->video_isoc_framesperdesc);
+ JOT(4, " for (j = 0; j < %i; j++)\n", \
+ peasycap->video_isoc_framesperdesc);
+ JOT(4, " {\n");
+ JOT(4, " purb->iso_frame_desc[j].offset = j*%i;\n",\
+ peasycap->video_isoc_maxframesize);
+ JOT(4, " purb->iso_frame_desc[j].length = %i;\n", \
+ peasycap->video_isoc_maxframesize);
+ JOT(4, " }\n");
+ }
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device, \
+ peasycap->video_endpointnumber);
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->video_isoc_buffer[k].pgo;
+ purb->transfer_buffer_length = \
+ peasycap->video_isoc_buffer_size;
+ purb->complete = easycap_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->video_isoc_framesperdesc;
+ for (j = 0; j < peasycap->video_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset = j * \
+ peasycap->video_isoc_maxframesize;
+ purb->iso_frame_desc[j].length = \
+ peasycap->video_isoc_maxframesize;
+ }
+ }
+ JOT(4, "allocation of %i struct urb done.\n", k);
+/*--------------------------------------------------------------------------*/
+/*
+ * SAVE POINTER peasycap IN THIS INTERFACE.
+ */
+/*--------------------------------------------------------------------------*/
+ usb_set_intfdata(pusb_interface, peasycap);
+/*--------------------------------------------------------------------------*/
+/*
+ * THE VIDEO DEVICE CAN BE REGISTERED NOW, AS IT IS READY.
+ */
+/*--------------------------------------------------------------------------*/
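+/*
+ * Two registration paths follow: when not built as a videodev client the
+ * driver registers the interface directly with usb_register_dev() and
+ * easycap_class; otherwise it allocates a struct video_device and
+ * registers that with video_register_device().
+ */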
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+ if (0 != (usb_register_dev(pusb_interface, &easycap_class))) {
+ err("Not able to get a minor for this device");
+ usb_set_intfdata(pusb_interface, NULL);
+ return -ENODEV;
+ } else
+ (peasycap->registered_video)++;
+ SAY("easycap attached to minor #%d\n", pusb_interface->minor);
+ break;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+ pvideo_device = (struct video_device *)\
+ kzalloc(sizeof(struct video_device), GFP_KERNEL);
+ if ((struct video_device *)NULL == pvideo_device) {
+ SAY("ERROR: Could not allocate structure video_device\n");
+ return -ENOMEM;
+ }
+ if (VIDEO_DEVICE_MANY <= video_device_many) {
+ SAY("ERROR: Too many /dev/videos\n");
+ return -ENOMEM;
+ }
+ pvideo_array[video_device_many] = pvideo_device;
+ video_device_many++;
+
+ strcpy(&pvideo_device->name[0], "easycapdc60");
+#if defined(EASYCAP_NEEDS_V4L2_FOPS)
+ pvideo_device->fops = &v4l2_fops;
+#else
+ pvideo_device->fops = &easycap_fops;
+#endif /*EASYCAP_NEEDS_V4L2_FOPS*/
+ pvideo_device->minor = -1;
+ pvideo_device->release = (void *)(&videodev_release);
+
+ video_set_drvdata(pvideo_device, (void *)peasycap);
+
+ rc = video_register_device(pvideo_device, VFL_TYPE_GRABBER, -1);
+ if (0 != rc) {
+ err("Not able to register with videodev");
+ videodev_release(pvideo_device);
+ return -ENODEV;
+ } else {
+ peasycap->pvideo_device = pvideo_device;
+ (peasycap->registered_video)++;
+ JOT(4, "registered with videodev: %i=minor\n", \
+ pvideo_device->minor);
+ }
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+ break;
+}
+/*--------------------------------------------------------------------------*/
+/*
+ * INTERFACE 1 IS THE AUDIO CONTROL INTERFACE
+ * INTERFACE 2 IS THE AUDIO STREAMING INTERFACE
+ */
+/*--------------------------------------------------------------------------*/
+case 1: {
+/*--------------------------------------------------------------------------*/
+/*
+ * SAVE POINTER peasycap IN INTERFACE 1
+ */
+/*--------------------------------------------------------------------------*/
+ usb_set_intfdata(pusb_interface, peasycap);
+ JOT(4, "no initialization required for interface %i\n", \
+ pusb_interface_descriptor->bInterfaceNumber);
+ break;
+}
+/*--------------------------------------------------------------------------*/
+case 2: {
+ if (!peasycap) {
+ SAY("MISTAKE: peasycap is NULL\n");
+ return -EFAULT;
+ }
+ if (!isokalt) {
+ SAY("ERROR: no viable audio_altsetting_on\n");
+ return -ENOENT;
+ } else {
+ peasycap->audio_altsetting_on = okalt[isokalt - 1];
+ JOT(4, "%i=audio_altsetting_on <====\n", \
+ peasycap->audio_altsetting_on);
+ }
+ if (!isokepn) {
+ SAY("ERROR: no viable audio_endpointnumber\n");
+ return -ENOENT;
+ } else {
+ peasycap->audio_endpointnumber = okepn[isokepn - 1];
+ JOT(4, "%i=audio_endpointnumber\n", \
+ peasycap->audio_endpointnumber);
+ }
+ if (!isokmps) {
+ SAY("ERROR: no viable audio_maxpacketsize\n");
+ return -ENOENT;
+ } else {
+ peasycap->audio_isoc_maxframesize = okmps[isokmps - 1];
+ JOT(4, "%i=audio_isoc_maxframesize\n", \
+ peasycap->audio_isoc_maxframesize);
+ if (0 >= peasycap->audio_isoc_maxframesize) {
+ SAY("ERROR: bad audio_isoc_maxframesize\n");
+ return -ENOENT;
+ }
+ if (9 == peasycap->audio_isoc_maxframesize) {
+ peasycap->ilk |= 0x02;
+ SAY("hardware is FOUR-CVBS\n");
+ peasycap->microphone = true;
+ peasycap->audio_pages_per_fragment = 4;
+ } else if (256 == peasycap->audio_isoc_maxframesize) {
+ peasycap->ilk &= ~0x02;
+ SAY("hardware is CVBS+S-VIDEO\n");
+ peasycap->microphone = false;
+ peasycap->audio_pages_per_fragment = 4;
+ } else {
+ SAY("hardware is unidentified:\n");
+ SAY("%i=audio_isoc_maxframesize\n", \
+ peasycap->audio_isoc_maxframesize);
+ return -ENOENT;
+ }
+
+ peasycap->audio_bytes_per_fragment = \
+ peasycap->audio_pages_per_fragment * \
+ PAGE_SIZE;
+ peasycap->audio_buffer_page_many = (AUDIO_FRAGMENT_MANY * \
+ peasycap->audio_pages_per_fragment);
+
+ JOT(4, "%6i=AUDIO_FRAGMENT_MANY\n", AUDIO_FRAGMENT_MANY);
+ JOT(4, "%6i=audio_pages_per_fragment\n", \
+ peasycap->audio_pages_per_fragment);
+ JOT(4, "%6i=audio_bytes_per_fragment\n", \
+ peasycap->audio_bytes_per_fragment);
+ JOT(4, "%6i=audio_buffer_page_many\n", \
+ peasycap->audio_buffer_page_many);
+
+ peasycap->audio_isoc_framesperdesc = 128;
+
+ JOT(4, "%i=audio_isoc_framesperdesc\n", \
+ peasycap->audio_isoc_framesperdesc);
+ if (0 >= peasycap->audio_isoc_framesperdesc) {
+ SAY("ERROR: bad audio_isoc_framesperdesc\n");
+ return -ENOENT;
+ }
+
+ peasycap->audio_isoc_buffer_size = \
+ peasycap->audio_isoc_maxframesize * \
+ peasycap->audio_isoc_framesperdesc;
+ JOT(4, "%i=audio_isoc_buffer_size\n", \
+ peasycap->audio_isoc_buffer_size);
+ if (AUDIO_ISOC_BUFFER_SIZE < \
+ peasycap->audio_isoc_buffer_size) {
+ SAY("MISTAKE: audio_isoc_buffer_size bigger "
+ "than %li=AUDIO_ISOC_BUFFER_SIZE\n", \
+ AUDIO_ISOC_BUFFER_SIZE);
+ return -EFAULT;
+ }
+ }
+
+ if (-1 == peasycap->audio_interface) {
+ SAY("MISTAKE: audio_interface is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->audio_altsetting_on) {
+ SAY("MISTAKE: audio_altsetting_on is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->audio_altsetting_off) {
+ SAY("MISTAKE: audio_altsetting_off is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->audio_endpointnumber) {
+ SAY("MISTAKE: audio_endpointnumber is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->audio_isoc_maxframesize) {
+ SAY("MISTAKE: audio_isoc_maxframesize is unset\n");
+ return -EFAULT;
+ }
+ if (-1 == peasycap->audio_isoc_buffer_size) {
+ SAY("MISTAKE: audio_isoc_buffer_size is unset\n");
+ return -EFAULT;
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * ALLOCATE MEMORY FOR AUDIO BUFFERS. LISTS MUST BE INITIALIZED FIRST.
+ */
+/*---------------------------------------------------------------------------*/
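+/*
+ * The audio buffer is a ring of audio_buffer_page_many single pages,
+ * grouped into fragments of audio_pages_per_fragment pages each;
+ * peasycap->audio_fill and peasycap->audio_read index pages within this
+ * ring.
+ */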
+ INIT_LIST_HEAD(&(peasycap->urb_audio_head));
+ peasycap->purb_audio_head = &(peasycap->urb_audio_head);
+
+ JOT(4, "allocating an audio buffer\n");
+ JOT(4, ".... scattered over %i pages\n", \
+ peasycap->audio_buffer_page_many);
+
+ for (k = 0; k < peasycap->audio_buffer_page_many; k++) {
+ if ((void *)NULL != peasycap->audio_buffer[k].pgo) {
+ SAY("ERROR: attempting to reallocate audio buffers\n");
+ } else {
+ pbuf = (void *) __get_free_page(GFP_KERNEL);
+ if ((void *)NULL == pbuf) {
+ SAY("ERROR: Could not allocate audio " \
+ "buffer page %i\n", k);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_audio_page += 1;
+
+ peasycap->audio_buffer[k].pgo = pbuf;
+ }
+ peasycap->audio_buffer[k].pto = peasycap->audio_buffer[k].pgo;
+ }
+
+ peasycap->audio_fill = 0;
+ peasycap->audio_read = 0;
+ JOT(4, "allocation of audio buffer done: %i pages\n", k);
+/*---------------------------------------------------------------------------*/
+ JOT(4, "allocating %i isoc audio buffers of size %i\n", \
+ AUDIO_ISOC_BUFFER_MANY, peasycap->audio_isoc_buffer_size);
+ JOT(4, ".... each occupying contiguous memory pages\n");
+
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ pbuf = (void *)__get_free_pages(GFP_KERNEL, AUDIO_ISOC_ORDER);
+ if (NULL == pbuf) {
+ SAY("ERROR: Could not allocate isoc audio buffer " \
+ "%i\n", k);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_audio_page += \
+ ((unsigned int)(0x01 << AUDIO_ISOC_ORDER));
+
+ peasycap->audio_isoc_buffer[k].pgo = pbuf;
+ peasycap->audio_isoc_buffer[k].pto = pbuf + \
+ peasycap->audio_isoc_buffer_size;
+ peasycap->audio_isoc_buffer[k].kount = k;
+ }
+ JOT(4, "allocation of isoc audio buffers done.\n");
+/*---------------------------------------------------------------------------*/
+/*
+ * ALLOCATE AND INITIALIZE MULTIPLE struct urb ...
+ */
+/*---------------------------------------------------------------------------*/
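+/*
+ * The audio URBs mirror the video URBs set up earlier, except that they
+ * use the audio isochronous parameters and complete into
+ * easysnd_complete() rather than easycap_complete().
+ */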
+ JOT(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
+ JOT(4, "using %i=peasycap->audio_isoc_framesperdesc\n", \
+ peasycap->audio_isoc_framesperdesc);
+ JOT(4, "using %i=peasycap->audio_isoc_maxframesize\n", \
+ peasycap->audio_isoc_maxframesize);
+ JOT(4, "using %i=peasycap->audio_isoc_buffer_size\n", \
+ peasycap->audio_isoc_buffer_size);
+
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc, \
+ GFP_KERNEL);
+ if (NULL == purb) {
+ SAY("ERROR: usb_alloc_urb returned NULL for buffer " \
+ "%i\n", k);
+ return -ENOMEM;
+ } else
+ peasycap->allocation_audio_urb += 1;
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
+ if (NULL == pdata_urb) {
+ SAY("ERROR: Could not allocate struct data_urb.\n");
+ return -ENOMEM;
+ } else
+ peasycap->allocation_audio_struct += \
+ sizeof(struct data_urb);
+
+ pdata_urb->purb = purb;
+ pdata_urb->isbuf = k;
+ pdata_urb->length = 0;
+ list_add_tail(&(pdata_urb->list_head), \
+ peasycap->purb_audio_head);
+/*---------------------------------------------------------------------------*/
+/*
+ * ... AND INITIALIZE THEM
+ */
+/*---------------------------------------------------------------------------*/
+ if (!k) {
+ JOT(4, "initializing audio urbs thus:\n");
+ JOT(4, " purb->interval = 1;\n");
+ JOT(4, " purb->dev = peasycap->pusb_device;\n");
+ JOT(4, " purb->pipe = usb_rcvisocpipe(peasycap->" \
+ "pusb_device,%i);\n", \
+ peasycap->audio_endpointnumber);
+ JOT(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOT(4, " purb->transfer_buffer = " \
+ "peasycap->audio_isoc_buffer[.].pgo;\n");
+ JOT(4, " purb->transfer_buffer_length = %i;\n", \
+ peasycap->audio_isoc_buffer_size);
+ JOT(4, " purb->complete = easysnd_complete;\n");
+ JOT(4, " purb->context = peasycap;\n");
+ JOT(4, " purb->start_frame = 0;\n");
+ JOT(4, " purb->number_of_packets = %i;\n", \
+ peasycap->audio_isoc_framesperdesc);
+ JOT(4, " for (j = 0; j < %i; j++)\n", \
+ peasycap->audio_isoc_framesperdesc);
+ JOT(4, " {\n");
+ JOT(4, " purb->iso_frame_desc[j].offset = j*%i;\n",\
+ peasycap->audio_isoc_maxframesize);
+ JOT(4, " purb->iso_frame_desc[j].length = %i;\n", \
+ peasycap->audio_isoc_maxframesize);
+ JOT(4, " }\n");
+ }
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device, \
+ peasycap->audio_endpointnumber);
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->audio_isoc_buffer[k].pgo;
+ purb->transfer_buffer_length = \
+ peasycap->audio_isoc_buffer_size;
+ purb->complete = easysnd_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
+ for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset = j * \
+ peasycap->audio_isoc_maxframesize;
+ purb->iso_frame_desc[j].length = \
+ peasycap->audio_isoc_maxframesize;
+ }
+ }
+ JOT(4, "allocation of %i struct urb done.\n", k);
+/*---------------------------------------------------------------------------*/
+/*
+ * SAVE POINTER peasycap IN THIS INTERFACE.
+ */
+/*---------------------------------------------------------------------------*/
+ usb_set_intfdata(pusb_interface, peasycap);
+/*---------------------------------------------------------------------------*/
+/*
+ * THE AUDIO DEVICE CAN BE REGISTERED NOW, AS IT IS READY.
+ */
+/*---------------------------------------------------------------------------*/
+ rc = usb_register_dev(pusb_interface, &easysnd_class);
+ if (0 != rc) {
+ err("Not able to get a minor for this device.");
+ usb_set_intfdata(pusb_interface, NULL);
+ return -ENODEV;
+ } else
+ (peasycap->registered_audio)++;
+/*---------------------------------------------------------------------------*/
+/*
+ * LET THE USER KNOW WHAT NODE THE AUDIO DEVICE IS ATTACHED TO.
+ */
+/*---------------------------------------------------------------------------*/
+ SAY("easysnd attached to minor #%d\n", pusb_interface->minor);
+ break;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * INTERFACES OTHER THAN 0, 1 AND 2 ARE UNEXPECTED
+ */
+/*---------------------------------------------------------------------------*/
+default: {
+ JOT(4, "ERROR: unexpected interface %i\n", bInterfaceNumber);
+ return -EINVAL;
+}
+}
+JOT(4, "ends successfully for interface %i\n", \
+ pusb_interface_descriptor->bInterfaceNumber);
+return 0;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * WHEN THIS FUNCTION IS CALLED THE DEVICE HAS ALREADY BEEN PHYSICALLY
+ * UNPLUGGED.
+ * HENCE peasycap->pusb_device IS NO LONGER VALID AND MUST BE SET TO NULL.
+ */
+/*---------------------------------------------------------------------------*/
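+/*
+ * Disconnection proceeds in three steps: any outstanding isochronous
+ * URBs for the interface are killed, the interface is deregistered from
+ * the USB (or videodev) subsystem, and kref_put() is called so that
+ * easycap_delete() runs once the last reference has gone.
+ */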
+void
+easycap_usb_disconnect(struct usb_interface *pusb_interface)
+{
+struct usb_host_interface *pusb_host_interface;
+struct usb_interface_descriptor *pusb_interface_descriptor;
+__u8 bInterfaceNumber;
+struct easycap *peasycap;
+
+struct list_head *plist_head;
+struct data_urb *pdata_urb;
+int minor, m;
+
+JOT(4, "\n");
+
+if ((struct usb_interface *)NULL == pusb_interface) {
+ JOT(4, "ERROR: pusb_interface is NULL\n");
+ return;
+}
+pusb_host_interface = pusb_interface->cur_altsetting;
+if ((struct usb_host_interface *)NULL == pusb_host_interface) {
+ JOT(4, "ERROR: pusb_host_interface is NULL\n");
+ return;
+}
+pusb_interface_descriptor = &(pusb_host_interface->desc);
+if ((struct usb_interface_descriptor *)NULL == pusb_interface_descriptor) {
+ JOT(4, "ERROR: pusb_interface_descriptor is NULL\n");
+ return;
+}
+bInterfaceNumber = pusb_interface_descriptor->bInterfaceNumber;
+minor = pusb_interface->minor;
+JOT(4, "intf[%i]: minor=%i\n", bInterfaceNumber, minor);
+
+peasycap = usb_get_intfdata(pusb_interface);
+if ((struct easycap *)NULL == peasycap)
+ SAY("ERROR: peasycap is NULL\n");
+else {
+ peasycap->pusb_device = (struct usb_device *)NULL;
+ switch (bInterfaceNumber) {
+/*---------------------------------------------------------------------------*/
+ case 0: {
+ if ((struct list_head *)NULL != peasycap->purb_video_head) {
+ JOT(4, "killing video urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_video_head))
+ {
+ pdata_urb = list_entry(plist_head, \
+ struct data_urb, list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ if ((struct urb *)NULL != \
+ pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
+ }
+ }
+ }
+ JOT(4, "%i video urbs killed\n", m);
+ } else
+ SAY("ERROR: peasycap->purb_video_head is NULL\n");
+ break;
+ }
+/*---------------------------------------------------------------------------*/
+ case 2: {
+ if ((struct list_head *)NULL != peasycap->purb_audio_head) {
+ JOT(4, "killing audio urbs\n");
+ m = 0;
+ list_for_each(plist_head, \
+ (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head, \
+ struct data_urb, list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ if ((struct urb *)NULL != \
+ pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
+ }
+ }
+ }
+ JOT(4, "%i audio urbs killed\n", m);
+ } else
+ SAY("ERROR: peasycap->purb_audio_head is NULL\n");
+ break;
+ }
+/*---------------------------------------------------------------------------*/
+ default:
+ break;
+ }
+}
+/*--------------------------------------------------------------------------*/
+/*
+ * DEREGISTER
+ */
+/*--------------------------------------------------------------------------*/
+switch (bInterfaceNumber) {
+case 0: {
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+ if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap has become NULL\n");
+ } else {
+ lock_kernel();
+ usb_deregister_dev(pusb_interface, &easycap_class);
+ (peasycap->registered_video)--;
+
+ JOT(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
+ unlock_kernel();
+ SAY("easycap detached from minor #%d\n", minor);
+ }
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+ if ((struct easycap *)NULL == peasycap)
+ SAY("ERROR: peasycap has become NULL\n");
+ else {
+ lock_kernel();
+ video_unregister_device(peasycap->pvideo_device);
+ (peasycap->registered_video)--;
+ unlock_kernel();
+ JOT(4, "unregistered with videodev: %i=minor\n", \
+ peasycap->pvideo_device->minor);
+ }
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+ break;
+}
+case 2: {
+ lock_kernel();
+
+ usb_deregister_dev(pusb_interface, &easysnd_class);
+ if ((struct easycap *)NULL != peasycap)
+ (peasycap->registered_audio)--;
+
+ JOT(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
+ unlock_kernel();
+
+ SAY("easysnd detached from minor #%d\n", minor);
+ break;
+}
+default:
+ break;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * CALL easycap_delete() IF NO REMAINING REFERENCES TO peasycap
+ */
+/*---------------------------------------------------------------------------*/
+if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap has become NULL\n");
+ SAY("cannot call kref_put()\n");
+ SAY("ending unsuccessfully: may cause memory leak\n");
+ return;
+}
+if (!peasycap->kref.refcount.counter) {
+ SAY("ERROR: peasycap->kref.refcount.counter is zero " \
+ "so cannot call kref_put()\n");
+ SAY("ending unsuccessfully: may cause memory leak\n");
+ return;
+}
+JOT(4, "intf[%i]: kref_put() with %i=peasycap->kref.refcount.counter\n", \
+ bInterfaceNumber, (int)peasycap->kref.refcount.counter);
+kref_put(&peasycap->kref, easycap_delete);
+JOT(4, "intf[%i]: kref_put() done.\n", bInterfaceNumber);
+/*---------------------------------------------------------------------------*/
+
+JOT(4, "ends\n");
+return;
+}
+/*****************************************************************************/
+int __init
+easycap_module_init(void)
+{
+int result;
+
+SAY("========easycap========\n");
+JOT(4, "begins. %i=debug\n", easycap_debug);
+SAY("version: " EASYCAP_DRIVER_VERSION "\n");
+/*---------------------------------------------------------------------------*/
+/*
+ * REGISTER THIS DRIVER WITH THE USB SUBSYSTEM.
+ */
+/*---------------------------------------------------------------------------*/
+JOT(4, "registering driver easycap\n");
+
+result = usb_register(&easycap_usb_driver);
+if (0 != result)
+ SAY("ERROR: usb_register returned %i\n", result);
+
+JOT(4, "ends\n");
+return result;
+}
+/*****************************************************************************/
+void __exit
+easycap_module_exit(void)
+{
+JOT(4, "begins\n");
+
+/*---------------------------------------------------------------------------*/
+/*
+ * DEREGISTER THIS DRIVER FROM THE USB SUBSYSTEM.
+ */
+/*---------------------------------------------------------------------------*/
+usb_deregister(&easycap_usb_driver);
+
+JOT(4, "ends\n");
+}
+/*****************************************************************************/
+
+module_init(easycap_module_init);
+module_exit(easycap_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("R.M. Thomas <rmthomas@sciolus.org>");
+MODULE_DESCRIPTION(EASYCAP_DRIVER_DESCRIPTION);
+MODULE_VERSION(EASYCAP_DRIVER_VERSION);
+#if defined(EASYCAP_DEBUG)
+MODULE_PARM_DESC(easycap_debug, "debug: 0 (default), 1, 2,...");
+#endif /*EASYCAP_DEBUG*/
+/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_settings.c b/drivers/staging/easycap/easycap_settings.c
new file mode 100644
index 00000000000..38d94051241
--- /dev/null
+++ b/drivers/staging/easycap/easycap_settings.c
@@ -0,0 +1,489 @@
+/******************************************************************************
+* *
+* easycap_settings.c *
+* *
+******************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+
+#include "easycap.h"
+#include "easycap_debug.h"
+
+/*---------------------------------------------------------------------------*/
+/*
+ * THE LEAST SIGNIFICANT BIT OF easycap_standard.mask HAS MEANING:
+ * 0 => 25 fps
+ * 1 => 30 fps
+ */
+/*---------------------------------------------------------------------------*/
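+/*
+ * Each entry keeps only the low four bits of the standard index in
+ * .mask, so the 25/30 fps distinction above is simply the least
+ * significant bit of that index.
+ */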
+const struct easycap_standard easycap_standard[] = {
+{
+.mask = 0x000F & PAL_BGHIN,
+.v4l2_standard = {
+ .index = PAL_BGHIN,
+ .id = (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
+ V4L2_STD_PAL_I | V4L2_STD_PAL_N),
+ .name = "PAL_BGHIN",
+ .frameperiod = {1, 25},
+ .framelines = 625,
+ .reserved = {0, 0, 0, 0}
+ }
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & NTSC_N_443,
+.v4l2_standard = {
+ .index = NTSC_N_443,
+ .id = V4L2_STD_UNKNOWN,
+ .name = "NTSC_N_443",
+ .frameperiod = {1, 25},
+ .framelines = 480,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & PAL_Nc,
+.v4l2_standard = {
+ .index = PAL_Nc,
+ .id = V4L2_STD_PAL_Nc,
+ .name = "PAL_Nc",
+ .frameperiod = {1, 25},
+ .framelines = 625,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & NTSC_N,
+.v4l2_standard = {
+ .index = NTSC_N,
+ .id = V4L2_STD_UNKNOWN,
+ .name = "NTSC_N",
+ .frameperiod = {1, 25},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & SECAM,
+.v4l2_standard = {
+ .index = SECAM,
+ .id = V4L2_STD_SECAM,
+ .name = "SECAM",
+ .frameperiod = {1, 25},
+ .framelines = 625,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & NTSC_M,
+.v4l2_standard = {
+ .index = NTSC_M,
+ .id = V4L2_STD_NTSC_M,
+ .name = "NTSC_M",
+ .frameperiod = {1, 30},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & NTSC_M_JP,
+.v4l2_standard = {
+ .index = NTSC_M_JP,
+ .id = V4L2_STD_NTSC_M_JP,
+ .name = "NTSC_M_JP",
+ .frameperiod = {1, 30},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & PAL_60,
+.v4l2_standard = {
+ .index = PAL_60,
+ .id = V4L2_STD_PAL_60,
+ .name = "PAL_60",
+ .frameperiod = {1, 30},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & NTSC_443,
+.v4l2_standard = {
+ .index = NTSC_443,
+ .id = V4L2_STD_NTSC_443,
+ .name = "NTSC_443",
+ .frameperiod = {1, 30},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x000F & PAL_M,
+.v4l2_standard = {
+ .index = PAL_M,
+ .id = V4L2_STD_PAL_M,
+ .name = "PAL_M",
+ .frameperiod = {1, 30},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0xFFFF
+}
+};
+/*---------------------------------------------------------------------------*/
+/*
+ * THE 16-BIT easycap_format.mask HAS MEANING:
+ * (least significant) BIT 0: 0 => PAL, 25 FPS; 1 => NTSC, 30 FPS
+ * BITS 1-3: RESERVED FOR DIFFERENTIATING STANDARDS
+ * BITS 4-7: NUMBER OF BYTES PER PIXEL
+ * BIT 8: 0 => NATIVE BYTE ORDER; 1 => SWAPPED
+ * BITS 9-10: RESERVED FOR OTHER BYTE PERMUTATIONS
+ * BIT 11: 0 => UNDECIMATED; 1 => DECIMATED
+ * BIT 12: 0 => OFFER FRAMES; 1 => OFFER FIELDS
+ * (most significant) BITS 13-15: RESERVED FOR OTHER FIELD ORDER OPTIONS
+ * IT FOLLOWS THAT:
+ * bytesperpixel IS ((0x00F0 & easycap_format.mask) >> 4)
+ * byteswaporder IS true IF (0 != (0x0100 & easycap_format.mask))
+ *
+ * decimatepixel IS true IF (0 != (0x0800 & easycap_format.mask))
+ *
+ * offerfields IS true IF (0 != (0x1000 & easycap_format.mask))
+ */
+/*---------------------------------------------------------------------------*/
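+/*
+ * As an illustration only (the numeric value is hypothetical), a format
+ * whose mask is 0x1821 decodes as follows:
+ *     0x0001 & 0x1821        -> NTSC family, 30 fps
+ *     (0x00F0 & 0x1821) >> 4 -> 2 bytes per pixel
+ *     0x0100 & 0x1821 == 0   -> native byte order
+ *     0x0800 & 0x1821        -> decimated
+ *     0x1000 & 0x1821        -> fields offered
+ */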
+
+struct easycap_format easycap_format[1 + SETTINGS_MANY];
+
+int
+fillin_formats(void)
+{
+int i, j, k, m, n;
+__u32 width, height, pixelformat, bytesperline, sizeimage;
+__u32 field, colorspace;
+__u16 mask1, mask2, mask3, mask4;
+char name1[32], name2[32], name3[32], name4[32];
+
+for (i = 0, n = 0; i < STANDARD_MANY; i++) {
+ mask1 = 0x0000;
+ switch (i) {
+ case PAL_BGHIN: {
+ mask1 = PAL_BGHIN;
+ strcpy(&name1[0], "PAL_BGHIN");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case SECAM: {
+ mask1 = SECAM;
+ strcpy(&name1[0], "SECAM");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case PAL_Nc: {
+ mask1 = PAL_Nc;
+ strcpy(&name1[0], "PAL_Nc");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case PAL_60: {
+ mask1 = PAL_60;
+ strcpy(&name1[0], "PAL_60");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case PAL_M: {
+ mask1 = PAL_M;
+ strcpy(&name1[0], "PAL_M");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case NTSC_M: {
+ mask1 = NTSC_M;
+ strcpy(&name1[0], "NTSC_M");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_443: {
+ mask1 = NTSC_443;
+ strcpy(&name1[0], "NTSC_443");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_M_JP: {
+ mask1 = NTSC_M_JP;
+ strcpy(&name1[0], "NTSC_M_JP");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_N: {
+ mask1 = NTSC_N;
+ strcpy(&name1[0], "NTSC_N");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_N_443: {
+ mask1 = NTSC_N_443;
+ strcpy(&name1[0], "NTSC_N_443");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ for (j = 0; j < RESOLUTION_MANY; j++) {
+ mask2 = 0x0000;
+ switch (j) {
+ case AT_720x576: {
+ if (0x1 & mask1)
+ continue;
+ strcpy(&name2[0], "_AT_720x576");
+ width = 720; height = 576; break;
+ }
+ case AT_704x576: {
+ if (0x1 & mask1)
+ continue;
+ strcpy(&name2[0], "_AT_704x576");
+ width = 704; height = 576; break;
+ }
+ case AT_640x480: {
+ strcpy(&name2[0], "_AT_640x480");
+ width = 640; height = 480; break;
+ }
+ case AT_720x480: {
+ if (!(0x1 & mask1))
+ continue;
+ strcpy(&name2[0], "_AT_720x480");
+ width = 720; height = 480; break;
+ }
+ case AT_360x288: {
+ if (0x1 & mask1)
+ continue;
+ strcpy(&name2[0], "_AT_360x288");
+ width = 360; height = 288; mask2 = 0x0800; break;
+ }
+ case AT_320x240: {
+ strcpy(&name2[0], "_AT_320x240");
+ width = 320; height = 240; mask2 = 0x0800; break;
+ }
+ case AT_360x240: {
+ if (!(0x1 & mask1))
+ continue;
+ strcpy(&name2[0], "_AT_360x240");
+ width = 360; height = 240; mask2 = 0x0800; break;
+ }
+ default:
+ return -2;
+ }
+
+ for (k = 0; k < PIXELFORMAT_MANY; k++) {
+ mask3 = 0x0000;
+ switch (k) {
+ case FMT_UYVY: {
+ strcpy(&name3[0], "_" STRINGIZE(FMT_UYVY));
+ pixelformat = V4L2_PIX_FMT_UYVY;
+ mask3 |= (0x02 << 4);
+ break;
+ }
+ case FMT_YUY2: {
+ strcpy(&name3[0], "_" STRINGIZE(FMT_YUY2));
+ pixelformat = V4L2_PIX_FMT_YUYV;
+ mask3 |= (0x02 << 4);
+ mask3 |= 0x0100;
+ break;
+ }
+ case FMT_RGB24: {
+ strcpy(&name3[0], "_" STRINGIZE(FMT_RGB24));
+ pixelformat = V4L2_PIX_FMT_RGB24;
+ mask3 |= (0x03 << 4);
+ break;
+ }
+ case FMT_RGB32: {
+ strcpy(&name3[0], "_" STRINGIZE(FMT_RGB32));
+ pixelformat = V4L2_PIX_FMT_RGB32;
+ mask3 |= (0x04 << 4);
+ break;
+ }
+ case FMT_BGR24: {
+ strcpy(&name3[0], "_" STRINGIZE(FMT_BGR24));
+ pixelformat = V4L2_PIX_FMT_BGR24;
+ mask3 |= (0x03 << 4);
+ mask3 |= 0x0100;
+ break;
+ }
+ case FMT_BGR32: {
+ strcpy(&name3[0], "_" STRINGIZE(FMT_BGR32));
+ pixelformat = V4L2_PIX_FMT_BGR32;
+ mask3 |= (0x04 << 4);
+ mask3 |= 0x0100;
+ break;
+ }
+ default:
+ return -3;
+ }
+ bytesperline = width * ((mask3 & 0x00F0) >> 4);
+ sizeimage = bytesperline * height;
+
+ for (m = 0; m < INTERLACE_MANY; m++) {
+ mask4 = 0x0000;
+ switch (m) {
+ case FIELD_NONE: {
+ strcpy(&name4[0], "-n");
+ field = V4L2_FIELD_NONE;
+ break;
+ }
+ case FIELD_INTERLACED: {
+ strcpy(&name4[0], "-i");
+ field = V4L2_FIELD_INTERLACED;
+ break;
+ }
+ case FIELD_ALTERNATE: {
+ strcpy(&name4[0], "-a");
+ mask4 |= 0x1000;
+ field = V4L2_FIELD_ALTERNATE;
+ break;
+ }
+ default:
+ return -4;
+ }
+ if (SETTINGS_MANY <= n)
+ return -5;
+ strcpy(&easycap_format[n].name[0], &name1[0]);
+ strcat(&easycap_format[n].name[0], &name2[0]);
+ strcat(&easycap_format[n].name[0], &name3[0]);
+ strcat(&easycap_format[n].name[0], &name4[0]);
+ easycap_format[n].mask = \
+ mask1 | mask2 | mask3 | mask4;
+ easycap_format[n].v4l2_format\
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.width = width;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.height = height;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.pixelformat = pixelformat;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.field = field;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.bytesperline = bytesperline;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.sizeimage = sizeimage;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.colorspace = colorspace;
+ easycap_format[n].v4l2_format\
+ .fmt.pix.priv = 0;
+ n++;
+ }
+ }
+ }
+}
+if ((1 + SETTINGS_MANY) <= n)
+ return -6;
+easycap_format[n].mask = 0xFFFF;
+return n;
+}
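+/*
+ * fillin_formats() returns the number of entries it has filled in and
+ * terminates easycap_format[] with a sentinel mask of 0xFFFF; a negative
+ * return (-1 to -6) identifies which nested switch or table bound
+ * failed.
+ */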
+/*---------------------------------------------------------------------------*/
+struct v4l2_queryctrl easycap_control[] = \
+ {{
+.id = V4L2_CID_BRIGHTNESS,
+.type = V4L2_CTRL_TYPE_INTEGER,
+.name = "Brightness",
+.minimum = 0,
+.maximum = 255,
+.step = 1,
+.default_value = SAA_0A_DEFAULT,
+.flags = 0,
+.reserved = {0, 0}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.id = V4L2_CID_CONTRAST,
+.type = V4L2_CTRL_TYPE_INTEGER,
+.name = "Contrast",
+.minimum = 0,
+.maximum = 255,
+.step = 1,
+.default_value = SAA_0B_DEFAULT + 128,
+.flags = 0,
+.reserved = {0, 0}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.id = V4L2_CID_SATURATION,
+.type = V4L2_CTRL_TYPE_INTEGER,
+.name = "Saturation",
+.minimum = 0,
+.maximum = 255,
+.step = 1,
+.default_value = SAA_0C_DEFAULT + 128,
+.flags = 0,
+.reserved = {0, 0}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.id = V4L2_CID_HUE,
+.type = V4L2_CTRL_TYPE_INTEGER,
+.name = "Hue",
+.minimum = 0,
+.maximum = 255,
+.step = 1,
+.default_value = SAA_0D_DEFAULT + 128,
+.flags = 0,
+.reserved = {0, 0}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.id = V4L2_CID_AUDIO_VOLUME,
+.type = V4L2_CTRL_TYPE_INTEGER,
+.name = "Volume",
+.minimum = 0,
+.maximum = 31,
+.step = 1,
+.default_value = 16,
+.flags = 0,
+.reserved = {0, 0}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.id = V4L2_CID_AUDIO_MUTE,
+.type = V4L2_CTRL_TYPE_BOOLEAN,
+.name = "Mute",
+.default_value = true,
+.flags = 0,
+.reserved = {0, 0}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.id = 0xFFFFFFFF
+}
+ };
+/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_sound.c b/drivers/staging/easycap/easycap_sound.c
new file mode 100644
index 00000000000..63562bda738
--- /dev/null
+++ b/drivers/staging/easycap/easycap_sound.c
@@ -0,0 +1,1046 @@
+/******************************************************************************
+* *
+* easycap_sound.c *
+* *
+* Audio driver for EasyCAP USB2.0 Video Capture Device DC60 *
+* *
+* *
+******************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+
+#include "easycap.h"
+#include "easycap_debug.h"
+#include "easycap_sound.h"
+
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * ON COMPLETION OF AN AUDIO URB ITS DATA IS COPIED TO THE AUDIO BUFFERS
+ * PROVIDED peasycap->audio_idle IS ZERO. REGARDLESS OF THIS BEING TRUE,
+ * IT IS RESUBMITTED PROVIDED peasycap->audio_isoc_streaming IS NOT ZERO.
+ */
+/*---------------------------------------------------------------------------*/
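+/*
+ * The handler distinguishes three situations: while audio_idle is set
+ * the URB is merely resubmitted; a non-zero urb status is logged and the
+ * URB resubmitted; otherwise each isochronous frame is copied into the
+ * audio page ring (converted from 8-bit unsigned microphone samples to
+ * 16-bit signed little-endian when peasycap->microphone is set), and
+ * readers waiting on wq_audio are woken whenever a fragment boundary is
+ * crossed.
+ */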
+void
+easysnd_complete(struct urb *purb)
+{
+static int mt;
+struct easycap *peasycap;
+struct data_buffer *paudio_buffer;
+char errbuf[16];
+__u8 *p1, *p2;
+__s16 s16;
+int i, j, more, much, leap, rc;
+#if defined(UPSAMPLE)
+int k;
+__s16 oldaudio, newaudio, delta;
+#endif /*UPSAMPLE*/
+
+JOT(16, "\n");
+
+if (NULL == purb) {
+ SAY("ERROR: purb is NULL\n");
+ return;
+}
+peasycap = purb->context;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return;
+}
+much = 0;
+
+
+if (peasycap->audio_idle) {
+ JOT(16, "%i=audio_idle %i=audio_isoc_streaming\n", \
+ peasycap->audio_idle, peasycap->audio_isoc_streaming);
+ if (peasycap->audio_isoc_streaming) {
+ rc = usb_submit_urb(purb, GFP_ATOMIC);
+ if (0 != rc) {
+ SAY("ERROR: while %i=audio_idle, " \
+ "usb_submit_urb() failed with rc:\n", \
+ peasycap->audio_idle);
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n"); break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n"); break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n"); break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n"); break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n"); break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n"); break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n"); break;
+ }
+ case -ENOSPC: {
+ SAY("ENOSPC\n"); break;
+ }
+ default: {
+ SAY("0x%08X\n", rc); break;
+ }
+ }
+ }
+ }
+ return;
+}
+/*---------------------------------------------------------------------------*/
+if (purb->status) {
+ if (-ESHUTDOWN == purb->status) {
+ JOT(16, "immediate return because -ESHUTDOWN=purb->status\n");
+ return;
+ }
+ SAY("ERROR: non-zero urb status:\n");
+ switch (purb->status) {
+ case -EINPROGRESS: {
+ SAY("-EINPROGRESS\n"); break;
+ }
+ case -ENOSR: {
+ SAY("-ENOSR\n"); break;
+ }
+ case -EPIPE: {
+ SAY("-EPIPE\n"); break;
+ }
+ case -EOVERFLOW: {
+ SAY("-EOVERFLOW\n"); break;
+ }
+ case -EPROTO: {
+ SAY("-EPROTO\n"); break;
+ }
+ case -EILSEQ: {
+ SAY("-EILSEQ\n"); break;
+ }
+ case -ETIMEDOUT: {
+ SAY("-ETIMEDOUT\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("-EMSGSIZE\n"); break;
+ }
+ case -EOPNOTSUPP: {
+ SAY("-EOPNOTSUPP\n"); break;
+ }
+ case -EPFNOSUPPORT: {
+ SAY("-EPFNOSUPPORT\n"); break;
+ }
+ case -EAFNOSUPPORT: {
+ SAY("-EAFNOSUPPORT\n"); break;
+ }
+ case -EADDRINUSE: {
+ SAY("-EADDRINUSE\n"); break;
+ }
+ case -EADDRNOTAVAIL: {
+ SAY("-EADDRNOTAVAIL\n"); break;
+ }
+ case -ENOBUFS: {
+ SAY("-ENOBUFS\n"); break;
+ }
+ case -EISCONN: {
+ SAY("-EISCONN\n"); break;
+ }
+ case -ENOTCONN: {
+ SAY("-ENOTCONN\n"); break;
+ }
+ case -ESHUTDOWN: {
+ SAY("-ESHUTDOWN\n"); break;
+ }
+ case -ENOENT: {
+ SAY("-ENOENT\n"); break;
+ }
+ case -ECONNRESET: {
+ SAY("-ECONNRESET\n"); break;
+ }
+ case -ENOSPC: {
+ SAY("ENOSPC\n"); break;
+ }
+ default: {
+ SAY("unknown error code 0x%08X\n", purb->status); break;
+ }
+ }
+/*---------------------------------------------------------------------------*/
+/*
+ * RESUBMIT THIS URB AFTER AN ERROR
+ *
+ * (THIS IS DUPLICATE CODE TO REDUCE INDENTATION OF THE NO-ERROR PATH)
+ */
+/*---------------------------------------------------------------------------*/
+ if (peasycap->audio_isoc_streaming) {
+ rc = usb_submit_urb(purb, GFP_ATOMIC);
+ if (0 != rc) {
+ SAY("ERROR: while %i=audio_idle, usb_submit_urb() "
+ "failed with rc:\n", peasycap->audio_idle);
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n"); break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n"); break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n"); break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n"); break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n"); break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n"); break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n"); break;
+ }
+ default: {
+ SAY("0x%08X\n", rc); break;
+ }
+ }
+ }
+ }
+ return;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * PROCEED HERE WHEN NO ERROR
+ */
+/*---------------------------------------------------------------------------*/
+#if defined(UPSAMPLE)
+oldaudio = peasycap->oldaudio;
+#endif /*UPSAMPLE*/
+
+for (i = 0; i < purb->number_of_packets; i++) {
+ switch (purb->iso_frame_desc[i].status) {
+ case 0: {
+ strcpy(&errbuf[0], "OK"); break;
+ }
+ case -ENOENT: {
+ strcpy(&errbuf[0], "-ENOENT"); break;
+ }
+ case -EINPROGRESS: {
+ strcpy(&errbuf[0], "-EINPROGRESS"); break;
+ }
+ case -EPROTO: {
+ strcpy(&errbuf[0], "-EPROTO"); break;
+ }
+ case -EILSEQ: {
+ strcpy(&errbuf[0], "-EILSEQ"); break;
+ }
+ case -ETIME: {
+ strcpy(&errbuf[0], "-ETIME"); break;
+ }
+ case -ETIMEDOUT: {
+ strcpy(&errbuf[0], "-ETIMEDOUT"); break;
+ }
+ case -EPIPE: {
+ strcpy(&errbuf[0], "-EPIPE"); break;
+ }
+ case -ECOMM: {
+ strcpy(&errbuf[0], "-ECOMM"); break;
+ }
+ case -ENOSR: {
+ strcpy(&errbuf[0], "-ENOSR"); break;
+ }
+ case -EOVERFLOW: {
+ strcpy(&errbuf[0], "-EOVERFLOW"); break;
+ }
+ case -EREMOTEIO: {
+ strcpy(&errbuf[0], "-EREMOTEIO"); break;
+ }
+ case -ENODEV: {
+ strcpy(&errbuf[0], "-ENODEV"); break;
+ }
+ case -EXDEV: {
+ strcpy(&errbuf[0], "-EXDEV"); break;
+ }
+ case -EINVAL: {
+ strcpy(&errbuf[0], "-EINVAL"); break;
+ }
+ case -ECONNRESET: {
+ strcpy(&errbuf[0], "-ECONNRESET"); break;
+ }
+ case -ENOSPC: {
+ strcpy(&errbuf[0], "-ENOSPC"); break;
+ }
+ case -ESHUTDOWN: {
+ strcpy(&errbuf[0], "-ESHUTDOWN"); break;
+ }
+ default: {
+ strcpy(&errbuf[0], "UNKNOWN"); break;
+ }
+ }
+ if ((!purb->iso_frame_desc[i].status) && 0) {
+ JOT(16, "frame[%2i]: %i=status{=%16s} " \
+ "%5i=actual " \
+ "%5i=length " \
+ "%3i=offset\n", \
+ i, purb->iso_frame_desc[i].status, &errbuf[0],
+ purb->iso_frame_desc[i].actual_length,
+ purb->iso_frame_desc[i].length,
+ purb->iso_frame_desc[i].offset);
+ }
+ if (!purb->iso_frame_desc[i].status) {
+ more = purb->iso_frame_desc[i].actual_length;
+
+#if defined(TESTTONE)
+ if (!more)
+ more = purb->iso_frame_desc[i].length;
+#endif
+
+ if (!more)
+ mt++;
+ else {
+ if (mt) {
+ JOT(16, "%4i empty audio urb frames\n", mt);
+ mt = 0;
+ }
+
+ p1 = (__u8 *)(purb->transfer_buffer + \
+ purb->iso_frame_desc[i].offset);
+
+ leap = 0;
+ p1 += leap;
+ more -= leap;
+/*---------------------------------------------------------------------------*/
+/*
+ * COPY more BYTES FROM ISOC BUFFER TO AUDIO BUFFER,
+ * CONVERTING 8-BIT MONO TO 16-BIT SIGNED LITTLE-ENDIAN SAMPLES IF NECESSARY
+ */
+/*---------------------------------------------------------------------------*/
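+/*
+ * The microphone conversion below maps an unsigned 8-bit sample to a
+ * signed 16-bit one as (sample - 128) * 128, stored little-endian: an
+ * input byte of 144, for example, becomes (144 - 128) * 128 = 2048 =
+ * 0x0800, written as the byte pair 0x00 0x08. When UPSAMPLE is defined,
+ * each input sample is additionally expanded into four linearly
+ * interpolated stereo pairs.
+ */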
+ while (more) {
+ if (0 > more) {
+ SAY("easysnd_complete: MISTAKE: " \
+ "more is negative\n");
+ return;
+ }
+ if (peasycap->audio_buffer_page_many <= \
+ peasycap->audio_fill) {
+ SAY("ERROR: bad " \
+ "peasycap->audio_fill\n");
+ return;
+ }
+
+ paudio_buffer = &peasycap->audio_buffer\
+ [peasycap->audio_fill];
+ if (PAGE_SIZE < (paudio_buffer->pto - \
+ paudio_buffer->pgo)) {
+ SAY("ERROR: bad paudio_buffer->pto\n");
+ return;
+ }
+ if (PAGE_SIZE == (paudio_buffer->pto - \
+ paudio_buffer->pgo)) {
+
+#if defined(TESTTONE)
+ easysnd_testtone(peasycap, \
+ peasycap->audio_fill);
+#endif /*TESTTONE*/
+
+ paudio_buffer->pto = \
+ paudio_buffer->pgo;
+ (peasycap->audio_fill)++;
+ if (peasycap->\
+ audio_buffer_page_many <= \
+ peasycap->audio_fill)
+ peasycap->audio_fill = 0;
+
+ JOT(12, "bumped peasycap->" \
+ "audio_fill to %i\n", \
+ peasycap->audio_fill);
+
+ paudio_buffer = &peasycap->\
+ audio_buffer\
+ [peasycap->audio_fill];
+ paudio_buffer->pto = \
+ paudio_buffer->pgo;
+
+ if (!(peasycap->audio_fill % \
+ peasycap->\
+ audio_pages_per_fragment)) {
+ JOT(12, "wakeup call on wq_" \
+ "audio, %i=frag reading %i" \
+ "=fragment fill\n", \
+ (peasycap->audio_read / \
+ peasycap->\
+ audio_pages_per_fragment), \
+ (peasycap->audio_fill / \
+ peasycap->\
+ audio_pages_per_fragment));
+ wake_up_interruptible\
+ (&(peasycap->wq_audio));
+ }
+ }
+
+ much = PAGE_SIZE - (int)(paudio_buffer->pto -\
+ paudio_buffer->pgo);
+
+ if (false == peasycap->microphone) {
+ if (much > more)
+ much = more;
+
+ memcpy(paudio_buffer->pto, p1, much);
+ p1 += much;
+ more -= much;
+ } else {
+#if defined(UPSAMPLE)
+ if (much % 16)
+ JOT(8, "MISTAKE? much" \
+ " is not divisible by 16\n");
+ if (much > (16 * \
+ more))
+ much = 16 * \
+ more;
+ p2 = (__u8 *)paudio_buffer->pto;
+
+ for (j = 0; j < (much/16); j++) {
+ newaudio = ((int) *p1) - 128;
+ newaudio = 128 * \
+ newaudio;
+
+ delta = (newaudio - oldaudio) \
+ / 4;
+ s16 = oldaudio + delta;
+
+ for (k = 0; k < 4; k++) {
+ *p2 = (0x00FF & s16);
+ *(p2 + 1) = (0xFF00 & \
+ s16) >> 8;
+ p2 += 2;
+ *p2 = (0x00FF & s16);
+ *(p2 + 1) = (0xFF00 & \
+ s16) >> 8;
+ p2 += 2;
+
+ s16 += delta;
+ }
+ p1++;
+ more--;
+ oldaudio = s16;
+ }
+#else
+ if (much > (2 * more))
+ much = 2 * more;
+ p2 = (__u8 *)paudio_buffer->pto;
+
+ for (j = 0; j < (much / 2); j++) {
+ s16 = ((int) *p1) - 128;
+ s16 = 128 * \
+ s16;
+ *p2 = (0x00FF & s16);
+ *(p2 + 1) = (0xFF00 & s16) >> \
+ 8;
+ p1++; p2 += 2;
+ more--;
+ }
+#endif /*UPSAMPLE*/
+ }
+ (paudio_buffer->pto) += much;
+ }
+ }
+ } else {
+ JOT(12, "discarding audio samples because " \
+ "%i=purb->iso_frame_desc[i].status\n", \
+ purb->iso_frame_desc[i].status);
+ }
+
+#if defined(UPSAMPLE)
+peasycap->oldaudio = oldaudio;
+#endif /*UPSAMPLE*/
+
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * RESUBMIT THIS URB AFTER NO ERROR
+ */
+/*---------------------------------------------------------------------------*/
+if (peasycap->audio_isoc_streaming) {
+ rc = usb_submit_urb(purb, GFP_ATOMIC);
+ if (0 != rc) {
+ SAY("ERROR: while %i=audio_idle, usb_submit_urb() failed " \
+ "with rc:\n", peasycap->audio_idle);
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n"); break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n"); break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n"); break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n"); break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n"); break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n"); break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n"); break;
+ }
+ case -ENOSPC: {
+ SAY("ENOSPC\n"); break;
+ }
+ default: {
+ SAY("0x%08X\n", rc); break;
+ }
+ }
+ }
+}
+return;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * THE AUDIO URBS ARE SUBMITTED AT THIS EARLY STAGE SO THAT IT IS POSSIBLE TO
+ * STREAM FROM /dev/easysnd1 WITH SIMPLE PROGRAMS SUCH AS cat WHICH DO NOT
+ * HAVE AN IOCTL INTERFACE. THE VIDEO URBS, BY CONTRAST, MUST BE SUBMITTED
+ * MUCH LATER: SEE COMMENTS IN FILE easycap_main.c.
+ */
+/*---------------------------------------------------------------------------*/
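+/*---------------------------------------------------------------------------*/
+/*
+ *  ILLUSTRATION ONLY, NOT PART OF THE DRIVER:  A MINIMAL USERSPACE READER
+ *  MIGHT LOOK LIKE THE SKETCH BELOW, WHICH COPIES THE RAW AUDIO STREAM TO
+ *  STANDARD OUTPUT.  THE DEVICE NODE NAME /dev/easysnd1 IS TAKEN FROM THE
+ *  COMMENT ABOVE;  THE BUFFER SIZE IS ARBITRARY.
+ *
+ *  #include <fcntl.h>
+ *  #include <unistd.h>
+ *
+ *  int main(void)
+ *  {
+ *  char buffer[4096];
+ *  ssize_t got;
+ *  int fd = open("/dev/easysnd1", O_RDONLY);
+ *  if (0 > fd)
+ *          return 1;
+ *  while (0 < (got = read(fd, buffer, sizeof(buffer))))
+ *          write(1, buffer, got);
+ *  close(fd);
+ *  return 0;
+ *  }
+ */
+/*---------------------------------------------------------------------------*/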
+int
+easysnd_open(struct inode *inode, struct file *file)
+{
+struct usb_interface *pusb_interface;
+struct easycap *peasycap;
+int subminor, rc;
+
+JOT(4, "begins.\n");
+
+subminor = iminor(inode);
+
+pusb_interface = usb_find_interface(&easycap_usb_driver, subminor);
+if (NULL == pusb_interface) {
+ SAY("ERROR: pusb_interface is NULL\n");
+ SAY("ending unsuccessfully\n");
+ return -1;
+}
+peasycap = usb_get_intfdata(pusb_interface);
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ SAY("ending unsuccessfully\n");
+ return -1;
+}
+
+file->private_data = peasycap;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * INITIALIZATION.
+ */
+/*---------------------------------------------------------------------------*/
+JOT(4, "starting initialization\n");
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+} else {
+ JOT(16, "0x%08lX=peasycap->pusb_device\n", \
+ (long int)peasycap->pusb_device);
+}
+
+rc = audio_setup(peasycap);
+if (0 <= rc)
+ JOT(8, "audio_setup() returned %i\n", rc);
+else
+	JOT(8, "easysnd_open(): ERROR: audio_setup() returned %i\n", rc);
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device has become NULL\n");
+ return -EFAULT;
+}
+rc = adjust_volume(peasycap, -8192);
+if (0 != rc) {
+ SAY("ERROR: adjust_volume(default) returned %i\n", rc);
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device has become NULL\n");
+ return -EFAULT;
+}
+rc = usb_set_interface(peasycap->pusb_device, peasycap->audio_interface, \
+ peasycap->audio_altsetting_on);
+JOT(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface, \
+ peasycap->audio_altsetting_on, rc);
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device has become NULL\n");
+ return -EFAULT;
+}
+rc = wakeup_device(peasycap->pusb_device);
+if (0 == rc)
+ JOT(8, "wakeup_device() returned %i\n", rc);
+else
+	JOT(8, "easysnd_open(): ERROR: wakeup_device() returned %i\n", rc);
+
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device has become NULL\n");
+ return -EFAULT;
+}
+submit_audio_urbs(peasycap);
+peasycap->audio_idle = 0;
+
+peasycap->timeval1.tv_sec = 0;
+peasycap->timeval1.tv_usec = 0;
+
+JOT(4, "finished initialization\n");
+return 0;
+}
+/*****************************************************************************/
+int
+easysnd_release(struct inode *inode, struct file *file)
+{
+struct easycap *peasycap;
+
+JOT(4, "begins\n");
+
+peasycap = file->private_data;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL.\n");
+ return -EFAULT;
+}
+if (0 != kill_audio_urbs(peasycap)) {
+ SAY("ERROR: kill_audio_urbs() failed\n");
+ return -EFAULT;
+}
+JOT(4, "ending successfully\n");
+return 0;
+}
+/*****************************************************************************/
+ssize_t
+easysnd_read(struct file *file, char __user *puserspacebuffer,
+						size_t kount, loff_t *poff)
+{
+struct timeval timeval;
+static struct timeval timeval1;
+static long long int audio_bytes, above, below, mean;
+struct signed_div_result sdr;
+unsigned char *p0;
+long int kount1, more, rc, l0, lm;
+int fragment;
+struct easycap *peasycap;
+struct data_buffer *pdata_buffer;
+size_t szret;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * DO A BLOCKING READ TO TRANSFER DATA TO USER SPACE.
+ *
+ ******************************************************************************
+ ***** N.B. IF THIS FUNCTION RETURNS 0, NOTHING IS SEEN IN USER SPACE. ******
+ ***** THIS CONDITION SIGNIFIES END-OF-FILE. ******
+ ******************************************************************************
+ */
+/*---------------------------------------------------------------------------*/
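+/*
+ *  A READ WITH O_NONBLOCK SET DOES NOT BLOCK:  IT RETURNS -EAGAIN WHEN NO
+ *  FILLED FRAGMENT IS AVAILABLE (SEE THE WAIT LOOP BELOW).
+ */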
+
+JOT(8, "===== easysnd_read(): kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
+
+peasycap = (struct easycap *)(file->private_data);
+if (NULL == peasycap) {
+ SAY("ERROR in easysnd_read(): peasycap is NULL\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+if ((0 > peasycap->audio_read) || \
+ (peasycap->audio_buffer_page_many <= peasycap->audio_read)) {
+ SAY("ERROR: peasycap->audio_read out of range\n");
+ return -EFAULT;
+}
+pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
+if ((struct data_buffer *)NULL == pdata_buffer) {
+ SAY("ERROR: pdata_buffer is NULL\n");
+ return -EFAULT;
+}
+JOT(12, "before wait, %i=frag read %i=frag fill\n", \
+ (peasycap->audio_read / peasycap->audio_pages_per_fragment), \
+ (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
+fragment = (peasycap->audio_read / peasycap->audio_pages_per_fragment);
+while ((fragment == (peasycap->audio_fill / \
+ peasycap->audio_pages_per_fragment)) || \
+ (0 == (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo)))) {
+ if (file->f_flags & O_NONBLOCK) {
+ JOT(16, "returning -EAGAIN as instructed\n");
+ return -EAGAIN;
+ }
+ rc = wait_event_interruptible(peasycap->wq_audio, \
+ (peasycap->audio_idle || peasycap->audio_eof || \
+ ((fragment != (peasycap->audio_fill / \
+ peasycap->audio_pages_per_fragment)) && \
+ (0 < (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo))))));
+ if (0 != rc) {
+ SAY("aborted by signal\n");
+ return -ERESTARTSYS;
+ }
+ if (peasycap->audio_eof) {
+ JOT(8, "returning 0 because %i=audio_eof\n", \
+ peasycap->audio_eof);
+ kill_audio_urbs(peasycap);
+ msleep(500);
+ return 0;
+ }
+ if (peasycap->audio_idle) {
+ JOT(16, "returning 0 because %i=audio_idle\n", \
+ peasycap->audio_idle);
+ return 0;
+ }
+ if (!peasycap->audio_isoc_streaming) {
+ JOT(16, "returning 0 because audio urbs not streaming\n");
+ return 0;
+ }
+}
+JOT(12, "after wait, %i=frag read %i=frag fill\n", \
+ (peasycap->audio_read / peasycap->audio_pages_per_fragment), \
+ (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
+szret = (size_t)0;
+while (fragment == (peasycap->audio_read / \
+ peasycap->audio_pages_per_fragment)) {
+ if (NULL == pdata_buffer->pgo) {
+ SAY("ERROR: pdata_buffer->pgo is NULL\n");
+ return -EFAULT;
+ }
+ if (NULL == pdata_buffer->pto) {
+ SAY("ERROR: pdata_buffer->pto is NULL\n");
+ return -EFAULT;
+ }
+ kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
+ if (0 > kount1) {
+ SAY("easysnd_read: MISTAKE: kount1 is negative\n");
+ return -ERESTARTSYS;
+ }
+ if (!kount1) {
+ (peasycap->audio_read)++;
+ if (peasycap->audio_buffer_page_many <= peasycap->audio_read)
+ peasycap->audio_read = 0;
+ JOT(12, "bumped peasycap->audio_read to %i\n", \
+ peasycap->audio_read);
+
+ if (fragment != (peasycap->audio_read / \
+ peasycap->audio_pages_per_fragment))
+ break;
+
+ if ((0 > peasycap->audio_read) || \
+ (peasycap->audio_buffer_page_many <= \
+ peasycap->audio_read)) {
+ SAY("ERROR: peasycap->audio_read out of range\n");
+ return -EFAULT;
+ }
+ pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
+ if ((struct data_buffer *)NULL == pdata_buffer) {
+ SAY("ERROR: pdata_buffer is NULL\n");
+ return -EFAULT;
+ }
+ if (NULL == pdata_buffer->pgo) {
+ SAY("ERROR: pdata_buffer->pgo is NULL\n");
+ return -EFAULT;
+ }
+ if (NULL == pdata_buffer->pto) {
+ SAY("ERROR: pdata_buffer->pto is NULL\n");
+ return -EFAULT;
+ }
+ kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
+ }
+ JOT(12, "ready to send %li bytes\n", (long int) kount1);
+ JOT(12, "still to send %li bytes\n", (long int) kount);
+ more = kount1;
+ if (more > kount)
+ more = kount;
+ JOT(12, "agreed to send %li bytes from page %i\n", \
+ more, peasycap->audio_read);
+ if (!more)
+ break;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * ACCUMULATE DYNAMIC-RANGE INFORMATION
+ */
+/*---------------------------------------------------------------------------*/
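+/*
+ *  SUMMER APPARENTLY ACCUMULATES THE SAMPLE COUNT, THE SUM AND THE SUM OF
+ *  SQUARES OF THE 16-BIT SAMPLES IN audio_sample, audio_niveau AND
+ *  audio_square, WHICH ARE USED BELOW TO ESTIMATE THE DYNAMIC RANGE.
+ */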
+ p0 = (unsigned char *)pdata_buffer->pgo; l0 = 0; lm = more/2;
+ while (l0 < lm) {
+ SUMMER(p0, &peasycap->audio_sample, &peasycap->audio_niveau, \
+ &peasycap->audio_square); l0++; p0 += 2;
+ }
+/*---------------------------------------------------------------------------*/
+ rc = copy_to_user(puserspacebuffer, pdata_buffer->pto, more);
+ if (0 != rc) {
+ SAY("ERROR: copy_to_user() returned %li\n", rc);
+ return -EFAULT;
+ }
+ *poff += (loff_t)more;
+ szret += (size_t)more;
+ pdata_buffer->pto += more;
+ puserspacebuffer += more;
+ kount -= (size_t)more;
+}
+JOT(12, "after read, %i=frag read %i=frag fill\n", \
+ (peasycap->audio_read / peasycap->audio_pages_per_fragment), \
+ (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
+if ((ssize_t)kount < 0) {
+ SAY("MISTAKE: %li=kount %li=szret\n", \
+ (long int)kount, (long int)szret);
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * CALCULATE DYNAMIC RANGE FOR (VAPOURWARE) AUTOMATIC VOLUME CONTROL
+ */
+/*---------------------------------------------------------------------------*/
+if (peasycap->audio_sample) {
+ below = peasycap->audio_sample;
+ above = peasycap->audio_square;
+ sdr = signed_div(above, below);
+ above = sdr.quotient;
+ mean = peasycap->audio_niveau;
+ sdr = signed_div(mean, peasycap->audio_sample);
+
+ JOT(8, "%8lli=mean %8lli=meansquare after %lli samples, =>\n", \
+ sdr.quotient, above, peasycap->audio_sample);
+
+ sdr = signed_div(above, 32768);
+ JOT(8, "audio dynamic range is roughly %lli\n", sdr.quotient);
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * UPDATE THE AUDIO CLOCK
+ */
+/*---------------------------------------------------------------------------*/
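+/*
+ *  THE FALLBACK VALUE 192000 BYTES/SECOND PRESUMABLY CORRESPONDS TO
+ *  48000 SAMPLES/SECOND * 2 CHANNELS * 2 BYTES PER SAMPLE.
+ */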
+do_gettimeofday(&timeval);
+if (!peasycap->timeval1.tv_sec) {
+ audio_bytes = 0;
+ timeval1 = timeval;
+
+ if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
+ return -ERESTARTSYS;
+ peasycap->timeval1 = timeval1;
+ mutex_unlock(&(peasycap->mutex_timeval1));
+ sdr.quotient = 192000;
+} else {
+ audio_bytes += (long long int) szret;
+ below = ((long long int)(1000000)) * \
+ ((long long int)(timeval.tv_sec - timeval1.tv_sec)) + \
+ (long long int)(timeval.tv_usec - timeval1.tv_usec);
+ above = 1000000 * ((long long int) audio_bytes);
+
+ if (below)
+ sdr = signed_div(above, below);
+ else
+ sdr.quotient = 192000;
+}
+JOT(8, "audio streaming at %lli bytes/second\n", sdr.quotient);
+if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
+ return -ERESTARTSYS;
+peasycap->dnbydt = sdr.quotient;
+mutex_unlock(&(peasycap->mutex_timeval1));
+
+JOT(8, "returning %li\n", (long int)szret);
+return szret;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * SUBMIT ALL AUDIO URBS.
+ */
+/*---------------------------------------------------------------------------*/
+int
+submit_audio_urbs(struct easycap *peasycap)
+{
+struct data_urb *pdata_urb;
+struct urb *purb;
+struct list_head *plist_head;
+int j, isbad, m, rc;
+int isbuf;
+
+if ((struct list_head *)NULL == peasycap->purb_audio_head) {
+	SAY("ERROR: peasycap->purb_audio_head uninitialized\n");
+ return -EFAULT;
+}
+if ((struct usb_device *)NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+if (!peasycap->audio_isoc_streaming) {
+ JOT(4, "initial submission of all audio urbs\n");
+ rc = usb_set_interface(peasycap->pusb_device,
+ peasycap->audio_interface, \
+ peasycap->audio_altsetting_on);
+ JOT(8, "usb_set_interface(.,%i,%i) returned %i\n", \
+ peasycap->audio_interface, \
+ peasycap->audio_altsetting_on, rc);
+
+ isbad = 0; m = 0;
+ list_for_each(plist_head, (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if (NULL != pdata_urb) {
+ purb = pdata_urb->purb;
+ if (NULL != purb) {
+ isbuf = pdata_urb->isbuf;
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+				purb->pipe =
+					usb_rcvisocpipe(peasycap->pusb_device,
+					peasycap->audio_endpointnumber);
+				purb->transfer_flags = URB_ISO_ASAP;
+				purb->transfer_buffer =
+					peasycap->audio_isoc_buffer[isbuf].pgo;
+				purb->transfer_buffer_length =
+					peasycap->audio_isoc_buffer_size;
+ purb->complete = easysnd_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
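+				/*
+				 *  equal-sized isochronous frame descriptors
+				 *  are laid end to end across the transfer
+				 *  buffer
+				 */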
+				purb->number_of_packets =
+					peasycap->audio_isoc_framesperdesc;
+				for (j = 0;
+				     j < peasycap->audio_isoc_framesperdesc;
+				     j++) {
+					purb->iso_frame_desc[j].offset =
+						j * peasycap->audio_isoc_maxframesize;
+					purb->iso_frame_desc[j].length =
+						peasycap->audio_isoc_maxframesize;
+				}
+
+ rc = usb_submit_urb(purb, GFP_KERNEL);
+ if (0 != rc) {
+ isbad++;
+ SAY("ERROR: usb_submit_urb() failed" \
+ " for urb with rc:\n");
+ switch (rc) {
+ case -ENOMEM: {
+ SAY("ENOMEM\n"); break;
+ }
+ case -ENODEV: {
+ SAY("ENODEV\n"); break;
+ }
+ case -ENXIO: {
+ SAY("ENXIO\n"); break;
+ }
+ case -EINVAL: {
+ SAY("EINVAL\n"); break;
+ }
+ case -EAGAIN: {
+ SAY("EAGAIN\n"); break;
+ }
+ case -EFBIG: {
+ SAY("EFBIG\n"); break;
+ }
+ case -EPIPE: {
+ SAY("EPIPE\n"); break;
+ }
+ case -EMSGSIZE: {
+ SAY("EMSGSIZE\n"); break;
+ }
+ case -ENOSPC: {
+ SAY("ENOSPC\n"); break;
+ }
+ default: {
+ SAY("unknown error code %i\n",\
+ rc); break;
+ }
+ }
+ } else {
+ m++;
+ }
+ } else {
+ isbad++;
+ }
+ } else {
+ isbad++;
+ }
+ }
+ if (isbad) {
+ JOT(4, "attempting cleanup instead of submitting\n");
+ list_for_each(plist_head, (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb, \
+ list_head);
+ if (NULL != pdata_urb) {
+ purb = pdata_urb->purb;
+ if (NULL != purb)
+ usb_kill_urb(purb);
+ }
+ }
+ peasycap->audio_isoc_streaming = 0;
+ } else {
+ peasycap->audio_isoc_streaming = 1;
+ JOT(4, "submitted %i audio urbs\n", m);
+ }
+} else
+ JOT(4, "already streaming audio urbs\n");
+
+return 0;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * KILL ALL AUDIO URBS.
+ */
+/*---------------------------------------------------------------------------*/
+int
+kill_audio_urbs(struct easycap *peasycap)
+{
+int m;
+struct list_head *plist_head;
+struct data_urb *pdata_urb;
+
+if (peasycap->audio_isoc_streaming) {
+ if ((struct list_head *)NULL != peasycap->purb_audio_head) {
+ peasycap->audio_isoc_streaming = 0;
+ JOT(4, "killing audio urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head, struct data_urb,
+ list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ if ((struct urb *)NULL != pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
+ }
+ }
+ }
+ JOT(4, "%i audio urbs killed\n", m);
+ } else {
+ SAY("ERROR: peasycap->purb_audio_head is NULL\n");
+ return -EFAULT;
+ }
+} else {
+ JOT(8, "%i=audio_isoc_streaming, no audio urbs killed\n", \
+ peasycap->audio_isoc_streaming);
+}
+return 0;
+}
+/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_sound.h b/drivers/staging/easycap/easycap_sound.h
new file mode 100644
index 00000000000..49127396902
--- /dev/null
+++ b/drivers/staging/easycap/easycap_sound.h
@@ -0,0 +1,28 @@
+/*****************************************************************************
+* *
+* easycap_sound.h *
+* *
+*****************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+extern struct easycap *peasycap;
+extern struct usb_driver easycap_usb_driver;
diff --git a/drivers/staging/easycap/easycap_standard.h b/drivers/staging/easycap/easycap_standard.h
new file mode 100644
index 00000000000..cadc8d27a85
--- /dev/null
+++ b/drivers/staging/easycap/easycap_standard.h
@@ -0,0 +1,27 @@
+/*****************************************************************************
+* *
+* easycap_standard.h *
+* *
+*****************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+extern struct easycap_standard easycap_standard[];
diff --git a/drivers/staging/easycap/easycap_testcard.c b/drivers/staging/easycap/easycap_testcard.c
new file mode 100644
index 00000000000..3c2ce28fab9
--- /dev/null
+++ b/drivers/staging/easycap/easycap_testcard.c
@@ -0,0 +1,392 @@
+/******************************************************************************
+* *
+* easycap_testcard.c *
+* *
+******************************************************************************/
+/*
+ *
+ * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
+ *
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+/*****************************************************************************/
+
+#include "easycap.h"
+#include "easycap_debug.h"
+
+/*****************************************************************************/
+#define TESTCARD_BYTESPERLINE (2 * 1440)
+void
+easycap_testcard(struct easycap *peasycap, int field_fill)
+{
+int total;
+int y, u, v, r, g, b;
+unsigned char uyvy[4];
+
+int i1, line, k, m, n, more, much, barwidth;
+unsigned char bfbar[TESTCARD_BYTESPERLINE / 8], *p1, *p2;
+struct data_buffer *pfield_buffer;
+
+JOT(8, "%i=field_fill\n", field_fill);
+
+if ((TESTCARD_BYTESPERLINE / 2) < peasycap->width) {
+ SAY("ERROR: image is too wide\n");
+ return;
+}
+if (peasycap->width % 16) {
+ SAY("ERROR: indivisible image width\n");
+ return;
+}
+
+total = 0;
+barwidth = (2 * peasycap->width) / 8;
+
+k = field_fill;
+m = 0;
+n = 0;
+
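+/*
+ *  FILL THE FIELD BUFFER WITH EIGHT VERTICAL BARS OF INCREASING GREY LEVEL
+ *  (r = g = b), LINE BY LINE, IN UYVY FORMAT.
+ */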
+for (line = 0; line < (peasycap->height / 2); line++) {
+ for (i1 = 0; i1 < 8; i1++) {
+ r = (i1 * 256)/8;
+ g = (i1 * 256)/8;
+ b = (i1 * 256)/8;
+
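+		/*
+		 *  integer approximation of the usual RGB-to-YUV (BT.601)
+		 *  conversion;  u and v are offset into the unsigned range
+		 */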
+		y = 299*r/1000 + 587*g/1000 + 114*b/1000;
+		u = -147*r/1000 - 289*g/1000 + 436*b/1000;
+		u = u + 128;
+		v = 615*r/1000 - 515*g/1000 - 100*b/1000;
+		v = v + 128;
+
+		uyvy[0] = 0xFF & u;
+		uyvy[1] = 0xFF & y;
+		uyvy[2] = 0xFF & v;
+		uyvy[3] = 0xFF & y;
+
+ p1 = &bfbar[0];
+ while (p1 < &bfbar[barwidth]) {
+			*p1++ = uyvy[0];
+			*p1++ = uyvy[1];
+			*p1++ = uyvy[2];
+			*p1++ = uyvy[3];
+ total += 4;
+ }
+
+ p1 = &bfbar[0];
+ more = barwidth;
+
+ while (more) {
+ if ((FIELD_BUFFER_SIZE/PAGE_SIZE) <= m) {
+ SAY("ERROR: bad m reached\n");
+ return;
+ }
+			if (PAGE_SIZE < n) {
+				SAY("ERROR: bad n reached\n");
+				return;
+			}
+
+ if (0 > more) {
+ SAY("ERROR: internal fault\n");
+ return;
+ }
+
+ much = PAGE_SIZE - n;
+ if (much > more)
+ much = more;
+ pfield_buffer = &peasycap->field_buffer[k][m];
+ p2 = pfield_buffer->pgo + n;
+ memcpy(p2, p1, much);
+
+ p1 += much;
+ n += much;
+ more -= much;
+ if (PAGE_SIZE == n) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+}
+
+JOT(8, "%i=total\n", total);
+if (total != peasycap->width * peasycap->height)
+ SAY("ERROR: wrong number of bytes written: %i\n", total);
+return;
+}
+/*****************************************************************************/
+#if defined(EASYCAP_TESTTONE)
+/*-----------------------------------------------------------------------------
+THE tones[] ARRAY BELOW IS THE OUTPUT OF THIS PROGRAM,
+COMPILED WITH gcc -o prog -lm prog.c
+- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+#include <stdio.h>
+#include <math.h>
+
+int main(void);
+int
+main(void)
+{
+int i1, i2, last;
+double d1, d2;
+
+last = 1024 - 1;
+d1 = 10.0*3.14159265/1024.0;
+printf("int tones[2048] =\n{\n");
+for (i1 = 0; i1 <= last; i1++)
+ {
+ d2 = ((double)i1) * d1;
+ i2 = (int)(16384.0*sin(d2));
+
+ if (last != i1)
+ {
+ printf("%6i, ", i2); printf("%6i, ", i2);
+ if (!((i1 + 1)%5)) printf("\n");
+ }
+ else
+ {
+ printf("%6i, ", i2); printf("%6i\n};\n", i2);
+ }
+ }
+return(0);
+}
+-----------------------------------------------------------------------------*/
+int tones[2048] = {
+ 0, 0, 502, 502, 1004, 1004, 1505, 1505, 2005, 2005,
+ 2503, 2503, 2998, 2998, 3491, 3491, 3980, 3980, 4466, 4466,
+ 4948, 4948, 5424, 5424, 5896, 5896, 6362, 6362, 6822, 6822,
+ 7276, 7276, 7723, 7723, 8162, 8162, 8594, 8594, 9018, 9018,
+ 9434, 9434, 9840, 9840, 10237, 10237, 10625, 10625, 11002, 11002,
+ 11370, 11370, 11726, 11726, 12072, 12072, 12406, 12406, 12728, 12728,
+ 13038, 13038, 13337, 13337, 13622, 13622, 13895, 13895, 14155, 14155,
+ 14401, 14401, 14634, 14634, 14853, 14853, 15058, 15058, 15249, 15249,
+ 15426, 15426, 15588, 15588, 15735, 15735, 15868, 15868, 15985, 15985,
+ 16088, 16088, 16175, 16175, 16248, 16248, 16305, 16305, 16346, 16346,
+ 16372, 16372, 16383, 16383, 16379, 16379, 16359, 16359, 16323, 16323,
+ 16272, 16272, 16206, 16206, 16125, 16125, 16028, 16028, 15917, 15917,
+ 15790, 15790, 15649, 15649, 15492, 15492, 15322, 15322, 15136, 15136,
+ 14937, 14937, 14723, 14723, 14496, 14496, 14255, 14255, 14001, 14001,
+ 13733, 13733, 13452, 13452, 13159, 13159, 12854, 12854, 12536, 12536,
+ 12207, 12207, 11866, 11866, 11513, 11513, 11150, 11150, 10777, 10777,
+ 10393, 10393, 10000, 10000, 9597, 9597, 9185, 9185, 8765, 8765,
+ 8336, 8336, 7900, 7900, 7456, 7456, 7005, 7005, 6547, 6547,
+ 6083, 6083, 5614, 5614, 5139, 5139, 4659, 4659, 4175, 4175,
+ 3687, 3687, 3196, 3196, 2701, 2701, 2204, 2204, 1705, 1705,
+ 1205, 1205, 703, 703, 201, 201, -301, -301, -803, -803,
+ -1305, -1305, -1805, -1805, -2304, -2304, -2801, -2801, -3294, -3294,
+ -3785, -3785, -4272, -4272, -4756, -4756, -5234, -5234, -5708, -5708,
+ -6176, -6176, -6639, -6639, -7095, -7095, -7545, -7545, -7988, -7988,
+ -8423, -8423, -8850, -8850, -9268, -9268, -9679, -9679, -10079, -10079,
+-10471, -10471, -10853, -10853, -11224, -11224, -11585, -11585, -11935, -11935,
+-12273, -12273, -12600, -12600, -12916, -12916, -13219, -13219, -13510, -13510,
+-13788, -13788, -14053, -14053, -14304, -14304, -14543, -14543, -14767, -14767,
+-14978, -14978, -15175, -15175, -15357, -15357, -15525, -15525, -15678, -15678,
+-15817, -15817, -15940, -15940, -16049, -16049, -16142, -16142, -16221, -16221,
+-16284, -16284, -16331, -16331, -16364, -16364, -16381, -16381, -16382, -16382,
+-16368, -16368, -16339, -16339, -16294, -16294, -16234, -16234, -16159, -16159,
+-16069, -16069, -15963, -15963, -15842, -15842, -15707, -15707, -15557, -15557,
+-15392, -15392, -15212, -15212, -15018, -15018, -14810, -14810, -14589, -14589,
+-14353, -14353, -14104, -14104, -13842, -13842, -13566, -13566, -13278, -13278,
+-12977, -12977, -12665, -12665, -12340, -12340, -12003, -12003, -11656, -11656,
+-11297, -11297, -10928, -10928, -10548, -10548, -10159, -10159, -9759, -9759,
+ -9351, -9351, -8934, -8934, -8509, -8509, -8075, -8075, -7634, -7634,
+ -7186, -7186, -6731, -6731, -6269, -6269, -5802, -5802, -5329, -5329,
+ -4852, -4852, -4369, -4369, -3883, -3883, -3393, -3393, -2900, -2900,
+ -2404, -2404, -1905, -1905, -1405, -1405, -904, -904, -402, -402,
+ 100, 100, 603, 603, 1105, 1105, 1605, 1605, 2105, 2105,
+ 2602, 2602, 3097, 3097, 3589, 3589, 4078, 4078, 4563, 4563,
+ 5043, 5043, 5519, 5519, 5990, 5990, 6455, 6455, 6914, 6914,
+ 7366, 7366, 7811, 7811, 8249, 8249, 8680, 8680, 9102, 9102,
+ 9516, 9516, 9920, 9920, 10315, 10315, 10701, 10701, 11077, 11077,
+ 11442, 11442, 11796, 11796, 12139, 12139, 12471, 12471, 12791, 12791,
+ 13099, 13099, 13395, 13395, 13678, 13678, 13948, 13948, 14205, 14205,
+ 14449, 14449, 14679, 14679, 14895, 14895, 15098, 15098, 15286, 15286,
+ 15459, 15459, 15618, 15618, 15763, 15763, 15892, 15892, 16007, 16007,
+ 16107, 16107, 16191, 16191, 16260, 16260, 16314, 16314, 16353, 16353,
+ 16376, 16376, 16384, 16384, 16376, 16376, 16353, 16353, 16314, 16314,
+ 16260, 16260, 16191, 16191, 16107, 16107, 16007, 16007, 15892, 15892,
+ 15763, 15763, 15618, 15618, 15459, 15459, 15286, 15286, 15098, 15098,
+ 14895, 14895, 14679, 14679, 14449, 14449, 14205, 14205, 13948, 13948,
+ 13678, 13678, 13395, 13395, 13099, 13099, 12791, 12791, 12471, 12471,
+ 12139, 12139, 11796, 11796, 11442, 11442, 11077, 11077, 10701, 10701,
+ 10315, 10315, 9920, 9920, 9516, 9516, 9102, 9102, 8680, 8680,
+ 8249, 8249, 7811, 7811, 7366, 7366, 6914, 6914, 6455, 6455,
+ 5990, 5990, 5519, 5519, 5043, 5043, 4563, 4563, 4078, 4078,
+ 3589, 3589, 3097, 3097, 2602, 2602, 2105, 2105, 1605, 1605,
+ 1105, 1105, 603, 603, 100, 100, -402, -402, -904, -904,
+ -1405, -1405, -1905, -1905, -2404, -2404, -2900, -2900, -3393, -3393,
+ -3883, -3883, -4369, -4369, -4852, -4852, -5329, -5329, -5802, -5802,
+ -6269, -6269, -6731, -6731, -7186, -7186, -7634, -7634, -8075, -8075,
+ -8509, -8509, -8934, -8934, -9351, -9351, -9759, -9759, -10159, -10159,
+-10548, -10548, -10928, -10928, -11297, -11297, -11656, -11656, -12003, -12003,
+-12340, -12340, -12665, -12665, -12977, -12977, -13278, -13278, -13566, -13566,
+-13842, -13842, -14104, -14104, -14353, -14353, -14589, -14589, -14810, -14810,
+-15018, -15018, -15212, -15212, -15392, -15392, -15557, -15557, -15707, -15707,
+-15842, -15842, -15963, -15963, -16069, -16069, -16159, -16159, -16234, -16234,
+-16294, -16294, -16339, -16339, -16368, -16368, -16382, -16382, -16381, -16381,
+-16364, -16364, -16331, -16331, -16284, -16284, -16221, -16221, -16142, -16142,
+-16049, -16049, -15940, -15940, -15817, -15817, -15678, -15678, -15525, -15525,
+-15357, -15357, -15175, -15175, -14978, -14978, -14767, -14767, -14543, -14543,
+-14304, -14304, -14053, -14053, -13788, -13788, -13510, -13510, -13219, -13219,
+-12916, -12916, -12600, -12600, -12273, -12273, -11935, -11935, -11585, -11585,
+-11224, -11224, -10853, -10853, -10471, -10471, -10079, -10079, -9679, -9679,
+ -9268, -9268, -8850, -8850, -8423, -8423, -7988, -7988, -7545, -7545,
+ -7095, -7095, -6639, -6639, -6176, -6176, -5708, -5708, -5234, -5234,
+ -4756, -4756, -4272, -4272, -3785, -3785, -3294, -3294, -2801, -2801,
+ -2304, -2304, -1805, -1805, -1305, -1305, -803, -803, -301, -301,
+ 201, 201, 703, 703, 1205, 1205, 1705, 1705, 2204, 2204,
+ 2701, 2701, 3196, 3196, 3687, 3687, 4175, 4175, 4659, 4659,
+ 5139, 5139, 5614, 5614, 6083, 6083, 6547, 6547, 7005, 7005,
+ 7456, 7456, 7900, 7900, 8336, 8336, 8765, 8765, 9185, 9185,
+ 9597, 9597, 10000, 10000, 10393, 10393, 10777, 10777, 11150, 11150,
+ 11513, 11513, 11866, 11866, 12207, 12207, 12536, 12536, 12854, 12854,
+ 13159, 13159, 13452, 13452, 13733, 13733, 14001, 14001, 14255, 14255,
+ 14496, 14496, 14723, 14723, 14937, 14937, 15136, 15136, 15322, 15322,
+ 15492, 15492, 15649, 15649, 15790, 15790, 15917, 15917, 16028, 16028,
+ 16125, 16125, 16206, 16206, 16272, 16272, 16323, 16323, 16359, 16359,
+ 16379, 16379, 16383, 16383, 16372, 16372, 16346, 16346, 16305, 16305,
+ 16248, 16248, 16175, 16175, 16088, 16088, 15985, 15985, 15868, 15868,
+ 15735, 15735, 15588, 15588, 15426, 15426, 15249, 15249, 15058, 15058,
+ 14853, 14853, 14634, 14634, 14401, 14401, 14155, 14155, 13895, 13895,
+ 13622, 13622, 13337, 13337, 13038, 13038, 12728, 12728, 12406, 12406,
+ 12072, 12072, 11726, 11726, 11370, 11370, 11002, 11002, 10625, 10625,
+ 10237, 10237, 9840, 9840, 9434, 9434, 9018, 9018, 8594, 8594,
+ 8162, 8162, 7723, 7723, 7276, 7276, 6822, 6822, 6362, 6362,
+ 5896, 5896, 5424, 5424, 4948, 4948, 4466, 4466, 3980, 3980,
+ 3491, 3491, 2998, 2998, 2503, 2503, 2005, 2005, 1505, 1505,
+ 1004, 1004, 502, 502, 0, 0, -502, -502, -1004, -1004,
+ -1505, -1505, -2005, -2005, -2503, -2503, -2998, -2998, -3491, -3491,
+ -3980, -3980, -4466, -4466, -4948, -4948, -5424, -5424, -5896, -5896,
+ -6362, -6362, -6822, -6822, -7276, -7276, -7723, -7723, -8162, -8162,
+ -8594, -8594, -9018, -9018, -9434, -9434, -9840, -9840, -10237, -10237,
+-10625, -10625, -11002, -11002, -11370, -11370, -11726, -11726, -12072, -12072,
+-12406, -12406, -12728, -12728, -13038, -13038, -13337, -13337, -13622, -13622,
+-13895, -13895, -14155, -14155, -14401, -14401, -14634, -14634, -14853, -14853,
+-15058, -15058, -15249, -15249, -15426, -15426, -15588, -15588, -15735, -15735,
+-15868, -15868, -15985, -15985, -16088, -16088, -16175, -16175, -16248, -16248,
+-16305, -16305, -16346, -16346, -16372, -16372, -16383, -16383, -16379, -16379,
+-16359, -16359, -16323, -16323, -16272, -16272, -16206, -16206, -16125, -16125,
+-16028, -16028, -15917, -15917, -15790, -15790, -15649, -15649, -15492, -15492,
+-15322, -15322, -15136, -15136, -14937, -14937, -14723, -14723, -14496, -14496,
+-14255, -14255, -14001, -14001, -13733, -13733, -13452, -13452, -13159, -13159,
+-12854, -12854, -12536, -12536, -12207, -12207, -11866, -11866, -11513, -11513,
+-11150, -11150, -10777, -10777, -10393, -10393, -10000, -10000, -9597, -9597,
+ -9185, -9185, -8765, -8765, -8336, -8336, -7900, -7900, -7456, -7456,
+ -7005, -7005, -6547, -6547, -6083, -6083, -5614, -5614, -5139, -5139,
+ -4659, -4659, -4175, -4175, -3687, -3687, -3196, -3196, -2701, -2701,
+ -2204, -2204, -1705, -1705, -1205, -1205, -703, -703, -201, -201,
+ 301, 301, 803, 803, 1305, 1305, 1805, 1805, 2304, 2304,
+ 2801, 2801, 3294, 3294, 3785, 3785, 4272, 4272, 4756, 4756,
+ 5234, 5234, 5708, 5708, 6176, 6176, 6639, 6639, 7095, 7095,
+ 7545, 7545, 7988, 7988, 8423, 8423, 8850, 8850, 9268, 9268,
+ 9679, 9679, 10079, 10079, 10471, 10471, 10853, 10853, 11224, 11224,
+ 11585, 11585, 11935, 11935, 12273, 12273, 12600, 12600, 12916, 12916,
+ 13219, 13219, 13510, 13510, 13788, 13788, 14053, 14053, 14304, 14304,
+ 14543, 14543, 14767, 14767, 14978, 14978, 15175, 15175, 15357, 15357,
+ 15525, 15525, 15678, 15678, 15817, 15817, 15940, 15940, 16049, 16049,
+ 16142, 16142, 16221, 16221, 16284, 16284, 16331, 16331, 16364, 16364,
+ 16381, 16381, 16382, 16382, 16368, 16368, 16339, 16339, 16294, 16294,
+ 16234, 16234, 16159, 16159, 16069, 16069, 15963, 15963, 15842, 15842,
+ 15707, 15707, 15557, 15557, 15392, 15392, 15212, 15212, 15018, 15018,
+ 14810, 14810, 14589, 14589, 14353, 14353, 14104, 14104, 13842, 13842,
+ 13566, 13566, 13278, 13278, 12977, 12977, 12665, 12665, 12340, 12340,
+ 12003, 12003, 11656, 11656, 11297, 11297, 10928, 10928, 10548, 10548,
+ 10159, 10159, 9759, 9759, 9351, 9351, 8934, 8934, 8509, 8509,
+ 8075, 8075, 7634, 7634, 7186, 7186, 6731, 6731, 6269, 6269,
+ 5802, 5802, 5329, 5329, 4852, 4852, 4369, 4369, 3883, 3883,
+ 3393, 3393, 2900, 2900, 2404, 2404, 1905, 1905, 1405, 1405,
+ 904, 904, 402, 402, -100, -100, -603, -603, -1105, -1105,
+ -1605, -1605, -2105, -2105, -2602, -2602, -3097, -3097, -3589, -3589,
+ -4078, -4078, -4563, -4563, -5043, -5043, -5519, -5519, -5990, -5990,
+ -6455, -6455, -6914, -6914, -7366, -7366, -7811, -7811, -8249, -8249,
+ -8680, -8680, -9102, -9102, -9516, -9516, -9920, -9920, -10315, -10315,
+-10701, -10701, -11077, -11077, -11442, -11442, -11796, -11796, -12139, -12139,
+-12471, -12471, -12791, -12791, -13099, -13099, -13395, -13395, -13678, -13678,
+-13948, -13948, -14205, -14205, -14449, -14449, -14679, -14679, -14895, -14895,
+-15098, -15098, -15286, -15286, -15459, -15459, -15618, -15618, -15763, -15763,
+-15892, -15892, -16007, -16007, -16107, -16107, -16191, -16191, -16260, -16260,
+-16314, -16314, -16353, -16353, -16376, -16376, -16383, -16383, -16376, -16376,
+-16353, -16353, -16314, -16314, -16260, -16260, -16191, -16191, -16107, -16107,
+-16007, -16007, -15892, -15892, -15763, -15763, -15618, -15618, -15459, -15459,
+-15286, -15286, -15098, -15098, -14895, -14895, -14679, -14679, -14449, -14449,
+-14205, -14205, -13948, -13948, -13678, -13678, -13395, -13395, -13099, -13099,
+-12791, -12791, -12471, -12471, -12139, -12139, -11796, -11796, -11442, -11442,
+-11077, -11077, -10701, -10701, -10315, -10315, -9920, -9920, -9516, -9516,
+ -9102, -9102, -8680, -8680, -8249, -8249, -7811, -7811, -7366, -7366,
+ -6914, -6914, -6455, -6455, -5990, -5990, -5519, -5519, -5043, -5043,
+ -4563, -4563, -4078, -4078, -3589, -3589, -3097, -3097, -2602, -2602,
+ -2105, -2105, -1605, -1605, -1105, -1105, -603, -603, -100, -100,
+ 402, 402, 904, 904, 1405, 1405, 1905, 1905, 2404, 2404,
+ 2900, 2900, 3393, 3393, 3883, 3883, 4369, 4369, 4852, 4852,
+ 5329, 5329, 5802, 5802, 6269, 6269, 6731, 6731, 7186, 7186,
+ 7634, 7634, 8075, 8075, 8509, 8509, 8934, 8934, 9351, 9351,
+ 9759, 9759, 10159, 10159, 10548, 10548, 10928, 10928, 11297, 11297,
+ 11656, 11656, 12003, 12003, 12340, 12340, 12665, 12665, 12977, 12977,
+ 13278, 13278, 13566, 13566, 13842, 13842, 14104, 14104, 14353, 14353,
+ 14589, 14589, 14810, 14810, 15018, 15018, 15212, 15212, 15392, 15392,
+ 15557, 15557, 15707, 15707, 15842, 15842, 15963, 15963, 16069, 16069,
+ 16159, 16159, 16234, 16234, 16294, 16294, 16339, 16339, 16368, 16368,
+ 16382, 16382, 16381, 16381, 16364, 16364, 16331, 16331, 16284, 16284,
+ 16221, 16221, 16142, 16142, 16049, 16049, 15940, 15940, 15817, 15817,
+ 15678, 15678, 15525, 15525, 15357, 15357, 15175, 15175, 14978, 14978,
+ 14767, 14767, 14543, 14543, 14304, 14304, 14053, 14053, 13788, 13788,
+ 13510, 13510, 13219, 13219, 12916, 12916, 12600, 12600, 12273, 12273,
+ 11935, 11935, 11585, 11585, 11224, 11224, 10853, 10853, 10471, 10471,
+ 10079, 10079, 9679, 9679, 9268, 9268, 8850, 8850, 8423, 8423,
+ 7988, 7988, 7545, 7545, 7095, 7095, 6639, 6639, 6176, 6176,
+ 5708, 5708, 5234, 5234, 4756, 4756, 4272, 4272, 3785, 3785,
+ 3294, 3294, 2801, 2801, 2304, 2304, 1805, 1805, 1305, 1305,
+ 803, 803, 301, 301, -201, -201, -703, -703, -1205, -1205,
+ -1705, -1705, -2204, -2204, -2701, -2701, -3196, -3196, -3687, -3687,
+ -4175, -4175, -4659, -4659, -5139, -5139, -5614, -5614, -6083, -6083,
+ -6547, -6547, -7005, -7005, -7456, -7456, -7900, -7900, -8336, -8336,
+ -8765, -8765, -9185, -9185, -9597, -9597, -10000, -10000, -10393, -10393,
+-10777, -10777, -11150, -11150, -11513, -11513, -11866, -11866, -12207, -12207,
+-12536, -12536, -12854, -12854, -13159, -13159, -13452, -13452, -13733, -13733,
+-14001, -14001, -14255, -14255, -14496, -14496, -14723, -14723, -14937, -14937,
+-15136, -15136, -15322, -15322, -15492, -15492, -15649, -15649, -15790, -15790,
+-15917, -15917, -16028, -16028, -16125, -16125, -16206, -16206, -16272, -16272,
+-16323, -16323, -16359, -16359, -16379, -16379, -16383, -16383, -16372, -16372,
+-16346, -16346, -16305, -16305, -16248, -16248, -16175, -16175, -16088, -16088,
+-15985, -15985, -15868, -15868, -15735, -15735, -15588, -15588, -15426, -15426,
+-15249, -15249, -15058, -15058, -14853, -14853, -14634, -14634, -14401, -14401,
+-14155, -14155, -13895, -13895, -13622, -13622, -13337, -13337, -13038, -13038,
+-12728, -12728, -12406, -12406, -12072, -12072, -11726, -11726, -11370, -11370,
+-11002, -11002, -10625, -10625, -10237, -10237, -9840, -9840, -9434, -9434,
+ -9018, -9018, -8594, -8594, -8162, -8162, -7723, -7723, -7276, -7276,
+ -6822, -6822, -6362, -6362, -5896, -5896, -5424, -5424, -4948, -4948,
+ -4466, -4466, -3980, -3980, -3491, -3491, -2998, -2998, -2503, -2503,
+ -2005, -2005, -1505, -1505, -1004, -1004, -502, -502
+};
+/*****************************************************************************/
+void
+easysnd_testtone(struct easycap *peasycap, int audio_fill)
+{
+int i1;
+unsigned char *p2;
+struct data_buffer *paudio_buffer;
+
+JOT(8, "%i=audio_fill\n", audio_fill);
+
+paudio_buffer = &peasycap->audio_buffer[audio_fill];
+
+p2 = (unsigned char *)(paudio_buffer->pgo);
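+/*
+ *  COPY THE PRECOMPUTED SINE TABLE INTO THE PAGE AS PAIRS OF LITTLE-ENDIAN
+ *  16-BIT SAMPLES.
+ */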
+for (i1 = 0; i1 < PAGE_SIZE; i1 += 4, p2 += 4) {
+ *p2 = (unsigned char) (0x00FF & tones[i1/2]);
+ *(p2 + 1) = (unsigned char)((0xFF00 & tones[i1/2]) >> 8);
+ *(p2 + 2) = (unsigned char) (0x00FF & tones[i1/2 + 1]);
+ *(p2 + 3) = (unsigned char)((0xFF00 & tones[i1/2 + 1]) >> 8);
+}
+return;
+}
+#endif /*EASYCAP_TESTTONE*/
+/*****************************************************************************/
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index a6d9f29ff49..21c5eeec62d 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -760,7 +760,8 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
/* NOTE - Is there a way to query this without
* TruePHY?
- * && TRU_QueryCoreType(etdev->hTruePhy, 0) == EMI_TRUEPHY_A13O) {
+ * && TRU_QueryCoreType(etdev->hTruePhy, 0) ==
+ * EMI_TRUEPHY_A13O) {
*/
u16 Register18;
@@ -778,7 +779,7 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
* in the LinkDetectionDPC).
*/
if (!(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
- (etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
+ (etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
spin_lock_irqsave(&etdev->Lock, flags);
etdev->MediaState =
NETIF_STATUS_MEDIA_DISCONNECT;
@@ -836,7 +837,8 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
/*
* NOTE - Is there a way to query this without
* TruePHY?
- * && TRU_QueryCoreType(etdev->hTruePhy, 0)== EMI_TRUEPHY_A13O) {
+ * && TRU_QueryCoreType(etdev->hTruePhy, 0)==
+ * EMI_TRUEPHY_A13O) {
*/
u16 Register18;
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
index 97480f5c659..7455c804962 100644
--- a/drivers/staging/hv/Kconfig
+++ b/drivers/staging/hv/Kconfig
@@ -17,7 +17,7 @@ config HYPERV_STORAGE
config HYPERV_BLOCK
tristate "Microsoft Hyper-V virtual block driver"
- depends on BLOCK && SCSI && LBDAF
+ depends on BLOCK && SCSI && (LBDAF || 64BIT)
default HYPERV
help
Select this option to enable the Hyper-V virtual block driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index 1866f80a45d..b63515c20f5 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_HYPERV) += hv_vmbus.o
+obj-$(CONFIG_HYPERV) += hv_vmbus.o hv_timesource.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_HYPERV_BLOCK) += hv_blkvsc.o
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
diff --git a/drivers/staging/hv/blkvsc.c b/drivers/staging/hv/blkvsc.c
index 0daebc472e6..929238a6ce8 100644
--- a/drivers/staging/hv/blkvsc.c
+++ b/drivers/staging/hv/blkvsc.c
@@ -40,15 +40,11 @@ static int BlkVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
struct storvsc_device_info *deviceInfo;
int ret = 0;
- DPRINT_ENTER(BLKVSC);
-
deviceInfo = (struct storvsc_device_info *)AdditionalInfo;
ret = StorVscOnDeviceAdd(Device, AdditionalInfo);
- if (ret != 0) {
- DPRINT_EXIT(BLKVSC);
+ if (ret != 0)
return ret;
- }
/*
* We need to use the device instance guid to set the path and target
@@ -63,8 +59,6 @@ static int BlkVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
deviceInfo->TargetId = Device->deviceInstance.data[5] << 8 |
Device->deviceInstance.data[4];
- DPRINT_EXIT(BLKVSC);
-
return ret;
}
@@ -73,8 +67,6 @@ int BlkVscInitialize(struct hv_driver *Driver)
struct storvsc_driver_object *storDriver;
int ret = 0;
- DPRINT_ENTER(BLKVSC);
-
storDriver = (struct storvsc_driver_object *)Driver;
/* Make sure we are at least 2 pages since 1 page is used for control */
@@ -106,7 +98,5 @@ int BlkVscInitialize(struct hv_driver *Driver)
storDriver->Base.OnCleanup = StorVscOnCleanup;
storDriver->OnIORequest = StorVscOnIORequest;
- DPRINT_EXIT(BLKVSC);
-
return ret;
}
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 61bd0be5fb1..ff1d24720f1 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -25,6 +25,7 @@
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
+#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -175,8 +176,6 @@ static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
int ret;
- DPRINT_ENTER(BLKVSC_DRV);
-
vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);
storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
@@ -195,8 +194,6 @@ static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
/* The driver belongs to vmbus */
ret = vmbus_child_driver_register(drv_ctx);
- DPRINT_EXIT(BLKVSC_DRV);
-
return ret;
}
@@ -214,8 +211,6 @@ static void blkvsc_drv_exit(void)
struct device *current_dev;
int ret;
- DPRINT_ENTER(BLKVSC_DRV);
-
while (1) {
current_dev = NULL;
@@ -241,8 +236,6 @@ static void blkvsc_drv_exit(void)
vmbus_child_driver_unregister(drv_ctx);
- DPRINT_EXIT(BLKVSC_DRV);
-
return;
}
@@ -268,8 +261,6 @@ static int blkvsc_probe(struct device *device)
static int ide0_registered;
static int ide1_registered;
- DPRINT_ENTER(BLKVSC_DRV);
-
DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
if (!storvsc_drv_obj->Base.OnDeviceAdd) {
@@ -413,8 +404,6 @@ Cleanup:
blkdev = NULL;
}
- DPRINT_EXIT(BLKVSC_DRV);
-
return ret;
}
@@ -751,14 +740,10 @@ static int blkvsc_remove(struct device *device)
unsigned long flags;
int ret;
- DPRINT_ENTER(BLKVSC_DRV);
-
DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
- if (!storvsc_drv_obj->Base.OnDeviceRemove) {
- DPRINT_EXIT(BLKVSC_DRV);
+ if (!storvsc_drv_obj->Base.OnDeviceRemove)
return -1;
- }
/*
* Call to the vsc driver to let it know that the device is being
@@ -802,8 +787,6 @@ static int blkvsc_remove(struct device *device)
kfree(blkdev);
- DPRINT_EXIT(BLKVSC_DRV);
-
return ret;
}
@@ -823,7 +806,8 @@ static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
blkvsc_req->cmnd[0] = READ_16;
}
- blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
+ blkvsc_req->cmnd[1] |=
+ (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;
*(unsigned long long *)&blkvsc_req->cmnd[2] =
cpu_to_be64(blkvsc_req->sector_start);
@@ -839,7 +823,8 @@ static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
blkvsc_req->cmnd[0] = READ_10;
}
- blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
+ blkvsc_req->cmnd[1] |=
+ (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;
*(unsigned int *)&blkvsc_req->cmnd[2] =
cpu_to_be32(blkvsc_req->sector_start);
@@ -1286,7 +1271,7 @@ static void blkvsc_request(struct request_queue *queue)
DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);
blkdev = req->rq_disk->private_data;
- if (blkdev->shutting_down || !blk_fs_request(req) ||
+ if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
blkdev->media_not_present) {
__blk_end_request_cur(req, 0);
continue;
@@ -1324,6 +1309,7 @@ static int blkvsc_open(struct block_device *bdev, fmode_t mode)
DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
blkdev->gd->disk_name);
+ lock_kernel();
spin_lock(&blkdev->lock);
if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
@@ -1335,6 +1321,7 @@ static int blkvsc_open(struct block_device *bdev, fmode_t mode)
blkdev->users++;
spin_unlock(&blkdev->lock);
+ unlock_kernel();
return 0;
}
@@ -1345,6 +1332,7 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode)
DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
blkdev->gd->disk_name);
+ lock_kernel();
spin_lock(&blkdev->lock);
if (blkdev->users == 1) {
spin_unlock(&blkdev->lock);
@@ -1355,6 +1343,7 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode)
blkdev->users--;
spin_unlock(&blkdev->lock);
+ unlock_kernel();
return 0;
}
@@ -1492,22 +1481,16 @@ static int __init blkvsc_init(void)
BUILD_BUG_ON(sizeof(sector_t) != 8);
- DPRINT_ENTER(BLKVSC_DRV);
-
DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");
ret = blkvsc_drv_init(BlkVscInitialize);
- DPRINT_EXIT(BLKVSC_DRV);
-
return ret;
}
static void __exit blkvsc_exit(void)
{
- DPRINT_ENTER(BLKVSC_DRV);
blkvsc_drv_exit();
- DPRINT_ENTER(BLKVSC_DRV);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
index f047c5a7f64..fece30c303a 100644
--- a/drivers/staging/hv/channel.c
+++ b/drivers/staging/hv/channel.c
@@ -74,8 +74,6 @@ static void VmbusChannelSetEvent(struct vmbus_channel *Channel)
{
struct hv_monitor_page *monitorPage;
- DPRINT_ENTER(VMBUS);
-
if (Channel->OfferMsg.MonitorAllocated) {
/* Each u32 represents 32 channels */
set_bit(Channel->OfferMsg.ChildRelId & 31,
@@ -92,8 +90,6 @@ static void VmbusChannelSetEvent(struct vmbus_channel *Channel)
} else {
VmbusSetEvent(Channel->OfferMsg.ChildRelId);
}
-
- DPRINT_EXIT(VMBUS);
}
#if 0
@@ -101,8 +97,6 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorPage;
- DPRINT_ENTER(VMBUS);
-
if (Channel->OfferMsg.MonitorAllocated) {
/* Each u32 represents 32 channels */
clear_bit(Channel->OfferMsg.ChildRelId & 31,
@@ -117,8 +111,6 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
(unsigned long *)&monitorPage->TriggerGroup
[Channel->MonitorGroup].Pending);
}
-
- DPRINT_EXIT(VMBUS);
}
#endif
@@ -180,8 +172,6 @@ int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
unsigned long flags;
int ret, err = 0;
- DPRINT_ENTER(VMBUS);
-
/* Aligned to page size */
/* ASSERT(!(SendRingBufferSize & (PAGE_SIZE - 1))); */
/* ASSERT(!(RecvRingBufferSize & (PAGE_SIZE - 1))); */
@@ -305,9 +295,6 @@ Cleanup:
kfree(openInfo->WaitEvent);
kfree(openInfo);
-
- DPRINT_EXIT(VMBUS);
-
return 0;
errorout:
@@ -465,6 +452,8 @@ static int VmbusChannelCreateGpadlHeader(void *Kbuffer, u32 Size,
sizeof(struct vmbus_channel_gpadl_header) +
sizeof(struct gpa_range) + pageCount * sizeof(u64);
msgHeader = kzalloc(msgSize, GFP_KERNEL);
+ if (msgHeader == NULL)
+ goto nomem;
msgHeader->MessageSize = msgSize;
gpaHeader = (struct vmbus_channel_gpadl_header *)msgHeader->Msg;
@@ -509,8 +498,6 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
unsigned long flags;
int ret = 0;
- DPRINT_ENTER(VMBUS);
-
nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
atomic_inc(&gVmbusConnection.NextGpadlHandle);
@@ -592,9 +579,6 @@ Cleanup:
kfree(msgInfo->WaitEvent);
kfree(msgInfo);
-
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -608,8 +592,6 @@ int VmbusChannelTeardownGpadl(struct vmbus_channel *Channel, u32 GpadlHandle)
unsigned long flags;
int ret;
- DPRINT_ENTER(VMBUS);
-
/* ASSERT(GpadlHandle != 0); */
info = kmalloc(sizeof(*info) +
@@ -650,9 +632,6 @@ int VmbusChannelTeardownGpadl(struct vmbus_channel *Channel, u32 GpadlHandle)
kfree(info->WaitEvent);
kfree(info);
-
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -666,8 +645,6 @@ void VmbusChannelClose(struct vmbus_channel *Channel)
unsigned long flags;
int ret;
- DPRINT_ENTER(VMBUS);
-
/* Stop callback and cancel the timer asap */
Channel->OnChannelCallback = NULL;
del_timer_sync(&Channel->poll_timer);
@@ -720,8 +697,6 @@ void VmbusChannelClose(struct vmbus_channel *Channel)
FreeVmbusChannel(Channel);
}
-
- DPRINT_EXIT(VMBUS);
}
/**
@@ -749,7 +724,6 @@ int VmbusChannelSendPacket(struct vmbus_channel *Channel, const void *Buffer,
u64 alignedData = 0;
int ret;
- DPRINT_ENTER(VMBUS);
DPRINT_DBG(VMBUS, "channel %p buffer %p len %d",
Channel, Buffer, BufferLen);
@@ -776,8 +750,6 @@ int VmbusChannelSendPacket(struct vmbus_channel *Channel, const void *Buffer,
if (ret == 0 && !GetRingBufferInterruptMask(&Channel->Outbound))
VmbusChannelSetEvent(Channel);
- DPRINT_EXIT(VMBUS);
-
return ret;
}
EXPORT_SYMBOL(VmbusChannelSendPacket);
@@ -800,8 +772,6 @@ int VmbusChannelSendPacketPageBuffer(struct vmbus_channel *Channel,
struct scatterlist bufferList[3];
u64 alignedData = 0;
- DPRINT_ENTER(VMBUS);
-
if (PageCount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -844,8 +814,6 @@ int VmbusChannelSendPacketPageBuffer(struct vmbus_channel *Channel,
if (ret == 0 && !GetRingBufferInterruptMask(&Channel->Outbound))
VmbusChannelSetEvent(Channel);
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -867,8 +835,6 @@ int VmbusChannelSendPacketMultiPageBuffer(struct vmbus_channel *Channel,
u32 PfnCount = NUM_PAGES_SPANNED(MultiPageBuffer->Offset,
MultiPageBuffer->Length);
- DPRINT_ENTER(VMBUS);
-
DumpVmbusChannel(Channel);
DPRINT_DBG(VMBUS, "data buffer - offset %u len %u pfn count %u",
@@ -914,8 +880,6 @@ int VmbusChannelSendPacketMultiPageBuffer(struct vmbus_channel *Channel,
if (ret == 0 && !GetRingBufferInterruptMask(&Channel->Outbound))
VmbusChannelSetEvent(Channel);
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -942,8 +906,6 @@ int VmbusChannelRecvPacket(struct vmbus_channel *Channel, void *Buffer,
int ret;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
*BufferActualLen = 0;
*RequestId = 0;
@@ -955,7 +917,6 @@ int VmbusChannelRecvPacket(struct vmbus_channel *Channel, void *Buffer,
spin_unlock_irqrestore(&Channel->inbound_lock, flags);
/* DPRINT_DBG(VMBUS, "nothing to read!!"); */
- DPRINT_EXIT(VMBUS);
return 0;
}
@@ -977,8 +938,6 @@ int VmbusChannelRecvPacket(struct vmbus_channel *Channel, void *Buffer,
DPRINT_ERR(VMBUS, "buffer too small - got %d needs %d",
BufferLen, userLen);
- DPRINT_EXIT(VMBUS);
-
return -1;
}
@@ -990,8 +949,6 @@ int VmbusChannelRecvPacket(struct vmbus_channel *Channel, void *Buffer,
spin_unlock_irqrestore(&Channel->inbound_lock, flags);
- DPRINT_EXIT(VMBUS);
-
return 0;
}
EXPORT_SYMBOL(VmbusChannelRecvPacket);
@@ -1009,8 +966,6 @@ int VmbusChannelRecvPacketRaw(struct vmbus_channel *Channel, void *Buffer,
int ret;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
*BufferActualLen = 0;
*RequestId = 0;
@@ -1022,7 +977,6 @@ int VmbusChannelRecvPacketRaw(struct vmbus_channel *Channel, void *Buffer,
spin_unlock_irqrestore(&Channel->inbound_lock, flags);
/* DPRINT_DBG(VMBUS, "nothing to read!!"); */
- DPRINT_EXIT(VMBUS);
return 0;
}
@@ -1043,7 +997,6 @@ int VmbusChannelRecvPacketRaw(struct vmbus_channel *Channel, void *Buffer,
DPRINT_ERR(VMBUS, "buffer too small - needed %d bytes but "
"got space for only %d bytes", packetLen, BufferLen);
- DPRINT_EXIT(VMBUS);
return -2;
}
@@ -1053,9 +1006,6 @@ int VmbusChannelRecvPacketRaw(struct vmbus_channel *Channel, void *Buffer,
ret = RingBufferRead(&Channel->Inbound, Buffer, packetLen, 0);
spin_unlock_irqrestore(&Channel->inbound_lock, flags);
-
- DPRINT_EXIT(VMBUS);
-
return 0;
}
diff --git a/drivers/staging/hv/channel_mgmt.c b/drivers/staging/hv/channel_mgmt.c
index 12db555a3a5..6ccf505e802 100644
--- a/drivers/staging/hv/channel_mgmt.c
+++ b/drivers/staging/hv/channel_mgmt.c
@@ -267,15 +267,11 @@ static inline void ReleaseVmbusChannel(void *context)
{
struct vmbus_channel *channel = context;
- DPRINT_ENTER(VMBUS);
-
DPRINT_DBG(VMBUS, "releasing channel (%p)", channel);
destroy_workqueue(channel->ControlWQ);
DPRINT_DBG(VMBUS, "channel released (%p)", channel);
kfree(channel);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -326,8 +322,6 @@ static void VmbusChannelProcessOffer(void *context)
int cnt;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
/* Make sure this is a new offer */
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
@@ -353,7 +347,6 @@ static void VmbusChannelProcessOffer(void *context)
DPRINT_DBG(VMBUS, "Ignoring duplicate offer for relid (%d)",
newChannel->OfferMsg.ChildRelId);
FreeVmbusChannel(newChannel);
- DPRINT_EXIT(VMBUS);
return;
}
@@ -410,7 +403,6 @@ static void VmbusChannelProcessOffer(void *context)
}
}
}
- DPRINT_EXIT(VMBUS);
}
/*
@@ -420,9 +412,7 @@ static void VmbusChannelProcessRescindOffer(void *context)
{
struct vmbus_channel *channel = context;
- DPRINT_ENTER(VMBUS);
VmbusChildDeviceRemove(channel->DeviceObject);
- DPRINT_EXIT(VMBUS);
}
/*
@@ -441,8 +431,6 @@ static void VmbusChannelOnOffer(struct vmbus_channel_message_header *hdr)
int i;
int fSupported = 0;
- DPRINT_ENTER(VMBUS);
-
offer = (struct vmbus_channel_offer_channel *)hdr;
for (i = 0; i < MAX_NUM_DEVICE_CLASSES_SUPPORTED; i++) {
if (memcmp(&offer->Offer.InterfaceType,
@@ -455,7 +443,6 @@ static void VmbusChannelOnOffer(struct vmbus_channel_message_header *hdr)
if (!fSupported) {
DPRINT_DBG(VMBUS, "Ignoring channel offer notification for "
"child relid %d", offer->ChildRelId);
- DPRINT_EXIT(VMBUS);
return;
}
@@ -504,8 +491,6 @@ static void VmbusChannelOnOffer(struct vmbus_channel_message_header *hdr)
/* TODO: Make sure the offer comes from our parent partition */
osd_schedule_callback(newChannel->ControlWQ, VmbusChannelProcessOffer,
newChannel);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -518,8 +503,6 @@ static void VmbusChannelOnOfferRescind(struct vmbus_channel_message_header *hdr)
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
- DPRINT_ENTER(VMBUS);
-
rescind = (struct vmbus_channel_rescind_offer *)hdr;
channel = GetChannelFromRelId(rescind->ChildRelId);
if (channel == NULL) {
@@ -531,8 +514,6 @@ static void VmbusChannelOnOfferRescind(struct vmbus_channel_message_header *hdr)
osd_schedule_callback(channel->ControlWQ,
VmbusChannelProcessRescindOffer,
channel);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -543,8 +524,6 @@ static void VmbusChannelOnOfferRescind(struct vmbus_channel_message_header *hdr)
static void VmbusChannelOnOffersDelivered(
struct vmbus_channel_message_header *hdr)
{
- DPRINT_ENTER(VMBUS);
- DPRINT_EXIT(VMBUS);
}
/*
@@ -563,8 +542,6 @@ static void VmbusChannelOnOpenResult(struct vmbus_channel_message_header *hdr)
struct vmbus_channel_open_channel *openMsg;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
result = (struct vmbus_channel_open_result *)hdr;
DPRINT_DBG(VMBUS, "vmbus open result - %d", result->Status);
@@ -591,8 +568,6 @@ static void VmbusChannelOnOpenResult(struct vmbus_channel_message_header *hdr)
}
}
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -611,8 +586,6 @@ static void VmbusChannelOnGpadlCreated(struct vmbus_channel_message_header *hdr)
struct vmbus_channel_gpadl_header *gpadlHeader;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
gpadlCreated = (struct vmbus_channel_gpadl_created *)hdr;
DPRINT_DBG(VMBUS, "vmbus gpadl created result - %d",
gpadlCreated->CreationStatus);
@@ -643,8 +616,6 @@ static void VmbusChannelOnGpadlCreated(struct vmbus_channel_message_header *hdr)
}
}
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -664,8 +635,6 @@ static void VmbusChannelOnGpadlTorndown(
struct vmbus_channel_gpadl_teardown *gpadlTeardown;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
gpadlTorndown = (struct vmbus_channel_gpadl_torndown *)hdr;
/*
@@ -691,8 +660,6 @@ static void VmbusChannelOnGpadlTorndown(
}
}
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -712,8 +679,6 @@ static void VmbusChannelOnVersionResponse(
struct vmbus_channel_version_response *versionResponse;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
versionResponse = (struct vmbus_channel_version_response *)hdr;
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
@@ -732,8 +697,6 @@ static void VmbusChannelOnVersionResponse(
}
}
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
-
- DPRINT_EXIT(VMBUS);
}
/* Channel message dispatch table */
@@ -769,8 +732,6 @@ void VmbusOnChannelMessage(void *Context)
struct vmbus_channel_message_header *hdr;
int size;
- DPRINT_ENTER(VMBUS);
-
hdr = (struct vmbus_channel_message_header *)msg->u.Payload;
size = msg->Header.PayloadSize;
@@ -794,7 +755,6 @@ void VmbusOnChannelMessage(void *Context)
/* Free the msg that was allocated in VmbusOnMsgDPC() */
kfree(msg);
- DPRINT_EXIT(VMBUS);
}
/*
@@ -806,8 +766,6 @@ int VmbusChannelRequestOffers(void)
struct vmbus_channel_msginfo *msgInfo;
int ret;
- DPRINT_ENTER(VMBUS);
-
msgInfo = kmalloc(sizeof(*msgInfo) +
sizeof(struct vmbus_channel_message_header),
GFP_KERNEL);
@@ -853,7 +811,6 @@ Cleanup:
kfree(msgInfo);
}
- DPRINT_EXIT(VMBUS);
return ret;
}
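A side note on the allocation pattern visible in VmbusChannelRequestOffers(): the bookkeeping structure and the wire message are carved out of a single kmalloc(sizeof(*msgInfo) + sizeof(message header)) so one allocation and one kfree() cover both. A minimal standalone sketch of that trailing-payload idiom follows; the struct names and fields are illustrative stand-ins, not the driver's real types.

#include <stdlib.h>
#include <string.h>

/* Stand-ins for the driver's types; the field layout here is illustrative only. */
struct msg_header { unsigned int msg_type; };
struct msg_info   { void *wait_event; unsigned char payload[]; /* message follows in the same block */ };

static struct msg_info *alloc_request(unsigned int type)
{
	/* One allocation holds the bookkeeping info plus the wire message. */
	struct msg_info *info = malloc(sizeof(*info) + sizeof(struct msg_header));
	struct msg_header *hdr;

	if (!info)
		return NULL;
	memset(info, 0, sizeof(*info) + sizeof(struct msg_header));
	hdr = (struct msg_header *)info->payload;  /* message lives right after the info struct */
	hdr->msg_type = type;
	return info;                               /* caller releases everything with a single free() */
}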
diff --git a/drivers/staging/hv/channel_mgmt.h b/drivers/staging/hv/channel_mgmt.h
index 5908b81d3e9..f969267895b 100644
--- a/drivers/staging/hv/channel_mgmt.h
+++ b/drivers/staging/hv/channel_mgmt.h
@@ -247,8 +247,8 @@ struct vmbus_channel {
/* Allocated memory for ring buffer */
void *RingBufferPages;
u32 RingBufferPageCount;
- RING_BUFFER_INFO Outbound; /* send to parent */
- RING_BUFFER_INFO Inbound; /* receive from parent */
+ struct hv_ring_buffer_info Outbound; /* send to parent */
+ struct hv_ring_buffer_info Inbound; /* receive from parent */
spinlock_t inbound_lock;
struct workqueue_struct *ControlWQ;
@@ -272,8 +272,8 @@ struct vmbus_channel_debug_info {
u32 ClientMonitorLatency;
u32 ClientMonitorConnectionId;
- RING_BUFFER_DEBUG_INFO Inbound;
- RING_BUFFER_DEBUG_INFO Outbound;
+ struct hv_ring_buffer_debug_info Inbound;
+ struct hv_ring_buffer_debug_info Outbound;
};
/*
diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c
index e8824dadffc..1f4d6683aaa 100644
--- a/drivers/staging/hv/connection.c
+++ b/drivers/staging/hv/connection.c
@@ -44,8 +44,6 @@ int VmbusConnect(void)
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
/* Make sure we are not connecting or connected */
if (gVmbusConnection.ConnectState != Disconnected)
return -1;
@@ -155,8 +153,6 @@ int VmbusConnect(void)
kfree(msgInfo->WaitEvent);
kfree(msgInfo);
- DPRINT_EXIT(VMBUS);
-
return 0;
Cleanup:
@@ -180,8 +176,6 @@ Cleanup:
kfree(msgInfo);
}
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -193,8 +187,6 @@ int VmbusDisconnect(void)
int ret = 0;
struct vmbus_channel_message_header *msg;
- DPRINT_ENTER(VMBUS);
-
/* Make sure we are connected */
if (gVmbusConnection.ConnectState != Connected)
return -1;
@@ -221,7 +213,6 @@ int VmbusDisconnect(void)
Cleanup:
kfree(msg);
- DPRINT_EXIT(VMBUS);
return ret;
}
@@ -285,8 +276,6 @@ void VmbusOnEvents(void)
int relid;
u32 *recvInterruptPage = gVmbusConnection.RecvInterruptPage;
- DPRINT_ENTER(VMBUS);
-
/* Check events */
if (recvInterruptPage) {
for (dword = 0; dword < maxdword; dword++) {
@@ -310,8 +299,6 @@ void VmbusOnEvents(void)
}
}
}
- DPRINT_EXIT(VMBUS);
-
return;
}
@@ -332,18 +319,10 @@ int VmbusPostMessage(void *buffer, size_t bufferLen)
*/
int VmbusSetEvent(u32 childRelId)
{
- int ret = 0;
-
- DPRINT_ENTER(VMBUS);
-
/* Each u32 represents 32 channels */
set_bit(childRelId & 31,
(unsigned long *)gVmbusConnection.SendInterruptPage +
(childRelId >> 5));
- ret = HvSignalEvent();
-
- DPRINT_EXIT(VMBUS);
-
- return ret;
+ return HvSignalEvent();
}
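The simplified VmbusSetEvent() now sets the channel's interrupt bit and signals the host in one step. The send interrupt page is treated as an array of 32-bit words, so a child relid maps to word relid / 32 and bit relid % 32, which is exactly what the (childRelId >> 5) index and the & 31 mask compute. A small standalone sketch of that mapping (plain C, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Mirror the index math used by VmbusSetEvent(): each u32 covers 32 relids. */
static void relid_to_word_bit(uint32_t relid, uint32_t *word, uint32_t *bit)
{
	*word = relid >> 5;  /* relid / 32: which 32-bit word in the interrupt page */
	*bit  = relid & 31;  /* relid % 32: which bit inside that word */
}

int main(void)
{
	uint32_t word, bit;
	relid_to_word_bit(40, &word, &bit);
	printf("relid 40 -> word %u, bit %u\n", word, bit);  /* prints: word 1, bit 8 */
	return 0;
}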
diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
index 6c77e64027f..86b1ddd9040 100644
--- a/drivers/staging/hv/hv.c
+++ b/drivers/staging/hv/hv.c
@@ -192,8 +192,6 @@ int HvInit(void)
union hv_x64_msr_hypercall_contents hypercallMsr;
void *virtAddr = NULL;
- DPRINT_ENTER(VMBUS);
-
memset(gHvContext.synICEventPage, 0, sizeof(void *) * MAX_NUM_CPUS);
memset(gHvContext.synICMessagePage, 0, sizeof(void *) * MAX_NUM_CPUS);
@@ -275,8 +273,6 @@ int HvInit(void)
gHvContext.SignalEventParam->FlagNumber = 0;
gHvContext.SignalEventParam->RsvdZ = 0;
- DPRINT_EXIT(VMBUS);
-
return ret;
Cleanup:
@@ -289,8 +285,6 @@ Cleanup:
vfree(virtAddr);
}
ret = -1;
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -303,8 +297,6 @@ void HvCleanup(void)
{
union hv_x64_msr_hypercall_contents hypercallMsr;
- DPRINT_ENTER(VMBUS);
-
kfree(gHvContext.SignalEventBuffer);
gHvContext.SignalEventBuffer = NULL;
gHvContext.SignalEventParam = NULL;
@@ -315,8 +307,6 @@ void HvCleanup(void)
vfree(gHvContext.HypercallPage);
gHvContext.HypercallPage = NULL;
}
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -392,12 +382,8 @@ void HvSynicInit(void *irqarg)
u32 irqVector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
- DPRINT_ENTER(VMBUS);
-
- if (!gHvContext.HypercallPage) {
- DPRINT_EXIT(VMBUS);
+ if (!gHvContext.HypercallPage)
return;
- }
/* Check the version */
rdmsrl(HV_X64_MSR_SVERSION, version);
@@ -464,9 +450,6 @@ void HvSynicInit(void *irqarg)
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.AsUINT64);
gHvContext.SynICInitialized = true;
-
- DPRINT_EXIT(VMBUS);
-
return;
Cleanup:
@@ -475,8 +458,6 @@ Cleanup:
if (gHvContext.synICMessagePage[cpu])
osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
-
- DPRINT_EXIT(VMBUS);
return;
}
@@ -490,12 +471,8 @@ void HvSynicCleanup(void *arg)
union hv_synic_siefp siefp;
int cpu = smp_processor_id();
- DPRINT_ENTER(VMBUS);
-
- if (!gHvContext.SynICInitialized) {
- DPRINT_EXIT(VMBUS);
+ if (!gHvContext.SynICInitialized)
return;
- }
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
@@ -519,6 +496,4 @@ void HvSynicCleanup(void *arg)
osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
osd_PageFree(gHvContext.synICEventPage[cpu], 1);
-
- DPRINT_EXIT(VMBUS);
}
diff --git a/drivers/staging/hv/hv_timesource.c b/drivers/staging/hv/hv_timesource.c
new file mode 100644
index 00000000000..a7ee533303b
--- /dev/null
+++ b/drivers/staging/hv/hv_timesource.c
@@ -0,0 +1,101 @@
+/*
+ * A clocksource for Linux running on HyperV.
+ *
+ *
+ * Copyright (C) 2010, Novell, Inc.
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+#include <asm/hypervisor.h>
+
+#define HV_CLOCK_SHIFT 22
+
+static cycle_t read_hv_clock(struct clocksource *arg)
+{
+ cycle_t current_tick;
+ /*
+ * Read the partition counter to get the current tick count. This count
+ * is set to 0 when the partition is created and is incremented in
+ * 100 nanosecond units.
+ */
+ rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+ return current_tick;
+}
+
+static struct clocksource hyperv_cs = {
+ .name = "hyperv_clocksource",
+ .rating = 400, /* use this when running on Hyperv*/
+ .read = read_hv_clock,
+ .mask = CLOCKSOURCE_MASK(64),
+ /*
+ * The time ref counter in HyperV is in 100ns units.
+ * The definition of mult is:
+ * mult/2^shift = ns/cyc = 100
+ * mult = (100 << shift)
+ */
+ .mult = (100 << HV_CLOCK_SHIFT),
+ .shift = HV_CLOCK_SHIFT,
+};
+
+static const struct dmi_system_id __initconst
+hv_timesource_dmi_table[] __maybe_unused = {
+ {
+ .ident = "Hyper-V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
+ },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, hv_timesource_dmi_table);
+
+static const struct pci_device_id __initconst
+hv_timesource_pci_table[] __maybe_unused = {
+ { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, hv_timesource_pci_table);
+
+
+static int __init init_hv_clocksource(void)
+{
+ if ((x86_hyper != &x86_hyper_ms_hyperv) ||
+ !(ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE))
+ return -ENODEV;
+
+ if (!dmi_check_system(hv_timesource_dmi_table))
+ return -ENODEV;
+
+ printk(KERN_INFO "Registering HyperV clock source\n");
+ return clocksource_register(&hyperv_cs);
+}
+
+module_init(init_hv_clocksource);
+MODULE_DESCRIPTION("HyperV based clocksource");
+MODULE_AUTHOR("K. Y. Srinivasan <ksrinivasan@novell.com>");
+MODULE_LICENSE("GPL");
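The mult/shift pair in the new clocksource encodes the fixed 100 ns period of the partition reference counter: the timekeeping core converts a cycle delta to nanoseconds as roughly (delta * mult) >> shift, so with mult = 100 << 22 and shift = 22 each tick contributes exactly 100 ns. A quick standalone check of that arithmetic; the conversion formula here is the generic clocksource one, not code taken from this driver.

#include <stdio.h>
#include <stdint.h>

#define HV_CLOCK_SHIFT 22
#define HV_CLOCK_MULT  (100u << HV_CLOCK_SHIFT)

/* Generic clocksource conversion: ns = (cycles * mult) >> shift.
 * Deltas are kept modest here; the kernel handles wrap and overflow itself. */
static uint64_t cyc2ns(uint64_t cycles)
{
	return (cycles * (uint64_t)HV_CLOCK_MULT) >> HV_CLOCK_SHIFT;
}

int main(void)
{
	/* 10,000,000 ticks of 100 ns each should come out to exactly one second. */
	printf("%llu ns\n", (unsigned long long)cyc2ns(10000000ull));  /* 1000000000 ns */
	return 0;
}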
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 2adc9b48ca9..6eb79febef9 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -52,8 +52,6 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
- DPRINT_ENTER(VMBUS);
-
buflen = PAGE_SIZE;
buf = kmalloc(buflen, GFP_ATOMIC);
@@ -102,8 +100,6 @@ static void shutdown_onchannelcallback(void *context)
kfree(buf);
- DPRINT_EXIT(VMBUS);
-
if (execute_shutdown == true)
orderly_poweroff(false);
}
@@ -160,8 +156,6 @@ static void timesync_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct ictimesync_data *timedatap;
- DPRINT_ENTER(VMBUS);
-
buflen = PAGE_SIZE;
buf = kmalloc(buflen, GFP_ATOMIC);
@@ -192,8 +186,6 @@ static void timesync_onchannelcallback(void *context)
}
kfree(buf);
-
- DPRINT_EXIT(VMBUS);
}
/*
@@ -210,8 +202,6 @@ static void heartbeat_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct heartbeat_msg_data *heartbeat_msg;
- DPRINT_ENTER(VMBUS);
-
buflen = PAGE_SIZE;
buf = kmalloc(buflen, GFP_ATOMIC);
@@ -249,8 +239,6 @@ static void heartbeat_onchannelcallback(void *context)
}
kfree(buf);
-
- DPRINT_EXIT(VMBUS);
}
static const struct pci_device_id __initconst
diff --git a/drivers/staging/hv/logging.h b/drivers/staging/hv/logging.h
index ad4cfcfb7b1..20d4d12023d 100644
--- a/drivers/staging/hv/logging.h
+++ b/drivers/staging/hv/logging.h
@@ -92,21 +92,4 @@ extern unsigned int vmbus_loglevel;
__func__, ## args);\
} while (0)
-#ifdef DEBUG
-#define DPRINT_ENTER(mod) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (DEBUG_LVL_ENTEREXIT <= LOWORD(vmbus_loglevel))) \
- printk(KERN_DEBUG "["#mod"]: %s() enter\n", __func__);\
- } while (0)
-
-#define DPRINT_EXIT(mod) do {\
- if ((mod & (HIWORD(vmbus_loglevel))) && \
- (DEBUG_LVL_ENTEREXIT <= LOWORD(vmbus_loglevel))) \
- printk(KERN_DEBUG "["#mod"]: %s() exit\n", __func__);\
- } while (0)
-#else
-#define DPRINT_ENTER(mod)
-#define DPRINT_EXIT(mod)
-#endif
-
#endif /* _LOGGING_H_ */
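The deleted DPRINT_ENTER/DPRINT_EXIT macros only fired when two conditions held: the module's bit was set in the high half of vmbus_loglevel and the low half was at least DEBUG_LVL_ENTEREXIT. A minimal sketch of that gating, assuming HIWORD/LOWORD take the upper and lower 16 bits and using a placeholder value for DEBUG_LVL_ENTEREXIT; the surviving DPRINT_* macros keep using the same scheme.

#include <stdbool.h>

#define HIWORD(v) (((v) >> 16) & 0xFFFF)  /* assumed: module mask lives in the top half    */
#define LOWORD(v) ((v) & 0xFFFF)          /* assumed: verbosity level lives in the bottom  */
#define DEBUG_LVL_ENTEREXIT 3             /* placeholder value, for illustration only      */

static bool should_trace(unsigned int loglevel, unsigned int module_bit)
{
	/* Same test the removed macros performed before printing enter/exit lines. */
	return (module_bit & HIWORD(loglevel)) &&
	       (DEBUG_LVL_ENTEREXIT <= LOWORD(loglevel));
}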
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index ba15059c45b..1d2ebbe17e2 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -174,8 +174,6 @@ int NetVscInitialize(struct hv_driver *drv)
{
struct netvsc_driver *driver = (struct netvsc_driver *)drv;
- DPRINT_ENTER(NETVSC);
-
DPRINT_DBG(NETVSC, "sizeof(struct hv_netvsc_packet)=%zd, "
"sizeof(struct nvsp_message)=%zd, "
"sizeof(struct vmtransfer_page_packet_header)=%zd",
@@ -202,9 +200,6 @@ int NetVscInitialize(struct hv_driver *drv)
driver->OnSend = NetVscOnSend;
RndisFilterInit(driver);
-
- DPRINT_EXIT(NETVSC);
-
return 0;
}
@@ -214,13 +209,10 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
struct netvsc_device *netDevice;
struct nvsp_message *initPacket;
- DPRINT_ENTER(NETVSC);
-
netDevice = GetOutboundNetDevice(Device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
- DPRINT_EXIT(NETVSC);
return -1;
}
/* ASSERT(netDevice->ReceiveBufferSize > 0); */
@@ -335,7 +327,6 @@ Cleanup:
Exit:
PutNetDevice(Device);
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -345,13 +336,10 @@ static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
struct netvsc_device *netDevice;
struct nvsp_message *initPacket;
- DPRINT_ENTER(NETVSC);
-
netDevice = GetOutboundNetDevice(Device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
- DPRINT_EXIT(NETVSC);
return -1;
}
if (netDevice->SendBufferSize <= 0) {
@@ -434,7 +422,6 @@ Cleanup:
Exit:
PutNetDevice(Device);
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -443,8 +430,6 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
struct nvsp_message *revokePacket;
int ret = 0;
- DPRINT_ENTER(NETVSC);
-
/*
* If we got a section count, it means we received a
* SendReceiveBufferComplete msg (ie sent
@@ -475,7 +460,6 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
if (ret != 0) {
DPRINT_ERR(NETVSC, "unable to send revoke receive "
"buffer to netvsp");
- DPRINT_EXIT(NETVSC);
return -1;
}
}
@@ -492,7 +476,6 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
if (ret != 0) {
DPRINT_ERR(NETVSC,
"unable to teardown receive buffer's gpadl");
- DPRINT_EXIT(NETVSC);
return -1;
}
NetDevice->ReceiveBufferGpadlHandle = 0;
@@ -513,8 +496,6 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
NetDevice->ReceiveSections = NULL;
}
- DPRINT_EXIT(NETVSC);
-
return ret;
}
@@ -523,8 +504,6 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
struct nvsp_message *revokePacket;
int ret = 0;
- DPRINT_ENTER(NETVSC);
-
/*
* If we got a section count, it means we received a
* SendReceiveBufferComplete msg (ie sent
@@ -554,7 +533,6 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
if (ret != 0) {
DPRINT_ERR(NETVSC, "unable to send revoke send buffer "
"to netvsp");
- DPRINT_EXIT(NETVSC);
return -1;
}
}
@@ -572,7 +550,6 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
if (ret != 0) {
DPRINT_ERR(NETVSC, "unable to teardown send buffer's "
"gpadl");
- DPRINT_EXIT(NETVSC);
return -1;
}
NetDevice->SendBufferGpadlHandle = 0;
@@ -587,8 +564,6 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
NetDevice->SendBuffer = NULL;
}
- DPRINT_EXIT(NETVSC);
-
return ret;
}
@@ -600,13 +575,10 @@ static int NetVscConnectToVsp(struct hv_device *Device)
struct nvsp_message *initPacket;
int ndisVersion;
- DPRINT_ENTER(NETVSC);
-
netDevice = GetOutboundNetDevice(Device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
- DPRINT_EXIT(NETVSC);
return -1;
}
@@ -696,18 +668,13 @@ static int NetVscConnectToVsp(struct hv_device *Device)
Cleanup:
PutNetDevice(Device);
- DPRINT_EXIT(NETVSC);
return ret;
}
static void NetVscDisconnectFromVsp(struct netvsc_device *NetDevice)
{
- DPRINT_ENTER(NETVSC);
-
NetVscDestroyReceiveBuffer(NetDevice);
NetVscDestroySendBuffer(NetDevice);
-
- DPRINT_EXIT(NETVSC);
}
/*
@@ -722,8 +689,6 @@ static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
struct netvsc_driver *netDriver =
(struct netvsc_driver *)Device->Driver;
- DPRINT_ENTER(NETVSC);
-
netDevice = AllocNetDevice(Device);
if (!netDevice) {
ret = -1;
@@ -787,7 +752,6 @@ static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
DPRINT_INFO(NETVSC, "*** NetVSC channel handshake result - %d ***",
ret);
- DPRINT_EXIT(NETVSC);
return ret;
Close:
@@ -812,7 +776,6 @@ Cleanup:
FreeNetDevice(netDevice);
}
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -824,8 +787,6 @@ static int NetVscOnDeviceRemove(struct hv_device *Device)
struct netvsc_device *netDevice;
struct hv_netvsc_packet *netvscPacket, *pos;
- DPRINT_ENTER(NETVSC);
-
DPRINT_INFO(NETVSC, "Disabling outbound traffic on net device (%p)...",
Device->Extension);
@@ -868,8 +829,6 @@ static int NetVscOnDeviceRemove(struct hv_device *Device)
kfree(netDevice->ChannelInitEvent);
FreeNetDevice(netDevice);
-
- DPRINT_EXIT(NETVSC);
return 0;
}
@@ -878,8 +837,6 @@ static int NetVscOnDeviceRemove(struct hv_device *Device)
*/
static void NetVscOnCleanup(struct hv_driver *drv)
{
- DPRINT_ENTER(NETVSC);
- DPRINT_EXIT(NETVSC);
}
static void NetVscOnSendCompletion(struct hv_device *Device,
@@ -889,13 +846,10 @@ static void NetVscOnSendCompletion(struct hv_device *Device,
struct nvsp_message *nvspPacket;
struct hv_netvsc_packet *nvscPacket;
- DPRINT_ENTER(NETVSC);
-
netDevice = GetInboundNetDevice(Device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
- DPRINT_EXIT(NETVSC);
return;
}
@@ -929,7 +883,6 @@ static void NetVscOnSendCompletion(struct hv_device *Device,
}
PutNetDevice(Device);
- DPRINT_EXIT(NETVSC);
}
static int NetVscOnSend(struct hv_device *Device,
@@ -940,13 +893,10 @@ static int NetVscOnSend(struct hv_device *Device,
struct nvsp_message sendMessage;
- DPRINT_ENTER(NETVSC);
-
netDevice = GetOutboundNetDevice(Device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
"ignoring outbound packets", netDevice);
- DPRINT_EXIT(NETVSC);
return -2;
}
@@ -986,8 +936,6 @@ static int NetVscOnSend(struct hv_device *Device,
atomic_inc(&netDevice->NumOutstandingSends);
PutNetDevice(Device);
-
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -1007,13 +955,10 @@ static void NetVscOnReceive(struct hv_device *Device,
unsigned long flags;
LIST_HEAD(listHead);
- DPRINT_ENTER(NETVSC);
-
netDevice = GetInboundNetDevice(Device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
- DPRINT_EXIT(NETVSC);
return;
}
@@ -1189,7 +1134,6 @@ static void NetVscOnReceive(struct hv_device *Device,
/* ASSERT(list_empty(&listHead)); */
PutNetDevice(Device);
- DPRINT_EXIT(NETVSC);
}
static void NetVscSendReceiveCompletion(struct hv_device *Device,
@@ -1248,8 +1192,6 @@ static void NetVscOnReceiveCompletion(void *Context)
bool fSendReceiveComp = false;
unsigned long flags;
- DPRINT_ENTER(NETVSC);
-
/* ASSERT(packet->XferPagePacket); */
/*
@@ -1261,7 +1203,6 @@ static void NetVscOnReceiveCompletion(void *Context)
if (!netDevice) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
- DPRINT_EXIT(NETVSC);
return;
}
@@ -1292,7 +1233,6 @@ static void NetVscOnReceiveCompletion(void *Context)
NetVscSendReceiveCompletion(device, transactionId);
PutNetDevice(device);
- DPRINT_EXIT(NETVSC);
}
static void NetVscOnChannelCallback(void *Context)
@@ -1307,9 +1247,6 @@ static void NetVscOnChannelCallback(void *Context)
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
-
- DPRINT_ENTER(NETVSC);
-
/* ASSERT(device); */
packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
@@ -1322,7 +1259,6 @@ static void NetVscOnChannelCallback(void *Context)
if (!netDevice) {
DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
"ignoring inbound packets", netDevice);
- DPRINT_EXIT(NETVSC);
goto out;
}
@@ -1386,7 +1322,6 @@ static void NetVscOnChannelCallback(void *Context)
} while (1);
PutNetDevice(device);
- DPRINT_EXIT(NETVSC);
out:
kfree(buffer);
return;
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 55b993298ff..64a01147eca 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -76,8 +76,6 @@ static int netvsc_open(struct net_device *net)
struct hv_device *device_obj = &net_device_ctx->device_ctx->device_obj;
int ret = 0;
- DPRINT_ENTER(NETVSC_DRV);
-
if (netif_carrier_ok(net)) {
/* Open up the device */
ret = RndisFilterOnOpen(device_obj);
@@ -92,7 +90,6 @@ static int netvsc_open(struct net_device *net)
DPRINT_ERR(NETVSC_DRV, "unable to open device...link is down.");
}
- DPRINT_EXIT(NETVSC_DRV);
return ret;
}
@@ -102,16 +99,12 @@ static int netvsc_close(struct net_device *net)
struct hv_device *device_obj = &net_device_ctx->device_ctx->device_obj;
int ret;
- DPRINT_ENTER(NETVSC_DRV);
-
netif_stop_queue(net);
ret = RndisFilterOnClose(device_obj);
if (ret != 0)
DPRINT_ERR(NETVSC_DRV, "unable to close device (ret %d).", ret);
- DPRINT_EXIT(NETVSC_DRV);
-
return ret;
}
@@ -121,8 +114,6 @@ static void netvsc_xmit_completion(void *context)
struct sk_buff *skb = (struct sk_buff *)
(unsigned long)packet->Completion.Send.SendCompletionTid;
- DPRINT_ENTER(NETVSC_DRV);
-
kfree(packet);
if (skb) {
@@ -135,8 +126,6 @@ static void netvsc_xmit_completion(void *context)
if ((net_device_ctx->avail += num_pages) >= PACKET_PAGES_HIWATER)
netif_wake_queue(net);
}
-
- DPRINT_EXIT(NETVSC_DRV);
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -151,8 +140,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
int ret;
unsigned int i, num_pages;
- DPRINT_ENTER(NETVSC_DRV);
-
DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d data_len %d",
skb->len, skb->data_len);
@@ -225,7 +212,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
netvsc_xmit_completion(packet);
}
- DPRINT_EXIT(NETVSC_DRV);
return NETDEV_TX_OK;
}
@@ -238,8 +224,6 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct vm_device *device_ctx = to_vm_device(device_obj);
struct net_device *net = dev_get_drvdata(&device_ctx->device);
- DPRINT_ENTER(NETVSC_DRV);
-
if (!net) {
DPRINT_ERR(NETVSC_DRV, "got link status but net device "
"not initialized yet");
@@ -253,7 +237,6 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
netif_carrier_off(net);
netif_stop_queue(net);
}
- DPRINT_EXIT(NETVSC_DRV);
}
/*
@@ -270,8 +253,6 @@ static int netvsc_recv_callback(struct hv_device *device_obj,
int i;
unsigned long flags;
- DPRINT_ENTER(NETVSC_DRV);
-
if (!net) {
DPRINT_ERR(NETVSC_DRV, "got receive callback but net device "
"not initialized yet");
@@ -323,8 +304,6 @@ static int netvsc_recv_callback(struct hv_device *device_obj,
DPRINT_DBG(NETVSC_DRV, "# of recvs %lu total size %lu",
net->stats.rx_packets, net->stats.rx_bytes);
- DPRINT_EXIT(NETVSC_DRV);
-
return 0;
}
@@ -348,6 +327,9 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_multicast_list = netvsc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
};
static int netvsc_probe(struct device *device)
@@ -364,8 +346,6 @@ static int netvsc_probe(struct device *device)
struct netvsc_device_info device_info;
int ret;
- DPRINT_ENTER(NETVSC_DRV);
-
if (!net_drv_obj->Base.OnDeviceAdd)
return -1;
@@ -422,7 +402,6 @@ static int netvsc_probe(struct device *device)
free_netdev(net);
}
- DPRINT_EXIT(NETVSC_DRV);
return ret;
}
@@ -438,18 +417,13 @@ static int netvsc_remove(struct device *device)
struct hv_device *device_obj = &device_ctx->device_obj;
int ret;
- DPRINT_ENTER(NETVSC_DRV);
-
if (net == NULL) {
DPRINT_INFO(NETVSC, "no net device to remove");
- DPRINT_EXIT(NETVSC_DRV);
return 0;
}
- if (!net_drv_obj->Base.OnDeviceRemove) {
- DPRINT_EXIT(NETVSC_DRV);
+ if (!net_drv_obj->Base.OnDeviceRemove)
return -1;
- }
/* Stop outbound asap */
netif_stop_queue(net);
@@ -468,7 +442,6 @@ static int netvsc_remove(struct device *device)
}
free_netdev(net);
- DPRINT_EXIT(NETVSC_DRV);
return ret;
}
@@ -488,8 +461,6 @@ static void netvsc_drv_exit(void)
struct device *current_dev;
int ret;
- DPRINT_ENTER(NETVSC_DRV);
-
while (1) {
current_dev = NULL;
@@ -515,8 +486,6 @@ static void netvsc_drv_exit(void)
vmbus_child_driver_unregister(drv_ctx);
- DPRINT_EXIT(NETVSC_DRV);
-
return;
}
@@ -526,8 +495,6 @@ static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
struct driver_context *drv_ctx = &g_netvsc_drv.drv_ctx;
int ret;
- DPRINT_ENTER(NETVSC_DRV);
-
vmbus_get_interface(&net_drv_obj->Base.VmbusChannelInterface);
net_drv_obj->RingBufferSize = ring_size * PAGE_SIZE;
@@ -547,8 +514,6 @@ static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
/* The driver belongs to vmbus */
ret = vmbus_child_driver_register(drv_ctx);
- DPRINT_EXIT(NETVSC_DRV);
-
return ret;
}
@@ -568,26 +533,17 @@ MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);
static int __init netvsc_init(void)
{
- int ret;
-
- DPRINT_ENTER(NETVSC_DRV);
DPRINT_INFO(NETVSC_DRV, "Netvsc initializing....");
if (!dmi_check_system(hv_netvsc_dmi_table))
return -ENODEV;
- ret = netvsc_drv_init(NetVscInitialize);
-
- DPRINT_EXIT(NETVSC_DRV);
-
- return ret;
+ return netvsc_drv_init(NetVscInitialize);
}
static void __exit netvsc_exit(void)
{
- DPRINT_ENTER(NETVSC_DRV);
netvsc_drv_exit();
- DPRINT_EXIT(NETVSC_DRV);
}
static const struct pci_device_id __initconst
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index ae2a10e24d9..d78c569ac94 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -46,7 +46,7 @@ Description:
--*/
static inline void
-GetRingBufferAvailBytes(RING_BUFFER_INFO *rbi, u32 *read, u32 *write)
+GetRingBufferAvailBytes(struct hv_ring_buffer_info *rbi, u32 *read, u32 *write)
{
u32 read_loc, write_loc;
@@ -68,7 +68,7 @@ Description:
--*/
static inline u32
-GetNextWriteLocation(RING_BUFFER_INFO *RingInfo)
+GetNextWriteLocation(struct hv_ring_buffer_info *RingInfo)
{
u32 next = RingInfo->RingBuffer->WriteIndex;
@@ -87,7 +87,8 @@ Description:
--*/
static inline void
-SetNextWriteLocation(RING_BUFFER_INFO *RingInfo, u32 NextWriteLocation)
+SetNextWriteLocation(struct hv_ring_buffer_info *RingInfo,
+ u32 NextWriteLocation)
{
RingInfo->RingBuffer->WriteIndex = NextWriteLocation;
}
@@ -102,7 +103,7 @@ Description:
--*/
static inline u32
-GetNextReadLocation(RING_BUFFER_INFO *RingInfo)
+GetNextReadLocation(struct hv_ring_buffer_info *RingInfo)
{
u32 next = RingInfo->RingBuffer->ReadIndex;
@@ -122,7 +123,7 @@ Description:
--*/
static inline u32
-GetNextReadLocationWithOffset(RING_BUFFER_INFO *RingInfo, u32 Offset)
+GetNextReadLocationWithOffset(struct hv_ring_buffer_info *RingInfo, u32 Offset)
{
u32 next = RingInfo->RingBuffer->ReadIndex;
@@ -143,7 +144,7 @@ Description:
--*/
static inline void
-SetNextReadLocation(RING_BUFFER_INFO *RingInfo, u32 NextReadLocation)
+SetNextReadLocation(struct hv_ring_buffer_info *RingInfo, u32 NextReadLocation)
{
RingInfo->RingBuffer->ReadIndex = NextReadLocation;
}
@@ -159,7 +160,7 @@ Description:
--*/
static inline void *
-GetRingBuffer(RING_BUFFER_INFO *RingInfo)
+GetRingBuffer(struct hv_ring_buffer_info *RingInfo)
{
return (void *)RingInfo->RingBuffer->Buffer;
}
@@ -175,7 +176,7 @@ Description:
--*/
static inline u32
-GetRingBufferSize(RING_BUFFER_INFO *RingInfo)
+GetRingBufferSize(struct hv_ring_buffer_info *RingInfo)
{
return RingInfo->RingDataSize;
}
@@ -190,10 +191,9 @@ Description:
--*/
static inline u64
-GetRingBufferIndices(RING_BUFFER_INFO *RingInfo)
+GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
{
- return ((u64)RingInfo->RingBuffer->WriteIndex << 32)
- || RingInfo->RingBuffer->ReadIndex;
+ return (u64)RingInfo->RingBuffer->WriteIndex << 32;
}
@@ -206,7 +206,7 @@ Description:
Dump out to console the ring buffer info
--*/
-void DumpRingInfo(RING_BUFFER_INFO *RingInfo, char *Prefix)
+void DumpRingInfo(struct hv_ring_buffer_info *RingInfo, char *Prefix)
{
u32 bytesAvailToWrite;
u32 bytesAvailToRead;
@@ -233,14 +233,14 @@ void DumpRingInfo(RING_BUFFER_INFO *RingInfo, char *Prefix)
static u32
CopyToRingBuffer(
- RING_BUFFER_INFO *RingInfo,
+ struct hv_ring_buffer_info *RingInfo,
u32 StartWriteOffset,
void *Src,
u32 SrcLen);
static u32
CopyFromRingBuffer(
- RING_BUFFER_INFO *RingInfo,
+ struct hv_ring_buffer_info *RingInfo,
void *Dest,
u32 DestLen,
u32 StartReadOffset);
@@ -256,8 +256,8 @@ Description:
Get various debug metrics for the specified ring buffer
--*/
-void RingBufferGetDebugInfo(RING_BUFFER_INFO *RingInfo,
- RING_BUFFER_DEBUG_INFO *DebugInfo)
+void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
+ struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytesAvailToWrite;
u32 bytesAvailToRead;
@@ -267,11 +267,11 @@ void RingBufferGetDebugInfo(RING_BUFFER_INFO *RingInfo,
&bytesAvailToRead,
&bytesAvailToWrite);
- DebugInfo->BytesAvailToRead = bytesAvailToRead;
- DebugInfo->BytesAvailToWrite = bytesAvailToWrite;
- DebugInfo->CurrentReadIndex = RingInfo->RingBuffer->ReadIndex;
- DebugInfo->CurrentWriteIndex = RingInfo->RingBuffer->WriteIndex;
- DebugInfo->CurrentInterruptMask = RingInfo->RingBuffer->InterruptMask;
+ debug_info->BytesAvailToRead = bytesAvailToRead;
+ debug_info->BytesAvailToWrite = bytesAvailToWrite;
+ debug_info->CurrentReadIndex = RingInfo->RingBuffer->ReadIndex;
+ debug_info->CurrentWriteIndex = RingInfo->RingBuffer->WriteIndex;
+ debug_info->CurrentInterruptMask = RingInfo->RingBuffer->InterruptMask;
}
}
@@ -285,7 +285,7 @@ Description:
Get the interrupt mask for the specified ring buffer
--*/
-u32 GetRingBufferInterruptMask(RING_BUFFER_INFO *rbi)
+u32 GetRingBufferInterruptMask(struct hv_ring_buffer_info *rbi)
{
return rbi->RingBuffer->InterruptMask;
}
@@ -299,18 +299,18 @@ Description:
Initialize the ring buffer
--*/
-int RingBufferInit(RING_BUFFER_INFO *RingInfo, void *Buffer, u32 BufferLen)
+int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer, u32 BufferLen)
{
- if (sizeof(RING_BUFFER) != PAGE_SIZE)
+ if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
return -EINVAL;
- memset(RingInfo, 0, sizeof(RING_BUFFER_INFO));
+ memset(RingInfo, 0, sizeof(struct hv_ring_buffer_info));
- RingInfo->RingBuffer = (RING_BUFFER *)Buffer;
+ RingInfo->RingBuffer = (struct hv_ring_buffer *)Buffer;
RingInfo->RingBuffer->ReadIndex = RingInfo->RingBuffer->WriteIndex = 0;
RingInfo->RingSize = BufferLen;
- RingInfo->RingDataSize = BufferLen - sizeof(RING_BUFFER);
+ RingInfo->RingDataSize = BufferLen - sizeof(struct hv_ring_buffer);
spin_lock_init(&RingInfo->ring_lock);
@@ -326,7 +326,7 @@ Description:
Cleanup the ring buffer
--*/
-void RingBufferCleanup(RING_BUFFER_INFO *RingInfo)
+void RingBufferCleanup(struct hv_ring_buffer_info *RingInfo)
{
}
@@ -339,7 +339,7 @@ Description:
Write to the ring buffer
--*/
-int RingBufferWrite(RING_BUFFER_INFO *OutRingInfo,
+int RingBufferWrite(struct hv_ring_buffer_info *OutRingInfo,
struct scatterlist *sglist, u32 sgcount)
{
int i = 0;
@@ -352,8 +352,6 @@ int RingBufferWrite(RING_BUFFER_INFO *OutRingInfo,
u64 prevIndices = 0;
unsigned long flags;
- DPRINT_ENTER(VMBUS);
-
for_each_sg(sglist, sg, sgcount, i)
{
totalBytesToWrite += sg->length;
@@ -382,9 +380,6 @@ int RingBufferWrite(RING_BUFFER_INFO *OutRingInfo,
byteAvailToWrite);
spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
-
- DPRINT_EXIT(VMBUS);
-
return -1;
}
@@ -416,9 +411,6 @@ int RingBufferWrite(RING_BUFFER_INFO *OutRingInfo,
/* DumpRingInfo(OutRingInfo, "AFTER "); */
spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
-
- DPRINT_EXIT(VMBUS);
-
return 0;
}
@@ -432,7 +424,7 @@ Description:
Read without advancing the read index
--*/
-int RingBufferPeek(RING_BUFFER_INFO *InRingInfo, void *Buffer, u32 BufferLen)
+int RingBufferPeek(struct hv_ring_buffer_info *InRingInfo, void *Buffer, u32 BufferLen)
{
u32 bytesAvailToWrite;
u32 bytesAvailToRead;
@@ -481,7 +473,7 @@ Description:
Read and advance the read index
--*/
-int RingBufferRead(RING_BUFFER_INFO *InRingInfo, void *Buffer,
+int RingBufferRead(struct hv_ring_buffer_info *InRingInfo, void *Buffer,
u32 BufferLen, u32 Offset)
{
u32 bytesAvailToWrite;
@@ -556,7 +548,7 @@ Description:
--*/
static u32
CopyToRingBuffer(
- RING_BUFFER_INFO *RingInfo,
+ struct hv_ring_buffer_info *RingInfo,
u32 StartWriteOffset,
void *Src,
u32 SrcLen)
@@ -594,7 +586,7 @@ Description:
--*/
static u32
CopyFromRingBuffer(
- RING_BUFFER_INFO *RingInfo,
+ struct hv_ring_buffer_info *RingInfo,
void *Dest,
u32 DestLen,
u32 StartReadOffset)
diff --git a/drivers/staging/hv/ring_buffer.h b/drivers/staging/hv/ring_buffer.h
index 6202157e145..a7f1717c6a5 100644
--- a/drivers/staging/hv/ring_buffer.h
+++ b/drivers/staging/hv/ring_buffer.h
@@ -27,7 +27,7 @@
#include <linux/scatterlist.h>
-typedef struct _RING_BUFFER {
+struct hv_ring_buffer {
/* Offset in bytes from the start of ring data below */
volatile u32 WriteIndex;
@@ -51,51 +51,52 @@ typedef struct _RING_BUFFER {
* !!! DO NOT place any fields below this !!!
*/
u8 Buffer[0];
-} __attribute__((packed)) RING_BUFFER;
+} __attribute__((packed));
-typedef struct _RING_BUFFER_INFO {
- RING_BUFFER *RingBuffer;
+struct hv_ring_buffer_info {
+ struct hv_ring_buffer *RingBuffer;
u32 RingSize; /* Include the shared header */
spinlock_t ring_lock;
u32 RingDataSize; /* < ringSize */
u32 RingDataStartOffset;
+};
-} RING_BUFFER_INFO;
-
-typedef struct _RING_BUFFER_DEBUG_INFO {
+struct hv_ring_buffer_debug_info {
u32 CurrentInterruptMask;
u32 CurrentReadIndex;
u32 CurrentWriteIndex;
u32 BytesAvailToRead;
u32 BytesAvailToWrite;
-} RING_BUFFER_DEBUG_INFO;
+};
/* Interface */
-int RingBufferInit(RING_BUFFER_INFO *RingInfo, void *Buffer, u32 BufferLen);
+int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer,
+ u32 BufferLen);
-void RingBufferCleanup(RING_BUFFER_INFO *RingInfo);
+void RingBufferCleanup(struct hv_ring_buffer_info *RingInfo);
-int RingBufferWrite(RING_BUFFER_INFO *RingInfo,
+int RingBufferWrite(struct hv_ring_buffer_info *RingInfo,
struct scatterlist *sglist,
u32 sgcount);
-int RingBufferPeek(RING_BUFFER_INFO *RingInfo, void *Buffer, u32 BufferLen);
+int RingBufferPeek(struct hv_ring_buffer_info *RingInfo, void *Buffer,
+ u32 BufferLen);
-int RingBufferRead(RING_BUFFER_INFO *RingInfo,
+int RingBufferRead(struct hv_ring_buffer_info *RingInfo,
void *Buffer,
u32 BufferLen,
u32 Offset);
-u32 GetRingBufferInterruptMask(RING_BUFFER_INFO *RingInfo);
+u32 GetRingBufferInterruptMask(struct hv_ring_buffer_info *RingInfo);
-void DumpRingInfo(RING_BUFFER_INFO *RingInfo, char *Prefix);
+void DumpRingInfo(struct hv_ring_buffer_info *RingInfo, char *Prefix);
-void RingBufferGetDebugInfo(RING_BUFFER_INFO *RingInfo,
- RING_BUFFER_DEBUG_INFO *DebugInfo);
+void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
+ struct hv_ring_buffer_debug_info *debug_info);
#endif /* _RING_BUFFER_H_ */
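The typedef-to-struct conversion above leaves the layout untouched: hv_ring_buffer is a header whose WriteIndex and ReadIndex are byte offsets into the trailing Buffer[], RingBufferInit() insists that header is exactly one page, and hv_ring_buffer_info caches RingDataSize as the buffer length minus that page. The read/write accounting done by GetRingBufferAvailBytes() is the usual circular-buffer arithmetic; the standalone sketch below assumes that, and real implementations typically refuse to fill the ring completely so empty and full stay distinguishable.

#include <stdio.h>
#include <stdint.h>

/* Usual circular-buffer accounting over byte offsets; data_size is the ring
 * capacity, i.e. the allocation length minus the one-page header. */
static void ring_avail(uint32_t write, uint32_t read, uint32_t data_size,
		       uint32_t *to_read, uint32_t *to_write)
{
	*to_read  = (write >= read) ? (write - read)
				    : (data_size - read + write);
	*to_write = data_size - *to_read;
}

int main(void)
{
	uint32_t r, w;
	/* Five 4 KiB pages minus the one-page header leaves 16384 data bytes. */
	ring_avail(300, 100, 16384, &r, &w);
	printf("avail to read %u, to write %u\n", r, w);  /* 200 and 16184 */
	return 0;
}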
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
index 5edf0853c6a..fa2141f454f 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/staging/hv/rndis_filter.c
@@ -244,8 +244,6 @@ static int RndisFilterSendRequest(struct rndis_device *Device,
int ret;
struct hv_netvsc_packet *packet;
- DPRINT_ENTER(NETVSC);
-
/* Setup the packet to send it */
packet = &Request->Packet;
@@ -265,7 +263,6 @@ static int RndisFilterSendRequest(struct rndis_device *Device,
packet->Completion.Send.SendCompletionTid = (unsigned long)Device;
ret = gRndisFilter.InnerDriver.OnSend(Device->NetDevice->Device, packet);
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -276,8 +273,6 @@ static void RndisFilterReceiveResponse(struct rndis_device *Device,
bool found = false;
unsigned long flags;
- DPRINT_ENTER(NETVSC);
-
spin_lock_irqsave(&Device->request_lock, flags);
list_for_each_entry(request, &Device->RequestList, ListEntry) {
/*
@@ -325,8 +320,6 @@ static void RndisFilterReceiveResponse(struct rndis_device *Device,
Response->Message.InitializeComplete.RequestId,
Response->NdisMessageType);
}
-
- DPRINT_EXIT(NETVSC);
}
static void RndisFilterReceiveIndicateStatus(struct rndis_device *Device,
@@ -353,8 +346,6 @@ static void RndisFilterReceiveData(struct rndis_device *Device,
struct rndis_packet *rndisPacket;
u32 dataOffset;
- DPRINT_ENTER(NETVSC);
-
/* empty ethernet frame ?? */
/* ASSERT(Packet->PageBuffers[0].Length > */
/* RNDIS_MESSAGE_SIZE(struct rndis_packet)); */
@@ -377,8 +368,6 @@ static void RndisFilterReceiveData(struct rndis_device *Device,
gRndisFilter.InnerDriver.OnReceiveCallback(Device->NetDevice->Device,
Packet);
-
- DPRINT_EXIT(NETVSC);
}
static int RndisFilterOnReceive(struct hv_device *Device,
@@ -389,8 +378,6 @@ static int RndisFilterOnReceive(struct hv_device *Device,
struct rndis_message rndisMessage;
struct rndis_message *rndisHeader;
- DPRINT_ENTER(NETVSC);
-
if (!netDevice)
return -EINVAL;
@@ -398,7 +385,6 @@ static int RndisFilterOnReceive(struct hv_device *Device,
if (!netDevice->Extension) {
DPRINT_ERR(NETVSC, "got rndis message but no rndis device..."
"dropping this message!");
- DPRINT_EXIT(NETVSC);
return -1;
}
@@ -406,7 +392,6 @@ static int RndisFilterOnReceive(struct hv_device *Device,
if (rndisDevice->State == RNDIS_DEV_UNINITIALIZED) {
DPRINT_ERR(NETVSC, "got rndis message but rndis device "
"uninitialized...dropping this message!");
- DPRINT_EXIT(NETVSC);
return -1;
}
@@ -431,7 +416,6 @@ static int RndisFilterOnReceive(struct hv_device *Device,
"bytes got %u)...dropping this message!",
rndisHeader->MessageLength,
Packet->TotalDataBufferLength);
- DPRINT_EXIT(NETVSC);
return -1;
}
#endif
@@ -479,7 +463,6 @@ static int RndisFilterOnReceive(struct hv_device *Device,
break;
}
- DPRINT_EXIT(NETVSC);
return 0;
}
@@ -492,8 +475,6 @@ static int RndisFilterQueryDevice(struct rndis_device *Device, u32 Oid,
struct rndis_query_complete *queryComplete;
int ret = 0;
- DPRINT_ENTER(NETVSC);
-
if (!Result)
return -EINVAL;
@@ -536,7 +517,6 @@ static int RndisFilterQueryDevice(struct rndis_device *Device, u32 Oid,
Cleanup:
if (request)
PutRndisRequest(Device, request);
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -568,8 +548,6 @@ static int RndisFilterSetPacketFilter(struct rndis_device *Device,
u32 status;
int ret;
- DPRINT_ENTER(NETVSC);
-
/* ASSERT(RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32) <= */
/* sizeof(struct rndis_message)); */
@@ -614,15 +592,11 @@ Cleanup:
if (request)
PutRndisRequest(Device, request);
Exit:
- DPRINT_EXIT(NETVSC);
-
return ret;
}
int RndisFilterInit(struct netvsc_driver *Driver)
{
- DPRINT_ENTER(NETVSC);
-
DPRINT_DBG(NETVSC, "sizeof(struct rndis_filter_packet) == %zd",
sizeof(struct rndis_filter_packet));
@@ -658,8 +632,6 @@ int RndisFilterInit(struct netvsc_driver *Driver)
/* Driver->QueryLinkStatus = RndisFilterQueryDeviceLinkStatus; */
Driver->OnReceiveCallback = RndisFilterOnReceive;
- DPRINT_EXIT(NETVSC);
-
return 0;
}
@@ -671,8 +643,6 @@ static int RndisFilterInitDevice(struct rndis_device *Device)
u32 status;
int ret;
- DPRINT_ENTER(NETVSC);
-
request = GetRndisRequest(Device, REMOTE_NDIS_INITIALIZE_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
if (!request) {
@@ -710,7 +680,6 @@ static int RndisFilterInitDevice(struct rndis_device *Device)
Cleanup:
if (request)
PutRndisRequest(Device, request);
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -720,8 +689,6 @@ static void RndisFilterHaltDevice(struct rndis_device *Device)
struct rndis_request *request;
struct rndis_halt_request *halt;
- DPRINT_ENTER(NETVSC);
-
/* Attempt to do a rndis device halt */
request = GetRndisRequest(Device, REMOTE_NDIS_HALT_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
@@ -740,7 +707,6 @@ static void RndisFilterHaltDevice(struct rndis_device *Device)
Cleanup:
if (request)
PutRndisRequest(Device, request);
- DPRINT_EXIT(NETVSC);
return;
}
@@ -748,8 +714,6 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
{
int ret;
- DPRINT_ENTER(NETVSC);
-
if (Device->State != RNDIS_DEV_INITIALIZED)
return 0;
@@ -760,7 +724,6 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
if (ret == 0)
Device->State = RNDIS_DEV_DATAINITIALIZED;
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -768,8 +731,6 @@ static int RndisFilterCloseDevice(struct rndis_device *Device)
{
int ret;
- DPRINT_ENTER(NETVSC);
-
if (Device->State != RNDIS_DEV_DATAINITIALIZED)
return 0;
@@ -777,8 +738,6 @@ static int RndisFilterCloseDevice(struct rndis_device *Device)
if (ret == 0)
Device->State = RNDIS_DEV_INITIALIZED;
- DPRINT_EXIT(NETVSC);
-
return ret;
}
@@ -790,13 +749,9 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
struct rndis_device *rndisDevice;
struct netvsc_device_info *deviceInfo = AdditionalInfo;
- DPRINT_ENTER(NETVSC);
-
rndisDevice = GetRndisDevice();
- if (!rndisDevice) {
- DPRINT_EXIT(NETVSC);
+ if (!rndisDevice)
return -1;
- }
DPRINT_DBG(NETVSC, "rndis device object allocated - %p", rndisDevice);
@@ -808,7 +763,6 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
ret = gRndisFilter.InnerDriver.Base.OnDeviceAdd(Device, AdditionalInfo);
if (ret != 0) {
kfree(rndisDevice);
- DPRINT_EXIT(NETVSC);
return ret;
}
@@ -849,8 +803,6 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
DPRINT_INFO(NETVSC, "Device 0x%p link state %s", rndisDevice,
((deviceInfo->LinkState) ? ("down") : ("up")));
- DPRINT_EXIT(NETVSC);
-
return ret;
}
@@ -859,8 +811,6 @@ static int RndisFilterOnDeviceRemove(struct hv_device *Device)
struct netvsc_device *netDevice = Device->Extension;
struct rndis_device *rndisDevice = netDevice->Extension;
- DPRINT_ENTER(NETVSC);
-
/* Halt and release the rndis device */
RndisFilterHaltDevice(rndisDevice);
@@ -870,50 +820,31 @@ static int RndisFilterOnDeviceRemove(struct hv_device *Device)
/* Pass control to inner driver to remove the device */
gRndisFilter.InnerDriver.Base.OnDeviceRemove(Device);
- DPRINT_EXIT(NETVSC);
-
return 0;
}
static void RndisFilterOnCleanup(struct hv_driver *Driver)
{
- DPRINT_ENTER(NETVSC);
-
- DPRINT_EXIT(NETVSC);
}
int RndisFilterOnOpen(struct hv_device *Device)
{
- int ret;
struct netvsc_device *netDevice = Device->Extension;
- DPRINT_ENTER(NETVSC);
-
if (!netDevice)
return -EINVAL;
- ret = RndisFilterOpenDevice(netDevice->Extension);
-
- DPRINT_EXIT(NETVSC);
-
- return ret;
+ return RndisFilterOpenDevice(netDevice->Extension);
}
int RndisFilterOnClose(struct hv_device *Device)
{
- int ret;
struct netvsc_device *netDevice = Device->Extension;
- DPRINT_ENTER(NETVSC);
-
if (!netDevice)
return -EINVAL;
- ret = RndisFilterCloseDevice(netDevice->Extension);
-
- DPRINT_EXIT(NETVSC);
-
- return ret;
+ return RndisFilterCloseDevice(netDevice->Extension);
}
static int RndisFilterOnSend(struct hv_device *Device,
@@ -925,8 +856,6 @@ static int RndisFilterOnSend(struct hv_device *Device,
struct rndis_packet *rndisPacket;
u32 rndisMessageSize;
- DPRINT_ENTER(NETVSC);
-
/* Add the rndis header */
filterPacket = (struct rndis_filter_packet *)Packet->Extension;
/* ASSERT(filterPacket); */
@@ -971,8 +900,6 @@ static int RndisFilterOnSend(struct hv_device *Device,
filterPacket->CompletionContext;
}
- DPRINT_EXIT(NETVSC);
-
return ret;
}
@@ -980,19 +907,12 @@ static void RndisFilterOnSendCompletion(void *Context)
{
struct rndis_filter_packet *filterPacket = Context;
- DPRINT_ENTER(NETVSC);
-
/* Pass it back to the original handler */
filterPacket->OnCompletion(filterPacket->CompletionContext);
-
- DPRINT_EXIT(NETVSC);
}
static void RndisFilterOnSendRequestCompletion(void *Context)
{
- DPRINT_ENTER(NETVSC);
-
/* Noop */
- DPRINT_EXIT(NETVSC);
}
diff --git a/drivers/staging/hv/storvsc.c b/drivers/staging/hv/storvsc.c
index 27a276e08ee..6bd2ff138d2 100644
--- a/drivers/staging/hv/storvsc.c
+++ b/drivers/staging/hv/storvsc.c
@@ -186,7 +186,6 @@ static int StorVscChannelInit(struct hv_device *Device)
if (!storDevice) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
- DPRINT_EXIT(STORVSC);
return -1;
}
@@ -344,8 +343,6 @@ Cleanup:
request->WaitEvent = NULL;
nomem:
PutStorDevice(Device);
-
- DPRINT_EXIT(STORVSC);
return ret;
}
@@ -356,13 +353,10 @@ static void StorVscOnIOCompletion(struct hv_device *Device,
struct hv_storvsc_request *request;
struct storvsc_device *storDevice;
- DPRINT_ENTER(STORVSC);
-
storDevice = MustGetStorDevice(Device);
if (!storDevice) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
- DPRINT_EXIT(STORVSC);
return;
}
@@ -414,8 +408,6 @@ static void StorVscOnIOCompletion(struct hv_device *Device,
atomic_dec(&storDevice->NumOutstandingRequests);
PutStorDevice(Device);
-
- DPRINT_EXIT(STORVSC);
}
static void StorVscOnReceive(struct hv_device *Device,
@@ -449,15 +441,12 @@ static void StorVscOnChannelCallback(void *context)
struct storvsc_request_extension *request;
int ret;
- DPRINT_ENTER(STORVSC);
-
/* ASSERT(device); */
storDevice = MustGetStorDevice(device);
if (!storDevice) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
- DPRINT_EXIT(STORVSC);
return;
}
@@ -501,8 +490,6 @@ static void StorVscOnChannelCallback(void *context)
} while (1);
PutStorDevice(device);
-
- DPRINT_EXIT(STORVSC);
return;
}
@@ -547,8 +534,6 @@ static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
struct storvsc_device_info *deviceInfo;
int ret = 0;
- DPRINT_ENTER(STORVSC);
-
deviceInfo = (struct storvsc_device_info *)AdditionalInfo;
storDevice = AllocStorDevice(Device);
if (!storDevice) {
@@ -584,8 +569,6 @@ static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
storDevice->TargetId);
Cleanup:
- DPRINT_EXIT(STORVSC);
-
return ret;
}
@@ -596,8 +579,6 @@ static int StorVscOnDeviceRemove(struct hv_device *Device)
{
struct storvsc_device *storDevice;
- DPRINT_ENTER(STORVSC);
-
DPRINT_INFO(STORVSC, "disabling storage device (%p)...",
Device->Extension);
@@ -625,8 +606,6 @@ static int StorVscOnDeviceRemove(struct hv_device *Device)
Device->Driver->VmbusChannelInterface.Close(Device);
FreeStorDevice(storDevice);
-
- DPRINT_EXIT(STORVSC);
return 0;
}
@@ -637,15 +616,12 @@ int StorVscOnHostReset(struct hv_device *Device)
struct vstor_packet *vstorPacket;
int ret;
- DPRINT_ENTER(STORVSC);
-
DPRINT_INFO(STORVSC, "resetting host adapter...");
storDevice = GetStorDevice(Device);
if (!storDevice) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
- DPRINT_EXIT(STORVSC);
return -1;
}
@@ -687,7 +663,6 @@ int StorVscOnHostReset(struct hv_device *Device)
Cleanup:
PutStorDevice(Device);
- DPRINT_EXIT(STORVSC);
return ret;
}
@@ -702,8 +677,6 @@ static int StorVscOnIORequest(struct hv_device *Device,
struct vstor_packet *vstorPacket;
int ret = 0;
- DPRINT_ENTER(STORVSC);
-
requestExtension =
(struct storvsc_request_extension *)Request->Extension;
vstorPacket = &requestExtension->VStorPacket;
@@ -720,7 +693,6 @@ static int StorVscOnIORequest(struct hv_device *Device,
if (!storDevice) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
- DPRINT_EXIT(STORVSC);
return -2;
}
@@ -786,8 +758,6 @@ static int StorVscOnIORequest(struct hv_device *Device,
atomic_inc(&storDevice->NumOutstandingRequests);
PutStorDevice(Device);
-
- DPRINT_EXIT(STORVSC);
return ret;
}
@@ -796,8 +766,6 @@ static int StorVscOnIORequest(struct hv_device *Device,
*/
static void StorVscOnCleanup(struct hv_driver *Driver)
{
- DPRINT_ENTER(STORVSC);
- DPRINT_EXIT(STORVSC);
}
/*
@@ -807,8 +775,6 @@ int StorVscInitialize(struct hv_driver *Driver)
{
struct storvsc_driver_object *storDriver;
- DPRINT_ENTER(STORVSC);
-
storDriver = (struct storvsc_driver_object *)Driver;
DPRINT_DBG(STORVSC, "sizeof(STORVSC_REQUEST)=%zd "
@@ -852,7 +818,5 @@ int StorVscInitialize(struct hv_driver *Driver)
storDriver->OnIORequest = StorVscOnIORequest;
- DPRINT_EXIT(STORVSC);
-
return 0;
}
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 0063bde9a4b..8505a1c5f9e 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -28,10 +28,10 @@
#include "vmbus_api.h"
/* Defines */
-#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-#define STORVSC_MAX_IO_REQUESTS 64
+#define STORVSC_MAX_IO_REQUESTS 128
/*
* In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index d22e35f598b..62882a437aa 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -141,8 +141,6 @@ static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;
- DPRINT_ENTER(STORVSC_DRV);
-
vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);
storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
@@ -175,8 +173,6 @@ static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
/* The driver belongs to vmbus */
ret = vmbus_child_driver_register(drv_ctx);
- DPRINT_EXIT(STORVSC_DRV);
-
return ret;
}
@@ -194,8 +190,6 @@ static void storvsc_drv_exit(void)
struct device *current_dev = NULL;
int ret;
- DPRINT_ENTER(STORVSC_DRV);
-
while (1) {
current_dev = NULL;
@@ -219,9 +213,6 @@ static void storvsc_drv_exit(void)
storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
vmbus_child_driver_unregister(drv_ctx);
-
- DPRINT_EXIT(STORVSC_DRV);
-
return;
}
@@ -243,8 +234,6 @@ static int storvsc_probe(struct device *device)
struct host_device_context *host_device_ctx;
struct storvsc_device_info device_info;
- DPRINT_ENTER(STORVSC_DRV);
-
if (!storvsc_drv_obj->Base.OnDeviceAdd)
return -1;
@@ -271,8 +260,6 @@ static int storvsc_probe(struct device *device)
if (!host_device_ctx->request_pool) {
scsi_host_put(host);
- DPRINT_EXIT(STORVSC_DRV);
-
return -ENOMEM;
}
@@ -284,8 +271,6 @@ static int storvsc_probe(struct device *device)
DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
kmem_cache_destroy(host_device_ctx->request_pool);
scsi_host_put(host);
- DPRINT_EXIT(STORVSC_DRV);
-
return -1;
}
@@ -309,15 +294,10 @@ static int storvsc_probe(struct device *device)
kmem_cache_destroy(host_device_ctx->request_pool);
scsi_host_put(host);
- DPRINT_EXIT(STORVSC_DRV);
-
return -1;
}
scsi_scan_host(host);
-
- DPRINT_EXIT(STORVSC_DRV);
-
return ret;
}
@@ -340,12 +320,8 @@ static int storvsc_remove(struct device *device)
(struct host_device_context *)host->hostdata;
- DPRINT_ENTER(STORVSC_DRV);
-
- if (!storvsc_drv_obj->Base.OnDeviceRemove) {
- DPRINT_EXIT(STORVSC_DRV);
+ if (!storvsc_drv_obj->Base.OnDeviceRemove)
return -1;
- }
/*
* Call to the vsc driver to let it know that the device is being
@@ -368,9 +344,6 @@ static int storvsc_remove(struct device *device)
DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
scsi_host_put(host);
-
- DPRINT_EXIT(STORVSC_DRV);
-
return ret;
}
@@ -393,8 +366,6 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
/* (unsigned long)cmd_request); */
/* ASSERT(scmnd->scsi_done); */
- DPRINT_ENTER(STORVSC_DRV);
-
if (cmd_request->bounce_sgl_count) {
/* using bounce buffer */
/* printk("copy_from_bounce_buffer\n"); */
@@ -427,8 +398,6 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
scsi_done_fn(scmnd);
kmem_cache_free(host_device_ctx->request_pool, cmd_request);
-
- DPRINT_EXIT(STORVSC_DRV);
}
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
@@ -526,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
/* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (srclen) {
@@ -587,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
destlen = orig_sgl[i].length;
/* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (destlen) {
@@ -646,8 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
unsigned int request_size = 0;
int i;
struct scatterlist *sgl;
-
- DPRINT_ENTER(STORVSC_DRV);
+ unsigned int sg_count = 0;
DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
"queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -730,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
request->DataBuffer.Length = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -764,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
scsi_sg_count(scmnd));
sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
}
request->DataBuffer.Offset = sgl[0].offset;
- for (i = 0; i < scsi_sg_count(scmnd); i++) {
+ for (i = 0; i < sg_count; i++) {
DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
i, sgl[i].length, sgl[i].offset);
request->DataBuffer.PfnArray[i] =
- page_to_pfn(sg_page((&sgl[i])));
+ page_to_pfn(sg_page((&sgl[i])));
}
} else if (scsi_sglist(scmnd)) {
/* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
@@ -812,8 +782,6 @@ retry_request:
ret = SCSI_MLQUEUE_DEVICE_BUSY;
}
- DPRINT_EXIT(STORVSC_DRV);
-
return ret;
}
@@ -873,23 +841,17 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
(struct host_device_context *)scmnd->device->host->hostdata;
struct vm_device *device_ctx = host_device_ctx->device_ctx;
- DPRINT_ENTER(STORVSC_DRV);
-
DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
scmnd->device, &device_ctx->device_obj);
/* Invokes the vsc to reset the host/bus */
ret = StorVscOnHostReset(&device_ctx->device_obj);
- if (ret != 0) {
- DPRINT_EXIT(STORVSC_DRV);
+ if (ret != 0)
return ret;
- }
DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reseted",
scmnd->device, &device_ctx->device_obj);
- DPRINT_EXIT(STORVSC_DRV);
-
return ret;
}
@@ -977,18 +939,14 @@ static int __init storvsc_init(void)
{
int ret;
- DPRINT_ENTER(STORVSC_DRV);
DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
ret = storvsc_drv_init(StorVscInitialize);
- DPRINT_EXIT(STORVSC_DRV);
return ret;
}
static void __exit storvsc_exit(void)
{
- DPRINT_ENTER(STORVSC_DRV);
storvsc_drv_exit();
- DPRINT_ENTER(STORVSC_DRV);
}
MODULE_LICENSE("GPL");
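Two related fixes stand out in the storvsc_drv.c hunks above: once the original scatterlist is swapped for the bounce list, the PFN loop in storvsc_queuecommand() has to iterate over bounce_sgl_count rather than scsi_sg_count(scmnd), and the copy helpers now map a fresh bounce page whenever bounce_addr is unset instead of only for the first element (the j == 0 check). The standalone sketch below is an analogy for the second fix, not driver code: the lazily refreshed dst_map pointer plays the role of bounce_addr, and the fixed-size segments stand in for pages.

#include <stdio.h>
#include <string.h>

#define SEG 8  /* bounce segment size, standing in for PAGE_SIZE */

struct seg { const char *data; size_t len; };

/* Copy a list of source segments into fixed-size bounce segments. dst_map is
 * refreshed whenever it is NULL, not only for the first destination index;
 * that is the j == 0 versus bounce_addr == 0 distinction the patch fixes. */
static void copy_to_bounce(const struct seg *src, size_t nsrc,
			   char dst[][SEG], size_t *ndst_used)
{
	char *dst_map = NULL;  /* analogous to bounce_addr */
	size_t j = 0, dst_off = 0;

	for (size_t i = 0; i < nsrc; i++) {
		const char *p = src[i].data;
		size_t left = src[i].len;

		while (left) {
			if (dst_map == NULL)        /* map lazily, every time it was dropped */
				dst_map = dst[j];
			size_t room = SEG - dst_off;
			size_t n = left < room ? left : room;
			memcpy(dst_map + dst_off, p, n);
			p += n; left -= n; dst_off += n;
			if (dst_off == SEG) {       /* segment full: "unmap" and advance */
				dst_map = NULL;
				dst_off = 0;
				j++;
			}
		}
	}
	*ndst_used = dst_off ? j + 1 : j;
}

int main(void)
{
	struct seg src[] = { { "hello ", 6 }, { "hyper-v world", 13 } };
	char dst[4][SEG];
	size_t used;
	copy_to_bounce(src, 2, dst, &used);
	printf("copied into %zu bounce segments\n", used);  /* 19 bytes -> 3 segments */
	return 0;
}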
diff --git a/drivers/staging/hv/vmbus.c b/drivers/staging/hv/vmbus.c
index 007543bdb41..ca1e18a6200 100644
--- a/drivers/staging/hv/vmbus.c
+++ b/drivers/staging/hv/vmbus.c
@@ -57,9 +57,7 @@ static struct hv_device *gDevice; /* vmbus root device */
*/
static void VmbusGetChannelOffers(void)
{
- DPRINT_ENTER(VMBUS);
VmbusChannelRequestOffers();
- DPRINT_EXIT(VMBUS);
}
/*
@@ -120,8 +118,6 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
u32 *irqvector = AdditionalInfo;
int ret;
- DPRINT_ENTER(VMBUS);
-
gDevice = dev;
memcpy(&gDevice->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
@@ -136,8 +132,6 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
ret = VmbusConnect();
/* VmbusSendEvent(device->localPortId+1); */
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -148,12 +142,9 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
{
int ret = 0;
- DPRINT_ENTER(VMBUS);
VmbusChannelReleaseUnattachedChannels();
VmbusDisconnect();
on_each_cpu(HvSynicCleanup, NULL, 1);
- DPRINT_EXIT(VMBUS);
-
return ret;
}
@@ -164,9 +155,7 @@ static void VmbusOnCleanup(struct hv_driver *drv)
{
/* struct vmbus_driver *driver = (struct vmbus_driver *)drv; */
- DPRINT_ENTER(VMBUS);
HvCleanup();
- DPRINT_EXIT(VMBUS);
}
/*
@@ -239,8 +228,6 @@ static int VmbusOnISR(struct hv_driver *drv)
page_addr = gHvContext.synICMessagePage[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
- DPRINT_ENTER(VMBUS);
-
/* Check if there are actual msgs to be process */
if (msg->Header.MessageType != HvMessageTypeNone) {
DPRINT_DBG(VMBUS, "received msg type %d size %d",
@@ -259,7 +246,6 @@ static int VmbusOnISR(struct hv_driver *drv)
ret |= 0x2;
}
- DPRINT_EXIT(VMBUS);
return ret;
}
@@ -271,8 +257,6 @@ int VmbusInitialize(struct hv_driver *drv)
struct vmbus_driver *driver = (struct vmbus_driver *)drv;
int ret;
- DPRINT_ENTER(VMBUS);
-
DPRINT_INFO(VMBUS, "+++++++ HV Driver version = %s +++++++",
HV_DRV_VERSION);
DPRINT_INFO(VMBUS, "+++++++ Vmbus supported version = %d +++++++",
@@ -305,7 +289,5 @@ int VmbusInitialize(struct hv_driver *drv)
ret);
gDriver = drv;
- DPRINT_EXIT(VMBUS);
-
return ret;
}
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index 22c80ece638..092f02ed6be 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -254,8 +254,6 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
int ret;
unsigned int vector;
- DPRINT_ENTER(VMBUS_DRV);
-
/*
* Set this up to allow lower layer to callback to add/remove child
* devices on the bus
@@ -360,8 +358,6 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
wait_for_completion(&hv_channel_ready);
cleanup:
- DPRINT_EXIT(VMBUS_DRV);
-
return ret;
}
@@ -377,8 +373,6 @@ static void vmbus_bus_exit(void)
struct vm_device *dev_ctx = &g_vmbus_drv.device_ctx;
- DPRINT_ENTER(VMBUS_DRV);
-
/* Remove the root device */
if (vmbus_drv_obj->Base.OnDeviceRemove)
vmbus_drv_obj->Base.OnDeviceRemove(&dev_ctx->device_obj);
@@ -395,10 +389,6 @@ static void vmbus_bus_exit(void)
tasklet_kill(&vmbus_drv_ctx->msg_dpc);
tasklet_kill(&vmbus_drv_ctx->event_dpc);
-
- DPRINT_EXIT(VMBUS_DRV);
-
- return;
}
@@ -419,8 +409,6 @@ int vmbus_child_driver_register(struct driver_context *driver_ctx)
struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
int ret;
- DPRINT_ENTER(VMBUS_DRV);
-
DPRINT_INFO(VMBUS_DRV, "child driver (%p) registering - name %s",
driver_ctx, driver_ctx->driver.name);
@@ -431,8 +419,6 @@ int vmbus_child_driver_register(struct driver_context *driver_ctx)
vmbus_drv_obj->GetChannelOffers();
- DPRINT_EXIT(VMBUS_DRV);
-
return ret;
}
EXPORT_SYMBOL(vmbus_child_driver_register);
@@ -450,16 +436,12 @@ EXPORT_SYMBOL(vmbus_child_driver_register);
*/
void vmbus_child_driver_unregister(struct driver_context *driver_ctx)
{
- DPRINT_ENTER(VMBUS_DRV);
-
DPRINT_INFO(VMBUS_DRV, "child driver (%p) unregistering - name %s",
driver_ctx, driver_ctx->driver.name);
driver_unregister(&driver_ctx->driver);
driver_ctx->driver.bus = NULL;
-
- DPRINT_EXIT(VMBUS_DRV);
}
EXPORT_SYMBOL(vmbus_child_driver_unregister);
@@ -506,15 +488,11 @@ static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
struct vm_device *child_device_ctx;
struct hv_device *child_device_obj;
- DPRINT_ENTER(VMBUS_DRV);
-
/* Allocate the new child device */
child_device_ctx = kzalloc(sizeof(struct vm_device), GFP_KERNEL);
if (!child_device_ctx) {
DPRINT_ERR(VMBUS_DRV,
"unable to allocate device_context for child device");
- DPRINT_EXIT(VMBUS_DRV);
-
return NULL;
}
@@ -546,8 +524,6 @@ static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
memcpy(&child_device_ctx->class_id, type, sizeof(struct hv_guid));
memcpy(&child_device_ctx->device_id, instance, sizeof(struct hv_guid));
- DPRINT_EXIT(VMBUS_DRV);
-
return child_device_obj;
}
@@ -564,8 +540,6 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
to_vm_device(child_device_obj);
static atomic_t device_num = ATOMIC_INIT(0);
- DPRINT_ENTER(VMBUS_DRV);
-
DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
child_device_ctx);
@@ -594,8 +568,6 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
DPRINT_INFO(VMBUS_DRV, "child device (%p) registered",
&child_device_ctx->device);
- DPRINT_EXIT(VMBUS_DRV);
-
return ret;
}
@@ -607,8 +579,6 @@ static void vmbus_child_device_unregister(struct hv_device *device_obj)
{
struct vm_device *device_ctx = to_vm_device(device_obj);
- DPRINT_ENTER(VMBUS_DRV);
-
DPRINT_INFO(VMBUS_DRV, "unregistering child device (%p)",
&device_ctx->device);
@@ -620,8 +590,6 @@ static void vmbus_child_device_unregister(struct hv_device *device_obj)
DPRINT_INFO(VMBUS_DRV, "child device (%p) unregistered",
&device_ctx->device);
-
- DPRINT_EXIT(VMBUS_DRV);
}
/*
@@ -629,9 +597,6 @@ static void vmbus_child_device_unregister(struct hv_device *device_obj)
*/
static void vmbus_child_device_destroy(struct hv_device *device_obj)
{
- DPRINT_ENTER(VMBUS_DRV);
-
- DPRINT_EXIT(VMBUS_DRV);
}
/*
@@ -646,8 +611,6 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
struct vm_device *device_ctx = device_to_vm_device(device);
int ret;
- DPRINT_ENTER(VMBUS_DRV);
-
DPRINT_INFO(VMBUS_DRV, "generating uevent - VMBUS_DEVICE_CLASS_GUID={"
"%02x%02x%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x%02x%02x%02x%02x%02x%02x}",
@@ -708,8 +671,6 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
if (ret)
return ret;
- DPRINT_EXIT(VMBUS_DRV);
-
return 0;
}
@@ -722,8 +683,6 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
struct driver_context *driver_ctx = driver_to_driver_context(driver);
struct vm_device *device_ctx = device_to_vm_device(device);
- DPRINT_ENTER(VMBUS_DRV);
-
/* We found our driver ? */
if (memcmp(&device_ctx->class_id, &driver_ctx->class_id,
sizeof(struct hv_guid)) == 0) {
@@ -742,9 +701,6 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
match = 1;
}
-
- DPRINT_EXIT(VMBUS_DRV);
-
return match;
}
@@ -759,8 +715,6 @@ static void vmbus_probe_failed_cb(struct work_struct *context)
{
struct vm_device *device_ctx = (struct vm_device *)context;
- DPRINT_ENTER(VMBUS_DRV);
-
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
@@ -768,7 +722,6 @@ static void vmbus_probe_failed_cb(struct work_struct *context)
device_unregister(&device_ctx->device);
/* put_device(&device_ctx->device); */
- DPRINT_EXIT(VMBUS_DRV);
}
/*
@@ -782,8 +735,6 @@ static int vmbus_probe(struct device *child_device)
struct vm_device *device_ctx =
device_to_vm_device(child_device);
- DPRINT_ENTER(VMBUS_DRV);
-
/* Let the specific open-source driver handles the probe if it can */
if (driver_ctx->probe) {
ret = device_ctx->probe_error = driver_ctx->probe(child_device);
@@ -802,8 +753,6 @@ static int vmbus_probe(struct device *child_device)
child_device->driver->name);
ret = -1;
}
-
- DPRINT_EXIT(VMBUS_DRV);
return ret;
}
@@ -815,15 +764,12 @@ static int vmbus_remove(struct device *child_device)
int ret;
struct driver_context *driver_ctx;
- DPRINT_ENTER(VMBUS_DRV);
-
/* Special case root bus device */
if (child_device->parent == NULL) {
/*
* No-op since it is statically defined and handle in
* vmbus_bus_exit()
*/
- DPRINT_EXIT(VMBUS_DRV);
return 0;
}
@@ -844,8 +790,6 @@ static int vmbus_remove(struct device *child_device)
}
}
- DPRINT_EXIT(VMBUS_DRV);
-
return 0;
}
@@ -856,23 +800,18 @@ static void vmbus_shutdown(struct device *child_device)
{
struct driver_context *driver_ctx;
- DPRINT_ENTER(VMBUS_DRV);
-
/* Special case root bus device */
if (child_device->parent == NULL) {
/*
* No-op since it is statically defined and handle in
* vmbus_bus_exit()
*/
- DPRINT_EXIT(VMBUS_DRV);
return;
}
/* The device may not be attached yet */
- if (!child_device->driver) {
- DPRINT_EXIT(VMBUS_DRV);
+ if (!child_device->driver)
return;
- }
driver_ctx = driver_to_driver_context(child_device->driver);
@@ -880,8 +819,6 @@ static void vmbus_shutdown(struct device *child_device)
if (driver_ctx->shutdown)
driver_ctx->shutdown(child_device);
- DPRINT_EXIT(VMBUS_DRV);
-
return;
}
@@ -890,13 +827,11 @@ static void vmbus_shutdown(struct device *child_device)
*/
static void vmbus_bus_release(struct device *device)
{
- DPRINT_ENTER(VMBUS_DRV);
/* FIXME */
/* Empty release functions are a bug, or a major sign
* of a problem design, this MUST BE FIXED! */
dev_err(device, "%s needs to be fixed!\n", __func__);
WARN_ON(1);
- DPRINT_EXIT(VMBUS_DRV);
}
/*
@@ -906,15 +841,10 @@ static void vmbus_device_release(struct device *device)
{
struct vm_device *device_ctx = device_to_vm_device(device);
- DPRINT_ENTER(VMBUS_DRV);
-
/* vmbus_child_device_destroy(&device_ctx->device_obj); */
kfree(device_ctx);
/* !!DO NOT REFERENCE device_ctx anymore at this point!! */
- DPRINT_EXIT(VMBUS_DRV);
-
- return;
}
/*
@@ -924,14 +854,10 @@ static void vmbus_msg_dpc(unsigned long data)
{
struct vmbus_driver *vmbus_drv_obj = (struct vmbus_driver *)data;
- DPRINT_ENTER(VMBUS_DRV);
-
/* ASSERT(vmbus_drv_obj->OnMsgDpc != NULL); */
/* Call to bus driver to handle interrupt */
vmbus_drv_obj->OnMsgDpc(&vmbus_drv_obj->Base);
-
- DPRINT_EXIT(VMBUS_DRV);
}
/*
@@ -941,14 +867,10 @@ static void vmbus_event_dpc(unsigned long data)
{
struct vmbus_driver *vmbus_drv_obj = (struct vmbus_driver *)data;
- DPRINT_ENTER(VMBUS_DRV);
-
/* ASSERT(vmbus_drv_obj->OnEventDpc != NULL); */
/* Call to bus driver to handle interrupt */
vmbus_drv_obj->OnEventDpc(&vmbus_drv_obj->Base);
-
- DPRINT_EXIT(VMBUS_DRV);
}
static irqreturn_t vmbus_isr(int irq, void *dev_id)
@@ -956,8 +878,6 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
struct vmbus_driver *vmbus_driver_obj = &g_vmbus_drv.drv_obj;
int ret;
- DPRINT_ENTER(VMBUS_DRV);
-
/* ASSERT(vmbus_driver_obj->OnIsr != NULL); */
/* Call to bus driver to handle interrupt */
@@ -971,10 +891,8 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
if (test_bit(1, (unsigned long *)&ret))
tasklet_schedule(&g_vmbus_drv.event_dpc);
- DPRINT_EXIT(VMBUS_DRV);
return IRQ_HANDLED;
} else {
- DPRINT_EXIT(VMBUS_DRV);
return IRQ_NONE;
}
}
@@ -994,10 +912,6 @@ MODULE_DEVICE_TABLE(dmi, microsoft_hv_dmi_table);
static int __init vmbus_init(void)
{
- int ret = 0;
-
- DPRINT_ENTER(VMBUS_DRV);
-
DPRINT_INFO(VMBUS_DRV,
"Vmbus initializing.... current log level 0x%x (%x,%x)",
vmbus_loglevel, HIWORD(vmbus_loglevel), LOWORD(vmbus_loglevel));
@@ -1006,20 +920,13 @@ static int __init vmbus_init(void)
if (!dmi_check_system(microsoft_hv_dmi_table))
return -ENODEV;
- ret = vmbus_bus_init(VmbusInitialize);
-
- DPRINT_EXIT(VMBUS_DRV);
- return ret;
+ return vmbus_bus_init(VmbusInitialize);
}
static void __exit vmbus_exit(void)
{
- DPRINT_ENTER(VMBUS_DRV);
-
vmbus_bus_exit();
/* Todo: it is used for loglevel, to be ported to new kernel. */
- DPRINT_EXIT(VMBUS_DRV);
- return;
}
/*
@@ -1028,7 +935,7 @@ static void __exit vmbus_exit(void)
* installed and/or configured. We don't do anything else with the table, but
* it needs to be present.
*/
-const static struct pci_device_id microsoft_hv_pci_table[] = {
+static const struct pci_device_id microsoft_hv_pci_table[] = {
{ PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
{ 0 }
};
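
The final hunk reorders "const static" to "static const" for the PCI ID table. Both spellings are legal C, but kernel style wants the storage-class specifier first, and checkpatch warns about the reversed order. A tiny self-contained sketch of the two forms (the example arrays are invented, only the specifier ordering matters):

#include <stdio.h>

static const int table_preferred[] = { 1, 2, 3 };	/* kernel style */
const static int table_flagged[]   = { 1, 2, 3 };	/* legal, but flagged */

int main(void)
{
	printf("%d %d\n", table_preferred[0], table_flagged[0]);
	return 0;
}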
diff --git a/drivers/staging/iio/Documentation/overview.txt b/drivers/staging/iio/Documentation/overview.txt
index e39dfc1705a..cc6ecad4035 100644
--- a/drivers/staging/iio/Documentation/overview.txt
+++ b/drivers/staging/iio/Documentation/overview.txt
@@ -44,7 +44,7 @@ which the raw data it self may be read back.
applications it it useful to be able to capture data based on some
external signal (trigger). These triggers might be a data ready
signal, a gpio line connected to some external system or an on
-processor periodic interrupt. A single trigger many initialize data
+processor periodic interrupt. A single trigger may initialize data
capture or reading from a number of sensors. These triggers are
used in iio to fill software ring buffers acting in a very similar
fashion to the hardware buffers described above.
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index b0e62449c62..ed48815a916 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -21,6 +21,7 @@ config IIO_RING_BUFFER
if IIO_RING_BUFFER
config IIO_SW_RING
+ select IIO_TRIGGER
tristate "Industrial I/O lock free software ring"
help
Example software ring buffer implementation. The design aim
@@ -44,6 +45,7 @@ source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/imu/Kconfig"
source "drivers/staging/iio/light/Kconfig"
+source "drivers/staging/iio/magnetometer/Kconfig"
source "drivers/staging/iio/trigger/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 3502b39f084..e909674920f 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -14,5 +14,5 @@ obj-y += adc/
obj-y += gyro/
obj-y += imu/
obj-y += light/
-
-obj-y += trigger/
\ No newline at end of file
+obj-y += trigger/
+obj-y += magnetometer/
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 15da0c2bb78..898cba1c939 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -66,4 +66,4 @@ Documentation
2) Some device require indvidual docs.
Contact: Jonathan Cameron <jic23@cam.ac.uk>.
-Mailing list: LKML.
+Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index b4e57d1bc87..5926c03be1a 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -4,29 +4,29 @@
comment "Accelerometers"
config ADIS16209
- tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
- help
- Say yes here to build support for Analog Devices adis16209 dual-axis digital inclinometer
- and accelerometer.
+ tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ and accelerometer.
config ADIS16220
- tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices adis16220 programmable
- digital vibration sensor.
+ tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices adis16220 programmable
+ digital vibration sensor.
config ADIS16240
- tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
- depends on SPI
- select IIO_TRIGGER if IIO_RING_BUFFER
- select IIO_SW_RING if IIO_RING_BUFFER
- help
- Say yes here to build support for Analog Devices adis16240 programmable
- impact Sensor and recorder.
+ tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16240 programmable
+ impact Sensor and recorder.
config KXSD9
tristate "Kionix KXSD9 Accelerometer Driver"
@@ -46,9 +46,9 @@ config LIS3L02DQ
and an event interface via a character device.
config SCA3000
- depends on IIO_RING_BUFFER
- depends on SPI
- tristate "VTI SCA3000 series accelerometers"
- help
- Say yes here to build support for the VTI SCA3000 series of SPI
- accelerometers. These devices use a hardware ring buffer.
\ No newline at end of file
+ depends on IIO_RING_BUFFER
+ depends on SPI
+ tristate "VTI SCA3000 series accelerometers"
+ help
+ Say yes here to build support for the VTI SCA3000 series of SPI
+ accelerometers. These devices use a hardware ring buffer.
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index c34b13634c2..ff84703a16f 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for industrial I/O accelerometer drivers
#
+
adis16209-y := adis16209_core.o
adis16209-$(CONFIG_IIO_RING_BUFFER) += adis16209_ring.o adis16209_trigger.o
obj-$(CONFIG_ADIS16209) += adis16209.o
@@ -19,4 +20,4 @@ lis3l02dq-$(CONFIG_IIO_RING_BUFFER) += lis3l02dq_ring.o
obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
sca3000-y := sca3000_core.o sca3000_ring.o
-obj-$(CONFIG_SCA3000) += sca3000.o
\ No newline at end of file
+obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 877fd2a4838..4e97596620e 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -105,8 +105,6 @@
* struct adis16209_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
- * @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
@@ -117,7 +115,6 @@
struct adis16209_state {
struct spi_device *us;
struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_thresh;
s64 last_timestamp;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
@@ -129,16 +126,15 @@ struct adis16209_state {
int adis16209_set_irq(struct device *dev, bool enable);
#ifdef CONFIG_IIO_RING_BUFFER
-enum adis16209_scan {
- ADIS16209_SCAN_SUPPLY,
- ADIS16209_SCAN_ACC_X,
- ADIS16209_SCAN_ACC_Y,
- ADIS16209_SCAN_AUX_ADC,
- ADIS16209_SCAN_TEMP,
- ADIS16209_SCAN_INCLI_X,
- ADIS16209_SCAN_INCLI_Y,
- ADIS16209_SCAN_ROT,
-};
+
+#define ADIS16209_SCAN_SUPPLY 0
+#define ADIS16209_SCAN_ACC_X 1
+#define ADIS16209_SCAN_ACC_Y 2
+#define ADIS16209_SCAN_AUX_ADC 3
+#define ADIS16209_SCAN_TEMP 4
+#define ADIS16209_SCAN_INCLI_X 5
+#define ADIS16209_SCAN_INCLI_Y 6
+#define ADIS16209_SCAN_ROT 7
void adis16209_remove_trigger(struct iio_dev *indio_dev);
int adis16209_probe_trigger(struct iio_dev *indio_dev);
@@ -150,8 +146,6 @@ ssize_t adis16209_read_data_from_ring(struct device *dev,
int adis16209_configure_ring(struct iio_dev *indio_dev);
void adis16209_unconfigure_ring(struct iio_dev *indio_dev);
-int adis16209_initialize_ring(struct iio_ring_buffer *ring);
-void adis16209_uninitialize_ring(struct iio_ring_buffer *ring);
#else /* CONFIG_IIO_RING_BUFFER */
static inline void adis16209_remove_trigger(struct iio_dev *indio_dev)
@@ -180,14 +174,5 @@ static inline void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-static inline int adis16209_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-
-static inline void adis16209_uninitialize_ring(struct iio_ring_buffer *ring)
-{
-}
-
#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* SPI_ADIS16209_H_ */
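
The scan indices above change from an enum into plain #defines; in either form they act as bit positions, which is how the lis3l02dq ring code later in this diff uses them (scan_mask & (1 << i)). A small user-space sketch of that usage, with made-up SCAN_* stand-ins for the ADIS16209_SCAN_* values:

#include <stdio.h>

#define SCAN_SUPPLY 0
#define SCAN_ACC_X  1
#define SCAN_ACC_Y  2

int main(void)
{
	unsigned long scan_mask = 0;

	/* Enable two channels by setting their bits. */
	scan_mask |= 1UL << SCAN_ACC_X;
	scan_mask |= 1UL << SCAN_ACC_Y;

	/* A capture loop would skip channels whose bit is clear. */
	printf("supply enabled: %d\n", !!(scan_mask & (1UL << SCAN_SUPPLY)));
	printf("acc_x  enabled: %d\n", !!(scan_mask & (1UL << SCAN_ACC_X)));
	return 0;
}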
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index ac375c50f56..6c6923f2eaa 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -14,12 +14,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
#include "accel.h"
#include "inclinometer.h"
#include "../gyro/gyro.h"
@@ -76,11 +77,13 @@ static int adis16209_spi_write_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
+ .delay_usecs = 30,
}, {
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
+ .delay_usecs = 30,
},
};
@@ -120,13 +123,13 @@ static int adis16209_spi_read_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 20,
+ .delay_usecs = 30,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 20,
+ .delay_usecs = 30,
},
};
@@ -518,7 +521,7 @@ static int __devinit adis16209_probe(struct spi_device *spi)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = adis16209_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -550,7 +553,7 @@ error_unregister_line:
if (spi->irq)
iio_unregister_interrupt_line(st->indio_dev, 0);
error_uninitialize_ring:
- adis16209_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_unreg_ring_funcs:
adis16209_unconfigure_ring(st->indio_dev);
error_free_dev:
@@ -579,7 +582,7 @@ static int adis16209_remove(struct spi_device *spi)
if (spi->irq)
iio_unregister_interrupt_line(indio_dev, 0);
- adis16209_uninitialize_ring(indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16209_unconfigure_ring(indio_dev);
kfree(st->tx);
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 533e2857491..25fde659d09 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -16,16 +17,6 @@
#include "../trigger.h"
#include "adis16209.h"
-/**
- * combine_8_to_16() utility function to munge to u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
- return _lower | (_upper << 8);
-}
-
static IIO_SCAN_EL_C(supply, ADIS16209_SCAN_SUPPLY, IIO_UNSIGNED(14),
ADIS16209_SUPPLY_OUT, NULL);
static IIO_SCAN_EL_C(accel_x, ADIS16209_SCAN_ACC_X, IIO_SIGNED(14),
@@ -67,10 +58,10 @@ static struct attribute_group adis16209_scan_el_group = {
* adis16209_poll_func_th() top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void adis16209_poll_func_th(struct iio_dev *indio_dev)
+static void adis16209_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
+ st->last_timestamp = time;
schedule_work(&st->work_trigger_to_ring);
}
@@ -138,10 +129,9 @@ static void adis16209_trigger_bh_to_ring(struct work_struct *work_s)
if (st->indio_dev->scan_count)
if (adis16209_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
- for (; i < st->indio_dev->scan_count; i++) {
- data[i] = combine_8_to_16(st->rx[i*2+1],
- st->rx[i*2]);
- }
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (st->indio_dev->scan_timestamp)
@@ -157,50 +147,6 @@ static void adis16209_trigger_bh_to_ring(struct work_struct *work_s)
return;
}
-/* in these circumstances is it better to go with unaligned packing and
- * deal with the cost?*/
-static int adis16209_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count)
- /* Timestamp (aligned to s64) and data */
- size = (((indio_dev->scan_count * sizeof(s16))
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
-}
-
-static int adis16209_data_rdy_ring_postenable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
-static int adis16209_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
{
kfree(indio_dev->pollfunc);
@@ -235,18 +181,16 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
indio_dev->ring = ring;
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &adis16209_data_rdy_ring_preenable;
- ring->postenable = &adis16209_data_rdy_ring_postenable;
- ring->predisable = &adis16209_data_rdy_ring_predisable;
+ ring->bpe = 2;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
ring->owner = THIS_MODULE;
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;;
- }
- indio_dev->pollfunc->poll_func_main = &adis16209_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16209_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -254,13 +198,3 @@ error_iio_sw_rb_free:
iio_sw_rb_free(indio_dev->ring);
return ret;
}
-
-int adis16209_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void adis16209_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
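
The ring hunks above replace the open-coded combine_8_to_16() helper with be16_to_cpup(), i.e. the samples read back over SPI are treated as big-endian 16-bit words. A user-space sketch of the same conversion (be16_to_host is an invented name; the kernel helper byte-swaps only on little-endian hosts, while the expression below is endian-neutral):

#include <stdint.h>
#include <stdio.h>

/* Interpret two raw bus bytes, most significant byte first, as a
 * host-order 16-bit value.  The removed combine_8_to_16(lower, upper)
 * helper spelled out the same thing by hand. */
static uint16_t be16_to_host(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	uint8_t rx[2] = { 0x12, 0x34 };	/* bytes as they arrive on the bus */

	printf("0x%04x\n", be16_to_host(rx));	/* prints 0x1234 */
	return 0;
}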
diff --git a/drivers/staging/iio/accel/adis16209_trigger.c b/drivers/staging/iio/accel/adis16209_trigger.c
index 4a0507c9a13..1487effa2e3 100644
--- a/drivers/staging/iio/accel/adis16209_trigger.c
+++ b/drivers/staging/iio/accel/adis16209_trigger.c
@@ -23,8 +23,7 @@ static int adis16209_data_rdy_trig_poll(struct iio_dev *dev_info,
struct adis16209_state *st = iio_dev_get_devdata(dev_info);
struct iio_trigger *trig = st->trig;
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, timestamp);
return IRQ_HANDLED;
}
@@ -83,14 +82,13 @@ int adis16209_probe_trigger(struct iio_dev *indio_dev)
struct adis16209_state *st = indio_dev->dev_data;
st->trig = iio_allocate_trigger();
- st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16209-dev%d",
+ indio_dev->id);
if (!st->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)st->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "adis16209-dev%d", indio_dev->id);
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
st->trig->private_data = st;
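
The trigger hunk swaps a fixed-size kmalloc() plus snprintf() for a single kasprintf(), which sizes the allocation from the format string. A user-space analogue using GNU asprintf(), with the same "adis16209-dev%d" name format as the driver and an invented id value:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int id = 0;		/* stand-in for indio_dev->id */
	char *name = NULL;

	/* One call allocates and formats; no IIO_TRIGGER_NAME_LENGTH
	 * guess needed.  The kernel version is freed with kfree() on
	 * the trigger teardown path. */
	if (asprintf(&name, "adis16209-dev%d", id) < 0)
		return 1;
	printf("%s\n", name);
	free(name);
	return 0;
}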
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
index 2abf4850b37..7013314a9d7 100644
--- a/drivers/staging/iio/accel/adis16220.h
+++ b/drivers/staging/iio/accel/adis16220.h
@@ -127,7 +127,6 @@
* struct adis16220_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
* @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 6de439fd167..bb7d76539cd 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -14,7 +14,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -72,13 +72,13 @@ static int adis16220_spi_write_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
}, {
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
},
};
@@ -118,13 +118,13 @@ static int adis16220_spi_read_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
},
};
@@ -291,9 +291,9 @@ static int adis16220_check_status(struct device *dev)
if (status & ADIS16220_DIAG_STAT_FLASH_UPT)
dev_err(dev, "Flash update failed\n");
if (status & ADIS16220_DIAG_STAT_POWER_HIGH)
- dev_err(dev, "Power supply above 5.25V\n");
+ dev_err(dev, "Power supply above 3.625V\n");
if (status & ADIS16220_DIAG_STAT_POWER_LOW)
- dev_err(dev, "Power supply below 4.75V\n");
+ dev_err(dev, "Power supply below 3.15V\n");
error_ret:
return ret;
@@ -414,7 +414,7 @@ static ssize_t adis16220_capture_buffer_read(struct adis16220_state *st,
return count;
}
-static ssize_t adis16220_accel_bin_read(struct kobject *kobj,
+static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf,
loff_t off,
@@ -438,7 +438,7 @@ static struct bin_attribute accel_bin = {
.size = ADIS16220_CAPTURE_SIZE,
};
-static ssize_t adis16220_adc1_bin_read(struct kobject *kobj,
+static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
@@ -461,7 +461,7 @@ static struct bin_attribute adc1_bin = {
.size = ADIS16220_CAPTURE_SIZE,
};
-static ssize_t adis16220_adc2_bin_read(struct kobject *kobj,
+static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
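
The three capture-read hunks only change the callback signature (sysfs binary attributes now receive a struct file * as their first argument). Any such read still has to bound off/count against the fixed capture size; the sketch below shows the usual clamping in user space, with an invented CAPTURE_SIZE and buffer contents rather than the driver's actual helper:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define CAPTURE_SIZE 16		/* invented; the driver uses ADIS16220_CAPTURE_SIZE */

/* Bound a read at (off, count) so it never runs past the capture buffer. */
static ssize_t capture_read(char *dst, long off, size_t count)
{
	static const char capture[CAPTURE_SIZE] = "0123456789abcde";

	if (off < 0 || off >= CAPTURE_SIZE)
		return 0;			/* past the end: nothing to copy */
	if (count > (size_t)(CAPTURE_SIZE - off))
		count = CAPTURE_SIZE - off;	/* clamp to the buffer end */
	memcpy(dst, capture + off, count);
	return (ssize_t)count;
}

int main(void)
{
	char buf[8];

	printf("%zd bytes\n", capture_read(buf, 12, sizeof(buf)));	/* 4 */
	return 0;
}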
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index dcff43c7523..51a807dde27 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -127,7 +127,6 @@
* struct adis16240_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
* @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
@@ -139,7 +138,6 @@
struct adis16240_state {
struct spi_device *us;
struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_thresh;
s64 last_timestamp;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
@@ -155,14 +153,12 @@ int adis16240_set_irq(struct device *dev, bool enable);
* filling. This may change!
*/
-enum adis16240_scan {
- ADIS16240_SCAN_SUPPLY,
- ADIS16240_SCAN_ACC_X,
- ADIS16240_SCAN_ACC_Y,
- ADIS16240_SCAN_ACC_Z,
- ADIS16240_SCAN_AUX_ADC,
- ADIS16240_SCAN_TEMP,
-};
+#define ADIS16240_SCAN_SUPPLY 0
+#define ADIS16240_SCAN_ACC_X 1
+#define ADIS16240_SCAN_ACC_Y 2
+#define ADIS16240_SCAN_ACC_Z 3
+#define ADIS16240_SCAN_AUX_ADC 4
+#define ADIS16240_SCAN_TEMP 5
void adis16240_remove_trigger(struct iio_dev *indio_dev);
int adis16240_probe_trigger(struct iio_dev *indio_dev);
@@ -175,8 +171,6 @@ ssize_t adis16240_read_data_from_ring(struct device *dev,
int adis16240_configure_ring(struct iio_dev *indio_dev);
void adis16240_unconfigure_ring(struct iio_dev *indio_dev);
-int adis16240_initialize_ring(struct iio_ring_buffer *ring);
-void adis16240_uninitialize_ring(struct iio_ring_buffer *ring);
#else /* CONFIG_IIO_RING_BUFFER */
static inline void adis16240_remove_trigger(struct iio_dev *indio_dev)
@@ -205,14 +199,5 @@ static inline void adis16240_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-static inline int adis16240_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-
-static inline void adis16240_uninitialize_ring(struct iio_ring_buffer *ring)
-{
-}
-
#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* SPI_ADIS16240_H_ */
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 54fd6d77412..3e9531dd000 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -14,12 +14,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
#include "accel.h"
#include "../adc/adc.h"
@@ -74,13 +75,13 @@ static int adis16240_spi_write_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
}, {
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
},
};
@@ -120,13 +121,13 @@ static int adis16240_spi_read_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
},
};
@@ -502,7 +503,7 @@ static int __devinit adis16240_probe(struct spi_device *spi)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = adis16240_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -534,7 +535,7 @@ error_unregister_line:
if (spi->irq)
iio_unregister_interrupt_line(st->indio_dev, 0);
error_uninitialize_ring:
- adis16240_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_unreg_ring_funcs:
adis16240_unconfigure_ring(st->indio_dev);
error_free_dev:
@@ -563,7 +564,7 @@ static int adis16240_remove(struct spi_device *spi)
if (spi->irq)
iio_unregister_interrupt_line(indio_dev, 0);
- adis16240_uninitialize_ring(indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16240_unconfigure_ring(indio_dev);
kfree(st->tx);
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index 26b677bd84c..cd69a2e2bb9 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -16,16 +17,6 @@
#include "../trigger.h"
#include "adis16240.h"
-/**
- * combine_8_to_16() utility function to munge to u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
- return _lower | (_upper << 8);
-}
-
static IIO_SCAN_EL_C(supply, ADIS16240_SCAN_SUPPLY, IIO_UNSIGNED(10),
ADIS16240_SUPPLY_OUT, NULL);
static IIO_SCAN_EL_C(accel_x, ADIS16240_SCAN_ACC_X, IIO_SIGNED(10),
@@ -61,10 +52,10 @@ static struct attribute_group adis16240_scan_el_group = {
* adis16240_poll_func_th() top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void adis16240_poll_func_th(struct iio_dev *indio_dev)
+static void adis16240_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
+ st->last_timestamp = time;
schedule_work(&st->work_trigger_to_ring);
}
@@ -130,10 +121,9 @@ static void adis16240_trigger_bh_to_ring(struct work_struct *work_s)
if (st->indio_dev->scan_count)
if (adis16240_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
- for (; i < st->indio_dev->scan_count; i++) {
- data[i] = combine_8_to_16(st->rx[i*2+1],
- st->rx[i*2]);
- }
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (st->indio_dev->scan_timestamp)
@@ -149,48 +139,6 @@ static void adis16240_trigger_bh_to_ring(struct work_struct *work_s)
return;
}
-static int adis16240_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count)
- /* Timestamp (aligned sizeof(s64) and data */
- size = (((indio_dev->scan_count * sizeof(s16))
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
-}
-
-static int adis16240_data_rdy_ring_postenable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
-static int adis16240_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
void adis16240_unconfigure_ring(struct iio_dev *indio_dev)
{
kfree(indio_dev->pollfunc);
@@ -223,18 +171,16 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
indio_dev->ring = ring;
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &adis16240_data_rdy_ring_preenable;
- ring->postenable = &adis16240_data_rdy_ring_postenable;
- ring->predisable = &adis16240_data_rdy_ring_predisable;
+ ring->bpe = 2;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
ring->owner = THIS_MODULE;
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;;
- }
- indio_dev->pollfunc->poll_func_main = &adis16240_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16240_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -243,12 +189,3 @@ error_iio_sw_rb_free:
return ret;
}
-int adis16240_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void adis16240_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
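
Both adis ring files drop their driver-local preenable/postenable/predisable callbacks in favour of the generic iio_sw_ring_preenable(), iio_triggered_ring_postenable() and iio_triggered_ring_predisable(), with bpe set to 2 (two bytes per scan element). The removed preenable's size arithmetic, pad the 16-bit samples to an 8-byte boundary and then append the s64 timestamp, is easy to lose in the diff; a user-space sketch of the same calculation (datum_size is an invented name):

#include <stdint.h>
#include <stdio.h>

static size_t datum_size(unsigned int scan_count, int scan_timestamp)
{
	size_t size;

	if (scan_timestamp) {
		if (scan_count)
			/* samples rounded up to 8 bytes, plus the timestamp */
			size = ((scan_count * sizeof(int16_t) + sizeof(int64_t) - 1)
				& ~(sizeof(int64_t) - 1)) + sizeof(int64_t);
		else
			size = sizeof(int64_t);		/* timestamp only */
	} else {
		size = scan_count * sizeof(int16_t);	/* data only */
	}
	return size;
}

int main(void)
{
	/* 5 samples -> 10 bytes -> padded to 16, plus 8 for the timestamp. */
	printf("%zu\n", datum_size(5, 1));	/* 24 */
	printf("%zu\n", datum_size(5, 0));	/* 10 */
	return 0;
}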
diff --git a/drivers/staging/iio/accel/adis16240_trigger.c b/drivers/staging/iio/accel/adis16240_trigger.c
index df1312e17f4..2ba71fd73a4 100644
--- a/drivers/staging/iio/accel/adis16240_trigger.c
+++ b/drivers/staging/iio/accel/adis16240_trigger.c
@@ -23,8 +23,7 @@ static int adis16240_data_rdy_trig_poll(struct iio_dev *dev_info,
struct adis16240_state *st = iio_dev_get_devdata(dev_info);
struct iio_trigger *trig = st->trig;
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, timestamp);
return IRQ_HANDLED;
}
@@ -83,14 +82,13 @@ int adis16240_probe_trigger(struct iio_dev *indio_dev)
struct adis16240_state *st = indio_dev->dev_data;
st->trig = iio_allocate_trigger();
- st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16240-dev%d",
+ indio_dev->id);
if (!st->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)st->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "adis16240-dev%d", indio_dev->id);
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
st->trig->private_data = st;
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index ae7ffe114fc..79f57950ebe 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -16,17 +16,11 @@
* heavily optimized ring buffer access function.
*/
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/fs.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
-#include <linux/rtc.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/string.h>
#include "../iio.h"
#include "../sysfs.h"
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index e76a97937a3..6e730553fca 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -148,30 +148,31 @@ Form of high byte dependant on justification set in ctrl reg */
#define LIS3L02DQ_MAX_RX 12
/**
* struct lis3l02dq_state - device instance specific data
+ * @helper: data and func pointer allowing generic functions
* @us: actual spi_device
- * @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
+ * @work_thresh: bh for threshold events
+ * @thresh_timestamp: timestamp for threshold interrupts.
* @inter: used to check if new interrupt has been triggered
- * @last_timestamp: passing timestamp from th to bh of interrupt handler
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: recieve buffer
* @buf_lock: mutex to protect tx and rx
**/
struct lis3l02dq_state {
+ struct iio_sw_ring_helper_state help;
struct spi_device *us;
- struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_thresh;
+ struct work_struct work_thresh;
+ s64 thresh_timestamp;
bool inter;
- s64 last_timestamp;
- struct iio_dev *indio_dev;
struct iio_trigger *trig;
u8 *tx;
u8 *rx;
struct mutex buf_lock;
};
+#define lis3l02dq_h_to_s(_h) \
+ container_of(_h, struct lis3l02dq_state, help)
+
int lis3l02dq_spi_read_reg_8(struct device *dev,
u8 reg_address,
u8 *val);
@@ -195,15 +196,15 @@ ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
int lis3l02dq_configure_ring(struct iio_dev *indio_dev);
void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev);
-int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring);
-void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring);
#else /* CONFIG_IIO_RING_BUFFER */
-static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev) {};
+static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
+{
+}
static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
-};
+}
static inline ssize_t
lis3l02dq_read_accel_from_ring(struct device *dev,
@@ -211,18 +212,14 @@ lis3l02dq_read_accel_from_ring(struct device *dev,
char *buf)
{
return 0;
-};
+}
static int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
return 0;
-};
+}
static inline void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
-{};
-static inline int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
{
- return 0;
-};
-static inline void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring) {};
+}
#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* SPI_LIS3L02DQ_H_ */
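
The new lis3l02dq_h_to_s() macro is the usual container_of trick: callbacks are handed a pointer to the embedded iio_sw_ring_helper_state and recover the enclosing driver state from it. A self-contained user-space sketch with stand-in struct and field names:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of, same idea as the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct helper {			/* stand-in for iio_sw_ring_helper_state */
	int placeholder;
};

struct state {			/* stand-in for lis3l02dq_state */
	struct helper help;	/* embedded member, as in the header above */
	int private_value;
};

#define state_from_helper(h) container_of(h, struct state, help)

int main(void)
{
	struct state st = { .private_value = 42 };
	struct helper *h = &st.help;	/* what the generic helper code sees */

	printf("%d\n", state_from_helper(h)->private_value);	/* 42 */
	return 0;
}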
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 6b5577d7d8d..0ee93373754 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -27,6 +27,9 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
+#include "../ring_sw.h"
+
#include "accel.h"
#include "lis3l02dq.h"
@@ -47,7 +50,9 @@ int lis3l02dq_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
+
struct spi_transfer xfer = {
.tx_buf = st->tx,
.rx_buf = st->rx,
@@ -82,7 +87,9 @@ int lis3l02dq_spi_write_reg_8(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
struct spi_transfer xfer = {
.tx_buf = st->tx,
.bits_per_word = 8,
@@ -96,7 +103,7 @@ int lis3l02dq_spi_write_reg_8(struct device *dev,
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync(st->us, &msg);
mutex_unlock(&st->buf_lock);
return ret;
@@ -116,7 +123,9 @@ static int lis3l02dq_spi_write_reg_s16(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
struct spi_transfer xfers[] = { {
.tx_buf = st->tx,
.bits_per_word = 8,
@@ -158,7 +167,9 @@ static int lis3l02dq_spi_read_reg_s16(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
int ret;
struct spi_transfer xfers[] = { {
.tx_buf = st->tx,
@@ -411,7 +422,7 @@ static int lis3l02dq_initial_setup(struct lis3l02dq_state *st)
val = LIS3L02DQ_DEFAULT_CTRL1;
/* Write suitable defaults to ctrl1 */
- ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ ret = lis3l02dq_spi_write_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
&val);
if (ret) {
@@ -419,7 +430,7 @@ static int lis3l02dq_initial_setup(struct lis3l02dq_state *st)
goto err_ret;
}
/* Repeat as sometimes doesn't work first time?*/
- ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ ret = lis3l02dq_spi_write_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
&val);
if (ret) {
@@ -429,17 +440,17 @@ static int lis3l02dq_initial_setup(struct lis3l02dq_state *st)
/* Read back to check this has worked acts as loose test of correct
* chip */
- ret = lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ ret = lis3l02dq_spi_read_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
&valtest);
if (ret || (valtest != val)) {
- dev_err(&st->indio_dev->dev, "device not playing ball");
+ dev_err(&st->help.indio_dev->dev, "device not playing ball");
ret = -EINVAL;
goto err_ret;
}
val = LIS3L02DQ_DEFAULT_CTRL2;
- ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ ret = lis3l02dq_spi_write_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_CTRL_2_ADDR,
&val);
if (ret) {
@@ -448,7 +459,7 @@ static int lis3l02dq_initial_setup(struct lis3l02dq_state *st)
}
val = LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC;
- ret = lis3l02dq_spi_write_reg_8(&st->indio_dev->dev,
+ ret = lis3l02dq_spi_write_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
&val);
if (ret)
@@ -524,8 +535,7 @@ static ssize_t lis3l02dq_read_interrupt_config(struct device *dev,
LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
(u8 *)&val);
- return ret ? ret : sprintf(buf, "%d\n",
- (val & this_attr->mask) ? 1 : 0);;
+ return ret ? ret : sprintf(buf, "%d\n", !!(val & this_attr->mask));
}
static ssize_t lis3l02dq_write_interrupt_config(struct device *dev,
@@ -595,16 +605,18 @@ error_mutex_unlock:
}
-static int lis3l02dq_thresh_handler_th(struct iio_dev *dev_info,
+static int lis3l02dq_thresh_handler_th(struct iio_dev *indio_dev,
int index,
s64 timestamp,
int no_test)
{
- struct lis3l02dq_state *st = dev_info->dev_data;
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
/* Stash the timestamp somewhere convenient for the bh */
- st->last_timestamp = timestamp;
- schedule_work(&st->work_cont_thresh.ws);
+ st->thresh_timestamp = timestamp;
+ schedule_work(&st->work_thresh);
return 0;
}
@@ -615,48 +627,49 @@ static int lis3l02dq_thresh_handler_th(struct iio_dev *dev_info,
*/
static void lis3l02dq_thresh_handler_bh_no_check(struct work_struct *work_s)
{
- struct iio_work_cont *wc
- = container_of(work_s, struct iio_work_cont, ws);
- struct lis3l02dq_state *st = wc->st;
+ struct lis3l02dq_state *st
+ = container_of(work_s,
+ struct lis3l02dq_state, work_thresh);
+
u8 t;
- lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ lis3l02dq_spi_read_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
&t);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(st->help.indio_dev, 0,
IIO_EVENT_CODE_ACCEL_Z_HIGH,
- st->last_timestamp);
+ st->thresh_timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(st->help.indio_dev, 0,
IIO_EVENT_CODE_ACCEL_Z_LOW,
- st->last_timestamp);
+ st->thresh_timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(st->help.indio_dev, 0,
IIO_EVENT_CODE_ACCEL_Y_HIGH,
- st->last_timestamp);
+ st->thresh_timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(st->help.indio_dev, 0,
IIO_EVENT_CODE_ACCEL_Y_LOW,
- st->last_timestamp);
+ st->thresh_timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(st->help.indio_dev, 0,
IIO_EVENT_CODE_ACCEL_X_HIGH,
- st->last_timestamp);
+ st->thresh_timestamp);
if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(st->help.indio_dev, 0,
IIO_EVENT_CODE_ACCEL_X_LOW,
- st->last_timestamp);
+ st->thresh_timestamp);
/* reenable the irq */
enable_irq(st->us->irq);
/* Ack and allow for new interrupts */
- lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ lis3l02dq_spi_read_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_WAKE_UP_ACK_ADDR,
&t);
@@ -750,6 +763,7 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
ret = -ENOMEM;
goto error_ret;
}
+ INIT_WORK(&st->work_thresh, lis3l02dq_thresh_handler_bh_no_check);
/* this is only used tor removal purposes */
spi_set_drvdata(spi, st);
@@ -767,56 +781,46 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
st->us = spi;
mutex_init(&st->buf_lock);
/* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device();
- if (st->indio_dev == NULL) {
+ st->help.indio_dev = iio_allocate_device();
+ if (st->help.indio_dev == NULL) {
ret = -ENOMEM;
goto error_free_tx;
}
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->num_interrupt_lines = 1;
- st->indio_dev->event_attrs = &lis3l02dq_event_attribute_group;
- st->indio_dev->attrs = &lis3l02dq_attribute_group;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->driver_module = THIS_MODULE;
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ st->help.indio_dev->dev.parent = &spi->dev;
+ st->help.indio_dev->num_interrupt_lines = 1;
+ st->help.indio_dev->event_attrs = &lis3l02dq_event_attribute_group;
+ st->help.indio_dev->attrs = &lis3l02dq_attribute_group;
+ st->help.indio_dev->dev_data = (void *)(&st->help);
+ st->help.indio_dev->driver_module = THIS_MODULE;
+ st->help.indio_dev->modes = INDIO_DIRECT_MODE;
- ret = lis3l02dq_configure_ring(st->indio_dev);
+ ret = lis3l02dq_configure_ring(st->help.indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(st->help.indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = lis3l02dq_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->help.indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
}
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
- /* This is a little unusual, in that the device seems
- to need a full read of the interrupt source reg before
- the interrupt will reset.
- Hence the two handlers are the same */
- iio_init_work_cont(&st->work_cont_thresh,
- lis3l02dq_thresh_handler_bh_no_check,
- lis3l02dq_thresh_handler_bh_no_check,
- LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
- 0,
- st);
st->inter = 0;
ret = iio_register_interrupt_line(spi->irq,
- st->indio_dev,
+ st->help.indio_dev,
0,
IRQF_TRIGGER_RISING,
"lis3l02dq");
if (ret)
goto error_uninitialize_ring;
- ret = lis3l02dq_probe_trigger(st->indio_dev);
+ ret = lis3l02dq_probe_trigger(st->help.indio_dev);
if (ret)
goto error_unregister_line;
}
@@ -828,20 +832,20 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
return 0;
error_remove_trigger:
- if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
- lis3l02dq_remove_trigger(st->indio_dev);
+ if (st->help.indio_dev->modes & INDIO_RING_TRIGGERED)
+ lis3l02dq_remove_trigger(st->help.indio_dev);
error_unregister_line:
- if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
- iio_unregister_interrupt_line(st->indio_dev, 0);
+ if (st->help.indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->help.indio_dev, 0);
error_uninitialize_ring:
- lis3l02dq_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->help.indio_dev->ring);
error_unreg_ring_funcs:
- lis3l02dq_unconfigure_ring(st->indio_dev);
+ lis3l02dq_unconfigure_ring(st->help.indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(st->help.indio_dev);
else
- iio_free_device(st->indio_dev);
+ iio_free_device(st->help.indio_dev);
error_free_tx:
kfree(st->tx);
error_free_rx:
@@ -856,7 +860,9 @@ error_ret:
static int lis3l02dq_stop_device(struct iio_dev *indio_dev)
{
int ret;
- struct lis3l02dq_state *st = indio_dev->dev_data;
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
u8 val = 0;
mutex_lock(&indio_dev->mlock);
@@ -883,7 +889,7 @@ static int lis3l02dq_remove(struct spi_device *spi)
{
int ret;
struct lis3l02dq_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = st->help.indio_dev;
ret = lis3l02dq_stop_device(indio_dev);
if (ret)
@@ -895,7 +901,7 @@ static int lis3l02dq_remove(struct spi_device *spi)
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
iio_unregister_interrupt_line(indio_dev, 0);
- lis3l02dq_uninitialize_ring(indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
lis3l02dq_unconfigure_ring(indio_dev);
iio_device_unregister(indio_dev);
kfree(st->tx);
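
One small hunk above also rewrites the interrupt-config read as sprintf(buf, "%d\n", !!(val & this_attr->mask)): double negation collapses any non-zero mask test to exactly 1, which is what the old ternary spelled out (and the stray double semicolon goes with it). A short sketch of the idiom, with example values:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x40, mask = 0x40;

	/* !! normalises any non-zero value to 1; zero stays 0. */
	printf("%d\n", !!(val & mask));		/* 1 */
	printf("%d\n", !!(val & 0x02));		/* 0 */
	return 0;
}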
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index e4e202e6cb3..a960a8ff3c4 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -103,13 +103,15 @@ static struct attribute_group lis3l02dq_scan_el_group = {
* lis3l02dq_poll_func_th() top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev)
+static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
- struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
- schedule_work(&st->work_trigger_to_ring);
- /* Indicate that this interrupt is being handled */
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
+ /* in this case we need to slightly extend the helper function */
+ iio_sw_poll_func_th(indio_dev, time);
+ /* Indicate that this interrupt is being handled */
/* Technically this is trigger related, but without this
* handler running there is currently now way for the interrupt
* to clear.
@@ -120,16 +122,16 @@ static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev)
/**
* lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
**/
-static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *dev_info,
+static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *indio_dev,
int index,
s64 timestamp,
int no_test)
{
- struct lis3l02dq_state *st = iio_dev_get_devdata(dev_info);
- struct iio_trigger *trig = st->trig;
+ struct iio_sw_ring_helper_state *h
+ = iio_dev_get_devdata(indio_dev);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(st->trig, timestamp);
return IRQ_HANDLED;
}
@@ -213,7 +215,7 @@ static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
struct spi_message msg;
int ret, i, j = 0;
- xfers = kzalloc((st->indio_dev->scan_count) * 2
+ xfers = kzalloc((st->help.indio_dev->scan_count) * 2
* sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
@@ -221,7 +223,7 @@ static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
mutex_lock(&st->buf_lock);
for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
- if (st->indio_dev->scan_mask & (1 << i)) {
+ if (st->help.indio_dev->scan_mask & (1 << i)) {
/* lower byte */
xfers[j].tx_buf = st->tx + 2*j;
st->tx[2*j] = read_all_tx_array[i*4];
@@ -249,7 +251,7 @@ static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
* values in alternate bytes
*/
spi_message_init(&msg);
- for (j = 0; j < st->indio_dev->scan_count * 2; j++)
+ for (j = 0; j < st->help.indio_dev->scan_count * 2; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -259,102 +261,37 @@ static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
return ret;
}
-
-/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
- * specific to be rolled into the core.
- */
static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
{
- struct lis3l02dq_state *st
- = container_of(work_s, struct lis3l02dq_state,
- work_trigger_to_ring);
-
- u8 *rx_array;
- int i = 0;
- u16 *data;
- size_t datasize = st->indio_dev
- ->ring->access.get_bpd(st->indio_dev->ring);
-
- data = kmalloc(datasize , GFP_KERNEL);
- if (data == NULL) {
- dev_err(&st->us->dev, "memory alloc failed in ring bh");
- return;
- }
- /* Due to interleaved nature of transmission this buffer must be
- * twice the number of bytes, or 4 times the number of channels
- */
- rx_array = kmalloc(4 * (st->indio_dev->scan_count), GFP_KERNEL);
- if (rx_array == NULL) {
- dev_err(&st->us->dev, "memory alloc failed in ring bh");
- kfree(data);
- return;
- }
+ struct iio_sw_ring_helper_state *h
+ = container_of(work_s, struct iio_sw_ring_helper_state,
+ work_trigger_to_ring);
+ struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
- /* whilst trigger specific, if this read does nto occur the data
- ready interrupt will not be cleared. Need to add a mechanism
- to provide a dummy read function if this is not triggering on
- the data ready function but something else is.
- */
st->inter = 0;
-
- if (st->indio_dev->scan_count)
- if (lis3l02dq_read_all(st, rx_array) >= 0)
- for (; i < st->indio_dev->scan_count; i++)
- data[i] = combine_8_to_16(rx_array[i*4+1],
- rx_array[i*4+3]);
- /* Guaranteed to be aligned with 8 byte boundary */
- if (st->indio_dev->scan_timestamp)
- *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
-
- st->indio_dev->ring->access.store_to(st->indio_dev->ring,
- (u8 *)data,
- st->last_timestamp);
-
- iio_trigger_notify_done(st->indio_dev->trig);
- kfree(rx_array);
- kfree(data);
-
- return;
-}
-/* in these circumstances is it better to go with unaligned packing and
- * deal with the cost?*/
-static int lis3l02dq_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count) /* Timestamp and data */
- size = 2*sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
+ iio_sw_trigger_bh_to_ring(work_s);
}
-static int lis3l02dq_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+static int lis3l02dq_get_ring_element(struct iio_sw_ring_helper_state *h,
+ u8 *buf)
{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
+ int ret, i;
+	u8 *rx_array;
+ s16 *data = (s16 *)buf;
-static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
+ rx_array = kzalloc(4 * (h->indio_dev->scan_count), GFP_KERNEL);
+ if (rx_array == NULL)
+ return -ENOMEM;
+	ret = lis3l02dq_read_all(lis3l02dq_h_to_s(h), rx_array);
+	if (ret < 0) {
+		kfree(rx_array);
+		return ret;
+	}
+ for (i = 0; i < h->indio_dev->scan_count; i++)
+ data[i] = combine_8_to_16(rx_array[i*4+1],
+ rx_array[i*4+3]);
+ kfree(rx_array);
+ return i*sizeof(data[0]);
+}
/* Caller responsible for locking as necessary. */
static int
@@ -427,7 +364,7 @@ static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
struct lis3l02dq_state *st = trig->private_data;
int ret = 0;
u8 t;
- __lis3l02dq_write_data_ready_config(&st->indio_dev->dev,
+ __lis3l02dq_write_data_ready_config(&st->help.indio_dev->dev,
&iio_event_data_rdy_trig,
state);
if (state == false) {
@@ -437,7 +374,7 @@ static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
/* Clear any outstanding ready events */
ret = lis3l02dq_read_all(st, NULL);
}
- lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
+ lis3l02dq_spi_read_reg_8(&st->help.indio_dev->dev,
LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
&t);
return ret;
@@ -495,14 +432,14 @@ int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
if (!state->trig)
return -ENOMEM;
- state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ state->trig->name = kasprintf(GFP_KERNEL,
+ "lis3l02dq-dev%d",
+ indio_dev->id);
if (!state->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)state->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "lis3l02dq-dev%d", indio_dev->id);
+
state->trig->dev.parent = &state->us->dev;
state->trig->owner = THIS_MODULE;
state->trig->private_data = state;
@@ -540,12 +477,12 @@ void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
- int ret = 0;
- struct lis3l02dq_state *st = indio_dev->dev_data;
- struct iio_ring_buffer *ring;
- INIT_WORK(&st->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);
- /* Set default scan mode */
+ int ret;
+ struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
+ INIT_WORK(&h->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);
+ /* Set default scan mode */
+ h->get_ring_element = &lis3l02dq_get_ring_element;
iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
@@ -553,26 +490,21 @@ int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
indio_dev->scan_el_attrs = &lis3l02dq_scan_el_group;
- ring = iio_sw_rb_allocate(indio_dev);
- if (!ring) {
- ret = -ENOMEM;
- return ret;
- }
- indio_dev->ring = ring;
+ indio_dev->ring = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->ring)
+ return -ENOMEM;
+
/* Effectively select the ring buffer implementation */
- iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &lis3l02dq_data_rdy_ring_preenable;
- ring->postenable = &lis3l02dq_data_rdy_ring_postenable;
- ring->predisable = &lis3l02dq_data_rdy_ring_predisable;
- ring->owner = THIS_MODULE;
-
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
+ iio_ring_sw_register_funcs(&indio_dev->ring->access);
+ indio_dev->ring->bpe = 2;
+ indio_dev->ring->preenable = &iio_sw_ring_preenable;
+ indio_dev->ring->postenable = &iio_triggered_ring_postenable;
+ indio_dev->ring->predisable = &iio_triggered_ring_predisable;
+ indio_dev->ring->owner = THIS_MODULE;
+
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &lis3l02dq_poll_func_th);
+ if (ret)
 		goto error_iio_sw_rb_free;
- }
- indio_dev->pollfunc->poll_func_main = &lis3l02dq_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -580,23 +512,3 @@ error_iio_sw_rb_free:
iio_sw_rb_free(indio_dev->ring);
return ret;
}
-
-int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
-
-int lis3l02dq_set_ring_length(struct iio_dev *indio_dev, int length)
-{
- /* Set sensible defaults for the ring buffer */
- if (indio_dev->ring->access.set_length)
- return indio_dev->ring->access.set_length(indio_dev->ring, 500);
- return 0;
-}
-
-
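
For orientation: after the conversion above, lis3l02dq only supplies the get_ring_element() callback and the generic software-ring helper drives the buffer fill. A minimal sketch of that flow, with the helper's internals (field names, timestamp handling) inferred from how this driver uses it rather than quoted from the helper code itself:

static void iio_sw_trigger_bh_to_ring_sketch(struct work_struct *work_s)
{
	struct iio_sw_ring_helper_state *h
		= container_of(work_s, struct iio_sw_ring_helper_state,
			       work_trigger_to_ring);
	size_t datasize = h->indio_dev->ring->access.get_bpd(h->indio_dev->ring);
	u8 *buf = kzalloc(datasize, GFP_KERNEL);
	int len;

	if (buf == NULL)
		return;
	/* driver callback fills in the scan data, returns bytes written */
	len = h->get_ring_element(h, buf);
	if (len >= 0) {
		if (h->indio_dev->scan_timestamp)
			/* timestamp stored 8-byte aligned after the data */
			*(s64 *)(buf + ALIGN(len, sizeof(s64)))
				= h->last_timestamp;
		h->indio_dev->ring->access.store_to(h->indio_dev->ring, buf,
						    h->last_timestamp);
	}
	iio_trigger_notify_done(h->indio_dev->trig);
	kfree(buf);
}
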
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index e5321999b26..09d9470bb9a 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -242,7 +242,7 @@ static inline int sca3000_11bit_convert(uint8_t msb, uint8_t lsb)
val |= (val & (1 << 12)) ? 0xE000 : 0;
return val;
-};
+}
static inline int sca3000_13bit_convert(uint8_t msb, uint8_t lsb)
{
@@ -253,7 +253,7 @@ static inline int sca3000_13bit_convert(uint8_t msb, uint8_t lsb)
val |= (val & (1 << 12)) ? 0xE000 : 0;
return val;
-};
+}
#ifdef CONFIG_IIO_RING_BUFFER
@@ -286,15 +286,19 @@ void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
void sca3000_ring_int_process(u8 val, struct iio_ring_buffer *ring);
#else
-static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev) {};
+static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
+{
+}
static inline
int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
{
return 0;
-};
+}
-static inline void sca3000_ring_int_process(u8 val, void *ring) {};
+static inline void sca3000_ring_int_process(u8 val, void *ring)
+{
+}
#endif
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index d4f82c39f33..b78b6b66ffe 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -387,7 +387,7 @@ sca3000_show_available_measurement_modes(struct device *dev,
case SCA3000_OP_MODE_BYPASS:
len += sprintf(buf + len, ", 1 - bypass mode");
break;
- };
+ }
switch (st->info->option_mode_2) {
case SCA3000_OP_MODE_WIDE:
len += sprintf(buf + len, ", 2 - wide mode");
@@ -433,7 +433,7 @@ sca3000_show_measurement_mode(struct device *dev,
case SCA3000_OP_MODE_BYPASS:
len += sprintf(buf + len, "1 - bypass mode\n");
break;
- };
+ }
break;
case SCA3000_MEAS_MODE_OP_2:
switch (st->info->option_mode_2) {
@@ -442,7 +442,7 @@ sca3000_show_measurement_mode(struct device *dev,
break;
}
break;
- };
+ }
error_ret:
mutex_unlock(&st->lock);
@@ -559,7 +559,7 @@ static ssize_t sca3000_read_av_freq(struct device *dev,
st->info->option_mode_2_freq/2,
st->info->option_mode_2_freq/4);
break;
- };
+ }
kfree(rx);
return len;
error_ret:
@@ -590,7 +590,7 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st,
case SCA3000_MEAS_MODE_OP_2:
*base_freq = info->option_mode_2_freq;
break;
- };
+ }
kfree(rx);
error_ret:
return ret;
@@ -627,8 +627,8 @@ static ssize_t sca3000_read_frequency(struct device *dev,
case 0x02:
len = sprintf(buf, "%d\n", base_freq/4);
break;
- };
- kfree(rx);
+ }
+ kfree(rx);
return len;
error_ret_mut:
mutex_unlock(&st->lock);
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 18c9376ecbb..688510fd8bb 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -1,4 +1,4 @@
-
+#
# Makefile for industrial I/O ADC drivers
#
diff --git a/drivers/staging/iio/adc/adc.h b/drivers/staging/iio/adc/adc.h
index 04eb16fd0a9..7841e6ad434 100644
--- a/drivers/staging/iio/adc/adc.h
+++ b/drivers/staging/iio/adc/adc.h
@@ -26,3 +26,6 @@
_show, \
NULL, \
_addr)
+
+#define IIO_EVENT_CODE_IN_HIGH_THRESH(a) (IIO_EVENT_CODE_ADC_BASE + a)
+#define IIO_EVENT_CODE_IN_LOW_THRESH(a) (IIO_EVENT_CODE_ADC_BASE + a + 32)
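
The two macros added here partition the ADC event-code space: high-threshold events take codes IIO_EVENT_CODE_ADC_BASE + channel, low-threshold events the same offset plus 32. A hypothetical use (the channel number and timestamp variable are placeholders):

	/* signal that input channel 2 dropped below its low threshold */
	iio_push_event(indio_dev, 0,
		       IIO_EVENT_CODE_IN_LOW_THRESH(2), /* == ADC_BASE + 2 + 32 */
		       timestamp);
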
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
index 72cf3670936..8f0fe1ced2c 100644
--- a/drivers/staging/iio/adc/max1363.h
+++ b/drivers/staging/iio/adc/max1363.h
@@ -32,14 +32,6 @@
/* Specific to the max1363 */
#define MAX1363_MON_RESET_CHAN(a) (1 << ((a) + 4))
-#define MAX1363_MON_CONV_RATE_133ksps 0
-#define MAX1363_MON_CONV_RATE_66_5ksps 0x02
-#define MAX1363_MON_CONV_RATE_33_3ksps 0x04
-#define MAX1363_MON_CONV_RATE_16_6ksps 0x06
-#define MAX1363_MON_CONV_RATE_8_3ksps 0x08
-#define MAX1363_MON_CONV_RATE_4_2ksps 0x0A
-#define MAX1363_MON_CONV_RATE_2_0ksps 0x0C
-#define MAX1363_MON_CONV_RATE_1_0ksps 0x0E
#define MAX1363_MON_INT_ENABLE 0x01
/* defined for readability reasons */
@@ -67,9 +59,8 @@
/**
* struct max1363_mode - scan mode information
- * @name: Name used to identify the scan mode.
* @conf: The corresponding value of the configuration register
- * @numvals: The number of values returned by a single scan
+ * @modemask: Bit mask corresponding to channels enabled in this mode
*/
struct max1363_mode {
int8_t conf;
@@ -122,15 +113,6 @@ struct max1363_mode {
.modemask = _mask \
}
-/* Not currently handled */
-#define MAX1363_MODE_MONITOR { \
- .name = "monitor", \
- .conf = MAX1363_CHANNEL_SEL(3) \
- | MAX1363_CONFIG_SCAN_MONITOR_MODE \
- | MAX1363_CONFIG_SE, \
- .numvals = 10, \
- }
-
 /* This may seem an overly long-winded way to do this, but at least it makes
  * clear what all the various options actually do. Alternative suggestions
  * that don't require the user to have intimate knowledge of the chip welcomed.
@@ -147,7 +129,7 @@ enum max1363_channels {
max1363_in1min0, max1363_in3min2,
max1363_in5min4, max1363_in7min6,
max1363_in9min8, max1363_in11min10,
- };
+};
/* This must be maintained along side the max1363_mode_table in max1363_core */
enum max1363_modes {
@@ -179,7 +161,6 @@ enum max1363_modes {
* @default_mode: the scan mode in which the chip starts up
*/
struct max1363_chip_info {
- const char *name;
u8 num_inputs;
u8 bits;
u16 int_vref_mv;
@@ -191,7 +172,6 @@ struct max1363_chip_info {
struct attribute_group *scan_attrs;
};
-
/**
* struct max1363_state - driver instance specific data
* @indio_dev: the industrial I/O device
@@ -204,12 +184,20 @@ struct max1363_chip_info {
* @poll_work: bottom half of polling interrupt handler
* @protect_ring: used to ensure only one polling bh running at a time
* @reg: supply regulator
+ * @monitor_on: whether monitor mode is enabled
+ * @monitor_speed: parameter corresponding to device monitor speed setting
+ * @mask_high: bitmask for enabled high thresholds
+ * @mask_low: bitmask for enabled low thresholds
+ * @thresh_high: high threshold values
+ * @thresh_low: low threshold values
+ * @last_timestamp: timestamp of last event interrupt
+ * @thresh_work: bh work structure for event handling
*/
struct max1363_state {
struct iio_dev *indio_dev;
struct i2c_client *client;
- char setupbyte;
- char configbyte;
+ u8 setupbyte;
+ u8 configbyte;
const struct max1363_chip_info *chip_info;
const struct max1363_mode *current_mode;
u32 requestedmask;
@@ -217,6 +205,18 @@ struct max1363_state {
atomic_t protect_ring;
struct iio_trigger *trig;
struct regulator *reg;
+
+	/* Using monitor mode and the ring buffer at the same time is
+ currently not supported */
+ bool monitor_on;
+ unsigned int monitor_speed:3;
+ u8 mask_high;
+ u8 mask_low;
+	/* 4x unipolar first then the four bipolar ones */
+ s16 thresh_high[8];
+ s16 thresh_low[8];
+ s64 last_timestamp;
+ struct work_struct thresh_work;
};
const struct max1363_mode
@@ -230,32 +230,21 @@ int max1363_single_channel_from_ring(long mask, struct max1363_state *st);
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void max1363_ring_cleanup(struct iio_dev *indio_dev);
-int max1363_initialize_ring(struct iio_ring_buffer *ring);
-void max1363_uninitialize_ring(struct iio_ring_buffer *ring);
-
#else /* CONFIG_MAX1363_RING_BUFFER */
-static inline void max1363_uninitialize_ring(struct iio_ring_buffer *ring)
-{
-};
-
-static inline int max1363_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-};
-
int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
{
return -EINVAL;
-};
-
+}
static inline int
max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
return 0;
-};
+}
-static inline void max1363_ring_cleanup(struct iio_dev *indio_dev) {};
+static inline void max1363_ring_cleanup(struct iio_dev *indio_dev)
+{
+}
#endif /* CONFIG_MAX1363_RING_BUFFER */
#endif /* _MAX1363_H_ */
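
The monitor-mode fields added to max1363_state follow one indexing convention: entries 0-3 of thresh_high/thresh_low and bits 0-3 of mask_high/mask_low refer to the unipolar channels in0..in3, entries/bits 4-7 to the bipolar pairs (in0-in1, in2-in3, in1-in0, in3-in2). A small illustrative helper built on that convention (the function itself is not part of the driver):

static bool max1363_thresh_enabled(const struct max1363_state *st,
				   unsigned int idx, bool high)
{
	u8 mask = high ? st->mask_high : st->mask_low;

	return (mask >> (idx & 0x7)) & 1;
}
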
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 905f8560d31..6435e509dd5 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -18,21 +18,19 @@
*
* Not currently implemented.
*
- * - Monitor interrupt generation.
* - Control of internal reference.
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/i2c.h>
-#include <linux/rtc.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/err.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -48,7 +46,7 @@
IIO_SCAN_EL_C(in##number, number, IIO_UNSIGNED(16), 0, NULL);
#define MAX1363_SCAN_EL_D(p, n, number) \
IIO_SCAN_NAMED_EL_C(in##p##m##in##n, in##p-in##n, \
- number, IIO_SIGNED(16), 0 , NULL);
+ number, IIO_SIGNED(16), 0, NULL);
static MAX1363_SCAN_EL(0);
static MAX1363_SCAN_EL(1);
@@ -148,7 +146,7 @@ const struct max1363_mode
mask))
return &max1363_mode_table[ci->mode_list[i]];
return NULL;
-};
+}
static ssize_t max1363_show_precision(struct device *dev,
struct device_attribute *attr,
@@ -167,7 +165,7 @@ static int max1363_write_basic_config(struct i2c_client *client,
unsigned char d2)
{
int ret;
- u8 *tx_buf = kmalloc(2 , GFP_KERNEL);
+ u8 *tx_buf = kmalloc(2, GFP_KERNEL);
if (!tx_buf)
return -ENOMEM;
@@ -206,6 +204,16 @@ static ssize_t max1363_read_single_channel(struct device *dev,
long mask;
mutex_lock(&dev_info->mlock);
+ /*
+ * If monitor mode is enabled, the method for reading a single
+ * channel will have to be rather different and has not yet
+ * been implemented.
+ */
+ if (st->monitor_on) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+
/* If ring buffer capture is occuring, query the buffer */
if (iio_ring_enabled(dev_info)) {
mask = max1363_mode_table[this_attr->address].modemask;
@@ -305,7 +313,7 @@ static ssize_t max1363_show_name(struct device *dev,
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct max1363_state *st = iio_dev_get_devdata(dev_info);
- return sprintf(buf, "%s\n", st->chip_info->name);
+ return sprintf(buf, "%s\n", st->client->name);
}
static IIO_DEVICE_ATTR(name, S_IRUGO, max1363_show_name, NULL, 0);
@@ -552,8 +560,7 @@ enum { max1361,
/* max1363 and max1368 tested - rest from data sheet */
static const struct max1363_chip_info max1363_chip_info_tbl[] = {
- {
- .name = "max1361",
+ [max1361] = {
.num_inputs = 4,
.bits = 10,
.int_vref_mv = 2048,
@@ -563,8 +570,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1362",
+ },
+ [max1362] = {
.num_inputs = 4,
.bits = 10,
.int_vref_mv = 4096,
@@ -574,8 +581,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1363",
+ },
+ [max1363] = {
.num_inputs = 4,
.bits = 12,
.int_vref_mv = 2048,
@@ -585,8 +592,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1364",
+ },
+ [max1364] = {
.num_inputs = 4,
.bits = 12,
.int_vref_mv = 4096,
@@ -596,8 +603,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1036",
+ },
+ [max1036] = {
.num_inputs = 4,
.bits = 8,
.int_vref_mv = 4096,
@@ -606,8 +613,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1037",
+ },
+ [max1037] = {
.num_inputs = 4,
.bits = 8,
.int_vref_mv = 2048,
@@ -616,8 +623,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1038",
+ },
+ [max1038] = {
.num_inputs = 12,
.bits = 8,
.int_vref_mv = 4096,
@@ -626,8 +633,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max1039",
+ },
+ [max1039] = {
.num_inputs = 12,
.bits = 8,
.int_vref_mv = 2048,
@@ -636,8 +643,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max1136",
+ },
+ [max1136] = {
.num_inputs = 4,
.bits = 10,
.int_vref_mv = 4096,
@@ -646,8 +653,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1137",
+ },
+ [max1137] = {
.num_inputs = 4,
.bits = 10,
.int_vref_mv = 2048,
@@ -656,8 +663,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1138",
+ },
+ [max1138] = {
.num_inputs = 12,
.bits = 10,
.int_vref_mv = 4096,
@@ -666,8 +673,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max1139",
+ },
+ [max1139] = {
.num_inputs = 12,
.bits = 10,
.int_vref_mv = 2048,
@@ -676,8 +683,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max1236",
+ },
+ [max1236] = {
.num_inputs = 4,
.bits = 12,
.int_vref_mv = 4096,
@@ -686,8 +693,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1237",
+ },
+ [max1237] = {
.num_inputs = 4,
.bits = 12,
.int_vref_mv = 2048,
@@ -696,8 +703,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max1238",
+ },
+ [max1238] = {
.num_inputs = 12,
.bits = 12,
.int_vref_mv = 4096,
@@ -706,8 +713,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max1239",
+ },
+ [max1239] = {
.num_inputs = 12,
.bits = 12,
.int_vref_mv = 2048,
@@ -716,8 +723,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max11600",
+ },
+ [max11600] = {
.num_inputs = 4,
.bits = 8,
.int_vref_mv = 4096,
@@ -726,8 +733,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max11601",
+ },
+ [max11601] = {
.num_inputs = 4,
.bits = 8,
.int_vref_mv = 2048,
@@ -736,8 +743,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max11602",
+ },
+ [max11602] = {
.num_inputs = 8,
.bits = 8,
.int_vref_mv = 4096,
@@ -746,8 +753,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to7,
.dev_attrs = &max11608_dev_attr_group,
.scan_attrs = &max11608_scan_el_group,
- }, {
- .name = "max11603",
+ },
+ [max11603] = {
.num_inputs = 8,
.bits = 8,
.int_vref_mv = 2048,
@@ -756,8 +763,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to7,
.dev_attrs = &max11608_dev_attr_group,
.scan_attrs = &max11608_scan_el_group,
- }, {
- .name = "max11604",
+ },
+ [max11604] = {
.num_inputs = 12,
.bits = 8,
.int_vref_mv = 4098,
@@ -766,8 +773,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max11605",
+ },
+ [max11605] = {
.num_inputs = 12,
.bits = 8,
.int_vref_mv = 2048,
@@ -776,8 +783,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max11606",
+ },
+ [max11606] = {
.num_inputs = 4,
.bits = 10,
.int_vref_mv = 4096,
@@ -786,8 +793,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max11607",
+ },
+ [max11607] = {
.num_inputs = 4,
.bits = 10,
.int_vref_mv = 2048,
@@ -796,8 +803,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max11608",
+ },
+ [max11608] = {
.num_inputs = 8,
.bits = 10,
.int_vref_mv = 4096,
@@ -806,8 +813,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to7,
.dev_attrs = &max11608_dev_attr_group,
.scan_attrs = &max11608_scan_el_group,
- }, {
- .name = "max11609",
+ },
+ [max11609] = {
.num_inputs = 8,
.bits = 10,
.int_vref_mv = 2048,
@@ -816,8 +823,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to7,
.dev_attrs = &max11608_dev_attr_group,
.scan_attrs = &max11608_scan_el_group,
- }, {
- .name = "max11610",
+ },
+ [max11610] = {
.num_inputs = 12,
.bits = 10,
.int_vref_mv = 4098,
@@ -826,8 +833,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max11611",
+ },
+ [max11611] = {
.num_inputs = 12,
.bits = 10,
.int_vref_mv = 2048,
@@ -836,8 +843,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max11612",
+ },
+ [max11612] = {
.num_inputs = 4,
.bits = 12,
.int_vref_mv = 4096,
@@ -846,8 +853,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max11613",
+ },
+ [max11613] = {
.num_inputs = 4,
.bits = 12,
.int_vref_mv = 2048,
@@ -856,8 +863,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to3,
.dev_attrs = &max1363_dev_attr_group,
.scan_attrs = &max1363_scan_el_group,
- }, {
- .name = "max11614",
+ },
+ [max11614] = {
.num_inputs = 8,
.bits = 12,
.int_vref_mv = 4096,
@@ -866,8 +873,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to7,
.dev_attrs = &max11608_dev_attr_group,
.scan_attrs = &max11608_scan_el_group,
- }, {
- .name = "max11615",
+ },
+ [max11615] = {
.num_inputs = 8,
.bits = 12,
.int_vref_mv = 2048,
@@ -876,8 +883,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to7,
.dev_attrs = &max11608_dev_attr_group,
.scan_attrs = &max11608_scan_el_group,
- }, {
- .name = "max11616",
+ },
+ [max11616] = {
.num_inputs = 12,
.bits = 12,
.int_vref_mv = 4098,
@@ -886,8 +893,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
.default_mode = s0to11,
.dev_attrs = &max1238_dev_attr_group,
.scan_attrs = &max1238_scan_el_group,
- }, {
- .name = "max11617",
+ },
+ [max11617] = {
.num_inputs = 12,
.bits = 12,
.int_vref_mv = 2048,
@@ -899,6 +906,668 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
}
};
+static const int max1363_monitor_speeds[] = { 133000, 66500, 33300, 16600,
+ 8300, 4200, 2000, 1000 };
+
+static ssize_t max1363_monitor_show_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ return sprintf(buf, "%d\n", max1363_monitor_speeds[st->monitor_speed]);
+}
+
+static ssize_t max1363_monitor_store_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ int i, ret;
+ unsigned long val;
+ bool found = false;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(max1363_monitor_speeds); i++)
+ if (val == max1363_monitor_speeds[i]) {
+ found = true;
+ break;
+ }
+ if (!found)
+ return -EINVAL;
+
+ mutex_lock(&dev_info->mlock);
+ st->monitor_speed = i;
+ mutex_unlock(&dev_info->mlock);
+
+	return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
+ max1363_monitor_show_freq,
+ max1363_monitor_store_freq);
+
+static IIO_CONST_ATTR(sampling_frequency_available,
+		      "133000 66500 33300 16600 8300 4200 2000 1000");
+
+static ssize_t max1363_show_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ bool high)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ if (high)
+ return sprintf(buf, "%d\n",
+ st->thresh_high[this_attr->address]);
+ else
+ return sprintf(buf, "%d\n",
+ st->thresh_low[this_attr->address & 0x7]);
+}
+
+static ssize_t max1363_show_thresh_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return max1363_show_thresh(dev, attr, buf, false);
+}
+
+static ssize_t max1363_show_thresh_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return max1363_show_thresh(dev, attr, buf, true);
+}
+
+static ssize_t max1363_store_thresh_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len,
+ bool high)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+ switch (st->chip_info->bits) {
+ case 10:
+ if (val > 0x3FF)
+ return -EINVAL;
+ break;
+ case 12:
+ if (val > 0xFFF)
+ return -EINVAL;
+ break;
+ }
+
+ switch (high) {
+ case 1:
+ st->thresh_high[this_attr->address] = val;
+ break;
+ case 0:
+ st->thresh_low[this_attr->address & 0x7] = val;
+ break;
+ }
+
+ return len;
+}
+
+static ssize_t max1363_store_thresh_high_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return max1363_store_thresh_unsigned(dev, attr, buf, len, true);
+}
+
+static ssize_t max1363_store_thresh_low_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return max1363_store_thresh_unsigned(dev, attr, buf, len, false);
+}
+
+static ssize_t max1363_store_thresh_signed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len,
+ bool high)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ long val;
+ int ret;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+ switch (st->chip_info->bits) {
+ case 10:
+ if (val < -512 || val > 511)
+ return -EINVAL;
+ break;
+ case 12:
+ if (val < -2048 || val > 2047)
+ return -EINVAL;
+ break;
+ }
+
+ switch (high) {
+ case 1:
+ st->thresh_high[this_attr->address] = val;
+ break;
+ case 0:
+ st->thresh_low[this_attr->address & 0x7] = val;
+ break;
+ }
+
+ return len;
+}
+
+static ssize_t max1363_store_thresh_high_signed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return max1363_store_thresh_signed(dev, attr, buf, len, true);
+}
+
+static ssize_t max1363_store_thresh_low_signed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return max1363_store_thresh_signed(dev, attr, buf, len, false);
+}
+
+static IIO_DEVICE_ATTR(in0_thresh_high_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_high,
+ max1363_store_thresh_high_unsigned, 0);
+static IIO_DEVICE_ATTR(in0_thresh_low_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_low,
+ max1363_store_thresh_low_unsigned, 0);
+static IIO_DEVICE_ATTR(in1_thresh_high_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_high,
+ max1363_store_thresh_high_unsigned, 1);
+static IIO_DEVICE_ATTR(in1_thresh_low_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_low,
+ max1363_store_thresh_low_unsigned, 1);
+static IIO_DEVICE_ATTR(in2_thresh_high_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_high,
+ max1363_store_thresh_high_unsigned, 2);
+static IIO_DEVICE_ATTR(in2_thresh_low_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_low,
+ max1363_store_thresh_low_unsigned, 2);
+static IIO_DEVICE_ATTR(in3_thresh_high_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_high,
+ max1363_store_thresh_high_unsigned, 3);
+static IIO_DEVICE_ATTR(in3_thresh_low_value, S_IRUGO | S_IWUSR,
+ max1363_show_thresh_low,
+ max1363_store_thresh_low_unsigned, 3);
+
+static IIO_DEVICE_ATTR_NAMED(in0min1_thresh_high_value,
+ in0-in1_thresh_high_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_high,
+ max1363_store_thresh_high_signed, 4);
+static IIO_DEVICE_ATTR_NAMED(in0min1_thresh_low_value,
+ in0-in1_thresh_low_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_low,
+ max1363_store_thresh_low_signed, 4);
+static IIO_DEVICE_ATTR_NAMED(in2min3_thresh_high_value,
+ in2-in3_thresh_high_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_high,
+ max1363_store_thresh_high_signed, 5);
+static IIO_DEVICE_ATTR_NAMED(in2min3_thresh_low_value,
+ in2-in3_thresh_low_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_low,
+ max1363_store_thresh_low_signed, 5);
+static IIO_DEVICE_ATTR_NAMED(in1min0_thresh_high_value,
+ in1-in0_thresh_high_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_high,
+ max1363_store_thresh_high_signed, 6);
+static IIO_DEVICE_ATTR_NAMED(in1min0_thresh_low_value,
+ in1-in0_thresh_low_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_low,
+ max1363_store_thresh_low_signed, 6);
+static IIO_DEVICE_ATTR_NAMED(in3min2_thresh_high_value,
+ in3-in2_thresh_high_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_high,
+ max1363_store_thresh_high_signed, 7);
+static IIO_DEVICE_ATTR_NAMED(in3min2_thresh_low_value,
+ in3-in2_thresh_low_value,
+ S_IRUGO | S_IWUSR, max1363_show_thresh_low,
+ max1363_store_thresh_low_signed, 7);
+
+static int max1363_int_th(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int not_test)
+{
+ struct max1363_state *st = dev_info->dev_data;
+
+ st->last_timestamp = timestamp;
+ schedule_work(&st->thresh_work);
+ return 0;
+}
+
+static void max1363_thresh_handler_bh(struct work_struct *work_s)
+{
+ struct max1363_state *st = container_of(work_s, struct max1363_state,
+ thresh_work);
+ u8 rx;
+ u8 tx[2] = { st->setupbyte,
+ MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0 };
+
+ i2c_master_recv(st->client, &rx, 1);
+ if (rx & (1 << 0))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_LOW_THRESH(3),
+ st->last_timestamp);
+ if (rx & (1 << 1))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_HIGH_THRESH(3),
+ st->last_timestamp);
+ if (rx & (1 << 2))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_LOW_THRESH(2),
+ st->last_timestamp);
+ if (rx & (1 << 3))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_HIGH_THRESH(2),
+ st->last_timestamp);
+ if (rx & (1 << 4))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_LOW_THRESH(1),
+ st->last_timestamp);
+ if (rx & (1 << 5))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_HIGH_THRESH(1),
+ st->last_timestamp);
+ if (rx & (1 << 6))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_LOW_THRESH(0),
+ st->last_timestamp);
+ if (rx & (1 << 7))
+ iio_push_event(st->indio_dev, 0,
+ IIO_EVENT_CODE_IN_HIGH_THRESH(0),
+ st->last_timestamp);
+ enable_irq(st->client->irq);
+ i2c_master_send(st->client, tx, 2);
+}
+
+static ssize_t max1363_read_interrupt_config(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+ int val;
+
+ mutex_lock(&dev_info->mlock);
+ if (this_attr->mask & 0x8)
+ val = (1 << (this_attr->mask & 0x7)) & st->mask_low;
+ else
+ val = (1 << this_attr->mask) & st->mask_high;
+ mutex_unlock(&dev_info->mlock);
+
+ return sprintf(buf, "%d\n", !!val);
+}
+
+static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
+{
+ u8 *tx_buf;
+ int ret, i = 3, j;
+ unsigned long numelements;
+ int len;
+ long modemask;
+
+ if (!enabled) {
+ /* transition to ring capture is not currently supported */
+ st->setupbyte &= ~MAX1363_SETUP_MONITOR_SETUP;
+ st->configbyte &= ~MAX1363_SCAN_MASK;
+ st->monitor_on = false;
+ return max1363_write_basic_config(st->client,
+ st->setupbyte,
+ st->configbyte);
+ }
+
+ /* Ensure we are in the relevant mode */
+ st->setupbyte |= MAX1363_SETUP_MONITOR_SETUP;
+ st->configbyte &= ~(MAX1363_CHANNEL_SEL_MASK
+ | MAX1363_SCAN_MASK
+ | MAX1363_SE_DE_MASK);
+ st->configbyte |= MAX1363_CONFIG_SCAN_MONITOR_MODE;
+ if ((st->mask_low | st->mask_high) & 0x0F) {
+ st->configbyte |= max1363_mode_table[s0to3].conf;
+ modemask = max1363_mode_table[s0to3].modemask;
+ } else if ((st->mask_low | st->mask_high) & 0x30) {
+ st->configbyte |= max1363_mode_table[d0m1to2m3].conf;
+ modemask = max1363_mode_table[d0m1to2m3].modemask;
+ } else {
+ st->configbyte |= max1363_mode_table[d1m0to3m2].conf;
+ modemask = max1363_mode_table[d1m0to3m2].modemask;
+ }
+ numelements = hweight_long(modemask);
+ len = 3 * numelements + 3;
+ tx_buf = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ tx_buf[0] = st->configbyte;
+ tx_buf[1] = st->setupbyte;
+ tx_buf[2] = (st->monitor_speed << 1);
+
+ /*
+ * So we need to do yet another bit of nefarious scan mode
+ * setup to match what we need.
+ */
+ for (j = 0; j < 8; j++)
+ if (modemask & (1 << j)) {
+			/* channel j is in the scan; pack its thresholds */
+ if (st->mask_low & (1 << j)) {
+ tx_buf[i] = (st->thresh_low[j] >> 4) & 0xFF;
+ tx_buf[i + 1] = (st->thresh_low[j] << 4) & 0xF0;
+ } else if (j < 4) {
+ tx_buf[i] = 0;
+ tx_buf[i + 1] = 0;
+ } else {
+ tx_buf[i] = 0x80;
+ tx_buf[i + 1] = 0;
+ }
+ if (st->mask_high & (1 << j)) {
+ tx_buf[i + 1] |=
+ (st->thresh_high[j] >> 8) & 0x0F;
+ tx_buf[i + 2] = st->thresh_high[j] & 0xFF;
+ } else if (j < 4) {
+ tx_buf[i + 1] |= 0x0F;
+ tx_buf[i + 2] = 0xFF;
+ } else {
+ tx_buf[i + 1] |= 0x07;
+ tx_buf[i + 2] = 0xFF;
+ }
+ i += 3;
+ }
+
+
+ ret = i2c_master_send(st->client, tx_buf, len);
+ if (ret < 0)
+ goto error_ret;
+ if (ret != len) {
+ ret = -EIO;
+ goto error_ret;
+ }
+
+ /*
+ * Now that we hopefully have sensible thresholds in place it is
+ * time to turn the interrupts on.
+ * It is unclear from the data sheet if this should be necessary
+ * (i.e. whether monitor mode setup is atomic) but it appears to
+ * be in practice.
+ */
+ tx_buf[0] = st->setupbyte;
+ tx_buf[1] = MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0;
+ ret = i2c_master_send(st->client, tx_buf, 2);
+ if (ret < 0)
+ goto error_ret;
+ if (ret != 2) {
+ ret = -EIO;
+ goto error_ret;
+ }
+ ret = 0;
+ st->monitor_on = true;
+error_ret:
+
+ kfree(tx_buf);
+
+ return ret;
+}
+
+/*
+ * To keep this manageable we always use one of 3 scan modes.
+ * Scan 0...3, 0-1,2-3 and 1-0,3-2
+ */
+static inline int __max1363_check_event_mask(int thismask, int checkmask)
+{
+ int ret = 0;
+ /* Is it unipolar */
+ if (thismask < 4) {
+ if (checkmask & ~0x0F) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ } else if (thismask < 6) {
+ if (checkmask & ~0x30) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ } else if (checkmask & ~0xC0)
+ ret = -EBUSY;
+error_ret:
+ return ret;
+}
+
+static ssize_t max1363_write_interrupt_config(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+ unsigned long val;
+ int ret;
+ u16 unifiedmask;
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+ mutex_lock(&st->indio_dev->mlock);
+ unifiedmask = st->mask_low | st->mask_high;
+ if (this_attr->mask & 0x08) {
+ /* If we are disabling no need to test */
+ if (val == 0)
+ st->mask_low &= ~(1 << (this_attr->mask & 0x7));
+ else {
+ ret = __max1363_check_event_mask(this_attr->mask & 0x7,
+ unifiedmask);
+ if (ret)
+ goto error_ret;
+ st->mask_low |= (1 << (this_attr->mask & 0x7));
+ }
+ } else {
+ if (val == 0)
+ st->mask_high &= ~(1 << (this_attr->mask));
+ else {
+ ret = __max1363_check_event_mask(this_attr->mask,
+ unifiedmask);
+ if (ret)
+ goto error_ret;
+ st->mask_high |= (1 << this_attr->mask);
+ }
+ }
+ if (st->monitor_on && !st->mask_high && !st->mask_low)
+ iio_remove_event_from_list(this_attr->listel,
+ &dev_info->interrupts[0]->ev_list);
+ if (!st->monitor_on && val)
+ iio_add_event_to_list(this_attr->listel,
+ &dev_info->interrupts[0]->ev_list);
+
+ max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
+error_ret:
+ mutex_unlock(&st->indio_dev->mlock);
+
+ return len;
+}
+
+IIO_EVENT_SH(max1363_thresh, max1363_int_th);
+
+#define MAX1363_HIGH_THRESH(a) a
+#define MAX1363_LOW_THRESH(a) (a | 0x8)
+
+IIO_EVENT_ATTR_SH(in0_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(0));
+
+IIO_EVENT_ATTR_SH(in0_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(0));
+
+IIO_EVENT_ATTR_SH(in1_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(1));
+
+IIO_EVENT_ATTR_SH(in1_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(1));
+
+IIO_EVENT_ATTR_SH(in2_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(2));
+
+IIO_EVENT_ATTR_SH(in2_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(2));
+
+IIO_EVENT_ATTR_SH(in3_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(3));
+
+IIO_EVENT_ATTR_SH(in3_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(3));
+
+IIO_EVENT_ATTR_NAMED_SH(in0min1_thresh_high_en,
+ in0-in1_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(4));
+
+IIO_EVENT_ATTR_NAMED_SH(in0min1_thresh_low_en,
+ in0-in1_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(4));
+
+IIO_EVENT_ATTR_NAMED_SH(in3min2_thresh_high_en,
+ in3-in2_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(5));
+
+IIO_EVENT_ATTR_NAMED_SH(in3min2_thresh_low_en,
+ in3-in2_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(5));
+
+IIO_EVENT_ATTR_NAMED_SH(in1min0_thresh_high_en,
+ in1-in0_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(6));
+
+IIO_EVENT_ATTR_NAMED_SH(in1min0_thresh_low_en,
+ in1-in0_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(6));
+
+IIO_EVENT_ATTR_NAMED_SH(in2min3_thresh_high_en,
+ in2-in3_thresh_high_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_HIGH_THRESH(7));
+
+IIO_EVENT_ATTR_NAMED_SH(in2min3_thresh_low_en,
+ in2-in3_thresh_low_en,
+ iio_event_max1363_thresh,
+ max1363_read_interrupt_config,
+ max1363_write_interrupt_config,
+ MAX1363_LOW_THRESH(7));
+
+/*
+ * As with scan_elements, only certain sets of these can
+ * be combined.
+ */
+static struct attribute *max1363_event_attributes[] = {
+ &iio_dev_attr_in0_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in0_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in1_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in1_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in2_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in2_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in3_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in3_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in0min1_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in0min1_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in2min3_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in2min3_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in1min0_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in1min0_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_in3min2_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_in3min2_thresh_low_value.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_event_attr_in0_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in0_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in1_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in1_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in2_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in2_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in3_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in3_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in0min1_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in0min1_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in3min2_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in3min2_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in1min0_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in1min0_thresh_low_en.dev_attr.attr,
+ &iio_event_attr_in2min3_thresh_high_en.dev_attr.attr,
+ &iio_event_attr_in2min3_thresh_low_en.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group max1363_event_attribute_group = {
+ .attrs = max1363_event_attributes,
+};
+
static int max1363_initial_setup(struct max1363_state *st)
{
st->setupbyte = MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD
@@ -930,19 +1599,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
atomic_set(&st->protect_ring, 0);
- /* Find the chip model specific data */
- for (i = 0; i < ARRAY_SIZE(max1363_chip_info_tbl); i++)
- if (!strcmp(max1363_chip_info_tbl[i].name, id->name)) {
- st->chip_info = &max1363_chip_info_tbl[i];
- break;
- };
- /* Unsupported chip */
- if (!st->chip_info) {
- dev_err(&client->dev, "%s is not supported\n", id->name);
- ret = -ENODEV;
- goto error_free_st;
- }
-
+ st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->reg = regulator_get(&client->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
@@ -978,6 +1635,11 @@ static int __devinit max1363_probe(struct i2c_client *client,
st->indio_dev->dev_data = (void *)(st);
st->indio_dev->driver_module = THIS_MODULE;
st->indio_dev->modes = INDIO_DIRECT_MODE;
+ if (st->chip_info->monitor_mode && client->irq) {
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs
+ = &max1363_event_attribute_group;
+ }
ret = max1363_initial_setup(st);
if (ret)
@@ -991,10 +1653,25 @@ static int __devinit max1363_probe(struct i2c_client *client,
if (ret)
goto error_cleanup_ring;
regdone = 1;
- ret = max1363_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret)
goto error_cleanup_ring;
+
+ if (st->chip_info->monitor_mode && client->irq) {
+ ret = iio_register_interrupt_line(client->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ client->name);
+ if (ret)
+ goto error_uninit_ring;
+
+ INIT_WORK(&st->thresh_work, max1363_thresh_handler_bh);
+ }
+
return 0;
+error_uninit_ring:
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_cleanup_ring:
max1363_ring_cleanup(st->indio_dev);
error_free_available_scan_masks:
@@ -1010,7 +1687,6 @@ error_disable_reg:
error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
-error_free_st:
kfree(st);
error_ret:
@@ -1021,7 +1697,10 @@ static int max1363_remove(struct i2c_client *client)
{
struct max1363_state *st = i2c_get_clientdata(client);
struct iio_dev *indio_dev = st->indio_dev;
- max1363_uninitialize_ring(indio_dev->ring);
+
+ if (st->chip_info->monitor_mode && client->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+ iio_ring_buffer_unregister(indio_dev->ring);
max1363_ring_cleanup(indio_dev);
kfree(st->indio_dev->available_scan_masks);
iio_device_unregister(indio_dev);
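
For reference, the monitor-mode setup in max1363_monitor_mode_update() above writes three bytes per monitored channel after the config/setup/clock bytes, packing the 12-bit low and high thresholds as sketched below (layout inferred from that code, not from the data sheet):

static void max1363_pack_thresh_record(u8 *rec, u16 low, u16 high)
{
	rec[0] = (low >> 4) & 0xFF;		/* low threshold bits 11:4  */
	rec[1] = ((low << 4) & 0xF0)		/* low threshold bits 3:0   */
	       | ((high >> 8) & 0x0F);		/* high threshold bits 11:8 */
	rec[2] = high & 0xFF;			/* high threshold bits 7:0  */
}
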
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index 56688dc9c92..786b17a0d6b 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -68,7 +68,7 @@ error_ret:
}
/**
- * max1363_ring_preenable() setup the parameters of the ring before enabling
+ * max1363_ring_preenable() - setup the parameters of the ring before enabling
*
 * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
@@ -105,44 +105,15 @@ static int max1363_ring_preenable(struct iio_dev *indio_dev)
return 0;
}
-/**
- * max1363_ring_postenable() typical ring post enable
- *
- * Only not moved into the core for the hardware ring buffer cases
- * that are more sophisticated.
- **/
-static int max1363_ring_postenable(struct iio_dev *indio_dev)
-{
- if (indio_dev->trig == NULL)
- return 0;
- return iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
-}
/**
- * max1363_ring_predisable() runs just prior to ring buffer being disabled
- *
- * Typical predisable function which ensures that no trigger events can
- * occur before we disable the ring buffer (and hence would have no idea
- * what to do with them)
- **/
-static int max1363_ring_predisable(struct iio_dev *indio_dev)
-{
- if (indio_dev->trig)
- return iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc);
- else
- return 0;
-}
-
-/**
- * max1363_poll_func_th() th of trigger launched polling to ring buffer
+ * max1363_poll_func_th() - th of trigger launched polling to ring buffer
*
 * As sampling only occurs when i2c comms occur, leave timestamping until
* then. Some triggers will generate their own time stamp. Currently
* there is no way of notifying them when no one cares.
**/
-static void max1363_poll_func_th(struct iio_dev *indio_dev)
+static void max1363_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct max1363_state *st = indio_dev->dev_data;
@@ -151,7 +122,7 @@ static void max1363_poll_func_th(struct iio_dev *indio_dev)
return;
}
/**
- * max1363_poll_bh_to_ring() bh of trigger launched polling to ring buffer
+ * max1363_poll_bh_to_ring() - bh of trigger launched polling to ring buffer
* @work_s: the work struct through which this was scheduled
*
* Currently there is no option in this driver to disable the saving of
@@ -223,19 +194,14 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&st->indio_dev->ring->access);
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &max1363_poll_func_th);
+ if (ret)
goto error_deallocate_sw_rb;
- }
- /* Configure the polling function called on trigger interrupts */
- indio_dev->pollfunc->poll_func_main = &max1363_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
/* Ring buffer functions - here trigger setup related */
- indio_dev->ring->postenable = &max1363_ring_postenable;
+ indio_dev->ring->postenable = &iio_triggered_ring_postenable;
indio_dev->ring->preenable = &max1363_ring_preenable;
- indio_dev->ring->predisable = &max1363_ring_predisable;
+ indio_dev->ring->predisable = &iio_triggered_ring_predisable;
INIT_WORK(&st->poll_work, &max1363_poll_bh_to_ring);
/* Flag that polled ring buffering is possible */
@@ -258,13 +224,3 @@ void max1363_ring_cleanup(struct iio_dev *indio_dev)
kfree(indio_dev->pollfunc);
iio_sw_rb_free(indio_dev->ring);
}
-
-void max1363_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-};
-
-int max1363_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-};
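
Three drivers in this series replace the open-coded pollfunc allocation with iio_alloc_pollfunc(indio_dev, NULL, &poll_func_th). Going only by the call sites and the code they replace, the helper amounts to roughly the following (a sketch, not the core implementation; the NULL second argument is assumed to be an optional immediate callback and is left aside here):

static int iio_alloc_pollfunc_sketch(struct iio_dev *indio_dev,
				     void (*immediate_th)(struct iio_dev *, s64),
				     void (*main_th)(struct iio_dev *, s64))
{
	indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
	if (indio_dev->pollfunc == NULL)
		return -ENOMEM;
	/* only the main handler is wired up in this sketch */
	indio_dev->pollfunc->poll_func_main = main_th;
	indio_dev->pollfunc->private_data = indio_dev;

	return 0;
}
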
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
index 3f96f8696a4..fd23bd1ea7b 100644
--- a/drivers/staging/iio/chrdev.h
+++ b/drivers/staging/iio/chrdev.h
@@ -73,8 +73,6 @@ struct iio_shared_ev_pointer {
* @det_events: list of detected events
* @max_events: maximum number of events before new ones are dropped
* @current_events: number of events in detected list
- * @id:		identifier to allow the event interface to know which
- * physical line it corresponds to
* @attr: this chrdev's minor number sysfs attribute
* @owner: ensure the driver module owns the file, not iio
* @private: driver specific data
@@ -90,7 +88,6 @@ struct iio_event_interface {
struct iio_detected_event_list det_events;
int max_events;
int current_events;
- int id;
struct iio_chrdev_minor_attr attr;
struct module *owner;
void *private;
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index 6d2c547686c..b5f0dc01122 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -1,4 +1,4 @@
-
+#
# Makefile for digital gyroscope sensor drivers
#
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index f19efb4c91c..812440af57d 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -85,7 +85,6 @@
* struct adis16260_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
* @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
@@ -97,7 +96,6 @@
struct adis16260_state {
struct spi_device *us;
struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_thresh;
s64 last_timestamp;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
@@ -113,13 +111,11 @@ int adis16260_set_irq(struct device *dev, bool enable);
* filling. This may change!
*/
-enum adis16260_scan {
- ADIS16260_SCAN_SUPPLY,
- ADIS16260_SCAN_GYRO,
- ADIS16260_SCAN_AUX_ADC,
- ADIS16260_SCAN_TEMP,
- ADIS16260_SCAN_ANGL,
-};
+#define ADIS16260_SCAN_SUPPLY 0
+#define ADIS16260_SCAN_GYRO 1
+#define ADIS16260_SCAN_AUX_ADC 2
+#define ADIS16260_SCAN_TEMP 3
+#define ADIS16260_SCAN_ANGL 4
void adis16260_remove_trigger(struct iio_dev *indio_dev);
int adis16260_probe_trigger(struct iio_dev *indio_dev);
@@ -132,8 +128,6 @@ ssize_t adis16260_read_data_from_ring(struct device *dev,
int adis16260_configure_ring(struct iio_dev *indio_dev);
void adis16260_unconfigure_ring(struct iio_dev *indio_dev);
-int adis16260_initialize_ring(struct iio_ring_buffer *ring);
-void adis16260_uninitialize_ring(struct iio_ring_buffer *ring);
#else /* CONFIG_IIO_RING_BUFFER */
static inline void adis16260_remove_trigger(struct iio_dev *indio_dev)
@@ -162,14 +156,5 @@ static inline void adis16260_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-static inline int adis16260_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-
-static inline void adis16260_uninitialize_ring(struct iio_ring_buffer *ring)
-{
-}
-
#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* SPI_ADIS16260_H_ */
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index c93f4d580fc..134dfaae2f0 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -14,12 +14,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
#include "../adc/adc.h"
#include "gyro.h"
@@ -555,8 +556,7 @@ static int __devinit adis16260_probe(struct spi_device *spi)
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
-
- ret = adis16260_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -588,7 +588,7 @@ error_unregister_line:
if (spi->irq)
iio_unregister_interrupt_line(st->indio_dev, 0);
error_uninitialize_ring:
- adis16260_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_unreg_ring_funcs:
adis16260_unconfigure_ring(st->indio_dev);
error_free_dev:
@@ -622,15 +622,13 @@ static int adis16260_remove(struct spi_device *spi)
if (spi->irq)
iio_unregister_interrupt_line(indio_dev, 0);
- adis16260_uninitialize_ring(indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
iio_device_unregister(indio_dev);
adis16260_unconfigure_ring(indio_dev);
kfree(st->tx);
kfree(st->rx);
kfree(st);
- return 0;
-
err_ret:
return ret;
}
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 4c4390ca6d7..9ef7f9080dc 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -16,16 +17,6 @@
#include "../trigger.h"
#include "adis16260.h"
-/**
- * combine_8_to_16() utility function to munge to u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
- return _lower | (_upper << 8);
-}
-
static IIO_SCAN_EL_C(supply, ADIS16260_SCAN_SUPPLY, IIO_UNSIGNED(12),
ADIS16260_SUPPLY_OUT, NULL);
static IIO_SCAN_EL_C(gyro, ADIS16260_SCAN_GYRO, IIO_SIGNED(14),
@@ -58,10 +49,10 @@ static struct attribute_group adis16260_scan_el_group = {
 * adis16260_poll_func_th() - top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void adis16260_poll_func_th(struct iio_dev *indio_dev)
+static void adis16260_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
+ st->last_timestamp = time;
schedule_work(&st->work_trigger_to_ring);
}
@@ -133,10 +124,9 @@ static void adis16260_trigger_bh_to_ring(struct work_struct *work_s)
if (st->indio_dev->scan_count)
if (adis16260_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
- for (; i < st->indio_dev->scan_count; i++) {
- data[i] = combine_8_to_16(st->rx[i*2+1],
- st->rx[i*2]);
- }
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (st->indio_dev->scan_timestamp)
@@ -152,48 +142,6 @@ static void adis16260_trigger_bh_to_ring(struct work_struct *work_s)
return;
}
-static int adis16260_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count)
- /* Timestamp (aligned s64) and data */
- size = (((indio_dev->scan_count * sizeof(s16))
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
-}
-
-static int adis16260_data_rdy_ring_postenable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
-static int adis16260_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
void adis16260_unconfigure_ring(struct iio_dev *indio_dev)
{
kfree(indio_dev->pollfunc);
@@ -225,18 +173,16 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
indio_dev->ring = ring;
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &adis16260_data_rdy_ring_preenable;
- ring->postenable = &adis16260_data_rdy_ring_postenable;
- ring->predisable = &adis16260_data_rdy_ring_predisable;
+ ring->bpe = 2;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
ring->owner = THIS_MODULE;
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;;
- }
- indio_dev->pollfunc->poll_func_main = &adis16260_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16260_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -244,13 +190,3 @@ error_iio_sw_rb_free:
iio_sw_rb_free(indio_dev->ring);
return ret;
}
-
-int adis16260_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void adis16260_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
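
The hunks above drop the driver-local combine_8_to_16() helper in favour of be16_to_cpup(): each pair of bytes the ADIS16260 returns over SPI is treated as one big-endian 16-bit word and converted to host order. A minimal userspace sketch of that conversion follows; the helper name and the sample buffer are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of be16_to_cpup(): interpret two bytes that arrive
 * most-significant-byte first as a host-order 16-bit value. */
static uint16_t be16_to_host(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        /* Hypothetical 4-byte burst holding two big-endian samples. */
        uint8_t rx[4] = { 0x0A, 0xBC, 0xFF, 0x3E };
        uint16_t data[2];
        int i;

        for (i = 0; i < 2; i++)
                data[i] = be16_to_host(&rx[i * 2]);

        printf("%#06x %#06x\n", data[0], data[1]);      /* 0x0abc 0xff3e */
        return 0;
}
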
diff --git a/drivers/staging/iio/gyro/adis16260_trigger.c b/drivers/staging/iio/gyro/adis16260_trigger.c
index b3c565942b8..de01537d257 100644
--- a/drivers/staging/iio/gyro/adis16260_trigger.c
+++ b/drivers/staging/iio/gyro/adis16260_trigger.c
@@ -23,8 +23,7 @@ static int adis16260_data_rdy_trig_poll(struct iio_dev *dev_info,
struct adis16260_state *st = iio_dev_get_devdata(dev_info);
struct iio_trigger *trig = st->trig;
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, timestamp);
return IRQ_HANDLED;
}
@@ -83,14 +82,13 @@ int adis16260_probe_trigger(struct iio_dev *indio_dev)
struct adis16260_state *st = indio_dev->dev_data;
st->trig = iio_allocate_trigger();
- st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16260-dev%d",
+ indio_dev->id);
if (!st->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)st->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "adis16260-dev%d", indio_dev->id);
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
st->trig->private_data = st;
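
The trigger-name hunk above swaps a fixed-size kmalloc() plus snprintf() for kasprintf(), which measures, allocates and formats the string in one call. A rough userspace equivalent of that pattern is sketched below; the helper name is made up for illustration and only standard C library calls are used.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace sketch of kasprintf(): format into a buffer sized to fit. */
static char *format_dup(const char *fmt, ...)
{
        va_list ap, ap2;
        int len;
        char *buf;

        va_start(ap, fmt);
        va_copy(ap2, ap);
        len = vsnprintf(NULL, 0, fmt, ap);      /* measure */
        va_end(ap);
        if (len < 0) {
                va_end(ap2);
                return NULL;
        }

        buf = malloc(len + 1);
        if (buf)
                vsnprintf(buf, len + 1, fmt, ap2);      /* fill */
        va_end(ap2);
        return buf;
}

int main(void)
{
        char *name = format_dup("adis16260-dev%d", 3);

        if (name) {
                puts(name);     /* adis16260-dev3 */
                free(name);
        }
        return 0;
}
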
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index fcee47cbe89..9d0ca128679 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -16,9 +16,7 @@
#include "chrdev.h"
/* IIO TODO LIST */
-/* Static device specific elements (conversion factors etc)
- * should be exported via sysfs
- *
+/*
* Provide means of adjusting timer accuracy.
* Currently assumes nano seconds.
*/
@@ -284,49 +282,6 @@ int iio_push_event(struct iio_dev *dev_info,
s64 timestamp);
/**
- * struct iio_work_cont - container for when singleton handler case matters
- * @ws: [DEVICE] work_struct when not only possible event
- * @ws_nocheck: [DEVICE] work_struct when only possible event
- * @address: [DEVICE] associated register address
- * @mask: [DEVICE] associated mask for identifying event source
- * @st: [DEVICE] device specific state information
- **/
-struct iio_work_cont {
- struct work_struct ws;
- struct work_struct ws_nocheck;
- int address;
- int mask;
- void *st;
-};
-
-#define to_iio_work_cont_check(_ws) \
- container_of(_ws, struct iio_work_cont, ws)
-
-#define to_iio_work_cont_no_check(_ws) \
- container_of(_ws, struct iio_work_cont, ws_nocheck)
-
-/**
- * iio_init_work_cont() - intiialize the elements of a work container
- * @cont: the work container
- * @_checkfunc: function called when there are multiple possible int sources
- * @_nocheckfunc: function for when there is only one int source
- * @_add: driver dependent, typically a register address
- * @_mask: driver dependent, typically a bit mask for a register
- * @_st: driver dependent, typically pointer to a device state structure
- **/
-static inline void
-iio_init_work_cont(struct iio_work_cont *cont,
- void (*_checkfunc)(struct work_struct *),
- void (*_nocheckfunc)(struct work_struct *),
- int _add, int _mask, void *_st)
-{
- INIT_WORK(&(cont)->ws, _checkfunc);
- INIT_WORK(&(cont)->ws_nocheck, _nocheckfunc);
- cont->address = _add;
- cont->mask = _mask;
- cont->st = _st;
-}
-/**
* __iio_push_event() - tries to add an event to the list associated with a chrdev
* @ev_int: the event interface to which we are pushing the event
* @ev_code: the outgoing event code
@@ -428,7 +383,9 @@ void iio_put(void);
**/
void iio_get(void);
-/* Ring buffer related */
+/**
+ * iio_device_get_chrdev_minor() - get an unused minor number
+ **/
int iio_device_get_chrdev_minor(void);
void iio_device_free_chrdev_minor(int val);
diff --git a/drivers/staging/iio/imu/Kconfig b/drivers/staging/iio/imu/Kconfig
index 6308d6faad5..31a6233a206 100644
--- a/drivers/staging/iio/imu/Kconfig
+++ b/drivers/staging/iio/imu/Kconfig
@@ -6,9 +6,8 @@ comment "Inertial measurement units"
config ADIS16300
tristate "Analog Devices ADIS16300 IMU SPI driver"
depends on SPI
- select IIO_SW_RING
- select IIO_RING_BUFFER
- select IIO_TRIGGER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_RING_BUFFER
help
Say yes here to build support for Analog Devices adis16300 four degrees
of freedom inertial sensor.
@@ -24,10 +23,9 @@ config ADIS16350
config ADIS16400
tristate "Analog Devices ADIS16400/5 IMU SPI driver"
- depends on SPI
- select IIO_SW_RING
- select IIO_RING_BUFFER
- select IIO_TRIGGER
- help
- Say yes here to build support for Analog Devices adis16400/5 triaxial
- inertial sensor with Magnetometer. \ No newline at end of file
+ depends on SPI
+ select IIO_SW_RING if IIO_RING_BUFFER
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16400/5 triaxial
+ inertial sensor with Magnetometer.
diff --git a/drivers/staging/iio/imu/Makefile b/drivers/staging/iio/imu/Makefile
index 31df7359e20..f3b450b6611 100644
--- a/drivers/staging/iio/imu/Makefile
+++ b/drivers/staging/iio/imu/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for Inertial Measurement Units
#
+
adis16300-y := adis16300_core.o
adis16300-$(CONFIG_IIO_RING_BUFFER) += adis16300_ring.o adis16300_trigger.o
obj-$(CONFIG_ADIS16300) += adis16300.o
@@ -11,4 +12,4 @@ obj-$(CONFIG_ADIS16350) += adis16350.o
adis16400-y := adis16400_core.o
adis16400-$(CONFIG_IIO_RING_BUFFER) += adis16400_ring.o adis16400_trigger.o
-obj-$(CONFIG_ADIS16400) += adis16400.o \ No newline at end of file
+obj-$(CONFIG_ADIS16400) += adis16400.o
diff --git a/drivers/staging/iio/imu/adis16300.h b/drivers/staging/iio/imu/adis16300.h
index 1c7ea5c840e..1f25d68064a 100644
--- a/drivers/staging/iio/imu/adis16300.h
+++ b/drivers/staging/iio/imu/adis16300.h
@@ -94,7 +94,6 @@
* struct adis16300_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
* @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
@@ -106,7 +105,6 @@
struct adis16300_state {
struct spi_device *us;
struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_thresh;
s64 last_timestamp;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
@@ -115,30 +113,22 @@ struct adis16300_state {
struct mutex buf_lock;
};
-int adis16300_spi_read_burst(struct device *dev, u8 *rx);
-
int adis16300_set_irq(struct device *dev, bool enable);
-int adis16300_reset(struct device *dev);
-
-int adis16300_check_status(struct device *dev);
-
#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
-enum adis16300_scan {
- ADIS16300_SCAN_SUPPLY,
- ADIS16300_SCAN_GYRO_X,
- ADIS16300_SCAN_ACC_X,
- ADIS16300_SCAN_ACC_Y,
- ADIS16300_SCAN_ACC_Z,
- ADIS16300_SCAN_TEMP,
- ADIS16300_SCAN_ADC_0,
- ADIS16300_SCAN_INCLI_X,
- ADIS16300_SCAN_INCLI_Y,
-};
+#define ADIS16300_SCAN_SUPPLY 0
+#define ADIS16300_SCAN_GYRO_X 1
+#define ADIS16300_SCAN_ACC_X 2
+#define ADIS16300_SCAN_ACC_Y 3
+#define ADIS16300_SCAN_ACC_Z 4
+#define ADIS16300_SCAN_TEMP 5
+#define ADIS16300_SCAN_ADC_0 6
+#define ADIS16300_SCAN_INCLI_X 7
+#define ADIS16300_SCAN_INCLI_Y 8
void adis16300_remove_trigger(struct iio_dev *indio_dev);
int adis16300_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/imu/adis16300_core.c b/drivers/staging/iio/imu/adis16300_core.c
index 5a7e5ef9bc5..f1950d56cb1 100644
--- a/drivers/staging/iio/imu/adis16300_core.c
+++ b/drivers/staging/iio/imu/adis16300_core.c
@@ -14,12 +14,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
#include "../accel/accel.h"
#include "../accel/inclinometer.h"
#include "../gyro/gyro.h"
@@ -29,10 +30,7 @@
#define DRIVER_NAME "adis16300"
-/* At the moment the spi framework doesn't allow global setting of cs_change.
- * It's in the likely to be added comment at the top of spi.h.
- * This means that use cannot be made of spi_write etc.
- */
+static int adis16300_check_status(struct device *dev);
/**
* adis16300_spi_write_reg_8() - write single byte to a register
@@ -79,11 +77,13 @@ static int adis16300_spi_write_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
+ .delay_usecs = 75,
}, {
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
+ .delay_usecs = 75,
},
};
@@ -122,12 +122,14 @@ static int adis16300_spi_read_reg_16(struct device *dev,
.tx_buf = st->tx,
.bits_per_word = 8,
.len = 2,
- .cs_change = 0,
+ .cs_change = 1,
+ .delay_usecs = 75,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
.len = 2,
- .cs_change = 0,
+ .cs_change = 1,
+ .delay_usecs = 75,
},
};
@@ -154,54 +156,6 @@ error_ret:
return ret;
}
-/**
- * adis16300_spi_read_burst() - read all data registers
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
- * @rx: somewhere to pass back the value read (min size is 24 bytes)
- **/
-int adis16300_spi_read_burst(struct device *dev, u8 *rx)
-{
- struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
- u32 old_speed_hz = st->us->max_speed_hz;
- int ret;
-
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 0,
- }, {
- .rx_buf = rx,
- .bits_per_word = 8,
- .len = 18,
- .cs_change = 0,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADIS16300_READ_REG(ADIS16300_GLOB_CMD);
- st->tx[1] = 0;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
-
- st->us->max_speed_hz = min(ADIS16300_SPI_BURST, old_speed_hz);
- spi_setup(st->us);
-
- ret = spi_sync(st->us, &msg);
- if (ret)
- dev_err(&st->us->dev, "problem when burst reading");
-
- st->us->max_speed_hz = old_speed_hz;
- spi_setup(st->us);
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
static ssize_t adis16300_spi_read_signed(struct device *dev,
struct device_attribute *attr,
char *buf,
@@ -240,6 +194,24 @@ static ssize_t adis16300_read_12bit_unsigned(struct device *dev,
return sprintf(buf, "%u\n", val & 0x0FFF);
}
+static ssize_t adis16300_read_14bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16300_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16300_ERROR_ACTIVE)
+ adis16300_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x3FFF);
+}
+
static ssize_t adis16300_read_14bit_signed(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -356,6 +328,18 @@ static ssize_t adis16300_write_frequency(struct device *dev,
return ret ? ret : len;
}
+static int adis16300_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16300_spi_write_reg_8(dev,
+ ADIS16300_GLOB_CMD,
+ ADIS16300_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
static ssize_t adis16300_write_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -371,8 +355,6 @@ static ssize_t adis16300_write_reset(struct device *dev,
return -1;
}
-
-
int adis16300_set_irq(struct device *dev, bool enable)
{
int ret;
@@ -396,32 +378,37 @@ error_ret:
return ret;
}
-int adis16300_reset(struct device *dev)
+/* Power down the device */
+static int adis16300_stop_device(struct device *dev)
{
int ret;
- ret = adis16300_spi_write_reg_8(dev,
- ADIS16300_GLOB_CMD,
- ADIS16300_GLOB_CMD_SW_RESET);
+ u16 val = ADIS16300_SLP_CNT_POWER_OFF;
+
+ ret = adis16300_spi_write_reg_16(dev, ADIS16300_SLP_CNT, val);
if (ret)
- dev_err(dev, "problem resetting device");
+ dev_err(dev, "problem with turning device off: SLP_CNT");
return ret;
}
-/* Power down the device */
-static int adis16300_stop_device(struct device *dev)
+static int adis16300_self_test(struct device *dev)
{
int ret;
- u16 val = ADIS16300_SLP_CNT_POWER_OFF;
+ ret = adis16300_spi_write_reg_16(dev,
+ ADIS16300_MSC_CTRL,
+ ADIS16300_MSC_CTRL_MEM_TEST);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
- ret = adis16300_spi_write_reg_16(dev, ADIS16300_SLP_CNT, val);
- if (ret)
- dev_err(dev, "problem with turning device off: SLP_CNT");
+ adis16300_check_status(dev);
+err_ret:
return ret;
}
-int adis16300_check_status(struct device *dev)
+static int adis16300_check_status(struct device *dev)
{
u16 status;
int ret;
@@ -483,6 +470,11 @@ static int adis16300_initial_setup(struct adis16300_state *st)
}
/* Do self test */
+ ret = adis16300_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
/* Read status register to check the result */
ret = adis16300_check_status(dev);
@@ -526,7 +518,7 @@ static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
adis16300_write_16bit,
ADIS16300_ZACCL_OFF);
-static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16300_read_14bit_signed,
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16300_read_14bit_unsigned,
ADIS16300_SUPPLY_OUT);
static IIO_CONST_ATTR(in_supply_scale, "0.00242");
@@ -548,7 +540,7 @@ static IIO_DEV_ATTR_INCLI_Y(adis16300_read_13bit_signed,
ADIS16300_YINCLI_OUT);
static IIO_CONST_ATTR(incli_scale, "0.044 d");
-static IIO_DEV_ATTR_TEMP_RAW(adis16300_read_12bit_signed);
+static IIO_DEV_ATTR_TEMP_RAW(adis16300_read_12bit_unsigned);
static IIO_CONST_ATTR(temp_offset, "198.16 K");
static IIO_CONST_ATTR(temp_scale, "0.14 K");
@@ -653,21 +645,13 @@ static int __devinit adis16300_probe(struct spi_device *spi)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = adis16300_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
}
- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
-#if 0 /* fixme: here we should support */
- iio_init_work_cont(&st->work_cont_thresh,
- NULL,
- adis16300_thresh_handler_bh_no_check,
- 0,
- 0,
- st);
-#endif
+ if (spi->irq) {
ret = iio_register_interrupt_line(spi->irq,
st->indio_dev,
0,
@@ -688,13 +672,12 @@ static int __devinit adis16300_probe(struct spi_device *spi)
return 0;
error_remove_trigger:
- if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
- adis16300_remove_trigger(st->indio_dev);
+ adis16300_remove_trigger(st->indio_dev);
error_unregister_line:
- if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (spi->irq)
iio_unregister_interrupt_line(st->indio_dev, 0);
error_uninitialize_ring:
- adis16300_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_unreg_ring_funcs:
adis16300_unconfigure_ring(st->indio_dev);
error_free_dev:
@@ -712,7 +695,6 @@ error_ret:
return ret;
}
-/* fixme, confirm ordering in this function */
static int adis16300_remove(struct spi_device *spi)
{
int ret;
@@ -726,12 +708,12 @@ static int adis16300_remove(struct spi_device *spi)
flush_scheduled_work();
adis16300_remove_trigger(indio_dev);
- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ if (spi->irq)
iio_unregister_interrupt_line(indio_dev, 0);
- adis16300_uninitialize_ring(indio_dev->ring);
- adis16300_unconfigure_ring(indio_dev);
+ iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
+ adis16300_unconfigure_ring(indio_dev);
kfree(st->tx);
kfree(st->rx);
kfree(st);
diff --git a/drivers/staging/iio/imu/adis16300_ring.c b/drivers/staging/iio/imu/adis16300_ring.c
index 76cf8a6f3c3..fc93160acb2 100644
--- a/drivers/staging/iio/imu/adis16300_ring.c
+++ b/drivers/staging/iio/imu/adis16300_ring.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -16,17 +17,7 @@
#include "../trigger.h"
#include "adis16300.h"
-/**
- * combine_8_to_16() utility function to munge to u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
- return _lower | (_upper << 8);
-}
-
-static IIO_SCAN_EL_C(supply, ADIS16300_SCAN_SUPPLY, IIO_SIGNED(14),
+static IIO_SCAN_EL_C(supply, ADIS16300_SCAN_SUPPLY, IIO_UNSIGNED(14),
ADIS16300_SUPPLY_OUT, NULL);
static IIO_SCAN_EL_C(gyro_x, ADIS16300_SCAN_GYRO_X, IIO_SIGNED(14),
@@ -39,9 +30,9 @@ static IIO_SCAN_EL_C(accel_y, ADIS16300_SCAN_ACC_Y, IIO_SIGNED(14),
static IIO_SCAN_EL_C(accel_z, ADIS16300_SCAN_ACC_Z, IIO_SIGNED(14),
ADIS16300_ZACCL_OUT, NULL);
-static IIO_SCAN_EL_C(temp, ADIS16300_SCAN_TEMP, IIO_SIGNED(12),
+static IIO_SCAN_EL_C(temp, ADIS16300_SCAN_TEMP, IIO_UNSIGNED(12),
ADIS16300_TEMP_OUT, NULL);
-static IIO_SCAN_EL_C(adc_0, ADIS16300_SCAN_ADC_0, IIO_SIGNED(12),
+static IIO_SCAN_EL_C(adc_0, ADIS16300_SCAN_ADC_0, IIO_UNSIGNED(12),
ADIS16300_AUX_ADC, NULL);
static IIO_SCAN_EL_C(incli_x, ADIS16300_SCAN_INCLI_X, IIO_SIGNED(12),
@@ -74,10 +65,10 @@ static struct attribute_group adis16300_scan_el_group = {
* adis16300_poll_func_th() top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void adis16300_poll_func_th(struct iio_dev *indio_dev)
+static void adis16300_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
+ st->last_timestamp = time;
schedule_work(&st->work_trigger_to_ring);
/* Indicate that this interrupt is being handled */
@@ -87,6 +78,54 @@ static void adis16300_poll_func_th(struct iio_dev *indio_dev)
*/
}
+/**
+ * adis16300_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+static int adis16300_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+ u32 old_speed_hz = st->us->max_speed_hz;
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 18,
+ .cs_change = 0,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16300_READ_REG(ADIS16300_GLOB_CMD);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+
+ st->us->max_speed_hz = ADIS16300_SPI_BURST;
+ spi_setup(st->us);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ st->us->max_speed_hz = old_speed_hz;
+ spi_setup(st->us);
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
* specific to be rolled into the core.
*/
@@ -109,10 +148,9 @@ static void adis16300_trigger_bh_to_ring(struct work_struct *work_s)
if (st->indio_dev->scan_count)
if (adis16300_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
- for (; i < st->indio_dev->scan_count; i++) {
- data[i] = combine_8_to_16(st->rx[i*2+1],
- st->rx[i*2]);
- }
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (st->indio_dev->scan_timestamp)
@@ -127,45 +165,6 @@ static void adis16300_trigger_bh_to_ring(struct work_struct *work_s)
return;
}
-/* in these circumstances is it better to go with unaligned packing and
- * deal with the cost?*/
-static int adis16300_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count) /* Timestamp and data */
- size = 4*sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
-}
-
-static int adis16300_data_rdy_ring_postenable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
-static int adis16300_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
void adis16300_unconfigure_ring(struct iio_dev *indio_dev)
{
@@ -202,18 +201,16 @@ int adis16300_configure_ring(struct iio_dev *indio_dev)
indio_dev->ring = ring;
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &adis16300_data_rdy_ring_preenable;
- ring->postenable = &adis16300_data_rdy_ring_postenable;
- ring->predisable = &adis16300_data_rdy_ring_predisable;
+ ring->bpe = 2;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
ring->owner = THIS_MODULE;
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;;
- }
- indio_dev->pollfunc->poll_func_main = &adis16300_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16300_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -222,12 +219,3 @@ error_iio_sw_rb_free:
return ret;
}
-int adis16300_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void adis16300_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
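
The preenable hooks deleted above (here and in adis16260_ring.c) sized each ring entry by hand: adis16260 rounded the 16-bit samples up to an 8-byte boundary and appended an s64 timestamp, while adis16300 simply hard-coded the total. With ring->bpe = 2 set in configure_ring, that sizing is now left to the generic iio_sw_ring_preenable(). Below is a standalone sketch of the round-up arithmetic the deleted code performed; the function name and the constants in main() are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Bytes needed for scan_count 16-bit samples followed by an 8-byte-aligned
 * 64-bit timestamp, mirroring the removed driver preenable hooks. */
static size_t ring_entry_size(unsigned int scan_count, int scan_timestamp)
{
        size_t size;

        if (scan_timestamp) {
                if (scan_count)
                        size = ((scan_count * sizeof(int16_t)
                                 + sizeof(int64_t) - 1)
                                & ~(sizeof(int64_t) - 1))
                               + sizeof(int64_t);
                else    /* timestamp only */
                        size = sizeof(int64_t);
        } else {        /* data only */
                size = scan_count * sizeof(int16_t);
        }
        return size;
}

int main(void)
{
        /* e.g. five enabled channels plus timestamp: 10 -> 16, + 8 = 24 */
        printf("%zu\n", ring_entry_size(5, 1));
        return 0;
}
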
diff --git a/drivers/staging/iio/imu/adis16300_trigger.c b/drivers/staging/iio/imu/adis16300_trigger.c
index 54edb20bf11..64036cd9910 100644
--- a/drivers/staging/iio/imu/adis16300_trigger.c
+++ b/drivers/staging/iio/imu/adis16300_trigger.c
@@ -23,8 +23,7 @@ static int adis16300_data_rdy_trig_poll(struct iio_dev *dev_info,
struct adis16300_state *st = iio_dev_get_devdata(dev_info);
struct iio_trigger *trig = st->trig;
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, timestamp);
return IRQ_HANDLED;
}
@@ -86,14 +85,13 @@ int adis16300_probe_trigger(struct iio_dev *indio_dev)
struct adis16300_state *st = indio_dev->dev_data;
st->trig = iio_allocate_trigger();
- st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16300-dev%d",
+ indio_dev->id);
if (!st->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)st->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "adis16300-dev%d", indio_dev->id);
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
st->trig->private_data = st;
diff --git a/drivers/staging/iio/imu/adis16350.h b/drivers/staging/iio/imu/adis16350.h
index 334b18ace38..b00001e3edd 100644
--- a/drivers/staging/iio/imu/adis16350.h
+++ b/drivers/staging/iio/imu/adis16350.h
@@ -100,7 +100,6 @@
* struct adis16350_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
* @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
@@ -112,7 +111,6 @@
struct adis16350_state {
struct spi_device *us;
struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_data_rdy;
s64 last_timestamp;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
@@ -125,19 +123,17 @@ int adis16350_set_irq(struct device *dev, bool enable);
#ifdef CONFIG_IIO_RING_BUFFER
-enum adis16350_scan {
- ADIS16350_SCAN_SUPPLY,
- ADIS16350_SCAN_GYRO_X,
- ADIS16350_SCAN_GYRO_Y,
- ADIS16350_SCAN_GYRO_Z,
- ADIS16350_SCAN_ACC_X,
- ADIS16350_SCAN_ACC_Y,
- ADIS16350_SCAN_ACC_Z,
- ADIS16350_SCAN_TEMP_X,
- ADIS16350_SCAN_TEMP_Y,
- ADIS16350_SCAN_TEMP_Z,
- ADIS16350_SCAN_ADC_0
-};
+#define ADIS16350_SCAN_SUPPLY 0
+#define ADIS16350_SCAN_GYRO_X 1
+#define ADIS16350_SCAN_GYRO_Y 2
+#define ADIS16350_SCAN_GYRO_Z 3
+#define ADIS16350_SCAN_ACC_X 4
+#define ADIS16350_SCAN_ACC_Y 5
+#define ADIS16350_SCAN_ACC_Z 6
+#define ADIS16350_SCAN_TEMP_X 7
+#define ADIS16350_SCAN_TEMP_Y 8
+#define ADIS16350_SCAN_TEMP_Z 9
+#define ADIS16350_SCAN_ADC_0 10
void adis16350_remove_trigger(struct iio_dev *indio_dev);
int adis16350_probe_trigger(struct iio_dev *indio_dev);
@@ -150,8 +146,6 @@ ssize_t adis16350_read_data_from_ring(struct device *dev,
int adis16350_configure_ring(struct iio_dev *indio_dev);
void adis16350_unconfigure_ring(struct iio_dev *indio_dev);
-int adis16350_initialize_ring(struct iio_ring_buffer *ring);
-void adis16350_uninitialize_ring(struct iio_ring_buffer *ring);
#else /* CONFIG_IIO_RING_BUFFER */
static inline void adis16350_remove_trigger(struct iio_dev *indio_dev)
@@ -171,7 +165,7 @@ adis16350_read_data_from_ring(struct device *dev,
return 0;
}
-static int adis16350_configure_ring(struct iio_dev *indio_dev)
+static inline int adis16350_configure_ring(struct iio_dev *indio_dev)
{
return 0;
}
@@ -179,15 +173,5 @@ static int adis16350_configure_ring(struct iio_dev *indio_dev)
static inline void adis16350_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-
-static inline int adis16350_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-
-static inline void adis16350_uninitialize_ring(struct iio_ring_buffer *ring)
-{
-}
-
#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* SPI_ADIS16350_H_ */
diff --git a/drivers/staging/iio/imu/adis16350_core.c b/drivers/staging/iio/imu/adis16350_core.c
index 0edde73ce5c..1575b7b5d44 100644
--- a/drivers/staging/iio/imu/adis16350_core.c
+++ b/drivers/staging/iio/imu/adis16350_core.c
@@ -14,12 +14,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
#include "../accel/accel.h"
#include "../adc/adc.h"
#include "../gyro/gyro.h"
@@ -75,13 +76,13 @@ static int adis16350_spi_write_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
}, {
.tx_buf = st->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
},
};
@@ -121,13 +122,13 @@ static int adis16350_spi_read_reg_16(struct device *dev,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = 25,
+ .delay_usecs = 35,
},
};
@@ -619,7 +620,7 @@ static int __devinit adis16350_probe(struct spi_device *spi)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = adis16350_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
@@ -651,7 +652,7 @@ error_unregister_line:
if (spi->irq)
iio_unregister_interrupt_line(st->indio_dev, 0);
error_uninitialize_ring:
- adis16350_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_unreg_ring_funcs:
adis16350_unconfigure_ring(st->indio_dev);
error_free_dev:
@@ -685,7 +686,7 @@ static int adis16350_remove(struct spi_device *spi)
if (spi->irq)
iio_unregister_interrupt_line(indio_dev, 0);
- adis16350_uninitialize_ring(indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16350_unconfigure_ring(indio_dev);
kfree(st->tx);
diff --git a/drivers/staging/iio/imu/adis16350_ring.c b/drivers/staging/iio/imu/adis16350_ring.c
index 5e9716ea7c7..e053e9aaa2e 100644
--- a/drivers/staging/iio/imu/adis16350_ring.c
+++ b/drivers/staging/iio/imu/adis16350_ring.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -16,16 +17,6 @@
#include "../trigger.h"
#include "adis16350.h"
-/**
- * combine_8_to_16() utility function to munge to u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
- return _lower | (_upper << 8);
-}
-
static IIO_SCAN_EL_C(supply, ADIS16350_SCAN_SUPPLY, IIO_UNSIGNED(12),
ADIS16350_SUPPLY_OUT, NULL);
@@ -80,10 +71,10 @@ static struct attribute_group adis16350_scan_el_group = {
* adis16350_poll_func_th() top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void adis16350_poll_func_th(struct iio_dev *indio_dev)
+static void adis16350_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
+ st->last_timestamp = time;
schedule_work(&st->work_trigger_to_ring);
}
@@ -157,10 +148,9 @@ static void adis16350_trigger_bh_to_ring(struct work_struct *work_s)
if (st->indio_dev->scan_count)
if (adis16350_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
- for (; i < st->indio_dev->scan_count; i++) {
- data[i] = combine_8_to_16(st->rx[i*2+1],
- st->rx[i*2]);
- }
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (st->indio_dev->scan_timestamp)
@@ -176,48 +166,6 @@ static void adis16350_trigger_bh_to_ring(struct work_struct *work_s)
return;
}
-static int adis16350_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count)
- /* Timestamp (aligned sizeof(s64) and data */
- size = (((indio_dev->scan_count * sizeof(s16))
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
-}
-
-static int adis16350_data_rdy_ring_postenable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
-static int adis16350_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
void adis16350_unconfigure_ring(struct iio_dev *indio_dev)
{
kfree(indio_dev->pollfunc);
@@ -255,18 +203,16 @@ int adis16350_configure_ring(struct iio_dev *indio_dev)
indio_dev->ring = ring;
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &adis16350_data_rdy_ring_preenable;
- ring->postenable = &adis16350_data_rdy_ring_postenable;
- ring->predisable = &adis16350_data_rdy_ring_predisable;
+ ring->bpe = 2;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
ring->owner = THIS_MODULE;
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;;
- }
- indio_dev->pollfunc->poll_func_main = &adis16350_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16350_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -275,12 +221,3 @@ error_iio_sw_rb_free:
return ret;
}
-int adis16350_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void adis16350_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
diff --git a/drivers/staging/iio/imu/adis16350_trigger.c b/drivers/staging/iio/imu/adis16350_trigger.c
index 1ffa75d05fa..76edccc85b7 100644
--- a/drivers/staging/iio/imu/adis16350_trigger.c
+++ b/drivers/staging/iio/imu/adis16350_trigger.c
@@ -23,8 +23,7 @@ static int adis16350_data_rdy_trig_poll(struct iio_dev *dev_info,
struct adis16350_state *st = iio_dev_get_devdata(dev_info);
struct iio_trigger *trig = st->trig;
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, timestamp);
return IRQ_HANDLED;
}
@@ -86,14 +85,13 @@ int adis16350_probe_trigger(struct iio_dev *indio_dev)
struct adis16350_state *st = indio_dev->dev_data;
st->trig = iio_allocate_trigger();
- st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16350-dev%d",
+ indio_dev->id);
if (!st->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)st->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "adis16350-dev%d", indio_dev->id);
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
st->trig->private_data = st;
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index 5a69a7ab91c..6ff33e1ad8c 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -126,7 +126,6 @@
* struct adis16400_state - device instance specific data
* @us: actual spi_device
* @work_trigger_to_ring: bh for triggered event handling
- * @work_cont_thresh: CLEAN
* @inter: used to check if new interrupt has been triggered
* @last_timestamp: passing timestamp from th to bh of interrupt handler
* @indio_dev: industrial I/O device structure
@@ -138,7 +137,6 @@
struct adis16400_state {
struct spi_device *us;
struct work_struct work_trigger_to_ring;
- struct iio_work_cont work_cont_thresh;
s64 last_timestamp;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
@@ -147,33 +145,25 @@ struct adis16400_state {
struct mutex buf_lock;
};
-int adis16400_spi_read_burst(struct device *dev, u8 *rx);
-
int adis16400_set_irq(struct device *dev, bool enable);
-int adis16400_reset(struct device *dev);
-
-int adis16400_check_status(struct device *dev);
-
#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
-enum adis16400_scan {
- ADIS16400_SCAN_SUPPLY,
- ADIS16400_SCAN_GYRO_X,
- ADIS16400_SCAN_GYRO_Y,
- ADIS16400_SCAN_GYRO_Z,
- ADIS16400_SCAN_ACC_X,
- ADIS16400_SCAN_ACC_Y,
- ADIS16400_SCAN_ACC_Z,
- ADIS16400_SCAN_MAGN_X,
- ADIS16400_SCAN_MAGN_Y,
- ADIS16400_SCAN_MAGN_Z,
- ADIS16400_SCAN_TEMP,
- ADIS16400_SCAN_ADC_0
-};
+#define ADIS16400_SCAN_SUPPLY 0
+#define ADIS16400_SCAN_GYRO_X 1
+#define ADIS16400_SCAN_GYRO_Y 2
+#define ADIS16400_SCAN_GYRO_Z 3
+#define ADIS16400_SCAN_ACC_X 4
+#define ADIS16400_SCAN_ACC_Y 5
+#define ADIS16400_SCAN_ACC_Z 6
+#define ADIS16400_SCAN_MAGN_X 7
+#define ADIS16400_SCAN_MAGN_Y 8
+#define ADIS16400_SCAN_MAGN_Z 9
+#define ADIS16400_SCAN_TEMP 10
+#define ADIS16400_SCAN_ADC_0 11
void adis16400_remove_trigger(struct iio_dev *indio_dev);
int adis16400_probe_trigger(struct iio_dev *indio_dev);
@@ -186,8 +176,6 @@ ssize_t adis16400_read_data_from_ring(struct device *dev,
int adis16400_configure_ring(struct iio_dev *indio_dev);
void adis16400_unconfigure_ring(struct iio_dev *indio_dev);
-int adis16400_initialize_ring(struct iio_ring_buffer *ring);
-void adis16400_uninitialize_ring(struct iio_ring_buffer *ring);
#else /* CONFIG_IIO_RING_BUFFER */
static inline void adis16400_remove_trigger(struct iio_dev *indio_dev)
@@ -216,14 +204,5 @@ static inline void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
{
}
-static inline int adis16400_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-
-static inline void adis16400_uninitialize_ring(struct iio_ring_buffer *ring)
-{
-}
-
#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index e69e2ce47da..6013fee218e 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -21,12 +21,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
#include "../accel/accel.h"
#include "../adc/adc.h"
#include "../gyro/gyro.h"
@@ -36,6 +37,8 @@
#define DRIVER_NAME "adis16400"
+static int adis16400_check_status(struct device *dev);
+
/* At the moment the spi framework doesn't allow global setting of cs_change.
* It's in the likely to be added comment at the top of spi.h.
* This means that use cannot be made of spi_write etc.
@@ -161,54 +164,6 @@ error_ret:
return ret;
}
-/**
- * adis16400_spi_read_burst() - read all data registers
- * @dev: device associated with child of actual device (iio_dev or iio_trig)
- * @rx: somewhere to pass back the value read (min size is 24 bytes)
- **/
-int adis16400_spi_read_burst(struct device *dev, u8 *rx)
-{
- struct spi_message msg;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
- u32 old_speed_hz = st->us->max_speed_hz;
- int ret;
-
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 0,
- }, {
- .rx_buf = rx,
- .bits_per_word = 8,
- .len = 24,
- .cs_change = 1,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
- st->tx[1] = 0;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
-
- st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
- spi_setup(st->us);
-
- ret = spi_sync(st->us, &msg);
- if (ret)
- dev_err(&st->us->dev, "problem when burst reading");
-
- st->us->max_speed_hz = old_speed_hz;
- spi_setup(st->us);
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
static ssize_t adis16400_spi_read_signed(struct device *dev,
struct device_attribute *attr,
char *buf,
@@ -277,7 +232,6 @@ static ssize_t adis16400_read_12bit_signed(struct device *dev,
return ret;
}
-
static ssize_t adis16400_write_16bit(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -349,6 +303,18 @@ static ssize_t adis16400_write_frequency(struct device *dev,
return ret ? ret : len;
}
+static int adis16400_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16400_spi_write_reg_8(dev,
+ ADIS16400_GLOB_CMD,
+ ADIS16400_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
static ssize_t adis16400_write_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -364,8 +330,6 @@ static ssize_t adis16400_write_reset(struct device *dev,
return -1;
}
-
-
int adis16400_set_irq(struct device *dev, bool enable)
{
int ret;
@@ -388,18 +352,6 @@ error_ret:
return ret;
}
-int adis16400_reset(struct device *dev)
-{
- int ret;
- ret = adis16400_spi_write_reg_8(dev,
- ADIS16400_GLOB_CMD,
- ADIS16400_GLOB_CMD_SW_RESET);
- if (ret)
- dev_err(dev, "problem resetting device");
-
- return ret;
-}
-
/* Power down the device */
static int adis16400_stop_device(struct device *dev)
{
@@ -430,7 +382,7 @@ err_ret:
return ret;
}
-int adis16400_check_status(struct device *dev)
+static int adis16400_check_status(struct device *dev)
{
u16 status;
int ret;
@@ -496,6 +448,11 @@ static int adis16400_initial_setup(struct adis16400_state *st)
}
/* Do self test */
+ ret = adis16400_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
/* Read status register to check the result */
ret = adis16400_check_status(dev);
@@ -685,21 +642,13 @@ static int __devinit adis16400_probe(struct spi_device *spi)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = adis16400_initialize_ring(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
goto error_unreg_ring_funcs;
}
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
-#if 0 /* fixme: here we should support */
- iio_init_work_cont(&st->work_cont_thresh,
- NULL,
- adis16400_thresh_handler_bh_no_check,
- 0,
- 0,
- st);
-#endif
ret = iio_register_interrupt_line(spi->irq,
st->indio_dev,
0,
@@ -726,7 +675,7 @@ error_unregister_line:
if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
iio_unregister_interrupt_line(st->indio_dev, 0);
error_uninitialize_ring:
- adis16400_uninitialize_ring(st->indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
error_unreg_ring_funcs:
adis16400_unconfigure_ring(st->indio_dev);
error_free_dev:
@@ -761,7 +710,7 @@ static int adis16400_remove(struct spi_device *spi)
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
iio_unregister_interrupt_line(indio_dev, 0);
- adis16400_uninitialize_ring(indio_dev->ring);
+ iio_ring_buffer_unregister(st->indio_dev->ring);
adis16400_unconfigure_ring(indio_dev);
iio_device_unregister(indio_dev);
kfree(st->tx);
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 5529b32bd2e..949db76283d 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
@@ -16,16 +17,6 @@
#include "../trigger.h"
#include "adis16400.h"
-/**
- * combine_8_to_16() utility function to munge to u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
- return _lower | (_upper << 8);
-}
-
static IIO_SCAN_EL_C(supply, ADIS16400_SCAN_SUPPLY, IIO_SIGNED(14),
ADIS16400_SUPPLY_OUT, NULL);
@@ -83,10 +74,10 @@ static struct attribute_group adis16400_scan_el_group = {
* adis16400_poll_func_th() top half interrupt handler called by trigger
* @private_data: iio_dev
**/
-static void adis16400_poll_func_th(struct iio_dev *indio_dev)
+static void adis16400_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
- st->last_timestamp = indio_dev->trig->timestamp;
+ st->last_timestamp = time;
schedule_work(&st->work_trigger_to_ring);
/* Indicate that this interrupt is being handled */
@@ -96,6 +87,54 @@ static void adis16400_poll_func_th(struct iio_dev *indio_dev)
*/
}
+/**
+ * adis16400_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+ u32 old_speed_hz = st->us->max_speed_hz;
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 24,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+
+ st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
+ spi_setup(st->us);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ st->us->max_speed_hz = old_speed_hz;
+ spi_setup(st->us);
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
* specific to be rolled into the core.
*/
@@ -118,10 +157,9 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
if (st->indio_dev->scan_count)
if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
- for (; i < st->indio_dev->scan_count; i++) {
- data[i] = combine_8_to_16(st->rx[i*2+1],
- st->rx[i*2]);
- }
+ for (; i < st->indio_dev->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (st->indio_dev->scan_timestamp)
@@ -136,45 +174,6 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
return;
}
-/* in these circumstances is it better to go with unaligned packing and
- * deal with the cost?*/
-static int adis16400_data_rdy_ring_preenable(struct iio_dev *indio_dev)
-{
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
- return -EINVAL;
-
- if (indio_dev->ring->access.set_bpd) {
- if (indio_dev->scan_timestamp)
- if (indio_dev->scan_count) /* Timestamp and data */
- size = 6*sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = indio_dev->scan_count*sizeof(s16);
- indio_dev->ring->access.set_bpd(indio_dev->ring, size);
- }
-
- return 0;
-}
-
-static int adis16400_data_rdy_ring_postenable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
-
-static int adis16400_data_rdy_ring_predisable(struct iio_dev *indio_dev)
-{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
-}
void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
{
@@ -214,18 +213,16 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
indio_dev->ring = ring;
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&ring->access);
- ring->preenable = &adis16400_data_rdy_ring_preenable;
- ring->postenable = &adis16400_data_rdy_ring_postenable;
- ring->predisable = &adis16400_data_rdy_ring_predisable;
+ ring->bpe = 2;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
ring->owner = THIS_MODULE;
- indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;;
- }
- indio_dev->pollfunc->poll_func_main = &adis16400_poll_func_th;
- indio_dev->pollfunc->private_data = indio_dev;
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16400_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
@@ -233,13 +230,3 @@ error_iio_sw_rb_free:
iio_sw_rb_free(indio_dev->ring);
return ret;
}
-
-int adis16400_initialize_ring(struct iio_ring_buffer *ring)
-{
- return iio_ring_buffer_register(ring, 0);
-}
-
-void adis16400_uninitialize_ring(struct iio_ring_buffer *ring)
-{
- iio_ring_buffer_unregister(ring);
-}
diff --git a/drivers/staging/iio/imu/adis16400_trigger.c b/drivers/staging/iio/imu/adis16400_trigger.c
index 3b3250ac768..aafe6010f1b 100644
--- a/drivers/staging/iio/imu/adis16400_trigger.c
+++ b/drivers/staging/iio/imu/adis16400_trigger.c
@@ -23,8 +23,7 @@ static int adis16400_data_rdy_trig_poll(struct iio_dev *dev_info,
struct adis16400_state *st = iio_dev_get_devdata(dev_info);
struct iio_trigger *trig = st->trig;
- trig->timestamp = timestamp;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, timestamp);
return IRQ_HANDLED;
}
@@ -86,14 +85,13 @@ int adis16400_probe_trigger(struct iio_dev *indio_dev)
struct adis16400_state *st = indio_dev->dev_data;
st->trig = iio_allocate_trigger();
- st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16400-dev%d",
+ indio_dev->id);
if (!st->trig->name) {
ret = -ENOMEM;
goto error_free_trig;
}
- snprintf((char *)st->trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "adis16400-dev%d", indio_dev->id);
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
st->trig->private_data = st;
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 01030684ef2..dd4d87a8bca 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -30,9 +30,6 @@
/* IDR to assign each registered device a unique id*/
static DEFINE_IDR(iio_idr);
-
-/* IDR for general event identifiers */
-static DEFINE_IDR(iio_event_idr);
/* IDR to allocate character device minor numbers */
static DEFINE_IDR(iio_chrdev_idr);
/* Lock used to protect both of the above */
@@ -654,16 +651,11 @@ static int iio_device_register_eventset(struct iio_dev *dev_info)
for (i = 0; i < dev_info->num_interrupt_lines; i++) {
dev_info->event_interfaces[i].owner = dev_info->driver_module;
- ret = iio_get_new_idr_val(&iio_event_idr);
- if (ret < 0)
- goto error_free_setup_ev_ints;
- else
- dev_info->event_interfaces[i].id = ret;
snprintf(dev_info->event_interfaces[i]._name, 20,
"%s:event%d",
dev_name(&dev_info->dev),
- dev_info->event_interfaces[i].id);
+ i);
ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
(const char *)(dev_info
@@ -674,8 +666,6 @@ static int iio_device_register_eventset(struct iio_dev *dev_info)
if (ret) {
dev_err(&dev_info->dev,
"Could not get chrdev interface\n");
- iio_free_idr_val(&iio_event_idr,
- dev_info->event_interfaces[i].id);
goto error_free_setup_ev_ints;
}
@@ -711,11 +701,8 @@ error_remove_sysfs_interfaces:
->event_interfaces[j].dev.kobj,
&dev_info->event_attrs[j]);
error_free_setup_ev_ints:
- for (j = 0; j < i; j++) {
- iio_free_idr_val(&iio_event_idr,
- dev_info->event_interfaces[j].id);
+ for (j = 0; j < i; j++)
iio_free_ev_int(&dev_info->event_interfaces[j]);
- }
kfree(dev_info->interrupts);
error_free_event_interfaces:
kfree(dev_info->event_interfaces);
@@ -735,11 +722,8 @@ static void iio_device_unregister_eventset(struct iio_dev *dev_info)
->event_interfaces[i].dev.kobj,
&dev_info->event_attrs[i]);
- for (i = 0; i < dev_info->num_interrupt_lines; i++) {
- iio_free_idr_val(&iio_event_idr,
- dev_info->event_interfaces[i].id);
+ for (i = 0; i < dev_info->num_interrupt_lines; i++)
iio_free_ev_int(&dev_info->event_interfaces[i]);
- }
kfree(dev_info->interrupts);
kfree(dev_info->event_interfaces);
}
diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c
index ada159bbb1f..6ab578e4f5f 100644
--- a/drivers/staging/iio/industrialio-ring.c
+++ b/drivers/staging/iio/industrialio-ring.c
@@ -149,12 +149,10 @@ __iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
{
int ret;
- buf->ev_int.id = id;
-
snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
"%s:event%d",
dev_name(&buf->dev),
- buf->ev_int.id);
+ id);
ret = iio_setup_ev_int(&(buf->ev_int),
buf->ev_int._name,
owner,
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 5682e61600f..57dd9232cf0 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -31,7 +31,6 @@
* Any other suggestions?
*/
-
static DEFINE_IDR(iio_trigger_idr);
static DEFINE_SPINLOCK(iio_trigger_idr_lock);
@@ -173,7 +172,7 @@ struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len)
}
EXPORT_SYMBOL(iio_trigger_find_by_name);
-void iio_trigger_poll(struct iio_trigger *trig)
+void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
struct iio_poll_func *pf_cursor;
@@ -185,7 +184,8 @@ void iio_trigger_poll(struct iio_trigger *trig)
}
list_for_each_entry(pf_cursor, &trig->pollfunc_list, list) {
if (pf_cursor->poll_func_main) {
- pf_cursor->poll_func_main(pf_cursor->private_data);
+ pf_cursor->poll_func_main(pf_cursor->private_data,
+ time);
trig->use_count++;
}
}
@@ -198,8 +198,7 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
if (trig->use_count == 0 && trig->try_reenable)
if (trig->try_reenable(trig)) {
/* Missed and interrupt so launch new poll now */
- trig->timestamp = 0;
- iio_trigger_poll(trig);
+ iio_trigger_poll(trig, 0);
}
}
EXPORT_SYMBOL(iio_trigger_notify_done);
@@ -284,7 +283,7 @@ error_ret:
EXPORT_SYMBOL(iio_trigger_dettach_poll_func);
/**
- * iio_trigger_read_currrent() trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger is in use
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
@@ -296,10 +295,9 @@ static ssize_t iio_trigger_read_current(struct device *dev,
struct iio_dev *dev_info = dev_get_drvdata(dev);
int len = 0;
if (dev_info->trig)
- len = snprintf(buf,
- IIO_TRIGGER_NAME_LENGTH,
- "%s\n",
- dev_info->trig->name);
+ len = sprintf(buf,
+ "%s\n",
+ dev_info->trig->name);
return len;
}
@@ -324,8 +322,6 @@ static ssize_t iio_trigger_write_current(struct device *dev,
}
mutex_unlock(&dev_info->mlock);
- len = len < IIO_TRIGGER_NAME_LENGTH ? len : IIO_TRIGGER_NAME_LENGTH;
-
dev_info->trig = iio_trigger_find_by_name(buf, len);
if (oldtrig && dev_info->trig != oldtrig)
iio_put_trigger(oldtrig);
@@ -402,3 +398,34 @@ int iio_device_unregister_trigger_consumer(struct iio_dev *dev_info)
}
EXPORT_SYMBOL(iio_device_unregister_trigger_consumer);
+int iio_alloc_pollfunc(struct iio_dev *indio_dev,
+ void (*immediate)(struct iio_dev *indio_dev),
+ void (*main)(struct iio_dev *private_data, s64 time))
+{
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL)
+ return -ENOMEM;
+ indio_dev->pollfunc->poll_func_immediate = immediate;
+ indio_dev->pollfunc->poll_func_main = main;
+ indio_dev->pollfunc->private_data = indio_dev;
+ return 0;
+}
+EXPORT_SYMBOL(iio_alloc_pollfunc);
+
+int iio_triggered_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+EXPORT_SYMBOL(iio_triggered_ring_postenable);
+
+int iio_triggered_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+EXPORT_SYMBOL(iio_triggered_ring_predisable);
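
Taken together, the exports added above (iio_alloc_pollfunc(), iio_triggered_ring_postenable(), iio_triggered_ring_predisable()) absorb the poll-function allocation and trigger attach/detach boilerplate that each ADIS driver previously carried. Condensed from the configure_ring hunks earlier in this patch, a driver's ring setup now takes roughly the shape below; the ring allocation call and the example_* names are assumptions for illustration, and error handling is trimmed to the essentials.

/* Kernel-context sketch only: condensed from adis16260_configure_ring()
 * as changed by this patch, with illustrative example_* names. */
static void example_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
        /* driver top half: typically record 'time' and queue a work item */
}

static int example_configure_ring(struct iio_dev *indio_dev)
{
        int ret;
        struct iio_ring_buffer *ring;

        ring = iio_sw_rb_allocate(indio_dev);   /* allocation call assumed */
        if (!ring)
                return -ENOMEM;
        indio_dev->ring = ring;

        /* Effectively select the software ring buffer implementation */
        iio_ring_sw_register_funcs(&ring->access);
        ring->bpe = 2;                          /* two bytes per scan element */
        ring->preenable = &iio_sw_ring_preenable;
        ring->postenable = &iio_triggered_ring_postenable;
        ring->predisable = &iio_triggered_ring_predisable;
        ring->owner = THIS_MODULE;

        ret = iio_alloc_pollfunc(indio_dev, NULL, &example_poll_func_th);
        if (ret) {
                iio_sw_rb_free(indio_dev->ring);
                return ret;
        }

        indio_dev->modes |= INDIO_RING_TRIGGERED;
        return 0;
}
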
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index 80cb6e590fb..3ddc478e618 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -12,4 +12,3 @@ config SENSORS_TSL2563
This driver can also be built as a module. If so, the module
will be called tsl2563.
-
diff --git a/drivers/staging/iio/light/light.h b/drivers/staging/iio/light/light.h
index f00f827689c..e4e1e2c4139 100644
--- a/drivers/staging/iio/light/light.h
+++ b/drivers/staging/iio/light/light.h
@@ -2,11 +2,6 @@
/* Light to digital sensor attributes */
-#define IIO_DEV_ATTR_LIGHT_INFRARED(_num, _show, _addr) \
- IIO_DEVICE_ATTR(light_infrared##_num, S_IRUGO, _show, NULL, _addr)
+#define IIO_EVENT_CODE_LIGHT_THRESH IIO_EVENT_CODE_LIGHT_BASE
-#define IIO_DEV_ATTR_LIGHT_BROAD(_num, _show, _addr) \
- IIO_DEVICE_ATTR(light_broadspectrum##_num, S_IRUGO, _show, NULL, _addr)
-#define IIO_DEV_ATTR_LIGHT_VISIBLE(_num, _show, _addr) \
- IIO_DEVICE_ATTR(light_visible##_num, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index e4b0a5ef1c1..98f8b78f5d8 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/delay.h>
@@ -117,15 +118,17 @@ struct tsl2563_chip {
struct iio_dev *indio_dev;
struct delayed_work poweroff_work;
+ struct work_struct work_thresh;
+ s64 event_timestamp;
/* Remember state for suspend and resume functions */
pm_message_t state;
struct tsl2563_gainlevel_coeff *gainlevel;
- /* Thresholds are in lux */
u16 low_thres;
u16 high_thres;
u8 intr;
+ bool int_enabled;
/* Calibration coefficients */
u32 calib0;
@@ -189,17 +192,29 @@ static int tsl2563_get_power(struct tsl2563_chip *chip)
static int tsl2563_configure(struct tsl2563_chip *chip)
{
- struct i2c_client *client = chip->client;
int ret;
- ret = tsl2563_write(client, TSL2563_REG_TIMING,
+ ret = tsl2563_write(chip->client, TSL2563_REG_TIMING,
chip->gainlevel->gaintime);
if (ret)
- goto out;
-
- ret = tsl2563_write(client, TSL2563_REG_INT, chip->intr);
-
-out:
+ goto error_ret;
+ ret = tsl2563_write(chip->client, TSL2563_REG_HIGHLOW,
+ chip->high_thres & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = tsl2563_write(chip->client, TSL2563_REG_HIGHHIGH,
+ (chip->high_thres >> 8) & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = tsl2563_write(chip->client, TSL2563_REG_LOWLOW,
+ chip->low_thres & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = tsl2563_write(chip->client, TSL2563_REG_LOWHIGH,
+ (chip->low_thres >> 8) & 0xFF);
+/* The interrupt register is written automatically whenever it is
+ * relevant, so it is not written here. */
+error_ret:
return ret;
}
@@ -323,21 +338,23 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
if (chip->state.event != PM_EVENT_ON)
goto out;
- cancel_delayed_work(&chip->poweroff_work);
-
- if (!tsl2563_get_power(chip)) {
- ret = tsl2563_set_power(chip, 1);
- if (ret)
- goto out;
- ret = tsl2563_configure(chip);
- if (ret)
- goto out;
- tsl2563_wait_adc(chip);
+ if (!chip->int_enabled) {
+ cancel_delayed_work(&chip->poweroff_work);
+
+ if (!tsl2563_get_power(chip)) {
+ ret = tsl2563_set_power(chip, 1);
+ if (ret)
+ goto out;
+ ret = tsl2563_configure(chip);
+ if (ret)
+ goto out;
+ tsl2563_wait_adc(chip);
+ }
}
while (retry) {
ret = tsl2563_read(client,
- TSL2563_REG_DATA0LOW | TSL2563_CLEARINT,
+ TSL2563_REG_DATA0LOW,
buf0, sizeof(buf0));
if (ret != sizeof(buf0))
goto out;
@@ -356,7 +373,8 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
chip->data0 = normalize_adc(adc0, chip->gainlevel->gaintime);
chip->data1 = normalize_adc(adc1, chip->gainlevel->gaintime);
- schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
+ if (!chip->int_enabled)
+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
ret = 0;
out:
@@ -449,11 +467,12 @@ static unsigned int adc_to_lux(u32 adc0, u32 adc1)
/* Sysfs interface */
/*--------------------------------------------------------------*/
-static ssize_t tsl2563_adc0_show(struct device *dev,
+static ssize_t tsl2563_adc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tsl2563_chip *chip = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
mutex_lock(&chip->lock);
@@ -462,26 +481,14 @@ static ssize_t tsl2563_adc0_show(struct device *dev,
if (ret)
goto out;
- ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->data0);
-out:
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static ssize_t tsl2563_adc1_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2563_chip *chip = indio_dev->dev_data;
- int ret;
-
- mutex_lock(&chip->lock);
-
- ret = tsl2563_get_adc(chip);
- if (ret)
- goto out;
-
- ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->data1);
+ switch (this_attr->address) {
+ case 0:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->data0);
+ break;
+ case 1:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->data1);
+ break;
+ }
out:
mutex_unlock(&chip->lock);
return ret;
@@ -527,37 +534,36 @@ static ssize_t format_calib(char *buf, int len, u32 calib)
return snprintf(buf, PAGE_SIZE, "%d\n", calib_to_sysfs(calib));
}
-static ssize_t tsl2563_calib0_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2563_chip *chip = indio_dev->dev_data;
- int ret;
-
- mutex_lock(&chip->lock);
- ret = format_calib(buf, PAGE_SIZE, chip->calib0);
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static ssize_t tsl2563_calib1_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t tsl2563_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tsl2563_chip *chip = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
mutex_lock(&chip->lock);
- ret = format_calib(buf, PAGE_SIZE, chip->calib1);
+ switch (this_attr->address) {
+ case 0:
+ ret = format_calib(buf, PAGE_SIZE, chip->calib0);
+ break;
+ case 1:
+ ret = format_calib(buf, PAGE_SIZE, chip->calib1);
+ break;
+ default:
+ ret = -ENODEV;
+ }
mutex_unlock(&chip->lock);
return ret;
}
-static int do_calib_store(struct device *dev, const char *buf, size_t len,
- int ch)
+static ssize_t tsl2563_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tsl2563_chip *chip = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int value;
u32 calib;
@@ -566,37 +572,27 @@ static int do_calib_store(struct device *dev, const char *buf, size_t len,
calib = calib_from_sysfs(value);
- if (ch)
- chip->calib1 = calib;
- else
+ switch (this_attr->address) {
+ case 0:
chip->calib0 = calib;
+ break;
+ case 1:
+ chip->calib1 = calib;
+ break;
+ }
return len;
}
-static ssize_t tsl2563_calib0_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- return do_calib_store(dev, buf, len, 0);
-}
-
-static ssize_t tsl2563_calib1_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- return do_calib_store(dev, buf, len, 1);
-}
-
-/* AmitXXXX: Convert to IIO_DEV_ATTR_LIGHT* as in tsl2561
- * once I understand what they mean */
-static DEVICE_ATTR(adc0, S_IRUGO, tsl2563_adc0_show, NULL);
-static DEVICE_ATTR(adc1, S_IRUGO, tsl2563_adc1_show, NULL);
+static IIO_DEVICE_ATTR(intensity_both_raw, S_IRUGO,
+ tsl2563_adc_show, NULL, 0);
+static IIO_DEVICE_ATTR(intensity_ir_raw, S_IRUGO,
+ tsl2563_adc_show, NULL, 1);
static DEVICE_ATTR(illuminance0_input, S_IRUGO, tsl2563_lux_show, NULL);
-static DEVICE_ATTR(calib0, S_IRUGO | S_IWUSR,
- tsl2563_calib0_show, tsl2563_calib0_store);
-static DEVICE_ATTR(calib1, S_IRUGO | S_IWUSR,
- tsl2563_calib1_show, tsl2563_calib1_store);
+static IIO_DEVICE_ATTR(intensity_both_calibgain, S_IRUGO | S_IWUSR,
+ tsl2563_calib_show, tsl2563_calib_store, 0);
+static IIO_DEVICE_ATTR(intensity_ir_calibgain, S_IRUGO | S_IWUSR,
+ tsl2563_calib_show, tsl2563_calib_store, 1);
static ssize_t tsl2563_show_name(struct device *dev,
struct device_attribute *attr,
@@ -610,11 +606,11 @@ static ssize_t tsl2563_show_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, tsl2563_show_name, NULL);
static struct attribute *tsl2563_attributes[] = {
- &dev_attr_adc0.attr,
- &dev_attr_adc1.attr,
+ &iio_dev_attr_intensity_both_raw.dev_attr.attr,
+ &iio_dev_attr_intensity_ir_raw.dev_attr.attr,
&dev_attr_illuminance0_input.attr,
- &dev_attr_calib0.attr,
- &dev_attr_calib1.attr,
+ &iio_dev_attr_intensity_both_calibgain.dev_attr.attr,
+ &iio_dev_attr_intensity_ir_calibgain.dev_attr.attr,
&dev_attr_name.attr,
NULL
};
@@ -623,6 +619,192 @@ static const struct attribute_group tsl2563_group = {
.attrs = tsl2563_attributes,
};
+static ssize_t tsl2563_read_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2563_chip *chip = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u16 val = 0;
+ switch (this_attr->address) {
+ case TSL2563_REG_HIGHLOW:
+ val = chip->high_thres;
+ break;
+ case TSL2563_REG_LOWLOW:
+ val = chip->low_thres;
+ break;
+ }
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t tsl2563_write_thresh(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2563_chip *chip = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ mutex_lock(&chip->lock);
+ ret = tsl2563_write(chip->client, this_attr->address, val & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = tsl2563_write(chip->client, this_attr->address + 1,
+ (val >> 8) & 0xFF);
+ switch (this_attr->address) {
+ case TSL2563_REG_HIGHLOW:
+ chip->high_thres = val;
+ break;
+ case TSL2563_REG_LOWLOW:
+ chip->low_thres = val;
+ break;
+ }
+
+error_ret:
+ mutex_unlock(&chip->lock);
+
+ return ret < 0 ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(intensity_both_thresh_high_value,
+ S_IRUGO | S_IWUSR,
+ tsl2563_read_thresh,
+ tsl2563_write_thresh,
+ TSL2563_REG_HIGHLOW);
+
+static IIO_DEVICE_ATTR(intensity_both_thresh_low_value,
+ S_IRUGO | S_IWUSR,
+ tsl2563_read_thresh,
+ tsl2563_write_thresh,
+ TSL2563_REG_LOWLOW);
+
+static int tsl2563_int_th(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int not_test)
+{
+ struct tsl2563_chip *chip = dev_info->dev_data;
+
+ chip->event_timestamp = timestamp;
+ schedule_work(&chip->work_thresh);
+
+ return 0;
+}
+
+static void tsl2563_int_bh(struct work_struct *work_s)
+{
+ struct tsl2563_chip *chip
+ = container_of(work_s,
+ struct tsl2563_chip, work_thresh);
+ u8 cmd = TSL2563_CMD | TSL2563_CLEARINT;
+
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_LIGHT_BASE,
+ chip->event_timestamp);
+
+ /* re-enable the interrupt */
+ enable_irq(chip->client->irq);
+ /* clear the interrupt flag on the device */
+ i2c_master_send(chip->client, &cmd, sizeof(cmd));
+
+}
+
+static ssize_t tsl2563_write_interrupt_config(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2563_chip *chip = indio_dev->dev_data;
+ struct iio_event_attr *this_attr = to_iio_event_attr(attr);
+ int input, ret = 0;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+ mutex_lock(&chip->lock);
+ if (input && !(chip->intr & 0x30)) {
+ iio_add_event_to_list(this_attr->listel,
+ &indio_dev->interrupts[0]->ev_list);
+ chip->intr &= ~0x30;
+ chip->intr |= 0x10;
+ /* ensure the chip is actually on */
+ cancel_delayed_work(&chip->poweroff_work);
+ if (!tsl2563_get_power(chip)) {
+ ret = tsl2563_set_power(chip, 1);
+ if (ret)
+ goto out;
+ ret = tsl2563_configure(chip);
+ if (ret)
+ goto out;
+ }
+ ret = tsl2563_write(chip->client, TSL2563_REG_INT, chip->intr);
+ chip->int_enabled = true;
+ }
+
+ if (!input && (chip->intr & 0x30)) {
+ chip->intr &= ~0x30;
+ ret = tsl2563_write(chip->client, TSL2563_REG_INT, chip->intr);
+ iio_remove_event_from_list(this_attr->listel,
+ &indio_dev->interrupts[0]->ev_list);
+ chip->int_enabled = false;
+ /* now the interrupt is not enabled, we can go to sleep */
+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
+ }
+out:
+ mutex_unlock(&chip->lock);
+
+ return (ret < 0) ? ret : len;
+}
+
+static ssize_t tsl2563_read_interrupt_config(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2563_chip *chip = indio_dev->dev_data;
+ int ret;
+ u8 rxbuf;
+ ssize_t len;
+
+ mutex_lock(&chip->lock);
+ ret = tsl2563_read(chip->client,
+ TSL2563_REG_INT,
+ &rxbuf,
+ sizeof(rxbuf));
+ mutex_unlock(&chip->lock);
+ if (ret < 0)
+ goto error_ret;
+ len = snprintf(buf, PAGE_SIZE, "%d\n", !!(rxbuf & 0x30));
+error_ret:
+
+ return (ret < 0) ? ret : len;
+}
+
+IIO_EVENT_ATTR(intensity_both_thresh_both_en,
+ tsl2563_read_interrupt_config,
+ tsl2563_write_interrupt_config,
+ 0,
+ tsl2563_int_th);
+
+static struct attribute *tsl2563_event_attributes[] = {
+ &iio_event_attr_intensity_both_thresh_both_en.dev_attr.attr,
+ &iio_dev_attr_intensity_both_thresh_high_value.dev_attr.attr,
+ &iio_dev_attr_intensity_both_thresh_low_value.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group tsl2563_event_attribute_group = {
+ .attrs = tsl2563_event_attributes,
+};
+
/*--------------------------------------------------------------*/
/* Probe, Attach, Remove */
/*--------------------------------------------------------------*/
@@ -641,6 +823,7 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
if (!chip)
return -ENOMEM;
+ INIT_WORK(&chip->work_thresh, tsl2563_int_bh);
i2c_set_clientdata(client, chip);
chip->client = client;
@@ -679,18 +862,36 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
chip->indio_dev->dev_data = (void *)(chip);
chip->indio_dev->driver_module = THIS_MODULE;
chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ if (client->irq) {
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->event_attrs
+ = &tsl2563_event_attribute_group;
+ }
ret = iio_device_register(chip->indio_dev);
if (ret)
goto fail1;
+ if (client->irq) {
+ ret = iio_register_interrupt_line(client->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ client->name);
+ if (ret)
+ goto fail2;
+ }
err = tsl2563_configure(chip);
if (err)
- goto fail2;
+ goto fail3;
INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
+ /* The interrupt cannot yet be enabled so this is fine without lock */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
return 0;
+fail3:
+ if (client->irq)
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
fail2:
iio_device_unregister(chip->indio_dev);
fail1:
@@ -701,7 +902,15 @@ fail1:
static int tsl2563_remove(struct i2c_client *client)
{
struct tsl2563_chip *chip = i2c_get_clientdata(client);
-
+ if (!chip->int_enabled)
+ cancel_delayed_work(&chip->poweroff_work);
+ /* Ensure that interrupts are disabled - then flush any bottom halves */
+ chip->intr &= ~0x30;
+ tsl2563_write(chip->client, TSL2563_REG_INT, chip->intr);
+ flush_scheduled_work();
+ tsl2563_set_power(chip, 0);
+ if (client->irq)
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
iio_device_unregister(chip->indio_dev);
kfree(chip);
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
new file mode 100644
index 00000000000..d01445060f5
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/Kconfig
@@ -0,0 +1,15 @@
+#
+# Magnetometer sensors
+#
+comment "Magnetometer sensors"
+
+config SENSORS_HMC5843
+ tristate "Honeywell HMC5843 3-Axis Magnetometer"
+ depends on I2C
+ help
+ Say Y here to add support for the Honeywell HMC5843 3-Axis
+ Magnetometer (digital compass).
+
+ To compile this driver as a module, choose M here: the module
+ will be called hmc5843.
+
diff --git a/drivers/staging/iio/magnetometer/Makefile b/drivers/staging/iio/magnetometer/Makefile
new file mode 100644
index 00000000000..f9bfb2e11d7
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for industrial I/O Magnetometer sensors
+#
+
+obj-$(CONFIG_SENSORS_HMC5843) += hmc5843.o
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
new file mode 100644
index 00000000000..92f6c6fb90f
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -0,0 +1,624 @@
+/* Copyright (C) 2010 Texas Instruments
+ Author: Shubhrajyoti Datta <shubhrajyoti@ti.com>
+ Acknowledgement: Jonathan Cameron <jic23@cam.ac.uk> for valuable inputs.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "../iio.h"
+#include "../sysfs.h"
+#include "magnet.h"
+
+#define HMC5843_I2C_ADDRESS 0x1E
+
+#define HMC5843_CONFIG_REG_A 0x00
+#define HMC5843_CONFIG_REG_B 0x01
+#define HMC5843_MODE_REG 0x02
+#define HMC5843_DATA_OUT_X_MSB_REG 0x03
+#define HMC5843_DATA_OUT_X_LSB_REG 0x04
+#define HMC5843_DATA_OUT_Y_MSB_REG 0x05
+#define HMC5843_DATA_OUT_Y_LSB_REG 0x06
+#define HMC5843_DATA_OUT_Z_MSB_REG 0x07
+#define HMC5843_DATA_OUT_Z_LSB_REG 0x08
+#define HMC5843_STATUS_REG 0x09
+#define HMC5843_ID_REG_A 0x0A
+#define HMC5843_ID_REG_B 0x0B
+#define HMC5843_ID_REG_C 0x0C
+
+#define HMC5843_ID_REG_LENGTH 0x03
+#define HMC5843_ID_STRING "H43"
+
+/*
+ * Range settings in (+-)Ga
+ */
+#define RANGE_GAIN_OFFSET 0x05
+
+#define RANGE_0_7 0x00
+#define RANGE_1_0 0x01 /* default */
+#define RANGE_1_5 0x02
+#define RANGE_2_0 0x03
+#define RANGE_3_2 0x04
+#define RANGE_3_8 0x05
+#define RANGE_4_5 0x06
+#define RANGE_6_5 0x07 /* Not recommended */
+
+/*
+ * Device status
+ */
+#define DATA_READY 0x01
+#define DATA_OUTPUT_LOCK 0x02
+#define VOLTAGE_REGULATOR_ENABLED 0x04
+
+/*
+ * Mode register configuration
+ */
+#define MODE_CONVERSION_CONTINUOUS 0x00
+#define MODE_CONVERSION_SINGLE 0x01
+#define MODE_IDLE 0x02
+#define MODE_SLEEP 0x03
+
+/* Minimum Data Output Rate in 1/10 Hz */
+#define RATE_OFFSET 0x02
+#define RATE_BITMASK 0x1C
+#define RATE_5 0x00
+#define RATE_10 0x01
+#define RATE_20 0x02
+#define RATE_50 0x03
+#define RATE_100 0x04
+#define RATE_200 0x05
+#define RATE_500 0x06
+#define RATE_NOT_USED 0x07
+
+/*
+ * Device Configuration
+ */
+#define CONF_NORMAL 0x00
+#define CONF_POSITIVE_BIAS 0x01
+#define CONF_NEGATIVE_BIAS 0x02
+#define CONF_NOT_USED 0x03
+#define MEAS_CONF_MASK 0x03
+
+static const int regval_to_counts_per_mg[] = {
+ 1620,
+ 1300,
+ 970,
+ 780,
+ 530,
+ 460,
+ 390,
+ 280
+};
+static const int regval_to_input_field_mg[] = {
+ 700,
+ 1000,
+ 1500,
+ 2000,
+ 3200,
+ 3800,
+ 4500,
+ 6500
+};
+static const char *regval_to_samp_freq[] = {
+ "0.5",
+ "1",
+ "2",
+ "5",
+ "10",
+ "20",
+ "50",
+};
+
+/* Addresses to scan: 0x1E */
+static const unsigned short normal_i2c[] = { HMC5843_I2C_ADDRESS,
+ I2C_CLIENT_END };
+
+/* Each client has this additional data */
+struct hmc5843_data {
+ struct iio_dev *indio_dev;
+ struct mutex lock;
+ u8 rate;
+ u8 meas_conf;
+ u8 operating_mode;
+ u8 range;
+};
+
+static void hmc5843_init_client(struct i2c_client *client);
+
+static s32 hmc5843_configure(struct i2c_client *client,
+ u8 operating_mode)
+{
+ /* The lower two bits contain the current conversion mode */
+ return i2c_smbus_write_byte_data(client,
+ HMC5843_MODE_REG,
+ (operating_mode & 0x03));
+}
+
+/* Return the measurement value from the specified channel */
+static ssize_t hmc5843_read_measurement(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
+ s16 coordinate_val;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ s32 result;
+
+ mutex_lock(&data->lock);
+
+ result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
+ while (!(result & DATA_READY))
+ result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
+
+ result = i2c_smbus_read_word_data(client, this_attr->address);
+ mutex_unlock(&data->lock);
+ if (result < 0)
+ return -EINVAL;
+
+ coordinate_val = (s16)swab16((u16)result);
+ return sprintf(buf, "%d\n", coordinate_val);
+}
+static IIO_DEV_ATTR_MAGN_X(hmc5843_read_measurement,
+ HMC5843_DATA_OUT_X_MSB_REG);
+static IIO_DEV_ATTR_MAGN_Y(hmc5843_read_measurement,
+ HMC5843_DATA_OUT_Y_MSB_REG);
+static IIO_DEV_ATTR_MAGN_Z(hmc5843_read_measurement,
+ HMC5843_DATA_OUT_Z_MSB_REG);
+
+/*
+ * From the datasheet
+ * 0 - Continuous-Conversion Mode: In continuous-conversion mode, the
+ * device continuously performs conversions and places the result in the
+ * data register.
+ *
+ * 1 - Single-Conversion Mode: the device performs a single measurement,
+ * sets RDY high and returns to sleep mode.
+ *
+ * 2 - Idle Mode : Device is placed in idle mode.
+ *
+ * 3 - Sleep Mode. Device is placed in sleep mode.
+ *
+ */
+static ssize_t hmc5843_show_operating_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ return sprintf(buf, "%d\n", data->operating_mode);
+}
+
+static ssize_t hmc5843_set_operating_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long operating_mode = 0;
+ s32 status;
+ int error;
+ error = strict_strtoul(buf, 10, &operating_mode);
+ if (error)
+ return error;
+ dev_dbg(dev, "set conversion mode to %lu\n", operating_mode);
+ if (operating_mode > MODE_SLEEP)
+ return -EINVAL;
+ mutex_lock(&data->lock);
+
+ status = i2c_smbus_write_byte_data(client, this_attr->address,
+ operating_mode);
+ if (status) {
+ count = -EINVAL;
+ goto exit;
+ }
+ data->operating_mode = operating_mode;
+
+exit:
+ mutex_unlock(&data->lock);
+ return count;
+}
+static IIO_DEVICE_ATTR(operating_mode,
+ S_IWUSR | S_IRUGO,
+ hmc5843_show_operating_mode,
+ hmc5843_set_operating_mode,
+ HMC5843_MODE_REG);
+
+/*
+ * API for setting the measurement configuration to
+ * Normal, Positive bias and Negative bias
+ * From the datasheet
+ *
+ * Normal measurement configuration (default): In normal measurement
+ * configuration the device follows normal measurement flow. Pins BP and BN
+ * are left floating and high impedance.
+ *
+ * Positive bias configuration: In positive bias configuration, a positive
+ * current is forced across the resistive load on pins BP and BN.
+ *
+ * Negative bias configuration. In negative bias configuration, a negative
+ * current is forced across the resistive load on pins BP and BN.
+ *
+ */
+static s32 hmc5843_set_meas_conf(struct i2c_client *client,
+ u8 meas_conf)
+{
+ struct hmc5843_data *data = i2c_get_clientdata(client);
+ u8 reg_val;
+ reg_val = (meas_conf & MEAS_CONF_MASK) | (data->rate << RATE_OFFSET);
+ return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
+}
+
+static ssize_t hmc5843_show_measurement_configuration(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ return sprintf(buf, "%d\n", data->meas_conf);
+}
+
+static ssize_t hmc5843_set_measurement_configuration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
+ struct hmc5843_data *data = i2c_get_clientdata(client);
+ unsigned long meas_conf = 0;
+ int error = strict_strtoul(buf, 10, &meas_conf);
+ if (error)
+ return error;
+ mutex_lock(&data->lock);
+
+ dev_dbg(dev, "set mode to %lu\n", meas_conf);
+ if (hmc5843_set_meas_conf(client, meas_conf)) {
+ count = -EINVAL;
+ goto exit;
+ }
+ data->meas_conf = meas_conf;
+
+exit:
+ mutex_unlock(&data->lock);
+ return count;
+}
+static IIO_DEVICE_ATTR(meas_conf,
+ S_IWUSR | S_IRUGO,
+ hmc5843_show_measurement_configuration,
+ hmc5843_set_measurement_configuration,
+ 0);
+
+/*
+ * From Datasheet
+ * The table shows the minimum data output
+ * Value | Minimum data output rate(Hz)
+ * 0 | 0.5
+ * 1 | 1
+ * 2 | 2
+ * 3 | 5
+ * 4 | 10 (default)
+ * 5 | 20
+ * 6 | 50
+ * 7 | Not used
+ */
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("0.5 1 2 5 10 20 50");
+
+static s32 hmc5843_set_rate(struct i2c_client *client,
+ u8 rate)
+{
+ struct hmc5843_data *data = i2c_get_clientdata(client);
+ u8 reg_val;
+
+ reg_val = (data->meas_conf) | (rate << RATE_OFFSET);
+ if (rate >= RATE_NOT_USED) {
+ dev_err(&client->dev,
+ "This data output rate is not supported \n");
+ return -EINVAL;
+ }
+ return i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_A, reg_val);
+}
+
+static ssize_t set_sampling_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ unsigned long rate = 0;
+
+ if (strncmp(buf, "0.5" , 3) == 0)
+ rate = RATE_5;
+ else if (strncmp(buf, "1" , 1) == 0)
+ rate = RATE_10;
+ else if (strncmp(buf, "2", 1) == 0)
+ rate = RATE_20;
+ else if (strncmp(buf, "5", 1) == 0)
+ rate = RATE_50;
+ else if (strncmp(buf, "10", 2) == 0)
+ rate = RATE_100;
+ else if (strncmp(buf, "20" , 2) == 0)
+ rate = RATE_200;
+ else if (strncmp(buf, "50" , 2) == 0)
+ rate = RATE_500;
+ else
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ dev_dbg(dev, "set rate to %lu\n", rate);
+ if (hmc5843_set_rate(client, rate)) {
+ count = -EINVAL;
+ goto exit;
+ }
+ data->rate = rate;
+
+exit:
+ mutex_unlock(&data->lock);
+ return count;
+}
+
+static ssize_t show_sampling_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s32 rate;
+
+ rate = i2c_smbus_read_byte_data(client, this_attr->address);
+ if (rate < 0)
+ return -EINVAL;
+ rate = (rate & RATE_BITMASK) >> RATE_OFFSET;
+ return sprintf(buf, "%s\n", regval_to_samp_freq[rate]);
+}
+static IIO_DEVICE_ATTR(sampling_frequency,
+ S_IWUSR | S_IRUGO,
+ show_sampling_frequency,
+ set_sampling_frequency,
+ HMC5843_CONFIG_REG_A);
+
+/*
+ * From Datasheet
+ * Nominal gain settings
+ * Value | Sensor Input Field Range(Ga) | Gain(counts/ milli-gauss)
+ *0 |(+-)0.7 |1620
+ *1 |(+-)1.0 |1300
+ *2 |(+-)1.5 |970
+ *3 |(+-)2.0 |780
+ *4 |(+-)3.2 |530
+ *5 |(+-)3.8 |460
+ *6 |(+-)4.5 |390
+ *7 |(+-)6.5 |280
+ */
+static ssize_t show_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 range;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct hmc5843_data *data = indio_dev->dev_data;
+
+ range = data->range;
+ return sprintf(buf, "%d\n", regval_to_input_field_mg[range]);
+}
+
+static ssize_t set_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ unsigned long range = 0;
+ int error;
+ error = strict_strtoul(buf, 10, &range);
+ if (error)
+ return error;
+ dev_dbg(dev, "set range to %lu\n", range);
+
+ if (range > RANGE_6_5)
+ return -EINVAL;
+ mutex_lock(&data->lock);
+
+ data->range = range;
+ range = range << RANGE_GAIN_OFFSET;
+ if (i2c_smbus_write_byte_data(client, this_attr->address, range))
+ count = -EINVAL;
+
+ mutex_unlock(&data->lock);
+ return count;
+
+}
+static IIO_DEVICE_ATTR(magn_range,
+ S_IWUSR | S_IRUGO,
+ show_range,
+ set_range,
+ HMC5843_CONFIG_REG_B);
+
+static ssize_t show_gain(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct hmc5843_data *data = indio_dev->dev_data;
+ return sprintf(buf, "%d\n", regval_to_counts_per_mg[data->range]);
+}
+static IIO_DEVICE_ATTR(magn_gain,
+ S_IRUGO,
+ show_gain,
+ NULL , 0);
+
+static struct attribute *hmc5843_attributes[] = {
+ &iio_dev_attr_meas_conf.dev_attr.attr,
+ &iio_dev_attr_operating_mode.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_magn_range.dev_attr.attr,
+ &iio_dev_attr_magn_gain.dev_attr.attr,
+ &iio_dev_attr_magn_x_raw.dev_attr.attr,
+ &iio_dev_attr_magn_y_raw.dev_attr.attr,
+ &iio_dev_attr_magn_z_raw.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hmc5843_group = {
+ .attrs = hmc5843_attributes,
+};
+
+static int hmc5843_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ unsigned char id_str[HMC5843_ID_REG_LENGTH];
+
+ if (client->addr != HMC5843_I2C_ADDRESS)
+ return -ENODEV;
+
+ if (i2c_smbus_read_i2c_block_data(client, HMC5843_ID_REG_A,
+ HMC5843_ID_REG_LENGTH, id_str)
+ != HMC5843_ID_REG_LENGTH)
+ return -ENODEV;
+
+ if (0 != strncmp(id_str, HMC5843_ID_STRING, HMC5843_ID_REG_LENGTH))
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Called when we have found a new HMC5843. */
+static void hmc5843_init_client(struct i2c_client *client)
+{
+ struct hmc5843_data *data = i2c_get_clientdata(client);
+ hmc5843_set_meas_conf(client, data->meas_conf);
+ hmc5843_set_rate(client, data->rate);
+ hmc5843_configure(client, data->operating_mode);
+ i2c_smbus_write_byte_data(client, HMC5843_CONFIG_REG_B,
+ data->range << RANGE_GAIN_OFFSET);
+ mutex_init(&data->lock);
+ pr_info("HMC5843 initialized\n");
+}
+
+static int hmc5843_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hmc5843_data *data;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct hmc5843_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* default settings at probe */
+
+ data->meas_conf = CONF_NORMAL;
+ data->range = RANGE_1_0;
+ data->operating_mode = MODE_CONVERSION_CONTINUOUS;
+
+ i2c_set_clientdata(client, data);
+
+ /* Initialize the HMC5843 chip */
+ hmc5843_init_client(client);
+
+ data->indio_dev = iio_allocate_device();
+ if (!data->indio_dev) {
+ err = -ENOMEM;
+ goto exit_free1;
+ }
+ data->indio_dev->attrs = &hmc5843_group;
+ data->indio_dev->dev.parent = &client->dev;
+ data->indio_dev->dev_data = (void *)(data);
+ data->indio_dev->driver_module = THIS_MODULE;
+ data->indio_dev->modes = INDIO_DIRECT_MODE;
+ err = iio_device_register(data->indio_dev);
+ if (err)
+ goto exit_free2;
+ return 0;
+exit_free2:
+ iio_free_device(data->indio_dev);
+exit_free1:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int hmc5843_remove(struct i2c_client *client)
+{
+ struct hmc5843_data *data = i2c_get_clientdata(client);
+ /* sleep mode to save power */
+ hmc5843_configure(client, MODE_SLEEP);
+ iio_device_unregister(data->indio_dev);
+ kfree(i2c_get_clientdata(client));
+ return 0;
+}
+
+static int hmc5843_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ hmc5843_configure(client, MODE_SLEEP);
+ return 0;
+}
+
+static int hmc5843_resume(struct i2c_client *client)
+{
+ struct hmc5843_data *data = i2c_get_clientdata(client);
+ hmc5843_configure(client, data->operating_mode);
+ return 0;
+}
+
+static const struct i2c_device_id hmc5843_id[] = {
+ { "hmc5843", 0 },
+ { }
+};
+
+static struct i2c_driver hmc5843_driver = {
+ .driver = {
+ .name = "hmc5843",
+ },
+ .id_table = hmc5843_id,
+ .probe = hmc5843_probe,
+ .remove = hmc5843_remove,
+ .detect = hmc5843_detect,
+ .address_list = normal_i2c,
+ .suspend = hmc5843_suspend,
+ .resume = hmc5843_resume,
+};
+
+static int __init hmc5843_init(void)
+{
+ return i2c_add_driver(&hmc5843_driver);
+}
+
+static void __exit hmc5843_exit(void)
+{
+ i2c_del_driver(&hmc5843_driver);
+}
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com");
+MODULE_DESCRIPTION("HMC5843 driver");
+MODULE_LICENSE("GPL");
+
+module_init(hmc5843_init);
+module_exit(hmc5843_exit);
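Since the new driver binds by I2C device id ("hmc5843" at address 0x1E), a
board file has to declare the device for the probe above to run. A hedged
sketch of such board code (not part of the patch; the bus number and the
my_board_* names are made up for illustration):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>

/* Example only: bus 2 is an arbitrary choice for this sketch. */
static struct i2c_board_info __initdata my_board_i2c2_devs[] = {
        {
                I2C_BOARD_INFO("hmc5843", 0x1e),
        },
};

static void __init my_board_register_sensors(void)
{
        i2c_register_board_info(2, my_board_i2c2_devs,
                                ARRAY_SIZE(my_board_i2c2_devs));
}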
diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
index 0e443757b02..a872d3904a3 100644
--- a/drivers/staging/iio/ring_generic.h
+++ b/drivers/staging/iio/ring_generic.h
@@ -11,6 +11,8 @@
#define _IIO_RING_GENERIC_H_
#include "iio.h"
+#ifdef CONFIG_IIO_RING_BUFFER
+
struct iio_handler;
struct iio_ring_buffer;
struct iio_dev;
@@ -98,6 +100,7 @@ struct iio_ring_access_funcs {
* @access_id: device id number
* @length: [DEVICE] number of datums in ring
* @bpd: [DEVICE] size of individual datum including timestamp
+ * @bpe: [DEVICE] size of individual channel value
* @loopcount: [INTERN] number of times the ring has looped
* @access_handler: [INTERN] chrdev access handling
* @ev_int: [INTERN] chrdev interface for the event chrdev
@@ -119,6 +122,7 @@ struct iio_ring_buffer {
int access_id;
int length;
int bpd;
+ int bpe;
int loopcount;
struct iio_handler access_handler;
struct iio_event_interface ev_int;
@@ -213,7 +217,7 @@ ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
* @_label: identification variable used by drivers. Often a reg address.
* @_controlfunc: function used to notify hardware of whether state changes
**/
-#define IIO_SCAN_EL_C(_name, _number, _bits, _label, _controlfunc) \
+#define __IIO_SCAN_EL_C(_name, _number, _bits, _label, _controlfunc) \
struct iio_scan_el iio_scan_el_##_name = { \
.dev_attr = __ATTR(_number##_##_name##_en, \
S_IRUGO | S_IWUSR, \
@@ -225,7 +229,10 @@ ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
.set_state = _controlfunc, \
}
-#define IIO_SCAN_NAMED_EL_C(_name, _string, _number, _bits, _label, _cf) \
+#define IIO_SCAN_EL_C(_name, _number, _bits, _label, _controlfunc) \
+ __IIO_SCAN_EL_C(_name, _number, _bits, _label, _controlfunc)
+
+#define __IIO_SCAN_NAMED_EL_C(_name, _string, _number, _bits, _label, _cf) \
struct iio_scan_el iio_scan_el_##_name = { \
.dev_attr = __ATTR(_number##_##_string##_en, \
S_IRUGO | S_IWUSR, \
@@ -236,7 +243,8 @@ ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
.label = _label, \
.set_state = _cf, \
}
-
+#define IIO_SCAN_NAMED_EL_C(_name, _string, _number, _bits, _label, _cf) \
+ __IIO_SCAN_NAMED_EL_C(_name, _string, _number, _bits, _label, _cf)
/**
* IIO_SCAN_EL_TIMESTAMP - declare a special scan element for timestamps
*
@@ -287,5 +295,14 @@ ssize_t iio_show_ring_enable(struct device *dev,
#define IIO_RING_ENABLE_ATTR DEVICE_ATTR(ring_enable, S_IRUGO | S_IWUSR, \
iio_show_ring_enable, \
iio_store_ring_enable)
+#else /* CONFIG_IIO_RING_BUFFER */
+static inline int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
+{
+ return 0;
+};
+static inline void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
+{};
+
+#endif /* CONFIG_IIO_RING_BUFFER */
#endif /* _IIO_RING_GENERIC_H_ */
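The __IIO_SCAN_EL_C()/IIO_SCAN_EL_C() split above is the usual two-level macro
trick: the wrapper expands its arguments once more before they are pasted into
the attribute name, so callers can now pass macros rather than literal tokens.
An illustrative, hypothetical use (MY_CHAN, MY_REG and my_set_state are
invented driver-local names):

#define MY_CHAN 0
#define MY_REG  0x28
/* my_set_state is the driver's scan-element control callback */
static IIO_SCAN_EL_C(accel_x, MY_CHAN, 12, MY_REG, &my_set_state);
/* yields a sysfs attribute named "0_accel_x_en"; without the wrapper the
 * unexpanded name "MY_CHAN_accel_x_en" would have been pasted instead. */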
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 294272d0619..e2f01c640ba 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"
+#include "trigger.h"
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
int bytes_per_datum, int length)
@@ -431,5 +432,73 @@ void iio_sw_rb_free(struct iio_ring_buffer *r)
iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
+
+int iio_sw_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled; if not, fail */
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count)
+ /* Timestamp (aligned to s64) and data */
+ size = (((indio_dev->scan_count * indio_dev->ring->bpe)
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count * indio_dev->ring->bpe;
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_ring_preenable);
+
+void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct iio_sw_ring_helper_state *st
+ = container_of(work_s, struct iio_sw_ring_helper_state,
+ work_trigger_to_ring);
+ int len = 0;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+ char *data = kmalloc(datasize, GFP_KERNEL);
+
+ if (data == NULL) {
+ dev_err(st->indio_dev->dev.parent,
+ "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ len = st->get_ring_element(st, data);
+
+ /* Guaranteed to be aligned with 8 byte boundary */
+ if (st->indio_dev->scan_timestamp)
+ *(s64 *)(((phys_addr_t)data + len
+ + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
+ = st->last_timestamp;
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);
+
+void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+ struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
+ h->last_timestamp = time;
+ schedule_work(&h->work_trigger_to_ring);
+}
+EXPORT_SYMBOL(iio_sw_poll_func_th);
+
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index fd677f00836..61f1ed65039 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -207,10 +207,21 @@ struct iio_sw_ring_buffer {
struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
void iio_sw_rb_free(struct iio_ring_buffer *ring);
+int iio_sw_ring_preenable(struct iio_dev *indio_dev);
+struct iio_sw_ring_helper_state {
+ struct work_struct work_trigger_to_ring;
+ struct iio_dev *indio_dev;
+ int (*get_ring_element)(struct iio_sw_ring_helper_state *st, u8 *buf);
+ s64 last_timestamp;
+};
+
+void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time);
+void iio_sw_trigger_bh_to_ring(struct work_struct *work_s);
#else /* CONFIG_IIO_RING_BUFFER*/
-static inline void iio_ring_sw_register_funcs(struct iio_ring_access_funcs *ra)
-{};
+struct iio_sw_ring_helper_state {
+ struct iio_dev *indio_dev;
+};
#endif /* !CONFIG_IIO_RING_BUFFER */
#endif /* _IIO_RING_SW_H_ */
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index afcf5ab85f4..60834162eb3 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -284,6 +284,14 @@ struct iio_const_attr {
.mask = _mask, \
.listel = &_ev_list };
+#define IIO_EVENT_ATTR_NAMED_SH(_vname, _name, _ev_list, _show, _store, _mask) \
+ static struct iio_event_attr \
+ iio_event_attr_##_vname \
+ = { .dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR, \
+ _show, _store), \
+ .mask = _mask, \
+ .listel = &_ev_list };
+
/**
* IIO_EVENT_ATTR - non-shared event attribute
* @_name: event name
@@ -293,10 +301,7 @@ struct iio_const_attr {
* @_handler: handler function to be called
**/
#define IIO_EVENT_ATTR(_name, _show, _store, _mask, _handler) \
- static struct iio_event_handler_list \
- iio_event_##_name = { \
- .handler = _handler, \
- }; \
+ IIO_EVENT_SH(_name, _handler); \
static struct \
iio_event_attr \
iio_event_attr_##_name \
@@ -324,6 +329,7 @@ struct iio_const_attr {
#define IIO_EVENT_CODE_GYRO_BASE 400
#define IIO_EVENT_CODE_ADC_BASE 500
#define IIO_EVENT_CODE_MISC_BASE 600
+#define IIO_EVENT_CODE_LIGHT_BASE 700
#define IIO_EVENT_CODE_DEVICE_SPECIFIC 1000
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index 784e7b6fac1..4699586a593 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -8,10 +8,6 @@
*/
#ifndef _IIO_TRIGGER_H_
#define _IIO_TRIGGER_H_
-#define IIO_TRIGGER_NAME_LENGTH 20
-#define IIO_TRIGGER_ID_PREFIX "iio:trigger"
-#define IIO_TRIGGER_ID_FORMAT IIO_TRIGGER_ID_PREFIX "%d"
-
/**
* struct iio_trigger - industrial I/O trigger device
@@ -25,7 +21,6 @@
* @pollfunc_list_lock: [INTERN] protection of the polling function list
* @pollfunc_list: [INTERN] list of functions to run on trigger.
* @control_attrs: [DRIVER] sysfs attributes relevant to trigger type
- * @timestamp: [INTERN] timestamp usesd by some trigs (e.g. datardy)
* @owner: [DRIVER] used to monitor usage count of the trigger.
* @use_count: use count for the trigger
* @set_trigger_state: [DRIVER] switch on/off the trigger on demand
@@ -43,7 +38,6 @@ struct iio_trigger {
spinlock_t pollfunc_list_lock;
struct list_head pollfunc_list;
const struct attribute_group *control_attrs;
- s64 timestamp;
struct module *owner;
int use_count;
@@ -124,7 +118,7 @@ int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
*
* Typically called in relevant hardware interrupt handler.
**/
-void iio_trigger_poll(struct iio_trigger *trig);
+void iio_trigger_poll(struct iio_trigger *trig, s64 time);
void iio_trigger_notify_done(struct iio_trigger *trig);
/**
@@ -148,13 +142,27 @@ struct iio_poll_func {
struct list_head list;
void *private_data;
void (*poll_func_immediate)(struct iio_dev *indio_dev);
- void (*poll_func_main)(struct iio_dev *private_data);
+ void (*poll_func_main)(struct iio_dev *private_data, s64 time);
};
+int iio_alloc_pollfunc(struct iio_dev *indio_dev,
+ void (*immediate)(struct iio_dev *indio_dev),
+ void (*main)(struct iio_dev *private_data, s64 time));
+
+/*
+ * Two functions for the common case where all that happens is that a
+ * pollfunc is attached to and detached from a trigger
+ */
+int iio_triggered_ring_postenable(struct iio_dev *indio_dev);
+int iio_triggered_ring_predisable(struct iio_dev *indio_dev);
+
struct iio_trigger *iio_allocate_trigger(void);
void iio_free_trigger(struct iio_trigger *trig);
+struct iio_simple_trigger {
+ struct iio_trigger trig;
+};
#endif /* _IIO_TRIGGER_H_ */
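With the signature change above, trigger drivers that know the moment of the
event are expected to capture it when calling iio_trigger_poll(). A hedged
sketch of a data-ready style interrupt handler, assuming the iio_get_time_ns()
helper already provided by iio.h (the my_* name is invented):

static irqreturn_t my_data_rdy_trig_poll(int irq, void *private)
{
        /* stamp the poll with the interrupt time rather than 0 */
        iio_trigger_poll(private, iio_get_time_ns());
        return IRQ_HANDLED;
}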
diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile
index e5f96d2fe64..10aeca5e347 100644
--- a/drivers/staging/iio/trigger/Makefile
+++ b/drivers/staging/iio/trigger/Makefile
@@ -1,5 +1,6 @@
#
# Makefile for triggers not associated with iio-devices
#
+
obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o
-obj-$(CONFIG_IIO_GPIO_TRIGGER) += iio-trig-gpio.o \ No newline at end of file
+obj-$(CONFIG_IIO_GPIO_TRIGGER) += iio-trig-gpio.o
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index 1da285d2863..f93cc916983 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -42,7 +42,8 @@ struct iio_gpio_trigger_info {
static irqreturn_t iio_gpio_trigger_poll(int irq, void *private)
{
- iio_trigger_poll(private);
+ /* Timestamp not currently provided */
+ iio_trigger_poll(private, 0);
return IRQ_HANDLED;
}
@@ -93,16 +94,11 @@ static int iio_gpio_trigger_probe(struct platform_device *pdev)
trig->private_data = trig_info;
trig_info->irq = irq;
trig->owner = THIS_MODULE;
- trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH,
- GFP_KERNEL);
- if (!trig->name) {
+ trig->name = kasprintf(GFP_KERNEL, "irqtrig%d", irq);
+ if (trig->name == NULL) {
ret = -ENOMEM;
goto error_free_trig_info;
}
- snprintf((char *)trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "irqtrig%d", irq);
-
ret = request_irq(irq, iio_gpio_trigger_poll,
irqflags, trig->name, trig);
if (ret) {
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 4ee3ae1ef89..b0b52f84edf 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -25,7 +25,6 @@ static DEFINE_MUTEX(iio_prtc_trigger_list_lock);
struct iio_prtc_trigger_info {
struct rtc_device *rtc;
int frequency;
- char *name;
struct rtc_task task;
};
@@ -78,8 +77,7 @@ static ssize_t iio_trig_periodic_read_name(struct device *dev,
char *buf)
{
struct iio_trigger *trig = dev_get_drvdata(dev);
- struct iio_prtc_trigger_info *trig_info = trig->private_data;
- return sprintf(buf, "%s\n", trig_info->name);
+ return sprintf(buf, "%s\n", trig->name);
}
static DEVICE_ATTR(name, S_IRUGO,
@@ -100,7 +98,8 @@ static const struct attribute_group iio_trig_prtc_attr_group = {
static void iio_prtc_trigger_poll(void *private_data)
{
- iio_trigger_poll(private_data);
+ /* Timestamp is not provided currently */
+ iio_trigger_poll(private_data, 0);
}
static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
@@ -129,16 +128,12 @@ static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
trig->private_data = trig_info;
trig->owner = THIS_MODULE;
trig->set_trigger_state = &iio_trig_periodic_rtc_set_state;
- trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ trig->name = kasprintf(GFP_KERNEL, "periodic%s", pdata[i]);
if (trig->name == NULL) {
ret = -ENOMEM;
goto error_free_trig_info;
}
- snprintf((char *)trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "periodic%s",
- pdata[i]);
- trig_info->name = (char *)trig->name;
+
/* RTC access */
trig_info->rtc
= rtc_class_open(pdata[i]);
diff --git a/drivers/staging/line6/Kconfig b/drivers/staging/line6/Kconfig
index 7852d4a960c..bc1ffbed3c8 100644
--- a/drivers/staging/line6/Kconfig
+++ b/drivers/staging/line6/Kconfig
@@ -2,6 +2,7 @@ config LINE6_USB
tristate "Line6 USB support"
depends on USB && SND
select SND_RAWMIDI
+ select SND_PCM
help
This is a driver for the guitar amp, cab, and effects modeller
PODxt Pro by Line6 (and similar devices), supporting the
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 1d5a4730276..27b986a50a0 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -679,8 +679,10 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
usb_get_dev(usbdev);
/* we don't handle multiple configurations */
- if (usbdev->descriptor.bNumConfigurations != 1)
- return -ENODEV;
+ if (usbdev->descriptor.bNumConfigurations != 1) {
+ ret = -ENODEV;
+ goto err_put;
+ }
/* check vendor and product id */
for (devtype = ARRAY_SIZE(line6_id_table) - 1; devtype--;) {
@@ -692,16 +694,20 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
break;
}
- if (devtype < 0)
- return -ENODEV;
+ if (devtype < 0) {
+ ret = -ENODEV;
+ goto err_put;
+ }
/* find free slot in device table: */
for (devnum = 0; devnum < LINE6_MAX_DEVICES; ++devnum)
if (line6_devices[devnum] == NULL)
break;
- if (devnum == LINE6_MAX_DEVICES)
- return -ENODEV;
+ if (devnum == LINE6_MAX_DEVICES) {
+ ret = -ENODEV;
+ goto err_put;
+ }
/* initialize device info: */
properties = &line6_properties_table[devtype];
@@ -762,13 +768,14 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
default:
MISSING_CASE;
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put;
}
ret = usb_set_interface(usbdev, interface_number, alternate);
if (ret < 0) {
dev_err(&interface->dev, "set_interface failed\n");
- return ret;
+ goto err_put;
}
/* initialize device data based on product id: */
@@ -815,7 +822,8 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put;
}
break;
@@ -827,19 +835,22 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
default:
MISSING_CASE;
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put;
}
if (size == 0) {
dev_err(line6->ifcdev, "driver bug: interface data size not set\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put;
}
line6 = kzalloc(size, GFP_KERNEL);
if (line6 == NULL) {
dev_err(&interface->dev, "Out of memory\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_put;
}
/* store basic data: */
@@ -875,16 +886,16 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
if (line6->buffer_listen == NULL) {
dev_err(&interface->dev, "Out of memory\n");
- line6_destruct(interface);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_destruct;
}
line6->buffer_message = kmalloc(LINE6_MESSAGE_MAXLEN, GFP_KERNEL);
if (line6->buffer_message == NULL) {
dev_err(&interface->dev, "Out of memory\n");
- line6_destruct(interface);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_destruct;
}
line6->urb_listen = usb_alloc_urb(0, GFP_KERNEL);
@@ -892,15 +903,15 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
if (line6->urb_listen == NULL) {
dev_err(&interface->dev, "Out of memory\n");
line6_destruct(interface);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_destruct;
}
ret = line6_start_listen(line6);
if (ret < 0) {
dev_err(&interface->dev, "%s: usb_submit_urb failed\n",
__func__);
- line6_destruct(interface);
- return ret;
+ goto err_destruct;
}
}
@@ -952,22 +963,25 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
ret = -ENODEV;
}
- if (ret < 0) {
- line6_destruct(interface);
- return ret;
- }
+ if (ret < 0)
+ goto err_destruct;
ret = sysfs_create_link(&interface->dev.kobj, &usbdev->dev.kobj,
"usb_device");
- if (ret < 0) {
- line6_destruct(interface);
- return ret;
- }
+ if (ret < 0)
+ goto err_destruct;
dev_info(&interface->dev, "Line6 %s now attached\n",
line6->properties->name);
line6_devices[devnum] = line6;
line6_list_devices();
+ return 0;
+
+err_destruct:
+ line6_destruct(interface);
+err_put:
+ usb_put_intf(interface);
+ usb_put_dev(usbdev);
return ret;
}
diff --git a/drivers/staging/lirc/Kconfig b/drivers/staging/lirc/Kconfig
index 968c2adee06..100c4d4b812 100644
--- a/drivers/staging/lirc/Kconfig
+++ b/drivers/staging/lirc/Kconfig
@@ -3,6 +3,7 @@
#
menuconfig LIRC_STAGING
bool "Linux Infrared Remote Control IR receiver/transmitter drivers"
+ depends on LIRC
help
Say Y here, and all supported Linux Infrared Remote Control IR and
RF receiver and transmitter drivers will be displayed. When paired
@@ -13,21 +14,13 @@ if LIRC_STAGING
config LIRC_BT829
tristate "BT829 based hardware"
- depends on LIRC_STAGING
+ depends on LIRC_STAGING && PCI
help
Driver for the IR interface on BT829-based hardware
-config LIRC_ENE0100
- tristate "ENE KB3924/ENE0100 CIR Port Reciever"
- depends on LIRC_STAGING
- help
- This is a driver for CIR port handled by ENE KB3924 embedded
- controller found on some notebooks.
- It appears on PNP list as ENE0100.
-
config LIRC_I2C
tristate "I2C Based IR Receivers"
- depends on LIRC_STAGING
+ depends on LIRC_STAGING && I2C
help
Driver for I2C-based IR receivers, such as those commonly
found onboard Hauppauge PVR-150/250/350 video capture cards
@@ -40,7 +33,7 @@ config LIRC_IGORPLUGUSB
config LIRC_IMON
tristate "Legacy SoundGraph iMON Receiver and Display"
- depends on LIRC_STAGING
+ depends on LIRC_STAGING && USB
help
Driver for the original SoundGraph iMON IR Receiver and Display
@@ -48,7 +41,7 @@ config LIRC_IMON
config LIRC_IT87
tristate "ITE IT87XX CIR Port Receiver"
- depends on LIRC_STAGING
+ depends on LIRC_STAGING && PNP
help
Driver for the ITE IT87xx IR Receiver
@@ -60,13 +53,13 @@ config LIRC_ITE8709
config LIRC_PARALLEL
tristate "Homebrew Parallel Port Receiver"
- depends on LIRC_STAGING && !SMP
+ depends on LIRC_STAGING && PARPORT && !SMP
help
Driver for Homebrew Parallel Port Receivers
config LIRC_SASEM
tristate "Sasem USB IR Remote"
- depends on LIRC_STAGING
+ depends on LIRC_STAGING && USB
help
Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
@@ -89,12 +82,6 @@ config LIRC_SIR
help
Driver for the SIR IrDA port
-config LIRC_STREAMZAP
- tristate "Streamzap PC Receiver"
- depends on LIRC_STAGING
- help
- Driver for the Streamzap PC Receiver
-
config LIRC_TTUSBIR
tristate "Technotrend USB IR Receiver"
depends on LIRC_STAGING && USB
@@ -103,7 +90,7 @@ config LIRC_TTUSBIR
config LIRC_ZILOG
tristate "Zilog/Hauppauge IR Transmitter"
- depends on LIRC_STAGING
+ depends on LIRC_STAGING && I2C
help
Driver for the Zilog/Hauppauge IR Transmitter, found on
PVR-150/500, HVR-1200/1250/1700/1800, HD-PVR and other cards
diff --git a/drivers/staging/lirc/Makefile b/drivers/staging/lirc/Makefile
index a019182a7a3..4da1f3397a1 100644
--- a/drivers/staging/lirc/Makefile
+++ b/drivers/staging/lirc/Makefile
@@ -4,7 +4,6 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
-obj-$(CONFIG_LIRC_ENE0100) += lirc_ene0100.o
obj-$(CONFIG_LIRC_I2C) += lirc_i2c.o
obj-$(CONFIG_LIRC_IGORPLUGUSB) += lirc_igorplugusb.o
obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
@@ -14,6 +13,5 @@ obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o
obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
-obj-$(CONFIG_LIRC_STREAMZAP) += lirc_streamzap.o
obj-$(CONFIG_LIRC_TTUSBIR) += lirc_ttusbir.o
obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
diff --git a/drivers/staging/lirc/lirc_ene0100.c b/drivers/staging/lirc/lirc_ene0100.c
deleted file mode 100644
index a152c52b074..00000000000
--- a/drivers/staging/lirc/lirc_ene0100.c
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * driver for ENE KB3926 B/C/D CIR (also known as ENE0100)
- *
- * Copyright (C) 2009 Maxim Levitsky <maximlevitsky@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pnp.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include "lirc_ene0100.h"
-
-static int sample_period = 75;
-static int enable_idle = 1;
-static int enable_learning;
-
-static void ene_set_idle(struct ene_device *dev, int idle);
-static void ene_set_inputs(struct ene_device *dev, int enable);
-
-/* read a hardware register */
-static u8 ene_hw_read_reg(struct ene_device *dev, u16 reg)
-{
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
- return inb(dev->hw_io + ENE_IO);
-}
-
-/* write a hardware register */
-static void ene_hw_write_reg(struct ene_device *dev, u16 reg, u8 value)
-{
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
- outb(value, dev->hw_io + ENE_IO);
-}
-
-/* change specific bits in hardware register */
-static void ene_hw_write_reg_mask(struct ene_device *dev,
- u16 reg, u8 value, u8 mask)
-{
- u8 regvalue;
-
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
-
- regvalue = inb(dev->hw_io + ENE_IO) & ~mask;
- regvalue |= (value & mask);
- outb(regvalue, dev->hw_io + ENE_IO);
-}
-
-/* read irq status and ack it */
-static int ene_hw_irq_status(struct ene_device *dev, int *buffer_pointer)
-{
- u8 irq_status;
- u8 fw_flags1, fw_flags2;
-
- fw_flags2 = ene_hw_read_reg(dev, ENE_FW2);
-
- if (buffer_pointer)
- *buffer_pointer = 4 * (fw_flags2 & ENE_FW2_BUF_HIGH);
-
- if (dev->hw_revision < ENE_HW_C) {
- irq_status = ene_hw_read_reg(dev, ENEB_IRQ_STATUS);
-
- if (!(irq_status & ENEB_IRQ_STATUS_IR))
- return 0;
- ene_hw_write_reg(dev, ENEB_IRQ_STATUS,
- irq_status & ~ENEB_IRQ_STATUS_IR);
-
- /* rev B support only recieving */
- return ENE_IRQ_RX;
- }
-
- irq_status = ene_hw_read_reg(dev, ENEC_IRQ);
-
- if (!(irq_status & ENEC_IRQ_STATUS))
- return 0;
-
- /* original driver does that twice - a workaround ? */
- ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
- ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
-
- /* clear unknown flag in F8F9 */
- if (fw_flags2 & ENE_FW2_IRQ_CLR)
- ene_hw_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_IRQ_CLR);
-
- /* check if this is a TX interrupt */
- fw_flags1 = ene_hw_read_reg(dev, ENE_FW1);
-
- if (fw_flags1 & ENE_FW1_TXIRQ) {
- ene_hw_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
- return ENE_IRQ_TX;
- } else
- return ENE_IRQ_RX;
-}
-
-static int ene_hw_detect(struct ene_device *dev)
-{
- u8 chip_major, chip_minor;
- u8 hw_revision, old_ver;
- u8 tmp;
- u8 fw_capabilities;
-
- tmp = ene_hw_read_reg(dev, ENE_HW_UNK);
- ene_hw_write_reg(dev, ENE_HW_UNK, tmp & ~ENE_HW_UNK_CLR);
-
- chip_major = ene_hw_read_reg(dev, ENE_HW_VER_MAJOR);
- chip_minor = ene_hw_read_reg(dev, ENE_HW_VER_MINOR);
-
- ene_hw_write_reg(dev, ENE_HW_UNK, tmp);
- hw_revision = ene_hw_read_reg(dev, ENE_HW_VERSION);
- old_ver = ene_hw_read_reg(dev, ENE_HW_VER_OLD);
-
- if (hw_revision == 0xFF) {
-
- ene_printk(KERN_WARNING, "device seems to be disabled\n");
- ene_printk(KERN_WARNING,
- "send a mail to lirc-list@lists.sourceforge.net\n");
- ene_printk(KERN_WARNING, "please attach output of acpidump\n");
-
- return -ENODEV;
- }
-
- if (chip_major == 0x33) {
- ene_printk(KERN_WARNING, "chips 0x33xx aren't supported yet\n");
- return -ENODEV;
- }
-
- if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
- dev->hw_revision = ENE_HW_C;
- ene_printk(KERN_WARNING,
- "KB3926C detected, driver support is not complete!\n");
-
- } else if (old_ver == 0x24 && hw_revision == 0xC0) {
- dev->hw_revision = ENE_HW_B;
- ene_printk(KERN_NOTICE, "KB3926B detected\n");
- } else {
- dev->hw_revision = ENE_HW_D;
- ene_printk(KERN_WARNING,
- "unknown ENE chip detected, assuming KB3926D\n");
- ene_printk(KERN_WARNING, "driver support incomplete\n");
-
- }
-
- ene_printk(KERN_DEBUG, "chip is 0x%02x%02x - 0x%02x, 0x%02x\n",
- chip_major, chip_minor, old_ver, hw_revision);
-
-
- /* detect features hardware supports */
-
- if (dev->hw_revision < ENE_HW_C)
- return 0;
-
- fw_capabilities = ene_hw_read_reg(dev, ENE_FW2);
-
- dev->hw_gpio40_learning = fw_capabilities & ENE_FW2_GP40_AS_LEARN;
- dev->hw_learning_and_tx_capable = fw_capabilities & ENE_FW2_LEARNING;
-
- dev->hw_fan_as_normal_input = dev->hw_learning_and_tx_capable &&
- fw_capabilities & ENE_FW2_FAN_AS_NRML_IN;
-
- ene_printk(KERN_NOTICE, "hardware features:\n");
- ene_printk(KERN_NOTICE,
- "learning and tx %s, gpio40_learn %s, fan_in %s\n",
- dev->hw_learning_and_tx_capable ? "on" : "off",
- dev->hw_gpio40_learning ? "on" : "off",
- dev->hw_fan_as_normal_input ? "on" : "off");
-
- if (!dev->hw_learning_and_tx_capable && enable_learning)
- enable_learning = 0;
-
- if (dev->hw_learning_and_tx_capable) {
- ene_printk(KERN_WARNING,
- "Device supports transmitting, but the driver doesn't\n");
- ene_printk(KERN_WARNING,
- "due to lack of hardware to test against.\n");
- ene_printk(KERN_WARNING,
- "Send a mail to: lirc-list@lists.sourceforge.net\n");
- }
- return 0;
-}
-
-/* hardware initialization */
-static int ene_hw_init(void *data)
-{
- u8 reg_value;
- struct ene_device *dev = (struct ene_device *)data;
- dev->in_use = 1;
-
- if (dev->hw_revision < ENE_HW_C) {
- ene_hw_write_reg(dev, ENEB_IRQ, dev->irq << 1);
- ene_hw_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
- } else {
- reg_value = ene_hw_read_reg(dev, ENEC_IRQ) & 0xF0;
- reg_value |= ENEC_IRQ_UNK_EN;
- reg_value &= ~ENEC_IRQ_STATUS;
- reg_value |= (dev->irq & ENEC_IRQ_MASK);
- ene_hw_write_reg(dev, ENEC_IRQ, reg_value);
- ene_hw_write_reg(dev, ENE_TX_UNK1, 0x63);
- }
-
- ene_hw_write_reg(dev, ENE_CIR_CONF2, 0x00);
- ene_set_inputs(dev, enable_learning);
-
- /* set sampling period */
- ene_hw_write_reg(dev, ENE_CIR_SAMPLE_PERIOD, sample_period);
-
- /* ack any pending irqs - just in case */
- ene_hw_irq_status(dev, NULL);
-
- /* enter idle mode */
- ene_set_idle(dev, 1);
-
- /* enable firmware bits */
- ene_hw_write_reg_mask(dev, ENE_FW1,
- ENE_FW1_ENABLE | ENE_FW1_IRQ,
- ENE_FW1_ENABLE | ENE_FW1_IRQ);
- /* clear stats */
- dev->sample = 0;
- return 0;
-}
-
-/* this enables gpio40 signal, used if connected to wide band input */
-static void ene_enable_gpio40(struct ene_device *dev, int enable)
-{
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, enable ?
- 0 : ENE_CIR_CONF2_GPIO40DIS,
- ENE_CIR_CONF2_GPIO40DIS);
-}
-
-/* this enables the classic sampler */
-static void ene_enable_normal_recieve(struct ene_device *dev, int enable)
-{
- ene_hw_write_reg(dev, ENE_CIR_CONF1, enable ? ENE_CIR_CONF1_ADC_ON : 0);
-}
-
-/* this enables receiving via the fan input */
-static void ene_enable_fan_recieve(struct ene_device *dev, int enable)
-{
- if (!enable)
- ene_hw_write_reg(dev, ENE_FAN_AS_IN1, 0);
- else {
- ene_hw_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
- ene_hw_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
- }
- dev->fan_input_inuse = enable;
-}
-
-/* determine which input to use */
-static void ene_set_inputs(struct ene_device *dev, int learning_enable)
-{
- ene_enable_normal_recieve(dev, 1);
-
- /* old hardware doesn't support learning mode for sure */
- if (dev->hw_revision <= ENE_HW_B)
- return;
-
- /* receiver not learning capable, still set gpio40 correctly */
- if (!dev->hw_learning_and_tx_capable) {
- ene_enable_gpio40(dev, !dev->hw_gpio40_learning);
- return;
- }
-
- /* enable learning mode */
- if (learning_enable) {
- ene_enable_gpio40(dev, dev->hw_gpio40_learning);
-
- /* fan input is not used for learning */
- if (dev->hw_fan_as_normal_input)
- ene_enable_fan_recieve(dev, 0);
-
- /* disable learning mode */
- } else {
- if (dev->hw_fan_as_normal_input) {
- ene_enable_fan_recieve(dev, 1);
- ene_enable_normal_recieve(dev, 0);
- } else
- ene_enable_gpio40(dev, !dev->hw_gpio40_learning);
- }
-
- /* set few additional settings for this mode */
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, learning_enable ?
- ENE_CIR_CONF1_LEARN1 : 0, ENE_CIR_CONF1_LEARN1);
-
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, learning_enable ?
- ENE_CIR_CONF2_LEARN2 : 0, ENE_CIR_CONF2_LEARN2);
-}
-
-/* deinitialization */
-static void ene_hw_deinit(void *data)
-{
- struct ene_device *dev = (struct ene_device *)data;
-
- /* disable samplers */
- ene_enable_normal_recieve(dev, 0);
-
- if (dev->hw_fan_as_normal_input)
- ene_enable_fan_recieve(dev, 0);
-
- /* disable hardware IRQ and firmware flag */
- ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_ENABLE | ENE_FW1_IRQ);
-
- ene_set_idle(dev, 1);
- dev->in_use = 0;
-}
-
-/* sends current sample to userspace */
-static void send_sample(struct ene_device *dev)
-{
- int value = abs(dev->sample) & PULSE_MASK;
-
- if (dev->sample > 0)
- value |= PULSE_BIT;
-
- if (!lirc_buffer_full(dev->lirc_driver->rbuf)) {
- lirc_buffer_write(dev->lirc_driver->rbuf, (void *)&value);
- wake_up(&dev->lirc_driver->rbuf->wait_poll);
- }
- dev->sample = 0;
-}
-
-/* this updates current sample */
-static void update_sample(struct ene_device *dev, int sample)
-{
- if (!dev->sample)
- dev->sample = sample;
- else if (same_sign(dev->sample, sample))
- dev->sample += sample;
- else {
- send_sample(dev);
- dev->sample = sample;
- }
-}
-
-/* enable or disable idle mode */
-static void ene_set_idle(struct ene_device *dev, int idle)
-{
- struct timeval now;
- int disable = idle && enable_idle && (dev->hw_revision < ENE_HW_C);
-
- ene_hw_write_reg_mask(dev, ENE_CIR_SAMPLE_PERIOD,
- disable ? 0 : ENE_CIR_SAMPLE_OVERFLOW,
- ENE_CIR_SAMPLE_OVERFLOW);
- dev->idle = idle;
-
- /* remember when we have entered the idle mode */
- if (idle) {
- do_gettimeofday(&dev->gap_start);
- return;
- }
-
- /* send the gap between keypresses now */
- do_gettimeofday(&now);
-
- if (now.tv_sec - dev->gap_start.tv_sec > 16)
- dev->sample = space(PULSE_MASK);
- else
- dev->sample = dev->sample +
- space(1000000ull * (now.tv_sec - dev->gap_start.tv_sec))
- + space(now.tv_usec - dev->gap_start.tv_usec);
-
- if (abs(dev->sample) > PULSE_MASK)
- dev->sample = space(PULSE_MASK);
- send_sample(dev);
-}
-
-/* interrupt handler */
-static irqreturn_t ene_hw_irq(int irq, void *data)
-{
- u16 hw_value;
- int i, hw_sample;
- int space;
- int buffer_pointer;
- int irq_status;
-
- struct ene_device *dev = (struct ene_device *)data;
- irq_status = ene_hw_irq_status(dev, &buffer_pointer);
-
- if (!irq_status)
- return IRQ_NONE;
-
- /* TODO: only RX for now */
- if (irq_status == ENE_IRQ_TX)
- return IRQ_HANDLED;
-
- for (i = 0; i < ENE_SAMPLES_SIZE; i++) {
-
- hw_value = ene_hw_read_reg(dev,
- ENE_SAMPLE_BUFFER + buffer_pointer + i);
-
- if (dev->fan_input_inuse) {
- /* read high part of the sample */
- hw_value |= ene_hw_read_reg(dev,
- ENE_SAMPLE_BUFFER_FAN + buffer_pointer + i) << 8;
-
- /* test for _space_ bit */
- space = !(hw_value & ENE_FAN_SMPL_PULS_MSK);
-
- /* clear space bit, and other unused bits */
- hw_value &= ENE_FAN_VALUE_MASK;
- hw_sample = hw_value * ENE_SAMPLE_PERIOD_FAN;
-
- } else {
- space = hw_value & ENE_SAMPLE_SPC_MASK;
- hw_value &= ENE_SAMPLE_VALUE_MASK;
- hw_sample = hw_value * sample_period;
- }
-
- /* no more data */
- if (!(hw_value))
- break;
-
- if (space)
- hw_sample *= -1;
-
- /* overflow sample received, handle it */
-
- if (!dev->fan_input_inuse && hw_value == ENE_SAMPLE_OVERFLOW) {
-
- if (dev->idle)
- continue;
-
- if (dev->sample > 0 || abs(dev->sample) <= ENE_MAXGAP)
- update_sample(dev, hw_sample);
- else
- ene_set_idle(dev, 1);
-
- continue;
- }
-
- /* normal first sample received */
- if (!dev->fan_input_inuse && dev->idle) {
- ene_set_idle(dev, 0);
-
- /* discard the first received value; it's random
-    since it's the time the signal was off before the
-    first pulse. If idle mode is enabled, HW
-    does that for us */
-
- if (!enable_idle)
- continue;
- }
- update_sample(dev, hw_sample);
- send_sample(dev);
- }
- return IRQ_HANDLED;
-}
-
-static int ene_probe(struct pnp_dev *pnp_dev,
- const struct pnp_device_id *dev_id)
-{
- struct ene_device *dev;
- struct lirc_driver *lirc_driver;
- int error = -ENOMEM;
-
- dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
-
- if (!dev)
- goto err1;
-
- dev->pnp_dev = pnp_dev;
- pnp_set_drvdata(pnp_dev, dev);
-
-
- /* prepare lirc interface */
- error = -ENOMEM;
- lirc_driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
-
- if (!lirc_driver)
- goto err2;
-
- dev->lirc_driver = lirc_driver;
-
- strcpy(lirc_driver->name, ENE_DRIVER_NAME);
- lirc_driver->minor = -1;
- lirc_driver->code_length = sizeof(int) * 8;
- lirc_driver->features = LIRC_CAN_REC_MODE2;
- lirc_driver->data = dev;
- lirc_driver->set_use_inc = ene_hw_init;
- lirc_driver->set_use_dec = ene_hw_deinit;
- lirc_driver->dev = &pnp_dev->dev;
- lirc_driver->owner = THIS_MODULE;
-
- lirc_driver->rbuf = kzalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
-
- if (!lirc_driver->rbuf)
- goto err3;
-
- if (lirc_buffer_init(lirc_driver->rbuf, sizeof(int), sizeof(int) * 256))
- goto err4;
-
- error = -ENODEV;
- if (lirc_register_driver(lirc_driver))
- goto err5;
-
- /* validate resources */
- if (!pnp_port_valid(pnp_dev, 0) ||
- pnp_port_len(pnp_dev, 0) < ENE_MAX_IO)
- goto err6;
-
- if (!pnp_irq_valid(pnp_dev, 0))
- goto err6;
-
- dev->hw_io = pnp_port_start(pnp_dev, 0);
- dev->irq = pnp_irq(pnp_dev, 0);
-
- /* claim the resources */
- error = -EBUSY;
- if (!request_region(dev->hw_io, ENE_MAX_IO, ENE_DRIVER_NAME))
- goto err6;
-
- if (request_irq(dev->irq, ene_hw_irq,
- IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev))
- goto err7;
-
- /* detect hardware version and features */
- error = ene_hw_detect(dev);
- if (error)
- goto err8;
-
- ene_printk(KERN_NOTICE, "driver has been successfully loaded\n");
- return 0;
-
-err8:
- free_irq(dev->irq, dev);
-err7:
- release_region(dev->hw_io, ENE_MAX_IO);
-err6:
- lirc_unregister_driver(lirc_driver->minor);
-err5:
- lirc_buffer_free(lirc_driver->rbuf);
-err4:
- kfree(lirc_driver->rbuf);
-err3:
- kfree(lirc_driver);
-err2:
- kfree(dev);
-err1:
- return error;
-}
-
-static void ene_remove(struct pnp_dev *pnp_dev)
-{
- struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_hw_deinit(dev);
- free_irq(dev->irq, dev);
- release_region(dev->hw_io, ENE_MAX_IO);
- lirc_unregister_driver(dev->lirc_driver->minor);
- lirc_buffer_free(dev->lirc_driver->rbuf);
- kfree(dev->lirc_driver);
- kfree(dev);
-}
-
-#ifdef CONFIG_PM
-
-/* TODO: make 'wake on IR' configurable and add .shutdown */
-/* currently impossible due to lack of kernel support */
-
-static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
-{
- struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_hw_write_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, ENE_FW1_WAKE);
- return 0;
-}
-
-static int ene_resume(struct pnp_dev *pnp_dev)
-{
- struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- if (dev->in_use)
- ene_hw_init(dev);
-
- ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_WAKE);
- return 0;
-}
-
-#endif
-
-static const struct pnp_device_id ene_ids[] = {
- {.id = "ENE0100",},
- {},
-};
-
-static struct pnp_driver ene_driver = {
- .name = ENE_DRIVER_NAME,
- .id_table = ene_ids,
- .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
-
- .probe = ene_probe,
- .remove = __devexit_p(ene_remove),
-
-#ifdef CONFIG_PM
- .suspend = ene_suspend,
- .resume = ene_resume,
-#endif
-};
-
-static int __init ene_init(void)
-{
- if (sample_period < 5) {
- ene_printk(KERN_ERR, "sample period must be at least 5 us\n");
- ene_printk(KERN_ERR, "(at least 30 us is recommended)\n");
- return -EINVAL;
- }
- return pnp_register_driver(&ene_driver);
-}
-
-static void ene_exit(void)
-{
- pnp_unregister_driver(&ene_driver);
-}
-
-module_param(sample_period, int, S_IRUGO);
-MODULE_PARM_DESC(sample_period, "Hardware sample period (75 us default)");
-
-module_param(enable_idle, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_idle,
- "Enables turning off signal sampling after a long inactivity time; "
- "disabling it might help with detecting the input signal (default: enabled)");
-
-module_param(enable_learning, bool, S_IRUGO);
-MODULE_PARM_DESC(enable_learning, "Use wide band (learning) receiver");
-
-MODULE_DEVICE_TABLE(pnp, ene_ids);
-MODULE_DESCRIPTION
- ("LIRC driver for KB3926B/KB3926C/KB3926D (aka ENE0100) CIR port");
-MODULE_AUTHOR("Maxim Levitsky");
-MODULE_LICENSE("GPL");
-
-module_init(ene_init);
-module_exit(ene_exit);
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 09f36961c6d..ec11c0e949a 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -109,6 +109,7 @@ static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue);
static DEFINE_SPINLOCK(hardware_lock);
static DEFINE_SPINLOCK(dev_lock);
+static bool device_open;
static int rx_buf[RBUF_LEN];
unsigned int rx_tail, rx_head;
@@ -147,10 +148,11 @@ static void drop_port(void);
static int lirc_open(struct inode *inode, struct file *file)
{
spin_lock(&dev_lock);
- if (module_refcount(THIS_MODULE)) {
+ if (device_open) {
spin_unlock(&dev_lock);
return -EBUSY;
}
+ device_open = true;
spin_unlock(&dev_lock);
return 0;
}
@@ -158,6 +160,9 @@ static int lirc_open(struct inode *inode, struct file *file)
static int lirc_close(struct inode *inode, struct file *file)
{
+ spin_lock(&dev_lock);
+ device_open = false;
+ spin_unlock(&dev_lock);
return 0;
}
@@ -363,7 +368,6 @@ static struct lirc_driver driver = {
};
-#ifdef MODULE
static int init_chrdev(void)
{
driver.minor = lirc_register_driver(&driver);
@@ -380,7 +384,6 @@ static void drop_chrdev(void)
{
lirc_unregister_driver(driver.minor);
}
-#endif
/* SECTION: Hardware */
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index a1ebd071640..6da4a8c6ebc 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -240,7 +240,7 @@ static void irq_handler(void *blah)
unsigned int level, newlevel;
unsigned int timeout;
- if (!module_refcount(THIS_MODULE))
+ if (!is_open)
return;
if (!is_claimed)
@@ -515,7 +515,7 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
static int lirc_open(struct inode *node, struct file *filep)
{
- if (module_refcount(THIS_MODULE) || !lirc_claim())
+ if (is_open || !lirc_claim())
return -EBUSY;
parport_enable_irq(pport);
diff --git a/drivers/staging/lirc/lirc_streamzap.c b/drivers/staging/lirc/lirc_streamzap.c
deleted file mode 100644
index be09c103f0c..00000000000
--- a/drivers/staging/lirc/lirc_streamzap.c
+++ /dev/null
@@ -1,821 +0,0 @@
-/*
- * Streamzap Remote Control driver
- *
- * Copyright (c) 2005 Christoph Bartelmus <lirc@bartelmus.de>
- *
- * This driver was based on the work of Greg Wickham and Adrian
- * Dewhurst. It was substantially rewritten to support correct signal
- * gaps and now maintains a delay buffer, which is used to present
- * consistent timing behaviour to user space applications. Without the
- * delay buffer an ugly hack would be required in lircd, which can
- * cause sluggish signal decoding in certain situations.
- *
- * This driver is based on the USB skeleton driver packaged with the
- * kernel; copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define DRIVER_VERSION "1.28"
-#define DRIVER_NAME "lirc_streamzap"
-#define DRIVER_DESC "Streamzap Remote Control driver"
-
-static int debug;
-
-#define USB_STREAMZAP_VENDOR_ID 0x0e9c
-#define USB_STREAMZAP_PRODUCT_ID 0x0000
-
-/* Use our own dbg macro */
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG DRIVER_NAME "[%d]: " \
- fmt "\n", ## args); \
- } while (0)
-
-/* table of devices that work with this driver */
-static struct usb_device_id streamzap_table[] = {
- /* Streamzap Remote Control */
- { USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) },
- /* Terminating entry */
- { }
-};
-
-MODULE_DEVICE_TABLE(usb, streamzap_table);
-
-#define STREAMZAP_PULSE_MASK 0xf0
-#define STREAMZAP_SPACE_MASK 0x0f
-#define STREAMZAP_TIMEOUT 0xff
-#define STREAMZAP_RESOLUTION 256
-
-/* number of samples buffered */
-#define STREAMZAP_BUF_LEN 128
-
-enum StreamzapDecoderState {
- PulseSpace,
- FullPulse,
- FullSpace,
- IgnorePulse
-};
-
-/* Structure to hold all of our device specific stuff
- *
- * some remarks regarding locking:
- * theoretically this struct can be accessed from three threads:
- *
- * - from lirc_dev through set_use_inc/set_use_dec
- *
- * - from the USB layer through probe/disconnect/irq
- *
- * Careful placement of lirc_register_driver/lirc_unregister_driver
- * calls will prevent conflicts. lirc_dev makes sure that
- * set_use_inc/set_use_dec are not being executed and will not be
- * called after lirc_unregister_driver returns.
- *
- * - by the timer callback
- *
- * The timer is only running when the device is connected and the
- * LIRC device is open. Making sure the timer is deleted by
- * set_use_dec will make conflicts impossible.
- */
-struct usb_streamzap {
-
- /* usb */
- /* save off the usb device pointer */
- struct usb_device *udev;
- /* the interface for this device */
- struct usb_interface *interface;
-
- /* buffer & dma */
- unsigned char *buf_in;
- dma_addr_t dma_in;
- unsigned int buf_in_len;
-
- struct usb_endpoint_descriptor *endpoint;
-
- /* IRQ */
- struct urb *urb_in;
-
- /* lirc */
- struct lirc_driver *driver;
- struct lirc_buffer *delay_buf;
-
- /* timer used to support delay buffering */
- struct timer_list delay_timer;
- int timer_running;
- spinlock_t timer_lock;
-
- /* tracks whether we are currently receiving some signal */
- int idle;
- /* sum of signal lengths received since signal start */
- unsigned long sum;
- /* start time of signal; necessary for gap tracking */
- struct timeval signal_last;
- struct timeval signal_start;
- enum StreamzapDecoderState decoder_state;
- struct timer_list flush_timer;
- int flush;
- int in_use;
- int timeout_enabled;
-};
-
-
-/* local function prototypes */
-static int streamzap_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void streamzap_disconnect(struct usb_interface *interface);
-static void usb_streamzap_irq(struct urb *urb);
-static int streamzap_use_inc(void *data);
-static void streamzap_use_dec(void *data);
-static long streamzap_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg);
-static int streamzap_suspend(struct usb_interface *intf, pm_message_t message);
-static int streamzap_resume(struct usb_interface *intf);
-
-/* usb specific object needed to register this driver with the usb subsystem */
-
-static struct usb_driver streamzap_driver = {
- .name = DRIVER_NAME,
- .probe = streamzap_probe,
- .disconnect = streamzap_disconnect,
- .suspend = streamzap_suspend,
- .resume = streamzap_resume,
- .id_table = streamzap_table,
-};
-
-static void stop_timer(struct usb_streamzap *sz)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
- if (sz->timer_running) {
- sz->timer_running = 0;
- spin_unlock_irqrestore(&sz->timer_lock, flags);
- del_timer_sync(&sz->delay_timer);
- } else {
- spin_unlock_irqrestore(&sz->timer_lock, flags);
- }
-}
-
-static void flush_timeout(unsigned long arg)
-{
- struct usb_streamzap *sz = (struct usb_streamzap *) arg;
-
- /* finally start accepting data */
- sz->flush = 0;
-}
-static void delay_timeout(unsigned long arg)
-{
- unsigned long flags;
- /* deliver data every 10 ms */
- static unsigned long timer_inc =
- (10000/(1000000/HZ)) == 0 ? 1 : (10000/(1000000/HZ));
- struct usb_streamzap *sz = (struct usb_streamzap *) arg;
- int data;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
-
- if (!lirc_buffer_empty(sz->delay_buf) &&
- !lirc_buffer_full(sz->driver->rbuf)) {
- lirc_buffer_read(sz->delay_buf, (unsigned char *) &data);
- lirc_buffer_write(sz->driver->rbuf, (unsigned char *) &data);
- }
- if (!lirc_buffer_empty(sz->delay_buf)) {
- while (lirc_buffer_available(sz->delay_buf) <
- STREAMZAP_BUF_LEN / 2 &&
- !lirc_buffer_full(sz->driver->rbuf)) {
- lirc_buffer_read(sz->delay_buf,
- (unsigned char *) &data);
- lirc_buffer_write(sz->driver->rbuf,
- (unsigned char *) &data);
- }
- if (sz->timer_running) {
- sz->delay_timer.expires = jiffies + timer_inc;
- add_timer(&sz->delay_timer);
- }
- } else {
- sz->timer_running = 0;
- }
-
- if (!lirc_buffer_empty(sz->driver->rbuf))
- wake_up(&sz->driver->rbuf->wait_poll);
-
- spin_unlock_irqrestore(&sz->timer_lock, flags);
-}
-
-static void flush_delay_buffer(struct usb_streamzap *sz)
-{
- int data;
- int empty = 1;
-
- while (!lirc_buffer_empty(sz->delay_buf)) {
- empty = 0;
- lirc_buffer_read(sz->delay_buf, (unsigned char *) &data);
- if (!lirc_buffer_full(sz->driver->rbuf)) {
- lirc_buffer_write(sz->driver->rbuf,
- (unsigned char *) &data);
- } else {
- dprintk("buffer overflow", sz->driver->minor);
- }
- }
- if (!empty)
- wake_up(&sz->driver->rbuf->wait_poll);
-}
-
-static void push(struct usb_streamzap *sz, unsigned char *data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
- if (lirc_buffer_full(sz->delay_buf)) {
- int read_data;
-
- lirc_buffer_read(sz->delay_buf,
- (unsigned char *) &read_data);
- if (!lirc_buffer_full(sz->driver->rbuf)) {
- lirc_buffer_write(sz->driver->rbuf,
- (unsigned char *) &read_data);
- } else {
- dprintk("buffer overflow", sz->driver->minor);
- }
- }
-
- lirc_buffer_write(sz->delay_buf, data);
-
- if (!sz->timer_running) {
- sz->delay_timer.expires = jiffies + HZ/10;
- add_timer(&sz->delay_timer);
- sz->timer_running = 1;
- }
-
- spin_unlock_irqrestore(&sz->timer_lock, flags);
-}
-
-static void push_full_pulse(struct usb_streamzap *sz,
- unsigned char value)
-{
- int pulse;
-
- if (sz->idle) {
- long deltv;
- int tmp;
-
- sz->signal_last = sz->signal_start;
- do_gettimeofday(&sz->signal_start);
-
- deltv = sz->signal_start.tv_sec-sz->signal_last.tv_sec;
- if (deltv > 15) {
- /* really long time */
- tmp = LIRC_SPACE(LIRC_VALUE_MASK);
- } else {
- tmp = (int) (deltv*1000000+
- sz->signal_start.tv_usec -
- sz->signal_last.tv_usec);
- tmp -= sz->sum;
- tmp = LIRC_SPACE(tmp);
- }
- dprintk("ls %u", sz->driver->minor, tmp);
- push(sz, (char *)&tmp);
-
- sz->idle = 0;
- sz->sum = 0;
- }
-
- pulse = ((int) value) * STREAMZAP_RESOLUTION;
- pulse += STREAMZAP_RESOLUTION / 2;
- sz->sum += pulse;
- pulse = LIRC_PULSE(pulse);
-
- dprintk("p %u", sz->driver->minor, pulse & PULSE_MASK);
- push(sz, (char *)&pulse);
-}
-
-static void push_half_pulse(struct usb_streamzap *sz,
- unsigned char value)
-{
- push_full_pulse(sz, (value & STREAMZAP_PULSE_MASK)>>4);
-}
-
-static void push_full_space(struct usb_streamzap *sz,
- unsigned char value)
-{
- int space;
-
- space = ((int) value)*STREAMZAP_RESOLUTION;
- space += STREAMZAP_RESOLUTION/2;
- sz->sum += space;
- space = LIRC_SPACE(space);
- dprintk("s %u", sz->driver->minor, space);
- push(sz, (char *)&space);
-}
-
-static void push_half_space(struct usb_streamzap *sz,
- unsigned char value)
-{
- push_full_space(sz, value & STREAMZAP_SPACE_MASK);
-}
-
-/**
- * usb_streamzap_irq - IRQ handler
- *
- * This procedure is invoked on reception of data from
- * the usb remote.
- */
-static void usb_streamzap_irq(struct urb *urb)
-{
- struct usb_streamzap *sz;
- int len;
- unsigned int i = 0;
-
- if (!urb)
- return;
-
- sz = urb->context;
- len = urb->actual_length;
-
- switch (urb->status) {
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /*
- * this urb is terminated, clean up.
- * sz might already be invalid at this point
- */
- dprintk("urb status: %d", -1, urb->status);
- return;
- default:
- break;
- }
-
- dprintk("received %d", sz->driver->minor, urb->actual_length);
- if (!sz->flush) {
- for (i = 0; i < urb->actual_length; i++) {
- dprintk("%d: %x", sz->driver->minor,
- i, (unsigned char) sz->buf_in[i]);
- switch (sz->decoder_state) {
- case PulseSpace:
- if ((sz->buf_in[i]&STREAMZAP_PULSE_MASK) ==
- STREAMZAP_PULSE_MASK) {
- sz->decoder_state = FullPulse;
- continue;
- } else if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK)
- == STREAMZAP_SPACE_MASK) {
- push_half_pulse(sz, sz->buf_in[i]);
- sz->decoder_state = FullSpace;
- continue;
- } else {
- push_half_pulse(sz, sz->buf_in[i]);
- push_half_space(sz, sz->buf_in[i]);
- }
- break;
- case FullPulse:
- push_full_pulse(sz, sz->buf_in[i]);
- sz->decoder_state = IgnorePulse;
- break;
- case FullSpace:
- if (sz->buf_in[i] == STREAMZAP_TIMEOUT) {
- sz->idle = 1;
- stop_timer(sz);
- if (sz->timeout_enabled) {
- int timeout =
- LIRC_TIMEOUT
- (STREAMZAP_TIMEOUT *
- STREAMZAP_RESOLUTION);
- push(sz, (char *)&timeout);
- }
- flush_delay_buffer(sz);
- } else
- push_full_space(sz, sz->buf_in[i]);
- sz->decoder_state = PulseSpace;
- break;
- case IgnorePulse:
- if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) ==
- STREAMZAP_SPACE_MASK) {
- sz->decoder_state = FullSpace;
- continue;
- }
- push_half_space(sz, sz->buf_in[i]);
- sz->decoder_state = PulseSpace;
- break;
- }
- }
- }
-
- usb_submit_urb(urb, GFP_ATOMIC);
-
- return;
-}
-
-static const struct file_operations streamzap_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = streamzap_ioctl,
- .read = lirc_dev_fop_read,
- .write = lirc_dev_fop_write,
- .poll = lirc_dev_fop_poll,
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
-};
-
-
-/**
- * streamzap_probe
- *
- * Called by usb-core to associate the driver with a candidate device.
- * On any failure the return value is the error code;
- * on success, 0 is returned.
- */
-static int streamzap_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *iface_host;
- struct usb_streamzap *sz;
- struct lirc_driver *driver;
- struct lirc_buffer *lirc_buf;
- struct lirc_buffer *delay_buf;
- char buf[63], name[128] = "";
- int retval = -ENOMEM;
- int minor = 0;
-
- /* Allocate space for device driver specific data */
- sz = kzalloc(sizeof(struct usb_streamzap), GFP_KERNEL);
- if (sz == NULL)
- return -ENOMEM;
-
- sz->udev = udev;
- sz->interface = interface;
-
- /* Check to ensure endpoint information matches requirements */
- iface_host = interface->cur_altsetting;
-
- if (iface_host->desc.bNumEndpoints != 1) {
- err("%s: Unexpected desc.bNumEndpoints (%d)", __func__,
- iface_host->desc.bNumEndpoints);
- retval = -ENODEV;
- goto free_sz;
- }
-
- sz->endpoint = &(iface_host->endpoint[0].desc);
- if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- != USB_DIR_IN) {
- err("%s: endpoint doesn't match input device 0x%02x",
- __func__, sz->endpoint->bEndpointAddress);
- retval = -ENODEV;
- goto free_sz;
- }
-
- if ((sz->endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- != USB_ENDPOINT_XFER_INT) {
- err("%s: endpoint attributes don't match xfer 0x%02x",
- __func__, sz->endpoint->bmAttributes);
- retval = -ENODEV;
- goto free_sz;
- }
-
- if (sz->endpoint->wMaxPacketSize == 0) {
- err("%s: endpoint message size==0? ", __func__);
- retval = -ENODEV;
- goto free_sz;
- }
-
- /* Allocate the USB buffer and IRQ URB */
-
- sz->buf_in_len = sz->endpoint->wMaxPacketSize;
- sz->buf_in = usb_alloc_coherent(sz->udev, sz->buf_in_len,
- GFP_ATOMIC, &sz->dma_in);
- if (sz->buf_in == NULL)
- goto free_sz;
-
- sz->urb_in = usb_alloc_urb(0, GFP_KERNEL);
- if (sz->urb_in == NULL)
- goto free_sz;
-
- /* Connect this device to the LIRC sub-system */
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!driver)
- goto free_sz;
-
- lirc_buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!lirc_buf)
- goto free_driver;
- if (lirc_buffer_init(lirc_buf, sizeof(int), STREAMZAP_BUF_LEN))
- goto kfree_lirc_buf;
-
- delay_buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!delay_buf)
- goto free_lirc_buf;
- if (lirc_buffer_init(delay_buf, sizeof(int), STREAMZAP_BUF_LEN))
- goto kfree_delay_buf;
-
- sz->driver = driver;
- strcpy(sz->driver->name, DRIVER_NAME);
- sz->driver->minor = -1;
- sz->driver->sample_rate = 0;
- sz->driver->code_length = sizeof(int) * 8;
- sz->driver->features = LIRC_CAN_REC_MODE2 |
- LIRC_CAN_GET_REC_RESOLUTION |
- LIRC_CAN_SET_REC_TIMEOUT;
- sz->driver->data = sz;
- sz->driver->min_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION;
- sz->driver->max_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION;
- sz->driver->rbuf = lirc_buf;
- sz->delay_buf = delay_buf;
- sz->driver->set_use_inc = &streamzap_use_inc;
- sz->driver->set_use_dec = &streamzap_use_dec;
- sz->driver->fops = &streamzap_fops;
- sz->driver->dev = &interface->dev;
- sz->driver->owner = THIS_MODULE;
-
- sz->idle = 1;
- sz->decoder_state = PulseSpace;
- init_timer(&sz->delay_timer);
- sz->delay_timer.function = delay_timeout;
- sz->delay_timer.data = (unsigned long) sz;
- sz->timer_running = 0;
- spin_lock_init(&sz->timer_lock);
-
- init_timer(&sz->flush_timer);
- sz->flush_timer.function = flush_timeout;
- sz->flush_timer.data = (unsigned long) sz;
- /* Complete final initialisations */
-
- usb_fill_int_urb(sz->urb_in, udev,
- usb_rcvintpipe(udev, sz->endpoint->bEndpointAddress),
- sz->buf_in, sz->buf_in_len, usb_streamzap_irq, sz,
- sz->endpoint->bInterval);
- sz->urb_in->transfer_dma = sz->dma_in;
- sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- if (udev->descriptor.iManufacturer
- && usb_string(udev, udev->descriptor.iManufacturer,
- buf, sizeof(buf)) > 0)
- strlcpy(name, buf, sizeof(name));
-
- if (udev->descriptor.iProduct
- && usb_string(udev, udev->descriptor.iProduct,
- buf, sizeof(buf)) > 0)
- snprintf(name + strlen(name), sizeof(name) - strlen(name),
- " %s", buf);
-
- minor = lirc_register_driver(driver);
-
- if (minor < 0)
- goto free_delay_buf;
-
- sz->driver->minor = minor;
-
- usb_set_intfdata(interface, sz);
-
- printk(KERN_INFO DRIVER_NAME "[%d]: %s on usb%d:%d attached\n",
- sz->driver->minor, name,
- udev->bus->busnum, sz->udev->devnum);
-
- return 0;
-
-free_delay_buf:
- lirc_buffer_free(sz->delay_buf);
-kfree_delay_buf:
- kfree(delay_buf);
-free_lirc_buf:
- lirc_buffer_free(sz->driver->rbuf);
-kfree_lirc_buf:
- kfree(lirc_buf);
-free_driver:
- kfree(driver);
-free_sz:
- if (retval == -ENOMEM)
- err("Out of memory");
-
- if (sz) {
- usb_free_urb(sz->urb_in);
- usb_free_coherent(udev, sz->buf_in_len, sz->buf_in, sz->dma_in);
- kfree(sz);
- }
-
- return retval;
-}
-
-static int streamzap_use_inc(void *data)
-{
- struct usb_streamzap *sz = data;
-
- if (!sz) {
- dprintk("%s called with no context", -1, __func__);
- return -EINVAL;
- }
- dprintk("set use inc", sz->driver->minor);
-
- lirc_buffer_clear(sz->driver->rbuf);
- lirc_buffer_clear(sz->delay_buf);
-
- sz->flush_timer.expires = jiffies + HZ;
- sz->flush = 1;
- add_timer(&sz->flush_timer);
-
- sz->urb_in->dev = sz->udev;
- if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
- dprintk("open result = -EIO error submitting urb",
- sz->driver->minor);
- return -EIO;
- }
- sz->in_use++;
-
- return 0;
-}
-
-static void streamzap_use_dec(void *data)
-{
- struct usb_streamzap *sz = data;
-
- if (!sz) {
- dprintk("%s called with no context", -1, __func__);
- return;
- }
- dprintk("set use dec", sz->driver->minor);
-
- if (sz->flush) {
- sz->flush = 0;
- del_timer_sync(&sz->flush_timer);
- }
-
- usb_kill_urb(sz->urb_in);
-
- stop_timer(sz);
-
- sz->in_use--;
-}
-
-static long streamzap_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- int result = 0;
- int val;
- struct usb_streamzap *sz = lirc_get_pdata(filep);
-
- switch (cmd) {
- case LIRC_GET_REC_RESOLUTION:
- result = put_user(STREAMZAP_RESOLUTION, (unsigned int *) arg);
- break;
- case LIRC_SET_REC_TIMEOUT:
- result = get_user(val, (int *)arg);
- if (result == 0) {
- if (val == STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION)
- sz->timeout_enabled = 1;
- else if (val == 0)
- sz->timeout_enabled = 0;
- else
- result = -EINVAL;
- }
- break;
- default:
- return lirc_dev_fop_ioctl(filep, cmd, arg);
- }
- return result;
-}
-
-/**
- * streamzap_disconnect
- *
- * Called by the usb core when the device is removed from the system.
- *
- * This routine guarantees that the driver will not submit any more urbs
- * by clearing dev->udev. It is also supposed to terminate any currently
- * active urbs. Unfortunately, usb_bulk_msg(), used in streamzap_read(),
- * does not provide any way to do this.
- */
-static void streamzap_disconnect(struct usb_interface *interface)
-{
- struct usb_streamzap *sz;
- int errnum;
- int minor;
-
- sz = usb_get_intfdata(interface);
-
- /* unregister from the LIRC sub-system */
-
- errnum = lirc_unregister_driver(sz->driver->minor);
- if (errnum != 0)
- dprintk("error in lirc_unregister: (returned %d)",
- sz->driver->minor, errnum);
-
- lirc_buffer_free(sz->delay_buf);
- lirc_buffer_free(sz->driver->rbuf);
-
- /* unregister from the USB sub-system */
-
- usb_free_urb(sz->urb_in);
-
- usb_free_coherent(sz->udev, sz->buf_in_len, sz->buf_in, sz->dma_in);
-
- minor = sz->driver->minor;
- kfree(sz->driver->rbuf);
- kfree(sz->driver);
- kfree(sz->delay_buf);
- kfree(sz);
-
- printk(KERN_INFO DRIVER_NAME "[%d]: disconnected\n", minor);
-}
-
-static int streamzap_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct usb_streamzap *sz = usb_get_intfdata(intf);
-
- printk(KERN_INFO DRIVER_NAME "[%d]: suspend\n", sz->driver->minor);
- if (sz->in_use) {
- if (sz->flush) {
- sz->flush = 0;
- del_timer_sync(&sz->flush_timer);
- }
-
- stop_timer(sz);
-
- usb_kill_urb(sz->urb_in);
- }
- return 0;
-}
-
-static int streamzap_resume(struct usb_interface *intf)
-{
- struct usb_streamzap *sz = usb_get_intfdata(intf);
-
- lirc_buffer_clear(sz->driver->rbuf);
- lirc_buffer_clear(sz->delay_buf);
-
- if (sz->in_use) {
- sz->flush_timer.expires = jiffies + HZ;
- sz->flush = 1;
- add_timer(&sz->flush_timer);
-
- sz->urb_in->dev = sz->udev;
- if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
- dprintk("open result = -EIO error submitting urb",
- sz->driver->minor);
- return -EIO;
- }
- }
- return 0;
-}
-
-/**
- * usb_streamzap_init
- */
-static int __init usb_streamzap_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&streamzap_driver);
-
- if (result) {
- err("usb_register failed. Error number %d",
- result);
- return result;
- }
-
- printk(KERN_INFO DRIVER_NAME " " DRIVER_VERSION " registered\n");
- return 0;
-}
-
-/**
- * usb_streamzap_exit
- */
-static void __exit usb_streamzap_exit(void)
-{
- usb_deregister(&streamzap_driver);
-}
-
-
-module_init(usb_streamzap_init);
-module_exit(usb_streamzap_exit);
-
-MODULE_AUTHOR("Christoph Bartelmus, Greg Wickham, Adrian Dewhurst");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/staging/memrar/TODO b/drivers/staging/memrar/TODO
index 0087447d503..435e09ba44c 100644
--- a/drivers/staging/memrar/TODO
+++ b/drivers/staging/memrar/TODO
@@ -1,7 +1,7 @@
RAR Handler (memrar) Driver TODO Items
======================================
-Maintainer: Ossama Othman <ossama.othman@intel.com>
+Maintainer: Eugene Epshteyn <eugene.epshteyn@intel.com>
memrar.h
--------
diff --git a/drivers/staging/memrar/memrar-abi b/drivers/staging/memrar/memrar-abi
index 98a6bb158ba..c23fc996a43 100644
--- a/drivers/staging/memrar/memrar-abi
+++ b/drivers/staging/memrar/memrar-abi
@@ -1,7 +1,7 @@
What: /dev/memrar
Date: March 2010
-KernelVersion: Kernel version this feature first showed up in.
-Contact: Ossama Othman <ossama.othman@intel.com>
+KernelVersion: 2.6.34
+Contact: Eugene Epshteyn <eugene.epshteyn@intel.com>
Description: The Intel Moorestown Restricted Access Region (RAR)
Handler driver exposes an ioctl() based interface that
allows a user to reserve and release blocks of RAR
diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c
index 41876f2b0e5..a98b3f1f11e 100644
--- a/drivers/staging/memrar/memrar_handler.c
+++ b/drivers/staging/memrar/memrar_handler.c
@@ -278,19 +278,10 @@ static int memrar_init_rar_resources(int rarnum, char const *devname)
BUG_ON(!memrar_is_valid_rar_type(rarnum));
BUG_ON(rar->allocated);
- mutex_init(&rar->lock);
-
- /*
- * Initialize the process table before we reach any
- * code that exit on failure since the finalization
- * code requires an initialized list.
- */
- INIT_LIST_HEAD(&rar->buffers.list);
-
if (rar_get_address(rarnum, &low, &high) != 0)
/* No RAR is available. */
return -ENODEV;
-
+
if (low == 0 || high == 0) {
rar->base = 0;
rar->length = 0;
@@ -310,7 +301,8 @@ static int memrar_init_rar_resources(int rarnum, char const *devname)
/* Claim RAR memory as our own. */
if (request_mem_region(low, rar->length, devname) == NULL) {
rar->length = 0;
- pr_err("%s: Unable to claim RAR[%d] memory.\n", devname, rarnum);
+ pr_err("%s: Unable to claim RAR[%d] memory.\n",
+ devname, rarnum);
pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
return -EBUSY;
}
@@ -346,7 +338,7 @@ static int memrar_init_rar_resources(int rarnum, char const *devname)
}
pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
- devname, rarnum, (unsigned long) low, (unsigned long) high);
+ devname, rarnum, (unsigned long) low, (unsigned long) high);
pr_info("%s: BRAR[%d] size = %zu KiB\n",
devname, rarnum, rar->allocator->capacity / 1024);
@@ -530,7 +522,7 @@ static long memrar_get_stat(struct RAR_stat *r)
{
struct memrar_allocator *allocator;
- if (!memrar_is_valid_rar_type(r->type))
+ if (!memrar_is_valid_rar_type(r->type))
return -EINVAL;
if (!memrars[r->type].allocated)
@@ -939,9 +931,28 @@ static int memrar_registration_callback(unsigned long rar)
static int __init memrar_init(void)
{
int err;
+ int i;
printk(banner);
+ /*
+ * Some delayed initialization is performed in this driver.
+ * Make sure resources that are used during driver clean-up
+ * (e.g. during driver's release() function) are fully
+ * initialized before first use. This is particularly
+ * important for the case when the delayed initialization
+ * isn't completed, leaving behind a partially initialized
+ * driver.
+ *
+ * Such a scenario can occur when RAR is not available on the
+ * platform, and the driver is release()d.
+ */
+ for (i = 0; i != ARRAY_SIZE(memrars); ++i) {
+ struct memrar_rar_info * const rar = &memrars[i];
+ mutex_init(&rar->lock);
+ INIT_LIST_HEAD(&rar->buffers.list);
+ }
+
err = misc_register(&memrar_miscdev);
if (err)
return err;
diff --git a/drivers/staging/msm/Kconfig b/drivers/staging/msm/Kconfig
index c57039f2060..c5309eec58f 100644
--- a/drivers/staging/msm/Kconfig
+++ b/drivers/staging/msm/Kconfig
@@ -46,21 +46,11 @@ config FB_MSM_LCDC_PRISM_WVGA
select FB_MSM_LCDC_PANEL
default n
-config FB_MSM_LCDC_ST1_WXGA
- bool
- select FB_MSM_LCDC_PANEL
- default n
-
config FB_MSM_LCDC_ST15_WXGA
bool
select FB_MSM_LCDC_PANEL
default n
-config FB_MSM_LCDC_WXGA
- bool
- select FB_MSM_LCDC_PANEL
- default n
-
choice
prompt "LCD Panel"
default FB_MSM_LCDC_ST15_PANEL
diff --git a/drivers/staging/msm/Makefile b/drivers/staging/msm/Makefile
index 98a0ce177cb..bb3606faf20 100644
--- a/drivers/staging/msm/Makefile
+++ b/drivers/staging/msm/Makefile
@@ -61,14 +61,12 @@ obj-y += mddi_prism.o
obj-y += mddi_toshiba.o
obj-y += mddi_toshiba_vga.o
obj-y += mddi_toshiba_wvga_pt.o
-obj-y += mddi_toshiba_wvga.o
obj-y += mddi_sharp.o
else
obj-$(CONFIG_FB_MSM_MDDI_PRISM_WVGA) += mddi_prism.o
obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON) += mddi_toshiba.o
obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON_VGA) += mddi_toshiba_vga.o
obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA_PORTRAIT) += mddi_toshiba_wvga_pt.o
-obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA) += mddi_toshiba_wvga.o
obj-$(CONFIG_FB_MSM_MDDI_SHARP_QVGA_128x128) += mddi_sharp.o
endif
@@ -76,11 +74,8 @@ obj-$(CONFIG_FB_MSM_LCDC_PANEL) += lcdc_panel.o
obj-$(CONFIG_FB_MSM_LCDC_PRISM_WVGA) += lcdc_prism.o
obj-$(CONFIG_FB_MSM_LCDC_EXTERNAL_WXGA) += lcdc_external.o
obj-$(CONFIG_FB_MSM_LCDC_GORDON_VGA) += lcdc_gordon.o
-obj-$(CONFIG_FB_MSM_LCDC_WXGA) += lcdc_wxga.o
obj-$(CONFIG_FB_MSM_LCDC_TOSHIBA_WVGA_PT) += lcdc_toshiba_wvga_pt.o
obj-$(CONFIG_FB_MSM_LCDC_SHARP_WVGA_PT) += lcdc_sharp_wvga_pt.o
-obj-$(CONFIG_FB_MSM_LCDC_GRAPEFRUIT_VGA) += lcdc_grapefruit.o
-obj-$(CONFIG_FB_MSM_LCDC_ST1_WXGA) += lcdc_st1_wxga.o
obj-$(CONFIG_FB_MSM_LCDC_ST15_WXGA) += lcdc_st15.o
obj-$(CONFIG_FB_MSM_HDMI_SII_EXTERNAL_720P) += hdmi_sii9022.o
diff --git a/drivers/staging/msm/lcdc_grapefruit.c b/drivers/staging/msm/lcdc_grapefruit.c
deleted file mode 100644
index 7284649ea0a..00000000000
--- a/drivers/staging/msm/lcdc_grapefruit.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
-#include "mddihosti.h"
-#endif
-
-static int __init lcdc_grapefruit_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
- if (msm_fb_detect_client("lcdc_grapefruit_vga"))
- return 0;
-#endif
-
- pinfo.xres = 1024;
- pinfo.yres = 600;
- pinfo.type = LCDC_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 18;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 40000000;
-
- pinfo.lcdc.h_back_porch = 88;
- pinfo.lcdc.h_front_porch = 40;
- pinfo.lcdc.h_pulse_width = 128;
- pinfo.lcdc.v_back_porch = 23;
- pinfo.lcdc.v_front_porch = 1;
- pinfo.lcdc.v_pulse_width = 4;
- pinfo.lcdc.border_clr = 0; /* blk */
- pinfo.lcdc.underflow_clr = 0xff; /* blue */
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret)
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(lcdc_grapefruit_init);
diff --git a/drivers/staging/msm/lcdc_st1_wxga.c b/drivers/staging/msm/lcdc_st1_wxga.c
deleted file mode 100644
index 73760019cf2..00000000000
--- a/drivers/staging/msm/lcdc_st1_wxga.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-static int __init lcdc_st1_wxga_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
- if (msm_fb_detect_client("lcdc_st1_wxga"))
- return 0;
-
- pinfo.xres = 1280;
- pinfo.yres = 720;
- pinfo.type = LCDC_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 18;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 74250000;
-
- pinfo.lcdc.h_back_porch = 124;
- pinfo.lcdc.h_front_porch = 110;
- pinfo.lcdc.h_pulse_width = 136;
- pinfo.lcdc.v_back_porch = 19;
- pinfo.lcdc.v_front_porch = 5;
- pinfo.lcdc.v_pulse_width = 6;
- pinfo.lcdc.border_clr = 0; /* blk */
- pinfo.lcdc.underflow_clr = 0xff; /* blue */
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret)
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(lcdc_st1_wxga_init);
diff --git a/drivers/staging/msm/lcdc_wxga.c b/drivers/staging/msm/lcdc_wxga.c
deleted file mode 100644
index 202c92c0ef5..00000000000
--- a/drivers/staging/msm/lcdc_wxga.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-static int __init lcdc_wxga_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- if (msm_fb_detect_client("lcdc_wxga"))
- return 0;
-#endif
-
- pinfo.xres = 1280;
- pinfo.yres = 720;
- pinfo.type = LCDC_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 24;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 74250000;
-
- pinfo.lcdc.h_back_porch = 124;
- pinfo.lcdc.h_front_porch = 110;
- pinfo.lcdc.h_pulse_width = 136;
- pinfo.lcdc.v_back_porch = 19;
- pinfo.lcdc.v_front_porch = 5;
- pinfo.lcdc.v_pulse_width = 6;
- pinfo.lcdc.border_clr = 0; /* blk */
- pinfo.lcdc.underflow_clr = 0xff; /* blue */
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret)
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(lcdc_wxga_init);
diff --git a/drivers/staging/msm/mddi_toshiba_wvga.c b/drivers/staging/msm/mddi_toshiba_wvga.c
deleted file mode 100644
index 557b0f08faf..00000000000
--- a/drivers/staging/msm/mddi_toshiba_wvga.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddi_toshiba.h"
-
-static int __init mddi_toshiba_wvga_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- if (msm_fb_detect_client("mddi_toshiba_wvga"))
- return 0;
-#endif
-
- pinfo.xres = 800;
- pinfo.yres = 480;
- pinfo.pdest = DISPLAY_2;
- pinfo.type = MDDI_PANEL;
- pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 18;
- pinfo.lcd.vsync_enable = TRUE;
- pinfo.lcd.refx100 = 6118;
- pinfo.lcd.v_back_porch = 6;
- pinfo.lcd.v_front_porch = 0;
- pinfo.lcd.v_pulse_width = 0;
- pinfo.lcd.hw_vsync_mode = FALSE;
- pinfo.lcd.vsync_notifier_period = (1 * HZ);
- pinfo.bl_max = 4;
- pinfo.bl_min = 1;
- pinfo.clk_rate = 192000000;
- pinfo.clk_min = 190000000;
- pinfo.clk_max = 200000000;
- pinfo.fb_num = 2;
-
- ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
- LCD_TOSHIBA_2P4_WVGA);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
- return ret;
- }
-
- return ret;
-}
-
-module_init(mddi_toshiba_wvga_init);
diff --git a/drivers/staging/msm/mddihost.h b/drivers/staging/msm/mddihost.h
index 20b817841c4..c46f24aea25 100644
--- a/drivers/staging/msm/mddihost.h
+++ b/drivers/staging/msm/mddihost.h
@@ -44,8 +44,6 @@
#include <asm/system.h>
#include <asm/mach-types.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
#include "msm_fb_panel.h"
diff --git a/drivers/staging/msm/mdp4_debugfs.c b/drivers/staging/msm/mdp4_debugfs.c
index 844d46775ec..36954e89478 100644
--- a/drivers/staging/msm/mdp4_debugfs.c
+++ b/drivers/staging/msm/mdp4_debugfs.c
@@ -63,13 +63,6 @@ DEFINE_SIMPLE_ATTRIBUTE(
"%llx\n");
-static int mdp4_debugfs_open(struct inode *inode, struct file *file)
-{
- /* non-seekable */
- file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- return 0;
-}
-
static int mdp4_debugfs_release(struct inode *inode, struct file *file)
{
return 0;
@@ -144,10 +137,11 @@ static ssize_t mdp4_debugfs_read(
}
static const struct file_operations mdp4_debugfs_fops = {
- .open = mdp4_debugfs_open,
+ .open = nonseekable_open,
.release = mdp4_debugfs_release,
.read = mdp4_debugfs_read,
.write = mdp4_debugfs_write,
+ .llseek = no_llseek,
};
int mdp4_debugfs_init(void)
diff --git a/drivers/staging/msm/mdp4_overlay.c b/drivers/staging/msm/mdp4_overlay.c
index 304bb829763..de284c28faa 100644
--- a/drivers/staging/msm/mdp4_overlay.c
+++ b/drivers/staging/msm/mdp4_overlay.c
@@ -874,8 +874,8 @@ struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(void)
if (pipe->pipe_ndx == 0) {
pipe->pipe_ndx = i + 1; /* start from 1 */
init_completion(&pipe->comp);
- printk(KERN_INFO "mdp4_overlay_pipe_alloc: pipe=%x ndx=%d\n",
- (int)pipe, pipe->pipe_ndx);
+ printk(KERN_INFO "mdp4_overlay_pipe_alloc: pipe=%p ndx=%d\n",
+ pipe, pipe->pipe_ndx);
return pipe;
}
pipe++;
@@ -887,8 +887,8 @@ struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(void)
void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe)
{
- printk(KERN_INFO "mdp4_overlay_pipe_free: pipe=%x ndx=%d\n",
- (int)pipe, pipe->pipe_ndx);
+ printk(KERN_INFO "mdp4_overlay_pipe_free: pipe=%p ndx=%d\n",
+ pipe, pipe->pipe_ndx);
memset(pipe, 0, sizeof(*pipe));
}
diff --git a/drivers/staging/msm/msm_fb_def.h b/drivers/staging/msm/msm_fb_def.h
index 6de44093742..c5f9e9e670f 100644
--- a/drivers/staging/msm/msm_fb_def.h
+++ b/drivers/staging/msm/msm_fb_def.h
@@ -50,15 +50,11 @@
#include <linux/debugfs.h>
#include <linux/console.h>
-#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include "linux/proc_fs.h"
#include <mach/hardware.h>
#include <linux/io.h>
-#include <linux/fb.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/platform_device.h>
diff --git a/drivers/staging/msm/staging-devices.c b/drivers/staging/msm/staging-devices.c
index 0f8ec3e2601..861f3307231 100644
--- a/drivers/staging/msm/staging-devices.c
+++ b/drivers/staging/msm/staging-devices.c
@@ -18,7 +18,6 @@
#include "msm_mdp.h"
#include "memory_ll.h"
//#include "android_pmem.h"
-#include <mach/board.h>
#ifdef CONFIG_MSM_SOC_REV_A
#define MSM_SMI_BASE 0xE0000000
@@ -115,17 +114,7 @@ static int msm_fb_detect_panel(const char *name)
} else if ((machine_is_qsd8x50_surf() || machine_is_qsd8x50a_surf())
&& !strcmp(name, "lcdc_external"))
ret = 0;
- else if (0 /*machine_is_qsd8x50_grapefruit() */) {
- if (!strcmp(name, "lcdc_grapefruit_vga"))
- ret = 0;
- else
- ret = -ENODEV;
- } else if (machine_is_qsd8x50_st1()) {
- if (!strcmp(name, "lcdc_st1_wxga"))
- ret = 0;
- else
- ret = -ENODEV;
- } else if (machine_is_qsd8x50a_st1_5()) {
+ else if (machine_is_qsd8x50a_st1_5()) {
if (!strcmp(name, "lcdc_st15") ||
!strcmp(name, "hdmi_sii9022"))
ret = 0;
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b3589..9493128e5fd 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CPU_CAVIUM_OCTEON
+ depends on CPU_CAVIUM_OCTEON && NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.c b/drivers/staging/octeon/cvmx-cmd-queue.c
index 976227b0127..e9809d37516 100644
--- a/drivers/staging/octeon/cvmx-cmd-queue.c
+++ b/drivers/staging/octeon/cvmx-cmd-queue.c
@@ -140,21 +140,21 @@ cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
if (qstate->base_ptr_div128) {
if (max_depth != (int)qstate->max_depth) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initalized with different "
+ "Queue already initialized with different "
"max_depth (%d).\n",
(int)qstate->max_depth);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (fpa_pool != qstate->fpa_pool) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initalized with different "
+ "Queue already initialized with different "
"FPA pool (%u).\n",
qstate->fpa_pool);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initalized with different "
+ "Queue already initialized with different "
"FPA pool size (%u).\n",
(qstate->pool_size_m1 + 1) << 3);
return CVMX_CMD_QUEUE_INVALID_PARAM;
diff --git a/drivers/staging/octeon/cvmx-fau.h b/drivers/staging/octeon/cvmx-fau.h
index 29bdce66cdf..a6939fc8ba1 100644
--- a/drivers/staging/octeon/cvmx-fau.h
+++ b/drivers/staging/octeon/cvmx-fau.h
@@ -299,7 +299,7 @@ cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
/**
* Builds I/O data for async operations
*
- * @scraddr: Scratch pad byte addres to write to. Must be 8 byte aligned
+ * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
* @value: Signed value to add.
* Note: When performing 32 and 64 bit access, only the low
* 22 bits are available.
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index b58b8971f93..97082542188 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -294,6 +294,8 @@ int cvm_oct_spi_init(struct net_device *dev)
if (number_spi_ports == 0) {
r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
IRQF_SHARED, "SPI", &number_spi_ports);
+ if (r)
+ return r;
}
number_spi_ports++;
diff --git a/drivers/staging/otus/80211core/ctxrx.c b/drivers/staging/otus/80211core/ctxrx.c
index a127196260e..135167d23d0 100644
--- a/drivers/staging/otus/80211core/ctxrx.c
+++ b/drivers/staging/otus/80211core/ctxrx.c
@@ -3117,7 +3117,7 @@ u16_t zfWlanRxFilter(zdev_t* dev, zbuf_t* buf)
index = (src[2]+up) & (ZM_FILTER_TABLE_ROW-1);
- /* TBD : filter frame with source address == own MAC adress */
+ /* TBD : filter frame with source address == own MAC address */
if ((wd->macAddr[0] == src[0]) && (wd->macAddr[1] == src[1])
&& (wd->macAddr[2] == src[2]))
{
diff --git a/drivers/staging/otus/TODO b/drivers/staging/otus/TODO
index 4caf026a491..6fea974fcc9 100644
--- a/drivers/staging/otus/TODO
+++ b/drivers/staging/otus/TODO
@@ -2,15 +2,7 @@ I'm hesitant to add a TODO file here, as the wireless developers would
really have people help them out on the "clean" ar9170 driver that can
be found at the linux-wireless developer site.
-But, if you wish to clean up this driver instead, here's a short list of
-things that need to be done to get it into a more mergable shape:
-
-TODO:
- - checkpatch.pl cleanups
- - sparse cleanups
- - port to in-kernel 80211 stack
- - review by the wireless developer community
-
-Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
-Luis Rodriguez <Luis.Rodriguez@Atheros.com> and the
-otus-devel@lists.madwifi-project.org mailing list.
+This driver is unmaintained and now serves only as a source of
+documentation for developers working on ar9170 and carl9170.
+Once carl9170 gains 11n support and is merged upstream, this driver
+can be removed.
diff --git a/drivers/staging/otus/apdbg.c b/drivers/staging/otus/apdbg.c
index b59028e7e33..09415a6b93c 100644
--- a/drivers/staging/otus/apdbg.c
+++ b/drivers/staging/otus/apdbg.c
@@ -90,28 +90,6 @@ struct zdap_ioctl {
#endif
-static char hex(char v)
-{
- if (isdigit(v))
- return v - '0';
- else if (isxdigit(v))
- return tolower(v) - 'a' + 10;
- else
- return 0;
-}
-
-static unsigned char asctohex(char *str)
-{
- unsigned char value;
-
- value = hex(*str) & 0x0f;
- value = value << 4;
- str++;
- value |= hex(*str) & 0x0f;
-
- return value;
-}
-
char *prgname;
int set_ioctl(int sock, struct ifreq *req)
@@ -180,7 +158,7 @@ int main(int argc, char **argv)
if (argc < 3) {
fprintf(stderr, "%s: usage is \"%s <ifname> <operation>"
"[<address>] [<value>]\"\n", prgname, prgname);
- fprintf(stderr, "valid operation : read, write, mem, reg, \n");
+ fprintf(stderr, "valid operation : read, write, mem, reg,\n");
fprintf(stderr, " : txd, rxd, rmem, wmem\n");
fprintf(stderr, " : dmat, regt, test\n");
diff --git a/drivers/staging/otus/hal/hpani.c b/drivers/staging/otus/hal/hpani.c
index f53e483b394..9b9420c75d4 100644
--- a/drivers/staging/otus/hal/hpani.c
+++ b/drivers/staging/otus/hal/hpani.c
@@ -72,7 +72,6 @@ s32_t BEACON_RSSI(zdev_t *dev)
void zfHpAniAttach(zdev_t *dev)
{
-#define N(a) (sizeof(a) / sizeof(a[0]))
u32_t i;
struct zsHpPriv *HpPriv;
@@ -125,7 +124,6 @@ void zfHpAniAttach(zdev_t *dev)
HpPriv->stats.ast_nodestats.ns_avgbrssi = ZM_RSSI_DUMMY_MARKER;
HpPriv->stats.ast_nodestats.ns_avgrssi = ZM_RSSI_DUMMY_MARKER;
HpPriv->stats.ast_nodestats.ns_avgtxrssi = ZM_RSSI_DUMMY_MARKER;
-#undef N
}
/*
@@ -133,7 +131,6 @@ void zfHpAniAttach(zdev_t *dev)
*/
u8_t zfHpAniControl(zdev_t *dev, ZM_HAL_ANI_CMD cmd, int param)
{
-#define N(a) (sizeof(a)/sizeof(a[0]))
typedef s32_t TABLE[];
struct zsHpPriv *HpPriv;
struct zsAniState *aniState;
@@ -148,9 +145,9 @@ u8_t zfHpAniControl(zdev_t *dev, ZM_HAL_ANI_CMD cmd, int param)
{
u32_t level = param;
- if (level >= N(HpPriv->totalSizeDesired)) {
+ if (level >= ARRAY_SIZE(HpPriv->totalSizeDesired)) {
zm_debug_msg1("level out of range, desired level : ", level);
- zm_debug_msg1("max level : ", N(HpPriv->totalSizeDesired));
+ zm_debug_msg1("max level : ", ARRAY_SIZE(HpPriv->totalSizeDesired));
return FALSE;
}
@@ -260,10 +257,10 @@ u8_t zfHpAniControl(zdev_t *dev, ZM_HAL_ANI_CMD cmd, int param)
const TABLE firstep = { 0, 4, 8 };
u32_t level = param;
- if (level >= N(firstep))
+ if (level >= ARRAY_SIZE(firstep))
{
zm_debug_msg1("level out of range, desired level : ", level);
- zm_debug_msg1("max level : ", N(firstep));
+ zm_debug_msg1("max level : ", ARRAY_SIZE(firstep));
return FALSE;
}
zfDelayWriteInternalReg(dev, AR_PHY_FIND_SIG,
@@ -283,10 +280,10 @@ u8_t zfHpAniControl(zdev_t *dev, ZM_HAL_ANI_CMD cmd, int param)
const TABLE cycpwrThr1 = { 2, 4, 6, 8, 10, 12, 14, 16 };
u32_t level = param;
- if (level >= N(cycpwrThr1))
+ if (level >= ARRAY_SIZE(cycpwrThr1))
{
zm_debug_msg1("level out of range, desired level : ", level);
- zm_debug_msg1("max level : ", N(cycpwrThr1));
+ zm_debug_msg1("max level : ", ARRAY_SIZE(cycpwrThr1));
return FALSE;
}
zfDelayWriteInternalReg(dev, AR_PHY_TIMING5,
@@ -335,7 +332,6 @@ u8_t zfHpAniControl(zdev_t *dev, ZM_HAL_ANI_CMD cmd, int param)
return FALSE;
}
return TRUE;
-#undef N
}
void zfHpAniRestart(zdev_t* dev)
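The hpani.c hunks above, like the hpmain.c and hpreg.c changes that follow, replace the driver's local N() macro and the open-coded sizeof divisions with ARRAY_SIZE(). A minimal stand-alone illustration of the idiom, reusing the firstep table from the hunk above (plain C, not driver code); the kernel's own ARRAY_SIZE() in <linux/kernel.h> does the same arithmetic and additionally refuses to build if it is handed a pointer instead of a real array:

    #include <stdio.h>

    /* Same arithmetic the driver used to open-code. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
            static const int firstep[] = { 0, 4, 8 };
            size_t i;

            for (i = 0; i < ARRAY_SIZE(firstep); i++)
                    printf("firstep[%zu] = %d\n", i, firstep[i]);

            return 0;
    }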
diff --git a/drivers/staging/otus/hal/hpmain.c b/drivers/staging/otus/hal/hpmain.c
index 5f412e02045..6d2d358d5ca 100644
--- a/drivers/staging/otus/hal/hpmain.c
+++ b/drivers/staging/otus/hal/hpmain.c
@@ -430,7 +430,7 @@ void zfInitPhy(zdev_t* dev, u32_t frequency, u8_t bw40)
* Register setting by mode
*/
- entries = sizeof(ar5416Modes) / sizeof(*ar5416Modes);
+ entries = ARRAY_SIZE(ar5416Modes);
zm_msg1_scan(ZM_LV_2, "Modes register setting entries=", entries);
for (i=0; i<entries; i++)
{
@@ -496,7 +496,7 @@ void zfInitPhy(zdev_t* dev, u32_t frequency, u8_t bw40)
/*
* Common Register setting
*/
- entries = sizeof(ar5416Common) / sizeof(*ar5416Common);
+ entries = ARRAY_SIZE(ar5416Common);
for (i=0; i<entries; i++)
{
reg_write(ar5416Common[i][0], ar5416Common[i][1]);
@@ -506,7 +506,7 @@ void zfInitPhy(zdev_t* dev, u32_t frequency, u8_t bw40)
/*
* RF Gain setting by freqIndex
*/
- entries = sizeof(ar5416BB_RfGain) / sizeof(*ar5416BB_RfGain);
+ entries = ARRAY_SIZE(ar5416BB_RfGain);
for (i=0; i<entries; i++)
{
reg_write(ar5416BB_RfGain[i][0], ar5416BB_RfGain[i][freqIndex]);
@@ -963,7 +963,6 @@ u32_t reverse_bits(u32_t chan_sel)
/* Bank 0 1 2 3 5 6 7 */
void zfSetRfRegs(zdev_t* dev, u32_t frequency)
{
- u16_t entries;
u16_t freqIndex = 0;
u16_t i;
@@ -984,33 +983,28 @@ void zfSetRfRegs(zdev_t* dev, u32_t frequency)
}
#if 1
- entries = sizeof(otusBank) / sizeof(*otusBank);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(otusBank); i++)
{
reg_write(otusBank[i][0], otusBank[i][freqIndex]);
}
#else
/* Bank0 */
- entries = sizeof(ar5416Bank0) / sizeof(*ar5416Bank0);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(ar5416Bank0); i++)
{
reg_write(ar5416Bank0[i][0], ar5416Bank0[i][1]);
}
/* Bank1 */
- entries = sizeof(ar5416Bank1) / sizeof(*ar5416Bank1);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(ar5416Bank1); i++)
{
reg_write(ar5416Bank1[i][0], ar5416Bank1[i][1]);
}
/* Bank2 */
- entries = sizeof(ar5416Bank2) / sizeof(*ar5416Bank2);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(ar5416Bank2); i++)
{
reg_write(ar5416Bank2[i][0], ar5416Bank2[i][1]);
}
/* Bank3 */
- entries = sizeof(ar5416Bank3) / sizeof(*ar5416Bank3);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(ar5416Bank3); i++)
{
reg_write(ar5416Bank3[i][0], ar5416Bank3[i][freqIndex]);
}
@@ -1018,14 +1012,12 @@ void zfSetRfRegs(zdev_t* dev, u32_t frequency)
reg_write (0x98b0, 0x00000013);
reg_write (0x98e4, 0x00000002);
/* Bank6 */
- entries = sizeof(ar5416Bank6) / sizeof(*ar5416Bank6);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(ar5416Bank6); i++)
{
reg_write(ar5416Bank6[i][0], ar5416Bank6[i][freqIndex]);
}
/* Bank7 */
- entries = sizeof(ar5416Bank7) / sizeof(*ar5416Bank7);
- for (i=0; i<entries; i++)
+ for (i=0; i<ARRAY_SIZE(ar5416Bank7); i++)
{
reg_write(ar5416Bank7[i][0], ar5416Bank7[i][1]);
}
diff --git a/drivers/staging/otus/hal/hpreg.c b/drivers/staging/otus/hal/hpreg.c
index da3b7743387..9b04653c1c5 100644
--- a/drivers/staging/otus/hal/hpreg.c
+++ b/drivers/staging/otus/hal/hpreg.c
@@ -29,9 +29,6 @@
#include "hpreg.h"
#include "hpusb.h"
-/* used throughout this file... */
-#define N(a) (sizeof(a) / sizeof(a[0]))
-
#define HAL_MODE_11A_TURBO HAL_MODE_108A
#define HAL_MODE_11G_TURBO HAL_MODE_108G
@@ -1557,7 +1554,7 @@ u8_t GetWmRD(u16_t regionCode, u16_t channelFlag, REG_DOMAIN *rd)
u64_t flags = NO_REQ;
REG_DMN_PAIR_MAPPING *regPair = NULL;
- for (i = 0, found = 0; (i < N(regDomainPairs)) && (!found); i++) {
+ for (i = 0, found = 0; (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) {
if (regDomainPairs[i].regDmnEnum == regionCode) {
regPair = &regDomainPairs[i];
found = 1;
@@ -1581,7 +1578,7 @@ u8_t GetWmRD(u16_t regionCode, u16_t channelFlag, REG_DOMAIN *rd)
* unitary reg domain of the pair
*/
- for (i = 0 ; i < N(regDomains) ; i++) {
+ for (i = 0 ; i < ARRAY_SIZE(regDomains) ; i++) {
if (regDomains[i].regDmnEnum == regDmn) {
if (rd != NULL) {
zfMemoryCopy((u8_t *)rd, (u8_t *)&regDomains[i],
@@ -1653,7 +1650,7 @@ void zfHpGetRegulationTable(zdev_t *dev, u16_t regionCode, u16_t c_lo, u16_t c_h
zmw_enter_critical_section(dev);
- for (cm = modes; cm < &modes[N(modes)]; cm++) {
+ for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
u16_t c;
u64_t *channelBM = NULL;
REG_DOMAIN *rd = NULL;
@@ -1846,7 +1843,7 @@ void zfHpGetRegulationTablefromCountry(zdev_t *dev, u16_t CountryCode)
zmw_declare_for_critical_section();
- for (i = 0; i < N(allCountries); i++) {
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (CountryCode == allCountries[i].countryCode) {
RegDomain = allCountries[i].regDmnEnum;
@@ -1881,7 +1878,7 @@ u8_t zfHpGetRegulationTablefromISO(zdev_t *dev, u8_t *countryInfo, u8_t length)
strLen = 3; */
}
/* zm_debug_msg_s("Desired iso name = ", isoName); */
- for (i = 0; i < N(allCountries); i++) {
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
/* zm_debug_msg_s("Current iso name = ", allCountries[i].isoName); */
if (zfMemoryIsEqual((u8_t *)allCountries[i].isoName, (u8_t *)&countryInfo[2], length-1)) {
/* DbgPrint("Set current iso name = %s\n", allCountries[i].isoName); */
@@ -1937,7 +1934,7 @@ const char *zfHpGetisoNamefromregionCode(zdev_t *dev, u16_t regionCode)
{
u16_t i;
- for (i = 0; i < N(allCountries); i++) {
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (allCountries[i].regDmnEnum == regionCode)
return allCountries[i].isoName;
}
@@ -1953,7 +1950,7 @@ u16_t zfHpGetRegionCodeFromIsoName(zdev_t *dev, u8_t *countryIsoName)
/* if no matching item, return default */
regionCode = DEF_REGDMN;
- for (i = 0; i < N(allCountries); i++) {
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (zfMemoryIsEqual((u8_t *)allCountries[i].isoName, countryIsoName, 2)) {
regionCode = allCountries[i].regDmnEnum;
break;
diff --git a/drivers/staging/otus/ioctl.c b/drivers/staging/otus/ioctl.c
index a48c8e4a9ea..dc3066d2884 100644
--- a/drivers/staging/otus/ioctl.c
+++ b/drivers/staging/otus/ioctl.c
@@ -63,8 +63,7 @@
extern u16_t zfLnxGetVapId(zdev_t *dev);
-static const u32_t channel_frequency_11A[] =
-{
+static const u32_t channel_frequency_11A[] = {
/* Even element for Channel Number, Odd for Frequency */
36, 5180,
40, 5200,
@@ -507,7 +506,7 @@ int usbdrvwext_giwname(struct net_device *dev,
{
/* struct usbdrv_private *macp = dev->ml_priv; */
- strcpy(wrq->name, "IEEE 802.11-MIMO");
+ strcpy(wrq->name, "IEEE 802.11abgn");
return 0;
}
@@ -1361,7 +1360,7 @@ int usbdrvwext_giwpower(struct net_device *dev,
}
/*int usbdrvwext_setparam(struct net_device *dev, struct iw_request_info *info,
-* void *w, char *extra)
+* void *w, char *extra)
*{
* struct ieee80211vap *vap = dev->ml_priv;
* struct ieee80211com *ic = vap->iv_ic;
@@ -2261,10 +2260,10 @@ int usbdrv_wpa_ioctl(struct net_device *dev, struct athr_wlan_param *zdparm)
printk(KERN_ERR "wd->ap.wpaLen : % d\n", len);
/* DUMP WPA IE */
- for(ii = 0; ii < len;) {
+ for (ii = 0; ii < len;) {
printk(KERN_ERR "0x%02x ", wpaie[ii]);
- if((++ii % 16) == 0)
+ if ((++ii % 16) == 0)
printk(KERN_ERR "\n");
}
printk(KERN_ERR "\n");
@@ -2309,11 +2308,10 @@ int usbdrv_cenc_ioctl(struct net_device *dev, struct zydas_cenc_param *zdparm)
/* Get the AP Id */
apId = zfLnxGetVapId(dev);
- if (apId == 0xffff) {
+ if (apId == 0xffff)
apId = 0;
- } else {
+ else
apId = apId + 1;
- }
switch (zdparm->cmd) {
case ZM_CMD_CENC_SETCENC:
@@ -2334,15 +2332,15 @@ int usbdrv_cenc_ioctl(struct net_device *dev, struct zydas_cenc_param *zdparm)
printk(KERN_ERR "Key Index : % d\n", zdparm->u.crypt.keyid);
printk(KERN_ERR "Encryption key = ");
- for (ii = 0; ii < 16; ii++) {
+ for (ii = 0; ii < 16; ii++)
printk(KERN_ERR "0x%02x ", zdparm->u.crypt.key[ii]);
- }
+
printk(KERN_ERR "\n");
printk(KERN_ERR "MIC key = ");
- for(ii = 16; ii < ZM_CENC_KEY_SIZE; ii++) {
+ for (ii = 16; ii < ZM_CENC_KEY_SIZE; ii++)
printk(KERN_ERR "0x%02x ", zdparm->u.crypt.key[ii]);
- }
+
printk(KERN_ERR "\n");
/* Set up key information */
@@ -2424,7 +2422,7 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCSIWRTS:
err = usbdrv_ioctl_setrts(dev, &wrq->u.rts);
- if (! err)
+ if (!err)
changed = 1;
break;
/* set_auth */
@@ -2582,8 +2580,7 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ZM_AUTH_MODE_WPA);
} else if ((macp->supIe[17] == 0xf) &&
(macp->supIe[18] == 0xac) &&
- (macp->supIe[19] == 0x2))
- {
+ (macp->supIe[19] == 0x2)) {
printk(KERN_ERR
"wd->sta.authMode = ZM_AUTH_MODE_WPA2PSK\n");
/* wd->sta.authMode = ZM_AUTH_MODE_WPA2PSK; */
@@ -2592,8 +2589,7 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ZM_AUTH_MODE_WPA2PSK);
} else if ((macp->supIe[17] == 0xf) &&
(macp->supIe[18] == 0xac) &&
- (macp->supIe[19] == 0x1))
- {
+ (macp->supIe[19] == 0x1)) {
printk(KERN_ERR
"wd->sta.authMode = ZM_AUTH_MODE_WPA2\n");
/* wd->sta.authMode = ZM_AUTH_MODE_WPA2; */
@@ -2618,7 +2614,7 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
zfiWlanSetWepStatus(dev, ZM_ENCRYPTION_AES);
}
}
- //WPA2 or WPA2PSK
+ /*WPA2 or WPA2PSK*/
if ((macp->supIe[17] == 0xf) ||
(macp->supIe[18] == 0xac)) {
if (macp->supIe[13] == 0x2) {
@@ -2656,7 +2652,7 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
printk(KERN_ERR
"****************ZD_PARAM_COUNTERMEASURES : ");
- if(arg) {
+ if (arg) {
/* mCounterMeasureState=1; */
printk(KERN_ERR "enable\n");
} else {
@@ -2667,20 +2663,18 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (op == ZD_PARAM_DROPUNENCRYPTED) {
printk(KERN_ERR "ZD_PARAM_DROPUNENCRYPTED : ");
- if(arg) {
+ if (arg)
printk(KERN_ERR "enable\n");
- } else {
+ else
printk(KERN_ERR "disable\n");
- }
}
if (op == ZD_PARAM_AUTH_ALGS) {
printk(KERN_ERR "ZD_PARAM_AUTH_ALGS : ");
- if (arg == 0) {
+ if (arg == 0)
printk(KERN_ERR "OPEN_SYSTEM\n");
- } else {
+ else
printk(KERN_ERR "SHARED_KEY\n");
- }
}
if (op == ZD_PARAM_WPS_FILTER) {
printk(KERN_ERR "ZD_PARAM_WPS_FILTER : ");
@@ -2705,11 +2699,10 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Get the AP Id */
apId = zfLnxGetVapId(dev);
- if (apId == 0xffff) {
+ if (apId == 0xffff)
apId = 0;
- } else {
+ else
apId = apId + 1;
- }
if (copy_from_user(&req_wpaie, ifr->ifr_data,
sizeof(struct ieee80211req_wpaie))) {
@@ -2721,10 +2714,10 @@ int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
for (j = 0; j < IEEE80211_ADDR_LEN; j++) {
if (macp->stawpaie[i].wpa_macaddr[j] !=
req_wpaie.wpa_macaddr[j])
- break;
+ break;
}
if (j == 6)
- break;
+ break;
}
if (i < ZM_OAL_MAX_STA_SUPPORT) {
diff --git a/drivers/staging/otus/wrap_sec.c b/drivers/staging/otus/wrap_sec.c
index 0b238e9999b..1fba7a98d52 100644
--- a/drivers/staging/otus/wrap_sec.c
+++ b/drivers/staging/otus/wrap_sec.c
@@ -36,7 +36,7 @@ extern int zfLnxCencSendMsg(struct sock *netlink_sk, u_int8_t *msg, int len);
u16_t zfLnxCencAsocNotify(zdev_t *dev, u16_t *macAddr, u8_t *body,
u16_t bodySize, u16_t port)
{
- struct usbdrv_private *macp = (struct usbdrv_private *)dev->priv;
+ struct usbdrv_private *macp = dev->priv;
struct zydas_cenc_sta_info cenc_info;
/* struct sock *netlink_sk; */
u8_t ie_len;
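The wrap_sec.c change above only drops a redundant cast: dev->priv is a void pointer, and in C a void * converts to any object-pointer type implicitly. A tiny stand-alone illustration (not driver code; the struct and variable names are made up):

    #include <stdlib.h>

    struct example_priv { int id; };

    int main(void)
    {
            void *opaque = malloc(sizeof(struct example_priv));
            struct example_priv *p = opaque;  /* no cast needed: void * converts implicitly */

            free(p);
            return 0;
    }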
diff --git a/drivers/staging/otus/wrap_usb.c b/drivers/staging/otus/wrap_usb.c
index 93459cadc47..9f04047bf5a 100644
--- a/drivers/staging/otus/wrap_usb.c
+++ b/drivers/staging/otus/wrap_usb.c
@@ -104,6 +104,11 @@ u32_t zfwUsbSubmitControl(zdev_t *dev, u8_t req, u16_t value, u16_t index,
if (size > 0) {
buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("zfwUsbSubmitControl() failed, "
+ "kmalloc() returned NULL\n");
+ return 1;
+ }
memcpy(buf, (u8_t *)data, size);
} else
buf = NULL;
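The wrap_usb.c hunk above adds the NULL check that was missing after kmalloc(). For reference, the same allocate-then-copy sequence is often written with kmemdup(), which folds the two steps into one call; a hedged sketch only (the helper name is invented, and returning NULL instead of the driver's numeric error code is a simplification):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical helper: duplicate a control-transfer payload, or return
     * NULL if there is no payload or the allocation fails. */
    static u8 *dup_ctrl_payload(const void *data, u16 size)
    {
            u8 *buf;

            if (!size)
                    return NULL;

            buf = kmemdup(data, size, GFP_KERNEL); /* kmalloc() + memcpy() in one call */
            if (!buf)
                    pr_err("control payload allocation failed\n");

            return buf;
    }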
diff --git a/drivers/staging/otus/wwrap.c b/drivers/staging/otus/wwrap.c
index b02eb42cd79..fcd3da07155 100644
--- a/drivers/staging/otus/wwrap.c
+++ b/drivers/staging/otus/wwrap.c
@@ -29,24 +29,24 @@
#include <linux/slab.h>
#include <net/iw_handler.h>
-extern void zfiRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo);
-extern void zfCoreRecv(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo);
-extern void zfIdlChkRsp(zdev_t* dev, u32_t* rsp, u16_t rspLen);
-extern void zfIdlRsp(zdev_t* dev, u32_t *rsp, u16_t rspLen);
+extern void zfiRecv80211(zdev_t *dev, zbuf_t *buf, struct zsAdditionInfo *addInfo);
+extern void zfCoreRecv(zdev_t *dev, zbuf_t *buf, struct zsAdditionInfo *addInfo);
+extern void zfIdlChkRsp(zdev_t *dev, u32_t *rsp, u16_t rspLen);
+extern void zfIdlRsp(zdev_t *dev, u32_t *rsp, u16_t rspLen);
-//extern struct zsWdsStruct wds[ZM_WDS_PORT_NUMBER];
+/*extern struct zsWdsStruct wds[ZM_WDS_PORT_NUMBER];*/
extern struct zsVapStruct vap[ZM_VAP_PORT_NUMBER];
-u32_t zfLnxUsbSubmitTxData(zdev_t* dev);
-u32_t zfLnxUsbIn(zdev_t* dev, urb_t *urb, zbuf_t *buf);
+u32_t zfLnxUsbSubmitTxData(zdev_t *dev);
+u32_t zfLnxUsbIn(zdev_t *dev, urb_t *urb, zbuf_t *buf);
u32_t zfLnxSubmitRegInUrb(zdev_t *dev);
u32_t zfLnxUsbSubmitBulkUrb(urb_t *urb, struct usb_device *usb, u16_t epnum, u16_t direction,
- void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context);
+ void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context);
u32_t zfLnxUsbSubmitIntUrb(urb_t *urb, struct usb_device *usb, u16_t epnum, u16_t direction,
- void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context,
- u32_t interval);
+ void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context,
+ u32_t interval);
u16_t zfLnxGetFreeTxUrb(zdev_t *dev)
{
@@ -56,22 +56,19 @@ u16_t zfLnxGetFreeTxUrb(zdev_t *dev)
spin_lock_irqsave(&macp->cs_lock, irqFlag);
- //idx = ((macp->TxUrbTail + 1) & (ZM_MAX_TX_URB_NUM - 1));
+ /*idx = ((macp->TxUrbTail + 1) & (ZM_MAX_TX_URB_NUM - 1));*/
- //if (idx != macp->TxUrbHead)
- if (macp->TxUrbCnt != 0)
- {
- idx = macp->TxUrbTail;
- macp->TxUrbTail = ((macp->TxUrbTail + 1) & (ZM_MAX_TX_URB_NUM - 1));
- macp->TxUrbCnt--;
- }
- else
- {
- //printk(KERN_ERR "macp->TxUrbCnt: %d\n", macp->TxUrbCnt);
- idx = 0xffff;
- }
+ /*if (idx != macp->TxUrbHead)*/
+ if (macp->TxUrbCnt != 0) {
+ idx = macp->TxUrbTail;
+ macp->TxUrbTail = ((macp->TxUrbTail + 1) & (ZM_MAX_TX_URB_NUM - 1));
+ macp->TxUrbCnt--;
+ } else {
+ /*printk(KERN_ERR "macp->TxUrbCnt: %d\n", macp->TxUrbCnt);*/
+ idx = 0xffff;
+ }
- spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
+ spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
return idx;
}
@@ -85,16 +82,13 @@ void zfLnxPutTxUrb(zdev_t *dev)
idx = ((macp->TxUrbHead + 1) & (ZM_MAX_TX_URB_NUM - 1));
- //if (idx != macp->TxUrbTail)
- if (macp->TxUrbCnt < ZM_MAX_TX_URB_NUM)
- {
- macp->TxUrbHead = idx;
- macp->TxUrbCnt++;
- }
- else
- {
- printk("UsbTxUrbQ inconsistent: TxUrbHead: %d, TxUrbTail: %d\n",
- macp->TxUrbHead, macp->TxUrbTail);
+ /*if (idx != macp->TxUrbTail)*/
+ if (macp->TxUrbCnt < ZM_MAX_TX_URB_NUM) {
+ macp->TxUrbHead = idx;
+ macp->TxUrbCnt++;
+ } else {
+ printk("UsbTxUrbQ inconsistent: TxUrbHead: %d, TxUrbTail: %d\n",
+ macp->TxUrbHead, macp->TxUrbTail);
}
spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
@@ -125,24 +119,20 @@ UsbTxQ_t *zfLnxGetUsbTxBuffer(zdev_t *dev)
idx = ((macp->TxBufHead+1) & (ZM_MAX_TX_BUF_NUM - 1));
- //if (idx != macp->TxBufTail)
- if (macp->TxBufCnt > 0)
- {
- //printk("CWY - zfwGetUsbTxBuffer ,macp->TxBufCnt = %d\n", macp->TxBufCnt);
- TxQ = (UsbTxQ_t *)&(macp->UsbTxBufQ[macp->TxBufHead]);
- macp->TxBufHead = ((macp->TxBufHead+1) & (ZM_MAX_TX_BUF_NUM - 1));
- macp->TxBufCnt--;
- }
- else
- {
- if (macp->TxBufHead != macp->TxBufTail)
- {
- printk(KERN_ERR "zfwGetUsbTxBuf UsbTxBufQ inconsistent: TxBufHead: %d, TxBufTail: %d\n",
- macp->TxBufHead, macp->TxBufTail);
- }
-
- spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
- return NULL;
+ /*if (idx != macp->TxBufTail)*/
+ if (macp->TxBufCnt > 0) {
+ /*printk("CWY - zfwGetUsbTxBuffer ,macp->TxBufCnt = %d\n", macp->TxBufCnt);*/
+ TxQ = (UsbTxQ_t *)&(macp->UsbTxBufQ[macp->TxBufHead]);
+ macp->TxBufHead = ((macp->TxBufHead+1) & (ZM_MAX_TX_BUF_NUM - 1));
+ macp->TxBufCnt--;
+ } else {
+ if (macp->TxBufHead != macp->TxBufTail) {
+ printk(KERN_ERR "zfwGetUsbTxBuf UsbTxBufQ inconsistent: TxBufHead: %d, TxBufTail: %d\n",
+ macp->TxBufHead, macp->TxBufTail);
+ }
+
+ spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
+ return NULL;
}
spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
@@ -150,8 +140,8 @@ UsbTxQ_t *zfLnxGetUsbTxBuffer(zdev_t *dev)
}
u16_t zfLnxPutUsbTxBuffer(zdev_t *dev, u8_t *hdr, u16_t hdrlen,
- u8_t *snap, u16_t snapLen, u8_t *tail, u16_t tailLen,
- zbuf_t *buf, u16_t offset)
+ u8_t *snap, u16_t snapLen, u8_t *tail, u16_t tailLen,
+ zbuf_t *buf, u16_t offset)
{
struct usbdrv_private *macp = dev->ml_priv;
u16_t idx;
@@ -163,32 +153,29 @@ u16_t zfLnxPutUsbTxBuffer(zdev_t *dev, u8_t *hdr, u16_t hdrlen,
idx = ((macp->TxBufTail+1) & (ZM_MAX_TX_BUF_NUM - 1));
/* For Tx debug */
- //zm_assert(macp->TxBufCnt >= 0); // deleted because of always true
-
- //if (idx != macp->TxBufHead)
- if (macp->TxBufCnt < ZM_MAX_TX_BUF_NUM)
- {
- //printk("CWY - zfwPutUsbTxBuffer ,macp->TxBufCnt = %d\n", macp->TxBufCnt);
- TxQ = (UsbTxQ_t *)&(macp->UsbTxBufQ[macp->TxBufTail]);
- memcpy(TxQ->hdr, hdr, hdrlen);
- TxQ->hdrlen = hdrlen;
- memcpy(TxQ->snap, snap, snapLen);
- TxQ->snapLen = snapLen;
- memcpy(TxQ->tail, tail, tailLen);
- TxQ->tailLen = tailLen;
- TxQ->buf = buf;
- TxQ->offset = offset;
-
- macp->TxBufTail = ((macp->TxBufTail+1) & (ZM_MAX_TX_BUF_NUM - 1));
- macp->TxBufCnt++;
- }
- else
- {
- printk(KERN_ERR "zfLnxPutUsbTxBuffer UsbTxBufQ inconsistent: TxBufHead: %d, TxBufTail: %d, TxBufCnt: %d\n",
- macp->TxBufHead, macp->TxBufTail, macp->TxBufCnt);
- spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
- return 0xffff;
- }
+ /*zm_assert(macp->TxBufCnt >= 0); // deleted because of always true*/
+
+ /*if (idx != macp->TxBufHead)*/
+ if (macp->TxBufCnt < ZM_MAX_TX_BUF_NUM) {
+ /*printk("CWY - zfwPutUsbTxBuffer ,macp->TxBufCnt = %d\n", macp->TxBufCnt);*/
+ TxQ = (UsbTxQ_t *)&(macp->UsbTxBufQ[macp->TxBufTail]);
+ memcpy(TxQ->hdr, hdr, hdrlen);
+ TxQ->hdrlen = hdrlen;
+ memcpy(TxQ->snap, snap, snapLen);
+ TxQ->snapLen = snapLen;
+ memcpy(TxQ->tail, tail, tailLen);
+ TxQ->tailLen = tailLen;
+ TxQ->buf = buf;
+ TxQ->offset = offset;
+
+ macp->TxBufTail = ((macp->TxBufTail+1) & (ZM_MAX_TX_BUF_NUM - 1));
+ macp->TxBufCnt++;
+ } else {
+ printk(KERN_ERR "zfLnxPutUsbTxBuffer UsbTxBufQ inconsistent: TxBufHead: %d, TxBufTail: %d, TxBufCnt: %d\n",
+ macp->TxBufHead, macp->TxBufTail, macp->TxBufCnt);
+ spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
+ return 0xffff;
+ }
spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
return 0;
@@ -197,28 +184,25 @@ u16_t zfLnxPutUsbTxBuffer(zdev_t *dev, u8_t *hdr, u16_t hdrlen,
zbuf_t *zfLnxGetUsbRxBuffer(zdev_t *dev)
{
struct usbdrv_private *macp = dev->ml_priv;
- //u16_t idx;
+ /*u16_t idx;*/
zbuf_t *buf;
unsigned long irqFlag;
spin_lock_irqsave(&macp->cs_lock, irqFlag);
- //idx = ((macp->RxBufHead+1) & (ZM_MAX_RX_URB_NUM - 1));
-
- //if (idx != macp->RxBufTail)
- if (macp->RxBufCnt != 0)
- {
- buf = macp->UsbRxBufQ[macp->RxBufHead];
- macp->RxBufHead = ((macp->RxBufHead+1) & (ZM_MAX_RX_URB_NUM - 1));
- macp->RxBufCnt--;
- }
- else
- {
- printk("RxBufQ inconsistent: RxBufHead: %d, RxBufTail: %d\n",
- macp->RxBufHead, macp->RxBufTail);
- spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
- return NULL;
- }
+ /*idx = ((macp->RxBufHead+1) & (ZM_MAX_RX_URB_NUM - 1));*/
+
+ /*if (idx != macp->RxBufTail)*/
+ if (macp->RxBufCnt != 0) {
+ buf = macp->UsbRxBufQ[macp->RxBufHead];
+ macp->RxBufHead = ((macp->RxBufHead+1) & (ZM_MAX_RX_URB_NUM - 1));
+ macp->RxBufCnt--;
+ } else {
+ printk("RxBufQ inconsistent: RxBufHead: %d, RxBufTail: %d\n",
+ macp->RxBufHead, macp->RxBufTail);
+ spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
+ return NULL;
+ }
spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
return buf;
@@ -234,61 +218,56 @@ u32_t zfLnxPutUsbRxBuffer(zdev_t *dev, zbuf_t *buf)
idx = ((macp->RxBufTail+1) & (ZM_MAX_RX_URB_NUM - 1));
- //if (idx != macp->RxBufHead)
- if (macp->RxBufCnt != ZM_MAX_RX_URB_NUM)
- {
- macp->UsbRxBufQ[macp->RxBufTail] = buf;
- macp->RxBufTail = idx;
- macp->RxBufCnt++;
- }
- else
- {
- printk("RxBufQ inconsistent: RxBufHead: %d, RxBufTail: %d\n",
- macp->RxBufHead, macp->RxBufTail);
- spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
- return 0xffff;
- }
+ /*if (idx != macp->RxBufHead)*/
+ if (macp->RxBufCnt != ZM_MAX_RX_URB_NUM) {
+ macp->UsbRxBufQ[macp->RxBufTail] = buf;
+ macp->RxBufTail = idx;
+ macp->RxBufCnt++;
+ } else {
+ printk("RxBufQ inconsistent: RxBufHead: %d, RxBufTail: %d\n",
+ macp->RxBufHead, macp->RxBufTail);
+ spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
+ return 0xffff;
+ }
- spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
- return 0;
+ spin_unlock_irqrestore(&macp->cs_lock, irqFlag);
+ return 0;
}
void zfLnxUsbDataOut_callback(urb_t *urb)
{
- zdev_t* dev = urb->context;
- //UsbTxQ_t *TxData;
+ zdev_t *dev = urb->context;
+ /*UsbTxQ_t *TxData;*/
/* Give the urb back */
zfLnxPutTxUrb(dev);
/* Check whether there is any pending buffer needed */
/* to be sent */
- if (zfLnxCheckTxBufferCnt(dev) != 0)
- {
- //TxData = zfwGetUsbTxBuffer(dev);
-
- //if (TxData == NULL)
- //{
- // printk("Get a NULL buffer from zfwGetUsbTxBuffer\n");
- // return;
- //}
- //else
- //{
- zfLnxUsbSubmitTxData(dev);
- //}
+ if (zfLnxCheckTxBufferCnt(dev) != 0) {
+ /*TxData = zfwGetUsbTxBuffer(dev);
+ //if (TxData == NULL)
+ //{
+ // printk("Get a NULL buffer from zfwGetUsbTxBuffer\n");
+ // return;
+ //}
+ //else
+ //{
+ zfLnxUsbSubmitTxData(dev);
+ //}*/
}
}
void zfLnxUsbDataIn_callback(urb_t *urb)
{
- zdev_t* dev = urb->context;
+ zdev_t *dev = urb->context;
struct usbdrv_private *macp = dev->ml_priv;
zbuf_t *buf;
zbuf_t *new_buf;
int status;
#if ZM_USB_STREAM_MODE == 1
- static int remain_len = 0, check_pad = 0, check_len = 0;
+ static int remain_len, check_pad, check_len;
int index = 0;
int chk_idx;
u16_t pkt_len;
@@ -299,47 +278,45 @@ void zfLnxUsbDataIn_callback(urb_t *urb)
#endif
/* Check status for URB */
- if (urb->status != 0){
- printk("zfLnxUsbDataIn_callback() : status=0x%x\n", urb->status);
- if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)
- && (urb->status != -ESHUTDOWN))
- {
- if (urb->status == -EPIPE){
- //printk(KERN_ERR "nonzero read bulk status received: -EPIPE");
- status = -1;
- }
-
- if (urb->status == -EPROTO){
- //printk(KERN_ERR "nonzero read bulk status received: -EPROTO");
- status = -1;
- }
- }
-
- //printk(KERN_ERR "urb->status: 0x%08x\n", urb->status);
-
- /* Dequeue skb buffer */
- buf = zfLnxGetUsbRxBuffer(dev);
- dev_kfree_skb_any(buf);
- #if 0
- /* Enqueue skb buffer */
- zfLnxPutUsbRxBuffer(dev, buf);
-
- /* Submit a Rx urb */
- zfLnxUsbIn(dev, urb, buf);
- #endif
- return;
- }
+ if (urb->status != 0) {
+ printk("zfLnxUsbDataIn_callback() : status=0x%x\n", urb->status);
+ if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)
+ && (urb->status != -ESHUTDOWN)) {
+ if (urb->status == -EPIPE) {
+ /*printk(KERN_ERR "nonzero read bulk status received: -EPIPE");*/
+ status = -1;
+ }
+
+ if (urb->status == -EPROTO) {
+ /*printk(KERN_ERR "nonzero read bulk status received: -EPROTO");*/
+ status = -1;
+ }
+ }
+
+ /*printk(KERN_ERR "urb->status: 0x%08x\n", urb->status);*/
+
+ /* Dequeue skb buffer */
+ buf = zfLnxGetUsbRxBuffer(dev);
+ dev_kfree_skb_any(buf);
+ #if 0
+ /* Enqueue skb buffer */
+ zfLnxPutUsbRxBuffer(dev, buf);
- if (urb->actual_length == 0)
- {
- printk(KERN_ERR "Get an URB whose length is zero");
- status = -1;
+ /* Submit a Rx urb */
+ zfLnxUsbIn(dev, urb, buf);
+ #endif
+ return;
+ }
+
+ if (urb->actual_length == 0) {
+ printk(KERN_ERR "Get an URB whose length is zero");
+ status = -1;
}
/* Dequeue skb buffer */
buf = zfLnxGetUsbRxBuffer(dev);
- //zfwBufSetSize(dev, buf, urb->actual_length);
+ /*zfwBufSetSize(dev, buf, urb->actual_length);*/
#ifdef NET_SKBUFF_DATA_USES_OFFSET
buf->tail = 0;
buf->len = 0;
@@ -353,134 +330,122 @@ void zfLnxUsbDataIn_callback(urb_t *urb)
skb_put(buf, urb->actual_length);
#if ZM_USB_STREAM_MODE == 1
- if (remain_len != 0)
- {
- zbuf_t *remain_buf = macp->reamin_buf;
+ if (remain_len != 0) {
+ zbuf_t *remain_buf = macp->reamin_buf;
- index = remain_len;
- remain_len -= check_pad;
+ index = remain_len;
+ remain_len -= check_pad;
- /* Copy data */
- memcpy(&(remain_buf->data[check_len]), buf->data, remain_len);
- check_len += remain_len;
- remain_len = 0;
+ /* Copy data */
+ memcpy(&(remain_buf->data[check_len]), buf->data, remain_len);
+ check_len += remain_len;
+ remain_len = 0;
- rxBufPool[rxBufPoolIndex++] = remain_buf;
+ rxBufPool[rxBufPoolIndex++] = remain_buf;
}
- while(index < urb->actual_length)
- {
- pkt_len = buf->data[index] + (buf->data[index+1] << 8);
- pkt_tag = buf->data[index+2] + (buf->data[index+3] << 8);
-
- if (pkt_tag == 0x4e00)
- {
- int pad_len;
-
- //printk("Get a packet, index: %d, pkt_len: 0x%04x\n", index, pkt_len);
- #if 0
- /* Dump data */
- for (ii = index; ii < pkt_len+4;)
- {
- printk("%02x ", (buf->data[ii] & 0xff));
-
- if ((++ii % 16) == 0)
- printk("\n");
- }
-
- printk("\n");
- #endif
-
- pad_len = 4 - (pkt_len & 0x3);
-
- if(pad_len == 4)
- pad_len = 0;
-
- chk_idx = index;
- index = index + 4 + pkt_len + pad_len;
-
- if (index > ZM_MAX_RX_BUFFER_SIZE)
- {
- remain_len = index - ZM_MAX_RX_BUFFER_SIZE; // - pad_len;
- check_len = ZM_MAX_RX_BUFFER_SIZE - chk_idx - 4;
- check_pad = pad_len;
-
- /* Allocate a skb buffer */
- //new_buf = zfwBufAllocate(dev, ZM_MAX_RX_BUFFER_SIZE);
- new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
-
- /* Set skb buffer length */
- #ifdef NET_SKBUFF_DATA_USES_OFFSET
- new_buf->tail = 0;
- new_buf->len = 0;
- #else
- new_buf->tail = new_buf->data;
- new_buf->len = 0;
- #endif
-
- skb_put(new_buf, pkt_len);
-
- /* Copy the buffer */
- memcpy(new_buf->data, &(buf->data[chk_idx+4]), check_len);
-
- /* Record the buffer pointer */
- macp->reamin_buf = new_buf;
- }
- else
- {
- #ifdef ZM_DONT_COPY_RX_BUFFER
- if (rxBufPoolIndex == 0)
- {
- new_buf = skb_clone(buf, GFP_ATOMIC);
-
- new_buf->data = &(buf->data[chk_idx+4]);
- new_buf->len = pkt_len;
- }
- else
- {
- #endif
- /* Allocate a skb buffer */
- new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
-
- /* Set skb buffer length */
- #ifdef NET_SKBUFF_DATA_USES_OFFSET
- new_buf->tail = 0;
- new_buf->len = 0;
- #else
- new_buf->tail = new_buf->data;
- new_buf->len = 0;
- #endif
-
- skb_put(new_buf, pkt_len);
-
- /* Copy the buffer */
- memcpy(new_buf->data, &(buf->data[chk_idx+4]), pkt_len);
-
- #ifdef ZM_DONT_COPY_RX_BUFFER
- }
- #endif
- rxBufPool[rxBufPoolIndex++] = new_buf;
- }
- }
- else
- {
- printk(KERN_ERR "Can't find tag, pkt_len: 0x%04x, tag: 0x%04x\n", pkt_len, pkt_tag);
-
- /* Free buffer */
- dev_kfree_skb_any(buf);
-
- /* Allocate a skb buffer */
- new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
-
- /* Enqueue skb buffer */
- zfLnxPutUsbRxBuffer(dev, new_buf);
-
- /* Submit a Rx urb */
- zfLnxUsbIn(dev, urb, new_buf);
-
- return;
- }
- }
+ while (index < urb->actual_length) {
+ pkt_len = buf->data[index] + (buf->data[index+1] << 8);
+ pkt_tag = buf->data[index+2] + (buf->data[index+3] << 8);
+
+ if (pkt_tag == 0x4e00) {
+ int pad_len;
+
+ /*printk("Get a packet, index: %d, pkt_len: 0x%04x\n", index, pkt_len);*/
+ #if 0
+ /* Dump data */
+ for (ii = index; ii < pkt_len+4;) {
+ printk("%02x ", (buf->data[ii] & 0xff));
+
+ if ((++ii % 16) == 0)
+ printk("\n");
+ }
+
+ printk("\n");
+ #endif
+
+ pad_len = 4 - (pkt_len & 0x3);
+
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > ZM_MAX_RX_BUFFER_SIZE) {
+ remain_len = index - ZM_MAX_RX_BUFFER_SIZE; /* - pad_len;*/
+ check_len = ZM_MAX_RX_BUFFER_SIZE - chk_idx - 4;
+ check_pad = pad_len;
+
+ /* Allocate a skb buffer */
+ /*new_buf = zfwBufAllocate(dev, ZM_MAX_RX_BUFFER_SIZE);*/
+ new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
+
+ /* Set skb buffer length */
+ #ifdef NET_SKBUFF_DATA_USES_OFFSET
+ new_buf->tail = 0;
+ new_buf->len = 0;
+ #else
+ new_buf->tail = new_buf->data;
+ new_buf->len = 0;
+ #endif
+
+ skb_put(new_buf, pkt_len);
+
+ /* Copy the buffer */
+ memcpy(new_buf->data, &(buf->data[chk_idx+4]), check_len);
+
+ /* Record the buffer pointer */
+ macp->reamin_buf = new_buf;
+ } else {
+ #ifdef ZM_DONT_COPY_RX_BUFFER
+ if (rxBufPoolIndex == 0) {
+ new_buf = skb_clone(buf, GFP_ATOMIC);
+
+ new_buf->data = &(buf->data[chk_idx+4]);
+ new_buf->len = pkt_len;
+ } else {
+ #endif
+ /* Allocate a skb buffer */
+ new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
+
+ /* Set skb buffer length */
+ #ifdef NET_SKBUFF_DATA_USES_OFFSET
+ new_buf->tail = 0;
+ new_buf->len = 0;
+ #else
+ new_buf->tail = new_buf->data;
+ new_buf->len = 0;
+ #endif
+
+ skb_put(new_buf, pkt_len);
+
+ /* Copy the buffer */
+ memcpy(new_buf->data, &(buf->data[chk_idx+4]), pkt_len);
+
+ #ifdef ZM_DONT_COPY_RX_BUFFER
+ }
+ #endif
+ rxBufPool[rxBufPoolIndex++] = new_buf;
+ }
+ } else {
+ printk(KERN_ERR "Can't find tag, pkt_len: 0x%04x, tag: 0x%04x\n", pkt_len, pkt_tag);
+
+ /* Free buffer */
+ dev_kfree_skb_any(buf);
+
+ /* Allocate a skb buffer */
+ new_buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
+
+ /* Enqueue skb buffer */
+ zfLnxPutUsbRxBuffer(dev, new_buf);
+
+ /* Submit a Rx urb */
+ zfLnxUsbIn(dev, urb, new_buf);
+
+ return;
+ }
+ }
/* Free buffer */
dev_kfree_skb_any(buf);
@@ -496,9 +461,8 @@ void zfLnxUsbDataIn_callback(urb_t *urb)
zfLnxUsbIn(dev, urb, new_buf);
#if ZM_USB_STREAM_MODE == 1
- for(ii = 0; ii < rxBufPoolIndex; ii++)
- {
- macp->usbCbFunctions.zfcbUsbRecv(dev, rxBufPool[ii]);
+ for (ii = 0; ii < rxBufPoolIndex; ii++) {
+ macp->usbCbFunctions.zfcbUsbRecv(dev, rxBufPool[ii]);
}
#else
/* pass data to upper layer */
@@ -508,51 +472,48 @@ void zfLnxUsbDataIn_callback(urb_t *urb)
void zfLnxUsbRegOut_callback(urb_t *urb)
{
- //dev_t* dev = urb->context;
+ /*dev_t* dev = urb->context;*/
- //printk(KERN_ERR "zfwUsbRegOut_callback\n");
+ /*printk(KERN_ERR "zfwUsbRegOut_callback\n");*/
}
void zfLnxUsbRegIn_callback(urb_t *urb)
{
- zdev_t* dev = urb->context;
+ zdev_t *dev = urb->context;
u32_t rsp[64/4];
int status;
struct usbdrv_private *macp = dev->ml_priv;
/* Check status for URB */
- if (urb->status != 0){
- printk("zfLnxUsbRegIn_callback() : status=0x%x\n", urb->status);
- if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)
- && (urb->status != -ESHUTDOWN))
- {
- if (urb->status == -EPIPE){
- //printk(KERN_ERR "nonzero read bulk status received: -EPIPE");
- status = -1;
- }
-
- if (urb->status == -EPROTO){
- //printk(KERN_ERR "nonzero read bulk status received: -EPROTO");
- status = -1;
- }
- }
-
- //printk(KERN_ERR "urb->status: 0x%08x\n", urb->status);
- return;
- }
+ if (urb->status != 0) {
+ printk("zfLnxUsbRegIn_callback() : status=0x%x\n", urb->status);
+ if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET) && (urb->status != -ESHUTDOWN)) {
+ if (urb->status == -EPIPE) {
+ /*printk(KERN_ERR "nonzero read bulk status received: -EPIPE");*/
+ status = -1;
+ }
+
+ if (urb->status == -EPROTO) {
+ /*printk(KERN_ERR "nonzero read bulk status received: -EPROTO");*/
+ status = -1;
+ }
+ }
+
+ /*printk(KERN_ERR "urb->status: 0x%08x\n", urb->status);*/
+ return;
+ }
- if (urb->actual_length == 0)
- {
- printk(KERN_ERR "Get an URB whose length is zero");
- status = -1;
+ if (urb->actual_length == 0) {
+ printk(KERN_ERR "Get an URB whose length is zero");
+ status = -1;
}
/* Copy data into respone buffer */
memcpy(rsp, macp->regUsbReadBuf, urb->actual_length);
/* Notify to upper layer */
- //zfIdlChkRsp(dev, rsp, (u16_t)urb->actual_length);
- //zfiUsbRegIn(dev, rsp, (u16_t)urb->actual_length);
+ /*zfIdlChkRsp(dev, rsp, (u16_t)urb->actual_length);*/
+ /*zfiUsbRegIn(dev, rsp, (u16_t)urb->actual_length);*/
macp->usbCbFunctions.zfcbUsbRegIn(dev, rsp, (u16_t)urb->actual_length);
/* Issue another USB IN URB */
@@ -564,22 +525,22 @@ u32_t zfLnxSubmitRegInUrb(zdev_t *dev)
u32_t ret;
struct usbdrv_private *macp = dev->ml_priv;
- /* Submit a rx urb */
+ /* Submit a rx urb
//ret = zfLnxUsbSubmitBulkUrb(macp->RegInUrb, macp->udev,
// USB_REG_IN_PIPE, USB_DIR_IN, macp->regUsbReadBuf,
// ZM_USB_REG_MAX_BUF_SIZE, zfLnxUsbRegIn_callback, dev);
//CWYang(-)
//if (ret != 0)
- // printk("zfwUsbSubmitBulkUrb fail, status: 0x%08x\n", (int)ret);
+ // printk("zfwUsbSubmitBulkUrb fail, status: 0x%08x\n", (int)ret);*/
ret = zfLnxUsbSubmitIntUrb(macp->RegInUrb, macp->udev,
- USB_REG_IN_PIPE, USB_DIR_IN, macp->regUsbReadBuf,
- ZM_USB_REG_MAX_BUF_SIZE, zfLnxUsbRegIn_callback, dev, 1);
+ USB_REG_IN_PIPE, USB_DIR_IN, macp->regUsbReadBuf,
+ ZM_USB_REG_MAX_BUF_SIZE, zfLnxUsbRegIn_callback, dev, 1);
return ret;
}
-u32_t zfLnxUsbSubmitTxData(zdev_t* dev)
+u32_t zfLnxUsbSubmitTxData(zdev_t *dev)
{
u32_t i;
u32_t ret;
@@ -600,39 +561,33 @@ u32_t zfLnxUsbSubmitTxData(zdev_t* dev)
freeTxUrb = zfLnxGetFreeTxUrb(dev);
/* If there is no any free Tx Urb */
- if (freeTxUrb == 0xffff)
- {
- //printk(KERN_ERR "Can't get free Tx Urb\n");
- //printk("CWY - Can't get free Tx Urb\n");
- return 0xffff;
+ if (freeTxUrb == 0xffff) {
+ /*printk(KERN_ERR "Can't get free Tx Urb\n");
+ //printk("CWY - Can't get free Tx Urb\n");*/
+ return 0xffff;
}
#if ZM_USB_TX_STREAM_MODE == 1
usbTxAggCnt = zfLnxCheckTxBufferCnt(dev);
- if (usbTxAggCnt >= ZM_MAX_TX_AGGREGATE_NUM)
- {
- usbTxAggCnt = ZM_MAX_TX_AGGREGATE_NUM;
- }
- else
- {
- usbTxAggCnt = 1;
+ if (usbTxAggCnt >= ZM_MAX_TX_AGGREGATE_NUM) {
+ usbTxAggCnt = ZM_MAX_TX_AGGREGATE_NUM;
+ } else {
+ usbTxAggCnt = 1;
}
- //printk("usbTxAggCnt: %d\n", usbTxAggCnt);
+ /*printk("usbTxAggCnt: %d\n", usbTxAggCnt);*/
#endif
#if ZM_USB_TX_STREAM_MODE == 1
- for(ii = 0; ii < usbTxAggCnt; ii++)
- {
+ for (ii = 0; ii < usbTxAggCnt; ii++) {
#endif
/* Dequeue the packet from UsbTxBufQ */
TxData = zfLnxGetUsbTxBuffer(dev);
- if (TxData == NULL)
- {
- /* Give the urb back */
- zfLnxPutTxUrb(dev);
- return 0xffff;
+ if (TxData == NULL) {
+ /* Give the urb back */
+ zfLnxPutTxUrb(dev);
+ return 0xffff;
}
/* Point to the freeTxUrb buffer */
@@ -644,114 +599,103 @@ u32_t zfLnxUsbSubmitTxData(zdev_t* dev)
/* Add the packet length and tag information */
*pUsbTxHdr++ = TxData->hdrlen + TxData->snapLen +
- (TxData->buf->len - TxData->offset) + TxData->tailLen;
+ (TxData->buf->len - TxData->offset) + TxData->tailLen;
*pUsbTxHdr++ = 0x697e;
puTxBuf += 4;
-#endif // #ifdef ZM_USB_TX_STREAM_MODE
+#endif /* #ifdef ZM_USB_TX_STREAM_MODE*/
/* Copy WLAN header and packet buffer into USB buffer */
- for(i = 0; i < TxData->hdrlen; i++)
- {
- *puTxBuf++ = TxData->hdr[i];
+ for (i = 0; i < TxData->hdrlen; i++) {
+ *puTxBuf++ = TxData->hdr[i];
}
/* Copy SNAP header */
- for(i = 0; i < TxData->snapLen; i++)
- {
- *puTxBuf++ = TxData->snap[i];
+ for (i = 0; i < TxData->snapLen; i++) {
+ *puTxBuf++ = TxData->snap[i];
}
/* Copy packet buffer */
- for(i = 0; i < TxData->buf->len - TxData->offset; i++)
- {
- //*puTxBuf++ = zmw_rx_buf_readb(dev, TxData->buf, i);
- *puTxBuf++ = *(u8_t*)((u8_t*)TxData->buf->data+i+TxData->offset);
+ for (i = 0; i < TxData->buf->len - TxData->offset; i++) {
+ /*puTxBuf++ = zmw_rx_buf_readb(dev, TxData->buf, i);*/
+ *puTxBuf++ = *(u8_t *)((u8_t *)TxData->buf->data+i+TxData->offset);
}
/* Copy tail */
- for(i = 0; i < TxData->tailLen; i++)
- {
- *puTxBuf++ = TxData->tail[i];
+ for (i = 0; i < TxData->tailLen; i++) {
+ *puTxBuf++ = TxData->tail[i];
}
len = TxData->hdrlen+TxData->snapLen+TxData->buf->len+TxData->tailLen-TxData->offset;
#if 0
- if (TxData->hdrlen != 0)
- {
- puTxBuf = macp->txUsbBuf[freeTxUrb];
- for (i = 0; i < len; i++)
- {
- printk("%02x ", puTxBuf[i]);
- if (i % 16 == 15)
- printk("\n");
- }
- printk("\n");
- }
+ if (TxData->hdrlen != 0) {
+ puTxBuf = macp->txUsbBuf[freeTxUrb];
+ for (i = 0; i < len; i++) {
+ printk("%02x ", puTxBuf[i]);
+ if (i % 16 == 15)
+ printk("\n");
+ }
+ printk("\n");
+ }
#endif
#if 0
/* For debug purpose */
- if(TxData->hdr[9] & 0x40)
- {
- int i;
- u16_t ctrlLen = TxData->hdr[0] + (TxData->hdr[1] << 8);
-
- if (ctrlLen != len + 4)
- {
- /* Dump control setting */
- for(i = 0; i < 8; i++)
- {
- printk(KERN_ERR "0x%02x ", TxData->hdr[i]);
- }
- printk(KERN_ERR "\n");
-
- printk(KERN_ERR "ctrLen: %d, hdrLen: %d, snapLen: %d\n", ctrlLen, TxData->hdrlen, TxData->snapLen);
- printk(KERN_ERR "bufLen: %d, tailLen: %d, len: %d\n", TxData->buf->len, TxData->tailLen, len);
- }
+ if (TxData->hdr[9] & 0x40) {
+ int i;
+ u16_t ctrlLen = TxData->hdr[0] + (TxData->hdr[1] << 8);
+
+ if (ctrlLen != len + 4) {
+ /* Dump control setting */
+ for (i = 0; i < 8; i++) {
+ printk(KERN_ERR "0x%02x ", TxData->hdr[i]);
+ }
+ printk(KERN_ERR "\n");
+
+ printk(KERN_ERR "ctrLen: %d, hdrLen: %d, snapLen: %d\n", ctrlLen, TxData->hdrlen, TxData->snapLen);
+ printk(KERN_ERR "bufLen: %d, tailLen: %d, len: %d\n", TxData->buf->len, TxData->tailLen, len);
+ }
}
#endif
#if ZM_USB_TX_STREAM_MODE == 1
- // Add the Length and Tag
+ /* Add the Length and Tag*/
len += 4;
- //printk("%d packet, length: %d\n", ii+1, len);
+ /*printk("%d packet, length: %d\n", ii+1, len);*/
- if (ii < (ZM_MAX_TX_AGGREGATE_NUM-1))
- {
- /* Pad the buffer to firmware descriptor boundary */
- offset += (((len-1) / 4) + 1) * 4;
+ if (ii < (ZM_MAX_TX_AGGREGATE_NUM-1)) {
+ /* Pad the buffer to firmware descriptor boundary */
+ offset += (((len-1) / 4) + 1) * 4;
}
- if (ii == (ZM_MAX_TX_AGGREGATE_NUM-1))
- {
- len += offset;
+ if (ii == (ZM_MAX_TX_AGGREGATE_NUM-1)) {
+ len += offset;
}
TxQPool[ii] = TxData;
- //DbgPrint("%d packet, offset: %d\n", ii+1, pUsbTxTransfer->offset);
+ /*DbgPrint("%d packet, offset: %d\n", ii+1, pUsbTxTransfer->offset);*/
/* free packet */
- //zfBufFree(dev, txData->buf);
+ /*zfBufFree(dev, txData->buf);*/
}
#endif
- //printk("CWY - call zfwUsbSubmitBulkUrb(), len = 0x%d\n", len);
+ /*printk("CWY - call zfwUsbSubmitBulkUrb(), len = 0x%d\n", len);*/
/* Submit a tx urb */
ret = zfLnxUsbSubmitBulkUrb(macp->WlanTxDataUrb[freeTxUrb], macp->udev,
- USB_WLAN_TX_PIPE, USB_DIR_OUT, macp->txUsbBuf[freeTxUrb],
- len, zfLnxUsbDataOut_callback, dev);
- //CWYang(-)
+ USB_WLAN_TX_PIPE, USB_DIR_OUT, macp->txUsbBuf[freeTxUrb],
+ len, zfLnxUsbDataOut_callback, dev);
+ /*CWYang(-)
//if (ret != 0)
- // printk("zfwUsbSubmitBulkUrb fail, status: 0x%08x\n", (int)ret);
+ // printk("zfwUsbSubmitBulkUrb fail, status: 0x%08x\n", (int)ret);*/
/* free packet */
- //dev_kfree_skb_any(TxData->buf);
+ /*dev_kfree_skb_any(TxData->buf);*/
#if ZM_USB_TX_STREAM_MODE == 1
- for(ii = 0; ii < usbTxAggCnt; ii++)
- macp->usbCbFunctions.zfcbUsbOutComplete(dev, TxQPool[ii]->buf, 1, TxQPool[ii]->hdr);
+ for (ii = 0; ii < usbTxAggCnt; ii++)
+ macp->usbCbFunctions.zfcbUsbOutComplete(dev, TxQPool[ii]->buf, 1, TxQPool[ii]->hdr);
#else
macp->usbCbFunctions.zfcbUsbOutComplete(dev, TxData->buf, 1, TxData->hdr);
#endif
@@ -761,23 +705,23 @@ u32_t zfLnxUsbSubmitTxData(zdev_t* dev)
-u32_t zfLnxUsbIn(zdev_t* dev, urb_t *urb, zbuf_t *buf)
+u32_t zfLnxUsbIn(zdev_t *dev, urb_t *urb, zbuf_t *buf)
{
u32_t ret;
struct usbdrv_private *macp = dev->ml_priv;
/* Submit a rx urb */
ret = zfLnxUsbSubmitBulkUrb(urb, macp->udev, USB_WLAN_RX_PIPE,
- USB_DIR_IN, buf->data, ZM_MAX_RX_BUFFER_SIZE,
- zfLnxUsbDataIn_callback, dev);
- //CWYang(-)
+ USB_DIR_IN, buf->data, ZM_MAX_RX_BUFFER_SIZE,
+ zfLnxUsbDataIn_callback, dev);
+ /*CWYang(-)
//if (ret != 0)
- // printk("zfwUsbSubmitBulkUrb fail, status: 0x%08x\n", (int)ret);
+ // printk("zfwUsbSubmitBulkUrb fail, status: 0x%08x\n", (int)ret);*/
return ret;
}
-u32_t zfLnxUsbWriteReg(zdev_t* dev, u32_t* cmd, u16_t cmdLen)
+u32_t zfLnxUsbWriteReg(zdev_t *dev, u32_t *cmd, u16_t cmdLen)
{
struct usbdrv_private *macp = dev->ml_priv;
u32_t ret;
@@ -785,7 +729,7 @@ u32_t zfLnxUsbWriteReg(zdev_t* dev, u32_t* cmd, u16_t cmdLen)
#ifdef ZM_CONFIG_BIG_ENDIAN
int ii = 0;
- for(ii=0; ii<(cmdLen>>2); ii++)
+ for (ii = 0; ii < (cmdLen>>2); ii++)
cmd[ii] = cpu_to_le32(cmd[ii]);
#endif
@@ -794,39 +738,38 @@ u32_t zfLnxUsbWriteReg(zdev_t* dev, u32_t* cmd, u16_t cmdLen)
/* Issue an USB Out transfer */
/* Submit a tx urb */
ret = zfLnxUsbSubmitIntUrb(macp->RegOutUrb, macp->udev,
- USB_REG_OUT_PIPE, USB_DIR_OUT, macp->regUsbWriteBuf,
- cmdLen, zfLnxUsbRegOut_callback, dev, 1);
+ USB_REG_OUT_PIPE, USB_DIR_OUT, macp->regUsbWriteBuf,
+ cmdLen, zfLnxUsbRegOut_callback, dev, 1);
return ret;
}
-u32_t zfLnxUsbOut(zdev_t* dev, u8_t *hdr, u16_t hdrlen, u8_t *snap, u16_t snapLen,
- u8_t *tail, u16_t tailLen, zbuf_t *buf, u16_t offset)
+u32_t zfLnxUsbOut(zdev_t *dev, u8_t *hdr, u16_t hdrlen, u8_t *snap, u16_t snapLen,
+ u8_t *tail, u16_t tailLen, zbuf_t *buf, u16_t offset)
{
u32_t ret;
struct usbdrv_private *macp = dev->ml_priv;
/* Check length of tail buffer */
- //zm_assert((tailLen <= 16));
+ /*zm_assert((tailLen <= 16));*/
/* Enqueue the packet into UsbTxBufQ */
- if (zfLnxPutUsbTxBuffer(dev, hdr, hdrlen, snap, snapLen, tail, tailLen, buf, offset) == 0xffff)
- {
- /* free packet */
- //printk("CWY - zfwPutUsbTxBuffer Error, free packet\n");
- //dev_kfree_skb_any(buf);
- macp->usbCbFunctions.zfcbUsbOutComplete(dev, buf, 0, hdr);
- return 0xffff;
- }
+ if (zfLnxPutUsbTxBuffer(dev, hdr, hdrlen, snap, snapLen, tail, tailLen, buf, offset) == 0xffff) {
+ /* free packet */
+ /*printk("CWY - zfwPutUsbTxBuffer Error, free packet\n");
+ //dev_kfree_skb_any(buf);*/
+ macp->usbCbFunctions.zfcbUsbOutComplete(dev, buf, 0, hdr);
+ return 0xffff;
+ }
- //return 0;
- //printk("CWY - call zfwUsbSubmitTxData()\n");
+ /*return 0;
+ //printk("CWY - call zfwUsbSubmitTxData()\n");*/
ret = zfLnxUsbSubmitTxData(dev);
return ret;
}
-void zfLnxInitUsbTxQ(zdev_t* dev)
+void zfLnxInitUsbTxQ(zdev_t *dev)
{
struct usbdrv_private *macp = dev->ml_priv;
@@ -842,7 +785,7 @@ void zfLnxInitUsbTxQ(zdev_t* dev)
macp->TxUrbCnt = ZM_MAX_TX_URB_NUM;
}
-void zfLnxInitUsbRxQ(zdev_t* dev)
+void zfLnxInitUsbRxQ(zdev_t *dev)
{
u16_t i;
zbuf_t *buf;
@@ -853,76 +796,65 @@ void zfLnxInitUsbRxQ(zdev_t* dev)
macp->RxBufHead = 0;
- for (i = 0; i < ZM_MAX_RX_URB_NUM; i++)
- {
- //buf = zfwBufAllocate(dev, ZM_MAX_RX_BUFFER_SIZE);
- buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
- macp->UsbRxBufQ[i] = buf;
- }
+ for (i = 0; i < ZM_MAX_RX_URB_NUM; i++) {
+ /*buf = zfwBufAllocate(dev, ZM_MAX_RX_BUFFER_SIZE);*/
+ buf = dev_alloc_skb(ZM_MAX_RX_BUFFER_SIZE);
+ macp->UsbRxBufQ[i] = buf;
+ }
- //macp->RxBufTail = ZM_MAX_RX_URB_NUM - 1;
+ /*macp->RxBufTail = ZM_MAX_RX_URB_NUM - 1;*/
macp->RxBufTail = 0;
/* Submit all Rx urbs */
- for (i = 0; i < ZM_MAX_RX_URB_NUM; i++)
- {
- zfLnxPutUsbRxBuffer(dev, macp->UsbRxBufQ[i]);
- zfLnxUsbIn(dev, macp->WlanRxDataUrb[i], macp->UsbRxBufQ[i]);
- }
+ for (i = 0; i < ZM_MAX_RX_URB_NUM; i++) {
+ zfLnxPutUsbRxBuffer(dev, macp->UsbRxBufQ[i]);
+ zfLnxUsbIn(dev, macp->WlanRxDataUrb[i], macp->UsbRxBufQ[i]);
+ }
}
u32_t zfLnxUsbSubmitBulkUrb(urb_t *urb, struct usb_device *usb, u16_t epnum, u16_t direction,
- void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context)
+ void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context)
{
u32_t ret;
- if(direction == USB_DIR_OUT)
- {
- usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, epnum),
- transfer_buffer, buffer_length, complete, context);
+ if (direction == USB_DIR_OUT) {
+ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, epnum),
+ transfer_buffer, buffer_length, complete, context);
- urb->transfer_flags |= URB_ZERO_PACKET;
- }
- else
- {
- usb_fill_bulk_urb(urb, usb, usb_rcvbulkpipe(usb, epnum),
- transfer_buffer, buffer_length, complete, context);
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ } else {
+ usb_fill_bulk_urb(urb, usb, usb_rcvbulkpipe(usb, epnum),
+ transfer_buffer, buffer_length, complete, context);
}
- if (epnum == 4)
- {
- if (urb->hcpriv)
- {
- //printk("CWY - urb->hcpriv set by unknown reason, reset it\n");
- //urb->hcpriv = 0;
- }
- }
+ if (epnum == 4) {
+ if (urb->hcpriv) {
+ /*printk("CWY - urb->hcpriv set by unknown reason, reset it\n");
+ //urb->hcpriv = 0;*/
+ }
+ }
ret = usb_submit_urb(urb, GFP_ATOMIC);
- if ((epnum == 4) & (ret != 0))
- {
- //printk("CWY - ret = %x\n", ret);
+ if ((epnum == 4) & (ret != 0)) {
+ /*printk("CWY - ret = %x\n", ret);*/
}
return ret;
}
u32_t zfLnxUsbSubmitIntUrb(urb_t *urb, struct usb_device *usb, u16_t epnum, u16_t direction,
- void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context,
- u32_t interval)
+ void *transfer_buffer, int buffer_length, usb_complete_t complete, void *context,
+ u32_t interval)
{
u32_t ret;
- if(direction == USB_DIR_OUT)
- {
- usb_fill_int_urb(urb, usb, usb_sndbulkpipe(usb, epnum),
- transfer_buffer, buffer_length, complete, context, interval);
- }
- else
- {
- usb_fill_int_urb(urb, usb, usb_rcvbulkpipe(usb, epnum),
- transfer_buffer, buffer_length, complete, context, interval);
+ if (direction == USB_DIR_OUT) {
+ usb_fill_int_urb(urb, usb, usb_sndbulkpipe(usb, epnum),
+ transfer_buffer, buffer_length, complete, context, interval);
+ } else {
+ usb_fill_int_urb(urb, usb, usb_rcvbulkpipe(usb, epnum),
+ transfer_buffer, buffer_length, complete, context, interval);
}
ret = usb_submit_urb(urb, GFP_ATOMIC);
@@ -946,51 +878,48 @@ int zfLnxCencSendMsg(struct sock *netlink_sk, u_int8_t *msg, int len)
size = NLMSG_SPACE(len);
skb = alloc_skb(size, GFP_ATOMIC);
- if(skb == NULL)
- {
+ if (skb == NULL) {
printk("dev_alloc_skb failure \n");
goto out;
}
old_tail = skb->tail;
-	/* fill in the datagram-related information */
+ /* */
nlh = NLMSG_PUT(skb, 0, 0, WAI_K_MSG, size-sizeof(*nlh));
pos = NLMSG_DATA(nlh);
-	/* the data to be passed to user space */
+ /* */
memcpy(pos, msg, len);
-	/* compute the actual data length after byte alignment */
+ /* */
nlh->nlmsg_len = skb->tail - old_tail;
NETLINK_CB(skb).dst_group = COMMTYPE_GROUP;
netlink_broadcast(netlink_sk, skb, 0, COMMTYPE_GROUP, GFP_ATOMIC);
ret = 0;
out:
return ret;
-nlmsg_failure: /* NLMSG_PUT failed, so free the socket buffer */
+nlmsg_failure: /* */
kfree_skb(skb);
goto out;
#undef COMMTYPE_GROUP
#undef WAI_K_MSG
}
-#endif //ZM_ENABLE_CENC
+#endif /*ZM_ENABLE_CENC*/
/* Simply return 0xffff if VAP function is not supported */
-u16_t zfLnxGetVapId(zdev_t* dev)
+u16_t zfLnxGetVapId(zdev_t *dev)
{
u16_t i;
- for (i=0; i<ZM_VAP_PORT_NUMBER; i++)
- {
- if (vap[i].dev == dev)
- {
- return i;
- }
- }
- return 0xffff;
+ for (i = 0; i < ZM_VAP_PORT_NUMBER; i++) {
+ if (vap[i].dev == dev) {
+ return i;
+ }
+ }
+ return 0xffff;
}
-u32_t zfwReadReg(zdev_t* dev, u32_t offset)
+u32_t zfwReadReg(zdev_t *dev, u32_t offset)
{
return 0;
}
@@ -1012,25 +941,23 @@ u32_t smp_kevent_Lock = 0;
void kevent(struct work_struct *work)
{
struct usbdrv_private *macp =
- container_of(work, struct usbdrv_private, kevent);
- zdev_t *dev = macp->device;
+ container_of(work, struct usbdrv_private, kevent);
+ zdev_t *dev = macp->device;
- if (test_and_set_bit(0, (void *)&smp_kevent_Lock))
- {
- //schedule_work(&macp->kevent);
- return;
+ if (test_and_set_bit(0, (void *)&smp_kevent_Lock)) {
+ /*schedule_work(&macp->kevent);*/
+ return;
}
down(&macp->ioctl_sem);
- if (test_and_clear_bit(KEVENT_WATCHDOG, &macp->kevent_flags))
- {
+ if (test_and_clear_bit(KEVENT_WATCHDOG, &macp->kevent_flags)) {
extern u16_t zfHpStartRecv(zdev_t *dev);
- //zfiHwWatchDogReinit(dev);
- printk(("\n ************ Hw watchDog occur!! ************** \n"));
- zfiWlanSuspend(dev);
- zfiWlanResume(dev,0);
- zfHpStartRecv(dev);
+ /*zfiHwWatchDogReinit(dev);*/
+ printk(("\n ************ Hw watchDog occur!! ************** \n"));
+ zfiWlanSuspend(dev);
+ zfiWlanResume(dev , 0);
+ zfHpStartRecv(dev);
}
clear_bit(0, (void *)&smp_kevent_Lock);
@@ -1083,41 +1010,38 @@ void zfLnxSignalThread(zdev_t *dev, int flag)
{
struct usbdrv_private *macp = dev->ml_priv;
- if (macp == NULL)
- {
- printk("macp is NULL\n");
- return;
+ if (macp == NULL) {
+ printk("macp is NULL\n");
+ return;
}
- if (0 && macp->kevent_ready != 1)
- {
- printk("Kevent not ready\n");
- return;
+ if (0 && macp->kevent_ready != 1) {
+ printk("Kevent not ready\n");
+ return;
}
set_bit(flag, &macp->kevent_flags);
- if (!schedule_work(&macp->kevent))
- {
- //Fails is Normal
- //printk(KERN_ERR "schedule_task failed, flag = %x\n", flag);
- }
+ if (!schedule_work(&macp->kevent)) {
+ /*Fails is Normal
+ //printk(KERN_ERR "schedule_task failed, flag = %x\n", flag);*/
+ }
}
/* Notify wrapper todo redownload firmware and reinit procedure when */
/* hardware watchdog occur : zfiHwWatchDogReinit() */
-void zfLnxWatchDogNotify(zdev_t* dev)
+void zfLnxWatchDogNotify(zdev_t *dev)
{
zfLnxSignalThread(dev, KEVENT_WATCHDOG);
}
/* Query Durantion of Active Scan */
-void zfwGetActiveScanDur(zdev_t* dev, u8_t* Dur)
+void zfwGetActiveScanDur(zdev_t *dev, u8_t *Dur)
{
- *Dur = 30; // default 30 ms
+ *Dur = 30; /* default 30 ms*/
}
-void zfwGetShowZeroLengthSSID(zdev_t* dev, u8_t* Dur)
+void zfwGetShowZeroLengthSSID(zdev_t *dev, u8_t *Dur)
{
*Dur = 0;
}
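One of the checkpatch cleanups buried in the wwrap.c diff above turns "static int remain_len = 0, check_pad = 0, check_len = 0;" into a plain declaration. That is safe because C guarantees that objects with static storage duration start out zero; a minimal stand-alone check of that fact (not driver code):

    #include <assert.h>

    static int remain_len, check_pad, check_len; /* static storage: implicitly zero */

    int main(void)
    {
            assert(remain_len == 0 && check_pad == 0 && check_len == 0);
            return 0;
    }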
diff --git a/drivers/staging/otus/zdusb.c b/drivers/staging/otus/zdusb.c
index 2c799a25029..4014b747245 100644
--- a/drivers/staging/otus/zdusb.c
+++ b/drivers/staging/otus/zdusb.c
@@ -48,7 +48,7 @@ static const char driver_name[] = "Otus";
/* table of devices that work with this driver */
static const struct usb_device_id zd1221_ids[] = {
{ USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) },
- { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) },
+ { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) },
{ USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WNDA3100) },
{ USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WN111v2) },
{ } /* Terminating entry */
@@ -60,9 +60,9 @@ extern u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp);
extern int usbdrv_close(struct net_device *dev);
extern u8_t zfLnxClearStructs(struct net_device *dev);
extern int zfWdsClose(struct net_device *dev);
-extern int zfUnregisterWdsDev(struct net_device* parentDev, u16_t wdsId);
+extern int zfUnregisterWdsDev(struct net_device *parentDev, u16_t wdsId);
extern int zfLnxVapClose(struct net_device *dev);
-extern int zfLnxUnregisterVapDev(struct net_device* parentDev, u16_t vapId);
+extern int zfLnxUnregisterVapDev(struct net_device *parentDev, u16_t vapId);
/* WDS */
extern struct zsWdsStruct wds[ZM_WDS_PORT_NUMBER];
@@ -73,148 +73,135 @@ extern struct zsVapStruct vap[ZM_VAP_PORT_NUMBER];
static int zfLnxProbe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- struct usb_device *dev = interface_to_usbdev(interface);
-
- struct net_device *net = NULL;
- struct usbdrv_private *macp = NULL;
- int vendor_id, product_id;
- int result = 0;
-
- usb_get_dev(dev);
-
- vendor_id = dev->descriptor.idVendor;
- product_id = dev->descriptor.idProduct;
-
-#ifdef HMAC_DEBUG
- printk(KERN_NOTICE "vendor_id = %04x\n", vendor_id);
- printk(KERN_NOTICE "product_id = %04x\n", product_id);
-
- if (dev->speed == USB_SPEED_HIGH)
- printk(KERN_NOTICE "USB 2.0 Host\n");
- else
- printk(KERN_NOTICE "USB 1.1 Host\n");
-#endif
-
- macp = kzalloc(sizeof(struct usbdrv_private), GFP_KERNEL);
- if (!macp)
- {
- printk(KERN_ERR "out of memory allocating device structure\n");
- result = -ENOMEM;
- goto fail;
- }
-
- net = alloc_etherdev(0);
-
- if (net == NULL)
- {
- printk(KERN_ERR "zfLnxProbe: Not able to alloc etherdev struct\n");
- result = -ENOMEM;
- goto fail1;
- }
-
- strcpy(net->name, "ath%d");
-
- net->ml_priv = macp; //kernel 2.6
- macp->udev = dev;
- macp->device = net;
-
- /* set up the endpoint information */
- /* check out the endpoints */
- macp->interface = interface;
-
- //init_waitqueue_head(&macp->regSet_wait);
- //init_waitqueue_head(&macp->iorwRsp_wait);
- //init_waitqueue_head(&macp->term_wait);
-
- if (!zfLnxAllocAllUrbs(macp))
- {
- result = -ENOMEM;
- goto fail2;
- }
-
- if (!zfLnxInitSetup(net, macp))
- {
- result = -EIO;
- goto fail3;
- }
- else
- {
- usb_set_intfdata(interface, macp);
- SET_NETDEV_DEV(net, &interface->dev);
-
- if (register_netdev(net) != 0)
- {
- usb_set_intfdata(interface, NULL);
- goto fail3;
- }
- }
-
- netif_carrier_off(net);
- goto done;
-
+ struct usb_device *dev = interface_to_usbdev(interface);
+
+ struct net_device *net = NULL;
+ struct usbdrv_private *macp = NULL;
+ int vendor_id, product_id;
+ int result = 0;
+
+ usb_get_dev(dev);
+
+ vendor_id = dev->descriptor.idVendor;
+ product_id = dev->descriptor.idProduct;
+
+ #ifdef HMAC_DEBUG
+ printk(KERN_NOTICE "vendor_id = %04x\n", vendor_id);
+ printk(KERN_NOTICE "product_id = %04x\n", product_id);
+
+ if (dev->speed == USB_SPEED_HIGH)
+ printk(KERN_NOTICE "USB 2.0 Host\n");
+ else
+ printk(KERN_NOTICE "USB 1.1 Host\n");
+ #endif
+
+ macp = kzalloc(sizeof(struct usbdrv_private), GFP_KERNEL);
+ if (!macp) {
+ printk(KERN_ERR "out of memory allocating device structure\n");
+ result = -ENOMEM;
+ goto fail;
+ }
+
+ net = alloc_etherdev(0);
+
+ if (net == NULL) {
+ printk(KERN_ERR "zfLnxProbe: Not able to alloc etherdev struct\n");
+ result = -ENOMEM;
+ goto fail1;
+ }
+
+ strcpy(net->name, "ath%d");
+
+ net->ml_priv = macp; /* kernel 2.6 */
+ macp->udev = dev;
+ macp->device = net;
+
+ /* set up the endpoint information */
+ /* check out the endpoints */
+ macp->interface = interface;
+
+ /* init_waitqueue_head(&macp->regSet_wait); */
+ /* init_waitqueue_head(&macp->iorwRsp_wait); */
+ /* init_waitqueue_head(&macp->term_wait); */
+
+ if (!zfLnxAllocAllUrbs(macp)) {
+ result = -ENOMEM;
+ goto fail2;
+ }
+
+ if (!zfLnxInitSetup(net, macp)) {
+ result = -EIO;
+ goto fail3;
+ } else {
+ usb_set_intfdata(interface, macp);
+ SET_NETDEV_DEV(net, &interface->dev);
+
+ if (register_netdev(net) != 0) {
+ usb_set_intfdata(interface, NULL);
+ goto fail3;
+ }
+ }
+
+ netif_carrier_off(net);
+ goto done;
fail3:
- zfLnxFreeAllUrbs(macp);
+ zfLnxFreeAllUrbs(macp);
fail2:
- free_netdev(net); //kernel 2.6
+ free_netdev(net); /* kernel 2.6 */
fail1:
- kfree(macp);
-
+ kfree(macp);
fail:
- usb_put_dev(dev);
- macp = NULL;
-
+ usb_put_dev(dev);
+ macp = NULL;
done:
- return result;
+ return result;
}
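
The probe path above uses the usual goto-unwind idiom: each allocation that succeeds gets a matching label further down, and a failure jumps to the label that frees exactly what was acquired so far. A rough standalone illustration in plain user-space C, with made-up resource names rather than the driver's own helpers:

#include <stdio.h>
#include <stdlib.h>

/* Made-up resources standing in for the private struct, netdev and URBs. */
static int setup_example(void)
{
	char *priv = NULL, *netbuf = NULL, *urbs = NULL;
	int result = 0;

	priv = malloc(64);
	if (!priv) {
		result = -1;
		goto fail;
	}

	netbuf = malloc(256);
	if (!netbuf) {
		result = -1;
		goto fail1;	/* only priv has to be freed */
	}

	urbs = malloc(1024);
	if (!urbs) {
		result = -1;
		goto fail2;	/* free netbuf, then priv */
	}

	/* success path: use and release everything normally */
	free(urbs);
	free(netbuf);
	free(priv);
	return 0;

fail2:
	free(netbuf);
fail1:
	free(priv);
fail:
	return result;	/* each label releases what was acquired before it */
}

int main(void)
{
	return setup_example() ? EXIT_FAILURE : EXIT_SUCCESS;
}
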
static void zfLnxDisconnect(struct usb_interface *interface)
{
- struct usbdrv_private *macp = (struct usbdrv_private *) usb_get_intfdata(interface);
-
- printk(KERN_DEBUG "zfLnxDisconnect\n");
-
- if (!macp)
- {
- printk(KERN_ERR "unregistering non-existant device\n");
- return;
- }
-
- if (macp->driver_isolated)
- {
- if (macp->device->flags & IFF_UP)
- usbdrv_close(macp->device);
- }
-
-#if 0
- /* Close WDS */
- //zfWdsClose(wds[0].dev);
- /* Unregister WDS */
- //zfUnregisterWdsDev(macp->device, 0);
-
- /* Close VAP */
- zfLnxVapClose(vap[0].dev);
- /* Unregister VAP */
- zfLnxUnregisterVapDev(macp->device, 0);
-#endif
+ struct usbdrv_private *macp = (struct usbdrv_private *) usb_get_intfdata(interface);
+
+ printk(KERN_DEBUG "zfLnxDisconnect\n");
+
+ if (!macp) {
+		printk(KERN_ERR "unregistering non-existent device\n");
+ return;
+ }
+
+ if (macp->driver_isolated)
+ if (macp->device->flags & IFF_UP)
+ usbdrv_close(macp->device);
+
+ #if 0
+ /* Close WDS */
+ /* zfWdsClose(wds[0].dev); */
+ /* Unregister WDS */
+ /* zfUnregisterWdsDev(macp->device, 0); */
+
+ /* Close VAP */
+ zfLnxVapClose(vap[0].dev);
+ /* Unregister VAP */
+ zfLnxUnregisterVapDev(macp->device, 0);
+ #endif
- zfLnxClearStructs(macp->device);
+ zfLnxClearStructs(macp->device);
- unregister_netdev(macp->device);
+ unregister_netdev(macp->device);
- usb_put_dev(interface_to_usbdev(interface));
+ usb_put_dev(interface_to_usbdev(interface));
- //printk(KERN_ERR "3. zfLnxUnlinkAllUrbs\n");
- //zfLnxUnlinkAllUrbs(macp);
+ /* printk(KERN_ERR "3. zfLnxUnlinkAllUrbs\n"); */
+ /* zfLnxUnlinkAllUrbs(macp); */
- /* Free network interface */
- free_netdev(macp->device);
+ /* Free network interface */
+ free_netdev(macp->device);
- zfLnxFreeAllUrbs(macp);
- //zfLnxClearStructs(macp->device);
- kfree(macp);
- macp = NULL;
+ zfLnxFreeAllUrbs(macp);
+ /* zfLnxClearStructs(macp->device); */
+ kfree(macp);
+ macp = NULL;
- usb_set_intfdata(interface, NULL);
+ usb_set_intfdata(interface, NULL);
}
static struct usb_driver zd1221_driver = {
@@ -226,13 +213,13 @@ static struct usb_driver zd1221_driver = {
int __init zfLnxIinit(void)
{
- printk(KERN_NOTICE "%s - version %s\n", DRIVER_NAME, VERSIONID);
- return usb_register(&zd1221_driver);
+ printk(KERN_NOTICE "%s - version %s\n", DRIVER_NAME, VERSIONID);
+ return usb_register(&zd1221_driver);
}
void __exit zfLnxExit(void)
{
- usb_deregister(&zd1221_driver);
+ usb_deregister(&zd1221_driver);
}
module_init(zfLnxIinit);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 9ca0e9e2a96..3221814a856 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -48,6 +48,7 @@
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/parport.h>
#include <linux/version.h>
@@ -68,11 +69,16 @@
#define LCD_MAXBYTES 256 /* max burst write */
#define KEYPAD_BUFFER 64
-#define INPUT_POLL_TIME (HZ/50) /* poll the keyboard this every second */
-#define KEYPAD_REP_START (10) /* a key starts to repeat after this times INPUT_POLL_TIME */
-#define KEYPAD_REP_DELAY (2) /* a key repeats this times INPUT_POLL_TIME */
-#define FLASH_LIGHT_TEMPO (200) /* keep the light on this times INPUT_POLL_TIME for each flash */
+/* poll the keyboard this every second */
+#define INPUT_POLL_TIME (HZ/50)
+/* a key starts to repeat after this times INPUT_POLL_TIME */
+#define KEYPAD_REP_START (10)
+/* a key repeats this times INPUT_POLL_TIME */
+#define KEYPAD_REP_DELAY (2)
+
+/* keep the light on this times INPUT_POLL_TIME for each flash */
+#define FLASH_LIGHT_TEMPO (200)
/* converts an r_str() input to an active high, bits string : 000BAOSE */
#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
@@ -84,7 +90,8 @@
#define PNL_PERRORP 0x08 /* direct input, active low */
#define PNL_PBIDIR 0x20 /* bi-directional ports */
-#define PNL_PINTEN 0x10 /* high to read data in or-ed with data out */
+/* high to read data in or-ed with data out */
+#define PNL_PINTEN 0x10
#define PNL_PSELECP 0x08 /* inverted output, active low */
#define PNL_PINITP 0x04 /* direct output, active low */
#define PNL_PAUTOLF 0x02 /* inverted output, active low */
@@ -123,7 +130,7 @@
#define LCD_FLAG_N 0x0040 /* 2-rows mode */
#define LCD_FLAG_L 0x0080 /* backlight enabled */
-#define LCD_ESCAPE_LEN 24 /* 24 chars max for an LCD escape command */
+#define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */
#define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */
/* macros to simplify use of the parallel port */
@@ -134,8 +141,10 @@
#define w_dtr(x, y) do { parport_write_data((x)->port, (y)); } while (0)
/* this defines which bits are to be used and which ones to be ignored */
-static __u8 scan_mask_o; /* logical or of the output bits involved in the scan matrix */
-static __u8 scan_mask_i; /* logical or of the input bits involved in the scan matrix */
+/* logical or of the output bits involved in the scan matrix */
+static __u8 scan_mask_o;
+/* logical or of the input bits involved in the scan matrix */
+static __u8 scan_mask_i;
typedef __u64 pmask_t;
@@ -161,14 +170,14 @@ struct logical_input {
__u8 rise_timer, fall_timer, high_timer;
union {
- struct { /* this structure is valid when type == INPUT_TYPE_STD */
+ struct { /* valid when type == INPUT_TYPE_STD */
void (*press_fct) (int);
void (*release_fct) (int);
int press_data;
int release_data;
} std;
- struct { /* this structure is valid when type == INPUT_TYPE_KBD */
- /* strings can be full-length (ie. non null-terminated) */
+ struct { /* valid when type == INPUT_TYPE_KBD */
+ /* strings can be non null-terminated */
char press_str[sizeof(void *) + sizeof(int)];
char repeat_str[sizeof(void *) + sizeof(int)];
char release_str[sizeof(void *) + sizeof(int)];
@@ -188,11 +197,17 @@ LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
* 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
* <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
*/
-static pmask_t phys_read; /* what has just been read from the I/O ports */
-static pmask_t phys_read_prev; /* previous phys_read */
-static pmask_t phys_curr; /* stabilized phys_read (phys_read|phys_read_prev) */
-static pmask_t phys_prev; /* previous phys_curr */
-static char inputs_stable; /* 0 means that at least one logical signal needs be computed */
+
+/* what has just been read from the I/O ports */
+static pmask_t phys_read;
+/* previous phys_read */
+static pmask_t phys_read_prev;
+/* stabilized phys_read (phys_read|phys_read_prev) */
+static pmask_t phys_curr;
+/* previous phys_curr */
+static pmask_t phys_prev;
+/* 0 means that at least one logical signal needs be computed */
+static char inputs_stable;
/* these variables are specific to the keypad */
static char keypad_buffer[KEYPAD_BUFFER];
@@ -202,11 +217,17 @@ static char keypressed;
static wait_queue_head_t keypad_read_wait;
/* lcd-specific variables */
-static unsigned long int lcd_flags; /* contains the LCD config state */
-static unsigned long int lcd_addr_x; /* contains the LCD X offset */
-static unsigned long int lcd_addr_y; /* contains the LCD Y offset */
-static char lcd_escape[LCD_ESCAPE_LEN + 1]; /* current escape sequence, 0 terminated */
-static int lcd_escape_len = -1; /* not in escape state. >=0 = escape cmd len */
+
+/* contains the LCD config state */
+static unsigned long int lcd_flags;
+/* contains the LCD X offset */
+static unsigned long int lcd_addr_x;
+/* contains the LCD Y offset */
+static unsigned long int lcd_addr_y;
+/* current escape sequence, 0 terminated */
+static char lcd_escape[LCD_ESCAPE_LEN + 1];
+/* not in escape state. >=0 = escape cmd len */
+static int lcd_escape_len = -1;
/*
* Bit masks to convert LCD signals to parallel port outputs.
@@ -436,11 +457,13 @@ MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
static int lcd_type = -1;
module_param(lcd_type, int, 0000);
MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
+ "LCD type: 0=none, 1=old //, 2=serial ks0074, "
+ "3=hantronix //, 4=nexcom //, 5=compiled-in");
static int lcd_proto = -1;
module_param(lcd_proto, int, 0000);
-MODULE_PARM_DESC(lcd_proto, "LCD communication: 0=parallel (//), 1=serial,"
+MODULE_PARM_DESC(lcd_proto,
+ "LCD communication: 0=parallel (//), 1=serial,"
"2=TI LCD Interface");
static int lcd_charset = -1;
@@ -450,12 +473,14 @@ MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
static int keypad_type = -1;
module_param(keypad_type, int, 0000);
MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
+ "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, "
+ "3=nexcom 4 keys");
static int profile = DEFAULT_PROFILE;
module_param(profile, int, 0000);
MODULE_PARM_DESC(profile,
- "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; 4=16x2 nexcom; default=40x2, old kp");
+ "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
+ "4=16x2 nexcom; default=40x2, old kp");
/*
* These are the parallel port pins the LCD control signals are connected to.
@@ -469,32 +494,38 @@ MODULE_PARM_DESC(profile,
static int lcd_e_pin = PIN_NOT_SET;
module_param(lcd_e_pin, int, 0000);
MODULE_PARM_DESC(lcd_e_pin,
- "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'E' signal, "
+ "with polarity (-17..17)");
static int lcd_rs_pin = PIN_NOT_SET;
module_param(lcd_rs_pin, int, 0000);
MODULE_PARM_DESC(lcd_rs_pin,
- "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'RS' signal, "
+ "with polarity (-17..17)");
static int lcd_rw_pin = PIN_NOT_SET;
module_param(lcd_rw_pin, int, 0000);
MODULE_PARM_DESC(lcd_rw_pin,
- "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'RW' signal, "
+ "with polarity (-17..17)");
static int lcd_bl_pin = PIN_NOT_SET;
module_param(lcd_bl_pin, int, 0000);
MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
+ "# of the // port pin connected to LCD backlight, "
+ "with polarity (-17..17)");
static int lcd_da_pin = PIN_NOT_SET;
module_param(lcd_da_pin, int, 0000);
MODULE_PARM_DESC(lcd_da_pin,
- "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
+ "# of the // port pin connected to serial LCD 'SDA' "
+ "signal, with polarity (-17..17)");
static int lcd_cl_pin = PIN_NOT_SET;
module_param(lcd_cl_pin, int, 0000);
MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
+ "# of the // port pin connected to serial LCD 'SCL' "
+ "signal, with polarity (-17..17)");
static unsigned char *lcd_char_conv;
@@ -572,12 +603,12 @@ static char (*keypad_profile)[4][9] = old_keypad_profile;
/* FIXME: this should be converted to a bit array containing signals states */
static struct {
- unsigned char e; /* parallel LCD E (data latch on falling edge) */
- unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */
- unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */
- unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */
- unsigned char cl; /* serial LCD clock (latch on rising edge) */
- unsigned char da; /* serial LCD data */
+ unsigned char e; /* parallel LCD E (data latch on falling edge) */
+ unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */
+ unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */
+ unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */
+ unsigned char cl; /* serial LCD clock (latch on rising edge) */
+ unsigned char da; /* serial LCD data */
} bits;
static void init_scan_timer(void);
@@ -666,7 +697,7 @@ void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val)
c_bit = PNL_PAUTOLF;
inv = !inv;
break;
- case PIN_INITP: /* init, direct */
+ case PIN_INITP: /* init, direct */
c_bit = PNL_PINITP;
break;
case PIN_SELECP: /* select_in, inverted */
@@ -698,23 +729,23 @@ static void long_sleep(int ms)
}
}
-/* send a serial byte to the LCD panel. The caller is responsible for locking if needed. */
+/* send a serial byte to the LCD panel. The caller is responsible for locking
+ if needed. */
static void lcd_send_serial(int byte)
{
int bit;
/* the data bit is set on D0, and the clock on STROBE.
- * LCD reads D0 on STROBE's rising edge.
- */
+ * LCD reads D0 on STROBE's rising edge. */
for (bit = 0; bit < 8; bit++) {
bits.cl = BIT_CLR; /* CLK low */
panel_set_bits();
bits.da = byte & 1;
panel_set_bits();
- udelay(2); /* maintain the data during 2 us before CLK up */
+ udelay(2); /* maintain the data during 2 us before CLK up */
bits.cl = BIT_SET; /* CLK high */
panel_set_bits();
- udelay(1); /* maintain the strobe during 1 us */
+ udelay(1); /* maintain the strobe during 1 us */
byte >>= 1;
}
}
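
lcd_send_serial() clocks a byte out LSB-first: the clock is dropped while the data bit is presented, then raised so the LCD samples on the rising edge. A minimal standalone sketch of that loop, with set_data()/set_clock() as hypothetical stand-ins for the parallel-port writes:

#include <stdio.h>

/* Hypothetical pin accessors; real hardware would poke the parallel port. */
static void set_data(int level)  { printf("DATA=%d ", level); }
static void set_clock(int level) { printf("CLK=%d\n", level); }

static void send_serial_byte(unsigned int byte)
{
	int bit;

	for (bit = 0; bit < 8; bit++) {
		set_clock(0);		/* clock low while the data line changes */
		set_data(byte & 1);	/* present the least significant bit */
		set_clock(1);		/* rising edge: the receiver samples here */
		byte >>= 1;		/* move on to the next bit */
	}
}

int main(void)
{
	send_serial_byte(0xA5);
	return 0;
}
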
@@ -760,19 +791,19 @@ static void lcd_write_cmd_p8(int cmd)
spin_lock(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, cmd);
- udelay(20); /* maintain the data during 20 us before the strobe */
+ udelay(20); /* maintain the data during 20 us before the strobe */
bits.e = BIT_SET;
bits.rs = BIT_CLR;
bits.rw = BIT_CLR;
set_ctrl_bits();
- udelay(40); /* maintain the strobe during 40 us */
+ udelay(40); /* maintain the strobe during 40 us */
bits.e = BIT_CLR;
set_ctrl_bits();
- udelay(120); /* the shortest command takes at least 120 us */
+ udelay(120); /* the shortest command takes at least 120 us */
spin_unlock(&pprt_lock);
}
@@ -782,19 +813,19 @@ static void lcd_write_data_p8(int data)
spin_lock(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, data);
- udelay(20); /* maintain the data during 20 us before the strobe */
+ udelay(20); /* maintain the data during 20 us before the strobe */
bits.e = BIT_SET;
bits.rs = BIT_SET;
bits.rw = BIT_CLR;
set_ctrl_bits();
- udelay(40); /* maintain the strobe during 40 us */
+ udelay(40); /* maintain the strobe during 40 us */
bits.e = BIT_CLR;
set_ctrl_bits();
- udelay(45); /* the shortest data takes at least 45 us */
+ udelay(45); /* the shortest data takes at least 45 us */
spin_unlock(&pprt_lock);
}
@@ -822,7 +853,8 @@ static void lcd_gotoxy(void)
{
lcd_write_cmd(0x80 /* set DDRAM address */
| (lcd_addr_y ? lcd_hwidth : 0)
- /* we force the cursor to stay at the end of the line if it wants to go farther */
+ /* we force the cursor to stay at the end of the
+ line if it wants to go farther */
| ((lcd_addr_x < lcd_bwidth) ? lcd_addr_x &
(lcd_hwidth - 1) : lcd_bwidth - 1));
}
@@ -871,19 +903,23 @@ static void lcd_clear_fast_p8(void)
for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
- udelay(20); /* maintain the data during 20 us before the strobe */
+
+ /* maintain the data during 20 us before the strobe */
+ udelay(20);
bits.e = BIT_SET;
bits.rs = BIT_SET;
bits.rw = BIT_CLR;
set_ctrl_bits();
- udelay(40); /* maintain the strobe during 40 us */
+ /* maintain the strobe during 40 us */
+ udelay(40);
bits.e = BIT_CLR;
set_ctrl_bits();
- udelay(45); /* the shortest data takes at least 45 us */
+ /* the shortest data takes at least 45 us */
+ udelay(45);
}
spin_unlock(&pprt_lock);
@@ -954,7 +990,8 @@ static void lcd_init_display(void)
long_sleep(10);
- lcd_write_cmd(0x06); /* entry mode set : increment, cursor shifting */
+ /* entry mode set : increment, cursor shifting */
+ lcd_write_cmd(0x06);
lcd_clear_display();
}
@@ -966,317 +1003,336 @@ static void lcd_init_display(void)
*
*/
+static inline int handle_lcd_special_code(void)
+{
+ /* LCD special codes */
+
+ int processed = 0;
+
+ char *esc = lcd_escape + 2;
+ int oldflags = lcd_flags;
+
+ /* check for display mode flags */
+ switch (*esc) {
+ case 'D': /* Display ON */
+ lcd_flags |= LCD_FLAG_D;
+ processed = 1;
+ break;
+ case 'd': /* Display OFF */
+ lcd_flags &= ~LCD_FLAG_D;
+ processed = 1;
+ break;
+ case 'C': /* Cursor ON */
+ lcd_flags |= LCD_FLAG_C;
+ processed = 1;
+ break;
+ case 'c': /* Cursor OFF */
+ lcd_flags &= ~LCD_FLAG_C;
+ processed = 1;
+ break;
+ case 'B': /* Blink ON */
+ lcd_flags |= LCD_FLAG_B;
+ processed = 1;
+ break;
+ case 'b': /* Blink OFF */
+ lcd_flags &= ~LCD_FLAG_B;
+ processed = 1;
+ break;
+ case '+': /* Back light ON */
+ lcd_flags |= LCD_FLAG_L;
+ processed = 1;
+ break;
+ case '-': /* Back light OFF */
+ lcd_flags &= ~LCD_FLAG_L;
+ processed = 1;
+ break;
+ case '*':
+ /* flash back light using the keypad timer */
+ if (scan_timer.function != NULL) {
+ if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ lcd_backlight(1);
+ light_tempo = FLASH_LIGHT_TEMPO;
+ }
+ processed = 1;
+ break;
+ case 'f': /* Small Font */
+ lcd_flags &= ~LCD_FLAG_F;
+ processed = 1;
+ break;
+ case 'F': /* Large Font */
+ lcd_flags |= LCD_FLAG_F;
+ processed = 1;
+ break;
+ case 'n': /* One Line */
+ lcd_flags &= ~LCD_FLAG_N;
+ processed = 1;
+ break;
+ case 'N': /* Two Lines */
+ lcd_flags |= LCD_FLAG_N;
+ break;
+ case 'l': /* Shift Cursor Left */
+ if (lcd_addr_x > 0) {
+ /* back one char if not at end of line */
+ if (lcd_addr_x < lcd_bwidth)
+ lcd_write_cmd(0x10);
+ lcd_addr_x--;
+ }
+ processed = 1;
+ break;
+ case 'r': /* shift cursor right */
+ if (lcd_addr_x < lcd_width) {
+ /* allow the cursor to pass the end of the line */
+ if (lcd_addr_x <
+ (lcd_bwidth - 1))
+ lcd_write_cmd(0x14);
+ lcd_addr_x++;
+ }
+ processed = 1;
+ break;
+ case 'L': /* shift display left */
+ lcd_left_shift++;
+ lcd_write_cmd(0x18);
+ processed = 1;
+ break;
+ case 'R': /* shift display right */
+ lcd_left_shift--;
+ lcd_write_cmd(0x1C);
+ processed = 1;
+ break;
+ case 'k': { /* kill end of line */
+ int x;
+ for (x = lcd_addr_x; x < lcd_bwidth; x++)
+ lcd_write_data(' ');
+
+ /* restore cursor position */
+ lcd_gotoxy();
+ processed = 1;
+ break;
+ }
+ case 'I': /* reinitialize display */
+ lcd_init_display();
+ lcd_left_shift = 0;
+ processed = 1;
+ break;
+ case 'G': {
+ /* Generator : LGcxxxxx...xx; must have <c> between '0'
+ * and '7', representing the numerical ASCII code of the
+ * redefined character, and <xx...xx> a sequence of 16
+ * hex digits representing 8 bytes for each character.
+ * Most LCDs will only use 5 lower bits of the 7 first
+ * bytes.
+ */
+
+ unsigned char cgbytes[8];
+ unsigned char cgaddr;
+ int cgoffset;
+ int shift;
+ char value;
+ int addr;
+
+ if (strchr(esc, ';') == NULL)
+ break;
+
+ esc++;
+
+ cgaddr = *(esc++) - '0';
+ if (cgaddr > 7) {
+ processed = 1;
+ break;
+ }
+
+ cgoffset = 0;
+ shift = 0;
+ value = 0;
+ while (*esc && cgoffset < 8) {
+ shift ^= 4;
+ if (*esc >= '0' && *esc <= '9')
+ value |= (*esc - '0') << shift;
+ else if (*esc >= 'A' && *esc <= 'Z')
+ value |= (*esc - 'A' + 10) << shift;
+ else if (*esc >= 'a' && *esc <= 'z')
+ value |= (*esc - 'a' + 10) << shift;
+ else {
+ esc++;
+ continue;
+ }
+
+ if (shift == 0) {
+ cgbytes[cgoffset++] = value;
+ value = 0;
+ }
+
+ esc++;
+ }
+
+ lcd_write_cmd(0x40 | (cgaddr * 8));
+ for (addr = 0; addr < cgoffset; addr++)
+ lcd_write_data(cgbytes[addr]);
+
+ /* ensures that we stop writing to CGRAM */
+ lcd_gotoxy();
+ processed = 1;
+ break;
+ }
+ case 'x': /* gotoxy : LxXXX[yYYY]; */
+ case 'y': /* gotoxy : LyYYY[xXXX]; */
+ if (strchr(esc, ';') == NULL)
+ break;
+
+ while (*esc) {
+ char *endp;
+
+ if (*esc == 'x') {
+ esc++;
+ lcd_addr_x = simple_strtoul(esc, &endp, 10);
+ esc = endp;
+ } else if (*esc == 'y') {
+ esc++;
+ lcd_addr_y = simple_strtoul(esc, &endp, 10);
+ esc = endp;
+ } else
+ break;
+ }
+
+ lcd_gotoxy();
+ processed = 1;
+ break;
+ }
+
+	/* Check whether one flag was changed */
+ if (oldflags != lcd_flags) {
+ /* check whether one of B,C,D flags were changed */
+ if ((oldflags ^ lcd_flags) &
+ (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
+ /* set display mode */
+ lcd_write_cmd(0x08
+ | ((lcd_flags & LCD_FLAG_D) ? 4 : 0)
+ | ((lcd_flags & LCD_FLAG_C) ? 2 : 0)
+ | ((lcd_flags & LCD_FLAG_B) ? 1 : 0));
+ /* check whether one of F,N flags was changed */
+ else if ((oldflags ^ lcd_flags) & (LCD_FLAG_F | LCD_FLAG_N))
+ lcd_write_cmd(0x30
+ | ((lcd_flags & LCD_FLAG_F) ? 4 : 0)
+ | ((lcd_flags & LCD_FLAG_N) ? 8 : 0));
+		/* check whether L flag was changed */
+ else if ((oldflags ^ lcd_flags) & (LCD_FLAG_L)) {
+ if (lcd_flags & (LCD_FLAG_L))
+ lcd_backlight(1);
+ else if (light_tempo == 0)
+ /* switch off the light only when the tempo
+ lighting is gone */
+ lcd_backlight(0);
+ }
+ }
+
+ return processed;
+}
+
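
The 'G' generator case above assembles CGRAM bytes two hex digits at a time, toggling a 4-bit shift so the first digit of each pair lands in the high nibble. The same loop, reduced to a self-contained sketch that only accepts hex digits (the driver's variant tolerates a wider character range):

#include <stdio.h>

/* Parse up to 'max' bytes from a hex string; returns how many were produced. */
static int parse_hex_bytes(const char *s, unsigned char *out, int max)
{
	int count = 0, shift = 0;
	unsigned char value = 0;

	while (*s && count < max) {
		shift ^= 4;			/* alternate high/low nibble */
		if (*s >= '0' && *s <= '9')
			value |= (*s - '0') << shift;
		else if (*s >= 'A' && *s <= 'F')
			value |= (*s - 'A' + 10) << shift;
		else if (*s >= 'a' && *s <= 'f')
			value |= (*s - 'a' + 10) << shift;
		else {
			s++;
			continue;	/* skip anything that is not a hex digit */
		}
		if (shift == 0) {	/* low nibble done: byte is complete */
			out[count++] = value;
			value = 0;
		}
		s++;
	}
	return count;
}

int main(void)
{
	unsigned char bytes[8];
	int i, n;

	n = parse_hex_bytes("0e1f1f1f0e000000", bytes, 8);
	for (i = 0; i < n; i++)
		printf("%02x ", bytes[i]);
	printf("\n");
	return 0;
}
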
static ssize_t lcd_write(struct file *file,
const char *buf, size_t count, loff_t *ppos)
{
-
const char *tmp = buf;
char c;
for (; count-- > 0; (ppos ? (*ppos)++ : 0), ++tmp) {
if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
- schedule(); /* let's be a little nice with other processes that need some CPU */
+ /* let's be a little nice with other processes
+ that need some CPU */
+ schedule();
if (ppos == NULL && file == NULL)
- c = *tmp; /* let's not use get_user() from the kernel ! */
+ /* let's not use get_user() from the kernel ! */
+ c = *tmp;
else if (get_user(c, tmp))
return -EFAULT;
/* first, we'll test if we're in escape mode */
- if ((c != '\n') && lcd_escape_len >= 0) { /* yes, let's add this char to the buffer */
+ if ((c != '\n') && lcd_escape_len >= 0) {
+ /* yes, let's add this char to the buffer */
lcd_escape[lcd_escape_len++] = c;
lcd_escape[lcd_escape_len] = 0;
} else {
- lcd_escape_len = -1; /* aborts any previous escape sequence */
+ /* aborts any previous escape sequence */
+ lcd_escape_len = -1;
switch (c) {
- case LCD_ESCAPE_CHAR: /* start of an escape sequence */
+ case LCD_ESCAPE_CHAR:
+ /* start of an escape sequence */
lcd_escape_len = 0;
lcd_escape[lcd_escape_len] = 0;
break;
- case '\b': /* go back one char and clear it */
+ case '\b':
+ /* go back one char and clear it */
if (lcd_addr_x > 0) {
- if (lcd_addr_x < lcd_bwidth) /* check if we're not at the end of the line */
- lcd_write_cmd(0x10); /* back one char */
+ /* check if we're not at the
+ end of the line */
+ if (lcd_addr_x < lcd_bwidth)
+ /* back one char */
+ lcd_write_cmd(0x10);
lcd_addr_x--;
}
- lcd_write_data(' '); /* replace with a space */
- lcd_write_cmd(0x10); /* back one char again */
+ /* replace with a space */
+ lcd_write_data(' ');
+ /* back one char again */
+ lcd_write_cmd(0x10);
break;
- case '\014': /* quickly clear the display */
+ case '\014':
+ /* quickly clear the display */
lcd_clear_fast();
break;
- case '\n': /* flush the remainder of the current line and go to the
- beginning of the next line */
+ case '\n':
+ /* flush the remainder of the current line and
+ go to the beginning of the next line */
for (; lcd_addr_x < lcd_bwidth; lcd_addr_x++)
lcd_write_data(' ');
lcd_addr_x = 0;
lcd_addr_y = (lcd_addr_y + 1) % lcd_height;
lcd_gotoxy();
break;
- case '\r': /* go to the beginning of the same line */
+ case '\r':
+ /* go to the beginning of the same line */
lcd_addr_x = 0;
lcd_gotoxy();
break;
- case '\t': /* print a space instead of the tab */
+ case '\t':
+ /* print a space instead of the tab */
lcd_print(' ');
break;
- default: /* simply print this char */
+ default:
+ /* simply print this char */
lcd_print(c);
break;
}
}
/* now we'll see if we're in an escape mode and if the current
- escape sequence can be understood.
- */
- if (lcd_escape_len >= 2) { /* minimal length for an escape command */
- int processed = 0; /* 1 means the command has been processed */
+ escape sequence can be understood. */
+ if (lcd_escape_len >= 2) {
+ int processed = 0;
- if (!strcmp(lcd_escape, "[2J")) { /* Clear the display */
- lcd_clear_fast(); /* clear display */
+ if (!strcmp(lcd_escape, "[2J")) {
+ /* clear the display */
+ lcd_clear_fast();
processed = 1;
- } else if (!strcmp(lcd_escape, "[H")) { /* Cursor to home */
+ } else if (!strcmp(lcd_escape, "[H")) {
+ /* cursor to home */
lcd_addr_x = lcd_addr_y = 0;
lcd_gotoxy();
processed = 1;
}
/* codes starting with ^[[L */
else if ((lcd_escape_len >= 3) &&
- (lcd_escape[0] == '[') && (lcd_escape[1] == 'L')) { /* LCD special codes */
-
- char *esc = lcd_escape + 2;
- int oldflags = lcd_flags;
-
- /* check for display mode flags */
- switch (*esc) {
- case 'D': /* Display ON */
- lcd_flags |= LCD_FLAG_D;
- processed = 1;
- break;
- case 'd': /* Display OFF */
- lcd_flags &= ~LCD_FLAG_D;
- processed = 1;
- break;
- case 'C': /* Cursor ON */
- lcd_flags |= LCD_FLAG_C;
- processed = 1;
- break;
- case 'c': /* Cursor OFF */
- lcd_flags &= ~LCD_FLAG_C;
- processed = 1;
- break;
- case 'B': /* Blink ON */
- lcd_flags |= LCD_FLAG_B;
- processed = 1;
- break;
- case 'b': /* Blink OFF */
- lcd_flags &= ~LCD_FLAG_B;
- processed = 1;
- break;
- case '+': /* Back light ON */
- lcd_flags |= LCD_FLAG_L;
- processed = 1;
- break;
- case '-': /* Back light OFF */
- lcd_flags &= ~LCD_FLAG_L;
- processed = 1;
- break;
- case '*': /* flash back light using the keypad timer */
- if (scan_timer.function != NULL) {
- if (light_tempo == 0
- && ((lcd_flags & LCD_FLAG_L)
- == 0))
- lcd_backlight(1);
- light_tempo = FLASH_LIGHT_TEMPO;
- }
- processed = 1;
- break;
- case 'f': /* Small Font */
- lcd_flags &= ~LCD_FLAG_F;
- processed = 1;
- break;
- case 'F': /* Large Font */
- lcd_flags |= LCD_FLAG_F;
- processed = 1;
- break;
- case 'n': /* One Line */
- lcd_flags &= ~LCD_FLAG_N;
- processed = 1;
- break;
- case 'N': /* Two Lines */
- lcd_flags |= LCD_FLAG_N;
- break;
-
- case 'l': /* Shift Cursor Left */
- if (lcd_addr_x > 0) {
- if (lcd_addr_x < lcd_bwidth)
- lcd_write_cmd(0x10); /* back one char if not at end of line */
- lcd_addr_x--;
- }
- processed = 1;
- break;
-
- case 'r': /* shift cursor right */
- if (lcd_addr_x < lcd_width) {
- if (lcd_addr_x < (lcd_bwidth - 1))
- lcd_write_cmd(0x14); /* allow the cursor to pass the end of the line */
- lcd_addr_x++;
- }
- processed = 1;
- break;
-
- case 'L': /* shift display left */
- lcd_left_shift++;
- lcd_write_cmd(0x18);
- processed = 1;
- break;
-
- case 'R': /* shift display right */
- lcd_left_shift--;
- lcd_write_cmd(0x1C);
- processed = 1;
- break;
-
- case 'k':{ /* kill end of line */
- int x;
- for (x = lcd_addr_x; x < lcd_bwidth; x++)
- lcd_write_data(' ');
- lcd_gotoxy(); /* restore cursor position */
- processed = 1;
- break;
- }
- case 'I': /* reinitialize display */
- lcd_init_display();
- lcd_left_shift = 0;
- processed = 1;
- break;
-
- case 'G': /* Generator : LGcxxxxx...xx; */ {
- /* must have <c> between '0' and '7', representing the numerical
- * ASCII code of the redefined character, and <xx...xx> a sequence
- * of 16 hex digits representing 8 bytes for each character. Most
- * LCDs will only use 5 lower bits of the 7 first bytes.
- */
-
- unsigned char cgbytes[8];
- unsigned char cgaddr;
- int cgoffset;
- int shift;
- char value;
- int addr;
-
- if (strchr(esc, ';') == NULL)
- break;
-
- esc++;
-
- cgaddr = *(esc++) - '0';
- if (cgaddr > 7) {
- processed = 1;
- break;
- }
-
- cgoffset = 0;
- shift = 0;
- value = 0;
- while (*esc && cgoffset < 8) {
- shift ^= 4;
- if (*esc >= '0' && *esc <= '9')
- value |= (*esc - '0') << shift;
- else if (*esc >= 'A' && *esc <= 'Z')
- value |= (*esc - 'A' + 10) << shift;
- else if (*esc >= 'a' && *esc <= 'z')
- value |= (*esc - 'a' + 10) << shift;
- else {
- esc++;
- continue;
- }
-
- if (shift == 0) {
- cgbytes[cgoffset++] = value;
- value = 0;
- }
-
- esc++;
- }
-
- lcd_write_cmd(0x40 | (cgaddr * 8));
- for (addr = 0; addr < cgoffset; addr++)
- lcd_write_data(cgbytes[addr]);
-
- lcd_gotoxy(); /* ensures that we stop writing to CGRAM */
- processed = 1;
- break;
- }
- case 'x': /* gotoxy : LxXXX[yYYY]; */
- case 'y': /* gotoxy : LyYYY[xXXX]; */
- if (strchr(esc, ';') == NULL)
- break;
-
- while (*esc) {
- if (*esc == 'x') {
- esc++;
- lcd_addr_x = 0;
- while (isdigit(*esc)) {
- lcd_addr_x =
- lcd_addr_x *
- 10 + (*esc -
- '0');
- esc++;
- }
- } else if (*esc == 'y') {
- esc++;
- lcd_addr_y = 0;
- while (isdigit(*esc)) {
- lcd_addr_y =
- lcd_addr_y *
- 10 + (*esc -
- '0');
- esc++;
- }
- } else
- break;
- }
-
- lcd_gotoxy();
- processed = 1;
- break;
- } /* end of switch */
-
- /* Check wether one flag was changed */
- if (oldflags != lcd_flags) {
- /* check wether one of B,C,D flags was changed */
- if ((oldflags ^ lcd_flags) &
- (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
- /* set display mode */
- lcd_write_cmd(0x08 |
- ((lcd_flags & LCD_FLAG_D) ? 4 : 0) |
- ((lcd_flags & LCD_FLAG_C) ? 2 : 0) |
- ((lcd_flags & LCD_FLAG_B) ? 1 : 0));
- /* check wether one of F,N flags was changed */
- else if ((oldflags ^ lcd_flags) &
- (LCD_FLAG_F | LCD_FLAG_N))
- lcd_write_cmd(0x30 |
- ((lcd_flags & LCD_FLAG_F) ? 4 : 0) |
- ((lcd_flags & LCD_FLAG_N) ? 8 : 0));
- /* check wether L flag was changed */
- else if ((oldflags ^ lcd_flags) &
- (LCD_FLAG_L)) {
- if (lcd_flags & (LCD_FLAG_L))
- lcd_backlight(1);
- else if (light_tempo == 0) /* switch off the light only when the tempo lighting is gone */
- lcd_backlight(0);
- }
- }
+ (lcd_escape[0] == '[') &&
+ (lcd_escape[1] == 'L')) {
+ processed = handle_lcd_special_code();
}
/* LCD special escape codes */
- /* flush the escape sequence if it's been processed or if it is
- getting too long. */
+ /* flush the escape sequence if it's been processed
+ or if it is getting too long. */
if (processed || (lcd_escape_len >= LCD_ESCAPE_LEN))
lcd_escape_len = -1;
- } /* escape codes */
+ } /* escape codes */
}
return tmp - buf;
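
lcd_write() is essentially a small state machine: lcd_escape_len == -1 means ordinary text, and any value >= 0 means characters have been buffered since the last escape byte, until a known sequence is recognised or the length limit is hit. A toy version of that accumulator, assuming a single hypothetical command "[2J":

#include <stdio.h>
#include <string.h>

#define ESC_CHAR 27
#define ESC_MAX  8

static char esc_buf[ESC_MAX + 1];
static int esc_len = -1;		/* -1: not inside an escape sequence */

static void feed_char(char c)
{
	if (c != '\n' && esc_len >= 0) {
		/* still collecting an escape sequence */
		esc_buf[esc_len++] = c;
		esc_buf[esc_len] = 0;
	} else {
		esc_len = -1;		/* abort any previous sequence */
		if (c == ESC_CHAR) {
			esc_len = 0;	/* start collecting */
			esc_buf[0] = 0;
		} else {
			putchar(c);	/* ordinary character */
		}
	}

	/* try to recognise the buffered sequence */
	if (esc_len >= 2) {
		int processed = 0;

		if (!strcmp(esc_buf, "[2J")) {
			printf("<clear screen>");
			processed = 1;
		}
		if (processed || esc_len >= ESC_MAX)
			esc_len = -1;	/* flush: handled or too long */
	}
}

int main(void)
{
	const char *s = "hi\033[2J!";

	while (*s)
		feed_char(*s++);
	putchar('\n');
	return 0;
}
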
@@ -1295,7 +1351,7 @@ static int lcd_open(struct inode *inode, struct file *file)
lcd_must_clear = 0;
}
lcd_open_cnt++;
- return 0;
+ return nonseekable_open(inode, file);
}
static int lcd_release(struct inode *inode, struct file *file)
@@ -1304,10 +1360,11 @@ static int lcd_release(struct inode *inode, struct file *file)
return 0;
}
-static struct file_operations lcd_fops = {
+static const struct file_operations lcd_fops = {
.write = lcd_write,
.open = lcd_open,
.release = lcd_release,
+ .llseek = no_llseek,
};
static struct miscdevice lcd_dev = {
@@ -1327,7 +1384,8 @@ void panel_lcd_print(char *s)
void lcd_init(void)
{
switch (lcd_type) {
- case LCD_TYPE_OLD: /* parallel mode, 8 bits */
+ case LCD_TYPE_OLD:
+ /* parallel mode, 8 bits */
if (lcd_proto < 0)
lcd_proto = LCD_PROTO_PARALLEL;
if (lcd_charset < 0)
@@ -1346,7 +1404,8 @@ void lcd_init(void)
if (lcd_height < 0)
lcd_height = 2;
break;
- case LCD_TYPE_KS0074: /* serial mode, ks0074 */
+ case LCD_TYPE_KS0074:
+ /* serial mode, ks0074 */
if (lcd_proto < 0)
lcd_proto = LCD_PROTO_SERIAL;
if (lcd_charset < 0)
@@ -1367,7 +1426,8 @@ void lcd_init(void)
if (lcd_height < 0)
lcd_height = 2;
break;
- case LCD_TYPE_NEXCOM: /* parallel mode, 8 bits, generic */
+ case LCD_TYPE_NEXCOM:
+ /* parallel mode, 8 bits, generic */
if (lcd_proto < 0)
lcd_proto = LCD_PROTO_PARALLEL;
if (lcd_charset < 0)
@@ -1388,14 +1448,16 @@ void lcd_init(void)
if (lcd_height < 0)
lcd_height = 2;
break;
- case LCD_TYPE_CUSTOM: /* customer-defined */
+ case LCD_TYPE_CUSTOM:
+ /* customer-defined */
if (lcd_proto < 0)
lcd_proto = DEFAULT_LCD_PROTO;
if (lcd_charset < 0)
lcd_charset = DEFAULT_LCD_CHARSET;
/* default geometry will be set later */
break;
- case LCD_TYPE_HANTRONIX: /* parallel mode, 8 bits, hantronix-like */
+ case LCD_TYPE_HANTRONIX:
+ /* parallel mode, 8 bits, hantronix-like */
default:
if (lcd_proto < 0)
lcd_proto = LCD_PROTO_PARALLEL;
@@ -1496,8 +1558,7 @@ void lcd_init(void)
/* before this line, we must NOT send anything to the display.
* Since lcd_init_display() needs to write data, we have to
- * enable mark the LCD initialized just before.
- */
+ * enable mark the LCD initialized just before. */
lcd_initialized = 1;
lcd_init_display();
@@ -1511,7 +1572,8 @@ void lcd_init(void)
PANEL_VERSION);
#endif
lcd_addr_x = lcd_addr_y = 0;
- lcd_must_clear = 1; /* clear the display on the next device opening */
+ /* clear the display on the next device opening */
+ lcd_must_clear = 1;
lcd_gotoxy();
}
@@ -1535,7 +1597,8 @@ static ssize_t keypad_read(struct file *file,
return -EINTR;
}
- for (; count-- > 0 && (keypad_buflen > 0); ++i, ++tmp, --keypad_buflen) {
+ for (; count-- > 0 && (keypad_buflen > 0);
+ ++i, ++tmp, --keypad_buflen) {
put_user(keypad_buffer[keypad_start], tmp);
keypad_start = (keypad_start + 1) % KEYPAD_BUFFER;
}
@@ -1564,7 +1627,7 @@ static int keypad_release(struct inode *inode, struct file *file)
return 0;
}
-static struct file_operations keypad_fops = {
+static const struct file_operations keypad_fops = {
.read = keypad_read, /* read */
.open = keypad_open, /* open */
.release = keypad_release, /* close */
@@ -1591,14 +1654,15 @@ static void keypad_send_key(char *string, int max_len)
}
}
-/* this function scans all the bits involving at least one logical signal, and puts the
- * results in the bitfield "phys_read" (one bit per established contact), and sets
- * "phys_read_prev" to "phys_read".
+/* this function scans all the bits involving at least one logical signal,
+ * and puts the results in the bitfield "phys_read" (one bit per established
+ * contact), and sets "phys_read_prev" to "phys_read".
*
- * Note: to debounce input signals, we will only consider as switched a signal which is
- * stable across 2 measures. Signals which are different between two reads will be kept
- * as they previously were in their logical form (phys_prev). A signal which has just
- * switched will have a 1 in (phys_read ^ phys_read_prev).
+ * Note: to debounce input signals, we will only consider as switched a signal
+ * which is stable across 2 measures. Signals which are different between two
+ * reads will be kept as they previously were in their logical form (phys_prev).
+ * A signal which has just switched will have a 1 in
+ * (phys_read ^ phys_read_prev).
*/
static void phys_scan_contacts(void)
{
@@ -1611,21 +1675,30 @@ static void phys_scan_contacts(void)
phys_read_prev = phys_read;
phys_read = 0; /* flush all signals */
- oldval = r_dtr(pprt) | scan_mask_o; /* keep track of old value, with all outputs disabled */
- w_dtr(pprt, oldval & ~scan_mask_o); /* activate all keyboard outputs (active low) */
- bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* will have a 1 for each bit set to gnd */
- w_dtr(pprt, oldval); /* disable all matrix signals */
+ /* keep track of old value, with all outputs disabled */
+ oldval = r_dtr(pprt) | scan_mask_o;
+ /* activate all keyboard outputs (active low) */
+ w_dtr(pprt, oldval & ~scan_mask_o);
+
+ /* will have a 1 for each bit set to gnd */
+ bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
+ /* disable all matrix signals */
+ w_dtr(pprt, oldval);
/* now that all outputs are cleared, the only active input bits are
* directly connected to the ground
*/
- gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* 1 for each grounded input */
- phys_read |= (pmask_t) gndmask << 40; /* grounded inputs are signals 40-44 */
+ /* 1 for each grounded input */
+ gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
+
+ /* grounded inputs are signals 40-44 */
+ phys_read |= (pmask_t) gndmask << 40;
if (bitmask != gndmask) {
- /* since clearing the outputs changed some inputs, we know that some
- * input signals are currently tied to some outputs. So we'll scan them.
+ /* since clearing the outputs changed some inputs, we know
+ * that some input signals are currently tied to some outputs.
+ * So we'll scan them.
*/
for (bit = 0; bit < 8; bit++) {
bitval = 1 << bit;
@@ -1639,11 +1712,127 @@ static void phys_scan_contacts(void)
}
w_dtr(pprt, oldval); /* disable all outputs */
}
- /* this is easy: use old bits when they are flapping, use new ones when stable */
- phys_curr =
- (phys_prev & (phys_read ^ phys_read_prev)) | (phys_read &
- ~(phys_read ^
- phys_read_prev));
+ /* this is easy: use old bits when they are flapping,
+ * use new ones when stable */
+ phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) |
+ (phys_read & ~(phys_read ^ phys_read_prev));
+}
+
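
The debounce expression at the end of phys_scan_contacts() acts as a per-bit multiplexer: bits that differ between two consecutive reads are considered flapping and keep their previous stable value, while bits that agree take the new value. A small sketch exercising the same expression on made-up samples:

#include <stdio.h>
#include <stdint.h>

/* Keep old bits where the two reads disagree (flapping),
 * take the new bits where they agree (stable). */
static uint64_t debounce(uint64_t prev_stable, uint64_t read_prev,
			 uint64_t read_now)
{
	uint64_t flapping = read_now ^ read_prev;

	return (prev_stable & flapping) | (read_now & ~flapping);
}

int main(void)
{
	/* bit0 stable at 1, bit1 flapping (was stable at 0), bit2 newly 1 */
	uint64_t prev_stable = 0x1;	/* last debounced state */
	uint64_t read_prev   = 0x5;	/* previous raw read */
	uint64_t read_now    = 0x7;	/* current raw read */

	printf("debounced = 0x%llx\n",
	       (unsigned long long)debounce(prev_stable, read_prev, read_now));
	return 0;
}
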
+static inline int input_state_high(struct logical_input *input)
+{
+#if 0
+ /* FIXME:
+ * this is an invalid test. It tries to catch
+ * transitions from single-key to multiple-key, but
+ * doesn't take into account the contacts polarity.
+ * The only solution to the problem is to parse keys
+ * from the most complex to the simplest combinations,
+ * and mark them as 'caught' once a combination
+ * matches, then unmatch it for all other ones.
+ */
+
+ /* try to catch dangerous transitions cases :
+ * someone adds a bit, so this signal was a false
+ * positive resulting from a transition. We should
+ * invalidate the signal immediately and not call the
+ * release function.
+ * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release.
+ */
+ if (((phys_prev & input->mask) == input->value)
+ && ((phys_curr & input->mask) > input->value)) {
+ input->state = INPUT_ST_LOW; /* invalidate */
+ return 1;
+ }
+#endif
+
+ if ((phys_curr & input->mask) == input->value) {
+ if ((input->type == INPUT_TYPE_STD) &&
+ (input->high_timer == 0)) {
+ input->high_timer++;
+ if (input->u.std.press_fct != NULL)
+ input->u.std.press_fct(input->u.std.press_data);
+ } else if (input->type == INPUT_TYPE_KBD) {
+ /* will turn on the light */
+ keypressed = 1;
+
+ if (input->high_timer == 0) {
+ char *press_str = input->u.kbd.press_str;
+ if (press_str[0])
+ keypad_send_key(press_str,
+ sizeof(press_str));
+ }
+
+ if (input->u.kbd.repeat_str[0]) {
+ char *repeat_str = input->u.kbd.repeat_str;
+ if (input->high_timer >= KEYPAD_REP_START) {
+ input->high_timer -= KEYPAD_REP_DELAY;
+ keypad_send_key(repeat_str,
+ sizeof(repeat_str));
+ }
+ /* we will need to come back here soon */
+ inputs_stable = 0;
+ }
+
+ if (input->high_timer < 255)
+ input->high_timer++;
+ }
+ return 1;
+ } else {
+ /* else signal falling down. Let's fall through. */
+ input->state = INPUT_ST_FALLING;
+ input->fall_timer = 0;
+ }
+ return 0;
+}
+
+static inline void input_state_falling(struct logical_input *input)
+{
+#if 0
+ /* FIXME !!! same comment as in input_state_high */
+ if (((phys_prev & input->mask) == input->value)
+ && ((phys_curr & input->mask) > input->value)) {
+ input->state = INPUT_ST_LOW; /* invalidate */
+ return;
+ }
+#endif
+
+ if ((phys_curr & input->mask) == input->value) {
+ if (input->type == INPUT_TYPE_KBD) {
+ /* will turn on the light */
+ keypressed = 1;
+
+ if (input->u.kbd.repeat_str[0]) {
+ char *repeat_str = input->u.kbd.repeat_str;
+ if (input->high_timer >= KEYPAD_REP_START)
+ input->high_timer -= KEYPAD_REP_DELAY;
+ keypad_send_key(repeat_str,
+ sizeof(repeat_str));
+ /* we will need to come back here soon */
+ inputs_stable = 0;
+ }
+
+ if (input->high_timer < 255)
+ input->high_timer++;
+ }
+ input->state = INPUT_ST_HIGH;
+ } else if (input->fall_timer >= input->fall_time) {
+ /* call release event */
+ if (input->type == INPUT_TYPE_STD) {
+ void (*release_fct)(int) = input->u.std.release_fct;
+ if (release_fct != NULL)
+ release_fct(input->u.std.release_data);
+ } else if (input->type == INPUT_TYPE_KBD) {
+ char *release_str = input->u.kbd.release_str;
+ if (release_str[0])
+ keypad_send_key(release_str,
+ sizeof(release_str));
+ }
+
+ input->state = INPUT_ST_LOW;
+ } else {
+ input->fall_timer++;
+ inputs_stable = 0;
+ }
}
static void panel_process_inputs(void)
@@ -1666,10 +1855,12 @@ static void panel_process_inputs(void)
case INPUT_ST_LOW:
if ((phys_curr & input->mask) != input->value)
break;
- /* if all needed ones were already set previously, this means that
- * this logical signal has been activated by the releasing of
- * another combined signal, so we don't want to match.
- * eg: AB -(release B)-> A -(release A)-> 0 : don't match A.
+ /* if all needed ones were already set previously,
+ * this means that this logical signal has been
+ * activated by the releasing of another combined
+ * signal, so we don't want to match.
+ * eg: AB -(release B)-> A -(release A)-> 0 :
+ * don't match A.
*/
if ((phys_prev & input->mask) == input->value)
break;
@@ -1690,122 +1881,11 @@ static void panel_process_inputs(void)
input->state = INPUT_ST_HIGH;
/* no break here, fall through */
case INPUT_ST_HIGH:
-#if 0
- /* FIXME:
- * this is an invalid test. It tries to catch transitions from single-key
- * to multiple-key, but doesn't take into account the contacts polarity.
- * The only solution to the problem is to parse keys from the most complex
- * to the simplest combinations, and mark them as 'caught' once a combination
- * matches, then unmatch it for all other ones.
- */
-
- /* try to catch dangerous transitions cases :
- * someone adds a bit, so this signal was a false
- * positive resulting from a transition. We should invalidate
- * the signal immediately and not call the release function.
- * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release.
- */
- if (((phys_prev & input->mask) == input->value)
- && ((phys_curr & input->mask) > input->value)) {
- input->state = INPUT_ST_LOW; /* invalidate */
- break;
- }
-#endif
-
- if ((phys_curr & input->mask) == input->value) {
- if ((input->type == INPUT_TYPE_STD)
- && (input->high_timer == 0)) {
- input->high_timer++;
- if (input->u.std.press_fct != NULL)
- input->u.std.press_fct(input->u.
- std.
- press_data);
- } else if (input->type == INPUT_TYPE_KBD) {
- keypressed = 1; /* will turn on the light */
-
- if (input->high_timer == 0) {
- if (input->u.kbd.press_str[0])
- keypad_send_key(input->
- u.kbd.
- press_str,
- sizeof
- (input->
- u.kbd.
- press_str));
- }
-
- if (input->u.kbd.repeat_str[0]) {
- if (input->high_timer >=
- KEYPAD_REP_START) {
- input->high_timer -=
- KEYPAD_REP_DELAY;
- keypad_send_key(input->
- u.kbd.
- repeat_str,
- sizeof
- (input->
- u.kbd.
- repeat_str));
- }
- inputs_stable = 0; /* we will need to come back here soon */
- }
-
- if (input->high_timer < 255)
- input->high_timer++;
- }
+ if (input_state_high(input))
break;
- } else {
- /* else signal falling down. Let's fall through. */
- input->state = INPUT_ST_FALLING;
- input->fall_timer = 0;
- }
/* no break here, fall through */
case INPUT_ST_FALLING:
-#if 0
- /* FIXME !!! same comment as above */
- if (((phys_prev & input->mask) == input->value)
- && ((phys_curr & input->mask) > input->value)) {
- input->state = INPUT_ST_LOW; /* invalidate */
- break;
- }
-#endif
-
- if ((phys_curr & input->mask) == input->value) {
- if (input->type == INPUT_TYPE_KBD) {
- keypressed = 1; /* will turn on the light */
-
- if (input->u.kbd.repeat_str[0]) {
- if (input->high_timer >= KEYPAD_REP_START)
- input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(input->u.kbd.repeat_str,
- sizeof(input->u.kbd.repeat_str));
- inputs_stable = 0; /* we will need to come back here soon */
- }
-
- if (input->high_timer < 255)
- input->high_timer++;
- }
- input->state = INPUT_ST_HIGH;
- break;
- } else if (input->fall_timer >= input->fall_time) {
- /* call release event */
- if (input->type == INPUT_TYPE_STD) {
- if (input->u.std.release_fct != NULL)
- input->u.std.release_fct(input->u.std.release_data);
-
- } else if (input->type == INPUT_TYPE_KBD) {
- if (input->u.kbd.release_str[0])
- keypad_send_key(input->u.kbd.release_str,
- sizeof(input->u.kbd.release_str));
- }
-
- input->state = INPUT_ST_LOW;
- break;
- } else {
- input->fall_timer++;
- inputs_stable = 0;
- break;
- }
+ input_state_falling(input);
}
}
}
@@ -1815,7 +1895,9 @@ static void panel_scan_timer(void)
if (keypad_enabled && keypad_initialized) {
if (spin_trylock(&pprt_lock)) {
phys_scan_contacts();
- spin_unlock(&pprt_lock); /* no need for the parport anymore */
+
+ /* no need for the parport anymore */
+ spin_unlock(&pprt_lock);
}
if (!inputs_stable || phys_curr != phys_prev)
@@ -1850,8 +1932,8 @@ static void init_scan_timer(void)
}
/* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits.
- * if <omask> or <imask> are non-null, they will be or'ed with the bits corresponding
- * to out and in bits respectively.
+ * if <omask> or <imask> are non-null, they will be or'ed with the bits
+ * corresponding to out and in bits respectively.
* returns 1 if ok, 0 if error (in which case, nothing is written).
*/
static int input_name2mask(char *name, pmask_t *mask, pmask_t *value,
@@ -1864,7 +1946,8 @@ static int input_name2mask(char *name, pmask_t *mask, pmask_t *value,
om = im = m = v = 0ULL;
while (*name) {
int in, out, bit, neg;
- for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name); in++)
+ for (in = 0; (in < sizeof(sigtab)) &&
+ (sigtab[in] != *name); in++)
;
if (in >= sizeof(sigtab))
return 0; /* input name not found */
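
input_name2mask() resolves each character of a signal name by scanning a small table and gives up as soon as a character is not listed; the matching index then decides which bit is or'ed into the mask. A simplified standalone version with a hypothetical five-signal table (the real sigtab and the in/out and polarity handling live elsewhere in panel.c):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical signal table: the index of a letter is its bit number. */
static const char sigtab[] = "ESPAB";

/* OR the bit of every recognised letter into *mask; 0 on an unknown letter. */
static int name_to_mask(const char *name, uint64_t *mask)
{
	uint64_t m = 0;

	while (*name) {
		size_t in;

		for (in = 0; in < strlen(sigtab) && sigtab[in] != *name; in++)
			;
		if (in >= strlen(sigtab))
			return 0;	/* letter not in the table */
		m |= 1ULL << in;
		name++;
	}
	*mask = m;
	return 1;
}

int main(void)
{
	uint64_t mask;

	if (name_to_mask("SA", &mask))
		printf("mask = 0x%llx\n", (unsigned long long)mask);
	return 0;
}
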
@@ -1912,8 +1995,10 @@ static struct logical_input *panel_bind_key(char *name, char *press,
return NULL;
}
if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i,
- &scan_mask_o))
+ &scan_mask_o)) {
+ kfree(key);
return NULL;
+ }
key->type = INPUT_TYPE_KBD;
key->state = INPUT_ST_LOW;
@@ -1936,7 +2021,8 @@ static struct logical_input *panel_bind_key(char *name, char *press,
/* tries to bind a callback function to the signal name <name>. The function
* <press_fct> will be called with the <press_data> arg when the signal is
* activated, and so on for <release_fct>/<release_data>
- * Returns the pointer to the new signal if ok, NULL if the signal could not be bound.
+ * Returns the pointer to the new signal if ok, NULL if the signal could not
+ * be bound.
*/
static struct logical_input *panel_bind_callback(char *name,
void (*press_fct) (int),
@@ -2028,33 +2114,52 @@ static void panel_attach(struct parport *port)
if (pprt) {
printk(KERN_ERR
- "panel_attach(): port->number=%d parport=%d, already registered !\n",
+ "panel_attach(): port->number=%d parport=%d, "
+ "already registered !\n",
port->number, parport);
return;
}
- pprt = parport_register_device(port, "panel", NULL, NULL, /* pf, kf */
+ pprt = parport_register_device(port, "panel", NULL, NULL, /* pf, kf */
NULL,
/*PARPORT_DEV_EXCL */
0, (void *)&pprt);
+ if (pprt == NULL) {
+ pr_err("panel_attach(): port->number=%d parport=%d, "
+ "parport_register_device() failed\n",
+ port->number, parport);
+ return;
+ }
if (parport_claim(pprt)) {
printk(KERN_ERR
- "Panel: could not claim access to parport%d. Aborting.\n",
- parport);
- return;
+ "Panel: could not claim access to parport%d. "
+ "Aborting.\n", parport);
+ goto err_unreg_device;
}
- /* must init LCD first, just in case an IRQ from the keypad is generated at keypad init */
+ /* must init LCD first, just in case an IRQ from the keypad is
+ * generated at keypad init
+ */
if (lcd_enabled) {
lcd_init();
- misc_register(&lcd_dev);
+ if (misc_register(&lcd_dev))
+ goto err_unreg_device;
}
if (keypad_enabled) {
keypad_init();
- misc_register(&keypad_dev);
+ if (misc_register(&keypad_dev))
+ goto err_lcd_unreg;
}
+ return;
+
+err_lcd_unreg:
+ if (lcd_enabled)
+ misc_deregister(&lcd_dev);
+err_unreg_device:
+ parport_unregister_device(pprt);
+ pprt = NULL;
}
static void panel_detach(struct parport *port)
@@ -2064,7 +2169,8 @@ static void panel_detach(struct parport *port)
if (!pprt) {
printk(KERN_ERR
- "panel_detach(): port->number=%d parport=%d, nothing to unregister.\n",
+ "panel_detach(): port->number=%d parport=%d, "
+ "nothing to unregister.\n",
port->number, parport);
return;
}
@@ -2105,13 +2211,15 @@ int panel_init(void)
/* take care of an eventual profile */
switch (profile) {
- case PANEL_PROFILE_CUSTOM: /* custom profile */
+ case PANEL_PROFILE_CUSTOM:
+ /* custom profile */
if (keypad_type < 0)
keypad_type = DEFAULT_KEYPAD;
if (lcd_type < 0)
lcd_type = DEFAULT_LCD;
break;
- case PANEL_PROFILE_OLD: /* 8 bits, 2*16, old keypad */
+ case PANEL_PROFILE_OLD:
+ /* 8 bits, 2*16, old keypad */
if (keypad_type < 0)
keypad_type = KEYPAD_TYPE_OLD;
if (lcd_type < 0)
@@ -2121,25 +2229,29 @@ int panel_init(void)
if (lcd_hwidth < 0)
lcd_hwidth = 16;
break;
- case PANEL_PROFILE_NEW: /* serial, 2*16, new keypad */
+ case PANEL_PROFILE_NEW:
+ /* serial, 2*16, new keypad */
if (keypad_type < 0)
keypad_type = KEYPAD_TYPE_NEW;
if (lcd_type < 0)
lcd_type = LCD_TYPE_KS0074;
break;
- case PANEL_PROFILE_HANTRONIX: /* 8 bits, 2*16 hantronix-like, no keypad */
+ case PANEL_PROFILE_HANTRONIX:
+ /* 8 bits, 2*16 hantronix-like, no keypad */
if (keypad_type < 0)
keypad_type = KEYPAD_TYPE_NONE;
if (lcd_type < 0)
lcd_type = LCD_TYPE_HANTRONIX;
break;
- case PANEL_PROFILE_NEXCOM: /* generic 8 bits, 2*16, nexcom keypad, eg. Nexcom. */
+ case PANEL_PROFILE_NEXCOM:
+ /* generic 8 bits, 2*16, nexcom keypad, eg. Nexcom. */
if (keypad_type < 0)
keypad_type = KEYPAD_TYPE_NEXCOM;
if (lcd_type < 0)
lcd_type = LCD_TYPE_NEXCOM;
break;
- case PANEL_PROFILE_LARGE: /* 8 bits, 2*40, old keypad */
+ case PANEL_PROFILE_LARGE:
+ /* 8 bits, 2*40, old keypad */
if (keypad_type < 0)
keypad_type = KEYPAD_TYPE_OLD;
if (lcd_type < 0)
@@ -2179,6 +2291,7 @@ int panel_init(void)
if (pprt) {
parport_release(pprt);
parport_unregister_device(pprt);
+ pprt = NULL;
}
parport_unregister_driver(&panel_driver);
printk(KERN_ERR "Panel driver version " PANEL_VERSION
@@ -2195,7 +2308,8 @@ int panel_init(void)
else
printk(KERN_INFO "Panel driver version " PANEL_VERSION
" not yet registered\n");
- /* tells various subsystems about the fact that initialization is finished */
+ /* tells various subsystems about the fact that initialization
+ is finished */
init_in_progress = 0;
return 0;
}
@@ -2228,6 +2342,7 @@ static void __exit panel_cleanup_module(void)
/* TODO: free all input signals */
parport_release(pprt);
parport_unregister_device(pprt);
+ pprt = NULL;
}
parport_unregister_driver(&panel_driver);
}
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 643b413d9f0..97dae297ca3 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -815,7 +815,7 @@ static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
}
/*
- * Small addres space operations for POHMELFS.
+ * Small address space operations for POHMELFS.
*/
const struct address_space_operations pohmelfs_aops = {
.readpage = pohmelfs_readpage,
@@ -847,7 +847,7 @@ static void pohmelfs_destroy_inode(struct inode *inode)
}
/*
- * ->alloc_inode() callback. Allocates inode and initilizes private data.
+ * ->alloc_inode() callback. Allocates inode and initializes private data.
*/
static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
{
@@ -968,12 +968,18 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
goto err_out_exit;
}
- err = inode_setattr(inode, attr);
- if (err) {
- dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
- goto err_out_exit;
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ err = vmtruncate(inode, attr->ia_size);
+ if (err) {
+ dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
+ goto err_out_exit;
+ }
}
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
__func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
@@ -1217,7 +1223,7 @@ void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
}
}
-static void pohmelfs_drop_inode(struct inode *inode)
+static int pohmelfs_drop_inode(struct inode *inode)
{
struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
struct pohmelfs_inode *pi = POHMELFS_I(inode);
@@ -1226,7 +1232,7 @@ static void pohmelfs_drop_inode(struct inode *inode)
list_del_init(&pi->inode_entry);
spin_unlock(&psb->ino_lock);
- generic_drop_inode(inode);
+ return generic_drop_inode(inode);
}
static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
@@ -1266,7 +1272,7 @@ static void pohmelfs_put_super(struct super_block *sb)
{
struct pohmelfs_sb *psb = POHMELFS_SB(sb);
struct pohmelfs_inode *pi;
- unsigned int count;
+ unsigned int count = 0;
unsigned int in_drop_list = 0;
struct inode *inode, *tmp;
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index cdc4dd50d63..8ec83d2dffb 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
return -ENOENT;
}
- read_lock(&current->fs->lock);
+ spin_lock(&current->fs->lock);
path.mnt = mntget(current->fs->root.mnt);
- read_unlock(&current->fs->lock);
+ spin_unlock(&current->fs->lock);
path.dentry = d;
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
return -ENOENT;
}
- read_lock(&current->fs->lock);
+ spin_lock(&current->fs->lock);
root = dget(current->fs->root.dentry);
- read_unlock(&current->fs->lock);
+ spin_unlock(&current->fs->lock);
spin_lock(&dcache_lock);
diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
index ecd73135b31..9838ea279c5 100644
--- a/drivers/staging/quatech_usb2/quatech_usb2.c
+++ b/drivers/staging/quatech_usb2/quatech_usb2.c
@@ -258,8 +258,6 @@ static int qt2_box_get_register(struct usb_serial *serial,
static int qt2_box_set_register(struct usb_serial *serial,
unsigned short Uart_Number, unsigned short Register_Num,
unsigned short Value);
-static int qt2_box_flush(struct usb_serial *serial, unsigned char uart_number,
- unsigned short rcv_or_xmit);
static int qt2_boxsetuart(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short default_divisor, unsigned char default_LCR);
static int qt2_boxsethw_flowctl(struct usb_serial *serial,
@@ -645,9 +643,6 @@ static void qt2_close(struct usb_serial_port *port)
/* get the device private data */
port_extra = qt2_get_port_private(port); /* port private data */
- /* we don't need to force flush though the hardware, so we skip using
- * qt2_box_flush() here */
-
/* we can now (and only now) stop reading data */
port_extra->close_pending = true;
dbg("%s(): port_extra->close_pending = true", __func__);
@@ -1841,24 +1836,6 @@ static int qt2_box_set_register(struct usb_serial *serial,
return result;
}
-
-/** @brief Request the Tx or Rx buffers on the USB side be flushed
- *
- * Tx flush: When all the currently buffered data has been sent, send an escape
- * sequence back up the data stream to us
- * Rx flush: add a flag in the data stream now so we know when it's made it's
- * way up to us.
- */
-static int qt2_box_flush(struct usb_serial *serial, unsigned char uart_number,
- unsigned short rcv_or_xmit)
-{
- int result;
- result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- QT2_FLUSH_DEVICE, 0x40, rcv_or_xmit, uart_number, NULL, 0,
- 300);
- return result;
-}
-
/** qt2_boxsetuart - Issue a SET_UART vendor-spcific request on the default
* control pipe. If successful sets baud rate divisor and LCR value.
*/
@@ -1873,6 +1850,7 @@ static int qt2_boxsetuart(struct usb_serial *serial, unsigned short Uart_Number,
QT2_GET_SET_UART, 0x40, default_divisor, UartNumandLCR,
NULL, 0, 300);
}
+
/** qt2_boxsethw_flowctl - Turn hardware (RTS/CTS) flow control on and off for
* a hardware UART.
*/
diff --git a/drivers/staging/quickstart/Kconfig b/drivers/staging/quickstart/Kconfig
new file mode 100644
index 00000000000..5bea4875d37
--- /dev/null
+++ b/drivers/staging/quickstart/Kconfig
@@ -0,0 +1,10 @@
+config ACPI_QUICKSTART
+ tristate "ACPI Quickstart key driver"
+ depends on ACPI && INPUT
+ help
+ Say Y here if you have a platform that supports the ACPI
+ quickstart key protocol.
+
+ To compile this driver as a module, choose M here: the module will be
+ called quickstart.
+
diff --git a/drivers/staging/quickstart/Makefile b/drivers/staging/quickstart/Makefile
new file mode 100644
index 00000000000..290e0e47679
--- /dev/null
+++ b/drivers/staging/quickstart/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ACPI_QUICKSTART) += quickstart.o
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
new file mode 100644
index 00000000000..66122479d52
--- /dev/null
+++ b/drivers/staging/quickstart/quickstart.c
@@ -0,0 +1,474 @@
+/*
+ * quickstart.c - ACPI Direct App Launch driver
+ *
+ *
+ * Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
+ *
+ * Information gathered from disassembled DSDT and from here:
+ * "http://download.microsoft.com/download/9/c/5/
+ * 9c5b2167-8017-4bae-9fde-d599bac8184a/DirAppLaunch_Vista.doc"
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define QUICKSTART_VERSION "1.03"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+
+MODULE_AUTHOR("Angelo Arrifano");
+MODULE_DESCRIPTION("ACPI Direct App Launch driver");
+MODULE_LICENSE("GPL");
+
+#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
+#define QUICKSTART_ACPI_CLASS "quickstart"
+#define QUICKSTART_ACPI_HID "PNP0C32"
+
+#define QUICKSTART_PF_DRIVER_NAME "quickstart"
+#define QUICKSTART_PF_DEVICE_NAME "quickstart"
+#define QUICKSTART_PF_DEVATTR_NAME "pressed_button"
+
+#define QUICKSTART_MAX_BTN_NAME_LEN 16
+
+/* There will be two events:
+ * 0x02 - A hot button was pressed while device was off/sleeping.
+ * 0x80 - A hot button was pressed while device was up. */
+#define QUICKSTART_EVENT_WAKE 0x02
+#define QUICKSTART_EVENT_RUNTIME 0x80
+
+struct quickstart_btn {
+ char *name;
+ unsigned int id;
+ struct quickstart_btn *next;
+};
+
+static struct quickstart_driver_data {
+ struct quickstart_btn *btn_lst;
+ struct quickstart_btn *pressed;
+} quickstart_data;
+
+/* ACPI driver Structs */
+struct quickstart_acpi {
+ struct acpi_device *device;
+ struct quickstart_btn *btn;
+};
+static int quickstart_acpi_add(struct acpi_device *device);
+static int quickstart_acpi_remove(struct acpi_device *device, int type);
+static const struct acpi_device_id quickstart_device_ids[] = {
+ {QUICKSTART_ACPI_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver quickstart_acpi_driver = {
+ .name = "quickstart",
+ .class = QUICKSTART_ACPI_CLASS,
+ .ids = quickstart_device_ids,
+ .ops = {
+ .add = quickstart_acpi_add,
+ .remove = quickstart_acpi_remove,
+ },
+};
+
+/* Input device structs */
+struct input_dev *quickstart_input;
+
+/* Platform driver structs */
+static ssize_t buttons_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t pressed_button_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count);
+static DEVICE_ATTR(pressed_button, 0666, pressed_button_show,
+ pressed_button_store);
+static DEVICE_ATTR(buttons, 0444, buttons_show, NULL);
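+/* The two attributes above surface on the platform device in sysfs.
+ * Example usage (hypothetical paths, assuming the default sysfs layout):
+ *   cat /sys/devices/platform/quickstart/buttons
+ *   echo none > /sys/devices/platform/quickstart/pressed_button
+ */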
+static struct platform_device *pf_device;
+static struct platform_driver pf_driver = {
+ .driver = {
+ .name = QUICKSTART_PF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+/*
+ * Platform driver functions
+ */
+static ssize_t buttons_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int count = 0;
+ struct quickstart_btn *ptr = quickstart_data.btn_lst;
+
+ if (!ptr)
+ return snprintf(buf, PAGE_SIZE, "none");
+
+ while (ptr && (count < PAGE_SIZE)) {
+ if (ptr->name) {
+ count += snprintf(buf + count,
+ PAGE_SIZE - count,
+ "%d\t%s\n", ptr->id, ptr->name);
+ }
+ ptr = ptr->next;
+ }
+
+ return count;
+}
+
+static ssize_t pressed_button_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (quickstart_data.pressed ? quickstart_data.pressed->name : "none"));
+}
+
+
+static ssize_t pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (count < 2)
+ return -EINVAL;
+
+ if (strncasecmp(buf, "none", 4) != 0)
+ return -EINVAL;
+
+ quickstart_data.pressed = NULL;
+ return count;
+}
+
+/* Quickstart button list helper functions */
+static int quickstart_btnlst_add(struct quickstart_btn **data)
+{
+ struct quickstart_btn **ptr = &quickstart_data.btn_lst;
+
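+ /* Walk the pointer-to-pointer chain to the list tail so the new
+ * node can be linked in without a special empty-list case. */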
+ while (*ptr)
+ ptr = &((*ptr)->next);
+
+ *ptr = kzalloc(sizeof(struct quickstart_btn), GFP_KERNEL);
+ if (!*ptr) {
+ *data = NULL;
+ return -ENOMEM;
+ }
+ *data = *ptr;
+
+ return 0;
+}
+
+static void quickstart_btnlst_del(struct quickstart_btn *data)
+{
+ struct quickstart_btn **ptr = &quickstart_data.btn_lst;
+
+ if (!data)
+ return;
+
+ while (*ptr) {
+ if (*ptr == data) {
+ *ptr = (*ptr)->next;
+ kfree(data);
+ return;
+ }
+ ptr = &((*ptr)->next);
+ }
+
+ return;
+}
+
+static void quickstart_btnlst_free(void)
+{
+ struct quickstart_btn *ptr = quickstart_data.btn_lst;
+ struct quickstart_btn *lptr = NULL;
+
+ while (ptr) {
+ lptr = ptr;
+ ptr = ptr->next;
+ kfree(lptr->name);
+ kfree(lptr);
+ }
+
+ return;
+}
+
+/* ACPI Driver functions */
+static void quickstart_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct quickstart_acpi *quickstart = data;
+
+ if (!quickstart)
+ return;
+
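+ /* A wake event only records which button woke the machine; a runtime
+ * event is forwarded to userspace as a key press/release pair. */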
+ if (event == QUICKSTART_EVENT_WAKE)
+ quickstart_data.pressed = quickstart->btn;
+ else if (event == QUICKSTART_EVENT_RUNTIME) {
+ input_report_key(quickstart_input, quickstart->btn->id, 1);
+ input_sync(quickstart_input);
+ input_report_key(quickstart_input, quickstart->btn->id, 0);
+ input_sync(quickstart_input);
+ }
+ return;
+}
+
+static void quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ uint32_t usageid = 0;
+
+ if (!quickstart)
+ return;
+
+ /* The GHID method returns a buffer holding the button usage ID and
+ * triggers any notify events that were queued before the system booted. */
+ status = acpi_evaluate_object(quickstart->device->handle,
+ "GHID", NULL, &buffer);
+ if (ACPI_FAILURE(status) || !buffer.pointer) {
+ printk(KERN_ERR "quickstart: %s GHID method failed.\n",
+ quickstart->btn->name);
+ return;
+ }
+
+ if (buffer.length < 8) {
+ kfree(buffer.pointer);
+ return;
+ }
+
+ /* <<The GHID method can return a BYTE, WORD, or DWORD.
+ * The value must be encoded in little-endian byte
+ * order (least significant byte first).>> */
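+ /* The usage ID is taken from the 32-bit value stored 8 bytes before
+ * the end of the returned buffer. */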
+ usageid = *((uint32_t *)(buffer.pointer + (buffer.length - 8)));
+ quickstart->btn->id = usageid;
+
+ kfree(buffer.pointer);
+}
+
+static int quickstart_acpi_config(struct quickstart_acpi *quickstart, char *bid)
+{
+ int len = strlen(bid);
+ int ret;
+
+ /* Add button to list */
+ ret = quickstart_btnlst_add(&quickstart->btn);
+ if (ret)
+ return ret;
+
+ quickstart->btn->name = kzalloc(len + 1, GFP_KERNEL);
+ if (!quickstart->btn->name) {
+ quickstart_btnlst_free();
+ return -ENOMEM;
+ }
+ strcpy(quickstart->btn->name, bid);
+
+ return 0;
+}
+
+static int quickstart_acpi_add(struct acpi_device *device)
+{
+ int ret = 0;
+ acpi_status status = AE_OK;
+ struct quickstart_acpi *quickstart = NULL;
+
+ if (!device)
+ return -EINVAL;
+
+ quickstart = kzalloc(sizeof(struct quickstart_acpi), GFP_KERNEL);
+ if (!quickstart)
+ return -ENOMEM;
+
+ quickstart->device = device;
+ strcpy(acpi_device_name(device), QUICKSTART_ACPI_DEVICE_NAME);
+ strcpy(acpi_device_class(device), QUICKSTART_ACPI_CLASS);
+ device->driver_data = quickstart;
+
+ /* Add the button to the global list and record its ACPI bus ID as its name */
+ ret = quickstart_acpi_config(quickstart, acpi_device_bid(device));
+ if (ret)
+ goto fail_config;
+
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify,
+ quickstart);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "quickstart: Notify handler install error\n");
+ ret = -ENODEV;
+ goto fail_installnotify;
+ }
+
+ quickstart_acpi_ghid(quickstart);
+
+ return 0;
+
+fail_installnotify:
+ quickstart_btnlst_del(quickstart->btn);
+
+fail_config:
+
+ kfree(quickstart);
+
+ return ret;
+}
+
+static int quickstart_acpi_remove(struct acpi_device *device, int type)
+{
+ acpi_status status = 0;
+ struct quickstart_acpi *quickstart = NULL;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ quickstart = acpi_driver_data(device);
+
+ status = acpi_remove_notify_handler(device->handle,
+ ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
+ if (ACPI_FAILURE(status))
+ printk(KERN_ERR "quickstart: Error removing notify handler\n");
+
+
+ kfree(quickstart);
+
+ return 0;
+}
+
+/* Module functions */
+
+static void quickstart_exit(void)
+{
+ /* input_unregister_device() drops the final reference, so the
+ * device must not also be passed to input_free_device(). */
+ input_unregister_device(quickstart_input);
+
+ device_remove_file(&pf_device->dev, &dev_attr_pressed_button);
+ device_remove_file(&pf_device->dev, &dev_attr_buttons);
+
+ platform_device_unregister(pf_device);
+
+ platform_driver_unregister(&pf_driver);
+
+ acpi_bus_unregister_driver(&quickstart_acpi_driver);
+
+ quickstart_btnlst_free();
+
+ return;
+}
+
+static int __init quickstart_init_input(void)
+{
+ struct quickstart_btn **ptr = &quickstart_data.btn_lst;
+ int ret;
+
+ quickstart_input = input_allocate_device();
+
+ if (!quickstart_input)
+ return -ENOMEM;
+
+ quickstart_input->name = "Quickstart ACPI Buttons";
+ quickstart_input->id.bustype = BUS_HOST;
+
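+ /* Expose each discovered button's GHID usage ID as a key code. */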
+ while (*ptr) {
+ set_bit(EV_KEY, quickstart_input->evbit);
+ set_bit((*ptr)->id, quickstart_input->keybit);
+ ptr = &((*ptr)->next);
+ }
+
+ ret = input_register_device(quickstart_input);
+ if (ret)
+ input_free_device(quickstart_input);
+
+ return ret;
+}
+
+static int __init quickstart_init(void)
+{
+ int ret;
+
+ /* ACPI Check */
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* ACPI driver register */
+ ret = acpi_bus_register_driver(&quickstart_acpi_driver);
+ if (ret < 0)
+ return -ENODEV;
+
+ /* Bail out if no quickstart devices were found on the ACPI bus */
+ if (!quickstart_data.btn_lst) {
+ ret = -ENODEV;
+ goto fail_pfdrv_reg;
+ }
+
+ /* Platform driver register */
+ ret = platform_driver_register(&pf_driver);
+ if (ret)
+ goto fail_pfdrv_reg;
+
+ /* Platform device register */
+ pf_device = platform_device_alloc(QUICKSTART_PF_DEVICE_NAME, -1);
+ if (!pf_device) {
+ ret = -ENOMEM;
+ goto fail_pfdev_alloc;
+ }
+ ret = platform_device_add(pf_device);
+ if (ret)
+ goto fail_pfdev_add;
+
+ /* Create device sysfs file */
+ ret = device_create_file(&pf_device->dev, &dev_attr_pressed_button);
+ if (ret)
+ goto fail_dev_file;
+
+ ret = device_create_file(&pf_device->dev, &dev_attr_buttons);
+ if (ret)
+ goto fail_dev_file2;
+
+
+ /* Input device */
+ ret = quickstart_init_input();
+ if (ret)
+ goto fail_input;
+
+ printk(KERN_INFO "quickstart: ACPI Direct App Launch ver %s\n",
+ QUICKSTART_VERSION);
+
+ return 0;
+fail_input:
+ device_remove_file(&pf_device->dev, &dev_attr_buttons);
+
+fail_dev_file2:
+ device_remove_file(&pf_device->dev, &dev_attr_pressed_button);
+
+fail_dev_file:
+ platform_device_del(pf_device);
+
+fail_pfdev_add:
+ platform_device_put(pf_device);
+
+fail_pfdev_alloc:
+ platform_driver_unregister(&pf_driver);
+
+fail_pfdrv_reg:
+ acpi_bus_unregister_driver(&quickstart_acpi_driver);
+
+ return ret;
+}
+
+module_init(quickstart_init);
+module_exit(quickstart_exit);
diff --git a/drivers/staging/ramzswap/Kconfig b/drivers/staging/ramzswap/Kconfig
deleted file mode 100644
index 127b3c6c959..00000000000
--- a/drivers/staging/ramzswap/Kconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-config RAMZSWAP
- tristate "Compressed in-memory swap device (ramzswap)"
- depends on SWAP
- select LZO_COMPRESS
- select LZO_DECOMPRESS
- default n
- help
- Creates virtual block devices which can (only) be used as swap
- disks. Pages swapped to these disks are compressed and stored in
- memory itself.
-
- See ramzswap.txt for more information.
- Project home: http://compcache.googlecode.com/
-
-config RAMZSWAP_STATS
- bool "Enable ramzswap stats"
- depends on RAMZSWAP
- default y
- help
- Enable statistics collection for ramzswap. This adds only a minimal
- overhead. In unsure, say Y.
diff --git a/drivers/staging/ramzswap/Makefile b/drivers/staging/ramzswap/Makefile
deleted file mode 100644
index 507d7dc3b86..00000000000
--- a/drivers/staging/ramzswap/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-ramzswap-objs := ramzswap_drv.o xvmalloc.o
-
-obj-$(CONFIG_RAMZSWAP) += ramzswap.o
diff --git a/drivers/staging/ramzswap/ramzswap.txt b/drivers/staging/ramzswap/ramzswap.txt
deleted file mode 100644
index 9694acfeb43..00000000000
--- a/drivers/staging/ramzswap/ramzswap.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-ramzswap: Compressed RAM based swap device
--------------------------------------------
-
-Project home: http://compcache.googlecode.com/
-
-* Introduction
-
-The ramzswap module creates RAM based block devices which can (only) be used as
-swap disks. Pages swapped to these devices are compressed and stored in memory
-itself. See project home for use cases, performance numbers and a lot more.
-
-Individual ramzswap devices are configured and initialized using rzscontrol
-userspace utility as shown in examples below. See rzscontrol man page for more
-details.
-
-* Usage
-
-Following shows a typical sequence of steps for using ramzswap.
-
-1) Load Modules:
- modprobe ramzswap num_devices=4
- This creates 4 (uninitialized) devices: /dev/ramzswap{0,1,2,3}
- (num_devices parameter is optional. Default: 1)
-
-2) Initialize:
- Use rzscontrol utility to configure and initialize individual
- ramzswap devices. Example:
- rzscontrol /dev/ramzswap2 --init # uses default value of disksize_kb
-
- *See rzscontrol man page for more details and examples*
-
-3) Activate:
- swapon /dev/ramzswap2 # or any other initialized ramzswap device
-
-4) Stats:
- rzscontrol /dev/ramzswap2 --stats
-
-5) Deactivate:
- swapoff /dev/ramzswap2
-
-6) Reset:
- rzscontrol /dev/ramzswap2 --reset
- (This frees all the memory allocated for this device).
-
-
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
-Nitin Gupta
-ngupta@vflare.org
diff --git a/drivers/staging/ramzswap/ramzswap_drv.c b/drivers/staging/ramzswap/ramzswap_drv.c
deleted file mode 100644
index d14bf9129e3..00000000000
--- a/drivers/staging/ramzswap/ramzswap_drv.c
+++ /dev/null
@@ -1,837 +0,0 @@
-/*
- * Compressed RAM based swap device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com
- */
-
-#define KMSG_COMPONENT "ramzswap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/lzo.h>
-#include <linux/string.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
-#include <linux/vmalloc.h>
-
-#include "ramzswap_drv.h"
-
-/* Globals */
-static int ramzswap_major;
-static struct ramzswap *devices;
-
-/* Module params (documentation at end) */
-static unsigned int num_devices;
-
-static int rzs_test_flag(struct ramzswap *rzs, u32 index,
- enum rzs_pageflags flag)
-{
- return rzs->table[index].flags & BIT(flag);
-}
-
-static void rzs_set_flag(struct ramzswap *rzs, u32 index,
- enum rzs_pageflags flag)
-{
- rzs->table[index].flags |= BIT(flag);
-}
-
-static void rzs_clear_flag(struct ramzswap *rzs, u32 index,
- enum rzs_pageflags flag)
-{
- rzs->table[index].flags &= ~BIT(flag);
-}
-
-static int page_zero_filled(void *ptr)
-{
- unsigned int pos;
- unsigned long *page;
-
- page = (unsigned long *)ptr;
-
- for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos])
- return 0;
- }
-
- return 1;
-}
-
-static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
-{
- if (!rzs->disksize) {
- pr_info(
- "disk size not provided. You can use disksize_kb module "
- "param to specify size.\nUsing default: (%u%% of RAM).\n",
- default_disksize_perc_ram
- );
- rzs->disksize = default_disksize_perc_ram *
- (totalram_bytes / 100);
- }
-
- if (rzs->disksize > 2 * (totalram_bytes)) {
- pr_info(
- "There is little point creating a ramzswap of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that ramzswap uses about 0.1%% of the size of "
- "the swap device when not in use so a huge ramzswap is "
- "wasteful.\n"
- "\tMemory Size: %zu kB\n"
- "\tSize you selected: %zu kB\n"
- "Continuing anyway ...\n",
- totalram_bytes >> 10, rzs->disksize
- );
- }
-
- rzs->disksize &= PAGE_MASK;
-}
-
-/*
- * Swap header (1st page of swap device) contains information
- * about a swap file/partition. Prepare such a header for the
- * given ramzswap device so that swapon can identify it as a
- * swap partition.
- */
-static void setup_swap_header(struct ramzswap *rzs, union swap_header *s)
-{
- s->info.version = 1;
- s->info.last_page = (rzs->disksize >> PAGE_SHIFT) - 1;
- s->info.nr_badpages = 0;
- memcpy(s->magic.magic, "SWAPSPACE2", 10);
-}
-
-static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
- struct ramzswap_ioctl_stats *s)
-{
- s->disksize = rzs->disksize;
-
-#if defined(CONFIG_RAMZSWAP_STATS)
- {
- struct ramzswap_stats *rs = &rzs->stats;
- size_t succ_writes, mem_used;
- unsigned int good_compress_perc = 0, no_compress_perc = 0;
-
- mem_used = xv_get_total_size_bytes(rzs->mem_pool)
- + (rs->pages_expand << PAGE_SHIFT);
- succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
- rzs_stat64_read(rzs, &rs->failed_writes);
-
- if (succ_writes && rs->pages_stored) {
- good_compress_perc = rs->good_compress * 100
- / rs->pages_stored;
- no_compress_perc = rs->pages_expand * 100
- / rs->pages_stored;
- }
-
- s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
- s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
- s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
- s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
- s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
- s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
- s->pages_zero = rs->pages_zero;
-
- s->good_compress_pct = good_compress_perc;
- s->pages_expand_pct = no_compress_perc;
-
- s->pages_stored = rs->pages_stored;
- s->pages_used = mem_used >> PAGE_SHIFT;
- s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
- s->compr_data_size = rs->compr_size;
- s->mem_used_total = mem_used;
- }
-#endif /* CONFIG_RAMZSWAP_STATS */
-}
-
-static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
-{
- u32 clen;
- void *obj;
-
- struct page *page = rzs->table[index].page;
- u32 offset = rzs->table[index].offset;
-
- if (unlikely(!page)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (rzs_test_flag(rzs, index, RZS_ZERO)) {
- rzs_clear_flag(rzs, index, RZS_ZERO);
- rzs_stat_dec(&rzs->stats.pages_zero);
- }
- return;
- }
-
- if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
- clen = PAGE_SIZE;
- __free_page(page);
- rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
- rzs_stat_dec(&rzs->stats.pages_expand);
- goto out;
- }
-
- obj = kmap_atomic(page, KM_USER0) + offset;
- clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
- kunmap_atomic(obj, KM_USER0);
-
- xv_free(rzs->mem_pool, page, offset);
- if (clen <= PAGE_SIZE / 2)
- rzs_stat_dec(&rzs->stats.good_compress);
-
-out:
- rzs->stats.compr_size -= clen;
- rzs_stat_dec(&rzs->stats.pages_stored);
-
- rzs->table[index].page = NULL;
- rzs->table[index].offset = 0;
-}
-
-static int handle_zero_page(struct bio *bio)
-{
- void *user_mem;
- struct page *page = bio->bi_io_vec[0].bv_page;
-
- user_mem = kmap_atomic(page, KM_USER0);
- memset(user_mem, 0, PAGE_SIZE);
- kunmap_atomic(user_mem, KM_USER0);
-
- flush_dcache_page(page);
-
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return 0;
-}
-
-static int handle_uncompressed_page(struct ramzswap *rzs, struct bio *bio)
-{
- u32 index;
- struct page *page;
- unsigned char *user_mem, *cmem;
-
- page = bio->bi_io_vec[0].bv_page;
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
- user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
- rzs->table[index].offset;
-
- memcpy(user_mem, cmem, PAGE_SIZE);
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
-
- flush_dcache_page(page);
-
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return 0;
-}
-
-/*
- * Called when request page is not present in ramzswap.
- * This is an attempt to read before any previous write
- * to this location - this happens due to readahead when
- * swap device is read from user-space (e.g. during swapon)
- */
-static int handle_ramzswap_fault(struct ramzswap *rzs, struct bio *bio)
-{
- pr_debug("Read before write on swap device: "
- "sector=%lu, size=%u, offset=%u\n",
- (ulong)(bio->bi_sector), bio->bi_size,
- bio->bi_io_vec[0].bv_offset);
-
- /* Do nothing. Just return success */
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return 0;
-}
-
-static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
-{
- int ret;
- u32 index;
- size_t clen;
- struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem;
-
- rzs_stat64_inc(rzs, &rzs->stats.num_reads);
-
- page = bio->bi_io_vec[0].bv_page;
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
- if (rzs_test_flag(rzs, index, RZS_ZERO))
- return handle_zero_page(bio);
-
- /* Requested page is not present in compressed area */
- if (!rzs->table[index].page)
- return handle_ramzswap_fault(rzs, bio);
-
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
- return handle_uncompressed_page(rzs, bio);
-
- user_mem = kmap_atomic(page, KM_USER0);
- clen = PAGE_SIZE;
-
- cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
- rzs->table[index].offset;
-
- ret = lzo1x_decompress_safe(
- cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
- user_mem, &clen);
-
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
-
- /* should NEVER happen */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n",
- ret, index);
- rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
- goto out;
- }
-
- flush_dcache_page(page);
-
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return 0;
-
-out:
- bio_io_error(bio);
- return 0;
-}
-
-static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
-{
- int ret;
- u32 offset, index;
- size_t clen;
- struct zobj_header *zheader;
- struct page *page, *page_store;
- unsigned char *user_mem, *cmem, *src;
-
- rzs_stat64_inc(rzs, &rzs->stats.num_writes);
-
- page = bio->bi_io_vec[0].bv_page;
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
- src = rzs->compress_buffer;
-
- mutex_lock(&rzs->lock);
-
- user_mem = kmap_atomic(page, KM_USER0);
- if (page_zero_filled(user_mem)) {
- kunmap_atomic(user_mem, KM_USER0);
- mutex_unlock(&rzs->lock);
- rzs_stat_inc(&rzs->stats.pages_zero);
- rzs_set_flag(rzs, index, RZS_ZERO);
-
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return 0;
- }
-
- ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
- rzs->compress_workmem);
-
- kunmap_atomic(user_mem, KM_USER0);
-
- if (unlikely(ret != LZO_E_OK)) {
- mutex_unlock(&rzs->lock);
- pr_err("Compression failed! err=%d\n", ret);
- rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
- goto out;
- }
-
- /*
- * Page is incompressible. Store it as-is (uncompressed)
- * since we do not want to return too many swap write
- * errors which has side effect of hanging the system.
- */
- if (unlikely(clen > max_zpage_size)) {
- clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
- mutex_unlock(&rzs->lock);
- pr_info("Error allocating memory for incompressible "
- "page: %u\n", index);
- rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
- goto out;
- }
-
- offset = 0;
- rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
- rzs_stat_inc(&rzs->stats.pages_expand);
- rzs->table[index].page = page_store;
- src = kmap_atomic(page, KM_USER0);
- goto memstore;
- }
-
- if (xv_malloc(rzs->mem_pool, clen + sizeof(*zheader),
- &rzs->table[index].page, &offset,
- GFP_NOIO | __GFP_HIGHMEM)) {
- mutex_unlock(&rzs->lock);
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
- rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
- goto out;
- }
-
-memstore:
- rzs->table[index].offset = offset;
-
- cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
- rzs->table[index].offset;
-
-#if 0
- /* Back-reference needed for memory defragmentation */
- if (!rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)) {
- zheader = (struct zobj_header *)cmem;
- zheader->table_idx = index;
- cmem += sizeof(*zheader);
- }
-#endif
-
- memcpy(cmem, src, clen);
-
- kunmap_atomic(cmem, KM_USER1);
- if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
- kunmap_atomic(src, KM_USER0);
-
- /* Update stats */
- rzs->stats.compr_size += clen;
- rzs_stat_inc(&rzs->stats.pages_stored);
- if (clen <= PAGE_SIZE / 2)
- rzs_stat_inc(&rzs->stats.good_compress);
-
- mutex_unlock(&rzs->lock);
-
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return 0;
-
-out:
- bio_io_error(bio);
- return 0;
-}
-
-/*
- * Check if request is within bounds and page aligned.
- */
-static inline int valid_swap_request(struct ramzswap *rzs, struct bio *bio)
-{
- if (unlikely(
- (bio->bi_sector >= (rzs->disksize >> SECTOR_SHIFT)) ||
- (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
- (bio->bi_vcnt != 1) ||
- (bio->bi_size != PAGE_SIZE) ||
- (bio->bi_io_vec[0].bv_offset != 0))) {
-
- return 0;
- }
-
- /* swap request is valid */
- return 1;
-}
-
-/*
- * Handler function for all ramzswap I/O requests.
- */
-static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
-{
- int ret = 0;
- struct ramzswap *rzs = queue->queuedata;
-
- if (unlikely(!rzs->init_done)) {
- bio_io_error(bio);
- return 0;
- }
-
- if (!valid_swap_request(rzs, bio)) {
- rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
- bio_io_error(bio);
- return 0;
- }
-
- switch (bio_data_dir(bio)) {
- case READ:
- ret = ramzswap_read(rzs, bio);
- break;
-
- case WRITE:
- ret = ramzswap_write(rzs, bio);
- break;
- }
-
- return ret;
-}
-
-static void reset_device(struct ramzswap *rzs)
-{
- size_t index;
-
- /* Do not accept any new I/O request */
- rzs->init_done = 0;
-
- /* Free various per-device buffers */
- kfree(rzs->compress_workmem);
- free_pages((unsigned long)rzs->compress_buffer, 1);
-
- rzs->compress_workmem = NULL;
- rzs->compress_buffer = NULL;
-
- /* Free all pages that are still in this ramzswap device */
- for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
- struct page *page;
- u16 offset;
-
- page = rzs->table[index].page;
- offset = rzs->table[index].offset;
-
- if (!page)
- continue;
-
- if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
- __free_page(page);
- else
- xv_free(rzs->mem_pool, page, offset);
- }
-
- vfree(rzs->table);
- rzs->table = NULL;
-
- xv_destroy_pool(rzs->mem_pool);
- rzs->mem_pool = NULL;
-
- /* Reset stats */
- memset(&rzs->stats, 0, sizeof(rzs->stats));
-
- rzs->disksize = 0;
-}
-
-static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
-{
- int ret;
- size_t num_pages;
- struct page *page;
- union swap_header *swap_header;
-
- if (rzs->init_done) {
- pr_info("Device already initialized!\n");
- return -EBUSY;
- }
-
- ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);
-
- rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!rzs->compress_workmem) {
- pr_err("Error allocating compressor working memory!\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- rzs->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
- if (!rzs->compress_buffer) {
- pr_err("Error allocating compressor buffer space\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- num_pages = rzs->disksize >> PAGE_SHIFT;
- rzs->table = vmalloc(num_pages * sizeof(*rzs->table));
- if (!rzs->table) {
- pr_err("Error allocating ramzswap address table\n");
- /* To prevent accessing table entries during cleanup */
- rzs->disksize = 0;
- ret = -ENOMEM;
- goto fail;
- }
- memset(rzs->table, 0, num_pages * sizeof(*rzs->table));
-
- page = alloc_page(__GFP_ZERO);
- if (!page) {
- pr_err("Error allocating swap header page\n");
- ret = -ENOMEM;
- goto fail;
- }
- rzs->table[0].page = page;
- rzs_set_flag(rzs, 0, RZS_UNCOMPRESSED);
-
- swap_header = kmap(page);
- setup_swap_header(rzs, swap_header);
- kunmap(page);
-
- set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);
-
- /* ramzswap devices sort of resembles non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);
-
- rzs->mem_pool = xv_create_pool();
- if (!rzs->mem_pool) {
- pr_err("Error creating memory pool\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- rzs->init_done = 1;
-
- pr_debug("Initialization done!\n");
- return 0;
-
-fail:
- reset_device(rzs);
-
- pr_err("Initialization failed: err=%d\n", ret);
- return ret;
-}
-
-static int ramzswap_ioctl_reset_device(struct ramzswap *rzs)
-{
- if (rzs->init_done)
- reset_device(rzs);
-
- return 0;
-}
-
-static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
- size_t disksize_kb;
-
- struct ramzswap *rzs = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case RZSIO_SET_DISKSIZE_KB:
- if (rzs->init_done) {
- ret = -EBUSY;
- goto out;
- }
- if (copy_from_user(&disksize_kb, (void *)arg,
- _IOC_SIZE(cmd))) {
- ret = -EFAULT;
- goto out;
- }
- rzs->disksize = disksize_kb << 10;
- pr_info("Disk size set to %zu kB\n", disksize_kb);
- break;
-
- case RZSIO_GET_STATS:
- {
- struct ramzswap_ioctl_stats *stats;
- if (!rzs->init_done) {
- ret = -ENOTTY;
- goto out;
- }
- stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- ret = -ENOMEM;
- goto out;
- }
- ramzswap_ioctl_get_stats(rzs, stats);
- if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
- kfree(stats);
- ret = -EFAULT;
- goto out;
- }
- kfree(stats);
- break;
- }
- case RZSIO_INIT:
- ret = ramzswap_ioctl_init_device(rzs);
- break;
-
- case RZSIO_RESET:
- /* Do not reset an active device! */
- if (bdev->bd_holders) {
- ret = -EBUSY;
- goto out;
- }
-
- /* Make sure all pending I/O is finished */
- if (bdev)
- fsync_bdev(bdev);
-
- ret = ramzswap_ioctl_reset_device(rzs);
- break;
-
- default:
- pr_info("Invalid ioctl %u\n", cmd);
- ret = -ENOTTY;
- }
-
-out:
- return ret;
-}
-
-void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
-{
- struct ramzswap *rzs;
-
- rzs = bdev->bd_disk->private_data;
- ramzswap_free_page(rzs, index);
- rzs_stat64_inc(rzs, &rzs->stats.notify_free);
-
- return;
-}
-
-static struct block_device_operations ramzswap_devops = {
- .ioctl = ramzswap_ioctl,
- .swap_slot_free_notify = ramzswap_slot_free_notify,
- .owner = THIS_MODULE
-};
-
-static int create_device(struct ramzswap *rzs, int device_id)
-{
- int ret = 0;
-
- mutex_init(&rzs->lock);
- spin_lock_init(&rzs->stat64_lock);
-
- rzs->queue = blk_alloc_queue(GFP_KERNEL);
- if (!rzs->queue) {
- pr_err("Error allocating disk queue for device %d\n",
- device_id);
- ret = -ENOMEM;
- goto out;
- }
-
- blk_queue_make_request(rzs->queue, ramzswap_make_request);
- rzs->queue->queuedata = rzs;
-
- /* gendisk structure */
- rzs->disk = alloc_disk(1);
- if (!rzs->disk) {
- blk_cleanup_queue(rzs->queue);
- pr_warning("Error allocating disk structure for device %d\n",
- device_id);
- ret = -ENOMEM;
- goto out;
- }
-
- rzs->disk->major = ramzswap_major;
- rzs->disk->first_minor = device_id;
- rzs->disk->fops = &ramzswap_devops;
- rzs->disk->queue = rzs->queue;
- rzs->disk->private_data = rzs;
- snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);
-
- /* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
- set_capacity(rzs->disk, 0);
-
- blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
- blk_queue_logical_block_size(rzs->disk->queue, PAGE_SIZE);
-
- add_disk(rzs->disk);
-
- rzs->init_done = 0;
-
-out:
- return ret;
-}
-
-static void destroy_device(struct ramzswap *rzs)
-{
- if (rzs->disk) {
- del_gendisk(rzs->disk);
- put_disk(rzs->disk);
- }
-
- if (rzs->queue)
- blk_cleanup_queue(rzs->queue);
-}
-
-static int __init ramzswap_init(void)
-{
- int ret, dev_id;
-
- if (num_devices > max_num_devices) {
- pr_warning("Invalid value for num_devices: %u\n",
- num_devices);
- ret = -EINVAL;
- goto out;
- }
-
- ramzswap_major = register_blkdev(0, "ramzswap");
- if (ramzswap_major <= 0) {
- pr_warning("Unable to get major number\n");
- ret = -EBUSY;
- goto out;
- }
-
- if (!num_devices) {
- pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
- }
-
- /* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
- devices = kzalloc(num_devices * sizeof(struct ramzswap), GFP_KERNEL);
- if (!devices) {
- ret = -ENOMEM;
- goto unregister;
- }
-
- for (dev_id = 0; dev_id < num_devices; dev_id++) {
- ret = create_device(&devices[dev_id], dev_id);
- if (ret)
- goto free_devices;
- }
-
- return 0;
-
-free_devices:
- while (dev_id)
- destroy_device(&devices[--dev_id]);
-unregister:
- unregister_blkdev(ramzswap_major, "ramzswap");
-out:
- return ret;
-}
-
-static void __exit ramzswap_exit(void)
-{
- int i;
- struct ramzswap *rzs;
-
- for (i = 0; i < num_devices; i++) {
- rzs = &devices[i];
-
- destroy_device(rzs);
- if (rzs->init_done)
- reset_device(rzs);
- }
-
- unregister_blkdev(ramzswap_major, "ramzswap");
-
- kfree(devices);
- pr_debug("Cleanup done!\n");
-}
-
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of ramzswap devices");
-
-module_init(ramzswap_init);
-module_exit(ramzswap_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
-MODULE_DESCRIPTION("Compressed RAM Based Swap Device");
diff --git a/drivers/staging/rt2860/ap.h b/drivers/staging/rt2860/ap.h
index 3f744a52aac..faac85d931d 100644
--- a/drivers/staging/rt2860/ap.h
+++ b/drivers/staging/rt2860/ap.h
@@ -42,7 +42,8 @@
/* ap_wpa.c */
void WpaStateMachineInit(struct rt_rtmp_adapter *pAd,
- struct rt_state_machine *Sm, OUT STATE_MACHINE_FUNC Trans[]);
+ struct rt_state_machine *Sm,
+ OUT STATE_MACHINE_FUNC Trans[]);
#ifdef RTMP_MAC_USB
void BeaconUpdateExec(void *SystemSpecific1,
@@ -61,6 +62,7 @@ struct rt_mac_table_entry *MacTableInsertEntry(struct rt_rtmp_adapter *pAd,
BOOLEAN MacTableDeleteEntry(struct rt_rtmp_adapter *pAd,
u16 wcid, u8 *pAddr);
-struct rt_mac_table_entry *MacTableLookup(struct rt_rtmp_adapter *pAd, u8 *pAddr);
+struct rt_mac_table_entry *MacTableLookup(struct rt_rtmp_adapter *pAd,
+ u8 *pAddr);
#endif /* __AP_H__ */
diff --git a/drivers/staging/rt2860/chlist.h b/drivers/staging/rt2860/chlist.h
index ada65e5ac61..1231e69d518 100644
--- a/drivers/staging/rt2860/chlist.h
+++ b/drivers/staging/rt2860/chlist.h
@@ -73,35 +73,31 @@ struct rt_ch_freq_map {
extern struct rt_ch_freq_map CH_HZ_ID_MAP[];
extern int CH_HZ_ID_MAP_NUM;
-#define MAP_CHANNEL_ID_TO_KHZ(_ch, _khz) \
- do{ \
- int _chIdx; \
- for (_chIdx = 0; _chIdx < CH_HZ_ID_MAP_NUM; _chIdx++)\
- { \
- if ((_ch) == CH_HZ_ID_MAP[_chIdx].channel) \
- { \
- (_khz) = CH_HZ_ID_MAP[_chIdx].freqKHz * 1000; \
- break; \
- } \
- } \
- if (_chIdx == CH_HZ_ID_MAP_NUM) \
- (_khz) = 2412000; \
- }while(0)
+#define MAP_CHANNEL_ID_TO_KHZ(_ch, _khz) \
+ do { \
+ int _chIdx; \
+ for (_chIdx = 0; _chIdx < CH_HZ_ID_MAP_NUM; _chIdx++) {\
+ if ((_ch) == CH_HZ_ID_MAP[_chIdx].channel) { \
+ (_khz) = CH_HZ_ID_MAP[_chIdx].freqKHz * 1000;\
+ break; \
+ } \
+ } \
+ if (_chIdx == CH_HZ_ID_MAP_NUM) \
+ (_khz) = 2412000; \
+ } while (0)
#define MAP_KHZ_TO_CHANNEL_ID(_khz, _ch) \
- do{ \
- int _chIdx; \
- for (_chIdx = 0; _chIdx < CH_HZ_ID_MAP_NUM; _chIdx++)\
- { \
- if ((_khz) == CH_HZ_ID_MAP[_chIdx].freqKHz) \
- { \
- (_ch) = CH_HZ_ID_MAP[_chIdx].channel; \
- break; \
- } \
- } \
- if (_chIdx == CH_HZ_ID_MAP_NUM) \
- (_ch) = 1; \
- }while(0)
+ do { \
+ int _chIdx; \
+ for (_chIdx = 0; _chIdx < CH_HZ_ID_MAP_NUM; _chIdx++) {\
+ if ((_khz) == CH_HZ_ID_MAP[_chIdx].freqKHz) {\
+ (_ch) = CH_HZ_ID_MAP[_chIdx].channel; \
+ break; \
+ } \
+ } \
+ if (_chIdx == CH_HZ_ID_MAP_NUM) \
+ (_ch) = 1; \
+ } while (0)
void BuildChannelListEx(struct rt_rtmp_adapter *pAd);
diff --git a/drivers/staging/rt2860/common/cmm_wpa.c b/drivers/staging/rt2860/common/cmm_wpa.c
index c16f3763cca..9414aa34437 100644
--- a/drivers/staging/rt2860/common/cmm_wpa.c
+++ b/drivers/staging/rt2860/common/cmm_wpa.c
@@ -427,7 +427,7 @@ void RTMPToWirelessSta(struct rt_rtmp_adapter *pAd,
/*
==========================================================================
Description:
- This is a function to initilize 4-way handshake
+ This is a function to initialize 4-way handshake
Return:
@@ -867,7 +867,7 @@ void PeerPairMsg3Action(struct rt_rtmp_adapter *pAd,
==========================================================================
Description:
When receiving the last packet of 4-way pairwisekey handshake.
- Initilize 2-way groupkey handshake following.
+ Initialize 2-way groupkey handshake following.
Return:
==========================================================================
*/
diff --git a/drivers/staging/rt2860/common/rtmp_timer.c b/drivers/staging/rt2860/common/rtmp_timer.c
index 42e47d9dc2c..ab520909490 100644
--- a/drivers/staging/rt2860/common/rtmp_timer.c
+++ b/drivers/staging/rt2860/common/rtmp_timer.c
@@ -143,8 +143,8 @@ int RtmpTimerQThread(IN void *Context)
struct rt_rtmp_os_task *pTask;
struct rt_rtmp_adapter *pAd;
- pTask = (struct rt_rtmp_os_task *)Context;
- pAd = (struct rt_rtmp_adapter *)pTask->priv;
+ pTask = Context;
+ pAd = pTask->priv;
RtmpOSTaskCustomize(pTask);
diff --git a/drivers/staging/rt2860/mlme.h b/drivers/staging/rt2860/mlme.h
index 99c9362bae8..01414c3b488 100644
--- a/drivers/staging/rt2860/mlme.h
+++ b/drivers/staging/rt2860/mlme.h
@@ -31,7 +31,7 @@
Revision History:
Who When What
- -------- ---------- ----------------------------------------------
+ -------- ---------- ------------------------------
John Chang 2003-08-28 Created
John Chang 2004-09-06 modified for RT2600
@@ -50,7 +50,7 @@
#define MLME_TASK_EXEC_INTV 100/*200*/ /* */
#define LEAD_TIME 5
#define MLME_TASK_EXEC_MULTIPLE 10 /*5*/ /* MLME_TASK_EXEC_MULTIPLE * MLME_TASK_EXEC_INTV = 1 sec */
-#define REORDER_EXEC_INTV 100 /* 0.1 sec */
+#define REORDER_EXEC_INTV 100 /* 0.1 sec */
/* The definition of Radar detection duration region */
#define CE 0
@@ -60,7 +60,7 @@
#define JAP_W56 4
#define MAX_RD_REGION 5
-#define BEACON_LOST_TIME 4 * OS_HZ /* 2048 msec = 2 sec */
+#define BEACON_LOST_TIME (4 * OS_HZ) /* 2048 msec = 2 sec */
#define DLS_TIMEOUT 1200 /* unit: msec */
#define AUTH_TIMEOUT 300 /* unit: msec */
@@ -119,8 +119,8 @@
#define MAC_ADDR_IS_GROUP(Addr) (((Addr[0]) & 0x01))
#define MAC_ADDR_HASH(Addr) (Addr[0] ^ Addr[1] ^ Addr[2] ^ Addr[3] ^ Addr[4] ^ Addr[5])
#define MAC_ADDR_HASH_INDEX(Addr) (MAC_ADDR_HASH(Addr) % HASH_TABLE_SIZE)
-#define TID_MAC_HASH(Addr,TID) (TID^Addr[0] ^ Addr[1] ^ Addr[2] ^ Addr[3] ^ Addr[4] ^ Addr[5])
-#define TID_MAC_HASH_INDEX(Addr,TID) (TID_MAC_HASH(Addr,TID) % HASH_TABLE_SIZE)
+#define TID_MAC_HASH(Addr, TID) (TID^Addr[0] ^ Addr[1] ^ Addr[2] ^ Addr[3] ^ Addr[4] ^ Addr[5])
+#define TID_MAC_HASH_INDEX(Addr, TID) (TID_MAC_HASH(Addr, TID) % HASH_TABLE_SIZE)
/* LED Control */
/* assoiation ON. one LED ON. another blinking when TX, OFF when idle */
@@ -145,7 +145,7 @@
#define CAP_IS_DSSS_OFDM(x) (((x) & 0x2000) != 0)
#define CAP_IS_DELAY_BA(x) (((x) & 0x4000) != 0) /* 802.11e d9 */
-#define CAP_GENERATE(ess,ibss,priv,s_pre,s_slot,spectrum) (((ess) ? 0x0001 : 0x0000) | ((ibss) ? 0x0002 : 0x0000) | ((priv) ? 0x0010 : 0x0000) | ((s_pre) ? 0x0020 : 0x0000) | ((s_slot) ? 0x0400 : 0x0000) | ((spectrum) ? 0x0100 : 0x0000))
+#define CAP_GENERATE(ess, ibss, priv, s_pre, s_slot, spectrum) (((ess) ? 0x0001 : 0x0000) | ((ibss) ? 0x0002 : 0x0000) | ((priv) ? 0x0010 : 0x0000) | ((s_pre) ? 0x0020 : 0x0000) | ((s_slot) ? 0x0400 : 0x0000) | ((spectrum) ? 0x0100 : 0x0000))
#define ERP_IS_NON_ERP_PRESENT(x) (((x) & 0x01) != 0) /* 802.11g */
#define ERP_IS_USE_PROTECTION(x) (((x) & 0x02) != 0) /* 802.11g */
@@ -154,9 +154,9 @@
#define DRS_TX_QUALITY_WORST_BOUND 8 /* 3 // just test by gary */
#define DRS_PENALTY 8
-#define BA_NOTUSE 2
+#define BA_NOTUSE 2
/*BA Policy subfiled value in ADDBA frame */
-#define IMMED_BA 1
+#define IMMED_BA 1
#define DELAY_BA 0
/* BA Initiator subfield in DELBA frame */
@@ -176,8 +176,7 @@
/* reset all OneSecTx counters */
#define RESET_ONE_SEC_TX_CNT(__pEntry) \
-if (((__pEntry)) != NULL) \
-{ \
+if (((__pEntry)) != NULL) { \
(__pEntry)->OneSecTxRetryOkCount = 0; \
(__pEntry)->OneSecTxFailCount = 0; \
(__pEntry)->OneSecTxNoRetryOkCount = 0; \
@@ -846,7 +845,7 @@ struct rt_mlme_queue {
struct rt_mlme_queue_elem Entry[MAX_LEN_OF_MLME_QUEUE];
};
-typedef void(*STATE_MACHINE_FUNC) (void * Adaptor, struct rt_mlme_queue_elem *Elem);
+typedef void(*STATE_MACHINE_FUNC) (void *Adaptor, struct rt_mlme_queue_elem *Elem);
struct rt_state_machine {
unsigned long Base;
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
index 0029b2d73b7..6536965df3f 100644
--- a/drivers/staging/rt2860/rt_linux.c
+++ b/drivers/staging/rt2860/rt_linux.c
@@ -1015,7 +1015,7 @@ int RtmpOSTaskKill(struct rt_rtmp_os_task *pTask)
struct rt_rtmp_adapter *pAd;
int ret = NDIS_STATUS_FAILURE;
- pAd = (struct rt_rtmp_adapter *)pTask->priv;
+ pAd = pTask->priv;
#ifdef KTHREAD_SUPPORT
if (pTask->kthread_task) {
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index 82b6e783b33..282935caba2 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -2511,7 +2511,7 @@ void RTMPWriteTxWI(struct rt_rtmp_adapter *pAd, struct rt_txwi * pTxWI, IN BOOLE
u8 TID,
u8 TxRate,
u8 Txopmode,
- IN BOOLEAN CfAck, IN HTTRANSMIT_SETTING * pTransmit);
+ IN BOOLEAN CfAck, IN HTTRANSMIT_SETTING *pTransmit);
void RTMPWriteTxWI_Data(struct rt_rtmp_adapter *pAd,
struct rt_txwi *pTxWI, struct rt_tx_blk *pTxBlk);
@@ -3059,7 +3059,7 @@ BOOLEAN PeerBeaconAndProbeRspSanity(struct rt_rtmp_adapter *pAd,
u16 *pBeaconPeriod,
u8 *pChannel,
u8 *pNewChannel,
- OUT LARGE_INTEGER * pTimestamp,
+ OUT LARGE_INTEGER *pTimestamp,
struct rt_cf_parm *pCfParm,
u16 *pAtimWin,
u16 *pCapabilityInfo,
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index 674769d2b59..ebf9074a908 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */
{USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */
{USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */
+ {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */
{USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */
{USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */
{USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */
@@ -64,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x14B2, 0x3C07)}, /* AL */
{USB_DEVICE(0x050D, 0x8053)}, /* Belkin */
{USB_DEVICE(0x050D, 0x825B)}, /* Belkin */
+ {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */
{USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */
{USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */
{USB_DEVICE(0x07AA, 0x002F)}, /* Corega */
@@ -94,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x050d, 0x815c)},
{USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */
{USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */
- {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */
+ {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */
+ {USB_DEVICE(0x1690, 0x0740)}, /* Askey */
{USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */
{USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
{USB_DEVICE(0x7392, 0x7718)},
@@ -104,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
{USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
{USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
+ {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
#endif /* RT2870 // */
#ifdef RT3070
{USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */
{USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */
{USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */
{USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */
{USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */
{USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */
+ {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */
+ {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */
{USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */
{USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */
{USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */
+ {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */
+ {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */
{USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */
{USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */
{USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */
+ {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */
+ {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */
+ {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */
{USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */
+ {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070 */
{USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */
{USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */
{USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */
@@ -131,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */
{USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */
{USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */
+ {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */
+ {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */
{USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */
{USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */
{USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */
{USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */
{USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */
+ {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */
+ {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */
{USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */
+ {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */
+ {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */
+ {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070 */
+ {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */
+ {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */
+ {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */
+ {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */
+ {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */
+ {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */
+ {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */
#endif /* RT3070 // */
- {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */
{USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */
{USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
{USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
@@ -422,8 +460,8 @@ int MlmeThread(IN void *Context)
int status;
status = 0;
- pTask = (struct rt_rtmp_os_task *)Context;
- pAd = (struct rt_rtmp_adapter *)pTask->priv;
+ pTask = Context;
+ pAd = pTask->priv;
RtmpOSTaskCustomize(pTask);
@@ -491,8 +529,8 @@ int RTUSBCmdThread(IN void *Context)
int status;
status = 0;
- pTask = (struct rt_rtmp_os_task *)Context;
- pAd = (struct rt_rtmp_adapter *)pTask->priv;
+ pTask = Context;
+ pAd = pTask->priv;
RtmpOSTaskCustomize(pTask);
diff --git a/drivers/staging/rt3070/md4.h b/drivers/staging/rt3070/md4.h
deleted file mode 100644
index b3fb6372618..00000000000
--- a/drivers/staging/rt3070/md4.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *************************************************************************
- * Ralink Tech Inc.
- * 5F., No.36, Taiyuan St., Jhubei City,
- * Hsinchu County 302,
- * Taiwan, R.O.C.
- *
- * (c) Copyright 2002-2007, Ralink Technology, Inc.
- *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the *
- * Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
- * *
- *************************************************************************
- */
-
-#ifndef __MD4_H__
-#define __MD4_H__
-
-/* MD4 context. */
-typedef struct _MD4_CTX_ {
- unsigned long state[4]; /* state (ABCD) */
- unsigned long count[2]; /* number of bits, modulo 2^64 (lsb first) */
- u8 buffer[64]; /* input buffer */
-} MD4_CTX;
-
-void MD4Init(MD4_CTX *);
-void MD4Update(MD4_CTX *, u8 *, UINT);
-void MD4Final(u8 [16], MD4_CTX *);
-
-#endif /*__MD4_H__*/
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 155a78e0740..1b3103fbf29 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -4,6 +4,7 @@ config R8187SE
select WIRELESS_EXT
select WEXT_PRIV
select EEPROM_93CX6
+ select CRYPTO
default N
---help---
If built as a module, it will be called r8187se.ko.
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 49ab9fa9ffa..ed7457bc24e 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -61,7 +61,7 @@ static struct pci_device_id rtl8180_pci_id_tbl[] __devinitdata = {
};
-static char *ifname = "wlan%d";
+static char ifname[IFNAMSIZ] = "wlan%d";
static int hwseqnum = 0;
static int hwwep = 0;
static int channels = 0x3fff;
@@ -72,7 +72,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("Linux driver for Realtek RTL8180 / RTL8185 WiFi cards");
-module_param(ifname, charp, S_IRUGO|S_IWUSR);
+module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
module_param(hwseqnum, int, S_IRUGO|S_IWUSR);
module_param(hwwep, int, S_IRUGO|S_IWUSR);
module_param(channels, int, S_IRUGO|S_IWUSR);
@@ -3609,7 +3609,7 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
if (dev_alloc_name(dev, ifname) < 0) {
DMESG("Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
+ strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 2ae3745f775..2e64b239e24 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -3,5 +3,6 @@ config RTL8192E
depends on PCI && WLAN
select WIRELESS_EXT
select WEXT_PRIV
+ select CRYPTO
default N
---help---
diff --git a/drivers/staging/rtl8192e/ieee80211/dot11d.c b/drivers/staging/rtl8192e/ieee80211/dot11d.c
index 908f6051d57..6bbf0919cdf 100644
--- a/drivers/staging/rtl8192e/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192e/ieee80211/dot11d.c
@@ -218,22 +218,4 @@ int ToLegalChannel(
return default_chn;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(Dot11d_Init);
-//EXPORT_SYMBOL(Dot11d_Reset);
-//EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
-//EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
-//EXPORT_SYMBOL(DOT11D_ScanComplete);
-//EXPORT_SYMBOL(IsLegalChannel);
-//EXPORT_SYMBOL(ToLegalChannel);
-#else
-EXPORT_SYMBOL_NOVERS(Dot11d_Init);
-EXPORT_SYMBOL_NOVERS(Dot11d_Reset);
-EXPORT_SYMBOL_NOVERS(Dot11d_UpdateCountryIe);
-EXPORT_SYMBOL_NOVERS(DOT11D_GetMaxTxPwrInDbm);
-EXPORT_SYMBOL_NOVERS(DOT11D_ScanComplete);
-EXPORT_SYMBOL_NOVERS(IsLegalChannel);
-EXPORT_SYMBOL_NOVERS(ToLegalChannel);
-#endif
-
#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
index 50728f6e9c5..dda6719234c 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
@@ -27,12 +27,7 @@
#include <linux/kernel.h> /* ARRAY_SIZE */
#include <linux/version.h>
#include <linux/module.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#include <linux/jiffies.h>
-#else
-#include <linux/jffs.h>
-#include <linux/tqueue.h>
-#endif
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
@@ -44,12 +39,6 @@
#include "rtl819x_BA.h"
#include "rtl819x_TS.h"
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
-#ifndef bool
-typedef enum{false = 0, true} bool;
-#endif
-#endif
-
#ifndef IW_MODE_MONITOR
#define IW_MODE_MONITOR 6
#endif
@@ -428,46 +417,9 @@ typedef struct ieee_param {
#define IW_QUAL_NOISE_UPDATED 0x4
#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-static inline void tq_init(struct tq_struct * task, void(*func)(void *), void *data)
-{
- task->routine = func;
- task->data = data;
- //task->next = NULL;
- INIT_LIST_HEAD(&task->list);
- task->sync = 0;
-}
-#endif
-
// linux under 2.6.9 release may not support it, so modify it for common use
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
-//#define MSECS(t) (1000 * ((t) / HZ) + 1000 * ((t) % HZ) / HZ)
-#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
-static inline unsigned long msleep_interruptible_rsl(unsigned int msecs)
-{
- unsigned long timeout = MSECS(msecs) + 1;
-
- while (timeout) {
- set_current_state(TASK_INTERRUPTIBLE);
- timeout = schedule_timeout(timeout);
- }
- return timeout;
-}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,31))
-static inline void msleep(unsigned int msecs)
-{
- unsigned long timeout = MSECS(msecs) + 1;
-
- while (timeout) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- timeout = schedule_timeout(timeout);
- }
-}
-#endif
-#else
#define MSECS(t) msecs_to_jiffies(t)
#define msleep_interruptible_rsl msleep_interruptible
-#endif
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
@@ -1747,21 +1699,6 @@ enum ieee80211_state {
#define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \
IEEE80211_52GHZ_MIN_CHANNEL + 1)
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11))
-extern inline int is_multicast_ether_addr(const u8 *addr)
-{
- return ((addr[0] != 0xff) && (0x01 & addr[0]));
-}
-#endif
-
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,13))
-extern inline int is_broadcast_ether_addr(const u8 *addr)
-{
- return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
- (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
-}
-#endif
-
typedef struct tx_pending_t{
int frag;
struct ieee80211_txb *txb;
@@ -1838,11 +1775,7 @@ typedef struct _RT_POWER_SAVE_CONTROL
bool bIPSModeBackup;
bool bSwRfProcessing;
RT_RF_POWER_STATE eInactivePowerState;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
struct work_struct InactivePsWorkItem;
-#else
- struct tq_struct InactivePsWorkItem;
-#endif
struct timer_list InactivePsTimer;
// Return point for join action
@@ -2329,36 +2262,16 @@ struct ieee80211_device {
/* used if IEEE_SOFTMAC_BEACONS is set */
struct timer_list beacon_timer;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
struct work_struct associate_complete_wq;
struct work_struct associate_procedure_wq;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
struct delayed_work softmac_scan_wq;
struct delayed_work associate_retry_wq;
struct delayed_work start_ibss_wq;
struct delayed_work hw_wakeup_wq;
struct delayed_work hw_sleep_wq;
-#else
- struct work_struct softmac_scan_wq;
- struct work_struct associate_retry_wq;
- struct work_struct start_ibss_wq;
- struct work_struct hw_wakeup_wq;
- struct work_struct hw_sleep_wq;
-#endif
+
struct work_struct wx_sync_scan_wq;
struct workqueue_struct *wq;
-#else
- /* used for periodly scan */
- struct timer_list scan_timer;
-
- struct tq_struct associate_complete_wq;
- struct tq_struct associate_retry_wq;
- struct tq_struct start_ibss_wq;
- struct tq_struct associate_procedure_wq;
- struct tq_struct softmac_scan_wq;
- struct tq_struct wx_sync_scan_wq;
-
-#endif
// Qos related. Added by Annie, 2005-11-01.
//STA_QOS StaQos;
@@ -2557,11 +2470,7 @@ struct ieee80211_device {
static inline void *ieee80211_priv(struct net_device *dev)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
return ((struct ieee80211_device *)netdev_priv(dev))->priv;
-#else
- return ((struct ieee80211_device *)dev->priv)->priv;
-#endif
}
extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
@@ -2814,11 +2723,7 @@ extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_reques
union iwreq_data *wrqu, char *b);
//extern void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
-#else
- extern void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee);
-#endif
extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
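With the pre-2.6.20 fallbacks gone, this header assumes the current workqueue API throughout: handlers take a struct work_struct *, recover their containing object with container_of(), and MSECS() is simply msecs_to_jiffies(). A minimal sketch of that pattern, with purely illustrative names (demo_ieee, demo_scan_handler) not taken from the driver:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

struct demo_ieee {
	struct delayed_work scan_wq;	/* mirrors softmac_scan_wq above */
	int channel;
};

static void demo_scan_handler(struct work_struct *work)
{
	/* delayed_work embeds its work_struct in a member named "work" */
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct demo_ieee *ieee = container_of(dwork, struct demo_ieee, scan_wq);

	/* ... scan ieee->channel ..., then re-arm after 20 ms */
	schedule_delayed_work(&ieee->scan_wq, msecs_to_jiffies(20));
}

static void demo_scan_start(struct demo_ieee *ieee)
{
	INIT_DELAYED_WORK(&ieee->scan_wq, demo_scan_handler);
	schedule_delayed_work(&ieee->scan_wq, 0);
}

The driver's own handlers queue onto a private workqueue via queue_delayed_work(ieee->wq, ...) rather than schedule_delayed_work(), but the container_of() recovery is the same.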
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
index d5aa9af3d9f..ae503791890 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
@@ -243,23 +243,3 @@ void ieee80211_crypto_deinit(void)
kfree(hcrypt);
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_crypt_deinit_entries);
-//EXPORT_SYMBOL(ieee80211_crypt_deinit_handler);
-//EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit);
-
-//EXPORT_SYMBOL(ieee80211_register_crypto_ops);
-//EXPORT_SYMBOL(ieee80211_unregister_crypto_ops);
-//EXPORT_SYMBOL(ieee80211_get_crypto_ops);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_crypt_deinit_entries);
-EXPORT_SYMBOL_NOVERS(ieee80211_crypt_deinit_handler);
-EXPORT_SYMBOL_NOVERS(ieee80211_crypt_delayed_deinit);
-
-EXPORT_SYMBOL_NOVERS(ieee80211_register_crypto_ops);
-EXPORT_SYMBOL_NOVERS(ieee80211_unregister_crypto_ops);
-EXPORT_SYMBOL_NOVERS(ieee80211_get_crypto_ops);
-#endif
-
-//module_init(ieee80211_crypto_init);
-//module_exit(ieee80211_crypto_deinit);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h
index a84df4b7648..ca7dd0dda82 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.h
@@ -82,12 +82,4 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
void ieee80211_crypt_deinit_handler(unsigned long);
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
struct ieee80211_crypt_data **crypt);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
-#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,31))
-#define crypto_alloc_tfm crypto_alloc_tfm_rsl
-#define crypto_free_tfm crypto_free_tfm_rsl
-#endif
-
#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
index 7165c4c75c7..a4e21cbcdf1 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
@@ -24,18 +24,9 @@
#include "ieee80211.h"
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include "rtl_crypto.h"
-#else
#include <linux/crypto.h>
-#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- #include <asm/scatterlist.h>
-#else
#include <linux/scatterlist.h>
-#endif
-//#include <asm/scatterlist.h>
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: CCMP");
@@ -75,21 +66,7 @@ struct ieee80211_ccmp_data {
void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
const u8 pt[16], u8 ct[16])
{
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- struct scatterlist src, dst;
-
- src.page = virt_to_page(pt);
- src.offset = offset_in_page(pt);
- src.length = AES_BLOCK_LEN;
-
- dst.page = virt_to_page(ct);
- dst.offset = offset_in_page(ct);
- dst.length = AES_BLOCK_LEN;
-
- crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
-#else
crypto_cipher_encrypt_one((void*)tfm, ct, pt);
-#endif
}
static void * ieee80211_ccmp_init(int key_idx)
@@ -101,14 +78,6 @@ static void * ieee80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- priv->tfm = crypto_alloc_tfm("aes", 0);
- if (priv->tfm == NULL) {
- printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
- "crypto API aes\n");
- goto fail;
- }
- #else
priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
@@ -116,17 +85,12 @@ static void * ieee80211_ccmp_init(int key_idx)
priv->tfm = NULL;
goto fail;
}
- #endif
return priv;
fail:
if (priv) {
if (priv->tfm)
- #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
- crypto_free_tfm(priv->tfm);
- #else
crypto_free_cipher((void*)priv->tfm);
- #endif
kfree(priv);
}
@@ -138,11 +102,7 @@ static void ieee80211_ccmp_deinit(void *priv)
{
struct ieee80211_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
-#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
- crypto_free_tfm(_priv->tfm);
-#else
crypto_free_cipher((void*)_priv->tfm);
-#endif
kfree(priv);
}
@@ -528,11 +488,3 @@ void ieee80211_crypto_ccmp_exit(void)
ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_ccmp_null);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_ccmp_null);
-#endif
-
-//module_init(ieee80211_crypto_ccmp_init);
-//module_exit(ieee80211_crypto_ccmp_exit);
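The CCMP path retained above uses the single-block cipher interface from <linux/crypto.h>: crypto_alloc_cipher()/crypto_cipher_encrypt_one() replace the removed crypto_alloc_tfm()/crypto_cipher_encrypt() calls. A self-contained sketch of that interface (2.6.21+ API, illustrative function name):

#include <linux/crypto.h>
#include <linux/err.h>

/* AES-encrypt one 16-byte block of pt into ct; returns 0 or -errno. */
static int demo_aes_encrypt_block(const u8 key[16], const u8 pt[16], u8 ct[16])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, 16);
	if (!err)
		crypto_cipher_encrypt_one(tfm, ct, pt);	/* one ECB block */

	crypto_free_cipher(tfm);
	return err;
}

The driver allocates its tfm once in ieee80211_ccmp_init() and keeps it in priv->tfm; the per-call allocation here is only to keep the sketch self-contained.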
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
index 65f48896bfa..14ca61087c0 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
@@ -24,17 +24,8 @@
#include "ieee80211.h"
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include "rtl_crypto.h"
-#else
#include <linux/crypto.h>
-#endif
-//#include <asm/scatterlist.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- #include <asm/scatterlist.h>
-#else
- #include <linux/scatterlist.h>
-#endif
+#include <linux/scatterlist.h>
#include <linux/crc32.h>
@@ -68,17 +59,10 @@ struct ieee80211_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
-#if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED))
struct crypto_blkcipher *rx_tfm_arc4;
struct crypto_hash *rx_tfm_michael;
struct crypto_blkcipher *tx_tfm_arc4;
struct crypto_hash *tx_tfm_michael;
-#else
- struct crypto_tfm *tx_tfm_arc4;
- struct crypto_tfm *tx_tfm_michael;
- struct crypto_tfm *rx_tfm_arc4;
- struct crypto_tfm *rx_tfm_michael;
-#endif
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
};
@@ -91,35 +75,6 @@ static void * ieee80211_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
- if (priv->tx_tfm_arc4 == NULL) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
- goto fail;
- }
-
- priv->tx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
- if (priv->tx_tfm_michael == NULL) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
- goto fail;
- }
-
- priv->rx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
- if (priv->rx_tfm_arc4 == NULL) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
- goto fail;
- }
-
- priv->rx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
- if (priv->rx_tfm_michael == NULL) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
- goto fail;
- }
-#else
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
@@ -155,22 +110,10 @@ static void * ieee80211_tkip_init(int key_idx)
priv->rx_tfm_michael = NULL;
goto fail;
}
-#endif
return priv;
fail:
if (priv) {
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- if (priv->tx_tfm_michael)
- crypto_free_tfm(priv->tx_tfm_michael);
- if (priv->tx_tfm_arc4)
- crypto_free_tfm(priv->tx_tfm_arc4);
- if (priv->rx_tfm_michael)
- crypto_free_tfm(priv->rx_tfm_michael);
- if (priv->rx_tfm_arc4)
- crypto_free_tfm(priv->rx_tfm_arc4);
-
-#else
if (priv->tx_tfm_michael)
crypto_free_hash(priv->tx_tfm_michael);
if (priv->tx_tfm_arc4)
@@ -179,7 +122,6 @@ fail:
crypto_free_hash(priv->rx_tfm_michael);
if (priv->rx_tfm_arc4)
crypto_free_blkcipher(priv->rx_tfm_arc4);
-#endif
kfree(priv);
}
@@ -190,16 +132,6 @@ fail:
static void ieee80211_tkip_deinit(void *priv)
{
struct ieee80211_tkip_data *_priv = priv;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- if (_priv->tx_tfm_michael)
- crypto_free_tfm(_priv->tx_tfm_michael);
- if (_priv->tx_tfm_arc4)
- crypto_free_tfm(_priv->tx_tfm_arc4);
- if (_priv->rx_tfm_michael)
- crypto_free_tfm(_priv->rx_tfm_michael);
- if (_priv->rx_tfm_arc4)
- crypto_free_tfm(_priv->rx_tfm_arc4);
-#else
if (_priv) {
if (_priv->tx_tfm_michael)
crypto_free_hash(_priv->tx_tfm_michael);
@@ -210,7 +142,6 @@ static void ieee80211_tkip_deinit(void *priv)
if (_priv->rx_tfm_arc4)
crypto_free_blkcipher(_priv->rx_tfm_arc4);
}
-#endif
kfree(priv);
}
@@ -381,10 +312,8 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct ieee80211_hdr_4addr *hdr;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED))
struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
int ret = 0;
- #endif
u8 rc4key[16], *icv;
u32 crc;
struct scatterlist sg;
@@ -447,32 +376,14 @@ printk("%x\n", ((u32*)tkey->key)[7]);
if (!tcb_desc->bHwSec)
{
icv = skb_put(skb, 4);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
crc = ~crc32_le(~0, pos, len);
-#else
- crc = ~ether_crc_le(len, pos);
-#endif
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = len + 4;
- crypto_cipher_encrypt(tkey->tx_tfm_arc4, &sg, &sg, len + 4);
-#else
crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
-#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = len + 4;
-#else
sg_init_one(&sg, pos, len+4);
-#endif
ret= crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
-#endif
}
@@ -483,11 +394,7 @@ printk("%x\n", ((u32*)tkey->key)[7]);
}
if (!tcb_desc->bHwSec)
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- return 0;
- #else
return ret;
- #endif
else
return 0;
@@ -502,9 +409,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u16 iv16;
struct ieee80211_hdr_4addr *hdr;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED))
struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
- #endif
u8 rc4key[16];
u8 icv[4];
u32 crc;
@@ -563,21 +468,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = plen + 4;
- crypto_cipher_decrypt(tkey->rx_tfm_arc4, &sg, &sg, plen + 4);
-#else
crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
-#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = plen + 4;
-#else
sg_init_one(&sg, pos, plen+4);
-#endif
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
if (net_ratelimit()) {
printk(KERN_DEBUG ": TKIP: failed to decrypt "
@@ -586,13 +478,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
return -7;
}
-#endif
- #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
crc = ~crc32_le(~0, pos, plen);
- #else
- crc = ~ether_crc_le(plen, pos);
- #endif
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
@@ -641,47 +528,6 @@ if( ((u16*)skb->data)[0] & 0x4000){
}
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
-static int michael_mic(struct crypto_tfm * tfm_michael, u8 *key, u8 *hdr,
- u8 *data, size_t data_len, u8 *mic)
-{
- struct scatterlist sg[2];
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
- struct hash_desc desc;
- int ret = 0;
-#endif
-
- if (tfm_michael == NULL){
- printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
- return -1;
- }
- sg[0].page = virt_to_page(hdr);
- sg[0].offset = offset_in_page(hdr);
- sg[0].length = 16;
-
- sg[1].page = virt_to_page(data);
- sg[1].offset = offset_in_page(data);
- sg[1].length = data_len;
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- crypto_digest_init(tfm_michael);
- crypto_digest_setkey(tfm_michael, key, 8);
- crypto_digest_update(tfm_michael, sg, 2);
- crypto_digest_final(tfm_michael, mic);
- return 0;
-#else
-if (crypto_hash_setkey(tkey->tfm_michael, key, 8))
- return -1;
-
-// return 0;
- desc.tfm = tkey->tfm_michael;
- desc.flags = 0;
- ret = crypto_hash_digest(&desc, sg, data_len + 16, mic);
- return ret;
-#endif
-}
-#else
static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
u8 * data, size_t data_len, u8 * mic)
{
@@ -692,19 +538,9 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
return -1;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
- sg[0].page = virt_to_page(hdr);
- sg[0].offset = offset_in_page(hdr);
- sg[0].length = 16;
-
- sg[1].page = virt_to_page(data);
- sg[1].offset = offset_in_page(data);
- sg[1].length = data_len;
-#else
sg_init_table(sg, 2);
sg_set_buf(&sg[0], hdr, 16);
sg_set_buf(&sg[1], data, data_len);
-#endif
if (crypto_hash_setkey(tfm_michael, key, 8))
return -1;
@@ -713,7 +549,6 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
desc.flags = 0;
return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}
-#endif
@@ -772,13 +607,8 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
}
// }
pos = skb_put(skb, 8);
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
-#else
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
-#endif
return -1;
return 0;
@@ -850,13 +680,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
}
// }
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
- skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
-#else
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
-#endif
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct ieee80211_hdr_4addr *hdr;
@@ -886,32 +711,18 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- struct crypto_tfm *tfm = tkey->tx_tfm_michael;
- struct crypto_tfm *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_tfm *tfm3 = tkey->rx_tfm_michael;
- struct crypto_tfm *tfm4 = tkey->rx_tfm_arc4;
-#else
struct crypto_hash *tfm = tkey->tx_tfm_michael;
struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
-#endif
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
tkey->key_idx = keyidx;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
tkey->tx_tfm_michael = tfm;
tkey->tx_tfm_arc4 = tfm2;
tkey->rx_tfm_michael = tfm3;
tkey->rx_tfm_arc4 = tfm4;
-#else
- tkey->tx_tfm_michael = tfm;
- tkey->tx_tfm_arc4 = tfm2;
- tkey->rx_tfm_michael = tfm3;
- tkey->rx_tfm_arc4 = tfm4;
-#endif
if (len == TKIP_KEY_LEN) {
memcpy(tkey->key, key, TKIP_KEY_LEN);
@@ -1021,11 +832,4 @@ void ieee80211_tkip_null(void)
// printk("============>%s()\n", __FUNCTION__);
return;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_tkip_null);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_tkip_null);
-#endif
-//module_init(ieee80211_crypto_tkip_init);
-//module_exit(ieee80211_crypto_tkip_exit);
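After this cleanup the TKIP code depends unconditionally on the blkcipher/hash front-ends plus sg_init_one(), i.e. the 2.6.24+ scatterlist setup that the removed branches open-coded via sg.page/sg.offset/sg.length. A sketch of the ARC4 encryption step in isolation (illustrative name; the driver reuses long-lived tfms rather than allocating one per packet):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* RC4-encrypt len bytes of buf in place with a 16-byte per-packet key. */
static int demo_arc4_encrypt(const u8 rc4key[16], u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_setkey(tfm, rc4key, 16);
	if (!err) {
		sg_init_one(&sg, buf, len);	/* replaces manual page/offset/length setup */
		err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
	}

	crypto_free_blkcipher(tfm);
	return err;
}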
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
index c4bbc8ddbad..5dc976498aa 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
@@ -21,30 +21,11 @@
#include "ieee80211.h"
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include "rtl_crypto.h"
-#else
#include <linux/crypto.h>
-#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- #include <asm/scatterlist.h>
-#else
- #include <linux/scatterlist.h>
-#endif
-//#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <linux/crc32.h>
-//
-/*
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include "rtl_crypto.h"
-#else
-#include <linux/crypto.h>
-#endif
-#include <asm/scatterlist.h>
-#include <linux/crc32.h>
-*/
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: WEP");
MODULE_LICENSE("GPL");
@@ -58,12 +39,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- struct crypto_tfm *tfm;
- #else
struct crypto_blkcipher *tx_tfm;
struct crypto_blkcipher *rx_tfm;
- #endif
};
@@ -76,14 +53,6 @@ static void * prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- priv->tfm = crypto_alloc_tfm("arc4", 0);
- if (priv->tfm == NULL) {
- printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
- goto fail;
- }
- #else
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
@@ -98,7 +67,6 @@ static void * prism2_wep_init(int keyidx)
priv->rx_tfm = NULL;
goto fail;
}
- #endif
/* start WEP IV from a random value */
get_random_bytes(&priv->iv, 4);
@@ -106,13 +74,6 @@ static void * prism2_wep_init(int keyidx)
return priv;
fail:
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- if (priv) {
- if (priv->tfm)
- crypto_free_tfm(priv->tfm);
- kfree(priv);
- }
- #else
if (priv) {
if (priv->tx_tfm)
crypto_free_blkcipher(priv->tx_tfm);
@@ -120,7 +81,6 @@ fail:
crypto_free_blkcipher(priv->rx_tfm);
kfree(priv);
}
- #endif
return NULL;
}
@@ -128,17 +88,12 @@ fail:
static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- if (_priv && _priv->tfm)
- crypto_free_tfm(_priv->tfm);
- #else
if (_priv) {
if (_priv->tx_tfm)
crypto_free_blkcipher(_priv->tx_tfm);
if (_priv->rx_tfm)
crypto_free_blkcipher(_priv->rx_tfm);
}
- #endif
kfree(priv);
}
@@ -155,9 +110,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 key[WEP_KEY_LEN + 3];
u8 *pos;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED))
struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
- #endif
u32 crc;
u8 *icv;
struct scatterlist sg;
@@ -196,35 +149,16 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
/* Append little-endian CRC32 and encrypt it to produce ICV */
- #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
crc = ~crc32_le(~0, pos, len);
- #else
- crc = ~ether_crc_le(len, pos);
- #endif
icv = skb_put(skb, 4);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- crypto_cipher_setkey(wep->tfm, key, klen);
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = len + 4;
- crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
- return 0;
- #else
crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
- #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = len + 4;
- #else
sg_init_one(&sg, pos, len+4);
- #endif
return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
- #endif
}
return 0;
@@ -245,9 +179,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 key[WEP_KEY_LEN + 3];
u8 keyidx, *pos;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED))
struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
- #endif
u32 crc;
u8 icv[4];
struct scatterlist sg;
@@ -272,29 +204,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (!tcb_desc->bHwSec)
{
-#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
- crypto_cipher_setkey(wep->tfm, key, klen);
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = plen + 4;
- crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
- #else
crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
- #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- sg.page = virt_to_page(pos);
- sg.offset = offset_in_page(pos);
- sg.length = plen + 4;
- #else
sg_init_one(&sg, pos, plen+4);
- #endif
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
return -7;
- #endif
- #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
crc = ~crc32_le(~0, pos, plen);
- #else
- crc = ~ether_crc_le(plen, pos);
- #endif
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
@@ -379,14 +293,6 @@ void __exit ieee80211_crypto_wep_exit(void)
void ieee80211_wep_null(void)
{
-// printk("============>%s()\n", __FUNCTION__);
return;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_wep_null);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_wep_null);
-#endif
-//module_init(ieee80211_crypto_wep_init);
-//module_exit(ieee80211_crypto_wep_exit);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
index 614a8b630e6..7edf5c897a6 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
@@ -31,7 +31,6 @@
*******************************************************************************/
#include <linux/compiler.h>
-//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
@@ -110,14 +109,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
goto failed;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
ieee = netdev_priv(dev);
-#else
- ieee = (struct ieee80211_device *)dev->priv;
-#endif
-#if 0
- dev->hard_start_xmit = ieee80211_rtl_xmit;
-#endif
memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv);
ieee->dev = dev;
@@ -166,12 +158,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee80211_softmac_init(ieee);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
-#else
- ieee->pHTInfo = (RT_HIGH_THROUGHPUT*)kmalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
- memset(ieee->pHTInfo,0,sizeof(RT_HIGH_THROUGHPUT));
-#endif
if (ieee->pHTInfo == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
@@ -180,13 +167,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
HTUpdateDefaultSetting(ieee);
HTInitializeHTInfo(ieee); // may move to another place.
TSInitialize(ieee);
-#if 0
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
- INIT_WORK(&ieee->ht_onAssRsp, (void(*)(void*)) HTOnAssocRsp_wq);
-#else
- INIT_WORK(&ieee->ht_onAssRsp, (void(*)(void*)) HTOnAssocRsp_wq, ieee);
-#endif
-#endif
for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
@@ -205,32 +185,20 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
failed:
if (dev)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
free_netdev(dev);
-#else
- kfree(dev);
-#endif
return NULL;
}
void free_ieee80211(struct net_device *dev)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct ieee80211_device *ieee = netdev_priv(dev);
-#else
- struct ieee80211_device *ieee = (struct ieee80211_device *)dev->priv;
-#endif
int i;
- //struct list_head *p, *q;
-// del_timer_sync(&ieee->SwBwTimer);
-#if 1
if (ieee->pHTInfo != NULL)
{
kfree(ieee->pHTInfo);
ieee->pHTInfo = NULL;
}
-#endif
RemoveAllTS(ieee);
ieee80211_softmac_free(ieee);
del_timer_sync(&ieee->crypt_deinit_timer);
@@ -247,20 +215,7 @@ void free_ieee80211(struct net_device *dev)
}
ieee80211_networks_free(ieee);
-#if 0
- for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++) {
- list_for_each_safe(p, q, &ieee->ibss_mac_hash[i]) {
- kfree(list_entry(p, struct ieee_ibss_seq, list));
- list_del(p);
- }
- }
-
-#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
free_netdev(dev);
-#else
- kfree(dev);
-#endif
}
#ifdef CONFIG_IEEE80211_DEBUG
@@ -358,11 +313,7 @@ int __init ieee80211_rtl_init(void)
}
ieee80211_debug_level = debug;
-#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, proc_net);
-#else
ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
-#endif
if (ieee80211_proc == NULL) {
IEEE80211_ERROR("Unable to create " DRV_NAME
" proc directory\n");
@@ -371,11 +322,7 @@ int __init ieee80211_rtl_init(void)
e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
ieee80211_proc);
if (!e) {
-#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- remove_proc_entry(DRV_NAME, proc_net);
-#else
remove_proc_entry(DRV_NAME, init_net.proc_net);
-#endif
ieee80211_proc = NULL;
return -EIO;
}
@@ -390,11 +337,7 @@ void __exit ieee80211_rtl_exit(void)
{
if (ieee80211_proc) {
remove_proc_entry("debug_level", ieee80211_proc);
-#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
- remove_proc_entry(DRV_NAME, proc_net);
-#else
remove_proc_entry(DRV_NAME, init_net.proc_net);
-#endif
ieee80211_proc = NULL;
}
ieee80211_crypto_wep_exit();
@@ -403,21 +346,10 @@ void __exit ieee80211_rtl_exit(void)
ieee80211_crypto_deinit();
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#include <linux/moduleparam.h>
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
-//module_exit(ieee80211_rtl_exit);
-//module_init(ieee80211_rtl_init);
-#endif
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(alloc_ieee80211);
-//EXPORT_SYMBOL(free_ieee80211);
-#else
-EXPORT_SYMBOL_NOVERS(alloc_ieee80211);
-EXPORT_SYMBOL_NOVERS(free_ieee80211);
-#endif
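alloc_ieee80211()/free_ieee80211() now rely solely on netdev_priv(), free_netdev() and kzalloc(); the removed branches were the last dev->priv and kmalloc()+memset() idioms in this file. A minimal sketch of that allocation pattern, with hypothetical names and sizes:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>

struct demo_priv {
	void *ht_info;
};

static struct net_device *demo_alloc(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct demo_priv));
	struct demo_priv *priv;

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);		 /* replaces the old dev->priv access */
	priv->ht_info = kzalloc(64, GFP_KERNEL); /* already zeroed, no memset() needed */
	if (!priv->ht_info) {
		free_netdev(dev);		 /* replaces the old kfree(dev) */
		return NULL;
	}
	return dev;
}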
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
index da10067485e..aaf9b9dc45e 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
@@ -55,11 +55,7 @@ static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
u16 fc = le16_to_cpu(hdr->frame_ctl);
skb->dev = ieee->dev;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
skb_reset_mac_header(skb);
-#else
- skb->mac.raw = skb->data;
-#endif
skb_pull(skb, ieee80211_get_hdrlen(fc));
skb->pkt_type = PACKET_OTHERHOST;
@@ -2793,8 +2789,6 @@ static inline void ieee80211_process_probe_response(
#endif
memcpy(target, &network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
- if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
- ieee80211_softmac_new_net(ieee,&network);
} else {
IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
escape_essid(target->ssid,
@@ -2821,8 +2815,6 @@ static inline void ieee80211_process_probe_response(
//YJ,add,080819,for hidden ap,end
update_network(target, &network);
- if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
- ieee80211_softmac_new_net(ieee,&network);
}
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -2880,11 +2872,3 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
}
}
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_rx_mgt);
-//EXPORT_SYMBOL(ieee80211_rx);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_rx_mgt);
-EXPORT_SYMBOL_NOVERS(ieee80211_rx);
-#endif
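ieee80211_monitor_rx() keeps only the skb_reset_mac_header() form introduced in 2.6.22. Stripped to its shape, the monitor-path handoff looks like the sketch below (hypothetical helper; the real code derives hdrlen from the frame control field via ieee80211_get_hdrlen()):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>

/* Hand a received 802.11 frame up the stack with its header stripped. */
static void demo_monitor_rx(struct net_device *dev, struct sk_buff *skb,
			    unsigned int hdrlen)
{
	skb->dev = dev;
	skb_reset_mac_header(skb);	/* replaces the old skb->mac.raw = skb->data */
	skb_pull(skb, hdrlen);		/* drop the 802.11 header */
	skb->pkt_type = PACKET_OTHERHOST;
	netif_rx(skb);
}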
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
index 46b6e8c900e..b7ec1ddee70 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
@@ -510,34 +510,11 @@ out:
}
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-/* called both by wq with ieee->lock held */
-void ieee80211_softmac_scan(struct ieee80211_device *ieee)
-{
-#if 0
- short watchdog = 0;
- do{
- ieee->current_network.channel =
- (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
- if (watchdog++ > MAX_CHANNEL_NUMBER)
- return; /* no good chans */
- }while(!ieee->channel_map[ieee->current_network.channel]);
-#endif
-
- schedule_task(&ieee->softmac_scan_wq);
-}
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
-#else
-void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee)
-{
-#endif
static short watchdog = 0;
u8 last_channel = ieee->current_network.channel;
#ifdef ENABLE_DOT11D
@@ -575,13 +552,7 @@ void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee)
ieee80211_send_probe_requests(ieee);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
-#else
- //ieee->scan_timer.expires = jiffies + MSECS(IEEE80211_SOFTMAC_SCAN_TIME);
- if (ieee->scanning == 1)
- mod_timer(&ieee->scan_timer,(jiffies + MSECS(IEEE80211_SOFTMAC_SCAN_TIME)));
-#endif
up(&ieee->scan_sem);
return;
@@ -597,19 +568,6 @@ out:
up(&ieee->scan_sem);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-void ieee80211_softmac_scan_cb(unsigned long _dev)
-{
- unsigned long flags;
- struct ieee80211_device *ieee = (struct ieee80211_device *)_dev;
-
- spin_lock_irqsave(&ieee->lock, flags);
- ieee80211_softmac_scan(ieee);
- spin_unlock_irqrestore(&ieee->lock, flags);
-}
-#endif
-
-
void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -665,11 +623,7 @@ void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
if (ieee->scanning == 1){
ieee->scanning = 0;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
cancel_delayed_work(&ieee->softmac_scan_wq);
-#else
- del_timer_sync(&ieee->scan_timer);
-#endif
}
// spin_unlock_irqrestore(&ieee->lock, flags);
@@ -704,16 +658,7 @@ void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
if (ieee->scanning == 0){
ieee->scanning = 1;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
-#else
-
- queue_work(ieee->wq, &ieee->softmac_scan_wq);
-#endif
-#else
- ieee80211_softmac_scan(ieee);
-#endif
}
}else
ieee->start_scan(ieee->dev);
@@ -1428,13 +1373,8 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
ieee->state = IEEE80211_ASSOCIATING_RETRY;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \
IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
-#else
- schedule_task(&ieee->associate_retry_wq);
-#endif
-
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -1527,14 +1467,9 @@ void ieee80211_associate_step2(struct ieee80211_device *ieee)
//dev_kfree_skb_any(skb);//edit by thomas
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
-#else
-void ieee80211_associate_complete_wq(struct ieee80211_device *ieee)
-{
-#endif
printk(KERN_INFO "Associated successfully\n");
ieee->is_roaming = false;
if(ieee80211_is_54g(ieee->current_network) &&
@@ -1606,21 +1541,12 @@ void ieee80211_associate_complete(struct ieee80211_device *ieee)
}
#endif
//ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_work(ieee->wq, &ieee->associate_complete_wq);
-#else
- schedule_task(&ieee->associate_complete_wq);
-#endif
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
-#else
-void ieee80211_associate_procedure_wq(struct ieee80211_device *ieee)
-{
-#endif
ieee->sync_scan_hurryup = 1;
#ifdef ENABLE_IPS
if(ieee->ieee80211_ips_leave != NULL)
@@ -1734,11 +1660,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
}
ieee->state = IEEE80211_ASSOCIATING;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_work(ieee->wq, &ieee->associate_procedure_wq);
-#else
- schedule_task(&ieee->associate_procedure_wq);
-#endif
}else{
if(ieee80211_is_54g(ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)){
@@ -2332,11 +2254,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
"Association response status code 0x%x\n",
errcode);
if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_work(ieee->wq, &ieee->associate_procedure_wq);
-#else
- schedule_task(&ieee->associate_procedure_wq);
-#endif
} else {
ieee80211_associate_abort(ieee);
}
@@ -2446,11 +2364,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
// notify_wx_assoc_event(ieee);
//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
RemovePeerTS(ieee, header->addr2);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_work(ieee->wq, &ieee->associate_procedure_wq);
-#else
- schedule_task(&ieee->associate_procedure_wq);
-#endif
}
break;
case IEEE80211_STYPE_MANAGE_ACT:
@@ -2687,16 +2601,11 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
void ieee80211_start_ibss_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
-#else
-void ieee80211_start_ibss_wq(struct ieee80211_device *ieee)
-{
-#endif
/* iwconfig mode ad-hoc will schedule this and return;
* on the other hand, this will block further iwconfig SET
* operations because of the wx_sem hold.
@@ -2807,11 +2716,7 @@ void ieee80211_start_ibss_wq(struct ieee80211_device *ieee)
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150);
-#else
- schedule_task(&ieee->start_ibss_wq);
-#endif
}
/* this is called only in user context, with wx_sem held */
@@ -2873,22 +2778,22 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
if(IS_DOT11D_ENABLE(ieee))
Dot11d_Reset(ieee);
#endif
- ieee->state = IEEE80211_NOLINK;
ieee->is_set_key = false;
ieee->link_change(ieee->dev);
//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- notify_wx_assoc_event(ieee);
+ if (ieee->state == IEEE80211_LINKED ||
+ ieee->state == IEEE80211_ASSOCIATING) {
+ ieee->state = IEEE80211_NOLINK;
+ notify_wx_assoc_event(ieee);
+ }
+
+ ieee->state = IEEE80211_NOLINK;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
void ieee80211_associate_retry_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
-#else
-void ieee80211_associate_retry_wq(struct ieee80211_device *ieee)
-{
-#endif
unsigned long flags;
down(&ieee->wx_sem);
@@ -2990,10 +2895,8 @@ void ieee80211_stop_protocol(struct ieee80211_device *ieee, u8 shutdown)
ieee80211_stop_send_beacons(ieee);
del_timer_sync(&ieee->associate_timer);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
cancel_delayed_work(&ieee->associate_retry_wq);
cancel_delayed_work(&ieee->start_ibss_wq);
-#endif
ieee80211_stop_scan(ieee);
ieee80211_disassociate(ieee);
@@ -3114,11 +3017,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->sta_edca_param[3] = 0x002F3262;
ieee->aggregation = true;
ieee->enable_rx_imm_BA = 1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- init_timer(&ieee->scan_timer);
- ieee->scan_timer.data = (unsigned long)ieee;
- ieee->scan_timer.function = ieee80211_softmac_scan_cb;
-#endif
ieee->tx_pending.txb = NULL;
init_timer(&ieee->associate_timer);
@@ -3129,16 +3027,12 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->beacon_timer.data = (unsigned long) ieee;
ieee->beacon_timer.function = ieee80211_send_beacon_cb;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
#ifdef PF_SYNCTHREAD
ieee->wq = create_workqueue(DRV_NAME,0);
#else
ieee->wq = create_workqueue(DRV_NAME);
#endif
-#endif
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
INIT_DELAYED_WORK(&ieee->start_ibss_wq,ieee80211_start_ibss_wq);
INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
INIT_WORK(&ieee->associate_procedure_wq, ieee80211_associate_procedure_wq);
@@ -3146,23 +3040,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
INIT_DELAYED_WORK(&ieee->associate_retry_wq, ieee80211_associate_retry_wq);
INIT_WORK(&ieee->wx_sync_scan_wq,ieee80211_wx_sync_scan_wq);
-#else
- INIT_WORK(&ieee->start_ibss_wq,(void(*)(void*)) ieee80211_start_ibss_wq,ieee);
- INIT_WORK(&ieee->associate_retry_wq,(void(*)(void*)) ieee80211_associate_retry_wq,ieee);
- INIT_WORK(&ieee->associate_complete_wq,(void(*)(void*)) ieee80211_associate_complete_wq,ieee);
- INIT_WORK(&ieee->associate_procedure_wq,(void(*)(void*)) ieee80211_associate_procedure_wq,ieee);
- INIT_WORK(&ieee->softmac_scan_wq,(void(*)(void*)) ieee80211_softmac_scan_wq,ieee);
- INIT_WORK(&ieee->wx_sync_scan_wq,(void(*)(void*)) ieee80211_wx_sync_scan_wq,ieee);
-#endif
-
-#else
- tq_init(&ieee->start_ibss_wq,(void(*)(void*)) ieee80211_start_ibss_wq,ieee);
- tq_init(&ieee->associate_retry_wq,(void(*)(void*)) ieee80211_associate_retry_wq,ieee);
- tq_init(&ieee->associate_complete_wq,(void(*)(void*)) ieee80211_associate_complete_wq,ieee);
- tq_init(&ieee->associate_procedure_wq,(void(*)(void*)) ieee80211_associate_procedure_wq,ieee);
- tq_init(&ieee->softmac_scan_wq,(void(*)(void*)) ieee80211_softmac_scan_wq,ieee);
- tq_init(&ieee->wx_sync_scan_wq,(void(*)(void*)) ieee80211_wx_sync_scan_wq,ieee);
-#endif
sema_init(&ieee->wx_sem, 1);
sema_init(&ieee->scan_sem, 1);
#ifdef ENABLE_IPS
@@ -3189,10 +3066,8 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
#endif
del_timer_sync(&ieee->associate_timer);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
cancel_delayed_work(&ieee->associate_retry_wq);
destroy_workqueue(ieee->wq);
-#endif
up(&ieee->wx_sem);
}
@@ -3647,49 +3522,3 @@ void notify_wx_assoc_event(struct ieee80211_device *ieee)
memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
}
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_get_beacon);
-//EXPORT_SYMBOL(ieee80211_rtl_wake_queue);
-//EXPORT_SYMBOL(ieee80211_rtl_stop_queue);
-//EXPORT_SYMBOL(ieee80211_reset_queue);
-//EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
-//EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
-//EXPORT_SYMBOL(ieee80211_is_shortslot);
-//EXPORT_SYMBOL(ieee80211_is_54g);
-//EXPORT_SYMBOL(ieee80211_wpa_supplicant_ioctl);
-//EXPORT_SYMBOL(ieee80211_ps_tx_ack);
-//EXPORT_SYMBOL(ieee80211_softmac_xmit);
-//EXPORT_SYMBOL(ieee80211_stop_send_beacons);
-//EXPORT_SYMBOL(notify_wx_assoc_event);
-//EXPORT_SYMBOL(SendDisassociation);
-//EXPORT_SYMBOL(ieee80211_disassociate);
-//EXPORT_SYMBOL(ieee80211_start_send_beacons);
-//EXPORT_SYMBOL(ieee80211_stop_scan);
-//EXPORT_SYMBOL(ieee80211_send_probe_requests);
-//EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
-//EXPORT_SYMBOL(ieee80211_start_scan_syncro);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_get_beacon);
-EXPORT_SYMBOL_NOVERS(ieee80211_rtl_wake_queue);
-EXPORT_SYMBOL_NOVERS(ieee80211_rtl_stop_queue);
-EXPORT_SYMBOL_NOVERS(ieee80211_reset_queue);
-EXPORT_SYMBOL_NOVERS(ieee80211_softmac_stop_protocol);
-EXPORT_SYMBOL_NOVERS(ieee80211_softmac_start_protocol);
-EXPORT_SYMBOL_NOVERS(ieee80211_is_shortslot);
-EXPORT_SYMBOL_NOVERS(ieee80211_is_54g);
-EXPORT_SYMBOL_NOVERS(ieee80211_wpa_supplicant_ioctl);
-EXPORT_SYMBOL_NOVERS(ieee80211_ps_tx_ack);
-EXPORT_SYMBOL_NOVERS(ieee80211_softmac_xmit);
-EXPORT_SYMBOL_NOVERS(ieee80211_stop_send_beacons);
-EXPORT_SYMBOL_NOVERS(notify_wx_assoc_event);
-EXPORT_SYMBOL_NOVERS(SendDisassociation);
-EXPORT_SYMBOL_NOVERS(ieee80211_disassociate);
-EXPORT_SYMBOL_NOVERS(ieee80211_start_send_beacons);
-EXPORT_SYMBOL_NOVERS(ieee80211_stop_scan);
-EXPORT_SYMBOL_NOVERS(ieee80211_send_probe_requests);
-EXPORT_SYMBOL_NOVERS(ieee80211_softmac_scan_syncro);
-EXPORT_SYMBOL_NOVERS(ieee80211_start_scan_syncro);
-EXPORT_SYMBOL_NOVERS(ieee80211_sta_ps_send_null_frame);
-EXPORT_SYMBOL_NOVERS(ieee80211_sta_ps_send_pspoll_frame);
-#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
index 1bbd49f1d6f..d0a10807f7f 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
@@ -312,14 +312,9 @@ out:
return 0;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
void ieee80211_wx_sync_scan_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq);
-#else
-void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee)
-{
-#endif
short chan;
HT_EXTCHNL_OFFSET chan_offset=0;
HT_CHANNEL_WIDTH bandwidth=0;
@@ -337,8 +332,6 @@ void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee)
ieee80211_sta_ps_send_null_frame(ieee, 1);
#endif
- netif_carrier_off(ieee->dev);
-
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
@@ -389,7 +382,6 @@ void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee)
if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
ieee80211_start_send_beacons(ieee);
- netif_carrier_on(ieee->dev);
count = 0;
up(&ieee->wx_sem);
@@ -408,11 +400,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
}
if ( ieee->state == IEEE80211_LINKED){
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
-#else
- schedule_task(&ieee->wx_sync_scan_wq);
-#endif
/* intentionally forget to up sem */
return 0;
}
@@ -459,29 +447,8 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
if (wrqu->essid.flags && wrqu->essid.length) {
//first flush current network.ssid
len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length-1) : IW_ESSID_MAX_SIZE;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- strncpy(ieee->current_network.ssid, extra, len);
- ieee->current_network.ssid_len = len;
-#if 0
- {
- int i;
- for (i=0; i<len; i++)
- printk("%c ", extra[i]);
- printk("\n");
- }
-#endif
-#else
strncpy(ieee->current_network.ssid, extra, len+1);
ieee->current_network.ssid_len = len+1;
-#if 0
- {
- int i;
- for (i=0; i<len + 1; i++)
- printk("%c ", extra[i]);
- printk("\n");
- }
-#endif
-#endif
ieee->ssid_set = 1;
}
else{
@@ -659,42 +626,4 @@ exit:
return ret;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_wx_get_essid);
-//EXPORT_SYMBOL(ieee80211_wx_set_essid);
-//EXPORT_SYMBOL(ieee80211_wx_set_rate);
-//EXPORT_SYMBOL(ieee80211_wx_get_rate);
-//EXPORT_SYMBOL(ieee80211_wx_set_wap);
-//EXPORT_SYMBOL(ieee80211_wx_get_wap);
-//EXPORT_SYMBOL(ieee80211_wx_set_mode);
-//EXPORT_SYMBOL(ieee80211_wx_get_mode);
-//EXPORT_SYMBOL(ieee80211_wx_set_scan);
-//EXPORT_SYMBOL(ieee80211_wx_get_freq);
-//EXPORT_SYMBOL(ieee80211_wx_set_freq);
-//EXPORT_SYMBOL(ieee80211_wx_set_rawtx);
-//EXPORT_SYMBOL(ieee80211_wx_get_name);
-//EXPORT_SYMBOL(ieee80211_wx_set_power);
-//EXPORT_SYMBOL(ieee80211_wx_get_power);
-//EXPORT_SYMBOL(ieee80211_wlan_frequencies);
-//EXPORT_SYMBOL(ieee80211_wx_set_rts);
-//EXPORT_SYMBOL(ieee80211_wx_get_rts);
-#else
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_essid);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_essid);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_rate);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_rate);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_wap);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_wap);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_mode);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_mode);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_scan);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_freq);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_freq);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_rawtx);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_name);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_power);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_power);
-EXPORT_SYMBOL_NOVERS(ieee80211_wlan_frequencies);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_rts);
-EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_rts);
-#endif
+
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
index a75f3668a40..dd8a221e21a 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
@@ -286,12 +286,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
if (eth->h_proto != htons(ETH_P_IP))
return 0;
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
ip = ip_hdr(skb);
-#else
- ip = (struct iphdr*)(skb->data + sizeof(struct ether_header));
-#endif
switch (ip->tos & 0xfc) {
case 0x20:
return 2;
@@ -613,11 +608,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u
int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct ieee80211_device *ieee = netdev_priv(dev);
-#else
- struct ieee80211_device *ieee = (struct ieee80211_device *)dev->priv;
-#endif
struct ieee80211_txb *txb = NULL;
struct ieee80211_hdr_3addrqos *frag_hdr;
int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
index 4971b1c8e7d..b74491c38ec 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
@@ -54,25 +54,7 @@ struct modes_unit ieee80211_modes[] = {
{"N-5G",4},
};
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,20)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-static inline char *
-iwe_stream_add_event_rsl(char * stream, /* Stream of events */
- char * ends, /* End of stream */
- struct iw_event *iwe, /* Payload */
- int event_len) /* Real size of payload */
-{
- /* Check if it's possible */
- if((stream + event_len) < ends) {
- iwe->len = event_len;
- ndelay(1); //new
- memcpy(stream, (char *) iwe, event_len);
- stream += event_len;
- }
- return stream;
-}
-#else
#define iwe_stream_add_event_rsl iwe_stream_add_event
-#endif
#define MAX_CUSTOM_LEN 64
static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
@@ -93,11 +75,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN);
-#else
- start = iwe_stream_add_event_rsl(start, stop, &iwe, IW_EV_ADDR_LEN);
-#endif
/* Remaining entries will be displayed in the order we provide them */
/* Add the ESSID */
@@ -106,22 +84,14 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
// if (network->flags & NETWORK_EMPTY_ESSID) {
if (network->ssid_len == 0) {
iwe.u.data.length = sizeof("<hidden>");
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
-#else
- start = iwe_stream_add_point(start, stop, &iwe, "<hidden>");
-#endif
} else {
iwe.u.data.length = min(network->ssid_len, (u8)32);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
-#endif
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
- for(i=0; i<(sizeof(ieee80211_modes)/sizeof(ieee80211_modes[0])); i++) {
+ for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
if(network->mode&(1<<i)) {
sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
pname +=ieee80211_modes[i].mode_size;
@@ -129,11 +99,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
}
*pname = '\0';
snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN);
-#else
- start = iwe_stream_add_event_rsl(start, stop, &iwe, IW_EV_CHAR_LEN);
-#endif
/* Add mode */
iwe.cmd = SIOCGIWMODE;
if (network->capability &
@@ -142,11 +108,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN);
-#else
- start = iwe_stream_add_event_rsl(start, stop, &iwe, IW_EV_UINT_LEN);
-#endif
}
/* Add frequency/channel */
@@ -156,11 +118,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.u.freq.m = network->channel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN);
-#else
- start = iwe_stream_add_event_rsl(start, stop, &iwe, IW_EV_FREQ_LEN);
-#endif
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
if (network->capability & WLAN_CAPABILITY_PRIVACY)
@@ -168,11 +126,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
-#endif
/* Add basic and extended rates */
max_rate = 0;
p = custom;
@@ -216,33 +170,15 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
if (rate > max_rate)
max_rate = rate;
}
-#if 0
- printk("max rate:%d ===basic rate:\n", max_rate);
- for (i=0;i<network->rates_len;i++)
- printk(" %x", network->rates[i]);
- printk("\n=======extend rate\n");
- for (i=0; i<network->rates_ex_len; i++)
- printk(" %x", network->rates_ex[i]);
- printk("\n");
-#endif
iwe.cmd = SIOCGIWRATE;
iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.value = max_rate * 500000;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
IW_EV_PARAM_LEN);
-#else
- start = iwe_stream_add_event_rsl(start, stop, &iwe,
- IW_EV_PARAM_LEN);
-#endif
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, custom);
-#endif
/* Add quality statistics */
/* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
@@ -257,21 +193,13 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
iwe.u.qual.updated = 7;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN);
-#else
- start = iwe_stream_add_event_rsl(start, stop, &iwe, IW_EV_QUAL_LEN);
-#endif
iwe.cmd = IWEVCUSTOM;
p = custom;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, custom);
-#endif
#if (WIRELESS_EXT < 18)
if (ieee->wpa_enabled && network->wpa_ie_len){
char buf[MAX_WPA_IE_LEN * 2 + 30];
@@ -285,11 +213,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, buf);
-#endif
}
if (ieee->wpa_enabled && network->rsn_ie_len){
@@ -304,11 +228,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, buf);
-#endif
}
#else
memset(&iwe, 0, sizeof(iwe));
@@ -318,11 +238,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
memcpy(buf, network->wpa_ie, network->wpa_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->wpa_ie_len;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, buf);
-#endif
}
memset(&iwe, 0, sizeof(iwe));
if (network->rsn_ie_len)
@@ -331,11 +247,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
memcpy(buf, network->rsn_ie, network->rsn_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->rsn_ie_len;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, buf);
-#endif
}
#endif
@@ -348,11 +260,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
-#else
- start = iwe_stream_add_point(start, stop, &iwe, custom);
-#endif
return start;
}
@@ -632,7 +540,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
struct net_device *dev = ieee->dev;
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
@@ -807,7 +714,6 @@ done:
IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
return -EINVAL;
}
-#endif
return ret;
}
@@ -870,7 +776,6 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
struct iw_mlme *mlme = (struct iw_mlme *) extra;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
@@ -880,7 +785,6 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
default:
return -EOPNOTSUPP;
}
-#endif
return 0;
}
@@ -888,7 +792,6 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
/*need to support wpa2 here*/
@@ -946,23 +849,12 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
default:
return -EOPNOTSUPP;
}
-#endif
return 0;
}
#endif
#if 1
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
-#if 0
- printk("====>%s()\n", __FUNCTION__);
- {
- int i;
- for (i=0; i<len; i++)
- printk("%2x ", ie[i]&0xff);
- printk("\n");
- }
-#endif
u8 *buf;
if (len>MAX_WPA_IE_LEN || (len && ie == NULL))
@@ -992,29 +884,7 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}
-#endif
return 0;
}
#endif
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-//EXPORT_SYMBOL(ieee80211_wx_set_gen_ie);
-#if (WIRELESS_EXT >= 18)
-//EXPORT_SYMBOL(ieee80211_wx_set_mlme);
-//EXPORT_SYMBOL(ieee80211_wx_set_auth);
-//EXPORT_SYMBOL(ieee80211_wx_set_encode_ext);
-//EXPORT_SYMBOL(ieee80211_wx_get_encode_ext);
-#endif
-//EXPORT_SYMBOL(ieee80211_wx_get_scan);
-//EXPORT_SYMBOL(ieee80211_wx_set_encode);
-//EXPORT_SYMBOL(ieee80211_wx_get_encode);
-#else
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_gen_ie);
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_mlme);
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_auth);
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_encode_ext);
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_scan);
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_set_encode);
-//EXPORT_SYMBOL_NOVERS(ieee80211_wx_get_encode);
-#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c
index 4c4b1df350a..b0c9c78eca4 100644
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c
@@ -1024,17 +1024,6 @@ u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperate
return true;
}
void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-#if 0
-//I need move this function to other places, such as rx?
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
-void HTOnAssocRsp_wq(struct work_struct *work)
-{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, ht_onAssRsp);
-#else
-void HTOnAssocRsp_wq(struct ieee80211_device *ieee)
-{
-#endif
-#endif
void HTOnAssocRsp(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -1760,9 +1749,3 @@ void HTSetConnectBwModeCallback(struct ieee80211_device* ieee)
pHTInfo->bSwBwInProgress = false;
}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-//EXPORT_SYMBOL_NOVERS(HTUpdateSelfAndPeerSetting);
-#else
-//EXPORT_SYMBOL(HTUpdateSelfAndPeerSetting);
-#endif
diff --git a/drivers/staging/rtl8192e/r8190_rtl8256.c b/drivers/staging/rtl8192e/r8190_rtl8256.c
index 7391f5f8f25..8bd5b173a7d 100644
--- a/drivers/staging/rtl8192e/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192e/r8190_rtl8256.c
@@ -501,13 +501,13 @@ SetRFPowerState8190(
if((priv->ieee80211->eRFPowerState == eRfOff) && RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC))
{ // The current RF state is OFF and the RF OFF level is halting the NIC, re-initialize the NIC.
bool rtstatus = true;
- u32 InitilizeCount = 3;
+ u32 InitializeCount = 3;
do
{
- InitilizeCount--;
+ InitializeCount--;
priv->RegRfOff = false;
rtstatus = NicIFEnableNIC(dev);
- }while( (rtstatus != true) &&(InitilizeCount >0) );
+ }while( (rtstatus != true) &&(InitializeCount >0) );
if(rtstatus != true)
{
diff --git a/drivers/staging/rtl8192e/r8192E.h b/drivers/staging/rtl8192e/r8192E.h
index f4be9cc1100..865cdc00897 100644
--- a/drivers/staging/rtl8192e/r8192E.h
+++ b/drivers/staging/rtl8192e/r8192E.h
@@ -1468,7 +1468,6 @@ typedef union _AC_PARAM{
#endif
bool init_firmware(struct net_device *dev);
-void rtl819xE_tx_cmd(struct net_device *dev, struct sk_buff *skb);
short rtl8192_tx(struct net_device *dev, struct sk_buff* skb);
u32 read_cam(struct net_device *dev, u8 addr);
void write_cam(struct net_device *dev, u8 addr, u32 data);
@@ -1503,10 +1502,9 @@ void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
void rtl8185_tx_antenna(struct net_device *dev, u8 ant);
void rtl8187_set_rxconf(struct net_device *dev);
//short check_nic_enough_desc(struct net_device *dev, priority_t priority);
-void rtl8192_start_beacon(struct net_device *dev);
void CamResetAllEntry(struct net_device* dev);
void EnableHWSecurityConfig8192(struct net_device *dev);
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent );
+void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent );
void CamPrintDbgReg(struct net_device* dev);
extern void dm_cck_txpower_adjust(struct net_device *dev,bool binch14);
extern void firmware_init_param(struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index eb41402d1d3..17a806f9ee7 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -92,12 +92,8 @@ u32 rt_global_debug_component = \
// COMP_POWER_TRACKING |
// COMP_INTR |
COMP_ERR ; // always leave the error flag on
-#ifndef PCI_DEVICE
-#define PCI_DEVICE(vend,dev)\
- .vendor=(vend),.device=(dev),\
- .subvendor=PCI_ANY_ID,.subdevice=PCI_ANY_ID
-#endif
-static struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
+
+static const struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
#ifdef RTL8190P
/* Realtek */
/* Dlink */
@@ -116,7 +112,7 @@ static struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
{}
};
-static char* ifname = "wlan%d";
+static char ifname[IFNAMSIZ] = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
@@ -127,7 +123,7 @@ MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
+module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
//module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
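
Note on the hunk above: a writable charp parameter can end up pointing at a constant string literal, which is why the driver switches ifname to a fixed-size buffer registered via module_param_string() (and later overwrites it in place with strcpy(); see the probe hunk further down). A minimal standalone sketch of that pattern, with module and symbol names chosen here for illustration only:

#include <linux/if.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

/* sketch only: a fixed-size, writable buffer exposed as a module parameter */
static char ifname[IFNAMSIZ] = "wlan%d";
module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ifname, "interface name template");

static int __init ifname_sketch_init(void)
{
	/* safe: ifname is an array owned by this module, not a string literal */
	strlcpy(ifname, "wlan%d", sizeof(ifname));
	return 0;
}

static void __exit ifname_sketch_exit(void)
{
}

module_init(ifname_sketch_init);
module_exit(ifname_sketch_exit);
MODULE_LICENSE("GPL");
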
@@ -155,6 +151,16 @@ static struct pci_driver rtl8192_pci_driver = {
#endif
};
+static void rtl8192_start_beacon(struct net_device *dev);
+static void rtl8192_stop_beacon(struct net_device *dev);
+static void rtl819x_watchdog_wqcallback(struct work_struct *work);
+static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+static void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
+static void rtl8192_prepare_beacon(struct r8192_priv *priv);
+static irqreturn_t rtl8192_interrupt(int irq, void *netdev);
+static void rtl8192_try_wake_queue(struct net_device *dev, int pri);
+static void rtl819xE_tx_cmd(struct net_device *dev, struct sk_buff *skb);
+
#ifdef ENABLE_DOT11D
typedef struct _CHANNEL_LIST
@@ -163,7 +169,7 @@ typedef struct _CHANNEL_LIST
u8 Len;
}CHANNEL_LIST, *PCHANNEL_LIST;
-static CHANNEL_LIST ChannelPlan[] = {
+static const CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
{{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
@@ -349,8 +355,8 @@ u8 rtl8192e_ap_sec_type(struct ieee80211_device *ieee)
//struct r8192_priv* priv = ieee80211_priv(dev);
//struct ieee80211_device *ieee = priv->ieee80211;
- static u8 ccmp_ie[4] = {0x00,0x50,0xf2,0x04};
- static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
+ static const u8 ccmp_ie[4] = {0x00,0x50,0xf2,0x04};
+ static const u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
int wpa_ie_len= ieee->wpa_ie_len;
struct ieee80211_crypt_data* crypt;
int encrypt;
@@ -487,15 +493,13 @@ rtl8192e_SetHwReg(struct net_device *dev,u8 variable,u8* val)
/* this might still be called in what was the PHY rtl8185/rtl8192 common code;
 * plans are to possibly turn it into one common code path again...
 */
-inline void force_pci_posting(struct net_device *dev)
+void force_pci_posting(struct net_device *dev)
{
}
//warning message WB
-irqreturn_t rtl8192_interrupt(int irq, void *netdev);
//static struct net_device_stats *rtl8192_stats(struct net_device *dev);
-void rtl8192_commit(struct net_device *dev);
//void rtl8192_restart(struct net_device *dev);
void rtl8192_restart(struct work_struct *work);
//void rtl8192_rq_tx_ack(struct work_struct *work);
@@ -940,7 +944,7 @@ void rtl8192_rx_enable(struct net_device *dev)
* HIGH_QUEUE ===> 7
* BEACON_QUEUE ===> 8
* */
-static u32 TX_DESC_BASE[] = {BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA};
+static const u32 TX_DESC_BASE[] = {BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA};
void rtl8192_tx_enable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -1116,7 +1120,7 @@ static void rtl8192_reset(struct net_device *dev)
}
#endif
-static u16 rtl_rate[] = {10,20,55,110,60,90,120,180,240,360,480,540};
+static const u16 rtl_rate[] = {10,20,55,110,60,90,120,180,240,360,480,540};
inline u16 rtl8192_rate2rate(short rate)
{
if (rate >11) return 0;
@@ -1252,8 +1256,6 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb,struct net_device *dev)
}
-void rtl8192_try_wake_queue(struct net_device *dev, int pri);
-
static void rtl8192_tx_isr(struct net_device *dev, int prio)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -1733,11 +1735,6 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
pdesc->NoEnc = 1;
pdesc->SecType = 0x0;
if (tcb_desc->bHwSec) {
- static u8 tmp =0;
- if (!tmp) {
- printk("==>================hw sec\n");
- tmp = 1;
- }
switch (priv->ieee80211->pairwise_key_type) {
case KEY_TYPE_WEP40:
case KEY_TYPE_WEP104:
@@ -1988,7 +1985,7 @@ static void rtl8192_update_beacon(struct work_struct * work)
/*
* background support to run QoS activate functionality
*/
-static int WDCAPARA_ADD[] = {EDCAPARA_BE,EDCAPARA_BK,EDCAPARA_VI,EDCAPARA_VO};
+static const int WDCAPARA_ADD[] = {EDCAPARA_BE,EDCAPARA_BK,EDCAPARA_VI,EDCAPARA_VO};
static void rtl8192_qos_activate(struct work_struct * work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
@@ -2646,11 +2643,6 @@ static void rtl8192_init_priv_lock(struct r8192_priv* priv)
mutex_init(&priv->mutex);
}
-extern void rtl819x_watchdog_wqcallback(struct work_struct *work);
-
-void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
-void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
-void rtl8192_prepare_beacon(struct r8192_priv *priv);
// init tasklet and wait_queue here; only kernels 2.6 and above are considered
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct net_device* dev)
@@ -3807,7 +3799,7 @@ static RT_STATUS rtl8192_adapter_start(struct net_device *dev)
}
-void rtl8192_prepare_beacon(struct r8192_priv *priv)
+static void rtl8192_prepare_beacon(struct r8192_priv *priv)
{
struct sk_buff *skb;
//unsigned long flags;
@@ -3837,7 +3829,7 @@ void rtl8192_prepare_beacon(struct r8192_priv *priv)
* rtl8192_beacon_tx_enable(). rtl8192_beacon_tx_disable() might
* be used to stop beacon transmission
*/
-void rtl8192_start_beacon(struct net_device *dev)
+static void rtl8192_start_beacon(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct ieee80211_network *net = &priv->ieee80211->current_network;
@@ -4124,14 +4116,14 @@ static void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- u8* MacAddr = priv->ieee80211->current_network.bssid;
+ const u8* MacAddr = priv->ieee80211->current_network.bssid;
- static u8 CAM_CONST_ADDR[4][6] = {
+ static const u8 CAM_CONST_ADDR[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}};
- static u8 CAM_CONST_BROAD[] =
+ static const u8 CAM_CONST_BROAD[] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
RT_TRACE(COMP_SEC, "CamRestoreAllEntry: \n");
@@ -4318,7 +4310,6 @@ RESET_START:
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
ieee80211_stop_scan(ieee);
- netif_carrier_off(dev);
up(&ieee->wx_sem);
}
else{
@@ -4669,7 +4660,7 @@ static void rtl819x_update_rxcounts(
}
-void rtl819x_watchdog_wqcallback(struct work_struct *work)
+static void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,watch_dog_wq);
@@ -6076,7 +6067,7 @@ static void rtl8192_tx_resume(struct net_device *dev)
}
}
-void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
{
rtl8192_tx_resume(priv->ieee80211->dev);
}
@@ -6305,7 +6296,7 @@ done:
}
-void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
{
rtl8192_rx(priv->ieee80211->dev);
/* unmask RDU */
@@ -6455,7 +6446,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
if (dev_alloc_name(dev, ifname) < 0){
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
+ strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
@@ -6640,7 +6631,7 @@ static void __exit rtl8192_pci_module_exit(void)
}
//warning message WB
-irqreturn_t rtl8192_interrupt(int irq, void *netdev)
+static irqreturn_t rtl8192_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -6784,7 +6775,7 @@ irqreturn_t rtl8192_interrupt(int irq, void *netdev)
return IRQ_HANDLED;
}
-void rtl8192_try_wake_queue(struct net_device *dev, int pri)
+static void rtl8192_try_wake_queue(struct net_device *dev, int pri)
{
#if 0
unsigned long flags;
@@ -6847,7 +6838,7 @@ void setKey( struct net_device *dev,
u8 EntryNo,
u8 KeyIndex,
u16 KeyType,
- u8 *MacAddr,
+ const u8 *MacAddr,
u8 DefaultKey,
u32 *KeyContent )
{
diff --git a/drivers/staging/rtl8192e/r8192E_dm.c b/drivers/staging/rtl8192e/r8192E_dm.c
index a249f00da60..a5884c6bcc2 100644
--- a/drivers/staging/rtl8192e/r8192E_dm.c
+++ b/drivers/staging/rtl8192e/r8192E_dm.c
@@ -26,20 +26,20 @@ Major Change History:
// Indicate different AP vendor for IOT issue.
//
#ifdef RTL8190P
-static u32 edca_setting_DL[HT_IOT_PEER_MAX] =
+static const u32 edca_setting_DL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5e4322, 0x5e4322};
-static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
+static const u32 edca_setting_UL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5e4322, 0x5e4322, 0x5e4322};
#else
#ifdef RTL8192E
-static u32 edca_setting_DL[HT_IOT_PEER_MAX] =
+static const u32 edca_setting_DL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5e4322, 0x5e4322};
-static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
+static const u32 edca_setting_UL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5e4322, 0x5e4322, 0x5e4322};
#else
-static u32 edca_setting_DL[HT_IOT_PEER_MAX] =
+static const u32 edca_setting_DL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f, 0x5e4322};
-static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
+static const u32 edca_setting_UL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f, 0x5e4322};
#endif
#endif
@@ -592,7 +592,7 @@ static void dm_bandwidth_autoswitch(struct net_device * dev)
//OFDM default at 0db, index=6.
#ifndef RTL8190P
-static u32 OFDMSwingTable[OFDM_Table_Length] = {
+static const u32 OFDMSwingTable[OFDM_Table_Length] = {
0x7f8001fe, // 0, +6db
0x71c001c7, // 1, +5db
0x65400195, // 2, +4db
@@ -613,7 +613,7 @@ static u32 OFDMSwingTable[OFDM_Table_Length] = {
0x12000048, // 17, -11db
0x10000040 // 18, -12db
};
-static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
+static const u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, // 0, +0db ===> CCK40M default
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 1, -1db
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 2, -2db
@@ -628,7 +628,7 @@ static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} // 11, -11db
};
-static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
+static const u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, // 0, +0db ===> CCK40M default
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 1, -1db
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 2, -2db
@@ -2094,8 +2094,6 @@ static void dm_ctrl_initgain_byrssi(struct net_device *dev)
dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
else if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
dm_ctrl_initgain_byrssi_by_driverrssi(dev);
- else
- return;
}
@@ -2938,8 +2936,6 @@ void dm_gpio_change_rf_callback(struct work_struct *work)
RT_RF_POWER_STATE eRfPowerStateToSet;
bool bActuallySet = false;
- bActuallySet=false;
-
if(!priv->up)
{
RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),"dm_gpio_change_rf_callback(): Callback function breaks out!!\n");
diff --git a/drivers/staging/rtl8192e/r8192E_wx.c b/drivers/staging/rtl8192e/r8192E_wx.c
index 0b0f39ce3ce..5742cee8120 100644
--- a/drivers/staging/rtl8192e/r8192E_wx.c
+++ b/drivers/staging/rtl8192e/r8192E_wx.c
@@ -26,7 +26,7 @@
#endif
#define RATE_COUNT 12
-static u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000,
+static const u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000,
6000000,9000000,12000000,18000000,24000000,36000000,48000000,54000000};
@@ -137,161 +137,6 @@ static int r8192_wx_get_power(struct net_device *dev,
return ieee80211_wx_get_power(priv->ieee80211,info,wrqu,extra);
}
-#ifdef JOHN_IOCTL
-u16 read_rtl8225(struct net_device *dev, u8 addr);
-void write_rtl8225(struct net_device *dev, u8 adr, u16 data);
-u32 john_read_rtl8225(struct net_device *dev, u8 adr);
-void _write_rtl8225(struct net_device *dev, u8 adr, u16 data);
-
-static int r8192_wx_read_regs(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 addr;
- u16 data1;
-
- down(&priv->wx_sem);
-
-
- get_user(addr,(u8*)wrqu->data.pointer);
- data1 = read_rtl8225(dev, addr);
- wrqu->data.length = data1;
-
- up(&priv->wx_sem);
- return 0;
-
-}
-
-static int r8192_wx_write_regs(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 addr;
-
- down(&priv->wx_sem);
-
- get_user(addr, (u8*)wrqu->data.pointer);
- write_rtl8225(dev, addr, wrqu->data.length);
-
- up(&priv->wx_sem);
- return 0;
-
-}
-
-void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
-u8 rtl8187_read_phy(struct net_device *dev,u8 adr, u32 data);
-
-static int r8192_wx_read_bb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 databb;
-#if 0
- int i;
- for(i=0;i<12;i++) printk("%8x\n", read_cam(dev, i) );
-#endif
-
- down(&priv->wx_sem);
-
- databb = rtl8187_read_phy(dev, (u8)wrqu->data.length, 0x00000000);
- wrqu->data.length = databb;
-
- up(&priv->wx_sem);
- return 0;
-}
-
-void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
-static int r8192_wx_write_bb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 databb;
-
- down(&priv->wx_sem);
-
- get_user(databb, (u8*)wrqu->data.pointer);
- rtl8187_write_phy(dev, wrqu->data.length, databb);
-
- up(&priv->wx_sem);
- return 0;
-
-}
-
-
-static int r8192_wx_write_nicb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 addr;
-
- down(&priv->wx_sem);
-
- get_user(addr, (u32*)wrqu->data.pointer);
- write_nic_byte(dev, addr, wrqu->data.length);
-
- up(&priv->wx_sem);
- return 0;
-
-}
-static int r8192_wx_read_nicb(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u32 addr;
- u16 data1;
-
- down(&priv->wx_sem);
-
- get_user(addr,(u32*)wrqu->data.pointer);
- data1 = read_nic_byte(dev, addr);
- wrqu->data.length = data1;
-
- up(&priv->wx_sem);
- return 0;
-}
-
-static int r8192_wx_get_ap_status(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *target;
- int name_len;
-
- down(&priv->wx_sem);
-
- //count the length of input ssid
- for(name_len=0 ; ((char*)wrqu->data.pointer)[name_len]!='\0' ; name_len++);
-
- //search for the correspoding info which is received
- list_for_each_entry(target, &ieee->network_list, list) {
- if ( (target->ssid_len == name_len) &&
- (strncmp(target->ssid, (char*)wrqu->data.pointer, name_len)==0)){
- if(target->wpa_ie_len>0 || target->rsn_ie_len>0 )
- //set flags=1 to indicate this ap is WPA
- wrqu->data.flags = 1;
- else wrqu->data.flags = 0;
-
-
- break;
- }
- }
-
- up(&priv->wx_sem);
- return 0;
-}
-
-
-
-#endif
-
static int r8192_wx_set_rawtx(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
diff --git a/drivers/staging/rtl8192e/r819xE_phy.c b/drivers/staging/rtl8192e/r819xE_phy.c
index 7bd4fae0667..ffd1e97e27b 100644
--- a/drivers/staging/rtl8192e/r819xE_phy.c
+++ b/drivers/staging/rtl8192e/r819xE_phy.c
@@ -7,7 +7,7 @@
#ifdef ENABLE_DOT11D
#include "ieee80211/dot11d.h"
#endif
-static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
+static const u32 RF_CHANNEL_TABLE_ZEBRA[] = {
0,
0x085c, //2412 1
0x08dc, //2417 2
diff --git a/drivers/staging/rtl8192su/Kconfig b/drivers/staging/rtl8192su/Kconfig
index b422ea1ecf9..27b89a43267 100644
--- a/drivers/staging/rtl8192su/Kconfig
+++ b/drivers/staging/rtl8192su/Kconfig
@@ -4,5 +4,6 @@ config RTL8192SU
select WIRELESS_EXT
select WEXT_PRIV
select EEPROM_93CX6
+ select CRYPTO
default N
---help---
diff --git a/drivers/staging/rtl8192su/TODO b/drivers/staging/rtl8192su/TODO
index 3c8da157a93..b15204ea4ec 100644
--- a/drivers/staging/rtl8192su/TODO
+++ b/drivers/staging/rtl8192su/TODO
@@ -1,4 +1,10 @@
TODO:
+- merge Realtek's bugfixes and new features into the driver:
+  - an updated version of this driver can be found here:
+    http://www.getnet.eu/products_GN-621U.html
+  - note:
+    Realtek has stripped almost all comments from the source,
+    so please keep any comments in the code that may help development.
- prepare private ieee80211 stack for merge with rtl8187se's version:
- remove rtl8192su's specific dead code
- cleanup ieee80211.h
@@ -7,7 +13,6 @@ TODO:
- switch to use shared "librtl" instead of private ieee80211 stack
- switch to use LIB80211
- switch to use MAC80211
-- switch to use EEPROM_93CX6
- use kernel coding style
- checkpatch.pl fixes
- sparse fixes
diff --git a/drivers/staging/rtl8192su/ieee80211/dot11d.c b/drivers/staging/rtl8192su/ieee80211/dot11d.c
index 22484621478..6275cc75ec8 100644
--- a/drivers/staging/rtl8192su/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192su/ieee80211/dot11d.c
@@ -1,11 +1,21 @@
-//-----------------------------------------------------------------------------
-// File:
-// Dot11d.c
-//
-// Description:
-// Implement 802.11d.
-//
-//-----------------------------------------------------------------------------
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#include "dot11d.h"
@@ -50,7 +60,6 @@ Dot11d_Reset(struct ieee80211_device *ieee)
pDot11dInfo->CountryIeLen = 0;
RESET_CIE_WATCHDOG(ieee);
- //printk("Dot11d_Reset()\n");
}
//
@@ -105,7 +114,6 @@ Dot11d_UpdateCountryIe(
pTriple = (PCHNL_TXPOWER_TRIPLE)((u8*)pTriple + 3);
}
#if 1
- //printk("Dot11d_UpdateCountryIe(): Channel List:\n");
printk("Channel List:");
for(i=1; i<= MAX_CHANNEL_NUMBER; i++)
if(pDot11dInfo->channel_map[i] > 0)
diff --git a/drivers/staging/rtl8192su/ieee80211/dot11d.h b/drivers/staging/rtl8192su/ieee80211/dot11d.h
index 913ac5d97e7..62a2c905e1f 100644
--- a/drivers/staging/rtl8192su/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192su/ieee80211/dot11d.h
@@ -1,10 +1,26 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef __INC_DOT11D_H
#define __INC_DOT11D_H
#include "ieee80211.h"
-//#define DOT11D_MAX_CHNL_NUM 83
-
typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
u8 NumChnls;
@@ -18,7 +34,6 @@ typedef enum _DOT11D_STATE {
}DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
- //DECLARE_RT_OBJECT(RT_DOT11D_INFO);
bool bEnabled; // dot11MultiDomainCapabilityEnabled
@@ -28,8 +43,6 @@ typedef struct _RT_DOT11D_INFO {
u8 CountryIeWatchdog;
u8 channel_map[MAX_CHANNEL_NUMBER+1]; //!!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan)
- //u8 ChnlListLen; // #Bytes valid in ChnlList[].
- //u8 ChnlList[DOT11D_MAX_CHNL_NUM];
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
@@ -95,4 +108,4 @@ int ToLegalChannel(
struct ieee80211_device * dev,
u8 channel
);
-#endif // #ifndef __INC_DOT11D_H
+#endif
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
index bcb2b12a839..1d6789db4e4 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
@@ -168,6 +168,10 @@ typedef struct ieee_param {
/* QOS control */
#define IEEE80211_QCTL_TID 0x000F
+#define OUI_SUBTYPE_WMM_INFO 0
+#define OUI_SUBTYPE_WMM_PARAM 1
+#define OUI_SUBTYPE_QOS_CAPABI 5
+
/* debug macros */
#define CONFIG_IEEE80211_DEBUG
#ifdef CONFIG_IEEE80211_DEBUG
@@ -1120,11 +1124,27 @@ enum {
COUNTRY_CODE_MKK = 5,
COUNTRY_CODE_MKK1 = 6,
COUNTRY_CODE_ISRAEL = 7,
- COUNTRY_CODE_TELEC,
- COUNTRY_CODE_MIC,
- COUNTRY_CODE_GLOBAL_DOMAIN
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_MAX
};
+#define NUM_PMKID_CACHE 16
+
+typedef struct _RT_PMKID_LIST
+{
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
+ u8* ssid_octet;
+ u16 ssid_length;
+} RT_PMKID_LIST, *PRT_PMKID_LIST;
+
+
#include "ieee80211_r8192s.h"
struct ieee80211_device {
@@ -1134,6 +1154,7 @@ struct ieee80211_device {
/* hw security related */
u8 hwsec_active;
bool is_silent_reset;
+ bool force_mic_error;
bool is_roaming;
bool ieee_up;
bool bSupportRemoteWakeUp;
@@ -1247,6 +1268,7 @@ struct ieee80211_device {
int bcrx_sta_key; /* use individual keys to override default keys even
* with RX of broad/multicast frames */
+ RT_PMKID_LIST PMKIDList[NUM_PMKID_CACHE];
/* Fragmentation structures */
// each streaming contain a entry
struct ieee80211_frag_entry frag_cache[17][IEEE80211_FRAG_CACHE_LEN];
@@ -1295,6 +1317,10 @@ struct ieee80211_device {
*/
void *pDot11dInfo;
bool bGlobalDomain;
+
+ u8 IbssStartChnl;
+ u8 ibss_maxjoin_chal;
+
int rate; /* current rate */
int basic_rate;
//FIXME: pleace callback, see if redundant with softmac_features
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
index 80194233943..24e7d595e3c 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
@@ -11,7 +11,6 @@
*
*/
-//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -201,7 +200,7 @@ static struct ieee80211_crypto_ops ieee80211_crypt_null = {
.owner = THIS_MODULE,
};
-int __init ieee80211_crypto_init(void)
+int ieee80211_crypto_init(void)
{
int ret = -ENOMEM;
@@ -221,7 +220,7 @@ out:
return ret;
}
-void __exit ieee80211_crypto_deinit(void)
+void ieee80211_crypto_deinit(void)
{
struct list_head *ptr, *n;
struct ieee80211_crypto_alg *alg = NULL;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.h
index b58a3bcc0dc..42e52aedd29 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.h
@@ -49,7 +49,7 @@ struct ieee80211_crypto_ops {
* These can be NULL if full MSDU operations are not needed. */
int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv);
+ void *priv, struct ieee80211_device* ieee);
int (*set_key)(void *key, int len, u8 *seq, void *priv);
int (*get_key)(void *key, int len, u8 *seq, void *priv);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c
index 77de957f1b1..caee44ba3bc 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c
@@ -9,7 +9,6 @@
* more details.
*/
-//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -131,7 +130,6 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x08));
*/
- // fixed by David :2006.9.6
qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x80));
aad_len = 22;
@@ -210,7 +208,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
-// mic = skb_put(skb, CCMP_MIC_LEN);
i = CCMP_PN_LEN - 1;
while (i >= 0) {
@@ -240,7 +237,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 *e = key->tx_e;
u8 *s0 = key->tx_s0;
- //mic is moved to here by john
mic = skb_put(skb, CCMP_MIC_LEN);
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
@@ -445,7 +441,6 @@ static char * ieee80211_ccmp_print_stats(char *p, void *priv)
void ieee80211_ccmp_null(void)
{
-// printk("============>%s()\n", __FUNCTION__);
return;
}
@@ -470,7 +465,7 @@ int __init ieee80211_crypto_ccmp_init(void)
return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
}
-void __exit ieee80211_crypto_ccmp_exit(void)
+void ieee80211_crypto_ccmp_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
}
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c
index ade5f6f1366..5ab94a9665e 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c
@@ -9,7 +9,6 @@
* more details.
*/
-//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -43,6 +42,7 @@ struct ieee80211_tkip_data {
u32 rx_iv32;
u16 rx_iv16;
+ bool initialized;
u16 rx_ttak[5];
int rx_phase1_done;
u32 rx_iv32_new;
@@ -433,8 +433,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (!tcb_desc->bHwSec)
{
- if (iv32 < tkey->rx_iv32 ||
- (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
+ if ((iv32 < tkey->rx_iv32 ||
+ (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16))&&tkey->initialized) {
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
" previous TSC %08x%04x received TSC "
@@ -444,6 +444,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkey->dot11RSNAStatsTKIPReplays++;
return -4;
}
+ tkey->initialized = true;
if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
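
Note on the two hunks above: the new initialized flag makes the TKIP replay check ignore the stored TSC until at least one frame has been accepted after (re)keying, so the very first received frame is never misflagged as a replay. A standalone sketch of that shape (the real code commits the stored TSC only after the MIC verifies; this sketch commits immediately for brevity):

#include <stdbool.h>
#include <stdint.h>

struct tkip_rx_state {
	uint32_t rx_iv32;	/* high 32 bits of the last accepted TSC */
	uint16_t rx_iv16;	/* low 16 bits of the last accepted TSC */
	bool initialized;	/* set once any frame has been accepted */
};

/* Return true if the received TSC (iv32:iv16) must be dropped as a replay. */
static bool tkip_is_replay(struct tkip_rx_state *st, uint32_t iv32, uint16_t iv16)
{
	if (st->initialized &&
	    (iv32 < st->rx_iv32 ||
	     (iv32 == st->rx_iv32 && iv16 <= st->rx_iv16)))
		return true;

	st->initialized = true;
	st->rx_iv32 = iv32;
	st->rx_iv16 = iv16;
	return false;
}
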
@@ -452,10 +453,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
plen = skb->len - hdr_len - 12;
-
+ sg_init_one(&sg, pos, plen+4);
crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen + 4);
-
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
if (net_ratelimit()) {
printk(KERN_DEBUG ": TKIP: failed to decrypt "
@@ -571,12 +570,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
michael_mic_hdr(skb, tkey->tx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
- // }
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
@@ -608,7 +604,7 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
}
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv)
+ int hdr_len, void *priv, struct ieee80211_device* ieee)
{
struct ieee80211_tkip_data *tkey = priv;
u8 mic[8];
@@ -620,12 +616,9 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
- // }
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
@@ -637,9 +630,14 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
"MSDU from %pM keyidx=%d\n",
skb->dev ? skb->dev->name : "N/A", hdr->addr2,
keyidx);
- if (skb->dev)
+ printk("%d, force_mic_error = %d\n", (memcmp(mic, skb->data + skb->len - 8, 8) != 0),\
+ ieee->force_mic_error);
+ if (skb->dev) {
+ printk("skb->dev != NULL\n");
ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
+ }
tkey->dot11RSNAStatsTKIPLocalMICFailures++;
+ ieee->force_mic_error = false;
return -1;
}
@@ -762,18 +760,17 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
.owner = THIS_MODULE,
};
-int __init ieee80211_crypto_tkip_init(void)
+int ieee80211_crypto_tkip_init(void)
{
return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
}
-void __exit ieee80211_crypto_tkip_exit(void)
+void ieee80211_crypto_tkip_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
}
void ieee80211_tkip_null(void)
{
-// printk("============>%s()\n", __FUNCTION__);
return;
}
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c
index a1c0a59122b..5219bfd4ea8 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c
@@ -9,7 +9,6 @@
* more details.
*/
-//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -279,18 +278,17 @@ static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.owner = THIS_MODULE,
};
-int __init ieee80211_crypto_wep_init(void)
+int ieee80211_crypto_wep_init(void)
{
return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}
-void __exit ieee80211_crypto_wep_exit(void)
+void ieee80211_crypto_wep_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}
void ieee80211_wep_null(void)
{
-// printk("============>%s()\n", __FUNCTION__);
return;
}
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
index a87650a517b..4945b3dbf72 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
@@ -31,7 +31,6 @@
*******************************************************************************/
#include <linux/compiler.h>
-//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
@@ -142,7 +141,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
spin_lock_init(&ieee->wpax_suitlist_lock);
spin_lock_init(&ieee->bw_spinlock);
spin_lock_init(&ieee->reorder_spinlock);
- //added by WB
atomic_set(&(ieee->atm_chnlop), 0);
atomic_set(&(ieee->atm_swbw), 0);
@@ -153,7 +151,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee->privacy_invoked = 0;
ieee->ieee802_1x = 1;
ieee->raw_tx = 0;
- //ieee->hwsec_support = 1; //default support hw security. //use module_param instead.
ieee->hwsec_active = 0; //disable hwsec, switch it on when necessary.
ieee80211_softmac_init(ieee);
@@ -196,8 +193,6 @@ void free_ieee80211(struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
int i;
- //struct list_head *p, *q;
-// del_timer_sync(&ieee->SwBwTimer);
#if 1
if (ieee->pHTInfo != NULL)
{
@@ -228,23 +223,6 @@ void free_ieee80211(struct net_device *dev)
u32 ieee80211_debug_level = 0;
static int debug = \
- // IEEE80211_DL_INFO |
- // IEEE80211_DL_WX |
- // IEEE80211_DL_SCAN |
- // IEEE80211_DL_STATE |
- // IEEE80211_DL_MGMT |
- // IEEE80211_DL_FRAG |
- // IEEE80211_DL_EAP |
- // IEEE80211_DL_DROP |
- // IEEE80211_DL_TX |
- // IEEE80211_DL_RX |
- //IEEE80211_DL_QOS |
- // IEEE80211_DL_HT |
- // IEEE80211_DL_TS |
-// IEEE80211_DL_BA |
- // IEEE80211_DL_REORDER|
-// IEEE80211_DL_TRACE |
- //IEEE80211_DL_DATA |
IEEE80211_DL_ERR // always leave this flag on so errors are shown
;
struct proc_dir_entry *ieee80211_proc = NULL;
@@ -282,7 +260,7 @@ static int store_debug_level(struct file *file, const char *buffer,
return strnlen(buf, count);
}
-int __init ieee80211_debug_init(void)
+int ieee80211_debug_init(void)
{
struct proc_dir_entry *e;
@@ -308,7 +286,7 @@ int __init ieee80211_debug_init(void)
return 0;
}
-void __exit ieee80211_debug_exit(void)
+void ieee80211_debug_exit(void)
{
if (ieee80211_proc) {
remove_proc_entry("debug_level", ieee80211_proc);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h b/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
index 1824cda790d..7e7fbb26980 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
@@ -339,28 +339,39 @@ enum {
};
/* Firmware related CMD IO. */
-typedef enum _FW_CMD_IO_TYPE {
- FW_CMD_DIG_ENABLE = 0, /* for DIG DM */
+typedef enum _FW_CMD_IO_TYPE {
+ FW_CMD_DIG_ENABLE = 0, /* for DIG DM */
FW_CMD_DIG_DISABLE = 1,
FW_CMD_DIG_HALT = 2,
FW_CMD_DIG_RESUME = 3,
- FW_CMD_HIGH_PWR_ENABLE = 4, /* for High Power DM */
+ FW_CMD_HIGH_PWR_ENABLE = 4, /* for High Power DM */
FW_CMD_HIGH_PWR_DISABLE = 5,
- FW_CMD_RA_RESET = 6, /* for Rate adaptive DM */
- FW_CMD_RA_ACTIVE = 7,
- FW_CMD_RA_REFRESH_N = 8,
- FW_CMD_RA_REFRESH_BG = 9,
- FW_CMD_IQK_ENABLE = 10, /* for FW supported IQK */
- FW_CMD_TXPWR_TRACK_ENABLE = 11, /* Tx power tracking switch */
- FW_CMD_TXPWR_TRACK_DISABLE = 12,/* Tx power tracking switch */
- FW_CMD_PAUSE_DM_BY_SCAN = 13,
- FW_CMD_RESUME_DM_BY_SCAN = 14,
- FW_CMD_MID_HIGH_PWR_ENABLE = 15,
+ FW_CMD_RA_RESET = 6, /* for Rate adaptive DM */
+ FW_CMD_RA_ACTIVE = 7,
+ FW_CMD_RA_REFRESH_N = 8,
+ FW_CMD_RA_REFRESH_BG = 9,
+ FW_CMD_RA_INIT = 10,
+ FW_CMD_IQK_ENABLE = 11, /* for FW supported IQK */
+ FW_CMD_TXPWR_TRACK_ENABLE = 12, /* Tx power tracking switch */
+ FW_CMD_TXPWR_TRACK_DISABLE = 13,
+ FW_CMD_TXPWR_TRACK_THERMAL = 14,
+ FW_CMD_PAUSE_DM_BY_SCAN = 15,
/* indicate firmware that driver enters LPS, for PS-Poll hardware bug */
- FW_CMD_LPS_ENTER = 16,
+ FW_CMD_RESUME_DM_BY_SCAN = 16,
/* indicate firmware that driver leave LPS */
- FW_CMD_LPS_LEAVE = 17,
-} FW_CMD_IO_TYPE;
+ FW_CMD_RA_REFRESH_N_COMB = 17,
+ FW_CMD_RA_REFRESH_BG_COMB = 18,
+ FW_CMD_ANTENNA_SW_ENABLE = 19,
+ FW_CMD_ANTENNA_SW_DISABLE = 20,
+ FW_CMD_TX_FEEDBACK_CCX_ENABLE = 21,
+ FW_CMD_LPS_ENTER = 22,
+ FW_CMD_LPS_LEAVE = 23,
+ FW_CMD_DIG_MODE_SS = 24,
+ FW_CMD_DIG_MODE_FA = 25,
+ FW_CMD_ADD_A2_ENTRY = 26,
+ FW_CMD_CTRL_DM_BY_DRIVER = 27,
+ FW_CMD_CTRL_DM_BY_DRIVER_NEW = 28,
+} FW_CMD_IO_TYPE, *PFW_CMD_IO_TYPE;
#define RT_MAX_LD_SLOT_NUM 10
struct rt_link_detect {
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
index 1f2bc7ac6f7..09a02f7e39f 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
@@ -360,7 +360,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *s
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
+ res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv,ieee);
atomic_dec(&crypt->refcnt);
if (res < 0) {
printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
index 4f1f2f08b2d..02850479dd6 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
@@ -160,7 +160,6 @@ void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
ieee->mgmt_queue_head = nh;
ieee->mgmt_queue_ring[nh] = skb;
- //return 0;
}
struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
@@ -183,19 +182,53 @@ void init_mgmt_queue(struct ieee80211_device *ieee)
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
+u8
+MgntQuery_TxRateExcludeCCKRates(struct ieee80211_device *ieee)
+{
+ u16 i;
+ u8 QueryRate = 0;
+ u8 BasicRate;
+
+
+ for( i = 0; i < ieee->current_network.rates_len; i++)
+ {
+ BasicRate = ieee->current_network.rates[i]&0x7F;
+ if(!ieee80211_is_cck_rate(BasicRate))
+ {
+ if(QueryRate == 0)
+ {
+ QueryRate = BasicRate;
+ }
+ else
+ {
+ if(BasicRate < QueryRate)
+ {
+ QueryRate = BasicRate;
+ }
+ }
+ }
+ }
+
+ if(QueryRate == 0)
+ {
+ QueryRate = 12;
+ printk("No BasicRate found!!\n");
+ }
+ return QueryRate;
+}
u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u8 rate;
- // 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M.
- if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
- rate = 0x0c;
+ if(pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)
+ {
+ rate = MgntQuery_TxRateExcludeCCKRates(ieee);
+ }
else
rate = ieee->basic_rate & 0x7f;
if(rate == 0){
- // 2005.01.26, by rcnjko.
if(ieee->mode == IEEE_A||
ieee->mode== IEEE_N_5G||
(ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK))
@@ -203,17 +236,6 @@ u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
else
rate = 0x02;
}
-
- /*
- // Data rate of ProbeReq is already decided. Annie, 2005-03-31
- if( pMgntInfo->bScanInProgress || (pMgntInfo->bDualModeScanStep!=0) )
- {
- if(pMgntInfo->dot11CurrentWirelessMode==WIRELESS_MODE_A)
- rate = 0x0c;
- else
- rate = 0x02;
- }
- */
return rate;
}
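
Note on the hunk above: rates in the rate set are carried in 500 kbps units with bit 0x80 marking a basic rate, so 0x02 is 1 Mbps CCK, 0x16 is 11 Mbps CCK and 0x0c is 6 Mbps OFDM. The new MgntQuery_TxRateExcludeCCKRates() picks the lowest non-CCK rate for management frames when the Broadcom IOT workaround is active. A standalone sketch of that selection; the CCK test mirrors what ieee80211_is_cck_rate() is assumed to check (1/2/5.5/11 Mbps):

#include <stdbool.h>
#include <stdint.h>

static bool is_cck_rate(uint8_t rate)
{
	rate &= 0x7f;			/* strip the basic-rate flag */
	return rate == 2 || rate == 4 || rate == 11 || rate == 22;
}

/* Lowest OFDM rate in a rate set, or 12 (6 Mbps) if the set has none. */
static uint8_t lowest_ofdm_rate(const uint8_t *rates, int len)
{
	uint8_t best = 0;
	int i;

	for (i = 0; i < len; i++) {
		uint8_t r = rates[i] & 0x7f;

		if (!is_cck_rate(r) && (best == 0 || r < best))
			best = r;
	}
	return best ? best : 12;
}
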
@@ -251,9 +273,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
ieee->seq_ctrl[0]++;
/* avoid watchdog triggers */
- // ieee->dev->trans_start = jiffies;
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
- //dev_kfree_skb_any(skb);//edit by thomas
}
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -279,9 +299,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
printk("%s():insert to waitqueue!\n",__FUNCTION__);
skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index], skb);
} else {
- //printk("TX packet!\n");
ieee->softmac_hard_start_xmit(skb,ieee->dev);
- //dev_kfree_skb_any(skb);//edit by thomas
}
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
}
@@ -293,16 +311,24 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
+ u16 fc,type,stype;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + 8);
+ fc = header->frame_control;
+ type = WLAN_FC_GET_TYPE(fc);
+ stype = WLAN_FC_GET_STYPE(fc);
+
+
+ if(stype != IEEE80211_STYPE_PSPOLL)
tcb_desc->queue_index = MGNT_QUEUE;
+ else
+ tcb_desc->queue_index = HIGH_QUEUE;
tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
tcb_desc->RATRIndex = 7;
tcb_desc->bTxDisableRateFallBack = 1;
tcb_desc->bTxUseDriverAssingedRate = 1;
- //printk("=============>%s()\n", __FUNCTION__);
if(single){
-
+ if(!(type == IEEE80211_FTYPE_CTL)) {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -310,12 +336,12 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
else
ieee->seq_ctrl[0]++;
+ }
/* avoid watchdog triggers */
- // ieee->dev->trans_start = jiffies;
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
}else{
-
+ if(!(type == IEEE80211_FTYPE_CTL)) {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -323,10 +349,10 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
else
ieee->seq_ctrl[0]++;
+ }
ieee->softmac_hard_start_xmit(skb,ieee->dev);
}
- //dev_kfree_skb_any(skb);//edit by thomas
}
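
Note on the hunks above: PS-Poll is a control frame, so it carries no sequence-control field and is steered to HIGH_QUEUE instead of MGNT_QUEUE; the added type check therefore skips sequence-number assignment for control frames. A standalone sketch of the frame-control test (bit positions per IEEE 802.11: type in bits 2-3, subtype in bits 4-7; the macro and constant names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define FC_TYPE(fc)	(((fc) >> 2) & 0x3)
#define FC_STYPE(fc)	(((fc) >> 4) & 0xf)
#define FTYPE_CTL	0x1	/* control frames */
#define STYPE_PSPOLL	0xa	/* PS-Poll */

/* Control frames have no sequence-control field. */
static bool needs_seq_ctrl(uint16_t fc)
{
	return FC_TYPE(fc) != FTYPE_CTL;
}

/* PS-Poll goes out on the high-priority queue, other management on MGNT. */
static bool is_ps_poll(uint16_t fc)
{
	return FC_TYPE(fc) == FTYPE_CTL && FC_STYPE(fc) == STYPE_PSPOLL;
}
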
inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
@@ -373,24 +399,16 @@ void ieee80211_send_beacon(struct ieee80211_device *ieee)
struct sk_buff *skb;
if(!ieee->ieee_up)
return;
- //unsigned long flags;
skb = ieee80211_get_beacon_(ieee);
if (skb){
softmac_mgmt_xmit(skb, ieee);
ieee->softmac_stats.tx_beacons++;
- //dev_kfree_skb_any(skb);//edit by thomas
}
-// ieee->beacon_timer.expires = jiffies +
-// (MSECS( ieee->current_network.beacon_interval -5));
- //spin_lock_irqsave(&ieee->beacon_lock,flags);
if(ieee->beacon_txing && ieee->ieee_up){
-// if(!timer_pending(&ieee->beacon_timer))
-// add_timer(&ieee->beacon_timer);
mod_timer(&ieee->beacon_timer,jiffies+(MSECS(ieee->current_network.beacon_interval-5)));
}
- //spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
@@ -414,7 +432,6 @@ void ieee80211_send_probe(struct ieee80211_device *ieee)
if (skb){
softmac_mgmt_xmit(skb, ieee);
ieee->softmac_stats.tx_probe_rq++;
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
@@ -585,12 +602,8 @@ void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
-// unsigned long flags;
-
- //ieee->sync_scan_hurryup = 1;
down(&ieee->scan_sem);
-// spin_lock_irqsave(&ieee->lock, flags);
ieee->scan_watch_dog = 0;
if (ieee->scanning == 1){
ieee->scanning = 0;
@@ -598,7 +611,6 @@ void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
cancel_delayed_work(&ieee->softmac_scan_wq);
}
-// spin_unlock_irqrestore(&ieee->lock, flags);
up(&ieee->scan_sem);
}
@@ -672,7 +684,6 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
- //auth->algorithm = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
if(ieee->auth_mode == 0)
auth->algorithm = WLAN_AUTH_OPEN;
else if(ieee->auth_mode == 1)
@@ -689,6 +700,26 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
}
+void constructWMMIE(u8* wmmie, u8* wmm_len,u8 oui_subtype)
+{
+ u8 szQoSOUI[] ={221, 0, 0x00, 0x50, 0xf2, 0x02, 0, 1};
+
+ if (oui_subtype == OUI_SUBTYPE_QOS_CAPABI)
+ {
+ szQoSOUI[0] = 46;
+ szQoSOUI[1] = *wmm_len;
+ memcpy(wmmie,szQoSOUI,3);
+ *wmm_len = 3;
+ }
+ else
+ {
+ szQoSOUI[1] = *wmm_len + 6;
+ szQoSOUI[6] = oui_subtype;
+ memcpy(wmmie, szQoSOUI, 8);
+ *(wmmie+8) = 0;
+ *wmm_len = 9;
+ }
+}
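
Note on constructWMMIE() above: for the WMM Information case it emits a vendor-specific element (ID 221) with the Wi-Fi Alliance OUI 00:50:F2, OUI type 2, the requested subtype, version 1 and a zero QoS Info byte; the QoS Capability case (element 46) only copies the three-byte header. A standalone sketch of the nine-byte WMM Information layout the function produces:

#include <stdint.h>
#include <string.h>

/*
 * Byte layout: [0] ID 221, [1] length 7, [2..4] OUI 00:50:F2,
 * [5] OUI type 2 (WMM), [6] OUI subtype, [7] version 1, [8] QoS Info.
 * Returns the number of bytes written (9).
 */
static int build_wmm_info_ie(uint8_t *ie, uint8_t subtype)
{
	static const uint8_t hdr[8] = { 221, 7, 0x00, 0x50, 0xf2, 0x02, 0, 1 };

	memcpy(ie, hdr, sizeof(hdr));
	ie[6] = subtype;	/* 0 = Information, 1 = Parameter */
	ie[8] = 0;		/* QoS Info field */
	return 9;
}
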
static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
{
@@ -707,14 +738,18 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
int wpa_ie_len = ieee->wpa_ie_len;
u8 erpinfo_content = 0;
- u8* tmp_ht_cap_buf;
+ u8* tmp_ht_cap_buf=NULL;
u8 tmp_ht_cap_len=0;
- u8* tmp_ht_info_buf;
+ u8* tmp_ht_info_buf=NULL;
u8 tmp_ht_info_len=0;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u8* tmp_generic_ie_buf=NULL;
u8 tmp_generic_ie_len=0;
+
+ u8 wmmie[9] = {0};
+ u8 wmm_len = 0;
+
if(rate_ex_len > 0) rate_ex_len+=2;
if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
@@ -722,7 +757,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
else
atim_len = 0;
-#if 1
+#if 0
if(ieee80211_is_54g(ieee->current_network))
erp_len = 3;
else
@@ -747,22 +782,35 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
//HT ralated element
#if 1
- tmp_ht_cap_buf =(u8*) &(ieee->pHTInfo->SelfHTCap);
- tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf =(u8*) &(ieee->pHTInfo->SelfHTInfo);
- tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
- HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len,encrypt);
- HTConstructInfoElement(ieee,tmp_ht_info_buf,&tmp_ht_info_len, encrypt);
-
-
- if(pHTInfo->bRegRT2RTAggregation)
- {
- tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
- tmp_generic_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
- HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len);
- }
-// printk("===============>tmp_ht_cap_len is %d,tmp_ht_info_len is %d, tmp_generic_ie_len is %d\n",tmp_ht_cap_len,tmp_ht_info_len,tmp_generic_ie_len);
+ if(ieee->pHTInfo->bCurrentHTSupport){
+ tmp_ht_cap_buf =(u8*) &(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_info_buf =(u8*) &(ieee->pHTInfo->SelfHTInfo);
+ tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
+
+ HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len,encrypt);
+
+ HTConstructInfoElement(ieee,tmp_ht_info_buf,&tmp_ht_info_len, encrypt);
+
+
+ if(pHTInfo->bRegRT2RTAggregation)
+ {
+ tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ tmp_generic_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len);
+ }
+ }
#endif
+
+ if(ieee->qos_support){
+
+ if(ieee->iw_mode == IW_MODE_ADHOC)
+ {
+ wmm_len = 1;
+ constructWMMIE(wmmie,&wmm_len,OUI_SUBTYPE_WMM_INFO);
+ }
+ }
+
beacon_size = sizeof(struct ieee80211_probe_response)+2+
ssid_len
+3 //channel
@@ -825,7 +873,6 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
u16 val16;
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- //*((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
val16 = cpu_to_le16(ieee->current_network.atim_window);
memcpy((u8 *)tag, (u8 *)&val16, 2);
tag+=2;
@@ -854,7 +901,6 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
tag += wpa_ie_len;
}
- //skb->dev = ieee->dev;
return skb;
}
@@ -996,19 +1042,39 @@ void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
}
+inline int SecIsInPMKIDList(struct ieee80211_device *ieee, u8 *bssid)
+{
+ int i = 0;
+
+ do
+ {
+ if ((ieee->PMKIDList[i].bUsed) && (memcmp(ieee->PMKIDList[i].Bssid, bssid, ETH_ALEN) == 0))
+ {
+ break;
+ }
+ else
+ {
+ i++;
+ }
+ } while (i < NUM_PMKID_CACHE);
+
+ if (i == NUM_PMKID_CACHE)
+ {
+ i = -1;
+ }
+ else
+ {
+ }
+
+ return (i);
+
+}
inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee)
{
struct sk_buff *skb;
- //unsigned long flags;
struct ieee80211_assoc_request_frame *hdr;
u8 *tag;//,*rsn_ie;
- //short info_addr = 0;
- //int i;
- //u16 suite_count = 0;
- //u8 suit_select = 0;
- //unsigned int wpa_len = beacon->wpa_ie_len;
- //for HT
u8* ht_cap_buf = NULL;
u8 ht_cap_len=0;
u8* realtek_ie_buf=NULL;
@@ -1019,6 +1085,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
unsigned int cxvernum_ie_len=0;
struct ieee80211_crypt_data* crypt;
int encrypt;
+ int PMKCacheIdx;
unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
unsigned int wmm_info_len = beacon->qos_data.supported?9:0;
@@ -1060,6 +1127,14 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
{
cxvernum_ie_len = 5+2;
}
+
+ PMKCacheIdx = SecIsInPMKIDList(ieee, ieee->current_network.bssid);
+ if (PMKCacheIdx >= 0)
+ {
+ wpa_ie_len += 18;
+ printk("[PMK cache]: WPA2 IE length: %x\n", wpa_ie_len);
+ }
+
len = sizeof(struct ieee80211_assoc_request_frame)+ 2
+ beacon->ssid_len//essid tagged val
+ rate_len//rates tagged val
@@ -1187,6 +1262,13 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
tag = skb_put(skb, wpa_ie_len);
if (wpa_ie_len){
memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
+ if (PMKCacheIdx >= 0)
+ {
+ tag = skb_put(skb, 18);
+ *tag = 1;
+ *(tag + 1) = 0;
+ memcpy((tag + 2), &ieee->PMKIDList[PMKCacheIdx].PMKID, 16);
+ }
}
tag = skb_put(skb,wmm_info_len);
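
Note on the hunk above: the 18 bytes reserved when a cached PMKID matches the target BSSID are the optional RSN fields appended after the cipher and AKM suites: a two-byte little-endian PMKID Count (here 1) followed by one 16-byte PMKID. A standalone sketch of that append:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Append "PMKID Count = 1" plus a single PMKID to an RSN element body. */
static size_t rsn_append_pmkid(uint8_t *dst, const uint8_t pmkid[16])
{
	dst[0] = 1;		/* PMKID Count, low byte (little endian) */
	dst[1] = 0;		/* PMKID Count, high byte */
	memcpy(dst + 2, pmkid, 16);
	return 18;
}
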
@@ -1215,8 +1297,6 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
memcpy(tag, realtek_ie_buf,realtek_ie_len -2 );
}
}
-// printk("<=====%s(), %p, %p\n", __FUNCTION__, ieee->dev, ieee->dev->dev_addr);
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
return skb;
}
@@ -1271,7 +1351,6 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
else{
ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ;
IEEE80211_DEBUG_MGMT("Sending authentication request\n");
- //printk(KERN_WARNING "Sending authentication request\n");
softmac_mgmt_xmit(skb, ieee);
//BUGON when you try to add_timer twice, using mod_timer may be better, john0709
if(!timer_pending(&ieee->associate_timer)){
@@ -1287,7 +1366,6 @@ void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
u8 *c;
struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
-// int hlen = sizeof(struct ieee80211_authentication);
ieee->associate_seq++;
ieee->softmac_stats.tx_auth_rq++;
@@ -1307,7 +1385,6 @@ void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
softmac_mgmt_xmit(skb, ieee);
mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
- //dev_kfree_skb_any(skb);//edit by thomas
}
kfree(challenge);
}
@@ -1328,7 +1405,6 @@ void ieee80211_associate_step2(struct ieee80211_device *ieee)
else{
softmac_mgmt_xmit(skb, ieee);
mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
@@ -1356,7 +1432,6 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
{
printk("Successfully associated, ht not enabled(%d, %d)\n", ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bEnableHT);
memset(ieee->dot11HTOperationalRateSet, 0, 16);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
}
ieee->LinkDetectInfo.SlotNum = 2 * (1 + ieee->current_network.beacon_interval/500);
// To prevent watch_dog from being called immediately after association.
@@ -1388,7 +1463,6 @@ void ieee80211_associate_complete(struct ieee80211_device *ieee)
del_timer_sync(&ieee->associate_timer);
ieee->state = IEEE80211_LINKED;
- //ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet);
queue_work(ieee->wq, &ieee->associate_complete_wq);
}
@@ -1404,9 +1478,14 @@ void ieee80211_associate_procedure_wq(struct work_struct *work)
ieee80211_stop_scan(ieee);
printk("===>%s(), chan:%d\n", __FUNCTION__, ieee->current_network.channel);
- //ieee->set_chan(ieee->dev, ieee->current_network.channel);
HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+ if(ieee->eRFPowerState == eRfOff)
+ {
+ printk("=============>%s():Rf state is eRfOff, schedule ipsleave wq again,return\n",__FUNCTION__);
+ up(&ieee->wx_sem);
+ return;
+ }
ieee->associate_seq = 1;
ieee80211_associate_step1(ieee);
@@ -1432,14 +1511,16 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
return;
+ if ((ieee->iw_mode == IW_MODE_ADHOC) && (net->channel > ieee->ibss_maxjoin_chal))
+ return;
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC){
		/* if the user specified the AP MAC, we also need the ESSID.
		 * This can be obtained from beacons or, if the network does not
		 * broadcast it, it can be set manually.
*/
- apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 );
- ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0';
+ apset = ieee->wap_set;
+ ssidset = ieee->ssid_set;
ssidbroad = !(net->ssid_len == 0 || net->ssid[0]== '\0');
apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0);
ssidmatch = (ieee->current_network.ssid_len == net->ssid_len)&&\
@@ -1480,7 +1561,6 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
ieee->AsocRetryCount = 0;
//for HT by amy 080514
if((ieee->current_network.qos_data.supported == 1) &&
- // (ieee->pHTInfo->bEnableHT && ieee->current_network.bssht.bdSupportHT))
ieee->current_network.bssht.bdSupportHT)
	/*WB, 2008.09.09: bCurrentHTSupport and bEnableHT are combined to decide whether we are in HT mode, so there is no need to check bEnableHT here. That is, we mark HT support whenever the joined AP can support HT; to check whether we are actually operating in HT, test bCurrentHTSupport && bEnableHT.*/
{
@@ -1508,7 +1588,6 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
printk(KERN_INFO"Using B rates\n");
}
memset(ieee->dot11HTOperationalRateSet, 0, 16);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
ieee->state = IEEE80211_LINKED;
}
@@ -1598,6 +1677,16 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
if (skb->len < sizeof (struct ieee80211_hdr_3addr ))
return -1; /* corrupted */
+ if((memcmp(header->addr3,ieee->current_network.bssid,ETH_ALEN) != 0)&&
+ (memcmp(header->addr3,"\xff\xff\xff\xff\xff\xff",ETH_ALEN) != 0)) {
+ return -1;
+ }
+
+ if(memcmp(header->addr3,ieee->current_network.bssid,ETH_ALEN) == 0) {
+ }
+
+ if(memcmp(header->addr3,"\xff\xff\xff\xff\xff\xff",ETH_ALEN) == 0) {
+ }
memcpy(src,header->addr2, ETH_ALEN);
skbend = (u8*)skb->data + skb->len;
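The new addr3 checks above only let a probe request through when it targets our BSS or the wildcard BSSID; the two empty if blocks are placeholders. A compact equivalent of the filter, as a sketch (the helper name is illustrative):

/* Sketch, not part of the patch: accept a probe request only if its
 * BSSID (addr3) is our current BSS or the broadcast/wildcard BSSID. */
static inline int probe_req_is_for_us(const u8 *addr3, const u8 *bssid)
{
	static const u8 wildcard[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	return memcmp(addr3, bssid, ETH_ALEN) == 0 ||
	       memcmp(addr3, wildcard, ETH_ALEN) == 0;
}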
@@ -1615,7 +1704,6 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
tag++; /* point to the next tag */
}
- //IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src));
if (ssidlen == 0) return 1;
if (!ssid) return 1; /* ssid not found in tagged param */
@@ -1673,11 +1761,8 @@ ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
- //IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_probe_rq++;
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
if (probe_rq_parse(ieee, skb, dest)){
- //IEEE80211DMESG("Was for me!");
ieee->softmac_stats.tx_probe_rs++;
ieee80211_resp_to_probe(ieee, dest);
}
@@ -1688,14 +1773,12 @@ ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
int status;
- //IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
status = auth_rq_parse(skb, dest);
if (status != -1) {
ieee80211_resp_to_auth(ieee, status, dest);
}
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
}
@@ -1704,7 +1787,6 @@ ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
- //unsigned long flags;
ieee->softmac_stats.rx_ass_rq++;
if (assoc_rq_parse(skb,dest) != -1){
@@ -1739,7 +1821,6 @@ short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *ti
return 0;
*/
dtim = ieee->current_network.dtim_data;
- //printk("DTIM\n");
if(!(dtim & IEEE80211_DTIM_VALID))
return 0;
timeout = ieee->current_network.beacon_interval; //should we use ps_timeout value or beacon_interval
@@ -1762,7 +1843,6 @@ short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *ti
if(time_l){
*time_l = ieee->current_network.last_dtim_sta_time[0]
+ (ieee->current_network.beacon_interval);
- // * ieee->current_network.dtim_period) * 1000;
}
if(time_h){
@@ -1790,7 +1870,6 @@ inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
ieee->iw_mode != IW_MODE_INFRA ||
ieee->state != IEEE80211_LINKED)){
- // #warning CHECK_LOCK_HERE
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_wakeup(ieee, 1);
@@ -1809,7 +1888,6 @@ inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
ieee->enter_sleep_state(ieee->dev,th,tl);
else if(ieee->sta_sleep == 0){
- // printk("send null 1\n");
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
if(ieee->ps_is_queue_empty(ieee->dev)){
@@ -1918,8 +1996,6 @@ void ieee80211_process_action(struct ieee80211_device* ieee, struct sk_buff* skb
ieee80211_rx_DELBA(ieee, skb);
break;
default:
-// if (net_ratelimit())
-// IEEE80211_DEBUG(IEEE80211_DL_BA, "unknown action frame(%d)\n", tmp);
break;
}
return;
@@ -1936,7 +2012,6 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
int chlen=0;
int aid;
struct ieee80211_assoc_response_frame *assoc_resp;
-// struct ieee80211_info_element *info_element;
bool bSupportNmode = true, bHalfSupportNmode = false; //default support N mode, disable halfNmode
if(!ieee->proto_started)
@@ -2095,8 +2170,6 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee->softmac_stats.reassoc++;
ieee->is_roaming = true;
ieee80211_disassociate(ieee);
- // notify_wx_assoc_event(ieee);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
RemovePeerTS(ieee, header->addr2);
if(ieee->LedControlHandler != NULL)
ieee->LedControlHandler(ieee->dev, LED_CTL_START_TO_LINK); //added by amy for LED 090318
@@ -2111,7 +2184,6 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
break;
}
- //dev_kfree_skb_any(skb);
return 0;
}
@@ -2163,22 +2235,16 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
			/* the completion function does not need
			 * to check it any more.
			 */
- //printk("error:no descriptor left@queue_index %d, %d, %d\n", queue_index, skb_queue_len(&ieee->skb_waitQ[queue_index]), ieee->check_nic_enough_desc(ieee->dev,queue_index));
- //ieee80211_rtl_stop_queue(ieee);
skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
}else{
ieee->softmac_data_hard_start_xmit(
txb->fragments[i],
ieee->dev,ieee->rate);
- //ieee->stats.tx_packets++;
- //ieee->stats.tx_bytes += txb->fragments[i]->len;
- //ieee->dev->trans_start = jiffies;
}
}
#endif
ieee80211_txb_free(txb);
-//exit:
spin_unlock_irqrestore(&ieee->lock,flags);
}
@@ -2197,9 +2263,7 @@ void ieee80211_resume_tx(struct ieee80211_device *ieee)
ieee->softmac_data_hard_start_xmit(
ieee->tx_pending.txb->fragments[i],
ieee->dev,ieee->rate);
- //(i+1)<ieee->tx_pending.txb->nr_frags);
ieee->stats.tx_packets++;
- // ieee->dev->trans_start = jiffies;
}
}
@@ -2249,7 +2313,6 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
ieee->seq_ctrl[0]++;
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
if (!ieee->queue_stop && ieee->tx_pending.txb)
@@ -2267,15 +2330,12 @@ exit :
void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
{
- //unsigned long flags;
- //spin_lock_irqsave(&ieee->lock,flags);
if (! netif_queue_stopped(ieee->dev)){
netif_stop_queue(ieee->dev);
ieee->softmac_stats.swtxstop++;
}
ieee->queue_stop = 1;
- //spin_unlock_irqrestore(&ieee->lock,flags);
}
@@ -2362,7 +2422,7 @@ void ieee80211_start_ibss_wq(struct work_struct *work)
// if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK))
if (ieee->state == IEEE80211_NOLINK)
- ieee->current_network.channel = 6;
+ ieee->current_network.channel = ieee->IbssStartChnl;
	/* if not, then the state is not linked. Maybe the user switched to
	 * ad-hoc mode just after being in monitor mode, or just after
	 * being only briefly in managed mode (so the card has had no
@@ -2509,11 +2569,9 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
ieee->state = IEEE80211_NOLINK;
ieee->is_set_key = false;
- //LZM for usb dev crash.
- //ieee->link_change(ieee->dev);
queue_delayed_work(ieee->wq, &ieee->link_change_wq, 0);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+
notify_wx_assoc_event(ieee);
}
@@ -2657,8 +2715,6 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
if (ieee->current_network.beacon_interval == 0)
ieee->current_network.beacon_interval = 100;
-// printk("===>%s(), chan:%d\n", __FUNCTION__, ieee->current_network.channel);
-// ieee->set_chan(ieee->dev,ieee->current_network.channel);
for(i = 0; i < 17; i++) {
ieee->last_rxseq_num[i] = -1;
@@ -2721,7 +2777,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->Regdot11HTOperationalRateSet[0]= 0xff;//support MCS 0~7
ieee->Regdot11HTOperationalRateSet[1]= 0xff;//support MCS 8~15
ieee->Regdot11HTOperationalRateSet[4]= 0x01;
- //added by amy
ieee->actscanning = false;
ieee->beinretry = false;
ieee->is_set_key = false;
@@ -2891,8 +2946,6 @@ static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- //else
- // ret = -EOPNOTSUPP;
return ret;
}
@@ -3173,7 +3226,6 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
int ret=0;
down(&ieee->wx_sem);
- //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
if (p->length < sizeof(struct ieee_param) || !p->pointer){
ret = -EINVAL;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
index 484c3aba5cb..a6a5d68df3a 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
@@ -32,7 +32,6 @@
******************************************************************************/
#include <linux/compiler.h>
-//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
@@ -208,7 +207,6 @@ int ieee80211_encrypt_fragment(
/* To encrypt, frame format is:
* IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
- // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
* call both MSDU and MPDU encryption functions from here. */
atomic_inc(&crypt->refcnt);
@@ -231,7 +229,6 @@ int ieee80211_encrypt_fragment(
void ieee80211_txb_free(struct ieee80211_txb *txb) {
- //int i;
if (unlikely(!txb))
return;
kfree(txb);
@@ -280,7 +277,6 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
if (eth->h_proto != htons(ETH_P_IP))
return 0;
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
ip = ip_hdr(skb);
switch (ip->tos & 0xfc) {
@@ -681,10 +677,8 @@ int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
if (encrypt)
fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
else
-
fc = IEEE80211_FTYPE_DATA;
- //if(ieee->current_network.QoS_Enable)
if(qos_actived)
fc |= IEEE80211_STYPE_QOS_DATA;
else
@@ -765,7 +759,6 @@ int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
txb->encrypted = encrypt;
txb->payload_size = bytes;
- //if (ieee->current_network.QoS_Enable)
if(qos_actived)
{
txb->queue_index = UP2AC(skb->priority);
@@ -812,7 +805,6 @@ int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
/* The last fragment takes the remaining length */
bytes = bytes_last_frag;
}
- //if(ieee->current_network.QoS_Enable)
if(qos_actived)
{
// add 1 only indicate to corresponding seq number control 2006/7/12
@@ -889,7 +881,6 @@ int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
if ( tcb_desc->bMulticast || tcb_desc->bBroadcast)
tcb_desc->data_rate = ieee->basic_rate;
else
- //tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
@@ -897,8 +888,6 @@ int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
ieee80211_query_BandwidthMode(ieee, tcb_desc);
ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, tcb_desc, sizeof(cb_desc));
#endif
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
index 2ce5bd543ea..984a3608561 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
@@ -77,7 +77,6 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
-// if (network->flags & NETWORK_EMPTY_ESSID) {
if (network->ssid_len == 0) {
iwe.u.data.length = sizeof("<hidden>");
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
@@ -240,9 +239,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
unsigned long flags;
char *ev = extra;
-// char *stop = ev + IW_SCAN_MAX_DATA;
char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
- //char *stop = ev + IW_SCAN_MAX_DATA;
int i = 0;
int err = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
@@ -511,7 +508,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct ieee80211_security sec = {
.flags = 0,
};
- //printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
@@ -562,7 +558,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
}
sec.enabled = 1;
- // sec.encrypt = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
@@ -580,7 +575,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
ret = -EINVAL;
goto done;
}
- printk("alg name:%s\n",alg);
+ IEEE80211_DEBUG_WX("alg name: %s\n", alg);
ops = ieee80211_get_crypto_ops(alg);
if (ops == NULL)
@@ -624,8 +619,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
goto done;
}
#if 1
- //skip_host_crypt:
- //printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
ieee->tx_keyidx = idx;
sec.active_key = idx;
@@ -633,7 +626,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
}
if (ext->alg != IW_ENCODE_ALG_NONE) {
- //memcpy(sec.keys[idx], ext->key, ext->key_len);
sec.key_sizes[idx] = ext->key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
@@ -690,7 +682,6 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
/*need to support wpa2 here*/
- //printk("wpa version:%x\n", data->value);
break;
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
@@ -708,8 +699,6 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
break;
case IW_AUTH_80211_AUTH_ALG:
- //printk("======>%s():data->value is %d\n",__FUNCTION__,data->value);
- // ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
if(data->value & IW_AUTH_ALG_SHARED_KEY){
ieee->open_wep = 0;
ieee->auth_mode = 1;
@@ -721,17 +710,14 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
else if(data->value & IW_AUTH_ALG_LEAP){
ieee->open_wep = 1;
ieee->auth_mode = 2;
- //printk("hahahaa:LEAP\n");
}
else
return -EINVAL;
- //printk("open_wep:%d\n", ieee->open_wep);
break;
#if 1
case IW_AUTH_WPA_ENABLED:
ieee->wpa_enabled = (data->value)?1:0;
- //printk("enable wpa:%d\n", ieee->wpa_enabled);
break;
#endif
@@ -755,7 +741,6 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
if (len>MAX_WPA_IE_LEN || (len && ie == NULL))
{
- // printk("return error out, len:%d\n", len);
return -EINVAL;
}
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_BA.h b/drivers/staging/rtl8192su/ieee80211/rtl819x_BA.h
index 8ddc8bf9dc2..1c2a40b75a1 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_BA.h
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_BA.h
@@ -1,3 +1,21 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef _BATYPE_H_
#define _BATYPE_H_
@@ -18,14 +36,7 @@
#define DELBA_REASON_END_BA 37
#define DELBA_REASON_UNKNOWN_BA 38
#define DELBA_REASON_TIMEOUT 39
-/* whether need define BA Action frames here?
-struct ieee80211_ADDBA_Req{
- struct ieee80211_header_data header;
- u8 category;
- u8
-} __attribute__ ((packed));
-*/
-//Is this need?I put here just to make it easier to define structure BA_RECORD //WB
+
typedef union _SEQUENCE_CONTROL{
u16 ShortData;
struct
@@ -65,5 +76,4 @@ typedef struct _BA_RECORD {
SEQUENCE_CONTROL BaStartSeqCtrl;
} BA_RECORD, *PBA_RECORD;
-#endif //end _BATYPE_H_
-
+#endif
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c
index 8c37dd124fc..ca611faf17b 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c
@@ -1,9 +1,21 @@
-/********************************************************************************************************************************
- * This file is created to process BA Action Frame. According to 802.11 spec, there are 3 BA action types at all. And as BA is
- * related to TS, this part need some struture defined in QOS side code. Also TX RX is going to be resturctured, so how to send
- * ADDBAREQ ADDBARSP and DELBA packet is still on consideration. Temporarily use MANAGE QUEUE instead of Normal Queue.
- * WB 2008-05-27
- * *****************************************************************************************************************************/
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#include "ieee80211.h"
#include "rtl819x_BA.h"
@@ -112,7 +124,6 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
u8* tag = NULL;
u16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
- //category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
	IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sent to:%pM, ieee->dev:%p\n", __FUNCTION__, type, Dst, ieee->dev);
if (pBA == NULL||ieee == NULL)
{
@@ -138,7 +149,6 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
BAReq->frame_control = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
- //tag += sizeof( struct ieee80211_hdr_3addr); //move to action field
tag = (u8*)skb_put(skb, 9);
*tag ++= ACT_CAT_BA;
*tag ++= type;
@@ -171,7 +181,6 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
return skb;
- //return NULL;
}
/********************************************************************************************************************
@@ -196,7 +205,6 @@ static struct sk_buff* ieee80211_DELBA(
struct ieee80211_hdr_3addr* Delba = NULL;
u8* tag = NULL;
u16 tmp = 0;
- //len = head len + DELBA Parameter Set(2) + Reason Code(2)
u16 len = 6 + ieee->tx_headroom;
if (net_ratelimit())
@@ -213,7 +221,6 @@ static struct sk_buff* ieee80211_DELBA(
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
return NULL;
}
-// memset(skb->data, 0, len+sizeof( struct ieee80211_hdr_3addr));
skb_reserve(skb, ieee->tx_headroom);
Delba = ( struct ieee80211_hdr_3addr *) skb_put(skb,sizeof( struct ieee80211_hdr_3addr));
@@ -258,9 +265,6 @@ void ieee80211_send_ADDBAReq(struct ieee80211_device* ieee, u8* dst, PBA_RECORD
if (skb)
{
softmac_mgmt_xmit(skb, ieee);
- //add statistic needed here.
- //and skb will be freed in softmac_mgmt_xmit(), so omit all dev_kfree_skb_any() outside softmac_mgmt_xmit()
- //WB
}
else
{
@@ -284,7 +288,6 @@ void ieee80211_send_ADDBARsp(struct ieee80211_device* ieee, u8* dst, PBA_RECORD
if (skb)
{
softmac_mgmt_xmit(skb, ieee);
- //same above
}
else
{
@@ -311,7 +314,6 @@ void ieee80211_send_DELBA(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA
if (skb)
{
softmac_mgmt_xmit(skb, ieee);
- //same above
}
else
{
@@ -361,8 +363,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
//some other capability is not ready now.
if( (ieee->current_network.qos_data.active == 0) ||
(ieee->pHTInfo->bCurrentHTSupport == false) ||
- (ieee->pHTInfo->IOTAction & HT_IOT_ACT_REJECT_ADDBA_REQ)) //||
- // (ieee->pStaQos->bEnableRxImmBA == false) )
+ (ieee->pHTInfo->IOTAction & HT_IOT_ACT_REJECT_ADDBA_REQ))
{
rc = ADDBA_STATUS_REFUSED;
IEEE80211_DEBUG(IEEE80211_DL_ERR, "Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
@@ -394,7 +395,6 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
goto OnADDBAReq_Fail;
}
// Admit the ADDBA Request
- //
DeActivateBAEntry(ieee, pBA);
pBA->DialogToken = *pDialogToken;
pBA->BaParamSet = *pBaParamSet;
@@ -406,10 +406,9 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
pBA->BaParamSet.field.BufferSize = 1;
else
pBA->BaParamSet.field.BufferSize = 32;
- ActivateBAEntry(ieee, pBA, 0);//pBA->BaTimeoutValue);
+ ActivateBAEntry(ieee, pBA, 0);
ieee80211_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
- // End of procedure.
return 0;
OnADDBAReq_Fail:
@@ -541,11 +540,11 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
pAdmittedBA->BaParamSet = *pBaParamSet;
DeActivateBAEntry(ieee, pAdmittedBA);
ActivateBAEntry(ieee, pAdmittedBA, *pBaTimeoutVal);
- }
- else
- {
- // Delay next ADDBA process.
+ } else {
pTS->bAddBaReqDelayed = true;
+ pTS->bDisable_AddBa = true;
+ ReasonCode = DELBA_REASON_END_BA;
+ goto OnADDBARsp_Reject;
}
// End of procedure
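The else branch now marks the traffic stream with the new bDisable_AddBa flag and tears the exchange down with a DELBA instead of silently delaying. A sketch of how a transmit path could consult that flag before starting another ADDBA handshake; the helper is illustrative, the fields come from TX_TS_RECORD:

/* Sketch only: skip further ADDBA attempts once the responder has
 * effectively refused block-ack for this traffic stream. */
static bool may_start_addba(PTX_TS_RECORD pTxTs)
{
	return !pTxTs->bDisable_AddBa &&
	       !pTxTs->bAddBaReqInProgress &&
	       !pTxTs->bUsingBa;
}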
@@ -635,7 +634,6 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
pTxTs->bAddBaReqInProgress = false;
pTxTs->bAddBaReqDelayed = false;
del_timer_sync(&pTxTs->TsAddBaTimer);
- //PlatformCancelTimer(Adapter, &pTxTs->TsAddBaTimer);
TxTsDeleteBA(ieee, pTxTs);
}
return 0;
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192su/ieee80211/rtl819x_HT.h
index a97c901edbf..17121891433 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_HT.h
@@ -1,3 +1,21 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef _RTL819XU_HTTYPE_H_
#define _RTL819XU_HTTYPE_H_
@@ -381,8 +399,7 @@ typedef struct _BSS_HT{
u16 bdHTInfoLen;
HT_SPEC_VER bdHTSpecVer;
- //HT_CAPABILITY_ELE bdHTCapEle;
- //HT_INFORMATION_ELE bdHTInfoEle;
+ HT_CHANNEL_WIDTH bdBandWidth;
u8 bdRT2RTAggregation;
u8 bdRT2RTLongSlotTime;
@@ -406,7 +423,7 @@ typedef struct _MIMO_EVM{
typedef struct _FALSE_ALARM_STATISTICS{
u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
+ u32 Cnt_Rate_Illegal;
u32 Cnt_Crc8_fail;
u32 Cnt_all;
}FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
@@ -476,10 +493,9 @@ typedef enum _HT_IOT_ACTION{
HT_IOT_ACT_FORCED_CTS2SELF = 0x00000200,
HT_IOT_ACT_FORCED_RTS = 0x00000400,
HT_IOT_ACT_AMSDU_ENABLE = 0x00000800,
- HT_IOT_ACT_MID_HIGHPOWER = 0x00001000,
- HT_IOT_ACT_REJECT_ADDBA_REQ = 0x00002000,
- HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT = 0x00004000,
- HT_IOT_ACT_EDCA_BIAS_ON_RX = 0x00008000,
+ HT_IOT_ACT_REJECT_ADDBA_REQ = 0x00001000,
+ HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT = 0x00002000,
+ HT_IOT_ACT_EDCA_BIAS_ON_RX = 0x00004000,
HT_IOT_ACT_HYBRID_AGGREGATION = 0x00010000,
HT_IOT_ACT_DISABLE_SHORT_GI = 0x00020000,
@@ -487,12 +503,19 @@ typedef enum _HT_IOT_ACTION{
HT_IOT_ACT_DISABLE_TX_40_MHZ = 0x00080000,
HT_IOT_ACT_TX_NO_AGGREGATION = 0x00100000,
HT_IOT_ACT_DISABLE_TX_2SS = 0x00200000,
+
+ HT_IOT_ACT_MID_HIGHPOWER = 0x00400000,
+ HT_IOT_ACT_NULL_DATA_POWER_SAVING = 0x00800000,
+
+ HT_IOT_ACT_DISABLE_CCK_RATE = 0x01000000,
+ HT_IOT_ACT_FORCED_ENABLE_BE_TXOP = 0x02000000,
+ HT_IOT_ACT_WA_IOT_Broadcom = 0x04000000,
}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
typedef enum _HT_IOT_RAFUNC{
+ HT_IOT_RAFUNC_DISABLE_ALL = 0x00,
HT_IOT_RAFUNC_PEER_1R = 0x01,
HT_IOT_RAFUNC_TX_AMSDU = 0x02,
- HT_IOT_RAFUNC_DISABLE_ALL = 0x80,
}HT_IOT_RAFUNC, *PHT_IOT_RAFUNC;
typedef enum _RT_HT_CAP{
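HT_IOT_ACTION_E values are bit flags OR-ed into pHTInfo->IOTAction, which is why the renumbering above is safe as long as callers test the symbolic names rather than raw constants. A minimal sketch of the usage pattern (the function is illustrative):

/* Sketch: IOT workarounds are set and tested as individual bits. */
static void iot_example(PRT_HIGH_THROUGHPUT pHTInfo)
{
	/* enable a workaround for the associated peer */
	pHTInfo->IOTAction |= HT_IOT_ACT_REJECT_ADDBA_REQ;

	/* and test it elsewhere with a bitwise AND, never with == */
	if (pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE) {
		/* A-MSDU aggregation allowed for this peer */
	}
}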
@@ -504,5 +527,4 @@ typedef enum _RT_HT_CAP{
RT_HT_CAP_USE_92SE = 0x20,
}RT_HT_CAPBILITY, *PRT_HT_CAPBILITY;
-#endif //_RTL819XU_HTTYPE_H_
-
+#endif
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c
index 01114c5181b..cfd9a1a5b38 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c
@@ -1,5 +1,21 @@
-
-//As this function is mainly ported from Windows driver, so leave the name little changed. If any confusion caused, tell me. Created by WB. 2008.05.08
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#include "ieee80211.h"
#include "rtl819x_HT.h"
u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
@@ -8,32 +24,31 @@ u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0
u16 MCS_DATA_RATE[2][2][77] =
{ { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78 ,104, 156, 208, 234, 260,
-	39, 78, 117, 156, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
+	39, 78, 117, 156, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195,
- 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
- 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz
- {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
- 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
- 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
- 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
- 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz
- { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
- 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
- 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
- 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
- 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
- {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
- 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
- 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
- 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
- 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz
+ 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
+ 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429},
+ {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
+ 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
+ 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
+ 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
+ 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} },
+ { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
+ 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
+ 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
+ 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
+ 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891},
+ {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
+ 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
+ 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
+ 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
+ 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} }
};
static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
-static u8 NETGEAR834Bv2_BROADCOM[3] = {0x00, 0x1b, 0x2f};
-static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f}; //cosa 03202008
+static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f};
static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
@@ -41,10 +56,9 @@ static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
static u8 DLINK_ATHEROS_1[3] = {0x00, 0x1c, 0xf0};
static u8 DLINK_ATHEROS_2[3] = {0x00, 0x21, 0x91};
static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
+static u8 NETGEAR_BROADCOM[3] = {0x00, 0x1f, 0x33};
static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
-// 2008/04/01 MH For Cisco G mode RX TP We need to change FW duration. Should we put the
-// code in other place??
-//static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96};
+
/********************************************************************************************************************
 *function:  This function updates default settings in the pHTInfo structure
* input: PRT_HIGH_THROUGHPUT pHTInfo
@@ -55,10 +69,7 @@ static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- //const typeof( ((struct ieee80211_device *)0)->pHTInfo ) *__mptr = &pHTInfo;
- //printk("pHTinfo:%p, &pHTinfo:%p, mptr:%p, offsetof:%x\n", pHTInfo, &pHTInfo, __mptr, offsetof(struct ieee80211_device, pHTInfo));
- //printk("===>ieee:%p,\n", ieee);
// ShortGI support
pHTInfo->bRegShortGI20MHz= 1;
pHTInfo->bRegShortGI40MHz= 1;
@@ -291,7 +302,6 @@ u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate)
* *****************************************************************************************************************/
u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate)
{
- //PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c};
u8 is40MHz = 0;
u8 isShortGI = 0;
@@ -307,28 +317,24 @@ u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate)
is40MHz = 0;
isShortGI = 0;
- // nDataRate = nDataRate - 12;
}
else if(nDataRate >=0x20 && nDataRate <= 0x2f ) //(27, 44)
{
is40MHz = 1;
isShortGI = 0;
- //nDataRate = nDataRate - 28;
}
else if(nDataRate >= 0x30 && nDataRate <= 0x3f ) //(43, 60)
{
is40MHz = 0;
isShortGI = 1;
- //nDataRate = nDataRate - 44;
}
else if(nDataRate >= 0x40 && nDataRate <= 0x4f ) //(59, 76)
{
is40MHz = 1;
isShortGI = 1;
- //nDataRate = nDataRate - 60;
}
return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf];
}
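The if/else ladder above decodes the hardware rate index: groups of 16 starting at 0x10 select the 20/40 MHz and long/short GI variants, and the low nibble picks the MCS entry in MCS_DATA_RATE (apparently in 500 kb/s units). An equivalent sketch of the decode for the HT range 0x10-0x4f (helper name is illustrative):

/* Sketch, equivalent to the branches above for nDataRate 0x10..0x4f. */
static u16 ht_rate_lookup(u8 nDataRate)
{
	u8 group = (nDataRate >> 4) - 1;	/* 0:20L 1:40L 2:20S 3:40S */
	u8 is40MHz = group & 1;
	u8 isShortGI = group >> 1;

	return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate & 0xf];
}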
@@ -351,7 +357,6 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee)
else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
(memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
(memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) ||
(net->broadcom_cap_exist))
retValue = true;
else if(net->bssht.bdRT2RTAggregation)
@@ -379,13 +384,15 @@ void HTIOTPeerDetermine(struct ieee80211_device* ieee)
if(net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_92SE){
pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK_92SE;
}
+ if(net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_SOFTAP){
+ pHTInfo->IOTPeer = HT_IOT_PEER_92U_SOFTAP;
+ }
}
else if(net->broadcom_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
(memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) )
+ (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0))
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
else if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
(memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
@@ -398,7 +405,7 @@ void HTIOTPeerDetermine(struct ieee80211_device* ieee)
(memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0)||
(memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0))
pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
- else if(memcmp(net->bssid, CISCO_BROADCOM, 3)==0)
+ else if ((memcmp(net->bssid, CISCO_BROADCOM, 3)==0)||net->cisco_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
net->marvell_cap_exist)
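HTIOTPeerDetermine above identifies the AP vendor by comparing the first three bytes of the BSSID against known OUIs, plus capability-IE hints such as broadcom_cap_exist and cisco_cap_exist. A trivial sketch of the OUI test (helper name is illustrative):

/* Sketch: an OUI match is just the first 3 bytes of the BSSID. */
static inline int bssid_matches_oui(const u8 *bssid, const u8 oui[3])
{
	return memcmp(bssid, oui, 3) == 0;
}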
@@ -439,25 +446,6 @@ u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr)
bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
{
bool retValue = false;
-
-#ifdef TODO
- // Apply for 819u only
-#if (HAL_CODE_BASE==RTL8192)
-
-#if (DEV_BUS_TYPE == USB_INTERFACE)
- // Alway disable MCS15 by Jerry Chang's request.by Emily, 2008.04.15
- retValue = true;
-#elif (DEV_BUS_TYPE == PCI_INTERFACE)
- // Enable MCS15 if the peer is Cisco AP. by Emily, 2008.05.12
-// if(pBssDesc->bCiscoCapExist)
-// retValue = false;
-// else
- retValue = false;
-#endif
-#endif
-#endif
- // Jerry Chang suggest that 8190 1x2 does not need to disable MCS15
-
return retValue;
}
@@ -624,17 +612,11 @@ HTIOCActRejcectADDBARequest(struct ieee80211_network *network)
HTIOTActIsEDCABiasRx(struct ieee80211_device* ieee,struct ieee80211_network *network)
{
u8 retValue = 0;
- //if(IS_HARDWARE_TYPE_8192SU(Adapter))
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
{
-//#if UNDER_VISTA
-// if(pBssDesc->Vender==HT_IOT_PEER_ATHEROS ||
-// pBssDesc->Vender==HT_IOT_PEER_RALINK)
-//#else
if(pHTInfo->IOTPeer==HT_IOT_PEER_ATHEROS ||
pHTInfo->IOTPeer==HT_IOT_PEER_BROADCOM ||
pHTInfo->IOTPeer==HT_IOT_PEER_RALINK)
-//#endif
return 1;
}
@@ -649,7 +631,6 @@ HTIOTActDisableShortGI(struct ieee80211_device* ieee,struct ieee80211_network *n
if(pHTInfo->IOTPeer==HT_IOT_PEER_RALINK)
{
- if(network->bssht.bdHT1R)
retValue = 1;
}
@@ -662,9 +643,10 @@ HTIOTActDisableHighPower(struct ieee80211_device* ieee,struct ieee80211_network
u8 retValue = 0;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- if(pHTInfo->IOTPeer==HT_IOT_PEER_RALINK)
+ if(pHTInfo->IOTPeer==HT_IOT_PEER_RALINK ||
+ pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK ||
+ pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK_92SE)
{
- if(network->bssht.bdHT1R)
retValue = 1;
}
@@ -718,8 +700,7 @@ HTIOTActIsTxNoAggregation(struct ieee80211_device* ieee,struct ieee80211_network
(KEY_TYPE_WEP40 == ieee->group_key_type) ||
(KEY_TYPE_TKIP == ieee->pairwise_key_type) )
{
- if(pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK ||
- pHTInfo->IOTPeer==HT_IOT_PEER_UNKNOWN)
+ if(pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK)
retValue = 1;
}
@@ -751,10 +732,11 @@ bool HTIOCActAllowPeerAggOnePacket(struct ieee80211_device* ieee,struct ieee8021
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
+ if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
{
- if(pHTInfo->IOTPeer == HT_IOT_PEER_MARVELL)
+ if((memcmp(network->bssid, NETGEAR_BROADCOM, 3)==0)
+ && (network->bssht.bdBandWidth == HT_CHANNEL_WIDTH_20_40))
return true;
-
}
return retValue;
}
@@ -783,7 +765,6 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_CAPABILITY_ELE pCapELE = NULL;
- //u8 bIsDeclareMCS13;
if ((posHTCap == NULL) || (pHT == NULL))
{
@@ -813,13 +794,11 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
}
-// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
pCapELE->MimoPwrSave = pHT->SelfMimoPs;
pCapELE->GreenField = 0; // This feature is not supported now!!
pCapELE->ShortGI20Mhz = 1; // We can receive Short GI!!
pCapELE->ShortGI40Mhz = 1; // We can receive Short GI!!
- //DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
- //pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
+
pCapELE->TxSTBC = 1;
pCapELE->RxSTBC = 0;
pCapELE->DelayBA = 0; // Do not support now!!
@@ -879,12 +858,6 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
else
*len = 26 + 2;
-
-
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTCap, *len -2);
-
- //Print each field in detail. Driver should not print out this message by default
-// HTDebugHTCapability(posHTCap, (u8*)"HTConstructCapability()");
return;
}
@@ -938,8 +911,6 @@ void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* le
//STA should not generate High Throughput Information Element
*len = 0;
}
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTInfo, *len - 2);
- //HTDebugHTInfo(posHTInfo, "HTConstructInforElement");
return;
}
@@ -1005,7 +976,6 @@ void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg,
*/
#else
- // Do Nothing
#endif
posRT2RTAgg->Length = 6;
@@ -1188,12 +1158,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
return;
}
IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTCapBuf, sizeof(HT_CAPABILITY_ELE));
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTInfoBuf, sizeof(HT_INFORMATION_ELE));
-// HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq");
-// HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq");
- //
if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap)))
pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]);
else
@@ -1209,12 +1174,10 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Configurations:
////////////////////////////////////////////////////////
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE));
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTInfo, sizeof(HT_INFORMATION_ELE));
- // Config Supported Channel Width setting
- //
+
HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
-// if(pHTInfo->bCurBW40MHz == true)
+ if(pHTInfo->bCurBW40MHz == true)
pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);
//
@@ -1295,7 +1258,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// <2> Set AMPDU Minimum MPDU Start Spacing
// 802.11n 3.0 section 9.7d.3
-#if 1
+#if 0
if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
else
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h
index d4565ecc7ab..928062f3571 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h
@@ -1,3 +1,21 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
@@ -36,18 +54,6 @@
#define MAX_WMMELE_LENGTH 64
-//
-// QoS mode.
-// enum 0, 1, 2, 4: since we can use the OR(|) operation.
-//
-// QOS_MODE is redefined for enum can't be ++, | under C++ compiler, 2006.05.17, by rcnjko.
-//typedef enum _QOS_MODE{
-// QOS_DISABLE = 0,
-// QOS_WMM = 1,
-// QOS_EDCA = 2,
-// QOS_HCCA = 4,
-//}QOS_MODE,*PQOS_MODE;
-//
typedef u32 QOS_MODE, *PQOS_MODE;
#define QOS_DISABLE 0
#define QOS_WMM 1
@@ -219,19 +225,6 @@ typedef union _QOS_INFO_FIELD{
}QOS_INFO_FIELD, *PQOS_INFO_FIELD;
-//
-// ACI to AC coding.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-// AC_CODING is redefined for enum can't be ++, | under C++ compiler, 2006.05.17, by rcnjko.
-//typedef enum _AC_CODING{
-// AC0_BE = 0, // ACI: 0x00 // Best Effort
-// AC1_BK = 1, // ACI: 0x01 // Background
-// AC2_VI = 2, // ACI: 0x10 // Video
-// AC3_VO = 3, // ACI: 0x11 // Voice
-// AC_MAX = 4, // Max: define total number; Should not to be used as a real enum.
-//}AC_CODING,*PAC_CODING;
-//
typedef u32 AC_CODING;
#define AC0_BE 0 // ACI: 0x00 // Best Effort
#define AC1_BK 1 // ACI: 0x01 // Background
@@ -252,7 +245,7 @@ typedef union _ACI_AIFSN{
u8 ACM:1;
u8 ACI:2;
u8 Reserved:1;
- }f; // Field
+ }f;
}ACI_AIFSN, *PACI_AIFSN;
//
@@ -265,7 +258,7 @@ typedef union _ECW{
{
u8 ECWmin:4;
u8 ECWmax:4;
- }f; // Field
+ }f;
}ECW, *PECW;
//
@@ -281,7 +274,7 @@ typedef union _AC_PARAM{
ACI_AIFSN AciAifsn;
ECW Ecw;
u16 TXOPLimit;
- }f; // Field
+ }f;
}AC_PARAM, *PAC_PARAM;
@@ -354,7 +347,7 @@ typedef union _TSPEC_BODY{
u32 MinPhyRate;
u16 SurplusBandwidthAllowance;
u16 MediumTime;
- } f; // Field
+ } f;
}TSPEC_BODY, *PTSPEC_BODY;
@@ -384,7 +377,6 @@ typedef enum _ACM_METHOD{
typedef struct _ACM{
-// u8 RegEnableACM;
u64 UsedTime;
u64 MediumTime;
	u8		HwAcmCtl;	// TRUE: UsedTime exceeded => Do NOT USE this AC. It will be written to ACM_CONTROL(0xBF BIT 0/1/2 in 8185B).
@@ -404,10 +396,6 @@ typedef u8 AC_UAPSD, *PAC_UAPSD;
#define GET_BE_UAPSD(_apsd) ((_apsd) & BIT3)
#define SET_BE_UAPSD(_apsd) ((_apsd) |= BIT3)
-
-//typedef struct _TCLASS{
-// TODO
-//} TCLASS, *PTCLASS;
typedef union _QOS_TCLAS{
struct _TYPE_GENERAL{
@@ -459,32 +447,12 @@ typedef union _QOS_TCLAS{
} TYPE2_8021Q;
} QOS_TCLAS, *PQOS_TCLAS;
-//typedef struct _WMM_TSTREAM{
-//
-//- TSPEC
-//- AC (which to mapping)
-//} WMM_TSTREAM, *PWMM_TSTREAM;
typedef struct _QOS_TSTREAM{
u8 AC;
WMM_TSPEC TSpec;
QOS_TCLAS TClass;
} QOS_TSTREAM, *PQOS_TSTREAM;
-//typedef struct _U_APSD{
-//- TriggerEnable [4]
-//- MaxSPLength
-//- HighestAcBuffered
-//} U_APSD, *PU_APSD;
-
-//joseph TODO:
-// UAPSD function should be implemented by 2 data structure
-// "Qos control field" and "Qos info field"
-//typedef struct _QOS_UAPSD{
-// u8 bTriggerEnable[4];
-// u8 MaxSPLength;
-// u8 HighestBufAC;
-//} QOS_UAPSD, *PQOS_APSD;
-
//----------------------------------------------------------------------------
// 802.11 Management frame Status Code field
//----------------------------------------------------------------------------
@@ -498,7 +466,6 @@ typedef struct _OCTET_STRING{
// Ref: DOT11_QOS in 8185 code. [def. in QoS_mp.h]
//
typedef struct _STA_QOS{
- //DECLARE_RT_OBJECT(STA_QOS);
u8 WMMIEBuf[MAX_WMMELE_LENGTH];
u8* WMMIE;
@@ -565,18 +532,9 @@ typedef struct _BSS_QOS{
AC_PARAM AcParameter[4];
}BSS_QOS, *PBSS_QOS;
-
-//
-// Ref: sQoSCtlLng and QoSCtl definition in 8185 QoS code.
-//#define QoSCtl (( (Adapter->bRegQoS) && (Adapter->dot11QoS.QoSMode &(QOS_EDCA|QOS_HCCA)) ) ?sQoSCtlLng:0)
-//
#define sQoSCtlLng 2
#define QOS_CTRL_LEN(_QosMode) ((_QosMode > QOS_DISABLE)? sQoSCtlLng : 0)
-
-//Added by joseph
-//UP Mapping to AC, using in MgntQuery_SequenceNumber() and maybe for DSCP
-//#define UP2AC(up) ((up<3)?((up==0)?1:0):(up>>1))
#define IsACValid(ac) ((ac<=7 )?true:false )
-#endif // #ifndef __INC_QOS_TYPE_H
+#endif
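QOS_CTRL_LEN above evaluates to sQoSCtlLng (2 bytes, the 802.11 QoS Control field) whenever any mode beyond QOS_DISABLE is active. A sketch of how a header-length computation would consume it; the helper name is illustrative, not part of this header:

/* Sketch only: size of a QoS data header given the active QoS mode. */
static u16 qos_hdr_len(QOS_MODE QosMode)
{
	/* 3-address header plus the optional 2-byte QoS Control field */
	return sizeof(struct ieee80211_hdr_3addr) + QOS_CTRL_LEN(QosMode);
}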
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_TS.h b/drivers/staging/rtl8192su/ieee80211/rtl819x_TS.h
index baaac2149de..a07b2344a6f 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_TS.h
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_TS.h
@@ -1,3 +1,21 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef _TSTYPE_H_
#define _TSTYPE_H_
#include "rtl819x_Qos.h"
@@ -30,10 +48,10 @@ typedef struct _TX_TS_RECORD{
u16 TxCurSeq;
BA_RECORD TxPendingBARecord; // For BA Originator
BA_RECORD TxAdmittedBARecord; // For BA Originator
-// QOS_DL_RECORD DLRecord;
u8 bAddBaReqInProgress;
u8 bAddBaReqDelayed;
u8 bUsingBa;
+ u8 bDisable_AddBa;
struct timer_list TsAddBaTimer;
u8 num;
} TX_TS_RECORD, *PTX_TS_RECORD;
@@ -48,9 +66,6 @@ typedef struct _RX_TS_RECORD {
u16 RxLastSeqNum;
u8 RxLastFragNum;
u8 num;
-// QOS_DL_RECORD DLRecord;
} RX_TS_RECORD, *PRX_TS_RECORD;
-
#endif
-
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c
index de143ecae5f..7ffc06ca89a 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c
@@ -1,3 +1,21 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#include "ieee80211.h"
#include <linux/etherdevice.h>
#include <linux/slab.h>
@@ -29,7 +47,6 @@ void RxPktPendingTimeout(unsigned long data)
PRX_REORDER_ENTRY pReorderEntry = NULL;
- //u32 flags = 0;
unsigned long flags = 0;
struct ieee80211_rxb *stats_IndicateArray[REORDER_WIN_SIZE];
u8 index = 0;
@@ -37,7 +54,6 @@ void RxPktPendingTimeout(unsigned long data)
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__FUNCTION__);
if(pRxTs->RxTimeoutIndicateSeq != 0xffff)
{
@@ -72,7 +88,6 @@ void RxPktPendingTimeout(unsigned long data)
if(index>0)
{
- // Set RxTimeoutIndicateSeq to 0xffff to indicate no pending packets in buffer now.
pRxTs->RxTimeoutIndicateSeq = 0xffff;
// Indicate packets
@@ -82,6 +97,7 @@ void RxPktPendingTimeout(unsigned long data)
return;
}
ieee80211_indicate_packets(ieee, stats_IndicateArray, index);
+ bPktInBuf = false;
}
if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
@@ -126,6 +142,7 @@ void ResetTxTsEntry(PTX_TS_RECORD pTS)
pTS->bAddBaReqInProgress = false;
pTS->bAddBaReqDelayed = false;
pTS->bUsingBa = false;
+ pTS->bDisable_AddBa = false;
ResetBaEntry(&pTS->TxAdmittedBARecord); //For BA Originator
ResetBaEntry(&pTS->TxPendingBARecord);
}
@@ -212,7 +229,6 @@ void TSInitialize(struct ieee80211_device *ieee)
}
// Initialize unused Rx Reorder List.
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
-//#ifdef TO_DO_LIST
for(count = 0; count < REORDER_ENTRY_NUM; count++)
{
list_add_tail( &pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
@@ -220,7 +236,6 @@ void TSInitialize(struct ieee80211_device *ieee)
break;
pRxReorderEntry = &ieee->RxReorderEntry[count+1];
}
-//#endif
}
@@ -236,7 +251,6 @@ void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 I
PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8 TID, TR_SELECT TxRxSelect)
{
- //DIRECTION_VALUE dir;
u8 dir;
bool search_dir[4] = {0, 0, 0, 0};
struct list_head* psearch_list; //FIXME
@@ -282,18 +296,15 @@ PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8
else
psearch_list = &ieee->Rx_TS_Admit_List;
- //for(dir = DIR_UP; dir <= DIR_BI_DIR; dir++)
for(dir = 0; dir <= DIR_BI_DIR; dir++)
{
if(search_dir[dir] ==false )
continue;
list_for_each_entry(pRet, psearch_list, List){
- // IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.f.TSInfo.field.ucTSID, pRet->TSpec.f.TSInfo.field.ucDirection);
if (memcmp(pRet->Addr, Addr, 6) == 0)
if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
if(pRet->TSpec.f.TSInfo.field.ucDirection == dir)
{
- // printk("Bingo! got it\n");
break;
}
@@ -352,10 +363,9 @@ bool GetTs(
//
if(is_broadcast_ether_addr(Addr) || is_multicast_ether_addr(Addr))
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "ERR! get TS for Broadcast or Multicast\n");
return false;
}
-
if (ieee->current_network.qos_data.supported == 0)
UP = 0;
else
@@ -363,7 +373,7 @@ bool GetTs(
// In WMM case: we use 4 TID only
if (!IsACValid(TID))
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " in %s(), TID(%d) is not valid\n", __FUNCTION__, TID);
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "ERR! in %s(), TID(%d) is not valid\n", __FUNCTION__, TID);
return false;
}
@@ -478,7 +488,6 @@ void RemoveTsEntry(
TR_SELECT TxRxSelect
)
{
- //u32 flags = 0;
unsigned long flags = 0;
del_timer_sync(&pTs->SetupTimer);
del_timer_sync(&pTs->InactTimer);
@@ -486,7 +495,6 @@ void RemoveTsEntry(
if(TxRxSelect == RX_DIR)
{
-//#ifdef TO_DO_LIST
PRX_REORDER_ENTRY pRxReorderEntry;
PRX_TS_RECORD pRxTS = (PRX_TS_RECORD)pTs;
if(timer_pending(&pRxTS->RxPktPendingTimer))
@@ -494,9 +502,7 @@ void RemoveTsEntry(
while(!list_empty(&pRxTS->RxPendingPktList))
{
- // PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
list_del_init(&pRxReorderEntry->List);
{
@@ -514,11 +520,8 @@ void RemoveTsEntry(
prxb = NULL;
}
list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
- //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
-
-//#endif
}
else
{
diff --git a/drivers/staging/rtl8192su/r8192SU_HWImg.c b/drivers/staging/rtl8192su/r8192SU_HWImg.c
index ba8e12c209c..7c4fd18d89c 100644
--- a/drivers/staging/rtl8192su/r8192SU_HWImg.c
+++ b/drivers/staging/rtl8192su/r8192SU_HWImg.c
@@ -93,7 +93,7 @@ u32 Rtl8192SUPHY_REG_2T2RArray[PHY_REG_2T2RArrayLength] = {
0x900,0x00000000,
0x904,0x00000023,
0x908,0x00000000,
-0x90c,0x03321333,
+0x90c,0x01121313,
0xa00,0x00d047c8,
0xa04,0x80ff0008,
0xa08,0x8ccd8300,
@@ -135,7 +135,7 @@ u32 Rtl8192SUPHY_REG_2T2RArray[PHY_REG_2T2RArrayLength] = {
0xc68,0x69543420,
0xc6c,0x433c0094,
0xc70,0x2c7f000d,
-0xc74,0x0186155b,
+0xc74,0x0186175b,
0xc78,0x0000001f,
0xc7c,0x00b91612,
0xc80,0x40000100,
@@ -256,13 +256,34 @@ u32 Rtl8192SUPHY_ChangeTo_2T2RArray[PHY_ChangeTo_2T2RArrayLength] = {
};
u32 Rtl8192SUPHY_REG_Array_PG[PHY_REG_Array_PGLength] = {
-0xe00,0xffffffff,0x06090909,
-0xe04,0xffffffff,0x00030406,
+0xe00,0xffffffff,0x04060606,
+0xe04,0xffffffff,0x00020204,
0xe08,0x0000ff00,0x00000000,
-0xe10,0xffffffff,0x0a0c0d0e,
-0xe14,0xffffffff,0x04070809,
-0xe18,0xffffffff,0x0a0c0d0e,
-0xe1c,0xffffffff,0x04070809,
+0xe10,0xffffffff,0x0408080a,
+0xe14,0xffffffff,0x00020204,
+0xe18,0xffffffff,0x0408080a,
+0xe1c,0xffffffff,0x00020204,
+0xe00,0xffffffff,0x00000000,
+0xe04,0xffffffff,0x00000000,
+0xe08,0x0000ff00,0x00000000,
+0xe10,0xffffffff,0x00000000,
+0xe14,0xffffffff,0x00000000,
+0xe18,0xffffffff,0x00000000,
+0xe1c,0xffffffff,0x00000000,
+0xe00,0xffffffff,0x00000000,
+0xe04,0xffffffff,0x00000000,
+0xe08,0x0000ff00,0x00000000,
+0xe10,0xffffffff,0x00000000,
+0xe14,0xffffffff,0x00000000,
+0xe18,0xffffffff,0x00000000,
+0xe1c,0xffffffff,0x00000000,
+0xe00,0xffffffff,0x00000000,
+0xe04,0xffffffff,0x00000000,
+0xe08,0x0000ff00,0x00000000,
+0xe10,0xffffffff,0x00000000,
+0xe14,0xffffffff,0x00000000,
+0xe18,0xffffffff,0x00000000,
+0xe1c,0xffffffff,0x00000000,
};
u32 Rtl8192SURadioA_1T_Array[RadioA_1T_ArrayLength] = {
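The PHY_REG_Array_PG data above is laid out as flat (register address, bit mask, value) triplets, which is why PHY_REG_Array_PGLength grows from 21 to 84 words (7 to 28 triplets) in the header below. A hedged sketch of such a loader, assuming that consumption model; the callback stands in for the driver's real baseband-write routine, whose name is not shown here:

/* Sketch (assumed consumer, not part of this patch): walk the array in
 * 3-word steps and program addr/mask/data into the baseband. */
static void load_phy_reg_pg(const u32 *arr, u32 len,
			    void (*write_bb)(u32 addr, u32 mask, u32 data))
{
	u32 i;

	for (i = 0; i + 2 < len; i += 3)
		write_bb(arr[i], arr[i + 1], arr[i + 2]);
}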
diff --git a/drivers/staging/rtl8192su/r8192SU_HWImg.h b/drivers/staging/rtl8192su/r8192SU_HWImg.h
index 36e84aff6ed..69a66c39960 100644
--- a/drivers/staging/rtl8192su/r8192SU_HWImg.h
+++ b/drivers/staging/rtl8192su/r8192SU_HWImg.h
@@ -1,3 +1,21 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef __INC_HAL8192SU_FW_IMG_H
#define __INC_HAL8192SU_FW_IMG_H
@@ -19,7 +37,7 @@ extern u32 Rtl8192SUPHY_ChangeTo_1T1RArray[PHY_ChangeTo_1T1RArrayLength];
extern u32 Rtl8192SUPHY_ChangeTo_1T2RArray[PHY_ChangeTo_1T2RArrayLength];
#define PHY_ChangeTo_2T2RArrayLength 45
extern u32 Rtl8192SUPHY_ChangeTo_2T2RArray[PHY_ChangeTo_2T2RArrayLength];
-#define PHY_REG_Array_PGLength 21
+#define PHY_REG_Array_PGLength 84
extern u32 Rtl8192SUPHY_REG_Array_PG[PHY_REG_Array_PGLength];
#define RadioA_1T_ArrayLength 202
extern u32 Rtl8192SURadioA_1T_Array[RadioA_1T_ArrayLength];
@@ -38,5 +56,5 @@ extern u32 Rtl8192SUMACPHY_Array_PG[MACPHY_Array_PGLength];
#define AGCTAB_ArrayLength 320
extern u32 Rtl8192SUAGCTAB_Array[AGCTAB_ArrayLength];
-#endif //__INC_HAL8192SU_FW_IMG_H
+#endif
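Note on the PHY_REG_Array_PGLength bump from 21 to 84: in this driver family the power-gain (PG) table is consumed three u32 entries at a time as (register offset, bitmask, value), so the header define has to track the entry count of the array in r8192SU_HWImg.c exactly (84 entries = 28 triplets). The stand-alone sketch below shows that consumption pattern; write_masked_reg() is an illustrative stand-in, not the driver's BB-register helper, and the table is a shortened copy of the first triplets above.

#include <stdint.h>
#include <stdio.h>

static const uint32_t phy_reg_pg[] = {
	0xe00, 0xffffffff, 0x04060606,
	0xe04, 0xffffffff, 0x00020204,
	0xe08, 0x0000ff00, 0x00000000,
};
#define PHY_REG_PG_LEN (sizeof(phy_reg_pg) / sizeof(phy_reg_pg[0]))

static void write_masked_reg(uint32_t addr, uint32_t mask, uint32_t val)
{
	/* A real driver would read-modify-write the baseband register here. */
	printf("reg 0x%03x mask 0x%08x <- 0x%08x\n",
	       (unsigned)addr, (unsigned)mask, (unsigned)val);
}

int main(void)
{
	size_t i;

	/* Each triplet is {register offset, bitmask, data}. */
	for (i = 0; i + 2 < PHY_REG_PG_LEN; i += 3)
		write_masked_reg(phy_reg_pg[i], phy_reg_pg[i + 1], phy_reg_pg[i + 2]);
	return 0;
}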
diff --git a/drivers/staging/rtl8192su/r8192SU_led.c b/drivers/staging/rtl8192su/r8192SU_led.c
index 609dba67eb4..5d96b356bf1 100644
--- a/drivers/staging/rtl8192su/r8192SU_led.c
+++ b/drivers/staging/rtl8192su/r8192SU_led.c
@@ -1087,22 +1087,13 @@ BlinkTimerCallback(
struct net_device *dev = (struct net_device *)data;
struct r8192_priv *priv = ieee80211_priv(dev);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
schedule_work(&(priv->BlinkWorkItem));
-#endif
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
void BlinkWorkItemCallback(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, BlinkWorkItem);
-#else
-void BlinkWorkItemCallback(void * Context)
-{
- struct net_device *dev = (struct net_device *)Context;
- struct r8192_priv *priv = ieee80211_priv(dev);
-#endif
PLED_819xUsb pLed = priv->pLed;
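The r8192SU_led.c hunk drops the pre-2.6.20 compatibility branch and keeps only the container_of()-based work callback: the handler receives a pointer to the embedded work item and recovers the enclosing private structure from it. The user-space sketch below re-creates that pattern with simplified stand-in types (work_item and fake_priv are illustrative, not the driver's struct work_struct and struct r8192_priv).

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*fn)(struct work_item *w);
};

struct fake_priv {
	int led_state;
	struct work_item blink_work;	/* embedded, like BlinkWorkItem */
};

static void blink_cb(struct work_item *w)
{
	/* Recover the enclosing private struct from the embedded member. */
	struct fake_priv *priv = container_of(w, struct fake_priv, blink_work);

	printf("led_state = %d\n", priv->led_state);
}

int main(void)
{
	struct fake_priv priv = {
		.led_state  = 1,
		.blink_work = { .fn = blink_cb },
	};

	priv.blink_work.fn(&priv.blink_work);	/* what schedule_work() leads to */
	return 0;
}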
diff --git a/drivers/staging/rtl8192su/r8192S_Efuse.c b/drivers/staging/rtl8192su/r8192S_Efuse.c
index f0ce6562c23..bbefd0f3034 100644
--- a/drivers/staging/rtl8192su/r8192S_Efuse.c
+++ b/drivers/staging/rtl8192su/r8192S_Efuse.c
@@ -1,27 +1,26 @@
/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
*
- * Module: Efuse.c ( Source C File)
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
- * Note: Copy from WMAC for the first version!!!!
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
*
- * Function:
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- *
- * 09/23/2008 MHC Porting Efuse R/W API from WMAC.
- * 11/10/2008 MHC 1. Porting from 8712 EFUSE.
- * 2. Add description and reorganize code arch.
- * 11/16/2008 MHC 1. Reorganize code architecture.
- * 2. Rename for some API and change extern or static type.
- *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include "r8192U.h"
#include "r8192S_hw.h"
@@ -30,20 +29,14 @@
#include "r8192S_Efuse.h"
#include <linux/types.h>
+#include <linux/ctype.h>
-//typedef int INT32;
-//
-// In the future, we will always support EFUSE!!
-//
-/*---------------------------Define Local Constant---------------------------*/
#define _POWERON_DELAY_
#define _PRE_EXECUTE_READ_CMD_
#define EFUSE_REPEAT_THRESHOLD_ 3
#define EFUSE_ERROE_HANDLE 1
-
-// From 8712!!!!!
typedef struct _EFUSE_MAP_A{
u8 offset; //0~15
u8 word_start; //0~3
@@ -91,14 +84,11 @@ struct efuse_priv
u8 tx_power_g[14];
};
-/*---------------------------Define Local Constant---------------------------*/
-
-
-/*------------------------Define global variable-----------------------------*/
const u8 MAX_PGPKT_SIZE = 9; //header+ 2* 4 words (BYTES)
const u8 PGPKT_DATA_SIZE = 8; //BYTES sizeof(u8)*8
const u32 EFUSE_MAX_SIZE = 512;
+const u8 EFUSE_OOB_PROTECT_BYTES = 14;
const EFUSE_MAP RTL8712_SDIO_EFUSE_TABLE[]={
//offset word_s byte_start byte_cnts
@@ -117,15 +107,6 @@ const EFUSE_MAP RTL8712_SDIO_EFUSE_TABLE[]={
/*TxPwIndex */ {11 ,0 ,0 ,28 } // 58~73h 3...4
};
-/*------------------------Define global variable-----------------------------*/
-
-
-/*------------------------Define local variable------------------------------*/
-
-/*------------------------Define local variable------------------------------*/
-
-
-/*--------------------Define function prototype-----------------------*/
//
// From WMAC Efuse one byte R/W
//
@@ -176,7 +157,7 @@ efuse_ParsingMap(char* szStr,u32* pu4bVal,u32* pu4bMove);
//
static u8
efuse_PgPacketRead( struct net_device* dev,u8 offset,u8 *data);
-static u8
+static u32
efuse_PgPacketWrite(struct net_device* dev,u8 offset,u8 word_en,u8 *data);
static void
efuse_WordEnableDataRead( u8 word_en,u8 *sourdata,u8 *targetdata);
@@ -194,7 +175,6 @@ efuse_CalculateWordCnts(u8 word_en);
#ifdef TO_DO_LIST
static void efuse_reg_ctrl(struct net_device* dev, u8 bPowerOn);
#endif
-/*--------------------Define function prototype-----------------------*/
@@ -242,8 +222,7 @@ EFUSE_Initialize(struct net_device* dev)
//Set E-fuse program time & read time : 0x30[30:24]=1110010b
write_nic_byte(dev, EFUSE_CTRL+3, 0x72);
-} /* EFUSE_Initialize */
-
+}
/*-----------------------------------------------------------------------------
* Function: EFUSE_Read1Byte
@@ -302,7 +281,7 @@ EFUSE_Read1Byte(struct net_device* dev, u16 Address)
else
return 0xFF;
-} /* EFUSE_Read1Byte */
+}
/*-----------------------------------------------------------------------------
@@ -324,13 +303,10 @@ EFUSE_Read1Byte(struct net_device* dev, u16 Address)
extern void
EFUSE_Write1Byte(struct net_device* dev, u16 Address,u8 Value)
{
- //u8 data;
u8 Bytetemp = {0x00};
u8 temp = {0x00};
u32 k=0;
- //RT_TRACE(COMP_EFUSE, "Addr=%x Data =%x\n", Address, Value);
-
if( Address < EFUSE_MAC_LEN) //E-fuse 512Byte
{
write_nic_byte(dev, EFUSE_CTRL, Value);
@@ -349,7 +325,6 @@ EFUSE_Write1Byte(struct net_device* dev, u16 Address,u8 Value)
temp = Bytetemp | 0x80;
write_nic_byte(dev, EFUSE_CTRL+3, temp);
- //Wait Write-ready (0x30[31]=0)
Bytetemp = read_nic_byte(dev, EFUSE_CTRL+3);
while(Bytetemp & 0x80)
{
@@ -363,8 +338,7 @@ EFUSE_Write1Byte(struct net_device* dev, u16 Address,u8 Value)
}
}
-} /* EFUSE_Write1Byte */
-
+}
#ifdef EFUSE_FOR_92SU
//
@@ -380,12 +354,10 @@ EFUSE_Write1Byte(struct net_device* dev, u16 Address,u8 Value)
//
void do_93c46(struct net_device* dev, u8 addorvalue)
{
- //u8 clear[1] = {0x0}; // cs=0 , sk=0 , di=0 , do=0
u8 cs[1] = {0x88}; // cs=1 , sk=0 , di=0 , do=0
u8 cssk[1] = {0x8c}; // cs=1 , sk=1 , di=0 , do=0
u8 csdi[1] = {0x8a}; // cs=1 , sk=0 , di=1 , do=0
u8 csskdi[1] = {0x8e}; // cs=1 , sk=1 , di=1 , do=0
- //u8 di[1] = {0x82}; // cs=0 , sk=0 , di=1 , do=0
u8 count;
for(count=0 ; count<8 ; count++)
@@ -424,7 +396,6 @@ u16 Read93C46(struct net_device* dev, u16 Reg )
u8 cssk[1] = {0x8c}; // cs=1 , sk=1 , di=0 , do=0
u8 csdi[1] = {0x8a}; // cs=1 , sk=0 , di=1 , do=0
u8 csskdi[1] = {0x8e}; // cs=1 , sk=1 , di=1 , do=0
- //u8 di[1] = {0x82}; // cs=0 , sk=0 , di=1 , do=0
u8 EepromSEL[1]={0x00};
u8 address;
@@ -434,7 +405,6 @@ u16 Read93C46(struct net_device* dev, u16 Reg )
address = (u8)Reg;
- // Suggested by SD1 Alex, 2008.10.20. Revised by Roger.
*EepromSEL= read_nic_byte(dev, EPROM_CMD);
if((*EepromSEL & 0x10) == 0x10) // select 93c46
@@ -486,13 +456,10 @@ u16 Read93C46(struct net_device* dev, u16 Reg )
void
ReadEFuseByte(struct net_device* dev,u16 _offset, u8 *pbuf)
{
-
- //u16 indexk=0;
u32 value32;
u8 readbyte;
u16 retry;
-
//Write Address
write_nic_byte(dev, EFUSE_CTRL+1, (_offset & 0xff));
readbyte = read_nic_byte(dev, EFUSE_CTRL+2);
@@ -505,7 +472,6 @@ ReadEFuseByte(struct net_device* dev,u16 _offset, u8 *pbuf)
//Check bit 32 read-ready
retry = 0;
value32 = read_nic_dword(dev, EFUSE_CTRL);
- //while(!(((value32 >> 24) & 0xff) & 0x80) && (retry<10))
while(!(((value32 >> 24) & 0xff) & 0x80) && (retry<10000))
{
value32 = read_nic_dword(dev, EFUSE_CTRL);
@@ -532,78 +498,145 @@ void
ReadEFuse(struct net_device* dev, u16 _offset, u16 _size_byte, u8 *pbuf)
{
- u8 efuseTbl[128];
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ u8 efuseTbl[EFUSE_MAP_LEN];
u8 rtemp8[1];
u16 eFuse_Addr = 0;
u8 offset, wren;
u16 i, j;
- u16 eFuseWord[16][4];// = {0xFF};//FIXLZM
-
- for(i=0; i<16; i++)
- for(j=0; j<4; j++)
- eFuseWord[i][j]=0xFF;
+ u16 eFuseWord[EFUSE_MAX_SECTION][EFUSE_MAX_WORD_UNIT];
+ u16 efuse_utilized = 0;
+ u16 efuse_usage = 0;
- // Do NOT excess total size of EFuse table. Added by Roger, 2008.11.10.
- if((_offset + _size_byte)>128)
- {// total E-Fuse table is 128bytes
- //RT_TRACE(COMP_EFUSE, "ReadEFuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",_offset, _size_byte);
+ if((_offset + _size_byte)>EFUSE_MAP_LEN)
+ {
printk("ReadEFuse(): Invalid offset with read bytes!!\n");
return;
}
- // Refresh efuse init map as all oxFF.
- for (i = 0; i < 128; i++)
- efuseTbl[i] = 0xFF;
+ for(i = 0; i < EFUSE_MAX_SECTION; i++)
+ for(j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ eFuseWord[i][j]=0xFFFF;
#if (EFUSE_READ_SWITCH == 1)
ReadEFuseByte(dev, eFuse_Addr, rtemp8);
#else
rtemp8[0] = EFUSE_Read1Byte(dev, eFuse_Addr);
#endif
- if(*rtemp8 != 0xFF) eFuse_Addr++;
- while((*rtemp8 != 0xFF) && (eFuse_Addr < 512)){
+ if(*rtemp8 != 0xFF){
+ efuse_utilized++;
+ RT_TRACE(COMP_EPROM, "Addr=%d\n", eFuse_Addr);
+ eFuse_Addr++;
+ }
+
+ while((*rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN))
+ {
offset = ((*rtemp8 >> 4) & 0x0f);
- if(offset <= 0x0F){
+ if(offset < EFUSE_MAX_SECTION)
+ {
wren = (*rtemp8 & 0x0f);
- for(i=0; i<4; i++){
- if(!(wren & 0x01)){
+ RT_TRACE(COMP_EPROM, "Offset-%d Worden=%x\n", offset, wren);
+
+ for(i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
+ {
+ if(!(wren & 0x01))
+ {
+ RT_TRACE(COMP_EPROM, "Addr=%d\n", eFuse_Addr);
#if (EFUSE_READ_SWITCH == 1)
ReadEFuseByte(dev, eFuse_Addr, rtemp8); eFuse_Addr++;
#else
rtemp8[0] = EFUSE_Read1Byte(dev, eFuse_Addr); eFuse_Addr++;
#endif
+ efuse_utilized++;
eFuseWord[offset][i] = (*rtemp8 & 0xff);
- if(eFuse_Addr >= 512) break;
+ if(eFuse_Addr >= EFUSE_REAL_CONTENT_LEN)
+ break;
+
+ RT_TRACE(COMP_EPROM, "Addr=%d\n", eFuse_Addr);
#if (EFUSE_READ_SWITCH == 1)
ReadEFuseByte(dev, eFuse_Addr, rtemp8); eFuse_Addr++;
#else
rtemp8[0] = EFUSE_Read1Byte(dev, eFuse_Addr); eFuse_Addr++;
#endif
+ efuse_utilized++;
eFuseWord[offset][i] |= (((u16)*rtemp8 << 8) & 0xff00);
- if(eFuse_Addr >= 512) break;
+ if(eFuse_Addr >= EFUSE_REAL_CONTENT_LEN)
+ break;
}
wren >>= 1;
}
}
+
+ RT_TRACE(COMP_EPROM, "Addr=%d\n", eFuse_Addr);
#if (EFUSE_READ_SWITCH == 1)
ReadEFuseByte(dev, eFuse_Addr, rtemp8);
#else
rtemp8[0] = EFUSE_Read1Byte(dev, eFuse_Addr); eFuse_Addr++;
#endif
- if(*rtemp8 != 0xFF && (eFuse_Addr < 512)) eFuse_Addr++;
+	if(*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN))
+ {
+ efuse_utilized++;
+ eFuse_Addr++;
+ }
}
- for(i=0; i<16; i++){
- for(j=0; j<4; j++){
+ for(i=0; i<EFUSE_MAX_SECTION; i++)
+ {
+ for(j=0; j<EFUSE_MAX_WORD_UNIT; j++)
+ {
efuseTbl[(i*8)+(j*2)]=(eFuseWord[i][j] & 0xff);
efuseTbl[(i*8)+((j*2)+1)]=((eFuseWord[i][j] >> 8) & 0xff);
}
}
for(i=0; i<_size_byte; i++)
pbuf[i] = efuseTbl[_offset+i];
+
+ efuse_usage = (u8)((efuse_utilized*100)/EFUSE_REAL_CONTENT_LEN);
+ priv->EfuseUsedBytes = efuse_utilized;
+ priv->EfuseUsedPercentage = (u8)efuse_usage;
}
-#endif // #if (EFUSE_FOR_92SU == 1)
+#endif
+
+extern bool
+EFUSE_ShadowUpdateChk(struct net_device* dev)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ u8 SectionIdx, i, Base;
+ u16 WordsNeed = 0, HdrNum = 0, TotalBytes = 0, EfuseUsed = 0;
+ bool bWordChanged, bResult = true;
+
+ for (SectionIdx = 0; SectionIdx < 16; SectionIdx++)
+ {
+ Base = SectionIdx * 8;
+ bWordChanged = false;
+
+ for (i = 0; i < 8; i=i+2)
+ {
+ if((priv->EfuseMap[EFUSE_INIT_MAP][Base+i] !=
+ priv->EfuseMap[EFUSE_MODIFY_MAP][Base+i]) ||
+ (priv->EfuseMap[EFUSE_INIT_MAP][Base+i+1] !=
+ priv->EfuseMap[EFUSE_MODIFY_MAP][Base+i+1]))
+ {
+ WordsNeed++;
+ bWordChanged = true;
+ }
+ }
+ if( bWordChanged==true )
+ HdrNum++;
+ }
+
+ TotalBytes = HdrNum + WordsNeed*2;
+ EfuseUsed = priv->EfuseUsedBytes;
+
+	if ((TotalBytes + EfuseUsed) >= (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))
+		bResult = false;
+
+ RT_TRACE(COMP_EPROM, "EFUSE_ShadowUpdateChk(): TotalBytes(%x), HdrNum(%x), WordsNeed(%x), EfuseUsed(%d)\n",
+ TotalBytes, HdrNum, WordsNeed, EfuseUsed);
+
+ return bResult;
+}
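The rewritten ReadEFuse() above expands the raw efuse stream into the 128-byte logical map: each non-0xFF header byte carries the section offset in its high nibble and a word-enable mask in its low nibble, where a cleared bit means that 16-bit word follows (low byte first). Below is a stand-alone decoder for the same layout using the new constants; it is an illustration of the loop, not the driver function itself.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EFUSE_MAX_SECTION	16
#define EFUSE_MAX_WORD_UNIT	4
#define EFUSE_MAP_LEN		(EFUSE_MAX_SECTION * EFUSE_MAX_WORD_UNIT * 2)

static void efuse_decode(const uint8_t *raw, size_t raw_len,
			 uint8_t map[EFUSE_MAP_LEN])
{
	size_t pos = 0;

	memset(map, 0xFF, EFUSE_MAP_LEN);	/* unprogrammed cells read 0xFF */

	while (pos < raw_len && raw[pos] != 0xFF) {
		uint8_t offset = raw[pos] >> 4;		/* section, 0..15   */
		uint8_t wren   = raw[pos] & 0x0F;	/* 0 bit = present  */
		int w;

		pos++;
		for (w = 0; w < EFUSE_MAX_WORD_UNIT; w++, wren >>= 1) {
			if (wren & 0x01)		/* word not programmed */
				continue;
			if (pos + 1 >= raw_len)
				return;
			map[offset * 8 + w * 2]     = raw[pos++];	/* low byte  */
			map[offset * 8 + w * 2 + 1] = raw[pos++];	/* high byte */
		}
	}
}

int main(void)
{
	/* Header 0x0c: section 0; word-enable 0b1100, so words 0 and 1 are present. */
	const uint8_t raw[] = { 0x0C, 0x34, 0x12, 0x78, 0x56, 0xFF };
	uint8_t map[EFUSE_MAP_LEN];

	efuse_decode(raw, sizeof(raw), map);
	printf("%02x %02x %02x %02x\n",
	       (unsigned)map[0], (unsigned)map[1], (unsigned)map[2], (unsigned)map[3]);
	return 0;
}

The companion EFUSE_ShadowUpdateChk() above then budgets the pending write as TotalBytes = HdrNum + 2*WordsNeed and rejects the update when that, added to EfuseUsedBytes, would exceed EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES (512 - 14 bytes).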
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowRead
@@ -624,8 +657,6 @@ ReadEFuse(struct net_device* dev, u16 _offset, u16 _size_byte, u8 *pbuf)
extern void
EFUSE_ShadowRead( struct net_device* dev, u8 Type, u16 Offset, u32 *Value)
{
- //HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
-
if (Type == 1)
efuse_ShadowRead1Byte(dev, Offset, (u8 *)Value);
else if (Type == 2)
@@ -633,8 +664,7 @@ EFUSE_ShadowRead( struct net_device* dev, u8 Type, u16 Offset, u32 *Value)
else if (Type == 4)
efuse_ShadowRead4Byte(dev, Offset, (u32 *)Value);
-} // EFUSE_ShadowRead
-
+}
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowWrite
@@ -655,8 +685,6 @@ EFUSE_ShadowRead( struct net_device* dev, u8 Type, u16 Offset, u32 *Value)
extern void
EFUSE_ShadowWrite( struct net_device* dev, u8 Type, u16 Offset,u32 Value)
{
- //HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
-
if (Offset >= 0x18 && Offset <= 0x1F)
return;
@@ -667,8 +695,7 @@ EFUSE_ShadowWrite( struct net_device* dev, u8 Type, u16 Offset,u32 Value)
else if (Type == 4)
efuse_ShadowWrite4Byte(dev, Offset, (u32)Value);
-} // EFUSE_ShadowWrite
-
+}
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowUpdate
@@ -686,15 +713,25 @@ EFUSE_ShadowWrite( struct net_device* dev, u8 Type, u16 Offset,u32 Value)
* 11/12/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
-extern void
+extern bool
EFUSE_ShadowUpdate(struct net_device* dev)
{
- //HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter);
struct r8192_priv *priv = ieee80211_priv(dev);
u16 i, offset, base = 0;
u8 word_en = 0x0F;
- bool first_pg = false;
- // For Efuse write action, we must enable LDO2.5V and 40MHZ clk.
+ bool first_pg = false;
+
+ RT_TRACE(COMP_EPROM, "--->EFUSE_ShadowUpdate()\n");
+
+ if(!EFUSE_ShadowUpdateChk(dev))
+ {
+ efuse_ReadAllMap(dev, &priv->EfuseMap[EFUSE_INIT_MAP][0]);
+ memcpy((void *)&priv->EfuseMap[EFUSE_MODIFY_MAP][0],
+ (void *)&priv->EfuseMap[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE_92S);
+
+ RT_TRACE(COMP_EPROM, "<---EFUSE_ShadowUpdate(): Efuse out of capacity!!\n");
+ return false;
+ }
efuse_PowerSwitch(dev, TRUE);
//
@@ -712,16 +749,12 @@ EFUSE_ShadowUpdate(struct net_device* dev)
//
for (i = 0; i < 8; i++)
{
- if (offset == 0 && priv->EfuseMap[EFUSE_INIT_MAP][base+i] == 0xFF)
- {
- first_pg = TRUE;
- }
-
- // 2008/12/11 MH HW autoload fail workaround for A/BCUT.
-
if (first_pg == TRUE)
{
word_en &= ~(1<<(i/2));
+ RT_TRACE(COMP_EPROM,"Section(%x) Addr[%x] %x update to %x, Word_En=%02x\n",
+ offset, base+i, priv->EfuseMap[EFUSE_INIT_MAP][base+i],
+ priv->EfuseMap[EFUSE_MODIFY_MAP][base+i],word_en);
priv->EfuseMap[EFUSE_INIT_MAP][base+i] =
priv->EfuseMap[EFUSE_MODIFY_MAP][base+i];
}else
@@ -730,8 +763,9 @@ EFUSE_ShadowUpdate(struct net_device* dev)
priv->EfuseMap[EFUSE_MODIFY_MAP][base+i])
{
word_en &= ~(EFUSE_BIT(i/2));
- //RT_TRACE(COMP_EFUSE, "Offset=%d Addr%x %x ==> %x Word_En=%02x\n",
- //offset, base+i, priv->EfuseMap[0][base+i], priv->EfuseMap[1][base+i],word_en);
+ RT_TRACE(COMP_EPROM, "Section(%x) Addr[%x] %x update to %x, Word_En=%02x\n",
+ offset, base+i, priv->EfuseMap[0][base+i],
+ priv->EfuseMap[1][base+i],word_en);
// Update init table!!!
priv->EfuseMap[EFUSE_INIT_MAP][base+i] =
@@ -747,25 +781,27 @@ EFUSE_ShadowUpdate(struct net_device* dev)
{
u8 tmpdata[8];
- //FIXLZM
- memcpy(tmpdata, &(priv->EfuseMap[EFUSE_MODIFY_MAP][base]), 8);
- //RT_PRINT_DATA(COMP_INIT, DBG_LOUD, ("U-EFUSE\n"), tmpdata, 8);
- efuse_PgPacketWrite(dev,(u8)offset,word_en,tmpdata);
+ memcpy((void *)tmpdata, (void *)&(priv->EfuseMap[EFUSE_MODIFY_MAP][base]), 8);
+ RT_TRACE(COMP_INIT, "U-EFUSE\n");
+
+ if(!efuse_PgPacketWrite(dev,(u8)offset,word_en,tmpdata))
+ {
+ RT_TRACE(COMP_EPROM,"EFUSE_ShadowUpdate(): PG section(%x) fail!!\n", offset);
+ break;
+ }
}
}
- // 2008/12/01 MH For Efuse HW load bug workarounf method!!!!
- // We will force write 0x10EC into address 10&11 after all Efuse content.
- //
-
-
// For warm reboot, we must resume Efuse clock to 500K.
+
efuse_PowerSwitch(dev, FALSE);
- // 2008/12/01 MH We update shadow content again!!!!
- EFUSE_ShadowMapUpdate(dev);
-} // EFUSE_ShadowUpdate
+ efuse_ReadAllMap(dev, &priv->EfuseMap[EFUSE_INIT_MAP][0]);
+ memcpy((void *)&priv->EfuseMap[EFUSE_MODIFY_MAP][0],
+ (void *)&priv->EfuseMap[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE_92S);
+ return true;
+}
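EFUSE_ShadowUpdate() above programs only the words whose shadow copy (EFUSE_MODIFY_MAP) differs from the on-chip copy (EFUSE_INIT_MAP), encoding the result per 8-byte section as a word_en mask in which a cleared bit (via EFUSE_BIT(i/2)) selects a word to write. A minimal sketch of that comparison on plain local arrays rather than driver state:

#include <stdint.h>
#include <stdio.h>

static uint8_t section_word_en(const uint8_t init[8], const uint8_t mod[8])
{
	uint8_t word_en = 0x0F;		/* all bits set: nothing to program */
	int i;

	for (i = 0; i < 8; i += 2) {
		if (init[i] != mod[i] || init[i + 1] != mod[i + 1])
			word_en &= (uint8_t)~(1u << (i / 2));	/* 0 = program this word */
	}
	return word_en;
}

int main(void)
{
	const uint8_t init[8] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	const uint8_t mod[8]  = { 0xFF, 0xFF, 0x34, 0x12, 0xFF, 0xFF, 0xFF, 0xFF };

	/* Only word 1 (bytes 2-3) changed, so bit 1 is cleared: expect 0x0d. */
	printf("word_en = 0x%02x\n", (unsigned)section_word_en(init, mod));
	return 0;
}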
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowMapUpdate
@@ -792,12 +828,10 @@ extern void EFUSE_ShadowMapUpdate(struct net_device* dev)
}else{
efuse_ReadAllMap(dev, &priv->EfuseMap[EFUSE_INIT_MAP][0]);
}
- //PlatformMoveMemory(&priv->EfuseMap[EFUSE_MODIFY_MAP][0],
- //&priv->EfuseMap[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE_92S);//FIXLZM
- memcpy(&priv->EfuseMap[EFUSE_MODIFY_MAP][0],
- &priv->EfuseMap[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE_92S);
+ memcpy((void *)&priv->EfuseMap[EFUSE_MODIFY_MAP][0],
+ (void *)&priv->EfuseMap[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE_92S);
-} // EFUSE_ShadowMapUpdate
+}
extern void
EFUSE_ForceWriteVendorId( struct net_device* dev)
@@ -810,7 +844,7 @@ EFUSE_ForceWriteVendorId( struct net_device* dev)
efuse_PowerSwitch(dev, FALSE);
-} // EFUSE_ForceWriteVendorId
+}
/*-----------------------------------------------------------------------------
* Function: efuse_ShadowRead1Byte
@@ -837,7 +871,7 @@ efuse_ShadowRead1Byte(struct net_device* dev, u16 Offset, u8 *Value)
*Value = priv->EfuseMap[EFUSE_MODIFY_MAP][Offset];
-} // EFUSE_ShadowRead1Byte
+}
//---------------Read Two Bytes
static void
@@ -848,7 +882,7 @@ efuse_ShadowRead2Byte(struct net_device* dev, u16 Offset, u16 *Value)
*Value = priv->EfuseMap[EFUSE_MODIFY_MAP][Offset];
*Value |= priv->EfuseMap[EFUSE_MODIFY_MAP][Offset+1]<<8;
-} // EFUSE_ShadowRead2Byte
+}
//---------------Read Four Bytes
static void
@@ -861,7 +895,7 @@ efuse_ShadowRead4Byte(struct net_device* dev, u16 Offset, u32 *Value)
*Value |= priv->EfuseMap[EFUSE_MODIFY_MAP][Offset+2]<<16;
*Value |= priv->EfuseMap[EFUSE_MODIFY_MAP][Offset+3]<<24;
-} // efuse_ShadowRead4Byte
+}
@@ -890,7 +924,7 @@ efuse_ShadowWrite1Byte(struct net_device* dev, u16 Offset, u8 Value)
priv->EfuseMap[EFUSE_MODIFY_MAP][Offset] = Value;
-} // efuse_ShadowWrite1Byte
+}
//---------------Write Two Bytes
static void
@@ -901,7 +935,7 @@ efuse_ShadowWrite2Byte(struct net_device* dev, u16 Offset, u16 Value)
priv->EfuseMap[EFUSE_MODIFY_MAP][Offset] = Value&0x00FF;
priv->EfuseMap[EFUSE_MODIFY_MAP][Offset+1] = Value>>8;
-} // efuse_ShadowWrite1Byte
+}
//---------------Write Four Bytes
static void
@@ -914,10 +948,8 @@ efuse_ShadowWrite4Byte(struct net_device* dev, u16 Offset, u32 Value)
priv->EfuseMap[EFUSE_MODIFY_MAP][Offset+2] = (u8)((Value>>16)&0x00FF);
priv->EfuseMap[EFUSE_MODIFY_MAP][Offset+3] = (u8)((Value>>24)&0xFF);
-} // efuse_ShadowWrite1Byte
-
+}
-/* 11/16/2008 MH Read one byte from real Efuse. */
static u8
efuse_OneByteRead(struct net_device* dev, u16 addr,u8 *data)
{
@@ -947,7 +979,7 @@ efuse_OneByteRead(struct net_device* dev, u16 addr,u8 *data)
bResult = FALSE;
}
return bResult;
-} // efuse_OneByteRead
+}
 /* 11/16/2008 MH Write one byte to real Efuse. */
static u8
@@ -956,10 +988,6 @@ efuse_OneByteWrite(struct net_device* dev, u16 addr, u8 data)
u8 tmpidx = 0;
u8 bResult;
- //RT_TRACE(COMP_EFUSE, "Addr = %x Data=%x\n", addr, data);
-
- //return 0;
-
// -----------------e-fuse reg ctrl ---------------------------------
//address
write_nic_byte(dev, EFUSE_CTRL+1, (u8)(addr&0xff));
@@ -983,8 +1011,7 @@ efuse_OneByteWrite(struct net_device* dev, u16 addr, u8 data)
}
return bResult;
-} // efuse_OneByteWrite
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_ReadAllMap
@@ -1005,19 +1032,13 @@ efuse_OneByteWrite(struct net_device* dev, u16 addr, u8 data)
static void
efuse_ReadAllMap(struct net_device* dev, u8 *Efuse)
{
- //u8 pg_data[8];
- //u8 offset = 0;
- //u8 tmpidx;
- //static u8 index = 0;
-
//
// We must enable clock and LDO 2.5V otherwise, read all map will be fail!!!!
//
efuse_PowerSwitch(dev, TRUE);
ReadEFuse(dev, 0, 128, Efuse);
efuse_PowerSwitch(dev, FALSE);
-} // efuse_ReadAllMap
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_WriteAllMap
@@ -1057,18 +1078,11 @@ efuse_WriteAllMap(struct net_device* dev,u8 *eeprom, u32 eeprom_size)
// 0x18-1f Reserve >0x50 Reserve for tx power
if (offset == 3/* || offset > 9*/)
continue;//word_en = 0x0F;
- //else if (offset == 9) // 0x4c-4f Reserve
- //word_en = 0x0C;
else
word_en = 0x00;
}
- //RT_TRACE(COMP_EFUSE, ("Addr=%d size=%d Word_En=%02x\n", offset, eeprom_size, word_en));
- //memcpy(tmpdata,eeprom+(offset*PGPKT_DATA_SIZE),8);
memcpy(tmpdata, (eeprom+(offset*PGPKT_DATA_SIZE)), 8);
-
- //RT_PRINT_DATA(COMP_INIT, DBG_LOUD, ("EFUSE\t"), tmpdata, 8);
-
efuse_PgPacketWrite(dev,offset,word_en,tmpdata);
@@ -1077,7 +1091,7 @@ efuse_WriteAllMap(struct net_device* dev,u8 *eeprom, u32 eeprom_size)
// For warm reboot, we must resume Efuse clock to 500K.
efuse_PowerSwitch(dev, FALSE);
-} // efuse_WriteAllMap
+}
#endif
/*-----------------------------------------------------------------------------
@@ -1114,15 +1128,9 @@ efuse_PgPacketRead( struct net_device* dev, u8 offset, u8 *data)
if(data==NULL) return FALSE;
if(offset>15) return FALSE;
- //FIXLZM
- //PlatformFillMemory((PVOID)data, sizeof(u8)*PGPKT_DATA_SIZE, 0xff);
- //PlatformFillMemory((PVOID)tmpdata, sizeof(u8)*PGPKT_DATA_SIZE, 0xff);
memset(data, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
memset(tmpdata, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("efuse_PgPacketRead-1\n"), data, 8);
-
- //efuse_reg_ctrl(pAdapter,TRUE);//power on
while(bContinual && (efuse_addr < EFUSE_MAX_SIZE) )
{
//------- Header Read -------------
@@ -1169,9 +1177,6 @@ efuse_PgPacketRead( struct net_device* dev, u8 offset, u8 *data)
}
}
- //efuse_reg_ctrl(pAdapter,FALSE);//power off
-
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("efuse_PgPacketRead-2\n"), data, 8);
if( (data[0]==0xff) &&(data[1]==0xff) && (data[2]==0xff) && (data[3]==0xff) &&
(data[4]==0xff) &&(data[5]==0xff) && (data[6]==0xff) && (data[7]==0xff))
@@ -1179,8 +1184,7 @@ efuse_PgPacketRead( struct net_device* dev, u8 offset, u8 *data)
else
return TRUE;
-} // efuse_PgPacketRead
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_PgPacketWrite
@@ -1200,7 +1204,7 @@ efuse_PgPacketRead( struct net_device* dev, u8 offset, u8 *data)
* 11/16/2008 MHC Reorganize code Arch and assign as local API.
*
*---------------------------------------------------------------------------*/
-static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *data)
+static u32 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *data)
{
u8 WriteState = PG_STATE_HEADER;
@@ -1210,12 +1214,9 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
u8 pg_header = 0;
- //u16 tmp_addr=0;
u8 tmp_word_cnts=0,target_word_cnts=0;
u8 tmp_header,match_word_en,tmp_word_en;
- //u8 efuse_clk_ori,efuse_clk_new;
-
PGPKT_STRUCT target_pkt;
PGPKT_STRUCT tmp_pkt;
@@ -1240,7 +1241,6 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
efuse_WordEnableDataRead(word_en,data,target_pkt.data);
target_word_cnts = efuse_CalculateWordCnts(target_pkt.word_en);
- //efuse_reg_ctrl(pAdapter,TRUE);//power on
printk("EFUSE Power ON\n");
while( bContinual && (efuse_addr < EFUSE_MAX_SIZE) )
@@ -1312,14 +1312,12 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
badworden = efuse_WordEnableDataWrite(dev,efuse_addr+1, tmp_pkt.word_en ,target_pkt.data);
//************ so-2-2-A-1 *******************
- //############################
if(0x0F != (badworden&0x0F))
{
u8 reorg_offset = offset;
u8 reorg_worden=badworden;
efuse_PgPacketWrite(dev,reorg_offset,reorg_worden,originaldata);
}
- //############################
tmp_word_en = 0x0F;
if( (target_pkt.word_en&BIT0)^(match_word_en&BIT0) )
@@ -1342,13 +1340,13 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
//************ so-2-2-A-2 *******************
if((tmp_word_en&0x0F)!=0x0F){
//reorganize other pg packet
- //efuse_addr = efuse_addr + (2*tmp_word_cnts) +1;//next pg packet addr
- efuse_addr = efuse_GetCurrentSize(dev);
- //===========================
- target_pkt.offset = offset;
+
+ efuse_addr = efuse_GetCurrentSize(dev);
+
+ target_pkt.offset = offset;
target_pkt.word_en= tmp_word_en;
- //===========================
- }else{
+
+ }else{
bContinual = FALSE;
}
#if (EFUSE_ERROE_HANDLE == 1)
@@ -1363,10 +1361,8 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
else{//************ so-2-2-B *******************
//reorganize other pg packet
efuse_addr = efuse_addr + (2*tmp_word_cnts) +1;//next pg packet addr
- //===========================
target_pkt.offset = offset;
target_pkt.word_en= target_pkt.word_en;
- //===========================
#if (EFUSE_ERROE_HANDLE == 1)
WriteState=PG_STATE_HEADER;
#endif
@@ -1405,13 +1401,10 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
//************ s1-2-A :cover the exist data *******************
memset(originaldata,0xff,sizeof(u8)*8);
- //PlatformFillMemory((PVOID)originaldata, sizeof(u8)*8, 0xff);
if(efuse_PgPacketRead( dev, tmp_pkt.offset,originaldata))
{ //check if data exist
- //efuse_reg_ctrl(pAdapter,TRUE);//power on
badworden = efuse_WordEnableDataWrite(dev,efuse_addr+1,tmp_pkt.word_en,originaldata);
- //############################
if(0x0F != (badworden&0x0F))
{
u8 reorg_offset = tmp_pkt.offset;
@@ -1419,7 +1412,6 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
efuse_PgPacketWrite(dev,reorg_offset,reorg_worden,originaldata);
efuse_addr = efuse_GetCurrentSize(dev);
}
- //############################
else{
efuse_addr = efuse_addr + (tmp_word_cnts*2) +1; //Next pg_packet
}
@@ -1459,11 +1451,9 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
{//reorganize other pg packet //************ s1-1-B *******************
efuse_addr = efuse_addr + (2*target_word_cnts) +1;//next pg packet addr
- //===========================
target_pkt.offset = offset;
target_pkt.word_en= badworden;
target_word_cnts = efuse_CalculateWordCnts(target_pkt.word_en);
- //===========================
#if (EFUSE_ERROE_HANDLE == 1)
WriteState=PG_STATE_HEADER;
repeat_times++;
@@ -1477,11 +1467,12 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
}
}
- //efuse_reg_ctrl(pAdapter,FALSE);//power off
-
+ if(efuse_addr >= (EFUSE_MAX_SIZE-EFUSE_OOB_PROTECT_BYTES))
+ {
+ RT_TRACE(COMP_EPROM, "efuse_PgPacketWrite(): efuse_addr(%x) Out of size!!\n", efuse_addr);
+ }
return TRUE;
-} // efuse_PgPacketWrite
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_WordEnableDataRead
@@ -1503,12 +1494,6 @@ static u8 efuse_PgPacketWrite(struct net_device* dev, u8 offset, u8 word_en,u8 *
static void
efuse_WordEnableDataRead( u8 word_en,u8 *sourdata,u8 *targetdata)
{
- //u8 tmpindex = 0;
-
- //DbgPrint("efuse_WordEnableDataRead word_en = %x\n", word_en);
-
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("sourdata\n"), sourdata, 8);
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("targetdata\n"), targetdata, 8);
if (!(word_en&BIT0))
{
@@ -1530,8 +1515,7 @@ efuse_WordEnableDataRead( u8 word_en,u8 *sourdata,u8 *targetdata)
targetdata[6] = sourdata[6];//sourdata[tmpindex++];
targetdata[7] = sourdata[7];//sourdata[tmpindex++];
}
-} // efuse_WordEnableDataRead
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_WordEnableDataWrite
@@ -1555,15 +1539,9 @@ efuse_WordEnableDataWrite( struct net_device* dev, u16 efuse_addr, u8 word_en, u
u16 tmpaddr = 0;
u16 start_addr = efuse_addr;
u8 badworden = 0x0F;
- //u8 NextState;
u8 tmpdata[8];
memset(tmpdata,0xff,PGPKT_DATA_SIZE);
- //PlatformFillMemory((PVOID)tmpdata, PGPKT_DATA_SIZE, 0xff);
-
- //RT_TRACE(COMP_EFUSE, "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
-
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("U-EFUSE\n"), data, 8);
if(!(word_en&BIT0))
{
@@ -1614,8 +1592,7 @@ efuse_WordEnableDataWrite( struct net_device* dev, u16 efuse_addr, u8 word_en, u
}
}
return badworden;
-} // efuse_WordEnableDataWrite
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_PowerSwitch
@@ -1658,8 +1635,7 @@ efuse_PowerSwitch(struct net_device* dev, u8 PwrState)
write_nic_byte(dev, EFUSE_CLK, 0x02);
}
-} /* efuse_PowerSwitch */
-
+}
/*-----------------------------------------------------------------------------
* Function: efuse_GetCurrentSize
@@ -1686,8 +1662,6 @@ efuse_GetCurrentSize(struct net_device* dev)
u8 hoffset=0,hworden=0;
u8 efuse_data,word_cnts=0;
- //efuse_reg_ctrl(pAdapter,TRUE);//power on
-
while ( bContinual &&
efuse_OneByteRead(dev, efuse_addr ,&efuse_data) &&
(efuse_addr < EFUSE_MAX_SIZE) )
@@ -1706,12 +1680,9 @@ efuse_GetCurrentSize(struct net_device* dev)
}
}
- //efuse_reg_ctrl(pAdapter,FALSE);//power off
-
return efuse_addr;
-} // efuse_GetCurrentSize}
-
+}
/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
static u8
@@ -1723,7 +1694,7 @@ efuse_CalculateWordCnts(u8 word_en)
if(!(word_en & BIT2)) word_cnts++;
if(!(word_en & BIT3)) word_cnts++;
return word_cnts;
-} // efuse_CalculateWordCnts
+}
/*-----------------------------------------------------------------------------
* Function: EFUSE_ProgramMap
@@ -1786,12 +1757,10 @@ EFUSE_ProgramMap(struct net_device* dev, char* pFileName,u8 TableType)
{
u32 j;
- //GetHexValueFromString(szLine, &u4bRegValue, &u4bMove);
efuse_ParsingMap(szLine, &u4bRegValue, &u4bMove);
// Get next hex value as EEPROM value.
szLine += u4bMove;
- //WriteEEprom(dev, (u16)(ithLine*8+i), (u16)u4bRegValue);
eeprom[index++] = (u8)(u4bRegValue&0xff);
eeprom[index++] = (u8)((u4bRegValue>>8)&0xff);
@@ -1808,9 +1777,6 @@ EFUSE_ProgramMap(struct net_device* dev, char* pFileName,u8 TableType)
return RT_STATUS_FAILURE;
}
-
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("EFUSE "), eeprom, HWSET_MAX_SIZE_92S);
-
// Use map file to update real Efuse or shadow modify table.
if (TableType == 1)
{
@@ -1824,45 +1790,9 @@ EFUSE_ProgramMap(struct net_device* dev, char* pFileName,u8 TableType)
}
return rtStatus;
-} /* EFUSE_ProgramMap */
-
-#endif
-
-//
-// Description:
-// Return TRUE if chTmp is represent for hex digit and
-// FALSE otherwise.
-//
-//
-bool IsHexDigit( char chTmp)
-{
- if( (chTmp >= '0' && chTmp <= '9') ||
- (chTmp >= 'a' && chTmp <= 'f') ||
- (chTmp >= 'A' && chTmp <= 'F') )
- {
- return TRUE;
- }
- else
- {
- return FALSE;
- }
}
-//
-// Description:
-// Translate a character to hex digit.
-//
-u32 MapCharToHexDigit(char chTmp)
-{
- if(chTmp >= '0' && chTmp <= '9')
- return (chTmp - '0');
- else if(chTmp >= 'a' && chTmp <= 'f')
- return (10 + (chTmp - 'a'));
- else if(chTmp >= 'A' && chTmp <= 'F')
- return (10 + (chTmp - 'A'));
- else
- return 0;
-}
+#endif
/*-----------------------------------------------------------------------------
* Function: efuse_ParsingMap
@@ -1889,9 +1819,6 @@ efuse_ParsingMap(char* szStr,u32* pu4bVal,u32* pu4bMove)
// Check input parameter.
if(szStr == NULL || pu4bVal == NULL || pu4bMove == NULL)
{
- //RT_TRACE(COMP_EFUSE,
- //"eeprom_ParsingMap(): Invalid IN args! szStr: %p, pu4bVal: %p, pu4bMove: %p\n",
- //szStr, pu4bVal, pu4bMove);
return FALSE;
}
@@ -1909,34 +1836,26 @@ efuse_ParsingMap(char* szStr,u32* pu4bVal,u32* pu4bMove)
// Check if szScan is now pointer to a character for hex digit,
// if not, it means this is not a valid hex number.
- if(!IsHexDigit(*szScan))
- {
+ if (!isxdigit(*szScan))
return FALSE;
- }
// Parse each digit.
do
{
- (*pu4bVal) <<= 4;
- *pu4bVal += MapCharToHexDigit(*szScan);
+ *pu4bVal = (*pu4bVal << 4) + hex_to_bin(*szScan);
szScan++;
(*pu4bMove)++;
- } while(IsHexDigit(*szScan));
+ } while (isxdigit(*szScan));
return TRUE;
-} /* efuse_ParsingMap */
+}
#endif
-//
-// Useless Section Code Now!!!!!!
-//
-// Porting from 8712 SDIO
int efuse_one_byte_rw(struct net_device* dev, u8 bRead, u16 addr, u8 *data)
{
u32 bResult;
- //u8 efuse_ctlreg,tmpidx = 0;
u8 tmpidx = 0;
u8 tmpv8=0;
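The efuse_ParsingMap() hunk above replaces the driver-private IsHexDigit()/MapCharToHexDigit() helpers with the kernel's isxdigit() and hex_to_bin(). The sketch below shows the same parsing idea in stand-alone form; hex_to_bin() is re-implemented locally only so the example builds outside the kernel, and parse_hex() is an illustrative name, not the driver's function.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = (char)tolower((unsigned char)ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

/* Parse one hex token; report the value and how many characters were consumed. */
static int parse_hex(const char *s, uint32_t *val, uint32_t *moved)
{
	*val = 0;
	*moved = 0;

	while (*s == ' ' || *s == '\t') {	/* skip leading blanks */
		s++;
		(*moved)++;
	}
	if (!isxdigit((unsigned char)*s))
		return 0;
	do {
		*val = (*val << 4) + (uint32_t)hex_to_bin(*s);
		s++;
		(*moved)++;
	} while (isxdigit((unsigned char)*s));
	return 1;
}

int main(void)
{
	uint32_t v, moved;

	if (parse_hex("  1c8f,", &v, &moved))
		printf("value 0x%x, consumed %u chars\n", (unsigned)v, (unsigned)moved);
	return 0;
}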
@@ -1965,7 +1884,6 @@ int efuse_one_byte_rw(struct net_device* dev, u8 bRead, u16 addr, u8 *data)
}
else{
- //return 0;
write_nic_byte(dev, EFUSE_CTRL, *data);//data
write_nic_byte(dev, EFUSE_CTRL+3, 0xF2);//write cmd
@@ -1987,21 +1905,18 @@ int efuse_one_byte_rw(struct net_device* dev, u8 bRead, u16 addr, u8 *data)
}
return bResult;
}
-//------------------------------------------------------------------------------
+
void efuse_access(struct net_device* dev, u8 bRead,u16 start_addr, u8 cnts, u8 *data)
{
u8 efuse_clk_ori,efuse_clk_new;//,tmp8;
u32 i = 0;
if(start_addr>0x200) return;
- //RT_TRACE(_module_rtl871x_mp_ioctl_c_,_drv_err_,
- // ("\n ===> efuse_access [start_addr=0x%x cnts:%d dataarray:0x%08x Query Efuse].\n",start_addr,cnts,data));
// -----------------SYS_FUNC_EN Digital Core Vdd enable ---------------------------------
efuse_clk_ori = read_nic_byte(dev,SYS_FUNC_EN+1);
efuse_clk_new = efuse_clk_ori|0x20;
if(efuse_clk_new!= efuse_clk_ori){
- //RT_TRACE(_module_rtl871x_mp_ioctl_c_,_drv_err_,("====write 0x10250003=====\n"));
write_nic_byte(dev, SYS_FUNC_EN+1, efuse_clk_new);
}
#ifdef _POWERON_DELAY_
@@ -2021,9 +1936,8 @@ void efuse_access(struct net_device* dev, u8 bRead,u16 start_addr, u8 cnts, u8 *
//-----------------e-fuse one byte read / write ------------------------------
for(i=0;i<cnts;i++){
efuse_one_byte_rw(dev,bRead, start_addr+i , data+i);
- ////RT_TRACE(_module_rtl871x_mp_ioctl_c_,_drv_err_,("==>efuse_access addr:0x%02x value:0x%02x\n",data+i,*(data+i)));
+
}
- // -----------------e-fuse pwr & clk reg ctrl ---------------------------------
write_nic_byte(dev, EFUSE_TEST+3, read_nic_byte(dev, EFUSE_TEST+3)&0x7f);
write_nic_byte(dev, EFUSE_CLK_CTRL, read_nic_byte(dev, EFUSE_CLK_CTRL)&0xfd);
@@ -2031,8 +1945,6 @@ void efuse_access(struct net_device* dev, u8 bRead,u16 start_addr, u8 cnts, u8 *
if(efuse_clk_new != efuse_clk_ori) write_nic_byte(dev, 0x10250003, efuse_clk_ori);
}
-//------------------------------------------------------------------------------
-//------------------------------------------------------------------------------
#ifdef TO_DO_LIST
static void efuse_reg_ctrl(struct net_device* dev, u8 bPowerOn)
@@ -2060,15 +1972,12 @@ static void efuse_reg_ctrl(struct net_device* dev, u8 bPowerOn)
write_nic_byte(dev, EFUSE_CLK_CTRL, read_nic_byte(dev, EFUSE_CLK_CTRL)&0xfd);
// -----------------SYS_FUNC_EN Digital Core Vdd disable ---------------------------------
- //write_nic_byte(pAdapter, SYS_FUNC_EN+1, read_nic_byte(pAdapter,SYS_FUNC_EN+1)&0xDF);
}
}
#endif
-//------------------------------------------------------------------------------
-//------------------------------------------------------------------------------
void efuse_read_data(struct net_device* dev,u8 efuse_read_item,u8 *data,u32 data_size)
{
u8 offset, word_start,byte_start,byte_cnts;
@@ -2079,10 +1988,8 @@ void efuse_read_data(struct net_device* dev,u8 efuse_read_item,u8 *data,u32 data
u8 tmpidx;
u8 pg_data[8];
- //u8 temp_value[8] = {0xff};
if(efuse_read_item> (sizeof(RTL8712_SDIO_EFUSE_TABLE)/sizeof(EFUSE_MAP))){
- //error msg
return ;
}
@@ -2092,48 +1999,39 @@ void efuse_read_data(struct net_device* dev,u8 efuse_read_item,u8 *data,u32 data
byte_cnts = RTL8712_SDIO_EFUSE_TABLE[efuse_read_item].byte_cnts;
if(data_size!=byte_cnts){
- //error msg
return;
}
pg_pkt_cnts = (byte_cnts /PGPKT_DATA_SIZE) +1;
if(pg_pkt_cnts > 1){
- //tmpdata = _malloc(pg_pkt_cnts*PGPKT_DATA_SIZE);
tmpdata = efusedata;
if(tmpdata!=NULL)
{
memset(tmpdata,0xff,pg_pkt_cnts*PGPKT_DATA_SIZE);
- //PlatformFillMemory((PVOID)pg_data, pg_pkt_cnts*PGPKT_DATA_SIZE, 0xff);
for(tmpidx=0;tmpidx<pg_pkt_cnts;tmpidx++)
{
memset(pg_data,0xff,PGPKT_DATA_SIZE);
- //PlatformFillMemory((PVOID)pg_data, PGPKT_DATA_SIZE, 0xff);
if(TRUE== efuse_PgPacketRead(dev,offset+tmpidx,pg_data))
{
memcpy(tmpdata+(PGPKT_DATA_SIZE*tmpidx),pg_data,PGPKT_DATA_SIZE);
- //PlatformMoveMemory((PVOID)(tmpdata+(PGPKT_DATA_SIZE*tmpidx)), (PVOID)pg_data, PGPKT_DATA_SIZE);
}
}
memcpy(data,(tmpdata+ (2*word_start)+byte_start ),data_size);
- //PlatformMoveMemory((PVOID)data, (PVOID)(tmpdata+ (2*word_start)+byte_start ), data_size);
- //_mfree(tmpdata, pg_pkt_cnts*PGPKT_DATA_SIZE);
}
}
else
{
memset(pg_data,0xff,PGPKT_DATA_SIZE);
- //PlatformFillMemory((PVOID)pg_data, PGPKT_DATA_SIZE, 0xff);
if(TRUE==efuse_PgPacketRead(dev,offset,pg_data)){
memcpy(data,pg_data+ (2*word_start)+byte_start ,data_size);
- //PlatformMoveMemory((PVOID)data, (PVOID)(pg_data+ (2*word_start)+byte_start), data_size);
}
}
}
-//------------------------------------------------------------------------------
+
//per interface doesn't alike
void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 data_size,u32 bWordUnit)
{
@@ -2145,7 +2043,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
u8 pg_data[8],tmpbytes=0;
if(efuse_write_item> (sizeof(RTL8712_SDIO_EFUSE_TABLE)/sizeof(EFUSE_MAP))){
- //error msg
return ;
}
@@ -2155,7 +2052,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
byte_cnts = RTL8712_SDIO_EFUSE_TABLE[efuse_write_item].byte_cnts;
if(data_size > byte_cnts){
- //error msg
return;
}
pg_pkt_cnts = (byte_cnts /PGPKT_DATA_SIZE) +1;
@@ -2168,13 +2064,11 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
if((efuse_write_item==EFUSE_F0CIS)||(efuse_write_item==EFUSE_F1CIS)){
memset(pg_data,0xff,PGPKT_DATA_SIZE);
- //PlatformFillMemory((PVOID)pg_data, PGPKT_DATA_SIZE, 0xff);
efuse_PgPacketRead(dev,offset,pg_data);
if(efuse_write_item==EFUSE_F0CIS){
word_en = 0x07;
memcpy(pg_data+word_start*2+byte_start,data,sizeof(u8)*2);
- //PlatformMoveMemory((PVOID)(pg_data+word_start*2+byte_start), (PVOID)data, sizeof(u8)*2);
efuse_PgPacketWrite(dev,offset,word_en,pg_data+(word_start*2));
word_en = 0x00;
@@ -2183,7 +2077,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
word_en = 0x00;
efuse_PgPacketRead(dev,offset+2,pg_data);
memcpy(pg_data,data+2+8,sizeof(u8)*7);
- //PlatformMoveMemory((PVOID)(pg_data), (PVOID)(data+2+8), sizeof(u8)*7);
efuse_PgPacketWrite(dev,(offset+2),word_en,pg_data);
}
@@ -2202,7 +2095,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
}
else{
memset(pg_data,0xff,PGPKT_DATA_SIZE);
- //PlatformFillMemory((PVOID)pg_data, PGPKT_DATA_SIZE, 0xff);
if((efuse_write_item==EFUSE_SDIO_SETTING)||(efuse_write_item==EFUSE_CCCR)){
word_en = 0x0e ;
tmpbytes = 2;
@@ -2221,12 +2113,10 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
}
if(bWordUnit==TRUE){
memcpy(pg_data+word_start*2 ,data,sizeof(u8)*tmpbytes);
- //PlatformMoveMemory((PVOID)(pg_data+word_start*2), (PVOID)(data), sizeof(u8)*tmpbytes);
}
else{
efuse_PgPacketRead(dev,offset,pg_data);
memcpy(pg_data+(2*word_start)+byte_start,data,sizeof(u8)*byte_cnts);
- //PlatformMoveMemory((PVOID)(pg_data+(2*word_start)+byte_start), (PVOID)(data), sizeof(u8)*byte_cnts);
}
efuse_PgPacketWrite(dev,offset,word_en,pg_data+(word_start*2));
@@ -2234,7 +2124,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
}
}
- //========================================================================
else if(pg_pkt_cnts>1){//situation B
if(word_start==0){
word_en = 0x00;
@@ -2255,7 +2144,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
}
}
- //========================================================================
else{//situation C
word_en = 0x0f;
for(tmpidx= 0; tmpidx<word_cnts ; tmpidx++)
@@ -2267,7 +2155,6 @@ void efuse_write_data(struct net_device* dev,u8 efuse_write_item,u8 *data,u32 da
}
}
-//------------------------------------------------------------------------------
void efuset_test_func_read(struct net_device* dev)
{
@@ -2288,7 +2175,6 @@ void efuset_test_func_read(struct net_device* dev)
memset(txpowertable,0,sizeof(u8)*28);
efuse_read_data(dev,EFUSE_TXPW_TAB,txpowertable,sizeof(txpowertable));
}
-//------------------------------------------------------------------------------
void efuset_test_func_write(struct net_device* dev)
{
@@ -2311,19 +2197,3 @@ void efuset_test_func_write(struct net_device* dev)
efuse_write_data(dev,EFUSE_SDIO_SETTING,tmpdata,sizeof(tmpdata),bWordUnit);
}
-//------------------------------------------------------------------------------
-
-
-
-
-
-
-
-
-
-
-/* End of Efuse.c */
-
-
-
-
diff --git a/drivers/staging/rtl8192su/r8192S_Efuse.h b/drivers/staging/rtl8192su/r8192S_Efuse.h
index 1e50153ba02..c48a11bc06f 100644
--- a/drivers/staging/rtl8192su/r8192S_Efuse.h
+++ b/drivers/staging/rtl8192su/r8192S_Efuse.h
@@ -1,39 +1,38 @@
/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
*
- * Module: Efuse.h ( Header File)
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
- * Note:
- *
- * Function:
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- *
- * 09/23/2008 MHC Porting Efuse R/W API from WMAC.
- * 11/10/2008 MHC Porting Efuse.h from 8712 SDIO.
- * 1. We muse redefine the header file to fit our coding
- * style.
- * 2. THe API we export to other module, we must redefine
- * for 8192S series.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
*
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
******************************************************************************/
-/* Check to see if the file has been included already. */
#ifndef __INC_EFUSE_H
#define __INC_EFUSE_H
-// Roger porting for 8192SU
#define EFUSE_FOR_92SU 1
-/*--------------------------Define Parameters-------------------------------*/
#define EFUSE_MAC_LEN 0x200
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAP_LEN 128
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_MAX_WORD_UNIT 4
#define EFUSE_INIT_MAP 0
#define EFUSE_MODIFY_MAP 1
@@ -41,7 +40,6 @@
#define EFUSE_CLK_CTRL EFUSE_CTRL
#define EFUSE_BIT(x) (1 << (x))
-// From 8712!!!!!!!!
#define PG_STATE_HEADER 0x01
#define PG_STATE_WORD_0 0x02
#define PG_STATE_WORD_1 0x04
@@ -52,23 +50,6 @@
#define PG_SWBYTE_H 0x01
#define PG_SWBYTE_L 0x02
-/*--------------------------Define Parameters-------------------------------*/
-
-
-/*------------------------------Define structure----------------------------*/
-
-/*------------------------------Define structure----------------------------*/
-
-
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export global variable----------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-
-
-/*--------------------------Exported Function prototype---------------------*/
extern void
EFUSE_Initialize(struct net_device* dev);
extern u8
@@ -81,21 +62,18 @@ extern void
ReadEFuse(struct net_device* dev,u16 _offset,u16 _size_byte,u8* pbuf);
extern void
ReadEFuseByte(struct net_device* dev,u16 _offset,u8 *pbuf);
-#endif // #if (EFUSE_FOR_92SU == 1)
+#endif
extern void
EFUSE_ShadowRead(struct net_device* dev,unsigned char Type,unsigned short Offset,u32 *Value);
extern void
EFUSE_ShadowWrite(struct net_device* dev,unsigned char Type,unsigned short Offset,u32 Value);
-extern void
+extern bool
EFUSE_ShadowUpdate(struct net_device* dev);
extern void
EFUSE_ShadowMapUpdate(struct net_device* dev);
extern bool
EFUSE_ProgramMap(struct net_device* dev,char* pFileName, u8 TableType); // 0=Shadow 1=Real Efuse
-/*--------------------------Exported Function prototype---------------------*/
-
-/* End of Efuse.h */
-#endif //__INC_EFUSE_H
+#endif
diff --git a/drivers/staging/rtl8192su/r8192S_firmware.c b/drivers/staging/rtl8192su/r8192S_firmware.c
index 5036d547d5d..db0d2d5fc61 100644
--- a/drivers/staging/rtl8192su/r8192S_firmware.c
+++ b/drivers/staging/rtl8192su/r8192S_firmware.c
@@ -1,16 +1,22 @@
-/**************************************************************************************************
- * Procedure: Init boot code/firmware code/data session
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * Description: This routine will intialize firmware. If any error occurs during the initialization
- * process, the routine shall terminate immediately and return fail.
- * NIC driver should call NdisOpenFile only from MiniportInitialize.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
- * Arguments: The pointer of the adapter
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
- * Returns:
- * NDIS_STATUS_FAILURE - the following initialization process should be terminated
- * NDIS_STATUS_SUCCESS - if firmware initialization process success
-**************************************************************************************************/
#include "r8192U.h"
#include "r8192S_firmware.h"
#include <linux/unistd.h>
diff --git a/drivers/staging/rtl8192su/r8192S_firmware.h b/drivers/staging/rtl8192su/r8192S_firmware.h
index 2c2cf8032de..7f268a8de5e 100644
--- a/drivers/staging/rtl8192su/r8192S_firmware.h
+++ b/drivers/staging/rtl8192su/r8192S_firmware.h
@@ -1,44 +1,32 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef __INC_FIRMWARE_H
#define __INC_FIRMWARE_H
-//#define RTL8190_CPU_START_OFFSET 0x80
-/* TODO: this definition is TBD */
-//#define USB_HWDESC_HEADER_LEN 0
-
-/* It should be double word alignment */
-//#if DEV_BUS_TYPE==PCI_INTERFACE
-//#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) 4*(v/4) - 8
-//#else
-//#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4*(v/4) - 8 - USB_HWDESC_HEADER_LEN)
-//#endif
-
-//typedef enum _firmware_init_step{
-// FW_INIT_STEP0_BOOT = 0,
-// FW_INIT_STEP1_MAIN = 1,
-// FW_INIT_STEP2_DATA = 2,
-//}firmware_init_step_e;
-
-//typedef enum _DESC_PACKET_TYPE{
-// DESC_PACKET_TYPE_INIT = 0,
-// DESC_PACKET_TYPE_NORMAL = 1,
-//}DESC_PACKET_TYPE;
-#define RTL8192S_FW_PKT_FRAG_SIZE 0xFF00 // 64K
-
-
#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000 //64k
#define MAX_FIRMWARE_CODE_SIZE 0xFF00 // Firmware Local buffer size.
#define RTL8190_CPU_START_OFFSET 0x80
-
+#define RTL8192S_FW_PKT_FRAG_SIZE 0x4000
#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4*(v/4) - 8 - USB_HWDESC_HEADER_LEN)
-//typedef enum _DESC_PACKET_TYPE{
-// DESC_PACKET_TYPE_INIT = 0,
-// DESC_PACKET_TYPE_NORMAL = 1,
-//}DESC_PACKET_TYPE;
-// Forward declaration.
-//typedef struct _ADAPTER ADAPTER, *PADAPTER;
#ifdef RTL8192S
typedef enum _firmware_init_step{
FW_INIT_STEP0_IMEM = 0,
@@ -64,17 +52,8 @@ typedef enum _opt_rst_type{
OPT_FIRMWARE_RESET = 1,
}opt_rst_type_e;
-/*typedef enum _FIRMWARE_STATUS{
- FW_STATUS_0_INIT = 0,
- FW_STATUS_1_MOVE_BOOT_CODE = 1,
- FW_STATUS_2_MOVE_MAIN_CODE = 2,
- FW_STATUS_3_TURNON_CPU = 3,
- FW_STATUS_4_MOVE_DATA_CODE = 4,
- FW_STATUS_5_READY = 5,
-}FIRMWARE_STATUS;
-*/
//--------------------------------------------------------------------------------
-// RTL8192S Firmware related, Revised by Roger, 2008.12.18.
+// RTL8192S Firmware related
//--------------------------------------------------------------------------------
typedef struct _RT_8192S_FIRMWARE_PRIV { //8-bytes alignment required
@@ -181,7 +160,6 @@ typedef enum _FIRMWARE_8192S_STATUS{
typedef struct _rt_firmware{
PRT_8192S_FIRMWARE_HDR pFwHeader;
FIRMWARE_8192S_STATUS FWStatus;
- u16 FirmwareVersion;
u8 FwIMEM[RTL8190_MAX_FIRMWARE_CODE_SIZE];
u8 FwEMEM[RTL8190_MAX_FIRMWARE_CODE_SIZE];
u32 FwIMEMLen;
@@ -189,11 +167,43 @@ typedef struct _rt_firmware{
u8 szFwTmpBuffer[164000];
u32 szFwTmpBufferLen;
u16 CmdPacketFragThresold;
+ u16 FirmwareVersion;
}rt_firmware, *prt_firmware;
-//typedef struct _RT_FIRMWARE_INFO_8192SU{
-// u8 szInfo[16];
-//}RT_FIRMWARE_INFO_8192SU, *PRT_FIRMWARE_INFO_8192SU;
+#define FW_DIG_ENABLE_CTL BIT0
+#define FW_HIGH_PWR_ENABLE_CTL BIT1
+#define FW_SS_CTL BIT2
+#define FW_RA_INIT_CTL BIT3
+#define FW_RA_BG_CTL BIT4
+#define FW_RA_N_CTL BIT5
+#define FW_PWR_TRK_CTL BIT6
+#define FW_IQK_CTL BIT7
+#define FW_ANTENNA_SW BIT8
+#define FW_DISABLE_ALL_DM 0
+
+#define FW_PWR_TRK_PARAM_CLR 0x0000ffff
+#define FW_RA_PARAM_CLR 0xffff0000
+
+#define FW_CMD_IO_CLR(_pdev, _Bit) \
+ udelay(1000); \
+ ((struct r8192_priv *)ieee80211_priv(_pdev))->FwCmdIOMap &= (~_Bit);
+
+#define FW_CMD_IO_UPDATE(_pdev, _val) \
+ ((struct r8192_priv *)ieee80211_priv(_pdev))->FwCmdIOMap = _val;
+
+#define FW_CMD_IO_SET(_pdev, _val) \
+ write_nic_word(_pdev, LBUS_MON_ADDR, (u16)_val); \
+ FW_CMD_IO_UPDATE(_pdev, _val);
+
+#define FW_CMD_PARA_SET(_pdev, _val) \
+ write_nic_dword(_pdev, LBUS_ADDR_MASK, _val); \
+ ((struct r8192_priv *)ieee80211_priv(_pdev))->FwCmdIOParam = _val;
+
+#define FW_CMD_IO_QUERY(_pdev) (u16)(((struct r8192_priv *)ieee80211_priv(_pdev))->FwCmdIOMap)
+#define FW_CMD_IO_PARA_QUERY(_pdev) (u32)(((struct r8192_priv *)ieee80211_priv(_pdev))->FwCmdIOParam)
+
+
+
bool FirmwareDownload92S(struct net_device *dev);
#endif
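The FW_CMD_IO_SET(), FW_CMD_PARA_SET() and FW_CMD_IO_CLR() macros added above expand to two statements without a do { } while (0) wrapper, so using one as the body of an unbraced if would silently detach the second statement. This is a latent hazard rather than a demonstrated bug here; the stand-alone demo below shows the failure mode and the conventional fix, with stub functions standing in for write_nic_word() and the priv bookkeeping (all names illustrative).

#include <stdio.h>

static int hw_writes, map_updates;

static void hw_write(int val)   { hw_writes++;  (void)val; }
static void map_update(int val) { map_updates++; (void)val; }

/* Unsafe pattern, mirroring the multi-statement macros in the header. */
#define CMD_SET_UNSAFE(v)	hw_write(v); map_update(v);

/* Conventional safe form. */
#define CMD_SET_SAFE(v)		do { hw_write(v); map_update(v); } while (0)

int main(void)
{
	int enable = 0;

	if (enable)
		CMD_SET_UNSAFE(1);	/* map_update(1) still runs: not what was meant */
	if (enable)
		CMD_SET_SAFE(1);	/* neither call runs, as intended */

	printf("hw_writes=%d map_updates=%d\n", hw_writes, map_updates);
	return 0;
}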
diff --git a/drivers/staging/rtl8192su/r8192S_hw.h b/drivers/staging/rtl8192su/r8192S_hw.h
index 82ea96b6f4d..e62b79df5ba 100644
--- a/drivers/staging/rtl8192su/r8192S_hw.h
+++ b/drivers/staging/rtl8192su/r8192S_hw.h
@@ -1,25 +1,22 @@
-/*****************************************************************************
- * Copyright(c) 2008, RealTEK Technology Inc. All Right Reserved.
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * Module: __INC_HAL8192SEREG_H
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
- * Note: 1. Define Mac register address and corresponding bit mask map
- * 2. CCX register
- * 3. Backward compatible register with useless address.
- * 4. Define 92SU required register address and definition.
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
*
- *
- * Export: Constants, macro, functions(API), global variables(None).
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h.
- * 2. Reorganize code architecture.
- *
- *****************************************************************************/
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+
#ifndef R8192S_HW
#define R8192S_HW
@@ -29,21 +26,14 @@ typedef enum _VERSION_8192S{
VERSION_8192S_CCUT
}VERSION_8192S,*PVERSION_8192S;
-//#ifdef RTL8192SU
typedef enum _VERSION_8192SUsb{
VERSION_8192SU_A, //A-Cut
VERSION_8192SU_B, //B-Cut
VERSION_8192SU_C, //C-Cut
}VERSION_8192SUsb, *PVERSION_8192SUsb;
-//#else
-typedef enum _VERSION_819xU{
- VERSION_819xU_A, // A-cut
- VERSION_819xU_B, // B-cut
- VERSION_819xU_C,// C-cut
-}VERSION_819xU,*PVERSION_819xU;
-//#endif
-
-/* 2007/11/15 MH Define different RF type. */
+
+
+/* RF type. */
typedef enum _RT_RF_TYPE_DEFINITION
{
RF_1T2R = 0,
@@ -51,9 +41,6 @@ typedef enum _RT_RF_TYPE_DEFINITION
RF_2T2R,
RF_1T1R,
RF_2T2R_GREEN,
- //RF_3T3R,
- //RF_3T4R,
- //RF_4T4R,
RF_819X_MAX_TYPE
}RT_RF_TYPE_DEF_E;
@@ -68,12 +55,10 @@ typedef enum _BaseBand_Config_Type{
#define RTL8187_REQ_SET_REGS 0x05
#define MAX_TX_URB 5
-#define MAX_RX_URB 16
+#define MAX_RX_URB 8
#define R8180_MAX_RETRY 255
-//#define MAX_RX_NORMAL_URB 3
-//#define MAX_RX_COMMAND_URB 2
-#define RX_URB_SIZE 9100
+#define RX_URB_SIZE 0x4000
#define BB_ANTATTEN_CHAN14 0x0c
#define BB_ANTENNA_B 0x40
@@ -134,7 +119,6 @@ typedef enum _BaseBand_Config_Type{
#define MSR_LINK_ENEDCA (1<<4)
-//#define Cmd9346CR_9356SEL (1<<4)
#define EPROM_CMD_RESERVED_MASK (1<<5)
#define EPROM_CMD_OPERATING_MODE_SHIFT 6
#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
@@ -147,13 +131,6 @@ typedef enum _BaseBand_Config_Type{
#define EPROM_W_SHIFT 1
#define EPROM_R_SHIFT 0
-//#define MAC0 0x000,
-//#define MAC1 0x001,
-//#define MAC2 0x002,
-//#define MAC3 0x003,
-//#define MAC4 0x004,
-//#define MAC5 0x005,
-
//============================================================
 // 8192S Register offset definition
//============================================================
@@ -529,14 +506,6 @@ typedef enum _BaseBand_Config_Type{
// USB RPWM register
#define USB_RPWM 0xFE58
-//FIXLZM SVN_BRACH NOT MOD HERE, IF MOD RX IS LITTLE LOW
-//#if ((HAL_CODE_BASE == RTL8192_S) && (DEV_BUS_TYPE==PCI_INTERFACE))
-//#define RPWM PCI_RPWM
-//#elif ((HAL_CODE_BASE == RTL8192_S) && (DEV_BUS_TYPE==USB_INTERFACE))
-//#define RPWM USB_RPWM
-//#endif
-
-
//============================================================================
 // 8190 Register offset definition
//============================================================================
@@ -777,13 +746,11 @@ typedef enum _BaseBand_Config_Type{
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
-//in 92U FIXLZM
-//#ifdef RTL8192U
#define RCR_ONLYERLPKT BIT31 // Early Receiving based on Packet Size.
#define RCR_ENCS2 BIT30 // Enable Carrier Sense Detection Method 2
#define RCR_ENCS1 BIT29 // Enable Carrier Sense Detection Method 1
#define RCR_ACKTXBW (BIT24|BIT25) // TXBW Setting of ACK frames
-//#endif
+
//----------------------------------------------------------------------------
// 8192S (MSR) Media Status Register (Offset 0x4C, 8 bits)
//----------------------------------------------------------------------------
@@ -1259,17 +1226,18 @@ Default: 00b.
#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_ALPHA 0x1
+#define EEPROM_CID_Senao 0x3
+#define EEPROM_CID_CAMEO			0x8
+#define EEPROM_CID_SITECOM 0x9
+#define EEPROM_CID_COREGA 0xB
+#define EEPROM_CID_EDIMAX_BELKIN 0xC
+#define EEPROM_CID_SERCOMM_BELKIN 0xE
+#define EEPROM_CID_CAMEO1 0xF
+#define EEPROM_CID_WHQL 0xFE
+#define EEPROM_CID_NetCore 0x5
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_ALPHA 0x1
-#define EEPROM_CID_CAMEO 0X8
-#define EEPROM_CID_SITECOM 0x9
-
-//#define EEPROM_CID_RUNTOP 0x2
-//#define EEPROM_CID_Senao 0x3
-//#define EEPROM_CID_TOSHIBA 0x4
-//#define EEPROM_CID_NetCore 0x5
-#define EEPROM_CID_WHQL 0xFE // added by chiyoko for dtm, 20090108
//-----------------------------------------------------------------
// 0x2c0 FW Command Control register definition, added by Roger, 2008.11.27.
@@ -1282,18 +1250,32 @@ Default: 00b.
#define FW_HIGH_PWR_ENABLE 0xfd000009
#define FW_TXPWR_TRACK_ENABLE 0xfd000017
#define FW_TXPWR_TRACK_DISABLE 0xfd000018
-#define FW_RA_RESET 0xfd0000af
-#define FW_RA_ACTIVE 0xfd0000a6
+#define FW_TXPWR_TRACK_THERMAL 0xfd000019
+#define FW_RA_INIT 0xfd000026
+#define FW_RA_IOT_BG_COMB 0xfd000030
+#define FW_RA_IOT_N_COMB 0xfd000031
#define FW_RA_REFRESH 0xfd0000a0
-#define FW_RA_ENABLE_BG 0xfd0000ac
+#define FW_RA_DISABLE 0xfd0000a4
+#define FW_RA_ACTIVE 0xfd0000a6
+#define FW_RA_DISABLE_RSSI_MASK 0xfd0000ac
+#define FW_RA_ENABLE_RSSI_MASK 0xfd0000ad
+#define FW_RA_RESET 0xfd0000af
+#define FW_DM_DISABLE 0xfd00aa00
#define FW_IQK_ENABLE 0xf0000020
#define FW_IQK_SUCCESS 0x0000dddd
#define FW_IQK_FAIL 0x0000ffff
#define FW_OP_FAILURE 0xffffffff
-#define FW_DM_DISABLE 0xfd00aa00
+#define FW_TX_FEEDBACK_NONE 0xfb000000
+#define FW_TX_FEEDBACK_DTM_ENABLE (FW_TX_FEEDBACK_NONE | 0x1)
+#define FW_TX_FEEDBACK_CCX_ENABLE (FW_TX_FEEDBACK_NONE | 0x2)
#define FW_BB_RESET_ENABLE 0xff00000d
#define FW_BB_RESET_DISABLE 0xff00000e
-
+#define FW_LPS_ENTER 0xfe000010
+#define FW_LPS_LEAVE 0xfe000011
+#define FW_INDIRECT_READ 0xf2000000
+#define FW_INDIRECT_WRITE 0xf2000001
+#define FW_TXANT_SWITCH_ENABLE 0xfd000023
+#define FW_TXANT_SWITCH_DISABLE 0xfd000024
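These WFM5 command words keep an opcode in the upper bytes and leave the low bits free for parameters; later in this patch the combined rate-adaptation commands OR the IOT function mask into bits 8..11 and the peer type into bits 12..15. A minimal sketch of that packing with made-up IOT values — build_ra_iot_n_cmd() is an illustration, not a driver function:

#include <stdint.h>
#include <stdio.h>

#define FW_RA_IOT_N_COMB 0xfd000031u

static uint32_t build_ra_iot_n_cmd(uint32_t iot_ra_func, uint32_t iot_peer)
{
        uint32_t cmd = FW_RA_IOT_N_COMB;

        /* Low nibble of the RA function mask goes to bits 8..11, the
         * peer type nibble to bits 12..15, mirroring the switch in
         * phy_SetFwCmdIOCallback() further down in this patch. */
        cmd |= (iot_ra_func & 0x0f) << 8;
        cmd |= (iot_peer & 0x0f) << 12;
        return cmd;
}

int main(void)
{
        /* e.g. RA function mask 0x3, peer type 0x2 -> 0xfd002331 */
        printf("WFM5 word: %#x\n", (unsigned)build_ra_iot_n_cmd(0x3, 0x2));
        return 0;
}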
//
//--------------92SU require delete or move to other place later
//
@@ -1460,34 +1442,4 @@ Default: 00b.
#define HAL_8192S_HW_GPIO_OFF_MASK 0xF7
#define HAL_8192S_HW_GPIO_WPS_BIT BIT4
-#endif //R8192S_HW
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+#endif
diff --git a/drivers/staging/rtl8192su/r8192S_phy.c b/drivers/staging/rtl8192su/r8192S_phy.c
index b6c0f199074..a5fc2d1cb06 100644
--- a/drivers/staging/rtl8192su/r8192S_phy.c
+++ b/drivers/staging/rtl8192su/r8192S_phy.c
@@ -1,35 +1,20 @@
/******************************************************************************
-
- (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
-
- Module: hal8192sphy.c
-
- Note: Merge 92SE/SU PHY config as below
- 1. BB register R/W API
- 2. RF register R/W API
- 3. Initial BB/RF/MAC config by reading BB/MAC/RF txt.
- 3. Power setting API
- 4. Channel switch API
- 5. Initial gain switch API.
- 6. Other BB/MAC/RF API.
-
- Function: PHY: Extern function, phy: local function
-
- Export: PHY_FunctionName
-
- Abbrev: NONE
-
- History:
- Data Who Remark
- 08/08/2008 MHC 1. Port from 9x series phycfg.c
- 2. Reorganize code arch and ad description.
- 3. Collect similar function.
- 4. Seperate extern/local API.
- 08/12/2008 MHC We must merge or move USB PHY relative function later.
- 10/07/2008 MHC Add IQ calibration for PHY.(Only 1T2R mode now!!!)
- 11/06/2008 MHC Add TX Power index PG file to config in 0xExx register
- area to map with EEPROM/EFUSE tx pwr index.
-
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include "r8192U.h"
#include "r8192U_dm.h"
@@ -42,16 +27,12 @@
#include "ieee80211/dot11d.h"
-/*---------------------------Define Local Constant---------------------------*/
/* Channel switch:The size of command tables for switch channel*/
#define MAX_PRECMD_CNT 16
#define MAX_RFDEPENDCMD_CNT 16
#define MAX_POSTCMD_CNT 16
#define MAX_DOZE_WAITING_TIMES_9x 64
-/*------------------------Define local variable------------------------------*/
-// 2004-05-11
-
static u32
phy_CalculateBitShift(u32 BitMask);
static RT_STATUS
@@ -86,7 +67,6 @@ static long phy_TxPwrIdxToDbm( struct net_device* dev, WIRELESS_MODE WirelessM
static u8 phy_DbmToTxPwrIdx( struct net_device* dev, WIRELESS_MODE WirelessMode, long PowerInDbm);
void phy_SetFwCmdIOCallback(struct net_device* dev);
-//#if ((HAL_CODE_BASE == RTL8192_S) && (DEV_BUS_TYPE==USB_INTERFACE))
//
// Description:
// Base Band read by 4181 to make sure that operation could be done in unlimited cycle.
@@ -95,8 +75,6 @@ void phy_SetFwCmdIOCallback(struct net_device* dev);
// - Only use on RTL8192S USB interface.
// - PASSIVE LEVEL
//
-// Created by Roger, 2008.09.06.
-//
//use in phy only
u32 phy_QueryUsbBBReg(struct net_device* dev, u32 RegAddr)
{
@@ -118,7 +96,7 @@ u32 phy_QueryUsbBBReg(struct net_device* dev, u32 RegAddr)
msleep(1); // 1 ms
// Wait too long, return FALSE to avoid to be stuck here.
- if((BBWaitCounter > 100) )//||RT_USB_CANNOT_IO(Adapter))
+		if (BBWaitCounter > 100)
{
RT_TRACE(COMP_RF, "phy_QueryUsbBBReg(): (%d) Wait too logn to query BB!!\n", BBWaitCounter);
return ReturnValue;
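The pattern above (and in the RF accessors below) is a bounded poll: sleep 1 ms while another register access is in flight and give up after roughly 100 tries. A standalone sketch of the same idea; sleep_1ms() and the busy flag are stand-ins for msleep(1) and priv->bChangeBBInProgress, nothing here comes from the driver itself:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for msleep(1); does nothing so the sketch runs instantly. */
static void sleep_1ms(void) { }

/* Poll a busy flag, sleeping 1 ms per try, and bail out after max_tries
 * so the caller never spins forever (cf. BBWaitCounter > 100 above). */
static bool wait_not_busy(volatile bool *busy, unsigned int max_tries)
{
        unsigned int tries = 0;

        while (*busy) {
                sleep_1ms();
                if (++tries > max_tries)
                        return false;
        }
        return true;
}

int main(void)
{
        volatile bool busy = false;

        printf("%s\n", wait_not_busy(&busy, 100) ? "ready" : "timed out");
        return 0;
}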
@@ -160,9 +138,6 @@ u32 phy_QueryUsbBBReg(struct net_device* dev, u32 RegAddr)
// Assumption:
// - Only use on RTL8192S USB interface.
// - PASSIVE LEVEL
-//
-// Created by Roger, 2008.09.06.
-//
//use in phy only
void
phy_SetUsbBBReg(struct net_device* dev,u32 RegAddr,u32 Data)
@@ -191,7 +166,6 @@ phy_SetUsbBBReg(struct net_device* dev,u32 RegAddr,u32 Data)
}
priv->bChangeBBInProgress = true;
- //printk("**************%s: RegAddr:%x Data:%x\n", __FUNCTION__,RegAddr, Data);
write_nic_dword(dev, RegAddr, Data);
priv->bChangeBBInProgress = false;
@@ -215,9 +189,7 @@ u32 phy_QueryUsbRFReg( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Of
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //u32 value = 0, ReturnValue = 0;
u32 ReturnValue = 0;
- //u32 tmplong,tmplong2;
u8 PollingCnt = 50;
u8 RFWaitCounter = 0;
@@ -229,8 +201,6 @@ u32 phy_QueryUsbRFReg( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Of
//
while(priv->bChangeRFInProgress)
{
- //PlatformReleaseSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
- //spin_lock_irqsave(&priv->rf_lock, flags); //LZM,090318
down(&priv->rf_sem);
RFWaitCounter ++;
@@ -244,14 +214,10 @@ u32 phy_QueryUsbRFReg( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Of
}
else
{
- //PlatformAcquireSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
}
}
priv->bChangeRFInProgress = true;
- //PlatformReleaseSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
-
-
Offset &= 0x3f; //RF_Offset= 0x00~0x3F
write_nic_dword(dev, RF_BB_CMD_ADDR, 0xF0000002|
@@ -267,8 +233,6 @@ u32 phy_QueryUsbRFReg( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Of
// Data FW read back.
ReturnValue = read_nic_dword(dev, RF_BB_CMD_DATA);
- //PlatformAcquireSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
- //spin_unlock_irqrestore(&priv->rf_lock, flags); //LZM,090318
up(&priv->rf_sem);
priv->bChangeRFInProgress = false;
@@ -306,27 +270,23 @@ void phy_SetUsbRFReg(struct net_device* dev,RF90_RADIO_PATH_E eRFPath,u32 RegAdd
//
while(priv->bChangeRFInProgress)
{
- //PlatformReleaseSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
- //spin_lock_irqsave(&priv->rf_lock, flags); //LZM,090318
down(&priv->rf_sem);
RFWaitCounter ++;
RT_TRACE(COMP_RF, "phy_SetUsbRFReg(): Wait 1 ms (%d times)...\n", RFWaitCounter);
msleep(1); // 1 ms
- if((RFWaitCounter > 100))// || RT_USB_CANNOT_IO(Adapter))
+		if (RFWaitCounter > 100)
{
RT_TRACE(COMP_RF, "phy_SetUsbRFReg(): (%d) Wait too logn to query BB!!\n", RFWaitCounter);
return;
}
else
{
- //PlatformAcquireSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
}
}
priv->bChangeRFInProgress = true;
- //PlatformReleaseSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
RegAddr &= 0x3f; //RF_Offset= 0x00~0x3F
@@ -347,18 +307,11 @@ void phy_SetUsbRFReg(struct net_device* dev,RF90_RADIO_PATH_E eRFPath,u32 RegAdd
RT_TRACE(COMP_RF, "phy_SetUsbRFReg(): Set RegAddr(%#x) = %#x Fail!!!\n", RegAddr, Data);
}
- //PlatformAcquireSpinLock(Adapter, RT_RF_OPERATE_SPINLOCK);
- //spin_unlock_irqrestore(&priv->rf_lock, flags); //LZM,090318
up(&priv->rf_sem);
priv->bChangeRFInProgress = false;
}
-
-/*---------------------Define local function prototype-----------------------*/
-
-
-/*----------------------------Function Body----------------------------------*/
//
// 1. BB register R/W API
//
@@ -376,8 +329,6 @@ void phy_SetUsbRFReg(struct net_device* dev,RF90_RADIO_PATH_E eRFPath,u32 RegAdd
* Return: u32 Data //The readback register value
* Note: This function is equal to "GetRegSetting" in PHY programming guide
*/
-//use phy dm core 8225 8256 6052
-//u32 PHY_QueryBBReg(struct net_device* dev,u32 RegAddr, u32 BitMask)
u32 rtl8192_QueryBBReg(struct net_device* dev, u32 RegAddr, u32 BitMask)
{
@@ -392,10 +343,8 @@ u32 rtl8192_QueryBBReg(struct net_device* dev, u32 RegAddr, u32 BitMask)
// infinite cycle.
// 2008.09.06.
//
-//#if ((HAL_CODE_BASE == RTL8192_S) && (DEV_BUS_TYPE==USB_INTERFACE))
if(IS_BB_REG_OFFSET_92S(RegAddr))
{
- //if(RT_USB_CANNOT_IO(Adapter)) return FALSE;
if((RegAddr & 0x03) != 0)
{
@@ -413,7 +362,7 @@ u32 rtl8192_QueryBBReg(struct net_device* dev, u32 RegAddr, u32 BitMask)
BitShift = phy_CalculateBitShift(BitMask);
ReturnValue = (OriginalValue & BitMask) >> BitShift;
- //RTPRINT(FPHY, PHY_BBR, ("BBR MASK=0x%x Addr[0x%x]=0x%x\n", BitMask, RegAddr, OriginalValue));
+
RT_TRACE(COMP_RF, "<---PHY_QueryBBReg(): RegAddr(%#x), BitMask(%#x), OriginalValue(%#x)\n", RegAddr, BitMask, OriginalValue);
return (ReturnValue);
}
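rtl8192_QueryBBReg() returns (OriginalValue & BitMask) >> BitShift, where BitShift is evidently the position of the mask's lowest set bit. phy_CalculateBitShift() itself is outside this hunk, so the helper below is an assumption about its behaviour, not a quote of it:

#include <stdint.h>
#include <stdio.h>

/* Lowest set bit position of a non-zero mask; a plausible stand-in
 * for phy_CalculateBitShift(). */
static uint32_t bit_shift(uint32_t mask)
{
        uint32_t i;

        for (i = 0; i < 32; i++)
                if (mask & (1u << i))
                        break;
        return i;
}

int main(void)
{
        uint32_t reg = 0x12345678, mask = 0x0000ff00;

        /* Masked field read, as in rtl8192_QueryBBReg(): prints 0x56. */
        printf("%#x\n", (unsigned)((reg & mask) >> bit_shift(mask)));
        return 0;
}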
@@ -435,8 +384,6 @@ u32 rtl8192_QueryBBReg(struct net_device* dev, u32 RegAddr, u32 BitMask)
* Return: None
* Note: This function is equal to "PutRegSetting" in PHY programming guide
*/
-//use phy dm core 8225 8256
-//void PHY_SetBBReg(struct net_device* dev,u32 RegAddr, u32 BitMask, u32 Data )
void rtl8192_setBBreg(struct net_device* dev, u32 RegAddr, u32 BitMask, u32 Data)
{
u32 OriginalValue, BitShift, NewValue;
@@ -450,7 +397,6 @@ void rtl8192_setBBreg(struct net_device* dev, u32 RegAddr, u32 BitMask, u32 Data
// infinite cycle.
// 2008.09.06.
//
-//#if ((HAL_CODE_BASE == RTL8192_S) && (DEV_BUS_TYPE==USB_INTERFACE))
if(IS_BB_REG_OFFSET_92S(RegAddr))
{
if((RegAddr & 0x03) != 0)
@@ -480,7 +426,6 @@ void rtl8192_setBBreg(struct net_device* dev, u32 RegAddr, u32 BitMask, u32 Data
write_nic_dword(dev, RegAddr, Data);
}
- //RT_TRACE(COMP_RF, "<---PHY_SetBBReg(): RegAddr(%#x), BitMask(%#x), Data(%#x)\n", RegAddr, BitMask, Data);
return;
}
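The write side is the usual read-modify-write: clear the field with ~BitMask, then OR in Data shifted into place; when the mask covers the whole dword the value is written directly, as the else branch above shows. The exact expression inside rtl8192_setBBreg() is not part of this hunk, so the one-liner below is the conventional form, using the same lowest-set-bit helper as the previous sketch:

#include <stdint.h>
#include <stdio.h>

/* Update only the field selected by mask; the trailing "& mask" just
 * guards against data wider than the field. */
static uint32_t masked_write(uint32_t old, uint32_t mask, uint32_t shift,
                             uint32_t data)
{
        return (old & ~mask) | ((data << shift) & mask);
}

int main(void)
{
        /* prints 0x1234ab78 */
        printf("%#x\n", (unsigned)masked_write(0x12345678u, 0xff00u, 8, 0xabu));
        return 0;
}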
@@ -505,8 +450,6 @@ void rtl8192_setBBreg(struct net_device* dev, u32 RegAddr, u32 BitMask, u32 Data
* Return: u32 Readback value
* Note: This function is equal to "GetRFRegSetting" in PHY programming guide
*/
-//in dm 8256 and phy
-//u32 PHY_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask)
u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask)
{
u32 Original_Value, Readback_Value, BitShift;//, flags;
@@ -527,9 +470,6 @@ u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u3
return 0;
}
- /* 2008/01/17 MH We get and release spin lock when reading RF register. */
- //PlatformAcquireSpinLock(dev, RT_RF_OPERATE_SPINLOCK);FIXLZM
- //spin_lock_irqsave(&priv->rf_lock, flags); //YJ,test,090113
down(&priv->rf_sem);
//
// <Roger_Notes> Due to 8051 operation cycle (limitation cycle: 6us) and 1-Byte access issue, we should use
@@ -537,17 +477,11 @@ u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u3
// infinite cycle.
// 2008.09.06.
//
-//#if (HAL_CODE_BASE == RTL8192_S && DEV_BUS_TYPE==USB_INTERFACE)
- //if(RT_USB_CANNOT_IO(Adapter)) return FALSE;
Original_Value = phy_QueryUsbRFReg(dev, eRFPath, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
Readback_Value = (Original_Value & BitMask) >> BitShift;
- //spin_unlock_irqrestore(&priv->rf_lock, flags); //YJ,test,090113
up(&priv->rf_sem);
- //PlatformReleaseSpinLock(dev, RT_RF_OPERATE_SPINLOCK);
-
- //RTPRINT(FPHY, PHY_RFR, ("RFR-%d MASK=0x%x Addr[0x%x]=0x%x\n", eRFPath, BitMask, RegAddr, Original_Value));
return (Readback_Value);
}
@@ -570,8 +504,6 @@ u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u3
* Return: None
* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
*/
-//use phy 8225 8256
-//void PHY_SetRFReg(struct net_device* dev,RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask,u32 Data )
void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
{
@@ -592,18 +524,11 @@ void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32
return;
}
- /* 2008/01/17 MH We get and release spin lock when writing RF register. */
- //PlatformAcquireSpinLock(dev, RT_RF_OPERATE_SPINLOCK);
- //spin_lock_irqsave(&priv->rf_lock, flags); //YJ,test,090113
down(&priv->rf_sem);
//
// <Roger_Notes> Due to 8051 operation cycle (limitation cycle: 6us) and 1-Byte access issue, we should use
// 4181 to access Base Band instead of 8051 on USB interface to make sure that access could be done in
// infinite cycle.
- // 2008.09.06.
- //
-//#if (HAL_CODE_BASE == RTL8192_S && DEV_BUS_TYPE==USB_INTERFACE)
- //if(RT_USB_CANNOT_IO(Adapter)) return;
if (BitMask != bRFRegOffsetMask) // RF data is 12 bits only
{
@@ -614,10 +539,7 @@ void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32
}
else
phy_SetUsbRFReg(dev, eRFPath, RegAddr, Data);
- //PlatformReleaseSpinLock(dev, RT_RF_OPERATE_SPINLOCK);
- //spin_unlock_irqrestore(&priv->rf_lock, flags); //YJ,test,090113
up(&priv->rf_sem);
- //RTPRINT(FPHY, PHY_RFW, ("RFW-%d MASK=0x%x Addr[0x%x]=0x%x\n", eRFPath, BitMask, RegAddr, Data));
RT_TRACE(COMP_RF, "<---PHY_SetRFReg(): RegAddr(%#x), BitMask(%#x), Data(%#x), eRFPath(%#x)\n",
RegAddr, BitMask, Data, eRFPath);
@@ -691,29 +613,9 @@ PHY_BBConfig8192S(struct net_device* dev)
struct r8192_priv *priv = ieee80211_priv(dev);
phy_InitBBRFRegisterDefinition(dev);
- //
- // Config BB and AGC
- //
- //switch( Adapter->MgntInfo.bRegHwParaFile )
- //{
- // case 0:
- // phy_BB8190_Config_HardCode(dev);
- // break;
- // case 1:
rtStatus = phy_BB8192S_Config_ParaFile(dev);
- // break;
-
- // case 2:
- // Partial Modify.
- // phy_BB8190_Config_HardCode(dev);
- // phy_BB8192S_Config_ParaFile(dev);
- // break;
-
- // default:
- // phy_BB8190_Config_HardCode(dev);
- // break;
- //}
+
PathMap = (u8)(rtl8192_QueryBBReg(dev, rFPGA0_TxInfo, 0xf) |
rtl8192_QueryBBReg(dev, rOFDM0_TRxPathEnable, 0xf));
priv->rf_pathmap = PathMap;
@@ -774,15 +676,10 @@ PHY_RFConfig8192S(struct net_device* dev)
}
-// Joseph test: new initialize order!!
-// Test only!! This part need to be re-organized.
-// Now it is just for 8256.
-//use in phy only
#ifdef TO_DO_LIST
static RT_STATUS
phy_BB8190_Config_HardCode(struct net_device* dev)
{
- //RT_ASSERT(FALSE, ("This function is not implement yet!! \n"));
return RT_STATUS_SUCCESS;
}
#endif
@@ -811,7 +708,6 @@ phy_SetBBtoDiffRFWithHeaderFile(struct net_device* dev, u8 ConfigType)
u32* Rtl819XPHY_REGArraytoXTXR_Table;
u16 PHY_REGArraytoXTXRLen;
-//#if (HAL_CODE_BASE != RTL8192_S)
if(priv->rf_type == RF_1T1R)
{
@@ -823,11 +719,6 @@ phy_SetBBtoDiffRFWithHeaderFile(struct net_device* dev, u8 ConfigType)
Rtl819XPHY_REGArraytoXTXR_Table = Rtl819XPHY_REG_to1T2R_Array;
PHY_REGArraytoXTXRLen = PHY_ChangeTo_1T2RArrayLength;
}
- //else if(priv->rf_type == RF_2T2R || priv->rf_type == RF_2T2R_GREEN)
- //{
- // Rtl819XPHY_REGArraytoXTXR_Table = Rtl819XPHY_REG_to2T2R_Array;
- // PHY_REGArraytoXTXRLen = PHY_ChangeTo_2T2RArrayLength;
- //}
else
{
return RT_STATUS_FAILURE;
@@ -850,15 +741,11 @@ phy_SetBBtoDiffRFWithHeaderFile(struct net_device* dev, u8 ConfigType)
else if (Rtl819XPHY_REGArraytoXTXR_Table[i] == 0xf9)
udelay(1);
rtl8192_setBBreg(dev, Rtl819XPHY_REGArraytoXTXR_Table[i], Rtl819XPHY_REGArraytoXTXR_Table[i+1], Rtl819XPHY_REGArraytoXTXR_Table[i+2]);
- //RT_TRACE(COMP_SEND,
- //"The Rtl819XPHY_REGArraytoXTXR_Table[0] is %lx Rtl819XPHY_REGArraytoXTXR_Table[1] is %lx Rtl819XPHY_REGArraytoXTXR_Table[2] is %lx \n",
- //Rtl819XPHY_REGArraytoXTXR_Table[i],Rtl819XPHY_REGArraytoXTXR_Table[i+1], Rtl819XPHY_REGArraytoXTXR_Table[i+2]);
}
}
else {
RT_TRACE(COMP_SEND, "phy_SetBBtoDiffRFWithHeaderFile(): ConfigType != BaseBand_Config_PHY_REG\n");
}
-//#endif // #if (HAL_CODE_BASE != RTL8192_S)
return RT_STATUS_SUCCESS;
}
@@ -869,14 +756,6 @@ phy_BB8192S_Config_ParaFile(struct net_device* dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RT_STATUS rtStatus = RT_STATUS_SUCCESS;
- //u8 u2RegValue;
- //u16 u4RegValue;
- //char szBBRegFile[] = RTL819X_PHY_REG;
- //char szBBRegFile1T2R[] = RTL819X_PHY_REG_1T2R;
- //char szBBRegPgFile[] = RTL819X_PHY_REG_PG;
- //char szAGCTableFile[] = RTL819X_AGC_TAB;
- //char szBBRegto1T1RFile[] = RTL819X_PHY_REG_to1T1R;
- //char szBBRegto1T2RFile[] = RTL819X_PHY_REG_to1T2R;
RT_TRACE(COMP_INIT, "==>phy_BB8192S_Config_ParaFile\n");
@@ -956,42 +835,16 @@ phy_ConfigMACWithHeaderFile(struct net_device* dev)
u32 i = 0;
u32 ArrayLength = 0;
u32* ptrArray;
- //struct r8192_priv *priv = ieee80211_priv(dev);
-//#if (HAL_CODE_BASE != RTL8192_S)
- /*if(Adapter->bInHctTest)
- {
- RT_TRACE(COMP_INIT, DBG_LOUD, ("Rtl819XMACPHY_ArrayDTM\n"));
- ArrayLength = MACPHY_ArrayLengthDTM;
- ptrArray = Rtl819XMACPHY_ArrayDTM;
- }
- else if(pHalData->bTXPowerDataReadFromEEPORM)
- {
-// RT_TRACE(COMP_INIT, DBG_LOUD, ("Rtl819XMACPHY_Array_PG\n"));
-// ArrayLength = MACPHY_Array_PGLength;
-// ptrArray = Rtl819XMACPHY_Array_PG;
-
- }else*/
{ //2008.11.06 Modified by tynli.
RT_TRACE(COMP_INIT, "Read Rtl819XMACPHY_Array\n");
ArrayLength = MAC_2T_ArrayLength;
ptrArray = Rtl819XMAC_Array;
}
- /*for(i = 0 ;i < ArrayLength;i=i+3){
- RT_TRACE(COMP_SEND, DBG_LOUD, ("The Rtl819XMACPHY_Array[0] is %lx Rtl819XMACPHY_Array[1] is %lx Rtl819XMACPHY_Array[2] is %lx\n",ptrArray[i], ptrArray[i+1], ptrArray[i+2]));
- if(ptrArray[i] == 0x318)
- {
- ptrArray[i+2] = 0x00000800;
- //DbgPrint("ptrArray[i], ptrArray[i+1], ptrArray[i+2] = %x, %x, %x\n",
- // ptrArray[i], ptrArray[i+1], ptrArray[i+2]);
- }
- PHY_SetBBReg(Adapter, ptrArray[i], ptrArray[i+1], ptrArray[i+2]);
- }*/
for(i = 0 ;i < ArrayLength;i=i+2){ // Add by tynli for 2 column
write_nic_byte(dev, ptrArray[i], (u8)ptrArray[i+1]);
}
-//#endif
return RT_STATUS_SUCCESS;
}
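phy_ConfigMACWithHeaderFile() walks a flat two-column array of (address, value) pairs and writes one byte per pair. A self-contained version of that loop with a made-up table — the real Rtl819XMAC_Array lives in a header that is not part of this diff:

#include <stdint.h>
#include <stdio.h>

/* Two-column (address, value) table; contents are illustrative only. */
static const uint32_t mac_tbl[] = {
        0x000, 0x10,
        0x001, 0x3f,
        0x002, 0x07,
};

int main(void)
{
        size_t i;

        for (i = 0; i + 1 < sizeof(mac_tbl) / sizeof(mac_tbl[0]); i += 2)
                printf("write_nic_byte(0x%03x, 0x%02x)\n",
                       (unsigned)mac_tbl[i], (unsigned)(uint8_t)mac_tbl[i + 1]);
        return 0;
}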
@@ -1015,41 +868,14 @@ static RT_STATUS
phy_ConfigBBWithHeaderFile(struct net_device* dev,u8 ConfigType)
{
int i;
- //u8 ArrayLength;
u32* Rtl819XPHY_REGArray_Table;
u32* Rtl819XAGCTAB_Array_Table;
u16 PHY_REGArrayLen, AGCTAB_ArrayLen;
- //struct r8192_priv *priv = ieee80211_priv(dev);
-//#if (HAL_CODE_BASE != RTL8192_S)
- /*if(Adapter->bInHctTest)
- {
-
- AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM;
- Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM;
-
- if(pHalData->RF_Type == RF_2T4R)
- {
- PHY_REGArrayLen = PHY_REGArrayLengthDTM;
- Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArrayDTM;
- }
- else if (pHalData->RF_Type == RF_1T2R)
- {
- PHY_REGArrayLen = PHY_REG_1T2RArrayLengthDTM;
- Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArrayDTM;
- }
- }
- else
- */
- //{
- //
- // 2008.11.06 Modified by tynli.
- //
AGCTAB_ArrayLen = AGCTAB_ArrayLength;
Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_Array;
PHY_REGArrayLen = PHY_REG_2T2RArrayLength; // Default RF_type: 2T2R
Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_Array;
- //}
if(ConfigType == BaseBand_Config_PHY_REG)
{
@@ -1068,7 +894,6 @@ phy_ConfigBBWithHeaderFile(struct net_device* dev,u8 ConfigType)
else if (Rtl819XPHY_REGArray_Table[i] == 0xf9)
udelay(1);
rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i], bMaskDWord, Rtl819XPHY_REGArray_Table[i+1]);
- //RT_TRACE(COMP_SEND, "The Rtl819XPHY_REGArray_Table[0] is %lx Rtl819XPHY_REGArray[1] is %lx \n",Rtl819XPHY_REGArray_Table[i], Rtl819XPHY_REGArray_Table[i+1]);
}
}
@@ -1078,7 +903,6 @@ phy_ConfigBBWithHeaderFile(struct net_device* dev,u8 ConfigType)
rtl8192_setBBreg(dev, Rtl819XAGCTAB_Array_Table[i], bMaskDWord, Rtl819XAGCTAB_Array_Table[i+1]);
}
}
-//#endif // #if (HAL_CODE_BASE != RTL8192_S)
return RT_STATUS_SUCCESS;
}
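Both the BB and RF tables reuse a few out-of-range "addresses" as delay markers instead of register writes. The only mappings visible in this patch are 0xfe (a 1000 ms delay in the RF tables below) and 0xf9 (1 µs); the other markers (0xfd, 0xfc, 0xfb, 0xfa) are handled the same way but their exact delays are not all shown here, so the sketch spells out only the two visible cases. The callbacks stand in for mdelay()/udelay():

#include <stdint.h>
#include <stdio.h>

struct delay_ops {
        void (*mdelay_ms)(unsigned long ms);
        void (*udelay_us)(unsigned long us);
};

/* Returns 1 if the entry was a delay marker (and performs the delay),
 * 0 if it is a real register address the caller should write. */
static int handle_delay_marker(uint32_t addr, const struct delay_ops *ops)
{
        switch (addr) {
        case 0xfe:
                ops->mdelay_ms(1000);
                return 1;
        case 0xf9:
                ops->udelay_us(1);
                return 1;
        default:
                return 0;
        }
}

static void fake_mdelay(unsigned long ms) { printf("mdelay(%lu)\n", ms); }
static void fake_udelay(unsigned long us) { printf("udelay(%lu)\n", us); }

int main(void)
{
        const struct delay_ops ops = { fake_mdelay, fake_udelay };
        uint32_t entries[] = { 0xfe, 0x800, 0xf9 };
        size_t i;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
                if (!handle_delay_marker(entries[i], &ops))
                        printf("write register 0x%x\n", (unsigned)entries[i]);
        return 0;
}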
@@ -1103,12 +927,8 @@ static RT_STATUS
phy_ConfigBBWithPgHeaderFile(struct net_device* dev,u8 ConfigType)
{
int i;
- //u8 ArrayLength;
u32* Rtl819XPHY_REGArray_Table_PG;
u16 PHY_REGArrayPGLen;
- //struct r8192_priv *priv = ieee80211_priv(dev);
-//#if (HAL_CODE_BASE != RTL8192_S)
- // Default: pHalData->RF_Type = RF_2T2R.
PHY_REGArrayPGLen = PHY_REG_Array_PGLength;
Rtl819XPHY_REGArray_Table_PG = Rtl819XPHY_REG_Array_PG;
@@ -1130,15 +950,13 @@ phy_ConfigBBWithPgHeaderFile(struct net_device* dev,u8 ConfigType)
else if (Rtl819XPHY_REGArray_Table_PG[i] == 0xf9)
udelay(1);
rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table_PG[i], Rtl819XPHY_REGArray_Table_PG[i+1], Rtl819XPHY_REGArray_Table_PG[i+2]);
- //RT_TRACE(COMP_SEND, "The Rtl819XPHY_REGArray_Table_PG[0] is %lx Rtl819XPHY_REGArray_Table_PG[1] is %lx \n",
- // Rtl819XPHY_REGArray_Table_PG[i], Rtl819XPHY_REGArray_Table_PG[i+1]);
}
}else{
RT_TRACE(COMP_SEND, "phy_ConfigBBWithPgHeaderFile(): ConfigType != BaseBand_Config_PHY_REG\n");
}
return RT_STATUS_SUCCESS;
-} /* phy_ConfigBBWithPgHeaderFile */
+}
/*-----------------------------------------------------------------------------
* Function: PHY_ConfigRFWithHeaderFile()
@@ -1155,19 +973,14 @@ phy_ConfigBBWithPgHeaderFile(struct net_device* dev,u8 ConfigType)
*
* Note: Delay may be required for RF configuration
*---------------------------------------------------------------------------*/
-//in 8256 phy_RF8256_Config_ParaFile only
-//RT_STATUS PHY_ConfigRFWithHeaderFile(struct net_device* dev,RF90_RADIO_PATH_E eRFPath)
u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E eRFPath)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int i;
- //u32* pRFArray;
RT_STATUS rtStatus = RT_STATUS_SUCCESS;
u32 *Rtl819XRadioA_Array_Table;
u32 *Rtl819XRadioB_Array_Table;
- //u32* Rtl819XRadioC_Array_Table;
- //u32* Rtl819XRadioD_Array_Table;
u16 RadioA_ArrayLen,RadioB_ArrayLen;
{ //2008.11.06 Modified by tynli
@@ -1190,18 +1003,12 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
rtStatus = RT_STATUS_SUCCESS;
- // When initialization, we want the delay function(mdelay(), delay_us()
- // ==> actually we call PlatformStallExecution()) to do NdisStallExecution()
- // [busy wait] instead of NdisMSleep(). So we acquire RT_INITIAL_SPINLOCK
- // to run at Dispatch level to achive it.
- //cosa PlatformAcquireSpinLock(Adapter, RT_INITIAL_SPINLOCK);
switch(eRFPath){
case RF90_PATH_A:
for(i = 0;i<RadioA_ArrayLen; i=i+2){
if(Rtl819XRadioA_Array_Table[i] == 0xfe)
{ // Deay specific ms. Only RF configuration require delay.
-//#if (DEV_BUS_TYPE == USB_INTERFACE)
mdelay(1000);
}
else if (Rtl819XRadioA_Array_Table[i] == 0xfd)
@@ -1210,7 +1017,6 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
mdelay(1);
else if (Rtl819XRadioA_Array_Table[i] == 0xfb)
udelay(50);
- //PlatformStallExecution(50);
else if (Rtl819XRadioA_Array_Table[i] == 0xfa)
udelay(5);
else if (Rtl819XRadioA_Array_Table[i] == 0xf9)
@@ -1225,7 +1031,6 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
for(i = 0;i<RadioB_ArrayLen; i=i+2){
if(Rtl819XRadioB_Array_Table[i] == 0xfe)
{ // Deay specific ms. Only RF configuration require delay.
-//#if (DEV_BUS_TYPE == USB_INTERFACE)
mdelay(1000);
}
else if (Rtl819XRadioB_Array_Table[i] == 0xfd)
@@ -1281,7 +1086,6 @@ PHY_CheckBBAndRFOK(
RF90_RADIO_PATH_E eRFPath
)
{
- //struct r8192_priv *priv = ieee80211_priv(dev);
RT_STATUS rtStatus = RT_STATUS_SUCCESS;
u32 i, CheckTimes = 4,ulRegRead = 0;
u32 WriteAddr[4];
@@ -1302,7 +1106,6 @@ PHY_CheckBBAndRFOK(
switch(CheckBlock)
{
case HW90_BLOCK_MAC:
- //RT_ASSERT(FALSE, ("PHY_CheckBBRFOK(): Never Write 0x100 here!"));
RT_TRACE(COMP_INIT, "PHY_CheckBBRFOK(): Never Write 0x100 here!\n");
break;
@@ -1313,18 +1116,12 @@ PHY_CheckBBAndRFOK(
break;
case HW90_BLOCK_RF:
- // When initialization, we want the delay function(mdelay(), delay_us()
- // ==> actually we call PlatformStallExecution()) to do NdisStallExecution()
- // [busy wait] instead of NdisMSleep(). So we acquire RT_INITIAL_SPINLOCK
- // to run at Dispatch level to achive it.
- //cosa PlatformAcquireSpinLock(dev, RT_INITIAL_SPINLOCK);
WriteData[i] &= 0xfff;
rtl8192_phy_SetRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bRFRegOffsetMask, WriteData[i]);
// TODO: we should not delay for such a long time. Ask SD3
mdelay(10);
ulRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMaskDWord);
mdelay(10);
- //cosa PlatformReleaseSpinLock(dev, RT_INITIAL_SPINLOCK);
break;
default:
@@ -1338,7 +1135,6 @@ PHY_CheckBBAndRFOK(
//
if(ulRegRead != WriteData[i])
{
- //RT_TRACE(COMP_FPGA, ("ulRegRead: %x, WriteData: %x \n", ulRegRead, WriteData[i]));
RT_TRACE(COMP_ERR, "read back error(read:%x, write:%x)\n", ulRegRead, WriteData[i]);
rtStatus = RT_STATUS_FAILURE;
break;
@@ -1348,7 +1144,6 @@ PHY_CheckBBAndRFOK(
return rtStatus;
}
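PHY_CheckBBAndRFOK() masks the test pattern to 12 bits before writing it to the RF block (RF data is only 12 bits wide), reads the register back after a delay, and flags any mismatch. A standalone sketch of the same check; the fake read/write callbacks replace rtl8192_phy_QueryRFReg()/rtl8192_phy_SetRFReg() so it runs without hardware:

#include <stdint.h>
#include <stdio.h>

/* Write a 12-bit pattern and verify it reads back unchanged. */
static int check_rf_readback(uint32_t (*rf_read)(uint32_t),
                             void (*rf_write)(uint32_t, uint32_t),
                             uint32_t addr, uint32_t pattern)
{
        uint32_t want = pattern & 0xfff;

        rf_write(addr, want);
        return rf_read(addr) == want ? 0 : -1;
}

/* Fake register backing store so the sketch is self-contained. */
static uint32_t fake_reg;
static void fake_write(uint32_t addr, uint32_t v) { (void)addr; fake_reg = v; }
static uint32_t fake_read(uint32_t addr) { (void)addr; return fake_reg; }

int main(void)
{
        printf("readback %s\n",
               check_rf_readback(fake_read, fake_write, 0x0, 0x1234) ? "error" : "ok");
        return 0;
}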
-//no use temp in windows driver
#ifdef TO_DO_LIST
void
PHY_SetRFPowerState8192SUsb(
@@ -1359,7 +1154,6 @@ PHY_SetRFPowerState8192SUsb(
struct r8192_priv *priv = ieee80211_priv(dev);
bool WaitShutDown = FALSE;
u32 DWordContent;
- //RF90_RADIO_PATH_E eRFPath;
u8 eRFPath;
BB_REGISTER_DEFINITION_T *pPhyReg;
@@ -1368,7 +1162,6 @@ PHY_SetRFPowerState8192SUsb(
priv->SetRFPowerStateInProgress = TRUE;
- // TODO: Emily, 2006.11.21, we should rewrite this function
if(RFPowerState==RF_SHUT_DOWN)
{
@@ -1420,22 +1213,20 @@ PHY_SetRFPowerState8192SUsb(
case RF_8258:
break;
- }// switch( priv->rf_chip )
+ }
priv->SetRFPowerStateInProgress = FALSE;
}
#endif
#ifdef RTL8192U
-//no use temp in windows driver
void
PHY_UpdateInitialGain(
struct net_device* dev
)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //unsigned char *IGTable;
- //u8 DIG_CurrentInitialGain = 4;
+
switch(priv->rf_chip)
{
@@ -1456,7 +1247,6 @@ PHY_UpdateInitialGain(
}
#endif
-//YJ,modified,090107
void PHY_GetHWRegOriginalValue(struct net_device* dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1625,8 +1415,6 @@ static void phy_InitBBRFRegisterDefinition( struct net_device* dev)
// Tranceiver LSSI Readback PI mode
priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBackPi = TransceiverB_HSPI_Readback;
- //pHalData->PHYRegDef[RF90_PATH_C].rfLSSIReadBackPi = rFPGA0_XC_LSSIReadBack;
- //pHalData->PHYRegDef[RF90_PATH_D].rfLSSIReadBackPi = rFPGA0_XD_LSSIReadBack;
}
@@ -1637,9 +1425,7 @@ static void phy_InitBBRFRegisterDefinition( struct net_device* dev)
// Assumption: This function must be executed in re-schdulable context,
// ie. PASSIVE_LEVEL.
//
-// 050823, by rcnjko.
-//not understand it seem's use in init
-//SetHwReg8192SUsb--->HalFunc.SetHwRegHandler
+
bool PHY_SetRFPowerState(struct net_device* dev, RT_RF_POWER_STATE eRFPowerState)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1665,8 +1451,6 @@ static bool phy_SetRFPowerState8192SU(struct net_device* dev,RT_RF_POWER_STATE e
{
struct r8192_priv *priv = ieee80211_priv(dev);
bool bResult = TRUE;
- //u8 eRFPath;
- //u8 i, QueueID;
u8 u1bTmp;
if(priv->SetRFPowerStateInProgress == TRUE)
@@ -1697,9 +1481,6 @@ static bool phy_SetRFPowerState8192SU(struct net_device* dev,RT_RF_POWER_STATE e
break;
//
//RF Off/Sleep sequence. Designed/tested from SD4 Scott, SD1 Grent and Jonbon.
- // Added by Bruce, 2008-11-22.
- //
- //==================================================================
// (0) Disable FW BB reset checking
write_nic_dword(dev, WFM5, FW_BB_RESET_DISABLE);
@@ -1728,7 +1509,6 @@ static bool phy_SetRFPowerState8192SU(struct net_device* dev,RT_RF_POWER_STATE e
default:
bResult = FALSE;
- //RT_ASSERT(FALSE, ("phy_SetRFPowerState8192SU(): unknown state to set: 0x%X!!!\n", eRFPowerState));
break;
}
break;
@@ -1860,7 +1640,6 @@ PHY_GetTxPowerLevel8192S(
void PHY_SetTxPowerLevel8192S(struct net_device* dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //HAL_DATA_TYPE *pHalData = GET_HAL_DATA(dev);
u8 powerlevel = (u8)EEPROM_Default_TxPower, powerlevelOFDM24G = 0x10;
s8 ant_pwr_diff = 0;
u32 u4RegValue;
@@ -1909,14 +1688,6 @@ PHY_GetTxPowerLevel8192S(
// RF B HT OFDM pwr-RFA HT OFDM pwr
ant_pwr_diff = priv->RfTxPwrLevelOfdm2T[1][index] -
priv->RfTxPwrLevelOfdm2T[0][index];
- // RF B (HT OFDM pwr+legacy-ht-diff) -(RFA HT OFDM pwr+legacy-ht-diff)
- // We can not handle Path B&A HT/Legacy pwr diff for 92S now.
-
- //RTPRINT(FPHY, PHY_TXPWR, ("CH-%d HT40 A/B Pwr index = %x/%x(%d/%d)\n",
- //channel, priv->RfTxPwrLevelOfdm2T[0][index],
- //priv->RfTxPwrLevelOfdm2T[1][index],
- //priv->RfTxPwrLevelOfdm2T[0][index],
- //priv->RfTxPwrLevelOfdm2T[1][index]));
ht20pwr[0] = ht40pwr[0] = priv->RfTxPwrLevelOfdm2T[0][index];
ht20pwr[1] = ht40pwr[1] = priv->RfTxPwrLevelOfdm2T[1][index];
@@ -1949,10 +1720,6 @@ PHY_GetTxPowerLevel8192S(
// RF B HT OFDM pwr-RFA HT OFDM pwr
if (priv->rf_type == RF_2T2R)
ant_pwr_diff = ht20pwr[1] - ht20pwr[0];
-
- //RTPRINT(FPHY, PHY_TXPWR,
- //("HT20 to HT40 pwrdiff[A/B]=%d/%d, ant_pwr_diff=%d(B-A=%d-%d)\n",
- //pwrdiff[0], pwrdiff[1], ant_pwr_diff, ht20pwr[1], ht20pwr[0]));
}
// Band Edge scheme is enabled for FCC mode
@@ -1997,18 +1764,12 @@ PHY_GetTxPowerLevel8192S(
{
if (channel <= 1 || channel >= 11)
{
- //RTPRINT(FPHY, PHY_TXPWR,
- //("HT20 Band-edge pwrdiff[A/B]=%d/%d, ant_pwr_diff=%d(B-A=%d-%d)\n",
- //pwrdiff[0], pwrdiff[1], ant_pwr_diff, ht20pwr[1], ht20pwr[0]));
}
}
else
{
if (channel <= 3 || channel >= 9)
{
- //RTPRINT(FPHY, PHY_TXPWR,
- //("HT40 Band-edge pwrdiff[A/B]=%d/%d, ant_pwr_diff=%d(B-A=%d-%d)\n",
- //pwrdiff[0], pwrdiff[1], ant_pwr_diff, ht40pwr[1], ht40pwr[0]));
}
}
}
@@ -2021,10 +1782,6 @@ PHY_GetTxPowerLevel8192S(
if(ant_pwr_diff < -8)
ant_pwr_diff = -8;
- //RTPRINT(FPHY, PHY_TXPWR,
- //("CCK/HT Power index = %x/%x(%d/%d), ant_pwr_diff=%d\n",
- //powerlevel, powerlevelOFDM24G, powerlevel, powerlevelOFDM24G, ant_pwr_diff));
-
ant_pwr_diff &= 0xf;
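ant_pwr_diff ends up in a 4-bit two's-complement field: the clamp above keeps it no lower than -8 (the matching upper clamp to 7 sits just outside this hunk and is assumed here), and the & 0xf then yields the on-register encoding. A few worked values:

#include <stdio.h>

int main(void)
{
        /* Signed difference -> 4-bit two's-complement nibble. */
        int diffs[] = { 7, 1, 0, -1, -8 };
        size_t i;

        for (i = 0; i < sizeof(diffs) / sizeof(diffs[0]); i++)
                printf("%2d -> 0x%x\n", diffs[i], (unsigned)(diffs[i] & 0xf));
        /* prints 7->0x7, 1->0x1, 0->0x0, -1->0xf, -8->0x8 */
        return 0;
}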
// Antenna TX power difference
@@ -2050,7 +1807,6 @@ PHY_GetTxPowerLevel8192S(
// TODO:
// 1. 802.11h power contraint
//
- // 071011, by rcnjko.
//
#ifdef TODO //WB, 11h has not implemented now.
if( priv->ieee80211->iw_mode != IW_MODE_INFRA && priv->bWithCcxCellPwr &&
@@ -2095,8 +1851,6 @@ PHY_GetTxPowerLevel8192S(
switch(priv->rf_chip)
{
case RF_8225:
- //PHY_SetRF8225CckTxPower(dev, powerlevel);
- //PHY_SetRF8225OfdmTxPower(dev, powerlevelOFDM24G);
break;
case RF_8256:
@@ -2121,8 +1875,6 @@ PHY_GetTxPowerLevel8192S(
//
// TODO:
// A mode.
-// By Bruce, 2008-02-04.
-// no use temp
bool PHY_UpdateTxPowerDbm8192S(struct net_device* dev, long powerInDbm)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2164,8 +1916,6 @@ bool PHY_UpdateTxPowerDbm8192S(struct net_device* dev, long powerInDbm)
Description:
When beacon interval is changed, the values of the
hw registers should be modified.
- By tynli, 2008.10.24.
-
*/
extern void PHY_SetBeaconHwReg( struct net_device* dev, u16 BeaconInterval)
@@ -2173,8 +1923,6 @@ extern void PHY_SetBeaconHwReg( struct net_device* dev, u16 BeaconInterval)
u32 NewBeaconNum;
NewBeaconNum = BeaconInterval *32 - 64;
- //PlatformEFIOWrite4Byte(Adapter, WFM3+4, NewBeaconNum);
- //PlatformEFIOWrite4Byte(Adapter, WFM3, 0xB026007C);
write_nic_dword(dev, WFM3+4, NewBeaconNum);
write_nic_dword(dev, WFM3, 0xB026007C);
}
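PHY_SetBeaconHwReg() derives the value programmed at WFM3+4 as BeaconInterval * 32 - 64, with the companion constant 0xB026007C written to WFM3 taken as-is. The arithmetic for two common beacon intervals (in TU):

#include <stdio.h>

int main(void)
{
        unsigned int intervals[] = { 100, 200 };
        size_t i;

        for (i = 0; i < 2; i++)
                printf("BeaconInterval=%u -> NewBeaconNum=%u\n",
                       intervals[i], intervals[i] * 32 - 64);
        /* 100 -> 3136, 200 -> 6336 */
        return 0;
}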
@@ -2184,7 +1932,6 @@ extern void PHY_SetBeaconHwReg( struct net_device* dev, u16 BeaconInterval)
// Map dBm into Tx power index according to
// current HW model, for example, RF and PA, and
// current wireless mode.
-// By Bruce, 2008-01-29.
// use in phy only
static u8 phy_DbmToTxPwrIdx(
struct net_device* dev,
@@ -2192,7 +1939,6 @@ static u8 phy_DbmToTxPwrIdx(
long PowerInDbm
)
{
- //struct r8192_priv *priv = ieee80211_priv(dev);
u8 TxPwrIdx = 0;
long Offset = 0;
@@ -2202,7 +1948,6 @@ static u8 phy_DbmToTxPwrIdx(
// 3dbm, and OFDM HT equals to 0dbm repectively.
// Note:
// The mapping may be different by different NICs. Do not use this formula for what needs accurate result.
- // By Bruce, 2008-01-29.
//
switch(WirelessMode)
{
@@ -2238,7 +1983,6 @@ static u8 phy_DbmToTxPwrIdx(
// Map Tx power index into dBm according to
// current HW model, for example, RF and PA, and
// current wireless mode.
-// By Bruce, 2008-01-29.
// use in phy only
static long phy_TxPwrIdxToDbm(
struct net_device* dev,
@@ -2255,7 +1999,6 @@ static long phy_TxPwrIdxToDbm(
// 3dbm, and OFDM HT equals to 0dbm repectively.
// Note:
// The mapping may be different by different NICs. Do not use this formula for what needs accurate result.
- // By Bruce, 2008-01-29.
//
switch(WirelessMode)
{
@@ -2327,9 +2070,6 @@ PHY_ScanOperationBackup8192S(
void PHY_InitialGain8192S(struct net_device* dev,u8 Operation )
{
- //struct r8192_priv *priv = ieee80211_priv(dev);
- //u32 BitMask;
- //u8 initial_gain;
}
/*-----------------------------------------------------------------------------
@@ -2353,11 +2093,6 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u8 regBwOpMode;
- //return;
-
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //u32 NowL, NowH;
- //u8Byte BeginTime, EndTime;
u8 regRRSR_RSC;
RT_TRACE(COMP_SWBW, "==>SetBWModeCallback8190Pci() Switch to %s bandwidth\n", \
@@ -2372,10 +2107,6 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
if(!priv->up)
return;
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //NowL = read_nic_dword(dev, TSFR);
- //NowH = read_nic_dword(dev, TSFR+4);
- //BeginTime = ((u8Byte)NowH << 32) + NowL;
//3//
//3//<1>Set MAC register
@@ -2386,8 +2117,6 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
switch(priv->CurrentChannelBW)
{
case HT_CHANNEL_WIDTH_20:
- //if(priv->card_8192_version >= VERSION_8192S_BCUT)
- // write_nic_byte(dev, rFPGA0_AnalogParameter2, 0x58);
regBwOpMode |= BW_OPMODE_20MHZ;
// 2007/02/07 Mark by Emily becasue we have not verify whether this register works
@@ -2395,8 +2124,6 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
break;
case HT_CHANNEL_WIDTH_20_40:
- //if(priv->card_8192_version >= VERSION_8192S_BCUT)
- // write_nic_byte(dev, rFPGA0_AnalogParameter2, 0x18);
regBwOpMode &= ~BW_OPMODE_20MHZ;
// 2007/02/07 Mark by Emily becasue we have not verify whether this register works
@@ -2421,12 +2148,6 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
- // Correct the tx power for CCK rate in 40M. Suggest by YN, 20071207
- // It is set in Tx descriptor for 8192x series
- //write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000);
- //write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317);
- //write_nic_dword(dev, rCCK0_DebugPort, 0x00000204);
-
if (priv->card_8192_version >= VERSION_8192S_BCUT)
write_nic_byte(dev, rFPGA0_AnalogParameter2, 0x58);
@@ -2438,16 +2159,11 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
- // Correct the tx power for CCK rate in 40M. Suggest by YN, 20071207
- //write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000);
- //write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e);
- //write_nic_dword(dev, rCCK0_DebugPort, 0x00000409);
// Set Control channel to upper or lower. These settings are required only for 40MHz
rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1));
rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC);
- //rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00300000, 3);
if (priv->card_8192_version >= VERSION_8192S_BCUT)
write_nic_byte(dev, rFPGA0_AnalogParameter2, 0x18);
@@ -2461,11 +2177,6 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
}
//Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //NowL = read_nic_dword(dev, TSFR);
- //NowH = read_nic_dword(dev, TSFR+4);
- //EndTime = ((u8Byte)NowH << 32) + NowL;
- //RT_TRACE(COMP_SCAN, DBG_LOUD, ("SetBWModeCallback8190Pci: time of SetBWMode = %I64d us!\n", (EndTime - BeginTime)));
//3<3>Set RF related register
switch( priv->rf_chip )
@@ -2516,36 +2227,11 @@ void PHY_SetBWModeCallback8192S(struct net_device *dev)
*
* Note: We do not take j mode into consideration now
*---------------------------------------------------------------------------*/
-//extern void PHY_SetBWMode8192S( struct net_device* dev,
-// HT_CHANNEL_WIDTH Bandwidth, // 20M or 40M
-// HT_EXTCHNL_OFFSET Offset // Upper, Lower, or Don't care
void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
HT_CHANNEL_WIDTH tmpBW = priv->CurrentChannelBW;
-
- // Modified it for 20/40 mhz switch by guangan 070531
-
- //return;
-
- //if(priv->SwChnlInProgress)
-// if(pMgntInfo->bScanInProgress)
-// {
-// RT_TRACE(COMP_SCAN, DBG_LOUD, ("SetBWMode8190Pci() %s Exit because bScanInProgress!\n",
-// Bandwidth == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz"));
-// return;
-// }
-
-// if(priv->SetBWModeInProgress)
-// {
-// // Modified it for 20/40 mhz switch by guangan 070531
-// RT_TRACE(COMP_SCAN, DBG_LOUD, ("SetBWMode8190Pci() %s cancel last timer because SetBWModeInProgress!\n",
-// Bandwidth == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz"));
-// PlatformCancelTimer(dev, &priv->SetBWModeTimer);
-// //return;
-// }
-
if(priv->SetBWModeInProgress)
return;
@@ -2560,7 +2246,7 @@ void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EX
else
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- if((priv->up) )// && !(RT_CANNOT_IO(Adapter) && Adapter->bInSetPower) )
+ if((priv->up) )
{
SetBWModeCallback8192SUsbWorkItem(dev);
}
@@ -2578,7 +2264,6 @@ void PHY_SwChnlCallback8192S(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u32 delay;
- //bool ret;
RT_TRACE(COMP_CH, "==>SwChnlCallback8190Pci(), switch to channel %d\n", priv->chan);
@@ -2595,16 +2280,11 @@ void PHY_SwChnlCallback8192S(struct net_device *dev)
if(!priv->SwChnlInProgress)
break;
- //if(!phy_SwChnlStepByStep(dev, priv->CurrentChannel, &priv->SwChnlStage, &priv->SwChnlStep, &delay))
if(!phy_SwChnlStepByStep(dev, priv->chan, &priv->SwChnlStage, &priv->SwChnlStep, &delay))
{
if(delay>0)
{
mdelay(delay);
- //PlatformSetTimer(dev, &priv->SwChnlTimer, delay);
- //mod_timer(&priv->SwChnlTimer, jiffies + MSECS(delay));
- //==>PHY_SwChnlCallback8192S(dev); for 92se
- //==>SwChnlCallback8192SUsb(dev) for 92su
}
else
continue;
@@ -2618,12 +2298,9 @@ void PHY_SwChnlCallback8192S(struct net_device *dev)
}
// Call after initialization
-//extern void PHY_SwChnl8192S(struct net_device* dev, u8 channel)
u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //u8 tmpchannel =channel;
- //bool bResult = false;
if(!priv->up)
return false;
@@ -2634,7 +2311,6 @@ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
if(priv->SetBWModeInProgress)
return false;
- //--------------------------------------------
switch(priv->ieee80211->mode)
{
case WIRELESS_MODE_A:
@@ -2661,10 +2337,9 @@ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
break;
default:
- ;//RT_TRACE(COMP_ERR, "Invalid WirelessMode(%#x)!!\n", priv->ieee80211->mode);
+ ;
break;
}
- //--------------------------------------------
priv->SwChnlInProgress = TRUE;
if( channel == 0)
@@ -2675,7 +2350,7 @@ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
priv->SwChnlStage=0;
priv->SwChnlStep=0;
- if((priv->up))// && !(RT_CANNOT_IO(Adapter) && Adapter->bInSetPower))
+ if((priv->up))
{
SwChnlCallback8192SUsbWorkItem(dev);
#ifdef TO_DO_LIST
@@ -2695,7 +2370,6 @@ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
{
RT_TRACE(COMP_SCAN, "PHY_SwChnl8192S SwChnlInProgress FALSE driver sleep or unload\n");
priv->SwChnlInProgress = false;
- //priv->CurrentChannel = tmpchannel;
}
return true;
}
@@ -2709,8 +2383,7 @@ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
// The following procedure is operted according to SwChanlCallback8190Pci().
// However, this procedure is performed synchronously which should be running under
// passive level.
-//
-//not understand it
+
void PHY_SwChnlPhy8192S( // Only called during initialize
struct net_device* dev,
u8 channel
@@ -2767,14 +2440,10 @@ phy_SetSwChnlCmdArray(
if(CmdTable == NULL)
{
- //RT_ASSERT(FALSE, ("phy_SetSwChnlCmdArray(): CmdTable cannot be NULL.\n"));
return FALSE;
}
if(CmdTableIdx >= CmdTableSz)
{
- //RT_ASSERT(FALSE,
- // ("phy_SetSwChnlCmdArray(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n",
- //CmdTableIdx, CmdTableSz));
return FALSE;
}
@@ -2798,7 +2467,6 @@ phy_SwChnlStepByStep(
)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //PCHANNEL_ACCESS_SETTING pChnlAccessSetting;
SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
u32 PreCommonCmdCnt;
SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT];
@@ -2808,22 +2476,13 @@ phy_SwChnlStepByStep(
SwChnlCmd *CurrentCmd = NULL;
u8 eRFPath;
- //RT_ASSERT((dev != NULL), ("Adapter should not be NULL\n"));
- //RT_ASSERT(IsLegalChannel(dev, channel), ("illegal channel: %d\n", channel));
RT_TRACE(COMP_CH, "===========>%s(), channel:%d, stage:%d, step:%d\n", __FUNCTION__, channel, *stage, *step);
- //RT_ASSERT((pHalData != NULL), ("pHalData should not be NULL\n"));
if (!IsLegalChannel(priv->ieee80211, channel))
{
RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel);
return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop.
}
- //pChnlAccessSetting = &Adapter->MgntInfo.Info8185.ChannelAccessSetting;
- //RT_ASSERT((pChnlAccessSetting != NULL), ("pChnlAccessSetting should not be NULL\n"));
-
- //for(eRFPath = RF90_PATH_A; eRFPath <priv->NumTotalRFPath; eRFPath++)
- //for(eRFPath = 0; eRFPath <priv->NumTotalRFPath; eRFPath++)
- //{
// <1> Fill up pre common command.
PreCommonCmdCnt = 0;
phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT,
@@ -2844,8 +2503,7 @@ phy_SwChnlStepByStep(
case RF_8225:
if (channel < 1 || channel > 14)
RT_TRACE(COMP_ERR, "illegal channel for zebra:%d\n", channel);
- //RT_ASSERT((channel >= 1 && channel <= 14), ("illegal channel for Zebra: %d\n", channel));
- // 2008/09/04 MH Change channel.
+
phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
CmdID_RF_WriteReg, rRfChannel, channel, 10);
phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
@@ -2855,8 +2513,6 @@ phy_SwChnlStepByStep(
case RF_8256:
if (channel < 1 || channel > 14)
RT_TRACE(COMP_ERR, "illegal channel for zebra:%d\n", channel);
- // TEST!! This is not the table for 8256!!
- //RT_ASSERT((channel >= 1 && channel <= 14), ("illegal channel for Zebra: %d\n", channel));
phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
CmdID_RF_WriteReg, rRfChannel, channel, 10);
phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
@@ -2876,7 +2532,6 @@ phy_SwChnlStepByStep(
break;
default:
- //RT_ASSERT(FALSE, ("Unknown rf_chip: %d\n", priv->rf_chip));
return FALSE;
break;
}
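phy_SwChnlStepByStep() walks three small command tables (pre-common, RF-dependent, post-common); each entry carries a command id, two parameters and a per-step delay that the caller honours before advancing *step. The struct below is a guess at the shape implied by the phy_SetSwChnlCmdArray() calls above — the real SwChnlCmd layout is not spelled out in this hunk, the enum names are placeholders for CmdID_SetTxPowerLevel/CmdID_RF_WriteReg, and the 0x18 channel register comes from the comment further down ("register 0x18 bit 0-9 = channel number"):

#include <stdint.h>
#include <stdio.h>

enum cmd_id {
        CMD_SET_TXPOWER,        /* cf. CmdID_SetTxPowerLevel */
        CMD_RF_WRITE_REG,       /* cf. CmdID_RF_WriteReg */
        CMD_END,                /* table terminator (assumed) */
};

struct sw_chnl_cmd {
        enum cmd_id id;
        uint32_t para1;         /* e.g. the RF channel register */
        uint32_t para2;         /* e.g. the channel number */
        uint32_t ms_delay;      /* delay to apply after this step */
};

int main(void)
{
        /* Hypothetical RF-dependent table for channel 6. */
        const struct sw_chnl_cmd rf_depend_cmd[] = {
                { CMD_RF_WRITE_REG, 0x18, 6, 10 },
                { CMD_END, 0, 0, 0 },
        };
        size_t step;

        for (step = 0; rf_depend_cmd[step].id != CMD_END; step++)
                printf("step %zu: write RF reg 0x%x = %u, then wait %u ms\n",
                       step, (unsigned)rf_depend_cmd[step].para1,
                       (unsigned)rf_depend_cmd[step].para2,
                       (unsigned)rf_depend_cmd[step].ms_delay);
        return 0;
}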
@@ -2913,7 +2568,6 @@ phy_SwChnlStepByStep(
switch(CurrentCmd->CmdID)
{
case CmdID_SetTxPowerLevel:
- //if(priv->card_8192_version > VERSION_8190_BD)
PHY_SetTxPowerLevel8192S(dev,channel);
break;
case CmdID_WritePortUlong:
@@ -2930,7 +2584,6 @@ phy_SwChnlStepByStep(
{
// For new T65 RF 0222d register 0x18 bit 0-9 = channel number.
rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, CurrentCmd->Para1, 0x1f, (CurrentCmd->Para2));
- //printk("====>%x, %x, read_back:%x\n", CurrentCmd->Para2,CurrentCmd->Para1, rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, CurrentCmd->Para1, 0x1f));
}
break;
default:
@@ -2939,7 +2592,6 @@ phy_SwChnlStepByStep(
break;
}while(TRUE);
- //cosa }/*for(Number of RF paths)*/
(*delay)=CurrentCmd->msDelay;
(*step)++;
@@ -2985,14 +2637,8 @@ phy_FinishSwChnlNow( // We should not call this function directly
* 11/15/2007 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
- //called by rtl8192_phy_QueryRFReg, rtl8192_phy_SetRFReg, PHY_SetRFPowerState8192SUsb
-//extern bool
-//PHY_CheckIsLegalRfPath8192S(
-// struct net_device* dev,
-// u32 eRFPath)
u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath)
{
-// struct r8192_priv *priv = ieee80211_priv(dev);
bool rtValue = TRUE;
// NOt check RF Path now.!
@@ -3023,7 +2669,6 @@ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath)
void
PHY_IQCalibrate( struct net_device* dev)
{
- //struct r8192_priv *priv = ieee80211_priv(dev);
u32 i, reg;
u32 old_value;
long X, Y, TX0[4];
@@ -3039,7 +2684,6 @@ PHY_IQCalibrate( struct net_device* dev)
{
// IQK
rtl8192_setBBreg(dev, 0xc04, bMaskDWord, 0x00a05430);
- //PlatformStallExecution(5);
udelay(5);
rtl8192_setBBreg(dev, 0xc08, bMaskDWord, 0x000800e4);
udelay(5);
@@ -3169,8 +2813,6 @@ PHY_IQCalibrate( struct net_device* dev)
*---------------------------------------------------------------------------*/
extern void PHY_IQCalibrateBcut(struct net_device* dev)
{
- //struct r8192_priv *priv = ieee80211_priv(dev);
- //PMGNT_INFO pMgntInfo = &pAdapter->MgntInfo;
u32 i, reg;
u32 old_value;
long X, Y, TX0[4];
@@ -3184,21 +2826,6 @@ extern void PHY_IQCalibrateBcut(struct net_device* dev)
//
// 1. Save e70~ee0 register setting, and load calibration setting
//
- /*
- 0xee0[31:0]=0x3fed92fb;
- 0xedc[31:0] =0x3fed92fb;
- 0xe70[31:0] =0x3fed92fb;
- 0xe74[31:0] =0x3fed92fb;
- 0xe78[31:0] =0x3fed92fb;
- 0xe7c[31:0]= 0x3fed92fb;
- 0xe80[31:0]= 0x3fed92fb;
- 0xe84[31:0]= 0x3fed92fb;
- 0xe88[31:0]= 0x3fed92fb;
- 0xe8c[31:0]= 0x3fed92fb;
- 0xed0[31:0]= 0x3fed92fb;
- 0xed4[31:0]= 0x3fed92fb;
- 0xed8[31:0]= 0x3fed92fb;
- */
calibrate_set [0] = 0xee0;
calibrate_set [1] = 0xedc;
calibrate_set [2] = 0xe70;
@@ -3212,7 +2839,6 @@ extern void PHY_IQCalibrateBcut(struct net_device* dev)
calibrate_set [10] = 0xed0;
calibrate_set [11] = 0xed4;
calibrate_set [12] = 0xed8;
- //RT_TRACE(COMP_INIT, DBG_LOUD, ("Save e70~ee0 register setting\n"));
for (i = 0; i < 13; i++)
{
load_value[i] = rtl8192_QueryBBReg(dev, calibrate_set[i], bMaskDWord);
@@ -3232,7 +2858,6 @@ extern void PHY_IQCalibrateBcut(struct net_device* dev)
//BB switch to PI mode. If default is PI mode, ignoring 2 commands below.
if (!RfPiEnable) //if original is SI mode, then switch to PI mode.
{
- //DbgPrint("IQK Switch to PI mode\n");
rtl8192_setBBreg(dev, 0x820, bMaskDWord, 0x01000100);
rtl8192_setBBreg(dev, 0x828, bMaskDWord, 0x01000100);
}
@@ -3286,7 +2911,6 @@ extern void PHY_IQCalibrateBcut(struct net_device* dev)
if (!RfPiEnable) //if original is SI mode, then switch to PI mode.
{
- //DbgPrint("IQK Switch back to SI mode\n");
rtl8192_setBBreg(dev, 0x820, bMaskDWord, 0x01000000);
rtl8192_setBBreg(dev, 0x828, bMaskDWord, 0x01000000);
}
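The B-cut IQ calibration brackets its register pokes: it snapshots thirteen BB registers (0xe70..0xee0), switches the RF readback interface from SI to PI mode by writing 0x01000100 to 0x820/0x828 (and 0x01000000 to switch back, as shown just above), runs the calibration, then restores the snapshot. A minimal save/restore wrapper in the same spirit; the array-backed query_bb()/set_bb() stand in for rtl8192_QueryBBReg()/rtl8192_setBBreg(), and the 0x3fed92fb fill value is the calibration setting quoted in the comment this patch removes:

#include <stdint.h>
#include <stdio.h>

#define NUM_CAL_REGS 13

static uint32_t fake_bb[NUM_CAL_REGS];

static uint32_t query_bb(int idx)         { return fake_bb[idx]; }
static void set_bb(int idx, uint32_t val) { fake_bb[idx] = val; }

int main(void)
{
        uint32_t saved[NUM_CAL_REGS];
        int i;

        for (i = 0; i < NUM_CAL_REGS; i++)      /* 1. save the block */
                saved[i] = query_bb(i);

        for (i = 0; i < NUM_CAL_REGS; i++)      /* 2. calibration pokes */
                set_bb(i, 0x3fed92fbu);

        for (i = 0; i < NUM_CAL_REGS; i++)      /* 4. restore the block */
                set_bb(i, saved[i]);

        printf("restored %d registers\n", NUM_CAL_REGS);
        return 0;
}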
@@ -3369,7 +2993,6 @@ extern void PHY_IQCalibrateBcut(struct net_device* dev)
//
// 4. Reload e70~ee0 register setting.
//
- //RT_TRACE(COMP_INIT, DBG_LOUD, ("Reload e70~ee0 register setting.\n"));
for (i = 0; i < 13; i++)
rtl8192_setBBreg(dev, calibrate_set[i], bMaskDWord, load_value[i]);
@@ -3380,14 +3003,12 @@ extern void PHY_IQCalibrateBcut(struct net_device* dev)
-} // PHY_IQCalibrateBcut
+}
//
// Move from phycfg.c to gen.c to be code independent later
//
-//-------------------------Move to other DIR later----------------------------*/
-//#if (DEV_BUS_TYPE == USB_INTERFACE)
// use in phy only (in win it's timer)
void SwChnlCallback8192SUsb(struct net_device *dev)
@@ -3395,7 +3016,6 @@ void SwChnlCallback8192SUsb(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u32 delay;
-// bool ret;
RT_TRACE(COMP_SCAN, "==>SwChnlCallback8190Pci(), switch to channel %d\n",
priv->chan);
@@ -3418,7 +3038,6 @@ void SwChnlCallback8192SUsb(struct net_device *dev)
{
if(delay>0)
{
- //PlatformSetTimer(dev, &priv->SwChnlTimer, delay);
}
else
@@ -3473,16 +3092,12 @@ void SwChnlCallback8192SUsbWorkItem(struct net_device *dev )
* (2) Will two workitem of "switch channel" and "switch channel bandwidth" run
* concurrently?
*---------------------------------------------------------------------------*/
-//====>//rtl8192_SetBWMode
-// use in phy only (in win it's timer)
+// use in phy only
void SetBWModeCallback8192SUsb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 regBwOpMode;
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //u32 NowL, NowH;
- //u8Byte BeginTime, EndTime;
u8 regRRSR_RSC;
RT_TRACE(COMP_SCAN, "==>SetBWModeCallback8190Pci() Switch to %s bandwidth\n", \
@@ -3497,10 +3112,6 @@ void SetBWModeCallback8192SUsb(struct net_device *dev)
if(!priv->up)
return;
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //NowL = read_nic_dword(dev, TSFR);
- //NowH = read_nic_dword(dev, TSFR+4);
- //BeginTime = ((u8Byte)NowH << 32) + NowL;
//3<1>Set MAC register
regBwOpMode = read_nic_byte(dev, BW_OPMODE);
@@ -3510,13 +3121,11 @@ void SetBWModeCallback8192SUsb(struct net_device *dev)
{
case HT_CHANNEL_WIDTH_20:
regBwOpMode |= BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily becasue we have not verify whether this register works
write_nic_byte(dev, BW_OPMODE, regBwOpMode);
break;
case HT_CHANNEL_WIDTH_20_40:
regBwOpMode &= ~BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily becasue we have not verify whether this register works
write_nic_byte(dev, BW_OPMODE, regBwOpMode);
regRRSR_RSC = (regRRSR_RSC&0x90) |(priv->nCur40MhzPrimeSC<<5);
@@ -3546,12 +3155,6 @@ void SetBWModeCallback8192SUsb(struct net_device *dev)
rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1));
rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC);
- // Correct the tx power for CCK rate in 40M. Suggest by YN, 20071207
- //PHY_SetBBReg(Adapter, rCCK0_TxFilter1, bMaskDWord, 0x35360000);
- //PHY_SetBBReg(Adapter, rCCK0_TxFilter2, bMaskDWord, 0x121c252e);
- //PHY_SetBBReg(Adapter, rCCK0_DebugPort, bMaskDWord, 0x00000409);
- //PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter1, bADClkPhase, 0);
-
if (priv->card_8192_version >= VERSION_8192S_BCUT)
rtl8192_setBBreg(dev, rFPGA0_AnalogParameter2, 0xff, 0x18);
@@ -3564,12 +3167,6 @@ void SetBWModeCallback8192SUsb(struct net_device *dev)
}
//Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //NowL = read_nic_dword(dev, TSFR);
- //NowH = read_nic_dword(dev, TSFR+4);
- //EndTime = ((u8Byte)NowH << 32) + NowL;
- //RT_TRACE(COMP_SCAN, DBG_LOUD, ("SetBWModeCallback8190Pci: time of SetBWMode = %I64d us!\n", (EndTime - BeginTime)));
-
#if 1
//3<3>Set RF related register
switch( priv->rf_chip )
@@ -3597,7 +3194,6 @@ void SetBWModeCallback8192SUsb(struct net_device *dev)
break;
default:
- //RT_ASSERT(FALSE, ("Unknown rf_chip: %d\n", priv->rf_chip));
break;
}
#endif
@@ -3705,7 +3301,6 @@ void SetBWModeCallback8192SUsbWorkItem(struct net_device *dev)
priv->SetBWModeInProgress= FALSE;
}
-//--------------------------Move to oter DIR later-------------------------------*/
void InitialGain8192S(struct net_device *dev, u8 Operation)
{
#ifdef TO_DO_LIST
@@ -3812,7 +3407,6 @@ bool HalSetFwCmd8192S(struct net_device* dev, FW_CMD_IO_TYPE FwCmdIO)
u16 FwCmdWaitLimit = 1000;
- //if(IS_HARDWARE_TYPE_8192SU(Adapter) && Adapter->bInHctTest)
if(priv->bInHctTest)
return true;
@@ -3828,11 +3422,6 @@ bool HalSetFwCmd8192S(struct net_device* dev, FW_CMD_IO_TYPE FwCmdIO)
#if 1
while(priv->SetFwCmdInProgress && FwCmdWaitCounter<FwCmdWaitLimit)
{
- //if(RT_USB_CANNOT_IO(Adapter))
- //{
- // RT_TRACE(COMP_CMD, DBG_WARNING, ("HalSetFwCmd8192S(): USB can NOT IO!!\n"));
- // return FALSE;
- //}
RT_TRACE(COMP_CMD, "HalSetFwCmd8192S(): previous workitem not finish!!\n");
return false;
@@ -3843,9 +3432,7 @@ bool HalSetFwCmd8192S(struct net_device* dev, FW_CMD_IO_TYPE FwCmdIO)
if(FwCmdWaitCounter == FwCmdWaitLimit)
{
- //RT_ASSERT(FALSE, ("SetFwCmdIOWorkItemCallback(): Wait too logn to set FW CMD\n"));
RT_TRACE(COMP_CMD, "HalSetFwCmd8192S(): Wait too logn to set FW CMD\n");
- //return false;
}
#endif
if (priv->SetFwCmdInProgress)
@@ -3898,10 +3485,10 @@ void ChkFwCmdIoDone(struct net_device* dev)
//
void phy_SetFwCmdIOCallback(struct net_device* dev)
{
- //struct net_device* dev = (struct net_device*) data;
- u32 input;
- static u32 ScanRegister;
struct r8192_priv *priv = ieee80211_priv(dev);
+ PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
+ rt_firmware *pFirmware = priv->pFirmware;
+	u32 input, CurrentAID = 0;
if(!priv->up)
{
RT_TRACE(COMP_CMD, "SetFwCmdIOTimerCallback(): driver is going to unload\n");
@@ -3910,61 +3497,22 @@ void phy_SetFwCmdIOCallback(struct net_device* dev)
RT_TRACE(COMP_CMD, "--->SetFwCmdIOTimerCallback(): Cmd(%#x), SetFwCmdInProgress(%d)\n", priv->CurrentFwCmdIO, priv->SetFwCmdInProgress);
- switch(priv->CurrentFwCmdIO)
+ if(pFirmware->FirmwareVersion >= 0x34)
{
- case FW_CMD_HIGH_PWR_ENABLE:
- if((priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_HIGH_POWER)==0)
- write_nic_dword(dev, WFM5, FW_HIGH_PWR_ENABLE);
- break;
-
- case FW_CMD_HIGH_PWR_DISABLE:
- write_nic_dword(dev, WFM5, FW_HIGH_PWR_DISABLE);
- break;
-
- case FW_CMD_DIG_RESUME:
- write_nic_dword(dev, WFM5, FW_DIG_RESUME);
- break;
-
- case FW_CMD_DIG_HALT:
- write_nic_dword(dev, WFM5, FW_DIG_HALT);
- break;
-
- //
- // <Roger_Notes> The following FW CMD IO was combined into single operation
- // (i.e., to prevent number of system workitem out of resource!!).
- // 2008.12.04.
- //
- case FW_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(COMP_CMD, "[FW CMD] Set HIGHPWR enable and DIG resume!!\n");
- if((priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_HIGH_POWER)==0)
- {
- write_nic_dword(dev, WFM5, FW_HIGH_PWR_ENABLE); //break;
- ChkFwCmdIoDone(dev);
- }
- write_nic_dword(dev, WFM5, FW_DIG_RESUME);
- break;
-
- case FW_CMD_PAUSE_DM_BY_SCAN:
- RT_TRACE(COMP_CMD, "[FW CMD] Set HIGHPWR disable and DIG halt!!\n");
- write_nic_dword(dev, WFM5, FW_HIGH_PWR_DISABLE); //break;
- ChkFwCmdIoDone(dev);
- write_nic_dword(dev, WFM5, FW_DIG_HALT);
+ switch(priv->CurrentFwCmdIO)
+ {
+ case FW_CMD_RA_REFRESH_N:
+ priv->CurrentFwCmdIO = FW_CMD_RA_REFRESH_N_COMB;
break;
-
- //
- // <Roger_Notes> The following FW CMD IO should be checked
- // (i.e., workitem schedule timing issue!!).
- // 2008.12.04.
- //
- case FW_CMD_DIG_DISABLE:
- RT_TRACE(COMP_CMD, "[FW CMD] Set DIG disable!!\n");
- write_nic_dword(dev, WFM5, FW_DIG_DISABLE);
+ case FW_CMD_RA_REFRESH_BG:
+ priv->CurrentFwCmdIO = FW_CMD_RA_REFRESH_BG_COMB;
break;
-
- case FW_CMD_DIG_ENABLE:
- RT_TRACE(COMP_CMD, "[FW CMD] Set DIG enable!!\n");
- write_nic_dword(dev, WFM5, FW_DIG_ENABLE);
+ default:
break;
+ }
+ }
+ switch(priv->CurrentFwCmdIO)
+ {
case FW_CMD_RA_RESET:
write_nic_dword(dev, WFM5, FW_RA_RESET);
@@ -3975,82 +3523,111 @@ void phy_SetFwCmdIOCallback(struct net_device* dev)
break;
case FW_CMD_RA_REFRESH_N:
- RT_TRACE(COMP_CMD, "[FW CMD] Set RA refresh!! N\n");
- if(priv->ieee80211->pHTInfo->IOTRaFunc & HT_IOT_RAFUNC_DISABLE_ALL)
+ RT_TRACE(COMP_CMD, "[FW CMD] Set RA n refresh!!\n");
+ if(pHTInfo->IOTRaFunc & HT_IOT_RAFUNC_DISABLE_ALL)
input = FW_RA_REFRESH;
else
- input = FW_RA_REFRESH | (priv->ieee80211->pHTInfo->IOTRaFunc << 8);
+ input = FW_RA_REFRESH | (pHTInfo->IOTRaFunc << 8);
write_nic_dword(dev, WFM5, input);
+ ChkFwCmdIoDone(dev);
+ write_nic_dword(dev, WFM5, FW_RA_ENABLE_RSSI_MASK);
+ ChkFwCmdIoDone(dev);
break;
case FW_CMD_RA_REFRESH_BG:
- RT_TRACE(COMP_CMD, "[FW CMD] Set RA refresh!! B/G\n");
+ RT_TRACE(COMP_CMD, "[FW CMD] Set RA BG refresh!!\n");
write_nic_dword(dev, WFM5, FW_RA_REFRESH);
ChkFwCmdIoDone(dev);
- write_nic_dword(dev, WFM5, FW_RA_ENABLE_BG);
+ write_nic_dword(dev, WFM5, FW_RA_DISABLE_RSSI_MASK);
+ ChkFwCmdIoDone(dev);
+ break;
+
+ case FW_CMD_RA_REFRESH_N_COMB:
+ RT_TRACE(COMP_CMD, "[FW CMD] Set RA n Combo refresh!!\n");
+ if(pHTInfo->IOTRaFunc & HT_IOT_RAFUNC_DISABLE_ALL)
+ input = FW_RA_IOT_N_COMB;
+ else
+ input = FW_RA_IOT_N_COMB | (((pHTInfo->IOTRaFunc)&0x0f) << 8);
+ input = input |((pHTInfo->IOTPeer & 0xf) <<12);
+ RT_TRACE(COMP_CMD, "[FW CMD] Set RA/IOT Comb in n mode!! input(%#x)\n", input);
+ write_nic_dword(dev, WFM5, input);
+ ChkFwCmdIoDone(dev);
+ break;
+
+ case FW_CMD_RA_REFRESH_BG_COMB:
+ RT_TRACE(COMP_CMD, "[FW CMD] Set RA B/G Combo refresh!!\n");
+ if(pHTInfo->IOTRaFunc & HT_IOT_RAFUNC_DISABLE_ALL)
+ input = FW_RA_IOT_BG_COMB;
+ else
+ input = FW_RA_IOT_BG_COMB | (((pHTInfo->IOTRaFunc)&0x0f) << 8);
+ input = input |((pHTInfo->IOTPeer & 0xf) <<12);
+ RT_TRACE(COMP_CMD, "[FW CMD] Set RA/IOT Comb in B/G mode!! input(%#x)\n", input);
+ write_nic_dword(dev, WFM5, input);
+ ChkFwCmdIoDone(dev);
break;
case FW_CMD_IQK_ENABLE:
write_nic_dword(dev, WFM5, FW_IQK_ENABLE);
+ ChkFwCmdIoDone(dev);
break;
case FW_CMD_TXPWR_TRACK_ENABLE:
write_nic_dword(dev, WFM5, FW_TXPWR_TRACK_ENABLE);
+ ChkFwCmdIoDone(dev);
break;
case FW_CMD_TXPWR_TRACK_DISABLE:
write_nic_dword(dev, WFM5, FW_TXPWR_TRACK_DISABLE);
+ ChkFwCmdIoDone(dev);
break;
- default:
- RT_TRACE(COMP_CMD,"Unknown FW Cmd IO(%#x)\n", priv->CurrentFwCmdIO);
+ case FW_CMD_PAUSE_DM_BY_SCAN:
+ RT_TRACE(COMP_CMD,"[FW CMD] Pause DM by Scan!!\n");
+ rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bMaskByte0, 0x17);
+ rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bMaskByte0, 0x17);
+ rtl8192_setBBreg(dev, rCCK0_CCA, bMaskByte2, 0x40);
break;
- }
-
- ChkFwCmdIoDone(dev);
- switch(priv->CurrentFwCmdIO)
- {
+ case FW_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(COMP_CMD, "[FW CMD] Resume DM by Scan!!\n");
+ rtl8192_setBBreg(dev, rCCK0_CCA, bMaskByte2, 0x83);
+ PHY_SetTxPowerLevel8192S(dev, priv->chan);
+ break;
case FW_CMD_HIGH_PWR_DISABLE:
- //if(pMgntInfo->bTurboScan)
- {
- //Lower initial gain
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bMaskByte0, 0x17);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bMaskByte0, 0x17);
- // CCA threshold
- rtl8192_setBBreg(dev, rCCK0_CCA, bMaskByte2, 0x40);
- // Disable OFDM Part
- rtl8192_setBBreg(dev, rOFDM0_TRMuxPar, bMaskByte2, 0x1);
- ScanRegister = rtl8192_QueryBBReg(dev, rOFDM0_RxDetector1,bMaskDWord);
- rtl8192_setBBreg(dev, rOFDM0_RxDetector1, 0xf, 0xf);
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
- }
+ RT_TRACE(COMP_CMD, "[FW CMD] High Pwr Disable!!\n");
+ if(priv->DMFlag & HAL_DM_HIPWR_DISABLE)
+ break;
+ rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bMaskByte0, 0x17);
+ rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bMaskByte0, 0x17);
+ rtl8192_setBBreg(dev, rCCK0_CCA, bMaskByte2, 0x40);
break;
case FW_CMD_HIGH_PWR_ENABLE:
- //if(pMgntInfo->bTurboScan)
- {
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bMaskByte0, 0x36);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bMaskByte0, 0x36);
+ RT_TRACE(COMP_CMD, "[FW CMD] High Pwr Enable!!\n");
+ if(priv->DMFlag & HAL_DM_HIPWR_DISABLE)
+ break;
+ rtl8192_setBBreg(dev, rCCK0_CCA, bMaskByte2, 0x83);
+ break;
- // CCA threshold
- rtl8192_setBBreg(dev, rCCK0_CCA, bMaskByte2, 0x83);
- // Enable OFDM Part
- rtl8192_setBBreg(dev, rOFDM0_TRMuxPar, bMaskByte2, 0x0);
+ case FW_CMD_LPS_ENTER:
+ RT_TRACE(COMP_CMD, "[FW CMD] Enter LPS mode!!\n");
+ CurrentAID = priv->ieee80211->assoc_id;
+ write_nic_dword(dev, WFM5, (FW_LPS_ENTER| ((CurrentAID|0xc000)<<8)) );
+ ChkFwCmdIoDone(dev);
+ pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
+ break;
- //LZM ADD because sometimes there is no FW_CMD_HIGH_PWR_DISABLE, this value will be 0.
- if(ScanRegister != 0){
- rtl8192_setBBreg(dev, rOFDM0_RxDetector1, bMaskDWord, ScanRegister);
- }
+ case FW_CMD_LPS_LEAVE:
+ RT_TRACE(COMP_CMD, "[FW CMD] Leave LPS mode!!\n");
+ write_nic_dword(dev, WFM5, FW_LPS_LEAVE );
+ ChkFwCmdIoDone(dev);
+ pHTInfo->IOTAction &= (~HT_IOT_ACT_DISABLE_EDCA_TURBO);
+ break;
- if(priv->rf_type == RF_1T2R || priv->rf_type == RF_2T2R)
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x3);
- else
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x1);
- }
+ default:
break;
}
- priv->SetFwCmdInProgress = false;// Clear FW CMD operation flag.
+ priv->SetFwCmdInProgress = false;
RT_TRACE(COMP_CMD, "<---SetFwCmdIOWorkItemCallback()\n");
}
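
The FW_CMD_RA_REFRESH_N_COMB/BG_COMB cases above fold the rate-adaptation function bits and the IOT peer index into a single WFM5 write. A minimal sketch of that encoding, using only the masks and shifts visible in the hunk (fw_ra_iot_cmd() is an illustrative name, not part of the patch):

/* Sketch: compose the combined RA/IOT command word written to WFM5.
 * base is FW_RA_IOT_N_COMB or FW_RA_IOT_BG_COMB; the 0x0f masks and
 * the <<8 / <<12 shifts mirror the code above. */
static u32 fw_ra_iot_cmd(u32 base, u32 iot_ra_func, u32 iot_peer,
			 bool ra_func_disabled)
{
	u32 cmd = base;

	if (!ra_func_disabled)
		cmd |= (iot_ra_func & 0x0f) << 8;	/* RA function selector */
	cmd |= (iot_peer & 0x0f) << 12;			/* IOT peer vendor index */
	return cmd;
}

Each such write is followed by ChkFwCmdIoDone(), as in the patch, so the firmware acknowledges one command before the next is issued.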
diff --git a/drivers/staging/rtl8192su/r8192U.h b/drivers/staging/rtl8192su/r8192U.h
index eccf4478fba..741c6bf9a01 100644
--- a/drivers/staging/rtl8192su/r8192U.h
+++ b/drivers/staging/rtl8192su/r8192U.h
@@ -1,33 +1,40 @@
-/*
- This is part of rtl8187 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver
-
- Parts of this driver are based on the rtl8192 driver skeleton
- from Patric Schenke & Andres Salomon
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to tanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ * Linux device driver for RTL8192U
+ *
+ * Based on the r8187 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#ifndef R819xU_H
#define R819xU_H
#include <linux/module.h>
#include <linux/kernel.h>
-//#include <linux/config.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
-//#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
@@ -51,9 +58,7 @@
#define RTL819X_EEPROM_CMD_CK (1 << 2)
#define RTL819X_EEPROM_CMD_CS (1 << 3)
-//#define RTL8192U
#define RTL819xU_MODULE_NAME "rtl819xU"
-//added for HW security, john.0629
#define FALSE 0
#define TRUE 1
#define MAX_KEY_LEN 61
@@ -114,6 +119,7 @@ do { if(rt_global_debug_component & component) \
#define COMP_TRACE BIT0 // For function call tracing.
#define COMP_DBG BIT1 // Only for temporary debug message.
+#define COMP_MLME BIT1
#define COMP_INIT BIT2 // during driver initialization / halt / reset.
@@ -137,14 +143,14 @@ do { if(rt_global_debug_component & component) \
#define COMP_SEC BIT20 // Event handling
#define COMP_LED BIT21 // For LED.
#define COMP_RF BIT22 // For RF.
-//1!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#define COMP_RXDESC BIT23
+
#define COMP_RXDESC BIT23 // Show Rx desc information for SD3 debug. Added by Annie, 2006-07-15.
-//1//1Attention Please!!!<11n or 8190 specific code should be put below this line>
-//1!!!!!!!!!!!!!!!!!!!!!!!!!!!
#define COMP_FIRMWARE BIT24 //for firmware downloading
#define COMP_HT BIT25 // For 802.11n HT related information. by Emily 2006-8-11
#define COMP_AMSDU BIT26 // For A-MSDU Debugging
+#define COMP_PS BIT26
#define COMP_SCAN BIT27
#define COMP_CMD BIT28
@@ -159,8 +165,7 @@ do { if(rt_global_debug_component & component) \
printk( "Assertion failed! %s,%s,%s,line=%d\n", \
#expr,__FILE__,__FUNCTION__,__LINE__); \
}
-//wb added to debug out data buf
-//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
+
#define RT_DEBUG_DATA(level, data, datalen) \
do{ if ((rt_global_debug_component & (level)) == (level)) \
{ \
@@ -180,25 +185,17 @@ do { if(rt_global_debug_component & component) \
#define RT_DEBUG_DATA(level, data, datalen) do {} while(0)
#endif /* RTL8169_DEBUG */
-//#ifdef RTL8192SU
//2TODO: We should define 8192S firmware related macro settings here!!
#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
#define RTL819X_TOTAL_RF_PATH 2
- //#define Rtl819XFwBootArray Rtl8192UsbFwBootArray
- //#define Rtl819XFwMainArray Rtl8192UsbFwMainArray
- //#define Rtl819XFwDataArray Rtl8192UsbFwDataArray
-
#define Rtl819XMACPHY_Array_PG Rtl8192UsbMACPHY_Array_PG
#define Rtl819XMACPHY_Array Rtl8192UsbMACPHY_Array
#define Rtl819XPHY_REGArray Rtl8192UsbPHY_REGArray
#define Rtl819XPHY_REG_1T2RArray Rtl8192UsbPHY_REG_1T2RArray
- //#define Rtl819XRadioA_Array Rtl8192UsbRadioA_Array
- //#define Rtl819XRadioB_Array Rtl8192UsbRadioB_Array
#define Rtl819XRadioC_Array Rtl8192UsbRadioC_Array
#define Rtl819XRadioD_Array Rtl8192UsbRadioD_Array
- //2008.11.06 Add.
#define Rtl819XFwImageArray Rtl8192SUFwImgArray
#define Rtl819XMAC_Array Rtl8192SUMAC_2T_Array
#define Rtl819XAGCTAB_Array Rtl8192SUAGCTAB_Array
@@ -212,8 +209,6 @@ do { if(rt_global_debug_component & component) \
#define Rtl819XRadioB_GM_Array Rtl8192SURadioB_GM_Array
#define Rtl819XRadioA_to1T_Array Rtl8192SURadioA_to1T_Array
#define Rtl819XRadioA_to2T_Array Rtl8192SURadioA_to2T_Array
-//#endif
-
//
// Queue Select Value in TxDesc
//
@@ -256,7 +251,6 @@ do { if(rt_global_debug_component & component) \
#define DESC90_RATEMCS15 0x0f
#define DESC90_RATEMCS32 0x20
-//#ifdef RTL8192SU
// CCK Rates, TxHT = 0
#define DESC92S_RATE1M 0x00
#define DESC92S_RATE2M 0x01
@@ -292,13 +286,12 @@ do { if(rt_global_debug_component & component) \
#define DESC92S_RATEMCS15 0x1b
#define DESC92S_RATEMCS15_SG 0x1c
#define DESC92S_RATEMCS32 0x20
-//#endif
#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
#define IEEE80211_WATCH_DOG_TIME 2000
#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
-//for txpowertracking by amy
+//for txpowertracking
#define OFDM_Table_Length 19
#define CCK_Table_length 12
@@ -522,7 +515,6 @@ typedef struct rtl8192_rx_info {
u8 out_pipe;
}rtl8192_rx_info ;
-//typedef struct _RX_DESC_STATUS_8192SU{
typedef struct rx_desc_819x_usb{
//DWORD 0
u16 Length:14;
@@ -582,77 +574,34 @@ typedef struct rx_desc_819x_usb{
//DWORD 5
u32 TSFL;
-//}RX_DESC_STATUS_8192SU, *PRX_DESC_STATUS_8192SU;
}rx_desc_819x_usb, *prx_desc_819x_usb;
//
// Driver info is written to the beginning of the RxBuffer
//
-//typedef struct _RX_DRIVER_INFO_8192S{
typedef struct rx_drvinfo_819x_usb{
- //
- // Driver info contain PHY status and other variabel size info
- // PHY Status content as below
- //
-
- //DWORD 0
- /*u4Byte gain_0:7;
- u4Byte trsw_0:1;
- u4Byte gain_1:7;
- u4Byte trsw_1:1;
- u4Byte gain_2:7;
- u4Byte trsw_2:1;
- u4Byte gain_3:7;
- u4Byte trsw_3:1; */
u8 gain_trsw[4];
//DWORD 1
- /*u4Byte pwdb_all:8;
- u4Byte cfosho_0:8;
- u4Byte cfosho_1:8;
- u4Byte cfosho_2:8;*/
u8 pwdb_all;
u8 cfosho[4];
//DWORD 2
- /*u4Byte cfosho_3:8;
- u4Byte cfotail_0:8;
- u4Byte cfotail_1:8;
- u4Byte cfotail_2:8;*/
u8 cfotail[4];
//DWORD 3
- /*u4Byte cfotail_3:8;
- u4Byte rxevm_0:8;
- u4Byte rxevm_1:8;
- u4Byte rxsnr_0:8;*/
char rxevm[2];
char rxsnr[4];
//DWORD 4
- /*u4Byte rxsnr_1:8;
- u4Byte rxsnr_2:8;
- u4Byte rxsnr_3:8;
- u4Byte pdsnr_0:8;*/
u8 pdsnr[2];
//DWORD 5
- /*u4Byte pdsnr_1:8;
- u4Byte csi_current_0:8;
- u4Byte csi_current_1:8;
- u4Byte csi_target_0:8;*/
u8 csi_current[2];
u8 csi_target[2];
//DWORD 6
- /*u4Byte csi_target_1:8;
- u4Byte sigevm:8;
- u4Byte max_ex_pwr:8;
- u4Byte ex_intf_flag:1;
- u4Byte sgi_en:1;
- u4Byte rxsc:2;
- u4Byte reserve:4;*/
u8 sigevm;
u8 max_ex_pwr;
u8 ex_intf_flag:1;
@@ -669,10 +618,8 @@ typedef struct rx_drvinfo_819x_usb{
#define MAX_DEV_ADDR_SIZE 8 /* support till 64 bit bus width OS */
#define MAX_FIRMWARE_INFORMATION_SIZE 32 /*2006/04/30 by Emily forRTL8190*/
-//#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
#define ENCRYPTION_MAX_OVERHEAD 128
#define USB_HWDESC_HEADER_LEN sizeof(tx_desc_819x_usb)
-//#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(tx_fwinfo_819x_usb))
#define MAX_FRAGMENT_COUNT 8
#ifdef RTL8192U
#define MAX_TRANSMIT_BUFFER_SIZE 8000
@@ -681,19 +628,15 @@ typedef struct rx_drvinfo_819x_usb{
#endif
#define scrclng 4 // octets for crc32 (FCS, ICV)
+#define HAL_DM_DIG_DISABLE BIT0
+#define HAL_DM_HIPWR_DISABLE BIT1
+
typedef enum rf_optype
{
RF_OP_By_SW_3wire = 0,
RF_OP_By_FW,
RF_OP_MAX
}rf_op_type;
-/* 8190 Loopback Mode definition */
-typedef enum _rtl819xUsb_loopback{
- RTL819xU_NO_LOOPBACK = 0,
- RTL819xU_MAC_LOOPBACK = 1,
- RTL819xU_DMA_LOOPBACK = 2,
- RTL819xU_CCK_LOOPBACK = 3,
-}rtl819xUsb_loopback_e;
/* for rtl819x */
typedef enum _RT_STATUS{
@@ -703,16 +646,13 @@ typedef enum _RT_STATUS{
RT_STATUS_RESOURCE = 3
}RT_STATUS,*PRT_STATUS;
-//#ifdef RTL8192SU
typedef enum _RTL8192SUSB_LOOPBACK{
RTL8192SU_NO_LOOPBACK = 0,
RTL8192SU_MAC_LOOPBACK = 1,
RTL8192SU_DMA_LOOPBACK = 2,
RTL8192SU_CCK_LOOPBACK = 3,
}RTL8192SUSB_LOOPBACK_E;
-//#endif
-//+by amy 080507
#define MAX_RECEIVE_BUFFER_SIZE 9100 // Add this to 9100 bytes to receive A-MSDU from RT-AP
@@ -790,10 +730,6 @@ typedef struct rtl_reg_debug{
typedef struct _rt_9x_tx_rate_history {
u32 cck[4];
u32 ofdm[8];
- // HT_MCS[0][]: BW=0 SG=0
- // HT_MCS[1][]: BW=1 SG=0
- // HT_MCS[2][]: BW=0 SG=1
- // HT_MCS[3][]: BW=1 SG=1
u32 ht_mcs[4][16];
}rt_tx_rahis_t, *prt_tx_rahis_t;
typedef struct _RT_SMOOTH_DATA_4RF {
@@ -808,11 +744,6 @@ typedef struct _RT_SMOOTH_DATA_4RF {
typedef struct Stats
{
unsigned long txrdu;
-// unsigned long rxrdu;
- //unsigned long rxnolast;
- //unsigned long rxnodata;
-// unsigned long rxreset;
-// unsigned long rxnopointer;
unsigned long rxok;
unsigned long rxframgment;
unsigned long rxcmdpkt[4]; //08/05/08 amy rx cmd element txfeedback/bcn report/cfg set/query
@@ -832,18 +763,8 @@ typedef struct Stats
unsigned long txnperr;
unsigned long txnpdrop;
unsigned long txresumed;
-// unsigned long rxerr;
-// unsigned long rxoverflow;
-// unsigned long rxint;
unsigned long txnpokint;
-// unsigned long txhpokint;
-// unsigned long txhperr;
-// unsigned long ints;
-// unsigned long shints;
unsigned long txoverflow;
-// unsigned long rxdmafail;
-// unsigned long txbeacon;
-// unsigned long txbeaconerr;
unsigned long txlpokint;
unsigned long txlpdrop;
unsigned long txlperr;
@@ -909,14 +830,11 @@ typedef struct Stats
u32 CurrentShowTxate;
} Stats;
-
// Bandwidth Offset
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-//+by amy 080507
-
typedef struct ChnlAccessSetting {
u16 SIFS_Timer;
u16 DIFS_Timer;
@@ -956,14 +874,12 @@ typedef enum _RT_RF_TYPE_819xU{
RF_PSEUDO_11N = 5,
}RT_RF_TYPE_819xU, *PRT_RF_TYPE_819xU;
-//#ifdef RTL8192SU
typedef enum _RF_POWER_STATE{
RF_ON,
RF_SLEEP,
RF_OFF,
RF_SHUT_DOWN,
}RF_POWER_STATE, *PRF_POWER_STATE;
-//#endif
typedef struct _rate_adaptive
{
@@ -1014,7 +930,6 @@ typedef struct _init_gain
u8 cca;
} init_gain, *pinit_gain;
-//by amy 0606
typedef struct _phy_ofdm_rx_status_report_819xusb
{
@@ -1066,8 +981,26 @@ typedef enum _RT_CUSTOMER_ID
RT_CID_Nettronix = 11,
RT_CID_DLINK = 12,
RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_819x_ALPHA = 15,
+ RT_CID_819x_Sitecom = 16,
+ RT_CID_CCX = 17,
+ RT_CID_819x_Lenovo = 18,
+ RT_CID_819x_QMI = 19,
+ RT_CID_819x_Edimax_Belkin = 20,
+ RT_CID_819x_Sercomm_Belkin = 21,
+ RT_CID_819x_CAMEO1 = 22,
+ RT_CID_819x_MSI = 23,
+ RT_CID_819x_Acer = 24,
}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
+typedef enum _RT_OP_MODE{
+ RT_OP_MODE_AP,
+ RT_OP_MODE_INFRASTRUCTURE,
+ RT_OP_MODE_IBSS,
+ RT_OP_MODE_NO_LINK,
+}RT_OP_MODE, *PRT_OP_MODE;
+
typedef enum _RESET_TYPE {
RESET_TYPE_NORESET = 0x00,
RESET_TYPE_NORMAL = 0x01,
@@ -1093,8 +1026,6 @@ typedef enum{
NIC_8192SU = 5,
} nic_t;
-//definded by WB. Ready to fill handlers for different NIC types.
-//add handle here when necessary.
struct rtl819x_ops{
nic_t nic_type;
void (* rtl819x_read_eeprom_info)(struct net_device *dev);
@@ -1113,11 +1044,12 @@ typedef struct r8192_priv
struct rtl819x_ops* ops;
struct usb_device *udev;
/* added for maintain info from eeprom */
+ short epromtype;
u16 eeprom_vid;
u16 eeprom_pid;
u8 eeprom_CustomerID;
u8 eeprom_SubCustomerID;
- u8 eeprom_ChannelPlan;
+ u16 eeprom_ChannelPlan;
RT_CUSTOMER_ID CustomerID;
LED_STRATEGY_819xUsb LedStrategy;
u8 txqueue_to_outpipemap[9];
@@ -1129,76 +1061,55 @@ typedef struct r8192_priv
int irq;
struct ieee80211_device *ieee80211;
+ u8 RATRTableBitmap;
+
+ u32 IC_Cut;
short card_8192; /* O: rtl8192, 1:rtl8185 V B/C, 2:rtl8185 V D */
- u8 card_8192_version; /* if TCR reports card V B/C this discriminates */
-// short phy_ver; /* meaningful for rtl8225 1:A 2:B 3:C */
+ u32 card_8192_version; /* if TCR reports card V B/C this discriminates */
short enable_gpio0;
enum card_type {PCI,MINIPCI,CARDBUS,USB}card_type;
short hw_plcp_len;
short plcp_preamble_mode;
spinlock_t irq_lock;
-// spinlock_t irq_th_lock;
spinlock_t tx_lock;
spinlock_t ps_lock;
struct mutex mutex;
+ bool ps_force;
spinlock_t rf_lock; //used to lock rf write operation added by wb
+ spinlock_t rf_ps_lock;
u16 irq_mask;
-// short irq_enabled;
-// struct net_device *dev; //comment this out.
short chan;
short sens;
short max_sens;
-
- // u8 chtxpwr[15]; //channels from 1 to 14, 0 not used
-// u8 chtxpwr_ofdm[15]; //channels from 1 to 14, 0 not used
-// u8 cck_txpwr_base;
-// u8 ofdm_txpwr_base;
-// u8 challow[15]; //channels from 1 to 14, 0 not used
short up;
short crcmon; //if 1 allow bad crc frame reception in monitor mode
-// short prism_hdr;
-
-// struct timer_list scan_timer;
- /*short scanpending;
- short stopscan;*/
-// spinlock_t scan_lock;
-// u8 active_probe;
- //u8 active_scan_num;
+ bool bSurpriseRemoved;
+
struct semaphore wx_sem;
struct semaphore rf_sem; //used to lock rf write operation added by wb, modified by david
-// short hw_wep;
-// short digphy;
-// short antb;
-// short diversity;
-// u8 cs_treshold;
-// short rcr_csense;
u8 rf_type; //0 means 1T2R, 1 means 2T4R
RT_RF_TYPE_819xU rf_chip;
-// u32 key0[4];
short (*rf_set_sens)(struct net_device *dev,short sens);
u8 (*rf_set_chan)(struct net_device *dev,u8 ch);
void (*rf_close)(struct net_device *dev);
void (*rf_init)(struct net_device *dev);
- //short rate;
short promisc;
+ u32 mc_filter[2];
/*stats*/
struct Stats stats;
struct iw_statistics wstats;
struct proc_dir_entry *dir_dev;
/*RX stuff*/
-// u32 *rxring;
-// u32 *rxringtail;
-// dma_addr_t rxringdma;
struct urb **rx_urb;
struct urb **rx_cmd_urb;
-/* modified by davad for Rx process */
+/* for Rx process */
struct sk_buff_head rx_queue;
struct sk_buff_head skb_queue;
@@ -1209,6 +1120,7 @@ typedef struct r8192_priv
struct tasklet_struct irq_rx_tasklet;
+ struct tasklet_struct irq_tx_tasklet;
struct urb *rxurb_task;
//2 Tx Related variables
@@ -1235,6 +1147,7 @@ typedef struct r8192_priv
struct ChnlAccessSetting ChannelAccessSetting;
struct work_struct reset_wq;
+ struct work_struct mcast_wq;
/**********************************************************/
//for rtl819xUsb
@@ -1244,21 +1157,28 @@ typedef struct r8192_priv
bool bDcut;
bool bCurrentRxAggrEnable;
u8 Rf_Mode; //add for Firmware RF -R/W switch
+ u8 FwRsvdTxPageCfg;
prt_firmware pFirmware;
- rtl819xUsb_loopback_e LoopbackMode;
+ RTL8192SUSB_LOOPBACK_E LoopbackMode;
bool usb_error;
u16 EEPROMTxPowerDiff;
u8 EEPROMThermalMeter;
u8 EEPROMPwDiff;
u8 EEPROMCrystalCap;
+ u8 EEPROMBluetoothCoexist;
u8 EEPROM_Def_Ver;
u8 EEPROMTxPowerLevelCCK;// CCK channel 1~14
u8 EEPROMTxPowerLevelCCK_V1[3];
u8 EEPROMTxPowerLevelOFDM24G[3]; // OFDM 2.4G channel 1~14
u8 EEPROMTxPowerLevelOFDM5G[24]; // OFDM 5G
-//RTL8192SU
+ u8 EEPROMOptional;
+ u8 ShowRateMode;
+ bool bForcedShowRxRate;
+
+ u32 RfRegChnlVal[2];
+
bool bDmDisableProtect;
bool bIgnoreDiffRateTxPowerOffset;
@@ -1278,6 +1198,9 @@ typedef struct r8192_priv
bool EepromOrEfuse;
bool bBootFromEfuse; // system boot form EFUSE
u8 EfuseMap[2][HWSET_MAX_SIZE_92S];
+ u16 EfuseUsedBytes;
+ u8 EfuseUsedPercentage;
+
u8 EEPROMUsbOption;
u8 EEPROMUsbPhyParam[5];
@@ -1290,6 +1213,8 @@ typedef struct r8192_priv
u8 EEPROMTxPwrTkMode;
u8 bTXPowerDataReadFromEEPORM;
+ u8 EEPROMRegulatory;
+ u8 EEPROMPwrGroup[2][3];
u8 EEPROMVersion;
u8 EEPROMUsbEndPointNumber;
@@ -1298,7 +1223,7 @@ typedef struct r8192_priv
u8 RfTxPwrLevelCck[2][14];
u8 RfTxPwrLevelOfdm1T[2][14];
u8 RfTxPwrLevelOfdm2T[2][14];
- // 2009/01/20 MH Add for new EEPROM format.
+ // new EEPROM format.
u8 TxPwrHt20Diff[2][14]; // HT 20<->40 Pwr diff
u8 TxPwrLegacyHtDiff[2][14]; // For HT<->legacy pwr diff
u8 TxPwrbandEdgeHt40[2][2]; // Band edge for HY 40MHZlow/up channel
@@ -1310,7 +1235,6 @@ typedef struct r8192_priv
u8 MidHighPwrTHR_L1;
u8 MidHighPwrTHR_L2;
u8 TxPwrSafetyFlag; // for Tx power safety spec
-//RTL8192SU
/*PHY related*/
BB_REGISTER_DEFINITION_T PHYRegDef[4]; //Radio A/B/C/D
@@ -1323,8 +1247,11 @@ typedef struct r8192_priv
u32 Pwr_Track;
u8 TxPowerDiff;
u8 AntennaTxPwDiff[2]; // Antenna gain offset, index 0 for B, 1 for C, and 2 for D
+ u8 ThermalMeter[2]; // ThermalMeter, index 0 for RFIC0, and 1 for RFIC1
+ u8 ThermalValue;
u8 CrystalCap; // CrystalCap.
- u8 ThermalMeter[2]; // ThermalMeter, index 0 for RFIC0, and 1 for RFIC1
+ u8 BluetoothCoexist;
+ u8 ExternalPA;
u8 CckPwEnl;
// Use to calculate PWBD.
@@ -1337,24 +1264,22 @@ typedef struct r8192_priv
u8 SwChnlStep;
u8 SetBWModeInProgress;
HT_CHANNEL_WIDTH CurrentChannelBW;
+ bool bChnlPlanFromHW;
u8 ChannelPlan;
+ u16 RegChannelPlan;
u8 pwrGroupCnt;
// 8190 40MHz mode
//
u8 nCur40MhzPrimeSC; // Control channel sub-carrier
- // Joseph test for shorten RF configuration time.
- // We save RF reg0 in this variable to reduce RF reading.
- //
+
u32 RfReg0Value[4];
u8 NumTotalRFPath;
bool brfpath_rxenable[4];
//RF set related
bool SetRFPowerStateInProgress;
-//+by amy 080507
+
struct timer_list watch_dog_timer;
-//+by amy 080515 for dynamic mechenism
- //Add by amy Tx Power Control for Near/Far Range 2008/05/15
bool bdynamic_txpower; //bDynamicTxPower
bool bDynamicTxHighPower; // Tx high power state
bool bDynamicTxLowPower; // Tx low power state
@@ -1363,17 +1288,18 @@ typedef struct r8192_priv
bool bstore_last_dtpflag;
bool bstart_txctrl_bydtp; //Define to discriminate on High power State or on sitesuvey to change Tx gain index
- //Add by amy for Rate Adaptive
+
rate_adaptive rate_adaptive;
- //Add by amy for TX power tracking
- //2008/05/15 Mars OPEN/CLOSE TX POWER TRACKING
+ // TX power tracking
txbbgain_struct txbbgain_table[TxBBGainTableLength];
u8 EEPROMTxPowerTrackEnable;
u8 txpower_count;//For 6 sec do tracking again
bool btxpower_trackingInit;
u8 OFDM_index;
u8 CCK_index;
- //2007/09/10 Mars Add CCK TX Power Tracking
+ u8 Record_CCK_20Mindex;
+ u8 Record_CCK_40Mindex;
+ // CCK TX Power Tracking
ccktxbbgain_struct cck_txbbgain_table[CCKTxBBGainTableLength];
ccktxbbgain_struct cck_txbbgain_ch14_table[CCKTxBBGainTableLength];
u8 rfa_txpowertrackingindex;
@@ -1390,10 +1316,15 @@ typedef struct r8192_priv
bool bcck_in_ch14;
bool btxpowerdata_readfromEEPORM;
u16 TSSI_13dBm;
+ u8 CCKPresentAttentuation_20Mdefault;
+ u8 CCKPresentAttentuation_40Mdefault;
+ char CCKPresentAttentuation_difference;
+ char CCKPresentAttentuation;
+ bool bDMInitialGainEnable;
//For Backup Initial Gain
init_gain initgain_backup;
u8 DefaultInitialGain[4];
- // For EDCA Turbo mode, Added by amy 080515.
+ // For EDCA Turbo mode
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
@@ -1407,17 +1338,23 @@ typedef struct r8192_priv
u8 framesync;
u32 framesyncC34;
u8 framesyncMonitor;
- //Added by amy 080516 for RX related
+ // RX related
u16 nrxAMPDU_size;
u8 nrxAMPDU_aggr_num;
- //by amy for gpio
+ // gpio
bool bHwRadioOff;
- //by amy for reset_count
+ bool isRFOff;
+ bool bInPowerSaveMode;
+
+ bool RFChangeInProgress;
+ bool RegRfOff;
+ u8 bHwRfOffAction;
+
u32 reset_count;
bool bpbc_pressed;
- //by amy for debug
+ // debug
u32 txpower_checkcnt;
u32 txpower_tracking_callback_cnt;
u8 thermal_read_val[40];
@@ -1426,7 +1363,7 @@ typedef struct r8192_priv
u32 ccktxpower_adjustcnt_ch14;
u8 tx_fwinfo_force_subcarriermode;
u8 tx_fwinfo_force_subcarrierval;
- //by amy for silent reset
+ // silent reset
RESET_TYPE ResetProgress;
bool bForcedSilentReset;
bool bDisableNormalResetCheck;
@@ -1435,11 +1372,11 @@ typedef struct r8192_priv
int IrpPendingCount;
bool bResetInProgress;
bool force_reset;
+ bool force_lps;
u8 InitialGainOperateType;
u16 SifsTime;
- //define work item by amy 080526
struct delayed_work update_beacon_wq;
struct delayed_work watch_dog_wq;
struct delayed_work txpower_tracking_wq;
@@ -1448,8 +1385,7 @@ typedef struct r8192_priv
struct delayed_work initialgain_operate_wq;
struct workqueue_struct *priv_wq;
-//#ifdef RTL8192SU
- //lzm add for 8192S
+
u32 IntrMask;
// RF and BB access related synchronization flags.
bool bChangeBBInProgress; // BaseBand RW is still in progress.
@@ -1464,7 +1400,6 @@ typedef struct r8192_priv
u8 ThermalReadBackIndex; //debug only
u8 ThermalReadVal[40]; //debug only
- // For HCT test, 2005.07.15, by rcnjko.
// not realize true, just define it, set it 0 default, because some func use it
bool bInHctTest;
@@ -1481,7 +1416,6 @@ typedef struct r8192_priv
char RF_C_TxPwDiff; // Antenna gain offset, rf-c to rf-a
bool bRFSiOrPi;//0=si, 1=pi.
- //lzm add for 8192S
bool SetFwCmdInProgress; //is set FW CMD in Progress? 92S only
u8 CurrentFwCmdIO;
@@ -1495,25 +1429,17 @@ typedef struct r8192_priv
LED_819xUsb SwLed0;
LED_819xUsb SwLed1;
u8 bRegUseLed;
- struct work_struct BlinkWorkItem;
+ struct work_struct BlinkWorkItem;
/* added for led control */
u16 FwCmdIOMap;
u32 FwCmdIOParam;
- u8 DMFlag;
+ u8 DMFlag;
}r8192_priv;
-// for rtl8187
-// now mirging to rtl8187B
-/*
-typedef enum{
- LOW_PRIORITY = 0x02,
- NORM_PRIORITY
- } priority_t;
-*/
//for rtl8187B
typedef enum{
BULK_PRIORITY = 0x01,
@@ -1542,6 +1468,9 @@ struct ssid_thread {
};
#endif
+void LedControl8192SUsb(struct net_device *dev, LED_CTL_MODE LedAction);
+void InitSwLeds(struct net_device *dev);
+void DeInitSwLeds(struct net_device *dev);
short rtl8192SU_tx_cmd(struct net_device *dev, struct sk_buff *skb);
short rtl8192SU_tx(struct net_device *dev, struct sk_buff* skb);
bool FirmwareDownload92S(struct net_device *dev);
@@ -1567,7 +1496,6 @@ void rtl8192_rx_enable(struct net_device *);
void rtl8192_tx_enable(struct net_device *);
void rtl8192_disassociate(struct net_device *dev);
-//void fix_rx_fifo(struct net_device *dev);
void rtl8185_set_rf_pins_enable(struct net_device *dev,u32 a);
void rtl8192_set_anaparam(struct net_device *dev,u32 a);
@@ -1582,7 +1510,6 @@ void write_phy_cck(struct net_device *dev, u8 adr, u32 data);
void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
void rtl8185_tx_antenna(struct net_device *dev, u8 ant);
void rtl8192_set_rxconf(struct net_device *dev);
-//short check_nic_enough_desc(struct net_device *dev, priority_t priority);
extern void rtl819xusb_beacon_tx(struct net_device *dev,u16 tx_rate);
void CamResetAllEntry(struct net_device* dev);
void EnableHWSecurityConfig8192(struct net_device *dev);
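
The header now defines HAL_DM_DIG_DISABLE (BIT0) and HAL_DM_HIPWR_DISABLE (BIT1), consumed through priv->DMFlag; the FW_CMD_HIGH_PWR_* cases earlier in this patch bail out when the high-power bit is set. A tiny illustrative check, assuming DMFlag is initialised elsewhere (hal_dm_hipwr_allowed() is a hypothetical helper, not part of the patch):

/* Sketch: gate high-power BB tweaks on the new DMFlag bit, mirroring
 * the "if (priv->DMFlag & HAL_DM_HIPWR_DISABLE) break;" pattern in
 * phy_SetFwCmdIOCallback(). */
static bool hal_dm_hipwr_allowed(const struct r8192_priv *priv)
{
	return !(priv->DMFlag & HAL_DM_HIPWR_DISABLE);
}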
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index 1b6890611fb..df5b52baf89 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/notifier.h>
#undef LOOP_TEST
#undef DUMP_RX
@@ -143,13 +144,13 @@ MODULE_VERSION("V 1.1");
MODULE_DEVICE_TABLE(usb, rtl8192_usb_id_tbl);
MODULE_DESCRIPTION("Linux driver for Realtek RTL8192 USB WiFi cards");
-static char* ifname = "wlan%d";
+static char ifname[IFNAMSIZ] = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
+module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
//module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
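
Changing ifname from a charp parameter to a fixed buffer matters because the parameter is writable (S_IWUSR): module_param_string() copies sysfs writes into driver-owned storage and rejects strings longer than the buffer, instead of retargeting a pointer that originally referenced a string literal. The pattern in isolation (a sketch, not additional driver code):

#include <linux/module.h>
#include <linux/if.h>	/* IFNAMSIZ */

/* Bounded, writable string parameter: the backing store must be a char
 * array so userspace writes are copied in place and length-checked. */
static char ifname[IFNAMSIZ] = "wlan%d";
module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ifname, "Network interface name template");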
@@ -162,6 +163,8 @@ MODULE_PARM_DESC(channels," Channel bitmask for specific locales. NYI");
static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void __devexit rtl8192_usb_disconnect(struct usb_interface *intf);
+static const struct net_device_ops rtl8192_netdev_ops;
+static struct notifier_block proc_netdev_notifier;
static struct usb_driver rtl8192_usb_driver = {
.name = RTL819xU_MODULE_NAME, /* Driver name */
@@ -252,53 +255,49 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
{
int i, max_chan=-1, min_chan=-1;
struct ieee80211_device* ieee = priv->ieee80211;
- switch (channel_plan)
- {
- case COUNTRY_CODE_FCC:
- case COUNTRY_CODE_IC:
- case COUNTRY_CODE_ETSI:
- case COUNTRY_CODE_SPAIN:
- case COUNTRY_CODE_FRANCE:
- case COUNTRY_CODE_MKK:
- case COUNTRY_CODE_MKK1:
- case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_TELEC:
- case COUNTRY_CODE_MIC:
- {
- Dot11d_Init(ieee);
- ieee->bGlobalDomain = false;
- //acturally 8225 & 8256 rf chip only support B,G,24N mode
- if ((priv->rf_chip == RF_8225) || (priv->rf_chip == RF_8256) || (priv->rf_chip == RF_6052))
- {
- min_chan = 1;
- max_chan = 14;
- }
- else
- {
- RT_TRACE(COMP_ERR, "unknown rf chip, can't set channel map in function:%s()\n", __FUNCTION__);
- }
- if (ChannelPlan[channel_plan].Len != 0){
- // Clear old channel map
- memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- // Set new channel map
- for (i=0;i<ChannelPlan[channel_plan].Len;i++)
- {
- if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
- break;
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
- }
- }
- break;
- }
- case COUNTRY_CODE_GLOBAL_DOMAIN:
- {
- GET_DOT11D_INFO(ieee)->bEnabled = 0;//this flag enabled to follow 11d country IE setting, otherwise, it shall follow global domain settings.
- Dot11d_Reset(ieee);
- ieee->bGlobalDomain = true;
- break;
+
+ ieee->bGlobalDomain = false;
+ switch (priv->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ min_chan = 1;
+ max_chan = 14;
+ break;
+ default:
+ pr_err("%s(): unknown rf chip, can't set channel map\n",
+ __func__);
+ break;
+ }
+ if (ChannelPlan[channel_plan].Len != 0) {
+ memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
+ sizeof(GET_DOT11D_INFO(ieee)->channel_map));
+
+ for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
+ if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
+ break;
+ GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
}
- default:
- break;
+ }
+ switch (channel_plan) {
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
+ ieee->bGlobalDomain = true;
+ for (i = 12; i <= 14; i++)
+ GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
+ ieee->IbssStartChnl = 10;
+ ieee->ibss_maxjoin_chal = 11;
+ break;
+ case COUNTRY_CODE_WORLD_WIDE_13:
+ printk(KERN_INFO "world wide 13\n");
+ for (i = 12; i <= 13; i++)
+ GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
+ ieee->IbssStartChnl = 10;
+ ieee->ibss_maxjoin_chal = 11;
+ break;
+ default:
+ ieee->IbssStartChnl = 1;
+ ieee->ibss_maxjoin_chal = 14;
+ break;
}
return;
}
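
The rewritten rtl819x_set_channel_map() marks table-listed channels with 1 and channels 12-14 (global domain) or 12-13 (world-wide-13) with 2. Assuming the usual dot11d convention in this driver family (0 means blocked, 1 means active scan allowed, 2 means passive scan only), a consumer would look like the sketch below; rtl819x_active_scan_allowed() is illustrative, not part of the patch:

/* Sketch under the assumed channel_map[] semantics:
 * 0 = channel disallowed, 1 = active scan allowed, 2 = passive only. */
static bool rtl819x_active_scan_allowed(struct ieee80211_device *ieee, u8 ch)
{
	return GET_DOT11D_INFO(ieee)->channel_map[ch] == 1;
}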
@@ -991,15 +990,24 @@ static int proc_get_stats_rx(char *page, char **start,
return len;
}
-void rtl8192_proc_module_init(void)
+int rtl8192_proc_module_init(void)
{
+ int ret;
+
RT_TRACE(COMP_INIT, "Initializing proc filesystem");
rtl8192_proc=create_proc_entry(RTL819xU_MODULE_NAME, S_IFDIR, init_net.proc_net);
+ if (!rtl8192_proc)
+ return -ENOMEM;
+ ret = register_netdevice_notifier(&proc_netdev_notifier);
+ if (ret)
+ remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
+ return ret;
}
void rtl8192_proc_module_remove(void)
{
+ unregister_netdevice_notifier(&proc_netdev_notifier);
remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
}
@@ -1027,8 +1035,7 @@ void rtl8192_proc_remove_one(struct net_device *dev)
remove_proc_entry("registers-e", priv->dir_dev);
// remove_proc_entry("cck-registers",priv->dir_dev);
// remove_proc_entry("ofdm-registers",priv->dir_dev);
- //remove_proc_entry(dev->name, rtl8192_proc);
- remove_proc_entry("wlan0", rtl8192_proc);
+ remove_proc_entry(priv->dir_dev->name, rtl8192_proc);
priv->dir_dev = NULL;
}
}
@@ -1145,6 +1152,25 @@ void rtl8192_proc_init_one(struct net_device *dev)
dev->name);
}
}
+
+static int proc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = ptr;
+
+ if (net_dev->netdev_ops == &rtl8192_netdev_ops &&
+ event == NETDEV_CHANGENAME) {
+ rtl8192_proc_remove_one(net_dev);
+ rtl8192_proc_init_one(net_dev);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block proc_netdev_notifier = {
+ .notifier_call = proc_netdev_event,
+};
+
/****************************************************************************
-----------------------------MISC STUFF-------------------------
*****************************************************************************/
@@ -7355,6 +7381,8 @@ static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
dev = alloc_ieee80211(sizeof(struct r8192_priv));
+ if (dev == NULL)
+ return -ENOMEM;
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(dev, &intf->dev);
@@ -7378,7 +7406,7 @@ static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
if (dev_alloc_name(dev, ifname) < 0){
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
+ strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
@@ -7392,7 +7420,8 @@ static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
netif_carrier_off(dev);
netif_stop_queue(dev);
- register_netdev(dev);
+ if (register_netdev(dev))
+ goto fail;
RT_TRACE(COMP_INIT, "dev name=======> %s\n",dev->name);
rtl8192_proc_init_one(dev);
@@ -7474,35 +7503,63 @@ static int __init rtl8192_usb_module_init(void)
ret = ieee80211_crypto_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_init() failed %d\n", ret);
- return ret;
+ goto fail_crypto;
}
ret = ieee80211_crypto_tkip_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_tkip_init() failed %d\n",
ret);
- return ret;
+ goto fail_crypto_tkip;
}
ret = ieee80211_crypto_ccmp_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_ccmp_init() failed %d\n",
ret);
- return ret;
+ goto fail_crypto_ccmp;
}
ret = ieee80211_crypto_wep_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_wep_init() failed %d\n", ret);
- return ret;
+ goto fail_crypto_wep;
}
printk(KERN_INFO "\nLinux kernel driver for RTL8192 based WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan\n");
RT_TRACE(COMP_INIT, "Initializing module");
RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
- rtl8192_proc_module_init();
- return usb_register(&rtl8192_usb_driver);
+
+ ret = rtl8192_proc_module_init();
+ if (ret) {
+ pr_err("rtl8192_proc_module_init() failed %d\n", ret);
+ goto fail_proc;
+ }
+
+ ret = usb_register(&rtl8192_usb_driver);
+ if (ret) {
+ pr_err("usb_register() failed %d\n", ret);
+ goto fail_usb;
+ }
+
+ return 0;
+
+fail_usb:
+ rtl8192_proc_module_remove();
+fail_proc:
+ ieee80211_crypto_wep_exit();
+fail_crypto_wep:
+ ieee80211_crypto_ccmp_exit();
+fail_crypto_ccmp:
+ ieee80211_crypto_tkip_exit();
+fail_crypto_tkip:
+ ieee80211_crypto_deinit();
+fail_crypto:
+#ifdef CONFIG_IEEE80211_DEBUG
+ ieee80211_debug_exit();
+#endif
+ return ret;
}
diff --git a/drivers/staging/rtl8192su/r8192U_dm.c b/drivers/staging/rtl8192su/r8192U_dm.c
index fa5e24416dd..ce7e1ee4c3a 100644
--- a/drivers/staging/rtl8192su/r8192U_dm.c
+++ b/drivers/staging/rtl8192su/r8192U_dm.c
@@ -2673,7 +2673,6 @@ static void dm_check_edca_turbo(
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
- //PSTA_QOS pStaQos = pMgntInfo->pStaQos;
// Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.
static unsigned long lastTxOkCnt = 0;
@@ -2681,10 +2680,8 @@ static void dm_check_edca_turbo(
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
- //
- // Do not be Turbo if it's under WiFi config and Qos Enabled, because the EDCA parameters
- // should follow the settings from QAP. By Bruce, 2007-12-07.
- //
+ u32 EDCA_BE_UL = edca_setting_UL[pHTInfo->IOTPeer];
+ u32 EDCA_BE_DL = edca_setting_DL[pHTInfo->IOTPeer];
#if 1
if(priv->ieee80211->state != IEEE80211_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
@@ -2693,6 +2690,14 @@ static void dm_check_edca_turbo(
if(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
goto dm_CheckEdcaTurbo_EXIT;
+ if(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_FORCED_ENABLE_BE_TXOP)
+ {
+ if(!(EDCA_BE_UL & 0xffff0000))
+ EDCA_BE_UL |= 0x005e0000;
+ if(!(EDCA_BE_DL & 0xffff0000))
+ EDCA_BE_DL |= 0x005e0000;
+ }
+
{
u8* peername[11] = {"unknown", "realtek", "realtek_92se", "broadcom", "ralink", "atheros", "cisco", "marvell", "92u_softap", "self_softap"};
static int wb_tmp = 0;
@@ -2714,7 +2719,7 @@ static void dm_check_edca_turbo(
{
if(priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
{
- write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
+ write_nic_dword(dev, EDCAPARA_BE, EDCA_BE_UL);
priv->bis_cur_rdlstate = false;
}
}
@@ -2722,7 +2727,7 @@ static void dm_check_edca_turbo(
{
if(!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
{
- write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
+ write_nic_dword(dev, EDCAPARA_BE, EDCA_BE_DL);
priv->bis_cur_rdlstate = true;
}
}
@@ -2734,7 +2739,7 @@ static void dm_check_edca_turbo(
{
if(!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
{
- write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
+ write_nic_dword(dev, EDCAPARA_BE, EDCA_BE_DL);
priv->bis_cur_rdlstate = true;
}
}
@@ -2742,7 +2747,7 @@ static void dm_check_edca_turbo(
{
if(priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
{
- write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
+ write_nic_dword(dev, EDCAPARA_BE, EDCA_BE_UL);
priv->bis_cur_rdlstate = false;
}
}
@@ -2771,7 +2776,7 @@ static void dm_check_edca_turbo(
(((u32)(qos_parameters->cw_max[0]))<< AC_PARAM_ECW_MAX_OFFSET)|
(((u32)(qos_parameters->cw_min[0]))<< AC_PARAM_ECW_MIN_OFFSET)|
((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
- //write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
+
write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
// Check ACM bit.
@@ -2780,7 +2785,7 @@ static void dm_check_edca_turbo(
// TODO: Modified this part and try to set acm control in only 1 IO processing!!
PACI_AIFSN pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
- u8 AcmCtrl = read_nic_byte( dev, AcmHwCtrl );
+ u8 AcmCtrl = priv->AcmControl | 0x1;
if( pAciAifsn->f.ACM )
{ // ACM bit is 1.
AcmCtrl |= AcmHw_BeqEn;
@@ -2804,7 +2809,7 @@ dm_CheckEdcaTurbo_EXIT:
priv->ieee80211->bis_any_nonbepkts = false;
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
-} // dm_CheckEdcaTurbo
+}
#endif
extern void DM_CTSToSelfSetting(struct net_device * dev,u32 DM_Type, u32 DM_Value)
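
dm_check_edca_turbo() now derives its uplink/downlink EDCA values from edca_setting_UL[]/edca_setting_DL[] indexed by IOTPeer, and when HT_IOT_ACT_FORCED_ENABLE_BE_TXOP is set it forces 0x005e into the TXOP field if the table entry left it zero. The 0xffff0000 test implies the EDCAPARA_BE layout used in the sketch below, which reuses the AC_PARAM_* offsets already present in this function and infers the TXOP position (bits 31:16) from that mask rather than from a named constant:

/* Sketch: assemble an EDCAPARA_BE value. AIFS, ECWmin and ECWmax use the
 * AC_PARAM_* offsets seen above; the TXOP limit occupies the upper 16
 * bits, which is what the 0xffff0000 check inspects. */
static u32 make_edca_be_param(u8 aifs, u8 ecw_min, u8 ecw_max, u16 txop)
{
	return ((u32)aifs << AC_PARAM_AIFS_OFFSET) |
	       ((u32)ecw_min << AC_PARAM_ECW_MIN_OFFSET) |
	       ((u32)ecw_max << AC_PARAM_ECW_MAX_OFFSET) |
	       ((u32)txop << 16);
}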
diff --git a/drivers/staging/rtl8192su/r8192U_wx.c b/drivers/staging/rtl8192su/r8192U_wx.c
index a7cc6f9a473..2005b811eba 100644
--- a/drivers/staging/rtl8192su/r8192U_wx.c
+++ b/drivers/staging/rtl8192su/r8192U_wx.c
@@ -1,21 +1,23 @@
-/*
- This file contains wireless extension handlers.
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ * Linux device driver for RTL8192U
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part
- of the official realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- We want to tanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
#include <linux/string.h>
#include "r8192U.h"
@@ -248,6 +250,7 @@ static int r8192_wx_get_ap_status(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
struct ieee80211_network *target;
+ struct ieee80211_network *latest = NULL;
int name_len;
down(&priv->wx_sem);
@@ -259,13 +262,20 @@ static int r8192_wx_get_ap_status(struct net_device *dev,
list_for_each_entry(target, &ieee->network_list, list) {
if ( (target->ssid_len == name_len) &&
(strncmp(target->ssid, (char*)wrqu->data.pointer, name_len)==0)){
- if(target->wpa_ie_len>0 || target->rsn_ie_len>0 )
- //set flags=1 to indicate this ap is WPA
- wrqu->data.flags = 1;
- else wrqu->data.flags = 0;
+ if ((latest == NULL) ||(target->last_scanned > latest->last_scanned))
+ latest = target;
+
+ }
+ }
+ if(latest != NULL)
+ {
+ wrqu->data.length = latest->SignalStrength;
- break;
+ if(latest->wpa_ie_len>0 || latest->rsn_ie_len>0 ) {
+ wrqu->data.flags = 1;
+ } else {
+ wrqu->data.flags = 0;
}
}
@@ -460,14 +470,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 16;
-// range->retry_capa; /* What retry options are supported */
-// range->retry_flags; /* How to decode max/min retry limit */
-// range->r_time_flags; /* How to decode max/min retry life */
-// range->min_retry; /* Minimal number of retries */
-// range->max_retry; /* Maximal number of retries */
-// range->min_r_time; /* Minimal retry lifetime */
-// range->max_r_time; /* Maximal retry lifetime */
-
for (i = 0, val = 0; i < 14; i++) {
@@ -1011,6 +1013,70 @@ static int r8192_wx_set_mlme(struct net_device *dev,
return ret;
}
+static int r8192_wx_set_pmkid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int i;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_device* ieee = priv->ieee80211;
+ struct iw_pmksa* pPMK = (struct iw_pmksa*)extra;
+ int intReturn = false;
+
+ switch (pPMK->cmd)
+ {
+ case IW_PMKSA_ADD:
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ {
+ if (memcmp(ieee->PMKIDList[i].Bssid, pPMK->bssid.sa_data, ETH_ALEN) == 0)
+ {
+ memcpy(ieee->PMKIDList[i].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+ memcpy(ieee->PMKIDList[i].Bssid, pPMK->bssid.sa_data, ETH_ALEN);
+ ieee->PMKIDList[i].bUsed = true;
+ intReturn = true;
+ goto __EXIT__;
+ }
+ }
+
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ {
+ if (ieee->PMKIDList[i].bUsed == false)
+ {
+ memcpy(ieee->PMKIDList[i].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+ memcpy(ieee->PMKIDList[i].Bssid, pPMK->bssid.sa_data, ETH_ALEN);
+ ieee->PMKIDList[i].bUsed = true;
+ intReturn = true;
+ goto __EXIT__;
+ }
+ }
+ break;
+
+ case IW_PMKSA_REMOVE:
+ for (i = 0; i < NUM_PMKID_CACHE; i++)
+ {
+ if (memcmp(ieee->PMKIDList[i].Bssid, pPMK->bssid.sa_data, ETH_ALEN) == true)
+ {
+ memset(&ieee->PMKIDList[i], 0x00, sizeof(RT_PMKID_LIST));
+ intReturn = true;
+ break;
+ }
+ }
+ break;
+
+ case IW_PMKSA_FLUSH:
+ memset(&ieee->PMKIDList[0], 0x00, (sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE));
+ intReturn = true;
+ break;
+
+ default:
+ break;
+ }
+
+__EXIT__:
+ return (intReturn);
+
+}
+
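
r8192_wx_set_pmkid() above implements the SIOCSIWPMKSA cache: IW_PMKSA_ADD refreshes an existing BSSID entry or claims a free slot, IW_PMKSA_REMOVE clears a matching entry, and IW_PMKSA_FLUSH wipes the whole table. For the matching step, memcmp() signals equality by returning 0, so a comparison against 0 is the usual test; the sketch below shows that form (pmkid_bssid_match() is a hypothetical helper, not part of the patch):

/* Sketch of a BSSID match test for the PMKID cache; memcmp() returns 0
 * when the two addresses are identical. */
static bool pmkid_bssid_match(const RT_PMKID_LIST *entry, const u8 *bssid)
{
	return entry->bUsed && memcmp(entry->Bssid, bssid, ETH_ALEN) == 0;
}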
static int r8192_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
@@ -1093,7 +1159,7 @@ static iw_handler r8192_wx_handlers[] =
NULL,//r8192_wx_get_auth,//NULL, /* SIOCSIWAUTH */
r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
NULL,//r8192_wx_get_enc_ext,//NULL, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
+ r8192_wx_set_pmkid, /* SIOCSIWPMKSA */
NULL, /*---hole---*/
};
diff --git a/drivers/staging/rtl8192su/r819xU_cmdpkt.c b/drivers/staging/rtl8192su/r819xU_cmdpkt.c
index a8e9d2d96f5..7ab9e22f895 100644
--- a/drivers/staging/rtl8192su/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192su/r819xU_cmdpkt.c
@@ -1,13 +1,22 @@
-/*
- * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ * Linux device driver for RTL8192U
*
- * Module: r819xusb_cmdpkt.c
- * (RTL8190 TX/RX command packet handler Source C File)
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
- * Note: The module is responsible for handling TX and RX command packet.
- * 1.TX: Send set and query configuration command packet.
- * 2.RX: Receive tx feedback, beacon state, query configuration, command packet.
- */
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
#include "r8192U.h"
#include "r819xU_cmdpkt.h"
@@ -19,19 +28,13 @@ bool SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
cb_desc *tcb_desc;
unsigned char *ptr_buf;
- /* PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK); */
-
/*
* Get TCB and local buffer from common pool.
* (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
*/
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
- if (skb == NULL) {
- RT_TRACE(COMP_ERR, "(%s): unable to alloc skb buffer\n",
- __func__);
- rtStatus = false;
- return rtStatus;
- }
+ if (!skb)
+ return false;
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
@@ -51,7 +54,6 @@ bool SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
- //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
return rtStatus;
}
@@ -224,7 +226,6 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
//2 maybe need endian transform?
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
- //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4)));
DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status);
@@ -491,6 +492,13 @@ cmpk_message_handle_rx(
cmpk_handle_tx_rate_history(dev, pcmd_buff);
cmd_length = CMPK_TX_RAHIS_SIZE;
break;
+ case RX_TX_TSSI_MEAN_BACK:
+ {
+ u32 *pMsg;
+ pMsg = (u32 *)pcmd_buff;
+ }
+ cmd_length = 32;
+ break;
default:
RT_TRACE(COMP_ERR, "(%s): unknown CMD Element\n",
__func__);
diff --git a/drivers/staging/rtl8192su/r819xU_cmdpkt.h b/drivers/staging/rtl8192su/r819xU_cmdpkt.h
index d3c56155188..95885bee7a4 100644
--- a/drivers/staging/rtl8192su/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192su/r819xU_cmdpkt.h
@@ -173,6 +173,7 @@ typedef enum tag_command_packet_directories {
RX_DBGINFO_FEEDBACK = 5,
RX_TX_PER_PKT_FEEDBACK = 6,
RX_TX_RATE_HISTORY = 7,
+ RX_TX_TSSI_MEAN_BACK = 8,
RX_CMD_ELE_MAX
} cmpk_element_e;
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 0439c90b416..28969198e7e 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -3,5 +3,6 @@ config RTL8192U
depends on PCI && WLAN && USB
select WIRELESS_EXT
select WEXT_PRIV
+ select CRYPTO
default N
---help---
diff --git a/drivers/staging/rtl8192u/dot11d.h b/drivers/staging/rtl8192u/dot11d.h
index 0851b9db17a..d99cc030ec7 100644
--- a/drivers/staging/rtl8192u/dot11d.h
+++ b/drivers/staging/rtl8192u/dot11d.h
@@ -2,7 +2,7 @@
#define __INC_DOT11D_H
#ifdef ENABLE_DOT11D
-#include "ieee80211.h"
+#include "ieee80211/ieee80211.h"
typedef struct _CHNL_TXPOWER_TRIPLE {
diff --git a/drivers/staging/rtl8192u/ieee80211.h b/drivers/staging/rtl8192u/ieee80211.h
deleted file mode 100644
index 9c726113214..00000000000
--- a/drivers/staging/rtl8192u/ieee80211.h
+++ /dev/null
@@ -1,2595 +0,0 @@
-/*
- * Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11
- * remains copyright by the original authors
- *
- * Portions of the merged code are based on Host AP (software wireless
- * LAN access point) driver for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <jkmaline@cc.hut.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- * Copyright (c) 2004, Intel Corporation
- *
- * Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-#ifndef IEEE80211_H
-#define IEEE80211_H
-#include <linux/if_ether.h> /* ETH_ALEN */
-#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-
-#include <linux/delay.h>
-#include <linux/wireless.h>
-
-#include "ieee80211/rtl819x_HT.h"
-#include "ieee80211/rtl819x_BA.h"
-#include "ieee80211/rtl819x_TS.h"
-
-
-#ifndef IW_MODE_MONITOR
-#define IW_MODE_MONITOR 6
-#endif
-
-#ifndef IWEVCUSTOM
-#define IWEVCUSTOM 0x8c02
-#endif
-
-
-#ifndef container_of
-/**
- * container_of - cast a member of a structure out to the containing structure
- *
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- const typeof(((type *)0)->member) (*__mptr = (ptr)); \
- (type *)((char *)__mptr - offsetof(type, member)); })
-#endif
-
-#define KEY_TYPE_NA 0x0
-#define KEY_TYPE_WEP40 0x1
-#define KEY_TYPE_TKIP 0x2
-#define KEY_TYPE_CCMP 0x4
-#define KEY_TYPE_WEP104 0x5
-
-/* added for rtl819x tx procedure */
-#define MAX_QUEUE_SIZE 0x10
-
-/*
- * 8190 queue mapping
- */
-#define BK_QUEUE 0
-#define BE_QUEUE 1
-#define VI_QUEUE 2
-#define VO_QUEUE 3
-#define HCCA_QUEUE 4
-#define TXCMD_QUEUE 5
-#define MGNT_QUEUE 6
-#define HIGH_QUEUE 7
-#define BEACON_QUEUE 8
-
-#define LOW_QUEUE BE_QUEUE
-#define NORMAL_QUEUE MGNT_QUEUE
-
-/* added by amy for ps */
-#define SWRF_TIMEOUT 50
-
-/* added by amy for LEAP related */
-#define IE_CISCO_FLAG_POSITION 0x08 /* Flag byte: byte 8, numbered from 0. */
-#define SUPPORT_CKIP_MIC 0x08 /* bit3 */
-#define SUPPORT_CKIP_PK 0x10 /* bit4 */
-/* defined for skb cb field */
-/* At most 28 byte */
-typedef struct cb_desc {
- /* Tx Desc Related flags (8-9) */
- u8 bLastIniPkt:1;
- u8 bCmdOrInit:1;
- u8 bFirstSeg:1;
- u8 bLastSeg:1;
- u8 bEncrypt:1;
- u8 bTxDisableRateFallBack:1;
- u8 bTxUseDriverAssingedRate:1;
- u8 bHwSec:1; /* indicate whether use Hw security. WB */
-
- u8 reserved1;
-
- /* Tx Firmware Relaged flags (10-11)*/
- u8 bCTSEnable:1;
- u8 bRTSEnable:1;
- u8 bUseShortGI:1;
- u8 bUseShortPreamble:1;
- u8 bTxEnableFwCalcDur:1;
- u8 bAMPDUEnable:1;
- u8 bRTSSTBC:1;
- u8 RTSSC:1;
-
- u8 bRTSBW:1;
- u8 bPacketBW:1;
- u8 bRTSUseShortPreamble:1;
- u8 bRTSUseShortGI:1;
- u8 bMulticast:1;
- u8 bBroadcast:1;
- u8 drv_agg_enable:1;
- u8 reserved2:1;
-
- /* Tx Desc related element(12-19) */
- u8 rata_index;
- u8 queue_index;
- u16 txbuf_size;
- u8 RATRIndex;
- u8 reserved6;
- u8 reserved7;
- u8 reserved8;
-
- /* Tx firmware related element(20-27) */
- u8 data_rate;
- u8 rts_rate;
- u8 ampdu_factor;
- u8 ampdu_density;
- u8 DrvAggrNum;
- u16 pkt_size;
- u8 reserved12;
-} cb_desc, *pcb_desc;
-
-/*--------------------------Define -------------------------------------------*/
-#define MGN_1M 0x02
-#define MGN_2M 0x04
-#define MGN_5_5M 0x0b
-#define MGN_11M 0x16
-
-#define MGN_6M 0x0c
-#define MGN_9M 0x12
-#define MGN_12M 0x18
-#define MGN_18M 0x24
-#define MGN_24M 0x30
-#define MGN_36M 0x48
-#define MGN_48M 0x60
-#define MGN_54M 0x6c
-
-#define MGN_MCS0 0x80
-#define MGN_MCS1 0x81
-#define MGN_MCS2 0x82
-#define MGN_MCS3 0x83
-#define MGN_MCS4 0x84
-#define MGN_MCS5 0x85
-#define MGN_MCS6 0x86
-#define MGN_MCS7 0x87
-#define MGN_MCS8 0x88
-#define MGN_MCS9 0x89
-#define MGN_MCS10 0x8a
-#define MGN_MCS11 0x8b
-#define MGN_MCS12 0x8c
-#define MGN_MCS13 0x8d
-#define MGN_MCS14 0x8e
-#define MGN_MCS15 0x8f
-
-/*
- * 802.11 Management frame Reason Code field
- */
-enum _ReasonCode{
- unspec_reason = 0x1,
- auth_not_valid = 0x2,
- deauth_lv_ss = 0x3,
- inactivity = 0x4,
- ap_overload = 0x5,
- class2_err = 0x6,
- class3_err = 0x7,
- disas_lv_ss = 0x8,
- asoc_not_auth = 0x9,
-
- /* ----MIC_CHECK */
- mic_failure = 0xe,
- /* ----END MIC_CHECK */
-
- /* Reason code defined in 802.11i D10.0 p.28. */
- invalid_IE = 0x0d,
- four_way_tmout = 0x0f,
- two_way_tmout = 0x10,
- IE_dismatch = 0x11,
- invalid_Gcipher = 0x12,
- invalid_Pcipher = 0x13,
- invalid_AKMP = 0x14,
- unsup_RSNIEver = 0x15,
- invalid_RSNIE = 0x16,
- auth_802_1x_fail = 0x17,
- ciper_reject = 0x18,
-
- /* Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. */
- QoS_unspec = 0x20, /* 32 */
- QAP_bandwidth = 0x21, /* 33 */
- poor_condition = 0x22, /* 34 */
- no_facility = 0x23, /* 35 */
- /* Where is 36??? */
- req_declined = 0x25, /* 37 */
- invalid_param = 0x26, /* 38 */
- req_not_honored = 0x27, /* 39 */
- TS_not_created = 0x2F, /* 47 */
- DL_not_allowed = 0x30, /* 48 */
- dest_not_exist = 0x31, /* 49 */
- dest_not_QSTA = 0x32, /* 50 */
-};
-
-
-
-#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A) || \
- (priv->ieee80211->current_network.mode == IEEE_N_24G) || \
- (priv->ieee80211->current_network.mode == IEEE_N_5G)) ? 16 : 10
-
-#define MGMT_QUEUE_NUM 5
-
-#define IEEE_CMD_SET_WPA_PARAM 1
-#define IEEE_CMD_SET_WPA_IE 2
-#define IEEE_CMD_SET_ENCRYPTION 3
-#define IEEE_CMD_MLME 4
-
-#define IEEE_PARAM_WPA_ENABLED 1
-#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
-#define IEEE_PARAM_DROP_UNENCRYPTED 3
-#define IEEE_PARAM_PRIVACY_INVOKED 4
-#define IEEE_PARAM_AUTH_ALGS 5
-#define IEEE_PARAM_IEEE_802_1X 6
-/* It should consistent with the driver_XXX.c */
-#define IEEE_PARAM_WPAX_SELECT 7
-/* Added for notify the encryption type selection */
-#define IEEE_PROTO_WPA 1
-#define IEEE_PROTO_RSN 2
-/* Added for notify the encryption type selection */
-#define IEEE_WPAX_USEGROUP 0
-#define IEEE_WPAX_WEP40 1
-#define IEEE_WPAX_TKIP 2
-#define IEEE_WPAX_WRAP 3
-#define IEEE_WPAX_CCMP 4
-#define IEEE_WPAX_WEP104 5
-
-#define IEEE_KEY_MGMT_IEEE8021X 1
-#define IEEE_KEY_MGMT_PSK 2
-
-#define IEEE_MLME_STA_DEAUTH 1
-#define IEEE_MLME_STA_DISASSOC 2
-
-
-#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
-#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
-#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
-#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
-#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
-#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
-
-
-#define IEEE_CRYPT_ALG_NAME_LEN 16
-
-#define MAX_IE_LEN 0xff
-
-/* added for kernel conflict */
-#define ieee80211_crypt_deinit_entries ieee80211_crypt_deinit_entries_rsl
-#define ieee80211_crypt_deinit_handler ieee80211_crypt_deinit_handler_rsl
-#define ieee80211_crypt_delayed_deinit ieee80211_crypt_delayed_deinit_rsl
-#define ieee80211_register_crypto_ops ieee80211_register_crypto_ops_rsl
-#define ieee80211_unregister_crypto_ops ieee80211_unregister_crypto_ops_rsl
-#define ieee80211_get_crypto_ops ieee80211_get_crypto_ops_rsl
-
-#define ieee80211_ccmp_null ieee80211_ccmp_null_rsl
-
-#define ieee80211_tkip_null ieee80211_tkip_null_rsl
-
-#define ieee80211_wep_null ieee80211_wep_null_rsl
-
-#define free_ieee80211 free_ieee80211_rsl
-#define alloc_ieee80211 alloc_ieee80211_rsl
-
-#define ieee80211_rx ieee80211_rx_rsl
-#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
-
-#define ieee80211_get_beacon ieee80211_get_beacon_rsl
-#define ieee80211_wake_queue ieee80211_wake_queue_rsl
-#define ieee80211_stop_queue ieee80211_stop_queue_rsl
-#define ieee80211_reset_queue ieee80211_reset_queue_rsl
-#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl
-#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl
-#define ieee80211_is_shortslot ieee80211_is_shortslot_rsl
-#define ieee80211_is_54g ieee80211_is_54g_rsl
-#define ieee80211_wpa_supplicant_ioctl ieee80211_wpa_supplicant_ioctl_rsl
-#define ieee80211_ps_tx_ack ieee80211_ps_tx_ack_rsl
-#define ieee80211_softmac_xmit ieee80211_softmac_xmit_rsl
-#define ieee80211_stop_send_beacons ieee80211_stop_send_beacons_rsl
-#define notify_wx_assoc_event notify_wx_assoc_event_rsl
-#define SendDisassociation SendDisassociation_rsl
-#define ieee80211_disassociate ieee80211_disassociate_rsl
-#define ieee80211_start_send_beacons ieee80211_start_send_beacons_rsl
-#define ieee80211_stop_scan ieee80211_stop_scan_rsl
-#define ieee80211_send_probe_requests ieee80211_send_probe_requests_rsl
-#define ieee80211_softmac_scan_syncro ieee80211_softmac_scan_syncro_rsl
-#define ieee80211_start_scan_syncro ieee80211_start_scan_syncro_rsl
-
-#define ieee80211_wx_get_essid ieee80211_wx_get_essid_rsl
-#define ieee80211_wx_set_essid ieee80211_wx_set_essid_rsl
-#define ieee80211_wx_set_rate ieee80211_wx_set_rate_rsl
-#define ieee80211_wx_get_rate ieee80211_wx_get_rate_rsl
-#define ieee80211_wx_set_wap ieee80211_wx_set_wap_rsl
-#define ieee80211_wx_get_wap ieee80211_wx_get_wap_rsl
-#define ieee80211_wx_set_mode ieee80211_wx_set_mode_rsl
-#define ieee80211_wx_get_mode ieee80211_wx_get_mode_rsl
-#define ieee80211_wx_set_scan ieee80211_wx_set_scan_rsl
-#define ieee80211_wx_get_freq ieee80211_wx_get_freq_rsl
-#define ieee80211_wx_set_freq ieee80211_wx_set_freq_rsl
-#define ieee80211_wx_set_rawtx ieee80211_wx_set_rawtx_rsl
-#define ieee80211_wx_get_name ieee80211_wx_get_name_rsl
-#define ieee80211_wx_set_power ieee80211_wx_set_power_rsl
-#define ieee80211_wx_get_power ieee80211_wx_get_power_rsl
-#define ieee80211_wlan_frequencies ieee80211_wlan_frequencies_rsl
-#define ieee80211_wx_set_rts ieee80211_wx_set_rts_rsl
-#define ieee80211_wx_get_rts ieee80211_wx_get_rts_rsl
-
-#define ieee80211_txb_free ieee80211_txb_free_rsl
-
-#define ieee80211_wx_set_gen_ie ieee80211_wx_set_gen_ie_rsl
-#define ieee80211_wx_get_scan ieee80211_wx_get_scan_rsl
-#define ieee80211_wx_set_encode ieee80211_wx_set_encode_rsl
-#define ieee80211_wx_get_encode ieee80211_wx_get_encode_rsl
-#if WIRELESS_EXT >= 18
-#define ieee80211_wx_set_mlme ieee80211_wx_set_mlme_rsl
-#define ieee80211_wx_set_auth ieee80211_wx_set_auth_rsl
-#define ieee80211_wx_set_encode_ext ieee80211_wx_set_encode_ext_rsl
-#define ieee80211_wx_get_encode_ext ieee80211_wx_get_encode_ext_rsl
-#endif
-
-
-typedef struct ieee_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 name;
- u32 value;
- } wpa_param;
- struct {
- u32 len;
- u8 reserved[32];
- u8 data[0];
- } wpa_ie;
- struct{
- int command;
- int reason_code;
- } mlme;
- struct {
- u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
- u8 set_tx;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
- } u;
-} ieee_param;
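
A minimal sketch of how a caller might fill the ieee_param union above for IEEE_CMD_SET_WPA_PARAM; the helper name and the buffer handling are assumptions for illustration, not part of the driver interface.

/* Hypothetical helper: fill an ieee_param to enable WPA for a station. */
static void example_fill_wpa_enable(struct ieee_param *p, const u8 *sta_addr)
{
	memset(p, 0, sizeof(*p));
	p->cmd = IEEE_CMD_SET_WPA_PARAM;
	memcpy(p->sta_addr, sta_addr, ETH_ALEN);
	p->u.wpa_param.name = IEEE_PARAM_WPA_ENABLED;
	p->u.wpa_param.value = 1;	/* non-zero enables WPA */
}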
-
-
-#if WIRELESS_EXT < 17
-#define IW_QUAL_QUAL_INVALID 0x10
-#define IW_QUAL_LEVEL_INVALID 0x20
-#define IW_QUAL_NOISE_INVALID 0x40
-#define IW_QUAL_QUAL_UPDATED 0x1
-#define IW_QUAL_LEVEL_UPDATED 0x2
-#define IW_QUAL_NOISE_UPDATED 0x4
-#endif
-
-
-/* Kernels older than 2.6.9 may not support this, so wrap it for common use */
-#define MSECS(t) msecs_to_jiffies(t)
-#define msleep_interruptible_rsl msleep_interruptible
-
-#define IEEE80211_DATA_LEN 2304
-/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
- 6.2.1.1.2.
-
- The figure in section 7.1.2 suggests a body size of up to 2312
- bytes is allowed, which is a bit confusing, I suspect this
- represents the 2304 bytes of real data, plus a possible 8 bytes of
- WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
-#define IEEE80211_1ADDR_LEN 10
-#define IEEE80211_2ADDR_LEN 16
-#define IEEE80211_3ADDR_LEN 24
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_FCS_LEN 4
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-#define IEEE80211_MGMT_HDR_LEN 24
-#define IEEE80211_DATA_HDR3_LEN 24
-#define IEEE80211_DATA_HDR4_LEN 30
-
-#define MIN_FRAG_THRESHOLD 256U
-#define MAX_FRAG_THRESHOLD 2346U
-
-
-/* Frame control field constants */
-#define IEEE80211_FCTL_VERS 0x0003
-#define IEEE80211_FCTL_FTYPE 0x000c
-#define IEEE80211_FCTL_STYPE 0x00f0
-#define IEEE80211_FCTL_FRAMETYPE 0x00fc
-#define IEEE80211_FCTL_TODS 0x0100
-#define IEEE80211_FCTL_FROMDS 0x0200
-#define IEEE80211_FCTL_DSTODS 0x0300
-#define IEEE80211_FCTL_MOREFRAGS 0x0400
-#define IEEE80211_FCTL_RETRY 0x0800
-#define IEEE80211_FCTL_PM 0x1000
-#define IEEE80211_FCTL_MOREDATA 0x2000
-#define IEEE80211_FCTL_WEP 0x4000
-#define IEEE80211_FCTL_ORDER 0x8000
-
-#define IEEE80211_FTYPE_MGMT 0x0000
-#define IEEE80211_FTYPE_CTL 0x0004
-#define IEEE80211_FTYPE_DATA 0x0008
-
-/* management */
-#define IEEE80211_STYPE_ASSOC_REQ 0x0000
-#define IEEE80211_STYPE_ASSOC_RESP 0x0010
-#define IEEE80211_STYPE_REASSOC_REQ 0x0020
-#define IEEE80211_STYPE_REASSOC_RESP 0x0030
-#define IEEE80211_STYPE_PROBE_REQ 0x0040
-#define IEEE80211_STYPE_PROBE_RESP 0x0050
-#define IEEE80211_STYPE_BEACON 0x0080
-#define IEEE80211_STYPE_ATIM 0x0090
-#define IEEE80211_STYPE_DISASSOC 0x00A0
-#define IEEE80211_STYPE_AUTH 0x00B0
-#define IEEE80211_STYPE_DEAUTH 0x00C0
-#define IEEE80211_STYPE_MANAGE_ACT 0x00D0
-
-/* control */
-#define IEEE80211_STYPE_PSPOLL 0x00A0
-#define IEEE80211_STYPE_RTS 0x00B0
-#define IEEE80211_STYPE_CTS 0x00C0
-#define IEEE80211_STYPE_ACK 0x00D0
-#define IEEE80211_STYPE_CFEND 0x00E0
-#define IEEE80211_STYPE_CFENDACK 0x00F0
-#define IEEE80211_STYPE_BLOCKACK 0x0094
-
-/* data */
-#define IEEE80211_STYPE_DATA 0x0000
-#define IEEE80211_STYPE_DATA_CFACK 0x0010
-#define IEEE80211_STYPE_DATA_CFPOLL 0x0020
-#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
-#define IEEE80211_STYPE_NULLFUNC 0x0040
-#define IEEE80211_STYPE_CFACK 0x0050
-#define IEEE80211_STYPE_CFPOLL 0x0060
-#define IEEE80211_STYPE_CFACKPOLL 0x0070
-#define IEEE80211_STYPE_QOS_DATA 0x0080
-#define IEEE80211_STYPE_QOS_NULL 0x00C0
-
-#define IEEE80211_SCTL_FRAG 0x000F
-#define IEEE80211_SCTL_SEQ 0xFFF0
-
-/* QOS control */
-#define IEEE80211_QCTL_TID 0x000F
-
-#define FC_QOS_BIT BIT7
-#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
-#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)))
-
-#define IsQoSDataFrame(pframe) ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800) != 0)
-#define SN_EQUAL(a, b) (a == b)
-#define MAX_DEV_ADDR_SIZE 8
-typedef enum _ACT_CATEGORY{
- ACT_CAT_QOS = 1,
- ACT_CAT_DLS = 2,
- ACT_CAT_BA = 3,
- ACT_CAT_HT = 7,
- ACT_CAT_WMM = 17,
-} ACT_CATEGORY, *PACT_CATEGORY;
-
-typedef enum _TS_ACTION{
- ACT_ADDTSREQ = 0,
- ACT_ADDTSRSP = 1,
- ACT_DELTS = 2,
- ACT_SCHEDULE = 3,
-} TS_ACTION, *PTS_ACTION;
-
-typedef enum _BA_ACTION{
- ACT_ADDBAREQ = 0,
- ACT_ADDBARSP = 1,
- ACT_DELBA = 2,
-} BA_ACTION, *PBA_ACTION;
-
-typedef enum _InitialGainOpType{
- IG_Backup = 0,
- IG_Restore,
- IG_Max
-} InitialGainOpType;
-
-/* debug macros */
-#define CONFIG_IEEE80211_DEBUG
-#ifdef CONFIG_IEEE80211_DEBUG
-extern u32 ieee80211_debug_level;
-#define IEEE80211_DEBUG(level, fmt, args...) \
-do { if (ieee80211_debug_level & (level)) \
- printk(KERN_DEBUG "ieee80211: " fmt, ## args); } while (0)
-/* Added (wb) to dump a data buffer.
- * If you want to print DATA buffers related to BA, set ieee80211_debug_level
- * to IEEE80211_DL_DATA | IEEE80211_DL_BA.
- */
-#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do { if ((ieee80211_debug_level & (level)) == (level)) { \
- int i; \
-		u8 *pdata = (u8 *)data; \
-		printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
- for (i = 0; i < (int)(datalen); i++) { \
- printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
- } while (0)
-#else
-#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while (0)
-#endif /* CONFIG_IEEE80211_DEBUG */
-
-/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
-
-/*
- * To use the debug system;
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of:
- *
- * #define IEEE80211_DL_xxxx VALUE
- *
- * shifting value to the left one bit from the previous entry. xxxx should be
- * the name of the classification (for example, WEP)
- *
- * You then need to either add an IEEE80211_xxxx_DEBUG() macro definition for your
- * classification, or use IEEE80211_DEBUG(IEEE80211_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * To add your debug level to the list of levels seen when you perform
- *
- * % cat /proc/net/ipw/debug_level
- *
- * you simply need to add your entry to the ipw_debug_levels array.
- *
- * If you do not see debug_level in /proc/net/ipw then you do not have
- * CONFIG_IEEE80211_DEBUG defined in your kernel configuration
- *
- */
-
-#define IEEE80211_DL_INFO (1<<0)
-#define IEEE80211_DL_WX (1<<1)
-#define IEEE80211_DL_SCAN (1<<2)
-#define IEEE80211_DL_STATE (1<<3)
-#define IEEE80211_DL_MGMT (1<<4)
-#define IEEE80211_DL_FRAG (1<<5)
-#define IEEE80211_DL_EAP (1<<6)
-#define IEEE80211_DL_DROP (1<<7)
-
-#define IEEE80211_DL_TX (1<<8)
-#define IEEE80211_DL_RX (1<<9)
-
-#define IEEE80211_DL_HT (1<<10) /* HT */
-#define IEEE80211_DL_BA (1<<11) /* ba */
-#define IEEE80211_DL_TS (1<<12) /* TS */
-#define IEEE80211_DL_QOS (1<<13)
-#define IEEE80211_DL_REORDER (1<<14)
-#define IEEE80211_DL_IOT (1<<15)
-#define IEEE80211_DL_IPS (1<<16)
-#define IEEE80211_DL_TRACE	(1<<29)	/* trace functions; use together with net_ratelimit() so as not to flood the log */
-#define IEEE80211_DL_DATA	(1<<30)	/* controls whether the data buffer is printed out */
-#define IEEE80211_DL_ERR (1<<31) /* always open */
-#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
-#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
-#define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a)
-
-#define IEEE80211_DEBUG_WX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_WX, f, ## a)
-#define IEEE80211_DEBUG_SCAN(f, a...) IEEE80211_DEBUG(IEEE80211_DL_SCAN, f, ## a)
-#define IEEE80211_DEBUG_STATE(f, a...) IEEE80211_DEBUG(IEEE80211_DL_STATE, f, ## a)
-#define IEEE80211_DEBUG_MGMT(f, a...) IEEE80211_DEBUG(IEEE80211_DL_MGMT, f, ## a)
-#define IEEE80211_DEBUG_FRAG(f, a...) IEEE80211_DEBUG(IEEE80211_DL_FRAG, f, ## a)
-#define IEEE80211_DEBUG_EAP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_EAP, f, ## a)
-#define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a)
-#define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a)
-#define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a)
-#define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a)
-
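
Following the recipe in the comment above, a new classification is a bit value plus a convenience macro. The WEP classification and the bit chosen here are only an illustration (bit 17 appears unused by the IEEE80211_DL_* values above); they are not defined by the original header.

/* Illustrative only: a hypothetical WEP debug classification. */
#define IEEE80211_DL_WEP		(1<<17)
#define IEEE80211_WEP_DEBUG(f, a...)	IEEE80211_DEBUG(IEEE80211_DL_WEP, f, ## a)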
-#ifdef CONFIG_IEEE80211_DEBUG
-/* Added by Annie, 2005-11-22. */
-#define MAX_STR_LEN 64
-/* Show printable ASCII (33 to 126) only; anything else is printed as '?'. Annie, 2005-11-22. */
-#define PRINTABLE(_ch) ((_ch) >= '!' && (_ch) <= '~')
-#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) \
- if ((_Comp) & level) { \
- int __i; \
- u8 buffer[MAX_STR_LEN]; \
- int length = (_Len < MAX_STR_LEN) ? _Len : (MAX_STR_LEN - 1); \
- memset(buffer, 0, MAX_STR_LEN); \
- memcpy(buffer, (u8 *)_Ptr, length); \
- for (__i = 0; __i < MAX_STR_LEN; __i++) { \
- if (!PRINTABLE(buffer[__i])) \
- buffer[__i] = '?'; \
- } \
- buffer[length] = '\0'; \
- printk("Rtl819x: "); \
-		printk("%s", _TitleString); \
- printk(": %d, <%s>\n", _Len, buffer); \
- }
-#else
-#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) do {} while (0)
-#endif
-
-#include <linux/netdevice.h>
-#include <linux/if_arp.h> /* ARPHRD_ETHER */
-
-#ifndef WIRELESS_SPY
-#define WIRELESS_SPY /* enable iwspy support */
-#endif
-#include <net/iw_handler.h> /* new driver API */
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
-
-#ifndef ETH_P_80211_RAW
-#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
-#endif
-
-/* IEEE 802.11 defines */
-
-#define P80211_OUI_LEN 3
-
-struct ieee80211_snap_hdr {
-
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
-	u8 oui[P80211_OUI_LEN]; /* organizationally unique identifier (OUI) */
-
-} __attribute__ ((packed));
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
-
-#define WLAN_FC_GET_FRAMETYPE(fc) ((fc) & IEEE80211_FCTL_FRAMETYPE)
-#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
-#define WLAN_GET_SEQ_SEQ(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-#define WLAN_AUTH_LEAP 2
-
-#define WLAN_AUTH_CHALLENGE_LEN 128
-
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
-#define WLAN_CAPABILITY_QOS (1<<9)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
-#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
-
-/* 802.11g ERP information element */
-#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
-#define WLAN_ERP_USE_PROTECTION (1<<1)
-#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
-
-/* Status codes */
-enum ieee80211_statuscode {
- WLAN_STATUS_SUCCESS = 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
- WLAN_STATUS_CAPS_UNSUPPORTED = 10,
- WLAN_STATUS_REASSOC_NO_ASSOC = 11,
- WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
- WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
- WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
- WLAN_STATUS_CHALLENGE_FAIL = 15,
- WLAN_STATUS_AUTH_TIMEOUT = 16,
- WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
- WLAN_STATUS_ASSOC_DENIED_RATES = 18,
- /* 802.11b */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
- WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
- WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
- /* 802.11h */
- WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
- WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
- WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
- /* 802.11g */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
- WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
- /* 802.11i */
- WLAN_STATUS_INVALID_IE = 40,
- WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
- WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
- WLAN_STATUS_INVALID_AKMP = 43,
- WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
- WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
- WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
-};
-
-/* Reason codes */
-enum ieee80211_reasoncode {
- WLAN_REASON_UNSPECIFIED = 1,
- WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
- WLAN_REASON_DEAUTH_LEAVING = 3,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
- WLAN_REASON_DISASSOC_AP_BUSY = 5,
- WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
- WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
- /* 802.11h */
- WLAN_REASON_DISASSOC_BAD_POWER = 10,
- WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
- /* 802.11i */
- WLAN_REASON_INVALID_IE = 13,
- WLAN_REASON_MIC_FAILURE = 14,
- WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
- WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
- WLAN_REASON_IE_DIFFERENT = 17,
- WLAN_REASON_INVALID_GROUP_CIPHER = 18,
- WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
- WLAN_REASON_INVALID_AKMP = 20,
- WLAN_REASON_UNSUPP_RSN_VERSION = 21,
- WLAN_REASON_INVALID_RSN_IE_CAP = 22,
- WLAN_REASON_IEEE8021X_FAILED = 23,
- WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
-};
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
-#define IEEE80211_STATMASK_WEMASK 0x7
-
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
-
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
-
-#define IEEE80211_CCK_RATE_LEN 4
-#define IEEE80211_CCK_RATE_1MB 0x02
-#define IEEE80211_CCK_RATE_2MB 0x04
-#define IEEE80211_CCK_RATE_5MB 0x0B
-#define IEEE80211_CCK_RATE_11MB 0x16
-#define IEEE80211_OFDM_RATE_LEN 8
-#define IEEE80211_OFDM_RATE_6MB 0x0C
-#define IEEE80211_OFDM_RATE_9MB 0x12
-#define IEEE80211_OFDM_RATE_12MB 0x18
-#define IEEE80211_OFDM_RATE_18MB 0x24
-#define IEEE80211_OFDM_RATE_24MB 0x30
-#define IEEE80211_OFDM_RATE_36MB 0x48
-#define IEEE80211_OFDM_RATE_48MB 0x60
-#define IEEE80211_OFDM_RATE_54MB 0x6C
-#define IEEE80211_BASIC_RATE_MASK 0x80
-
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
-
-#define IEEE80211_CCK_RATES_MASK 0x0000000F
-#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
- IEEE80211_CCK_RATE_2MB_MASK)
-#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
- IEEE80211_CCK_RATE_5MB_MASK | \
- IEEE80211_CCK_RATE_11MB_MASK)
-
-#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
-#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
- IEEE80211_OFDM_RATE_12MB_MASK | \
- IEEE80211_OFDM_RATE_24MB_MASK)
-#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
- IEEE80211_OFDM_RATE_9MB_MASK | \
- IEEE80211_OFDM_RATE_18MB_MASK | \
- IEEE80211_OFDM_RATE_36MB_MASK | \
- IEEE80211_OFDM_RATE_48MB_MASK | \
- IEEE80211_OFDM_RATE_54MB_MASK)
-#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
- IEEE80211_CCK_DEFAULT_RATES_MASK)
-
-#define IEEE80211_NUM_OFDM_RATES 8
-#define IEEE80211_NUM_CCK_RATES 4
-#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-
-/* this is stolen and modified from the madwifi driver*/
-#define IEEE80211_FC0_TYPE_MASK 0x0c
-#define IEEE80211_FC0_TYPE_DATA 0x08
-#define IEEE80211_FC0_SUBTYPE_MASK 0xB0
-#define IEEE80211_FC0_SUBTYPE_QOS 0x80
-
-#define IEEE80211_QOS_HAS_SEQ(fc) \
- (((fc) & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == \
- (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
-
-/* this is stolen from ipw2200 driver */
-#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct ieee_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num[17];
- u16 frag_num[17];
- unsigned long packet_time[17];
- struct list_head list;
-};
-
-/* NOTE: This data is for statistical purposes; not all hardware provides this
- * information for frames received. Not setting these will not cause
- * any adverse effects. */
-struct ieee80211_rx_stats {
- u32 mac_time[2];
- s8 rssi;
- u8 signal;
- u8 noise;
- u16 rate; /* in 100 kbps */
- u8 received_channel;
- u8 control;
- u8 mask;
- u8 freq;
- u16 len;
- u64 tsf;
- u32 beacon_time;
- u8 nic_type;
- u16 Length;
- u8 SignalQuality; /* in 0-100 index. */
- s32 RecvSignalPower; /* Real power in dBm for this packet, no beautification and aggregation. */
- s8 RxPower; /* in dBm Translate from PWdB */
- u8 SignalStrength; /* in 0-100 index. */
- u16 bHwError:1;
- u16 bCRC:1;
- u16 bICV:1;
- u16 bShortPreamble:1;
- u16 Antenna:1; /* for rtl8185 */
- u16 Decrypted:1; /* for rtl8185, rtl8187 */
- u16 Wakeup:1; /* for rtl8185 */
- u16 Reserved0:1; /* for rtl8185 */
- u8 AGC;
- u32 TimeStampLow;
- u32 TimeStampHigh;
- bool bShift;
- bool bIsQosData;
- u8 UserPriority;
-
-	/*
-	 * Attention: 11n- or 8190-specific fields should be put below this line.
-	 */
-
- u8 RxDrvInfoSize;
- u8 RxBufShift;
- bool bIsAMPDU;
- bool bFirstMPDU;
- bool bContainHTC;
- bool RxIs40MHzPacket;
- u32 RxPWDBAll;
- u8 RxMIMOSignalStrength[4]; /* in 0~100 index */
- s8 RxMIMOSignalQuality[2];
- bool bPacketMatchBSSID;
- bool bIsCCK;
- bool bPacketToSelf;
- u8 *virtual_address;
-	u16 packetlength;	/* Total packet length: must equal the sum of all FragLengths */
-	u16 fraglength;		/* FragLength equals PacketLength in the non-fragmented case */
- u16 fragoffset; /* Data offset for this fragment */
- u16 ntotalfrag;
- bool bisrxaggrsubframe;
- bool bPacketBeacon; /* cosa add for rssi */
- bool bToSelfBA; /* cosa add for rssi */
- char cck_adc_pwdb[4]; /* cosa add for rx path selection */
- u16 Seq_Num;
-
-};
-
-/* IEEE 802.11 requires that a STA support concurrent reception of at least
- * three fragmented frames. This define can be increased to support more
- * concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
-#define IEEE80211_FRAG_CACHE_LEN 4
-
-struct ieee80211_frag_entry {
- unsigned long first_frag_time;
- unsigned int seq;
- unsigned int last_frag;
- struct sk_buff *skb;
- u8 src_addr[ETH_ALEN];
- u8 dst_addr[ETH_ALEN];
-};
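
A sketch of how a receive path might probe one row of the fragment cache described above; the matching rules and the two-second ageing constant are assumptions for illustration, not the driver's actual reassembly code.

/* Hypothetical lookup over one frag_cache row of IEEE80211_FRAG_CACHE_LEN entries. */
static struct ieee80211_frag_entry *
example_frag_cache_find(struct ieee80211_frag_entry *cache,
			unsigned int seq, unsigned int frag,
			const u8 *src, const u8 *dst)
{
	int i;

	for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
		struct ieee80211_frag_entry *entry = &cache[i];

		if (!entry->skb)
			continue;	/* slot unused */
		if (time_after(jiffies, entry->first_frag_time + 2 * HZ))
			continue;	/* assumed ageing: skip stale entries */
		if (entry->seq == seq && entry->last_frag + 1 == frag &&
		    !memcmp(entry->src_addr, src, ETH_ALEN) &&
		    !memcmp(entry->dst_addr, dst, ETH_ALEN))
			return entry;
	}
	return NULL;
}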
-
-struct ieee80211_stats {
- unsigned int tx_unicast_frames;
- unsigned int tx_multicast_frames;
- unsigned int tx_fragments;
- unsigned int tx_unicast_octets;
- unsigned int tx_multicast_octets;
- unsigned int tx_deferred_transmissions;
- unsigned int tx_single_retry_frames;
- unsigned int tx_multiple_retry_frames;
- unsigned int tx_retry_limit_exceeded;
- unsigned int tx_discards;
- unsigned int rx_unicast_frames;
- unsigned int rx_multicast_frames;
- unsigned int rx_fragments;
- unsigned int rx_unicast_octets;
- unsigned int rx_multicast_octets;
- unsigned int rx_fcs_errors;
- unsigned int rx_discards_no_buffer;
- unsigned int tx_discards_wrong_sa;
- unsigned int rx_discards_undecryptable;
- unsigned int rx_message_in_msg_fragments;
- unsigned int rx_message_in_bad_msg_fragments;
-};
-
-struct ieee80211_device;
-
-#include "ieee80211_crypt.h"
-
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
-#define SEC_ENCRYPT (1<<9)
-
-#define SEC_LEVEL_0 0 /* None */
-#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
-#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
-#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
-#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
-
-#define SEC_ALG_NONE 0
-#define SEC_ALG_WEP 1
-#define SEC_ALG_TKIP 2
-#define SEC_ALG_CCMP 3
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
-#define SCM_KEY_LEN 32
-#define SCM_TEMPORAL_KEY_LENGTH 16
-
-struct ieee80211_security {
- u16 active_key:2,
- enabled:1,
- auth_mode:2,
- auth_algo:4,
- unicast_uses_group:1,
- encrypt:1;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][SCM_KEY_LEN];
- u8 level;
- u16 flags;
-} __attribute__ ((packed));
-
-
-/*
- 802.11 data frame from AP
- ,-------------------------------------------------------------------.
-Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- |------|------|---------|---------|---------|------|---------|------|
-Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
- | | tion | (BSSID) | | | ence | data | |
- `-------------------------------------------------------------------'
-Total: 28-2340 bytes
-*/
-
-/* Management Frame Information Element Types */
-enum ieee80211_mfie {
- MFIE_TYPE_SSID = 0,
- MFIE_TYPE_RATES = 1,
- MFIE_TYPE_FH_SET = 2,
- MFIE_TYPE_DS_SET = 3,
- MFIE_TYPE_CF_SET = 4,
- MFIE_TYPE_TIM = 5,
- MFIE_TYPE_IBSS_SET = 6,
- MFIE_TYPE_COUNTRY = 7,
- MFIE_TYPE_HOP_PARAMS = 8,
- MFIE_TYPE_HOP_TABLE = 9,
- MFIE_TYPE_REQUEST = 10,
- MFIE_TYPE_CHALLENGE = 16,
- MFIE_TYPE_POWER_CONSTRAINT = 32,
- MFIE_TYPE_POWER_CAPABILITY = 33,
- MFIE_TYPE_TPC_REQUEST = 34,
- MFIE_TYPE_TPC_REPORT = 35,
- MFIE_TYPE_SUPP_CHANNELS = 36,
- MFIE_TYPE_CSA = 37,
- MFIE_TYPE_MEASURE_REQUEST = 38,
- MFIE_TYPE_MEASURE_REPORT = 39,
- MFIE_TYPE_QUIET = 40,
- MFIE_TYPE_IBSS_DFS = 41,
- MFIE_TYPE_ERP = 42,
- MFIE_TYPE_RSN = 48,
- MFIE_TYPE_RATES_EX = 50,
- MFIE_TYPE_HT_CAP = 45,
- MFIE_TYPE_HT_INFO = 61,
- MFIE_TYPE_AIRONET = 133,
- MFIE_TYPE_GENERIC = 221,
- MFIE_TYPE_QOS_PARAMETER = 222,
-};
-
-/* Minimal header; can be used for passing 802.11 frames with sufficient
- * information to determine what underlying data type is actually
- * stored in the payload. */
-struct ieee80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_1addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_2addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_4addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[0];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_3addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
- __le16 qos_ctl;
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_4addrqos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u8 payload[0];
- __le16 qos_ctl;
-} __attribute__ ((packed));
-
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[0];
-} __attribute__ ((packed));
-
-struct ieee80211_authentication {
- struct ieee80211_hdr_3addr header;
- __le16 algorithm;
- __le16 transaction;
- __le16 status;
- /*challenge*/
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_disassoc {
- struct ieee80211_hdr_3addr header;
- __le16 reason;
-} __attribute__ ((packed));
-
-struct ieee80211_probe_request {
- struct ieee80211_hdr_3addr header;
- /* SSID, supported rates */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_probe_response {
- struct ieee80211_hdr_3addr header;
- u32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- /* SSID, supported rates, FH params, DS params,
- * CF params, IBSS params, TIM (if beacon), RSN */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-/* Alias beacon for probe_response */
-#define ieee80211_beacon ieee80211_probe_response
-
-struct ieee80211_assoc_request_frame {
- struct ieee80211_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- /* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_reassoc_request_frame {
- struct ieee80211_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- u8 current_ap[ETH_ALEN];
- /* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[0];
-} __attribute__ ((packed));
-
-struct ieee80211_assoc_response_frame {
- struct ieee80211_hdr_3addr header;
- __le16 capability;
- __le16 status;
- __le16 aid;
- struct ieee80211_info_element info_element[0]; /* supported rates */
-} __attribute__ ((packed));
-
-struct ieee80211_txb {
- u8 nr_frags;
- u8 encrypted;
- u8 queue_index;
- u8 rts_included;
- u16 reserved;
- __le16 frag_size;
- __le16 payload_size;
- struct sk_buff *fragments[0];
-};
-
-#define MAX_TX_AGG_COUNT 16
-struct ieee80211_drv_agg_txb {
- u8 nr_drv_agg_frames;
- struct sk_buff *tx_agg_frames[MAX_TX_AGG_COUNT];
-} __attribute__((packed));
-
-#define MAX_SUBFRAME_COUNT 64
-struct ieee80211_rxb {
- u8 nr_subframes;
- struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
-} __attribute__((packed));
-
-typedef union _frameqos {
- u16 shortdata;
- u8 chardata[2];
- struct {
- u16 tid:4;
- u16 eosp:1;
- u16 ack_policy:2;
- u16 reserved:1;
- u16 txop:8;
- } field;
-} frameqos, *pframeqos;
-
-/* SWEEP TABLE ENTRIES NUMBER*/
-#define MAX_SWEEP_TAB_ENTRIES 42
-#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
-/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
- * only use 8, and then use extended rates for the remaining supported
- * rates. Other APs, however, stick all of their supported rates on the
- * main rates information element... */
-#define MAX_RATES_LENGTH ((u8)12)
-#define MAX_RATES_EX_LENGTH ((u8)16)
-#define MAX_NETWORK_COUNT 128
-
-#define MAX_CHANNEL_NUMBER 161
-#define IEEE80211_SOFTMAC_SCAN_TIME 100
-/* (HZ / 2) */
-#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
-
-#define CRC_LENGTH 4U
-
-#define MAX_WPA_IE_LEN 64
-
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
-
-/* QoS structure */
-#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
-#define NETWORK_HAS_QOS_INFORMATION (1<<4)
-#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
- NETWORK_HAS_QOS_INFORMATION)
-/* 802.11h */
-#define NETWORK_HAS_POWER_CONSTRAINT (1<<5)
-#define NETWORK_HAS_CSA (1<<6)
-#define NETWORK_HAS_QUIET (1<<7)
-#define NETWORK_HAS_IBSS_DFS (1<<8)
-#define NETWORK_HAS_TPC_REPORT (1<<9)
-
-#define NETWORK_HAS_ERP_VALUE (1<<10)
-
-#define QOS_QUEUE_NUM 4
-#define QOS_OUI_LEN 3
-#define QOS_OUI_TYPE 2
-#define QOS_ELEMENT_ID 221
-#define QOS_OUI_INFO_SUB_TYPE 0
-#define QOS_OUI_PARAM_SUB_TYPE 1
-#define QOS_VERSION_1 1
-#define QOS_AIFSN_MIN_VALUE 2
-struct ieee80211_qos_information_element {
- u8 elementID;
- u8 length;
- u8 qui[QOS_OUI_LEN];
- u8 qui_type;
- u8 qui_subtype;
- u8 version;
- u8 ac_info;
-} __attribute__ ((packed));
-
-struct ieee80211_qos_ac_parameter {
- u8 aci_aifsn;
- u8 ecw_min_max;
- __le16 tx_op_limit;
-} __attribute__ ((packed));
-
-struct ieee80211_qos_parameter_info {
- struct ieee80211_qos_information_element info_element;
- u8 reserved;
- struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
-
-struct ieee80211_qos_parameters {
- __le16 cw_min[QOS_QUEUE_NUM];
- __le16 cw_max[QOS_QUEUE_NUM];
- u8 aifs[QOS_QUEUE_NUM];
- u8 flag[QOS_QUEUE_NUM];
- __le16 tx_op_limit[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
-
-struct ieee80211_qos_data {
- struct ieee80211_qos_parameters parameters;
- int active;
- int supported;
- u8 param_count;
- u8 old_param_count;
-};
-
-struct ieee80211_tim_parameters {
- u8 tim_count;
- u8 tim_period;
-} __attribute__ ((packed));
-
-struct ieee80211_wmm_ac_param {
- u8 ac_aci_acm_aifsn;
- u8 ac_ecwmin_ecwmax;
- u16 ac_txop_limit;
-};
-
-struct ieee80211_wmm_ts_info {
- u8 ac_dir_tid;
- u8 ac_up_psb;
- u8 reserved;
-} __attribute__ ((packed));
-
-struct ieee80211_wmm_tspec_elem {
- struct ieee80211_wmm_ts_info ts_info;
- u16 norm_msdu_size;
- u16 max_msdu_size;
- u32 min_serv_inter;
- u32 max_serv_inter;
- u32 inact_inter;
- u32 suspen_inter;
- u32 serv_start_time;
- u32 min_data_rate;
- u32 mean_data_rate;
- u32 peak_data_rate;
- u32 max_burst_size;
- u32 delay_bound;
- u32 min_phy_rate;
- u16 surp_band_allow;
- u16 medium_time;
-} __attribute__((packed));
-enum eap_type {
- EAP_PACKET = 0,
- EAPOL_START,
- EAPOL_LOGOFF,
- EAPOL_KEY,
- EAPOL_ENCAP_ASF_ALERT
-};
-
-static const char *eap_types[] = {
- [EAP_PACKET] = "EAP-Packet",
- [EAPOL_START] = "EAPOL-Start",
- [EAPOL_LOGOFF] = "EAPOL-Logoff",
- [EAPOL_KEY] = "EAPOL-Key",
- [EAPOL_ENCAP_ASF_ALERT] = "EAPOL-Encap-ASF-Alert"
-};
-
-static inline const char *eap_get_type(int type)
-{
- return ((u32)type >= ARRAY_SIZE(eap_types)) ? "Unknown" : eap_types[type];
-}
-static inline u8 Frame_QoSTID(u8 *buf)
-{
- struct ieee80211_hdr_3addr *hdr;
- u16 fc;
- hdr = (struct ieee80211_hdr_3addr *)buf;
- fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS) && (fc & IEEE80211_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
-}
-
-
-struct eapol {
- u8 snap[6];
- u16 ethertype;
- u8 version;
- u8 type;
- u16 length;
-} __attribute__ ((packed));
-
-struct ieee80211_softmac_stats{
- unsigned int rx_ass_ok;
- unsigned int rx_ass_err;
- unsigned int rx_probe_rq;
- unsigned int tx_probe_rs;
- unsigned int tx_beacons;
- unsigned int rx_auth_rq;
- unsigned int rx_auth_rs_ok;
- unsigned int rx_auth_rs_err;
- unsigned int tx_auth_rq;
- unsigned int no_auth_rs;
- unsigned int no_ass_rs;
- unsigned int tx_ass_rq;
- unsigned int rx_ass_rq;
- unsigned int tx_probe_rq;
- unsigned int reassoc;
- unsigned int swtxstop;
- unsigned int swtxawake;
- unsigned char CurrentShowTxate;
- unsigned char last_packet_rate;
- unsigned int txretrycount;
-};
-
-#define BEACON_PROBE_SSID_ID_POSITION 12
-
-struct ieee80211_info_element_hdr {
- u8 id;
- u8 len;
-} __attribute__ ((packed));
-
-/*
- * These are the data types that can make up management packets
- *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
- u8 current_ap[ETH_ALEN];
- u16 listen_interval;
- struct {
- u16 association_id:14, reserved:2;
- } __attribute__ ((packed));
- u32 time_stamp[2];
- u16 reason;
- u16 status;
-*/
-
-#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 2 /* 1Mbps */
-
-enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
-#define MAX_SP_Len (WMM_all_frame << 4)
-#define IEEE80211_QOS_TID 0x0f
-#define QOS_CTL_NOTCONTAIN_ACK (0x01 << 5)
-
-#define IEEE80211_DTIM_MBCAST 4
-#define IEEE80211_DTIM_UCAST 2
-#define IEEE80211_DTIM_VALID 1
-#define IEEE80211_DTIM_INVALID 0
-
-#define IEEE80211_PS_DISABLED 0
-#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
-#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
-
-
-#ifdef WMM_Hang_8187
-#undef WMM_Hang_8187
-#endif
-
-#define WME_AC_BK 0x00
-#define WME_AC_BE 0x01
-#define WME_AC_VI 0x02
-#define WME_AC_VO 0x03
-#define WME_ACI_MASK 0x03
-#define WME_AIFSN_MASK 0x03
-#define WME_AC_PRAM_LEN 16
-
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-
-/* UP mapping to AC, used in MgntQuery_SequenceNumber() and maybe for DSCP */
-#define UP2AC(up) ( \
- ((up) < 1) ? WME_AC_BE : \
- ((up) < 3) ? WME_AC_BK : \
- ((up) < 4) ? WME_AC_BE : \
- ((up) < 6) ? WME_AC_VI : \
- WME_AC_VO)
-/* AC mapping to UP, used in the Tx path for selecting the corresponding TX queue */
-#define AC2UP(_ac) ( \
- ((_ac) == WME_AC_VO) ? 6 : \
- ((_ac) == WME_AC_VI) ? 5 : \
- ((_ac) == WME_AC_BK) ? 1 : \
- 0)
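
A small usage sketch for the UP2AC/AC2UP mapping above, assuming the 802.1d user priority is carried in skb->priority; the helper name is hypothetical.

/* Hypothetical helper: pick a WME access category from an skb's priority. */
static inline u8 example_select_ac(const struct sk_buff *skb)
{
	u8 up = skb->priority & 0x7;	/* 802.1d user priority, 0..7 */

	return UP2AC(up);		/* WME_AC_BK/BE/VI/VO, usable as a TX queue index */
}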
-
-#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
-#define ETHERNET_HEADER_SIZE 14	/* length of two Ethernet addresses plus ether type */
-
-struct ether_header {
- u8 ether_dhost[ETHER_ADDR_LEN];
- u8 ether_shost[ETHER_ADDR_LEN];
- u16 ether_type;
-} __attribute__((packed));
-
-#ifndef ETHERTYPE_PAE
-#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
-#endif
-#ifndef ETHERTYPE_IP
-#define ETHERTYPE_IP 0x0800 /* IP protocol */
-#endif
-
-typedef struct _bss_ht{
-
- bool support_ht;
-
- /* HT related elements */
- u8 ht_cap_buf[32];
- u16 ht_cap_len;
- u8 ht_info_buf[32];
- u16 ht_info_len;
-
- HT_SPEC_VER ht_spec_ver;
- /* HT_CAPABILITY_ELE bdHTCapEle; */
- /* HT_INFORMATION_ELE bdHTInfoEle; */
-
- bool aggregation;
- bool long_slot_time;
-} bss_ht, *pbss_ht;
-
-typedef enum _erp_t{
- ERP_NonERPpresent = 0x01,
- ERP_UseProtection = 0x02,
- ERP_BarkerPreambleMode = 0x04,
-} erp_t;
-
-
-struct ieee80211_network {
- /* These entries are used to identify a unique network */
- u8 bssid[ETH_ALEN];
- u8 channel;
- /* Ensure null-terminated for any debug msgs */
- u8 ssid[IW_ESSID_MAX_SIZE + 1];
- u8 ssid_len;
- struct ieee80211_qos_data qos_data;
- bool bWithAironetIE;
- bool bCkipSupported;
- bool bCcxRmEnable;
- u16 CcxRmState[2];
- /* CCXv4 S59, MBSSID. */
- bool bMBssidValid;
- u8 MBssidMask;
- u8 MBssid[6];
- /* CCX 2 S38, WLAN Device Version Number element. */
- bool bWithCcxVerNum;
- u8 BssCcxVerNumber;
- /* These are network statistics */
- struct ieee80211_rx_stats stats;
- u16 capability;
- u8 rates[MAX_RATES_LENGTH];
- u8 rates_len;
- u8 rates_ex[MAX_RATES_EX_LENGTH];
- u8 rates_ex_len;
- unsigned long last_scanned;
- u8 mode;
- u32 flags;
- u32 last_associate;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 listen_interval;
- u16 atim_window;
- u8 erp_value;
- u8 wpa_ie[MAX_WPA_IE_LEN];
- size_t wpa_ie_len;
- u8 rsn_ie[MAX_WPA_IE_LEN];
- size_t rsn_ie_len;
-
- struct ieee80211_tim_parameters tim;
- u8 dtim_period;
- u8 dtim_data;
- u32 last_dtim_sta_time[2];
-
-	/* appended for QoS */
- u8 wmm_info;
- struct ieee80211_wmm_ac_param wmm_param[4];
- u8 QoS_Enable;
-#ifdef THOMAS_TURBO
- u8 Turbo_Enable;/* enable turbo mode, added by thomas */
-#endif
-#ifdef ENABLE_DOT11D
- u16 CountryIeLen;
- u8 CountryIeBuf[MAX_IE_LEN];
-#endif
- /* HT Related */
- BSS_HT bssht;
-	/* Added to handle Broadcom AP management frame CCK rate. */
- bool broadcom_cap_exist;
- bool ralink_cap_exist;
- bool atheros_cap_exist;
- bool cisco_cap_exist;
- bool unknown_cap_exist;
- bool berp_info_valid;
- bool buseprotection;
- /* put at the end of the structure. */
- struct list_head list;
-};
-
-enum ieee80211_state {
-
- /* the card is not linked at all */
- IEEE80211_NOLINK = 0,
-
-	/* The IEEE80211_ASSOCIATING* states are for BSS client mode.
-	 * The driver shall not perform RX filtering unless
-	 * the state is LINKED.
-	 * The driver shall just check for the state LINKED and
-	 * default to NOLINK for ALL the other states (including
-	 * LINKED_SCANNING)
-	 */
-
- /* the association procedure will start (wq scheduling)*/
- IEEE80211_ASSOCIATING,
- IEEE80211_ASSOCIATING_RETRY,
-
- /* the association procedure is sending AUTH request*/
- IEEE80211_ASSOCIATING_AUTHENTICATING,
-
-	/* the association procedure has successfully authenticated
- * and is sending association request
- */
- IEEE80211_ASSOCIATING_AUTHENTICATED,
-
-	/* the link is OK: the card is associated to a BSS, linked
-	 * to an IBSS cell, or acting as an AP and creating the BSS
-	 */
- IEEE80211_LINKED,
-
-	/* same as LINKED, but the driver shall apply the RX filter
-	 * rules as if we were in NOLINK mode. The card is still
-	 * logically linked, but it is doing a synchronous site survey;
-	 * afterwards it will go back to the LINKED state.
-	 */
- IEEE80211_LINKED_SCANNING,
-
-};
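
The RX-filter rule spelled out in the state comments above reduces to a single test; a one-line sketch, with a hypothetical helper name:

/* Illustrative only: every state except LINKED is treated like NOLINK. */
static inline int example_rx_filtering_enabled(enum ieee80211_state state)
{
	return state == IEEE80211_LINKED;
}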
-
-#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
-#define DEFAULT_FTS 2346
-
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
-#define CFG_IEEE80211_RTS (1<<2)
-
-#define IEEE80211_24GHZ_MIN_CHANNEL 1
-#define IEEE80211_24GHZ_MAX_CHANNEL 14
-#define IEEE80211_24GHZ_CHANNELS (IEEE80211_24GHZ_MAX_CHANNEL - \
- IEEE80211_24GHZ_MIN_CHANNEL + 1)
-
-#define IEEE80211_52GHZ_MIN_CHANNEL 34
-#define IEEE80211_52GHZ_MAX_CHANNEL 165
-#define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \
- IEEE80211_52GHZ_MIN_CHANNEL + 1)
-
-
-
-typedef struct tx_pending_t{
- int frag;
- struct ieee80211_txb *txb;
-} tx_pending_t;
-
-typedef struct _bandwidth_autoswitch {
- long threshold_20Mhzto40Mhz;
- long threshold_40Mhzto20Mhz;
- bool bforced_tx20Mhz;
- bool bautoswitch_enable;
-} bandwidth_autoswitch, *pbandwidth_autoswitch;
-
-
-
-#define REORDER_WIN_SIZE 128
-#define REORDER_ENTRY_NUM 128
-typedef struct _RX_REORDER_ENTRY {
- struct list_head List;
- u16 SeqNum;
- struct ieee80211_rxb *prxb;
-} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
-
-typedef enum _Fsync_State {
- Default_Fsync,
- HW_Fsync,
- SW_Fsync
-} Fsync_State;
-
-/* Power save mode configured. */
-typedef enum _RT_PS_MODE {
- eActive, /* Active/Continuous access. */
- eMaxPs, /* Max power save mode. */
- eFastPs /* Fast power save mode. */
-} RT_PS_MODE;
-
-typedef enum _IPS_CALLBACK_FUNCION {
- IPS_CALLBACK_NONE = 0,
- IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
- IPS_CALLBACK_JOIN_REQUEST = 2,
-} IPS_CALLBACK_FUNCION;
-
-typedef enum _RT_JOIN_ACTION {
- RT_JOIN_INFRA = 1,
- RT_JOIN_IBSS = 2,
- RT_START_IBSS = 3,
- RT_NO_ACTION = 4,
-} RT_JOIN_ACTION;
-
-typedef struct _IbssParms {
- u16 atimWin;
-} IbssParms, *PIbssParms;
-#define MAX_NUM_RATES 264 /* Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko. */
-
-/* RF state. */
-typedef enum _RT_RF_POWER_STATE {
- eRfOn,
- eRfSleep,
- eRfOff
-} RT_RF_POWER_STATE;
-
-typedef struct _RT_POWER_SAVE_CONTROL {
-
-	/* Inactive Power Save (IPS): disable RF when disconnected */
- bool bInactivePs;
- bool bIPSModeBackup;
- bool bSwRfProcessing;
- RT_RF_POWER_STATE eInactivePowerState;
- struct work_struct InactivePsWorkItem;
- struct timer_list InactivePsTimer;
-
- /* Return point for join action */
- IPS_CALLBACK_FUNCION ReturnPoint;
-
-	/* Recorded parameters for a rescheduled JoinRequest */
- bool bTmpBssDesc;
- RT_JOIN_ACTION tmpJoinAction;
- struct ieee80211_network tmpBssDesc;
-
-	/* Recorded parameters for a rescheduled MgntLinkRequest */
- bool bTmpScanOnly;
- bool bTmpActiveScan;
- bool bTmpFilterHiddenAP;
- bool bTmpUpdateParms;
- u8 tmpSsidBuf[33];
- OCTET_STRING tmpSsid2Scan;
- bool bTmpSsid2Scan;
- u8 tmpNetworkType;
- u8 tmpChannelNumber;
- u16 tmpBcnPeriod;
- u8 tmpDtimPeriod;
- u16 tmpmCap;
- OCTET_STRING tmpSuppRateSet;
- u8 tmpSuppRateBuf[MAX_NUM_RATES];
- bool bTmpSuppRate;
- IbssParms tmpIbpm;
- bool bTmpIbpm;
-
-	/* Leisure Power Save: disable RF if connected but traffic is not busy */
- bool bLeisurePs;
-
-} RT_POWER_SAVE_CONTROL, *PRT_POWER_SAVE_CONTROL;
-
-typedef u32 RT_RF_CHANGE_SOURCE;
-#define RF_CHANGE_BY_SW BIT31
-#define RF_CHANGE_BY_HW BIT30
-#define RF_CHANGE_BY_PS BIT29
-#define RF_CHANGE_BY_IPS BIT28
-#define RF_CHANGE_BY_INIT 0 /* Do not change the RFOff reason. */
-
-#ifdef ENABLE_DOT11D
-typedef enum {
- COUNTRY_CODE_FCC = 0,
- COUNTRY_CODE_IC = 1,
- COUNTRY_CODE_ETSI = 2,
- COUNTRY_CODE_SPAIN = 3,
- COUNTRY_CODE_FRANCE = 4,
- COUNTRY_CODE_MKK = 5,
- COUNTRY_CODE_MKK1 = 6,
- COUNTRY_CODE_ISRAEL = 7,
- COUNTRY_CODE_TELEC,
- COUNTRY_CODE_MIC,
- COUNTRY_CODE_GLOBAL_DOMAIN
-} country_code_type_t;
-#endif
-
-#define RT_MAX_LD_SLOT_NUM 10
-typedef struct _RT_LINK_DETECT_T {
-
- u32 NumRecvBcnInPeriod;
- u32 NumRecvDataInPeriod;
-
- u32 RxBcnNum[RT_MAX_LD_SLOT_NUM]; /* number of Rx beacon / CheckForHang_period to determine link status */
- u32 RxDataNum[RT_MAX_LD_SLOT_NUM]; /* number of Rx data / CheckForHang_period to determine link status */
- u16 SlotNum; /* number of CheckForHang period to determine link status */
- u16 SlotIndex;
-
- u32 NumTxOkInPeriod;
- u32 NumRxOkInPeriod;
- bool bBusyTraffic;
-} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
-
-
-struct ieee80211_device {
- struct net_device *dev;
- struct ieee80211_security sec;
-
- /* hw security related */
- u8 hwsec_active; /* hw security active. */
- bool is_silent_reset;
- bool ieee_up;
- bool bSupportRemoteWakeUp;
- RT_PS_MODE dot11PowerSaveMode; /* Power save mode configured. */
- bool actscanning;
- bool beinretry;
- RT_RF_POWER_STATE eRFPowerState;
- RT_RF_CHANGE_SOURCE RfOffReason;
- bool is_set_key;
-	/* 11n spec related. I wonder if these info structures need to be moved out of ieee80211_device. */
-
- /* 11n HT below */
- PRT_HIGH_THROUGHPUT pHTInfo;
- spinlock_t bw_spinlock;
-
- spinlock_t reorder_spinlock;
-	/* For the HT operational rate set. We use this one for the HT data rate
-	 * to separate different descriptors;
-	 * the way to fill this is the same as in the IE.
-	 */
- u8 Regdot11HTOperationalRateSet[16]; /* use RATR format */
- u8 dot11HTOperationalRateSet[16]; /* use RATR format */
- u8 RegHTSuppRateSet[16];
- u8 HTCurrentOperaRate;
- u8 HTHighestOperaRate;
- /* wb added for rate operation mode to firmware */
- u8 bTxDisableRateFallBack;
- u8 bTxUseDriverAssingedRate;
- atomic_t atm_chnlop;
- atomic_t atm_swbw;
-
- /* 802.11e and WMM Traffic Stream Info (TX) */
- struct list_head Tx_TS_Admit_List;
- struct list_head Tx_TS_Pending_List;
- struct list_head Tx_TS_Unused_List;
- TX_TS_RECORD TxTsRecord[TOTAL_TS_NUM];
- /* 802.11e and WMM Traffic Stream Info (RX) */
- struct list_head Rx_TS_Admit_List;
- struct list_head Rx_TS_Pending_List;
- struct list_head Rx_TS_Unused_List;
- RX_TS_RECORD RxTsRecord[TOTAL_TS_NUM];
- RX_REORDER_ENTRY RxReorderEntry[128];
- struct list_head RxReorder_Unused_List;
- /* Qos related. */
-/* PSTA_QOS pStaQos; */
-	u8 ForcedPriority;	/* Force per-packet priority 1~7 (default: 0, i.e. do not force it). */
-
-
- /* Bookkeeping structures */
- struct net_device_stats stats;
- struct ieee80211_stats ieee_stats;
- struct ieee80211_softmac_stats softmac_stats;
-
- /* Probe / Beacon management */
- struct list_head network_free_list;
- struct list_head network_list;
- struct ieee80211_network *networks;
- int scans;
- int scan_age;
-
- int iw_mode; /* operating mode (IW_MODE_*) */
- struct iw_spy_data spy_data;
-
- spinlock_t lock;
- spinlock_t wpax_suitlist_lock;
-
- int tx_headroom; /* Set to size of any additional room needed at front
- * of allocated Tx SKBs */
- u32 config;
-
- /* WEP and other encryption related settings at the device level */
- int open_wep; /* Set to 1 to allow unencrypted frames */
- int auth_mode;
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
- * WEP key changes */
-
- /* If the host performs {en,de}cryption, then set to 1 */
- int host_encrypt;
- int host_encrypt_msdu;
- int host_decrypt;
- /* host performs multicast decryption */
- int host_mc_decrypt;
-
- /* host should strip IV and ICV from protected frames */
- /* meaningful only when hardware decryption is being used */
- int host_strip_iv_icv;
-
- int host_open_frag;
- int host_build_iv;
- int ieee802_1x; /* is IEEE 802.1X used */
-
- /* WPA data */
- bool bHalfWirelessN24GMode;
- int wpa_enabled;
- int drop_unencrypted;
- int tkip_countermeasures;
- int privacy_invoked;
- size_t wpa_ie_len;
- u8 *wpa_ie;
- u8 ap_mac_addr[6];
- u16 pairwise_key_type;
- u16 group_key_type;
- struct list_head crypt_deinit_list;
- struct ieee80211_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
-
- int bcrx_sta_key; /* use individual keys to override default keys even
- * with RX of broad/multicast frames */
-
- /* Fragmentation structures */
-	/* each stream contains an entry */
- struct ieee80211_frag_entry frag_cache[17][IEEE80211_FRAG_CACHE_LEN];
- unsigned int frag_next_idx[17];
- u16 fts; /* Fragmentation Threshold */
-#define DEFAULT_RTS_THRESHOLD 2346U
-#define MIN_RTS_THRESHOLD 1
-#define MAX_RTS_THRESHOLD 2346U
- u16 rts; /* RTS threshold */
-
- /* Association info */
- u8 bssid[ETH_ALEN];
-
-	/* This stores info for the current network:
-	 * either the network we are associated with in INFRASTRUCTURE mode,
-	 * or the network that we are creating in MASTER mode.
-	 * Ad-hoc is a mixture ;-).
-	 * Note that in infrastructure mode, even when not associated,
-	 * the bssid and essid fields may be valid (if wap_set and ssid_set
-	 * are true) as they carry the value set by the user via iwconfig.
-	 */
- struct ieee80211_network current_network;
-
- enum ieee80211_state state;
-
- int short_slot;
- int reg_mode;
- int mode; /* A, B, G */
- int modulation; /* CCK, OFDM */
- int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
- int abg_true; /* ABG flag */
-
-	/* used for forcing the ibss workqueue to terminate
-	 * without waiting for the synchronous scan to terminate
-	 */
- short sync_scan_hurryup;
-
- int perfect_rssi;
- int worst_rssi;
-
- u16 prev_seq_ctl; /* used to drop duplicate frames */
-
-	/* map of allowed channels. 0 is dummy */
-	/* FIXME: remember to default to a basic channel plan depending on the PHY type */
-#ifdef ENABLE_DOT11D
- void *pDot11dInfo;
- bool bGlobalDomain;
-#else
- int channel_map[MAX_CHANNEL_NUMBER+1];
-#endif
- int rate; /* current rate */
- int basic_rate;
-	/* FIXME: place callback; see if redundant with softmac_features */
- short active_scan;
-
- /* this contains flags for selectively enable softmac support */
- u16 softmac_features;
-
- /* if the sequence control field is not filled by HW */
- u16 seq_ctrl[5];
-
- /* association procedure transaction sequence number */
- u16 associate_seq;
-
- /* AID for RTXed association responses */
- u16 assoc_id;
-
- /* power save mode related*/
- short ps;
- short sta_sleep;
- int ps_timeout;
- int ps_period;
- struct tasklet_struct ps_task;
- u32 ps_th;
- u32 ps_tl;
-
- short raw_tx;
- /* used if IEEE_SOFTMAC_TX_QUEUE is set */
- short queue_stop;
- short scanning;
- short proto_started;
-
- struct semaphore wx_sem;
- struct semaphore scan_sem;
-
- spinlock_t mgmt_tx_lock;
- spinlock_t beacon_lock;
-
- short beacon_txing;
-
- short wap_set;
- short ssid_set;
-
- u8 wpax_type_set;
- u32 wpax_type_notify;
-
- /* QoS related flag */
- char init_wmmparam_flag;
- /* set on initialization */
- u8 qos_support;
-
- /* for discarding duplicated packets in IBSS */
- struct list_head ibss_mac_hash[IEEE_IBSS_MAC_HASH_SIZE];
-
- /* for discarding duplicated packets in BSS */
- u16 last_rxseq_num[17]; /* rx seq previous per-tid */
-	u16 last_rxfrag_num[17];	/* rx frag previous per-tid */
- unsigned long last_packet_time[17];
-
- /* for PS mode */
- unsigned long last_rx_ps_time;
-
- /* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
- struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
- int mgmt_queue_head;
- int mgmt_queue_tail;
-/* added for rtl819x */
-#define IEEE80211_QUEUE_LIMIT 128
- u8 AsocRetryCount;
- unsigned int hw_header;
- struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE];
- struct sk_buff_head skb_aggQ[MAX_QUEUE_SIZE];
- struct sk_buff_head skb_drv_aggQ[MAX_QUEUE_SIZE];
- u32 sta_edca_param[4];
- bool aggregation;
- /* Enable/Disable Rx immediate BA capability. */
- bool enable_rx_imm_BA;
- bool bibsscoordinator;
-
-	/* +by amy for DM, 080515 */
- /* Dynamic Tx power for near/far range enable/Disable */
- bool bdynamic_txpower_enable;
-
- bool bCTSToSelfEnable;
- u8 CTSToSelfTH;
-
- u32 fsync_time_interval;
- u32 fsync_rate_bitmap;
- u8 fsync_rssi_threshold;
- bool bfsync_enable;
-
- u8 fsync_multiple_timeinterval; /* FsyncMultipleTimeInterval * FsyncTimeInterval */
- u32 fsync_firstdiff_ratethreshold; /* low threshold */
- u32 fsync_seconddiff_ratethreshold; /* decrease threshold */
- Fsync_State fsync_state;
- bool bis_any_nonbepkts;
- /* 20Mhz 40Mhz AutoSwitch Threshold */
- bandwidth_autoswitch bandwidth_auto_switch;
- /* for txpower tracking */
- bool FwRWRF;
-
- /* added by amy for AP roaming */
- RT_LINK_DETECT_T LinkDetectInfo;
- /* added by amy for ps */
- RT_POWER_SAVE_CONTROL PowerSaveControl;
- /* used if IEEE_SOFTMAC_TX_QUEUE is set */
- struct tx_pending_t tx_pending;
-
- /* used if IEEE_SOFTMAC_ASSOCIATE is set */
- struct timer_list associate_timer;
-
- /* used if IEEE_SOFTMAC_BEACONS is set */
- struct timer_list beacon_timer;
- struct work_struct associate_complete_wq;
- struct work_struct associate_procedure_wq;
- struct delayed_work softmac_scan_wq;
- struct delayed_work associate_retry_wq;
- struct delayed_work start_ibss_wq;
- struct work_struct wx_sync_scan_wq;
- struct workqueue_struct *wq;
- /* Qos related. Added by Annie, 2005-11-01. */
- /* STA_QOS StaQos; */
-
-
- /* Callback functions */
- void (*set_security)(struct net_device *dev,
- struct ieee80211_security *sec);
-
-	/* Used to TX data frames using txb structs.
-	 * This is not used if the IEEE_SOFTMAC_TX_QUEUE flag
-	 * is set in softmac_features.
-	 */
- int (*hard_start_xmit)(struct ieee80211_txb *txb,
- struct net_device *dev);
-
- int (*reset_port)(struct net_device *dev);
- int (*is_queue_full) (struct net_device *dev, int pri);
-
- int (*handle_management) (struct net_device *dev,
- struct ieee80211_network *network, u16 type);
- int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
-
-	/* Softmac-generated (management) frames are TXed via this
-	 * callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
-	 * not set. As some cards may have different HW queues that
-	 * one might want to use for data and management frames,
-	 * the option to have two callbacks might be useful.
-	 * This function can't sleep.
-	 */
- int (*softmac_hard_start_xmit)(struct sk_buff *skb,
- struct net_device *dev);
-
-	/* used instead of hard_start_xmit (not softmac_hard_start_xmit)
-	 * if the IEEE_SOFTMAC_TX_QUEUE feature is used to TX data
-	 * frames. If the option IEEE_SOFTMAC_SINGLE_QUEUE is also set
-	 * then management frames are also sent via this callback.
-	 * This function can't sleep.
-	 */
- void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
- struct net_device *dev, int rate);
-
-	/* stops the HW queue for DATA frames. Useful to avoid
-	 * wasting time TXing data frames while we are reassociating.
-	 * This function can sleep.
-	 */
- void (*data_hard_stop)(struct net_device *dev);
-
-	/* this is complementary to data_hard_stop */
- void (*data_hard_resume)(struct net_device *dev);
-
-	/* ask the driver to retune the radio.
-	 * This function can sleep; the driver should ensure
-	 * the radio has been switched before returning.
-	 */
- void (*set_chan)(struct net_device *dev, short ch);
-
-	/* These are not used if the ieee stack takes care of
-	 * scanning (IEEE_SOFTMAC_SCAN feature set).
-	 * In this case only set_chan is used.
-	 *
-	 * The syncro version is similar to start_scan but
-	 * does not return until all channels have been scanned.
-	 * It is called in user context and should sleep;
-	 * it is called from a work_queue when switching to ad-hoc mode
-	 * or on behalf of an iwlist scan when the card is associated
-	 * and the root user asks for a scan.
-	 * The function stop_scan should stop both the syncro and
-	 * background scanning and can sleep.
-	 * The function start_scan should initiate the background
-	 * scanning and can't sleep.
-	 */
- void (*scan_syncro)(struct net_device *dev);
- void (*start_scan)(struct net_device *dev);
- void (*stop_scan)(struct net_device *dev);
-
-	/* indicates to the driver that the link state has changed,
-	 * for example that the card is now associated.
-	 * The driver might be interested in this to apply RX filter
-	 * rules or simply light the LINK LED
-	 */
- void (*link_change)(struct net_device *dev);
-
-	/* these two functions indicate to the HW when to start
-	 * and stop sending beacons. This is used when
-	 * IEEE_SOFTMAC_BEACONS is not set. For now,
-	 * stop_send_beacons is NOT guaranteed to be called only
-	 * after start_send_beacons.
-	 */
- void (*start_send_beacons) (struct net_device *dev, u16 tx_rate);
- void (*stop_send_beacons) (struct net_device *dev);
-
- /* power save mode related */
- void (*sta_wake_up) (struct net_device *dev);
- void (*ps_request_tx_ack) (struct net_device *dev);
- void (*enter_sleep_state) (struct net_device *dev, u32 th, u32 tl);
- short (*ps_is_queue_empty) (struct net_device *dev);
- int (*handle_beacon) (struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
- int (*handle_assoc_response) (struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
-
-
-	/* check whether Tx HW resources are available */
- short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
- /* added by wb for HT related */
- void (*SetBWModeHandler)(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
- bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
- void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
- void (*InitialGainHandler)(struct net_device *dev, u8 Operation);
-
- /* This must be the last item so that it points to the data
- * allocated beyond this structure by alloc_ieee80211 */
- u8 priv[0];
-};
-
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
-#define IEEE_N_5G (1<<5)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
-
-/* Generate an 802.11 header */
-
-/* Uses the channel change callback directly
- * instead of [start/stop] scan callbacks
- */
-#define IEEE_SOFTMAC_SCAN (1<<2)
-
-/* Perform authentication and association handshake */
-#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
-
-/* Generate probe requests */
-#define IEEE_SOFTMAC_PROBERQ (1<<4)
-
- /* Generate responses to probe requests */
-#define IEEE_SOFTMAC_PROBERS (1<<5)
-
- /* The ieee802.11 stack will manage the netif queue
- * wake/stop for the driver, taking care of 802.11
- * fragmentation. See softmac.c for details. */
-#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
-
-/* Uses only the softmac_data_hard_start_xmit
- * even for TX management frames.
- */
-#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
-
-/* Generate beacons. The stack will enqueue beacons
- * to the card
- */
-#define IEEE_SOFTMAC_BEACONS (1<<6)
-
-static inline void *ieee80211_priv(struct net_device *dev)
-{
- return ((struct ieee80211_device *)netdev_priv(dev))->priv;
-}
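The priv[0] member at the end of struct ieee80211_device and the ieee80211_priv() accessor above implement the usual "driver-private data appended after the shared structure" pattern. A minimal, illustrative sketch follows (my_priv and example_touch_priv are hypothetical names; the device is assumed to have been created with alloc_ieee80211(sizeof(struct my_priv))):

/* Illustrative sketch only: my_priv is a hypothetical driver-private
 * structure, assumed to have been allocated together with the device
 * via alloc_ieee80211(sizeof(struct my_priv)). */
struct my_priv {
	int rf_state;
};

static void example_touch_priv(struct net_device *dev)
{
	/* netdev_priv(dev) is the ieee80211_device; its ->priv member
	 * points just past it, into the driver-private area. */
	struct my_priv *priv = ieee80211_priv(dev);

	priv->rf_state = 0;
}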
-
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
-{
- /* Single white space is for Linksys APs */
- if (essid_len == 1 && essid[0] == ' ')
- return 1;
-
- /* Otherwise, if the entire essid is 0, we assume it is hidden */
- while (essid_len) {
- essid_len--;
- if (essid[essid_len] != '\0')
- return 0;
- }
-
- return 1;
-}
-
-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
-{
- /*
- * It is possible for both access points and our device to support
- * combinations of modes, so as long as there is one valid combination
- * of ap/device supported modes, then return success
- *
- */
- if ((mode & IEEE_A) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
- (ieee->freq_band & IEEE80211_52GHZ_BAND))
- return 1;
-
- if ((mode & IEEE_G) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
- (ieee->freq_band & IEEE80211_24GHZ_BAND))
- return 1;
-
- if ((mode & IEEE_B) &&
- (ieee->modulation & IEEE80211_CCK_MODULATION) &&
- (ieee->freq_band & IEEE80211_24GHZ_BAND))
- return 1;
-
- return 0;
-}
-
-extern inline int ieee80211_get_hdrlen(u16 fc)
-{
- int hdrlen = IEEE80211_3ADDR_LEN;
-
- switch (WLAN_FC_GET_TYPE(fc)) {
- case IEEE80211_FTYPE_DATA:
- if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
- hdrlen = IEEE80211_4ADDR_LEN; /* Addr4 */
- if (IEEE80211_QOS_HAS_SEQ(fc))
- hdrlen += 2; /* QOS ctrl*/
- break;
- case IEEE80211_FTYPE_CTL:
- switch (WLAN_FC_GET_STYPE(fc)) {
- case IEEE80211_STYPE_CTS:
- case IEEE80211_STYPE_ACK:
- hdrlen = IEEE80211_1ADDR_LEN;
- break;
- default:
- hdrlen = IEEE80211_2ADDR_LEN;
- break;
- }
- break;
- }
-
- return hdrlen;
-}
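As a concrete reading of ieee80211_get_hdrlen() above, here is a hedged sketch of the values it produces for a few frame-control words (assuming the usual 10/16/24/30-byte IEEE80211_xADDR_LEN definitions earlier in this header; example_hdrlen is not part of the original code):

/* Illustrative only: expected header lengths for a few frame types. */
static void example_hdrlen(void)
{
	/* plain data frame, ToDS only: 3-address header (24 bytes) */
	int d3 = ieee80211_get_hdrlen(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);

	/* WDS data frame, ToDS and FromDS: 4-address header (30 bytes) */
	int d4 = ieee80211_get_hdrlen(IEEE80211_FTYPE_DATA |
				      IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);

	/* ACK control frame: 1-address header (10 bytes) */
	int ack = ieee80211_get_hdrlen(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);

	(void)d3; (void)d4; (void)ack;
}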
-
-static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr)
-{
- switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
- case IEEE80211_1ADDR_LEN:
- return ((struct ieee80211_hdr_1addr *)hdr)->payload;
- case IEEE80211_2ADDR_LEN:
- return ((struct ieee80211_hdr_2addr *)hdr)->payload;
- case IEEE80211_3ADDR_LEN:
- return ((struct ieee80211_hdr_3addr *)hdr)->payload;
- case IEEE80211_4ADDR_LEN:
- return ((struct ieee80211_hdr_4addr *)hdr)->payload;
- }
- return NULL;
-}
-
-static inline int ieee80211_is_ofdm_rate(u8 rate)
-{
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_9MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_18MB:
- case IEEE80211_OFDM_RATE_24MB:
- case IEEE80211_OFDM_RATE_36MB:
- case IEEE80211_OFDM_RATE_48MB:
- case IEEE80211_OFDM_RATE_54MB:
- return 1;
- }
- return 0;
-}
-
-static inline int ieee80211_is_cck_rate(u8 rate)
-{
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- return 1;
- }
- return 0;
-}
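A brief usage sketch for the two rate classifiers above (rate_modulation_name is an illustrative helper, not part of this header):

/* Illustrative helper: map a raw rate byte to a modulation name using the
 * classifiers above; the basic-rate flag is masked off inside them. */
static inline const char *rate_modulation_name(u8 rate)
{
	if (ieee80211_is_cck_rate(rate))
		return "CCK";
	if (ieee80211_is_ofdm_rate(rate))
		return "OFDM";
	return "unknown";
}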
-
-
-/* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev);
-extern struct net_device *alloc_ieee80211(int sizeof_priv);
-
-extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
-
-/* ieee80211_tx.c */
-
-extern int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len);
-
-extern int ieee80211_xmit(struct sk_buff *skb,
- struct net_device *dev);
-extern void ieee80211_txb_free(struct ieee80211_txb *);
-
-
-/* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats);
-extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *header,
- struct ieee80211_rx_stats *stats);
-
-/* ieee80211_wx.c */
-extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-#if WIRELESS_EXT >= 18
-extern int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-extern int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-#endif
-extern int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
-
-/* ieee80211_softmac.c */
-extern short ieee80211_is_54g(struct ieee80211_network net);
-extern short ieee80211_is_shortslot(struct ieee80211_network net);
-extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype);
-extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
-
-void SendDisassociation(struct ieee80211_device *ieee, u8* asSta, u8 asRsn);
-extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
-
-extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
-extern void ieee80211_start_bss(struct ieee80211_device *ieee);
-extern void ieee80211_start_master_bss(struct ieee80211_device *ieee);
-extern void ieee80211_start_ibss(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_init(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_free(struct ieee80211_device *ieee);
-extern void ieee80211_associate_abort(struct ieee80211_device *ieee);
-extern void ieee80211_disassociate(struct ieee80211_device *ieee);
-extern void ieee80211_stop_scan(struct ieee80211_device *ieee);
-extern void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
-extern void ieee80211_check_all_nets(struct ieee80211_device *ieee);
-extern void ieee80211_start_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
-extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
-extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
-extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p);
-extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
-extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
-
-extern void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
-
-/* ieee80211_crypt_ccmp&tkip&wep.c */
-extern void ieee80211_tkip_null(void);
-extern void ieee80211_wep_null(void);
-extern void ieee80211_ccmp_null(void);
-
-/* ieee80211_softmac_wx.c */
-
-extern int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
-
-extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra);
-
-extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-extern int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-
-extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
-
-
-extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* HT */
-#define MAX_RECEIVE_BUFFER_SIZE 9100
-extern void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
-extern void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
-
-void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-extern void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
-extern void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 isEncrypt);
-extern void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len);
-extern void HTOnAssocRsp(struct ieee80211_device *ieee);
-extern void HTInitializeHTInfo(struct ieee80211_device *ieee);
-extern void HTInitializeBssDesc(PBSS_HT pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-extern u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter);
-extern u8 MCS_FILTER_ALL[];
-extern u16 MCS_DATA_RATE[2][2][77] ;
-extern u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
-extern void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
-extern u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
-extern u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
-extern u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
-/* function in BAPROC.c */
-extern int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
-extern int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
-extern int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
-extern void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS, u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
-extern void BaSetupTimeOut(unsigned long data);
-extern void TxBaInactTimeout(unsigned long data);
-extern void RxBaInactTimeout(unsigned long data);
-extern void ResetBaEntry(PBA_RECORD pBA);
-/* function in TS.c */
-extern bool GetTs(
- struct ieee80211_device *ieee,
- PTS_COMMON_INFO *ppTS,
- u8 *Addr,
- u8 TID,
- TR_SELECT TxRxSelect, /* Rx:1, Tx:0 */
- bool bAddNewTs
- );
-extern void TSInitialize(struct ieee80211_device *ieee);
-extern void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
-extern void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
-extern void RemoveAllTS(struct ieee80211_device *ieee);
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
-
-extern const long ieee80211_wlan_frequencies[];
-
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
-{
- ieee->scans++;
-}
-
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
-{
- return ieee->scans;
-}
-
-static inline const char *escape_essid(const char *essid, u8 essid_len)
-{
- static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
- const char *s = essid;
- char *d = escaped;
-
- if (ieee80211_is_empty_essid(essid, essid_len)) {
- memcpy(escaped, "<hidden>", sizeof("<hidden>"));
- return escaped;
- }
-
- essid_len = min(essid_len, (u8)IW_ESSID_MAX_SIZE);
- while (essid_len--) {
- if (*s == '\0') {
- *d++ = '\\';
- *d++ = '0';
- s++;
- } else {
- *d++ = *s++;
- }
- }
- *d = '\0';
- return escaped;
-}
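A hedged usage note for escape_essid() above: it returns a pointer to a single static buffer, so the result must be consumed before the next call, and the helper is not safe for concurrent callers. A minimal sketch, assuming the ssid/ssid_len members of struct ieee80211_network (example_print_ssid is illustrative only):

/* Illustrative only: print a scanned network's SSID safely. */
static void example_print_ssid(struct ieee80211_network *net)
{
	printk(KERN_DEBUG "scan result: '%s'\n",
	       escape_essid(net->ssid, net->ssid_len));
}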
-
-/* Since these functions are more closely related to hardware settings, it is
- * better to refer to them through the ieee handler.
- */
-extern short check_nic_enough_desc(struct net_device *dev, int queue_index);
-extern int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int ieee80211_parse_info_param(struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- u16 length,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats);
-
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray, u8 index);
-#define RT_ASOC_RETRY_LIMIT 5
-#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index fb78ed2876e..d6f55c290db 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -85,7 +85,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
- for(i=0; i<(sizeof(ieee80211_modes)/sizeof(ieee80211_modes[0])); i++) {
+ for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
if(network->mode&(1<<i)) {
sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
pname +=ieee80211_modes[i].mode_size;
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 69a2721e850..6206f929a65 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -39,7 +39,7 @@
#include <linux/random.h>
#include <linux/version.h>
#include <asm/io.h>
-#include "ieee80211.h"
+#include "ieee80211/ieee80211.h"
#define RTL8192U
#define RTL819xU_MODULE_NAME "rtl819xU"
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index f38472c2e75..1ff7850cc1e 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -410,14 +410,12 @@ u16 read_nic_word(struct net_device *dev, int indx)
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 2, HZ / 2);
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f,
+ &data, 2, HZ / 2);
if (status < 0)
- {
printk("read_nic_word TimeOut! status:%d\n", status);
- }
-
return data;
}
@@ -431,13 +429,10 @@ u16 read_nic_word_E(struct net_device *dev, int indx)
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx|0xfe00, 0, &data, 2, HZ / 2);
+ indx|0xfe00, 0, &data, 2, HZ / 2);
if (status < 0)
- {
printk("read_nic_word TimeOut! status:%d\n", status);
- }
-
return data;
}
@@ -446,31 +441,29 @@ u32 read_nic_dword(struct net_device *dev, int indx)
{
u32 data;
int status;
-// int result;
+ /* int result; */
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 4, HZ / 2);
-// if(0 != result) {
-// printk(KERN_WARNING "read size of data = %d\, date = %d\n", result, data);
-// }
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f,
+ &data, 4, HZ / 2);
+ /* if(0 != result) {
+ * printk(KERN_WARNING "read size of data = %d\, date = %d\n",
+ * result, data);
+ * }
+ */
if (status < 0)
- {
printk("read_nic_dword TimeOut! status:%d\n", status);
- }
-
-
return data;
}
-
-//u8 read_phy_cck(struct net_device *dev, u8 adr);
-//u8 read_phy_ofdm(struct net_device *dev, u8 adr);
+/* u8 read_phy_cck(struct net_device *dev, u8 adr); */
+/* u8 read_phy_ofdm(struct net_device *dev, u8 adr); */
/* this might still be called in what was the PHY rtl8185/rtl8192 common code;
 * plans are to possibly turn it into one common code again...
*/
@@ -478,26 +471,22 @@ inline void force_pci_posting(struct net_device *dev)
{
}
-
static struct net_device_stats *rtl8192_stats(struct net_device *dev);
void rtl8192_commit(struct net_device *dev);
-//void rtl8192_restart(struct net_device *dev);
+/* void rtl8192_restart(struct net_device *dev); */
void rtl8192_restart(struct work_struct *work);
-//void rtl8192_rq_tx_ack(struct work_struct *work);
-
+/* void rtl8192_rq_tx_ack(struct work_struct *work); */
void watch_dog_timer_callback(unsigned long data);
/****************************************************************************
- -----------------------------PROCFS STUFF-------------------------
-*****************************************************************************/
-
-static struct proc_dir_entry *rtl8192_proc = NULL;
-
+ * -----------------------------PROCFS STUFF-------------------------
+*****************************************************************************
+ */
+static struct proc_dir_entry *rtl8192_proc;
-static int proc_get_stats_ap(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
+static int proc_get_stats_ap(char *page, char **start, off_t offset, int count,
+ int *eof, void *data)
{
struct net_device *dev = data;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -508,18 +497,12 @@ static int proc_get_stats_ap(char *page, char **start,
list_for_each_entry(target, &ieee->network_list, list) {
- len += snprintf(page + len, count - len,
- "%s ", target->ssid);
-
- if(target->wpa_ie_len>0 || target->rsn_ie_len>0){
- len += snprintf(page + len, count - len,
- "WPA\n");
- }
- else{
- len += snprintf(page + len, count - len,
- "non_WPA\n");
- }
+ len += snprintf(page + len, count - len, "%s ", target->ssid);
+ if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
+ len += snprintf(page + len, count - len, "WPA\n");
+ else
+ len += snprintf(page + len, count - len, "non_WPA\n");
}
*eof = 1;
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index b2f7a571b1c..f4cf2801136 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -15,7 +15,7 @@
#ifndef R8180_WX_H
#define R8180_WX_H
//#include <linux/wireless.h>
-//#include "ieee80211.h"
+
extern struct iw_handler_def r8192_wx_handlers_def;
/* Enable the rtl819x_core.c to share this function, david 2008.9.22 */
extern struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 3cc2d571f9b..b136ee48828 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -11,7 +11,7 @@
* NDIS_STATUS_FAILURE - the following initialization process should be terminated
* NDIS_STATUS_SUCCESS - if firmware initialization process success
**************************************************************************************************/
-//#include "ieee80211.h"
+
#include "r8192U.h"
#include "r8192U_hw.h"
#include "r819xU_firmware_img.h"
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644
index 0a9c39c7f2b..00000000000
--- a/drivers/staging/sep/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config DX_SEP
- tristate "Discretix SEP driver"
-# depends on MRST
- depends on RAR_REGISTER && PCI
- default y
- help
- Discretix SEP driver
-
- If unsure say M. The compiled module will be
- called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644
index 628d5f91941..00000000000
--- a/drivers/staging/sep/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644
index ff0e931dab6..00000000000
--- a/drivers/staging/sep/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-Todo's so far (from Alan Cox)
-- Fix firmware loading
-- Get firmware into firmware git tree
-- Review and tidy each algorithm function
-- Check whether it can be plugged into any of the kernel crypto API
- interfaces
-- Do something about the magic shared memory interface and replace it
- with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644
index 9200524bb64..00000000000
--- a/drivers/staging/sep/sep_dev.h
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef __SEP_DEV_H__
-#define __SEP_DEV_H__
-
-/*
- *
- * sep_dev.h - Security Processor Device Structures
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Alan Cox alan@linux.intel.com
- *
- */
-
-struct sep_device {
- /* pointer to pci dev */
- struct pci_dev *pdev;
-
- unsigned long in_use;
-
- /* address of the shared memory allocated during init for SEP driver
- (coherent alloc) */
- void *shared_addr;
- /* the physical address of the shared area */
- dma_addr_t shared_bus;
-
- /* restricted access region (coherent alloc) */
- dma_addr_t rar_bus;
- void *rar_addr;
- /* firmware regions: cache is at rar_addr */
- unsigned long cache_size;
-
- /* follows the cache */
- dma_addr_t resident_bus;
- unsigned long resident_size;
- void *resident_addr;
-
- /* start address of the access to the SEP registers from driver */
- void __iomem *reg_addr;
- /* transaction counter that coordinates the transactions between SEP and HOST */
- unsigned long send_ct;
- /* counter for the messages from sep */
- unsigned long reply_ct;
- /* counter for the number of bytes allocated in the pool for the current
- transaction */
- unsigned long data_pool_bytes_allocated;
-
- /* array of pointers to the pages that represent input data for the synchronous
- DMA action */
- struct page **in_page_array;
-
- /* array of pointers to the pages that represent output data for the synchronous
- DMA action */
- struct page **out_page_array;
-
- /* number of pages in the sep_in_page_array */
- unsigned long in_num_pages;
-
- /* number of pages in the sep_out_page_array */
- unsigned long out_num_pages;
-
- /* global data for every flow */
- struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
-
- /* pointer to the workqueue that handles the flow done interrupts */
- struct workqueue_struct *flow_wq;
-
-};
-
-static struct sep_device *sep_dev;
-
-static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-{
- void __iomem *addr = dev->reg_addr + reg;
- writel(value, addr);
-}
-
-static inline u32 sep_read_reg(struct sep_device *dev, int reg)
-{
- void __iomem *addr = dev->reg_addr + reg;
- return readl(addr);
-}
-
-/* wait for SRAM write complete (indirect write) */
-static inline void sep_wait_sram_write(struct sep_device *dev)
-{
- u32 reg_val;
- do
- reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
- while (!(reg_val & 1));
-}
-
-
-#endif
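The register accessors and the SRAM-write wait above are the building blocks used throughout sep_driver.c. The loop in sep_wait_sram_write() polls forever; a bounded variant of the same pattern is sketched below (illustrative only: MAX_SRAM_POLLS is a made-up limit, and the register name is the one used above from sep_driver_hw_defs.h):

/* Illustrative sketch, not part of the original header. */
#define MAX_SRAM_POLLS 1000000

static inline int sep_wait_sram_write_bounded(struct sep_device *dev)
{
	unsigned long polls;

	for (polls = 0; polls < MAX_SRAM_POLLS; polls++) {
		/* bit 0 of the data-ready register signals completion */
		if (sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR) & 1)
			return 0;
	}
	return -ETIMEDOUT;	/* gave up waiting */
}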
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index ecbde3467b1..00000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2742 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define CRYS_SEP_ROM_length 0x4000
-#define CRYS_SEP_ROM_start_address 0x8000C000UL
-#define CRYS_SEP_ROM_start_address_offset 0xC000UL
-#define SEP_ROM_BANK_register 0x80008420UL
-#define SEP_ROM_BANK_register_offset 0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
- /* Index variables */
- unsigned long i, k, j;
- u32 reg;
- u32 error;
- u32 warning;
-
- /* Loading ROM from SEP_ROM_image.h file */
- k = sizeof(CRYS_SEP_ROM);
-
- edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
- edbg("SEP Driver: k is %lu\n", k);
- edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
- edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
- for (i = 0; i < 4; i++) {
- /* write bank */
- sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
- for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
- sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
- k = k - 4;
-
- if (k == 0) {
- j = CRYS_SEP_ROM_length;
- i = 4;
- }
- }
- }
-
- /* reset the SEP */
- sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
- /* poll for SEP ROM boot finish */
- do
- reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg);
-
- edbg("SEP Driver: ROM polling ended\n");
-
- switch (reg) {
- case 0x1:
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver: ROM polling case 1\n");
- break;
- case 0x4:
- /* Cold boot ended successfully */
- case 0x8:
- /* Warmboot ended successfully */
- case 0x10:
- /* ColdWarm boot ended successfully */
- error = 0;
- case 0x2:
- /* Boot First Phase ended */
- warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- case 0x20:
- edbg("SEP Driver: ROM polling case %d\n", reg);
- break;
- }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int , 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
- mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- * sep_load_firmware - copy firmware cache/resident
- * @sep: device we are loading
- *
- * This function copies the cache and resident from their source
- * location into destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
- const struct firmware *fw;
- char *cache_name = "sep/cache.image.bin";
- char *res_name = "sep/resident.image.bin";
- int error;
-
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
- /* load cache */
- error = request_firmware(&fw, cache_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request cache fw\n");
- return error;
- }
- edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
- memcpy(sep->rar_addr, (void *)fw->data, fw->size);
- sep->cache_size = fw->size;
- release_firmware(fw);
-
- sep->resident_bus = sep->rar_bus + sep->cache_size;
- sep->resident_addr = sep->rar_addr + sep->cache_size;
-
- /* load resident */
- error = request_firmware(&fw, res_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request res fw\n");
- return error;
- }
- edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
- memcpy(sep->resident_addr, (void *) fw->data, fw->size);
- sep->resident_size = fw->size;
- release_firmware(fw);
-
- edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
- sep->resident_addr, (unsigned long long)sep->resident_bus,
- sep->rar_addr, (unsigned long long)sep->rar_bus);
- return 0;
-}
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- *
- * Allocate a shared buffer in host memory that can be used by both the
- * kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
- unsigned long size)
-{
- /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- /* set the bus address of the shared area */
- edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
- size, sep->shared_addr, (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- *
- * Free the shared area allocated to the security processor. The
- * processor must have finished with this and any final posted
- * writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
- dma_free_coherent(&sep->pdev->dev, size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_virt_to_bus - convert bus/virt addresses
- *
- * Returns the bus address inside the shared area according
- * to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
- edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
- virt_address);
- return pa;
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
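The two helpers above are inverses over the shared area; a minimal sketch of the round-trip invariant (example_shared_round_trip is illustrative only):

/* Illustrative only: a pointer inside the shared area survives a
 * virt -> bus -> virt round trip through the helpers above. */
static void example_shared_round_trip(struct sep_device *sep)
{
	void *msg = sep->shared_addr;	/* start of the shared area */
	dma_addr_t bus = sep_shared_virt_to_bus(sep, msg);

	/* bus equals sep->shared_bus here; converting back yields msg */
	WARN_ON(sep_shared_bus_to_virt(sep, bus) != msg);
}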
-
-
-/**
- * sep_try_open - attempt to open a SEP device
- * @sep: device to attempt to open
- *
- * Atomically attempt to get ownership of a SEP device.
- * Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
- if (!test_and_set_bit(0, &sep->in_use))
- return 1;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of sep device
- * @filp: file handle to sep device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node. Must also release the memory data pool
- * allocations.
- *
- * Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
- if (sep_dev == NULL)
- return -ENODEV;
-
- /* check the blocking mode */
- if (filp->f_flags & O_NDELAY) {
- if (sep_try_open(sep_dev) == 0)
- return -EAGAIN;
- } else
- if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
- return -EINTR;
-
- /* Bind to the device, we only have one which makes it easy */
- filp->private_data = sep_dev;
- /* release data pool allocations */
- sep_dev->data_pool_bytes_allocated = 0;
- return 0;
-}
-
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaneous opens, this method is called when the
- * final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
- /* Ensure any blocked open progresses */
- clear_bit(0, &sep->in_use);
- wake_up(&sep_event);
- return 0;
-}
-
-/*---------------------------------------------------------------
- map function - this function maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
-
- dbg("-------->SEP Driver: mmap start\n");
-
- /* check that the size of the mapped range does not exceed the size of the
- message shared area */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- edbg("SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
- return -EAGAIN;
- }
-
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- /* get bus address */
- bus_addr = sep->shared_bus;
-
- edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- edbg("SEP Driver remap_page_range failed\n");
- printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
- return -EAGAIN;
- }
-
- dbg("SEP Driver:<-------- mmap end\n");
-
- return 0;
-}
-
-
-/*-----------------------------------------------
- poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
- unsigned long count;
- unsigned int mask = 0;
- unsigned long retval = 0; /* flow id */
- struct sep_device *sep = filp->private_data;
-
- dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
- while (sep->send_ct != (retval & 0x7FFFFFFF)) {
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
- }
-
- sep->reply_ct++;
-#else
- /* add the event to the polling wait table */
- poll_wait(filp, &sep_event, wait);
-
-#endif
-
- edbg("sep->send_ct is %lu\n", sep->send_ct);
- edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
- /* check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- edbg("retval is %lu\n", retval);
- /* check if this is a sep reply or a request */
- if (retval >> 31) {
- edbg("SEP Driver: sep request in\n");
- /* request */
- mask |= POLLOUT | POLLWRNORM;
- } else {
- edbg("SEP Driver: sep reply in\n");
- mask |= POLLIN | POLLRDNORM;
- }
- }
- dbg("SEP Driver:<-------- poll exit\n");
- return mask;
-}
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* address of time as seen by the kernel */
-
-
- dbg("sep:sep_set_time start\n");
-
- do_gettimeofday(&time);
-
- /* set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
- edbg("SEP Driver:time_addr is %p\n", time_addr);
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: sep device
- *
- * Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: sep being signalled
- *
- * This function raises an interrupt to the SEP to signal that there is a new
- * command from the host.
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_command_handler start\n");
-
- mutex_lock(&sep_mutex);
- sep_set_time(sep);
-
- /* FIXME: flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
- /* update counter */
- sep->send_ct++;
- /* send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
- dbg("SEP Driver:<-------- sep_send_command_handler end\n");
- mutex_unlock(&sep_mutex);
- return;
-}
-
-/**
- * sep_send_reply_command_handler - kick off a command reply
- * @sep: sep being signalled
- *
- * This function raises an interrupt to the SEP to signal that there is a new
- * command reply from the host.
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_reply_command_handler start\n");
-
- /* flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
-
- mutex_lock(&sep_mutex);
- sep->send_ct++; /* update counter */
- /* send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- /* update both counters */
- sep->send_ct++;
- sep->reply_ct++;
- mutex_unlock(&sep_mutex);
- dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
- This function handles the allocate data pool memory request.
- It calculates the bus address of the
- allocated memory and the offset of this area from the mapped address.
- Therefore, the FVOs in user space can calculate the exact virtual
- address of this allocated memory.
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- struct sep_driver_alloc_t command_args;
-
- dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
- command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
- /* write the memory back to the user space */
- error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* set the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
- dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
- return error;
-}
-
-/*
- This function handles write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- void *virt_address;
- unsigned long va;
- unsigned long app_in_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
- error = -EINVAL;
- goto end_function;
- }
- /* copy the application data */
- error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
- return error;
-}
-
-/*
- this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* virtual address of dest application buffer */
- unsigned long app_out_address;
- /* virtual address of the data pool */
- unsigned long va;
- void *virt_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
- /* FIXME: These are incomplete all over the driver: what about + len
- and when doing that also overflows */
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* copy the application data */
- error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
- return error;
-}
-
-/*
- This function releases all the application virtual buffer physical pages,
- that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
- unsigned long count;
-
- if (dirtyFlag) {
- for (count = 0; count < num_pages; count++) {
- /* the out array was written, therefore the data was changed */
- if (!PageReserved(page_array_ptr[count]))
- SetPageDirty(page_array_ptr[count]);
- page_cache_release(page_array_ptr[count]);
- }
- } else {
- /* free in pages - the data was only read, therefore no update was done
- on those pages */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array_ptr[count]);
- }
-
- if (page_array_ptr)
- /* free the array */
- kfree(page_array_ptr);
-
- return 0;
-}
-
-/*
- This function locks all the physical pages of the kernel virtual buffer
- and constructs a basic lli array, where each entry holds the physical
- page address and the size of the application data held in that physical page
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct sep_lli_entry_t *lli_array;
- /* next kernel address to map */
- unsigned long next_kernel_address;
- unsigned long count;
-
- dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = kernel_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lx\n", start_page);
- edbg("SEP Driver: end_page is %lx\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the start address of the first page - app data may not start at
- the beginning of the page */
- lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* advance the address to the start of the next page */
- next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
- /* go from the second page to the one before the last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- next_kernel_address += PAGE_SIZE;
- }
-
- /* if more than 1 page is locked - update the size needed for the last page */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
- /* set the size of the last page */
- lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = 0;
-end_function:
- dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
- return 0;
-}
-
-/*
- This function locks all the physical pages of the application virtual buffer
- and construct a basic lli array, where each entry holds the physical page
- address and the size that application data holds in this physical pages
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct page **page_array;
- struct sep_lli_entry_t *lli_array;
- unsigned long count;
- int result;
-
- dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lu\n", start_page);
- edbg("SEP Driver: end_page is %lu\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- /* allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- edbg("SEP Driver: kmalloc for page_array failed\n");
-
- error = -ENOMEM;
- goto end_function;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
-
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- /* convert the application virtual address into a set of physical pages */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
- up_read(&current->mm->mmap_sem);
-
- /* check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* flush the cache */
- for (count = 0; count < num_pages; count++)
- flush_dcache_page(page_array[count]);
-
- /* set the start address of the first page - app data may not start at
- the beginning of the page */
- lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* go from the second page to the one before the last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
-
- /* if more than 1 page is locked - update the size needed for the last page */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
- /* set the size of the last page */
- lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", app_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
- count, lli_array[count].physical_address,
- count, lli_array[count].block_size);
- }
-
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = page_array;
- goto end_function;
-
-end_function_with_error2:
- /* release the cache */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array[count]);
- kfree(lli_array);
-end_function_with_error1:
- kfree(page_array);
-end_function:
- dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
- return 0;
-}
-
-
-/*
- this function calculates the size of data that can be inserted into the lli
- table from this array; the condition is that either the table is full
- (all entries are entered) or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
- unsigned long table_data_size = 0;
- unsigned long counter;
-
- /* calculate the data in the out lli table until we fill the whole
- table or till the data has ended */
- for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
- return table_data_size;
-}
-
-/*
- this function builds one lli table from the lli_array according to
- the given size of data
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
- unsigned long curr_table_data_size;
- /* counter of lli array entry */
- unsigned long array_counter;
-
- dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
- /* init current table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
- lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
- curr_table_data_size += lli_table_ptr->block_size;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
- /* update the size of block in the table */
- lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
- /* update the physical address in the lli array */
- lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
- /* update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
- } else
- /* advance to the next entry in the lli_array */
- array_counter++;
-
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* set the info entry to default */
- lli_table_ptr->physical_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
- edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
- dbg("SEP Driver:<-------- sep_build_lli_table end\n");
- return;
-}
-
-/*
- this function goes over the list of the created tables and
- prints all their data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
- unsigned long table_count;
- unsigned long entries_count;
-
- dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
- table_count = 1;
- while ((unsigned long) lli_table_ptr != 0xffffffff) {
- edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
- edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
- /* print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
- edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
- }
-
- /* point to the info entry */
- lli_table_ptr--;
-
- edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
- edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
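- /* the info entry chains the tables: bits 0-23 of block_size hold
-    the next table's data size, bits 24-31 hold its entry count, and
-    physical_address holds the bus address of the next table
-    (0xffffffff marks the end of the chain) */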
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
- lli_table_ptr = (struct sep_lli_entry_t *)
- (lli_table_ptr->physical_address);
-
- edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
- if ((unsigned long) lli_table_ptr != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
- table_count++;
- }
- dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
- This function prepares only the input DMA table for synchronous symmetric
- operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_ptr,
- unsigned long *num_entries_ptr,
- unsigned long *table_data_size_ptr,
- bool isKernelVirtualAddress)
-{
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_entry_ptr;
- /* array of lli entries describing the locked pages */
- struct sep_lli_entry_t *lli_array_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_entry;
- /* num entries in the virtual buffer */
- unsigned long sep_lli_entries;
- /* lli table pointer */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* the total data in one table */
- unsigned long table_data_size;
- /* number of entries in lli table */
- unsigned long num_entries_in_table;
- /* next table address */
- void *lli_table_alloc_addr;
- unsigned long result;
-
- dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
- edbg("SEP Driver:data_size is %lu\n", data_size);
- edbg("SEP Driver:block_size is %lu\n", block_size);
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->in_num_pages = 0;
-
- if (data_size == 0) {
- /* special case - create a 2-entry table with zero data */
- in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
- /* FIXME: Should the entry below not be for _bus */
- in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- in_lli_table_ptr->block_size = 0;
-
- in_lli_table_ptr++;
- in_lli_table_ptr->physical_address = 0xFFFFFFFF;
- in_lli_table_ptr->block_size = 0;
-
- *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- goto end_function;
- }
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
- else
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
- if (result)
- return result;
-
- edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = 0;
- sep_lli_entries = sep->in_num_pages;
-
- /* initialize to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* loop until all the entries in the in array are processed */
- while (current_entry < sep_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
- /* now align the table size so that it is a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
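- /* pack the next table's entry count into bits 24-31 and its
-    data size into bits 0-23 of the info entry's block_size */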
- info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
- /* free the lli array describing the pages */
- kfree(lli_array_ptr);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
- return 0;
-
-}
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size from LLI arrays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
- struct sep_lli_entry_t *lli_in_array,
- unsigned long sep_in_lli_entries,
- struct sep_lli_entry_t *lli_out_array,
- unsigned long sep_out_lli_entries,
- unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
- /* points to the area where next lli table can be allocated: keep void *
- as there is pointer scaling to fix otherwise */
- void *lli_table_alloc_addr;
- /* input lli table */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* output lli table */
- struct sep_lli_entry_t *out_lli_table_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_in_entry_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_out_entry_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_in_entry;
- /* points to the first entry to be processed in the lli_out_array */
- unsigned long current_out_entry;
- /* max size of the input table */
- unsigned long in_table_data_size;
- /* max size of the output table */
- unsigned long out_table_data_size;
- /* flag that signifies whether this is the first table built from the arrays */
- unsigned long first_table_flag;
- /* the data size that should be in table */
- unsigned long table_data_size;
- /* number of entries in the input table */
- unsigned long num_entries_in_table;
- /* number of entries in the output table */
- unsigned long num_entries_out_table;
-
- dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
- /* initialize to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- current_in_entry = 0;
- current_out_entry = 0;
- first_table_flag = 1;
- info_in_entry_ptr = 0;
- info_out_entry_ptr = 0;
-
- /* loop until all the entries in the in array are processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* set the new output table */
- out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
- /* calculate the maximum size of data for output table */
- out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
- edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
- edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
- /* take the smaller of the two data sizes */
- table_data_size = in_table_data_size;
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /* now align the table size so that it is a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
-
- /* construct output lli table */
- sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
-
- /* if info entry is null - this is the first table built */
- if (info_in_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *in_num_entries_ptr = num_entries_in_table;
- *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
- edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
- /* update the info entry of the previous out table */
- info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
- edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
- edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
- edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
- /* print output tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
- dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
- return 0;
-}
-
-
-/*
- This function builds input and output DMA tables for synchronous
- symmetric operations (AES, DES). It also ensures that each table's
- data size is a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
- /* lli array for the input pages */
- struct sep_lli_entry_t *lli_in_array;
- /* lli array for the output pages */
- struct sep_lli_entry_t *lli_out_array;
- int result = 0;
-
- dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true) {
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
- goto end_function;
- }
- } else {
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
- }
-
- if (isKernelVirtualAddress == true) {
- result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- } else {
- result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- }
- edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
- edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
- edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
- /* call the function that creates the tables from the lli arrays */
- result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
- if (result) {
- edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error2;
- }
-
- /* fall through - free the lli entry arrays */
- dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
- dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
- dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
- kfree(lli_out_array);
-end_function_with_error1:
- kfree(lli_in_array);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
- return result;
-
-}
-
-/*
- this function handles the request for creation of the DMA tables
- for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- /* command arguments */
- struct sep_driver_build_sync_table_t command_args;
-
- dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- edbg("app_in_address is %08lx\n", command_args.app_in_address);
- edbg("app_out_address is %08lx\n", command_args.app_out_address);
- edbg("data_size is %lu\n", command_args.data_in_size);
- edbg("block_size is %lu\n", command_args.block_size);
-
- /* check if we need to build only input table or input/output */
- if (command_args.app_out_address)
- /* prepare input and output tables */
- error = sep_prepare_input_output_dma_table(sep,
- command_args.app_in_address,
- command_args.app_out_address,
- command_args.data_in_size,
- command_args.block_size,
- &command_args.in_table_address,
- &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
- else
- /* prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- command_args.app_in_address,
- command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
- if (error)
- goto end_function;
- /* copy to user */
- if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for freeing the dma tables for synchronous actions
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
- /* free input pages array */
- sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
- /* free output pages array if needed */
- if (sep->out_page_array)
- sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
- /* reset all the values */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
- sep->in_num_pages = 0;
- sep->out_num_pages = 0;
- dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
- return 0;
-}
-
-/*
- this function finds space for the new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
- unsigned long **table_address_ptr)
-{
- int error = 0;
- /* pointer to the id field of the flow dma table */
- unsigned long *start_table_ptr;
- /* Do not make start_addr unsigned long * unless fixing the offset
- computations ! */
- void *flow_dma_area_start_addr;
- unsigned long *flow_dma_area_end_addr;
- /* maximum table size in words */
- unsigned long table_size_in_words;
-
- /* find the start address of the flow DMA table area */
- flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* set end address of the flow table area */
- flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
- /* set table size in words */
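- /* the extra two words account for the entry-count word and the
-    page-array pointer word that precede each table's lli entries */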
- table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
- /* set the pointer to the start address of DMA area */
- start_table_ptr = flow_dma_area_start_addr;
-
- /* find the space for the next table */
- while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
- start_table_ptr += table_size_in_words;
-
- /* check if we reached the end of the flow tables area */
- if (start_table_ptr >= flow_dma_area_end_addr)
- error = -1;
- else
- *table_address_ptr = start_table_ptr;
-
- return error;
-}
-
-/*
- This function creates one DMA table for flow and returns its data,
- and pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
- unsigned long virt_buff_addr,
- unsigned long virt_buff_size,
- struct sep_lli_entry_t *table_data,
- struct sep_lli_entry_t **info_entry_ptr,
- struct sep_flow_context_t *flow_data_ptr,
- bool isKernelVirtualAddress)
-{
- int error;
- /* the range in pages */
- unsigned long lli_array_size;
- struct sep_lli_entry_t *lli_array;
- struct sep_lli_entry_t *flow_dma_table_entry_ptr;
- unsigned long *start_dma_table_ptr;
- /* total table data counter */
- unsigned long dma_table_data_count;
- /* pointer that will keep the pointer to the pages of the virtual buffer */
- struct page **page_array_ptr;
- unsigned long entry_count;
-
- /* find the space for the new table */
- error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
- if (error)
- goto end_function;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock kernel buffer in the memory */
- error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
- else
- /* lock user buffer in the memory */
- error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
- if (error)
- goto end_function;
-
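- /* flow table layout in the shared area: word 0 holds the number of
-    lli entries, word 1 holds the page array pointer, followed by the
-    lli entries themselves and a trailing info entry that links to the
-    next table (0xffffffff marks the end) */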
- /* store the number of lli entries at the beginning of the table - this
- table is now considered taken */
- *start_dma_table_ptr = lli_array_size;
-
- /* advance to the slot that holds the page array pointer */
- start_dma_table_ptr++;
-
- /* set the pages pointer */
- *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
- /* set the pointer to the first entry */
- flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
- /* now create the entries for table */
- for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
- flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
- flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
- /* set the total data of a table */
- dma_table_data_count += lli_array[entry_count].block_size;
-
- flow_dma_table_entry_ptr++;
- }
-
- /* set the physical address */
- table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
- /* set the num_entries and total data size */
- table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
- /* set the info entry */
- flow_dma_table_entry_ptr->physical_address = 0xffffffff;
- flow_dma_table_entry_ptr->block_size = 0;
-
- /* set the pointer to info entry */
- *info_entry_ptr = flow_dma_table_entry_ptr;
-
- /* free the array of the lli entries */
- kfree(lli_array);
-end_function:
- return error;
-}
-
-
-
-/*
- This function creates a list of tables for flow and returns the data for
- the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
- unsigned long num_virtual_buffers,
- unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
- int error;
- unsigned long virt_buff_addr;
- unsigned long virt_buff_size;
- struct sep_lli_entry_t table_data;
- struct sep_lli_entry_t *info_entry_ptr;
- struct sep_lli_entry_t *prev_info_entry_ptr;
- unsigned long i;
-
- /* init vars */
- error = 0;
- prev_info_entry_ptr = 0;
-
- /* init the first table to default */
- table_data.physical_address = 0xffffffff;
- first_table_data_ptr->physical_address = 0xffffffff;
- table_data.block_size = 0;
-
- for (i = 0; i < num_virtual_buffers; i++) {
- /* get the virtual buffer address */
- error = get_user(virt_buff_addr, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* get the virtual buffer size */
- first_buff_addr++;
- error = get_user(virt_buff_size, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* advance the address to point to the next pair of address|size */
- first_buff_addr++;
-
- /* now prepare the one flow LLI table from the data */
- error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
- if (error)
- goto end_function;
-
- if (i == 0) {
- /* if this is the first table - save it to return to the user
- application */
- *first_table_data_ptr = table_data;
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- } else {
- /* not first table - the previous table info entry should
- be updated */
- prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- }
- }
-
- /* set the last table data */
- *last_table_data_ptr = table_data;
-end_function:
- return error;
-}
-
-/*
- this function goes over all the flow tables connected to the given
- table and deallocates them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
- /* id pointer */
- unsigned long *table_ptr;
- /* number of entries in the current table */
- unsigned long num_entries;
- unsigned long num_pages;
- struct page **pages_ptr;
- /* pointer to the info entry of the current table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- /* set the pointer to the first table */
- table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
- /* set the num of entries */
- num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
- & SEP_NUM_ENTRIES_MASK;
-
- /* go over all the connected tables */
- while (*table_ptr != 0xffffffff) {
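- /* the two words just before the entries hold the page count and the
-    page array pointer stored by sep_prepare_one_flow_dma_table */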
- /* get number of pages */
- num_pages = *(table_ptr - 2);
-
- /* get the pointer to the pages */
- pages_ptr = (struct page **) (*(table_ptr - 1));
-
- /* free the pages */
- sep_free_dma_pages(pages_ptr, num_pages, 1);
-
- /* go to the info entry */
- info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
- table_ptr = (unsigned long *) info_entry_ptr->physical_address;
- num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
- }
-
- return;
-}
-
-/**
- * sep_find_flow_context - find a flow
- * @sep: the SEP we are working with
- * @flow_id: flow identifier
- *
- * Returns a pointer to the matching flow, or NULL if the flow does not
- * exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
- unsigned long flow_id)
-{
- int count;
- /*
- * always search for the flow with the default id first - once we have
- * started working on the flow there can be no situation in which
- * two flows carry the default flag
- */
- for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
- if (sep->flows[count].flow_id == flow_id)
- return &sep->flows[count];
- }
- return NULL;
-}
-
-
-/*
- this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = -ENOENT;
- struct sep_driver_build_flow_table_t command_args;
- /* first table - output */
- struct sep_lli_entry_t first_table_data;
- /* dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the previous DMA table */
- struct sep_lli_entry_t *prev_info_entry_ptr;
- /* pointer to the flow data structure */
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
- /* init variables */
- prev_info_entry_ptr = 0;
- first_table_data.physical_address = 0xffffffff;
-
- /* find the free structure for flow data */
- error = -EINVAL;
- flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* create flow tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* check if flow is static */
- if (!command_args.flow_type)
- /* point the info entry of the last to the info entry of the first */
- last_table_data = first_table_data;
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function_with_error;
- }
-
- /* the whole flow has been created - update the flow entry with the temp id */
- flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
- /* set the processing tables data in the context */
- if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
- flow_context_ptr->input_tables_in_process = first_table_data;
- else
- flow_context_ptr->output_tables_in_process = first_table_data;
-
- goto end_function;
-
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles adding tables to a flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- unsigned long num_entries;
- struct sep_driver_add_flow_table_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
- /* first dma table data */
- struct sep_lli_entry_t first_table_data;
- /* last dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the current DMA table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
- /* get input parameters */
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* find the flow structure for the flow id */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* prepare the flow dma tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* now check if there is already an existing add table for this flow */
- if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
- /* this buffer was for input buffers */
- if (flow_context_ptr->input_tables_flag) {
- /* add table already exists - add the new tables to the end
- of the previous */
- num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_input_table;
- } else {
- /* set the input flag */
- flow_context_ptr->input_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_input_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_input_table = last_table_data;
- } else { /* this is output tables */
-
- /* this buffer was for output buffers */
- if (flow_context_ptr->output_tables_flag) {
- /* add table already exists - add the new tables to
- the end of the previous */
- num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_output_table;
- } else {
- /* set the output flag */
- flow_context_ptr->output_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_output_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_output_table = last_table_data;
- }
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
- if (error)
- error = -EFAULT;
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
- return error;
-}
-
-/*
- this function adds the flow add message to the specified flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_add_message_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* check input */
- if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* find the flow context */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* copy the message into context */
- flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
- error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
- return error;
-}
-
-
-/*
- this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_static_pool_addr_t command_args;
-
- dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
- /*prepare the output parameters in the struct */
- command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
- command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
- if (error)
- error = -EFAULT;
- dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
- return error;
-}
-
-/*
- this function gets the offset of the physical address from the start
- of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_get_mapped_offset_t command_args;
-
- dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- if (command_args.physical_address < sep->shared_bus) {
- error = -EINVAL;
- goto end_function;
- }
-
- /*prepare the output parameters in the struct */
- command_args.offset = command_args.physical_address - sep->shared_bus;
-
- edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
- return error;
-}
-
-
-/*
- this function waits in polling for the start-up message from SEP (via GPR3)
- and returns the error status read from GPR0 if SEP reported a fatal error
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
- unsigned long reg_val;
- unsigned long error = 0;
-
- dbg("SEP Driver:--------> sep_start_handler start\n");
-
- /* wait in polling for message from SEP */
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg_val);
-
- /* check the value */
- if (reg_val == 0x1)
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- dbg("SEP Driver:<-------- sep_start_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
- unsigned long message_word;
- unsigned long *message_ptr;
- struct sep_driver_init_t command_args;
- unsigned long counter;
- unsigned long error;
- unsigned long reg_val;
-
- dbg("SEP Driver:--------> sep_init_handler start\n");
- error = 0;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
- dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
-
- /* PATCH - configure the DMA to single-burst instead of multi-burst */
- /*sep_configure_dma_burst(); */
-
- dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
- message_ptr = (unsigned long *) command_args.message_addr;
-
- /* set the base address of the SRAM */
- sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
- for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
- get_user(message_word, message_ptr);
- /* write data to SRAM */
- sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
- edbg("SEP Driver:message_word is %lu\n", message_word);
- /* wait for write complete */
- sep_wait_sram_write(sep);
- }
- dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
- /* signal SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!(reg_val & 0xFFFFFFFD));
-
- dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
- /* check the value */
- if (reg_val == 0x1) {
- edbg("SEP Driver:init failed\n");
-
- error = sep_read_reg(sep, 0x8060);
- edbg("SEP Driver:sw monitor is %lu\n", error);
-
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver:error is %lu\n", error);
- }
-end_function:
- dbg("SEP Driver:<-------- sep_init_handler end\n");
- return error;
-
-}
-
-/*
- this function handles the request for cache and resident reallocation
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
- unsigned long arg)
-{
- struct sep_driver_realloc_cache_resident_t command_args;
- int error;
-
- /* copy cache and resident to their intended locations */
- error = sep_load_firmware(sep);
- if (error)
- return error;
-
- command_args.new_base_addr = sep->shared_bus;
-
- /* find the new base address according to the lowest address between
- cache, resident and shared area */
- if (sep->resident_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->resident_bus;
- if (sep->rar_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->rar_bus;
-
- /* set the return parameters */
- command_args.new_cache_addr = sep->rar_bus;
- command_args.new_resident_addr = sep->resident_bus;
-
- /* set the new shared area */
- command_args.new_shared_area_addr = sep->shared_bus;
-
- edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
- edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
- edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
- edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
- /* return to user */
- if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
- return -EFAULT;
- return 0;
-}
-
-/**
- * sep_get_time_handler - time request from user space
- * @sep: sep we are to set the time for
- * @arg: pointer to user space arg buffer
- *
- * This function reports back the time and the address in the SEP
- * shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
- struct sep_driver_get_time_t command_args;
-
- mutex_lock(&sep_mutex);
- command_args.time_value = sep_set_time(sep);
- command_args.time_physical_address = (unsigned long)sep_time_address(sep);
- mutex_unlock(&sep_mutex);
- if (copy_to_user((void __user *)arg,
- &command_args, sizeof(struct sep_driver_get_time_t)))
- return -EFAULT;
- return 0;
-
-}
-
-/*
- This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
- dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
- /* unlock the sep mutex */
- mutex_unlock(&sep_mutex);
-#endif
-
- dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
- return 0;
-}
-
-
-/**
- * sep_set_flow_id_handler - handle flow setting
- * @sep: the SEP we are configuring
- * @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
- unsigned long flow_id)
-{
- int error = 0;
- struct sep_flow_context_t *flow_data_ptr;
-
- /* find the flow data structure that was just used for creating new flow
- - its id should be default */
-
- mutex_lock(&sep_mutex);
- flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
- if (flow_data_ptr)
- flow_data_ptr->flow_id = flow_id; /* set flow id */
- else
- error = -EINVAL;
- mutex_unlock(&sep_mutex);
- return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- dbg("------------>SEP Driver: ioctl start\n");
-
- edbg("SEP Driver: cmd is %x\n", cmd);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* send command to SEP */
- sep_send_command_handler(sep);
- edbg("SEP Driver: after sep_send_command_handler\n");
- break;
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* send reply command to SEP */
- sep_send_reply_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCWRITEDATAPOLL:
- /* write data into memory pool */
- error = sep_write_into_data_pool_handler(sep, arg);
- break;
- case SEP_IOCREADDATAPOLL:
- /* read data from data pool into application memory */
- error = sep_read_from_data_pool_handler(sep, arg);
- break;
- case SEP_IOCCREATESYMDMATABLE:
- /* create dma tables for synchronous operation */
- error = sep_create_sync_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCCREATEFLOWDMATABLE:
- /* create flow dma tables */
- error = sep_create_flow_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCFREEDMATABLEDATA:
- /* free the pages */
- error = sep_free_dma_table_data_handler(sep);
- break;
- case SEP_IOCSETFLOWID:
- /* set flow id */
- error = sep_set_flow_id_handler(sep, (unsigned long)arg);
- break;
- case SEP_IOCADDFLOWTABLE:
- /* add tables to the dynamic flow */
- error = sep_add_flow_tables_handler(sep, arg);
- break;
- case SEP_IOCADDFLOWMESSAGE:
- /* add message of add tables to flow */
- error = sep_add_flow_tables_message_handler(sep, arg);
- break;
- case SEP_IOCSEPSTART:
- /* start command to sep */
- error = sep_start_handler(sep);
- break;
- case SEP_IOCSEPINIT:
- /* init command to sep */
- error = sep_init_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* get the physical and virtual addresses of the static pool */
- error = sep_get_static_pool_addr_handler(sep, arg);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep, arg);
- break;
- case SEP_IOCREALLOCCACHERES:
- error = sep_realloc_cache_resident_handler(sep, arg);
- break;
- case SEP_IOCGETMAPPEDADDROFFSET:
- error = sep_get_physical_mapped_offset_handler(sep, arg);
- break;
- case SEP_IOCGETIME:
- error = sep_get_time_handler(sep, arg);
- break;
- default:
- error = -ENOTTY;
- break;
- }
- dbg("SEP Driver:<-------- ioctl end\n");
- return error;
-}
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
- struct sep_flow_context_t *flow_data_ptr;
-
- /* obtain the mutex */
- mutex_lock(&sep_mutex);
-
- /* get the pointer to context */
- flow_data_ptr = (struct sep_flow_context_t *) work;
-
- /* free all the current input tables in sep */
- sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
- /* free all the current tables output tables in SEP (if needed) */
- if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
- sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
- /* check if we have additional tables to be sent to SEP; only the
- input flag needs to be checked */
- if (flow_data_ptr->input_tables_flag) {
- /* copy the message to the shared RAM and signal SEP */
- memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
- }
- mutex_unlock(&sep_mutex);
-}
-/*
- interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error;
- unsigned long reg_val;
- unsigned long flow_id;
- struct sep_flow_context_t *flow_context_ptr;
- struct sep_device *sep = dev_id;
-
- int_error = IRQ_HANDLED;
-
- /* read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
- edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
- /* check if this is the flow interrupt */
- if (0 /*reg_val & (0x1 << 11) */ ) {
- /* read GPR0 to find out which flow is done */
- flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- /* find the context of the flow */
- flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
- if (flow_context_ptr == NULL)
- goto end_function_with_error;
-
- /* queue the work */
- INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
- queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
- } else {
- /* check if this is reply interrupt from SEP */
- if (reg_val & (0x1 << 13)) {
- /* update the counter of reply messages */
- sep->reply_ct++;
- /* wake up the waiting process */
- wake_up(&sep_event);
- } else {
- int_error = IRQ_NONE;
- goto end_function;
- }
- }
-end_function_with_error:
- /* clear the interrupt */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
- return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
- u32 reg;
-
- do {
- reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
- } while (reg);
-}
-
-/*
- PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
- /* request access to registers from SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
-
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
-
- /* set the DMA burst register to single burst */
- sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
- /* release the sep busy */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
-
-}
-
-#endif
-
-/*
- Function that is activated on the successful probe of the SEP device
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
- int counter;
- int size; /* size of memory for allocation */
-
- edbg("Sep pci probe starting\n");
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- edbg("error enabling pci device\n");
- goto end_function;
- }
-
- /* set the pci dev pointer */
- sep_dev = &sep_instance;
- sep = &sep_instance;
-
- edbg("sep->shared_addr = %p\n", sep->shared_addr);
- /* transaction counter that coordinates the transactions between SEP
- and HOST */
- sep->send_ct = 0;
- /* counter for the messages from sep */
- sep->reply_ct = 0;
- /* counter for the number of bytes allocated in the pool
- for the current transaction */
- sep->data_pool_bytes_allocated = 0;
-
- /* calculate the total size for allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- /* allocate the shared area */
- if (sep_map_and_alloc_shared_area(sep, size)) {
- error = -ENOMEM;
- /* allocation failed */
- goto end_function_error;
- }
- /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
- /* Note: this test section will need moving before it could ever
- work as the registers are not yet mapped ! */
- /* send the new SHARED MESSAGE AREA to the SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* poll for SEP response */
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
- while (retval != 0xffffffff && retval != sep->shared_bus)
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* check the return value (register) */
- if (retval != sep->shared_bus) {
- error = -ENOMEM;
- goto end_function_deallocate_sep_shared_area;
- }
-#endif
- /* init the flow contexts */
- for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
- sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
- sep->flow_wq = create_singlethread_workqueue("sepflowwq");
- if (sep->flow_wq == NULL) {
- error = -ENOMEM;
- edbg("sep_driver:flow queue creation failed\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver: create flow workqueue \n");
- sep->pdev = pci_dev_get(pdev);
-
- sep->reg_addr = pci_ioremap_bar(pdev, 0);
- if (!sep->reg_addr) {
- edbg("sep: ioremap of registers failed.\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
- /* load the rom code */
- sep_load_rom_code(sep);
-
- /* set up system base address and shared memory location */
- sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
- 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- &sep->rar_bus, GFP_KERNEL);
-
- if (!sep->rar_addr) {
- edbg("SEP Driver:can't allocate rar\n");
- goto end_function_uniomap;
- }
-
-
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
- edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
- /* clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- edbg("SEP Driver: about to call request_irq\n");
- /* get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
- if (error)
- goto end_function_free_res;
- return 0;
- edbg("SEP Driver: about to write IMR REG_ADDR");
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-end_function_free_res:
- dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- sep->rar_addr, sep->rar_bus);
-#endif /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
- iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
- /* de-allocate shared area */
- sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
- sep_dev = NULL;
-end_function:
- return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe
- /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
-/* the files operations structure of the driver */
-static struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
-  this function registers the driver with the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
- int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
- if (ret_val) {
- edbg("sep: major number allocation failed, retval is %d\n",
- ret_val);
- return ret_val;
- }
- /* init cdev */
- cdev_init(&sep_cdev, &sep_file_operations);
- sep_cdev.owner = THIS_MODULE;
-
- /* register the driver with the kernel */
- ret_val = cdev_add(&sep_cdev, sep_devno, 1);
- if (ret_val) {
- edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- }
- return ret_val;
-}
-
-
-/*--------------------------------------------------------------
- init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
- int ret_val = 0;
- dbg("SEP Driver:-------->Init start\n");
- /* FIXME: Probe can occur before we are ready to survive a probe */
- ret_val = pci_register_driver(&sep_pci_driver);
- if (ret_val) {
- edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
- goto end_function_unregister_from_fs;
- }
- /* register driver to fs */
- ret_val = sep_register_driver_to_fs();
- if (ret_val)
- goto end_function_unregister_pci;
- goto end_function;
-end_function_unregister_pci:
- pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
-end_function:
- dbg("SEP Driver:<-------- Init end\n");
- return ret_val;
-}
-
-
-/*-------------------------------------------------------------
- exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
- int size;
-
- dbg("SEP Driver:--------> Exit start\n");
-
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- /* calculate the total size for de-allocation */
-	size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
-		SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES +
-		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
-		SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES +
-		SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
-		SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
- /* FIXME: We need to do this in the unload for the device */
- /* free shared area */
- if (sep_dev) {
- sep_unmap_and_free_shared_area(sep_dev, size);
- edbg("SEP Driver: free pages SEP SHARED AREA \n");
- iounmap((void *) sep_dev->reg_addr);
- edbg("SEP Driver: iounmap \n");
- }
- edbg("SEP Driver: release_mem_region \n");
- dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644
index 383543d97f9..00000000000
--- a/drivers/staging/sep/sep_driver_api.h
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- *
- * sep_driver_api.h - Security Processor Driver api definitions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_API_H__
-#define __SEP_DRIVER_API_H__
-
-
-
-/*----------------------------------------------------------------
- IOCTL command defines
- -----------------------------------------------------------------*/
-
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER 's'
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)
-
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)
-
-/* write to pre-allocated memory in data pool */
-#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)
-
-/* read from pre-allocated memory in data pool */
-#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)
-
-/* create sym dma lli tables */
-#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)
-
-/* create flow dma lli tables */
-#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)
-
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)
-
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)
-
-/* set flow id command */
-#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)
-
-/* add tables to the dynamic flow */
-#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)
-
-/* add flow add tables message */
-#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)
-
-/* start sep command */
-#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)
-
-/* init sep command */
-#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)
-
-/* end transaction command */
-#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)
-
-/* reallocate cache and resident */
-#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)
-
-/* get the offset of the address starting from the beginning of the map area */
-#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)
-
-/* get time address and value */
-#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)
-
-/*-------------------------------------------
- TYPEDEFS
-----------------------------------------------*/
-
-/*
- init command struct
-*/
-struct sep_driver_init_t {
-	/* start of the 1G region of host memory that SEP can access */
- unsigned long message_addr;
-
-	/* size of the message in words */
- unsigned long message_size_in_words;
-
-};
-
-
-/*
- realloc cache resident command
-*/
-struct sep_driver_realloc_cache_resident_t {
- /* new cache address */
- u64 new_cache_addr;
- /* new resident address */
- u64 new_resident_addr;
-	/* new shared area address */
- u64 new_shared_area_addr;
- /* new base address */
- u64 new_base_addr;
-};
-
-struct sep_driver_alloc_t {
- /* virtual address of allocated space */
- unsigned long offset;
-
- /* physical address of allocated space */
- unsigned long phys_address;
-
- /* number of bytes to allocate */
- unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_write_t {
- /* application space address */
- unsigned long app_address;
-
- /* address of the data pool */
- unsigned long datapool_address;
-
- /* number of bytes to write */
- unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_read_t {
- /* application space address */
- unsigned long app_address;
-
- /* address of the data pool */
- unsigned long datapool_address;
-
- /* number of bytes to read */
- unsigned long num_bytes;
-};
-
-/*
-*/
-struct sep_driver_build_sync_table_t {
- /* address value of the data in */
- unsigned long app_in_address;
-
- /* size of data in */
- unsigned long data_in_size;
-
- /* address of the data out */
- unsigned long app_out_address;
-
- /* the size of the block of the operation - if needed,
- every table will be modulo this parameter */
- unsigned long block_size;
-
- /* the physical address of the first input DMA table */
- unsigned long in_table_address;
-
- /* number of entries in the first input DMA table */
- unsigned long in_table_num_entries;
-
- /* the physical address of the first output DMA table */
- unsigned long out_table_address;
-
- /* number of entries in the first output DMA table */
- unsigned long out_table_num_entries;
-
- /* data in the first input table */
- unsigned long table_data_size;
-
-	/* true if the buffer address is a kernel virtual address */
- bool isKernelVirtualAddress;
-
-};
-
-/*
-*/
-struct sep_driver_build_flow_table_t {
- /* flow type */
- unsigned long flow_type;
-
- /* flag for input output */
- unsigned long input_output_flag;
-
- /* address value of the data in */
- unsigned long virt_buff_data_addr;
-
- /* size of data in */
- unsigned long num_virtual_buffers;
-
- /* the physical address of the first input DMA table */
- unsigned long first_table_addr;
-
- /* number of entries in the first input DMA table */
- unsigned long first_table_num_entries;
-
- /* data in the first input table */
- unsigned long first_table_data_size;
-
-	/* true if the buffer address is a kernel virtual address */
- bool isKernelVirtualAddress;
-};
-
-
-struct sep_driver_add_flow_table_t {
- /* flow id */
- unsigned long flow_id;
-
- /* flag for input output */
- unsigned long inputOutputFlag;
-
- /* address value of the data in */
- unsigned long virt_buff_data_addr;
-
- /* size of data in */
- unsigned long num_virtual_buffers;
-
- /* address of the first table */
- unsigned long first_table_addr;
-
- /* number of entries in the first table */
- unsigned long first_table_num_entries;
-
- /* data size of the first table */
- unsigned long first_table_data_size;
-
-	/* true if the buffer address is a kernel virtual address */
- bool isKernelVirtualAddress;
-
-};
-
-/*
- command struct for set flow id
-*/
-struct sep_driver_set_flow_id_t {
- /* flow id to set */
- unsigned long flow_id;
-};
-
-
-/* command struct for add tables message */
-struct sep_driver_add_message_t {
- /* flow id to set */
- unsigned long flow_id;
-
- /* message size in bytes */
- unsigned long message_size_in_bytes;
-
- /* address of the message */
- unsigned long message_address;
-};
-
-/* command struct for static pool addresses */
-struct sep_driver_static_pool_addr_t {
- /* physical address of the static pool */
- unsigned long physical_static_address;
-
- /* virtual address of the static pool */
- unsigned long virtual_static_address;
-};
-
-/* command struct for getting the offset of a physical address from
- the start of the mapped area */
-struct sep_driver_get_mapped_offset_t {
-	/* physical address to look up */
-	unsigned long physical_address;
-
-	/* offset of that address from the start of the mapped area */
-	unsigned long offset;
-};
-
-/* command struct for getting time value and address */
-struct sep_driver_get_time_t {
- /* physical address of stored time */
- unsigned long time_physical_address;
-
- /* value of the stored time */
- unsigned long time_value;
-};
-
-
-/*
-  structure that represents one entry in the DMA LLI table
-*/
-struct sep_lli_entry_t {
- /* physical address */
- unsigned long physical_address;
-
- /* block size */
- unsigned long block_size;
-};
-
-/*
-  structure that represents data needed for lli table construction
-*/
-struct sep_lli_prepare_table_data_t {
- /* pointer to the memory where the first lli entry to be built */
- struct sep_lli_entry_t *lli_entry_ptr;
-
- /* pointer to the array of lli entries from which the table is to be built */
- struct sep_lli_entry_t *lli_array_ptr;
-
- /* number of elements in lli array */
- int lli_array_size;
-
- /* number of entries in the created table */
- int num_table_entries;
-
- /* number of array entries processed during table creation */
- int num_array_entries_processed;
-
-	/* the total data size in the created table */
- int lli_table_total_data_size;
-};
-
-/*
-  structure that represents one table - it is not used in the code, just
-  to show what a table looks like
-*/
-struct sep_lli_table_t {
-	/* number of pages mapped by this table. If 0, the table is not
-	   defined (used as a validity flag) */
- unsigned long num_pages;
- /*
- pointer to array of page pointers that represent the mapping of the
- virtual buffer defined by the table to the physical memory. If this
- pointer is NULL, it means that the table is not defined
- (used as a valid flag)
- */
- struct page **table_page_array_ptr;
-
- /* maximum flow entries in table */
- struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
-};
-
-
-/*
- structure for keeping the mapping of the virtual buffer into physical pages
-*/
-struct sep_flow_buffer_data {
-	/* pointer to the array of page struct pointers for the pages of
-	   the virtual buffer */
- struct page **page_array_ptr;
-
- /* number of pages taken by the virtual buffer */
- unsigned long num_pages;
-
-	/* this flag signals whether this page_array is the last of several
-	   that were sent to SEP in a single request */
- unsigned long last_page_array_flag;
-};
-
-/*
- struct that keeps all the data for one flow
-*/
-struct sep_flow_context_t {
- /*
- work struct for handling the flow done interrupt in the workqueue
-	  this structure must be placed first, since it is used for casting
-	  back to the containing flow context
- */
- struct work_struct flow_wq;
-
- /* flow id */
- unsigned long flow_id;
-
-	/* additional input tables exist */
- unsigned long input_tables_flag;
-
-	/* additional output tables exist */
- unsigned long output_tables_flag;
-
-	/* data of the first input table */
- struct sep_lli_entry_t first_input_table;
-
- /* data of the first output table */
- struct sep_lli_entry_t first_output_table;
-
- /* last input table data */
- struct sep_lli_entry_t last_input_table;
-
- /* last output table data */
- struct sep_lli_entry_t last_output_table;
-
-	/* input table in process (in sep) */
- struct sep_lli_entry_t input_tables_in_process;
-
- /* output table in process (in sep) */
- struct sep_lli_entry_t output_tables_in_process;
-
- /* size of messages in bytes */
- unsigned long message_size_in_bytes;
-
- /* message */
- unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
-};
-
-
-#endif
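
The command numbers above are issued against the character device registered by sep_driver.c. A hypothetical user-space caller might look as follows; the /dev node path is an assumption, since the removed code only allocates a chrdev region named "sep_sec_driver" and does not show how the node is created:

/* Hypothetical user-space caller; the device path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sep_driver_api.h"

int main(void)
{
	int fd = open("/dev/sep_sec_driver", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* signal SEP that the message in the shared area is ready */
	if (ioctl(fd, SEP_IOCSENDSEPCOMMAND) < 0)
		perror("SEP_IOCSENDSEPCOMMAND");
	close(fd);
	return 0;
}
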
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644
index 6008fe5eca0..00000000000
--- a/drivers/staging/sep/sep_driver_config.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- *
- * sep_driver_config.h - Security Processor Driver configuration
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_CONFIG_H__
-#define __SEP_DRIVER_CONFIG_H__
-
-
-/*--------------------------------------
- DRIVER CONFIGURATION FLAGS
- -------------------------------------*/
-
-/* if this flag is set, the driver runs in polling mode rather
-   than interrupt mode */
-#define SEP_DRIVER_POLLING_MODE 1
-
-/* flag which defines whether the shared area address should be
-   reconfigured (sent to SEP anew) during driver init */
-#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
-
-/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
-#define SEP_DRIVER_ARM_DEBUG_MODE 0
-
-/*-------------------------------------------
- INTERNAL DATA CONFIGURATION
- -------------------------------------------*/
-
-/* flag for the input array */
-#define SEP_DRIVER_IN_FLAG 0
-
-/* flag for output array */
-#define SEP_DRIVER_OUT_FLAG 1
-
-/* maximum number of entries in one LLI table */
-#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
-
-
-/*--------------------------------------------------------
- SHARED AREA memory total size is 36K
-  it is divided as follows:
-
- SHARED_MESSAGE_AREA 8K }
- }
- STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
- }
- DATA_POOL_AREA 12K }
-
- SYNCHRONIC_DMA_TABLES_AREA 5K
-
- FLOW_DMA_TABLES_AREA 4K
-
- SYSTEM_MEMORY_AREA 3k
-
- SYSTEM_MEMORY total size is 3k
-  it is divided as follows:
-
- TIME_MEMORY_AREA 8B
------------------------------------------------------------*/
-
-
-
-/*
- the maximum length of the message - the rest of the message shared
- area will be dedicated to the dma lli tables
-*/
-#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
-
-/* the size of the message shared area in bytes */
-#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
-
-/* the size of the static pool area in bytes */
-#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
-
-/* the size of the data pool shared area in bytes */
-#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
-
-/* the size of the synchronic DMA tables area in bytes */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
-
-
-/* the size of the flow DMA tables area in bytes */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
-
-/* system data (time, caller id, etc.) pool */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
-
-
-/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
-   DATA POOL areas. The area must be a multiple of 4K */
-#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
-
-
-/*-----------------------------------------------
- offsets of the areas starting from the shared area start address
-*/
-
-/* message area offset */
-#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
-
-/* static pool area offset */
-#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
-
-/* data pool area offset */
-#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
-
-/* synchronic dma tables area offset */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
-
-/* sep driver flow dma tables area offset */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* system memory offset in bytes */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
- (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* offset of the time area */
-#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
- (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
-
-
-
-/* start physical address of the SEP registers memory in HOST */
-#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
-
-/* size of the SEP registers memory region in HOST (for now 100 registers) */
-#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
-
-/* the number of IRQs used for SEP interrupts */
-#define SEP_DIRVER_IRQ_NUM 1
-
-/* maximum number of add buffers */
-#define SEP_MAX_NUM_ADD_BUFFERS 100
-
-/* number of flows */
-#define SEP_DRIVER_NUM_FLOWS 4
-
-/* maximum number of entries in flow table */
-#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
-
-/* offset of the num entries in the block length entry of the LLI */
-#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
-
-/* offset of the interrupt flag in the block length entry of the LLI */
-#define SEP_INT_FLAG_OFFSET_IN_BITS 31
-
-/* mask for extracting data size from LLI */
-#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
-
-/* mask for entries after being shifted left */
-#define SEP_NUM_ENTRIES_MASK 0x7F
-
-/* default flow id */
-#define SEP_FREE_FLOW_ID 0xFFFFFFFF
-
-/* temp flow id used during creation of a new flow, until the real
-   flow id is received from sep */
-#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
-
-/* maximum add buffers message length in bytes */
-#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
-
-/* maximum number of concurrent virtual buffers */
-#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
-
-/* the token that defines the start of time address */
-#define SEP_TIME_VAL_TOKEN 0x12345678
-
-/* DEBUG LEVEL MASKS */
-#define SEP_DEBUG_LEVEL_BASIC 0x1
-
-#define SEP_DEBUG_LEVEL_EXTENDED 0x4
-
-
-/* Debug helpers */
-
-#define dbg(fmt, args...) \
-do { \
-	if (debug & SEP_DEBUG_LEVEL_BASIC) \
-		printk(KERN_DEBUG fmt, ##args); \
-} while (0)
-
-#define edbg(fmt, args...) \
-do { \
-	if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
-		printk(KERN_DEBUG fmt, ##args); \
-} while (0)
-
-
-
-#endif
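
The offsets above are defined cumulatively, so the documented 36K shared-area budget can be checked at build time. A small sketch, assuming the header is included from kernel code (BUILD_BUG_ON comes from <linux/kernel.h> in this era):

#include <linux/kernel.h>
#include "sep_driver_config.h"

/* Fails the build if the cumulative layout overruns the 36K shared area. */
static inline void sep_check_shared_area_layout(void)
{
	BUILD_BUG_ON(SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES +
		     SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES >
		     36 * 1024);
}
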
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644
index ea6abd8a14b..00000000000
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- *
- * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef SEP_DRIVER_HW_DEFS__H
-#define SEP_DRIVER_HW_DEFS__H
-
-/*--------------------------------------------------------------------------*/
-/* Abstract: HW Registers Defines. */
-/* */
-/* Note: This file was automatically created !!! */
-/* DO NOT EDIT THIS FILE !!! */
-/*--------------------------------------------------------------------------*/
-
-
-/* cf registers */
-#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
-#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
-#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
-#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
-#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
-#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
-#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
-#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
-#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
-#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
-#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
-#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
-#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
-#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
-#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
-#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
-#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
-#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
-#define HW_R3B_REG_ADDR 0x00C0UL
-#define HW_R4B_REG_ADDR 0x0100UL
-#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
-#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
-#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
-#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
-#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
-#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
-#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
-#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
-#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
-#define HW_CSA_REG_ADDR 0x0140UL
-#define HW_SINB_REG_ADDR 0x0180UL
-#define HW_SOUTB_REG_ADDR 0x0184UL
-#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
-#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
-#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
-#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
-#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
-#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
-#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
-#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
-#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
-#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
-#define HW_PKI_CLR_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
-#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
-#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
-#define HW_DES_KEY_0_REG_ADDR 0x0208UL
-#define HW_DES_KEY_1_REG_ADDR 0x020CUL
-#define HW_DES_KEY_2_REG_ADDR 0x0210UL
-#define HW_DES_KEY_3_REG_ADDR 0x0214UL
-#define HW_DES_KEY_4_REG_ADDR 0x0218UL
-#define HW_DES_KEY_5_REG_ADDR 0x021CUL
-#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
-#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
-#define HW_DES_IV_0_REG_ADDR 0x0228UL
-#define HW_DES_IV_1_REG_ADDR 0x022CUL
-#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
-#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
-#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
-#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
-#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
-#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
-#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
-#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
-#define HW_AES_KEY_0_REG_ADDR 0x0400UL
-#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
-#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
-#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
-#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
-#define HW_AES_IV_0_REG_ADDR 0x0440UL
-#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
-#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
-#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
-#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
-#define HW_AES_CTR1_REG_ADDR 0x0460UL
-#define HW_AES_SK_REG_ADDR 0x0478UL
-#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
-#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
-#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
-#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
-#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
-#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
-#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
-#define HW_HASH_H0_REG_ADDR 0x0640UL
-#define HW_HASH_H1_REG_ADDR 0x0644UL
-#define HW_HASH_H2_REG_ADDR 0x0648UL
-#define HW_HASH_H3_REG_ADDR 0x064CUL
-#define HW_HASH_H4_REG_ADDR 0x0650UL
-#define HW_HASH_H5_REG_ADDR 0x0654UL
-#define HW_HASH_H6_REG_ADDR 0x0658UL
-#define HW_HASH_H7_REG_ADDR 0x065CUL
-#define HW_HASH_H8_REG_ADDR 0x0660UL
-#define HW_HASH_H9_REG_ADDR 0x0664UL
-#define HW_HASH_H10_REG_ADDR 0x0668UL
-#define HW_HASH_H11_REG_ADDR 0x066CUL
-#define HW_HASH_H12_REG_ADDR 0x0670UL
-#define HW_HASH_H13_REG_ADDR 0x0674UL
-#define HW_HASH_H14_REG_ADDR 0x0678UL
-#define HW_HASH_H15_REG_ADDR 0x067CUL
-#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
-#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
-#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
-#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
-#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
-#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
-#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
-#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
-#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
-#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
-#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
-#define HW_HASH_DATA_REG_ADDR 0x07ECUL
-#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
-#define HW_DRNG_VALID_REG_ADDR 0x0804UL
-#define HW_DRNG_DATA_REG_ADDR 0x0808UL
-#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
-#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
-#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
-#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
-#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
-#define HW_CLK_STATUS_REG_ADDR 0x0824UL
-#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
-#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
-#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
-#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
-#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
-#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
-#define HW_AES_BUSY_REG_ADDR 0x0914UL
-#define HW_DES_BUSY_REG_ADDR 0x0918UL
-#define HW_HASH_BUSY_REG_ADDR 0x091CUL
-#define HW_CONTENT_REG_ADDR 0x0924UL
-#define HW_VERSION_REG_ADDR 0x0928UL
-#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
-#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
-#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
-#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
-#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
-#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
-#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
-#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
-#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
-#define HW_OLD_DATA_REG_ADDR 0x0C48UL
-#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
-#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
-#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
-#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
-#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
-#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
-#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
-#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
-#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
-#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
-#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
-#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
-#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
-#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
-#define HW_HOST_IRR_REG_ADDR 0x0A00UL
-#define HW_HOST_IMR_REG_ADDR 0x0A04UL
-#define HW_HOST_ICR_REG_ADDR 0x0A08UL
-#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
-#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
-#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
-#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
-#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
-#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
-#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
-#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
-#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
-#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
-#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
-#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
-#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
-#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
-#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
-#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
-#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
-#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
-#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
-#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
-#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
-#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
-#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
-#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
-#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
-
-#endif /* SEP_DRIVER_HW_DEFS__H */
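
These offsets are applied to the BAR that sep_probe() maps into sep->reg_addr. The removed sep_driver.c accesses them through sep_read_reg()/sep_write_reg() helpers whose bodies do not appear in this diff, so the following is only a sketch of the usual MMIO pattern:

#include <linux/io.h>

/* Sketch only: MMIO accessors over the mapped register BAR. */
static inline void sep_write_reg_sketch(void __iomem *reg_addr,
					unsigned long offset, u32 value)
{
	writel(value, reg_addr + offset);
}

static inline u32 sep_read_reg_sketch(void __iomem *reg_addr,
				      unsigned long offset)
{
	return readl(reg_addr + offset);
}

/* e.g. clearing all interrupt causes, as the probe path does:
 *	sep_write_reg_sketch(sep->reg_addr, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
 */
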
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index eb3a619c6a9..beab400805a 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -168,17 +168,6 @@ struct slic_cmdqueue {
struct slic_spinlock lock;
};
-#ifdef STATUS_SUCCESS
-#undef STATUS_SUCCESS
-#endif
-
-#define STATUS_SUCCESS 0
-#define STATUS_PENDING 0
-#define STATUS_FAILURE -1
-#define STATUS_ERROR -2
-#define STATUS_NOT_SUPPORTED -3
-#define STATUS_BUFFER_TOO_SHORT -4
-
#define SLIC_MAX_CARDS 32
#define SLIC_MAX_PORTS 4 /* Max # of ports per card */
@@ -510,7 +499,6 @@ struct adapter {
struct slic_ifevents if_events;
struct slic_stats inicstats_prev;
struct slicnet_stats slic_stats;
- struct net_device_stats stats;
};
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index bebf0fd2af8..f8c4b127e83 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -97,66 +97,6 @@
#include "slichw.h"
#include "slic.h"
-static struct net_device_stats *slic_get_stats(struct net_device *dev);
-static int slic_entry_open(struct net_device *dev);
-static int slic_entry_halt(struct net_device *dev);
-static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev);
-static void slic_xmit_fail(struct adapter *adapter, struct sk_buff *skb,
- void *cmd, u32 skbtype, u32 status);
-static void slic_config_pci(struct pci_dev *pcidev);
-static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter);
-static int slic_mac_set_address(struct net_device *dev, void *ptr);
-static void slic_link_event_handler(struct adapter *adapter);
-static void slic_upr_request_complete(struct adapter *adapter, u32 isr);
-static int slic_rspqueue_init(struct adapter *adapter);
-static void slic_rspqueue_free(struct adapter *adapter);
-static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter);
-static int slic_cmdq_init(struct adapter *adapter);
-static void slic_cmdq_free(struct adapter *adapter);
-static void slic_cmdq_reset(struct adapter *adapter);
-static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page);
-static void slic_cmdq_getdone(struct adapter *adapter);
-static void slic_cmdq_putdone_irq(struct adapter *adapter,
- struct slic_hostcmd *cmd);
-static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter);
-static int slic_rcvqueue_init(struct adapter *adapter);
-static int slic_rcvqueue_fill(struct adapter *adapter);
-static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb);
-static void slic_rcvqueue_free(struct adapter *adapter);
-static void slic_adapter_set_hwaddr(struct adapter *adapter);
-static int slic_card_init(struct sliccard *card, struct adapter *adapter);
-static void slic_intagg_set(struct adapter *adapter, u32 value);
-static int slic_card_download(struct adapter *adapter);
-static u32 slic_card_locate(struct adapter *adapter);
-static int slic_if_init(struct adapter *adapter);
-static int slic_adapter_allocresources(struct adapter *adapter);
-static void slic_adapter_freeresources(struct adapter *adapter);
-static void slic_link_config(struct adapter *adapter, u32 linkspeed,
- u32 linkduplex);
-static void slic_unmap_mmio_space(struct adapter *adapter);
-static void slic_card_cleanup(struct sliccard *card);
-static void slic_soft_reset(struct adapter *adapter);
-static bool slic_mac_filter(struct adapter *adapter,
- struct ether_header *ether_frame);
-static void slic_mac_address_config(struct adapter *adapter);
-static void slic_mac_config(struct adapter *adapter);
-static void slic_mcast_set_mask(struct adapter *adapter);
-static void slic_config_set(struct adapter *adapter, bool linkchange);
-static void slic_config_clear(struct adapter *adapter);
-static void slic_config_get(struct adapter *adapter, u32 config,
- u32 configh);
-static void slic_timer_load_check(ulong context);
-static void slic_assert_fail(void);
-static ushort slic_eeprom_cksum(char *m, int len);
-static void slic_upr_start(struct adapter *adapter);
-static void slic_link_upr_complete(struct adapter *adapter, u32 Isr);
-static int slic_upr_request(struct adapter *adapter, u32 upr_request,
- u32 upr_data, u32 upr_data_h, u32 upr_buffer,
- u32 upr_buffer_h);
-static void slic_mcast_set_list(struct net_device *dev);
-
-
static uint slic_first_init = 1;
static char *slic_banner = "Alacritech SLIC Technology(tm) Server "\
"and Storage Accelerator (Non-Accelerated)";
@@ -206,6 +146,17 @@ MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
#undef ASSERT
#endif
+static void slic_assert_fail(void)
+{
+ u32 cpuid;
+ u32 curr_pid;
+ cpuid = smp_processor_id();
+ curr_pid = current->pid;
+
+ printk(KERN_ERR "%s CPU # %d ---- PID # %d\n",
+ __func__, cpuid, curr_pid);
+}
+
#ifndef ASSERT
#define ASSERT(a) do { \
if (!(a)) { \
@@ -241,13 +192,6 @@ MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
_adapter->handle_lock.flags); \
}
-static void slic_debug_init(void);
-static void slic_debug_cleanup(void);
-static void slic_debug_adapter_create(struct adapter *adapter);
-static void slic_debug_adapter_destroy(struct adapter *adapter);
-static void slic_debug_card_create(struct sliccard *card);
-static void slic_debug_card_destroy(struct sliccard *card);
-
static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
{
writel(value, reg);
@@ -272,1016 +216,6 @@ static inline void slic_reg64_write(struct adapter *adapter, void __iomem *reg,
adapter->bit64reglock.flags);
}
-static void slic_init_driver(void)
-{
- if (slic_first_init) {
- slic_first_init = 0;
- spin_lock_init(&slic_global.driver_lock.lock);
- slic_debug_init();
- }
-}
-
-static void slic_init_adapter(struct net_device *netdev,
- struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry,
- void __iomem *memaddr, int chip_idx)
-{
- ushort index;
- struct slic_handle *pslic_handle;
- struct adapter *adapter = netdev_priv(netdev);
-
-/* adapter->pcidev = pcidev;*/
- adapter->vendid = pci_tbl_entry->vendor;
- adapter->devid = pci_tbl_entry->device;
- adapter->subsysid = pci_tbl_entry->subdevice;
- adapter->busnumber = pcidev->bus->number;
- adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
- adapter->functionnumber = (pcidev->devfn & 0x7);
- adapter->memorylength = pci_resource_len(pcidev, 0);
- adapter->slic_regs = (__iomem struct slic_regs *)memaddr;
- adapter->irq = pcidev->irq;
-/* adapter->netdev = netdev;*/
- adapter->next_netdevice = head_netdevice;
- head_netdevice = netdev;
- adapter->chipid = chip_idx;
- adapter->port = 0; /*adapter->functionnumber;*/
- adapter->cardindex = adapter->port;
- adapter->memorybase = memaddr;
- spin_lock_init(&adapter->upr_lock.lock);
- spin_lock_init(&adapter->bit64reglock.lock);
- spin_lock_init(&adapter->adapter_lock.lock);
- spin_lock_init(&adapter->reset_lock.lock);
- spin_lock_init(&adapter->handle_lock.lock);
-
- adapter->card_size = 1;
- /*
- Initialize slic_handle array
- */
- ASSERT(SLIC_CMDQ_MAXCMDS <= 0xFFFF);
- /*
- Start with 1. 0 is an invalid host handle.
- */
- for (index = 1, pslic_handle = &adapter->slic_handles[1];
- index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
-
- pslic_handle->token.handle_index = index;
- pslic_handle->type = SLIC_HANDLE_FREE;
- pslic_handle->next = adapter->pfree_slic_handles;
- adapter->pfree_slic_handles = pslic_handle;
- }
- adapter->pshmem = (struct slic_shmem *)
- pci_alloc_consistent(adapter->pcidev,
- sizeof(struct slic_shmem),
- &adapter->
- phys_shmem);
- ASSERT(adapter->pshmem);
-
- memset(adapter->pshmem, 0, sizeof(struct slic_shmem));
-
- return;
-}
-
-static const struct net_device_ops slic_netdev_ops = {
- .ndo_open = slic_entry_open,
- .ndo_stop = slic_entry_halt,
- .ndo_start_xmit = slic_xmit_start,
- .ndo_do_ioctl = slic_ioctl,
- .ndo_set_mac_address = slic_mac_set_address,
- .ndo_get_stats = slic_get_stats,
- .ndo_set_multicast_list = slic_mcast_set_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-static int __devinit slic_entry_probe(struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry)
-{
- static int cards_found;
- static int did_version;
- int err = -ENODEV;
- struct net_device *netdev;
- struct adapter *adapter;
- void __iomem *memmapped_ioaddr = NULL;
- u32 status = 0;
- ulong mmio_start = 0;
- ulong mmio_len = 0;
- struct sliccard *card = NULL;
- int pci_using_dac = 0;
-
- slic_global.dynamic_intagg = dynamic_intagg;
-
- err = pci_enable_device(pcidev);
-
- if (err)
- return err;
-
- if (slic_debug > 0 && did_version++ == 0) {
- printk(KERN_DEBUG "%s\n", slic_banner);
- printk(KERN_DEBUG "%s\n", slic_proc_version);
- }
-
- if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
- dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for "
- "consistent allocations\n");
- goto err_out_disable_pci;
- }
- } else if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
- pci_using_dac = 0;
- pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pcidev->dev, "no usable DMA configuration\n");
- goto err_out_disable_pci;
- }
-
- err = pci_request_regions(pcidev, DRV_NAME);
- if (err) {
- dev_err(&pcidev->dev, "can't obtain PCI resources\n");
- goto err_out_disable_pci;
- }
-
- pci_set_master(pcidev);
-
- netdev = alloc_etherdev(sizeof(struct adapter));
- if (!netdev) {
- err = -ENOMEM;
- goto err_out_exit_slic_probe;
- }
-
- SET_NETDEV_DEV(netdev, &pcidev->dev);
-
- pci_set_drvdata(pcidev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pcidev = pcidev;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
-
- mmio_start = pci_resource_start(pcidev, 0);
- mmio_len = pci_resource_len(pcidev, 0);
-
-
-/* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/
- memmapped_ioaddr = ioremap(mmio_start, mmio_len);
- if (!memmapped_ioaddr) {
- dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
- mmio_len, mmio_start);
- goto err_out_free_netdev;
- }
-
- slic_config_pci(pcidev);
-
- slic_init_driver();
-
- slic_init_adapter(netdev,
- pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found);
-
- status = slic_card_locate(adapter);
- if (status) {
- dev_err(&pcidev->dev, "cannot locate card\n");
- goto err_out_free_mmio_region;
- }
-
- card = adapter->card;
-
- if (!adapter->allocated) {
- card->adapters_allocated++;
- adapter->allocated = 1;
- }
-
- status = slic_card_init(card, adapter);
-
- if (status != STATUS_SUCCESS) {
- card->state = CARD_FAIL;
- adapter->state = ADAPT_FAIL;
- adapter->linkstate = LINK_DOWN;
- dev_err(&pcidev->dev, "FAILED status[%x]\n", status);
- } else {
- slic_adapter_set_hwaddr(adapter);
- }
-
- netdev->base_addr = (unsigned long)adapter->memorybase;
- netdev->irq = adapter->irq;
- netdev->netdev_ops = &slic_netdev_ops;
-
- slic_debug_adapter_create(adapter);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unmap;
- }
-
- cards_found++;
-
- return status;
-
-err_out_unmap:
- iounmap(memmapped_ioaddr);
-err_out_free_mmio_region:
- release_mem_region(mmio_start, mmio_len);
-err_out_free_netdev:
- free_netdev(netdev);
-err_out_exit_slic_probe:
- pci_release_regions(pcidev);
-err_out_disable_pci:
- pci_disable_device(pcidev);
- return err;
-}
-
-static int slic_entry_open(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card = adapter->card;
- u32 locked = 0;
- int status;
-
- ASSERT(adapter);
- ASSERT(card);
-
- netif_stop_queue(adapter->netdev);
-
- spin_lock_irqsave(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
- locked = 1;
- if (!adapter->activated) {
- card->adapters_activated++;
- slic_global.num_slic_ports_active++;
- adapter->activated = 1;
- }
- status = slic_if_init(adapter);
-
- if (status != STATUS_SUCCESS) {
- if (adapter->activated) {
- card->adapters_activated--;
- slic_global.num_slic_ports_active--;
- adapter->activated = 0;
- }
- if (locked) {
- spin_unlock_irqrestore(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
- locked = 0;
- }
- return status;
- }
- if (!card->master)
- card->master = adapter;
-
- if (locked) {
- spin_unlock_irqrestore(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
- locked = 0;
- }
-
- return STATUS_SUCCESS;
-}
-
-static void __devexit slic_entry_remove(struct pci_dev *pcidev)
-{
- struct net_device *dev = pci_get_drvdata(pcidev);
- u32 mmio_start = 0;
- uint mmio_len = 0;
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card;
- struct mcast_address *mcaddr, *mlist;
-
- ASSERT(adapter);
- slic_adapter_freeresources(adapter);
- slic_unmap_mmio_space(adapter);
- unregister_netdev(dev);
-
- mmio_start = pci_resource_start(pcidev, 0);
- mmio_len = pci_resource_len(pcidev, 0);
-
- release_mem_region(mmio_start, mmio_len);
-
- iounmap((void __iomem *)dev->base_addr);
- /* free multicast addresses */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- mcaddr = mlist;
- mlist = mlist->next;
- kfree(mcaddr);
- }
- ASSERT(adapter->card);
- card = adapter->card;
- ASSERT(card->adapters_allocated);
- card->adapters_allocated--;
- adapter->allocated = 0;
- if (!card->adapters_allocated) {
- struct sliccard *curr_card = slic_global.slic_card;
- if (curr_card == card) {
- slic_global.slic_card = card->next;
- } else {
- while (curr_card->next != card)
- curr_card = curr_card->next;
- ASSERT(curr_card);
- curr_card->next = card->next;
- }
- ASSERT(slic_global.num_slic_cards);
- slic_global.num_slic_cards--;
- slic_card_cleanup(card);
- }
- kfree(dev);
- pci_release_regions(pcidev);
-}
-
-static int slic_entry_halt(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card = adapter->card;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
-
- spin_lock_irqsave(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
- ASSERT(card);
- netif_stop_queue(adapter->netdev);
- adapter->state = ADAPT_DOWN;
- adapter->linkstate = LINK_DOWN;
- adapter->upr_list = NULL;
- adapter->upr_busy = 0;
- adapter->devflags_prev = 0;
- ASSERT(card->adapter[adapter->cardindex] == adapter);
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
- adapter->all_reg_writes++;
- adapter->icr_reg_writes++;
- slic_config_clear(adapter);
- if (adapter->activated) {
- card->adapters_activated--;
- slic_global.num_slic_ports_active--;
- adapter->activated = 0;
- }
-#ifdef AUTOMATIC_RESET
- slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH);
-#endif
- /*
- * Reset the adapter's cmd queues
- */
- slic_cmdq_reset(adapter);
-
-#ifdef AUTOMATIC_RESET
- if (!card->adapters_activated)
- slic_card_init(card, adapter);
-#endif
-
- spin_unlock_irqrestore(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
- return STATUS_SUCCESS;
-}
-
-static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct ethtool_cmd edata;
- struct ethtool_cmd ecmd;
- u32 data[7];
- u32 intagg;
-
- ASSERT(rq);
- switch (cmd) {
- case SIOCSLICSETINTAGG:
- if (copy_from_user(data, rq->ifr_data, 28))
- return -EFAULT;
- intagg = data[0];
- dev_err(&dev->dev, "%s: set interrupt aggregation to %d\n",
- __func__, intagg);
- slic_intagg_set(adapter, intagg);
- return 0;
-
-#ifdef SLIC_TRACE_DUMP_ENABLED
- case SIOCSLICTRACEDUMP:
- {
- u32 value;
- DBG_IOCTL("slic_ioctl SIOCSLIC_TRACE_DUMP\n");
-
- if (copy_from_user(data, rq->ifr_data, 28)) {
- PRINT_ERROR
- ("slic: copy_from_user FAILED getting initial simba param\n");
- return -EFAULT;
- }
-
- value = data[0];
- if (tracemon_request == SLIC_DUMP_DONE) {
- PRINT_ERROR
- ("ATK Diagnostic Trace Dump Requested\n");
- tracemon_request = SLIC_DUMP_REQUESTED;
- tracemon_request_type = value;
- tracemon_timestamp = jiffies;
- } else if ((tracemon_request == SLIC_DUMP_REQUESTED) ||
- (tracemon_request ==
- SLIC_DUMP_IN_PROGRESS)) {
- PRINT_ERROR
- ("ATK Diagnostic Trace Dump Requested but already in progress... ignore\n");
- } else {
- PRINT_ERROR
- ("ATK Diagnostic Trace Dump Requested\n");
- tracemon_request = SLIC_DUMP_REQUESTED;
- tracemon_request_type = value;
- tracemon_timestamp = jiffies;
- }
- return 0;
- }
-#endif
- case SIOCETHTOOL:
- ASSERT(adapter);
- if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
- return -EFAULT;
-
- if (ecmd.cmd == ETHTOOL_GSET) {
- edata.supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_MII);
- edata.port = PORT_MII;
- edata.transceiver = XCVR_INTERNAL;
- edata.phy_address = 0;
- if (adapter->linkspeed == LINK_100MB)
- edata.speed = SPEED_100;
- else if (adapter->linkspeed == LINK_10MB)
- edata.speed = SPEED_10;
- else
- edata.speed = 0;
-
- if (adapter->linkduplex == LINK_FULLD)
- edata.duplex = DUPLEX_FULL;
- else
- edata.duplex = DUPLEX_HALF;
-
- edata.autoneg = AUTONEG_ENABLE;
- edata.maxtxpkt = 1;
- edata.maxrxpkt = 1;
- if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
- return -EFAULT;
-
- } else if (ecmd.cmd == ETHTOOL_SSET) {
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (adapter->linkspeed == LINK_100MB)
- edata.speed = SPEED_100;
- else if (adapter->linkspeed == LINK_10MB)
- edata.speed = SPEED_10;
- else
- edata.speed = 0;
-
- if (adapter->linkduplex == LINK_FULLD)
- edata.duplex = DUPLEX_FULL;
- else
- edata.duplex = DUPLEX_HALF;
-
- edata.autoneg = AUTONEG_ENABLE;
- edata.maxtxpkt = 1;
- edata.maxrxpkt = 1;
- if ((ecmd.speed != edata.speed) ||
- (ecmd.duplex != edata.duplex)) {
- u32 speed;
- u32 duplex;
-
- if (ecmd.speed == SPEED_10)
- speed = 0;
- else
- speed = PCR_SPEED_100;
- if (ecmd.duplex == DUPLEX_FULL)
- duplex = PCR_DUPLEX_FULL;
- else
- duplex = 0;
- slic_link_config(adapter, speed, duplex);
- slic_link_event_handler(adapter);
- }
- }
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-#define XMIT_FAIL_LINK_STATE 1
-#define XMIT_FAIL_ZERO_LENGTH 2
-#define XMIT_FAIL_HOSTCMD_FAIL 3
-
-static void slic_xmit_build_request(struct adapter *adapter,
- struct slic_hostcmd *hcmd, struct sk_buff *skb)
-{
- struct slic_host64_cmd *ihcmd;
- ulong phys_addr;
-
- ihcmd = &hcmd->cmd64;
-
- ihcmd->flags = (adapter->port << IHFLG_IFSHFT);
- ihcmd->command = IHCMD_XMT_REQ;
- ihcmd->u.slic_buffers.totlen = skb->len;
- phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
- ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
- ihcmd->u.slic_buffers.bufs[0].length = skb->len;
-#if defined(CONFIG_X86_64)
- hcmd->cmdsize = (u32) ((((u64)&ihcmd->u.slic_buffers.bufs[1] -
- (u64) hcmd) + 31) >> 5);
-#elif defined(CONFIG_X86)
- hcmd->cmdsize = ((((u32) &ihcmd->u.slic_buffers.bufs[1] -
- (u32) hcmd) + 31) >> 5);
-#else
- Stop Compilation;
-#endif
-}
-
-#define NORMAL_ETHFRAME 0
-
-static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
-{
- struct sliccard *card;
- struct adapter *adapter = netdev_priv(dev);
- struct slic_hostcmd *hcmd = NULL;
- u32 status = 0;
- u32 skbtype = NORMAL_ETHFRAME;
- void *offloadcmd = NULL;
-
- card = adapter->card;
- ASSERT(card);
- if ((adapter->linkstate != LINK_UP) ||
- (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
- status = XMIT_FAIL_LINK_STATE;
- goto xmit_fail;
-
- } else if (skb->len == 0) {
- status = XMIT_FAIL_ZERO_LENGTH;
- goto xmit_fail;
- }
-
- if (skbtype == NORMAL_ETHFRAME) {
- hcmd = slic_cmdq_getfree(adapter);
- if (!hcmd) {
- adapter->xmitq_full = 1;
- status = XMIT_FAIL_HOSTCMD_FAIL;
- goto xmit_fail;
- }
- ASSERT(hcmd->pslic_handle);
- ASSERT(hcmd->cmd64.hosthandle ==
- hcmd->pslic_handle->token.handle_token);
- hcmd->skb = skb;
- hcmd->busy = 1;
- hcmd->type = SLIC_CMD_DUMB;
- if (skbtype == NORMAL_ETHFRAME)
- slic_xmit_build_request(adapter, hcmd, skb);
- }
- adapter->stats.tx_packets++;
- adapter->stats.tx_bytes += skb->len;
-
-#ifdef DEBUG_DUMP
- if (adapter->kill_card) {
- struct slic_host64_cmd ihcmd;
-
- ihcmd = &hcmd->cmd64;
-
- ihcmd->flags |= 0x40;
- adapter->kill_card = 0; /* only do this once */
- }
-#endif
- if (hcmd->paddrh == 0) {
- slic_reg32_write(&adapter->slic_regs->slic_cbar,
- (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH);
- } else {
- slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64,
- (hcmd->paddrl | hcmd->cmdsize),
- &adapter->slic_regs->slic_addr_upper,
- hcmd->paddrh, DONT_FLUSH);
- }
-xmit_done:
- return NETDEV_TX_OK;
-xmit_fail:
- slic_xmit_fail(adapter, skb, offloadcmd, skbtype, status);
- goto xmit_done;
-}
-
-static void slic_xmit_fail(struct adapter *adapter,
- struct sk_buff *skb,
- void *cmd, u32 skbtype, u32 status)
-{
- if (adapter->xmitq_full)
- netif_stop_queue(adapter->netdev);
- if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
- switch (status) {
- case XMIT_FAIL_LINK_STATE:
- dev_err(&adapter->netdev->dev,
- "reject xmit skb[%p: %x] linkstate[%s] "
- "adapter[%s:%d] card[%s:%d]\n",
- skb, skb->pkt_type,
- SLIC_LINKSTATE(adapter->linkstate),
- SLIC_ADAPTER_STATE(adapter->state),
- adapter->state,
- SLIC_CARD_STATE(adapter->card->state),
- adapter->card->state);
- break;
- case XMIT_FAIL_ZERO_LENGTH:
- dev_err(&adapter->netdev->dev,
- "xmit_start skb->len == 0 skb[%p] type[%x]\n",
- skb, skb->pkt_type);
- break;
- case XMIT_FAIL_HOSTCMD_FAIL:
- dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands "
- "available\n", skb, skb->pkt_type);
- break;
- default:
- ASSERT(0);
- }
- }
- dev_kfree_skb(skb);
- adapter->stats.tx_dropped++;
-}
-
-static void slic_rcv_handle_error(struct adapter *adapter,
- struct slic_rcvbuf *rcvbuf)
-{
- struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
-
- if (adapter->devid != SLIC_1GB_DEVICE_ID) {
- if (hdr->frame_status14 & VRHSTAT_802OE)
- adapter->if_events.oflow802++;
- if (hdr->frame_status14 & VRHSTAT_TPOFLO)
- adapter->if_events.Tprtoflow++;
- if (hdr->frame_status_b14 & VRHSTATB_802UE)
- adapter->if_events.uflow802++;
- if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
- adapter->if_events.rcvearly++;
- adapter->stats.rx_fifo_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
- adapter->if_events.Bufov++;
- adapter->stats.rx_over_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
- adapter->if_events.Carre++;
- adapter->stats.tx_carrier_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_LONGE)
- adapter->if_events.Longe++;
- if (hdr->frame_status_b14 & VRHSTATB_PREA)
- adapter->if_events.Invp++;
- if (hdr->frame_status_b14 & VRHSTATB_CRC) {
- adapter->if_events.Crc++;
- adapter->stats.rx_crc_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_DRBL)
- adapter->if_events.Drbl++;
- if (hdr->frame_status_b14 & VRHSTATB_CODE)
- adapter->if_events.Code++;
- if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
- adapter->if_events.TpCsum++;
- if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
- adapter->if_events.TpHlen++;
- if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
- adapter->if_events.IpCsum++;
- if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
- adapter->if_events.IpLen++;
- if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
- adapter->if_events.IpHlen++;
- } else {
- if (hdr->frame_statusGB & VGBSTAT_XPERR) {
- u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
-
- if (xerr == VGBSTAT_XCSERR)
- adapter->if_events.TpCsum++;
- if (xerr == VGBSTAT_XUFLOW)
- adapter->if_events.Tprtoflow++;
- if (xerr == VGBSTAT_XHLEN)
- adapter->if_events.TpHlen++;
- }
- if (hdr->frame_statusGB & VGBSTAT_NETERR) {
- u32 nerr =
- (hdr->
- frame_statusGB >> VGBSTAT_NERRSHFT) &
- VGBSTAT_NERRMSK;
- if (nerr == VGBSTAT_NCSERR)
- adapter->if_events.IpCsum++;
- if (nerr == VGBSTAT_NUFLOW)
- adapter->if_events.IpLen++;
- if (nerr == VGBSTAT_NHLEN)
- adapter->if_events.IpHlen++;
- }
- if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
- u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
-
- if (lerr == VGBSTAT_LDEARLY)
- adapter->if_events.rcvearly++;
- if (lerr == VGBSTAT_LBOFLO)
- adapter->if_events.Bufov++;
- if (lerr == VGBSTAT_LCODERR)
- adapter->if_events.Code++;
- if (lerr == VGBSTAT_LDBLNBL)
- adapter->if_events.Drbl++;
- if (lerr == VGBSTAT_LCRCERR)
- adapter->if_events.Crc++;
- if (lerr == VGBSTAT_LOFLO)
- adapter->if_events.oflow802++;
- if (lerr == VGBSTAT_LUFLO)
- adapter->if_events.uflow802++;
- }
- }
- return;
-}
-
-#define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
-#define M_FAST_PATH 0x0040
-
-static void slic_rcv_handler(struct adapter *adapter)
-{
- struct sk_buff *skb;
- struct slic_rcvbuf *rcvbuf;
- u32 frames = 0;
-
- while ((skb = slic_rcvqueue_getnext(adapter))) {
- u32 rx_bytes;
-
- ASSERT(skb->head);
- rcvbuf = (struct slic_rcvbuf *)skb->head;
- adapter->card->events++;
- if (rcvbuf->status & IRHDDR_ERR) {
- adapter->rx_errors++;
- slic_rcv_handle_error(adapter, rcvbuf);
- slic_rcvqueue_reinsert(adapter, skb);
- continue;
- }
-
- if (!slic_mac_filter(adapter, (struct ether_header *)
- rcvbuf->data)) {
- slic_rcvqueue_reinsert(adapter, skb);
- continue;
- }
- skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
- rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
- skb_put(skb, rx_bytes);
- adapter->stats.rx_packets++;
- adapter->stats.rx_bytes += rx_bytes;
-#if SLIC_OFFLOAD_IP_CHECKSUM
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-#endif
-
- skb->dev = adapter->netdev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx(skb);
-
- ++frames;
-#if SLIC_INTERRUPT_PROCESS_LIMIT
- if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
- adapter->rcv_interrupt_yields++;
- break;
- }
-#endif
- }
- adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
-}
-
-static void slic_xmit_complete(struct adapter *adapter)
-{
- struct slic_hostcmd *hcmd;
- struct slic_rspbuf *rspbuf;
- u32 frames = 0;
- struct slic_handle_word slic_handle_word;
-
- do {
- rspbuf = slic_rspqueue_getnext(adapter);
- if (!rspbuf)
- break;
- adapter->xmit_completes++;
- adapter->card->events++;
- /*
- Get the complete host command buffer
- */
- slic_handle_word.handle_token = rspbuf->hosthandle;
- ASSERT(slic_handle_word.handle_index);
- ASSERT(slic_handle_word.handle_index <= SLIC_CMDQ_MAXCMDS);
- hcmd =
- (struct slic_hostcmd *)
- adapter->slic_handles[slic_handle_word.handle_index].
- address;
-/* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
- ASSERT(hcmd);
- ASSERT(hcmd->pslic_handle ==
- &adapter->slic_handles[slic_handle_word.handle_index]);
- if (hcmd->type == SLIC_CMD_DUMB) {
- if (hcmd->skb)
- dev_kfree_skb_irq(hcmd->skb);
- slic_cmdq_putdone_irq(adapter, hcmd);
- }
- rspbuf->status = 0;
- rspbuf->hosthandle = 0;
- frames++;
- } while (1);
- adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
-}
-
-static irqreturn_t slic_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct adapter *adapter = netdev_priv(dev);
- u32 isr;
-
- if ((adapter->pshmem) && (adapter->pshmem->isr)) {
- slic_reg32_write(&adapter->slic_regs->slic_icr,
- ICR_INT_MASK, FLUSH);
- isr = adapter->isrcopy = adapter->pshmem->isr;
- adapter->pshmem->isr = 0;
- adapter->num_isrs++;
- switch (adapter->card->state) {
- case CARD_UP:
- if (isr & ~ISR_IO) {
- if (isr & ISR_ERR) {
- adapter->error_interrupts++;
- if (isr & ISR_RMISS) {
- int count;
- int pre_count;
- int errors;
-
- struct slic_rcvqueue *rcvq =
- &adapter->rcvqueue;
-
- adapter->
- error_rmiss_interrupts++;
- if (!rcvq->errors)
- rcv_count = rcvq->count;
- pre_count = rcvq->count;
- errors = rcvq->errors;
-
- while (rcvq->count <
- SLIC_RCVQ_FILLTHRESH) {
- count =
- slic_rcvqueue_fill
- (adapter);
- if (!count)
- break;
- }
- } else if (isr & ISR_XDROP) {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x] "
- "ISR_XDROP \n", isr);
- } else {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x]\n",
- isr);
- }
- }
-
- if (isr & ISR_LEVENT) {
- adapter->linkevent_interrupts++;
- slic_link_event_handler(adapter);
- }
-
- if ((isr & ISR_UPC) ||
- (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- }
-
- if (isr & ISR_RCV) {
- adapter->rcv_interrupts++;
- slic_rcv_handler(adapter);
- }
-
- if (isr & ISR_CMD) {
- adapter->xmit_interrupts++;
- slic_xmit_complete(adapter);
- }
- break;
-
- case CARD_DOWN:
- if ((isr & ISR_UPC) ||
- (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- break;
-
- default:
- break;
- }
-
- adapter->isrcopy = 0;
- adapter->all_reg_writes += 2;
- adapter->isr_reg_writes++;
- slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH);
- } else {
- adapter->false_interrupts++;
- }
- return IRQ_HANDLED;
-}
-
-/*
- * slic_link_event_handler -
- *
- * Initiate a link configuration sequence. The link configuration begins
- * by issuing a READ_LINK_STATUS command to the Utility Processor on the
- * SLIC. Since the command finishes asynchronously, the UPR completion
- * routine will follow it up with a UP configuration write command, which
- * will also complete asynchronously.
- *
- */
-static void slic_link_event_handler(struct adapter *adapter)
-{
- int status;
- struct slic_shmem *pshmem;
-
- if (adapter->state != ADAPT_UP) {
- /* Adapter is not operational. Ignore. */
- return;
- }
-
- pshmem = (struct slic_shmem *)adapter->phys_shmem;
-
-#if defined(CONFIG_X86_64)
- status = slic_upr_request(adapter,
- SLIC_UPR_RLSR,
- SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
- SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
- 0, 0);
-#elif defined(CONFIG_X86)
- status = slic_upr_request(adapter, SLIC_UPR_RLSR,
- (u32) &pshmem->linkstatus, /* no 4GB wrap guaranteed */
- 0, 0, 0);
-#else
- Stop compilation;
-#endif
- ASSERT((status == STATUS_SUCCESS) || (status == STATUS_PENDING));
-}
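
slic_upr_request() above takes the link-status DMA address as separate low and high 32-bit halves via SLIC_GET_ADDR_LOW/SLIC_GET_ADDR_HIGH, whose definitions are not part of this hunk. A minimal user-space sketch of what such helpers typically look like, assuming a 64-bit bus address, is:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for SLIC_GET_ADDR_LOW/HIGH: split a 64-bit DMA
 * address into the two 32-bit halves that the card's address registers
 * expect.  These are illustrative, not the driver's actual macros. */
#define GET_ADDR_LOW(a)   ((uint32_t)((uint64_t)(a) & 0xFFFFFFFFu))
#define GET_ADDR_HIGH(a)  ((uint32_t)(((uint64_t)(a) >> 32) & 0xFFFFFFFFu))

int main(void)
{
	uint64_t dma = 0x0000000123456789ull;	/* example bus address */

	printf("low  = 0x%08X\n", (unsigned)GET_ADDR_LOW(dma));  /* 0x23456789 */
	printf("high = 0x%08X\n", (unsigned)GET_ADDR_HIGH(dma)); /* 0x00000001 */
	return 0;
}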
-
-static void slic_init_cleanup(struct adapter *adapter)
-{
- if (adapter->intrregistered) {
- adapter->intrregistered = 0;
- free_irq(adapter->netdev->irq, adapter->netdev);
-
- }
- if (adapter->pshmem) {
- pci_free_consistent(adapter->pcidev,
- sizeof(struct slic_shmem),
- adapter->pshmem, adapter->phys_shmem);
- adapter->pshmem = NULL;
- adapter->phys_shmem = (dma_addr_t) NULL;
- }
-
- if (adapter->pingtimerset) {
- adapter->pingtimerset = 0;
- del_timer(&adapter->pingtimer);
- }
-
- slic_rspqueue_free(adapter);
- slic_cmdq_free(adapter);
- slic_rcvqueue_free(adapter);
-}
-
-static struct net_device_stats *slic_get_stats(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- ASSERT(adapter);
- dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
- dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
- dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
- dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
- dev->stats.tx_heartbeat_errors = 0;
- dev->stats.tx_aborted_errors = 0;
- dev->stats.tx_window_errors = 0;
- dev->stats.tx_fifo_errors = 0;
- dev->stats.rx_frame_errors = 0;
- dev->stats.rx_length_errors = 0;
-
- return &dev->stats;
-}
-
-/*
- * Allocate a mcast_address structure to hold the multicast address.
- * Link it in.
- */
-static int slic_mcast_add_list(struct adapter *adapter, char *address)
-{
- struct mcast_address *mcaddr, *mlist;
-
- /* Check to see if it already exists */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- if (!compare_ether_addr(mlist->address, address))
- return STATUS_SUCCESS;
- mlist = mlist->next;
- }
-
- /* Doesn't already exist. Allocate a structure to hold it */
- mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
- if (mcaddr == NULL)
- return 1;
-
- memcpy(mcaddr->address, address, 6);
-
- mcaddr->next = adapter->mcastaddrs;
- adapter->mcastaddrs = mcaddr;
-
- return STATUS_SUCCESS;
-}
-
/*
* Functions to obtain the CRC corresponding to the destination mac address.
* This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
@@ -1362,44 +296,6 @@ static void slic_mcast_set_bit(struct adapter *adapter, char *address)
adapter->mcastmask |= (u64) 1 << crcpoly;
}
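
The comment above describes the multicast hash: a standard 32-bit reflected Ethernet CRC of the destination MAC selects one bit in the 64-bit mcastmask. The exact bit selection is outside this hunk; a minimal user-space sketch, assuming the usual reflected polynomial 0xEDB88320 and taking the low six bits of the CRC as the index, is:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Bit-serial reflected CRC-32 (poly 0xEDB88320), the usual Ethernet CRC. */
static uint32_t ether_crc32_le(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;		/* final XOR omitted; irrelevant for hashing */
}

int main(void)
{
	/* hypothetical multicast address */
	uint8_t mac[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
	uint32_t crc = ether_crc32_le(mac, sizeof(mac));
	uint64_t mask = (uint64_t)1 << (crc & 0x3F);	/* low 6 bits -> 0..63 */

	printf("crc = 0x%08X, mask bit %u\n", (unsigned)crc,
	       (unsigned)(crc & 0x3F));
	(void)mask;
	return 0;
}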
-static void slic_mcast_set_list(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- int status = STATUS_SUCCESS;
- char *addresses;
- struct netdev_hw_addr *ha;
-
- ASSERT(adapter);
-
- netdev_for_each_mc_addr(ha, dev) {
- addresses = (char *) &ha->addr;
- status = slic_mcast_add_list(adapter, addresses);
- if (status != STATUS_SUCCESS)
- break;
- slic_mcast_set_bit(adapter, addresses);
- }
-
- if (adapter->devflags_prev != dev->flags) {
- adapter->macopts = MAC_DIRECTED;
- if (dev->flags) {
- if (dev->flags & IFF_BROADCAST)
- adapter->macopts |= MAC_BCAST;
- if (dev->flags & IFF_PROMISC)
- adapter->macopts |= MAC_PROMISC;
- if (dev->flags & IFF_ALLMULTI)
- adapter->macopts |= MAC_ALLMCAST;
- if (dev->flags & IFF_MULTICAST)
- adapter->macopts |= MAC_MCAST;
- }
- adapter->devflags_prev = dev->flags;
- slic_config_set(adapter, true);
- } else {
- if (status == STATUS_SUCCESS)
- slic_mcast_set_mask(adapter);
- }
- return;
-}
-
static void slic_mcast_set_mask(struct adapter *adapter)
{
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
@@ -1439,123 +335,6 @@ static void slic_timer_ping(ulong dev)
add_timer(&adapter->pingtimer);
}
-/*
- * slic_if_init
- *
- * Perform initialization of our slic interface.
- *
- */
-static int slic_if_init(struct adapter *adapter)
-{
- struct sliccard *card = adapter->card;
- struct net_device *dev = adapter->netdev;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
- struct slic_shmem *pshmem;
- int status = 0;
-
- ASSERT(card);
-
- /* adapter should be down at this point */
- if (adapter->state != ADAPT_DOWN) {
- dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
- __func__);
- return -EIO;
- }
- ASSERT(adapter->linkstate == LINK_DOWN);
-
- adapter->devflags_prev = dev->flags;
- adapter->macopts = MAC_DIRECTED;
- if (dev->flags) {
- if (dev->flags & IFF_BROADCAST)
- adapter->macopts |= MAC_BCAST;
- if (dev->flags & IFF_PROMISC)
- adapter->macopts |= MAC_PROMISC;
- if (dev->flags & IFF_ALLMULTI)
- adapter->macopts |= MAC_ALLMCAST;
- if (dev->flags & IFF_MULTICAST)
- adapter->macopts |= MAC_MCAST;
- }
- status = slic_adapter_allocresources(adapter);
- if (status != STATUS_SUCCESS) {
- dev_err(&dev->dev,
- "%s: slic_adapter_allocresources FAILED %x\n",
- __func__, status);
- slic_adapter_freeresources(adapter);
- return status;
- }
-
- if (!adapter->queues_initialized) {
- if (slic_rspqueue_init(adapter))
- return -ENOMEM;
- if (slic_cmdq_init(adapter))
- return -ENOMEM;
- if (slic_rcvqueue_init(adapter))
- return -ENOMEM;
- adapter->queues_initialized = 1;
- }
-
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
- mdelay(1);
-
- if (!adapter->isp_initialized) {
- pshmem = (struct slic_shmem *)adapter->phys_shmem;
-
- spin_lock_irqsave(&adapter->bit64reglock.lock,
- adapter->bit64reglock.flags);
-
-#if defined(CONFIG_X86_64)
- slic_reg32_write(&slic_regs->slic_addr_upper,
- SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp,
- SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
-#elif defined(CONFIG_X86)
- slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH);
-#else
- Stop Compilations
-#endif
- spin_unlock_irqrestore(&adapter->bit64reglock.lock,
- adapter->bit64reglock.flags);
- adapter->isp_initialized = 1;
- }
-
- adapter->state = ADAPT_UP;
- if (!card->loadtimerset) {
- init_timer(&card->loadtimer);
- card->loadtimer.expires =
- jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
- card->loadtimer.data = (ulong) card;
- card->loadtimer.function = &slic_timer_load_check;
- add_timer(&card->loadtimer);
-
- card->loadtimerset = 1;
- }
-
- if (!adapter->pingtimerset) {
- init_timer(&adapter->pingtimer);
- adapter->pingtimer.expires =
- jiffies + (PING_TIMER_INTERVAL * HZ);
- adapter->pingtimer.data = (ulong) dev;
- adapter->pingtimer.function = &slic_timer_ping;
- add_timer(&adapter->pingtimer);
- adapter->pingtimerset = 1;
- adapter->card->pingstatus = ISR_PINGMASK;
- }
-
- /*
- * clear any pending events, then enable interrupts
- */
- adapter->isrcopy = 0;
- adapter->pshmem->isr = 0;
- slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH);
-
- slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
- slic_link_event_handler(adapter);
-
- return STATUS_SUCCESS;
-}
-
static void slic_unmap_mmio_space(struct adapter *adapter)
{
if (adapter->slic_regs)
@@ -1563,64 +342,6 @@ static void slic_unmap_mmio_space(struct adapter *adapter)
adapter->slic_regs = NULL;
}
-static int slic_adapter_allocresources(struct adapter *adapter)
-{
- if (!adapter->intrregistered) {
- int retval;
-
- spin_unlock_irqrestore(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
-
- retval = request_irq(adapter->netdev->irq,
- &slic_interrupt,
- IRQF_SHARED,
- adapter->netdev->name, adapter->netdev);
-
- spin_lock_irqsave(&slic_global.driver_lock.lock,
- slic_global.driver_lock.flags);
-
- if (retval) {
- dev_err(&adapter->netdev->dev,
- "request_irq (%s) FAILED [%x]\n",
- adapter->netdev->name, retval);
- return retval;
- }
- adapter->intrregistered = 1;
- }
- return STATUS_SUCCESS;
-}
-
-static void slic_config_pci(struct pci_dev *pcidev)
-{
- u16 pci_command;
- u16 new_command;
-
- pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
-
- new_command = pci_command | PCI_COMMAND_MASTER
- | PCI_COMMAND_MEMORY
- | PCI_COMMAND_INVALIDATE
- | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
- if (pci_command != new_command)
- pci_write_config_word(pcidev, PCI_COMMAND, new_command);
-}
-
-static void slic_adapter_freeresources(struct adapter *adapter)
-{
- slic_init_cleanup(adapter);
- memset(&adapter->stats, 0, sizeof(struct net_device_stats));
- adapter->error_interrupts = 0;
- adapter->rcv_interrupts = 0;
- adapter->xmit_interrupts = 0;
- adapter->linkevent_interrupts = 0;
- adapter->upr_interrupts = 0;
- adapter->num_isrs = 0;
- adapter->xmit_completes = 0;
- adapter->rcv_broadcasts = 0;
- adapter->rcv_multicasts = 0;
- adapter->rcv_unicasts = 0;
-}
-
/*
* slic_link_config
*
@@ -1774,18 +495,6 @@ static void slic_link_config(struct adapter *adapter,
}
}
-static void slic_card_cleanup(struct sliccard *card)
-{
- if (card->loadtimerset) {
- card->loadtimerset = 0;
- del_timer(&card->loadtimer);
- }
-
- slic_debug_card_destroy(card);
-
- kfree(card);
-}
-
static int slic_card_download_gbrcv(struct adapter *adapter)
{
const struct firmware *fw;
@@ -1967,7 +676,7 @@ static int slic_card_download(struct adapter *adapter)
and reach mainloop */
mdelay(20);
- return STATUS_SUCCESS;
+ return 0;
}
MODULE_FIRMWARE("slicoss/oasisdownload.sys");
@@ -1999,327 +708,72 @@ static void slic_intagg_set(struct adapter *adapter, u32 value)
adapter->card->loadlevel_current = value;
}
-static int slic_card_init(struct sliccard *card, struct adapter *adapter)
+static void slic_soft_reset(struct adapter *adapter)
{
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
- struct slic_eeprom *peeprom;
- struct oslic_eeprom *pOeeprom;
- dma_addr_t phys_config;
- u32 phys_configh;
- u32 phys_configl;
- u32 i = 0;
- struct slic_shmem *pshmem;
- int status;
- uint macaddrs = card->card_size;
- ushort eecodesize;
- ushort dramsize;
- ushort ee_chksum;
- ushort calc_chksum;
- struct slic_config_mac *pmac;
- unsigned char fruformat;
- unsigned char oemfruformat;
- struct atk_fru *patkfru;
- union oemfru *poemfru;
-
- /* Reset everything except PCI configuration space */
- slic_soft_reset(adapter);
-
- /* Download the microcode */
- status = slic_card_download(adapter);
-
- if (status != STATUS_SUCCESS) {
- dev_err(&adapter->pcidev->dev,
- "download failed bus %d slot %d\n",
- adapter->busnumber, adapter->slotnumber);
- return status;
- }
-
- if (!card->config_set) {
- peeprom = pci_alloc_consistent(adapter->pcidev,
- sizeof(struct slic_eeprom),
- &phys_config);
-
- phys_configl = SLIC_GET_ADDR_LOW(phys_config);
- phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
-
- if (!peeprom) {
- dev_err(&adapter->pcidev->dev,
- "eeprom read failed to get memory "
- "bus %d slot %d\n", adapter->busnumber,
- adapter->slotnumber);
- return -ENOMEM;
- } else {
- memset(peeprom, 0, sizeof(struct slic_eeprom));
- }
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ if (adapter->card->state == CARD_UP) {
+ slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH);
mdelay(1);
- pshmem = (struct slic_shmem *)adapter->phys_shmem;
-
- spin_lock_irqsave(&adapter->bit64reglock.lock,
- adapter->bit64reglock.flags);
- slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp,
- SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
- spin_unlock_irqrestore(&adapter->bit64reglock.lock,
- adapter->bit64reglock.flags);
-
- slic_config_get(adapter, phys_configl, phys_configh);
-
- for (;;) {
- if (adapter->pshmem->isr) {
- if (adapter->pshmem->isr & ISR_UPC) {
- adapter->pshmem->isr = 0;
- slic_reg64_write(adapter,
- &slic_regs->slic_isp, 0,
- &slic_regs->slic_addr_upper,
- 0, FLUSH);
- slic_reg32_write(&slic_regs->slic_isr,
- 0, FLUSH);
-
- slic_upr_request_complete(adapter, 0);
- break;
- } else {
- adapter->pshmem->isr = 0;
- slic_reg32_write(&slic_regs->slic_isr,
- 0, FLUSH);
- }
- } else {
- mdelay(1);
- i++;
- if (i > 5000) {
- dev_err(&adapter->pcidev->dev,
- "%d config data fetch timed out!\n",
- adapter->port);
- slic_reg64_write(adapter,
- &slic_regs->slic_isp, 0,
- &slic_regs->slic_addr_upper,
- 0, FLUSH);
- return -EINVAL;
- }
- }
- }
-
- switch (adapter->devid) {
- /* Oasis card */
- case SLIC_2GB_DEVICE_ID:
- /* extract EEPROM data and pointers to EEPROM data */
- pOeeprom = (struct oslic_eeprom *) peeprom;
- eecodesize = pOeeprom->EecodeSize;
- dramsize = pOeeprom->DramSize;
- pmac = pOeeprom->MacInfo;
- fruformat = pOeeprom->FruFormat;
- patkfru = &pOeeprom->AtkFru;
- oemfruformat = pOeeprom->OemFruFormat;
- poemfru = &pOeeprom->OemFru;
- macaddrs = 2;
- /* Minor kludge for Oasis card
- get 2 MAC addresses from the
- EEPROM to ensure that function 1
- gets the Port 1 MAC address */
- break;
- default:
- /* extract EEPROM data and pointers to EEPROM data */
- eecodesize = peeprom->EecodeSize;
- dramsize = peeprom->DramSize;
- pmac = peeprom->u2.mac.MacInfo;
- fruformat = peeprom->FruFormat;
- patkfru = &peeprom->AtkFru;
- oemfruformat = peeprom->OemFruFormat;
- poemfru = &peeprom->OemFru;
- break;
- }
-
- card->config.EepromValid = false;
-
- /* see if the EEPROM is valid by checking its checksum */
- if ((eecodesize <= MAX_EECODE_SIZE) &&
- (eecodesize >= MIN_EECODE_SIZE)) {
-
- ee_chksum =
- *(u16 *) ((char *) peeprom + (eecodesize - 2));
- /*
- calculate the EEPROM checksum
- */
- calc_chksum =
- ~slic_eeprom_cksum((char *) peeprom,
- (eecodesize - 2));
- /*
- if the ucode chksum flag bit worked,
- we wouldn't need this shit
- */
- if (ee_chksum == calc_chksum)
- card->config.EepromValid = true;
- }
- /* copy in the DRAM size */
- card->config.DramSize = dramsize;
-
- /* copy in the MAC address(es) */
- for (i = 0; i < macaddrs; i++) {
- memcpy(&card->config.MacInfo[i],
- &pmac[i], sizeof(struct slic_config_mac));
- }
-
- /* copy the Alacritech FRU information */
- card->config.FruFormat = fruformat;
- memcpy(&card->config.AtkFru, patkfru,
- sizeof(struct atk_fru));
-
- pci_free_consistent(adapter->pcidev,
- sizeof(struct slic_eeprom),
- peeprom, phys_config);
-
- if ((!card->config.EepromValid) &&
- (adapter->reg_params.fail_on_bad_eeprom)) {
- slic_reg64_write(adapter, &slic_regs->slic_isp, 0,
- &slic_regs->slic_addr_upper,
- 0, FLUSH);
- dev_err(&adapter->pcidev->dev,
- "unsupported CONFIGURATION EEPROM invalid\n");
- return -EINVAL;
- }
-
- card->config_set = 1;
}
- if (slic_card_download_gbrcv(adapter)) {
- dev_err(&adapter->pcidev->dev,
- "unable to download GB receive microcode\n");
- return -EINVAL;
- }
-
- if (slic_global.dynamic_intagg)
- slic_intagg_set(adapter, 0);
- else
- slic_intagg_set(adapter, intagg_delay);
-
- /*
- * Initialize ping status to "ok"
- */
- card->pingstatus = ISR_PINGMASK;
-
- /*
- * Lastly, mark our card state as up and return success
- */
- card->state = CARD_UP;
- card->reset_in_progress = 0;
-
- return STATUS_SUCCESS;
+ slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC,
+ FLUSH);
+ mdelay(1);
}
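
The EEPROM validation in the removed slic_card_init body above compares a 16-bit trailer stored in the last two bytes of the image against the complement of a checksum computed over the rest of it. slic_eeprom_cksum itself is only partially visible in this patch, so the following is only a sketch of the idea, assuming an IP-style one's-complement sum and a little-endian trailer:

#include <stdint.h>
#include <stddef.h>

/* IP-style 16-bit one's-complement sum over a buffer. */
static uint16_t ones_sum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
	if (len & 1)
		sum += buf[len - 1];
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}

/* Valid if the stored trailer equals the complement of the body checksum. */
static int eeprom_checksum_ok(const uint8_t *eeprom, size_t eecodesize)
{
	uint16_t stored = (uint16_t)(eeprom[eecodesize - 2] |
				     (eeprom[eecodesize - 1] << 8));
	uint16_t calc = (uint16_t)~ones_sum16(eeprom, eecodesize - 2);

	return stored == calc;
}

int main(void)
{
	uint8_t img[4] = { 0x12, 0x34, 0, 0 };
	uint16_t c = (uint16_t)~ones_sum16(img, 2);

	img[2] = (uint8_t)(c & 0xFF);	/* write the trailer back */
	img[3] = (uint8_t)(c >> 8);
	return eeprom_checksum_ok(img, sizeof(img)) ? 0 : 1;
}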
-static u32 slic_card_locate(struct adapter *adapter)
+static void slic_mac_address_config(struct adapter *adapter)
{
- struct sliccard *card = slic_global.slic_card;
- struct physcard *physcard = slic_global.phys_card;
- ushort card_hostid;
- u16 __iomem *hostid_reg;
- uint i;
- uint rdhostid_offset = 0;
-
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- rdhostid_offset = SLIC_RDHOSTID_2GB;
- break;
- case SLIC_1GB_DEVICE_ID:
- rdhostid_offset = SLIC_RDHOSTID_1GB;
- break;
- default:
- ASSERT(0);
- break;
- }
+ u32 value;
+ u32 value2;
+ __iomem struct slic_regs *slic_regs = adapter->slic_regs;
- hostid_reg =
- (u16 __iomem *) (((u8 __iomem *) (adapter->slic_regs)) +
- rdhostid_offset);
+ value = *(u32 *) &adapter->currmacaddr[2];
+ value = ntohl(value);
+ slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
+ slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
- /* read the 16 bit hostid from SRAM */
- card_hostid = (ushort) readw(hostid_reg);
+ value2 = (u32) ((adapter->currmacaddr[0] << 8 |
+ adapter->currmacaddr[1]) & 0xFFFF);
- /* Initialize a new card structure if need be */
- if (card_hostid == SLIC_HOSTID_DEFAULT) {
- card = kzalloc(sizeof(struct sliccard), GFP_KERNEL);
- if (card == NULL)
- return -ENOMEM;
+ slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH);
+ slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH);
- card->next = slic_global.slic_card;
- slic_global.slic_card = card;
- card->busnumber = adapter->busnumber;
- card->slotnumber = adapter->slotnumber;
+ /* Write our multicast mask out to the card. This is done */
+ /* here in addition to the slic_mcast_addr_set routine */
+ /* because ALL_MCAST may have been enabled or disabled */
+ slic_mcast_set_mask(adapter);
+}
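
slic_mac_address_config splits the six-byte station address across two register pairs: the last four bytes go out in network byte order through the low registers, the first two bytes through the high registers. A small user-space illustration of that packing, using a hypothetical MAC of 00:11:22:33:44:55, is:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical station address 00:11:22:33:44:55 */
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* Low register pair: last four bytes, most significant byte first. */
	uint32_t low = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		       ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	/* High register pair: first two bytes in the low 16 bits. */
	uint32_t high = ((uint32_t)mac[0] << 8) | mac[1];

	printf("wraddr low  = 0x%08X\n", (unsigned)low);	/* 0x22334455 */
	printf("wraddr high = 0x%08X\n", (unsigned)high);	/* 0x00000011 */
	return 0;
}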
- /* Find an available cardnum */
- for (i = 0; i < SLIC_MAX_CARDS; i++) {
- if (slic_global.cardnuminuse[i] == 0) {
- slic_global.cardnuminuse[i] = 1;
- card->cardnum = i;
- break;
- }
- }
- slic_global.num_slic_cards++;
+static void slic_mac_config(struct adapter *adapter)
+{
+ u32 value;
+ __iomem struct slic_regs *slic_regs = adapter->slic_regs;
- slic_debug_card_create(card);
+ /* Setup GMAC gaps */
+ if (adapter->linkspeed == LINK_1000MB) {
+ value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
+ (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
+ (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
} else {
- /* Card exists, find the card this adapter belongs to */
- while (card) {
- if (card->cardnum == card_hostid)
- break;
- card = card->next;
- }
- }
-
- ASSERT(card);
- if (!card)
- return STATUS_FAILURE;
- /* Put the adapter in the card's adapter list */
- ASSERT(card->adapter[adapter->port] == NULL);
- if (!card->adapter[adapter->port]) {
- card->adapter[adapter->port] = adapter;
- adapter->card = card;
+ value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
+ (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
+ (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
}
- card->card_size = 1; /* one port per *logical* card */
-
- while (physcard) {
- for (i = 0; i < SLIC_MAX_PORTS; i++) {
- if (!physcard->adapter[i])
- continue;
- else
- break;
- }
- ASSERT(i != SLIC_MAX_PORTS);
- if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
- break;
- physcard = physcard->next;
- }
- if (!physcard) {
- /* no structure allocated for this physical card yet */
- physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
- ASSERT(physcard);
+ /* enable GMII */
+ if (adapter->linkspeed == LINK_1000MB)
+ value |= GMCR_GBIT;
- physcard->next = slic_global.phys_card;
- slic_global.phys_card = physcard;
- physcard->adapters_allocd = 1;
- } else {
- physcard->adapters_allocd++;
+ /* enable fullduplex */
+ if ((adapter->linkduplex == LINK_FULLD)
+ || (adapter->macopts & MAC_LOOPBACK)) {
+ value |= GMCR_FULLD;
}
- /* Note - this is ZERO relative */
- adapter->physport = physcard->adapters_allocd - 1;
-
- ASSERT(physcard->adapter[adapter->physport] == NULL);
- physcard->adapter[adapter->physport] = adapter;
- adapter->physcard = physcard;
-
- return 0;
-}
-static void slic_soft_reset(struct adapter *adapter)
-{
- if (adapter->card->state == CARD_UP) {
- slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH);
- mdelay(1);
- }
+ /* write mac config */
+ slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH);
- slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC,
- FLUSH);
- mdelay(1);
+ /* setup mac addresses */
+ slic_mac_address_config(adapter);
}
static void slic_config_set(struct adapter *adapter, bool linkchange)
@@ -2403,76 +857,10 @@ static void slic_config_clear(struct adapter *adapter)
slic_reg32_write(&slic_regs->slic_wphy, phy_config, FLUSH);
}
-static void slic_config_get(struct adapter *adapter, u32 config,
- u32 config_h)
-{
- int status;
-
- status = slic_upr_request(adapter,
- SLIC_UPR_RCONFIG,
- (u32) config, (u32) config_h, 0, 0);
- ASSERT(status == 0);
-}
-
-static void slic_mac_address_config(struct adapter *adapter)
-{
- u32 value;
- u32 value2;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
-
- value = *(u32 *) &adapter->currmacaddr[2];
- value = ntohl(value);
- slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
- slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
-
- value2 = (u32) ((adapter->currmacaddr[0] << 8 |
- adapter->currmacaddr[1]) & 0xFFFF);
-
- slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH);
- slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH);
-
- /* Write our multicast mask out to the card. This is done */
- /* here in addition to the slic_mcast_addr_set routine */
- /* because ALL_MCAST may have been enabled or disabled */
- slic_mcast_set_mask(adapter);
-}
-
-static void slic_mac_config(struct adapter *adapter)
-{
- u32 value;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
-
- /* Setup GMAC gaps */
- if (adapter->linkspeed == LINK_1000MB) {
- value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
- (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
- (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
- } else {
- value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
- (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
- (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
- }
-
- /* enable GMII */
- if (adapter->linkspeed == LINK_1000MB)
- value |= GMCR_GBIT;
-
- /* enable fullduplex */
- if ((adapter->linkduplex == LINK_FULLD)
- || (adapter->macopts & MAC_LOOPBACK)) {
- value |= GMCR_FULLD;
- }
-
- /* write mac config */
- slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH);
-
- /* setup mac addresses */
- slic_mac_address_config(adapter);
-}
-
static bool slic_mac_filter(struct adapter *adapter,
struct ether_header *ether_frame)
{
+ struct net_device *netdev = adapter->netdev;
u32 opts = adapter->macopts;
u32 *dhost4 = (u32 *)&ether_frame->ether_dhost[0];
u16 *dhost2 = (u16 *)&ether_frame->ether_dhost[4];
@@ -2492,7 +880,7 @@ static bool slic_mac_filter(struct adapter *adapter,
if (ether_frame->ether_dhost[0] & 0x01) {
if (opts & MAC_ALLMCAST) {
adapter->rcv_multicasts++;
- adapter->stats.multicast++;
+ netdev->stats.multicast++;
return true;
}
if (opts & MAC_MCAST) {
@@ -2502,7 +890,7 @@ static bool slic_mac_filter(struct adapter *adapter,
if (!compare_ether_addr(mcaddr->address,
ether_frame->ether_dhost)) {
adapter->rcv_multicasts++;
- adapter->stats.multicast++;
+ netdev->stats.multicast++;
return true;
}
mcaddr = mcaddr->next;
@@ -2597,17 +985,6 @@ static void slic_timer_load_check(ulong cardaddr)
add_timer(&card->loadtimer);
}
-static void slic_assert_fail(void)
-{
- u32 cpuid;
- u32 curr_pid;
- cpuid = smp_processor_id();
- curr_pid = current->pid;
-
- printk(KERN_ERR "%s CPU # %d ---- PID # %d\n",
- __func__, cpuid, curr_pid);
-}
-
static int slic_upr_queue_request(struct adapter *adapter,
u32 upr_request,
u32 upr_data,
@@ -2637,7 +1014,55 @@ static int slic_upr_queue_request(struct adapter *adapter,
} else {
adapter->upr_list = upr;
}
- return STATUS_SUCCESS;
+ return 0;
+}
+
+static void slic_upr_start(struct adapter *adapter)
+{
+ struct slic_upr *upr;
+ __iomem struct slic_regs *slic_regs = adapter->slic_regs;
+/*
+ char * ptr1;
+ char * ptr2;
+ uint cmdoffset;
+*/
+ upr = adapter->upr_list;
+ if (!upr)
+ return;
+ if (adapter->upr_busy)
+ return;
+ adapter->upr_busy = 1;
+
+ switch (upr->upr_request) {
+ case SLIC_UPR_STATS:
+ if (upr->upr_data_h == 0) {
+ slic_reg32_write(&slic_regs->slic_stats, upr->upr_data,
+ FLUSH);
+ } else {
+ slic_reg64_write(adapter, &slic_regs->slic_stats64,
+ upr->upr_data,
+ &slic_regs->slic_addr_upper,
+ upr->upr_data_h, FLUSH);
+ }
+ break;
+
+ case SLIC_UPR_RLSR:
+ slic_reg64_write(adapter, &slic_regs->slic_rlsr, upr->upr_data,
+ &slic_regs->slic_addr_upper, upr->upr_data_h,
+ FLUSH);
+ break;
+
+ case SLIC_UPR_RCONFIG:
+ slic_reg64_write(adapter, &slic_regs->slic_rconfig,
+ upr->upr_data, &slic_regs->slic_addr_upper,
+ upr->upr_data_h, FLUSH);
+ break;
+ case SLIC_UPR_PING:
+ slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH);
+ break;
+ default:
+ ASSERT(0);
+ }
}
static int slic_upr_request(struct adapter *adapter,
@@ -2646,22 +1071,97 @@ static int slic_upr_request(struct adapter *adapter,
u32 upr_data_h,
u32 upr_buffer, u32 upr_buffer_h)
{
- int status;
+ int rc;
spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags);
- status = slic_upr_queue_request(adapter,
+ rc = slic_upr_queue_request(adapter,
upr_request,
upr_data,
upr_data_h, upr_buffer, upr_buffer_h);
- if (status != STATUS_SUCCESS) {
- spin_unlock_irqrestore(&adapter->upr_lock.lock,
- adapter->upr_lock.flags);
- return status;
- }
+ if (rc)
+ goto err_unlock_irq;
+
slic_upr_start(adapter);
+err_unlock_irq:
spin_unlock_irqrestore(&adapter->upr_lock.lock,
adapter->upr_lock.flags);
- return STATUS_PENDING;
+ return rc;
+}
+
+static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
+{
+ u32 linkstatus = adapter->pshmem->linkstatus;
+ uint linkup;
+ unsigned char linkspeed;
+ unsigned char linkduplex;
+
+ if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
+ struct slic_shmem *pshmem;
+
+ pshmem = (struct slic_shmem *)adapter->phys_shmem;
+#if defined(CONFIG_X86_64)
+ slic_upr_queue_request(adapter,
+ SLIC_UPR_RLSR,
+ SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
+ SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
+ 0, 0);
+#elif defined(CONFIG_X86)
+ slic_upr_queue_request(adapter,
+ SLIC_UPR_RLSR,
+ (u32) &pshmem->linkstatus,
+ SLIC_GET_ADDR_HIGH(pshmem), 0, 0);
+#else
+ Stop Compilation;
+#endif
+ return;
+ }
+ if (adapter->state != ADAPT_UP)
+ return;
+
+ ASSERT((adapter->devid == SLIC_1GB_DEVICE_ID)
+ || (adapter->devid == SLIC_2GB_DEVICE_ID));
+
+ linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN;
+ if (linkstatus & GIG_SPEED_1000)
+ linkspeed = LINK_1000MB;
+ else if (linkstatus & GIG_SPEED_100)
+ linkspeed = LINK_100MB;
+ else
+ linkspeed = LINK_10MB;
+
+ if (linkstatus & GIG_FULLDUPLEX)
+ linkduplex = LINK_FULLD;
+ else
+ linkduplex = LINK_HALFD;
+
+ if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
+ return;
+
+ /* link up event, but nothing has changed */
+ if ((adapter->linkstate == LINK_UP) &&
+ (linkup == LINK_UP) &&
+ (adapter->linkspeed == linkspeed) &&
+ (adapter->linkduplex == linkduplex))
+ return;
+
+ /* link has changed at this point */
+
+ /* link has gone from up to down */
+ if (linkup == LINK_DOWN) {
+ adapter->linkstate = LINK_DOWN;
+ return;
+ }
+
+ /* link has gone from down to up */
+ adapter->linkspeed = linkspeed;
+ adapter->linkduplex = linkduplex;
+
+ if (adapter->linkstate != LINK_UP) {
+ /* setup the mac */
+ slic_config_set(adapter, true);
+ adapter->linkstate = LINK_UP;
+ netif_start_queue(adapter->netdev);
+ }
}
static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
@@ -2786,128 +1286,15 @@ static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
adapter->upr_lock.flags);
}
-static void slic_upr_start(struct adapter *adapter)
-{
- struct slic_upr *upr;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
-/*
- char * ptr1;
- char * ptr2;
- uint cmdoffset;
-*/
- upr = adapter->upr_list;
- if (!upr)
- return;
- if (adapter->upr_busy)
- return;
- adapter->upr_busy = 1;
-
- switch (upr->upr_request) {
- case SLIC_UPR_STATS:
- if (upr->upr_data_h == 0) {
- slic_reg32_write(&slic_regs->slic_stats, upr->upr_data,
- FLUSH);
- } else {
- slic_reg64_write(adapter, &slic_regs->slic_stats64,
- upr->upr_data,
- &slic_regs->slic_addr_upper,
- upr->upr_data_h, FLUSH);
- }
- break;
-
- case SLIC_UPR_RLSR:
- slic_reg64_write(adapter, &slic_regs->slic_rlsr, upr->upr_data,
- &slic_regs->slic_addr_upper, upr->upr_data_h,
- FLUSH);
- break;
-
- case SLIC_UPR_RCONFIG:
- slic_reg64_write(adapter, &slic_regs->slic_rconfig,
- upr->upr_data, &slic_regs->slic_addr_upper,
- upr->upr_data_h, FLUSH);
- break;
- case SLIC_UPR_PING:
- slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH);
- break;
- default:
- ASSERT(0);
- }
-}
-
-static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
+static void slic_config_get(struct adapter *adapter, u32 config,
+ u32 config_h)
{
- u32 linkstatus = adapter->pshmem->linkstatus;
- uint linkup;
- unsigned char linkspeed;
- unsigned char linkduplex;
-
- if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- struct slic_shmem *pshmem;
-
- pshmem = (struct slic_shmem *)adapter->phys_shmem;
-#if defined(CONFIG_X86_64)
- slic_upr_queue_request(adapter,
- SLIC_UPR_RLSR,
- SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
- SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
- 0, 0);
-#elif defined(CONFIG_X86)
- slic_upr_queue_request(adapter,
- SLIC_UPR_RLSR,
- (u32) &pshmem->linkstatus,
- SLIC_GET_ADDR_HIGH(pshmem), 0, 0);
-#else
- Stop Compilation;
-#endif
- return;
- }
- if (adapter->state != ADAPT_UP)
- return;
-
- ASSERT((adapter->devid == SLIC_1GB_DEVICE_ID)
- || (adapter->devid == SLIC_2GB_DEVICE_ID));
-
- linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN;
- if (linkstatus & GIG_SPEED_1000)
- linkspeed = LINK_1000MB;
- else if (linkstatus & GIG_SPEED_100)
- linkspeed = LINK_100MB;
- else
- linkspeed = LINK_10MB;
-
- if (linkstatus & GIG_FULLDUPLEX)
- linkduplex = LINK_FULLD;
- else
- linkduplex = LINK_HALFD;
-
- if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
- return;
-
- /* link up event, but nothing has changed */
- if ((adapter->linkstate == LINK_UP) &&
- (linkup == LINK_UP) &&
- (adapter->linkspeed == linkspeed) &&
- (adapter->linkduplex == linkduplex))
- return;
-
- /* link has changed at this point */
-
- /* link has gone from up to down */
- if (linkup == LINK_DOWN) {
- adapter->linkstate = LINK_DOWN;
- return;
- }
-
- /* link has gone from down to up */
- adapter->linkspeed = linkspeed;
- adapter->linkduplex = linkduplex;
+ int status;
- if (adapter->linkstate != LINK_UP) {
- /* setup the mac */
- slic_config_set(adapter, true);
- adapter->linkstate = LINK_UP;
- netif_start_queue(adapter->netdev);
- }
+ status = slic_upr_request(adapter,
+ SLIC_UPR_RCONFIG,
+ (u32) config, (u32) config_h, 0, 0);
+ ASSERT(status == 0);
}
/*
@@ -3012,6 +1399,24 @@ static ushort slic_eeprom_cksum(char *m, int len)
return (ushort) sum;
}
+static void slic_rspqueue_free(struct adapter *adapter)
+{
+ int i;
+ struct slic_rspqueue *rspq = &adapter->rspqueue;
+
+ for (i = 0; i < rspq->num_pages; i++) {
+ if (rspq->vaddr[i]) {
+ pci_free_consistent(adapter->pcidev, PAGE_SIZE,
+ rspq->vaddr[i], rspq->paddr[i]);
+ }
+ rspq->vaddr[i] = NULL;
+ rspq->paddr[i] = 0;
+ }
+ rspq->offset = 0;
+ rspq->pageindex = 0;
+ rspq->rspbuf = NULL;
+}
+
static int slic_rspqueue_init(struct adapter *adapter)
{
int i;
@@ -3032,7 +1437,7 @@ static int slic_rspqueue_init(struct adapter *adapter)
dev_err(&adapter->pcidev->dev,
"pci_alloc_consistent failed\n");
slic_rspqueue_free(adapter);
- return STATUS_FAILURE;
+ return -ENOMEM;
}
#ifndef CONFIG_X86_64
ASSERT(((u32) rspq->vaddr[i] & 0xFFFFF000) ==
@@ -3056,25 +1461,7 @@ static int slic_rspqueue_init(struct adapter *adapter)
rspq->offset = 0;
rspq->pageindex = 0;
rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
- return STATUS_SUCCESS;
-}
-
-static void slic_rspqueue_free(struct adapter *adapter)
-{
- int i;
- struct slic_rspqueue *rspq = &adapter->rspqueue;
-
- for (i = 0; i < rspq->num_pages; i++) {
- if (rspq->vaddr[i]) {
- pci_free_consistent(adapter->pcidev, PAGE_SIZE,
- rspq->vaddr[i], rspq->paddr[i]);
- }
- rspq->vaddr[i] = NULL;
- rspq->paddr[i] = 0;
- }
- rspq->offset = 0;
- rspq->pageindex = 0;
- rspq->rspbuf = NULL;
+ return 0;
}
static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
@@ -3159,36 +1546,6 @@ static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
return pageaddr;
}
-static int slic_cmdq_init(struct adapter *adapter)
-{
- int i;
- u32 *pageaddr;
-
- ASSERT(adapter->state == ADAPT_DOWN);
- memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
- spin_lock_init(&adapter->cmdq_all.lock.lock);
- spin_lock_init(&adapter->cmdq_free.lock.lock);
- spin_lock_init(&adapter->cmdq_done.lock.lock);
- slic_cmdqmem_init(adapter);
- adapter->slic_handle_ix = 1;
- for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
- pageaddr = slic_cmdqmem_addpage(adapter);
-#ifndef CONFIG_X86_64
- ASSERT(((u32) pageaddr & 0xFFFFF000) == (u32) pageaddr);
-#endif
- if (!pageaddr) {
- slic_cmdq_free(adapter);
- return STATUS_FAILURE;
- }
- slic_cmdq_addcmdpage(adapter, pageaddr);
- }
- adapter->slic_handle_ix = 1;
-
- return STATUS_SUCCESS;
-}
-
static void slic_cmdq_free(struct adapter *adapter)
{
struct slic_hostcmd *cmd;
@@ -3212,53 +1569,6 @@ static void slic_cmdq_free(struct adapter *adapter)
slic_cmdqmem_free(adapter);
}
-static void slic_cmdq_reset(struct adapter *adapter)
-{
- struct slic_hostcmd *hcmd;
- struct sk_buff *skb;
- u32 outstanding;
-
- spin_lock_irqsave(&adapter->cmdq_free.lock.lock,
- adapter->cmdq_free.lock.flags);
- spin_lock_irqsave(&adapter->cmdq_done.lock.lock,
- adapter->cmdq_done.lock.flags);
- outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
- outstanding -= adapter->cmdq_free.count;
- hcmd = adapter->cmdq_all.head;
- while (hcmd) {
- if (hcmd->busy) {
- skb = hcmd->skb;
- ASSERT(skb);
- hcmd->busy = 0;
- hcmd->skb = NULL;
- dev_kfree_skb_irq(skb);
- }
- hcmd = hcmd->next_all;
- }
- adapter->cmdq_free.count = 0;
- adapter->cmdq_free.head = NULL;
- adapter->cmdq_free.tail = NULL;
- adapter->cmdq_done.count = 0;
- adapter->cmdq_done.head = NULL;
- adapter->cmdq_done.tail = NULL;
- adapter->cmdq_free.head = adapter->cmdq_all.head;
- hcmd = adapter->cmdq_all.head;
- while (hcmd) {
- adapter->cmdq_free.count++;
- hcmd->next = hcmd->next_all;
- hcmd = hcmd->next_all;
- }
- if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
- dev_err(&adapter->netdev->dev,
- "free_count %d != all count %d\n",
- adapter->cmdq_free.count, adapter->cmdq_all.count);
- }
- spin_unlock_irqrestore(&adapter->cmdq_done.lock.lock,
- adapter->cmdq_done.lock.flags);
- spin_unlock_irqrestore(&adapter->cmdq_free.lock.lock,
- adapter->cmdq_free.lock.flags);
-}
-
static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
{
struct slic_hostcmd *cmd;
@@ -3324,6 +1634,99 @@ static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags);
}
+static int slic_cmdq_init(struct adapter *adapter)
+{
+ int i;
+ u32 *pageaddr;
+
+ ASSERT(adapter->state == ADAPT_DOWN);
+ memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
+ memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
+ memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
+ spin_lock_init(&adapter->cmdq_all.lock.lock);
+ spin_lock_init(&adapter->cmdq_free.lock.lock);
+ spin_lock_init(&adapter->cmdq_done.lock.lock);
+ slic_cmdqmem_init(adapter);
+ adapter->slic_handle_ix = 1;
+ for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
+ pageaddr = slic_cmdqmem_addpage(adapter);
+#ifndef CONFIG_X86_64
+ ASSERT(((u32) pageaddr & 0xFFFFF000) == (u32) pageaddr);
+#endif
+ if (!pageaddr) {
+ slic_cmdq_free(adapter);
+ return -ENOMEM;
+ }
+ slic_cmdq_addcmdpage(adapter, pageaddr);
+ }
+ adapter->slic_handle_ix = 1;
+
+ return 0;
+}
+
+static void slic_cmdq_reset(struct adapter *adapter)
+{
+ struct slic_hostcmd *hcmd;
+ struct sk_buff *skb;
+ u32 outstanding;
+
+ spin_lock_irqsave(&adapter->cmdq_free.lock.lock,
+ adapter->cmdq_free.lock.flags);
+ spin_lock_irqsave(&adapter->cmdq_done.lock.lock,
+ adapter->cmdq_done.lock.flags);
+ outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
+ outstanding -= adapter->cmdq_free.count;
+ hcmd = adapter->cmdq_all.head;
+ while (hcmd) {
+ if (hcmd->busy) {
+ skb = hcmd->skb;
+ ASSERT(skb);
+ hcmd->busy = 0;
+ hcmd->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+ hcmd = hcmd->next_all;
+ }
+ adapter->cmdq_free.count = 0;
+ adapter->cmdq_free.head = NULL;
+ adapter->cmdq_free.tail = NULL;
+ adapter->cmdq_done.count = 0;
+ adapter->cmdq_done.head = NULL;
+ adapter->cmdq_done.tail = NULL;
+ adapter->cmdq_free.head = adapter->cmdq_all.head;
+ hcmd = adapter->cmdq_all.head;
+ while (hcmd) {
+ adapter->cmdq_free.count++;
+ hcmd->next = hcmd->next_all;
+ hcmd = hcmd->next_all;
+ }
+ if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
+ dev_err(&adapter->netdev->dev,
+ "free_count %d != all count %d\n",
+ adapter->cmdq_free.count, adapter->cmdq_all.count);
+ }
+ spin_unlock_irqrestore(&adapter->cmdq_done.lock.lock,
+ adapter->cmdq_done.lock.flags);
+ spin_unlock_irqrestore(&adapter->cmdq_free.lock.lock,
+ adapter->cmdq_free.lock.flags);
+}
+
+static void slic_cmdq_getdone(struct adapter *adapter)
+{
+ struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
+ struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
+
+ ASSERT(free_cmdq->head == NULL);
+ spin_lock_irqsave(&done_cmdq->lock.lock, done_cmdq->lock.flags);
+
+ free_cmdq->head = done_cmdq->head;
+ free_cmdq->count = done_cmdq->count;
+ done_cmdq->head = NULL;
+ done_cmdq->tail = NULL;
+ done_cmdq->count = 0;
+ spin_unlock_irqrestore(&done_cmdq->lock.lock, done_cmdq->lock.flags);
+}
+
static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
{
struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
@@ -3357,22 +1760,6 @@ retry:
return cmd;
}
-static void slic_cmdq_getdone(struct adapter *adapter)
-{
- struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
- struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
-
- ASSERT(free_cmdq->head == NULL);
- spin_lock_irqsave(&done_cmdq->lock.lock, done_cmdq->lock.flags);
-
- free_cmdq->head = done_cmdq->head;
- free_cmdq->count = done_cmdq->count;
- done_cmdq->head = NULL;
- done_cmdq->tail = NULL;
- done_cmdq->count = 0;
- spin_unlock_irqrestore(&done_cmdq->lock.lock, done_cmdq->lock.flags);
-}
-
static void slic_cmdq_putdone_irq(struct adapter *adapter,
struct slic_hostcmd *cmd)
{
@@ -3388,79 +1775,6 @@ static void slic_cmdq_putdone_irq(struct adapter *adapter,
spin_unlock(&cmdq->lock.lock);
}
-static int slic_rcvqueue_init(struct adapter *adapter)
-{
- int i, count;
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-
- ASSERT(adapter->state == ADAPT_DOWN);
- rcvq->tail = NULL;
- rcvq->head = NULL;
- rcvq->size = SLIC_RCVQ_ENTRIES;
- rcvq->errors = 0;
- rcvq->count = 0;
- i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES);
- count = 0;
- while (i) {
- count += slic_rcvqueue_fill(adapter);
- i--;
- }
- if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
- slic_rcvqueue_free(adapter);
- return STATUS_FAILURE;
- }
- return STATUS_SUCCESS;
-}
-
-static void slic_rcvqueue_free(struct adapter *adapter)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- struct sk_buff *skb;
-
- while (rcvq->head) {
- skb = rcvq->head;
- rcvq->head = rcvq->head->next;
- dev_kfree_skb(skb);
- }
- rcvq->tail = NULL;
- rcvq->head = NULL;
- rcvq->count = 0;
-}
-
-static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- struct sk_buff *skb;
- struct slic_rcvbuf *rcvbuf;
- int count;
-
- if (rcvq->count) {
- skb = rcvq->head;
- rcvbuf = (struct slic_rcvbuf *)skb->head;
- ASSERT(rcvbuf);
-
- if (rcvbuf->status & IRHDDR_SVALID) {
- rcvq->head = rcvq->head->next;
- skb->next = NULL;
- rcvq->count--;
- } else {
- skb = NULL;
- }
- } else {
- dev_err(&adapter->netdev->dev,
- "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
- skb = NULL;
- }
- while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
- count = slic_rcvqueue_fill(adapter);
- if (!count)
- break;
- }
- if (skb)
- rcvq->errors = 0;
- return skb;
-}
-
static int slic_rcvqueue_fill(struct adapter *adapter)
{
void *paddr;
@@ -3548,6 +1862,79 @@ retry_rcvqfill:
return i;
}
+static void slic_rcvqueue_free(struct adapter *adapter)
+{
+ struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
+ struct sk_buff *skb;
+
+ while (rcvq->head) {
+ skb = rcvq->head;
+ rcvq->head = rcvq->head->next;
+ dev_kfree_skb(skb);
+ }
+ rcvq->tail = NULL;
+ rcvq->head = NULL;
+ rcvq->count = 0;
+}
+
+static int slic_rcvqueue_init(struct adapter *adapter)
+{
+ int i, count;
+ struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
+
+ ASSERT(adapter->state == ADAPT_DOWN);
+ rcvq->tail = NULL;
+ rcvq->head = NULL;
+ rcvq->size = SLIC_RCVQ_ENTRIES;
+ rcvq->errors = 0;
+ rcvq->count = 0;
+ i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES);
+ count = 0;
+ while (i) {
+ count += slic_rcvqueue_fill(adapter);
+ i--;
+ }
+ if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
+ slic_rcvqueue_free(adapter);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
+{
+ struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
+ struct sk_buff *skb;
+ struct slic_rcvbuf *rcvbuf;
+ int count;
+
+ if (rcvq->count) {
+ skb = rcvq->head;
+ rcvbuf = (struct slic_rcvbuf *)skb->head;
+ ASSERT(rcvbuf);
+
+ if (rcvbuf->status & IRHDDR_SVALID) {
+ rcvq->head = rcvq->head->next;
+ skb->next = NULL;
+ rcvq->count--;
+ } else {
+ skb = NULL;
+ }
+ } else {
+ dev_err(&adapter->netdev->dev,
+ "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
+ skb = NULL;
+ }
+ while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
+ count = slic_rcvqueue_fill(adapter);
+ if (!count)
+ break;
+ }
+ if (skb)
+ rcvq->errors = 0;
+ return skb;
+}
+
static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
{
struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
@@ -3813,11 +2200,10 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
static int slic_debug_adapter_show(struct seq_file *seq, void *v)
{
struct adapter *adapter = seq->private;
+ struct net_device *netdev = adapter->netdev;
- if ((adapter->netdev) && (adapter->netdev->name)) {
- seq_printf(seq, "info: interface : %s\n",
+ seq_printf(seq, "info: interface : %s\n",
adapter->netdev->name);
- }
seq_printf(seq, "info: status : %s\n",
SLIC_LINKSTATE(adapter->linkstate));
seq_printf(seq, "info: port : %d\n",
@@ -3835,9 +2221,9 @@ static int slic_debug_adapter_show(struct seq_file *seq, void *v)
seq_printf(seq, "info: RcvQ current : %4.4X\n",
adapter->rcvqueue.count);
seq_printf(seq, "rx stats: packets : %8.8lX\n",
- adapter->stats.rx_packets);
+ netdev->stats.rx_packets);
seq_printf(seq, "rx stats: bytes : %8.8lX\n",
- adapter->stats.rx_bytes);
+ netdev->stats.rx_bytes);
seq_printf(seq, "rx stats: broadcasts : %8.8X\n",
adapter->rcv_broadcasts);
seq_printf(seq, "rx stats: multicasts : %8.8X\n",
@@ -3851,13 +2237,13 @@ static int slic_debug_adapter_show(struct seq_file *seq, void *v)
seq_printf(seq, "rx stats: drops : %8.8X\n",
(u32) adapter->rcv_drops);
seq_printf(seq, "tx stats: packets : %8.8lX\n",
- adapter->stats.tx_packets);
+ netdev->stats.tx_packets);
seq_printf(seq, "tx stats: bytes : %8.8lX\n",
- adapter->stats.tx_bytes);
+ netdev->stats.tx_bytes);
seq_printf(seq, "tx stats: errors : %8.8X\n",
(u32) adapter->slic_stats.iface.xmt_errors);
seq_printf(seq, "rx stats: multicasts : %8.8lX\n",
- adapter->stats.multicast);
+ netdev->stats.multicast);
seq_printf(seq, "tx stats: collision errors : %8.8X\n",
(u32) adapter->slic_stats.iface.xmit_collisions);
seq_printf(seq, "perf: Max rcv frames/isr : %8.8X\n",
@@ -4039,9 +2425,1555 @@ static void slic_debug_cleanup(void)
}
}
-/******************************************************************************/
-/**************** MODULE INITIATION / TERMINATION FUNCTIONS ***************/
-/******************************************************************************/
+/*
+ * slic_link_event_handler -
+ *
+ * Initiate a link configuration sequence. The link configuration begins
+ * by issuing a READ_LINK_STATUS command to the Utility Processor on the
+ * SLIC. Since the command finishes asynchronously, the UPR completion
+ * routine will follow it up with a UP configuration write command, which
+ * will also complete asynchronously.
+ *
+ */
+static void slic_link_event_handler(struct adapter *adapter)
+{
+ int status;
+ struct slic_shmem *pshmem;
+
+ if (adapter->state != ADAPT_UP) {
+ /* Adapter is not operational. Ignore. */
+ return;
+ }
+
+ pshmem = (struct slic_shmem *)adapter->phys_shmem;
+
+#if defined(CONFIG_X86_64)
+ status = slic_upr_request(adapter,
+ SLIC_UPR_RLSR,
+ SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
+ SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
+ 0, 0);
+#elif defined(CONFIG_X86)
+ status = slic_upr_request(adapter, SLIC_UPR_RLSR,
+ (u32) &pshmem->linkstatus, /* no 4GB wrap guaranteed */
+ 0, 0, 0);
+#else
+ Stop compilation;
+#endif
+ ASSERT(status == 0);
+}
+
+static void slic_init_cleanup(struct adapter *adapter)
+{
+ if (adapter->intrregistered) {
+ adapter->intrregistered = 0;
+ free_irq(adapter->netdev->irq, adapter->netdev);
+
+ }
+ if (adapter->pshmem) {
+ pci_free_consistent(adapter->pcidev,
+ sizeof(struct slic_shmem),
+ adapter->pshmem, adapter->phys_shmem);
+ adapter->pshmem = NULL;
+ adapter->phys_shmem = (dma_addr_t) NULL;
+ }
+
+ if (adapter->pingtimerset) {
+ adapter->pingtimerset = 0;
+ del_timer(&adapter->pingtimer);
+ }
+
+ slic_rspqueue_free(adapter);
+ slic_cmdq_free(adapter);
+ slic_rcvqueue_free(adapter);
+}
+
+/*
+ * Allocate a mcast_address structure to hold the multicast address.
+ * Link it in.
+ */
+static int slic_mcast_add_list(struct adapter *adapter, char *address)
+{
+ struct mcast_address *mcaddr, *mlist;
+
+ /* Check to see if it already exists */
+ mlist = adapter->mcastaddrs;
+ while (mlist) {
+ if (!compare_ether_addr(mlist->address, address))
+ return 0;
+ mlist = mlist->next;
+ }
+
+ /* Doesn't already exist. Allocate a structure to hold it */
+ mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
+ if (mcaddr == NULL)
+ return 1;
+
+ memcpy(mcaddr->address, address, 6);
+
+ mcaddr->next = adapter->mcastaddrs;
+ adapter->mcastaddrs = mcaddr;
+
+ return 0;
+}
+
+static void slic_mcast_set_list(struct net_device *dev)
+{
+ struct adapter *adapter = netdev_priv(dev);
+ int status = 0;
+ char *addresses;
+ struct netdev_hw_addr *ha;
+
+ ASSERT(adapter);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ addresses = (char *) &ha->addr;
+ status = slic_mcast_add_list(adapter, addresses);
+ if (status != 0)
+ break;
+ slic_mcast_set_bit(adapter, addresses);
+ }
+
+ if (adapter->devflags_prev != dev->flags) {
+ adapter->macopts = MAC_DIRECTED;
+ if (dev->flags) {
+ if (dev->flags & IFF_BROADCAST)
+ adapter->macopts |= MAC_BCAST;
+ if (dev->flags & IFF_PROMISC)
+ adapter->macopts |= MAC_PROMISC;
+ if (dev->flags & IFF_ALLMULTI)
+ adapter->macopts |= MAC_ALLMCAST;
+ if (dev->flags & IFF_MULTICAST)
+ adapter->macopts |= MAC_MCAST;
+ }
+ adapter->devflags_prev = dev->flags;
+ slic_config_set(adapter, true);
+ } else {
+ if (status == 0)
+ slic_mcast_set_mask(adapter);
+ }
+ return;
+}
+
+#define XMIT_FAIL_LINK_STATE 1
+#define XMIT_FAIL_ZERO_LENGTH 2
+#define XMIT_FAIL_HOSTCMD_FAIL 3
+
+static void slic_xmit_build_request(struct adapter *adapter,
+ struct slic_hostcmd *hcmd, struct sk_buff *skb)
+{
+ struct slic_host64_cmd *ihcmd;
+ ulong phys_addr;
+
+ ihcmd = &hcmd->cmd64;
+
+ ihcmd->flags = (adapter->port << IHFLG_IFSHFT);
+ ihcmd->command = IHCMD_XMT_REQ;
+ ihcmd->u.slic_buffers.totlen = skb->len;
+ phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
+ ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
+ ihcmd->u.slic_buffers.bufs[0].length = skb->len;
+#if defined(CONFIG_X86_64)
+ hcmd->cmdsize = (u32) ((((u64)&ihcmd->u.slic_buffers.bufs[1] -
+ (u64) hcmd) + 31) >> 5);
+#elif defined(CONFIG_X86)
+ hcmd->cmdsize = ((((u32) &ihcmd->u.slic_buffers.bufs[1] -
+ (u32) hcmd) + 31) >> 5);
+#else
+ Stop Compilation;
+#endif
+}
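
slic_xmit_build_request sizes the command in 32-byte units: the byte span from the start of the host command to just past the first buffer descriptor is rounded up and shifted right by five. A one-line helper that makes the rounding explicit (the cmd_slots name is illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Round a byte length up to a count of 32-byte command slots: (len + 31) >> 5. */
static uint32_t cmd_slots(uint32_t bytes)
{
	return (bytes + 31) >> 5;
}

int main(void)
{
	printf("%u %u %u\n", (unsigned)cmd_slots(32), (unsigned)cmd_slots(33),
	       (unsigned)cmd_slots(44));	/* prints: 1 2 2 */
	return 0;
}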
+
+static void slic_xmit_fail(struct adapter *adapter,
+ struct sk_buff *skb,
+ void *cmd, u32 skbtype, u32 status)
+{
+ if (adapter->xmitq_full)
+ netif_stop_queue(adapter->netdev);
+ if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
+ switch (status) {
+ case XMIT_FAIL_LINK_STATE:
+ dev_err(&adapter->netdev->dev,
+ "reject xmit skb[%p: %x] linkstate[%s] "
+ "adapter[%s:%d] card[%s:%d]\n",
+ skb, skb->pkt_type,
+ SLIC_LINKSTATE(adapter->linkstate),
+ SLIC_ADAPTER_STATE(adapter->state),
+ adapter->state,
+ SLIC_CARD_STATE(adapter->card->state),
+ adapter->card->state);
+ break;
+ case XMIT_FAIL_ZERO_LENGTH:
+ dev_err(&adapter->netdev->dev,
+ "xmit_start skb->len == 0 skb[%p] type[%x]\n",
+ skb, skb->pkt_type);
+ break;
+ case XMIT_FAIL_HOSTCMD_FAIL:
+ dev_err(&adapter->netdev->dev,
+ "xmit_start skb[%p] type[%x] No host commands "
+ "available\n", skb, skb->pkt_type);
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+ dev_kfree_skb(skb);
+ adapter->netdev->stats.tx_dropped++;
+}
+
+static void slic_rcv_handle_error(struct adapter *adapter,
+ struct slic_rcvbuf *rcvbuf)
+{
+ struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->devid != SLIC_1GB_DEVICE_ID) {
+ if (hdr->frame_status14 & VRHSTAT_802OE)
+ adapter->if_events.oflow802++;
+ if (hdr->frame_status14 & VRHSTAT_TPOFLO)
+ adapter->if_events.Tprtoflow++;
+ if (hdr->frame_status_b14 & VRHSTATB_802UE)
+ adapter->if_events.uflow802++;
+ if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
+ adapter->if_events.rcvearly++;
+ netdev->stats.rx_fifo_errors++;
+ }
+ if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
+ adapter->if_events.Bufov++;
+ netdev->stats.rx_over_errors++;
+ }
+ if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
+ adapter->if_events.Carre++;
+ netdev->stats.tx_carrier_errors++;
+ }
+ if (hdr->frame_status_b14 & VRHSTATB_LONGE)
+ adapter->if_events.Longe++;
+ if (hdr->frame_status_b14 & VRHSTATB_PREA)
+ adapter->if_events.Invp++;
+ if (hdr->frame_status_b14 & VRHSTATB_CRC) {
+ adapter->if_events.Crc++;
+ netdev->stats.rx_crc_errors++;
+ }
+ if (hdr->frame_status_b14 & VRHSTATB_DRBL)
+ adapter->if_events.Drbl++;
+ if (hdr->frame_status_b14 & VRHSTATB_CODE)
+ adapter->if_events.Code++;
+ if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
+ adapter->if_events.TpCsum++;
+ if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
+ adapter->if_events.TpHlen++;
+ if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
+ adapter->if_events.IpCsum++;
+ if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
+ adapter->if_events.IpLen++;
+ if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
+ adapter->if_events.IpHlen++;
+ } else {
+ if (hdr->frame_statusGB & VGBSTAT_XPERR) {
+ u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
+
+ if (xerr == VGBSTAT_XCSERR)
+ adapter->if_events.TpCsum++;
+ if (xerr == VGBSTAT_XUFLOW)
+ adapter->if_events.Tprtoflow++;
+ if (xerr == VGBSTAT_XHLEN)
+ adapter->if_events.TpHlen++;
+ }
+ if (hdr->frame_statusGB & VGBSTAT_NETERR) {
+ u32 nerr =
+ (hdr->
+ frame_statusGB >> VGBSTAT_NERRSHFT) &
+ VGBSTAT_NERRMSK;
+ if (nerr == VGBSTAT_NCSERR)
+ adapter->if_events.IpCsum++;
+ if (nerr == VGBSTAT_NUFLOW)
+ adapter->if_events.IpLen++;
+ if (nerr == VGBSTAT_NHLEN)
+ adapter->if_events.IpHlen++;
+ }
+ if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
+ u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
+
+ if (lerr == VGBSTAT_LDEARLY)
+ adapter->if_events.rcvearly++;
+ if (lerr == VGBSTAT_LBOFLO)
+ adapter->if_events.Bufov++;
+ if (lerr == VGBSTAT_LCODERR)
+ adapter->if_events.Code++;
+ if (lerr == VGBSTAT_LDBLNBL)
+ adapter->if_events.Drbl++;
+ if (lerr == VGBSTAT_LCRCERR)
+ adapter->if_events.Crc++;
+ if (lerr == VGBSTAT_LOFLO)
+ adapter->if_events.oflow802++;
+ if (lerr == VGBSTAT_LUFLO)
+ adapter->if_events.uflow802++;
+ }
+ }
+ return;
+}
+
+#define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
+#define M_FAST_PATH 0x0040
+
+static void slic_rcv_handler(struct adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct sk_buff *skb;
+ struct slic_rcvbuf *rcvbuf;
+ u32 frames = 0;
+
+ while ((skb = slic_rcvqueue_getnext(adapter))) {
+ u32 rx_bytes;
+
+ ASSERT(skb->head);
+ rcvbuf = (struct slic_rcvbuf *)skb->head;
+ adapter->card->events++;
+ if (rcvbuf->status & IRHDDR_ERR) {
+ adapter->rx_errors++;
+ slic_rcv_handle_error(adapter, rcvbuf);
+ slic_rcvqueue_reinsert(adapter, skb);
+ continue;
+ }
+
+ if (!slic_mac_filter(adapter, (struct ether_header *)
+ rcvbuf->data)) {
+ slic_rcvqueue_reinsert(adapter, skb);
+ continue;
+ }
+ skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
+ rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
+ skb_put(skb, rx_bytes);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += rx_bytes;
+#if SLIC_OFFLOAD_IP_CHECKSUM
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+
+ skb->dev = adapter->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_rx(skb);
+
+ ++frames;
+#if SLIC_INTERRUPT_PROCESS_LIMIT
+ if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
+ adapter->rcv_interrupt_yields++;
+ break;
+ }
+#endif
+ }
+ adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
+}
+
+static void slic_xmit_complete(struct adapter *adapter)
+{
+ struct slic_hostcmd *hcmd;
+ struct slic_rspbuf *rspbuf;
+ u32 frames = 0;
+ struct slic_handle_word slic_handle_word;
+
+ do {
+ rspbuf = slic_rspqueue_getnext(adapter);
+ if (!rspbuf)
+ break;
+ adapter->xmit_completes++;
+ adapter->card->events++;
+ /*
+ Get the complete host command buffer
+ */
+ slic_handle_word.handle_token = rspbuf->hosthandle;
+ ASSERT(slic_handle_word.handle_index);
+ ASSERT(slic_handle_word.handle_index <= SLIC_CMDQ_MAXCMDS);
+ hcmd =
+ (struct slic_hostcmd *)
+ adapter->slic_handles[slic_handle_word.handle_index].
+ address;
+/* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
+ ASSERT(hcmd);
+ ASSERT(hcmd->pslic_handle ==
+ &adapter->slic_handles[slic_handle_word.handle_index]);
+ if (hcmd->type == SLIC_CMD_DUMB) {
+ if (hcmd->skb)
+ dev_kfree_skb_irq(hcmd->skb);
+ slic_cmdq_putdone_irq(adapter, hcmd);
+ }
+ rspbuf->status = 0;
+ rspbuf->hosthandle = 0;
+ frames++;
+ } while (1);
+ adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
+}
+
+static irqreturn_t slic_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct adapter *adapter = netdev_priv(dev);
+ u32 isr;
+
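+ /*
+ * Summary of the flow implemented below: the card posts its
+ * interrupt status into shared memory (pshmem->isr). The handler
+ * masks further interrupts via the ICR, takes a local copy of the
+ * status word, clears the shared-memory copy, dispatches the
+ * individual events, and finally writes 0 to the on-chip ISR
+ * register.
+ */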
+ if ((adapter->pshmem) && (adapter->pshmem->isr)) {
+ slic_reg32_write(&adapter->slic_regs->slic_icr,
+ ICR_INT_MASK, FLUSH);
+ isr = adapter->isrcopy = adapter->pshmem->isr;
+ adapter->pshmem->isr = 0;
+ adapter->num_isrs++;
+ switch (adapter->card->state) {
+ case CARD_UP:
+ if (isr & ~ISR_IO) {
+ if (isr & ISR_ERR) {
+ adapter->error_interrupts++;
+ if (isr & ISR_RMISS) {
+ int count;
+ int pre_count;
+ int errors;
+
+ struct slic_rcvqueue *rcvq =
+ &adapter->rcvqueue;
+
+ adapter->error_rmiss_interrupts++;
+ if (!rcvq->errors)
+ rcv_count = rcvq->count;
+ pre_count = rcvq->count;
+ errors = rcvq->errors;
+
+ while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
+ count = slic_rcvqueue_fill(adapter);
+ if (!count)
+ break;
+ }
+ } else if (isr & ISR_XDROP) {
+ dev_err(&dev->dev,
+ "isr & ISR_ERR [%x] "
+ "ISR_XDROP \n", isr);
+ } else {
+ dev_err(&dev->dev,
+ "isr & ISR_ERR [%x]\n",
+ isr);
+ }
+ }
+
+ if (isr & ISR_LEVENT) {
+ adapter->linkevent_interrupts++;
+ slic_link_event_handler(adapter);
+ }
+
+ if ((isr & ISR_UPC) ||
+ (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
+ adapter->upr_interrupts++;
+ slic_upr_request_complete(adapter, isr);
+ }
+ }
+
+ if (isr & ISR_RCV) {
+ adapter->rcv_interrupts++;
+ slic_rcv_handler(adapter);
+ }
+
+ if (isr & ISR_CMD) {
+ adapter->xmit_interrupts++;
+ slic_xmit_complete(adapter);
+ }
+ break;
+
+ case CARD_DOWN:
+ if ((isr & ISR_UPC) ||
+ (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
+ adapter->upr_interrupts++;
+ slic_upr_request_complete(adapter, isr);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ adapter->isrcopy = 0;
+ adapter->all_reg_writes += 2;
+ adapter->isr_reg_writes++;
+ slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH);
+ } else {
+ adapter->false_interrupts++;
+ }
+ return IRQ_HANDLED;
+}
+
+#define NORMAL_ETHFRAME 0
+
+static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sliccard *card;
+ struct adapter *adapter = netdev_priv(dev);
+ struct slic_hostcmd *hcmd = NULL;
+ u32 status = 0;
+ u32 skbtype = NORMAL_ETHFRAME;
+ void *offloadcmd = NULL;
+
+ card = adapter->card;
+ ASSERT(card);
+ if ((adapter->linkstate != LINK_UP) ||
+ (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
+ status = XMIT_FAIL_LINK_STATE;
+ goto xmit_fail;
+
+ } else if (skb->len == 0) {
+ status = XMIT_FAIL_ZERO_LENGTH;
+ goto xmit_fail;
+ }
+
+ if (skbtype == NORMAL_ETHFRAME) {
+ hcmd = slic_cmdq_getfree(adapter);
+ if (!hcmd) {
+ adapter->xmitq_full = 1;
+ status = XMIT_FAIL_HOSTCMD_FAIL;
+ goto xmit_fail;
+ }
+ ASSERT(hcmd->pslic_handle);
+ ASSERT(hcmd->cmd64.hosthandle ==
+ hcmd->pslic_handle->token.handle_token);
+ hcmd->skb = skb;
+ hcmd->busy = 1;
+ hcmd->type = SLIC_CMD_DUMB;
+ if (skbtype == NORMAL_ETHFRAME)
+ slic_xmit_build_request(adapter, hcmd, skb);
+ }
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+#ifdef DEBUG_DUMP
+ if (adapter->kill_card) {
+ struct slic_host64_cmd *ihcmd;
+
+ ihcmd = &hcmd->cmd64;
+
+ ihcmd->flags |= 0x40;
+ adapter->kill_card = 0; /* only do this once */
+ }
+#endif
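+ /*
+ * Hand the command to the card by writing its bus address and size
+ * to the command buffer address register; when the address does not
+ * fit in 32 bits, the upper half is written through the paired
+ * 64-bit register path as well.
+ */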
+ if (hcmd->paddrh == 0) {
+ slic_reg32_write(&adapter->slic_regs->slic_cbar,
+ (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH);
+ } else {
+ slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64,
+ (hcmd->paddrl | hcmd->cmdsize),
+ &adapter->slic_regs->slic_addr_upper,
+ hcmd->paddrh, DONT_FLUSH);
+ }
+xmit_done:
+ return NETDEV_TX_OK;
+xmit_fail:
+ slic_xmit_fail(adapter, skb, offloadcmd, skbtype, status);
+ goto xmit_done;
+}
+
+
+static void slic_adapter_freeresources(struct adapter *adapter)
+{
+ slic_init_cleanup(adapter);
+ adapter->error_interrupts = 0;
+ adapter->rcv_interrupts = 0;
+ adapter->xmit_interrupts = 0;
+ adapter->linkevent_interrupts = 0;
+ adapter->upr_interrupts = 0;
+ adapter->num_isrs = 0;
+ adapter->xmit_completes = 0;
+ adapter->rcv_broadcasts = 0;
+ adapter->rcv_multicasts = 0;
+ adapter->rcv_unicasts = 0;
+}
+
+static int slic_adapter_allocresources(struct adapter *adapter)
+{
+ if (!adapter->intrregistered) {
+ int retval;
+
+ spin_unlock_irqrestore(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+
+ retval = request_irq(adapter->netdev->irq,
+ &slic_interrupt,
+ IRQF_SHARED,
+ adapter->netdev->name, adapter->netdev);
+
+ spin_lock_irqsave(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+
+ if (retval) {
+ dev_err(&adapter->netdev->dev,
+ "request_irq (%s) FAILED [%x]\n",
+ adapter->netdev->name, retval);
+ return retval;
+ }
+ adapter->intrregistered = 1;
+ }
+ return 0;
+}
+
+/*
+ * slic_if_init
+ *
+ * Perform initialization of our slic interface.
+ *
+ */
+static int slic_if_init(struct adapter *adapter)
+{
+ struct sliccard *card = adapter->card;
+ struct net_device *dev = adapter->netdev;
+ __iomem struct slic_regs *slic_regs = adapter->slic_regs;
+ struct slic_shmem *pshmem;
+ int rc;
+
+ ASSERT(card);
+
+ /* adapter should be down at this point */
+ if (adapter->state != ADAPT_DOWN) {
+ dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
+ __func__);
+ rc = -EIO;
+ goto err;
+ }
+ ASSERT(adapter->linkstate == LINK_DOWN);
+
+ adapter->devflags_prev = dev->flags;
+ adapter->macopts = MAC_DIRECTED;
+ if (dev->flags) {
+ if (dev->flags & IFF_BROADCAST)
+ adapter->macopts |= MAC_BCAST;
+ if (dev->flags & IFF_PROMISC)
+ adapter->macopts |= MAC_PROMISC;
+ if (dev->flags & IFF_ALLMULTI)
+ adapter->macopts |= MAC_ALLMCAST;
+ if (dev->flags & IFF_MULTICAST)
+ adapter->macopts |= MAC_MCAST;
+ }
+ rc = slic_adapter_allocresources(adapter);
+ if (rc) {
+ dev_err(&dev->dev,
+ "%s: slic_adapter_allocresources FAILED %x\n",
+ __func__, rc);
+ slic_adapter_freeresources(adapter);
+ goto err;
+ }
+
+ if (!adapter->queues_initialized) {
+ if ((rc = slic_rspqueue_init(adapter)))
+ goto err;
+ if ((rc = slic_cmdq_init(adapter)))
+ goto err;
+ if ((rc = slic_rcvqueue_init(adapter)))
+ goto err;
+ adapter->queues_initialized = 1;
+ }
+
+ slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ mdelay(1);
+
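+ /*
+ * Point the card at the shared-memory interrupt status word: the
+ * slic_isp register (plus slic_addr_upper for the high 32 bits on
+ * 64-bit builds) is loaded with the physical address of pshmem->isr.
+ */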
+ if (!adapter->isp_initialized) {
+ pshmem = (struct slic_shmem *)adapter->phys_shmem;
+
+ spin_lock_irqsave(&adapter->bit64reglock.lock,
+ adapter->bit64reglock.flags);
+
+#if defined(CONFIG_X86_64)
+ slic_reg32_write(&slic_regs->slic_addr_upper,
+ SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
+ slic_reg32_write(&slic_regs->slic_isp,
+ SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
+#elif defined(CONFIG_X86)
+ slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
+ slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH);
+#else
+ Stop Compilations
+#endif
+ spin_unlock_irqrestore(&adapter->bit64reglock.lock,
+ adapter->bit64reglock.flags);
+ adapter->isp_initialized = 1;
+ }
+
+ adapter->state = ADAPT_UP;
+ if (!card->loadtimerset) {
+ init_timer(&card->loadtimer);
+ card->loadtimer.expires =
+ jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
+ card->loadtimer.data = (ulong) card;
+ card->loadtimer.function = &slic_timer_load_check;
+ add_timer(&card->loadtimer);
+
+ card->loadtimerset = 1;
+ }
+
+ if (!adapter->pingtimerset) {
+ init_timer(&adapter->pingtimer);
+ adapter->pingtimer.expires =
+ jiffies + (PING_TIMER_INTERVAL * HZ);
+ adapter->pingtimer.data = (ulong) dev;
+ adapter->pingtimer.function = &slic_timer_ping;
+ add_timer(&adapter->pingtimer);
+ adapter->pingtimerset = 1;
+ adapter->card->pingstatus = ISR_PINGMASK;
+ }
+
+ /*
+ * clear any pending events, then enable interrupts
+ */
+ adapter->isrcopy = 0;
+ adapter->pshmem->isr = 0;
+ slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
+ slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH);
+
+ slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
+ slic_link_event_handler(adapter);
+
+err:
+ return rc;
+}
+
+static int slic_entry_open(struct net_device *dev)
+{
+ struct adapter *adapter = netdev_priv(dev);
+ struct sliccard *card = adapter->card;
+ u32 locked = 0;
+ int status;
+
+ ASSERT(adapter);
+ ASSERT(card);
+
+ netif_stop_queue(adapter->netdev);
+
+ spin_lock_irqsave(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+ locked = 1;
+ if (!adapter->activated) {
+ card->adapters_activated++;
+ slic_global.num_slic_ports_active++;
+ adapter->activated = 1;
+ }
+ status = slic_if_init(adapter);
+
+ if (status != 0) {
+ if (adapter->activated) {
+ card->adapters_activated--;
+ slic_global.num_slic_ports_active--;
+ adapter->activated = 0;
+ }
+ if (locked) {
+ spin_unlock_irqrestore(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+ locked = 0;
+ }
+ return status;
+ }
+ if (!card->master)
+ card->master = adapter;
+
+ if (locked) {
+ spin_unlock_irqrestore(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+ locked = 0;
+ }
+
+ return 0;
+}
+
+static void slic_card_cleanup(struct sliccard *card)
+{
+ if (card->loadtimerset) {
+ card->loadtimerset = 0;
+ del_timer(&card->loadtimer);
+ }
+
+ slic_debug_card_destroy(card);
+
+ kfree(card);
+}
+
+static void __devexit slic_entry_remove(struct pci_dev *pcidev)
+{
+ struct net_device *dev = pci_get_drvdata(pcidev);
+ u32 mmio_start = 0;
+ uint mmio_len = 0;
+ struct adapter *adapter = netdev_priv(dev);
+ struct sliccard *card;
+ struct mcast_address *mcaddr, *mlist;
+
+ ASSERT(adapter);
+ slic_adapter_freeresources(adapter);
+ slic_unmap_mmio_space(adapter);
+ unregister_netdev(dev);
+
+ mmio_start = pci_resource_start(pcidev, 0);
+ mmio_len = pci_resource_len(pcidev, 0);
+
+ release_mem_region(mmio_start, mmio_len);
+
+ iounmap((void __iomem *)dev->base_addr);
+ /* free multicast addresses */
+ mlist = adapter->mcastaddrs;
+ while (mlist) {
+ mcaddr = mlist;
+ mlist = mlist->next;
+ kfree(mcaddr);
+ }
+ ASSERT(adapter->card);
+ card = adapter->card;
+ ASSERT(card->adapters_allocated);
+ card->adapters_allocated--;
+ adapter->allocated = 0;
+ if (!card->adapters_allocated) {
+ struct sliccard *curr_card = slic_global.slic_card;
+ if (curr_card == card) {
+ slic_global.slic_card = card->next;
+ } else {
+ while (curr_card->next != card)
+ curr_card = curr_card->next;
+ ASSERT(curr_card);
+ curr_card->next = card->next;
+ }
+ ASSERT(slic_global.num_slic_cards);
+ slic_global.num_slic_cards--;
+ slic_card_cleanup(card);
+ }
+ kfree(dev);
+ pci_release_regions(pcidev);
+}
+
+static int slic_entry_halt(struct net_device *dev)
+{
+ struct adapter *adapter = netdev_priv(dev);
+ struct sliccard *card = adapter->card;
+ __iomem struct slic_regs *slic_regs = adapter->slic_regs;
+
+ spin_lock_irqsave(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+ ASSERT(card);
+ netif_stop_queue(adapter->netdev);
+ adapter->state = ADAPT_DOWN;
+ adapter->linkstate = LINK_DOWN;
+ adapter->upr_list = NULL;
+ adapter->upr_busy = 0;
+ adapter->devflags_prev = 0;
+ ASSERT(card->adapter[adapter->cardindex] == adapter);
+ slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ adapter->all_reg_writes++;
+ adapter->icr_reg_writes++;
+ slic_config_clear(adapter);
+ if (adapter->activated) {
+ card->adapters_activated--;
+ slic_global.num_slic_ports_active--;
+ adapter->activated = 0;
+ }
+#ifdef AUTOMATIC_RESET
+ slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH);
+#endif
+ /*
+ * Reset the adapter's cmd queues
+ */
+ slic_cmdq_reset(adapter);
+
+#ifdef AUTOMATIC_RESET
+ if (!card->adapters_activated)
+ slic_card_init(card, adapter);
+#endif
+
+ spin_unlock_irqrestore(&slic_global.driver_lock.lock,
+ slic_global.driver_lock.flags);
+ return 0;
+}
+
+static struct net_device_stats *slic_get_stats(struct net_device *dev)
+{
+ struct adapter *adapter = netdev_priv(dev);
+
+ ASSERT(adapter);
+ dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
+ dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
+ dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
+ dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
+ dev->stats.tx_heartbeat_errors = 0;
+ dev->stats.tx_aborted_errors = 0;
+ dev->stats.tx_window_errors = 0;
+ dev->stats.tx_fifo_errors = 0;
+ dev->stats.rx_frame_errors = 0;
+ dev->stats.rx_length_errors = 0;
+
+ return &dev->stats;
+}
+
+static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct adapter *adapter = netdev_priv(dev);
+ struct ethtool_cmd edata;
+ struct ethtool_cmd ecmd;
+ u32 data[7];
+ u32 intagg;
+
+ ASSERT(rq);
+ switch (cmd) {
+ case SIOCSLICSETINTAGG:
+ if (copy_from_user(data, rq->ifr_data, 28))
+ return -EFAULT;
+ intagg = data[0];
+ dev_err(&dev->dev, "%s: set interrupt aggregation to %d\n",
+ __func__, intagg);
+ slic_intagg_set(adapter, intagg);
+ return 0;
+
+#ifdef SLIC_TRACE_DUMP_ENABLED
+ case SIOCSLICTRACEDUMP:
+ {
+ u32 value;
+ DBG_IOCTL("slic_ioctl SIOCSLIC_TRACE_DUMP\n");
+
+ if (copy_from_user(data, rq->ifr_data, 28)) {
+ PRINT_ERROR
+ ("slic: copy_from_user FAILED getting initial simba param\n");
+ return -EFAULT;
+ }
+
+ value = data[0];
+ if (tracemon_request == SLIC_DUMP_DONE) {
+ PRINT_ERROR
+ ("ATK Diagnostic Trace Dump Requested\n");
+ tracemon_request = SLIC_DUMP_REQUESTED;
+ tracemon_request_type = value;
+ tracemon_timestamp = jiffies;
+ } else if ((tracemon_request == SLIC_DUMP_REQUESTED) ||
+ (tracemon_request ==
+ SLIC_DUMP_IN_PROGRESS)) {
+ PRINT_ERROR
+ ("ATK Diagnostic Trace Dump Requested but already in progress... ignore\n");
+ } else {
+ PRINT_ERROR
+ ("ATK Diagnostic Trace Dump Requested\n");
+ tracemon_request = SLIC_DUMP_REQUESTED;
+ tracemon_request_type = value;
+ tracemon_timestamp = jiffies;
+ }
+ return 0;
+ }
+#endif
+ case SIOCETHTOOL:
+ ASSERT(adapter);
+ if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
+ return -EFAULT;
+
+ if (ecmd.cmd == ETHTOOL_GSET) {
+ edata.supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_MII);
+ edata.port = PORT_MII;
+ edata.transceiver = XCVR_INTERNAL;
+ edata.phy_address = 0;
+ if (adapter->linkspeed == LINK_100MB)
+ edata.speed = SPEED_100;
+ else if (adapter->linkspeed == LINK_10MB)
+ edata.speed = SPEED_10;
+ else
+ edata.speed = 0;
+
+ if (adapter->linkduplex == LINK_FULLD)
+ edata.duplex = DUPLEX_FULL;
+ else
+ edata.duplex = DUPLEX_HALF;
+
+ edata.autoneg = AUTONEG_ENABLE;
+ edata.maxtxpkt = 1;
+ edata.maxrxpkt = 1;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+
+ } else if (ecmd.cmd == ETHTOOL_SSET) {
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (adapter->linkspeed == LINK_100MB)
+ edata.speed = SPEED_100;
+ else if (adapter->linkspeed == LINK_10MB)
+ edata.speed = SPEED_10;
+ else
+ edata.speed = 0;
+
+ if (adapter->linkduplex == LINK_FULLD)
+ edata.duplex = DUPLEX_FULL;
+ else
+ edata.duplex = DUPLEX_HALF;
+
+ edata.autoneg = AUTONEG_ENABLE;
+ edata.maxtxpkt = 1;
+ edata.maxrxpkt = 1;
+ if ((ecmd.speed != edata.speed) ||
+ (ecmd.duplex != edata.duplex)) {
+ u32 speed;
+ u32 duplex;
+
+ if (ecmd.speed == SPEED_10)
+ speed = 0;
+ else
+ speed = PCR_SPEED_100;
+ if (ecmd.duplex == DUPLEX_FULL)
+ duplex = PCR_DUPLEX_FULL;
+ else
+ duplex = 0;
+ slic_link_config(adapter, speed, duplex);
+ slic_link_event_handler(adapter);
+ }
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void slic_config_pci(struct pci_dev *pcidev)
+{
+ u16 pci_command;
+ u16 new_command;
+
+ pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
+
+ new_command = pci_command | PCI_COMMAND_MASTER
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_INVALIDATE
+ | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
+ if (pci_command != new_command)
+ pci_write_config_word(pcidev, PCI_COMMAND, new_command);
+}
+
+static int slic_card_init(struct sliccard *card, struct adapter *adapter)
+{
+ __iomem struct slic_regs *slic_regs = adapter->slic_regs;
+ struct slic_eeprom *peeprom;
+ struct oslic_eeprom *pOeeprom;
+ dma_addr_t phys_config;
+ u32 phys_configh;
+ u32 phys_configl;
+ u32 i = 0;
+ struct slic_shmem *pshmem;
+ int status;
+ uint macaddrs = card->card_size;
+ ushort eecodesize;
+ ushort dramsize;
+ ushort ee_chksum;
+ ushort calc_chksum;
+ struct slic_config_mac *pmac;
+ unsigned char fruformat;
+ unsigned char oemfruformat;
+ struct atk_fru *patkfru;
+ union oemfru *poemfru;
+
+ /* Reset everything except PCI configuration space */
+ slic_soft_reset(adapter);
+
+ /* Download the microcode */
+ status = slic_card_download(adapter);
+
+ if (status != 0) {
+ dev_err(&adapter->pcidev->dev,
+ "download failed bus %d slot %d\n",
+ adapter->busnumber, adapter->slotnumber);
+ return status;
+ }
+
+ if (!card->config_set) {
+ peeprom = pci_alloc_consistent(adapter->pcidev,
+ sizeof(struct slic_eeprom),
+ &phys_config);
+
+ phys_configl = SLIC_GET_ADDR_LOW(phys_config);
+ phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
+
+ if (!peeprom) {
+ dev_err(&adapter->pcidev->dev,
+ "eeprom read failed to get memory "
+ "bus %d slot %d\n", adapter->busnumber,
+ adapter->slotnumber);
+ return -ENOMEM;
+ } else {
+ memset(peeprom, 0, sizeof(struct slic_eeprom));
+ }
+ slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ mdelay(1);
+ pshmem = (struct slic_shmem *)adapter->phys_shmem;
+
+ spin_lock_irqsave(&adapter->bit64reglock.lock,
+ adapter->bit64reglock.flags);
+ slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
+ slic_reg32_write(&slic_regs->slic_isp,
+ SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
+ spin_unlock_irqrestore(&adapter->bit64reglock.lock,
+ adapter->bit64reglock.flags);
+
+ slic_config_get(adapter, phys_configl, phys_configh);
+
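+ /*
+ * Poll shared memory for the utility processor to signal completion
+ * (ISR_UPC) of the configuration fetch requested above, giving up
+ * after roughly 5 seconds (5000 iterations of mdelay(1)).
+ */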
+ for (;;) {
+ if (adapter->pshmem->isr) {
+ if (adapter->pshmem->isr & ISR_UPC) {
+ adapter->pshmem->isr = 0;
+ slic_reg64_write(adapter,
+ &slic_regs->slic_isp, 0,
+ &slic_regs->slic_addr_upper,
+ 0, FLUSH);
+ slic_reg32_write(&slic_regs->slic_isr,
+ 0, FLUSH);
+
+ slic_upr_request_complete(adapter, 0);
+ break;
+ } else {
+ adapter->pshmem->isr = 0;
+ slic_reg32_write(&slic_regs->slic_isr,
+ 0, FLUSH);
+ }
+ } else {
+ mdelay(1);
+ i++;
+ if (i > 5000) {
+ dev_err(&adapter->pcidev->dev,
+ "%d config data fetch timed out!\n",
+ adapter->port);
+ slic_reg64_write(adapter,
+ &slic_regs->slic_isp, 0,
+ &slic_regs->slic_addr_upper,
+ 0, FLUSH);
+ return -EINVAL;
+ }
+ }
+ }
+
+ switch (adapter->devid) {
+ /* Oasis card */
+ case SLIC_2GB_DEVICE_ID:
+ /* extract EEPROM data and pointers to EEPROM data */
+ pOeeprom = (struct oslic_eeprom *) peeprom;
+ eecodesize = pOeeprom->EecodeSize;
+ dramsize = pOeeprom->DramSize;
+ pmac = pOeeprom->MacInfo;
+ fruformat = pOeeprom->FruFormat;
+ patkfru = &pOeeprom->AtkFru;
+ oemfruformat = pOeeprom->OemFruFormat;
+ poemfru = &pOeeprom->OemFru;
+ macaddrs = 2;
+ /* Minor kludge for the Oasis card: get 2 MAC
+ addresses from the EEPROM to ensure that
+ function 1 gets the Port 1 MAC address */
+ break;
+ default:
+ /* extract EEPROM data and pointers to EEPROM data */
+ eecodesize = peeprom->EecodeSize;
+ dramsize = peeprom->DramSize;
+ pmac = peeprom->u2.mac.MacInfo;
+ fruformat = peeprom->FruFormat;
+ patkfru = &peeprom->AtkFru;
+ oemfruformat = peeprom->OemFruFormat;
+ poemfru = &peeprom->OemFru;
+ break;
+ }
+
+ card->config.EepromValid = false;
+
+ /* see if the EEPROM is valid by checking its checksum */
+ if ((eecodesize <= MAX_EECODE_SIZE) &&
+ (eecodesize >= MIN_EECODE_SIZE)) {
+
+ ee_chksum =
+ *(u16 *) ((char *) peeprom + (eecodesize - 2));
+ /*
+ calculate the EEPROM checksum
+ */
+ calc_chksum =
+ ~slic_eeprom_cksum((char *) peeprom,
+ (eecodesize - 2));
+ /* if the ucode chksum flag bit worked, we wouldn't need this */
+ if (ee_chksum == calc_chksum)
+ card->config.EepromValid = true;
+ }
+ /* copy in the DRAM size */
+ card->config.DramSize = dramsize;
+
+ /* copy in the MAC address(es) */
+ for (i = 0; i < macaddrs; i++) {
+ memcpy(&card->config.MacInfo[i],
+ &pmac[i], sizeof(struct slic_config_mac));
+ }
+
+ /* copy the Alacritech FRU information */
+ card->config.FruFormat = fruformat;
+ memcpy(&card->config.AtkFru, patkfru,
+ sizeof(struct atk_fru));
+
+ pci_free_consistent(adapter->pcidev,
+ sizeof(struct slic_eeprom),
+ peeprom, phys_config);
+
+ if ((!card->config.EepromValid) &&
+ (adapter->reg_params.fail_on_bad_eeprom)) {
+ slic_reg64_write(adapter, &slic_regs->slic_isp, 0,
+ &slic_regs->slic_addr_upper,
+ 0, FLUSH);
+ dev_err(&adapter->pcidev->dev,
+ "unsupported CONFIGURATION EEPROM invalid\n");
+ return -EINVAL;
+ }
+
+ card->config_set = 1;
+ }
+
+ if (slic_card_download_gbrcv(adapter)) {
+ dev_err(&adapter->pcidev->dev,
+ "unable to download GB receive microcode\n");
+ return -EINVAL;
+ }
+
+ if (slic_global.dynamic_intagg)
+ slic_intagg_set(adapter, 0);
+ else
+ slic_intagg_set(adapter, intagg_delay);
+
+ /*
+ * Initialize ping status to "ok"
+ */
+ card->pingstatus = ISR_PINGMASK;
+
+ /*
+ * Lastly, mark our card state as up and return success
+ */
+ card->state = CARD_UP;
+ card->reset_in_progress = 0;
+
+ return 0;
+}
+
+static void slic_init_driver(void)
+{
+ if (slic_first_init) {
+ slic_first_init = 0;
+ spin_lock_init(&slic_global.driver_lock.lock);
+ slic_debug_init();
+ }
+}
+
+static void slic_init_adapter(struct net_device *netdev,
+ struct pci_dev *pcidev,
+ const struct pci_device_id *pci_tbl_entry,
+ void __iomem *memaddr, int chip_idx)
+{
+ ushort index;
+ struct slic_handle *pslic_handle;
+ struct adapter *adapter = netdev_priv(netdev);
+
+/* adapter->pcidev = pcidev;*/
+ adapter->vendid = pci_tbl_entry->vendor;
+ adapter->devid = pci_tbl_entry->device;
+ adapter->subsysid = pci_tbl_entry->subdevice;
+ adapter->busnumber = pcidev->bus->number;
+ adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
+ adapter->functionnumber = (pcidev->devfn & 0x7);
+ adapter->memorylength = pci_resource_len(pcidev, 0);
+ adapter->slic_regs = (__iomem struct slic_regs *)memaddr;
+ adapter->irq = pcidev->irq;
+/* adapter->netdev = netdev;*/
+ adapter->next_netdevice = head_netdevice;
+ head_netdevice = netdev;
+ adapter->chipid = chip_idx;
+ adapter->port = 0; /*adapter->functionnumber;*/
+ adapter->cardindex = adapter->port;
+ adapter->memorybase = memaddr;
+ spin_lock_init(&adapter->upr_lock.lock);
+ spin_lock_init(&adapter->bit64reglock.lock);
+ spin_lock_init(&adapter->adapter_lock.lock);
+ spin_lock_init(&adapter->reset_lock.lock);
+ spin_lock_init(&adapter->handle_lock.lock);
+
+ adapter->card_size = 1;
+ /*
+ Initialize slic_handle array
+ */
+ ASSERT(SLIC_CMDQ_MAXCMDS <= 0xFFFF);
+ /*
+ Start with 1. 0 is an invalid host handle.
+ */
+ for (index = 1, pslic_handle = &adapter->slic_handles[1];
+ index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
+
+ pslic_handle->token.handle_index = index;
+ pslic_handle->type = SLIC_HANDLE_FREE;
+ pslic_handle->next = adapter->pfree_slic_handles;
+ adapter->pfree_slic_handles = pslic_handle;
+ }
+ adapter->pshmem = (struct slic_shmem *)
+ pci_alloc_consistent(adapter->pcidev,
+ sizeof(struct slic_shmem),
+ &adapter->phys_shmem);
+ ASSERT(adapter->pshmem);
+
+ memset(adapter->pshmem, 0, sizeof(struct slic_shmem));
+
+ return;
+}
+
+static const struct net_device_ops slic_netdev_ops = {
+ .ndo_open = slic_entry_open,
+ .ndo_stop = slic_entry_halt,
+ .ndo_start_xmit = slic_xmit_start,
+ .ndo_do_ioctl = slic_ioctl,
+ .ndo_set_mac_address = slic_mac_set_address,
+ .ndo_get_stats = slic_get_stats,
+ .ndo_set_multicast_list = slic_mcast_set_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static u32 slic_card_locate(struct adapter *adapter)
+{
+ struct sliccard *card = slic_global.slic_card;
+ struct physcard *physcard = slic_global.phys_card;
+ ushort card_hostid;
+ u16 __iomem *hostid_reg;
+ uint i;
+ uint rdhostid_offset = 0;
+
+ switch (adapter->devid) {
+ case SLIC_2GB_DEVICE_ID:
+ rdhostid_offset = SLIC_RDHOSTID_2GB;
+ break;
+ case SLIC_1GB_DEVICE_ID:
+ rdhostid_offset = SLIC_RDHOSTID_1GB;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ hostid_reg =
+ (u16 __iomem *) (((u8 __iomem *) (adapter->slic_regs)) +
+ rdhostid_offset);
+
+ /* read the 16 bit hostid from SRAM */
+ card_hostid = (ushort) readw(hostid_reg);
+
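+ /*
+ * A hostid of SLIC_HOSTID_DEFAULT means no logical card structure
+ * has claimed this board yet; any other value is the cardnum of the
+ * sliccard this port already belongs to.
+ */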
+ /* Initialize a new card structure if need be */
+ if (card_hostid == SLIC_HOSTID_DEFAULT) {
+ card = kzalloc(sizeof(struct sliccard), GFP_KERNEL);
+ if (card == NULL)
+ return -ENOMEM;
+
+ card->next = slic_global.slic_card;
+ slic_global.slic_card = card;
+ card->busnumber = adapter->busnumber;
+ card->slotnumber = adapter->slotnumber;
+
+ /* Find an available cardnum */
+ for (i = 0; i < SLIC_MAX_CARDS; i++) {
+ if (slic_global.cardnuminuse[i] == 0) {
+ slic_global.cardnuminuse[i] = 1;
+ card->cardnum = i;
+ break;
+ }
+ }
+ slic_global.num_slic_cards++;
+
+ slic_debug_card_create(card);
+ } else {
+ /* Card exists, find the card this adapter belongs to */
+ while (card) {
+ if (card->cardnum == card_hostid)
+ break;
+ card = card->next;
+ }
+ }
+
+ ASSERT(card);
+ if (!card)
+ return -ENXIO;
+ /* Put the adapter in the card's adapter list */
+ ASSERT(card->adapter[adapter->port] == NULL);
+ if (!card->adapter[adapter->port]) {
+ card->adapter[adapter->port] = adapter;
+ adapter->card = card;
+ }
+
+ card->card_size = 1; /* one port per *logical* card */
+
+ while (physcard) {
+ for (i = 0; i < SLIC_MAX_PORTS; i++) {
+ if (!physcard->adapter[i])
+ continue;
+ else
+ break;
+ }
+ ASSERT(i != SLIC_MAX_PORTS);
+ if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
+ break;
+ physcard = physcard->next;
+ }
+ if (!physcard) {
+ /* no structure allocated for this physical card yet */
+ physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
+ ASSERT(physcard);
+
+ physcard->next = slic_global.phys_card;
+ slic_global.phys_card = physcard;
+ physcard->adapters_allocd = 1;
+ } else {
+ physcard->adapters_allocd++;
+ }
+ /* Note - this is ZERO relative */
+ adapter->physport = physcard->adapters_allocd - 1;
+
+ ASSERT(physcard->adapter[adapter->physport] == NULL);
+ physcard->adapter[adapter->physport] = adapter;
+ adapter->physcard = physcard;
+
+ return 0;
+}
+
+static int __devinit slic_entry_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *pci_tbl_entry)
+{
+ static int cards_found;
+ static int did_version;
+ int err = -ENODEV;
+ struct net_device *netdev;
+ struct adapter *adapter;
+ void __iomem *memmapped_ioaddr = NULL;
+ u32 status = 0;
+ ulong mmio_start = 0;
+ ulong mmio_len = 0;
+ struct sliccard *card = NULL;
+ int pci_using_dac = 0;
+
+ slic_global.dynamic_intagg = dynamic_intagg;
+
+ err = pci_enable_device(pcidev);
+
+ if (err)
+ return err;
+
+ if (slic_debug > 0 && did_version++ == 0) {
+ printk(KERN_DEBUG "%s\n", slic_banner);
+ printk(KERN_DEBUG "%s\n", slic_proc_version);
+ }
+
+ if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for "
+ "consistent allocations\n");
+ goto err_out_disable_pci;
+ }
+ } else if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
+ pci_using_dac = 0;
+ pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pcidev->dev, "no usable DMA configuration\n");
+ goto err_out_disable_pci;
+ }
+
+ err = pci_request_regions(pcidev, DRV_NAME);
+ if (err) {
+ dev_err(&pcidev->dev, "can't obtain PCI resources\n");
+ goto err_out_disable_pci;
+ }
+
+ pci_set_master(pcidev);
+
+ netdev = alloc_etherdev(sizeof(struct adapter));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_out_exit_slic_probe;
+ }
+
+ SET_NETDEV_DEV(netdev, &pcidev->dev);
+
+ pci_set_drvdata(pcidev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pcidev = pcidev;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ mmio_start = pci_resource_start(pcidev, 0);
+ mmio_len = pci_resource_len(pcidev, 0);
+
+/* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/
+ memmapped_ioaddr = ioremap(mmio_start, mmio_len);
+ if (!memmapped_ioaddr) {
+ dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
+ mmio_len, mmio_start);
+ goto err_out_free_netdev;
+ }
+
+ slic_config_pci(pcidev);
+
+ slic_init_driver();
+
+ slic_init_adapter(netdev,
+ pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found);
+
+ status = slic_card_locate(adapter);
+ if (status) {
+ dev_err(&pcidev->dev, "cannot locate card\n");
+ goto err_out_free_mmio_region;
+ }
+
+ card = adapter->card;
+
+ if (!adapter->allocated) {
+ card->adapters_allocated++;
+ adapter->allocated = 1;
+ }
+
+ status = slic_card_init(card, adapter);
+
+ if (status != 0) {
+ card->state = CARD_FAIL;
+ adapter->state = ADAPT_FAIL;
+ adapter->linkstate = LINK_DOWN;
+ dev_err(&pcidev->dev, "FAILED status[%x]\n", status);
+ } else {
+ slic_adapter_set_hwaddr(adapter);
+ }
+
+ netdev->base_addr = (unsigned long)adapter->memorybase;
+ netdev->irq = adapter->irq;
+ netdev->netdev_ops = &slic_netdev_ops;
+
+ slic_debug_adapter_create(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_unmap;
+ }
+
+ cards_found++;
+
+ return status;
+
+err_out_unmap:
+ iounmap(memmapped_ioaddr);
+err_out_free_mmio_region:
+ release_mem_region(mmio_start, mmio_len);
+err_out_free_netdev:
+ free_netdev(netdev);
+err_out_exit_slic_probe:
+ pci_release_regions(pcidev);
+err_out_disable_pci:
+ pci_disable_device(pcidev);
+ return err;
+}
static struct pci_driver slic_driver = {
.name = DRV_NAME,
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 9ffeb36ddde..f6b401c0ccc 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -835,7 +835,7 @@ __setup("vga=", sm712vga_setup);
* Original init function changed to probe method to be used by pci_drv
* process used to detect chips replaced with kernel process in pci_drv
*/
-static int __init smtcfb_pci_probe(struct pci_dev *pdev,
+static int __devinit smtcfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct smtcfb_info *sfb;
diff --git a/drivers/staging/solo6x10/Kconfig b/drivers/staging/solo6x10/Kconfig
new file mode 100644
index 00000000000..d96398c701f
--- /dev/null
+++ b/drivers/staging/solo6x10/Kconfig
@@ -0,0 +1,7 @@
+config SOLO6X10
+ tristate "Softlogic 6x10 MPEG codec cards"
+ depends on PCI && VIDEO_DEV && SND
+ select VIDEOBUF_DMA_CONTIG
+ ---help---
+ This driver supports the Softlogic-based MPEG-4 and H.264 codec
+ cards.
diff --git a/drivers/staging/solo6x10/Makefile b/drivers/staging/solo6x10/Makefile
new file mode 100644
index 00000000000..7e70044d8da
--- /dev/null
+++ b/drivers/staging/solo6x10/Makefile
@@ -0,0 +1,6 @@
+solo6x10-objs := solo6010-core.o solo6010-i2c.o solo6010-p2m.o \
+ solo6010-v4l2.o solo6010-tw28.o solo6010-gpio.o \
+ solo6010-disp.o solo6010-enc.o solo6010-v4l2-enc.o \
+ solo6010-g723.o
+
+obj-$(CONFIG_SOLO6X10) := solo6x10.o
diff --git a/drivers/staging/solo6x10/TODO b/drivers/staging/solo6x10/TODO
new file mode 100644
index 00000000000..e6a2ee22674
--- /dev/null
+++ b/drivers/staging/solo6x10/TODO
@@ -0,0 +1,28 @@
+TODO (staging => main):
+
+ * checkpatch.pl (haven't run it yet)
+ * Lindent (should be clean, but check)
+ * Motion detection flags need to be moved to v4l2
+ * Some private CIDs need to be moved to v4l2
+
+TODO (general):
+
+ * encoder on/off controls
+ * mpeg cid bitrate mode (vbr/cbr)
+ * mpeg cid bitrate/bitrate-peak
+ * mpeg encode of user data
+ * mpeg decode of user data
+ * switch between 4 frames/irq to 1 when using mjpeg (and then back
+ when not)
+ * implement a CID control for motion areas/thresholds
+ * implement CID controls for mosaic areas
+ * allow for higher level of interval (for < 1 fps)
+ * sound:
+ - implement playback via external sound jack
+ - implement loopback of external sound jack with incoming audio?
+ - implement pause/resume
+ - check into jacking sound from tw28xx chips directly (to avoid
+ g.723/8khz limitations)
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc Ben Collins
+<bcollins@bluecherry.net>
diff --git a/drivers/staging/solo6x10/solo6010-core.c b/drivers/staging/solo6x10/solo6010-core.c
new file mode 100644
index 00000000000..98c6739fc19
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-core.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+
+#include "solo6010.h"
+#include "solo6010-tw28.h"
+
+MODULE_DESCRIPTION("Softlogic 6010 MP4 Encoder/Decoder V4L2/ALSA Driver");
+MODULE_AUTHOR("Ben Collins <bcollins@bluecherry.net>");
+MODULE_VERSION(SOLO6010_VERSION);
+MODULE_LICENSE("GPL");
+
+void solo6010_irq_on(struct solo6010_dev *solo_dev, u32 mask)
+{
+ solo_dev->irq_mask |= mask;
+ solo_reg_write(solo_dev, SOLO_IRQ_ENABLE, solo_dev->irq_mask);
+}
+
+void solo6010_irq_off(struct solo6010_dev *solo_dev, u32 mask)
+{
+ solo_dev->irq_mask &= ~mask;
+ solo_reg_write(solo_dev, SOLO_IRQ_ENABLE, solo_dev->irq_mask);
+}
+
+/* XXX We should check the return value of the sub-device ISR's */
+static irqreturn_t solo6010_isr(int irq, void *data)
+{
+ struct solo6010_dev *solo_dev = data;
+ u32 status;
+ int i;
+
+ status = solo_reg_read(solo_dev, SOLO_IRQ_STAT);
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & ~solo_dev->irq_mask) {
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT,
+ status & ~solo_dev->irq_mask);
+ status &= solo_dev->irq_mask;
+ }
+
+ if (status & SOLO_IRQ_PCI_ERR) {
+ u32 err = solo_reg_read(solo_dev, SOLO_PCI_ERR);
+ solo_p2m_error_isr(solo_dev, err);
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_PCI_ERR);
+ }
+
+ for (i = 0; i < SOLO_NR_P2M; i++)
+ if (status & SOLO_IRQ_P2M(i))
+ solo_p2m_isr(solo_dev, i);
+
+ if (status & SOLO_IRQ_IIC)
+ solo_i2c_isr(solo_dev);
+
+ if (status & SOLO_IRQ_VIDEO_IN)
+ solo_video_in_isr(solo_dev);
+
+ /* Call this first so enc gets detected flag set */
+ if (status & SOLO_IRQ_MOTION)
+ solo_motion_isr(solo_dev);
+
+ if (status & SOLO_IRQ_ENCODER)
+ solo_enc_v4l2_isr(solo_dev);
+
+ if (status & SOLO_IRQ_G723)
+ solo_g723_isr(solo_dev);
+
+ return IRQ_HANDLED;
+}
+
+static void free_solo_dev(struct solo6010_dev *solo_dev)
+{
+ struct pci_dev *pdev;
+
+ if (!solo_dev)
+ return;
+
+ pdev = solo_dev->pdev;
+
+ /* If we never initialized the PCI device, then nothing else
+ * below here needs cleanup */
+ if (!pdev) {
+ kfree(solo_dev);
+ return;
+ }
+
+ /* Bring down the sub-devices first */
+ solo_g723_exit(solo_dev);
+ solo_enc_v4l2_exit(solo_dev);
+ solo_enc_exit(solo_dev);
+ solo_v4l2_exit(solo_dev);
+ solo_disp_exit(solo_dev);
+ solo_gpio_exit(solo_dev);
+ solo_p2m_exit(solo_dev);
+ solo_i2c_exit(solo_dev);
+
+ /* Now cleanup the PCI device */
+ if (solo_dev->reg_base) {
+ solo6010_irq_off(solo_dev, ~0);
+ pci_iounmap(pdev, solo_dev->reg_base);
+ free_irq(pdev->irq, solo_dev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ kfree(solo_dev);
+}
+
+static int __devinit solo6010_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct solo6010_dev *solo_dev;
+ int ret;
+ int sdram;
+ u8 chip_id;
+
+ if ((solo_dev = kzalloc(sizeof(*solo_dev), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ solo_dev->pdev = pdev;
+ spin_lock_init(&solo_dev->reg_io_lock);
+ pci_set_drvdata(pdev, solo_dev);
+
+ if ((ret = pci_enable_device(pdev)))
+ goto fail_probe;
+
+ pci_set_master(pdev);
+
+ if ((ret = pci_request_regions(pdev, SOLO6010_NAME)))
+ goto fail_probe;
+
+ if ((solo_dev->reg_base = pci_ioremap_bar(pdev, 0)) == NULL) {
+ ret = -ENOMEM;
+ goto fail_probe;
+ }
+
+ chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
+ SOLO_CHIP_ID_MASK;
+ switch (chip_id) {
+ case 7:
+ solo_dev->nr_chans = 16;
+ solo_dev->nr_ext = 5;
+ break;
+ case 6:
+ solo_dev->nr_chans = 8;
+ solo_dev->nr_ext = 2;
+ break;
+ default:
+ dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, "
+ "defaulting to 4 channels\n",
+ chip_id);
+ case 5:
+ solo_dev->nr_chans = 4;
+ solo_dev->nr_ext = 1;
+ }
+
+ /* Disable all interrupts to start */
+ solo6010_irq_off(solo_dev, ~0);
+
+ /* Initial global settings */
+ solo_reg_write(solo_dev, SOLO_SYS_CFG, SOLO_SYS_CFG_SDRAM64BIT |
+ SOLO_SYS_CFG_INPUTDIV(25) |
+ SOLO_SYS_CFG_FEEDBACKDIV((SOLO_CLOCK_MHZ * 2) - 2) |
+ SOLO_SYS_CFG_OUTDIV(3));
+ solo_reg_write(solo_dev, SOLO_TIMER_CLOCK_NUM, SOLO_CLOCK_MHZ - 1);
+
+ /* PLL locking time of 1ms */
+ mdelay(1);
+
+ ret = request_irq(pdev->irq, solo6010_isr, IRQF_SHARED, SOLO6010_NAME,
+ solo_dev);
+ if (ret)
+ goto fail_probe;
+
+ /* Handle this from the start */
+ solo6010_irq_on(solo_dev, SOLO_IRQ_PCI_ERR);
+
+ if ((ret = solo_i2c_init(solo_dev)))
+ goto fail_probe;
+
+ /* Setup the DMA engine */
+ sdram = (solo_dev->nr_chans >= 8) ? 2 : 1;
+ solo_reg_write(solo_dev, SOLO_DMA_CTRL,
+ SOLO_DMA_CTRL_REFRESH_CYCLE(1) |
+ SOLO_DMA_CTRL_SDRAM_SIZE(sdram) |
+ SOLO_DMA_CTRL_SDRAM_CLK_INVERT |
+ SOLO_DMA_CTRL_READ_CLK_SELECT |
+ SOLO_DMA_CTRL_LATENCY(1));
+
+ if ((ret = solo_p2m_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_disp_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_gpio_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_tw28_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_v4l2_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_enc_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_enc_v4l2_init(solo_dev)))
+ goto fail_probe;
+
+ if ((ret = solo_g723_init(solo_dev)))
+ goto fail_probe;
+
+ return 0;
+
+fail_probe:
+ free_solo_dev(solo_dev);
+ return ret;
+}
+
+static void __devexit solo6010_pci_remove(struct pci_dev *pdev)
+{
+ struct solo6010_dev *solo_dev = pci_get_drvdata(pdev);
+
+ free_solo_dev(solo_dev);
+}
+
+static struct pci_device_id solo6010_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6010)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_16)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_16)},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, solo6010_id_table);
+
+static struct pci_driver solo6010_pci_driver = {
+ .name = SOLO6010_NAME,
+ .id_table = solo6010_id_table,
+ .probe = solo6010_pci_probe,
+ .remove = solo6010_pci_remove,
+};
+
+static int __init solo6010_module_init(void)
+{
+ return pci_register_driver(&solo6010_pci_driver);
+}
+
+static void __exit solo6010_module_exit(void)
+{
+ pci_unregister_driver(&solo6010_pci_driver);
+}
+
+module_init(solo6010_module_init);
+module_exit(solo6010_module_exit);
diff --git a/drivers/staging/solo6x10/solo6010-disp.c b/drivers/staging/solo6x10/solo6010-disp.c
new file mode 100644
index 00000000000..555f024f72e
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-disp.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+
+#include "solo6010.h"
+
+#define SOLO_VCLK_DELAY 3
+#define SOLO_PROGRESSIVE_VSIZE 1024
+
+#define SOLO_MOT_THRESH_W 64
+#define SOLO_MOT_THRESH_H 64
+#define SOLO_MOT_THRESH_SIZE 8192
+#define SOLO_MOT_THRESH_REAL (SOLO_MOT_THRESH_W * SOLO_MOT_THRESH_H)
+#define SOLO_MOT_FLAG_SIZE 512
+#define SOLO_MOT_FLAG_AREA (SOLO_MOT_FLAG_SIZE * 32)
+
+static unsigned video_type;
+module_param(video_type, uint, 0644);
+MODULE_PARM_DESC(video_type, "video_type (0 = NTSC/Default, 1 = PAL)");
+
+static void solo_vin_config(struct solo6010_dev *solo_dev)
+{
+ solo_dev->vin_hstart = 8;
+ solo_dev->vin_vstart = 2;
+
+ solo_reg_write(solo_dev, SOLO_SYS_VCLK,
+ SOLO_VCLK_SELECT(2) |
+ SOLO_VCLK_VIN1415_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN1213_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN1011_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN0809_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN0607_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN0405_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN0203_DELAY(SOLO_VCLK_DELAY) |
+ SOLO_VCLK_VIN0001_DELAY(SOLO_VCLK_DELAY));
+
+ solo_reg_write(solo_dev, SOLO_VI_ACT_I_P,
+ SOLO_VI_H_START(solo_dev->vin_hstart) |
+ SOLO_VI_V_START(solo_dev->vin_vstart) |
+ SOLO_VI_V_STOP(solo_dev->vin_vstart +
+ solo_dev->video_vsize));
+
+ solo_reg_write(solo_dev, SOLO_VI_ACT_I_S,
+ SOLO_VI_H_START(solo_dev->vout_hstart) |
+ SOLO_VI_V_START(solo_dev->vout_vstart) |
+ SOLO_VI_V_STOP(solo_dev->vout_vstart +
+ solo_dev->video_vsize));
+
+ solo_reg_write(solo_dev, SOLO_VI_ACT_P,
+ SOLO_VI_H_START(0) |
+ SOLO_VI_V_START(1) |
+ SOLO_VI_V_STOP(SOLO_PROGRESSIVE_VSIZE));
+
+ solo_reg_write(solo_dev, SOLO_VI_CH_FORMAT,
+ SOLO_VI_FD_SEL_MASK(0) | SOLO_VI_PROG_MASK(0));
+
+ solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 0);
+ solo_reg_write(solo_dev, SOLO_VI_PAGE_SW, 2);
+
+ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
+ solo_reg_write(solo_dev, SOLO_VI_PB_CONFIG,
+ SOLO_VI_PB_USER_MODE);
+ solo_reg_write(solo_dev, SOLO_VI_PB_RANGE_HV,
+ SOLO_VI_PB_HSIZE(858) | SOLO_VI_PB_VSIZE(246));
+ solo_reg_write(solo_dev, SOLO_VI_PB_ACT_V,
+ SOLO_VI_PB_VSTART(4) |
+ SOLO_VI_PB_VSTOP(4 + 240));
+ } else {
+ solo_reg_write(solo_dev, SOLO_VI_PB_CONFIG,
+ SOLO_VI_PB_USER_MODE | SOLO_VI_PB_PAL);
+ solo_reg_write(solo_dev, SOLO_VI_PB_RANGE_HV,
+ SOLO_VI_PB_HSIZE(864) | SOLO_VI_PB_VSIZE(294));
+ solo_reg_write(solo_dev, SOLO_VI_PB_ACT_V,
+ SOLO_VI_PB_VSTART(4) |
+ SOLO_VI_PB_VSTOP(4 + 288));
+ }
+ solo_reg_write(solo_dev, SOLO_VI_PB_ACT_H, SOLO_VI_PB_HSTART(16) |
+ SOLO_VI_PB_HSTOP(16 + 720));
+}
+
+static void solo_disp_config(struct solo6010_dev *solo_dev)
+{
+ solo_dev->vout_hstart = 6;
+ solo_dev->vout_vstart = 8;
+
+ solo_reg_write(solo_dev, SOLO_VO_BORDER_LINE_COLOR,
+ (0xa0 << 24) | (0x88 << 16) | (0xa0 << 8) | 0x88);
+ solo_reg_write(solo_dev, SOLO_VO_BORDER_FILL_COLOR,
+ (0x10 << 24) | (0x8f << 16) | (0x10 << 8) | 0x8f);
+ solo_reg_write(solo_dev, SOLO_VO_BKG_COLOR,
+ (16 << 24) | (128 << 16) | (16 << 8) | 128);
+
+ solo_reg_write(solo_dev, SOLO_VO_FMT_ENC,
+ solo_dev->video_type |
+ SOLO_VO_USER_COLOR_SET_NAV |
+ SOLO_VO_NA_COLOR_Y(0) |
+ SOLO_VO_NA_COLOR_CB(0) |
+ SOLO_VO_NA_COLOR_CR(0));
+
+ solo_reg_write(solo_dev, SOLO_VO_ACT_H,
+ SOLO_VO_H_START(solo_dev->vout_hstart) |
+ SOLO_VO_H_STOP(solo_dev->vout_hstart +
+ solo_dev->video_hsize));
+
+ solo_reg_write(solo_dev, SOLO_VO_ACT_V,
+ SOLO_VO_V_START(solo_dev->vout_vstart) |
+ SOLO_VO_V_STOP(solo_dev->vout_vstart +
+ solo_dev->video_vsize));
+
+ solo_reg_write(solo_dev, SOLO_VO_RANGE_HV,
+ SOLO_VO_H_LEN(solo_dev->video_hsize) |
+ SOLO_VO_V_LEN(solo_dev->video_vsize));
+
+ solo_reg_write(solo_dev, SOLO_VI_WIN_SW, 5);
+
+ solo_reg_write(solo_dev, SOLO_VO_DISP_CTRL, SOLO_VO_DISP_ON |
+ SOLO_VO_DISP_ERASE_COUNT(8) |
+ SOLO_VO_DISP_BASE(SOLO_DISP_EXT_ADDR(solo_dev)));
+
+ solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, SOLO_VO_DISP_ERASE_ON);
+
+ /* Enable channels we support */
+ solo_reg_write(solo_dev, SOLO_VI_CH_ENA, (1 << solo_dev->nr_chans) - 1);
+
+ /* Disable the watchdog */
+ solo_reg_write(solo_dev, SOLO_WATCHDOG, 0);
+}
+
+static int solo_dma_vin_region(struct solo6010_dev *solo_dev, u32 off,
+ u16 val, int reg_size)
+{
+ u16 buf[64];
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < sizeof(buf) >> 1; i++)
+ buf[i] = val;
+
+ for (i = 0; i < reg_size; i += sizeof(buf))
+ ret |= solo_p2m_dma(solo_dev, SOLO_P2M_DMA_ID_VIN, 1, buf,
+ SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
+ sizeof(buf));
+
+ return ret;
+}
+
+void solo_set_motion_threshold(struct solo6010_dev *solo_dev, u8 ch, u16 val)
+{
+ if (ch > solo_dev->nr_chans)
+ return;
+
+ solo_dma_vin_region(solo_dev, SOLO_MOT_FLAG_AREA +
+ (ch * SOLO_MOT_THRESH_SIZE * 2),
+ val, SOLO_MOT_THRESH_REAL);
+}
+
+/* First 8k is motion flag (512 bytes * 16). Following that is an 8k+8k
+ * threshold and working table for each channel. At least that's what the
+ * spec says. However, this code (taken from the rdk) has a mystery 8k
+ * block right after the flag area, before the first thresh table. */
+static void solo_motion_config(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ /* Clear motion flag area */
+ solo_dma_vin_region(solo_dev, i * SOLO_MOT_FLAG_SIZE, 0x0000,
+ SOLO_MOT_FLAG_SIZE);
+
+ /* Clear working cache table */
+ solo_dma_vin_region(solo_dev, SOLO_MOT_FLAG_AREA +
+ SOLO_MOT_THRESH_SIZE +
+ (i * SOLO_MOT_THRESH_SIZE * 2),
+ 0x0000, SOLO_MOT_THRESH_REAL);
+
+ /* Set default threshold table */
+ solo_set_motion_threshold(solo_dev, i, SOLO_DEF_MOT_THRESH);
+ }
+
+ /* Default motion settings */
+ solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) |
+ (SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
+ solo_reg_write(solo_dev, SOLO_VI_MOT_CTRL,
+ SOLO_VI_MOTION_FRAME_COUNT(3) |
+ SOLO_VI_MOTION_SAMPLE_LENGTH(solo_dev->video_hsize / 16) |
+ /* SOLO_VI_MOTION_INTR_START_STOP | */
+ SOLO_VI_MOTION_SAMPLE_COUNT(10));
+
+ solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
+ solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR, 0);
+}
+
+int solo_disp_init(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ solo_dev->video_hsize = 704;
+ if (video_type == 0) {
+ solo_dev->video_type = SOLO_VO_FMT_TYPE_NTSC;
+ solo_dev->video_vsize = 240;
+ solo_dev->fps = 30;
+ } else {
+ solo_dev->video_type = SOLO_VO_FMT_TYPE_PAL;
+ solo_dev->video_vsize = 288;
+ solo_dev->fps = 25;
+ }
+
+ solo_vin_config(solo_dev);
+ solo_motion_config(solo_dev);
+ solo_disp_config(solo_dev);
+
+ for (i = 0; i < solo_dev->nr_chans; i++)
+ solo_reg_write(solo_dev, SOLO_VI_WIN_ON(i), 1);
+
+ return 0;
+}
+
+void solo_disp_exit(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ solo6010_irq_off(solo_dev, SOLO_IRQ_MOTION);
+
+ solo_reg_write(solo_dev, SOLO_VO_DISP_CTRL, 0);
+ solo_reg_write(solo_dev, SOLO_VO_ZOOM_CTRL, 0);
+ solo_reg_write(solo_dev, SOLO_VO_FREEZE_CTRL, 0);
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(i), 0);
+ solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(i), 0);
+ solo_reg_write(solo_dev, SOLO_VI_WIN_ON(i), 0);
+ }
+
+ /* Set default border */
+ for (i = 0; i < 5; i++)
+ solo_reg_write(solo_dev, SOLO_VO_BORDER_X(i), 0);
+
+ for (i = 0; i < 5; i++)
+ solo_reg_write(solo_dev, SOLO_VO_BORDER_Y(i), 0);
+
+ solo_reg_write(solo_dev, SOLO_VO_BORDER_LINE_MASK, 0);
+ solo_reg_write(solo_dev, SOLO_VO_BORDER_FILL_MASK, 0);
+
+ solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(0), 0);
+ solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(0), 0);
+ solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(0), 0);
+
+ solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(1), 0);
+ solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(1), 0);
+ solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(1), 0);
+}
diff --git a/drivers/staging/solo6x10/solo6010-enc.c b/drivers/staging/solo6x10/solo6010-enc.c
new file mode 100644
index 00000000000..a6cf0a8a3f2
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-enc.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+
+#include "solo6010.h"
+#include "solo6010-osd-font.h"
+
+#define CAPTURE_MAX_BANDWIDTH 32 // D1 4channel (D1 == 4)
+#define OSG_BUFFER_SIZE 1024
+
+#define VI_PROG_HSIZE (1280 - 16)
+#define VI_PROG_VSIZE (1024 - 16)
+
+static void solo_capture_config(struct solo6010_dev *solo_dev)
+{
+ int i, j;
+ unsigned long height;
+ unsigned long width;
+ unsigned char *buf;
+
+ solo_reg_write(solo_dev, SOLO_CAP_BASE,
+ SOLO_CAP_MAX_PAGE(SOLO_CAP_EXT_MAX_PAGE *
+ solo_dev->nr_chans) |
+ SOLO_CAP_BASE_ADDR(SOLO_CAP_EXT_ADDR(solo_dev) >> 16));
+ solo_reg_write(solo_dev, SOLO_CAP_BTW,
+ (1 << 17) | SOLO_CAP_PROG_BANDWIDTH(2) |
+ SOLO_CAP_MAX_BANDWIDTH(CAPTURE_MAX_BANDWIDTH));
+
+ /* Set scale 1, 9 dimension */
+ width = solo_dev->video_hsize;
+ height = solo_dev->video_vsize;
+ solo_reg_write(solo_dev, SOLO_DIM_SCALE1,
+ SOLO_DIM_H_MB_NUM(width / 16) |
+ SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
+ SOLO_DIM_V_MB_NUM_FIELD(height / 16));
+
+ /* Set scale 2, 10 dimension */
+ width = solo_dev->video_hsize / 2;
+ height = solo_dev->video_vsize;
+ solo_reg_write(solo_dev, SOLO_DIM_SCALE2,
+ SOLO_DIM_H_MB_NUM(width / 16) |
+ SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
+ SOLO_DIM_V_MB_NUM_FIELD(height / 16));
+
+ /* Set scale 3, 11 dimension */
+ width = solo_dev->video_hsize / 2;
+ height = solo_dev->video_vsize / 2;
+ solo_reg_write(solo_dev, SOLO_DIM_SCALE3,
+ SOLO_DIM_H_MB_NUM(width / 16) |
+ SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
+ SOLO_DIM_V_MB_NUM_FIELD(height / 16));
+
+ /* Set scale 4, 12 dimension */
+ width = solo_dev->video_hsize / 3;
+ height = solo_dev->video_vsize / 3;
+ solo_reg_write(solo_dev, SOLO_DIM_SCALE4,
+ SOLO_DIM_H_MB_NUM(width / 16) |
+ SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
+ SOLO_DIM_V_MB_NUM_FIELD(height / 16));
+
+ /* Set scale 5, 13 dimension */
+ width = solo_dev->video_hsize / 4;
+ height = solo_dev->video_vsize / 2;
+ solo_reg_write(solo_dev, SOLO_DIM_SCALE5,
+ SOLO_DIM_H_MB_NUM(width / 16) |
+ SOLO_DIM_V_MB_NUM_FRAME(height / 8) |
+ SOLO_DIM_V_MB_NUM_FIELD(height / 16));
+
+ /* Progressive */
+ width = VI_PROG_HSIZE;
+ height = VI_PROG_VSIZE;
+ solo_reg_write(solo_dev, SOLO_DIM_PROG,
+ SOLO_DIM_H_MB_NUM(width / 16) |
+ SOLO_DIM_V_MB_NUM_FRAME(height / 16) |
+ SOLO_DIM_V_MB_NUM_FIELD(height / 16));
+
+ /* Clear OSD */
+ solo_reg_write(solo_dev, SOLO_VE_OSD_CH, 0);
+ solo_reg_write(solo_dev, SOLO_VE_OSD_BASE,
+ SOLO_EOSD_EXT_ADDR(solo_dev) >> 16);
+ solo_reg_write(solo_dev, SOLO_VE_OSD_CLR,
+ 0xF0 << 16 | 0x80 << 8 | 0x80);
+ solo_reg_write(solo_dev, SOLO_VE_OSD_OPT, 0);
+
+ /* Clear OSG buffer */
+ buf = kzalloc(OSG_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ for (j = 0; j < SOLO_EOSD_EXT_SIZE; j += OSG_BUFFER_SIZE) {
+ solo_p2m_dma(solo_dev, SOLO_P2M_DMA_ID_MP4E, 1, buf,
+ SOLO_EOSD_EXT_ADDR(solo_dev) +
+ (i * SOLO_EOSD_EXT_SIZE) + j,
+ OSG_BUFFER_SIZE);
+ }
+ }
+ kfree(buf);
+}
+
+int solo_osd_print(struct solo_enc_dev *solo_enc)
+{
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ char *str = solo_enc->osd_text;
+ u8 *buf;
+ u32 reg = solo_reg_read(solo_dev, SOLO_VE_OSD_CH);
+ int len = strlen(str);
+ int i, j;
+ int x = 1, y = 1;
+
+ if (len == 0) {
+ reg &= ~(1 << solo_enc->ch);
+ solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
+ return 0;
+ }
+
+ buf = kzalloc(SOLO_EOSD_EXT_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ for (j = 0; j < 16; j++) {
+ buf[(j*2) + (i%2) + ((x + (i/2)) * 32) + (y * 2048)] =
+ (solo_osd_font[(str[i] * 4) + (j / 4)]
+ >> ((3 - (j % 4)) * 8)) & 0xff;
+ }
+ }
+
+ solo_p2m_dma(solo_dev, 0, 1, buf, SOLO_EOSD_EXT_ADDR(solo_dev) +
+ (solo_enc->ch * SOLO_EOSD_EXT_SIZE), SOLO_EOSD_EXT_SIZE);
+ reg |= (1 << solo_enc->ch);
+ solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static void solo_jpeg_config(struct solo6010_dev *solo_dev)
+{
+ solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_TBL,
+ (2 << 24) | (2 << 16) | (2 << 8) | (2 << 0));
+ solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_CH_L, 0);
+ solo_reg_write(solo_dev, SOLO_VE_JPEG_QP_CH_H, 0);
+ solo_reg_write(solo_dev, SOLO_VE_JPEG_CFG,
+ (SOLO_JPEG_EXT_SIZE(solo_dev) & 0xffff0000) |
+ ((SOLO_JPEG_EXT_ADDR(solo_dev) >> 16) & 0x0000ffff));
+ solo_reg_write(solo_dev, SOLO_VE_JPEG_CTRL, 0xffffffff);
+}
+
+static void solo_mp4e_config(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ /* We can only use VE_INTR_CTRL(0) if we want to support mjpeg */
+ solo_reg_write(solo_dev, SOLO_VE_CFG0,
+ SOLO_VE_INTR_CTRL(0) |
+ SOLO_VE_BLOCK_SIZE(SOLO_MP4E_EXT_SIZE(solo_dev) >> 16) |
+ SOLO_VE_BLOCK_BASE(SOLO_MP4E_EXT_ADDR(solo_dev) >> 16));
+
+ solo_reg_write(solo_dev, SOLO_VE_CFG1,
+ SOLO_VE_BYTE_ALIGN(2) |
+ SOLO_VE_INSERT_INDEX | SOLO_VE_MOTION_MODE(0));
+
+ solo_reg_write(solo_dev, SOLO_VE_WMRK_POLY, 0);
+ solo_reg_write(solo_dev, SOLO_VE_VMRK_INIT_KEY, 0);
+ solo_reg_write(solo_dev, SOLO_VE_WMRK_STRL, 0);
+ solo_reg_write(solo_dev, SOLO_VE_ENCRYP_POLY, 0);
+ solo_reg_write(solo_dev, SOLO_VE_ENCRYP_INIT, 0);
+
+ solo_reg_write(solo_dev, SOLO_VE_ATTR,
+ SOLO_VE_LITTLE_ENDIAN |
+ SOLO_COMP_ATTR_FCODE(1) |
+ SOLO_COMP_TIME_INC(0) |
+ SOLO_COMP_TIME_WIDTH(15) |
+ SOLO_DCT_INTERVAL(36 / 4));
+
+ for (i = 0; i < solo_dev->nr_chans; i++)
+ solo_reg_write(solo_dev, SOLO_VE_CH_REF_BASE(i),
+ (SOLO_EREF_EXT_ADDR(solo_dev) +
+ (i * SOLO_EREF_EXT_SIZE)) >> 16);
+}
+
+int solo_enc_init(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ solo_capture_config(solo_dev);
+ solo_mp4e_config(solo_dev);
+ solo_jpeg_config(solo_dev);
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(i), 0);
+ solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(i), 0);
+ }
+
+ solo6010_irq_on(solo_dev, SOLO_IRQ_ENCODER);
+
+ return 0;
+}
+
+void solo_enc_exit(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ solo6010_irq_off(solo_dev, SOLO_IRQ_ENCODER);
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(i), 0);
+ solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(i), 0);
+ }
+}
diff --git a/drivers/staging/solo6x10/solo6010-g723.c b/drivers/staging/solo6x10/solo6010-g723.c
new file mode 100644
index 00000000000..e82846c1d6c
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-g723.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mempool.h>
+#include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/control.h>
+
+#include "solo6010.h"
+#include "solo6010-tw28.h"
+
+#define G723_INTR_ORDER 0
+#define G723_FDMA_PAGES 32
+#define G723_PERIOD_BYTES 48
+#define G723_PERIOD_BLOCK 1024
+#define G723_FRAMES_PER_PAGE 48
+
+/* Sets up channels 16-19 for decoding and 0-15 for encoding */
+#define OUTMODE_MASK 0x300
+
+#define SAMPLERATE 8000
+#define BITRATE 25
+
+/* The SOLO writes to the DMA buffer in 1k-byte pages, 32 pages in all.
+ * Each 1k page is divided into 20 * 48-byte regions (one for each
+ * possible channel), with the rest of the page being dummy data. */
+#define IRQ_PAGES 4 /* 0 - 4 */
+#define PERIODS_MIN (1 << IRQ_PAGES)
+#define PERIODS_MAX G723_FDMA_PAGES
+#define MAX_BUFFER (G723_PERIOD_BYTES * PERIODS_MAX)
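+
+/* Putting the numbers above together: MAX_BUFFER is 48 * 32 = 1536 bytes
+ * per substream, and the 48-byte region for channel "ch" of hardware page
+ * "pg" starts at external offset
+ *
+ *	SOLO_G723_EXT_ADDR(solo_dev) + pg * G723_PERIOD_BLOCK
+ *				     + ch * G723_PERIOD_BYTES
+ *
+ * which is the same arithmetic snd_solo_pcm_copy() uses below. */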
+
+struct solo_snd_pcm {
+ int on;
+ spinlock_t lock;
+ struct solo6010_dev *solo_dev;
+ unsigned char g723_buf[G723_PERIOD_BYTES];
+};
+
+static void solo_g723_config(struct solo6010_dev *solo_dev)
+{
+ int clk_div;
+
+ clk_div = SOLO_CLOCK_MHZ / (SAMPLERATE * (BITRATE * 2) * 2);
+
+ solo_reg_write(solo_dev, SOLO_AUDIO_SAMPLE,
+ SOLO_AUDIO_BITRATE(BITRATE) |
+ SOLO_AUDIO_CLK_DIV(clk_div));
+
+ solo_reg_write(solo_dev, SOLO_AUDIO_FDMA_INTR,
+ SOLO_AUDIO_FDMA_INTERVAL(IRQ_PAGES) |
+ SOLO_AUDIO_INTR_ORDER(G723_INTR_ORDER) |
+ SOLO_AUDIO_FDMA_BASE(SOLO_G723_EXT_ADDR(solo_dev) >> 16));
+
+ solo_reg_write(solo_dev, SOLO_AUDIO_CONTROL,
+ SOLO_AUDIO_ENABLE | SOLO_AUDIO_I2S_MODE |
+ SOLO_AUDIO_I2S_MULTI(3) | SOLO_AUDIO_MODE(OUTMODE_MASK));
+}
+
+void solo_g723_isr(struct solo6010_dev *solo_dev)
+{
+ struct snd_pcm_str *pstr =
+ &solo_dev->snd_pcm->streams[SNDRV_PCM_STREAM_CAPTURE];
+ struct snd_pcm_substream *ss;
+ struct solo_snd_pcm *solo_pcm;
+
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_G723);
+
+ for (ss = pstr->substream; ss != NULL; ss = ss->next) {
+ if (snd_pcm_substream_chip(ss) == NULL)
+ continue;
+
+ /* This means open() hasn't been called on this one */
+ if (snd_pcm_substream_chip(ss) == solo_dev)
+ continue;
+
+ /* Haven't triggered a start yet */
+ solo_pcm = snd_pcm_substream_chip(ss);
+ if (!solo_pcm->on)
+ continue;
+
+ snd_pcm_period_elapsed(ss);
+ }
+}
+
+static int snd_solo_hw_params(struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
+}
+
+static int snd_solo_hw_free(struct snd_pcm_substream *ss)
+{
+ return snd_pcm_lib_free_pages(ss);
+}
+
+static struct snd_pcm_hardware snd_solo_pcm_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_U8,
+ .rates = SNDRV_PCM_RATE_8000,
+ .rate_min = 8000,
+ .rate_max = 8000,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = MAX_BUFFER,
+ .period_bytes_min = G723_PERIOD_BYTES,
+ .period_bytes_max = G723_PERIOD_BYTES,
+ .periods_min = PERIODS_MIN,
+ .periods_max = PERIODS_MAX,
+};
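+
+/* With these constraints each period is 48 frames of 8-bit mono at 8 kHz,
+ * i.e. 6 ms of audio, and a full 32-period buffer (1536 bytes) covers
+ * 192 ms. */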
+
+static int snd_solo_pcm_open(struct snd_pcm_substream *ss)
+{
+ struct solo6010_dev *solo_dev = snd_pcm_substream_chip(ss);
+ struct solo_snd_pcm *solo_pcm;
+
+ solo_pcm = kzalloc(sizeof(*solo_pcm), GFP_KERNEL);
+ if (solo_pcm == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&solo_pcm->lock);
+ solo_pcm->solo_dev = solo_dev;
+ ss->runtime->hw = snd_solo_pcm_hw;
+
+ snd_pcm_substream_chip(ss) = solo_pcm;
+
+ return 0;
+}
+
+static int snd_solo_pcm_close(struct snd_pcm_substream *ss)
+{
+ struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss);
+
+ snd_pcm_substream_chip(ss) = solo_pcm->solo_dev;
+ kfree(solo_pcm);
+
+ return 0;
+}
+
+static int snd_solo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+{
+ struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss);
+ struct solo6010_dev *solo_dev = solo_pcm->solo_dev;
+ int ret = 0;
+
+ spin_lock(&solo_pcm->lock);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ if (solo_pcm->on == 0) {
+ /* If this is the first user, switch on interrupts */
+ if (atomic_inc_return(&solo_dev->snd_users) == 1)
+ solo6010_irq_on(solo_dev, SOLO_IRQ_G723);
+ solo_pcm->on = 1;
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ if (solo_pcm->on) {
+ /* If this was our last user, switch them off */
+ if (atomic_dec_return(&solo_dev->snd_users) == 0)
+ solo6010_irq_off(solo_dev, SOLO_IRQ_G723);
+ solo_pcm->on = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ spin_unlock(&solo_pcm->lock);
+
+ return ret;
+}
+
+static int snd_solo_pcm_prepare(struct snd_pcm_substream *ss)
+{
+ return 0;
+}
+
+static snd_pcm_uframes_t snd_solo_pcm_pointer(struct snd_pcm_substream *ss)
+{
+ struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss);
+ struct solo6010_dev *solo_dev = solo_pcm->solo_dev;
+ snd_pcm_uframes_t idx = solo_reg_read(solo_dev, SOLO_AUDIO_STA) & 0x1f;
+
+ return idx * G723_FRAMES_PER_PAGE;
+}
+
+static int snd_solo_pcm_copy(struct snd_pcm_substream *ss, int channel,
+ snd_pcm_uframes_t pos, void __user *dst,
+ snd_pcm_uframes_t count)
+{
+ struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss);
+ struct solo6010_dev *solo_dev = solo_pcm->solo_dev;
+ int err, i;
+
+ for (i = 0; i < (count / G723_FRAMES_PER_PAGE); i++) {
+ int page = (pos / G723_FRAMES_PER_PAGE) + i;
+
+ err = solo_p2m_dma(solo_dev, SOLO_P2M_DMA_ID_G723E, 0,
+ solo_pcm->g723_buf,
+ SOLO_G723_EXT_ADDR(solo_dev) +
+ (page * G723_PERIOD_BLOCK) +
+ (ss->number * G723_PERIOD_BYTES),
+ G723_PERIOD_BYTES);
+ if (err)
+ return err;
+
+ if (copy_to_user(dst + (i * G723_PERIOD_BYTES),
+ solo_pcm->g723_buf, G723_PERIOD_BYTES))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static struct snd_pcm_ops snd_solo_pcm_ops = {
+ .open = snd_solo_pcm_open,
+ .close = snd_solo_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_solo_hw_params,
+ .hw_free = snd_solo_hw_free,
+ .prepare = snd_solo_pcm_prepare,
+ .trigger = snd_solo_pcm_trigger,
+ .pointer = snd_solo_pcm_pointer,
+ .copy = snd_solo_pcm_copy,
+};
+
+static int snd_solo_capture_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = 1;
+ info->value.integer.min = 0;
+ info->value.integer.max = 15;
+ info->value.integer.step = 1;
+
+ return 0;
+}
+
+static int snd_solo_capture_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct solo6010_dev *solo_dev = snd_kcontrol_chip(kcontrol);
+ u8 ch = value->id.numid - 1;
+
+ value->value.integer.value[0] = tw28_get_audio_gain(solo_dev, ch);
+
+ return 0;
+}
+
+static int snd_solo_capture_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct solo6010_dev *solo_dev = snd_kcontrol_chip(kcontrol);
+ u8 ch = value->id.numid - 1;
+ u8 old_val;
+
+ old_val = tw28_get_audio_gain(solo_dev, ch);
+ if (old_val == value->value.integer.value[0])
+ return 0;
+
+ tw28_set_audio_gain(solo_dev, ch, value->value.integer.value[0]);
+
+ return 1;
+}
+
+static struct snd_kcontrol_new snd_solo_capture_volume = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Volume",
+ .info = snd_solo_capture_volume_info,
+ .get = snd_solo_capture_volume_get,
+ .put = snd_solo_capture_volume_put,
+};
+
+static int solo_snd_pcm_init(struct solo6010_dev *solo_dev)
+{
+ struct snd_card *card = solo_dev->snd_card;
+ struct snd_pcm *pcm;
+ struct snd_pcm_substream *ss;
+ int ret;
+ int i;
+
+ ret = snd_pcm_new(card, card->driver, 0, 0, solo_dev->nr_chans,
+ &pcm);
+ if (ret < 0)
+ return ret;
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_solo_pcm_ops);
+
+ snd_pcm_chip(pcm) = solo_dev;
+ pcm->info_flags = 0;
+ strcpy(pcm->name, card->shortname);
+
+ for (i = 0, ss = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ ss; ss = ss->next, i++)
+ sprintf(ss->name, "Camera #%d Audio", i);
+
+ ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ MAX_BUFFER, MAX_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ solo_dev->snd_pcm = pcm;
+
+ return 0;
+}
+
+int solo_g723_init(struct solo6010_dev *solo_dev)
+{
+ static struct snd_device_ops ops = { NULL };
+ struct snd_card *card;
+ struct snd_kcontrol_new kctl;
+ char name[32];
+ int ret;
+
+ atomic_set(&solo_dev->snd_users, 0);
+
+ /* Allows for easier mapping between video and audio */
+ sprintf(name, "Softlogic%d", solo_dev->vfd->num);
+
+ ret = snd_card_create(SNDRV_DEFAULT_IDX1, name, THIS_MODULE, 0,
+ &solo_dev->snd_card);
+ if (ret < 0)
+ return ret;
+
+ card = solo_dev->snd_card;
+
+ strcpy(card->driver, SOLO6010_NAME);
+ strcpy(card->shortname, "SOLO-6010 Audio");
+ sprintf(card->longname, "%s on %s IRQ %d", card->shortname,
+ pci_name(solo_dev->pdev), solo_dev->pdev->irq);
+ snd_card_set_dev(card, &solo_dev->pdev->dev);
+
+ ret = snd_device_new(card, SNDRV_DEV_LOWLEVEL, solo_dev, &ops);
+ if (ret < 0)
+ goto snd_error;
+
+ /* Mixer controls */
+ strcpy(card->mixername, "SOLO-6010");
+ kctl = snd_solo_capture_volume;
+ kctl.count = solo_dev->nr_chans;
+ ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
+ if (ret < 0)
+ goto snd_error;
+
+ if ((ret = solo_snd_pcm_init(solo_dev)) < 0)
+ goto snd_error;
+
+ if ((ret = snd_card_register(card)) < 0)
+ goto snd_error;
+
+ solo_g723_config(solo_dev);
+
+ dev_info(&solo_dev->pdev->dev, "Alsa sound card as %s\n", name);
+
+ return 0;
+
+snd_error:
+ snd_card_free(card);
+ return ret;
+}
+
+void solo_g723_exit(struct solo6010_dev *solo_dev)
+{
+ solo_reg_write(solo_dev, SOLO_AUDIO_CONTROL, 0);
+ solo6010_irq_off(solo_dev, SOLO_IRQ_G723);
+
+ snd_card_free(solo_dev->snd_card);
+}
diff --git a/drivers/staging/solo6x10/solo6010-gpio.c b/drivers/staging/solo6x10/solo6010-gpio.c
new file mode 100644
index 00000000000..46f7a71edab
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-gpio.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+
+#include "solo6010.h"
+
+static void solo_gpio_mode(struct solo6010_dev *solo_dev,
+ unsigned int port_mask, unsigned int mode)
+{
+ int port;
+ unsigned int ret;
+
+ ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_0);
+
+ /* To set gpio */
+ for (port = 0; port < 16; port++) {
+ if (!((1 << port) & port_mask))
+ continue;
+
+ ret &= (~(3 << (port << 1)));
+ ret |= ((mode & 3) << (port << 1));
+ }
+
+ solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_0, ret);
+
+ /* To set extended gpio - sensor */
+ ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_1);
+
+ for (port = 0; port < 16; port++) {
+ if (!((1 << (port + 16)) & port_mask))
+ continue;
+
+ if (!mode)
+ ret &= ~(1 << port);
+ else
+ ret |= 1 << port;
+ }
+
+ solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_1, ret);
+}
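+
+/* Ports 0-15 each take a 2-bit mode field at bits [2*port+1:2*port] of
+ * SOLO_GPIO_CONFIG_0; ports 16-31 get a single enable bit each in
+ * SOLO_GPIO_CONFIG_1.  For example (hypothetical call),
+ * solo_gpio_mode(solo_dev, 0x0003, 2) sets ports 0 and 1 to mode 2 by
+ * writing (2 << 0) | (2 << 2) = 0xa into bits 3:0. */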
+
+static void solo_gpio_set(struct solo6010_dev *solo_dev, unsigned int value)
+{
+ solo_reg_write(solo_dev, SOLO_GPIO_DATA_OUT,
+ solo_reg_read(solo_dev, SOLO_GPIO_DATA_OUT) | value);
+}
+
+static void solo_gpio_clear(struct solo6010_dev *solo_dev, unsigned int value)
+{
+ solo_reg_write(solo_dev, SOLO_GPIO_DATA_OUT,
+ solo_reg_read(solo_dev, SOLO_GPIO_DATA_OUT) & ~value);
+}
+
+static void solo_gpio_config(struct solo6010_dev *solo_dev)
+{
+ /* Video reset */
+ solo_gpio_mode(solo_dev, 0x30, 1);
+ solo_gpio_clear(solo_dev, 0x30);
+ udelay(100);
+ solo_gpio_set(solo_dev, 0x30);
+ udelay(100);
+
+ /* Warning: Don't touch the next line unless you're sure of what
+ * you're doing: first four gpio [0-3] are used for video. */
+ solo_gpio_mode(solo_dev, 0x0f, 2);
+
+ /* We use bits 8-15 of SOLO_GPIO_CONFIG_0 for relay purposes */
+ solo_gpio_mode(solo_dev, 0xff00, 1);
+
+ /* Initially set relay status to 0 */
+ solo_gpio_clear(solo_dev, 0xff00);
+}
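+
+/* With ports 8-15 configured as outputs above, a relay line can then be
+ * driven via solo_gpio_set()/solo_gpio_clear(), e.g.
+ * solo_gpio_set(solo_dev, 1 << 8) to raise the first relay output.  The
+ * relay-to-port mapping is board specific, so the bit chosen here is only
+ * an illustration. */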
+
+int solo_gpio_init(struct solo6010_dev *solo_dev)
+{
+ solo_gpio_config(solo_dev);
+ return 0;
+}
+
+void solo_gpio_exit(struct solo6010_dev *solo_dev)
+{
+ solo_gpio_clear(solo_dev, 0x30);
+ solo_gpio_config(solo_dev);
+}
diff --git a/drivers/staging/solo6x10/solo6010-i2c.c b/drivers/staging/solo6x10/solo6010-i2c.c
new file mode 100644
index 00000000000..2bb86fa9e9e
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-i2c.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* XXX: The SOLO6010 i2c does not have separate interrupts for each i2c
+ * channel. The bus can only handle one i2c event at a time. The below handles
+ * this all wrong. We should be using the status registers to see if the bus
+ * is in use, and have a global lock to check the status register. Also,
+ * the bulk of the work should be handled out-of-interrupt. The ugly loops
+ * that occur during interrupt scare me. The ISR should merely signal
+ * thread context, ACK the interrupt, and move on. -- BenC */
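+
+/* One possible shape for that rework (sketch only -- it would need a
+ * work_struct member, say i2c_work, added to struct solo6010_dev, which
+ * this driver does not have yet): the ISR would just ACK SOLO_IRQ_IIC and
+ * call schedule_work(), and a worker such as
+ *
+ *	static void solo_i2c_work(struct work_struct *work)
+ *	{
+ *		struct solo6010_dev *solo_dev =
+ *			container_of(work, struct solo6010_dev, i2c_work);
+ *		... walk the transfer state machine here ...
+ *	}
+ *
+ * would run the loops in process context instead of in the interrupt
+ * handler. */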
+
+#include <linux/kernel.h>
+
+#include "solo6010.h"
+
+u8 solo_i2c_readbyte(struct solo6010_dev *solo_dev, int id, u8 addr, u8 off)
+{
+ struct i2c_msg msgs[2];
+ u8 data;
+
+ msgs[0].flags = 0;
+ msgs[0].addr = addr;
+ msgs[0].len = 1;
+ msgs[0].buf = &off;
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = addr;
+ msgs[1].len = 1;
+ msgs[1].buf = &data;
+
+ i2c_transfer(&solo_dev->i2c_adap[id], msgs, 2);
+
+ return data;
+}
+
+void solo_i2c_writebyte(struct solo6010_dev *solo_dev, int id, u8 addr,
+ u8 off, u8 data)
+{
+ struct i2c_msg msgs;
+ u8 buf[2];
+
+ buf[0] = off;
+ buf[1] = data;
+ msgs.flags = 0;
+ msgs.addr = addr;
+ msgs.len = 2;
+ msgs.buf = buf;
+
+ i2c_transfer(&solo_dev->i2c_adap[id], &msgs, 1);
+}
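+
+/* Typical use of the two helpers above (device address and register are
+ * made-up values): read-modify-write a register on a chip behind adapter 0:
+ *
+ *	u8 val = solo_i2c_readbyte(solo_dev, 0, 0x28, 0x01);
+ *	solo_i2c_writebyte(solo_dev, 0, 0x28, 0x01, val | 0x80);
+ */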
+
+static void solo_i2c_flush(struct solo6010_dev *solo_dev, int wr)
+{
+ u32 ctrl;
+
+ ctrl = SOLO_IIC_CH_SET(solo_dev->i2c_id);
+
+ if (solo_dev->i2c_state == IIC_STATE_START)
+ ctrl |= SOLO_IIC_START;
+
+ if (wr) {
+ ctrl |= SOLO_IIC_WRITE;
+ } else {
+ ctrl |= SOLO_IIC_READ;
+ if (!(solo_dev->i2c_msg->flags & I2C_M_NO_RD_ACK))
+ ctrl |= SOLO_IIC_ACK_EN;
+ }
+
+ if (solo_dev->i2c_msg_ptr == solo_dev->i2c_msg->len)
+ ctrl |= SOLO_IIC_STOP;
+
+ solo_reg_write(solo_dev, SOLO_IIC_CTRL, ctrl);
+}
+
+static void solo_i2c_start(struct solo6010_dev *solo_dev)
+{
+ u32 addr = solo_dev->i2c_msg->addr << 1;
+
+ if (solo_dev->i2c_msg->flags & I2C_M_RD)
+ addr |= 1;
+
+ solo_dev->i2c_state = IIC_STATE_START;
+ solo_reg_write(solo_dev, SOLO_IIC_TXD, addr);
+ solo_i2c_flush(solo_dev, 1);
+}
+
+static void solo_i2c_stop(struct solo6010_dev *solo_dev)
+{
+ solo6010_irq_off(solo_dev, SOLO_IRQ_IIC);
+ solo_reg_write(solo_dev, SOLO_IIC_CTRL, 0);
+ solo_dev->i2c_state = IIC_STATE_STOP;
+ wake_up(&solo_dev->i2c_wait);
+}
+
+static int solo_i2c_handle_read(struct solo6010_dev *solo_dev)
+{
+prepare_read:
+ if (solo_dev->i2c_msg_ptr != solo_dev->i2c_msg->len) {
+ solo_i2c_flush(solo_dev, 0);
+ return 0;
+ }
+
+ solo_dev->i2c_msg_ptr = 0;
+ solo_dev->i2c_msg++;
+ solo_dev->i2c_msg_num--;
+
+ if (solo_dev->i2c_msg_num == 0) {
+ solo_i2c_stop(solo_dev);
+ return 0;
+ }
+
+ if (!(solo_dev->i2c_msg->flags & I2C_M_NOSTART)) {
+ solo_i2c_start(solo_dev);
+ } else {
+ if (solo_dev->i2c_msg->flags & I2C_M_RD)
+ goto prepare_read;
+ else
+ solo_i2c_stop(solo_dev);
+ }
+
+ return 0;
+}
+
+static int solo_i2c_handle_write(struct solo6010_dev *solo_dev)
+{
+retry_write:
+ if (solo_dev->i2c_msg_ptr != solo_dev->i2c_msg->len) {
+ solo_reg_write(solo_dev, SOLO_IIC_TXD,
+ solo_dev->i2c_msg->buf[solo_dev->i2c_msg_ptr]);
+ solo_dev->i2c_msg_ptr++;
+ solo_i2c_flush(solo_dev, 1);
+ return 0;
+ }
+
+ solo_dev->i2c_msg_ptr = 0;
+ solo_dev->i2c_msg++;
+ solo_dev->i2c_msg_num--;
+
+ if (solo_dev->i2c_msg_num == 0) {
+ solo_i2c_stop(solo_dev);
+ return 0;
+ }
+
+ if (!(solo_dev->i2c_msg->flags & I2C_M_NOSTART)) {
+ solo_i2c_start(solo_dev);
+ } else {
+ if (solo_dev->i2c_msg->flags & I2C_M_RD)
+ solo_i2c_stop(solo_dev);
+ else
+ goto retry_write;
+ }
+
+ return 0;
+}
+
+int solo_i2c_isr(struct solo6010_dev *solo_dev)
+{
+ u32 status = solo_reg_read(solo_dev, SOLO_IIC_CTRL);
+ int ret = -EINVAL;
+
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_IIC);
+
+ if (status & (SOLO_IIC_STATE_TRNS & SOLO_IIC_STATE_SIG_ERR) ||
+ solo_dev->i2c_id < 0) {
+ solo_i2c_stop(solo_dev);
+ return -ENXIO;
+ }
+
+ switch (solo_dev->i2c_state) {
+ case IIC_STATE_START:
+ if (solo_dev->i2c_msg->flags & I2C_M_RD) {
+ solo_dev->i2c_state = IIC_STATE_READ;
+ ret = solo_i2c_handle_read(solo_dev);
+ break;
+ }
+
+ solo_dev->i2c_state = IIC_STATE_WRITE;
+ /* fall through */
+ case IIC_STATE_WRITE:
+ ret = solo_i2c_handle_write(solo_dev);
+ break;
+
+ case IIC_STATE_READ:
+ solo_dev->i2c_msg->buf[solo_dev->i2c_msg_ptr] =
+ solo_reg_read(solo_dev, SOLO_IIC_RXD);
+ solo_dev->i2c_msg_ptr++;
+
+ ret = solo_i2c_handle_read(solo_dev);
+ break;
+
+ default:
+ solo_i2c_stop(solo_dev);
+ }
+
+ return ret;
+}
+
+static int solo_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct solo6010_dev *solo_dev = adap->algo_data;
+ unsigned long timeout;
+ int ret;
+ int i;
+ DEFINE_WAIT(wait);
+
+ for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
+ if (&solo_dev->i2c_adap[i] == adap)
+ break;
+ }
+
+ if (i == SOLO_I2C_ADAPTERS)
+ return num; /* XXX Right return value for failure? */
+
+ down(&solo_dev->i2c_sem);
+ solo_dev->i2c_id = i;
+ solo_dev->i2c_msg = msgs;
+ solo_dev->i2c_msg_num = num;
+ solo_dev->i2c_msg_ptr = 0;
+
+ solo_reg_write(solo_dev, SOLO_IIC_CTRL, 0);
+ solo6010_irq_on(solo_dev, SOLO_IRQ_IIC);
+ solo_i2c_start(solo_dev);
+
+ timeout = HZ / 2;
+
+ for (;;) {
+ prepare_to_wait(&solo_dev->i2c_wait, &wait, TASK_INTERRUPTIBLE);
+
+ if (solo_dev->i2c_state == IIC_STATE_STOP)
+ break;
+
+ timeout = schedule_timeout(timeout);
+ if (!timeout)
+ break;
+
+ if (signal_pending(current))
+ break;
+ }
+
+ finish_wait(&solo_dev->i2c_wait, &wait);
+ ret = num - solo_dev->i2c_msg_num;
+ solo_dev->i2c_state = IIC_STATE_IDLE;
+ solo_dev->i2c_id = -1;
+
+ up(&solo_dev->i2c_sem);
+
+ return ret;
+}
+
+static u32 solo_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm solo_i2c_algo = {
+ .master_xfer = solo_i2c_master_xfer,
+ .functionality = solo_i2c_functionality,
+};
+
+int solo_i2c_init(struct solo6010_dev *solo_dev)
+{
+ int i;
+ int ret;
+
+ solo_reg_write(solo_dev, SOLO_IIC_CFG,
+ SOLO_IIC_PRESCALE(8) | SOLO_IIC_ENABLE);
+
+ solo_dev->i2c_id = -1;
+ solo_dev->i2c_state = IIC_STATE_IDLE;
+ init_waitqueue_head(&solo_dev->i2c_wait);
+ init_MUTEX(&solo_dev->i2c_sem);
+
+ for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
+ struct i2c_adapter *adap = &solo_dev->i2c_adap[i];
+
+ snprintf(adap->name, I2C_NAME_SIZE, "%s I2C %d",
+ SOLO6010_NAME, i);
+ adap->algo = &solo_i2c_algo;
+ adap->algo_data = solo_dev;
+ adap->retries = 1;
+ adap->dev.parent = &solo_dev->pdev->dev;
+
+ if ((ret = i2c_add_adapter(adap))) {
+ adap->algo_data = NULL;
+ break;
+ }
+ }
+
+ if (ret) {
+ for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
+ if (!solo_dev->i2c_adap[i].algo_data)
+ break;
+ i2c_del_adapter(&solo_dev->i2c_adap[i]);
+ solo_dev->i2c_adap[i].algo_data = NULL;
+ }
+ return ret;
+ }
+
+ dev_info(&solo_dev->pdev->dev, "Enabled %d i2c adapters\n",
+ SOLO_I2C_ADAPTERS);
+
+ return 0;
+}
+
+void solo_i2c_exit(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
+ if (!solo_dev->i2c_adap[i].algo_data)
+ continue;
+ i2c_del_adapter(&solo_dev->i2c_adap[i]);
+ solo_dev->i2c_adap[i].algo_data = NULL;
+ }
+}
diff --git a/drivers/staging/solo6x10/solo6010-jpeg.h b/drivers/staging/solo6x10/solo6010-jpeg.h
new file mode 100644
index 00000000000..fb0507ecb30
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-jpeg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOLO6010_JPEG_H
+#define __SOLO6010_JPEG_H
+
+static unsigned char jpeg_header[] = {
+ 0xff, 0xd8, 0xff, 0xfe, 0x00, 0x0d, 0x42, 0x6c,
+ 0x75, 0x65, 0x63, 0x68, 0x65, 0x72, 0x72, 0x79,
+ 0x20, 0xff, 0xdb, 0x00, 0x43, 0x00, 0x20, 0x16,
+ 0x18, 0x1c, 0x18, 0x14, 0x20, 0x1c, 0x1a, 0x1c,
+ 0x24, 0x22, 0x20, 0x26, 0x30, 0x50, 0x34, 0x30,
+ 0x2c, 0x2c, 0x30, 0x62, 0x46, 0x4a, 0x3a, 0x50,
+ 0x74, 0x66, 0x7a, 0x78, 0x72, 0x66, 0x70, 0x6e,
+ 0x80, 0x90, 0xb8, 0x9c, 0x80, 0x88, 0xae, 0x8a,
+ 0x6e, 0x70, 0xa0, 0xda, 0xa2, 0xae, 0xbe, 0xc4,
+ 0xce, 0xd0, 0xce, 0x7c, 0x9a, 0xe2, 0xf2, 0xe0,
+ 0xc8, 0xf0, 0xb8, 0xca, 0xce, 0xc6, 0xff, 0xdb,
+ 0x00, 0x43, 0x01, 0x22, 0x24, 0x24, 0x30, 0x2a,
+ 0x30, 0x5e, 0x34, 0x34, 0x5e, 0xc6, 0x84, 0x70,
+ 0x84, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+ 0xc6, 0xc6, 0xc6, 0xff, 0xc4, 0x01, 0xa2, 0x00,
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x10, 0x00, 0x02, 0x01,
+ 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04,
+ 0x04, 0x00, 0x00, 0x01, 0x7d, 0x01, 0x02, 0x03,
+ 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41,
+ 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14,
+ 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1,
+ 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62,
+ 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19,
+ 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34,
+ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44,
+ 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54,
+ 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74,
+ 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84,
+ 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93,
+ 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2,
+ 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa,
+ 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9,
+ 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8,
+ 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5,
+ 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0x01,
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x11, 0x00, 0x02, 0x01,
+ 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04,
+ 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02,
+ 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12,
+ 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32,
+ 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1,
+ 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72,
+ 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1,
+ 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29,
+ 0x2a, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43,
+ 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53,
+ 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63,
+ 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73,
+ 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82,
+ 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
+ 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99,
+ 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
+ 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6,
+ 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5,
+ 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe2, 0xe3, 0xe4,
+ 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xff,
+ 0xc0, 0x00, 0x11, 0x08, 0x00, 0xf0, 0x02, 0xc0,
+ 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01, 0x03,
+ 0x11, 0x01, 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01,
+ 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00
+};
+
+/* Byte offset of the SOF0 marker (0xffc0) within jpeg_header above */
+#define SOF0_START 575
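+
+/* In the table above, jpeg_header[SOF0_START] and [SOF0_START + 1] are the
+ * 0xff 0xc0 marker bytes, followed by length 0x0011 and precision 0x08; the
+ * two height bytes (0x00 0xf0 = 240) start at SOF0_START + 5 and the width
+ * bytes (0x02 0xc0 = 704) at SOF0_START + 7, so per-channel frame
+ * dimensions can be patched in place at those offsets. */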
+
+#endif /* __SOLO6010_JPEG_H */
diff --git a/drivers/staging/solo6x10/solo6010-offsets.h b/drivers/staging/solo6x10/solo6010-offsets.h
new file mode 100644
index 00000000000..2431de989c0
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-offsets.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOLO6010_OFFSETS_H
+#define __SOLO6010_OFFSETS_H
+
+/* Offsets and sizes of the external address */
+#define SOLO_DISP_EXT_ADDR(__solo) 0x00000000
+#define SOLO_DISP_EXT_SIZE 0x00480000
+
+#define SOLO_DEC2LIVE_EXT_ADDR(__solo) \
+ (SOLO_DISP_EXT_ADDR(__solo) + SOLO_DISP_EXT_SIZE)
+#define SOLO_DEC2LIVE_EXT_SIZE 0x00240000
+
+#define SOLO_OSG_EXT_ADDR(__solo) \
+ (SOLO_DEC2LIVE_EXT_ADDR(__solo) + SOLO_DEC2LIVE_EXT_SIZE)
+#define SOLO_OSG_EXT_SIZE 0x00120000
+
+#define SOLO_EOSD_EXT_ADDR(__solo) \
+ (SOLO_OSG_EXT_ADDR(__solo) + SOLO_OSG_EXT_SIZE)
+#define SOLO_EOSD_EXT_SIZE 0x00010000
+
+#define SOLO_MOTION_EXT_ADDR(__solo) \
+ (SOLO_EOSD_EXT_ADDR(__solo) + \
+ (SOLO_EOSD_EXT_SIZE * __solo->nr_chans))
+#define SOLO_MOTION_EXT_SIZE 0x00080000
+
+#define SOLO_G723_EXT_ADDR(__solo) \
+ (SOLO_MOTION_EXT_ADDR(__solo) + SOLO_MOTION_EXT_SIZE)
+#define SOLO_G723_EXT_SIZE 0x00010000
+
+#define SOLO_CAP_EXT_ADDR(__solo) \
+ (SOLO_G723_EXT_ADDR(__solo) + SOLO_G723_EXT_SIZE)
+#define SOLO_CAP_EXT_MAX_PAGE (18 + 15)
+#define SOLO_CAP_EXT_SIZE (SOLO_CAP_EXT_MAX_PAGE * 65536)
+
+/* This +1 is very important -- Why?! -- BenC */
+#define SOLO_EREF_EXT_ADDR(__solo) \
+ (SOLO_CAP_EXT_ADDR(__solo) + \
+ (SOLO_CAP_EXT_SIZE * (__solo->nr_chans + 1)))
+#define SOLO_EREF_EXT_SIZE 0x00140000
+
+#define SOLO_MP4E_EXT_ADDR(__solo) \
+ (SOLO_EREF_EXT_ADDR(__solo) + \
+ (SOLO_EREF_EXT_SIZE * __solo->nr_chans))
+#define SOLO_MP4E_EXT_SIZE(__solo) (0x00080000 * __solo->nr_chans)
+
+#define SOLO_DREF_EXT_ADDR(__solo) \
+ (SOLO_MP4E_EXT_ADDR(__solo) + SOLO_MP4E_EXT_SIZE(__solo))
+#define SOLO_DREF_EXT_SIZE 0x00140000
+
+#define SOLO_MP4D_EXT_ADDR(__solo) \
+ (SOLO_DREF_EXT_ADDR(__solo) + \
+ (SOLO_DREF_EXT_SIZE * __solo->nr_chans))
+#define SOLO_MP4D_EXT_SIZE 0x00080000
+
+#define SOLO_JPEG_EXT_ADDR(__solo) \
+ (SOLO_MP4D_EXT_ADDR(__solo) + \
+ (SOLO_MP4D_EXT_SIZE * __solo->nr_chans))
+#define SOLO_JPEG_EXT_SIZE(__solo) (0x00080000 * __solo->nr_chans)
+
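+/* Worked example for a hypothetical 4-channel board: DISP at 0x00000000,
+ * DEC2LIVE at 0x00480000, OSG at 0x006c0000, EOSD at 0x007e0000
+ * (4 * 0x10000), MOTION at 0x00820000, G723 at 0x008a0000 and CAP at
+ * 0x008b0000; each later region continues from the previous address plus
+ * its (possibly per-channel) size in the same way. */
+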
+#endif /* __SOLO6010_OFFSETS_H */
diff --git a/drivers/staging/solo6x10/solo6010-osd-font.h b/drivers/staging/solo6x10/solo6010-osd-font.h
new file mode 100644
index 00000000000..d6f565bd76c
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-osd-font.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOLO6010_OSD_FONT_H
+#define __SOLO6010_OSD_FONT_H
+
+static const unsigned int solo_osd_font[] = {
+ 0x00000000, 0x0000c0c8, 0xccfefe0c, 0x08000000,
+ 0x00000000, 0x10103838, 0x7c7cfefe, 0x00000000, // 0
+ 0x00000000, 0xfefe7c7c, 0x38381010, 0x10000000,
+ 0x00000000, 0x7c82fefe, 0xfefefe7c, 0x00000000,
+ 0x00000000, 0x00001038, 0x10000000, 0x00000000,
+ 0x00000000, 0x0010387c, 0xfe7c3810, 0x00000000,
+ 0x00000000, 0x00384444, 0x44380000, 0x00000000,
+ 0x00000000, 0x38448282, 0x82443800, 0x00000000,
+ 0x00000000, 0x007c7c7c, 0x7c7c0000, 0x00000000,
+ 0x00000000, 0x6c6c6c6c, 0x6c6c6c6c, 0x00000000,
+ 0x00000000, 0x061e7efe, 0xfe7e1e06, 0x00000000,
+ 0x00000000, 0xc0f0fcfe, 0xfefcf0c0, 0x00000000,
+ 0x00000000, 0xc6cedefe, 0xfedecec6, 0x00000000,
+ 0x00000000, 0xc6e6f6fe, 0xfef6e6c6, 0x00000000,
+ 0x00000000, 0x12367efe, 0xfe7e3612, 0x00000000,
+ 0x00000000, 0x90d8fcfe, 0xfefcd890, 0x00000000,
+ 0x00000038, 0x7cc692ba, 0x92c67c38, 0x00000000,
+ 0x00000038, 0x7cc6aa92, 0xaac67c38, 0x00000000,
+ 0x00000038, 0x7830107c, 0xbaa8680c, 0x00000000,
+ 0x00000038, 0x3c18127c, 0xb8382c60, 0x00000000,
+ 0x00000044, 0xaa6c8254, 0x38eec67c, 0x00000000,
+ 0x00000082, 0x44288244, 0x38c6827c, 0x00000000,
+ 0x00000038, 0x444444fe, 0xfeeec6fe, 0x00000000,
+ 0x00000018, 0x78187818, 0x3c7e7e3c, 0x00000000,
+ 0x00000000, 0x3854929a, 0x82443800, 0x00000000,
+ 0x00000000, 0x00c0c8cc, 0xfefe0c08, 0x00000000,
+ 0x0000e0a0, 0xe040e00e, 0x8a0ea40e, 0x00000000,
+ 0x0000e0a0, 0xe040e00e, 0x0a8e440e, 0x00000000,
+ 0x0000007c, 0x82829292, 0x929282fe, 0x00000000,
+ 0x000000f8, 0xfc046494, 0x946404fc, 0x00000000,
+ 0x0000003f, 0x7f404c52, 0x524c407f, 0x00000000,
+ 0x0000007c, 0x82ba82ba, 0x82ba82fe, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x183c3c3c, 0x18180018, 0x18000000, // 32 !
+ 0x00000066, 0x66240000, 0x00000000, 0x00000000,
+ 0x00000000, 0x6c6cfe6c, 0x6c6cfe6c, 0x6c000000, // 34 " #
+ 0x00001010, 0x7cd6d616, 0x7cd0d6d6, 0x7c101000,
+ 0x00000000, 0x0086c660, 0x30180cc6, 0xc2000000, // 36 $ %
+ 0x00000000, 0x386c6c38, 0xdc766666, 0xdc000000,
+ 0x0000000c, 0x0c0c0600, 0x00000000, 0x00000000, // 38 & '
+ 0x00000000, 0x30180c0c, 0x0c0c0c18, 0x30000000,
+ 0x00000000, 0x0c183030, 0x30303018, 0x0c000000, // 40 ( )
+ 0x00000000, 0x0000663c, 0xff3c6600, 0x00000000,
+ 0x00000000, 0x00001818, 0x7e181800, 0x00000000, // 42 * +
+ 0x00000000, 0x00000000, 0x00000e0e, 0x0c060000,
+ 0x00000000, 0x00000000, 0x7e000000, 0x00000000, // 44 , -
+ 0x00000000, 0x00000000, 0x00000006, 0x06000000,
+ 0x00000000, 0x80c06030, 0x180c0602, 0x00000000, // 46 . /
+ 0x0000007c, 0xc6e6f6de, 0xcec6c67c, 0x00000000,
+ 0x00000030, 0x383c3030, 0x303030fc, 0x00000000, // 48 0 1
+ 0x0000007c, 0xc6c06030, 0x180cc6fe, 0x00000000,
+ 0x0000007c, 0xc6c0c07c, 0xc0c0c67c, 0x00000000, // 50 2 3
+ 0x00000060, 0x70786c66, 0xfe6060f0, 0x00000000,
+ 0x000000fe, 0x0606067e, 0xc0c0c67c, 0x00000000, // 52 4 5
+ 0x00000038, 0x0c06067e, 0xc6c6c67c, 0x00000000,
+ 0x000000fe, 0xc6c06030, 0x18181818, 0x00000000, // 54 6 7
+ 0x0000007c, 0xc6c6c67c, 0xc6c6c67c, 0x00000000,
+ 0x0000007c, 0xc6c6c6fc, 0xc0c06038, 0x00000000, // 56 8 9
+ 0x00000000, 0x18180000, 0x00181800, 0x00000000,
+ 0x00000000, 0x18180000, 0x0018180c, 0x00000000, // 58 : ;
+ 0x00000060, 0x30180c06, 0x0c183060, 0x00000000,
+ 0x00000000, 0x007e0000, 0x007e0000, 0x00000000,
+ 0x00000006, 0x0c183060, 0x30180c06, 0x00000000,
+ 0x0000007c, 0xc6c66030, 0x30003030, 0x00000000,
+ 0x0000007c, 0xc6f6d6d6, 0x7606067c, 0x00000000,
+ 0x00000010, 0x386cc6c6, 0xfec6c6c6, 0x00000000, // 64 @ A
+ 0x0000007e, 0xc6c6c67e, 0xc6c6c67e, 0x00000000,
+ 0x00000078, 0xcc060606, 0x0606cc78, 0x00000000, // 66
+ 0x0000003e, 0x66c6c6c6, 0xc6c6663e, 0x00000000,
+ 0x000000fe, 0x0606063e, 0x060606fe, 0x00000000, // 68
+ 0x000000fe, 0x0606063e, 0x06060606, 0x00000000,
+ 0x00000078, 0xcc060606, 0xf6c6ccb8, 0x00000000, // 70
+ 0x000000c6, 0xc6c6c6fe, 0xc6c6c6c6, 0x00000000,
+ 0x0000003c, 0x18181818, 0x1818183c, 0x00000000, // 72
+ 0x00000060, 0x60606060, 0x6066663c, 0x00000000,
+ 0x000000c6, 0xc666361e, 0x3666c6c6, 0x00000000, // 74
+ 0x00000006, 0x06060606, 0x060606fe, 0x00000000,
+ 0x000000c6, 0xeefed6c6, 0xc6c6c6c6, 0x00000000, // 76
+ 0x000000c6, 0xcedefef6, 0xe6c6c6c6, 0x00000000,
+ 0x00000038, 0x6cc6c6c6, 0xc6c66c38, 0x00000000, // 78
+ 0x0000007e, 0xc6c6c67e, 0x06060606, 0x00000000,
+ 0x00000038, 0x6cc6c6c6, 0xc6d67c38, 0x60000000, // 80
+ 0x0000007e, 0xc6c6c67e, 0x66c6c6c6, 0x00000000,
+ 0x0000007c, 0xc6c60c38, 0x60c6c67c, 0x00000000, // 82
+ 0x0000007e, 0x18181818, 0x18181818, 0x00000000,
+ 0x000000c6, 0xc6c6c6c6, 0xc6c6c67c, 0x00000000, // 84
+ 0x000000c6, 0xc6c6c6c6, 0xc66c3810, 0x00000000,
+ 0x000000c6, 0xc6c6c6c6, 0xd6d6fe6c, 0x00000000, // 86
+ 0x000000c6, 0xc6c66c38, 0x6cc6c6c6, 0x00000000,
+ 0x00000066, 0x66666666, 0x3c181818, 0x00000000, // 88
+ 0x000000fe, 0xc0603018, 0x0c0606fe, 0x00000000,
+ 0x0000003c, 0x0c0c0c0c, 0x0c0c0c3c, 0x00000000, // 90
+ 0x00000002, 0x060c1830, 0x60c08000, 0x00000000,
+ 0x0000003c, 0x30303030, 0x3030303c, 0x00000000, // 92
+ 0x00001038, 0x6cc60000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00fe0000,
+ 0x00001818, 0x30000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00003c60, 0x7c66667c, 0x00000000,
+ 0x0000000c, 0x0c0c7ccc, 0xcccccc7c, 0x00000000,
+ 0x00000000, 0x00007cc6, 0x0606c67c, 0x00000000,
+ 0x00000060, 0x60607c66, 0x6666667c, 0x00000000,
+ 0x00000000, 0x00007cc6, 0xfe06c67c, 0x00000000,
+ 0x00000078, 0x0c0c0c3e, 0x0c0c0c0c, 0x00000000,
+ 0x00000000, 0x00007c66, 0x6666667c, 0x60603e00,
+ 0x0000000c, 0x0c0c7ccc, 0xcccccccc, 0x00000000,
+ 0x00000030, 0x30003830, 0x30303078, 0x00000000,
+ 0x00000030, 0x30003c30, 0x30303030, 0x30301f00,
+ 0x0000000c, 0x0c0ccc6c, 0x3c6ccccc, 0x00000000,
+ 0x00000030, 0x30303030, 0x30303030, 0x00000000,
+ 0x00000000, 0x000066fe, 0xd6d6d6d6, 0x00000000,
+ 0x00000000, 0x000078cc, 0xcccccccc, 0x00000000,
+ 0x00000000, 0x00007cc6, 0xc6c6c67c, 0x00000000,
+ 0x00000000, 0x00007ccc, 0xcccccc7c, 0x0c0c0c00,
+ 0x00000000, 0x00007c66, 0x6666667c, 0x60606000,
+ 0x00000000, 0x000076dc, 0x0c0c0c0c, 0x00000000,
+ 0x00000000, 0x00007cc6, 0x1c70c67c, 0x00000000,
+ 0x00000000, 0x1818fe18, 0x18181870, 0x00000000,
+ 0x00000000, 0x00006666, 0x6666663c, 0x00000000,
+ 0x00000000, 0x0000c6c6, 0xc66c3810, 0x00000000,
+ 0x00000000, 0x0000c6d6, 0xd6d6fe6c, 0x00000000,
+ 0x00000000, 0x0000c66c, 0x38386cc6, 0x00000000,
+ 0x00000000, 0x00006666, 0x6666667c, 0x60603e00,
+ 0x00000000, 0x0000fe60, 0x30180cfe, 0x00000000,
+ 0x00000070, 0x1818180e, 0x18181870, 0x00000000,
+ 0x00000018, 0x18181800, 0x18181818, 0x00000000,
+ 0x0000000e, 0x18181870, 0x1818180e, 0x00000000,
+ 0x000000dc, 0x76000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x0010386c, 0xc6c6fe00, 0x00000000
+};
+
+#endif /* __SOLO6010_OSD_FONT_H */
diff --git a/drivers/staging/solo6x10/solo6010-p2m.c b/drivers/staging/solo6x10/solo6010-p2m.c
new file mode 100644
index 00000000000..1b81f069c7f
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-p2m.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+
+#include "solo6010.h"
+
+/* #define SOLO_TEST_P2M */
+
+int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
+ void *sys_addr, u32 ext_addr, u32 size)
+{
+ dma_addr_t dma_addr;
+ int ret;
+
+ WARN_ON(!size);
+ WARN_ON(id >= SOLO_NR_P2M);
+ if (!size || id >= SOLO_NR_P2M)
+ return -EINVAL;
+
+ dma_addr = pci_map_single(solo_dev->pdev, sys_addr, size,
+ wr ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+
+ ret = solo_p2m_dma_t(solo_dev, id, wr, dma_addr, ext_addr, size);
+
+ pci_unmap_single(solo_dev->pdev, dma_addr, size,
+ wr ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+
+ return ret;
+}
+
+int solo_p2m_dma_t(struct solo6010_dev *solo_dev, u8 id, int wr,
+ dma_addr_t dma_addr, u32 ext_addr, u32 size)
+{
+ struct solo_p2m_dev *p2m_dev;
+ unsigned int timeout = 0;
+
+ WARN_ON(!size);
+ WARN_ON(id >= SOLO_NR_P2M);
+ if (!size || id >= SOLO_NR_P2M)
+ return -EINVAL;
+
+ p2m_dev = &solo_dev->p2m_dev[id];
+
+ down(&p2m_dev->sem);
+
+start_dma:
+ INIT_COMPLETION(p2m_dev->completion);
+ p2m_dev->error = 0;
+ solo_reg_write(solo_dev, SOLO_P2M_TAR_ADR(id), dma_addr);
+ solo_reg_write(solo_dev, SOLO_P2M_EXT_ADR(id), ext_addr);
+ solo_reg_write(solo_dev, SOLO_P2M_EXT_CFG(id),
+ SOLO_P2M_COPY_SIZE(size >> 2));
+ solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id),
+ SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) |
+ (wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON);
+
+ timeout = wait_for_completion_timeout(&p2m_dev->completion, HZ);
+
+ solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
+
+ /* XXX Really looks to me like we will get stuck here if a
+ * real PCI P2M error occurs */
+ if (p2m_dev->error)
+ goto start_dma;
+
+ up(&p2m_dev->sem);
+
+ return (timeout == 0) ? -EAGAIN : 0;
+}
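+
+/* Note that the copy length is programmed in 32-bit words (size >> 2), so
+ * any tail smaller than 4 bytes is simply not transferred; a 48-byte G.723
+ * period, for instance, is programmed as 12 words. */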
+
+#ifdef SOLO_TEST_P2M
+
+#define P2M_TEST_CHAR 0xbe
+
+static unsigned long long p2m_test(struct solo6010_dev *solo_dev, u8 id,
+ u32 base, int size)
+{
+ u8 *wr_buf;
+ u8 *rd_buf;
+ int i;
+ unsigned long long err_cnt = 0;
+
+ wr_buf = kmalloc(size, GFP_KERNEL);
+ if (!wr_buf) {
+ printk(SOLO6010_NAME ": Failed to malloc for p2m_test\n");
+ return size;
+ }
+
+ rd_buf = kmalloc(size, GFP_KERNEL);
+ if (!rd_buf) {
+ printk(SOLO6010_NAME ": Failed to malloc for p2m_test\n");
+ kfree(wr_buf);
+ return size;
+ }
+
+ memset(wr_buf, P2M_TEST_CHAR, size);
+ memset(rd_buf, P2M_TEST_CHAR + 1, size);
+
+ solo_p2m_dma(solo_dev, id, 1, wr_buf, base, size);
+ solo_p2m_dma(solo_dev, id, 0, rd_buf, base, size);
+
+ for (i = 0; i < size; i++)
+ if (wr_buf[i] != rd_buf[i])
+ err_cnt++;
+
+ kfree(wr_buf);
+ kfree(rd_buf);
+
+ return err_cnt;
+}
+
+#define TEST_CHUNK_SIZE (8 * 1024)
+
+static void run_p2m_test(struct solo6010_dev *solo_dev)
+{
+ unsigned long long errs = 0;
+ u32 size = SOLO_JPEG_EXT_ADDR(solo_dev) + SOLO_JPEG_EXT_SIZE(solo_dev);
+ int i, d;
+
+ printk(KERN_WARNING "%s: Testing %u bytes of external ram\n",
+ SOLO6010_NAME, size);
+
+ for (i = 0; i < size; i += TEST_CHUNK_SIZE)
+ for (d = 0; d < 4; d++)
+ errs += p2m_test(solo_dev, d, i, TEST_CHUNK_SIZE);
+
+ printk(KERN_WARNING "%s: Found %llu errors during p2m test\n",
+ SOLO6010_NAME, errs);
+
+ return;
+}
+#else
+#define run_p2m_test(__solo) do {} while (0)
+#endif
+
+void solo_p2m_isr(struct solo6010_dev *solo_dev, int id)
+{
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_P2M(id));
+ complete(&solo_dev->p2m_dev[id].completion);
+}
+
+void solo_p2m_error_isr(struct solo6010_dev *solo_dev, u32 status)
+{
+ struct solo_p2m_dev *p2m_dev;
+ int i;
+
+ if (!(status & SOLO_PCI_ERR_P2M))
+ return;
+
+ for (i = 0; i < SOLO_NR_P2M; i++) {
+ p2m_dev = &solo_dev->p2m_dev[i];
+ p2m_dev->error = 1;
+ solo_reg_write(solo_dev, SOLO_P2M_CONTROL(i), 0);
+ complete(&p2m_dev->completion);
+ }
+}
+
+void solo_p2m_exit(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ for (i = 0; i < SOLO_NR_P2M; i++)
+ solo6010_irq_off(solo_dev, SOLO_IRQ_P2M(i));
+}
+
+int solo_p2m_init(struct solo6010_dev *solo_dev)
+{
+ struct solo_p2m_dev *p2m_dev;
+ int i;
+
+ for (i = 0; i < SOLO_NR_P2M; i++) {
+ p2m_dev = &solo_dev->p2m_dev[i];
+
+ init_MUTEX(&p2m_dev->sem);
+ init_completion(&p2m_dev->completion);
+
+ solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(i),
+ __pa(p2m_dev->desc));
+
+ solo_reg_write(solo_dev, SOLO_P2M_CONTROL(i), 0);
+ solo_reg_write(solo_dev, SOLO_P2M_CONFIG(i),
+ SOLO_P2M_CSC_16BIT_565 |
+ SOLO_P2M_DMA_INTERVAL(0) |
+ SOLO_P2M_PCI_MASTER_MODE);
+ solo6010_irq_on(solo_dev, SOLO_IRQ_P2M(i));
+ }
+
+ run_p2m_test(solo_dev);
+
+ return 0;
+}
diff --git a/drivers/staging/solo6x10/solo6010-registers.h b/drivers/staging/solo6x10/solo6010-registers.h
new file mode 100644
index 00000000000..d39d3c636f5
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-registers.h
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOLO6010_REGISTERS_H
+#define __SOLO6010_REGISTERS_H
+
+#include "solo6010-offsets.h"
+
+/* Global 6010 system configuration */
+#define SOLO_SYS_CFG 0x0000
+#define SOLO_SYS_CFG_FOUT_EN 0x00000001
+#define SOLO_SYS_CFG_PLL_BYPASS 0x00000002
+#define SOLO_SYS_CFG_PLL_PWDN 0x00000004
+#define SOLO_SYS_CFG_OUTDIV(__n) (((__n) & 0x003) << 3)
+#define SOLO_SYS_CFG_FEEDBACKDIV(__n) (((__n) & 0x1ff) << 5)
+#define SOLO_SYS_CFG_INPUTDIV(__n) (((__n) & 0x01f) << 14)
+#define SOLO_SYS_CFG_CLOCK_DIV 0x00080000
+#define SOLO_SYS_CFG_NCLK_DELAY(__n) (((__n) & 0x003) << 24)
+#define SOLO_SYS_CFG_PCLK_DELAY(__n) (((__n) & 0x00f) << 26)
+#define SOLO_SYS_CFG_SDRAM64BIT 0x40000000
+#define SOLO_SYS_CFG_RESET 0x80000000
+
+#define SOLO_DMA_CTRL 0x0004
+#define SOLO_DMA_CTRL_REFRESH_CYCLE(n) ((n)<<8)
+/* 0=16/32MB, 1=32/64MB, 2=64/128MB, 3=128/256MB */
+#define SOLO_DMA_CTRL_SDRAM_SIZE(n) ((n)<<6)
+#define SOLO_DMA_CTRL_SDRAM_CLK_INVERT (1<<5)
+#define SOLO_DMA_CTRL_STROBE_SELECT (1<<4)
+#define SOLO_DMA_CTRL_READ_DATA_SELECT (1<<3)
+#define SOLO_DMA_CTRL_READ_CLK_SELECT (1<<2)
+#define SOLO_DMA_CTRL_LATENCY(n) ((n)<<0)
+
+#define SOLO_SYS_VCLK 0x000C
+#define SOLO_VCLK_INVERT (1<<22)
+/* 0=sys_clk/4, 1=sys_clk/2, 2=clk_in/2 of system input */
+#define SOLO_VCLK_SELECT(n) ((n)<<20)
+#define SOLO_VCLK_VIN1415_DELAY(n) ((n)<<14)
+#define SOLO_VCLK_VIN1213_DELAY(n) ((n)<<12)
+#define SOLO_VCLK_VIN1011_DELAY(n) ((n)<<10)
+#define SOLO_VCLK_VIN0809_DELAY(n) ((n)<<8)
+#define SOLO_VCLK_VIN0607_DELAY(n) ((n)<<6)
+#define SOLO_VCLK_VIN0405_DELAY(n) ((n)<<4)
+#define SOLO_VCLK_VIN0203_DELAY(n) ((n)<<2)
+#define SOLO_VCLK_VIN0001_DELAY(n) ((n)<<0)
+
+#define SOLO_IRQ_STAT 0x0010
+#define SOLO_IRQ_ENABLE 0x0014
+#define SOLO_IRQ_P2M(n) (1<<((n)+17))
+#define SOLO_IRQ_GPIO (1<<16)
+#define SOLO_IRQ_VIDEO_LOSS (1<<15)
+#define SOLO_IRQ_VIDEO_IN (1<<14)
+#define SOLO_IRQ_MOTION (1<<13)
+#define SOLO_IRQ_ATA_CMD (1<<12)
+#define SOLO_IRQ_ATA_DIR (1<<11)
+#define SOLO_IRQ_PCI_ERR (1<<10)
+#define SOLO_IRQ_PS2_1 (1<<9)
+#define SOLO_IRQ_PS2_0 (1<<8)
+#define SOLO_IRQ_SPI (1<<7)
+#define SOLO_IRQ_IIC (1<<6)
+#define SOLO_IRQ_UART(n) (1<<((n) + 4))
+#define SOLO_IRQ_G723 (1<<3)
+#define SOLO_IRQ_DECODER (1<<1)
+#define SOLO_IRQ_ENCODER (1<<0)
+
+#define SOLO_CHIP_OPTION 0x001C
+#define SOLO_CHIP_ID_MASK 0x00000007
+
+#define SOLO_EEPROM_CTRL 0x0060
+#define SOLO_EEPROM_ACCESS_EN (1<<7)
+#define SOLO_EEPROM_CS (1<<3)
+#define SOLO_EEPROM_CLK (1<<2)
+#define SOLO_EEPROM_DO (1<<1)
+#define SOLO_EEPROM_DI (1<<0)
+#define SOLO_EEPROM_ENABLE (SOLO_EEPROM_ACCESS_EN | SOLO_EEPROM_CS)
+
+#define SOLO_PCI_ERR 0x0070
+#define SOLO_PCI_ERR_FATAL 0x00000001
+#define SOLO_PCI_ERR_PARITY 0x00000002
+#define SOLO_PCI_ERR_TARGET 0x00000004
+#define SOLO_PCI_ERR_TIMEOUT 0x00000008
+#define SOLO_PCI_ERR_P2M 0x00000010
+#define SOLO_PCI_ERR_ATA 0x00000020
+#define SOLO_PCI_ERR_P2M_DESC 0x00000040
+#define SOLO_PCI_ERR_FSM0(__s) (((__s) >> 16) & 0x0f)
+#define SOLO_PCI_ERR_FSM1(__s) (((__s) >> 20) & 0x0f)
+#define SOLO_PCI_ERR_FSM2(__s) (((__s) >> 24) & 0x1f)
+
+#define SOLO_P2M_BASE 0x0080
+
+#define SOLO_P2M_CONFIG(n) (0x0080 + ((n)*0x20))
+#define SOLO_P2M_DMA_INTERVAL(n) ((n)<<6)/* N*32 clocks */
+#define SOLO_P2M_CSC_BYTE_REORDER (1<<5) /* BGR -> RGB */
+/* 0:r=[14:10] g=[9:5] b=[4:0], 1:r=[15:11] g=[10:5] b=[4:0] */
+#define SOLO_P2M_CSC_16BIT_565 (1<<4)
+#define SOLO_P2M_UV_SWAP (1<<3)
+#define SOLO_P2M_PCI_MASTER_MODE (1<<2)
+#define SOLO_P2M_DESC_INTR_OPT (1<<1) /* 1:Empty, 0:Each */
+#define SOLO_P2M_DESC_MODE (1<<0)
+
+#define SOLO_P2M_DES_ADR(n) (0x0084 + ((n)*0x20))
+
+#define SOLO_P2M_DESC_ID(n) (0x0088 + ((n)*0x20))
+#define SOLO_P2M_UPDATE_ID(n) ((n)<<0)
+
+#define SOLO_P2M_STATUS(n) (0x008C + ((n)*0x20))
+#define SOLO_P2M_COMMAND_DONE (1<<8)
+#define SOLO_P2M_CURRENT_ID(stat) (0xff & (stat))
+
+#define SOLO_P2M_CONTROL(n) (0x0090 + ((n)*0x20))
+#define SOLO_P2M_PCI_INC(n) ((n)<<20)
+#define SOLO_P2M_REPEAT(n) ((n)<<10)
+/* 0:512, 1:256, 2:128, 3:64, 4:32, 5:128(2page) */
+#define SOLO_P2M_BURST_SIZE(n) ((n)<<7)
+#define SOLO_P2M_BURST_512 0
+#define SOLO_P2M_BURST_256 1
+#define SOLO_P2M_BURST_128 2
+#define SOLO_P2M_BURST_64 3
+#define SOLO_P2M_BURST_32 4
+#define SOLO_P2M_CSC_16BIT (1<<6) /* 0:24bit, 1:16bit */
+/* 0:Y[0]<-0(OFF), 1:Y[0]<-1(ON), 2:Y[0]<-G[0], 3:Y[0]<-Bit[15] */
+#define SOLO_P2M_ALPHA_MODE(n) ((n)<<4)
+#define SOLO_P2M_CSC_ON (1<<3)
+#define SOLO_P2M_INTERRUPT_REQ (1<<2)
+#define SOLO_P2M_WRITE (1<<1)
+#define SOLO_P2M_TRANS_ON (1<<0)
+
+#define SOLO_P2M_EXT_CFG(n) (0x0094 + ((n)*0x20))
+#define SOLO_P2M_EXT_INC(n) ((n)<<20)
+#define SOLO_P2M_COPY_SIZE(n) ((n)<<0)
+
+#define SOLO_P2M_TAR_ADR(n) (0x0098 + ((n)*0x20))
+
+#define SOLO_P2M_EXT_ADR(n) (0x009C + ((n)*0x20))
+
+#define SOLO_P2M_BUFFER(i) (0x2000 + ((i)*4))
+
+#define SOLO_VI_CH_SWITCH_0 0x0100
+#define SOLO_VI_CH_SWITCH_1 0x0104
+#define SOLO_VI_CH_SWITCH_2 0x0108
+
+#define SOLO_VI_CH_ENA 0x010C
+#define SOLO_VI_CH_FORMAT 0x0110
+#define SOLO_VI_FD_SEL_MASK(n) ((n)<<16)
+#define SOLO_VI_PROG_MASK(n) ((n)<<0)
+
+#define SOLO_VI_FMT_CFG 0x0114
+#define SOLO_VI_FMT_CHECK_VCOUNT (1<<31)
+#define SOLO_VI_FMT_CHECK_HCOUNT (1<<30)
+#define SOLO_VI_FMT_TEST_SIGNAL (1<<28)
+
+#define SOLO_VI_PAGE_SW 0x0118
+#define SOLO_FI_INV_DISP_LIVE(n) ((n)<<8)
+#define SOLO_FI_INV_DISP_OUT(n) ((n)<<7)
+#define SOLO_DISP_SYNC_FI(n) ((n)<<6)
+#define SOLO_PIP_PAGE_ADD(n) ((n)<<3)
+#define SOLO_NORMAL_PAGE_ADD(n) ((n)<<0)
+
+#define SOLO_VI_ACT_I_P 0x011C
+#define SOLO_VI_ACT_I_S 0x0120
+#define SOLO_VI_ACT_P 0x0124
+#define SOLO_VI_FI_INVERT (1<<31)
+#define SOLO_VI_H_START(n) ((n)<<21)
+#define SOLO_VI_V_START(n) ((n)<<11)
+#define SOLO_VI_V_STOP(n) ((n)<<0)
+
+#define SOLO_VI_STATUS0 0x0128
+#define SOLO_VI_STATUS0_PAGE(__n) ((__n) & 0x07)
+#define SOLO_VI_STATUS1 0x012C
+
+/* XXX: Might be better off in kernel level disp.h */
+#define DISP_PAGE(stat) ((stat) & 0x07)
+
+#define SOLO_VI_PB_CONFIG 0x0130
+#define SOLO_VI_PB_USER_MODE (1<<1)
+#define SOLO_VI_PB_PAL (1<<0)
+#define SOLO_VI_PB_RANGE_HV 0x0134
+#define SOLO_VI_PB_HSIZE(h) ((h)<<12)
+#define SOLO_VI_PB_VSIZE(v) ((v)<<0)
+#define SOLO_VI_PB_ACT_H 0x0138
+#define SOLO_VI_PB_HSTART(n) ((n)<<12)
+#define SOLO_VI_PB_HSTOP(n) ((n)<<0)
+#define SOLO_VI_PB_ACT_V 0x013C
+#define SOLO_VI_PB_VSTART(n) ((n)<<12)
+#define SOLO_VI_PB_VSTOP(n) ((n)<<0)
+
+#define SOLO_VI_MOSAIC(ch) (0x0140 + ((ch)*4))
+#define SOLO_VI_MOSAIC_SX(x) ((x)<<24)
+#define SOLO_VI_MOSAIC_EX(x) ((x)<<16)
+#define SOLO_VI_MOSAIC_SY(x) ((x)<<8)
+#define SOLO_VI_MOSAIC_EY(x) ((x)<<0)
+
+#define SOLO_VI_WIN_CTRL0(ch) (0x0180 + ((ch)*4))
+#define SOLO_VI_WIN_CTRL1(ch) (0x01C0 + ((ch)*4))
+
+#define SOLO_VI_WIN_CHANNEL(n) ((n)<<28)
+
+#define SOLO_VI_WIN_PIP(n) ((n)<<27)
+#define SOLO_VI_WIN_SCALE(n) ((n)<<24)
+
+#define SOLO_VI_WIN_SX(x) ((x)<<12)
+#define SOLO_VI_WIN_EX(x) ((x)<<0)
+
+#define SOLO_VI_WIN_SY(x) ((x)<<12)
+#define SOLO_VI_WIN_EY(x) ((x)<<0)
+
+#define SOLO_VI_WIN_ON(ch) (0x0200 + ((ch)*4))
+
+#define SOLO_VI_WIN_SW 0x0240
+#define SOLO_VI_WIN_LIVE_AUTO_MUTE 0x0244
+
+#define SOLO_VI_MOT_ADR 0x0260
+#define SOLO_VI_MOTION_EN(mask) ((mask)<<16)
+#define SOLO_VI_MOT_CTRL 0x0264
+#define SOLO_VI_MOTION_FRAME_COUNT(n) ((n)<<24)
+#define SOLO_VI_MOTION_SAMPLE_LENGTH(n) ((n)<<16)
+#define SOLO_VI_MOTION_INTR_START_STOP (1<<15)
+#define SOLO_VI_MOTION_FREEZE_DATA (1<<14)
+#define SOLO_VI_MOTION_SAMPLE_COUNT(n) ((n)<<0)
+#define SOLO_VI_MOT_CLEAR 0x0268
+#define SOLO_VI_MOT_STATUS 0x026C
+#define SOLO_VI_MOTION_CNT(n) ((n)<<0)
+#define SOLO_VI_MOTION_BORDER 0x0270
+#define SOLO_VI_MOTION_BAR 0x0274
+#define SOLO_VI_MOTION_Y_SET (1<<29)
+#define SOLO_VI_MOTION_Y_ADD (1<<28)
+#define SOLO_VI_MOTION_CB_SET (1<<27)
+#define SOLO_VI_MOTION_CB_ADD (1<<26)
+#define SOLO_VI_MOTION_CR_SET (1<<25)
+#define SOLO_VI_MOTION_CR_ADD (1<<24)
+#define SOLO_VI_MOTION_Y_VALUE(v) ((v)<<16)
+#define SOLO_VI_MOTION_CB_VALUE(v) ((v)<<8)
+#define SOLO_VI_MOTION_CR_VALUE(v) ((v)<<0)
+
+#define SOLO_VO_FMT_ENC 0x0300
+#define SOLO_VO_SCAN_MODE_PROGRESSIVE (1<<31)
+#define SOLO_VO_FMT_TYPE_PAL (1<<30)
+#define SOLO_VO_FMT_TYPE_NTSC 0
+#define SOLO_VO_USER_SET (1<<29)
+
+#define SOLO_VO_FI_CHANGE (1<<20)
+#define SOLO_VO_USER_COLOR_SET_VSYNC (1<<19)
+#define SOLO_VO_USER_COLOR_SET_HSYNC (1<<18)
+#define SOLO_VO_USER_COLOR_SET_NAV (1<<17)
+#define SOLO_VO_USER_COLOR_SET_NAH (1<<16)
+#define SOLO_VO_NA_COLOR_Y(Y) ((Y)<<8)
+#define SOLO_VO_NA_COLOR_CB(CB) (((CB)/16)<<4)
+#define SOLO_VO_NA_COLOR_CR(CR) (((CR)/16)<<0)
+
+#define SOLO_VO_ACT_H 0x0304
+#define SOLO_VO_H_BLANK(n) ((n)<<22)
+#define SOLO_VO_H_START(n) ((n)<<11)
+#define SOLO_VO_H_STOP(n) ((n)<<0)
+
+#define SOLO_VO_ACT_V 0x0308
+#define SOLO_VO_V_BLANK(n) ((n)<<22)
+#define SOLO_VO_V_START(n) ((n)<<11)
+#define SOLO_VO_V_STOP(n) ((n)<<0)
+
+#define SOLO_VO_RANGE_HV 0x030C
+#define SOLO_VO_SYNC_INVERT (1<<24)
+#define SOLO_VO_HSYNC_INVERT (1<<23)
+#define SOLO_VO_VSYNC_INVERT (1<<22)
+#define SOLO_VO_H_LEN(n) ((n)<<11)
+#define SOLO_VO_V_LEN(n) ((n)<<0)
+
+#define SOLO_VO_DISP_CTRL 0x0310
+#define SOLO_VO_DISP_ON (1<<31)
+#define SOLO_VO_DISP_ERASE_COUNT(n) ((n&0xf)<<24)
+#define SOLO_VO_DISP_DOUBLE_SCAN (1<<22)
+#define SOLO_VO_DISP_SINGLE_PAGE (1<<21)
+#define SOLO_VO_DISP_BASE(n) (((n)>>16) & 0xffff)
+
+#define SOLO_VO_DISP_ERASE 0x0314
+#define SOLO_VO_DISP_ERASE_ON (1<<0)
+
+#define SOLO_VO_ZOOM_CTRL 0x0318
+#define SOLO_VO_ZOOM_VER_ON (1<<24)
+#define SOLO_VO_ZOOM_HOR_ON (1<<23)
+#define SOLO_VO_ZOOM_V_COMP (1<<22)
+#define SOLO_VO_ZOOM_SX(h) (((h)/2)<<11)
+#define SOLO_VO_ZOOM_SY(v) (((v)/2)<<0)
+
+#define SOLO_VO_FREEZE_CTRL 0x031C
+#define SOLO_VO_FREEZE_ON (1<<1)
+#define SOLO_VO_FREEZE_INTERPOLATION (1<<0)
+
+#define SOLO_VO_BKG_COLOR 0x0320
+#define SOLO_BG_Y(y) ((y)<<16)
+#define SOLO_BG_U(u) ((u)<<8)
+#define SOLO_BG_V(v) ((v)<<0)
+
+#define SOLO_VO_DEINTERLACE 0x0324
+#define SOLO_VO_DEINTERLACE_THRESHOLD(n) ((n)<<8)
+#define SOLO_VO_DEINTERLACE_EDGE_VALUE(n) ((n)<<0)
+
+#define SOLO_VO_BORDER_LINE_COLOR 0x0330
+#define SOLO_VO_BORDER_FILL_COLOR 0x0334
+#define SOLO_VO_BORDER_LINE_MASK 0x0338
+#define SOLO_VO_BORDER_FILL_MASK 0x033c
+
+#define SOLO_VO_BORDER_X(n) (0x0340+((n)*4))
+#define SOLO_VO_BORDER_Y(n) (0x0354+((n)*4))
+
+#define SOLO_VO_CELL_EXT_SET 0x0368
+#define SOLO_VO_CELL_EXT_START 0x036c
+#define SOLO_VO_CELL_EXT_STOP 0x0370
+
+#define SOLO_VO_CELL_EXT_SET2 0x0374
+#define SOLO_VO_CELL_EXT_START2 0x0378
+#define SOLO_VO_CELL_EXT_STOP2 0x037c
+
+#define SOLO_VO_RECTANGLE_CTRL(n) (0x0368+((n)*12))
+#define SOLO_VO_RECTANGLE_START(n) (0x036c+((n)*12))
+#define SOLO_VO_RECTANGLE_STOP(n) (0x0370+((n)*12))
+
+#define SOLO_VO_CURSOR_POS (0x0380)
+#define SOLO_VO_CURSOR_CLR (0x0384)
+#define SOLO_VO_CURSOR_CLR2 (0x0388)
+#define SOLO_VO_CURSOR_MASK(id) (0x0390+((id)*4))
+
+#define SOLO_VO_EXPANSION(id) (0x0250+((id)*4))
+
+#define SOLO_OSG_CONFIG 0x03E0
+#define SOLO_VO_OSG_ON (1<<31)
+#define SOLO_VO_OSG_COLOR_MUTE (1<<28)
+#define SOLO_VO_OSG_ALPHA_RATE(n) ((n)<<22)
+#define SOLO_VO_OSG_ALPHA_BG_RATE(n) ((n)<<16)
+#define SOLO_VO_OSG_BASE(offset) (((offset)>>16)&0xffff)
+
+#define SOLO_OSG_ERASE 0x03E4
+#define SOLO_OSG_ERASE_ON (0x80)
+#define SOLO_OSG_ERASE_OFF (0x00)
+
+#define SOLO_VO_OSG_BLINK 0x03E8
+#define SOLO_VO_OSG_BLINK_ON (1<<1)
+#define SOLO_VO_OSG_BLINK_INTREVAL18 (1<<0)
+
+#define SOLO_CAP_BASE 0x0400
+#define SOLO_CAP_MAX_PAGE(n) ((n)<<16)
+#define SOLO_CAP_BASE_ADDR(n) ((n)<<0)
+#define SOLO_CAP_BTW 0x0404
+#define SOLO_CAP_PROG_BANDWIDTH(n) ((n)<<8)
+#define SOLO_CAP_MAX_BANDWIDTH(n) ((n)<<0)
+
+#define SOLO_DIM_SCALE1 0x0408
+#define SOLO_DIM_SCALE2 0x040C
+#define SOLO_DIM_SCALE3 0x0410
+#define SOLO_DIM_SCALE4 0x0414
+#define SOLO_DIM_SCALE5 0x0418
+#define SOLO_DIM_V_MB_NUM_FRAME(n) ((n)<<16)
+#define SOLO_DIM_V_MB_NUM_FIELD(n) ((n)<<8)
+#define SOLO_DIM_H_MB_NUM(n) ((n)<<0)
+
+#define SOLO_DIM_PROG 0x041C
+#define SOLO_CAP_STATUS 0x0420
+
+#define SOLO_CAP_CH_SCALE(ch) (0x0440+((ch)*4))
+#define SOLO_CAP_CH_COMP_ENA_E(ch) (0x0480+((ch)*4))
+#define SOLO_CAP_CH_INTV(ch) (0x04C0+((ch)*4))
+#define SOLO_CAP_CH_INTV_E(ch) (0x0500+((ch)*4))
+
+
+#define SOLO_VE_CFG0 0x0610
+#define SOLO_VE_TWO_PAGE_MODE (1<<31)
+#define SOLO_VE_INTR_CTRL(n) ((n)<<24)
+#define SOLO_VE_BLOCK_SIZE(n) ((n)<<16)
+#define SOLO_VE_BLOCK_BASE(n) ((n)<<0)
+
+#define SOLO_VE_CFG1 0x0614
+#define SOLO_VE_BYTE_ALIGN(n) ((n)<<24)
+#define SOLO_VE_INSERT_INDEX (1<<18)
+#define SOLO_VE_MOTION_MODE(n) ((n)<<16)
+#define SOLO_VE_MOTION_BASE(n) ((n)<<0)
+
+#define SOLO_VE_WMRK_POLY 0x061C
+#define SOLO_VE_VMRK_INIT_KEY 0x0620
+#define SOLO_VE_WMRK_STRL 0x0624
+#define SOLO_VE_ENCRYP_POLY 0x0628
+#define SOLO_VE_ENCRYP_INIT 0x062C
+#define SOLO_VE_ATTR 0x0630
+#define SOLO_VE_LITTLE_ENDIAN (1<<31)
+#define SOLO_COMP_ATTR_RN (1<<30)
+#define SOLO_COMP_ATTR_FCODE(n) ((n)<<27)
+#define SOLO_COMP_TIME_INC(n) ((n)<<25)
+#define SOLO_COMP_TIME_WIDTH(n) ((n)<<21)
+#define SOLO_DCT_INTERVAL(n) ((n)<<16)
+
+#define SOLO_VE_STATE(n) (0x0640+((n)*4))
+struct videnc_status {
+ union {
+ u32 status0;
+ struct {
+ u32 mp4_enc_code_size:20, sad_motion:1, vid_motion:1,
+ vop_type:2, video_channel:5, source_field_idx:1,
+ interlace:1, progressive:1;
+ } status0_st;
+ };
+ union {
+ u32 status1;
+ struct {
+ u32 vsize:8, hsize:8, last_queue:4, foo1:8, scale:4;
+ } status1_st;
+ };
+ union {
+ u32 status4;
+ struct {
+ u32 jpeg_code_size:20, interval:10, foo1:2;
+ } status4_st;
+ };
+ union {
+ u32 status9;
+ struct {
+ u32 channel:5, foo1:27;
+ } status9_st;
+ };
+ union {
+ u32 status10;
+ struct {
+ u32 mp4_code_size:20, foo:12;
+ } status10_st;
+ };
+ union {
+ u32 status11;
+ struct {
+ u32 last_queue:8, foo1:24;
+ } status11_st;
+ };
+};
+
+#define SOLO_VE_JPEG_QP_TBL 0x0670
+#define SOLO_VE_JPEG_QP_CH_L 0x0674
+#define SOLO_VE_JPEG_QP_CH_H 0x0678
+#define SOLO_VE_JPEG_CFG 0x067C
+#define SOLO_VE_JPEG_CTRL 0x0680
+
+#define SOLO_VE_OSD_CH 0x0690
+#define SOLO_VE_OSD_BASE 0x0694
+#define SOLO_VE_OSD_CLR 0x0698
+#define SOLO_VE_OSD_OPT 0x069C
+
+#define SOLO_VE_CH_INTL(ch) (0x0700+((ch)*4))
+#define SOLO_VE_CH_MOT(ch) (0x0740+((ch)*4))
+#define SOLO_VE_CH_QP(ch) (0x0780+((ch)*4))
+#define SOLO_VE_CH_QP_E(ch) (0x07C0+((ch)*4))
+#define SOLO_VE_CH_GOP(ch) (0x0800+((ch)*4))
+#define SOLO_VE_CH_GOP_E(ch) (0x0840+((ch)*4))
+#define SOLO_VE_CH_REF_BASE(ch) (0x0880+((ch)*4))
+#define SOLO_VE_CH_REF_BASE_E(ch) (0x08C0+((ch)*4))
+
+#define SOLO_VE_MPEG4_QUE(n) (0x0A00+((n)*8))
+#define SOLO_VE_JPEG_QUE(n) (0x0A04+((n)*8))
+
+#define SOLO_VD_CFG0 0x0900
+#define SOLO_VD_CFG_NO_WRITE_NO_WINDOW (1<<24)
+#define SOLO_VD_CFG_BUSY_WIAT_CODE (1<<23)
+#define SOLO_VD_CFG_BUSY_WIAT_REF (1<<22)
+#define SOLO_VD_CFG_BUSY_WIAT_RES (1<<21)
+#define SOLO_VD_CFG_BUSY_WIAT_MS (1<<20)
+#define SOLO_VD_CFG_SINGLE_MODE (1<<18)
+#define SOLO_VD_CFG_SCAL_MANUAL (1<<17)
+#define SOLO_VD_CFG_USER_PAGE_CTRL (1<<16)
+#define SOLO_VD_CFG_LITTLE_ENDIAN (1<<15)
+#define SOLO_VD_CFG_START_FI (1<<14)
+#define SOLO_VD_CFG_ERR_LOCK (1<<13)
+#define SOLO_VD_CFG_ERR_INT_ENA (1<<12)
+#define SOLO_VD_CFG_TIME_WIDTH(n) ((n)<<8)
+#define SOLO_VD_CFG_DCT_INTERVAL(n) ((n)<<0)
+
+#define SOLO_VD_CFG1 0x0904
+
+#define SOLO_VD_DEINTERLACE 0x0908
+#define SOLO_VD_DEINTERLACE_THRESHOLD(n) ((n)<<8)
+#define SOLO_VD_DEINTERLACE_EDGE_VALUE(n) ((n)<<0)
+
+#define SOLO_VD_CODE_ADR 0x090C
+
+#define SOLO_VD_CTRL 0x0910
+#define SOLO_VD_OPER_ON (1<<31)
+#define SOLO_VD_MAX_ITEM(n) ((n)<<0)
+
+#define SOLO_VD_STATUS0 0x0920
+#define SOLO_VD_STATUS0_INTR_ACK (1<<22)
+#define SOLO_VD_STATUS0_INTR_EMPTY (1<<21)
+#define SOLO_VD_STATUS0_INTR_ERR (1<<20)
+
+#define SOLO_VD_STATUS1 0x0924
+
+#define SOLO_VD_IDX0 0x0930
+#define SOLO_VD_IDX_INTERLACE (1<<30)
+#define SOLO_VD_IDX_CHANNEL(n) ((n)<<24)
+#define SOLO_VD_IDX_SIZE(n) ((n)<<0)
+
+#define SOLO_VD_IDX1 0x0934
+#define SOLO_VD_IDX_SRC_SCALE(n) ((n)<<28)
+#define SOLO_VD_IDX_WINDOW(n) ((n)<<24)
+#define SOLO_VD_IDX_DEINTERLACE (1<<16)
+#define SOLO_VD_IDX_H_BLOCK(n) ((n)<<8)
+#define SOLO_VD_IDX_V_BLOCK(n) ((n)<<0)
+
+#define SOLO_VD_IDX2 0x0938
+#define SOLO_VD_IDX_REF_BASE_SIDE (1<<31)
+#define SOLO_VD_IDX_REF_BASE(n) (((n)>>16)&0xffff)
+
+#define SOLO_VD_IDX3 0x093C
+#define SOLO_VD_IDX_DISP_SCALE(n) ((n)<<28)
+#define SOLO_VD_IDX_INTERLACE_WR (1<<27)
+#define SOLO_VD_IDX_INTERPOL (1<<26)
+#define SOLO_VD_IDX_HOR2X (1<<25)
+#define SOLO_VD_IDX_OFFSET_X(n) ((n)<<12)
+#define SOLO_VD_IDX_OFFSET_Y(n) ((n)<<0)
+
+#define SOLO_VD_IDX4 0x0940
+#define SOLO_VD_IDX_DEC_WR_PAGE(n) ((n)<<8)
+#define SOLO_VD_IDX_DISP_RD_PAGE(n) ((n)<<0)
+
+#define SOLO_VD_WR_PAGE(n) (0x03F0 + ((n) * 4))
+
+
+#define SOLO_GPIO_CONFIG_0 0x0B00
+#define SOLO_GPIO_CONFIG_1 0x0B04
+#define SOLO_GPIO_DATA_OUT 0x0B08
+#define SOLO_GPIO_DATA_IN 0x0B0C
+#define SOLO_GPIO_INT_ACK_STA 0x0B10
+#define SOLO_GPIO_INT_ENA 0x0B14
+#define SOLO_GPIO_INT_CFG_0 0x0B18
+#define SOLO_GPIO_INT_CFG_1 0x0B1C
+
+
+#define SOLO_IIC_CFG 0x0B20
+#define SOLO_IIC_ENABLE (1<<8)
+#define SOLO_IIC_PRESCALE(n) ((n)<<0)
+
+#define SOLO_IIC_CTRL 0x0B24
+#define SOLO_IIC_AUTO_CLEAR (1<<20)
+#define SOLO_IIC_STATE_RX_ACK (1<<19)
+#define SOLO_IIC_STATE_BUSY (1<<18)
+#define SOLO_IIC_STATE_SIG_ERR (1<<17)
+#define SOLO_IIC_STATE_TRNS (1<<16)
+#define SOLO_IIC_CH_SET(n) ((n)<<5)
+#define SOLO_IIC_ACK_EN (1<<4)
+#define SOLO_IIC_START (1<<3)
+#define SOLO_IIC_STOP (1<<2)
+#define SOLO_IIC_READ (1<<1)
+#define SOLO_IIC_WRITE (1<<0)
+
+#define SOLO_IIC_TXD 0x0B28
+#define SOLO_IIC_RXD 0x0B2C
+
+/*
+ * UART REGISTER
+ */
+#define SOLO_UART_CONTROL(n) (0x0BA0 + ((n)*0x20))
+#define SOLO_UART_CLK_DIV(n) ((n)<<24)
+#define SOLO_MODEM_CTRL_EN (1<<20)
+#define SOLO_PARITY_ERROR_DROP (1<<18)
+#define SOLO_IRQ_ERR_EN (1<<17)
+#define SOLO_IRQ_RX_EN (1<<16)
+#define SOLO_IRQ_TX_EN (1<<15)
+#define SOLO_RX_EN (1<<14)
+#define SOLO_TX_EN (1<<13)
+#define SOLO_UART_HALF_DUPLEX (1<<12)
+#define SOLO_UART_LOOPBACK (1<<11)
+
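+/* The baud rate appears to be encoded as two fields of SOLO_UART_CONTROL
+ * (bits 9-10 and bits 6-8). */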
+#define SOLO_BAUDRATE_230400 ((0<<9)|(0<<6))
+#define SOLO_BAUDRATE_115200 ((0<<9)|(1<<6))
+#define SOLO_BAUDRATE_57600 ((0<<9)|(2<<6))
+#define SOLO_BAUDRATE_38400 ((0<<9)|(3<<6))
+#define SOLO_BAUDRATE_19200 ((0<<9)|(4<<6))
+#define SOLO_BAUDRATE_9600 ((0<<9)|(5<<6))
+#define SOLO_BAUDRATE_4800 ((0<<9)|(6<<6))
+#define SOLO_BAUDRATE_2400 ((1<<9)|(6<<6))
+#define SOLO_BAUDRATE_1200 ((2<<9)|(6<<6))
+#define SOLO_BAUDRATE_300 ((3<<9)|(6<<6))
+
+#define SOLO_UART_DATA_BIT_8 (3<<4)
+#define SOLO_UART_DATA_BIT_7 (2<<4)
+#define SOLO_UART_DATA_BIT_6 (1<<4)
+#define SOLO_UART_DATA_BIT_5 (0<<4)
+
+#define SOLO_UART_STOP_BIT_1 (0<<2)
+#define SOLO_UART_STOP_BIT_2 (1<<2)
+
+#define SOLO_UART_PARITY_NONE (0<<0)
+#define SOLO_UART_PARITY_EVEN (2<<0)
+#define SOLO_UART_PARITY_ODD (3<<0)
+
+#define SOLO_UART_STATUS(n) (0x0BA4 + ((n)*0x20))
+#define SOLO_UART_CTS (1<<15)
+#define SOLO_UART_RX_BUSY (1<<14)
+#define SOLO_UART_OVERRUN (1<<13)
+#define SOLO_UART_FRAME_ERR (1<<12)
+#define SOLO_UART_PARITY_ERR (1<<11)
+#define SOLO_UART_TX_BUSY (1<<5)
+
+#define SOLO_UART_RX_BUFF_CNT(stat) (((stat)>>6) & 0x1f)
+#define SOLO_UART_RX_BUFF_SIZE 8
+#define SOLO_UART_TX_BUFF_CNT(stat) (((stat)>>0) & 0x1f)
+#define SOLO_UART_TX_BUFF_SIZE 8
+
+#define SOLO_UART_TX_DATA(n) (0x0BA8 + ((n)*0x20))
+#define SOLO_UART_TX_DATA_PUSH (1<<8)
+#define SOLO_UART_RX_DATA(n) (0x0BAC + ((n)*0x20))
+#define SOLO_UART_RX_DATA_POP (1<<8)
+
+#define SOLO_TIMER_CLOCK_NUM 0x0be0
+#define SOLO_TIMER_WATCHDOG 0x0be4
+#define SOLO_TIMER_USEC 0x0be8
+#define SOLO_TIMER_SEC 0x0bec
+
+#define SOLO_AUDIO_CONTROL 0x0D00
+#define SOLO_AUDIO_ENABLE (1<<31)
+#define SOLO_AUDIO_MASTER_MODE (1<<30)
+#define SOLO_AUDIO_I2S_MODE (1<<29)
+#define SOLO_AUDIO_I2S_LR_SWAP (1<<27)
+#define SOLO_AUDIO_I2S_8BIT (1<<26)
+#define SOLO_AUDIO_I2S_MULTI(n) ((n)<<24)
+#define SOLO_AUDIO_MIX_9TO0 (1<<23)
+#define SOLO_AUDIO_DEC_9TO0_VOL(n) ((n)<<20)
+#define SOLO_AUDIO_MIX_19TO10 (1<<19)
+#define SOLO_AUDIO_DEC_19TO10_VOL(n) ((n)<<16)
+#define SOLO_AUDIO_MODE(n) ((n)<<0)
+#define SOLO_AUDIO_SAMPLE 0x0D04
+#define SOLO_AUDIO_EE_MODE_ON (1<<30)
+#define SOLO_AUDIO_EE_ENC_CH(ch) ((ch)<<25)
+#define SOLO_AUDIO_BITRATE(n) ((n)<<16)
+#define SOLO_AUDIO_CLK_DIV(n) ((n)<<0)
+#define SOLO_AUDIO_FDMA_INTR 0x0D08
+#define SOLO_AUDIO_FDMA_INTERVAL(n) ((n)<<19)
+#define SOLO_AUDIO_INTR_ORDER(n) ((n)<<16)
+#define SOLO_AUDIO_FDMA_BASE(n) ((n)<<0)
+#define SOLO_AUDIO_EVOL_0 0x0D0C
+#define SOLO_AUDIO_EVOL_1 0x0D10
+#define SOLO_AUDIO_EVOL(ch, value) ((value)<<((ch)%10))
+#define SOLO_AUDIO_STA 0x0D14
+
+
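+/* Same watchdog register as SOLO_TIMER_WATCHDOG (0x0be4) above */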
+#define SOLO_WATCHDOG 0x0BE4
+#define WATCHDOG_STAT(status) ((status)<<8)
+#define WATCHDOG_TIME(sec) ((sec)&0xff)
+
+#endif /* __SOLO6010_REGISTERS_H */
diff --git a/drivers/staging/solo6x10/solo6010-tw28.c b/drivers/staging/solo6x10/solo6010-tw28.c
new file mode 100644
index 00000000000..0159c839243
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-tw28.c
@@ -0,0 +1,823 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+
+#include "solo6010.h"
+#include "solo6010-tw28.h"
+
+/* XXX: Some of these values are masked into 8-bit regs, and shifted
+ * around for other 8-bit regs. What are the magic bits in these values? */
+#define DEFAULT_HDELAY_NTSC (32 - 4)
+#define DEFAULT_HACTIVE_NTSC (720 + 16)
+#define DEFAULT_VDELAY_NTSC (7 - 2)
+#define DEFAULT_VACTIVE_NTSC (240 + 4)
+
+#define DEFAULT_HDELAY_PAL (32 + 4)
+#define DEFAULT_HACTIVE_PAL (864-DEFAULT_HDELAY_PAL)
+#define DEFAULT_VDELAY_PAL (6)
+#define DEFAULT_VACTIVE_PAL (312-DEFAULT_VDELAY_PAL)
+
+static u8 tbl_tw2864_template[] = {
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x00
+ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x10
+ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x20
+ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x30
+ 0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x40
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x70
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00,
+ 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, // 0x80
+ 0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00,
+ 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, // 0x90
+ 0x00, 0x28, 0x44, 0x44, 0xa0, 0x88, 0x5a, 0x01,
+ 0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, // 0xa0
+ 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44,
+ 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, // 0xb0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+ 0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00,
+ 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, // 0xd0
+ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
+ 0x10, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, // 0xe0
+ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
+ 0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, // 0xf0
+ 0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00,
+};
+
+static u8 tbl_tw2865_ntsc_template[] = {
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x00
+ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x10
+ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x20
+ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, // 0x30
+ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, // 0x40
+ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43,
+ 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, // 0x70
+ 0xE9, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80,
+ 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, // 0x80
+ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00,
+ 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, // 0x90
+ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13,
+ 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, // 0xa0
+ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44,
+ 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, // 0xb0
+ 0xFF, 0xE7, 0xE9, 0xE9, 0xEB, 0xFF, 0xD6, 0xD8,
+ 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80,
+ 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, // 0xd0
+ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
+ 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, // 0xe0
+ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
+ 0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, // 0xf0
+ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0,
+};
+
+static u8 tbl_tw2865_pal_template[] = {
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x00
+ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x10
+ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x20
+ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x30
+ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
+ 0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, // 0x40
+ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43,
+ 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, // 0x70
+ 0xEA, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80,
+ 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, // 0x80
+ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00,
+ 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, // 0x90
+ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13,
+ 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, // 0xa0
+ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44,
+ 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, // 0xb0
+ 0xFF, 0xE7, 0xE9, 0xE9, 0xE9, 0xFF, 0xD7, 0xD8,
+ 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80,
+ 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, // 0xd0
+ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
+ 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, // 0xe0
+ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
+ 0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, // 0xf0
+ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0,
+};
+
+#define is_tw286x(__solo, __id) (!((__solo)->tw2815 & (1 << (__id))))
+
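+/* Register access helpers that pick the tw286x or tw2815 register offset
+ * depending on which chip family was detected for chip_id. */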
+static u8 tw_readbyte(struct solo6010_dev *solo_dev, int chip_id, u8 tw6x_off,
+ u8 tw_off)
+{
+ if (is_tw286x(solo_dev, chip_id))
+ return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_id),
+ tw6x_off);
+ else
+ return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_id),
+ tw_off);
+}
+
+static void tw_writebyte(struct solo6010_dev *solo_dev, int chip_id,
+ u8 tw6x_off, u8 tw_off, u8 val)
+{
+ if (is_tw286x(solo_dev, chip_id))
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_id),
+ tw6x_off, val);
+ else
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_id),
+ tw_off, val);
+}
+
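+/* Write a register only if it doesn't already hold the wanted value,
+ * re-reading to verify, with up to 5 attempts. */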
+static void tw_write_and_verify(struct solo6010_dev *solo_dev, u8 addr, u8 off,
+ u8 val)
+{
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ u8 rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, addr, off);
+ if (rval == val)
+ return;
+
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, addr, off, val);
+ msleep_interruptible(1);
+ }
+
+// printk("solo6010/tw28: Error writing register: %02x->%02x [%02x]\n",
+// addr, off, val);
+}
+
+static int tw2865_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
+{
+ u8 tbl_tw2865_common[256];
+ int i;
+
+ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL)
+ memcpy(tbl_tw2865_common, tbl_tw2865_pal_template,
+ sizeof(tbl_tw2865_common));
+ else
+ memcpy(tbl_tw2865_common, tbl_tw2865_ntsc_template,
+ sizeof(tbl_tw2865_common));
+
+ /* ALINK Mode */
+ if (solo_dev->nr_chans == 4) {
+ tbl_tw2865_common[0xd2] = 0x01;
+ tbl_tw2865_common[0xcf] = 0x00;
+ } else if (solo_dev->nr_chans == 8) {
+ tbl_tw2865_common[0xd2] = 0x02;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2865_common[0xcf] = 0x80;
+ } else if (solo_dev->nr_chans == 16) {
+ tbl_tw2865_common[0xd2] = 0x03;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2865_common[0xcf] = 0x83;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(2))
+ tbl_tw2865_common[0xcf] = 0x83;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(3))
+ tbl_tw2865_common[0xcf] = 0x80;
+ }
+
+ for (i = 0; i < 0xff; i++) {
+ /* Skip read only registers */
+		if (i >= 0xb8 && i <= 0xc1)
+ continue;
+ if ((i & ~0x30) == 0x00 ||
+ (i & ~0x30) == 0x0c ||
+ (i & ~0x30) == 0x0d)
+ continue;
+ if (i >= 0xc4 && i <= 0xc7)
+ continue;
+ if (i == 0xfd)
+ continue;
+
+ tw_write_and_verify(solo_dev, dev_addr, i,
+ tbl_tw2865_common[i]);
+ }
+
+ return 0;
+}
+
+static int tw2864_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
+{
+ u8 tbl_tw2864_common[sizeof(tbl_tw2864_template)];
+ int i;
+
+ memcpy(tbl_tw2864_common, tbl_tw2864_template,
+ sizeof(tbl_tw2864_common));
+
+ if (solo_dev->tw2865 == 0) {
+ /* IRQ Mode */
+ if (solo_dev->nr_chans == 4) {
+ tbl_tw2864_common[0xd2] = 0x01;
+ tbl_tw2864_common[0xcf] = 0x00;
+ } else if (solo_dev->nr_chans == 8) {
+ tbl_tw2864_common[0xd2] = 0x02;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(0))
+ tbl_tw2864_common[0xcf] = 0x43;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2864_common[0xcf] = 0x40;
+ } else if (solo_dev->nr_chans == 16) {
+ tbl_tw2864_common[0xd2] = 0x03;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(0))
+ tbl_tw2864_common[0xcf] = 0x43;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2864_common[0xcf] = 0x43;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(2))
+ tbl_tw2864_common[0xcf] = 0x43;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(3))
+ tbl_tw2864_common[0xcf] = 0x40;
+ }
+ } else {
+ /* ALINK Mode. Assumes that the first tw28xx is a
+ * 2865 and these are in cascade. */
+ for (i = 0; i <= 4; i++)
+			tbl_tw2864_common[0x08 | (i << 4)] = 0x12;
+
+ if (solo_dev->nr_chans == 8) {
+ tbl_tw2864_common[0xd2] = 0x02;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2864_common[0xcf] = 0x80;
+ } else if (solo_dev->nr_chans == 16) {
+ tbl_tw2864_common[0xd2] = 0x03;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2864_common[0xcf] = 0x83;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(2))
+ tbl_tw2864_common[0xcf] = 0x83;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(3))
+ tbl_tw2864_common[0xcf] = 0x80;
+ }
+ }
+
+ /* NTSC or PAL */
+ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) {
+ for (i = 0; i < 4; i++) {
+ tbl_tw2864_common[0x07 | (i << 4)] |= 0x10;
+ tbl_tw2864_common[0x08 | (i << 4)] |= 0x06;
+ tbl_tw2864_common[0x0a | (i << 4)] |= 0x08;
+ tbl_tw2864_common[0x0b | (i << 4)] |= 0x13;
+ tbl_tw2864_common[0x0e | (i << 4)] |= 0x01;
+ }
+ tbl_tw2864_common[0x9d] = 0x90;
+ tbl_tw2864_common[0xf3] = 0x00;
+ tbl_tw2864_common[0xf4] = 0xa0;
+ }
+
+ for (i = 0; i < 0xff; i++) {
+ /* Skip read only registers */
+		if (i >= 0xb8 && i <= 0xc1)
+ continue;
+ if ((i & ~0x30) == 0x00 ||
+ (i & ~0x30) == 0x0c ||
+ (i & ~0x30) == 0x0d)
+ continue;
+ if (i == 0x74 || i == 0x77 || i == 0x78 ||
+ i == 0x79 || i == 0x7a)
+ continue;
+ if (i == 0xfd)
+ continue;
+
+ tw_write_and_verify(solo_dev, dev_addr, i,
+ tbl_tw2864_common[i]);
+ }
+
+ return 0;
+}
+
+static int tw2815_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
+{
+ u8 tbl_ntsc_tw2815_common[] = {
+ 0x00, 0xc8, 0x20, 0xd0, 0x06, 0xf0, 0x08, 0x80,
+ 0x80, 0x80, 0x80, 0x02, 0x06, 0x00, 0x11,
+ };
+
+ u8 tbl_pal_tw2815_common[] = {
+ 0x00, 0x88, 0x20, 0xd0, 0x05, 0x20, 0x28, 0x80,
+ 0x80, 0x80, 0x80, 0x82, 0x06, 0x00, 0x11,
+ };
+
+ u8 tbl_tw2815_sfr[] = {
+ 0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, // 0x00
+ 0x64, 0x80, 0x80, 0x82, 0x82, 0x00, 0x00, 0x00,
+ 0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, // 0x10
+ 0x00, 0x00, 0x00, 0xff, 0x8f, 0x00, 0x00, 0x00,
+ 0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, // 0x20
+ 0x31, 0x75, 0xb9, 0xfd, 0x00, 0x00, 0x88, 0x88,
+ 0x88, 0x11, 0x00, 0x88, 0x88, 0x00, // 0x30
+ };
+ u8 *tbl_tw2815_common;
+ int i;
+ int ch;
+
+ tbl_ntsc_tw2815_common[0x06] = 0;
+
+ /* Horizontal Delay Control */
+ tbl_ntsc_tw2815_common[0x02] = DEFAULT_HDELAY_NTSC & 0xff;
+ tbl_ntsc_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_NTSC >> 8);
+
+ /* Horizontal Active Control */
+ tbl_ntsc_tw2815_common[0x03] = DEFAULT_HACTIVE_NTSC & 0xff;
+ tbl_ntsc_tw2815_common[0x06] |=
+ ((0x03 & (DEFAULT_HACTIVE_NTSC >> 8)) << 2);
+
+ /* Vertical Delay Control */
+ tbl_ntsc_tw2815_common[0x04] = DEFAULT_VDELAY_NTSC & 0xff;
+ tbl_ntsc_tw2815_common[0x06] |=
+ ((0x01 & (DEFAULT_VDELAY_NTSC >> 8)) << 4);
+
+ /* Vertical Active Control */
+ tbl_ntsc_tw2815_common[0x05] = DEFAULT_VACTIVE_NTSC & 0xff;
+ tbl_ntsc_tw2815_common[0x06] |=
+ ((0x01 & (DEFAULT_VACTIVE_NTSC >> 8)) << 5);
+
+ tbl_pal_tw2815_common[0x06] = 0;
+
+ /* Horizontal Delay Control */
+ tbl_pal_tw2815_common[0x02] = DEFAULT_HDELAY_PAL & 0xff;
+ tbl_pal_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_PAL >> 8);
+
+ /* Horizontal Active Control */
+ tbl_pal_tw2815_common[0x03] = DEFAULT_HACTIVE_PAL & 0xff;
+ tbl_pal_tw2815_common[0x06] |=
+ ((0x03 & (DEFAULT_HACTIVE_PAL >> 8)) << 2);
+
+ /* Vertical Delay Control */
+ tbl_pal_tw2815_common[0x04] = DEFAULT_VDELAY_PAL & 0xff;
+ tbl_pal_tw2815_common[0x06] |=
+ ((0x01 & (DEFAULT_VDELAY_PAL >> 8)) << 4);
+
+ /* Vertical Active Control */
+ tbl_pal_tw2815_common[0x05] = DEFAULT_VACTIVE_PAL & 0xff;
+ tbl_pal_tw2815_common[0x06] |=
+ ((0x01 & (DEFAULT_VACTIVE_PAL >> 8)) << 5);
+
+ tbl_tw2815_common =
+ (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) ?
+ tbl_ntsc_tw2815_common : tbl_pal_tw2815_common;
+
+ /* Dual ITU-R BT.656 format */
+ tbl_tw2815_common[0x0d] |= 0x04;
+
+ /* Audio configuration */
+ tbl_tw2815_sfr[0x62 - 0x40] &= ~(3 << 6);
+
+ if (solo_dev->nr_chans == 4) {
+ tbl_tw2815_sfr[0x63 - 0x40] |= 1;
+ tbl_tw2815_sfr[0x62 - 0x40] |= 3 << 6;
+ } else if (solo_dev->nr_chans == 8) {
+ tbl_tw2815_sfr[0x63 - 0x40] |= 2;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(0))
+ tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6;
+ } else if (solo_dev->nr_chans == 16) {
+ tbl_tw2815_sfr[0x63 - 0x40] |= 3;
+ if (dev_addr == TW_CHIP_OFFSET_ADDR(0))
+ tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(1))
+ tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(2))
+ tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6;
+ else if (dev_addr == TW_CHIP_OFFSET_ADDR(3))
+ tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6;
+ }
+
+ /* Output mode of R_ADATM pin (0 mixing, 1 record) */
+ /* tbl_tw2815_sfr[0x63 - 0x40] |= 0 << 2; */
+
+ /* 8KHz, used to be 16KHz, but changed for remote client compat */
+ tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 2;
+ tbl_tw2815_sfr[0x6c - 0x40] |= 0 << 2;
+
+ /* Playback of right channel */
+ tbl_tw2815_sfr[0x6c - 0x40] |= 1 << 5;
+
+ /* Reserved value (XXX ??) */
+ tbl_tw2815_sfr[0x5c - 0x40] |= 1 << 5;
+
+ /* Analog output gain and mix ratio playback on full */
+ tbl_tw2815_sfr[0x70 - 0x40] |= 0xff;
+ /* Select playback audio and mute all except */
+ tbl_tw2815_sfr[0x71 - 0x40] |= 0x10;
+ tbl_tw2815_sfr[0x6d - 0x40] |= 0x0f;
+
+ /* End of audio configuration */
+
+ for (ch = 0; ch < 4; ch++) {
+ tbl_tw2815_common[0x0d] &= ~3;
+ switch (ch) {
+ case 0:
+ tbl_tw2815_common[0x0d] |= 0x21;
+ break;
+ case 1:
+ tbl_tw2815_common[0x0d] |= 0x20;
+ break;
+ case 2:
+ tbl_tw2815_common[0x0d] |= 0x23;
+ break;
+ case 3:
+ tbl_tw2815_common[0x0d] |= 0x22;
+ break;
+ }
+
+ for (i = 0; i < 0x0f; i++) {
+ if (i == 0x00)
+ continue; // read-only
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
+ dev_addr, (ch * 0x10) + i,
+ tbl_tw2815_common[i]);
+ }
+ }
+
+ for (i = 0x40; i < 0x76; i++) {
+ /* Skip read-only and nop registers */
+ if (i == 0x40 || i == 0x59 || i == 0x5a ||
+ i == 0x5d || i == 0x5e || i == 0x5f)
+ continue;
+
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, i,
+ tbl_tw2815_sfr[i - 0x40]);
+ }
+
+ return 0;
+}
+
+#define FIRST_ACTIVE_LINE 0x0008
+#define LAST_ACTIVE_LINE 0x0102
+
+static void saa7128_setup(struct solo6010_dev *solo_dev)
+{
+ int i;
+ unsigned char regs[128] = {
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1C, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00,
+ 0x59, 0x1d, 0x75, 0x3f, 0x06, 0x3f, 0x00, 0x00,
+ 0x1c, 0x33, 0x00, 0x3f, 0x00, 0x00, 0x3f, 0x00,
+ 0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18,
+ 0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f,
+ 0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06,
+ 0x02, 0x80, 0x71, 0x77, 0xa7, 0x67, 0x66, 0x2e,
+ 0x7b, 0x11, 0x4f, 0x1f, 0x7c, 0xf0, 0x21, 0x77,
+ 0x41, 0x88, 0x41, 0x12, 0xed, 0x10, 0x10, 0x00,
+ 0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0xff, 0x80, 0x00, 0xff, 0xff,
+ };
+
+ regs[0x7A] = FIRST_ACTIVE_LINE & 0xff;
+ regs[0x7B] = LAST_ACTIVE_LINE & 0xff;
+ regs[0x7C] = ((1 << 7) |
+ (((LAST_ACTIVE_LINE >> 8) & 1) << 6) |
+ (((FIRST_ACTIVE_LINE >> 8) & 1) << 4));
+
+ /* PAL: XXX: We could do a second set of regs to avoid this */
+ if (solo_dev->video_type != SOLO_VO_FMT_TYPE_NTSC) {
+ regs[0x28] = 0xE1;
+
+ regs[0x5A] = 0x0F;
+ regs[0x61] = 0x02;
+ regs[0x62] = 0x35;
+ regs[0x63] = 0xCB;
+ regs[0x64] = 0x8A;
+ regs[0x65] = 0x09;
+ regs[0x66] = 0x2A;
+
+ regs[0x6C] = 0xf1;
+ regs[0x6E] = 0x20;
+
+ regs[0x7A] = 0x06 + 12;
+ regs[0x7b] = 0x24 + 12;
+ regs[0x7c] |= 1 << 6;
+ }
+
+ /* First 0x25 bytes are read-only? */
+ for (i = 0x26; i < 128; i++) {
+ if (i == 0x60 || i == 0x7D)
+ continue;
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_SAA, 0x46, i, regs[i]);
+ }
+
+ return;
+}
+
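+/* Probe each possible chip address: the ID read from register 0xff
+ * identifies tw2865 and tw2864 parts; if neither matches, register 0x59
+ * is checked for a tw2815. */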
+int solo_tw28_init(struct solo6010_dev *solo_dev)
+{
+ int i;
+ u8 value;
+
+ /* Detect techwell chip type */
+ for (i = 0; i < TW_NUM_CHIP; i++) {
+ value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(i), 0xFF);
+
+ switch (value >> 3) {
+ case 0x18:
+ solo_dev->tw2865 |= 1 << i;
+ solo_dev->tw28_cnt++;
+ break;
+ case 0x0c:
+ solo_dev->tw2864 |= 1 << i;
+ solo_dev->tw28_cnt++;
+ break;
+ default:
+ value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(i), 0x59);
+ if ((value >> 3) == 0x04) {
+ solo_dev->tw2815 |= 1 << i;
+ solo_dev->tw28_cnt++;
+ }
+ }
+ }
+
+ if (!solo_dev->tw28_cnt)
+ return -EINVAL;
+
+ saa7128_setup(solo_dev);
+
+ for (i = 0; i < solo_dev->tw28_cnt; i++) {
+ if ((solo_dev->tw2865 & (1 << i)))
+ tw2865_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i));
+ else if ((solo_dev->tw2864 & (1 << i)))
+ tw2864_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i));
+ else
+ tw2815_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i));
+ }
+
+ dev_info(&solo_dev->pdev->dev, "Initialized %d tw28xx chip%s:",
+ solo_dev->tw28_cnt, solo_dev->tw28_cnt == 1 ? "" : "s");
+
+ if (solo_dev->tw2865)
+ printk(" tw2865[%d]", hweight32(solo_dev->tw2865));
+ if (solo_dev->tw2864)
+ printk(" tw2864[%d]", hweight32(solo_dev->tw2864));
+ if (solo_dev->tw2815)
+ printk(" tw2815[%d]", hweight32(solo_dev->tw2815));
+ printk("\n");
+
+ return 0;
+}
+
+/*
+ * We access the video status signal of the Techwell chip through I2C
+ * because the video status reported by register REG_VI_STATUS1
+ * (address 0x012C) of the SOLO6010 chip doesn't give correct values.
+ */
+int tw28_get_video_status(struct solo6010_dev *solo_dev, u8 ch)
+{
+ u8 val, chip_num;
+
+ /* Get the right chip and on-chip channel */
+ chip_num = ch / 4;
+ ch %= 4;
+
+ val = tw_readbyte(solo_dev, chip_num, TW286X_AV_STAT_ADDR,
+ TW_AV_STAT_ADDR) & 0x0f;
+
+ return val & (1 << ch) ? 1 : 0;
+}
+
+#if 0
+/* The audio status of up to 4 techwell chips is combined into one variable.
+ * See the techwell datasheet for details. */
+u16 tw28_get_audio_status(struct solo6010_dev *solo_dev)
+{
+ u8 val;
+ u16 status = 0;
+ int i;
+
+ for (i = 0; i < solo_dev->tw28_cnt; i++) {
+ val = (tw_readbyte(solo_dev, i, TW286X_AV_STAT_ADDR,
+ TW_AV_STAT_ADDR) & 0xf0) >> 4;
+ status |= val << (i * 4);
+ }
+
+ return status;
+}
+#endif
+
+int tw28_set_ctrl_val(struct solo6010_dev *solo_dev, u32 ctrl, u8 ch,
+ s32 val)
+{
+ char sval;
+ u8 chip_num;
+
+ /* Get the right chip and on-chip channel */
+ chip_num = ch / 4;
+ ch %= 4;
+
+ if (val > 255 || val < 0)
+ return -ERANGE;
+
+ switch (ctrl) {
+ case V4L2_CID_SHARPNESS:
+ /* Only 286x has sharpness */
+ if (val > 0x0f || val < 0)
+ return -ERANGE;
+ if (is_tw286x(solo_dev, chip_num)) {
+ u8 v = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_num),
+ TW286x_SHARPNESS(chip_num));
+ v &= 0xf0;
+ v |= val;
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_num),
+ TW286x_SHARPNESS(chip_num), v);
+ } else if (val != 0)
+ return -ERANGE;
+ break;
+
+ case V4L2_CID_HUE:
+ if (is_tw286x(solo_dev, chip_num))
+ sval = val - 128;
+ else
+ sval = (char)val;
+ tw_writebyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch),
+ TW_HUE_ADDR(ch), sval);
+
+ break;
+
+ case V4L2_CID_SATURATION:
+ if (is_tw286x(solo_dev, chip_num)) {
+ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_num),
+ TW286x_SATURATIONU_ADDR(ch), val);
+ }
+ tw_writebyte(solo_dev, chip_num, TW286x_SATURATIONV_ADDR(ch),
+ TW_SATURATION_ADDR(ch), val);
+
+ break;
+
+ case V4L2_CID_CONTRAST:
+ tw_writebyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch),
+ TW_CONTRAST_ADDR(ch), val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ if (is_tw286x(solo_dev, chip_num))
+ sval = val - 128;
+ else
+ sval = (char)val;
+ tw_writebyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch),
+ TW_BRIGHTNESS_ADDR(ch), sval);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int tw28_get_ctrl_val(struct solo6010_dev *solo_dev, u32 ctrl, u8 ch,
+ s32 *val)
+{
+ u8 rval, chip_num;
+
+ /* Get the right chip and on-chip channel */
+ chip_num = ch / 4;
+ ch %= 4;
+
+ switch (ctrl) {
+ case V4L2_CID_SHARPNESS:
+ /* Only 286x has sharpness */
+ if (is_tw286x(solo_dev, chip_num)) {
+ rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW,
+ TW_CHIP_OFFSET_ADDR(chip_num),
+ TW286x_SHARPNESS(chip_num));
+ *val = rval & 0x0f;
+ } else
+ *val = 0;
+ break;
+ case V4L2_CID_HUE:
+ rval = tw_readbyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch),
+ TW_HUE_ADDR(ch));
+ if (is_tw286x(solo_dev, chip_num))
+ *val = (s32)((char)rval) + 128;
+ else
+ *val = rval;
+ break;
+ case V4L2_CID_SATURATION:
+ *val = tw_readbyte(solo_dev, chip_num,
+ TW286x_SATURATIONU_ADDR(ch),
+ TW_SATURATION_ADDR(ch));
+ break;
+ case V4L2_CID_CONTRAST:
+ *val = tw_readbyte(solo_dev, chip_num,
+ TW286x_CONTRAST_ADDR(ch),
+ TW_CONTRAST_ADDR(ch));
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ rval = tw_readbyte(solo_dev, chip_num,
+ TW286x_BRIGHTNESS_ADDR(ch),
+ TW_BRIGHTNESS_ADDR(ch));
+ if (is_tw286x(solo_dev, chip_num))
+ *val = (s32)((char)rval) + 128;
+ else
+ *val = rval;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#if 0
+/*
+ * For audio output volume there is only one output channel, so we don't
+ * need to offset TW_CHIP_OFFSET_ADDR; the TW_CHIP_OFFSET_ADDR used is the
+ * base address of the techwell chip.
+ */
+void tw2815_Set_AudioOutVol(struct solo6010_dev *solo_dev, unsigned int u_val)
+{
+ unsigned int val;
+ unsigned int chip_num;
+
+ chip_num = (solo_dev->nr_chans - 1) / 4;
+
+ val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR,
+ TW_AUDIO_OUTPUT_VOL_ADDR);
+
+ u_val = (val & 0x0f) | (u_val << 4);
+
+ tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR,
+ TW_AUDIO_OUTPUT_VOL_ADDR, u_val);
+}
+#endif
+
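+/* Two channels share each audio gain register: odd channels use the high
+ * nibble, even channels the low nibble. */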
+u8 tw28_get_audio_gain(struct solo6010_dev *solo_dev, u8 ch)
+{
+ u8 val;
+ u8 chip_num;
+
+ /* Get the right chip and on-chip channel */
+ chip_num = ch / 4;
+ ch %= 4;
+
+ val = tw_readbyte(solo_dev, chip_num,
+ TW286x_AUDIO_INPUT_GAIN_ADDR(ch),
+ TW_AUDIO_INPUT_GAIN_ADDR(ch));
+
+ return (ch % 2) ? (val >> 4) : (val & 0x0f);
+}
+
+void tw28_set_audio_gain(struct solo6010_dev *solo_dev, u8 ch, u8 val)
+{
+ u8 old_val;
+ u8 chip_num;
+
+ /* Get the right chip and on-chip channel */
+ chip_num = ch / 4;
+ ch %= 4;
+
+ old_val = tw_readbyte(solo_dev, chip_num,
+ TW286x_AUDIO_INPUT_GAIN_ADDR(ch),
+ TW_AUDIO_INPUT_GAIN_ADDR(ch));
+
+ val = (old_val & ((ch % 2) ? 0x0f : 0xf0)) |
+ ((ch % 2) ? (val << 4) : val);
+
+ tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch),
+ TW_AUDIO_INPUT_GAIN_ADDR(ch), val);
+}
diff --git a/drivers/staging/solo6x10/solo6010-tw28.h b/drivers/staging/solo6x10/solo6010-tw28.h
new file mode 100644
index 00000000000..a7eecfa1a81
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-tw28.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOLO6010_TW28_H
+#define __SOLO6010_TW28_H
+
+#include "solo6010.h"
+
+#define TW_NUM_CHIP 4
+#define TW_BASE_ADDR 0x28
+#define TW_CHIP_OFFSET_ADDR(n) (TW_BASE_ADDR + (n))
+
+/* tw2815 */
+#define TW_AV_STAT_ADDR 0x5a
+#define TW_HUE_ADDR(n) (0x07 | ((n) << 4))
+#define TW_SATURATION_ADDR(n) (0x08 | ((n) << 4))
+#define TW_CONTRAST_ADDR(n) (0x09 | ((n) << 4))
+#define TW_BRIGHTNESS_ADDR(n) (0x0a | ((n) << 4))
+#define TW_AUDIO_OUTPUT_VOL_ADDR 0x70
+#define TW_AUDIO_INPUT_GAIN_ADDR(n) (0x60 + ((n > 1) ? 1 : 0))
+
+/* tw286x */
+#define TW286X_AV_STAT_ADDR 0xfd
+#define TW286x_HUE_ADDR(n) (0x06 | ((n) << 4))
+#define TW286x_SATURATIONU_ADDR(n) (0x04 | ((n) << 4))
+#define TW286x_SATURATIONV_ADDR(n) (0x05 | ((n) << 4))
+#define TW286x_CONTRAST_ADDR(n) (0x02 | ((n) << 4))
+#define TW286x_BRIGHTNESS_ADDR(n) (0x01 | ((n) << 4))
+#define TW286x_SHARPNESS(n) (0x03 | ((n) << 4))
+#define TW286x_AUDIO_OUTPUT_VOL_ADDR 0xdf
+#define TW286x_AUDIO_INPUT_GAIN_ADDR(n) (0xD0 + ((n > 1) ? 1 : 0))
+
+int solo_tw28_init(struct solo6010_dev *solo_dev);
+
+int tw28_set_ctrl_val(struct solo6010_dev *solo_dev, u32 ctrl, u8 ch,
+ s32 val);
+int tw28_get_ctrl_val(struct solo6010_dev *solo_dev, u32 ctrl, u8 ch,
+ s32 *val);
+
+u8 tw28_get_audio_gain(struct solo6010_dev *solo_dev, u8 ch);
+void tw28_set_audio_gain(struct solo6010_dev *solo_dev, u8 ch, u8 val);
+int tw28_get_video_status(struct solo6010_dev *solo_dev, u8 ch);
+
+#if 0
+u16 tw28_get_audio_status(struct solo6010_dev *solo_dev);
+void tw2815_Set_AudioOutVol(struct solo6010_dev *solo_dev, unsigned int u_val);
+#endif
+
+#endif /* __SOLO6010_TW28_H */
diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
new file mode 100644
index 00000000000..f114b4b7d8e
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
@@ -0,0 +1,1564 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "solo6010.h"
+#include "solo6010-tw28.h"
+#include "solo6010-jpeg.h"
+
+#define MIN_VID_BUFFERS 4
+#define FRAME_BUF_SIZE (128 * 1024)
+#define MP4_QS 16
+
+static int solo_enc_thread(void *data);
+
+extern unsigned video_nr;
+
+struct solo_enc_fh {
+ struct solo_enc_dev *enc;
+ u32 fmt;
+ u16 rd_idx;
+ u8 enc_on;
+ enum solo_enc_types type;
+ struct videobuf_queue vidq;
+ struct list_head vidq_active;
+ struct task_struct *kthread;
+};
+
+static unsigned char vid_vop_header[] = {
+ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
+ 0x02, 0x48, 0x05, 0xc0, 0x00, 0x40, 0x00, 0x40,
+ 0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
+ 0x1f, 0x4c, 0x58, 0x10, 0x78, 0x51, 0x18, 0x3e,
+};
+
+/*
+ * Things we can change around:
+ *
+ * byte 10, 4-bits 01111000 aspect
+ * bytes 21,22,23 16-bits 000x1111 11111111 1111x000 fps/res
+ * bytes 23,24,25 15-bits 00000n11 11111111 11111x00 interval
+ * bytes 25,26,27 13-bits 00000x11 11111111 111x0000 width
+ * bytes 27,28,29 13-bits 000x1111 11111111 1x000000 height
+ * byte 29 1-bit 0x100000 interlace
+ */
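+/* solo_fill_mpeg() below patches these bytes for each key frame. */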
+
+/* For aspect */
+#define XVID_PAR_43_PAL 2
+#define XVID_PAR_43_NTSC 3
+
+static const u32 solo_user_ctrls[] = {
+ V4L2_CID_BRIGHTNESS,
+ V4L2_CID_CONTRAST,
+ V4L2_CID_SATURATION,
+ V4L2_CID_HUE,
+ V4L2_CID_SHARPNESS,
+ 0
+};
+
+static const u32 solo_mpeg_ctrls[] = {
+ V4L2_CID_MPEG_VIDEO_ENCODING,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0
+};
+
+static const u32 solo_private_ctrls[] = {
+ V4L2_CID_MOTION_ENABLE,
+ V4L2_CID_MOTION_THRESHOLD,
+ 0
+};
+
+static const u32 solo_fmtx_ctrls[] = {
+ V4L2_CID_RDS_TX_RADIO_TEXT,
+ 0
+};
+
+static const u32 *solo_ctrl_classes[] = {
+ solo_user_ctrls,
+ solo_mpeg_ctrls,
+ solo_fmtx_ctrls,
+ solo_private_ctrls,
+ NULL
+};
+
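+/* Hardware-generated header that precedes each encoded frame in the MPEG
+ * ring buffer (not real MPEG data); the fields follow the VD_IDX register
+ * layout noted per field below. */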
+struct vop_header {
+ /* VD_IDX0 */
+ u32 size:20, sync_start:1, page_stop:1, vop_type:2, channel:4,
+ nop0:1, source_fl:1, interlace:1, progressive:1;
+
+ /* VD_IDX1 */
+ u32 vsize:8, hsize:8, frame_interop:1, nop1:7, win_id:4, scale:4;
+
+ /* VD_IDX2 */
+ u32 base_addr:16, nop2:15, hoff:1;
+
+ /* VD_IDX3 - User set macros */
+ u32 sy:12, sx:12, nop3:1, hzoom:1, read_interop:1, write_interlace:1,
+ scale_mode:4;
+
+ /* VD_IDX4 - User set macros continued */
+ u32 write_page:8, nop4:24;
+
+ /* VD_IDX5 */
+ u32 next_code_addr;
+
+ u32 end_nops[10];
+} __attribute__((packed));
+
+static int solo_is_motion_on(struct solo_enc_dev *solo_enc)
+{
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ u8 ch = solo_enc->ch;
+
+ if (solo_dev->motion_mask & (1 << ch))
+ return 1;
+ return 0;
+}
+
+static void solo_motion_toggle(struct solo_enc_dev *solo_enc, int on)
+{
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ u8 ch = solo_enc->ch;
+
+ spin_lock(&solo_enc->lock);
+
+ if (on)
+ solo_dev->motion_mask |= (1 << ch);
+ else
+ solo_dev->motion_mask &= ~(1 << ch);
+
+ solo_reg_write(solo_dev, SOLO_VI_MOT_ADR,
+ SOLO_VI_MOTION_EN(solo_dev->motion_mask) |
+ (SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
+
+ if (solo_dev->motion_mask)
+ solo6010_irq_on(solo_dev, SOLO_IRQ_MOTION);
+ else
+ solo6010_irq_off(solo_dev, SOLO_IRQ_MOTION);
+
+ spin_unlock(&solo_enc->lock);
+}
+
+/* Should be called with solo_enc->lock held */
+static void solo_update_mode(struct solo_enc_dev *solo_enc)
+{
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ assert_spin_locked(&solo_enc->lock);
+
+ solo_enc->interlaced = (solo_enc->mode & 0x08) ? 1 : 0;
+ solo_enc->bw_weight = max(solo_dev->fps / solo_enc->interval, 1);
+
+ switch (solo_enc->mode) {
+ case SOLO_ENC_MODE_CIF:
+ solo_enc->width = solo_dev->video_hsize >> 1;
+ solo_enc->height = solo_dev->video_vsize;
+ break;
+ case SOLO_ENC_MODE_D1:
+ solo_enc->width = solo_dev->video_hsize;
+ solo_enc->height = solo_dev->video_vsize << 1;
+ solo_enc->bw_weight <<= 2;
+ break;
+ default:
+ WARN(1, "mode is unknown");
+ }
+}
+
+/* Should be called with solo_enc->lock held */
+static int solo_enc_on(struct solo_enc_fh *fh)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+ u8 ch = solo_enc->ch;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ u8 interval;
+
+ assert_spin_locked(&solo_enc->lock);
+
+ if (fh->enc_on)
+ return 0;
+
+ solo_update_mode(solo_enc);
+
+	/* Make sure to do a bandwidth check on the first reader */
+ if (!atomic_read(&solo_enc->readers)) {
+ if (solo_enc->bw_weight > solo_dev->enc_bw_remain)
+ return -EBUSY;
+ else
+ solo_dev->enc_bw_remain -= solo_enc->bw_weight;
+ }
+
+ fh->kthread = kthread_run(solo_enc_thread, fh, SOLO6010_NAME "_enc");
+
+ if (IS_ERR(fh->kthread))
+ return PTR_ERR(fh->kthread);
+
+ fh->enc_on = 1;
+ fh->rd_idx = solo_enc->solo_dev->enc_wr_idx;
+
+ if (fh->type == SOLO_ENC_TYPE_EXT)
+ solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(ch), 1);
+
+ if (atomic_inc_return(&solo_enc->readers) > 1)
+ return 0;
+
+ /* Disable all encoding for this channel */
+ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), 0);
+
+ /* Common for both std and ext encoding */
+ solo_reg_write(solo_dev, SOLO_VE_CH_INTL(ch),
+ solo_enc->interlaced ? 1 : 0);
+
+ if (solo_enc->interlaced)
+ interval = solo_enc->interval - 1;
+ else
+ interval = solo_enc->interval;
+
+ /* Standard encoding only */
+ solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), solo_enc->gop);
+ solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp);
+ solo_reg_write(solo_dev, SOLO_CAP_CH_INTV(ch), interval);
+
+ /* Extended encoding only */
+ solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(ch), solo_enc->gop);
+ solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp);
+ solo_reg_write(solo_dev, SOLO_CAP_CH_INTV_E(ch), interval);
+
+ /* Enables the standard encoder */
+ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), solo_enc->mode);
+
+ /* Settle down Beavis... */
+ mdelay(10);
+
+ return 0;
+}
+
+static void solo_enc_off(struct solo_enc_fh *fh)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ if (!fh->enc_on)
+ return;
+
+ if (fh->kthread) {
+ kthread_stop(fh->kthread);
+ fh->kthread = NULL;
+ }
+
+ solo_dev->enc_bw_remain += solo_enc->bw_weight;
+ fh->enc_on = 0;
+
+ if (atomic_dec_return(&solo_enc->readers) > 0)
+ return;
+
+ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(solo_enc->ch), 0);
+ solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(solo_enc->ch), 0);
+}
+
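+/* GOP reset: force the GOP size to 1 so the hardware emits an I-frame,
+ * skip frames until that I-frame arrives, then restore the configured
+ * GOP size. */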
+static void enc_reset_gop(struct solo6010_dev *solo_dev, u8 ch)
+{
+ BUG_ON(ch >= solo_dev->nr_chans);
+ solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), 1);
+ solo_dev->v4l2_enc[ch]->reset_gop = 1;
+}
+
+static int enc_gop_reset(struct solo6010_dev *solo_dev, u8 ch, u8 vop)
+{
+ BUG_ON(ch >= solo_dev->nr_chans);
+ if (!solo_dev->v4l2_enc[ch]->reset_gop)
+ return 0;
+ if (vop)
+ return 1;
+ solo_dev->v4l2_enc[ch]->reset_gop = 0;
+ solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch),
+ solo_dev->v4l2_enc[ch]->gop);
+ return 0;
+}
+
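+/* DMA encoded data out of the on-chip MPEG ring buffer, splitting the
+ * transfer in two when it wraps past SOLO_MP4E_EXT_SIZE. */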
+static int enc_get_mpeg_dma_t(struct solo6010_dev *solo_dev, dma_addr_t buf,
+ unsigned int off, unsigned int size)
+{
+ int ret;
+
+ if (off > SOLO_MP4E_EXT_SIZE(solo_dev))
+ return -EINVAL;
+
+ if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev))
+ return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf,
+ SOLO_MP4E_EXT_ADDR(solo_dev) + off, size);
+
+ /* Buffer wrap */
+ ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf,
+ SOLO_MP4E_EXT_ADDR(solo_dev) + off,
+ SOLO_MP4E_EXT_SIZE(solo_dev) - off);
+
+ ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0,
+ buf + SOLO_MP4E_EXT_SIZE(solo_dev) - off,
+ SOLO_MP4E_EXT_ADDR(solo_dev),
+ size + off - SOLO_MP4E_EXT_SIZE(solo_dev));
+
+ return ret;
+}
+
+static int enc_get_mpeg_dma(struct solo6010_dev *solo_dev, void *buf,
+ unsigned int off, unsigned int size)
+{
+ int ret;
+
+ dma_addr_t dma_addr = pci_map_single(solo_dev->pdev, buf, size,
+ PCI_DMA_FROMDEVICE);
+ ret = enc_get_mpeg_dma_t(solo_dev, dma_addr, off, size);
+ pci_unmap_single(solo_dev->pdev, dma_addr, size, PCI_DMA_FROMDEVICE);
+
+ return ret;
+}
+
+static int enc_get_jpeg_dma(struct solo6010_dev *solo_dev, dma_addr_t buf,
+ unsigned int off, unsigned int size)
+{
+ int ret;
+
+ if (off > SOLO_JPEG_EXT_SIZE(solo_dev))
+ return -EINVAL;
+
+ if (off + size <= SOLO_JPEG_EXT_SIZE(solo_dev))
+ return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0, buf,
+ SOLO_JPEG_EXT_ADDR(solo_dev) + off, size);
+
+ /* Buffer wrap */
+ ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0, buf,
+ SOLO_JPEG_EXT_ADDR(solo_dev) + off,
+ SOLO_JPEG_EXT_SIZE(solo_dev) - off);
+
+ ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0,
+ buf + SOLO_JPEG_EXT_SIZE(solo_dev) - off,
+ SOLO_JPEG_EXT_ADDR(solo_dev),
+ size + off - SOLO_JPEG_EXT_SIZE(solo_dev));
+
+ return ret;
+}
+
+static int solo_fill_jpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
+ struct videobuf_buffer *vb, dma_addr_t vbuf)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+
+ memcpy(p, jpeg_header, sizeof(jpeg_header));
+ p[SOF0_START + 5] = 0xff & (solo_enc->height >> 8);
+ p[SOF0_START + 6] = 0xff & solo_enc->height;
+ p[SOF0_START + 7] = 0xff & (solo_enc->width >> 8);
+ p[SOF0_START + 8] = 0xff & solo_enc->width;
+
+ vbuf += sizeof(jpeg_header);
+ vb->size = enc_buf->jpeg_size + sizeof(jpeg_header);
+
+ return enc_get_jpeg_dma(solo_dev, vbuf, enc_buf->jpeg_off,
+ enc_buf->jpeg_size);
+}
+
+static int solo_fill_mpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
+ struct videobuf_buffer *vb, dma_addr_t vbuf)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ struct vop_header vh;
+ int ret;
+ int frame_size, frame_off;
+
+ if (WARN_ON_ONCE(enc_buf->size <= sizeof(vh)))
+ return -1;
+
+ /* First get the hardware vop header (not real mpeg) */
+ ret = enc_get_mpeg_dma(solo_dev, &vh, enc_buf->off, sizeof(vh));
+ if (ret)
+ return -1;
+
+ if (WARN_ON_ONCE(vh.size > enc_buf->size))
+ return -1;
+
+ vb->width = vh.hsize << 4;
+ vb->height = vh.vsize << 4;
+ vb->size = vh.size;
+
+ /* If this is a key frame, add extra m4v header */
+ if (!enc_buf->vop) {
+ u16 fps = solo_dev->fps * 1000;
+ u16 interval = solo_enc->interval * 1000;
+ u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+
+ memcpy(p, vid_vop_header, sizeof(vid_vop_header));
+
+ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
+ p[10] |= ((XVID_PAR_43_NTSC << 3) & 0x78);
+ else
+ p[10] |= ((XVID_PAR_43_PAL << 3) & 0x78);
+
+ /* Frame rate and interval */
+ p[22] = fps >> 4;
+ p[23] = ((fps << 4) & 0xf0) | 0x0c | ((interval >> 13) & 0x3);
+ p[24] = (interval >> 5) & 0xff;
+ p[25] = ((interval << 3) & 0xf8) | 0x04;
+
+ /* Width and height */
+ p[26] = (vb->width >> 3) & 0xff;
+ p[27] = ((vb->height >> 9) & 0x0f) | 0x10;
+ p[28] = (vb->height >> 1) & 0xff;
+
+ /* Interlace */
+ if (vh.interlace)
+ p[29] |= 0x20;
+
+ /* Adjust the dma buffer past this header */
+ vb->size += sizeof(vid_vop_header);
+ vbuf += sizeof(vid_vop_header);
+ }
+
+ /* Now get the actual mpeg payload */
+ frame_off = (enc_buf->off + sizeof(vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
+ frame_size = enc_buf->size - sizeof(vh);
+ ret = enc_get_mpeg_dma_t(solo_dev, vbuf, frame_off, frame_size);
+ if (WARN_ON_ONCE(ret))
+ return -1;
+
+ return 0;
+}
+
+/* On successful return (0), leaves solo_enc->lock unlocked */
+static int solo_enc_fillbuf(struct solo_enc_fh *fh,
+ struct videobuf_buffer *vb)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ struct solo_enc_buf *enc_buf = NULL;
+ dma_addr_t vbuf;
+ int ret;
+ u16 idx = fh->rd_idx;
+
+ while (idx != solo_dev->enc_wr_idx) {
+ struct solo_enc_buf *ebuf = &solo_dev->enc_buf[idx];
+ idx = (idx + 1) % SOLO_NR_RING_BUFS;
+ if (fh->fmt == V4L2_PIX_FMT_MPEG) {
+ if (fh->type != ebuf->type)
+ continue;
+ if (ebuf->ch == solo_enc->ch) {
+ enc_buf = ebuf;
+ break;
+ }
+ } else if (ebuf->ch == solo_enc->ch) {
+ /* For mjpeg, keep reading to the newest frame */
+ enc_buf = ebuf;
+ }
+ }
+
+ fh->rd_idx = idx;
+
+ if (!enc_buf)
+ return -1;
+
+ if ((fh->fmt == V4L2_PIX_FMT_MPEG &&
+ vb->bsize < enc_buf->size) ||
+ (fh->fmt == V4L2_PIX_FMT_MJPEG &&
+ vb->bsize < (enc_buf->jpeg_size + sizeof(jpeg_header)))) {
+ return -1;
+ }
+
+ if (!(vbuf = videobuf_to_dma_contig(vb)))
+ return -1;
+
+ /* Is it ok that we mess with this buffer out of lock? */
+ spin_unlock(&solo_enc->lock);
+
+ if (fh->fmt == V4L2_PIX_FMT_MPEG)
+ ret = solo_fill_mpeg(fh, enc_buf, vb, vbuf);
+ else
+ ret = solo_fill_jpeg(fh, enc_buf, vb, vbuf);
+
+ if (ret) // Ignore failures
+ return 0;
+
+ list_del(&vb->queue);
+ vb->field_count++;
+ vb->ts = enc_buf->ts;
+ vb->state = VIDEOBUF_DONE;
+
+ wake_up(&vb->done);
+
+ return 0;
+}
+
+static void solo_enc_thread_try(struct solo_enc_fh *fh)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct videobuf_buffer *vb;
+
+ for (;;) {
+ spin_lock(&solo_enc->lock);
+
+ if (list_empty(&fh->vidq_active))
+ break;
+
+ vb = list_first_entry(&fh->vidq_active,
+ struct videobuf_buffer, queue);
+
+ if (!waitqueue_active(&vb->done))
+ break;
+
+ /* On success, returns with solo_enc->lock unlocked */
+ if (solo_enc_fillbuf(fh, vb))
+ break;
+ }
+
+ assert_spin_locked(&solo_enc->lock);
+ spin_unlock(&solo_enc->lock);
+}
+
+static int solo_enc_thread(void *data)
+{
+ struct solo_enc_fh *fh = data;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_freezable();
+ add_wait_queue(&solo_enc->thread_wait, &wait);
+
+ for (;;) {
+ long timeout = schedule_timeout_interruptible(HZ);
+ if (timeout == -ERESTARTSYS || kthread_should_stop())
+ break;
+ solo_enc_thread_try(fh);
+ try_to_freeze();
+ }
+
+ remove_wait_queue(&solo_enc->thread_wait, &wait);
+
+ return 0;
+}
+
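+/* Motion interrupt: mark every channel whose bit is set in
+ * SOLO_VI_MOT_STATUS as having detected motion. */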
+void solo_motion_isr(struct solo6010_dev *solo_dev)
+{
+ u32 status;
+ int i;
+
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_MOTION);
+
+ status = solo_reg_read(solo_dev, SOLO_VI_MOT_STATUS);
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ struct solo_enc_dev *solo_enc = solo_dev->v4l2_enc[i];
+
+ BUG_ON(solo_enc == NULL);
+
+ if (solo_enc->motion_detected)
+ continue;
+ if (!(status & (1 << i)))
+ continue;
+
+ solo_enc->motion_detected = 1;
+ }
+}
+
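+/* Encoder interrupt: walk the hardware MPEG4/JPEG queues from the last
+ * serviced index to the current one, record each finished frame in the
+ * software ring buffer and wake the per-channel encoder threads. */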
+void solo_enc_v4l2_isr(struct solo6010_dev *solo_dev)
+{
+ struct solo_enc_buf *enc_buf;
+ struct videnc_status vstatus;
+ u32 mpeg_current, mpeg_next, mpeg_size;
+ u32 jpeg_current, jpeg_next, jpeg_size;
+ u32 reg_mpeg_size;
+ u8 cur_q, vop_type;
+ u8 ch;
+ enum solo_enc_types enc_type;
+
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_ENCODER);
+
+ vstatus.status11 = solo_reg_read(solo_dev, SOLO_VE_STATE(11));
+ cur_q = (vstatus.status11_st.last_queue + 1) % MP4_QS;
+
+ vstatus.status0 = solo_reg_read(solo_dev, SOLO_VE_STATE(0));
+ reg_mpeg_size = (vstatus.status0_st.mp4_enc_code_size + 64 + 32) &
+ (~31);
+
+ while (solo_dev->enc_idx != cur_q) {
+ mpeg_current = solo_reg_read(solo_dev,
+ SOLO_VE_MPEG4_QUE(solo_dev->enc_idx));
+ jpeg_current = solo_reg_read(solo_dev,
+ SOLO_VE_JPEG_QUE(solo_dev->enc_idx));
+ solo_dev->enc_idx = (solo_dev->enc_idx + 1) % MP4_QS;
+ mpeg_next = solo_reg_read(solo_dev,
+ SOLO_VE_MPEG4_QUE(solo_dev->enc_idx));
+ jpeg_next = solo_reg_read(solo_dev,
+ SOLO_VE_JPEG_QUE(solo_dev->enc_idx));
+
+ if ((ch = (mpeg_current >> 24) & 0x1f) >= SOLO_MAX_CHANNELS) {
+ ch -= SOLO_MAX_CHANNELS;
+ enc_type = SOLO_ENC_TYPE_EXT;
+ } else
+ enc_type = SOLO_ENC_TYPE_STD;
+
+ vop_type = (mpeg_current >> 29) & 3;
+
+ mpeg_current &= 0x00ffffff;
+ mpeg_next &= 0x00ffffff;
+ jpeg_current &= 0x00ffffff;
+ jpeg_next &= 0x00ffffff;
+
+ mpeg_size = (SOLO_MP4E_EXT_SIZE(solo_dev) +
+ mpeg_next - mpeg_current) %
+ SOLO_MP4E_EXT_SIZE(solo_dev);
+
+ jpeg_size = (SOLO_JPEG_EXT_SIZE(solo_dev) +
+ jpeg_next - jpeg_current) %
+ SOLO_JPEG_EXT_SIZE(solo_dev);
+
+ /* XXX I think this means we had a ring overflow? */
+ if (mpeg_current > mpeg_next && mpeg_size != reg_mpeg_size) {
+ enc_reset_gop(solo_dev, ch);
+ continue;
+ }
+
+ /* When resetting the GOP, skip frames until I-frame */
+ if (enc_gop_reset(solo_dev, ch, vop_type))
+ continue;
+
+ enc_buf = &solo_dev->enc_buf[solo_dev->enc_wr_idx];
+
+ enc_buf->vop = vop_type;
+ enc_buf->ch = ch;
+ enc_buf->off = mpeg_current;
+ enc_buf->size = mpeg_size;
+ enc_buf->jpeg_off = jpeg_current;
+ enc_buf->jpeg_size = jpeg_size;
+ enc_buf->type = enc_type;
+
+ do_gettimeofday(&enc_buf->ts);
+
+ solo_dev->enc_wr_idx = (solo_dev->enc_wr_idx + 1) %
+ SOLO_NR_RING_BUFS;
+
+ wake_up_interruptible(&solo_dev->v4l2_enc[ch]->thread_wait);
+ }
+
+ return;
+}
+
+static int solo_enc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ *size = FRAME_BUF_SIZE;
+
+ if (*count < MIN_VID_BUFFERS)
+ *count = MIN_VID_BUFFERS;
+
+ return 0;
+}
+
+static int solo_enc_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct solo_enc_fh *fh = vq->priv_data;
+ struct solo_enc_dev *solo_enc = fh->enc;
+
+ vb->size = FRAME_BUF_SIZE;
+ if (vb->baddr != 0 && vb->bsize < vb->size)
+ return -EINVAL;
+
+ /* These properties only change when queue is idle */
+ vb->width = solo_enc->width;
+ vb->height = solo_enc->height;
+ vb->field = field;
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ int rc = videobuf_iolock(vq, vb, NULL);
+ if (rc < 0) {
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ return rc;
+ }
+ }
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static void solo_enc_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct solo_enc_fh *fh = vq->priv_data;
+
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &fh->vidq_active);
+ wake_up_interruptible(&fh->enc->thread_wait);
+}
+
+static void solo_enc_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops solo_enc_video_qops = {
+ .buf_setup = solo_enc_buf_setup,
+ .buf_prepare = solo_enc_buf_prepare,
+ .buf_queue = solo_enc_buf_queue,
+ .buf_release = solo_enc_buf_release,
+};
+
+static unsigned int solo_enc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct solo_enc_fh *fh = file->private_data;
+
+ return videobuf_poll_stream(file, &fh->vidq, wait);
+}
+
+static int solo_enc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct solo_enc_fh *fh = file->private_data;
+
+ return videobuf_mmap_mapper(&fh->vidq, vma);
+}
+
+static int solo_enc_open(struct file *file)
+{
+ struct solo_enc_dev *solo_enc = video_drvdata(file);
+ struct solo_enc_fh *fh;
+
+ if ((fh = kzalloc(sizeof(*fh), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ spin_lock(&solo_enc->lock);
+
+ fh->enc = solo_enc;
+ file->private_data = fh;
+ INIT_LIST_HEAD(&fh->vidq_active);
+ fh->fmt = V4L2_PIX_FMT_MPEG;
+ fh->type = SOLO_ENC_TYPE_STD;
+
+ videobuf_queue_dma_contig_init(&fh->vidq, &solo_enc_video_qops,
+ &solo_enc->solo_dev->pdev->dev,
+ &solo_enc->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct videobuf_buffer), fh);
+
+ spin_unlock(&solo_enc->lock);
+
+ return 0;
+}
+
+static ssize_t solo_enc_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct solo_enc_fh *fh = file->private_data;
+ struct solo_enc_dev *solo_enc = fh->enc;
+
+ /* Make sure the encoder is on */
+ if (!fh->enc_on) {
+ int ret;
+
+ spin_lock(&solo_enc->lock);
+ ret = solo_enc_on(fh);
+ spin_unlock(&solo_enc->lock);
+ if (ret)
+ return ret;
+ }
+
+ return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
+ file->f_flags & O_NONBLOCK);
+}
+
+static int solo_enc_release(struct file *file)
+{
+ struct solo_enc_fh *fh = file->private_data;
+
+ videobuf_stop(&fh->vidq);
+ videobuf_mmap_free(&fh->vidq);
+ solo_enc_off(fh);
+ kfree(fh);
+
+ return 0;
+}
+
+static int solo_enc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ strcpy(cap->driver, SOLO6010_NAME);
+ snprintf(cap->card, sizeof(cap->card), "Softlogic 6010 Enc %d",
+ solo_enc->ch);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI %s",
+ pci_name(solo_dev->pdev));
+ cap->version = SOLO6010_VER_NUM;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int solo_enc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ if (input->index)
+ return -EINVAL;
+
+ snprintf(input->name, sizeof(input->name), "Encoder %d",
+ solo_enc->ch + 1);
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
+ input->std = V4L2_STD_NTSC_M;
+ else
+ input->std = V4L2_STD_PAL_M;
+
+ if (!tw28_get_video_status(solo_dev, solo_enc->ch))
+ input->status = V4L2_IN_ST_NO_SIGNAL;
+
+ return 0;
+}
+
+static int solo_enc_set_input(struct file *file, void *priv, unsigned int index)
+{
+ if (index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int solo_enc_get_input(struct file *file, void *priv,
+ unsigned int *index)
+{
+ *index = 0;
+
+ return 0;
+}
+
+static int solo_enc_enum_fmt_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ switch (f->index) {
+ case 0:
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+ strcpy(f->description, "MPEG-4 AVC");
+ break;
+ case 1:
+ f->pixelformat = V4L2_PIX_FMT_MJPEG;
+ strcpy(f->description, "MJPEG");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ f->flags = V4L2_FMT_FLAG_COMPRESSED;
+
+ return 0;
+}
+
+static int solo_enc_try_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ if (pix->pixelformat != V4L2_PIX_FMT_MPEG &&
+ pix->pixelformat != V4L2_PIX_FMT_MJPEG)
+ return -EINVAL;
+
+	/* We cannot change width/height in mid-read */
+ if (atomic_read(&solo_enc->readers) > 0) {
+ if (pix->width != solo_enc->width ||
+ pix->height != solo_enc->height)
+ return -EBUSY;
+ } else if (!(pix->width == solo_dev->video_hsize &&
+ pix->height == solo_dev->video_vsize << 1) &&
+ !(pix->width == solo_dev->video_hsize >> 1 &&
+ pix->height == solo_dev->video_vsize)) {
+ /* Default to CIF 1/2 size */
+ pix->width = solo_dev->video_hsize >> 1;
+ pix->height = solo_dev->video_vsize;
+ }
+
+	if (pix->field != V4L2_FIELD_INTERLACED)
+		pix->field = V4L2_FIELD_INTERLACED;
+
+ /* Just set these */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ pix->sizeimage = FRAME_BUF_SIZE;
+
+ return 0;
+}
+
+static int solo_enc_set_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ spin_lock(&solo_enc->lock);
+
+ if ((ret = solo_enc_try_fmt_cap(file, priv, f))) {
+ spin_unlock(&solo_enc->lock);
+ return ret;
+ }
+
+ if (pix->width == solo_dev->video_hsize)
+ solo_enc->mode = SOLO_ENC_MODE_D1;
+ else
+ solo_enc->mode = SOLO_ENC_MODE_CIF;
+
+ /* This does not change the encoder at all */
+ fh->fmt = pix->pixelformat;
+
+ if (pix->priv)
+ fh->type = SOLO_ENC_TYPE_EXT;
+ ret = solo_enc_on(fh);
+
+ spin_unlock(&solo_enc->lock);
+
+ return ret;
+}
+
+static int solo_enc_get_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ pix->width = solo_enc->width;
+ pix->height = solo_enc->height;
+ pix->pixelformat = fh->fmt;
+ pix->field = solo_enc->interlaced ? V4L2_FIELD_INTERLACED :
+ V4L2_FIELD_NONE;
+ pix->sizeimage = FRAME_BUF_SIZE;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
+
+static int solo_enc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct solo_enc_fh *fh = priv;
+
+ return videobuf_reqbufs(&fh->vidq, req);
+}
+
+static int solo_enc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct solo_enc_fh *fh = priv;
+
+ return videobuf_querybuf(&fh->vidq, buf);
+}
+
+static int solo_enc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct solo_enc_fh *fh = priv;
+
+ return videobuf_qbuf(&fh->vidq, buf);
+}
+
+static int solo_enc_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ int ret;
+
+ /* Make sure the encoder is on */
+ if (!fh->enc_on) {
+ spin_lock(&solo_enc->lock);
+ ret = solo_enc_on(fh);
+ spin_unlock(&solo_enc->lock);
+ if (ret)
+ return ret;
+ }
+
+ ret = videobuf_dqbuf(&fh->vidq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Signal motion detection */
+ if (solo_is_motion_on(solo_enc)) {
+ buf->flags |= V4L2_BUF_FLAG_MOTION_ON;
+ if (solo_enc->motion_detected) {
+ buf->flags |= V4L2_BUF_FLAG_MOTION_DETECTED;
+ solo_reg_write(solo_enc->solo_dev, SOLO_VI_MOT_CLEAR,
+ 1 << solo_enc->ch);
+ solo_enc->motion_detected = 0;
+ }
+ }
+
+ /* Check for key frame on mpeg data */
+ if (fh->fmt == V4L2_PIX_FMT_MPEG) {
+ struct videobuf_buffer *vb = fh->vidq.bufs[buf->index];
+ u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
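+		/* p[3] is the fourth byte of an MPEG-4 start code; 0x00
+		 * (a video object start code) is assumed to mark the
+		 * headers that only precede a key frame. */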
+ if (p[3] == 0x00)
+ buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ }
+
+ return 0;
+}
+
+static int solo_enc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct solo_enc_fh *fh = priv;
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return videobuf_streamon(&fh->vidq);
+}
+
+static int solo_enc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct solo_enc_fh *fh = priv;
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return videobuf_streamoff(&fh->vidq);
+}
+
+static int solo_enc_s_std(struct file *file, void *priv, v4l2_std_id *i)
+{
+ return 0;
+}
+
+static int solo_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo6010_dev *solo_dev = fh->enc->solo_dev;
+
+ if (fsize->pixel_format != V4L2_PIX_FMT_MPEG)
+ return -EINVAL;
+
+ switch (fsize->index) {
+ case 0:
+ fsize->discrete.width = solo_dev->video_hsize >> 1;
+ fsize->discrete.height = solo_dev->video_vsize;
+ break;
+ case 1:
+ fsize->discrete.width = solo_dev->video_hsize;
+ fsize->discrete.height = solo_dev->video_vsize << 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+
+ return 0;
+}
+
+static int solo_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fintv)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo6010_dev *solo_dev = fh->enc->solo_dev;
+
+ if (fintv->pixel_format != V4L2_PIX_FMT_MPEG || fintv->index)
+ return -EINVAL;
+
+ fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+
+ fintv->stepwise.min.numerator = solo_dev->fps;
+ fintv->stepwise.min.denominator = 1;
+
+ fintv->stepwise.max.numerator = solo_dev->fps;
+ fintv->stepwise.max.denominator = 15;
+
+ fintv->stepwise.step.numerator = 1;
+ fintv->stepwise.step.denominator = 1;
+
+ return 0;
+}
+
+static int solo_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ struct v4l2_captureparm *cp = &sp->parm.capture;
+
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+ cp->timeperframe.numerator = solo_enc->interval;
+ cp->timeperframe.denominator = solo_dev->fps;
+ cp->capturemode = 0;
+ /* XXX: Shouldn't we be able to get/set this from videobuf? */
+ cp->readbuffers = 2;
+
+ return 0;
+}
+
+static int solo_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+ struct v4l2_captureparm *cp = &sp->parm.capture;
+
+ spin_lock(&solo_enc->lock);
+
+ if (atomic_read(&solo_enc->readers) > 0) {
+ spin_unlock(&solo_enc->lock);
+ return -EBUSY;
+ }
+
+ if ((cp->timeperframe.numerator == 0) ||
+ (cp->timeperframe.denominator == 0)) {
+ /* reset framerate */
+ cp->timeperframe.numerator = 1;
+ cp->timeperframe.denominator = solo_dev->fps;
+ }
+
+ if (cp->timeperframe.denominator != solo_dev->fps)
+ cp->timeperframe.denominator = solo_dev->fps;
+
+ if (cp->timeperframe.numerator > 15)
+ cp->timeperframe.numerator = 15;
+
+ solo_enc->interval = cp->timeperframe.numerator;
+
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+
+ solo_enc->gop = max(solo_dev->fps / solo_enc->interval, 1);
+ solo_update_mode(solo_enc);
+
+ spin_unlock(&solo_enc->lock);
+
+ return 0;
+}
+
+static int solo_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ qc->id = v4l2_ctrl_next(solo_ctrl_classes, qc->id);
+ if (!qc->id)
+ return -EINVAL;
+
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, 0x00, 0xff, 1, 0x80);
+ case V4L2_CID_SHARPNESS:
+ return v4l2_ctrl_query_fill(qc, 0x00, 0x0f, 1, 0x00);
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ return v4l2_ctrl_query_fill(
+ qc, V4L2_MPEG_VIDEO_ENCODING_MPEG_1,
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 1,
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC);
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return v4l2_ctrl_query_fill(qc, 1, 255, 1, solo_dev->fps);
+#ifdef PRIVATE_CIDS
+ case V4L2_CID_MOTION_THRESHOLD:
+ qc->flags |= V4L2_CTRL_FLAG_SLIDER;
+ qc->type = V4L2_CTRL_TYPE_INTEGER;
+ qc->minimum = 0;
+ qc->maximum = 0xffff;
+ qc->step = 1;
+ qc->default_value = SOLO_DEF_MOT_THRESH;
+ strlcpy(qc->name, "Motion Detection Threshold",
+ sizeof(qc->name));
+ return 0;
+ case V4L2_CID_MOTION_ENABLE:
+ qc->type = V4L2_CTRL_TYPE_BOOLEAN;
+ qc->minimum = 0;
+ qc->maximum = qc->step = 1;
+ qc->default_value = 0;
+ strlcpy(qc->name, "Motion Detection Enable", sizeof(qc->name));
+ return 0;
+#else
+ case V4L2_CID_MOTION_THRESHOLD:
+ return v4l2_ctrl_query_fill(qc, 0, 0xffff, 1,
+ SOLO_DEF_MOT_THRESH);
+ case V4L2_CID_MOTION_ENABLE:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
+#endif
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ qc->type = V4L2_CTRL_TYPE_STRING;
+ qc->minimum = 0;
+ qc->maximum = OSD_TEXT_MAX;
+ qc->step = 1;
+ qc->default_value = 0;
+ strlcpy(qc->name, "OSD Text", sizeof(qc->name));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int solo_querymenu(struct file *file, void *priv,
+ struct v4l2_querymenu *qmenu)
+{
+ struct v4l2_queryctrl qctrl;
+ int err;
+
+ qctrl.id = qmenu->id;
+ if ((err = solo_queryctrl(file, priv, &qctrl)))
+ return err;
+
+ return v4l2_ctrl_query_menu(qmenu, &qctrl, NULL);
+}
+
+static int solo_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_SHARPNESS:
+ return tw28_get_ctrl_val(solo_dev, ctrl->id, solo_enc->ch,
+ &ctrl->value);
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrl->value = solo_enc->gop;
+ break;
+ case V4L2_CID_MOTION_THRESHOLD:
+ ctrl->value = solo_enc->motion_thresh;
+ break;
+ case V4L2_CID_MOTION_ENABLE:
+ ctrl->value = solo_is_motion_on(solo_enc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int solo_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_SHARPNESS:
+ return tw28_set_ctrl_val(solo_dev, ctrl->id, solo_enc->ch,
+ ctrl->value);
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ if (ctrl->value != V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC)
+ return -ERANGE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ if (ctrl->value < 1 || ctrl->value > 255)
+ return -ERANGE;
+ solo_enc->gop = ctrl->value;
+ solo_reg_write(solo_dev, SOLO_VE_CH_GOP(solo_enc->ch),
+ solo_enc->gop);
+ solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(solo_enc->ch),
+ solo_enc->gop);
+ break;
+ case V4L2_CID_MOTION_THRESHOLD:
+ /* TODO accept value on lower 16-bits and use high
+ * 16-bits to assign the value to a specific block */
+ if (ctrl->value < 0 || ctrl->value > 0xffff)
+ return -ERANGE;
+ solo_enc->motion_thresh = ctrl->value;
+ solo_set_motion_threshold(solo_dev, solo_enc->ch, ctrl->value);
+ break;
+ case V4L2_CID_MOTION_ENABLE:
+ solo_motion_toggle(solo_enc, ctrl->value);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int solo_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ int i;
+
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = (ctrls->controls + i);
+ int err;
+
+ switch (ctrl->id) {
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+			if (ctrl->size - 1 > OSD_TEXT_MAX) {
+				err = -ERANGE;
+			} else if (copy_from_user(solo_enc->osd_text,
+						  ctrl->string,
+						  OSD_TEXT_MAX)) {
+				/* copy_from_user returns bytes not copied */
+				err = -EFAULT;
+			} else {
+				solo_enc->osd_text[OSD_TEXT_MAX] = '\0';
+				err = solo_osd_print(solo_enc);
+			}
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err < 0) {
+ ctrls->error_idx = i;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int solo_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct solo_enc_fh *fh = priv;
+ struct solo_enc_dev *solo_enc = fh->enc;
+ int i;
+
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = (ctrls->controls + i);
+ int err;
+
+ switch (ctrl->id) {
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ if (ctrl->size < OSD_TEXT_MAX) {
+ ctrl->size = OSD_TEXT_MAX;
+ err = -ENOSPC;
+			} else if (copy_to_user(ctrl->string,
+						solo_enc->osd_text,
+						OSD_TEXT_MAX)) {
+				/* copy_to_user returns bytes not copied */
+				err = -EFAULT;
+			} else {
+				err = 0;
+			}
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err < 0) {
+ ctrls->error_idx = i;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct v4l2_file_operations solo_enc_fops = {
+ .owner = THIS_MODULE,
+ .open = solo_enc_open,
+ .release = solo_enc_release,
+ .read = solo_enc_read,
+ .poll = solo_enc_poll,
+ .mmap = solo_enc_mmap,
+ .ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops solo_enc_ioctl_ops = {
+ .vidioc_querycap = solo_enc_querycap,
+ .vidioc_s_std = solo_enc_s_std,
+ /* Input callbacks */
+ .vidioc_enum_input = solo_enc_enum_input,
+ .vidioc_s_input = solo_enc_set_input,
+ .vidioc_g_input = solo_enc_get_input,
+ /* Video capture format callbacks */
+ .vidioc_enum_fmt_vid_cap = solo_enc_enum_fmt_cap,
+ .vidioc_try_fmt_vid_cap = solo_enc_try_fmt_cap,
+ .vidioc_s_fmt_vid_cap = solo_enc_set_fmt_cap,
+ .vidioc_g_fmt_vid_cap = solo_enc_get_fmt_cap,
+ /* Streaming I/O */
+ .vidioc_reqbufs = solo_enc_reqbufs,
+ .vidioc_querybuf = solo_enc_querybuf,
+ .vidioc_qbuf = solo_enc_qbuf,
+ .vidioc_dqbuf = solo_enc_dqbuf,
+ .vidioc_streamon = solo_enc_streamon,
+ .vidioc_streamoff = solo_enc_streamoff,
+ /* Frame size and interval */
+ .vidioc_enum_framesizes = solo_enum_framesizes,
+ .vidioc_enum_frameintervals = solo_enum_frameintervals,
+ /* Video capture parameters */
+ .vidioc_s_parm = solo_s_parm,
+ .vidioc_g_parm = solo_g_parm,
+ /* Controls */
+ .vidioc_queryctrl = solo_queryctrl,
+ .vidioc_querymenu = solo_querymenu,
+ .vidioc_g_ctrl = solo_g_ctrl,
+ .vidioc_s_ctrl = solo_s_ctrl,
+ .vidioc_g_ext_ctrls = solo_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = solo_s_ext_ctrls,
+};
+
+static struct video_device solo_enc_template = {
+ .name = SOLO6010_NAME,
+ .fops = &solo_enc_fops,
+ .ioctl_ops = &solo_enc_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+
+ .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static struct solo_enc_dev *solo_enc_alloc(struct solo6010_dev *solo_dev, u8 ch)
+{
+ struct solo_enc_dev *solo_enc;
+ int ret;
+
+ solo_enc = kzalloc(sizeof(*solo_enc), GFP_KERNEL);
+ if (!solo_enc)
+ return ERR_PTR(-ENOMEM);
+
+ solo_enc->vfd = video_device_alloc();
+ if (!solo_enc->vfd) {
+ kfree(solo_enc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ solo_enc->solo_dev = solo_dev;
+ solo_enc->ch = ch;
+
+ *solo_enc->vfd = solo_enc_template;
+ solo_enc->vfd->parent = &solo_dev->pdev->dev;
+ ret = video_register_device(solo_enc->vfd, VFL_TYPE_GRABBER,
+ video_nr);
+ if (ret < 0) {
+ video_device_release(solo_enc->vfd);
+ kfree(solo_enc);
+ return ERR_PTR(ret);
+ }
+
+ video_set_drvdata(solo_enc->vfd, solo_enc);
+
+ snprintf(solo_enc->vfd->name, sizeof(solo_enc->vfd->name),
+ "%s-enc (%i/%i)", SOLO6010_NAME, solo_dev->vfd->num,
+ solo_enc->vfd->num);
+
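+	/* If a fixed start node was requested via video_nr, hand out
+	 * consecutive device nodes for the per-channel encoders. */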
+ if (video_nr >= 0)
+ video_nr++;
+
+ spin_lock_init(&solo_enc->lock);
+ init_waitqueue_head(&solo_enc->thread_wait);
+ atomic_set(&solo_enc->readers, 0);
+
+ solo_enc->qp = SOLO_DEFAULT_QP;
+ solo_enc->gop = solo_dev->fps;
+ solo_enc->interval = 1;
+ solo_enc->mode = SOLO_ENC_MODE_CIF;
+ solo_enc->motion_thresh = SOLO_DEF_MOT_THRESH;
+
+ spin_lock(&solo_enc->lock);
+ solo_update_mode(solo_enc);
+ spin_unlock(&solo_enc->lock);
+
+ return solo_enc;
+}
+
+static void solo_enc_free(struct solo_enc_dev *solo_enc)
+{
+ if (solo_enc == NULL)
+ return;
+
+ video_unregister_device(solo_enc->vfd);
+ kfree(solo_enc);
+}
+
+int solo_enc_v4l2_init(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ solo_dev->v4l2_enc[i] = solo_enc_alloc(solo_dev, i);
+ if (IS_ERR(solo_dev->v4l2_enc[i]))
+ break;
+ }
+
+ if (i != solo_dev->nr_chans) {
+ int ret = PTR_ERR(solo_dev->v4l2_enc[i]);
+ while (i--)
+ solo_enc_free(solo_dev->v4l2_enc[i]);
+ return ret;
+ }
+
+ /* D1@MAX-FPS * 4 */
+ solo_dev->enc_bw_remain = solo_dev->fps * 4 * 4;
+
+ dev_info(&solo_dev->pdev->dev, "Encoders as /dev/video%d-%d\n",
+ solo_dev->v4l2_enc[0]->vfd->num,
+ solo_dev->v4l2_enc[solo_dev->nr_chans - 1]->vfd->num);
+
+ return 0;
+}
+
+void solo_enc_v4l2_exit(struct solo6010_dev *solo_dev)
+{
+ int i;
+
+ solo6010_irq_off(solo_dev, SOLO_IRQ_MOTION);
+
+ for (i = 0; i < solo_dev->nr_chans; i++)
+ solo_enc_free(solo_dev->v4l2_enc[i]);
+}
diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c
new file mode 100644
index 00000000000..9537cc6ee3b
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010-v4l2.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "solo6010.h"
+#include "solo6010-tw28.h"
+
+#define SOLO_HW_BPL 2048
+#define SOLO_DISP_PIX_FIELD V4L2_FIELD_INTERLACED
+#define SOLO_DISP_BUF_SIZE	(64 * 1024) /* 64k */
+
+/* Image size is two fields, SOLO_HW_BPL is one horizontal line */
+#define solo_vlines(__solo) (__solo->video_vsize * 2)
+#define solo_image_size(__solo) (solo_bytesperline(__solo) * \
+ solo_vlines(__solo))
+#define solo_bytesperline(__solo) (__solo->video_hsize * 2)
+
+#define MIN_VID_BUFFERS 4
+
+/* Simple file handle */
+struct solo_filehandle {
+ struct solo6010_dev *solo_dev;
+ struct videobuf_queue vidq;
+ struct task_struct *kthread;
+ spinlock_t slock;
+ int old_write;
+ struct list_head vidq_active;
+};
+
+unsigned video_nr = -1;
+module_param(video_nr, uint, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect (default)");
+
+static void erase_on(struct solo6010_dev *solo_dev)
+{
+ solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, SOLO_VO_DISP_ERASE_ON);
+ solo_dev->erasing = 1;
+ solo_dev->frame_blank = 0;
+}
+
+static int erase_off(struct solo6010_dev *solo_dev)
+{
+ if (!solo_dev->erasing)
+ return 0;
+
+ /* First time around, assert erase off */
+ if (!solo_dev->frame_blank)
+ solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, 0);
+ /* Keep the erasing flag on for 8 frames minimum */
+ if (solo_dev->frame_blank++ >= 8)
+ solo_dev->erasing = 0;
+
+ return 1;
+}
+
+void solo_video_in_isr(struct solo6010_dev *solo_dev)
+{
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_VIDEO_IN);
+ wake_up_interruptible(&solo_dev->disp_thread_wait);
+}
+
+static void solo_win_setup(struct solo6010_dev *solo_dev, u8 ch,
+ int sx, int sy, int ex, int ey, int scale)
+{
+ if (ch >= solo_dev->nr_chans)
+ return;
+
+ /* Here, we just keep window/channel the same */
+ solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(ch),
+ SOLO_VI_WIN_CHANNEL(ch) |
+ SOLO_VI_WIN_SX(sx) |
+ SOLO_VI_WIN_EX(ex) |
+ SOLO_VI_WIN_SCALE(scale));
+
+ solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
+ SOLO_VI_WIN_SY(sy) |
+ SOLO_VI_WIN_EY(ey));
+}
+
+static int solo_v4l2_ch_ext_4up(struct solo6010_dev *solo_dev, u8 idx, int on)
+{
+ u8 ch = idx * 4;
+
+ if (ch >= solo_dev->nr_chans)
+ return -EINVAL;
+
+ if (!on) {
+ u8 i;
+ for (i = ch; i < ch + 4; i++)
+ solo_win_setup(solo_dev, i, solo_dev->video_hsize,
+ solo_vlines(solo_dev),
+ solo_dev->video_hsize,
+ solo_vlines(solo_dev), 0);
+ return 0;
+ }
+
+ /* Row 1 */
+ solo_win_setup(solo_dev, ch, 0, 0, solo_dev->video_hsize / 2,
+ solo_vlines(solo_dev) / 2, 3);
+ solo_win_setup(solo_dev, ch + 1, solo_dev->video_hsize / 2, 0,
+ solo_dev->video_hsize, solo_vlines(solo_dev) / 2, 3);
+ /* Row 2 */
+ solo_win_setup(solo_dev, ch + 2, 0, solo_vlines(solo_dev) / 2,
+ solo_dev->video_hsize / 2, solo_vlines(solo_dev), 3);
+ solo_win_setup(solo_dev, ch + 3, solo_dev->video_hsize / 2,
+ solo_vlines(solo_dev) / 2, solo_dev->video_hsize,
+ solo_vlines(solo_dev), 3);
+
+ return 0;
+}
+
+static int solo_v4l2_ch_ext_16up(struct solo6010_dev *solo_dev, int on)
+{
+ int sy, ysize, hsize, i;
+
+ if (!on) {
+ for (i = 0; i < 16; i++)
+ solo_win_setup(solo_dev, i, solo_dev->video_hsize,
+ solo_vlines(solo_dev),
+ solo_dev->video_hsize,
+ solo_vlines(solo_dev), 0);
+ return 0;
+ }
+
+ ysize = solo_vlines(solo_dev) / 4;
+ hsize = solo_dev->video_hsize / 4;
+
+ for (sy = 0, i = 0; i < 4; i++, sy += ysize) {
+ solo_win_setup(solo_dev, i * 4, 0, sy, hsize,
+ sy + ysize, 5);
+ solo_win_setup(solo_dev, (i * 4) + 1, hsize, sy,
+ hsize * 2, sy + ysize, 5);
+ solo_win_setup(solo_dev, (i * 4) + 2, hsize * 2, sy,
+ hsize * 3, sy + ysize, 5);
+ solo_win_setup(solo_dev, (i * 4) + 3, hsize * 3, sy,
+ solo_dev->video_hsize, sy + ysize, 5);
+ }
+
+ return 0;
+}
+
+static int solo_v4l2_ch(struct solo6010_dev *solo_dev, u8 ch, int on)
+{
+ u8 ext_ch;
+
+ if (ch < solo_dev->nr_chans) {
+ solo_win_setup(solo_dev, ch, on ? 0 : solo_dev->video_hsize,
+ on ? 0 : solo_vlines(solo_dev),
+ solo_dev->video_hsize, solo_vlines(solo_dev),
+ on ? 1 : 0);
+ return 0;
+ }
+
+ if (ch >= solo_dev->nr_chans + solo_dev->nr_ext)
+ return -EINVAL;
+
+ ext_ch = ch - solo_dev->nr_chans;
+
+ /* 4up's first */
+ if (ext_ch < 4)
+ return solo_v4l2_ch_ext_4up(solo_dev, ext_ch, on);
+
+ /* Remaining case is 16up for 16-port */
+ return solo_v4l2_ch_ext_16up(solo_dev, on);
+}
+
+static int solo_v4l2_set_ch(struct solo6010_dev *solo_dev, u8 ch)
+{
+ if (ch >= solo_dev->nr_chans + solo_dev->nr_ext)
+ return -EINVAL;
+
+ erase_on(solo_dev);
+
+ solo_v4l2_ch(solo_dev, solo_dev->cur_disp_ch, 0);
+ solo_v4l2_ch(solo_dev, ch, 1);
+
+ solo_dev->cur_disp_ch = ch;
+
+ return 0;
+}
+
+static void solo_fillbuf(struct solo_filehandle *fh,
+ struct videobuf_buffer *vb)
+{
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+ dma_addr_t vbuf;
+ unsigned int fdma_addr;
+ int frame_size;
+ int error = 1;
+ int i;
+
+ if (!(vbuf = videobuf_to_dma_contig(vb)))
+ goto finish_buf;
+
+ if (erase_off(solo_dev)) {
+ void *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+ int image_size = solo_image_size(solo_dev);
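+		/* The hardware is still blanking: hand back a solid black
+		 * frame (UYVY: chroma bytes 0x80, luma bytes 0x00). */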
+ for (i = 0; i < image_size; i += 2) {
+ ((u8 *)p)[i] = 0x80;
+ ((u8 *)p)[i + 1] = 0x00;
+ }
+ error = 0;
+ goto finish_buf;
+ }
+
+ frame_size = SOLO_HW_BPL * solo_vlines(solo_dev);
+ fdma_addr = SOLO_DISP_EXT_ADDR(solo_dev) + (fh->old_write * frame_size);
+
+ for (i = 0; i < frame_size / SOLO_DISP_BUF_SIZE; i++) {
+ int j;
+ for (j = 0; j < (SOLO_DISP_BUF_SIZE / SOLO_HW_BPL); j++) {
+ if (solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_DISP, 0,
+ vbuf, fdma_addr + (j * SOLO_HW_BPL),
+ solo_bytesperline(solo_dev)))
+ goto finish_buf;
+ vbuf += solo_bytesperline(solo_dev);
+ }
+ fdma_addr += SOLO_DISP_BUF_SIZE;
+ }
+ error = 0;
+
+finish_buf:
+ if (error) {
+ vb->state = VIDEOBUF_ERROR;
+ } else {
+ vb->state = VIDEOBUF_DONE;
+ vb->field_count++;
+ do_gettimeofday(&vb->ts);
+ }
+
+ wake_up(&vb->done);
+
+ return;
+}
+
+static void solo_thread_try(struct solo_filehandle *fh)
+{
+ struct videobuf_buffer *vb;
+ unsigned int cur_write;
+
+ for (;;) {
+ spin_lock(&fh->slock);
+
+ if (list_empty(&fh->vidq_active))
+ break;
+
+ vb = list_first_entry(&fh->vidq_active, struct videobuf_buffer,
+ queue);
+
+ if (!waitqueue_active(&vb->done))
+ break;
+
+ cur_write = SOLO_VI_STATUS0_PAGE(solo_reg_read(fh->solo_dev,
+ SOLO_VI_STATUS0));
+ if (cur_write == fh->old_write)
+ break;
+
+ fh->old_write = cur_write;
+ list_del(&vb->queue);
+
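+		/* The buffer is off the active list; drop the lock while
+		 * solo_fillbuf() does the (potentially slow) DMA copy. */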
+ spin_unlock(&fh->slock);
+
+ solo_fillbuf(fh, vb);
+ }
+
+ assert_spin_locked(&fh->slock);
+ spin_unlock(&fh->slock);
+}
+
+static int solo_thread(void *data)
+{
+ struct solo_filehandle *fh = data;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_freezable();
+ add_wait_queue(&solo_dev->disp_thread_wait, &wait);
+
+ for (;;) {
+ long timeout = schedule_timeout_interruptible(HZ);
+ if (timeout == -ERESTARTSYS || kthread_should_stop())
+ break;
+ solo_thread_try(fh);
+ try_to_freeze();
+ }
+
+ remove_wait_queue(&solo_dev->disp_thread_wait, &wait);
+
+ return 0;
+}
+
+static int solo_start_thread(struct solo_filehandle *fh)
+{
+ fh->kthread = kthread_run(solo_thread, fh, SOLO6010_NAME "_disp");
+
+ if (IS_ERR(fh->kthread))
+ return PTR_ERR(fh->kthread);
+
+ return 0;
+}
+
+static void solo_stop_thread(struct solo_filehandle *fh)
+{
+ if (fh->kthread) {
+ kthread_stop(fh->kthread);
+ fh->kthread = NULL;
+ }
+}
+
+static int solo_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct solo_filehandle *fh = vq->priv_data;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ *size = solo_image_size(solo_dev);
+
+ if (*count < MIN_VID_BUFFERS)
+ *count = MIN_VID_BUFFERS;
+
+ return 0;
+}
+
+static int solo_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct solo_filehandle *fh = vq->priv_data;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ vb->size = solo_image_size(solo_dev);
+ if (vb->baddr != 0 && vb->bsize < vb->size)
+ return -EINVAL;
+
+ /* XXX: These properties only change when queue is idle */
+ vb->width = solo_dev->video_hsize;
+ vb->height = solo_vlines(solo_dev);
+ vb->bytesperline = solo_bytesperline(solo_dev);
+ vb->field = field;
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ int rc = videobuf_iolock(vq, vb, NULL);
+ if (rc < 0) {
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ return rc;
+ }
+ }
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static void solo_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct solo_filehandle *fh = vq->priv_data;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &fh->vidq_active);
+ wake_up_interruptible(&solo_dev->disp_thread_wait);
+}
+
+static void solo_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops solo_video_qops = {
+ .buf_setup = solo_buf_setup,
+ .buf_prepare = solo_buf_prepare,
+ .buf_queue = solo_buf_queue,
+ .buf_release = solo_buf_release,
+};
+
+static unsigned int solo_v4l2_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct solo_filehandle *fh = file->private_data;
+
+ return videobuf_poll_stream(file, &fh->vidq, wait);
+}
+
+static int solo_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct solo_filehandle *fh = file->private_data;
+
+ return videobuf_mmap_mapper(&fh->vidq, vma);
+}
+
+static int solo_v4l2_open(struct file *file)
+{
+ struct solo6010_dev *solo_dev = video_drvdata(file);
+ struct solo_filehandle *fh;
+ int ret;
+
+ if ((fh = kzalloc(sizeof(*fh), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&fh->slock);
+ INIT_LIST_HEAD(&fh->vidq_active);
+ fh->solo_dev = solo_dev;
+ file->private_data = fh;
+
+ if ((ret = solo_start_thread(fh))) {
+ kfree(fh);
+ return ret;
+ }
+
+ videobuf_queue_dma_contig_init(&fh->vidq, &solo_video_qops,
+ &solo_dev->pdev->dev, &fh->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ SOLO_DISP_PIX_FIELD,
+ sizeof(struct videobuf_buffer), fh);
+
+ return 0;
+}
+
+static ssize_t solo_v4l2_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct solo_filehandle *fh = file->private_data;
+
+ return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
+ file->f_flags & O_NONBLOCK);
+}
+
+static int solo_v4l2_release(struct file *file)
+{
+ struct solo_filehandle *fh = file->private_data;
+
+ videobuf_stop(&fh->vidq);
+ videobuf_mmap_free(&fh->vidq);
+ solo_stop_thread(fh);
+ kfree(fh);
+
+ return 0;
+}
+
+static int solo_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct solo_filehandle *fh = priv;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ strcpy(cap->driver, SOLO6010_NAME);
+ strcpy(cap->card, "Softlogic 6010");
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI %s",
+ pci_name(solo_dev->pdev));
+ cap->version = SOLO6010_VER_NUM;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int solo_enum_ext_input(struct solo6010_dev *solo_dev,
+ struct v4l2_input *input)
+{
+ static const char *dispnames_1[] = { "4UP" };
+ static const char *dispnames_2[] = { "4UP-1", "4UP-2" };
+ static const char *dispnames_5[] = {
+ "4UP-1", "4UP-2", "4UP-3", "4UP-4", "16UP"
+ };
+ const char **dispnames;
+
+ if (input->index >= (solo_dev->nr_chans + solo_dev->nr_ext))
+ return -EINVAL;
+
+ if (solo_dev->nr_ext == 5)
+ dispnames = dispnames_5;
+ else if (solo_dev->nr_ext == 2)
+ dispnames = dispnames_2;
+ else
+ dispnames = dispnames_1;
+
+ snprintf(input->name, sizeof(input->name), "Multi %s",
+ dispnames[input->index - solo_dev->nr_chans]);
+
+ return 0;
+}
+
+static int solo_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct solo_filehandle *fh = priv;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ if (input->index >= solo_dev->nr_chans) {
+ int ret = solo_enum_ext_input(solo_dev, input);
+ if (ret < 0)
+ return ret;
+ } else {
+ snprintf(input->name, sizeof(input->name), "Camera %d",
+ input->index + 1);
+
+ /* We can only check this for normal inputs */
+ if (!tw28_get_video_status(solo_dev, input->index))
+ input->status = V4L2_IN_ST_NO_SIGNAL;
+ }
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
+ input->std = V4L2_STD_NTSC_M;
+ else
+ input->std = V4L2_STD_PAL_M;
+
+ return 0;
+}
+
+static int solo_set_input(struct file *file, void *priv, unsigned int index)
+{
+ struct solo_filehandle *fh = priv;
+
+ return solo_v4l2_set_ch(fh->solo_dev, index);
+}
+
+static int solo_get_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct solo_filehandle *fh = priv;
+
+ *index = fh->solo_dev->cur_disp_ch;
+
+ return 0;
+}
+
+static int solo_enum_fmt_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_UYVY;
+	strlcpy(f->description, "UYVY 4:2:2 Packed", sizeof(f->description));
+
+ return 0;
+}
+
+static int solo_try_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct solo_filehandle *fh = priv;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int image_size = solo_image_size(solo_dev);
+
+ /* Check supported sizes */
+ if (pix->width != solo_dev->video_hsize)
+ pix->width = solo_dev->video_hsize;
+ if (pix->height != solo_vlines(solo_dev))
+ pix->height = solo_vlines(solo_dev);
+ if (pix->sizeimage != image_size)
+ pix->sizeimage = image_size;
+
+ /* Check formats */
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = SOLO_DISP_PIX_FIELD;
+
+ if (pix->pixelformat != V4L2_PIX_FMT_UYVY ||
+ pix->field != SOLO_DISP_PIX_FIELD ||
+ pix->colorspace != V4L2_COLORSPACE_SMPTE170M)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int solo_set_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct solo_filehandle *fh = priv;
+
+ if (videobuf_queue_is_busy(&fh->vidq))
+ return -EBUSY;
+
+ /* For right now, if it doesn't match our running config,
+ * then fail */
+ return solo_try_fmt_cap(file, priv, f);
+}
+
+static int solo_get_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct solo_filehandle *fh = priv;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ pix->width = solo_dev->video_hsize;
+ pix->height = solo_vlines(solo_dev);
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->field = SOLO_DISP_PIX_FIELD;
+ pix->sizeimage = solo_image_size(solo_dev);
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ pix->bytesperline = solo_bytesperline(solo_dev);
+
+ return 0;
+}
+
+static int solo_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct solo_filehandle *fh = priv;
+
+ return videobuf_reqbufs(&fh->vidq, req);
+}
+
+static int solo_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct solo_filehandle *fh = priv;
+
+ return videobuf_querybuf(&fh->vidq, buf);
+}
+
+static int solo_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct solo_filehandle *fh = priv;
+
+ return videobuf_qbuf(&fh->vidq, buf);
+}
+
+static int solo_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct solo_filehandle *fh = priv;
+
+ return videobuf_dqbuf(&fh->vidq, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int solo_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct solo_filehandle *fh = priv;
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return videobuf_streamon(&fh->vidq);
+}
+
+static int solo_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct solo_filehandle *fh = priv;
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return videobuf_streamoff(&fh->vidq);
+}
+
+static int solo_s_std(struct file *file, void *priv, v4l2_std_id *i)
+{
+ return 0;
+}
+
+static const u32 solo_motion_ctrls[] = {
+ V4L2_CID_MOTION_TRACE,
+ 0
+};
+
+static const u32 *solo_ctrl_classes[] = {
+ solo_motion_ctrls,
+ NULL
+};
+
+static int solo_disp_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ qc->id = v4l2_ctrl_next(solo_ctrl_classes, qc->id);
+ if (!qc->id)
+ return -EINVAL;
+
+ switch (qc->id) {
+#ifdef PRIVATE_CIDS
+ case V4L2_CID_MOTION_TRACE:
+ qc->type = V4L2_CTRL_TYPE_BOOLEAN;
+ qc->minimum = 0;
+ qc->maximum = qc->step = 1;
+ qc->default_value = 0;
+ strlcpy(qc->name, "Motion Detection Trace", sizeof(qc->name));
+ return 0;
+#else
+ case V4L2_CID_MOTION_TRACE:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
+#endif
+ }
+ return -EINVAL;
+}
+
+static int solo_disp_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct solo_filehandle *fh = priv;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MOTION_TRACE:
+ ctrl->value = solo_reg_read(solo_dev, SOLO_VI_MOTION_BAR)
+ ? 1 : 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int solo_disp_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct solo_filehandle *fh = priv;
+ struct solo6010_dev *solo_dev = fh->solo_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MOTION_TRACE:
+ if (ctrl->value) {
+ solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER,
+ SOLO_VI_MOTION_Y_ADD |
+ SOLO_VI_MOTION_Y_VALUE(0x20) |
+ SOLO_VI_MOTION_CB_VALUE(0x10) |
+ SOLO_VI_MOTION_CR_VALUE(0x10));
+ solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR,
+ SOLO_VI_MOTION_CR_ADD |
+ SOLO_VI_MOTION_Y_VALUE(0x10) |
+ SOLO_VI_MOTION_CB_VALUE(0x80) |
+ SOLO_VI_MOTION_CR_VALUE(0x10));
+ } else {
+ solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
+ solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR, 0);
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static const struct v4l2_file_operations solo_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = solo_v4l2_open,
+ .release = solo_v4l2_release,
+ .read = solo_v4l2_read,
+ .poll = solo_v4l2_poll,
+ .mmap = solo_v4l2_mmap,
+ .ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops solo_v4l2_ioctl_ops = {
+ .vidioc_querycap = solo_querycap,
+ .vidioc_s_std = solo_s_std,
+ /* Input callbacks */
+ .vidioc_enum_input = solo_enum_input,
+ .vidioc_s_input = solo_set_input,
+ .vidioc_g_input = solo_get_input,
+ /* Video capture format callbacks */
+ .vidioc_enum_fmt_vid_cap = solo_enum_fmt_cap,
+ .vidioc_try_fmt_vid_cap = solo_try_fmt_cap,
+ .vidioc_s_fmt_vid_cap = solo_set_fmt_cap,
+ .vidioc_g_fmt_vid_cap = solo_get_fmt_cap,
+ /* Streaming I/O */
+ .vidioc_reqbufs = solo_reqbufs,
+ .vidioc_querybuf = solo_querybuf,
+ .vidioc_qbuf = solo_qbuf,
+ .vidioc_dqbuf = solo_dqbuf,
+ .vidioc_streamon = solo_streamon,
+ .vidioc_streamoff = solo_streamoff,
+ /* Controls */
+ .vidioc_queryctrl = solo_disp_queryctrl,
+ .vidioc_g_ctrl = solo_disp_g_ctrl,
+ .vidioc_s_ctrl = solo_disp_s_ctrl,
+};
+
+static struct video_device solo_v4l2_template = {
+ .name = SOLO6010_NAME,
+ .fops = &solo_v4l2_fops,
+ .ioctl_ops = &solo_v4l2_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+
+ .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+int solo_v4l2_init(struct solo6010_dev *solo_dev)
+{
+ int ret;
+ int i;
+
+ init_waitqueue_head(&solo_dev->disp_thread_wait);
+
+ solo_dev->vfd = video_device_alloc();
+ if (!solo_dev->vfd)
+ return -ENOMEM;
+
+ *solo_dev->vfd = solo_v4l2_template;
+ solo_dev->vfd->parent = &solo_dev->pdev->dev;
+
+ ret = video_register_device(solo_dev->vfd, VFL_TYPE_GRABBER, video_nr);
+ if (ret < 0) {
+ video_device_release(solo_dev->vfd);
+ solo_dev->vfd = NULL;
+ return ret;
+ }
+
+ video_set_drvdata(solo_dev->vfd, solo_dev);
+
+ snprintf(solo_dev->vfd->name, sizeof(solo_dev->vfd->name), "%s (%i)",
+ SOLO6010_NAME, solo_dev->vfd->num);
+
+ if (video_nr >= 0)
+ video_nr++;
+
+ dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with "
+ "%d inputs (%d extended)\n", solo_dev->vfd->num,
+ solo_dev->nr_chans, solo_dev->nr_ext);
+
+ /* Cycle all the channels and clear */
+ for (i = 0; i < solo_dev->nr_chans; i++) {
+ solo_v4l2_set_ch(solo_dev, i);
+ while (erase_off(solo_dev))
+			; /* Do nothing */
+ }
+
+ /* Set the default display channel */
+ solo_v4l2_set_ch(solo_dev, 0);
+ while (erase_off(solo_dev))
+		; /* Do nothing */
+
+ solo6010_irq_on(solo_dev, SOLO_IRQ_VIDEO_IN);
+
+ return 0;
+}
+
+void solo_v4l2_exit(struct solo6010_dev *solo_dev)
+{
+ solo6010_irq_off(solo_dev, SOLO_IRQ_VIDEO_IN);
+ if (solo_dev->vfd) {
+ video_unregister_device(solo_dev->vfd);
+ solo_dev->vfd = NULL;
+ }
+}
diff --git a/drivers/staging/solo6x10/solo6010.h b/drivers/staging/solo6x10/solo6010.h
new file mode 100644
index 00000000000..dca8e3e1545
--- /dev/null
+++ b/drivers/staging/solo6x10/solo6010.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
+ * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOLO6010_H
+#define __SOLO6010_H
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-core.h>
+
+#include "solo6010-registers.h"
+
+#ifndef PCI_VENDOR_ID_SOFTLOGIC
+#define PCI_VENDOR_ID_SOFTLOGIC 0x9413
+#define PCI_DEVICE_ID_SOLO6010 0x6010
+#endif
+
+#ifndef PCI_VENDOR_ID_BLUECHERRY
+#define PCI_VENDOR_ID_BLUECHERRY 0x1BB3
+/* Neugent Softlogic 6010 based cards */
+#define PCI_DEVICE_ID_NEUSOLO_4 0x4304
+#define PCI_DEVICE_ID_NEUSOLO_9 0x4309
+#define PCI_DEVICE_ID_NEUSOLO_16 0x4310
+/* Commell Softlogic 6010 based cards */
+#define PCI_DEVICE_ID_COMMSOLO_4 0x4E04
+#define PCI_DEVICE_ID_COMMSOLO_9 0x4E09
+#define PCI_DEVICE_ID_COMMSOLO_16 0x4E10
+#endif /* Bluecherry */
+
+#define SOLO6010_NAME "solo6010"
+
+#define SOLO_MAX_CHANNELS 16
+
+/* Make sure these two match */
+#define SOLO6010_VERSION "2.0.0"
+#define SOLO6010_VER_MAJOR 2
+#define SOLO6010_VER_MINOR 0
+#define SOLO6010_VER_SUB 0
+#define SOLO6010_VER_NUM \
+ KERNEL_VERSION(SOLO6010_VER_MAJOR, SOLO6010_VER_MINOR, SOLO6010_VER_SUB)
+
+/*
+ * The SOLO6010 actually has 8 i2c channels, but we only use 2.
+ * 0 - Techwell chip(s)
+ * 1 - SAA7128
+ */
+#define SOLO_I2C_ADAPTERS 2
+#define SOLO_I2C_TW 0
+#define SOLO_I2C_SAA 1
+
+/* DMA Engine setup */
+#define SOLO_NR_P2M 4
+#define SOLO_NR_P2M_DESC 256
+#define SOLO_P2M_DESC_SIZE (SOLO_NR_P2M_DESC * 16)
+/* MPEG and JPEG share the same interrupt and locks so they must be together
+ * in the same dma channel. */
+#define SOLO_P2M_DMA_ID_MP4E 0
+#define SOLO_P2M_DMA_ID_JPEG 0
+#define SOLO_P2M_DMA_ID_MP4D 1
+#define SOLO_P2M_DMA_ID_G723D 1
+#define SOLO_P2M_DMA_ID_DISP 2
+#define SOLO_P2M_DMA_ID_OSG 2
+#define SOLO_P2M_DMA_ID_G723E 3
+#define SOLO_P2M_DMA_ID_VIN 3
+
+/* Encoder standard modes */
+#define SOLO_ENC_MODE_CIF 2
+#define SOLO_ENC_MODE_HD1 1
+#define SOLO_ENC_MODE_D1 9
+
+#define SOLO_DEFAULT_GOP 30
+#define SOLO_DEFAULT_QP 3
+
+/* There is 8MB memory available for solo to buffer MPEG4 frames.
+ * This gives us 512 * 16kbyte queues. */
+#define SOLO_NR_RING_BUFS 512
+
+#define SOLO_CLOCK_MHZ 108
+
+#ifndef V4L2_BUF_FLAG_MOTION_ON
+#define V4L2_BUF_FLAG_MOTION_ON 0x0400
+#define V4L2_BUF_FLAG_MOTION_DETECTED 0x0800
+#endif
+#ifndef V4L2_CID_MOTION_ENABLE
+#define PRIVATE_CIDS
+#define V4L2_CID_MOTION_ENABLE (V4L2_CID_PRIVATE_BASE+0)
+#define V4L2_CID_MOTION_THRESHOLD (V4L2_CID_PRIVATE_BASE+1)
+#define V4L2_CID_MOTION_TRACE (V4L2_CID_PRIVATE_BASE+2)
+#endif
+
+enum SOLO_I2C_STATE {
+ IIC_STATE_IDLE,
+ IIC_STATE_START,
+ IIC_STATE_READ,
+ IIC_STATE_WRITE,
+ IIC_STATE_STOP
+};
+
+struct solo_p2m_dev {
+ struct semaphore sem;
+ struct completion completion;
+ int error;
+ u8 desc[SOLO_P2M_DESC_SIZE];
+};
+
+#define OSD_TEXT_MAX 30
+
+enum solo_enc_types {
+ SOLO_ENC_TYPE_STD,
+ SOLO_ENC_TYPE_EXT,
+};
+
+struct solo_enc_dev {
+ struct solo6010_dev *solo_dev;
+ /* V4L2 Items */
+ struct video_device *vfd;
+ /* General accounting */
+ wait_queue_head_t thread_wait;
+ spinlock_t lock;
+ atomic_t readers;
+ u8 ch;
+ u8 mode, gop, qp, interlaced, interval;
+ u8 reset_gop;
+ u8 bw_weight;
+ u8 motion_detected;
+ u16 motion_thresh;
+ u16 width;
+ u16 height;
+ char osd_text[OSD_TEXT_MAX + 1];
+};
+
+struct solo_enc_buf {
+ u8 vop;
+ u8 ch;
+ enum solo_enc_types type;
+ u32 off;
+ u32 size;
+ u32 jpeg_off;
+ u32 jpeg_size;
+ struct timeval ts;
+};
+
+/* The SOLO6010 PCI Device */
+struct solo6010_dev {
+ /* General stuff */
+ struct pci_dev *pdev;
+ u8 __iomem *reg_base;
+ int nr_chans;
+ int nr_ext;
+ u32 irq_mask;
+ u32 motion_mask;
+ spinlock_t reg_io_lock;
+
+ /* tw28xx accounting */
+ u8 tw2865, tw2864, tw2815;
+ u8 tw28_cnt;
+
+ /* i2c related items */
+ struct i2c_adapter i2c_adap[SOLO_I2C_ADAPTERS];
+ enum SOLO_I2C_STATE i2c_state;
+ struct semaphore i2c_sem;
+ int i2c_id;
+ wait_queue_head_t i2c_wait;
+ struct i2c_msg *i2c_msg;
+ unsigned int i2c_msg_num;
+ unsigned int i2c_msg_ptr;
+
+ /* P2M DMA Engine */
+ struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
+
+ /* V4L2 Display items */
+ struct video_device *vfd;
+ unsigned int erasing;
+ unsigned int frame_blank;
+ u8 cur_disp_ch;
+ wait_queue_head_t disp_thread_wait;
+
+ /* V4L2 Encoder items */
+ struct solo_enc_dev *v4l2_enc[SOLO_MAX_CHANNELS];
+ u16 enc_bw_remain;
+ /* IDX into hw mp4 encoder */
+ u8 enc_idx;
+ /* Our software ring of enc buf references */
+ u16 enc_wr_idx;
+ struct solo_enc_buf enc_buf[SOLO_NR_RING_BUFS];
+
+ /* Current video settings */
+ u32 video_type;
+ u16 video_hsize, video_vsize;
+ u16 vout_hstart, vout_vstart;
+ u16 vin_hstart, vin_vstart;
+ u8 fps;
+
+ /* Audio components */
+ struct snd_card *snd_card;
+ struct snd_pcm *snd_pcm;
+ atomic_t snd_users;
+ int g723_hw_idx;
+};
+
+static inline u32 solo_reg_read(struct solo6010_dev *solo_dev, int reg)
+{
+ unsigned long flags;
+ u32 ret;
+ u16 val;
+
+ spin_lock_irqsave(&solo_dev->reg_io_lock, flags);
+
+ ret = readl(solo_dev->reg_base + reg);
+ rmb();
+ pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
+ rmb();
+
+ spin_unlock_irqrestore(&solo_dev->reg_io_lock, flags);
+
+ return ret;
+}
+
+static inline void solo_reg_write(struct solo6010_dev *solo_dev, int reg,
+ u32 data)
+{
+ unsigned long flags;
+ u16 val;
+
+ spin_lock_irqsave(&solo_dev->reg_io_lock, flags);
+
+ writel(data, solo_dev->reg_base + reg);
+ wmb();
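+	/* The config read below is presumably used to flush the posted
+	 * MMIO write before the lock is released (assumed intent). */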
+ pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
+ rmb();
+
+ spin_unlock_irqrestore(&solo_dev->reg_io_lock, flags);
+}
+
+void solo6010_irq_on(struct solo6010_dev *solo_dev, u32 mask);
+void solo6010_irq_off(struct solo6010_dev *solo_dev, u32 mask);
+
+/* Init/exit routines for subsystems */
+int solo_disp_init(struct solo6010_dev *solo_dev);
+void solo_disp_exit(struct solo6010_dev *solo_dev);
+
+int solo_gpio_init(struct solo6010_dev *solo_dev);
+void solo_gpio_exit(struct solo6010_dev *solo_dev);
+
+int solo_i2c_init(struct solo6010_dev *solo_dev);
+void solo_i2c_exit(struct solo6010_dev *solo_dev);
+
+int solo_p2m_init(struct solo6010_dev *solo_dev);
+void solo_p2m_exit(struct solo6010_dev *solo_dev);
+
+int solo_v4l2_init(struct solo6010_dev *solo_dev);
+void solo_v4l2_exit(struct solo6010_dev *solo_dev);
+
+int solo_enc_init(struct solo6010_dev *solo_dev);
+void solo_enc_exit(struct solo6010_dev *solo_dev);
+
+int solo_enc_v4l2_init(struct solo6010_dev *solo_dev);
+void solo_enc_v4l2_exit(struct solo6010_dev *solo_dev);
+
+int solo_g723_init(struct solo6010_dev *solo_dev);
+void solo_g723_exit(struct solo6010_dev *solo_dev);
+
+/* ISR's */
+int solo_i2c_isr(struct solo6010_dev *solo_dev);
+void solo_p2m_isr(struct solo6010_dev *solo_dev, int id);
+void solo_p2m_error_isr(struct solo6010_dev *solo_dev, u32 status);
+void solo_enc_v4l2_isr(struct solo6010_dev *solo_dev);
+void solo_g723_isr(struct solo6010_dev *solo_dev);
+void solo_motion_isr(struct solo6010_dev *solo_dev);
+void solo_video_in_isr(struct solo6010_dev *solo_dev);
+
+/* i2c read/write */
+u8 solo_i2c_readbyte(struct solo6010_dev *solo_dev, int id, u8 addr, u8 off);
+void solo_i2c_writebyte(struct solo6010_dev *solo_dev, int id, u8 addr, u8 off,
+ u8 data);
+
+/* P2M DMA */
+int solo_p2m_dma_t(struct solo6010_dev *solo_dev, u8 id, int wr,
+ dma_addr_t dma_addr, u32 ext_addr, u32 size);
+int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
+ void *sys_addr, u32 ext_addr, u32 size);
+
+/* Set the threshold for motion detection */
+void solo_set_motion_threshold(struct solo6010_dev *solo_dev, u8 ch, u16 val);
+#define SOLO_DEF_MOT_THRESH 0x0300
+
+/* Write text on OSD */
+int solo_osd_print(struct solo_enc_dev *solo_enc);
+
+#endif /* __SOLO6010_H */
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
new file mode 100644
index 00000000000..d231ae27299
--- /dev/null
+++ b/drivers/staging/spectra/Kconfig
@@ -0,0 +1,41 @@
+
+menuconfig SPECTRA
+ tristate "Denali Spectra Flash Translation Layer"
+ depends on BLOCK
+ depends on X86_MRST
+ default n
+ ---help---
+	  Enable the FTL pseudo-filesystem used with the NAND Flash controller
+	  on the Intel Moorestown platform to present the flash as a disk.
+
+choice
+ prompt "Compile for"
+ depends on SPECTRA
+ default SPECTRA_MRST_HW
+
+config SPECTRA_MRST_HW
+ bool "Moorestown hardware mode"
+ help
+	  Driver communicates with the Moorestown hardware's register interface
+	  in DMA mode.
+
+config SPECTRA_MTD
+ bool "Linux MTD mode"
+ depends on MTD
+ help
+ Driver communicates with the kernel MTD subsystem instead of its own
+ built-in hardware driver.
+
+config SPECTRA_EMU
+ bool "RAM emulator testing"
+ help
+	  Driver emulates flash in a RAM buffer and/or a disk file. Useful for
+	  testing the behavior of the FTL layer.
+
+endchoice
+
+config SPECTRA_MRST_HW_DMA
+ bool
+ default n
+ depends on SPECTRA_MRST_HW
+ help
+ Use DMA for native hardware interface.
diff --git a/drivers/staging/spectra/Makefile b/drivers/staging/spectra/Makefile
new file mode 100644
index 00000000000..f777dfba05a
--- /dev/null
+++ b/drivers/staging/spectra/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile of Intel Moorestown NAND controller driver
+#
+
+obj-$(CONFIG_SPECTRA) += spectra.o
+spectra-y := ffsport.o flash.o lld.o
+spectra-$(CONFIG_SPECTRA_MRST_HW) += lld_nand.o
+spectra-$(CONFIG_SPECTRA_MRST_HW_DMA) += lld_cdma.o
+spectra-$(CONFIG_SPECTRA_EMU) += lld_emu.o
+spectra-$(CONFIG_SPECTRA_MTD) += lld_mtd.o
+
diff --git a/drivers/staging/spectra/README b/drivers/staging/spectra/README
new file mode 100644
index 00000000000..ecba559b899
--- /dev/null
+++ b/drivers/staging/spectra/README
@@ -0,0 +1,29 @@
+This is a driver for the NAND controller of the Intel Moorestown platform.
+
+The driver is a standalone Linux block device driver; it acts as if it were a normal hard disk.
+It consists of three layers:
+	Block layer interface - file ffsport.c
+	Flash Translation Layer (FTL) - file flash.c (implements the NAND flash translation layer, including address mapping, garbage collection, wear-leveling and so on)
+	Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (implements the actual controller hardware register access)
+
+The driver can be built as a module or built in.
+
+Dependency:
+This driver depends on the IA Firmware of the Intel Moorestown platform.
+It needs the IA Firmware to create the block table for the first time.
+To validate the driver code without the IA Firmware, change the
+macro AUTO_FORMAT_FLASH from 0 to 1 in the file spectraswconfig.h. The
+driver will then erase the whole NAND flash and create a new block table.
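+
+For example (assuming the option is a simple #define in spectraswconfig.h):
+	#define AUTO_FORMAT_FLASH	1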
+
+TODO:
+ - Enable Command DMA feature support
+	- Lower the memory footprint
+	- Remove most of the unnecessary global variables
+	- Change all upper-case variable/function names to lowercase
+	- Fix other miscellaneous bugs
+
+Please send patches to:
+ Greg Kroah-Hartman <gregkh@suse.de>
+
+And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
+
diff --git a/drivers/staging/spectra/ffsdefs.h b/drivers/staging/spectra/ffsdefs.h
new file mode 100644
index 00000000000..a9e9cd233d2
--- /dev/null
+++ b/drivers/staging/spectra/ffsdefs.h
@@ -0,0 +1,58 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _FFSDEFS_
+#define _FFSDEFS_
+
+#define CLEAR 0 /*use this to clear a field instead of "fail"*/
+#define SET 1 /*use this to set a field instead of "pass"*/
+#define FAIL 1 /*failed flag*/
+#define PASS 0 /*success flag*/
+#define ERR -1 /*error flag*/
+
+#define ERASE_CMD 10
+#define WRITE_MAIN_CMD 11
+#define READ_MAIN_CMD 12
+#define WRITE_SPARE_CMD 13
+#define READ_SPARE_CMD 14
+#define WRITE_MAIN_SPARE_CMD 15
+#define READ_MAIN_SPARE_CMD 16
+#define MEMCOPY_CMD 17
+#define DUMMY_CMD 99
+
+#define EVENT_PASS 0x00
+#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
+#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
+#define EVENT_TIME_OUT 0x03
+#define EVENT_PROGRAM_FAILURE 0x04
+#define EVENT_ERASE_FAILURE 0x05
+#define EVENT_MEMCOPY_FAILURE 0x06
+#define EVENT_FAIL 0x07
+
+#define EVENT_NONE 0x22
+#define EVENT_DMA_CMD_COMP 0x77
+#define EVENT_ECC_TRANSACTION_DONE 0x88
+#define EVENT_DMA_CMD_FAIL 0x99
+
+#define CMD_PASS 0
+#define CMD_FAIL 1
+#define CMD_ABORT 2
+#define CMD_NOT_DONE 3
+
+#endif /* _FFSDEFS_ */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
new file mode 100644
index 00000000000..fa21a0fd8e8
--- /dev/null
+++ b/drivers/staging/spectra/ffsport.c
@@ -0,0 +1,831 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "ffsport.h"
+#include "flash.h"
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/log2.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+
+/**** Helper functions used for Div, Remainder operation on u64 ****/
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_Calc_Used_Bits
+* Inputs: Power of 2 number
+* Outputs: Number of Used Bits
+* 0, if the argument is 0
+* Description: Calculate the number of bits used by a given power of 2 number
+* Number can be up to 32 bits
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_Calc_Used_Bits(u32 n)
+{
+ int tot_bits = 0;
+
+ if (n >= 1 << 16) {
+ n >>= 16;
+ tot_bits += 16;
+ }
+
+ if (n >= 1 << 8) {
+ n >>= 8;
+ tot_bits += 8;
+ }
+
+ if (n >= 1 << 4) {
+ n >>= 4;
+ tot_bits += 4;
+ }
+
+ if (n >= 1 << 2) {
+ n >>= 2;
+ tot_bits += 2;
+ }
+
+ if (n >= 1 << 1)
+ tot_bits += 1;
+
+ return ((n == 0) ? (0) : tot_bits);
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_u64_Div
+* Inputs: Dividend of type u64
+* A power of 2 number as the divisor
+* Outputs: Quotient of the division operation
+* Description: It divides the address by the divisor using a bit shift
+* operation (essentially without explicitly using "/").
+* The divisor is a power of 2 and the dividend is a u64
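+* Example: GLOB_u64_Div(0x5000, 0x1000) == 5, since 0x1000 uses 12 bits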
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u64 GLOB_u64_Div(u64 addr, u32 divisor)
+{
+ return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_u64_Remainder
+* Inputs: Number of u64
+* Divisor Type (1 -PageAddress, 2- BlockAddress)
+* Outputs: Remainder of the Division operation
+* Description: It calculates the remainder of a number (of u64) by a
+* divisor (power of 2 number) using bit shifting and multiply
+* operations (essentially without explicitly using "/").
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
+{
+ u64 result = 0;
+
+ if (divisor_type == 1) { /* Remainder -- Page */
+ result = (addr >> DeviceInfo.nBitsInPageDataSize);
+ result = result * DeviceInfo.wPageDataSize;
+ } else if (divisor_type == 2) { /* Remainder -- Block */
+ result = (addr >> DeviceInfo.nBitsInBlockDataSize);
+ result = result * DeviceInfo.wBlockDataSize;
+ }
+
+ result = addr - result;
+
+ return result;
+}
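+
+/* Illustration (hypothetical geometry): if wPageDataSize is 2048, so
+ * nBitsInPageDataSize is 11, then GLOB_u64_Remainder(5000, 1) computes
+ * 5000 - (5000 >> 11) * 2048 = 5000 - 4096 = 904, the byte offset of the
+ * address within its page. */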
+
+#define NUM_DEVICES 1
+#define PARTITIONS 8
+
+#define GLOB_SBD_NAME "nd"
+#define GLOB_SBD_IRQ_NUM (29)
+
+#define GLOB_SBD_IOCTL_GC (0x7701)
+#define GLOB_SBD_IOCTL_WL (0x7702)
+#define GLOB_SBD_IOCTL_FORMAT (0x7703)
+#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
+#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
+#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
+#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
+#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
+#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
+#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
+
+static int reserved_mb = 0;
+module_param(reserved_mb, int, 0);
+MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
+
+int nand_debug_level;
+module_param(nand_debug_level, int, 0644);
+MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
+
+MODULE_LICENSE("GPL");
+
+struct spectra_nand_dev {
+ struct pci_dev *dev;
+ u64 size;
+ u16 users;
+ spinlock_t qlock;
+ void __iomem *ioaddr; /* Mapped address */
+ struct request_queue *queue;
+ struct task_struct *thread;
+ struct gendisk *gd;
+ u8 *tmp_buf;
+};
+
+
+static int GLOB_SBD_majornum;
+
+static char *GLOB_version = GLOB_VERSION;
+
+static struct spectra_nand_dev nand_device[NUM_DEVICES];
+
+static struct mutex spectra_lock;
+
+static int res_blks_os = 1;
+
+struct spectra_indentfy_dev_tag IdentifyDeviceData;
+
+static int force_flush_cache(void)
+{
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (ERR == GLOB_FTL_Flush_Cache()) {
+ printk(KERN_ERR "Fail to Flush FTL Cache!\n");
+ return -EFAULT;
+ }
+#if CMD_DMA
+ if (glob_ftl_execute_cmds())
+ return -EIO;
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+struct ioctl_rw_page_info {
+ u8 *data;
+ unsigned int page;
+};
+
+static int ioctl_read_page_data(unsigned long arg)
+{
+ u8 *buf;
+ struct ioctl_rw_page_info info;
+ int result = PASS;
+
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+ return -EFAULT;
+
+ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR "ioctl_read_page_data: "
+ "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&spectra_lock);
+ result = GLOB_FTL_Page_Read(buf,
+ (u64)info.page * IdentifyDeviceData.PageDataSize);
+ mutex_unlock(&spectra_lock);
+
+ if (copy_to_user((void __user *)info.data, buf,
+ IdentifyDeviceData.PageDataSize)) {
+ printk(KERN_ERR "ioctl_read_page_data: "
+ "failed to copy user data\n");
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ kfree(buf);
+ return result;
+}
+
+static int ioctl_write_page_data(unsigned long arg)
+{
+ u8 *buf;
+ struct ioctl_rw_page_info info;
+ int result = PASS;
+
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+ return -EFAULT;
+
+ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR "ioctl_write_page_data: "
+ "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(buf, (void __user *)info.data,
+ IdentifyDeviceData.PageDataSize)) {
+ printk(KERN_ERR "ioctl_write_page_data: "
+ "failed to copy user data\n");
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ mutex_lock(&spectra_lock);
+ result = GLOB_FTL_Page_Write(buf,
+ (u64)info.page * IdentifyDeviceData.PageDataSize);
+ mutex_unlock(&spectra_lock);
+
+ kfree(buf);
+ return result;
+}
+
+/* Return how many blocks should be reserved for bad block replacement */
+static int get_res_blk_num_bad_blk(void)
+{
+ return IdentifyDeviceData.wDataBlockNum / 10;
+}
+
+/* Return how many blocks should be reserved for OS image */
+static int get_res_blk_num_os(void)
+{
+ u32 res_blks, blk_size;
+
+ blk_size = IdentifyDeviceData.PageDataSize *
+ IdentifyDeviceData.PagesPerBlock;
+
+ res_blks = (reserved_mb * 1024 * 1024) / blk_size;
+
+ if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
+ res_blks = 1; /* Reserved 1 block for block table */
+
+ return res_blks;
+}
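+
+/* Illustration (hypothetical geometry): with reserved_mb = 25, a 2048-byte
+ * page and 64 pages per block, blk_size is 128 KiB and res_blks works out to
+ * 25 * 1024 * 1024 / 131072 = 200 blocks reserved for the OS image. */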
+
+/* Transfer a full request. */
+static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
+{
+ u64 start_addr, addr;
+ u32 logical_start_sect, hd_start_sect;
+ u32 nsect, hd_sects;
+ u32 rsect, tsect = 0;
+ char *buf;
+ u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
+
+ start_addr = (u64)(blk_rq_pos(req)) << 9;
+	/* Add a big enough offset to prevent the OS image from
+	 * being accessed or damaged by the file system */
+ start_addr += IdentifyDeviceData.PageDataSize *
+ IdentifyDeviceData.PagesPerBlock *
+ res_blks_os;
+
+	if (req->cmd_flags & REQ_FLUSH) {
+ if (force_flush_cache()) /* Fail to flush cache */
+ return -EIO;
+ else
+ return 0;
+ }
+
+ if (req->cmd_type != REQ_TYPE_FS)
+ return -EIO;
+
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
+		printk(KERN_ERR "Spectra error: request beyond the NAND "
+			"capacity! sector %d, current_nr_sectors %d, "
+ "while capacity is %d\n",
+ (int)blk_rq_pos(req),
+ blk_rq_cur_sectors(req),
+ (int)get_capacity(tr->gd));
+ return -EIO;
+ }
+
+ logical_start_sect = start_addr >> 9;
+ hd_start_sect = logical_start_sect / ratio;
+ rsect = logical_start_sect - hd_start_sect * ratio;
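+	/* rsect is the sector offset inside the first NAND page; e.g. with a
+	 * hypothetical 2 KiB page (ratio = 4), a request starting at logical
+	 * sector 6 maps to hd_start_sect 1 with rsect 2. */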
+
+ addr = (u64)hd_start_sect * ratio * 512;
+ buf = req->buffer;
+ nsect = blk_rq_cur_sectors(req);
+
+ if (rsect)
+ tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
+
+ switch (rq_data_dir(req)) {
+ case READ:
+ /* Read the first NAND page */
+ if (rsect) {
+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
+ addr += IdentifyDeviceData.PageDataSize;
+ buf += tsect << 9;
+ nsect -= tsect;
+ }
+
+ /* Read the other NAND pages */
+ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
+ if (GLOB_FTL_Page_Read(buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ addr += IdentifyDeviceData.PageDataSize;
+ buf += IdentifyDeviceData.PageDataSize;
+ }
+
+ /* Read the last NAND pages */
+ if (nsect % ratio) {
+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
+ }
+#if CMD_DMA
+ if (glob_ftl_execute_cmds())
+ return -EIO;
+ else
+ return 0;
+#endif
+ return 0;
+
+ case WRITE:
+ /* Write the first NAND page */
+ if (rsect) {
+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
+ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ addr += IdentifyDeviceData.PageDataSize;
+ buf += tsect << 9;
+ nsect -= tsect;
+ }
+
+ /* Write the other NAND pages */
+ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
+ if (GLOB_FTL_Page_Write(buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ addr += IdentifyDeviceData.PageDataSize;
+ buf += IdentifyDeviceData.PageDataSize;
+ }
+
+ /* Write the last NAND pages */
+ if (nsect % ratio) {
+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
+ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
+ printk(KERN_ERR "Error in %s, Line %d\n",
+ __FILE__, __LINE__);
+ return -EIO;
+ }
+ }
+#if CMD_DMA
+ if (glob_ftl_execute_cmds())
+ return -EIO;
+ else
+ return 0;
+#endif
+ return 0;
+
+ default:
+ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+ return -EIO;
+ }
+}
+
+/* This function is copied from drivers/mtd/mtd_blkdevs.c */
+static int spectra_trans_thread(void *arg)
+{
+ struct spectra_nand_dev *tr = arg;
+ struct request_queue *rq = tr->queue;
+ struct request *req = NULL;
+
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC;
+
+ spin_lock_irq(rq->queue_lock);
+ while (!kthread_should_stop()) {
+ int res;
+
+ if (!req) {
+ req = blk_fetch_request(rq);
+ if (!req) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irq(rq->queue_lock);
+ schedule();
+ spin_lock_irq(rq->queue_lock);
+ continue;
+ }
+ }
+
+ spin_unlock_irq(rq->queue_lock);
+
+ mutex_lock(&spectra_lock);
+ res = do_transfer(tr, req);
+ mutex_unlock(&spectra_lock);
+
+ spin_lock_irq(rq->queue_lock);
+
+ if (!__blk_end_request_cur(req, res))
+ req = NULL;
+ }
+
+ if (req)
+ __blk_end_request_all(req, -EIO);
+
+ spin_unlock_irq(rq->queue_lock);
+
+ return 0;
+}
+
+
+/* Request function that "handles clustering". */
+static void GLOB_SBD_request(struct request_queue *rq)
+{
+ struct spectra_nand_dev *pdev = rq->queuedata;
+ wake_up_process(pdev->thread);
+}
+
+static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
+{
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+ return 0;
+}
+
+static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
+{
+ int ret;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ mutex_lock(&spectra_lock);
+ ret = force_flush_cache();
+ mutex_unlock(&spectra_lock);
+
+ return 0;
+}
+
+static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "heads: %d, sectors: %d, cylinders: %d\n",
+ geo->heads, geo->sectors, geo->cylinders);
+
+ return 0;
+}
+
+int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ switch (cmd) {
+ case GLOB_SBD_IOCTL_GC:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Spectra IOCTL: Garbage Collection "
+ "being performed\n");
+ if (PASS != GLOB_FTL_Garbage_Collection())
+ return -EFAULT;
+ return 0;
+
+ case GLOB_SBD_IOCTL_WL:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Spectra IOCTL: Static Wear Leveling "
+ "being performed\n");
+ if (PASS != GLOB_FTL_Wear_Leveling())
+ return -EFAULT;
+ return 0;
+
+ case GLOB_SBD_IOCTL_FORMAT:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
+ "being performed\n");
+ if (PASS != GLOB_FTL_Flash_Format())
+ return -EFAULT;
+ return 0;
+
+ case GLOB_SBD_IOCTL_FLUSH_CACHE:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
+ "being performed\n");
+ mutex_lock(&spectra_lock);
+ ret = force_flush_cache();
+ mutex_unlock(&spectra_lock);
+ return ret;
+
+ case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
+ "Copy block table\n");
+ if (copy_to_user((void __user *)arg,
+ get_blk_table_start_addr(),
+ get_blk_table_len()))
+ return -EFAULT;
+ return 0;
+
+ case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
+ "Copy wear leveling table\n");
+ if (copy_to_user((void __user *)arg,
+ get_wear_leveling_table_start_addr(),
+ get_wear_leveling_table_len()))
+ return -EFAULT;
+ return 0;
+
+ case GLOB_SBD_IOCTL_GET_NAND_INFO:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
+ "Get NAND info\n");
+ if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
+ sizeof(IdentifyDeviceData)))
+ return -EFAULT;
+ return 0;
+
+ case GLOB_SBD_IOCTL_WRITE_DATA:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
+ "Write one page data\n");
+ return ioctl_write_page_data(arg);
+
+ case GLOB_SBD_IOCTL_READ_DATA:
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
+ "Read one page data\n");
+ return ioctl_read_page_data(arg);
+ }
+
+ return -ENOTTY;
+}
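+
+/* Illustration (user space, hypothetical device node): the ioctls above are
+ * issued against the exported block device, e.g.
+ *	int fd = open("/dev/nda", O_RDWR);
+ *	ioctl(fd, GLOB_SBD_IOCTL_GET_NAND_INFO, &info);
+ * where info is a struct spectra_indentfy_dev_tag. */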
+
+int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+static struct block_device_operations GLOB_SBD_ops = {
+ .owner = THIS_MODULE,
+ .open = GLOB_SBD_open,
+ .release = GLOB_SBD_release,
+ .ioctl = GLOB_SBD_unlocked_ioctl,
+ .getgeo = GLOB_SBD_getgeo,
+};
+
+static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
+{
+ int res_blks;
+ u32 sects;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ memset(dev, 0, sizeof(struct spectra_nand_dev));
+
+ nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
+ "for OS image, %d blocks for bad block replacement.\n",
+ get_res_blk_num_os(),
+ get_res_blk_num_bad_blk());
+
+ res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
+
+ dev->size = (u64)IdentifyDeviceData.PageDataSize *
+ IdentifyDeviceData.PagesPerBlock *
+ (IdentifyDeviceData.wDataBlockNum - res_blks);
+
+ res_blks_os = get_res_blk_num_os();
+
+ spin_lock_init(&dev->qlock);
+
+ dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
+ if (!dev->tmp_buf) {
+ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
+ __FILE__, __LINE__);
+ goto out_vfree;
+ }
+
+ dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
+ if (dev->queue == NULL) {
+ printk(KERN_ERR
+ "Spectra: Request queue could not be initialized."
+			" Aborting\n");
+ goto out_vfree;
+ }
+ dev->queue->queuedata = dev;
+
+	/* As the Linux block layer doesn't support >4KB hardware sectors, */
+	/* we report a 512-byte hardware sector size to the kernel here. */
+ blk_queue_logical_block_size(dev->queue, 512);
+
+ blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+ dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
+ if (IS_ERR(dev->thread)) {
+ blk_cleanup_queue(dev->queue);
+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
+ return PTR_ERR(dev->thread);
+ }
+
+ dev->gd = alloc_disk(PARTITIONS);
+ if (!dev->gd) {
+ printk(KERN_ERR
+			"Spectra: Could not allocate disk. Aborting\n");
+ goto out_vfree;
+ }
+ dev->gd->major = GLOB_SBD_majornum;
+ dev->gd->first_minor = which * PARTITIONS;
+ dev->gd->fops = &GLOB_SBD_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
+
+ sects = dev->size >> 9;
+ nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
+ set_capacity(dev->gd, sects);
+
+ add_disk(dev->gd);
+
+ return 0;
+out_vfree:
+ return -ENOMEM;
+}
+
+/*
+static ssize_t show_nand_block_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (int)IdentifyDeviceData.wDataBlockNum);
+}
+
+static ssize_t show_nand_pages_per_block(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (int)IdentifyDeviceData.PagesPerBlock);
+}
+
+static ssize_t show_nand_page_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (int)IdentifyDeviceData.PageDataSize);
+}
+
+static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
+static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
+static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
+
+static void create_sysfs_entry(struct device *dev)
+{
+ if (device_create_file(dev, &dev_attr_nand_block_num))
+ printk(KERN_ERR "Spectra: "
+ "failed to create sysfs entry nand_block_num.\n");
+ if (device_create_file(dev, &dev_attr_nand_pages_per_block))
+ printk(KERN_ERR "Spectra: "
+ "failed to create sysfs entry nand_pages_per_block.\n");
+ if (device_create_file(dev, &dev_attr_nand_page_size))
+ printk(KERN_ERR "Spectra: "
+ "failed to create sysfs entry nand_page_size.\n");
+}
+*/
+
+static int GLOB_SBD_init(void)
+{
+ int i;
+
+ /* Set debug output level (0~3) here. 3 is most verbose */
+ printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
+
+ mutex_init(&spectra_lock);
+
+ GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
+ if (GLOB_SBD_majornum <= 0) {
+		printk(KERN_ERR "Unable to get a major number for Spectra: %d\n",
+		       GLOB_SBD_majornum);
+ return -EBUSY;
+ }
+
+ if (PASS != GLOB_FTL_Flash_Init()) {
+ printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
+ "Aborting\n");
+ goto out_flash_register;
+ }
+
+ /* create_sysfs_entry(&dev->dev); */
+
+ if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
+ printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
+ "Aborting\n");
+ goto out_flash_register;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
+ "Num blocks=%d, pagesperblock=%d, "
+ "pagedatasize=%d, ECCBytesPerSector=%d\n",
+ (int)IdentifyDeviceData.NumBlocks,
+ (int)IdentifyDeviceData.PagesPerBlock,
+ (int)IdentifyDeviceData.PageDataSize,
+ (int)IdentifyDeviceData.wECCBytesPerSector);
+ }
+
+ printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
+ if (GLOB_FTL_Init() != PASS) {
+ printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
+ "Aborting\n");
+ goto out_ftl_flash_register;
+ }
+ printk(KERN_ALERT "Spectra: block table has been found.\n");
+
+ for (i = 0; i < NUM_DEVICES; i++)
+ if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
+ goto out_ftl_flash_register;
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Spectra: module loaded with major number %d\n",
+ GLOB_SBD_majornum);
+
+ return 0;
+
+out_ftl_flash_register:
+ GLOB_FTL_Cache_Release();
+out_flash_register:
+ GLOB_FTL_Flash_Release();
+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
+ printk(KERN_ERR "Spectra: Module load failed.\n");
+
+ return -ENOMEM;
+}
+
+static void __exit GLOB_SBD_exit(void)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < NUM_DEVICES; i++) {
+ struct spectra_nand_dev *dev = &nand_device[i];
+ if (dev->gd) {
+ del_gendisk(dev->gd);
+ put_disk(dev->gd);
+ }
+ if (dev->queue)
+ blk_cleanup_queue(dev->queue);
+ kfree(dev->tmp_buf);
+ }
+
+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
+
+ mutex_lock(&spectra_lock);
+ force_flush_cache();
+ mutex_unlock(&spectra_lock);
+
+ GLOB_FTL_Cache_Release();
+
+ GLOB_FTL_Flash_Release();
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Spectra FTL module (major number %d) unloaded.\n",
+ GLOB_SBD_majornum);
+}
+
+module_init(GLOB_SBD_init);
+module_exit(GLOB_SBD_exit);
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
new file mode 100644
index 00000000000..6c5d90c5343
--- /dev/null
+++ b/drivers/staging/spectra/ffsport.h
@@ -0,0 +1,84 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _FFSPORT_
+#define _FFSPORT_
+
+#include "ffsdefs.h"
+
+#if defined __GNUC__
+#define PACKED
+#define PACKED_GNU __attribute__ ((packed))
+#define UNALIGNED
+#endif
+
+#include <linux/semaphore.h>
+#include <linux/string.h>	/* for strcpy(), strcmp(), etc */
+#include <linux/mm.h> /* for kmalloc(), kfree() */
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/pci.h>
+#include "flash.h"
+
+#define VERBOSE 1
+
+#define NAND_DBG_WARN 1
+#define NAND_DBG_DEBUG 2
+#define NAND_DBG_TRACE 3
+
+extern int nand_debug_level;
+
+#ifdef VERBOSE
+#define nand_dbg_print(level, args...) \
+ do { \
+ if (level <= nand_debug_level) \
+ printk(KERN_ALERT args); \
+ } while (0)
+#else
+#define nand_dbg_print(level, args...)
+#endif
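+
+/* Typical use: nand_dbg_print(NAND_DBG_WARN, "blk %d\n", blk); the message is
+ * printed only when the nand_debug_level module parameter is >= the given
+ * level, so NAND_DBG_TRACE output requires nand_debug_level=3. */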
+
+#ifdef SUPPORT_BIG_ENDIAN
+#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
+ (u16)((u16)(w) >> 8))
+
+#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
+ (((u32)(dw) << 8) & 0x00ff0000) | \
+ (((u32)(dw) >> 8) & 0x0000ff00) | \
+ ((u32)(dw) >> 24))
+#else
+#define INVERTUINT16(w) w
+#define INVERTUINT32(dw) dw
+#endif
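+
+/* For example, with SUPPORT_BIG_ENDIAN defined, INVERTUINT16(0x1234) is
+ * 0x3412 and INVERTUINT32(0x11223344) is 0x44332211; without it both
+ * macros are the identity. */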
+
+extern int GLOB_Calc_Used_Bits(u32 n);
+extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
+extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
+
+#endif /* _FFSPORT_ */
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
new file mode 100644
index 00000000000..9b5218b6ada
--- /dev/null
+++ b/drivers/staging/spectra/flash.c
@@ -0,0 +1,4315 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+#include "flash.h"
+#include "ffsdefs.h"
+#include "lld.h"
+#include "lld_nand.h"
+#if CMD_DMA
+#include "lld_cdma.h"
+#endif
+
+#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
+#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
+ DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
+
+#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
+ BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
+
+#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
+
+#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
+ BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
+
+#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
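+
+/* Block table entries combine a physical block number with status bits, so
+ * the helpers above test individual flags: e.g. a spare block has SPARE_BLOCK
+ * set while BAD_BLOCK is clear, and a discarded block has DISCARD_BLOCK set. */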
+
+#if DEBUG_BNDRY
+void debug_boundary_lineno_error(int chnl, int limit, int no,
+ int lineno, char *filename)
+{
+ if (chnl >= limit)
+ printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
+ "at %s:%d. Other info:%d. Aborting...\n",
+ chnl, limit, filename, lineno, no);
+}
+/* static int globalmemsize; */
+#endif
+
+static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
+static int FTL_Cache_Read(u64 dwPageAddr);
+static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
+ u16 cache_blk);
+static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
+ u8 cache_blk, u16 flag);
+static int FTL_Cache_Write(void);
+static void FTL_Calculate_LRU(void);
+static u32 FTL_Get_Block_Index(u32 wBlockNum);
+
+static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
+ u8 BT_Tag, u16 *Page);
+static int FTL_Read_Block_Table(void);
+static int FTL_Write_Block_Table(int wForce);
+static int FTL_Write_Block_Table_Data(void);
+static int FTL_Check_Block_Table(int wOldTable);
+static int FTL_Static_Wear_Leveling(void);
+static u32 FTL_Replace_Block_Table(void);
+static int FTL_Write_IN_Progress_Block_Table_Page(void);
+
+static u32 FTL_Get_Page_Num(u64 length);
+static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
+
+static u32 FTL_Replace_OneBlock(u32 wBlockNum,
+ u32 wReplaceNum);
+static u32 FTL_Replace_LWBlock(u32 wBlockNum,
+ int *pGarbageCollect);
+static u32 FTL_Replace_MWBlock(void);
+static int FTL_Replace_Block(u64 blk_addr);
+static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
+
+struct device_info_tag DeviceInfo;
+struct flash_cache_tag Cache;
+static struct spectra_l2_cache_info cache_l2;
+
+static u8 *cache_l2_page_buf;
+static u8 *cache_l2_blk_buf;
+
+u8 *g_pBlockTable;
+u8 *g_pWearCounter;
+u16 *g_pReadCounter;
+u32 *g_pBTBlocks;
+static u16 g_wBlockTableOffset;
+static u32 g_wBlockTableIndex;
+static u8 g_cBlockTableStatus;
+
+static u8 *g_pTempBuf;
+static u8 *flag_check_blk_table;
+static u8 *tmp_buf_search_bt_in_block;
+static u8 *spare_buf_search_bt_in_block;
+static u8 *spare_buf_bt_search_bt_in_block;
+static u8 *tmp_buf1_read_blk_table;
+static u8 *tmp_buf2_read_blk_table;
+static u8 *flags_static_wear_leveling;
+static u8 *tmp_buf_write_blk_table_data;
+static u8 *tmp_buf_read_disturbance;
+
+u8 *buf_read_page_main_spare;
+u8 *buf_write_page_main_spare;
+u8 *buf_read_page_spare;
+u8 *buf_get_bad_block;
+
+#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
+struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
+struct flash_cache_tag cache_start_copy;
+#endif
+
+int g_wNumFreeBlocks;
+u8 g_SBDCmdIndex;
+
+static u8 *g_pIPF;
+static u8 bt_flag = FIRST_BT_ID;
+static u8 bt_block_changed;
+
+static u16 cache_block_to_write;
+static u8 last_erased = FIRST_BT_ID;
+
+static u8 GC_Called;
+static u8 BT_GC_Called;
+
+#if CMD_DMA
+#define COPY_BACK_BUF_NUM 10
+
+static u8 ftl_cmd_cnt; /* Init value is 0 */
+u8 *g_pBTDelta;
+u8 *g_pBTDelta_Free;
+u8 *g_pBTStartingCopy;
+u8 *g_pWearCounterCopy;
+u16 *g_pReadCounterCopy;
+u8 *g_pBlockTableCopies;
+u8 *g_pNextBlockTable;
+static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
+static int cp_back_buf_idx;
+
+static u8 *g_temp_buf;
+
+#pragma pack(push, 1)
+#pragma pack(1)
+struct BTableChangesDelta {
+ u8 ftl_cmd_cnt;
+ u8 ValidFields;
+ u16 g_wBlockTableOffset;
+ u32 g_wBlockTableIndex;
+ u32 BT_Index;
+ u32 BT_Entry_Value;
+ u32 WC_Index;
+ u8 WC_Entry_Value;
+ u32 RC_Index;
+ u16 RC_Entry_Value;
+};
+
+#pragma pack(pop)
+
+struct BTableChangesDelta *p_BTableChangesDelta;
+#endif
+
+
+#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
+#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
+
+#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
+ sizeof(u32))
+#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
+ sizeof(u8))
+#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
+ sizeof(u16))
+#if SUPPORT_LARGE_BLOCKNUM
+#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
+ sizeof(u8) * 3)
+#else
+#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
+ sizeof(u16))
+#endif
+#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
+ FTL_Get_WearCounter_Table_Mem_Size_Bytes
+#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
+ FTL_Get_ReadCounter_Table_Mem_Size_Bytes
+
+static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
+{
+ u32 byte_num;
+
+ if (DeviceInfo.MLCDevice) {
+ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
+ DeviceInfo.wDataBlockNum * sizeof(u8) +
+ DeviceInfo.wDataBlockNum * sizeof(u16);
+ } else {
+ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
+ DeviceInfo.wDataBlockNum * sizeof(u8);
+ }
+
+ byte_num += 4 * sizeof(u8);
+
+ return byte_num;
+}
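+
+/* Illustration (hypothetical, SUPPORT_LARGE_BLOCKNUM and an MLC device): with
+ * wDataBlockNum = 1000 the on-flash table occupies 3*1000 + 1000 + 2*1000 + 4
+ * = 6004 bytes (LBA->PBA entries, wear counters, read counters, plus 4 extra
+ * bytes). */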
+
+static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
+{
+ return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
+}
+
+static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
+ u32 sizeTxed)
+{
+ u32 wBytesCopied, blk_tbl_size, wBytes;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
+ for (wBytes = 0;
+ (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
+ wBytes++) {
+#if SUPPORT_LARGE_BLOCKNUM
+ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
+ >> (((wBytes + sizeTxed) % 3) ?
+ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
+#else
+ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
+ >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
+#endif
+ }
+
+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
+ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
+ wBytesCopied = wBytes;
+ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
+ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
+ memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
+
+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
+
+ if (DeviceInfo.MLCDevice) {
+ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
+ wBytesCopied += wBytes;
+ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
+ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
+ flashBuf[wBytes + wBytesCopied] =
+ (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
+ (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
+ }
+
+ return wBytesCopied + wBytes;
+}
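+
+/* Note on the packing above (SUPPORT_LARGE_BLOCKNUM case): each 32-bit block
+ * table entry is serialized as three bytes, taken at shifts 16, 8 and 0, so
+ * only the low 24 bits of an entry are preserved across the flash round trip. */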
+
+static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
+ u32 sizeToTx, u32 sizeTxed)
+{
+ u32 wBytesCopied, blk_tbl_size, wBytes;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
+ for (wBytes = 0; (wBytes < sizeToTx) &&
+ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
+#if SUPPORT_LARGE_BLOCKNUM
+ if (!((wBytes + sizeTxed) % 3))
+ pbt[(wBytes + sizeTxed) / 3] = 0;
+ pbt[(wBytes + sizeTxed) / 3] |=
+ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
+ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
+#else
+ if (!((wBytes + sizeTxed) % 2))
+ pbt[(wBytes + sizeTxed) / 2] = 0;
+ pbt[(wBytes + sizeTxed) / 2] |=
+ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
+ 0 : 8));
+#endif
+ }
+
+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
+ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
+ wBytesCopied = wBytes;
+ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
+ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
+ memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
+
+ if (DeviceInfo.MLCDevice) {
+ wBytesCopied += wBytes;
+ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
+ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
+ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
+			if (!((wBytes + sizeTxed) % 2))
+ g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
+ g_pReadCounter[(wBytes + sizeTxed) / 2] |=
+ (flashBuf[wBytes] <<
+ (((wBytes + sizeTxed) % 2) ? 0 : 8));
+ }
+ }
+
+ return wBytesCopied+wBytes;
+}
+
+static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
+{
+ int i;
+
+ for (i = 0; i < BTSIG_BYTES; i++)
+ buf[BTSIG_OFFSET + i] =
+ ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
+ (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
+
+ return PASS;
+}
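+
+/* Illustration (hypothetical constants): with FIRST_BT_ID = 1, LAST_BT_ID = 4
+ * and BTSIG_DELTA = 1, a tag of 1 is written as the rotating sequence
+ * 1, 2, 3, 4, 1, ... starting at BTSIG_OFFSET; FTL_Extract_Block_Table_Tag()
+ * below is meant to recover the tag from that pattern even if a byte or two
+ * is corrupted. */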
+
+static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
+{
+ static u8 tag[BTSIG_BYTES >> 1];
+ int i, j, k, tagi, tagtemp, status;
+
+ *tagarray = (u8 *)tag;
+ tagi = 0;
+
+ for (i = 0; i < (BTSIG_BYTES - 1); i++) {
+ for (j = i + 1; (j < BTSIG_BYTES) &&
+ (tagi < (BTSIG_BYTES >> 1)); j++) {
+ tagtemp = buf[BTSIG_OFFSET + j] -
+ buf[BTSIG_OFFSET + i];
+ if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
+ tagtemp = (buf[BTSIG_OFFSET + i] +
+ (1 + LAST_BT_ID - FIRST_BT_ID) -
+ (i * BTSIG_DELTA)) %
+ (1 + LAST_BT_ID - FIRST_BT_ID);
+ status = FAIL;
+ for (k = 0; k < tagi; k++) {
+ if (tagtemp == tag[k])
+ status = PASS;
+ }
+
+ if (status == FAIL) {
+ tag[tagi++] = tagtemp;
+ i = (j == (i + 1)) ? i + 1 : i;
+ j = (j == (i + 1)) ? i + 1 : i;
+ }
+ }
+ }
+ }
+
+ return tagi;
+}
+
+
+static int FTL_Execute_SPL_Recovery(void)
+{
+ u32 j, block, blks;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ int ret;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
+ for (j = 0; j <= blks; j++) {
+ block = (pbt[j]);
+ if (((block & BAD_BLOCK) != BAD_BLOCK) &&
+ ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
+ ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
+ if (FAIL == ret) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d "
+ "generated!\n",
+ __FILE__, __LINE__, __func__,
+ (int)(block & ~BAD_BLOCK));
+ MARK_BLOCK_AS_BAD(pbt[j]);
+ }
+ }
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_IdentifyDevice
+* Inputs: pointer to identify data structure
+* Outputs: PASS / FAIL
+* Description: the identify data structure is filled in with
+* information for the block driver.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
+ dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
+ dev_data->PageDataSize = DeviceInfo.wPageDataSize;
+ dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
+ dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
+
+ return PASS;
+}
+
+/* ..... */
+static int allocate_memory(void)
+{
+ u32 block_table_size, page_size, block_size, mem_size;
+ u32 total_bytes = 0;
+ int i;
+#if CMD_DMA
+ int j;
+#endif
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ page_size = DeviceInfo.wPageSize;
+ block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
+
+ block_table_size = DeviceInfo.wDataBlockNum *
+ (sizeof(u32) + sizeof(u8) + sizeof(u16));
+ block_table_size += (DeviceInfo.wPageDataSize -
+ (block_table_size % DeviceInfo.wPageDataSize)) %
+ DeviceInfo.wPageDataSize;
+
+ /* Malloc memory for block tables */
+ g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
+ if (!g_pBlockTable)
+ goto block_table_fail;
+ memset(g_pBlockTable, 0, block_table_size);
+ total_bytes += block_table_size;
+
+ g_pWearCounter = (u8 *)(g_pBlockTable +
+ DeviceInfo.wDataBlockNum * sizeof(u32));
+
+ if (DeviceInfo.MLCDevice)
+ g_pReadCounter = (u16 *)(g_pBlockTable +
+ DeviceInfo.wDataBlockNum *
+ (sizeof(u32) + sizeof(u8)));
+
+ /* Malloc memory and init for cache items */
+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
+ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
+ Cache.array[i].use_cnt = 0;
+ Cache.array[i].changed = CLEAR;
+ Cache.array[i].buf = kmalloc(Cache.cache_item_size,
+ GFP_ATOMIC);
+ if (!Cache.array[i].buf)
+ goto cache_item_fail;
+ memset(Cache.array[i].buf, 0, Cache.cache_item_size);
+ total_bytes += Cache.cache_item_size;
+ }
+
+ /* Malloc memory for IPF */
+ g_pIPF = kmalloc(page_size, GFP_ATOMIC);
+ if (!g_pIPF)
+ goto ipf_fail;
+ memset(g_pIPF, 0, page_size);
+ total_bytes += page_size;
+
+ /* Malloc memory for data merging during Level2 Cache flush */
+ cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
+ if (!cache_l2_page_buf)
+ goto cache_l2_page_buf_fail;
+ memset(cache_l2_page_buf, 0xff, page_size);
+ total_bytes += page_size;
+
+ cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
+ if (!cache_l2_blk_buf)
+ goto cache_l2_blk_buf_fail;
+ memset(cache_l2_blk_buf, 0xff, block_size);
+ total_bytes += block_size;
+
+ /* Malloc memory for temp buffer */
+ g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
+ if (!g_pTempBuf)
+ goto Temp_buf_fail;
+ memset(g_pTempBuf, 0, Cache.cache_item_size);
+ total_bytes += Cache.cache_item_size;
+
+ /* Malloc memory for block table blocks */
+ mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
+ g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
+ if (!g_pBTBlocks)
+ goto bt_blocks_fail;
+ memset(g_pBTBlocks, 0xff, mem_size);
+ total_bytes += mem_size;
+
+ /* Malloc memory for function FTL_Check_Block_Table */
+ flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
+ if (!flag_check_blk_table)
+ goto flag_check_blk_table_fail;
+ total_bytes += DeviceInfo.wDataBlockNum;
+
+ /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
+ tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
+ if (!tmp_buf_search_bt_in_block)
+ goto tmp_buf_search_bt_in_block_fail;
+ memset(tmp_buf_search_bt_in_block, 0xff, page_size);
+ total_bytes += page_size;
+
+ mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
+ spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
+ if (!spare_buf_search_bt_in_block)
+ goto spare_buf_search_bt_in_block_fail;
+ memset(spare_buf_search_bt_in_block, 0xff, mem_size);
+ total_bytes += mem_size;
+
+ spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
+ if (!spare_buf_bt_search_bt_in_block)
+ goto spare_buf_bt_search_bt_in_block_fail;
+ memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
+ total_bytes += mem_size;
+
+ /* Malloc memory for function FTL_Read_Block_Table */
+ tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
+ if (!tmp_buf1_read_blk_table)
+ goto tmp_buf1_read_blk_table_fail;
+ memset(tmp_buf1_read_blk_table, 0xff, page_size);
+ total_bytes += page_size;
+
+ tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
+ if (!tmp_buf2_read_blk_table)
+ goto tmp_buf2_read_blk_table_fail;
+ memset(tmp_buf2_read_blk_table, 0xff, page_size);
+ total_bytes += page_size;
+
+ /* Malloc memory for function FTL_Static_Wear_Leveling */
+ flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
+ GFP_ATOMIC);
+ if (!flags_static_wear_leveling)
+ goto flags_static_wear_leveling_fail;
+ total_bytes += DeviceInfo.wDataBlockNum;
+
+ /* Malloc memory for function FTL_Write_Block_Table_Data */
+ if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
+ mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
+ 2 * DeviceInfo.wPageSize;
+ else
+ mem_size = DeviceInfo.wPageSize;
+ tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
+ if (!tmp_buf_write_blk_table_data)
+ goto tmp_buf_write_blk_table_data_fail;
+ memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
+ total_bytes += mem_size;
+
+ /* Malloc memory for function FTL_Read_Disturbance */
+ tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
+ if (!tmp_buf_read_disturbance)
+ goto tmp_buf_read_disturbance_fail;
+ memset(tmp_buf_read_disturbance, 0xff, block_size);
+ total_bytes += block_size;
+
+ /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
+ buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
+ if (!buf_read_page_main_spare)
+ goto buf_read_page_main_spare_fail;
+ total_bytes += DeviceInfo.wPageSize;
+
+ /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
+ buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
+ if (!buf_write_page_main_spare)
+ goto buf_write_page_main_spare_fail;
+ total_bytes += DeviceInfo.wPageSize;
+
+ /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
+ buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
+ if (!buf_read_page_spare)
+ goto buf_read_page_spare_fail;
+ memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
+ total_bytes += DeviceInfo.wPageSpareSize;
+
+ /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
+ buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
+ if (!buf_get_bad_block)
+ goto buf_get_bad_block_fail;
+ memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
+ total_bytes += DeviceInfo.wPageSpareSize;
+
+#if CMD_DMA
+ g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
+ if (!g_temp_buf)
+ goto temp_buf_fail;
+ memset(g_temp_buf, 0xff, block_size);
+ total_bytes += block_size;
+
+ /* Malloc memory for copy of block table used in CDMA mode */
+ g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
+ if (!g_pBTStartingCopy)
+ goto bt_starting_copy;
+ memset(g_pBTStartingCopy, 0, block_table_size);
+ total_bytes += block_table_size;
+
+ g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
+ DeviceInfo.wDataBlockNum * sizeof(u32));
+
+ if (DeviceInfo.MLCDevice)
+ g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
+ DeviceInfo.wDataBlockNum *
+ (sizeof(u32) + sizeof(u8)));
+
+ /* Malloc memory for block table copies */
+ mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
+ 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
+ if (DeviceInfo.MLCDevice)
+ mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
+ g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
+ if (!g_pBlockTableCopies)
+ goto blk_table_copies_fail;
+ memset(g_pBlockTableCopies, 0, mem_size);
+ total_bytes += mem_size;
+ g_pNextBlockTable = g_pBlockTableCopies;
+
+ /* Malloc memory for Block Table Delta */
+ mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
+ g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
+ if (!g_pBTDelta)
+ goto bt_delta_fail;
+ memset(g_pBTDelta, 0, mem_size);
+ total_bytes += mem_size;
+ g_pBTDelta_Free = g_pBTDelta;
+
+ /* Malloc memory for Copy Back Buffers */
+ for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
+ cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
+ if (!cp_back_buf_copies[j])
+ goto cp_back_buf_copies_fail;
+ memset(cp_back_buf_copies[j], 0, block_size);
+ total_bytes += block_size;
+ }
+ cp_back_buf_idx = 0;
+
+ /* Malloc memory for pending commands list */
+ mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
+ info.pcmds = kzalloc(mem_size, GFP_KERNEL);
+ if (!info.pcmds)
+ goto pending_cmds_buf_fail;
+ total_bytes += mem_size;
+
+	/* Malloc memory for CDMA descriptor table */
+ mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
+ info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
+ if (!info.cdma_desc_buf)
+ goto cdma_desc_buf_fail;
+ total_bytes += mem_size;
+
+	/* Malloc memory for Memcpy descriptor table */
+ mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
+ info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
+ if (!info.memcp_desc_buf)
+ goto memcp_desc_buf_fail;
+ total_bytes += mem_size;
+#endif
+
+ nand_dbg_print(NAND_DBG_WARN,
+ "Total memory allocated in FTL layer: %d\n", total_bytes);
+
+ return PASS;
+
+#if CMD_DMA
+memcp_desc_buf_fail:
+ kfree(info.cdma_desc_buf);
+cdma_desc_buf_fail:
+ kfree(info.pcmds);
+pending_cmds_buf_fail:
+cp_back_buf_copies_fail:
+ j--;
+ for (; j >= 0; j--)
+ kfree(cp_back_buf_copies[j]);
+ kfree(g_pBTDelta);
+bt_delta_fail:
+ kfree(g_pBlockTableCopies);
+blk_table_copies_fail:
+ kfree(g_pBTStartingCopy);
+bt_starting_copy:
+ kfree(g_temp_buf);
+temp_buf_fail:
+ kfree(buf_get_bad_block);
+#endif
+
+buf_get_bad_block_fail:
+ kfree(buf_read_page_spare);
+buf_read_page_spare_fail:
+ kfree(buf_write_page_main_spare);
+buf_write_page_main_spare_fail:
+ kfree(buf_read_page_main_spare);
+buf_read_page_main_spare_fail:
+ kfree(tmp_buf_read_disturbance);
+tmp_buf_read_disturbance_fail:
+ kfree(tmp_buf_write_blk_table_data);
+tmp_buf_write_blk_table_data_fail:
+ kfree(flags_static_wear_leveling);
+flags_static_wear_leveling_fail:
+ kfree(tmp_buf2_read_blk_table);
+tmp_buf2_read_blk_table_fail:
+ kfree(tmp_buf1_read_blk_table);
+tmp_buf1_read_blk_table_fail:
+ kfree(spare_buf_bt_search_bt_in_block);
+spare_buf_bt_search_bt_in_block_fail:
+ kfree(spare_buf_search_bt_in_block);
+spare_buf_search_bt_in_block_fail:
+ kfree(tmp_buf_search_bt_in_block);
+tmp_buf_search_bt_in_block_fail:
+ kfree(flag_check_blk_table);
+flag_check_blk_table_fail:
+ kfree(g_pBTBlocks);
+bt_blocks_fail:
+ kfree(g_pTempBuf);
+Temp_buf_fail:
+ kfree(cache_l2_blk_buf);
+cache_l2_blk_buf_fail:
+ kfree(cache_l2_page_buf);
+cache_l2_page_buf_fail:
+ kfree(g_pIPF);
+ipf_fail:
+cache_item_fail:
+ i--;
+ for (; i >= 0; i--)
+ kfree(Cache.array[i].buf);
+ kfree(g_pBlockTable);
+block_table_fail:
+ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
+ __FILE__, __LINE__);
+
+ return -ENOMEM;
+}
+
+/* .... */
+static int free_memory(void)
+{
+ int i;
+
+#if CMD_DMA
+ kfree(info.memcp_desc_buf);
+ kfree(info.cdma_desc_buf);
+ kfree(info.pcmds);
+ for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
+ kfree(cp_back_buf_copies[i]);
+ kfree(g_pBTDelta);
+ kfree(g_pBlockTableCopies);
+ kfree(g_pBTStartingCopy);
+ kfree(g_temp_buf);
+ kfree(buf_get_bad_block);
+#endif
+ kfree(buf_read_page_spare);
+ kfree(buf_write_page_main_spare);
+ kfree(buf_read_page_main_spare);
+ kfree(tmp_buf_read_disturbance);
+ kfree(tmp_buf_write_blk_table_data);
+ kfree(flags_static_wear_leveling);
+ kfree(tmp_buf2_read_blk_table);
+ kfree(tmp_buf1_read_blk_table);
+ kfree(spare_buf_bt_search_bt_in_block);
+ kfree(spare_buf_search_bt_in_block);
+ kfree(tmp_buf_search_bt_in_block);
+ kfree(flag_check_blk_table);
+ kfree(g_pBTBlocks);
+ kfree(g_pTempBuf);
+ kfree(g_pIPF);
+ for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
+ kfree(Cache.array[i].buf);
+ kfree(g_pBlockTable);
+
+ return 0;
+}
+
+static void dump_cache_l2_table(void)
+{
+ struct list_head *p;
+ struct spectra_l2_cache_list *pnd;
+ int n;
+
+ n = 0;
+ list_for_each(p, &cache_l2.table.list) {
+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
+ nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
+/*
+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
+ if (pnd->pages_array[i] != MAX_U32_VALUE)
+ nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
+ }
+*/
+ n++;
+ }
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Init
+* Inputs: none
+* Outputs: PASS=0 / FAIL=1
+* Description: allocates the memory for cache array,
+* important data structures
+* clears the cache array
+* reads the block table from flash into array
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Init(void)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ Cache.pages_per_item = 1;
+ Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
+
+ if (allocate_memory() != PASS)
+ return FAIL;
+
+#if CMD_DMA
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ memcpy((void *)&cache_start_copy, (void *)&Cache,
+ sizeof(struct flash_cache_tag));
+ memset((void *)&int_cache, -1,
+ sizeof(struct flash_cache_delta_list_tag) *
+ (MAX_CHANS + MAX_DESCS));
+#endif
+ ftl_cmd_cnt = 0;
+#endif
+
+ if (FTL_Read_Block_Table() != PASS)
+ return FAIL;
+
+ /* Init the Level2 Cache data structure */
+ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
+ cache_l2.blk_array[i] = MAX_U32_VALUE;
+ cache_l2.cur_blk_idx = 0;
+ cache_l2.cur_page_num = 0;
+ INIT_LIST_HEAD(&cache_l2.table.list);
+ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
+
+ dump_cache_l2_table();
+
+ return 0;
+}
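+
+/* After GLOB_FTL_Init() each of the CACHE_ITEM_NUM level-1 cache entries holds
+ * exactly one page of data (pages_per_item = 1, cache_item_size =
+ * wPageDataSize), and the level-2 cache starts out with an empty block list. */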
+
+
+#if CMD_DMA
+#if 0
+static void save_blk_table_changes(u16 idx)
+{
+ u8 ftl_cmd;
+ u32 *pbt = (u32 *)g_pBTStartingCopy;
+
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ u16 id;
+ u8 cache_blks;
+
+ id = idx - MAX_CHANS;
+ if (int_cache[id].item != -1) {
+ cache_blks = int_cache[id].item;
+ cache_start_copy.array[cache_blks].address =
+ int_cache[id].cache.address;
+ cache_start_copy.array[cache_blks].changed =
+ int_cache[id].cache.changed;
+ }
+#endif
+
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+
+ while (ftl_cmd <= PendingCMD[idx].Tag) {
+ if (p_BTableChangesDelta->ValidFields == 0x01) {
+ g_wBlockTableOffset =
+ p_BTableChangesDelta->g_wBlockTableOffset;
+ } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
+ pbt[p_BTableChangesDelta->BT_Index] =
+ p_BTableChangesDelta->BT_Entry_Value;
+ debug_boundary_error(((
+ p_BTableChangesDelta->BT_Index)),
+ DeviceInfo.wDataBlockNum, 0);
+ } else if (p_BTableChangesDelta->ValidFields == 0x03) {
+ g_wBlockTableOffset =
+ p_BTableChangesDelta->g_wBlockTableOffset;
+ g_wBlockTableIndex =
+ p_BTableChangesDelta->g_wBlockTableIndex;
+ } else if (p_BTableChangesDelta->ValidFields == 0x30) {
+ g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
+ p_BTableChangesDelta->WC_Entry_Value;
+ } else if ((DeviceInfo.MLCDevice) &&
+ (p_BTableChangesDelta->ValidFields == 0xC0)) {
+ g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
+ p_BTableChangesDelta->RC_Entry_Value;
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "In event status setting read counter "
+ "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
+ ftl_cmd,
+ p_BTableChangesDelta->RC_Entry_Value,
+ (unsigned int)p_BTableChangesDelta->RC_Index);
+ } else {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "This should never occur \n");
+ }
+ p_BTableChangesDelta += 1;
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ }
+}
+
+static void discard_cmds(u16 n)
+{
+ u32 *pbt = (u32 *)g_pBTStartingCopy;
+ u8 ftl_cmd;
+ unsigned long k;
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ u8 cache_blks;
+ u16 id;
+#endif
+
+ if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
+ (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
+ for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
+ if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
+ MARK_BLK_AS_DISCARD(pbt[k]);
+ }
+ }
+
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ while (ftl_cmd <= PendingCMD[n].Tag) {
+ p_BTableChangesDelta += 1;
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ }
+
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ id = n - MAX_CHANS;
+
+ if (int_cache[id].item != -1) {
+ cache_blks = int_cache[id].item;
+ if (PendingCMD[n].CMD == MEMCOPY_CMD) {
+ if ((cache_start_copy.array[cache_blks].buf <=
+ PendingCMD[n].DataDestAddr) &&
+ ((cache_start_copy.array[cache_blks].buf +
+ Cache.cache_item_size) >
+ PendingCMD[n].DataDestAddr)) {
+ cache_start_copy.array[cache_blks].address =
+ NAND_CACHE_INIT_ADDR;
+ cache_start_copy.array[cache_blks].use_cnt =
+ 0;
+ cache_start_copy.array[cache_blks].changed =
+ CLEAR;
+ }
+ } else {
+ cache_start_copy.array[cache_blks].address =
+ int_cache[id].cache.address;
+ cache_start_copy.array[cache_blks].changed =
+ int_cache[id].cache.changed;
+ }
+ }
+#endif
+}
+
+static void process_cmd_pass(int *first_failed_cmd, u16 idx)
+{
+ if (0 == *first_failed_cmd)
+ save_blk_table_changes(idx);
+ else
+ discard_cmds(idx);
+}
+
+static void process_cmd_fail_abort(int *first_failed_cmd,
+ u16 idx, int event)
+{
+ u32 *pbt = (u32 *)g_pBTStartingCopy;
+ u8 ftl_cmd;
+ unsigned long i;
+ int erase_fail, program_fail;
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ u8 cache_blks;
+ u16 id;
+#endif
+
+ if (0 == *first_failed_cmd)
+ *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
+
+	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred "
+		"while executing %u Command %u accessing Block %u\n",
+ (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
+ PendingCMD[idx].CMD,
+ (unsigned int)PendingCMD[idx].Block);
+
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ while (ftl_cmd <= PendingCMD[idx].Tag) {
+ p_BTableChangesDelta += 1;
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ }
+
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ id = idx - MAX_CHANS;
+
+ if (int_cache[id].item != -1) {
+ cache_blks = int_cache[id].item;
+ if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
+ cache_start_copy.array[cache_blks].address =
+ int_cache[id].cache.address;
+ cache_start_copy.array[cache_blks].changed = SET;
+ } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
+ cache_start_copy.array[cache_blks].address =
+ NAND_CACHE_INIT_ADDR;
+ cache_start_copy.array[cache_blks].use_cnt = 0;
+ cache_start_copy.array[cache_blks].changed =
+ CLEAR;
+ } else if (PendingCMD[idx].CMD == ERASE_CMD) {
+ /* ? */
+ } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
+ /* ? */
+ }
+ }
+#endif
+
+ erase_fail = (event == EVENT_ERASE_FAILURE) &&
+ (PendingCMD[idx].CMD == ERASE_CMD);
+
+ program_fail = (event == EVENT_PROGRAM_FAILURE) &&
+ ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
+ (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
+
+ if (erase_fail || program_fail) {
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if (PendingCMD[idx].Block ==
+ (pbt[i] & (~BAD_BLOCK)))
+ MARK_BLOCK_AS_BAD(pbt[i]);
+ }
+ }
+}
+
+static void process_cmd(int *first_failed_cmd, u16 idx, int event)
+{
+ u8 ftl_cmd;
+ int cmd_match = 0;
+
+ if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
+ cmd_match = 1;
+
+ if (PendingCMD[idx].Status == CMD_PASS) {
+ process_cmd_pass(first_failed_cmd, idx);
+ } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
+ (PendingCMD[idx].Status == CMD_ABORT)) {
+ process_cmd_fail_abort(first_failed_cmd, idx, event);
+ } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
+ PendingCMD[idx].Tag) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ " Command no. %hu is not executed\n",
+ (unsigned int)PendingCMD[idx].Tag);
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ while (ftl_cmd <= PendingCMD[idx].Tag) {
+ p_BTableChangesDelta += 1;
+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
+ }
+ }
+}
+#endif
+
+static void process_cmd(int *first_failed_cmd, u16 idx, int event)
+{
+	printk(KERN_ERR "Temporary workaround function. "
+		"Should not be called!\n");
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Event_Status
+* Inputs: none
+* Outputs: Event Code
+* Description: It is called by the SBD after a hardware interrupt signals
+* completion of a command chain.
+* It does the following:
+* get event status from LLD
+* analyze command chain status
+* determine last command executed
+* analyze results
+* rebuild the block table in case of uncorrectable error
+* return event code
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Event_Status(int *first_failed_cmd)
+{
+ int event_code = PASS;
+ u16 i_P;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ *first_failed_cmd = 0;
+
+ event_code = GLOB_LLD_Event_Status();
+
+ switch (event_code) {
+ case EVENT_PASS:
+ nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
+ break;
+ case EVENT_UNCORRECTABLE_DATA_ERROR:
+ nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
+ break;
+ case EVENT_PROGRAM_FAILURE:
+ case EVENT_ERASE_FAILURE:
+ nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
+ "Event code: 0x%x\n", event_code);
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta;
+ for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
+ i_P++)
+ process_cmd(first_failed_cmd, i_P, event_code);
+ memcpy(g_pBlockTable, g_pBTStartingCopy,
+ DeviceInfo.wDataBlockNum * sizeof(u32));
+ memcpy(g_pWearCounter, g_pWearCounterCopy,
+ DeviceInfo.wDataBlockNum * sizeof(u8));
+ if (DeviceInfo.MLCDevice)
+ memcpy(g_pReadCounter, g_pReadCounterCopy,
+ DeviceInfo.wDataBlockNum * sizeof(u16));
+
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ memcpy((void *)&Cache, (void *)&cache_start_copy,
+ sizeof(struct flash_cache_tag));
+ memset((void *)&int_cache, -1,
+ sizeof(struct flash_cache_delta_list_tag) *
+ (MAX_DESCS + MAX_CHANS));
+#endif
+ break;
+ default:
+ nand_dbg_print(NAND_DBG_WARN,
+ "Handling unexpected event code - 0x%x\n",
+ event_code);
+ event_code = ERR;
+ break;
+ }
+
+ memcpy(g_pBTStartingCopy, g_pBlockTable,
+ DeviceInfo.wDataBlockNum * sizeof(u32));
+ memcpy(g_pWearCounterCopy, g_pWearCounter,
+ DeviceInfo.wDataBlockNum * sizeof(u8));
+ if (DeviceInfo.MLCDevice)
+ memcpy(g_pReadCounterCopy, g_pReadCounter,
+ DeviceInfo.wDataBlockNum * sizeof(u16));
+
+ g_pBTDelta_Free = g_pBTDelta;
+ ftl_cmd_cnt = 0;
+ g_pNextBlockTable = g_pBlockTableCopies;
+ cp_back_buf_idx = 0;
+
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ memcpy((void *)&cache_start_copy, (void *)&Cache,
+ sizeof(struct flash_cache_tag));
+ memset((void *)&int_cache, -1,
+ sizeof(struct flash_cache_delta_list_tag) *
+ (MAX_DESCS + MAX_CHANS));
+#endif
+
+ return event_code;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: glob_ftl_execute_cmds
+* Inputs: none
+* Outputs: none
+* Description: pass thru to LLD
+***************************************************************/
+u16 glob_ftl_execute_cmds(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE,
+ "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
+ (unsigned int)ftl_cmd_cnt);
+ g_SBDCmdIndex = 0;
+ return glob_lld_execute_cmds();
+}
+
+#endif
+
+#if !CMD_DMA
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Read_Immediate
+* Inputs: pointer to data
+* address of data
+* Outputs: PASS / FAIL
+* Description: Reads one page of data into RAM directly from flash without
+* using or disturbing the cache. It is assumed this function is called
+* with CMD-DMA disabled.
+*****************************************************************/
+int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
+{
+ int wResult = FAIL;
+ u32 Block;
+ u16 Page;
+ u32 phy_blk;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ Block = BLK_FROM_ADDR(addr);
+ Page = PAGE_FROM_ADDR(addr, Block);
+
+ if (!IS_SPARE_BLOCK(Block))
+ return FAIL;
+
+ phy_blk = pbt[Block];
+ wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
+
+ if (DeviceInfo.MLCDevice) {
+ g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
+ if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
+ >= MAX_READ_COUNTER)
+ FTL_Read_Disturbance(phy_blk);
+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+ }
+
+ return wResult;
+}
+#endif
+
+#ifdef SUPPORT_BIG_ENDIAN
+/*********************************************************************
+* Function: FTL_Invert_Block_Table
+* Inputs: none
+* Outputs: none
+* Description: Re-format the block table in ram based on BIG_ENDIAN and
+* LARGE_BLOCKNUM if necessary
+**********************************************************************/
+static void FTL_Invert_Block_Table(void)
+{
+ u32 i;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+#ifdef SUPPORT_LARGE_BLOCKNUM
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ pbt[i] = INVERTUINT32(pbt[i]);
+ g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
+ }
+#else
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ pbt[i] = INVERTUINT16(pbt[i]);
+ g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
+ }
+#endif
+}
+#endif
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Flash_Init
+* Inputs: none
+* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
+* Description: The flash controller is initialized
+* The flash device is reset
+* Perform a flash READ ID command to confirm that a
+* valid device is attached and active.
+* The DeviceInfo structure gets filled in
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Flash_Init(void)
+{
+ int status = FAIL;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ g_SBDCmdIndex = 0;
+
+ GLOB_LLD_Flash_Init();
+
+ status = GLOB_LLD_Read_Device_ID();
+
+ return status;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Flash_Release
+* Inputs: none
+* Outputs: PASS=0 / FAIL=0x01
+* Description: The flash controller is released
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Flash_Release(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return GLOB_LLD_Flash_Release();
+}
+
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Cache_Release
+* Inputs: none
+* Outputs: none
+* Description: Release all the memory allocated in GLOB_FTL_Init
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+void GLOB_FTL_Cache_Release(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ free_memory();
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_If_Hit
+* Inputs: Page Address
+* Outputs: Cache item number / UNHIT_CACHE_ITEM
+* Description: Determines if the addressed page is in cache
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u16 FTL_Cache_If_Hit(u64 page_addr)
+{
+ u16 item;
+ u64 addr;
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ item = UNHIT_CACHE_ITEM;
+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
+ addr = Cache.array[i].address;
+ if ((page_addr >= addr) &&
+ (page_addr < (addr + Cache.cache_item_size))) {
+ item = i;
+ break;
+ }
+ }
+
+ return item;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Calculate_LRU
+* Inputs: None
+* Outputs: None
+* Description: Calculate the least recently used block in the cache and record
+* its index in the LRU field.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static void FTL_Calculate_LRU(void)
+{
+ u16 i, bCurrentLRU, bTempCount;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ bCurrentLRU = 0;
+ bTempCount = MAX_WORD_VALUE;
+
+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
+ if (Cache.array[i].use_cnt < bTempCount) {
+ bCurrentLRU = i;
+ bTempCount = Cache.array[i].use_cnt;
+ }
+ }
+
+ Cache.LRU = bCurrentLRU;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_Read_Page
+* Inputs: pointer to read buffer, logical address and cache item number
+* Outputs: None
+* Description: Read the page from the cached block addressed by blocknumber
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
+{
+ u8 *start_addr;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
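+ /*
+ * Locate the requested page inside the cache item buffer: the offset
+ * is the number of pages between logic_addr and the start address of
+ * the cache item, multiplied by the page data size.
+ */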
+ start_addr = Cache.array[cache_item].buf;
+ start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
+ DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
+
+#if CMD_DMA
+ GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
+ DeviceInfo.wPageDataSize, 0);
+ ftl_cmd_cnt++;
+#else
+ memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
+#endif
+
+ if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
+ Cache.array[cache_item].use_cnt++;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_Read_All
+* Inputs: pointer to read buffer, block address
+* Outputs: PASS=0 / FAIL=1
+* Description: Reads all pages of a cache item from flash into the buffer
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
+{
+ int wResult = PASS;
+ u32 Block;
+ u32 lba;
+ u16 Page;
+ u16 PageCount;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 i;
+
+ Block = BLK_FROM_ADDR(phy_addr);
+ Page = PAGE_FROM_ADDR(phy_addr, Block);
+ PageCount = Cache.pages_per_item;
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "%s, Line %d, Function: %s, Block: 0x%x\n",
+ __FILE__, __LINE__, __func__, Block);
+
+ lba = 0xffffffff;
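+ /*
+ * Find the logical block that maps to this physical block. Spare,
+ * bad and discarded blocks carry no valid data, so the read is
+ * satisfied from a scratch buffer (all 0xFF in the non-CMD_DMA path)
+ * instead of from flash.
+ */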
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if ((pbt[i] & (~BAD_BLOCK)) == Block) {
+ lba = i;
+ if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
+ IS_DISCARDED_BLOCK(i)) {
+ /* Add by yunpeng -2008.12.3 */
+#if CMD_DMA
+ GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
+ PageCount * DeviceInfo.wPageDataSize, 0);
+ ftl_cmd_cnt++;
+#else
+ memset(pData, 0xFF,
+ PageCount * DeviceInfo.wPageDataSize);
+#endif
+ return wResult;
+ } else {
+ continue; /* break ?? */
+ }
+ }
+ }
+
+ if (0xffffffff == lba)
+ printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
+
+#if CMD_DMA
+ wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
+ PageCount, LLD_CMD_FLAG_MODE_CDMA);
+ if (DeviceInfo.MLCDevice) {
+ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Read Counter modified in ftl_cmd_cnt %u"
+ " Block %u Counter%u\n",
+ ftl_cmd_cnt, (unsigned int)Block,
+ g_pReadCounter[Block -
+ DeviceInfo.wSpectraStartBlock]);
+
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->RC_Index =
+ Block - DeviceInfo.wSpectraStartBlock;
+ p_BTableChangesDelta->RC_Entry_Value =
+ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
+ p_BTableChangesDelta->ValidFields = 0xC0;
+
+ ftl_cmd_cnt++;
+
+ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
+ MAX_READ_COUNTER)
+ FTL_Read_Disturbance(Block);
+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+ } else {
+ ftl_cmd_cnt++;
+ }
+#else
+ wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
+ if (wResult == FAIL)
+ return wResult;
+
+ if (DeviceInfo.MLCDevice) {
+ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
+ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
+ MAX_READ_COUNTER)
+ FTL_Read_Disturbance(Block);
+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+ }
+#endif
+ return wResult;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_Write_All
+* Inputs: pointer to cache in sys memory
+* address of free block in flash
+* Outputs: PASS=0 / FAIL=1
+* Description: writes all the pages of the block in cache to flash
+*
+* NOTE: need to make sure this works OK when the cache is limited
+* to a partial block. This is where copy-back would be
+* activated. This would require knowing which pages in the
+* cached block are clean/dirty. Right now we only know if
+* the whole block is clean/dirty.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
+{
+ u16 wResult = PASS;
+ u32 Block;
+ u16 Page;
+ u16 PageCount;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
+ "on %d\n", cache_block_to_write,
+ (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
+
+ Block = BLK_FROM_ADDR(blk_addr);
+ Page = PAGE_FROM_ADDR(blk_addr, Block);
+ PageCount = Cache.pages_per_item;
+
+#if CMD_DMA
+ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
+ Block, Page, PageCount)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d generated! "
+ "Need Bad Block replacing.\n",
+ __FILE__, __LINE__, __func__, Block);
+ wResult = FAIL;
+ }
+ ftl_cmd_cnt++;
+#else
+ if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
+ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
+ " Line %d, Function %s, new Bad Block %d generated!"
+ "Need Bad Block replacing.\n",
+ __FILE__, __LINE__, __func__, Block);
+ wResult = FAIL;
+ }
+#endif
+ return wResult;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Copy_Block
+* Inputs: source block address
+* Destination block address
+* Outputs: PASS=0 / FAIL=1
+* Description: used only for static wear leveling to move the block
+* containing static data to new blocks (more worn)
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
+{
+ int i, r1, r2, wResult = PASS;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
+ r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
+ i * DeviceInfo.wPageDataSize);
+ r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
+ i * DeviceInfo.wPageDataSize);
+ if ((ERR == r1) || (FAIL == r2)) {
+ wResult = FAIL;
+ break;
+ }
+ }
+
+ return wResult;
+}
+
+/* Search the block table to find the least worn spare block and return its index */
+static u32 find_least_worn_blk_for_l2_cache(void)
+{
+ int i;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u8 least_wear_cnt = MAX_BYTE_VALUE;
+ u32 least_wear_blk_idx = MAX_U32_VALUE;
+ u32 phy_idx;
+
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_SPARE_BLOCK(i)) {
+ phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
+ if (phy_idx > DeviceInfo.wSpectraEndBlock)
+ printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
+ "Too big phy block num (%d)\n", phy_idx);
+ if (g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
+ least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
+ least_wear_blk_idx = i;
+ }
+ }
+ }
+
+ nand_dbg_print(NAND_DBG_WARN,
+ "find_least_worn_blk_for_l2_cache: "
+ "find block %d with least worn counter (%d)\n",
+ least_wear_blk_idx, least_wear_cnt);
+
+ return least_wear_blk_idx;
+}
+
+
+
+/* Get blocks for Level2 Cache */
+static int get_l2_cache_blks(void)
+{
+ int n;
+ u32 blk;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
+ blk = find_least_worn_blk_for_l2_cache();
+ if (blk > DeviceInfo.wDataBlockNum) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "find_least_worn_blk_for_l2_cache: "
+ "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
+ return FAIL;
+ }
+ /* Tag the free block as discarded in the block table */
+ pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
+ /* Add the free block to the L2 Cache block array */
+ cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
+ }
+
+ return PASS;
+}
+
+static int erase_l2_cache_blocks(void)
+{
+ int i, ret = PASS;
+ u32 pblk, lblk = BAD_BLOCK;
+ u64 addr;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
+ pblk = cache_l2.blk_array[i];
+
+ /* If the L2 cache block is invalid, then just skip it */
+ if (MAX_U32_VALUE == pblk)
+ continue;
+
+ BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
+
+ addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
+ /* Get logical block number of the erased block */
+ lblk = FTL_Get_Block_Index(pblk);
+ BUG_ON(BAD_BLOCK == lblk);
+ /* Tag it as free in the block table */
+ pbt[lblk] &= (u32)(~DISCARD_BLOCK);
+ pbt[lblk] |= (u32)(SPARE_BLOCK);
+ } else {
+ MARK_BLOCK_AS_BAD(pbt[lblk]);
+ ret = ERR;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Merge the valid data pages in the L2 cache blocks back into NAND.
+ */
+static int flush_l2_cache(void)
+{
+ struct list_head *p;
+ struct spectra_l2_cache_list *pnd, *tmp_pnd;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 phy_blk, l2_blk;
+ u64 addr;
+ u16 l2_page;
+ int i, ret = PASS;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (list_empty(&cache_l2.table.list)) /* No data to flush */
+ return ret;
+
+ /* dump_cache_l2_table(); */
+
+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+
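+ /*
+ * For every logical block that has pages sitting in the L2 cache:
+ * read the current block image from NAND (or start from a blank 0xFF
+ * image when the logical block is spare, bad or discarded), overlay
+ * the cached pages on top of it, then allocate a fresh block via
+ * FTL_Replace_Block() and program the merged image into it.
+ */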
+ list_for_each(p, &cache_l2.table.list) {
+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
+ if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
+ IS_BAD_BLOCK(pnd->logical_blk_num) ||
+ IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
+ memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
+ } else {
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
+ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
+ ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
+ phy_blk, 0, DeviceInfo.wPagesPerBlock);
+ if (ret == FAIL) {
+ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
+ }
+ }
+
+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
+ if (pnd->pages_array[i] != MAX_U32_VALUE) {
+ l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
+ l2_page = pnd->pages_array[i] & 0xffff;
+ ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
+ if (ret == FAIL) {
+ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
+ }
+ memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
+ }
+ }
+
+ /* Find a free block and tag the original block as discarded */
+ addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
+ ret = FTL_Replace_Block(addr);
+ if (ret == FAIL) {
+ printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
+ }
+
+ /* Write back the updated data into NAND */
+ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
+ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Program NAND block %d fail in %s, Line %d\n",
+ phy_blk, __FILE__, __LINE__);
+ /*
+ * This may not really be a bad block, so just tag it as
+ * discarded. Then it has a chance to be erased during garbage
+ * collection. If it is really bad, the erase will fail and it
+ * will be marked as bad then; otherwise it will be marked as
+ * free and can be used again.
+ */
+ MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
+ /* Find another free block and write it again */
+ FTL_Replace_Block(addr);
+ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
+ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
+ printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
+ "Some data will be lost!\n", phy_blk);
+ MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
+ }
+ } else {
+ /* tag the new free block as used block */
+ pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
+ }
+ }
+
+ /* Destroy the L2 Cache table and free the memory of all nodes */
+ list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
+ list_del(&pnd->list);
+ kfree(pnd);
+ }
+
+ /* Erase discard L2 cache blocks */
+ if (erase_l2_cache_blocks() != PASS)
+ nand_dbg_print(NAND_DBG_WARN,
+ " Erase L2 cache blocks error in %s, Line %d\n",
+ __FILE__, __LINE__);
+
+ /* Init the Level2 Cache data structure */
+ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
+ cache_l2.blk_array[i] = MAX_U32_VALUE;
+ cache_l2.cur_blk_idx = 0;
+ cache_l2.cur_page_num = 0;
+ INIT_LIST_HEAD(&cache_l2.table.list);
+ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
+
+ return ret;
+}
+
+/*
+ * Write back a changed victim cache item to the Level2 Cache
+ * and update the L2 Cache table to map the change.
+ * If the L2 Cache is full, then flush it.
+ */
+static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
+{
+ u32 logical_blk_num;
+ u16 logical_page_num;
+ struct list_head *p;
+ struct spectra_l2_cache_list *pnd, *pnd_new;
+ u32 node_size;
+ int i, found;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ /*
+ * If Level2 Cache table is empty, then it means either:
+ * 1. This is the first time that the function is called after FTL_init
+ * or
+ * 2. The Level2 Cache has just been flushed
+ *
+ * So 'steal' some free blocks from NAND for L2 Cache use
+ * by just marking them as discarded in the block table
+ */
+ if (list_empty(&cache_l2.table.list)) {
+ BUG_ON(cache_l2.cur_blk_idx != 0);
+ BUG_ON(cache_l2.cur_page_num != 0);
+ BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
+ if (FAIL == get_l2_cache_blks()) {
+ GLOB_FTL_Garbage_Collection();
+ if (FAIL == get_l2_cache_blks()) {
+ printk(KERN_ALERT "Fail to get L2 cache blks!\n");
+ return FAIL;
+ }
+ }
+ }
+
+ logical_blk_num = BLK_FROM_ADDR(logical_addr);
+ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
+ BUG_ON(logical_blk_num == MAX_U32_VALUE);
+
+ /* Write the cache item data into the current position of L2 Cache */
+#if CMD_DMA
+ /*
+ * TODO
+ */
+#else
+ if (FAIL == GLOB_LLD_Write_Page_Main(buf,
+ cache_l2.blk_array[cache_l2.cur_blk_idx],
+ cache_l2.cur_page_num, 1)) {
+ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
+ "%s, Line %d, new Bad Block %d generated!\n",
+ __FILE__, __LINE__,
+ cache_l2.blk_array[cache_l2.cur_blk_idx]);
+
+ /* TODO: tag the current block as bad and try again */
+
+ return FAIL;
+ }
+#endif
+
+ /*
+ * Update the L2 Cache table.
+ *
+ * First search the table to see whether the logical block
+ * has been mapped. If not, then kmalloc a new node for the
+ * logical block, fill data, and then insert it to the list.
+ * Otherwise, just update the mapped node directly.
+ */
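+ /*
+ * Each pages_array entry packs the L2 cache location of one logical
+ * page: the upper 16 bits index cache_l2.blk_array and the lower
+ * 16 bits are the page within that block. MAX_U32_VALUE means the
+ * page is not cached.
+ */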
+ found = 0;
+ list_for_each(p, &cache_l2.table.list) {
+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
+ if (pnd->logical_blk_num == logical_blk_num) {
+ pnd->pages_array[logical_page_num] =
+ (cache_l2.cur_blk_idx << 16) |
+ cache_l2.cur_page_num;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) { /* Create new node for the logical block here */
+
+ /* The logical pages to physical pages map array is
+ * located at the end of struct spectra_l2_cache_list.
+ */
+ node_size = sizeof(struct spectra_l2_cache_list) +
+ sizeof(u32) * DeviceInfo.wPagesPerBlock;
+ pnd_new = kmalloc(node_size, GFP_ATOMIC);
+ if (!pnd_new) {
+ printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
+ __FILE__, __LINE__);
+ /*
+ * TODO: Need to flush all the L2 cache into NAND ASAP
+ * since no memory available here
+ */
+ return FAIL;
+ }
+ pnd_new->logical_blk_num = logical_blk_num;
+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
+ pnd_new->pages_array[i] = MAX_U32_VALUE;
+ pnd_new->pages_array[logical_page_num] =
+ (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
+ list_add(&pnd_new->list, &cache_l2.table.list);
+ }
+
+ /* Increasing the current position pointer of the L2 Cache */
+ cache_l2.cur_page_num++;
+ if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
+ cache_l2.cur_blk_idx++;
+ if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
+ /* The L2 Cache is full. Need to flush it now */
+ nand_dbg_print(NAND_DBG_WARN,
+ "L2 Cache is full, will start to flush it\n");
+ flush_l2_cache();
+ } else {
+ cache_l2.cur_page_num = 0;
+ }
+ }
+
+ return PASS;
+}
+
+/*
+ * Search the Level2 Cache table to find the cache item.
+ * If found, read the data from the NAND page of the L2 Cache;
+ * otherwise, return FAIL.
+ */
+static int search_l2_cache(u8 *buf, u64 logical_addr)
+{
+ u32 logical_blk_num;
+ u16 logical_page_num;
+ struct list_head *p;
+ struct spectra_l2_cache_list *pnd;
+ u32 tmp = MAX_U32_VALUE;
+ u32 phy_blk;
+ u16 phy_page;
+ int ret = FAIL;
+
+ logical_blk_num = BLK_FROM_ADDR(logical_addr);
+ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
+
+ list_for_each(p, &cache_l2.table.list) {
+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
+ if (pnd->logical_blk_num == logical_blk_num) {
+ tmp = pnd->pages_array[logical_page_num];
+ break;
+ }
+ }
+
+ if (tmp != MAX_U32_VALUE) { /* Found valid map */
+ phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
+ phy_page = tmp & 0xFFFF;
+#if CMD_DMA
+ /* TODO */
+#else
+ ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
+#endif
+ }
+
+ return ret;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_Write_Page
+* Inputs: Pointer to buffer, page address, cache block number
+* Outputs: PASS=0 / FAIL=1
+* Description: It writes the data in Cache Block
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
+ u8 cache_blk, u16 flag)
+{
+ u8 *pDest;
+ u64 addr;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ addr = Cache.array[cache_blk].address;
+ pDest = Cache.array[cache_blk].buf;
+
+ pDest += (unsigned long)(page_addr - addr);
+ Cache.array[cache_blk].changed = SET;
+#if CMD_DMA
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ int_cache[ftl_cmd_cnt].item = cache_blk;
+ int_cache[ftl_cmd_cnt].cache.address =
+ Cache.array[cache_blk].address;
+ int_cache[ftl_cmd_cnt].cache.changed =
+ Cache.array[cache_blk].changed;
+#endif
+ GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
+ ftl_cmd_cnt++;
+#else
+ memcpy(pDest, pData, DeviceInfo.wPageDataSize);
+#endif
+ if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
+ Cache.array[cache_blk].use_cnt++;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_Write
+* Inputs: none
+* Outputs: PASS=0 / FAIL=1
+* Description: It writes least frequently used Cache block to flash if it
+* has been changed
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Cache_Write(void)
+{
+ int i, bResult = PASS;
+ u16 bNO, least_count = 0xFFFF;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ FTL_Calculate_LRU();
+
+ bNO = Cache.LRU;
+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
+ "Least used cache block is %d\n", bNO);
+
+ if (Cache.array[bNO].changed != SET)
+ return bResult;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
+ " Block %d containing logical block %d is dirty\n",
+ bNO,
+ (u32)(Cache.array[bNO].address >>
+ DeviceInfo.nBitsInBlockDataSize));
+#if CMD_DMA
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ int_cache[ftl_cmd_cnt].item = bNO;
+ int_cache[ftl_cmd_cnt].cache.address =
+ Cache.array[bNO].address;
+ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
+#endif
+#endif
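+ /*
+ * Dirty L1 cache items are not written straight back to their data
+ * blocks; they are staged in the L2 cache first and only merged
+ * into NAND when the L2 cache is flushed.
+ */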
+ bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
+ Cache.array[bNO].address);
+ if (bResult != ERR)
+ Cache.array[bNO].changed = CLEAR;
+
+ least_count = Cache.array[bNO].use_cnt;
+
+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
+ if (i == bNO)
+ continue;
+ if (Cache.array[i].use_cnt > 0)
+ Cache.array[i].use_cnt -= least_count;
+ }
+
+ return bResult;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Cache_Read
+* Inputs: Page address
+* Outputs: PASS=0 / FAIL=1
+* Description: It reads the block from device in Cache Block
+* Set the LRU count to 1
+* Mark the Cache Block as clean
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Cache_Read(u64 logical_addr)
+{
+ u64 item_addr, phy_addr;
+ u16 num;
+ int ret;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ num = Cache.LRU; /* The LRU cache item will be overwritten */
+
+ item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
+ Cache.cache_item_size;
+ Cache.array[num].address = item_addr;
+ Cache.array[num].use_cnt = 1;
+ Cache.array[num].changed = CLEAR;
+
+#if CMD_DMA
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ int_cache[ftl_cmd_cnt].item = num;
+ int_cache[ftl_cmd_cnt].cache.address =
+ Cache.array[num].address;
+ int_cache[ftl_cmd_cnt].cache.changed =
+ Cache.array[num].changed;
+#endif
+#endif
+ /*
+ * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
+ * Otherwise, read it from NAND
+ */
+ ret = search_l2_cache(Cache.array[num].buf, logical_addr);
+ if (PASS == ret) /* Hit in L2 Cache */
+ return ret;
+
+ /* Compute the physical start address of NAND device according to */
+ /* the logical start address of the cache item (LRU cache item) */
+ phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
+ GLOB_u64_Remainder(item_addr, 2);
+
+ return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Check_Block_Table
+* Inputs: ?
+* Outputs: PASS=0 / FAIL=1
+* Description: It checks the correctness of each block table entry
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Check_Block_Table(int wOldTable)
+{
+ u32 i;
+ int wResult = PASS;
+ u32 blk_idx;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u8 *pFlag = flag_check_blk_table;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (NULL != pFlag) {
+ memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
+
+ /*
+ * 20081006/KBV - Changed to pFlag[i] reference
+ * to avoid buffer overflow
+ */
+
+ /*
+ * 2008-10-20 Yunpeng Note: This change avoids the
+ * buffer overflow, but it changed the behaviour of
+ * the code, so it should be rewritten later
+ */
+ if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
+ PASS == pFlag[i]) {
+ wResult = FAIL;
+ break;
+ } else {
+ pFlag[i] = PASS;
+ }
+ }
+ }
+
+ return wResult;
+}
+
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Write_Block_Table
+* Inputs: flag
+* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
+* happen. -1 Error
+* Description: It writes the block table
+* Block table is always mapped to LBA 0 which in turn is mapped
+* to any physical block
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Write_Block_Table(int wForce)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+ int wSuccess = PASS;
+ u32 wTempBlockTableIndex;
+ u16 bt_pages, new_bt_offset;
+ u8 blockchangeoccured = 0;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
+
+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
+ return 0;
+
+ if (PASS == wForce) {
+ g_wBlockTableOffset =
+ (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->g_wBlockTableOffset =
+ g_wBlockTableOffset;
+ p_BTableChangesDelta->ValidFields = 0x01;
+#endif
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Inside FTL_Write_Block_Table: block %d Page:%d\n",
+ g_wBlockTableIndex, g_wBlockTableOffset);
+
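+ /*
+ * Keep writing the block table until a copy sticks: if the next copy
+ * (bt_pages plus one more page) would not fit in the current
+ * block-table block, or the previous attempt failed, switch to a
+ * replacement block first; a failed write marks the block bad and
+ * the loop retries with another one.
+ */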
+ do {
+ new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
+ if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
+ (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
+ (FAIL == wSuccess)) {
+ wTempBlockTableIndex = FTL_Replace_Block_Table();
+ if (BAD_BLOCK == wTempBlockTableIndex)
+ return ERR;
+ if (!blockchangeoccured) {
+ bt_block_changed = 1;
+ blockchangeoccured = 1;
+ }
+
+ g_wBlockTableIndex = wTempBlockTableIndex;
+ g_wBlockTableOffset = 0;
+ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->g_wBlockTableOffset =
+ g_wBlockTableOffset;
+ p_BTableChangesDelta->g_wBlockTableIndex =
+ g_wBlockTableIndex;
+ p_BTableChangesDelta->ValidFields = 0x03;
+
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free +=
+ sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index =
+ BLOCK_TABLE_INDEX;
+ p_BTableChangesDelta->BT_Entry_Value =
+ pbt[BLOCK_TABLE_INDEX];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+#endif
+ }
+
+ wSuccess = FTL_Write_Block_Table_Data();
+ if (FAIL == wSuccess)
+ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
+ } while (FAIL == wSuccess);
+
+ g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
+
+ return 1;
+}
+
+static int force_format_nand(void)
+{
+ u32 i;
+
+ /* Force erase the whole unprotected physical partition of NAND */
+ printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
+ printk(KERN_ALERT "From physical block %d to %d\n",
+ DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
+ for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
+ if (GLOB_LLD_Erase_Block(i))
+ printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
+ }
+ printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
+ while (1)
+ ;
+
+ return PASS;
+}
+
+int GLOB_FTL_Flash_Format(void)
+{
+ /* return FTL_Format_Flash(1); */
+ return force_format_nand();
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Search_Block_Table_IN_Block
+* Inputs: Block Number
+* Pointer to page
+* Outputs: PASS / FAIL
+* Page containing the block table
+* Description: It searches the block table in the block
+* passed as an argument.
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
+ u8 BT_Tag, u16 *Page)
+{
+ u16 i, j, k;
+ u16 Result = PASS;
+ u16 Last_IPF = 0;
+ u8 BT_Found = 0;
+ u8 *tagarray;
+ u8 *tempbuf = tmp_buf_search_bt_in_block;
+ u8 *pSpareBuf = spare_buf_search_bt_in_block;
+ u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
+ u8 bt_flag_last_page = 0xFF;
+ u8 search_in_previous_pages = 0;
+ u16 bt_pages;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Searching block table in %u block\n",
+ (unsigned int)BT_Block);
+
+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
+
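+ /*
+ * Block table copies appear to be laid out back to back in this
+ * block, each copy taking bt_pages pages followed by one extra page
+ * whose main area matches g_pIPF (presumably an in-progress-flag
+ * page). Step through the block at that stride to find the last IPF
+ * page, then verify the block table tags in the spare areas of the
+ * first and last pages of the copy next to it.
+ */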
+ for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
+ i += (bt_pages + 1)) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Searching last IPF: %d\n", i);
+ Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
+ BT_Block, i, 1);
+
+ if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
+ if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
+ continue;
+ } else {
+ search_in_previous_pages = 1;
+ Last_IPF = i;
+ }
+ }
+
+ if (!search_in_previous_pages) {
+ if (i != bt_pages) {
+ i -= (bt_pages + 1);
+ Last_IPF = i;
+ }
+ }
+
+ if (0 == Last_IPF)
+ break;
+
+ if (!search_in_previous_pages) {
+ i = i + 1;
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Reading the spare area of Block %u Page %u",
+ (unsigned int)BT_Block, i);
+ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
+ BT_Block, i, 1);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Reading the spare area of Block %u Page %u",
+ (unsigned int)BT_Block, i + bt_pages - 1);
+ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
+ BT_Block, i + bt_pages - 1, 1);
+
+ k = 0;
+ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
+ if (j) {
+ for (; k < j; k++) {
+ if (tagarray[k] == BT_Tag)
+ break;
+ }
+ }
+
+ if (k < j)
+ bt_flag = tagarray[k];
+ else
+ Result = FAIL;
+
+ if (Result == PASS) {
+ k = 0;
+ j = FTL_Extract_Block_Table_Tag(
+ pSpareBufBTLastPage, &tagarray);
+ if (j) {
+ for (; k < j; k++) {
+ if (tagarray[k] == BT_Tag)
+ break;
+ }
+ }
+
+ if (k < j)
+ bt_flag_last_page = tagarray[k];
+ else
+ Result = FAIL;
+
+ if (Result == PASS) {
+ if (bt_flag == bt_flag_last_page) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block table is found"
+ " in page after IPF "
+ "at block %d "
+ "page %d\n",
+ (int)BT_Block, i);
+ BT_Found = 1;
+ *Page = i;
+ g_cBlockTableStatus =
+ CURRENT_BLOCK_TABLE;
+ break;
+ } else {
+ Result = FAIL;
+ }
+ }
+ }
+ }
+
+ if (search_in_previous_pages)
+ i = i - bt_pages;
+ else
+ i = i - (bt_pages + 1);
+
+ Result = PASS;
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Reading the spare area of Block %d Page %d",
+ (int)BT_Block, i);
+
+ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Reading the spare area of Block %u Page %u",
+ (unsigned int)BT_Block, i + bt_pages - 1);
+
+ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
+ BT_Block, i + bt_pages - 1, 1);
+
+ k = 0;
+ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
+ if (j) {
+ for (; k < j; k++) {
+ if (tagarray[k] == BT_Tag)
+ break;
+ }
+ }
+
+ if (k < j)
+ bt_flag = tagarray[k];
+ else
+ Result = FAIL;
+
+ if (Result == PASS) {
+ k = 0;
+ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
+ &tagarray);
+ if (j) {
+ for (; k < j; k++) {
+ if (tagarray[k] == BT_Tag)
+ break;
+ }
+ }
+
+ if (k < j) {
+ bt_flag_last_page = tagarray[k];
+ } else {
+ Result = FAIL;
+ break;
+ }
+
+ if (Result == PASS) {
+ if (bt_flag == bt_flag_last_page) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block table is found "
+ "in page prior to IPF "
+ "at block %u page %d\n",
+ (unsigned int)BT_Block, i);
+ BT_Found = 1;
+ *Page = i;
+ g_cBlockTableStatus =
+ IN_PROGRESS_BLOCK_TABLE;
+ break;
+ } else {
+ Result = FAIL;
+ break;
+ }
+ }
+ }
+ }
+
+ if (Result == FAIL) {
+ if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
+ BT_Found = 1;
+ *Page = i - (bt_pages + 1);
+ }
+ if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
+ goto func_return;
+ }
+
+ if (Last_IPF == 0) {
+ i = 0;
+ Result = PASS;
+ nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
+ "Block %u Page %u", (unsigned int)BT_Block, i);
+
+ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Reading the spare area of Block %u Page %u",
+ (unsigned int)BT_Block, i + bt_pages - 1);
+ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
+ BT_Block, i + bt_pages - 1, 1);
+
+ k = 0;
+ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
+ if (j) {
+ for (; k < j; k++) {
+ if (tagarray[k] == BT_Tag)
+ break;
+ }
+ }
+
+ if (k < j)
+ bt_flag = tagarray[k];
+ else
+ Result = FAIL;
+
+ if (Result == PASS) {
+ k = 0;
+ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
+ &tagarray);
+ if (j) {
+ for (; k < j; k++) {
+ if (tagarray[k] == BT_Tag)
+ break;
+ }
+ }
+
+ if (k < j)
+ bt_flag_last_page = tagarray[k];
+ else
+ Result = FAIL;
+
+ if (Result == PASS) {
+ if (bt_flag == bt_flag_last_page) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block table is found "
+ "in page after IPF at "
+ "block %u page %u\n",
+ (unsigned int)BT_Block,
+ (unsigned int)i);
+ BT_Found = 1;
+ *Page = i;
+ g_cBlockTableStatus =
+ CURRENT_BLOCK_TABLE;
+ goto func_return;
+ } else {
+ Result = FAIL;
+ }
+ }
+ }
+
+ if (Result == FAIL)
+ goto func_return;
+ }
+func_return:
+ return Result;
+}
+
+u8 *get_blk_table_start_addr(void)
+{
+ return g_pBlockTable;
+}
+
+unsigned long get_blk_table_len(void)
+{
+ return DeviceInfo.wDataBlockNum * sizeof(u32);
+}
+
+u8 *get_wear_leveling_table_start_addr(void)
+{
+ return g_pWearCounter;
+}
+
+unsigned long get_wear_leveling_table_len(void)
+{
+ return DeviceInfo.wDataBlockNum * sizeof(u8);
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Read_Block_Table
+* Inputs: none
+* Outputs: PASS / FAIL
+* Description: read the flash spare area and find a block containing the
+* most recent block table (having the largest block_table_counter).
+* Find the last written Block table in this block.
+* Check the correctness of Block Table
+* If CDMA is enabled, this function is called in
+* polling mode.
+* We don't need to store changes in Block table in this
+* function as it is called only at initialization
+*
+* Note: Currently this function is called at initialization
+* before any read/erase/write command issued to flash so,
+* there is no need to wait for CDMA list to complete as of now
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Read_Block_Table(void)
+{
+ u16 i = 0;
+ int k, j;
+ u8 *tempBuf, *tagarray;
+ int wResult = FAIL;
+ int status = FAIL;
+ u8 block_table_found = 0;
+ int search_result;
+ u32 Block;
+ u16 Page = 0;
+ u16 PageCount;
+ u16 bt_pages;
+ int wBytesCopied = 0, tempvar;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ tempBuf = tmp_buf1_read_blk_table;
+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
+
+ for (j = DeviceInfo.wSpectraStartBlock;
+ j <= (int)DeviceInfo.wSpectraEndBlock;
+ j++) {
+ status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
+ k = 0;
+ i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
+ if (i) {
+ status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
+ j, 0, 1);
+ for (; k < i; k++) {
+ if (tagarray[k] == tempBuf[3])
+ break;
+ }
+ }
+
+ if (k < i)
+ k = tagarray[k];
+ else
+ continue;
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block table is contained in Block %d %d\n",
+ (unsigned int)j, (unsigned int)k);
+
+ if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
+ g_pBTBlocks[k-FIRST_BT_ID] = j;
+ block_table_found = 1;
+ } else {
+ printk(KERN_ERR "FTL_Read_Block_Table -"
+ "This should never happens. "
+ "Two block table have same counter %u!\n", k);
+ }
+ }
+
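+ /*
+ * g_pBTBlocks[] now records, per block table tag, the physical block
+ * a table with that tag was found in. The tags appear to be assigned
+ * round-robin between FIRST_BT_ID and LAST_BT_ID, so work out which
+ * tag is the most recently written one and search backwards from it
+ * for a block table copy that passes FTL_Check_Block_Table().
+ */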
+ if (block_table_found) {
+ if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
+ g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
+ j = LAST_BT_ID;
+ while ((j > FIRST_BT_ID) &&
+ (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
+ j--;
+ if (j == FIRST_BT_ID) {
+ j = LAST_BT_ID;
+ last_erased = LAST_BT_ID;
+ } else {
+ last_erased = (u8)j + 1;
+ while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
+ g_pBTBlocks[j - FIRST_BT_ID]))
+ j--;
+ }
+ } else {
+ j = FIRST_BT_ID;
+ while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
+ j++;
+ last_erased = (u8)j;
+ while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
+ g_pBTBlocks[j - FIRST_BT_ID]))
+ j++;
+ if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
+ j--;
+ }
+
+ if (last_erased > j)
+ j += (1 + LAST_BT_ID - FIRST_BT_ID);
+
+ for (; (j >= last_erased) && (FAIL == wResult); j--) {
+ i = (j - FIRST_BT_ID) %
+ (1 + LAST_BT_ID - FIRST_BT_ID);
+ search_result =
+ FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
+ i + FIRST_BT_ID, &Page);
+ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
+ block_table_found = 0;
+
+ while ((search_result == PASS) && (FAIL == wResult)) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "FTL_Read_Block_Table:"
+ "Block: %u Page: %u "
+ "contains block table\n",
+ (unsigned int)g_pBTBlocks[i],
+ (unsigned int)Page);
+
+ tempBuf = tmp_buf2_read_blk_table;
+
+ for (k = 0; k < bt_pages; k++) {
+ Block = g_pBTBlocks[i];
+ PageCount = 1;
+
+ status =
+ GLOB_LLD_Read_Page_Main_Polling(
+ tempBuf, Block, Page, PageCount);
+
+ tempvar = k ? 0 : 4;
+
+ wBytesCopied +=
+ FTL_Copy_Block_Table_From_Flash(
+ tempBuf + tempvar,
+ DeviceInfo.wPageDataSize - tempvar,
+ wBytesCopied);
+
+ Page++;
+ }
+
+ wResult = FTL_Check_Block_Table(FAIL);
+ if (FAIL == wResult) {
+ block_table_found = 0;
+ if (Page > bt_pages)
+ Page -= ((bt_pages<<1) + 1);
+ else
+ search_result = FAIL;
+ }
+ }
+ }
+ }
+
+ if (PASS == wResult) {
+ if (!block_table_found)
+ FTL_Execute_SPL_Recovery();
+
+ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
+ g_wBlockTableOffset = (u16)Page + 1;
+ else
+ g_wBlockTableOffset = (u16)Page - bt_pages;
+
+ g_wBlockTableIndex = (u32)g_pBTBlocks[i];
+
+#if CMD_DMA
+ if (DeviceInfo.MLCDevice)
+ memcpy(g_pBTStartingCopy, g_pBlockTable,
+ DeviceInfo.wDataBlockNum * sizeof(u32)
+ + DeviceInfo.wDataBlockNum * sizeof(u8)
+ + DeviceInfo.wDataBlockNum * sizeof(u16));
+ else
+ memcpy(g_pBTStartingCopy, g_pBlockTable,
+ DeviceInfo.wDataBlockNum * sizeof(u32)
+ + DeviceInfo.wDataBlockNum * sizeof(u8));
+#endif
+ }
+
+ if (FAIL == wResult)
+ printk(KERN_ERR "Yunpeng - "
+ "Can not find valid spectra block table!\n");
+
+#if AUTO_FORMAT_FLASH
+ if (FAIL == wResult) {
+ nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
+ wResult = FTL_Format_Flash(0);
+ }
+#endif
+
+ return wResult;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Get_Page_Num
+* Inputs: Size in bytes
+* Outputs: Size in pages
+* Description: It calculates the pages required for the length passed
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u32 FTL_Get_Page_Num(u64 length)
+{
+ return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
+ (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Get_Physical_Block_Addr
+* Inputs: Block Address (byte format)
+* Outputs: Physical address of the block.
+* Description: It translates LBA to PBA by returning address stored
+* at the LBA location in the block table
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
+{
+ u32 *pbt;
+ u64 physical_addr;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ pbt = (u32 *)g_pBlockTable;
+ physical_addr = (u64) DeviceInfo.wBlockDataSize *
+ (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
+
+ return physical_addr;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Get_Block_Index
+* Inputs: Physical Block no.
+* Outputs: Logical block no. /BAD_BLOCK
+* Description: It returns the logical block no. for the PBA passed
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u32 FTL_Get_Block_Index(u32 wBlockNum)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
+ if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
+ return i;
+
+ return BAD_BLOCK;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Wear_Leveling
+* Inputs: none
+* Outputs: PASS=0
+* Description: This is static wear leveling (done by explicit call)
+* do complete static wear leveling
+* do complete garbage collection
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Wear_Leveling(void)
+{
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ FTL_Static_Wear_Leveling();
+ GLOB_FTL_Garbage_Collection();
+
+ return PASS;
+}
+
+static void find_least_most_worn(u8 *chg,
+ u32 *least_idx, u8 *least_cnt,
+ u32 *most_idx, u8 *most_cnt)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 idx;
+ u8 cnt;
+ int i;
+
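+ /*
+ * Walk the block table, skipping bad blocks and blocks already
+ * handled in this wear-leveling pass: spare (free) blocks are
+ * candidates for the most worn block, data blocks for the least
+ * worn one.
+ */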
+ for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_BAD_BLOCK(i) || PASS == chg[i])
+ continue;
+
+ idx = (u32) ((~BAD_BLOCK) & pbt[i]);
+ cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
+
+ if (IS_SPARE_BLOCK(i)) {
+ if (cnt > *most_cnt) {
+ *most_cnt = cnt;
+ *most_idx = idx;
+ }
+ }
+
+ if (IS_DATA_BLOCK(i)) {
+ if (cnt < *least_cnt) {
+ *least_cnt = cnt;
+ *least_idx = idx;
+ }
+ }
+
+ if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
+ debug_boundary_error(*most_idx,
+ DeviceInfo.wDataBlockNum, 0);
+ debug_boundary_error(*least_idx,
+ DeviceInfo.wDataBlockNum, 0);
+ continue;
+ }
+ }
+}
+
+static int move_blks_for_wear_leveling(u8 *chg,
+ u32 *least_idx, u32 *rep_blk_num, int *result)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 rep_blk;
+ int j, ret_cp_blk, ret_erase;
+ int ret = PASS;
+
+ chg[*least_idx] = PASS;
+ debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
+
+ rep_blk = FTL_Replace_MWBlock();
+ if (rep_blk != BAD_BLOCK) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "More than two spare blocks exist so do it\n");
+ nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
+ rep_blk);
+
+ chg[rep_blk] = PASS;
+
+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+
+ for (j = 0; j < RETRY_TIMES; j++) {
+ ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
+ DeviceInfo.wBlockDataSize,
+ (u64)rep_blk * DeviceInfo.wBlockDataSize);
+ if (FAIL == ret_cp_blk) {
+ ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
+ * DeviceInfo.wBlockDataSize);
+ if (FAIL == ret_erase)
+ MARK_BLOCK_AS_BAD(pbt[rep_blk]);
+ } else {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "FTL_Copy_Block == OK\n");
+ break;
+ }
+ }
+
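+ /*
+ * If one of the copy attempts succeeded, swap the two block
+ * table entries so the static data now lives in the more worn
+ * replacement block, and tag the old block as discarded so
+ * garbage collection can reclaim it later. Otherwise mark the
+ * replacement block as bad.
+ */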
+ if (j < RETRY_TIMES) {
+ u32 tmp;
+ u32 old_idx = FTL_Get_Block_Index(*least_idx);
+ u32 rep_idx = FTL_Get_Block_Index(rep_blk);
+ tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
+ pbt[old_idx] = (u32)((~SPARE_BLOCK) &
+ pbt[rep_idx]);
+ pbt[rep_idx] = tmp;
+#if CMD_DMA
+ p_BTableChangesDelta = (struct BTableChangesDelta *)
+ g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = old_idx;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+
+ p_BTableChangesDelta = (struct BTableChangesDelta *)
+ g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = rep_idx;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+#endif
+ } else {
+ pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
+#if CMD_DMA
+ p_BTableChangesDelta = (struct BTableChangesDelta *)
+ g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index =
+ FTL_Get_Block_Index(rep_blk);
+ p_BTableChangesDelta->BT_Entry_Value =
+ pbt[FTL_Get_Block_Index(rep_blk)];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+#endif
+ *result = FAIL;
+ ret = FAIL;
+ }
+
+ if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
+ ret = FAIL;
+ } else {
+ printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
+ ret = FAIL;
+ }
+
+ return ret;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Static_Wear_Leveling
+* Inputs: none
+* Outputs: PASS=0 / FAIL=1
+* Description: This is static wear leveling (done by explicit call)
+* search for most&least used
+* if difference < GATE:
+* update the block table with exhange
+* mark block table in flash as IN_PROGRESS
+* copy flash block
+* the caller should handle GC clean up after calling this function
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int FTL_Static_Wear_Leveling(void)
+{
+ u8 most_worn_cnt;
+ u8 least_worn_cnt;
+ u32 most_worn_idx;
+ u32 least_worn_idx;
+ int result = PASS;
+ int go_on = PASS;
+ u32 replaced_blks = 0;
+ u8 *chang_flag = flags_static_wear_leveling;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (!chang_flag)
+ return FAIL;
+
+ memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
+ while (go_on == PASS) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "starting static wear leveling\n");
+ most_worn_cnt = 0;
+ least_worn_cnt = 0xFF;
+ least_worn_idx = BLOCK_TABLE_INDEX;
+ most_worn_idx = BLOCK_TABLE_INDEX;
+
+ find_least_most_worn(chang_flag, &least_worn_idx,
+ &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Used and least worn is block %u, whos count is %u\n",
+ (unsigned int)least_worn_idx,
+ (unsigned int)least_worn_cnt);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Free and most worn is block %u, whos count is %u\n",
+ (unsigned int)most_worn_idx,
+ (unsigned int)most_worn_cnt);
+
+ if ((most_worn_cnt > least_worn_cnt) &&
+ (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
+ go_on = move_blks_for_wear_leveling(chang_flag,
+ &least_worn_idx, &replaced_blks, &result);
+ else
+ go_on = FAIL;
+ }
+
+ return result;
+}
+
+#if CMD_DMA
+static int do_garbage_collection(u32 discard_cnt)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 pba;
+ u8 bt_block_erased = 0;
+ int i, cnt, ret = FAIL;
+ u64 addr;
+
+ i = 0;
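+ /*
+ * The (ftl_cmd_cnt + 28) < 256 bound presumably keeps the CDMA
+ * command chain from overflowing; stop queueing more erases once the
+ * FTL command counter gets close to that limit.
+ */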
+ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
+ ((ftl_cmd_cnt + 28) < 256)) {
+ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
+ (pbt[i] & DISCARD_BLOCK)) {
+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+
+ addr = FTL_Get_Physical_Block_Addr((u64)i *
+ DeviceInfo.wBlockDataSize);
+ pba = BLK_FROM_ADDR(addr);
+
+ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
+ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "GC will erase BT block %u\n",
+ (unsigned int)pba);
+ discard_cnt--;
+ i++;
+ bt_block_erased = 1;
+ break;
+ }
+ }
+
+ if (bt_block_erased) {
+ bt_block_erased = 0;
+ continue;
+ }
+
+ addr = FTL_Get_Physical_Block_Addr((u64)i *
+ DeviceInfo.wBlockDataSize);
+
+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
+ pbt[i] &= (u32)(~DISCARD_BLOCK);
+ pbt[i] |= (u32)(SPARE_BLOCK);
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)
+ g_pBTDelta_Free;
+ g_pBTDelta_Free +=
+ sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt - 1;
+ p_BTableChangesDelta->BT_Index = i;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+ discard_cnt--;
+ ret = PASS;
+ } else {
+ MARK_BLOCK_AS_BAD(pbt[i]);
+ }
+ }
+
+ i++;
+ }
+
+ return ret;
+}
+
+#else
+static int do_garbage_collection(u32 discard_cnt)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 pba;
+ u8 bt_block_erased = 0;
+ int i, cnt, ret = FAIL;
+ u64 addr;
+
+ i = 0;
+ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
+ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
+ (pbt[i] & DISCARD_BLOCK)) {
+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+
+ addr = FTL_Get_Physical_Block_Addr((u64)i *
+ DeviceInfo.wBlockDataSize);
+ pba = BLK_FROM_ADDR(addr);
+
+ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
+ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "GC will erase BT block %d\n",
+ pba);
+ discard_cnt--;
+ i++;
+ bt_block_erased = 1;
+ break;
+ }
+ }
+
+ if (bt_block_erased) {
+ bt_block_erased = 0;
+ continue;
+ }
+
+ /* If the discard block is L2 cache block, then just skip it */
+ for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
+ if (cache_l2.blk_array[cnt] == pba) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "GC will erase L2 cache blk %d\n",
+ pba);
+ break;
+ }
+ }
+ if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
+ discard_cnt--;
+ i++;
+ continue;
+ }
+
+ addr = FTL_Get_Physical_Block_Addr((u64)i *
+ DeviceInfo.wBlockDataSize);
+
+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
+ pbt[i] &= (u32)(~DISCARD_BLOCK);
+ pbt[i] |= (u32)(SPARE_BLOCK);
+ discard_cnt--;
+ ret = PASS;
+ } else {
+ MARK_BLOCK_AS_BAD(pbt[i]);
+ }
+ }
+
+ i++;
+ }
+
+ return ret;
+}
+#endif
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Garbage_Collection
+* Inputs: none
+* Outputs: PASS / FAIL
+* Description: search the block table for all discarded blocks to erase
+* for each discarded block:
+* set the flash block to IN_PROGRESS
+* erase the block
+* update the block table
+* write the block table to flash
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Garbage_Collection(void)
+{
+ u32 i;
+ u32 wDiscard = 0;
+ int wResult = FAIL;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (GC_Called) {
+ printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
+ "has been re-entered! Exit.\n");
+ return PASS;
+ }
+
+ GC_Called = 1;
+
+ GLOB_FTL_BT_Garbage_Collection();
+
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_DISCARDED_BLOCK(i))
+ wDiscard++;
+ }
+
+ if (wDiscard <= 0) {
+ GC_Called = 0;
+ return wResult;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Found %d discarded blocks\n", wDiscard);
+
+ FTL_Write_Block_Table(FAIL);
+
+ wResult = do_garbage_collection(wDiscard);
+
+ FTL_Write_Block_Table(FAIL);
+
+ GC_Called = 0;
+
+ return wResult;
+}
+
+
+#if CMD_DMA
+static int do_bt_garbage_collection(void)
+{
+ u32 pba, lba;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
+ u64 addr;
+ int i, ret = FAIL;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (BT_GC_Called)
+ return PASS;
+
+ BT_GC_Called = 1;
+
+ for (i = last_erased; (i <= LAST_BT_ID) &&
+ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
+ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
+ ((ftl_cmd_cnt + 28)) < 256; i++) {
+ pba = pBTBlocksNode[i - FIRST_BT_ID];
+ lba = FTL_Get_Block_Index(pba);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "do_bt_garbage_collection: pba %d, lba %d\n",
+ pba, lba);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block Table Entry: %d", pbt[lba]);
+
+ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
+ (pbt[lba] & DISCARD_BLOCK)) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "do_bt_garbage_collection_cdma: "
+ "Erasing Block tables present in block %d\n",
+ pba);
+ addr = FTL_Get_Physical_Block_Addr((u64)lba *
+ DeviceInfo.wBlockDataSize);
+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
+ pbt[lba] &= (u32)(~DISCARD_BLOCK);
+ pbt[lba] |= (u32)(SPARE_BLOCK);
+
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)
+ g_pBTDelta_Free;
+ g_pBTDelta_Free +=
+ sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt - 1;
+ p_BTableChangesDelta->BT_Index = lba;
+ p_BTableChangesDelta->BT_Entry_Value =
+ pbt[lba];
+
+ p_BTableChangesDelta->ValidFields = 0x0C;
+
+ ret = PASS;
+ pBTBlocksNode[last_erased - FIRST_BT_ID] =
+ BTBLOCK_INVAL;
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "resetting bt entry at index %d "
+ "value %d\n", i,
+ pBTBlocksNode[i - FIRST_BT_ID]);
+ if (last_erased == LAST_BT_ID)
+ last_erased = FIRST_BT_ID;
+ else
+ last_erased++;
+ } else {
+ MARK_BLOCK_AS_BAD(pbt[lba]);
+ }
+ }
+ }
+
+ BT_GC_Called = 0;
+
+ return ret;
+}
+
+#else
+static int do_bt_garbage_collection(void)
+{
+ u32 pba, lba;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
+ u64 addr;
+ int i, ret = FAIL;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (BT_GC_Called)
+ return PASS;
+
+ BT_GC_Called = 1;
+
+ for (i = last_erased; (i <= LAST_BT_ID) &&
+ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
+ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
+ pba = pBTBlocksNode[i - FIRST_BT_ID];
+ lba = FTL_Get_Block_Index(pba);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
+ pba, lba);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block Table Entry: %d", pbt[lba]);
+
+ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
+ (pbt[lba] & DISCARD_BLOCK)) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "do_bt_garbage_collection: "
+ "Erasing Block tables present in block %d\n",
+ pba);
+ addr = FTL_Get_Physical_Block_Addr((u64)lba *
+ DeviceInfo.wBlockDataSize);
+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
+ pbt[lba] &= (u32)(~DISCARD_BLOCK);
+ pbt[lba] |= (u32)(SPARE_BLOCK);
+ ret = PASS;
+ pBTBlocksNode[last_erased - FIRST_BT_ID] =
+ BTBLOCK_INVAL;
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "resetting bt entry at index %d "
+ "value %d\n", i,
+ pBTBlocksNode[i - FIRST_BT_ID]);
+ if (last_erased == LAST_BT_ID)
+ last_erased = FIRST_BT_ID;
+ else
+ last_erased++;
+ } else {
+ MARK_BLOCK_AS_BAD(pbt[lba]);
+ }
+ }
+ }
+
+ BT_GC_Called = 0;
+
+ return ret;
+}
+
+#endif
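The loop guard in both variants of do_bt_garbage_collection() probes a slot a couple of positions ahead of the running index i in the g_pBTBlocks[] ring, wrapping with the modulus; the trailing `+ FIRST_BT_ID - FIRST_BT_ID` in the original expression cancels to zero. A standalone sketch of just that index arithmetic, using the FIRST_BT_ID/LAST_BT_ID values from flash.h and a few sample values of i:

#include <stdio.h>

#define FIRST_BT_ID 1
#define LAST_BT_ID  254

int main(void)
{
	int samples[] = { 1, 2, 252, 253, 254 };
	int n = sizeof(samples) / sizeof(samples[0]);
	int k;

	for (k = 0; k < n; k++) {
		int i = samples[k];
		/* FIRST_BT_ID - FIRST_BT_ID is 0, so the probed slot is
		 * simply (i + 2) modulo the ring size of 254 slots.    */
		int slot = (i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID);

		printf("i = %3d -> probes g_pBTBlocks[%d] (own slot is %d)\n",
		       i, slot, i - FIRST_BT_ID);
	}
	return 0;
}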
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_BT_Garbage_Collection
+* Inputs: none
+* Outputs: PASS / FAIL
+* Description: Erases discarded blocks containing Block table
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_BT_Garbage_Collection(void)
+{
+ return do_bt_garbage_collection();
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Replace_OneBlock
+* Inputs: Block number 1
+* Block number 2
+* Outputs: Replaced Block Number
+* Description: Interchange the block table entries at blk and rep_blk
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
+{
+ u32 tmp_blk;
+ u32 replace_node = BAD_BLOCK;
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (rep_blk != BAD_BLOCK) {
+ if (IS_BAD_BLOCK(blk))
+ tmp_blk = pbt[blk];
+ else
+ tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
+
+ replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
+ pbt[blk] = replace_node;
+ pbt[rep_blk] = tmp_blk;
+
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = blk;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
+
+ p_BTableChangesDelta->ValidFields = 0x0C;
+
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = rep_blk;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+#endif
+ }
+
+ return replace_node;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Write_Block_Table_Data
+* Inputs: None
+* Outputs: PASS=0 / FAIL=1
+* Description: Write the block table data to flash
+* For the first and the last page, write the data plus the
+* BT flag/signature; otherwise write the data only.
+* The BT flag is a counter; it is incremented each time the
+* block table is written to a new block
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Write_Block_Table_Data(void)
+{
+ u64 dwBlockTableAddr, pTempAddr;
+ u32 Block;
+ u16 Page, PageCount;
+ u8 *tempBuf = tmp_buf_write_blk_table_data;
+ int wBytesCopied;
+ u16 bt_pages;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ dwBlockTableAddr =
+ (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
+ (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
+ pTempAddr = dwBlockTableAddr;
+
+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
+
+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
+ "page= %d BlockTableIndex= %d "
+ "BlockTableOffset=%d\n", bt_pages,
+ g_wBlockTableIndex, g_wBlockTableOffset);
+
+ Block = BLK_FROM_ADDR(pTempAddr);
+ Page = PAGE_FROM_ADDR(pTempAddr, Block);
+ PageCount = 1;
+
+ if (bt_block_changed) {
+ if (bt_flag == LAST_BT_ID) {
+ bt_flag = FIRST_BT_ID;
+ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
+ } else if (bt_flag < LAST_BT_ID) {
+ bt_flag++;
+ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
+ }
+
+ if ((bt_flag > (LAST_BT_ID-4)) &&
+ g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
+ BTBLOCK_INVAL) {
+ bt_block_changed = 0;
+ GLOB_FTL_BT_Garbage_Collection();
+ }
+
+ bt_block_changed = 0;
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Block Table Counter is %u Block %u\n",
+ bt_flag, (unsigned int)Block);
+ }
+
+ memset(tempBuf, 0, 3);
+ tempBuf[3] = bt_flag;
+ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
+ DeviceInfo.wPageDataSize - 4, 0);
+ memset(&tempBuf[wBytesCopied + 4], 0xff,
+ DeviceInfo.wPageSize - (wBytesCopied + 4));
+ FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
+ bt_flag);
+
+#if CMD_DMA
+ memcpy(g_pNextBlockTable, tempBuf,
+ DeviceInfo.wPageSize * sizeof(u8));
+ nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
+ "Block %u Page %u\n", (unsigned int)Block, Page);
+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
+ Block, Page, 1,
+ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
+ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
+ "%s, Line %d, Function: %s, "
+ "new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__, Block);
+ goto func_return;
+ }
+
+ ftl_cmd_cnt++;
+ g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
+#else
+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, Function: %s, "
+ "new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__, Block);
+ goto func_return;
+ }
+#endif
+
+ if (bt_pages > 1) {
+ PageCount = bt_pages - 1;
+ if (PageCount > 1) {
+ wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
+ DeviceInfo.wPageDataSize * (PageCount - 1),
+ wBytesCopied);
+
+#if CMD_DMA
+ memcpy(g_pNextBlockTable, tempBuf,
+ (PageCount - 1) * DeviceInfo.wPageDataSize);
+ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
+ g_pNextBlockTable, Block, Page + 1,
+ PageCount - 1)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, "
+ "new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__,
+ (int)Block);
+ goto func_return;
+ }
+
+ ftl_cmd_cnt++;
+ g_pNextBlockTable += (PageCount - 1) *
+ DeviceInfo.wPageDataSize * sizeof(u8);
+#else
+ if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
+ Block, Page + 1, PageCount - 1)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, "
+ "new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__,
+ (int)Block);
+ goto func_return;
+ }
+#endif
+ }
+
+ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
+ DeviceInfo.wPageDataSize, wBytesCopied);
+ memset(&tempBuf[wBytesCopied], 0xff,
+ DeviceInfo.wPageSize-wBytesCopied);
+ FTL_Insert_Block_Table_Signature(
+ &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
+#if CMD_DMA
+ memcpy(g_pNextBlockTable, tempBuf,
+ DeviceInfo.wPageSize * sizeof(u8));
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Writing the last Page of Block Table "
+ "Block %u Page %u\n",
+ (unsigned int)Block, Page + bt_pages - 1);
+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
+ g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
+ LLD_CMD_FLAG_MODE_CDMA |
+ LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__, Block);
+ goto func_return;
+ }
+ ftl_cmd_cnt++;
+#else
+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
+ Block, Page+bt_pages - 1, 1)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, "
+ "new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__, Block);
+ goto func_return;
+ }
+#endif
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
+
+func_return:
+ return PASS;
+}
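The first page written by FTL_Write_Block_Table_Data() carries three zero bytes, the bt_flag counter at offset 3, table data from offset 4, 0xFF padding to the end of the page, and the signature placed at the start of the spare area. A standalone sketch of that layout with a made-up page geometry; a single byte stands in for both the table data and the signature:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned page_data_size = 32;	/* hypothetical; real parts use 2048/4096 */
	const unsigned page_size = 40;		/* data area + spare area */
	uint8_t page[40];
	uint8_t bt_flag = 7;			/* running block-table counter */
	unsigned copied = 20;			/* table bytes held by this page */
	unsigned i;

	memset(page, 0, 3);				/* bytes 0..2 are zero */
	page[3] = bt_flag;				/* counter in byte 3 */
	memset(&page[4], 0xAB, copied);			/* stand-in for table data */
	memset(&page[4 + copied], 0xff,
	       page_size - (copied + 4));		/* pad data + spare with 0xFF */
	page[page_data_size] = bt_flag;			/* stand-in for the signature */

	for (i = 0; i < page_size; i++)
		printf("%02x%c", page[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}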
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Replace_Block_Table
+* Inputs: None
+* Outputs: PASS=0 / FAIL=1
+* Description: Get a new block to write block table
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u32 FTL_Replace_Block_Table(void)
+{
+ u32 blk;
+ int gc;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
+
+ if ((BAD_BLOCK == blk) && (PASS == gc)) {
+ GLOB_FTL_Garbage_Collection();
+ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
+ }
+ if (BAD_BLOCK == blk)
+ printk(KERN_ERR "%s, %s: There is no spare block. "
+ "It should never happen\n",
+ __FILE__, __func__);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
+
+ return blk;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Replace_LWBlock
+* Inputs: Block number
+* Pointer to Garbage Collect flag
+* Outputs: New block table entry for the block, or BAD_BLOCK
+* Description: Determine the least worn block by traversing
+* the block table
+* Set the garbage collection flag if the number of spare
+* blocks is less than the free block gate count
+* Change the block table entry to map the least worn block
+* for the current operation
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
+{
+ u32 i;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u8 wLeastWornCounter = 0xFF;
+ u32 wLeastWornIndex = BAD_BLOCK;
+ u32 wSpareBlockNum = 0;
+ u32 wDiscardBlockNum = 0;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (IS_SPARE_BLOCK(wBlockNum)) {
+ *pGarbageCollect = FAIL;
+ pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
+ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+#endif
+ return pbt[wBlockNum];
+ }
+
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_DISCARDED_BLOCK(i))
+ wDiscardBlockNum++;
+
+ if (IS_SPARE_BLOCK(i)) {
+ u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
+ if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
+ printk(KERN_ERR "FTL_Replace_LWBlock: "
+ "This should never occur!\n");
+ if (g_pWearCounter[wPhysicalIndex -
+ DeviceInfo.wSpectraStartBlock] <
+ wLeastWornCounter) {
+ wLeastWornCounter =
+ g_pWearCounter[wPhysicalIndex -
+ DeviceInfo.wSpectraStartBlock];
+ wLeastWornIndex = i;
+ }
+ wSpareBlockNum++;
+ }
+ }
+
+ nand_dbg_print(NAND_DBG_WARN,
+ "FTL_Replace_LWBlock: Least Worn Counter %d\n",
+ (int)wLeastWornCounter);
+
+ if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
+ (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
+ *pGarbageCollect = PASS;
+ else
+ *pGarbageCollect = FAIL;
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
+ " Blocks %u\n",
+ (unsigned int)wDiscardBlockNum,
+ (unsigned int)wSpareBlockNum);
+
+ return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
+}
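A standalone sketch of the least-worn scan performed above: walk the table, consider only entries that are spare (and not bad), and remember the one whose wear counter is lowest. The table, the wear counters and the start block of 10 are made up; the flag values match flash.h:

#include <stdint.h>
#include <stdio.h>

#define SPARE_BLOCK 0x400000
#define BAD_BLOCK   0xC00000

int main(void)
{
	/* physical blocks start at 10 in this made-up layout */
	uint32_t pbt[5]  = { 10, SPARE_BLOCK | 11, 12, SPARE_BLOCK | 13, BAD_BLOCK | 14 };
	uint8_t  wear[5] = { 9, 4, 7, 2, 0 };	/* indexed by physical block - 10 */
	uint8_t  least = 0xFF;
	int      least_idx = -1;
	int      i;

	for (i = 0; i < 5; i++) {
		if ((pbt[i] & BAD_BLOCK) == SPARE_BLOCK) {	/* spare and not bad */
			uint32_t phys = pbt[i] & ~BAD_BLOCK;

			if (wear[phys - 10] < least) {
				least = wear[phys - 10];
				least_idx = i;
			}
		}
	}
	printf("least worn spare: logical %d (wear %u)\n", least_idx, (unsigned)least);
	return 0;
}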
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Replace_MWBlock
+* Inputs: None
+* Outputs: most worn spare block no./BAD_BLOCK
+* Description: Find the most worn spare block
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static u32 FTL_Replace_MWBlock(void)
+{
+ u32 i;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u8 wMostWornCounter = 0;
+ u32 wMostWornIndex = BAD_BLOCK;
+ u32 wSpareBlockNum = 0;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_SPARE_BLOCK(i)) {
+ u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
+ if (g_pWearCounter[wPhysicalIndex -
+ DeviceInfo.wSpectraStartBlock] >
+ wMostWornCounter) {
+ wMostWornCounter =
+ g_pWearCounter[wPhysicalIndex -
+ DeviceInfo.wSpectraStartBlock];
+ wMostWornIndex = wPhysicalIndex;
+ }
+ wSpareBlockNum++;
+ }
+ }
+
+ if (wSpareBlockNum <= 2)
+ return BAD_BLOCK;
+
+ return wMostWornIndex;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Replace_Block
+* Inputs: Block Address
+* Outputs: PASS=0 / FAIL=1
+* Description: If block specified by blk_addr parameter is not free,
+* replace it with the least worn block.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Replace_Block(u64 blk_addr)
+{
+ u32 current_blk = BLK_FROM_ADDR(blk_addr);
+ u32 *pbt = (u32 *)g_pBlockTable;
+ int wResult = PASS;
+ int GarbageCollect = FAIL;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (IS_SPARE_BLOCK(current_blk)) {
+ pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = current_blk;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
+ p_BTableChangesDelta->ValidFields = 0x0C ;
+#endif
+ return wResult;
+ }
+
+ FTL_Replace_LWBlock(current_blk, &GarbageCollect);
+
+ if (PASS == GarbageCollect)
+ wResult = GLOB_FTL_Garbage_Collection();
+
+ return wResult;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Is_BadBlock
+* Inputs: block number to test
+* Outputs: PASS (block is BAD) / FAIL (block is not bad)
+* Description: test if this block number is flagged as bad
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
+{
+ u32 *pbt = (u32 *)g_pBlockTable;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (wBlockNum >= DeviceInfo.wSpectraStartBlock
+ && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
+ return PASS;
+ else
+ return FAIL;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Flush_Cache
+* Inputs: none
+* Outputs: PASS=0 / FAIL=1
+* Description: flush all the cache blocks to flash
+* if a cache block is not dirty, don't do anything with it
+* else, write the block and update the block table
+* Note: This function should be called at shutdown/power down
+* to write important data to the device
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Flush_Cache(void)
+{
+ int i, ret;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
+ if (SET == Cache.array[i].changed) {
+#if CMD_DMA
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+ int_cache[ftl_cmd_cnt].item = i;
+ int_cache[ftl_cmd_cnt].cache.address =
+ Cache.array[i].address;
+ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
+#endif
+#endif
+ ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
+ if (PASS == ret) {
+ Cache.array[i].changed = CLEAR;
+ } else {
+ printk(KERN_ALERT "Failed when write back to L2 cache!\n");
+ /* TODO - How to handle this? */
+ }
+ }
+ }
+
+ flush_l2_cache();
+
+ return FTL_Write_Block_Table(FAIL);
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Page_Read
+* Inputs: pointer to data
+* logical address of data (u64 is LBA * Bytes/Page)
+* Outputs: PASS=0 / FAIL=1
+* Description: reads a page of data into RAM from the cache
+* if the data is not already in cache, read from flash to cache
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
+{
+ u16 cache_item;
+ int res = PASS;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
+ "page_addr: %llu\n", logical_addr);
+
+ cache_item = FTL_Cache_If_Hit(logical_addr);
+
+ if (UNHIT_CACHE_ITEM == cache_item) {
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "GLOB_FTL_Page_Read: Cache not hit\n");
+ res = FTL_Cache_Write();
+ if (ERR == FTL_Cache_Read(logical_addr))
+ res = ERR;
+ cache_item = Cache.LRU;
+ }
+
+ FTL_Cache_Read_Page(data, logical_addr, cache_item);
+
+ return res;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Page_Write
+* Inputs: pointer to data
+* address of data (ADDRESSTYPE is LBA * Bytes/Page)
+* Outputs: PASS=0 / FAIL=1
+* Description: writes a page of data from RAM to the cache
+* if the data is not already in cache, write back the
+* least recently used block and read the addressed block
+* from flash to cache
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
+{
+ u16 cache_blk;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ int wResult = PASS;
+
+ nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
+ "dwPageAddr: %llu\n", dwPageAddr);
+
+ cache_blk = FTL_Cache_If_Hit(dwPageAddr);
+
+ if (UNHIT_CACHE_ITEM == cache_blk) {
+ wResult = FTL_Cache_Write();
+ if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
+ wResult = FTL_Replace_Block(dwPageAddr);
+ pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
+ if (wResult == FAIL)
+ return FAIL;
+ }
+ if (ERR == FTL_Cache_Read(dwPageAddr))
+ wResult = ERR;
+ cache_blk = Cache.LRU;
+ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
+ } else {
+#if CMD_DMA
+ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
+ LLD_CMD_FLAG_ORDER_BEFORE_REST);
+#else
+ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
+#endif
+ }
+
+ return wResult;
+}
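Both entry points above hinge on the same hit/miss scheme: look the page address up in the small item cache and, on a miss, write the least-recently-used item back and reuse its slot. A trimmed-down standalone sketch of that flow with made-up addresses and a fixed LRU slot (the driver recomputes Cache.LRU elsewhere):

#include <stdint.h>
#include <stdio.h>

#define ITEMS 4
#define MISS  0xFFFF

static uint64_t slot_addr[ITEMS] = { 0, 4096, 8192, 12288 };
static uint16_t lru = 2;		/* slot reused on the next miss */

static uint16_t cache_if_hit(uint64_t addr)
{
	uint16_t i;

	for (i = 0; i < ITEMS; i++)
		if (slot_addr[i] == addr)
			return i;
	return MISS;
}

int main(void)
{
	uint64_t addr = 20480;		/* not currently cached */
	uint16_t item = cache_if_hit(addr);

	if (item == MISS) {
		/* the driver would flush the dirty LRU item to flash here */
		slot_addr[lru] = addr;
		item = lru;
	}
	printf("address %llu served from cache slot %u\n",
	       (unsigned long long)addr, (unsigned)item);
	return 0;
}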
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: GLOB_FTL_Block_Erase
+* Inputs: address of block to erase (now in byte format, should change to
+* block format)
+* Outputs: PASS=0 / FAIL=1
+* Description: erases the specified block
+* increments the erase count
+* If erase count reaches its upper limit,call function to
+* do the adjustment as per the relative erase count values
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int GLOB_FTL_Block_Erase(u64 blk_addr)
+{
+ int status;
+ u32 BlkIdx;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
+
+ if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
+ printk(KERN_ERR "GLOB_FTL_Block_Erase: "
+ "This should never occur\n");
+ return FAIL;
+ }
+
+#if CMD_DMA
+ status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
+ if (status == FAIL)
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__, BlkIdx);
+#else
+ status = GLOB_LLD_Erase_Block(BlkIdx);
+ if (status == FAIL) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__, BlkIdx);
+ return status;
+ }
+#endif
+
+ if (DeviceInfo.MLCDevice) {
+ g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+ }
+
+ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
+
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->WC_Index =
+ BlkIdx - DeviceInfo.wSpectraStartBlock;
+ p_BTableChangesDelta->WC_Entry_Value =
+ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
+ p_BTableChangesDelta->ValidFields = 0x30;
+
+ if (DeviceInfo.MLCDevice) {
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->RC_Index =
+ BlkIdx - DeviceInfo.wSpectraStartBlock;
+ p_BTableChangesDelta->RC_Entry_Value =
+ g_pReadCounter[BlkIdx -
+ DeviceInfo.wSpectraStartBlock];
+ p_BTableChangesDelta->ValidFields = 0xC0;
+ }
+
+ ftl_cmd_cnt++;
+#endif
+
+ if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
+ FTL_Adjust_Relative_Erase_Count(BlkIdx);
+
+ return status;
+}
+
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Adjust_Relative_Erase_Count
+* Inputs: index to block that was just incremented and is at the max
+* Outputs: PASS=0 / FAIL=1
+* Description: If any erase counts at MAX, adjusts erase count of every
+* block by subtracting the least worn
+* counter from the counter value of every entry in the wear table
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
+{
+ u8 wLeastWornCounter = MAX_BYTE_VALUE;
+ u8 wWearCounter;
+ u32 i, wWearIndex;
+ u32 *pbt = (u32 *)g_pBlockTable;
+ int wResult = PASS;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_BAD_BLOCK(i))
+ continue;
+ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
+
+ if (wWearIndex < DeviceInfo.wSpectraStartBlock)
+ printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
+ "This should never occur\n");
+ wWearCounter = g_pWearCounter[wWearIndex -
+ DeviceInfo.wSpectraStartBlock];
+ if (wWearCounter < wLeastWornCounter)
+ wLeastWornCounter = wWearCounter;
+ }
+
+ if (wLeastWornCounter == 0) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Adjusting Wear Levelling Counters: Special Case\n");
+ g_pWearCounter[Index_of_MAX -
+ DeviceInfo.wSpectraStartBlock]--;
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->WC_Index =
+ Index_of_MAX - DeviceInfo.wSpectraStartBlock;
+ p_BTableChangesDelta->WC_Entry_Value =
+ g_pWearCounter[Index_of_MAX -
+ DeviceInfo.wSpectraStartBlock];
+ p_BTableChangesDelta->ValidFields = 0x30;
+#endif
+ FTL_Static_Wear_Leveling();
+ } else {
+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
+ if (!IS_BAD_BLOCK(i)) {
+ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
+ g_pWearCounter[wWearIndex -
+ DeviceInfo.wSpectraStartBlock] =
+ (u8)(g_pWearCounter
+ [wWearIndex -
+ DeviceInfo.wSpectraStartBlock] -
+ wLeastWornCounter);
+#if CMD_DMA
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free +=
+ sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->WC_Index = wWearIndex -
+ DeviceInfo.wSpectraStartBlock;
+ p_BTableChangesDelta->WC_Entry_Value =
+ g_pWearCounter[wWearIndex -
+ DeviceInfo.wSpectraStartBlock];
+ p_BTableChangesDelta->ValidFields = 0x30;
+#endif
+ }
+ }
+
+ return wResult;
+}
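A worked standalone example of the normalisation branch above: subtracting the least worn value from every (non-bad) counter preserves the relative wear ordering while restoring headroom below the 8-bit ceiling. The counter values are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t wear[6] = { 254, 200, 190, 187, 210, 230 };
	uint8_t least = 0xFF;
	int i;

	for (i = 0; i < 6; i++)
		if (wear[i] < least)
			least = wear[i];	/* 187 here */

	for (i = 0; i < 6; i++) {
		wear[i] = (uint8_t)(wear[i] - least);
		printf("block %d: wear now %u\n", i, (unsigned)wear[i]);
	}
	/* 254 -> 67, 187 -> 0, etc.; the differences between blocks are unchanged */
	return 0;
}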
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Write_IN_Progress_Block_Table_Page
+* Inputs: None
+* Outputs: PASS / FAIL / ERR
+* Description: Writes the in-progress flag page to the page that
+* follows the block table
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+static int FTL_Write_IN_Progress_Block_Table_Page(void)
+{
+ int wResult = PASS;
+ u16 bt_pages;
+ u16 dwIPFPageAddr;
+#if CMD_DMA
+#else
+ u32 *pbt = (u32 *)g_pBlockTable;
+ u32 wTempBlockTableIndex;
+#endif
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
+
+ dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
+ "Block %d Page %d\n",
+ g_wBlockTableIndex, dwIPFPageAddr);
+
+#if CMD_DMA
+ wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
+ g_wBlockTableIndex, dwIPFPageAddr, 1,
+ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
+ if (wResult == FAIL) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__,
+ g_wBlockTableIndex);
+ }
+ g_wBlockTableOffset = dwIPFPageAddr + 1;
+ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
+ p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
+ p_BTableChangesDelta->ValidFields = 0x01;
+ ftl_cmd_cnt++;
+#else
+ wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
+ g_wBlockTableIndex, dwIPFPageAddr, 1);
+ if (wResult == FAIL) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in %s, Line %d, "
+ "Function: %s, new Bad Block %d generated!\n",
+ __FILE__, __LINE__, __func__,
+ (int)g_wBlockTableIndex);
+ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
+ wTempBlockTableIndex = FTL_Replace_Block_Table();
+ bt_block_changed = 1;
+ if (BAD_BLOCK == wTempBlockTableIndex)
+ return ERR;
+ g_wBlockTableIndex = wTempBlockTableIndex;
+ g_wBlockTableOffset = 0;
+ /* Block table tag is '00', which means the block is in use */
+ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
+ return FAIL;
+ }
+ g_wBlockTableOffset = dwIPFPageAddr + 1;
+#endif
+ return wResult;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: FTL_Read_Disturbance
+* Inputs: block address
+* Outputs: PASS=0 / FAIL=1
+* Description: used to handle read disturbance. Data in block that
+* reaches its read limit is moved to new block
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int FTL_Read_Disturbance(u32 blk_addr)
+{
+ int wResult = FAIL;
+ u32 *pbt = (u32 *) g_pBlockTable;
+ u32 dwOldBlockAddr = blk_addr;
+ u32 wBlockNum;
+ u32 i;
+ u32 wLeastReadCounter = 0xFFFF;
+ u32 wLeastReadIndex = BAD_BLOCK;
+ u32 wSpareBlockNum = 0;
+ u32 wTempNode;
+ u32 wReplacedNode;
+ u8 *g_pTempBuf;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+#if CMD_DMA
+ g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
+ cp_back_buf_idx++;
+ if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
+ printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
+ "Maybe too many pending commands in your CDMA chain.\n");
+ return FAIL;
+ }
+#else
+ g_pTempBuf = tmp_buf_read_disturbance;
+#endif
+
+ wBlockNum = FTL_Get_Block_Index(blk_addr);
+
+ do {
+ /* This was a bug. Here 'i' should be the logical block number
+ * and start from 1 (0 is reserved for the block table).
+ * Fixed. - Yunpeng 2008.12.19
+ */
+ for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
+ if (IS_SPARE_BLOCK(i)) {
+ u32 wPhysicalIndex =
+ (u32)((~SPARE_BLOCK) & pbt[i]);
+ if (g_pReadCounter[wPhysicalIndex -
+ DeviceInfo.wSpectraStartBlock] <
+ wLeastReadCounter) {
+ wLeastReadCounter =
+ g_pReadCounter[wPhysicalIndex -
+ DeviceInfo.wSpectraStartBlock];
+ wLeastReadIndex = i;
+ }
+ wSpareBlockNum++;
+ }
+ }
+
+ if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
+ wResult = GLOB_FTL_Garbage_Collection();
+ if (PASS == wResult)
+ continue;
+ else
+ break;
+ } else {
+ wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
+ wReplacedNode = (u32)((~SPARE_BLOCK) &
+ pbt[wLeastReadIndex]);
+#if CMD_DMA
+ pbt[wBlockNum] = wReplacedNode;
+ pbt[wLeastReadIndex] = wTempNode;
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = wBlockNum;
+ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+
+ p_BTableChangesDelta =
+ (struct BTableChangesDelta *)g_pBTDelta_Free;
+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
+
+ p_BTableChangesDelta->ftl_cmd_cnt =
+ ftl_cmd_cnt;
+ p_BTableChangesDelta->BT_Index = wLeastReadIndex;
+ p_BTableChangesDelta->BT_Entry_Value =
+ pbt[wLeastReadIndex];
+ p_BTableChangesDelta->ValidFields = 0x0C;
+
+ wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
+ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
+ LLD_CMD_FLAG_MODE_CDMA);
+ if (wResult == FAIL)
+ return wResult;
+
+ ftl_cmd_cnt++;
+
+ if (wResult != FAIL) {
+ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
+ g_pTempBuf, pbt[wBlockNum], 0,
+ DeviceInfo.wPagesPerBlock)) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in "
+ "%s, Line %d, Function: %s, "
+ "new Bad Block %d "
+ "generated!\n",
+ __FILE__, __LINE__, __func__,
+ (int)pbt[wBlockNum]);
+ wResult = FAIL;
+ MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
+ }
+ ftl_cmd_cnt++;
+ }
+#else
+ wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
+ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
+ if (wResult == FAIL)
+ return wResult;
+
+ if (wResult != FAIL) {
+ /* This was a bug. At this point pbt[wBlockNum]
+ still holds the physical address of the
+ discarded block, which must not be written to.
+ Fixed as below.
+ -- Yunpeng 2008.12.19
+ */
+ wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
+ wReplacedNode, 0,
+ DeviceInfo.wPagesPerBlock);
+ if (wResult == FAIL) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Program fail in "
+ "%s, Line %d, Function: %s, "
+ "new Bad Block %d "
+ "generated!\n",
+ __FILE__, __LINE__, __func__,
+ (int)wReplacedNode);
+ MARK_BLOCK_AS_BAD(wReplacedNode);
+ } else {
+ pbt[wBlockNum] = wReplacedNode;
+ pbt[wLeastReadIndex] = wTempNode;
+ }
+ }
+
+ if ((wResult == PASS) && (g_cBlockTableStatus !=
+ IN_PROGRESS_BLOCK_TABLE)) {
+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
+ FTL_Write_IN_Progress_Block_Table_Page();
+ }
+#endif
+ }
+ } while (wResult != PASS);
+
+#if CMD_DMA
+ /* ... */
+#endif
+
+ return wResult;
+}
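The essential effect of the non-CDMA branch above, once the data has been copied, is a swap of two block table entries: the over-read block is parked on the least-read spare's slot with DISCARD set, and the spare (SPARE bit cleared) takes over the logical block. A standalone sketch of just that swap, with made-up entries and the flag values from flash.h:

#include <stdint.h>
#include <stdio.h>

#define DISCARD_BLOCK 0x800000
#define SPARE_BLOCK   0x400000

int main(void)
{
	uint32_t pbt[3] = { 30, 31, SPARE_BLOCK | 32 };
	uint32_t blk = 1;		/* logical block that hit its read limit */
	uint32_t least_read = 2;	/* logical slot of the least-read spare  */

	uint32_t temp     = DISCARD_BLOCK | pbt[blk];
	uint32_t replaced = pbt[least_read] & ~SPARE_BLOCK;

	/* the data would be copied from pbt[blk] to 'replaced' before the swap */
	pbt[blk] = replaced;
	pbt[least_read] = temp;

	printf("logical %u -> pba %u, old pba parked as 0x%06x\n",
	       (unsigned)blk, (unsigned)pbt[blk], (unsigned)pbt[least_read]);
	return 0;
}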
+
diff --git a/drivers/staging/spectra/flash.h b/drivers/staging/spectra/flash.h
new file mode 100644
index 00000000000..5ed05805cf6
--- /dev/null
+++ b/drivers/staging/spectra/flash.h
@@ -0,0 +1,198 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _FLASH_INTERFACE_
+#define _FLASH_INTERFACE_
+
+#include "ffsport.h"
+#include "spectraswconfig.h"
+
+#define MAX_BYTE_VALUE 0xFF
+#define MAX_WORD_VALUE 0xFFFF
+#define MAX_U32_VALUE 0xFFFFFFFF
+
+#define MAX_BLOCKNODE_VALUE 0xFFFFFF
+#define DISCARD_BLOCK 0x800000
+#define SPARE_BLOCK 0x400000
+#define BAD_BLOCK 0xC00000
+
+#define UNHIT_CACHE_ITEM 0xFFFF
+
+#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
+
+#define IN_PROGRESS_BLOCK_TABLE 0x00
+#define CURRENT_BLOCK_TABLE 0x01
+
+#define BTSIG_OFFSET (0)
+#define BTSIG_BYTES (5)
+#define BTSIG_DELTA (3)
+
+#define MAX_READ_COUNTER 0x2710
+
+#define FIRST_BT_ID (1)
+#define LAST_BT_ID (254)
+#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
+
+struct device_info_tag {
+ u16 wDeviceMaker;
+ u16 wDeviceID;
+ u32 wDeviceType;
+ u32 wSpectraStartBlock;
+ u32 wSpectraEndBlock;
+ u32 wTotalBlocks;
+ u16 wPagesPerBlock;
+ u16 wPageSize;
+ u16 wPageDataSize;
+ u16 wPageSpareSize;
+ u16 wNumPageSpareFlag;
+ u16 wECCBytesPerSector;
+ u32 wBlockSize;
+ u32 wBlockDataSize;
+ u32 wDataBlockNum;
+ u8 bPlaneNum;
+ u16 wDeviceMainAreaSize;
+ u16 wDeviceSpareAreaSize;
+ u16 wDevicesConnected;
+ u16 wDeviceWidth;
+ u16 wHWRevision;
+ u16 wHWFeatures;
+
+ u16 wONFIDevFeatures;
+ u16 wONFIOptCommands;
+ u16 wONFITimingMode;
+ u16 wONFIPgmCacheTimingMode;
+
+ u16 MLCDevice;
+ u16 wSpareSkipBytes;
+
+ u8 nBitsInPageNumber;
+ u8 nBitsInPageDataSize;
+ u8 nBitsInBlockDataSize;
+};
+
+extern struct device_info_tag DeviceInfo;
+
+/* Cache item format */
+struct flash_cache_item_tag {
+ u64 address;
+ u16 use_cnt;
+ u16 changed;
+ u8 *buf;
+};
+
+struct flash_cache_tag {
+ u32 cache_item_size; /* Size in bytes of each cache item */
+ u16 pages_per_item; /* How many NAND pages in each cache item */
+ u16 LRU; /* No. of the least recently used cache item */
+ struct flash_cache_item_tag array[CACHE_ITEM_NUM];
+};
+
+/*
+ *Data structure for each list node of the management table
+ * used for the Level 2 Cache. Each node maps one logical NAND block.
+ */
+struct spectra_l2_cache_list {
+ struct list_head list;
+ u32 logical_blk_num; /* Logical block number */
+ u32 pages_array[]; /* Page map array of this logical block.
+ * Array index is the logical block number,
+ * and for every item of this array:
+ * high 16 bit is index of the L2 cache block num,
+ * low 16 bit is the phy page num
+ * of the above L2 cache block.
+ * This array is kmalloc'ed at run time.
+ */
+};
+
+struct spectra_l2_cache_info {
+ u32 blk_array[BLK_NUM_FOR_L2_CACHE];
+ u16 cur_blk_idx; /* Index of the physical block currently in use */
+ u16 cur_page_num; /* Number of pages used in the current block */
+ struct spectra_l2_cache_list table; /* First node of the table */
+};
+
+#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
+
+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
+struct flash_cache_mod_item_tag {
+ u64 address;
+ u8 changed;
+};
+
+struct flash_cache_delta_list_tag {
+ u8 item; /* used cache item */
+ struct flash_cache_mod_item_tag cache;
+};
+#endif
+
+extern struct flash_cache_tag Cache;
+
+extern u8 *buf_read_page_main_spare;
+extern u8 *buf_write_page_main_spare;
+extern u8 *buf_read_page_spare;
+extern u8 *buf_get_bad_block;
+extern u8 *cdma_desc_buf;
+extern u8 *memcp_desc_buf;
+
+/* Structure used for the IdentifyDevice function */
+struct spectra_indentfy_dev_tag {
+ u32 NumBlocks;
+ u16 PagesPerBlock;
+ u16 PageDataSize;
+ u16 wECCBytesPerSector;
+ u32 wDataBlockNum;
+};
+
+int GLOB_FTL_Flash_Init(void);
+int GLOB_FTL_Flash_Release(void);
+/*void GLOB_FTL_Erase_Flash(void);*/
+int GLOB_FTL_Block_Erase(u64 block_addr);
+int GLOB_FTL_Is_BadBlock(u32 block_num);
+int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
+int GLOB_FTL_Event_Status(int *);
+u16 glob_ftl_execute_cmds(void);
+
+/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
+int FTL_Read_Disturbance(u32 dwBlockAddr);
+
+/*Flash r/w based on cache*/
+int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
+int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
+int GLOB_FTL_Wear_Leveling(void);
+int GLOB_FTL_Flash_Format(void);
+int GLOB_FTL_Init(void);
+int GLOB_FTL_Flush_Cache(void);
+int GLOB_FTL_Garbage_Collection(void);
+int GLOB_FTL_BT_Garbage_Collection(void);
+void GLOB_FTL_Cache_Release(void);
+u8 *get_blk_table_start_addr(void);
+u8 *get_wear_leveling_table_start_addr(void);
+unsigned long get_blk_table_len(void);
+unsigned long get_wear_leveling_table_len(void);
+
+#if DEBUG_BNDRY
+void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
+ char *filename);
+#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
+ limit, no, __LINE__, __FILE__)
+#else
+#define debug_boundary_error(chnl, limit, no) ;
+#endif
+
+#endif /*_FLASH_INTERFACE_*/
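For reference, a standalone sketch of how the FTL layer decomposes a 64-bit byte address with the nBitsInBlockDataSize shift kept in DeviceInfo (the same shift GLOB_FTL_Block_Erase() uses); the page split is shown here with a plain modulo for illustration, and the 128 x 2048-byte geometry is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t page_data_size  = 2048;
	const uint32_t pages_per_block = 128;
	const uint32_t block_data_size = page_data_size * pages_per_block; /* 256 KiB */
	const unsigned bits_in_block   = 18;	/* log2(256 KiB) */

	/* byte address of page 3 inside block 5 */
	uint64_t addr = (uint64_t)5 * block_data_size + 3 * page_data_size;
	uint32_t blk  = (uint32_t)(addr >> bits_in_block);
	uint32_t page = (uint32_t)((addr & (block_data_size - 1)) / page_data_size);

	printf("addr %llu -> block %u, page %u\n",
	       (unsigned long long)addr, (unsigned)blk, (unsigned)page);
	return 0;
}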
diff --git a/drivers/staging/spectra/lld.c b/drivers/staging/spectra/lld.c
new file mode 100644
index 00000000000..5c3b9762dc3
--- /dev/null
+++ b/drivers/staging/spectra/lld.c
@@ -0,0 +1,339 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "spectraswconfig.h"
+#include "ffsport.h"
+#include "ffsdefs.h"
+#include "lld.h"
+#include "lld_nand.h"
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
+#include "lld_emu.h"
+#include "lld_cdma.h"
+
+/* common functions: */
+u16 GLOB_LLD_Flash_Reset(void)
+{
+ return emu_Flash_Reset();
+}
+
+u16 GLOB_LLD_Read_Device_ID(void)
+{
+ return emu_Read_Device_ID();
+}
+
+int GLOB_LLD_Flash_Release(void)
+{
+ return emu_Flash_Release();
+}
+
+u16 GLOB_LLD_Flash_Init(void)
+{
+ return emu_Flash_Init();
+}
+
+u16 GLOB_LLD_Erase_Block(u32 block_add)
+{
+ return emu_Erase_Block(block_add);
+}
+
+u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return emu_Write_Page_Main(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return emu_Read_Page_Main(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count)
+{
+ return emu_Read_Page_Main(read_data, block, page, page_count);
+}
+
+u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
+ u16 Page, u16 PageCount)
+{
+ return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
+ u16 Page, u16 PageCount)
+{
+ return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return emu_Write_Page_Spare(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return emu_Read_Page_Spare(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Get_Bad_Block(u32 block)
+{
+ return emu_Get_Bad_Block(block);
+}
+
+#endif /* FLASH_EMU */
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+#if FLASH_MTD /* vector all the LLD calls to the LLD_MTD code */
+#include "lld_mtd.h"
+#include "lld_cdma.h"
+
+/* common functions: */
+u16 GLOB_LLD_Flash_Reset(void)
+{
+ return mtd_Flash_Reset();
+}
+
+u16 GLOB_LLD_Read_Device_ID(void)
+{
+ return mtd_Read_Device_ID();
+}
+
+int GLOB_LLD_Flash_Release(void)
+{
+ return mtd_Flash_Release();
+}
+
+u16 GLOB_LLD_Flash_Init(void)
+{
+ return mtd_Flash_Init();
+}
+
+u16 GLOB_LLD_Erase_Block(u32 block_add)
+{
+ return mtd_Erase_Block(block_add);
+}
+
+u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return mtd_Write_Page_Main(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return mtd_Read_Page_Main(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count)
+{
+ return mtd_Read_Page_Main(read_data, block, page, page_count);
+}
+
+u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
+ u16 Page, u16 PageCount)
+{
+ return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
+ u16 Page, u16 PageCount)
+{
+ return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Get_Bad_Block(u32 block)
+{
+ return mtd_Get_Bad_Block(block);
+}
+
+#endif /* FLASH_MTD */
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
+#include "lld_nand.h"
+#include "lld_cdma.h"
+#include "flash.h"
+
+/* common functions for LLD_NAND */
+void GLOB_LLD_ECC_Control(int enable)
+{
+ NAND_ECC_Ctrl(enable);
+}
+
+/* common functions for LLD_NAND */
+u16 GLOB_LLD_Flash_Reset(void)
+{
+ return NAND_Flash_Reset();
+}
+
+u16 GLOB_LLD_Read_Device_ID(void)
+{
+ return NAND_Read_Device_ID();
+}
+
+u16 GLOB_LLD_UnlockArrayAll(void)
+{
+ return NAND_UnlockArrayAll();
+}
+
+u16 GLOB_LLD_Flash_Init(void)
+{
+ return NAND_Flash_Init();
+}
+
+int GLOB_LLD_Flash_Release(void)
+{
+ return nand_release_spectra();
+}
+
+u16 GLOB_LLD_Erase_Block(u32 block_add)
+{
+ return NAND_Erase_Block(block_add);
+}
+
+
+u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return NAND_Write_Page_Main(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
+ u16 page_count)
+{
+ if (page_count == 1) /* Using polling to improve read speed */
+ return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
+ else
+ return NAND_Read_Page_Main(read_data, block, page, page_count);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count)
+{
+ return NAND_Read_Page_Main_Polling(read_data,
+ block, page, page_count);
+}
+
+u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
+ u16 Page, u16 PageCount)
+{
+ return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
+ u16 page, u16 page_count)
+{
+ return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
+}
+
+u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
+ u16 PageCount)
+{
+ return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
+}
+
+u16 GLOB_LLD_Get_Bad_Block(u32 block)
+{
+ return NAND_Get_Bad_Block(block);
+}
+
+#if CMD_DMA
+u16 GLOB_LLD_Event_Status(void)
+{
+ return CDMA_Event_Status();
+}
+
+u16 glob_lld_execute_cmds(void)
+{
+ return CDMA_Execute_CMDs();
+}
+
+u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
+ u32 ByteCount, u16 flag)
+{
+ /* Replace the hardware memcopy with software memcpy function */
+ if (CDMA_Execute_CMDs())
+ return FAIL;
+ memcpy(dest, src, ByteCount);
+ return PASS;
+
+ /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
+}
+
+u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
+{
+ return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
+}
+
+u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
+{
+ return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
+}
+
+u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
+ u16 count, u16 flags)
+{
+ return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
+}
+
+u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
+ u16 count, u16 flags)
+{
+ return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
+ data, block, page, count, flags);
+}
+
+u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
+ u32 block, u16 page, u16 count)
+{
+ return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
+ LLD_CMD_FLAG_MODE_CDMA);
+}
+
+#endif /* CMD_DMA */
+#endif /* FLASH_NAND */
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+/* end of LLD.c */
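lld.c is a pure compile-time dispatch layer: exactly one of FLASH_EMU, FLASH_MTD or FLASH_NAND is enabled in spectraswconfig.h and every GLOB_LLD_* wrapper forwards to that backend, so the FTL never knows which one it is talking to. A minimal standalone sketch of the same pattern, with placeholder names in place of the real backends:

#include <stdio.h>

#define BACKEND_EMU 1	/* imagine this coming from the build configuration */

#if BACKEND_EMU
static int backend_erase_block(unsigned block)
{
	printf("emu erase of block %u\n", block);
	return 0;
}
#else
static int backend_erase_block(unsigned block)
{
	printf("hw erase of block %u\n", block);
	return 0;
}
#endif

/* the one entry point the upper layer sees, regardless of backend */
static int demo_lld_erase_block(unsigned block)
{
	return backend_erase_block(block);
}

int main(void)
{
	return demo_lld_erase_block(42);
}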
diff --git a/drivers/staging/spectra/lld.h b/drivers/staging/spectra/lld.h
new file mode 100644
index 00000000000..d3738e0e1fe
--- /dev/null
+++ b/drivers/staging/spectra/lld.h
@@ -0,0 +1,111 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+
+
+#ifndef _LLD_
+#define _LLD_
+
+#include "ffsport.h"
+#include "spectraswconfig.h"
+#include "flash.h"
+
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X 5
+#define CLK_MULTI 4
+
+/* Typedefs */
+
+/* prototypes: API for LLD */
+/* Currently, Write_Page_Main,
+ * MemCopy and
+ * Read_Page_Main_Spare
+ * do not take a flag because they were not implemented with one
+ * prior to this. The flag is not being added now, to keep changes
+ * to a minimum; it is currently only required for
+ * Write_Page_Main_Spare.
+ * Later on, these NEED to be changed.
+ */
+
+extern void GLOB_LLD_ECC_Control(int enable);
+
+extern u16 GLOB_LLD_Flash_Reset(void);
+
+extern u16 GLOB_LLD_Read_Device_ID(void);
+
+extern u16 GLOB_LLD_UnlockArrayAll(void);
+
+extern u16 GLOB_LLD_Flash_Init(void);
+
+extern int GLOB_LLD_Flash_Release(void);
+
+extern u16 GLOB_LLD_Erase_Block(u32 block_add);
+
+extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
+ u32 block, u16 Page, u16 PageCount);
+
+extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
+ u32 block, u16 page, u16 page_count);
+
+extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count);
+
+extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
+ u32 block, u16 Page, u16 PageCount);
+
+extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
+ u32 block, u16 Page, u16 PageCount);
+
+extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
+ u32 block, u16 page, u16 page_count);
+
+extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
+ u32 block, u16 Page, u16 PageCount);
+
+extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
+
+extern u16 GLOB_LLD_Event_Status(void);
+
+extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
+
+extern u16 glob_lld_execute_cmds(void);
+
+extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
+
+extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
+ u32 block, u16 page, u16 count);
+
+extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
+ u32 block, u16 page, u16 count, u16 flags);
+
+extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
+ u32 block, u16 page, u16 count, u16 flags);
+
+extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
+ u32 block, u16 page, u16 count);
+
+#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
+#define LLD_CMD_FLAG_MODE_CDMA (0x8)
+
+
+#endif /*_LLD_ */
+
+
diff --git a/drivers/staging/spectra/lld_cdma.c b/drivers/staging/spectra/lld_cdma.c
new file mode 100644
index 00000000000..c6e76103d43
--- /dev/null
+++ b/drivers/staging/spectra/lld_cdma.c
@@ -0,0 +1,910 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+#include "spectraswconfig.h"
+#include "lld.h"
+#include "lld_nand.h"
+#include "lld_cdma.h"
+#include "lld_emu.h"
+#include "flash.h"
+#include "nand_regs.h"
+
+#define MAX_PENDING_CMDS 4
+#define MODE_02 (0x2 << 26)
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: CDMA_Data_CMD
+* Inputs: cmd code (aligned for hw)
+* data: pointer to source or destination
+* block: block address
+* page: page address
+* num: num pages to transfer
+* Outputs: PASS
+* Description: This function takes the parameters and puts them
+* into the "pending commands" array.
+* It does not parse or validate the parameters.
+* The array index is the same as the tag.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
+{
+ u8 bank;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (0 == cmd)
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
+
+ /* If a command of another bank comes, then first execute */
+ /* pending commands of the current bank, then set the new */
+ /* bank as current bank */
+ bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+ if (bank != info.flash_bank) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Will access new bank. old bank: %d, new bank: %d\n",
+ info.flash_bank, bank);
+ if (CDMA_Execute_CMDs()) {
+ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
+ return FAIL;
+ }
+ info.flash_bank = bank;
+ }
+
+ info.pcmds[info.pcmds_num].CMD = cmd;
+ info.pcmds[info.pcmds_num].DataAddr = data;
+ info.pcmds[info.pcmds_num].Block = block;
+ info.pcmds[info.pcmds_num].Page = page;
+ info.pcmds[info.pcmds_num].PageCount = num;
+ info.pcmds[info.pcmds_num].DataDestAddr = 0;
+ info.pcmds[info.pcmds_num].DataSrcAddr = 0;
+ info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
+ info.pcmds[info.pcmds_num].Flags = flags;
+ info.pcmds[info.pcmds_num].Status = 0xB0B;
+
+ switch (cmd) {
+ case WRITE_MAIN_SPARE_CMD:
+ Conv_Main_Spare_Data_Log2Phy_Format(data, num);
+ break;
+ case WRITE_SPARE_CMD:
+ Conv_Spare_Data_Log2Phy_Format(data);
+ break;
+ default:
+ break;
+ }
+
+ info.pcmds_num++;
+
+ if (info.pcmds_num >= MAX_PENDING_CMDS) {
+ if (CDMA_Execute_CMDs()) {
+ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
+ return FAIL;
+ }
+ }
+
+ return PASS;
+}
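CDMA_Data_CMD() above only queues work: commands accumulate in info.pcmds[] and the whole batch is flushed through CDMA_Execute_CMDs() when MAX_PENDING_CMDS is reached (or when the bank changes, or when a caller forces execution). A standalone sketch of that batching behaviour, with a trimmed-down command record and a print in place of the hardware submission:

#include <stdio.h>

#define MAX_PENDING_CMDS 4

struct pending_cmd { int cmd; unsigned block; };

static struct pending_cmd pcmds[MAX_PENDING_CMDS];
static int pcmds_num;

static void execute_cmds(void)
{
	int i;

	for (i = 0; i < pcmds_num; i++)
		printf("  issue cmd %d on block %u\n", pcmds[i].cmd, pcmds[i].block);
	pcmds_num = 0;	/* batch consumed */
}

static void queue_cmd(int cmd, unsigned block)
{
	pcmds[pcmds_num].cmd = cmd;
	pcmds[pcmds_num].block = block;
	pcmds_num++;

	if (pcmds_num >= MAX_PENDING_CMDS) {
		printf("batch full, executing:\n");
		execute_cmds();
	}
}

int main(void)
{
	unsigned b;

	for (b = 100; b < 106; b++)
		queue_cmd(1 /* read */, b);
	printf("flushing the remainder:\n");
	execute_cmds();
	return 0;
}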
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: CDMA_MemCopy_CMD
+* Inputs: dest: pointer to destination
+* src: pointer to source
+* count: num bytes to transfer
+* Outputs: PASS
+* Description: This function takes the parameters and puts them
+* into the "pending commands" array.
+* It does not parse or validate the parameters.
+* The array index is the same as the tag.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
+{
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
+ info.pcmds[info.pcmds_num].DataAddr = 0;
+ info.pcmds[info.pcmds_num].Block = 0;
+ info.pcmds[info.pcmds_num].Page = 0;
+ info.pcmds[info.pcmds_num].PageCount = 0;
+ info.pcmds[info.pcmds_num].DataDestAddr = dest;
+ info.pcmds[info.pcmds_num].DataSrcAddr = src;
+ info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
+ info.pcmds[info.pcmds_num].Flags = flags;
+ info.pcmds[info.pcmds_num].Status = 0xB0B;
+
+ info.pcmds_num++;
+
+ if (info.pcmds_num >= MAX_PENDING_CMDS) {
+ if (CDMA_Execute_CMDs()) {
+ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
+ return FAIL;
+ }
+ }
+
+ return PASS;
+}
+
+#if 0
+/* Prints the PendingCMDs array */
+void print_pending_cmds(void)
+{
+ u16 i;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < info.pcmds_num; i++) {
+ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
+ switch (info.pcmds[i].CMD) {
+ case ERASE_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Erase Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ case WRITE_MAIN_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Write Main Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ case WRITE_MAIN_SPARE_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Write Main Spare Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ case READ_MAIN_SPARE_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Read Main Spare Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ case READ_MAIN_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Read Main Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ case MEMCOPY_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Memcopy Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ case DUMMY_CMD:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Dummy Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ default:
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Illegal Command (0x%x)\n",
+ info.pcmds[i].CMD);
+ break;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
+ (u32)info.pcmds[i].DataAddr);
+ nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
+ info.pcmds[i].Block);
+ nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
+ info.pcmds[i].Page);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
+ info.pcmds[i].PageCount);
+ nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
+ (u32)info.pcmds[i].DataDestAddr);
+ nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
+ (u32)info.pcmds[i].DataSrcAddr);
+ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
+ info.pcmds[i].MemCopyByteCnt);
+ nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
+ info.pcmds[i].Flags);
+ nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
+ info.pcmds[i].Status);
+ }
+}
+
+/* Print the CDMA descriptors */
+void print_cdma_descriptors(void)
+{
+ struct cdma_descriptor *pc;
+ int i;
+
+ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
+
+ for (i = 0; i < info.cdma_num; i++) {
+ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
+ pc[i].NxtPointerHi, pc[i].NxtPointerLo);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
+ pc[i].FlashPointerHi, pc[i].FlashPointerLo);
+ nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
+ pc[i].CommandType);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
+ pc[i].MemAddrHi, pc[i].MemAddrLo);
+ nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
+ pc[i].CommandFlags);
+ nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
+ pc[i].Channel, pc[i].Status);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
+ pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Reserved12: 0x%x, Reserved13: 0x%x, "
+ "Reserved14: 0x%x, pcmd: %d\n",
+ pc[i].Reserved12, pc[i].Reserved13,
+ pc[i].Reserved14, pc[i].pcmd);
+ }
+}
+
+/* Print the Memory copy descriptors */
+static void print_memcp_descriptors(void)
+{
+ struct memcpy_descriptor *pm;
+ int i;
+
+ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
+
+ for (i = 0; i < info.cdma_num; i++) {
+ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
+ pm[i].NxtPointerHi, pm[i].NxtPointerLo);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
+ pm[i].SrcAddrHi, pm[i].SrcAddrLo);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
+ pm[i].DestAddrHi, pm[i].DestAddrLo);
+ nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
+ pm[i].XferSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
+ pm[i].MemCopyFlags);
+ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
+ pm[i].MemCopyStatus);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
+ pm[i].reserved9);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
+ pm[i].reserved10);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
+ pm[i].reserved11);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
+ pm[i].reserved12);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
+ pm[i].reserved13);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
+ pm[i].reserved14);
+ nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
+ pm[i].reserved15);
+ }
+}
+#endif
+
+/* Reset cdma_descriptor chain to 0 */
+static void reset_cdma_desc(int i)
+{
+ struct cdma_descriptor *ptr;
+
+ BUG_ON(i >= MAX_DESCS);
+
+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
+
+ ptr[i].NxtPointerHi = 0;
+ ptr[i].NxtPointerLo = 0;
+ ptr[i].FlashPointerHi = 0;
+ ptr[i].FlashPointerLo = 0;
+ ptr[i].CommandType = 0;
+ ptr[i].MemAddrHi = 0;
+ ptr[i].MemAddrLo = 0;
+ ptr[i].CommandFlags = 0;
+ ptr[i].Channel = 0;
+ ptr[i].Status = 0;
+ ptr[i].MemCopyPointerHi = 0;
+ ptr[i].MemCopyPointerLo = 0;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: CDMA_UpdateEventStatus
+* Inputs: none
+* Outputs: none
+* Description: This function updates the event status of all the channels
+* when an error condition is reported.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+void CDMA_UpdateEventStatus(void)
+{
+ int i, j, active_chan;
+ struct cdma_descriptor *ptr;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
+
+ for (j = 0; j < info.cdma_num; j++) {
+ /* Check for the descriptor with failure */
+ if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
+ break;
+
+ }
+
+ /* All the previous cmd's status for this channel must be good */
+ for (i = 0; i < j; i++) {
+ if (ptr[i].pcmd != 0xff)
+ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
+ }
+
+ /* Abort the channel with type 0 reset command. It resets the */
+ /* selected channel after the descriptor completes the flash */
+ /* operation and status has been updated for the descriptor. */
+ /* Memory Copy and Sync associated with this descriptor will */
+ /* not be executed */
+ active_chan = ioread32(FlashReg + CHNL_ACTIVE);
+ if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
+ iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
+ iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
+ } else { /* Should never be reached */
+ printk(KERN_ERR "Error! Used bank is not set in"
+ " reg CHNL_ACTIVE\n");
+ }
+}
+
+static void cdma_trans(u16 chan)
+{
+ u32 addr;
+
+ addr = info.cdma_desc;
+
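+ /* Load the descriptor chain address into the controller and start */
+ /* the CDMA engine on this channel. The two middle MODE_10 writes */
+ /* appear to program the high and low 16-bit halves of the address; */
+ /* the exact register encoding is not documented here. */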
+ iowrite32(MODE_10 | (chan << 24), FlashMem);
+ iowrite32((1 << 7) | chan, FlashMem + 0x10);
+
+ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
+ FlashMem);
+ iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
+
+ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
+ iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
+
+ iowrite32(MODE_10 | (chan << 24), FlashMem);
+ iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
+* Inputs: none (operates on the pending cmds stored in info.pcmds)
+* Outputs: PASS/FAIL
+* Description: Build the CDMA chain(s) by making one CMD-DMA descriptor
+* for each pending command, start the CDMA engine, and return.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 CDMA_Execute_CMDs(void)
+{
+ int i, ret;
+ u64 flash_add;
+ u32 ptr;
+ dma_addr_t map_addr, next_ptr;
+ u16 status = PASS;
+ u16 tmp_c;
+ struct cdma_descriptor *pc;
+ struct memcpy_descriptor *pm;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ /* No pending cmds to execute, just exit */
+ if (0 == info.pcmds_num) {
+ nand_dbg_print(NAND_DBG_TRACE,
+ "No pending cmds to execute. Just exit.\n");
+ return PASS;
+ }
+
+ for (i = 0; i < MAX_DESCS; i++)
+ reset_cdma_desc(i);
+
+ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
+ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
+
+ info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
+ info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
+ next_ptr = info.cdma_desc;
+ info.cdma_num = 0;
+
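+ /* Build one CDMA descriptor (or more, for main+spare commands) for */
+ /* each pending command. Descriptors are chained through the */
+ /* NxtPointerHi/NxtPointerLo fields, which hold the bus address of */
+ /* the next descriptor split into 16-bit halves. */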
+ for (i = 0; i < info.pcmds_num; i++) {
+ if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
+ info.pcmds[i].Status = CMD_NOT_DONE;
+ continue;
+ }
+
+ next_ptr += sizeof(struct cdma_descriptor);
+ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
+ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
+
+ /* Use the Block offset within a bank */
+ tmp_c = info.pcmds[i].Block /
+ (DeviceInfo.wTotalBlocks / totalUsedBanks);
+ flash_add = (u64)(info.pcmds[i].Block - tmp_c *
+ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
+ DeviceInfo.wBlockDataSize +
+ (u64)(info.pcmds[i].Page) *
+ DeviceInfo.wPageDataSize;
+
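+ /* flash_add is the byte offset of the target page within this bank; */
+ /* dividing by the page data size converts it to a page index for */
+ /* the MODE_10 flash pointer. */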
+ ptr = MODE_10 | (info.flash_bank << 24) |
+ (u32)GLOB_u64_Div(flash_add,
+ DeviceInfo.wPageDataSize);
+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
+
+ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
+ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
+ /* Descriptor to set Main+Spare Access Mode */
+ pc[info.cdma_num].CommandType = 0x43;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ pc[info.cdma_num].MemAddrHi = 0;
+ pc[info.cdma_num].MemAddrLo = 0;
+ pc[info.cdma_num].Channel = 0;
+ pc[info.cdma_num].Status = 0;
+ pc[info.cdma_num].pcmd = i;
+
+ info.cdma_num++;
+ BUG_ON(info.cdma_num >= MAX_DESCS);
+
+ reset_cdma_desc(info.cdma_num);
+ next_ptr += sizeof(struct cdma_descriptor);
+ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
+ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
+ }
+
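+ /* CommandFlags used below: bit 9 means continue to the next */
+ /* descriptor and bit 8 requests an interrupt on completion (see the */
+ /* final dummy descriptor); the meaning of the constant 0x40 is not */
+ /* documented here. */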
+ switch (info.pcmds[i].CMD) {
+ case ERASE_CMD:
+ pc[info.cdma_num].CommandType = 1;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ pc[info.cdma_num].MemAddrHi = 0;
+ pc[info.cdma_num].MemAddrLo = 0;
+ break;
+
+ case WRITE_MAIN_CMD:
+ pc[info.cdma_num].CommandType =
+ 0x2100 | info.pcmds[i].PageCount;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
+ break;
+
+ case READ_MAIN_CMD:
+ pc[info.cdma_num].CommandType =
+ 0x2000 | info.pcmds[i].PageCount;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
+ break;
+
+ case WRITE_MAIN_SPARE_CMD:
+ pc[info.cdma_num].CommandType =
+ 0x2100 | info.pcmds[i].PageCount;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
+ break;
+
+ case READ_MAIN_SPARE_CMD:
+ pc[info.cdma_num].CommandType =
+ 0x2000 | info.pcmds[i].PageCount;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
+ break;
+
+ case MEMCOPY_CMD:
+ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
+ /* Set bit 11 to let the CDMA engine continue to */
+ /* execute only after it has finished processing */
+ /* the memcopy descriptor. */
+ /* Also set bit 10 and bit 9 to 1 */
+ pc[info.cdma_num].CommandFlags = 0x0E40;
+ map_addr = info.memcp_desc + info.cdma_num *
+ sizeof(struct memcpy_descriptor);
+ pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
+ pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
+
+ pm[info.cdma_num].NxtPointerHi = 0;
+ pm[info.cdma_num].NxtPointerLo = 0;
+
+ map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
+ pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
+ pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
+ map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
+ pm[info.cdma_num].DestAddrHi = map_addr >> 16;
+ pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
+
+ pm[info.cdma_num].XferSize =
+ info.pcmds[i].MemCopyByteCnt;
+ pm[info.cdma_num].MemCopyFlags =
+ (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
+ pm[info.cdma_num].MemCopyStatus = 0;
+ break;
+
+ case DUMMY_CMD:
+ default:
+ pc[info.cdma_num].CommandType = 0xFFFF;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ pc[info.cdma_num].MemAddrHi = 0;
+ pc[info.cdma_num].MemAddrLo = 0;
+ break;
+ }
+
+ pc[info.cdma_num].Channel = 0;
+ pc[info.cdma_num].Status = 0;
+ pc[info.cdma_num].pcmd = i;
+
+ info.cdma_num++;
+ BUG_ON(info.cdma_num >= MAX_DESCS);
+
+ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
+ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
+ /* Descriptor to set back Main Area Access Mode */
+ reset_cdma_desc(info.cdma_num);
+ next_ptr += sizeof(struct cdma_descriptor);
+ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
+ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
+
+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
+
+ pc[info.cdma_num].CommandType = 0x42;
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
+ pc[info.cdma_num].MemAddrHi = 0;
+ pc[info.cdma_num].MemAddrLo = 0;
+
+ pc[info.cdma_num].Channel = 0;
+ pc[info.cdma_num].Status = 0;
+ pc[info.cdma_num].pcmd = i;
+
+ info.cdma_num++;
+ BUG_ON(info.cdma_num >= MAX_DESCS);
+ }
+ }
+
+ /* Add a dummy descriptor at end of the CDMA chain */
+ reset_cdma_desc(info.cdma_num);
+ ptr = MODE_10 | (info.flash_bank << 24);
+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
+ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
+ /* Set Command Flags for the last CDMA descriptor: */
+ /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
+ pc[info.cdma_num].CommandFlags =
+ (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
+ pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
+ info.cdma_num++;
+ BUG_ON(info.cdma_num >= MAX_DESCS);
+
+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ /* Wait for DMA to be enabled before issuing the next command */
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+ cdma_trans(info.flash_bank);
+
+ ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
+ if (!ret)
+ printk(KERN_ERR "Wait for completion timeout "
+ "in %s, Line %d\n", __FILE__, __LINE__);
+ status = info.ret;
+
+ info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
+
+ return status;
+}
+
+int is_cdma_interrupt(void)
+{
+ u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
+ u32 int_en_mask;
+ u32 cdma_int_en_mask;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ /* Set the global Enable masks for only those interrupts
+ * that are supported */
+ cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
+ DMA_INTR__DESC_COMP_CHANNEL1 |
+ DMA_INTR__DESC_COMP_CHANNEL2 |
+ DMA_INTR__DESC_COMP_CHANNEL3 |
+ DMA_INTR__MEMCOPY_DESC_COMP);
+
+ int_en_mask = (INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__PROGRAM_FAIL |
+ INTR_STATUS0__ERASE_FAIL);
+
+ ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
+ ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
+ ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
+ ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
+ ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
+
+ nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
+ "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
+ ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
+
+ if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
+ return 1;
+ } else {
+ iowrite32(ints_b0, FlashReg + INTR_STATUS0);
+ iowrite32(ints_b1, FlashReg + INTR_STATUS1);
+ iowrite32(ints_b2, FlashReg + INTR_STATUS2);
+ iowrite32(ints_b3, FlashReg + INTR_STATUS3);
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Not a NAND controller interrupt! Ignore it.\n");
+ return 0;
+ }
+}
+
+static void update_event_status(void)
+{
+ int i;
+ struct cdma_descriptor *ptr;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
+
+ for (i = 0; i < info.cdma_num; i++) {
+ if (ptr[i].pcmd != 0xff)
+ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
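+ /* CommandType 0x42/0x43 descriptors only switch the main/spare */
+ /* access mode (0x41 is presumably of the same family); they carry */
+ /* no data, so skip the read-data format conversion below. */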
+ if ((ptr[i].CommandType == 0x41) ||
+ (ptr[i].CommandType == 0x42) ||
+ (ptr[i].CommandType == 0x43))
+ continue;
+
+ switch (info.pcmds[ptr[i].pcmd].CMD) {
+ case READ_MAIN_SPARE_CMD:
+ Conv_Main_Spare_Data_Phy2Log_Format(
+ info.pcmds[ptr[i].pcmd].DataAddr,
+ info.pcmds[ptr[i].pcmd].PageCount);
+ break;
+ case READ_SPARE_CMD:
+ Conv_Spare_Data_Phy2Log_Format(
+ info.pcmds[ptr[i].pcmd].DataAddr);
+ break;
+ }
+ }
+}
+
+static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
+{
+ u16 event = EVENT_NONE;
+ u16 err_byte;
+ u16 err_page = 0;
+ u8 err_sector;
+ u8 err_device;
+ u16 ecc_correction_info;
+ u16 err_address;
+ u32 eccSectorSize;
+ u8 *err_pos;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
+
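+ /* Drain the ECC error report registers one entry at a time until */
+ /* the controller flags the last one. A correctable error is fixed */
+ /* in place by XOR-ing the reported byte mask at the faulty byte's */
+ /* position in buf; an uncorrectable error only sets the event code. */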
+ do {
+ if (0 == ch)
+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
+ else if (1 == ch)
+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
+ else if (2 == ch)
+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
+ else if (3 == ch)
+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
+
+ err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
+ err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
+ err_sector = ((err_address &
+ ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
+
+ ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
+ err_device = ((ecc_correction_info &
+ ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
+
+ if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
+ event = EVENT_UNCORRECTABLE_DATA_ERROR;
+ } else {
+ event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
+ if (err_byte < ECC_SECTOR_SIZE) {
+ err_pos = buf +
+ (err_page - page) *
+ DeviceInfo.wPageDataSize +
+ err_sector * eccSectorSize +
+ err_byte *
+ DeviceInfo.wDevicesConnected +
+ err_device;
+ *err_pos ^= ecc_correction_info &
+ ERR_CORRECTION_INFO__BYTEMASK;
+ }
+ }
+ } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
+
+ return event;
+}
+
+static u16 process_ecc_int(u32 c, u16 *p_desc_num)
+{
+ struct cdma_descriptor *ptr;
+ u16 j;
+ int event = EVENT_PASS;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (c != info.flash_bank)
+ printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
+ info.flash_bank, c);
+
+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
+
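+ /* The first descriptor that has not reported completion is the one */
+ /* the ECC interrupt belongs to. */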
+ for (j = 0; j < info.cdma_num; j++)
+ if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
+ break;
+
+ *p_desc_num = j; /* Pass the descriptor number found here */
+
+ if (j >= info.cdma_num) {
+ printk(KERN_ERR "Can not find the correct descriptor number "
+ "when ecc interrupt triggered!"
+ "info.cdma_num: %d, j: %d\n", info.cdma_num, j);
+ return EVENT_UNCORRECTABLE_DATA_ERROR;
+ }
+
+ event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
+ info.pcmds[ptr[j].pcmd].Page);
+
+ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
+ printk(KERN_ERR "Uncorrectable ECC error!"
+ "info.cdma_num: %d, j: %d, "
+ "pending cmd CMD: 0x%x, "
+ "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
+ info.cdma_num, j,
+ info.pcmds[ptr[j].pcmd].CMD,
+ info.pcmds[ptr[j].pcmd].Block,
+ info.pcmds[ptr[j].pcmd].Page,
+ info.pcmds[ptr[j].pcmd].PageCount);
+
+ if (ptr[j].pcmd != 0xff)
+ info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
+ CDMA_UpdateEventStatus();
+ }
+
+ return event;
+}
+
+static void process_prog_erase_fail_int(u16 desc_num)
+{
+ struct cdma_descriptor *ptr;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
+
+ if (ptr[desc_num].pcmd != 0xFF)
+ info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
+
+ CDMA_UpdateEventStatus();
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: CDMA_Event_Status (for use with CMD_DMA)
+* Inputs: none
+* Outputs: Event_Status code
+* Description: This function is called after an interrupt has happened
+* It reads the HW interrupt status registers, handles ECC, program-fail
+* and erase-fail conditions, and returns the appropriate event status.
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 CDMA_Event_Status(void)
+{
+ u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
+ INTR_STATUS2, INTR_STATUS3};
+ u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
+ DMA_INTR__DESC_COMP_CHANNEL1,
+ DMA_INTR__DESC_COMP_CHANNEL2,
+ DMA_INTR__DESC_COMP_CHANNEL3};
+ u32 cdma_int_status, int_status;
+ u32 ecc_enable = 0;
+ u16 event = EVENT_PASS;
+ u16 cur_desc = 0;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ ecc_enable = ioread32(FlashReg + ECC_ENABLE);
+
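+ /* Poll the interrupt status of the active bank: handle ECC, program */
+ /* fail and erase fail conditions first, and only report EVENT_PASS */
+ /* once the CDMA descriptor-complete interrupt for this bank is seen. */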
+ while (1) {
+ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
+ if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
+ event = process_ecc_int(info.flash_bank, &cur_desc);
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + ints_addr[info.flash_bank]);
+ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "ints_bank0 to ints_bank3: "
+ "0x%x, 0x%x, 0x%x, 0x%x, "
+ "ints_cdma: 0x%x\n",
+ ioread32(FlashReg + INTR_STATUS0),
+ ioread32(FlashReg + INTR_STATUS1),
+ ioread32(FlashReg + INTR_STATUS2),
+ ioread32(FlashReg + INTR_STATUS3),
+ ioread32(FlashReg + DMA_INTR));
+ break;
+ }
+ } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
+ printk(KERN_ERR "NAND program fail interrupt!\n");
+ process_prog_erase_fail_int(cur_desc);
+ event = EVENT_PROGRAM_FAILURE;
+ break;
+ } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
+ printk(KERN_ERR "NAND erase fail interrupt!\n");
+ process_prog_erase_fail_int(cur_desc);
+ event = EVENT_ERASE_FAILURE;
+ break;
+ } else {
+ cdma_int_status = ioread32(FlashReg + DMA_INTR);
+ if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
+ iowrite32(dma_intr_bit[info.flash_bank],
+ FlashReg + DMA_INTR);
+ update_event_status();
+ event = EVENT_PASS;
+ break;
+ }
+ }
+ }
+
+ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
+ iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
+ cdma_int_status = ioread32(FlashReg + DMA_INTR);
+ iowrite32(cdma_int_status, FlashReg + DMA_INTR);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ return event;
+}
+
+
+
diff --git a/drivers/staging/spectra/lld_cdma.h b/drivers/staging/spectra/lld_cdma.h
new file mode 100644
index 00000000000..854ea066f0c
--- /dev/null
+++ b/drivers/staging/spectra/lld_cdma.h
@@ -0,0 +1,123 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/* header for LLD_CDMA.c module */
+
+#ifndef _LLD_CDMA_
+#define _LLD_CDMA_
+
+#include "flash.h"
+
+#define DEBUG_SYNC 1
+
+/*/////////// CDMA specific MACRO definition */
+#define MAX_DESCS (255)
+#define MAX_CHANS (4)
+#define MAX_SYNC_POINTS (16)
+#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
+
+#define CHANNEL_SYNC_MASK (0x000F)
+#define CHANNEL_DMA_MASK (0x00F0)
+#define CHANNEL_ID_MASK (0x0300)
+#define CHANNEL_CONT_MASK (0x4000)
+#define CHANNEL_INTR_MASK (0x8000)
+
+#define CHANNEL_SYNC_OFFSET (0)
+#define CHANNEL_DMA_OFFSET (4)
+#define CHANNEL_ID_OFFSET (8)
+#define CHANNEL_CONT_OFFSET (14)
+#define CHANNEL_INTR_OFFSET (15)
+
+u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
+u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
+u16 CDMA_Execute_CMDs(void);
+void print_pending_cmds(void);
+void print_cdma_descriptors(void);
+
+extern u8 g_SBDCmdIndex;
+extern struct mrst_nand_info info;
+
+
+/*/////////// prototypes: APIs for LLD_CDMA */
+int is_cdma_interrupt(void);
+u16 CDMA_Event_Status(void);
+
+/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
+struct cdma_descriptor {
+ u32 NxtPointerHi;
+ u32 NxtPointerLo;
+ u32 FlashPointerHi;
+ u32 FlashPointerLo;
+ u32 CommandType;
+ u32 MemAddrHi;
+ u32 MemAddrLo;
+ u32 CommandFlags;
+ u32 Channel;
+ u32 Status;
+ u32 MemCopyPointerHi;
+ u32 MemCopyPointerLo;
+ u32 Reserved12;
+ u32 Reserved13;
+ u32 Reserved14;
+ u32 pcmd; /* pending cmd num related to this descriptor */
+};
+
+/* This struct holds one MemCopy descriptor as defined by the HW */
+struct memcpy_descriptor {
+ u32 NxtPointerHi;
+ u32 NxtPointerLo;
+ u32 SrcAddrHi;
+ u32 SrcAddrLo;
+ u32 DestAddrHi;
+ u32 DestAddrLo;
+ u32 XferSize;
+ u32 MemCopyFlags;
+ u32 MemCopyStatus;
+ u32 reserved9;
+ u32 reserved10;
+ u32 reserved11;
+ u32 reserved12;
+ u32 reserved13;
+ u32 reserved14;
+ u32 reserved15;
+};
+
+/* Pending CMD table entries (includes MemCopy parameters) */
+struct pending_cmd {
+ u8 CMD;
+ u8 *DataAddr;
+ u32 Block;
+ u16 Page;
+ u16 PageCount;
+ u8 *DataDestAddr;
+ u8 *DataSrcAddr;
+ u32 MemCopyByteCnt;
+ u16 Flags;
+ u16 Status;
+};
+
+#if DEBUG_SYNC
+extern u32 debug_sync_cnt;
+#endif
+
+/* Definitions for CMD DMA descriptor chain fields */
+#define CMD_DMA_DESC_COMP 0x8000
+#define CMD_DMA_DESC_FAIL 0x4000
+
+#endif /*_LLD_CDMA_*/
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
new file mode 100644
index 00000000000..60eb0f6fdba
--- /dev/null
+++ b/drivers/staging/spectra/lld_emu.c
@@ -0,0 +1,780 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "flash.h"
+#include "ffsdefs.h"
+#include "lld_emu.h"
+#include "lld.h"
+#if CMD_DMA
+#include "lld_cdma.h"
+#endif
+
+#define GLOB_LLD_PAGES 64
+#define GLOB_LLD_PAGE_SIZE (512+16)
+#define GLOB_LLD_PAGE_DATA_SIZE 512
+#define GLOB_LLD_BLOCKS 2048
+
+#if (CMD_DMA && FLASH_EMU)
+#include "lld_cdma.h"
+u32 totalUsedBanks;
+u32 valid_banks[MAX_CHANS];
+#endif
+
+#if FLASH_EMU /* This is for entire module */
+
+static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
+
+/* Read the nand emu file and fill its content into flash_memory */
+int emu_load_file_to_mem(void)
+{
+ mm_segment_t fs;
+ struct file *nef_filp = NULL;
+ struct inode *inode = NULL;
+ loff_t nef_size = 0;
+ loff_t tmp_file_offset, file_offset;
+ ssize_t nread;
+ int i, rc = -EINVAL;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
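+ /* vfs_read() checks buffer addresses against the current segment */
+ /* limit; temporarily switch to KERNEL_DS so the kernel buffers in */
+ /* flash_memory[] are accepted. */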
+ fs = get_fs();
+ set_fs(get_ds());
+
+ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
+ if (IS_ERR(nef_filp)) {
+ printk(KERN_ERR "filp_open error: "
+ "Unable to open nand emu file!\n");
+ return PTR_ERR(nef_filp);
+ }
+
+ if (nef_filp->f_path.dentry) {
+ inode = nef_filp->f_path.dentry->d_inode;
+ } else {
+ printk(KERN_ERR "Can not get valid inode!\n");
+ goto out;
+ }
+
+ nef_size = i_size_read(inode->i_mapping->host);
+ if (nef_size <= 0) {
+ printk(KERN_ERR "Invalid nand emu file size: "
+ "0x%llx\n", nef_size);
+ goto out;
+ } else {
+ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
+ nef_size);
+ }
+
+ file_offset = 0;
+ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
+ tmp_file_offset = file_offset;
+ nread = vfs_read(nef_filp,
+ (char __user *)flash_memory[i],
+ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
+ if (nread < GLOB_LLD_PAGE_SIZE) {
+ printk(KERN_ERR "%s, Line %d - "
+ "nand emu file partial read: "
+ "%d bytes\n", __FILE__, __LINE__, (int)nread);
+ goto out;
+ }
+ file_offset += GLOB_LLD_PAGE_SIZE;
+ }
+ rc = 0;
+
+out:
+ filp_close(nef_filp, current->files);
+ set_fs(fs);
+ return rc;
+}
+
+/* Write contents of flash_memory to nand emu file */
+int emu_write_mem_to_file(void)
+{
+ mm_segment_t fs;
+ struct file *nef_filp = NULL;
+ struct inode *inode = NULL;
+ loff_t nef_size = 0;
+ loff_t tmp_file_offset, file_offset;
+ ssize_t nwritten;
+ int i, rc = -EINVAL;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ fs = get_fs();
+ set_fs(get_ds());
+
+ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
+ if (IS_ERR(nef_filp)) {
+ printk(KERN_ERR "filp_open error: "
+ "Unable to open nand emu file!\n");
+ return PTR_ERR(nef_filp);
+ }
+
+ if (nef_filp->f_path.dentry) {
+ inode = nef_filp->f_path.dentry->d_inode;
+ } else {
+ printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
+ goto out;
+ }
+
+ nef_size = i_size_read(inode->i_mapping->host);
+ if (nef_size <= 0) {
+ printk(KERN_ERR "Invalid "
+ "nand emu file size: 0x%llx\n", nef_size);
+ goto out;
+ } else {
+ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
+ "%lld\n", nef_size);
+ }
+
+ file_offset = 0;
+ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
+ tmp_file_offset = file_offset;
+ nwritten = vfs_write(nef_filp,
+ (char __user *)flash_memory[i],
+ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
+ if (nwritten < GLOB_LLD_PAGE_SIZE) {
+ printk(KERN_ERR "%s, Line %d - "
+ "nand emu file partial write: "
+ "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
+ goto out;
+ }
+ file_offset += GLOB_LLD_PAGE_SIZE;
+ }
+ rc = 0;
+
+out:
+ filp_close(nef_filp, current->files);
+ set_fs(fs);
+ return rc;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Flash_Init
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Creates & initializes the flash RAM array.
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Flash_Init(void)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
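+ /* Allocate one contiguous buffer covering the whole emulated flash; */
+ /* the remaining flash_memory[] entries are per-page pointers into it. */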
+ flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
+ GLOB_LLD_BLOCKS *
+ GLOB_LLD_PAGES *
+ sizeof(u8));
+ if (!flash_memory[0]) {
+ printk(KERN_ERR "Fail to allocate memory "
+ "for nand emulator!\n");
+ return ERR;
+ }
+
+ memset((char *)(flash_memory[0]), 0xFF,
+ GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
+ sizeof(u8));
+
+ for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
+ flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
+
+ emu_load_file_to_mem(); /* Load nand emu file to mem */
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Flash_Release
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Releases the flash.
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int emu_Flash_Release(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ emu_write_mem_to_file(); /* Write back mem to nand emu file */
+
+ vfree(flash_memory[0]);
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Read_Device_ID
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Reads the info from the controller registers.
+* Sets up DeviceInfo structure with device parameters
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+u16 emu_Read_Device_ID(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ DeviceInfo.wDeviceMaker = 0;
+ DeviceInfo.wDeviceType = 8;
+ DeviceInfo.wSpectraStartBlock = 36;
+ DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
+ DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
+ DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
+ DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
+ DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
+ DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
+ GLOB_LLD_PAGE_DATA_SIZE;
+ DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
+ DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
+ DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
+ DeviceInfo.wSpectraStartBlock
+ + 1);
+ DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
+ DeviceInfo.nBitsInPageNumber =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
+ DeviceInfo.nBitsInPageDataSize =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
+ DeviceInfo.nBitsInBlockDataSize =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
+
+#if CMD_DMA
+ totalUsedBanks = 4;
+ valid_banks[0] = 1;
+ valid_banks[1] = 1;
+ valid_banks[2] = 1;
+ valid_banks[3] = 1;
+#endif
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Flash_Reset
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Reset the flash
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Flash_Reset(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Erase_Block
+* Inputs: Address
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Erase a block
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Erase_Block(u32 block_add)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (block_add >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "emu_Erase_Block error! "
+ "Too big block address: %d\n", block_add);
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
+ (int)block_add);
+
+ for (i = block_add * GLOB_LLD_PAGES;
+ i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
+ if (flash_memory[i]) {
+ memset((u8 *)(flash_memory[i]), 0xFF,
+ DeviceInfo.wPageSize * sizeof(u8));
+ }
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Write_Page_Main
+* Inputs: Write buffer address pointer
+* Block number
+* Page number
+* Number of pages to process
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Write the data in the buffer to main area of flash
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks)
+ return FAIL;
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
+ return FAIL;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
+ "lba %u Page %u PageCount %u\n",
+ (unsigned int)Block,
+ (unsigned int)Page, (unsigned int)PageCount);
+
+ for (i = 0; i < PageCount; i++) {
+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
+ printk(KERN_ERR "Run out of memory\n");
+ return FAIL;
+ }
+ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
+ write_data, DeviceInfo.wPageDataSize);
+ write_data += DeviceInfo.wPageDataSize;
+ Page++;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Read_Page_Main
+* Inputs: Read buffer address pointer
+* Block number
+* Page number
+* Number of pages to process
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Read the data from the flash main area to the buffer
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks)
+ return FAIL;
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
+ return FAIL;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
+ "lba %u Page %u PageCount %u\n",
+ (unsigned int)Block,
+ (unsigned int)Page, (unsigned int)PageCount);
+
+ for (i = 0; i < PageCount; i++) {
+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
+ memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
+ } else {
+ memcpy(read_data,
+ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
+ + Page]),
+ DeviceInfo.wPageDataSize);
+ }
+ read_data += DeviceInfo.wPageDataSize;
+ Page++;
+ }
+
+ return PASS;
+}
+
+#ifndef ELDORA
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Read_Page_Main_Spare
+* Inputs: Write Buffer
+* Address
+* Buffer size
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Read from flash main+spare area
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ int i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Read Page Main+Spare "
+ "Error: Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Read Page Main+Spare "
+ "Error: Page number too big\n");
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
+ "No. of pages %u block %u start page %u\n",
+ (unsigned int)PageCount,
+ (unsigned int)Block, (unsigned int)Page);
+
+ for (i = 0; i < PageCount; i++) {
+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
+ memset(read_data, 0xFF, DeviceInfo.wPageSize);
+ } else {
+ memcpy(read_data, (u8 *) (flash_memory[Block *
+ GLOB_LLD_PAGES
+ + Page]),
+ DeviceInfo.wPageSize);
+ }
+
+ read_data += DeviceInfo.wPageSize;
+ Page++;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Write_Page_Main_Spare
+* Inputs: Write buffer
+* address
+* buffer length
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Write the buffer to main+spare area of flash
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 page_count)
+{
+ u16 i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Write Page Main + Spare "
+ "Error: Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + page_count > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Write Page Main + Spare "
+ "Error: Page number too big\n");
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
+ "No. of pages %u block %u start page %u\n",
+ (unsigned int)page_count,
+ (unsigned int)Block, (unsigned int)Page);
+
+ for (i = 0; i < page_count; i++) {
+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
+ printk(KERN_ERR "Run out of memory!\n");
+ return FAIL;
+ }
+ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
+ write_data, DeviceInfo.wPageSize);
+ write_data += DeviceInfo.wPageSize;
+ Page++;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Write_Page_Spare
+* Inputs: Write buffer
+* Address
+* buffer size
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Write the buffer in the spare area
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Read Page Spare Error: "
+ "Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Read Page Spare Error: "
+ "Page number too big\n");
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
+ "block %u page %u\n",
+ (unsigned int)Block, (unsigned int)Page);
+
+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
+ printk(KERN_ERR "Run out of memory!\n");
+ return FAIL;
+ }
+
+ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
+ DeviceInfo.wPageDataSize), write_data,
+ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Read_Page_Spare
+* Inputs: Write Buffer
+* Address
+* Buffer size
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Read data from the spare area
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Read Page Spare "
+ "Error: Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Read Page Spare "
+ "Error: Page number too big\n");
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
+ "block %u page %u\n",
+ (unsigned int)Block, (unsigned int)Page);
+
+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
+ memset(write_data, 0xFF,
+ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
+ } else {
+ memcpy(write_data,
+ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
+ + DeviceInfo.wPageDataSize),
+ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Enable_Disable_Interrupts
+* Inputs: enable or disable
+* Outputs: none
+* Description: NOP
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+}
+
+u16 emu_Get_Bad_Block(u32 block)
+{
+ return 0;
+}
+
+#if CMD_DMA
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Support for CDMA functions
+************************************
+* emu_CDMA_Flash_Init
+* CDMA_process_data command (use LLD_CDMA)
+* CDMA_MemCopy_CMD (use LLD_CDMA)
+* emu_CDMA_execute all commands
+* emu_CDMA_Event_Status
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_CDMA_Flash_Init(void)
+{
+ u16 i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
+ PendingCMD[i].CMD = 0;
+ PendingCMD[i].Tag = 0;
+ PendingCMD[i].DataAddr = 0;
+ PendingCMD[i].Block = 0;
+ PendingCMD[i].Page = 0;
+ PendingCMD[i].PageCount = 0;
+ PendingCMD[i].DataDestAddr = 0;
+ PendingCMD[i].DataSrcAddr = 0;
+ PendingCMD[i].MemCopyByteCnt = 0;
+ PendingCMD[i].ChanSync[0] = 0;
+ PendingCMD[i].ChanSync[1] = 0;
+ PendingCMD[i].ChanSync[2] = 0;
+ PendingCMD[i].ChanSync[3] = 0;
+ PendingCMD[i].ChanSync[4] = 0;
+ PendingCMD[i].Status = 3;
+ }
+
+ return PASS;
+}
+
+static void emu_isr(int irq, void *dev_id)
+{
+ /* TODO: ... */
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: CDMA_Execute_CMDs
+* Inputs: tag_count: the number of pending cmds to do
+* Outputs: PASS/FAIL
+* Description: execute each command in the pending CMD array
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_CDMA_Execute_CMDs(u16 tag_count)
+{
+ u16 i, j;
+ u8 CMD; /* cmd parameter */
+ u8 *data;
+ u32 block;
+ u16 page;
+ u16 count;
+ u16 status = PASS;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
+ "Tag Count %u\n", tag_count);
+
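+ /* The first totalUsedBanks entries of PendingCMD are filled with */
+ /* per-bank dummy commands before the real commands are executed. */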
+ for (i = 0; i < totalUsedBanks; i++) {
+ PendingCMD[i].CMD = DUMMY_CMD;
+ PendingCMD[i].Tag = 0xFF;
+ PendingCMD[i].Block =
+ (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
+
+ for (j = 0; j <= MAX_CHANS; j++)
+ PendingCMD[i].ChanSync[j] = 0;
+ }
+
+ CDMA_Execute_CMDs();
+
+ print_pending_cmds();
+
+#if DEBUG_SYNC
+ debug_sync_cnt++;
+#endif
+
+ for (i = MAX_CHANS;
+ i < tag_count + MAX_CHANS; i++) {
+ CMD = PendingCMD[i].CMD;
+ data = PendingCMD[i].DataAddr;
+ block = PendingCMD[i].Block;
+ page = PendingCMD[i].Page;
+ count = PendingCMD[i].PageCount;
+
+ switch (CMD) {
+ case ERASE_CMD:
+ emu_Erase_Block(block);
+ PendingCMD[i].Status = PASS;
+ break;
+ case WRITE_MAIN_CMD:
+ emu_Write_Page_Main(data, block, page, count);
+ PendingCMD[i].Status = PASS;
+ break;
+ case WRITE_MAIN_SPARE_CMD:
+ emu_Write_Page_Main_Spare(data, block, page, count);
+ PendingCMD[i].Status = PASS;
+ break;
+ case READ_MAIN_CMD:
+ emu_Read_Page_Main(data, block, page, count);
+ PendingCMD[i].Status = PASS;
+ break;
+ case MEMCOPY_CMD:
+ memcpy(PendingCMD[i].DataDestAddr,
+ PendingCMD[i].DataSrcAddr,
+ PendingCMD[i].MemCopyByteCnt);
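+ /* fall through: a completed memcopy also gets PASS status */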
+ case DUMMY_CMD:
+ PendingCMD[i].Status = PASS;
+ break;
+ default:
+ PendingCMD[i].Status = FAIL;
+ break;
+ }
+ }
+
+ /*
+ * Temporary code to reset the PendingCMD array for basic testing.
+ * It should be done at the end of the event status function.
+ */
+ for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
+ PendingCMD[i].CMD = 0;
+ PendingCMD[i].Tag = 0;
+ PendingCMD[i].DataAddr = 0;
+ PendingCMD[i].Block = 0;
+ PendingCMD[i].Page = 0;
+ PendingCMD[i].PageCount = 0;
+ PendingCMD[i].DataDestAddr = 0;
+ PendingCMD[i].DataSrcAddr = 0;
+ PendingCMD[i].MemCopyByteCnt = 0;
+ PendingCMD[i].ChanSync[0] = 0;
+ PendingCMD[i].ChanSync[1] = 0;
+ PendingCMD[i].ChanSync[2] = 0;
+ PendingCMD[i].ChanSync[3] = 0;
+ PendingCMD[i].ChanSync[4] = 0;
+ PendingCMD[i].Status = CMD_NOT_DONE;
+ }
+
+ nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
+
+ emu_isr(0, 0); /* This is a null ISR for now; it needs to be filled in later */
+
+ return status;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: emu_Event_Status
+* Inputs: none
+* Outputs: Event_Status code
+* Description: This function can also be used to force errors
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 emu_CDMA_Event_Status(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return EVENT_PASS;
+}
+
+#endif /* CMD_DMA */
+#endif /* !ELDORA */
+#endif /* FLASH_EMU */
diff --git a/drivers/staging/spectra/lld_emu.h b/drivers/staging/spectra/lld_emu.h
new file mode 100644
index 00000000000..63f84c38d3c
--- /dev/null
+++ b/drivers/staging/spectra/lld_emu.h
@@ -0,0 +1,51 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_EMU_
+#define _LLD_EMU_
+
+#include "ffsport.h"
+#include "ffsdefs.h"
+
+/* prototypes: emulator API functions */
+extern u16 emu_Flash_Reset(void);
+extern u16 emu_Flash_Init(void);
+extern int emu_Flash_Release(void);
+extern u16 emu_Read_Device_ID(void);
+extern u16 emu_Erase_Block(u32 block_addr);
+extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
+ u16 PageCount);
+extern u16 emu_Event_Status(void);
+extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
+extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
+ u16 PageCount);
+extern u16 emu_Get_Bad_Block(u32 block);
+
+u16 emu_CDMA_Flash_Init(void);
+u16 emu_CDMA_Execute_CMDs(u16 tag_count);
+u16 emu_CDMA_Event_Status(void);
+#endif /*_LLD_EMU_*/
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
new file mode 100644
index 00000000000..0de05b1e75f
--- /dev/null
+++ b/drivers/staging/spectra/lld_mtd.c
@@ -0,0 +1,687 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include "flash.h"
+#include "ffsdefs.h"
+#include "lld_emu.h"
+#include "lld.h"
+#if CMD_DMA
+#include "lld_cdma.h"
+#endif
+
+#define GLOB_LLD_PAGES 64
+#define GLOB_LLD_PAGE_SIZE (512+16)
+#define GLOB_LLD_PAGE_DATA_SIZE 512
+#define GLOB_LLD_BLOCKS 2048
+
+#if CMD_DMA
+#include "lld_cdma.h"
+u32 totalUsedBanks;
+u32 valid_banks[MAX_CHANS];
+#endif
+
+static struct mtd_info *spectra_mtd;
+static int mtddev = -1;
+module_param(mtddev, int, 0);
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Flash_Init
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Creates & initializes the flash RAM array.
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Flash_Init(void)
+{
+ if (mtddev == -1) {
+ printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
+ return FAIL;
+ }
+
+ spectra_mtd = get_mtd_device(NULL, mtddev);
+ if (!spectra_mtd) {
+ printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Flash_Release
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Releases the flash.
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+int mtd_Flash_Release(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+ if (!spectra_mtd)
+ return PASS;
+
+ put_mtd_device(spectra_mtd);
+ spectra_mtd = NULL;
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Read_Device_ID
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Reads the info from the controller registers.
+* Sets up DeviceInfo structure with device parameters
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+
+u16 mtd_Read_Device_ID(void)
+{
+ uint64_t tmp;
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (!spectra_mtd)
+ return FAIL;
+
+ DeviceInfo.wDeviceMaker = 0;
+ DeviceInfo.wDeviceType = 8;
+ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
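+ /* MTD sizes are 64-bit; do_div() divides tmp in place so the block */
+ /* count can be computed without a 64-bit division on 32-bit hosts. */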
+ tmp = spectra_mtd->size;
+ do_div(tmp, spectra_mtd->erasesize);
+ DeviceInfo.wTotalBlocks = tmp;
+ DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
+ DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
+ DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
+ DeviceInfo.wPageDataSize = spectra_mtd->writesize;
+ DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
+ DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
+ DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * DeviceInfo.wPagesPerBlock;
+ DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
+ DeviceInfo.wSpectraStartBlock
+ + 1);
+ DeviceInfo.MLCDevice = 0; /* spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK */
+ DeviceInfo.nBitsInPageNumber =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
+ DeviceInfo.nBitsInPageDataSize =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
+ DeviceInfo.nBitsInBlockDataSize =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
+
+#if CMD_DMA
+ totalUsedBanks = 4;
+ valid_banks[0] = 1;
+ valid_banks[1] = 1;
+ valid_banks[2] = 1;
+ valid_banks[3] = 1;
+#endif
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Flash_Reset
+* Inputs: none
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Reset the flash
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Flash_Reset(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return PASS;
+}
+
+void erase_callback(struct erase_info *e)
+{
+ complete((void *)e->priv);
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Erase_Block
+* Inputs: Address
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Erase a block
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Erase_Block(u32 block_add)
+{
+ struct erase_info erase;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int ret;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (block_add >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "mtd_Erase_Block error! "
+ "Too big block address: %d\n", block_add);
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
+ (int)block_add);
+
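+ /* Issue an asynchronous MTD erase and wait on a completion that */
+ /* erase_callback() fires; erase.priv carries the completion pointer. */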
+ erase.mtd = spectra_mtd;
+ erase.callback = erase_callback;
+ erase.addr = block_add * spectra_mtd->erasesize;
+ erase.len = spectra_mtd->erasesize;
+ erase.priv = (unsigned long)&comp;
+
+ ret = spectra_mtd->erase(spectra_mtd, &erase);
+ if (!ret) {
+ wait_for_completion(&comp);
+ if (erase.state != MTD_ERASE_DONE)
+ ret = -EIO;
+ }
+ if (ret) {
+ printk(KERN_WARNING "mtd_Erase_Block error! "
+ "erase of region [0x%llx, 0x%llx] failed\n",
+ erase.addr, erase.len);
+ return FAIL;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Write_Page_Main
+* Inputs: Write buffer address pointer
+* Block number
+* Page number
+* Number of pages to process
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Write the data in the buffer to main area of flash
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ size_t retlen;
+ int ret = 0;
+
+ if (Block >= DeviceInfo.wTotalBlocks)
+ return FAIL;
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
+ return FAIL;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
+ "lba %u Page %u PageCount %u\n",
+ (unsigned int)Block,
+ (unsigned int)Page, (unsigned int)PageCount);
+
+
+ while (PageCount) {
+ ret = spectra_mtd->write(spectra_mtd,
+ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
+ DeviceInfo.wPageDataSize, &retlen, write_data);
+ if (ret) {
+ printk(KERN_ERR "%s failed %d\n", __func__, ret);
+ return FAIL;
+ }
+ write_data += DeviceInfo.wPageDataSize;
+ Page++;
+ PageCount--;
+ }
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Read_Page_Main
+* Inputs: Read buffer address pointer
+* Block number
+* Page number
+* Number of pages to process
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Read the data from the flash main area to the buffer
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Read_Page_Main(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ size_t retlen;
+ int ret = 0;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks)
+ return FAIL;
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
+ return FAIL;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
+ "lba %u Page %u PageCount %u\n",
+ (unsigned int)Block,
+ (unsigned int)Page, (unsigned int)PageCount);
+
+
+ while (PageCount) {
+ ret = spectra_mtd->read(spectra_mtd,
+ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
+ DeviceInfo.wPageDataSize, &retlen, read_data);
+ if (ret) {
+ printk(KERN_ERR "%s failed %d\n", __func__, ret);
+ return FAIL;
+ }
+ read_data += DeviceInfo.wPageDataSize;
+ Page++;
+ PageCount--;
+ }
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return PASS;
+}
+
+#ifndef ELDORA
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Read_Page_Main_Spare
+* Inputs: Write Buffer
+* Address
+* Buffer size
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Read from flash main+spare area
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Read Page Main+Spare "
+ "Error: Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Read Page Main+Spare "
+ "Error: Page number %d+%d too big in block %d\n",
+ Page, PageCount, Block);
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
+ "No. of pages %u block %u start page %u\n",
+ (unsigned int)PageCount,
+ (unsigned int)Block, (unsigned int)Page);
+
+
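+ /* Read each page's data plus the BTSIG bytes of its OOB area */
+ /* (MTD_OOB_AUTO placement); BTSIG_OFFSET and BTSIG_BYTES are */
+ /* defined elsewhere in this driver. */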
+ while (PageCount) {
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.datbuf = read_data;
+ ops.len = DeviceInfo.wPageDataSize;
+ ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
+ ops.ooblen = BTSIG_BYTES;
+ ops.ooboffs = 0;
+
+ ret = spectra_mtd->read_oob(spectra_mtd,
+ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
+ &ops);
+ if (ret) {
+ printk(KERN_ERR "%s failed %d\n", __func__, ret);
+ return FAIL;
+ }
+ read_data += DeviceInfo.wPageSize;
+ Page++;
+ PageCount--;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Write_Page_Main_Spare
+* Inputs: Write buffer
+* address
+* buffer length
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Write the buffer to main+spare area of flash
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 page_count)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Write Page Main + Spare "
+ "Error: Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + page_count > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Write Page Main + Spare "
+ "Error: Page number %d+%d too big in block %d\n",
+ Page, page_count, Block);
+ WARN_ON(1);
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
+ "No. of pages %u block %u start page %u\n",
+ (unsigned int)page_count,
+ (unsigned int)Block, (unsigned int)Page);
+
+ while (page_count) {
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.datbuf = write_data;
+ ops.len = DeviceInfo.wPageDataSize;
+ ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
+ ops.ooblen = BTSIG_BYTES;
+ ops.ooboffs = 0;
+
+ ret = spectra_mtd->write_oob(spectra_mtd,
+ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
+ &ops);
+ if (ret) {
+ printk(KERN_ERR "%s failed %d\n", __func__, ret);
+ return FAIL;
+ }
+ write_data += DeviceInfo.wPageSize;
+ Page++;
+ page_count--;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Write_Page_Spare
+* Inputs: Write buffer address pointer
+* Block number
+* Page number
+* Number of pages to process
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Write the buffer in the spare area
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ WARN_ON(1);
+ return FAIL;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Read_Page_Spare
+* Inputs: Read buffer address pointer
+* Block number
+* Page number
+* Number of pages to process
+* Outputs: PASS=0 (notice 0=ok here)
+* Description: Read data from the spare area
+*
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (Block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "Read Page Spare "
+ "Error: Block Address too big\n");
+ return FAIL;
+ }
+
+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "Read Page Spare "
+ "Error: Page number too big\n");
+ return FAIL;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
+ "block %u page %u (%u pages)\n",
+ (unsigned int)Block, (unsigned int)Page, PageCount);
+
+ while (PageCount) {
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.datbuf = NULL;
+ ops.len = 0;
+ ops.oobbuf = read_data;
+ ops.ooblen = BTSIG_BYTES;
+ ops.ooboffs = 0;
+
+ ret = spectra_mtd->read_oob(spectra_mtd,
+ (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
+ &ops);
+ if (ret) {
+ printk(KERN_ERR "%s failed %d\n", __func__, ret);
+ return FAIL;
+ }
+
+ read_data += DeviceInfo.wPageSize;
+ Page++;
+ PageCount--;
+ }
+
+ return PASS;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_Enable_Disable_Interrupts
+* Inputs: enable or disable
+* Outputs: none
+* Description: NOP
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+}
+
+u16 mtd_Get_Bad_Block(u32 block)
+{
+ return 0;
+}
+
+#if CMD_DMA
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Support for CDMA functions
+************************************
+* mtd_CDMA_Flash_Init
+* CDMA_process_data command (use LLD_CDMA)
+* CDMA_MemCopy_CMD (use LLD_CDMA)
+* mtd_CDMA_Execute_CMDs (execute all commands)
+* mtd_CDMA_Event_Status
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_CDMA_Flash_Init(void)
+{
+ u16 i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
+ PendingCMD[i].CMD = 0;
+ PendingCMD[i].Tag = 0;
+ PendingCMD[i].DataAddr = 0;
+ PendingCMD[i].Block = 0;
+ PendingCMD[i].Page = 0;
+ PendingCMD[i].PageCount = 0;
+ PendingCMD[i].DataDestAddr = 0;
+ PendingCMD[i].DataSrcAddr = 0;
+ PendingCMD[i].MemCopyByteCnt = 0;
+ PendingCMD[i].ChanSync[0] = 0;
+ PendingCMD[i].ChanSync[1] = 0;
+ PendingCMD[i].ChanSync[2] = 0;
+ PendingCMD[i].ChanSync[3] = 0;
+ PendingCMD[i].ChanSync[4] = 0;
+ PendingCMD[i].Status = 3;
+ }
+
+ return PASS;
+}
+
+static void mtd_isr(int irq, void *dev_id)
+{
+ /* TODO: ... */
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_CDMA_Execute_CMDs
+* Inputs: tag_count: the number of pending cmds to do
+* Outputs: PASS/FAIL
+* Description: execute each command in the pending CMD array
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
+{
+ u16 i, j;
+ u8 CMD; /* cmd parameter */
+ u8 *data;
+ u32 block;
+ u16 page;
+ u16 count;
+ u16 status = PASS;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
+ "Tag Count %u\n", tag_count);
+
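+ /*
+ * Pre-fill the first totalUsedBanks descriptors with one dummy
+ * command per bank, each pointing at the first block of that
+ * bank's share of the flash.
+ */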
+ for (i = 0; i < totalUsedBanks; i++) {
+ PendingCMD[i].CMD = DUMMY_CMD;
+ PendingCMD[i].Tag = 0xFF;
+ PendingCMD[i].Block =
+ (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
+
+ for (j = 0; j <= MAX_CHANS; j++)
+ PendingCMD[i].ChanSync[j] = 0;
+ }
+
+ CDMA_Execute_CMDs(tag_count);
+
+#ifdef VERBOSE
+ print_pending_cmds(tag_count);
+#endif
+#if DEBUG_SYNC
+ }
+ debug_sync_cnt++;
+#endif
+
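+ /*
+ * The queued commands start at index MAX_CHANS. Each one is
+ * executed synchronously through the plain mtd_* helpers above,
+ * so this CDMA path is emulated in software on top of the MTD API.
+ */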
+ for (i = MAX_CHANS;
+ i < tag_count + MAX_CHANS; i++) {
+ CMD = PendingCMD[i].CMD;
+ data = PendingCMD[i].DataAddr;
+ block = PendingCMD[i].Block;
+ page = PendingCMD[i].Page;
+ count = PendingCMD[i].PageCount;
+
+ switch (CMD) {
+ case ERASE_CMD:
+ mtd_Erase_Block(block);
+ PendingCMD[i].Status = PASS;
+ break;
+ case WRITE_MAIN_CMD:
+ mtd_Write_Page_Main(data, block, page, count);
+ PendingCMD[i].Status = PASS;
+ break;
+ case WRITE_MAIN_SPARE_CMD:
+ mtd_Write_Page_Main_Spare(data, block, page, count);
+ PendingCMD[i].Status = PASS;
+ break;
+ case READ_MAIN_CMD:
+ mtd_Read_Page_Main(data, block, page, count);
+ PendingCMD[i].Status = PASS;
+ break;
+ case MEMCOPY_CMD:
+ memcpy(PendingCMD[i].DataDestAddr,
+ PendingCMD[i].DataSrcAddr,
+ PendingCMD[i].MemCopyByteCnt);
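+ /* fall through: DUMMY_CMD marks the command as done */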
+ case DUMMY_CMD:
+ PendingCMD[i].Status = PASS;
+ break;
+ default:
+ PendingCMD[i].Status = FAIL;
+ break;
+ }
+ }
+
+ /*
+ * Temporary code to reset the PendingCMD array for basic testing.
+ * It should be done at the end of the event status function.
+ */
+ for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
+ PendingCMD[i].CMD = 0;
+ PendingCMD[i].Tag = 0;
+ PendingCMD[i].DataAddr = 0;
+ PendingCMD[i].Block = 0;
+ PendingCMD[i].Page = 0;
+ PendingCMD[i].PageCount = 0;
+ PendingCMD[i].DataDestAddr = 0;
+ PendingCMD[i].DataSrcAddr = 0;
+ PendingCMD[i].MemCopyByteCnt = 0;
+ PendingCMD[i].ChanSync[0] = 0;
+ PendingCMD[i].ChanSync[1] = 0;
+ PendingCMD[i].ChanSync[2] = 0;
+ PendingCMD[i].ChanSync[3] = 0;
+ PendingCMD[i].ChanSync[4] = 0;
+ PendingCMD[i].Status = CMD_NOT_DONE;
+ }
+
+ nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
+
+ mtd_isr(0, 0); /* This is a null ISR for now; it needs to be filled in later */
+
+ return status;
+}
+
+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+* Function: mtd_CDMA_Event_Status
+* Inputs: none
+* Outputs: Event_Status code
+* Description: This function can also be used to force errors
+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
+u16 mtd_CDMA_Event_Status(void)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ return EVENT_PASS;
+}
+
+#endif /* CMD_DMA */
+#endif /* !ELDORA */
diff --git a/drivers/staging/spectra/lld_mtd.h b/drivers/staging/spectra/lld_mtd.h
new file mode 100644
index 00000000000..4e81ee87b53
--- /dev/null
+++ b/drivers/staging/spectra/lld_mtd.h
@@ -0,0 +1,51 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_MTD_
+#define _LLD_MTD_
+
+#include "ffsport.h"
+#include "ffsdefs.h"
+
+/* prototypes: MTD API functions */
+extern u16 mtd_Flash_Reset(void);
+extern u16 mtd_Flash_Init(void);
+extern int mtd_Flash_Release(void);
+extern u16 mtd_Read_Device_ID(void);
+extern u16 mtd_Erase_Block(u32 block_addr);
+extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
+ u16 PageCount);
+extern u16 mtd_Event_Status(void);
+extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
+extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
+ u16 Page, u16 PageCount);
+extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
+ u16 PageCount);
+extern u16 mtd_Get_Bad_Block(u32 block);
+
+u16 mtd_CDMA_Flash_Init(void);
+u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
+u16 mtd_CDMA_Event_Status(void);
+#endif /*_LLD_MTD_*/
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
new file mode 100644
index 00000000000..13c3ad2db39
--- /dev/null
+++ b/drivers/staging/spectra/lld_nand.c
@@ -0,0 +1,2601 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "lld.h"
+#include "lld_nand.h"
+#include "lld_cdma.h"
+
+#include "spectraswconfig.h"
+#include "flash.h"
+#include "ffsdefs.h"
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include "nand_regs.h"
+
+#define SPECTRA_NAND_NAME "nd"
+
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
+#define MAX_PAGES_PER_RW 128
+
+#define INT_IDLE_STATE 0
+#define INT_READ_PAGE_MAIN 0x01
+#define INT_WRITE_PAGE_MAIN 0x02
+#define INT_PIPELINE_READ_AHEAD 0x04
+#define INT_PIPELINE_WRITE_AHEAD 0x08
+#define INT_MULTI_PLANE_READ 0x10
+#define INT_MULTI_PLANE_WRITE 0x11
+
+static u32 enable_ecc;
+
+struct mrst_nand_info info;
+
+int totalUsedBanks;
+u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
+
+void __iomem *FlashReg;
+void __iomem *FlashMem;
+
+u16 conf_parameters[] = {
+ 0x0000,
+ 0x0000,
+ 0x01F4,
+ 0x01F4,
+ 0x01F4,
+ 0x01F4,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0040,
+ 0x0001,
+ 0x000A,
+ 0x000A,
+ 0x000A,
+ 0x0000,
+ 0x0000,
+ 0x0005,
+ 0x0012,
+ 0x000C
+};
+
+u16 NAND_Get_Bad_Block(u32 block)
+{
+ u32 status = PASS;
+ u32 flag_bytes = 0;
+ u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
+ u32 page, i;
+ u8 *pReadSpareBuf = buf_get_bad_block;
+
+ if (enable_ecc)
+ flag_bytes = DeviceInfo.wNumPageSpareFlag;
+
+ for (page = 0; page < 2; page++) {
+ status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
+ if (status != PASS)
+ return READ_ERROR;
+ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
+ if (pReadSpareBuf[i] != 0xff)
+ return DEFECTIVE_BLOCK;
+ }
+
+ for (page = 1; page < 3; page++) {
+ status = NAND_Read_Page_Spare(pReadSpareBuf, block,
+ DeviceInfo.wPagesPerBlock - page, 1);
+ if (status != PASS)
+ return READ_ERROR;
+ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
+ if (pReadSpareBuf[i] != 0xff)
+ return DEFECTIVE_BLOCK;
+ }
+
+ return GOOD_BLOCK;
+}
+
+
+u16 NAND_Flash_Reset(void)
+{
+ u32 i;
+ u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
+ INTR_STATUS1__RST_COMP,
+ INTR_STATUS2__RST_COMP,
+ INTR_STATUS3__RST_COMP};
+ u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
+ INTR_STATUS1__TIME_OUT,
+ INTR_STATUS2__TIME_OUT,
+ INTR_STATUS3__TIME_OUT};
+ u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
+ INTR_STATUS2, INTR_STATUS3};
+ u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
+ DEVICE_RESET__BANK1,
+ DEVICE_RESET__BANK2,
+ DEVICE_RESET__BANK3};
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
+ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
+ FlashReg + intr_status[i]);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
+ iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
+ while (!(ioread32(FlashReg + intr_status[i]) &
+ (intr_status_rst_comp[i] | intr_status_time_out[i])))
+ ;
+ if (ioread32(FlashReg + intr_status[i]) &
+ intr_status_time_out[i])
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Reset operation timed out on bank %d\n", i);
+ }
+
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
+ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
+ FlashReg + intr_status[i]);
+
+ return PASS;
+}
+
+static void NAND_ONFi_Timing_Mode(u16 mode)
+{
+ u16 Trea[6] = {40, 30, 25, 20, 20, 16};
+ u16 Trp[6] = {50, 25, 17, 15, 12, 10};
+ u16 Treh[6] = {30, 15, 15, 10, 10, 7};
+ u16 Trc[6] = {100, 50, 35, 30, 25, 20};
+ u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
+ u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
+ u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
+ u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
+ u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
+ u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
+ u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
+ u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+ u16 TclsRising = 1;
+ u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
+ u16 dv_window = 0;
+ u16 en_lo, en_hi;
+ u16 acc_clks;
+ u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
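+ /*
+ * Convert the nanosecond timing parameters of the selected ONFI
+ * mode into multiples of the controller clock period (CLK_X) and
+ * program the timing registers accordingly.
+ */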
+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
+
+#if ONFI_BLOOM_TIME
+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
+ en_hi++;
+#endif
+
+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+ if ((en_lo + en_hi) < CLK_MULTI)
+ en_lo += CLK_MULTI - en_lo - en_hi;
+
+ while (dv_window < 8) {
+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+ data_invalid =
+ data_invalid_rhoh <
+ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
+
+ dv_window = data_invalid - Trea[mode];
+
+ if (dv_window < 8)
+ en_lo++;
+ }
+
+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
+ acc_clks++;
+
+ if ((data_invalid - acc_clks * CLK_X) < 2)
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
+ __FILE__, __LINE__);
+
+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+ if (!TclsRising)
+ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
+ if (cs_cnt == 0)
+ cs_cnt = 1;
+
+ if (Tcea[mode]) {
+ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
+ cs_cnt++;
+ }
+
+#if MODE5_WORKAROUND
+ if (mode == 5)
+ acc_clks = 5;
+#endif
+
+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+ if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
+ (ioread32(FlashReg + DEVICE_ID) == 0x88))
+ acc_clks = 6;
+
+ iowrite32(acc_clks, FlashReg + ACC_CLKS);
+ iowrite32(re_2_we, FlashReg + RE_2_WE);
+ iowrite32(re_2_re, FlashReg + RE_2_RE);
+ iowrite32(we_2_re, FlashReg + WE_2_RE);
+ iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
+ iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
+ iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
+ iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
+}
+
+static void index_addr(u32 address, u32 data)
+{
+ iowrite32(address, FlashMem);
+ iowrite32(data, FlashMem + 0x10);
+}
+
+static void index_addr_read_data(u32 address, u32 *pdata)
+{
+ iowrite32(address, FlashMem);
+ *pdata = ioread32(FlashMem + 0x10);
+}
+
+static void set_ecc_config(void)
+{
+#if SUPPORT_8BITECC
+ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
+ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
+ iowrite32(8, FlashReg + ECC_CORRECTION);
+#endif
+
+ if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
+ == 1) {
+ DeviceInfo.wECCBytesPerSector = 4;
+ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
+ DeviceInfo.wNumPageSpareFlag =
+ DeviceInfo.wPageSpareSize -
+ DeviceInfo.wPageDataSize /
+ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
+ DeviceInfo.wECCBytesPerSector
+ - DeviceInfo.wSpareSkipBytes;
+ } else {
+ DeviceInfo.wECCBytesPerSector =
+ (ioread32(FlashReg + ECC_CORRECTION) &
+ ECC_CORRECTION__VALUE) * 13 / 8;
+ if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
+ DeviceInfo.wECCBytesPerSector += 2;
+ else
+ DeviceInfo.wECCBytesPerSector += 1;
+
+ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
+ DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
+ DeviceInfo.wPageDataSize /
+ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
+ DeviceInfo.wECCBytesPerSector
+ - DeviceInfo.wSpareSkipBytes;
+ }
+}
+
+static u16 get_onfi_nand_para(void)
+{
+ int i;
+ u16 blks_lun_l, blks_lun_h, n_of_luns;
+ u32 blockperlun, id;
+
+ iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
+
+ while (!((ioread32(FlashReg + INTR_STATUS0) &
+ INTR_STATUS0__RST_COMP) |
+ (ioread32(FlashReg + INTR_STATUS0) &
+ INTR_STATUS0__TIME_OUT)))
+ ;
+
+ if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
+ iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
+ while (!((ioread32(FlashReg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) |
+ (ioread32(FlashReg + INTR_STATUS1) &
+ INTR_STATUS1__TIME_OUT)))
+ ;
+
+ if (ioread32(FlashReg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) {
+ iowrite32(DEVICE_RESET__BANK2,
+ FlashReg + DEVICE_RESET);
+ while (!((ioread32(FlashReg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) |
+ (ioread32(FlashReg + INTR_STATUS2) &
+ INTR_STATUS2__TIME_OUT)))
+ ;
+
+ if (ioread32(FlashReg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) {
+ iowrite32(DEVICE_RESET__BANK3,
+ FlashReg + DEVICE_RESET);
+ while (!((ioread32(FlashReg + INTR_STATUS3) &
+ INTR_STATUS3__RST_COMP) |
+ (ioread32(FlashReg + INTR_STATUS3) &
+ INTR_STATUS3__TIME_OUT)))
+ ;
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 2!\n");
+ }
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 1!\n");
+ }
+ }
+
+ iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
+ iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
+ iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
+ iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
+
+ DeviceInfo.wONFIDevFeatures =
+ ioread32(FlashReg + ONFI_DEVICE_FEATURES);
+ DeviceInfo.wONFIOptCommands =
+ ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
+ DeviceInfo.wONFITimingMode =
+ ioread32(FlashReg + ONFI_TIMING_MODE);
+ DeviceInfo.wONFIPgmCacheTimingMode =
+ ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
+
+ n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
+ blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
+ blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
+
+ blockperlun = (blks_lun_h << 16) | blks_lun_l;
+
+ DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
+
+ if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
+ ONFI_TIMING_MODE__VALUE))
+ return FAIL;
+
+ for (i = 5; i > 0; i--) {
+ if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
+ break;
+ }
+
+ NAND_ONFi_Timing_Mode(i);
+
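+ /*
+ * Issue a Read ID (0x90) command through MODE_11 and read out the
+ * ID bytes; the cell-type bits (0x0C) of the third byte tell MLC
+ * parts apart from SLC ones.
+ */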
+ index_addr(MODE_11 | 0, 0x90);
+ index_addr(MODE_11 | 1, 0);
+
+ for (i = 0; i < 3; i++)
+ index_addr_read_data(MODE_11 | 2, &id);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
+
+ DeviceInfo.MLCDevice = id & 0x0C;
+
+ /* So far, all the ONFI devices we know of support the page cache */
+ /* read/write feature, so the pipeline_rw_ahead feature could be enabled here: */
+ /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
+ /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
+
+ return PASS;
+}
+
+static void get_samsung_nand_para(void)
+{
+ u8 no_of_planes;
+ u32 blk_size;
+ u64 plane_size, capacity;
+ u32 id_bytes[5];
+ int i;
+
+ index_addr((u32)(MODE_11 | 0), 0x90);
+ index_addr((u32)(MODE_11 | 1), 0);
+ for (i = 0; i < 5; i++)
+ index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+ id_bytes[0], id_bytes[1], id_bytes[2],
+ id_bytes[3], id_bytes[4]);
+
+ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
+ /* Set timing register values according to datasheet */
+ iowrite32(5, FlashReg + ACC_CLKS);
+ iowrite32(20, FlashReg + RE_2_WE);
+ iowrite32(12, FlashReg + WE_2_RE);
+ iowrite32(14, FlashReg + ADDR_2_DATA);
+ iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
+ iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
+ iowrite32(2, FlashReg + CS_SETUP_CNT);
+ }
+
+ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
+ plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
+ blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
+ capacity = (u64)128 * plane_size * no_of_planes;
+
+ DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
+}
+
+static void get_toshiba_nand_para(void)
+{
+ void __iomem *scratch_reg;
+ u32 tmp;
+
+ /* Workaround for a controller bug that reports a wrong spare */
+ /* area size for some kinds of Toshiba NAND devices */
+ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+ iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
+ tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
+ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+ iowrite32(15, FlashReg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ iowrite32(8, FlashReg + ECC_CORRECTION);
+#endif
+ }
+
+ /* Toshiba NAND cannot report its block count, so the user must */
+ /* provide the correct block number in a scratch register before */
+ /* the Linux NAND driver is loaded. If no valid value is found in */
+ /* the scratch register, the default block number value is used. */
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
+ __FILE__, __LINE__);
+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (DeviceInfo.wTotalBlocks < 512)
+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+static void get_hynix_nand_para(void)
+{
+ void __iomem *scratch_reg;
+ u32 main_size, spare_size;
+
+ switch (DeviceInfo.wDeviceID) {
+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+ iowrite32(128, FlashReg + PAGES_PER_BLOCK);
+ iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
+ spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
+ iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
+ iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
+ iowrite32(0, FlashReg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+ iowrite32(15, FlashReg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ iowrite32(8, FlashReg + ECC_CORRECTION);
+#endif
+ DeviceInfo.MLCDevice = 1;
+ break;
+ default:
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
+ "Will use default parameter values instead.\n",
+ DeviceInfo.wDeviceID);
+ }
+
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
+ __FILE__, __LINE__);
+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (DeviceInfo.wTotalBlocks < 512)
+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+static void find_valid_banks(void)
+{
+ u32 id[LLD_MAX_FLASH_BANKS];
+ int i;
+
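+ /*
+ * Probe each bank with a Read ID command; a bank is considered
+ * valid if its first ID byte matches the one returned by bank 0.
+ */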
+ totalUsedBanks = 0;
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
+ index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
+ index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+ if (i == 0) {
+ if (id[i] & 0x0ff)
+ GLOB_valid_banks[i] = 1;
+ } else {
+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+ GLOB_valid_banks[i] = 1;
+ }
+
+ totalUsedBanks += GLOB_valid_banks[i];
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "totalUsedBanks: %d\n", totalUsedBanks);
+}
+
+static void detect_partition_feature(void)
+{
+ if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
+ if ((ioread32(FlashReg + PERM_SRC_ID_1) &
+ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ DeviceInfo.wSpectraStartBlock =
+ ((ioread32(FlashReg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MIN_VALUE) *
+ DeviceInfo.wTotalBlocks)
+ +
+ (ioread32(FlashReg + MIN_BLK_ADDR_1) &
+ MIN_BLK_ADDR_1__VALUE);
+
+ DeviceInfo.wSpectraEndBlock =
+ (((ioread32(FlashReg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
+ DeviceInfo.wTotalBlocks)
+ +
+ (ioread32(FlashReg + MAX_BLK_ADDR_1) &
+ MAX_BLK_ADDR_1__VALUE);
+
+ DeviceInfo.wTotalBlocks *= totalUsedBanks;
+
+ if (DeviceInfo.wSpectraEndBlock >=
+ DeviceInfo.wTotalBlocks) {
+ DeviceInfo.wSpectraEndBlock =
+ DeviceInfo.wTotalBlocks - 1;
+ }
+
+ DeviceInfo.wDataBlockNum =
+ DeviceInfo.wSpectraEndBlock -
+ DeviceInfo.wSpectraStartBlock + 1;
+ } else {
+ DeviceInfo.wTotalBlocks *= totalUsedBanks;
+ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ DeviceInfo.wSpectraEndBlock =
+ DeviceInfo.wTotalBlocks - 1;
+ DeviceInfo.wDataBlockNum =
+ DeviceInfo.wSpectraEndBlock -
+ DeviceInfo.wSpectraStartBlock + 1;
+ }
+ } else {
+ DeviceInfo.wTotalBlocks *= totalUsedBanks;
+ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
+ DeviceInfo.wDataBlockNum =
+ DeviceInfo.wSpectraEndBlock -
+ DeviceInfo.wSpectraStartBlock + 1;
+ }
+}
+
+static void dump_device_info(void)
+{
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
+ DeviceInfo.wDeviceMaker);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
+ DeviceInfo.wDeviceID);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
+ DeviceInfo.wDeviceType);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
+ DeviceInfo.wSpectraStartBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
+ DeviceInfo.wSpectraEndBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
+ DeviceInfo.wTotalBlocks);
+ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
+ DeviceInfo.wPagesPerBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
+ DeviceInfo.wPageSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
+ DeviceInfo.wPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
+ DeviceInfo.wPageSpareSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
+ DeviceInfo.wNumPageSpareFlag);
+ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
+ DeviceInfo.wECCBytesPerSector);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
+ DeviceInfo.wBlockSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
+ DeviceInfo.wBlockDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
+ DeviceInfo.wDataBlockNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
+ DeviceInfo.bPlaneNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
+ DeviceInfo.wDeviceMainAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
+ DeviceInfo.wDeviceSpareAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
+ DeviceInfo.wDevicesConnected);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
+ DeviceInfo.wDeviceWidth);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
+ DeviceInfo.wHWRevision);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
+ DeviceInfo.wHWFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
+ DeviceInfo.wONFIDevFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
+ DeviceInfo.wONFIOptCommands);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
+ DeviceInfo.wONFITimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
+ DeviceInfo.wONFIPgmCacheTimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
+ DeviceInfo.MLCDevice ? "Yes" : "No");
+ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
+ DeviceInfo.wSpareSkipBytes);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
+ DeviceInfo.nBitsInPageNumber);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
+ DeviceInfo.nBitsInPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
+ DeviceInfo.nBitsInBlockDataSize);
+}
+
+u16 NAND_Read_Device_ID(void)
+{
+ u16 status = PASS;
+ u8 no_of_planes;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
+ iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
+ DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
+ DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
+ DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
+
+ if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+ if (FAIL == get_onfi_nand_para())
+ return FAIL;
+ } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para();
+ } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
+ get_toshiba_nand_para();
+ } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para();
+ } else {
+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(FlashReg + ACC_CLKS),
+ ioread32(FlashReg + RE_2_WE),
+ ioread32(FlashReg + WE_2_RE),
+ ioread32(FlashReg + ADDR_2_DATA),
+ ioread32(FlashReg + RDWR_EN_LO_CNT),
+ ioread32(FlashReg + RDWR_EN_HI_CNT),
+ ioread32(FlashReg + CS_SETUP_CNT));
+
+ DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
+ DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
+
+ DeviceInfo.wDeviceMainAreaSize =
+ ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
+ DeviceInfo.wDeviceSpareAreaSize =
+ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
+
+ DeviceInfo.wPageDataSize =
+ ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
+
+ /* Note: When using the Micron 4K NAND device, the controller will
+ * report the Page Spare Size as 216 bytes, but Micron's spec says it
+ * is 218 bytes. Forcing it to 218 bytes makes the controller
+ * misbehave, so just leave the reported value alone. Keep in mind
+ * that this discrepancy may cause other problems in the future.
+ * - Yunpeng 2008-10-10
+ */
+ DeviceInfo.wPageSpareSize =
+ ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
+
+ DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
+
+ DeviceInfo.wPageSize =
+ DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
+ DeviceInfo.wBlockSize =
+ DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
+ DeviceInfo.wBlockDataSize =
+ DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
+
+ DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
+ DeviceInfo.wDeviceType =
+ ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
+
+ DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
+
+ DeviceInfo.wSpareSkipBytes =
+ ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
+ DeviceInfo.wDevicesConnected;
+
+ DeviceInfo.nBitsInPageNumber =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
+ DeviceInfo.nBitsInPageDataSize =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
+ DeviceInfo.nBitsInBlockDataSize =
+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
+
+ set_ecc_config();
+
+ no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
+ NUMBER_OF_PLANES__VALUE;
+
+ switch (no_of_planes) {
+ case 0:
+ case 1:
+ case 3:
+ case 7:
+ DeviceInfo.bPlaneNum = no_of_planes + 1;
+ break;
+ default:
+ status = FAIL;
+ break;
+ }
+
+ find_valid_banks();
+
+ detect_partition_feature();
+
+ dump_device_info();
+
+ return status;
+}
+
+u16 NAND_UnlockArrayAll(void)
+{
+ u64 start_addr, end_addr;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ start_addr = 0;
+ end_addr = ((u64)DeviceInfo.wBlockSize *
+ (DeviceInfo.wTotalBlocks - 1)) >>
+ DeviceInfo.nBitsInPageDataSize;
+
+ index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
+ index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
+
+ return PASS;
+}
+
+void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (INT_ENABLE)
+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
+ else
+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
+}
+
+u16 NAND_Erase_Block(u32 block)
+{
+ u16 status = PASS;
+ u64 flash_add;
+ u16 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
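+ /*
+ * Blocks are distributed evenly across the used banks: convert the
+ * global block number into a byte offset within its bank
+ * (flash_add) and the owning bank index (flash_bank).
+ */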
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ if (block >= DeviceInfo.wTotalBlocks)
+ status = FAIL;
+
+ if (status == PASS) {
+ intr_status = intr_status_addresses[flash_bank];
+
+ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
+ FlashReg + intr_status);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
+
+ while (!(ioread32(FlashReg + intr_status) &
+ (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ERASE_FAIL)
+ status = FAIL;
+
+ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
+ FlashReg + intr_status);
+ }
+
+ return status;
+}
+
+static u32 Boundary_Check_Block_Page(u32 block, u16 page,
+ u16 page_count)
+{
+ u32 status = PASS;
+
+ if (block >= DeviceInfo.wTotalBlocks)
+ status = FAIL;
+
+ if (page + page_count > DeviceInfo.wPagesPerBlock)
+ status = FAIL;
+
+ return status;
+}
+
+u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
+ u16 page_count)
+{
+ u32 status = PASS;
+ u32 i;
+ u64 flash_add;
+ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
+ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u8 *page_spare = buf_read_page_spare;
+
+ if (block >= DeviceInfo.wTotalBlocks) {
+ printk(KERN_ERR "block too big: %d\n", (int)block);
+ status = FAIL;
+ }
+
+ if (page >= DeviceInfo.wPagesPerBlock) {
+ printk(KERN_ERR "page too big: %d\n", page);
+ status = FAIL;
+ }
+
+ if (page_count > 1) {
+ printk(KERN_ERR "page count too big: %d\n", page_count);
+ status = FAIL;
+ }
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ if (status == PASS) {
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
+ 0x41);
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
+ 0x2000 | page_count);
+ while (!(ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__LOAD_COMP))
+ ;
+
+ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
+ FlashMem);
+
+ for (i = 0; i < (PageSpareSize / 4); i++)
+ *((u32 *)page_spare + i) =
+ ioread32(FlashMem + 0x10);
+
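+ /*
+ * With ECC enabled the controller keeps the spare flag bytes at
+ * the end of the physical spare area; move them to the front of
+ * the caller's buffer so the logical layout starts with them.
+ */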
+ if (enable_ecc) {
+ for (i = 0; i < spareFlagBytes; i++)
+ read_data[i] =
+ page_spare[PageSpareSize -
+ spareFlagBytes + i];
+ for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
+ read_data[spareFlagBytes + i] =
+ page_spare[i];
+ } else {
+ for (i = 0; i < PageSpareSize; i++)
+ read_data[i] = page_spare[i];
+ }
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+ }
+
+ return status;
+}
+
+/* Unused function. Should be removed later */
+u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
+ u16 page_count)
+{
+ printk(KERN_ERR
+ "Error! This function (NAND_Write_Page_Spare) should never"
+ " be called!\n");
+ return ERR;
+}
+
+/* op value: 0 - DDMA read; 1 - DDMA write */
+static void ddma_trans(u8 *data, u64 flash_add,
+ u32 flash_bank, int op, u32 numPages)
+{
+ u32 data_addr;
+
+ /* Map virtual address to bus address for DDMA */
+ data_addr = virt_to_bus(data);
+
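+ /*
+ * Program the DDMA transfer with four indexed writes: the command
+ * type with direction and page count, the upper and then lower
+ * 16 bits of the buffer bus address, and finally the trigger word
+ * that starts the transfer (burst length 0x40, interrupt on
+ * completion).
+ */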
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
+ (u16)(2 << 12) | (op << 8) | numPages);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
+ (u16)(2 << 12) | (2 << 8) | 0);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ ((u16)(0x0FFFF & data_addr) << 8)),
+ (u16)(2 << 12) | (3 << 8) | 0);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (1 << 16) | (0x40 << 8)),
+ (u16)(2 << 12) | (4 << 8) | 0);
+}
+
+/* If the data in buf is all 0xff, return 1; otherwise return 0 */
+static int check_all_1(u8 *buf)
+{
+ int i, j, cnt;
+
+ for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
+ if (buf[i] != 0xff) {
+ cnt = 0;
+ nand_dbg_print(NAND_DBG_WARN,
+ "the first non-0xff data byte is: %d\n", i);
+ for (j = i; j < DeviceInfo.wPageDataSize; j++) {
+ nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
+ cnt++;
+ if (cnt > 8)
+ break;
+ }
+ nand_dbg_print(NAND_DBG_WARN, "\n");
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int do_ecc_new(unsigned long bank, u8 *buf,
+ u32 block, u16 page)
+{
+ int status = PASS;
+ u16 err_page = 0;
+ u16 err_byte;
+ u8 err_sect;
+ u8 err_dev;
+ u16 err_fix_info;
+ u16 err_addr;
+ u32 ecc_sect_size;
+ u8 *err_pos;
+ u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
+ ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
+
+ ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
+
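+ /*
+ * Walk the controller's ECC error log until LAST_ERR_INFO is set.
+ * Correctable errors are fixed in place by XOR-ing the reported
+ * byte mask at the faulty position; an uncorrectable error marks
+ * the whole read as failed.
+ */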
+ do {
+ err_page = ioread32(FlashReg + err_page_addr[bank]);
+ err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
+ err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
+ err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
+ err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
+ err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
+ >> 8);
+ if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "%s, Line %d Uncorrectable ECC error "
+ "when read block %d page %d."
+ "PTN_INTR register: 0x%x "
+ "err_page: %d, err_sect: %d, err_byte: %d, "
+ "err_dev: %d, ecc_sect_size: %d, "
+ "err_fix_info: 0x%x\n",
+ __FILE__, __LINE__, block, page,
+ ioread32(FlashReg + PTN_INTR),
+ err_page, err_sect, err_byte, err_dev,
+ ecc_sect_size, (u32)err_fix_info);
+
+ if (check_all_1(buf))
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
+ "All 0xff!\n",
+ __FILE__, __LINE__);
+ else
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
+ "Not all 0xff!\n",
+ __FILE__, __LINE__);
+ status = FAIL;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "%s, Line %d Found ECC error "
+ "when read block %d page %d."
+ "err_page: %d, err_sect: %d, err_byte: %d, "
+ "err_dev: %d, ecc_sect_size: %d, "
+ "err_fix_info: 0x%x\n",
+ __FILE__, __LINE__, block, page,
+ err_page, err_sect, err_byte, err_dev,
+ ecc_sect_size, (u32)err_fix_info);
+ if (err_byte < ECC_SECTOR_SIZE) {
+ err_pos = buf +
+ (err_page - page) *
+ DeviceInfo.wPageDataSize +
+ err_sect * ecc_sect_size +
+ err_byte *
+ DeviceInfo.wDevicesConnected +
+ err_dev;
+
+ *err_pos ^= err_fix_info &
+ ERR_CORRECTION_INFO__BYTEMASK;
+ }
+ }
+ } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
+
+ return status;
+}
+
+u16 NAND_Read_Page_Main_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count)
+{
+ u32 status = PASS;
+ u64 flash_add;
+ u32 intr_status = 0;
+ u32 flash_bank;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u8 *read_data_l;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+ if (status != PASS)
+ return status;
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ if (page_count > 1) {
+ read_data_l = read_data;
+ while (page_count > MAX_PAGES_PER_RW) {
+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
+ status = NAND_Multiplane_Read(read_data_l,
+ block, page, MAX_PAGES_PER_RW);
+ else
+ status = NAND_Pipeline_Read_Ahead_Polling(
+ read_data_l, block, page,
+ MAX_PAGES_PER_RW);
+
+ if (status == FAIL)
+ return status;
+
+ read_data_l += DeviceInfo.wPageDataSize *
+ MAX_PAGES_PER_RW;
+ page_count -= MAX_PAGES_PER_RW;
+ page += MAX_PAGES_PER_RW;
+ }
+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
+ status = NAND_Multiplane_Read(read_data_l,
+ block, page, page_count);
+ else
+ status = NAND_Pipeline_Read_Ahead_Polling(
+ read_data_l, block, page, page_count);
+
+ return status;
+ }
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
+
+ if (enable_ecc) {
+ while (!(ioread32(FlashReg + intr_status) &
+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR)))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR) {
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ status = do_ecc_new(flash_bank, read_data,
+ block, page);
+ }
+
+ if ((ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE) &&
+ (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR))
+ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE)
+ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
+ FlashReg + intr_status);
+ else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR)
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ } else {
+ while (!(ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP))
+ ;
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ return status;
+}
+
+u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count)
+{
+ u32 status = PASS;
+ u32 NumPages = page_count;
+ u64 flash_add;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u32 ecc_done_OR_dma_comp;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+
+ if (page_count < 2)
+ status = FAIL;
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ *DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ if (status == PASS) {
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
+
+ ecc_done_OR_dma_comp = 0;
+ while (1) {
+ if (enable_ecc) {
+ while (!ioread32(FlashReg + intr_status))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR) {
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ status = do_ecc_new(flash_bank,
+ read_data, block, page);
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP) {
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+
+ if (1 == ecc_done_OR_dma_comp)
+ break;
+
+ ecc_done_OR_dma_comp = 1;
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
+ iowrite32(
+ INTR_STATUS0__ECC_TRANSACTION_DONE,
+ FlashReg + intr_status);
+
+ if (1 == ecc_done_OR_dma_comp)
+ break;
+
+ ecc_done_OR_dma_comp = 1;
+ }
+ } else {
+ while (!(ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP))
+ ;
+
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+ break;
+ }
+
+ iowrite32((~INTR_STATUS0__ECC_ERR) &
+ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
+ (~INTR_STATUS0__DMA_CMD_COMP),
+ FlashReg + intr_status);
+
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+ }
+ return status;
+}
+
+u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
+ u16 page_count)
+{
+ u32 status = PASS;
+ u64 flash_add;
+ u32 intr_status = 0;
+ u32 flash_bank;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ int ret;
+ u8 *read_data_l;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+ if (status != PASS)
+ return status;
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
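+ /*
+ * Multi-page requests are handed off in chunks of at most
+ * MAX_PAGES_PER_RW pages to either the multi-plane or the
+ * pipelined read-ahead path, depending on the MULTIPLANE_OPERATION
+ * setting.
+ */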
+ if (page_count > 1) {
+ read_data_l = read_data;
+ while (page_count > MAX_PAGES_PER_RW) {
+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
+ status = NAND_Multiplane_Read(read_data_l,
+ block, page, MAX_PAGES_PER_RW);
+ else
+ status = NAND_Pipeline_Read_Ahead(
+ read_data_l, block, page,
+ MAX_PAGES_PER_RW);
+
+ if (status == FAIL)
+ return status;
+
+ read_data_l += DeviceInfo.wPageDataSize *
+ MAX_PAGES_PER_RW;
+ page_count -= MAX_PAGES_PER_RW;
+ page += MAX_PAGES_PER_RW;
+ }
+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
+ status = NAND_Multiplane_Read(read_data_l,
+ block, page, page_count);
+ else
+ status = NAND_Pipeline_Read_Ahead(
+ read_data_l, block, page, page_count);
+
+ return status;
+ }
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ /* Fill the mrst_nand_info structure */
+ info.state = INT_READ_PAGE_MAIN;
+ info.read_data = read_data;
+ info.flash_bank = flash_bank;
+ info.block = block;
+ info.page = page;
+ info.ret = PASS;
+
+ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
+
+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
+
+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
+ if (!ret) {
+ printk(KERN_ERR "Wait for completion timeout "
+ "in %s, Line %d\n", __FILE__, __LINE__);
+ status = ERR;
+ } else {
+ status = info.ret;
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ return status;
+}
+
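+/*
+ * Move the spare flag bytes from the front of the logical spare buffer
+ * to the end of the physical spare area (the controller layout); the
+ * inverse of Conv_Spare_Data_Phy2Log_Format below. Only has an effect
+ * when ECC is enabled.
+ */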
+void Conv_Spare_Data_Log2Phy_Format(u8 *data)
+{
+ int i;
+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
+
+ if (enable_ecc) {
+ for (i = spareFlagBytes - 1; i >= 0; i--)
+ data[PageSpareSize - spareFlagBytes + i] = data[i];
+ }
+}
+
+void Conv_Spare_Data_Phy2Log_Format(u8 *data)
+{
+ int i;
+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
+
+ if (enable_ecc) {
+ for (i = 0; i < spareFlagBytes; i++)
+ data[i] = data[PageSpareSize - spareFlagBytes + i];
+ }
+}
+
+
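+/*
+ * Convert a main+spare buffer from the logical layout (all page data
+ * first, then the spare flag bytes) to the physical layout, where each
+ * ECC sector is followed by room for its ECC bytes and the spare area
+ * sits past the skip bytes. Conv_Main_Spare_Data_Phy2Log_Format below
+ * performs the inverse transformation.
+ */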
+void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
+{
+ const u32 PageSize = DeviceInfo.wPageSize;
+ const u32 PageDataSize = DeviceInfo.wPageDataSize;
+ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
+ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ u32 eccSectorSize;
+ u32 page_offset;
+ int i, j;
+
+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
+ if (enable_ecc) {
+ while (page_count > 0) {
+ page_offset = (page_count - 1) * PageSize;
+ j = (DeviceInfo.wPageDataSize / eccSectorSize);
+ for (i = spareFlagBytes - 1; i >= 0; i--)
+ data[page_offset +
+ (eccSectorSize + eccBytes) * j + i] =
+ data[page_offset + PageDataSize + i];
+ for (j--; j >= 1; j--) {
+ for (i = eccSectorSize - 1; i >= 0; i--)
+ data[page_offset +
+ (eccSectorSize + eccBytes) * j + i] =
+ data[page_offset +
+ eccSectorSize * j + i];
+ }
+ for (i = (PageSize - spareSkipBytes) - 1;
+ i >= PageDataSize; i--)
+ data[page_offset + i + spareSkipBytes] =
+ data[page_offset + i];
+ page_count--;
+ }
+ }
+}
+
+void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
+{
+ const u32 PageSize = DeviceInfo.wPageSize;
+ const u32 PageDataSize = DeviceInfo.wPageDataSize;
+ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
+ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ u32 eccSectorSize;
+ u32 page_offset;
+ int i, j;
+
+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
+ if (enable_ecc) {
+ while (page_count > 0) {
+ page_offset = (page_count - 1) * PageSize;
+ for (i = PageDataSize;
+ i < PageSize - spareSkipBytes;
+ i++)
+ data[page_offset + i] =
+ data[page_offset + i +
+ spareSkipBytes];
+ for (j = 1;
+ j < DeviceInfo.wPageDataSize / eccSectorSize;
+ j++) {
+ for (i = 0; i < eccSectorSize; i++)
+ data[page_offset +
+ eccSectorSize * j + i] =
+ data[page_offset +
+ (eccSectorSize + eccBytes) * j
+ + i];
+ }
+ for (i = 0; i < spareFlagBytes; i++)
+ data[page_offset + PageDataSize + i] =
+ data[page_offset +
+ (eccSectorSize + eccBytes) * j + i];
+ page_count--;
+ }
+ }
+}
+
+/* Un-tested function */
+u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
+ u16 page_count)
+{
+ u32 status = PASS;
+ u32 NumPages = page_count;
+ u64 flash_add;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u32 ecc_done_OR_dma_comp;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ if (status == PASS) {
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
+
+ ecc_done_OR_dma_comp = 0;
+ while (1) {
+ if (enable_ecc) {
+ while (!ioread32(FlashReg + intr_status))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR) {
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ status = do_ecc_new(flash_bank,
+ read_data, block, page);
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP) {
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+
+ if (1 == ecc_done_OR_dma_comp)
+ break;
+
+ ecc_done_OR_dma_comp = 1;
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
+ iowrite32(
+ INTR_STATUS0__ECC_TRANSACTION_DONE,
+ FlashReg + intr_status);
+
+ if (1 == ecc_done_OR_dma_comp)
+ break;
+
+ ecc_done_OR_dma_comp = 1;
+ }
+ } else {
+ while (!(ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP))
+ ;
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+ break;
+ }
+
+ iowrite32((~INTR_STATUS0__ECC_ERR) &
+ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
+ (~INTR_STATUS0__DMA_CMD_COMP),
+ FlashReg + intr_status);
+
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
+ }
+
+ return status;
+}
+
+u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
+ u16 page, u16 page_count)
+{
+ u32 status = PASS;
+ u32 NumPages = page_count;
+ u64 flash_add;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ int ret;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+
+ if (page_count < 2)
+ status = FAIL;
+
+ if (status != PASS)
+ return status;
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ *DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ /* Fill the mrst_nand_info structure */
+ info.state = INT_PIPELINE_READ_AHEAD;
+ info.read_data = read_data;
+ info.flash_bank = flash_bank;
+ info.block = block;
+ info.page = page;
+ info.ret = PASS;
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+
+ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
+
+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
+
+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
+ if (!ret) {
+ printk(KERN_ERR "Wait for completion timeout "
+ "in %s, Line %d\n", __FILE__, __LINE__);
+ status = ERR;
+ } else {
+ status = info.ret;
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ return status;
+}
+
+
+u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
+ u16 page_count)
+{
+ u32 status = PASS;
+ u64 flash_add;
+ u32 intr_status = 0;
+ u32 flash_bank;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ int ret;
+ u8 *write_data_l;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+ if (status != PASS)
+ return status;
+
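+ /* Blocks are distributed across the used banks in contiguous ranges
+ * of (wTotalBlocks / totalUsedBanks) blocks; flash_add is the byte
+ * offset of the target page within the data area of its bank.
+ */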
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ intr_status = intr_status_addresses[flash_bank];
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ iowrite32(INTR_STATUS0__PROGRAM_COMP |
+ INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
+
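+ /* Multi-page writes are split into chunks of at most MAX_PAGES_PER_RW
+ * pages and handed to the multiplane or pipeline write path, depending
+ * on whether MULTIPLANE_OPERATION is currently enabled.
+ */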
+ if (page_count > 1) {
+ write_data_l = write_data;
+ while (page_count > MAX_PAGES_PER_RW) {
+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
+ status = NAND_Multiplane_Write(write_data_l,
+ block, page, MAX_PAGES_PER_RW);
+ else
+ status = NAND_Pipeline_Write_Ahead(
+ write_data_l, block, page,
+ MAX_PAGES_PER_RW);
+ if (status == FAIL)
+ return status;
+
+ write_data_l += DeviceInfo.wPageDataSize *
+ MAX_PAGES_PER_RW;
+ page_count -= MAX_PAGES_PER_RW;
+ page += MAX_PAGES_PER_RW;
+ }
+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
+ status = NAND_Multiplane_Write(write_data_l,
+ block, page, page_count);
+ else
+ status = NAND_Pipeline_Write_Ahead(write_data_l,
+ block, page, page_count);
+
+ return status;
+ }
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ /* Fill the mrst_nand_info structure */
+ info.state = INT_WRITE_PAGE_MAIN;
+ info.write_data = write_data;
+ info.flash_bank = flash_bank;
+ info.block = block;
+ info.page = page;
+ info.ret = PASS;
+
+ ddma_trans(write_data, flash_add, flash_bank, 1, 1);
+
+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
+
+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
+ if (!ret) {
+ printk(KERN_ERR "Wait for completion timeout "
+ "in %s, Line %d\n", __FILE__, __LINE__);
+ status = ERR;
+ } else {
+ status = info.ret;
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+ while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
+ ;
+
+ return status;
+}
+
+void NAND_ECC_Ctrl(int enable)
+{
+ if (enable) {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Will enable ECC in %s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+ iowrite32(1, FlashReg + ECC_ENABLE);
+ enable_ecc = 1;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Will disable ECC in %s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+ iowrite32(0, FlashReg + ECC_ENABLE);
+ enable_ecc = 0;
+ }
+}
+
+u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
+ u16 page, u16 page_count)
+{
+ u32 status = PASS;
+ u32 i, j, page_num = 0;
+ u32 PageSize = DeviceInfo.wPageSize;
+ u32 PageDataSize = DeviceInfo.wPageDataSize;
+ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
+ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
+ u64 flash_add;
+ u32 eccSectorSize;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u8 *page_main_spare = buf_write_page_main_spare;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ if (status == PASS) {
+ intr_status = intr_status_addresses[flash_bank];
+
+ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
+
+ while ((status != FAIL) && (page_count > 0)) {
+ flash_add = (u64)(block %
+ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
+ DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
+ (flash_add >>
+ DeviceInfo.nBitsInPageDataSize)),
+ FlashMem);
+
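+ /* With ECC enabled, re-pack the logical buffer (main data, spare
+ * flag bytes, per-sector ECC bytes) into the physical page layout:
+ * ECC bytes are interleaved after each ECC sector, the spare flag
+ * bytes follow, and spareSkipBytes of 0xff padding are inserted at
+ * the start of the physical spare area.
+ */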
+ if (enable_ecc) {
+ for (j = 0;
+ j <
+ DeviceInfo.wPageDataSize / eccSectorSize;
+ j++) {
+ for (i = 0; i < eccSectorSize; i++)
+ page_main_spare[(eccSectorSize +
+ eccBytes) * j +
+ i] =
+ write_data[eccSectorSize *
+ j + i];
+
+ for (i = 0; i < eccBytes; i++)
+ page_main_spare[(eccSectorSize +
+ eccBytes) * j +
+ eccSectorSize +
+ i] =
+ write_data[PageDataSize +
+ spareFlagBytes +
+ eccBytes * j +
+ i];
+ }
+
+ for (i = 0; i < spareFlagBytes; i++)
+ page_main_spare[(eccSectorSize +
+ eccBytes) * j + i] =
+ write_data[PageDataSize + i];
+
+ for (i = PageSize - 1; i >= PageDataSize +
+ spareSkipBytes; i--)
+ page_main_spare[i] = page_main_spare[i -
+ spareSkipBytes];
+
+ for (i = PageDataSize; i < PageDataSize +
+ spareSkipBytes; i++)
+ page_main_spare[i] = 0xff;
+
+ for (i = 0; i < PageSize / 4; i++)
+ iowrite32(
+ *((u32 *)page_main_spare + i),
+ FlashMem + 0x10);
+ } else {
+
+ for (i = 0; i < PageSize / 4; i++)
+ iowrite32(*((u32 *)write_data + i),
+ FlashMem + 0x10);
+ }
+
+ while (!(ioread32(FlashReg + intr_status) &
+ (INTR_STATUS0__PROGRAM_COMP |
+ INTR_STATUS0__PROGRAM_FAIL)))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__PROGRAM_FAIL)
+ status = FAIL;
+
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ page_num++;
+ page_count--;
+ write_data += PageSize;
+ }
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+ }
+
+ return status;
+}
+
+u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
+ u16 page_count)
+{
+ u32 status = PASS;
+ u32 i, j;
+ u64 flash_add = 0;
+ u32 PageSize = DeviceInfo.wPageSize;
+ u32 PageDataSize = DeviceInfo.wPageDataSize;
+ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
+ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
+ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
+ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
+ u32 eccSectorSize;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u8 *read_data_l = read_data;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u8 *page_main_spare = buf_read_page_main_spare;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ if (status == PASS) {
+ intr_status = intr_status_addresses[flash_bank];
+
+ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
+
+ iowrite32(ioread32(FlashReg + intr_status),
+ FlashReg + intr_status);
+
+ while ((status != FAIL) && (page_count > 0)) {
+ flash_add = (u64)(block %
+ (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
+ 0x43);
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
+ 0x2000 | page_count);
+
+ while (!(ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__LOAD_COMP))
+ ;
+
+ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
+ (flash_add >>
+ DeviceInfo.nBitsInPageDataSize)),
+ FlashMem);
+
+ for (i = 0; i < PageSize / 4; i++)
+ *(((u32 *)page_main_spare) + i) =
+ ioread32(FlashMem + 0x10);
+
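+ /* Undo the physical page layout: drop the spareSkipBytes padding,
+ * de-interleave the per-sector ECC bytes and rebuild the logical
+ * buffer as main data, spare flag bytes, then ECC bytes.
+ */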
+ if (enable_ecc) {
+ for (i = PageDataSize; i < PageSize -
+ spareSkipBytes; i++)
+ page_main_spare[i] = page_main_spare[i +
+ spareSkipBytes];
+
+ for (j = 0;
+ j < DeviceInfo.wPageDataSize / eccSectorSize;
+ j++) {
+
+ for (i = 0; i < eccSectorSize; i++)
+ read_data_l[eccSectorSize * j +
+ i] =
+ page_main_spare[
+ (eccSectorSize +
+ eccBytes) * j + i];
+
+ for (i = 0; i < eccBytes; i++)
+ read_data_l[PageDataSize +
+ spareFlagBytes +
+ eccBytes * j + i] =
+ page_main_spare[
+ (eccSectorSize +
+ eccBytes) * j +
+ eccSectorSize + i];
+ }
+
+ for (i = 0; i < spareFlagBytes; i++)
+ read_data_l[PageDataSize + i] =
+ page_main_spare[(eccSectorSize +
+ eccBytes) * j + i];
+ } else {
+ for (i = 0; i < (PageDataSize + PageSpareSize);
+ i++)
+ read_data_l[i] = page_main_spare[i];
+
+ }
+
+ if (enable_ecc) {
+ while (!(ioread32(FlashReg + intr_status) &
+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR)))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR) {
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ status = do_ecc_new(flash_bank,
+ read_data, block, page);
+ }
+
+ if ((ioread32(FlashReg + intr_status) &
+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR)) ==
+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR)) {
+ iowrite32(INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__ECC_TRANSACTION_DONE,
+ FlashReg + intr_status);
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
+ iowrite32(
+ INTR_STATUS0__ECC_TRANSACTION_DONE,
+ FlashReg + intr_status);
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR) {
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ }
+ }
+
+ page++;
+ page_count--;
+ read_data_l += PageSize;
+ }
+ }
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+
+ return status;
+}
+
+u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
+ u16 page, u16 page_count)
+{
+ u16 status = PASS;
+ u32 NumPages = page_count;
+ u64 flash_add;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ int ret;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+
+ if (page_count < 2)
+ status = FAIL;
+
+ if (status != PASS)
+ return status;
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ /* Fill the mrst_nand_info structure */
+ info.state = INT_PIPELINE_WRITE_AHEAD;
+ info.write_data = write_data;
+ info.flash_bank = flash_bank;
+ info.block = block;
+ info.page = page;
+ info.ret = PASS;
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+
+ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
+
+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
+
+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
+ if (!ret) {
+ printk(KERN_ERR "Wait for completion timeout "
+ "in %s, Line %d\n", __FILE__, __LINE__);
+ status = ERR;
+ } else {
+ status = info.ret;
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ return status;
+}
+
+/* Un-tested function */
+u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
+ u16 page_count)
+{
+ u16 status = PASS;
+ u32 NumPages = page_count;
+ u64 flash_add;
+ u32 flash_bank;
+ u32 intr_status = 0;
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u16 status2 = PASS;
+ u32 t;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ status = Boundary_Check_Block_Page(block, page, page_count);
+ if (status != PASS)
+ return status;
+
+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
+ * DeviceInfo.wBlockDataSize +
+ (u64)page * DeviceInfo.wPageDataSize;
+
+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
+
+ intr_status = intr_status_addresses[flash_bank];
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
+
+ iowrite32(1, FlashReg + DMA_ENABLE);
+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
+
+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
+
+ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
+
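+ /* Poll the bank's interrupt status: latch any PROGRAM_FAIL that
+ * shows up and stop once DMA_CMD_COMP is seen.
+ */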
+ while (1) {
+ while (!ioread32(FlashReg + intr_status))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP) {
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+ status = PASS;
+ if (status2 == FAIL)
+ status = FAIL;
+ break;
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__PROGRAM_FAIL) {
+ status2 = FAIL;
+ status = FAIL;
+ t = ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__PROGRAM_FAIL;
+ iowrite32(t, FlashReg + intr_status);
+ } else {
+ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
+ (~INTR_STATUS0__DMA_CMD_COMP),
+ FlashReg + intr_status);
+ }
+ }
+
+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
+
+ iowrite32(0, FlashReg + DMA_ENABLE);
+
+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
+ ;
+
+ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
+
+ return status;
+}
+
+
+#if CMD_DMA
+static irqreturn_t cdma_isr(int irq, void *dev_id)
+{
+ struct mrst_nand_info *dev = dev_id;
+ int first_failed_cmd;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (!is_cdma_interrupt())
+ return IRQ_NONE;
+
+ /* Disable controller interrupts */
+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
+ GLOB_FTL_Event_Status(&first_failed_cmd);
+ complete(&dev->complete);
+
+ return IRQ_HANDLED;
+}
+#else
+static void handle_nand_int_read(struct mrst_nand_info *dev)
+{
+ u32 intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
+ u32 intr_status;
+ u32 ecc_done_OR_dma_comp = 0;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ dev->ret = PASS;
+ intr_status = intr_status_addresses[dev->flash_bank];
+
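+ /* Same handshake as the polled read path: with ECC enabled the
+ * transfer is done only after both DMA_CMD_COMP and
+ * ECC_TRANSACTION_DONE have been observed; ECC_ERR triggers
+ * correction via do_ecc_new().
+ */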
+ while (1) {
+ if (enable_ecc) {
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_ERR) {
+ iowrite32(INTR_STATUS0__ECC_ERR,
+ FlashReg + intr_status);
+ dev->ret = do_ecc_new(dev->flash_bank,
+ dev->read_data,
+ dev->block, dev->page);
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP) {
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+ if (1 == ecc_done_OR_dma_comp)
+ break;
+ ecc_done_OR_dma_comp = 1;
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
+ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
+ FlashReg + intr_status);
+ if (1 == ecc_done_OR_dma_comp)
+ break;
+ ecc_done_OR_dma_comp = 1;
+ }
+ } else {
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP) {
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+ break;
+ } else {
+ printk(KERN_ERR "Illegal INTS "
+ "(offset addr 0x%x) value: 0x%x\n",
+ intr_status,
+ ioread32(FlashReg + intr_status));
+ }
+ }
+
+ iowrite32((~INTR_STATUS0__ECC_ERR) &
+ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
+ (~INTR_STATUS0__DMA_CMD_COMP),
+ FlashReg + intr_status);
+ }
+}
+
+static void handle_nand_int_write(struct mrst_nand_info *dev)
+{
+ u32 intr_status;
+ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
+ INTR_STATUS2, INTR_STATUS3};
+ int status = PASS;
+
+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ dev->ret = PASS;
+ intr_status = intr[dev->flash_bank];
+
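+ /* Wait for DMA_CMD_COMP, remembering any PROGRAM_FAIL seen on the
+ * way so it can be reported through dev->ret.
+ */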
+ while (1) {
+ while (!ioread32(FlashReg + intr_status))
+ ;
+
+ if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__DMA_CMD_COMP) {
+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
+ FlashReg + intr_status);
+ if (FAIL == status)
+ dev->ret = FAIL;
+ break;
+ } else if (ioread32(FlashReg + intr_status) &
+ INTR_STATUS0__PROGRAM_FAIL) {
+ status = FAIL;
+ iowrite32(INTR_STATUS0__PROGRAM_FAIL,
+ FlashReg + intr_status);
+ } else {
+ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
+ (~INTR_STATUS0__DMA_CMD_COMP),
+ FlashReg + intr_status);
+ }
+ }
+}
+
+static irqreturn_t ddma_isr(int irq, void *dev_id)
+{
+ struct mrst_nand_info *dev = dev_id;
+ u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
+ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
+ INTR_STATUS2, INTR_STATUS3};
+
+ int_mask = INTR_STATUS0__DMA_CMD_COMP |
+ INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__PROGRAM_FAIL |
+ INTR_STATUS0__ERASE_FAIL;
+
+ ints0 = ioread32(FlashReg + INTR_STATUS0);
+ ints1 = ioread32(FlashReg + INTR_STATUS1);
+ ints2 = ioread32(FlashReg + INTR_STATUS2);
+ ints3 = ioread32(FlashReg + INTR_STATUS3);
+
+ ints_offset = intr[dev->flash_bank];
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
+ "DMA_INTR: 0x%x, "
+ "dev->state: 0x%x, dev->flash_bank: %d\n",
+ ints0, ints1, ints2, ints3,
+ ioread32(FlashReg + DMA_INTR),
+ dev->state, dev->flash_bank);
+
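+ /* The IRQ line is shared; if none of the interesting bits are set in
+ * this bank's status register the interrupt is not ours, so clear the
+ * latched status and report IRQ_NONE.
+ */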
+ if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
+ iowrite32(ints0, FlashReg + INTR_STATUS0);
+ iowrite32(ints1, FlashReg + INTR_STATUS1);
+ iowrite32(ints2, FlashReg + INTR_STATUS2);
+ iowrite32(ints3, FlashReg + INTR_STATUS3);
+ nand_dbg_print(NAND_DBG_WARN,
+ "ddma_isr: Invalid interrupt for NAND controller. "
+ "Ignore it\n");
+ return IRQ_NONE;
+ }
+
+ switch (dev->state) {
+ case INT_READ_PAGE_MAIN:
+ case INT_PIPELINE_READ_AHEAD:
+ /* Disable controller interrupts */
+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
+ handle_nand_int_read(dev);
+ break;
+ case INT_WRITE_PAGE_MAIN:
+ case INT_PIPELINE_WRITE_AHEAD:
+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
+ handle_nand_int_write(dev);
+ break;
+ default:
+ printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
+ dev->state);
+ return IRQ_NONE;
+ }
+
+ dev->state = INT_IDLE_STATE;
+ complete(&dev->complete);
+ return IRQ_HANDLED;
+}
+#endif
+
+static const struct pci_device_id nand_pci_ids[] = {
+ {
+ .vendor = 0x8086,
+ .device = 0x0809,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* end: all zeroes */ }
+};
+
+static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ unsigned long csr_base;
+ unsigned long csr_len;
+ struct mrst_nand_info *pndev = &info;
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+ return ret;
+ }
+
+ pci_set_master(dev);
+ pndev->dev = dev;
+
+ csr_base = pci_resource_start(dev, 0);
+ if (!csr_base) {
+ printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
+ return -ENODEV;
+ }
+
+ csr_len = pci_resource_len(dev, 0);
+ if (!csr_len) {
+ printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
+ return -ENODEV;
+ }
+
+ ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Unable to request "
+ "memory region\n");
+ goto failed_req_csr;
+ }
+
+ pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
+ if (!pndev->ioaddr) {
+ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
+ csr_base, pndev->ioaddr, csr_len);
+
+ init_completion(&pndev->complete);
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
+
+#if CMD_DMA
+ if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
+ SPECTRA_NAND_NAME, &info)) {
+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ iounmap(pndev->ioaddr);
+ goto failed_remap_csr;
+ }
+#else
+ if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
+ SPECTRA_NAND_NAME, &info)) {
+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ iounmap(pndev->ioaddr);
+ goto failed_remap_csr;
+ }
+#endif
+
+ pci_set_drvdata(dev, pndev);
+
+ return 0;
+
+failed_remap_csr:
+ pci_release_regions(dev);
+failed_req_csr:
+
+ return ret;
+}
+
+static void nand_pci_remove(struct pci_dev *dev)
+{
+ struct mrst_nand_info *pndev = pci_get_drvdata(dev);
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ free_irq(dev->irq, pndev);
+ iounmap(pndev->ioaddr);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, nand_pci_ids);
+
+static struct pci_driver nand_pci_driver = {
+ .name = SPECTRA_NAND_NAME,
+ .id_table = nand_pci_ids,
+ .probe = nand_pci_probe,
+ .remove = nand_pci_remove,
+};
+
+int NAND_Flash_Init(void)
+{
+ int retval;
+ u32 int_mask;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
+ GLOB_HWCTL_REG_SIZE);
+ if (!FlashReg) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!\n");
+ return -ENOMEM;
+ }
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped reg base address: "
+ "0x%p, len: %d\n",
+ FlashReg, GLOB_HWCTL_REG_SIZE);
+
+ FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
+ GLOB_HWCTL_MEM_SIZE);
+ if (!FlashMem) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!\n");
+ iounmap(FlashReg);
+ return -ENOMEM;
+ }
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped flash base address: "
+ "0x%p, len: %d\n",
+ (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(FlashReg + ACC_CLKS),
+ ioread32(FlashReg + RE_2_WE),
+ ioread32(FlashReg + WE_2_RE),
+ ioread32(FlashReg + ADDR_2_DATA),
+ ioread32(FlashReg + RDWR_EN_LO_CNT),
+ ioread32(FlashReg + RDWR_EN_HI_CNT),
+ ioread32(FlashReg + CS_SETUP_CNT));
+
+ NAND_Flash_Reset();
+
+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
+
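+ /* Select which interrupt sources to enable: the CMD_DMA build is
+ * driven by the per-channel DMA descriptor interrupts, while the
+ * plain DMA build relies on the per-bank DMA/ECC completion bits.
+ */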
+#if CMD_DMA
+ info.pcmds_num = 0;
+ info.flash_bank = 0;
+ info.cdma_num = 0;
+ int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
+ DMA_INTR__DESC_COMP_CHANNEL1 |
+ DMA_INTR__DESC_COMP_CHANNEL2 |
+ DMA_INTR__DESC_COMP_CHANNEL3 |
+ DMA_INTR__MEMCOPY_DESC_COMP);
+ iowrite32(int_mask, FlashReg + DMA_INTR_EN);
+ iowrite32(0xFFFF, FlashReg + DMA_INTR);
+
+ int_mask = (INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__PROGRAM_FAIL |
+ INTR_STATUS0__ERASE_FAIL);
+#else
+ int_mask = INTR_STATUS0__DMA_CMD_COMP |
+ INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__PROGRAM_FAIL |
+ INTR_STATUS0__ERASE_FAIL;
+#endif
+ iowrite32(int_mask, FlashReg + INTR_EN0);
+ iowrite32(int_mask, FlashReg + INTR_EN1);
+ iowrite32(int_mask, FlashReg + INTR_EN2);
+ iowrite32(int_mask, FlashReg + INTR_EN3);
+
+ /* Clear all status bits */
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
+
+ iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
+
+ /* Set default values for these registers at init time */
+ iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(1, FlashReg + ECC_ENABLE);
+ enable_ecc = 1;
+
+ retval = pci_register_driver(&nand_pci_driver);
+ if (retval) {
+ iounmap(FlashMem);
+ iounmap(FlashReg);
+ return retval;
+ }
+
+ return PASS;
+}
+
+/* Unregister the PCI driver and unmap the controller register/memory windows */
+int nand_release_spectra(void)
+{
+ pci_unregister_driver(&nand_pci_driver);
+ iounmap(FlashMem);
+ iounmap(FlashReg);
+
+ return 0;
+}
+
+
+
diff --git a/drivers/staging/spectra/lld_nand.h b/drivers/staging/spectra/lld_nand.h
new file mode 100644
index 00000000000..d08388287da
--- /dev/null
+++ b/drivers/staging/spectra/lld_nand.h
@@ -0,0 +1,131 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_NAND_
+#define _LLD_NAND_
+
+#ifdef ELDORA
+#include "defs.h"
+#else
+#include "flash.h"
+#include "ffsport.h"
+#endif
+
+#define MODE_00 0x00000000
+#define MODE_01 0x04000000
+#define MODE_10 0x08000000
+#define MODE_11 0x0C000000
+
+
+#define DATA_TRANSFER_MODE 0
+#define PROTECTION_PER_BLOCK 1
+#define LOAD_WAIT_COUNT 2
+#define PROGRAM_WAIT_COUNT 3
+#define ERASE_WAIT_COUNT 4
+#define INT_MONITOR_CYCLE_COUNT 5
+#define READ_BUSY_PIN_ENABLED 6
+#define MULTIPLANE_OPERATION_SUPPORT 7
+#define PRE_FETCH_MODE 8
+#define CE_DONT_CARE_SUPPORT 9
+#define COPYBACK_SUPPORT 10
+#define CACHE_WRITE_SUPPORT 11
+#define CACHE_READ_SUPPORT 12
+#define NUM_PAGES_IN_BLOCK 13
+#define ECC_ENABLE_SELECT 14
+#define WRITE_ENABLE_2_READ_ENABLE 15
+#define ADDRESS_2_DATA 16
+#define READ_ENABLE_2_WRITE_ENABLE 17
+#define TWO_ROW_ADDRESS_CYCLES 18
+#define MULTIPLANE_ADDRESS_RESTRICT 19
+#define ACC_CLOCKS 20
+#define READ_WRITE_ENABLE_LOW_COUNT 21
+#define READ_WRITE_ENABLE_HIGH_COUNT 22
+
+#define ECC_SECTOR_SIZE 512
+#define LLD_MAX_FLASH_BANKS 4
+
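+/* Driver-private state. A single instance ("info" in lld_nand.c) is shared
+ * between the PCI probe/remove code, the interrupt handlers and the NAND_*
+ * routines; "complete" is used to wait for the ISR to finish a transfer.
+ */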
+struct mrst_nand_info {
+ struct pci_dev *dev;
+ u32 state;
+ u32 flash_bank;
+ u8 *read_data;
+ u8 *write_data;
+ u32 block;
+ u16 page;
+ u32 use_dma;
+ void __iomem *ioaddr; /* Mapped io reg base address */
+ int ret;
+ u32 pcmds_num;
+ struct pending_cmd *pcmds;
+ int cdma_num; /* CDMA descriptor number in this chan */
+ u8 *cdma_desc_buf; /* CDMA descriptor table */
+ u8 *memcp_desc_buf; /* Memory copy descriptor table */
+ dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
+ dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
+ struct completion complete;
+};
+
+int NAND_Flash_Init(void);
+int nand_release_spectra(void);
+u16 NAND_Flash_Reset(void);
+u16 NAND_Read_Device_ID(void);
+u16 NAND_Erase_Block(u32 flash_add);
+u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
+ u16 page_count);
+u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
+ u16 page_count);
+u16 NAND_UnlockArrayAll(void);
+u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
+ u16 page, u16 page_count);
+u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
+ u16 page_count);
+u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
+ u16 page_count);
+u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
+ u16 page_count);
+void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
+u16 NAND_Get_Bad_Block(u32 block);
+u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
+ u16 page_count);
+u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
+ u16 page, u16 page_count);
+u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
+ u16 page_count);
+u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
+ u16 page_count);
+void NAND_ECC_Ctrl(int enable);
+u16 NAND_Read_Page_Main_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count);
+u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
+ u32 block, u16 page, u16 page_count);
+void Conv_Spare_Data_Log2Phy_Format(u8 *data);
+void Conv_Spare_Data_Phy2Log_Format(u8 *data);
+void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
+void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
+
+extern void __iomem *FlashReg;
+extern void __iomem *FlashMem;
+
+extern int totalUsedBanks;
+extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
+
+#endif /*_LLD_NAND_*/
+
+
+
diff --git a/drivers/staging/spectra/nand_regs.h b/drivers/staging/spectra/nand_regs.h
new file mode 100644
index 00000000000..e192e4ae8c1
--- /dev/null
+++ b/drivers/staging/spectra/nand_regs.h
@@ -0,0 +1,619 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK0 0x0001
+#define DEVICE_RESET__BANK1 0x0002
+#define DEVICE_RESET__BANK2 0x0004
+#define DEVICE_RESET__BANK3 0x0008
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG 0x0001
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE 0xffff
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE 0xffff
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE 0xffff
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE 0xffff
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK0 0x0001
+#define RB_PIN_ENABLED__BANK1 0x0002
+#define RB_PIN_ENABLED__BANK2 0x0004
+#define RB_PIN_ENABLED__BANK3 0x0008
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG 0x0001
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG 0x0001
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG 0x0001
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG 0x0001
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN 0x0001
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG 0x01
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG 0x0001
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG 0x01
+
+#define WE_2_RE 0x100
+#define WE_2_RE__VALUE 0x003f
+
+#define ADDR_2_DATA 0x110
+#define ADDR_2_DATA__VALUE 0x003f
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE 0x003f
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE 0x000f
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE 0x0007
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE 0xffff
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE 0x0003
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE 0x001f
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE 0x000f
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE 0x000f
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE 0x000f
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE 0x001f
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE 0x001f
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE 0x000f
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE 0x001f
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE 0xffff
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE 0x0007
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE 0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG 0x0001
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE 0x003f
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE 0x00ff
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE 0x00ff
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE 0x00ff
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE 0x00ff
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE 0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+
+#define REVISION 0x370
+#define REVISION__VALUE 0xffff
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS 0x0003
+#define FEATURES__ECC_MAX_ERR 0x003c
+#define FEATURES__DMA 0x0040
+#define FEATURES__CMD_DMA 0x0080
+#define FEATURES__PARTITION 0x0100
+#define FEATURES__XDMA_SIDEBAND 0x0200
+#define FEATURES__GPREG 0x0400
+#define FEATURES__INDEX_ADDR 0x0800
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE 0x0003
+
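+/* Per-bank interrupt status/enable, page count and error address registers
+ * for banks 0-3 follow; each bank's register block is 0x50 apart.
+ */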
+#define INTR_STATUS0 0x410
+#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS0__ECC_ERR 0x0002
+#define INTR_STATUS0__DMA_CMD_COMP 0x0004
+#define INTR_STATUS0__TIME_OUT 0x0008
+#define INTR_STATUS0__PROGRAM_FAIL 0x0010
+#define INTR_STATUS0__ERASE_FAIL 0x0020
+#define INTR_STATUS0__LOAD_COMP 0x0040
+#define INTR_STATUS0__PROGRAM_COMP 0x0080
+#define INTR_STATUS0__ERASE_COMP 0x0100
+#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS0__LOCKED_BLK 0x0400
+#define INTR_STATUS0__UNSUP_CMD 0x0800
+#define INTR_STATUS0__INT_ACT 0x1000
+#define INTR_STATUS0__RST_COMP 0x2000
+#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS0__PAGE_XFER_INC 0x8000
+
+#define INTR_EN0 0x420
+#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN0__ECC_ERR 0x0002
+#define INTR_EN0__DMA_CMD_COMP 0x0004
+#define INTR_EN0__TIME_OUT 0x0008
+#define INTR_EN0__PROGRAM_FAIL 0x0010
+#define INTR_EN0__ERASE_FAIL 0x0020
+#define INTR_EN0__LOAD_COMP 0x0040
+#define INTR_EN0__PROGRAM_COMP 0x0080
+#define INTR_EN0__ERASE_COMP 0x0100
+#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN0__LOCKED_BLK 0x0400
+#define INTR_EN0__UNSUP_CMD 0x0800
+#define INTR_EN0__INT_ACT 0x1000
+#define INTR_EN0__RST_COMP 0x2000
+#define INTR_EN0__PIPE_CMD_ERR 0x4000
+#define INTR_EN0__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT0 0x430
+#define PAGE_CNT0__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR0 0x440
+#define ERR_PAGE_ADDR0__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR0 0x450
+#define ERR_BLOCK_ADDR0__VALUE 0xffff
+
+#define INTR_STATUS1 0x460
+#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS1__ECC_ERR 0x0002
+#define INTR_STATUS1__DMA_CMD_COMP 0x0004
+#define INTR_STATUS1__TIME_OUT 0x0008
+#define INTR_STATUS1__PROGRAM_FAIL 0x0010
+#define INTR_STATUS1__ERASE_FAIL 0x0020
+#define INTR_STATUS1__LOAD_COMP 0x0040
+#define INTR_STATUS1__PROGRAM_COMP 0x0080
+#define INTR_STATUS1__ERASE_COMP 0x0100
+#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS1__LOCKED_BLK 0x0400
+#define INTR_STATUS1__UNSUP_CMD 0x0800
+#define INTR_STATUS1__INT_ACT 0x1000
+#define INTR_STATUS1__RST_COMP 0x2000
+#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS1__PAGE_XFER_INC 0x8000
+
+#define INTR_EN1 0x470
+#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN1__ECC_ERR 0x0002
+#define INTR_EN1__DMA_CMD_COMP 0x0004
+#define INTR_EN1__TIME_OUT 0x0008
+#define INTR_EN1__PROGRAM_FAIL 0x0010
+#define INTR_EN1__ERASE_FAIL 0x0020
+#define INTR_EN1__LOAD_COMP 0x0040
+#define INTR_EN1__PROGRAM_COMP 0x0080
+#define INTR_EN1__ERASE_COMP 0x0100
+#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN1__LOCKED_BLK 0x0400
+#define INTR_EN1__UNSUP_CMD 0x0800
+#define INTR_EN1__INT_ACT 0x1000
+#define INTR_EN1__RST_COMP 0x2000
+#define INTR_EN1__PIPE_CMD_ERR 0x4000
+#define INTR_EN1__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT1 0x480
+#define PAGE_CNT1__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR1 0x490
+#define ERR_PAGE_ADDR1__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR1 0x4a0
+#define ERR_BLOCK_ADDR1__VALUE 0xffff
+
+#define INTR_STATUS2 0x4b0
+#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS2__ECC_ERR 0x0002
+#define INTR_STATUS2__DMA_CMD_COMP 0x0004
+#define INTR_STATUS2__TIME_OUT 0x0008
+#define INTR_STATUS2__PROGRAM_FAIL 0x0010
+#define INTR_STATUS2__ERASE_FAIL 0x0020
+#define INTR_STATUS2__LOAD_COMP 0x0040
+#define INTR_STATUS2__PROGRAM_COMP 0x0080
+#define INTR_STATUS2__ERASE_COMP 0x0100
+#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS2__LOCKED_BLK 0x0400
+#define INTR_STATUS2__UNSUP_CMD 0x0800
+#define INTR_STATUS2__INT_ACT 0x1000
+#define INTR_STATUS2__RST_COMP 0x2000
+#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS2__PAGE_XFER_INC 0x8000
+
+#define INTR_EN2 0x4c0
+#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN2__ECC_ERR 0x0002
+#define INTR_EN2__DMA_CMD_COMP 0x0004
+#define INTR_EN2__TIME_OUT 0x0008
+#define INTR_EN2__PROGRAM_FAIL 0x0010
+#define INTR_EN2__ERASE_FAIL 0x0020
+#define INTR_EN2__LOAD_COMP 0x0040
+#define INTR_EN2__PROGRAM_COMP 0x0080
+#define INTR_EN2__ERASE_COMP 0x0100
+#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN2__LOCKED_BLK 0x0400
+#define INTR_EN2__UNSUP_CMD 0x0800
+#define INTR_EN2__INT_ACT 0x1000
+#define INTR_EN2__RST_COMP 0x2000
+#define INTR_EN2__PIPE_CMD_ERR 0x4000
+#define INTR_EN2__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT2 0x4d0
+#define PAGE_CNT2__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR2 0x4e0
+#define ERR_PAGE_ADDR2__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR2 0x4f0
+#define ERR_BLOCK_ADDR2__VALUE 0xffff
+
+#define INTR_STATUS3 0x500
+#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS3__ECC_ERR 0x0002
+#define INTR_STATUS3__DMA_CMD_COMP 0x0004
+#define INTR_STATUS3__TIME_OUT 0x0008
+#define INTR_STATUS3__PROGRAM_FAIL 0x0010
+#define INTR_STATUS3__ERASE_FAIL 0x0020
+#define INTR_STATUS3__LOAD_COMP 0x0040
+#define INTR_STATUS3__PROGRAM_COMP 0x0080
+#define INTR_STATUS3__ERASE_COMP 0x0100
+#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS3__LOCKED_BLK 0x0400
+#define INTR_STATUS3__UNSUP_CMD 0x0800
+#define INTR_STATUS3__INT_ACT 0x1000
+#define INTR_STATUS3__RST_COMP 0x2000
+#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS3__PAGE_XFER_INC 0x8000
+
+#define INTR_EN3 0x510
+#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN3__ECC_ERR 0x0002
+#define INTR_EN3__DMA_CMD_COMP 0x0004
+#define INTR_EN3__TIME_OUT 0x0008
+#define INTR_EN3__PROGRAM_FAIL 0x0010
+#define INTR_EN3__ERASE_FAIL 0x0020
+#define INTR_EN3__LOAD_COMP 0x0040
+#define INTR_EN3__PROGRAM_COMP 0x0080
+#define INTR_EN3__ERASE_COMP 0x0100
+#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN3__LOCKED_BLK 0x0400
+#define INTR_EN3__UNSUP_CMD 0x0800
+#define INTR_EN3__INT_ACT 0x1000
+#define INTR_EN3__RST_COMP 0x2000
+#define INTR_EN3__PIPE_CMD_ERR 0x4000
+#define INTR_EN3__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT3 0x520
+#define PAGE_CNT3__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR3 0x530
+#define ERR_PAGE_ADDR3__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR3 0x540
+#define ERR_BLOCK_ADDR3__VALUE 0xffff
+
+#define DATA_INTR 0x550
+#define DATA_INTR__WRITE_SPACE_AV 0x0001
+#define DATA_INTR__READ_DATA_AV 0x0002
+
+#define DATA_INTR_EN 0x560
+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
+#define DATA_INTR_EN__READ_DATA_AV 0x0002
+
+#define GPREG_0 0x570
+#define GPREG_0__VALUE 0xffff
+
+#define GPREG_1 0x580
+#define GPREG_1__VALUE 0xffff
+
+#define GPREG_2 0x590
+#define GPREG_2__VALUE 0xffff
+
+#define GPREG_3 0x5a0
+#define GPREG_3__VALUE 0xffff
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE 0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG 0x0001
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG 0x0001
+
+#define DMA_INTR 0x720
+#define DMA_INTR__TARGET_ERROR 0x0001
+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+
+#define DMA_INTR_EN 0x730
+#define DMA_INTR_EN__TARGET_ERROR 0x0001
+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 0x0001
+#define CHNL_ACTIVE__CHANNEL1 0x0002
+#define CHNL_ACTIVE__CHANNEL2 0x0004
+#define CHNL_ACTIVE__CHANNEL3 0x0008
+
+#define ACTIVE_SRC_ID 0x800
+#define ACTIVE_SRC_ID__VALUE 0x00ff
+
+#define PTN_INTR 0x810
+#define PTN_INTR__CONFIG_ERROR 0x0001
+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
+
+#define PTN_INTR_EN 0x820
+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
+
+#define PERM_SRC_ID_0 0x830
+#define PERM_SRC_ID_0__SRCID 0x00ff
+#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_0 0x840
+#define MIN_BLK_ADDR_0__VALUE 0xffff
+
+#define MAX_BLK_ADDR_0 0x850
+#define MAX_BLK_ADDR_0__VALUE 0xffff
+
+#define MIN_MAX_BANK_0 0x860
+#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_1 0x870
+#define PERM_SRC_ID_1__SRCID 0x00ff
+#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_1 0x880
+#define MIN_BLK_ADDR_1__VALUE 0xffff
+
+#define MAX_BLK_ADDR_1 0x890
+#define MAX_BLK_ADDR_1__VALUE 0xffff
+
+#define MIN_MAX_BANK_1 0x8a0
+#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_2 0x8b0
+#define PERM_SRC_ID_2__SRCID 0x00ff
+#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_2 0x8c0
+#define MIN_BLK_ADDR_2__VALUE 0xffff
+
+#define MAX_BLK_ADDR_2 0x8d0
+#define MAX_BLK_ADDR_2__VALUE 0xffff
+
+#define MIN_MAX_BANK_2 0x8e0
+#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_3 0x8f0
+#define PERM_SRC_ID_3__SRCID 0x00ff
+#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_3 0x900
+#define MIN_BLK_ADDR_3__VALUE 0xffff
+
+#define MAX_BLK_ADDR_3 0x910
+#define MAX_BLK_ADDR_3__VALUE 0xffff
+
+#define MIN_MAX_BANK_3 0x920
+#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_4 0x930
+#define PERM_SRC_ID_4__SRCID 0x00ff
+#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_4 0x940
+#define MIN_BLK_ADDR_4__VALUE 0xffff
+
+#define MAX_BLK_ADDR_4 0x950
+#define MAX_BLK_ADDR_4__VALUE 0xffff
+
+#define MIN_MAX_BANK_4 0x960
+#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_5 0x970
+#define PERM_SRC_ID_5__SRCID 0x00ff
+#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_5 0x980
+#define MIN_BLK_ADDR_5__VALUE 0xffff
+
+#define MAX_BLK_ADDR_5 0x990
+#define MAX_BLK_ADDR_5__VALUE 0xffff
+
+#define MIN_MAX_BANK_5 0x9a0
+#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_6 0x9b0
+#define PERM_SRC_ID_6__SRCID 0x00ff
+#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_6 0x9c0
+#define MIN_BLK_ADDR_6__VALUE 0xffff
+
+#define MAX_BLK_ADDR_6 0x9d0
+#define MAX_BLK_ADDR_6__VALUE 0xffff
+
+#define MIN_MAX_BANK_6 0x9e0
+#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_7 0x9f0
+#define PERM_SRC_ID_7__SRCID 0x00ff
+#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_7 0xa00
+#define MIN_BLK_ADDR_7__VALUE 0xffff
+
+#define MAX_BLK_ADDR_7 0xa10
+#define MAX_BLK_ADDR_7__VALUE 0xffff
+
+#define MIN_MAX_BANK_7 0xa20
+#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
diff --git a/drivers/staging/spectra/spectraswconfig.h b/drivers/staging/spectra/spectraswconfig.h
new file mode 100644
index 00000000000..17259469e95
--- /dev/null
+++ b/drivers/staging/spectra/spectraswconfig.h
@@ -0,0 +1,82 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _SPECTRASWCONFIG_
+#define _SPECTRASWCONFIG_
+
+/* NAND driver version */
+#define GLOB_VERSION "driver version 20100311"
+
+
+/***** Common Parameters *****/
+#define RETRY_TIMES 3
+
+#define READ_BADBLOCK_INFO 1
+#define READBACK_VERIFY 0
+#define AUTO_FORMAT_FLASH 0
+
+/***** Cache Parameters *****/
+#define CACHE_ITEM_NUM 128
+#define BLK_NUM_FOR_L2_CACHE 16
+
+/***** Block Table Parameters *****/
+#define BLOCK_TABLE_INDEX 0
+
+/***** Wear Leveling Parameters *****/
+#define WEAR_LEVELING_GATE 0x10
+#define WEAR_LEVELING_BLOCK_NUM 10
+
+#define DEBUG_BNDRY 0
+
+/***** Product Feature Support *****/
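+/* These macros expand to defined(CONFIG_...) and are meant to be evaluated
+ * only inside preprocessor conditionals, e.g. "#if CMD_DMA".
+ */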
+#define FLASH_EMU defined(CONFIG_SPECTRA_EMU)
+#define FLASH_NAND defined(CONFIG_SPECTRA_MRST_HW)
+#define FLASH_MTD defined(CONFIG_SPECTRA_MTD)
+#define CMD_DMA defined(CONFIG_SPECTRA_MRST_HW_DMA)
+
+#define SPECTRA_PARTITION_ID 0
+
+/* Enable this macro if the number of flash blocks is larger than 16K. */
+#define SUPPORT_LARGE_BLOCKNUM 1
+
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK 3
+//#define NUM_FREE_BLOCKS_GATE 30
+#define NUM_FREE_BLOCKS_GATE 60
+
+/**** Hardware Parameters ****/
+#define GLOB_HWCTL_REG_BASE 0xFFA40000
+#define GLOB_HWCTL_REG_SIZE 4096
+
+#define GLOB_HWCTL_MEM_BASE 0xFFA48000
+#define GLOB_HWCTL_MEM_SIZE 4096
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR 0xFF108018
+#define SCRATCH_REG_SIZE 64
+
+#define GLOB_HWCTL_DEFAULT_BLKS 2048
+
+#define SUPPORT_15BITECC 1
+#define SUPPORT_8BITECC 1
+
+#define ONFI_BLOOM_TIME 0
+#define MODE5_WORKAROUND 1
+
+#endif /*_SPECTRASWCONFIG_*/
diff --git a/drivers/staging/ti-st/Kconfig b/drivers/staging/ti-st/Kconfig
index 3ab204ddc29..68ad3d0b84a 100644
--- a/drivers/staging/ti-st/Kconfig
+++ b/drivers/staging/ti-st/Kconfig
@@ -4,7 +4,7 @@
#
menu "Texas Instruments shared transport line discipline"
config TI_ST
- tristate "shared transport core driver"
+ tristate "Shared transport core driver"
depends on RFKILL
select FW_LOADER
help
diff --git a/drivers/staging/ti-st/TODO b/drivers/staging/ti-st/TODO
index 2c4fe583901..ebfd6bb6017 100644
--- a/drivers/staging/ti-st/TODO
+++ b/drivers/staging/ti-st/TODO
@@ -1,17 +1,6 @@
TODO:
-1. A per-device/tty port context required to support multiple devices
-on same platform.
-
-2. REMOVE the sysfs entry PID passing mechanism, since there should
-be a better way to request user-space to install line discipline.
-
-3. Re-view/Re-work on the locking.
-
-4. Re-structure to make the ldisc driver more generic for chipsets which mux
-multiple connectivity (BT, FM, GPS) upon 1 TTY port.
-
-5. Step up and maintain this driver to ensure that it continues
+1. Step up and maintain this driver to ensure that it continues
to work. Having the hardware for this is pretty much a
requirement. If this does not happen, the will be removed in
the 2.6.35 kernel release.
diff --git a/drivers/staging/ti-st/bt_drv.c b/drivers/staging/ti-st/bt_drv.c
index d8420b5c91f..61ae98833b1 100644
--- a/drivers/staging/ti-st/bt_drv.c
+++ b/drivers/staging/ti-st/bt_drv.c
@@ -80,31 +80,33 @@ static inline void hci_st_tx_complete(struct hci_st *hst, int pkt_type)
* status.hci_st_open() function will wait for signal from this
* API when st_register() function returns ST_PENDING.
*/
-static void hci_st_registration_completion_cb(char data)
+static void hci_st_registration_completion_cb(void *priv_data, char data)
{
+ struct hci_st *lhst = (struct hci_st *)priv_data;
BTDRV_API_START();
/* hci_st_open() function needs value of 'data' to know
* the registration status(success/fail),So have a back
* up of it.
*/
- hst->streg_cbdata = data;
+ lhst->streg_cbdata = data;
/* Got a feedback from ST for BT driver registration
* request.Wackup hci_st_open() function to continue
* it's open operation.
*/
- complete(&hst->wait_for_btdrv_reg_completion);
+ complete(&lhst->wait_for_btdrv_reg_completion);
BTDRV_API_EXIT(0);
}
/* Called by Shared Transport layer when receive data is
* available */
-static long hci_st_receive(struct sk_buff *skb)
+static long hci_st_receive(void *priv_data, struct sk_buff *skb)
{
int err;
int len;
+ struct hci_st *lhst = (struct hci_st *)priv_data;
BTDRV_API_START();
@@ -116,13 +118,13 @@ static long hci_st_receive(struct sk_buff *skb)
BTDRV_API_EXIT(-EFAULT);
return -EFAULT;
}
- if (!hst) {
+ if (!lhst) {
kfree_skb(skb);
BT_DRV_ERR("Invalid hci_st memory,freeing SKB");
BTDRV_API_EXIT(-EFAULT);
return -EFAULT;
}
- if (!test_bit(BT_DRV_RUNNING, &hst->flags)) {
+ if (!test_bit(BT_DRV_RUNNING, &lhst->flags)) {
kfree_skb(skb);
BT_DRV_ERR("Device is not running,freeing SKB");
BTDRV_API_EXIT(-EINVAL);
@@ -130,7 +132,7 @@ static long hci_st_receive(struct sk_buff *skb)
}
len = skb->len;
- skb->dev = (struct net_device *)hst->hdev;
+ skb->dev = (struct net_device *)lhst->hdev;
/* Forward skb to HCI CORE layer */
err = hci_recv_frame(skb);
@@ -141,7 +143,7 @@ static long hci_st_receive(struct sk_buff *skb)
BTDRV_API_EXIT(err);
return err;
}
- hst->hdev->stat.byte_rx += len;
+ lhst->hdev->stat.byte_rx += len;
BTDRV_API_EXIT(0);
return 0;
@@ -189,9 +191,14 @@ static int hci_st_open(struct hci_dev *hdev)
* make it as NULL */
hci_st_proto.write = NULL;
+ /* pass hst as private data so it is handed back in the
+ * registration-complete callback and in st's receive path
+ */
+ hci_st_proto.priv_data = hst;
+
/* Register with ST layer */
err = st_register(&hci_st_proto);
- if (err == ST_ERR_PENDING) {
+ if (err == -EINPROGRESS) {
/* Prepare wait-for-completion handler data structures.
* Needed to syncronize this and st_registration_completion_cb()
* functions.
@@ -232,7 +239,7 @@ static int hci_st_open(struct hci_dev *hdev)
return -EAGAIN;
}
err = 0;
- } else if (err == ST_ERR_FAILURE) {
+ } else if (err == -1) {
BT_DRV_ERR("st_register failed %d", err);
BTDRV_API_EXIT(-EAGAIN);
return -EAGAIN;
@@ -280,7 +287,7 @@ static int hci_st_close(struct hci_dev *hdev)
/* Unregister from ST layer */
if (test_and_clear_bit(BT_ST_REGISTERED, &hst->flags)) {
err = st_unregister(ST_BT);
- if (err != ST_SUCCESS) {
+ if (err != 0) {
BT_DRV_ERR("st_unregister failed %d", err);
BTDRV_API_EXIT(-EBUSY);
return -EBUSY;
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index e8fc97e32c9..1b3060eb292 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -24,24 +24,24 @@
#define ST_H
#include <linux/skbuff.h>
-/*
- * st.h
- */
/* TODO:
* Move the following to tty.h upon acceptance
*/
#define N_TI_WL 20 /* Ldisc for TI's WL BT, FM, GPS combo chips */
-/* some gpios have active high, others like fm have
- * active low
+/**
+ * enum kim_gpio_state - a few protocols, such as FM, have active-low
+ * GPIO states for their chip/core enable GPIOs
*/
enum kim_gpio_state {
KIM_GPIO_INACTIVE,
KIM_GPIO_ACTIVE,
};
-/*
- * the list of protocols on chip
+
+/**
+ * enum proto_type - protocols on the WiLink chips which share a
+ * common physical interface such as UART.
*/
enum proto_type {
ST_BT,
@@ -50,41 +50,34 @@ enum proto_type {
ST_MAX,
};
-enum {
- ST_ERR_FAILURE = -1, /* check struct */
- ST_SUCCESS,
- ST_ERR_PENDING = -5, /* to call reg_complete_cb */
- ST_ERR_ALREADY, /* already registered */
- ST_ERR_INPROGRESS,
- ST_ERR_NOPROTO, /* protocol not supported */
-};
-
-/* per protocol structure
- * for BT/FM and GPS
+/**
+ * struct st_proto_s - Per Protocol structure from BT/FM/GPS to ST
+ * @type: type of the protocol being registered among the
+ * available proto_type values (BT, FM, GPS - the protocols which
+ * share the TTY).
+ * @recv: the receiver callback pointing to a function in the
+ * protocol drivers called by the ST driver upon receiving
+ * relevant data.
+ * @match_packet: reserved for future use, to make ST more generic
+ * @reg_complete_cb: callback handler pointing to a function in protocol
+ * handler called by ST when the pending registrations are complete.
+ * Registrations are marked pending when firmware download is
+ * still in progress.
+ * @write: pointer to a function in ST, handed to the protocol drivers,
+ * to be used when the protocol drivers have data to send to the TTY.
+ * @priv_data: private data holder for the protocol drivers, sent
+ * from the protocol drivers during registration, and sent back on
+ * reg_complete_cb and recv.
*/
struct st_proto_s {
enum proto_type type;
-/*
- * to be called by ST when data arrives
- */
- long (*recv) (struct sk_buff *);
-/*
- * for future use, logic now to be in ST
- */
+ long (*recv) (void *, struct sk_buff *);
unsigned char (*match_packet) (const unsigned char *data);
-/*
- * subsequent registration return PENDING,
- * signalled complete by this callback function
- */
- void (*reg_complete_cb) (char data);
-/*
- * write function, sent in as NULL and to be returned to
- * protocol drivers
- */
+ void (*reg_complete_cb) (void *, char data);
long (*write) (struct sk_buff *skb);
+ void *priv_data;
};
-extern long st_register(struct st_proto_s *new_proto);
-extern long st_unregister(enum proto_type type);
+extern long st_register(struct st_proto_s *);
+extern long st_unregister(enum proto_type);
#endif /* ST_H */
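
With the interface above, a protocol driver registration now looks roughly like the sketch below. It is only an illustration of the new calling convention (priv_data echoed back into recv/reg_complete_cb, -EINPROGRESS resolved through a completion); the my_* names and the 6 second timeout are hypothetical, and the real BT driver's version is the hci_st_open() hunk earlier in this patch.

/*
 * Illustrative only: how a protocol driver would use the st.h interface
 * above. All my_* identifiers and the timeout value are hypothetical.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include "st.h"

static struct completion my_reg_done;
static char my_reg_status;

/* called by ST core; priv is the priv_data passed at registration time */
static long my_recv(void *priv, struct sk_buff *skb)
{
	/* hand the skb over to the protocol stack here */
	kfree_skb(skb);
	return 0;
}

/* called by ST core once a pending (-EINPROGRESS) registration completes */
static void my_reg_complete(void *priv, char status)
{
	my_reg_status = status;
	complete(&my_reg_done);
}

static struct st_proto_s my_st_proto = {
	.type			= ST_BT,
	.recv			= my_recv,
	.reg_complete_cb	= my_reg_complete,
	.write			= NULL,	/* filled in by ST on success */
	.priv_data		= NULL,	/* driver context, echoed back in callbacks */
};

static int my_register_with_st(void)
{
	long err;

	init_completion(&my_reg_done);
	err = st_register(&my_st_proto);
	if (err == -EINPROGRESS) {
		/* fw download in progress; wait for reg_complete_cb */
		if (!wait_for_completion_timeout(&my_reg_done,
						 msecs_to_jiffies(6000)))
			return -ETIMEDOUT;
		if (my_reg_status != 0)
			return -EAGAIN;
		err = 0;
	}
	if (err)
		return -EAGAIN;
	/* my_st_proto.write is now valid for pushing skbs towards the chip */
	return 0;
}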
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 4e93694e1c2..b85d8bfdf60 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
#include "st_ll.h"
#include "st.h"
-#ifdef DEBUG
/* strings to be used for rfkill entries and by
* ST Core to be used for sysfs debug entry
*/
@@ -48,7 +47,6 @@ const unsigned char *protocol_strngs[] = {
PROTO_ENTRY(ST_FM, "FM"),
PROTO_ENTRY(ST_GPS, "GPS"),
};
-#endif
/* function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
@@ -61,7 +59,7 @@ void (*st_recv) (void*, const unsigned char*, long);
bool is_protocol_list_empty(void)
{
unsigned char i = 0;
- pr_info(" %s ", __func__);
+ pr_debug(" %s ", __func__);
for (i = 0; i < ST_MAX; i++) {
if (st_gdata->list[i] != NULL)
return ST_NOTEMPTY;
@@ -71,6 +69,7 @@ bool is_protocol_list_empty(void)
return ST_EMPTY;
}
#endif
+
/* can be called in from
* -- KIM (during fw download)
* -- ST Core (during st_write)
@@ -81,20 +80,15 @@ bool is_protocol_list_empty(void)
int st_int_write(struct st_data_s *st_gdata,
const unsigned char *data, int count)
{
-#ifdef VERBOSE /* for debug */
- int i;
-#endif
struct tty_struct *tty;
if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
pr_err("tty unavailable to perform write");
- return ST_ERR_FAILURE;
+ return -1;
}
tty = st_gdata->tty;
#ifdef VERBOSE
- printk(KERN_ERR "start data..\n");
- for (i = 0; i < count; i++) /* no newlines for each datum */
- printk(" %x", data[i]);
- printk(KERN_ERR "\n ..end data\n");
+ print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE,
+ 16, 1, data, count, 0);
#endif
return tty->ops->write(tty, data, count);
@@ -122,8 +116,10 @@ void st_send_frame(enum proto_type protoid, struct st_data_s *st_gdata)
* protocol stack driver
*/
if (likely(st_gdata->list[protoid]->recv != NULL)) {
- if (unlikely(st_gdata->list[protoid]->recv(st_gdata->rx_skb)
- != ST_SUCCESS)) {
+ if (unlikely
+ (st_gdata->list[protoid]->recv
+ (st_gdata->list[protoid]->priv_data, st_gdata->rx_skb)
+ != 0)) {
pr_err(" proto stack %d's ->recv failed", protoid);
kfree_skb(st_gdata->rx_skb);
return;
@@ -132,11 +128,11 @@ void st_send_frame(enum proto_type protoid, struct st_data_s *st_gdata)
pr_err(" proto stack %d's ->recv null", protoid);
kfree_skb(st_gdata->rx_skb);
}
- pr_info(" done %s", __func__);
return;
}
-/*
+/**
+ * st_reg_complete -
* to call registration complete callbacks
* of all protocol stack drivers
*/
@@ -147,7 +143,8 @@ void st_reg_complete(struct st_data_s *st_gdata, char err)
for (i = 0; i < ST_MAX; i++) {
if (likely(st_gdata != NULL && st_gdata->list[i] != NULL &&
st_gdata->list[i]->reg_complete_cb != NULL))
- st_gdata->list[i]->reg_complete_cb(err);
+ st_gdata->list[i]->reg_complete_cb
+ (st_gdata->list[i]->priv_data, err);
}
}
@@ -156,7 +153,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
{
register int room = skb_tailroom(st_gdata->rx_skb);
- pr_info("len %d room %d", len, room);
+ pr_debug("len %d room %d", len, room);
if (!len) {
/* Received packet has only packet header and
@@ -190,8 +187,9 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return 0;
}
-/* internal function for action when wake-up ack
- * received
+/**
+ * st_wakeup_ack - internal function for action when wake-up ack
+ * received
*/
static inline void st_wakeup_ack(struct st_data_s *st_gdata,
unsigned char cmd)
@@ -214,9 +212,13 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
st_tx_wakeup(st_gdata);
}
-/* Decodes received RAW data and forwards to corresponding
- * client drivers (Bluetooth,FM,GPS..etc).
- *
+/**
+ * st_int_recv - ST's internal receive function.
+ * Decodes received RAW data and forwards to corresponding
+ * client drivers (Bluetooth,FM,GPS..etc).
+ * This can receive various types of packets,
+ * HCI-Events, ACL, SCO, the 4 types of HCI-LL PM packets,
+ * CH-8 packets from FM and CH-9 packets from GPS cores.
*/
void st_int_recv(void *disc_data,
const unsigned char *data, long count)
@@ -259,7 +261,7 @@ void st_int_recv(void *disc_data,
/* Waiting for complete packet ? */
case ST_BT_W4_DATA:
- pr_info("Complete pkt received");
+ pr_debug("Complete pkt received");
/* Ask ST CORE to forward
* the packet to protocol driver */
@@ -275,7 +277,7 @@ void st_int_recv(void *disc_data,
eh = (struct hci_event_hdr *)st_gdata->rx_skb->
data;
- pr_info("Event header: evt 0x%2.2x"
+ pr_debug("Event header: evt 0x%2.2x"
"plen %d", eh->evt, eh->plen);
st_check_data_len(st_gdata, protoid, eh->plen);
@@ -439,45 +441,43 @@ void st_int_recv(void *disc_data,
break;
}
}
- pr_info("done %s", __func__);
+ pr_debug("done %s", __func__);
return;
}
-/* internal de-Q function
- * -- return previous in-completely written skb
- * or return the skb in the txQ
+/**
+ * st_int_dequeue - internal de-Q function.
+ * If the previous data set was not written
+ * completely, return that skb which has the pending data.
+ * In normal cases, return top of txq.
*/
struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
{
struct sk_buff *returning_skb;
- pr_info("%s", __func__);
- /* if the previous skb wasn't written completely
- */
+ pr_debug("%s", __func__);
if (st_gdata->tx_skb != NULL) {
returning_skb = st_gdata->tx_skb;
st_gdata->tx_skb = NULL;
return returning_skb;
}
-
- /* de-Q from the txQ always if previous write is complete */
return skb_dequeue(&st_gdata->txq);
}
-/* internal Q-ing function
- * will either Q the skb to txq or the tx_waitq
- * depending on the ST LL state
- *
- * lock the whole func - since ll_getstate and Q-ing should happen
- * in one-shot
+/**
+ * st_int_enqueue - internal Q-ing function.
+ * Will either Q the skb to txq or the tx_waitq
+ * depending on the ST LL state.
+ * If the chip is asleep, then Q it onto waitq and
+ * wakeup the chip.
+ * txq and waitq need protection since other contexts
+ * may be sending data or waking up the chip.
*/
void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
{
unsigned long flags = 0;
- pr_info("%s", __func__);
- /* this function can be invoked in more then one context.
- * so have a lock */
+ pr_debug("%s", __func__);
spin_lock_irqsave(&st_gdata->lock, flags);
switch (st_ll_getstate(st_gdata)) {
@@ -488,16 +488,12 @@ void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
case ST_LL_ASLEEP_TO_AWAKE:
skb_queue_tail(&st_gdata->tx_waitq, skb);
break;
- case ST_LL_AWAKE_TO_ASLEEP: /* host cannot be in this state */
+ case ST_LL_AWAKE_TO_ASLEEP:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
kfree_skb(skb);
break;
-
case ST_LL_ASLEEP:
- /* call a function of ST LL to put data
- * in tx_waitQ and wake_ind in txQ
- */
skb_queue_tail(&st_gdata->tx_waitq, skb);
st_ll_wakeup(st_gdata);
break;
@@ -507,8 +503,9 @@ void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
kfree_skb(skb);
break;
}
+
spin_unlock_irqrestore(&st_gdata->lock, flags);
- pr_info("done %s", __func__);
+ pr_debug("done %s", __func__);
return;
}
@@ -522,7 +519,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
{
struct sk_buff *skb;
unsigned long flags; /* for irq save flags */
- pr_info("%s", __func__);
+ pr_debug("%s", __func__);
/* check for sending & set flag sending here */
if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) {
pr_info("ST already sending");
@@ -563,33 +560,13 @@ void st_tx_wakeup(struct st_data_s *st_data)
/********************************************************************/
/* functions called from ST KIM
*/
-void kim_st_list_protocols(struct st_data_s *st_gdata, char *buf)
+void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf)
{
- unsigned long flags = 0;
-#ifdef DEBUG
- unsigned char i = ST_MAX;
-#endif
- spin_lock_irqsave(&st_gdata->lock, flags);
-#ifdef DEBUG /* more detailed log */
- for (i = 0; i < ST_MAX; i++) {
- if (i == 0) {
- sprintf(buf, "%s is %s", protocol_strngs[i],
- st_gdata->list[i] !=
- NULL ? "Registered" : "Unregistered");
- } else {
- sprintf(buf, "%s\n%s is %s", buf, protocol_strngs[i],
- st_gdata->list[i] !=
- NULL ? "Registered" : "Unregistered");
- }
- }
- sprintf(buf, "%s\n", buf);
-#else /* limited info */
- sprintf(buf, "BT=%c\nFM=%c\nGPS=%c\n",
- st_gdata->list[ST_BT] != NULL ? 'R' : 'U',
- st_gdata->list[ST_FM] != NULL ? 'R' : 'U',
- st_gdata->list[ST_GPS] != NULL ? 'R' : 'U');
-#endif
- spin_unlock_irqrestore(&st_gdata->lock, flags);
+ seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n",
+ st_gdata->protos_registered,
+ st_gdata->list[ST_BT] != NULL ? 'R' : 'U',
+ st_gdata->list[ST_FM] != NULL ? 'R' : 'U',
+ st_gdata->list[ST_GPS] != NULL ? 'R' : 'U');
}
/********************************************************************/
@@ -600,25 +577,25 @@ void kim_st_list_protocols(struct st_data_s *st_gdata, char *buf)
long st_register(struct st_proto_s *new_proto)
{
struct st_data_s *st_gdata;
- long err = ST_SUCCESS;
+ long err = 0;
unsigned long flags = 0;
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
pr_info("%s(%d) ", __func__, new_proto->type);
if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
|| new_proto->reg_complete_cb == NULL) {
pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
- return ST_ERR_FAILURE;
+ return -1;
}
if (new_proto->type < ST_BT || new_proto->type >= ST_MAX) {
pr_err("protocol %d not supported", new_proto->type);
- return ST_ERR_NOPROTO;
+ return -EPROTONOSUPPORT;
}
if (st_gdata->list[new_proto->type] != NULL) {
pr_err("protocol %d already registered", new_proto->type);
- return ST_ERR_ALREADY;
+ return -EALREADY;
}
/* can be from process context only */
@@ -630,11 +607,12 @@ long st_register(struct st_proto_s *new_proto)
st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
st_gdata->list[new_proto->type] = new_proto;
+ st_gdata->protos_registered++;
new_proto->write = st_write;
set_bit(ST_REG_PENDING, &st_gdata->st_state);
spin_unlock_irqrestore(&st_gdata->lock, flags);
- return ST_ERR_PENDING;
+ return -EINPROGRESS;
} else if (st_gdata->protos_registered == ST_EMPTY) {
pr_info(" protocol list empty :%d ", new_proto->type);
set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
@@ -648,16 +626,16 @@ long st_register(struct st_proto_s *new_proto)
/* this may take a while to complete
* since it involves BT fw download
*/
- err = st_kim_start();
- if (err != ST_SUCCESS) {
+ err = st_kim_start(st_gdata->kim_data);
+ if (err != 0) {
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
if ((st_gdata->protos_registered != ST_EMPTY) &&
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
- st_reg_complete(st_gdata, ST_ERR_FAILURE);
+ st_reg_complete(st_gdata, -1);
}
- return ST_ERR_FAILURE;
+ return -1;
}
/* the protocol might require other gpios to be toggled
@@ -672,9 +650,8 @@ long st_register(struct st_proto_s *new_proto)
*/
if ((st_gdata->protos_registered != ST_EMPTY) &&
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
- pr_info(" call reg complete callback ");
- st_gdata->protos_registered++;
- st_reg_complete(st_gdata, ST_SUCCESS);
+ pr_debug(" call reg complete callback ");
+ st_reg_complete(st_gdata, 0);
}
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
@@ -684,11 +661,12 @@ long st_register(struct st_proto_s *new_proto)
if (st_gdata->list[new_proto->type] != NULL) {
pr_err(" proto %d already registered ",
new_proto->type);
- return ST_ERR_ALREADY;
+ return -EALREADY;
}
spin_lock_irqsave(&st_gdata->lock, flags);
st_gdata->list[new_proto->type] = new_proto;
+ st_gdata->protos_registered++;
new_proto->write = st_write;
spin_unlock_irqrestore(&st_gdata->lock, flags);
return err;
@@ -707,18 +685,19 @@ long st_register(struct st_proto_s *new_proto)
default:
pr_err("%d protocol not supported",
new_proto->type);
- err = ST_ERR_NOPROTO;
+ err = -EPROTONOSUPPORT;
/* something wrong */
break;
}
st_gdata->list[new_proto->type] = new_proto;
+ st_gdata->protos_registered++;
new_proto->write = st_write;
/* lock already held before entering else */
spin_unlock_irqrestore(&st_gdata->lock, flags);
return err;
}
- pr_info("done %s(%d) ", __func__, new_proto->type);
+ pr_debug("done %s(%d) ", __func__, new_proto->type);
}
EXPORT_SYMBOL_GPL(st_register);
@@ -727,16 +706,16 @@ EXPORT_SYMBOL_GPL(st_register);
*/
long st_unregister(enum proto_type type)
{
- long err = ST_SUCCESS;
+ long err = 0;
unsigned long flags = 0;
struct st_data_s *st_gdata;
- pr_info("%s: %d ", __func__, type);
+ pr_debug("%s: %d ", __func__, type);
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
if (type < ST_BT || type >= ST_MAX) {
pr_err(" protocol %d not supported", type);
- return ST_ERR_NOPROTO;
+ return -EPROTONOSUPPORT;
}
spin_lock_irqsave(&st_gdata->lock, flags);
@@ -744,7 +723,7 @@ long st_unregister(enum proto_type type)
if (st_gdata->list[type] == NULL) {
pr_err(" protocol %d not registered", type);
spin_unlock_irqrestore(&st_gdata->lock, flags);
- return ST_ERR_NOPROTO;
+ return -EPROTONOSUPPORT;
}
st_gdata->protos_registered--;
@@ -768,7 +747,7 @@ long st_unregister(enum proto_type type)
}
/* all protocols now unregistered */
- st_kim_stop();
+ st_kim_stop(st_gdata->kim_data);
/* disable ST LL */
st_ll_disable(st_gdata);
}
@@ -787,11 +766,11 @@ long st_write(struct sk_buff *skb)
#endif
long len;
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
if (unlikely(skb == NULL || st_gdata == NULL
|| st_gdata->tty == NULL)) {
pr_err("data/tty unavailable to perform write");
- return ST_ERR_FAILURE;
+ return -1;
}
#ifdef DEBUG /* open-up skb to read the 1st byte */
switch (skb->data[0]) {
@@ -810,10 +789,10 @@ long st_write(struct sk_buff *skb)
if (unlikely(st_gdata->list[protoid] == NULL)) {
pr_err(" protocol %d not registered, and writing? ",
protoid);
- return ST_ERR_FAILURE;
+ return -1;
}
#endif
- pr_info("%d to be written", skb->len);
+ pr_debug("%d to be written", skb->len);
len = skb->len;
/* st_ll to decide where to enqueue the skb */
@@ -834,11 +813,11 @@ EXPORT_SYMBOL_GPL(st_unregister);
*/
static int st_tty_open(struct tty_struct *tty)
{
- int err = ST_SUCCESS;
+ int err = 0;
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
st_gdata->tty = tty;
tty->disc_data = st_gdata;
@@ -855,8 +834,8 @@ static int st_tty_open(struct tty_struct *tty)
* signal to UIM via KIM that -
* installation of N_TI_WL ldisc is complete
*/
- st_kim_complete();
- pr_info("done %s", __func__);
+ st_kim_complete(st_gdata->kim_data);
+ pr_debug("done %s", __func__);
return err;
}
@@ -878,12 +857,13 @@ static void st_tty_close(struct tty_struct *tty)
pr_err("%d not un-registered", i);
st_gdata->list[i] = NULL;
}
+ st_gdata->protos_registered = 0;
spin_unlock_irqrestore(&st_gdata->lock, flags);
/*
* signal to UIM via KIM that -
* N_TI_WL ldisc is un-installed
*/
- st_kim_complete();
+ st_kim_complete(st_gdata->kim_data);
st_gdata->tty = NULL;
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
@@ -900,7 +880,7 @@ static void st_tty_close(struct tty_struct *tty)
st_gdata->rx_skb = NULL;
spin_unlock_irqrestore(&st_gdata->lock, flags);
- pr_info("%s: done ", __func__);
+ pr_debug("%s: done ", __func__);
}
static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
@@ -908,11 +888,8 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
{
#ifdef VERBOSE
- long i;
- printk(KERN_ERR "incoming data...\n");
- for (i = 0; i < count; i++)
- printk(" %x", data[i]);
- printk(KERN_ERR "\n.. data end\n");
+ print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
+ 16, 1, data, count, 0);
#endif
/*
@@ -920,7 +897,7 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
* to KIM for validation
*/
st_recv(tty->disc_data, data, count);
- pr_info("done %s", __func__);
+ pr_debug("done %s", __func__);
}
/* wake-up function called in from the TTY layer
@@ -929,7 +906,7 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
static void st_tty_wakeup(struct tty_struct *tty)
{
struct st_data_s *st_gdata = tty->disc_data;
- pr_info("%s ", __func__);
+ pr_debug("%s ", __func__);
/* don't do an wakeup for now */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
@@ -940,7 +917,7 @@ static void st_tty_wakeup(struct tty_struct *tty)
static void st_tty_flush_buffer(struct tty_struct *tty)
{
struct st_data_s *st_gdata = tty->disc_data;
- pr_info("%s ", __func__);
+ pr_debug("%s ", __func__);
kfree_skb(st_gdata->tx_skb);
st_gdata->tx_skb = NULL;
@@ -979,7 +956,7 @@ int st_core_init(struct st_data_s **core_data)
kfree(st_ldisc_ops);
return err;
}
- pr_info("registered n_shared line discipline");
+ pr_debug("registered n_shared line discipline");
st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
if (!st_gdata) {
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index f271c88a808..8601320a679 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -36,58 +36,88 @@
#define ST_REG_PENDING 3
#define ST_WAITING_FOR_RESP 4
-/*
- * local data required for ST/KIM/ST-HCI-LL
+/**
+ * struct st_data_s - ST core internal structure
+ * @st_state: different states of ST like initializing, registration
+ * in progress, this is mainly used to return relevant err codes
+ * when protocol drivers are registering. It is also used to track
+ * the recv function: during fw download only HCI events
+ * can occur, whereas at other times CH8 and CH9 events
+ * can occur as well.
+ * @tty: tty provided by the TTY core for line disciplines.
+ * @ldisc_ops: the procedures that this line discipline registers with TTY.
+ * @tx_skb: if the tty's write returns fewer bytes than requested, the
+ * rest of the data is held here to be written on the next attempt.
+ * This needs to be protected, hence the lock inside the wakeup func.
+ * @tx_state: if the data is being written onto the TTY and protocol driver
+ * wants to send more, queue up data and mark that there is
+ * more data to send.
+ * @list: the list of protocols registered, only MAX can exist, one protocol
+ * can register only once.
+ * @rx_state: states to be maintained inside st's tty receive
+ * @rx_count: count to be maintained inside st's tty receive
+ * @rx_skb: the skb where all data for a protocol gets accumulated;
+ * since the tty might not hand over a complete event packet in one
+ * receive call, the state, count and the skb need to be maintained.
+ * @txq: the list of skbs which need to be sent onto the TTY.
+ * @tx_waitq: if the chip is not in AWAKE state, the skbs need to be queued
+ * up in here, PM(WAKEUP_IND) data needs to be sent and then the skbs
+ * from waitq can be moved onto the txq.
+ * Needs locking too.
+ * @lock: the lock to protect skbs, queues, and ST states.
+ * @protos_registered: count of the protocols registered, also when 0 the
+ * chip enable gpio can be toggled, and when it changes to 1 the fw
+ * needs to be downloaded to initialize chip side ST.
+ * @ll_state: the various PM states the chip can be in, the states are notified
+ * to us, when the chip sends relevant PM packets(SLEEP_IND, WAKE_IND).
+ * @kim_data: reference to the parent encapsulating structure.
+ *
*/
struct st_data_s {
unsigned long st_state;
-/*
- * an instance of tty_struct & ldisc ops to move around
- */
struct tty_struct *tty;
struct tty_ldisc_ops *ldisc_ops;
-/*
- * the tx skb -
- * if the skb is already dequeued and the tty failed to write the same
- * maintain the skb to write in the next transaction
- */
struct sk_buff *tx_skb;
#define ST_TX_SENDING 1
#define ST_TX_WAKEUP 2
unsigned long tx_state;
-/*
- * list of protocol registered
- */
struct st_proto_s *list[ST_MAX];
-/*
- * lock
- */
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
struct sk_buff_head txq, tx_waitq;
- spinlock_t lock; /* ST LL state lock */
+ spinlock_t lock;
unsigned char protos_registered;
- unsigned long ll_state; /* ST LL power state */
+ unsigned long ll_state;
+ void *kim_data;
};
-/* point this to tty->driver->write or tty->ops->write
+/**
+ * st_int_write -
+ * point this to tty->driver->write or tty->ops->write
* depending upon the kernel version
*/
int st_int_write(struct st_data_s*, const unsigned char*, int);
-/* internal write function, passed onto protocol drivers
+
+/**
+ * st_write -
+ * internal write function, passed onto protocol drivers
* via the write function ptr of protocol struct
*/
long st_write(struct sk_buff *);
-/* function to be called from ST-LL
- */
+
+/* function to be called from ST-LL */
void st_ll_send_frame(enum proto_type, struct sk_buff *);
+
/* internal wake up function */
void st_tx_wakeup(struct st_data_s *st_data);
+/* init, exit entry funcs called from KIM */
int st_core_init(struct st_data_s **);
void st_core_exit(struct st_data_s *);
-void st_kim_ref(struct st_data_s **);
+
+/* ask for reference from KIM */
+void st_kim_ref(struct st_data_s **, int);
#define GPS_STUB_TEST
#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index 98cbabba384..9e99463f76e 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -26,6 +26,8 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/sched.h>
@@ -55,37 +57,10 @@ static struct platform_driver kim_platform_driver = {
},
};
-#ifndef LEGACY_RFKILL_SUPPORT
-static ssize_t show_pid(struct device *dev, struct device_attribute
- *attr, char *buf);
-static ssize_t store_pid(struct device *dev, struct device_attribute
- *devattr, char *buf, size_t count);
-static ssize_t show_list(struct device *dev, struct device_attribute
- *attr, char *buf);
-
-/* structures specific for sysfs entries */
-static struct kobj_attribute pid_attr =
-__ATTR(pid, 0644, (void *)show_pid, (void *)store_pid);
-
-static struct kobj_attribute list_protocols =
-__ATTR(protocols, 0444, (void *)show_list, NULL);
-
-static struct attribute *uim_attrs[] = {
- &pid_attr.attr,
- /* add more debug sysfs entries */
- &list_protocols.attr,
- NULL,
-};
-
-static struct attribute_group uim_attr_grp = {
- .attrs = uim_attrs,
-};
-#else
static int kim_toggle_radio(void*, bool);
static const struct rfkill_ops kim_rfkill_ops = {
.set_block = kim_toggle_radio,
};
-#endif /* LEGACY_RFKILL_SUPPORT */
/* strings to be used for rfkill entries and by
* ST Core to be used for sysfs debug entry
@@ -97,18 +72,34 @@ const unsigned char *protocol_names[] = {
PROTO_ENTRY(ST_GPS, "GPS"),
};
-struct kim_data_s *kim_gdata;
+#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
-/*
- * function to return whether the firmware response was proper
- * in case of error don't complete so that waiting for proper
- * response times out
+/**
+ * st_get_plat_device -
+ * function which returns the reference to the platform device
+ * requested by id. As of now only 1 such device exists (id=0).
+ * The context requesting the reference can get the id from either
+ * a) the protocol driver which is registering, or
+ * b) the tty device which is opened.
+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+ return st_kim_devices[id];
+}
+
+/**
+ * validate_firmware_response -
+ * function to return whether the firmware response was proper
+ * in case of error don't complete so that waiting for proper
+ * response times out
*/
-void validate_firmware_response(struct sk_buff *skb)
+void validate_firmware_response(struct kim_data_s *kim_gdata)
{
+ struct sk_buff *skb = kim_gdata->rx_skb;
if (unlikely(skb->data[5] != 0)) {
pr_err("no proper response during fw download");
pr_err("data6 %x", skb->data[5]);
@@ -122,14 +113,14 @@ void validate_firmware_response(struct sk_buff *skb)
/* check for data len received inside kim_int_recv
* most often hit the last case to update state to waiting for data
*/
-static inline int kim_check_data_len(int len)
+static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
{
register int room = skb_tailroom(kim_gdata->rx_skb);
- pr_info("len %d room %d", len, room);
+ pr_debug("len %d room %d", len, room);
if (!len) {
- validate_firmware_response(kim_gdata->rx_skb);
+ validate_firmware_response(kim_gdata);
} else if (len > room) {
/* Received packet's payload length is larger.
* We can't accommodate it in created skb.
@@ -155,18 +146,20 @@ static inline int kim_check_data_len(int len)
return 0;
}
-/* receive function called during firmware download
- * - firmware download responses on different UART drivers
- * have been observed to come in bursts of different
- * tty_receive and hence the logic
+/**
+ * kim_int_recv - receive function called during firmware download
+ * firmware download responses on different UART drivers
+ * have been observed to arrive in bursts spread across multiple
+ * tty_receive calls, hence the logic below
*/
-void kim_int_recv(const unsigned char *data, long count)
+void kim_int_recv(struct kim_data_s *kim_gdata,
+ const unsigned char *data, long count)
{
register char *ptr;
struct hci_event_hdr *eh;
register int len = 0, type = 0;
- pr_info("%s", __func__);
+ pr_debug("%s", __func__);
/* Decode received bytes here */
ptr = (char *)data;
if (unlikely(ptr == NULL)) {
@@ -188,8 +181,8 @@ void kim_int_recv(const unsigned char *data, long count)
switch (kim_gdata->rx_state) {
/* Waiting for complete packet ? */
case ST_BT_W4_DATA:
- pr_info("Complete pkt received");
- validate_firmware_response(kim_gdata->rx_skb);
+ pr_debug("Complete pkt received");
+ validate_firmware_response(kim_gdata);
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
continue;
@@ -197,9 +190,9 @@ void kim_int_recv(const unsigned char *data, long count)
case ST_BT_W4_EVENT_HDR:
eh = (struct hci_event_hdr *)kim_gdata->
rx_skb->data;
- pr_info("Event header: evt 0x%2.2x"
+ pr_debug("Event header: evt 0x%2.2x"
"plen %d", eh->evt, eh->plen);
- kim_check_data_len(eh->plen);
+ kim_check_data_len(kim_gdata, eh->plen);
continue;
} /* end of switch */
} /* end of if rx_state */
@@ -216,7 +209,7 @@ void kim_int_recv(const unsigned char *data, long count)
ptr++;
count--;
continue;
- } /* end of switch *ptr */
+ }
ptr++;
count--;
kim_gdata->rx_skb =
@@ -226,34 +219,35 @@ void kim_int_recv(const unsigned char *data, long count)
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_count = 0;
return;
- } /* not necessary in this case */
+ }
bt_cb(kim_gdata->rx_skb)->pkt_type = type;
- } /* end of while count */
+ }
pr_info("done %s", __func__);
return;
}
-static long read_local_version(char *bts_scr_name)
+static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
- pr_info("%s", __func__);
+ pr_debug("%s", __func__);
INIT_COMPLETION(kim_gdata->kim_rcvd);
if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
pr_err("kim: couldn't write 4 bytes");
- return ST_ERR_FAILURE;
+ return -1;
}
if (!wait_for_completion_timeout
(&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
pr_err(" waiting for ver info- timed out ");
- return ST_ERR_FAILURE;
+ return -1;
}
version =
- MAKEWORD(kim_gdata->resp_buffer[13], kim_gdata->resp_buffer[14]);
+ MAKEWORD(kim_gdata->resp_buffer[13],
+ kim_gdata->resp_buffer[14]);
chip = (version & 0x7C00) >> 10;
min_ver = (version & 0x007F);
maj_ver = (version & 0x0380) >> 7;
@@ -262,25 +256,32 @@ static long read_local_version(char *bts_scr_name)
maj_ver |= 0x0008;
sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);
+
+ /* to be accessed later via debugfs entry */
+ kim_gdata->version.full = version;
+ kim_gdata->version.chip = chip;
+ kim_gdata->version.maj_ver = maj_ver;
+ kim_gdata->version.min_ver = min_ver;
+
pr_info("%s", bts_scr_name);
- return ST_SUCCESS;
+ return 0;
}
-/* internal function which parses through the .bts firmware script file
- * intreprets SEND, DELAY actions only as of now
+/**
+ * download_firmware -
+ * internal function which parses through the .bts firmware
+ * script file and interprets only SEND and DELAY actions as of now
*/
-static long download_firmware(void)
+static long download_firmware(struct kim_data_s *kim_gdata)
{
- long err = ST_SUCCESS;
+ long err = 0;
long len = 0;
register unsigned char *ptr = NULL;
register unsigned char *action_ptr = NULL;
unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */
- pr_info("%s", __func__);
-
- err = read_local_version(bts_scr_name);
- if (err != ST_SUCCESS) {
+ err = read_local_version(kim_gdata, bts_scr_name);
+ if (err != 0) {
pr_err("kim: failed to read local ver");
return err;
}
@@ -291,7 +292,7 @@ static long download_firmware(void)
(kim_gdata->fw_entry->size == 0))) {
pr_err(" request_firmware failed(errno %ld) for %s", err,
bts_scr_name);
- return ST_ERR_FAILURE;
+ return -1;
}
ptr = (void *)kim_gdata->fw_entry->data;
len = kim_gdata->fw_entry->size;
@@ -302,7 +303,7 @@ static long download_firmware(void)
len -= sizeof(struct bts_header);
while (len > 0 && ptr) {
- pr_info(" action size %d, type %d ",
+ pr_debug(" action size %d, type %d ",
((struct bts_action *)ptr)->size,
((struct bts_action *)ptr)->type);
@@ -315,8 +316,8 @@ static long download_firmware(void)
/* ignore remote change
* baud rate HCI VS command */
pr_err
- (" change remote baud\
- rate command in firmware");
+ (" change remote baud"
+ " rate command in firmware");
break;
}
@@ -326,7 +327,7 @@ static long download_firmware(void)
((struct bts_action *)ptr)->size);
if (unlikely(err < 0)) {
release_firmware(kim_gdata->fw_entry);
- return ST_ERR_FAILURE;
+ return -1;
}
if (!wait_for_completion_timeout
(&kim_gdata->kim_rcvd,
@@ -335,7 +336,7 @@ static long download_firmware(void)
(" response timeout during fw download ");
/* timed out */
release_firmware(kim_gdata->fw_entry);
- return ST_ERR_FAILURE;
+ return -1;
}
break;
case ACTION_DELAY: /* sleep */
@@ -353,19 +354,23 @@ static long download_firmware(void)
}
/* fw download complete */
release_firmware(kim_gdata->fw_entry);
- return ST_SUCCESS;
+ return 0;
}
/**********************************************************************/
/* functions called from ST core */
-
/* function to toggle the GPIO
* needs to know whether the GPIO is active high or active low
*/
void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
{
+ struct platform_device *kim_pdev;
+ struct kim_data_s *kim_gdata;
pr_info(" %s ", __func__);
+ kim_pdev = st_get_plat_device(0);
+ kim_gdata = dev_get_drvdata(&kim_pdev->dev);
+
if (kim_gdata->gpios[type] == -1) {
pr_info(" gpio not requested for protocol %s",
protocol_names[type]);
@@ -405,6 +410,9 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
*/
void st_kim_recv(void *disc_data, const unsigned char *data, long count)
{
+ struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
+ struct kim_data_s *kim_gdata = st_gdata->kim_data;
+
pr_info(" %s ", __func__);
/* copy to local buffer */
if (unlikely(data[4] == 0x01 && data[5] == 0x10 && data[0] == 0x04)) {
@@ -413,7 +421,7 @@ void st_kim_recv(void *disc_data, const unsigned char *data, long count)
complete_all(&kim_gdata->kim_rcvd);
return;
} else {
- kim_int_recv(data, count);
+ kim_int_recv(kim_gdata, data, count);
/* either completes or times out */
}
return;
@@ -422,27 +430,33 @@ void st_kim_recv(void *disc_data, const unsigned char *data, long count)
/* to signal completion of line discipline installation
* called from ST Core, upon tty_open
*/
-void st_kim_complete(void)
+void st_kim_complete(void *kim_data)
{
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
complete(&kim_gdata->ldisc_installed);
}
-/* called from ST Core upon 1st registration
-*/
-long st_kim_start(void)
+/**
+ * st_kim_start - called from ST Core upon 1st registration
+ * This involves toggling the chip enable gpio, reading
+ * the firmware version from chip, forming the fw file name
+ * based on the chip version, requesting the fw, parsing it
+ * and performing the download (send/recv).
+ */
+long st_kim_start(void *kim_data)
{
- long err = ST_SUCCESS;
+ long err = 0;
long retry = POR_RETRY_COUNT;
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
+
pr_info(" %s", __func__);
do {
-#ifdef LEGACY_RFKILL_SUPPORT
/* TODO: this is only because rfkill sub-system
* doesn't send events to user-space if the state
* isn't changed
*/
rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 1);
-#endif
/* Configure BT nShutdown to HIGH state */
gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
mdelay(5); /* FIXME: a proper toggle */
@@ -450,30 +464,29 @@ long st_kim_start(void)
mdelay(100);
/* re-initialize the completion */
INIT_COMPLETION(kim_gdata->ldisc_installed);
-#ifndef LEGACY_RFKILL_SUPPORT
+#if 0 /* older way of signalling user-space UIM */
/* send signal to UIM */
err = kill_pid(find_get_pid(kim_gdata->uim_pid), SIGUSR2, 0);
if (err != 0) {
pr_info(" sending SIGUSR2 to uim failed %ld", err);
- err = ST_ERR_FAILURE;
+ err = -1;
continue;
}
-#else
+#endif
/* unblock and send event to UIM via /dev/rfkill */
rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 0);
-#endif
/* wait for ldisc to be installed */
err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
msecs_to_jiffies(LDISC_TIME));
if (!err) { /* timeout */
pr_err("line disc installation timed out ");
- err = ST_ERR_FAILURE;
+ err = -1;
continue;
} else {
/* ldisc installed now */
pr_info(" line discipline installed ");
- err = download_firmware();
- if (err != ST_SUCCESS) {
+ err = download_firmware(kim_gdata);
+ if (err != 0) {
pr_err("download firmware failed");
continue;
} else { /* on success don't retry */
@@ -484,31 +497,33 @@ long st_kim_start(void)
return err;
}
-/* called from ST Core, on the last un-registration
-*/
-long st_kim_stop(void)
+/**
+ * st_kim_stop - called from ST Core, on the last un-registration
+ * toggle low the chip enable gpio
+ */
+long st_kim_stop(void *kim_data)
{
- long err = ST_SUCCESS;
+ long err = 0;
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
INIT_COMPLETION(kim_gdata->ldisc_installed);
-#ifndef LEGACY_RFKILL_SUPPORT
+#if 0 /* older way of signalling user-space UIM */
/* send signal to UIM */
err = kill_pid(find_get_pid(kim_gdata->uim_pid), SIGUSR2, 1);
if (err != 0) {
pr_err("sending SIGUSR2 to uim failed %ld", err);
- return ST_ERR_FAILURE;
+ return -1;
}
-#else
+#endif
/* set BT rfkill to be blocked */
err = rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 1);
-#endif
/* wait for ldisc to be un-installed */
err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
msecs_to_jiffies(LDISC_TIME));
if (!err) { /* timeout */
pr_err(" timed out waiting for ldisc to be un-installed");
- return ST_ERR_FAILURE;
+ return -1;
}
/* By default configure BT nShutdown to LOW state */
@@ -522,37 +537,24 @@ long st_kim_stop(void)
/**********************************************************************/
/* functions called from subsystems */
+/* called when debugfs entry is read from */
-#ifndef LEGACY_RFKILL_SUPPORT
-/* called when sysfs entry is written to */
-static ssize_t store_pid(struct device *dev, struct device_attribute
- *devattr, char *buf, size_t count)
-{
- pr_info("%s: pid %s ", __func__, buf);
- sscanf(buf, "%ld", &kim_gdata->uim_pid);
- /* to be made use by kim_start to signal SIGUSR2
- */
- return strlen(buf);
-}
-
-/* called when sysfs entry is read from */
-static ssize_t show_pid(struct device *dev, struct device_attribute
- *attr, char *buf)
+static int show_version(struct seq_file *s, void *unused)
{
- sprintf(buf, "%ld", kim_gdata->uim_pid);
- return strlen(buf);
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
+ seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full,
+ kim_gdata->version.chip, kim_gdata->version.maj_ver,
+ kim_gdata->version.min_ver);
+ return 0;
}
-/* called when sysfs entry is read from */
-static ssize_t show_list(struct device *dev, struct device_attribute
- *attr, char *buf)
+static int show_list(struct seq_file *s, void *unused)
{
- kim_st_list_protocols(kim_gdata->core_data, buf);
- return strlen(buf);
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
+ kim_st_list_protocols(kim_gdata->core_data, s);
+ return 0;
}
-#else /* LEGACY_RFKILL_SUPPORT */
-
/* function called from rfkill subsystem, when someone from
* user space would write 0/1 on the sysfs entry
* /sys/class/rfkill/rfkill0,1,3/state
@@ -560,7 +562,7 @@ static ssize_t show_list(struct device *dev, struct device_attribute
static int kim_toggle_radio(void *data, bool blocked)
{
enum proto_type type = *((enum proto_type *)data);
- pr_info(" %s: %d ", __func__, type);
+ pr_debug(" %s: %d ", __func__, type);
switch (type) {
case ST_BT:
@@ -577,33 +579,80 @@ static int kim_toggle_radio(void *data, bool blocked)
pr_err(" wrong proto type ");
break;
}
- return ST_SUCCESS;
+ return 0;
}
-#endif /* LEGACY_RFKILL_SUPPORT */
-
-void st_kim_ref(struct st_data_s **core_data)
+/**
+ * st_kim_ref - reference the core's data
+ * This references the per-ST platform device in the arch/xx/
+ * board-xx.c file.
+ * This would enable multiple such platform devices to exist
+ * on a given platform
+ */
+void st_kim_ref(struct st_data_s **core_data, int id)
{
+ struct platform_device *pdev;
+ struct kim_data_s *kim_gdata;
+ /* get kim_gdata reference from platform device */
+ pdev = st_get_plat_device(id);
+ kim_gdata = dev_get_drvdata(&pdev->dev);
*core_data = kim_gdata->core_data;
}
+static int kim_version_open(struct inode *i, struct file *f)
+{
+ return single_open(f, show_version, i->i_private);
+}
+
+static int kim_list_open(struct inode *i, struct file *f)
+{
+ return single_open(f, show_list, i->i_private);
+}
+
+static const struct file_operations version_debugfs_fops = {
+ /* version info */
+ .open = kim_version_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static const struct file_operations list_debugfs_fops = {
+ /* protocols info */
+ .open = kim_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/**********************************************************************/
/* functions called from platform device driver subsystem
* need to have a relevant platform device entry in the platform's
* board-*.c file
*/
+struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
long status;
long proto;
long *gpios = pdev->dev.platform_data;
+ struct kim_data_s *kim_gdata;
+
+ st_kim_devices[pdev->id] = pdev;
+ kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
+ if (!kim_gdata) {
+ pr_err("no mem to allocate");
+ return -ENOMEM;
+ }
+ dev_set_drvdata(&pdev->dev, kim_gdata);
status = st_core_init(&kim_gdata->core_data);
if (status != 0) {
pr_err(" ST core init failed");
- return ST_ERR_FAILURE;
+ return -1;
}
+ /* refer to itself */
+ kim_gdata->core_data->kim_data = kim_gdata;
for (proto = 0; proto < ST_MAX; proto++) {
kim_gdata->gpios[proto] = gpios[proto];
@@ -639,30 +688,12 @@ static int kim_probe(struct platform_device *pdev)
return status;
}
}
-#ifndef LEGACY_RFKILL_SUPPORT
- /* pdev to contain BT, FM and GPS enable/N-Shutdown GPIOs
- * execute request_gpio, set output direction
- */
- kim_gdata->kim_kobj = kobject_create_and_add("uim", NULL);
- /* create the sysfs entry for UIM to put in pid */
- if (sysfs_create_group(kim_gdata->kim_kobj, &uim_attr_grp)) {
- pr_err(" sysfs entry creation failed");
- kobject_put(kim_gdata->kim_kobj);
- /* free requested GPIOs and fail probe */
- for (proto = ST_BT; proto < ST_MAX; proto++) {
- if (gpios[proto] != -1)
- gpio_free(gpios[proto]);
- }
- return -1; /* fail insmod */
- }
- pr_info(" sysfs entry created ");
-#endif
/* get reference of pdev for request_firmware
*/
kim_gdata->kim_pdev = pdev;
init_completion(&kim_gdata->kim_rcvd);
init_completion(&kim_gdata->ldisc_installed);
-#ifdef LEGACY_RFKILL_SUPPORT
+
for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
/* TODO: should all types be rfkill_type_bt ? */
kim_gdata->rf_protos[proto] = proto;
@@ -685,8 +716,20 @@ static int kim_probe(struct platform_device *pdev)
}
pr_info("rfkill entry created for %ld", gpios[proto]);
}
-#endif
- return ST_SUCCESS;
+
+ kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
+ if (IS_ERR(kim_debugfs_dir)) {
+ pr_err(" debugfs entries creation failed ");
+ kim_debugfs_dir = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
+ kim_gdata, &version_debugfs_fops);
+ debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
+ kim_gdata, &list_debugfs_fops);
+ pr_info(" debugfs entries created ");
+ return 0;
}
static int kim_remove(struct platform_device *pdev)
@@ -695,27 +738,27 @@ static int kim_remove(struct platform_device *pdev)
*/
long *gpios = pdev->dev.platform_data;
long proto;
+ struct kim_data_s *kim_gdata;
+
+ kim_gdata = dev_get_drvdata(&pdev->dev);
for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
/* Claim the Bluetooth/FM/GPIO
* nShutdown gpio from the system
*/
gpio_free(gpios[proto]);
-#ifdef LEGACY_RFKILL_SUPPORT
rfkill_unregister(kim_gdata->rfkill[proto]);
rfkill_destroy(kim_gdata->rfkill[proto]);
kim_gdata->rfkill[proto] = NULL;
-#endif
}
pr_info("kim: GPIO Freed");
-#ifndef LEGACY_RFKILL_SUPPORT
- /* delete the sysfs entries */
- sysfs_remove_group(kim_gdata->kim_kobj, &uim_attr_grp);
- kobject_put(kim_gdata->kim_kobj);
-#endif
+ debugfs_remove_recursive(kim_debugfs_dir);
kim_gdata->kim_pdev = NULL;
st_core_exit(kim_gdata->core_data);
- return ST_SUCCESS;
+
+ kfree(kim_gdata);
+ kim_gdata = NULL;
+ return 0;
}
/**********************************************************************/
@@ -723,27 +766,19 @@ static int kim_remove(struct platform_device *pdev)
static int __init st_kim_init(void)
{
- long ret = ST_SUCCESS;
- kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
- if (!kim_gdata) {
- pr_err("no mem to allocate");
- return -ENOMEM;
- }
-
+ long ret = 0;
ret = platform_driver_register(&kim_platform_driver);
if (ret != 0) {
pr_err("platform drv registration failed");
- return ST_ERR_FAILURE;
+ return -1;
}
- return ST_SUCCESS;
+ return 0;
}
static void __exit st_kim_deinit(void)
{
/* the following returns void */
platform_driver_unregister(&kim_platform_driver);
- kfree(kim_gdata);
- kim_gdata = NULL;
}
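
Since kim_probe() now pulls its gpio table out of pdev->dev.platform_data and indexes st_kim_devices[] by pdev->id, each ST instance has to be described in the board file. A rough sketch of such an entry follows; the .name string, the gpio numbers and the assumption that only BT is wired are all hypothetical and must match kim_platform_driver and the actual board.

/*
 * Hypothetical board-file snippet, one platform device per ST instance.
 * The .name string and the gpio numbers are assumptions.
 */
#include <linux/init.h>
#include <linux/platform_device.h>

static long wilink_st_gpios[3] = {
	55,	/* ST_BT: BT core enable / nShutdown gpio */
	-1,	/* ST_FM: not wired on this board */
	-1,	/* ST_GPS: not wired on this board */
};

static struct platform_device wilink_st_device = {
	.name	= "kim",	/* must match kim_platform_driver's .name */
	.id	= 0,		/* later looked up via st_kim_ref(&st_gdata, 0) */
	.dev	= {
		.platform_data = wilink_st_gpios,
	},
};

static int __init board_add_wilink_st(void)
{
	return platform_device_register(&wilink_st_device);
}
device_initcall(board_add_wilink_st);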
diff --git a/drivers/staging/ti-st/st_kim.h b/drivers/staging/ti-st/st_kim.h
index ff3270ec784..7de2541f2de 100644
--- a/drivers/staging/ti-st/st_kim.h
+++ b/drivers/staging/ti-st/st_kim.h
@@ -43,50 +43,72 @@
* since the self-test for chip takes a while
*/
#define POR_RETRY_COUNT 5
-/*
- * legacy rfkill support where-in 3 rfkill
- * devices are created for the 3 gpios
- * that ST has requested
+
+/**
+ * struct chip_version - save the chip version
*/
-#define LEGACY_RFKILL_SUPPORT
-/*
- * header file for ST provided by KIM
+struct chip_version {
+ unsigned short full;
+ unsigned short chip;
+ unsigned short min_ver;
+ unsigned short maj_ver;
+};
+
+/**
+ * struct kim_data_s - the KIM internal data, embedded as the
+ * platform's drv data. One for each ST device in the system.
+ * @uim_pid: KIM needs to communicate with UIM to request to install
+ * the ldisc by opening UART when protocol drivers register.
+ * @kim_pdev: the platform device added in one of the board-XX.c file
+ * in arch/XX/ directory, 1 for each ST device.
+ * @kim_rcvd: completion handler to notify when data was received,
+ * mainly used during fw download, which involves multiple send/wait
+ * for each of the HCI-VS commands.
+ * @ldisc_installed: completion handler to notify that the UIM accepted
+ * the request to install the ldisc, notified from tty_open which suggests
+ * the ldisc was properly installed.
+ * @resp_buffer: data buffer for the .bts fw file name.
+ * @fw_entry: firmware class struct to request/release the fw.
+ * @gpios: the list of core/chip enable gpios for BT, FM and GPS cores.
+ * @rx_state: the rx state for kim's receive func during fw download.
+ * @rx_count: the rx count for the kim's receive func during fw download.
+ * @rx_skb: all of the fw response data might not come at once, hence data
+ * storage for the whole fw response; only HCI_EVENTs arrive here, which
+ * is what makes it different from ST's receive.
+ * @rfkill: rfkill data for each of the cores to be registered with rfkill.
+ * @rf_protos: proto types of the data registered with rfkill sub-system.
+ * @core_data: ST core's data, which mainly is the tty's disc_data
+ * @version: chip version available via a debugfs entry.
+ *
*/
struct kim_data_s {
long uim_pid;
struct platform_device *kim_pdev;
struct completion kim_rcvd, ldisc_installed;
- /* MAX len of the .bts firmware script name */
char resp_buffer[30];
const struct firmware *fw_entry;
long gpios[ST_MAX];
- struct kobject *kim_kobj;
-/* used by kim_int_recv to validate fw response */
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
-#ifdef LEGACY_RFKILL_SUPPORT
struct rfkill *rfkill[ST_MAX];
enum proto_type rf_protos[ST_MAX];
-#endif
struct st_data_s *core_data;
+ struct chip_version version;
};
-long st_kim_start(void);
-long st_kim_stop(void);
-/*
- * called from st_tty_receive to authenticate fw_download
+/**
+ * functions called when one of the protocol drivers gets
+ * registered; these need to communicate with UIM to request
+ * the ldisc be installed, read the chip version and download the relevant fw
*/
-void st_kim_recv(void *, const unsigned char *, long count);
+long st_kim_start(void *);
+long st_kim_stop(void *);
+void st_kim_recv(void *, const unsigned char *, long count);
void st_kim_chip_toggle(enum proto_type, enum kim_gpio_state);
-
-void st_kim_complete(void);
-
-/* function called from ST KIM to ST Core, to
- * list out the protocols registered
- */
-void kim_st_list_protocols(struct st_data_s *, char *);
+void st_kim_complete(void *);
+void kim_st_list_protocols(struct st_data_s *, void *);
/*
* BTS headers
@@ -98,9 +120,13 @@ void kim_st_list_protocols(struct st_data_s *, char *);
#define ACTION_RUN_SCRIPT 5
#define ACTION_REMARKS 6
-/*
- * * BRF Firmware header
- * */
+/**
+ * struct bts_header - the fw file is NOT a plain binary which can
+ * be sent onto the TTY as is. The .bts is more of a script
+ * file which has different types of actions.
+ * Each such action needs to be parsed by the KIM and
+ * the relevant procedure called.
+ */
struct bts_header {
uint32_t magic;
uint32_t version;
@@ -108,9 +134,10 @@ struct bts_header {
uint8_t actions[0];
} __attribute__ ((packed));
-/*
- * * BRF Actions structure
- * */
+/**
+ * struct bts_action - Each .bts action has its own type of
+ * data.
+ */
struct bts_action {
uint16_t type;
uint16_t size;
@@ -136,8 +163,11 @@ struct bts_action_serial {
uint32_t flow_control;
} __attribute__ ((packed));
-/* for identifying the change speed HCI VS
- * command
+/**
+ * struct hci_command - the HCI-VS command for changing
+ * the baud rate of the host-side UART, which
+ * needs to be ignored, since UIM would do that
+ * when it receives the ldisc installation request from KIM.
*/
struct hci_command {
uint8_t prefix;
diff --git a/drivers/staging/ti-st/st_ll.c b/drivers/staging/ti-st/st_ll.c
index 0685a100db6..7a1fb6de830 100644
--- a/drivers/staging/ti-st/st_ll.c
+++ b/drivers/staging/ti-st/st_ll.c
@@ -34,7 +34,7 @@ static void send_ll_cmd(struct st_data_s *st_data,
static void ll_device_want_to_sleep(struct st_data_s *st_data)
{
- pr_info("%s", __func__);
+ pr_debug("%s", __func__);
/* sanity check */
if (st_data->ll_state != ST_LL_AWAKE)
pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND"
@@ -101,7 +101,7 @@ void st_ll_wakeup(struct st_data_s *ll)
/* called when ST Core wants the state */
unsigned long st_ll_getstate(struct st_data_s *ll)
{
- pr_info(" returning state %ld", ll->ll_state);
+ pr_debug(" returning state %ld", ll->ll_state);
return ll->ll_state;
}
@@ -127,9 +127,9 @@ unsigned long st_ll_sleep_state(struct st_data_s *st_data,
break;
default:
pr_err(" unknown input/state ");
- return ST_ERR_FAILURE;
+ return -1;
}
- return ST_SUCCESS;
+ return 0;
}
/* Called from ST CORE to initialize ST LL */
diff --git a/drivers/staging/ti-st/st_ll.h b/drivers/staging/ti-st/st_ll.h
index 77dfbf07e7b..e4dfacd83d9 100644
--- a/drivers/staging/ti-st/st_ll.h
+++ b/drivers/staging/ti-st/st_ll.h
@@ -41,6 +41,7 @@
#define ST_LL_AWAKE_TO_ASLEEP 3
#define ST_LL_INVALID 4
+/* different PM notifications coming from chip */
#define LL_SLEEP_IND 0x30
#define LL_SLEEP_ACK 0x31
#define LL_WAKE_UP_IND 0x32
@@ -50,13 +51,19 @@
long st_ll_init(struct st_data_s *);
long st_ll_deinit(struct st_data_s *);
-/* enable/disable ST LL along with KIM start/stop
+/**
+ * enable/disable ST LL along with KIM start/stop
* called by ST Core
*/
void st_ll_enable(struct st_data_s *);
void st_ll_disable(struct st_data_s *);
+/**
+ * various funcs used by ST core to set/get the various PM states
+ * of the chip.
+ */
unsigned long st_ll_getstate(struct st_data_s *);
unsigned long st_ll_sleep_state(struct st_data_s *, unsigned char);
void st_ll_wakeup(struct st_data_s *);
+
#endif /* ST_LL_H */
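
For context on how the PM defines above get used: ST core feeds the HCILL notification bytes it picks out of the rx stream into st_ll_sleep_state(), which after this patch reports 0 on success rather than ST_SUCCESS. A minimal sketch of that dispatch follows; the helper name is made up, the real caller is st_int_recv() in st_core.c.

/* illustrative helper; handle_ll_byte() is not part of the driver */
#include <linux/kernel.h>
#include "st_core.h"
#include "st_ll.h"

static void handle_ll_byte(struct st_data_s *st_gdata, unsigned char byte)
{
	switch (byte) {
	case LL_SLEEP_IND:
	case LL_SLEEP_ACK:
	case LL_WAKE_UP_IND:
		/* st_ll_sleep_state() now returns 0 on success, -1 otherwise */
		if (st_ll_sleep_state(st_gdata, byte) != 0)
			pr_err("unhandled HCILL byte 0x%02x", byte);
		break;
	default:
		/* not a PM notification, regular protocol data */
		break;
	}
}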
diff --git a/drivers/staging/ti-st/sysfs-uim b/drivers/staging/ti-st/sysfs-uim
index 10311afcbd4..626bda51ee8 100644
--- a/drivers/staging/ti-st/sysfs-uim
+++ b/drivers/staging/ti-st/sysfs-uim
@@ -14,3 +14,15 @@ Description:
uninstallation would be polling on this device and listening
on events which would suggest either to install or un-install
line discipline
+
+What: /sys/kernel/debug/ti-st/version
+Contact: Pavan Savoy <pavan_savoy@ti.com>
+Description:
+ WiLink chip's ROM version exposed to user-space for some
+ proprietary protocol stacks to make use of.
+
+What: /sys/kernel/debug/ti-st/protocols
+Contact: Pavan Savoy <pavan_savoy@ti.com>
+Description:
+ The list of protocols registered, which is the reason for the chip being ON.
+
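
A user-space client can read these debugfs entries like regular files, assuming debugfs is mounted at /sys/kernel/debug. A minimal reader, for illustration only:

/* reads the version entry documented above; error handling kept minimal */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/debug/ti-st/version", "r");

	if (!f) {
		perror("ti-st version");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("WiLink ROM version: %s", line);
	fclose(f);
	return 0;
}

The protocols entry can be read the same way and lists one R/U flag per core, as produced by kim_st_list_protocols().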
diff --git a/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS b/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS
new file mode 100644
index 00000000000..86f578727f9
--- /dev/null
+++ b/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS
@@ -0,0 +1,45 @@
+TI DSP/Bridge Driver - Contributors File
+
+The DSP/Bridge project wishes to thank all of its contributors; the current
+bridge driver is the result of the work of all of them. If any name is accidentally
+omitted, let us know by sending a mail to omar.ramirez@ti.com or
+x095840@ti.com.
+
+Please keep the following list in alphabetical order.
+
+ Suman Anna
+ Sripal Bagadia
+ Felipe Balbi
+ Ohad Ben-Cohen
+ Phil Carmody
+ Deepak Chitriki
+ Felipe Contreras
+ Hiroshi Doyu
+ Seth Forshee
+ Ivan Gomez Castellanos
+ Mark Grosen
+ Ramesh Gupta G
+ Fernando Guzman Lugo
+ Axel Haslam
+ Janet Head
+ Shivananda Hebbar
+ Hari Kanigeri
+ Tony Lindgren
+ Antonio Luna
+ Hari Nagalla
+ Nishanth Menon
+ Ameya Palande
+ Vijay Pasam
+ Gilbert Pitney
+ Omar Ramirez Luna
+ Ernesto Ramos
+ Chris Ring
+ Larry Schiefer
+ Rebecca Schultz Zavin
+ Bhavin Shah
+ Andy Shevchenko
+ Jeff Taylor
+ Roman Tereshonkov
+ Armando Uribe de Leon
+ Nischal Varide
+ Wenbiao Wang
diff --git a/drivers/staging/tidspbridge/Documentation/README b/drivers/staging/tidspbridge/Documentation/README
new file mode 100644
index 00000000000..df6d371161e
--- /dev/null
+++ b/drivers/staging/tidspbridge/Documentation/README
@@ -0,0 +1,70 @@
+ Linux DSP/BIOS Bridge release
+
+DSP/BIOS Bridge overview
+========================
+
+DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more
+attached DSPs. The GPP is considered the master or "host" processor, and the
+attached DSPs are processing resources that can be utilized by applications
+and drivers running on the GPP.
+
+The abstraction that DSP/BIOS Bridge supplies is a direct link between a GPP
+program and a DSP task. This communication link is partitioned into two
+types of sub-links: messaging (short, fixed-length packets) and data
+streaming (multiple, large buffers). Each sub-link operates independently,
+and features in-order delivery of data, meaning that messages are delivered
+in the order they were submitted to the message link, and stream buffers are
+delivered in the order they were submitted to the stream link.
+
+In addition, a GPP client can specify what inputs and outputs a DSP task
+uses. DSP tasks typically use message objects for passing control and status
+information and stream objects for efficient streaming of real-time data.
+
+GPP Software Architecture
+=========================
+
+A GPP application communicates with its associated DSP task running on the
+DSP subsystem using the DSP/BIOS Bridge API. For example, a GPP audio
+application can use the API to pass messages to a DSP task that is managing
+data flowing from analog-to-digital converters (ADCs) to digital-to-analog
+converters (DACs).
+
+From the perspective of the GPP OS, the DSP is treated as just another
+peripheral device. Most high level GPP OS typically support a device driver
+model, whereby applications can safely access and share a hardware peripheral
+through standard driver interfaces. Therefore, to allow multiple GPP
+applications to share access to the DSP, the GPP side of DSP/BIOS Bridge
+implements a device driver for the DSP.
+
+Since driver interfaces are not always standard across GPP OS, and to provide
+some level of interoperability of application code using DSP/BIOS Bridge
+between GPP OS, DSP/BIOS Bridge provides a standard library of APIs which
+wrap calls into the device driver. So, rather than calling GPP OS specific
+driver interfaces, applications (and even other device drivers) can use the
+standard API library directly.
+
+DSP Software Architecture
+=========================
+
+For DSP/BIOS, DSP/BIOS Bridge adds a device-independent streaming I/O (STRM)
+interface, a messaging interface (NODE), and a Resource Manager (RM) Server.
+The RM Server runs as a task of DSP/BIOS and is subservient to commands
+and queries from the GPP. It executes commands to start and stop DSP signal
+processing nodes in response to GPP programs making requests through the
+(GPP-side) API.
+
+DSP tasks started by the RM Server are similar to any other DSP task with two
+important differences: they must follow a specific task model consisting of
+three C-callable functions (node create, execute, and delete), with specific
+sets of arguments, and they have a pre-defined task environment established
+by the RM Server.
+
+Tasks started by the RM Server communicate using the STRM and NODE interfaces
+and act as servers for their corresponding GPP clients, performing signal
+processing functions as requested by messages sent by their GPP client.
+Typically, a DSP task moves data from source devices to sink devices using
+device independent I/O streams, performing application-specific processing
+and transformations on the data while it is moved. For example, an audio
+task might perform audio decompression (ADPCM, MPEG, CELP) on data received
+from a GPP audio driver and then send the decompressed linear samples to a
+digital-to-analog converter.
diff --git a/drivers/staging/tidspbridge/Documentation/error-codes b/drivers/staging/tidspbridge/Documentation/error-codes
new file mode 100644
index 00000000000..12826e2a3aa
--- /dev/null
+++ b/drivers/staging/tidspbridge/Documentation/error-codes
@@ -0,0 +1,157 @@
+ DSP/Bridge Error Code Guide
+
+
+The success code is always 0, except for one case where a success status
+different from 0 is possible: when enumerating a series of dsp objects,
+running out of objects to enumerate is still considered a successful case.
+In this case a positive ENODATA is returned (TODO: change to
+avoid this case).
+
+Error codes are returned as -1; if a specific code is expected, it
+can be propagated to user space by reading the errno symbol defined in errno.h.
+For specific details on the implementation, a copy of the standard used should
+be read first.
+
+The error codes used by this driver are:
+
+[EPERM]
+ General driver failure.
+
+ According to the use case the following might apply:
+ - Device is in 'sleep/suspend' mode due to DPM.
+ - User cannot mark end of stream on an input channel.
+ - Requested operation is invalid for the node type.
+ - Invalid alignment for the node messaging buffer.
+ - The specified direction is invalid for the stream.
+ - Invalid stream mode.
+
+[ENOENT]
+ The specified object or file was not found.
+
+[ESRCH]
+ A shared memory buffer contained in a message or stream could not be mapped
+ to the GPP client process's virtual space.
+
+[EIO]
+ Driver interface I/O error.
+
+ or:
+ - Unable to plug channel ISR for configured IRQ.
+ - No free I/O request packets are available.
+
+[ENXIO]
+ Unable to find a named section in DSP executable or a non-existent memory
+ segment identifier was specified.
+
+[EBADF]
+ General error for file handling:
+
+ - Unable to open file.
+ - Unable to read file.
+ - An error occurred while parsing the DSP executable file.
+
+[ENOMEM]
+ A memory allocation failure occurred.
+
+[EACCES]
+ - Unable to read content of DCD data section; this is typically caused by
+ improperly configured nodes.
+ - Unable to decode DCD data section content; this is typically caused by
+ changes to DSP/BIOS Bridge data structures.
+ - Unable to get pointer to DCD data section; this is typically caused by
+ improperly configured UUIDs.
+ - Unable to load file containing DCD data section; this is typically
+ caused by a missing COFF file.
+ - The specified COFF file does not contain a valid node registration
+ section.
+
+[EFAULT]
+ Invalid pointer or handle.
+
+[EEXIST]
+ Attempted to create a channel manager when one already exists.
+
+[EINVAL]
+ Invalid argument.
+
+[ESPIPE]
+ Symbol not found in the COFF file. DSPNode_Create will return this if
+ the iAlg function table for an xDAIS socket is not found in the COFF file.
+ In this case, force the symbol to be linked into the COFF file.
+ DSPNode_Create, DSPNode_Execute, and DSPNode_Delete will return this if
+ the create, execute, or delete phase function, respectively, could not be
+ found in the COFF file.
+
+ - No symbol table is loaded/found for this board.
+ - Unable to initialize the ZL COFF parsing module.
+
+[EPIPE]
+ I/O is currently pending.
+
+ - End of stream was already requested on this output channel.
+
+[EDOM]
+ A parameter is specified outside its valid range.
+
+[ENOSYS]
+ The indicated operation is not supported.
+
+[EIDRM]
+ During enumeration a change in the number or properties of the objects
+ has occurred.
+
+[ECHRNG]
+ Attempt to create a channel manager with too many channels, or channel ID
+ out of range.
+
+[EBADR]
+ The state of the specified object is incorrect for the requested operation.
+
+ - Invalid segment ID.
+
+[ENODATA]
+ Unable to retrieve resource information from the registry.
+
+ - No more registry values.
+
+[ETIME]
+ A timeout occurred before the requested operation could complete.
+
+[ENOSR]
+ A stream has been issued the maximum number of buffers allowed in the
+ stream at once; buffers must be reclaimed from the stream before any more
+ can be issued.
+
+ - No free channels are available.
+
+[EILSEQ]
+ Error occurred in a dynamic loader library function.
+
+[EISCONN]
+ The specified connection already exists.
+
+[ENOTCONN]
+ Nodes not connected.
+
+[ETIMEDOUT]
+ Timeout occurred waiting for a response from the hardware.
+
+ - Wait for flush operation on an output channel timed out.
+
+[ECONNREFUSED]
+ No more connections can be made for this node.
+
+[EALREADY]
+ Channel is already in use.
+
+[EREMOTEIO]
+ dwTimeOut parameter was CHNL_IOCNOWAIT, yet no I/O completions were
+ queued.
+
+[ECANCELED]
+ I/O has been cancelled on this channel.
+
+[ENOKEY]
+ Invalid subkey parameter.
+
+ - UUID not found in registry.
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
new file mode 100644
index 00000000000..93de4f2e8bf
--- /dev/null
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -0,0 +1,90 @@
+#
+# DSP Bridge Driver Support
+#
+
+menuconfig TIDSPBRIDGE
+ tristate "DSP Bridge driver"
+ depends on ARCH_OMAP3
+ select OMAP_MBOX_FWK
+ help
+ DSP/BIOS Bridge is designed for platforms that contain a GPP and
+ one or more attached DSPs. The GPP is considered the master or
+ "host" processor, and the attached DSPs are processing resources
+ that can be utilized by applications and drivers running on the GPP.
+
+ This driver depends on OMAP Mailbox (OMAP_MBOX_FWK).
+
+config TIDSPBRIDGE_DVFS
+ bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)"
+ depends on TIDSPBRIDGE && OMAP_PM_SRF && CPU_FREQ
+ help
+ DVFS allows DSP Bridge to initiate the operating point change to
+ scale the chip voltage and frequency in order to match the
+ performance and power consumption to the current processing
+ requirements.
+
+config TIDSPBRIDGE_MEMPOOL_SIZE
+ hex "Physical memory pool size (Byte)"
+ depends on TIDSPBRIDGE
+ default 0x600000
+ help
+ Allocate the specified amount of memory at boot time to avoid allocation
+ failures under heavy memory fragmentation after the system has been
+ running for some time.
+
+config TIDSPBRIDGE_DEBUG
+ bool "Debug Support"
+ depends on TIDSPBRIDGE
+ help
+ Say Y to enable Bridge debugging capabilities
+
+config TIDSPBRIDGE_RECOVERY
+ bool "Recovery Support"
+ depends on TIDSPBRIDGE
+ default y
+ help
+ In case of a DSP fatal error, the Bridge driver will try to
+ recover itself.
+
+config TIDSPBRIDGE_CACHE_LINE_CHECK
+ bool "Check buffers to be 128 byte aligned"
+ depends on TIDSPBRIDGE
+ help
+ When the DSP processes data, the DSP cache controller loads 128-Byte
+ chunks (lines) from SDRAM and writes the data back in 128-Byte chunks.
+ If a DMM buffer does not start and end on a 128-Byte boundary, the data
+ preceding the start address (SA) from the 128-Byte boundary to the SA
+ and the data at addresses trailing the end address (EA) from the EA to
+ the next 128-Byte boundary will be loaded and written back as well.
+ This can lead to heap corruption. Say Y to enforce the check for 128-byte
+ alignment; buffers failing this check will be rejected.
+
+config TIDSPBRIDGE_WDT3
+ bool "Enable watchdog timer"
+ depends on TIDSPBRIDGE
+ help
+ WDT3 is managed by the DSP; once it is enabled, the DSP-side bridge is in
+ charge of refreshing the timer before it overflows. If the DSP hangs, the
+ MPU will catch the interrupt and try to recover the DSP.
+
+config TIDSPBRIDGE_WDT_TIMEOUT
+ int "Watchdog timer timeout (in secs)"
+ depends on TIDSPBRIDGE && TIDSPBRIDGE_WDT3
+ default 5
+ help
+ Watchdog timer timeout value. If the watchdog counter is not reset within
+ this time, the WDT overflow interrupt will be triggered.
+
+config TIDSPBRIDGE_NTFY_PWRERR
+ bool "Notify power errors"
+ depends on TIDSPBRIDGE
+ help
+ Enable notifications to registered clients when a power error occurs
+ while trying to suspend the bridge driver. Say Y to signal this event as
+ a fatal error; recovery will require a bridge restart.
+
+config TIDSPBRIDGE_BACKTRACE
+ bool "Dump backtraces on fatal errors"
+ depends on TIDSPBRIDGE
+ help
+ Enable dumping of useful backtrace information on fatal errors. Say Y if
+ you want this information for testing purposes.
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
new file mode 100644
index 00000000000..65671724e6f
--- /dev/null
+++ b/drivers/staging/tidspbridge/Makefile
@@ -0,0 +1,34 @@
+obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
+
+libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
+libservices = services/sync.o services/cfg.o \
+ services/ntfy.o services/services.o
+libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
+ core/tiomap3430_pwr.o core/tiomap_io.o \
+ core/ue_deh.o core/wdt.o core/dsp-clock.o
+libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
+ pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
+librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
+ rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
+ rmgr/nldr.o rmgr/drv_interface.o
+libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
+ dynload/tramp.o
+libhw = hw/hw_mmu.o
+
+bridgedriver-objs = $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
+ $(libdload) $(libhw)
+
+#Machine dependent
+ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
+ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \
+ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS
+
+ccflags-y += -Idrivers/staging/tidspbridge/include
+ccflags-y += -Idrivers/staging/tidspbridge/services
+ccflags-y += -Idrivers/staging/tidspbridge/core
+ccflags-y += -Idrivers/staging/tidspbridge/pmgr
+ccflags-y += -Idrivers/staging/tidspbridge/rmgr
+ccflags-y += -Idrivers/staging/tidspbridge/dynload
+ccflags-y += -Idrivers/staging/tidspbridge/hw
+ccflags-y += -Iarch/arm
+
diff --git a/drivers/staging/tidspbridge/TODO b/drivers/staging/tidspbridge/TODO
new file mode 100644
index 00000000000..54f4a296738
--- /dev/null
+++ b/drivers/staging/tidspbridge/TODO
@@ -0,0 +1,18 @@
+* Migrate to (and if necessary, extend) existing upstream code such as
+ iommu, wdt, mcbsp, gptimers
+* Decouple hardware-specific code (e.g. bridge_brd_start/stop/delete/monitor)
+* DOFF binary loader: consider pushing to user space. at the very least
+ eliminate the direct filesystem access
+* Eliminate general services and libraries - use or extend existing kernel
+ libraries instead (e.g. gcf/lcm in nldr.c, global helpers in gen/)
+* Eliminate direct manipulation of OMAP_SYSC_BASE
+* Eliminate list.h: it seems like a redundant wrapper around existing kernel lists
+* Eliminate DSP_SUCCEEDED macros and their imposed redundant indentations
+ (adopt the kernel way of checking for return values)
+* Audit interfaces exposed to user space
+* Audit and clean up header files folder
+* Use kernel coding style
+* checkpatch.pl fixes
+
+Please send any patches to Greg Kroah-Hartman <greg@kroah.com>
+and Omar Ramirez Luna <omar.ramirez@ti.com>.
diff --git a/drivers/staging/tidspbridge/core/_cmm.h b/drivers/staging/tidspbridge/core/_cmm.h
new file mode 100644
index 00000000000..7660bef6ebb
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_cmm.h
@@ -0,0 +1,45 @@
+/*
+ * _cmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining CMM manager objects and defines needed
+ * by IO manager to register shared memory regions when DSP base image
+ * is loaded(bridge_io_on_loaded).
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _CMM_
+#define _CMM_
+
+/*
+ * These target side symbols define the beginning and ending addresses
+ * of the section of shared memory used for shared memory manager CMM.
+ * They are defined in the *cfg.cmd file by cdb code.
+ */
+#define SHM0_SHARED_BASE_SYM "_SHM0_BEG"
+#define SHM0_SHARED_END_SYM "_SHM0_END"
+#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT"
+
+/*
+ * Shared Memory Region #0(SHMSEG0) is used in the following way:
+ *
+ * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END)
+ * V V V
+ * ------------------------------------------------------------
+ * | DSP-side allocations | GPP-side allocations |
+ * ------------------------------------------------------------
+ *
+ *
+ */
+
+#endif /* _CMM_ */
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
new file mode 100644
index 00000000000..16723cd3483
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -0,0 +1,35 @@
+/*
+ * _deh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header for DEH module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2010 Felipe Contreras
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DEH_
+#define _DEH_
+
+#include <dspbridge/ntfy.h>
+#include <dspbridge/dspdefs.h>
+
+/* DEH Manager: only one created per board: */
+struct deh_mgr {
+ struct bridge_dev_context *hbridge_context; /* Bridge context. */
+ struct ntfy_object *ntfy_obj; /* NTFY object */
+
+ /* MMU Fault DPC */
+ struct tasklet_struct dpc_tasklet;
+};
+
+#endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_msg_sm.h b/drivers/staging/tidspbridge/core/_msg_sm.h
new file mode 100644
index 00000000000..556de5c025d
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_msg_sm.h
@@ -0,0 +1,142 @@
+/*
+ * _msg_sm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining msg_ctrl manager objects and defines needed
+ * by IO manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MSG_SM_
+#define _MSG_SM_
+
+#include <dspbridge/list.h>
+#include <dspbridge/msgdefs.h>
+
+/*
+ * These target side symbols define the beginning and ending addresses
+ * of the section of shared memory used for messages. They are
+ * defined in the *cfg.cmd file by cdb code.
+ */
+#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG"
+#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END"
+
+#ifndef _CHNL_WORDSIZE
+#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 4 bytes/word */
+#endif
+
+/*
+ * ======== msg_ctrl ========
+ * There is a control structure for messages to the DSP, and a control
+ * structure for messages from the DSP. The shared memory region for
+ * transferring messages is partitioned as follows:
+ *
+ * ----------------------------------------------------------
+ * |Control | Messages from DSP | Control | Messages to DSP |
+ * ----------------------------------------------------------
+ *
+ * msg_ctrl control structure for messages to the DSP is used in the following
+ * way:
+ *
+ * buf_empty - This flag is set to FALSE by the GPP after it has output
+ * messages for the DSP. The DSP host driver sets it to
+ * TRUE after it has copied the messages.
+ * post_swi - Set to 1 by the GPP after it has written the messages,
+ * set the size, and set buf_empty to FALSE.
+ * The DSP Host driver uses SWI_andn of the post_swi field
+ * when a host interrupt occurs. The host driver clears
+ * this after posting the SWI.
+ * size - Number of messages to be read by the DSP.
+ *
+ * For messages from the DSP:
+ * buf_empty - This flag is set to FALSE by the DSP after it has output
+ * messages for the GPP. The DPC on the GPP sets it to
+ * TRUE after it has copied the messages.
+ * post_swi - Set to 1 by the DPC on the GPP after copying the messages.
+ * size - Number of messages to be read by the GPP.
+ */
+struct msg_ctrl {
+ u32 buf_empty; /* to/from DSP buffer is empty */
+ u32 post_swi; /* Set to "1" to post msg_ctrl SWI */
+ u32 size; /* Number of messages to/from the DSP */
+ u32 resvd;
+};
+
+/*
+ * ======== msg_mgr ========
+ * The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can
+ * have msg_queue to hold all messages that come up from the corresponding
+ * node on the DSP. The msg_mgr also has a shared queue of messages
+ * ready to go to the DSP.
+ */
+struct msg_mgr {
+ /* The first field must match that in msgobj.h */
+
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+
+ struct io_mgr *hio_mgr; /* IO manager */
+ struct lst_list *queue_list; /* List of MSG_QUEUEs */
+ spinlock_t msg_mgr_lock; /* For critical sections */
+ /* Signalled when MsgFrame is available */
+ struct sync_object *sync_event;
+ struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
+ struct lst_list *msg_used_list; /* MsgFrames ready to go to DSP */
+ u32 msgs_pending; /* # of queued messages to go to DSP */
+ u32 max_msgs; /* Max # of msgs that fit in buffer */
+ msg_onexit on_exit; /* called when RMS_EXIT is received */
+};
+
+/*
+ * ======== msg_queue ========
+ * Each NODE has a msg_queue for receiving messages from the
+ * corresponding node on the DSP. The msg_queue object maintains a list
+ * of messages that have been sent to the host, but not yet read (MSG_Get),
+ * and a list of free frames that can be filled when new messages arrive
+ * from the DSP.
+ * The msg_queue's sync_event gets posted when a message is ready.
+ */
+struct msg_queue {
+ struct list_head list_elem;
+ struct msg_mgr *hmsg_mgr;
+ u32 max_msgs; /* Node message depth */
+ u32 msgq_id; /* Node environment pointer */
+ struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
+ /* Filled MsgFramess waiting to be read */
+ struct lst_list *msg_used_list;
+ void *arg; /* Handle passed to mgr on_exit callback */
+ struct sync_object *sync_event; /* Signalled when message is ready */
+ struct sync_object *sync_done; /* For synchronizing cleanup */
+ struct sync_object *sync_done_ack; /* For synchronizing cleanup */
+ struct ntfy_object *ntfy_obj; /* For notification of message ready */
+ bool done; /* TRUE <==> deleting the object */
+ u32 io_msg_pend; /* Number of pending MSG_get/put calls */
+};
+
+/*
+ * ======== msg_dspmsg ========
+ */
+struct msg_dspmsg {
+ struct dsp_msg msg;
+ u32 msgq_id; /* Identifies the node the message goes to */
+};
+
+/*
+ * ======== msg_frame ========
+ */
+struct msg_frame {
+ struct list_head list_elem;
+ struct msg_dspmsg msg_data;
+};
+
+#endif /* _MSG_SM_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
new file mode 100644
index 00000000000..1c1f157e167
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -0,0 +1,371 @@
+/*
+ * _tiomap.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions and types private to this Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_
+#define _TIOMAP_
+
+#include <plat/powerdomain.h>
+#include <plat/clockdomain.h>
+#include <mach-omap2/prm-regbits-34xx.h>
+#include <mach-omap2/cm-regbits-34xx.h>
+#include <dspbridge/devdefs.h>
+#include <hw_defs.h>
+#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
+#include <dspbridge/sync.h>
+#include <dspbridge/clk.h>
+
+struct map_l4_peripheral {
+ u32 phys_addr;
+ u32 dsp_virt_addr;
+};
+
+#define ARM_MAILBOX_START 0xfffcf000
+#define ARM_MAILBOX_LENGTH 0x800
+
+/* New Registers in OMAP3.1 */
+
+#define TESTBLOCK_ID_START 0xfffed400
+#define TESTBLOCK_ID_LENGTH 0xff
+
+/* ID Returned by OMAP1510 */
+#define TBC_ID_VALUE 0xB47002F
+
+#define SPACE_LENGTH 0x2000
+#define API_CLKM_DPLL_DMA 0xfffec000
+#define ARM_INTERRUPT_OFFSET 0xb00
+
+#define BIOS24XX
+
+#define L4_PERIPHERAL_NULL 0x0
+#define DSPVA_PERIPHERAL_NULL 0x0
+
+#define MAX_LOCK_TLB_ENTRIES 15
+
+#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */
+#define DSPVA_PERIPHERAL_PRM 0x1181e000
+#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */
+#define DSPVA_PERIPHERAL_SCM 0x1181f000
+#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */
+#define DSPVA_PERIPHERAL_MMU 0x11820000
+#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */
+#define DSPVA_PERIPHERAL_CM 0x1181c000
+#define L4_PERIPHERAL_PER 0x48005000 /* PER */
+#define DSPVA_PERIPHERAL_PER 0x1181d000
+
+#define L4_PERIPHERAL_GPIO1 0x48310000
+#define DSPVA_PERIPHERAL_GPIO1 0x11809000
+#define L4_PERIPHERAL_GPIO2 0x49050000
+#define DSPVA_PERIPHERAL_GPIO2 0x1180a000
+#define L4_PERIPHERAL_GPIO3 0x49052000
+#define DSPVA_PERIPHERAL_GPIO3 0x1180b000
+#define L4_PERIPHERAL_GPIO4 0x49054000
+#define DSPVA_PERIPHERAL_GPIO4 0x1180c000
+#define L4_PERIPHERAL_GPIO5 0x49056000
+#define DSPVA_PERIPHERAL_GPIO5 0x1180d000
+
+#define L4_PERIPHERAL_IVA2WDT 0x49030000
+#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000
+
+#define L4_PERIPHERAL_DISPLAY 0x48050000
+#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000
+
+#define L4_PERIPHERAL_SSI 0x48058000
+#define DSPVA_PERIPHERAL_SSI 0x11804000
+#define L4_PERIPHERAL_GDD 0x48059000
+#define DSPVA_PERIPHERAL_GDD 0x11805000
+#define L4_PERIPHERAL_SS1 0x4805a000
+#define DSPVA_PERIPHERAL_SS1 0x11806000
+#define L4_PERIPHERAL_SS2 0x4805b000
+#define DSPVA_PERIPHERAL_SS2 0x11807000
+
+#define L4_PERIPHERAL_CAMERA 0x480BC000
+#define DSPVA_PERIPHERAL_CAMERA 0x11819000
+
+#define L4_PERIPHERAL_SDMA 0x48056000
+#define DSPVA_PERIPHERAL_SDMA 0x11810000 /* 0x1181d000 conflict w/ PER */
+
+#define L4_PERIPHERAL_UART1 0x4806a000
+#define DSPVA_PERIPHERAL_UART1 0x11811000
+#define L4_PERIPHERAL_UART2 0x4806c000
+#define DSPVA_PERIPHERAL_UART2 0x11812000
+#define L4_PERIPHERAL_UART3 0x49020000
+#define DSPVA_PERIPHERAL_UART3 0x11813000
+
+#define L4_PERIPHERAL_MCBSP1 0x48074000
+#define DSPVA_PERIPHERAL_MCBSP1 0x11814000
+#define L4_PERIPHERAL_MCBSP2 0x49022000
+#define DSPVA_PERIPHERAL_MCBSP2 0x11815000
+#define L4_PERIPHERAL_MCBSP3 0x49024000
+#define DSPVA_PERIPHERAL_MCBSP3 0x11816000
+#define L4_PERIPHERAL_MCBSP4 0x49026000
+#define DSPVA_PERIPHERAL_MCBSP4 0x11817000
+#define L4_PERIPHERAL_MCBSP5 0x48096000
+#define DSPVA_PERIPHERAL_MCBSP5 0x11818000
+
+#define L4_PERIPHERAL_GPTIMER5 0x49038000
+#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000
+#define L4_PERIPHERAL_GPTIMER6 0x4903a000
+#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000
+#define L4_PERIPHERAL_GPTIMER7 0x4903c000
+#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000
+#define L4_PERIPHERAL_GPTIMER8 0x4903e000
+#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000
+
+#define L4_PERIPHERAL_SPI1 0x48098000
+#define DSPVA_PERIPHERAL_SPI1 0x1181a000
+#define L4_PERIPHERAL_SPI2 0x4809a000
+#define DSPVA_PERIPHERAL_SPI2 0x1181b000
+
+#define L4_PERIPHERAL_MBOX 0x48094000
+#define DSPVA_PERIPHERAL_MBOX 0x11808000
+
+#define PM_GRPSEL_BASE 0x48307000
+#define DSPVA_GRPSEL_BASE 0x11821000
+
+#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000
+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000
+#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000
+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000
+
+/* define a static array with L4 mappings */
+static const struct map_l4_peripheral l4_peripheral_table[] = {
+ {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX},
+ {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM},
+ {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU},
+ {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5},
+ {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6},
+ {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7},
+ {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8},
+ {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1},
+ {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2},
+ {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3},
+ {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4},
+ {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5},
+ {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT},
+ {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY},
+ {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI},
+ {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD},
+ {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1},
+ {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2},
+ {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1},
+ {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2},
+ {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3},
+ {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1},
+ {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2},
+ {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3},
+ {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4},
+ {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5},
+ {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA},
+ {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1},
+ {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2},
+ {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM},
+ {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM},
+ {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER},
+ {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE},
+ {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2},
+ {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3},
+ {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL}
+};
+
+/*
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i|
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ * where c -> External Clock Command: Clk & Autoidle Disable/Enable
+ * i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3
+ */
+
+/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */
+#define MBX_PM_CLK_IDMASK 0x7F
+
+/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */
+#define MBX_PM_CLK_CMDSHIFT 7
+
+/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */
+#define MBX_PM_CLK_CMDMASK 7
+
+/* MBX_CORE1_RESOURCES: CORE 1 clock resources. */
+#define MBX_CORE1_RESOURCES 7
+
+/* MBX_CORE2_RESOURCES: CORE 2 clock resources. */
+#define MBX_CORE2_RESOURCES 1
+
+/* MBX_PM_MAX_RESOURCES: TOTAL clock resources. */
+#define MBX_PM_MAX_RESOURCES 11
+
+/* Power Management Commands */
+#define BPWR_DISABLE_CLOCK 0
+#define BPWR_ENABLE_CLOCK 1
+
+/* OMAP242x specific resources */
+enum bpwr_ext_clock_id {
+ BPWR_GP_TIMER5 = 0x10,
+ BPWR_GP_TIMER6,
+ BPWR_GP_TIMER7,
+ BPWR_GP_TIMER8,
+ BPWR_WD_TIMER3,
+ BPWR_MCBSP1,
+ BPWR_MCBSP2,
+ BPWR_MCBSP3,
+ BPWR_MCBSP4,
+ BPWR_MCBSP5,
+ BPWR_SSI = 0x20
+};
+
+static const u32 bpwr_clkid[] = {
+ (u32) BPWR_GP_TIMER5,
+ (u32) BPWR_GP_TIMER6,
+ (u32) BPWR_GP_TIMER7,
+ (u32) BPWR_GP_TIMER8,
+ (u32) BPWR_WD_TIMER3,
+ (u32) BPWR_MCBSP1,
+ (u32) BPWR_MCBSP2,
+ (u32) BPWR_MCBSP3,
+ (u32) BPWR_MCBSP4,
+ (u32) BPWR_MCBSP5,
+ (u32) BPWR_SSI
+};
+
+struct bpwr_clk_t {
+ u32 clk_id;
+ enum dsp_clk_id clk;
+};
+
+static const struct bpwr_clk_t bpwr_clks[] = {
+ {(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5},
+ {(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6},
+ {(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7},
+ {(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8},
+ {(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3},
+ {(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1},
+ {(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2},
+ {(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3},
+ {(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4},
+ {(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5},
+ {(u32) BPWR_SSI, DSP_CLK_SSI}
+};
+
+/* Interrupt Register Offsets */
+#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */
+#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */
+
+#define DSP_MAILBOX1_INT 10
+/*
+ * Bit definition of Interrupt Level Registers
+ */
+
+/* Mail Box defines */
+#define MB_ARM2DSP1_REG_OFFSET 0x00
+
+#define MB_ARM2DSP1B_REG_OFFSET 0x04
+
+#define MB_DSP2ARM1B_REG_OFFSET 0x0C
+
+#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18
+
+#define MB_ARM2DSP_FLAG 0x0001
+
+#define MBOX_ARM2DSP HW_MBOX_ID0
+#define MBOX_DSP2ARM HW_MBOX_ID1
+#define MBOX_ARM HW_MBOX_U0_ARM
+#define MBOX_DSP HW_MBOX_U1_DSP1
+
+#define ENABLE true
+#define DISABLE false
+
+#define HIGH_LEVEL true
+#define LOW_LEVEL false
+
+/* Macros */
+#define CLEAR_BIT(reg, mask) (reg &= ~mask)
+#define SET_BIT(reg, mask) (reg |= mask)
+
+#define SET_GROUP_BITS16(reg, position, width, value) \
+ do {\
+ reg &= ~((0xFFFF >> (16 - (width))) << (position)) ; \
+ reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \
+ } while (0);
+
+#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
+
+/* This Bridge driver's device context: */
+struct bridge_dev_context {
+ struct dev_object *hdev_obj; /* Handle to Bridge device object. */
+ u32 dw_dsp_base_addr; /* Arm's API to DSP virt base addr */
+ /*
+ * DSP External memory prog address as seen virtually by the OS on
+ * the host side.
+ */
+ u32 dw_dsp_ext_base_addr; /* See the comment above */
+ u32 dw_api_reg_base; /* API mem map'd registers */
+ void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
+ u32 dw_api_clk_base; /* CLK Registers */
+ u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
+ u32 dw_public_rhea; /* Pub Rhea */
+ u32 dw_int_addr; /* MB INTR reg */
+ u32 dw_tc_endianism; /* TC Endianism register */
+ u32 dw_test_base; /* DSP MMU Mapped registers */
+ u32 dw_self_loop; /* Pointer to the selfloop */
+ u32 dw_dsp_start_add; /* API Boot vector */
+ u32 dw_internal_size; /* Internal memory size */
+
+ struct omap_mbox *mbox; /* Mail box handle */
+
+ struct cfg_hostres *resources; /* Host Resources */
+
+ /*
+ * Processor specific info is set when prog loaded and read from DCD.
+ * [See bridge_dev_ctrl()] PROC info contains DSP-MMU TLB entries.
+ */
+ /* DMMU TLB entries */
+ struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
+ u32 dw_brd_state; /* Last known board state. */
+
+ /* TC Settings */
+ bool tc_word_swap_on; /* Traffic Controller Word Swap */
+ struct pg_table_attrs *pt_attrs;
+ u32 dsp_per_clks;
+};
+
+/*
+ * If dsp_debug is true, do not branch to the DSP entry
+ * point and wait for DSP to boot.
+ */
+extern s32 dsp_debug;
+
+/*
+ * ======== sm_interrupt_dsp ========
+ * Purpose:
+ * Set interrupt value & send an interrupt to the DSP processor(s).
+ * This is typically used when mailbox interrupt mechanisms allow data
+ * to be associated with the interrupt, such as for OMAP's CMD/DATA regs.
+ * Parameters:
+ * dev_context: Handle to Bridge driver defined device info.
+ * mb_val: Value associated with interrupt(e.g. mailbox value).
+ * Returns:
+ * 0: Interrupt sent;
+ * else: Unable to send interrupt.
+ * Requires:
+ * Ensures:
+ */
+int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val);
+
+#endif /* _TIOMAP_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap_pwr.h b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
new file mode 100644
index 00000000000..bd0354d9ad0
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
@@ -0,0 +1,85 @@
+/*
+ * _tiomap_pwr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions and types for the DSP wake/sleep routines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_PWR_
+#define _TIOMAP_PWR_
+
+#ifdef CONFIG_PM
+extern s32 dsp_test_sleepstate;
+#endif
+
+extern struct mailbox_context mboxsetting;
+
+/*
+ * ======== wake_dsp =========
+ * Wakes up the DSP from DeepSleep
+ */
+extern int wake_dsp(struct bridge_dev_context *dev_context,
+ void *pargs);
+
+/*
+ * ======== sleep_dsp =========
+ * Places the DSP in DeepSleep.
+ */
+extern int sleep_dsp(struct bridge_dev_context *dev_context,
+ u32 dw_cmd, void *pargs);
+/*
+ * ========interrupt_dsp========
+ * Sends an interrupt to DSP unconditionally.
+ */
+extern void interrupt_dsp(struct bridge_dev_context *dev_context,
+ u16 mb_val);
+
+/*
+ * ======== dsp_peripheral_clk_ctrl =========
+ * Handles DSP peripheral clock enable/disable requests
+ */
+extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context
+ *dev_context, void *pargs);
+/*
+ * ======== handle_hibernation_from_dsp ========
+ * Handle Hibernation requested from DSP
+ */
+int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context);
+/*
+ * ======== post_scale_dsp ========
+ * Handle Post Scale notification to DSP
+ */
+int post_scale_dsp(struct bridge_dev_context *dev_context,
+ void *pargs);
+/*
+ * ======== pre_scale_dsp ========
+ * Handle Pre Scale notification to DSP
+ */
+int pre_scale_dsp(struct bridge_dev_context *dev_context,
+ void *pargs);
+/*
+ * ======== handle_constraints_set ========
+ * Handle constraints request from DSP
+ */
+int handle_constraints_set(struct bridge_dev_context *dev_context,
+ void *pargs);
+
+/*
+ * ======== dsp_clk_wakeup_event_ctrl ========
+ * This function sets the group selection bits while
+ * enabling/disabling.
+ */
+void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable);
+
+#endif /* _TIOMAP_PWR_ */
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
new file mode 100644
index 00000000000..bee2b23a09a
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -0,0 +1,1014 @@
+/*
+ * chnl_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge functions for Bridge driver channel module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * The lower edge functions must be implemented by the Bridge driver
+ * writer, and are declared in chnl_sm.h.
+ *
+ * Care is taken in this code to prevent simultaneous access to channel
+ * queues from
+ * 1. Threads.
+ * 2. io_dpc(), scheduled from the io_isr() as an event.
+ *
+ * This is done primarily by:
+ * - Semaphores.
+ * - state flags in the channel object; and
+ * - ensuring the IO_Dispatch() routine, which is called from both
+ * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered.
+ *
+ * Channel Invariant:
+ * There is an important invariant condition which must be maintained per
+ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
+ * which may cause timeouts and/or failure of function sync_wait_on_event.
+ * This invariant condition is:
+ *
+ * LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
+ * and
+ * !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspchnl.h>
+#include "_tiomap.h"
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- Define for This */
+#define USERMODE_ADDR PAGE_OFFSET
+
+#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
+
+/* ----------------------------------- Function Prototypes */
+static struct lst_list *create_chirp_list(u32 chirps);
+
+static void free_chirp_list(struct lst_list *chirp_list);
+
+static struct chnl_irp *make_new_chirp(void);
+
+static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
+ u32 *chnl);
+
+/*
+ * ======== bridge_chnl_add_io_req ========
+ * Enqueue an I/O request for data transfer on a channel to the DSP.
+ * The direction (mode) is specified in the channel object. Note the DSP
+ * address is specified for channels opened in direct I/O mode.
+ */
+int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
+ u32 byte_size, u32 buf_size,
+ u32 dw_dsp_addr, u32 dw_arg)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ struct chnl_irp *chnl_packet_obj = NULL;
+ struct bridge_dev_context *dev_ctxt;
+ struct dev_object *dev_obj;
+ u8 dw_state;
+ bool is_eos;
+ struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
+ u8 *host_sys_buf = NULL;
+ bool sched_dpc = false;
+ u16 mb_val = 0;
+
+ is_eos = (byte_size == 0);
+
+ /* Validate args */
+ if (!host_buf || !pchnl) {
+ status = -EFAULT;
+ } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
+ status = -EPERM;
+ } else {
+ /*
+ * Check the channel state: only queue chirp if channel state
+ * allows it.
+ */
+ dw_state = pchnl->dw_state;
+ if (dw_state != CHNL_STATEREADY) {
+ if (dw_state & CHNL_STATECANCEL)
+ status = -ECANCELED;
+ else if ((dw_state & CHNL_STATEEOS) &&
+ CHNL_IS_OUTPUT(pchnl->chnl_mode))
+ status = -EPIPE;
+ else
+ /* No other possible states left */
+ DBC_ASSERT(0);
+ }
+ }
+
+ dev_obj = dev_get_first();
+ dev_get_bridge_context(dev_obj, &dev_ctxt);
+ if (!dev_ctxt)
+ status = -EFAULT;
+
+ if (status)
+ goto func_end;
+
+ if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
+ if (!(host_buf < (void *)USERMODE_ADDR)) {
+ host_sys_buf = host_buf;
+ goto func_cont;
+ }
+ /* if addr in user mode, then copy to kernel space */
+ host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
+ if (host_sys_buf == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ status = copy_from_user(host_sys_buf, host_buf,
+ buf_size);
+ if (status) {
+ kfree(host_sys_buf);
+ host_sys_buf = NULL;
+ status = -EFAULT;
+ goto func_end;
+ }
+ }
+ }
+func_cont:
+ /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
+ * channels. DPCCS is held to avoid race conditions with PCPY channels.
+ * If DPC is scheduled in process context (iosm_schedule) and any
+ * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
+ * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
+ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
+ if (pchnl->chnl_type == CHNL_PCPY) {
+ /* This is a processor-copy channel. */
+ if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ /* Check buffer size on output channels for fit. */
+ if (byte_size >
+ io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
+ status = -EINVAL;
+
+ }
+ }
+ if (!status) {
+ /* Get a free chirp: */
+ chnl_packet_obj =
+ (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
+ if (chnl_packet_obj == NULL)
+ status = -EIO;
+
+ }
+ if (!status) {
+ /* Enqueue the chirp on the chnl's IORequest queue: */
+ chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
+ host_buf;
+ if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
+ chnl_packet_obj->host_sys_buf = host_sys_buf;
+
+ /*
+ * Note: for dma chans dw_dsp_addr contains dsp address
+ * of SM buffer.
+ */
+ DBC_ASSERT(chnl_mgr_obj->word_size != 0);
+ /* DSP address */
+ chnl_packet_obj->dsp_tx_addr =
+ dw_dsp_addr / chnl_mgr_obj->word_size;
+ chnl_packet_obj->byte_size = byte_size;
+ chnl_packet_obj->buf_size = buf_size;
+ /* Only valid for output channel */
+ chnl_packet_obj->dw_arg = dw_arg;
+ chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
+ CHNL_IOCSTATCOMPLETE);
+ lst_put_tail(pchnl->pio_requests,
+ (struct list_head *)chnl_packet_obj);
+ pchnl->cio_reqs++;
+ DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
+ /*
+ * If end of stream, update the channel state to prevent
+ * more IOR's.
+ */
+ if (is_eos)
+ pchnl->dw_state |= CHNL_STATEEOS;
+
+ /* Legacy DSM Processor-Copy */
+ DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
+ /* Request IO from the DSP */
+ io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
+ (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
+ IO_OUTPUT), &mb_val);
+ sched_dpc = true;
+
+ }
+ omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
+ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ if (mb_val != 0)
+ sm_interrupt_dsp(dev_ctxt, mb_val);
+
+ /* Schedule a DPC, to do the actual data transfer */
+ if (sched_dpc)
+ iosm_schedule(chnl_mgr_obj->hio_mgr);
+
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_cancel_io ========
+ * Return all I/O requests to the client which have not yet been
+ * transferred. The channel's I/O completion object is
+ * signalled, and all the I/O requests are queued as IOC's, with the
+ * status field set to CHNL_IOCSTATCANCEL.
+ * This call is typically used in abort situations, and is a prelude to
+ * chnl_close();
+ */
+int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ u32 chnl_id = -1;
+ s8 chnl_mode;
+ struct chnl_irp *chnl_packet_obj;
+ struct chnl_mgr *chnl_mgr_obj = NULL;
+
+ /* Check args: */
+ if (pchnl && pchnl->chnl_mgr_obj) {
+ chnl_id = pchnl->chnl_id;
+ chnl_mode = pchnl->chnl_mode;
+ chnl_mgr_obj = pchnl->chnl_mgr_obj;
+ } else {
+ status = -EFAULT;
+ }
+ if (status)
+ goto func_end;
+
+ /* Mark this channel as cancelled, to prevent further IORequests or
+ * dispatching. */
+ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ pchnl->dw_state |= CHNL_STATECANCEL;
+ if (LST_IS_EMPTY(pchnl->pio_requests))
+ goto func_cont;
+
+ if (pchnl->chnl_type == CHNL_PCPY) {
+ /* Indicate we have no more buffers available for transfer: */
+ if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
+ io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
+ } else {
+ /* Record that we no longer have output buffers
+ * available: */
+ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+ }
+ }
+ /* Move all IOR's to IOC queue: */
+ while (!LST_IS_EMPTY(pchnl->pio_requests)) {
+ chnl_packet_obj =
+ (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
+ if (chnl_packet_obj) {
+ chnl_packet_obj->byte_size = 0;
+ chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
+ lst_put_tail(pchnl->pio_completions,
+ (struct list_head *)chnl_packet_obj);
+ pchnl->cio_cs++;
+ pchnl->cio_reqs--;
+ DBC_ASSERT(pchnl->cio_reqs >= 0);
+ }
+ }
+func_cont:
+ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_close ========
+ * Purpose:
+ * Ensures all pending I/O on this channel is cancelled, discards all
+ * queued I/O completion notifications, then frees the resources allocated
+ * for this channel, and makes the corresponding logical channel id
+ * available for subsequent use.
+ */
+int bridge_chnl_close(struct chnl_object *chnl_obj)
+{
+ int status;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+
+ /* Check args: */
+ if (!pchnl) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+ {
+ /* Cancel IO: this ensures no further IO requests or
+ * notifications. */
+ status = bridge_chnl_cancel_io(chnl_obj);
+ }
+func_cont:
+ if (!status) {
+ /* Assert I/O on this channel is now cancelled: Protects
+ * from io_dpc. */
+ DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
+ /* Invalidate channel object: Protects from
+ * CHNL_GetIOCompletion(). */
+ /* Free the slot in the channel manager: */
+ pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
+ spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ pchnl->chnl_mgr_obj->open_channels -= 1;
+ spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ if (pchnl->ntfy_obj) {
+ ntfy_delete(pchnl->ntfy_obj);
+ kfree(pchnl->ntfy_obj);
+ pchnl->ntfy_obj = NULL;
+ }
+ /* Reset channel event: (NOTE: user_event freed in user
+ * context.). */
+ if (pchnl->sync_event) {
+ sync_reset_event(pchnl->sync_event);
+ kfree(pchnl->sync_event);
+ pchnl->sync_event = NULL;
+ }
+ /* Free I/O request and I/O completion queues: */
+ if (pchnl->pio_completions) {
+ free_chirp_list(pchnl->pio_completions);
+ pchnl->pio_completions = NULL;
+ pchnl->cio_cs = 0;
+ }
+ if (pchnl->pio_requests) {
+ free_chirp_list(pchnl->pio_requests);
+ pchnl->pio_requests = NULL;
+ pchnl->cio_reqs = 0;
+ }
+ if (pchnl->free_packets_list) {
+ free_chirp_list(pchnl->free_packets_list);
+ pchnl->free_packets_list = NULL;
+ }
+ /* Release channel object. */
+ kfree(pchnl);
+ pchnl = NULL;
+ }
+ DBC_ENSURE(status || !pchnl);
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_create ========
+ * Create a channel manager object, responsible for opening new channels
+ * and closing old ones for a given board.
+ */
+int bridge_chnl_create(struct chnl_mgr **channel_mgr,
+ struct dev_object *hdev_obj,
+ const struct chnl_mgrattrs *mgr_attrts)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = NULL;
+ u8 max_channels;
+
+ /* Check DBC requirements: */
+ DBC_REQUIRE(channel_mgr != NULL);
+ DBC_REQUIRE(mgr_attrts != NULL);
+ DBC_REQUIRE(mgr_attrts->max_channels > 0);
+ DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
+ DBC_REQUIRE(mgr_attrts->word_size != 0);
+
+ /* Allocate channel manager object */
+ chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
+ if (chnl_mgr_obj) {
+ /*
+ * The max_channels attr must equal the # of supported chnls for
+ * each transport(# chnls for PCPY = DDMA = ZCPY): i.e.
+ * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
+ * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
+ */
+ DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
+ max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
+ /* Create array of channels */
+ chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
+ * max_channels, GFP_KERNEL);
+ if (chnl_mgr_obj->ap_channel) {
+ /* Initialize chnl_mgr object */
+ chnl_mgr_obj->dw_type = CHNL_TYPESM;
+ chnl_mgr_obj->word_size = mgr_attrts->word_size;
+ /* Total # chnls supported */
+ chnl_mgr_obj->max_channels = max_channels;
+ chnl_mgr_obj->open_channels = 0;
+ chnl_mgr_obj->dw_output_mask = 0;
+ chnl_mgr_obj->dw_last_output = 0;
+ chnl_mgr_obj->hdev_obj = hdev_obj;
+ spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
+ } else {
+ status = -ENOMEM;
+ }
+ } else {
+ status = -ENOMEM;
+ }
+
+ if (status) {
+ bridge_chnl_destroy(chnl_mgr_obj);
+ *channel_mgr = NULL;
+ } else {
+ /* Return channel manager object to caller... */
+ *channel_mgr = chnl_mgr_obj;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_destroy ========
+ * Purpose:
+ * Close all open channels, and destroy the channel manager.
+ */
+int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
+ u32 chnl_id;
+
+ if (hchnl_mgr) {
+ /* Close all open channels: */
+ for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
+ chnl_id++) {
+ status =
+ bridge_chnl_close(chnl_mgr_obj->ap_channel
+ [chnl_id]);
+ if (status)
+ dev_dbg(bridge, "%s: Error status 0x%x\n",
+ __func__, status);
+ }
+
+ /* Free channel manager object: */
+ kfree(chnl_mgr_obj->ap_channel);
+
+ /* Set hchnl_mgr to NULL in device object. */
+ dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
+ /* Free this Chnl Mgr object: */
+ kfree(hchnl_mgr);
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_flush_io ========
+ * purpose:
+ * Flushes all the outstanding data requests on a channel.
+ */
+int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ s8 chnl_mode = -1;
+ struct chnl_mgr *chnl_mgr_obj;
+ struct chnl_ioc chnl_ioc_obj;
+ /* Check args: */
+ if (pchnl) {
+ if ((timeout == CHNL_IOCNOWAIT)
+ && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ status = -EINVAL;
+ } else {
+ chnl_mode = pchnl->chnl_mode;
+ chnl_mgr_obj = pchnl->chnl_mgr_obj;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ if (!status) {
+ /* Note: Currently, if another thread continues to add IO
+ * requests to this channel, this function will continue to
+ * flush all such queued IO requests. */
+ if (CHNL_IS_OUTPUT(chnl_mode)
+ && (pchnl->chnl_type == CHNL_PCPY)) {
+ /* Wait for IO completions, up to the specified
+ * timeout: */
+ while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
+ status = bridge_chnl_get_ioc(chnl_obj,
+ timeout, &chnl_ioc_obj);
+ if (status)
+ continue;
+
+ if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
+ status = -ETIMEDOUT;
+
+ }
+ } else {
+ status = bridge_chnl_cancel_io(chnl_obj);
+ /* Now, leave the channel in the ready state: */
+ pchnl->dw_state &= ~CHNL_STATECANCEL;
+ }
+ }
+ DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_get_info ========
+ * Purpose:
+ * Retrieve information related to a channel.
+ */
+int bridge_chnl_get_info(struct chnl_object *chnl_obj,
+ struct chnl_info *channel_info)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ if (channel_info != NULL) {
+ if (pchnl) {
+ /* Return the requested information: */
+ channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
+ channel_info->event_obj = pchnl->user_event;
+ channel_info->cnhl_id = pchnl->chnl_id;
+ channel_info->dw_mode = pchnl->chnl_mode;
+ channel_info->bytes_tx = pchnl->bytes_moved;
+ channel_info->process = pchnl->process;
+ channel_info->sync_event = pchnl->sync_event;
+ channel_info->cio_cs = pchnl->cio_cs;
+ channel_info->cio_reqs = pchnl->cio_reqs;
+ channel_info->dw_state = pchnl->dw_state;
+ } else {
+ status = -EFAULT;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_get_ioc ========
+ * Optionally wait for I/O completion on a channel. Dequeue an I/O
+ * completion record, which contains information about the completed
+ * I/O request.
+ * Note: Ensures Channel Invariant (see notes above).
+ */
+int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
+ struct chnl_ioc *chan_ioc)
+{
+ int status = 0;
+ struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+ struct chnl_irp *chnl_packet_obj;
+ int stat_sync;
+ bool dequeue_ioc = true;
+ struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
+ u8 *host_sys_buf = NULL;
+ struct bridge_dev_context *dev_ctxt;
+ struct dev_object *dev_obj;
+
+ /* Check args: */
+ if (!chan_ioc || !pchnl) {
+ status = -EFAULT;
+ } else if (timeout == CHNL_IOCNOWAIT) {
+ if (LST_IS_EMPTY(pchnl->pio_completions))
+ status = -EREMOTEIO;
+
+ }
+
+ dev_obj = dev_get_first();
+ dev_get_bridge_context(dev_obj, &dev_ctxt);
+ if (!dev_ctxt)
+ status = -EFAULT;
+
+ if (status)
+ goto func_end;
+
+ ioc.status = CHNL_IOCSTATCOMPLETE;
+ if (timeout !=
+ CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
+ if (timeout == CHNL_IOCINFINITE)
+ timeout = SYNC_INFINITE;
+
+ stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
+ if (stat_sync == -ETIME) {
+ /* No response from DSP */
+ ioc.status |= CHNL_IOCSTATTIMEOUT;
+ dequeue_ioc = false;
+ } else if (stat_sync == -EPERM) {
+ /* This can occur when the user mode thread is
+ * aborted (^C), or when _VWIN32_WaitSingleObject()
+ * fails due to unknown causes. */
+ /* Even though Wait failed, there may be something in
+ * the Q: */
+ if (LST_IS_EMPTY(pchnl->pio_completions)) {
+ ioc.status |= CHNL_IOCSTATCANCEL;
+ dequeue_ioc = false;
+ }
+ }
+ }
+ /* See comment in AddIOReq */
+ spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
+ if (dequeue_ioc) {
+ /* Dequeue IOC and set chan_ioc; */
+ DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
+ chnl_packet_obj =
+ (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
+ /* Update chan_ioc from channel state and chirp: */
+ if (chnl_packet_obj) {
+ pchnl->cio_cs--;
+ /* If this is a zero-copy channel, then set IOC's pbuf
+ * to the DSP's address. This DSP address will get
+ * translated to user's virtual addr later. */
+ {
+ host_sys_buf = chnl_packet_obj->host_sys_buf;
+ ioc.pbuf = chnl_packet_obj->host_user_buf;
+ }
+ ioc.byte_size = chnl_packet_obj->byte_size;
+ ioc.buf_size = chnl_packet_obj->buf_size;
+ ioc.dw_arg = chnl_packet_obj->dw_arg;
+ ioc.status |= chnl_packet_obj->status;
+ /* Place the used chirp on the free list: */
+ lst_put_tail(pchnl->free_packets_list,
+ (struct list_head *)chnl_packet_obj);
+ } else {
+ ioc.pbuf = NULL;
+ ioc.byte_size = 0;
+ }
+ } else {
+ ioc.pbuf = NULL;
+ ioc.byte_size = 0;
+ ioc.dw_arg = 0;
+ ioc.buf_size = 0;
+ }
+ /* Ensure invariant: If any IOC's are queued for this channel... */
+ if (!LST_IS_EMPTY(pchnl->pio_completions)) {
+ /* Since DSPStream_Reclaim() does not take a timeout
+ * parameter, we pass the stream's timeout value to
+ * bridge_chnl_get_ioc. We cannot determine whether or not
+ * we have waited in User mode. Since the stream's timeout
+ * value may be non-zero, we still have to set the event.
+ * Therefore, this optimization is taken out.
+ *
+ * if (timeout == CHNL_IOCNOWAIT) {
+ * ... ensure event is set..
+ * sync_set_event(pchnl->sync_event);
+ * } */
+ sync_set_event(pchnl->sync_event);
+ } else {
+ /* else, if list is empty, ensure event is reset. */
+ sync_reset_event(pchnl->sync_event);
+ }
+ omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
+ spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+ if (dequeue_ioc
+ && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
+ if (!(ioc.pbuf < (void *)USERMODE_ADDR))
+ goto func_cont;
+
+ /* If the addr is in user mode, then copy it */
+ if (!host_sys_buf || !ioc.pbuf) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+ if (!CHNL_IS_INPUT(pchnl->chnl_mode))
+ goto func_cont1;
+
+ /*host_user_buf */
+ status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
+ if (status) {
+ if (current->flags & PF_EXITING)
+ status = 0;
+ }
+ if (status)
+ status = -EFAULT;
+func_cont1:
+ kfree(host_sys_buf);
+ }
+func_cont:
+ /* Update User's IOC block: */
+ *chan_ioc = ioc;
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_get_mgr_info ========
+ * Retrieve information related to the channel manager.
+ */
+int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
+ struct chnl_mgrinfo *mgr_info)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
+
+ if (mgr_info != NULL) {
+ if (ch_id <= CHNL_MAXCHANNELS) {
+ if (hchnl_mgr) {
+ /* Return the requested information: */
+ mgr_info->chnl_obj =
+ chnl_mgr_obj->ap_channel[ch_id];
+ mgr_info->open_channels =
+ chnl_mgr_obj->open_channels;
+ mgr_info->dw_type = chnl_mgr_obj->dw_type;
+ /* total # of chnls */
+ mgr_info->max_channels =
+ chnl_mgr_obj->max_channels;
+ } else {
+ status = -EFAULT;
+ }
+ } else {
+ status = -ECHRNG;
+ }
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_idle ========
+ * Idles a particular channel.
+ */
+int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
+ bool flush_data)
+{
+ s8 chnl_mode;
+ struct chnl_mgr *chnl_mgr_obj;
+ int status = 0;
+
+ DBC_REQUIRE(chnl_obj);
+
+ chnl_mode = chnl_obj->chnl_mode;
+ chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
+
+ if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
+ /* Wait for IO completions, up to the specified timeout: */
+ status = bridge_chnl_flush_io(chnl_obj, timeout);
+ } else {
+ status = bridge_chnl_cancel_io(chnl_obj);
+
+ /* Reset the byte count and put channel back in ready state. */
+ chnl_obj->bytes_moved = 0;
+ chnl_obj->dw_state &= ~CHNL_STATECANCEL;
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_open ========
+ * Open a new half-duplex channel to the DSP board.
+ */
+int bridge_chnl_open(struct chnl_object **chnl,
+ struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
+ u32 ch_id, const struct chnl_attr *pattrs)
+{
+ int status = 0;
+ struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
+ struct chnl_object *pchnl = NULL;
+ struct sync_object *sync_event = NULL;
+ /* Ensure DBC requirements: */
+ DBC_REQUIRE(chnl != NULL);
+ DBC_REQUIRE(pattrs != NULL);
+ DBC_REQUIRE(hchnl_mgr != NULL);
+ *chnl = NULL;
+ /* Validate Args: */
+ if (pattrs->uio_reqs == 0) {
+ status = -EINVAL;
+ } else {
+ if (!hchnl_mgr) {
+ status = -EFAULT;
+ } else {
+ if (ch_id != CHNL_PICKFREE) {
+ if (ch_id >= chnl_mgr_obj->max_channels)
+ status = -ECHRNG;
+ else if (chnl_mgr_obj->ap_channel[ch_id] !=
+ NULL)
+ status = -EALREADY;
+ } else {
+ /* Check for free channel */
+ status =
+ search_free_channel(chnl_mgr_obj, &ch_id);
+ }
+ }
+ }
+ if (status)
+ goto func_end;
+
+ DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
+ /* Create channel object: */
+ pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
+ if (!pchnl) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ /* Protect queues from io_dpc: */
+ pchnl->dw_state = CHNL_STATECANCEL;
+ /* Allocate initial IOR and IOC queues: */
+ pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
+ pchnl->pio_requests = create_chirp_list(0);
+ pchnl->pio_completions = create_chirp_list(0);
+ pchnl->chnl_packets = pattrs->uio_reqs;
+ pchnl->cio_cs = 0;
+ pchnl->cio_reqs = 0;
+ sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (sync_event)
+ sync_init_event(sync_event);
+ else
+ status = -ENOMEM;
+
+ if (!status) {
+ pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+ GFP_KERNEL);
+ if (pchnl->ntfy_obj)
+ ntfy_init(pchnl->ntfy_obj);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ if (pchnl->pio_completions && pchnl->pio_requests &&
+ pchnl->free_packets_list) {
+ /* Initialize CHNL object fields: */
+ pchnl->chnl_mgr_obj = chnl_mgr_obj;
+ pchnl->chnl_id = ch_id;
+ pchnl->chnl_mode = chnl_mode;
+ pchnl->user_event = sync_event;
+ pchnl->sync_event = sync_event;
+ /* Get the process handle */
+ pchnl->process = current->tgid;
+ pchnl->pcb_arg = 0;
+ pchnl->bytes_moved = 0;
+ /* Default to proc-copy */
+ pchnl->chnl_type = CHNL_PCPY;
+ } else {
+ status = -ENOMEM;
+ }
+ }
+
+ if (status) {
+ /* Free memory */
+ if (pchnl->pio_completions) {
+ free_chirp_list(pchnl->pio_completions);
+ pchnl->pio_completions = NULL;
+ pchnl->cio_cs = 0;
+ }
+ if (pchnl->pio_requests) {
+ free_chirp_list(pchnl->pio_requests);
+ pchnl->pio_requests = NULL;
+ }
+ if (pchnl->free_packets_list) {
+ free_chirp_list(pchnl->free_packets_list);
+ pchnl->free_packets_list = NULL;
+ }
+ kfree(sync_event);
+ sync_event = NULL;
+
+ if (pchnl->ntfy_obj) {
+ ntfy_delete(pchnl->ntfy_obj);
+ kfree(pchnl->ntfy_obj);
+ pchnl->ntfy_obj = NULL;
+ }
+ kfree(pchnl);
+ } else {
+ /* Insert channel object in channel manager: */
+ chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
+ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ chnl_mgr_obj->open_channels++;
+ spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+ /* Return result... */
+ pchnl->dw_state = CHNL_STATEREADY;
+ *chnl = pchnl;
+ }
+func_end:
+ DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
+ return status;
+}
+
+/*
+ * ======== bridge_chnl_register_notify ========
+ * Registers for events on a particular channel.
+ */
+int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ int status = 0;
+
+ DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
+
+ if (event_mask)
+ status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
+ event_mask, notify_type);
+ else
+ status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
+
+ return status;
+}
+
+/*
+ * ======== create_chirp_list ========
+ * Purpose:
+ * Initialize a queue of channel I/O Request/Completion packets.
+ * Parameters:
+ * chirps: Number of Chirps to allocate.
+ * Returns:
+ * Pointer to queue of IRPs, or NULL.
+ * Requires:
+ * Ensures:
+ */
+static struct lst_list *create_chirp_list(u32 chirps)
+{
+ struct lst_list *chirp_list;
+ struct chnl_irp *chnl_packet_obj;
+ u32 i;
+
+ chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+
+ if (chirp_list) {
+ INIT_LIST_HEAD(&chirp_list->head);
+ /* Make N chirps and place on queue. */
+ for (i = 0; (i < chirps)
+ && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
+ lst_put_tail(chirp_list,
+ (struct list_head *)chnl_packet_obj);
+ }
+
+ /* If we couldn't allocate all chirps, free those allocated: */
+ if (i != chirps) {
+ free_chirp_list(chirp_list);
+ chirp_list = NULL;
+ }
+ }
+
+ return chirp_list;
+}
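
create_chirp_list() allocates its packets all-or-nothing: if any single allocation fails, everything built so far is released and NULL is returned. A minimal, self-contained userspace sketch of the same pattern, using a plain singly linked list instead of the driver's lst_list API (struct packet and alloc_packet_list are hypothetical names, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

struct packet {				/* stand-in for struct chnl_irp */
	struct packet *next;
	char payload[64];
};

/* Allocate a singly linked list of 'count' packets, or return NULL if any
 * allocation fails (everything allocated so far is released first). */
static struct packet *alloc_packet_list(unsigned int count)
{
	struct packet *head = NULL, *p;
	unsigned int i;

	for (i = 0; i < count; i++) {
		p = calloc(1, sizeof(*p));
		if (!p) {
			/* Partial failure: free what we already built */
			while (head) {
				p = head;
				head = head->next;
				free(p);
			}
			return NULL;
		}
		p->next = head;		/* push on front; order is irrelevant here */
		head = p;
	}
	return head;
}

int main(void)
{
	struct packet *list = alloc_packet_list(8);

	printf("list %s\n", list ? "allocated" : "not allocated");
	while (list) {
		struct packet *p = list;

		list = list->next;
		free(p);
	}
	return 0;
}
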
+
+/*
+ * ======== free_chirp_list ========
+ * Purpose:
+ * Free the queue of Chirps.
+ */
+static void free_chirp_list(struct lst_list *chirp_list)
+{
+ DBC_REQUIRE(chirp_list != NULL);
+
+ while (!LST_IS_EMPTY(chirp_list))
+ kfree(lst_get_head(chirp_list));
+
+ kfree(chirp_list);
+}
+
+/*
+ * ======== make_new_chirp ========
+ * Allocate the memory for a new channel IRP.
+ */
+static struct chnl_irp *make_new_chirp(void)
+{
+ struct chnl_irp *chnl_packet_obj;
+
+ chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
+ if (chnl_packet_obj != NULL) {
+ /* lst_init_elem only resets the list's member values. */
+ lst_init_elem(&chnl_packet_obj->link);
+ }
+
+ return chnl_packet_obj;
+}
+
+/*
+ * ======== search_free_channel ========
+ * Search for a free channel slot in the array of channel pointers.
+ */
+static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
+ u32 *chnl)
+{
+ int status = -ENOSR;
+ u32 i;
+
+ DBC_REQUIRE(chnl_mgr_obj);
+
+ for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
+ if (chnl_mgr_obj->ap_channel[i] == NULL) {
+ status = 0;
+ *chnl = i;
+ break;
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
new file mode 100644
index 00000000000..5b1a0c5bb14
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -0,0 +1,422 @@
+/*
+ * dsp-clock.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Clock and Timer services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <plat/dmtimer.h>
+#include <plat/mcbsp.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/dev.h>
+#include "_tiomap.h"
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/clk.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+
+#define OMAP_SSI_OFFSET 0x58000
+#define OMAP_SSI_SIZE 0x1000
+#define OMAP_SSI_SYSCONFIG_OFFSET 0x10
+
+#define SSI_AUTOIDLE (1 << 0)
+#define SSI_SIDLE_SMARTIDLE (2 << 3)
+#define SSI_MIDLE_NOIDLE (1 << 12)
+
+/* Clk types requested by the dsp */
+#define IVA2_CLK 0
+#define GPT_CLK 1
+#define WDT_CLK 2
+#define MCBSP_CLK 3
+#define SSI_CLK 4
+
+/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
+#define DMT_ID(id) ((id) + 4)
+
+/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
+#define MCBSP_ID(id) ((id) - 6)
+
+static struct omap_dm_timer *timer[4];
+
+struct clk *iva2_clk;
+
+struct dsp_ssi {
+ struct clk *sst_fck;
+ struct clk *ssr_fck;
+ struct clk *ick;
+};
+
+static struct dsp_ssi ssi;
+
+static u32 dsp_clocks;
+
+static inline u32 is_dsp_clk_active(u32 clk, u8 id)
+{
+ return clk & (1 << id);
+}
+
+static inline void set_dsp_clk_active(u32 *clk, u8 id)
+{
+ *clk |= (1 << id);
+}
+
+static inline void set_dsp_clk_inactive(u32 *clk, u8 id)
+{
+ *clk &= ~(1 << id);
+}
+
+static s8 get_clk_type(u8 id)
+{
+ s8 type;
+
+ if (id == DSP_CLK_IVA2)
+ type = IVA2_CLK;
+ else if (id <= DSP_CLK_GPT8)
+ type = GPT_CLK;
+ else if (id == DSP_CLK_WDT3)
+ type = WDT_CLK;
+ else if (id <= DSP_CLK_MCBSP5)
+ type = MCBSP_CLK;
+ else if (id == DSP_CLK_SSI)
+ type = SSI_CLK;
+ else
+ type = -1;
+
+ return type;
+}
+
+/*
+ * ======== dsp_clk_exit ========
+ * Purpose:
+ * Cleanup CLK module.
+ */
+void dsp_clk_exit(void)
+{
+ dsp_clock_disable_all(dsp_clocks);
+
+ clk_put(iva2_clk);
+ clk_put(ssi.sst_fck);
+ clk_put(ssi.ssr_fck);
+ clk_put(ssi.ick);
+}
+
+/*
+ * ======== dsp_clk_init ========
+ * Purpose:
+ * Initialize CLK module.
+ */
+void dsp_clk_init(void)
+{
+ static struct platform_device dspbridge_device;
+
+ dspbridge_device.dev.bus = &platform_bus_type;
+
+ iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
+ if (IS_ERR(iva2_clk))
+ dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+
+ ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
+ ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
+ ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
+
+ if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+ dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
+ ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+}
+
+#ifdef CONFIG_OMAP_MCBSP
+static void mcbsp_clk_prepare(bool flag, u8 id)
+{
+ struct cfg_hostres *resources;
+ struct dev_object *hdev_object = NULL;
+ struct bridge_dev_context *bridge_context = NULL;
+ u32 val;
+
+ hdev_object = (struct dev_object *)drv_get_first_dev_object();
+ if (!hdev_object)
+ return;
+
+ dev_get_bridge_context(hdev_object, &bridge_context);
+ if (!bridge_context)
+ return;
+
+ resources = bridge_context->resources;
+ if (!resources)
+ return;
+
+ if (flag) {
+ if (id == DSP_CLK_MCBSP1) {
+ /* set MCBSP1_CLKS, on McBSP1 ON */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val |= 1 << 2;
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ } else if (id == DSP_CLK_MCBSP2) {
+ /* set MCBSP2_CLKS, on McBSP2 ON */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val |= 1 << 6;
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ }
+ } else {
+ if (id == DSP_CLK_MCBSP1) {
+ /* clear MCBSP1_CLKS, on McBSP1 OFF */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val &= ~(1 << 2);
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ } else if (id == DSP_CLK_MCBSP2) {
+ /* clear MCBSP2_CLKS, on McBSP2 OFF */
+ val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+ val &= ~(1 << 6);
+ __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+ }
+ }
+}
+#endif
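
mcbsp_clk_prepare() above is a read-modify-write of a single bit in the register at offset 0x274 from the system-control base (which appears to be CONTROL_DEVCONF0 on OMAP3, with MCBSP1_CLKS at bit 2 and MCBSP2_CLKS at bit 6). The same set/clear-bit idiom, shown on an ordinary variable rather than through ioremap() and __raw_readl()/__raw_writel(), is roughly:

#include <stdint.h>
#include <stdio.h>

/* Set or clear one bit in a 32-bit register image.  On real hardware 'reg'
 * would point into an ioremap()ed region and the accesses would go through
 * the MMIO accessors. */
static void set_reg_bit(uint32_t *reg, unsigned int bit, int on)
{
	uint32_t val = *reg;		/* read */

	if (on)
		val |= 1u << bit;	/* modify: set the bit */
	else
		val &= ~(1u << bit);	/* modify: clear the bit */

	*reg = val;			/* write */
}

int main(void)
{
	uint32_t devconf0 = 0;

	set_reg_bit(&devconf0, 2, 1);	/* MCBSP1_CLKS on  (bit 2) */
	set_reg_bit(&devconf0, 6, 1);	/* MCBSP2_CLKS on  (bit 6) */
	set_reg_bit(&devconf0, 2, 0);	/* MCBSP1_CLKS off */
	printf("devconf0 = 0x%08x\n", devconf0);
	return 0;
}
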
+
+/**
+ * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout
+ * @clk_id: GP Timer clock id.
+ * @load: Overflow value.
+ *
+ * Sets an overflow interrupt for the desired GPT waiting for a timeout
+ * of 5 msecs for the interrupt to occur.
+ */
+void dsp_gpt_wait_overflow(short int clk_id, unsigned int load)
+{
+ struct omap_dm_timer *gpt = timer[clk_id - 1];
+ unsigned long timeout;
+
+ if (!gpt)
+ return;
+
+ /* Enable overflow interrupt */
+ omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
+
+ /*
+ * Set counter value to overflow counter after
+ * one tick and start timer.
+ */
+ omap_dm_timer_set_load_start(gpt, 0, load);
+
+ /* Wait 80us for timer to overflow */
+ udelay(80);
+
+ timeout = jiffies + msecs_to_jiffies(5);
+ /* Check interrupt status and wait for the interrupt, up to the deadline */
+ while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) {
+ if (time_is_before_jiffies(timeout)) {
+ pr_err("%s: GPTimer interrupt failed\n", __func__);
+ break;
+ }
+ }
+}
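
The loop above polls the timer status against a fixed deadline: the deadline is computed once from jiffies, and the loop gives up once it has passed. A standalone sketch of the same bounded-poll pattern, using clock_gettime(CLOCK_MONOTONIC) in place of jiffies and a stand-in condition (poll_with_deadline and always_false are made-up names):

#include <stdio.h>
#include <time.h>

/* Return 1 once the (stand-in) condition becomes true, 0 on timeout. */
static int poll_with_deadline(int (*cond)(void), long timeout_ms)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	while (!cond()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return 0;	/* deadline passed: give up */
	}
	return 1;
}

static int always_false(void) { return 0; }

int main(void)
{
	/* With a condition that never comes true, this returns after ~5 ms. */
	printf("condition met: %d\n", poll_with_deadline(always_false, 5));
	return 0;
}
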
+
+/*
+ * ======== dsp_clk_enable ========
+ * Purpose:
+ * Enable the clock.
+ *
+ */
+int dsp_clk_enable(enum dsp_clk_id clk_id)
+{
+ int status = 0;
+
+ if (is_dsp_clk_active(dsp_clocks, clk_id)) {
+ dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id);
+ goto out;
+ }
+
+ switch (get_clk_type(clk_id)) {
+ case IVA2_CLK:
+ clk_enable(iva2_clk);
+ break;
+ case GPT_CLK:
+ timer[clk_id - 1] =
+ omap_dm_timer_request_specific(DMT_ID(clk_id));
+ break;
+#ifdef CONFIG_OMAP_MCBSP
+ case MCBSP_CLK:
+ mcbsp_clk_prepare(true, clk_id);
+ omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
+ omap_mcbsp_request(MCBSP_ID(clk_id));
+ break;
+#endif
+ case WDT_CLK:
+ dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n");
+ break;
+ case SSI_CLK:
+ clk_enable(ssi.sst_fck);
+ clk_enable(ssi.ssr_fck);
+ clk_enable(ssi.ick);
+
+ /*
+ * The SSI module needs to be configured so that its master
+ * interface is not in forced-idle mode. If it is forced idle,
+ * the SSI module transitions to standby, and the client on the
+ * DSP hangs waiting for the SSI module to become active after
+ * the clocks are enabled.
+ */
+ ssi_clk_prepare(true);
+ break;
+ default:
+ dev_err(bridge, "Invalid clock id for enable\n");
+ status = -EPERM;
+ }
+
+ if (!status)
+ set_dsp_clk_active(&dsp_clocks, clk_id);
+
+out:
+ return status;
+}
+
+/**
+ * dsp_clock_enable_all - Enable clocks used by the DSP
+ * @dsp_per_clocks: Bitmask of the DSP peripheral clocks to enable
+ *
+ * This function enables all the peripheral clocks that were requested by DSP.
+ */
+u32 dsp_clock_enable_all(u32 dsp_per_clocks)
+{
+ u32 clk_id;
+ u32 status = -EPERM;
+
+ for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
+ if (is_dsp_clk_active(dsp_per_clocks, clk_id))
+ status = dsp_clk_enable(clk_id);
+ }
+
+ return status;
+}
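
dsp_clocks is a simple bitmask: bit N is set while clock id N is active, and dsp_clock_enable_all()/dsp_clock_disable_all() just walk every id and act on the set bits. A self-contained sketch of that bookkeeping (the 16-id limit here is arbitrary, not the driver's DSP_CLK_NOT_DEFINED):

#include <stdint.h>
#include <stdio.h>

#define NUM_CLK_IDS 16			/* arbitrary limit for the sketch */

static uint32_t active_clocks;		/* bit N set <=> clock N enabled */

static void clk_enable_id(unsigned int id)  { active_clocks |=  1u << id; }
static void clk_disable_id(unsigned int id) { active_clocks &= ~(1u << id); }

static int clk_is_active(uint32_t mask, unsigned int id)
{
	return (mask >> id) & 1u;
}

/* Disable every clock whose bit is set in 'mask' -- the same walk that
 * dsp_clock_disable_all() performs over dsp_per_clocks. */
static void disable_all(uint32_t mask)
{
	unsigned int id;

	for (id = 0; id < NUM_CLK_IDS; id++)
		if (clk_is_active(mask, id))
			clk_disable_id(id);
}

int main(void)
{
	clk_enable_id(0);		/* e.g. the IVA2 clock */
	clk_enable_id(3);		/* e.g. a GP timer clock */
	printf("before: 0x%08x\n", active_clocks);
	disable_all(active_clocks);
	printf("after:  0x%08x\n", active_clocks);
	return 0;
}
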
+
+/*
+ * ======== dsp_clk_disable ========
+ * Purpose:
+ * Disable the clock.
+ *
+ */
+int dsp_clk_disable(enum dsp_clk_id clk_id)
+{
+ int status = 0;
+
+ if (!is_dsp_clk_active(dsp_clocks, clk_id)) {
+ dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id);
+ goto out;
+ }
+
+ switch (get_clk_type(clk_id)) {
+ case IVA2_CLK:
+ clk_disable(iva2_clk);
+ break;
+ case GPT_CLK:
+ omap_dm_timer_free(timer[clk_id - 1]);
+ break;
+#ifdef CONFIG_OMAP_MCBSP
+ case MCBSP_CLK:
+ mcbsp_clk_prepare(false, clk_id);
+ omap_mcbsp_free(MCBSP_ID(clk_id));
+ break;
+#endif
+ case WDT_CLK:
+ dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n");
+ break;
+ case SSI_CLK:
+ ssi_clk_prepare(false);
+ clk_disable(ssi.sst_fck);
+ clk_disable(ssi.ssr_fck);
+ clk_disable(ssi.ick);
+ break;
+ default:
+ dev_err(bridge, "Invalid clock id for disable\n");
+ status = -EPERM;
+ }
+
+ if (!status)
+ set_dsp_clk_inactive(&dsp_clocks, clk_id);
+
+out:
+ return status;
+}
+
+/**
+ * dsp_clock_disable_all - Disable all active clocks
+ * @dsp_per_clocks: Bitmask of the DSP peripheral clocks to disable
+ *
+ * This function disables all the peripheral clocks that were enabled by DSP.
+ * It is meant to be called only when DSP is entering hibernation or when DSP
+ * is in error state.
+ */
+u32 dsp_clock_disable_all(u32 dsp_per_clocks)
+{
+ u32 clk_id;
+ u32 status = -EPERM;
+
+ for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
+ if (is_dsp_clk_active(dsp_per_clocks, clk_id))
+ status = dsp_clk_disable(clk_id);
+ }
+
+ return status;
+}
+
+u32 dsp_clk_get_iva2_rate(void)
+{
+ u32 clk_speed_khz;
+
+ clk_speed_khz = clk_get_rate(iva2_clk);
+ clk_speed_khz /= 1000;
+ dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, clk_speed_khz);
+
+ return clk_speed_khz;
+}
+
+void ssi_clk_prepare(bool flag)
+{
+ void __iomem *ssi_base;
+ unsigned int value;
+
+ ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE);
+ if (!ssi_base) {
+ pr_err("%s: error, SSI not configured\n", __func__);
+ return;
+ }
+
+ if (flag) {
+ /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to
+ * no idle
+ */
+ value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE;
+ } else {
+ /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to
+ * forced idle
+ */
+ value = SSI_AUTOIDLE;
+ }
+
+ __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET);
+ iounmap(ssi_base);
+}
+
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
new file mode 100644
index 00000000000..02c660dbcf6
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -0,0 +1,2333 @@
+/*
+ * io_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO dispatcher for a shared memory channel driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Channel Invariant:
+ * There is an important invariant condition which must be maintained per
+ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
+ * which may cause timeouts and/or failure of the sync_wait_on_event
+ * function.
+ */
+#include <linux/types.h>
+
+/* Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/workqueue.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* Services Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/sync.h>
+
+/* Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/* Bridge Driver */
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dspio.h>
+#include <dspbridge/dspioctl.h>
+#include <dspbridge/wdt.h>
+#include <_tiomap.h>
+#include <tiomap_io.h>
+#include <_tiomap_pwr.h>
+
+/* Platform Manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/node.h>
+#include <dspbridge/dev.h>
+
+/* Others */
+#include <dspbridge/rms_sh.h>
+#include <dspbridge/mgr.h>
+#include <dspbridge/drv.h>
+#include "_cmm.h"
+#include "module_list.h"
+
+/* This */
+#include <dspbridge/io_sm.h>
+#include "_msg_sm.h"
+
+/* Defines, Data Structures, Typedefs */
+#define OUTPUTNOTREADY 0xffff
+#define NOTENABLED 0xffff /* Channel(s) not enabled */
+
+#define EXTEND "_EXT_END"
+
+#define SWAP_WORD(x) (x)
+#define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
+
+#define MAX_PM_REQS 32
+
+#define MMU_FAULT_HEAD1 0xa5a5a5a5
+#define MMU_FAULT_HEAD2 0x96969696
+#define POLL_MAX 1000
+#define MAX_MMU_DBGBUFF 10240
+
+/* IO Manager: only one created per board */
+struct io_mgr {
+ /* These fields must be the first fields in an io_mgr struct */
+ /* Bridge device context */
+ struct bridge_dev_context *hbridge_context;
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *hdev_obj; /* Device this board represents */
+
+ /* These fields initialized in bridge_io_create() */
+ struct chnl_mgr *hchnl_mgr;
+ struct shm *shared_mem; /* Shared Memory control */
+ u8 *input; /* Address of input channel */
+ u8 *output; /* Address of output channel */
+ struct msg_mgr *hmsg_mgr; /* Message manager */
+ /* Msg control for messages coming from the DSP */
+ struct msg_ctrl *msg_input_ctrl;
+ /* Msg control for messages going to the DSP */
+ struct msg_ctrl *msg_output_ctrl;
+ u8 *msg_input; /* Address of input messages */
+ u8 *msg_output; /* Address of output messages */
+ u32 usm_buf_size; /* Size of a shared memory I/O channel */
+ bool shared_irq; /* Is this IRQ shared? */
+ u32 word_size; /* Size in bytes of DSP word */
+ u16 intr_val; /* Interrupt value */
+ /* Private extended proc info; MMU setup */
+ struct mgr_processorextinfo ext_proc_info;
+ struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */
+ struct work_struct io_workq; /* workqueue */
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ u32 ul_trace_buffer_begin; /* Trace message start address */
+ u32 ul_trace_buffer_end; /* Trace message end address */
+ u32 ul_trace_buffer_current; /* Trace message current address */
+ u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
+ u8 *pmsg;
+ u32 ul_gpp_va;
+ u32 ul_dsp_va;
+#endif
+ /* IO Dpc */
+ u32 dpc_req; /* Number of requested DPC's. */
+ u32 dpc_sched; /* Number of executed DPC's. */
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t dpc_lock;
+
+};
+
+/* Function Prototypes */
+static void io_dispatch_pm(struct io_mgr *pio_mgr);
+static void notify_chnl_complete(struct chnl_object *pchnl,
+ struct chnl_irp *chnl_packet_obj);
+static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode);
+static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode);
+static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
+static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
+static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
+ struct chnl_object *pchnl, u32 mask);
+
+/* Bus Addr (cached kernel) */
+static int register_shm_segs(struct io_mgr *hio_mgr,
+ struct cod_manager *cod_man,
+ u32 dw_gpp_base_pa);
+
+static inline void set_chnl_free(struct shm *sm, u32 chnl)
+{
+ sm->host_free_mask &= ~(1 << chnl);
+}
+
+static inline void set_chnl_busy(struct shm *sm, u32 chnl)
+{
+ sm->host_free_mask |= 1 << chnl;
+}
+
+
+/*
+ * ======== bridge_io_create ========
+ * Create an IO manager object.
+ */
+int bridge_io_create(struct io_mgr **io_man,
+ struct dev_object *hdev_obj,
+ const struct io_attrs *mgr_attrts)
+{
+ int status = 0;
+ struct io_mgr *pio_mgr = NULL;
+ struct shm *shared_mem = NULL;
+ struct bridge_dev_context *hbridge_context = NULL;
+ struct cfg_devnode *dev_node_obj;
+ struct chnl_mgr *hchnl_mgr;
+ u8 dev_type;
+
+ /* Check requirements */
+ if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
+ if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /*
+ * Message manager will be created when a file is loaded, since
+ * size of message buffer in shared memory is configurable in
+ * the base image.
+ */
+ dev_get_bridge_context(hdev_obj, &hbridge_context);
+ if (!hbridge_context) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_dev_type(hdev_obj, &dev_type);
+ /*
+ * The DSP shared memory addresses will be set properly when
+ * a program is loaded; they are unknown until a COFF file is
+ * loaded. The value -1 is used because it is less likely than
+ * 0 to be a valid address.
+ */
+ shared_mem = (struct shm *)-1;
+
+ /* Allocate IO manager object */
+ pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
+ if (pio_mgr == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ /* Initialize chnl_mgr object */
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ pio_mgr->pmsg = NULL;
+#endif
+ pio_mgr->hchnl_mgr = hchnl_mgr;
+ pio_mgr->word_size = mgr_attrts->word_size;
+ pio_mgr->shared_mem = shared_mem;
+
+ if (dev_type == DSP_UNIT) {
+ /* Create an IO DPC */
+ tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
+
+ /* Initialize DPC counters */
+ pio_mgr->dpc_req = 0;
+ pio_mgr->dpc_sched = 0;
+
+ spin_lock_init(&pio_mgr->dpc_lock);
+
+ status = dev_get_dev_node(hdev_obj, &dev_node_obj);
+ }
+
+ if (!status) {
+ pio_mgr->hbridge_context = hbridge_context;
+ pio_mgr->shared_irq = mgr_attrts->irq_shared;
+ if (dsp_wdt_init())
+ status = -EPERM;
+ } else {
+ status = -EIO;
+ }
+func_end:
+ if (status) {
+ /* Cleanup */
+ bridge_io_destroy(pio_mgr);
+ if (io_man)
+ *io_man = NULL;
+ } else {
+ /* Return IO manager object to caller... */
+ hchnl_mgr->hio_mgr = pio_mgr;
+ *io_man = pio_mgr;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_io_destroy ========
+ * Purpose:
+ * Disable interrupts, destroy the IO manager.
+ */
+int bridge_io_destroy(struct io_mgr *hio_mgr)
+{
+ int status = 0;
+ if (hio_mgr) {
+ /* Free IO DPC object */
+ tasklet_kill(&hio_mgr->dpc_tasklet);
+
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ kfree(hio_mgr->pmsg);
+#endif
+ dsp_wdt_exit();
+ /* Free this IO manager object */
+ kfree(hio_mgr);
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_io_on_loaded ========
+ * Purpose:
+ * Called when a new program is loaded to get shared memory buffer
+ * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
+ * are in DSP address units.
+ */
+int bridge_io_on_loaded(struct io_mgr *hio_mgr)
+{
+ struct cod_manager *cod_man;
+ struct chnl_mgr *hchnl_mgr;
+ struct msg_mgr *hmsg_mgr;
+ u32 ul_shm_base;
+ u32 ul_shm_base_offset;
+ u32 ul_shm_limit;
+ u32 ul_shm_length = -1;
+ u32 ul_mem_length = -1;
+ u32 ul_msg_base;
+ u32 ul_msg_limit;
+ u32 ul_msg_length = -1;
+ u32 ul_ext_end;
+ u32 ul_gpp_pa = 0;
+ u32 ul_gpp_va = 0;
+ u32 ul_dsp_va = 0;
+ u32 ul_seg_size = 0;
+ u32 ul_pad_size = 0;
+ u32 i;
+ int status = 0;
+ u8 num_procs = 0;
+ s32 ndx = 0;
+ /* DSP MMU setup table */
+ struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
+ struct cfg_hostres *host_res;
+ struct bridge_dev_context *pbridge_context;
+ u32 map_attrs;
+ u32 shm0_end;
+ u32 ul_dyn_ext_base;
+ u32 ul_seg1_size = 0;
+ u32 pa_curr = 0;
+ u32 va_curr = 0;
+ u32 gpp_va_curr = 0;
+ u32 num_bytes = 0;
+ u32 all_bits = 0;
+ u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+ HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+ };
+
+ status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
+ if (!pbridge_context) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ host_res = pbridge_context->resources;
+ if (!host_res) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
+ if (!cod_man) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hchnl_mgr = hio_mgr->hchnl_mgr;
+ /* The message manager is destroyed when the board is stopped. */
+ dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
+ hmsg_mgr = hio_mgr->hmsg_mgr;
+ if (!hchnl_mgr || !hmsg_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ if (hio_mgr->shared_mem)
+ hio_mgr->shared_mem = NULL;
+
+ /* Get start and length of channel part of shared memory */
+ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
+ &ul_shm_base);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
+ &ul_shm_limit);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ if (ul_shm_limit <= ul_shm_base) {
+ status = -EINVAL;
+ goto func_end;
+ }
+ /* Get total length in bytes */
+ ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
+ /* Calculate size of a PROCCOPY shared memory region */
+ dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
+ __func__, (ul_shm_length - sizeof(struct shm)));
+
+ /* Get start and length of message part of shared memory */
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
+ &ul_msg_base);
+ if (!status) {
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
+ &ul_msg_limit);
+ if (!status) {
+ if (ul_msg_limit <= ul_msg_base) {
+ status = -EINVAL;
+ } else {
+ /*
+ * Length (bytes) of messaging part of shared
+ * memory.
+ */
+ ul_msg_length =
+ (ul_msg_limit - ul_msg_base +
+ 1) * hio_mgr->word_size;
+ /*
+ * Total length (bytes) of shared memory:
+ * chnl + msg.
+ */
+ ul_mem_length = ul_shm_length + ul_msg_length;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ if (!status) {
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ status =
+ cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
+#else
+ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
+ &shm0_end);
+#endif
+ if (status)
+ status = -EFAULT;
+ }
+ if (!status) {
+ status =
+ cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
+ if (status)
+ status = -EFAULT;
+ }
+ if (!status) {
+ status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
+ if (status)
+ status = -EFAULT;
+ }
+ if (!status) {
+ /* Get memory reserved in host resources */
+ (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
+ &hio_mgr->ext_proc_info,
+ sizeof(struct
+ mgr_processorextinfo),
+ &num_procs);
+
+ /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
+ ndx = 0;
+ ul_gpp_pa = host_res->dw_mem_phys[1];
+ ul_gpp_va = host_res->dw_mem_base[1];
+ /* This is the virtual uncached ioremapped address!!! */
+ /* Why can't we directly take the DSPVA from the symbols? */
+ ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
+ ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
+ ul_seg1_size =
+ (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
+ /* 4K align */
+ ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
+ /* 64K align */
+ ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
+ ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
+ UL_PAGE_ALIGN_SIZE);
+ if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
+ ul_pad_size = 0x0;
+
+ dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
+ "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
+ "ul_seg_size %x ul_seg1_size %x \n", __func__,
+ ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
+ ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
+
+ if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
+ host_res->dw_mem_length[1]) {
+ pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
+ __func__, host_res->dw_mem_length[1],
+ ul_seg_size + ul_seg1_size + ul_pad_size);
+ status = -ENOMEM;
+ }
+ }
+ if (status)
+ goto func_end;
+
+ pa_curr = ul_gpp_pa;
+ va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+ gpp_va_curr = ul_gpp_va;
+ num_bytes = ul_seg1_size;
+
+ /*
+ * Try to fit into TLB entries. If not possible, push them to page
+ * tables. It is quite possible that if sections are not on
+ * bigger page boundary, we may end up making several small pages.
+ * So, push them onto page tables, if that is the case.
+ */
+ map_attrs = 0x00000000;
+ map_attrs = DSP_MAPLITTLEENDIAN;
+ map_attrs |= DSP_MAPPHYSICALADDR;
+ map_attrs |= DSP_MAPELEMSIZE32;
+ map_attrs |= DSP_MAPDONOTLOCK;
+
+ while (num_bytes) {
+ /*
+ * To find the max. page size with which both PA & VA are
+ * aligned.
+ */
+ all_bits = pa_curr | va_curr;
+ dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
+ "num_bytes %x\n", all_bits, pa_curr, va_curr,
+ num_bytes);
+ for (i = 0; i < 4; i++) {
+ if ((num_bytes >= page_size[i]) && ((all_bits &
+ (page_size[i] -
+ 1)) == 0)) {
+ status =
+ hio_mgr->intf_fxns->
+ pfn_brd_mem_map(hio_mgr->hbridge_context,
+ pa_curr, va_curr,
+ page_size[i], map_attrs,
+ NULL);
+ if (status)
+ goto func_end;
+ pa_curr += page_size[i];
+ va_curr += page_size[i];
+ gpp_va_curr += page_size[i];
+ num_bytes -= page_size[i];
+ /*
+ * Don't try smaller sizes. Hopefully we have
+ * reached an address aligned to a bigger page
+ * size.
+ */
+ break;
+ }
+ }
+ }
+ pa_curr += ul_pad_size;
+ va_curr += ul_pad_size;
+ gpp_va_curr += ul_pad_size;
+
+ /* Configure the TLB entries for the next cacheable segment */
+ num_bytes = ul_seg_size;
+ va_curr = ul_dsp_va * hio_mgr->word_size;
+ while (num_bytes) {
+ /*
+ * To find the max. page size with which both PA & VA are
+ * aligned.
+ */
+ all_bits = pa_curr | va_curr;
+ dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
+ "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
+ va_curr, num_bytes);
+ for (i = 0; i < 4; i++) {
+ if (!(num_bytes >= page_size[i]) ||
+ !((all_bits & (page_size[i] - 1)) == 0))
+ continue;
+ if (ndx < MAX_LOCK_TLB_ENTRIES) {
+ /*
+ * This is the physical address written to
+ * DSP MMU.
+ */
+ ae_proc[ndx].ul_gpp_pa = pa_curr;
+ /*
+ * This is the virtual uncached ioremapped
+ * address!!!
+ */
+ ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+ ae_proc[ndx].ul_dsp_va =
+ va_curr / hio_mgr->word_size;
+ ae_proc[ndx].ul_size = page_size[i];
+ ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
+ ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+ ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
+ dev_dbg(bridge, "shm MMU TLB entry PA %x"
+ " VA %x DSP_VA %x Size %x\n",
+ ae_proc[ndx].ul_gpp_pa,
+ ae_proc[ndx].ul_gpp_va,
+ ae_proc[ndx].ul_dsp_va *
+ hio_mgr->word_size, page_size[i]);
+ ndx++;
+ } else {
+ status =
+ hio_mgr->intf_fxns->
+ pfn_brd_mem_map(hio_mgr->hbridge_context,
+ pa_curr, va_curr,
+ page_size[i], map_attrs,
+ NULL);
+ dev_dbg(bridge,
+ "shm MMU PTE entry PA %x"
+ " VA %x DSP_VA %x Size %x\n",
+ ae_proc[ndx].ul_gpp_pa,
+ ae_proc[ndx].ul_gpp_va,
+ ae_proc[ndx].ul_dsp_va *
+ hio_mgr->word_size, page_size[i]);
+ if (status)
+ goto func_end;
+ }
+ pa_curr += page_size[i];
+ va_curr += page_size[i];
+ gpp_va_curr += page_size[i];
+ num_bytes -= page_size[i];
+ /*
+ * Don't try smaller sizes. Hopefully we have reached
+ * an address aligned to a bigger page size.
+ */
+ break;
+ }
+ }
+
+ /*
+ * Copy remaining entries from CDB. All entries are 1 MB and
+ * should not conflict with shm entries on MPU or DSP side.
+ */
+ for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
+ if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
+ continue;
+
+ if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
+ ul_gpp_pa - 0x100000
+ && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
+ ul_gpp_pa + ul_seg_size)
+ || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
+ ul_dsp_va - 0x100000 / hio_mgr->word_size
+ && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
+ ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
+ dev_dbg(bridge,
+ "CDB MMU entry %d conflicts with "
+ "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
+ "GppPa %x, DspVa %x, Bytes %x.\n", i,
+ hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
+ hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
+ ul_gpp_pa, ul_dsp_va, ul_seg_size);
+ status = -EPERM;
+ } else {
+ if (ndx < MAX_LOCK_TLB_ENTRIES) {
+ ae_proc[ndx].ul_dsp_va =
+ hio_mgr->ext_proc_info.ty_tlb[i].
+ ul_dsp_virt;
+ ae_proc[ndx].ul_gpp_pa =
+ hio_mgr->ext_proc_info.ty_tlb[i].
+ ul_gpp_phys;
+ ae_proc[ndx].ul_gpp_va = 0;
+ /* 1 MB */
+ ae_proc[ndx].ul_size = 0x100000;
+ dev_dbg(bridge, "shm MMU entry PA %x "
+ "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
+ ae_proc[ndx].ul_dsp_va);
+ ndx++;
+ } else {
+ status = hio_mgr->intf_fxns->pfn_brd_mem_map
+ (hio_mgr->hbridge_context,
+ hio_mgr->ext_proc_info.ty_tlb[i].
+ ul_gpp_phys,
+ hio_mgr->ext_proc_info.ty_tlb[i].
+ ul_dsp_virt, 0x100000, map_attrs,
+ NULL);
+ }
+ }
+ if (status)
+ goto func_end;
+ }
+
+ map_attrs = 0x00000000;
+ map_attrs = DSP_MAPLITTLEENDIAN;
+ map_attrs |= DSP_MAPPHYSICALADDR;
+ map_attrs |= DSP_MAPELEMSIZE32;
+ map_attrs |= DSP_MAPDONOTLOCK;
+
+ /* Map the L4 peripherals */
+ i = 0;
+ while (l4_peripheral_table[i].phys_addr) {
+ status = hio_mgr->intf_fxns->pfn_brd_mem_map
+ (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
+ l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
+ map_attrs, NULL);
+ if (status)
+ goto func_end;
+ i++;
+ }
+
+ for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
+ ae_proc[i].ul_dsp_va = 0;
+ ae_proc[i].ul_gpp_pa = 0;
+ ae_proc[i].ul_gpp_va = 0;
+ ae_proc[i].ul_size = 0;
+ }
+ /*
+ * Set the shm physical address entry (grayed out in CDB file)
+ * to the virtual uncached ioremapped address of shm reserved
+ * on MPU.
+ */
+ hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size);
+
+ /*
+ * Need shm Phys addr. IO supports only one DSP for now:
+ * num_procs = 1.
+ */
+ if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
+ status = -EFAULT;
+ goto func_end;
+ } else {
+ if (ae_proc[0].ul_dsp_va > ul_shm_base) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* ul_shm_base may not be at ul_dsp_va address */
+ ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
+ hio_mgr->word_size;
+ /*
+ * bridge_dev_ctrl() will set dev context dsp-mmu info. In
+ * bridge_brd_start() the MMU will be re-programmed with MMU
+ * DSPVa-GPPPa pair info while DSP is in a known
+ * (reset) state.
+ */
+
+ status =
+ hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
+ BRDIOCTL_SETMMUCONFIG,
+ ae_proc);
+ if (status)
+ goto func_end;
+ ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ ul_shm_base += ul_shm_base_offset;
+ ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
+ ul_mem_length);
+ if (ul_shm_base == 0) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Register SM */
+ status =
+ register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
+ }
+
+ hio_mgr->shared_mem = (struct shm *)ul_shm_base;
+ hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
+ hio_mgr->output = hio_mgr->input + (ul_shm_length -
+ sizeof(struct shm)) / 2;
+ hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
+
+ /* Set up Shared memory addresses for messaging. */
+ hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
+ + ul_shm_length);
+ hio_mgr->msg_input =
+ (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
+ hio_mgr->msg_output_ctrl =
+ (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
+ ul_msg_length / 2);
+ hio_mgr->msg_output =
+ (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
+ hmsg_mgr->max_msgs =
+ ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
+ / sizeof(struct msg_dspmsg);
+ dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
+ "output %p, msg_input_ctrl %p, msg_input %p, "
+ "msg_output_ctrl %p, msg_output %p\n",
+ (u8 *) hio_mgr->shared_mem, hio_mgr->input,
+ hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
+ hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
+ hio_mgr->msg_output);
+ dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n",
+ hmsg_mgr->max_msgs);
+ memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
+
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+ /* Get the start address of trace buffer */
+ status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
+ &hio_mgr->ul_trace_buffer_begin);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+ (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
+ /* Get the end address of trace buffer */
+ status = cod_get_sym_value(cod_man, SYS_PUTCEND,
+ &hio_mgr->ul_trace_buffer_end);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hio_mgr->ul_trace_buffer_end =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+ (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
+ /* Get the current address of DSP write pointer */
+ status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
+ &hio_mgr->ul_trace_buffer_current);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hio_mgr->ul_trace_buffer_current =
+ (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+ (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
+ /* Calculate the size of trace buffer */
+ kfree(hio_mgr->pmsg);
+ hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_trace_buffer_begin) *
+ hio_mgr->word_size) + 2, GFP_KERNEL);
+ if (!hio_mgr->pmsg)
+ status = -ENOMEM;
+
+ hio_mgr->ul_dsp_va = ul_dsp_va;
+ hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
+
+#endif
+func_end:
+ return status;
+}
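
The two mapping loops in bridge_io_on_loaded() repeatedly pick the largest page size for which the current physical and virtual addresses are both aligned and enough bytes remain, then advance by that amount. Stripped of the bridge interfaces, the core of the algorithm looks like this (map_region is a made-up name and the addresses in main() are arbitrary):

#include <stdint.h>
#include <stdio.h>

static const uint32_t page_size[] = {
	0x1000000,	/* 16 MB */
	0x100000,	/*  1 MB */
	0x10000,	/* 64 KB */
	0x1000,		/*  4 KB */
};

/* Walk [pa, pa + len) / [va, va + len) and print the page size chosen for
 * each chunk.  Assumes pa, va and len are at least 4 KB aligned, as the
 * driver's segment sizes are after the 4K/64K rounding done above. */
static void map_region(uint32_t pa, uint32_t va, uint32_t len)
{
	while (len) {
		uint32_t all_bits = pa | va;
		unsigned int i;

		for (i = 0; i < 4; i++) {
			if (len >= page_size[i] &&
			    (all_bits & (page_size[i] - 1)) == 0) {
				printf("map pa 0x%08x va 0x%08x size 0x%x\n",
				       pa, va, page_size[i]);
				pa += page_size[i];
				va += page_size[i];
				len -= page_size[i];
				/* Don't try smaller sizes; the next address
				 * may be aligned to a bigger page. */
				break;
			}
		}
	}
}

int main(void)
{
	/* 0x90000 bytes starting at addresses aligned only to 64 KB. */
	map_region(0x80010000, 0x20010000, 0x90000);
	return 0;
}
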
+
+/*
+ * ======== io_buf_size ========
+ * Size of shared memory I/O channel.
+ */
+u32 io_buf_size(struct io_mgr *hio_mgr)
+{
+ if (hio_mgr)
+ return hio_mgr->usm_buf_size;
+ else
+ return 0;
+}
+
+/*
+ * ======== io_cancel_chnl ========
+ * Cancel IO on a given PCPY channel.
+ */
+void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
+{
+ struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
+ struct shm *sm;
+
+ if (!hio_mgr)
+ goto func_end;
+ sm = hio_mgr->shared_mem;
+
+ /* Inform DSP that we have no more buffers on this channel */
+ set_chnl_free(sm, chnl);
+
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+func_end:
+ return;
+}
+
+
+/*
+ * ======== io_dispatch_pm ========
+ * Performs I/O dispatch on PM related messages from DSP
+ */
+static void io_dispatch_pm(struct io_mgr *pio_mgr)
+{
+ int status;
+ u32 parg[2];
+
+ /* Perform Power message processing here */
+ parg[0] = pio_mgr->intr_val;
+
+ /* Send the command to the Bridge clk/pwr manager to handle */
+ if (parg[0] == MBX_PM_HIBERNATE_EN) {
+ dev_dbg(bridge, "PM: Hibernate command\n");
+ status = pio_mgr->intf_fxns->
+ pfn_dev_cntrl(pio_mgr->hbridge_context,
+ BRDIOCTL_PWR_HIBERNATE, parg);
+ if (status)
+ pr_err("%s: hibernate cmd failed 0x%x\n",
+ __func__, status);
+ } else if (parg[0] == MBX_PM_OPP_REQ) {
+ parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
+ dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
+ status = pio_mgr->intf_fxns->
+ pfn_dev_cntrl(pio_mgr->hbridge_context,
+ BRDIOCTL_CONSTRAINT_REQUEST, parg);
+ if (status)
+ dev_dbg(bridge, "PM: Failed to set constraint "
+ "= 0x%x\n", parg[1]);
+ } else {
+ dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
+ parg[0]);
+ status = pio_mgr->intf_fxns->
+ pfn_dev_cntrl(pio_mgr->hbridge_context,
+ BRDIOCTL_CLK_CTRL, parg);
+ if (status)
+ dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
+ "= 0x%x\n", *parg);
+ }
+}
+
+/*
+ * ======== io_dpc ========
+ * Deferred procedure call for shared memory channel driver ISR. Carries
+ * out the dispatch of I/O as a non-preemptible event. It can only be
+ * preempted by an ISR.
+ */
+void io_dpc(unsigned long ref_data)
+{
+ struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
+ struct chnl_mgr *chnl_mgr_obj;
+ struct msg_mgr *msg_mgr_obj;
+ struct deh_mgr *hdeh_mgr;
+ u32 requested;
+ u32 serviced;
+
+ if (!pio_mgr)
+ goto func_end;
+ chnl_mgr_obj = pio_mgr->hchnl_mgr;
+ dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
+ dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
+ if (!chnl_mgr_obj)
+ goto func_end;
+
+ requested = pio_mgr->dpc_req;
+ serviced = pio_mgr->dpc_sched;
+
+ if (serviced == requested)
+ goto func_end;
+
+ /* Process pending DPC's */
+ do {
+ /* Check value of interrupt reg to ensure it's a valid error */
+ if ((pio_mgr->intr_val > DEH_BASE) &&
+ (pio_mgr->intr_val < DEH_LIMIT)) {
+ /* Notify DSP/BIOS exception */
+ if (hdeh_mgr) {
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ print_dsp_debug_trace(pio_mgr);
+#endif
+ bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
+ pio_mgr->intr_val);
+ }
+ }
+ /* Proc-copy channel dispatch */
+ input_chnl(pio_mgr, NULL, IO_SERVICE);
+ output_chnl(pio_mgr, NULL, IO_SERVICE);
+
+#ifdef CHNL_MESSAGES
+ if (msg_mgr_obj) {
+ /* Perform I/O dispatch on message queues */
+ input_msg(pio_mgr, msg_mgr_obj);
+ output_msg(pio_mgr, msg_mgr_obj);
+ }
+
+#endif
+#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+ if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
+ /* Notify DSP Trace message */
+ print_dsp_debug_trace(pio_mgr);
+ }
+#endif
+ serviced++;
+ } while (serviced != requested);
+ pio_mgr->dpc_sched = requested;
+func_end:
+ return;
+}
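
io_mbox_msg()/iosm_schedule() and io_dpc() communicate through two monotonically increasing counters: the producer bumps dpc_req (under dpc_lock) and schedules the tasklet, and the DPC services work until dpc_sched catches up with dpc_req, so requests that arrive while it runs are not lost. A single-threaded sketch of just the counter logic, with the locking and tasklet machinery omitted:

#include <stdio.h>

static unsigned int dpc_req;	/* incremented by the "interrupt" side */
static unsigned int dpc_sched;	/* incremented as work is serviced */

static void request_dpc(void)
{
	/* In the driver this increment is done under dpc_lock and is
	 * followed by tasklet_schedule(). */
	dpc_req++;
}

static void run_dpc(void)
{
	unsigned int requested = dpc_req;
	unsigned int serviced = dpc_sched;

	if (serviced == requested)
		return;			/* nothing new to do */

	do {
		printf("servicing request %u\n", serviced + 1);
		serviced++;
	} while (serviced != requested);

	dpc_sched = requested;		/* catch up */
}

int main(void)
{
	request_dpc();
	request_dpc();
	run_dpc();			/* services two requests */
	request_dpc();
	run_dpc();			/* services one more */
	return 0;
}
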
+
+/*
+ * ======== io_mbox_msg ========
+ * Main interrupt handler for the shared memory IO manager.
+ * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
+ * schedules a DPC to dispatch I/O.
+ */
+void io_mbox_msg(u32 msg)
+{
+ struct io_mgr *pio_mgr;
+ struct dev_object *dev_obj;
+ unsigned long flags;
+
+ dev_obj = dev_get_first();
+ dev_get_io_mgr(dev_obj, &pio_mgr);
+
+ if (!pio_mgr)
+ return;
+
+ pio_mgr->intr_val = (u16)msg;
+ if (pio_mgr->intr_val & MBX_PM_CLASS)
+ io_dispatch_pm(pio_mgr);
+
+ if (pio_mgr->intr_val == MBX_DEH_RESET) {
+ pio_mgr->intr_val = 0;
+ } else {
+ spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
+ pio_mgr->dpc_req++;
+ spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
+ tasklet_schedule(&pio_mgr->dpc_tasklet);
+ }
+ return;
+}
+
+/*
+ * ======== io_request_chnl ========
+ * Purpose:
+ * Request channel I/O from the DSP. Sets flags in shared memory, then
+ * interrupts the DSP.
+ */
+void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
+ u8 io_mode, u16 *mbx_val)
+{
+ struct chnl_mgr *chnl_mgr_obj;
+ struct shm *sm;
+
+ if (!pchnl || !mbx_val)
+ goto func_end;
+ chnl_mgr_obj = io_manager->hchnl_mgr;
+ sm = io_manager->shared_mem;
+ if (io_mode == IO_INPUT) {
+ /*
+ * Assertion fires if CHNL_AddIOReq() called on a stream
+ * which was cancelled, or attached to a dead board.
+ */
+ DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
+ (pchnl->dw_state == CHNL_STATEEOS));
+ /* Indicate to the DSP we have a buffer available for input */
+ set_chnl_busy(sm, pchnl->chnl_id);
+ *mbx_val = MBX_PCPY_CLASS;
+ } else if (io_mode == IO_OUTPUT) {
+ /*
+ * This assertion fails if CHNL_AddIOReq() was called on a
+ * stream which was cancelled, or attached to a dead board.
+ */
+ DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
+ CHNL_STATEREADY);
+ /*
+ * Record the fact that we have a buffer available for
+ * output.
+ */
+ chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
+ } else {
+ DBC_ASSERT(io_mode); /* Shouldn't get here. */
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== iosm_schedule ========
+ * Schedule DPC for IO.
+ */
+void iosm_schedule(struct io_mgr *io_manager)
+{
+ unsigned long flags;
+
+ if (!io_manager)
+ return;
+
+ /* Increment count of DPC's pending. */
+ spin_lock_irqsave(&io_manager->dpc_lock, flags);
+ io_manager->dpc_req++;
+ spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
+
+ /* Schedule DPC */
+ tasklet_schedule(&io_manager->dpc_tasklet);
+}
+
+/*
+ * ======== find_ready_output ========
+ * Search for a host output channel which is ready to send. If this is
+ * called as a result of servicing the DPC, then implement a round
+ * robin search; otherwise, this was called by a client thread (via
+ * IO_Dispatch()), so just start searching from the current channel id.
+ */
+static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
+ struct chnl_object *pchnl, u32 mask)
+{
+ u32 ret = OUTPUTNOTREADY;
+ u32 id, start_id;
+ u32 shift;
+
+ id = (pchnl !=
+ NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
+ id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
+ if (id >= CHNL_MAXCHANNELS)
+ goto func_end;
+ if (mask) {
+ shift = (1 << id);
+ start_id = id;
+ do {
+ if (mask & shift) {
+ ret = id;
+ if (pchnl == NULL)
+ chnl_mgr_obj->dw_last_output = id;
+ break;
+ }
+ id = id + 1;
+ id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
+ shift = (1 << id);
+ } while (id != start_id);
+ }
+func_end:
+ return ret;
+}
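
find_ready_output() is a circular scan over a bitmask of ready channels, starting either at a given channel or just after the last one served, so that output service stays fair across channels. An equivalent standalone version of the scan (find_ready and NOT_READY are illustrative names, not the driver's):

#include <stdio.h>

#define MAX_CHANNELS 32
#define NOT_READY    0xffffffffu

/* Return the first channel id whose bit is set in 'mask', scanning
 * circularly from 'start'; NOT_READY if the mask is empty. */
static unsigned int find_ready(unsigned int mask, unsigned int start)
{
	unsigned int id = start % MAX_CHANNELS;
	unsigned int first = id;

	if (!mask)
		return NOT_READY;

	do {
		if (mask & (1u << id))
			return id;
		id = (id + 1) % MAX_CHANNELS;
	} while (id != first);

	return NOT_READY;
}

int main(void)
{
	unsigned int mask = (1u << 3) | (1u << 17);

	/* Starting just past channel 3 finds 17; starting at 0 finds 3. */
	printf("%u %u\n", find_ready(mask, 4), find_ready(mask, 0));
	return 0;
}
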
+
+/*
+ * ======== input_chnl ========
+ * Dispatch a buffer on an input channel.
+ */
+static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode)
+{
+ struct chnl_mgr *chnl_mgr_obj;
+ struct shm *sm;
+ u32 chnl_id;
+ u32 bytes;
+ struct chnl_irp *chnl_packet_obj = NULL;
+ u32 dw_arg;
+ bool clear_chnl = false;
+ bool notify_client = false;
+
+ sm = pio_mgr->shared_mem;
+ chnl_mgr_obj = pio_mgr->hchnl_mgr;
+
+ /* Attempt to perform input */
+ if (!sm->input_full)
+ goto func_end;
+
+ bytes = sm->input_size * chnl_mgr_obj->word_size;
+ chnl_id = sm->input_id;
+ dw_arg = sm->arg;
+ if (chnl_id >= CHNL_MAXCHANNELS) {
+ /* Shouldn't be here: would indicate corrupted shm. */
+ DBC_ASSERT(chnl_id);
+ goto func_end;
+ }
+ pchnl = chnl_mgr_obj->ap_channel[chnl_id];
+ if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
+ if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
+ if (!pchnl->pio_requests)
+ goto func_end;
+ /* Get the I/O request, and attempt a transfer */
+ chnl_packet_obj = (struct chnl_irp *)
+ lst_get_head(pchnl->pio_requests);
+ if (chnl_packet_obj) {
+ pchnl->cio_reqs--;
+ if (pchnl->cio_reqs < 0)
+ goto func_end;
+ /*
+ * Ensure we don't overflow the client's
+ * buffer.
+ */
+ bytes = min(bytes, chnl_packet_obj->byte_size);
+ memcpy(chnl_packet_obj->host_sys_buf,
+ pio_mgr->input, bytes);
+ pchnl->bytes_moved += bytes;
+ chnl_packet_obj->byte_size = bytes;
+ chnl_packet_obj->dw_arg = dw_arg;
+ chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
+
+ if (bytes == 0) {
+ /*
+ * The DSP should not send EOS more
+ * than once on this channel; if it
+ * does, bail out.
+ */
+ if (pchnl->dw_state & CHNL_STATEEOS)
+ goto func_end;
+ /*
+ * Zero bytes indicates EOS. Update
+ * IOC status for this chirp, and also
+ * the channel state.
+ */
+ chnl_packet_obj->status |=
+ CHNL_IOCSTATEOS;
+ pchnl->dw_state |= CHNL_STATEEOS;
+ /*
+ * Notify that end of stream has
+ * occurred.
+ */
+ ntfy_notify(pchnl->ntfy_obj,
+ DSP_STREAMDONE);
+ }
+ /* Tell DSP if no more I/O buffers available */
+ if (!pchnl->pio_requests)
+ goto func_end;
+ if (LST_IS_EMPTY(pchnl->pio_requests)) {
+ set_chnl_free(sm, pchnl->chnl_id);
+ }
+ clear_chnl = true;
+ notify_client = true;
+ } else {
+ /*
+ * Input full for this channel, but we have no
+ * buffers available. The channel must be
+ * "idling". Clear out the physical input
+ * channel.
+ */
+ clear_chnl = true;
+ }
+ } else {
+ /* Input channel cancelled: clear input channel */
+ clear_chnl = true;
+ }
+ } else {
+ /* DPC fired after host closed channel: clear input channel */
+ clear_chnl = true;
+ }
+ if (clear_chnl) {
+ /* Indicate to the DSP we have read the input */
+ sm->input_full = 0;
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ }
+ if (notify_client) {
+ /* Notify client with IO completion record */
+ notify_chnl_complete(pchnl, chnl_packet_obj);
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== input_msg ========
+ * Copies messages from shared memory to the message queues.
+ */
+static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+ u32 num_msgs;
+ u32 i;
+ u8 *msg_input;
+ struct msg_queue *msg_queue_obj;
+ struct msg_frame *pmsg;
+ struct msg_dspmsg msg;
+ struct msg_ctrl *msg_ctr_obj;
+ u32 input_empty;
+ u32 addr;
+
+ msg_ctr_obj = pio_mgr->msg_input_ctrl;
+ /* Get the number of input messages to be read */
+ input_empty = msg_ctr_obj->buf_empty;
+ num_msgs = msg_ctr_obj->size;
+ if (input_empty)
+ goto func_end;
+
+ msg_input = pio_mgr->msg_input;
+ for (i = 0; i < num_msgs; i++) {
+ /* Read the next message */
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
+ msg.msg.dw_cmd =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
+ msg.msg.dw_arg1 =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
+ msg.msg.dw_arg2 =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
+ msg.msgq_id =
+ read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ msg_input += sizeof(struct msg_dspmsg);
+ if (!hmsg_mgr->queue_list)
+ goto func_end;
+
+ /* Determine which queue to put the message in */
+ msg_queue_obj =
+ (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
+ dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
+ "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
+ msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
+ /*
+ * Interrupt may occur before shared memory and message
+ * input locations have been set up. If all nodes were
+ * cleaned up, hmsg_mgr->max_msgs should be 0.
+ */
+ while (msg_queue_obj != NULL) {
+ if (msg.msgq_id == msg_queue_obj->msgq_id) {
+ /* Found it */
+ if (msg.msg.dw_cmd == RMS_EXITACK) {
+ /*
+ * Call the node exit notification.
+ * The exit message does not get
+ * queued.
+ */
+ (*hmsg_mgr->on_exit) ((void *)
+ msg_queue_obj->arg,
+ msg.msg.dw_arg1);
+ } else {
+ /*
+ * Not an exit acknowledgement, queue
+ * the message.
+ */
+ if (!msg_queue_obj->msg_free_list)
+ goto func_end;
+ pmsg = (struct msg_frame *)lst_get_head
+ (msg_queue_obj->msg_free_list);
+ if (msg_queue_obj->msg_used_list
+ && pmsg) {
+ pmsg->msg_data = msg;
+ lst_put_tail
+ (msg_queue_obj->msg_used_list,
+ (struct list_head *)pmsg);
+ ntfy_notify
+ (msg_queue_obj->ntfy_obj,
+ DSP_NODEMESSAGEREADY);
+ sync_set_event
+ (msg_queue_obj->sync_event);
+ } else {
+ /*
+ * No free frame to copy the
+ * message into.
+ */
+ pr_err("%s: no free msg frames,"
+ " discarding msg\n",
+ __func__);
+ }
+ }
+ break;
+ }
+
+ if (!hmsg_mgr->queue_list || !msg_queue_obj)
+ goto func_end;
+ msg_queue_obj =
+ (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
+ (struct list_head *)
+ msg_queue_obj);
+ }
+ }
+ /* Set the post SWI flag */
+ if (num_msgs > 0) {
+ /* Tell the DSP we've read the messages */
+ msg_ctr_obj->buf_empty = true;
+ msg_ctr_obj->post_swi = true;
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== notify_chnl_complete ========
+ * Purpose:
+ * Signal the channel event, notifying the client that I/O has completed.
+ */
+static void notify_chnl_complete(struct chnl_object *pchnl,
+ struct chnl_irp *chnl_packet_obj)
+{
+ bool signal_event;
+
+ if (!pchnl || !pchnl->sync_event ||
+ !pchnl->pio_completions || !chnl_packet_obj)
+ goto func_end;
+
+ /*
+ * Note: we signal the channel event only if the queue of IO
+ * completions is empty. If it is not empty, the event is sure to be
+ * signalled by the only IO completion list consumer:
+ * bridge_chnl_get_ioc().
+ */
+ signal_event = LST_IS_EMPTY(pchnl->pio_completions);
+ /* Enqueue the IO completion info for the client */
+ lst_put_tail(pchnl->pio_completions,
+ (struct list_head *)chnl_packet_obj);
+ pchnl->cio_cs++;
+
+ if (pchnl->cio_cs > pchnl->chnl_packets)
+ goto func_end;
+ /* Signal the channel event (if not already set) that IO is complete */
+ if (signal_event)
+ sync_set_event(pchnl->sync_event);
+
+ /* Notify that IO is complete */
+ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
+func_end:
+ return;
+}
+
+/*
+ * ======== output_chnl ========
+ * Purpose:
+ * Dispatch a buffer on an output channel.
+ */
+static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+ u8 io_mode)
+{
+ struct chnl_mgr *chnl_mgr_obj;
+ struct shm *sm;
+ u32 chnl_id;
+ struct chnl_irp *chnl_packet_obj;
+ u32 dw_dsp_f_mask;
+
+ chnl_mgr_obj = pio_mgr->hchnl_mgr;
+ sm = pio_mgr->shared_mem;
+ /* Attempt to perform output */
+ if (sm->output_full)
+ goto func_end;
+
+ if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
+ goto func_end;
+
+ /* Look to see if both a PC and DSP output channel are ready */
+ dw_dsp_f_mask = sm->dsp_free_mask;
+ chnl_id =
+ find_ready_output(chnl_mgr_obj, pchnl,
+ (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
+ if (chnl_id == OUTPUTNOTREADY)
+ goto func_end;
+
+ pchnl = chnl_mgr_obj->ap_channel[chnl_id];
+ if (!pchnl || !pchnl->pio_requests) {
+ /* Shouldn't get here */
+ goto func_end;
+ }
+ /* Get the I/O request, and attempt a transfer */
+ chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
+ if (!chnl_packet_obj)
+ goto func_end;
+
+ pchnl->cio_reqs--;
+ if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
+ goto func_end;
+
+ /* Record fact that no more I/O buffers available */
+ if (LST_IS_EMPTY(pchnl->pio_requests))
+ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+
+ /* Transfer buffer to DSP side */
+ chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
+ chnl_packet_obj->byte_size);
+ memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
+ chnl_packet_obj->byte_size);
+ pchnl->bytes_moved += chnl_packet_obj->byte_size;
+ /* Write all 32 bits of arg */
+ sm->arg = chnl_packet_obj->dw_arg;
+#if _CHNL_WORDSIZE == 2
+ /* SM access may use a different word size (e.g. 16/32-bit words) */
+ sm->output_id = (u16) chnl_id;
+ sm->output_size = (u16) (chnl_packet_obj->byte_size +
+ chnl_mgr_obj->word_size - 1) /
+ (u16) chnl_mgr_obj->word_size;
+#else
+ sm->output_id = chnl_id;
+ sm->output_size = (chnl_packet_obj->byte_size +
+ chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
+#endif
+ sm->output_full = 1;
+ /* Indicate to the DSP we have written the output */
+ sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ /* Notify client with IO completion record (keep EOS) */
+ chnl_packet_obj->status &= CHNL_IOCSTATEOS;
+ notify_chnl_complete(pchnl, chnl_packet_obj);
+ /* Notify if stream is done. */
+ if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
+ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
+
+func_end:
+ return;
+}
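
sm->output_size above is the buffer length converted from bytes to DSP words, rounded up with the usual (bytes + word_size - 1) / word_size ceiling division. A small illustration:

#include <stdio.h>

/* Round a byte count up to whole words of 'word_size' bytes. */
static unsigned int bytes_to_words(unsigned int bytes, unsigned int word_size)
{
	return (bytes + word_size - 1) / word_size;
}

int main(void)
{
	/* 10 bytes in 4-byte words -> 3 words; 8 bytes -> 2 words. */
	printf("%u %u\n", bytes_to_words(10, 4), bytes_to_words(8, 4));
	return 0;
}
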
+
+/*
+ * ======== output_msg ========
+ * Copies messages from the message queues to the shared memory.
+ */
+static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+ u32 num_msgs = 0;
+ u32 i;
+ u8 *msg_output;
+ struct msg_frame *pmsg;
+ struct msg_ctrl *msg_ctr_obj;
+ u32 output_empty;
+ u32 val;
+ u32 addr;
+
+ msg_ctr_obj = pio_mgr->msg_output_ctrl;
+
+ /* Check if output has been cleared */
+ output_empty = msg_ctr_obj->buf_empty;
+ if (output_empty) {
+ num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
+ hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
+ msg_output = pio_mgr->msg_output;
+ /* Copy num_msgs messages into shared memory */
+ for (i = 0; i < num_msgs; i++) {
+ if (!hmsg_mgr->msg_used_list) {
+ pmsg = NULL;
+ goto func_end;
+ } else {
+ pmsg = (struct msg_frame *)
+ lst_get_head(hmsg_mgr->msg_used_list);
+ }
+ if (pmsg != NULL) {
+ val = (pmsg->msg_data).msgq_id;
+ addr = (u32) &(((struct msg_dspmsg *)
+ msg_output)->msgq_id);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ val = (pmsg->msg_data).msg.dw_cmd;
+ addr = (u32) &((((struct msg_dspmsg *)
+ msg_output)->msg).dw_cmd);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ val = (pmsg->msg_data).msg.dw_arg1;
+ addr = (u32) &((((struct msg_dspmsg *)
+ msg_output)->msg).dw_arg1);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ val = (pmsg->msg_data).msg.dw_arg2;
+ addr = (u32) &((((struct msg_dspmsg *)
+ msg_output)->msg).dw_arg2);
+ write_ext32_bit_dsp_data(
+ pio_mgr->hbridge_context, addr, val);
+ msg_output += sizeof(struct msg_dspmsg);
+ if (!hmsg_mgr->msg_free_list)
+ goto func_end;
+ lst_put_tail(hmsg_mgr->msg_free_list,
+ (struct list_head *)pmsg);
+ sync_set_event(hmsg_mgr->sync_event);
+ }
+ }
+
+ if (num_msgs > 0) {
+ hmsg_mgr->msgs_pending -= num_msgs;
+#if _CHNL_WORDSIZE == 2
+ /*
+ * SM access may use a different word size
+ * (e.g. 16/32-bit words)
+ */
+ msg_ctr_obj->size = (u16) num_msgs;
+#else
+ msg_ctr_obj->size = num_msgs;
+#endif
+ msg_ctr_obj->buf_empty = false;
+ /* Set the post SWI flag */
+ msg_ctr_obj->post_swi = true;
+ /* Tell the DSP we have written the output. */
+ sm_interrupt_dsp(pio_mgr->hbridge_context,
+ MBX_PCPY_CLASS);
+ }
+ }
+func_end:
+ return;
+}
+
+/*
+ * ======== register_shm_segs ========
+ * purpose:
+ * Registers GPP SM segment with CMM.
+ */
+static int register_shm_segs(struct io_mgr *hio_mgr,
+ struct cod_manager *cod_man,
+ u32 dw_gpp_base_pa)
+{
+ int status = 0;
+ u32 ul_shm0_base = 0;
+ u32 shm0_end = 0;
+ u32 ul_shm0_rsrvd_start = 0;
+ u32 ul_rsrvd_size = 0;
+ u32 ul_gpp_phys;
+ u32 ul_dsp_virt;
+ u32 ul_shm_seg_id0 = 0;
+ u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
+
+ /*
+ * Read address and size info for first SM region.
+ * Get start of 1st SM Heap region.
+ */
+ status =
+ cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
+ if (ul_shm0_base == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* Get end of 1st SM Heap region */
+ if (!status) {
+ /* Get start and length of message part of shared memory */
+ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
+ &shm0_end);
+ if (shm0_end == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ }
+ /* Start of Gpp reserved region */
+ if (!status) {
+ /* Get start and length of message part of shared memory */
+ status =
+ cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
+ &ul_shm0_rsrvd_start);
+ if (ul_shm0_rsrvd_start == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ }
+ /* Register with CMM */
+ if (!status) {
+ status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
+ if (!status) {
+ status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
+ CMM_ALLSEGMENTS);
+ }
+ }
+ /* Register new SM region(s) */
+ if (!status && (shm0_end - ul_shm0_base) > 0) {
+ /* Calc size (bytes) of SM the GPP can alloc from */
+ ul_rsrvd_size =
+ (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
+ if (ul_rsrvd_size <= 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* Calc size of SM DSP can alloc from */
+ ul_dsp_size =
+ (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
+ if (ul_dsp_size <= 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* First TLB entry reserved for Bridge SM use. */
+ ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ /* Get size in bytes */
+ ul_dsp_virt =
+ hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
+ hio_mgr->word_size;
+ /*
+ * Calc byte offset used to convert GPP phys <-> DSP byte
+ * address.
+ */
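+		/* Keep the absolute difference; the conversion direction
+		 * (add or subtract) is passed to cmm_register_gppsm_seg() below */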
+ if (dw_gpp_base_pa > ul_dsp_virt)
+ dw_offset = dw_gpp_base_pa - ul_dsp_virt;
+ else
+ dw_offset = ul_dsp_virt - dw_gpp_base_pa;
+
+ if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /*
+ * Calc Gpp phys base of SM region.
+ * This is actually uncached kernel virtual address.
+ */
+ dw_gpp_base_va =
+ ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
+ ul_dsp_virt;
+ /*
+ * Calc Gpp phys base of SM region.
+ * This is the physical address.
+ */
+ dw_gpp_base_pa =
+ dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
+ ul_dsp_virt;
+ /* Register SM Segment 0. */
+ status =
+ cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
+ ul_rsrvd_size, dw_offset,
+ (dw_gpp_base_pa >
+ ul_dsp_virt) ? CMM_ADDTODSPPA :
+ CMM_SUBFROMDSPPA,
+ (u32) (ul_shm0_base *
+ hio_mgr->word_size),
+ ul_dsp_size, &ul_shm_seg_id0,
+ dw_gpp_base_va);
+ /* First SM region is seg_id = 1 */
+ if (ul_shm_seg_id0 != 1)
+ status = -EPERM;
+ }
+func_end:
+ return status;
+}
+
+/* ZCPY IO routines. */
+/*
+ * ======== io_sh_msetting ========
+ * Sets the requested shm setting.
+ */
+int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 i;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ switch (desc) {
+ case SHM_CURROPP:
+ /* Update the shared memory with requested OPP information */
+ if (pargs != NULL)
+ hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
+ *(u32 *) pargs;
+ else
+ return -EPERM;
+ break;
+ case SHM_OPPINFO:
+ /*
+ * Update the shared memory with the voltage, frequency,
+ * min and max frequency values for an OPP.
+ */
+ for (i = 0; i <= dsp_max_opps; i++) {
+ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+ voltage = vdd1_dsp_freq[i][0];
+ dev_dbg(bridge, "OPP-shm: voltage: %d\n",
+ vdd1_dsp_freq[i][0]);
+ hio_mgr->shared_mem->opp_table_struct.
+ opp_point[i].frequency = vdd1_dsp_freq[i][1];
+ dev_dbg(bridge, "OPP-shm: frequency: %d\n",
+ vdd1_dsp_freq[i][1]);
+ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+ min_freq = vdd1_dsp_freq[i][2];
+ dev_dbg(bridge, "OPP-shm: min freq: %d\n",
+ vdd1_dsp_freq[i][2]);
+ hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+ max_freq = vdd1_dsp_freq[i][3];
+ dev_dbg(bridge, "OPP-shm: max freq: %d\n",
+ vdd1_dsp_freq[i][3]);
+ }
+ hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
+ dsp_max_opps;
+ dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
+ /* Update the current OPP number */
+ if (pdata->dsp_get_opp)
+ i = (*pdata->dsp_get_opp) ();
+ hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
+ dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
+ break;
+ case SHM_GETOPP:
+ /* Get the OPP that DSP has requested */
+ *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
+ break;
+ default:
+ break;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * ======== bridge_io_get_proc_load ========
+ * Gets the Processor's Load information
+ */
+int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
+ struct dsp_procloadstat *proc_lstat)
+{
+ proc_lstat->curr_load =
+ hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
+ proc_lstat->predicted_load =
+ hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
+ proc_lstat->curr_dsp_freq =
+ hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
+ proc_lstat->predicted_freq =
+ hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
+
+ dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
+ "Pred Freq = %d\n", proc_lstat->curr_load,
+ proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
+ proc_lstat->predicted_freq);
+ return 0;
+}
+
+void io_sm_init(void)
+{
+ /* Do nothing */
+}
+
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+void print_dsp_debug_trace(struct io_mgr *hio_mgr)
+{
+ u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
+
+ while (true) {
+ /* Get the DSP current pointer */
+ ul_gpp_cur_pointer =
+ *(u32 *) (hio_mgr->ul_trace_buffer_current);
+ ul_gpp_cur_pointer =
+ hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
+ hio_mgr->ul_dsp_va);
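+		/* The value read is a DSP-side address; translate it to the
+		 * corresponding GPP virtual address */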
+
+ /* No new debug messages available yet */
+ if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
+ break;
+ } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
+ /* Continuous data */
+ ul_new_message_length =
+ ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
+
+ memcpy(hio_mgr->pmsg,
+ (char *)hio_mgr->ul_gpp_read_pointer,
+ ul_new_message_length);
+ hio_mgr->pmsg[ul_new_message_length] = '\0';
+ /*
+ * Advance the GPP trace pointer to DSP current
+ * pointer.
+ */
+ hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
+ /* Print the trace messages */
+ pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
+ } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
+ /* Handle trace buffer wraparound */
+ memcpy(hio_mgr->pmsg,
+ (char *)hio_mgr->ul_gpp_read_pointer,
+ hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_gpp_read_pointer);
+ ul_new_message_length =
+ ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
+ memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_gpp_read_pointer],
+ (char *)hio_mgr->ul_trace_buffer_begin,
+ ul_new_message_length);
+ hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+ hio_mgr->ul_gpp_read_pointer +
+ ul_new_message_length] = '\0';
+ /*
+ * Advance the GPP trace pointer to DSP current
+ * pointer.
+ */
+ hio_mgr->ul_gpp_read_pointer =
+ hio_mgr->ul_trace_buffer_begin +
+ ul_new_message_length;
+ /* Print the trace messages */
+ pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
+ }
+ }
+}
+#endif
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/*
+ * ======== print_dsp_trace_buffer ========
+ * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
+ * Parameters:
+ *  hbridge_context:    Handle to the Bridge driver's device context.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Unable to allocate memory.
+ * Requires:
+ *  hbridge_context must be valid. Checked in bridge_deh_notify.
+ */
+int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
+{
+ int status = 0;
+ struct cod_manager *cod_mgr;
+ u32 ul_trace_end;
+ u32 ul_trace_begin;
+ u32 trace_cur_pos;
+ u32 ul_num_bytes = 0;
+ u32 ul_num_words = 0;
+ u32 ul_word_size = 2;
+ char *psz_buf;
+ char *str_beg;
+ char *trace_end;
+ char *buf_end;
+ char *new_line;
+
+ struct bridge_dev_context *pbridge_context = hbridge_context;
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *dev_obj = (struct dev_object *)
+ pbridge_context->hdev_obj;
+
+ status = dev_get_cod_mgr(dev_obj, &cod_mgr);
+
+ if (cod_mgr) {
+ /* Look for SYS_PUTCBEG/SYS_PUTCEND */
+ status =
+ cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
+ } else {
+ status = -EFAULT;
+ }
+ if (!status)
+ status =
+ cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
+
+ if (!status)
+ /* trace_cur_pos will hold the address of a DSP pointer */
+ status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
+ &trace_cur_pos);
+
+ if (status)
+ goto func_end;
+
+ ul_num_bytes = (ul_trace_end - ul_trace_begin);
+
+ ul_num_words = ul_num_bytes * ul_word_size;
+ status = dev_get_intf_fxns(dev_obj, &intf_fxns);
+
+ if (status)
+ goto func_end;
+
+ psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
+ if (psz_buf != NULL) {
+ /* Read trace buffer data */
+ status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+ (u8 *)psz_buf, (u32)ul_trace_begin,
+ ul_num_bytes, 0);
+
+ if (status)
+ goto func_end;
+
+ /* Pack and do newline conversion */
+ pr_debug("PrintDspTraceBuffer: "
+ "before pack and unpack.\n");
+ pr_debug("%s: DSP Trace Buffer Begin:\n"
+ "=======================\n%s\n",
+ __func__, psz_buf);
+
+ /* Read the value at the DSP address in trace_cur_pos. */
+ status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+ (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
+ 4, 0);
+ if (status)
+ goto func_end;
+ /* Pack and do newline conversion */
+ pr_info("DSP Trace Buffer Begin:\n"
+ "=======================\n%s\n",
+ psz_buf);
+
+ /* convert to offset */
+ trace_cur_pos = trace_cur_pos - ul_trace_begin;
+
+ if (ul_num_bytes) {
+ /*
+			 * The buffer is not full; find the end of the
+			 * data -- buf_end will be >= psz_buf after
+			 * the loop.
+ */
+ buf_end = &psz_buf[ul_num_bytes+1];
+ /* DSP print position */
+ trace_end = &psz_buf[trace_cur_pos];
+
+ /*
+ * Search buffer for a new_line and replace it
+ * with '\0', then print as string.
+ * Continue until end of buffer is reached.
+ */
+ str_beg = trace_end;
+ ul_num_bytes = buf_end - str_beg;
+
+ while (str_beg < buf_end) {
+ new_line = strnchr(str_beg, ul_num_bytes,
+ '\n');
+ if (new_line && new_line < buf_end) {
+ *new_line = 0;
+ pr_debug("%s\n", str_beg);
+ str_beg = ++new_line;
+ ul_num_bytes = buf_end - str_beg;
+ } else {
+ /*
+ * Assume buffer empty if it contains
+ * a zero
+ */
+ if (*str_beg != '\0') {
+ str_beg[ul_num_bytes] = 0;
+ pr_debug("%s\n", str_beg);
+ }
+ str_beg = buf_end;
+ ul_num_bytes = 0;
+ }
+ }
+ /*
+			 * Search buffer for a new_line and replace it
+ * with '\0', then print as string.
+ * Continue until buffer is exhausted.
+ */
+ str_beg = psz_buf;
+ ul_num_bytes = trace_end - str_beg;
+
+ while (str_beg < trace_end) {
+ new_line = strnchr(str_beg, ul_num_bytes, '\n');
+ if (new_line != NULL && new_line < trace_end) {
+ *new_line = 0;
+ pr_debug("%s\n", str_beg);
+ str_beg = ++new_line;
+ ul_num_bytes = trace_end - str_beg;
+ } else {
+ /*
+ * Assume buffer empty if it contains
+ * a zero
+ */
+ if (*str_beg != '\0') {
+ str_beg[ul_num_bytes] = 0;
+ pr_debug("%s\n", str_beg);
+ }
+ str_beg = trace_end;
+ ul_num_bytes = 0;
+ }
+ }
+ }
+ pr_info("\n=======================\n"
+ "DSP Trace Buffer End:\n");
+ kfree(psz_buf);
+ } else {
+ status = -ENOMEM;
+ }
+func_end:
+ if (status)
+ dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
+ return status;
+}
+
+/**
+ * dump_dsp_stack() - This function dumps the data on the DSP stack.
+ * @bridge_context: Bridge driver's device context pointer.
+ *
+ */
+int dump_dsp_stack(struct bridge_dev_context *bridge_context)
+{
+ int status = 0;
+ struct cod_manager *code_mgr;
+ struct node_mgr *node_mgr;
+ u32 trace_begin;
+ char name[256];
+ struct {
+ u32 head[2];
+ u32 size;
+ } mmu_fault_dbg_info;
+ u32 *buffer;
+ u32 *buffer_beg;
+ u32 *buffer_end;
+ u32 exc_type;
+ u32 dyn_ext_base;
+ u32 i;
+ u32 offset_output;
+ u32 total_size;
+ u32 poll_cnt;
+ const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
+ "IRP", "NRP", "AMR", "SSR",
+ "ILC", "RILC", "IER", "CSR"};
+ const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *dev_object = bridge_context->hdev_obj;
+
+ status = dev_get_cod_mgr(dev_object, &code_mgr);
+ if (!code_mgr) {
+ pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
+ status = -EFAULT;
+ }
+
+ if (!status) {
+ status = dev_get_node_manager(dev_object, &node_mgr);
+ if (!node_mgr) {
+ pr_debug("%s: Failed on dev_get_node_manager.\n",
+ __func__);
+ status = -EFAULT;
+ }
+ }
+
+ if (!status) {
+ /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
+ status =
+ cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
+ pr_debug("%s: trace_begin Value 0x%x\n",
+ __func__, trace_begin);
+ if (status)
+ pr_debug("%s: Failed on cod_get_sym_value.\n",
+ __func__);
+ }
+ if (!status)
+ status = dev_get_intf_fxns(dev_object, &intf_fxns);
+ /*
+ * Check for the "magic number" in the trace buffer. If it has
+ * yet to appear then poll the trace buffer to wait for it. Its
+ * appearance signals that the DSP has finished dumping its state.
+ */
+ mmu_fault_dbg_info.head[0] = 0;
+ mmu_fault_dbg_info.head[1] = 0;
+ if (!status) {
+ poll_cnt = 0;
+ while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
+ mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
+ poll_cnt < POLL_MAX) {
+
+ /* Read DSP dump size from the DSP trace buffer... */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context,
+ (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
+ sizeof(mmu_fault_dbg_info), 0);
+
+ if (status)
+ break;
+
+ poll_cnt++;
+ }
+
+ if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 &&
+ mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
+ status = -ETIME;
+			pr_err("%s: No DSP MMU-Fault information available.\n",
+ __func__);
+ }
+ }
+
+ if (!status) {
+ total_size = mmu_fault_dbg_info.size;
+ /* Limit the size in case DSP went crazy */
+ if (total_size > MAX_MMU_DBGBUFF)
+ total_size = MAX_MMU_DBGBUFF;
+
+ buffer = kzalloc(total_size, GFP_ATOMIC);
+ if (!buffer) {
+ status = -ENOMEM;
+ pr_debug("%s: Failed to "
+ "allocate stack dump buffer.\n", __func__);
+ goto func_end;
+ }
+
+ buffer_beg = buffer;
+ buffer_end = buffer + total_size / 4;
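+		/* total_size is in bytes; the dump is walked as 32-bit words */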
+
+ /* Read bytes from the DSP trace buffer... */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context,
+ (u8 *)buffer, (u32)trace_begin,
+ total_size, 0);
+ if (status) {
+ pr_debug("%s: Failed to Read Trace Buffer.\n",
+ __func__);
+ goto func_end;
+ }
+
+		pr_err("\nApproximate Crash Position:\n"
+ "--------------------------\n");
+
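+		/* Word 3 of the dump holds the exception type; words 79/80
+		 * hold the saved IRP/NRP, used here as the crash address */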
+ exc_type = buffer[3];
+ if (!exc_type)
+ i = buffer[79]; /* IRP */
+ else
+ i = buffer[80]; /* NRP */
+
+ status =
+ cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
+ if (status) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
+ 0x1000, &offset_output, name) == 0))
+ pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
+ i - offset_output);
+ else
+ pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
+
+ buffer += 4;
+
+ pr_err("\nExecution Info:\n"
+ "---------------\n");
+
+ if (*buffer < ARRAY_SIZE(exec_ctxt)) {
+ pr_err("Execution context \t%s\n",
+ exec_ctxt[*buffer++]);
+ } else {
+ pr_err("Execution context corrupt\n");
+ kfree(buffer_beg);
+ return -EFAULT;
+ }
+ pr_err("Task Handle\t\t0x%x\n", *buffer++);
+ pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
+ pr_err("Stack Top\t\t0x%x\n", *buffer++);
+ pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
+ pr_err("Stack Size\t\t0x%x\n", *buffer++);
+ pr_err("Stack Size In Use\t0x%x\n", *buffer++);
+
+ pr_err("\nCPU Registers\n"
+ "---------------\n");
+
+ for (i = 0; i < 32; i++) {
+ if (i == 4 || i == 6 || i == 8)
+ pr_err("A%d 0x%-8x [Function Argument %d]\n",
+ i, *buffer++, i-3);
+ else if (i == 15)
+ pr_err("A15 0x%-8x [Frame Pointer]\n",
+ *buffer++);
+ else
+ pr_err("A%d 0x%x\n", i, *buffer++);
+ }
+
+ pr_err("\nB0 0x%x\n", *buffer++);
+ pr_err("B1 0x%x\n", *buffer++);
+ pr_err("B2 0x%x\n", *buffer++);
+
+ if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
+ *buffer, 0x1000, &offset_output, name) == 0))
+ pr_err("B3 0x%-8x [Function Return Pointer:"
+ " \"%s\" + 0x%x]\n", *buffer, name,
+ *buffer - offset_output);
+ else
+ pr_err("B3 0x%-8x [Function Return Pointer:"
+ "Unable to match to a symbol.]\n", *buffer);
+
+ buffer++;
+
+ for (i = 4; i < 32; i++) {
+ if (i == 4 || i == 6 || i == 8)
+ pr_err("B%d 0x%-8x [Function Argument %d]\n",
+ i, *buffer++, i-2);
+ else if (i == 14)
+ pr_err("B14 0x%-8x [Data Page Pointer]\n",
+ *buffer++);
+ else
+ pr_err("B%d 0x%x\n", i, *buffer++);
+ }
+
+ pr_err("\n");
+
+ for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
+ pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
+
+ pr_err("\nStack:\n"
+ "------\n");
+
+ for (i = 0; buffer < buffer_end; i++, buffer++) {
+ if ((*buffer > dyn_ext_base) && (
+				node_find_addr(node_mgr, *buffer, 0x600,
+ &offset_output, name) == 0))
+ pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
+ i, *buffer, name,
+ *buffer - offset_output);
+ else
+ pr_err("[%d] 0x%x\n", i, *buffer);
+ }
+ kfree(buffer_beg);
+ }
+func_end:
+ return status;
+}
+
+/**
+ * dump_dl_modules() - This function dumps the _DLModules loaded on the DSP side
+ * @bridge_context: Bridge driver's device context pointer.
+ *
+ */
+void dump_dl_modules(struct bridge_dev_context *bridge_context)
+{
+ struct cod_manager *code_mgr;
+ struct bridge_drv_interface *intf_fxns;
+ struct bridge_dev_context *bridge_ctxt = bridge_context;
+ struct dev_object *dev_object = bridge_ctxt->hdev_obj;
+ struct modules_header modules_hdr;
+ struct dll_module *module_struct = NULL;
+ u32 module_dsp_addr;
+ u32 module_size;
+ u32 module_struct_size = 0;
+ u32 sect_ndx;
+	char *sect_str;
+ int status = 0;
+
+ status = dev_get_intf_fxns(dev_object, &intf_fxns);
+ if (status) {
+ pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
+ goto func_end;
+ }
+
+ status = dev_get_cod_mgr(dev_object, &code_mgr);
+ if (!code_mgr) {
+ pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ /* Lookup the address of the modules_header structure */
+ status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
+ if (status) {
+ pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
+ __func__);
+ goto func_end;
+ }
+
+ pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
+
+ /* Copy the modules_header structure from DSP memory. */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
+ (u32) module_dsp_addr, sizeof(modules_hdr), 0);
+
+ if (status) {
+		pr_debug("%s: Failed to read modules header.\n",
+ __func__);
+ goto func_end;
+ }
+
+ module_dsp_addr = modules_hdr.first_module;
+ module_size = modules_hdr.first_module_size;
+
+ pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
+ module_size);
+
+ pr_err("\nDynamically Loaded Modules:\n"
+ "---------------------------\n");
+
+ /* For each dll_module structure in the list... */
+ while (module_size) {
+ /*
+ * Allocate/re-allocate memory to hold the dll_module
+ * structure. The memory is re-allocated only if the existing
+ * allocation is too small.
+ */
+ if (module_size > module_struct_size) {
+ kfree(module_struct);
+ module_struct = kzalloc(module_size+128, GFP_ATOMIC);
+ module_struct_size = module_size+128;
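+			/* The 128 bytes of slack let slightly larger modules
+			 * reuse this buffer without reallocating */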
+ pr_debug("%s: allocated module struct %p %d\n",
+ __func__, module_struct, module_struct_size);
+ if (!module_struct)
+ goto func_end;
+ }
+ /* Copy the dll_module structure from DSP memory */
+ status = (*intf_fxns->pfn_brd_read)(bridge_context,
+ (u8 *)module_struct, module_dsp_addr, module_size, 0);
+
+ if (status) {
+ pr_debug(
+				"%s: Failed to read dll_module struct for 0x%x.\n",
+ __func__, module_dsp_addr);
+ break;
+ }
+
+ /* Update info regarding the _next_ module in the list. */
+ module_dsp_addr = module_struct->next_module;
+ module_size = module_struct->next_module_size;
+
+ pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
+ __func__, module_dsp_addr, module_size,
+ module_struct->num_sects);
+
+ /*
+		 * The section name strings start immediately following
+ * the array of dll_sect structures.
+ */
+ sect_str = (char *) &module_struct->
+ sects[module_struct->num_sects];
+ pr_err("%s\n", sect_str);
+
+ /*
+ * Advance to the first section name string.
+ * Each string follows the one before.
+ */
+ sect_str += strlen(sect_str) + 1;
+
+ /* Access each dll_sect structure and its name string. */
+ for (sect_ndx = 0;
+ sect_ndx < module_struct->num_sects; sect_ndx++) {
+ pr_err(" Section: 0x%x ",
+ module_struct->sects[sect_ndx].sect_load_adr);
+
+ if (((u32) sect_str - (u32) module_struct) <
+ module_struct_size) {
+ pr_err("%s\n", sect_str);
+ /* Each string follows the one before. */
+ sect_str += strlen(sect_str)+1;
+ } else {
+ pr_err("<string error>\n");
+				pr_debug("%s: section name string address "
+ "is invalid %p\n", __func__, sect_str);
+ }
+ }
+ }
+func_end:
+ kfree(module_struct);
+}
+#endif
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
new file mode 100644
index 00000000000..87712e24dfb
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -0,0 +1,673 @@
+/*
+ * msg_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge functions for Bridge message module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/list.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- This */
+#include <_msg_sm.h>
+#include <dspbridge/dspmsg.h>
+
+/* ----------------------------------- Function Prototypes */
+static int add_new_msg(struct lst_list *msg_list);
+static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
+static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
+static void free_msg_list(struct lst_list *msg_list);
+
+/*
+ * ======== bridge_msg_create ========
+ * Create an object to manage message queues. Only one of these objects
+ * can exist per device object.
+ */
+int bridge_msg_create(struct msg_mgr **msg_man,
+ struct dev_object *hdev_obj,
+ msg_onexit msg_callback)
+{
+ struct msg_mgr *msg_mgr_obj;
+ struct io_mgr *hio_mgr;
+ int status = 0;
+
+ if (!msg_man || !msg_callback || !hdev_obj) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_io_mgr(hdev_obj, &hio_mgr);
+ if (!hio_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ *msg_man = NULL;
+ /* Allocate msg_ctrl manager object */
+ msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
+
+ if (msg_mgr_obj) {
+ msg_mgr_obj->on_exit = msg_callback;
+ msg_mgr_obj->hio_mgr = hio_mgr;
+ /* List of MSG_QUEUEs */
+ msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ /* Queues of message frames for messages to the DSP. Message
+ * frames will only be added to the free queue when a
+ * msg_queue object is created. */
+ msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (msg_mgr_obj->queue_list == NULL ||
+ msg_mgr_obj->msg_free_list == NULL ||
+ msg_mgr_obj->msg_used_list == NULL) {
+ status = -ENOMEM;
+ } else {
+ INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
+ INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
+ INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
+ spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
+ }
+
+ /* Create an event to be used by bridge_msg_put() in waiting
+ * for an available free frame from the message manager. */
+ msg_mgr_obj->sync_event =
+ kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (!msg_mgr_obj->sync_event)
+ status = -ENOMEM;
+ else
+ sync_init_event(msg_mgr_obj->sync_event);
+
+ if (!status)
+ *msg_man = msg_mgr_obj;
+ else
+ delete_msg_mgr(msg_mgr_obj);
+
+ } else {
+ status = -ENOMEM;
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_create_queue ========
+ * Create a msg_queue for sending/receiving messages to/from a node
+ * on the DSP.
+ */
+int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
+ struct msg_queue **msgq,
+ u32 msgq_id, u32 max_msgs, void *arg)
+{
+ u32 i;
+ u32 num_allocated = 0;
+ struct msg_queue *msg_q;
+ int status = 0;
+
+ if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ *msgq = NULL;
+ /* Allocate msg_queue object */
+ msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
+ if (!msg_q) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ lst_init_elem((struct list_head *)msg_q);
+ msg_q->max_msgs = max_msgs;
+ msg_q->hmsg_mgr = hmsg_mgr;
+ msg_q->arg = arg; /* Node handle */
+ msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
+ /* Queues of Message frames for messages from the DSP */
+ msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+ msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+ if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
+ status = -ENOMEM;
+ else {
+ INIT_LIST_HEAD(&msg_q->msg_free_list->head);
+ INIT_LIST_HEAD(&msg_q->msg_used_list->head);
+ }
+
+ /* Create event that will be signalled when a message from
+ * the DSP is available. */
+ if (!status) {
+ msg_q->sync_event = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (msg_q->sync_event)
+ sync_init_event(msg_q->sync_event);
+ else
+ status = -ENOMEM;
+ }
+
+ /* Create a notification list for message ready notification. */
+ if (!status) {
+ msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+ GFP_KERNEL);
+ if (msg_q->ntfy_obj)
+ ntfy_init(msg_q->ntfy_obj);
+ else
+ status = -ENOMEM;
+ }
+
+ /* Create events that will be used to synchronize cleanup
+ * when the object is deleted. sync_done will be set to
+ * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
+ * will be set by the unblocked thread to signal that it
+ * is unblocked and will no longer reference the object. */
+ if (!status) {
+ msg_q->sync_done = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (msg_q->sync_done)
+ sync_init_event(msg_q->sync_done);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (msg_q->sync_done_ack)
+ sync_init_event(msg_q->sync_done_ack);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Initialize message frames and put in appropriate queues */
+ for (i = 0; i < max_msgs && !status; i++) {
+ status = add_new_msg(hmsg_mgr->msg_free_list);
+ if (!status) {
+ num_allocated++;
+ status = add_new_msg(msg_q->msg_free_list);
+ }
+ }
+ if (status) {
+ /* Stay inside CS to prevent others from taking any
+ * of the newly allocated message frames. */
+ delete_msg_queue(msg_q, num_allocated);
+ } else {
+ lst_put_tail(hmsg_mgr->queue_list,
+ (struct list_head *)msg_q);
+ *msgq = msg_q;
+ /* Signal that free frames are now available */
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_set_event(hmsg_mgr->sync_event);
+
+ }
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ } else {
+ delete_msg_queue(msg_q, 0);
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_delete ========
+ * Delete a msg_ctrl manager allocated in bridge_msg_create().
+ */
+void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
+{
+ if (hmsg_mgr)
+ delete_msg_mgr(hmsg_mgr);
+}
+
+/*
+ * ======== bridge_msg_delete_queue ========
+ * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
+ */
+void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
+{
+ struct msg_mgr *hmsg_mgr;
+ u32 io_msg_pend;
+
+ if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
+ goto func_end;
+
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ msg_queue_obj->done = true;
+ /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
+ io_msg_pend = msg_queue_obj->io_msg_pend;
+ while (io_msg_pend) {
+ /* Unblock thread */
+ sync_set_event(msg_queue_obj->sync_done);
+ /* Wait for acknowledgement */
+ sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
+ io_msg_pend = msg_queue_obj->io_msg_pend;
+ }
+ /* Remove message queue from hmsg_mgr->queue_list */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ lst_remove_elem(hmsg_mgr->queue_list,
+ (struct list_head *)msg_queue_obj);
+ /* Free the message queue object */
+ delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
+ if (!hmsg_mgr->msg_free_list)
+ goto func_cont;
+ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_reset_event(hmsg_mgr->sync_event);
+func_cont:
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+func_end:
+ return;
+}
+
+/*
+ * ======== bridge_msg_get ========
+ * Get a message from a msg_ctrl queue.
+ */
+int bridge_msg_get(struct msg_queue *msg_queue_obj,
+ struct dsp_msg *pmsg, u32 utimeout)
+{
+ struct msg_frame *msg_frame_obj;
+ struct msg_mgr *hmsg_mgr;
+ bool got_msg = false;
+ struct sync_object *syncs[2];
+ u32 index;
+ int status = 0;
+
+ if (!msg_queue_obj || pmsg == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ if (!msg_queue_obj->msg_used_list) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* If a message is already there, get it */
+ if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
+ msg_frame_obj = (struct msg_frame *)
+ lst_get_head(msg_queue_obj->msg_used_list);
+ if (msg_frame_obj != NULL) {
+ *pmsg = msg_frame_obj->msg_data.msg;
+ lst_put_tail(msg_queue_obj->msg_free_list,
+ (struct list_head *)msg_frame_obj);
+ if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
+ sync_reset_event(msg_queue_obj->sync_event);
+
+ got_msg = true;
+ }
+ } else {
+ if (msg_queue_obj->done)
+ status = -EPERM;
+ else
+ msg_queue_obj->io_msg_pend++;
+
+ }
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (!status && !got_msg) {
+		/* Wait until a message is available, timeout, or done. We don't
+ * have to schedule the DPC, since the DSP will send messages
+ * when they are available. */
+ syncs[0] = msg_queue_obj->sync_event;
+ syncs[1] = msg_queue_obj->sync_done;
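+		/* syncs[0]: a message arrived; syncs[1]: queue being deleted */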
+ status = sync_wait_on_multiple_events(syncs, 2, utimeout,
+ &index);
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (msg_queue_obj->done) {
+ msg_queue_obj->io_msg_pend--;
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Signal that we're not going to access msg_queue_obj
+ * anymore, so it can be deleted. */
+ (void)sync_set_event(msg_queue_obj->sync_done_ack);
+ status = -EPERM;
+ } else {
+ if (!status) {
+ DBC_ASSERT(!LST_IS_EMPTY
+ (msg_queue_obj->msg_used_list));
+ /* Get msg from used list */
+ msg_frame_obj = (struct msg_frame *)
+ lst_get_head(msg_queue_obj->msg_used_list);
+ /* Copy message into pmsg and put frame on the
+ * free list */
+ if (msg_frame_obj != NULL) {
+ *pmsg = msg_frame_obj->msg_data.msg;
+ lst_put_tail
+ (msg_queue_obj->msg_free_list,
+ (struct list_head *)
+ msg_frame_obj);
+ }
+ }
+ msg_queue_obj->io_msg_pend--;
+ /* Reset the event if there are still queued messages */
+ if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
+ sync_set_event(msg_queue_obj->sync_event);
+
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_put ========
+ * Put a message onto a msg_ctrl queue.
+ */
+int bridge_msg_put(struct msg_queue *msg_queue_obj,
+ const struct dsp_msg *pmsg, u32 utimeout)
+{
+ struct msg_frame *msg_frame_obj;
+ struct msg_mgr *hmsg_mgr;
+ bool put_msg = false;
+ struct sync_object *syncs[2];
+ u32 index;
+ int status = 0;
+
+ if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ if (!hmsg_mgr->msg_free_list) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+
+ /* If a message frame is available, use it */
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+ msg_frame_obj =
+ (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
+ if (msg_frame_obj != NULL) {
+ msg_frame_obj->msg_data.msg = *pmsg;
+ msg_frame_obj->msg_data.msgq_id =
+ msg_queue_obj->msgq_id;
+ lst_put_tail(hmsg_mgr->msg_used_list,
+ (struct list_head *)msg_frame_obj);
+ hmsg_mgr->msgs_pending++;
+ put_msg = true;
+ }
+ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_reset_event(hmsg_mgr->sync_event);
+
+ /* Release critical section before scheduling DPC */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Schedule a DPC, to do the actual data transfer: */
+ iosm_schedule(hmsg_mgr->hio_mgr);
+ } else {
+ if (msg_queue_obj->done)
+ status = -EPERM;
+ else
+ msg_queue_obj->io_msg_pend++;
+
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ }
+ if (!status && !put_msg) {
+		/* Wait until a free message frame is available, timeout,
+ * or done */
+ syncs[0] = hmsg_mgr->sync_event;
+ syncs[1] = msg_queue_obj->sync_done;
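+		/* syncs[0]: a frame was freed; syncs[1]: queue being deleted */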
+ status = sync_wait_on_multiple_events(syncs, 2, utimeout,
+ &index);
+ if (status)
+ goto func_end;
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (msg_queue_obj->done) {
+ msg_queue_obj->io_msg_pend--;
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Signal that we're not going to access msg_queue_obj
+ * anymore, so it can be deleted. */
+ (void)sync_set_event(msg_queue_obj->sync_done_ack);
+ status = -EPERM;
+ } else {
+ if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+ /* Get msg from free list */
+ msg_frame_obj = (struct msg_frame *)
+ lst_get_head(hmsg_mgr->msg_free_list);
+ /*
+ * Copy message into pmsg and put frame on the
+ * used list.
+ */
+ if (msg_frame_obj) {
+ msg_frame_obj->msg_data.msg = *pmsg;
+ msg_frame_obj->msg_data.msgq_id =
+ msg_queue_obj->msgq_id;
+ lst_put_tail(hmsg_mgr->msg_used_list,
+ (struct list_head *)msg_frame_obj);
+ hmsg_mgr->msgs_pending++;
+ /*
+ * Schedule a DPC, to do the actual
+ * data transfer.
+ */
+ iosm_schedule(hmsg_mgr->hio_mgr);
+ }
+
+ msg_queue_obj->io_msg_pend--;
+ /* Reset event if there are still frames available */
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+ sync_set_event(hmsg_mgr->sync_event);
+func_cont:
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_register_notify ========
+ */
+int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ int status = 0;
+
+ if (!msg_queue_obj || !hnotification) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
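+	/* Only DSP_NODEMESSAGEREADY is supported; an event_mask of 0 means
+	 * unregister */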
+ if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
+ status = -EPERM;
+ goto func_end;
+ }
+
+ if (notify_type != DSP_SIGNALEVENT) {
+ status = -EBADR;
+ goto func_end;
+ }
+
+ if (event_mask)
+ status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
+ event_mask, notify_type);
+ else
+ status = ntfy_unregister(msg_queue_obj->ntfy_obj,
+ hnotification);
+
+ if (status == -EINVAL) {
+ /* Not registered. Ok, since we couldn't have known. Node
+ * notifications are split between node state change handled
+ * by NODE, and message ready handled by msg_ctrl. */
+ status = 0;
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_msg_set_queue_id ========
+ */
+void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
+{
+ /*
+ * A message queue must be created when a node is allocated,
+ * so that node_register_notify() can be called before the node
+ * is created. Since we don't know the node environment until the
+ * node is created, we need this function to set msg_queue_obj->msgq_id
+ * to the node environment, after the node is created.
+ */
+ if (msg_queue_obj)
+ msg_queue_obj->msgq_id = msgq_id;
+}
+
+/*
+ * ======== add_new_msg ========
+ * Must be called in message manager critical section.
+ */
+static int add_new_msg(struct lst_list *msg_list)
+{
+ struct msg_frame *pmsg;
+ int status = 0;
+
+ pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
+ if (pmsg != NULL) {
+ lst_init_elem((struct list_head *)pmsg);
+ lst_put_tail(msg_list, (struct list_head *)pmsg);
+ } else {
+ status = -ENOMEM;
+ }
+
+ return status;
+}
+
+/*
+ * ======== delete_msg_mgr ========
+ */
+static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
+{
+ if (!hmsg_mgr)
+ goto func_end;
+
+ if (hmsg_mgr->queue_list) {
+ if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
+ kfree(hmsg_mgr->queue_list);
+ hmsg_mgr->queue_list = NULL;
+ }
+ }
+
+ if (hmsg_mgr->msg_free_list) {
+ free_msg_list(hmsg_mgr->msg_free_list);
+ hmsg_mgr->msg_free_list = NULL;
+ }
+
+ if (hmsg_mgr->msg_used_list) {
+ free_msg_list(hmsg_mgr->msg_used_list);
+ hmsg_mgr->msg_used_list = NULL;
+ }
+
+ kfree(hmsg_mgr->sync_event);
+
+ kfree(hmsg_mgr);
+func_end:
+ return;
+}
+
+/*
+ * ======== delete_msg_queue ========
+ */
+static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
+{
+ struct msg_mgr *hmsg_mgr;
+ struct msg_frame *pmsg;
+ u32 i;
+
+ if (!msg_queue_obj ||
+ !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
+ goto func_end;
+
+ hmsg_mgr = msg_queue_obj->hmsg_mgr;
+
+ /* Pull off num_to_dsp message frames from Msg manager and free */
+ for (i = 0; i < num_to_dsp; i++) {
+
+ if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+ pmsg = (struct msg_frame *)
+ lst_get_head(hmsg_mgr->msg_free_list);
+ kfree(pmsg);
+ } else {
+ /* Cannot free all of the message frames */
+ break;
+ }
+ }
+
+ if (msg_queue_obj->msg_free_list) {
+ free_msg_list(msg_queue_obj->msg_free_list);
+ msg_queue_obj->msg_free_list = NULL;
+ }
+
+ if (msg_queue_obj->msg_used_list) {
+ free_msg_list(msg_queue_obj->msg_used_list);
+ msg_queue_obj->msg_used_list = NULL;
+ }
+
+ if (msg_queue_obj->ntfy_obj) {
+ ntfy_delete(msg_queue_obj->ntfy_obj);
+ kfree(msg_queue_obj->ntfy_obj);
+ }
+
+ kfree(msg_queue_obj->sync_event);
+ kfree(msg_queue_obj->sync_done);
+ kfree(msg_queue_obj->sync_done_ack);
+
+ kfree(msg_queue_obj);
+func_end:
+ return;
+
+}
+
+/*
+ * ======== free_msg_list ========
+ */
+static void free_msg_list(struct lst_list *msg_list)
+{
+ struct msg_frame *pmsg;
+
+ if (!msg_list)
+ goto func_end;
+
+ while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
+ kfree(pmsg);
+
+ DBC_ASSERT(LST_IS_EMPTY(msg_list));
+
+ kfree(msg_list);
+func_end:
+ return;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
new file mode 100644
index 00000000000..f914829c70f
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -0,0 +1,1802 @@
+/*
+ * tiomap.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Processor Manager Driver for TI OMAP3430 EVM.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <plat/control.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/sync.h>
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/* ----------------------------------- Link Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspchnl.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dspio.h>
+#include <dspbridge/dspmsg.h>
+#include <dspbridge/pwr.h>
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
+#include <dspbridge/wdt.h>
+
+/* ----------------------------------- Local */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include "tiomap_io.h"
+
+/* Offset in shared mem to write to in order to synchronize start with DSP */
+#define SHMSYNCOFFSET 4 /* GPP byte offset */
+
+#define BUFFERSIZE 1024
+
+#define TIHELEN_ACKTIMEOUT 10000
+
+#define MMU_SECTION_ADDR_MASK 0xFFF00000
+#define MMU_SSECTION_ADDR_MASK 0xFF000000
+#define MMU_LARGE_PAGE_MASK 0xFFFF0000
+#define MMU_SMALL_PAGE_MASK 0xFFFFF000
+#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
+#define PAGES_II_LVL_TABLE 512
+#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
+
+/* Forward Declarations: */
+static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
+static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff,
+ u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type);
+static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr);
+static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
+ int *board_state);
+static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
+static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff,
+ u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type);
+static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
+ u32 brd_state);
+static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_dest_addr, u32 dsp_src_addr,
+ u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes, u32 ul_map_attr,
+ struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+ u32 virt_addr, u32 ul_num_bytes);
+static int bridge_dev_create(struct bridge_dev_context
+ **dev_cntxt,
+ struct dev_object *hdev_obj,
+ struct cfg_hostres *config_param);
+static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
+ u32 dw_cmd, void *pargs);
+static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+ u32 va, u32 size,
+ struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+ u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes,
+ struct hw_mmu_map_attrs_t *hw_attrs);
+
+bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
+
+/* ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+ u32 num_entries; /* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+ spinlock_t pg_lock; /* Critical section object handle */
+
+ u32 l1_base_pa; /* Physical address of the L1 PT */
+ u32 l1_base_va; /* Virtual address of the L1 PT */
+ u32 l1_size; /* Size of the L1 PT */
+ u32 l1_tbl_alloc_pa;
+ /* Physical address of Allocated mem for L1 table. May not be aligned */
+ u32 l1_tbl_alloc_va;
+ /* Virtual address of Allocated mem for L1 table. May not be aligned */
+ u32 l1_tbl_alloc_sz;
+ /* Size of consistent memory allocated for L1 table.
+ * May not be aligned */
+
+ u32 l2_base_pa; /* Physical address of the L2 PT */
+ u32 l2_base_va; /* Virtual address of the L2 PT */
+ u32 l2_size; /* Size of the L2 PT */
+ u32 l2_tbl_alloc_pa;
+ /* Physical address of Allocated mem for L2 table. May not be aligned */
+ u32 l2_tbl_alloc_va;
+ /* Virtual address of Allocated mem for L2 table. May not be aligned */
+ u32 l2_tbl_alloc_sz;
+ /* Size of consistent memory allocated for L2 table.
+ * May not be aligned */
+
+ u32 l2_num_pages; /* Number of allocated L2 PT */
+ /* Array [l2_num_pages] of L2 PT info structs */
+ struct page_info *pg_info;
+};
+
+/*
+ * This Bridge driver's function interface table.
+ */
+static struct bridge_drv_interface drv_interface_fxns = {
+ /* Bridge API ver. for which this bridge driver is built. */
+ BRD_API_MAJOR_VERSION,
+ BRD_API_MINOR_VERSION,
+ bridge_dev_create,
+ bridge_dev_destroy,
+ bridge_dev_ctrl,
+ bridge_brd_monitor,
+ bridge_brd_start,
+ bridge_brd_stop,
+ bridge_brd_status,
+ bridge_brd_read,
+ bridge_brd_write,
+ bridge_brd_set_state,
+ bridge_brd_mem_copy,
+ bridge_brd_mem_write,
+ bridge_brd_mem_map,
+ bridge_brd_mem_un_map,
+ /* The following CHNL functions are provided by chnl_io.lib: */
+ bridge_chnl_create,
+ bridge_chnl_destroy,
+ bridge_chnl_open,
+ bridge_chnl_close,
+ bridge_chnl_add_io_req,
+ bridge_chnl_get_ioc,
+ bridge_chnl_cancel_io,
+ bridge_chnl_flush_io,
+ bridge_chnl_get_info,
+ bridge_chnl_get_mgr_info,
+ bridge_chnl_idle,
+ bridge_chnl_register_notify,
+ /* The following IO functions are provided by chnl_io.lib: */
+ bridge_io_create,
+ bridge_io_destroy,
+ bridge_io_on_loaded,
+ bridge_io_get_proc_load,
+ /* The following msg_ctrl functions are provided by chnl_io.lib: */
+ bridge_msg_create,
+ bridge_msg_create_queue,
+ bridge_msg_delete,
+ bridge_msg_delete_queue,
+ bridge_msg_get,
+ bridge_msg_put,
+ bridge_msg_register_notify,
+ bridge_msg_set_queue_id,
+};
+
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
+ if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+ dev_context->dw_brd_state == BRD_HIBERNATION)
+ wake_dsp(dev_context, NULL);
+
+ hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+ pr_emerg("Bad page state in process '%s'\n"
+ "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+ "Backtrace:\n",
+ current->comm, pg, (int)(2 * sizeof(unsigned long)),
+ (unsigned long)pg->flags, pg->mapping,
+ page_mapcount(pg), page_count(pg));
+ dump_stack();
+}
+
+/*
+ * ======== bridge_drv_entry ========
+ * purpose:
+ * Bridge Driver entry point.
+ */
+void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
+ const char *driver_file_name)
+{
+
+ DBC_REQUIRE(driver_file_name != NULL);
+
+ io_sm_init(); /* Initialization of io_sm module */
+
+ if (strcmp(driver_file_name, "UMA") == 0)
+ *drv_intf = &drv_interface_fxns;
+ else
+ dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
+
+}
+
+/*
+ * ======== bridge_brd_monitor ========
+ * purpose:
+ *      Puts the DSP into a loadable state, i.e. the application can
+ *      load and start the device.
+ *
+ * Preconditions:
+ * Device in 'OFF' state.
+ */
+static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
+{
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 temp;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+ if (!(temp & 0x02)) {
+ /* IVA2 is not in ON state */
+ /* Read and set PM_PWSTCTRL_IVA2 to ON */
+ (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
+ PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
+ /* Set the SW supervised state transition */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+
+ /* Wait until the state has moved to ON */
+ while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_INTRANSITION_MASK)
+ ;
+ /* Disable Automatic transition */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+ }
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ dsp_clk_enable(DSP_CLK_IVA2);
+
+ /* set the device state to IDLE */
+ dev_context->dw_brd_state = BRD_IDLE;
+
+ return 0;
+}
+
+/*
+ * ======== bridge_brd_read ========
+ * purpose:
+ *      Reads buffers from DSP memory.
+ */
+static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 offset;
+ u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
+
+ if (dsp_addr < dev_context->dw_dsp_start_add) {
+ status = -EPERM;
+ return status;
+ }
+	/* Account for the three bands of the DSP internal memory */
+ if ((dsp_addr - dev_context->dw_dsp_start_add) <
+ dev_context->dw_internal_size) {
+ offset = dsp_addr - dev_context->dw_dsp_start_add;
+ } else {
+ status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
+ ul_num_bytes, mem_type);
+ return status;
+ }
+ /* copy the data from DSP memory, */
+ memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
+ return status;
+}
+
+/*
+ * ======== bridge_brd_set_state ========
+ * purpose:
+ * This routine updates the Board status.
+ */
+static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
+ u32 brd_state)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+
+ dev_context->dw_brd_state = brd_state;
+ return status;
+}
+
+/*
+ * ======== bridge_brd_start ========
+ * purpose:
+ * Initializes DSP MMU and Starts DSP.
+ *
+ * Preconditions:
+ * a) DSP domain is 'ACTIVE'.
+ * b) DSP_RST1 is asserted.
+ *      c) DSP_RST2 is released.
+ */
+static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 dw_sync_addr = 0;
+ u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
+ u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
+ u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
+ /* Offset of shm_base_virt from tlb_base_virt */
+ u32 ul_shm_offset_virt;
+ s32 entry_ndx;
+ s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
+ struct cfg_hostres *resources = NULL;
+ u32 temp;
+ u32 ul_dsp_clk_rate;
+ u32 ul_dsp_clk_addr;
+ u32 ul_bios_gp_timer;
+ u32 clk_cmd;
+ struct io_mgr *hio_mgr;
+ u32 ul_load_monitor_timer;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ /* The device context contains all the mmu setup info from when the
+ * last dsp base image was loaded. The first entry is always
+ * SHMMEM base. */
+ /* Get SHM_BEG - convert to byte address */
+ (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
+ &ul_shm_base_virt);
+ ul_shm_base_virt *= DSPWORDSIZE;
+ DBC_ASSERT(ul_shm_base_virt != 0);
+ /* DSP Virtual address */
+ ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
+ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+ ul_shm_offset_virt =
+ ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
+ /* Kernel logical address */
+ ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
+
+ DBC_ASSERT(ul_shm_base != 0);
+	/* 2nd word is used as the sync field */
+ dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
+ /* Write a signature into the shm base + offset; this will
+ * get cleared when the DSP program starts. */
+ if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
+ pr_err("%s: Illegal SM base\n", __func__);
+ status = -EPERM;
+ } else
+ __raw_writel(0xffffffff, dw_sync_addr);
+
+ if (!status) {
+ resources = dev_context->resources;
+ if (!resources)
+ status = -EPERM;
+
+		/* Assert RST1, i.e. reset only the DSP megacell */
+ if (!status) {
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
+ OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
+ OMAP2_RM_RSTCTRL);
+ /* Mask address with 1K for compatibility */
+ __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
+ OMAP343X_CTRL_REGADDR(
+ OMAP343X_CONTROL_IVA2_BOOTADDR));
+ /*
+ * Set bootmode to self loop if dsp_debug flag is true
+ */
+ __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
+ OMAP343X_CTRL_REGADDR(
+ OMAP343X_CONTROL_IVA2_BOOTMOD));
+ }
+ }
+ if (!status) {
+ /* Reset and Unreset the RST2, so that BOOTADDR is copied to
+ * IVA2 SYSC register */
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+ OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ udelay(100);
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ udelay(100);
+
+		/* Disable the DSP MMU */
+ hw_mmu_disable(resources->dw_dmmu_base);
+ /* Disable TWL */
+ hw_mmu_twl_disable(resources->dw_dmmu_base);
+
+ /* Only make TLB entry if both addresses are non-zero */
+ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+ entry_ndx++) {
+ struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+ struct hw_mmu_map_attrs_t map_attrs = {
+ .endianism = e->endianism,
+ .element_size = e->elem_size,
+ .mixed_size = e->mixed_mode,
+ };
+
+ if (!e->ul_gpp_pa || !e->ul_dsp_va)
+ continue;
+
+ dev_dbg(bridge,
+ "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+ itmp_entry_ndx,
+ e->ul_gpp_pa,
+ e->ul_dsp_va,
+ e->ul_size);
+
+ hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+ e->ul_gpp_pa,
+ e->ul_dsp_va,
+ e->ul_size,
+ itmp_entry_ndx,
+ &map_attrs, 1, 1);
+
+ itmp_entry_ndx++;
+ }
+ }
+
+ /* Lock the above TLB entries and get the BIOS and load monitor timer
+ * information */
+ if (!status) {
+ hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+ hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+ hw_mmu_ttb_set(resources->dw_dmmu_base,
+ dev_context->pt_attrs->l1_base_pa);
+ hw_mmu_twl_enable(resources->dw_dmmu_base);
+ /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
+
+ temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+ temp = (temp & 0xFFFFFFEF) | 0x11;
+ __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+
+ /* Let the DSP MMU run */
+ hw_mmu_enable(resources->dw_dmmu_base);
+
+ /* Enable the BIOS clock */
+ (void)dev_get_symbol(dev_context->hdev_obj,
+ BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
+ (void)dev_get_symbol(dev_context->hdev_obj,
+ BRIDGEINIT_LOADMON_GPTIMER,
+ &ul_load_monitor_timer);
+ }
+
+ if (!status) {
+ if (ul_load_monitor_timer != 0xFFFF) {
+ clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
+ ul_load_monitor_timer;
+ dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
+ } else {
+ dev_dbg(bridge, "Not able to get the symbol for Load "
+ "Monitor Timer\n");
+ }
+ }
+
+ if (!status) {
+ if (ul_bios_gp_timer != 0xFFFF) {
+ clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
+ ul_bios_gp_timer;
+ dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
+ } else {
+ dev_dbg(bridge,
+ "Not able to get the symbol for BIOS Timer\n");
+ }
+ }
+
+ if (!status) {
+ /* Set the DSP clock rate */
+ (void)dev_get_symbol(dev_context->hdev_obj,
+ "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
+		/* Set Autoidle Mode for IVA2 PLL */
+ (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+ OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+
+ if ((unsigned int *)ul_dsp_clk_addr != NULL) {
+ /* Get the clock rate */
+ ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
+ dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
+ __func__, ul_dsp_clk_rate);
+ (void)bridge_brd_write(dev_context,
+ (u8 *) &ul_dsp_clk_rate,
+ ul_dsp_clk_addr, sizeof(u32), 0);
+ }
+ /*
+ * Enable Mailbox events and also drain any pending
+ * stale messages.
+ */
+ dev_context->mbox = omap_mbox_get("dsp");
+ if (IS_ERR(dev_context->mbox)) {
+ dev_context->mbox = NULL;
+ pr_err("%s: Failed to get dsp mailbox handle\n",
+ __func__);
+ status = -EPERM;
+ }
+
+ }
+ if (!status) {
+ dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
+
+ /* PM_IVA2GRPSEL_PER = 0xC0 */
+ temp = readl(resources->dw_per_pm_base + 0xA8);
+ temp = (temp & 0xFFFFFF30) | 0xC0;
+ writel(temp, resources->dw_per_pm_base + 0xA8);
+
+ /* PM_MPUGRPSEL_PER &= 0xFFFFFF3F */
+ temp = readl(resources->dw_per_pm_base + 0xA4);
+ temp = (temp & 0xFFFFFF3F);
+ writel(temp, resources->dw_per_pm_base + 0xA4);
+ /* CM_SLEEPDEP_PER |= 0x04 */
+ temp = readl(resources->dw_per_base + 0x44);
+ temp = (temp & 0xFFFFFFFB) | 0x04;
+ writel(temp, resources->dw_per_base + 0x44);
+
+ /* CM_CLKSTCTRL_IVA2 = 0x00000003 - to allow automatic transitions */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+
+ /* Let DSP go */
+ dev_dbg(bridge, "%s Unreset\n", __func__);
+ /* Enable DSP MMU Interrupts */
+ hw_mmu_event_enable(resources->dw_dmmu_base,
+ HW_MMU_ALL_INTERRUPTS);
+ /* release the RST1, DSP starts executing now .. */
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+ dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
+ dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
+ if (dsp_debug)
+ while (__raw_readw(dw_sync_addr))
+ ;
+
+ /* Wait for DSP to clear word in shared memory */
+ /* Read the Location */
+ if (!wait_for_start(dev_context, dw_sync_addr))
+ status = -ETIMEDOUT;
+
+ /* Start wdt */
+ dsp_wdt_sm_set((void *)ul_shm_base);
+ dsp_wdt_enable(true);
+
+ status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ if (hio_mgr) {
+ io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
+ /* Write the synchronization bit to indicate the
+ * completion of OPP table update to DSP
+ */
+ __raw_writel(0xCAFECAFE, dw_sync_addr);
+
+ /* update board state */
+ dev_context->dw_brd_state = BRD_RUNNING;
+ /* (void)chnlsm_enable_interrupt(dev_context); */
+ } else {
+ dev_context->dw_brd_state = BRD_UNKNOWN;
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_brd_stop ========
+ * Purpose:
+ * Puts DSP in self loop.
+ *
+ * Preconditions:
+ * a) None
+ */
+static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ struct pg_table_attrs *pt_attrs;
+ u32 dsp_pwr_state;
+ int clk_status;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ if (dev_context->dw_brd_state == BRD_STOPPED)
+ return status;
+
+ /* As per the TRM, it is advised to first drive the IVA2 to 'Standby' mode
+ * before turning off the clocks, to ensure that there are no
+ * pending L3 or other transactions from the IVA2 */
+ dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+ if (dsp_pwr_state != PWRDM_POWER_OFF) {
+ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
+ mdelay(10);
+
+ /* IVA2 is not in OFF state */
+ /* Set PM_PWSTCTRL_IVA2 to OFF */
+ (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
+ PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
+ /* Set the SW supervised state transition for Sleep */
+ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
+ OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+ }
+ udelay(10);
+ /* Release the Ext Base virtual Address as the next DSP Program
+ * may have a different load address */
+ if (dev_context->dw_dsp_ext_base_addr)
+ dev_context->dw_dsp_ext_base_addr = 0;
+
+ dev_context->dw_brd_state = BRD_STOPPED; /* update board state */
+
+ dsp_wdt_enable(false);
+
+ /* This is a good place to clear the MMU page tables as well */
+ if (dev_context->pt_attrs) {
+ pt_attrs = dev_context->pt_attrs;
+ memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+ memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+ memset((u8 *) pt_attrs->pg_info, 0x00,
+ (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+ }
+ /* Disable the mailbox interrupts */
+ if (dev_context->mbox) {
+ omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
+ omap_mbox_put(dev_context->mbox);
+ dev_context->mbox = NULL;
+ }
+ /* Reset IVA2 clocks*/
+ (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+ OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+ clk_status = dsp_clk_disable(DSP_CLK_IVA2);
+
+ return status;
+}
+
+/*
+ * ======== bridge_brd_status ========
+ * Returns the board status.
+ */
+static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
+ int *board_state)
+{
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ *board_state = dev_context->dw_brd_state;
+ return 0;
+}
+
+/*
+ * ======== bridge_brd_write ========
+ * Copies the buffers to DSP internal or external memory.
+ */
+static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+
+ if (dsp_addr < dev_context->dw_dsp_start_add) {
+ status = -EPERM;
+ return status;
+ }
+ if ((dsp_addr - dev_context->dw_dsp_start_add) <
+ dev_context->dw_internal_size) {
+ status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
+ ul_num_bytes, mem_type);
+ } else {
+ status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
+ ul_num_bytes, mem_type, false);
+ }
+
+ return status;
+}
+
+/*
+ * ======== bridge_dev_create ========
+ * Creates a driver object. Puts DSP in self loop.
+ */
+static int bridge_dev_create(struct bridge_dev_context
+ **dev_cntxt,
+ struct dev_object *hdev_obj,
+ struct cfg_hostres *config_param)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = NULL;
+ s32 entry_ndx;
+ struct cfg_hostres *resources = config_param;
+ struct pg_table_attrs *pt_attrs;
+ u32 pg_tbl_pa;
+ u32 pg_tbl_va;
+ u32 align_size;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ /* Allocate and initialize a data structure to contain the bridge driver
+ * state, which becomes the context for later calls into this driver */
+ dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
+ if (!dev_context) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
+ dev_context->dw_self_loop = (u32) NULL;
+ dev_context->dsp_per_clks = 0;
+ dev_context->dw_internal_size = OMAP_DSP_SIZE;
+ /* Clear dev context MMU table entries.
+ * These are set by bridge_io_on_loaded() after the program is loaded. */
+ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
+ dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
+ dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
+ }
+ dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
+ (config_param->
+ dw_mem_base
+ [3]),
+ config_param->
+ dw_mem_length
+ [3]);
+ if (!dev_context->dw_dsp_base_addr)
+ status = -EPERM;
+
+ pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+ if (pt_attrs != NULL) {
+ /* Assuming that only the DSP memory map up to 0x4000:0000 is used,
+ * we need only 1024 L1 entries, i.e. an L1 size of 4K */
+ pt_attrs->l1_size = 0x1000;
+ align_size = pt_attrs->l1_size;
+ /* Alignment sizes are expected to be a power of 2;
+ * we want the allocation aligned on the L1 table size */
+ pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+ align_size, &pg_tbl_pa);
+
+ /* Check if the PA is aligned for us */
+ if ((pg_tbl_pa) & (align_size - 1)) {
+ /* PA not aligned to the page table size;
+ * allocate extra memory and align manually */
+ mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+ pt_attrs->l1_size);
+ /* we like to get aligned on L1 table size */
+ pg_tbl_va =
+ (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+ align_size, &pg_tbl_pa);
+ /* We should be able to get aligned table now */
+ pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+ pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+ pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+ /* Align the PA to the next 'align' boundary */
+ pt_attrs->l1_base_pa =
+ ((pg_tbl_pa) +
+ (align_size - 1)) & (~(align_size - 1));
+ pt_attrs->l1_base_va =
+ pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
+ } else {
+ /* We got aligned PA, cool */
+ pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+ pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+ pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+ pt_attrs->l1_base_pa = pg_tbl_pa;
+ pt_attrs->l1_base_va = pg_tbl_va;
+ }
+ if (pt_attrs->l1_base_va)
+ memset((u8 *) pt_attrs->l1_base_va, 0x00,
+ pt_attrs->l1_size);
+
+ /* Number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
+ * L4 pages */
+ pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+ pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+ pt_attrs->l2_num_pages;
+ align_size = 4; /* Make it u32 aligned */
+ /* Allocate the L2 page tables; u32 alignment is sufficient */
+ pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+ align_size, &pg_tbl_pa);
+ pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+ pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+ pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+ pt_attrs->l2_base_pa = pg_tbl_pa;
+ pt_attrs->l2_base_va = pg_tbl_va;
+
+ if (pt_attrs->l2_base_va)
+ memset((u8 *) pt_attrs->l2_base_va, 0x00,
+ pt_attrs->l2_size);
+
+ pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+ sizeof(struct page_info), GFP_KERNEL);
+ dev_dbg(bridge,
+ "L1 pa %x, va %x, size %x\n L2 pa %x, va "
+ "%x, size %x\n", pt_attrs->l1_base_pa,
+ pt_attrs->l1_base_va, pt_attrs->l1_size,
+ pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+ pt_attrs->l2_size);
+ dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+ pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+ }
+ if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+ (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+ dev_context->pt_attrs = pt_attrs;
+ else
+ status = -ENOMEM;
+
+ if (!status) {
+ spin_lock_init(&pt_attrs->pg_lock);
+ dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+ /* Set the Clock Divisor for the DSP module */
+ udelay(5);
+ /* MMU address is obtained from the host
+ * resources struct */
+ dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+ }
+ if (!status) {
+ dev_context->hdev_obj = hdev_obj;
+ /* Store current board state. */
+ dev_context->dw_brd_state = BRD_UNKNOWN;
+ dev_context->resources = resources;
+ dsp_clk_enable(DSP_CLK_IVA2);
+ bridge_brd_stop(dev_context);
+ /* Return ptr to our device state to the DSP API for storage */
+ *dev_cntxt = dev_context;
+ } else {
+ if (pt_attrs != NULL) {
+ kfree(pt_attrs->pg_info);
+
+ if (pt_attrs->l2_tbl_alloc_va) {
+ mem_free_phys_mem((void *)
+ pt_attrs->l2_tbl_alloc_va,
+ pt_attrs->l2_tbl_alloc_pa,
+ pt_attrs->l2_tbl_alloc_sz);
+ }
+ if (pt_attrs->l1_tbl_alloc_va) {
+ mem_free_phys_mem((void *)
+ pt_attrs->l1_tbl_alloc_va,
+ pt_attrs->l1_tbl_alloc_pa,
+ pt_attrs->l1_tbl_alloc_sz);
+ }
+ }
+ kfree(pt_attrs);
+ kfree(dev_context);
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== bridge_dev_ctrl ========
+ * Receives device specific commands.
+ */
+static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
+ u32 dw_cmd, void *pargs)
+{
+ int status = 0;
+ struct bridge_ioctl_extproc *pa_ext_proc =
+ (struct bridge_ioctl_extproc *)pargs;
+ s32 ndx;
+
+ switch (dw_cmd) {
+ case BRDIOCTL_CHNLREAD:
+ break;
+ case BRDIOCTL_CHNLWRITE:
+ break;
+ case BRDIOCTL_SETMMUCONFIG:
+ /* store away dsp-mmu setup values for later use */
+ for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
+ dev_context->atlb_entry[ndx] = *pa_ext_proc;
+ break;
+ case BRDIOCTL_DEEPSLEEP:
+ case BRDIOCTL_EMERGENCYSLEEP:
+ /* Currently only DSP idle is supported;
+ * needs updating for later releases */
+ status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
+ break;
+ case BRDIOCTL_WAKEUP:
+ status = wake_dsp(dev_context, pargs);
+ break;
+ case BRDIOCTL_CLK_CTRL:
+ /* Looking for baseport fix for clocks */
+ status = dsp_peripheral_clk_ctrl(dev_context, pargs);
+ break;
+ case BRDIOCTL_PWR_HIBERNATE:
+ status = handle_hibernation_from_dsp(dev_context);
+ break;
+ case BRDIOCTL_PRESCALE_NOTIFY:
+ status = pre_scale_dsp(dev_context, pargs);
+ break;
+ case BRDIOCTL_POSTSCALE_NOTIFY:
+ status = post_scale_dsp(dev_context, pargs);
+ break;
+ case BRDIOCTL_CONSTRAINT_REQUEST:
+ status = handle_constraints_set(dev_context, pargs);
+ break;
+ default:
+ status = -EPERM;
+ break;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_dev_destroy ========
+ * Destroys the driver object.
+ */
+static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
+{
+ struct pg_table_attrs *pt_attrs;
+ int status = 0;
+ struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
+ dev_ctxt;
+ struct cfg_hostres *host_res;
+ u32 shm_size;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ /* It should never happen */
+ if (!dev_ctxt)
+ return -EFAULT;
+
+ /* first put the device to stop state */
+ bridge_brd_stop(dev_context);
+ if (dev_context->pt_attrs) {
+ pt_attrs = dev_context->pt_attrs;
+ kfree(pt_attrs->pg_info);
+
+ if (pt_attrs->l2_tbl_alloc_va) {
+ mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+ pt_attrs->l2_tbl_alloc_pa,
+ pt_attrs->l2_tbl_alloc_sz);
+ }
+ if (pt_attrs->l1_tbl_alloc_va) {
+ mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+ pt_attrs->l1_tbl_alloc_pa,
+ pt_attrs->l1_tbl_alloc_sz);
+ }
+ kfree(pt_attrs);
+
+ }
+
+ if (dev_context->resources) {
+ host_res = dev_context->resources;
+ shm_size = drv_datap->shm_size;
+ if (shm_size >= 0x10000) {
+ if ((host_res->dw_mem_base[1]) &&
+ (host_res->dw_mem_phys[1])) {
+ mem_free_phys_mem((void *)
+ host_res->dw_mem_base
+ [1],
+ host_res->dw_mem_phys
+ [1], shm_size);
+ }
+ } else {
+ dev_dbg(bridge, "%s: Error getting shm size "
+ "from registry: %x. Not calling "
+ "mem_free_phys_mem\n", __func__,
+ status);
+ }
+ host_res->dw_mem_base[1] = 0;
+ host_res->dw_mem_phys[1] = 0;
+
+ if (host_res->dw_mem_base[0])
+ iounmap((void *)host_res->dw_mem_base[0]);
+ if (host_res->dw_mem_base[2])
+ iounmap((void *)host_res->dw_mem_base[2]);
+ if (host_res->dw_mem_base[3])
+ iounmap((void *)host_res->dw_mem_base[3]);
+ if (host_res->dw_mem_base[4])
+ iounmap((void *)host_res->dw_mem_base[4]);
+ if (host_res->dw_dmmu_base)
+ iounmap(host_res->dw_dmmu_base);
+ if (host_res->dw_per_base)
+ iounmap(host_res->dw_per_base);
+ if (host_res->dw_per_pm_base)
+ iounmap((void *)host_res->dw_per_pm_base);
+ if (host_res->dw_core_pm_base)
+ iounmap((void *)host_res->dw_core_pm_base);
+ if (host_res->dw_sys_ctrl_base)
+ iounmap(host_res->dw_sys_ctrl_base);
+
+ host_res->dw_mem_base[0] = (u32) NULL;
+ host_res->dw_mem_base[2] = (u32) NULL;
+ host_res->dw_mem_base[3] = (u32) NULL;
+ host_res->dw_mem_base[4] = (u32) NULL;
+ host_res->dw_dmmu_base = NULL;
+ host_res->dw_sys_ctrl_base = NULL;
+
+ kfree(host_res);
+ }
+
+ /* Free the driver's device context: */
+ kfree(drv_datap->base_img);
+ kfree(drv_datap);
+ dev_set_drvdata(bridge, NULL);
+ kfree((void *)dev_ctxt);
+ return status;
+}
+
+static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_dest_addr, u32 dsp_src_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ u32 src_addr = dsp_src_addr;
+ u32 dest_addr = dsp_dest_addr;
+ u32 copy_bytes = 0;
+ u32 total_bytes = ul_num_bytes;
+ u8 host_buf[BUFFERSIZE];
+ struct bridge_dev_context *dev_context = dev_ctxt;
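+ /* Copy in BUFFERSIZE chunks via a stack bounce buffer: read from
+ * external memory, then write to internal or external memory
+ * depending on the destination address */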
+ while (total_bytes > 0 && !status) {
+ copy_bytes =
+ total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
+ /* Read from External memory */
+ status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
+ copy_bytes, mem_type);
+ if (!status) {
+ if (dest_addr < (dev_context->dw_dsp_start_add +
+ dev_context->dw_internal_size)) {
+ /* Write to Internal memory */
+ status = write_dsp_data(dev_ctxt, host_buf,
+ dest_addr, copy_bytes,
+ mem_type);
+ } else {
+ /* Write to External memory */
+ status =
+ write_ext_dsp_data(dev_ctxt, host_buf,
+ dest_addr, copy_bytes,
+ mem_type, false);
+ }
+ }
+ total_bytes -= copy_bytes;
+ src_addr += copy_bytes;
+ dest_addr += copy_bytes;
+ }
+ return status;
+}
+
+/* Unlike bridge_brd_write, this memory write does not halt the DSP */
+static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 ul_remain_bytes = 0;
+ u32 ul_bytes = 0;
+ ul_remain_bytes = ul_num_bytes;
+ while (ul_remain_bytes > 0 && !status) {
+ ul_bytes =
+ ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
+ if (dsp_addr < (dev_context->dw_dsp_start_add +
+ dev_context->dw_internal_size)) {
+ status =
+ write_dsp_data(dev_ctxt, host_buff, dsp_addr,
+ ul_bytes, mem_type);
+ } else {
+ status = write_ext_dsp_data(dev_ctxt, host_buff,
+ dsp_addr, ul_bytes,
+ mem_type, true);
+ }
+ ul_remain_bytes -= ul_bytes;
+ dsp_addr += ul_bytes;
+ host_buff = host_buff + ul_bytes;
+ }
+ return status;
+}
+
+/*
+ * ======== bridge_brd_mem_map ========
+ * This function maps MPU buffer to the DSP address space. It performs
+ * linear to physical address translation if required. It translates each
+ * page, since linear addresses can be physically non-contiguous.
+ * All address & size arguments are assumed to be page aligned (in proc.c).
+ *
+ * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
+ */
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes, u32 ul_map_attr,
+ struct page **mapped_pages)
+{
+ u32 attrs;
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ struct hw_mmu_map_attrs_t hw_attrs;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ u32 write = 0;
+ u32 num_usr_pgs = 0;
+ struct page *mapped_page, *pg;
+ s32 pg_num;
+ u32 va = virt_addr;
+ struct task_struct *curr_task = current;
+ u32 pg_i = 0;
+ u32 mpu_addr, pa;
+
+ dev_dbg(bridge,
+ "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
+ __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
+ ul_map_attr);
+ if (ul_num_bytes == 0)
+ return -EINVAL;
+
+ if (ul_map_attr & DSP_MAP_DIR_MASK) {
+ attrs = ul_map_attr;
+ } else {
+ /* Assign default attributes */
+ attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
+ }
+ /* Take mapping properties */
+ if (attrs & DSP_MAPBIGENDIAN)
+ hw_attrs.endianism = HW_BIG_ENDIAN;
+ else
+ hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+ hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
+ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+ /* Ignore element_size if mixed_size is enabled */
+ if (hw_attrs.mixed_size == 0) {
+ if (attrs & DSP_MAPELEMSIZE8) {
+ /* Size is 8 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE8BIT;
+ } else if (attrs & DSP_MAPELEMSIZE16) {
+ /* Size is 16 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE16BIT;
+ } else if (attrs & DSP_MAPELEMSIZE32) {
+ /* Size is 32 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE32BIT;
+ } else if (attrs & DSP_MAPELEMSIZE64) {
+ /* Size is 64 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE64BIT;
+ } else {
+ /*
+ * Mixedsize isn't enabled, so size can't be
+ * zero here
+ */
+ return -EINVAL;
+ }
+ }
+ if (attrs & DSP_MAPDONOTLOCK)
+ hw_attrs.donotlockmpupage = 1;
+ else
+ hw_attrs.donotlockmpupage = 0;
+
+ if (attrs & DSP_MAPVMALLOCADDR) {
+ return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
+ ul_num_bytes, &hw_attrs);
+ }
+ /*
+ * Do OS-specific user-va to pa translation.
+ * Combine physically contiguous regions to reduce TLBs.
+ * Pass the translated pa to pte_update.
+ */
+ if ((attrs & DSP_MAPPHYSICALADDR)) {
+ status = pte_update(dev_context, ul_mpu_addr, virt_addr,
+ ul_num_bytes, &hw_attrs);
+ goto func_cont;
+ }
+
+ /*
+ * Important Note: ul_mpu_addr is mapped from user application process
+ * to current process - it must lie completely within the current
+ * virtual memory address space in order to be of use to us here!
+ */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, ul_mpu_addr);
+ if (vma)
+ dev_dbg(bridge,
+ "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+ ul_num_bytes, vma->vm_start, vma->vm_end,
+ vma->vm_flags);
+
+ /*
+ * It is observed that under some circumstances, the user buffer is
+ * spread across several VMAs. So loop through and check if the entire
+ * user buffer is covered
+ */
+ while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
+ /* jump to the next VMA region */
+ vma = find_vma(mm, vma->vm_end + 1);
+ if (!vma)
+ break;
+ dev_dbg(bridge,
+ "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+ ul_num_bytes, vma->vm_start, vma->vm_end,
+ vma->vm_flags);
+ }
+ if (!vma) {
+ pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+ __func__, ul_mpu_addr, ul_num_bytes);
+ status = -EINVAL;
+ up_read(&mm->mmap_sem);
+ goto func_cont;
+ }
+
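+ /* For VM_IO areas, translate each page via the MPU page tables;
+ * for normal user memory, pin the pages with get_user_pages()
+ * before programming the DSP MMU */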
+ if (vma->vm_flags & VM_IO) {
+ num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+ mpu_addr = ul_mpu_addr;
+
+ /* Get the physical addresses for user buffer */
+ for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+ pa = user_va2_pa(mm, mpu_addr);
+ if (!pa) {
+ status = -EPERM;
+ pr_err("DSPBRIDGE: VM_IO mapping physical"
+ "address is invalid\n");
+ break;
+ }
+ if (pfn_valid(__phys_to_pfn(pa))) {
+ pg = PHYS_TO_PAGE(pa);
+ get_page(pg);
+ if (page_count(pg) < 1) {
+ pr_err("Bad page in VM_IO buffer\n");
+ bad_page_dump(pa, pg);
+ }
+ }
+ status = pte_set(dev_context->pt_attrs, pa,
+ va, HW_PAGE_SIZE4KB, &hw_attrs);
+ if (status)
+ break;
+
+ va += HW_PAGE_SIZE4KB;
+ mpu_addr += HW_PAGE_SIZE4KB;
+ pa += HW_PAGE_SIZE4KB;
+ }
+ } else {
+ num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+ pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
+ write, 1, &mapped_page, NULL);
+ if (pg_num > 0) {
+ if (page_count(mapped_page) < 1) {
+ pr_err("Bad page count after doing"
+ "get_user_pages on"
+ "user buffer\n");
+ bad_page_dump(page_to_phys(mapped_page),
+ mapped_page);
+ }
+ status = pte_set(dev_context->pt_attrs,
+ page_to_phys(mapped_page), va,
+ HW_PAGE_SIZE4KB, &hw_attrs);
+ if (status)
+ break;
+
+ if (mapped_pages)
+ mapped_pages[pg_i] = mapped_page;
+
+ va += HW_PAGE_SIZE4KB;
+ ul_mpu_addr += HW_PAGE_SIZE4KB;
+ } else {
+ pr_err("DSPBRIDGE: get_user_pages FAILED,"
+ "MPU addr = 0x%x,"
+ "vma->vm_flags = 0x%lx,"
+ "get_user_pages Err"
+ "Value = %d, Buffer"
+ "size=0x%x\n", ul_mpu_addr,
+ vma->vm_flags, pg_num, ul_num_bytes);
+ status = -EPERM;
+ break;
+ }
+ }
+ }
+ up_read(&mm->mmap_sem);
+func_cont:
+ if (status) {
+ /*
+ * Roll back the pages mapped so far if mapping
+ * failed midway
+ */
+ if (pg_i) {
+ bridge_brd_mem_un_map(dev_context, virt_addr,
+ (pg_i * PG_SIZE4K));
+ }
+ status = -EPERM;
+ }
+ /*
+ * In any case, flush the TLB.
+ * This is done here instead of in pte_update to avoid unnecessary
+ * repetition while mapping non-contiguous physical regions of a
+ * virtual region.
+ */
+ flush_all(dev_context);
+ dev_dbg(bridge, "%s status %x\n", __func__, status);
+ return status;
+}
+
+/*
+ * ======== bridge_brd_mem_un_map ========
+ * Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ * PTEs of a mapped memory block are contiguous in any page table.
+ * So, instead of looking up the PTE address for every 4K block,
+ * we clear consecutive PTEs until all the bytes are unmapped.
+ */
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+ u32 virt_addr, u32 ul_num_bytes)
+{
+ u32 l1_base_va;
+ u32 l2_base_va;
+ u32 l2_base_pa;
+ u32 l2_page_num;
+ u32 pte_val;
+ u32 pte_size;
+ u32 pte_count;
+ u32 pte_addr_l1;
+ u32 pte_addr_l2 = 0;
+ u32 rem_bytes;
+ u32 rem_bytes_l2;
+ u32 va_curr;
+ struct page *pg = NULL;
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ struct pg_table_attrs *pt = dev_context->pt_attrs;
+ u32 temp;
+ u32 paddr;
+ u32 numof4k_pages = 0;
+
+ va_curr = virt_addr;
+ rem_bytes = ul_num_bytes;
+ rem_bytes_l2 = 0;
+ l1_base_va = pt->l1_base_va;
+ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+ dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
+ "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
+ ul_num_bytes, l1_base_va, pte_addr_l1);
+
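+ /* Walk the VA range: section/supersection entries are cleared directly
+ * in the L1 table, while coarse entries have their covered L2 PTEs
+ * cleared and the L2 table released once it becomes empty */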
+ while (rem_bytes && !status) {
+ u32 va_curr_orig = va_curr;
+ /* Find whether the L1 PTE points to a valid L2 PT */
+ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+ pte_val = *(u32 *) pte_addr_l1;
+ pte_size = hw_mmu_pte_size_l1(pte_val);
+
+ if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
+ goto skip_coarse_page;
+
+ /*
+ * Get the L2 PA from the L1 PTE, and find
+ * corresponding L2 VA
+ */
+ l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+ l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+ l2_page_num =
+ (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+ /*
+ * Find the L2 PTE address from which we will start
+ * clearing, the number of PTEs to be cleared on this
+ * page, and the size of VA space that needs to be
+ * cleared on this L2 page
+ */
+ pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
+ pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
+ pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
+ if (rem_bytes < (pte_count * PG_SIZE4K))
+ pte_count = rem_bytes / PG_SIZE4K;
+ rem_bytes_l2 = pte_count * PG_SIZE4K;
+
+ /*
+ * Unmap the VA space on this L2 PT. A quicker way
+ * would be to clear pte_count entries starting from
+ * pte_addr_l2. However, the code below checks that we don't
+ * clear invalid entries, or clear less than 64KB for a 64KB
+ * entry. Similar checking is done for L1 PTEs below.
+ */
+ while (rem_bytes_l2 && !status) {
+ pte_val = *(u32 *) pte_addr_l2;
+ pte_size = hw_mmu_pte_size_l2(pte_val);
+ /* va_curr aligned to pte_size? */
+ if (pte_size == 0 || rem_bytes_l2 < pte_size ||
+ va_curr & (pte_size - 1)) {
+ status = -EPERM;
+ break;
+ }
+
+ /* Collect Physical addresses from VA */
+ paddr = (pte_val & ~(pte_size - 1));
+ if (pte_size == HW_PAGE_SIZE64KB)
+ numof4k_pages = 16;
+ else
+ numof4k_pages = 1;
+ temp = 0;
+ while (temp++ < numof4k_pages) {
+ if (!pfn_valid(__phys_to_pfn(paddr))) {
+ paddr += HW_PAGE_SIZE4KB;
+ continue;
+ }
+ pg = PHYS_TO_PAGE(paddr);
+ if (page_count(pg) < 1) {
+ pr_info("DSPBRIDGE: UNMAP function: "
+ "COUNT 0 FOR PA 0x%x, size = "
+ "0x%x\n", paddr, ul_num_bytes);
+ bad_page_dump(paddr, pg);
+ } else {
+ set_page_dirty(pg);
+ page_cache_release(pg);
+ }
+ paddr += HW_PAGE_SIZE4KB;
+ }
+ if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
+ status = -EPERM;
+ goto EXIT_LOOP;
+ }
+
+ status = 0;
+ rem_bytes_l2 -= pte_size;
+ va_curr += pte_size;
+ pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
+ }
+ spin_lock(&pt->pg_lock);
+ if (rem_bytes_l2 == 0) {
+ pt->pg_info[l2_page_num].num_entries -= pte_count;
+ if (pt->pg_info[l2_page_num].num_entries == 0) {
+ /*
+ * Clear the L1 PTE pointing to the L2 PT
+ */
+ if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
+ HW_MMU_COARSE_PAGE_SIZE))
+ status = 0;
+ else {
+ status = -EPERM;
+ spin_unlock(&pt->pg_lock);
+ goto EXIT_LOOP;
+ }
+ }
+ rem_bytes -= pte_count * PG_SIZE4K;
+ } else
+ status = -EPERM;
+
+ spin_unlock(&pt->pg_lock);
+ continue;
+skip_coarse_page:
+ /* va_curr aligned to pte_size? */
+ /* pte_size = 1 MB or 16 MB */
+ if (pte_size == 0 || rem_bytes < pte_size ||
+ va_curr & (pte_size - 1)) {
+ status = -EPERM;
+ break;
+ }
+
+ if (pte_size == HW_PAGE_SIZE1MB)
+ numof4k_pages = 256;
+ else
+ numof4k_pages = 4096;
+ temp = 0;
+ /* Collect Physical addresses from VA */
+ paddr = (pte_val & ~(pte_size - 1));
+ while (temp++ < numof4k_pages) {
+ if (pfn_valid(__phys_to_pfn(paddr))) {
+ pg = PHYS_TO_PAGE(paddr);
+ if (page_count(pg) < 1) {
+ pr_info("DSPBRIDGE: UNMAP function: "
+ "COUNT 0 FOR PA 0x%x, size = "
+ "0x%x\n", paddr, ul_num_bytes);
+ bad_page_dump(paddr, pg);
+ } else {
+ set_page_dirty(pg);
+ page_cache_release(pg);
+ }
+ }
+ paddr += HW_PAGE_SIZE4KB;
+ }
+ if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
+ status = 0;
+ rem_bytes -= pte_size;
+ va_curr += pte_size;
+ } else {
+ status = -EPERM;
+ goto EXIT_LOOP;
+ }
+ }
+ /*
+ * Flush the TLB here so that any stale entries are removed
+ */
+EXIT_LOOP:
+ flush_all(dev_context);
+ dev_dbg(bridge,
+ "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
+ " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
+ pte_addr_l2, rem_bytes, rem_bytes_l2, status);
+ return status;
+}
+
+/*
+ * ======== user_va2_pa ========
+ * Purpose:
+ * This function walks through the page tables to convert a userland
+ * virtual address to a physical address.
+ */
+static u32 user_va2_pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * ======== pte_update ========
+ * This function calculates the optimum page-aligned addresses and sizes.
+ * The caller must pass page-aligned values.
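+ *
+ * For example (illustrative): a region of 0x111000 bytes whose PA and VA
+ * are both 1 MB aligned is mapped as one 1 MB entry, one 64 KB entry and
+ * one 4 KB entry.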
+ */
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+ u32 va, u32 size,
+ struct hw_mmu_map_attrs_t *map_attrs)
+{
+ u32 i;
+ u32 all_bits;
+ u32 pa_curr = pa;
+ u32 va_curr = va;
+ u32 num_bytes = size;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ int status = 0;
+ u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+ HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+ };
+
+ while (num_bytes && !status) {
+ /* To find the max. page size with which both PA & VA are
+ * aligned */
+ all_bits = pa_curr | va_curr;
+
+ for (i = 0; i < 4; i++) {
+ if ((num_bytes >= page_size[i]) && ((all_bits &
+ (page_size[i] -
+ 1)) == 0)) {
+ status =
+ pte_set(dev_context->pt_attrs, pa_curr,
+ va_curr, page_size[i], map_attrs);
+ pa_curr += page_size[i];
+ va_curr += page_size[i];
+ num_bytes -= page_size[i];
+ /* Don't try smaller sizes. Hopefully we have
+ * reached an address aligned to a bigger page
+ * size */
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== pte_set ========
+ * This function calculates the PTE address (MPU virtual) to be updated.
+ * It also manages the L2 page tables.
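+ *
+ * For 4 KB/64 KB mappings the L1 PTE must point to an L2 (coarse) table,
+ * which is looked up or allocated here; 1 MB/16 MB mappings are written
+ * directly into the L1 table.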
+ */
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+ u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+ u32 i;
+ u32 pte_val;
+ u32 pte_addr_l1;
+ u32 pte_size;
+ /* Base address of the PT that will be updated */
+ u32 pg_tbl_va;
+ u32 l1_base_va;
+ /* The compiler warns that the next three variables might be used
+ * uninitialized in this function. That does not appear to be the
+ * case, but they are initialized anyway to silence the warning. */
+ u32 l2_base_va = 0;
+ u32 l2_base_pa = 0;
+ u32 l2_page_num = 0;
+ int status = 0;
+
+ l1_base_va = pt->l1_base_va;
+ pg_tbl_va = l1_base_va;
+ if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
+ /* Find whether the L1 PTE points to a valid L2 PT */
+ pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
+ if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
+ pte_val = *(u32 *) pte_addr_l1;
+ pte_size = hw_mmu_pte_size_l1(pte_val);
+ } else {
+ return -EPERM;
+ }
+ spin_lock(&pt->pg_lock);
+ if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
+ /* Get the L2 PA from the L1 PTE, and find
+ * corresponding L2 VA */
+ l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+ l2_base_va =
+ l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+ l2_page_num =
+ (l2_base_pa -
+ pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+ } else if (pte_size == 0) {
+ /* L1 PTE is invalid. Allocate a L2 PT and
+ * point the L1 PTE to it */
+ /* Find a free L2 PT. */
+ for (i = 0; (i < pt->l2_num_pages) &&
+ (pt->pg_info[i].num_entries != 0); i++)
+ ;
+ if (i < pt->l2_num_pages) {
+ l2_page_num = i;
+ l2_base_pa = pt->l2_base_pa + (l2_page_num *
+ HW_MMU_COARSE_PAGE_SIZE);
+ l2_base_va = pt->l2_base_va + (l2_page_num *
+ HW_MMU_COARSE_PAGE_SIZE);
+ /* Endianness attributes are ignored for
+ * HW_MMU_COARSE_PAGE_SIZE */
+ status =
+ hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
+ HW_MMU_COARSE_PAGE_SIZE,
+ attrs);
+ } else {
+ status = -ENOMEM;
+ }
+ } else {
+ /* Found valid L1 PTE of another size.
+ * Should not overwrite it. */
+ status = -EPERM;
+ }
+ if (!status) {
+ pg_tbl_va = l2_base_va;
+ if (size == HW_PAGE_SIZE64KB)
+ pt->pg_info[l2_page_num].num_entries += 16;
+ else
+ pt->pg_info[l2_page_num].num_entries++;
+ dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
+ "%x, num_entries %x\n", l2_base_va,
+ l2_base_pa, l2_page_num,
+ pt->pg_info[l2_page_num].num_entries);
+ }
+ spin_unlock(&pt->pg_lock);
+ }
+ if (!status) {
+ dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
+ pg_tbl_va, pa, va, size);
+ dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
+ "mixed_size %x\n", attrs->endianism,
+ attrs->element_size, attrs->mixed_size);
+ status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
+ }
+
+ return status;
+}
+
+/* Memory map kernel VA -- memory allocated with vmalloc */
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+ u32 ul_mpu_addr, u32 virt_addr,
+ u32 ul_num_bytes,
+ struct hw_mmu_map_attrs_t *hw_attrs)
+{
+ int status = 0;
+ struct page *page[1];
+ u32 i;
+ u32 pa_curr;
+ u32 pa_next;
+ u32 va_curr;
+ u32 size_curr;
+ u32 num_pages;
+ u32 pa;
+ u32 num_of4k_pages;
+ u32 temp = 0;
+
+ /*
+ * Do Kernel va to pa translation.
+ * Combine physically contiguous regions to reduce TLBs.
+ * Pass the translated pa to pte_update.
+ */
+ num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
+ i = 0;
+ va_curr = ul_mpu_addr;
+ page[0] = vmalloc_to_page((void *)va_curr);
+ pa_next = page_to_phys(page[0]);
+ while (!status && (i < num_pages)) {
+ /*
+ * Reuse pa_next from the previous iteration to avoid
+ * an extra va-to-pa call
+ */
+ pa_curr = pa_next;
+ size_curr = PAGE_SIZE;
+ /*
+ * If the next page is physically contiguous,
+ * map it with the current one by increasing
+ * the size of the region to be mapped
+ */
+ while (++i < num_pages) {
+ page[0] =
+ vmalloc_to_page((void *)(va_curr + size_curr));
+ pa_next = page_to_phys(page[0]);
+
+ if (pa_next == (pa_curr + size_curr))
+ size_curr += PAGE_SIZE;
+ else
+ break;
+
+ }
+ if (pa_next == 0) {
+ status = -ENOMEM;
+ break;
+ }
+ pa = pa_curr;
+ num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
+ while (temp++ < num_of4k_pages) {
+ get_page(PHYS_TO_PAGE(pa));
+ pa += HW_PAGE_SIZE4KB;
+ }
+ status = pte_update(dev_context, pa_curr, virt_addr +
+ (va_curr - ul_mpu_addr), size_curr,
+ hw_attrs);
+ va_curr += size_curr;
+ }
+ /*
+ * In any case, flush the TLB.
+ * This is done here instead of in pte_update to avoid unnecessary
+ * repetition while mapping non-contiguous physical regions of a
+ * virtual region.
+ */
+ flush_all(dev_context);
+ dev_dbg(bridge, "%s status %x\n", __func__, status);
+ return status;
+}
+
+/*
+ * ======== wait_for_start ========
+ * Wait for the signal from the DSP that it has started, or time out.
+ */
+bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
+{
+ u16 timeout = TIHELEN_ACKTIMEOUT;
+
+ /* Wait for response from board */
+ while (__raw_readw(dw_sync_addr) && --timeout)
+ udelay(10);
+
+ /* If timed out: return false */
+ if (!timeout) {
+ pr_err("%s: Timed out waiting DSP to Start\n", __func__);
+ return false;
+ }
+ return true;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
new file mode 100644
index 00000000000..b789f8fdd89
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -0,0 +1,550 @@
+/*
+ * tiomap3430_pwr.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of DSP wake/sleep routines.
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/io_sm.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/brddefs.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/iodefs.h>
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+#include <dspbridge/pwr_sh.h>
+
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/wdt.h>
+
+/* ----------------------------------- specific to this file */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include <mach-omap2/prm-regbits-34xx.h>
+#include <mach-omap2/cm-regbits-34xx.h>
+
+#define PWRSTST_TIMEOUT 200
+
+/*
+ * ======== handle_constraints_set ========
+ * Sets new DSP constraint
+ */
+int handle_constraints_set(struct bridge_dev_context *dev_context,
+ void *pargs)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 *constraint_val;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ constraint_val = (u32 *) (pargs);
+ /* Read the target value requested by DSP */
+ dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
+ (u32) *(constraint_val + 1));
+
+ /* Set the new opp value */
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+ return 0;
+}
+
+/*
+ * ======== handle_hibernation_from_dsp ========
+ * Handle Hibernation requested from DSP
+ */
+int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
+{
+ int status = 0;
+#ifdef CONFIG_PM
+ u16 timeout = PWRSTST_TIMEOUT / 10;
+ u32 pwr_state;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 opplevel;
+ struct io_mgr *hio_mgr;
+#endif
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+ /* Wait for DSP to move into OFF state */
+ while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
+ if (msleep_interruptible(10)) {
+ pr_err("Waiting for DSP OFF mode interrupted\n");
+ return -EPERM;
+ }
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+ OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ }
+ if (timeout == 0) {
+ pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
+ status = -ETIMEDOUT;
+ return status;
+ } else {
+
+ /* Save mailbox settings */
+ omap_mbox_save_ctx(dev_context->mbox);
+
+ /* Turn off DSP Peripheral clocks and DSP Load monitor timer */
+ status = dsp_clock_disable_all(dev_context->dsp_per_clks);
+
+ /* Disable wdt on hibernation. */
+ dsp_wdt_enable(false);
+
+ if (!status) {
+ /* Update the bridge driver state */
+ dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ status =
+ dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ if (!hio_mgr) {
+ status = DSP_EHANDLE;
+ return status;
+ }
+ io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);
+
+ /*
+ * Set the OPP to low level before moving to OFF
+ * mode
+ */
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) (VDD1_OPP1);
+ status = 0;
+#endif /* CONFIG_TIDSPBRIDGE_DVFS */
+ }
+ }
+#endif
+ return status;
+}
+
+/*
+ * ======== sleep_dsp ========
+ * Put DSP in low power consuming state.
+ */
+int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
+ void *pargs)
+{
+ int status = 0;
+#ifdef CONFIG_PM
+#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
+ struct deh_mgr *hdeh_mgr;
+#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
+ u16 timeout = PWRSTST_TIMEOUT / 10;
+ u32 pwr_state, target_pwr_state;
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ /* Check if sleep code is valid */
+ if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
+ return -EINVAL;
+
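+ /* Pick the mailbox command and the target power state based on
+ * the current board state */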
+ switch (dev_context->dw_brd_state) {
+ case BRD_RUNNING:
+ omap_mbox_save_ctx(dev_context->mbox);
+ if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
+ dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
+ __func__);
+ target_pwr_state = PWRDM_POWER_OFF;
+ } else {
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
+ target_pwr_state = PWRDM_POWER_RET;
+ }
+ break;
+ case BRD_RETENTION:
+ omap_mbox_save_ctx(dev_context->mbox);
+ if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
+ target_pwr_state = PWRDM_POWER_OFF;
+ } else
+ return 0;
+ break;
+ case BRD_HIBERNATION:
+ case BRD_DSP_HIBERNATION:
+ /* Already in Hibernation, so just return */
+ dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
+ __func__);
+ return 0;
+ case BRD_STOPPED:
+ dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
+ return 0;
+ default:
+ dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
+ return -EPERM;
+ }
+
+ /* Get the PRCM DSP power domain status */
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
+
+ /* Wait for DSP to move into target power state */
+ while ((pwr_state != target_pwr_state) && --timeout) {
+ if (msleep_interruptible(10)) {
+ pr_err("Waiting for DSP to Suspend interrupted\n");
+ return -EPERM;
+ }
+ pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+ OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ }
+
+ if (!timeout) {
+ pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
+ __func__, pwr_state);
+#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
+ dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr);
+ bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
+#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
+ return -ETIMEDOUT;
+ } else {
+ /* Update the bridge driver state */
+ if (dsp_test_sleepstate == PWRDM_POWER_OFF)
+ dev_context->dw_brd_state = BRD_HIBERNATION;
+ else
+ dev_context->dw_brd_state = BRD_RETENTION;
+
+ /* Disable wdt on hibernation. */
+ dsp_wdt_enable(false);
+
+ /* Turn off DSP Peripheral clocks */
+ status = dsp_clock_disable_all(dev_context->dsp_per_clks);
+ if (status)
+ return status;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ else if (target_pwr_state == PWRDM_POWER_OFF) {
+ /*
+ * Set the OPP to low level before moving to OFF mode
+ */
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) (VDD1_OPP1);
+ }
+#endif /* CONFIG_TIDSPBRIDGE_DVFS */
+ }
+#endif /* CONFIG_PM */
+ return status;
+}
+
+/*
+ * ======== wake_dsp ========
+ * Wake up DSP from sleep.
+ */
+int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
+{
+ int status = 0;
+#ifdef CONFIG_PM
+
+ /* Check the board state, if it is not 'SLEEP' then return */
+ if (dev_context->dw_brd_state == BRD_RUNNING ||
+ dev_context->dw_brd_state == BRD_STOPPED) {
+ /* The Device is in 'RET' or 'OFF' state and Bridge state is not
+ * 'SLEEP', this means state inconsistency, so return */
+ return 0;
+ }
+
+ /* Send a wakeup message to DSP */
+ sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
+
+ /* Set the device state to RUNNING */
+ dev_context->dw_brd_state = BRD_RUNNING;
+#endif /* CONFIG_PM */
+ return status;
+}
+
+/*
+ * ======== dsp_peripheral_clk_ctrl ========
+ * Enable/disable the DSP peripheral clocks as needed.
+ */
+int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
+ void *pargs)
+{
+ u32 ext_clk = 0;
+ u32 ext_clk_id = 0;
+ u32 ext_clk_cmd = 0;
+ u32 clk_id_index = MBX_PM_MAX_RESOURCES;
+ u32 tmp_index;
+ u32 dsp_per_clks_before;
+ int status = 0;
+
+ dsp_per_clks_before = dev_context->dsp_per_clks;
+
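+ /* pargs carries a packed word: the clock ID in the MBX_PM_CLK_IDMASK
+ * bits and the command in the bits above MBX_PM_CLK_CMDSHIFT */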
+ ext_clk = (u32) *((u32 *) pargs);
+ ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;
+
+ /* process the power message -- TODO, keep it in a separate function */
+ for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
+ if (ext_clk_id == bpwr_clkid[tmp_index]) {
+ clk_id_index = tmp_index;
+ break;
+ }
+ }
+ /* TODO -- the assert may be too strict a restriction here; maybe we
+ * should just return failure when the CLK ID does not match */
+ /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
+ if (clk_id_index == MBX_PM_MAX_RESOURCES) {
+ /* Return with a more meaningful error code */
+ return -EPERM;
+ }
+ ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
+ switch (ext_clk_cmd) {
+ case BPWR_DISABLE_CLOCK:
+ status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
+ dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
+ false);
+ if (!status) {
+ (dev_context->dsp_per_clks) &=
+ (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
+ }
+ break;
+ case BPWR_ENABLE_CLOCK:
+ status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
+ dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
+ if (!status)
+ (dev_context->dsp_per_clks) |=
+ (1 << bpwr_clks[clk_id_index].clk);
+ break;
+ default:
+ dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
+ /* unsupported cmd */
+ /* TODO -- provide support for AUTOIDLE Enable/Disable
+ * commands */
+ }
+ return status;
+}
+
+/*
+ * ======== pre_scale_dsp ========
+ * Sends prescale notification to DSP
+ *
+ */
+int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 level;
+ u32 voltage_domain;
+
+ voltage_domain = *((u32 *) pargs);
+ level = *((u32 *) pargs + 1);
+
+ dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
+ __func__, voltage_domain, level);
+ if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
+ (dev_context->dw_brd_state == BRD_RETENTION) ||
+ (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+ dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
+ return 0;
+ } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
+ /* Send a prenotificatio to DSP */
+ dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
+ sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
+ return 0;
+ } else {
+ return -EPERM;
+ }
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+ return 0;
+}
+
+/*
+ * ======== post_scale_dsp ========
+ * Sends postscale notification to DSP
+ *
+ */
+int post_scale_dsp(struct bridge_dev_context *dev_context,
+ void *pargs)
+{
+ int status = 0;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 level;
+ u32 voltage_domain;
+ struct io_mgr *hio_mgr;
+
+ status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ if (!hio_mgr)
+ return -EFAULT;
+
+ voltage_domain = *((u32 *) pargs);
+ level = *((u32 *) pargs + 1);
+ dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
+ __func__, voltage_domain, level);
+ if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
+ (dev_context->dw_brd_state == BRD_RETENTION) ||
+ (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+ /* Update the OPP value in shared memory */
+ io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
+ dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
+ __func__);
+ } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
+ /* Update the OPP value in shared memory */
+ io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
+ /* Send a post notification to DSP */
+ sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
+ dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
+ "to DSP\n", __func__);
+ } else {
+ status = -EPERM;
+ }
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+ return status;
+}
+
+void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
+{
+ struct cfg_hostres *resources;
+ int status = 0;
+ u32 iva2_grpsel;
+ u32 mpu_grpsel;
+ struct dev_object *hdev_object = NULL;
+ struct bridge_dev_context *bridge_context = NULL;
+
+ hdev_object = (struct dev_object *)drv_get_first_dev_object();
+ if (!hdev_object)
+ return;
+
+ status = dev_get_bridge_context(hdev_object, &bridge_context);
+ if (!bridge_context)
+ return;
+
+ resources = bridge_context->resources;
+ if (!resources)
+ return;
+
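+ /* For each peripheral, move its wakeup-event group selection between the
+ * IVA2 and MPU groups by updating the PM_IVA2GRPSEL (offset 0xA8) and
+ * PM_MPUGRPSEL (offset 0xA4) registers in the PER or CORE PRM base */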
+ switch (clock_id) {
+ case BPWR_GP_TIMER5:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_GP_TIMER6:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_GP_TIMER7:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_GP_TIMER8:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP1:
+ iva2_grpsel = readl(resources->dw_core_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_core_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_core_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_core_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP2:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP3:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP4:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ case BPWR_MCBSP5:
+ iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ if (enable) {
+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
+ } else {
+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
+ }
+ writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ break;
+ }
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
new file mode 100644
index 00000000000..190c028afe9
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -0,0 +1,455 @@
+/*
+ * tiomap_io.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation for the io read/write routines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/drv.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/wdt.h>
+
+/* ----------------------------------- specific to this file */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include "tiomap_io.h"
+
+static u32 ul_ext_base;
+static u32 ul_ext_end;
+
+static u32 shm0_end;
+static u32 ul_dyn_ext_base;
+static u32 ul_trace_sec_beg;
+static u32 ul_trace_sec_end;
+static u32 ul_shm_base_virt;
+
+bool symbols_reloaded = true;
+
+/*
+ * ======== read_ext_dsp_data ========
+ * Copies DSP external memory buffers to the host side buffers.
+ */
+int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type)
+{
+ int status = 0;
+ struct bridge_dev_context *dev_context = dev_ctxt;
+ u32 offset;
+ u32 ul_tlb_base_virt = 0;
+ u32 ul_shm_offset_virt = 0;
+ u32 dw_ext_prog_virt_mem;
+ u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+ bool trace_read = false;
+
+ if (!ul_shm_base_virt) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ SHMBASENAME, &ul_shm_base_virt);
+ }
+ DBC_ASSERT(ul_shm_base_virt != 0);
+
+ /* Check if it is a read of Trace section */
+ if (!status && !ul_trace_sec_beg) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_BEG, &ul_trace_sec_beg);
+ }
+ DBC_ASSERT(ul_trace_sec_beg != 0);
+
+ if (!status && !ul_trace_sec_end) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_END, &ul_trace_sec_end);
+ }
+ DBC_ASSERT(ul_trace_sec_end != 0);
+
+ if (!status) {
+ if ((dsp_addr <= ul_trace_sec_end) &&
+ (dsp_addr >= ul_trace_sec_beg))
+ trace_read = true;
+ }
+
+ /* If reading from TRACE, force remap/unmap */
+ if (trace_read && dw_base_addr) {
+ dw_base_addr = 0;
+ dev_context->dw_dsp_ext_base_addr = 0;
+ }
+
+ if (!dw_base_addr) {
+ /* Initialize ul_ext_base and ul_ext_end */
+ ul_ext_base = 0;
+ ul_ext_end = 0;
+
+ /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
+ if (!status && !ul_dyn_ext_base) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ DYNEXTBASE, &ul_dyn_ext_base);
+ }
+ DBC_ASSERT(ul_dyn_ext_base != 0);
+
+ if (!status) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ EXTBASE, &ul_ext_base);
+ }
+ DBC_ASSERT(ul_ext_base != 0);
+
+ if (!status) {
+ status = dev_get_symbol(dev_context->hdev_obj,
+ EXTEND, &ul_ext_end);
+ }
+ DBC_ASSERT(ul_ext_end != 0);
+
+ /* Trace buffer is right after the shm SEG0,
+ * so set the base address to SHMBASE */
+ if (trace_read) {
+ ul_ext_base = ul_shm_base_virt;
+ ul_ext_end = ul_trace_sec_end;
+ }
+
+ DBC_ASSERT(ul_ext_end != 0);
+ DBC_ASSERT(ul_ext_end > ul_ext_base);
+
+ if (ul_ext_end < ul_ext_base)
+ status = -EPERM;
+
+ if (!status) {
+ ul_tlb_base_virt =
+ dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
+ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+ dw_ext_prog_virt_mem =
+ dev_context->atlb_entry[0].ul_gpp_va;
+
+ if (!trace_read) {
+ ul_shm_offset_virt =
+ ul_shm_base_virt - ul_tlb_base_virt;
+ ul_shm_offset_virt +=
+ PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
+ 1, HW_PAGE_SIZE64KB);
+ dw_ext_prog_virt_mem -= ul_shm_offset_virt;
+ dw_ext_prog_virt_mem +=
+ (ul_ext_base - ul_dyn_ext_base);
+ dev_context->dw_dsp_ext_base_addr =
+ dw_ext_prog_virt_mem;
+
+ /*
+ * This dw_dsp_ext_base_addr will get cleared
+ * only when the board is stopped.
+ */
+ if (!dev_context->dw_dsp_ext_base_addr)
+ status = -EPERM;
+ }
+
+ dw_base_addr = dw_ext_prog_virt_mem;
+ }
+ }
+
+ if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
+ status = -EPERM;
+
+ offset = dsp_addr - ul_ext_base;
+
+ if (!status)
+ memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes);
+
+ return status;
+}
+
+/*
+ * ======== write_dsp_data ========
+ * purpose:
+ * Copies buffers to the DSP internal/external memory.
+ */
+int write_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type)
+{
+ u32 offset;
+ u32 dw_base_addr = dev_context->dw_dsp_base_addr;
+ struct cfg_hostres *resources = dev_context->resources;
+ int status = 0;
+ u32 base1, base2, base3;
+ base1 = OMAP_DSP_MEM1_SIZE;
+ base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
+ base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;
+
+ if (!resources)
+ return -EPERM;
+
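+ /*
+ * Select the host mapping (dw_mem_base[2], [3] or [4]) that covers
+ * dsp_addr and rebase the offset into that window.
+ */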
+ offset = dsp_addr - dev_context->dw_dsp_start_add;
+ if (offset < base1) {
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
+ resources->dw_mem_length[2]);
+ } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
+ resources->dw_mem_length[3]);
+ offset = offset - base2;
+ } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
+ offset < base3 + OMAP_DSP_MEM3_SIZE) {
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
+ resources->dw_mem_length[4]);
+ offset = offset - base3;
+ } else {
+ return -EPERM;
+ }
+ if (ul_num_bytes)
+ memcpy((u8 *) (dw_base_addr + offset), host_buff, ul_num_bytes);
+ else
+ *((u32 *) host_buff) = dw_base_addr + offset;
+
+ return status;
+}
+
+/*
+ * ======== write_ext_dsp_data ========
+ * purpose:
+ * Copies buffers to the external memory.
+ *
+ */
+int write_ext_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type,
+ bool dynamic_load)
+{
+ u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+ u32 dw_offset = 0;
+ u8 temp_byte1, temp_byte2;
+ u8 remain_byte[4];
+ s32 i;
+ int ret = 0;
+ u32 dw_ext_prog_virt_mem;
+ u32 ul_tlb_base_virt = 0;
+ u32 ul_shm_offset_virt = 0;
+ struct cfg_hostres *host_res = dev_context->resources;
+ bool trace_load = false;
+ temp_byte1 = 0x0;
+ temp_byte2 = 0x0;
+
+ if (symbols_reloaded) {
+ /* Check if it is a load to Trace section */
+ ret = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_BEG, &ul_trace_sec_beg);
+ if (!ret)
+ ret = dev_get_symbol(dev_context->hdev_obj,
+ DSP_TRACESEC_END,
+ &ul_trace_sec_end);
+ }
+ if (!ret) {
+ if ((dsp_addr <= ul_trace_sec_end) &&
+ (dsp_addr >= ul_trace_sec_beg))
+ trace_load = true;
+ }
+
+ /* If dynamic, force remap/unmap */
+ if ((dynamic_load || trace_load) && dw_base_addr) {
+ dw_base_addr = 0;
+ MEM_UNMAP_LINEAR_ADDRESS((void *)
+ dev_context->dw_dsp_ext_base_addr);
+ dev_context->dw_dsp_ext_base_addr = 0x0;
+ }
+ if (!dw_base_addr) {
+ if (symbols_reloaded)
+ /* Get SHM_BEG EXT_BEG and EXT_END. */
+ ret = dev_get_symbol(dev_context->hdev_obj,
+ SHMBASENAME, &ul_shm_base_virt);
+ DBC_ASSERT(ul_shm_base_virt != 0);
+ if (dynamic_load) {
+ if (!ret) {
+ if (symbols_reloaded)
+ ret =
+ dev_get_symbol
+ (dev_context->hdev_obj, DYNEXTBASE,
+ &ul_ext_base);
+ }
+ DBC_ASSERT(ul_ext_base != 0);
+ if (!ret) {
+ /* DR OMAPS00013235 : DLModules array may be
+ * in EXTMEM. It is expected that DYNEXTMEM and
+ * EXTMEM are contiguous, so checking for the
+ * upper bound at EXTEND should be Ok. */
+ if (symbols_reloaded)
+ ret =
+ dev_get_symbol
+ (dev_context->hdev_obj, EXTEND,
+ &ul_ext_end);
+ }
+ } else {
+ if (symbols_reloaded) {
+ if (!ret)
+ ret =
+ dev_get_symbol
+ (dev_context->hdev_obj, EXTBASE,
+ &ul_ext_base);
+ DBC_ASSERT(ul_ext_base != 0);
+ if (!ret)
+ ret =
+ dev_get_symbol
+ (dev_context->hdev_obj, EXTEND,
+ &ul_ext_end);
+ }
+ }
+ /* Trace buffer is right after the shm SEG0, so set the
+ * base address to SHMBASE */
+ if (trace_load)
+ ul_ext_base = ul_shm_base_virt;
+
+ DBC_ASSERT(ul_ext_end != 0);
+ DBC_ASSERT(ul_ext_end > ul_ext_base);
+ if (ul_ext_end < ul_ext_base)
+ ret = -EPERM;
+
+ if (!ret) {
+ ul_tlb_base_virt =
+ dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
+ DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+
+ if (symbols_reloaded) {
+ ret = dev_get_symbol
+ (dev_context->hdev_obj,
+ DSP_TRACESEC_END, &shm0_end);
+ if (!ret) {
+ ret =
+ dev_get_symbol
+ (dev_context->hdev_obj, DYNEXTBASE,
+ &ul_dyn_ext_base);
+ }
+ }
+ ul_shm_offset_virt =
+ ul_shm_base_virt - ul_tlb_base_virt;
+ if (trace_load) {
+ dw_ext_prog_virt_mem =
+ dev_context->atlb_entry[0].ul_gpp_va;
+ } else {
+ dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
+ dw_ext_prog_virt_mem +=
+ (ul_ext_base - ul_dyn_ext_base);
+ }
+
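+ /* Map the external program region so the MPU can access it here */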
+ dev_context->dw_dsp_ext_base_addr =
+ (u32) MEM_LINEAR_ADDRESS((void *)
+ dw_ext_prog_virt_mem,
+ ul_ext_end - ul_ext_base);
+ dw_base_addr += dev_context->dw_dsp_ext_base_addr;
+ /* This dw_dsp_ext_base_addr will get cleared only when
+ * the board is stopped. */
+ if (!dev_context->dw_dsp_ext_base_addr)
+ ret = -EPERM;
+ }
+ }
+ if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
+ ret = -EPERM;
+
+ if (!ret) {
+ for (i = 0; i < 4; i++)
+ remain_byte[i] = 0x0;
+
+ dw_offset = dsp_addr - ul_ext_base;
+ /* Also make sure the dsp_addr is < ul_ext_end */
+ if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
+ ret = -EPERM;
+ }
+ if (!ret) {
+ if (ul_num_bytes)
+ memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
+ ul_num_bytes);
+ else
+ *((u32 *) host_buff) = dw_base_addr + dw_offset;
+ }
+ /* Unmap here to force remap for other Ext loads */
+ if ((dynamic_load || trace_load) && dev_context->dw_dsp_ext_base_addr) {
+ MEM_UNMAP_LINEAR_ADDRESS((void *)
+ dev_context->dw_dsp_ext_base_addr);
+ dev_context->dw_dsp_ext_base_addr = 0x0;
+ }
+ symbols_reloaded = false;
+ return ret;
+}
+
+int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
+{
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ u32 opplevel = 0;
+#endif
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+ struct cfg_hostres *resources = dev_context->resources;
+ int status = 0;
+ u32 temp;
+
+ if (!dev_context->mbox)
+ return 0;
+
+ if (!resources)
+ return -EPERM;
+
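+ /*
+ * If the DSP is hibernating, restore its clocks, DPLL and mailbox
+ * context before posting the mailbox interrupt.
+ */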
+ if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+ dev_context->dw_brd_state == BRD_HIBERNATION) {
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ if (pdata->dsp_get_opp)
+ opplevel = (*pdata->dsp_get_opp) ();
+ if (opplevel == VDD1_OPP1) {
+ if (pdata->dsp_set_min_opp)
+ (*pdata->dsp_set_min_opp) (VDD1_OPP2);
+ }
+#endif
+ /* Restart the peripheral clocks */
+ dsp_clock_enable_all(dev_context->dsp_per_clks);
+ dsp_wdt_enable(true);
+
+ /*
+ * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
+ * in CM_AUTOIDLE_PLL_IVA2 register
+ */
+ (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+ OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+
+ /*
+ * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
+ * 0.75 MHz - 1.0 MHz
+ * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
+ */
+ (*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
+ OMAP3430_EN_IVA2_DPLL_MASK,
+ 0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
+ 0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
+ OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
+
+ /* Restore mailbox settings */
+ omap_mbox_restore_ctx(dev_context->mbox);
+
+ /* Access MMU SYS CONFIG register to generate a short wakeup */
+ temp = readl(resources->dw_dmmu_base + 0x10);
+
+ dev_context->dw_brd_state = BRD_RUNNING;
+ } else if (dev_context->dw_brd_state == BRD_RETENTION) {
+ /* Restart the peripheral clocks */
+ dsp_clock_enable_all(dev_context->dsp_per_clks);
+ }
+
+ status = omap_mbox_msg_send(dev_context->mbox, mb_val);
+
+ if (status) {
+ pr_err("omap_mbox_msg_send Fail and status = %d\n", status);
+ status = -EPERM;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.h b/drivers/staging/tidspbridge/core/tiomap_io.h
new file mode 100644
index 00000000000..a3f19c7b79f
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/tiomap_io.h
@@ -0,0 +1,104 @@
+/*
+ * tiomap_io.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions, types and function prototypes for the io (r/w external mem).
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_IO_
+#define _TIOMAP_IO_
+
+/*
+ * Symbol that defines beginning of shared memory.
+ * For OMAP (Helen) this is the DSP Virtual base address of SDRAM.
+ * This will be used to program DSP MMU to map DSP Virt to GPP phys.
+ * (see dspMmuTlbEntry()).
+ */
+#define SHMBASENAME "SHM_BEG"
+#define EXTBASE "EXT_BEG"
+#define EXTEND "_EXT_END"
+#define DYNEXTBASE "_DYNEXT_BEG"
+#define DYNEXTEND "_DYNEXT_END"
+#define IVAEXTMEMBASE "_IVAEXTMEM_BEG"
+#define IVAEXTMEMEND "_IVAEXTMEM_END"
+
+#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG"
+#define DSP_TRACESEC_END "_BRIDGE_TRACE_END"
+
+#define SYS_PUTCBEG "_SYS_PUTCBEG"
+#define SYS_PUTCEND "_SYS_PUTCEND"
+#define BRIDGE_SYS_PUTC_CURRENT "_BRIDGE_SYS_PUTC_current"
+
+#define WORDSWAP_ENABLE 0x3 /* Enable word swap */
+
+/*
+ * ======== read_ext_dsp_data ========
+ * Reads data from DSP external memory. The external memory for the DSP
+ * is configured by the combination of DSP MMU and shm Memory manager in the CDB
+ */
+extern int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+
+/*
+ * ======== write_dsp_data ========
+ */
+extern int write_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+
+/*
+ * ======== write_ext_dsp_data ========
+ * Writes to the DSP External memory for external program.
+ * The ext mem for program is configured by the combination of DSP MMU and
+ * shm Memory manager in the CDB
+ */
+extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
+ u8 *host_buff, u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type,
+ bool dynamic_load);
+
+/*
+ * ======== write_ext32_bit_dsp_data ========
+ * Writes 32 bit data to the external memory
+ */
+extern inline void write_ext32_bit_dsp_data(const
+ struct bridge_dev_context *dev_context,
+ u32 dsp_addr, u32 val)
+{
+ *(u32 *) dsp_addr = dev_context->tc_word_swap_on ?
+ (((val << 16) & 0xFFFF0000) | ((val >> 16) & 0x0000FFFF)) : val;
+}
+
+/*
+ * ======== read_ext32_bit_dsp_data ========
+ * Reads 32 bit data from the external memory
+ */
+extern inline u32 read_ext32_bit_dsp_data(const struct bridge_dev_context
+ *dev_context, u32 dsp_addr)
+{
+ u32 ret;
+ ret = *(u32 *) dsp_addr;
+
+ if (dev_context->tc_word_swap_on)
+ ret = ((ret << 16) & 0xFFFF0000) | ((ret >> 16) & 0x0000FFFF);
+ return ret;
+}
+
+#endif /* _TIOMAP_IO_ */
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
new file mode 100644
index 00000000000..3430418190d
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -0,0 +1,273 @@
+/*
+ * ue_deh.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge DSP exception handling (DEH) functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2010 Felipe Contreras
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <plat/dmtimer.h>
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dev.h>
+#include "_tiomap.h"
+#include "_deh.h"
+
+#include <dspbridge/io_sm.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/wdt.h>
+
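+/* DSP-side address of the last MMU fault, captured by mmu_fault_isr() */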
+static u32 fault_addr;
+
+static void mmu_fault_dpc(unsigned long data)
+{
+ struct deh_mgr *deh = (void *)data;
+
+ if (!deh)
+ return;
+
+ bridge_deh_notify(deh, DSP_MMUFAULT, 0);
+}
+
+static irqreturn_t mmu_fault_isr(int irq, void *data)
+{
+ struct deh_mgr *deh = data;
+ struct cfg_hostres *resources;
+ u32 event;
+
+ if (!deh)
+ return IRQ_HANDLED;
+
+ resources = deh->hbridge_context->resources;
+ if (!resources) {
+ dev_dbg(bridge, "%s: Failed to get Host Resources\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ hw_mmu_event_status(resources->dw_dmmu_base, &event);
+ if (event == HW_MMU_TRANSLATION_FAULT) {
+ hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
+ dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
+ event, fault_addr);
+ /*
+ * Schedule a DPC directly. In the future, it may be
+ * necessary to check if DSP MMU fault is intended for
+ * Bridge.
+ */
+ tasklet_schedule(&deh->dpc_tasklet);
+
+ /* Disable the MMU events, else once we clear it will
+ * start to raise INTs again */
+ hw_mmu_event_disable(resources->dw_dmmu_base,
+ HW_MMU_TRANSLATION_FAULT);
+ } else {
+ hw_mmu_event_disable(resources->dw_dmmu_base,
+ HW_MMU_ALL_INTERRUPTS);
+ }
+ return IRQ_HANDLED;
+}
+
+int bridge_deh_create(struct deh_mgr **ret_deh,
+ struct dev_object *hdev_obj)
+{
+ int status;
+ struct deh_mgr *deh;
+ struct bridge_dev_context *hbridge_context = NULL;
+
+ /* Message manager will be created when a file is loaded, since
+ * size of message buffer in shared memory is configurable in
+ * the base image. */
+ /* Get Bridge context info. */
+ dev_get_bridge_context(hdev_obj, &hbridge_context);
+ /* Allocate IO manager object: */
+ deh = kzalloc(sizeof(*deh), GFP_KERNEL);
+ if (!deh) {
+ status = -ENOMEM;
+ goto err;
+ }
+
+ /* Create an NTFY object to manage notifications */
+ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
+ if (!deh->ntfy_obj) {
+ status = -ENOMEM;
+ goto err;
+ }
+ ntfy_init(deh->ntfy_obj);
+
+ /* Create a MMUfault DPC */
+ tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
+
+ /* Fill in context structure */
+ deh->hbridge_context = hbridge_context;
+
+ /* Install ISR function for DSP MMU fault */
+ status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
+ "DspBridge\tiommu fault", deh);
+ if (status < 0)
+ goto err;
+
+ *ret_deh = deh;
+ return 0;
+
+err:
+ bridge_deh_destroy(deh);
+ *ret_deh = NULL;
+ return status;
+}
+
+int bridge_deh_destroy(struct deh_mgr *deh)
+{
+ if (!deh)
+ return -EFAULT;
+
+ /* If notification object exists, delete it */
+ if (deh->ntfy_obj) {
+ ntfy_delete(deh->ntfy_obj);
+ kfree(deh->ntfy_obj);
+ }
+ /* Disable DSP MMU fault */
+ free_irq(INT_DSP_MMU_IRQ, deh);
+
+ /* Free DPC object */
+ tasklet_kill(&deh->dpc_tasklet);
+
+ /* Deallocate the DEH manager object */
+ kfree(deh);
+
+ return 0;
+}
+
+int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
+ u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ if (!deh)
+ return -EFAULT;
+
+ if (event_mask)
+ return ntfy_register(deh->ntfy_obj, hnotification,
+ event_mask, notify_type);
+ else
+ return ntfy_unregister(deh->ntfy_obj, hnotification);
+}
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
+{
+ struct cfg_hostres *resources;
+ struct hw_mmu_map_attrs_t map_attrs = {
+ .endianism = HW_LITTLE_ENDIAN,
+ .element_size = HW_ELEM_SIZE16BIT,
+ .mixed_size = HW_MMU_CPUES,
+ };
+ void *dummy_va_addr;
+
+ resources = dev_context->resources;
+ dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
+
+ /*
+ * Before acking the MMU fault, let's make sure MMU can only
+ * access entry #0. Then add a new entry so that the DSP OS
+ * can continue in order to dump the stack.
+ */
+ hw_mmu_twl_disable(resources->dw_dmmu_base);
+ hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
+
+ hw_mmu_tlb_add(resources->dw_dmmu_base,
+ virt_to_phys(dummy_va_addr), fault_addr,
+ HW_PAGE_SIZE4KB, 1,
+ &map_attrs, HW_SET, HW_SET);
+
+ dsp_clk_enable(DSP_CLK_GPT8);
+
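+ /* Wait on GPT8 to give the DSP OS time to continue and dump its stack */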
+ dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
+
+ /* Clear MMU interrupt */
+ hw_mmu_event_ack(resources->dw_dmmu_base,
+ HW_MMU_TRANSLATION_FAULT);
+ dump_dsp_stack(dev_context);
+ dsp_clk_disable(DSP_CLK_GPT8);
+
+ hw_mmu_disable(resources->dw_dmmu_base);
+ free_page((unsigned long)dummy_va_addr);
+}
+#endif
+
+static inline const char *event_to_string(int event)
+{
+ switch (event) {
+ case DSP_SYSERROR: return "DSP_SYSERROR";
+ case DSP_MMUFAULT: return "DSP_MMUFAULT";
+ case DSP_PWRERROR: return "DSP_PWRERROR";
+ case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW";
+ default: return "unknown event";
+ }
+}
+
+void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
+{
+ struct bridge_dev_context *dev_context;
+ const char *str = event_to_string(event);
+
+ if (!deh)
+ return;
+
+ dev_dbg(bridge, "%s: device exception", __func__);
+ dev_context = deh->hbridge_context;
+
+ switch (event) {
+ case DSP_SYSERROR:
+ dev_err(bridge, "%s: %s, info=0x%x", __func__,
+ str, info);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ dump_dl_modules(dev_context);
+ dump_dsp_stack(dev_context);
+#endif
+ break;
+ case DSP_MMUFAULT:
+ dev_err(bridge, "%s: %s, addr=0x%x", __func__,
+ str, fault_addr);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ print_dsp_trace_buffer(dev_context);
+ dump_dl_modules(dev_context);
+ mmu_fault_print_stack(dev_context);
+#endif
+ break;
+ default:
+ dev_err(bridge, "%s: %s", __func__, str);
+ break;
+ }
+
+ /* Filter subsequent notifications when an error occurs */
+ if (dev_context->dw_brd_state != BRD_ERROR) {
+ ntfy_notify(deh->ntfy_obj, event);
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ bridge_recover_schedule();
+#endif
+ }
+
+ /* Set the Board state as ERROR */
+ dev_context->dw_brd_state = BRD_ERROR;
+ /* Disable all the clocks that were enabled by DSP */
+ dsp_clock_disable_all(dev_context->dsp_per_clks);
+ /*
+ * Avoid the subsequent WDT if it happens once,
+ * also if fatal error occurs.
+ */
+ dsp_wdt_enable(false);
+}
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
new file mode 100644
index 00000000000..2126f597753
--- /dev/null
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -0,0 +1,150 @@
+/*
+ * wdt.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP watchdog (WDT3) support.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/_chnl_sm.h>
+#include <dspbridge/wdt.h>
+#include <dspbridge/host_os.h>
+
+
+#ifdef CONFIG_TIDSPBRIDGE_WDT3
+
+#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
+
+static struct dsp_wdt_setting dsp_wdt;
+
+void dsp_wdt_dpc(unsigned long data)
+{
+ struct deh_mgr *deh_mgr;
+ dev_get_deh_mgr(dev_get_first(), &deh_mgr);
+ if (deh_mgr)
+ bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
+}
+
+irqreturn_t dsp_wdt_isr(int irq, void *data)
+{
+ u32 value;
+ /* ack wdt3 interrupt */
+ value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+ __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+
+ tasklet_schedule(&dsp_wdt.wdt3_tasklet);
+ return IRQ_HANDLED;
+}
+
+int dsp_wdt_init(void)
+{
+ int ret = 0;
+
+ dsp_wdt.sm_wdt = NULL;
+ dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
+ tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
+
+ dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
+
+ if (!IS_ERR(dsp_wdt.fclk)) {
+ dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
+ if (IS_ERR(dsp_wdt.iclk)) {
+ dsp_wdt.iclk = NULL;
+ clk_put(dsp_wdt.fclk);
+ dsp_wdt.fclk = NULL;
+ ret = -EFAULT;
+ }
+ } else {
+ dsp_wdt.fclk = NULL;
+ ret = -EFAULT;
+ }
+
+ if (!ret)
+ ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0,
+ "dsp_wdt", &dsp_wdt);
+
+ /* Disable at this moment, it will be enabled when DSP starts */
+ if (!ret)
+ disable_irq(INT_34XX_WDT3_IRQ);
+
+ return ret;
+}
+
+void dsp_wdt_sm_set(void *data)
+{
+ dsp_wdt.sm_wdt = data;
+ dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT;
+}
+
+
+void dsp_wdt_exit(void)
+{
+ free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
+ tasklet_kill(&dsp_wdt.wdt3_tasklet);
+
+ if (dsp_wdt.fclk)
+ clk_put(dsp_wdt.fclk);
+ if (dsp_wdt.iclk)
+ clk_put(dsp_wdt.iclk);
+
+ dsp_wdt.fclk = NULL;
+ dsp_wdt.iclk = NULL;
+ dsp_wdt.sm_wdt = NULL;
+ dsp_wdt.reg_base = NULL;
+}
+
+void dsp_wdt_enable(bool enable)
+{
+ u32 tmp;
+ static bool wdt_enable;
+
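+ /* Nothing to do if already in the requested state or the clocks are absent */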
+ if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk)
+ return;
+
+ wdt_enable = enable;
+
+ if (enable) {
+ clk_enable(dsp_wdt.fclk);
+ clk_enable(dsp_wdt.iclk);
+ dsp_wdt.sm_wdt->wdt_setclocks = 1;
+ tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+ __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+ enable_irq(INT_34XX_WDT3_IRQ);
+ } else {
+ disable_irq(INT_34XX_WDT3_IRQ);
+ dsp_wdt.sm_wdt->wdt_setclocks = 0;
+ clk_disable(dsp_wdt.iclk);
+ clk_disable(dsp_wdt.fclk);
+ }
+}
+
+#else
+void dsp_wdt_enable(bool enable)
+{
+}
+
+void dsp_wdt_sm_set(void *data)
+{
+}
+
+int dsp_wdt_init(void)
+{
+ return 0;
+}
+
+void dsp_wdt_exit(void)
+{
+}
+#endif
+
diff --git a/drivers/staging/tidspbridge/dynload/cload.c b/drivers/staging/tidspbridge/dynload/cload.c
new file mode 100644
index 00000000000..c85a5e88361
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/cload.c
@@ -0,0 +1,1953 @@
+/*
+ * cload.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "header.h"
+
+#include "module_list.h"
+#define LINKER_MODULES_HEADER ("_" MODULES_HEADER)
+
+/*
+ * forward references
+ */
+static void dload_symbols(struct dload_state *dlthis);
+static void dload_data(struct dload_state *dlthis);
+static void allocate_sections(struct dload_state *dlthis);
+static void string_table_free(struct dload_state *dlthis);
+static void symbol_table_free(struct dload_state *dlthis);
+static void section_table_free(struct dload_state *dlthis);
+static void init_module_handle(struct dload_state *dlthis);
+#if BITS_PER_AU > BITS_PER_BYTE
+static char *unpack_name(struct dload_state *dlthis, u32 soffset);
+#endif
+
+static const char cinitname[] = { ".cinit" };
+static const char loader_dllview_root[] = { "?DLModules?" };
+
+/*
+ * Error strings
+ */
+static const char readstrm[] = { "Error reading %s from input stream" };
+static const char err_alloc[] = { "Syms->dload_allocate( %d ) failed" };
+static const char tgtalloc[] = {
+ "Target memory allocate failed, section %s size " FMT_UI32 };
+static const char initfail[] = { "%s to target address " FMT_UI32 " failed" };
+static const char dlvwrite[] = { "Write to DLLview list failed" };
+static const char iconnect[] = { "Connect call to init interface failed" };
+static const char err_checksum[] = { "Checksum failed on %s" };
+
+/*************************************************************************
+ * Procedure dload_error
+ *
+ * Parameters:
+ * errtxt description of the error, printf style
+ * ... additional information
+ *
+ * Effect:
+ * Reports or records the error as appropriate.
+ *********************************************************************** */
+void dload_error(struct dload_state *dlthis, const char *errtxt, ...)
+{
+ va_list args;
+
+ va_start(args, errtxt);
+ dlthis->mysym->error_report(dlthis->mysym, errtxt, args);
+ va_end(args);
+ dlthis->dload_errcount += 1;
+
+} /* dload_error */
+
+#define DL_ERROR(zza, zzb) dload_error(dlthis, zza, zzb)
+
+/*************************************************************************
+ * Procedure dload_syms_error
+ *
+ * Parameters:
+ * errtxt description of the error, printf style
+ * ... additional information
+ *
+ * Effect:
+ * Reports or records the error as appropriate.
+ *********************************************************************** */
+void dload_syms_error(struct dynamic_loader_sym *syms, const char *errtxt, ...)
+{
+ va_list args;
+
+ va_start(args, errtxt);
+ syms->error_report(syms, errtxt, args);
+ va_end(args);
+}
+
+/*************************************************************************
+ * Procedure dynamic_load_module
+ *
+ * Parameters:
+ * module The input stream that supplies the module image
+ * syms Host-side symbol table and malloc/free functions
+ * alloc Target-side memory allocation
+ * init Target-side memory initialization
+ * options Option flags DLOAD_*
+ * mhandle A module handle for use with Dynamic_Unload
+ *
+ * Effect:
+ * The module image is read using *module. Target storage for the new image
+ * is obtained from *alloc. Symbols defined and referenced by the module are
+ * managed using *syms. The image is then relocated and references
+ * resolved as necessary, and the resulting executable bits are placed
+ * into target memory using *init.
+ *
+ * Returns:
+ * On a successful load, a module handle is placed in *mhandle,
+ * and zero is returned. On error, the number of errors detected is
+ * returned. Individual errors are reported during the load process
+ * using syms->error_report().
+ ********************************************************************** */
+int dynamic_load_module(struct dynamic_loader_stream *module,
+ struct dynamic_loader_sym *syms,
+ struct dynamic_loader_allocate *alloc,
+ struct dynamic_loader_initialize *init,
+ unsigned options, void **mhandle)
+{
+ register unsigned *dp, sz;
+ struct dload_state dl_state; /* internal state for this call */
+
+ /* blast our internal state */
+ dp = (unsigned *)&dl_state;
+ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1)
+ *dp++ = 0;
+
+ /* Enable _only_ BSS initialization if enabled by user */
+ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS)
+ dl_state.myoptions = DLOAD_INITBSS;
+
+ /* Check that mandatory arguments are present */
+ if (!module || !syms) {
+ dload_error(&dl_state, "Required parameter is NULL");
+ } else {
+ dl_state.strm = module;
+ dl_state.mysym = syms;
+ dload_headers(&dl_state);
+ if (!dl_state.dload_errcount)
+ dload_strings(&dl_state, false);
+ if (!dl_state.dload_errcount)
+ dload_sections(&dl_state);
+
+ if (init && !dl_state.dload_errcount) {
+ if (init->connect(init)) {
+ dl_state.myio = init;
+ dl_state.myalloc = alloc;
+ /* do now, before reducing symbols */
+ allocate_sections(&dl_state);
+ } else
+ dload_error(&dl_state, iconnect);
+ }
+
+ if (!dl_state.dload_errcount) {
+ /* fix up entry point address */
+ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1;
+ if (sref < dl_state.allocated_secn_count)
+ dl_state.dfile_hdr.df_entrypt +=
+ dl_state.ldr_sections[sref].run_addr;
+
+ dload_symbols(&dl_state);
+ }
+
+ if (init && !dl_state.dload_errcount)
+ dload_data(&dl_state);
+
+ init_module_handle(&dl_state);
+
+ /* dl_state.myio is init or 0 at this point. */
+ if (dl_state.myio) {
+ if ((!dl_state.dload_errcount) &&
+ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) &&
+ (!init->execute(init,
+ dl_state.dfile_hdr.df_entrypt)))
+ dload_error(&dl_state, "Init->Execute Failed");
+ init->release(init);
+ }
+
+ symbol_table_free(&dl_state);
+ section_table_free(&dl_state);
+ string_table_free(&dl_state);
+ dload_tramp_cleanup(&dl_state);
+
+ if (dl_state.dload_errcount) {
+ dynamic_unload_module(dl_state.myhandle, syms, alloc,
+ init);
+ dl_state.myhandle = NULL;
+ }
+ }
+
+ if (mhandle)
+ *mhandle = dl_state.myhandle; /* give back the handle */
+
+ return dl_state.dload_errcount;
+} /* DLOAD_File */
+
+/*************************************************************************
+ * Procedure dynamic_open_module
+ *
+ * Parameters:
+ * module The input stream that supplies the module image
+ * syms Host-side symbol table and malloc/free functions
+ * alloc Target-side memory allocation
+ * init Target-side memory initialization
+ * options Option flags DLOAD_*
+ * mhandle A module handle for use with Dynamic_Unload
+ *
+ * Effect:
+ * The module image is read using *module. Target storage for the new image
+ * is obtained from *alloc. Symbols defined and referenced by the module are
+ * managed using *syms. The image is then relocated and references
+ * resolved as necessary, and the resulting executable bits are placed
+ * into target memory using *init.
+ *
+ * Returns:
+ * On a successful load, a module handle is placed in *mhandle,
+ * and zero is returned. On error, the number of errors detected is
+ * returned. Individual errors are reported during the load process
+ * using syms->error_report().
+ ********************************************************************** */
+int
+dynamic_open_module(struct dynamic_loader_stream *module,
+ struct dynamic_loader_sym *syms,
+ struct dynamic_loader_allocate *alloc,
+ struct dynamic_loader_initialize *init,
+ unsigned options, void **mhandle)
+{
+ register unsigned *dp, sz;
+ struct dload_state dl_state; /* internal state for this call */
+
+ /* blast our internal state */
+ dp = (unsigned *)&dl_state;
+ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1)
+ *dp++ = 0;
+
+ /* Enable _only_ BSS initialization if enabled by user */
+ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS)
+ dl_state.myoptions = DLOAD_INITBSS;
+
+ /* Check that mandatory arguments are present */
+ if (!module || !syms) {
+ dload_error(&dl_state, "Required parameter is NULL");
+ } else {
+ dl_state.strm = module;
+ dl_state.mysym = syms;
+ dload_headers(&dl_state);
+ if (!dl_state.dload_errcount)
+ dload_strings(&dl_state, false);
+ if (!dl_state.dload_errcount)
+ dload_sections(&dl_state);
+
+ if (init && !dl_state.dload_errcount) {
+ if (init->connect(init)) {
+ dl_state.myio = init;
+ dl_state.myalloc = alloc;
+ /* do now, before reducing symbols */
+ allocate_sections(&dl_state);
+ } else
+ dload_error(&dl_state, iconnect);
+ }
+
+ if (!dl_state.dload_errcount) {
+ /* fix up entry point address */
+ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1;
+ if (sref < dl_state.allocated_secn_count)
+ dl_state.dfile_hdr.df_entrypt +=
+ dl_state.ldr_sections[sref].run_addr;
+
+ dload_symbols(&dl_state);
+ }
+
+ init_module_handle(&dl_state);
+
+ /* dl_state.myio is either 0 or init at this point. */
+ if (dl_state.myio) {
+ if ((!dl_state.dload_errcount) &&
+ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) &&
+ (!init->execute(init,
+ dl_state.dfile_hdr.df_entrypt)))
+ dload_error(&dl_state, "Init->Execute Failed");
+ init->release(init);
+ }
+
+ symbol_table_free(&dl_state);
+ section_table_free(&dl_state);
+ string_table_free(&dl_state);
+
+ if (dl_state.dload_errcount) {
+ dynamic_unload_module(dl_state.myhandle, syms, alloc,
+ init);
+ dl_state.myhandle = NULL;
+ }
+ }
+
+ if (mhandle)
+ *mhandle = dl_state.myhandle; /* give back the handle */
+
+ return dl_state.dload_errcount;
+} /* DLOAD_File */
+
+/*************************************************************************
+ * Procedure dload_headers
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Loads the DOFF header and verify record. Deals with any byte-order
+ * issues and checks them for validity.
+ *********************************************************************** */
+#define COMBINED_HEADER_SIZE (sizeof(struct doff_filehdr_t)+ \
+ sizeof(struct doff_verify_rec_t))
+
+void dload_headers(struct dload_state *dlthis)
+{
+ u32 map;
+
+ /* Read the header and the verify record as one. If we don't get it
+ all, we're done */
+ if (dlthis->strm->read_buffer(dlthis->strm, &dlthis->dfile_hdr,
+ COMBINED_HEADER_SIZE) !=
+ COMBINED_HEADER_SIZE) {
+ DL_ERROR(readstrm, "File Headers");
+ return;
+ }
+ /*
+ * Verify that we have the byte order of the file correct.
+ * If not, must fix it before we can continue
+ */
+ map = REORDER_MAP(dlthis->dfile_hdr.df_byte_reshuffle);
+ if (map != REORDER_MAP(BYTE_RESHUFFLE_VALUE)) {
+ /* input is either byte-shuffled or bad */
+ if ((map & 0xFCFCFCFC) == 0) { /* no obviously bogus bits */
+ dload_reorder(&dlthis->dfile_hdr, COMBINED_HEADER_SIZE,
+ map);
+ }
+ if (dlthis->dfile_hdr.df_byte_reshuffle !=
+ BYTE_RESHUFFLE_VALUE) {
+ /* didn't fix the problem, the byte swap map is bad */
+ dload_error(dlthis,
+ "Bad byte swap map " FMT_UI32 " in header",
+ dlthis->dfile_hdr.df_byte_reshuffle);
+ return;
+ }
+ dlthis->reorder_map = map; /* keep map for future use */
+ }
+
+ /*
+ * Verify checksum of header and verify record
+ */
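+ /* a valid header/verify record checksums to all ones, so ~sum must be 0 */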
+ if (~dload_checksum(&dlthis->dfile_hdr,
+ sizeof(struct doff_filehdr_t)) ||
+ ~dload_checksum(&dlthis->verify,
+ sizeof(struct doff_verify_rec_t))) {
+ DL_ERROR(err_checksum, "header or verify record");
+ return;
+ }
+#if HOST_ENDIANNESS
+ dlthis->dfile_hdr.df_byte_reshuffle = map; /* put back for later */
+#endif
+
+ /* Check for valid target ID */
+ if ((dlthis->dfile_hdr.df_target_id != TARGET_ID) &&
+ (dlthis->dfile_hdr.df_target_id != TMS470_ID)) {
+ dload_error(dlthis, "Bad target ID 0x%x and TARGET_ID 0x%x",
+ dlthis->dfile_hdr.df_target_id, TARGET_ID);
+ return;
+ }
+ /* Check for valid file format */
+ if ((dlthis->dfile_hdr.df_doff_version != DOFF0)) {
+ dload_error(dlthis, "Bad DOFF version 0x%x",
+ dlthis->dfile_hdr.df_doff_version);
+ return;
+ }
+
+ /*
+ * Apply reasonableness checks to count fields
+ */
+ if (dlthis->dfile_hdr.df_strtab_size > MAX_REASONABLE_STRINGTAB) {
+ dload_error(dlthis, "Excessive string table size " FMT_UI32,
+ dlthis->dfile_hdr.df_strtab_size);
+ return;
+ }
+ if (dlthis->dfile_hdr.df_no_scns > MAX_REASONABLE_SECTIONS) {
+ dload_error(dlthis, "Excessive section count 0x%x",
+ dlthis->dfile_hdr.df_no_scns);
+ return;
+ }
+#ifndef TARGET_ENDIANNESS
+ /*
+ * Check that endianness does not disagree with explicit specification
+ */
+ if ((dlthis->dfile_hdr.df_flags >> ALIGN_COFF_ENDIANNESS) &
+ dlthis->myoptions & ENDIANNESS_MASK) {
+ dload_error(dlthis,
+ "Input endianness disagrees with specified option");
+ return;
+ }
+ dlthis->big_e_target = dlthis->dfile_hdr.df_flags & DF_BIG;
+#endif
+
+} /* dload_headers */
+
+/* COFF Section Processing
+ *
+ * COFF sections are read in and retained intact. Each record is embedded
+ * in a new structure that records the updated load and
+ * run addresses of the section */
+
+static const char secn_errid[] = { "section" };
+
+/*************************************************************************
+ * Procedure dload_sections
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Loads the section records into an internal table.
+ *********************************************************************** */
+void dload_sections(struct dload_state *dlthis)
+{
+ s16 siz;
+ struct doff_scnhdr_t *shp;
+ unsigned nsecs = dlthis->dfile_hdr.df_no_scns;
+
+ /* allocate space for the DOFF section records */
+ siz = nsecs * sizeof(struct doff_scnhdr_t);
+ shp =
+ (struct doff_scnhdr_t *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ siz);
+ if (!shp) { /* not enough storage */
+ DL_ERROR(err_alloc, siz);
+ return;
+ }
+ dlthis->sect_hdrs = shp;
+
+ /* read in the section records */
+ if (dlthis->strm->read_buffer(dlthis->strm, shp, siz) != siz) {
+ DL_ERROR(readstrm, secn_errid);
+ return;
+ }
+
+ /* if we need to fix up byte order, do it now */
+ if (dlthis->reorder_map)
+ dload_reorder(shp, siz, dlthis->reorder_map);
+
+ /* check for validity */
+ if (~dload_checksum(dlthis->sect_hdrs, siz) !=
+ dlthis->verify.dv_scn_rec_checksum) {
+ DL_ERROR(err_checksum, secn_errid);
+ return;
+ }
+
+} /* dload_sections */
+
+/*****************************************************************************
+ * Procedure allocate_sections
+ *
+ * Parameters:
+ * alloc target memory allocator class
+ *
+ * Effect:
+ * Assigns new (target) addresses for sections
+ **************************************************************************** */
+static void allocate_sections(struct dload_state *dlthis)
+{
+ u16 curr_sect, nsecs, siz;
+ struct doff_scnhdr_t *shp;
+ struct ldr_section_info *asecs;
+ struct my_handle *hndl;
+ nsecs = dlthis->dfile_hdr.df_no_scns;
+ if (!nsecs)
+ return;
+ if ((dlthis->myalloc == NULL) &&
+ (dlthis->dfile_hdr.df_target_scns > 0)) {
+ DL_ERROR("Arg 3 (alloc) required but NULL", 0);
+ return;
+ }
+ /*
+ * allocate space for the module handle, which we will keep for unload
+ * purposes include an additional section store for an auto-generated
+ * trampoline section in case we need it.
+ */
+ siz = (dlthis->dfile_hdr.df_target_scns + 1) *
+ sizeof(struct ldr_section_info) + MY_HANDLE_SIZE;
+
+ hndl =
+ (struct my_handle *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ siz);
+ if (!hndl) { /* not enough storage */
+ DL_ERROR(err_alloc, siz);
+ return;
+ }
+ /* initialize the handle header */
+ hndl->dm.hnext = hndl->dm.hprev = hndl; /* circular list */
+ hndl->dm.hroot = NULL;
+ hndl->dm.dbthis = 0;
+ dlthis->myhandle = hndl; /* save away for return */
+ /* pointer to the section list of allocated sections */
+ dlthis->ldr_sections = asecs = hndl->secns;
+ /* Insert names into all sections, make copies of
+ * the sections we allocate */
+ shp = dlthis->sect_hdrs;
+ for (curr_sect = 0; curr_sect < nsecs; curr_sect++) {
+ u32 soffset = shp->ds_offset;
+#if BITS_PER_AU <= BITS_PER_BYTE
+ /* attempt to insert the name of this section */
+ if (soffset < dlthis->dfile_hdr.df_strtab_size)
+ ((struct ldr_section_info *)shp)->name =
+ dlthis->str_head + soffset;
+ else {
+ dload_error(dlthis, "Bad name offset in section %d",
+ curr_sect);
+ ((struct ldr_section_info *)shp)->name = NULL;
+ }
+#endif
+ /* allocate target storage for sections that require it */
+ if (ds_needs_allocation(shp)) {
+ *asecs = *(struct ldr_section_info *)shp;
+ asecs->context = 0; /* zero the context field */
+#if BITS_PER_AU > BITS_PER_BYTE
+ asecs->name = unpack_name(dlthis, soffset);
+ dlthis->debug_string_size = soffset + dlthis->temp_len;
+#else
+ dlthis->debug_string_size = soffset;
+#endif
+ if (dlthis->myalloc != NULL) {
+ if (!dlthis->myalloc->
+ dload_allocate(dlthis->myalloc, asecs,
+ ds_alignment(asecs->type))) {
+ dload_error(dlthis, tgtalloc,
+ asecs->name, asecs->size);
+ return;
+ }
+ }
+ /* keep address deltas in original section table */
+ shp->ds_vaddr = asecs->load_addr - shp->ds_vaddr;
+ shp->ds_paddr = asecs->run_addr - shp->ds_paddr;
+ dlthis->allocated_secn_count += 1;
+ } /* allocate target storage */
+ shp += 1;
+ asecs += 1;
+ }
+#if BITS_PER_AU <= BITS_PER_BYTE
+ dlthis->debug_string_size +=
+ strlen(dlthis->str_head + dlthis->debug_string_size) + 1;
+#endif
+} /* allocate sections */
+
+/*************************************************************************
+ * Procedure section_table_free
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Frees any state used by the symbol table.
+ *
+ * WARNING:
+ * This routine is not allowed to declare errors!
+ *********************************************************************** */
+static void section_table_free(struct dload_state *dlthis)
+{
+ struct doff_scnhdr_t *shp;
+
+ shp = dlthis->sect_hdrs;
+ if (shp)
+ dlthis->mysym->dload_deallocate(dlthis->mysym, shp);
+
+} /* section_table_free */
+
+/*************************************************************************
+ * Procedure dload_strings
+ *
+ * Parameters:
+ * sec_names_only If true only read in the "section names"
+ * portion of the string table
+ *
+ * Effect:
+ * Loads the DOFF string table into memory. DOFF keeps all strings in a
+ * big unsorted array. We just read that array into memory in bulk.
+ *********************************************************************** */
+static const char stringtbl[] = { "string table" };
+
+void dload_strings(struct dload_state *dlthis, bool sec_names_only)
+{
+ u32 ssiz;
+ char *strbuf;
+
+ if (sec_names_only) {
+ ssiz = BYTE_TO_HOST(DOFF_ALIGN
+ (dlthis->dfile_hdr.df_scn_name_size));
+ } else {
+ ssiz = BYTE_TO_HOST(DOFF_ALIGN
+ (dlthis->dfile_hdr.df_strtab_size));
+ }
+ if (ssiz == 0)
+ return;
+
+ /* get some memory for the string table */
+#if BITS_PER_AU > BITS_PER_BYTE
+ strbuf = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, ssiz +
+ dlthis->dfile_hdr.
+ df_max_str_len);
+#else
+ strbuf = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, ssiz);
+#endif
+ if (strbuf == NULL) {
+ DL_ERROR(err_alloc, ssiz);
+ return;
+ }
+ dlthis->str_head = strbuf;
+#if BITS_PER_AU > BITS_PER_BYTE
+ dlthis->str_temp = strbuf + ssiz;
+#endif
+ /* read in the strings and verify them */
+ if ((unsigned)(dlthis->strm->read_buffer(dlthis->strm, strbuf,
+ ssiz)) != ssiz) {
+ DL_ERROR(readstrm, stringtbl);
+ }
+ /* if we need to fix up byte order, do it now */
+#ifndef _BIG_ENDIAN
+ if (dlthis->reorder_map)
+ dload_reorder(strbuf, ssiz, dlthis->reorder_map);
+
+ if ((!sec_names_only) && (~dload_checksum(strbuf, ssiz) !=
+ dlthis->verify.dv_str_tab_checksum)) {
+ DL_ERROR(err_checksum, stringtbl);
+ }
+#else
+ if (dlthis->dfile_hdr.df_byte_reshuffle !=
+ HOST_BYTE_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) {
+ /* put strings in big-endian order, not in PC order */
+ dload_reorder(strbuf, ssiz,
+ HOST_BYTE_ORDER(dlthis->
+ dfile_hdr.df_byte_reshuffle));
+ }
+ if ((!sec_names_only) && (~dload_reverse_checksum(strbuf, ssiz) !=
+ dlthis->verify.dv_str_tab_checksum)) {
+ DL_ERROR(err_checksum, stringtbl);
+ }
+#endif
+} /* dload_strings */
+
+/*************************************************************************
+ * Procedure string_table_free
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Frees any state used by the string table.
+ *
+ * WARNING:
+ * This routine is not allowed to declare errors!
+ ************************************************************************ */
+static void string_table_free(struct dload_state *dlthis)
+{
+ if (dlthis->str_head)
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ dlthis->str_head);
+
+} /* string_table_free */
+
+/*
+ * Symbol Table Maintenance Functions
+ *
+ * COFF symbols are read by dload_symbols(), which is called after
+ * sections have been allocated. Symbols which might be used in
+ * relocation (ie, not debug info) are retained in an internal temporary
+ * compressed table (type local_symbol). A particular symbol is recovered
+ * by index by calling dload_find_symbol(). dload_find_symbol
+ * reconstructs a more explicit representation (type SLOTVEC) which is
+ * used by reloc.c
+ */
+/* real size of debug header */
+#define DBG_HDR_SIZE (sizeof(struct dll_module) - sizeof(struct dll_sect))
+
+static const char sym_errid[] = { "symbol" };
+
+/**************************************************************************
+ * Procedure dload_symbols
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Reads in symbols and retains ones that might be needed for relocation
+ * purposes.
+ *********************************************************************** */
+/* size of symbol buffer no bigger than target data buffer, to limit stack
+ * usage */
+#define MY_SYM_BUF_SIZ (BYTE_TO_HOST(IMAGE_PACKET_SIZE)/\
+ sizeof(struct doff_syment_t))
+
+static void dload_symbols(struct dload_state *dlthis)
+{
+ u32 sym_count, siz, dsiz, symbols_left;
+ u32 checks;
+ struct local_symbol *sp;
+ struct dynload_symbol *symp;
+ struct dynload_symbol *newsym;
+
+ sym_count = dlthis->dfile_hdr.df_no_syms;
+ if (sym_count == 0)
+ return;
+
+ /*
+ * We keep a local symbol table for all of the symbols in the input.
+ * This table contains only section & value info, as we do not have
+ * to do any name processing for locals. We reuse this storage
+ * as a temporary for .dllview record construction.
+ * Allocate storage for the whole table. Add 1 to the section count
+ * in case a trampoline section is auto-generated, as well as the
+ * size of the trampoline section name so DLLView doesn't get lost.
+ */
+
+ siz = sym_count * sizeof(struct local_symbol);
+ dsiz = DBG_HDR_SIZE +
+ (sizeof(struct dll_sect) * dlthis->allocated_secn_count) +
+ BYTE_TO_HOST_ROUND(dlthis->debug_string_size + 1);
+ if (dsiz > siz)
+ siz = dsiz; /* larger of symbols and .dllview temp */
+ sp = (struct local_symbol *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ siz);
+ if (!sp) {
+ DL_ERROR(err_alloc, siz);
+ return;
+ }
+ dlthis->local_symtab = sp;
+ /* Read the symbols in the input, store them in the table, and post any
+ * globals to the global symbol table. In the process, externals
+ become defined from the global symbol table */
+ checks = dlthis->verify.dv_sym_tab_checksum;
+ symbols_left = sym_count;
+ do { /* read all symbols */
+ char *sname;
+ u32 val;
+ s32 delta;
+ struct doff_syment_t *input_sym;
+ unsigned syms_in_buf;
+ struct doff_syment_t my_sym_buf[MY_SYM_BUF_SIZ];
+ input_sym = my_sym_buf;
+ syms_in_buf = symbols_left > MY_SYM_BUF_SIZ ?
+ MY_SYM_BUF_SIZ : symbols_left;
+ siz = syms_in_buf * sizeof(struct doff_syment_t);
+ if (dlthis->strm->read_buffer(dlthis->strm, input_sym, siz) !=
+ siz) {
+ DL_ERROR(readstrm, sym_errid);
+ return;
+ }
+ if (dlthis->reorder_map)
+ dload_reorder(input_sym, siz, dlthis->reorder_map);
+
+ checks += dload_checksum(input_sym, siz);
+ do { /* process symbols in buffer */
+ symbols_left -= 1;
+ /* attempt to derive the name of this symbol */
+ sname = NULL;
+ if (input_sym->dn_offset > 0) {
+#if BITS_PER_AU <= BITS_PER_BYTE
+ if ((u32) input_sym->dn_offset <
+ dlthis->dfile_hdr.df_strtab_size)
+ sname = dlthis->str_head +
+ BYTE_TO_HOST(input_sym->dn_offset);
+ else
+ dload_error(dlthis,
+ "Bad name offset in symbol "
+ " %d", symbols_left);
+#else
+ sname = unpack_name(dlthis,
+ input_sym->dn_offset);
+#endif
+ }
+ val = input_sym->dn_value;
+ delta = 0;
+ sp->sclass = input_sym->dn_sclass;
+ sp->secnn = input_sym->dn_scnum;
+ /* if this is an undefined symbol,
+ * define it (or fail) now */
+ if (sp->secnn == DN_UNDEF) {
+ /* pointless for static undefined */
+ if (input_sym->dn_sclass != DN_EXT)
+ goto loop_cont;
+
+ /* try to define symbol from previously
+ * loaded images */
+ symp = dlthis->mysym->find_matching_symbol
+ (dlthis->mysym, sname);
+ if (!symp) {
+ DL_ERROR
+ ("Undefined external symbol %s",
+ sname);
+ goto loop_cont;
+ }
+ val = delta = symp->value;
+#ifdef ENABLE_TRAMP_DEBUG
+ dload_syms_error(dlthis->mysym,
+ "===> ext sym [%s] at %x",
+ sname, val);
+#endif
+
+ goto loop_cont;
+ }
+ /* symbol defined by this module */
+ if (sp->secnn > 0) {
+ /* symbol references a section */
+ if ((unsigned)sp->secnn <=
+ dlthis->allocated_secn_count) {
+ /* section was allocated */
+ struct doff_scnhdr_t *srefp =
+ &dlthis->sect_hdrs[sp->secnn - 1];
+
+ if (input_sym->dn_sclass ==
+ DN_STATLAB ||
+ input_sym->dn_sclass == DN_EXTLAB) {
+ /* load */
+ delta = srefp->ds_vaddr;
+ } else {
+ /* run */
+ delta = srefp->ds_paddr;
+ }
+ val += delta;
+ }
+ goto loop_itr;
+ }
+ /* This symbol is an absolute symbol */
+ if (sp->secnn == DN_ABS && ((sp->sclass == DN_EXT) ||
+ (sp->sclass ==
+ DN_EXTLAB))) {
+ symp =
+ dlthis->mysym->find_matching_symbol(dlthis->
+ mysym,
+ sname);
+ if (!symp)
+ goto loop_itr;
+ /* This absolute symbol is already defined. */
+ if (symp->value == input_sym->dn_value) {
+ /* If symbol values are equal, continue
+ * but don't add to the global symbol
+ * table */
+ sp->value = val;
+ sp->delta = delta;
+ sp += 1;
+ input_sym += 1;
+ continue;
+ } else {
+ /* If symbol values are not equal,
+ * return with redefinition error */
+ DL_ERROR("Absolute symbol %s is "
+ "defined multiple times with "
+ "different values", sname);
+ return;
+ }
+ }
+loop_itr:
+ /* if this is a global symbol, post it to the
+ * global table */
+ if (input_sym->dn_sclass == DN_EXT ||
+ input_sym->dn_sclass == DN_EXTLAB) {
+ /* Keep this global symbol for subsequent
+ * modules. Don't complain on error, to allow
+ * symbol API to suppress global symbols */
+ if (!sname)
+ goto loop_cont;
+
+ newsym = dlthis->mysym->add_to_symbol_table
+ (dlthis->mysym, sname,
+ (unsigned)dlthis->myhandle);
+ if (newsym)
+ newsym->value = val;
+
+ } /* global */
+loop_cont:
+ sp->value = val;
+ sp->delta = delta;
+ sp += 1;
+ input_sym += 1;
+ } while ((syms_in_buf -= 1) > 0); /* process sym in buf */
+ } while (symbols_left > 0); /* read all symbols */
+ if (~checks)
+ dload_error(dlthis, "Checksum of symbols failed");
+
+} /* dload_symbols */
+
+/*****************************************************************************
+ * Procedure symbol_table_free
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Frees any state used by the symbol table.
+ *
+ * WARNING:
+ * This routine is not allowed to declare errors!
+ **************************************************************************** */
+static void symbol_table_free(struct dload_state *dlthis)
+{
+ if (dlthis->local_symtab) {
+ if (dlthis->dload_errcount) { /* blow off our symbols */
+ dlthis->mysym->purge_symbol_table(dlthis->mysym,
+ (unsigned)
+ dlthis->myhandle);
+ }
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ dlthis->local_symtab);
+ }
+} /* symbol_table_free */
+
+/* .cinit Processing
+ *
+ * The dynamic loader does .cinit interpretation. cload_cinit()
+ * acts as a special write-to-target function, in that it takes relocated
+ * data from the normal data flow, and interprets it as .cinit actions.
+ * Because the normal data flow does not necessarily process the whole
+ * .cinit section in one buffer, cload_cinit() must be prepared to
+ * interpret the data piecemeal. A state machine is used for this
+ * purpose.
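+ *
+ * Each record is a count, a target address and <count> data units to
+ * copy there; a zero count (or the negative BSS-table marker) ends
+ * processing.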
+ */
+
+/* The following are only for use by reloc.c and things it calls */
+static const struct ldr_section_info cinit_info_init = { cinitname, 0, 0,
+ (ldr_addr)-1, 0, DLOAD_BSS, 0
+};
+
+/*************************************************************************
+ * Procedure cload_cinit
+ *
+ * Parameters:
+ * ipacket Pointer to data packet to be loaded
+ *
+ * Effect:
+ * Interprets the data in the buffer as .cinit data, and performs the
+ * appropriate initializations.
+ *********************************************************************** */
+static void cload_cinit(struct dload_state *dlthis,
+ struct image_packet_t *ipacket)
+{
+#if TDATA_TO_HOST(CINIT_COUNT)*BITS_PER_AU > 16
+ s32 init_count, left;
+#else
+ s16 init_count, left;
+#endif
+ unsigned char *pktp = ipacket->img_data;
+ unsigned char *pktend = pktp + BYTE_TO_HOST_ROUND(ipacket->packet_size);
+ int temp;
+ ldr_addr atmp;
+ struct ldr_section_info cinit_info;
+
+ /* PROCESS ALL THE INITIALIZATION RECORDS IN THE BUFFER. */
+ while (true) {
+ left = pktend - pktp;
+ switch (dlthis->cinit_state) {
+ case CI_COUNT: /* count field */
+ if (left < TDATA_TO_HOST(CINIT_COUNT))
+ goto loopexit;
+ temp = dload_unpack(dlthis, (tgt_au_t *) pktp,
+ CINIT_COUNT * TDATA_AU_BITS, 0,
+ ROP_SGN);
+ pktp += TDATA_TO_HOST(CINIT_COUNT);
+ /* negative signifies BSS table, zero means done */
+ if (temp <= 0) {
+ dlthis->cinit_state = CI_DONE;
+ break;
+ }
+ dlthis->cinit_count = temp;
+ dlthis->cinit_state = CI_ADDRESS;
+ break;
+#if CINIT_ALIGN < CINIT_ADDRESS
+ case CI_PARTADDRESS:
+ pktp -= TDATA_TO_HOST(CINIT_ALIGN);
+ /* back up pointer into space courtesy of caller */
+ *(uint16_t *) pktp = dlthis->cinit_addr;
+ /* stuff in saved bits !! FALL THRU !! */
+#endif
+ case CI_ADDRESS: /* Address field for a copy packet */
+ if (left < TDATA_TO_HOST(CINIT_ADDRESS)) {
+#if CINIT_ALIGN < CINIT_ADDRESS
+ if (left == TDATA_TO_HOST(CINIT_ALIGN)) {
+ /* address broken into halves */
+ dlthis->cinit_addr = *(uint16_t *) pktp;
+ /* remember 1st half */
+ dlthis->cinit_state = CI_PARTADDRESS;
+ left = 0;
+ }
+#endif
+ goto loopexit;
+ }
+ atmp = dload_unpack(dlthis, (tgt_au_t *) pktp,
+ CINIT_ADDRESS * TDATA_AU_BITS, 0,
+ ROP_UNS);
+ pktp += TDATA_TO_HOST(CINIT_ADDRESS);
+#if CINIT_PAGE_BITS > 0
+ dlthis->cinit_page = atmp &
+ ((1 << CINIT_PAGE_BITS) - 1);
+ atmp >>= CINIT_PAGE_BITS;
+#else
+ dlthis->cinit_page = CINIT_DEFAULT_PAGE;
+#endif
+ dlthis->cinit_addr = atmp;
+ dlthis->cinit_state = CI_COPY;
+ break;
+ case CI_COPY: /* copy bits to the target */
+ init_count = HOST_TO_TDATA(left);
+ if (init_count > dlthis->cinit_count)
+ init_count = dlthis->cinit_count;
+ if (init_count == 0)
+ goto loopexit; /* get more bits */
+ cinit_info = cinit_info_init;
+ cinit_info.page = dlthis->cinit_page;
+ if (!dlthis->myio->writemem(dlthis->myio, pktp,
+ TDATA_TO_TADDR
+ (dlthis->cinit_addr),
+ &cinit_info,
+ TDATA_TO_HOST(init_count))) {
+ dload_error(dlthis, initfail, "write",
+ dlthis->cinit_addr);
+ }
+ dlthis->cinit_count -= init_count;
+ if (dlthis->cinit_count <= 0) {
+ dlthis->cinit_state = CI_COUNT;
+ init_count = (init_count + CINIT_ALIGN - 1) &
+ -CINIT_ALIGN;
+ /* align to next init */
+ }
+ pktp += TDATA_TO_HOST(init_count);
+ dlthis->cinit_addr += init_count;
+ break;
+ case CI_DONE: /* no more .cinit to do */
+ return;
+ } /* switch (cinit_state) */
+ } /* while */
+
+loopexit:
+ if (left > 0) {
+ dload_error(dlthis, "%d bytes left over in cinit packet", left);
+ dlthis->cinit_state = CI_DONE; /* left over bytes are bad */
+ }
+} /* cload_cinit */
+
+/* Functions to interface to reloc.c
+ *
+ * reloc.c is the relocation module borrowed from the linker, with
+ * minimal (we hope) changes for our purposes. cload_sect_data() invokes
+ * this module on a section to relocate and load the image data for that
+ * section. The actual read and write actions are supplied by the global
+ * routines below.
+ */
+
+/************************************************************************
+ * Procedure relocate_packet
+ *
+ * Parameters:
+ * ipacket Pointer to an image packet to relocate
+ *
+ * Effect:
+ * Performs the required relocations on the packet. Returns a checksum
+ * of the relocation operations.
+ *********************************************************************** */
+#define MY_RELOC_BUF_SIZ 8
+/* careful! exists at the same time as the image buffer */
+static int relocate_packet(struct dload_state *dlthis,
+ struct image_packet_t *ipacket,
+ u32 *checks, bool *tramps_generated)
+{
+ u32 rnum;
+ *tramps_generated = false;
+
+ rnum = ipacket->num_relocs;
+ do { /* all relocs */
+ unsigned rinbuf;
+ int siz;
+ struct reloc_record_t *rp, rrec[MY_RELOC_BUF_SIZ];
+ rp = rrec;
+ rinbuf = rnum > MY_RELOC_BUF_SIZ ? MY_RELOC_BUF_SIZ : rnum;
+ siz = rinbuf * sizeof(struct reloc_record_t);
+ if (dlthis->strm->read_buffer(dlthis->strm, rp, siz) != siz) {
+ DL_ERROR(readstrm, "relocation");
+ return 0;
+ }
+ /* reorder the bytes if need be */
+ if (dlthis->reorder_map)
+ dload_reorder(rp, siz, dlthis->reorder_map);
+
+ *checks += dload_checksum(rp, siz);
+ do {
+ /* perform the relocation operation */
+ dload_relocate(dlthis, (tgt_au_t *) ipacket->img_data,
+ rp, tramps_generated, false);
+ rp += 1;
+ rnum -= 1;
+ } while ((rinbuf -= 1) > 0);
+ } while (rnum > 0); /* all relocs */
+ /* If trampoline(s) were generated, we need to do an update of the
+ * trampoline copy of the packet since a 2nd phase relo will be done
+ * later. */
+ if (*tramps_generated == true) {
+ dload_tramp_pkt_udpate(dlthis,
+ (dlthis->image_secn -
+ dlthis->ldr_sections),
+ dlthis->image_offset, ipacket);
+ }
+
+ return 1;
+} /* dload_read_reloc */
+
+#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32))
+
+/* VERY dangerous */
+static const char imagepak[] = { "image packet" };
+
+/*************************************************************************
+ * Procedure dload_data
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Read image data from input file, relocate it, and download it to the
+ * target.
+ *********************************************************************** */
+static void dload_data(struct dload_state *dlthis)
+{
+ u16 curr_sect;
+ struct doff_scnhdr_t *sptr = dlthis->sect_hdrs;
+ struct ldr_section_info *lptr = dlthis->ldr_sections;
+#ifdef OPT_ZERO_COPY_LOADER
+ bool zero_copy = false;
+#endif
+ u8 *dest;
+
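+ /* work area for one image packet: fixed header plus its data buffer */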
+ struct {
+ struct image_packet_t ipacket;
+ u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)];
+ } ibuf;
+
+ /* Indicates whether CINIT processing has occurred */
+ bool cinit_processed = false;
+
+ /* Loop through the sections and load them one at a time.
+ */
+ for (curr_sect = 0; curr_sect < dlthis->dfile_hdr.df_no_scns;
+ curr_sect += 1) {
+ if (ds_needs_download(sptr)) {
+ s32 nip;
+ ldr_addr image_offset = 0;
+ /* set relocation info for this section */
+ if (curr_sect < dlthis->allocated_secn_count)
+ dlthis->delta_runaddr = sptr->ds_paddr;
+ else {
+ lptr = (struct ldr_section_info *)sptr;
+ dlthis->delta_runaddr = 0;
+ }
+ dlthis->image_secn = lptr;
+#if BITS_PER_AU > BITS_PER_BYTE
+ lptr->name = unpack_name(dlthis, sptr->ds_offset);
+#endif
+ nip = sptr->ds_nipacks;
+ while ((nip -= 1) >= 0) { /* process packets */
+
+ s32 ipsize;
+ u32 checks;
+ bool tramp_generated = false;
+
+ /* get the fixed header bits */
+ if (dlthis->strm->read_buffer(dlthis->strm,
+ &ibuf.ipacket,
+ IPH_SIZE) !=
+ IPH_SIZE) {
+ DL_ERROR(readstrm, imagepak);
+ return;
+ }
+ /* reorder the header if need be */
+ if (dlthis->reorder_map) {
+ dload_reorder(&ibuf.ipacket, IPH_SIZE,
+ dlthis->reorder_map);
+ }
+ /* now read the rest of the packet */
+ ipsize =
+ BYTE_TO_HOST(DOFF_ALIGN
+ (ibuf.ipacket.packet_size));
+ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) {
+ DL_ERROR("Bad image packet size %d",
+ ipsize);
+ return;
+ }
+ dest = ibuf.bufr;
+#ifdef OPT_ZERO_COPY_LOADER
+ zero_copy = false;
+				if (!dload_check_type(sptr, DLOAD_CINIT)) {
+ dlthis->myio->writemem(dlthis->myio,
+ &dest,
+ lptr->load_addr +
+ image_offset,
+ lptr, 0);
+ zero_copy = (dest != ibuf.bufr);
+ }
+#endif
+ /* End of determination */
+
+ if (dlthis->strm->read_buffer(dlthis->strm,
+ ibuf.bufr,
+ ipsize) !=
+ ipsize) {
+ DL_ERROR(readstrm, imagepak);
+ return;
+ }
+ ibuf.ipacket.img_data = dest;
+
+ /* reorder the bytes if need be */
+#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16)
+ if (dlthis->reorder_map) {
+ dload_reorder(dest, ipsize,
+ dlthis->reorder_map);
+ }
+ checks = dload_checksum(dest, ipsize);
+#else
+ if (dlthis->dfile_hdr.df_byte_reshuffle !=
+ TARGET_ORDER(REORDER_MAP
+ (BYTE_RESHUFFLE_VALUE))) {
+ /* put image bytes in big-endian order,
+ * not PC order */
+ dload_reorder(dest, ipsize,
+ TARGET_ORDER
+ (dlthis->dfile_hdr.
+ df_byte_reshuffle));
+ }
+#if TARGET_AU_BITS > 8
+ checks = dload_reverse_checksum16(dest, ipsize);
+#else
+ checks = dload_reverse_checksum(dest, ipsize);
+#endif
+#endif
+
+ checks += dload_checksum(&ibuf.ipacket,
+ IPH_SIZE);
+ /* relocate the image bits as needed */
+ if (ibuf.ipacket.num_relocs) {
+ dlthis->image_offset = image_offset;
+ if (!relocate_packet(dlthis,
+ &ibuf.ipacket,
+ &checks,
+ &tramp_generated))
+ return; /* serious error */
+ }
+ if (~checks)
+ DL_ERROR(err_checksum, imagepak);
+ /* Only write the result to the target if no
+ * trampoline was generated. Otherwise it
+				 * will be done during trampoline finalize. */
+
+ if (tramp_generated == false) {
+
+ /* stuff the result into target
+ * memory */
+ if (dload_check_type(sptr,
+ DLOAD_CINIT)) {
+ cload_cinit(dlthis,
+ &ibuf.ipacket);
+ cinit_processed = true;
+ } else {
+#ifdef OPT_ZERO_COPY_LOADER
+ if (!zero_copy) {
+#endif
+ /* FIXME */
+ if (!dlthis->myio->
+ writemem(dlthis->
+ myio,
+ ibuf.bufr,
+ lptr->
+ load_addr +
+ image_offset,
+ lptr,
+ BYTE_TO_HOST
+ (ibuf.
+ ipacket.
+ packet_size))) {
+ DL_ERROR
+ ("Write to "
+ FMT_UI32
+ " failed",
+ lptr->
+ load_addr +
+ image_offset);
+ }
+#ifdef OPT_ZERO_COPY_LOADER
+ }
+#endif
+ }
+ }
+ image_offset +=
+ BYTE_TO_TADDR(ibuf.ipacket.packet_size);
+ } /* process packets */
+ /* if this is a BSS section, we may want to fill it */
+ if (!dload_check_type(sptr, DLOAD_BSS))
+ goto loop_cont;
+
+ if (!(dlthis->myoptions & DLOAD_INITBSS))
+ goto loop_cont;
+
+ if (cinit_processed) {
+ /* Don't clear BSS after load-time
+ * initialization */
+ DL_ERROR
+ ("Zero-initialization at " FMT_UI32
+ " after " "load-time initialization!",
+ lptr->load_addr);
+ goto loop_cont;
+ }
+ /* fill the .bss area */
+ dlthis->myio->fillmem(dlthis->myio,
+ TADDR_TO_HOST(lptr->load_addr),
+ lptr, TADDR_TO_HOST(lptr->size),
+ DLOAD_FILL_BSS);
+ goto loop_cont;
+ }
+ /* if DS_DOWNLOAD_MASK */
+ /* If not loading, but BSS, zero initialize */
+ if (!dload_check_type(sptr, DLOAD_BSS))
+ goto loop_cont;
+
+ if (!(dlthis->myoptions & DLOAD_INITBSS))
+ goto loop_cont;
+
+ if (curr_sect >= dlthis->allocated_secn_count)
+ lptr = (struct ldr_section_info *)sptr;
+
+ if (cinit_processed) {
+ /*Don't clear BSS after load-time initialization */
+ DL_ERROR("Zero-initialization at " FMT_UI32
+ " attempted after "
+ "load-time initialization!", lptr->load_addr);
+ goto loop_cont;
+ }
+ /* fill the .bss area */
+ dlthis->myio->fillmem(dlthis->myio,
+ TADDR_TO_HOST(lptr->load_addr), lptr,
+ TADDR_TO_HOST(lptr->size),
+ DLOAD_FILL_BSS);
+loop_cont:
+ sptr += 1;
+ lptr += 1;
+ } /* load sections */
+
+ /* Finalize any trampolines that were created during the load */
+ if (dload_tramp_finalize(dlthis) == 0) {
+ DL_ERROR("Finalization of auto-trampolines (size = " FMT_UI32
+ ") failed", dlthis->tramp.tramp_sect_next_addr);
+ }
+} /* dload_data */
+
+/*************************************************************************
+ * Procedure dload_reorder
+ *
+ * Parameters:
+ * data 32-bit aligned pointer to data to be byte-swapped
+ * dsiz size of the data to be reordered in sizeof() units.
+ * map 32-bit map defining how to reorder the data. Value
+ * must be REORDER_MAP() of some permutation
+ * of 0x00 01 02 03
+ *
+ * Effect:
+ * Re-arranges the bytes in each word according to the map specified.
+ *
+ *********************************************************************** */
+/* mask for byte shift count */
+#define SHIFT_COUNT_MASK (3 << LOG_BITS_PER_BYTE)
+
+void dload_reorder(void *data, int dsiz, unsigned int map)
+{
+ register u32 tmp, tmap, datv;
+ u32 *dp = (u32 *) data;
+
+ map <<= LOG_BITS_PER_BYTE; /* align map with SHIFT_COUNT_MASK */
+ do {
+ tmp = 0;
+ datv = *dp;
+ tmap = map;
+ do {
+ tmp |= (datv & BYTE_MASK) << (tmap & SHIFT_COUNT_MASK);
+ tmap >>= BITS_PER_BYTE;
+ } while (datv >>= BITS_PER_BYTE);
+ *dp++ = tmp;
+ } while ((dsiz -= sizeof(u32)) > 0);
+} /* dload_reorder */
+
+/*************************************************************************
+ * Procedure dload_checksum
+ *
+ * Parameters:
+ * data 32-bit aligned pointer to data to be checksummed
+ * siz size of the data to be checksummed in sizeof() units.
+ *
+ * Effect:
+ * Returns a checksum of the specified block
+ *
+ *********************************************************************** */
+u32 dload_checksum(void *data, unsigned siz)
+{
+ u32 sum;
+ u32 *dp;
+ int left;
+
+ sum = 0;
+ dp = (u32 *) data;
+ for (left = siz; left > 0; left -= sizeof(u32))
+ sum += *dp++;
+ return sum;
+} /* dload_checksum */
+
+#if HOST_ENDIANNESS
+/*************************************************************************
+ * Procedure dload_reverse_checksum
+ *
+ * Parameters:
+ * data 32-bit aligned pointer to data to be checksummed
+ * siz size of the data to be checksummed in sizeof() units.
+ *
+ * Effect:
+ * Returns a checksum of the specified block, which is assumed to be bytes
+ * in big-endian order.
+ *
+ * Notes:
+ * In a big-endian host, things like the string table are stored as bytes
+ * in host order. But dllcreate always checksums in little-endian order.
+ * It is most efficient to just handle the difference a word at a time.
+ *
+ ********************************************************************** */
+u32 dload_reverse_checksum(void *data, unsigned siz)
+{
+ u32 sum, temp;
+ u32 *dp;
+ int left;
+
+ sum = 0;
+ dp = (u32 *) data;
+
+ for (left = siz; left > 0; left -= sizeof(u32)) {
+ temp = *dp++;
+ sum += temp << BITS_PER_BYTE * 3;
+ sum += temp >> BITS_PER_BYTE * 3;
+ sum += (temp >> BITS_PER_BYTE) & (BYTE_MASK << BITS_PER_BYTE);
+ sum += (temp & (BYTE_MASK << BITS_PER_BYTE)) << BITS_PER_BYTE;
+ }
+
+ return sum;
+} /* dload_reverse_checksum */
+
+#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32)
+u32 dload_reverse_checksum16(void *data, unsigned siz)
+{
+	u32 sum, temp;
+ u32 *dp;
+ int left;
+
+ sum = 0;
+ dp = (u32 *) data;
+
+ for (left = siz; left > 0; left -= sizeof(u32)) {
+ temp = *dp++;
+ sum += temp << BITS_PER_BYTE * 2;
+ sum += temp >> BITS_PER_BYTE * 2;
+ }
+
+ return sum;
+} /* dload_reverse_checksum16 */
+#endif
+#endif
+
+/*************************************************************************
+ * Procedure swap_words
+ *
+ * Parameters:
+ * data 32-bit aligned pointer to data to be swapped
+ * siz size of the data to be swapped.
+ * bitmap Bit map of how to swap each 32-bit word; 1 => 2 shorts,
+ * 0 => 1 long
+ *
+ * Effect:
+ * Swaps the specified data according to the specified map
+ *
+ *********************************************************************** */
+static void swap_words(void *data, unsigned siz, unsigned bitmap)
+{
+ register int i;
+#if TARGET_AU_BITS < 16
+ register u16 *sp;
+#endif
+ register u32 *lp;
+
+ siz /= sizeof(u16);
+
+#if TARGET_AU_BITS < 16
+ /* pass 1: do all the bytes */
+ i = siz;
+ sp = (u16 *) data;
+ do {
+ register u16 tmp;
+ tmp = *sp;
+ *sp++ = SWAP16BY8(tmp);
+ } while ((i -= 1) > 0);
+#endif
+
+#if TARGET_AU_BITS < 32
+ /* pass 2: fixup the 32-bit words */
+ i = siz >> 1;
+ lp = (u32 *) data;
+ do {
+ if ((bitmap & 1) == 0) {
+ register u32 tmp;
+ tmp = *lp;
+ *lp = SWAP32BY16(tmp);
+ }
+ lp += 1;
+ bitmap >>= 1;
+ } while ((i -= 1) > 0);
+#endif
+} /* swap_words */
+
+/*************************************************************************
+ * Procedure copy_tgt_strings
+ *
+ * Parameters:
+ * dstp Destination address. Assumed to be 32-bit aligned
+ * srcp Source address. Assumed to be 32-bit aligned
+ * charcount Number of characters to copy.
+ *
+ * Effect:
+ * Copies strings from the source (which is in usual .dof file order on
+ * the loading processor) to the destination buffer (which should be in proper
+ * target addressable unit order). Makes sure the last string in the
+ * buffer is NULL terminated (for safety).
+ * Returns the first unused destination address.
+ *********************************************************************** */
+static char *copy_tgt_strings(void *dstp, void *srcp, unsigned charcount)
+{
+ register tgt_au_t *src = (tgt_au_t *) srcp;
+ register tgt_au_t *dst = (tgt_au_t *) dstp;
+ register int cnt = charcount;
+ do {
+#if TARGET_AU_BITS <= BITS_PER_AU
+ /* byte-swapping issues may exist for strings on target */
+ *dst++ = *src++;
+#else
+ *dst++ = *src++;
+#endif
+ } while ((cnt -= (sizeof(tgt_au_t) * BITS_PER_AU / BITS_PER_BYTE)) > 0);
+	/* make sure the string table ends with a NULL terminator */
+#if (BITS_PER_AU == BITS_PER_BYTE) && (TARGET_AU_BITS == BITS_PER_BYTE)
+ dst[-1] = 0;
+#else
+ /* little endian */
+ dst[-1] &= (1 << (BITS_PER_AU - BITS_PER_BYTE)) - 1;
+#endif
+ return (char *)dst;
+} /* copy_tgt_strings */
+
+/*************************************************************************
+ * Procedure init_module_handle
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Initializes the module handle we use to enable unloading, and installs
+ * the debug information required by the target.
+ *
+ * Notes:
+ * The handle returned from dynamic_load_module needs to encapsulate all the
+ * allocations done for the module, and enable them plus the modules symbols to
+ * be deallocated.
+ *
+ *********************************************************************** */
+#ifndef _BIG_ENDIAN
+static const struct ldr_section_info dllview_info_init = { ".dllview", 0, 0,
+ (ldr_addr)-1, DBG_LIST_PAGE, DLOAD_DATA, 0
+};
+#else
+static const struct ldr_section_info dllview_info_init = { ".dllview", 0, 0,
+ (ldr_addr)-1, DLOAD_DATA, DBG_LIST_PAGE, 0
+};
+#endif
+static void init_module_handle(struct dload_state *dlthis)
+{
+ struct my_handle *hndl;
+ u16 curr_sect;
+ struct ldr_section_info *asecs;
+ struct dll_module *dbmod;
+ struct dll_sect *dbsec;
+ struct dbg_mirror_root *mlist;
+ register char *cp;
+ struct modules_header mhdr;
+ struct ldr_section_info dllview_info;
+ struct dynload_symbol *debug_mirror_sym;
+ hndl = dlthis->myhandle;
+ if (!hndl)
+ return; /* must be errors detected, so forget it */
+
+ /* Store the section count */
+ hndl->secn_count = dlthis->allocated_secn_count;
+
+ /* If a trampoline section was created, add it in */
+ if (dlthis->tramp.tramp_sect_next_addr != 0)
+ hndl->secn_count += 1;
+
+ hndl->secn_count = hndl->secn_count << 1;
+
+#ifndef TARGET_ENDIANNESS
+ if (dlthis->big_e_target)
+ hndl->secn_count += 1; /* flag for big-endian */
+#endif
+ if (dlthis->dload_errcount)
+ return; /* abandon if errors detected */
+ /* Locate the symbol that names the header for the CCS debug list
+ of modules. If not found, we just don't generate the debug record.
+ If found, we create our modules list. We make sure to create the
+ loader_dllview_root even if there is no relocation info to record,
+ just to try to put both symbols in the same symbol table and
+ module. */
+ debug_mirror_sym = dlthis->mysym->find_matching_symbol(dlthis->mysym,
+ loader_dllview_root);
+ if (!debug_mirror_sym) {
+ struct dynload_symbol *dlmodsym;
+ struct dbg_mirror_root *mlst;
+
+ /* our root symbol is not yet present;
+ check if we have DLModules defined */
+ dlmodsym = dlthis->mysym->find_matching_symbol(dlthis->mysym,
+ LINKER_MODULES_HEADER);
+ if (!dlmodsym)
+ return; /* no DLModules list so no debug info */
+ /* if we have DLModules defined, construct our header */
+ mlst = (struct dbg_mirror_root *)
+ dlthis->mysym->dload_allocate(dlthis->mysym,
+ sizeof(struct
+ dbg_mirror_root));
+ if (!mlst) {
+ DL_ERROR(err_alloc, sizeof(struct dbg_mirror_root));
+ return;
+ }
+ mlst->hnext = NULL;
+ mlst->changes = 0;
+ mlst->refcount = 0;
+ mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value);
+ /* add our root symbol */
+ debug_mirror_sym = dlthis->mysym->add_to_symbol_table
+ (dlthis->mysym, loader_dllview_root,
+ (unsigned)dlthis->myhandle);
+ if (!debug_mirror_sym) {
+ /* failed, recover memory */
+ dlthis->mysym->dload_deallocate(dlthis->mysym, mlst);
+ return;
+ }
+ debug_mirror_sym->value = (u32) mlst;
+ }
+ /* First create the DLLview record and stuff it into the buffer.
+ Then write it to the DSP. Record pertinent locations in our hndl,
+ and add it to the per-processor list of handles with debug info. */
+#ifndef DEBUG_HEADER_IN_LOADER
+ mlist = (struct dbg_mirror_root *)debug_mirror_sym->value;
+ if (!mlist)
+ return;
+#else
+ mlist = (struct dbg_mirror_root *)&debug_list_header;
+#endif
+ hndl->dm.hroot = mlist; /* set pointer to root into our handle */
+ if (!dlthis->allocated_secn_count)
+ return; /* no load addresses to be recorded */
+ /* reuse temporary symbol storage */
+ dbmod = (struct dll_module *)dlthis->local_symtab;
+ /* Create the DLLview record in the memory we retain for our handle */
+ dbmod->num_sects = dlthis->allocated_secn_count;
+ dbmod->timestamp = dlthis->verify.dv_timdat;
+ dbmod->version = INIT_VERSION;
+ dbmod->verification = VERIFICATION;
+ asecs = dlthis->ldr_sections;
+ dbsec = dbmod->sects;
+ for (curr_sect = dlthis->allocated_secn_count;
+ curr_sect > 0; curr_sect -= 1) {
+ dbsec->sect_load_adr = asecs->load_addr;
+ dbsec->sect_run_adr = asecs->run_addr;
+ dbsec += 1;
+ asecs += 1;
+ }
+
+ /* If a trampoline section was created go ahead and add its info */
+ if (dlthis->tramp.tramp_sect_next_addr != 0) {
+ dbmod->num_sects++;
+ dbsec->sect_load_adr = asecs->load_addr;
+ dbsec->sect_run_adr = asecs->run_addr;
+ dbsec++;
+ asecs++;
+ }
+
+ /* now cram in the names */
+ cp = copy_tgt_strings(dbsec, dlthis->str_head,
+ dlthis->debug_string_size);
+
+ /* If a trampoline section was created, add its name so DLLView
+ * can show the user the section info. */
+ if (dlthis->tramp.tramp_sect_next_addr != 0) {
+ cp = copy_tgt_strings(cp,
+ dlthis->tramp.final_string_table,
+ strlen(dlthis->tramp.final_string_table) +
+ 1);
+ }
+
+ /* round off the size of the debug record, and remember same */
+ hndl->dm.dbsiz = HOST_TO_TDATA_ROUND(cp - (char *)dbmod);
+ *cp = 0; /* strictly to make our test harness happy */
+ dllview_info = dllview_info_init;
+ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz);
+ /* Initialize memory context to default heap */
+ dllview_info.context = 0;
+ hndl->dm.context = 0;
+ /* fill in next pointer and size */
+ if (mlist->hnext) {
+ dbmod->next_module = TADDR_TO_TDATA(mlist->hnext->dm.dbthis);
+ dbmod->next_module_size = mlist->hnext->dm.dbsiz;
+ } else {
+ dbmod->next_module_size = 0;
+ dbmod->next_module = 0;
+ }
+ /* allocate memory for on-DSP DLLview debug record */
+ if (!dlthis->myalloc)
+ return;
+ if (!dlthis->myalloc->dload_allocate(dlthis->myalloc, &dllview_info,
+ HOST_TO_TADDR(sizeof(u32)))) {
+ return;
+ }
+ /* Store load address of .dllview section */
+ hndl->dm.dbthis = dllview_info.load_addr;
+ /* Store memory context (segid) in which .dllview section
+ * was allocated */
+ hndl->dm.context = dllview_info.context;
+ mlist->refcount += 1;
+ /* swap bytes in the entire debug record, but not the string table */
+ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) {
+ swap_words(dbmod, (char *)dbsec - (char *)dbmod,
+ DLL_MODULE_BITMAP);
+ }
+ /* Update the DLLview list on the DSP write new record */
+ if (!dlthis->myio->writemem(dlthis->myio, dbmod,
+ dllview_info.load_addr, &dllview_info,
+ TADDR_TO_HOST(dllview_info.size))) {
+ return;
+ }
+ /* write new header */
+ mhdr.first_module_size = hndl->dm.dbsiz;
+ mhdr.first_module = TADDR_TO_TDATA(dllview_info.load_addr);
+ /* swap bytes in the module header, if needed */
+ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) {
+ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16),
+ MODULES_HEADER_BITMAP);
+ }
+ dllview_info = dllview_info_init;
+ if (!dlthis->myio->writemem(dlthis->myio, &mhdr, mlist->dbthis,
+ &dllview_info,
+ sizeof(struct modules_header) -
+ sizeof(u16))) {
+ return;
+ }
+ /* Add the module handle to this processor's list
+ of handles with debug info */
+ hndl->dm.hnext = mlist->hnext;
+ if (hndl->dm.hnext)
+ hndl->dm.hnext->dm.hprev = hndl;
+ hndl->dm.hprev = (struct my_handle *)mlist;
+ mlist->hnext = hndl; /* insert after root */
+} /* init_module_handle */
+
+/*************************************************************************
+ * Procedure dynamic_unload_module
+ *
+ * Parameters:
+ * mhandle A module handle from dynamic_load_module
+ * syms Host-side symbol table and malloc/free functions
+ * alloc Target-side memory allocation
+ *
+ * Effect:
+ * The module specified by mhandle is unloaded. Unloading causes all
+ * target memory to be deallocated, all symbols defined by the module to
+ * be purged, and any host-side storage used by the dynamic loader for
+ * this module to be released.
+ *
+ * Returns:
+ * Zero for success. On error, the number of errors detected is returned.
+ * Individual errors are reported using syms->error_report().
+ *********************************************************************** */
+int dynamic_unload_module(void *mhandle,
+ struct dynamic_loader_sym *syms,
+ struct dynamic_loader_allocate *alloc,
+ struct dynamic_loader_initialize *init)
+{
+ s16 curr_sect;
+ struct ldr_section_info *asecs;
+ struct my_handle *hndl;
+ struct dbg_mirror_root *root;
+ unsigned errcount = 0;
+ struct ldr_section_info dllview_info = dllview_info_init;
+ struct modules_header mhdr;
+
+ hndl = (struct my_handle *)mhandle;
+ if (!hndl)
+ return 0; /* if handle is null, nothing to do */
+	/* Clear out the module symbols
+	 * Note that if this is the module that defined MODULES_HEADER (the
+	 * head of the target debug list), this operation will blow away that
+	 * symbol. It will therefore be impossible for subsequent operations
+	 * to add entries to this un-referenceable list. */
+ if (!syms)
+ return 1;
+ syms->purge_symbol_table(syms, (unsigned)hndl);
+ /* Deallocate target memory for sections
+ * NOTE: The trampoline section, if created, gets deleted here, too */
+
+ asecs = hndl->secns;
+ if (alloc)
+ for (curr_sect = (hndl->secn_count >> 1); curr_sect > 0;
+ curr_sect -= 1) {
+ asecs->name = NULL;
+ alloc->dload_deallocate(alloc, asecs++);
+ }
+ root = hndl->dm.hroot;
+ if (!root) {
+		/* no debug list for this module */
+ goto func_end;
+ }
+	if (!hndl->dm.dbthis) {	/* no target-side dllview record */
+ goto loop_end;
+ }
+ /* Retrieve memory context in which .dllview was allocated */
+ dllview_info.context = hndl->dm.context;
+ if (hndl->dm.hprev == hndl)
+ goto exitunltgt;
+
+ /* target-side dllview record is in list */
+ /* dequeue this record from our GPP-side mirror list */
+ hndl->dm.hprev->dm.hnext = hndl->dm.hnext;
+ if (hndl->dm.hnext)
+ hndl->dm.hnext->dm.hprev = hndl->dm.hprev;
+ /* Update next_module of previous entry in target list
+ * We are using mhdr here as a surrogate for either a
+ struct modules_header or a dll_module */
+ if (hndl->dm.hnext) {
+ mhdr.first_module = TADDR_TO_TDATA(hndl->dm.hnext->dm.dbthis);
+ mhdr.first_module_size = hndl->dm.hnext->dm.dbsiz;
+ } else {
+ mhdr.first_module = 0;
+ mhdr.first_module_size = 0;
+ }
+ if (!init)
+ goto exitunltgt;
+
+ if (!init->connect(init)) {
+ dload_syms_error(syms, iconnect);
+ errcount += 1;
+ goto exitunltgt;
+ }
+ /* swap bytes in the module header, if needed */
+ if (TARGET_ENDIANNESS_DIFFERS(hndl->secn_count & 0x1)) {
+ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16),
+ MODULES_HEADER_BITMAP);
+ }
+ if (!init->writemem(init, &mhdr, hndl->dm.hprev->dm.dbthis,
+ &dllview_info, sizeof(struct modules_header) -
+ sizeof(mhdr.update_flag))) {
+ dload_syms_error(syms, dlvwrite);
+ errcount += 1;
+ }
+ /* update change counter */
+ root->changes += 1;
+ if (!init->writemem(init, &(root->changes),
+ root->dbthis + HOST_TO_TADDR
+ (sizeof(mhdr.first_module) +
+ sizeof(mhdr.first_module_size)),
+ &dllview_info, sizeof(mhdr.update_flag))) {
+ dload_syms_error(syms, dlvwrite);
+ errcount += 1;
+ }
+ init->release(init);
+exitunltgt:
+ /* release target storage */
+ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz);
+ dllview_info.load_addr = hndl->dm.dbthis;
+ if (alloc)
+ alloc->dload_deallocate(alloc, &dllview_info);
+ root->refcount -= 1;
+ /* target-side dllview record exists */
+loop_end:
+#ifndef DEBUG_HEADER_IN_LOADER
+ if (root->refcount <= 0) {
+ /* if all references gone, blow off the header */
+ /* our root symbol may be gone due to the Purge above,
+ but if not, do not destroy the root */
+ if (syms->find_matching_symbol
+ (syms, loader_dllview_root) == NULL)
+ syms->dload_deallocate(syms, root);
+ }
+#endif
+func_end:
+ /* there is a debug list containing this module */
+ syms->dload_deallocate(syms, mhandle); /* release our storage */
+ return errcount;
+} /* dynamic_unload_module */
+
+#if BITS_PER_AU > BITS_PER_BYTE
+/*************************************************************************
+ * Procedure unpack_name
+ *
+ * Parameters:
+ * soffset Byte offset into the string table
+ *
+ * Effect:
+ * Returns a pointer to the string specified by the offset supplied, or
+ * NULL for error.
+ *
+ *********************************************************************** */
+static char *unpack_name(struct dload_state *dlthis, u32 soffset)
+{
+ u8 tmp, *src;
+ char *dst;
+
+ if (soffset >= dlthis->dfile_hdr.df_strtab_size) {
+ dload_error(dlthis, "Bad string table offset " FMT_UI32,
+ soffset);
+ return NULL;
+ }
+	src = (u8 *) dlthis->str_head +
+ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE));
+ dst = dlthis->str_temp;
+ if (soffset & 1)
+ *dst++ = *src++; /* only 1 character in first word */
+ do {
+ tmp = *src++;
+ *dst = (tmp >> BITS_PER_BYTE);
+ if (!(*dst++))
+ break;
+ } while ((*dst++ = tmp & BYTE_MASK));
+ dlthis->temp_len = dst - dlthis->str_temp;
+ /* squirrel away length including terminating null */
+ return dlthis->str_temp;
+} /* unpack_name */
+#endif
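When the loader is built for a host whose minimum addressable unit is wider than a byte (BITS_PER_AU > BITS_PER_BYTE), strings in the DOFF file arrive packed two characters per 16-bit AU, high byte first; unpack_name() above and unpack_sec_name() in getsection.c both decode that layout. A standalone sketch of the same decoding, shown only as an illustration (it ignores the odd-offset case handled by `soffset & 1`):

#include <stdint.h>
#include <stdio.h>

/* Unpack "abc" from two 16-bit AUs: 0x6162 ('a','b') and 0x6300 ('c', NUL). */
int main(void)
{
	const uint16_t packed[] = { 0x6162, 0x6300 };
	char out[8], *dst = out;
	unsigned i;

	for (i = 0; i < 2; i++) {
		*dst++ = (char)(packed[i] >> 8);	/* high byte first */
		if (!dst[-1])
			break;
		*dst++ = (char)(packed[i] & 0xFF);	/* then the low byte */
		if (!dst[-1])
			break;
	}
	printf("%s\n", out);	/* prints "abc" */
	return 0;
}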
diff --git a/drivers/staging/tidspbridge/dynload/dload_internal.h b/drivers/staging/tidspbridge/dynload/dload_internal.h
new file mode 100644
index 00000000000..302a7c53e12
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/dload_internal.h
@@ -0,0 +1,344 @@
+/*
+ * dload_internal.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DLOAD_INTERNAL_
+#define _DLOAD_INTERNAL_
+
+#include <linux/types.h>
+
+/*
+ * Internal state definitions for the dynamic loader
+ */
+
+/* type used for relocation intermediate results */
+typedef s32 rvalue;
+
+/* unsigned version of same; must have at least as many bits */
+typedef u32 urvalue;
+
+/*
+ * Dynamic loader configuration constants
+ */
+/* error issued if input has more sections than this limit */
+#define REASONABLE_SECTION_LIMIT 100
+
+/* (Addressable unit) value used to clear BSS section */
+#define DLOAD_FILL_BSS 0
+
+/*
+ * Reorder maps explained (?)
+ *
+ * The doff file format defines a 32-bit pattern used to determine the
+ * byte order of an image being read. That value is
+ * BYTE_RESHUFFLE_VALUE == 0x00010203
+ * For purposes of the reorder routine, we would rather have the all-is-OK
+ * for 32-bits pattern be 0x03020100. This first macro makes the
+ * translation from doff file header value to MAP value: */
+#define REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303)
+/* This translation is made in dload_headers. Thereafter, the all-is-OK
+ * value for the maps stored in dlthis is REORDER_MAP(BYTE_RESHUFFLE_VALUE).
+ * But sadly, not all bits of the doff file are 32-bit integers.
+ * The notable exceptions are strings and image bits.
+ * Strings obey host byte order: */
+#if defined(_BIG_ENDIAN)
+#define HOST_BYTE_ORDER(cookedmap) ((cookedmap) ^ 0x3030303)
+#else
+#define HOST_BYTE_ORDER(cookedmap) (cookedmap)
+#endif
+/* Target bits consist of target AUs (could be bytes, or 16-bits,
+ * or 32-bits) stored as an array in host order. A target order
+ * map is defined by: */
+#if !defined(_BIG_ENDIAN) || TARGET_AU_BITS > 16
+#define TARGET_ORDER(cookedmap) (cookedmap)
+#elif TARGET_AU_BITS > 8
+#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x2020202)
+#else
+#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x3030303)
+#endif
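Concretely, each byte of a cooked map holds the destination position (0-3) for the corresponding source byte of every 32-bit word, which is exactly how dload_reorder() in cload.c consumes it: the all-is-OK map REORDER_MAP(BYTE_RESHUFFLE_VALUE) == 0x03020100 leaves words untouched, while a map of 0x00010203 reverses the bytes of each word. A standalone sketch of the per-word permutation (the constants assume 8-bit bytes, i.e. BYTE_MASK == 0xff and LOG_BITS_PER_BYTE == 3, as elsewhere in the driver):

#include <stdint.h>
#include <assert.h>

/* Mirror of the inner loop of dload_reorder(), applied to a single word. */
static uint32_t reorder_word(uint32_t v, uint32_t map)
{
	uint32_t out = 0, tmap = map << 3;	/* byte index -> bit shift */

	do {
		out |= (v & 0xFF) << (tmap & 0x18);
		tmap >>= 8;
	} while (v >>= 8);
	return out;
}

int main(void)
{
	/* identity map: REORDER_MAP(BYTE_RESHUFFLE_VALUE) == 0x03020100 */
	assert(reorder_word(0x11223344, 0x03020100) == 0x11223344);
	/* full byte reversal */
	assert(reorder_word(0x11223344, 0x00010203) == 0x44332211);
	return 0;
}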
+
+/* forward declaration for handle returned by dynamic loader */
+struct my_handle;
+
+/*
+ * a list of module handles, which mirrors the debug list on the target
+ */
+struct dbg_mirror_root {
+ /* must be same as dbg_mirror_list; __DLModules address on target */
+ u32 dbthis;
+ struct my_handle *hnext; /* must be same as dbg_mirror_list */
+ u16 changes; /* change counter */
+ u16 refcount; /* number of modules referencing this root */
+};
+
+struct dbg_mirror_list {
+ u32 dbthis;
+ struct my_handle *hnext, *hprev;
+ struct dbg_mirror_root *hroot;
+ u16 dbsiz;
+ u32 context; /* Save context for .dllview memory allocation */
+};
+
+#define VARIABLE_SIZE 1
+/*
+ * the structure we actually return as an opaque module handle
+ */
+struct my_handle {
+ struct dbg_mirror_list dm; /* !!! must be first !!! */
+ /* sections following << 1, LSB is set for big-endian target */
+ u16 secn_count;
+ struct ldr_section_info secns[VARIABLE_SIZE];
+};
+#define MY_HANDLE_SIZE (sizeof(struct my_handle) -\
+ sizeof(struct ldr_section_info))
+/* real size of my_handle */
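Because secns[] is a variable-length tail, a handle that records n sections occupies MY_HANDLE_SIZE plus n copies of struct ldr_section_info; secn_count itself stores the count shifted left by one, with the low bit flagging a big-endian target, which is why dynamic_unload_module() in cload.c walks `secn_count >> 1` entries. A hedged sizing sketch (the real allocation is done in dload.c, outside this diff):

/* illustration only: bytes needed for a handle covering n sections */
static inline size_t my_handle_bytes(unsigned n)
{
	return MY_HANDLE_SIZE + n * sizeof(struct ldr_section_info);
}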
+
+/*
+ * reduced symbol structure used for symbols during relocation
+ */
+struct local_symbol {
+ s32 value; /* Relocated symbol value */
+ s32 delta; /* Original value in input file */
+ s16 secnn; /* section number */
+ s16 sclass; /* symbol class */
+};
+
+/*
+ * Trampoline data structures
+ */
+#define TRAMP_NO_GEN_AVAIL 65535
+#define TRAMP_SYM_PREFIX "__$dbTR__"
+#define TRAMP_SECT_NAME ".dbTR"
+/* MUST MATCH THE LENGTH ABOVE!! */
+#define TRAMP_SYM_PREFIX_LEN 9
+/* Includes NULL termination */
+#define TRAMP_SYM_HEX_ASCII_LEN 9
+
+#define GET_CONTAINER(ptr, type, field) ((type *)((unsigned long)ptr -\
+ (unsigned long)(&((type *)0)->field)))
+#ifndef FIELD_OFFSET
+#define FIELD_OFFSET(type, field) ((unsigned long)(&((type *)0)->field))
+#endif
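GET_CONTAINER() is the familiar container_of pattern: subtract the member's byte offset from a member pointer to recover the enclosing structure. For example, with the trampoline symbol type declared just below, an embedded local_symbol can be mapped back to its tramp_sym (illustration only):

/* illustration: recover the enclosing tramp_sym from its sym_info member */
static inline struct tramp_sym *tramp_sym_of(struct local_symbol *lsp)
{
	return GET_CONTAINER(lsp, struct tramp_sym, sym_info);
}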
+
+/*
+ The trampoline code for the target is located in a table called
+ "tramp_gen_info" with is indexed by looking up the index in the table
+ "tramp_map". The tramp_map index is acquired using the target
+ HASH_FUNC on the relocation type that caused the trampoline. Each
+ trampoline code table entry MUST follow this format:
+
+ |----------------------------------------------|
+ | tramp_gen_code_hdr |
+ |----------------------------------------------|
+ | Trampoline image code |
+ | (the raw instruction code for the target) |
+ |----------------------------------------------|
+ | Relocation entries for the image code |
+ |----------------------------------------------|
+
+ This is very similar to how image data is laid out in the DOFF file
+ itself.
+ */
+struct tramp_gen_code_hdr {
+ u32 tramp_code_size; /* in BYTES */
+ u32 num_relos;
+ u32 relo_offset; /* in BYTES */
+};
+
+struct tramp_img_pkt {
+ struct tramp_img_pkt *next; /* MUST BE FIRST */
+ u32 base;
+ struct tramp_gen_code_hdr hdr;
+ u8 payload[VARIABLE_SIZE];
+};
+
+struct tramp_img_dup_relo {
+ struct tramp_img_dup_relo *next;
+ struct reloc_record_t relo;
+};
+
+struct tramp_img_dup_pkt {
+ struct tramp_img_dup_pkt *next; /* MUST BE FIRST */
+ s16 secnn;
+ u32 offset;
+ struct image_packet_t img_pkt;
+ struct tramp_img_dup_relo *relo_chain;
+
+ /* PAYLOAD OF IMG PKT FOLLOWS */
+};
+
+struct tramp_sym {
+ struct tramp_sym *next; /* MUST BE FIRST */
+ u32 index;
+ u32 str_index;
+ struct local_symbol sym_info;
+};
+
+struct tramp_string {
+ struct tramp_string *next; /* MUST BE FIRST */
+ u32 index;
+ char str[VARIABLE_SIZE]; /* NULL terminated */
+};
+
+struct tramp_info {
+ u32 tramp_sect_next_addr;
+ struct ldr_section_info sect_info;
+
+ struct tramp_sym *symbol_head;
+ struct tramp_sym *symbol_tail;
+ u32 tramp_sym_next_index;
+ struct local_symbol *final_sym_table;
+
+ struct tramp_string *string_head;
+ struct tramp_string *string_tail;
+ u32 tramp_string_next_index;
+ u32 tramp_string_size;
+ char *final_string_table;
+
+ struct tramp_img_pkt *tramp_pkts;
+ struct tramp_img_dup_pkt *dup_pkts;
+};
+
+/*
+ * States of the .cinit state machine
+ */
+enum cinit_mode {
+ CI_COUNT = 0, /* expecting a count */
+ CI_ADDRESS, /* expecting an address */
+#if CINIT_ALIGN < CINIT_ADDRESS /* handle case of partial address field */
+ CI_PARTADDRESS, /* have only part of the address */
+#endif
+ CI_COPY, /* in the middle of copying data */
+ CI_DONE /* end of .cinit table */
+};
+
+/*
+ * The internal state of the dynamic loader, which is passed around as
+ * an object
+ */
+struct dload_state {
+ struct dynamic_loader_stream *strm; /* The module input stream */
+ struct dynamic_loader_sym *mysym; /* Symbols for this session */
+ /* target memory allocator */
+ struct dynamic_loader_allocate *myalloc;
+ struct dynamic_loader_initialize *myio; /* target memory initializer */
+ unsigned myoptions; /* Options parameter dynamic_load_module */
+
+ char *str_head; /* Pointer to string table */
+#if BITS_PER_AU > BITS_PER_BYTE
+ char *str_temp; /* Pointer to temporary buffer for strings */
+ /* big enough to hold longest string */
+ unsigned temp_len; /* length of last temporary string */
+ char *xstrings; /* Pointer to buffer for expanded */
+ /* strings for sec names */
+#endif
+ /* Total size of strings for DLLView section names */
+ unsigned debug_string_size;
+ /* Pointer to parallel section info for allocated sections only */
+ struct doff_scnhdr_t *sect_hdrs; /* Pointer to section table */
+ struct ldr_section_info *ldr_sections;
+#if TMS32060
+ /* The address of the start of the .bss section */
+ ldr_addr bss_run_base;
+#endif
+ struct local_symbol *local_symtab; /* Relocation symbol table */
+
+ /* pointer to DL section info for the section being relocated */
+ struct ldr_section_info *image_secn;
+ /* change in run address for current section during relocation */
+ ldr_addr delta_runaddr;
+ ldr_addr image_offset; /* offset of current packet in section */
+ enum cinit_mode cinit_state; /* current state of cload_cinit() */
+ int cinit_count; /* the current count */
+ ldr_addr cinit_addr; /* the current address */
+ s16 cinit_page; /* the current page */
+ /* Handle to be returned by dynamic_load_module */
+ struct my_handle *myhandle;
+ unsigned dload_errcount; /* Total # of errors reported so far */
+ /* Number of target sections that require allocation and relocation */
+ unsigned allocated_secn_count;
+#ifndef TARGET_ENDIANNESS
+ int big_e_target; /* Target data in big-endian format */
+#endif
+ /* map for reordering bytes, 0 if not needed */
+ u32 reorder_map;
+ struct doff_filehdr_t dfile_hdr; /* DOFF file header structure */
+ struct doff_verify_rec_t verify; /* Verify record */
+
+ struct tramp_info tramp; /* Trampoline data, if needed */
+
+ int relstkidx; /* index into relocation value stack */
+ /* relocation value stack used in relexp.c */
+ rvalue relstk[STATIC_EXPR_STK_SIZE];
+
+};
+
+#ifdef TARGET_ENDIANNESS
+#define TARGET_BIG_ENDIAN TARGET_ENDIANNESS
+#else
+#define TARGET_BIG_ENDIAN (dlthis->big_e_target)
+#endif
+
+/*
+ * Exports from cload.c to rest of the world
+ */
+extern void dload_error(struct dload_state *dlthis, const char *errtxt, ...);
+extern void dload_syms_error(struct dynamic_loader_sym *syms,
+ const char *errtxt, ...);
+extern void dload_headers(struct dload_state *dlthis);
+extern void dload_strings(struct dload_state *dlthis, bool sec_names_only);
+extern void dload_sections(struct dload_state *dlthis);
+extern void dload_reorder(void *data, int dsiz, u32 map);
+extern u32 dload_checksum(void *data, unsigned siz);
+
+#if HOST_ENDIANNESS
+extern u32 dload_reverse_checksum(void *data, unsigned siz);
+#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32)
+extern u32 dload_reverse_checksum16(void *data, unsigned siz);
+#endif
+#endif
+
+/*
+ * exported by reloc.c
+ */
+extern void dload_relocate(struct dload_state *dlthis, tgt_au_t * data,
+ struct reloc_record_t *rp, bool * tramps_generated,
+ bool second_pass);
+
+extern rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t * data,
+ int fieldsz, int offset, unsigned sgn);
+
+extern int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t * data,
+ int fieldsz, int offset, unsigned sgn);
+
+/*
+ * exported by tramp.c
+ */
+extern bool dload_tramp_avail(struct dload_state *dlthis,
+ struct reloc_record_t *rp);
+
+int dload_tramp_generate(struct dload_state *dlthis, s16 secnn,
+ u32 image_offset, struct image_packet_t *ipacket,
+ struct reloc_record_t *rp);
+
+extern int dload_tramp_pkt_udpate(struct dload_state *dlthis,
+ s16 secnn, u32 image_offset,
+ struct image_packet_t *ipacket);
+
+extern int dload_tramp_finalize(struct dload_state *dlthis);
+
+extern void dload_tramp_cleanup(struct dload_state *dlthis);
+
+#endif /* _DLOAD_INTERNAL_ */
diff --git a/drivers/staging/tidspbridge/dynload/doff.h b/drivers/staging/tidspbridge/dynload/doff.h
new file mode 100644
index 00000000000..a7c3145746e
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/doff.h
@@ -0,0 +1,354 @@
+/*
+ * doff.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structures & definitions used for dynamically loaded modules file format.
+ * This format is a reformatted version of COFF. It optimizes the layout for
+ * the dynamic loader.
+ *
+ * .dof files, when viewed as a sequence of 32-bit integers, look the same
+ * on big-endian and little-endian machines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DOFF_H
+#define _DOFF_H
+
+
+#define BYTE_RESHUFFLE_VALUE 0x00010203
+
+/* DOFF file header containing fields categorizing the remainder of the file */
+struct doff_filehdr_t {
+
+ /* string table size, including filename, in bytes */
+ u32 df_strtab_size;
+
+ /* entry point if one exists */
+ u32 df_entrypt;
+
+ /* identifies byte ordering of file;
+ * always set to BYTE_RESHUFFLE_VALUE */
+ u32 df_byte_reshuffle;
+
+ /* Size of the string table up to and including the last section name */
+ /* Size includes the name of the COFF file also */
+ u32 df_scn_name_size;
+
+#ifndef _BIG_ENDIAN
+ /* number of symbols */
+ u16 df_no_syms;
+
+ /* length in bytes of the longest string, including terminating NULL */
+ /* excludes the name of the file */
+ u16 df_max_str_len;
+
+ /* total number of sections including no-load ones */
+ u16 df_no_scns;
+
+ /* number of sections containing target code allocated or downloaded */
+ u16 df_target_scns;
+
+ /* unique id for dll file format & version */
+ u16 df_doff_version;
+
+ /* identifies ISA */
+ u16 df_target_id;
+
+ /* useful file flags */
+ u16 df_flags;
+
+ /* section reference for entry point, N_UNDEF for none, */
+ /* N_ABS for absolute address */
+ s16 df_entry_secn;
+#else
+ /* length of the longest string, including terminating NULL */
+ u16 df_max_str_len;
+
+ /* number of symbols */
+ u16 df_no_syms;
+
+ /* number of sections containing target code allocated or downloaded */
+ u16 df_target_scns;
+
+ /* total number of sections including no-load ones */
+ u16 df_no_scns;
+
+ /* identifies ISA */
+ u16 df_target_id;
+
+ /* unique id for dll file format & version */
+ u16 df_doff_version;
+
+ /* section reference for entry point, N_UNDEF for none, */
+ /* N_ABS for absolute address */
+ s16 df_entry_secn;
+
+ /* useful file flags */
+ u16 df_flags;
+#endif
+ /* checksum for file header record */
+ u32 df_checksum;
+
+};
+
+/* flags in the df_flags field */
+#define DF_LITTLE 0x100
+#define DF_BIG 0x200
+#define DF_BYTE_ORDER (DF_LITTLE | DF_BIG)
+
+/* Supported processors */
+#define TMS470_ID 0x97
+#define LEAD_ID 0x98
+#define TMS32060_ID 0x99
+#define LEAD3_ID 0x9c
+
+/* Primary processor for loading */
+#if TMS32060
+#define TARGET_ID TMS32060_ID
+#endif
+
+/* Verification record containing values used to test integrity of the bits */
+struct doff_verify_rec_t {
+
+ /* time and date stamp */
+ u32 dv_timdat;
+
+ /* checksum for all section records */
+ u32 dv_scn_rec_checksum;
+
+ /* checksum for string table */
+ u32 dv_str_tab_checksum;
+
+ /* checksum for symbol table */
+ u32 dv_sym_tab_checksum;
+
+ /* checksum for verification record */
+ u32 dv_verify_rec_checksum;
+
+};
+
+/* String table is an array of null-terminated strings. The first entry is
+ * the filename, which is added by DLLcreate. No new structure definitions
+ * are required.
+ */
+
+/* Section Records including information on the corresponding image packets */
+/*
+ * !!WARNING!!
+ *
+ * This structure is expected to match in form ldr_section_info in
+ * dynamic_loader.h
+ */
+
+struct doff_scnhdr_t {
+
+ s32 ds_offset; /* offset into string table of name */
+ s32 ds_paddr; /* RUN address, in target AU */
+ s32 ds_vaddr; /* LOAD address, in target AU */
+ s32 ds_size; /* section size, in target AU */
+#ifndef _BIG_ENDIAN
+ u16 ds_page; /* memory page id */
+ u16 ds_flags; /* section flags */
+#else
+ u16 ds_flags; /* section flags */
+ u16 ds_page; /* memory page id */
+#endif
+ u32 ds_first_pkt_offset;
+ /* Absolute byte offset into the file */
+ /* where the first image record resides */
+
+ s32 ds_nipacks; /* number of image packets */
+
+};
+
+/* Symbol table entry */
+struct doff_syment_t {
+
+ s32 dn_offset; /* offset into string table of name */
+ s32 dn_value; /* value of symbol */
+#ifndef _BIG_ENDIAN
+ s16 dn_scnum; /* section number */
+ s16 dn_sclass; /* storage class */
+#else
+ s16 dn_sclass; /* storage class */
+ s16 dn_scnum; /* section number, 1-based */
+#endif
+
+};
+
+/* special values for dn_scnum */
+#define DN_UNDEF 0 /* undefined symbol */
+#define DN_ABS (-1) /* value of symbol is absolute */
+/* special values for dn_sclass */
+#define DN_EXT 2
+#define DN_STATLAB 20
+#define DN_EXTLAB 21
+
+/* Default value of image bits in packet */
+/* Configurable by user on the command line */
+#define IMAGE_PACKET_SIZE 1024
+
+/* An image packet contains a chunk of data from a section along with */
+/* information necessary for its processing. */
+struct image_packet_t {
+
+ s32 num_relocs; /* number of relocations for */
+ /* this packet */
+
+ s32 packet_size; /* number of bytes in array */
+ /* "bits" occupied by */
+ /* valid data. Could be */
+ /* < IMAGE_PACKET_SIZE to */
+ /* prevent splitting a */
+ /* relocation across packets. */
+ /* Last packet of a section */
+ /* will most likely contain */
+ /* < IMAGE_PACKET_SIZE bytes */
+ /* of valid data */
+
+ s32 img_chksum; /* Checksum for image packet */
+ /* and the corresponding */
+ /* relocation records */
+
+ u8 *img_data; /* Actual data in section */
+
+};
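The checksum convention used by dload_data() in cload.c and dload_get_section() in getsection.c is that the 32-bit sum of the packet header words, the image words, and any relocation records equals ~0; that is why those callers report an error with `if (~checks)`. On the 32-bit hosts this driver targets, IPH_SIZE covers exactly the three header words num_relocs, packet_size and img_chksum. A hedged sketch of how a producer would choose img_chksum for a packet with no relocations (the DOFF creation tool is not part of this driver, so this only illustrates the invariant):

/* illustration: pick img_chksum so the whole packet sums to 0xffffffff */
static u32 pick_img_chksum(s32 num_relocs, s32 packet_size,
			   const u32 *words, unsigned nwords)
{
	u32 sum = (u32)num_relocs + (u32)packet_size;
	unsigned i;

	for (i = 0; i < nwords; i++)
		sum += words[i];
	return ~sum;		/* sum + img_chksum == 0xffffffff */
}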
+
+/* The relocation structure definition matches the COFF version. Offsets */
+/* however are relative to the image packet base not the section base. */
+struct reloc_record_t {
+
+ s32 vaddr;
+
+ /* expressed in target AUs */
+
+ union {
+ struct {
+#ifndef _BIG_ENDIAN
+ u8 _offset; /* bit offset of rel fld */
+ u8 _fieldsz; /* size of rel fld */
+ u8 _wordsz; /* # bytes containing rel fld */
+ u8 _dum1;
+ u16 _dum2;
+ u16 _type;
+#else
+ unsigned _dum1:8;
+ unsigned _wordsz:8; /* # bytes containing rel fld */
+ unsigned _fieldsz:8; /* size of rel fld */
+ unsigned _offset:8; /* bit offset of rel fld */
+ u16 _type;
+ u16 _dum2;
+#endif
+ } _r_field;
+
+ struct {
+ u32 _spc; /* image packet relative PC */
+#ifndef _BIG_ENDIAN
+ u16 _dum;
+ u16 _type; /* relocation type */
+#else
+ u16 _type; /* relocation type */
+ u16 _dum;
+#endif
+ } _r_spc;
+
+ struct {
+ u32 _uval; /* constant value */
+#ifndef _BIG_ENDIAN
+ u16 _dum;
+ u16 _type; /* relocation type */
+#else
+ u16 _type; /* relocation type */
+ u16 _dum;
+#endif
+ } _r_uval;
+
+ struct {
+ s32 _symndx; /* 32-bit sym tbl index */
+#ifndef _BIG_ENDIAN
+ u16 _disp; /* extra addr encode data */
+ u16 _type; /* relocation type */
+#else
+ u16 _type; /* relocation type */
+ u16 _disp; /* extra addr encode data */
+#endif
+ } _r_sym;
+ } _u_reloc;
+
+};
+
+/* abbreviations for convenience */
+#ifndef TYPE
+#define TYPE _u_reloc._r_sym._type
+#define UVAL _u_reloc._r_uval._uval
+#define SYMNDX _u_reloc._r_sym._symndx
+#define OFFSET _u_reloc._r_field._offset
+#define FIELDSZ _u_reloc._r_field._fieldsz
+#define WORDSZ _u_reloc._r_field._wordsz
+#define R_DISP _u_reloc._r_sym._disp
+#endif
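Note that in both endianness layouts the _type member lands at the same offset in every view of the union, so a consumer can read the relocation type through any of these shorthands before deciding which view applies, e.g.:

/* illustration: TYPE can be read before choosing a union view */
static inline u16 reloc_type(const struct reloc_record_t *rp)
{
	return rp->TYPE;	/* expands to rp->_u_reloc._r_sym._type */
}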
+
+/**************************************************************************** */
+/* */
+/* Important DOFF macros used for file processing */
+/* */
+/**************************************************************************** */
+
+/* DOFF Versions */
+#define DOFF0 0
+
+/* Return the address/size >= to addr that is at a 32-bit boundary */
+/* This assumes that a byte is 8 bits */
+#define DOFF_ALIGN(addr) (((addr) + 3) & ~3UL)
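/* e.g. DOFF_ALIGN(5) == 8 and DOFF_ALIGN(8) == 8; cload.c and getsection.c
 * use it to round packet sizes up to whole 32-bit words before checksumming */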
+
+/**************************************************************************** */
+/* */
+/* The DOFF section header flags field is laid out as follows: */
+/* */
+/* Bits 0-3 : Section Type */
+/* Bit 4 : Set when section requires target memory to be allocated by DL */
+/* Bit 5 : Set when section requires downloading */
+/* Bits 8-11: Alignment, same as COFF */
+/* */
+/**************************************************************************** */
+
+/* Enum for DOFF section types (bits 0-3 of flag): See dynamic_loader.h */
+#define DS_SECTION_TYPE_MASK 0xF
+/* DS_ALLOCATE indicates whether a section needs space on the target */
+#define DS_ALLOCATE_MASK 0x10
+/* DS_DOWNLOAD indicates that the loader needs to copy bits */
+#define DS_DOWNLOAD_MASK 0x20
+/* Section alignment requirement in AUs */
+#define DS_ALIGNMENT_SHIFT 8
+
+static inline bool dload_check_type(struct doff_scnhdr_t *sptr, u32 flag)
+{
+ return (sptr->ds_flags & DS_SECTION_TYPE_MASK) == flag;
+}
+static inline bool ds_needs_allocation(struct doff_scnhdr_t *sptr)
+{
+ return sptr->ds_flags & DS_ALLOCATE_MASK;
+}
+
+static inline bool ds_needs_download(struct doff_scnhdr_t *sptr)
+{
+ return sptr->ds_flags & DS_DOWNLOAD_MASK;
+}
+
+static inline int ds_alignment(u16 ds_flags)
+{
+ return 1 << ((ds_flags >> DS_ALIGNMENT_SHIFT) & DS_SECTION_TYPE_MASK);
+}
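A worked example of decoding a flags word with the masks and helpers above, assuming ds_flags == 0x0334 (which DLOAD_* name section type 4 corresponds to is defined in dynamic_loader.h, not shown here):

/* example: ds_flags == 0x0334 */
/*   flags & DS_SECTION_TYPE_MASK == 4     -> section type 4            */
/*   flags & DS_ALLOCATE_MASK     != 0     -> needs target memory       */
/*   flags & DS_DOWNLOAD_MASK     != 0     -> loader must copy the bits */
/*   ds_alignment(0x0334) == 1 << 3 == 8   -> align to 8 AUs            */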
+
+
+#endif /* _DOFF_H */
diff --git a/drivers/staging/tidspbridge/dynload/getsection.c b/drivers/staging/tidspbridge/dynload/getsection.c
new file mode 100644
index 00000000000..e0b37714dd6
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/getsection.c
@@ -0,0 +1,407 @@
+/*
+ * getsection.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <dspbridge/getsection.h>
+#include "header.h"
+
+/*
+ * Error strings
+ */
+static const char readstrm[] = { "Error reading %s from input stream" };
+static const char seek[] = { "Set file position to %d failed" };
+static const char isiz[] = { "Bad image packet size %d" };
+static const char err_checksum[] = { "Checksum failed on %s" };
+
+static const char err_reloc[] = { "dload_get_section unable to read "
+ "sections containing relocation entries"
+};
+
+#if BITS_PER_AU > BITS_PER_BYTE
+static const char err_alloc[] = { "Syms->dload_allocate( %d ) failed" };
+static const char stbl[] = { "Bad string table offset " FMT_UI32 };
+#endif
+
+/************************************************************** */
+/********************* SUPPORT FUNCTIONS ********************** */
+/************************************************************** */
+
+#if BITS_PER_AU > BITS_PER_BYTE
+/**************************************************************************
+ * Procedure unpack_sec_name
+ *
+ * Parameters:
+ * dlthis Handle from dload_module_open for this module
+ * soffset Byte offset into the string table
+ * dst Place to store the expanded string
+ *
+ * Effect:
+ * Stores a string from the string table into the destination, expanding
+ * it in the process. Returns a pointer just past the end of the stored
+ * string on success, or NULL on failure.
+ *
+ ************************************************************************ */
+static char *unpack_sec_name(struct dload_state *dlthis, u32 soffset, char *dst)
+{
+ u8 tmp, *src;
+
+ if (soffset >= dlthis->dfile_hdr.df_scn_name_size) {
+ dload_error(dlthis, stbl, soffset);
+ return NULL;
+ }
+ src = (u8 *) dlthis->str_head +
+ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE));
+ if (soffset & 1)
+ *dst++ = *src++; /* only 1 character in first word */
+ do {
+ tmp = *src++;
+		*dst = (tmp >> BITS_PER_BYTE);
+ if (!(*dst++))
+ break;
+ } while ((*dst++ = tmp & BYTE_MASK));
+
+ return dst;
+}
+
+/**************************************************************************
+ * Procedure expand_sec_names
+ *
+ * Parameters:
+ * dlthis Handle from dload_module_open for this module
+ *
+ * Effect:
+ * Allocates a buffer, unpacks and copies strings from string table into it.
+ * Stores a pointer to the buffer into a state variable.
+ ************************************************************************* */
+static void expand_sec_names(struct dload_state *dlthis)
+{
+ char *xstrings, *curr, *next;
+ u32 xsize;
+ u16 sec;
+ struct ldr_section_info *shp;
+ /* assume worst-case size requirement */
+ xsize = dlthis->dfile_hdr.df_max_str_len * dlthis->dfile_hdr.df_no_scns;
+ xstrings = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, xsize);
+ if (xstrings == NULL) {
+ dload_error(dlthis, err_alloc, xsize);
+ return;
+ }
+ dlthis->xstrings = xstrings;
+ /* For each sec, copy and expand its name */
+ curr = xstrings;
+ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) {
+ shp = (struct ldr_section_info *)&dlthis->sect_hdrs[sec];
+ next = unpack_sec_name(dlthis, *(u32 *) &shp->name, curr);
+ if (next == NULL)
+ break; /* error */
+ shp->name = curr;
+ curr = next;
+ }
+}
+
+#endif
+
+/************************************************************** */
+/********************* EXPORTED FUNCTIONS ********************* */
+/************************************************************** */
+
+/**************************************************************************
+ * Procedure dload_module_open
+ *
+ * Parameters:
+ * module The input stream that supplies the module image
+ * syms Host-side malloc/free and error reporting functions.
+ * Other methods are unused.
+ *
+ * Effect:
+ * Reads header information from a dynamic loader module using the
+ specified
+ * stream object, and returns a handle for the module information. This
+ * handle may be used in subsequent query calls to obtain information
+ * contained in the module.
+ *
+ * Returns:
+ * NULL if an error is encountered, otherwise a module handle for use
+ * in subsequent operations.
+ ************************************************************************* */
+void *dload_module_open(struct dynamic_loader_stream *module,
+ struct dynamic_loader_sym *syms)
+{
+ struct dload_state *dlthis; /* internal state for this call */
+ unsigned *dp, sz;
+ u32 sec_start;
+#if BITS_PER_AU <= BITS_PER_BYTE
+ u16 sec;
+#endif
+
+ /* Check that mandatory arguments are present */
+ if (!module || !syms) {
+ if (syms != NULL)
+ dload_syms_error(syms, "Required parameter is NULL");
+
+ return NULL;
+ }
+
+ dlthis = (struct dload_state *)
+ syms->dload_allocate(syms, sizeof(struct dload_state));
+ if (!dlthis) {
+ /* not enough storage */
+ dload_syms_error(syms, "Can't allocate module info");
+ return NULL;
+ }
+
+ /* clear our internal state */
+ dp = (unsigned *)dlthis;
+ for (sz = sizeof(struct dload_state) / sizeof(unsigned);
+ sz > 0; sz -= 1)
+ *dp++ = 0;
+
+ dlthis->strm = module;
+ dlthis->mysym = syms;
+
+ /* read in the doff image and store in our state variable */
+ dload_headers(dlthis);
+
+ if (!dlthis->dload_errcount)
+ dload_strings(dlthis, true);
+
+ /* skip ahead past the unread portion of the string table */
+ sec_start = sizeof(struct doff_filehdr_t) +
+ sizeof(struct doff_verify_rec_t) +
+ BYTE_TO_HOST(DOFF_ALIGN(dlthis->dfile_hdr.df_strtab_size));
+
+ if (dlthis->strm->set_file_posn(dlthis->strm, sec_start) != 0) {
+ dload_error(dlthis, seek, sec_start);
+ return NULL;
+ }
+
+ if (!dlthis->dload_errcount)
+ dload_sections(dlthis);
+
+ if (dlthis->dload_errcount) {
+ dload_module_close(dlthis); /* errors, blow off our state */
+ dlthis = NULL;
+ return NULL;
+ }
+#if BITS_PER_AU > BITS_PER_BYTE
+ /* Expand all section names from the string table into the */
+ /* state variable, and convert section names from a relative */
+	/* string table offset to a pointer to the expanded string. */
+ expand_sec_names(dlthis);
+#else
+ /* Convert section names from a relative string table offset */
+ /* to a pointer into the string table. */
+ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) {
+ struct ldr_section_info *shp =
+ (struct ldr_section_info *)&dlthis->sect_hdrs[sec];
+ shp->name = dlthis->str_head + *(u32 *) &shp->name;
+ }
+#endif
+
+ return dlthis;
+}
+
+/***************************************************************************
+ * Procedure dload_get_section_info
+ *
+ * Parameters:
+ * minfo Handle from dload_module_open for this module
+ * section_name Pointer to the string name of the section desired
+ * section_info Address of a section info structure pointer to be
+ * initialized
+ *
+ * Effect:
+ * Finds the specified section in the module information, and initializes
+ * the provided struct ldr_section_info pointer.
+ *
+ * Returns:
+ * true for success, false for section not found
+ ************************************************************************* */
+int dload_get_section_info(void *minfo, const char *section_name,
+ const struct ldr_section_info **const section_info)
+{
+ struct dload_state *dlthis;
+ struct ldr_section_info *shp;
+ u16 sec;
+
+ dlthis = (struct dload_state *)minfo;
+ if (!dlthis)
+ return false;
+
+ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) {
+ shp = (struct ldr_section_info *)&dlthis->sect_hdrs[sec];
+ if (strcmp(section_name, shp->name) == 0) {
+ *section_info = shp;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32))
+
+/**************************************************************************
+ * Procedure dload_get_section
+ *
+ * Parameters:
+ * minfo Handle from dload_module_open for this module
+ * section_info Pointer to a section info structure for the desired
+ * section
+ * section_data Buffer to contain the section initialized data
+ *
+ * Effect:
+ * Copies the initialized data for the specified section into the
+ * supplied buffer.
+ *
+ * Returns:
+ * true for success, false for section not found
+ ************************************************************************* */
+int dload_get_section(void *minfo,
+ const struct ldr_section_info *section_info,
+ void *section_data)
+{
+ struct dload_state *dlthis;
+ u32 pos;
+ struct doff_scnhdr_t *sptr = NULL;
+ s32 nip;
+ struct image_packet_t ipacket;
+ s32 ipsize;
+ u32 checks;
+ s8 *dest = (s8 *) section_data;
+
+ dlthis = (struct dload_state *)minfo;
+ if (!dlthis)
+ return false;
+ sptr = (struct doff_scnhdr_t *)section_info;
+ if (sptr == NULL)
+ return false;
+
+ /* skip ahead to the start of the first packet */
+ pos = BYTE_TO_HOST(DOFF_ALIGN((u32) sptr->ds_first_pkt_offset));
+ if (dlthis->strm->set_file_posn(dlthis->strm, pos) != 0) {
+ dload_error(dlthis, seek, pos);
+ return false;
+ }
+
+ nip = sptr->ds_nipacks;
+ while ((nip -= 1) >= 0) { /* for each packet */
+ /* get the fixed header bits */
+ if (dlthis->strm->read_buffer(dlthis->strm, &ipacket,
+ IPH_SIZE) != IPH_SIZE) {
+ dload_error(dlthis, readstrm, "image packet");
+ return false;
+ }
+ /* reorder the header if need be */
+ if (dlthis->reorder_map)
+ dload_reorder(&ipacket, IPH_SIZE, dlthis->reorder_map);
+
+ /* Now read the packet image bits. Note: round the size up to
+ * the next multiple of 4 bytes; this is what checksum
+ * routines want. */
+ ipsize = BYTE_TO_HOST(DOFF_ALIGN(ipacket.packet_size));
+ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) {
+ dload_error(dlthis, isiz, ipsize);
+ return false;
+ }
+ if (dlthis->strm->read_buffer
+ (dlthis->strm, dest, ipsize) != ipsize) {
+ dload_error(dlthis, readstrm, "image packet");
+ return false;
+ }
+ /* reorder the bytes if need be */
+#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16)
+ if (dlthis->reorder_map)
+ dload_reorder(dest, ipsize, dlthis->reorder_map);
+
+ checks = dload_checksum(dest, ipsize);
+#else
+ if (dlthis->dfile_hdr.df_byte_reshuffle !=
+ TARGET_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) {
+ /* put image bytes in big-endian order, not PC order */
+ dload_reorder(dest, ipsize,
+ TARGET_ORDER(dlthis->
+ dfile_hdr.df_byte_reshuffle));
+ }
+#if TARGET_AU_BITS > 8
+ checks = dload_reverse_checksum16(dest, ipsize);
+#else
+ checks = dload_reverse_checksum(dest, ipsize);
+#endif
+#endif
+ checks += dload_checksum(&ipacket, IPH_SIZE);
+
+ /* NYI: unable to handle relocation entries here. Reloc
+ * entries referring to fields that span the packet boundaries
+ * may result in packets of sizes that are not multiple of
+ * 4 bytes. Our checksum implementation works on 32-bit words
+ * only. */
+ if (ipacket.num_relocs != 0) {
+ dload_error(dlthis, err_reloc, ipsize);
+ return false;
+ }
+
+ if (~checks) {
+ dload_error(dlthis, err_checksum, "image packet");
+ return false;
+ }
+
+ /*Advance destination ptr by the size of the just-read packet */
+ dest += ipsize;
+ }
+
+ return true;
+}
+
+/***************************************************************************
+ * Procedure dload_module_close
+ *
+ * Parameters:
+ * minfo Handle from dload_module_open for this module
+ *
+ * Effect:
+ * Releases any storage associated with the module handle. On return,
+ * the module handle is invalid.
+ *
+ * Returns:
+ * Nothing; the function has no return value. Individual errors are
+ * reported using syms->error_report(), where syms was an argument to
+ * dload_module_open.
+ ************************************************************************* */
+void dload_module_close(void *minfo)
+{
+ struct dload_state *dlthis;
+
+ dlthis = (struct dload_state *)minfo;
+ if (!dlthis)
+ return;
+
+ if (dlthis->str_head)
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ dlthis->str_head);
+
+ if (dlthis->sect_hdrs)
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ dlthis->sect_hdrs);
+
+#if BITS_PER_AU > BITS_PER_BYTE
+ if (dlthis->xstrings)
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ dlthis->xstrings);
+
+#endif
+
+ dlthis->mysym->dload_deallocate(dlthis->mysym, dlthis);
+}
diff --git a/drivers/staging/tidspbridge/dynload/header.h b/drivers/staging/tidspbridge/dynload/header.h
new file mode 100644
index 00000000000..5b50a15a343
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/header.h
@@ -0,0 +1,49 @@
+/*
+ * header.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/string.h>
+#define DL_STRCMP strcmp
+
+/* maximum parenthesis nesting in relocation stack expressions */
+#define STATIC_EXPR_STK_SIZE 10
+
+#include <linux/types.h>
+
+#include "doff.h"
+#include <dspbridge/dynamic_loader.h>
+#include "params.h"
+#include "dload_internal.h"
+#include "reloc_table.h"
+
+/*
+ * Plausibility limits
+ *
+ * These limits are imposed upon the input DOFF file as a check for validity.
+ * They are hard limits, in that the load will fail if they are exceeded.
+ * The numbers selected are arbitrary, in that the loader implementation does
+ * not require these limits.
+ */
+
+/* maximum number of bytes in string table */
+#define MAX_REASONABLE_STRINGTAB (0x100000)
+/* maximum number of code,data,etc. sections */
+#define MAX_REASONABLE_SECTIONS (200)
+/* maximum number of linker symbols */
+#define MAX_REASONABLE_SYMBOLS (100000)
+
+/* shift count to align F_BIG with DLOAD_LITTLE */
+#define ALIGN_COFF_ENDIANNESS 7
+#define ENDIANNESS_MASK (DF_BYTE_ORDER >> ALIGN_COFF_ENDIANNESS)
diff --git a/drivers/staging/tidspbridge/dynload/module_list.h b/drivers/staging/tidspbridge/dynload/module_list.h
new file mode 100644
index 00000000000..a216bb131a4
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/module_list.h
@@ -0,0 +1,159 @@
+/*
+ * dspbridge/mpu_driver/src/dynload/module_list.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * This C header file gives the layout of the data structure created by the
+ * dynamic loader to describe the set of modules loaded into the DSP.
+ *
+ * Linked List Structure:
+ * ----------------------
+ * The data structure defined here is a singly-linked list. The list
+ * represents the set of modules which are currently loaded in the DSP memory.
+ * The first entry in the list is a header record which contains a flag
+ * representing the state of the list. The rest of the entries in the list
+ * are module records.
+ *
+ * Global symbol _DLModules designates the first record in the list (i.e. the
+ * header record). This symbol must be defined in any program that wishes to
+ * use DLLview plug-in.
+ *
+ * String Representation:
+ * ----------------------
+ * The string names of the module and its sections are stored in a block of
+ * memory which follows the module record itself. The strings are ordered:
+ * module name first, followed by section names in order from the first
+ * section to the last. String names are tightly packed arrays of 8-bit
+ * characters (two characters per 16-bit word on the C55x). Strings are
+ * zero-byte-terminated.
+ *
+ * Creating and updating the list:
+ * -------------------------------
+ * Upon loading a new module into the DSP memory the dynamic loader inserts a
+ * new module record as the first module record in the list. The fields of
+ * this module record are initialized to reflect the properties of the module.
+ * The dynamic loader does NOT increment the flag/counter in the list's header
+ * record.
+ *
+ * Upon unloading a module from the DSP memory the dynamic loader removes the
+ * module's record from this list. The dynamic loader also increments the
+ * flag/counter in the list's header record to indicate that the list has been
+ * changed.
+ */
+
+#ifndef _MODULE_LIST_H_
+#define _MODULE_LIST_H_
+
+#include <linux/types.h>
+
+/* Global pointer to the modules_header structure */
+#define MODULES_HEADER "_DLModules"
+#define MODULES_HEADER_NO_UNDERSCORE "DLModules"
+
+/* Initial version number */
+#define INIT_VERSION 1
+
+/* Verification number -- to be recorded in each module record */
+#define VERIFICATION 0x79
+
+/* forward declarations */
+struct dll_module;
+struct dll_sect;
+
+/* the first entry in the list is the modules_header record;
+ * its address is contained in the global _DLModules pointer */
+struct modules_header {
+
+ /*
+ * Address of the first dll_module record in the list or NULL.
+ * Note: for C55x this is a word address (C55x data is
+ * word-addressable)
+ */
+ u32 first_module;
+
+ /* Combined storage size (in target addressable units) of the
+ * dll_module record which follows this header record, or zero
+ * if the list is empty. This size includes the module's string table.
+ * Note: for C55x the unit is a 16-bit word */
+ u16 first_module_size;
+
+ /* Counter is incremented whenever a module record is removed from
+ * the list */
+ u16 update_flag;
+
+};
+
+/* for each 32-bits in above structure, a bitmap, LSB first, whose bits are:
+ * 0 => a 32-bit value, 1 => 2 16-bit values */
+/* swapping bitmap for type modules_header */
+#define MODULES_HEADER_BITMAP 0x2
+
+/* information recorded about each section in a module */
+struct dll_sect {
+
+ /* Load-time address of the section.
+ * Note: for C55x this is a byte address for program sections, and
+ * a word address for data sections. C55x program memory is
+ * byte-addressable, while data memory is word-addressable. */
+ u32 sect_load_adr;
+
+ /* Run-time address of the section.
+ * Note 1: for C55x this is a byte address for program sections, and
+ * a word address for data sections.
+ * Note 2: for C55x two most significant bits of this field indicate
+ * the section type: '00' for a code section, '11' for a data section
+ * (C55 addresses are really only 24-bits wide). */
+ u32 sect_run_adr;
+
+};
+
+/* the rest of the entries in the list are module records */
+struct dll_module {
+
+ /* Address of the next dll_module record in the list, or 0 if this is
+ * the last record in the list.
+ * Note: for C55x this is a word address (C55x data is
+ * word-addressable) */
+ u32 next_module;
+
+ /* Combined storage size (in target addressable units) of the
+ * dll_module record which follows this one, or zero if this is the
+ * last record in the list. This size includes the module's string
+ * table.
+ * Note: for C55x the unit is a 16-bit word. */
+ u16 next_module_size;
+
+ /* version number of the tooling; set to INIT_VERSION for Phase 1 */
+ u16 version;
+
+ /* the verification word; set to VERIFICATION */
+ u16 verification;
+
+ /* Number of sections in the sects array */
+ u16 num_sects;
+
+ /* Module's "unique" id; copy of the timestamp from the host
+ * COFF file */
+ u32 timestamp;
+
+ /* Array of num_sects elements of the module's section records */
+ struct dll_sect sects[1];
+};
+
+/* for each 32 bits in above structure, a bitmap, LSB first, whose bits are:
+ * 0 => a 32-bit value, 1 => 2 16-bit values */
+#define DLL_MODULE_BITMAP 0x6 /* swapping bitmap for type dll_module */
+
+#endif /* _MODULE_LIST_H_ */
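
To make the list layout described above concrete, here is a minimal sketch of a traversal, assuming each record has already been copied from DSP memory into host storage; fetch_record() is a hypothetical helper that resolves a target address to such a host copy, and the name pointer follows the documented "module name first, right after the record" string layout.

/* pr_info() comes from <linux/printk.h>; this sketch is not part of the patch */
static void dump_dllview_list(const struct modules_header *hdr,
			      const struct dll_module *(*fetch_record)(u32))
{
	u32 addr = hdr->first_module;

	while (addr != 0) {
		const struct dll_module *mod = fetch_record(addr);
		/* strings follow the sects[] array, module name first */
		const char *name = (const char *)&mod->sects[mod->num_sects];

		if (mod->verification != VERIFICATION)
			break;		/* record does not look valid */
		pr_info("module %s: %u sections\n", name,
			(unsigned int)mod->num_sects);
		addr = mod->next_module;
	}
}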
diff --git a/drivers/staging/tidspbridge/dynload/params.h b/drivers/staging/tidspbridge/dynload/params.h
new file mode 100644
index 00000000000..d797fcd3b66
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/params.h
@@ -0,0 +1,226 @@
+/*
+ * params.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This file defines host and target properties for all machines
+ * supported by the dynamic loader. To be tedious...
+ *
+ * host: the machine on which the dynamic loader runs
+ * target: the machine that the dynamic loader is loading
+ *
+ * Host and target may or may not be the same, depending upon the particular
+ * use.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/******************************************************************************
+ *
+ * Host Properties
+ *
+ **************************************************************************** */
+
+#define BITS_PER_BYTE 8 /* bits in the standard PC/SUN byte */
+#define LOG_BITS_PER_BYTE 3 /* log base 2 of same */
+#define BYTE_MASK ((1U<<BITS_PER_BYTE)-1)
+
+#if defined(__TMS320C55X__) || defined(_TMS320C5XX)
+#define BITS_PER_AU 16
+#define LOG_BITS_PER_AU 4
+ /* use this print string in error messages for uint32_t */
+#define FMT_UI32 "0x%lx"
+#define FMT8_UI32 "%08lx" /* same but no 0x, fixed width field */
+#else
+/* bits in the smallest addressable data storage unit */
+#define BITS_PER_AU 8
+/* log base 2 of the same; useful for shift counts */
+#define LOG_BITS_PER_AU 3
+#define FMT_UI32 "0x%x"
+#define FMT8_UI32 "%08x"
+#endif
+
+/* generic fastest method for swapping bytes and shorts */
+#define SWAP32BY16(zz) (((zz) << 16) | ((zz) >> 16))
+#define SWAP16BY8(zz) (((zz) << 8) | ((zz) >> 8))
+
+/* !! don't be tempted to insert type definitions here; use <stdint.h> !! */
+
+/******************************************************************************
+ *
+ * Target Properties
+ *
+ **************************************************************************** */
+
+/*-------------------------------------------------------------------------- */
+/* TMS320C6x Target Specific Parameters (byte-addressable) */
+/*-------------------------------------------------------------------------- */
+#if TMS32060
+#define MEMORG 0x0L /* Size of configured memory */
+#define MEMSIZE 0x0L /* (full address space) */
+
+#define CINIT_ALIGN 8 /* alignment of cinit record in TDATA AUs */
+#define CINIT_COUNT 4 /* width of count field in TDATA AUs */
+#define CINIT_ADDRESS 4 /* width of address field in TDATA AUs */
+#define CINIT_PAGE_BITS 0 /* Number of LSBs of address that
+ * are page number */
+
+#define LENIENT_SIGNED_RELEXPS 0 /* DOES SIGNED ALLOW MAX UNSIGNED */
+
+#undef TARGET_ENDIANNESS /* may be big or little endian */
+
+/* align a target address to a word boundary */
+#define TARGET_WORD_ALIGN(zz) (((zz) + 0x3) & -0x4)
+#endif
+
+/*--------------------------------------------------------------------------
+ *
+ * DEFAULT SETTINGS and DERIVED PROPERTIES
+ *
+ * This section establishes defaults for values not specified above
+ *-------------------------------------------------------------------------- */
+#ifndef TARGET_AU_BITS
+#define TARGET_AU_BITS 8 /* width of the target addressable unit */
+#define LOG_TARGET_AU_BITS 3 /* log2 of same */
+#endif
+
+#ifndef CINIT_DEFAULT_PAGE
+#define CINIT_DEFAULT_PAGE 0 /* default .cinit page number */
+#endif
+
+#ifndef DATA_RUN2LOAD
+#define DATA_RUN2LOAD(zz) (zz) /* translate data run address to load address */
+#endif
+
+#ifndef DBG_LIST_PAGE
+#define DBG_LIST_PAGE 0 /* page number for .dllview section */
+#endif
+
+#ifndef TARGET_WORD_ALIGN
+/* align a target address to a word boundary */
+#define TARGET_WORD_ALIGN(zz) (zz)
+#endif
+
+#ifndef TDATA_TO_TADDR
+#define TDATA_TO_TADDR(zz) (zz) /* target data address to target AU address */
+#define TADDR_TO_TDATA(zz) (zz) /* target AU address to target data address */
+#define TDATA_AU_BITS TARGET_AU_BITS /* bits per data AU */
+#define LOG_TDATA_AU_BITS LOG_TARGET_AU_BITS
+#endif
+
+/*
+ *
+ * Useful properties and conversions derived from the above
+ *
+ */
+
+/*
+ * Conversions between host and target addresses
+ */
+#if LOG_BITS_PER_AU == LOG_TARGET_AU_BITS
+/* translate target addressable unit to host address */
+#define TADDR_TO_HOST(x) (x)
+/* translate host address to target addressable unit */
+#define HOST_TO_TADDR(x) (x)
+#elif LOG_BITS_PER_AU > LOG_TARGET_AU_BITS
+#define TADDR_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS))
+#define HOST_TO_TADDR(x) ((x) << (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS))
+#else
+#define TADDR_TO_HOST(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU))
+#define HOST_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU))
+#endif
+
+#if LOG_BITS_PER_AU == LOG_TDATA_AU_BITS
+/* translate target addressable unit to host address */
+#define TDATA_TO_HOST(x) (x)
+/* translate host address to target addressable unit */
+#define HOST_TO_TDATA(x) (x)
+/* translate host address to target addressable unit, round up */
+#define HOST_TO_TDATA_ROUND(x) (x)
+/* byte offset to host offset, rounded up for TDATA size */
+#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x)
+#elif LOG_BITS_PER_AU > LOG_TDATA_AU_BITS
+#define TDATA_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS))
+#define HOST_TO_TDATA(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS))
+#define HOST_TO_TDATA_ROUND(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS))
+#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x)
+#else
+#define TDATA_TO_HOST(x) ((x) << (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))
+#define HOST_TO_TDATA(x) ((x) >> (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))
+#define HOST_TO_TDATA_ROUND(x) (((x) +\
+ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))-1) >>\
+ (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))
+#define BYTE_TO_HOST_TDATA_ROUND(x) (BYTE_TO_HOST((x) +\
+ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_BYTE))-1) &\
+ -(TDATA_AU_BITS/BITS_PER_AU))
+#endif
+
+/*
+ * Input in DOFF format is always expressed in bytes, regardless of the loading
+ * host, so we wind up converting from bytes to target and host units even when
+ * the host is not a byte machine.
+ */
+#if LOG_BITS_PER_AU == LOG_BITS_PER_BYTE
+#define BYTE_TO_HOST(x) (x)
+#define BYTE_TO_HOST_ROUND(x) (x)
+#define HOST_TO_BYTE(x) (x)
+#elif LOG_BITS_PER_AU >= LOG_BITS_PER_BYTE
+#define BYTE_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
+#define BYTE_TO_HOST_ROUND(x) ((x + (BITS_PER_AU/BITS_PER_BYTE-1)) >>\
+ (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
+#define HOST_TO_BYTE(x) ((x) << (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
+#else
+/* let's not try to deal with sub-8-bit byte machines */
+#endif
+
+#if LOG_TARGET_AU_BITS == LOG_BITS_PER_BYTE
+/* translate target addressable unit to byte address */
+#define TADDR_TO_BYTE(x) (x)
+/* translate byte address to target addressable unit */
+#define BYTE_TO_TADDR(x) (x)
+#elif LOG_TARGET_AU_BITS > LOG_BITS_PER_BYTE
+#define TADDR_TO_BYTE(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE))
+#define BYTE_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE))
+#else
+/* let's not try to deal with sub-8-bit byte machines */
+#endif
+
+#ifdef _BIG_ENDIAN
+#define HOST_ENDIANNESS 1
+#else
+#define HOST_ENDIANNESS 0
+#endif
+
+#ifdef TARGET_ENDIANNESS
+#define TARGET_ENDIANNESS_DIFFERS(rtend) (HOST_ENDIANNESS^TARGET_ENDIANNESS)
+#elif HOST_ENDIANNESS
+#define TARGET_ENDIANNESS_DIFFERS(rtend) (!(rtend))
+#else
+#define TARGET_ENDIANNESS_DIFFERS(rtend) (rtend)
+#endif
+
+/* the unit in which we process target image data */
+#if TARGET_AU_BITS <= 8
+typedef u8 tgt_au_t;
+#elif TARGET_AU_BITS <= 16
+typedef u16 tgt_au_t;
+#else
+typedef u32 tgt_au_t;
+#endif
+
+/* size of that unit */
+#if TARGET_AU_BITS < BITS_PER_AU
+#define TGTAU_BITS BITS_PER_AU
+#define LOG_TGTAU_BITS LOG_BITS_PER_AU
+#else
+#define TGTAU_BITS TARGET_AU_BITS
+#define LOG_TGTAU_BITS LOG_TARGET_AU_BITS
+#endif
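
As a quick sanity sketch (not part of the patch): on the usual Linux/ARM host build BITS_PER_AU is 8 and TARGET_AU_BITS stays at its default of 8, so every conversion above collapses to the identity; on a hypothetical 16-bit-AU host the byte macros would halve instead (BYTE_TO_HOST(6) == 3). The checks below assume that default configuration.

#include <linux/bug.h>		/* BUILD_BUG_ON() */

static inline void dynload_params_sanity(void)
{
	BUILD_BUG_ON(BYTE_TO_HOST(6) != 6);	/* bytes == host AUs    */
	BUILD_BUG_ON(TADDR_TO_HOST(6) != 6);	/* target AU == host AU */
	BUILD_BUG_ON(TADDR_TO_BYTE(6) != 6);	/* target AU == byte    */
}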
diff --git a/drivers/staging/tidspbridge/dynload/reloc.c b/drivers/staging/tidspbridge/dynload/reloc.c
new file mode 100644
index 00000000000..7b28c07ed7c
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/reloc.c
@@ -0,0 +1,484 @@
+/*
+ * reloc.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "header.h"
+
+#if TMS32060
+/* the magic symbol for the start of BSS */
+static const char bsssymbol[] = { ".bss" };
+#endif
+
+#if TMS32060
+#include "reloc_table_c6000.c"
+#endif
+
+#if TMS32060
+/* From coff.h - ignore these relocation operations */
+#define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */
+#define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */
+#define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */
+#endif
+
+/**************************************************************************
+ * Procedure dload_unpack
+ *
+ * Parameters:
+ * data pointer to storage unit containing lowest host address of
+ * image data
+ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU
+ * offset Offset from LSB, 0 <= offset < BITS_PER_AU
+ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY)
+ *
+ * Effect:
+ * Extracts the specified field and returns it.
+ ************************************************************************* */
+rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t * data, int fieldsz,
+ int offset, unsigned sgn)
+{
+ register rvalue objval;
+ register int shift, direction;
+ register tgt_au_t *dp = data;
+
+ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */
+ /* collect up enough bits to contain the desired field */
+ if (TARGET_BIG_ENDIAN) {
+ dp += (fieldsz + offset) >> LOG_TGTAU_BITS;
+ direction = -1;
+ } else
+ direction = 1;
+ objval = *dp >> offset;
+ shift = TGTAU_BITS - offset;
+ while (shift <= fieldsz) {
+ dp += direction;
+ objval += (rvalue) *dp << shift;
+ shift += TGTAU_BITS;
+ }
+
+ /* sign or zero extend the value appropriately */
+ if (sgn == ROP_UNS)
+ objval &= (2 << fieldsz) - 1;
+ else {
+ shift = sizeof(rvalue) * BITS_PER_AU - 1 - fieldsz;
+ objval = (objval << shift) >> shift;
+ }
+
+ return objval;
+
+} /* dload_unpack */
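
A worked example of the extraction, assuming 8-bit host and target addressable units, a little-endian target and an initialized loader state dlthis; the bytes are purely illustrative.

/* The storage units {0xA4, 0xCB} hold the little-endian 16-bit value
 * 0xCBA4, so the unsigned 12-bit field at bit offset 4 is
 * 0xCBA4 >> 4 == 0xCBA. */
tgt_au_t image[2] = { 0xA4, 0xCB };
rvalue field = dload_unpack(dlthis, image, 12, 4, ROP_UNS);
/* field == 0xCBA */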
+
+/**************************************************************************
+ * Procedure dload_repack
+ *
+ * Parameters:
+ * val Value to insert
+ * data Pointer to storage unit containing lowest host address of
+ * image data
+ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU
+ * offset Offset from LSB, 0 <= offset < BITS_PER_AU
+ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY)
+ *
+ * Effect:
+ * Stuffs the specified value in the specified field. Returns 0 for
+ * success
+ * or 1 if the value will not fit in the specified field according to the
+ * specified signedness rule.
+ ************************************************************************* */
+static const unsigned char ovf_limit[] = { 1, 2, 2 };
+
+int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t * data,
+ int fieldsz, int offset, unsigned sgn)
+{
+ register urvalue objval, mask;
+ register int shift, direction;
+ register tgt_au_t *dp = data;
+
+ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */
+ /* clip the bits */
+ mask = (2UL << fieldsz) - 1;
+ objval = (val & mask);
+ /* store the bits through the specified mask */
+ if (TARGET_BIG_ENDIAN) {
+ dp += (fieldsz + offset) >> LOG_TGTAU_BITS;
+ direction = -1;
+ } else
+ direction = 1;
+
+ /* insert LSBs */
+ *dp = (*dp & ~(mask << offset)) + (objval << offset);
+ shift = TGTAU_BITS - offset;
+ /* align mask and objval with AU boundary */
+ objval >>= shift;
+ mask >>= shift;
+
+ while (mask) {
+ dp += direction;
+ *dp = (*dp & ~mask) + objval;
+ objval >>= TGTAU_BITS;
+ mask >>= TGTAU_BITS;
+ }
+
+ /*
+ * check for overflow
+ */
+ if (sgn) {
+ unsigned tmp = (val >> fieldsz) + (sgn & 0x1);
+ if (tmp > ovf_limit[sgn - 1])
+ return 1;
+ }
+ return 0;
+
+} /* dload_repack */
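
The complementary overflow behaviour of dload_repack(), continuing the illustrative 12-bit field from the sketch above: a signed 12-bit field holds -2048..2047, so 0x7FF packs cleanly while 0x900 is rejected.

/* Hedged sketch; image and dlthis as in the dload_unpack example. */
int ovf;

ovf = dload_repack(dlthis, 0x7FF, image, 12, 4, ROP_SGN);	/* ovf == 0 */
ovf = dload_repack(dlthis, 0x900, image, 12, 4, ROP_SGN);	/* ovf == 1 */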
+
+/* lookup table for the scaling amount in a C6x instruction */
+#if TMS32060
+#define SCALE_BITS 4 /* there are 4 bits in the scale field */
+#define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */
+static const u8 c60_scale[SCALE_MASK + 1] = {
+ 1, 0, 0, 0, 1, 1, 2, 2
+};
+#endif
+
+/**************************************************************************
+ * Procedure dload_relocate
+ *
+ * Parameters:
+ * data Pointer to base of image data
+ * rp Pointer to relocation operation
+ *
+ * Effect:
+ * Performs the specified relocation operation
+ ************************************************************************* */
+void dload_relocate(struct dload_state *dlthis, tgt_au_t * data,
+ struct reloc_record_t *rp, bool *tramps_generated,
+ bool second_pass)
+{
+ rvalue val, reloc_amt, orig_val = 0;
+ unsigned int fieldsz = 0;
+ unsigned int offset = 0;
+ unsigned int reloc_info = 0;
+ unsigned int reloc_action = 0;
+ register int rx = 0;
+ rvalue *stackp = NULL;
+ int top;
+ struct local_symbol *svp = NULL;
+#ifdef RFV_SCALE
+ unsigned int scale = 0;
+#endif
+ struct image_packet_t *img_pkt = NULL;
+
+ /* The image packet data struct is only used during first pass
+ * relocation in the event that a trampoline is needed. 2nd pass
+ * relocation doesn't guarantee that data is coming from an
+ * image_packet_t structure. See cload.c, dload_data for how img_data is
+ * set. If that changes this needs to be updated!!! */
+ if (second_pass == false)
+ img_pkt = (struct image_packet_t *)((u8 *) data -
+ sizeof(struct
+ image_packet_t));
+
+ rx = HASH_FUNC(rp->TYPE);
+ while (rop_map1[rx] != rp->TYPE) {
+ rx = HASH_L(rop_map2[rx]);
+ if (rx < 0) {
+#if TMS32060
+ switch (rp->TYPE) {
+ case R_C60ALIGN:
+ case R_C60NOCMP:
+ case R_C60FPHEAD:
+ /* Ignore these reloc types and return */
+ break;
+ default:
+ /* Unknown reloc type, print error and return */
+ dload_error(dlthis, "Bad coff operator 0x%x",
+ rp->TYPE);
+ }
+#else
+ dload_error(dlthis, "Bad coff operator 0x%x", rp->TYPE);
+#endif
+ return;
+ }
+ }
+ rx = HASH_I(rop_map2[rx]);
+ if ((rx < (sizeof(rop_action) / sizeof(u16)))
+ && (rx < (sizeof(rop_info) / sizeof(u16))) && (rx > 0)) {
+ reloc_action = rop_action[rx];
+ reloc_info = rop_info[rx];
+ } else {
+ dload_error(dlthis, "Buffer Overflow - Array Index Out "
+ "of Bounds");
+ }
+
+ /* Compute the relocation amount for the referenced symbol, if any */
+ reloc_amt = rp->UVAL;
+ if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */
+ /* If this is the first pass, use the module's local symbol table,
+ * else use the trampoline symbol table. */
+ if (second_pass == false) {
+ if ((u32) rp->SYMNDX < dlthis->dfile_hdr.df_no_syms) {
+ /* real symbol reference */
+ svp = &dlthis->local_symtab[rp->SYMNDX];
+ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ?
+ svp->delta : svp->value;
+ }
+ /* reloc references current section */
+ else if (rp->SYMNDX == -1) {
+ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ?
+ dlthis->delta_runaddr :
+ dlthis->image_secn->run_addr;
+ }
+ }
+ }
+ /* relocation uses a symbol reference */
+ /* Handle stack adjustment */
+ val = 0;
+ top = RFV_STK(reloc_info);
+ if (top) {
+ top += dlthis->relstkidx - RSTK_UOP;
+ if (top >= STATIC_EXPR_STK_SIZE) {
+ dload_error(dlthis,
+ "Expression stack overflow in %s at offset "
+ FMT_UI32, dlthis->image_secn->name,
+ rp->vaddr + dlthis->image_offset);
+ return;
+ }
+ val = dlthis->relstk[dlthis->relstkidx];
+ dlthis->relstkidx = top;
+ stackp = &dlthis->relstk[top];
+ }
+ /* Derive field position and size, if we need them */
+ if (reloc_info & ROP_RW) { /* read or write action in our future */
+ fieldsz = RFV_WIDTH(reloc_action);
+ if (fieldsz) { /* field info from table */
+ offset = RFV_POSN(reloc_action);
+ if (TARGET_BIG_ENDIAN)
+ /* make sure vaddr is the lowest target
+ * address containing bits */
+ rp->vaddr += RFV_BIGOFF(reloc_info);
+ } else { /* field info from relocation op */
+ fieldsz = rp->FIELDSZ;
+ offset = rp->OFFSET;
+ if (TARGET_BIG_ENDIAN)
+ /* make sure vaddr is the lowest target
+ address containing bits */
+ rp->vaddr += (rp->WORDSZ - offset - fieldsz)
+ >> LOG_TARGET_AU_BITS;
+ }
+ data = (tgt_au_t *) ((char *)data + TADDR_TO_HOST(rp->vaddr));
+ /* compute lowest host location of referenced data */
+#if BITS_PER_AU > TARGET_AU_BITS
+ /* conversion from target address to host address may lose
+ address bits; add loss to offset */
+ if (TARGET_BIG_ENDIAN) {
+ offset += -((rp->vaddr << LOG_TARGET_AU_BITS) +
+ offset + fieldsz) &
+ (BITS_PER_AU - TARGET_AU_BITS);
+ } else {
+ offset += (rp->vaddr << LOG_TARGET_AU_BITS) &
+ (BITS_PER_AU - 1);
+ }
+#endif
+#ifdef RFV_SCALE
+ scale = RFV_SCALE(reloc_info);
+#endif
+ }
+ /* read the object value from the current image, if so ordered */
+ if (reloc_info & ROP_R) {
+ /* relocation reads current image value */
+ val = dload_unpack(dlthis, data, fieldsz, offset,
+ RFV_SIGN(reloc_info));
+ /* Save off the original value in case the relo overflows and
+ * we can trampoline it. */
+ orig_val = val;
+
+#ifdef RFV_SCALE
+ val <<= scale;
+#endif
+ }
+ /* perform the necessary arithmetic */
+ switch (RFV_ACTION(reloc_action)) { /* relocation actions */
+ case RACT_VAL:
+ break;
+ case RACT_ASGN:
+ val = reloc_amt;
+ break;
+ case RACT_ADD:
+ val += reloc_amt;
+ break;
+ case RACT_PCR:
+ /*-----------------------------------------------------------
+ * Handle special cases of jumping from absolute sections
+ * (special reloc type) or to absolute destination
+ * (symndx == -1). In either case, set the appropriate
+ * relocation amount to 0.
+ *----------------------------------------------------------- */
+ if (rp->SYMNDX == -1)
+ reloc_amt = 0;
+ val += reloc_amt - dlthis->delta_runaddr;
+ break;
+ case RACT_ADDISP:
+ val += rp->R_DISP + reloc_amt;
+ break;
+ case RACT_ASGPC:
+ val = dlthis->image_secn->run_addr + reloc_amt;
+ break;
+ case RACT_PLUS:
+ if (stackp != NULL)
+ val += *stackp;
+ break;
+ case RACT_SUB:
+ if (stackp != NULL)
+ val = *stackp - val;
+ break;
+ case RACT_NEG:
+ val = -val;
+ break;
+ case RACT_MPY:
+ if (stackp != NULL)
+ val *= *stackp;
+ break;
+ case RACT_DIV:
+ if (stackp != NULL)
+ val = *stackp / val;
+ break;
+ case RACT_MOD:
+ if (stackp != NULL)
+ val = *stackp % val;
+ break;
+ case RACT_SR:
+ if (val >= sizeof(rvalue) * BITS_PER_AU)
+ val = 0;
+ else if (stackp != NULL)
+ val = (urvalue) *stackp >> val;
+ break;
+ case RACT_ASR:
+ if (val >= sizeof(rvalue) * BITS_PER_AU)
+ val = sizeof(rvalue) * BITS_PER_AU - 1;
+ else if (stackp != NULL)
+ val = *stackp >> val;
+ break;
+ case RACT_SL:
+ if (val >= sizeof(rvalue) * BITS_PER_AU)
+ val = 0;
+ else if (stackp != NULL)
+ val = *stackp << val;
+ break;
+ case RACT_AND:
+ if (stackp != NULL)
+ val &= *stackp;
+ break;
+ case RACT_OR:
+ if (stackp != NULL)
+ val |= *stackp;
+ break;
+ case RACT_XOR:
+ if (stackp != NULL)
+ val ^= *stackp;
+ break;
+ case RACT_NOT:
+ val = ~val;
+ break;
+#if TMS32060
+ case RACT_C6SECT:
+ /* actually needed address of secn containing symbol */
+ if (svp != NULL) {
+ if (rp->SYMNDX >= 0)
+ if (svp->secnn > 0)
+ reloc_amt = dlthis->ldr_sections
+ [svp->secnn - 1].run_addr;
+ }
+ /* !!! FALL THRU !!! */
+ case RACT_C6BASE:
+ if (dlthis->bss_run_base == 0) {
+ struct dynload_symbol *symp;
+ symp = dlthis->mysym->find_matching_symbol
+ (dlthis->mysym, bsssymbol);
+ /* lookup value of global BSS base */
+ if (symp)
+ dlthis->bss_run_base = symp->value;
+ else
+ dload_error(dlthis,
+ "Global BSS base referenced in %s "
+ "offset" FMT_UI32 " but not "
+ "defined",
+ dlthis->image_secn->name,
+ rp->vaddr + dlthis->image_offset);
+ }
+ reloc_amt -= dlthis->bss_run_base;
+ /* !!! FALL THRU !!! */
+ case RACT_C6DSPL:
+ /* scale factor determined by 3 LSBs of field */
+ scale = c60_scale[val & SCALE_MASK];
+ offset += SCALE_BITS;
+ fieldsz -= SCALE_BITS;
+ val >>= SCALE_BITS; /* ignore the scale field hereafter */
+ val <<= scale;
+ val += reloc_amt; /* do the usual relocation */
+ if (((1 << scale) - 1) & val)
+ dload_error(dlthis,
+ "Unaligned reference in %s offset "
+ FMT_UI32, dlthis->image_secn->name,
+ rp->vaddr + dlthis->image_offset);
+ break;
+#endif
+ } /* relocation actions */
+ /* Put back result as required */
+ if (reloc_info & ROP_W) { /* relocation writes image value */
+#ifdef RFV_SCALE
+ val >>= scale;
+#endif
+ if (dload_repack(dlthis, val, data, fieldsz, offset,
+ RFV_SIGN(reloc_info))) {
+ /* Check to see if this relo can be trampolined,
+ * but only in first phase relocation. 2nd phase
+ * relocation cannot trampoline. */
+ if ((second_pass == false) &&
+ (dload_tramp_avail(dlthis, rp) == true)) {
+
+ /* Before generating the trampoline, restore
+ * the value to its original so the 2nd pass
+ * relo will work. */
+ dload_repack(dlthis, orig_val, data, fieldsz,
+ offset, RFV_SIGN(reloc_info));
+ if (!dload_tramp_generate(dlthis,
+ (dlthis->image_secn -
+ dlthis->ldr_sections),
+ dlthis->image_offset,
+ img_pkt, rp)) {
+ dload_error(dlthis,
+ "Failed to "
+ "generate trampoline for "
+ "bit overflow");
+ dload_error(dlthis,
+ "Relocation val " FMT_UI32
+ " overflows %d bits in %s "
+ "offset " FMT_UI32, val,
+ fieldsz,
+ dlthis->image_secn->name,
+ dlthis->image_offset +
+ rp->vaddr);
+ } else
+ *tramps_generated = true;
+ } else {
+ dload_error(dlthis, "Relocation value "
+ FMT_UI32 " overflows %d bits in %s"
+ " offset " FMT_UI32, val, fieldsz,
+ dlthis->image_secn->name,
+ dlthis->image_offset + rp->vaddr);
+ }
+ }
+ } else if (top)
+ *stackp = val;
+} /* reloc_value */
diff --git a/drivers/staging/tidspbridge/dynload/reloc_table.h b/drivers/staging/tidspbridge/dynload/reloc_table.h
new file mode 100644
index 00000000000..6aab03d4668
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/reloc_table.h
@@ -0,0 +1,102 @@
+/*
+ * reloc_table.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _RELOC_TABLE_H_
+#define _RELOC_TABLE_H_
+/*
+ * Table of relocation operator properties
+ */
+#include <linux/types.h>
+
+/* How does this relocation operation access the program image? */
+#define ROP_N 0 /* does not access image */
+#define ROP_R 1 /* read from image */
+#define ROP_W 2 /* write to image */
+#define ROP_RW 3 /* read from and write to image */
+
+/* For program image access, what are the overflow rules for the bit field? */
+/* Beware! Procedure repack depends on this encoding */
+#define ROP_ANY 0 /* no overflow ever, just truncate the value */
+#define ROP_SGN 1 /* signed field */
+#define ROP_UNS 2 /* unsigned field */
+#define ROP_MAX 3 /* allow maximum range of either signed or unsigned */
+
+/* How does the relocation operation use the symbol reference */
+#define ROP_IGN 0 /* no symbol is referenced */
+#define ROP_LIT 0 /* use rp->UVAL literal field */
+#define ROP_SYM 1 /* symbol value is used in relocation */
+#define ROP_SYMD 2 /* delta value vs last link is used */
+
+/* How does the reloc op use the stack? */
+#define RSTK_N 0 /* Does not use */
+#define RSTK_POP 1 /* Does a POP */
+#define RSTK_UOP 2 /* Unary op, stack position unaffected */
+#define RSTK_PSH 3 /* Does a push */
+
+/*
+ * Computational actions performed by the dynamic loader
+ */
+enum dload_actions {
+ /* don't alter the current val (from stack or mem fetch) */
+ RACT_VAL,
+ /* set value to reference amount (from symbol reference) */
+ RACT_ASGN,
+ RACT_ADD, /* add reference to value */
+ RACT_PCR, /* add reference minus PC delta to value */
+ RACT_ADDISP, /* add reference plus R_DISP */
+ RACT_ASGPC, /* set value to section addr plus reference */
+
+ RACT_PLUS, /* stack + */
+ RACT_SUB, /* stack - */
+ RACT_NEG, /* stack unary - */
+
+ RACT_MPY, /* stack * */
+ RACT_DIV, /* stack / */
+ RACT_MOD, /* stack % */
+
+ RACT_SR, /* stack unsigned >> */
+ RACT_ASR, /* stack signed >> */
+ RACT_SL, /* stack << */
+ RACT_AND, /* stack & */
+ RACT_OR, /* stack | */
+ RACT_XOR, /* stack ^ */
+ RACT_NOT, /* stack ~ */
+ RACT_C6SECT, /* for C60 R_SECT op */
+ RACT_C6BASE, /* for C60 R_BASE op */
+ RACT_C6DSPL, /* for C60 scaled 15-bit displacement */
+ RACT_PCR23T /* for ARM Thumb long branch */
+};
+
+/*
+ * macros used to extract values
+ */
+#define RFV_POSN(aaa) ((aaa) & 0xF)
+#define RFV_WIDTH(aaa) (((aaa) >> 4) & 0x3F)
+#define RFV_ACTION(aaa) ((aaa) >> 10)
+
+#define RFV_SIGN(iii) (((iii) >> 2) & 0x3)
+#define RFV_SYM(iii) (((iii) >> 4) & 0x3)
+#define RFV_STK(iii) (((iii) >> 6) & 0x3)
+#define RFV_ACCS(iii) ((iii) & 0x3)
+
+#if (TMS32060)
+#define RFV_SCALE(iii) ((iii) >> 11)
+#define RFV_BIGOFF(iii) (((iii) >> 8) & 0x7)
+#else
+#define RFV_BIGOFF(iii) ((iii) >> 8)
+#endif
+
+#endif /* _RELOC_TABLE_H_ */
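
To make the packed encodings concrete, here is a hand decode of one (rop_action, rop_info) pair. The values 2560 and 35 appear in the C6000 tables in the next file of this patch and are reached, via the rop_map1/rop_map2 hash, from relocation type 0x11; the decode simply applies the macros above and is illustrative, not normative.

u16 action = 2560, info = 35;	/* one entry from the C6000 tables */

/* RFV_ACTION(action) == 2  -> RACT_ADD: add the reference to the value */
/* RFV_WIDTH(action)  == 32 -> a 32-bit field                           */
/* RFV_POSN(action)   == 0  -> starting at bit position 0               */
/* RFV_ACCS(info)     == 3  -> ROP_RW: read and rewrite the image       */
/* RFV_SIGN(info)     == 0  -> ROP_ANY: truncate, never overflows       */
/* RFV_SYM(info)      == 2  -> ROP_SYMD: relocate by the symbol delta   */
/* RFV_STK(info)      == 0  -> RSTK_N: no expression-stack use          */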
diff --git a/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c b/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c
new file mode 100644
index 00000000000..a28bc044249
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c
@@ -0,0 +1,257 @@
+/*
+ * reloc_table_c6000.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* Tables generated for c6000 */
+
+#define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63)
+#define HASH_L(zz) ((zz) >> 8)
+#define HASH_I(zz) ((zz) & 0xFF)
+
+static const u16 rop_map1[] = {
+ 0,
+ 1,
+ 2,
+ 20,
+ 4,
+ 5,
+ 6,
+ 15,
+ 80,
+ 81,
+ 82,
+ 83,
+ 84,
+ 85,
+ 86,
+ 87,
+ 17,
+ 18,
+ 19,
+ 21,
+ 16,
+ 16394,
+ 16404,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 32,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 40,
+ 112,
+ 113,
+ 65535,
+ 16384,
+ 16385,
+ 16386,
+ 16387,
+ 16388,
+ 16389,
+ 16390,
+ 16391,
+ 16392,
+ 16393,
+ 16395,
+ 16396,
+ 16397,
+ 16398,
+ 16399,
+ 16400,
+ 16401,
+ 16402,
+ 16403,
+ 16405,
+ 16406,
+ 65535,
+ 65535,
+ 65535
+};
+
+static const s16 rop_map2[] = {
+ -256,
+ -255,
+ -254,
+ -245,
+ -253,
+ -252,
+ -251,
+ -250,
+ -241,
+ -240,
+ -239,
+ -238,
+ -237,
+ -236,
+ 1813,
+ 5142,
+ -248,
+ -247,
+ 778,
+ -244,
+ -249,
+ -221,
+ -211,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -243,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -242,
+ -233,
+ -232,
+ -1,
+ -231,
+ -230,
+ -229,
+ -228,
+ -227,
+ -226,
+ -225,
+ -224,
+ -223,
+ 5410,
+ -220,
+ -219,
+ -218,
+ -217,
+ -216,
+ -215,
+ -214,
+ -213,
+ 5676,
+ -210,
+ -209,
+ -1,
+ -1,
+ -1
+};
+
+static const u16 rop_action[] = {
+ 2560,
+ 2304,
+ 2304,
+ 2432,
+ 2432,
+ 2560,
+ 2176,
+ 2304,
+ 2560,
+ 3200,
+ 3328,
+ 3584,
+ 3456,
+ 2304,
+ 4208,
+ 20788,
+ 21812,
+ 3415,
+ 3245,
+ 2311,
+ 4359,
+ 19764,
+ 2311,
+ 3191,
+ 3280,
+ 6656,
+ 7680,
+ 8704,
+ 9728,
+ 10752,
+ 11776,
+ 12800,
+ 13824,
+ 14848,
+ 15872,
+ 16896,
+ 17920,
+ 18944,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1536,
+ 1536,
+ 1536,
+ 5632,
+ 512,
+ 0
+};
+
+static const u16 rop_info[] = {
+ 0,
+ 35,
+ 35,
+ 35,
+ 35,
+ 35,
+ 35,
+ 35,
+ 35,
+ 39,
+ 39,
+ 39,
+ 39,
+ 35,
+ 34,
+ 283,
+ 299,
+ 4135,
+ 4391,
+ 291,
+ 33059,
+ 283,
+ 295,
+ 4647,
+ 4135,
+ 64,
+ 64,
+ 128,
+ 64,
+ 64,
+ 64,
+ 64,
+ 64,
+ 64,
+ 64,
+ 64,
+ 64,
+ 128,
+ 201,
+ 197,
+ 74,
+ 70,
+ 208,
+ 196,
+ 200,
+ 192,
+ 192,
+ 66
+};
diff --git a/drivers/staging/tidspbridge/dynload/tramp.c b/drivers/staging/tidspbridge/dynload/tramp.c
new file mode 100644
index 00000000000..60d22ea4705
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/tramp.c
@@ -0,0 +1,1143 @@
+/*
+ * tramp.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "header.h"
+
+#if TMS32060
+#include "tramp_table_c6000.c"
+#endif
+
+#define MAX_RELOS_PER_PASS 4
+
+/*
+ * Function: priv_tramp_sect_tgt_alloc
+ * Description: Allocate target memory for the trampoline section. The
+ * target mem size is easily obtained as the next available address.
+ */
+static int priv_tramp_sect_tgt_alloc(struct dload_state *dlthis)
+{
+ int ret_val = 0;
+ struct ldr_section_info *sect_info;
+
+ /* Populate the trampoline loader section and allocate it on the
+ * target. The section name is ALWAYS the first string in the final
+ * string table for trampolines. The trampoline section is always
+ * 1 beyond the total number of allocated sections. */
+ sect_info = &dlthis->ldr_sections[dlthis->allocated_secn_count];
+
+ sect_info->name = dlthis->tramp.final_string_table;
+ sect_info->size = dlthis->tramp.tramp_sect_next_addr;
+ sect_info->context = 0;
+ sect_info->type =
+ (4 << 8) | DLOAD_TEXT | DS_ALLOCATE_MASK | DS_DOWNLOAD_MASK;
+ sect_info->page = 0;
+ sect_info->run_addr = 0;
+ sect_info->load_addr = 0;
+ ret_val = dlthis->myalloc->dload_allocate(dlthis->myalloc,
+ sect_info,
+ ds_alignment
+ (sect_info->type));
+
+ if (ret_val == 0)
+ dload_error(dlthis, "Failed to allocate target memory for"
+ " trampoline");
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_h2a
+ * Description: Helper function to convert a 4-bit hex digit to its ASCII
+ * representation. Used for trampoline symbol name generation.
+ */
+static u8 priv_h2a(u8 value)
+{
+ if (value > 0xF)
+ return 0xFF;
+
+ if (value <= 9)
+ value += 0x30;
+ else
+ value += 0x37;
+
+ return value;
+}
+
+/*
+ * Function: priv_tramp_sym_gen_name
+ * Description: Generate a trampoline symbol name (ASCII) using the value
+ * of the symbol. This places the new name into the user buffer.
+ * The name is fixed in length and of the form: __$dbTR__xxxxxxxx
+ * (where "xxxxxxxx" is the hex value).
+ */
+static void priv_tramp_sym_gen_name(u32 value, char *dst)
+{
+ u32 i;
+ char *prefix = TRAMP_SYM_PREFIX;
+ char *dst_local = dst;
+ u8 tmp;
+
+ /* Clear out the destination, including the ending NULL */
+ for (i = 0; i < (TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN); i++)
+ *(dst_local + i) = 0;
+
+ /* Copy the prefix to start */
+ for (i = 0; i < strlen(TRAMP_SYM_PREFIX); i++) {
+ *dst_local = *(prefix + i);
+ dst_local++;
+ }
+
+ /* Now convert the value passed in to a string equiv of the hex */
+ for (i = 0; i < sizeof(value); i++) {
+#ifndef _BIG_ENDIAN
+ tmp = *(((u8 *) &value) + (sizeof(value) - 1) - i);
+ *dst_local = priv_h2a((tmp & 0xF0) >> 4);
+ dst_local++;
+ *dst_local = priv_h2a(tmp & 0x0F);
+ dst_local++;
+#else
+ tmp = *(((u8 *) &value) + i);
+ *dst_local = priv_h2a((tmp & 0xF0) >> 4);
+ dst_local++;
+ *dst_local = priv_h2a(tmp & 0x0F);
+ dst_local++;
+#endif
+ }
+
+ /* NULL terminate */
+ *dst_local = 0;
+}
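
For illustration, assuming TRAMP_SYM_PREFIX is "__$dbTR__" as described above (the real constants live in dload_internal.h), generating a name for the value 0xBEEF1234 on a little-endian host gives:

char name[32];	/* generously sized: prefix + 8 hex digits + NUL */

priv_tramp_sym_gen_name(0xBEEF1234, name);
/* name == "__$dbTR__BEEF1234" (uppercase hex, most significant byte first) */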
+
+/*
+ * Function: priv_tramp_string_create
+ * Description: Create a new string specific to the trampoline loading and add
+ * it to the trampoline string list. This list contains the
+ * trampoline section name and trampoline point symbols.
+ */
+static struct tramp_string *priv_tramp_string_create(struct dload_state *dlthis,
+ u32 str_len, char *str)
+{
+ struct tramp_string *new_string = NULL;
+ u32 i;
+
+ /* Create a new string object with the specified size. */
+ new_string =
+ (struct tramp_string *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ (sizeof
+ (struct
+ tramp_string)
+ + str_len +
+ 1));
+ if (new_string != NULL) {
+ /* Clear the string first. This ensures the ending NULL is
+ * present and the optimizer won't touch it. */
+ for (i = 0; i < (sizeof(struct tramp_string) + str_len + 1);
+ i++)
+ *((u8 *) new_string + i) = 0;
+
+ /* Add this string to our virtual table by assigning it the
+ * next index and pushing it to the tail of the list. */
+ new_string->index = dlthis->tramp.tramp_string_next_index;
+ dlthis->tramp.tramp_string_next_index++;
+ dlthis->tramp.tramp_string_size += str_len + 1;
+
+ new_string->next = NULL;
+ if (dlthis->tramp.string_head == NULL)
+ dlthis->tramp.string_head = new_string;
+ else
+ dlthis->tramp.string_tail->next = new_string;
+
+ dlthis->tramp.string_tail = new_string;
+
+ /* Copy the string over to the new object */
+ for (i = 0; i < str_len; i++)
+ new_string->str[i] = str[i];
+ }
+
+ return new_string;
+}
+
+/*
+ * Function: priv_tramp_string_find
+ * Description: Walk the trampoline string list and find a match for the
+ * provided string. If no match is found, NULL is returned.
+ */
+static struct tramp_string *priv_tramp_string_find(struct dload_state *dlthis,
+ char *str)
+{
+ struct tramp_string *cur_str = NULL;
+ struct tramp_string *ret_val = NULL;
+ u32 i;
+ u32 str_len = strlen(str);
+
+ for (cur_str = dlthis->tramp.string_head;
+ (ret_val == NULL) && (cur_str != NULL); cur_str = cur_str->next) {
+ /* If the string lengths aren't equal, don't bother
+ * comparing */
+ if (str_len != strlen(cur_str->str))
+ continue;
+
+ /* Walk the strings until one of them ends */
+ for (i = 0; i < str_len; i++) {
+ /* If they don't match in the current position then
+ * break out now, no sense in continuing to look at
+ * this string. */
+ if (str[i] != cur_str->str[i])
+ break;
+ }
+
+ if (i == str_len)
+ ret_val = cur_str;
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_string_tbl_finalize
+ * Description: Flatten the trampoline string list into a table of NULL
+ * terminated strings. This is the same format of string table
+ * as used by the COFF/DOFF file.
+ */
+static int priv_string_tbl_finalize(struct dload_state *dlthis)
+{
+ int ret_val = 0;
+ struct tramp_string *cur_string;
+ char *cur_loc;
+ char *tmp;
+
+ /* Allocate enough space for all strings that have been created. The
+ * table is simply all strings concatenated together with NULL
+ * endings. */
+ dlthis->tramp.final_string_table =
+ (char *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ dlthis->tramp.
+ tramp_string_size);
+ if (dlthis->tramp.final_string_table != NULL) {
+ /* We got our buffer, walk the list and release the nodes as
+ * we go */
+ cur_loc = dlthis->tramp.final_string_table;
+ cur_string = dlthis->tramp.string_head;
+ while (cur_string != NULL) {
+ /* Move the head/tail pointers */
+ dlthis->tramp.string_head = cur_string->next;
+ if (dlthis->tramp.string_tail == cur_string)
+ dlthis->tramp.string_tail = NULL;
+
+ /* Copy the string contents */
+ for (tmp = cur_string->str;
+ *tmp != '\0'; tmp++, cur_loc++)
+ *cur_loc = *tmp;
+
+ /* Add the NULL termination explicitly; the loop above used it
+ * as its stopping condition and did not copy it. */
+ *cur_loc = '\0';
+ cur_loc++;
+
+ /* Free the string node, we don't need it any more. */
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ cur_string);
+
+ /* Move our pointer to the next one */
+ cur_string = dlthis->tramp.string_head;
+ }
+
+ /* Update our return value to success */
+ ret_val = 1;
+ } else
+ dload_error(dlthis, "Failed to allocate trampoline "
+ "string table");
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_tramp_sect_alloc
+ * Description: Virtually allocate space from the trampoline section. This
+ * function returns the next offset within the trampoline section
+ * that is available and advances the next available offset by the
+ * requested size. NO TARGET ALLOCATION IS DONE AT THIS TIME.
+ */
+static u32 priv_tramp_sect_alloc(struct dload_state *dlthis, u32 tramp_size)
+{
+ u32 ret_val;
+
+ /* If the next available address is 0, this is our first allocation.
+ * Create a section name string to go into the string table. */
+ if (dlthis->tramp.tramp_sect_next_addr == 0) {
+ dload_syms_error(dlthis->mysym, "*** WARNING *** created "
+ "dynamic TRAMPOLINE section for module %s",
+ dlthis->str_head);
+ }
+
+ /* Reserve space for the new trampoline */
+ ret_val = dlthis->tramp.tramp_sect_next_addr;
+ dlthis->tramp.tramp_sect_next_addr += tramp_size;
+ return ret_val;
+}
+
+/*
+ * Function: priv_tramp_sym_create
+ * Description: Allocate and create a new trampoline specific symbol and add
+ * it to the trampoline symbol list. These symbols will include
+ * trampoline points as well as the external symbols they
+ * reference.
+ */
+static struct tramp_sym *priv_tramp_sym_create(struct dload_state *dlthis,
+ u32 str_index,
+ struct local_symbol *tmp_sym)
+{
+ struct tramp_sym *new_sym = NULL;
+ u32 i;
+
+ /* Allocate new space for the symbol in the symbol table. */
+ new_sym =
+ (struct tramp_sym *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ sizeof(struct tramp_sym));
+ if (new_sym != NULL) {
+ for (i = 0; i != sizeof(struct tramp_sym); i++)
+ *((char *)new_sym + i) = 0;
+
+ /* Assign this symbol the next symbol index for easier
+ * reference later during relocation. */
+ new_sym->index = dlthis->tramp.tramp_sym_next_index;
+ dlthis->tramp.tramp_sym_next_index++;
+
+ /* Populate the symbol information. At this point any
+ * trampoline symbols will be the offset location, not the
+ * final. Copy over the symbol info to start, then be sure to
+ * get the string index from the trampoline string table. */
+ new_sym->sym_info = *tmp_sym;
+ new_sym->str_index = str_index;
+
+ /* Push the new symbol to the tail of the symbol table list */
+ new_sym->next = NULL;
+ if (dlthis->tramp.symbol_head == NULL)
+ dlthis->tramp.symbol_head = new_sym;
+ else
+ dlthis->tramp.symbol_tail->next = new_sym;
+
+ dlthis->tramp.symbol_tail = new_sym;
+ }
+
+ return new_sym;
+}
+
+/*
+ * Function: priv_tramp_sym_get
+ * Description: Search for the symbol with the matching string index (from
+ * the trampoline string table) and return the trampoline
+ * symbol object, if found. Otherwise return NULL.
+ */
+static struct tramp_sym *priv_tramp_sym_get(struct dload_state *dlthis,
+ u32 string_index)
+{
+ struct tramp_sym *sym_found = NULL;
+
+ /* Walk the symbol table list and search vs. the string index */
+ for (sym_found = dlthis->tramp.symbol_head;
+ sym_found != NULL; sym_found = sym_found->next) {
+ if (sym_found->str_index == string_index)
+ break;
+ }
+
+ return sym_found;
+}
+
+/*
+ * Function: priv_tramp_sym_find
+ * Description: Search for a trampoline symbol based on the string name of
+ * the symbol. Return the symbol object, if found, otherwise
+ * return NULL.
+ */
+static struct tramp_sym *priv_tramp_sym_find(struct dload_state *dlthis,
+ char *string)
+{
+ struct tramp_sym *sym_found = NULL;
+ struct tramp_string *str_found = NULL;
+
+ /* First, search for the string, then search for the sym based on the
+ string index. */
+ str_found = priv_tramp_string_find(dlthis, string);
+ if (str_found != NULL)
+ sym_found = priv_tramp_sym_get(dlthis, str_found->index);
+
+ return sym_found;
+}
+
+/*
+ * Function: priv_tramp_sym_finalize
+ * Description: Allocate a flat symbol table for the trampoline section,
+ * put each trampoline symbol into the table, adjust the
+ * symbol value based on the section address on the target and
+ * free the trampoline symbol list nodes.
+ */
+static int priv_tramp_sym_finalize(struct dload_state *dlthis)
+{
+ int ret_val = 0;
+ struct tramp_sym *cur_sym;
+ struct ldr_section_info *tramp_sect =
+ &dlthis->ldr_sections[dlthis->allocated_secn_count];
+ struct local_symbol *new_sym;
+
+ /* Allocate a table to hold a flattened version of all symbols
+ * created. */
+ dlthis->tramp.final_sym_table =
+ (struct local_symbol *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ (sizeof(struct local_symbol) * dlthis->tramp.
+ tramp_sym_next_index));
+ if (dlthis->tramp.final_sym_table != NULL) {
+ /* Walk the list of all symbols, copy it over to the flattened
+ * table. After it has been copied, the node can be freed as
+ * it is no longer needed. */
+ new_sym = dlthis->tramp.final_sym_table;
+ cur_sym = dlthis->tramp.symbol_head;
+ while (cur_sym != NULL) {
+ /* Pop it off the list */
+ dlthis->tramp.symbol_head = cur_sym->next;
+ if (cur_sym == dlthis->tramp.symbol_tail)
+ dlthis->tramp.symbol_tail = NULL;
+
+ /* Copy the symbol contents into the flat table */
+ *new_sym = cur_sym->sym_info;
+
+ /* Now finalize the symbol. If it is in the tramp
+ * section, we need to adjust for the section start.
+ * If it is external then we don't need to adjust at
+ * all.
+ * NOTE: THIS CODE ASSUMES THAT THE TRAMPOLINE IS
+ * REFERENCED LIKE A CALL TO AN EXTERNAL SO VALUE AND
+ * DELTA ARE THE SAME. SEE THE FUNCTION dload_symbols
+ * WHERE DN_UNDEF IS HANDLED FOR MORE REFERENCE. */
+ if (new_sym->secnn < 0) {
+ new_sym->value += tramp_sect->load_addr;
+ new_sym->delta = new_sym->value;
+ }
+
+ /* Let go of the symbol node */
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_sym);
+
+ /* Move to the next node */
+ cur_sym = dlthis->tramp.symbol_head;
+ new_sym++;
+ }
+
+ ret_val = 1;
+ } else
+ dload_error(dlthis, "Failed to alloc trampoline sym table");
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_tgt_img_gen
+ * Description: Allocate storage for and copy the target specific image data
+ * and fix up its relocations for the new external symbol. If
+ * a trampoline image packet was successfully created it is added
+ * to the trampoline list.
+ */
+static int priv_tgt_img_gen(struct dload_state *dlthis, u32 base,
+ u32 gen_index, struct tramp_sym *new_ext_sym)
+{
+ struct tramp_img_pkt *new_img_pkt = NULL;
+ u32 i;
+ u32 pkt_size = tramp_img_pkt_size_get();
+ u8 *gen_tbl_entry;
+ u8 *pkt_data;
+ struct reloc_record_t *cur_relo;
+ int ret_val = 0;
+
+ /* Allocate a new image packet and set it up. */
+ new_img_pkt =
+ (struct tramp_img_pkt *)dlthis->mysym->dload_allocate(dlthis->mysym,
+ pkt_size);
+ if (new_img_pkt != NULL) {
+ /* Save the base, this is where it goes in the section */
+ new_img_pkt->base = base;
+
+ /* Copy over the image data and relos from the target table */
+ pkt_data = (u8 *) &new_img_pkt->hdr;
+ gen_tbl_entry = (u8 *) &tramp_gen_info[gen_index];
+ for (i = 0; i < pkt_size; i++) {
+ *pkt_data = *gen_tbl_entry;
+ pkt_data++;
+ gen_tbl_entry++;
+ }
+
+ /* Update the relocations to point to the external symbol */
+ cur_relo =
+ (struct reloc_record_t *)((u8 *) &new_img_pkt->hdr +
+ new_img_pkt->hdr.relo_offset);
+ for (i = 0; i < new_img_pkt->hdr.num_relos; i++)
+ cur_relo[i].SYMNDX = new_ext_sym->index;
+
+ /* Add it to the trampoline list. */
+ new_img_pkt->next = dlthis->tramp.tramp_pkts;
+ dlthis->tramp.tramp_pkts = new_img_pkt;
+
+ ret_val = 1;
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_pkt_relo
+ * Description: Take the provided image data and the collection of relocations
+ * for it and perform the relocations. Note that all relocations
+ * at this stage are considered SECOND PASS since the original
+ * image has already been processed in the first pass. This means
+ * TRAMPOLINES ARE TREATED AS 2ND PASS even though this is really
+ * the first (and only) relocation that will be performed on them.
+ */
+static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t * data,
+ struct reloc_record_t *rp[], u32 relo_count)
+{
+ int ret_val = 1;
+ u32 i;
+ bool tmp;
+
+ /* Walk through all of the relos and process them. This function is
+ * the equivalent of relocate_packet() from cload.c, but specialized
+ * for trampolines and 2nd phase relocations. */
+ for (i = 0; i < relo_count; i++)
+ dload_relocate(dlthis, data, rp[i], &tmp, true);
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_tramp_pkt_finalize
+ * Description: Walk the list of all trampoline packets and finalize them.
+ * Each trampoline image packet will be relocated now that the
+ * trampoline section has been allocated on the target. Once
+ * all of the relocations are done the trampoline image data
+ * is written into target memory and the trampoline packet
+ * is freed: it is no longer needed after this point.
+ */
+static int priv_tramp_pkt_finalize(struct dload_state *dlthis)
+{
+ int ret_val = 1;
+ struct tramp_img_pkt *cur_pkt = NULL;
+ struct reloc_record_t *relos[MAX_RELOS_PER_PASS];
+ u32 relos_done;
+ u32 i;
+ struct reloc_record_t *cur_relo;
+ struct ldr_section_info *sect_info =
+ &dlthis->ldr_sections[dlthis->allocated_secn_count];
+
+ /* Walk the list of trampoline packets and relocate each packet. This
+ * function is the trampoline equivalent of dload_data() from
+ * cload.c. */
+ cur_pkt = dlthis->tramp.tramp_pkts;
+ while ((ret_val != 0) && (cur_pkt != NULL)) {
+ /* Remove the pkt from the list */
+ dlthis->tramp.tramp_pkts = cur_pkt->next;
+
+ /* Setup section and image offset information for the relo */
+ dlthis->image_secn = sect_info;
+ dlthis->image_offset = cur_pkt->base;
+ dlthis->delta_runaddr = sect_info->run_addr;
+
+ /* Walk through all relos for the packet */
+ relos_done = 0;
+ cur_relo = (struct reloc_record_t *)((u8 *) &cur_pkt->hdr +
+ cur_pkt->hdr.relo_offset);
+ while (relos_done < cur_pkt->hdr.num_relos) {
+#ifdef ENABLE_TRAMP_DEBUG
+ dload_syms_error(dlthis->mysym,
+ "===> Trampoline %x branches to %x",
+ sect_info->run_addr +
+ dlthis->image_offset,
+ dlthis->
+ tramp.final_sym_table[cur_relo->
+ SYMNDX].value);
+#endif
+
+ for (i = 0;
+ ((i < MAX_RELOS_PER_PASS) &&
+ ((i + relos_done) < cur_pkt->hdr.num_relos)); i++)
+ relos[i] = cur_relo + i;
+
+ /* Do the actual relo */
+ ret_val = priv_pkt_relo(dlthis,
+ (tgt_au_t *) &cur_pkt->payload,
+ relos, i);
+ if (ret_val == 0) {
+ dload_error(dlthis,
+ "Relocation of trampoline pkt at %x"
+ " failed", cur_pkt->base +
+ sect_info->run_addr);
+ break;
+ }
+
+ relos_done += i;
+ cur_relo += i;
+ }
+
+ /* Make sure we didn't hit a problem */
+ if (ret_val != 0) {
+ /* Relos are done for the packet, write it to the
+ * target */
+ ret_val = dlthis->myio->writemem(dlthis->myio,
+ &cur_pkt->payload,
+ sect_info->load_addr +
+ cur_pkt->base,
+ sect_info,
+ BYTE_TO_HOST
+ (cur_pkt->hdr.
+ tramp_code_size));
+ if (ret_val == 0) {
+ dload_error(dlthis,
+ "Write to " FMT_UI32 " failed",
+ sect_info->load_addr +
+ cur_pkt->base);
+ }
+
+ /* Done with the pkt, let it go */
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_pkt);
+
+ /* Get the next packet to process */
+ cur_pkt = dlthis->tramp.tramp_pkts;
+ }
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_dup_pkt_finalize
+ * Description: Walk the list of duplicate image packets and finalize them.
+ * Each duplicate packet will be relocated again for the
+ * relocations that previously failed and have been adjusted
+ * to point at a trampoline. Once all relocations for a packet
+ * have been done, write the packet into target memory. The
+ * duplicate packet and its relocation chain are all freed
+ * after use here as they are no longer needed after this.
+ */
+static int priv_dup_pkt_finalize(struct dload_state *dlthis)
+{
+ int ret_val = 1;
+ struct tramp_img_dup_pkt *cur_pkt;
+ struct tramp_img_dup_relo *cur_relo;
+ struct reloc_record_t *relos[MAX_RELOS_PER_PASS];
+ struct doff_scnhdr_t *sect_hdr = NULL;
+ s32 i;
+
+ /* Similar to the trampoline pkt finalize, this function walks each dup
+ * pkt that was generated and performs all relocations that were
+ * deferred to a 2nd pass. This is the equivalent of dload_data() from
+ * cload.c, but does not need the additional reorder and checksum
+ * processing as it has already been done. */
+ cur_pkt = dlthis->tramp.dup_pkts;
+ while ((ret_val != 0) && (cur_pkt != NULL)) {
+ /* Remove the node from the list, we'll be freeing it
+ * shortly */
+ dlthis->tramp.dup_pkts = cur_pkt->next;
+
+ /* Setup the section and image offset for relocation */
+ dlthis->image_secn = &dlthis->ldr_sections[cur_pkt->secnn];
+ dlthis->image_offset = cur_pkt->offset;
+
+ /* In order to get the delta run address, we need to reference
+ * the original section header. It's a bit ugly, but needed
+ * for relo. */
+ i = (s32) (dlthis->image_secn - dlthis->ldr_sections);
+ sect_hdr = dlthis->sect_hdrs + i;
+ dlthis->delta_runaddr = sect_hdr->ds_paddr;
+
+ /* Walk all relos in the chain and process each. */
+ cur_relo = cur_pkt->relo_chain;
+ while (cur_relo != NULL) {
+ /* Process them a chunk at a time to be efficient */
+ for (i = 0; (i < MAX_RELOS_PER_PASS)
+ && (cur_relo != NULL);
+ i++, cur_relo = cur_relo->next) {
+ relos[i] = &cur_relo->relo;
+ cur_pkt->relo_chain = cur_relo->next;
+ }
+
+ /* Do the actual relo */
+ ret_val = priv_pkt_relo(dlthis,
+ cur_pkt->img_pkt.img_data,
+ relos, i);
+ if (ret_val == 0) {
+ dload_error(dlthis,
+ "Relocation of dup pkt at %x"
+ " failed", cur_pkt->offset +
+ dlthis->image_secn->run_addr);
+ break;
+ }
+
+ /* Release all of these relos, we're done with them */
+ while (i > 0) {
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ GET_CONTAINER
+ (relos[i - 1],
+ struct tramp_img_dup_relo,
+ relo));
+ i--;
+ }
+
+ /* DO NOT ADVANCE cur_relo, IT IS ALREADY READY TO
+ * GO! */
+ }
+
+ /* Done with all relos. Make sure we didn't have a problem and
+ * write it out to the target */
+ if (ret_val != 0) {
+ ret_val = dlthis->myio->writemem(dlthis->myio,
+ cur_pkt->img_pkt.
+ img_data,
+ dlthis->image_secn->
+ load_addr +
+ cur_pkt->offset,
+ dlthis->image_secn,
+ BYTE_TO_HOST
+ (cur_pkt->img_pkt.
+ packet_size));
+ if (ret_val == 0) {
+ dload_error(dlthis,
+ "Write to " FMT_UI32 " failed",
+ dlthis->image_secn->load_addr +
+ cur_pkt->offset);
+ }
+
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_pkt);
+
+ /* Advance to the next packet */
+ cur_pkt = dlthis->tramp.dup_pkts;
+ }
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: priv_dup_find
+ * Description: Walk the list of existing duplicate packets and find a
+ * match based on the section number and image offset. Return
+ * the duplicate packet if found, otherwise NULL.
+ */
+static struct tramp_img_dup_pkt *priv_dup_find(struct dload_state *dlthis,
+ s16 secnn, u32 image_offset)
+{
+ struct tramp_img_dup_pkt *cur_pkt = NULL;
+
+ for (cur_pkt = dlthis->tramp.dup_pkts;
+ cur_pkt != NULL; cur_pkt = cur_pkt->next) {
+ if ((cur_pkt->secnn == secnn) &&
+ (cur_pkt->offset == image_offset)) {
+ /* Found a match, break out */
+ break;
+ }
+ }
+
+ return cur_pkt;
+}
+
+/*
+ * Function: priv_img_pkt_dup
+ * Description: Duplicate the original image packet. If this is the first
+ * time this image packet has been seen (based on section number
+ * and image offset), create a new duplicate packet and add it
+ * to the dup packet list. If not, just get the existing one and
+ * update it with the current packet contents (since relocation
+ * on the packet is still ongoing in first pass.) Create a
+ * duplicate of the provided relocation, but update it to point
+ * to the new trampoline symbol. Add the new relocation dup to
+ * the dup packet's relo chain for 2nd pass relocation later.
+ */
+static int priv_img_pkt_dup(struct dload_state *dlthis,
+ s16 secnn, u32 image_offset,
+ struct image_packet_t *ipacket,
+ struct reloc_record_t *rp,
+ struct tramp_sym *new_tramp_sym)
+{
+ struct tramp_img_dup_pkt *dup_pkt = NULL;
+ u32 new_dup_size;
+ s32 i;
+ int ret_val = 0;
+ struct tramp_img_dup_relo *dup_relo = NULL;
+
+ /* Determine if this image packet is already being tracked in the
+ dup list for other trampolines. */
+ dup_pkt = priv_dup_find(dlthis, secnn, image_offset);
+
+ if (dup_pkt == NULL) {
+ /* This image packet does not exist in our tracking, so create
+ * a new one and add it to the head of the list. */
+ new_dup_size = sizeof(struct tramp_img_dup_pkt) +
+ ipacket->packet_size;
+
+ dup_pkt = (struct tramp_img_dup_pkt *)
+ dlthis->mysym->dload_allocate(dlthis->mysym, new_dup_size);
+ if (dup_pkt != NULL) {
+ /* Save off the section and offset information */
+ dup_pkt->secnn = secnn;
+ dup_pkt->offset = image_offset;
+ dup_pkt->relo_chain = NULL;
+
+ /* Copy the original packet content */
+ dup_pkt->img_pkt = *ipacket;
+ dup_pkt->img_pkt.img_data = (u8 *) (dup_pkt + 1);
+ for (i = 0; i < ipacket->packet_size; i++)
+ *(dup_pkt->img_pkt.img_data + i) =
+ *(ipacket->img_data + i);
+
+ /* Add the packet to the dup list */
+ dup_pkt->next = dlthis->tramp.dup_pkts;
+ dlthis->tramp.dup_pkts = dup_pkt;
+ } else
+ dload_error(dlthis, "Failed to create dup packet!");
+ } else {
+ /* The image packet contents could have changed since
+ * trampoline detection happens during relocation of the image
+ * packets. So, we need to update the image packet contents
+ * before adding relo information. */
+ for (i = 0; i < dup_pkt->img_pkt.packet_size; i++)
+ *(dup_pkt->img_pkt.img_data + i) =
+ *(ipacket->img_data + i);
+ }
+
+ /* Since the previous code may have allocated a new dup packet for us,
+ double check that we actually have one. */
+ if (dup_pkt != NULL) {
+ /* Allocate a new node for the relo chain. Each image packet
+ * can potentially have multiple relocations that cause a
+ * trampoline to be generated. So, we keep them in a chain,
+ * order is not important. */
+ dup_relo = dlthis->mysym->dload_allocate(dlthis->mysym,
+ sizeof(struct tramp_img_dup_relo));
+ if (dup_relo != NULL) {
+ /* Copy the relo contents, adjust for the new
+ * trampoline and add it to the list. */
+ dup_relo->relo = *rp;
+ dup_relo->relo.SYMNDX = new_tramp_sym->index;
+
+ dup_relo->next = dup_pkt->relo_chain;
+ dup_pkt->relo_chain = dup_relo;
+
+ /* That's it, we're done. Make sure we update our
+ * return value to be success since everything finished
+ * ok */
+ ret_val = 1;
+ } else
+ dload_error(dlthis, "Unable to alloc dup relo");
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: dload_tramp_avail
+ * Description: Check to see if the target supports a trampoline for this type
+ * of relocation. Return true if it does, otherwise false.
+ */
+bool dload_tramp_avail(struct dload_state *dlthis, struct reloc_record_t *rp)
+{
+ bool ret_val = false;
+ u16 map_index;
+ u16 gen_index;
+
+ /* Check type hash vs. target tramp table */
+ map_index = HASH_FUNC(rp->TYPE);
+ gen_index = tramp_map[map_index];
+ if (gen_index != TRAMP_NO_GEN_AVAIL)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/*
+ * Function: dload_tramp_generate
+ * Description: Create a new trampoline for the provided image packet and
+ * relocation causing problems. This will create the trampoline
+ * as well as duplicate/update the image packet and relocation
+ * causing the problem, which will be relo'd again during
+ * finalization.
+ */
+int dload_tramp_generate(struct dload_state *dlthis, s16 secnn,
+ u32 image_offset, struct image_packet_t *ipacket,
+ struct reloc_record_t *rp)
+{
+ u16 map_index;
+ u16 gen_index;
+ int ret_val = 1;
+ char tramp_sym_str[TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN];
+ struct local_symbol *ref_sym;
+ struct tramp_sym *new_tramp_sym;
+ struct tramp_sym *new_ext_sym;
+ struct tramp_string *new_tramp_str;
+ u32 new_tramp_base;
+ struct local_symbol tmp_sym;
+ struct local_symbol ext_tmp_sym;
+
+ /* Hash the relo type to get our generator information */
+ map_index = HASH_FUNC(rp->TYPE);
+ gen_index = tramp_map[map_index];
+ if (gen_index != TRAMP_NO_GEN_AVAIL) {
+ /* If this is the first trampoline, create the section name in
+ * our string table for debug help later. */
+ if (dlthis->tramp.string_head == NULL) {
+ priv_tramp_string_create(dlthis,
+ strlen(TRAMP_SECT_NAME),
+ TRAMP_SECT_NAME);
+ }
+#ifdef ENABLE_TRAMP_DEBUG
+ dload_syms_error(dlthis->mysym,
+ "Trampoline at img loc %x, references %x",
+ dlthis->ldr_sections[secnn].run_addr +
+ image_offset + rp->vaddr,
+ dlthis->local_symtab[rp->SYMNDX].value);
+#endif
+
+ /* Generate the trampoline string, check if already defined.
+ * If the relo symbol index is -1, it means we need the section
+ * info for relo later. To do this we'll dummy up a symbol
+ * with the section delta and run addresses. */
+ if (rp->SYMNDX == -1) {
+ ext_tmp_sym.value =
+ dlthis->ldr_sections[secnn].run_addr;
+ ext_tmp_sym.delta = dlthis->sect_hdrs[secnn].ds_paddr;
+ ref_sym = &ext_tmp_sym;
+ } else
+ ref_sym = &(dlthis->local_symtab[rp->SYMNDX]);
+
+ priv_tramp_sym_gen_name(ref_sym->value, tramp_sym_str);
+ new_tramp_sym = priv_tramp_sym_find(dlthis, tramp_sym_str);
+ if (new_tramp_sym == NULL) {
+ /* If the tramp string is not yet defined, create a new
+ * string and symbol for it, as well as a duplicate of
+ * the original symbol which caused the trampoline. */
+ new_tramp_str = priv_tramp_string_create(dlthis,
+ strlen
+ (tramp_sym_str),
+ tramp_sym_str);
+ if (new_tramp_str == NULL) {
+ dload_error(dlthis, "Failed to create new "
+ "trampoline string\n");
+ ret_val = 0;
+ } else {
+ /* Allocate tramp section space for the new
+ * tramp from the target */
+ new_tramp_base = priv_tramp_sect_alloc(dlthis,
+ tramp_size_get());
+
+ /* We have a string, create the new symbol and
+ * duplicate the external. */
+ tmp_sym.value = new_tramp_base;
+ tmp_sym.delta = 0;
+ tmp_sym.secnn = -1;
+ tmp_sym.sclass = 0;
+ new_tramp_sym = priv_tramp_sym_create(dlthis,
+ new_tramp_str->
+ index,
+ &tmp_sym);
+
+ new_ext_sym = priv_tramp_sym_create(dlthis, -1,
+ ref_sym);
+
+ if ((new_tramp_sym != NULL) &&
+ (new_ext_sym != NULL)) {
+ /* Call the image generator to get the
+ * new image data and fix up its
+ * relocations for the external
+ * symbol. */
+ ret_val = priv_tgt_img_gen(dlthis,
+ new_tramp_base,
+ gen_index,
+ new_ext_sym);
+
+ /* Add generated image data to tramp
+ * image list */
+ if (ret_val != 1) {
+ dload_error(dlthis, "Failed to "
+ "create img pkt for"
+ " trampoline\n");
+ }
+ } else {
+ dload_error(dlthis, "Failed to create "
+ "new tramp syms "
+ "(%8.8X, %8.8X)\n",
+ new_tramp_sym, new_ext_sym);
+ ret_val = 0;
+ }
+ }
+ }
+
+ /* Duplicate the image data and relo record that caused the
+ * tramp, and update the relo data to point to the tramp
+ * symbol. */
+ if (ret_val == 1) {
+ ret_val = priv_img_pkt_dup(dlthis, secnn, image_offset,
+ ipacket, rp, new_tramp_sym);
+ if (ret_val != 1) {
+ dload_error(dlthis, "Failed to create dup of "
+ "original img pkt\n");
+ }
+ }
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: dload_tramp_pkt_update
+ * Description: Update the duplicate copy of this image packet, which the
+ * trampoline layer is already tracking. This call is critical
+ * whenever trampolines were generated anywhere within the
+ * packet and first pass relo continued on the remainder. The
+ * trampoline layer needs the updated image data so that when
+ * 2nd pass relo is done during finalize, the fully relocated
+ * image packet can be written to the target.
+ */
+int dload_tramp_pkt_udpate(struct dload_state *dlthis, s16 secnn,
+ u32 image_offset, struct image_packet_t *ipacket)
+{
+ struct tramp_img_dup_pkt *dup_pkt = NULL;
+ s32 i;
+ int ret_val = 0;
+
+ /* Find the image packet in question, the caller needs us to update it
+ since a trampoline was previously generated. */
+ dup_pkt = priv_dup_find(dlthis, secnn, image_offset);
+ if (dup_pkt != NULL) {
+ for (i = 0; i < dup_pkt->img_pkt.packet_size; i++)
+ *(dup_pkt->img_pkt.img_data + i) =
+ *(ipacket->img_data + i);
+
+ ret_val = 1;
+ } else {
+ dload_error(dlthis,
+ "Unable to find existing DUP pkt for %x, offset %x",
+ secnn, image_offset);
+
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: dload_tramp_finalize
+ * Description: If any trampolines were created, finalize everything on the
+ * target by allocating the trampoline section on the target,
+ * finalizing the trampoline symbols, finalizing the trampoline
+ * packets (write the new section to target memory) and finalize
+ * the duplicate packets by doing 2nd pass relo over them.
+ */
+int dload_tramp_finalize(struct dload_state *dlthis)
+{
+ int ret_val = 1;
+
+ if (dlthis->tramp.tramp_sect_next_addr != 0) {
+ /* Finalize strings into a flat table. This is needed so it
+ * can be added to the debug string table later. */
+ ret_val = priv_string_tbl_finalize(dlthis);
+
+ /* Do target allocation for section BEFORE finalizing
+ * symbols. */
+ if (ret_val != 0)
+ ret_val = priv_tramp_sect_tgt_alloc(dlthis);
+
+ /* Finalize symbols with their correct target information and
+ * flatten */
+ if (ret_val != 0)
+ ret_val = priv_tramp_sym_finalize(dlthis);
+
+ /* Finalize all trampoline packets. This performs the
+ * relocation on the packets as well as writing them to target
+ * memory. */
+ if (ret_val != 0)
+ ret_val = priv_tramp_pkt_finalize(dlthis);
+
+ /* Perform a 2nd pass relocation on the dup list. */
+ if (ret_val != 0)
+ ret_val = priv_dup_pkt_finalize(dlthis);
+ }
+
+ return ret_val;
+}
+
+/*
+ * Function: dload_tramp_cleanup
+ * Description: Release all temporary resources used in the trampoline layer.
+ * Note that the target memory which may have been allocated and
+ * written to store the trampolines is NOT RELEASED HERE since it
+ * is potentially still in use. It is automatically released
+ * when the module is unloaded.
+ */
+void dload_tramp_cleanup(struct dload_state *dlthis)
+{
+ struct tramp_info *tramp = &dlthis->tramp;
+ struct tramp_sym *cur_sym;
+ struct tramp_string *cur_string;
+ struct tramp_img_pkt *cur_tramp_pkt;
+ struct tramp_img_dup_pkt *cur_dup_pkt;
+ struct tramp_img_dup_relo *cur_dup_relo;
+
+ /* If there were no tramps generated, just return */
+ if (tramp->tramp_sect_next_addr == 0)
+ return;
+
+ /* Destroy all tramp information */
+ for (cur_sym = tramp->symbol_head;
+ cur_sym != NULL; cur_sym = tramp->symbol_head) {
+ tramp->symbol_head = cur_sym->next;
+ if (tramp->symbol_tail == cur_sym)
+ tramp->symbol_tail = NULL;
+
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_sym);
+ }
+
+ if (tramp->final_sym_table != NULL)
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ tramp->final_sym_table);
+
+ for (cur_string = tramp->string_head;
+ cur_string != NULL; cur_string = tramp->string_head) {
+ tramp->string_head = cur_string->next;
+ if (tramp->string_tail == cur_string)
+ tramp->string_tail = NULL;
+
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_string);
+ }
+
+ if (tramp->final_string_table != NULL)
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ tramp->final_string_table);
+
+ for (cur_tramp_pkt = tramp->tramp_pkts;
+ cur_tramp_pkt != NULL; cur_tramp_pkt = tramp->tramp_pkts) {
+ tramp->tramp_pkts = cur_tramp_pkt->next;
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_tramp_pkt);
+ }
+
+ for (cur_dup_pkt = tramp->dup_pkts;
+ cur_dup_pkt != NULL; cur_dup_pkt = tramp->dup_pkts) {
+ tramp->dup_pkts = cur_dup_pkt->next;
+
+ for (cur_dup_relo = cur_dup_pkt->relo_chain;
+ cur_dup_relo != NULL;
+ cur_dup_relo = cur_dup_pkt->relo_chain) {
+ cur_dup_pkt->relo_chain = cur_dup_relo->next;
+ dlthis->mysym->dload_deallocate(dlthis->mysym,
+ cur_dup_relo);
+ }
+
+ dlthis->mysym->dload_deallocate(dlthis->mysym, cur_dup_pkt);
+ }
+}
diff --git a/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c b/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c
new file mode 100644
index 00000000000..09cc64f213c
--- /dev/null
+++ b/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c
@@ -0,0 +1,164 @@
+/*
+ * tramp_table_c6000.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "dload_internal.h"
+
+/* These are defined in coff.h, but may not be available on all platforms
+ so we'll go ahead and define them here. */
+#ifndef R_C60LO16
+#define R_C60LO16 0x54 /* C60: MVK Low Half Register */
+#define R_C60HI16 0x55 /* C60: MVKH/MVKLH High Half Register */
+#endif
+
+#define C6X_TRAMP_WORD_COUNT 8
+#define C6X_TRAMP_MAX_RELOS 8
+
+/* THIS HASH FUNCTION MUST MATCH THE ONE IN reloc_table_c6000.c */
+#define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63)
+
+/* THIS MUST MATCH reloc_record_t FOR A SYMBOL BASED RELO */
+struct c6000_relo_record {
+ s32 vaddr;
+ s32 symndx;
+#ifndef _BIG_ENDIAN
+ u16 disp;
+ u16 type;
+#else
+ u16 type;
+ u16 disp;
+#endif
+};
+
+struct c6000_gen_code {
+ struct tramp_gen_code_hdr hdr;
+ u32 tramp_instrs[C6X_TRAMP_WORD_COUNT];
+ struct c6000_relo_record relos[C6X_TRAMP_MAX_RELOS];
+};
+
+/* Hash mapping for relos that can cause trampolines. */
+static const u16 tramp_map[] = {
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 0,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535,
+ 65535
+};
+
+static const struct c6000_gen_code tramp_gen_info[] = {
+ /* Tramp caused by R_C60PCR21 */
+ {
+ /* Header - 8 instructions, 2 relos */
+ {
+ sizeof(u32) * C6X_TRAMP_WORD_COUNT,
+ 2,
+ FIELD_OFFSET(struct c6000_gen_code, relos)
+ },
+
+ /* Trampoline instructions */
+ {
+ 0x053C54F7, /* STW.D2T2 B10, *sp--[2] */
+ 0x0500002A, /* || MVK.S2 <blank>, B10 */
+ 0x0500006A, /* MVKH.S2 <blank>, B10 */
+ 0x00280362, /* B.S2 B10 */
+ 0x053C52E6, /* LDW.D2T2 *++sp[2], B10 */
+ 0x00006000, /* NOP 4 */
+ 0x00000000, /* NOP */
+ 0x00000000 /* NOP */
+ },
+
+ /* Relocations */
+ {
+ {4, 0, 0, R_C60LO16},
+ {8, 0, 0, R_C60HI16},
+ {0, 0, 0, 0x0000},
+ {0, 0, 0, 0x0000},
+ {0, 0, 0, 0x0000},
+ {0, 0, 0, 0x0000},
+ {0, 0, 0, 0x0000},
+ {0, 0, 0, 0x0000}
+ }
+ }
+};
+
+/* TARGET SPECIFIC FUNCTIONS THAT MUST BE DEFINED */
+static u32 tramp_size_get(void)
+{
+ return sizeof(u32) * C6X_TRAMP_WORD_COUNT;
+}
+
+static u32 tramp_img_pkt_size_get(void)
+{
+ return sizeof(struct c6000_gen_code);
+}
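
To make the hash mapping above concrete: for R_C60LO16 (0x54) the macro evaluates to ((0x55 * 1845) >> 11) & 63 = 76 & 63 = 12, and tramp_map[12] is 65535, so no trampoline generator is selected for that relocation type; the lone 0 at index 10 is the hash slot whose relocation type selects tramp_gen_info[0]. A minimal sketch of the lookup, assuming it were added to the file above and that TRAMP_NO_GEN_AVAIL is the 65535 sentinel tested by dload_tramp_avail() in tramp.c:

/* Illustrative only: true when the hashed relocation type has a generator. */
static bool example_has_tramp_generator(u16 relo_type)
{
	return tramp_map[HASH_FUNC(relo_type)] != TRAMP_NO_GEN_AVAIL;
}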
diff --git a/drivers/staging/tidspbridge/gen/gb.c b/drivers/staging/tidspbridge/gen/gb.c
new file mode 100644
index 00000000000..06eb3d36122
--- /dev/null
+++ b/drivers/staging/tidspbridge/gen/gb.c
@@ -0,0 +1,167 @@
+/*
+ * gb.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Generic bitmap operations.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <linux/types.h>
+/* ----------------------------------- This */
+#include <dspbridge/gs.h>
+#include <dspbridge/gb.h>
+
+struct gb_t_map {
+ u32 len;
+ u32 wcnt;
+ u32 *words;
+};
+
+/*
+ * ======== gb_clear ========
+ * purpose:
+ * Clears a bit in the bit map.
+ */
+
+void gb_clear(struct gb_t_map *map, u32 bitn)
+{
+ u32 mask;
+
+ mask = 1L << (bitn % BITS_PER_LONG);
+ map->words[bitn / BITS_PER_LONG] &= ~mask;
+}
+
+/*
+ * ======== gb_create ========
+ * purpose:
+ * Creates a bit map.
+ */
+
+struct gb_t_map *gb_create(u32 len)
+{
+ struct gb_t_map *map;
+ u32 i;
+ map = (struct gb_t_map *)gs_alloc(sizeof(struct gb_t_map));
+ if (map != NULL) {
+ map->len = len;
+ map->wcnt = len / BITS_PER_LONG + 1;
+ map->words = (u32 *) gs_alloc(map->wcnt * sizeof(u32));
+ if (map->words != NULL) {
+ for (i = 0; i < map->wcnt; i++)
+ map->words[i] = 0L;
+
+ } else {
+ gs_frees(map, sizeof(struct gb_t_map));
+ map = NULL;
+ }
+ }
+
+ return map;
+}
+
+/*
+ * ======== gb_delete ========
+ * purpose:
+ * Frees a bit map.
+ */
+
+void gb_delete(struct gb_t_map *map)
+{
+ gs_frees(map->words, map->wcnt * sizeof(u32));
+ gs_frees(map, sizeof(struct gb_t_map));
+}
+
+/*
+ * ======== gb_findandset ========
+ * purpose:
+ * Finds a free bit and sets it.
+ */
+u32 gb_findandset(struct gb_t_map *map)
+{
+ u32 bitn;
+
+ bitn = gb_minclear(map);
+
+ if (bitn != GB_NOBITS)
+ gb_set(map, bitn);
+
+ return bitn;
+}
+
+/*
+ * ======== gb_minclear ========
+ * purpose:
+ * Returns the location of the first unset bit in the bit map.
+ */
+u32 gb_minclear(struct gb_t_map *map)
+{
+ u32 bit_location = 0;
+ u32 bit_acc = 0;
+ u32 i;
+ u32 bit;
+ u32 *word;
+
+ for (word = map->words, i = 0; i < map->wcnt; word++, i++) {
+ if (~*word) {
+ for (bit = 0; bit < BITS_PER_LONG; bit++, bit_acc++) {
+ if (bit_acc == map->len)
+ return GB_NOBITS;
+
+ if (~*word & (1L << bit)) {
+ bit_location = i * BITS_PER_LONG + bit;
+ return bit_location;
+ }
+
+ }
+ } else {
+ bit_acc += BITS_PER_LONG;
+ }
+ }
+
+ return GB_NOBITS;
+}
+
+/*
+ * ======== gb_set ========
+ * purpose:
+ * Sets a bit in the bit map.
+ */
+
+void gb_set(struct gb_t_map *map, u32 bitn)
+{
+ u32 mask;
+
+ mask = 1L << (bitn % BITS_PER_LONG);
+ map->words[bitn / BITS_PER_LONG] |= mask;
+}
+
+/*
+ * ======== gb_test ========
+ * purpose:
+ * Returns true if the bit is set in the specified location.
+ */
+
+bool gb_test(struct gb_t_map *map, u32 bitn)
+{
+ bool state;
+ u32 mask;
+ u32 word;
+
+ mask = 1L << (bitn % BITS_PER_LONG);
+ word = map->words[bitn / BITS_PER_LONG];
+ state = word & mask ? true : false;
+
+ return state;
+}
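
For reference, a minimal usage sketch of the gb bitmap API above. The caller, error values and bit count are illustrative, and gs_init() is assumed to have been called so gs_alloc() is usable:

#include <linux/errno.h>
#include <linux/types.h>
#include <dspbridge/gb.h>

/* Hypothetical caller: reserve the first free slot out of 32, then release it. */
static int example_reserve_slot(void)
{
	struct gb_t_map *map;
	u32 slot;

	map = gb_create(32);		/* bitmap with 32 usable bits */
	if (!map)
		return -ENOMEM;

	slot = gb_findandset(map);	/* first clear bit, set on return */
	if (slot == GB_NOBITS) {
		gb_delete(map);
		return -ENOSPC;
	}

	/* ... use the slot ... */

	if (gb_test(map, slot))		/* bit is currently set */
		gb_clear(map, slot);	/* release it again */

	gb_delete(map);
	return 0;
}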
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
new file mode 100644
index 00000000000..f72d943c480
--- /dev/null
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -0,0 +1,215 @@
+/*
+ * gh.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+#include <dspbridge/host_os.h>
+
+#include <dspbridge/gs.h>
+
+#include <dspbridge/gh.h>
+
+struct element {
+ struct element *next;
+ u8 data[1];
+};
+
+struct gh_t_hash_tab {
+ u16 max_bucket;
+ u16 val_size;
+ struct element **buckets;
+ u16(*hash) (void *, u16);
+ bool(*match) (void *, void *);
+ void (*delete) (void *);
+};
+
+static void noop(void *p);
+static s32 cur_init;
+static void myfree(void *ptr, s32 size);
+
+/*
+ * ======== gh_create ========
+ */
+
+struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
+ u16(*hash) (void *, u16), bool(*match) (void *,
+ void *),
+ void (*delete) (void *))
+{
+ struct gh_t_hash_tab *hash_tab;
+ u16 i;
+ hash_tab =
+ (struct gh_t_hash_tab *)gs_alloc(sizeof(struct gh_t_hash_tab));
+ if (hash_tab == NULL)
+ return NULL;
+ hash_tab->max_bucket = max_bucket;
+ hash_tab->val_size = val_size;
+ hash_tab->hash = hash;
+ hash_tab->match = match;
+ hash_tab->delete = delete == NULL ? noop : delete;
+
+ hash_tab->buckets = (struct element **)
+ gs_alloc(sizeof(struct element *) * max_bucket);
+ if (hash_tab->buckets == NULL) {
+ gh_delete(hash_tab);
+ return NULL;
+ }
+
+ for (i = 0; i < max_bucket; i++)
+ hash_tab->buckets[i] = NULL;
+
+ return hash_tab;
+}
+
+/*
+ * ======== gh_delete ========
+ */
+void gh_delete(struct gh_t_hash_tab *hash_tab)
+{
+ struct element *elem, *next;
+ u16 i;
+
+ if (hash_tab != NULL) {
+ if (hash_tab->buckets != NULL) {
+ for (i = 0; i < hash_tab->max_bucket; i++) {
+ for (elem = hash_tab->buckets[i]; elem != NULL;
+ elem = next) {
+ next = elem->next;
+ (*hash_tab->delete) (elem->data);
+ myfree(elem,
+ sizeof(struct element) - 1 +
+ hash_tab->val_size);
+ }
+ }
+
+ myfree(hash_tab->buckets, sizeof(struct element *)
+ * hash_tab->max_bucket);
+ }
+
+ myfree(hash_tab, sizeof(struct gh_t_hash_tab));
+ }
+}
+
+/*
+ * ======== gh_exit ========
+ */
+
+void gh_exit(void)
+{
+ if (cur_init-- == 1)
+ gs_exit();
+
+}
+
+/*
+ * ======== gh_find ========
+ */
+
+void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
+{
+ struct element *elem;
+
+ elem = hash_tab->buckets[(*hash_tab->hash) (key, hash_tab->max_bucket)];
+
+ for (; elem; elem = elem->next) {
+ if ((*hash_tab->match) (key, elem->data))
+ return elem->data;
+ }
+
+ return NULL;
+}
+
+/*
+ * ======== gh_init ========
+ */
+
+void gh_init(void)
+{
+ if (cur_init++ == 0)
+ gs_init();
+}
+
+/*
+ * ======== gh_insert ========
+ */
+
+void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value)
+{
+ struct element *elem;
+ u16 i;
+ char *src, *dst;
+
+ elem = (struct element *)gs_alloc(sizeof(struct element) - 1 +
+ hash_tab->val_size);
+ if (elem != NULL) {
+
+ dst = (char *)elem->data;
+ src = (char *)value;
+ for (i = 0; i < hash_tab->val_size; i++)
+ *dst++ = *src++;
+
+ i = (*hash_tab->hash) (key, hash_tab->max_bucket);
+ elem->next = hash_tab->buckets[i];
+ hash_tab->buckets[i] = elem;
+
+ return elem->data;
+ }
+
+ return NULL;
+}
+
+/*
+ * ======== noop ========
+ */
+/* ARGSUSED */
+static void noop(void *p)
+{
+ p = p; /* stifle compiler warning */
+}
+
+/*
+ * ======== myfree ========
+ */
+static void myfree(void *ptr, s32 size)
+{
+ gs_free(ptr);
+}
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/**
+ * gh_iterate() - This function goes through all the elements in the hash table
+ * looking for the DSP symbols.
+ * @hash_tab: Hash table
+ * @callback: pointer to callback function
+ * @user_data: User data, contains the find_symbol_context pointer
+ *
+ */
+void gh_iterate(struct gh_t_hash_tab *hash_tab,
+ void (*callback)(void *, void *), void *user_data)
+{
+ struct element *elem;
+ u32 i;
+
+ if (hash_tab && hash_tab->buckets)
+ for (i = 0; i < hash_tab->max_bucket; i++) {
+ elem = hash_tab->buckets[i];
+ while (elem) {
+ callback(&elem->data, user_data);
+ elem = elem->next;
+ }
+ }
+}
+#endif
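
A minimal, hypothetical usage sketch of the gh hash table above: keys are hashed by a caller-supplied function, compared by a caller-supplied matcher against the stored value, and val_size bytes of the value are copied into the table by gh_insert(). All names below are illustrative:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <dspbridge/gh.h>

struct sym_val {
	char *name;	/* key the value was stored under */
	u32 addr;
};

/* Trivial string hash into [0, max_bucket) */
static u16 sym_hash(void *key, u16 max_bucket)
{
	char *s = key;
	u16 h = 0;

	while (*s)
		h = h * 31 + *s++;

	return h % max_bucket;
}

/* gh_find() calls match(key, stored_value), so compare against the name
 * kept inside the stored value. */
static bool sym_match(void *key, void *value)
{
	return strcmp(key, ((struct sym_val *)value)->name) == 0;
}

static void example_sym_table(void)
{
	char key[] = "reset_vector";
	struct sym_val val = { key, 0x1000 };
	struct gh_t_hash_tab *tab;
	struct sym_val *found;

	gh_init();
	tab = gh_create(64, sizeof(struct sym_val), sym_hash, sym_match, NULL);
	if (!tab) {
		gh_exit();
		return;
	}

	gh_insert(tab, key, &val);	/* copies val_size bytes of val */
	found = gh_find(tab, key);	/* stored copy, or NULL if absent */
	if (found)
		pr_info("reset_vector at 0x%x\n", found->addr);

	gh_delete(tab);
	gh_exit();
}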
diff --git a/drivers/staging/tidspbridge/gen/gs.c b/drivers/staging/tidspbridge/gen/gs.c
new file mode 100644
index 00000000000..9fc614439ba
--- /dev/null
+++ b/drivers/staging/tidspbridge/gen/gs.c
@@ -0,0 +1,89 @@
+/*
+ * gs.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * General storage memory allocator services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+#include <linux/types.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/gs.h>
+
+#include <linux/slab.h>
+
+/* ----------------------------------- Globals */
+static u32 cumsize;
+
+/*
+ * ======== gs_alloc ========
+ * purpose:
+ * Allocates memory of the specified size.
+ */
+void *gs_alloc(u32 size)
+{
+ void *p;
+
+ p = kzalloc(size, GFP_KERNEL);
+ if (p == NULL)
+ return NULL;
+ cumsize += size;
+ return p;
+}
+
+/*
+ * ======== gs_exit ========
+ * purpose:
+ * Discontinue the usage of the GS module.
+ */
+void gs_exit(void)
+{
+ /* Do nothing */
+}
+
+/*
+ * ======== gs_free ========
+ * purpose:
+ * Frees the memory.
+ */
+void gs_free(void *ptr)
+{
+ kfree(ptr);
+ /* ack! no size info */
+ /* cumsize -= size; */
+}
+
+/*
+ * ======== gs_frees ========
+ * purpose:
+ * Frees the memory and decrements the cumulative allocation count.
+ */
+void gs_frees(void *ptr, u32 size)
+{
+ kfree(ptr);
+ cumsize -= size;
+}
+
+/*
+ * ======== gs_init ========
+ * purpose:
+ * Initializes the GS module.
+ */
+void gs_init(void)
+{
+ /* Do nothing */
+}
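
A short, hypothetical pairing of the gs calls above; gs_frees() is the variant to use when the caller still knows the allocation size, so the module's cumulative byte count stays balanced:

#include <linux/types.h>
#include <dspbridge/gs.h>

static void example_gs_usage(void)
{
	u32 size = 128;
	void *buf;

	gs_init();
	buf = gs_alloc(size);		/* zeroed allocation, GFP_KERNEL */
	if (buf)
		gs_frees(buf, size);	/* size known: counter decremented */
	gs_exit();
}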
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
new file mode 100644
index 00000000000..da39c4fbf33
--- /dev/null
+++ b/drivers/staging/tidspbridge/gen/uuidutil.c
@@ -0,0 +1,113 @@
+/*
+ * uuidutil.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This file contains the implementation of UUID helper functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/uuidutil.h>
+
+/*
+ * ======== uuid_uuid_to_string ========
+ * Purpose:
+ * Converts a struct dsp_uuid to a string.
+ * Note: snprintf format specifier is:
+ * %[flags] [width] [.precision] [{h | l | I64 | L}]type
+ */
+void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
+ s32 size)
+{
+ s32 i; /* return result from snprintf. */
+
+ DBC_REQUIRE(uuid_obj && sz_uuid);
+
+ i = snprintf(sz_uuid, size,
+ "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
+ uuid_obj->ul_data1, uuid_obj->us_data2, uuid_obj->us_data3,
+ uuid_obj->uc_data4, uuid_obj->uc_data5,
+ uuid_obj->uc_data6[0], uuid_obj->uc_data6[1],
+ uuid_obj->uc_data6[2], uuid_obj->uc_data6[3],
+ uuid_obj->uc_data6[4], uuid_obj->uc_data6[5]);
+
+ DBC_ENSURE(i != -1);
+}
+
+static s32 uuid_hex_to_bin(char *buf, s32 len)
+{
+ s32 i;
+ s32 result = 0;
+ int value;
+
+ for (i = 0; i < len; i++) {
+ value = hex_to_bin(*buf++);
+ result *= 16;
+ if (value > 0)
+ result += value;
+ }
+
+ return result;
+}
+
+/*
+ * ======== uuid_uuid_from_string ========
+ * Purpose:
+ * Converts a string to a struct dsp_uuid.
+ */
+void uuid_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
+{
+ s32 j;
+
+ uuid_obj->ul_data1 = uuid_hex_to_bin(sz_uuid, 8);
+ sz_uuid += 8;
+
+ /* Step over underscore */
+ sz_uuid++;
+
+ uuid_obj->us_data2 = (u16) uuid_hex_to_bin(sz_uuid, 4);
+ sz_uuid += 4;
+
+ /* Step over underscore */
+ sz_uuid++;
+
+ uuid_obj->us_data3 = (u16) uuid_hex_to_bin(sz_uuid, 4);
+ sz_uuid += 4;
+
+ /* Step over underscore */
+ sz_uuid++;
+
+ uuid_obj->uc_data4 = (u8) uuid_hex_to_bin(sz_uuid, 2);
+ sz_uuid += 2;
+
+ uuid_obj->uc_data5 = (u8) uuid_hex_to_bin(sz_uuid, 2);
+ sz_uuid += 2;
+
+ /* Step over underscore */
+ sz_uuid++;
+
+ for (j = 0; j < 6; j++) {
+ uuid_obj->uc_data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2);
+ sz_uuid += 2;
+ }
+}
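
The two helpers above are inverses over the fixed 8_4_4_2+2_12 hex layout, which is 36 characters plus the terminator. A hypothetical round-trip, assuming a caller-provided struct dsp_uuid:

#include <dspbridge/uuidutil.h>

static void example_uuid_round_trip(struct dsp_uuid *uuid_obj)
{
	/* "XXXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXX" is 36 chars + NUL */
	char buf[37];
	struct dsp_uuid copy;

	uuid_uuid_to_string(uuid_obj, buf, sizeof(buf));
	uuid_uuid_from_string(buf, &copy);	/* copy now equals *uuid_obj */
}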
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 00000000000..e48d7f67c60
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
+/*
+ * EasiGlobal.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _EASIGLOBAL_H
+#define _EASIGLOBAL_H
+#include <linux/types.h>
+
+/*
+ * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
+ *
+ * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
+ */
+
+#define READ_ONLY 1
+#define WRITE_ONLY 2
+#define READ_WRITE 3
+
+/*
+ * MACRO: _DEBUG_LEVEL1_EASI
+ *
+ * DESCRIPTION: A MACRO which can be used to indicate that a particular
+ * register access function was called.
+ *
+ * NOTE: We currently don't use this functionality.
+ */
+#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
+
+#endif /* _EASIGLOBAL_H */
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 00000000000..1cefca321d7
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
+/*
+ * MMUAccInt.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_ACC_INT_H
+#define _MMU_ACC_INT_H
+
+/* Mappings of level 1 EASI function numbers to function names */
+
+#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
+#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
+#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
+#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
+#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
+#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
+#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
+#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
+#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
+#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
+#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
+#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
+#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
+#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
+#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
+#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
+#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
+#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
+#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
+#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
+
+/* Register offset address definitions */
+#define MMU_MMU_SYSCONFIG_OFFSET 0x10
+#define MMU_MMU_IRQSTATUS_OFFSET 0x18
+#define MMU_MMU_IRQENABLE_OFFSET 0x1c
+#define MMU_MMU_WALKING_ST_OFFSET 0x40
+#define MMU_MMU_CNTL_OFFSET 0x44
+#define MMU_MMU_FAULT_AD_OFFSET 0x48
+#define MMU_MMU_TTB_OFFSET 0x4c
+#define MMU_MMU_LOCK_OFFSET 0x50
+#define MMU_MMU_LD_TLB_OFFSET 0x54
+#define MMU_MMU_CAM_OFFSET 0x58
+#define MMU_MMU_RAM_OFFSET 0x5c
+#define MMU_MMU_GFLUSH_OFFSET 0x60
+#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
+/* Bitfield mask and offset declarations */
+#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
+#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
+#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
+#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
+#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
+#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
+#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
+#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
+#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
+#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
+#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
+#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
+#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
+#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
+
+#endif /* _MMU_ACC_INT_H */
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 00000000000..ab1a16da731
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
+/*
+ * MMURegAcM.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_REG_ACM_H
+#define _MMU_REG_ACM_H
+
+#include <linux/io.h>
+#include <EasiGlobal.h>
+
+#include "MMUAccInt.h"
+
+#if defined(USE_LEVEL_1_MACROS)
+
+#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
+ __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
+
+#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+ register u32 data = __raw_readl((base_address)+offset);\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
+ data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
+ new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
+ new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
+ new_value |= data;\
+ __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+ register u32 data = __raw_readl((base_address)+offset);\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
+ data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
+ new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
+ new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
+ new_value |= data;\
+ __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
+ (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
+ __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
+
+#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
+ __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
+
+#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
+ (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
+ & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
+ MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
+
+#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
+ (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
+ MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
+ MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
+
+#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_CNTL_OFFSET;\
+ register u32 data = __raw_readl((base_address)+offset);\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
+ data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
+ new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
+ new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
+ new_value |= data;\
+ __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_CNTL_OFFSET;\
+ register u32 data = __raw_readl((base_address)+offset);\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
+ data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
+ new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
+ new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
+ new_value |= data;\
+ __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
+ __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
+
+#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_TTB_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
+ __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
+
+#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LOCK_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
+ (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+ MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
+ MMU_MMU_LOCK_BASE_VALUE_OFFSET))
+
+#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LOCK_OFFSET;\
+ register u32 data = __raw_readl((base_address)+offset);\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
+ data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
+ new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
+ new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
+ new_value |= data;\
+ __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
+ (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+ MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
+ MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LOCK_OFFSET;\
+ register u32 data = __raw_readl((base_address)+offset);\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
+ data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
+ new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
+ new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
+ new_value |= data;\
+ __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
+ (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
+ (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
+ MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
+
+#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
+ (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
+ __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
+
+#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_CAM_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_RAM_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
+{\
+ const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
+ register u32 new_value = (value);\
+ _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
+ __raw_writel(new_value, (base_address)+offset);\
+}
+
+#endif /* USE_LEVEL_1_MACROS */
+
+#endif /* _MMU_REG_ACM_H */
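
Each *_WRITE32 macro above that touches a bitfield follows the same read-modify-write pattern: read the register, clear the field with its MASK, shift the new value into place by its OFFSET, mask it, OR the untouched bits back in, and write the result. A generic restatement of that pattern (the helper name is illustrative, not part of the driver):

#include <linux/io.h>
#include <linux/types.h>

/* Only the bits selected by 'mask' change; all other bits are preserved. */
static void example_field_write32(void __iomem *base, u32 offset,
				  u32 mask, u32 shift, u32 value)
{
	u32 reg = __raw_readl(base + offset);

	reg &= ~mask;				/* clear the target field */
	reg |= (value << shift) & mask;		/* insert the new value */
	__raw_writel(reg, base + offset);	/* write back with other bits intact */
}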
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 00000000000..d5266d4c163
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
+/*
+ * hw_defs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global HW definitions
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HW_DEFS_H
+#define _HW_DEFS_H
+
+/* Page size */
+#define HW_PAGE_SIZE4KB 0x1000
+#define HW_PAGE_SIZE64KB 0x10000
+#define HW_PAGE_SIZE1MB 0x100000
+#define HW_PAGE_SIZE16MB 0x1000000
+
+/* hw_status: return type for HW API */
+typedef long hw_status;
+
+/* Macro used to set and clear any bit */
+#define HW_CLEAR 0
+#define HW_SET 1
+
+/* hw_endianism_t: Enumerated Type used to specify the endianism
+ * Do NOT change these values. They are used as bit fields. */
+enum hw_endianism_t {
+ HW_LITTLE_ENDIAN,
+ HW_BIG_ENDIAN
+};
+
+/* hw_element_size_t: Enumerated Type used to specify the element size
+ * Do NOT change these values. They are used as bit fields. */
+enum hw_element_size_t {
+ HW_ELEM_SIZE8BIT,
+ HW_ELEM_SIZE16BIT,
+ HW_ELEM_SIZE32BIT,
+ HW_ELEM_SIZE64BIT
+};
+
+/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
+enum hw_idle_mode_t {
+ HW_FORCE_IDLE,
+ HW_NO_IDLE,
+ HW_SMART_IDLE
+};
+
+#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 00000000000..014f5d5293a
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
+/*
+ * hw_mmu.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * API definitions to setup MMU TLB and PTE
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/io.h>
+#include "MMURegAcM.h"
+#include <hw_defs.h>
+#include <hw_mmu.h>
+#include <linux/types.h>
+#include <linux/err.h>
+
+#define MMU_BASE_VAL_MASK 0xFC00
+#define MMU_PAGE_MAX 3
+#define MMU_ELEMENTSIZE_MAX 3
+#define MMU_ADDR_MASK 0xFFFFF000
+#define MMU_TTB_MASK 0xFFFFC000
+#define MMU_SECTION_ADDR_MASK 0xFFF00000
+#define MMU_SSECTION_ADDR_MASK 0xFF000000
+#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
+#define MMU_LARGE_PAGE_MASK 0xFFFF0000
+#define MMU_SMALL_PAGE_MASK 0xFFFFF000
+
+#define MMU_LOAD_TLB 0x00000001
+#define MMU_GFLUSH 0x60
+
+/*
+ * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS)
+ */
+enum hw_mmu_page_size_t {
+ HW_MMU_SECTION,
+ HW_MMU_LARGE_PAGE,
+ HW_MMU_SMALL_PAGE,
+ HW_MMU_SUPERSECTION
+};
+
+/*
+ * FUNCTION : mmu_flush_entry
+ *
+ * INPUTS:
+ *
+ * Identifier : base_address
+ * Type : const u32
+ * Description : Base Address of instance of MMU module
+ *
+ * RETURNS:
+ *
+ * Type : hw_status
+ * Description : 0 -- No errors occurred
+ * RET_BAD_NULL_PARAM -- A Pointer
+ * Parameter was set to NULL
+ *
+ * PURPOSE: : Flush the TLB entry pointed to by the
+ * lock counter register,
+ * even if this entry is set protected
+ *
+ * METHOD: : Check the Input parameter and Flush a
+ * single entry in the TLB.
+ */
+static hw_status mmu_flush_entry(const void __iomem *base_address);
+
+/*
+ * FUNCTION : mmu_set_cam_entry
+ *
+ * INPUTS:
+ *
+ * Identifier : base_address
+ * Type : const u32
+ * Description : Base Address of instance of MMU module
+ *
+ * Identifier : page_sz
+ * Type : const u32
+ * Description : It indicates the page size
+ *
+ * Identifier : preserved_bit
+ * Type : const u32
+ * Description : It indicates whether the TLB entry is a
+ * preserved entry or not
+ *
+ * Identifier : valid_bit
+ * Type : const u32
+ * Description : It indicates whether the TLB entry is valid or not
+ *
+ *
+ * Identifier : virtual_addr_tag
+ * Type : const u32
+ * Description : virtual Address
+ *
+ * RETURNS:
+ *
+ * Type : hw_status
+ * Description : 0 -- No errors occurred
+ * RET_BAD_NULL_PARAM -- A Pointer Parameter
+ * was set to NULL
+ * RET_PARAM_OUT_OF_RANGE -- Input Parameter out
+ * of Range
+ *
+ * PURPOSE: : Set MMU_CAM reg
+ *
+ * METHOD: : Check the Input parameters and set the CAM entry.
+ */
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+ const u32 page_sz,
+ const u32 preserved_bit,
+ const u32 valid_bit,
+ const u32 virtual_addr_tag);
+
+/*
+ * FUNCTION : mmu_set_ram_entry
+ *
+ * INPUTS:
+ *
+ * Identifier : base_address
+ * Type : const u32
+ * Description : Base Address of instance of MMU module
+ *
+ * Identifier : physical_addr
+ * Type : const u32
+ * Description : Physical Address to which the corresponding
+ * virtual Address should point
+ *
+ * Identifier : endianism
+ * Type : hw_endianism_t
+ * Description : endianism for the given page
+ *
+ * Identifier : element_size
+ * Type : hw_element_size_t
+ * Description : The element size ( 8,16, 32 or 64 bit)
+ *
+ * Identifier : mixed_size
+ * Type : hw_mmu_mixed_size_t
+ * Description : Element Size to follow CPU or TLB
+ *
+ * RETURNS:
+ *
+ * Type : hw_status
+ * Description : 0 -- No errors occurred
+ * RET_BAD_NULL_PARAM -- A Pointer Parameter
+ * was set to NULL
+ * RET_PARAM_OUT_OF_RANGE -- Input Parameter
+ * out of Range
+ *
+ * PURPOSE: : Set MMU_CAM reg
+ *
+ * METHOD: : Check the Input parameters and set the RAM entry.
+ */
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+ const u32 physical_addr,
+ enum hw_endianism_t endianism,
+ enum hw_element_size_t element_size,
+ enum hw_mmu_mixed_size_t mixed_size);
+
+/* HW FUNCTIONS */
+
+hw_status hw_mmu_enable(const void __iomem *base_address)
+{
+ hw_status status = 0;
+
+ MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
+
+ return status;
+}
+
+hw_status hw_mmu_disable(const void __iomem *base_address)
+{
+ hw_status status = 0;
+
+ MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
+
+ return status;
+}
+
+hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+ u32 num_locked_entries)
+{
+ hw_status status = 0;
+
+ MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
+
+ return status;
+}
+
+hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+ u32 victim_entry_num)
+{
+ hw_status status = 0;
+
+ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
+
+ return status;
+}
+
+hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
+{
+ hw_status status = 0;
+
+ MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
+
+ return status;
+}
+
+hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
+{
+ hw_status status = 0;
+ u32 irq_reg;
+
+ irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
+
+ MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
+
+ return status;
+}
+
+hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
+{
+ hw_status status = 0;
+ u32 irq_reg;
+
+ irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
+
+ MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
+
+ return status;
+}
+
+hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
+{
+ hw_status status = 0;
+
+ *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
+
+ return status;
+}
+
+hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
+{
+ hw_status status = 0;
+
+ /* read values from register */
+ *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
+
+ return status;
+}
+
+hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
+{
+ hw_status status = 0;
+ u32 load_ttb;
+
+ load_ttb = ttb_phys_addr & ~0x7FUL;
+ /* write values to register */
+ MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
+
+ return status;
+}
+
+hw_status hw_mmu_twl_enable(const void __iomem *base_address)
+{
+ hw_status status = 0;
+
+ MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
+
+ return status;
+}
+
+hw_status hw_mmu_twl_disable(const void __iomem *base_address)
+{
+ hw_status status = 0;
+
+ MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
+
+ return status;
+}
+
+hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
+ u32 page_sz)
+{
+ hw_status status = 0;
+ u32 virtual_addr_tag;
+ enum hw_mmu_page_size_t pg_size_bits;
+
+ switch (page_sz) {
+ case HW_PAGE_SIZE4KB:
+ pg_size_bits = HW_MMU_SMALL_PAGE;
+ break;
+
+ case HW_PAGE_SIZE64KB:
+ pg_size_bits = HW_MMU_LARGE_PAGE;
+ break;
+
+ case HW_PAGE_SIZE1MB:
+ pg_size_bits = HW_MMU_SECTION;
+ break;
+
+ case HW_PAGE_SIZE16MB:
+ pg_size_bits = HW_MMU_SUPERSECTION;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Generate the 20-bit tag from virtual address */
+ virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+ mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
+
+ mmu_flush_entry(base_address);
+
+ return status;
+}
+
+hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_sz,
+ u32 entry_num,
+ struct hw_mmu_map_attrs_t *map_attrs,
+ s8 preserved_bit, s8 valid_bit)
+{
+ hw_status status = 0;
+ u32 lock_reg;
+ u32 virtual_addr_tag;
+ enum hw_mmu_page_size_t mmu_pg_size;
+
+ /* Check the input parameters */
+ switch (page_sz) {
+ case HW_PAGE_SIZE4KB:
+ mmu_pg_size = HW_MMU_SMALL_PAGE;
+ break;
+
+ case HW_PAGE_SIZE64KB:
+ mmu_pg_size = HW_MMU_LARGE_PAGE;
+ break;
+
+ case HW_PAGE_SIZE1MB:
+ mmu_pg_size = HW_MMU_SECTION;
+ break;
+
+ case HW_PAGE_SIZE16MB:
+ mmu_pg_size = HW_MMU_SUPERSECTION;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
+
+ /* Generate the 20-bit tag from virtual address */
+ virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+ /* Write the fields in the CAM Entry Register */
+ mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
+ virtual_addr_tag);
+
+ /* Write the different fields of the RAM Entry Register */
+ /* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
+ mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
+ map_attrs->element_size, map_attrs->mixed_size);
+
+ /* Update the MMU Lock Register */
+ /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
+ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
+
+ /* Enable loading of an entry in TLB by writing 1
+ into LD_TLB_REG register */
+ MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
+
+ MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
+
+ return status;
+}
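
hw_mmu_tlb_add() above programs one TLB entry: it snapshots the lock register, writes the CAM tag and RAM attributes, points the current-victim index at entry_num, triggers the load through LD_TLB, and restores the lock register. A hypothetical call pinning a 4 KB mapping in entry 0; the base address and physical/virtual addresses are placeholders, and the mixed_size enum value is assumed to be 0 (its constant name is defined in hw_mmu.h, not shown here):

/* Hypothetical caller, not part of the driver. */
static hw_status example_pin_page(void __iomem *dsp_mmu_base)
{
	struct hw_mmu_map_attrs_t attrs = {
		.endianism	= HW_LITTLE_ENDIAN,
		.element_size	= HW_ELEM_SIZE32BIT,
		.mixed_size	= 0,	/* assumed: follow TLB element size */
	};

	return hw_mmu_tlb_add(dsp_mmu_base,
			      0x87000000,	/* physical address (placeholder) */
			      0x11000000,	/* DSP virtual address (placeholder) */
			      HW_PAGE_SIZE4KB,
			      0,		/* TLB entry number */
			      &attrs,
			      1,		/* preserved */
			      1);		/* valid */
}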
+
+hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
+{
+ hw_status status = 0;
+ u32 pte_addr, pte_val;
+ s32 num_entries = 1;
+
+ switch (page_sz) {
+ case HW_PAGE_SIZE4KB:
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr &
+ MMU_SMALL_PAGE_MASK);
+ pte_val =
+ ((physical_addr & MMU_SMALL_PAGE_MASK) |
+ (map_attrs->endianism << 9) | (map_attrs->
+ element_size << 4) |
+ (map_attrs->mixed_size << 11) | 2);
+ break;
+
+ case HW_PAGE_SIZE64KB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr &
+ MMU_LARGE_PAGE_MASK);
+ pte_val =
+ ((physical_addr & MMU_LARGE_PAGE_MASK) |
+ (map_attrs->endianism << 9) | (map_attrs->
+ element_size << 4) |
+ (map_attrs->mixed_size << 11) | 1);
+ break;
+
+ case HW_PAGE_SIZE1MB:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ pte_val =
+ ((((physical_addr & MMU_SECTION_ADDR_MASK) |
+ (map_attrs->endianism << 15) | (map_attrs->
+ element_size << 10) |
+ (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
+ break;
+
+ case HW_PAGE_SIZE16MB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr &
+ MMU_SSECTION_ADDR_MASK);
+ pte_val =
+ (((physical_addr & MMU_SSECTION_ADDR_MASK) |
+ (map_attrs->endianism << 15) | (map_attrs->
+ element_size << 10) |
+ (map_attrs->mixed_size << 17)
+ ) | 0x40000 | 0x2);
+ break;
+
+ case HW_MMU_COARSE_PAGE_SIZE:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (--num_entries >= 0)
+ ((u32 *) pte_addr)[num_entries] = pte_val;
+
+ return status;
+}
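To make the bit packing above easier to follow, here is a minimal sketch that recomputes the 4 KB (small page) descriptor value written by hw_mmu_pte_set(). It assumes MMU_SMALL_PAGE_MASK is 0xFFFFF000 (the mask is defined outside this hunk) and is illustrative only, not driver code.

static u32 example_small_page_pte(u32 physical_addr,
				  const struct hw_mmu_map_attrs_t *attrs)
{
	/* Same packing as the HW_PAGE_SIZE4KB case above:
	 * bits 31:12  physical page frame,
	 * bit 9       endianism, bits starting at 4 element size,
	 * bit 11      mixed (CPU/TLB) element size,
	 * low bits    0b10 = small-page descriptor.
	 */
	return (physical_addr & 0xFFFFF000) |	/* assumed MMU_SMALL_PAGE_MASK */
	       (attrs->endianism << 9) |
	       (attrs->element_size << 4) |
	       (attrs->mixed_size << 11) |
	       2;
}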
+
+hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
+{
+ hw_status status = 0;
+ u32 pte_addr;
+ s32 num_entries = 1;
+
+ switch (page_size) {
+ case HW_PAGE_SIZE4KB:
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr &
+ MMU_SMALL_PAGE_MASK);
+ break;
+
+ case HW_PAGE_SIZE64KB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr &
+ MMU_LARGE_PAGE_MASK);
+ break;
+
+ case HW_PAGE_SIZE1MB:
+ case HW_MMU_COARSE_PAGE_SIZE:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ break;
+
+ case HW_PAGE_SIZE16MB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr &
+ MMU_SSECTION_ADDR_MASK);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (--num_entries >= 0)
+ ((u32 *) pte_addr)[num_entries] = 0;
+
+ return status;
+}
+
+/* mmu_flush_entry */
+static hw_status mmu_flush_entry(const void __iomem *base_address)
+{
+ hw_status status = 0;
+ u32 flush_entry_data = 0x1;
+
+ /* write values to register */
+ MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
+
+ return status;
+}
+
+/* mmu_set_cam_entry */
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+ const u32 page_sz,
+ const u32 preserved_bit,
+ const u32 valid_bit,
+ const u32 virtual_addr_tag)
+{
+ hw_status status = 0;
+ u32 mmu_cam_reg;
+
+ mmu_cam_reg = (virtual_addr_tag << 12);
+ mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
+ (preserved_bit << 3);
+
+ /* write values to register */
+ MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
+
+ return status;
+}
+
+/* mmu_set_ram_entry */
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+ const u32 physical_addr,
+ enum hw_endianism_t endianism,
+ enum hw_element_size_t element_size,
+ enum hw_mmu_mixed_size_t mixed_size)
+{
+ hw_status status = 0;
+ u32 mmu_ram_reg;
+
+ mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
+ mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
+ (mixed_size << 6));
+
+ /* write values to register */
+ MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
+
+ return status;
+
+}
+
+void hw_mmu_tlb_flush_all(const void __iomem *base)
+{
+ __raw_writeb(1, base + MMU_GFLUSH);
+}
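For orientation only, the exported functions in this file are typically called in a sequence like the sketch below. This is an assumed usage pattern, not driver code: hw_mmu_enable() is declared in hw_mmu.h (next hunk), HW_LITTLE_ENDIAN, HW_ELEM_SIZE16BIT and HW_PAGE_SIZE1MB are names assumed from the hardware definitions elsewhere in this series, and the addresses are placeholders.

static void example_dsp_mmu_bringup(void __iomem *mmu_base, u32 ttb_pa)
{
	struct hw_mmu_map_attrs_t attrs = {
		.endianism    = HW_LITTLE_ENDIAN,	/* assumed enum value */
		.element_size = HW_ELEM_SIZE16BIT,	/* assumed enum value */
		.mixed_size   = HW_MMU_CPUES,
	};

	hw_mmu_ttb_set(mmu_base, ttb_pa);	/* TTB low 7 bits are masked, so it is 128-byte aligned */
	hw_mmu_twl_enable(mmu_base);		/* turn on the hardware table walker */
	hw_mmu_event_enable(mmu_base, HW_MMU_ALL_INTERRUPTS);

	/* Pin one 1 MB mapping for the firmware image in TLB entry 0. */
	hw_mmu_tlb_add(mmu_base, 0x87000000, 0x11000000, HW_PAGE_SIZE1MB,
		       0, &attrs, 1, 1);

	hw_mmu_enable(mmu_base);
	/* Error handling is omitted; every call returns an hw_status. */
}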
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 00000000000..1458a2c6027
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
+/*
+ * hw_mmu.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * MMU types and API declarations
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HW_MMU_H
+#define _HW_MMU_H
+
+#include <linux/types.h>
+
+/* Bitmasks for interrupt sources */
+#define HW_MMU_TRANSLATION_FAULT 0x2
+#define HW_MMU_ALL_INTERRUPTS 0x1F
+
+#define HW_MMU_COARSE_PAGE_SIZE 0x400
+
+/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
+ CPU/TLB Element size */
+enum hw_mmu_mixed_size_t {
+ HW_MMU_TLBES,
+ HW_MMU_CPUES
+};
+
+/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
+struct hw_mmu_map_attrs_t {
+ enum hw_endianism_t endianism;
+ enum hw_element_size_t element_size;
+ enum hw_mmu_mixed_size_t mixed_size;
+ bool donotlockmpupage;
+};
+
+extern hw_status hw_mmu_enable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_disable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+ u32 num_locked_entries);
+
+extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+ u32 victim_entry_num);
+
+/* For MMU faults */
+extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
+ u32 irq_mask);
+
+extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
+ u32 irq_mask);
+
+extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
+ u32 irq_mask);
+
+extern hw_status hw_mmu_event_status(const void __iomem *base_address,
+ u32 *irq_mask);
+
+extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
+ u32 *addr);
+
+/* Set the TT base address */
+extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
+ u32 ttb_phys_addr);
+
+extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
+ u32 virtual_addr, u32 page_sz);
+
+extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_sz,
+ u32 entry_num,
+ struct hw_mmu_map_attrs_t *map_attrs,
+ s8 preserved_bit, s8 valid_bit);
+
+/* For PTEs */
+extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_sz,
+ struct hw_mmu_map_attrs_t *map_attrs);
+
+extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
+ u32 virtual_addr, u32 page_size);
+
+void hw_mmu_tlb_flush_all(const void __iomem *base);
+
+static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
+{
+ u32 pte_addr;
+ u32 va31_to20;
+
+ va31_to20 = va >> (20 - 2); /* keep VA bits 31:20, pre-shifted left by 2 */
+ va31_to20 &= 0xFFFFFFFCUL;
+ pte_addr = l1_base + va31_to20;
+
+ return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
+{
+ u32 pte_addr;
+
+ pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
+
+ return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
+{
+ u32 pte_coarse;
+
+ pte_coarse = pte_val & 0xFFFFFC00;
+
+ return pte_coarse;
+}
+
+static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
+{
+ u32 pte_size = 0;
+
+ if ((pte_val & 0x3) == 0x1) {
+ /* Points to L2 PT */
+ pte_size = HW_MMU_COARSE_PAGE_SIZE;
+ }
+
+ if ((pte_val & 0x3) == 0x2) {
+ if (pte_val & (1 << 18))
+ pte_size = HW_PAGE_SIZE16MB;
+ else
+ pte_size = HW_PAGE_SIZE1MB;
+ }
+
+ return pte_size;
+}
+
+static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
+{
+ u32 pte_size = 0;
+
+ if (pte_val & 0x2)
+ pte_size = HW_PAGE_SIZE4KB;
+ else if (pte_val & 0x1)
+ pte_size = HW_PAGE_SIZE64KB;
+
+ return pte_size;
+}
+
+#endif /* _HW_MMU_H */
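The inline helpers above compose into a simple two-level lookup; the sketch below shows the assumed pattern for deriving the page size that backs a virtual address. It treats the u32 returned by the address helpers as a directly dereferenceable kernel address, which only holds when the page tables live in mapped memory.

static u32 example_page_size_for_va(u32 l1_table_va, u32 va)
{
	u32 l1_pte = *(u32 *)hw_mmu_pte_addr_l1(l1_table_va, va);
	u32 size = hw_mmu_pte_size_l1(l1_pte);

	if (size == HW_MMU_COARSE_PAGE_SIZE) {
		/* L1 entry points to a coarse (L2) table: read the L2 descriptor. */
		u32 l2_table = hw_mmu_pte_coarse_l1(l1_pte);
		u32 l2_pte = *(u32 *)hw_mmu_pte_addr_l2(l2_table, va);

		size = hw_mmu_pte_size_l2(l2_pte);
	}

	return size;	/* HW_PAGE_SIZE*, or 0 if the VA is unmapped */
}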
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
new file mode 100644
index 00000000000..8efd1fba2f6
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
@@ -0,0 +1,181 @@
+/*
+ * _chnl_sm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining channel manager and channel objects for
+ * a shared memory channel driver.
+ *
+ * Shared between the modules implementing the shared memory channel class
+ * library.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _CHNL_SM_
+#define _CHNL_SM_
+
+#include <dspbridge/dspapi.h>
+#include <dspbridge/dspdefs.h>
+
+#include <dspbridge/list.h>
+#include <dspbridge/ntfy.h>
+
+/*
+ * These target side symbols define the beginning and ending addresses
+ * of shared memory buffer. They are defined in the *cfg.cmd file by
+ * cdb code.
+ */
+#define CHNL_SHARED_BUFFER_BASE_SYM "_SHM_BEG"
+#define CHNL_SHARED_BUFFER_LIMIT_SYM "_SHM_END"
+#define BRIDGEINIT_BIOSGPTIMER "_BRIDGEINIT_BIOSGPTIMER"
+#define BRIDGEINIT_LOADMON_GPTIMER "_BRIDGEINIT_LOADMON_GPTIMER"
+
+#ifndef _CHNL_WORDSIZE
+#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 4 bytes/word */
+#endif
+
+#define MAXOPPS 16
+
+/* Shared memory config options */
+#define SHM_CURROPP 0 /* Set current OPP in shm */
+#define SHM_OPPINFO 1 /* Set dsp voltage and freq table values */
+#define SHM_GETOPP 2 /* Get opp requested by DSP */
+
+struct opp_table_entry {
+ u32 voltage;
+ u32 frequency;
+ u32 min_freq;
+ u32 max_freq;
+};
+
+struct opp_struct {
+ u32 curr_opp_pt;
+ u32 num_opp_pts;
+ struct opp_table_entry opp_point[MAXOPPS];
+};
+
+/* Request to MPU */
+struct opp_rqst_struct {
+ u32 rqst_dsp_freq;
+ u32 rqst_opp_pt;
+};
+
+/* Info to MPU */
+struct load_mon_struct {
+ u32 curr_dsp_load;
+ u32 curr_dsp_freq;
+ u32 pred_dsp_load;
+ u32 pred_dsp_freq;
+};
+
+/* Structure in shared memory, used for communication between DSP and PC. */
+struct shm {
+ u32 dsp_free_mask; /* Written by DSP, read by PC. */
+ u32 host_free_mask; /* Written by PC, read by DSP */
+
+ u32 input_full; /* Input channel has unread data. */
+ u32 input_id; /* Channel for which input is available. */
+ u32 input_size; /* Size of data block (in DSP words). */
+
+ u32 output_full; /* Output channel has unread data. */
+ u32 output_id; /* Channel for which output is available. */
+ u32 output_size; /* Size of data block (in DSP words). */
+
+ u32 arg; /* Arg for Issue/Reclaim (23 bits for 55x). */
+ u32 resvd; /* Keep structure size even for 32-bit DSPs */
+
+ /* Operating Point structure */
+ struct opp_struct opp_table_struct;
+ /* Operating Point Request structure */
+ struct opp_rqst_struct opp_request;
+ /* load monitor information structure */
+ struct load_mon_struct load_mon_info;
+#ifdef CONFIG_TIDSPBRIDGE_WDT3
+ /* Flag for WDT enable/disable F/I clocks */
+ u32 wdt_setclocks;
+ u32 wdt_overflow; /* WDT overflow time */
+ char dummy[176]; /* padding to 256 byte boundary */
+#else
+ char dummy[184]; /* padding to 256 byte boundary */
+#endif
+ u32 shm_dbg_var[64]; /* shared memory debug variables */
+};
+
+ /* Channel Manager: only one created per board: */
+struct chnl_mgr {
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+ struct io_mgr *hio_mgr; /* IO manager */
+ /* Device this board represents */
+ struct dev_object *hdev_obj;
+
+ /* These fields initialized in bridge_chnl_create(): */
+ u32 dw_output_mask; /* Host output channels w/ full buffers */
+ u32 dw_last_output; /* Last output channel fired from DPC */
+ /* Critical section object handle */
+ spinlock_t chnl_mgr_lock;
+ u32 word_size; /* Size in bytes of DSP word */
+ u8 max_channels; /* Total number of channels */
+ u8 open_channels; /* Total number of open channels */
+ struct chnl_object **ap_channel; /* Array of channels */
+ u8 dw_type; /* Type of channel class library */
+ /* If no shm syms, return for CHNL_Open */
+ int chnl_open_status;
+};
+
+/*
+ * Channel: up to CHNL_MAXCHANNELS per board or if DSP-DMA supported then
+ * up to CHNL_MAXCHANNELS + CHNL_MAXDDMACHNLS per board.
+ */
+struct chnl_object {
+ /* Pointer back to channel manager */
+ struct chnl_mgr *chnl_mgr_obj;
+ u32 chnl_id; /* Channel id */
+ u8 dw_state; /* Current channel state */
+ s8 chnl_mode; /* Chnl mode and attributes */
+ /* Chnl I/O completion event (user mode) */
+ void *user_event;
+ /* Abstract synchronization object */
+ struct sync_object *sync_event;
+ u32 process; /* Process which created this channel */
+ u32 pcb_arg; /* Argument to use with callback */
+ struct lst_list *pio_requests; /* List of IOR's to driver */
+ s32 cio_cs; /* Number of IOC's in queue */
+ s32 cio_reqs; /* Number of IORequests in queue */
+ s32 chnl_packets; /* Initial number of free Irps */
+ /* List of IOC's from driver */
+ struct lst_list *pio_completions;
+ struct lst_list *free_packets_list; /* List of free Irps */
+ struct ntfy_object *ntfy_obj;
+ u32 bytes_moved; /* Total number of bytes transferred */
+
+ /* For DSP-DMA */
+
+ /* Type of chnl transport:CHNL_[PCPY][DDMA] */
+ u32 chnl_type;
+};
+
+/* I/O Request/completion packet: */
+struct chnl_irp {
+ struct list_head link; /* Link to next CHIRP in queue. */
+ /* Buffer to be filled/emptied. (User) */
+ u8 *host_user_buf;
+ /* Buffer to be filled/emptied. (System) */
+ u8 *host_sys_buf;
+ u32 dw_arg; /* Issue/Reclaim argument. */
+ u32 dsp_tx_addr; /* Transfer address on DSP side. */
+ u32 byte_size; /* Bytes transferred. */
+ u32 buf_size; /* Actual buffer size when allocated. */
+ u32 status; /* Status of IO completion. */
+};
+
+#endif /* _CHNL_SM_ */
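As an illustration of the input_* handshake fields (the real consumer is the shared-memory I/O code elsewhere in this series), a host-side poll could look roughly like the sketch below; the helper name and the exact clearing protocol are assumptions.

static bool example_claim_dsp_input(volatile struct shm *sm,
				    u32 *chnl_id, u32 *size_dsp_words)
{
	if (!sm->input_full)
		return false;			/* nothing posted by the DSP yet */

	*chnl_id        = sm->input_id;		/* channel that has data ready */
	*size_dsp_words = sm->input_size;	/* block size, in DSP words */

	sm->input_full = 0;			/* hand the slot back to the DSP */
	return true;
}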
diff --git a/drivers/staging/tidspbridge/include/dspbridge/brddefs.h b/drivers/staging/tidspbridge/include/dspbridge/brddefs.h
new file mode 100644
index 00000000000..f80d9a5f05a
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/brddefs.h
@@ -0,0 +1,39 @@
+/*
+ * brddefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global BRD constants and types, shared between DSP API and Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef BRDDEFS_
+#define BRDDEFS_
+
+/* platform status values */
+#define BRD_STOPPED 0x0 /* No Monitor Loaded, Not running. */
+#define BRD_IDLE 0x1 /* Monitor Loaded, but suspended. */
+#define BRD_RUNNING 0x2 /* Monitor loaded, and executing. */
+#define BRD_UNKNOWN 0x3 /* Board state is indeterminate. */
+#define BRD_SYNCINIT 0x4
+#define BRD_LOADED 0x5
+#define BRD_LASTSTATE BRD_LOADED /* Set to highest legal board state. */
+#define BRD_SLEEP_TRANSITION 0x6 /* Sleep transition in progress */
+#define BRD_HIBERNATION 0x7 /* MPU initiated hibernation */
+#define BRD_RETENTION 0x8 /* Retention mode */
+#define BRD_DSP_HIBERNATION 0x9 /* DSP initiated hibernation */
+#define BRD_ERROR 0xA /* Board state is Error */
+
+/* BRD Object */
+struct brd_object;
+
+#endif /* BRDDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfg.h b/drivers/staging/tidspbridge/include/dspbridge/cfg.h
new file mode 100644
index 00000000000..05a8999070f
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfg.h
@@ -0,0 +1,222 @@
+/*
+ * cfg.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * PM Configuration module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CFG_
+#define CFG_
+#include <dspbridge/host_os.h>
+#include <dspbridge/cfgdefs.h>
+
+/*
+ * ======== cfg_exit ========
+ * Purpose:
+ * Discontinue usage of the CFG module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * cfg_init(void) was previously called.
+ * Ensures:
+ * Resources acquired in cfg_init(void) are freed.
+ */
+extern void cfg_exit(void);
+
+/*
+ * ======== cfg_get_auto_start ========
+ * Purpose:
+ * Retrieve the autostart mask, if any, for this board.
+ * Parameters:
+ * dev_node_obj: Handle to the dev_node whose driver we are querying.
+ * auto_start: Ptr to location for 32 bit autostart mask.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: dev_node_obj is invalid.
+ * -ENODATA: Unable to retrieve resource.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: *auto_start contains autostart mask for this devnode.
+ */
+extern int cfg_get_auto_start(struct cfg_devnode *dev_node_obj,
+ u32 *auto_start);
+
+/*
+ * ======== cfg_get_cd_version ========
+ * Purpose:
+ * Retrieves the version of the PM Class Driver.
+ * Parameters:
+ * version: Ptr to u32 to contain version number upon return.
+ * Returns:
+ * 0: Success. version contains Class Driver version in
+ * the form: 0xAABBCCDD where AABB is Major version and
+ * CCDD is Minor.
+ * -EPERM: Failure.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: Success.
+ * else: *version is NULL.
+ */
+extern int cfg_get_cd_version(u32 *version);
+
+/*
+ * ======== cfg_get_dev_object ========
+ * Purpose:
+ * Retrieve the Device Object handle for a given devnode.
+ * Parameters:
+ * dev_node_obj: Platform's dev_node handle from which to retrieve
+ * value.
+ * value: Ptr to location to store the value.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: dev_node_obj is invalid or device_obj is invalid.
+ * -ENODATA: The resource is not available.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: *value is set to the retrieved u32.
+ * else: *value is set to 0L.
+ */
+extern int cfg_get_dev_object(struct cfg_devnode *dev_node_obj,
+ u32 *value);
+
+/*
+ * ======== cfg_get_exec_file ========
+ * Purpose:
+ * Retrieve the default executable, if any, for this board.
+ * Parameters:
+ * dev_node_obj: Handle to the dev_node whose driver we are querying.
+ * buf_size: Size of buffer.
+ * str_exec_file: Ptr to character buf to hold ExecFile.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: dev_node_obj is invalid or str_exec_file is invalid.
+ * -ENODATA: The resource is not available.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: Not more than buf_size bytes were copied into str_exec_file,
+ * and *str_exec_file contains default executable for this
+ * devnode.
+ */
+extern int cfg_get_exec_file(struct cfg_devnode *dev_node_obj,
+ u32 buf_size, char *str_exec_file);
+
+/*
+ * ======== cfg_get_object ========
+ * Purpose:
+ * Retrieve the Driver Object handle From the Registry
+ * Parameters:
+ * value: Ptr to location to store the value.
+ * dw_type Type of Object to Get
+ * Returns:
+ * 0: Success.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: *value is set to the retrieved u32(non-Zero).
+ * else: *value is set to 0L.
+ */
+extern int cfg_get_object(u32 *value, u8 dw_type);
+
+/*
+ * ======== cfg_get_perf_value ========
+ * Purpose:
+ * Retrieve a flag indicating whether PERF should log statistics for the
+ * PM class driver.
+ * Parameters:
+ * enable_perf: Location to store flag. 0 indicates the key was
+ * not found, or had a zero value. A nonzero value
+ * means the key was found and had a nonzero value.
+ * Returns:
+ * Requires:
+ * enable_perf != NULL;
+ * Ensures:
+ */
+extern void cfg_get_perf_value(bool *enable_perf);
+
+/*
+ * ======== cfg_get_zl_file ========
+ * Purpose:
+ * Retrieve the ZLFile, if any, for this board.
+ * Parameters:
+ * dev_node_obj: Handle to the dev_node whose driver we are querying.
+ * buf_size: Size of buffer.
+ * str_zl_file_name: Ptr to character buf to hold ZLFileName.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: str_zl_file_name is invalid or dev_node_obj is invalid.
+ * -ENODATA: couldn't find the ZLFileName.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: Not more than buf_size bytes were copied into
+ * str_zl_file_name, and *str_zl_file_name contains ZLFileName
+ * for this devnode.
+ */
+extern int cfg_get_zl_file(struct cfg_devnode *dev_node_obj,
+ u32 buf_size, char *str_zl_file_name);
+
+/*
+ * ======== cfg_init ========
+ * Purpose:
+ * Initialize the CFG module's private state.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * A requirement for each of the other public CFG functions.
+ */
+extern bool cfg_init(void);
+
+/*
+ * ======== cfg_set_dev_object ========
+ * Purpose:
+ * Store the Device Object handle for a given devnode.
+ * Parameters:
+ * dev_node_obj: Platform's dev_node handle we are storing value with.
+ * value: Arbitrary value to store.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: dev_node_obj is invalid.
+ * -EPERM: Internal Error.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: The Private u32 was successfully set.
+ */
+extern int cfg_set_dev_object(struct cfg_devnode *dev_node_obj,
+ u32 value);
+
+/*
+ * ======== cfg_set_object ========
+ * Purpose:
+ * Store the Driver Object handle.
+ * Parameters:
+ * value: Arbitrary value to store.
+ * dw_type Type of Object to Store
+ * Returns:
+ * 0: Success.
+ * -EPERM: Internal Error.
+ * Requires:
+ * CFG initialized.
+ * Ensures:
+ * 0: The Private u32 was successfully set.
+ */
+extern int cfg_set_object(u32 value, u8 dw_type);
+
+#endif /* CFG_ */
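The getters above all share the same buffer-and-status calling convention; a minimal sketch, assuming a valid struct cfg_devnode handle is already in hand, is shown below purely as a usage example.

static int example_query_exec_file(struct cfg_devnode *dev_node)
{
	char exec_file[CFG_MAXPATH + 1];
	int status;

	status = cfg_get_exec_file(dev_node, sizeof(exec_file), exec_file);
	if (status == -ENODATA)
		pr_info("no default executable configured for this devnode\n");

	return status;
}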
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
new file mode 100644
index 00000000000..38122dbf877
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -0,0 +1,81 @@
+/*
+ * cfgdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global CFG constants and types, shared between DSP API and Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CFGDEFS_
+#define CFGDEFS_
+
+/* Maximum length of module search path. */
+#define CFG_MAXSEARCHPATHLEN 255
+
+/* Maximum length of general paths. */
+#define CFG_MAXPATH 255
+
+/* Host Resources: */
+#define CFG_MAXMEMREGISTERS 9
+#define CFG_MAXIOPORTS 20
+#define CFG_MAXIRQS 7
+#define CFG_MAXDMACHANNELS 7
+
+/* IRQ flag */
+#define CFG_IRQSHARED 0x01 /* IRQ can be shared */
+
+/* DSP Resources: */
+#define CFG_DSPMAXMEMTYPES 10
+#define CFG_DEFAULT_NUM_WINDOWS 1 /* We support only one window. */
+
+/* A platform-related device handle: */
+struct cfg_devnode;
+
+/*
+ * Host resource structure.
+ */
+struct cfg_hostres {
+ u32 num_mem_windows; /* Set to default */
+ /* This is the base.memory */
+ u32 dw_mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */
+ u32 dw_mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */
+ u32 dw_mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */
+ u8 birq_registers; /* IRQ Number */
+ u8 birq_attrib; /* IRQ Attribute */
+ u32 dw_offset_for_monitor; /* The Shared memory starts from
+ * dw_mem_base + this offset */
+ /*
+ * Info needed by NODE for allocating channels to communicate with RMS:
+ * dw_chnl_offset: Offset of RMS channels. Lower channels are
+ * reserved.
+ * dw_chnl_buf_size: Size of channel buffer to send to RMS
+ * dw_num_chnls: Total number of channels
+ * (including reserved).
+ */
+ u32 dw_chnl_offset;
+ u32 dw_chnl_buf_size;
+ u32 dw_num_chnls;
+ void __iomem *dw_per_base;
+ u32 dw_per_pm_base;
+ u32 dw_core_pm_base;
+ void __iomem *dw_dmmu_base;
+ void __iomem *dw_sys_ctrl_base;
+};
+
+struct cfg_dspmemdesc {
+ u32 mem_type; /* Type of memory. */
+ u32 ul_min; /* Minimum amount of memory of this type. */
+ u32 ul_max; /* Maximum amount of memory of this type. */
+};
+
+#endif /* CFGDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnl.h b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
new file mode 100644
index 00000000000..8733b3b8193
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
@@ -0,0 +1,130 @@
+/*
+ * chnl.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP API channel interface: multiplexes data streams through the single
+ * physical link managed by a Bridge driver.
+ *
+ * See DSP API chnl.h for more details.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CHNL_
+#define CHNL_
+
+#include <dspbridge/chnlpriv.h>
+
+/*
+ * ======== chnl_close ========
+ * Purpose:
+ * Ensures all pending I/O on this channel is cancelled, discards all
+ * queued I/O completion notifications, then frees the resources allocated
+ * for this channel, and makes the corresponding logical channel id
+ * available for subsequent use.
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid chnl_obj.
+ * Requires:
+ * chnl_init(void) called.
+ * No thread must be blocked on this channel's I/O completion event.
+ * Ensures:
+ * 0: The I/O completion event for this channel is freed.
+ * chnl_obj is no longer valid.
+ */
+extern int chnl_close(struct chnl_object *chnl_obj);
+
+/*
+ * ======== chnl_create ========
+ * Purpose:
+ * Create a channel manager object, responsible for opening new channels
+ * and closing old ones for a given board.
+ * Parameters:
+ * channel_mgr: Location to store a channel manager object on output.
+ * hdev_obj: Handle to a device object.
+ * mgr_attrts: Channel manager attributes.
+ * mgr_attrts->max_channels: Max channels
+ * mgr_attrts->birq: Channel's I/O IRQ number.
+ * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
+ * mgr_attrts->word_size: DSP Word size in equivalent PC bytes.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: hdev_obj is invalid.
+ * -EINVAL: max_channels is 0.
+ * Invalid DSP word size (must be > 0).
+ * Invalid base address for DSP communications.
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EIO: Unable to plug channel ISR for configured IRQ.
+ * -ECHRNG: This manager cannot handle this many channels.
+ * -EEXIST: Channel manager already exists for this device.
+ * Requires:
+ * chnl_init(void) called.
+ * channel_mgr != NULL.
+ * mgr_attrts != NULL.
+ * Ensures:
+ * 0: Subsequent calls to chnl_create() for the same
+ * board without an intervening call to
+ * chnl_destroy() will fail.
+ */
+extern int chnl_create(struct chnl_mgr **channel_mgr,
+ struct dev_object *hdev_obj,
+ const struct chnl_mgrattrs *mgr_attrts);
+
+/*
+ * ======== chnl_destroy ========
+ * Purpose:
+ * Close all open channels, and destroy the channel manager.
+ * Parameters:
+ * hchnl_mgr: Channel manager object.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: hchnl_mgr was invalid.
+ * Requires:
+ * chnl_init(void) called.
+ * Ensures:
+ * 0: Cancels I/O on each open channel.
+ * Closes each open channel.
+ * chnl_create may subsequently be called for the
+ * same board.
+ */
+extern int chnl_destroy(struct chnl_mgr *hchnl_mgr);
+
+/*
+ * ======== chnl_exit ========
+ * Purpose:
+ * Discontinue usage of the CHNL module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * chnl_init(void) previously called.
+ * Ensures:
+ * Resources, if any acquired in chnl_init(void), are freed when the last
+ * client of CHNL calls chnl_exit(void).
+ */
+extern void chnl_exit(void);
+
+/*
+ * ======== chnl_init ========
+ * Purpose:
+ * Initialize the CHNL module's private state.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * A requirement for each of the other public CHNL functions.
+ */
+extern bool chnl_init(void);
+
+#endif /* CHNL_ */
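A minimal create/destroy pairing, assuming a device object handle is already available; the attribute values are arbitrary examples, not defaults taken from the driver.

static int example_chnl_mgr_lifetime(struct dev_object *hdev_obj)
{
	struct chnl_mgrattrs attrs = {
		.max_channels = CHNL_MAXCHANNELS,	/* from chnlpriv.h */
		.word_size    = 2,			/* example DSP word size in bytes */
	};
	struct chnl_mgr *mgr;
	int status;

	status = chnl_create(&mgr, hdev_obj, &attrs);
	if (status)
		return status;	/* -EEXIST, -ENOMEM, -ECHRNG, ... as documented above */

	/* ... open channels and stream data here ... */

	return chnl_destroy(mgr);
}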
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h b/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
new file mode 100644
index 00000000000..5bf5f6b0b7b
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
@@ -0,0 +1,66 @@
+/*
+ * chnldefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * System-wide channel objects and constants.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CHNLDEFS_
+#define CHNLDEFS_
+
+/* Channel id option. */
+#define CHNL_PICKFREE (~0UL) /* Let manager pick a free channel. */
+
+/* Channel manager limits: */
+#define CHNL_INITIOREQS 4 /* Default # of I/O requests. */
+
+/* Channel modes */
+#define CHNL_MODETODSP 0 /* Data streaming to the DSP. */
+#define CHNL_MODEFROMDSP 1 /* Data streaming from the DSP. */
+
+/* GetIOCompletion flags */
+#define CHNL_IOCINFINITE 0xffffffff /* Wait forever for IO completion. */
+#define CHNL_IOCNOWAIT 0x0 /* Dequeue an IOC, if available. */
+
+/* IO Completion Record status: */
+#define CHNL_IOCSTATCOMPLETE 0x0000 /* IO Completed. */
+#define CHNL_IOCSTATCANCEL 0x0002 /* IO was cancelled */
+#define CHNL_IOCSTATTIMEOUT 0x0008 /* Wait for IOC timed out. */
+#define CHNL_IOCSTATEOS 0x8000 /* End Of Stream reached. */
+
+/* Macros for checking I/O Completion status: */
+#define CHNL_IS_IO_COMPLETE(ioc) (!(ioc.status & ~CHNL_IOCSTATEOS))
+#define CHNL_IS_IO_CANCELLED(ioc) (ioc.status & CHNL_IOCSTATCANCEL)
+#define CHNL_IS_TIMED_OUT(ioc) (ioc.status & CHNL_IOCSTATTIMEOUT)
+
+/* Channel attributes: */
+struct chnl_attr {
+ u32 uio_reqs; /* Max # of preallocated I/O requests. */
+ void *event_obj; /* User supplied auto-reset event object. */
+ char *pstr_event_name; /* Ptr to name of user event object. */
+ void *reserved1; /* Reserved for future use. */
+ u32 reserved2; /* Reserved for future use. */
+
+};
+
+/* I/O completion record: */
+struct chnl_ioc {
+ void *pbuf; /* Buffer to be filled/emptied. */
+ u32 byte_size; /* Bytes transferred. */
+ u32 buf_size; /* Actual buffer size in bytes */
+ u32 status; /* Status of IO completion. */
+ u32 dw_arg; /* User argument associated with pbuf. */
+};
+
+#endif /* CHNLDEFS_ */
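The status macros are easiest to read with a concrete consumer; the fragment below is an assumed example of interpreting a struct chnl_ioc, not code taken from the driver.

static void example_handle_ioc(struct chnl_ioc ioc)
{
	if (CHNL_IS_TIMED_OUT(ioc))
		return;				/* wait expired, nothing moved */

	if (CHNL_IS_IO_CANCELLED(ioc))
		return;				/* request was flushed */

	if (CHNL_IS_IO_COMPLETE(ioc))		/* only CHNL_IOCSTATEOS may remain set */
		pr_debug("chnl ioc: %u bytes%s\n", ioc.byte_size,
			 (ioc.status & CHNL_IOCSTATEOS) ? " (end of stream)" : "");
}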
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
new file mode 100644
index 00000000000..9292100b1c0
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
@@ -0,0 +1,98 @@
+/*
+ * chnlpriv.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private channel header shared between DSPSYS, DSPAPI and
+ * Bridge driver modules.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CHNLPRIV_
+#define CHNLPRIV_
+
+#include <dspbridge/chnldefs.h>
+#include <dspbridge/devdefs.h>
+#include <dspbridge/sync.h>
+
+/* Channel manager limits: */
+#define CHNL_MAXCHANNELS 32 /* Max channels available per transport */
+
+/*
+ * Transport channel Id definitions (must match dsp-side).
+ *
+ * For CHNL_MAXCHANNELS = 16:
+ *
+ * ChnlIds:
+ * 0-15 (PCPY) - transport 0
+ * 16-31 (DDMA) - transport 1
+ * 32-47 (ZCPY) - transport 2
+ */
+#define CHNL_PCPY 0 /* Proc-copy transport 0 */
+
+#define CHNL_MAXIRQ 0xff /* Arbitrarily large number. */
+
+/* The following modes are private: */
+#define CHNL_MODEUSEREVENT 0x1000 /* User provided the channel event. */
+#define CHNL_MODEMASK 0x1001
+
+/* Higher level channel states: */
+#define CHNL_STATEREADY 0 /* Channel ready for I/O. */
+#define CHNL_STATECANCEL 1 /* I/O was cancelled. */
+#define CHNL_STATEEOS 2 /* End Of Stream reached. */
+
+/* Macros for checking mode: */
+#define CHNL_IS_INPUT(mode) (mode & CHNL_MODEFROMDSP)
+#define CHNL_IS_OUTPUT(mode) (!CHNL_IS_INPUT(mode))
+
+/* Types of channel class libraries: */
+#define CHNL_TYPESM 1 /* Shared memory driver. */
+#define CHNL_TYPEBM 2 /* Bus Mastering driver. */
+
+/* Max string length of channel I/O completion event name - change if needed */
+#define CHNL_MAXEVTNAMELEN 32
+
+/* Max memory pages lockable in CHNL_PrepareBuffer() - change if needed */
+#define CHNL_MAXLOCKPAGES 64
+
+/* Channel info. */
+struct chnl_info {
+ struct chnl_mgr *hchnl_mgr; /* Owning channel manager. */
+ u32 cnhl_id; /* Channel ID. */
+ void *event_obj; /* Channel I/O completion event. */
+ /* Abstraction of I/O completion event. */
+ struct sync_object *sync_event;
+ s8 dw_mode; /* Channel mode. */
+ u8 dw_state; /* Current channel state. */
+ u32 bytes_tx; /* Total bytes transferred. */
+ u32 cio_cs; /* Number of IOCs in queue. */
+ u32 cio_reqs; /* Number of IO Requests in queue. */
+ u32 process; /* Process owning this channel. */
+};
+
+/* Channel manager info: */
+struct chnl_mgrinfo {
+ u8 dw_type; /* Type of channel class library. */
+ /* Channel handle, given the channel id. */
+ struct chnl_object *chnl_obj;
+ u8 open_channels; /* Number of open channels. */
+ u8 max_channels; /* total # of chnls supported */
+};
+
+/* Channel Manager Attrs: */
+struct chnl_mgrattrs {
+ /* Max number of channels this manager can use. */
+ u8 max_channels;
+ u32 word_size; /* DSP Word size. */
+};
+
+#endif /* CHNLPRIV_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/clk.h b/drivers/staging/tidspbridge/include/dspbridge/clk.h
new file mode 100644
index 00000000000..b2395032342
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/clk.h
@@ -0,0 +1,101 @@
+/*
+ * clk.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Provides Clock functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _CLK_H
+#define _CLK_H
+
+enum dsp_clk_id {
+ DSP_CLK_IVA2 = 0,
+ DSP_CLK_GPT5,
+ DSP_CLK_GPT6,
+ DSP_CLK_GPT7,
+ DSP_CLK_GPT8,
+ DSP_CLK_WDT3,
+ DSP_CLK_MCBSP1,
+ DSP_CLK_MCBSP2,
+ DSP_CLK_MCBSP3,
+ DSP_CLK_MCBSP4,
+ DSP_CLK_MCBSP5,
+ DSP_CLK_SSI,
+ DSP_CLK_NOT_DEFINED
+};
+
+/*
+ * ======== dsp_clk_exit ========
+ * Purpose:
+ * Discontinue usage of module; free resources when reference count
+ * reaches 0.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * CLK initialized.
+ * Ensures:
+ * Resources used by module are freed when cRef reaches zero.
+ */
+extern void dsp_clk_exit(void);
+
+/*
+ * ======== dsp_clk_init ========
+ * Purpose:
+ * Initializes private state of CLK module.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * CLK initialized.
+ */
+extern void dsp_clk_init(void);
+
+void dsp_gpt_wait_overflow(short int clk_id, unsigned int load);
+
+/*
+ * ======== dsp_clk_enable ========
+ * Purpose:
+ * Enables the clock requested.
+ * Parameters:
+ * Returns:
+ * 0: Success.
+ * -EPERM: Error occurred while enabling the clock.
+ * Requires:
+ * Ensures:
+ */
+extern int dsp_clk_enable(enum dsp_clk_id clk_id);
+
+u32 dsp_clock_enable_all(u32 dsp_per_clocks);
+
+/*
+ * ======== dsp_clk_disable ========
+ * Purpose:
+ * Disables the clock requested.
+ * Parameters:
+ * Returns:
+ * 0: Success.
+ * -EPERM: Error occurred while disabling the clock.
+ * Requires:
+ * Ensures:
+ */
+extern int dsp_clk_disable(enum dsp_clk_id clk_id);
+
+extern u32 dsp_clk_get_iva2_rate(void);
+
+u32 dsp_clock_disable_all(u32 dsp_per_clocks);
+
+extern void ssi_clk_prepare(bool FLAG);
+
+#endif /* _CLK_H */
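A short usage sketch for the clock interface above, with the GPT5 choice and the timer load value picked only for illustration.

static int example_pulse_gpt5(void)
{
	int status = dsp_clk_enable(DSP_CLK_GPT5);

	if (status)
		return status;		/* -EPERM if the clock could not be enabled */

	dsp_gpt_wait_overflow(DSP_CLK_GPT5, 0xfffffffe);	/* example load value */

	return dsp_clk_disable(DSP_CLK_GPT5);
}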
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
new file mode 100644
index 00000000000..a921f1b6ee7
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -0,0 +1,386 @@
+/*
+ * cmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Communication Memory Management(CMM) module provides shared memory
+ * management services for DSP/BIOS Bridge data streaming and messaging.
+ * Multiple shared memory segments can be registered with CMM. Memory is
+ * coalesced back to the appropriate pool when a buffer is freed.
+ *
+ * The CMM_Xlator[xxx] functions are used for node messaging and data
+ * streaming address translation to perform zero-copy inter-processor
+ * data transfer(GPP<->DSP). A "translator" object is created for a node or
+ * stream object that contains per thread virtual address information. This
+ * translator info is used at runtime to perform SM address translation
+ * to/from the DSP address space.
+ *
+ * Notes:
+ * cmm_xlator_alloc_buf - Used by Node and Stream modules for SM address
+ * translation.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CMM_
+#define CMM_
+
+#include <dspbridge/devdefs.h>
+
+#include <dspbridge/cmmdefs.h>
+#include <dspbridge/host_os.h>
+
+/*
+ * ======== cmm_calloc_buf ========
+ * Purpose:
+ * Allocate memory buffers that can be used for data streaming or
+ * messaging.
+ * Parameters:
+ * hcmm_mgr: Cmm Mgr handle.
+ * usize: Number of bytes to allocate.
+ * pattr: Attributes of memory to allocate.
+ * pp_buf_va: Address of where to place VA.
+ * Returns:
+ * Pointer to a zero'd block of SM memory;
+ * NULL if memory couldn't be allocated,
+ * or if byte_size == 0,
+ * Requires:
+ * Valid hcmm_mgr.
+ * CMM initialized.
+ * Ensures:
+ * The returned pointer, if not NULL, points to a valid memory block of
+ * the size requested.
+ *
+ */
+extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
+ u32 usize, struct cmm_attrs *pattrs,
+ void **pp_buf_va);
+
+/*
+ * ======== cmm_create ========
+ * Purpose:
+ * Create a communication memory manager object.
+ * Parameters:
+ * ph_cmm_mgr: Location to store a communication manager handle on
+ * output.
+ * hdev_obj: Handle to a device object.
+ * mgr_attrts: Comm mem manager attributes.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EPERM: Failed to initialize critical sect sync object.
+ *
+ * Requires:
+ * cmm_init(void) called.
+ * ph_cmm_mgr != NULL.
+ * mgr_attrts->ul_min_block_size >= 4 bytes.
+ * Ensures:
+ *
+ */
+extern int cmm_create(struct cmm_object **ph_cmm_mgr,
+ struct dev_object *hdev_obj,
+ const struct cmm_mgrattrs *mgr_attrts);
+
+/*
+ * ======== cmm_destroy ========
+ * Purpose:
+ * Destroy the communication memory manager object.
+ * Parameters:
+ * hcmm_mgr: Cmm Mgr handle.
+ * force: Force deallocation of all cmm memory immediately if set TRUE.
+ * If FALSE, any outstanding allocations will cause -EPERM
+ * to be returned.
+ * Returns:
+ * 0: CMM object & resources deleted.
+ * -EPERM: Unable to free CMM object due to outstanding allocation.
+ * -EFAULT: Unable to free CMM due to bad handle.
+ * Requires:
+ * CMM is initialized.
+ * hcmm_mgr != NULL.
+ * Ensures:
+ * Memory resources used by Cmm Mgr are freed.
+ */
+extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force);
+
+/*
+ * ======== cmm_exit ========
+ * Purpose:
+ * Discontinue usage of module. Clean up the CMM module if CMM cRef reaches zero.
+ * Parameters:
+ * n/a
+ * Returns:
+ * n/a
+ * Requires:
+ * CMM is initialized.
+ * Ensures:
+ */
+extern void cmm_exit(void);
+
+/*
+ * ======== cmm_free_buf ========
+ * Purpose:
+ * Free the given buffer.
+ * Parameters:
+ * hcmm_mgr: Cmm Mgr handle.
+ * pbuf: Pointer to memory allocated by cmm_calloc_buf().
+ * ul_seg_id: SM segment Id used in CMM_Calloc() attrs.
+ * Set to 0 to use default segment.
+ * Returns:
+ * 0
+ * -EPERM
+ * Requires:
+ * CMM initialized.
+ * buf_pa != NULL
+ * Ensures:
+ *
+ */
+extern int cmm_free_buf(struct cmm_object *hcmm_mgr,
+ void *buf_pa, u32 ul_seg_id);
+
+/*
+ * ======== cmm_get_handle ========
+ * Purpose:
+ * Return the handle to the cmm mgr for the given device obj.
+ * Parameters:
+ * hprocessor: Handle to a Processor.
+ * ph_cmm_mgr: Location to store the shared memory mgr handle on
+ * output.
+ *
+ * Returns:
+ * 0: Cmm Mgr opaque handle returned.
+ * -EFAULT: Invalid handle.
+ * Requires:
+ * ph_cmm_mgr != NULL
+ * hdev_obj != NULL
+ * Ensures:
+ */
+extern int cmm_get_handle(void *hprocessor,
+ struct cmm_object **ph_cmm_mgr);
+
+/*
+ * ======== cmm_get_info ========
+ * Purpose:
+ * Return the current SM and VM utilization information.
+ * Parameters:
+ * hcmm_mgr: Handle to a Cmm Mgr.
+ * cmm_info_obj: Location to store the Cmm information on output.
+ *
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid handle.
+ * -EINVAL Invalid input argument.
+ * Requires:
+ * Ensures:
+ *
+ */
+extern int cmm_get_info(struct cmm_object *hcmm_mgr,
+ struct cmm_info *cmm_info_obj);
+
+/*
+ * ======== cmm_init ========
+ * Purpose:
+ * Initializes private state of CMM module.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * CMM initialized.
+ */
+extern bool cmm_init(void);
+
+/*
+ * ======== cmm_register_gppsm_seg ========
+ * Purpose:
+ * Register a block of SM with the CMM.
+ * Parameters:
+ * hcmm_mgr: Handle to a Cmm Mgr.
+ * lpGPPBasePA: GPP Base Physical address.
+ * ul_size: Size in GPP bytes.
+ * dsp_addr_offset GPP PA to DSP PA Offset.
+ * c_factor: Add offset if CMM_ADDTODSPPA, sub if CMM_SUBFROMDSPPA.
+ * dw_dsp_base: DSP virtual base byte address.
+ * ul_dsp_size: Size of DSP segment in bytes.
+ * sgmt_id: Address to store segment Id.
+ *
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hcmm_mgr handle.
+ * -EINVAL: Invalid input argument.
+ * -EPERM: Unable to register.
+ * - On success *sgmt_id is a valid SM segment ID.
+ * Requires:
+ * ul_size > 0
+ * sgmt_id != NULL
+ * dw_gpp_base_pa != 0
+ * c_factor = CMM_ADDTODSPPA || c_factor = CMM_SUBFROMDSPPA
+ * Ensures:
+ *
+ */
+extern int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
+ unsigned int dw_gpp_base_pa,
+ u32 ul_size,
+ u32 dsp_addr_offset,
+ s8 c_factor,
+ unsigned int dw_dsp_base,
+ u32 ul_dsp_size,
+ u32 *sgmt_id, u32 gpp_base_va);
+
+/*
+ * ======== cmm_un_register_gppsm_seg ========
+ * Purpose:
+ * Unregister the given memory segment that was previously registered
+ * by cmm_register_gppsm_seg.
+ * Parameters:
+ * hcmm_mgr: Handle to a Cmm Mgr.
+ * ul_seg_id Segment identifier returned by cmm_register_gppsm_seg.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid handle.
+ * -EINVAL: Invalid ul_seg_id.
+ * -EPERM: Unable to unregister for unknown reason.
+ * Requires:
+ * Ensures:
+ *
+ */
+extern int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
+ u32 ul_seg_id);
+
+/*
+ * ======== cmm_xlator_alloc_buf ========
+ * Purpose:
+ * Allocate the specified SM buffer and create a local memory descriptor.
+ * Place the descriptor on the translator's HaQ (Host Alloc'd Queue).
+ * Parameters:
+ * xlator: Handle to a Xlator object.
+ * va_buf: Virtual address ptr(client context)
+ * pa_size: Size of SM memory to allocate.
+ * Returns:
+ * Ptr to valid physical address(Pa) of pa_size bytes, NULL if failed.
+ * Requires:
+ * va_buf != 0.
+ * pa_size != 0.
+ * Ensures:
+ *
+ */
+extern void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator,
+ void *va_buf, u32 pa_size);
+
+/*
+ * ======== cmm_xlator_create ========
+ * Purpose:
+ * Create a translator(xlator) object used for process specific Va<->Pa
+ * address translation. Node messaging and streams use this to perform
+ * inter-processor(GPP<->DSP) zero-copy data transfer.
+ * Parameters:
+ * xlator: Address to place handle to a new Xlator handle.
+ * hcmm_mgr: Handle to Cmm Mgr associated with this translator.
+ * xlator_attrs: Translator attributes used for the client NODE or STREAM.
+ * Returns:
+ * 0: Success.
+ * -EINVAL: Bad input Attrs.
+ * -ENOMEM: Insufficient memory(local) for requested resources.
+ * Requires:
+ * xlator != NULL
+ * hcmm_mgr != NULL
+ * xlator_attrs != NULL
+ * Ensures:
+ *
+ */
+extern int cmm_xlator_create(struct cmm_xlatorobject **xlator,
+ struct cmm_object *hcmm_mgr,
+ struct cmm_xlatorattrs *xlator_attrs);
+
+/*
+ * ======== cmm_xlator_delete ========
+ * Purpose:
+ * Delete translator resources
+ * Parameters:
+ * xlator: handle to translator.
+ * force: force = TRUE will free the XLator's SM buffers/descriptors.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Bad translator handle.
+ * -EPERM: Unable to free translator resources.
+ * Requires:
+ * refs > 0
+ * Ensures:
+ *
+ */
+extern int cmm_xlator_delete(struct cmm_xlatorobject *xlator,
+ bool force);
+
+/*
+ * ======== cmm_xlator_free_buf ========
+ * Purpose:
+ * Free SM buffer and descriptor.
+ * Does not free client process VM.
+ * Parameters:
+ * xlator: handle to translator.
+ * buf_va Virtual address of PA to free.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Bad translator handle.
+ * Requires:
+ * Ensures:
+ *
+ */
+extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
+ void *buf_va);
+
+/*
+ * ======== cmm_xlator_info ========
+ * Purpose:
+ * Set/Get process specific "translator" address info.
+ * This is used to perform fast virtual address translation
+ * for shared memory buffers between the GPP and DSP.
+ * Parameters:
+ * xlator: handle to translator.
+ * paddr: Virtual base address of segment.
+ * ul_size: Size in bytes.
+ * segm_id: Segment identifier of SM segment(s)
+ * set_info Set xlator fields if TRUE, else return base addr
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Bad translator handle.
+ * Requires:
+ * (refs > 0)
+ * (paddr != NULL)
+ * (ul_size > 0)
+ * Ensures:
+ *
+ */
+extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
+ u8 **paddr,
+ u32 ul_size, u32 segm_id, bool set_info);
+
+/*
+ * ======== cmm_xlator_translate ========
+ * Purpose:
+ * Perform address translation VA<->PA for the specified stream or
+ * message shared memory buffer.
+ * Parameters:
+ * xlator: handle to translator.
+ * paddr address of buffer to translate.
+ * xtype Type of address xlation. CMM_PA2VA or CMM_VA2PA.
+ * Returns:
+ * Valid address on success, else NULL.
+ * Requires:
+ * refs > 0
+ * paddr != NULL
+ * (xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
+ * Ensures:
+ *
+ */
+extern void *cmm_xlator_translate(struct cmm_xlatorobject *xlator,
+ void *paddr, enum cmm_xlatetype xtype);
+
+#endif /* CMM_ */
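Putting the allocation and translation halves together, an assumed round trip looks like the sketch below; the manager and translator handles are taken as given, and the segment id is a placeholder for the first registered SM segment.

static void *example_alloc_for_dsp(struct cmm_object *cmm_mgr,
				   struct cmm_xlatorobject *xlator,
				   u32 size)
{
	struct cmm_attrs attrs = { .ul_seg_id = 1 };	/* assumed segment id */
	void *buf_va;
	void *buf_pa;

	buf_pa = cmm_calloc_buf(cmm_mgr, size, &attrs, &buf_va);
	if (!buf_pa)
		return NULL;

	/* Convert the GPP physical address into the DSP's view of it. */
	return cmm_xlator_translate(xlator, buf_pa, CMM_PA2DSPPA);
}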
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
new file mode 100644
index 00000000000..fbff372d2f5
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
@@ -0,0 +1,105 @@
+/*
+ * cmmdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global MEM constants and types.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CMMDEFS_
+#define CMMDEFS_
+
+#include <dspbridge/list.h>
+
+/* Cmm attributes used in cmm_create() */
+struct cmm_mgrattrs {
+ /* Minimum SM allocation; default 32 bytes. */
+ u32 ul_min_block_size;
+};
+
+/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */
+struct cmm_attrs {
+ u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */
+ u32 ul_alignment; /* 0,1,2,4....ul_min_block_size */
+};
+
+/*
+ * DSPPa to GPPPa Conversion Factor.
+ *
+ * For typical platforms:
+ * converted Address = PaDSP + ( c_factor * addressToConvert).
+ */
+#define CMM_SUBFROMDSPPA -1
+#define CMM_ADDTODSPPA 1
+
+#define CMM_ALLSEGMENTS 0xFFFFFF /* All SegIds */
+#define CMM_MAXGPPSEGS 1 /* Maximum # of SM segs */
+
+/*
+ * SMSEGs are SM segments the DSP allocates from.
+ *
+ * This info is used by the GPP to xlate DSP allocated PAs.
+ */
+
+struct cmm_seginfo {
+ u32 dw_seg_base_pa; /* Start Phys address of SM segment */
+ /* Total size in bytes of segment: DSP+GPP */
+ u32 ul_total_seg_size;
+ u32 dw_gpp_base_pa; /* Start Phys addr of Gpp SM seg */
+ u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */
+ u32 dw_dsp_base_va; /* DSP virt base byte address */
+ u32 ul_dsp_size; /* DSP seg size in bytes */
+ /* # of current GPP allocations from this segment */
+ u32 ul_in_use_cnt;
+ u32 dw_seg_base_va; /* Start Virt address of SM seg */
+
+};
+
+/* CMM useful information */
+struct cmm_info {
+ /* # of SM segments registered with this Cmm. */
+ u32 ul_num_gppsm_segs;
+ /* Total # of allocations outstanding for CMM */
+ u32 ul_total_in_use_cnt;
+ /* Min SM block size allocation from cmm_create() */
+ u32 ul_min_block_size;
+ /* Info per registered SM segment. */
+ struct cmm_seginfo seg_info[CMM_MAXGPPSEGS];
+};
+
+/* XlatorCreate attributes */
+struct cmm_xlatorattrs {
+ u32 ul_seg_id; /* segment Id used for SM allocations */
+ u32 dw_dsp_bufs; /* # of DSP-side bufs */
+ u32 dw_dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
+ /* Vm base address alloc'd in client process context */
+ void *vm_base;
+ /* dw_vm_size must be >= (dw_dsp_bufs * dw_dsp_buf_size) */
+ u32 dw_vm_size;
+};
+
+/*
+ * Cmm translation types. Use to map SM addresses to process context.
+ */
+enum cmm_xlatetype {
+ CMM_VA2PA = 0, /* Virtual to GPP physical address xlation */
+ CMM_PA2VA = 1, /* GPP Physical to virtual */
+ CMM_VA2DSPPA = 2, /* Va to DSP Pa */
+ CMM_PA2DSPPA = 3, /* GPP Pa to DSP Pa */
+ CMM_DSPPA2PA = 4, /* DSP Pa to GPP Pa */
+};
+
+struct cmm_object;
+struct cmm_xlatorobject;
+
+#endif /* CMMDEFS_ */
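The conversion-factor comment above reduces to one line of arithmetic; the helper below merely restates the documented formula with kernel-style names and is not part of the module.

static u32 example_convert_pa(u32 pa_dsp, s8 c_factor, u32 address_to_convert)
{
	/* c_factor is CMM_ADDTODSPPA (+1) or CMM_SUBFROMDSPPA (-1). */
	return pa_dsp + (u32)((s32)c_factor * (s32)address_to_convert);
}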
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cod.h b/drivers/staging/tidspbridge/include/dspbridge/cod.h
new file mode 100644
index 00000000000..42bce2eec80
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/cod.h
@@ -0,0 +1,369 @@
+/*
+ * cod.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Code management module for DSPs. This module provides an interface
+ * for loading both static and dynamic code objects onto DSP
+ * systems.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef COD_
+#define COD_
+
+#include <dspbridge/dblldefs.h>
+
+#define COD_MAXPATHLENGTH 255
+#define COD_TRACEBEG "SYS_PUTCBEG"
+#define COD_TRACEEND "SYS_PUTCEND"
+#define COD_TRACECURPOS "BRIDGE_SYS_PUTC_current"
+#define COD_TRACESECT "trace"
+#define COD_TRACEBEGOLD "PUTCBEG"
+#define COD_TRACEENDOLD "PUTCEND"
+
+#define COD_NOLOAD DBLL_NOLOAD
+#define COD_SYMB DBLL_SYMB
+
+/* COD code manager handle */
+struct cod_manager;
+
+/* COD library handle */
+struct cod_libraryobj;
+
+/* COD attributes */
+struct cod_attrs {
+ u32 ul_reserved;
+};
+
+/*
+ * Function prototypes for writing memory to a DSP system, allocating
+ * and freeing DSP memory.
+ */
+typedef u32(*cod_writefxn) (void *priv_ref, u32 dsp_add,
+ void *pbuf, u32 ul_num_bytes, u32 mem_space);
+
+/*
+ * ======== cod_close ========
+ * Purpose:
+ * Close a library opened with cod_open().
+ * Parameters:
+ * lib - Library handle returned by cod_open().
+ * Returns:
+ * None.
+ * Requires:
+ * COD module initialized.
+ * valid lib.
+ * Ensures:
+ *
+ */
+extern void cod_close(struct cod_libraryobj *lib);
+
+/*
+ * ======== cod_create ========
+ * Purpose:
+ * Create an object to manage code on a DSP system. This object can be
+ * used to load an initial program image with arguments that can later
+ * be expanded with dynamically loaded object files.
+ * Symbol table information is managed by this object and can be retrieved
+ * using the cod_get_sym_value() function.
+ * Parameters:
+ * manager: created manager object
+ * str_zl_file: ZL DLL filename, of length < COD_MAXPATHLENGTH.
+ * attrs: attributes to be used by this object. A NULL value
+ * will cause default attrs to be used.
+ * Returns:
+ * 0: Success.
+ * -ESPIPE: ZL_Create failed.
+ * -ENOSYS: attrs was not NULL. We don't yet support
+ * non default values of attrs.
+ * Requires:
+ * COD module initialized.
+ * str_zl_file != NULL
+ * Ensures:
+ */
+extern int cod_create(struct cod_manager **mgr,
+ char *str_zl_file,
+ const struct cod_attrs *attrs);
+
+/*
+ * ======== cod_delete ========
+ * Purpose:
+ * Delete a code manager object.
+ * Parameters:
+ * cod_mgr_obj: handle of manager to be deleted
+ * Returns:
+ * None.
+ * Requires:
+ * COD module initialized.
+ * valid cod_mgr_obj.
+ * Ensures:
+ */
+extern void cod_delete(struct cod_manager *cod_mgr_obj);
+
+/*
+ * ======== cod_exit ========
+ * Purpose:
+ * Discontinue usage of the COD module.
+ * Parameters:
+ * None.
+ * Returns:
+ * None.
+ * Requires:
+ * COD initialized.
+ * Ensures:
+ * Resources acquired in cod_init(void) are freed.
+ */
+extern void cod_exit(void);
+
+/*
+ * ======== cod_get_base_lib ========
+ * Purpose:
+ * Get handle to the base image DBL library.
+ * Parameters:
+ * cod_mgr_obj: handle of manager to be deleted
+ * plib: location to store library handle on output.
+ * Returns:
+ * 0: Success.
+ * Requires:
+ * COD module initialized.
+ * valid cod_mgr_obj.
+ * plib != NULL.
+ * Ensures:
+ */
+extern int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
+ struct dbll_library_obj **plib);
+
+/*
+ * ======== cod_get_base_name ========
+ * Purpose:
+ * Get the name of the base image DBL library.
+ * Parameters:
+ * cod_mgr_obj: handle of manager to be deleted
+ * sz_name: location to store library name on output.
+ * usize: size of name buffer.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Buffer too small.
+ * Requires:
+ * COD module initialized.
+ * valid cod_mgr_obj.
+ * sz_name != NULL.
+ * Ensures:
+ */
+extern int cod_get_base_name(struct cod_manager *cod_mgr_obj,
+ char *sz_name, u32 usize);
+
+/*
+ * ======== cod_get_entry ========
+ * Purpose:
+ * Retrieve the entry point of a loaded DSP program image
+ * Parameters:
+ * cod_mgr_obj: handle of code manager
+ * entry_pt: pointer to location for entry point
+ * Returns:
+ * 0: Success.
+ * Requires:
+ * COD module initialized.
+ * valid cod_mgr_obj.
+ * entry_pt != NULL.
+ * Ensures:
+ */
+extern int cod_get_entry(struct cod_manager *cod_mgr_obj,
+ u32 *entry_pt);
+
+/*
+ * ======== cod_get_loader ========
+ * Purpose:
+ * Get handle to the DBL loader.
+ * Parameters:
+ * cod_mgr_obj: handle of code manager
+ * loader: location to store loader handle on output.
+ * Returns:
+ * 0: Success.
+ * Requires:
+ * COD module initialized.
+ * valid cod_mgr_obj.
+ * loader != NULL.
+ * Ensures:
+ */
+extern int cod_get_loader(struct cod_manager *cod_mgr_obj,
+ struct dbll_tar_obj **loader);
+
+/*
+ * ======== cod_get_section ========
+ * Purpose:
+ * Retrieve the starting address and length of a section in the COFF file
+ * given the section name.
+ * Parameters:
+ * lib: Library handle returned from cod_open().
+ * str_sect: name of the section, with or without leading "."
+ * addr: Location to store address.
+ * len: Location to store length.
+ * Returns:
+ * 0: Success
+ * -ESPIPE: Symbols could not be found or have not been loaded onto
+ * the board.
+ * Requires:
+ * COD module initialized.
+ * valid lib.
+ * str_sect != NULL;
+ * addr != NULL;
+ * len != NULL;
+ * Ensures:
+ * 0: *addr and *len contain the address and length of the
+ * section.
+ * else: *addr == 0 and *len == 0;
+ *
+ */
+extern int cod_get_section(struct cod_libraryobj *lib,
+ char *str_sect,
+ u32 *addr, u32 *len);
+
+/*
+ * ======== cod_get_sym_value ========
+ * Purpose:
+ * Retrieve the value for the specified symbol. The symbol is first
+ * searched for literally and then, if not found, searched for as a
+ * C symbol.
+ * Parameters:
+ * cod_mgr_obj: handle of code manager
+ * str_sym: name of the symbol
+ * pul_value: location to store the symbol value on output
+ * Returns:
+ * 0: Success.
+ * -ESPIPE: Symbols could not be found or have not been loaded onto
+ * the board.
+ * Requires:
+ * COD module initialized.
+ * Valid cod_mgr_obj.
+ * str_sym != NULL.
+ * pul_value != NULL.
+ * Ensures:
+ */
+extern int cod_get_sym_value(struct cod_manager *cod_mgr_obj,
+ char *str_sym, u32 * pul_value);
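+
+/*
+ * Illustrative usage sketch (editorial note, not part of the original
+ * header): look up the DSP trace buffer bounds through the symbol names
+ * defined above. The cod_mgr handle and the error handling around the
+ * calls are assumptions made for the example.
+ *
+ *	u32 trace_beg = 0, trace_end = 0;
+ *	int status;
+ *
+ *	status = cod_get_sym_value(cod_mgr, COD_TRACEBEG, &trace_beg);
+ *	if (!status)
+ *		status = cod_get_sym_value(cod_mgr, COD_TRACEEND, &trace_end);
+ *	if (!status)
+ *		pr_info("trace buffer: 0x%x..0x%x\n", trace_beg, trace_end);
+ */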
+
+/*
+ * ======== cod_init ========
+ * Purpose:
+ * Initialize the COD module's private state.
+ * Parameters:
+ * None.
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * A requirement for each of the other public COD functions.
+ */
+extern bool cod_init(void);
+
+/*
+ * ======== cod_load_base ========
+ * Purpose:
+ * Load the initial program image, optionally with command-line arguments,
+ * on the DSP system managed by the supplied handle. The program to be
+ * loaded must be the first element of the args array and must be a fully
+ * qualified pathname.
+ * Parameters:
+ * cod_mgr_obj: manager to load the code with
+ * num_argc: number of arguments in the args array
+ * args: array of strings for arguments to DSP program
+ * write_fxn: board-specific function to write data to DSP system
+ * arb: arbitrary pointer to be passed as first arg to write_fxn
+ * envp: array of environment strings for DSP exec.
+ * Returns:
+ * 0: Success.
+ * -EBADF: Failed to open target code.
+ * Requires:
+ * COD module initialized.
+ * cod_mgr_obj is valid.
+ * num_argc > 0.
+ * args != NULL.
+ * args[0] != NULL.
+ * pfn_write != NULL.
+ * Ensures:
+ */
+extern int cod_load_base(struct cod_manager *cod_mgr_obj,
+ u32 num_argc, char *args[],
+ cod_writefxn pfn_write, void *arb,
+ char *envp[]);
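+
+/*
+ * Illustrative usage sketch (editorial note, not part of the original
+ * header): create a code manager with default attributes and load a base
+ * image. The image path, my_write_fxn (a cod_writefxn implementation) and
+ * its context pointer are placeholders, not names from this driver.
+ *
+ *	struct cod_manager *cod_mgr;
+ *	char *argv[] = { "/lib/dsp/baseimage.dof" };
+ *	int status;
+ *
+ *	status = cod_create(&cod_mgr, argv[0], NULL);
+ *	if (!status)
+ *		status = cod_load_base(cod_mgr, 1, argv, my_write_fxn,
+ *				       my_write_ctxt, NULL);
+ */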
+
+/*
+ * ======== cod_open ========
+ * Purpose:
+ * Open a library for reading sections. Does not load or set the base.
+ * Parameters:
+ * hmgr: manager to load the code with
+ * sz_coff_path: Coff file to open.
+ * flags: COD_NOLOAD (don't load symbols) or COD_SYMB (load
+ * symbols).
+ * lib_obj: Handle returned that can be used in calls to cod_close
+ * and cod_get_section.
+ * Returns:
+ * 0: Success.
+ * -EBADF: Failed to open target code.
+ * Requires:
+ * COD module initialized.
+ * hmgr is valid.
+ * flags == COD_NOLOAD || flags == COD_SYMB.
+ * sz_coff_path != NULL.
+ * Ensures:
+ */
+extern int cod_open(struct cod_manager *hmgr,
+ char *sz_coff_path,
+ u32 flags, struct cod_libraryobj **lib_obj);
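+
+/*
+ * Illustrative usage sketch (editorial note, not part of the original
+ * header): open a library with symbols, query the trace section, then
+ * close it again. cod_mgr and the COFF path are placeholders.
+ *
+ *	struct cod_libraryobj *lib;
+ *	u32 addr = 0, len = 0;
+ *	int status;
+ *
+ *	status = cod_open(cod_mgr, "/lib/dsp/baseimage.dof", COD_SYMB, &lib);
+ *	if (!status) {
+ *		status = cod_get_section(lib, COD_TRACESECT, &addr, &len);
+ *		cod_close(lib);
+ *	}
+ */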
+
+/*
+ * ======== cod_open_base ========
+ * Purpose:
+ * Open base image for reading sections. Does not load the base.
+ * Parameters:
+ * hmgr: manager to load the code with
+ * sz_coff_path: Coff file to open.
+ * flags: Specifies whether to load symbols.
+ * Returns:
+ * 0: Success.
+ * -EBADF: Failed to open target code.
+ * Requires:
+ * COD module initialized.
+ * hmgr is valid.
+ * sz_coff_path != NULL.
+ * Ensures:
+ */
+extern int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
+ dbll_flags flags);
+
+/*
+ * ======== cod_read_section ========
+ * Purpose:
+ * Retrieve the content of a code section given the section name.
+ * Parameters:
+ * lib - library handle returned from cod_open()
+ * str_sect - name of the section, with or without leading "."
+ * str_content - buffer to store content of the section
+ * content_size - size of the str_content buffer, in bytes
+ * Returns:
+ * 0: on success, error code on failure
+ * -ESPIPE: Symbols have not been loaded onto the board.
+ * Requires:
+ * COD module initialized.
+ * valid lib.
+ * str_sect != NULL;
+ * str_content != NULL;
+ * Ensures:
+ * 0: *str_content stores the content of the named section.
+ */
+extern int cod_read_section(struct cod_libraryobj *lib,
+ char *str_sect,
+ char *str_content, u32 content_size);
+
+#endif /* COD_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbc.h b/drivers/staging/tidspbridge/include/dspbridge/dbc.h
new file mode 100644
index 00000000000..463760f499a
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbc.h
@@ -0,0 +1,46 @@
+/*
+ * dbc.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * "Design by Contract" programming macros.
+ *
+ * Notes:
+ * Requires that the GT->ERROR function has been defaulted to a valid
+ * error handler for the given execution environment.
+ *
+ * Does not require that GT_init() be called.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBC_
+#define DBC_
+
+/* Assertion Macros: */
+#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+
+#define DBC_ASSERT(exp) \
+ if (!(exp)) \
+ pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
+ __FILE__, __LINE__)
+#define DBC_REQUIRE DBC_ASSERT /* Function Precondition. */
+#define DBC_ENSURE DBC_ASSERT /* Function Postcondition. */
+
+#else
+
+#define DBC_ASSERT(exp) {}
+#define DBC_REQUIRE(exp) {}
+#define DBC_ENSURE(exp) {}
+
+#endif /* CONFIG_TIDSPBRIDGE_DEBUG */
+
+#endif /* DBC_ */
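+
+/*
+ * Illustrative usage sketch (editorial note, not part of the original
+ * header): preconditions are checked with DBC_REQUIRE on entry and
+ * postconditions with DBC_ENSURE before returning; with
+ * CONFIG_TIDSPBRIDGE_DEBUG disabled all three macros compile away.
+ *
+ *	static int fill_buffer(u8 *buf, u32 size)
+ *	{
+ *		DBC_REQUIRE(buf != NULL);
+ *		DBC_REQUIRE(size > 0);
+ *
+ *		memset(buf, 0, size);
+ *
+ *		DBC_ENSURE(buf[0] == 0);
+ *		return 0;
+ *	}
+ */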
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h b/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h
new file mode 100644
index 00000000000..7cc3e12686e
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h
@@ -0,0 +1,358 @@
+/*
+ * dbdcd.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Defines the DSP/BIOS Bridge Configuration Database (DCD) API.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBDCD_
+#define DBDCD_
+
+#include <dspbridge/dbdcddef.h>
+#include <dspbridge/host_os.h>
+#include <dspbridge/nldrdefs.h>
+
+/*
+ * ======== dcd_auto_register ========
+ * Purpose:
+ * This function automatically registers DCD objects specified in a
+ * special COFF section called ".dcd_register"
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * sz_coff_path: Pointer to name of COFF file containing DCD
+ * objects to be registered.
+ * Returns:
+ * 0: Success.
+ * -EACCES: Unable to find auto-registration/read/load section.
+ * -EFAULT: Invalid DCD_HMANAGER handle.
+ * Requires:
+ * DCD initialized.
+ * Ensures:
+ * Note:
+ * Due to the DCD database construction, it is essential for a DCD-enabled
+ * COFF file to contain the right COFF sections, especially
+ * ".dcd_register", which is used for auto registration.
+ */
+extern int dcd_auto_register(struct dcd_manager *hdcd_mgr,
+ char *sz_coff_path);
+
+/*
+ * ======== dcd_auto_unregister ========
+ * Purpose:
+ * This function automatically unregisters DCD objects specified in a
+ * special COFF section called ".dcd_register"
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * sz_coff_path: Pointer to name of COFF file containing
+ * DCD objects to be unregistered.
+ * Returns:
+ * 0: Success.
+ * -EACCES: Unable to find auto-registration/read/load section.
+ * -EFAULT: Invalid DCD_HMANAGER handle.
+ * Requires:
+ * DCD initialized.
+ * Ensures:
+ * Note:
+ * Due to the DCD database construction, it is essential for a DCD-enabled
+ * COFF file to contain the right COFF sections, especially
+ * ".dcd_register", which is used for auto unregistration.
+ */
+extern int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
+ char *sz_coff_path);
+
+/*
+ * ======== dcd_create_manager ========
+ * Purpose:
+ * This function creates a DCD module manager.
+ * Parameters:
+ * sz_zl_dll_name: Pointer to a DLL name string.
+ * dcd_mgr: A pointer to a DCD manager handle.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Unable to allocate memory for DCD manager handle.
+ * -EPERM: General failure.
+ * Requires:
+ * DCD initialized.
+ * sz_zl_dll_name is non-NULL.
+ * dcd_mgr is non-NULL.
+ * Ensures:
+ * A DCD manager handle is created.
+ */
+extern int dcd_create_manager(char *sz_zl_dll_name,
+ struct dcd_manager **dcd_mgr);
+
+/*
+ * ======== dcd_destroy_manager ========
+ * Purpose:
+ * This function destroys a DCD module manager.
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid DCD manager handle.
+ * Requires:
+ * DCD initialized.
+ * Ensures:
+ */
+extern int dcd_destroy_manager(struct dcd_manager *hdcd_mgr);
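+
+/*
+ * Illustrative manager lifetime (editorial note, not part of the original
+ * header): create a manager, auto-register the DCD objects found in a
+ * COFF file, and tear everything down again. The base image name and the
+ * COFF path are placeholders.
+ *
+ *	struct dcd_manager *dcd_mgr;
+ *	int status;
+ *
+ *	status = dcd_create_manager("baseimage.dof", &dcd_mgr);
+ *	if (!status)
+ *		status = dcd_auto_register(dcd_mgr, "/lib/dsp/node.dll64P");
+ *
+ *	... use the registered objects ...
+ *
+ *	dcd_auto_unregister(dcd_mgr, "/lib/dsp/node.dll64P");
+ *	dcd_destroy_manager(dcd_mgr);
+ */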
+
+/*
+ * ======== dcd_enumerate_object ========
+ * Purpose:
+ * This function enumerates currently visible DSP/BIOS Bridge objects
+ * and returns the UUID and type of each enumerated object.
+ * Parameters:
+ * index: The object enumeration index.
+ * obj_type: Type of object to enumerate.
+ * uuid_obj: Pointer to a dsp_uuid object.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Unable to enumerate through the DCD database.
+ * ENODATA: Enumeration completed. This is not an error code.
+ * Requires:
+ * DCD initialized.
+ * uuid_obj is a valid pointer.
+ * Ensures:
+ * Details:
+ * This function can be used in conjunction with dcd_get_object_def to
+ * retrieve object properties.
+ */
+extern int dcd_enumerate_object(s32 index,
+ enum dsp_dcdobjtype obj_type,
+ struct dsp_uuid *uuid_obj);
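+
+/*
+ * Illustrative enumeration loop (editorial note, not part of the original
+ * header), following the detail above that this call pairs with
+ * dcd_get_object_def() declared later in this file. dcd_mgr is a
+ * placeholder handle.
+ *
+ *	struct dsp_uuid uuid;
+ *	struct dcd_genericobj obj_def;
+ *	s32 i = 0;
+ *
+ *	while (!dcd_enumerate_object(i++, DSP_DCDNODETYPE, &uuid)) {
+ *		if (!dcd_get_object_def(dcd_mgr, &uuid, DSP_DCDNODETYPE,
+ *					&obj_def))
+ *			pr_info("node: %s\n",
+ *				obj_def.obj_data.node_obj.ndb_props.ac_name);
+ *	}
+ */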
+
+/*
+ * ======== dcd_exit ========
+ * Purpose:
+ * This function cleans up the DCD module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * DCD initialized.
+ * Ensures:
+ */
+extern void dcd_exit(void);
+
+/*
+ * ======== dcd_get_dep_libs ========
+ * Purpose:
+ * Given the uuid of a library and size of array of uuids, this function
+ * fills the array with the uuids of all dependent libraries of the input
+ * library.
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * uuid_obj: Pointer to a dsp_uuid for a library.
+ * num_libs: Size of uuid array (number of library uuids).
+ * dep_lib_uuids: Array of dependent library uuids to be filled in.
+ * prstnt_dep_libs: Array indicating if corresponding lib is persistent.
+ * phase: phase to obtain correct input library
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failure.
+ * -EACCES: Failure to read section containing library info.
+ * -EPERM: General failure.
+ * Requires:
+ * DCD initialized.
+ * Valid hdcd_mgr.
+ * uuid_obj != NULL
+ * dep_lib_uuids != NULL.
+ * Ensures:
+ */
+extern int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ u16 num_libs,
+ struct dsp_uuid *dep_lib_uuids,
+ bool *prstnt_dep_libs,
+ enum nldr_phase phase);
+
+/*
+ * ======== dcd_get_num_dep_libs ========
+ * Purpose:
+ * Given the uuid of a library, determine its number of dependent
+ * libraries.
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * uuid_obj: Pointer to a dsp_uuid for a library.
+ * num_libs: location to store the total number of dependent libraries.
+ * num_pers_libs: location to store the number of persistent dependent
+ * libraries.
+ * phase: Phase to obtain correct input library
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failure.
+ * -EACCES: Failure to read section containing library info.
+ * -EPERM: General failure.
+ * Requires:
+ * DCD initialized.
+ * Valid hdcd_mgr.
+ * uuid_obj != NULL
+ * num_libs != NULL.
+ * Ensures:
+ */
+extern int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ u16 *num_libs,
+ u16 *num_pers_libs,
+ enum nldr_phase phase);
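+
+/*
+ * Illustrative two-step query (editorial note, not part of the original
+ * header): size the arrays with dcd_get_num_dep_libs(), then fill them
+ * with dcd_get_dep_libs(). dcd_mgr, lib_uuid and phase are placeholders;
+ * the kcalloc() results would also need checking and freeing.
+ *
+ *	u16 num_libs = 0, num_pers = 0;
+ *	struct dsp_uuid *dep_uuids;
+ *	bool *persistent;
+ *	int status;
+ *
+ *	status = dcd_get_num_dep_libs(dcd_mgr, &lib_uuid, &num_libs,
+ *				      &num_pers, phase);
+ *	if (!status && num_libs) {
+ *		dep_uuids = kcalloc(num_libs, sizeof(*dep_uuids), GFP_KERNEL);
+ *		persistent = kcalloc(num_libs, sizeof(*persistent), GFP_KERNEL);
+ *		status = dcd_get_dep_libs(dcd_mgr, &lib_uuid, num_libs,
+ *					  dep_uuids, persistent, phase);
+ *	}
+ */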
+
+/*
+ * ======== dcd_get_library_name ========
+ * Purpose:
+ * This function returns the name of a (dynamic) library for a given
+ * UUID.
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * uuid_obj: Pointer to a dsp_uuid that represents a unique DSP/BIOS
+ * Bridge object.
+ * str_lib_name: Buffer to hold library name.
+ * buff_size: Contains buffer size. Set to string size on output.
+ * phase: Which phase to load
+ * phase_split: Are phases in multiple libraries
+ * Returns:
+ * 0: Success.
+ * -EPERM: General failure.
+ * Requires:
+ * DCD initialized.
+ * Valid hdcd_mgr.
+ * str_lib_name != NULL.
+ * uuid_obj != NULL
+ * buff_size != NULL.
+ * Ensures:
+ */
+extern int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ char *str_lib_name,
+ u32 *buff_size,
+ enum nldr_phase phase,
+ bool *phase_split);
+
+/*
+ * ======== dcd_get_object_def ========
+ * Purpose:
+ * This function returns the properties/attributes of a DSP/BIOS Bridge
+ * object.
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * uuid_obj: Pointer to a dsp_uuid that represents a unique
+ * DSP/BIOS Bridge object.
+ * obj_type: The type of DSP/BIOS Bridge object to be
+ * referenced (node, processor, etc).
+ * obj_def: Pointer to an object definition structure. A
+ * union of various possible DCD object types.
+ * Returns:
+ * 0: Success.
+ * -EACCES: Unable to access/read/parse/load content of object code
+ * section.
+ * -EPERM: General failure.
+ * -EFAULT: Invalid DCD_HMANAGER handle.
+ * Requires:
+ * DCD initialized.
+ * obj_uuid is non-NULL.
+ * obj_def is non-NULL.
+ * Ensures:
+ */
+extern int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *obj_uuid,
+ enum dsp_dcdobjtype obj_type,
+ struct dcd_genericobj *obj_def);
+
+/*
+ * ======== dcd_get_objects ========
+ * Purpose:
+ * This function finds all DCD objects specified in a special
+ * COFF section called ".dcd_register", and for each object,
+ * call a "register" function. The "register" function may perform
+ * various actions, such as 1) register nodes in the node database, 2)
+ * unregister nodes from the node database, and 3) add overlay nodes.
+ * Parameters:
+ * hdcd_mgr: A DCD manager handle.
+ * sz_coff_path: Pointer to name of COFF file containing DCD
+ * objects.
+ * register_fxn: Callback fxn to be applied on each located
+ * DCD object.
+ * handle: Handle to pass to callback.
+ * Returns:
+ * 0: Success.
+ * -EACCES: Unable to access/read/parse/load content of object code
+ * section.
+ * -EFAULT: Invalid DCD_HMANAGER handle.
+ * Requires:
+ * DCD initialized.
+ * Ensures:
+ * Note:
+ * Due to the DCD database construction, it is essential for a DCD-enabled
+ * COFF file to contain the right COFF sections, especially
+ * ".dcd_register", which is used for auto registration.
+ */
+extern int dcd_get_objects(struct dcd_manager *hdcd_mgr,
+ char *sz_coff_path,
+ dcd_registerfxn register_fxn, void *handle);
+
+/*
+ * ======== dcd_init ========
+ * Purpose:
+ * This function initializes DCD.
+ * Parameters:
+ * Returns:
+ * FALSE: Initialization failed.
+ * TRUE: Initialization succeeded.
+ * Requires:
+ * Ensures:
+ * DCD initialized.
+ */
+extern bool dcd_init(void);
+
+/*
+ * ======== dcd_register_object ========
+ * Purpose:
+ * This function registers a DSP/BIOS Bridge object in the DCD database.
+ * Parameters:
+ * uuid_obj: Pointer to a dsp_uuid that identifies a DSP/BIOS
+ * Bridge object.
+ * obj_type: Type of object.
+ * psz_path_name: Path to the object's COFF file.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Failed to register object.
+ * Requires:
+ * DCD initialized.
+ * uuid_obj and psz_path_name are non-NULL values.
+ * obj_type is a valid type value.
+ * Ensures:
+ */
+extern int dcd_register_object(struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type,
+ char *psz_path_name);
+
+/*
+ * ======== dcd_unregister_object ========
+ * Purpose:
+ * This function de-registers a valid DSP/BIOS Bridge object from the DCD
+ * database.
+ * Parameters:
+ * uuid_obj: Pointer to a dsp_uuid that identifies a DSP/BIOS Bridge
+ * object.
+ * obj_type: Type of object.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Unable to de-register the specified object.
+ * Requires:
+ * DCD initialized.
+ * uuid_obj is a non-NULL value.
+ * obj_type is a valid type value.
+ * Ensures:
+ */
+extern int dcd_unregister_object(struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type);
+
+#endif /* DBDCD_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h b/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
new file mode 100644
index 00000000000..1daa4b57b73
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
@@ -0,0 +1,78 @@
+/*
+ * dbdcddef.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DCD (DSP/BIOS Bridge Configuration Database) constants and types.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBDCDDEF_
+#define DBDCDDEF_
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/mgrpriv.h> /* for mgr_processorextinfo */
+
+/*
+ * The following defines are critical elements for the DCD module:
+ *
+ * - DCD_REGKEY enables DCD functions to locate registered DCD objects.
+ * - DCD_REGISTER_SECTION identifies the COFF section where the UUID of
+ * registered DCD objects are stored.
+ */
+#define DCD_REGKEY "Software\\TexasInstruments\\DspBridge\\DCD"
+#define DCD_REGISTER_SECTION ".dcd_register"
+
+#define DCD_MAXPATHLENGTH 255
+
+/* DCD Manager Object */
+struct dcd_manager;
+
+struct dcd_key_elem {
+ struct list_head link; /* Make it linked to a list */
+ char name[DCD_MAXPATHLENGTH]; /* Name of a given value entry */
+ char *path; /* Pointer to the actual data */
+};
+
+/* DCD Node Properties */
+struct dcd_nodeprops {
+ struct dsp_ndbprops ndb_props;
+ u32 msg_segid;
+ u32 msg_notify_type;
+ char *pstr_create_phase_fxn;
+ char *pstr_delete_phase_fxn;
+ char *pstr_execute_phase_fxn;
+ char *pstr_i_alg_name;
+
+ /* Dynamic load properties */
+ u16 us_load_type; /* Static, dynamic, overlay */
+ u32 ul_data_mem_seg_mask; /* Data memory requirements */
+ u32 ul_code_mem_seg_mask; /* Code memory requirements */
+};
+
+/* DCD Generic Object Type */
+struct dcd_genericobj {
+ union dcd_obj {
+ struct dcd_nodeprops node_obj; /* node object. */
+ /* processor object. */
+ struct dsp_processorinfo proc_info;
+ /* extended proc object (private) */
+ struct mgr_processorextinfo ext_proc_obj;
+ } obj_data;
+};
+
+/* DCD Internal Callback Type */
+typedef int(*dcd_registerfxn) (struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type,
+ void *handle);
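+
+/*
+ * Illustrative callback (editorial note, not part of the original header):
+ * a function with this shape can be handed to dcd_get_objects() in
+ * dbdcd.h, which applies it to every object found in the
+ * DCD_REGISTER_SECTION of a COFF file.
+ *
+ *	static int count_nodes(struct dsp_uuid *uuid_obj,
+ *			       enum dsp_dcdobjtype obj_type, void *handle)
+ *	{
+ *		u32 *count = handle;
+ *
+ *		if (obj_type == DSP_DCDNODETYPE)
+ *			(*count)++;
+ *		return 0;
+ *	}
+ */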
+
+#endif /* DBDCDDEF_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
new file mode 100644
index 00000000000..5af075def87
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
@@ -0,0 +1,514 @@
+/*
+ * dbdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global definitions and constants for DSP/BIOS Bridge.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBDEFS_
+#define DBDEFS_
+
+#include <linux/types.h>
+
+#include <dspbridge/rms_sh.h> /* Types shared between GPP and DSP */
+
+#define PG_SIZE4K 4096
+#define PG_MASK(pg_size) (~((pg_size)-1))
+#define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size))
+#define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size))
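+
+/*
+ * Editorial example of the helpers above, for a 32-bit address and the
+ * 4 KB page size:
+ *	PG_MASK(PG_SIZE4K)                == 0xfffff000
+ *	PG_ALIGN_LOW(0x12345, PG_SIZE4K)  == 0x12000
+ *	PG_ALIGN_HIGH(0x12345, PG_SIZE4K) == 0x13000
+ */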
+
+/* API return value and calling convention */
+#define DBAPI int
+
+/* Infinite time value for the utimeout parameter to DSPStream_Select() */
+#define DSP_FOREVER (-1)
+
+/* Maximum length of node name, used in dsp_ndbprops */
+#define DSP_MAXNAMELEN 32
+
+/* notify_type values for the RegisterNotify() functions. */
+#define DSP_SIGNALEVENT 0x00000001
+
+/* Types of events for processors */
+#define DSP_PROCESSORSTATECHANGE 0x00000001
+#define DSP_PROCESSORATTACH 0x00000002
+#define DSP_PROCESSORDETACH 0x00000004
+#define DSP_PROCESSORRESTART 0x00000008
+
+/* DSP exception events (DSP/BIOS and DSP MMU fault) */
+#define DSP_MMUFAULT 0x00000010
+#define DSP_SYSERROR 0x00000020
+#define DSP_EXCEPTIONABORT 0x00000300
+#define DSP_PWRERROR 0x00000080
+#define DSP_WDTOVERFLOW 0x00000040
+
+/* IVA exception events (IVA MMU fault) */
+#define IVA_MMUFAULT 0x00000040
+/* Types of events for nodes */
+#define DSP_NODESTATECHANGE 0x00000100
+#define DSP_NODEMESSAGEREADY 0x00000200
+
+/* Types of events for streams */
+#define DSP_STREAMDONE 0x00001000
+#define DSP_STREAMIOCOMPLETION 0x00002000
+
+/* Handle definition representing the GPP node in DSPNode_Connect() calls */
+#define DSP_HGPPNODE 0xFFFFFFFF
+
+/* Node directions used in DSPNode_Connect() */
+#define DSP_TONODE 1
+#define DSP_FROMNODE 2
+
+/* Define Node Minimum and Maximum Priorities */
+#define DSP_NODE_MIN_PRIORITY 1
+#define DSP_NODE_MAX_PRIORITY 15
+
+/* Pre-Defined Message Command Codes available to user: */
+#define DSP_RMSUSERCODESTART RMS_USER /* Start of RMS user cmd codes */
+/* end of user codes */
+#define DSP_RMSUSERCODEEND (RMS_USER + RMS_MAXUSERCODES)
+/* msg_ctrl contains SM buffer description */
+#define DSP_RMSBUFDESC RMS_BUFDESC
+
+/* Shared memory identifier for MEM segment named "SHMSEG0" */
+#define DSP_SHMSEG0 (u32)(-1)
+
+/* Processor ID numbers */
+#define DSP_UNIT 0
+#define IVA_UNIT 1
+
+#define DSPWORD unsigned char
+#define DSPWORDSIZE sizeof(DSPWORD)
+
+/* Power control enumerations */
+#define PROC_PWRCONTROL 0x8070
+
+#define PROC_PWRMGT_ENABLE (PROC_PWRCONTROL + 0x3)
+#define PROC_PWRMGT_DISABLE (PROC_PWRCONTROL + 0x4)
+
+/* Bridge Code Version */
+#define BRIDGE_VERSION_CODE 333
+
+#define MAX_PROFILES 16
+
+/* DSP chip type */
+#define DSPTYPE64 0x99
+
+/* Handy Macros */
+#define VALID_PROC_EVENT (DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | \
+ DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_NODESTATECHANGE | \
+ DSP_STREAMDONE | DSP_STREAMIOCOMPLETION | DSP_MMUFAULT | \
+ DSP_SYSERROR | DSP_WDTOVERFLOW | DSP_PWRERROR)
+
+static inline bool is_valid_proc_event(u32 x)
+{
+ return (x == 0 || (x & VALID_PROC_EVENT && !(x & ~VALID_PROC_EVENT)));
+}
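+
+/*
+ * Editorial examples: a mask is accepted only when every set bit is one of
+ * the processor, node, stream or error events listed in VALID_PROC_EVENT:
+ *	is_valid_proc_event(DSP_MMUFAULT | DSP_SYSERROR)  -> true
+ *	is_valid_proc_event(DSP_EXCEPTIONABORT)           -> false
+ * (0x00000300 carries bit 0x200, which is not part of VALID_PROC_EVENT).
+ */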
+
+/* The Node UUID structure */
+struct dsp_uuid {
+ u32 ul_data1;
+ u16 us_data2;
+ u16 us_data3;
+ u8 uc_data4;
+ u8 uc_data5;
+ u8 uc_data6[6];
+};
+
+/* DCD types */
+enum dsp_dcdobjtype {
+ DSP_DCDNODETYPE,
+ DSP_DCDPROCESSORTYPE,
+ DSP_DCDLIBRARYTYPE,
+ DSP_DCDCREATELIBTYPE,
+ DSP_DCDEXECUTELIBTYPE,
+ DSP_DCDDELETELIBTYPE,
+ /* DSP_DCDMAXOBJTYPE is meant to be the last DCD object type */
+ DSP_DCDMAXOBJTYPE
+};
+
+/* Processor states */
+enum dsp_procstate {
+ PROC_STOPPED,
+ PROC_LOADED,
+ PROC_RUNNING,
+ PROC_ERROR
+};
+
+/*
+ * Node types: Message node, task node, xDAIS socket node, and
+ * device node. _NODE_GPP is used when defining a stream connection
+ * between a task or socket node and the GPP.
+ *
+ */
+enum node_type {
+ NODE_DEVICE,
+ NODE_TASK,
+ NODE_DAISSOCKET,
+ NODE_MESSAGE,
+ NODE_GPP
+};
+
+/*
+ * ======== node_state ========
+ * Internal node states.
+ */
+enum node_state {
+ NODE_ALLOCATED,
+ NODE_CREATED,
+ NODE_RUNNING,
+ NODE_PAUSED,
+ NODE_DONE,
+ NODE_CREATING,
+ NODE_STARTING,
+ NODE_PAUSING,
+ NODE_TERMINATING,
+ NODE_DELETING,
+};
+
+/* Stream states */
+enum dsp_streamstate {
+ STREAM_IDLE,
+ STREAM_READY,
+ STREAM_PENDING,
+ STREAM_DONE
+};
+
+/* Stream connect types */
+enum dsp_connecttype {
+ CONNECTTYPE_NODEOUTPUT,
+ CONNECTTYPE_GPPOUTPUT,
+ CONNECTTYPE_NODEINPUT,
+ CONNECTTYPE_GPPINPUT
+};
+
+/* Stream mode types */
+enum dsp_strmmode {
+ STRMMODE_PROCCOPY, /* Processor(s) copy stream data payloads */
+ STRMMODE_ZEROCOPY, /* Strm buffer ptrs swapped no data copied */
+ STRMMODE_LDMA, /* Local DMA : OMAP's System-DMA device */
+ STRMMODE_RDMA /* Remote DMA: OMAP's DSP-DMA device */
+};
+
+/* Resource Types */
+enum dsp_resourceinfotype {
+ DSP_RESOURCE_DYNDARAM = 0,
+ DSP_RESOURCE_DYNSARAM,
+ DSP_RESOURCE_DYNEXTERNAL,
+ DSP_RESOURCE_DYNSRAM,
+ DSP_RESOURCE_PROCLOAD
+};
+
+/* Memory Segment Types */
+enum dsp_memtype {
+ DSP_DYNDARAM = 0,
+ DSP_DYNSARAM,
+ DSP_DYNEXTERNAL,
+ DSP_DYNSRAM
+};
+
+/* Memory Flush Types */
+enum dsp_flushtype {
+ PROC_INVALIDATE_MEM = 0,
+ PROC_WRITEBACK_MEM,
+ PROC_WRITEBACK_INVALIDATE_MEM,
+};
+
+/* Memory Segment Status Values */
+struct dsp_memstat {
+ u32 ul_size;
+ u32 ul_total_free_size;
+ u32 ul_len_max_free_block;
+ u32 ul_num_free_blocks;
+ u32 ul_num_alloc_blocks;
+};
+
+/* Processor Load information Values */
+struct dsp_procloadstat {
+ u32 curr_load;
+ u32 predicted_load;
+ u32 curr_dsp_freq;
+ u32 predicted_freq;
+};
+
+/* Attributes for STRM connections between nodes */
+struct dsp_strmattr {
+ u32 seg_id; /* Memory segment on DSP to allocate buffers */
+ u32 buf_size; /* Buffer size (DSP words) */
+ u32 num_bufs; /* Number of buffers */
+ u32 buf_alignment; /* Buffer alignment */
+ u32 utimeout; /* Timeout for blocking STRM calls */
+ enum dsp_strmmode strm_mode; /* mode of stream when opened */
+ /* DMA chnl id if dsp_strmmode is LDMA or RDMA */
+ u32 udma_chnl_id;
+ u32 udma_priority; /* DMA channel priority 0=lowest, >0=high */
+};
+
+/* The dsp_cbdata structure */
+struct dsp_cbdata {
+ u32 cb_data;
+ u8 node_data[1];
+};
+
+/* The dsp_msg structure */
+struct dsp_msg {
+ u32 dw_cmd;
+ u32 dw_arg1;
+ u32 dw_arg2;
+};
+
+/* The dsp_resourcereqmts structure for node's resource requirements */
+struct dsp_resourcereqmts {
+ u32 cb_struct;
+ u32 static_data_size;
+ u32 global_data_size;
+ u32 program_mem_size;
+ u32 uwc_execution_time;
+ u32 uwc_period;
+ u32 uwc_deadline;
+ u32 avg_exection_time;
+ u32 minimum_period;
+};
+
+/*
+ * The dsp_streamconnect structure describes a stream connection
+ * between two nodes, or between a node and the GPP
+ */
+struct dsp_streamconnect {
+ u32 cb_struct;
+ enum dsp_connecttype connect_type;
+ u32 this_node_stream_index;
+ void *connected_node;
+ struct dsp_uuid ui_connected_node_id;
+ u32 connected_node_stream_index;
+};
+
+struct dsp_nodeprofs {
+ u32 ul_heap_size;
+};
+
+/* The dsp_ndbprops structure reports the attributes of a node */
+struct dsp_ndbprops {
+ u32 cb_struct;
+ struct dsp_uuid ui_node_id;
+ char ac_name[DSP_MAXNAMELEN];
+ enum node_type ntype;
+ u32 cache_on_gpp;
+ struct dsp_resourcereqmts dsp_resource_reqmts;
+ s32 prio;
+ u32 stack_size;
+ u32 sys_stack_size;
+ u32 stack_seg;
+ u32 message_depth;
+ u32 num_input_streams;
+ u32 num_output_streams;
+ u32 utimeout;
+ u32 count_profiles; /* Number of supported profiles */
+ /* Array of profiles */
+ struct dsp_nodeprofs node_profiles[MAX_PROFILES];
+ u32 stack_seg_name; /* Stack Segment Name */
+};
+
+ /* The dsp_nodeattrin structure describes the attributes of a
+ * node client */
+struct dsp_nodeattrin {
+ u32 cb_struct;
+ s32 prio;
+ u32 utimeout;
+ u32 profile_id;
+ /* Reserved, for Bridge Internal use only */
+ u32 heap_size;
+ void *pgpp_virt_addr; /* Reserved, for Bridge Internal use only */
+};
+
+ /* The dsp_nodeinfo structure is used to retrieve information
+ * about a node */
+struct dsp_nodeinfo {
+ u32 cb_struct;
+ struct dsp_ndbprops nb_node_database_props;
+ u32 execution_priority;
+ enum node_state ns_execution_state;
+ void *device_owner;
+ u32 number_streams;
+ struct dsp_streamconnect sc_stream_connection[16];
+ u32 node_env;
+};
+
+ /* The dsp_nodeattr structure describes the attributes of a node */
+struct dsp_nodeattr {
+ u32 cb_struct;
+ struct dsp_nodeattrin in_node_attr_in;
+ u32 node_attr_inputs;
+ u32 node_attr_outputs;
+ struct dsp_nodeinfo node_info;
+};
+
+/*
+ * Notification type: either the name of an opened event, or an event or
+ * window handle.
+ */
+struct dsp_notification {
+ char *ps_name;
+ void *handle;
+};
+
+/* The dsp_processorattrin structure describes the attributes of a processor */
+struct dsp_processorattrin {
+ u32 cb_struct;
+ u32 utimeout;
+};
+/*
+ * The dsp_processorinfo structure describes basic capabilities of a
+ * DSP processor
+ */
+struct dsp_processorinfo {
+ u32 cb_struct;
+ int processor_family;
+ int processor_type;
+ u32 clock_rate;
+ u32 ul_internal_mem_size;
+ u32 ul_external_mem_size;
+ u32 processor_id;
+ int ty_running_rtos;
+ s32 node_min_priority;
+ s32 node_max_priority;
+};
+
+/* Error information of last DSP exception signalled to the GPP */
+struct dsp_errorinfo {
+ u32 dw_err_mask;
+ u32 dw_val1;
+ u32 dw_val2;
+ u32 dw_val3;
+};
+
+/* The dsp_processorstate structure describes the state of a DSP processor */
+struct dsp_processorstate {
+ u32 cb_struct;
+ enum dsp_procstate proc_state;
+};
+
+/*
+ * The dsp_resourceinfo structure is used to retrieve information about a
+ * processor's resources
+ */
+struct dsp_resourceinfo {
+ u32 cb_struct;
+ enum dsp_resourceinfotype resource_type;
+ union {
+ u32 ul_resource;
+ struct dsp_memstat mem_stat;
+ struct dsp_procloadstat proc_load_stat;
+ } result;
+};
+
+/*
+ * The dsp_streamattrin structure describes the attributes of a stream,
+ * including segment and alignment of data buffers allocated with
+ * DSPStream_AllocateBuffers(), if applicable
+ */
+struct dsp_streamattrin {
+ u32 cb_struct;
+ u32 utimeout;
+ u32 segment_id;
+ u32 buf_alignment;
+ u32 num_bufs;
+ enum dsp_strmmode strm_mode;
+ u32 udma_chnl_id;
+ u32 udma_priority;
+};
+
+/* The dsp_bufferattr structure describes the attributes of a data buffer */
+struct dsp_bufferattr {
+ u32 cb_struct;
+ u32 segment_id;
+ u32 buf_alignment;
+};
+
+/*
+ * The dsp_streaminfo structure is used to retrieve information
+ * about a stream.
+ */
+struct dsp_streaminfo {
+ u32 cb_struct;
+ u32 number_bufs_allowed;
+ u32 number_bufs_in_stream;
+ u32 ul_number_bytes;
+ void *sync_object_handle;
+ enum dsp_streamstate ss_stream_state;
+};
+
+/*
+ * DMM MAP attributes
+ * It is a bit mask with each bit value indicating a specific attribute
+ * bit 0 - GPP address type (user virtual=0, physical=1)
+ * bit 1 - MMU Endianism (Big Endian=1, Little Endian=0)
+ * bit 2 - MMU mixed page attribute (Mixed/CPUES=1, TLBES=0)
+ * bit 3 - MMU element size = 8bit (valid only for non mixed page entries)
+ * bit 4 - MMU element size = 16bit (valid only for non mixed page entries)
+ * bit 5 - MMU element size = 32bit (valid only for non mixed page entries)
+ * bit 6 - MMU element size = 64bit (valid only for non mixed page entries)
+ *
+ * bit 14 - Input (read only) buffer
+ * bit 15 - Output (writeable) buffer
+ */
+
+/* Types of mapping attributes */
+
+/* MPU address is virtual and needs to be translated to physical addr */
+#define DSP_MAPVIRTUALADDR 0x00000000
+#define DSP_MAPPHYSICALADDR 0x00000001
+
+/* Mapped data is big endian */
+#define DSP_MAPBIGENDIAN 0x00000002
+#define DSP_MAPLITTLEENDIAN 0x00000000
+
+/* Element size is based on DSP r/w access size */
+#define DSP_MAPMIXEDELEMSIZE 0x00000004
+
+/*
+ * Element size for MMU mapping (8, 16, 32, or 64 bit)
+ * Ignored if DSP_MAPMIXEDELEMSIZE enabled
+ */
+#define DSP_MAPELEMSIZE8 0x00000008
+#define DSP_MAPELEMSIZE16 0x00000010
+#define DSP_MAPELEMSIZE32 0x00000020
+#define DSP_MAPELEMSIZE64 0x00000040
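+
+/*
+ * Editorial example: the attributes are OR-ed together, so a user-virtual,
+ * little-endian buffer mapped with 32-bit MMU elements is described by
+ *	DSP_MAPVIRTUALADDR | DSP_MAPLITTLEENDIAN | DSP_MAPELEMSIZE32
+ */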
+
+#define DSP_MAPVMALLOCADDR 0x00000080
+
+#define DSP_MAPDONOTLOCK 0x00000100
+
+#define DSP_MAP_DIR_MASK 0x3FFF
+
+#define GEM_CACHE_LINE_SIZE 128
+#define GEM_L1P_PREFETCH_SIZE 128
+
+/*
+ * Definitions from dbreg.h
+ */
+
+#define DSPPROCTYPE_C64 6410
+#define IVAPROCTYPE_ARM7 470
+
+#define REG_MGR_OBJECT 1
+#define REG_DRV_OBJECT 2
+
+/* registry */
+#define DRVOBJECT "DrvObject"
+#define MGROBJECT "MgrObject"
+
+/* Max registry path length. Also the max registry value length. */
+#define MAXREGPATHLENGTH 255
+
+#endif /* DBDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h b/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h
new file mode 100644
index 00000000000..bf4fb99529a
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h
@@ -0,0 +1,141 @@
+/*
+ * dbldefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBLDEFS_
+#define DBLDEFS_
+
+/*
+ * Bit masks for dbl_flags.
+ */
+#define DBL_NOLOAD 0x0 /* Don't load symbols, code, or data */
+#define DBL_SYMB 0x1 /* load symbols */
+#define DBL_CODE 0x2 /* load code */
+#define DBL_DATA 0x4 /* load data */
+#define DBL_DYNAMIC 0x8 /* dynamic load */
+#define DBL_BSS 0x20 /* Uninitialized section */
+
+#define DBL_MAXPATHLENGTH 255
+
+/*
+ * ======== dbl_flags ========
+ * Specifies whether to load code, data, or symbols
+ */
+typedef s32 dbl_flags;
+
+/*
+ * ======== dbl_sect_info ========
+ * For collecting info on overlay sections
+ */
+struct dbl_sect_info {
+ const char *name; /* name of section */
+ u32 sect_run_addr; /* run address of section */
+ u32 sect_load_addr; /* load address of section */
+ u32 size; /* size of section (target MAUs) */
+ dbl_flags type; /* Code, data, or BSS */
+};
+
+/*
+ * ======== dbl_symbol ========
+ * (Needed for dynamic load library)
+ */
+struct dbl_symbol {
+ u32 value;
+};
+
+/*
+ * ======== dbl_alloc_fxn ========
+ * Allocate memory function. Allocate or reserve (if reserved == TRUE)
+ * "size" bytes of memory from segment "space" and return the address in
+ * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
+ * success, or an error code on failure.
+ */
+typedef s32(*dbl_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
+ u32 *dsp_address, s32 seg_id, s32 req,
+ bool reserved);
+
+/*
+ * ======== dbl_free_fxn ========
+ * Free memory function. Free, or unreserve (if reserved == TRUE) "size"
+ * bytes of memory from segment "space"
+ */
+typedef bool(*dbl_free_fxn) (void *hdl, u32 addr, s32 space, u32 size,
+ bool reserved);
+
+/*
+ * ======== dbl_log_write_fxn ========
+ * Function to call when writing data from a section, to log the info.
+ * Can be NULL if no logging is required.
+ */
+typedef int(*dbl_log_write_fxn) (void *handle,
+ struct dbl_sect_info *sect, u32 addr,
+ u32 bytes);
+
+/*
+ * ======== dbl_sym_lookup ========
+ * Symbol lookup function - Find the symbol name and return its value.
+ *
+ * Parameters:
+ * handle - Opaque handle
+ * parg - Opaque argument.
+ * name - Name of symbol to lookup.
+ * sym - Location to store address of symbol structure.
+ *
+ * Returns:
+ * TRUE: Success (symbol was found).
+ * FALSE: Failed to find symbol.
+ */
+typedef bool(*dbl_sym_lookup) (void *handle, void *parg, void *rmm_handle,
+ const char *name, struct dbl_symbol ** sym);
+
+/*
+ * ======== dbl_write_fxn ========
+ * Write memory function. Write "n" HOST bytes of memory to segment "mtype"
+ * starting at address "dsp_address" from the buffer "buf". The buffer is
+ * formatted as an array of words appropriate for the DSP.
+ */
+typedef s32(*dbl_write_fxn) (void *hdl, u32 dsp_address, void *buf,
+ u32 n, s32 mtype);
+
+/*
+ * ======== dbl_attrs ========
+ */
+struct dbl_attrs {
+ dbl_alloc_fxn alloc;
+ dbl_free_fxn free;
+ void *rmm_handle; /* Handle to pass to alloc, free functions */
+ dbl_write_fxn write;
+ void *input_params; /* Handle to pass to write, cinit function */
+
+ dbl_log_write_fxn log_write;
+ void *log_write_handle;
+
+ /* Symbol matching function and handle to pass to it */
+ dbl_sym_lookup sym_lookup;
+ void *sym_handle;
+ void *sym_arg;
+
+ /*
+ * These file manipulation functions should be compatible with the
+ * "C" run time library functions of the same name.
+ */
+ s32(*fread) (void *, size_t, size_t, void *);
+ s32(*fseek) (void *, long, int);
+ s32(*ftell) (void *);
+ s32(*fclose) (void *);
+ void *(*fopen) (const char *, const char *);
+};
+
+#endif /* DBLDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbll.h b/drivers/staging/tidspbridge/include/dspbridge/dbll.h
new file mode 100644
index 00000000000..b0186761466
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbll.h
@@ -0,0 +1,62 @@
+/*
+ * dbll.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Dynamic load library module interface. Function header
+ * comments are in the file dblldefs.h.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBLL_
+#define DBLL_
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dblldefs.h>
+
+extern bool symbols_reloaded;
+
+extern void dbll_close(struct dbll_library_obj *zl_lib);
+extern int dbll_create(struct dbll_tar_obj **target_obj,
+ struct dbll_attrs *pattrs);
+extern void dbll_delete(struct dbll_tar_obj *target);
+extern void dbll_exit(void);
+extern bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
+ struct dbll_sym_val **sym_val);
+extern void dbll_get_attrs(struct dbll_tar_obj *target,
+ struct dbll_attrs *pattrs);
+extern bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
+ struct dbll_sym_val **sym_val);
+extern int dbll_get_sect(struct dbll_library_obj *lib, char *name,
+ u32 *paddr, u32 *psize);
+extern bool dbll_init(void);
+extern int dbll_load(struct dbll_library_obj *lib,
+ dbll_flags flags,
+ struct dbll_attrs *attrs, u32 * entry);
+extern int dbll_load_sect(struct dbll_library_obj *zl_lib,
+ char *sec_name, struct dbll_attrs *attrs);
+extern int dbll_open(struct dbll_tar_obj *target, char *file,
+ dbll_flags flags,
+ struct dbll_library_obj **lib_obj);
+extern int dbll_read_sect(struct dbll_library_obj *lib,
+ char *name, char *buf, u32 size);
+extern void dbll_set_attrs(struct dbll_tar_obj *target,
+ struct dbll_attrs *pattrs);
+extern void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs);
+extern int dbll_unload_sect(struct dbll_library_obj *lib,
+ char *sect_name, struct dbll_attrs *attrs);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
+ u32 offset_range, u32 *sym_addr_output, char *name_output);
+#endif
+
+#endif /* DBLL_ */
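+
+/*
+ * Illustrative load/unload flow (editorial note, not part of the original
+ * header); the semantics of each call are documented in dblldefs.h. The
+ * attrs structure is assumed to have been filled in by the caller, and the
+ * library path is a placeholder.
+ *
+ *	struct dbll_tar_obj *target;
+ *	struct dbll_library_obj *lib;
+ *	struct dbll_attrs attrs;
+ *	u32 entry;
+ *	int status;
+ *
+ *	status = dbll_create(&target, &attrs);
+ *	if (!status)
+ *		status = dbll_open(target, "/lib/dsp/node.dll64P",
+ *				   DBLL_SYMB, &lib);
+ *	if (!status)
+ *		status = dbll_load(lib, DBLL_CODE | DBLL_DATA | DBLL_SYMB,
+ *				   &attrs, &entry);
+ *
+ *	... run the loaded code ...
+ *
+ *	dbll_unload(lib, &attrs);
+ *	dbll_close(lib);
+ *	dbll_delete(target);
+ */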
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h b/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
new file mode 100644
index 00000000000..d2b4fda3429
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
@@ -0,0 +1,496 @@
+/*
+ * dblldefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DBLLDEFS_
+#define DBLLDEFS_
+
+/*
+ * Bit masks for dbll_flags.
+ */
+#define DBLL_NOLOAD 0x0 /* Don't load symbols, code, or data */
+#define DBLL_SYMB 0x1 /* load symbols */
+#define DBLL_CODE 0x2 /* load code */
+#define DBLL_DATA 0x4 /* load data */
+#define DBLL_DYNAMIC 0x8 /* dynamic load */
+#define DBLL_BSS 0x20 /* Uninitialized section */
+
+#define DBLL_MAXPATHLENGTH 255
+
+/*
+ * ======== DBLL_Target ========
+ *
+ */
+struct dbll_tar_obj;
+
+/*
+ * ======== dbll_flags ========
+ * Specifies whether to load code, data, or symbols
+ */
+typedef s32 dbll_flags;
+
+/*
+ * ======== DBLL_Library ========
+ *
+ */
+struct dbll_library_obj;
+
+/*
+ * ======== dbll_sect_info ========
+ * For collecting info on overlay sections
+ */
+struct dbll_sect_info {
+ const char *name; /* name of section */
+ u32 sect_run_addr; /* run address of section */
+ u32 sect_load_addr; /* load address of section */
+ u32 size; /* size of section (target MAUs) */
+ dbll_flags type; /* Code, data, or BSS */
+};
+
+/*
+ * ======== dbll_sym_val ========
+ * (Needed for dynamic load library)
+ */
+struct dbll_sym_val {
+ u32 value;
+};
+
+/*
+ * ======== dbll_alloc_fxn ========
+ * Allocate memory function. Allocate or reserve (if reserved == TRUE)
+ * "size" bytes of memory from segment "space" and return the address in
+ * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
+ * success, or an error code on failure.
+ */
+typedef s32(*dbll_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
+ u32 *dsp_address, s32 seg_id, s32 req,
+ bool reserved);
+
+/*
+ * ======== dbll_close_fxn ========
+ */
+typedef s32(*dbll_f_close_fxn) (void *);
+
+/*
+ * ======== dbll_free_fxn ========
+ * Free memory function. Free, or unreserve (if reserved == TRUE) "size"
+ * bytes of memory from segment "space"
+ */
+typedef bool(*dbll_free_fxn) (void *hdl, u32 addr, s32 space, u32 size,
+ bool reserved);
+
+/*
+ * ======== dbll_f_open_fxn ========
+ */
+typedef void *(*dbll_f_open_fxn) (const char *, const char *);
+
+/*
+ * ======== dbll_log_write_fxn ========
+ * Function to call when writing data from a section, to log the info.
+ * Can be NULL if no logging is required.
+ */
+typedef int(*dbll_log_write_fxn) (void *handle,
+ struct dbll_sect_info *sect, u32 addr,
+ u32 bytes);
+
+/*
+ * ======== dbll_read_fxn ========
+ */
+typedef s32(*dbll_read_fxn) (void *, size_t, size_t, void *);
+
+/*
+ * ======== dbll_seek_fxn ========
+ */
+typedef s32(*dbll_seek_fxn) (void *, long, int);
+
+/*
+ * ======== dbll_sym_lookup ========
+ * Symbol lookup function - Find the symbol name and return its value.
+ *
+ * Parameters:
+ * handle - Opaque handle
+ * parg - Opaque argument.
+ * name - Name of symbol to lookup.
+ * sym - Location to store address of symbol structure.
+ *
+ * Returns:
+ * TRUE: Success (symbol was found).
+ * FALSE: Failed to find symbol.
+ */
+typedef bool(*dbll_sym_lookup) (void *handle, void *parg, void *rmm_handle,
+ const char *name, struct dbll_sym_val ** sym);
+
+/*
+ * ======== dbll_tell_fxn ========
+ */
+typedef s32(*dbll_tell_fxn) (void *);
+
+/*
+ * ======== dbll_write_fxn ========
+ * Write memory function. Write "n" HOST bytes of memory to segment "mtype"
+ * starting at address "dsp_address" from the buffer "buf". The buffer is
+ * formatted as an array of words appropriate for the DSP.
+ */
+typedef s32(*dbll_write_fxn) (void *hdl, u32 dsp_address, void *buf,
+ u32 n, s32 mtype);
+
+/*
+ * ======== dbll_attrs ========
+ */
+struct dbll_attrs {
+ dbll_alloc_fxn alloc;
+ dbll_free_fxn free;
+ void *rmm_handle; /* Handle to pass to alloc, free functions */
+ dbll_write_fxn write;
+ void *input_params; /* Handle to pass to write, cinit function */
+ bool base_image;
+ dbll_log_write_fxn log_write;
+ void *log_write_handle;
+
+ /* Symbol matching function and handle to pass to it */
+ dbll_sym_lookup sym_lookup;
+ void *sym_handle;
+ void *sym_arg;
+
+ /*
+ * These file manipulation functions should be compatible with the
+ * "C" run time library functions of the same name.
+ */
+ s32(*fread) (void *, size_t, size_t, void *);
+ s32(*fseek) (void *, long, int);
+ s32(*ftell) (void *);
+ s32(*fclose) (void *);
+ void *(*fopen) (const char *, const char *);
+};
+
+/*
+ * ======== dbll_close ========
+ * Close library opened with dbll_open.
+ * Parameters:
+ * lib - Handle returned from dbll_open().
+ * Returns:
+ * Requires:
+ * DBL initialized.
+ * Valid lib.
+ * Ensures:
+ */
+typedef void (*dbll_close_fxn) (struct dbll_library_obj *library);
+
+/*
+ * ======== dbll_create ========
+ * Create a target object, specifying the alloc, free, and write functions.
+ * Parameters:
+ * target_obj - Location to store target handle on output.
+ * pattrs - Attributes.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failed.
+ * Requires:
+ * DBL initialized.
+ * pattrs != NULL.
+ * target_obj != NULL;
+ * Ensures:
+ * Success: *target_obj != NULL.
+ * Failure: *target_obj == NULL.
+ */
+typedef int(*dbll_create_fxn) (struct dbll_tar_obj **target_obj,
+ struct dbll_attrs *attrs);
+
+/*
+ * ======== dbll_delete ========
+ * Delete target object and free resources for any loaded libraries.
+ * Parameters:
+ * target - Handle returned from DBLL_Create().
+ * Returns:
+ * Requires:
+ * DBL initialized.
+ * Valid target.
+ * Ensures:
+ */
+typedef void (*dbll_delete_fxn) (struct dbll_tar_obj *target);
+
+/*
+ * ======== dbll_exit ========
+ * Discontinue use of DBL module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * refs > 0.
+ * Ensures:
+ * refs >= 0.
+ */
+typedef void (*dbll_exit_fxn) (void);
+
+/*
+ * ======== dbll_get_addr ========
+ * Get address of name in the specified library.
+ * Parameters:
+ * lib - Handle returned from dbll_open().
+ * name - Name of symbol
+ * sym_val - Location to store symbol address on output.
+ * Returns:
+ * TRUE: Success.
+ * FALSE: Symbol not found.
+ * Requires:
+ * DBL initialized.
+ * Valid library.
+ * name != NULL.
+ * sym_val != NULL.
+ * Ensures:
+ */
+typedef bool(*dbll_get_addr_fxn) (struct dbll_library_obj *lib, char *name,
+ struct dbll_sym_val **sym_val);
+
+/*
+ * ======== dbll_get_attrs ========
+ * Retrieve the attributes of the target.
+ * Parameters:
+ * target - Handle returned from DBLL_Create().
+ * pattrs - Location to store attributes on output.
+ * Returns:
+ * Requires:
+ * DBL initialized.
+ * Valid target.
+ * pattrs != NULL.
+ * Ensures:
+ */
+typedef void (*dbll_get_attrs_fxn) (struct dbll_tar_obj *target,
+ struct dbll_attrs *attrs);
+
+/*
+ * ======== dbll_get_c_addr ========
+ * Get address of "C" name on the specified library.
+ * Parameters:
+ * lib - Handle returned from dbll_open().
+ * name - Name of symbol
+ * sym_val - Location to store symbol address on output.
+ * Returns:
+ * TRUE: Success.
+ * FALSE: Symbol not found.
+ * Requires:
+ * DBL initialized.
+ * Valid target.
+ * name != NULL.
+ * sym_val != NULL.
+ * Ensures:
+ */
+typedef bool(*dbll_get_c_addr_fxn) (struct dbll_library_obj *lib, char *name,
+ struct dbll_sym_val **sym_val);
+
+/*
+ * ======== dbll_get_sect ========
+ * Get address and size of a named section.
+ * Parameters:
+ * lib - Library handle returned from dbll_open().
+ * name - Name of section.
+ * paddr - Location to store section address on output.
+ * psize - Location to store section size on output.
+ * Returns:
+ * 0: Success.
+ * -ENXIO: Section not found.
+ * Requires:
+ * DBL initialized.
+ * Valid lib.
+ * name != NULL.
+ * paddr != NULL;
+ * psize != NULL.
+ * Ensures:
+ */
+typedef int(*dbll_get_sect_fxn) (struct dbll_library_obj *lib,
+ char *name, u32 * addr, u32 * size);
+
+/*
+ * ======== dbll_init ========
+ * Initialize DBL module.
+ * Parameters:
+ * Returns:
+ * TRUE: Success.
+ * FALSE: Failure.
+ * Requires:
+ * refs >= 0.
+ * Ensures:
+ * Success: refs > 0.
+ * Failure: refs >= 0.
+ */
+typedef bool(*dbll_init_fxn) (void);
+
+/*
+ * ======== dbll_load ========
+ * Load library onto the target.
+ *
+ * Parameters:
+ * lib - Library handle returned from dbll_open().
+ * flags - Load code, data and/or symbols.
+ * attrs - May contain alloc, free, and write function.
+ * entry_pt - Location to store program entry on output.
+ * Returns:
+ * 0: Success.
+ * -EBADF: File read failed.
+ * -EILSEQ: Failure in dynamic loader library.
+ * Requires:
+ * DBL initialized.
+ * Valid lib.
+ * entry != NULL.
+ * Ensures:
+ */
+typedef int(*dbll_load_fxn) (struct dbll_library_obj *lib,
+ dbll_flags flags,
+ struct dbll_attrs *attrs, u32 *entry);
+
+/*
+ * ======== dbll_load_sect ========
+ * Load a named section from an library (for overlay support).
+ * Parameters:
+ * lib - Handle returned from dbll_open().
+ * sec_name - Name of section to load.
+ * attrs - Contains write function and handle to pass to it.
+ * Returns:
+ * 0: Success.
+ * -ENXIO: Section not found.
+ * -ENOSYS: Function not implemented.
+ * Requires:
+ * Valid lib.
+ * sec_name != NULL.
+ * attrs != NULL.
+ * attrs->write != NULL.
+ * Ensures:
+ */
+typedef int(*dbll_load_sect_fxn) (struct dbll_library_obj *lib,
+ char *sz_sect_name,
+ struct dbll_attrs *attrs);
+
+/*
+ * ======== dbll_open ========
+ * dbll_open() returns a library handle that can be used to load/unload
+ * the symbols/code/data via dbll_load()/dbll_unload().
+ * Parameters:
+ * target - Handle returned from dbll_create().
+ * file - Name of file to open.
+ * flags - If flags & DBLL_SYMB, load symbols.
+ * lib_obj - Location to store library handle on output.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failure.
+ * -EBADF: File open/read failure.
+ * Unable to determine target type.
+ * Requires:
+ * DBL initialized.
+ * Valid target.
+ * file != NULL.
+ * lib_obj != NULL.
+ * dbll_attrs fopen function non-NULL.
+ * Ensures:
+ * Success: Valid *lib_obj.
+ * Failure: *lib_obj == NULL.
+ */
+typedef int(*dbll_open_fxn) (struct dbll_tar_obj *target, char *file,
+ dbll_flags flags,
+ struct dbll_library_obj **lib_obj);
+
+/*
+ * ======== dbll_read_sect ========
+ * Read COFF section into a character buffer.
+ * Parameters:
+ * lib - Library handle returned from dbll_open().
+ * name - Name of section.
+ * pbuf - Buffer to write section contents into.
+ * size - Buffer size
+ * Returns:
+ * 0: Success.
+ * -ENXIO: Named section does not exist.
+ * Requires:
+ * DBL initialized.
+ * Valid lib.
+ * name != NULL.
+ * pbuf != NULL.
+ * size != 0.
+ * Ensures:
+ */
+typedef int(*dbll_read_sect_fxn) (struct dbll_library_obj *lib,
+ char *name, char *content,
+ u32 cont_size);
+
+/*
+ * ======== dbll_set_attrs ========
+ * Set the attributes of the target.
+ * Parameters:
+ * target - Handle returned from dbll_create().
+ * pattrs - New attributes.
+ * Returns:
+ * Requires:
+ * DBL initialized.
+ * Valid target.
+ * pattrs != NULL.
+ * Ensures:
+ */
+typedef void (*dbll_set_attrs_fxn) (struct dbll_tar_obj *target,
+ struct dbll_attrs *attrs);
+
+/*
+ * ======== dbll_unload ========
+ * Unload library loaded with dbll_load().
+ * Parameters:
+ * lib - Handle returned from dbll_open().
+ * attrs - Contains free() function and handle to pass to it.
+ * Returns:
+ * Requires:
+ * DBL initialized.
+ * Valid lib.
+ * Ensures:
+ */
+typedef void (*dbll_unload_fxn) (struct dbll_library_obj *library,
+ struct dbll_attrs *attrs);
+
+/*
+ * ======== dbll_unload_sect ========
+ * Unload a named section from an library (for overlay support).
+ * Parameters:
+ * lib - Handle returned from dbll_open().
+ * sec_name - Name of section to load.
+ * attrs - Contains free() function and handle to pass to it.
+ * Returns:
+ * 0: Success.
+ * -ENXIO: Named section not found.
+ * -ENOSYS: Function not implemented.
+ * Requires:
+ * DBL initialized.
+ * Valid lib.
+ * sec_name != NULL.
+ * Ensures:
+ */
+typedef int(*dbll_unload_sect_fxn) (struct dbll_library_obj *lib,
+ char *sz_sect_name,
+ struct dbll_attrs *attrs);
+
+struct dbll_fxns {
+ dbll_close_fxn close_fxn;
+ dbll_create_fxn create_fxn;
+ dbll_delete_fxn delete_fxn;
+ dbll_exit_fxn exit_fxn;
+ dbll_get_attrs_fxn get_attrs_fxn;
+ dbll_get_addr_fxn get_addr_fxn;
+ dbll_get_c_addr_fxn get_c_addr_fxn;
+ dbll_get_sect_fxn get_sect_fxn;
+ dbll_init_fxn init_fxn;
+ dbll_load_fxn load_fxn;
+ dbll_load_sect_fxn load_sect_fxn;
+ dbll_open_fxn open_fxn;
+ dbll_read_sect_fxn read_sect_fxn;
+ dbll_set_attrs_fxn set_attrs_fxn;
+ dbll_unload_fxn unload_fxn;
+ dbll_unload_sect_fxn unload_sect_fxn;
+};
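+
+/*
+ * Illustrative initialization (editorial note, not part of the original
+ * header): the dbll_* entry points declared in dbll.h have signatures
+ * matching the typedefs above, so a client can fill the table with them
+ * (shown here only for a subset of the fields).
+ *
+ *	static struct dbll_fxns dbll_fxns_tbl = {
+ *		.create_fxn = dbll_create,
+ *		.delete_fxn = dbll_delete,
+ *		.open_fxn = dbll_open,
+ *		.close_fxn = dbll_close,
+ *		.load_fxn = dbll_load,
+ *		.unload_fxn = dbll_unload,
+ *		.get_addr_fxn = dbll_get_addr,
+ *		.get_sect_fxn = dbll_get_sect,
+ *		.init_fxn = dbll_init,
+ *		.exit_fxn = dbll_exit,
+ *	};
+ */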
+
+#endif /* DBLLDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h
new file mode 100644
index 00000000000..09f8bf83ab0
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h
@@ -0,0 +1,32 @@
+/*
+ * dehdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definition for Bridge driver module DEH.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DEHDEFS_
+#define DEHDEFS_
+
+#include <dspbridge/mbx_sh.h> /* shared mailbox codes */
+
+/* DEH object manager */
+struct deh_mgr;
+
+/* Magic code used to determine if DSP signaled exception. */
+#define DEH_BASE MBX_DEH_BASE
+#define DEH_USERS_BASE MBX_DEH_USERS_BASE
+#define DEH_LIMIT MBX_DEH_LIMIT
+
+#endif /* DEHDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
new file mode 100644
index 00000000000..357458fadd2
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -0,0 +1,702 @@
+/*
+ * dev.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Bridge driver device operations.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DEV_
+#define DEV_
+
+/* ----------------------------------- Module Dependent Headers */
+#include <dspbridge/chnldefs.h>
+#include <dspbridge/cmm.h>
+#include <dspbridge/cod.h>
+#include <dspbridge/dehdefs.h>
+#include <dspbridge/nodedefs.h>
+#include <dspbridge/dispdefs.h>
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dmm.h>
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/devdefs.h>
+
+/*
+ * ======== dev_brd_write_fxn ========
+ * Purpose:
+ * Exported function to be used as the COD write function. This function
+ * is passed a handle to a DEV_hObject by ZL in arb, then calls the
+ * device's bridge_brd_write() function.
+ * Parameters:
+ * arb: Handle to a Device Object.
+ * dsp_add: Address on DSP board (Destination).
+ * host_buf: Pointer to host buffer (Source).
+ * ul_num_bytes: Number of bytes to transfer.
+ * mem_space: Memory space on DSP to which to transfer.
+ * Returns:
+ * Number of bytes written. Returns 0 if the DEV_hObject passed in via
+ * arb is invalid.
+ * Requires:
+ * DEV Initialized.
+ * host_buf != NULL
+ * Ensures:
+ */
+extern u32 dev_brd_write_fxn(void *arb,
+ u32 dsp_add,
+ void *host_buf, u32 ul_num_bytes, u32 mem_space);
+
+/*
+ * ======== dev_create_device ========
+ * Purpose:
+ * Called by the operating system to load the Bridge Driver for a
+ * 'Bridge device.
+ * Parameters:
+ * device_obj: Ptr to location to receive the device object handle.
+ * driver_file_name: Name of Bridge driver PE DLL file to load. If the
+ * absolute path is not provided, the file is loaded
+ * through 'Bridge's module search path.
+ * host_config: Host configuration information, to be passed down
+ * to the Bridge driver when bridge_dev_create() is called.
+ * pDspConfig: DSP resources, to be passed down to the Bridge driver
+ * when bridge_dev_create() is called.
+ * dev_node_obj: Platform specific device node.
+ * Returns:
+ * 0: Module is loaded, device object has been created
+ * -ENOMEM: Insufficient memory to create needed resources.
+ * -EPERM: Unable to find Bridge driver entry point function.
+ * -ESPIPE: Unable to load ZL DLL.
+ * Requires:
+ * DEV Initialized.
+ * device_obj != NULL.
+ * driver_file_name != NULL.
+ * host_config != NULL.
+ * pDspConfig != NULL.
+ * Ensures:
+ * 0: *device_obj will contain handle to the new device object.
+ * Otherwise, does not create the device object, ensures the Bridge driver
+ * module is unloaded, and sets *device_obj to NULL.
+ */
+extern int dev_create_device(struct dev_object
+ **device_obj,
+ const char *driver_file_name,
+ struct cfg_devnode *dev_node_obj);
+
+/*
+ * ======== dev_create_iva_device ========
+ * Purpose:
+ * Called by the operating system to load the Bridge Driver for IVA.
+ * Parameters:
+ * device_obj: Ptr to location to receive the device object handle.
+ * driver_file_name: Name of Bridge driver PE DLL file to load. If the
+ * absolute path is not provided, the file is loaded
+ * through 'Bridge's module search path.
+ * host_config: Host configuration information, to be passed down
+ * to the Bridge driver when bridge_dev_create() is called.
+ * pDspConfig: DSP resources, to be passed down to the Bridge driver
+ * when bridge_dev_create() is called.
+ * dev_node_obj: Platform specific device node.
+ * Returns:
+ * 0: Module is loaded, device object has been created
+ * -ENOMEM: Insufficient memory to create needed resources.
+ * -EPERM: Unable to find Bridge driver entry point function.
+ * -ESPIPE: Unable to load ZL DLL.
+ * Requires:
+ * DEV Initialized.
+ * device_obj != NULL.
+ * driver_file_name != NULL.
+ * host_config != NULL.
+ * pDspConfig != NULL.
+ * Ensures:
+ * 0: *device_obj will contain handle to the new device object.
+ * Otherwise, does not create the device object, ensures the Bridge driver
+ * module is unloaded, and sets *device_obj to NULL.
+ */
+extern int dev_create_iva_device(struct dev_object
+ **device_obj,
+ const char *driver_file_name,
+ const struct cfg_hostres
+ *host_config,
+ struct cfg_devnode *dev_node_obj);
+
+/*
+ * ======== dev_create2 ========
+ * Purpose:
+ * After successful loading of the image from api_init_complete2
+ * (PROC Auto_Start) or proc_load, this fxn is called. This creates
+ * the Node Manager and updates the DEV Object.
+ * Parameters:
+ * hdev_obj: Handle to device object created with dev_create_device().
+ * Returns:
+ * 0: Successful Creation of Node Manager
+ * -EPERM: Some Error Occurred.
+ * Requires:
+ * DEV Initialized
+ * Valid hdev_obj
+ * Ensures:
+ * 0 and hdev_obj->hnode_mgr != NULL
+ * else hdev_obj->hnode_mgr == NULL
+ */
+extern int dev_create2(struct dev_object *hdev_obj);
+
+/*
+ * ======== dev_destroy2 ========
+ * Purpose:
+ * Destroys the Node manager for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with dev_create_device().
+ * Returns:
+ * 0: Successful destruction of the Node manager
+ * -EPERM: Some Error Occurred.
+ * Requires:
+ * DEV Initialized
+ * Valid hdev_obj
+ * Ensures:
+ * 0 and hdev_obj->hnode_mgr == NULL
+ * else -EPERM.
+ */
+extern int dev_destroy2(struct dev_object *hdev_obj);
+
+/*
+ * ======== dev_destroy_device ========
+ * Purpose:
+ * Destroys the channel manager for this device, if any, calls
+ * bridge_dev_destroy(), and then attempts to unload the Bridge module.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * -EPERM: The Bridge driver failed its bridge_dev_destroy() function.
+ * Requires:
+ * DEV Initialized.
+ * Ensures:
+ */
+extern int dev_destroy_device(struct dev_object
+ *hdev_obj);
+
+/*
+ * ======== dev_get_chnl_mgr ========
+ * Purpose:
+ * Retrieve the handle to the channel manager created for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * *mgr: Ptr to location to store handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * mgr != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *mgr contains a handle to a channel manager object,
+ * or NULL.
+ * else: *mgr is NULL.
+ */
+extern int dev_get_chnl_mgr(struct dev_object *hdev_obj,
+ struct chnl_mgr **mgr);
+
+/*
+ * ======== dev_get_cmm_mgr ========
+ * Purpose:
+ * Retrieve the handle to the shared memory manager created for this
+ * device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * *mgr: Ptr to location to store handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * mgr != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *mgr contains a handle to a shared memory manager object,
+ * or NULL.
+ * else: *mgr is NULL.
+ */
+extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
+ struct cmm_object **mgr);
+
+/*
+ * ======== dev_get_dmm_mgr ========
+ * Purpose:
+ * Retrieve the handle to the dynamic memory manager created for this
+ * device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * *mgr: Ptr to location to store handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * mgr != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *mgr contains a handle to a dynamic memory manager object,
+ * or NULL.
+ * else: *mgr is NULL.
+ */
+extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+ struct dmm_object **mgr);
+
+/*
+ * ======== dev_get_cod_mgr ========
+ * Purpose:
+ * Retrieve the COD manager created for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * *cod_mgr: Ptr to location to store handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * cod_mgr != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *cod_mgr contains a handle to a COD manager object.
+ * else: *cod_mgr is NULL.
+ */
+extern int dev_get_cod_mgr(struct dev_object *hdev_obj,
+ struct cod_manager **cod_mgr);
+
+/*
+ * ======== dev_get_deh_mgr ========
+ * Purpose:
+ * Retrieve the DEH manager created for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with dev_create_device().
+ * *deh_manager: Ptr to location to store handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * deh_manager != NULL.
+ * DEH Initialized.
+ * Ensures:
+ * 0: *deh_manager contains a handle to a DEH manager object.
+ * else: *deh_manager is NULL.
+ */
+extern int dev_get_deh_mgr(struct dev_object *hdev_obj,
+ struct deh_mgr **deh_manager);
+
+/*
+ * ======== dev_get_dev_node ========
+ * Purpose:
+ * Retrieve the platform specific device ID for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * dev_nde: Ptr to location to get the device node handle.
+ * Returns:
+ * 0: Returns a DEVNODE in *dev_node_obj.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * dev_nde != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *dev_nde contains a platform specific device ID;
+ * else: *dev_nde is NULL.
+ */
+extern int dev_get_dev_node(struct dev_object *hdev_obj,
+ struct cfg_devnode **dev_nde);
+
+/*
+ * ======== dev_get_dev_type ========
+ * Purpose:
+ * Retrieve the type of this device.
+ * Parameters:
+ * device_obj: Handle to device object created with
+ * dev_create_device().
+ * dev_type: Ptr to location to store the device type.
+ * Returns:
+ * 0: Success
+ * -EFAULT: Invalid device_obj.
+ * Requires:
+ * dev_type != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *dev_type contains the device type.
+ */
+extern int dev_get_dev_type(struct dev_object *device_obj,
+ u8 *dev_type);
+
+/*
+ * ======== dev_get_first ========
+ * Purpose:
+ * Retrieve the first Device Object handle from an internal linked list of
+ * DEV_OBJECTs maintained by DEV.
+ * Parameters:
+ * Returns:
+ * NULL if there are no device objects stored; else
+ * a valid DEV_HOBJECT.
+ * Requires:
+ * No calls to dev_create_device or dev_destroy_device (which may modify the
+ * internal device object list) may occur between calls to dev_get_first
+ * and dev_get_next.
+ * Ensures:
+ * The DEV_HOBJECT returned is valid.
+ * A subsequent call to dev_get_next will return the next device object in
+ * the list.
+ */
+extern struct dev_object *dev_get_first(void);
+
+/*
+ * ======== dev_get_intf_fxns ========
+ * Purpose:
+ * Retrieve the Bridge driver interface function structure for the
+ * loaded Bridge driver.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * *if_fxns: Ptr to location to store fxn interface.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * if_fxns != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *if_fxns contains a pointer to the Bridge
+ * driver interface;
+ * else: *if_fxns is NULL.
+ */
+extern int dev_get_intf_fxns(struct dev_object *hdev_obj,
+ struct bridge_drv_interface **if_fxns);
+
+/*
+ * ======== dev_get_io_mgr ========
+ * Purpose:
+ * Retrieve the handle to the IO manager created for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * *mgr: Ptr to location to store handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * mgr != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *mgr contains a handle to an IO manager object.
+ * else: *mgr is NULL.
+ */
+extern int dev_get_io_mgr(struct dev_object *hdev_obj,
+ struct io_mgr **mgr);
+
+/*
+ * ======== dev_get_next ========
+ * Purpose:
+ * Retrieve the next Device Object handle from an internal linked list of
+ * DEV_OBJECTs maintained by DEV, after a previous call to
+ * dev_get_first() and zero or more calls to dev_get_next().
+ * Parameters:
+ * hdev_obj: Handle to the device object returned from a previous
+ * call to dev_get_first() or dev_get_next().
+ * Returns:
+ * NULL if there are no further device objects on the list or hdev_obj
+ * was invalid;
+ * else the next valid DEV_HOBJECT in the list.
+ * Requires:
+ * No calls to dev_create_device or dev_destroy_device (which may modify the
+ * internal device object list) may occur between calls to dev_get_first
+ * and dev_get_next.
+ * Ensures:
+ * The DEV_HOBJECT returned is valid.
+ * A subsequent call to dev_get_next will return the next device object in
+ * the list.
+ */
+extern struct dev_object *dev_get_next(struct dev_object
+ *hdev_obj);
+
+/*
+ * ========= dev_get_msg_mgr ========
+ * Purpose:
+ * Retrieve the msg_ctrl Manager Handle from the DevObject.
+ * Parameters:
+ * hdev_obj: Handle to the Dev Object
+ * msg_man: Location where msg_ctrl Manager handle will be returned.
+ * Returns:
+ * Requires:
+ * DEV Initialized.
+ * Valid hdev_obj.
+ * msg_man != NULL.
+ * Ensures:
+ */
+extern void dev_get_msg_mgr(struct dev_object *hdev_obj,
+ struct msg_mgr **msg_man);
+
+/*
+ * ========= dev_get_node_manager ========
+ * Purpose:
+ * Retrieve the Node Manager Handle from the DevObject. It is an
+ * accessor function
+ * Parameters:
+ * hdev_obj: Handle to the Dev Object
+ * node_man: Location where Handle to the Node Manager will be
+ * returned.
+ * Returns:
+ * 0: Success
+ * -EFAULT: Invalid Dev Object handle.
+ * Requires:
+ * DEV Initialized.
+ * node_man is not null
+ * Ensures:
+ * 0: *node_man contains a handle to a Node manager object.
+ * else: *node_man is NULL.
+ */
+extern int dev_get_node_manager(struct dev_object
+ *hdev_obj,
+ struct node_mgr **node_man);
+
+/*
+ * ======== dev_get_symbol ========
+ * Purpose:
+ * Get the value of a symbol in the currently loaded program.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * str_sym: Name of symbol to look up.
+ * pul_value: Ptr to symbol value.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * -ESPIPE: Symbol could not be found or has not been loaded onto
+ * the board.
+ * Requires:
+ * str_sym != NULL.
+ * pul_value != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *pul_value contains the symbol value;
+ */
+extern int dev_get_symbol(struct dev_object *hdev_obj,
+ const char *str_sym, u32 * pul_value);
+
+/*
+ * ======== dev_get_bridge_context ========
+ * Purpose:
+ * Retrieve the Bridge Context handle, as returned by the
+ * bridge_dev_create fxn.
+ * Parameters:
+ * hdev_obj: Handle to device object created with dev_create_device()
+ * *phbridge_context: Ptr to location to store context handle.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * phbridge_context != NULL.
+ * DEV Initialized.
+ * Ensures:
+ * 0: *phbridge_context contains context handle;
+ * else: *phbridge_context is NULL;
+ */
+extern int dev_get_bridge_context(struct dev_object *hdev_obj,
+ struct bridge_dev_context
+ **phbridge_context);
+
+/*
+ * ======== dev_exit ========
+ * Purpose:
+ * Decrement reference count, and free resources when reference count is
+ * 0.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * DEV is initialized.
+ * Ensures:
+ * When reference count == 0, DEV's private resources are freed.
+ */
+extern void dev_exit(void);
+
+/*
+ * ======== dev_init ========
+ * Purpose:
+ * Initialize DEV's private state, keeping a reference count on each call.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * TRUE: A requirement for the other public DEV functions.
+ */
+extern bool dev_init(void);
+
+/*
+ * ======== dev_is_locked ========
+ * Purpose:
+ * Predicate function to determine if the device has been
+ * locked by a client for exclusive access.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * Returns:
+ * 0: TRUE: device has been locked.
+ * 0: FALSE: device not locked.
+ * -EFAULT: hdev_obj was invalid.
+ * Requires:
+ * DEV Initialized.
+ * Ensures:
+ */
+extern int dev_is_locked(struct dev_object *hdev_obj);
+
+/*
+ * ======== dev_insert_proc_object ========
+ * Purpose:
+ * Inserts the Processor Object into the List of PROC Objects
+ * kept in the DEV Object
+ * Parameters:
+ * proc_obj: Handle to the Proc Object
+ * hdev_obj: Handle to the Dev Object
+ * bAttachedNew: Specifies if there are already processors attached
+ * Returns:
+ * 0: Successfully inserted into the list
+ * Requires:
+ * proc_obj is not NULL
+ * hdev_obj is a valid handle to the DEV.
+ * DEV Initialized.
+ * List(of Proc object in Dev) Exists.
+ * Ensures:
+ * 0 & the PROC Object is inserted and the list is not empty
+ * Details:
+ * If the list of Proc Objects is empty, bAttachedNew is TRUE, indicating
+ * this is the first Processor attaching.
+ * If it is FALSE, there are already processors attached.
+ */
+extern int dev_insert_proc_object(struct dev_object
+ *hdev_obj,
+ u32 proc_obj,
+ bool *already_attached);
+
+/*
+ * ======== dev_remove_proc_object ========
+ * Purpose:
+ * Search for and remove a Proc object from the given list maintained
+ * by the DEV
+ * Parameters:
+ * p_proc_object: Ptr to ProcObject to remove.
+ * dev_obj: Ptr to Dev Object where the list is.
+ * already_attached: Ptr to return the bool
+ * Returns:
+ * 0: If successful.
+ * -EPERM: Failure to remove the PROC Object from the list
+ * Requires:
+ * DevObject is Valid
+ * proc_obj != 0
+ * dev_obj->proc_list != NULL
+ * !LST_IS_EMPTY(dev_obj->proc_list)
+ * already_attached !=NULL
+ * Ensures:
+ * Details:
+ * List will be deleted when the DEV is destroyed.
+ *
+ */
+extern int dev_remove_proc_object(struct dev_object
+ *hdev_obj, u32 proc_obj);
+
+/*
+ * ======== dev_notify_clients ========
+ * Purpose:
+ * Notify all clients of this device of a change in device status.
+ * Clients may include multiple users of BRD, as well as CHNL.
+ * This function is asynchronous, and may be called by a timer event
+ * set up by a watchdog timer.
+ * Parameters:
+ * hdev_obj: Handle to device object created with dev_create_device().
+ * ret: A status word, most likely a BRD_STATUS.
+ * Returns:
+ * 0: All registered clients were asynchronously notified.
+ * -EINVAL: Invalid hdev_obj.
+ * Requires:
+ * DEV Initialized.
+ * Ensures:
+ * 0: Notifications are queued by the operating system to be
+ * delivered to clients. This function does not ensure that
+ * the notifications will ever be delivered.
+ */
+extern int dev_notify_clients(struct dev_object *hdev_obj, u32 ret);
+
+/*
+ * ======== dev_remove_device ========
+ * Purpose:
+ * Destroys the Device Object created by dev_start_device.
+ * Parameters:
+ * dev_node_obj: Device node as it is known to the OS.
+ * Returns:
+ * 0: If success;
+ * <error code> Otherwise.
+ * Requires:
+ * Ensures:
+ */
+extern int dev_remove_device(struct cfg_devnode *dev_node_obj);
+
+/*
+ * ======== dev_set_chnl_mgr ========
+ * Purpose:
+ * Set the channel manager for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with
+ * dev_create_device().
+ * hmgr: Handle to a channel manager, or NULL.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * DEV Initialized.
+ * Ensures:
+ */
+extern int dev_set_chnl_mgr(struct dev_object *hdev_obj,
+ struct chnl_mgr *hmgr);
+
+/*
+ * ======== dev_set_msg_mgr ========
+ * Purpose:
+ * Set the Message manager for this device.
+ * Parameters:
+ * hdev_obj: Handle to device object created with dev_create_device().
+ * hmgr: Handle to a message manager, or NULL.
+ * Returns:
+ * Requires:
+ * DEV Initialized.
+ * Ensures:
+ */
+extern void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr);
+
+/*
+ * ======== dev_start_device ========
+ * Purpose:
+ * Initializes the new device with bridge environment. This involves
+ * querying CM for allocated resources, querying the registry for
+ * necessary dsp resources (requested in the INF file), and using this
+ * information to create a bridge device object.
+ * Parameters:
+ * dev_node_obj: Device node as it is known to the OS.
+ * Returns:
+ * 0: If success;
+ * <error code> Otherwise.
+ * Requires:
+ * DEV initialized.
+ * Ensures:
+ */
+extern int dev_start_device(struct cfg_devnode *dev_node_obj);
+
+#endif /* DEV_ */
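The dev_get_first()/dev_get_next() contract above is easiest to read as a loop. A minimal sketch that respects the Requires clauses (no dev_create_device()/dev_destroy_device() calls during the walk); dump_chnl_mgrs() is an illustrative name, not a driver function:

/* Walk every device object known to DEV and report its channel manager. */
static void dump_chnl_mgrs(void)
{
	struct dev_object *hdev_obj;
	struct chnl_mgr *mgr;

	for (hdev_obj = dev_get_first(); hdev_obj != NULL;
	     hdev_obj = dev_get_next(hdev_obj)) {
		if (!dev_get_chnl_mgr(hdev_obj, &mgr) && mgr)
			pr_info("dev %p: chnl_mgr %p\n", hdev_obj, mgr);
	}
}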
diff --git a/drivers/staging/tidspbridge/include/dspbridge/devdefs.h b/drivers/staging/tidspbridge/include/dspbridge/devdefs.h
new file mode 100644
index 00000000000..a2f9241ff13
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/devdefs.h
@@ -0,0 +1,26 @@
+/*
+ * devdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definition of common struct between dspdefs.h and dev.h. Required
+ * to break circular dependency between Bridge driver and DEV include files.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DEVDEFS_
+#define DEVDEFS_
+
+/* Bridge Device Object */
+struct dev_object;
+
+#endif /* DEVDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/disp.h b/drivers/staging/tidspbridge/include/dspbridge/disp.h
new file mode 100644
index 00000000000..82bf721447a
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/disp.h
@@ -0,0 +1,204 @@
+/*
+ * disp.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Node Dispatcher.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DISP_
+#define DISP_
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/nodedefs.h>
+#include <dspbridge/nodepriv.h>
+#include <dspbridge/dispdefs.h>
+
+/*
+ * ======== disp_create ========
+ * Create a NODE Dispatcher object. This object handles the creation,
+ * deletion, and execution of nodes on the DSP target, through communication
+ * with the Resource Manager Server running on the target. Each NODE
+ * Manager object should have exactly one NODE Dispatcher.
+ *
+ * Parameters:
+ * dispatch_obj: Location to store node dispatcher object on output.
+ * hdev_obj: Device for this processor.
+ * disp_attrs: Node dispatcher attributes.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EPERM: Unable to create dispatcher.
+ * Requires:
+ * disp_init(void) called.
+ * disp_attrs != NULL.
+ * hdev_obj != NULL.
+ * dispatch_obj != NULL.
+ * Ensures:
+ * 0: IS_VALID(*dispatch_obj).
+ * error: *dispatch_obj == NULL.
+ */
+extern int disp_create(struct disp_object **dispatch_obj,
+ struct dev_object *hdev_obj,
+ const struct disp_attr *disp_attrs);
+
+/*
+ * ======== disp_delete ========
+ * Delete the NODE Dispatcher.
+ *
+ * Parameters:
+ * disp_obj: Node Dispatcher object.
+ * Returns:
+ * Requires:
+ * disp_init(void) called.
+ * Valid disp_obj.
+ * Ensures:
+ * disp_obj is invalid.
+ */
+extern void disp_delete(struct disp_object *disp_obj);
+
+/*
+ * ======== disp_exit ========
+ * Discontinue usage of DISP module.
+ *
+ * Parameters:
+ * Returns:
+ * Requires:
+ * disp_init(void) previously called.
+ * Ensures:
+ * Any resources acquired in disp_init(void) will be freed when last DISP
+ * client calls disp_exit(void).
+ */
+extern void disp_exit(void);
+
+/*
+ * ======== disp_init ========
+ * Initialize the DISP module.
+ *
+ * Parameters:
+ * Returns:
+ * TRUE if initialization succeeded, FALSE otherwise.
+ * Ensures:
+ */
+extern bool disp_init(void);
+
+/*
+ * ======== disp_node_change_priority ========
+ * Change the priority of a node currently running on the target.
+ *
+ * Parameters:
+ * disp_obj: Node Dispatcher object.
+ * hnode: Node object representing a node currently
+ * allocated or running on the DSP.
+ * rms_fxn: Address of RMS function for changing priority.
+ * node_env: Address of node's environment structure.
+ * prio: New priority level to set node's priority to.
+ * Returns:
+ * 0: Success.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * Requires:
+ * disp_init(void) called.
+ * Valid disp_obj.
+ * hnode != NULL.
+ * Ensures:
+ */
+extern int disp_node_change_priority(struct disp_object
+ *disp_obj,
+ struct node_object *hnode,
+ u32 rms_fxn,
+ nodeenv node_env, s32 prio);
+
+/*
+ * ======== disp_node_create ========
+ * Create a node on the DSP by remotely calling the node's create function.
+ *
+ * Parameters:
+ * disp_obj: Node Dispatcher object.
+ * hnode: Node handle obtained from node_allocate().
+ * rms_fxn: Address of RMS create node function.
+ * ul_create_fxn: Address of node's create function.
+ * pargs: Arguments to pass to RMS node create function.
+ * node_env: Location to store node environment pointer on
+ * output.
+ * Returns:
+ * 0: Success.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * -EPERM: A failure occurred, unable to create node.
+ * Requires:
+ * disp_init(void) called.
+ * Valid disp_obj.
+ * pargs != NULL.
+ * hnode != NULL.
+ * node_env != NULL.
+ * node_get_type(hnode) != NODE_DEVICE.
+ * Ensures:
+ */
+extern int disp_node_create(struct disp_object *disp_obj,
+ struct node_object *hnode,
+ u32 rms_fxn,
+ u32 ul_create_fxn,
+ const struct node_createargs
+ *pargs, nodeenv *node_env);
+
+/*
+ * ======== disp_node_delete ========
+ * Delete a node on the DSP by remotely calling the node's delete function.
+ *
+ * Parameters:
+ * disp_obj: Node Dispatcher object.
+ * hnode: Node object representing a node currently
+ * loaded on the DSP.
+ * rms_fxn: Address of RMS delete node function.
+ * ul_delete_fxn: Address of node's delete function.
+ * node_env: Address of node's environment structure.
+ * Returns:
+ * 0: Success.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * Requires:
+ * disp_init(void) called.
+ * Valid disp_obj.
+ * hnode != NULL.
+ * Ensures:
+ */
+extern int disp_node_delete(struct disp_object *disp_obj,
+ struct node_object *hnode,
+ u32 rms_fxn,
+ u32 ul_delete_fxn, nodeenv node_env);
+
+/*
+ * ======== disp_node_run ========
+ * Start execution of a node's execute phase, or resume execution of a node
+ * that has been suspended (via DISP_NodePause()) on the DSP.
+ *
+ * Parameters:
+ * disp_obj: Node Dispatcher object.
+ * hnode: Node object representing a node to be executed
+ * on the DSP.
+ * rms_fxn: Address of RMS node execute function.
+ * ul_execute_fxn: Address of node's execute function.
+ * node_env: Address of node's environment structure.
+ * Returns:
+ * 0: Success.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * Requires:
+ * disp_init(void) called.
+ * Valid disp_obj.
+ * hnode != NULL.
+ * Ensures:
+ */
+extern int disp_node_run(struct disp_object *disp_obj,
+ struct node_object *hnode,
+ u32 rms_fxn,
+ u32 ul_execute_fxn, nodeenv node_env);
+
+#endif /* DISP_ */
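The create/delete pairing described above can be summarised in a few lines. A minimal sketch, using placeholder attribute values (the values the node manager really passes are not shown in this header):

/* Bring up a node dispatcher for a device, then tear it down again. */
static int disp_lifecycle_sketch(struct dev_object *hdev_obj)
{
	struct disp_object *disp;
	struct disp_attr attrs = {
		.ul_chnl_offset = 0,		/* placeholder */
		.ul_chnl_buf_size = 0x100,	/* placeholder */
	};
	int status;

	if (!disp_init())
		return -EPERM;

	status = disp_create(&disp, hdev_obj, &attrs);
	if (!status)
		disp_delete(disp);

	disp_exit();
	return status;
}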
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h
new file mode 100644
index 00000000000..946551a3dbb
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h
@@ -0,0 +1,35 @@
+/*
+ * dispdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global DISP constants and types, shared by PROCESSOR, NODE, and DISP.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DISPDEFS_
+#define DISPDEFS_
+
+struct disp_object;
+
+/* Node Dispatcher attributes */
+struct disp_attr {
+ u32 ul_chnl_offset; /* Offset of channel ids reserved for RMS */
+ /* Size of buffer for sending data to RMS */
+ u32 ul_chnl_buf_size;
+ int proc_family; /* eg, 5000 */
+ int proc_type; /* eg, 5510 */
+ void *reserved1; /* Reserved for future use. */
+ u32 reserved2; /* Reserved for future use. */
+};
+
+#endif /* DISPDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 00000000000..6c58335c5f6
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
+/*
+ * dmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address
+ * space that can be directly mapped to any MPU buffer or memory region.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DMM_
+#define DMM_
+
+#include <dspbridge/dbdefs.h>
+
+struct dmm_object;
+
+/* DMM attributes used in dmm_create() */
+struct dmm_mgrattrs {
+ u32 reserved;
+};
+
+#define DMMPOOLSIZE 0x4000000
+
+/*
+ * ======== dmm_get_handle ========
+ * Purpose:
+ * Return the dynamic memory manager object for this device.
+ * This is typically called from the client process.
+ */
+
+extern int dmm_get_handle(void *hprocessor,
+ struct dmm_object **dmm_manager);
+
+extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
+ u32 size, u32 *prsv_addr);
+
+extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
+ u32 rsv_addr);
+
+extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
+ u32 size);
+
+extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
+ u32 addr, u32 *psize);
+
+extern int dmm_destroy(struct dmm_object *dmm_mgr);
+
+extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
+
+extern int dmm_create(struct dmm_object **dmm_manager,
+ struct dev_object *hdev_obj,
+ const struct dmm_mgrattrs *mgr_attrts);
+
+extern bool dmm_init(void);
+
+extern void dmm_exit(void);
+
+extern int dmm_create_tables(struct dmm_object *dmm_mgr,
+ u32 addr, u32 size);
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
+#endif
+
+#endif /* DMM_ */
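The reserve/map/unmap/un-reserve calls above are intended to bracket a DSP virtual-address window. A minimal sketch of that sequence, assuming the mapping is carved out of the reserved region (the header itself does not spell out the ordering):

/* Reserve a DSP VA window, map it, then undo both steps. */
static int dmm_window_sketch(struct dmm_object *dmm_mgr, u32 size)
{
	u32 rsv_addr, unmapped;
	int status;

	status = dmm_reserve_memory(dmm_mgr, size, &rsv_addr);
	if (status)
		return status;

	status = dmm_map_memory(dmm_mgr, rsv_addr, size);
	if (!status) {
		/* ... DSP-side accesses through the mapped window ... */
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmapped);
	}

	dmm_un_reserve_memory(dmm_mgr, rsv_addr);
	return status;
}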
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
new file mode 100644
index 00000000000..f3650153ef3
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -0,0 +1,521 @@
+/*
+ * drv.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DRV Resource allocation module. Driver Object gets Created
+ * at the time of Loading. It holds the List of Device Objects
+ * in the system.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DRV_
+#define DRV_
+
+#include <dspbridge/devdefs.h>
+
+#include <dspbridge/drvdefs.h>
+#include <linux/idr.h>
+
+#define DRV_ASSIGN 1
+#define DRV_RELEASE 0
+
+/* Provide the DSP Internal memory windows that can be accessed from L3 address
+ * space */
+
+#define OMAP_GEM_BASE 0x107F8000
+#define OMAP_DSP_SIZE 0x00720000
+
+/* MEM1 is L2 RAM + L2 Cache space */
+#define OMAP_DSP_MEM1_BASE 0x5C7F8000
+#define OMAP_DSP_MEM1_SIZE 0x18000
+#define OMAP_DSP_GEM1_BASE 0x107F8000
+
+/* MEM2 is L1P RAM/CACHE space */
+#define OMAP_DSP_MEM2_BASE 0x5CE00000
+#define OMAP_DSP_MEM2_SIZE 0x8000
+#define OMAP_DSP_GEM2_BASE 0x10E00000
+
+/* MEM3 is L1D RAM/CACHE space */
+#define OMAP_DSP_MEM3_BASE 0x5CF04000
+#define OMAP_DSP_MEM3_SIZE 0x14000
+#define OMAP_DSP_GEM3_BASE 0x10F04000
+
+#define OMAP_IVA2_PRM_BASE 0x48306000
+#define OMAP_IVA2_PRM_SIZE 0x1000
+
+#define OMAP_IVA2_CM_BASE 0x48004000
+#define OMAP_IVA2_CM_SIZE 0x1000
+
+#define OMAP_PER_CM_BASE 0x48005000
+#define OMAP_PER_CM_SIZE 0x1000
+
+#define OMAP_PER_PRM_BASE 0x48307000
+#define OMAP_PER_PRM_SIZE 0x1000
+
+#define OMAP_CORE_PRM_BASE 0x48306A00
+#define OMAP_CORE_PRM_SIZE 0x1000
+
+#define OMAP_SYSC_BASE 0x48002000
+#define OMAP_SYSC_SIZE 0x1000
+
+#define OMAP_DMMU_BASE 0x5D000000
+#define OMAP_DMMU_SIZE 0x1000
+
+#define OMAP_PRCM_VDD1_DOMAIN 1
+#define OMAP_PRCM_VDD2_DOMAIN 2
+
+/* GPP PROCESS CLEANUP Data structures */
+
+/* New structure (member of process context) abstracts NODE resource info */
+struct node_res_object {
+ void *hnode;
+ s32 node_allocated; /* Node status */
+ s32 heap_allocated; /* Heap status */
+ s32 streams_allocated; /* Streams status */
+ int id;
+};
+
+/* used to cache dma mapping information */
+struct bridge_dma_map_info {
+ /* direction of DMA in action, or DMA_NONE */
+ enum dma_data_direction dir;
+ /* number of elements requested by us */
+ int num_pages;
+ /* number of elements returned from dma_map_sg */
+ int sg_num;
+ /* list of buffers used in this DMA action */
+ struct scatterlist *sg;
+};
+
+/* Used for DMM mapped memory accounting */
+struct dmm_map_object {
+ struct list_head link;
+ u32 dsp_addr;
+ u32 mpu_addr;
+ u32 size;
+ u32 num_usr_pgs;
+ struct page **pages;
+ struct bridge_dma_map_info dma_info;
+};
+
+/* Used for DMM reserved memory accounting */
+struct dmm_rsv_object {
+ struct list_head link;
+ u32 dsp_reserved_addr;
+};
+
+/* New structure (member of process context) abstracts DMM resource info */
+struct dspheap_res_object {
+ s32 heap_allocated; /* DMM status */
+ u32 ul_mpu_addr;
+ u32 ul_dsp_addr;
+ u32 ul_dsp_res_addr;
+ u32 heap_size;
+ void *hprocessor;
+ struct dspheap_res_object *next;
+};
+
+/* New structure (member of process context) abstracts stream resource info */
+struct strm_res_object {
+ s32 stream_allocated; /* Stream status */
+ void *hstream;
+ u32 num_bufs;
+ u32 dir;
+ int id;
+};
+
+/* Overall Bridge process resource usage state */
+enum gpp_proc_res_state {
+ PROC_RES_ALLOCATED,
+ PROC_RES_FREED
+};
+
+/* Bridge Data */
+struct drv_data {
+ char *base_img;
+ s32 shm_size;
+ int tc_wordswapon;
+ void *drv_object;
+ void *dev_object;
+ void *mgr_object;
+};
+
+/* Process Context */
+struct process_context {
+ /* Process State */
+ enum gpp_proc_res_state res_state;
+
+ /* Handle to Processor */
+ void *hprocessor;
+
+ /* DSP Node resources */
+ struct idr *node_id;
+
+ /* DMM mapped memory resources */
+ struct list_head dmm_map_list;
+ spinlock_t dmm_map_lock;
+
+ /* DMM reserved memory resources */
+ struct list_head dmm_rsv_list;
+ spinlock_t dmm_rsv_lock;
+
+ /* DSP Heap resources */
+ struct dspheap_res_object *pdspheap_list;
+
+ /* Stream resources */
+ struct idr *stream_id;
+};
+
+/*
+ * ======== drv_create ========
+ * Purpose:
+ * Creates the Driver Object. This is done during the driver loading.
+ * There is only one Driver Object in the DSP/BIOS Bridge.
+ * Parameters:
+ * drv_obj: Location to store created DRV Object handle.
+ * Returns:
+ * 0: Success
+ * -ENOMEM: Failed in Memory allocation
+ * -EPERM: General Failure
+ * Requires:
+ * DRV Initialized (refs > 0 )
+ * drv_obj != NULL.
+ * Ensures:
+ * 0: - *drv_obj is a valid DRV interface to the device.
+ * - List of DevObject Created and Initialized.
+ * - List of dev_node String created and initialized.
+ * - Registry is updated with the DRV Object.
+ * !0: DRV Object not created
+ * Details:
+ * There is one Driver Object for the Driver representing
+ * the driver itself. It contains the list of device
+ * Objects and the list of Device Extensions in the system.
+ * Also it can hold other necessary
+ * information in its storage area.
+ */
+extern int drv_create(struct drv_object **drv_obj);
+
+/*
+ * ======== drv_destroy ========
+ * Purpose:
+ * Destroys the Dev Object list and the DrvExt list, and destroys the
+ * DRV object itself.
+ * Called upon driver unloading, or unsuccessful loading of the driver.
+ * Parameters:
+ * driver_obj: Handle to Driver object.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Failed to destroy DRV Object
+ * Requires:
+ * DRV Initialized (cRegs > 0 )
+ * hdrv_obj is not NULL and a valid DRV handle .
+ * List of DevObject is Empty.
+ * List of DrvExt is Empty
+ * Ensures:
+ * 0: - DRV Object destroyed and hdrv_obj is not a valid
+ * DRV handle.
+ * - Registry is updated with "0" as the DRV Object.
+ */
+extern int drv_destroy(struct drv_object *driver_obj);
+
+/*
+ * ======== drv_exit ========
+ * Purpose:
+ * Exit the DRV module, freeing any modules initialized in drv_init.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * Ensures:
+ */
+extern void drv_exit(void);
+
+/*
+ * ======== drv_get_first_dev_object ========
+ * Purpose:
+ * Returns the Ptr to the First Dev Object in the List
+ * Parameters:
+ * Requires:
+ * DRV Initialized
+ * Returns:
+ * dw_dev_object: Ptr to the First Dev Object as a u32
+ * 0 if it fails to retrieve the First Dev Object
+ * Ensures:
+ */
+extern u32 drv_get_first_dev_object(void);
+
+/*
+ * ======== drv_get_first_dev_extension ========
+ * Purpose:
+ * Returns the Ptr to the First Device Extension in the List
+ * Parameters:
+ * Requires:
+ * DRV Initialized
+ * Returns:
+ * dw_dev_extension: Ptr to the First Device Extension as a u32
+ * 0: Failed to Get the Device Extension
+ * Ensures:
+ */
+extern u32 drv_get_first_dev_extension(void);
+
+/*
+ * ======== drv_get_dev_object ========
+ * Purpose:
+ * Given an index, returns a handle to DevObject from the list
+ * Parameters:
+ * hdrv_obj: Handle to the Manager
+ * device_obj: Location to store the Dev Handle
+ * Requires:
+ * DRV Initialized
+ * index >= 0
+ * hdrv_obj is not NULL and Valid DRV Object
+ * device_obj is not NULL
+ * Device Object List not Empty
+ * Returns:
+ * 0: Success
+ * -EPERM: Failed to Get the Dev Object
+ * Ensures:
+ * 0: *device_obj != NULL
+ * -EPERM: *device_obj = NULL
+ */
+extern int drv_get_dev_object(u32 index,
+ struct drv_object *hdrv_obj,
+ struct dev_object **device_obj);
+
+/*
+ * ======== drv_get_next_dev_object ========
+ * Purpose:
+ * Returns the Ptr to the Next Device Object from the List
+ * Parameters:
+ * hdev_obj: Handle to the Device Object
+ * Requires:
+ * DRV Initialized
+ * hdev_obj != 0
+ * Returns:
+ * dw_dev_object: Ptr to the Next Dev Object as a u32
+ * 0: If it fails to get the next Dev Object.
+ * Ensures:
+ */
+extern u32 drv_get_next_dev_object(u32 hdev_obj);
+
+/*
+ * ======== drv_get_next_dev_extension ========
+ * Purpose:
+ * Returns the Ptr to the Next Device Extension from the List
+ * Parameters:
+ * dev_extension: Handle to the Device Extension
+ * Requires:
+ * DRV Initialized
+ * dev_extension != 0.
+ * Returns:
+ * dw_dev_extension: Ptr to the Next Dev Extension
+ * 0: If it fails to get the next Dev Extension
+ * Ensures:
+ */
+extern u32 drv_get_next_dev_extension(u32 dev_extension);
+
+/*
+ * ======== drv_init ========
+ * Purpose:
+ * Initialize the DRV module.
+ * Parameters:
+ * Returns:
+ * TRUE if success; FALSE otherwise.
+ * Requires:
+ * Ensures:
+ */
+extern int drv_init(void);
+
+/*
+ * ======== drv_insert_dev_object ========
+ * Purpose:
+ * Insert a DeviceObject into the list held by the Driver object.
+ * Parameters:
+ * driver_obj: Handle to DrvObject
+ * hdev_obj: Handle to DeviceObject to insert.
+ * Returns:
+ * 0: If successful.
+ * -EPERM: General Failure:
+ * Requires:
+ * hdrv_obj != NULL and Valid DRV Handle.
+ * hdev_obj != NULL.
+ * Ensures:
+ * 0: Device Object is inserted and the List is not empty.
+ */
+extern int drv_insert_dev_object(struct drv_object *driver_obj,
+ struct dev_object *hdev_obj);
+
+/*
+ * ======== drv_remove_dev_object ========
+ * Purpose:
+ * Search for and remove a Device object from the given list of Device
+ * objects.
+ * Parameters:
+ * driver_obj: Handle to DrvObject
+ * hdev_obj: Handle to DevObject to Remove
+ * Returns:
+ * 0: Success.
+ * -EPERM: Unable to find dev_obj.
+ * Requires:
+ * hdrv_obj != NULL and a Valid DRV Handle.
+ * hdev_obj != NULL.
+ * List exists and is not empty.
+ * Ensures:
+ * List either does not exist (NULL), or is not empty if it does exist.
+ */
+extern int drv_remove_dev_object(struct drv_object *driver_obj,
+ struct dev_object *hdev_obj);
+
+/*
+ * ======== drv_request_resources ========
+ * Purpose:
+ * Assigns the Resources or Releases them.
+ * Parameters:
+ * dw_context: Path to the driver Registry Key.
+ * dev_node_strg: Ptr to dev_node String stored in the Device Ext.
+ * Returns:
+ * TRUE if success; FALSE otherwise.
+ * Requires:
+ * Ensures:
+ * The Resources are assigned based on Bus type.
+ * The hardware is initialized. Resource information is
+ * gathered from the Registry (ISA, PCMCIA) or scanned (PCI)
+ * Resource structure is stored in the registry which will be
+ * later used by the CFG module.
+ */
+extern int drv_request_resources(u32 dw_context,
+ u32 *dev_node_strg);
+
+/*
+ * ======== drv_release_resources ========
+ * Purpose:
+ * Assigns the Resources or Releases them.
+ * Parameters:
+ * dw_context: Path to the driver Registry Key.
+ * hdrv_obj: Handle to the Driver Object.
+ * Returns:
+ * TRUE if success; FALSE otherwise.
+ * Requires:
+ * Ensures:
+ * The Resources are released based on Bus type.
+ * Resource structure is deleted from the registry
+ */
+extern int drv_release_resources(u32 dw_context,
+ struct drv_object *hdrv_obj);
+
+/**
+ * drv_request_bridge_res_dsp() - Reserves shared memory for bridge.
+ * @phost_resources: pointer to host resources.
+ */
+int drv_request_bridge_res_dsp(void **phost_resources);
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+void bridge_recover_schedule(void);
+#endif
+
+/*
+ * ======== mem_ext_phys_pool_init ========
+ * Purpose:
+ * Uses the physical memory chunk passed in for internal consistent memory
+ * allocations. The physical address is based on the page frame address.
+ * Parameters:
+ * pool_phys_base starting address of the physical memory pool.
+ * pool_size size of the physical memory pool.
+ * Returns:
+ * none.
+ * Requires:
+ * - MEM initialized.
+ * - valid physical address for the base and size > 0
+ */
+extern void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size);
+
+/*
+ * ======== mem_ext_phys_pool_release ========
+ */
+extern void mem_ext_phys_pool_release(void);
+
+/* ======== mem_alloc_phys_mem ========
+ * Purpose:
+ * Allocate physically contiguous, uncached memory
+ * Parameters:
+ * byte_size: Number of bytes to allocate.
+ * align_mask: Alignment Mask.
+ * physical_address: Physical address of allocated memory.
+ * Returns:
+ * Pointer to a block of memory;
+ * NULL if memory couldn't be allocated, or if byte_size == 0.
+ * Requires:
+ * MEM initialized.
+ * Ensures:
+ * The returned pointer, if not NULL, points to a valid memory block of
+ * the size requested. Returned physical address refers to physical
+ * location of memory.
+ */
+extern void *mem_alloc_phys_mem(u32 byte_size,
+ u32 align_mask, u32 *physical_address);
+
+/*
+ * ======== mem_free_phys_mem ========
+ * Purpose:
+ * Free the given block of physically contiguous memory.
+ * Parameters:
+ * virtual_address: Pointer to virtual memory region allocated
+ * by mem_alloc_phys_mem().
+ * physical_address: Pointer to physical memory region allocated
+ * by mem_alloc_phys_mem().
+ * byte_size: Size of the memory region allocated by mem_alloc_phys_mem().
+ * Returns:
+ * Requires:
+ * MEM initialized.
+ * virtual_address is a valid memory address returned by
+ * mem_alloc_phys_mem()
+ * Ensures:
+ * virtual_address is no longer a valid pointer to memory.
+ */
+extern void mem_free_phys_mem(void *virtual_address,
+ u32 physical_address, u32 byte_size);
+
+/*
+ * ======== MEM_LINEAR_ADDRESS ========
+ * Purpose:
+ * Get the linear address corresponding to the given physical address.
+ * Parameters:
+ * phys_addr: Physical address to be mapped.
+ * byte_size: Number of bytes in physical range to map.
+ * Returns:
+ * The corresponding linear address, or NULL if unsuccessful.
+ * Requires:
+ * MEM initialized.
+ * Ensures:
+ * Notes:
+ * If valid linear address is returned, be sure to call
+ * MEM_UNMAP_LINEAR_ADDRESS().
+ */
+#define MEM_LINEAR_ADDRESS(phy_addr, byte_size) phy_addr
+
+/*
+ * ======== MEM_UNMAP_LINEAR_ADDRESS ========
+ * Purpose:
+ * Unmap the linear address mapped in MEM_LINEAR_ADDRESS.
+ * Parameters:
+ * base_addr: Ptr to mapped memory (as returned by MEM_LINEAR_ADDRESS()).
+ * Returns:
+ * Requires:
+ * - MEM initialized.
+ * - base_addr is a valid linear address mapped in MEM_LINEAR_ADDRESS.
+ * Ensures:
+ * - base_addr no longer points to a valid linear address.
+ */
+#define MEM_UNMAP_LINEAR_ADDRESS(base_addr) {}
+
+#endif /* DRV_ */
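The physically contiguous helpers near the end of this header pair up one-to-one. A minimal sketch, assuming an alignment mask of 0 requests no extra alignment (buf_size is arbitrary):

/* Allocate an uncached, physically contiguous buffer and free it again. */
static void phys_buffer_sketch(void)
{
	u32 phys = 0;
	u32 buf_size = 4096;
	void *va;

	va = mem_alloc_phys_mem(buf_size, 0, &phys);
	if (!va)
		return;

	/* ... hand 'phys' to the DSP, use 'va' on the MPU side ... */

	mem_free_phys_mem(va, phys, buf_size);
}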
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h b/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h
new file mode 100644
index 00000000000..2920917bbc5
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h
@@ -0,0 +1,25 @@
+/*
+ * drvdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definition of common struct between dspdefs.h and drv.h.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DRVDEFS_
+#define DRVDEFS_
+
+/* Bridge Driver Object */
+struct drv_object;
+
+#endif /* DRVDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
new file mode 100644
index 00000000000..8da5bd8ede8
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
@@ -0,0 +1,475 @@
+/*
+ * dspapi-ioctl.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Contains structures and commands that are used for interaction
+ * between the DDSP API and Bridge driver.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPAPIIOCTL_
+#define DSPAPIIOCTL_
+
+#include <dspbridge/cmm.h>
+#include <dspbridge/strmdefs.h>
+#include <dspbridge/dbdcd.h>
+
+union trapped_args {
+
+ /* MGR Module */
+ struct {
+ u32 node_id;
+ struct dsp_ndbprops __user *pndb_props;
+ u32 undb_props_size;
+ u32 __user *pu_num_nodes;
+ } args_mgr_enumnode_info;
+
+ struct {
+ u32 processor_id;
+ struct dsp_processorinfo __user *processor_info;
+ u32 processor_info_size;
+ u32 __user *pu_num_procs;
+ } args_mgr_enumproc_info;
+
+ struct {
+ struct dsp_uuid *uuid_obj;
+ enum dsp_dcdobjtype obj_type;
+ char *psz_path_name;
+ } args_mgr_registerobject;
+
+ struct {
+ struct dsp_uuid *uuid_obj;
+ enum dsp_dcdobjtype obj_type;
+ } args_mgr_unregisterobject;
+
+ struct {
+ struct dsp_notification __user *__user *anotifications;
+ u32 count;
+ u32 __user *pu_index;
+ u32 utimeout;
+ } args_mgr_wait;
+
+ /* PROC Module */
+ struct {
+ u32 processor_id;
+ struct dsp_processorattrin __user *attr_in;
+ void *__user *ph_processor;
+ } args_proc_attach;
+
+ struct {
+ void *hprocessor;
+ u32 dw_cmd;
+ struct dsp_cbdata __user *pargs;
+ } args_proc_ctrl;
+
+ struct {
+ void *hprocessor;
+ } args_proc_detach;
+
+ struct {
+ void *hprocessor;
+ void *__user *node_tab;
+ u32 node_tab_size;
+ u32 __user *pu_num_nodes;
+ u32 __user *pu_allocated;
+ } args_proc_enumnode_info;
+
+ struct {
+ void *hprocessor;
+ u32 resource_type;
+ struct dsp_resourceinfo *resource_info;
+ u32 resource_info_size;
+ } args_proc_enumresources;
+
+ struct {
+ void *hprocessor;
+ struct dsp_processorstate __user *proc_state_obj;
+ u32 state_info_size;
+ } args_proc_getstate;
+
+ struct {
+ void *hprocessor;
+ u8 __user *pbuf;
+ u8 __user *psize;
+ u32 max_size;
+ } args_proc_gettrace;
+
+ struct {
+ void *hprocessor;
+ s32 argc_index;
+ char __user *__user *user_args;
+ char *__user *user_envp;
+ } args_proc_load;
+
+ struct {
+ void *hprocessor;
+ u32 event_mask;
+ u32 notify_type;
+ struct dsp_notification __user *hnotification;
+ } args_proc_register_notify;
+
+ struct {
+ void *hprocessor;
+ } args_proc_start;
+
+ struct {
+ void *hprocessor;
+ u32 ul_size;
+ void *__user *pp_rsv_addr;
+ } args_proc_rsvmem;
+
+ struct {
+ void *hprocessor;
+ u32 ul_size;
+ void *prsv_addr;
+ } args_proc_unrsvmem;
+
+ struct {
+ void *hprocessor;
+ void *pmpu_addr;
+ u32 ul_size;
+ void *req_addr;
+ void *__user *pp_map_addr;
+ u32 ul_map_attr;
+ } args_proc_mapmem;
+
+ struct {
+ void *hprocessor;
+ u32 ul_size;
+ void *map_addr;
+ } args_proc_unmapmem;
+
+ struct {
+ void *hprocessor;
+ void *pmpu_addr;
+ u32 ul_size;
+ u32 dir;
+ } args_proc_dma;
+
+ struct {
+ void *hprocessor;
+ void *pmpu_addr;
+ u32 ul_size;
+ u32 ul_flags;
+ } args_proc_flushmemory;
+
+ struct {
+ void *hprocessor;
+ } args_proc_stop;
+
+ struct {
+ void *hprocessor;
+ void *pmpu_addr;
+ u32 ul_size;
+ } args_proc_invalidatememory;
+
+ /* NODE Module */
+ struct {
+ void *hprocessor;
+ struct dsp_uuid __user *node_id_ptr;
+ struct dsp_cbdata __user *pargs;
+ struct dsp_nodeattrin __user *attr_in;
+ void *__user *ph_node;
+ } args_node_allocate;
+
+ struct {
+ void *hnode;
+ u32 usize;
+ struct dsp_bufferattr __user *pattr;
+ u8 *__user *pbuffer;
+ } args_node_allocmsgbuf;
+
+ struct {
+ void *hnode;
+ s32 prio;
+ } args_node_changepriority;
+
+ struct {
+ void *hnode;
+ u32 stream_id;
+ void *other_node;
+ u32 other_stream;
+ struct dsp_strmattr __user *pattrs;
+ struct dsp_cbdata __user *conn_param;
+ } args_node_connect;
+
+ struct {
+ void *hnode;
+ } args_node_create;
+
+ struct {
+ void *hnode;
+ } args_node_delete;
+
+ struct {
+ void *hnode;
+ struct dsp_bufferattr __user *pattr;
+ u8 *pbuffer;
+ } args_node_freemsgbuf;
+
+ struct {
+ void *hnode;
+ struct dsp_nodeattr __user *pattr;
+ u32 attr_size;
+ } args_node_getattr;
+
+ struct {
+ void *hnode;
+ struct dsp_msg __user *message;
+ u32 utimeout;
+ } args_node_getmessage;
+
+ struct {
+ void *hnode;
+ } args_node_pause;
+
+ struct {
+ void *hnode;
+ struct dsp_msg __user *message;
+ u32 utimeout;
+ } args_node_putmessage;
+
+ struct {
+ void *hnode;
+ u32 event_mask;
+ u32 notify_type;
+ struct dsp_notification __user *hnotification;
+ } args_node_registernotify;
+
+ struct {
+ void *hnode;
+ } args_node_run;
+
+ struct {
+ void *hnode;
+ int __user *pstatus;
+ } args_node_terminate;
+
+ struct {
+ void *hprocessor;
+ struct dsp_uuid __user *node_id_ptr;
+ struct dsp_ndbprops __user *node_props;
+ } args_node_getuuidprops;
+
+ /* STRM module */
+
+ struct {
+ void *hstream;
+ u32 usize;
+ u8 *__user *ap_buffer;
+ u32 num_bufs;
+ } args_strm_allocatebuffer;
+
+ struct {
+ void *hstream;
+ } args_strm_close;
+
+ struct {
+ void *hstream;
+ u8 *__user *ap_buffer;
+ u32 num_bufs;
+ } args_strm_freebuffer;
+
+ struct {
+ void *hstream;
+ void **ph_event;
+ } args_strm_geteventhandle;
+
+ struct {
+ void *hstream;
+ struct stream_info __user *stream_info;
+ u32 stream_info_size;
+ } args_strm_getinfo;
+
+ struct {
+ void *hstream;
+ bool flush_flag;
+ } args_strm_idle;
+
+ struct {
+ void *hstream;
+ u8 *pbuffer;
+ u32 dw_bytes;
+ u32 dw_buf_size;
+ u32 dw_arg;
+ } args_strm_issue;
+
+ struct {
+ void *hnode;
+ u32 direction;
+ u32 index;
+ struct strm_attr __user *attr_in;
+ void *__user *ph_stream;
+ } args_strm_open;
+
+ struct {
+ void *hstream;
+ u8 *__user *buf_ptr;
+ u32 __user *bytes;
+ u32 __user *buf_size_ptr;
+ u32 __user *pdw_arg;
+ } args_strm_reclaim;
+
+ struct {
+ void *hstream;
+ u32 event_mask;
+ u32 notify_type;
+ struct dsp_notification __user *hnotification;
+ } args_strm_registernotify;
+
+ struct {
+ void *__user *stream_tab;
+ u32 strm_num;
+ u32 __user *pmask;
+ u32 utimeout;
+ } args_strm_select;
+
+ /* CMM Module */
+ struct {
+ struct cmm_object *hcmm_mgr;
+ u32 usize;
+ struct cmm_attrs *pattrs;
+ void **pp_buf_va;
+ } args_cmm_allocbuf;
+
+ struct {
+ struct cmm_object *hcmm_mgr;
+ void *buf_pa;
+ u32 ul_seg_id;
+ } args_cmm_freebuf;
+
+ struct {
+ void *hprocessor;
+ struct cmm_object *__user *ph_cmm_mgr;
+ } args_cmm_gethandle;
+
+ struct {
+ struct cmm_object *hcmm_mgr;
+ struct cmm_info __user *cmm_info_obj;
+ } args_cmm_getinfo;
+
+ /* UTIL module */
+ struct {
+ s32 util_argc;
+ char **pp_argv;
+ } args_util_testdll;
+};
+
+/*
+ * Dspbridge Ioctl numbering scheme
+ *
+ * 7 0
+ * ---------------------------------
+ * | Module | Ioctl Number |
+ * ---------------------------------
+ * | x | x | x | 0 | 0 | 0 | 0 | 0 |
+ * ---------------------------------
+ */
+
+/* Ioctl driver identifier */
+#define DB 0xDB
+
+/*
+ * Following are used to distinguish between module ioctls, this is needed
+ * in case new ioctls are introduced.
+ */
+#define DB_MODULE_MASK 0xE0
+#define DB_IOC_MASK 0x1F
+
+/* Ioctl module masks */
+#define DB_MGR 0x0
+#define DB_PROC 0x20
+#define DB_NODE 0x40
+#define DB_STRM 0x60
+#define DB_CMM 0x80
+
+#define DB_MODULE_SHIFT 5
+
+/* Used to calculate the ioctl per dspbridge module */
+#define DB_IOC(module, num) \
+ (((module) & DB_MODULE_MASK) | ((num) & DB_IOC_MASK))
+/* Used to get dspbridge ioctl module */
+#define DB_GET_MODULE(cmd) ((cmd) & DB_MODULE_MASK)
+/* Used to get dspbridge ioctl number */
+#define DB_GET_IOC(cmd) ((cmd) & DB_IOC_MASK)
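+
+/*
+ * Worked example (illustration only): PROC_LOAD below is built from
+ * DB_IOC(DB_PROC, 7) = (0x20 & 0xE0) | (7 & 0x1F) = 0x27, so
+ * DB_GET_MODULE(0x27) = 0x20 (PROC) and DB_GET_IOC(0x27) = 7.
+ */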
+
+/* TODO: Remove deprecated and not implemented */
+
+/* MGR Module */
+#define MGR_ENUMNODE_INFO _IOWR(DB, DB_IOC(DB_MGR, 0), unsigned long)
+#define MGR_ENUMPROC_INFO _IOWR(DB, DB_IOC(DB_MGR, 1), unsigned long)
+#define MGR_REGISTEROBJECT _IOWR(DB, DB_IOC(DB_MGR, 2), unsigned long)
+#define MGR_UNREGISTEROBJECT _IOWR(DB, DB_IOC(DB_MGR, 3), unsigned long)
+#define MGR_WAIT _IOWR(DB, DB_IOC(DB_MGR, 4), unsigned long)
+/* MGR_GET_PROC_RES Deprecated */
+#define MGR_GET_PROC_RES _IOR(DB, DB_IOC(DB_MGR, 5), unsigned long)
+
+/* PROC Module */
+#define PROC_ATTACH _IOWR(DB, DB_IOC(DB_PROC, 0), unsigned long)
+#define PROC_CTRL _IOR(DB, DB_IOC(DB_PROC, 1), unsigned long)
+/* PROC_DETACH Deprecated */
+#define PROC_DETACH _IOR(DB, DB_IOC(DB_PROC, 2), unsigned long)
+#define PROC_ENUMNODE _IOWR(DB, DB_IOC(DB_PROC, 3), unsigned long)
+#define PROC_ENUMRESOURCES _IOWR(DB, DB_IOC(DB_PROC, 4), unsigned long)
+#define PROC_GET_STATE _IOWR(DB, DB_IOC(DB_PROC, 5), unsigned long)
+#define PROC_GET_TRACE _IOWR(DB, DB_IOC(DB_PROC, 6), unsigned long)
+#define PROC_LOAD _IOW(DB, DB_IOC(DB_PROC, 7), unsigned long)
+#define PROC_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_PROC, 8), unsigned long)
+#define PROC_START _IOW(DB, DB_IOC(DB_PROC, 9), unsigned long)
+#define PROC_RSVMEM _IOWR(DB, DB_IOC(DB_PROC, 10), unsigned long)
+#define PROC_UNRSVMEM _IOW(DB, DB_IOC(DB_PROC, 11), unsigned long)
+#define PROC_MAPMEM _IOWR(DB, DB_IOC(DB_PROC, 12), unsigned long)
+#define PROC_UNMAPMEM _IOR(DB, DB_IOC(DB_PROC, 13), unsigned long)
+#define PROC_FLUSHMEMORY _IOW(DB, DB_IOC(DB_PROC, 14), unsigned long)
+#define PROC_STOP _IOWR(DB, DB_IOC(DB_PROC, 15), unsigned long)
+#define PROC_INVALIDATEMEMORY _IOW(DB, DB_IOC(DB_PROC, 16), unsigned long)
+#define PROC_BEGINDMA _IOW(DB, DB_IOC(DB_PROC, 17), unsigned long)
+#define PROC_ENDDMA _IOW(DB, DB_IOC(DB_PROC, 18), unsigned long)
+
+/* NODE Module */
+#define NODE_ALLOCATE _IOWR(DB, DB_IOC(DB_NODE, 0), unsigned long)
+#define NODE_ALLOCMSGBUF _IOWR(DB, DB_IOC(DB_NODE, 1), unsigned long)
+#define NODE_CHANGEPRIORITY _IOW(DB, DB_IOC(DB_NODE, 2), unsigned long)
+#define NODE_CONNECT _IOW(DB, DB_IOC(DB_NODE, 3), unsigned long)
+#define NODE_CREATE _IOW(DB, DB_IOC(DB_NODE, 4), unsigned long)
+#define NODE_DELETE _IOW(DB, DB_IOC(DB_NODE, 5), unsigned long)
+#define NODE_FREEMSGBUF _IOW(DB, DB_IOC(DB_NODE, 6), unsigned long)
+#define NODE_GETATTR _IOWR(DB, DB_IOC(DB_NODE, 7), unsigned long)
+#define NODE_GETMESSAGE _IOWR(DB, DB_IOC(DB_NODE, 8), unsigned long)
+#define NODE_PAUSE _IOW(DB, DB_IOC(DB_NODE, 9), unsigned long)
+#define NODE_PUTMESSAGE _IOW(DB, DB_IOC(DB_NODE, 10), unsigned long)
+#define NODE_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_NODE, 11), unsigned long)
+#define NODE_RUN _IOW(DB, DB_IOC(DB_NODE, 12), unsigned long)
+#define NODE_TERMINATE _IOWR(DB, DB_IOC(DB_NODE, 13), unsigned long)
+#define NODE_GETUUIDPROPS _IOWR(DB, DB_IOC(DB_NODE, 14), unsigned long)
+
+/* STRM Module */
+#define STRM_ALLOCATEBUFFER _IOWR(DB, DB_IOC(DB_STRM, 0), unsigned long)
+#define STRM_CLOSE _IOW(DB, DB_IOC(DB_STRM, 1), unsigned long)
+#define STRM_FREEBUFFER _IOWR(DB, DB_IOC(DB_STRM, 2), unsigned long)
+#define STRM_GETEVENTHANDLE _IO(DB, DB_IOC(DB_STRM, 3)) /* Not Impl'd */
+#define STRM_GETINFO _IOWR(DB, DB_IOC(DB_STRM, 4), unsigned long)
+#define STRM_IDLE _IOW(DB, DB_IOC(DB_STRM, 5), unsigned long)
+#define STRM_ISSUE _IOW(DB, DB_IOC(DB_STRM, 6), unsigned long)
+#define STRM_OPEN _IOWR(DB, DB_IOC(DB_STRM, 7), unsigned long)
+#define STRM_RECLAIM _IOWR(DB, DB_IOC(DB_STRM, 8), unsigned long)
+#define STRM_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_STRM, 9), unsigned long)
+#define STRM_SELECT _IOWR(DB, DB_IOC(DB_STRM, 10), unsigned long)
+
+/* CMM Module */
+#define CMM_ALLOCBUF _IO(DB, DB_IOC(DB_CMM, 0)) /* Not Impl'd */
+#define CMM_FREEBUF _IO(DB, DB_IOC(DB_CMM, 1)) /* Not Impl'd */
+#define CMM_GETHANDLE _IOR(DB, DB_IOC(DB_CMM, 2), unsigned long)
+#define CMM_GETINFO _IOR(DB, DB_IOC(DB_CMM, 3), unsigned long)
+
+#endif /* DSPAPIIOCTL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspapi.h b/drivers/staging/tidspbridge/include/dspbridge/dspapi.h
new file mode 100644
index 00000000000..c99c68738b0
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspapi.h
@@ -0,0 +1,167 @@
+/*
+ * dspapi.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Includes the wrapper functions called directly by the
+ * DeviceIOControl interface.
+ *
+ * Notes:
+ * Bridge services exported to the Bridge driver are initialized by the DSPAPI
+ * on behalf of the Bridge driver. The Bridge driver must not call module
+ * Init/Exit functions.
+ *
+ * To ensure Bridge driver binary compatibility across different platforms,
+ * for the same processor, a Bridge driver must restrict its usage of system
+ * services to those exported by the DSPAPI library.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPAPI_
+#define DSPAPI_
+
+#include <dspbridge/dspapi-ioctl.h>
+
+/* This BRD API Library Version: */
+#define BRD_API_MAJOR_VERSION (u32)8 /* .8x - Alpha, .9x - Beta, 1.x FCS */
+#define BRD_API_MINOR_VERSION (u32)0
+
+/*
+ * ======== api_call_dev_ioctl ========
+ * Purpose:
+ * Call the (wrapper) function for the corresponding API IOCTL.
+ * Parameters:
+ * cmd: IOCTL id, base 0.
+ * args: Argument structure.
+ * result:
+ * Returns:
+ * 0 if command called; -EINVAL if command not in IOCTL
+ * table.
+ * Requires:
+ * Ensures:
+ */
+extern int api_call_dev_ioctl(unsigned int cmd,
+ union trapped_args *args,
+ u32 *result, void *pr_ctxt);
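+
+/*
+ * Sketch of the expected calling convention (illustrative only; the actual
+ * caller is the driver's ioctl entry point, i.e. the DeviceIOControl
+ * interface mentioned above, which first copies the user's argument block
+ * into a union trapped_args):
+ *
+ *   union trapped_args args;
+ *   u32 result = 0;
+ *   int status = api_call_dev_ioctl(cmd, &args, &result, pr_ctxt);
+ *
+ *   if (!status)
+ *       the wrapper ran; result holds its output value
+ *   else if (status == -EINVAL)
+ *       cmd was not found in the ioctl table
+ */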
+
+/*
+ * ======== api_init ========
+ * Purpose:
+ * Initialize modules used by Bridge API.
+ * This procedure is called when the driver is loaded.
+ * Parameters:
+ * Returns:
+ * TRUE if success; FALSE otherwise.
+ * Requires:
+ * Ensures:
+ */
+extern bool api_init(void);
+
+/*
+ * ======== api_init_complete2 ========
+ * Purpose:
+ * Perform any required bridge initialization that cannot be performed
+ * in api_init() or dev_start_device() because some services are not yet
+ * completely initialized.
+ * Parameters:
+ * Returns:
+ * 0: Allow this device to load
+ * -EPERM: Failure.
+ * Requires:
+ * Bridge API initialized.
+ * Ensures:
+ */
+extern int api_init_complete2(void);
+
+/*
+ * ======== api_exit ========
+ * Purpose:
+ * Exit all modules initialized in api_init(void).
+ * This procedure is called when the driver is unloaded.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * api_init(void) was previously called.
+ * Ensures:
+ * Resources acquired in api_init(void) are freed.
+ */
+extern void api_exit(void);
+
+/* MGR wrapper functions */
+extern u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt);
+extern u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt);
+extern u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt);
+extern u32 mgrwrap_unregister_object(union trapped_args *args, void *pr_ctxt);
+extern u32 mgrwrap_wait_for_bridge_events(union trapped_args *args,
+ void *pr_ctxt);
+
+extern u32 mgrwrap_get_process_resources_info(union trapped_args *args,
+ void *pr_ctxt);
+
+/* PROC (Processor) wrapper functions */
+extern u32 procwrap_attach(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_detach(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_load(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_start(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_map(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_stop(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt);
+extern u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt);
+
+/* NODE wrapper functions */
+extern u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_create(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_run(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt);
+extern u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt);
+
+/* STRM wrapper functions */
+extern u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_close(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_get_event_handle(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_open(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt);
+extern u32 strmwrap_select(union trapped_args *args, void *pr_ctxt);
+
+extern u32 cmmwrap_calloc_buf(union trapped_args *args, void *pr_ctxt);
+extern u32 cmmwrap_free_buf(union trapped_args *args, void *pr_ctxt);
+extern u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt);
+extern u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt);
+
+#endif /* DSPAPI_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h b/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h
new file mode 100644
index 00000000000..7146a5057e2
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h
@@ -0,0 +1,72 @@
+/*
+ * dspchnl.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Declares the upper edge channel class library functions required by
+ * all Bridge driver / DSP API driver interface tables. These functions are
+ * implemented by every class of Bridge channel library.
+ *
+ * Notes:
+ * The function comment headers reside in dspdefs.h.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPCHNL_
+#define DSPCHNL_
+
+extern int bridge_chnl_create(struct chnl_mgr **channel_mgr,
+ struct dev_object *hdev_obj,
+ const struct chnl_mgrattrs
+ *mgr_attrts);
+
+extern int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr);
+
+extern int bridge_chnl_open(struct chnl_object **chnl,
+ struct chnl_mgr *hchnl_mgr,
+ s8 chnl_mode,
+ u32 ch_id,
+ const struct chnl_attr
+ *pattrs);
+
+extern int bridge_chnl_close(struct chnl_object *chnl_obj);
+
+extern int bridge_chnl_add_io_req(struct chnl_object *chnl_obj,
+ void *host_buf,
+ u32 byte_size, u32 buf_size,
+ u32 dw_dsp_addr, u32 dw_arg);
+
+extern int bridge_chnl_get_ioc(struct chnl_object *chnl_obj,
+ u32 timeout, struct chnl_ioc *chan_ioc);
+
+extern int bridge_chnl_cancel_io(struct chnl_object *chnl_obj);
+
+extern int bridge_chnl_flush_io(struct chnl_object *chnl_obj,
+ u32 timeout);
+
+extern int bridge_chnl_get_info(struct chnl_object *chnl_obj,
+ struct chnl_info *channel_info);
+
+extern int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr,
+ u32 ch_id, struct chnl_mgrinfo
+ *mgr_info);
+
+extern int bridge_chnl_idle(struct chnl_object *chnl_obj,
+ u32 timeout, bool flush_data);
+
+extern int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
+ u32 event_mask,
+ u32 notify_type,
+ struct dsp_notification
+ *hnotification);
+
+#endif /* DSPCHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
new file mode 100644
index 00000000000..0ae7d1646a1
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -0,0 +1,1054 @@
+/*
+ * dspdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Bridge driver entry point and interface function declarations.
+ *
+ * Notes:
+ * The DSP API obtains its function interface to
+ * the Bridge driver via a call to bridge_drv_entry().
+ *
+ * Bridge services exported to Bridge drivers are initialized by the
+ * DSP API on behalf of the Bridge driver.
+ *
+ * Bridge function DBC Requires and Ensures are also made by the DSP API on
+ * behalf of the Bridge driver, to simplify the Bridge driver code.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPDEFS_
+#define DSPDEFS_
+
+#include <dspbridge/brddefs.h>
+#include <dspbridge/cfgdefs.h>
+#include <dspbridge/chnlpriv.h>
+#include <dspbridge/dehdefs.h>
+#include <dspbridge/devdefs.h>
+#include <dspbridge/iodefs.h>
+#include <dspbridge/msgdefs.h>
+
+/*
+ * Any IOCTLS at or above this value are reserved for standard Bridge driver
+ * interfaces.
+ */
+#define BRD_RESERVEDIOCTLBASE 0x8000
+
+/* Handle to Bridge driver's private device context. */
+struct bridge_dev_context;
+
+/*--------------------------------------------------------------------------- */
+/* BRIDGE DRIVER FUNCTION TYPES */
+/*--------------------------------------------------------------------------- */
+
+/*
+ * ======== bridge_brd_monitor ========
+ * Purpose:
+ * Bring the board to the BRD_IDLE (monitor) state.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device context.
+ * Returns:
+ * 0: Success.
+ * -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL
+ * Ensures:
+ * 0: Board is in BRD_IDLE state;
+ * else: Board state is indeterminate.
+ */
+typedef int(*fxn_brd_monitor) (struct bridge_dev_context *dev_ctxt);
+
+/*
+ * ======== fxn_brd_setstate ========
+ * Purpose:
+ * Sets the Bridge driver state
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * brd_state: Board state
+ * Returns:
+ * 0: Success.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL;
+ * brd_state <= BRD_LASTSTATE.
+ * Ensures:
+ * brd_state <= BRD_LASTSTATE.
+ * Update the Board state to the specified state.
+ */
+typedef int(*fxn_brd_setstate) (struct bridge_dev_context
+ * dev_ctxt, u32 brd_state);
+
+/*
+ * ======== bridge_brd_start ========
+ * Purpose:
+ * Bring board to the BRD_RUNNING (start) state.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device context.
+ * dsp_addr: DSP address at which to start execution.
+ * Returns:
+ * 0: Success.
+ * -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL
+ * Board is in monitor (BRD_IDLE) state.
+ * Ensures:
+ * 0: Board is in BRD_RUNNING state.
+ * Interrupts to the PC are enabled.
+ * else: Board state is indeterminate.
+ */
+typedef int(*fxn_brd_start) (struct bridge_dev_context
+ * dev_ctxt, u32 dsp_addr);
+
+/*
+ * ======== bridge_brd_mem_copy ========
+ * Purpose:
+ * Copy memory from one DSP address to another
+ * Parameters:
+ * dev_context: Pointer to context handle
+ * dsp_dest_addr: DSP address to copy to
+ * dsp_src_addr: DSP address to copy from
+ * ul_num_bytes: Number of bytes to copy
+ * mem_type: What section of memory to copy to
+ * Returns:
+ * 0: Success.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_context != NULL
+ * Ensures:
+ * 0: Board is in BRD_RUNNING state.
+ * Interrupts to the PC are enabled.
+ * else: Board state is indeterminate.
+ */
+typedef int(*fxn_brd_memcopy) (struct bridge_dev_context
+ * dev_ctxt,
+ u32 dsp_dest_addr,
+ u32 dsp_src_addr,
+ u32 ul_num_bytes, u32 mem_type);
+/*
+ * ======== bridge_brd_mem_write ========
+ * Purpose:
+ * Write a block of host memory into a DSP address, into a given memory
+ * space. Unlike bridge_brd_write, this API does reset the DSP
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * dsp_addr: Address on DSP board (Destination).
+ * host_buf: Pointer to host buffer (Source).
+ * ul_num_bytes: Number of bytes to transfer.
+ * mem_type: Memory space on DSP to which to transfer.
+ * Returns:
+ * 0: Success.
+ * -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL;
+ * host_buf != NULL.
+ * Ensures:
+ */
+typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
+ * dev_ctxt,
+ u8 *host_buf,
+ u32 dsp_addr, u32 ul_num_bytes,
+ u32 mem_type);
+
+/*
+ * ======== bridge_brd_mem_map ========
+ * Purpose:
+ * Map an MPU memory region to a DSP/IVA memory space
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * ul_mpu_addr: MPU memory region start address.
+ * virt_addr: DSP/IVA memory region u8 address.
+ * ul_num_bytes: Number of bytes to map.
+ * map_attrs: Mapping attributes (e.g. endianness).
+ * Returns:
+ * 0: Success.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL;
+ * Ensures:
+ */
+typedef int(*fxn_brd_memmap) (struct bridge_dev_context
+ * dev_ctxt, u32 ul_mpu_addr,
+ u32 virt_addr, u32 ul_num_bytes,
+ u32 map_attr,
+ struct page **mapped_pages);
+
+/*
+ * ======== bridge_brd_mem_un_map ========
+ * Purpose:
+ * Unmap an MPU memory region from DSP/IVA memory space
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * virt_addr: DSP/IVA memory region u8 address.
+ * ul_num_bytes: Number of bytes to unmap.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL;
+ * Ensures:
+ */
+typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
+ * dev_ctxt,
+ u32 virt_addr, u32 ul_num_bytes);
+
+/*
+ * ======== bridge_brd_stop ========
+ * Purpose:
+ * Bring board to the BRD_STOPPED state.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device context.
+ * Returns:
+ * 0: Success.
+ * -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL
+ * Ensures:
+ * 0: Board is in BRD_STOPPED (stop) state;
+ * Interrupts to the PC are disabled.
+ * else: Board state is indeterminate.
+ */
+typedef int(*fxn_brd_stop) (struct bridge_dev_context *dev_ctxt);
+
+/*
+ * ======== bridge_brd_status ========
+ * Purpose:
+ * Report the current state of the board.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device context.
+ * board_state: Ptr to BRD status variable.
+ * Returns:
+ * 0:
+ * Requires:
+ * board_state != NULL;
+ * dev_ctxt != NULL
+ * Ensures:
+ * *board_state is one of
+ * {BRD_STOPPED, BRD_IDLE, BRD_RUNNING, BRD_UNKNOWN};
+ */
+typedef int(*fxn_brd_status) (struct bridge_dev_context *dev_ctxt,
+ int *board_state);
+
+/*
+ * ======== bridge_brd_read ========
+ * Purpose:
+ * Read a block of DSP memory, from a given memory space, into a host
+ * buffer.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * host_buf: Pointer to host buffer (Destination).
+ * dsp_addr: Address on DSP board (Source).
+ * ul_num_bytes: Number of bytes to transfer.
+ * mem_type: Memory space on DSP from which to transfer.
+ * Returns:
+ * 0: Success.
+ * -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL;
+ * host_buf != NULL.
+ * Ensures:
+ * Will not write more than ul_num_bytes bytes into host_buf.
+ */
+typedef int(*fxn_brd_read) (struct bridge_dev_context *dev_ctxt,
+ u8 *host_buf,
+ u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+
+/*
+ * ======== bridge_brd_write ========
+ * Purpose:
+ * Write a block of host memory into a DSP address, into a given memory
+ * space.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * dsp_addr: Address on DSP board (Destination).
+ * host_buf: Pointer to host buffer (Source).
+ * ul_num_bytes: Number of bytes to transfer.
+ * mem_type: Memory space on DSP to which to transfer.
+ * Returns:
+ * 0: Success.
+ * -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
+ * -EPERM: Other, unspecified error.
+ * Requires:
+ * dev_ctxt != NULL;
+ * host_buf != NULL.
+ * Ensures:
+ */
+typedef int(*fxn_brd_write) (struct bridge_dev_context *dev_ctxt,
+ u8 *host_buf,
+ u32 dsp_addr,
+ u32 ul_num_bytes, u32 mem_type);
+
+/*
+ * ======== bridge_chnl_create ========
+ * Purpose:
+ * Create a channel manager object, responsible for opening new channels
+ * and closing old ones for a given 'Bridge board.
+ * Parameters:
+ * channel_mgr: Location to store a channel manager object on output.
+ * hdev_obj: Handle to a device object.
+ * mgr_attrts: Channel manager attributes.
+ * mgr_attrts->max_channels: Max channels
+ * mgr_attrts->birq: Channel's I/O IRQ number.
+ * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
+ * mgr_attrts->word_size: DSP word size in equivalent PC bytes.
+ * mgr_attrts->shm_base: Base physical address of shared memory, if any.
+ * mgr_attrts->usm_length: Bytes of shared memory block.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EIO: Unable to plug ISR for given IRQ.
+ * -EFAULT: Couldn't map physical address to a virtual one.
+ * Requires:
+ * channel_mgr != NULL.
+ * mgr_attrts != NULL
+ * mgr_attrts field are all valid:
+ * 0 < max_channels <= CHNL_MAXCHANNELS.
+ * birq <= 15.
+ * word_size > 0.
+ * hdev_obj != NULL
+ * No channel manager exists for this board.
+ * Ensures:
+ */
+typedef int(*fxn_chnl_create) (struct chnl_mgr
+ **channel_mgr,
+ struct dev_object
+ * hdev_obj,
+ const struct
+ chnl_mgrattrs * mgr_attrts);
+
+/*
+ * ======== bridge_chnl_destroy ========
+ * Purpose:
+ * Close all open channels, and destroy the channel manager.
+ * Parameters:
+ * hchnl_mgr: Channel manager object.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: hchnl_mgr was invalid.
+ * Requires:
+ * Ensures:
+ * 0: Cancels I/O on each open channel. Closes each open channel.
+ * chnl_create may subsequently be called for the same device.
+ */
+typedef int(*fxn_chnl_destroy) (struct chnl_mgr *hchnl_mgr);
+/*
+ * ======== bridge_deh_notify ========
+ * Purpose:
+ * When notified of DSP error, take appropriate action.
+ * Parameters:
+ * hdeh_mgr: Handle to DEH manager object.
+ * evnt_mask: Indicate the type of exception
+ * error_info: Error information
+ * Returns:
+ *
+ * Requires:
+ * hdeh_mgr != NULL;
+ * evnt_mask with a valid exception
+ * Ensures:
+ */
+typedef void (*fxn_deh_notify) (struct deh_mgr *hdeh_mgr,
+ u32 evnt_mask, u32 error_info);
+
+/*
+ * ======== bridge_chnl_open ========
+ * Purpose:
+ * Open a new half-duplex channel to the DSP board.
+ * Parameters:
+ * chnl: Location to store a channel object handle.
+ * hchnl_mgr: Handle to channel manager, as returned by
+ * CHNL_GetMgr().
+ * chnl_mode: One of {CHNL_MODETODSP, CHNL_MODEFROMDSP} specifies
+ * direction of data transfer.
+ * ch_id: If CHNL_PICKFREE is specified, the channel manager will
+ * select a free channel id (default);
+ * otherwise this field specifies the id of the channel.
+ * pattrs: Channel attributes. Attribute fields are as follows:
+ * pattrs->uio_reqs: Specifies the maximum number of I/O requests which can
+ * be pending at any given time. All request packets are
+ * preallocated when the channel is opened.
+ * pattrs->event_obj: This field allows the user to supply an auto reset
+ * event object for channel I/O completion notifications.
+ * It is the responsibility of the user to destroy this
+ * object AFTER closing the channel.
+ * This channel event object can be retrieved using
+ * CHNL_GetEventHandle().
+ * pattrs->hReserved: The kernel mode handle of this event object.
+ *
+ * Returns:
+ * 0: Success.
+ * -EFAULT: hchnl_mgr is invalid.
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EINVAL: Invalid number of IOReqs.
+ * -ENOSR: No free channels available.
+ * -ECHRNG: Channel ID is out of range.
+ * -EALREADY: Channel is in use.
+ * -EIO: No free IO request packets available for
+ * queuing.
+ * Requires:
+ * chnl != NULL.
+ * pattrs != NULL.
+ * pattrs->event_obj is a valid event handle.
+ * pattrs->hReserved is the kernel mode handle for pattrs->event_obj.
+ * Ensures:
+ * 0: *chnl is a valid channel.
+ * else: *chnl is set to NULL if (chnl != NULL);
+ */
+typedef int(*fxn_chnl_open) (struct chnl_object
+ **chnl,
+ struct chnl_mgr *hchnl_mgr,
+ s8 chnl_mode,
+ u32 ch_id,
+ const struct
+ chnl_attr * pattrs);
+
+/*
+ * ======== bridge_chnl_close ========
+ * Purpose:
+ * Ensures all pending I/O on this channel is cancelled, discards all
+ * queued I/O completion notifications, then frees the resources allocated
+ * for this channel, and makes the corresponding logical channel id
+ * available for subsequent use.
+ * Parameters:
+ * chnl_obj: Handle to a channel object.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid chnl_obj.
+ * Requires:
+ * No thread must be blocked on this channel's I/O completion event.
+ * Ensures:
+ * 0: chnl_obj is no longer valid.
+ */
+typedef int(*fxn_chnl_close) (struct chnl_object *chnl_obj);
+
+/*
+ * ======== bridge_chnl_add_io_req ========
+ * Purpose:
+ * Enqueue an I/O request for data transfer on a channel to the DSP.
+ * The direction (mode) is specified in the channel object. Note the DSP
+ * address is specified for channels opened in direct I/O mode.
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * host_buf: Host buffer address source.
+ * byte_size: Number of PC bytes to transfer. A zero value indicates
+ * that this buffer is the last in the output channel.
+ * A zero value is invalid for an input channel.
+ * buf_size: Actual buffer size in host bytes.
+ * dw_dsp_addr: DSP address for transfer. (Currently ignored).
+ * dw_arg: A user argument that travels with the buffer.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid chnl_obj or host_buf.
+ * -EPERM: User cannot mark EOS on an input channel.
+ * -ECANCELED: I/O has been cancelled on this channel. No further
+ * I/O is allowed.
+ * -EPIPE: End of stream was already marked on a previous
+ * IORequest on this channel. No further I/O is expected.
+ * -EINVAL: Buffer submitted to this output channel is larger than
+ * the size of the physical shared memory output window.
+ * Requires:
+ * Ensures:
+ * 0: The buffer will be transferred if the channel is ready;
+ * otherwise, will be queued for transfer when the channel becomes
+ * ready. In any case, notifications of I/O completion are
+ * asynchronous.
+ * If byte_size is 0 for an output channel, subsequent CHNL_AddIOReq's
+ * on this channel will fail with error code -EPIPE. The
+ * corresponding IOC for this I/O request will have its status flag
+ * set to CHNL_IOCSTATEOS.
+ */
+typedef int(*fxn_chnl_addioreq) (struct chnl_object
+ * chnl_obj,
+ void *host_buf,
+ u32 byte_size,
+ u32 buf_size,
+ u32 dw_dsp_addr, u32 dw_arg);
+
+/*
+ * ======== bridge_chnl_get_ioc ========
+ * Purpose:
+ * Dequeue an I/O completion record, which contains information about the
+ * completed I/O request.
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * timeout: A value of CHNL_IOCNOWAIT will simply dequeue the
+ * first available IOC.
+ * chan_ioc: On output, contains host buffer address, bytes
+ * transferred, and status of I/O completion.
+ * chan_ioc->status: See chnldefs.h.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid chnl_obj or chan_ioc.
+ * -EREMOTEIO: CHNL_IOCNOWAIT was specified as the timeout parameter
+ * yet no I/O completions were queued.
+ * Requires:
+ * timeout == CHNL_IOCNOWAIT.
+ * Ensures:
+ * 0: if there are any remaining IOC's queued before this call
+ * returns, the channel event object will be left in a signalled
+ * state.
+ */
+typedef int(*fxn_chnl_getioc) (struct chnl_object *chnl_obj,
+ u32 timeout,
+ struct chnl_ioc *chan_ioc);
+
+/*
+ * ======== bridge_chnl_cancel_io ========
+ * Purpose:
+ * Return all I/O requests to the client which have not yet been
+ * transferred. The channel's I/O completion object is
+ * signalled, and all the I/O requests are queued as IOC's, with the
+ * status field set to CHNL_IOCSTATCANCEL.
+ * This call is typically used in abort situations, and is a prelude to
+ * chnl_close();
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid chnl_obj.
+ * Requires:
+ * Ensures:
+ * Subsequent I/O requests to this channel will not be accepted.
+ */
+typedef int(*fxn_chnl_cancelio) (struct chnl_object *chnl_obj);
+
+/*
+ * ======== bridge_chnl_flush_io ========
+ * Purpose:
+ * For an output stream (to the DSP), indicates if any IO requests are in
+ * the output request queue. For input streams (from the DSP), will
+ * cancel all pending IO requests.
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * timeout: Timeout value for flush operation.
+ * Returns:
+ * 0: Success;
+ * S_CHNLIOREQUEST: Returned if any IORequests are in the output queue.
+ * -EFAULT: Invalid chnl_obj.
+ * Requires:
+ * Ensures:
+ * 0: No I/O requests will be pending on this channel.
+ */
+typedef int(*fxn_chnl_flushio) (struct chnl_object *chnl_obj,
+ u32 timeout);
+
+/*
+ * ======== bridge_chnl_get_info ========
+ * Purpose:
+ * Retrieve information related to a channel.
+ * Parameters:
+ * chnl_obj: Handle to a valid channel object, or NULL.
+ * channel_info: Location to store channel info.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid chnl_obj or channel_info.
+ * Requires:
+ * Ensures:
+ * 0: channel_info points to a filled in chnl_info struct,
+ * if (channel_info != NULL).
+ */
+typedef int(*fxn_chnl_getinfo) (struct chnl_object *chnl_obj,
+ struct chnl_info *channel_info);
+
+/*
+ * ======== bridge_chnl_get_mgr_info ========
+ * Purpose:
+ * Retrieve information related to the channel manager.
+ * Parameters:
+ * hchnl_mgr: Handle to a valid channel manager, or NULL.
+ * ch_id: Channel ID.
+ * mgr_info: Location to store channel manager info.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid hchnl_mgr or mgr_info.
+ * -ECHRNG: Invalid channel ID.
+ * Requires:
+ * Ensures:
+ * 0: mgr_info points to a filled in chnl_mgrinfo
+ * struct, if (mgr_info != NULL).
+ */
+typedef int(*fxn_chnl_getmgrinfo) (struct chnl_mgr
+ * hchnl_mgr,
+ u32 ch_id,
+ struct chnl_mgrinfo *mgr_info);
+
+/*
+ * ======== bridge_chnl_idle ========
+ * Purpose:
+ * Idle a channel. If this is an input channel, or if this is an output
+ * channel and flush_data is TRUE, all currently enqueued buffers will be
+ * dequeued (data discarded for output channel).
+ * If this is an output channel and flush_data is FALSE, this function
+ * will block until all currently buffered data is output, or the timeout
+ * specified has been reached.
+ *
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * timeout: If output channel and flush_data is FALSE, timeout value
+ * to wait for buffers to be output. (Not used for
+ * input channel).
+ * flush_data: If output channel and flush_data is TRUE, discard any
+ * currently buffered data. If FALSE, wait for currently
+ * buffered data to be output, or timeout, whichever
+ * occurs first. flush_data is ignored for input channel.
+ * Returns:
+ * 0: Success;
+ * -EFAULT: Invalid chnl_obj.
+ * -ETIMEDOUT: Timeout occurred before channel could be idled.
+ * Requires:
+ * Ensures:
+ */
+typedef int(*fxn_chnl_idle) (struct chnl_object *chnl_obj,
+ u32 timeout, bool flush_data);
+
+/*
+ * ======== bridge_chnl_register_notify ========
+ * Purpose:
+ * Register for notification of events on a channel.
+ * Parameters:
+ * chnl_obj: Channel object handle.
+ * event_mask: Type of events to be notified about: IO completion
+ * (DSP_STREAMIOCOMPLETION) or end of stream
+ * (DSP_STREAMDONE).
+ * notify_type: DSP_SIGNALEVENT.
+ * hnotification: Handle of a dsp_notification object.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory.
+ * -EINVAL: event_mask is 0 and hnotification was not
+ * previously registered.
+ * -EFAULT: NULL hnotification, hnotification event name
+ * too long, or hnotification event name NULL.
+ * Requires:
+ * Valid chnl_obj.
+ * hnotification != NULL.
+ * (event_mask & ~(DSP_STREAMIOCOMPLETION | DSP_STREAMDONE)) == 0.
+ * notify_type == DSP_SIGNALEVENT.
+ * Ensures:
+ */
+typedef int(*fxn_chnl_registernotify)
+ (struct chnl_object *chnl_obj,
+ u32 event_mask, u32 notify_type, struct dsp_notification *hnotification);
+
+/*
+ * ======== bridge_dev_create ========
+ * Purpose:
+ * Complete creation of the device object for this board.
+ * Parameters:
+ * device_ctx: Ptr to location to store a Bridge device context.
+ * hdev_obj: Handle to a Device Object, created and managed by DSP API.
+ * config_param: Ptr to configuration parameters provided by the
+ * Configuration Manager during device loading.
+ * pDspConfig: DSP resources, as specified in the registry key for this
+ * device.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Unable to allocate memory for device context.
+ * Requires:
+ * device_ctx != NULL;
+ * hdev_obj != NULL;
+ * config_param != NULL;
+ * pDspConfig != NULL;
+ * Fields in config_param and pDspConfig contain valid values.
+ * Ensures:
+ * 0: All Bridge driver specific DSP resource and other
+ * board context has been allocated.
+ * -ENOMEM: Bridge failed to allocate resources.
+ * Any acquired resources have been freed. The DSP API
+ * will not call bridge_dev_destroy() if
+ * bridge_dev_create() fails.
+ * Details:
+ * Called during the CONFIGMG's Device_Init phase. Based on host and
+ * DSP configuration information, create a board context, a handle to
+ * which is passed into other Bridge BRD and CHNL functions. The
+ * board context contains state information for the device. Since the
+ * addresses of all pointer parameters may be invalid when this
+ * function returns, they must not be stored into the device context
+ * structure.
+ */
+typedef int(*fxn_dev_create) (struct bridge_dev_context
+ **device_ctx,
+ struct dev_object
+ * hdev_obj,
+ struct cfg_hostres
+ * config_param);
+
+/*
+ * ======== bridge_dev_ctrl ========
+ * Purpose:
+ * Bridge driver specific interface.
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device info.
+ * dw_cmd: Bridge driver defined command code.
+ * pargs: Pointer to an arbitrary argument structure.
+ * Returns:
+ * 0 or -EPERM. Actual command error codes should be passed back in
+ * the pargs structure, and are defined by the Bridge driver implementor.
+ * Requires:
+ * All calls are currently assumed to be synchronous. There are no
+ * IOCTL completion routines provided.
+ * Ensures:
+ */
+typedef int(*fxn_dev_ctrl) (struct bridge_dev_context *dev_ctxt,
+ u32 dw_cmd, void *pargs);
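+
+/*
+ * Illustrative call through the interface table (a sketch only; pwr_args is
+ * a placeholder for a Bridge-driver-defined argument structure, and the
+ * BRDIOCTL_* reserved commands are declared in dspioctl.h):
+ *
+ *   status = (*intf_fxns->pfn_dev_cntrl)(dev_ctxt, BRDIOCTL_WAKEUP,
+ *                                        &pwr_args);
+ */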
+
+/*
+ * ======== bridge_dev_destroy ========
+ * Purpose:
+ * Deallocate Bridge device extension structures and all other resources
+ * acquired by the Bridge driver.
+ * No calls to other Bridge driver functions may subsequently
+ * occur, except for bridge_dev_create().
+ * Parameters:
+ * dev_ctxt: Handle to Bridge driver defined device information.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Failed to release a resource previously acquired.
+ * Requires:
+ * dev_ctxt != NULL;
+ * Ensures:
+ * 0: Device context is freed.
+ */
+typedef int(*fxn_dev_destroy) (struct bridge_dev_context *dev_ctxt);
+
+/*
+ * ======== bridge_io_create ========
+ * Purpose:
+ * Create an object that manages I/O between CHNL and msg_ctrl.
+ * Parameters:
+ * io_man: Location to store IO manager on output.
+ * hchnl_mgr: Handle to channel manager.
+ * hmsg_mgr: Handle to message manager.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failure.
+ * -EPERM: Creation failed.
+ * Requires:
+ * hdev_obj != NULL;
+ * Channel manager already created;
+ * Message manager already created;
+ * mgr_attrts != NULL;
+ * io_man != NULL;
+ * Ensures:
+ */
+typedef int(*fxn_io_create) (struct io_mgr **io_man,
+ struct dev_object *hdev_obj,
+ const struct io_attrs *mgr_attrts);
+
+/*
+ * ======== bridge_io_destroy ========
+ * Purpose:
+ * Destroy object created in bridge_io_create.
+ * Parameters:
+ * hio_mgr: IO Manager.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failure.
+ * -EPERM: Creation failed.
+ * Requires:
+ * Valid hio_mgr;
+ * Ensures:
+ */
+typedef int(*fxn_io_destroy) (struct io_mgr *hio_mgr);
+
+/*
+ * ======== bridge_io_on_loaded ========
+ * Purpose:
+ * Called whenever a program is loaded to update internal data. For
+ * example, if shared memory is used, this function would update the
+ * shared memory location and address.
+ * Parameters:
+ * hio_mgr: IO Manager.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Internal failure occurred.
+ * Requires:
+ * Valid hio_mgr;
+ * Ensures:
+ */
+typedef int(*fxn_io_onloaded) (struct io_mgr *hio_mgr);
+
+/*
+ * ======== fxn_io_getprocload ========
+ * Purpose:
+ * Called to get the Processor's current and predicted load
+ * Parameters:
+ * hio_mgr: IO Manager.
+ * proc_load_stat Processor Load statistics
+ * Returns:
+ * 0: Success.
+ * -EPERM: Internal failure occurred.
+ * Requires:
+ * Valid hio_mgr;
+ * Ensures:
+ */
+typedef int(*fxn_io_getprocload) (struct io_mgr *hio_mgr,
+ struct dsp_procloadstat *
+ proc_load_stat);
+
+/*
+ * ======== bridge_msg_create ========
+ * Purpose:
+ * Create an object to manage message queues. Only one of these objects
+ * can exist per device object.
+ * Parameters:
+ * msg_man: Location to store msg_ctrl manager on output.
+ * hdev_obj: Handle to a device object.
+ * msg_callback: Called whenever an RMS_EXIT message is received.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory.
+ * Requires:
+ * msg_man != NULL.
+ * msg_callback != NULL.
+ * hdev_obj != NULL.
+ * Ensures:
+ */
+typedef int(*fxn_msg_create)
+ (struct msg_mgr **msg_man,
+ struct dev_object *hdev_obj, msg_onexit msg_callback);
+
+/*
+ * ======== bridge_msg_create_queue ========
+ * Purpose:
+ * Create a msg_ctrl queue for sending or receiving messages from a Message
+ * node on the DSP.
+ * Parameters:
+ * hmsg_mgr: msg_ctrl queue manager handle returned from
+ * bridge_msg_create.
+ * msgq: Location to store msg_ctrl queue on output.
+ * msgq_id: Identifier for messages (node environment pointer).
+ * max_msgs: Max number of simultaneous messages for the node.
+ * h: Handle passed to hmsg_mgr->msg_callback().
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory.
+ * Requires:
+ * msgq != NULL.
+ * h != NULL.
+ * max_msgs > 0.
+ * Ensures:
+ * msgq !=NULL <==> 0.
+ */
+typedef int(*fxn_msg_createqueue)
+ (struct msg_mgr *hmsg_mgr,
+ struct msg_queue **msgq, u32 msgq_id, u32 max_msgs, void *h);
+
+/*
+ * ======== bridge_msg_delete ========
+ * Purpose:
+ * Delete a msg_ctrl manager allocated in bridge_msg_create().
+ * Parameters:
+ * hmsg_mgr: Handle returned from bridge_msg_create().
+ * Returns:
+ * Requires:
+ * Valid hmsg_mgr.
+ * Ensures:
+ */
+typedef void (*fxn_msg_delete) (struct msg_mgr *hmsg_mgr);
+
+/*
+ * ======== bridge_msg_delete_queue ========
+ * Purpose:
+ * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
+ * Parameters:
+ * msg_queue_obj: Handle to msg_ctrl queue returned from
+ * bridge_msg_create_queue.
+ * Returns:
+ * Requires:
+ * Valid msg_queue_obj.
+ * Ensures:
+ */
+typedef void (*fxn_msg_deletequeue) (struct msg_queue *msg_queue_obj);
+
+/*
+ * ======== bridge_msg_get ========
+ * Purpose:
+ * Get a message from a msg_ctrl queue.
+ * Parameters:
+ * msg_queue_obj: Handle to msg_ctrl queue returned from
+ * bridge_msg_create_queue.
+ * pmsg: Location to copy message into.
+ * utimeout: Timeout to wait for a message.
+ * Returns:
+ * 0: Success.
+ * -ETIME: Timeout occurred.
+ * -EPERM: No frames available for message (max_msgs too
+ * small).
+ * Requires:
+ * Valid msg_queue_obj.
+ * pmsg != NULL.
+ * Ensures:
+ */
+typedef int(*fxn_msg_get) (struct msg_queue *msg_queue_obj,
+ struct dsp_msg *pmsg, u32 utimeout);
+
+/*
+ * ======== bridge_msg_put ========
+ * Purpose:
+ * Put a message onto a msg_ctrl queue.
+ * Parameters:
+ * msg_queue_obj: Handle to msg_ctrl queue returned from
+ * bridge_msg_create_queue.
+ * pmsg: Pointer to message.
+ * utimeout: Timeout to wait for a message.
+ * Returns:
+ * 0: Success.
+ * -ETIME: Timeout occurred.
+ * -EPERM: No frames available for message (max_msgs too
+ * small).
+ * Requires:
+ * Valid msg_queue_obj.
+ * pmsg != NULL.
+ * Ensures:
+ */
+typedef int(*fxn_msg_put) (struct msg_queue *msg_queue_obj,
+ const struct dsp_msg *pmsg, u32 utimeout);
+
+/*
+ * ======== bridge_msg_register_notify ========
+ * Purpose:
+ * Register notification for when a message is ready.
+ * Parameters:
+ * msg_queue_obj: Handle to msg_ctrl queue returned from
+ * bridge_msg_create_queue.
+ * event_mask: Type of events to be notified about: Must be
+ * DSP_NODEMESSAGEREADY, or 0 to unregister.
+ * notify_type: DSP_SIGNALEVENT.
+ * hnotification: Handle of notification object.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory.
+ * Requires:
+ * Valid msg_queue_obj.
+ * hnotification != NULL.
+ * notify_type == DSP_SIGNALEVENT.
+ * event_mask == DSP_NODEMESSAGEREADY || event_mask == 0.
+ * Ensures:
+ */
+typedef int(*fxn_msg_registernotify)
+ (struct msg_queue *msg_queue_obj,
+ u32 event_mask, u32 notify_type, struct dsp_notification *hnotification);
+
+/*
+ * ======== bridge_msg_set_queue_id ========
+ * Purpose:
+ * Set message queue id to node environment. Allows bridge_msg_create_queue
+ * to be called in node_allocate, before the node environment is known.
+ * Parameters:
+ * msg_queue_obj: Handle to msg_ctrl queue returned from
+ * bridge_msg_create_queue.
+ * msgq_id: Node environment pointer.
+ * Returns:
+ * Requires:
+ * Valid msg_queue_obj.
+ * msgq_id != 0.
+ * Ensures:
+ */
+typedef void (*fxn_msg_setqueueid) (struct msg_queue *msg_queue_obj,
+ u32 msgq_id);
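+
+/*
+ * Typical ordering (a sketch, not a requirement of this interface; node_handle
+ * and node_env are placeholder names): the queue can be created while the node
+ * is being allocated, then bound to the node environment once it is known:
+ *
+ *   bridge_msg_create_queue(hmsg_mgr, &msgq, 0, max_msgs, node_handle);
+ *   ...the DSP-side node is created, yielding node_env...
+ *   bridge_msg_set_queue_id(msgq, node_env);
+ */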
+
+/*
+ * Bridge Driver interface function table.
+ *
+ * The information in this table is filled in by the specific Bridge driver,
+ * and copied into the DSP API's own space. If any interface
+ * function field is set to a value of NULL, then the DSP API will
+ * consider that function not implemented, and return the error code
+ * -ENOSYS when a Bridge driver client attempts to call that function.
+ *
+ * This function table contains DSP API version numbers, which are used by the
+ * Bridge driver loader to help ensure backward compatibility between older
+ * Bridge drivers and newer DSP API. These must be set to
+ * BRD_API_MAJOR_VERSION and BRD_API_MINOR_VERSION, respectively.
+ *
+ * A Bridge driver need not export a CHNL interface. In this case, *all* of
+ * the bridge_chnl_* entries must be set to NULL.
+ */
+struct bridge_drv_interface {
+ u32 brd_api_major_version; /* Set to BRD_API_MAJOR_VERSION. */
+ u32 brd_api_minor_version; /* Set to BRD_API_MINOR_VERSION. */
+ fxn_dev_create pfn_dev_create; /* Create device context */
+ fxn_dev_destroy pfn_dev_destroy; /* Destroy device context */
+ fxn_dev_ctrl pfn_dev_cntrl; /* Optional vendor interface */
+ fxn_brd_monitor pfn_brd_monitor; /* Load and/or start monitor */
+ fxn_brd_start pfn_brd_start; /* Start DSP program. */
+ fxn_brd_stop pfn_brd_stop; /* Stop/reset board. */
+ fxn_brd_status pfn_brd_status; /* Get current board status. */
+ fxn_brd_read pfn_brd_read; /* Read board memory */
+ fxn_brd_write pfn_brd_write; /* Write board memory. */
+ fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
+ fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
+ fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
+ fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
+	fxn_brd_memunmap pfn_brd_mem_un_map;	/* Unmaps MPU mem from DSP mem */
+ fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
+ fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
+ fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
+ fxn_chnl_close pfn_chnl_close; /* Close a channel. */
+ fxn_chnl_addioreq pfn_chnl_add_io_req; /* Req I/O on a channel. */
+ fxn_chnl_getioc pfn_chnl_get_ioc; /* Wait for I/O completion. */
+	fxn_chnl_cancelio pfn_chnl_cancel_io;	/* Cancel I/O on a channel. */
+ fxn_chnl_flushio pfn_chnl_flush_io; /* Flush I/O. */
+ fxn_chnl_getinfo pfn_chnl_get_info; /* Get channel specific info */
+ /* Get channel manager info. */
+ fxn_chnl_getmgrinfo pfn_chnl_get_mgr_info;
+ fxn_chnl_idle pfn_chnl_idle; /* Idle the channel */
+ /* Register for notif. */
+ fxn_chnl_registernotify pfn_chnl_register_notify;
+ fxn_io_create pfn_io_create; /* Create IO manager */
+ fxn_io_destroy pfn_io_destroy; /* Destroy IO manager */
+ fxn_io_onloaded pfn_io_on_loaded; /* Notify of program loaded */
+ /* Get Processor's current and predicted load */
+ fxn_io_getprocload pfn_io_get_proc_load;
+ fxn_msg_create pfn_msg_create; /* Create message manager */
+ /* Create message queue */
+ fxn_msg_createqueue pfn_msg_create_queue;
+ fxn_msg_delete pfn_msg_delete; /* Delete message manager */
+ /* Delete message queue */
+ fxn_msg_deletequeue pfn_msg_delete_queue;
+ fxn_msg_get pfn_msg_get; /* Get a message */
+ fxn_msg_put pfn_msg_put; /* Send a message */
+ /* Register for notif. */
+ fxn_msg_registernotify pfn_msg_register_notify;
+ /* Set message queue id */
+ fxn_msg_setqueueid pfn_msg_set_queue_id;
+};
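+
+/*
+ * Minimal sketch of how a Bridge driver might populate this table (the my_*
+ * names are placeholders, not part of this interface; unimplemented entries
+ * are simply left NULL):
+ *
+ *   static struct bridge_drv_interface drv_interface_fxns = {
+ *       BRD_API_MAJOR_VERSION,
+ *       BRD_API_MINOR_VERSION,
+ *       my_dev_create,
+ *       my_dev_destroy,
+ *       NULL,              (no vendor-specific dev_cntrl)
+ *       my_brd_monitor,
+ *       ...remaining entries in declaration order, or NULL...
+ *   };
+ */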
+
+/*
+ * ======== bridge_drv_entry ========
+ * Purpose:
+ * Registers Bridge driver functions with the DSP API. Called only once
+ * by the DSP API. The caller will first check DSP API version
+ * compatibility, and then copy the interface functions into its own
+ * memory space.
+ * Parameters:
+ * drv_intf Pointer to a location to receive a pointer to the
+ * Bridge driver interface.
+ * Returns:
+ * Requires:
+ * The code segment this function resides in must expect to be discarded
+ * after completion.
+ * Ensures:
+ * drv_intf pointer initialized to Bridge driver's function
+ * interface. No system resources are acquired by this function.
+ * Details:
+ * Called during the Device_Init phase.
+ */
+void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
+ const char *driver_file_name);
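+
+/*
+ * A minimal sketch of an entry point (illustrative; drv_interface_fxns stands
+ * for a statically defined table such as the one sketched above):
+ *
+ *   void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
+ *                         const char *driver_file_name)
+ *   {
+ *       *drv_intf = &drv_interface_fxns;
+ *   }
+ */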
+
+#endif /* DSPDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h b/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h
new file mode 100644
index 00000000000..d258ab6a41d
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h
@@ -0,0 +1,43 @@
+/*
+ * dspdeh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Defines upper edge DEH functions required by all Bridge driver/DSP API
+ * interface tables.
+ *
+ * Notes:
+ * Function comment headers reside with the function typedefs in dspdefs.h.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2010 Felipe Contreras
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPDEH_
+#define DSPDEH_
+
+struct deh_mgr;
+struct dev_object;
+struct dsp_notification;
+
+int bridge_deh_create(struct deh_mgr **ret_deh,
+ struct dev_object *hdev_obj);
+
+int bridge_deh_destroy(struct deh_mgr *deh);
+
+int bridge_deh_register_notify(struct deh_mgr *deh,
+ u32 event_mask,
+ u32 notify_type,
+ struct dsp_notification *hnotification);
+
+void bridge_deh_notify(struct deh_mgr *deh, int event, int info);
+
+#endif /* DSPDEH_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h b/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
new file mode 100644
index 00000000000..0bb250f95ba
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
@@ -0,0 +1,62 @@
+/*
+ * dspdrv.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This is the Stream Interface for the DSP API.
+ * All Device operations are performed via DeviceIOControl.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#if !defined _DSPDRV_H_
+#define _DSPDRV_H_
+
+#define MAX_DEV 10 /* Max support of 10 devices */
+
+/*
+ * ======== dsp_deinit ========
+ * Purpose:
+ * This function is called by Device Manager to de-initialize a device.
+ * This function is not called by applications.
+ * Parameters:
+ * device_context: Handle to the device context. The XXX_Init function
+ * creates and returns this identifier.
+ * Returns:
+ * TRUE indicates the device successfully de-initialized. Otherwise it
+ * returns FALSE.
+ * Requires:
+ * device_context != NULL. For a built-in device this should never
+ * get called.
+ * Ensures:
+ */
+extern bool dsp_deinit(u32 device_context);
+
+/*
+ * ======== dsp_init ========
+ * Purpose:
+ * This function is called by Device Manager to initialize a device.
+ * This function is not called by applications
+ * Parameters:
+ * init_status: Location in which the status of the initialization is
+ * returned to the caller.
+ * Returns:
+ * Returns a handle to the device context created. This is the actual
+ * Device Object representing the DSP Device instance.
+ * Requires:
+ * Ensures:
+ * Succeeded: device context > 0
+ * Failed: device Context = 0
+ */
+extern u32 dsp_init(u32 *init_status);
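+
+/*
+ * Illustrative load/unload sequence (a sketch only):
+ *
+ *   u32 init_status = 0;
+ *   u32 dev_ctx = dsp_init(&init_status);
+ *
+ *   if (!dev_ctx)
+ *       the device failed to initialize
+ *
+ *   ...and on unload:
+ *   dsp_deinit(dev_ctx);
+ */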
+
+#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspio.h b/drivers/staging/tidspbridge/include/dspbridge/dspio.h
new file mode 100644
index 00000000000..88f5f90fe92
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspio.h
@@ -0,0 +1,41 @@
+/*
+ * dspio.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Declares the upper edge IO functions required by all Bridge driver /DSP API
+ * interface tables.
+ *
+ * Notes:
+ * Function comment headers reside in dspdefs.h.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPIO_
+#define DSPIO_
+
+#include <dspbridge/devdefs.h>
+#include <dspbridge/iodefs.h>
+
+extern int bridge_io_create(struct io_mgr **io_man,
+ struct dev_object *hdev_obj,
+ const struct io_attrs *mgr_attrts);
+
+extern int bridge_io_destroy(struct io_mgr *hio_mgr);
+
+extern int bridge_io_on_loaded(struct io_mgr *hio_mgr);
+
+extern int iva_io_on_loaded(struct io_mgr *hio_mgr);
+extern int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
+ struct dsp_procloadstat *proc_lstat);
+
+#endif /* DSPIO_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
new file mode 100644
index 00000000000..41e0594dff3
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -0,0 +1,73 @@
+/*
+ * dspioctl.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Bridge driver BRD_IOCtl reserved command definitions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPIOCTL_
+#define DSPIOCTL_
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/*
+ * Any IOCTLS at or above this value are reserved for standard Bridge driver
+ * interfaces.
+ */
+#define BRDIOCTL_RESERVEDBASE 0x8000
+
+#define BRDIOCTL_CHNLREAD (BRDIOCTL_RESERVEDBASE + 0x10)
+#define BRDIOCTL_CHNLWRITE (BRDIOCTL_RESERVEDBASE + 0x20)
+#define BRDIOCTL_GETINTRCOUNT (BRDIOCTL_RESERVEDBASE + 0x30)
+#define BRDIOCTL_RESETINTRCOUNT (BRDIOCTL_RESERVEDBASE + 0x40)
+#define BRDIOCTL_INTERRUPTDSP (BRDIOCTL_RESERVEDBASE + 0x50)
+/* DMMU */
+#define BRDIOCTL_SETMMUCONFIG (BRDIOCTL_RESERVEDBASE + 0x60)
+/* PWR */
+#define BRDIOCTL_PWRCONTROL (BRDIOCTL_RESERVEDBASE + 0x70)
+
+/* Attention, modifiers:
+ * Some of these control enumerations are made visible to user space for power
+ * control, so any change to this list must also be reflected in the user
+ * header file 'dbdefs.h'. */
+/* These ioctls are reserved for PWR power commands for the DSP */
+#define BRDIOCTL_DEEPSLEEP (BRDIOCTL_PWRCONTROL + 0x0)
+#define BRDIOCTL_EMERGENCYSLEEP (BRDIOCTL_PWRCONTROL + 0x1)
+#define BRDIOCTL_WAKEUP (BRDIOCTL_PWRCONTROL + 0x2)
+#define BRDIOCTL_PWRENABLE (BRDIOCTL_PWRCONTROL + 0x3)
+#define BRDIOCTL_PWRDISABLE (BRDIOCTL_PWRCONTROL + 0x4)
+#define BRDIOCTL_CLK_CTRL (BRDIOCTL_PWRCONTROL + 0x7)
+/* DSP Initiated Hibernate */
+#define BRDIOCTL_PWR_HIBERNATE (BRDIOCTL_PWRCONTROL + 0x8)
+#define BRDIOCTL_PRESCALE_NOTIFY (BRDIOCTL_PWRCONTROL + 0x9)
+#define BRDIOCTL_POSTSCALE_NOTIFY (BRDIOCTL_PWRCONTROL + 0xA)
+#define BRDIOCTL_CONSTRAINT_REQUEST (BRDIOCTL_PWRCONTROL + 0xB)
+
+/* Number of actual DSP-MMU TLB entries */
+#define BRDIOCTL_NUMOFMMUTLB 32
+
+struct bridge_ioctl_extproc {
+ u32 ul_dsp_va; /* DSP virtual address */
+ u32 ul_gpp_pa; /* GPP physical address */
+ /* GPP virtual address. __va does not work for ioremapped addresses */
+ u32 ul_gpp_va;
+ u32 ul_size; /* Size of the mapped memory in bytes */
+ enum hw_endianism_t endianism;
+ enum hw_mmu_mixed_size_t mixed_mode;
+ enum hw_element_size_t elem_size;
+};
+
+#endif /* DSPIOCTL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h b/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h
new file mode 100644
index 00000000000..d4bd458bc8b
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h
@@ -0,0 +1,56 @@
+/*
+ * dspmsg.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Declares the upper edge message class library functions required by
+ * all Bridge driver / DSP API interface tables. These functions are
+ * implemented by every class of Bridge driver channel library.
+ *
+ * Notes:
+ * Function comment headers reside in dspdefs.h.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DSPMSG_
+#define DSPMSG_
+
+#include <dspbridge/msgdefs.h>
+
+extern int bridge_msg_create(struct msg_mgr **msg_man,
+ struct dev_object *hdev_obj,
+ msg_onexit msg_callback);
+
+extern int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
+ struct msg_queue **msgq,
+ u32 msgq_id, u32 max_msgs, void *arg);
+
+extern void bridge_msg_delete(struct msg_mgr *hmsg_mgr);
+
+extern void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj);
+
+extern int bridge_msg_get(struct msg_queue *msg_queue_obj,
+ struct dsp_msg *pmsg, u32 utimeout);
+
+extern int bridge_msg_put(struct msg_queue *msg_queue_obj,
+ const struct dsp_msg *pmsg, u32 utimeout);
+
+extern int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
+ u32 event_mask,
+ u32 notify_type,
+ struct dsp_notification
+ *hnotification);
+
+extern void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj,
+ u32 msgq_id);
+
+#endif /* DSPMSG_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h b/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
new file mode 100644
index 00000000000..4b109d173b1
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
@@ -0,0 +1,492 @@
+/*
+ * dynamic_loader.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DYNAMIC_LOADER_H_
+#define _DYNAMIC_LOADER_H_
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * Dynamic Loader
+ *
+ * The function of the dynamic loader is to load a "module" containing
+ * instructions for a "target" processor into that processor. In the process
+ * it assigns memory for the module, resolves symbol references made by the
+ * module, and remembers symbols defined by the module.
+ *
+ * The dynamic loader is parameterized for a particular system by 4 classes
+ * that supply the module-specific and system-specific functions it requires.
+ */
+ /* The read functions for the module image to be loaded */
+struct dynamic_loader_stream;
+
+ /* This class defines "host" symbol and support functions */
+struct dynamic_loader_sym;
+
+ /* This class defines the allocator for "target" memory */
+struct dynamic_loader_allocate;
+
+ /* This class defines the copy-into-target-memory functions */
+struct dynamic_loader_initialize;
+
+/*
+ * Option flags to modify the behavior of module loading
+ */
+#define DLOAD_INITBSS 0x1 /* initialize BSS sections to zero */
+#define DLOAD_BIGEND 0x2 /* require big-endian load module */
+#define DLOAD_LITTLE 0x4 /* require little-endian load module */
+
+/*****************************************************************************
+ * Procedure dynamic_load_module
+ *
+ * Parameters:
+ * module The input stream that supplies the module image
+ * syms Host-side symbol table and malloc/free functions
+ * alloc Target-side memory allocation
+ * init Target-side memory initialization, or NULL for symbol read only
+ * options Option flags DLOAD_*
+ * mhandle A module handle for use with Dynamic_Unload
+ *
+ * Effect:
+ * The module image is read using *module. Target storage for the new image is
+ * obtained from *alloc. Symbols defined and referenced by the module are
+ * managed using *syms. The image is then relocated and references resolved
+ * as necessary, and the resulting executable bits are placed into target memory
+ * using *init.
+ *
+ * Returns:
+ * On a successful load, a module handle is placed in *mhandle, and zero is
+ * returned. On error, the number of errors detected is returned. Individual
+ * errors are reported during the load process using syms->error_report().
+ **************************************************************************** */
+extern int dynamic_load_module(
+ /* the source for the module image */
+ struct dynamic_loader_stream *module,
+ /* host support for symbols and storage */
+ struct dynamic_loader_sym *syms,
+ /* the target memory allocator */
+ struct dynamic_loader_allocate *alloc,
+ /* the target memory initializer */
+ struct dynamic_loader_initialize *init,
+ unsigned options, /* option flags */
+ /* the returned module handle */
+ void **mhandle);
+
+/*****************************************************************************
+ * Procedure dynamic_open_module
+ *
+ * Parameters:
+ * module The input stream that supplies the module image
+ * syms Host-side symbol table and malloc/free functions
+ * alloc Target-side memory allocation
+ * init Target-side memory initialization, or NULL for symbol read only
+ * options Option flags DLOAD_*
+ * mhandle A module handle for use with Dynamic_Unload
+ *
+ * Effect:
+ * The module image is read using *module. Target storage for the new image is
+ * obtained from *alloc. Symbols defined and referenced by the module are
+ * managed using *syms. The image is then relocated and references resolved
+ * as necessary, and the resulting executable bits are placed into target memory
+ * using *init.
+ *
+ * Returns:
+ * On a successful load, a module handle is placed in *mhandle, and zero is
+ * returned. On error, the number of errors detected is returned. Individual
+ * errors are reported during the load process using syms->error_report().
+ **************************************************************************** */
+extern int dynamic_open_module(
+ /* the source for the module image */
+ struct dynamic_loader_stream *module,
+ /* host support for symbols and storage */
+ struct dynamic_loader_sym *syms,
+ /* the target memory allocator */
+ struct dynamic_loader_allocate *alloc,
+ /* the target memory initializer */
+ struct dynamic_loader_initialize *init,
+ unsigned options, /* option flags */
+ /* the returned module handle */
+ void **mhandle);
+
+/*****************************************************************************
+ * Procedure dynamic_unload_module
+ *
+ * Parameters:
+ * mhandle A module handle from dynamic_load_module
+ * syms Host-side symbol table and malloc/free functions
+ * alloc Target-side memory allocation
+ *
+ * Effect:
+ * The module specified by mhandle is unloaded. Unloading causes all
+ * target memory to be deallocated, all symbols defined by the module to
+ * be purged, and any host-side storage used by the dynamic loader for
+ * this module to be released.
+ *
+ * Returns:
+ * Zero for success. On error, the number of errors detected is returned.
+ * Individual errors are reported using syms->error_report().
+ **************************************************************************** */
+extern int dynamic_unload_module(void *mhandle, /* the module
+ * handle */
+ /* host support for symbols and
+ * storage */
+ struct dynamic_loader_sym *syms,
+ /* the target memory allocator */
+ struct dynamic_loader_allocate *alloc,
+ /* the target memory initializer */
+ struct dynamic_loader_initialize *init);
+
+/*****************************************************************************
+ *****************************************************************************
+ * A class used by the dynamic loader for input of the module image
+ *****************************************************************************
+ **************************************************************************** */
+struct dynamic_loader_stream {
+/* public: */
+ /*************************************************************************
+ * read_buffer
+ *
+ * PARAMETERS :
+ * buffer Pointer to the buffer to fill
+ * bufsiz Amount of data desired in sizeof() units
+ *
+ * EFFECT :
+ * Reads the specified amount of data from the module input stream
+ * into the specified buffer. Returns the amount of data read in sizeof()
+ * units (which, if less than the amount requested, represents an error).
+ *
+ * NOTES:
+ * In release 1 increments the file position by the number of bytes read
+ *
+ ************************************************************************ */
+ int (*read_buffer) (struct dynamic_loader_stream *thisptr,
+ void *buffer, unsigned bufsiz);
+
+ /*************************************************************************
+ * set_file_posn (release 1 only)
+ *
+ * PARAMETERS :
+ * posn Desired file position relative to start of file in sizeof() units.
+ *
+ * EFFECT :
+ * Adjusts the internal state of the stream object so that the next
+ * read_buffer call will begin to read at the specified offset from
+ * the beginning of the input module. Returns 0 for success, non-zero
+ * for failure.
+ *
+ ************************************************************************ */
+ int (*set_file_posn) (struct dynamic_loader_stream *thisptr,
+ /* to be eliminated in release 2 */
+ unsigned int posn);
+
+};
+
+/*****************************************************************************
+ *****************************************************************************
+ * A class used by the dynamic loader for symbol table support and
+ * miscellaneous host-side functions
+ *****************************************************************************
+ **************************************************************************** */
+
+typedef u32 ldr_addr;
+
+/*
+ * the structure of a symbol known to the dynamic loader
+ */
+struct dynload_symbol {
+ ldr_addr value;
+};
+
+struct dynamic_loader_sym {
+/* public: */
+ /*************************************************************************
+ * find_matching_symbol
+ *
+ * PARAMETERS :
+ * name The name of the desired symbol
+ *
+ * EFFECT :
+ * Locates a symbol matching the name specified. A pointer to the
+ * symbol is returned if it exists; 0 is returned if no such symbol is
+ * found.
+ *
+ ************************************************************************ */
+ struct dynload_symbol *(*find_matching_symbol)
+ (struct dynamic_loader_sym *thisptr, const char *name);
+
+ /*************************************************************************
+ * add_to_symbol_table
+ *
+ * PARAMETERS :
+ * nname Pointer to the name of the new symbol
+ * moduleid An opaque module id assigned by the dynamic loader
+ *
+ * EFFECT :
+ * The new symbol is added to the table. A pointer to the symbol is
+ * returned, or NULL is returned for failure.
+ *
+ * NOTES:
+ * It is permissible for this function to return NULL; the effect is that
+ * the named symbol will not be available to resolve references in
+ * subsequent loads. Returning NULL will not cause the current load
+ * to fail.
+ ************************************************************************ */
+ struct dynload_symbol *(*add_to_symbol_table)
+ (struct dynamic_loader_sym *
+ thisptr, const char *nname, unsigned moduleid);
+
+ /*************************************************************************
+ * purge_symbol_table
+ *
+ * PARAMETERS :
+ * moduleid An opaque module id assigned by the dynamic loader
+ *
+ * EFFECT :
+ * Each symbol in the symbol table whose moduleid matches the argument
+ * is removed from the table.
+ ************************************************************************ */
+ void (*purge_symbol_table) (struct dynamic_loader_sym *thisptr,
+ unsigned moduleid);
+
+ /*************************************************************************
+ * dload_allocate
+ *
+ * PARAMETERS :
+ * memsiz size of desired memory in sizeof() units
+ *
+ * EFFECT :
+ * Returns a pointer to some "host" memory for use by the dynamic
+ * loader, or NULL for failure.
+ * This function serves as a replaceable form of "malloc" to
+ * allow the user to configure the memory usage of the dynamic loader.
+ ************************************************************************ */
+ void *(*dload_allocate) (struct dynamic_loader_sym *thisptr,
+ unsigned memsiz);
+
+ /*************************************************************************
+ * dload_deallocate
+ *
+ * PARAMETERS :
+ * memptr pointer to previously allocated memory
+ *
+ * EFFECT :
+ * Releases the previously allocated "host" memory.
+ ************************************************************************ */
+ void (*dload_deallocate) (struct dynamic_loader_sym *thisptr,
+ void *memptr);
+
+ /*************************************************************************
+ * error_report
+ *
+ * PARAMETERS :
+ * errstr pointer to an error string
+ * args additional arguments
+ *
+ * EFFECT :
+ * This function provides an error reporting interface for the dynamic
+ * loader. The error string and arguments are designed as for the
+ * library function vprintf.
+ ************************************************************************ */
+ void (*error_report) (struct dynamic_loader_sym *thisptr,
+ const char *errstr, va_list args);
+
+}; /* class dynamic_loader_sym */
+
+/*****************************************************************************
+ *****************************************************************************
+ * A class used by the dynamic loader to allocate and deallocate target memory.
+ *****************************************************************************
+ **************************************************************************** */
+
+struct ldr_section_info {
+ /* Name of the memory section assigned at build time */
+ const char *name;
+ ldr_addr run_addr; /* execution address of the section */
+ ldr_addr load_addr; /* load address of the section */
+ ldr_addr size; /* size of the section in addressable units */
+#ifndef _BIG_ENDIAN
+ u16 page; /* memory page or view */
+ u16 type; /* one of the section types below */
+#else
+ u16 type; /* one of the section types below */
+ u16 page; /* memory page or view */
+#endif
+ /* a context field for use by dynamic_loader_allocate;
+ * ignored but maintained by the dynamic loader */
+ u32 context;
+};
+
+/* use this macro to extract type of section from ldr_section_info.type field */
+#define DLOAD_SECTION_TYPE(typeinfo) (typeinfo & 0xF)
+
+/* type of section to be allocated */
+#define DLOAD_TEXT 0
+#define DLOAD_DATA 1
+#define DLOAD_BSS 2
+ /* internal use only, run-time cinit will be of type DLOAD_DATA */
+#define DLOAD_CINIT 3
+
+struct dynamic_loader_allocate {
+/* public: */
+
+ /*************************************************************************
+ * Function allocate
+ *
+ * Parameters:
+ * info A pointer to an information block for the section
+ * align The alignment of the storage in target AUs
+ *
+ * Effect:
+ * Allocates target memory for the specified section and fills in the
+ * load_addr and run_addr fields of the section info structure. Returns TRUE
+ * for success, FALSE for failure.
+ *
+ * Notes:
+ * Frequently load_addr and run_addr are the same, but if they are not
+ * load_addr is used with dynamic_loader_initialize, and run_addr is
+ * used for almost all relocations. This function should always initialize
+ * both fields.
+ ************************************************************************ */
+ int (*dload_allocate) (struct dynamic_loader_allocate *thisptr,
+ struct ldr_section_info *info, unsigned align);
+
+ /*************************************************************************
+ * Function deallocate
+ *
+ * Parameters:
+ * info A pointer to an information block for the section
+ *
+ * Effect:
+ * Releases the target memory previously allocated.
+ *
+ * Notes:
+ * The content of the info->name field is undefined on call to this function.
+ ************************************************************************ */
+ void (*dload_deallocate) (struct dynamic_loader_allocate *thisptr,
+ struct ldr_section_info *info);
+
+}; /* class dynamic_loader_allocate */
+
+/*****************************************************************************
+ *****************************************************************************
+ * A class used by the dynamic loader to load data into a target. This class
+ * provides the interface-specific functions needed to load data.
+ *****************************************************************************
+ **************************************************************************** */
+
+struct dynamic_loader_initialize {
+/* public: */
+ /*************************************************************************
+ * Function connect
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Connect to the initialization interface. Returns TRUE for success,
+ * FALSE for failure.
+ *
+ * Notes:
+ * This function is called prior to use of any other functions in
+ * this interface.
+ ************************************************************************ */
+ int (*connect) (struct dynamic_loader_initialize *thisptr);
+
+ /*************************************************************************
+ * Function readmem
+ *
+ * Parameters:
+ * bufr Pointer to a word-aligned buffer for the result
+ * locn Target address of first data element
+ * info Section info for the section in which the address resides
+ * bytsiz Size of the data to be read in sizeof() units
+ *
+ * Effect:
+ * Fills the specified buffer with data from the target. Returns TRUE for
+ * success, FALSE for failure.
+ ************************************************************************ */
+ int (*readmem) (struct dynamic_loader_initialize *thisptr,
+ void *bufr,
+ ldr_addr locn,
+ struct ldr_section_info *info, unsigned bytsiz);
+
+ /*************************************************************************
+ * Function writemem
+ *
+ * Parameters:
+ * bufr Pointer to a word-aligned buffer of data
+ * locn Target address of first data element to be written
+ * info Section info for the section in which the address resides
+ * bytsiz Size of the data to be written in sizeof() units
+ *
+ * Effect:
+ * Writes the specified buffer to the target. Returns TRUE for success,
+ * FALSE for failure.
+ ************************************************************************ */
+ int (*writemem) (struct dynamic_loader_initialize *thisptr,
+ void *bufr,
+ ldr_addr locn,
+ struct ldr_section_info *info, unsigned bytsiz);
+
+ /*************************************************************************
+ * Function fillmem
+ *
+ * Parameters:
+ * locn Target address of first data element to be written
+ * info Section info for the section in which the address resides
+ * bytsiz Size of the data to be written in sizeof() units
+ * val Value to be written in each byte
+ * Effect:
+ * Fills the specified area of target memory. Returns TRUE for success,
+ * FALSE for failure.
+ ************************************************************************ */
+ int (*fillmem) (struct dynamic_loader_initialize *thisptr,
+ ldr_addr locn, struct ldr_section_info *info,
+ unsigned bytsiz, unsigned val);
+
+ /*************************************************************************
+ * Function execute
+ *
+ * Parameters:
+ * start Starting address
+ *
+ * Effect:
+ * The target code at the specified starting address is executed.
+ *
+ * Notes:
+ * This function is called at the end of the dynamic load process
+ * if the input module has specified a starting address.
+ ************************************************************************ */
+ int (*execute) (struct dynamic_loader_initialize *thisptr,
+ ldr_addr start);
+
+ /*************************************************************************
+ * Function release
+ *
+ * Parameters:
+ * none
+ *
+ * Effect:
+ * Releases the connection to the load interface.
+ *
+ * Notes:
+ * This function is called at the end of the dynamic load process.
+ ************************************************************************ */
+ void (*release) (struct dynamic_loader_initialize *thisptr);
+
+}; /* class dynamic_loader_initialize */
+
+#endif /* _DYNAMIC_LOADER_H_ */
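
To make the four-class parameterization above concrete, here is an illustrative sketch (not part of this patch) of a dynamic_loader_stream backed by an in-memory image, handed to dynamic_load_module() together with caller-supplied syms/alloc/init objects; those surrounding objects are assumed to exist:

#include <linux/string.h>
#include <dspbridge/dynamic_loader.h>

struct buf_stream {
	struct dynamic_loader_stream strm;	/* must stay first: the loader
						 * hands &strm back as thisptr */
	const u8 *data;
	unsigned size;
	unsigned pos;
};

static int buf_read(struct dynamic_loader_stream *thisptr, void *buffer,
		    unsigned bufsiz)
{
	struct buf_stream *bs = (struct buf_stream *)thisptr;
	unsigned n = min(bufsiz, bs->size - bs->pos);

	memcpy(buffer, bs->data + bs->pos, n);
	bs->pos += n;
	return n;		/* a short read tells the loader something went wrong */
}

static int buf_seek(struct dynamic_loader_stream *thisptr, unsigned int posn)
{
	struct buf_stream *bs = (struct buf_stream *)thisptr;

	if (posn > bs->size)
		return 1;	/* non-zero means failure */
	bs->pos = posn;
	return 0;
}

/* syms/alloc/init are the caller's host-symbol, target-allocator and
 * target-initializer objects; image/len is the module held in host memory. */
static int example_load(const u8 *image, unsigned len,
			struct dynamic_loader_sym *syms,
			struct dynamic_loader_allocate *alloc,
			struct dynamic_loader_initialize *init,
			void **mhandle)
{
	struct buf_stream bs = {
		.strm = { .read_buffer = buf_read, .set_file_posn = buf_seek },
		.data = image,
		.size = len,
	};

	/* Zero means success; otherwise the count of errors reported via
	 * syms->error_report(). */
	return dynamic_load_module(&bs.strm, syms, alloc, init,
				   DLOAD_INITBSS, mhandle);
}
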
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gb.h b/drivers/staging/tidspbridge/include/dspbridge/gb.h
new file mode 100644
index 00000000000..fda783aa160
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/gb.h
@@ -0,0 +1,79 @@
+/*
+ * gb.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Generic bitmap manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef GB_
+#define GB_
+
+#define GB_NOBITS (~0)
+#include <dspbridge/host_os.h>
+
+struct gb_t_map;
+
+/*
+ * ======== gb_clear ========
+ * Clear the bit in position bitn in the bitmap map. Bit positions are
+ * zero based.
+ */
+
+extern void gb_clear(struct gb_t_map *map, u32 bitn);
+
+/*
+ * ======== gb_create ========
+ * Create a bit map with len bits. Initially all bits are cleared.
+ */
+
+extern struct gb_t_map *gb_create(u32 len);
+
+/*
+ * ======== gb_delete ========
+ * Delete previously created bit map
+ */
+
+extern void gb_delete(struct gb_t_map *map);
+
+/*
+ * ======== gb_findandset ========
+ * Finds a clear bit, sets it, and returns the position
+ */
+
+extern u32 gb_findandset(struct gb_t_map *map);
+
+/*
+ * ======== gb_minclear ========
+ * gb_minclear returns the minimum clear bit position. If no bit is
+ * clear, gb_minclear returns -1.
+ */
+extern u32 gb_minclear(struct gb_t_map *map);
+
+/*
+ * ======== gb_set ========
+ * Set the bit in position bitn in the bitmap map. Bit positions are
+ * zero based.
+ */
+
+extern void gb_set(struct gb_t_map *map, u32 bitn);
+
+/*
+ * ======== gb_test ========
+ * Returns TRUE if the bit in position bitn is set in map; otherwise
+ * gb_test returns FALSE. Bit positions are zero based.
+ */
+
+extern bool gb_test(struct gb_t_map *map, u32 bitn);
+
+#endif /*GB_ */
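
An illustrative allocation-pool sketch for the bitmap API above (not part of this patch); it assumes gb_findandset() reports a full map with GB_NOBITS:

#include <dspbridge/gb.h>

static void gb_example(void)
{
	struct gb_t_map *map;
	u32 id;

	map = gb_create(16);		/* 16 bit positions, all initially clear */
	if (!map)
		return;

	id = gb_findandset(map);	/* claim the first free position */
	if (id != GB_NOBITS && gb_test(map, id))
		gb_clear(map, id);	/* release the position again */

	gb_delete(map);
}
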
diff --git a/drivers/staging/tidspbridge/include/dspbridge/getsection.h b/drivers/staging/tidspbridge/include/dspbridge/getsection.h
new file mode 100644
index 00000000000..626063dd9df
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/getsection.h
@@ -0,0 +1,108 @@
+/*
+ * getsection.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This file provides an API add-on to the dynamic loader that allows the user
+ * to query section information and extract section data from dynamic load
+ * modules.
+ *
+ * Notes:
+ * Functions in this API assume that the supplied dynamic_loader_stream
+ * object supports the set_file_posn method.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _GETSECTION_H_
+#define _GETSECTION_H_
+
+#include "dynamic_loader.h"
+
+/*
+ * Procedure dload_module_open
+ *
+ * Parameters:
+ * module The input stream that supplies the module image
+ * syms Host-side malloc/free and error reporting functions.
+ * Other methods are unused.
+ *
+ * Effect:
+ * Reads header information from a dynamic loader module using the specified
+ * stream object, and returns a handle for the module information. This
+ * handle may be used in subsequent query calls to obtain information
+ * contained in the module.
+ *
+ * Returns:
+ * NULL if an error is encountered, otherwise a module handle for use
+ * in subsequent operations.
+ */
+extern void *dload_module_open(struct dynamic_loader_stream
+ *module, struct dynamic_loader_sym
+ *syms);
+
+/*
+ * Procedure dload_get_section_info
+ *
+ * Parameters:
+ * minfo Handle from dload_module_open for this module
+ * section_name Pointer to the string name of the section desired
+ * section_info Address of a section info structure pointer to be initialized
+ *
+ * Effect:
+ * Finds the specified section in the module information, and fills in
+ * the provided ldr_section_info structure.
+ *
+ * Returns:
+ * TRUE for success, FALSE for section not found
+ */
+extern int dload_get_section_info(void *minfo,
+ const char *section_name,
+ const struct ldr_section_info
+ **const section_info);
+
+/*
+ * Procedure dload_get_section
+ *
+ * Parameters:
+ * minfo Handle from dload_module_open for this module
+ * section_info Pointer to a section info structure for the desired section
+ * section_data Buffer to contain the section initialized data
+ *
+ * Effect:
+ * Copies the initialized data for the specified section into the
+ * supplied buffer.
+ *
+ * Returns:
+ * TRUE for success, FALSE for section not found
+ */
+extern int dload_get_section(void *minfo,
+ const struct ldr_section_info *section_info,
+ void *section_data);
+
+/*
+ * Procedure dload_module_close
+ *
+ * Parameters:
+ * minfo Handle from dload_module_open for this module
+ *
+ * Effect:
+ * Releases any storage associated with the module handle. On return,
+ * the module handle is invalid.
+ *
+ * Returns:
+ * Nothing. Individual errors encountered are reported using
+ * syms->error_report(), where syms was an argument to dload_module_open.
+ */
+extern void dload_module_close(void *minfo);
+
+#endif /* _GETSECTION_H_ */
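
A usage sketch for the section-query API above -- illustrative, not part of this patch. The section name ".data" and the assumption that buf is large enough for the section are examples only:

#include <dspbridge/getsection.h>

static int example_copy_data_section(struct dynamic_loader_stream *strm,
				     struct dynamic_loader_sym *syms,
				     void *buf)
{
	const struct ldr_section_info *info;
	void *mod;
	int ok = 0;

	mod = dload_module_open(strm, syms);	/* NULL on error */
	if (!mod)
		return 0;

	/* TRUE/FALSE style returns: non-zero means the section was found. */
	if (dload_get_section_info(mod, ".data", &info))
		ok = dload_get_section(mod, info, buf);

	dload_module_close(mod);		/* mod is invalid afterwards */
	return ok;
}
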
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h
new file mode 100644
index 00000000000..9de291d1f56
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/gh.h
@@ -0,0 +1,34 @@
+/*
+ * gh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef GH_
+#define GH_
+#include <dspbridge/host_os.h>
+
+extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
+ u16(*hash) (void *, u16),
+ bool(*match) (void *, void *),
+ void (*delete) (void *));
+extern void gh_delete(struct gh_t_hash_tab *hash_tab);
+extern void gh_exit(void);
+extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
+extern void gh_init(void);
+extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+void gh_iterate(struct gh_t_hash_tab *hash_tab,
+ void (*callback)(void *, void *), void *user_data);
+#endif
+#endif /* GH_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gs.h b/drivers/staging/tidspbridge/include/dspbridge/gs.h
new file mode 100644
index 00000000000..f32d8d9af41
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/gs.h
@@ -0,0 +1,59 @@
+/*
+ * gs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Memory allocation/release wrappers. This module allows clients to
+ * avoid OS-specific issues related to memory allocation. It also provides
+ * simple diagnostic capabilities to assist in the detection of memory
+ * leaks.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef GS_
+#define GS_
+
+/*
+ * ======== gs_alloc ========
+ * Alloc size bytes of space. Returns pointer to space
+ * allocated, otherwise NULL.
+ */
+extern void *gs_alloc(u32 size);
+
+/*
+ * ======== gs_exit ========
+ * Module exit. Do not change to "#define gs_exit()"; in
+ * some environments this operation must actually do some work!
+ */
+extern void gs_exit(void);
+
+/*
+ * ======== gs_free ========
+ * Free space allocated by gs_alloc() or GS_calloc().
+ */
+extern void gs_free(void *ptr);
+
+/*
+ * ======== gs_frees ========
+ * Free space allocated by gs_alloc() or GS_calloc() and assert that
+ * the size of the allocation is size bytes.
+ */
+extern void gs_frees(void *ptr, u32 size);
+
+/*
+ * ======== gs_init ========
+ * Module initialization. Do not change to "#define gs_init()"; in
+ * some environments this operation must actually do some work!
+ */
+extern void gs_init(void);
+
+#endif /*GS_ */
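
A minimal lifecycle sketch for the gs wrappers above (illustrative, not part of this patch); the 256-byte size is an arbitrary example:

#include <dspbridge/host_os.h>
#include <dspbridge/gs.h>

static void gs_example(void)
{
	void *buf;

	gs_init();			/* once, e.g. at module load */

	buf = gs_alloc(256);		/* 256 bytes of "host" memory */
	if (buf)
		gs_frees(buf, 256);	/* size-checked release */

	gs_exit();			/* once, at module unload */
}
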
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
new file mode 100644
index 00000000000..6b4feb4d015
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -0,0 +1,88 @@
+/*
+ * host_os.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HOST_OS_H_
+#define _HOST_OS_H_
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/syscalls.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <plat/clock.h>
+#include <linux/clk.h>
+#include <plat/mailbox.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+/* TODO -- Remove, once BP defines them */
+#define INT_DSP_MMU_IRQ 28
+
+struct dspbridge_platform_data {
+ void (*dsp_set_min_opp) (u8 opp_id);
+ u8(*dsp_get_opp) (void);
+ void (*cpu_set_freq) (unsigned long f);
+ unsigned long (*cpu_get_freq) (void);
+ unsigned long mpu_speed[6];
+
+ /* functions to write and read PRCM registers */
+ void (*dsp_prm_write)(u32, s16 , u16);
+ u32 (*dsp_prm_read)(s16 , u16);
+ u32 (*dsp_prm_rmw_bits)(u32, u32, s16, s16);
+ void (*dsp_cm_write)(u32, s16 , u16);
+ u32 (*dsp_cm_read)(s16 , u16);
+ u32 (*dsp_cm_rmw_bits)(u32, u32, s16, s16);
+
+ u32 phys_mempool_base;
+ u32 phys_mempool_size;
+};
+
+#define PRCM_VDD1 1
+
+extern struct platform_device *omap_dspbridge_dev;
+extern struct device *bridge;
+
+#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
+extern void dspbridge_reserve_sdram(void);
+#else
+static inline void dspbridge_reserve_sdram(void)
+{
+}
+#endif
+
+extern unsigned long dspbridge_get_mempool_base(void);
+#endif
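
An illustrative board-file fragment (not part of this patch) showing how dspbridge_platform_data might be populated; the 6 MB pool size and the init ordering are assumptions, and the platform_device registration of omap_dspbridge_dev is omitted:

#include <linux/init.h>
#include <dspbridge/host_os.h>

static struct dspbridge_platform_data bridge_pdata = {
	.phys_mempool_size = 0x600000,	/* example: 6 MB DMM pool */
};

static int __init board_bridge_init(void)
{
	/* dspbridge_reserve_sdram() is assumed to have run from the board's
	 * early memory-reservation hook; here we only record the result. */
	bridge_pdata.phys_mempool_base = dspbridge_get_mempool_base();

	/* The board code would now register omap_dspbridge_dev with
	 * &bridge_pdata as its dev.platform_data. */
	return 0;
}
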
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io.h b/drivers/staging/tidspbridge/include/dspbridge/io.h
new file mode 100644
index 00000000000..bc346f9a01c
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/io.h
@@ -0,0 +1,114 @@
+/*
+ * io.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The io module manages IO between CHNL and msg_ctrl.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef IO_
+#define IO_
+
+#include <dspbridge/cfgdefs.h>
+#include <dspbridge/devdefs.h>
+
+#include <dspbridge/iodefs.h>
+
+/*
+ * ======== io_create ========
+ * Purpose:
+ * Create an IO manager object, responsible for managing IO between
+ * CHNL and msg_ctrl.
+ * Parameters:
+ * io_man: Location to store the IO manager object on
+ * output.
+ * hdev_obj: Handle to a device object.
+ * mgr_attrts: IO manager attributes.
+ * mgr_attrts->birq: I/O IRQ number.
+ * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
+ * mgr_attrts->word_size: DSP word size in equivalent PC bytes.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EIO: Unable to plug channel ISR for configured IRQ.
+ * -EINVAL: Invalid DSP word size (must be > 0).
+ * Invalid base address for DSP communications.
+ * Requires:
+ * io_init(void) called.
+ * io_man != NULL.
+ * mgr_attrts != NULL.
+ * Ensures:
+ */
+extern int io_create(struct io_mgr **io_man,
+ struct dev_object *hdev_obj,
+ const struct io_attrs *mgr_attrts);
+
+/*
+ * ======== io_destroy ========
+ * Purpose:
+ * Destroy the IO manager.
+ * Parameters:
+ * hio_mgr: IO manager object.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: hio_mgr was invalid.
+ * Requires:
+ * io_init(void) called.
+ * Ensures:
+ */
+extern int io_destroy(struct io_mgr *hio_mgr);
+
+/*
+ * ======== io_exit ========
+ * Purpose:
+ * Discontinue usage of the IO module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * io_init(void) previously called.
+ * Ensures:
+ * Resources, if any acquired in io_init(void), are freed when the last
+ * client of IO calls io_exit(void).
+ */
+extern void io_exit(void);
+
+/*
+ * ======== io_init ========
+ * Purpose:
+ * Initialize the IO module's private state.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if error occurred.
+ * Requires:
+ * Ensures:
+ * A requirement for each of the other public CHNL functions.
+ */
+extern bool io_init(void);
+
+/*
+ * ======== io_on_loaded ========
+ * Purpose:
+ * Called when a program is loaded so IO manager can update its
+ * internal state.
+ * Parameters:
+ * hio_mgr: IO manager object.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: hio_mgr was invalid.
+ * Requires:
+ * io_init(void) called.
+ * Ensures:
+ */
+extern int io_on_loaded(struct io_mgr *hio_mgr);
+
+#endif /* IO_ */
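
An illustrative bring-up sketch for io_init()/io_create() above (not part of this patch); the IRQ number, shared flag and word size are placeholder values for the io_attrs fields defined in iodefs.h:

#include <linux/errno.h>
#include <dspbridge/io.h>

static int example_io_setup(struct dev_object *hdev_obj,
			    struct io_mgr **io_man)
{
	struct io_attrs attrs = {
		.birq = 26,		/* assumed mailbox IRQ number */
		.irq_shared = true,
		.word_size = 2,		/* example: 16-bit DSP words */
	};
	int status;

	if (!io_init())			/* required before io_create() */
		return -EPERM;

	status = io_create(io_man, hdev_obj, &attrs);
	if (status)
		io_exit();

	return status;
}
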
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
new file mode 100644
index 00000000000..18aec55d864
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -0,0 +1,298 @@
+/*
+ * io_sm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO dispatcher for a shared memory channel driver.
+ * Also includes macros to simulate shm via port I/O calls.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef IOSM_
+#define IOSM_
+
+#include <dspbridge/_chnl_sm.h>
+#include <dspbridge/host_os.h>
+
+#include <dspbridge/iodefs.h>
+
+#define IO_INPUT 0
+#define IO_OUTPUT 1
+#define IO_SERVICE 2
+#define IO_MAXSERVICE IO_SERVICE
+
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+/* The maximum number of OPPs that are supported */
+extern s32 dsp_max_opps;
+/* The Vdd1 opp table information */
+extern u32 vdd1_dsp_freq[6][4];
+#endif
+
+/*
+ * ======== io_cancel_chnl ========
+ * Purpose:
+ * Cancel IO on a given channel.
+ * Parameters:
+ * hio_mgr: IO Manager.
+ * chnl: Index of channel to cancel IO on.
+ * Returns:
+ * Requires:
+ * Valid hio_mgr.
+ * Ensures:
+ */
+extern void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl);
+
+/*
+ * ======== io_dpc ========
+ * Purpose:
+ * Deferred procedure call for shared memory channel driver ISR. Carries
+ * out the dispatch of I/O.
+ * Parameters:
+ * ref_data: Pointer to reference data registered via a call to
+ * DPC_Create().
+ * Returns:
+ * Requires:
+ * Must not block.
+ * Must not acquire resources.
+ * All data touched must be locked in memory if running in kernel mode.
+ * Ensures:
+ * Non-preemptible (but interruptible).
+ */
+extern void io_dpc(unsigned long ref_data);
+
+/*
+ * ======== io_mbox_msg ========
+ * Purpose:
+ * Main interrupt handler for the shared memory Bridge channel manager.
+ * Calls the Bridge's chnlsm_isr to determine if this interrupt is ours,
+ * then schedules a DPC to dispatch I/O.
+ * Parameters:
+ * ref_data: Pointer to the channel manager object for this board.
+ * Set in an initial call to ISR_Install().
+ * Returns:
+ * TRUE if interrupt handled; FALSE otherwise.
+ * Requires:
+ * Must be in locked memory if executing in kernel mode.
+ * Must only call functions which are in locked memory if Kernel mode.
+ * Must only call asynchronous services.
+ * Interrupts are disabled and EOI for this interrupt has been sent.
+ * Ensures:
+ */
+void io_mbox_msg(u32 msg);
+
+/*
+ * ======== io_request_chnl ========
+ * Purpose:
+ * Request I/O from the DSP. Sets flags in shared memory, then interrupts
+ * the DSP.
+ * Parameters:
+ * hio_mgr: IO manager handle.
+ * pchnl: Ptr to the channel requesting I/O.
+ * io_mode: Mode of channel: {IO_INPUT | IO_OUTPUT}.
+ * Returns:
+ * Requires:
+ * pchnl != NULL
+ * Ensures:
+ */
+extern void io_request_chnl(struct io_mgr *io_manager,
+ struct chnl_object *pchnl,
+ u8 io_mode, u16 *mbx_val);
+
+/*
+ * ======== iosm_schedule ========
+ * Purpose:
+ * Schedule DPC for IO.
+ * Parameters:
+ * pio_mgr: Ptr to an I/O manager.
+ * Returns:
+ * Requires:
+ * pchnl != NULL
+ * Ensures:
+ */
+extern void iosm_schedule(struct io_mgr *io_manager);
+
+/*
+ * DSP-DMA IO functions
+ */
+
+/*
+ * ======== io_ddma_init_chnl_desc ========
+ * Purpose:
+ * Initialize DSP DMA channel descriptor.
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * ddma_chnl_id: DDMA channel identifier.
+ * num_desc: Number of buffer descriptors (equals # of IOReqs &
+ * Chirps).
+ * dsp: DSP address.
+ * Returns:
+ * Requires:
+ * ddma_chnl_id < DDMA_MAXDDMACHNLS
+ * num_desc > 0
+ * pVa != NULL
+ * pDspPa != NULL
+ *
+ * Ensures:
+ */
+extern void io_ddma_init_chnl_desc(struct io_mgr *hio_mgr, u32 ddma_chnl_id,
+ u32 num_desc, void *dsp);
+
+/*
+ * ======== io_ddma_clear_chnl_desc ========
+ * Purpose:
+ * Clear DSP DMA channel descriptor.
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * ddma_chnl_id: DDMA channel identifier.
+ * Returns:
+ * Requires:
+ * ddma_chnl_id < DDMA_MAXDDMACHNLS
+ * Ensures:
+ */
+extern void io_ddma_clear_chnl_desc(struct io_mgr *hio_mgr, u32 ddma_chnl_id);
+
+/*
+ * ======== io_ddma_request_chnl ========
+ * Purpose:
+ * Request channel DSP-DMA from the DSP. Sets up SM descriptors and
+ * control fields in shared memory.
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * pchnl: Ptr to channel object
+ * chnl_packet_obj: Ptr to channel i/o request packet.
+ * Returns:
+ * Requires:
+ * pchnl != NULL
+ * pchnl->cio_reqs > 0
+ * chnl_packet_obj != NULL
+ * Ensures:
+ */
+extern void io_ddma_request_chnl(struct io_mgr *hio_mgr,
+ struct chnl_object *pchnl,
+ struct chnl_irp *chnl_packet_obj,
+ u16 *mbx_val);
+
+/*
+ * Zero-copy IO functions
+ */
+
+/*
+ * ======== io_ddzc_init_chnl_desc ========
+ * Purpose:
+ * Initialize ZCPY channel descriptor.
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * zid: zero-copy channel identifier.
+ * Returns:
+ * Requires:
+ * zid < DDMA_MAXZCPYCHNLS
+ * hio_mgr != NULL
+ * Ensures:
+ */
+extern void io_ddzc_init_chnl_desc(struct io_mgr *hio_mgr, u32 zid);
+
+/*
+ * ======== io_ddzc_clear_chnl_desc ========
+ * Purpose:
+ * Clear DSP ZC channel descriptor.
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * ch_id: ZC channel identifier.
+ * Returns:
+ * Requires:
+ * hio_mgr is valid
+ * ch_id < DDMA_MAXZCPYCHNLS
+ * Ensures:
+ */
+extern void io_ddzc_clear_chnl_desc(struct io_mgr *hio_mgr, u32 ch_id);
+
+/*
+ * ======== io_ddzc_request_chnl ========
+ * Purpose:
+ * Request zero-copy channel transfer. Sets up SM descriptors and
+ * control fields in shared memory.
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * pchnl: Ptr to channel object
+ * chnl_packet_obj: Ptr to channel i/o request packet.
+ * Returns:
+ * Requires:
+ * pchnl != NULL
+ * pchnl->cio_reqs > 0
+ * chnl_packet_obj != NULL
+ * Ensures:
+ */
+extern void io_ddzc_request_chnl(struct io_mgr *hio_mgr,
+ struct chnl_object *pchnl,
+ struct chnl_irp *chnl_packet_obj,
+ u16 *mbx_val);
+
+/*
+ * ======== io_sh_msetting ========
+ * Purpose:
+ * Sets the shared memory setting
+ * Parameters:
+ * hio_mgr: Handle to an I/O manager.
+ * desc: Shared memory type
+ * pargs: Ptr to shm setting
+ * Returns:
+ * Requires:
+ * hio_mgr != NULL
+ * pargs != NULL
+ * Ensures:
+ */
+extern int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs);
+
+/*
+ * Misc functions for the CHNL_IO shared memory library:
+ */
+
+/* Maximum channel bufsize that can be used. */
+extern u32 io_buf_size(struct io_mgr *hio_mgr);
+
+extern u32 io_read_value(struct bridge_dev_context *dev_ctxt, u32 dsp_addr);
+
+extern void io_write_value(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr, u32 value);
+
+extern u32 io_read_value_long(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr);
+
+extern void io_write_value_long(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr, u32 value);
+
+extern void io_or_set_value(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr, u32 value);
+
+extern void io_and_set_value(struct bridge_dev_context *dev_ctxt,
+ u32 dsp_addr, u32 value);
+
+extern void io_sm_init(void);
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/*
+ * ========print_dsp_trace_buffer ========
+ * Print DSP tracebuffer.
+ */
+extern int print_dsp_trace_buffer(struct bridge_dev_context
+ *hbridge_context);
+
+int dump_dsp_stack(struct bridge_dev_context *bridge_context);
+
+void dump_dl_modules(struct bridge_dev_context *bridge_context);
+
+#endif
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+void print_dsp_debug_trace(struct io_mgr *hio_mgr);
+#endif
+
+#endif /* IOSM_ */
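
A small sketch (not part of this patch) using the shared-memory peek/poke helpers declared above; dev_ctxt and dsp_addr are supplied by the caller:

#include <dspbridge/io_sm.h>

/* Read the old value at a DSP address, then overwrite it. */
static u32 example_exchange(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr, u32 new_val)
{
	u32 old = io_read_value(dev_ctxt, dsp_addr);

	io_write_value(dev_ctxt, dsp_addr, new_val);
	return old;
}
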
diff --git a/drivers/staging/tidspbridge/include/dspbridge/iodefs.h b/drivers/staging/tidspbridge/include/dspbridge/iodefs.h
new file mode 100644
index 00000000000..8bd10a04200
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/iodefs.h
@@ -0,0 +1,36 @@
+/*
+ * iodefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * System-wide channel objects and constants.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef IODEFS_
+#define IODEFS_
+
+#define IO_MAXIRQ 0xff /* Arbitrarily large number. */
+
+/* IO Objects: */
+struct io_mgr;
+
+/* IO manager attributes: */
+struct io_attrs {
+ u8 birq; /* Channel's I/O IRQ number. */
+ bool irq_shared; /* TRUE if the IRQ is shareable. */
+ u32 word_size; /* DSP Word size. */
+ u32 shm_base; /* Physical base address of shared memory. */
+ u32 usm_length; /* Size (in bytes) of shared memory. */
+};
+
+#endif /* IODEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/ldr.h b/drivers/staging/tidspbridge/include/dspbridge/ldr.h
new file mode 100644
index 00000000000..6a0269cd07e
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/ldr.h
@@ -0,0 +1,29 @@
+/*
+ * ldr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Provide module loading services and symbol export services.
+ *
+ * Notes:
+ * This service is meant to be used by modules of the DSP/BIOS Bridge
+ * driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef LDR_
+#define LDR_
+
+/* Loader objects: */
+struct ldr_module;
+
+#endif /* LDR_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/list.h b/drivers/staging/tidspbridge/include/dspbridge/list.h
new file mode 100644
index 00000000000..6837b614073
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/list.h
@@ -0,0 +1,225 @@
+/*
+ * list.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Declarations of list management control structures and definitions
+ * of inline list management functions.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef LIST_
+#define LIST_
+
+#include <dspbridge/host_os.h>
+#include <linux/list.h>
+
+#define LST_IS_EMPTY(l) list_empty(&(l)->head)
+
+struct lst_list {
+ struct list_head head;
+};
+
+/*
+ * ======== lst_first ========
+ * Purpose:
+ * Returns a pointer to the first element of the list, or NULL if the list
+ * is empty.
+ * Parameters:
+ * lst: Pointer to list control structure.
+ * Returns:
+ * Pointer to first list element, or NULL.
+ * Requires:
+ * - LST initialized.
+ * - lst != NULL.
+ * Ensures:
+ */
+static inline struct list_head *lst_first(struct lst_list *lst)
+{
+ if (lst && !list_empty(&lst->head))
+ return lst->head.next;
+ return NULL;
+}
+
+/*
+ * ======== lst_get_head ========
+ * Purpose:
+ * Pops the head off the list and returns a pointer to it.
+ * Details:
+ * If the list is empty, returns NULL.
+ * Else, removes the element at the head of the list, making the next
+ * element the head of the list.
+ * The head is removed by making the tail element of the list point its
+ * "next" pointer at the next element after the head, and by making the
+ * "prev" pointer of the next element after the head point at the tail
+ * element. So the next element after the head becomes the new head of
+ * the list.
+ * Parameters:
+ * lst: Pointer to list control structure of list whose head
+ * element is to be removed
+ * Returns:
+ * Pointer to element that was at the head of the list (success)
+ * NULL No elements in list
+ * Requires:
+ * - LST initialized.
+ * - lst != NULL.
+ * Ensures:
+ * Notes:
+ * Because the tail of the list points forward (its "next" pointer) to
+ * the head of the list, and the head of the list points backward (its
+ * "prev" pointer) to the tail of the list, this list is circular.
+ */
+static inline struct list_head *lst_get_head(struct lst_list *lst)
+{
+ struct list_head *elem_list;
+
+ if (!lst || list_empty(&lst->head))
+ return NULL;
+
+ elem_list = lst->head.next;
+ lst->head.next = elem_list->next;
+ elem_list->next->prev = &lst->head;
+
+ return elem_list;
+}
+
+/*
+ * ======== lst_init_elem ========
+ * Purpose:
+ * Initializes a list element to default (cleared) values
+ * Details:
+ * Parameters:
+ * elem_list: Pointer to list element to be reset
+ * Returns:
+ * Requires:
+ * LST initialized.
+ * Ensures:
+ * Notes:
+ * This function must not be called to "reset" an element in the middle
+ * of a list chain -- that would break the chain.
+ *
+ */
+static inline void lst_init_elem(struct list_head *elem_list)
+{
+ if (elem_list) {
+ elem_list->next = NULL;
+ elem_list->prev = NULL;
+ }
+}
+
+/*
+ * ======== lst_insert_before ========
+ * Purpose:
+ * Insert the element before the existing element.
+ * Parameters:
+ * lst: Pointer to list control structure.
+ * elem_list: Pointer to element in list to insert.
+ * elem_existing: Pointer to existing list element.
+ * Returns:
+ * Requires:
+ * - LST initialized.
+ * - lst != NULL.
+ * - elem_list != NULL.
+ * - elem_existing != NULL.
+ * Ensures:
+ */
+static inline void lst_insert_before(struct lst_list *lst,
+ struct list_head *elem_list,
+ struct list_head *elem_existing)
+{
+ if (lst && elem_list && elem_existing)
+ list_add_tail(elem_list, elem_existing);
+}
+
+/*
+ * ======== lst_next ========
+ * Purpose:
+ * Returns a pointer to the next element of the list, or NULL if the next
+ * element is the head of the list or the list is empty.
+ * Parameters:
+ * lst: Pointer to list control structure.
+ * cur_elem: Pointer to the current element in the list.
+ * Returns:
+ * Pointer to list element, or NULL.
+ * Requires:
+ * - LST initialized.
+ * - lst != NULL.
+ * - cur_elem != NULL.
+ * Ensures:
+ */
+static inline struct list_head *lst_next(struct lst_list *lst,
+ struct list_head *cur_elem)
+{
+ if (lst && !list_empty(&lst->head) && cur_elem &&
+ (cur_elem->next != &lst->head))
+ return cur_elem->next;
+ return NULL;
+}
+
+/*
+ * ======== lst_put_tail ========
+ * Purpose:
+ * Adds the specified element to the tail of the list
+ * Details:
+ * Sets new element's "prev" pointer to the address previously held by
+ * the head element's prev pointer. This is the previous tail member of
+ * the list.
+ * Sets the new head's prev pointer to the address of the element.
+ * Sets next pointer of the previous tail member of the list to point to
+ * the new element (rather than the head, which it had been pointing at).
+ * Sets new element's next pointer to the address of the head element.
+ * Sets head's prev pointer to the address of the new element.
+ * Parameters:
+ * lst: Pointer to list control structure to which *elem_list will be
+ * added
+ * elem_list: Pointer to list element to be added
+ * Returns:
+ * Void
+ * Requires:
+ * *elem_list and *lst must both exist.
+ * LST initialized.
+ * Ensures:
+ * Notes:
+ * Because the tail is always "just before" the head of the list (the
+ * tail's "next" pointer points at the head of the list, and the head's
+ * "prev" pointer points at the tail of the list), the list is circular.
+ */
+static inline void lst_put_tail(struct lst_list *lst,
+ struct list_head *elem_list)
+{
+ if (lst && elem_list)
+ list_add_tail(elem_list, &lst->head);
+}
+
+/*
+ * ======== lst_remove_elem ========
+ * Purpose:
+ * Removes (unlinks) the given element from the list, if the list is not
+ * empty. Does not free the list element.
+ * Parameters:
+ * lst: Pointer to list control structure.
+ * cur_elem: Pointer to element in list to remove.
+ * Returns:
+ * Requires:
+ * - LST initialized.
+ * - lst != NULL.
+ * - cur_elem != NULL.
+ * Ensures:
+ */
+static inline void lst_remove_elem(struct lst_list *lst,
+ struct list_head *cur_elem)
+{
+ if (lst && !list_empty(&lst->head) && cur_elem)
+ list_del_init(cur_elem);
+}
+
+#endif /* LIST_ */
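
An illustrative sketch (not part of this patch) of how a client embeds a list_head and uses the lst_* wrappers above; struct job and its id field are made up for the example:

#include <dspbridge/list.h>

struct job {
	struct list_head link;	/* chaining element handed to the lst_* calls */
	u32 id;
};

static void list_example(struct lst_list *pending, struct job *job)
{
	struct list_head *elem;

	INIT_LIST_HEAD(&pending->head);		/* the "LST initialized" precondition */

	lst_init_elem(&job->link);
	lst_put_tail(pending, &job->link);	/* append at the tail */

	elem = lst_get_head(pending);		/* pop the oldest element, or NULL */
	if (elem) {
		struct job *first = list_entry(elem, struct job, link);

		first->id = 0;			/* ... process the element ... */
	}
}
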
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h b/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
new file mode 100644
index 00000000000..5d165cd932f
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
@@ -0,0 +1,184 @@
+/*
+ * mbx_sh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions for shared mailbox cmd/data values (used on both
+ * the GPP and DSP sides).
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Bridge usage of OMAP mailbox 1 is determined by the "class" of the
+ * mailbox interrupt's cmd value received. Class values are defined
+ * by one of bits 10 thru 15 being set.
+ *
+ * Note: Only 16 bits of each are used. The other 16-bit data register is available.
+ *
+ * 16 bit Mbx bit defns:
+ *
+ * A). Exception/Error handling (Module DEH) : class = 0.
+ *
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|0|0|0|0|x|x|x|x|x|x|x|x|x|x|
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ *
+ * B: DSP-DMA link driver channels (DDMA) : class = 1.
+ *
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|0|0|0|1|b|b|b|b|b|c|c|c|c|c|
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ * where b -> buffer index (32 DDMA buffers/chnl max)
+ * c -> channel Id (32 DDMA chnls max)
+ *
+ *
+ * C: Proc-copy link driver channels (PCPY) : class = 2.
+ *
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|0|0|1|0|x|x|x|x|x|x|x|x|x|x|
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ *
+ * D: Zero-copy link driver channels (DDZC) : class = 4.
+ *
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|0|1|0|0|x|x|x|x|x|c|c|c|c|c|
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ * where x -> not used
+ * c -> channel Id (32 ZCPY chnls max)
+ *
+ *
+ * E: Power management : class = 8.
+ *
+ * 15 10 0
+ * ---------------------------------
+ * |0|0|1|0|0|0|x|x|x|x|x|c|c|c|c|c|
+
+ * 0010 00xx xxxc cccc
+ * 0010 00nn pppp qqqq
+ * nn:
+ * 00 = reserved
+ * 01 = pwr state change
+ * 10 = opp pre-change
+ * 11 = opp post-change
+ *
+ * if nn = pwr state change:
+ * pppp = don't care
+ * qqqq:
+ * 0010 = hibernate
+ * 0010 0001 0000 0010
+ * 0110 = retention
+ * 0010 0001 0000 0110
+ * others reserved
+ *
+ * if nn = opp pre-change:
+ * pppp = current opp
+ * qqqq = next opp
+ *
+ * if nn = opp post-change:
+ * pppp = prev opp
+ * qqqq = current opp
+ *
+ * ---------------------------------
+ * | (class) | (module specific) |
+ *
+ * where x -> not used
+ * c -> Power management command
+ *
+ */
+
+#ifndef _MBX_SH_H
+#define _MBX_SH_H
+
+#define MBX_CLASS_MSK 0xFC00 /* Class bits are 10 thru 15 */
+#define MBX_VALUE_MSK 0x03FF /* Value bits are 0 thru 9 */
+
+#define MBX_DEH_CLASS 0x0000 /* DEH owns Mbx INTR */
+#define MBX_DDMA_CLASS 0x0400 /* DSP-DMA link drvr chnls owns INTR */
+#define MBX_PCPY_CLASS 0x0800 /* PROC-COPY " */
+#define MBX_ZCPY_CLASS 0x1000 /* ZERO-COPY " */
+#define MBX_PM_CLASS 0x2000 /* Power Management */
+#define MBX_DBG_CLASS 0x4000 /* For debugging purpose */
+
+/*
+ * Exception Handler codes
+ * Magic code used to determine if DSP signaled exception.
+ */
+#define MBX_DEH_BASE 0x0
+#define MBX_DEH_USERS_BASE 0x100 /* 256 */
+#define MBX_DEH_LIMIT 0x3FF /* 1023 */
+#define MBX_DEH_RESET 0x101 /* DSP RESET (DEH) */
+#define MBX_DEH_EMMU 0x103 /* DSP MMU FAULT RECOVERY */
+
+/*
+ * Link driver command/status codes.
+ */
+/* DSP-DMA */
+#define MBX_DDMA_NUMCHNLBITS 5 /* # chnl Id: # bits available */
+#define MBX_DDMA_CHNLSHIFT 0 /* # of bits to shift */
+#define MBX_DDMA_CHNLMSK 0x01F /* bits 0 thru 4 */
+
+#define MBX_DDMA_NUMBUFBITS 5 /* buffer index: # of bits avail */
+#define MBX_DDMA_BUFSHIFT (MBX_DDMA_NUMCHNLBITS + MBX_DDMA_CHNLSHIFT)
+#define MBX_DDMA_BUFMSK 0x3E0 /* bits 5 thru 9 */
+
+/* Zero-Copy */
+#define MBX_ZCPY_NUMCHNLBITS 5 /* # chnl Id: # bits available */
+#define MBX_ZCPY_CHNLSHIFT 0 /* # of bits to shift */
+#define MBX_ZCPY_CHNLMSK 0x01F /* bits 0 thru 4 */
+
+/* Power Management Commands */
+#define MBX_PM_DSPIDLE (MBX_PM_CLASS + 0x0)
+#define MBX_PM_DSPWAKEUP (MBX_PM_CLASS + 0x1)
+#define MBX_PM_EMERGENCYSLEEP (MBX_PM_CLASS + 0x2)
+#define MBX_PM_SLEEPUNTILRESTART (MBX_PM_CLASS + 0x3)
+#define MBX_PM_DSPGLOBALIDLE_OFF (MBX_PM_CLASS + 0x4)
+#define MBX_PM_DSPGLOBALIDLE_ON (MBX_PM_CLASS + 0x5)
+#define MBX_PM_SETPOINT_PRENOTIFY (MBX_PM_CLASS + 0x6)
+#define MBX_PM_SETPOINT_POSTNOTIFY (MBX_PM_CLASS + 0x7)
+#define MBX_PM_DSPRETN (MBX_PM_CLASS + 0x8)
+#define MBX_PM_DSPRETENTION (MBX_PM_CLASS + 0x8)
+#define MBX_PM_DSPHIBERNATE (MBX_PM_CLASS + 0x9)
+#define MBX_PM_HIBERNATE_EN (MBX_PM_CLASS + 0xA)
+#define MBX_PM_OPP_REQ (MBX_PM_CLASS + 0xB)
+#define MBX_PM_OPP_CHG (MBX_PM_CLASS + 0xC)
+
+#define MBX_PM_TYPE_MASK 0x0300
+#define MBX_PM_TYPE_PWR_CHNG 0x0100
+#define MBX_PM_TYPE_OPP_PRECHNG 0x0200
+#define MBX_PM_TYPE_OPP_POSTCHNG 0x0300
+#define MBX_PM_TYPE_OPP_MASK 0x0300
+#define MBX_PM_OPP_PRECHNG (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG)
+/* DSP to MPU */
+#define MBX_PM_OPP_CHNG(OPP) (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG | (OPP))
+#define MBX_PM_RET (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0006)
+#define MBX_PM_HIB (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0002)
+#define MBX_PM_OPP1 0
+#define MBX_PM_OPP2 1
+#define MBX_PM_OPP3 2
+#define MBX_PM_OPP4 3
+
+/* Bridge Debug Commands */
+#define MBX_DBG_SYSPRINTF (MBX_DBG_CLASS + 0x0)
+
+#endif /* _MBX_SH_H */
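
A decoding sketch for the class/value split described above (illustrative, not part of this patch); the pr_info() strings are placeholders:

#include <linux/kernel.h>
#include <dspbridge/mbx_sh.h>

static void example_decode_mbx(u32 msg)
{
	switch (msg & MBX_CLASS_MSK) {
	case MBX_DEH_CLASS:
		pr_info("DEH class, code 0x%x\n", msg & MBX_VALUE_MSK);
		break;
	case MBX_PM_CLASS:
		if (msg == MBX_PM_HIBERNATE_EN)
			pr_info("DSP requested hibernate enable\n");
		break;
	case MBX_DBG_CLASS:
		pr_info("debug class, e.g. MBX_DBG_SYSPRINTF\n");
		break;
	default:
		pr_info("link-driver channel class 0x%x\n",
			msg & MBX_CLASS_MSK);
		break;
	}
}
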
diff --git a/drivers/staging/tidspbridge/include/dspbridge/memdefs.h b/drivers/staging/tidspbridge/include/dspbridge/memdefs.h
new file mode 100644
index 00000000000..78d2c5d0045
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/memdefs.h
@@ -0,0 +1,30 @@
+/*
+ * memdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global MEM constants and types, shared between Bridge driver and DSP API.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MEMDEFS_
+#define MEMDEFS_
+
+/*
+ * MEM_VIRTUALSEGID is used by Node & Strm to access virtual address space in
+ * the correct client process context.
+ */
+#define MEM_SETVIRTUALSEGID 0x10000000
+#define MEM_GETVIRTUALSEGID 0x20000000
+#define MEM_MASKVIRTUALSEGID (MEM_SETVIRTUALSEGID | MEM_GETVIRTUALSEGID)
+
+#endif /* MEMDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mgr.h b/drivers/staging/tidspbridge/include/dspbridge/mgr.h
new file mode 100644
index 00000000000..99f7dc0116b
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/mgr.h
@@ -0,0 +1,205 @@
+/*
+ * mgr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This is the DSP API RM module interface.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MGR_
+#define MGR_
+
+#include <dspbridge/mgrpriv.h>
+
+#define MAX_EVENTS 32
+
+/*
+ * ======== mgr_wait_for_bridge_events ========
+ * Purpose:
+ * Block on any Bridge event(s)
+ * Parameters:
+ * anotifications : array of pointers to notification objects.
+ * count : number of elements in above array
+ * pu_index : index of signaled event object
+ * utimeout : timeout interval in milliseconds
+ * Returns:
+ * 0 : Success.
+ * -ETIME : Wait timed out. *pu_index is undetermined.
+ * Details:
+ */
+
+int mgr_wait_for_bridge_events(struct dsp_notification
+ **anotifications,
+ u32 count, u32 *pu_index,
+ u32 utimeout);
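+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * blocking on a set of Bridge notification objects. The events array,
+ * count and handle_event() are hypothetical caller state.
+ *
+ *   struct dsp_notification *events[MAX_EVENTS];
+ *   u32 index;
+ *   int status;
+ *
+ *   status = mgr_wait_for_bridge_events(events, count, &index, 5000);
+ *   if (!status)
+ *           handle_event(events[index]);
+ */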
+
+/*
+ * ======== mgr_create ========
+ * Purpose:
+ * Creates the Manager Object. This is done during the driver loading.
+ * There is only one Manager Object in the DSP/BIOS Bridge.
+ * Parameters:
+ * mgr_obj: Location to store created MGR Object handle.
+ * dev_node_obj: Device object as known to the system.
+ * Returns:
+ * 0: Success
+ * -ENOMEM: Failed to Create the Object
+ * -EPERM: General Failure
+ * Requires:
+ * MGR Initialized (refs > 0 )
+ * mgr_obj != NULL.
+ * Ensures:
+ * 0: *mgr_obj is a valid MGR interface to the device.
+ * MGR Object stores the DCD Manager Handle.
+ * MGR Object stored in the Registry.
+ * !0: MGR Object not created
+ * Details:
+ * DCD Dll is loaded and MGR Object stores the handle of the DLL.
+ */
+extern int mgr_create(struct mgr_object **mgr_obj,
+ struct cfg_devnode *dev_node_obj);
+
+/*
+ * ======== mgr_destroy ========
+ * Purpose:
+ * Destroys the MGR object. Called upon driver unloading.
+ * Parameters:
+ * hmgr_obj: Handle to Manager object.
+ * Returns:
+ * 0: Success.
+ * DCD Manager freed; MGR Object destroyed;
+ * MGR Object deleted from the Registry.
+ * -EPERM: Failed to destroy MGR Object
+ * Requires:
+ * MGR Initialized (refs > 0 )
+ * hmgr_obj is a valid MGR handle.
+ * Ensures:
+ * 0: MGR Object destroyed and hmgr_obj is Invalid MGR
+ * Handle.
+ */
+extern int mgr_destroy(struct mgr_object *hmgr_obj);
+
+/*
+ * ======== mgr_enum_node_info ========
+ * Purpose:
+ * Enumerate and get configuration information about nodes configured
+ * in the node database.
+ * Parameters:
+ * node_id: The node index (base 0).
+ * pndb_props: Ptr to the dsp_ndbprops structure for output.
+ * undb_props_size: Size of the dsp_ndbprops structure.
+ * pu_num_nodes: Location where the number of nodes configured
+ * in the database will be returned.
+ * Returns:
+ * 0: Success.
+ * -EINVAL: Parameter node_id is greater than the number of
+ * nodes configured in the system.
+ * -EIDRM: During Enumeration there has been a change in
+ * the number of nodes configured or in
+ * the properties of the enumerated nodes.
+ * -EPERM: Failed to query the Node Data Base
+ * Requires:
+ * pNDBPROPS is not null
+ * undb_props_size >= sizeof(dsp_ndbprops)
+ * pu_num_nodes is not null
+ * MGR Initialized (refs > 0 )
+ * Ensures:
+ * SUCCESS on successful retrieval of data and *pu_num_nodes > 0 OR
+ * DSP_FAILED && *pu_num_nodes == 0.
+ * Details:
+ */
+extern int mgr_enum_node_info(u32 node_id,
+ struct dsp_ndbprops *pndb_props,
+ u32 undb_props_size,
+ u32 *pu_num_nodes);
+
+/*
+ * ======== mgr_enum_processor_info ========
+ * Purpose:
+ * Enumerate and get configuration information about available DSP
+ * processors
+ * Parameters:
+ * processor_id: The processor index (zero-based).
+ * processor_info: Ptr to the dsp_processorinfo structure.
+ * processor_info_size: Size of dsp_processorinfo structure.
+ * pu_num_procs: Location where the number of DSPs configured
+ * in the database will be returned
+ * Returns:
+ * 0: Success.
+ * -EINVAL: Parameter processor_id is greater than the number of
+ * DSP Processors in the system.
+ * -EPERM: Failed to query the Node Data Base
+ * Requires:
+ * processor_info is not null
+ * pu_num_procs is not null
+ * processor_info_size >= sizeof(dsp_processorinfo)
+ * MGR Initialized (refs > 0 )
+ * Ensures:
+ * SUCCESS on successful retrieval of data and *pu_num_procs > 0 OR
+ * DSP_FAILED && *pu_num_procs == 0.
+ * Details:
+ */
+extern int mgr_enum_processor_info(u32 processor_id,
+ struct dsp_processorinfo
+ *processor_info,
+ u32 processor_info_size,
+ u8 *pu_num_procs);
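+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * querying how many DSP processors are configured. Error handling is
+ * abbreviated.
+ *
+ *   struct dsp_processorinfo info;
+ *   u8 num_procs;
+ *
+ *   if (!mgr_enum_processor_info(0, &info, sizeof(info), &num_procs))
+ *           pr_info("found %u DSP processor(s)\n", num_procs);
+ */
+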
+/*
+ * ======== mgr_exit ========
+ * Purpose:
+ * Decrement reference count, and free resources when reference count is
+ * 0.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * MGR is initialized.
+ * Ensures:
+ * When reference count == 0, MGR's private resources are freed.
+ */
+extern void mgr_exit(void);
+
+/*
+ * ======== mgr_get_dcd_handle ========
+ * Purpose:
+ * Retrieves the MGR handle. Accessor Function
+ * Parameters:
+ * mgr_handle: Handle to the Manager Object
+ * dcd_handle: Ptr to receive the DCD Handle.
+ * Returns:
+ * 0: Success
+ * -EPERM: Failure to get the Handle
+ * Requires:
+ * MGR is initialized.
+ * dcd_handle != NULL
+ * Ensures:
+ * 0 and *dcd_handle != NULL ||
+ * -EPERM and *dcd_handle == NULL
+ */
+extern int mgr_get_dcd_handle(struct mgr_object
+ *mgr_handle, u32 *dcd_handle);
+
+/*
+ * ======== mgr_init ========
+ * Purpose:
+ * Initialize MGR's private state, keeping a reference count on each
+ * call. Initializes the DCD.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if an error occurred.
+ * Requires:
+ * Ensures:
+ * TRUE: A requirement for the other public MGR functions.
+ */
+extern bool mgr_init(void);
+
+#endif /* MGR_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h b/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
new file mode 100644
index 00000000000..bca4e103c7f
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
@@ -0,0 +1,45 @@
+/*
+ * mgrpriv.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global MGR constants and types, shared by PROC, MGR, and DSP API.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MGRPRIV_
+#define MGRPRIV_
+
+/*
+ * OMAP1510 specific
+ */
+#define MGR_MAXTLBENTRIES 32
+
+/* RM MGR Object */
+struct mgr_object;
+
+struct mgr_tlbentry {
+ u32 ul_dsp_virt; /* DSP virtual address */
+ u32 ul_gpp_phys; /* GPP physical address */
+};
+
+/*
+ * The DSP_PROCESSOREXTINFO structure describes additional extended
+ * capabilities of a DSP processor not exposed to user.
+ */
+struct mgr_processorextinfo {
+ struct dsp_processorinfo ty_basic; /* user processor info */
+ /* private dsp mmu entries */
+ struct mgr_tlbentry ty_tlb[MGR_MAXTLBENTRIES];
+};
+
+#endif /* MGRPRIV_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msg.h b/drivers/staging/tidspbridge/include/dspbridge/msg.h
new file mode 100644
index 00000000000..95778bcb5aa
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/msg.h
@@ -0,0 +1,86 @@
+/*
+ * msg.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge msg_ctrl Module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MSG_
+#define MSG_
+
+#include <dspbridge/devdefs.h>
+#include <dspbridge/msgdefs.h>
+
+/*
+ * ======== msg_create ========
+ * Purpose:
+ * Create an object to manage message queues. Only one of these objects
+ * can exist per device object. The msg_ctrl manager must be created before
+ * the IO Manager.
+ * Parameters:
+ * msg_man: Location to store msg_ctrl manager handle on output.
+ * hdev_obj: The device object.
+ * msg_callback: Called whenever an RMS_EXIT message is received.
+ * Returns:
+ * Requires:
+ * msg_mod_init(void) called.
+ * msg_man != NULL.
+ * hdev_obj != NULL.
+ * msg_callback != NULL.
+ * Ensures:
+ */
+extern int msg_create(struct msg_mgr **msg_man,
+ struct dev_object *hdev_obj,
+ msg_onexit msg_callback);
+
+/*
+ * ======== msg_delete ========
+ * Purpose:
+ * Delete a msg_ctrl manager allocated in msg_create().
+ * Parameters:
+ * hmsg_mgr: Handle returned from msg_create().
+ * Returns:
+ * Requires:
+ * msg_mod_init(void) called.
+ * Valid hmsg_mgr.
+ * Ensures:
+ */
+extern void msg_delete(struct msg_mgr *hmsg_mgr);
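+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * typical msg_ctrl manager lifetime during device setup and teardown.
+ * hdev_obj is assumed to be the caller's device object; node_on_exit is
+ * the callback that PROC normally passes in (see node.h).
+ *
+ *   struct msg_mgr *msg_man;
+ *   int status;
+ *
+ *   status = msg_create(&msg_man, hdev_obj, node_on_exit);
+ *   if (!status) {
+ *           ...
+ *           msg_delete(msg_man);
+ *   }
+ */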
+
+/*
+ * ======== msg_exit ========
+ * Purpose:
+ * Discontinue usage of msg_ctrl module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * msg_mod_init(void) successfully called before.
+ * Ensures:
+ * Any resources acquired in msg_mod_init(void) will be freed when last
+ * msg_ctrl client calls msg_exit(void).
+ */
+extern void msg_exit(void);
+
+/*
+ * ======== msg_mod_init ========
+ * Purpose:
+ * Initialize the msg_ctrl module.
+ * Parameters:
+ * Returns:
+ * TRUE if initialization succeeded, FALSE otherwise.
+ * Ensures:
+ */
+extern bool msg_mod_init(void);
+
+#endif /* MSG_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h
new file mode 100644
index 00000000000..80a3fa1a8a3
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h
@@ -0,0 +1,29 @@
+/*
+ * msgdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global msg_ctrl constants and types.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MSGDEFS_
+#define MSGDEFS_
+
+/* msg_ctrl Objects: */
+struct msg_mgr;
+struct msg_queue;
+
+/* Function prototype for callback to be called on RMS_EXIT message received */
+typedef void (*msg_onexit) (void *h, s32 node_status);
+
+#endif /* MSGDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldr.h b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
new file mode 100644
index 00000000000..d9653ee667e
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
@@ -0,0 +1,57 @@
+/*
+ * nldr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge dynamic loader interface.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dbdcddef.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/rmm.h>
+#include <dspbridge/nldrdefs.h>
+
+#ifndef NLDR_
+#define NLDR_
+
+extern int nldr_allocate(struct nldr_object *nldr_obj,
+ void *priv_ref, const struct dcd_nodeprops
+ *node_props,
+ struct nldr_nodeobject **nldr_nodeobj,
+ bool *pf_phase_split);
+
+extern int nldr_create(struct nldr_object **nldr,
+ struct dev_object *hdev_obj,
+ const struct nldr_attrs *pattrs);
+
+extern void nldr_delete(struct nldr_object *nldr_obj);
+extern void nldr_exit(void);
+
+extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
+ char *str_fxn, u32 * addr);
+
+extern int nldr_get_rmm_manager(struct nldr_object *nldr,
+ struct rmm_target_obj **rmm_mgr);
+
+extern bool nldr_init(void);
+extern int nldr_load(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase);
+extern int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
+ u32 offset_range, void *offset_output, char *sym_name);
+#endif
+
+#endif /* NLDR_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
new file mode 100644
index 00000000000..c85d3da3fe2
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
@@ -0,0 +1,293 @@
+/*
+ * nldrdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global Dynamic + static/overlay Node loader (NLDR) constants and types.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef NLDRDEFS_
+#define NLDRDEFS_
+
+#include <dspbridge/dbdcddef.h>
+#include <dspbridge/devdefs.h>
+
+#define NLDR_MAXPATHLENGTH 255
+/* NLDR Objects: */
+struct nldr_object;
+struct nldr_nodeobject;
+
+/*
+ * ======== nldr_loadtype ========
+ * Load types for a node. Must match values in node.h55.
+ */
+enum nldr_loadtype {
+ NLDR_STATICLOAD, /* Linked in base image, not overlay */
+ NLDR_DYNAMICLOAD, /* Dynamically loaded node */
+ NLDR_OVLYLOAD /* Linked in base image, overlay node */
+};
+
+/*
+ * ======== nldr_ovlyfxn ========
+ * Causes code or data to be copied from load address to run address. This
+ * is the "cod_writefxn" that gets passed to the DBLL_Library and is used as
+ * the ZL write function.
+ *
+ * Parameters:
+ * priv_ref: Handle to identify the node.
+ * dsp_run_addr: Run address of code or data.
+ * dsp_load_addr: Load address of code or data.
+ * ul_num_bytes: Number of (GPP) bytes to copy.
+ * mem_space: RMS_CODE or RMS_DATA.
+ * Returns:
+ * ul_num_bytes: Success.
+ * 0: Failure.
+ * Requires:
+ * Ensures:
+ */
+typedef u32(*nldr_ovlyfxn) (void *priv_ref, u32 dsp_run_addr,
+ u32 dsp_load_addr, u32 ul_num_bytes, u32 mem_space);
+
+/*
+ * ======== nldr_writefxn ========
+ * Write memory function. Used for dynamic load writes.
+ * Parameters:
+ * priv_ref: Handle to identify the node.
+ * dsp_add: Address of code or data.
+ * pbuf: Code or data to be written
+ * ul_num_bytes: Number of (GPP) bytes to write.
+ * mem_space: DBLL_DATA or DBLL_CODE.
+ * Returns:
+ * ul_num_bytes: Success.
+ * 0: Failure.
+ * Requires:
+ * Ensures:
+ */
+typedef u32(*nldr_writefxn) (void *priv_ref,
+ u32 dsp_add, void *pbuf,
+ u32 ul_num_bytes, u32 mem_space);
+
+/*
+ * ======== nldr_attrs ========
+ * Attributes passed to nldr_create function.
+ */
+struct nldr_attrs {
+ nldr_ovlyfxn pfn_ovly;
+ nldr_writefxn pfn_write;
+ u16 us_dsp_word_size;
+ u16 us_dsp_mau_size;
+};
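+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * filling the loader attributes before creating the loader. The callback
+ * names and the DSP word/MAU sizes below are hypothetical placeholders.
+ *
+ *   struct nldr_attrs attrs = {
+ *           .pfn_ovly = my_ovly_write,
+ *           .pfn_write = my_mem_write,
+ *           .us_dsp_word_size = 1,
+ *           .us_dsp_mau_size = 1,
+ *   };
+ *
+ *   status = nldr_create(&nldr, hdev_obj, &attrs);
+ */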
+
+/*
+ * ======== nldr_phase ========
+ * Indicates node create, delete, or execute phase function.
+ */
+enum nldr_phase {
+ NLDR_CREATE,
+ NLDR_DELETE,
+ NLDR_EXECUTE,
+ NLDR_NOPHASE
+};
+
+/*
+ * Typedefs of loader functions imported from a DLL, or defined in a
+ * function table.
+ */
+
+/*
+ * ======== nldr_allocate ========
+ * Allocate resources to manage the loading of a node on the DSP.
+ *
+ * Parameters:
+ * nldr_obj: Handle of loader that will load the node.
+ * priv_ref: Handle to identify the node.
+ * node_props: Pointer to a dcd_nodeprops for the node.
+ * nldr_nodeobj: Location to store node handle on output. This handle
+ * will be passed to nldr_load/nldr_unload.
+ * pf_phase_split: pointer to int variable referenced in node.c
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory on GPP.
+ * Requires:
+ * nldr_init(void) called.
+ * Valid nldr_obj.
+ * node_props != NULL.
+ * nldr_nodeobj != NULL.
+ * Ensures:
+ * 0: IsValidNode(*nldr_nodeobj).
+ * error: *nldr_nodeobj == NULL.
+ */
+typedef int(*nldr_allocatefxn) (struct nldr_object *nldr_obj,
+ void *priv_ref,
+ const struct dcd_nodeprops
+ * node_props,
+ struct nldr_nodeobject
+ **nldr_nodeobj,
+ bool *pf_phase_split);
+
+/*
+ * ======== nldr_create ========
+ * Create a loader object. This object handles the loading and unloading of
+ * create, delete, and execute phase functions of nodes on the DSP target.
+ *
+ * Parameters:
+ * nldr: Location to store loader handle on output.
+ * hdev_obj: Device for this processor.
+ * pattrs: Loader attributes.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * Requires:
+ * nldr_init(void) called.
+ * nldr != NULL.
+ * hdev_obj != NULL.
+ * pattrs != NULL.
+ * Ensures:
+ * 0: Valid *nldr.
+ * error: *nldr == NULL.
+ */
+typedef int(*nldr_createfxn) (struct nldr_object **nldr,
+ struct dev_object *hdev_obj,
+ const struct nldr_attrs *pattrs);
+
+/*
+ * ======== nldr_delete ========
+ * Delete the NLDR loader.
+ *
+ * Parameters:
+ * nldr_obj: Node manager object.
+ * Returns:
+ * Requires:
+ * nldr_init(void) called.
+ * Valid nldr_obj.
+ * Ensures:
+ * nldr_obj invalid
+ */
+typedef void (*nldr_deletefxn) (struct nldr_object *nldr_obj);
+
+/*
+ * ======== nldr_exit ========
+ * Discontinue usage of NLDR module.
+ *
+ * Parameters:
+ * Returns:
+ * Requires:
+ * nldr_init(void) successfully called before.
+ * Ensures:
+ * Any resources acquired in nldr_init(void) will be freed when last NLDR
+ * client calls nldr_exit(void).
+ */
+typedef void (*nldr_exitfxn) (void);
+
+/*
+ * ======== NLDR_Free ========
+ * Free resources allocated in nldr_allocate.
+ *
+ * Parameters:
+ * nldr_node_obj: Handle returned from nldr_allocate().
+ * Returns:
+ * Requires:
+ * nldr_init(void) called.
+ * Valid nldr_node_obj.
+ * Ensures:
+ */
+typedef void (*nldr_freefxn) (struct nldr_nodeobject *nldr_node_obj);
+
+/*
+ * ======== nldr_get_fxn_addr ========
+ * Get address of create, delete, or execute phase function of a node on
+ * the DSP.
+ *
+ * Parameters:
+ * nldr_node_obj: Handle returned from nldr_allocate().
+ * str_fxn: Name of function.
+ * addr: Location to store function address.
+ * Returns:
+ * 0: Success.
+ * -ESPIPE: Address of function not found.
+ * Requires:
+ * nldr_init(void) called.
+ * Valid nldr_node_obj.
+ * addr != NULL;
+ * str_fxn != NULL;
+ * Ensures:
+ */
+typedef int(*nldr_getfxnaddrfxn) (struct nldr_nodeobject
+ * nldr_node_obj,
+ char *str_fxn, u32 * addr);
+
+/*
+ * ======== nldr_init ========
+ * Initialize the NLDR module.
+ *
+ * Parameters:
+ * Returns:
+ * TRUE if initialization succeeded, FALSE otherwise.
+ * Ensures:
+ */
+typedef bool(*nldr_initfxn) (void);
+
+/*
+ * ======== nldr_load ========
+ * Load create, delete, or execute phase function of a node on the DSP.
+ *
+ * Parameters:
+ * nldr_node_obj: Handle returned from nldr_allocate().
+ * phase: Type of function to load (create, delete, or execute).
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory on GPP.
+ * -ENXIO: Can't overlay phase because overlay memory
+ * is already in use.
+ * -EILSEQ: Failure in dynamic loader library.
+ * Requires:
+ * nldr_init(void) called.
+ * Valid nldr_node_obj.
+ * Ensures:
+ */
+typedef int(*nldr_loadfxn) (struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase);
+
+/*
+ * ======== nldr_unload ========
+ * Unload create, delete, or execute phase function of a node on the DSP.
+ *
+ * Parameters:
+ * nldr_node_obj: Handle returned from nldr_allocate().
+ * phase: Node function to unload (create, delete, or execute).
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory on GPP.
+ * Requires:
+ * nldr_init(void) called.
+ * Valid nldr_node_obj.
+ * Ensures:
+ */
+typedef int(*nldr_unloadfxn) (struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase);
+
+/*
+ * ======== node_ldr_fxns ========
+ */
+struct node_ldr_fxns {
+ nldr_allocatefxn pfn_allocate;
+ nldr_createfxn pfn_create;
+ nldr_deletefxn pfn_delete;
+ nldr_exitfxn pfn_exit;
+ nldr_getfxnaddrfxn pfn_get_fxn_addr;
+ nldr_initfxn pfn_init;
+ nldr_loadfxn pfn_load;
+ nldr_unloadfxn pfn_unload;
+};
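+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * a loader function table wired to the nldr_* entry points declared in
+ * nldr.h. Whether the driver builds the table exactly this way is an
+ * assumption; the member layout follows the definition above.
+ *
+ *   static const struct node_ldr_fxns nldr_fxns = {
+ *           .pfn_allocate = nldr_allocate,
+ *           .pfn_create = nldr_create,
+ *           .pfn_delete = nldr_delete,
+ *           .pfn_exit = nldr_exit,
+ *           .pfn_get_fxn_addr = nldr_get_fxn_addr,
+ *           .pfn_init = nldr_init,
+ *           .pfn_load = nldr_load,
+ *           .pfn_unload = nldr_unload,
+ *   };
+ */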
+
+#endif /* NLDRDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/node.h b/drivers/staging/tidspbridge/include/dspbridge/node.h
new file mode 100644
index 00000000000..49ed5c1128e
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/node.h
@@ -0,0 +1,583 @@
+/*
+ * node.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Node Manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef NODE_
+#define NODE_
+
+#include <dspbridge/procpriv.h>
+
+#include <dspbridge/nodedefs.h>
+#include <dspbridge/dispdefs.h>
+#include <dspbridge/nldrdefs.h>
+#include <dspbridge/drv.h>
+
+/*
+ * ======== node_allocate ========
+ * Purpose:
+ * Allocate GPP resources to manage a node on the DSP.
+ * Parameters:
+ * hprocessor: Handle of processor that is allocating the node.
+ * node_uuid: Pointer to a dsp_uuid for the node.
+ * pargs: Optional arguments to be passed to the node.
+ * attr_in: Optional pointer to node attributes (priority,
+ * timeout...)
+ * noderes: Location to store node resource info.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Insufficient memory on GPP.
+ * -ENOKEY: Node UUID has not been registered.
+ * -ESPIPE: iAlg functions not found for a DAIS node.
+ * -EDOM: attr_in != NULL and attr_in->prio out of
+ * range.
+ * -EPERM: A failure occurred, unable to allocate node.
+ * -EBADR: Processor is not in the running state.
+ * Requires:
+ * node_init(void) called.
+ * hprocessor != NULL.
+ * node_uuid != NULL.
+ * noderes != NULL.
+ * Ensures:
+ * 0: IsValidNode(*ph_node).
+ * error: *noderes == NULL.
+ */
+extern int node_allocate(struct proc_object *hprocessor,
+ const struct dsp_uuid *node_uuid,
+ const struct dsp_cbdata
+ *pargs, const struct dsp_nodeattrin
+ *attr_in,
+ struct node_res_object **noderes,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== node_alloc_msg_buf ========
+ * Purpose:
+ * Allocate and Prepare a buffer whose descriptor will be passed to a
+ * Node within a (dsp_msg)message
+ * Parameters:
+ * hnode: The node handle.
+ * usize: The size of the buffer to be allocated.
+ * pattr: Pointer to a dsp_bufferattr structure.
+ * pbuffer: Location to store the address of the allocated
+ * buffer on output.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid node handle.
+ * -ENOMEM: Insufficient memory.
+ * -EPERM: General Failure.
+ * -EINVAL: Invalid Size.
+ * Requires:
+ * node_init(void) called.
+ * pbuffer != NULL.
+ * Ensures:
+ */
+extern int node_alloc_msg_buf(struct node_object *hnode,
+ u32 usize, struct dsp_bufferattr
+ *pattr, u8 **pbuffer);
+
+/*
+ * ======== node_change_priority ========
+ * Purpose:
+ * Change the priority of an allocated node.
+ * Parameters:
+ * hnode: Node handle returned from node_allocate.
+ * prio: New priority level to set node's priority to.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EDOM: prio is out of range.
+ * -EPERM: The specified node is not a task node.
+ * Unable to change node's runtime priority level.
+ * -EBADR: Node is not in the NODE_ALLOCATED, NODE_PAUSED,
+ * or NODE_RUNNING state.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * Requires:
+ * node_init(void) called.
+ * Ensures:
+ * 0 && (Node's current priority == prio)
+ */
+extern int node_change_priority(struct node_object *hnode, s32 prio);
+
+/*
+ * ======== node_close_orphans ========
+ * Purpose:
+ * Delete all nodes whose owning processor is being destroyed.
+ * Parameters:
+ * hnode_mgr: Node manager object.
+ * proc: Handle to processor object being destroyed.
+ * Returns:
+ * 0: Success.
+ * -EPERM: Unable to delete all nodes belonging to proc.
+ * Requires:
+ * Valid hnode_mgr.
+ * proc != NULL.
+ * Ensures:
+ */
+extern int node_close_orphans(struct node_mgr *hnode_mgr,
+ struct proc_object *proc);
+
+/*
+ * ======== node_connect ========
+ * Purpose:
+ * Connect two nodes on the DSP, or a node on the DSP to the GPP. In the
+ * case that the connection is being made between a node on the DSP and
+ * the GPP, one of the node handles (either node1 or node2) must be
+ * the constant NODE_HGPPNODE.
+ * Parameters:
+ * node1: Handle of first node to connect to second node. If
+ * this is a connection from the GPP to node2, node1
+ * must be the constant NODE_HGPPNODE. Otherwise, node1
+ * must be a node handle returned from a successful call
+ * to Node_Allocate().
+ * node2: Handle of second node. Must be either NODE_HGPPNODE
+ * if this is a connection from DSP node to GPP, or a
+ * node handle returned from a successful call to
+ * node_allocate().
+ * stream1: Output stream index on first node, to be connected
+ * to second node's input stream. Value must range from
+ * 0 <= stream1 < number of output streams.
+ * stream2: Input stream index on second node. Value must range
+ * from 0 <= stream2 < number of input streams.
+ * pattrs: Stream attributes (NULL ==> use defaults).
+ * conn_param: A pointer to a dsp_cbdata structure that defines
+ * connection parameter for device nodes to pass to DSP
+ * side.
+ * If the value of this parameter is NULL, then this API
+ * behaves like DSPNode_Connect. This parameter will have
+ * the length of the string and the null-terminated string in
+ * the dsp_cbdata struct. This can be extended in the future to
+ * pass binary data.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid node1 or node2.
+ * -ENOMEM: Insufficient host memory.
+ * -EINVAL: A stream index parameter is invalid.
+ * -EISCONN: A connection already exists for one of the
+ * indices stream1 or stream2.
+ * -EBADR: Either node1 or node2 is not in the
+ * NODE_ALLOCATED state.
+ * -ECONNREFUSED: No more connections available.
+ * -EPERM: Attempt to make an illegal connection (e.g.,
+ * device node to device node, or device node to
+ * GPP), or the two nodes are on different DSPs.
+ * Requires:
+ * node_init(void) called.
+ * Ensures:
+ */
+extern int node_connect(struct node_object *node1,
+ u32 stream1,
+ struct node_object *node2,
+ u32 stream2,
+ struct dsp_strmattr *pattrs,
+ struct dsp_cbdata
+ *conn_param);
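+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * connecting output stream 0 of one allocated node to input stream 0 of
+ * another, with default stream attributes. node1 and node2 are assumed
+ * to come from earlier node_allocate() calls.
+ *
+ *   status = node_connect(node1, 0, node2, 0, NULL, NULL);
+ *   if (status)
+ *           pr_err("node_connect failed: %d\n", status);
+ */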
+
+/*
+ * ======== node_create ========
+ * Purpose:
+ * Create a node on the DSP by remotely calling the node's create
+ * function. If necessary, load code that contains the node's create
+ * function.
+ * Parameters:
+ * hnode: Node handle returned from node_allocate().
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -ESPIPE: Create function not found in the COFF file.
+ * -EBADR: Node is not in the NODE_ALLOCATED state.
+ * -ENOMEM: Memory allocation failure on the DSP.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * -EPERM: A failure occurred, unable to create node.
+ * Requires:
+ * node_init(void) called.
+ * Ensures:
+ */
+extern int node_create(struct node_object *hnode);
+
+/*
+ * ======== node_create_mgr ========
+ * Purpose:
+ * Create a NODE Manager object. This object handles the creation,
+ * deletion, and execution of nodes on the DSP target. The NODE Manager
+ * also maintains a pipe map of used and available node connections.
+ * Each DEV object should have exactly one NODE Manager object.
+ *
+ * Parameters:
+ * node_man: Location to store node manager handle on output.
+ * hdev_obj: Device for this processor.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EPERM: General failure.
+ * Requires:
+ * node_init(void) called.
+ * node_man != NULL.
+ * hdev_obj != NULL.
+ * Ensures:
+ * 0: Valid *node_man.
+ * error: *node_man == NULL.
+ */
+extern int node_create_mgr(struct node_mgr **node_man,
+ struct dev_object *hdev_obj);
+
+/*
+ * ======== node_delete ========
+ * Purpose:
+ * Delete resources allocated in node_allocate(). If the node was
+ * created, delete the node on the DSP by remotely calling the node's
+ * delete function. Loads the node's delete function if necessary.
+ * GPP side resources are freed after node's delete function returns.
+ * Parameters:
+ * noderes: Node resource info handle returned from
+ * node_allocate().
+ * pr_ctxt: Pointer to process context data.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * -EPERM: A failure occurred in deleting the node.
+ * -ESPIPE: Delete function not found in the COFF file.
+ * Requires:
+ * node_init(void) called.
+ * Ensures:
+ * 0: hnode is invalid.
+ */
+extern int node_delete(struct node_res_object *noderes,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== node_delete_mgr ========
+ * Purpose:
+ * Delete the NODE Manager.
+ * Parameters:
+ * hnode_mgr: Node manager object.
+ * Returns:
+ * 0: Success.
+ * Requires:
+ * node_init(void) called.
+ * Valid hnode_mgr.
+ * Ensures:
+ */
+extern int node_delete_mgr(struct node_mgr *hnode_mgr);
+
+/*
+ * ======== node_enum_nodes ========
+ * Purpose:
+ * Enumerate the nodes currently allocated for the DSP.
+ * Parameters:
+ * hnode_mgr: Node manager returned from node_create_mgr().
+ * node_tab: Array to copy node handles into.
+ * node_tab_size: Number of handles that can be written to node_tab.
+ * pu_num_nodes: Location where number of node handles written to
+ * node_tab will be written.
+ * pu_allocated: Location to write total number of allocated nodes.
+ * Returns:
+ * 0: Success.
+ * -EINVAL: node_tab is too small to hold all node handles.
+ * Requires:
+ * Valid hnode_mgr.
+ * node_tab != NULL || node_tab_size == 0.
+ * pu_num_nodes != NULL.
+ * pu_allocated != NULL.
+ * Ensures:
+ * - (-EINVAL && *pu_num_nodes == 0)
+ * - || (0 && *pu_num_nodes <= node_tab_size) &&
+ * (*pu_allocated == *pu_num_nodes)
+ */
+extern int node_enum_nodes(struct node_mgr *hnode_mgr,
+ void **node_tab,
+ u32 node_tab_size,
+ u32 *pu_num_nodes,
+ u32 *pu_allocated);
+
+/*
+ * ======== node_exit ========
+ * Purpose:
+ * Discontinue usage of NODE module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * node_init(void) successfully called before.
+ * Ensures:
+ * Any resources acquired in node_init(void) will be freed when last NODE
+ * client calls node_exit(void).
+ */
+extern void node_exit(void);
+
+/*
+ * ======== node_free_msg_buf ========
+ * Purpose:
+ * Free a message buffer previously allocated with node_alloc_msg_buf.
+ * Parameters:
+ * hnode: The node handle.
+ * pbuffer: (Address) Buffer allocated by node_alloc_msg_buf.
+ * pattr: Same buffer attributes passed to node_alloc_msg_buf.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid node handle.
+ * -EPERM: Failure to free the buffer.
+ * Requires:
+ * node_init(void) called.
+ * pbuffer != NULL.
+ * Ensures:
+ */
+extern int node_free_msg_buf(struct node_object *hnode,
+ u8 *pbuffer,
+ struct dsp_bufferattr
+ *pattr);
+
+/*
+ * ======== node_get_attr ========
+ * Purpose:
+ * Copy the current attributes of the specified node into a dsp_nodeattr
+ * structure.
+ * Parameters:
+ * hnode: Node object allocated from node_allocate().
+ * pattr: Pointer to dsp_nodeattr structure to copy node's
+ * attributes.
+ * attr_size: Size of pattr.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * Requires:
+ * node_init(void) called.
+ * pattr != NULL.
+ * Ensures:
+ * 0: *pattrs contains the node's current attributes.
+ */
+extern int node_get_attr(struct node_object *hnode,
+ struct dsp_nodeattr *pattr, u32 attr_size);
+
+/*
+ * ======== node_get_message ========
+ * Purpose:
+ * Retrieve a message from a node on the DSP. The node must be either a
+ * message node, task node, or XDAIS socket node.
+ * If a message is not available, this function will block until a
+ * message is available, or the node's timeout value is reached.
+ * Parameters:
+ * hnode: Node handle returned from node_allocate().
+ * message: Pointer to dsp_msg structure to copy the
+ * message into.
+ * utimeout: Timeout in milliseconds to wait for message.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EPERM: Cannot retrieve messages from this type of node.
+ * Error occurred while trying to retrieve a message.
+ * -ETIME: Timeout occurred and no message is available.
+ * Requires:
+ * node_init(void) called.
+ * message != NULL.
+ * Ensures:
+ */
+extern int node_get_message(struct node_object *hnode,
+ struct dsp_msg *message, u32 utimeout);
+
+/*
+ * ======== node_get_nldr_obj ========
+ * Purpose:
+ * Retrieve the Nldr manager
+ * Parameters:
+ * hnode_mgr: Node Manager
+ * nldr_ovlyobj: Pointer to a Nldr manager handle
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * Ensures:
+ */
+extern int node_get_nldr_obj(struct node_mgr *hnode_mgr,
+ struct nldr_object **nldr_ovlyobj);
+
+/*
+ * ======== node_init ========
+ * Purpose:
+ * Initialize the NODE module.
+ * Parameters:
+ * Returns:
+ * TRUE if initialization succeeded, FALSE otherwise.
+ * Ensures:
+ */
+extern bool node_init(void);
+
+/*
+ * ======== node_on_exit ========
+ * Purpose:
+ * Gets called when RMS_EXIT is received for a node. PROC needs to pass
+ * this function as a parameter to msg_create(). This function then gets
+ * called by the Bridge driver when an exit message for a node is received.
+ * Parameters:
+ * hnode: Handle of the node that the exit message is for.
+ * node_status: Return status of the node's execute phase.
+ * Returns:
+ * Ensures:
+ */
+void node_on_exit(struct node_object *hnode, s32 node_status);
+
+/*
+ * ======== node_pause ========
+ * Purpose:
+ * Suspend execution of a node currently running on the DSP.
+ * Parameters:
+ * hnode: Node object representing a node currently
+ * running on the DSP.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EPERM: Node is not a task or socket node.
+ * Failed to pause node.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * DSP_EWRONGSTSATE: Node is not in NODE_RUNNING state.
+ * Requires:
+ * node_init(void) called.
+ * Ensures:
+ */
+extern int node_pause(struct node_object *hnode);
+
+/*
+ * ======== node_put_message ========
+ * Purpose:
+ * Send a message to a message node, task node, or XDAIS socket node.
+ * This function will block until the message stream can accommodate
+ * the message, or a timeout occurs. The message will be copied, so Msg
+ * can be re-used immediately after return.
+ * Parameters:
+ * hnode: Node handle returned by node_allocate().
+ * pmsg: Location of message to be sent to the node.
+ * utimeout: Timeout in msecs to wait.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EPERM: Messages can't be sent to this type of node.
+ * Unable to send message.
+ * -ETIME: Timeout occurred before the message could be sent.
+ * -EBADR: Node is in invalid state for sending messages.
+ * Requires:
+ * node_init(void) called.
+ * pmsg != NULL.
+ * Ensures:
+ */
+extern int node_put_message(struct node_object *hnode,
+ const struct dsp_msg *pmsg, u32 utimeout);
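+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * sending a message to a task node and waiting for a reply, with a one
+ * second timeout. hnode is assumed to come from node_allocate().
+ *
+ *   struct dsp_msg msg, reply;
+ *
+ *   ... fill in the msg fields ...
+ *   if (!node_put_message(hnode, &msg, 1000))
+ *           node_get_message(hnode, &reply, 1000);
+ */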
+
+/*
+ * ======== node_register_notify ========
+ * Purpose:
+ * Register to be notified on specific events for this node.
+ * Parameters:
+ * hnode: Node handle returned by node_allocate().
+ * event_mask: Mask of types of events to be notified about.
+ * notify_type: Type of notification to be sent.
+ * hnotification: Handle to be used for notification.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -ENOMEM: Insufficient memory on GPP.
+ * -EINVAL: event_mask is invalid.
+ * -ENOSYS: Notification type specified by notify_type is not
+ * supported.
+ * Requires:
+ * node_init(void) called.
+ * hnotification != NULL.
+ * Ensures:
+ */
+extern int node_register_notify(struct node_object *hnode,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification
+ *hnotification);
+
+/*
+ * ======== node_run ========
+ * Purpose:
+ * Start execution of a node's execute phase, or resume execution of
+ * a node that has been suspended (via node_pause()) on the DSP. Load
+ * the node's execute function if necessary.
+ * Parameters:
+ * hnode: Node object representing a node currently
+ * running on the DSP.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EPERM: hnode doesn't represent a message, task or dais socket node.
+ * Unable to start or resume execution.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * DSP_EWRONGSTSATE: Node is not in NODE_PAUSED or NODE_CREATED state.
+ * -ESPIPE: Execute function not found in the COFF file.
+ * Requires:
+ * node_init(void) called.
+ * Ensures:
+ */
+extern int node_run(struct node_object *hnode);
+
+/*
+ * ======== node_terminate ========
+ * Purpose:
+ * Signal a node running on the DSP that it should exit its execute
+ * phase function.
+ * Parameters:
+ * hnode: Node object representing a node currently
+ * running on the DSP.
+ * pstatus: Location to store execute-phase function return
+ * value.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -ETIME: A timeout occurred before the DSP responded.
+ * -EPERM: Type of node specified cannot be terminated.
+ * Unable to terminate the node.
+ * -EBADR: Operation not valid for the current node state.
+ * Requires:
+ * node_init(void) called.
+ * pstatus != NULL.
+ * Ensures:
+ */
+extern int node_terminate(struct node_object *hnode,
+ int *pstatus);
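+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * the typical node lifecycle built from the calls declared above.
+ * hprocessor, node_uuid, pr_ctxt and the hnode member of
+ * node_res_object are assumptions here; error handling is abbreviated.
+ *
+ *   struct node_res_object *noderes;
+ *   int exit_status;
+ *
+ *   node_allocate(hprocessor, node_uuid, NULL, NULL, &noderes, pr_ctxt);
+ *   node_create(noderes->hnode);
+ *   node_run(noderes->hnode);
+ *   ...
+ *   node_terminate(noderes->hnode, &exit_status);
+ *   node_delete(noderes, pr_ctxt);
+ */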
+
+/*
+ * ======== node_get_uuid_props ========
+ * Purpose:
+ * Fetch Node properties given the UUID
+ * Parameters:
+ *
+ */
+extern int node_get_uuid_props(void *hprocessor,
+ const struct dsp_uuid *node_uuid,
+ struct dsp_ndbprops
+ *node_props);
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/**
+ * node_find_addr() - Find the closest symbol to the given address.
+ *
+ * @node_mgr: Node manager handle
+ * @sym_addr: Given address to find the closest symbol
+ * @offset_range: offset range to look for the closest symbol
+ * @sym_addr_output: Symbol Output address
+ * @sym_name: String with the symbol name of the closest symbol
+ *
+ * This function finds the closest symbol to the address where a MMU
+ * Fault occurred on the DSP side.
+ */
+int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
+ u32 offset_range, void *sym_addr_output,
+ char *sym_name);
+
+enum node_state node_get_state(void *hnode);
+#endif
+
+#endif /* NODE_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h b/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h
new file mode 100644
index 00000000000..fb9623d8a79
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h
@@ -0,0 +1,28 @@
+/*
+ * nodedefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global NODE constants and types, shared by PROCESSOR, NODE, and DISP.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef NODEDEFS_
+#define NODEDEFS_
+
+#define NODE_SUSPENDEDPRI -1
+
+/* NODE Objects: */
+struct node_mgr;
+struct node_object;
+
+#endif /* NODEDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
new file mode 100644
index 00000000000..16b0233fc5d
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
@@ -0,0 +1,182 @@
+/*
+ * nodepriv.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private node header shared by NODE and DISP.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef NODEPRIV_
+#define NODEPRIV_
+
+#include <dspbridge/strmdefs.h>
+#include <dspbridge/nodedefs.h>
+#include <dspbridge/nldrdefs.h>
+
+/* DSP address of node environment structure */
+typedef u32 nodeenv;
+
+/*
+ * Node create structures
+ */
+
+/* Message node */
+struct node_msgargs {
+ u32 max_msgs; /* Max # of simultaneous messages for node */
+ u32 seg_id; /* Segment for allocating message buffers */
+ u32 notify_type; /* Notify type (SEM_post, SWI_post, etc.) */
+ u32 arg_length; /* Length in 32-bit words of arg data block */
+ u8 *pdata; /* Argument data for node */
+};
+
+struct node_strmdef {
+ u32 buf_size; /* Size of buffers for SIO stream */
+ u32 num_bufs; /* max # of buffers in SIO stream at once */
+ u32 seg_id; /* Memory segment id to allocate buffers */
+ u32 utimeout; /* Timeout for blocking SIO calls */
+ u32 buf_alignment; /* Buffer alignment */
+ char *sz_device; /* Device name for stream */
+};
+
+/* Task node */
+struct node_taskargs {
+ struct node_msgargs node_msg_args;
+ s32 prio;
+ u32 stack_size;
+ u32 sys_stack_size;
+ u32 stack_seg;
+ u32 udsp_heap_res_addr; /* DSP virtual heap address */
+ u32 udsp_heap_addr; /* DSP virtual heap address */
+ u32 heap_size; /* Heap size */
+ u32 ugpp_heap_addr; /* GPP virtual heap address */
+ u32 profile_id; /* Profile ID */
+ u32 num_inputs;
+ u32 num_outputs;
+ u32 ul_dais_arg; /* Address of iAlg object */
+ struct node_strmdef *strm_in_def;
+ struct node_strmdef *strm_out_def;
+};
+
+/*
+ * ======== node_createargs ========
+ */
+struct node_createargs {
+ union {
+ struct node_msgargs node_msg_args;
+ struct node_taskargs task_arg_obj;
+ } asa;
+};
+
+/*
+ * ======== node_get_channel_id ========
+ * Purpose:
+ * Get the channel index reserved for a stream connection between the
+ * host and a node. This index is reserved when node_connect() is called
+ * to connect the node with the host. This index should be passed to
+ * the CHNL_Open function when the stream is actually opened.
+ * Parameters:
+ * hnode: Node object allocated from node_allocate().
+ * dir: Input (DSP_TONODE) or output (DSP_FROMNODE).
+ * index: Stream index.
+ * chan_id: Location to store channel index.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EPERM: Not a task or DAIS socket node.
+ * -EINVAL: The node's stream corresponding to index and dir
+ * is not a stream to or from the host.
+ * Requires:
+ * node_init(void) called.
+ * Valid dir.
+ * chan_id != NULL.
+ * Ensures:
+ */
+extern int node_get_channel_id(struct node_object *hnode,
+ u32 dir, u32 index, u32 *chan_id);
+
+/*
+ * ======== node_get_strm_mgr ========
+ * Purpose:
+ * Get the STRM manager for a node.
+ * Parameters:
+ * hnode: Node allocated with node_allocate().
+ * strm_man: Location to store STRM manager on output.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * Requires:
+ * strm_man != NULL.
+ * Ensures:
+ */
+extern int node_get_strm_mgr(struct node_object *hnode,
+ struct strm_mgr **strm_man);
+
+/*
+ * ======== node_get_timeout ========
+ * Purpose:
+ * Get the timeout value of a node.
+ * Parameters:
+ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
+ * Returns:
+ * Node's timeout value.
+ * Requires:
+ * Valid hnode.
+ * Ensures:
+ */
+extern u32 node_get_timeout(struct node_object *hnode);
+
+/*
+ * ======== node_get_type ========
+ * Purpose:
+ * Get the type (device, message, task, or XDAIS socket) of a node.
+ * Parameters:
+ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
+ * Returns:
+ * Node type: NODE_DEVICE, NODE_TASK, NODE_XDAIS, or NODE_GPP.
+ * Requires:
+ * Valid hnode.
+ * Ensures:
+ */
+extern enum node_type node_get_type(struct node_object *hnode);
+
+/*
+ * ======== get_node_info ========
+ * Purpose:
+ * Get node information without holding semaphore.
+ * Parameters:
+ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
+ * Returns:
+ * Node info: priority, device owner, no. of streams, execution state
+ * NDB properties.
+ * Requires:
+ * Valid hnode.
+ * Ensures:
+ */
+extern void get_node_info(struct node_object *hnode,
+ struct dsp_nodeinfo *node_info);
+
+/*
+ * ======== node_get_load_type ========
+ * Purpose:
+ * Get the load type (dynamic, overlay, static) of a node.
+ * Parameters:
+ * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE.
+ * Returns:
+ * Node type: NLDR_DYNAMICLOAD, NLDR_OVLYLOAD, NLDR_STATICLOAD
+ * Requires:
+ * Valid hnode.
+ * Ensures:
+ */
+extern enum nldr_loadtype node_get_load_type(struct node_object *hnode);
+
+#endif /* NODEPRIV_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/ntfy.h b/drivers/staging/tidspbridge/include/dspbridge/ntfy.h
new file mode 100644
index 00000000000..cbc8819c61c
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/ntfy.h
@@ -0,0 +1,217 @@
+/*
+ * ntfy.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Manage lists of notification events.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef NTFY_
+#define NTFY_
+
+#include <dspbridge/host_os.h>
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/sync.h>
+
+/**
+ * ntfy_object - head structure used to notify dspbridge events
+ * @head: List of notify objects
+ * @ntfy_lock: lock for list access.
+ *
+ */
+struct ntfy_object {
+ struct raw_notifier_head head;/* List of notifier objects */
+ spinlock_t ntfy_lock; /* For critical sections */
+};
+
+/**
+ * ntfy_event - structure to store a specific event to be notified
+ * @noti_block: notifier block linked into an ntfy_object chain
+ * @event: event that it responds to
+ * @type: event type (only DSP_SIGNALEVENT supported)
+ * @sync_obj: sync_event used to set the event
+ *
+ */
+struct ntfy_event {
+ struct notifier_block noti_block;
+ u32 event; /* Events to be notified about */
+ u32 type; /* Type of notification to be sent */
+ struct sync_object sync_obj;
+};
+
+
+/**
+ * dsp_notifier_event() - callback function to notify events
+ * @this: pointer to itself struct notifier_block
+ * @event: event to be notified.
+ * @data: Currently not used.
+ *
+ */
+int dsp_notifier_event(struct notifier_block *this, unsigned long event,
+ void *data);
+
+/**
+ * ntfy_init() - Set the initial state of the ntfy_object structure.
+ * @no: pointer to ntfy_object structure.
+ *
+ * This function sets the initial state of the ntfy_object so that it
+ * can be used by the other ntfy functions.
+ */
+
+static inline void ntfy_init(struct ntfy_object *no)
+{
+ spin_lock_init(&no->ntfy_lock);
+ RAW_INIT_NOTIFIER_HEAD(&no->head);
+}
+
+/**
+ * ntfy_delete() - delete the list of registered notify events.
+ * @ntfy_obj: Pointer to the ntfy object structure.
+ *
+ * This function removes all the registered notify events. No
+ * unregister call is needed here; to unregister a single ntfy_event
+ * see the ntfy_unregister function.
+ *
+ */
+static inline void ntfy_delete(struct ntfy_object *ntfy_obj)
+{
+ struct ntfy_event *ne;
+ struct notifier_block *nb;
+
+ spin_lock_bh(&ntfy_obj->ntfy_lock);
+ nb = ntfy_obj->head.head;
+ while (nb) {
+ ne = container_of(nb, struct ntfy_event, noti_block);
+ nb = nb->next;
+ kfree(ne);
+ }
+ spin_unlock_bh(&ntfy_obj->ntfy_lock);
+}
+
+/**
+ * ntfy_notify() - notify all events registered for a specific event.
+ * @ntfy_obj: Pointer to the ntfy_object structure.
+ * @event: event to be notified.
+ *
+ * This function traverses all the registered ntfy events and
+ * signals those that match @event.
+ */
+static inline void ntfy_notify(struct ntfy_object *ntfy_obj, u32 event)
+{
+ spin_lock_bh(&ntfy_obj->ntfy_lock);
+ raw_notifier_call_chain(&ntfy_obj->head, event, NULL);
+ spin_unlock_bh(&ntfy_obj->ntfy_lock);
+}
+
+
+
+/**
+ * ntfy_event_create() - Create and initialize a ntfy_event structure.
+ * @event: event that the ntfy_event will respond to
+ * @type: event type (only DSP_SIGNALEVENT supported)
+ *
+ * This function creates a ntfy_event element and sets the event it will
+ * respond to, so that it can be used by the other ntfy functions.
+ * In case of success it will return a pointer to the ntfy_event struct
+ * created. Otherwise it will return NULL.
+ */
+
+static inline struct ntfy_event *ntfy_event_create(u32 event, u32 type)
+{
+ struct ntfy_event *ne;
+ ne = kmalloc(sizeof(struct ntfy_event), GFP_KERNEL);
+ if (ne) {
+ sync_init_event(&ne->sync_obj);
+ ne->noti_block.notifier_call = dsp_notifier_event;
+ ne->event = event;
+ ne->type = type;
+ }
+ return ne;
+}
+
+/**
+ * ntfy_register() - register a new ntfy_event into a given ntfy_object
+ * @ntfy_obj: Pointer to the ntfy_object structure.
+ * @noti: Pointer to the handle to be returned to the user space.
+ * @event: event that the ntfy_event will respond to
+ * @type: event type (only DSP_SIGNALEVENT supported)
+ *
+ * This function registers a new ntfy_event into the ntfy_object list,
+ * which will respond to the @event passed.
+ * It returns 0 on success, -EFAULT in case of bad pointers,
+ * -EINVAL if no event is specified, and -ENOMEM if there is no
+ * memory to create the ntfy_event.
+ */
+static inline int ntfy_register(struct ntfy_object *ntfy_obj,
+ struct dsp_notification *noti,
+ u32 event, u32 type)
+{
+ struct ntfy_event *ne;
+ int status = 0;
+
+ if (!noti || !ntfy_obj) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ if (!event) {
+ status = -EINVAL;
+ goto func_end;
+ }
+ ne = ntfy_event_create(event, type);
+ if (!ne) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ noti->handle = &ne->sync_obj;
+
+ spin_lock_bh(&ntfy_obj->ntfy_lock);
+ raw_notifier_chain_register(&ntfy_obj->head, &ne->noti_block);
+ spin_unlock_bh(&ntfy_obj->ntfy_lock);
+func_end:
+ return status;
+}
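+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * registering a DSP_SIGNALEVENT notification and later signalling it.
+ * ntfy_obj and the DSP_PROCESSORSTATECHANGE event mask are assumed
+ * caller/driver state.
+ *
+ *   struct dsp_notification noti;
+ *
+ *   if (!ntfy_register(ntfy_obj, &noti, DSP_PROCESSORSTATECHANGE,
+ *                      DSP_SIGNALEVENT))
+ *           ntfy_notify(ntfy_obj, DSP_PROCESSORSTATECHANGE);
+ */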
+
+/**
+ * ntfy_unregister() - unregister a ntfy_event from a given ntfy_object
+ * @ntfy_obj: Pointer to the ntfy_object structure.
+ * @noti: Pointer to the event that will be removed.
+ *
+ * This function unregisters a ntfy_event from the ntfy_object list;
+ * @noti identifies the event to be removed.
+ * It returns 0 on success and
+ * -EFAULT in case of bad pointers.
+ */
+static inline int ntfy_unregister(struct ntfy_object *ntfy_obj,
+ struct dsp_notification *noti)
+{
+ int status = 0;
+ struct ntfy_event *ne;
+
+ if (!noti || !ntfy_obj) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ ne = container_of((struct sync_object *)noti, struct ntfy_event,
+ sync_obj);
+ spin_lock_bh(&ntfy_obj->ntfy_lock);
+ raw_notifier_chain_unregister(&ntfy_obj->head,
+ &ne->noti_block);
+ kfree(ne);
+ spin_unlock_bh(&ntfy_obj->ntfy_lock);
+func_end:
+ return status;
+}
+
+#endif /* NTFY_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
new file mode 100644
index 00000000000..5e09fd165d9
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -0,0 +1,621 @@
+/*
+ * proc.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This is the DSP API RM module interface.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef PROC_
+#define PROC_
+
+#include <dspbridge/cfgdefs.h>
+#include <dspbridge/devdefs.h>
+#include <dspbridge/drv.h>
+
+extern char *iva_img;
+
+/*
+ * ======== proc_attach ========
+ * Purpose:
+ * Prepare for communication with a particular DSP processor, and return
+ * a handle to the processor object. The PROC Object gets created
+ * Parameters:
+ * processor_id : The processor index (zero-based).
+ * hmgr_obj : Handle to the Manager Object
+ * attr_in : Ptr to the dsp_processorattrin structure.
+ * A NULL value means use default values.
+ * ph_processor : Ptr to location to store processor handle.
+ * Returns:
+ * 0 : Success.
+ * -EPERM : General failure.
+ * -EFAULT : Invalid processor handle.
+ * 0: Success; Processor already attached.
+ * Requires:
+ * ph_processor != NULL.
+ * PROC Initialized.
+ * Ensures:
+ * -EPERM, and *ph_processor == NULL, OR
+ * Success and *ph_processor is a Valid Processor handle OR
+ * 0 and *ph_processor is a Valid Processor.
+ * Details:
+ * When attr_in is NULL, the default timeout value is 10 seconds.
+ */
+extern int proc_attach(u32 processor_id,
+ const struct dsp_processorattrin
+ *attr_in, void **ph_processor,
+ struct process_context *pr_ctxt);
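+
+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * attaching to processor 0 with default attributes and detaching on
+ * teardown. pr_ctxt is assumed to be the per-process context kept by
+ * the driver.
+ *
+ *   void *hprocessor;
+ *   int status;
+ *
+ *   status = proc_attach(0, NULL, &hprocessor, pr_ctxt);
+ *   if (!status) {
+ *           ...
+ *           proc_detach(pr_ctxt);
+ *   }
+ */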
+
+/*
+ * ======== proc_auto_start =========
+ * Purpose:
+ * A Particular device gets loaded with the default image
+ * if the AutoStart flag is set.
+ * Parameters:
+ * hdev_obj : Handle to the Device
+ * Returns:
+ * 0 : On Successful Loading
+ * -ENOENT : No DSP exec file found.
+ * -EPERM : General Failure
+ * Requires:
+ * hdev_obj != NULL.
+ * dev_node_obj != NULL.
+ * PROC Initialized.
+ * Ensures:
+ */
+extern int proc_auto_start(struct cfg_devnode *dev_node_obj,
+ struct dev_object *hdev_obj);
+
+/*
+ * ======== proc_ctrl ========
+ * Purpose:
+ * Pass control information to the GPP device driver managing the DSP
+ * processor. This will be an OEM-only function, and not part of the
+ * 'Bridge application developer's API.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * dw_cmd : Private driver IOCTL cmd ID.
+ * arg : Ptr to a driver-defined argument structure.
+ * Returns:
+ * 0 : SUCCESS
+ * -EFAULT : Invalid processor handle.
+ * -ETIME: A timeout occurred before the control information
+ * could be sent.
+ * -EPERM : General Failure.
+ * Requires:
+ * PROC Initialized.
+ * Ensures
+ * Details:
+ * This function Calls bridge_dev_ctrl.
+ */
+extern int proc_ctrl(void *hprocessor,
+ u32 dw_cmd, struct dsp_cbdata *arg);
+
+/*
+ * ======== proc_detach ========
+ * Purpose:
+ * Close a DSP processor and de-allocate all (GPP) resources reserved
+ * for it. The Processor Object is deleted.
+ * Parameters:
+ * pr_ctxt : The processor handle.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid handle.
+ * -EPERM : General failure.
+ * Requires:
+ * PROC Initialized.
+ * Ensures:
+ * PROC Object is destroyed.
+ */
+extern int proc_detach(struct process_context *pr_ctxt);
+
+/*
+ * ======== proc_enum_nodes ========
+ * Purpose:
+ * Enumerate the nodes currently allocated on a processor.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * node_tab : The first Location of an array allocated for node
+ * handles.
+ * node_tab_size: The number of (DSP_HNODE) handles that can be held
+ * in the memory the client has allocated for node_tab
+ * pu_num_nodes : Location where DSPProcessor_EnumNodes will return
+ * the number of valid handles written to node_tab
+ * pu_allocated : Location where DSPProcessor_EnumNodes will return
+ * the number of nodes that are allocated on the DSP.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EINVAL : The amount of memory allocated for node_tab is
+ * insufficient. That is, the number of nodes actually
+ * allocated on the DSP is greater than the value
+ * specified for node_tab_size.
+ * -EPERM : Unable to get Resource Information.
+ * Requires:
+ * pu_num_nodes is not NULL.
+ * pu_allocated is not NULL.
+ * node_tab is not NULL.
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_enum_nodes(void *hprocessor,
+ void **node_tab,
+ u32 node_tab_size,
+ u32 *pu_num_nodes,
+ u32 *pu_allocated);
+
+/*
+ * ======== proc_get_resource_info ========
+ * Purpose:
+ * Enumerate the resources currently available on a processor.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * resource_type: Type of resource.
+ * resource_info: Ptr to the dsp_resourceinfo structure.
+ * resource_info_size: Size of the structure.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EBADR: The processor is not in the PROC_RUNNING state.
+ * -ETIME: A timeout occurred before the DSP responded to the
+ * query.
+ * -EPERM : Unable to get Resource Information
+ * Requires:
+ * resource_info is not NULL.
+ * Parameter resource_type is Valid.[TBD]
+ * resource_info_size is >= sizeof dsp_resourceinfo struct.
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ * This function currently returns
+ * -ENOSYS, and does not write any data to the resource_info struct.
+ */
+extern int proc_get_resource_info(void *hprocessor,
+ u32 resource_type,
+ struct dsp_resourceinfo
+ *resource_info,
+ u32 resource_info_size);
+
+/*
+ * ======== proc_exit ========
+ * Purpose:
+ * Decrement reference count, and free resources when reference count is
+ * 0.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * PROC is initialized.
+ * Ensures:
+ * When reference count == 0, PROC's private resources are freed.
+ */
+extern void proc_exit(void);
+
+/*
+ * ======== proc_get_dev_object =========
+ * Purpose:
+ * Returns the DEV handle for a given processor handle.
+ * Parameters:
+ * hprocessor : Processor Handle
+ * device_obj : Location to store the DEV Handle.
+ * Returns:
+ * 0 : Success; *device_obj has Dev handle
+ * -EPERM : Failure; *device_obj is zero.
+ * Requires:
+ * device_obj is not NULL
+ * PROC Initialized.
+ * Ensures:
+ * 0 : *device_obj is not NULL
+ * -EPERM : *device_obj is NULL.
+ */
+extern int proc_get_dev_object(void *hprocessor,
+ struct dev_object **device_obj);
+
+/*
+ * ======== proc_init ========
+ * Purpose:
+ * Initialize PROC's private state, keeping a reference count on each
+ * call.
+ * Parameters:
+ * Returns:
+ * TRUE if initialized; FALSE if an error occurred.
+ * Requires:
+ * Ensures:
+ * TRUE: A requirement for the other public PROC functions.
+ */
+extern bool proc_init(void);
+
+/*
+ * ======== proc_get_state ========
+ * Purpose:
+ * Report the state of the specified DSP processor.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * proc_state_obj : Ptr to location to store the dsp_processorstate
+ * structure.
+ * state_info_size: Size of dsp_processorstate.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure while querying processor state.
+ * Requires:
+ * proc_state_obj is not NULL
+ * state_info_size is >= the size of the dsp_processorstate structure.
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_get_state(void *hprocessor, struct dsp_processorstate
+ *proc_state_obj, u32 state_info_size);
+
+/*
+ * ======== PROC_GetProcessorID ========
+ * Purpose:
+ * Report the processor ID of the specified DSP processor.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * proc_id : Processor ID
+ *
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure while querying processor state.
+ * Requires:
+ * proc_id is not NULL.
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_get_processor_id(void *proc, u32 *proc_id);
+
+/*
+ * ======== proc_get_trace ========
+ * Purpose:
+ * Retrieve the trace buffer from the specified DSP processor.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pbuf : Ptr to buffer to hold trace output.
+ * max_size : Maximum size of the output buffer.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure while retrieving processor trace
+ * buffer.
+ * Requires:
+ * pbuf is not NULL
+ * max_size is > 0.
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_get_trace(void *hprocessor, u8 *pbuf, u32 max_size);
+
+/*
+ * ======== proc_load ========
+ * Purpose:
+ * Reset a processor and load a new base program image.
+ * This will be an OEM-only function.
+ * Parameters:
+ * hprocessor: The processor handle.
+ * argc_index: The number of arguments (strings) in user_args[].
+ * user_args: An array of arguments (Unicode strings).
+ * user_envp: An array of environment settings (Unicode strings).
+ * Returns:
+ * 0: Success.
+ * -ENOENT: The DSP executable was not found.
+ * -EFAULT: Invalid processor handle.
+ * -EPERM : Unable to Load the Processor
+ * Requires:
+ * user_args is not NULL
+ * argc_index is > 0
+ * PROC Initialized.
+ * Ensures:
+ * Success and ProcState == PROC_LOADED
+ * or DSP_FAILED status.
+ * Details:
+ * Does not implement access rights to control which GPP application
+ * can load the processor.
+ */
+extern int proc_load(void *hprocessor,
+ const s32 argc_index, const char **user_args,
+ const char **user_envp);
+
+/*
+ * ======== proc_register_notify ========
+ * Purpose:
+ * Register to be notified of specific processor events
+ * Parameters:
+ * hprocessor : The processor handle.
+ * event_mask : Mask of types of events to be notified about.
+ * notify_type : Type of notification to be sent.
+ * hnotification: Handle to be used for notification.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle or hnotification.
+ * -EINVAL : Parameter event_mask is Invalid
+ * DSP_ENOTIMP : The notification type specified in notify_type
+ * is not supported.
+ * -EPERM : Unable to register for notification.
+ * Requires:
+ * hnotification is not NULL
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_register_notify(void *hprocessor,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification
+ *hnotification);
+
+/*
+ * ======== proc_notify_clients ========
+ * Purpose:
+ * Notify the Processor Clients
+ * Parameters:
+ * proc : The processor handle.
+ * events : Event to be notified about.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : Failure to Set or Reset the Event
+ * Requires:
+ * events is Supported or Valid type of Event
+ * proc is a valid handle
+ * PROC Initialized.
+ * Ensures:
+ */
+extern int proc_notify_clients(void *proc, u32 events);
+
+/*
+ * ======== proc_notify_all_clients ========
+ * Purpose:
+ * Notify the Processor Clients
+ * Parameters:
+ * proc : The processor handle.
+ * events : Event to be notified about.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : Failure to Set or Reset the Event
+ * Requires:
+ * events is Supported or Valid type of Event
+ * proc is a valid handle
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ * NODE And STRM would use this function to notify their clients
+ * about the state changes in NODE or STRM.
+ */
+extern int proc_notify_all_clients(void *proc, u32 events);
+
+/*
+ * ======== proc_start ========
+ * Purpose:
+ * Start a processor running.
+ * Processor must be in PROC_LOADED state.
+ * This will be an OEM-only function, and not part of the 'Bridge
+ * application developer's API.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EBADR: Processor is not in PROC_LOADED state.
+ * -EPERM : Unable to start the processor.
+ * Requires:
+ * PROC Initialized.
+ * Ensures:
+ * Success and ProcState == PROC_RUNNING or DSP_FAILED status.
+ * Details:
+ */
+extern int proc_start(void *hprocessor);
+
+/*
+ * ======== proc_stop ========
+ * Purpose:
+ * Stop a running processor.
+ * Processor must be in PROC_RUNNING state.
+ * This will be an OEM-only function, and not part of the 'Bridge
+ * application developer's API.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EBADR: Processor is not in PROC_RUNNING state.
+ * -EPERM : Unable to stop the processor.
+ * Requires:
+ * PROC Initialized.
+ * Ensures:
+ * Success and ProcState == PROC_STOPPED or DSP_FAILED status.
+ * Details:
+ */
+extern int proc_stop(void *hprocessor);
+
+/*
+ * ======== proc_end_dma ========
+ * Purpose:
+ * End a DMA transfer
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Buffer start address
+ * ul_size : Buffer size
+ * dir : The direction of the transfer
+ * Requires:
+ * Memory was previously mapped.
+ */
+extern int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir);
+/*
+ * ======== proc_begin_dma ========
+ * Purpose:
+ * Begin a DMA transfer
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Buffer start address
+ * ul_size : Buffer size
+ * dir : The direction of the transfer
+ * Requires:
+ * Memory was previously mapped.
+ */
+extern int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir);
+
+/*
+ * ======== proc_flush_memory ========
+ * Purpose:
+ * Flushes a buffer from the MPU data cache.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Buffer start address
+ * ul_size : Buffer size
+ * ul_flags : Reserved.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure.
+ * Requires:
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ * All the arguments are currently ignored.
+ */
+extern int proc_flush_memory(void *hprocessor,
+ void *pmpu_addr, u32 ul_size, u32 ul_flags);
+
+/*
+ * ======== proc_invalidate_memory ========
+ * Purpose:
+ * Invalidates a buffer from the MPU data cache.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Buffer start address
+ * ul_size : Buffer size
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure.
+ * Requires:
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ * All the arguments are currently ignored.
+ */
+extern int proc_invalidate_memory(void *hprocessor,
+ void *pmpu_addr, u32 ul_size);
+
+/*
+ * ======== proc_map ========
+ * Purpose:
+ * Maps a MPU buffer to DSP address space.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * pmpu_addr : Starting address of the memory region to map.
+ * ul_size : Size of the memory region to map.
+ * req_addr : Requested DSP start address. Offset-adjusted actual
+ * mapped address is in the last argument.
+ * pp_map_addr : Ptr to DSP side mapped u8 address.
+ * ul_map_attr : Optional endianness attributes, virt to phys flag.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure.
+ * -ENOMEM : MPU side memory allocation error.
+ * -ENOENT : Cannot find a reserved region starting with this
+ * : address.
+ * Requires:
+ * pmpu_addr is not NULL
+ * ul_size is not zero
+ * pp_map_addr is not NULL
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_map(void *hprocessor,
+ void *pmpu_addr,
+ u32 ul_size,
+ void *req_addr,
+ void **pp_map_addr, u32 ul_map_attr,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== proc_reserve_memory ========
+ * Purpose:
+ * Reserve a virtually contiguous region of DSP address space.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * ul_size : Size of the address space to reserve.
+ * pp_rsv_addr : Ptr to DSP side reserved u8 address.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure.
+ * -ENOMEM : Cannot reserve chunk of this size.
+ * Requires:
+ * pp_rsv_addr is not NULL
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_reserve_memory(void *hprocessor,
+ u32 ul_size, void **pp_rsv_addr,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== proc_un_map ========
+ * Purpose:
+ * Removes a MPU buffer mapping from the DSP address space.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * map_addr : Starting address of the mapped memory region.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure.
+ * -ENOENT : Cannot find a mapped region starting with this
+ * : address.
+ * Requires:
+ * map_addr is not NULL
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_un_map(void *hprocessor, void *map_addr,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== proc_un_reserve_memory ========
+ * Purpose:
+ * Frees a previously reserved region of DSP address space.
+ * Parameters:
+ * hprocessor : The processor handle.
+ * prsv_addr : Ptr to DSP side reserved u8 address.
+ * Returns:
+ * 0 : Success.
+ * -EFAULT : Invalid processor handle.
+ * -EPERM : General failure.
+ * -ENOENT : Cannot find a reserved region starting with this
+ * : address.
+ * Requires:
+ * prsv_addr is not NULL
+ * PROC Initialized.
+ * Ensures:
+ * Details:
+ */
+extern int proc_un_reserve_memory(void *hprocessor,
+ void *prsv_addr,
+ struct process_context *pr_ctxt);
+
+#endif /* PROC_ */
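
As a usage illustration of the PROC interface declared above, the following is a minimal caller-side sketch of the attach / load / start / stop / detach lifecycle, using only the prototypes in this header. The include path, the image path, and the process context passed in by the caller are assumptions; timeout values and error handling are kept deliberately simple.

#include <linux/printk.h>
#include <dspbridge/proc.h>	/* assumed include path for this header */

static int example_proc_lifecycle(struct process_context *pr_ctxt,
				  const char *image_path)
{
	const char *argv[] = { image_path, NULL };
	void *hprocessor = NULL;
	int status;

	/* Attach to processor 0 with default attributes (attr_in == NULL). */
	status = proc_attach(0, NULL, &hprocessor, pr_ctxt);
	if (status)
		return status;

	/* Load the base image, then start the DSP. */
	status = proc_load(hprocessor, 1, argv, NULL);
	if (!status)
		status = proc_start(hprocessor);

	if (!status) {
		/* ... use the processor here ... */
		proc_stop(hprocessor);
	}

	proc_detach(pr_ctxt);
	return status;
}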
diff --git a/drivers/staging/tidspbridge/include/dspbridge/procpriv.h b/drivers/staging/tidspbridge/include/dspbridge/procpriv.h
new file mode 100644
index 00000000000..77d1f0ef95c
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/procpriv.h
@@ -0,0 +1,25 @@
+/*
+ * procpriv.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global PROC constants and types, shared by PROC, MGR and DSP API.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef PROCPRIV_
+#define PROCPRIV_
+
+/* RM PROC Object */
+struct proc_object;
+
+#endif /* PROCPRIV_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/pwr.h b/drivers/staging/tidspbridge/include/dspbridge/pwr.h
new file mode 100644
index 00000000000..a6dc783904e
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/pwr.h
@@ -0,0 +1,107 @@
+/*
+ * pwr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef PWR_
+#define PWR_
+
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/pwr_sh.h>
+
+/*
+ * ======== pwr_sleep_dsp ========
+ * Signal the DSP to go to sleep.
+ *
+ * Parameters:
+ * sleep_code: New sleep state for DSP. (Initially, valid codes
+ * are PWR_DEEPSLEEP or PWR_EMERGENCYDEEPSLEEP; both of
+ * these codes will simply put the DSP in deep sleep.)
+ *
+ * timeout: Maximum time (msec) that PWR should wait for
+ * confirmation that the DSP sleep state has been
+ * reached. If PWR should simply send the command to
+ * the DSP to go to sleep and then return (i.e.,
+ * asynchronous sleep), the timeout should be
+ * specified as zero.
+ *
+ * Returns:
+ * 0: Success.
+ * 0: Success, but the DSP was already asleep.
+ * -EINVAL: The specified sleep_code is not supported.
+ * -ETIME: A timeout occurred while waiting for DSP sleep
+ * confirmation.
+ * -EPERM: General failure, unable to send sleep command to
+ * the DSP.
+ */
+extern int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout);
+
+/*
+ * ======== pwr_wake_dsp ========
+ * Signal the DSP to wake from sleep.
+ *
+ * Parameters:
+ * timeout: Maximum time (msec) that PWR should wait for
+ * confirmation that the DSP is awake. If PWR should
+ * simply send a command to the DSP to wake and then
+ * return (i.e., asynchronous wake), timeout should
+ * be specified as zero.
+ *
+ * Returns:
+ * 0: Success.
+ * 0: Success, but the DSP was already awake.
+ * -ETIME: A timeout occurred while waiting for wake
+ * confirmation.
+ * -EPERM: General failure, unable to send wake command to
+ * the DSP.
+ */
+extern int pwr_wake_dsp(const u32 timeout);
+
+/*
+ * ======== pwr_pm_pre_scale ========
+ * Prescale notification to DSP.
+ *
+ * Parameters:
+ * voltage_domain: The voltage domain for which notification is sent
+ * level: The level of voltage domain
+ *
+ * Returns:
+ * 0: Success.
+ * 0: Success, but the DSP was already awake.
+ * -ETIME: A timeout occurred while waiting for wake
+ * confirmation.
+ * -EPERM: General failure, unable to send wake command to
+ * the DSP.
+ */
+extern int pwr_pm_pre_scale(u16 voltage_domain, u32 level);
+
+/*
+ * ======== pwr_pm_post_scale ========
+ * PostScale notification to DSP.
+ *
+ * Parameters:
+ * voltage_domain: The voltage domain for which notification is sent
+ * level: The level of voltage domain
+ *
+ * Returns:
+ * 0: Success.
+ * 0: Success, but the DSP was already awake.
+ * -ETIME: A timeout occurred while waiting for wake
+ * confirmation.
+ * -EPERM: General failure, unable to send wake command to
+ * the DSP.
+ */
+extern int pwr_pm_post_scale(u16 voltage_domain, u32 level);
+
+#endif /* PWR_ */
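
For illustration, a minimal sketch of the sleep/wake pairing documented above. The 500 ms timeouts are placeholders chosen for the example, and the include path is an assumption; PWR_DEEPSLEEP comes from pwr_sh.h, which this header pulls in.

#include <linux/printk.h>
#include <dspbridge/pwr.h>	/* assumed include path for this header */

static void example_pwr_cycle(void)
{
	int status;

	/* Ask the DSP to enter deep sleep, waiting up to 500 ms. */
	status = pwr_sleep_dsp(PWR_DEEPSLEEP, 500);
	if (status)
		pr_warn("pwr_sleep_dsp failed: %d\n", status);

	/* ... later, wake it up again, waiting up to 500 ms. */
	status = pwr_wake_dsp(500);
	if (status)
		pr_warn("pwr_wake_dsp failed: %d\n", status);
}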
diff --git a/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h b/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h
new file mode 100644
index 00000000000..1b4a090abe7
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h
@@ -0,0 +1,33 @@
+/*
+ * pwr_sh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Power Manager shared definitions (used on both GPP and DSP sides).
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef PWR_SH_
+#define PWR_SH_
+
+#include <dspbridge/mbx_sh.h>
+
+/* valid sleep command codes that can be sent by GPP via mailbox: */
+#define PWR_DEEPSLEEP MBX_PM_DSPIDLE
+#define PWR_EMERGENCYDEEPSLEEP MBX_PM_EMERGENCYSLEEP
+#define PWR_SLEEPUNTILRESTART MBX_PM_SLEEPUNTILRESTART
+#define PWR_WAKEUP MBX_PM_DSPWAKEUP
+#define PWR_AUTOENABLE MBX_PM_PWRENABLE
+#define PWR_AUTODISABLE MBX_PM_PWRDISABLE
+#define PWR_RETENTION MBX_PM_DSPRETN
+
+#endif /* PWR_SH_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h b/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
new file mode 100644
index 00000000000..dfaf0c6c06f
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
@@ -0,0 +1,52 @@
+/*
+ * resourcecleanup.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <dspbridge/nodepriv.h>
+#include <dspbridge/drv.h>
+
+extern int drv_get_proc_ctxt_list(struct process_context **pctxt,
+ struct drv_object *hdrv_obj);
+
+extern int drv_insert_proc_context(struct drv_object *driver_obj,
+ void *process_ctxt);
+
+extern int drv_remove_all_dmm_res_elements(void *process_ctxt);
+
+extern int drv_remove_all_node_res_elements(void *process_ctxt);
+
+extern int drv_proc_set_pid(void *ctxt, s32 process);
+
+extern int drv_remove_all_resources(void *process_ctxt);
+
+extern int drv_remove_proc_context(struct drv_object *driver_obj,
+ void *pr_ctxt);
+
+extern int drv_insert_node_res_element(void *hnode, void *node_resource,
+ void *process_ctxt);
+
+extern void drv_proc_node_update_heap_status(void *node_resource, s32 status);
+
+extern void drv_proc_node_update_status(void *node_resource, s32 status);
+
+extern int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources);
+
+extern int drv_proc_insert_strm_res_element(void *stream_obj,
+ void *strm_res,
+ void *process_ctxt);
+
+extern int drv_remove_all_strm_res_elements(void *process_ctxt);
+
+extern enum node_state node_get_state(void *hnode);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmm.h b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
new file mode 100644
index 00000000000..baea536681e
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
@@ -0,0 +1,181 @@
+/*
+ * rmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This memory manager provides general heap management and arbitrary
+ * alignment for any number of memory segments, and management of overlay
+ * memory.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef RMM_
+#define RMM_
+
+/*
+ * ======== rmm_addr ========
+ * DSP address + segid
+ */
+struct rmm_addr {
+ u32 addr;
+ s32 segid;
+};
+
+/*
+ * ======== rmm_segment ========
+ * Memory segment on the DSP available for remote allocations.
+ */
+struct rmm_segment {
+ u32 base; /* Base of the segment */
+ u32 length; /* Size of the segment (target MAUs) */
+ s32 space; /* Code or data */
+ u32 number; /* Number of Allocated Blocks */
+};
+
+/*
+ * ======== RMM_Target ========
+ */
+struct rmm_target_obj;
+
+/*
+ * ======== rmm_alloc ========
+ *
+ * rmm_alloc is used to remotely allocate or reserve memory on the DSP.
+ *
+ * Parameters:
+ * target - Target returned from rmm_create().
+ * segid - Memory segment to allocate from.
+ * size - Size (target MAUs) to allocate.
+ * align - Alignment.
+ * dsp_address - If reserve is FALSE, the location to store allocated
+ * address on output, otherwise, the DSP address to
+ * reserve.
+ * reserve - If TRUE, reserve the memory specified by dsp_address.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation on GPP failed.
+ * -ENXIO: Cannot "allocate" overlay memory because it's
+ * already in use.
+ * Requires:
+ * RMM initialized.
+ * Valid target.
+ * dsp_address != NULL.
+ * size > 0
+ * reserve || target->num_segs > 0.
+ * Ensures:
+ */
+extern int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
+ u32 align, u32 *dsp_address, bool reserve);
+
+/*
+ * ======== rmm_create ========
+ * Create a target object with memory segments for remote allocation. If
+ * seg_tab == NULL or num_segs == 0, memory can only be reserved through
+ * rmm_alloc().
+ *
+ * Parameters:
+ * target_obj: - Location to store target on output.
+ * seg_tab: - Table of memory segments.
+ * num_segs: - Number of memory segments.
+ * Returns:
+ * 0: Success.
+ * -ENOMEM: Memory allocation failed.
+ * Requires:
+ * RMM initialized.
+ * target_obj != NULL.
+ * num_segs == 0 || seg_tab != NULL.
+ * Ensures:
+ * Success: Valid *target_obj.
+ * Failure: *target_obj == NULL.
+ */
+extern int rmm_create(struct rmm_target_obj **target_obj,
+ struct rmm_segment seg_tab[], u32 num_segs);
+
+/*
+ * ======== rmm_delete ========
+ * Delete target allocated in rmm_create().
+ *
+ * Parameters:
+ * target - Target returned from rmm_create().
+ * Returns:
+ * Requires:
+ * RMM initialized.
+ * Valid target.
+ * Ensures:
+ */
+extern void rmm_delete(struct rmm_target_obj *target);
+
+/*
+ * ======== rmm_exit ========
+ * Exit the RMM module
+ *
+ * Parameters:
+ * Returns:
+ * Requires:
+ * rmm_init successfully called.
+ * Ensures:
+ */
+extern void rmm_exit(void);
+
+/*
+ * ======== rmm_free ========
+ * Free or unreserve memory allocated through rmm_alloc().
+ *
+ * Parameters:
+ * target: - Target returned from rmm_create().
+ * segid: - Segment of memory to free.
+ * dsp_address: - Address to free or unreserve.
+ * size: - Size of memory to free or unreserve.
+ * reserved: - TRUE if memory was reserved only, otherwise FALSE.
+ * Returns:
+ * Requires:
+ * RMM initialized.
+ * Valid target.
+ * reserved || segid < target->num_segs.
+ * reserved || [dsp_addr, dsp_addr + size] is a valid memory range.
+ * Ensures:
+ */
+extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr,
+ u32 size, bool reserved);
+
+/*
+ * ======== rmm_init ========
+ * Initialize the RMM module
+ *
+ * Parameters:
+ * Returns:
+ * TRUE: Success.
+ * FALSE: Failure.
+ * Requires:
+ * Ensures:
+ */
+extern bool rmm_init(void);
+
+/*
+ * ======== rmm_stat ========
+ * Obtain memory segment status
+ *
+ * Parameters:
+ * segid: Segment ID of the dynamic loading segment.
+ * mem_stat_buf: Pointer to allocated buffer into which memory stats are
+ * placed.
+ * Returns:
+ * TRUE: Success.
+ * FALSE: Failure.
+ * Requires:
+ * segid < target->num_segs
+ * Ensures:
+ */
+extern bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
+ struct dsp_memstat *mem_stat_buf);
+
+#endif /* RMM_ */
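
The following is a hedged sketch of the RMM flow declared above: create a target with one segment, allocate a block from it, then free it and tear the target down. The segment base, length, space code and alignment values are placeholders for illustration only, as is the include path.

#include <linux/errno.h>
#include <linux/types.h>
#include <dspbridge/rmm.h>	/* assumed include path for this header */

static int example_rmm_usage(void)
{
	struct rmm_segment seg = {
		.base = 0x20000000,	/* placeholder DSP base address */
		.length = 0x10000,	/* placeholder size in target MAUs */
		.space = 0,		/* placeholder: code space */
		.number = 0,
	};
	struct rmm_target_obj *target;
	u32 dsp_addr = 0;
	int status;

	if (!rmm_init())
		return -ENOMEM;

	status = rmm_create(&target, &seg, 1);
	if (!status) {
		/* Allocate 0x100 MAUs, 32-MAU aligned, from segment 0. */
		status = rmm_alloc(target, 0, 0x100, 32, &dsp_addr, false);
		if (!status)
			rmm_free(target, 0, dsp_addr, 0x100, false);
		rmm_delete(target);
	}

	rmm_exit();
	return status;
}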
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h b/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
new file mode 100644
index 00000000000..7bc5574342a
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
@@ -0,0 +1,95 @@
+/*
+ * rms_sh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Resource Manager Server shared definitions (used on both
+ * GPP and DSP sides).
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef RMS_SH_
+#define RMS_SH_
+
+#include <dspbridge/rmstypes.h>
+
+/* Node Types: */
+#define RMS_TASK 1 /* Task node */
+#define RMS_DAIS 2 /* xDAIS socket node */
+#define RMS_MSG 3 /* Message node */
+
+/* Memory Types: */
+#define RMS_CODE 0 /* Program space */
+#define RMS_DATA 1 /* Data space */
+#define RMS_IO 2 /* I/O space */
+
+/* RM Server Command and Response Buffer Sizes: */
+#define RMS_COMMANDBUFSIZE 256 /* Size of command buffer */
+#define RMS_RESPONSEBUFSIZE 16 /* Size of response buffer */
+
+/* Pre-Defined Command/Response Codes: */
+#define RMS_EXIT 0x80000000 /* GPP->Node: shutdown */
+#define RMS_EXITACK 0x40000000 /* Node->GPP: ack shutdown */
+#define RMS_BUFDESC 0x20000000 /* Arg1 SM buf, Arg2 SM size */
+#define RMS_KILLTASK 0x10000000 /* GPP->Node: Kill Task */
+#define RMS_USER 0x0 /* Start of user-defined msg codes */
+#define RMS_MAXUSERCODES 0xfff /* Maximum user defined C/R Codes */
+
+/* RM Server RPC Command Structure: */
+struct rms_command {
+ rms_word fxn; /* Server function address */
+ rms_word arg1; /* First argument */
+ rms_word arg2; /* Second argument */
+ rms_word data; /* Function-specific data array */
+};
+
+/*
+ * The rms_strm_def structure defines the parameters for both input and output
+ * streams, and is passed to a node's create function.
+ */
+struct rms_strm_def {
+ rms_word bufsize; /* Buffer size (in DSP words) */
+ rms_word nbufs; /* Max number of bufs in stream */
+ rms_word segid; /* Segment to allocate buffers */
+ rms_word align; /* Alignment for allocated buffers */
+ rms_word timeout; /* Timeout (msec) for blocking calls */
+ char name[1]; /* Device Name (terminated by '\0') */
+};
+
+/* Message node create args structure: */
+struct rms_msg_args {
+ rms_word max_msgs; /* Max # simultaneous msgs to node */
+ rms_word segid; /* Mem segment for NODE_allocMsgBuf */
+ rms_word notify_type; /* Type of message notification */
+ rms_word arg_length; /* Length (in DSP chars) of arg data */
+ rms_word arg_data; /* Arg data for node */
+};
+
+/* Partial task create args structure */
+struct rms_more_task_args {
+ rms_word priority; /* Task's runtime priority level */
+ rms_word stack_size; /* Task's stack size */
+ rms_word sysstack_size; /* Task's system stack size (55x) */
+ rms_word stack_seg; /* Memory segment for task's stack */
+ rms_word heap_addr; /* base address of the node memory heap in
+ * external memory (DSP virtual address) */
+ rms_word heap_size; /* size in MAUs of the node memory heap in
+ * external memory */
+ rms_word misc; /* Misc field. Not used for 'normal'
+ * task nodes; for xDAIS socket nodes
+ * specifies the IALG_Fxn pointer.
+ */
+ /* # input STRM definition structures */
+ rms_word num_input_streams;
+};
+
+#endif /* RMS_SH_ */
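
To make the shared RPC layout above concrete, here is a small sketch of how the GPP side might fill a struct rms_command before handing it to the RMS server. The function address and argument values are placeholders; the real encoding of arg1/arg2/data depends on the server function being invoked.

#include <dspbridge/rms_sh.h>	/* assumed include path for this header */

static void example_fill_rms_command(struct rms_command *cmd,
				     rms_word server_fxn,
				     rms_word node_env)
{
	cmd->fxn = server_fxn;	/* RMS server function to invoke */
	cmd->arg1 = node_env;	/* first argument, e.g. node environment */
	cmd->arg2 = 0;		/* second argument, unused in this sketch */
	cmd->data = 0;		/* start of function-specific data */
}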
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h b/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h
new file mode 100644
index 00000000000..83c0f1d9619
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h
@@ -0,0 +1,24 @@
+/*
+ * rmstypes.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Resource Manager Server shared data type definitions.
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef RMSTYPES_
+#define RMSTYPES_
+#include <linux/types.h>
+typedef u32 rms_word;
+
+#endif /* RMSTYPES_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/services.h b/drivers/staging/tidspbridge/include/dspbridge/services.h
new file mode 100644
index 00000000000..eb26c867c93
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/services.h
@@ -0,0 +1,50 @@
+/*
+ * services.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Provide loading and unloading of SERVICES modules.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef SERVICES_
+#define SERVICES_
+
+#include <dspbridge/host_os.h>
+/*
+ * ======== services_exit ========
+ * Purpose:
+ * Discontinue usage of module; free resources when reference count
+ * reaches 0.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * SERVICES initialized.
+ * Ensures:
+ * Resources used by the module are freed when the reference count reaches zero.
+ */
+extern void services_exit(void);
+
+/*
+ * ======== services_init ========
+ * Purpose:
+ * Initializes SERVICES modules.
+ * Parameters:
+ * Returns:
+ * TRUE if all modules initialized; otherwise FALSE.
+ * Requires:
+ * Ensures:
+ * SERVICES modules initialized.
+ */
+extern bool services_init(void);
+
+#endif /* SERVICES_ */
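
A minimal sketch of pairing services_init() with services_exit() around use of the SERVICES modules, as the comments above describe; the include path and the -ENODEV mapping are assumptions made for the example.

#include <linux/errno.h>
#include <dspbridge/services.h>	/* assumed include path for this header */

static int example_services_scope(void)
{
	if (!services_init())
		return -ENODEV;

	/* ... use SERVICES-dependent modules here ... */

	services_exit();
	return 0;
}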
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strm.h b/drivers/staging/tidspbridge/include/dspbridge/strm.h
new file mode 100644
index 00000000000..3e4671e7f91
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/strm.h
@@ -0,0 +1,404 @@
+/*
+ * strm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSPBridge Stream Manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef STRM_
+#define STRM_
+
+#include <dspbridge/dev.h>
+
+#include <dspbridge/strmdefs.h>
+#include <dspbridge/proc.h>
+
+/*
+ * ======== strm_allocate_buffer ========
+ * Purpose:
+ * Allocate data buffer(s) for use with a stream.
+ * Parameter:
+ * strmres: Stream resource info handle returned from strm_open().
+ * usize: Size (GPP bytes) of the buffer(s).
+ * num_bufs: Number of buffers to allocate.
+ * ap_buffer: Array to hold buffer addresses.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -ENOMEM: Insufficient memory.
+ * -EPERM: Failure occurred, unable to allocate buffers.
+ * -EINVAL: usize must be > 0 bytes.
+ * Requires:
+ * strm_init(void) called.
+ * ap_buffer != NULL.
+ * Ensures:
+ */
+extern int strm_allocate_buffer(struct strm_res_object *strmres,
+ u32 usize,
+ u8 **ap_buffer,
+ u32 num_bufs,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== strm_close ========
+ * Purpose:
+ * Close a stream opened with strm_open().
+ * Parameter:
+ * strmres: Stream resource info handle returned from strm_open().
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -EPIPE: Some data buffers issued to the stream have not
+ * been reclaimed.
+ * -EPERM: Failure to close stream.
+ * Requires:
+ * strm_init(void) called.
+ * Ensures:
+ */
+extern int strm_close(struct strm_res_object *strmres,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== strm_create ========
+ * Purpose:
+ * Create a STRM manager object. This object holds information about the
+ * device needed to open streams.
+ * Parameters:
+ * strm_man: Location to store handle to STRM manager object on
+ * output.
+ * dev_obj: Device for this processor.
+ * Returns:
+ * 0: Success;
+ * -ENOMEM: Insufficient memory for requested resources.
+ * -EPERM: General failure.
+ * Requires:
+ * strm_init(void) called.
+ * strm_man != NULL.
+ * dev_obj != NULL.
+ * Ensures:
+ * 0: Valid *strm_man.
+ * error: *strm_man == NULL.
+ */
+extern int strm_create(struct strm_mgr **strm_man,
+ struct dev_object *dev_obj);
+
+/*
+ * ======== strm_delete ========
+ * Purpose:
+ * Delete the STRM Object.
+ * Parameters:
+ * strm_mgr_obj: Handle to STRM manager object from strm_create.
+ * Returns:
+ * Requires:
+ * strm_init(void) called.
+ * Valid strm_mgr_obj.
+ * Ensures:
+ * strm_mgr_obj is not valid.
+ */
+extern void strm_delete(struct strm_mgr *strm_mgr_obj);
+
+/*
+ * ======== strm_exit ========
+ * Purpose:
+ * Discontinue usage of STRM module.
+ * Parameters:
+ * Returns:
+ * Requires:
+ * strm_init(void) successfully called before.
+ * Ensures:
+ */
+extern void strm_exit(void);
+
+/*
+ * ======== strm_free_buffer ========
+ * Purpose:
+ * Free buffer(s) allocated with strm_allocate_buffer.
+ * Parameter:
+ * strmres: Stream resource info handle returned from strm_open().
+ * ap_buffer: Array containing buffer addresses.
+ * num_bufs: Number of buffers to be freed.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream handle.
+ * -EPERM: Failure occurred, unable to free buffers.
+ * Requires:
+ * strm_init(void) called.
+ * ap_buffer != NULL.
+ * Ensures:
+ */
+extern int strm_free_buffer(struct strm_res_object *strmres,
+ u8 **ap_buffer, u32 num_bufs,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== strm_get_event_handle ========
+ * Purpose:
+ * Get stream's user event handle. This function is used when closing
+ * a stream, so the event can be closed.
+ * Parameter:
+ * stream_obj: Stream handle returned from strm_open().
+ * ph_event: Location to store event handle on output.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * Requires:
+ * strm_init(void) called.
+ * ph_event != NULL.
+ * Ensures:
+ */
+extern int strm_get_event_handle(struct strm_object *stream_obj,
+ void **ph_event);
+
+/*
+ * ======== strm_get_info ========
+ * Purpose:
+ * Get information about a stream. User's dsp_streaminfo is contained
+ * in stream_info struct. stream_info also contains Bridge private info.
+ * Parameters:
+ * stream_obj: Stream handle returned from strm_open().
+ * stream_info: Location to store stream info on output.
+ * stream_info_size: Size of user's dsp_streaminfo structure.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -EINVAL: stream_info_size < sizeof(dsp_streaminfo).
+ * -EPERM: Unable to get stream info.
+ * Requires:
+ * strm_init(void) called.
+ * stream_info != NULL.
+ * Ensures:
+ */
+extern int strm_get_info(struct strm_object *stream_obj,
+ struct stream_info *stream_info,
+ u32 stream_info_size);
+
+/*
+ * ======== strm_idle ========
+ * Purpose:
+ * Idle a stream and optionally flush output data buffers.
+ * If this is an output stream and flush_data is TRUE, all data currently
+ * enqueued will be discarded.
+ * If this is an output stream and flush_data is FALSE, this function
+ * will block until all currently buffered data is output, or the timeout
+ * specified has been reached.
+ * After a successful call to strm_idle(), all buffers can immediately
+ * be reclaimed.
+ * Parameters:
+ * stream_obj: Stream handle returned from strm_open().
+ * flush_data: If TRUE, discard output buffers.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -ETIME: A timeout occurred before the stream could be idled.
+ * -EPERM: Unable to idle stream.
+ * Requires:
+ * strm_init(void) called.
+ * Ensures:
+ */
+extern int strm_idle(struct strm_object *stream_obj, bool flush_data);
+
+/*
+ * ======== strm_init ========
+ * Purpose:
+ * Initialize the STRM module.
+ * Parameters:
+ * Returns:
+ * TRUE if initialization succeeded, FALSE otherwise.
+ * Requires:
+ * Ensures:
+ */
+extern bool strm_init(void);
+
+/*
+ * ======== strm_issue ========
+ * Purpose:
+ * Send a buffer of data to a stream.
+ * Parameters:
+ * stream_obj: Stream handle returned from strm_open().
+ * pbuf: Pointer to buffer of data to be sent to the stream.
+ * ul_bytes: Number of bytes of data in the buffer.
+ * ul_buf_size: Actual buffer size in bytes.
+ * dw_arg: A user argument that travels with the buffer.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -ENOSR: The stream is full.
+ * -EPERM: Failure occurred, unable to issue buffer.
+ * Requires:
+ * strm_init(void) called.
+ * pbuf != NULL.
+ * Ensures:
+ */
+extern int strm_issue(struct strm_object *stream_obj, u8 *pbuf,
+ u32 ul_bytes, u32 ul_buf_size, u32 dw_arg);
+
+/*
+ * ======== strm_open ========
+ * Purpose:
+ * Open a stream for sending/receiving data buffers to/from a task of
+ * DAIS socket node on the DSP.
+ * Parameters:
+ * hnode: Node handle returned from node_allocate().
+ * dir: DSP_TONODE or DSP_FROMNODE.
+ * index: Stream index.
+ * pattr: Pointer to structure containing attributes to be
+ * applied to stream. Cannot be NULL.
+ * strmres: Location to store stream resource info handle on output.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hnode.
+ * -EPERM: Invalid direction.
+ * hnode is not a task or DAIS socket node.
+ * Unable to open stream.
+ * -EINVAL: Invalid index.
+ * Requires:
+ * strm_init(void) called.
+ * strmres != NULL.
+ * pattr != NULL.
+ * Ensures:
+ * 0: *strmres is valid.
+ * error: *strmres == NULL.
+ */
+extern int strm_open(struct node_object *hnode, u32 dir,
+ u32 index, struct strm_attr *pattr,
+ struct strm_res_object **strmres,
+ struct process_context *pr_ctxt);
+
+/*
+ * ======== strm_prepare_buffer ========
+ * Purpose:
+ * Prepare a data buffer not allocated by DSPStream_AllocateBuffers()
+ * for use with a stream.
+ * Parameter:
+ * stream_obj: Stream handle returned from strm_open().
+ * usize: Size (GPP bytes) of the buffer.
+ * pbuffer: Buffer address.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -EPERM: Failure occurred, unable to prepare buffer.
+ * Requires:
+ * strm_init(void) called.
+ * pbuffer != NULL.
+ * Ensures:
+ */
+extern int strm_prepare_buffer(struct strm_object *stream_obj,
+ u32 usize, u8 *pbuffer);
+
+/*
+ * ======== strm_reclaim ========
+ * Purpose:
+ * Request a buffer back from a stream.
+ * Parameters:
+ * stream_obj: Stream handle returned from strm_open().
+ * buf_ptr: Location to store pointer to reclaimed buffer.
+ * nbytes: Location where number of bytes of data in the
+ * buffer will be written.
+ * buff_size: Location where actual buffer size will be written.
+ * pdw_arg: Location where user argument that travels with
+ * the buffer will be written.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -ETIME: A timeout occurred before a buffer could be
+ * retrieved.
+ * -EPERM: Failure occurred, unable to reclaim buffer.
+ * Requires:
+ * strm_init(void) called.
+ * buf_ptr != NULL.
+ * nbytes != NULL.
+ * pdw_arg != NULL.
+ * Ensures:
+ */
+extern int strm_reclaim(struct strm_object *stream_obj,
+ u8 **buf_ptr, u32 * nbytes,
+ u32 *buff_size, u32 *pdw_arg);
+
+/*
+ * ======== strm_register_notify ========
+ * Purpose:
+ * Register to be notified on specific events for this stream.
+ * Parameters:
+ * stream_obj: Stream handle returned by strm_open().
+ * event_mask: Mask of types of events to be notified about.
+ * notify_type: Type of notification to be sent.
+ * hnotification: Handle to be used for notification.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -ENOMEM: Insufficient memory on GPP.
+ * -EINVAL: event_mask is invalid.
+ * -ENOSYS: Notification type specified by notify_type is not
+ * supported.
+ * Requires:
+ * strm_init(void) called.
+ * hnotification != NULL.
+ * Ensures:
+ */
+extern int strm_register_notify(struct strm_object *stream_obj,
+ u32 event_mask, u32 notify_type,
+ struct dsp_notification
+ *hnotification);
+
+/*
+ * ======== strm_select ========
+ * Purpose:
+ * Select a ready stream.
+ * Parameters:
+ * strm_tab: Array of stream handles returned from strm_open().
+ * strms: Number of stream handles in array.
+ * pmask: Location to store mask of ready streams on output.
+ * utimeout: Timeout value (milliseconds).
+ * Returns:
+ * 0: Success.
+ * -EDOM: strms out of range.
+ * -EFAULT: Invalid stream handle in array.
+ * -ETIME: A timeout occurred before a stream became ready.
+ * -EPERM: Failure occurred, unable to select a stream.
+ * Requires:
+ * strm_init(void) called.
+ * strm_tab != NULL.
+ * strms > 0.
+ * pmask != NULL.
+ * Ensures:
+ * 0: *pmask != 0 || utimeout == 0.
+ * Error: *pmask == 0.
+ */
+extern int strm_select(struct strm_object **strm_tab,
+ u32 strms, u32 *pmask, u32 utimeout);
+
+/*
+ * ======== strm_unprepare_buffer ========
+ * Purpose:
+ * Unprepare a data buffer that was previously prepared for a stream
+ * with DSPStream_PrepareBuffer(), and that will no longer be used with
+ * the stream.
+ * Parameter:
+ * stream_obj: Stream handle returned from strm_open().
+ * usize: Size (GPP bytes) of the buffer.
+ * pbuffer: Buffer address.
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid stream_obj.
+ * -EPERM: Failure occurred, unable to unprepare buffer.
+ * Requires:
+ * strm_init(void) called.
+ * pbuffer != NULL.
+ * Ensures:
+ */
+extern int strm_unprepare_buffer(struct strm_object *stream_obj,
+ u32 usize, u8 *pbuffer);
+
+#endif /* STRM_ */
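
As an illustration of the stream interface above, a hedged sketch of one issue/reclaim round trip on an already open stream. The stream object and buffer are assumed to have been obtained through strm_open() and strm_allocate_buffer(), which are declared in this header but not exercised here; the include path is likewise an assumption.

#include <linux/printk.h>
#include <linux/types.h>
#include <dspbridge/strm.h>	/* assumed include path for this header */

static int example_strm_roundtrip(struct strm_object *stream_obj,
				  u8 *buf, u32 buf_size, u32 nbytes)
{
	u8 *reclaimed;
	u32 bytes_back, size_back, arg;
	int status;

	/* Hand the buffer to the stream; the last argument travels with it. */
	status = strm_issue(stream_obj, buf, nbytes, buf_size, 0);
	if (status)
		return status;

	/* Block until the DSP hands a buffer back (or a timeout occurs). */
	status = strm_reclaim(stream_obj, &reclaimed, &bytes_back,
			      &size_back, &arg);
	if (!status)
		pr_debug("reclaimed %u bytes, arg 0x%x\n", bytes_back, arg);

	return status;
}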
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
new file mode 100644
index 00000000000..b363f794de3
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
@@ -0,0 +1,46 @@
+/*
+ * strmdefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global STRM constants and types.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef STRMDEFS_
+#define STRMDEFS_
+
+#define STRM_MAXEVTNAMELEN 32
+
+struct strm_mgr;
+
+struct strm_object;
+
+struct strm_attr {
+ void *user_event;
+ char *pstr_event_name;
+ void *virt_base; /* Process virtual base address of
+ * mapped SM */
+ u32 ul_virt_size; /* Size of virtual space in bytes */
+ struct dsp_streamattrin *stream_attr_in;
+};
+
+struct stream_info {
+ enum dsp_strmmode strm_mode; /* transport mode of
+ * stream(DMA, ZEROCOPY..) */
+ u32 segment_id; /* Segment strm allocs from. 0 is local mem */
+ void *virt_base; /* Stream's process virtual base address */
+ struct dsp_streaminfo *user_strm; /* User's stream information
+ * returned */
+};
+
+#endif /* STRMDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/sync.h b/drivers/staging/tidspbridge/include/dspbridge/sync.h
new file mode 100644
index 00000000000..e2651e7b1c4
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/sync.h
@@ -0,0 +1,109 @@
+/*
+ * sync.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Provide synchronization services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _SYNC_H
+#define _SYNC_H
+
+#include <dspbridge/dbdefs.h>
+
+
+/* Special timeout value indicating an infinite wait: */
+#define SYNC_INFINITE 0xffffffff
+
+/**
+ * struct sync_object - the basic sync_object structure
+ * @comp: used to signal events
+ * @multi_comp: used to signal multiple events.
+ *
+ */
+struct sync_object{
+ struct completion comp;
+ struct completion *multi_comp;
+};
+
+/**
+ * sync_init_event() - set initial state for a sync_event element
+ * @event: event to be initialized.
+ *
+ * Set the initial state for a sync_event element.
+ */
+
+static inline void sync_init_event(struct sync_object *event)
+{
+ init_completion(&event->comp);
+ event->multi_comp = NULL;
+}
+
+/**
+ * sync_reset_event() - reset a sync_event element
+ * @event: event to be reset.
+ *
+ * This function resets @event to its initial state.
+ */
+
+static inline void sync_reset_event(struct sync_object *event)
+{
+ INIT_COMPLETION(event->comp);
+ event->multi_comp = NULL;
+}
+
+/**
+ * sync_set_event() - set or signal the specified event
+ * @event: Event to be set.
+ *
+ * Set the @event; if there is a thread waiting for the event
+ * it will be woken up. This function only wakes one thread.
+ */
+
+void sync_set_event(struct sync_object *event);
+
+/**
+ * sync_wait_on_event() - waits for an event to be set.
+ * @event: event to wait for.
+ * @timeout: timeout (msecs) to wait for the event.
+ *
+ * This function will wait until @event is set or until the timeout expires.
+ * It returns 0 on success and -ETIME in case of a timeout.
+ */
+
+static inline int sync_wait_on_event(struct sync_object *event,
+ unsigned timeout)
+{
+ return wait_for_completion_timeout(&event->comp,
+ msecs_to_jiffies(timeout)) ? 0 : -ETIME;
+}
+
+/**
+ * sync_wait_on_multiple_events() - waits for multiple events to be set.
+ * @events: array of events to wait for.
+ * @count: number of elements in the array.
+ * @timeout: timeout (msecs) to wait for the events.
+ * @index: location to store the index of the event that was set.
+ *
+ * This function will wait until any of the array elements is set or until
+ * the timeout expires. In case of success the function will return 0 and
+ * @index will store the index of the array element that was set; in case
+ * of timeout the function will return -ETIME.
+ */
+
+int sync_wait_on_multiple_events(struct sync_object **events,
+ unsigned count, unsigned timeout,
+ unsigned *index);
+
+#endif /* _SYNC_H */
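
To make the sync_object pattern above concrete, a small sketch: one path initializes the event and waits with a timeout, while another path (for example a tasklet) signals it. Only functions declared in this header are used; the include path is an assumption.

#include <dspbridge/sync.h>	/* assumed include path for this header */

static void example_sync_setup(struct sync_object *event)
{
	sync_init_event(event);		/* must precede any wait */
}

static void example_sync_signal(struct sync_object *event)
{
	sync_set_event(event);		/* wakes one waiting thread */
}

static int example_sync_wait(struct sync_object *event)
{
	/* Wait up to 100 ms; returns 0 on success or -ETIME on timeout. */
	return sync_wait_on_event(event, 100);
}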
diff --git a/drivers/staging/tidspbridge/include/dspbridge/utildefs.h b/drivers/staging/tidspbridge/include/dspbridge/utildefs.h
new file mode 100644
index 00000000000..8fe5414824c
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/utildefs.h
@@ -0,0 +1,39 @@
+/*
+ * utildefs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global UTIL constants and types, shared between DSP API and DSPSYS.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef UTILDEFS_
+#define UTILDEFS_
+
+/* constants taken from configmg.h */
+#define UTIL_MAXMEMREGS 9
+#define UTIL_MAXIOPORTS 20
+#define UTIL_MAXIRQS 7
+#define UTIL_MAXDMACHNLS 7
+
+/* misc. constants */
+#define UTIL_MAXARGVS 10
+
+/* Platform specific important info */
+struct util_sysinfo {
+ /* Granularity of page protection; usually 1k or 4k */
+ u32 dw_page_size;
+ u32 dw_allocation_granularity; /* VM granularity, usually 64K */
+ u32 dw_number_of_processors; /* Used as sanity check */
+};
+
+#endif /* UTILDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
new file mode 100644
index 00000000000..9a994753e9b
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
@@ -0,0 +1,62 @@
+/*
+ * uuidutil.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This file contains the specification of UUID helper functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef UUIDUTIL_
+#define UUIDUTIL_
+
+#define MAXUUIDLEN 37
+
+/*
+ * ======== uuid_uuid_to_string ========
+ * Purpose:
+ * Converts a dsp_uuid to an ANSI string.
+ * Parameters:
+ * uuid_obj: Pointer to a dsp_uuid object.
+ * sz_uuid: Pointer to a buffer to receive a NULL-terminated UUID
+ * string.
+ * size: Maximum size of the sz_uuid string.
+ * Returns:
+ * Requires:
+ * uuid_obj & sz_uuid are non-NULL values.
+ * Ensures:
+ * Length of sz_uuid is less than MAXUUIDLEN.
+ * Details:
+ * UUID string limit currently set at MAXUUIDLEN.
+ */
+void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
+ s32 size);
+
+/*
+ * ======== uuid_uuid_from_string ========
+ * Purpose:
+ * Converts an ANSI string to a dsp_uuid.
+ * Parameters:
+ * sz_uuid: Pointer to a string that represents a dsp_uuid object.
+ * uuid_obj: Pointer to a dsp_uuid object.
+ * Returns:
+ * Requires:
+ * uuid_obj & sz_uuid are non-NULL values.
+ * Ensures:
+ * Details:
+ * We assume the string representation of a UUID has the following format:
+ * "12345678_1234_1234_1234_123456789abc".
+ */
+extern void uuid_uuid_from_string(char *sz_uuid,
+ struct dsp_uuid *uuid_obj);
+
+#endif /* UUIDUTIL_ */
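
A small round-trip sketch of the two helpers above, using the "12345678_1234_1234_1234_123456789abc" string format they document. The dsp_uuid object is assumed to be supplied by the caller, and the include path is an assumption.

#include <dspbridge/uuidutil.h>	/* assumed include path for this header */

static void example_uuid_roundtrip(struct dsp_uuid *uuid_obj)
{
	char in[] = "12345678_1234_1234_1234_123456789abc";
	char sz_uuid[MAXUUIDLEN];

	/* Parse the fixed string, then format it back into a buffer. */
	uuid_uuid_from_string(in, uuid_obj);
	uuid_uuid_to_string(uuid_obj, sz_uuid, sizeof(sz_uuid));
}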
diff --git a/drivers/staging/tidspbridge/include/dspbridge/wdt.h b/drivers/staging/tidspbridge/include/dspbridge/wdt.h
new file mode 100644
index 00000000000..4c00ba5fa5b
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/wdt.h
@@ -0,0 +1,79 @@
+/*
+ * wdt.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Watchdog timer (WDT3) support for the DSP Bridge driver.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#ifndef __DSP_WDT3_H_
+#define __DSP_WDT3_H_
+
+/* WDT defines */
+#define OMAP3_WDT3_ISR_OFFSET 0x0018
+
+
+/**
+ * struct dsp_wdt_setting - the basic dsp_wdt_setting structure
+ * @reg_base: pointer to the base of the wdt registers
+ * @sm_wdt: pointer to flags in shared memory
+ * @wdt3_tasklet: tasklet to manage wdt events
+ * @fclk: handle to wdt3 functional clock
+ * @iclk: handle to wdt3 interface clock
+ *
+ * This struct is used by the functions that manage wdt3.
+ */
+
+struct dsp_wdt_setting {
+ void __iomem *reg_base;
+ struct shm *sm_wdt;
+ struct tasklet_struct wdt3_tasklet;
+ struct clk *fclk;
+ struct clk *iclk;
+};
+
+/**
+ * dsp_wdt_init() - initialize wdt3 module.
+ *
+ * This function initializes the wdt3 module so that
+ * other wdt3 functions can be used.
+ */
+int dsp_wdt_init(void);
+
+/**
+ * dsp_wdt_exit() - shut down the wdt3 module.
+ *
+ * This function frees all resources allocated for wdt3 module.
+ */
+void dsp_wdt_exit(void);
+
+/**
+ * dsp_wdt_enable() - enable/disable wdt3
+ * @enable: bool value to enable/disable wdt3
+ *
+ * This function enables or disables wdt3 based on the @enable value.
+ *
+ */
+void dsp_wdt_enable(bool enable);
+
+/**
+ * dsp_wdt_sm_set() - store a pointer to the shared memory
+ * @data: pointer to dspbridge shared memory
+ *
+ * This function is used to pass a valid pointer to shared memory,
+ * so that the flags can be set for the DSP side to read them.
+ *
+ */
+void dsp_wdt_sm_set(void *data);
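+
+/*
+ * Typical call order (illustrative sketch only; the 'shm' pointer and the
+ * exact ordering are assumptions, not a documented contract):
+ *
+ *	dsp_wdt_init();
+ *	dsp_wdt_sm_set(shm);
+ *	dsp_wdt_enable(true);
+ *	...
+ *	dsp_wdt_enable(false);
+ *	dsp_wdt_exit();
+ */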
+
+#endif
+
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
new file mode 100644
index 00000000000..90317b58f8e
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -0,0 +1,163 @@
+/*
+ * chnl.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP API channel interface: multiplexes data streams through the single
+ * physical link managed by a Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/proc.h>
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/chnlpriv.h>
+#include <chnlobj.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/chnl.h>
+
+/* ----------------------------------- Globals */
+static u32 refs;
+
+/*
+ * ======== chnl_create ========
+ * Purpose:
+ * Create a channel manager object, responsible for opening new channels
+ * and closing old ones for a given Bridge board.
+ */
+int chnl_create(struct chnl_mgr **channel_mgr,
+ struct dev_object *hdev_obj,
+ const struct chnl_mgrattrs *mgr_attrts)
+{
+ int status;
+ struct chnl_mgr *hchnl_mgr;
+ struct chnl_mgr_ *chnl_mgr_obj = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(channel_mgr != NULL);
+ DBC_REQUIRE(mgr_attrts != NULL);
+
+ *channel_mgr = NULL;
+
+ /* Validate args: */
+ if ((0 < mgr_attrts->max_channels) &&
+ (mgr_attrts->max_channels <= CHNL_MAXCHANNELS))
+ status = 0;
+ else if (mgr_attrts->max_channels == 0)
+ status = -EINVAL;
+ else
+ status = -ECHRNG;
+
+ if (mgr_attrts->word_size == 0)
+ status = -EINVAL;
+
+ if (!status) {
+ status = dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
+ if (!status && hchnl_mgr != NULL)
+ status = -EEXIST;
+
+ }
+
+ if (!status) {
+ struct bridge_drv_interface *intf_fxns;
+ dev_get_intf_fxns(hdev_obj, &intf_fxns);
+ /* Let Bridge channel module finish the create: */
+ status = (*intf_fxns->pfn_chnl_create) (&hchnl_mgr, hdev_obj,
+ mgr_attrts);
+ if (!status) {
+ /* Fill in DSP API channel module's fields of the
+ * chnl_mgr structure */
+ chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
+ chnl_mgr_obj->intf_fxns = intf_fxns;
+ /* Finally, return the new channel manager handle: */
+ *channel_mgr = hchnl_mgr;
+ }
+ }
+
+ DBC_ENSURE(status || chnl_mgr_obj);
+
+ return status;
+}
+
+/*
+ * ======== chnl_destroy ========
+ * Purpose:
+ * Close all open channels, and destroy the channel manager.
+ */
+int chnl_destroy(struct chnl_mgr *hchnl_mgr)
+{
+ struct chnl_mgr_ *chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
+ struct bridge_drv_interface *intf_fxns;
+ int status;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (chnl_mgr_obj) {
+ intf_fxns = chnl_mgr_obj->intf_fxns;
+ /* Let Bridge channel module destroy the chnl_mgr: */
+ status = (*intf_fxns->pfn_chnl_destroy) (hchnl_mgr);
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== chnl_exit ========
+ * Purpose:
+ * Discontinue usage of the CHNL module.
+ */
+void chnl_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== chnl_init ========
+ * Purpose:
+ * Initialize the CHNL module's private state.
+ */
+bool chnl_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/chnlobj.h b/drivers/staging/tidspbridge/pmgr/chnlobj.h
new file mode 100644
index 00000000000..6795e0aa8fd
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/chnlobj.h
@@ -0,0 +1,46 @@
+/*
+ * chnlobj.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structure subcomponents of channel class library channel objects which
+ * are exposed to DSP API from Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CHNLOBJ_
+#define CHNLOBJ_
+
+#include <dspbridge/chnldefs.h>
+#include <dspbridge/dspdefs.h>
+
+/*
+ * This struct is the first field in a chnl_mgr struct. Other,
+ * implementation-specific fields follow this structure in memory.
+ */
+struct chnl_mgr_ {
+ /* These must be the first fields in a chnl_mgr struct: */
+
+ /* Function interface to Bridge driver. */
+ struct bridge_drv_interface *intf_fxns;
+};
+
+/*
+ * This struct is the first field in a chnl_object struct. Other,
+ * implementation specific fields follow this structure in memory.
+ */
+struct chnl_object_ {
+ /* These must be the first fields in a chnl_object struct: */
+ struct chnl_mgr_ *chnl_mgr_obj; /* Pointer back to channel manager. */
+};
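+
+/*
+ * Usage sketch: because these structs are the first fields of the
+ * driver-side objects, the DSP API layer can recover them with a plain
+ * cast, e.g.
+ *
+ *	struct chnl_mgr_ *chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
+ *	intf_fxns = chnl_mgr_obj->intf_fxns;
+ *
+ * as done in chnl_create() and chnl_destroy().
+ */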
+
+#endif /* CHNLOBJ_ */
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
new file mode 100644
index 00000000000..ce3dc8822af
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -0,0 +1,1154 @@
+/*
+ * cmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Communication(Shared) Memory Management(CMM) module provides
+ * shared memory management services for DSP/BIOS Bridge data streaming
+ * and messaging.
+ *
+ * Multiple shared memory segments can be registered with CMM.
+ * Each registered SM segment is represented by a SM "allocator" that
+ * describes a block of physically contiguous shared memory used for
+ * future allocations by CMM.
+ *
+ * Memory is coalesced back to the appropriate heap when a buffer is
+ * freed.
+ *
+ * Notes:
+ * Va: Virtual address.
+ * Pa: Physical or kernel system address.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/list.h>
+#include <dspbridge/sync.h>
+#include <dspbridge/utildefs.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/proc.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/cmm.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+#define NEXT_PA(pnode) (pnode->dw_pa + pnode->ul_size)
+
+/* Other bus/platform translations */
+#define DSPPA2GPPPA(base, x, y) ((x)+(y))
+#define GPPPA2DSPPA(base, x, y) ((x)-(y))
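+
+/*
+ * Illustrative expansion (values assumed): with x = 0x90000000 and
+ * y = dw_dsp_phys_addr_offset * c_factor = 0x10000000,
+ * GPPPA2DSPPA(base, x, y) yields 0x80000000 and
+ * DSPPA2GPPPA(base, 0x80000000, y) yields 0x90000000 again; 'base' is
+ * not used by either macro.
+ */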
+
+/*
+ * Allocators define a block of contiguous memory used for future allocations.
+ *
+ * sma - shared memory allocator.
+ * vma - virtual memory allocator (not used).
+ */
+struct cmm_allocator { /* sma */
+ unsigned int shm_base; /* Start of physical SM block */
+ u32 ul_sm_size; /* Size of SM block in bytes */
+ unsigned int dw_vm_base; /* Start of VM block. (Dev driver
+ * context for 'sma') */
+ u32 dw_dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
+ * SM space */
+ s8 c_factor; /* DSPPa to GPPPa Conversion Factor */
+ unsigned int dw_dsp_base; /* DSP virt base byte address */
+ u32 ul_dsp_size; /* DSP seg size in bytes */
+ struct cmm_object *hcmm_mgr; /* back ref to parent mgr */
+ /* node list of available memory */
+ struct lst_list *free_list_head;
+ /* node list of memory in use */
+ struct lst_list *in_use_list_head;
+};
+
+struct cmm_xlator { /* Pa<->Va translator object */
+	/* CMM object this translator is associated with */
+ struct cmm_object *hcmm_mgr;
+ /*
+ * Client process virtual base address that corresponds to phys SM
+ * base address for translator's ul_seg_id.
+ * Only 1 segment ID currently supported.
+ */
+ unsigned int dw_virt_base; /* virtual base address */
+ u32 ul_virt_size; /* size of virt space in bytes */
+ u32 ul_seg_id; /* Segment Id */
+};
+
+/* CMM Mgr */
+struct cmm_object {
+ /*
+ * Cmm Lock is used to serialize access mem manager for multi-threads.
+ */
+ struct mutex cmm_lock; /* Lock to access cmm mgr */
+ struct lst_list *node_free_list_head; /* Free list of memory nodes */
+ u32 ul_min_block_size; /* Min SM block; default 16 bytes */
+ u32 dw_page_size; /* Memory Page size (1k/4k) */
+ /* GPP SM segment ptrs */
+ struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
+};
+
+/* Default CMM Mgr attributes */
+static struct cmm_mgrattrs cmm_dfltmgrattrs = {
+ /* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
+ 16
+};
+
+/* Default allocation attributes */
+static struct cmm_attrs cmm_dfltalctattrs = {
+ 1 /* ul_seg_id, default segment Id for allocator */
+};
+
+/* Address translator default attrs */
+static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
+ /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
+ 1,
+ 0, /* dw_dsp_bufs */
+ 0, /* dw_dsp_buf_size */
+ NULL, /* vm_base */
+ 0, /* dw_vm_size */
+};
+
+/* SM node representing a block of memory. */
+struct cmm_mnode {
+ struct list_head link; /* must be 1st element */
+ u32 dw_pa; /* Phys addr */
+ u32 dw_va; /* Virtual address in device process context */
+ u32 ul_size; /* SM block size in bytes */
+ u32 client_proc; /* Process that allocated this mem block */
+};
+
+/* ----------------------------------- Globals */
+static u32 refs; /* module reference count */
+
+/* ----------------------------------- Function Prototypes */
+static void add_to_free_list(struct cmm_allocator *allocator,
+ struct cmm_mnode *pnode);
+static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
+ u32 ul_seg_id);
+static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
+ u32 usize);
+static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
+ u32 dw_va, u32 ul_size);
+/* get available slot for new allocator */
+static s32 get_slot(struct cmm_object *cmm_mgr_obj);
+static void un_register_gppsm_seg(struct cmm_allocator *psma);
+
+/*
+ * ======== cmm_calloc_buf ========
+ * Purpose:
+ * Allocate a SM buffer, zero contents, and return the physical address
+ * and optional driver context virtual address(pp_buf_va).
+ *
+ * The freelist is sorted in increasing size order. Get the first
+ * block that satisfies the request and put the remainder back on
+ * the freelist, if it is large enough. The kept block is placed on
+ * the in-use list.
+ */
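+/*
+ * Worked example (sizes assumed): with ul_min_block_size = 16, a request
+ * of usize = 0x400 taken from a 0x1000-byte free block keeps a 0x400 node
+ * on the in-use list and returns the remaining 0xC00 bytes to the freelist
+ * as a new node (the delta_size split below).
+ */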
+void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
+ struct cmm_attrs *pattrs, void **pp_buf_va)
+{
+ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+ void *buf_pa = NULL;
+ struct cmm_mnode *pnode = NULL;
+ struct cmm_mnode *new_node = NULL;
+ struct cmm_allocator *allocator = NULL;
+ u32 delta_size;
+ u8 *pbyte = NULL;
+ s32 cnt;
+
+ if (pattrs == NULL)
+ pattrs = &cmm_dfltalctattrs;
+
+ if (pp_buf_va != NULL)
+ *pp_buf_va = NULL;
+
+ if (cmm_mgr_obj && (usize != 0)) {
+ if (pattrs->ul_seg_id > 0) {
+ /* SegId > 0 is SM */
+ /* get the allocator object for this segment id */
+ allocator =
+ get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
+ /* keep block size a multiple of ul_min_block_size */
+ usize =
+ ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
+ 1))
+ + cmm_mgr_obj->ul_min_block_size;
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ pnode = get_free_block(allocator, usize);
+ }
+ if (pnode) {
+ delta_size = (pnode->ul_size - usize);
+ if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
+ /* create a new block with the leftovers and
+ * add to freelist */
+ new_node =
+ get_node(cmm_mgr_obj, pnode->dw_pa + usize,
+ pnode->dw_va + usize,
+ (u32) delta_size);
+ /* leftovers go free */
+ add_to_free_list(allocator, new_node);
+ /* adjust our node's size */
+ pnode->ul_size = usize;
+ }
+ /* Tag node with client process requesting allocation
+ * We'll need to free up a process's alloc'd SM if the
+ * client process goes away.
+ */
+ /* Return TGID instead of process handle */
+ pnode->client_proc = current->tgid;
+
+ /* put our node on InUse list */
+ lst_put_tail(allocator->in_use_list_head,
+ (struct list_head *)pnode);
+ buf_pa = (void *)pnode->dw_pa; /* physical address */
+ /* clear mem */
+ pbyte = (u8 *) pnode->dw_va;
+ for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
+ *pbyte = 0;
+
+ if (pp_buf_va != NULL) {
+ /* Virtual address */
+ *pp_buf_va = (void *)pnode->dw_va;
+ }
+ }
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ }
+ return buf_pa;
+}
+
+/*
+ * ======== cmm_create ========
+ * Purpose:
+ * Create a communication memory manager object.
+ */
+int cmm_create(struct cmm_object **ph_cmm_mgr,
+ struct dev_object *hdev_obj,
+ const struct cmm_mgrattrs *mgr_attrts)
+{
+ struct cmm_object *cmm_obj = NULL;
+ int status = 0;
+ struct util_sysinfo sys_info;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(ph_cmm_mgr != NULL);
+
+ *ph_cmm_mgr = NULL;
+ /* create, zero, and tag a cmm mgr object */
+ cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
+ if (cmm_obj != NULL) {
+ if (mgr_attrts == NULL)
+ mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
+
+ /* 4 bytes minimum */
+ DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
+ /* save away smallest block allocation for this cmm mgr */
+ cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
+ /* save away the systems memory page size */
+ sys_info.dw_page_size = PAGE_SIZE;
+ sys_info.dw_allocation_granularity = PAGE_SIZE;
+ sys_info.dw_number_of_processors = 1;
+
+ cmm_obj->dw_page_size = sys_info.dw_page_size;
+
+		/* Note: the DSP SM segment table (pa_gppsm_seg_tab[]) is
+		 * already zeroed by the kzalloc() above */
+
+ /* create node free list */
+ cmm_obj->node_free_list_head =
+ kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (cmm_obj->node_free_list_head == NULL) {
+ status = -ENOMEM;
+ cmm_destroy(cmm_obj, true);
+ } else {
+ INIT_LIST_HEAD(&cmm_obj->
+ node_free_list_head->head);
+ mutex_init(&cmm_obj->cmm_lock);
+ *ph_cmm_mgr = cmm_obj;
+ }
+ } else {
+ status = -ENOMEM;
+ }
+ return status;
+}
+
+/*
+ * ======== cmm_destroy ========
+ * Purpose:
+ * Release the communication memory manager resources.
+ */
+int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
+{
+ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+ struct cmm_info temp_info;
+ int status = 0;
+ s32 slot_seg;
+ struct cmm_mnode *pnode;
+
+ DBC_REQUIRE(refs > 0);
+ if (!hcmm_mgr) {
+ status = -EFAULT;
+ return status;
+ }
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ /* If not force then fail if outstanding allocations exist */
+ if (!force) {
+ /* Check for outstanding memory allocations */
+ status = cmm_get_info(hcmm_mgr, &temp_info);
+ if (!status) {
+ if (temp_info.ul_total_in_use_cnt > 0) {
+ /* outstanding allocations */
+ status = -EPERM;
+ }
+ }
+ }
+ if (!status) {
+ /* UnRegister SM allocator */
+ for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
+ if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
+ un_register_gppsm_seg
+ (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
+ /* Set slot to NULL for future reuse */
+ cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
+ }
+ }
+ }
+ if (cmm_mgr_obj->node_free_list_head != NULL) {
+ /* Free the free nodes */
+ while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
+ pnode = (struct cmm_mnode *)
+ lst_get_head(cmm_mgr_obj->node_free_list_head);
+ kfree(pnode);
+ }
+ /* delete NodeFreeList list */
+ kfree(cmm_mgr_obj->node_free_list_head);
+ }
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ if (!status) {
+ /* delete CS & cmm mgr object */
+ mutex_destroy(&cmm_mgr_obj->cmm_lock);
+ kfree(cmm_mgr_obj);
+ }
+ return status;
+}
+
+/*
+ * ======== cmm_exit ========
+ * Purpose:
+ * Discontinue usage of module; free resources when reference count
+ * reaches 0.
+ */
+void cmm_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+}
+
+/*
+ * ======== cmm_free_buf ========
+ * Purpose:
+ * Free the given buffer.
+ */
+int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
+ u32 ul_seg_id)
+{
+ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+ int status = -EFAULT;
+ struct cmm_mnode *mnode_obj = NULL;
+ struct cmm_allocator *allocator = NULL;
+ struct cmm_attrs *pattrs;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(buf_pa != NULL);
+
+ if (ul_seg_id == 0) {
+ pattrs = &cmm_dfltalctattrs;
+ ul_seg_id = pattrs->ul_seg_id;
+ }
+ if (!hcmm_mgr || !(ul_seg_id > 0)) {
+ status = -EFAULT;
+ return status;
+ }
+ /* get the allocator for this segment id */
+ allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
+ if (allocator != NULL) {
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ mnode_obj =
+ (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
+ while (mnode_obj) {
+ if ((u32) buf_pa == mnode_obj->dw_pa) {
+ /* Found it */
+ lst_remove_elem(allocator->in_use_list_head,
+ (struct list_head *)mnode_obj);
+ /* back to freelist */
+ add_to_free_list(allocator, mnode_obj);
+ status = 0; /* all right! */
+ break;
+ }
+ /* next node. */
+ mnode_obj = (struct cmm_mnode *)
+ lst_next(allocator->in_use_list_head,
+ (struct list_head *)mnode_obj);
+ }
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ }
+ return status;
+}
+
+/*
+ * ======== cmm_get_handle ========
+ * Purpose:
+ * Return the communication memory manager object for this device.
+ * This is typically called from the client process.
+ */
+int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
+{
+ int status = 0;
+ struct dev_object *hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(ph_cmm_mgr != NULL);
+ if (hprocessor != NULL)
+ status = proc_get_dev_object(hprocessor, &hdev_obj);
+ else
+ hdev_obj = dev_get_first(); /* default */
+
+ if (!status)
+ status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);
+
+ return status;
+}
+
+/*
+ * ======== cmm_get_info ========
+ * Purpose:
+ * Return the current memory utilization information.
+ */
+int cmm_get_info(struct cmm_object *hcmm_mgr,
+ struct cmm_info *cmm_info_obj)
+{
+ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+ u32 ul_seg;
+ int status = 0;
+ struct cmm_allocator *altr;
+ struct cmm_mnode *mnode_obj = NULL;
+
+ DBC_REQUIRE(cmm_info_obj != NULL);
+
+ if (!hcmm_mgr) {
+ status = -EFAULT;
+ return status;
+ }
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */
+ /* Total # of outstanding alloc */
+ cmm_info_obj->ul_total_in_use_cnt = 0;
+ /* min block size */
+ cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
+ /* check SM memory segments */
+ for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
+ /* get the allocator object for this segment id */
+ altr = get_allocator(cmm_mgr_obj, ul_seg);
+ if (altr != NULL) {
+ cmm_info_obj->ul_num_gppsm_segs++;
+ cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
+ altr->shm_base - altr->ul_dsp_size;
+ cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
+ altr->ul_dsp_size + altr->ul_sm_size;
+ cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
+ altr->shm_base;
+ cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
+ altr->ul_sm_size;
+ cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
+ altr->dw_dsp_base;
+ cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
+ altr->ul_dsp_size;
+ cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
+ altr->dw_vm_base - altr->ul_dsp_size;
+ cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
+ mnode_obj = (struct cmm_mnode *)
+ lst_first(altr->in_use_list_head);
+ /* Count inUse blocks */
+ while (mnode_obj) {
+ cmm_info_obj->ul_total_in_use_cnt++;
+ cmm_info_obj->seg_info[ul_seg -
+ 1].ul_in_use_cnt++;
+ /* next node. */
+ mnode_obj = (struct cmm_mnode *)
+ lst_next(altr->in_use_list_head,
+ (struct list_head *)mnode_obj);
+ }
+ }
+ } /* end for */
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ return status;
+}
+
+/*
+ * ======== cmm_init ========
+ * Purpose:
+ * Initializes private state of CMM module.
+ */
+bool cmm_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
+
+/*
+ * ======== cmm_register_gppsm_seg ========
+ * Purpose:
+ * Register a block of SM with the CMM to be used for later GPP SM
+ * allocations.
+ */
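+/*
+ * Layout sketch (addresses assumed): a segment registered with
+ * dw_gpp_base_pa = 0x90000000, ul_size = 0x100000 and ul_dsp_size = 0x80000
+ * is later reported by cmm_get_info() with dw_seg_base_pa = 0x8ff80000
+ * (shm_base - ul_dsp_size) and ul_total_seg_size = 0x180000, i.e. the
+ * DSP-only region is taken to sit immediately below the GPP SM block.
+ */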
+int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
+ u32 dw_gpp_base_pa, u32 ul_size,
+ u32 dsp_addr_offset, s8 c_factor,
+ u32 dw_dsp_base, u32 ul_dsp_size,
+ u32 *sgmt_id, u32 gpp_base_va)
+{
+ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+ struct cmm_allocator *psma = NULL;
+ int status = 0;
+ struct cmm_mnode *new_node;
+ s32 slot_seg;
+
+ DBC_REQUIRE(ul_size > 0);
+ DBC_REQUIRE(sgmt_id != NULL);
+ DBC_REQUIRE(dw_gpp_base_pa != 0);
+ DBC_REQUIRE(gpp_base_va != 0);
+ DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
+ (c_factor >= CMM_SUBFROMDSPPA));
+ dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
+ "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
+ dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
+ ul_dsp_size, gpp_base_va);
+ if (!hcmm_mgr) {
+ status = -EFAULT;
+ return status;
+ }
+ /* make sure we have room for another allocator */
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ slot_seg = get_slot(cmm_mgr_obj);
+ if (slot_seg < 0) {
+ /* get a slot number */
+ status = -EPERM;
+ goto func_end;
+ }
+ /* Check if input ul_size is big enough to alloc at least one block */
+ if (ul_size < cmm_mgr_obj->ul_min_block_size) {
+ status = -EINVAL;
+ goto func_end;
+ }
+
+ /* create, zero, and tag an SM allocator object */
+ psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
+ if (psma != NULL) {
+ psma->hcmm_mgr = hcmm_mgr; /* ref to parent */
+ psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
+ psma->ul_sm_size = ul_size; /* SM segment size in bytes */
+ psma->dw_vm_base = gpp_base_va;
+ psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
+ psma->c_factor = c_factor;
+ psma->dw_dsp_base = dw_dsp_base;
+ psma->ul_dsp_size = ul_dsp_size;
+ if (psma->dw_vm_base == 0) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* return the actual segment identifier */
+ *sgmt_id = (u32) slot_seg + 1;
+ /* create memory free list */
+ psma->free_list_head = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (psma->free_list_head == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ INIT_LIST_HEAD(&psma->free_list_head->head);
+
+ /* create memory in-use list */
+ psma->in_use_list_head = kzalloc(sizeof(struct
+ lst_list), GFP_KERNEL);
+ if (psma->in_use_list_head == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ INIT_LIST_HEAD(&psma->in_use_list_head->head);
+
+		/* Get a mem node for this block of memory */
+ new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
+ psma->dw_vm_base, ul_size);
+ /* Place node on the SM allocator's free list */
+ if (new_node) {
+ lst_put_tail(psma->free_list_head,
+ (struct list_head *)new_node);
+ } else {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ } else {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ /* make entry */
+ cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;
+
+func_end:
+ if (status && psma) {
+ /* Cleanup allocator */
+ un_register_gppsm_seg(psma);
+ }
+
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ return status;
+}
+
+/*
+ * ======== cmm_un_register_gppsm_seg ========
+ * Purpose:
+ * UnRegister GPP SM segments with the CMM.
+ */
+int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
+ u32 ul_seg_id)
+{
+ struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+ int status = 0;
+ struct cmm_allocator *psma;
+ u32 ul_id = ul_seg_id;
+
+ DBC_REQUIRE(ul_seg_id > 0);
+ if (hcmm_mgr) {
+ if (ul_seg_id == CMM_ALLSEGMENTS)
+ ul_id = 1;
+
+ if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
+ while (ul_id <= CMM_MAXGPPSEGS) {
+ mutex_lock(&cmm_mgr_obj->cmm_lock);
+ /* slot = seg_id-1 */
+ psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
+ if (psma != NULL) {
+ un_register_gppsm_seg(psma);
+ /* Set alctr ptr to NULL for future
+ * reuse */
+ cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
+ 1] = NULL;
+ } else if (ul_seg_id != CMM_ALLSEGMENTS) {
+ status = -EPERM;
+ }
+ mutex_unlock(&cmm_mgr_obj->cmm_lock);
+ if (ul_seg_id != CMM_ALLSEGMENTS)
+ break;
+
+ ul_id++;
+ } /* end while */
+ } else {
+ status = -EINVAL;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== un_register_gppsm_seg ========
+ * Purpose:
+ * UnRegister the SM allocator by freeing all its resources and
+ * nulling cmm mgr table entry.
+ * Note:
+ * This routine is always called within cmm lock crit sect.
+ */
+static void un_register_gppsm_seg(struct cmm_allocator *psma)
+{
+ struct cmm_mnode *mnode_obj = NULL;
+ struct cmm_mnode *next_node = NULL;
+
+ DBC_REQUIRE(psma != NULL);
+ if (psma->free_list_head != NULL) {
+ /* free nodes on free list */
+ mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
+ while (mnode_obj) {
+ next_node =
+ (struct cmm_mnode *)lst_next(psma->free_list_head,
+ (struct list_head *)
+ mnode_obj);
+ lst_remove_elem(psma->free_list_head,
+ (struct list_head *)mnode_obj);
+ kfree((void *)mnode_obj);
+ /* next node. */
+ mnode_obj = next_node;
+ }
+ kfree(psma->free_list_head); /* delete freelist */
+ /* free nodes on InUse list */
+ mnode_obj =
+ (struct cmm_mnode *)lst_first(psma->in_use_list_head);
+ while (mnode_obj) {
+ next_node =
+ (struct cmm_mnode *)lst_next(psma->in_use_list_head,
+ (struct list_head *)
+ mnode_obj);
+ lst_remove_elem(psma->in_use_list_head,
+ (struct list_head *)mnode_obj);
+ kfree((void *)mnode_obj);
+ /* next node. */
+ mnode_obj = next_node;
+ }
+ kfree(psma->in_use_list_head); /* delete InUse list */
+ }
+ if ((void *)psma->dw_vm_base != NULL)
+ MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);
+
+ /* Free allocator itself */
+ kfree(psma);
+}
+
+/*
+ * ======== get_slot ========
+ * Purpose:
+ * An available slot # is returned. Returns negative on failure.
+ */
+static s32 get_slot(struct cmm_object *cmm_mgr_obj)
+{
+ s32 slot_seg = -1; /* neg on failure */
+ DBC_REQUIRE(cmm_mgr_obj != NULL);
+ /* get first available slot in cmm mgr SMSegTab[] */
+ for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
+ if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
+ break;
+
+ }
+ if (slot_seg == CMM_MAXGPPSEGS)
+ slot_seg = -1; /* failed */
+
+ return slot_seg;
+}
+
+/*
+ * ======== get_node ========
+ * Purpose:
+ * Get a memory node from freelist or create a new one.
+ */
+static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
+ u32 dw_va, u32 ul_size)
+{
+ struct cmm_mnode *pnode = NULL;
+
+ DBC_REQUIRE(cmm_mgr_obj != NULL);
+ DBC_REQUIRE(dw_pa != 0);
+ DBC_REQUIRE(dw_va != 0);
+ DBC_REQUIRE(ul_size != 0);
+ /* Check cmm mgr's node freelist */
+ if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
+ pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
+ } else {
+ /* surely a valid element */
+ pnode = (struct cmm_mnode *)
+ lst_get_head(cmm_mgr_obj->node_free_list_head);
+ }
+ if (pnode) {
+ lst_init_elem((struct list_head *)pnode); /* set self */
+ pnode->dw_pa = dw_pa; /* Physical addr of start of block */
+ pnode->dw_va = dw_va; /* Virtual " " */
+ pnode->ul_size = ul_size; /* Size of block */
+ }
+ return pnode;
+}
+
+/*
+ * ======== delete_node ========
+ * Purpose:
+ * Put a memory node on the cmm nodelist for later use.
+ * Doesn't actually delete the node. Heap thrashing friendly.
+ */
+static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
+{
+ DBC_REQUIRE(pnode != NULL);
+ lst_init_elem((struct list_head *)pnode); /* init .self ptr */
+ lst_put_tail(cmm_mgr_obj->node_free_list_head,
+ (struct list_head *)pnode);
+}
+
+/*
+ * ======== get_free_block ========
+ * Purpose:
+ * Scan the free block list and return the first block that satisfies
+ * the size.
+ */
+static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
+ u32 usize)
+{
+ if (allocator) {
+ struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
+ lst_first(allocator->free_list_head);
+ while (mnode_obj) {
+ if (usize <= (u32) mnode_obj->ul_size) {
+ lst_remove_elem(allocator->free_list_head,
+ (struct list_head *)mnode_obj);
+ return mnode_obj;
+ }
+ /* next node. */
+ mnode_obj = (struct cmm_mnode *)
+ lst_next(allocator->free_list_head,
+ (struct list_head *)mnode_obj);
+ }
+ }
+ return NULL;
+}
+
+/*
+ * ======== add_to_free_list ========
+ * Purpose:
+ * Coalesce the node into the freelist in ascending size order.
+ */
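+/*
+ * Worked example (addresses assumed): freeing a node covering
+ * [0x1400, 0x1800) while the freelist already holds [0x1000, 0x1400) and
+ * [0x1800, 0x2000) merges all three into a single [0x1000, 0x2000) node,
+ * which is then re-inserted in ascending size order.
+ */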
+static void add_to_free_list(struct cmm_allocator *allocator,
+ struct cmm_mnode *pnode)
+{
+ struct cmm_mnode *node_prev = NULL;
+ struct cmm_mnode *node_next = NULL;
+ struct cmm_mnode *mnode_obj;
+ u32 dw_this_pa;
+ u32 dw_next_pa;
+
+ DBC_REQUIRE(pnode != NULL);
+ DBC_REQUIRE(allocator != NULL);
+ dw_this_pa = pnode->dw_pa;
+ dw_next_pa = NEXT_PA(pnode);
+ mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
+ while (mnode_obj) {
+ if (dw_this_pa == NEXT_PA(mnode_obj)) {
+ /* found the block ahead of this one */
+ node_prev = mnode_obj;
+ } else if (dw_next_pa == mnode_obj->dw_pa) {
+ node_next = mnode_obj;
+ }
+ if ((node_prev == NULL) || (node_next == NULL)) {
+ /* next node. */
+ mnode_obj = (struct cmm_mnode *)
+ lst_next(allocator->free_list_head,
+ (struct list_head *)mnode_obj);
+ } else {
+ /* got 'em */
+ break;
+ }
+ } /* while */
+ if (node_prev != NULL) {
+ /* combine with previous block */
+ lst_remove_elem(allocator->free_list_head,
+ (struct list_head *)node_prev);
+ /* grow node to hold both */
+ pnode->ul_size += node_prev->ul_size;
+ pnode->dw_pa = node_prev->dw_pa;
+ pnode->dw_va = node_prev->dw_va;
+ /* place node on mgr nodeFreeList */
+ delete_node((struct cmm_object *)allocator->hcmm_mgr,
+ node_prev);
+ }
+ if (node_next != NULL) {
+ /* combine with next block */
+ lst_remove_elem(allocator->free_list_head,
+ (struct list_head *)node_next);
+		/* grow the node */
+ pnode->ul_size += node_next->ul_size;
+ /* place node on mgr nodeFreeList */
+ delete_node((struct cmm_object *)allocator->hcmm_mgr,
+ node_next);
+ }
+ /* Now, let's add to freelist in increasing size order */
+ mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
+ while (mnode_obj) {
+ if (pnode->ul_size <= mnode_obj->ul_size)
+ break;
+
+ /* next node. */
+ mnode_obj =
+ (struct cmm_mnode *)lst_next(allocator->free_list_head,
+ (struct list_head *)mnode_obj);
+ }
+ /* if mnode_obj is NULL then add our pnode to the end of the freelist */
+ if (mnode_obj == NULL) {
+ lst_put_tail(allocator->free_list_head,
+ (struct list_head *)pnode);
+ } else {
+ /* insert our node before the current traversed node */
+ lst_insert_before(allocator->free_list_head,
+ (struct list_head *)pnode,
+ (struct list_head *)mnode_obj);
+ }
+}
+
+/*
+ * ======== get_allocator ========
+ * Purpose:
+ * Return the allocator for the given SM Segid.
+ * SegIds: 1,2,3..max.
+ */
+static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
+ u32 ul_seg_id)
+{
+ struct cmm_allocator *allocator = NULL;
+
+ DBC_REQUIRE(cmm_mgr_obj != NULL);
+ DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
+ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
+ if (allocator != NULL) {
+ /* make sure it's for real */
+ if (!allocator) {
+ allocator = NULL;
+ DBC_ASSERT(false);
+ }
+ }
+ return allocator;
+}
+
+/*
+ * The CMM_Xlator[xxx] routines below are used by Node and Stream
+ * to perform SM address translation to the client process address space.
+ * A "translator" object is created by a node/stream for each SM seg used.
+ */
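+
+/*
+ * Typical translator lifecycle (sketch; the ordering is inferred from the
+ * routines below and the variable names are assumptions):
+ *
+ *	struct cmm_xlatorobject *xlator;
+ *	u8 *va_base;
+ *	u32 va_out;
+ *	void *pa;
+ *
+ *	cmm_xlator_create(&xlator, hcmm_mgr, NULL);
+ *	cmm_xlator_info(xlator, &va_base, va_size, seg_id, true);
+ *	pa = cmm_xlator_alloc_buf(xlator, &va_out, size);
+ *	...
+ *	cmm_xlator_free_buf(xlator, (void *)va_out);
+ *	cmm_xlator_delete(xlator, false);
+ */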
+
+/*
+ * ======== cmm_xlator_create ========
+ * Purpose:
+ * Create an address translator object.
+ */
+int cmm_xlator_create(struct cmm_xlatorobject **xlator,
+ struct cmm_object *hcmm_mgr,
+ struct cmm_xlatorattrs *xlator_attrs)
+{
+ struct cmm_xlator *xlator_object = NULL;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(xlator != NULL);
+ DBC_REQUIRE(hcmm_mgr != NULL);
+
+ *xlator = NULL;
+ if (xlator_attrs == NULL)
+ xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */
+
+ xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
+ if (xlator_object != NULL) {
+ xlator_object->hcmm_mgr = hcmm_mgr; /* ref back to CMM */
+ /* SM seg_id */
+ xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
+ } else {
+ status = -ENOMEM;
+ }
+ if (!status)
+ *xlator = (struct cmm_xlatorobject *)xlator_object;
+
+ return status;
+}
+
+/*
+ * ======== cmm_xlator_delete ========
+ * Purpose:
+ * Free the Xlator resources.
+ * VM gets freed later.
+ */
+int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool force)
+{
+ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+
+ DBC_REQUIRE(refs > 0);
+
+ kfree(xlator_obj);
+
+ return 0;
+}
+
+/*
+ * ======== cmm_xlator_alloc_buf ========
+ */
+void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
+ u32 pa_size)
+{
+ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+ void *pbuf = NULL;
+ void *tmp_va_buff;
+ struct cmm_attrs attrs;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(xlator != NULL);
+ DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
+ DBC_REQUIRE(va_buf != NULL);
+ DBC_REQUIRE(pa_size > 0);
+ DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+
+ if (xlator_obj) {
+ attrs.ul_seg_id = xlator_obj->ul_seg_id;
+ __raw_writel(0, va_buf);
+ /* Alloc SM */
+ pbuf =
+ cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
+ if (pbuf) {
+ /* convert to translator(node/strm) process Virtual
+ * address */
+ tmp_va_buff = cmm_xlator_translate(xlator,
+ pbuf, CMM_PA2VA);
+ __raw_writel((u32)tmp_va_buff, va_buf);
+ }
+ }
+ return pbuf;
+}
+
+/*
+ * ======== cmm_xlator_free_buf ========
+ * Purpose:
+ * Free the given SM buffer and descriptor.
+ * Does not free virtual memory.
+ */
+int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
+{
+ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+ int status = -EPERM;
+ void *buf_pa = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(buf_va != NULL);
+ DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+
+ if (xlator_obj) {
+ /* convert Va to Pa so we can free it. */
+ buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
+ if (buf_pa) {
+ status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
+ xlator_obj->ul_seg_id);
+ if (status) {
+ /* Uh oh, this shouldn't happen. Descriptor
+ * gone! */
+ DBC_ASSERT(false); /* CMM is leaking mem */
+ }
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== cmm_xlator_info ========
+ * Purpose:
+ * Set/Get translator info.
+ */
+int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
+ u32 ul_size, u32 segm_id, bool set_info)
+{
+ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(paddr != NULL);
+ DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
+
+ if (xlator_obj) {
+ if (set_info) {
+			/* set the translator's virtual address range */
+ xlator_obj->dw_virt_base = (u32) *paddr;
+ xlator_obj->ul_virt_size = ul_size;
+ } else { /* return virt base address */
+ *paddr = (u8 *) xlator_obj->dw_virt_base;
+ }
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== cmm_xlator_translate ========
+ */
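+/*
+ * Translation sketch (illustrative, mirrors the code below): for CMM_PA2VA
+ * the GPP virtual address is computed as
+ *
+ *	offset = paddr - (shm_base - ul_dsp_size);
+ *	va     = dw_virt_base + offset;
+ *
+ * and NULL is returned when va falls outside
+ * [dw_virt_base, dw_virt_base + ul_virt_size).
+ */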
+void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
+ enum cmm_xlatetype xtype)
+{
+ u32 dw_addr_xlate = 0;
+ struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+ struct cmm_object *cmm_mgr_obj = NULL;
+ struct cmm_allocator *allocator = NULL;
+ u32 dw_offset = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(paddr != NULL);
+ DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
+
+ if (!xlator_obj)
+ goto loop_cont;
+
+ cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
+ /* get this translator's default SM allocator */
+ DBC_ASSERT(xlator_obj->ul_seg_id > 0);
+ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
+ if (!allocator)
+ goto loop_cont;
+
+ if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
+ (xtype == CMM_PA2VA)) {
+ if (xtype == CMM_PA2VA) {
+ /* Gpp Va = Va Base + offset */
+ dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
+ allocator->
+ ul_dsp_size);
+ dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
+ /* Check if translated Va base is in range */
+ if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
+ (dw_addr_xlate >=
+ (xlator_obj->dw_virt_base +
+ xlator_obj->ul_virt_size))) {
+ dw_addr_xlate = 0; /* bad address */
+ }
+ } else {
+ /* Gpp PA = Gpp Base + offset */
+ dw_offset =
+ (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
+ dw_addr_xlate =
+ allocator->shm_base - allocator->ul_dsp_size +
+ dw_offset;
+ }
+ } else {
+ dw_addr_xlate = (u32) paddr;
+ }
+	/* Now convert address to proper target physical address if needed */
+ if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
+ /* Got Gpp Pa now, convert to DSP Pa */
+ dw_addr_xlate =
+ GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
+ dw_addr_xlate,
+ allocator->dw_dsp_phys_addr_offset *
+ allocator->c_factor);
+ } else if (xtype == CMM_DSPPA2PA) {
+ /* Got DSP Pa, convert to GPP Pa */
+ dw_addr_xlate =
+ DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
+ dw_addr_xlate,
+ allocator->dw_dsp_phys_addr_offset *
+ allocator->c_factor);
+ }
+loop_cont:
+ return (void *)dw_addr_xlate;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
new file mode 100644
index 00000000000..52989ab67cf
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -0,0 +1,652 @@
+/*
+ * cod.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This module implements DSP code management for the DSP/BIOS Bridge
+ * environment. It is mostly a thin wrapper around the DBLL loader.
+ *
+ * This module provides an interface for loading both static and
+ * dynamic code objects onto DSP systems.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/ldr.h>
+
+/* ----------------------------------- Platform Manager */
+/* Include appropriate loader header file */
+#include <dspbridge/dbll.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/cod.h>
+
+/*
+ * ======== cod_manager ========
+ */
+struct cod_manager {
+ struct dbll_tar_obj *target;
+ struct dbll_library_obj *base_lib;
+ bool loaded; /* Base library loaded? */
+ u32 ul_entry;
+ struct ldr_module *dll_obj;
+ struct dbll_fxns fxns;
+ struct dbll_attrs attrs;
+ char sz_zl_file[COD_MAXPATHLENGTH];
+};
+
+/*
+ * ======== cod_libraryobj ========
+ */
+struct cod_libraryobj {
+ struct dbll_library_obj *dbll_lib;
+ struct cod_manager *cod_mgr;
+};
+
+static u32 refs = 0L;
+
+static struct dbll_fxns ldr_fxns = {
+ (dbll_close_fxn) dbll_close,
+ (dbll_create_fxn) dbll_create,
+ (dbll_delete_fxn) dbll_delete,
+ (dbll_exit_fxn) dbll_exit,
+ (dbll_get_attrs_fxn) dbll_get_attrs,
+ (dbll_get_addr_fxn) dbll_get_addr,
+ (dbll_get_c_addr_fxn) dbll_get_c_addr,
+ (dbll_get_sect_fxn) dbll_get_sect,
+ (dbll_init_fxn) dbll_init,
+ (dbll_load_fxn) dbll_load,
+ (dbll_load_sect_fxn) dbll_load_sect,
+ (dbll_open_fxn) dbll_open,
+ (dbll_read_sect_fxn) dbll_read_sect,
+ (dbll_set_attrs_fxn) dbll_set_attrs,
+ (dbll_unload_fxn) dbll_unload,
+ (dbll_unload_sect_fxn) dbll_unload_sect,
+};
+
+static bool no_op(void);
+
+/*
+ * File operations (originally were under kfile.c)
+ */
+static s32 cod_f_close(struct file *filp)
+{
+ /* Check for valid handle */
+ if (!filp)
+ return -EFAULT;
+
+ filp_close(filp, NULL);
+
+	/* success */
+	return 0;
+}
+
+static struct file *cod_f_open(const char *psz_file_name, const char *sz_mode)
+{
+ mm_segment_t fs;
+ struct file *filp;
+
+ fs = get_fs();
+ set_fs(get_ds());
+
+ /* ignore given mode and open file as read-only */
+ filp = filp_open(psz_file_name, O_RDONLY, 0);
+
+ if (IS_ERR(filp))
+ filp = NULL;
+
+ set_fs(fs);
+
+ return filp;
+}
+
+static s32 cod_f_read(void __user *pbuffer, s32 size, s32 count,
+ struct file *filp)
+{
+ /* check for valid file handle */
+ if (!filp)
+ return -EFAULT;
+
+ if ((size > 0) && (count > 0) && pbuffer) {
+ u32 dw_bytes_read;
+ mm_segment_t fs;
+
+ /* read from file */
+ fs = get_fs();
+ set_fs(get_ds());
+ dw_bytes_read = filp->f_op->read(filp, pbuffer, size * count,
+ &(filp->f_pos));
+ set_fs(fs);
+
+ if (!dw_bytes_read)
+ return -EBADF;
+
+ return dw_bytes_read / size;
+ }
+
+ return -EINVAL;
+}
+
+static s32 cod_f_seek(struct file *filp, s32 offset, s32 origin)
+{
+ loff_t dw_cur_pos;
+
+ /* check for valid file handle */
+ if (!filp)
+ return -EFAULT;
+
+ /* based on the origin flag, move the internal pointer */
+ dw_cur_pos = filp->f_op->llseek(filp, offset, origin);
+
+ if ((s32) dw_cur_pos < 0)
+ return -EPERM;
+
+	/* success */
+	return 0;
+}
+
+static s32 cod_f_tell(struct file *filp)
+{
+ loff_t dw_cur_pos;
+
+ if (!filp)
+ return -EFAULT;
+
+ /* Get current position */
+ dw_cur_pos = filp->f_op->llseek(filp, 0, SEEK_CUR);
+
+ if ((s32) dw_cur_pos < 0)
+ return -EPERM;
+
+ return dw_cur_pos;
+}
+
+/*
+ * ======== cod_close ========
+ */
+void cod_close(struct cod_libraryobj *lib)
+{
+ struct cod_manager *hmgr;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(lib != NULL);
+ DBC_REQUIRE(lib->cod_mgr);
+
+ hmgr = lib->cod_mgr;
+ hmgr->fxns.close_fxn(lib->dbll_lib);
+
+ kfree(lib);
+}
+
+/*
+ * ======== cod_create ========
+ * Purpose:
+ * Create an object to manage code on a DSP system.
+ * This object can be used to load an initial program image with
+ * arguments that can later be expanded with
+ * dynamically loaded object files.
+ *
+ */
+int cod_create(struct cod_manager **mgr, char *str_zl_file,
+ const struct cod_attrs *attrs)
+{
+ struct cod_manager *mgr_new;
+ struct dbll_attrs zl_attrs;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(mgr != NULL);
+
+ /* assume failure */
+ *mgr = NULL;
+
+ /* we don't support non-default attrs yet */
+ if (attrs != NULL)
+ return -ENOSYS;
+
+ mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL);
+ if (mgr_new == NULL)
+ return -ENOMEM;
+
+ /* Set up loader functions */
+ mgr_new->fxns = ldr_fxns;
+
+ /* initialize the ZL module */
+ mgr_new->fxns.init_fxn();
+
+ zl_attrs.alloc = (dbll_alloc_fxn) no_op;
+ zl_attrs.free = (dbll_free_fxn) no_op;
+ zl_attrs.fread = (dbll_read_fxn) cod_f_read;
+ zl_attrs.fseek = (dbll_seek_fxn) cod_f_seek;
+ zl_attrs.ftell = (dbll_tell_fxn) cod_f_tell;
+ zl_attrs.fclose = (dbll_f_close_fxn) cod_f_close;
+ zl_attrs.fopen = (dbll_f_open_fxn) cod_f_open;
+ zl_attrs.sym_lookup = NULL;
+ zl_attrs.base_image = true;
+ zl_attrs.log_write = NULL;
+ zl_attrs.log_write_handle = NULL;
+ zl_attrs.write = NULL;
+ zl_attrs.rmm_handle = NULL;
+ zl_attrs.input_params = NULL;
+ zl_attrs.sym_handle = NULL;
+ zl_attrs.sym_arg = NULL;
+
+ mgr_new->attrs = zl_attrs;
+
+ status = mgr_new->fxns.create_fxn(&mgr_new->target, &zl_attrs);
+
+ if (status) {
+ cod_delete(mgr_new);
+ return -ESPIPE;
+ }
+
+ /* return the new manager */
+ *mgr = mgr_new;
+
+ return 0;
+}
+
+/*
+ * ======== cod_delete ========
+ * Purpose:
+ * Delete a code manager object.
+ */
+void cod_delete(struct cod_manager *cod_mgr_obj)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+
+ if (cod_mgr_obj->base_lib) {
+ if (cod_mgr_obj->loaded)
+ cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib,
+ &cod_mgr_obj->attrs);
+
+ cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib);
+ }
+ if (cod_mgr_obj->target) {
+ cod_mgr_obj->fxns.delete_fxn(cod_mgr_obj->target);
+ cod_mgr_obj->fxns.exit_fxn();
+ }
+ kfree(cod_mgr_obj);
+}
+
+/*
+ * ======== cod_exit ========
+ * Purpose:
+ * Discontinue usage of the COD module.
+ *
+ */
+void cod_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== cod_get_base_lib ========
+ * Purpose:
+ * Get handle to the base image DBL library.
+ */
+int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
+ struct dbll_library_obj **plib)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+ DBC_REQUIRE(plib != NULL);
+
+ *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
+
+ return status;
+}
+
+/*
+ * ======== cod_get_base_name ========
+ */
+int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
+ u32 usize)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+ DBC_REQUIRE(sz_name != NULL);
+
+ if (usize <= COD_MAXPATHLENGTH)
+ strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
+ else
+ status = -EPERM;
+
+ return status;
+}
+
+/*
+ * ======== cod_get_entry ========
+ * Purpose:
+ * Retrieve the entry point of a loaded DSP program image
+ *
+ */
+int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+ DBC_REQUIRE(entry_pt != NULL);
+
+ *entry_pt = cod_mgr_obj->ul_entry;
+
+ return 0;
+}
+
+/*
+ * ======== cod_get_loader ========
+ * Purpose:
+ * Get handle to the DBLL loader.
+ */
+int cod_get_loader(struct cod_manager *cod_mgr_obj,
+ struct dbll_tar_obj **loader)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+ DBC_REQUIRE(loader != NULL);
+
+ *loader = (struct dbll_tar_obj *)cod_mgr_obj->target;
+
+ return status;
+}
+
+/*
+ * ======== cod_get_section ========
+ * Purpose:
+ * Retrieve the starting address and length of a section in the COFF file
+ * given the section name.
+ */
+int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
+ u32 *addr, u32 *len)
+{
+ struct cod_manager *cod_mgr_obj;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(lib != NULL);
+ DBC_REQUIRE(lib->cod_mgr);
+ DBC_REQUIRE(str_sect != NULL);
+ DBC_REQUIRE(addr != NULL);
+ DBC_REQUIRE(len != NULL);
+
+ *addr = 0;
+ *len = 0;
+ if (lib != NULL) {
+ cod_mgr_obj = lib->cod_mgr;
+ status = cod_mgr_obj->fxns.get_sect_fxn(lib->dbll_lib, str_sect,
+ addr, len);
+ } else {
+ status = -ESPIPE;
+ }
+
+ DBC_ENSURE(!status || ((*addr == 0) && (*len == 0)));
+
+ return status;
+}
+
+/*
+ * ======== cod_get_sym_value ========
+ * Purpose:
+ * Retrieve the value for the specified symbol. The symbol is first
+ * searched for literally and then, if not found, searched for as a
+ * C symbol.
+ *
+ */
+int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
+ u32 *pul_value)
+{
+ struct dbll_sym_val *dbll_sym;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+ DBC_REQUIRE(str_sym != NULL);
+ DBC_REQUIRE(pul_value != NULL);
+
+ dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n",
+ __func__, cod_mgr_obj, str_sym, pul_value);
+ if (cod_mgr_obj->base_lib) {
+ if (!cod_mgr_obj->fxns.
+ get_addr_fxn(cod_mgr_obj->base_lib, str_sym, &dbll_sym)) {
+ if (!cod_mgr_obj->fxns.
+ get_c_addr_fxn(cod_mgr_obj->base_lib, str_sym,
+ &dbll_sym))
+ return -ESPIPE;
+ }
+ } else {
+ return -ESPIPE;
+ }
+
+ *pul_value = dbll_sym->value;
+
+ return 0;
+}
+
+/*
+ * ======== cod_init ========
+ * Purpose:
+ * Initialize the COD module's private state.
+ *
+ */
+bool cod_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
+ return ret;
+}
+
+/*
+ * ======== cod_load_base ========
+ * Purpose:
+ * Load the initial program image, optionally with command-line arguments,
+ * on the DSP system managed by the supplied handle. The program to be
+ * loaded must be the first element of the args array and must be a fully
+ * qualified pathname.
+ * Details:
+ * If num_argc doesn't match the number of arguments in the args array,
+ * the args array is searched for a NULL terminating entry, and num_argc
+ * is recalculated to reflect this. In this way, NULL-terminated args
+ * arrays are supported even when num_argc overstates the argument count.
+ */
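+/*
+ * Illustrative example (path assumed): with
+ *
+ *	char *args[] = { "/lib/dsp/baseimage.dof", "-v", NULL };
+ *
+ * a caller may pass num_argc = 16; the loop below finds the NULL entry and
+ * trims num_argc back to 2 before the image is loaded.
+ */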
+int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
+ cod_writefxn pfn_write, void *arb, char *envp[])
+{
+ dbll_flags flags;
+ struct dbll_attrs save_attrs;
+ struct dbll_attrs new_attrs;
+ int status;
+ u32 i;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr_obj);
+ DBC_REQUIRE(num_argc > 0);
+ DBC_REQUIRE(args != NULL);
+ DBC_REQUIRE(args[0] != NULL);
+ DBC_REQUIRE(pfn_write != NULL);
+ DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);
+
+ /*
+ * Make sure every argv[] stated in argc has a value, or change argc to
+ * reflect true number in NULL terminated argv array.
+ */
+ for (i = 0; i < num_argc; i++) {
+ if (args[i] == NULL) {
+ num_argc = i;
+ break;
+ }
+ }
+
+ /* set the write function for this operation */
+ cod_mgr_obj->fxns.get_attrs_fxn(cod_mgr_obj->target, &save_attrs);
+
+ new_attrs = save_attrs;
+ new_attrs.write = (dbll_write_fxn) pfn_write;
+ new_attrs.input_params = arb;
+ new_attrs.alloc = (dbll_alloc_fxn) no_op;
+ new_attrs.free = (dbll_free_fxn) no_op;
+ new_attrs.log_write = NULL;
+ new_attrs.log_write_handle = NULL;
+
+ /* Load the image */
+ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
+ status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags,
+ &new_attrs,
+ &cod_mgr_obj->ul_entry);
+ if (status)
+ cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib);
+
+ if (!status)
+ cod_mgr_obj->loaded = true;
+ else
+ cod_mgr_obj->base_lib = NULL;
+
+ return status;
+}
+
+/*
+ * ======== cod_open ========
+ * Open library for reading sections.
+ */
+int cod_open(struct cod_manager *hmgr, char *sz_coff_path,
+ u32 flags, struct cod_libraryobj **lib_obj)
+{
+ int status = 0;
+ struct cod_libraryobj *lib = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hmgr);
+ DBC_REQUIRE(sz_coff_path != NULL);
+ DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
+ DBC_REQUIRE(lib_obj != NULL);
+
+ *lib_obj = NULL;
+
+ lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
+ if (lib == NULL)
+ status = -ENOMEM;
+
+ if (!status) {
+ lib->cod_mgr = hmgr;
+ status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags,
+ &lib->dbll_lib);
+ if (!status)
+ *lib_obj = lib;
+ }
+
+ if (status)
+ pr_err("%s: error status 0x%x, sz_coff_path: %s flags: 0x%x\n",
+ __func__, status, sz_coff_path, flags);
+ return status;
+}
+
+/*
+ * ======== cod_open_base ========
+ * Purpose:
+ * Open base image for reading sections.
+ */
+int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
+ dbll_flags flags)
+{
+ int status = 0;
+ struct dbll_library_obj *lib;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hmgr);
+ DBC_REQUIRE(sz_coff_path != NULL);
+
+ /* if we previously opened a base image, close it now */
+ if (hmgr->base_lib) {
+ if (hmgr->loaded) {
+ hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs);
+ hmgr->loaded = false;
+ }
+ hmgr->fxns.close_fxn(hmgr->base_lib);
+ hmgr->base_lib = NULL;
+ }
+ status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags, &lib);
+ if (!status) {
+ /* hang onto the library for subsequent sym table usage */
+ hmgr->base_lib = lib;
+ strncpy(hmgr->sz_zl_file, sz_coff_path, COD_MAXPATHLENGTH - 1);
+ hmgr->sz_zl_file[COD_MAXPATHLENGTH - 1] = '\0';
+ }
+
+ if (status)
+ pr_err("%s: error status 0x%x sz_coff_path: %s\n", __func__,
+ status, sz_coff_path);
+ return status;
+}
+
+/*
+ * ======== cod_read_section ========
+ * Purpose:
+ * Retrieve the content of a code section given the section name.
+ */
+int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
+ char *str_content, u32 content_size)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(lib != NULL);
+ DBC_REQUIRE(lib->cod_mgr);
+ DBC_REQUIRE(str_sect != NULL);
+ DBC_REQUIRE(str_content != NULL);
+
+ if (lib != NULL)
+ status =
+ lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
+ str_content, content_size);
+ else
+ status = -ESPIPE;
+
+ return status;
+}
+
+/*
+ * ======== no_op ========
+ * Purpose:
+ * No Operation.
+ *
+ */
+static bool no_op(void)
+{
+ return true;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
new file mode 100644
index 00000000000..23406386f61
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -0,0 +1,1585 @@
+/*
+ * dbll.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+#include <dspbridge/gh.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+
+/* Dynamic loader library interface */
+#include <dspbridge/dynamic_loader.h>
+#include <dspbridge/getsection.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/dbll.h>
+#include <dspbridge/rmm.h>
+
+/* Number of buckets for symbol hash table */
+#define MAXBUCKETS 211
+
+/* Max buffer length */
+#define MAXEXPR 128
+
+#define DOFF_ALIGN(x) (((x) + 3) & ~3UL)
+
+/*
+ * ======== struct dbll_tar_obj* ========
+ * A target may have one or more libraries of symbols/code/data loaded
+ * onto it, where a library is simply the symbols/code/data contained
+ * in a DOFF file.
+ */
+/*
+ * ======== dbll_tar_obj ========
+ */
+struct dbll_tar_obj {
+ struct dbll_attrs attrs;
+ struct dbll_library_obj *head; /* List of all opened libraries */
+};
+
+/*
+ * The following 4 typedefs are "super classes" of the dynamic loader
+ * library types used in dynamic loader functions (dynamic_loader.h).
+ */
+/*
+ * ======== dbll_stream ========
+ * Contains dynamic_loader_stream
+ */
+struct dbll_stream {
+ struct dynamic_loader_stream dl_stream;
+ struct dbll_library_obj *lib;
+};
+
+/*
+ * ======== ldr_symbol ========
+ */
+struct ldr_symbol {
+ struct dynamic_loader_sym dl_symbol;
+ struct dbll_library_obj *lib;
+};
+
+/*
+ * ======== dbll_alloc ========
+ */
+struct dbll_alloc {
+ struct dynamic_loader_allocate dl_alloc;
+ struct dbll_library_obj *lib;
+};
+
+/*
+ * ======== dbll_init_obj ========
+ */
+struct dbll_init_obj {
+ struct dynamic_loader_initialize dl_init;
+ struct dbll_library_obj *lib;
+};
+
+/*
+ * ======== DBLL_Library ========
+ * A library handle is returned by dbll_open() and is passed to dbll_load()
+ * to load symbols/code/data, and to dbll_unload(), to remove the
+ * symbols/code/data loaded by dbll_load().
+ */
+
+/*
+ * ======== dbll_library_obj ========
+ */
+struct dbll_library_obj {
+ struct dbll_library_obj *next; /* Next library in target's list */
+ struct dbll_library_obj *prev; /* Previous in the list */
+ struct dbll_tar_obj *target_obj; /* target for this library */
+
+ /* Objects needed by dynamic loader */
+ struct dbll_stream stream;
+ struct ldr_symbol symbol;
+ struct dbll_alloc allocate;
+ struct dbll_init_obj init;
+ void *dload_mod_obj;
+
+ char *file_name; /* COFF file name */
+ void *fp; /* Opaque file handle */
+ u32 entry; /* Entry point */
+ void *desc; /* desc of DOFF file loaded */
+ u32 open_ref; /* Number of times opened */
+ u32 load_ref; /* Number of times loaded */
+ struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */
+ u32 ul_pos;
+};
+
+/*
+ * ======== dbll_symbol ========
+ */
+struct dbll_symbol {
+ struct dbll_sym_val value;
+ char *name;
+};
+
+static void dof_close(struct dbll_library_obj *zl_lib);
+static int dof_open(struct dbll_library_obj *zl_lib);
+static s32 no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
+ ldr_addr locn, struct ldr_section_info *info,
+ unsigned bytsize);
+
+/*
+ * Functions called by dynamic loader
+ *
+ */
+/* dynamic_loader_stream */
+static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
+ unsigned bufsize);
+static int dbll_set_file_posn(struct dynamic_loader_stream *this,
+ unsigned int pos);
+/* dynamic_loader_sym */
+static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
+ const char *name);
+static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
+ *this, const char *name,
+ unsigned module_id);
+static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
+ *this, const char *name,
+ unsigned moduleid);
+static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
+ unsigned module_id);
+static void *allocate(struct dynamic_loader_sym *this, unsigned memsize);
+static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr);
+static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
+ va_list args);
+/* dynamic_loader_allocate */
+static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
+ struct ldr_section_info *info, unsigned align);
+static void rmm_dealloc(struct dynamic_loader_allocate *this,
+ struct ldr_section_info *info);
+
+/* dynamic_loader_initialize */
+static int connect(struct dynamic_loader_initialize *this);
+static int read_mem(struct dynamic_loader_initialize *this, void *buf,
+ ldr_addr addr, struct ldr_section_info *info,
+ unsigned bytes);
+static int write_mem(struct dynamic_loader_initialize *this, void *buf,
+ ldr_addr addr, struct ldr_section_info *info,
+ unsigned nbytes);
+static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
+ struct ldr_section_info *info, unsigned bytes,
+ unsigned val);
+static int execute(struct dynamic_loader_initialize *this, ldr_addr start);
+static void release(struct dynamic_loader_initialize *this);
+
+/* symbol table hash functions */
+static u16 name_hash(void *key, u16 max_bucket);
+static bool name_match(void *key, void *sp);
+static void sym_delete(void *value);
+
+static u32 refs; /* module reference count */
+
+/* Symbol Redefinition */
+static int redefined_symbol;
+static int gbl_search = 1;
+
+/*
+ * ======== dbll_close ========
+ */
+void dbll_close(struct dbll_library_obj *zl_lib)
+{
+ struct dbll_tar_obj *zl_target;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_lib);
+ DBC_REQUIRE(zl_lib->open_ref > 0);
+ zl_target = zl_lib->target_obj;
+ zl_lib->open_ref--;
+ if (zl_lib->open_ref == 0) {
+ /* Remove library from list */
+ if (zl_target->head == zl_lib)
+ zl_target->head = zl_lib->next;
+
+ if (zl_lib->prev)
+ (zl_lib->prev)->next = zl_lib->next;
+
+ if (zl_lib->next)
+ (zl_lib->next)->prev = zl_lib->prev;
+
+ /* Free DOF resources */
+ dof_close(zl_lib);
+ kfree(zl_lib->file_name);
+
+ /* remove symbols from symbol table */
+ if (zl_lib->sym_tab)
+ gh_delete(zl_lib->sym_tab);
+
+ /* remove the library object itself */
+ kfree(zl_lib);
+ zl_lib = NULL;
+ }
+}
+
+/*
+ * ======== dbll_create ========
+ */
+int dbll_create(struct dbll_tar_obj **target_obj,
+ struct dbll_attrs *pattrs)
+{
+ struct dbll_tar_obj *pzl_target;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pattrs != NULL);
+ DBC_REQUIRE(target_obj != NULL);
+
+ /* Allocate DBL target object */
+ pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
+ if (target_obj != NULL) {
+ if (pzl_target == NULL) {
+ *target_obj = NULL;
+ status = -ENOMEM;
+ } else {
+ pzl_target->attrs = *pattrs;
+ *target_obj = (struct dbll_tar_obj *)pzl_target;
+ }
+ DBC_ENSURE((!status && *target_obj) ||
+ (status && *target_obj == NULL));
+ }
+
+ return status;
+}
+
+/*
+ * ======== dbll_delete ========
+ */
+void dbll_delete(struct dbll_tar_obj *target)
+{
+ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_target);
+
+ if (zl_target != NULL)
+ kfree(zl_target);
+
+}
+
+/*
+ * ======== dbll_exit ========
+ * Discontinue usage of DBL module.
+ */
+void dbll_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ if (refs == 0)
+ gh_exit();
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== dbll_get_addr ========
+ * Get address of name in the specified library.
+ */
+bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
+ struct dbll_sym_val **sym_val)
+{
+ struct dbll_symbol *sym;
+ bool status = false;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_lib);
+ DBC_REQUIRE(name != NULL);
+ DBC_REQUIRE(sym_val != NULL);
+ DBC_REQUIRE(zl_lib->sym_tab != NULL);
+
+ sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
+ if (sym != NULL) {
+ *sym_val = &sym->value;
+ status = true;
+ }
+
+ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n",
+ __func__, zl_lib, name, sym_val, status);
+ return status;
+}
+
+/*
+ * ======== dbll_get_attrs ========
+ * Retrieve the attributes of the target.
+ */
+void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
+{
+ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_target);
+ DBC_REQUIRE(pattrs != NULL);
+
+ if ((pattrs != NULL) && (zl_target != NULL))
+ *pattrs = zl_target->attrs;
+
+}
+
+/*
+ * ======== dbll_get_c_addr ========
+ * Get address of a "C" name in the specified library.
+ */
+bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
+ struct dbll_sym_val **sym_val)
+{
+ struct dbll_symbol *sym;
+ char cname[MAXEXPR + 1];
+ bool status = false;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_lib);
+ DBC_REQUIRE(sym_val != NULL);
+ DBC_REQUIRE(zl_lib->sym_tab != NULL);
+ DBC_REQUIRE(name != NULL);
+
+ cname[0] = '_';
+
+ strncpy(cname + 1, name, sizeof(cname) - 2);
+ cname[MAXEXPR] = '\0'; /* ensure '\0' string termination */
+
+ /* Look up the underscore-prefixed "C" name in the symbol table */
+ sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname);
+
+ if (sym != NULL) {
+ *sym_val = &sym->value;
+ status = true;
+ }
+
+ return status;
+}
+
+/*
+ * ======== dbll_get_sect ========
+ * Get the base address and size (in bytes) of a COFF section.
+ */
+int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
+ u32 *psize)
+{
+ u32 byte_size;
+ bool opened_doff = false;
+ const struct ldr_section_info *sect = NULL;
+ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(name != NULL);
+ DBC_REQUIRE(paddr != NULL);
+ DBC_REQUIRE(psize != NULL);
+ DBC_REQUIRE(zl_lib);
+
+ /* If DOFF file is not open, we open it. */
+ if (zl_lib != NULL) {
+ if (zl_lib->fp == NULL) {
+ status = dof_open(zl_lib);
+ if (!status)
+ opened_doff = true;
+
+ } else {
+ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
+ zl_lib->ul_pos,
+ SEEK_SET);
+ }
+ } else {
+ status = -EFAULT;
+ }
+ if (!status) {
+ byte_size = 1;
+ if (dload_get_section_info(zl_lib->desc, name, &sect)) {
+ *paddr = sect->load_addr;
+ *psize = sect->size * byte_size;
+ /* Make sure size is even so byte swapping works correctly */
+ if (*psize % 2)
+ (*psize)++;
+
+ /* Align size */
+ *psize = DOFF_ALIGN(*psize);
+ } else {
+ status = -ENXIO;
+ }
+ }
+ if (opened_doff) {
+ dof_close(zl_lib);
+ opened_doff = false;
+ }
+
+ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, "
+ "status 0x%x\n", __func__, lib, name, paddr, psize, status);
+
+ return status;
+}
+
+/*
+ * ======== dbll_init ========
+ */
+bool dbll_init(void)
+{
+ DBC_REQUIRE(refs >= 0);
+
+ if (refs == 0)
+ gh_init();
+
+ refs++;
+
+ return true;
+}
+
+/*
+ * ======== dbll_load ========
+ */
+int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
+ struct dbll_attrs *attrs, u32 *entry)
+{
+ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+ struct dbll_tar_obj *dbzl;
+ bool got_symbols = true;
+ s32 err;
+ int status = 0;
+ bool opened_doff = false;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_lib);
+ DBC_REQUIRE(entry != NULL);
+ DBC_REQUIRE(attrs != NULL);
+
+ /*
+ * Load if not already loaded.
+ */
+ if (zl_lib->load_ref == 0 || !(flags & DBLL_DYNAMIC)) {
+ dbzl = zl_lib->target_obj;
+ dbzl->attrs = *attrs;
+ /* Create a hash table for symbols if not already created */
+ if (zl_lib->sym_tab == NULL) {
+ got_symbols = false;
+ zl_lib->sym_tab = gh_create(MAXBUCKETS,
+ sizeof(struct dbll_symbol),
+ name_hash,
+ name_match, sym_delete);
+ if (zl_lib->sym_tab == NULL)
+ status = -ENOMEM;
+
+ }
+ /*
+ * Set up objects needed by the dynamic loader
+ */
+ /* Stream */
+ zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer;
+ zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn;
+ zl_lib->stream.lib = zl_lib;
+ /* Symbol */
+ zl_lib->symbol.dl_symbol.find_matching_symbol =
+ dbll_find_symbol;
+ if (got_symbols) {
+ zl_lib->symbol.dl_symbol.add_to_symbol_table =
+ find_in_symbol_table;
+ } else {
+ zl_lib->symbol.dl_symbol.add_to_symbol_table =
+ dbll_add_to_symbol_table;
+ }
+ zl_lib->symbol.dl_symbol.purge_symbol_table =
+ dbll_purge_symbol_table;
+ zl_lib->symbol.dl_symbol.dload_allocate = allocate;
+ zl_lib->symbol.dl_symbol.dload_deallocate = deallocate;
+ zl_lib->symbol.dl_symbol.error_report = dbll_err_report;
+ zl_lib->symbol.lib = zl_lib;
+ /* Allocate */
+ zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc;
+ zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc;
+ zl_lib->allocate.lib = zl_lib;
+ /* Init */
+ zl_lib->init.dl_init.connect = connect;
+ zl_lib->init.dl_init.readmem = read_mem;
+ zl_lib->init.dl_init.writemem = write_mem;
+ zl_lib->init.dl_init.fillmem = fill_mem;
+ zl_lib->init.dl_init.execute = execute;
+ zl_lib->init.dl_init.release = release;
+ zl_lib->init.lib = zl_lib;
+ /* If COFF file is not open, we open it. */
+ if (zl_lib->fp == NULL) {
+ status = dof_open(zl_lib);
+ if (!status)
+ opened_doff = true;
+
+ }
+ if (!status) {
+ zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell))
+ (zl_lib->fp);
+ /* Reset file cursor */
+ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
+ (long)0,
+ SEEK_SET);
+ symbols_reloaded = true;
+ /* The 5th argument, DLOAD_INITBSS, tells the DLL
+ * module to zero-init all BSS sections. In general,
+ * this is not necessary and also increases load time.
+ * We may want to make this configurable by the user */
+ err = dynamic_load_module(&zl_lib->stream.dl_stream,
+ &zl_lib->symbol.dl_symbol,
+ &zl_lib->allocate.dl_alloc,
+ &zl_lib->init.dl_init,
+ DLOAD_INITBSS,
+ &zl_lib->dload_mod_obj);
+
+ if (err != 0) {
+ status = -EILSEQ;
+ } else if (redefined_symbol) {
+ zl_lib->load_ref++;
+ dbll_unload(zl_lib, (struct dbll_attrs *)attrs);
+ redefined_symbol = false;
+ status = -EILSEQ;
+ } else {
+ *entry = zl_lib->entry;
+ }
+ }
+ }
+ if (!status)
+ zl_lib->load_ref++;
+
+ /* Clean up DOFF resources */
+ if (opened_doff)
+ dof_close(zl_lib);
+
+ DBC_ENSURE(status || zl_lib->load_ref > 0);
+
+ dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n",
+ __func__, lib, flags, entry, status);
+
+ return status;
+}
+
+/*
+ * ======== dbll_load_sect ========
+ * Not supported for COFF.
+ */
+int dbll_load_sect(struct dbll_library_obj *zl_lib, char *sec_name,
+ struct dbll_attrs *attrs)
+{
+ DBC_REQUIRE(zl_lib);
+
+ return -ENOSYS;
+}
+
+/*
+ * ======== dbll_open ========
+ */
+int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
+ struct dbll_library_obj **lib_obj)
+{
+ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+ struct dbll_library_obj *zl_lib = NULL;
+ s32 err;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_target);
+ DBC_REQUIRE(zl_target->attrs.fopen != NULL);
+ DBC_REQUIRE(file != NULL);
+ DBC_REQUIRE(lib_obj != NULL);
+
+ zl_lib = zl_target->head;
+ while (zl_lib != NULL) {
+ if (strcmp(zl_lib->file_name, file) == 0) {
+ /* Library is already opened */
+ zl_lib->open_ref++;
+ break;
+ }
+ zl_lib = zl_lib->next;
+ }
+ if (zl_lib == NULL) {
+ /* Allocate DBL library object */
+ zl_lib = kzalloc(sizeof(struct dbll_library_obj), GFP_KERNEL);
+ if (zl_lib == NULL) {
+ status = -ENOMEM;
+ } else {
+ zl_lib->ul_pos = 0;
+ /* Increment ref count to allow close on failure
+ * later on */
+ zl_lib->open_ref++;
+ zl_lib->target_obj = zl_target;
+ /* Keep a copy of the file name */
+ zl_lib->file_name = kzalloc(strlen(file) + 1,
+ GFP_KERNEL);
+ if (zl_lib->file_name == NULL) {
+ status = -ENOMEM;
+ } else {
+ strncpy(zl_lib->file_name, file,
+ strlen(file) + 1);
+ }
+ zl_lib->sym_tab = NULL;
+ }
+ }
+ /*
+ * Set up objects needed by the dynamic loader
+ */
+ if (status)
+ goto func_cont;
+
+ /* Stream */
+ zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer;
+ zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn;
+ zl_lib->stream.lib = zl_lib;
+ /* Symbol */
+ zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table;
+ zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol;
+ zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table;
+ zl_lib->symbol.dl_symbol.dload_allocate = allocate;
+ zl_lib->symbol.dl_symbol.dload_deallocate = deallocate;
+ zl_lib->symbol.dl_symbol.error_report = dbll_err_report;
+ zl_lib->symbol.lib = zl_lib;
+ /* Allocate */
+ zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc;
+ zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc;
+ zl_lib->allocate.lib = zl_lib;
+ /* Init */
+ zl_lib->init.dl_init.connect = connect;
+ zl_lib->init.dl_init.readmem = read_mem;
+ zl_lib->init.dl_init.writemem = write_mem;
+ zl_lib->init.dl_init.fillmem = fill_mem;
+ zl_lib->init.dl_init.execute = execute;
+ zl_lib->init.dl_init.release = release;
+ zl_lib->init.lib = zl_lib;
+ if (!status && zl_lib->fp == NULL)
+ status = dof_open(zl_lib);
+
+ zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
+ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET);
+ /* Create a hash table for symbols if flag is set */
+ if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB))
+ goto func_cont;
+
+ zl_lib->sym_tab =
+ gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash,
+ name_match, sym_delete);
+ if (zl_lib->sym_tab == NULL) {
+ status = -ENOMEM;
+ } else {
+ /* Do a fake load to get symbols - set write func to no_op */
+ zl_lib->init.dl_init.writemem = no_op;
+ err = dynamic_open_module(&zl_lib->stream.dl_stream,
+ &zl_lib->symbol.dl_symbol,
+ &zl_lib->allocate.dl_alloc,
+ &zl_lib->init.dl_init, 0,
+ &zl_lib->dload_mod_obj);
+ if (err != 0) {
+ status = -EILSEQ;
+ } else {
+ /* Now that we have the symbol table, we can unload */
+ err = dynamic_unload_module(zl_lib->dload_mod_obj,
+ &zl_lib->symbol.dl_symbol,
+ &zl_lib->allocate.dl_alloc,
+ &zl_lib->init.dl_init);
+ if (err != 0)
+ status = -EILSEQ;
+
+ zl_lib->dload_mod_obj = NULL;
+ }
+ }
+func_cont:
+ if (!status) {
+ if (zl_lib->open_ref == 1) {
+ /* First time opened - insert in list */
+ if (zl_target->head)
+ (zl_target->head)->prev = zl_lib;
+
+ zl_lib->prev = NULL;
+ zl_lib->next = zl_target->head;
+ zl_target->head = zl_lib;
+ }
+ *lib_obj = (struct dbll_library_obj *)zl_lib;
+ } else {
+ *lib_obj = NULL;
+ if (zl_lib != NULL)
+ dbll_close((struct dbll_library_obj *)zl_lib);
+
+ }
+ DBC_ENSURE((!status && (zl_lib->open_ref > 0) && *lib_obj)
+ || (status && *lib_obj == NULL));
+
+ dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n",
+ __func__, target, file, lib_obj, status);
+
+ return status;
+}
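For orientation only (an editorial sketch, not part of the patch): a typical caller pairs these entry points as create -> open -> load -> ... -> unload -> close -> delete. The helper name and the path argument below are hypothetical, and dbll_attrs must already carry valid file-I/O and allocator callbacks supplied by the caller; of the flag bits, only DBLL_SYMB (in dbll_open) and DBLL_DYNAMIC (in dbll_load) are examined by the functions above.

        #include <linux/types.h>
        #include <dspbridge/dbll.h>

        /* Hypothetical helper; attrs must already carry valid callbacks. */
        static int load_dsp_image(struct dbll_attrs *attrs, char *path)
        {
                struct dbll_tar_obj *target;
                struct dbll_library_obj *lib;
                u32 entry;
                int status;

                status = dbll_create(&target, attrs);
                if (status)
                        return status;

                status = dbll_open(target, path, DBLL_SYMB, &lib);
                if (!status) {
                        /* Load code/data and read back the entry point. */
                        status = dbll_load(lib, DBLL_SYMB, attrs, &entry);
                        if (!status)
                                dbll_unload(lib, attrs);
                        dbll_close(lib);
                }
                dbll_delete(target);
                return status;
        }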
+
+/*
+ * ======== dbll_read_sect ========
+ * Get the content of a COFF section.
+ */
+int dbll_read_sect(struct dbll_library_obj *lib, char *name,
+ char *buf, u32 size)
+{
+ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+ bool opened_doff = false;
+ u32 byte_size; /* size of bytes */
+ u32 ul_sect_size; /* size of section */
+ const struct ldr_section_info *sect = NULL;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_lib);
+ DBC_REQUIRE(name != NULL);
+ DBC_REQUIRE(buf != NULL);
+ DBC_REQUIRE(size != 0);
+
+ /* If DOFF file is not open, we open it. */
+ if (zl_lib != NULL) {
+ if (zl_lib->fp == NULL) {
+ status = dof_open(zl_lib);
+ if (!status)
+ opened_doff = true;
+
+ } else {
+ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
+ zl_lib->ul_pos,
+ SEEK_SET);
+ }
+ } else {
+ status = -EFAULT;
+ }
+ if (status)
+ goto func_cont;
+
+ byte_size = 1;
+ if (!dload_get_section_info(zl_lib->desc, name, &sect)) {
+ status = -ENXIO;
+ goto func_cont;
+ }
+ /*
+ * Ensure the supplied buffer size is sufficient to store
+ * the section data to be read.
+ */
+ ul_sect_size = sect->size * byte_size;
+ /* Make sure size is even so byte swapping works correctly */
+ if (ul_sect_size % 2)
+ ul_sect_size++;
+
+ /* Align size */
+ ul_sect_size = DOFF_ALIGN(ul_sect_size);
+ if (ul_sect_size > size) {
+ status = -EPERM;
+ } else {
+ if (!dload_get_section(zl_lib->desc, sect, buf))
+ status = -EBADF;
+
+ }
+func_cont:
+ if (opened_doff) {
+ dof_close(zl_lib);
+ opened_doff = false;
+ }
+
+ dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, "
+ "status 0x%x\n", __func__, lib, name, buf, size, status);
+ return status;
+}
+
+/*
+ * ======== dbll_set_attrs ========
+ * Set the attributes of the target.
+ */
+void dbll_set_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
+{
+ struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_target);
+ DBC_REQUIRE(pattrs != NULL);
+
+ if ((pattrs != NULL) && (zl_target != NULL))
+ zl_target->attrs = *pattrs;
+
+}
+
+/*
+ * ======== dbll_unload ========
+ */
+void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
+{
+ struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+ s32 err = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(zl_lib);
+ DBC_REQUIRE(zl_lib->load_ref > 0);
+ dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
+ zl_lib->load_ref--;
+ /* Unload only if reference count is 0 */
+ if (zl_lib->load_ref != 0)
+ goto func_end;
+
+ zl_lib->target_obj->attrs = *attrs;
+ if (zl_lib->dload_mod_obj) {
+ err = dynamic_unload_module(zl_lib->dload_mod_obj,
+ &zl_lib->symbol.dl_symbol,
+ &zl_lib->allocate.dl_alloc,
+ &zl_lib->init.dl_init);
+ if (err != 0)
+ dev_dbg(bridge, "%s: failed: 0x%x\n", __func__, err);
+ }
+ /* remove symbols from symbol table */
+ if (zl_lib->sym_tab != NULL) {
+ gh_delete(zl_lib->sym_tab);
+ zl_lib->sym_tab = NULL;
+ }
+ /* delete DOFF desc since it holds *lots* of host OS
+ * resources */
+ dof_close(zl_lib);
+func_end:
+ DBC_ENSURE(zl_lib->load_ref >= 0);
+}
+
+/*
+ * ======== dbll_unload_sect ========
+ * Not supported for COFF.
+ */
+int dbll_unload_sect(struct dbll_library_obj *lib, char *sec_name,
+ struct dbll_attrs *attrs)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(sec_name != NULL);
+
+ return -ENOSYS;
+}
+
+/*
+ * ======== dof_close ========
+ */
+static void dof_close(struct dbll_library_obj *zl_lib)
+{
+ if (zl_lib->desc) {
+ dload_module_close(zl_lib->desc);
+ zl_lib->desc = NULL;
+ }
+ /* close file */
+ if (zl_lib->fp) {
+ (zl_lib->target_obj->attrs.fclose) (zl_lib->fp);
+ zl_lib->fp = NULL;
+ }
+}
+
+/*
+ * ======== dof_open ========
+ */
+static int dof_open(struct dbll_library_obj *zl_lib)
+{
+ void *open = *(zl_lib->target_obj->attrs.fopen);
+ int status = 0;
+
+ /* First open the file for the dynamic loader, then open the DOFF module */
+ zl_lib->fp =
+ (void *)((dbll_f_open_fxn) (open)) (zl_lib->file_name, "rb");
+
+ /* Open DOFF module */
+ if (zl_lib->fp && zl_lib->desc == NULL) {
+ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0,
+ SEEK_SET);
+ zl_lib->desc =
+ dload_module_open(&zl_lib->stream.dl_stream,
+ &zl_lib->symbol.dl_symbol);
+ if (zl_lib->desc == NULL) {
+ (zl_lib->target_obj->attrs.fclose) (zl_lib->fp);
+ zl_lib->fp = NULL;
+ status = -EBADF;
+ }
+ } else {
+ status = -EBADF;
+ }
+
+ return status;
+}
+
+/*
+ * ======== name_hash ========
+ */
+static u16 name_hash(void *key, u16 max_bucket)
+{
+ u16 ret;
+ u16 hash;
+ char *name = (char *)key;
+
+ DBC_REQUIRE(name != NULL);
+
+ hash = 0;
+
+ while (*name) {
+ hash <<= 1;
+ hash ^= *name++;
+ }
+
+ ret = hash % max_bucket;
+
+ return ret;
+}
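Editorial aside: name_hash() is a simple shift-and-XOR hash reduced modulo the bucket count. A user-space replica (illustrative only, not part of the patch) showing one concrete value:

        #include <assert.h>
        #include <stdint.h>

        /* User-space replica of name_hash() above, for illustration. */
        static uint16_t name_hash_demo(const char *name, uint16_t max_bucket)
        {
                uint16_t hash = 0;

                while (*name) {
                        hash <<= 1;
                        hash ^= *name++;
                }
                return hash % max_bucket;
        }

        int main(void)
        {
                /* "abc": ((0<<1 ^ 'a')<<1 ^ 'b')<<1 ^ 'c' = 291, 291 % 211 = 80 */
                assert(name_hash_demo("abc", 211) == 80);
                return 0;
        }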
+
+/*
+ * ======== name_match ========
+ */
+static bool name_match(void *key, void *sp)
+{
+ DBC_REQUIRE(key != NULL);
+ DBC_REQUIRE(sp != NULL);
+
+ if ((key != NULL) && (sp != NULL)) {
+ if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
+ 0)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * ======== no_op ========
+ */
+static int no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
+ ldr_addr locn, struct ldr_section_info *info, unsigned bytsize)
+{
+ return 1;
+}
+
+/*
+ * ======== sym_delete ========
+ */
+static void sym_delete(void *value)
+{
+ struct dbll_symbol *sp = (struct dbll_symbol *)value;
+
+ kfree(sp->name);
+}
+
+/*
+ * Dynamic Loader Functions
+ */
+
+/* dynamic_loader_stream */
+/*
+ * ======== dbll_read_buffer ========
+ */
+static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
+ unsigned bufsize)
+{
+ struct dbll_stream *pstream = (struct dbll_stream *)this;
+ struct dbll_library_obj *lib;
+ int bytes_read = 0;
+
+ DBC_REQUIRE(this != NULL);
+ lib = pstream->lib;
+ DBC_REQUIRE(lib);
+
+ if (lib != NULL) {
+ bytes_read =
+ (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
+ lib->fp);
+ }
+ return bytes_read;
+}
+
+/*
+ * ======== dbll_set_file_posn ========
+ */
+static int dbll_set_file_posn(struct dynamic_loader_stream *this,
+ unsigned int pos)
+{
+ struct dbll_stream *pstream = (struct dbll_stream *)this;
+ struct dbll_library_obj *lib;
+ int status = 0; /* Success */
+
+ DBC_REQUIRE(this != NULL);
+ lib = pstream->lib;
+ DBC_REQUIRE(lib);
+
+ if (lib != NULL) {
+ status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
+ SEEK_SET);
+ }
+
+ return status;
+}
+
+/* dynamic_loader_sym */
+
+/*
+ * ======== dbll_find_symbol ========
+ */
+static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
+ const char *name)
+{
+ struct dynload_symbol *ret_sym;
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+ struct dbll_sym_val *dbll_sym = NULL;
+ bool status = false; /* Symbol not found yet */
+
+ DBC_REQUIRE(this != NULL);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+
+ if (lib != NULL) {
+ if (lib->target_obj->attrs.sym_lookup) {
+ /* Check current lib + base lib + dep lib +
+ * persistent lib */
+ status = (*(lib->target_obj->attrs.sym_lookup))
+ (lib->target_obj->attrs.sym_handle,
+ lib->target_obj->attrs.sym_arg,
+ lib->target_obj->attrs.rmm_handle, name,
+ &dbll_sym);
+ } else {
+ /* Just check current lib for symbol */
+ status = dbll_get_addr((struct dbll_library_obj *)lib,
+ (char *)name, &dbll_sym);
+ if (!status) {
+ status =
+ dbll_get_c_addr((struct dbll_library_obj *)
+ lib, (char *)name,
+ &dbll_sym);
+ }
+ }
+ }
+
+ if (!status && gbl_search)
+ dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
+
+ DBC_ASSERT((status && (dbll_sym != NULL))
+ || (!status && (dbll_sym == NULL)));
+
+ ret_sym = (struct dynload_symbol *)dbll_sym;
+ return ret_sym;
+}
+
+/*
+ * ======== find_in_symbol_table ========
+ */
+static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
+ *this, const char *name,
+ unsigned moduleid)
+{
+ struct dynload_symbol *ret_sym;
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+ struct dbll_symbol *sym;
+
+ DBC_REQUIRE(this != NULL);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+ DBC_REQUIRE(lib->sym_tab != NULL);
+
+ sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
+
+ /* Guard against a miss so we never take &sym->value of a NULL sym */
+ ret_sym = sym ? (struct dynload_symbol *)&sym->value : NULL;
+ return ret_sym;
+}
+
+/*
+ * ======== dbll_add_to_symbol_table ========
+ */
+static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
+ *this, const char *name,
+ unsigned module_id)
+{
+ struct dbll_symbol *sym_ptr = NULL;
+ struct dbll_symbol symbol;
+ struct dynload_symbol *dbll_sym = NULL;
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+ struct dynload_symbol *ret;
+
+ DBC_REQUIRE(this != NULL);
+ DBC_REQUIRE(name);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+
+ /* Check to see if symbol is already defined in symbol table */
+ if (!(lib->target_obj->attrs.base_image)) {
+ gbl_search = false;
+ dbll_sym = dbll_find_symbol(this, name);
+ gbl_search = true;
+ if (dbll_sym) {
+ redefined_symbol = true;
+ dev_dbg(bridge, "%s already defined in symbol table\n",
+ name);
+ return NULL;
+ }
+ }
+ /* Allocate string to copy symbol name */
+ symbol.name = kzalloc(strlen((char *const)name) + 1, GFP_KERNEL);
+ if (symbol.name == NULL)
+ return NULL;
+
+ if (symbol.name != NULL) {
+ /* Just copy name (value will be filled in by dynamic loader) */
+ strncpy(symbol.name, (char *const)name,
+ strlen((char *const)name) + 1);
+
+ /* Add symbol to symbol table */
+ sym_ptr =
+ (struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name,
+ (void *)&symbol);
+ if (sym_ptr == NULL)
+ kfree(symbol.name);
+
+ }
+ if (sym_ptr != NULL)
+ ret = (struct dynload_symbol *)&sym_ptr->value;
+ else
+ ret = NULL;
+
+ return ret;
+}
+
+/*
+ * ======== dbll_purge_symbol_table ========
+ */
+static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
+ unsigned module_id)
+{
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+
+ DBC_REQUIRE(this != NULL);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+
+ /* May not need to do anything */
+}
+
+/*
+ * ======== allocate ========
+ */
+static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
+{
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+ void *buf;
+
+ DBC_REQUIRE(this != NULL);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+
+ buf = kzalloc(memsize, GFP_KERNEL);
+
+ return buf;
+}
+
+/*
+ * ======== deallocate ========
+ */
+static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr)
+{
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+
+ DBC_REQUIRE(this != NULL);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+
+ kfree(mem_ptr);
+}
+
+/*
+ * ======== dbll_err_report ========
+ */
+static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
+ va_list args)
+{
+ struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+ struct dbll_library_obj *lib;
+ char temp_buf[MAXEXPR];
+
+ DBC_REQUIRE(this != NULL);
+ lib = ldr_sym->lib;
+ DBC_REQUIRE(lib);
+ vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
+ dev_dbg(bridge, "%s\n", temp_buf);
+}
+
+/* dynamic_loader_allocate */
+
+/*
+ * ======== dbll_rmm_alloc ========
+ */
+static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
+ struct ldr_section_info *info, unsigned align)
+{
+ struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
+ struct dbll_library_obj *lib;
+ int status = 0;
+ u32 mem_sect_type;
+ struct rmm_addr rmm_addr_obj;
+ s32 ret = true;
+ unsigned stype = DLOAD_SECTION_TYPE(info->type);
+ char *token = NULL;
+ char *sz_sec_last_token = NULL;
+ char *sz_last_token = NULL;
+ char *sz_sect_name = NULL;
+ char *psz_cur;
+ s32 token_len = 0;
+ s32 seg_id = -1;
+ s32 req = -1;
+ s32 count = 0;
+ u32 alloc_size = 0;
+ u32 run_addr_flag = 0;
+
+ DBC_REQUIRE(this != NULL);
+ lib = dbll_alloc_obj->lib;
+ DBC_REQUIRE(lib);
+
+ mem_sect_type =
+ (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
+ DLOAD_BSS) ? DBLL_BSS :
+ DBLL_DATA;
+
+ /* Attempt to extract the segment ID and requirement information from
+ the name of the section */
+ DBC_REQUIRE(info->name);
+ token_len = strlen((char *)(info->name)) + 1;
+
+ sz_sect_name = kzalloc(token_len, GFP_KERNEL);
+ sz_last_token = kzalloc(token_len, GFP_KERNEL);
+ sz_sec_last_token = kzalloc(token_len, GFP_KERNEL);
+
+ if (sz_sect_name == NULL || sz_sec_last_token == NULL ||
+ sz_last_token == NULL) {
+ status = -ENOMEM;
+ goto func_cont;
+ }
+ strncpy(sz_sect_name, (char *)(info->name), token_len);
+ psz_cur = sz_sect_name;
+ while ((token = strsep(&psz_cur, ":")) && *token != '\0') {
+ strncpy(sz_sec_last_token, sz_last_token,
+ strlen(sz_last_token) + 1);
+ strncpy(sz_last_token, token, strlen(token) + 1);
+ token = strsep(&psz_cur, ":");
+ count++; /* optimizes processing */
+ }
+ /* If the last token is 0 or 1, and the preceding token is DYN_DARAM,
+ DYN_SARAM, or DYN_EXTERNAL, then memory granularity information is
+ present within the section name. Only parse it if there are at least
+ three tokens within the section name (just a minor optimization). */
+ if (count >= 3)
+ strict_strtol(sz_last_token, 10, (long *)&req);
+
+ if ((req == 0) || (req == 1)) {
+ if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
+ seg_id = 0;
+ } else {
+ if (strcmp(sz_sec_last_token, "DYN_SARAM") == 0) {
+ seg_id = 1;
+ } else {
+ if (strcmp(sz_sec_last_token,
+ "DYN_EXTERNAL") == 0)
+ seg_id = 2;
+ }
+ }
+ }
+func_cont:
+ kfree(sz_sect_name);
+ sz_sect_name = NULL;
+ kfree(sz_last_token);
+ sz_last_token = NULL;
+ kfree(sz_sec_last_token);
+ sz_sec_last_token = NULL;
+
+ if (mem_sect_type == DBLL_CODE)
+ alloc_size = info->size + GEM_L1P_PREFETCH_SIZE;
+ else
+ alloc_size = info->size;
+
+ if (info->load_addr != info->run_addr)
+ run_addr_flag = 1;
+ /* TODO - ideally, we can pass the alignment requirement also
+ * from here */
+ if (lib != NULL) {
+ status =
+ (lib->target_obj->attrs.alloc) (lib->target_obj->attrs.
+ rmm_handle, mem_sect_type,
+ alloc_size, align,
+ (u32 *) &rmm_addr_obj,
+ seg_id, req, false);
+ }
+ if (status) {
+ ret = false;
+ } else {
+ /* RMM gives word address. Need to convert to byte address */
+ info->load_addr = rmm_addr_obj.addr * DSPWORDSIZE;
+ if (!run_addr_flag)
+ info->run_addr = info->load_addr;
+ info->context = (u32) rmm_addr_obj.segid;
+ dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, "
+ "info->run_addr 0x%x, info->load_addr 0x%x\n",
+ __func__, info->name, info->load_addr / DSPWORDSIZE,
+ info->size / DSPWORDSIZE, info->run_addr,
+ info->load_addr);
+ }
+ return ret;
+}
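To restate the placement decision that the parsing above extracts (editorial sketch, not part of the patch; the exact section-name syntax produced by the TI tool chain that yields these tokens is not shown here, and the helper name is hypothetical):

        #include <linux/types.h>
        #include <linux/string.h>

        /* Editorial restatement of the seg_id selection in dbll_rmm_alloc(). */
        static s32 seg_id_from_tokens(const char *mem_token, long req)
        {
                if (req != 0 && req != 1)
                        return -1;      /* no placement hint in the name */
                if (!strcmp(mem_token, "DYN_DARAM"))
                        return 0;       /* seg_id 0, as in the code above */
                if (!strcmp(mem_token, "DYN_SARAM"))
                        return 1;       /* seg_id 1 */
                if (!strcmp(mem_token, "DYN_EXTERNAL"))
                        return 2;       /* seg_id 2 */
                return -1;              /* fall back to allocator default */
        }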
+
+/*
+ * ======== rmm_dealloc ========
+ */
+static void rmm_dealloc(struct dynamic_loader_allocate *this,
+ struct ldr_section_info *info)
+{
+ struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
+ struct dbll_library_obj *lib;
+ u32 segid;
+ int status = 0;
+ unsigned stype = DLOAD_SECTION_TYPE(info->type);
+ u32 mem_sect_type;
+ u32 free_size = 0;
+
+ mem_sect_type =
+ (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
+ DLOAD_BSS) ? DBLL_BSS :
+ DBLL_DATA;
+ DBC_REQUIRE(this != NULL);
+ lib = dbll_alloc_obj->lib;
+ DBC_REQUIRE(lib);
+ /* segid was set by alloc function */
+ segid = (u32) info->context;
+ if (mem_sect_type == DBLL_CODE)
+ free_size = info->size + GEM_L1P_PREFETCH_SIZE;
+ else
+ free_size = info->size;
+ if (lib != NULL) {
+ status =
+ (lib->target_obj->attrs.free) (lib->target_obj->attrs.
+ sym_handle, segid,
+ info->load_addr /
+ DSPWORDSIZE, free_size,
+ false);
+ }
+}
+
+/* dynamic_loader_initialize */
+/*
+ * ======== connect ========
+ */
+static int connect(struct dynamic_loader_initialize *this)
+{
+ return true;
+}
+
+/*
+ * ======== read_mem ========
+ * This function does not need to be implemented.
+ */
+static int read_mem(struct dynamic_loader_initialize *this, void *buf,
+ ldr_addr addr, struct ldr_section_info *info,
+ unsigned nbytes)
+{
+ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+ struct dbll_library_obj *lib;
+ int bytes_read = 0;
+
+ DBC_REQUIRE(this != NULL);
+ lib = init_obj->lib;
+ DBC_REQUIRE(lib);
+ /* Need bridge_brd_read function */
+ return bytes_read;
+}
+
+/*
+ * ======== write_mem ========
+ */
+static int write_mem(struct dynamic_loader_initialize *this, void *buf,
+ ldr_addr addr, struct ldr_section_info *info,
+ unsigned bytes)
+{
+ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+ struct dbll_library_obj *lib;
+ struct dbll_tar_obj *target_obj;
+ struct dbll_sect_info sect_info;
+ u32 mem_sect_type;
+ bool ret = true;
+
+ DBC_REQUIRE(this != NULL);
+ lib = init_obj->lib;
+ if (!lib)
+ return false;
+
+ target_obj = lib->target_obj;
+
+ mem_sect_type =
+ (DLOAD_SECTION_TYPE(info->type) ==
+ DLOAD_TEXT) ? DBLL_CODE : DBLL_DATA;
+ if (target_obj && target_obj->attrs.write) {
+ ret =
+ (*target_obj->attrs.write) (target_obj->attrs.input_params,
+ addr, buf, bytes,
+ mem_sect_type);
+
+ if (target_obj->attrs.log_write) {
+ sect_info.name = info->name;
+ sect_info.sect_run_addr = info->run_addr;
+ sect_info.sect_load_addr = info->load_addr;
+ sect_info.size = info->size;
+ sect_info.type = mem_sect_type;
+ /* Pass the information about what we've written to
+ * another module */
+ (*target_obj->attrs.log_write) (target_obj->attrs.
+ log_write_handle,
+ &sect_info, addr,
+ bytes);
+ }
+ }
+ return ret;
+}
+
+/*
+ * ======== fill_mem ========
+ * Fill bytes of memory at a given address with a given value by
+ * writing from a buffer containing the given value. Write in
+ * sets of MAXEXPR (128) bytes to avoid large stack buffer issues.
+ */
+static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
+ struct ldr_section_info *info, unsigned bytes, unsigned val)
+{
+ bool ret = true;
+ char *pbuf;
+ struct dbll_library_obj *lib;
+ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+
+ DBC_REQUIRE(this != NULL);
+ lib = init_obj->lib;
+ pbuf = NULL;
+ /* Pass a NULL buffer pointer to write_mem to get the start address of
+ shared memory. This is a trick to obtain the address only; no actual
+ write takes place in this call.
+ */
+ if ((lib->target_obj->attrs.write) != (dbll_write_fxn) no_op)
+ write_mem(this, &pbuf, addr, info, 0);
+ if (pbuf)
+ memset(pbuf, val, bytes);
+
+ return ret;
+}
+
+/*
+ * ======== execute ========
+ */
+static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
+{
+ struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+ struct dbll_library_obj *lib;
+ bool ret = true;
+
+ DBC_REQUIRE(this != NULL);
+ lib = init_obj->lib;
+ DBC_REQUIRE(lib);
+ /* Save entry point */
+ if (lib != NULL)
+ lib->entry = (u32) start;
+
+ return ret;
+}
+
+/*
+ * ======== release ========
+ */
+static void release(struct dynamic_loader_initialize *this)
+{
+}
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/**
+ * find_symbol_context - Basic symbol context structure
+ * @address: Symbol address
+ * @offset_range: Offset range where the search for the DSP symbol
+ * started.
+ * @cur_best_offset: Smallest offset found so far in the search for the DSP symbol
+ * @sym_addr: Address of the DSP symbol
+ * @name: Symbol name
+ *
+ */
+struct find_symbol_context {
+ /* input */
+ u32 address;
+ u32 offset_range;
+ /* state */
+ u32 cur_best_offset;
+ /* output */
+ u32 sym_addr;
+ char name[120];
+};
+
+/**
+ * find_symbol_callback() - Validates symbol address and copies the symbol name
+ * to the user data.
+ * @elem: dsp library context
+ * @user_data: Find symbol context
+ *
+ */
+void find_symbol_callback(void *elem, void *user_data)
+{
+ struct dbll_symbol *symbol = elem;
+ struct find_symbol_context *context = user_data;
+ u32 symbol_addr = symbol->value.value;
+ u32 offset = context->address - symbol_addr;
+
+ /*
+ * Address given should be greater than symbol address,
+ * symbol address should be within specified range
+ * and the offset should be better than previous one
+ */
+ if (context->address >= symbol_addr && symbol_addr < (u32)-1 &&
+ offset < context->cur_best_offset) {
+ context->cur_best_offset = offset;
+ context->sym_addr = symbol_addr;
+ strncpy(context->name, symbol->name, sizeof(context->name));
+ }
+
+ return;
+}
+
+/**
+ * dbll_find_dsp_symbol() - Retrieve the DSP symbol nearest to a given address.
+ * @zl_lib: DSP binary obj library pointer
+ * @address: Given address to find the dsp symbol
+ * @offset_range: offset range to look for dsp symbol
+ * @sym_addr_output: Symbol Output address
+ * @name_output: String with the dsp symbol
+ *
+ * This function retrieves the dsp symbol from the dsp binary.
+ */
+bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
+ u32 offset_range, u32 *sym_addr_output,
+ char *name_output)
+{
+ bool status = false;
+ struct find_symbol_context context;
+
+ context.address = address;
+ context.offset_range = offset_range;
+ context.cur_best_offset = offset_range;
+ context.sym_addr = 0;
+ context.name[0] = '\0';
+
+ gh_iterate(zl_lib->sym_tab, find_symbol_callback, &context);
+
+ if (context.name[0]) {
+ status = true;
+ strcpy(name_output, context.name);
+ *sym_addr_output = context.sym_addr;
+ }
+
+ return status;
+}
+#endif
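A hedged usage sketch, not part of the patch: given a DSP program counter, a backtrace path compiled with CONFIG_TIDSPBRIDGE_BACKTRACE could map it back to the nearest symbol as below. The helper name, the 512-address search window, and the log format are assumptions; only the dbll_find_dsp_symbol() signature comes from the code above.

        #include <linux/kernel.h>
        #include <linux/types.h>
        #include <dspbridge/dbll.h>

        /* Hypothetical caller, e.g. from an MMU-fault backtrace path. */
        static void print_dsp_symbol(struct dbll_library_obj *lib, u32 dsp_pc)
        {
                u32 sym_addr = 0;
                char name[120] = "";

                /* Search up to 512 DSP addresses below dsp_pc (arbitrary window). */
                if (dbll_find_dsp_symbol(lib, dsp_pc, 512, &sym_addr, name))
                        pr_info("DSP PC 0x%x = %s+0x%x\n", dsp_pc, name,
                                dsp_pc - sym_addr);
                else
                        pr_info("DSP PC 0x%x: no symbol within range\n", dsp_pc);
        }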
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
new file mode 100644
index 00000000000..4ddf03d3b1a
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -0,0 +1,1151 @@
+/*
+ * dev.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of Bridge driver device operations.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ldr.h>
+#include <dspbridge/list.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/proc.h>
+#include <dspbridge/dmm.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/mgr.h>
+#include <dspbridge/node.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/dspapi.h> /* DSP API version info. */
+
+#include <dspbridge/chnl.h>
+#include <dspbridge/io.h>
+#include <dspbridge/msg.h>
+#include <dspbridge/cmm.h>
+#include <dspbridge/dspdeh.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+
+#define MAKEVERSION(major, minor) (major * 10 + minor)
+#define BRD_API_VERSION MAKEVERSION(BRD_API_MAJOR_VERSION, \
+ BRD_API_MINOR_VERSION)
+
+/* The Bridge device object: */
+struct dev_object {
+ /* LST requires "link" to be first field! */
+ struct list_head link; /* Link to next dev_object. */
+ u8 dev_type; /* Device Type */
+ struct cfg_devnode *dev_node_obj; /* Platform specific dev id */
+ /* Bridge Context Handle */
+ struct bridge_dev_context *hbridge_context;
+ /* Function interface to Bridge driver. */
+ struct bridge_drv_interface bridge_interface;
+ struct brd_object *lock_owner; /* Client with exclusive access. */
+ struct cod_manager *cod_mgr; /* Code manager handle. */
+ struct chnl_mgr *hchnl_mgr; /* Channel manager. */
+ struct deh_mgr *hdeh_mgr; /* DEH manager. */
+ struct msg_mgr *hmsg_mgr; /* Message manager. */
+ struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
+ struct cmm_object *hcmm_mgr; /* SM memory manager. */
+ struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
+ struct ldr_module *module_obj; /* Bridge Module handle. */
+ u32 word_size; /* DSP word size: quick access. */
+ struct drv_object *hdrv_obj; /* Driver Object */
+ struct lst_list *proc_list; /* List of Processors attached to
+ * this device */
+ struct node_mgr *hnode_mgr;
+};
+
+/* ----------------------------------- Globals */
+static u32 refs; /* Module reference count */
+
+/* ----------------------------------- Function Prototypes */
+static int fxn_not_implemented(int arg, ...);
+static int init_cod_mgr(struct dev_object *dev_obj);
+static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
+ struct bridge_drv_interface *intf_fxns);
+/*
+ * ======== dev_brd_write_fxn ========
+ * Purpose:
+ * Exported function to be used as the COD write function. This function
+ * is passed a handle to a DEV_hObject, then calls the
+ * device's bridge_brd_write() function.
+ */
+u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
+ u32 ul_num_bytes, u32 mem_space)
+{
+ struct dev_object *dev_obj = (struct dev_object *)arb;
+ u32 ul_written = 0;
+ int status;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
+ if (dev_obj) {
+ /* Require of BrdWrite() */
+ DBC_ASSERT(dev_obj->hbridge_context != NULL);
+ status = (*dev_obj->bridge_interface.pfn_brd_write) (
+ dev_obj->hbridge_context, host_buf,
+ dsp_add, ul_num_bytes, mem_space);
+ /* Special case of getting the address only */
+ if (ul_num_bytes == 0)
+ ul_num_bytes = 1;
+ if (!status)
+ ul_written = ul_num_bytes;
+
+ }
+ return ul_written;
+}
+
+/*
+ * ======== dev_create_device ========
+ * Purpose:
+ * Called by the operating system to load the PM Bridge Driver for a
+ * PM board (device).
+ */
+int dev_create_device(struct dev_object **device_obj,
+ const char *driver_file_name,
+ struct cfg_devnode *dev_node_obj)
+{
+ struct cfg_hostres *host_res;
+ struct ldr_module *module_obj = NULL;
+ struct bridge_drv_interface *drv_fxns = NULL;
+ struct dev_object *dev_obj = NULL;
+ struct chnl_mgrattrs mgr_attrs;
+ struct io_attrs io_mgr_attrs;
+ u32 num_windows;
+ struct drv_object *hdrv_obj = NULL;
+ int status = 0;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(device_obj != NULL);
+ DBC_REQUIRE(driver_file_name != NULL);
+
+ status = drv_request_bridge_res_dsp((void *)&host_res);
+
+ if (status) {
+ dev_dbg(bridge, "%s: Failed to reserve bridge resources\n",
+ __func__);
+ goto leave;
+ }
+
+ /* Get the Bridge driver interface functions */
+ bridge_drv_entry(&drv_fxns, driver_file_name);
+ if (cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT)) {
+ /* don't propagate CFG errors from this PROC function */
+ status = -EPERM;
+ }
+ /* Create the device object, and pass a handle to the Bridge driver for
+ * storage. */
+ if (!status) {
+ DBC_ASSERT(drv_fxns);
+ dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
+ if (dev_obj) {
+ /* Fill out the rest of the Dev Object structure: */
+ dev_obj->dev_node_obj = dev_node_obj;
+ dev_obj->module_obj = module_obj;
+ dev_obj->cod_mgr = NULL;
+ dev_obj->hchnl_mgr = NULL;
+ dev_obj->hdeh_mgr = NULL;
+ dev_obj->lock_owner = NULL;
+ dev_obj->word_size = DSPWORDSIZE;
+ dev_obj->hdrv_obj = hdrv_obj;
+ dev_obj->dev_type = DSP_UNIT;
+ /* Store this Bridge's interface functions, based on its
+ * version. */
+ store_interface_fxns(drv_fxns,
+ &dev_obj->bridge_interface);
+
+ /* Call fxn_dev_create() to get the Bridge's device
+ * context handle. */
+ status = (dev_obj->bridge_interface.pfn_dev_create)
+ (&dev_obj->hbridge_context, dev_obj,
+ host_res);
+ /* Assert bridge_dev_create()'s ensure clause: */
+ DBC_ASSERT(status
+ || (dev_obj->hbridge_context != NULL));
+ } else {
+ status = -ENOMEM;
+ }
+ }
+ /* Attempt to create the COD manager for this device: */
+ if (!status)
+ status = init_cod_mgr(dev_obj);
+
+ /* Attempt to create the channel manager for this device: */
+ if (!status) {
+ mgr_attrs.max_channels = CHNL_MAXCHANNELS;
+ io_mgr_attrs.birq = host_res->birq_registers;
+ io_mgr_attrs.irq_shared =
+ (host_res->birq_attrib & CFG_IRQSHARED);
+ io_mgr_attrs.word_size = DSPWORDSIZE;
+ mgr_attrs.word_size = DSPWORDSIZE;
+ num_windows = host_res->num_mem_windows;
+ if (num_windows) {
+ /* Assume last memory window is for CHNL */
+ io_mgr_attrs.shm_base = host_res->dw_mem_base[1] +
+ host_res->dw_offset_for_monitor;
+ io_mgr_attrs.usm_length =
+ host_res->dw_mem_length[1] -
+ host_res->dw_offset_for_monitor;
+ } else {
+ io_mgr_attrs.shm_base = 0;
+ io_mgr_attrs.usm_length = 0;
+ pr_err("%s: No memory reserved for shared structures\n",
+ __func__);
+ }
+ status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs);
+ if (status == -ENOSYS) {
+ /* It's OK for a device not to have a channel
+ * manager: */
+ status = 0;
+ }
+ /* Create CMM mgr even if Msg Mgr not impl. */
+ status = cmm_create(&dev_obj->hcmm_mgr,
+ (struct dev_object *)dev_obj, NULL);
+ /* Only create IO manager if we have a channel manager */
+ if (!status && dev_obj->hchnl_mgr) {
+ status = io_create(&dev_obj->hio_mgr, dev_obj,
+ &io_mgr_attrs);
+ }
+ /* Only create DEH manager if we have an IO manager */
+ if (!status) {
+ /* Instantiate the DEH module */
+ status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
+ }
+ /* Create DMM mgr . */
+ status = dmm_create(&dev_obj->dmm_mgr,
+ (struct dev_object *)dev_obj, NULL);
+ }
+ /* Add the new DEV_Object to the global list: */
+ if (!status) {
+ lst_init_elem(&dev_obj->link);
+ status = drv_insert_dev_object(hdrv_obj, dev_obj);
+ }
+ /* Create the Processor List */
+ if (!status) {
+ dev_obj->proc_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (!(dev_obj->proc_list))
+ status = -EPERM;
+ else
+ INIT_LIST_HEAD(&dev_obj->proc_list->head);
+ }
+leave:
+ /* If all went well, return a handle to the dev object;
+ * else, cleanup and return NULL in the OUT parameter. */
+ if (!status) {
+ *device_obj = dev_obj;
+ } else {
+ if (dev_obj) {
+ kfree(dev_obj->proc_list);
+ if (dev_obj->cod_mgr)
+ cod_delete(dev_obj->cod_mgr);
+ if (dev_obj->dmm_mgr)
+ dmm_destroy(dev_obj->dmm_mgr);
+ kfree(dev_obj);
+ }
+
+ *device_obj = NULL;
+ }
+
+ DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
+ return status;
+}
+
+/*
+ * ======== dev_create2 ========
+ * Purpose:
+ * After successful loading of the image from api_init_complete2
+ * (PROC Auto_Start) or proc_load, this fxn is called. It creates
+ * the Node Manager and updates the DEV Object.
+ */
+int dev_create2(struct dev_object *hdev_obj)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hdev_obj);
+
+ /* There can be only one Node Manager per DEV object */
+ DBC_ASSERT(!dev_obj->hnode_mgr);
+ status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj);
+ if (status)
+ dev_obj->hnode_mgr = NULL;
+
+ DBC_ENSURE((!status && dev_obj->hnode_mgr != NULL)
+ || (status && dev_obj->hnode_mgr == NULL));
+ return status;
+}
+
+/*
+ * ======== dev_destroy2 ========
+ * Purpose:
+ * Destroys the Node manager for this device.
+ */
+int dev_destroy2(struct dev_object *hdev_obj)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hdev_obj);
+
+ if (dev_obj->hnode_mgr) {
+ if (node_delete_mgr(dev_obj->hnode_mgr))
+ status = -EPERM;
+ else
+ dev_obj->hnode_mgr = NULL;
+
+ }
+
+ DBC_ENSURE((!status && dev_obj->hnode_mgr == NULL) || status);
+ return status;
+}
+
+/*
+ * ======== dev_destroy_device ========
+ * Purpose:
+ * Destroys the channel manager for this device, if any, calls
+ * bridge_dev_destroy(), and then attempts to unload the Bridge module.
+ */
+int dev_destroy_device(struct dev_object *hdev_obj)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (hdev_obj) {
+ if (dev_obj->cod_mgr) {
+ cod_delete(dev_obj->cod_mgr);
+ dev_obj->cod_mgr = NULL;
+ }
+
+ if (dev_obj->hnode_mgr) {
+ node_delete_mgr(dev_obj->hnode_mgr);
+ dev_obj->hnode_mgr = NULL;
+ }
+
+ /* Free the io, channel, and message managers for this board: */
+ if (dev_obj->hio_mgr) {
+ io_destroy(dev_obj->hio_mgr);
+ dev_obj->hio_mgr = NULL;
+ }
+ if (dev_obj->hchnl_mgr) {
+ chnl_destroy(dev_obj->hchnl_mgr);
+ dev_obj->hchnl_mgr = NULL;
+ }
+ if (dev_obj->hmsg_mgr) {
+ msg_delete(dev_obj->hmsg_mgr);
+ dev_obj->hmsg_mgr = NULL;
+ }
+
+ if (dev_obj->hdeh_mgr) {
+ /* Uninitialize DEH module. */
+ bridge_deh_destroy(dev_obj->hdeh_mgr);
+ dev_obj->hdeh_mgr = NULL;
+ }
+ if (dev_obj->hcmm_mgr) {
+ cmm_destroy(dev_obj->hcmm_mgr, true);
+ dev_obj->hcmm_mgr = NULL;
+ }
+
+ if (dev_obj->dmm_mgr) {
+ dmm_destroy(dev_obj->dmm_mgr);
+ dev_obj->dmm_mgr = NULL;
+ }
+
+ /* Call the driver's bridge_dev_destroy() function: */
+ /* Require of DevDestroy */
+ if (dev_obj->hbridge_context) {
+ status = (*dev_obj->bridge_interface.pfn_dev_destroy)
+ (dev_obj->hbridge_context);
+ dev_obj->hbridge_context = NULL;
+ } else
+ status = -EPERM;
+ if (!status) {
+ kfree(dev_obj->proc_list);
+ dev_obj->proc_list = NULL;
+
+ /* Remove this DEV_Object from the global list: */
+ drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj);
+ /* Free The library * LDR_FreeModule
+ * (dev_obj->module_obj); */
+ /* Free this dev object: */
+ kfree(dev_obj);
+ dev_obj = NULL;
+ }
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== dev_get_chnl_mgr ========
+ * Purpose:
+ * Retrieve the handle to the channel manager handle created for this
+ * device.
+ */
+int dev_get_chnl_mgr(struct dev_object *hdev_obj,
+ struct chnl_mgr **mgr)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(mgr != NULL);
+
+ if (hdev_obj) {
+ *mgr = dev_obj->hchnl_mgr;
+ } else {
+ *mgr = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
+ return status;
+}
+
+/*
+ * ======== dev_get_cmm_mgr ========
+ * Purpose:
+ * Retrieve the handle to the shared memory manager created for this
+ * device.
+ */
+int dev_get_cmm_mgr(struct dev_object *hdev_obj,
+ struct cmm_object **mgr)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(mgr != NULL);
+
+ if (hdev_obj) {
+ *mgr = dev_obj->hcmm_mgr;
+ } else {
+ *mgr = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
+ return status;
+}
+
+/*
+ * ======== dev_get_dmm_mgr ========
+ * Purpose:
+ * Retrieve the handle to the dynamic memory manager created for this
+ * device.
+ */
+int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+ struct dmm_object **mgr)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(mgr != NULL);
+
+ if (hdev_obj) {
+ *mgr = dev_obj->dmm_mgr;
+ } else {
+ *mgr = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
+ return status;
+}
+
+/*
+ * ======== dev_get_cod_mgr ========
+ * Purpose:
+ * Retrieve the COD manager created for this device.
+ */
+int dev_get_cod_mgr(struct dev_object *hdev_obj,
+ struct cod_manager **cod_mgr)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(cod_mgr != NULL);
+
+ if (hdev_obj) {
+ *cod_mgr = dev_obj->cod_mgr;
+ } else {
+ *cod_mgr = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (cod_mgr != NULL && *cod_mgr == NULL));
+ return status;
+}
+
+/*
+ * ========= dev_get_deh_mgr ========
+ */
+int dev_get_deh_mgr(struct dev_object *hdev_obj,
+ struct deh_mgr **deh_manager)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(deh_manager != NULL);
+ DBC_REQUIRE(hdev_obj);
+ if (hdev_obj) {
+ *deh_manager = hdev_obj->hdeh_mgr;
+ } else {
+ *deh_manager = NULL;
+ status = -EFAULT;
+ }
+ return status;
+}
+
+/*
+ * ======== dev_get_dev_node ========
+ * Purpose:
+ * Retrieve the platform specific device ID for this device.
+ */
+int dev_get_dev_node(struct dev_object *hdev_obj,
+ struct cfg_devnode **dev_nde)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dev_nde != NULL);
+
+ if (hdev_obj) {
+ *dev_nde = dev_obj->dev_node_obj;
+ } else {
+ *dev_nde = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (dev_nde != NULL && *dev_nde == NULL));
+ return status;
+}
+
+/*
+ * ======== dev_get_first ========
+ * Purpose:
+ * Retrieve the first Device Object handle from an internal linked list
+ * of DEV_OBJECTs maintained by DEV.
+ */
+struct dev_object *dev_get_first(void)
+{
+ struct dev_object *dev_obj = NULL;
+
+ dev_obj = (struct dev_object *)drv_get_first_dev_object();
+
+ return dev_obj;
+}
+
+/*
+ * ======== dev_get_intf_fxns ========
+ * Purpose:
+ * Retrieve the Bridge interface function structure for the loaded driver.
+ * Requires if_fxns != NULL.
+ */
+int dev_get_intf_fxns(struct dev_object *hdev_obj,
+ struct bridge_drv_interface **if_fxns)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(if_fxns != NULL);
+
+ if (hdev_obj) {
+ *if_fxns = &dev_obj->bridge_interface;
+ } else {
+ *if_fxns = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || ((if_fxns != NULL) && (*if_fxns == NULL)));
+ return status;
+}
+
+/*
+ * ========= dev_get_io_mgr ========
+ */
+int dev_get_io_mgr(struct dev_object *hdev_obj,
+ struct io_mgr **io_man)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(io_man != NULL);
+ DBC_REQUIRE(hdev_obj);
+
+ if (hdev_obj) {
+ *io_man = hdev_obj->hio_mgr;
+ } else {
+ *io_man = NULL;
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== dev_get_next ========
+ * Purpose:
+ * Retrieve the next Device Object handle from an internal linked list
+ * of DEV_OBJECTs maintained by DEV, after having previously called
+ * dev_get_first() and zero or more dev_get_next() calls.
+ */
+struct dev_object *dev_get_next(struct dev_object *hdev_obj)
+{
+ struct dev_object *next_dev_object = NULL;
+
+ if (hdev_obj) {
+ next_dev_object = (struct dev_object *)
+ drv_get_next_dev_object((u32) hdev_obj);
+ }
+
+ return next_dev_object;
+}
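Editorial sketch, not part of the patch: dev_get_first()/dev_get_next() are intended as a simple cursor over the global device list. The helper name below is hypothetical; only the two accessors come from the code above.

        #include <linux/types.h>
        #include <dspbridge/dev.h>

        /* Hypothetical helper counting all registered Bridge devices. */
        static u32 count_bridge_devices(void)
        {
                struct dev_object *dev;
                u32 count = 0;

                for (dev = dev_get_first(); dev != NULL; dev = dev_get_next(dev))
                        count++;

                return count;
        }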
+
+/*
+ * ========= dev_get_msg_mgr ========
+ */
+void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(msg_man != NULL);
+ DBC_REQUIRE(hdev_obj);
+
+ *msg_man = hdev_obj->hmsg_mgr;
+}
+
+/*
+ * ======== dev_get_node_manager ========
+ * Purpose:
+ * Retrieve the Node Manager Handle
+ */
+int dev_get_node_manager(struct dev_object *hdev_obj,
+ struct node_mgr **node_man)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(node_man != NULL);
+
+ if (hdev_obj) {
+ *node_man = dev_obj->hnode_mgr;
+ } else {
+ *node_man = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (node_man != NULL && *node_man == NULL));
+ return status;
+}
+
+/*
+ * ======== dev_get_symbol ========
+ */
+int dev_get_symbol(struct dev_object *hdev_obj,
+ const char *str_sym, u32 * pul_value)
+{
+ int status = 0;
+ struct cod_manager *cod_mgr;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(str_sym != NULL && pul_value != NULL);
+
+ if (hdev_obj) {
+ status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
+ if (cod_mgr)
+ status = cod_get_sym_value(cod_mgr, (char *)str_sym,
+ pul_value);
+ else
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== dev_get_bridge_context ========
+ * Purpose:
+ * Retrieve the Bridge Context handle, as returned by the
+ * bridge_dev_create fxn.
+ */
+int dev_get_bridge_context(struct dev_object *hdev_obj,
+ struct bridge_dev_context **phbridge_context)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(phbridge_context != NULL);
+
+ if (hdev_obj) {
+ *phbridge_context = dev_obj->hbridge_context;
+ } else {
+ *phbridge_context = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || ((phbridge_context != NULL) &&
+ (*phbridge_context == NULL)));
+ return status;
+}
+
+/*
+ * ======== dev_exit ========
+ * Purpose:
+ * Decrement reference count, and free resources when reference count is
+ * 0.
+ */
+void dev_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ if (refs == 0) {
+ cmm_exit();
+ dmm_exit();
+ }
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== dev_init ========
+ * Purpose:
+ * Initialize DEV's private state, keeping a reference count on each call.
+ */
+bool dev_init(void)
+{
+ bool cmm_ret, dmm_ret, ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (refs == 0) {
+ cmm_ret = cmm_init();
+ dmm_ret = dmm_init();
+
+ ret = cmm_ret && dmm_ret;
+
+ if (!ret) {
+ if (cmm_ret)
+ cmm_exit();
+
+ if (dmm_ret)
+ dmm_exit();
+
+ }
+ }
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
+
+/*
+ * ======== dev_notify_clients ========
+ * Purpose:
+ * Notify all clients of this device of a change in device status.
+ */
+int dev_notify_clients(struct dev_object *hdev_obj, u32 ret)
+{
+ int status = 0;
+
+ struct dev_object *dev_obj = hdev_obj;
+ void *proc_obj;
+
+ for (proc_obj = (void *)lst_first(dev_obj->proc_list);
+ proc_obj != NULL;
+ proc_obj = (void *)lst_next(dev_obj->proc_list,
+ (struct list_head *)proc_obj))
+ proc_notify_clients(proc_obj, (u32) ret);
+
+ return status;
+}
+
+/*
+ * ======== dev_remove_device ========
+ */
+int dev_remove_device(struct cfg_devnode *dev_node_obj)
+{
+ struct dev_object *hdev_obj; /* handle to device object */
+ int status = 0;
+ struct dev_object *dev_obj;
+
+	/* Retrieve the device object handle originally stored with
+ * the dev_node: */
+ status = cfg_get_dev_object(dev_node_obj, (u32 *) &hdev_obj);
+ if (!status) {
+ /* Remove the Processor List */
+ dev_obj = (struct dev_object *)hdev_obj;
+ /* Destroy the device object. */
+ status = dev_destroy_device(hdev_obj);
+ }
+
+ return status;
+}
+
+/*
+ * ======== dev_set_chnl_mgr ========
+ * Purpose:
+ * Set the channel manager for this device.
+ */
+int dev_set_chnl_mgr(struct dev_object *hdev_obj,
+ struct chnl_mgr *hmgr)
+{
+ int status = 0;
+ struct dev_object *dev_obj = hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (hdev_obj)
+ dev_obj->hchnl_mgr = hmgr;
+ else
+ status = -EFAULT;
+
+ DBC_ENSURE(status || (dev_obj->hchnl_mgr == hmgr));
+ return status;
+}
+
+/*
+ * ======== dev_set_msg_mgr ========
+ * Purpose:
+ * Set the message manager for this device.
+ */
+void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hdev_obj);
+
+ hdev_obj->hmsg_mgr = hmgr;
+}
+
+/*
+ * ======== dev_start_device ========
+ * Purpose:
+ * Initializes the new device with the BRIDGE environment.
+ */
+int dev_start_device(struct cfg_devnode *dev_node_obj)
+{
+	struct dev_object *hdev_obj = NULL;	/* handle to Bridge device */
+ /* Bridge driver filename */
+ char bridge_file_name[CFG_MAXSEARCHPATHLEN] = "UMA";
+ int status;
+ struct mgr_object *hmgr_obj = NULL;
+
+ DBC_REQUIRE(refs > 0);
+
+ /* Given all resources, create a device object. */
+ status = dev_create_device(&hdev_obj, bridge_file_name,
+ dev_node_obj);
+ if (!status) {
+ /* Store away the hdev_obj with the DEVNODE */
+ status = cfg_set_dev_object(dev_node_obj, (u32) hdev_obj);
+ if (status) {
+ /* Clean up */
+ dev_destroy_device(hdev_obj);
+ hdev_obj = NULL;
+ }
+ }
+ if (!status) {
+ /* Create the Manager Object */
+ status = mgr_create(&hmgr_obj, dev_node_obj);
+ }
+ if (status) {
+ if (hdev_obj)
+ dev_destroy_device(hdev_obj);
+
+ /* Ensure the device extension is NULL */
+ cfg_set_dev_object(dev_node_obj, 0L);
+ }
+
+ return status;
+}
+
+/*
+ * ======== fxn_not_implemented ========
+ * Purpose:
+ * Takes the place of a Bridge Null Function.
+ * Parameters:
+ * Multiple, optional.
+ * Returns:
+ * -ENOSYS: Always.
+ */
+static int fxn_not_implemented(int arg, ...)
+{
+ return -ENOSYS;
+}
+
+/*
+ * ======== init_cod_mgr ========
+ * Purpose:
+ * Create a COD manager for this device.
+ * Parameters:
+ * dev_obj: Pointer to device object created with
+ * dev_create_device()
+ * Returns:
+ * 0: Success.
+ * -EFAULT: Invalid hdev_obj.
+ * Requires:
+ * Should only be called once by dev_create_device() for a given DevObject.
+ * Ensures:
+ */
+static int init_cod_mgr(struct dev_object *dev_obj)
+{
+ int status = 0;
+ char *sz_dummy_file = "dummy";
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
+
+ status = cod_create(&dev_obj->cod_mgr, sz_dummy_file, NULL);
+
+ return status;
+}
+
+/*
+ * ======== dev_insert_proc_object ========
+ * Purpose:
+ * Insert a ProcObject into the list maintained by DEV.
+ * Parameters:
+ * p_proc_object: Ptr to ProcObject to insert.
+ * dev_obj: Ptr to Dev Object where the list is.
+ * already_attached: Ptr to return whether a processor was already attached
+ * Returns:
+ * 0: If successful.
+ * Requires:
+ * List Exists
+ * hdev_obj is Valid handle
+ * DEV Initialized
+ * already_attached != NULL
+ * proc_obj != 0
+ * Ensures:
+ * 0 and List is not Empty.
+ */
+int dev_insert_proc_object(struct dev_object *hdev_obj,
+ u32 proc_obj, bool *already_attached)
+{
+ int status = 0;
+ struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dev_obj);
+ DBC_REQUIRE(proc_obj != 0);
+ DBC_REQUIRE(dev_obj->proc_list != NULL);
+ DBC_REQUIRE(already_attached != NULL);
+ if (!LST_IS_EMPTY(dev_obj->proc_list))
+ *already_attached = true;
+
+ /* Add DevObject to tail. */
+ lst_put_tail(dev_obj->proc_list, (struct list_head *)proc_obj);
+
+ DBC_ENSURE(!status && !LST_IS_EMPTY(dev_obj->proc_list));
+
+ return status;
+}
+
+/*
+ * ======== dev_remove_proc_object ========
+ * Purpose:
+ * Search for and remove a Proc object from the given list maintained
+ * by the DEV
+ * Parameters:
+ * p_proc_object: Ptr to ProcObject to remove.
+ * dev_obj: Ptr to Dev Object where the list is.
+ * Returns:
+ * 0: If successful.
+ * Requires:
+ * List exists and is not empty
+ * proc_obj != 0
+ * hdev_obj is a valid Dev handle.
+ * Ensures:
+ * Details:
+ * List will be deleted when the DEV is destroyed.
+ */
+int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
+{
+ int status = -EPERM;
+ struct list_head *cur_elem;
+ struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
+
+ DBC_REQUIRE(dev_obj);
+ DBC_REQUIRE(proc_obj != 0);
+ DBC_REQUIRE(dev_obj->proc_list != NULL);
+ DBC_REQUIRE(!LST_IS_EMPTY(dev_obj->proc_list));
+
+ /* Search list for dev_obj: */
+ for (cur_elem = lst_first(dev_obj->proc_list); cur_elem != NULL;
+ cur_elem = lst_next(dev_obj->proc_list, cur_elem)) {
+ /* If found, remove it. */
+ if ((u32) cur_elem == proc_obj) {
+ lst_remove_elem(dev_obj->proc_list, cur_elem);
+ status = 0;
+ break;
+ }
+ }
+
+ return status;
+}
+
+int dev_get_dev_type(struct dev_object *device_obj, u8 *dev_type)
+{
+ int status = 0;
+ struct dev_object *dev_obj = (struct dev_object *)device_obj;
+
+ *dev_type = dev_obj->dev_type;
+
+ return status;
+}
+
+/*
+ * ======== store_interface_fxns ========
+ * Purpose:
+ * Copy the Bridge's interface functions into the device object,
+ * ensuring that fxn_not_implemented() is set for:
+ *
+ * 1. All Bridge function pointers which are NULL; and
+ * 2. All function slots in the struct dev_object structure which have no
+ * corresponding slots in the Bridge's interface, because the Bridge
+ * is of an *older* version.
+ * Parameters:
+ * intf_fxns: Interface fxn Structure of the Bridge's Dev Object.
+ * drv_fxns: Interface Fxns offered by the Bridge during DEV_Create().
+ * Returns:
+ * Requires:
+ * Input pointers are valid.
+ * Bridge driver is *not* written for a newer DSP API.
+ * Ensures:
+ * All function pointers in the dev object's fxn interface are not NULL.
+ */
+static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
+ struct bridge_drv_interface *intf_fxns)
+{
+ u32 bridge_version;
+
+ /* Local helper macro: */
+#define STORE_FXN(cast, pfn) \
+ (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
+ (cast)fxn_not_implemented))
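+	/*
+	 * For example, STORE_FXN(fxn_brd_start, pfn_brd_start) expands to:
+	 *
+	 *   intf_fxns->pfn_brd_start = (drv_fxns->pfn_brd_start != NULL) ?
+	 *       drv_fxns->pfn_brd_start : (fxn_brd_start)fxn_not_implemented;
+	 *
+	 * (fxn_brd_start being the matching function-pointer type), so any
+	 * slot the Bridge driver left NULL falls back to
+	 * fxn_not_implemented(), which simply returns -ENOSYS.
+	 */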
+
+ DBC_REQUIRE(intf_fxns != NULL);
+ DBC_REQUIRE(drv_fxns != NULL);
+ DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
+ drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
+ bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
+ drv_fxns->brd_api_minor_version);
+ intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
+ intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version;
+ /* Install functions up to DSP API version .80 (first alpha): */
+ if (bridge_version > 0) {
+ STORE_FXN(fxn_dev_create, pfn_dev_create);
+ STORE_FXN(fxn_dev_destroy, pfn_dev_destroy);
+ STORE_FXN(fxn_dev_ctrl, pfn_dev_cntrl);
+ STORE_FXN(fxn_brd_monitor, pfn_brd_monitor);
+ STORE_FXN(fxn_brd_start, pfn_brd_start);
+ STORE_FXN(fxn_brd_stop, pfn_brd_stop);
+ STORE_FXN(fxn_brd_status, pfn_brd_status);
+ STORE_FXN(fxn_brd_read, pfn_brd_read);
+ STORE_FXN(fxn_brd_write, pfn_brd_write);
+ STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
+ STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
+ STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
+ STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
+ STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
+ STORE_FXN(fxn_chnl_create, pfn_chnl_create);
+ STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
+ STORE_FXN(fxn_chnl_open, pfn_chnl_open);
+ STORE_FXN(fxn_chnl_close, pfn_chnl_close);
+ STORE_FXN(fxn_chnl_addioreq, pfn_chnl_add_io_req);
+ STORE_FXN(fxn_chnl_getioc, pfn_chnl_get_ioc);
+ STORE_FXN(fxn_chnl_cancelio, pfn_chnl_cancel_io);
+ STORE_FXN(fxn_chnl_flushio, pfn_chnl_flush_io);
+ STORE_FXN(fxn_chnl_getinfo, pfn_chnl_get_info);
+ STORE_FXN(fxn_chnl_getmgrinfo, pfn_chnl_get_mgr_info);
+ STORE_FXN(fxn_chnl_idle, pfn_chnl_idle);
+ STORE_FXN(fxn_chnl_registernotify, pfn_chnl_register_notify);
+ STORE_FXN(fxn_io_create, pfn_io_create);
+ STORE_FXN(fxn_io_destroy, pfn_io_destroy);
+ STORE_FXN(fxn_io_onloaded, pfn_io_on_loaded);
+ STORE_FXN(fxn_io_getprocload, pfn_io_get_proc_load);
+ STORE_FXN(fxn_msg_create, pfn_msg_create);
+ STORE_FXN(fxn_msg_createqueue, pfn_msg_create_queue);
+ STORE_FXN(fxn_msg_delete, pfn_msg_delete);
+ STORE_FXN(fxn_msg_deletequeue, pfn_msg_delete_queue);
+ STORE_FXN(fxn_msg_get, pfn_msg_get);
+ STORE_FXN(fxn_msg_put, pfn_msg_put);
+ STORE_FXN(fxn_msg_registernotify, pfn_msg_register_notify);
+ STORE_FXN(fxn_msg_setqueueid, pfn_msg_set_queue_id);
+ }
+	/* Add code for any additional functions in newer Bridge versions here */
+ /* Ensure postcondition: */
+ DBC_ENSURE(intf_fxns->pfn_dev_create != NULL);
+ DBC_ENSURE(intf_fxns->pfn_dev_destroy != NULL);
+ DBC_ENSURE(intf_fxns->pfn_dev_cntrl != NULL);
+ DBC_ENSURE(intf_fxns->pfn_brd_monitor != NULL);
+ DBC_ENSURE(intf_fxns->pfn_brd_start != NULL);
+ DBC_ENSURE(intf_fxns->pfn_brd_stop != NULL);
+ DBC_ENSURE(intf_fxns->pfn_brd_status != NULL);
+ DBC_ENSURE(intf_fxns->pfn_brd_read != NULL);
+ DBC_ENSURE(intf_fxns->pfn_brd_write != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_create != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_destroy != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_open != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_close != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_add_io_req != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_get_ioc != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_cancel_io != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_flush_io != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_get_info != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_get_mgr_info != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_idle != NULL);
+ DBC_ENSURE(intf_fxns->pfn_chnl_register_notify != NULL);
+ DBC_ENSURE(intf_fxns->pfn_io_create != NULL);
+ DBC_ENSURE(intf_fxns->pfn_io_destroy != NULL);
+ DBC_ENSURE(intf_fxns->pfn_io_on_loaded != NULL);
+ DBC_ENSURE(intf_fxns->pfn_io_get_proc_load != NULL);
+ DBC_ENSURE(intf_fxns->pfn_msg_set_queue_id != NULL);
+
+#undef STORE_FXN
+}
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 00000000000..8685233d762
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
+/*
+ * dmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
+ * space that can be directly mapped to any MPU buffer or memory region
+ *
+ * Notes:
+ * Region: Generic memory entity having a start address and a size
+ * Chunk: Reserved region
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/proc.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/dmm.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+#define DMM_ADDR_VIRTUAL(a) \
+ (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
+ dyn_mem_map_beg)
+#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
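+
+/*
+ * Illustrative example (values assumed, not taken from this file): with
+ * dyn_mem_map_beg = 0x20000000 and PG_SIZE4K = 0x1000, the DSP virtual
+ * address 0x20003000 maps to index 3 of virtual_mapping_table, and
+ * DMM_ADDR_VIRTUAL(&virtual_mapping_table[3]) recovers 0x20003000. Each
+ * table entry therefore describes one 4 KB page of DSP virtual address
+ * space.
+ */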
+
+/* DMM Mgr */
+struct dmm_object {
+	/* The DMM lock serializes access to the memory manager
+	 * across multiple threads. */
+ spinlock_t dmm_lock; /* Lock to access dmm mgr */
+};
+
+/* ----------------------------------- Globals */
+static u32 refs; /* module reference count */
+struct map_page {
+ u32 region_size:15;
+ u32 mapped_size:15;
+ u32 reserved:1;
+ u32 mapped:1;
+};
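+
+/*
+ * Note: region_size and mapped_size are expressed in 4 KB pages and are
+ * 15-bit fields, so a single region or mapping covers at most 2^15 - 1
+ * pages (just under 128 MB); the four bitfields together pack into one
+ * 32-bit word per page.
+ */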
+
+/* Create the free list */
+static struct map_page *virtual_mapping_table;
+static u32 free_region; /* The index of free region */
+static u32 free_size;
+static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
+static u32 table_size; /* The size of virt and phys pages tables */
+
+/* ----------------------------------- Function Prototypes */
+static struct map_page *get_region(u32 addr);
+static struct map_page *get_free_region(u32 len);
+static struct map_page *get_mapped_region(u32 addrs);
+
+/* ======== dmm_create_tables ========
+ * Purpose:
+ * Create the table that holds information about the physical addresses
+ * of the buffer pages passed in by the user, and the table that tracks
+ * the virtual memory reserved for the DSP.
+ */
+int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+ int status = 0;
+
+ status = dmm_delete_tables(dmm_obj);
+ if (!status) {
+ dyn_mem_map_beg = addr;
+ table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
+ /* Create the free list */
+ virtual_mapping_table = __vmalloc(table_size *
+ sizeof(struct map_page), GFP_KERNEL |
+ __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ if (virtual_mapping_table == NULL)
+ status = -ENOMEM;
+ else {
+ /* On successful allocation,
+ * all entries are zero ('free') */
+ free_region = 0;
+ free_size = table_size * PG_SIZE4K;
+ virtual_mapping_table[0].region_size = table_size;
+ }
+ }
+
+ if (status)
+ pr_err("%s: failure, status 0x%x\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * ======== dmm_create ========
+ * Purpose:
+ * Create a dynamic memory manager object.
+ */
+int dmm_create(struct dmm_object **dmm_manager,
+ struct dev_object *hdev_obj,
+ const struct dmm_mgrattrs *mgr_attrts)
+{
+ struct dmm_object *dmm_obj = NULL;
+ int status = 0;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dmm_manager != NULL);
+
+ *dmm_manager = NULL;
+	/* create, zero, and tag a dmm mgr object */
+ dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
+ if (dmm_obj != NULL) {
+ spin_lock_init(&dmm_obj->dmm_lock);
+ *dmm_manager = dmm_obj;
+ } else {
+ status = -ENOMEM;
+ }
+
+ return status;
+}
+
+/*
+ * ======== dmm_destroy ========
+ * Purpose:
+ * Release the dynamic memory manager resources.
+ */
+int dmm_destroy(struct dmm_object *dmm_mgr)
+{
+ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ if (dmm_mgr) {
+ status = dmm_delete_tables(dmm_obj);
+ if (!status)
+ kfree(dmm_obj);
+ } else
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== dmm_delete_tables ========
+ * Purpose:
+ * Delete DMM Tables.
+ */
+int dmm_delete_tables(struct dmm_object *dmm_mgr)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ /* Delete all DMM tables */
+ if (dmm_mgr)
+ vfree(virtual_mapping_table);
+ else
+ status = -EFAULT;
+ return status;
+}
+
+/*
+ * ======== dmm_exit ========
+ * Purpose:
+ * Discontinue usage of module; free resources when reference count
+ * reaches 0.
+ */
+void dmm_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+}
+
+/*
+ * ======== dmm_get_handle ========
+ * Purpose:
+ * Return the dynamic memory manager object for this device.
+ * This is typically called from the client process.
+ */
+int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
+{
+ int status = 0;
+ struct dev_object *hdev_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dmm_manager != NULL);
+ if (hprocessor != NULL)
+ status = proc_get_dev_object(hprocessor, &hdev_obj);
+ else
+ hdev_obj = dev_get_first(); /* default */
+
+ if (!status)
+ status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
+
+ return status;
+}
+
+/*
+ * ======== dmm_init ========
+ * Purpose:
+ * Initializes private state of DMM module.
+ */
+bool dmm_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ virtual_mapping_table = NULL;
+ table_size = 0;
+
+ return ret;
+}
+
+/*
+ * ======== dmm_map_memory ========
+ * Purpose:
+ * Add a mapping block to the reserved chunk. DMM assumes that this block
+ * will be mapped in the DSP/IVA's address space. DMM returns an error if a
+ * mapping overlaps another one. This function stores the info that will be
+ * required later while unmapping the block.
+ */
+int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+ struct map_page *chunk;
+ int status = 0;
+
+ spin_lock(&dmm_obj->dmm_lock);
+ /* Find the Reserved memory chunk containing the DSP block to
+ * be mapped */
+ chunk = (struct map_page *)get_region(addr);
+ if (chunk != NULL) {
+ /* Mark the region 'mapped', leave the 'reserved' info as-is */
+ chunk->mapped = true;
+ chunk->mapped_size = (size / PG_SIZE4K);
+ } else
+ status = -ENOENT;
+ spin_unlock(&dmm_obj->dmm_lock);
+
+ dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
+ "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
+
+ return status;
+}
+
+/*
+ * ======== dmm_reserve_memory ========
+ * Purpose:
+ * Reserve a chunk of virtually contiguous DSP/IVA address space.
+ */
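+/*
+ * Worked example (numbers illustrative only, assuming 4 KB pages):
+ * reserving size = 0x3000 (three pages) from an 8-page free region splits
+ * that region into a 3-page chunk marked 'reserved' and a 5-page
+ * remainder that stays free; *prsv_addr receives the DSP virtual address
+ * of the chunk's first page.
+ */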
+int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
+ u32 *prsv_addr)
+{
+ int status = 0;
+ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+ struct map_page *node;
+ u32 rsv_addr = 0;
+ u32 rsv_size = 0;
+
+ spin_lock(&dmm_obj->dmm_lock);
+
+ /* Try to get a DSP chunk from the free list */
+ node = get_free_region(size);
+ if (node != NULL) {
+ /* DSP chunk of given size is available. */
+ rsv_addr = DMM_ADDR_VIRTUAL(node);
+		/* Calculate the number of entries to use */
+ rsv_size = size / PG_SIZE4K;
+ if (rsv_size < node->region_size) {
+ /* Mark remainder of free region */
+ node[rsv_size].mapped = false;
+ node[rsv_size].reserved = false;
+ node[rsv_size].region_size =
+ node->region_size - rsv_size;
+ node[rsv_size].mapped_size = 0;
+ }
+		/* get_free_region() may return a chunk larger than requested;
+		 * only the requested portion is used here. */
+ node->mapped = false;
+ node->reserved = true;
+ node->region_size = rsv_size;
+ node->mapped_size = 0;
+ /* Return the chunk's starting address */
+ *prsv_addr = rsv_addr;
+ } else
+		/* DSP chunk of given size is not available */
+ status = -ENOMEM;
+
+ spin_unlock(&dmm_obj->dmm_lock);
+
+ dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
+ "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
+ prsv_addr, status, rsv_addr, rsv_size);
+
+ return status;
+}
+
+/*
+ * ======== dmm_un_map_memory ========
+ * Purpose:
+ * Remove the mapped block from the reserved chunk.
+ */
+int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
+{
+ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+ struct map_page *chunk;
+ int status = 0;
+
+ spin_lock(&dmm_obj->dmm_lock);
+ chunk = get_mapped_region(addr);
+ if (chunk == NULL)
+ status = -ENOENT;
+
+ if (!status) {
+ /* Unmap the region */
+ *psize = chunk->mapped_size * PG_SIZE4K;
+ chunk->mapped = false;
+ chunk->mapped_size = 0;
+ }
+ spin_unlock(&dmm_obj->dmm_lock);
+
+ dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
+ "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
+
+ return status;
+}
+
+/*
+ * ======== dmm_un_reserve_memory ========
+ * Purpose:
+ * Free a chunk of reserved DSP/IVA address space.
+ */
+int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
+{
+ struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+ struct map_page *chunk;
+ u32 i;
+ int status = 0;
+ u32 chunk_size;
+
+ spin_lock(&dmm_obj->dmm_lock);
+
+ /* Find the chunk containing the reserved address */
+ chunk = get_mapped_region(rsv_addr);
+ if (chunk == NULL)
+ status = -ENOENT;
+
+ if (!status) {
+ /* Free all the mapped pages for this reserved region */
+ i = 0;
+ while (i < chunk->region_size) {
+ if (chunk[i].mapped) {
+ /* Remove mapping from the page tables. */
+ chunk_size = chunk[i].mapped_size;
+ /* Clear the mapping flags */
+ chunk[i].mapped = false;
+ chunk[i].mapped_size = 0;
+ i += chunk_size;
+ } else
+ i++;
+ }
+ /* Clear the flags (mark the region 'free') */
+ chunk->reserved = false;
+		/* NOTE: We do NOT coalesce free regions here.
+		 * Free regions are coalesced in get_free_region(), as it
+		 * traverses the whole mapping table.
+		 */
+ }
+ spin_unlock(&dmm_obj->dmm_lock);
+
+ dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
+ __func__, dmm_mgr, rsv_addr, status, chunk);
+
+ return status;
+}
+
+/*
+ * ======== get_region ========
+ * Purpose:
+ * Returns the region containing the specified address
+ */
+static struct map_page *get_region(u32 addr)
+{
+ struct map_page *curr_region = NULL;
+ u32 i = 0;
+
+ if (virtual_mapping_table != NULL) {
+ /* find page mapped by this address */
+ i = DMM_ADDR_TO_INDEX(addr);
+ if (i < table_size)
+ curr_region = virtual_mapping_table + i;
+ }
+
+ dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
+ __func__, curr_region, free_region, free_size);
+ return curr_region;
+}
+
+/*
+ * ======== get_free_region ========
+ * Purpose:
+ * Returns the requested free region
+ */
+static struct map_page *get_free_region(u32 len)
+{
+ struct map_page *curr_region = NULL;
+ u32 i = 0;
+ u32 region_size = 0;
+ u32 next_i = 0;
+
+ if (virtual_mapping_table == NULL)
+ return curr_region;
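+
+	/*
+	 * Fast path: free_region/free_size track the free region currently
+	 * being carved from. Only when that cached region is too small is
+	 * the whole table rescanned, coalescing adjacent free regions and
+	 * picking the largest one.
+	 */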
+ if (len > free_size) {
+ /* Find the largest free region
+ * (coalesce during the traversal) */
+ while (i < table_size) {
+ region_size = virtual_mapping_table[i].region_size;
+ next_i = i + region_size;
+ if (virtual_mapping_table[i].reserved == false) {
+ /* Coalesce, if possible */
+ if (next_i < table_size &&
+ virtual_mapping_table[next_i].reserved
+ == false) {
+ virtual_mapping_table[i].region_size +=
+ virtual_mapping_table
+ [next_i].region_size;
+ continue;
+ }
+ region_size *= PG_SIZE4K;
+ if (region_size > free_size) {
+ free_region = i;
+ free_size = region_size;
+ }
+ }
+ i = next_i;
+ }
+ }
+ if (len <= free_size) {
+ curr_region = virtual_mapping_table + free_region;
+ free_region += (len / PG_SIZE4K);
+ free_size -= len;
+ }
+ return curr_region;
+}
+
+/*
+ * ======== get_mapped_region ========
+ * Purpose:
+ * Returns the requested mapped region
+ */
+static struct map_page *get_mapped_region(u32 addrs)
+{
+ u32 i = 0;
+ struct map_page *curr_region = NULL;
+
+ if (virtual_mapping_table == NULL)
+ return curr_region;
+
+ i = DMM_ADDR_TO_INDEX(addrs);
+ if (i < table_size && (virtual_mapping_table[i].mapped ||
+ virtual_mapping_table[i].reserved))
+ curr_region = virtual_mapping_table + i;
+ return curr_region;
+}
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
+{
+ struct map_page *curr_node = NULL;
+ u32 i;
+ u32 freemem = 0;
+ u32 bigsize = 0;
+
+ spin_lock(&dmm_mgr->dmm_lock);
+
+ if (virtual_mapping_table != NULL) {
+ for (i = 0; i < table_size; i +=
+ virtual_mapping_table[i].region_size) {
+ curr_node = virtual_mapping_table + i;
+ if (curr_node->reserved) {
+ /*printk("RESERVED size = 0x%x, "
+ "Map size = 0x%x\n",
+ (curr_node->region_size * PG_SIZE4K),
+ (curr_node->mapped == false) ? 0 :
+ (curr_node->mapped_size * PG_SIZE4K));
+ */
+ } else {
+/* printk("UNRESERVED size = 0x%x\n",
+ (curr_node->region_size * PG_SIZE4K));
+ */
+ freemem += (curr_node->region_size * PG_SIZE4K);
+ if (curr_node->region_size > bigsize)
+ bigsize = curr_node->region_size;
+ }
+ }
+ }
+ spin_unlock(&dmm_mgr->dmm_lock);
+ printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+ freemem / (1024 * 1024));
+ printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
+ (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
+ printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
+ (bigsize * PG_SIZE4K / (1024 * 1024)));
+
+ return 0;
+}
+#endif
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
new file mode 100644
index 00000000000..7b42f72a97b
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -0,0 +1,1906 @@
+/*
+ * dspapi.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Common DSP API functions; also includes the wrapper
+ * functions called directly by the DeviceIOControl interface.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/services.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/chnl.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/drv.h>
+
+#include <dspbridge/proc.h>
+#include <dspbridge/strm.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/disp.h>
+#include <dspbridge/mgr.h>
+#include <dspbridge/node.h>
+#include <dspbridge/rmm.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/msg.h>
+#include <dspbridge/cmm.h>
+#include <dspbridge/io.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/dspapi.h>
+#include <dspbridge/dbdcd.h>
+
+#include <dspbridge/resourcecleanup.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+#define MAX_TRACEBUFLEN 255
+#define MAX_LOADARGS 16
+#define MAX_NODES 64
+#define MAX_STREAMS 16
+#define MAX_BUFS 64
+
+/* Used to get dspbridge ioctl table */
+#define DB_GET_IOC_TABLE(cmd) (DB_GET_MODULE(cmd) >> DB_MODULE_SHIFT)
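+
+/*
+ * Decoding sketch (the macro internals live in dspapi-ioctl.h): an ioctl
+ * command encodes a module id and a command offset. DB_GET_IOC_TABLE()
+ * turns the module id into an index into size_cmd[] below (used only for
+ * bounds checking), DB_GET_MODULE() selects the wrapper table (mgr_cmd,
+ * proc_cmd, ...), and DB_GET_IOC() gives the offset within that table;
+ * api_call_dev_ioctl() uses all three to dispatch to a *wrap_* function.
+ */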
+
+/* Device IOCtl function pointer */
+struct api_cmd {
+ u32(*fxn) (union trapped_args *args, void *pr_ctxt);
+ u32 dw_index;
+};
+
+/* ----------------------------------- Globals */
+static u32 api_c_refs;
+
+/*
+ * Function tables.
+ * The order of these functions MUST be the same as the order of the command
+ * numbers defined in dspapi-ioctl.h. This is how an IOCTL number in user mode
+ * turns into a function call in kernel mode.
+ */
+
+/* MGR wrapper functions */
+static struct api_cmd mgr_cmd[] = {
+ {mgrwrap_enum_node_info}, /* MGR_ENUMNODE_INFO */
+ {mgrwrap_enum_proc_info}, /* MGR_ENUMPROC_INFO */
+ {mgrwrap_register_object}, /* MGR_REGISTEROBJECT */
+ {mgrwrap_unregister_object}, /* MGR_UNREGISTEROBJECT */
+ {mgrwrap_wait_for_bridge_events}, /* MGR_WAIT */
+ {mgrwrap_get_process_resources_info}, /* MGR_GET_PROC_RES */
+};
+
+/* PROC wrapper functions */
+static struct api_cmd proc_cmd[] = {
+ {procwrap_attach}, /* PROC_ATTACH */
+ {procwrap_ctrl}, /* PROC_CTRL */
+ {procwrap_detach}, /* PROC_DETACH */
+ {procwrap_enum_node_info}, /* PROC_ENUMNODE */
+ {procwrap_enum_resources}, /* PROC_ENUMRESOURCES */
+ {procwrap_get_state}, /* PROC_GET_STATE */
+ {procwrap_get_trace}, /* PROC_GET_TRACE */
+ {procwrap_load}, /* PROC_LOAD */
+ {procwrap_register_notify}, /* PROC_REGISTERNOTIFY */
+ {procwrap_start}, /* PROC_START */
+ {procwrap_reserve_memory}, /* PROC_RSVMEM */
+ {procwrap_un_reserve_memory}, /* PROC_UNRSVMEM */
+ {procwrap_map}, /* PROC_MAPMEM */
+ {procwrap_un_map}, /* PROC_UNMAPMEM */
+ {procwrap_flush_memory}, /* PROC_FLUSHMEMORY */
+ {procwrap_stop}, /* PROC_STOP */
+ {procwrap_invalidate_memory}, /* PROC_INVALIDATEMEMORY */
+ {procwrap_begin_dma}, /* PROC_BEGINDMA */
+ {procwrap_end_dma}, /* PROC_ENDDMA */
+};
+
+/* NODE wrapper functions */
+static struct api_cmd node_cmd[] = {
+ {nodewrap_allocate}, /* NODE_ALLOCATE */
+ {nodewrap_alloc_msg_buf}, /* NODE_ALLOCMSGBUF */
+ {nodewrap_change_priority}, /* NODE_CHANGEPRIORITY */
+ {nodewrap_connect}, /* NODE_CONNECT */
+ {nodewrap_create}, /* NODE_CREATE */
+ {nodewrap_delete}, /* NODE_DELETE */
+ {nodewrap_free_msg_buf}, /* NODE_FREEMSGBUF */
+ {nodewrap_get_attr}, /* NODE_GETATTR */
+ {nodewrap_get_message}, /* NODE_GETMESSAGE */
+ {nodewrap_pause}, /* NODE_PAUSE */
+ {nodewrap_put_message}, /* NODE_PUTMESSAGE */
+ {nodewrap_register_notify}, /* NODE_REGISTERNOTIFY */
+ {nodewrap_run}, /* NODE_RUN */
+ {nodewrap_terminate}, /* NODE_TERMINATE */
+ {nodewrap_get_uuid_props}, /* NODE_GETUUIDPROPS */
+};
+
+/* STRM wrapper functions */
+static struct api_cmd strm_cmd[] = {
+ {strmwrap_allocate_buffer}, /* STRM_ALLOCATEBUFFER */
+ {strmwrap_close}, /* STRM_CLOSE */
+ {strmwrap_free_buffer}, /* STRM_FREEBUFFER */
+ {strmwrap_get_event_handle}, /* STRM_GETEVENTHANDLE */
+ {strmwrap_get_info}, /* STRM_GETINFO */
+ {strmwrap_idle}, /* STRM_IDLE */
+ {strmwrap_issue}, /* STRM_ISSUE */
+ {strmwrap_open}, /* STRM_OPEN */
+ {strmwrap_reclaim}, /* STRM_RECLAIM */
+ {strmwrap_register_notify}, /* STRM_REGISTERNOTIFY */
+ {strmwrap_select}, /* STRM_SELECT */
+};
+
+/* CMM wrapper functions */
+static struct api_cmd cmm_cmd[] = {
+ {cmmwrap_calloc_buf}, /* CMM_ALLOCBUF */
+ {cmmwrap_free_buf}, /* CMM_FREEBUF */
+ {cmmwrap_get_handle}, /* CMM_GETHANDLE */
+ {cmmwrap_get_info}, /* CMM_GETINFO */
+};
+
+/* Array used to store ioctl table sizes. It can hold up to 8 entries */
+static u8 size_cmd[] = {
+ ARRAY_SIZE(mgr_cmd),
+ ARRAY_SIZE(proc_cmd),
+ ARRAY_SIZE(node_cmd),
+ ARRAY_SIZE(strm_cmd),
+ ARRAY_SIZE(cmm_cmd),
+};
+
+static inline void _cp_fm_usr(void *to, const void __user * from,
+ int *err, unsigned long bytes)
+{
+ if (*err)
+ return;
+
+ if (unlikely(!from)) {
+ *err = -EFAULT;
+ return;
+ }
+
+ if (unlikely(copy_from_user(to, from, bytes)))
+ *err = -EFAULT;
+}
+
+#define CP_FM_USR(to, from, err, n) \
+ _cp_fm_usr(to, from, &(err), (n) * sizeof(*(to)))
+
+static inline void _cp_to_usr(void __user *to, const void *from,
+ int *err, unsigned long bytes)
+{
+ if (*err)
+ return;
+
+ if (unlikely(!to)) {
+ *err = -EFAULT;
+ return;
+ }
+
+ if (unlikely(copy_to_user(to, from, bytes)))
+ *err = -EFAULT;
+}
+
+#define CP_TO_USR(to, from, err, n) \
+ _cp_to_usr(to, from, &(err), (n) * sizeof(*(from)))
+
+/*
+ * ======== api_call_dev_ioctl ========
+ * Purpose:
+ * Call the (wrapper) function for the corresponding API IOCTL.
+ */
+inline int api_call_dev_ioctl(u32 cmd, union trapped_args *args,
+ u32 *result, void *pr_ctxt)
+{
+ u32(*ioctl_cmd) (union trapped_args *args, void *pr_ctxt) = NULL;
+ int i;
+
+ if (_IOC_TYPE(cmd) != DB) {
+ pr_err("%s: Incompatible dspbridge ioctl number\n", __func__);
+ goto err;
+ }
+
+	if (DB_GET_IOC_TABLE(cmd) >= ARRAY_SIZE(size_cmd)) {
+ pr_err("%s: undefined ioctl module\n", __func__);
+ goto err;
+ }
+
+ /* Check the size of the required cmd table */
+ i = DB_GET_IOC(cmd);
+	if (i >= size_cmd[DB_GET_IOC_TABLE(cmd)]) {
+ pr_err("%s: requested ioctl %d out of bounds for table %d\n",
+ __func__, i, DB_GET_IOC_TABLE(cmd));
+ goto err;
+ }
+
+ switch (DB_GET_MODULE(cmd)) {
+ case DB_MGR:
+ ioctl_cmd = mgr_cmd[i].fxn;
+ break;
+ case DB_PROC:
+ ioctl_cmd = proc_cmd[i].fxn;
+ break;
+ case DB_NODE:
+ ioctl_cmd = node_cmd[i].fxn;
+ break;
+ case DB_STRM:
+ ioctl_cmd = strm_cmd[i].fxn;
+ break;
+ case DB_CMM:
+ ioctl_cmd = cmm_cmd[i].fxn;
+ break;
+ }
+
+ if (!ioctl_cmd) {
+ pr_err("%s: requested ioctl not defined\n", __func__);
+ goto err;
+ } else {
+ *result = (*ioctl_cmd) (args, pr_ctxt);
+ }
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+/*
+ * ======== api_exit ========
+ */
+void api_exit(void)
+{
+ DBC_REQUIRE(api_c_refs > 0);
+ api_c_refs--;
+
+ if (api_c_refs == 0) {
+ /* Release all modules initialized in api_init(). */
+ cod_exit();
+ dev_exit();
+ chnl_exit();
+ msg_exit();
+ io_exit();
+ strm_exit();
+ disp_exit();
+ node_exit();
+ proc_exit();
+ mgr_exit();
+ rmm_exit();
+ drv_exit();
+ }
+ DBC_ENSURE(api_c_refs >= 0);
+}
+
+/*
+ * ======== api_init ========
+ * Purpose:
+ * Module initialization used by Bridge API.
+ */
+bool api_init(void)
+{
+ bool ret = true;
+ bool fdrv, fdev, fcod, fchnl, fmsg, fio;
+ bool fmgr, fproc, fnode, fdisp, fstrm, frmm;
+
+ if (api_c_refs == 0) {
+ /* initialize driver and other modules */
+ fdrv = drv_init();
+ fmgr = mgr_init();
+ fproc = proc_init();
+ fnode = node_init();
+ fdisp = disp_init();
+ fstrm = strm_init();
+ frmm = rmm_init();
+ fchnl = chnl_init();
+ fmsg = msg_mod_init();
+ fio = io_init();
+ fdev = dev_init();
+ fcod = cod_init();
+ ret = fdrv && fdev && fchnl && fcod && fmsg && fio;
+ ret = ret && fmgr && fproc && frmm;
+ if (!ret) {
+ if (fdrv)
+ drv_exit();
+
+ if (fmgr)
+ mgr_exit();
+
+ if (fstrm)
+ strm_exit();
+
+ if (fproc)
+ proc_exit();
+
+ if (fnode)
+ node_exit();
+
+ if (fdisp)
+ disp_exit();
+
+ if (fchnl)
+ chnl_exit();
+
+ if (fmsg)
+ msg_exit();
+
+ if (fio)
+ io_exit();
+
+ if (fdev)
+ dev_exit();
+
+ if (fcod)
+ cod_exit();
+
+ if (frmm)
+ rmm_exit();
+
+ }
+ }
+ if (ret)
+ api_c_refs++;
+
+ return ret;
+}
+
+/*
+ * ======== api_init_complete2 ========
+ * Purpose:
+ * Perform any required bridge initialization which cannot
+ * be performed in api_init() or dev_start_device() due
+ * to the fact that some services are not yet
+ * completely initialized.
+ * Parameters:
+ * Returns:
+ * 0: Allow this device to load
+ * -EPERM: Failure.
+ * Requires:
+ * Bridge API initialized.
+ * Ensures:
+ */
+int api_init_complete2(void)
+{
+ int status = 0;
+ struct cfg_devnode *dev_node;
+ struct dev_object *hdev_obj;
+ u8 dev_type;
+ u32 tmp;
+
+ DBC_REQUIRE(api_c_refs > 0);
+
+	/* Walk the list of DevObjects, get each devnode, and attempt to
+ * autostart the board. Note that this requires COF loading, which
+ * requires KFILE. */
+ for (hdev_obj = dev_get_first(); hdev_obj != NULL;
+ hdev_obj = dev_get_next(hdev_obj)) {
+ if (dev_get_dev_node(hdev_obj, &dev_node))
+ continue;
+
+ if (dev_get_dev_type(hdev_obj, &dev_type))
+ continue;
+
+ if ((dev_type == DSP_UNIT) || (dev_type == IVA_UNIT))
+ if (cfg_get_auto_start(dev_node, &tmp) == 0
+ && tmp)
+ proc_auto_start(dev_node, hdev_obj);
+ }
+
+ return status;
+}
+
+/* TODO: Remove deprecated and not implemented ioctl wrappers */
+
+/*
+ * ======== mgrwrap_enum_node_info ========
+ */
+u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
+{
+ u8 *pndb_props;
+ u32 num_nodes;
+ int status = 0;
+ u32 size = args->args_mgr_enumnode_info.undb_props_size;
+
+ if (size < sizeof(struct dsp_ndbprops))
+ return -EINVAL;
+
+ pndb_props = kmalloc(size, GFP_KERNEL);
+ if (pndb_props == NULL)
+ status = -ENOMEM;
+
+ if (!status) {
+ status =
+ mgr_enum_node_info(args->args_mgr_enumnode_info.node_id,
+ (struct dsp_ndbprops *)pndb_props, size,
+ &num_nodes);
+ }
+ CP_TO_USR(args->args_mgr_enumnode_info.pndb_props, pndb_props, status,
+ size);
+ CP_TO_USR(args->args_mgr_enumnode_info.pu_num_nodes, &num_nodes, status,
+ 1);
+ kfree(pndb_props);
+
+ return status;
+}
+
+/*
+ * ======== mgrwrap_enum_proc_info ========
+ */
+u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt)
+{
+ u8 *processor_info;
+ u8 num_procs;
+ int status = 0;
+ u32 size = args->args_mgr_enumproc_info.processor_info_size;
+
+ if (size < sizeof(struct dsp_processorinfo))
+ return -EINVAL;
+
+ processor_info = kmalloc(size, GFP_KERNEL);
+ if (processor_info == NULL)
+ status = -ENOMEM;
+
+ if (!status) {
+ status =
+ mgr_enum_processor_info(args->args_mgr_enumproc_info.
+ processor_id,
+ (struct dsp_processorinfo *)
+ processor_info, size, &num_procs);
+ }
+ CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info,
+ status, size);
+ CP_TO_USR(args->args_mgr_enumproc_info.pu_num_procs, &num_procs,
+ status, 1);
+ kfree(processor_info);
+
+ return status;
+}
+
+#define WRAP_MAP2CALLER(x) x
+/*
+ * ======== mgrwrap_register_object ========
+ */
+u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct dsp_uuid uuid_obj;
+ u32 path_size = 0;
+ char *psz_path_name = NULL;
+ int status = 0;
+
+ CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
+ if (status)
+ goto func_end;
+ /* path_size is increased by 1 to accommodate NULL */
+ path_size = strlen_user((char *)
+ args->args_mgr_registerobject.psz_path_name) +
+ 1;
+ psz_path_name = kmalloc(path_size, GFP_KERNEL);
+	if (!psz_path_name) {
+		status = -ENOMEM;
+		goto func_end;
+	}
+ ret = strncpy_from_user(psz_path_name,
+ (char *)args->args_mgr_registerobject.
+ psz_path_name, path_size);
+ if (!ret) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ if (args->args_mgr_registerobject.obj_type >= DSP_DCDMAXOBJTYPE)
+ return -EINVAL;
+
+ status = dcd_register_object(&uuid_obj,
+ args->args_mgr_registerobject.obj_type,
+ (char *)psz_path_name);
+func_end:
+ kfree(psz_path_name);
+ return status;
+}
+
+/*
+ * ======== mgrwrap_unregister_object ========
+ */
+u32 mgrwrap_unregister_object(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_uuid uuid_obj;
+
+ CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
+ if (status)
+ goto func_end;
+
+ status = dcd_unregister_object(&uuid_obj,
+ args->args_mgr_unregisterobject.
+ obj_type);
+func_end:
+ return status;
+
+}
+
+/*
+ * ======== mgrwrap_wait_for_bridge_events ========
+ */
+u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_notification *anotifications[MAX_EVENTS];
+ struct dsp_notification notifications[MAX_EVENTS];
+ u32 index, i;
+ u32 count = args->args_mgr_wait.count;
+
+ if (count > MAX_EVENTS)
+ status = -EINVAL;
+
+ /* get the array of pointers to user structures */
+ CP_FM_USR(anotifications, args->args_mgr_wait.anotifications,
+ status, count);
+ /* get the events */
+ for (i = 0; i < count; i++) {
+ CP_FM_USR(&notifications[i], anotifications[i], status, 1);
+ if (status || !notifications[i].handle) {
+ status = -EINVAL;
+ break;
+ }
+ /* set the array of pointers to kernel structures */
+ anotifications[i] = &notifications[i];
+ }
+ if (!status) {
+ status = mgr_wait_for_bridge_events(anotifications, count,
+ &index,
+ args->args_mgr_wait.
+ utimeout);
+ }
+ CP_TO_USR(args->args_mgr_wait.pu_index, &index, status, 1);
+ return status;
+}
+
+/*
+ * ======== MGRWRAP_GetProcessResourceInfo ========
+ */
+u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args * args,
+ void *pr_ctxt)
+{
+ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+ return 0;
+}
+
+/*
+ * ======== procwrap_attach ========
+ */
+u32 procwrap_attach(union trapped_args *args, void *pr_ctxt)
+{
+ void *processor;
+ int status = 0;
+ struct dsp_processorattrin proc_attr_in, *attr_in = NULL;
+
+ /* Optional argument */
+ if (args->args_proc_attach.attr_in) {
+ CP_FM_USR(&proc_attr_in, args->args_proc_attach.attr_in, status,
+ 1);
+ if (!status)
+ attr_in = &proc_attr_in;
+ else
+ goto func_end;
+
+ }
+ status = proc_attach(args->args_proc_attach.processor_id, attr_in,
+ &processor, pr_ctxt);
+ CP_TO_USR(args->args_proc_attach.ph_processor, &processor, status, 1);
+func_end:
+ return status;
+}
+
+/*
+ * ======== procwrap_ctrl ========
+ */
+u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt)
+{
+ u32 cb_data_size, __user * psize = (u32 __user *)
+ args->args_proc_ctrl.pargs;
+ u8 *pargs = NULL;
+ int status = 0;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (psize) {
+ if (get_user(cb_data_size, psize)) {
+ status = -EPERM;
+ goto func_end;
+ }
+ cb_data_size += sizeof(u32);
+ pargs = kmalloc(cb_data_size, GFP_KERNEL);
+ if (pargs == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ CP_FM_USR(pargs, args->args_proc_ctrl.pargs, status,
+ cb_data_size);
+ }
+ if (!status) {
+ status = proc_ctrl(hprocessor,
+ args->args_proc_ctrl.dw_cmd,
+ (struct dsp_cbdata *)pargs);
+ }
+
+ /* CP_TO_USR(args->args_proc_ctrl.pargs, pargs, status, 1); */
+ kfree(pargs);
+func_end:
+ return status;
+}
+
+/*
+ * ======== procwrap_detach ========
+ */
+u32 __deprecated procwrap_detach(union trapped_args * args, void *pr_ctxt)
+{
+ /* proc_detach called at bridge_release only */
+ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+ return 0;
+}
+
+/*
+ * ======== procwrap_enum_node_info ========
+ */
+u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ void *node_tab[MAX_NODES];
+ u32 num_nodes;
+ u32 alloc_cnt;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (!args->args_proc_enumnode_info.node_tab_size)
+ return -EINVAL;
+
+ status = proc_enum_nodes(hprocessor,
+ node_tab,
+ args->args_proc_enumnode_info.node_tab_size,
+ &num_nodes, &alloc_cnt);
+ CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status,
+ num_nodes);
+ CP_TO_USR(args->args_proc_enumnode_info.pu_num_nodes, &num_nodes,
+ status, 1);
+ CP_TO_USR(args->args_proc_enumnode_info.pu_allocated, &alloc_cnt,
+ status, 1);
+ return status;
+}
+
+u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+
+ if (args->args_proc_dma.dir >= DMA_NONE)
+ return -EINVAL;
+
+ status = proc_end_dma(pr_ctxt,
+ args->args_proc_dma.pmpu_addr,
+ args->args_proc_dma.ul_size,
+ args->args_proc_dma.dir);
+ return status;
+}
+
+u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+
+ if (args->args_proc_dma.dir >= DMA_NONE)
+ return -EINVAL;
+
+ status = proc_begin_dma(pr_ctxt,
+ args->args_proc_dma.pmpu_addr,
+ args->args_proc_dma.ul_size,
+ args->args_proc_dma.dir);
+ return status;
+}
+
+/*
+ * ======== procwrap_flush_memory ========
+ */
+u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+
+ if (args->args_proc_flushmemory.ul_flags >
+ PROC_WRITEBACK_INVALIDATE_MEM)
+ return -EINVAL;
+
+ status = proc_flush_memory(pr_ctxt,
+ args->args_proc_flushmemory.pmpu_addr,
+ args->args_proc_flushmemory.ul_size,
+ args->args_proc_flushmemory.ul_flags);
+ return status;
+}
+
+/*
+ * ======== procwrap_invalidate_memory ========
+ */
+u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+
+ status =
+ proc_invalidate_memory(pr_ctxt,
+ args->args_proc_invalidatememory.pmpu_addr,
+ args->args_proc_invalidatememory.ul_size);
+ return status;
+}
+
+/*
+ * ======== procwrap_enum_resources ========
+ */
+u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_resourceinfo resource_info;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (args->args_proc_enumresources.resource_info_size <
+ sizeof(struct dsp_resourceinfo))
+ return -EINVAL;
+
+ status =
+ proc_get_resource_info(hprocessor,
+ args->args_proc_enumresources.resource_type,
+ &resource_info,
+ args->args_proc_enumresources.
+ resource_info_size);
+
+ CP_TO_USR(args->args_proc_enumresources.resource_info, &resource_info,
+ status, 1);
+
+ return status;
+
+}
+
+/*
+ * ======== procwrap_get_state ========
+ */
+u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ struct dsp_processorstate proc_state;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (args->args_proc_getstate.state_info_size <
+ sizeof(struct dsp_processorstate))
+ return -EINVAL;
+
+ status = proc_get_state(hprocessor, &proc_state,
+ args->args_proc_getstate.state_info_size);
+ CP_TO_USR(args->args_proc_getstate.proc_state_obj, &proc_state, status,
+ 1);
+ return status;
+
+}
+
+/*
+ * ======== procwrap_get_trace ========
+ */
+u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ u8 *pbuf;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN)
+ return -EINVAL;
+
+ pbuf = kzalloc(args->args_proc_gettrace.max_size, GFP_KERNEL);
+ if (pbuf != NULL) {
+ status = proc_get_trace(hprocessor, pbuf,
+ args->args_proc_gettrace.max_size);
+ } else {
+ status = -ENOMEM;
+ }
+ CP_TO_USR(args->args_proc_gettrace.pbuf, pbuf, status,
+ args->args_proc_gettrace.max_size);
+ kfree(pbuf);
+
+ return status;
+}
+
+/*
+ * ======== procwrap_load ========
+ */
+u32 procwrap_load(union trapped_args *args, void *pr_ctxt)
+{
+ s32 i, len;
+ int status = 0;
+ char *temp;
+ s32 count = args->args_proc_load.argc_index;
+ u8 **argv = NULL, **envp = NULL;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (count <= 0 || count > MAX_LOADARGS) {
+ status = -EINVAL;
+ goto func_cont;
+ }
+
+ argv = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
+ if (!argv) {
+ status = -ENOMEM;
+ goto func_cont;
+ }
+
+ CP_FM_USR(argv, args->args_proc_load.user_args, status, count);
+ if (status) {
+ kfree(argv);
+ argv = NULL;
+ goto func_cont;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (argv[i]) {
+ /* User space pointer to argument */
+ temp = (char *)argv[i];
+ /* len is increased by 1 to accommodate NULL */
+ len = strlen_user((char *)temp) + 1;
+ /* Kernel space pointer to argument */
+ argv[i] = kmalloc(len, GFP_KERNEL);
+ if (argv[i]) {
+ CP_FM_USR(argv[i], temp, status, len);
+ if (status) {
+ kfree(argv[i]);
+ argv[i] = NULL;
+ goto func_cont;
+ }
+ } else {
+ status = -ENOMEM;
+ goto func_cont;
+ }
+ }
+ }
+ /* TODO: validate this */
+ if (args->args_proc_load.user_envp) {
+ /* number of elements in the envp array including NULL */
+ count = 0;
+ do {
+ get_user(temp, args->args_proc_load.user_envp + count);
+ count++;
+ } while (temp);
+ envp = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
+ if (!envp) {
+ status = -ENOMEM;
+ goto func_cont;
+ }
+
+ CP_FM_USR(envp, args->args_proc_load.user_envp, status, count);
+ if (status) {
+ kfree(envp);
+ envp = NULL;
+ goto func_cont;
+ }
+ for (i = 0; envp[i]; i++) {
+ /* User space pointer to argument */
+ temp = (char *)envp[i];
+ /* len is increased by 1 to accommodate NULL */
+ len = strlen_user((char *)temp) + 1;
+ /* Kernel space pointer to argument */
+ envp[i] = kmalloc(len, GFP_KERNEL);
+ if (envp[i]) {
+ CP_FM_USR(envp[i], temp, status, len);
+ if (status) {
+ kfree(envp[i]);
+ envp[i] = NULL;
+ goto func_cont;
+ }
+ } else {
+ status = -ENOMEM;
+ goto func_cont;
+ }
+ }
+ }
+
+ if (!status) {
+ status = proc_load(hprocessor,
+ args->args_proc_load.argc_index,
+ (const char **)argv, (const char **)envp);
+ }
+func_cont:
+ if (envp) {
+ i = 0;
+ while (envp[i])
+ kfree(envp[i++]);
+
+ kfree(envp);
+ }
+
+ if (argv) {
+ count = args->args_proc_load.argc_index;
+ for (i = 0; (i < count) && argv[i]; i++)
+ kfree(argv[i]);
+
+ kfree(argv);
+ }
+
+ return status;
+}
+
+/*
+ * ======== procwrap_map ========
+ */
+u32 procwrap_map(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ void *map_addr;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if (!args->args_proc_mapmem.ul_size)
+ return -EINVAL;
+
+ status = proc_map(args->args_proc_mapmem.hprocessor,
+ args->args_proc_mapmem.pmpu_addr,
+ args->args_proc_mapmem.ul_size,
+ args->args_proc_mapmem.req_addr, &map_addr,
+ args->args_proc_mapmem.ul_map_attr, pr_ctxt);
+ if (!status) {
+ if (put_user(map_addr, args->args_proc_mapmem.pp_map_addr)) {
+ status = -EINVAL;
+ proc_un_map(hprocessor, map_addr, pr_ctxt);
+ }
+
+ }
+ return status;
+}
+
+/*
+ * ======== procwrap_register_notify ========
+ */
+u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ struct dsp_notification notification;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ /* Initialize the notification data structure */
+ notification.ps_name = NULL;
+ notification.handle = NULL;
+
+ status = proc_register_notify(hprocessor,
+ args->args_proc_register_notify.event_mask,
+ args->args_proc_register_notify.notify_type,
+ &notification);
+ CP_TO_USR(args->args_proc_register_notify.hnotification, &notification,
+ status, 1);
+ return status;
+}
+
+/*
+ * ======== procwrap_reserve_memory ========
+ */
+u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ void *prsv_addr;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ if ((args->args_proc_rsvmem.ul_size <= 0) ||
+ (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
+ return -EINVAL;
+
+ status = proc_reserve_memory(hprocessor,
+ args->args_proc_rsvmem.ul_size, &prsv_addr,
+ pr_ctxt);
+ if (!status) {
+ if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
+ status = -EINVAL;
+ proc_un_reserve_memory(args->args_proc_rsvmem.
+ hprocessor, prsv_addr, pr_ctxt);
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== procwrap_start ========
+ */
+u32 procwrap_start(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+
+ ret = proc_start(((struct process_context *)pr_ctxt)->hprocessor);
+ return ret;
+}
+
+/*
+ * ======== procwrap_un_map ========
+ */
+u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+
+ status = proc_un_map(((struct process_context *)pr_ctxt)->hprocessor,
+ args->args_proc_unmapmem.map_addr, pr_ctxt);
+ return status;
+}
+
+/*
+ * ======== procwrap_un_reserve_memory ========
+ */
+u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ status = proc_un_reserve_memory(hprocessor,
+ args->args_proc_unrsvmem.prsv_addr,
+ pr_ctxt);
+ return status;
+}
+
+/*
+ * ======== procwrap_stop ========
+ */
+u32 procwrap_stop(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+
+ ret = proc_stop(((struct process_context *)pr_ctxt)->hprocessor);
+
+ return ret;
+}
+
+/*
+ * ======== find_handle =========
+ */
+inline void find_node_handle(struct node_res_object **noderes,
+ void *pr_ctxt, void *hnode)
+{
+ rcu_read_lock();
+ *noderes = idr_find(((struct process_context *)pr_ctxt)->node_id,
+ (int)hnode - 1);
+ rcu_read_unlock();
+ return;
+}
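+
+/*
+ * Note: user space sees node handles as idr ids biased by +1 (see
+ * nodewrap_allocate(), which hands back node_res->id + 1), so a handle
+ * of 0 can never resolve to a valid entry; hence the (int)hnode - 1
+ * passed to idr_find() above.
+ */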
+
+
+/*
+ * ======== nodewrap_allocate ========
+ */
+u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_uuid node_uuid;
+ u32 cb_data_size = 0;
+ u32 __user *psize = (u32 __user *) args->args_node_allocate.pargs;
+ u8 *pargs = NULL;
+ struct dsp_nodeattrin proc_attr_in, *attr_in = NULL;
+ struct node_res_object *node_res;
+ int nodeid;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ /* Optional argument */
+ if (psize) {
+ if (get_user(cb_data_size, psize))
+ status = -EPERM;
+
+ cb_data_size += sizeof(u32);
+ if (!status) {
+ pargs = kmalloc(cb_data_size, GFP_KERNEL);
+ if (pargs == NULL)
+ status = -ENOMEM;
+
+ }
+ CP_FM_USR(pargs, args->args_node_allocate.pargs, status,
+ cb_data_size);
+ }
+ CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1);
+ if (status)
+ goto func_cont;
+ /* Optional argument */
+ if (args->args_node_allocate.attr_in) {
+ CP_FM_USR(&proc_attr_in, args->args_node_allocate.attr_in,
+ status, 1);
+ if (!status)
+ attr_in = &proc_attr_in;
+ else
+ status = -ENOMEM;
+
+ }
+ if (!status) {
+ status = node_allocate(hprocessor,
+ &node_uuid, (struct dsp_cbdata *)pargs,
+ attr_in, &node_res, pr_ctxt);
+ }
+ if (!status) {
+ nodeid = node_res->id + 1;
+ CP_TO_USR(args->args_node_allocate.ph_node, &nodeid,
+ status, 1);
+ if (status) {
+ status = -EFAULT;
+ node_delete(node_res, pr_ctxt);
+ }
+ }
+func_cont:
+ kfree(pargs);
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_alloc_msg_buf ========
+ */
+u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_bufferattr *pattr = NULL;
+ struct dsp_bufferattr attr;
+ u8 *pbuffer = NULL;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt,
+ args->args_node_allocmsgbuf.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ if (!args->args_node_allocmsgbuf.usize)
+ return -EINVAL;
+
+ if (args->args_node_allocmsgbuf.pattr) { /* Optional argument */
+ CP_FM_USR(&attr, args->args_node_allocmsgbuf.pattr, status, 1);
+ if (!status)
+ pattr = &attr;
+
+ }
+ /* argument */
+ CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.pbuffer, status, 1);
+ if (!status) {
+ status = node_alloc_msg_buf(node_res->hnode,
+ args->args_node_allocmsgbuf.usize,
+ pattr, &pbuffer);
+ }
+ CP_TO_USR(args->args_node_allocmsgbuf.pbuffer, &pbuffer, status, 1);
+ return status;
+}
+
+/*
+ * ======== nodewrap_change_priority ========
+ */
+u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt,
+ args->args_node_changepriority.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ ret = node_change_priority(node_res->hnode,
+ args->args_node_changepriority.prio);
+
+ return ret;
+}
+
+/*
+ * ======== nodewrap_connect ========
+ */
+u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_strmattr attrs;
+ struct dsp_strmattr *pattrs = NULL;
+ u32 cb_data_size;
+ u32 __user *psize = (u32 __user *) args->args_node_connect.conn_param;
+ u8 *pargs = NULL;
+ struct node_res_object *node_res1, *node_res2;
+ struct node_object *node1 = NULL, *node2 = NULL;
+
+ if ((int)args->args_node_connect.hnode != DSP_HGPPNODE) {
+ find_node_handle(&node_res1, pr_ctxt,
+ args->args_node_connect.hnode);
+ if (node_res1)
+ node1 = node_res1->hnode;
+ } else {
+ node1 = args->args_node_connect.hnode;
+ }
+
+ if ((int)args->args_node_connect.other_node != DSP_HGPPNODE) {
+ find_node_handle(&node_res2, pr_ctxt,
+ args->args_node_connect.other_node);
+ if (node_res2)
+ node2 = node_res2->hnode;
+ } else {
+ node2 = args->args_node_connect.other_node;
+ }
+
+ if (!node1 || !node2)
+ return -EFAULT;
+
+ /* Optional argument */
+ if (psize) {
+ if (get_user(cb_data_size, psize))
+ status = -EPERM;
+
+ cb_data_size += sizeof(u32);
+ if (!status) {
+ pargs = kmalloc(cb_data_size, GFP_KERNEL);
+ if (pargs == NULL) {
+ status = -ENOMEM;
+ goto func_cont;
+ }
+
+ }
+ CP_FM_USR(pargs, args->args_node_connect.conn_param, status,
+ cb_data_size);
+ if (status)
+ goto func_cont;
+ }
+ if (args->args_node_connect.pattrs) { /* Optional argument */
+ CP_FM_USR(&attrs, args->args_node_connect.pattrs, status, 1);
+ if (!status)
+ pattrs = &attrs;
+
+ }
+ if (!status) {
+ status = node_connect(node1,
+ args->args_node_connect.stream_id,
+ node2,
+ args->args_node_connect.other_stream,
+ pattrs, (struct dsp_cbdata *)pargs);
+ }
+func_cont:
+ kfree(pargs);
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_create ========
+ */
+u32 nodewrap_create(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_create.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ ret = node_create(node_res->hnode);
+
+ return ret;
+}
+
+/*
+ * ======== nodewrap_delete ========
+ */
+u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_delete.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ ret = node_delete(node_res, pr_ctxt);
+
+ return ret;
+}
+
+/*
+ * ======== nodewrap_free_msg_buf ========
+ */
+u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_bufferattr *pattr = NULL;
+ struct dsp_bufferattr attr;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_freemsgbuf.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ if (args->args_node_freemsgbuf.pattr) { /* Optional argument */
+ CP_FM_USR(&attr, args->args_node_freemsgbuf.pattr, status, 1);
+ if (!status)
+ pattr = &attr;
+
+ }
+
+ if (!args->args_node_freemsgbuf.pbuffer)
+ return -EFAULT;
+
+ if (!status) {
+ status = node_free_msg_buf(node_res->hnode,
+ args->args_node_freemsgbuf.pbuffer,
+ pattr);
+ }
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_get_attr ========
+ */
+u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_nodeattr attr;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_getattr.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ status = node_get_attr(node_res->hnode, &attr,
+ args->args_node_getattr.attr_size);
+ CP_TO_USR(args->args_node_getattr.pattr, &attr, status, 1);
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_get_message ========
+ */
+u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ struct dsp_msg msg;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_getmessage.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ status = node_get_message(node_res->hnode, &msg,
+ args->args_node_getmessage.utimeout);
+
+ CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1);
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_pause ========
+ */
+u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_pause.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ ret = node_pause(node_res->hnode);
+
+ return ret;
+}
+
+/*
+ * ======== nodewrap_put_message ========
+ */
+u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_msg msg;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_putmessage.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ CP_FM_USR(&msg, args->args_node_putmessage.message, status, 1);
+
+ if (!status) {
+ status =
+ node_put_message(node_res->hnode, &msg,
+ args->args_node_putmessage.utimeout);
+ }
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_register_notify ========
+ */
+u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_notification notification;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt,
+ args->args_node_registernotify.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ /* Initialize the notification data structure */
+ notification.ps_name = NULL;
+ notification.handle = NULL;
+
+	if (!args->args_node_registernotify.event_mask)
+		CP_FM_USR(&notification,
+			  args->args_node_registernotify.hnotification,
+			  status, 1);
+
+ status = node_register_notify(node_res->hnode,
+ args->args_node_registernotify.event_mask,
+ args->args_node_registernotify.
+ notify_type, &notification);
+ CP_TO_USR(args->args_node_registernotify.hnotification, &notification,
+ status, 1);
+ return status;
+}
+
+/*
+ * ======== nodewrap_run ========
+ */
+u32 nodewrap_run(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_run.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ ret = node_run(node_res->hnode);
+
+ return ret;
+}
+
+/*
+ * ======== nodewrap_terminate ========
+ */
+u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ int tempstatus;
+ struct node_res_object *node_res;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_node_terminate.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ status = node_terminate(node_res->hnode, &tempstatus);
+
+ CP_TO_USR(args->args_node_terminate.pstatus, &tempstatus, status, 1);
+
+ return status;
+}
+
+/*
+ * ======== nodewrap_get_uuid_props ========
+ */
+u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_uuid node_uuid;
+ struct dsp_ndbprops *pnode_props = NULL;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status,
+ 1);
+ if (status)
+ goto func_cont;
+ pnode_props = kmalloc(sizeof(struct dsp_ndbprops), GFP_KERNEL);
+ if (pnode_props != NULL) {
+ status =
+ node_get_uuid_props(hprocessor, &node_uuid, pnode_props);
+ CP_TO_USR(args->args_node_getuuidprops.node_props, pnode_props,
+ status, 1);
+ } else
+ status = -ENOMEM;
+func_cont:
+ kfree(pnode_props);
+ return status;
+}
+
+/*
+ * ======== find_strm_handle =========
+ */
+inline void find_strm_handle(struct strm_res_object **strmres,
+ void *pr_ctxt, void *hstream)
+{
+ rcu_read_lock();
+ *strmres = idr_find(((struct process_context *)pr_ctxt)->stream_id,
+ (int)hstream - 1);
+ rcu_read_unlock();
+ return;
+}
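+
+/*
+ * Stream handles handed out to user space are the idr id plus one (see
+ * strmwrap_open(), which returns strm_res_obj->id + 1), so the lookup here
+ * subtracts one before calling idr_find() and a handle value of 0 can never
+ * resolve to a stream.  find_node_handle(), defined earlier, presumably
+ * follows the same convention for node handles.
+ */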
+
+/*
+ * ======== strmwrap_allocate_buffer ========
+ */
+u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt)
+{
+ int status;
+ u8 **ap_buffer = NULL;
+ u32 num_bufs = args->args_strm_allocatebuffer.num_bufs;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt,
+ args->args_strm_allocatebuffer.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ if (num_bufs > MAX_BUFS)
+ return -EINVAL;
+
+ ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
+ if (ap_buffer == NULL)
+ return -ENOMEM;
+
+ status = strm_allocate_buffer(strm_res,
+ args->args_strm_allocatebuffer.usize,
+ ap_buffer, num_bufs, pr_ctxt);
+ if (!status) {
+ CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer,
+ status, num_bufs);
+ if (status) {
+ status = -EFAULT;
+ strm_free_buffer(strm_res,
+ ap_buffer, num_bufs, pr_ctxt);
+ }
+ }
+ kfree(ap_buffer);
+
+ return status;
+}
+
+/*
+ * ======== strmwrap_close ========
+ */
+u32 strmwrap_close(union trapped_args *args, void *pr_ctxt)
+{
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_close.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ return strm_close(strm_res, pr_ctxt);
+}
+
+/*
+ * ======== strmwrap_free_buffer ========
+ */
+u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ u8 **ap_buffer = NULL;
+ u32 num_bufs = args->args_strm_freebuffer.num_bufs;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt,
+ args->args_strm_freebuffer.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ if (num_bufs > MAX_BUFS)
+ return -EINVAL;
+
+ ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
+ if (ap_buffer == NULL)
+ return -ENOMEM;
+
+ CP_FM_USR(ap_buffer, args->args_strm_freebuffer.ap_buffer, status,
+ num_bufs);
+
+ if (!status)
+ status = strm_free_buffer(strm_res,
+ ap_buffer, num_bufs, pr_ctxt);
+
+ CP_TO_USR(args->args_strm_freebuffer.ap_buffer, ap_buffer, status,
+ num_bufs);
+ kfree(ap_buffer);
+
+ return status;
+}
+
+/*
+ * ======== strmwrap_get_event_handle ========
+ */
+u32 __deprecated strmwrap_get_event_handle(union trapped_args * args,
+ void *pr_ctxt)
+{
+ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+ return -ENOSYS;
+}
+
+/*
+ * ======== strmwrap_get_info ========
+ */
+u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct stream_info strm_info;
+ struct dsp_streaminfo user;
+ struct dsp_streaminfo *temp;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt,
+ args->args_strm_getinfo.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ CP_FM_USR(&strm_info, args->args_strm_getinfo.stream_info, status, 1);
+ temp = strm_info.user_strm;
+
+ strm_info.user_strm = &user;
+
+ if (!status) {
+ status = strm_get_info(strm_res->hstream,
+ &strm_info,
+ args->args_strm_getinfo.
+ stream_info_size);
+ }
+ CP_TO_USR(temp, strm_info.user_strm, status, 1);
+ strm_info.user_strm = temp;
+ CP_TO_USR(args->args_strm_getinfo.stream_info, &strm_info, status, 1);
+ return status;
+}
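+
+/*
+ * strmwrap_get_info() juggles a nested user pointer: the dsp_streaminfo
+ * that stream_info.user_strm points to also lives in user space.  The
+ * wrapper saves that user pointer in 'temp', points user_strm at the
+ * kernel copy 'user' for the strm_get_info() call, copies the kernel copy
+ * back out to 'temp', restores the original pointer and only then copies
+ * the outer structure back, so user space never sees a kernel address in
+ * user_strm.
+ */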
+
+/*
+ * ======== strmwrap_idle ========
+ */
+u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt)
+{
+ u32 ret;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_idle.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ ret = strm_idle(strm_res->hstream, args->args_strm_idle.flush_flag);
+
+ return ret;
+}
+
+/*
+ * ======== strmwrap_issue ========
+ */
+u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_issue.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ if (!args->args_strm_issue.pbuffer)
+ return -EFAULT;
+
+	/* No need to do CP_FM_USR for the user buffer (pbuffer), as this is
+	 * done in the Bridge internal function bridge_chnl_add_io_req() in
+	 * chnl_sm.c. */
+ status = strm_issue(strm_res->hstream,
+ args->args_strm_issue.pbuffer,
+ args->args_strm_issue.dw_bytes,
+ args->args_strm_issue.dw_buf_size,
+ args->args_strm_issue.dw_arg);
+
+ return status;
+}
+
+/*
+ * ======== strmwrap_open ========
+ */
+u32 strmwrap_open(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct strm_attr attr;
+ struct strm_res_object *strm_res_obj;
+ struct dsp_streamattrin strm_attr_in;
+ struct node_res_object *node_res;
+ int strmid;
+
+ find_node_handle(&node_res, pr_ctxt, args->args_strm_open.hnode);
+
+ if (!node_res)
+ return -EFAULT;
+
+ CP_FM_USR(&attr, args->args_strm_open.attr_in, status, 1);
+
+ if (attr.stream_attr_in != NULL) { /* Optional argument */
+ CP_FM_USR(&strm_attr_in, attr.stream_attr_in, status, 1);
+ if (!status) {
+ attr.stream_attr_in = &strm_attr_in;
+ if (attr.stream_attr_in->strm_mode == STRMMODE_LDMA)
+ return -ENOSYS;
+ }
+
+ }
+ status = strm_open(node_res->hnode,
+ args->args_strm_open.direction,
+ args->args_strm_open.index, &attr, &strm_res_obj,
+ pr_ctxt);
+ if (!status) {
+ strmid = strm_res_obj->id + 1;
+ CP_TO_USR(args->args_strm_open.ph_stream, &strmid, status, 1);
+ }
+ return status;
+}
+
+/*
+ * ======== strmwrap_reclaim ========
+ */
+u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ u8 *buf_ptr;
+ u32 ul_bytes;
+ u32 dw_arg;
+ u32 ul_buf_size;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt, args->args_strm_reclaim.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ status = strm_reclaim(strm_res->hstream, &buf_ptr,
+ &ul_bytes, &ul_buf_size, &dw_arg);
+ CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1);
+ CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1);
+ CP_TO_USR(args->args_strm_reclaim.pdw_arg, &dw_arg, status, 1);
+
+ if (args->args_strm_reclaim.buf_size_ptr != NULL) {
+ CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size,
+ status, 1);
+ }
+
+ return status;
+}
+
+/*
+ * ======== strmwrap_register_notify ========
+ */
+u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct dsp_notification notification;
+ struct strm_res_object *strm_res;
+
+ find_strm_handle(&strm_res, pr_ctxt,
+ args->args_strm_registernotify.hstream);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ /* Initialize the notification data structure */
+ notification.ps_name = NULL;
+ notification.handle = NULL;
+
+ status = strm_register_notify(strm_res->hstream,
+ args->args_strm_registernotify.event_mask,
+ args->args_strm_registernotify.
+ notify_type, &notification);
+ CP_TO_USR(args->args_strm_registernotify.hnotification, &notification,
+ status, 1);
+
+ return status;
+}
+
+/*
+ * ======== strmwrap_select ========
+ */
+u32 strmwrap_select(union trapped_args *args, void *pr_ctxt)
+{
+ u32 mask;
+ struct strm_object *strm_tab[MAX_STREAMS];
+ int status = 0;
+ struct strm_res_object *strm_res;
+ int *ids[MAX_STREAMS];
+ int i;
+
+ if (args->args_strm_select.strm_num > MAX_STREAMS)
+ return -EINVAL;
+
+ CP_FM_USR(ids, args->args_strm_select.stream_tab, status,
+ args->args_strm_select.strm_num);
+
+ if (status)
+ return status;
+
+ for (i = 0; i < args->args_strm_select.strm_num; i++) {
+ find_strm_handle(&strm_res, pr_ctxt, ids[i]);
+
+ if (!strm_res)
+ return -EFAULT;
+
+ strm_tab[i] = strm_res->hstream;
+ }
+
+ if (!status) {
+ status = strm_select(strm_tab, args->args_strm_select.strm_num,
+ &mask, args->args_strm_select.utimeout);
+ }
+ CP_TO_USR(args->args_strm_select.pmask, &mask, status, 1);
+ return status;
+}
+
+/* CMM */
+
+/*
+ * ======== cmmwrap_calloc_buf ========
+ */
+u32 __deprecated cmmwrap_calloc_buf(union trapped_args * args, void *pr_ctxt)
+{
+ /* This operation is done in kernel */
+ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+ return -ENOSYS;
+}
+
+/*
+ * ======== cmmwrap_free_buf ========
+ */
+u32 __deprecated cmmwrap_free_buf(union trapped_args * args, void *pr_ctxt)
+{
+ /* This operation is done in kernel */
+ pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+ return -ENOSYS;
+}
+
+/*
+ * ======== cmmwrap_get_handle ========
+ */
+u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct cmm_object *hcmm_mgr;
+ void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+ status = cmm_get_handle(hprocessor, &hcmm_mgr);
+
+ CP_TO_USR(args->args_cmm_gethandle.ph_cmm_mgr, &hcmm_mgr, status, 1);
+
+ return status;
+}
+
+/*
+ * ======== cmmwrap_get_info ========
+ */
+u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt)
+{
+ int status = 0;
+ struct cmm_info cmm_info_obj;
+
+ status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj);
+
+ CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status,
+ 1);
+
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
new file mode 100644
index 00000000000..7970fe55648
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -0,0 +1,142 @@
+/*
+ * io.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO manager interface: Manages IO between CHNL and msg_ctrl.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- This */
+#include <ioobj.h>
+#include <dspbridge/iodefs.h>
+#include <dspbridge/io.h>
+
+/* ----------------------------------- Globals */
+static u32 refs;
+
+/*
+ * ======== io_create ========
+ * Purpose:
+ * Create an IO manager object, responsible for managing IO between
+ * CHNL and msg_ctrl
+ */
+int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
+ const struct io_attrs *mgr_attrts)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct io_mgr *hio_mgr = NULL;
+ struct io_mgr_ *pio_mgr = NULL;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(io_man != NULL);
+ DBC_REQUIRE(mgr_attrts != NULL);
+
+ *io_man = NULL;
+
+ /* A memory base of 0 implies no memory base: */
+ if ((mgr_attrts->shm_base != 0) && (mgr_attrts->usm_length == 0))
+ status = -EINVAL;
+
+ if (mgr_attrts->word_size == 0)
+ status = -EINVAL;
+
+ if (!status) {
+ dev_get_intf_fxns(hdev_obj, &intf_fxns);
+
+ /* Let Bridge channel module finish the create: */
+ status = (*intf_fxns->pfn_io_create) (&hio_mgr, hdev_obj,
+ mgr_attrts);
+
+ if (!status) {
+ pio_mgr = (struct io_mgr_ *)hio_mgr;
+ pio_mgr->intf_fxns = intf_fxns;
+ pio_mgr->hdev_obj = hdev_obj;
+
+ /* Return the new channel manager handle: */
+ *io_man = hio_mgr;
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== io_destroy ========
+ * Purpose:
+ * Delete IO manager.
+ */
+int io_destroy(struct io_mgr *hio_mgr)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
+ int status;
+
+ DBC_REQUIRE(refs > 0);
+
+ intf_fxns = pio_mgr->intf_fxns;
+
+ /* Let Bridge channel module destroy the io_mgr: */
+ status = (*intf_fxns->pfn_io_destroy) (hio_mgr);
+
+ return status;
+}
+
+/*
+ * ======== io_exit ========
+ * Purpose:
+ * Discontinue usage of the IO module.
+ */
+void io_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== io_init ========
+ * Purpose:
+ * Initialize the IO module's private state.
+ */
+bool io_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/ioobj.h b/drivers/staging/tidspbridge/pmgr/ioobj.h
new file mode 100644
index 00000000000..f46355fa7b2
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/ioobj.h
@@ -0,0 +1,38 @@
+/*
+ * ioobj.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structure subcomponents of channel class library IO objects which
+ * are exposed to DSP API from Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef IOOBJ_
+#define IOOBJ_
+
+#include <dspbridge/devdefs.h>
+#include <dspbridge/dspdefs.h>
+
+/*
+ * This struct is the first field in an io_mgr struct. Other,
+ * implementation-specific fields follow this structure in memory.
+ */
+struct io_mgr_ {
+	/* These must be the first fields in an io_mgr struct: */
+ struct bridge_dev_context *hbridge_context; /* Bridge context. */
+ /* Function interface to Bridge driver. */
+ struct bridge_drv_interface *intf_fxns;
+ struct dev_object *hdev_obj; /* Device this board represents. */
+};
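+
+/*
+ * The Bridge driver allocates its own, larger io manager object whose
+ * leading fields match struct io_mgr_, so the generic pmgr code can cast
+ * the opaque io_mgr handle and reach the shared members, roughly:
+ *
+ *	struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
+ *
+ *	pio_mgr->intf_fxns = intf_fxns;
+ *	pio_mgr->hdev_obj = hdev_obj;
+ *
+ * (this is what io_create() in io.c does after pfn_io_create() succeeds).
+ */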
+
+#endif /* IOOBJ_ */
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
new file mode 100644
index 00000000000..abd43659062
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -0,0 +1,129 @@
+/*
+ * msg.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge msg_ctrl Module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- This */
+#include <msgobj.h>
+#include <dspbridge/msg.h>
+
+/* ----------------------------------- Globals */
+static u32 refs; /* module reference count */
+
+/*
+ * ======== msg_create ========
+ * Purpose:
+ * Create an object to manage message queues. Only one of these objects
+ * can exist per device object.
+ */
+int msg_create(struct msg_mgr **msg_man,
+ struct dev_object *hdev_obj, msg_onexit msg_callback)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct msg_mgr_ *msg_mgr_obj;
+ struct msg_mgr *hmsg_mgr;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(msg_man != NULL);
+ DBC_REQUIRE(msg_callback != NULL);
+ DBC_REQUIRE(hdev_obj != NULL);
+
+ *msg_man = NULL;
+
+ dev_get_intf_fxns(hdev_obj, &intf_fxns);
+
+ /* Let Bridge message module finish the create: */
+ status =
+ (*intf_fxns->pfn_msg_create) (&hmsg_mgr, hdev_obj, msg_callback);
+
+ if (!status) {
+ /* Fill in DSP API message module's fields of the msg_mgr
+ * structure */
+ msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
+ msg_mgr_obj->intf_fxns = intf_fxns;
+
+ /* Finally, return the new message manager handle: */
+ *msg_man = hmsg_mgr;
+ } else {
+ status = -EPERM;
+ }
+ return status;
+}
+
+/*
+ * ======== msg_delete ========
+ * Purpose:
+ * Delete a msg_ctrl manager allocated in msg_create().
+ */
+void msg_delete(struct msg_mgr *hmsg_mgr)
+{
+ struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
+ struct bridge_drv_interface *intf_fxns;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (msg_mgr_obj) {
+ intf_fxns = msg_mgr_obj->intf_fxns;
+
+ /* Let Bridge message module destroy the msg_mgr: */
+ (*intf_fxns->pfn_msg_delete) (hmsg_mgr);
+ } else {
+ dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n",
+ __func__, hmsg_mgr);
+ }
+}
+
+/*
+ * ======== msg_exit ========
+ */
+void msg_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== msg_mod_init ========
+ */
+bool msg_mod_init(void)
+{
+ DBC_REQUIRE(refs >= 0);
+
+ refs++;
+
+ DBC_ENSURE(refs >= 0);
+
+ return true;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/msgobj.h b/drivers/staging/tidspbridge/pmgr/msgobj.h
new file mode 100644
index 00000000000..14ca633c56c
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/msgobj.h
@@ -0,0 +1,38 @@
+/*
+ * msgobj.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structure subcomponents of channel class library msg_ctrl objects which
+ * are exposed to DSP API from Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MSGOBJ_
+#define MSGOBJ_
+
+#include <dspbridge/dspdefs.h>
+
+#include <dspbridge/msgdefs.h>
+
+/*
+ * This struct is the first field in a msg_mgr struct. Other,
+ * implementation-specific fields follow this structure in memory.
+ */
+struct msg_mgr_ {
+ /* The first field must match that in _msg_sm.h */
+
+ /* Function interface to Bridge driver. */
+ struct bridge_drv_interface *intf_fxns;
+};
+
+#endif /* MSGOBJ_ */
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
new file mode 100644
index 00000000000..f71e8606f95
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -0,0 +1,1512 @@
+/*
+ * dbdcd.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This file contains the implementation of the DSP/BIOS Bridge
+ * Configuration Database (DCD).
+ *
+ * Notes:
+ * The fxn dcd_get_objects can apply a callback fxn to each DCD object
+ * that is located in a specified COFF file. At the moment,
+ * dcd_auto_register, dcd_auto_unregister, and the NLDR module all use
+ * dcd_get_objects.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/cod.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/uuidutil.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/dbdcd.h>
+
+/* ----------------------------------- Global defines. */
+#define MAX_INT2CHAR_LENGTH 16 /* Max int2char len of 32 bit int */
+
+/* Name of section containing dependent libraries */
+#define DEPLIBSECT ".dspbridge_deplibs"
+
+/* DCD specific structures. */
+struct dcd_manager {
+ struct cod_manager *cod_mgr; /* Handle to COD manager object. */
+};
+
+/* Pointer to the registry support key */
+static struct list_head reg_key_list;
+static DEFINE_SPINLOCK(dbdcd_lock);
+
+/* Global reference variables. */
+static u32 refs;
+static u32 enum_refs;
+
+/* Helper function prototypes. */
+static s32 atoi(char *psz_buf);
+static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
+ enum dsp_dcdobjtype obj_type,
+ struct dcd_genericobj *gen_obj);
+static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size);
+static char dsp_char2_gpp_char(char *word, s32 dsp_char_size);
+static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ u16 *num_libs,
+ u16 *num_pers_libs,
+ struct dsp_uuid *dep_lib_uuids,
+ bool *prstnt_dep_libs,
+ enum nldr_phase phase);
+
+/*
+ * ======== dcd_auto_register ========
+ * Purpose:
+ * Parses the supplied image and registers it with the DCD.
+ */
+int dcd_auto_register(struct dcd_manager *hdcd_mgr,
+ char *sz_coff_path)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (hdcd_mgr)
+ status = dcd_get_objects(hdcd_mgr, sz_coff_path,
+ (dcd_registerfxn) dcd_register_object,
+ (void *)sz_coff_path);
+ else
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== dcd_auto_unregister ========
+ * Purpose:
+ * Parses the supplied DSP image and unregisters it from the DCD.
+ */
+int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
+ char *sz_coff_path)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (hdcd_mgr)
+ status = dcd_get_objects(hdcd_mgr, sz_coff_path,
+ (dcd_registerfxn) dcd_register_object,
+ NULL);
+ else
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== dcd_create_manager ========
+ * Purpose:
+ * Creates DCD manager.
+ */
+int dcd_create_manager(char *sz_zl_dll_name,
+ struct dcd_manager **dcd_mgr)
+{
+ struct cod_manager *cod_mgr; /* COD manager handle */
+ struct dcd_manager *dcd_mgr_obj = NULL; /* DCD Manager pointer */
+ int status = 0;
+
+ DBC_REQUIRE(refs >= 0);
+ DBC_REQUIRE(dcd_mgr);
+
+ status = cod_create(&cod_mgr, sz_zl_dll_name, NULL);
+ if (status)
+ goto func_end;
+
+ /* Create a DCD object. */
+ dcd_mgr_obj = kzalloc(sizeof(struct dcd_manager), GFP_KERNEL);
+ if (dcd_mgr_obj != NULL) {
+ /* Fill out the object. */
+ dcd_mgr_obj->cod_mgr = cod_mgr;
+
+ /* Return handle to this DCD interface. */
+ *dcd_mgr = dcd_mgr_obj;
+ } else {
+ status = -ENOMEM;
+
+ /*
+ * If allocation of DcdManager object failed, delete the
+ * COD manager.
+ */
+ cod_delete(cod_mgr);
+ }
+
+ DBC_ENSURE((!status) ||
+ ((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
+
+func_end:
+ return status;
+}
+
+/*
+ * ======== dcd_destroy_manager ========
+ * Purpose:
+ * Frees DCD Manager object.
+ */
+int dcd_destroy_manager(struct dcd_manager *hdcd_mgr)
+{
+ struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
+ int status = -EFAULT;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (hdcd_mgr) {
+ /* Delete the COD manager. */
+ cod_delete(dcd_mgr_obj->cod_mgr);
+
+ /* Deallocate a DCD manager object. */
+ kfree(dcd_mgr_obj);
+
+ status = 0;
+ }
+
+ return status;
+}
+
+/*
+ * ======== dcd_enumerate_object ========
+ * Purpose:
+ * Enumerates objects in the DCD.
+ */
+int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
+ struct dsp_uuid *uuid_obj)
+{
+ int status = 0;
+ char sz_reg_key[DCD_MAXPATHLENGTH];
+ char sz_value[DCD_MAXPATHLENGTH];
+ struct dsp_uuid dsp_uuid_obj;
+ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
+ u32 dw_key_len = 0;
+ struct dcd_key_elem *dcd_key;
+ int len;
+
+ DBC_REQUIRE(refs >= 0);
+ DBC_REQUIRE(index >= 0);
+ DBC_REQUIRE(uuid_obj != NULL);
+
+ if ((index != 0) && (enum_refs == 0)) {
+ /*
+ * If an enumeration is being performed on an index greater
+ * than zero, then the current enum_refs must have been
+ * incremented to greater than zero.
+ */
+ status = -EIDRM;
+ } else {
+ /*
+		 * Pre-determine final key length. It's the length of DCD_REGKEY +
+ * "_\0" + length of sz_obj_type string + terminating NULL.
+ */
+ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
+ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
+
+ /* Create proper REG key; concatenate DCD_REGKEY with
+ * obj_type. */
+ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
+ if ((strlen(sz_reg_key) + strlen("_\0")) <
+ DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, "_\0", 2);
+ } else {
+ status = -EPERM;
+ }
+
+ /* This snprintf is guaranteed not to exceed max size of an
+ * integer. */
+ status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d",
+ obj_type);
+
+ if (status == -1) {
+ status = -EPERM;
+ } else {
+ status = 0;
+ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
+ DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, sz_obj_type,
+ strlen(sz_obj_type) + 1);
+ } else {
+ status = -EPERM;
+ }
+ }
+
+ if (!status) {
+ len = strlen(sz_reg_key);
+ spin_lock(&dbdcd_lock);
+ list_for_each_entry(dcd_key, &reg_key_list, link) {
+ if (!strncmp(dcd_key->name, sz_reg_key, len)
+ && !index--) {
+ strncpy(sz_value, &dcd_key->name[len],
+ strlen(&dcd_key->name[len]) + 1);
+ break;
+ }
+ }
+ spin_unlock(&dbdcd_lock);
+
+ if (&dcd_key->link == &reg_key_list)
+ status = -ENODATA;
+ }
+
+ if (!status) {
+ /* Create UUID value using string retrieved from
+ * registry. */
+ uuid_uuid_from_string(sz_value, &dsp_uuid_obj);
+
+ *uuid_obj = dsp_uuid_obj;
+
+ /* Increment enum_refs to update reference count. */
+ enum_refs++;
+
+ status = 0;
+ } else if (status == -ENODATA) {
+ /* At the end of enumeration. Reset enum_refs. */
+ enum_refs = 0;
+
+ /*
+			 * TODO: Revisit, this is not an error case but code
+ * expects non-zero value.
+ */
+ status = ENODATA;
+ } else {
+ status = -EPERM;
+ }
+ }
+
+ DBC_ENSURE(uuid_obj || (status == -EPERM));
+
+ return status;
+}
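+
+/*
+ * Callers are expected to walk the database by calling
+ * dcd_enumerate_object() with index 0, 1, 2, ... until it returns the
+ * positive ENODATA sentinel, which also resets enum_refs; a successful
+ * register/unregister in between resets enum_refs too, so a stale
+ * enumeration gets -EIDRM and has to restart from index 0.  A minimal
+ * sketch of such a loop, where use_uuid() stands in for whatever the
+ * caller does with each UUID:
+ *
+ *	struct dsp_uuid uuid;
+ *	s32 i;
+ *
+ *	for (i = 0; !dcd_enumerate_object(i, DSP_DCDNODETYPE, &uuid); i++)
+ *		use_uuid(&uuid);
+ */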
+
+/*
+ * ======== dcd_exit ========
+ * Purpose:
+ * Discontinue usage of the DCD module.
+ */
+void dcd_exit(void)
+{
+ struct dcd_key_elem *rv, *rv_tmp;
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+ if (refs == 0) {
+ cod_exit();
+ list_for_each_entry_safe(rv, rv_tmp, &reg_key_list, link) {
+ list_del(&rv->link);
+ kfree(rv->path);
+ kfree(rv);
+ }
+ }
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== dcd_get_dep_libs ========
+ */
+int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ u16 num_libs, struct dsp_uuid *dep_lib_uuids,
+ bool *prstnt_dep_libs,
+ enum nldr_phase phase)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hdcd_mgr);
+ DBC_REQUIRE(uuid_obj != NULL);
+ DBC_REQUIRE(dep_lib_uuids != NULL);
+ DBC_REQUIRE(prstnt_dep_libs != NULL);
+
+ status =
+ get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids,
+ prstnt_dep_libs, phase);
+
+ return status;
+}
+
+/*
+ * ======== dcd_get_num_dep_libs ========
+ */
+int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ u16 *num_libs, u16 *num_pers_libs,
+ enum nldr_phase phase)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hdcd_mgr);
+ DBC_REQUIRE(num_libs != NULL);
+ DBC_REQUIRE(num_pers_libs != NULL);
+ DBC_REQUIRE(uuid_obj != NULL);
+
+ status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs,
+ NULL, NULL, phase);
+
+ return status;
+}
+
+/*
+ * ======== dcd_get_object_def ========
+ * Purpose:
+ * Retrieves the properties of a node or processor based on the UUID and
+ * object type.
+ */
+int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *obj_uuid,
+ enum dsp_dcdobjtype obj_type,
+ struct dcd_genericobj *obj_def)
+{
+ struct dcd_manager *dcd_mgr_obj = hdcd_mgr; /* ptr to DCD mgr */
+ struct cod_libraryobj *lib = NULL;
+ int status = 0;
+ u32 ul_addr = 0; /* Used by cod_get_section */
+ u32 ul_len = 0; /* Used by cod_get_section */
+ u32 dw_buf_size; /* Used by REG functions */
+ char sz_reg_key[DCD_MAXPATHLENGTH];
+ char *sz_uuid; /*[MAXUUIDLEN]; */
+ struct dcd_key_elem *dcd_key = NULL;
+ char sz_sect_name[MAXUUIDLEN + 2]; /* ".[UUID]\0" */
+ char *psz_coff_buf;
+ u32 dw_key_len; /* Len of REG key. */
+ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(obj_def != NULL);
+ DBC_REQUIRE(obj_uuid != NULL);
+
+ sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL);
+ if (!sz_uuid) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ if (!hdcd_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+	/* Pre-determine final key length. It's the length of DCD_REGKEY +
+ * "_\0" + length of sz_obj_type string + terminating NULL */
+ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
+ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
+
+ /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
+ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
+
+ if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, "_\0", 2);
+ else
+ status = -EPERM;
+
+ status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", obj_type);
+ if (status == -1) {
+ status = -EPERM;
+ } else {
+ status = 0;
+
+ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
+ DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, sz_obj_type,
+ strlen(sz_obj_type) + 1);
+ } else {
+ status = -EPERM;
+ }
+
+		/* Append the UUID string to complete the registry lookup key. */
+ uuid_uuid_to_string(obj_uuid, sz_uuid, MAXUUIDLEN);
+
+ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
+ else
+ status = -EPERM;
+
+ /* Retrieve paths from the registry based on struct dsp_uuid */
+ dw_buf_size = DCD_MAXPATHLENGTH;
+ }
+ if (!status) {
+ spin_lock(&dbdcd_lock);
+ list_for_each_entry(dcd_key, &reg_key_list, link) {
+ if (!strncmp(dcd_key->name, sz_reg_key,
+ strlen(sz_reg_key) + 1))
+ break;
+ }
+ spin_unlock(&dbdcd_lock);
+ if (&dcd_key->link == &reg_key_list) {
+ status = -ENOKEY;
+ goto func_end;
+ }
+ }
+
+
+ /* Open COFF file. */
+ status = cod_open(dcd_mgr_obj->cod_mgr, dcd_key->path,
+ COD_NOLOAD, &lib);
+ if (status) {
+ status = -EACCES;
+ goto func_end;
+ }
+
+ /* Ensure sz_uuid + 1 is not greater than sizeof sz_sect_name. */
+ DBC_ASSERT((strlen(sz_uuid) + 1) < sizeof(sz_sect_name));
+
+ /* Create section name based on node UUID. A period is
+ * pre-pended to the UUID string to form the section name.
+ * I.e. ".24BC8D90_BB45_11d4_B756_006008BDB66F" */
+ strncpy(sz_sect_name, ".", 2);
+ strncat(sz_sect_name, sz_uuid, strlen(sz_uuid));
+
+ /* Get section information. */
+ status = cod_get_section(lib, sz_sect_name, &ul_addr, &ul_len);
+ if (status) {
+ status = -EACCES;
+ goto func_end;
+ }
+
+	/* Allocate zeroed buffer. */
+	psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL);
+	if (!psz_coff_buf) {
+		status = -ENOMEM;
+		goto func_end;
+	}
+#ifdef _DB_TIOMAP
+ if (strstr(dcd_key->path, "iva") == NULL) {
+ /* Locate section by objectID and read its content. */
+ status =
+ cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
+ } else {
+ status =
+ cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
+ dev_dbg(bridge, "%s: Skipped Byte swap for IVA!!\n", __func__);
+ }
+#else
+ status = cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
+#endif
+ if (!status) {
+		/* Compress DSP buffer to conform to PC format. */
+ if (strstr(dcd_key->path, "iva") == NULL) {
+ compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
+ } else {
+ compress_buf(psz_coff_buf, ul_len, 1);
+ dev_dbg(bridge, "%s: Compressing IVA COFF buffer by 1 "
+ "for IVA!!\n", __func__);
+ }
+
+ /* Parse the content of the COFF buffer. */
+ status =
+ get_attrs_from_buf(psz_coff_buf, ul_len, obj_type, obj_def);
+ if (status)
+ status = -EACCES;
+ } else {
+ status = -EACCES;
+ }
+
+ /* Free the previously allocated dynamic buffer. */
+ kfree(psz_coff_buf);
+func_end:
+ if (lib)
+ cod_close(lib);
+
+ kfree(sz_uuid);
+
+ return status;
+}
+
+/*
+ * ======== dcd_get_objects ========
+ */
+int dcd_get_objects(struct dcd_manager *hdcd_mgr,
+ char *sz_coff_path, dcd_registerfxn register_fxn,
+ void *handle)
+{
+ struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
+ int status = 0;
+ char *psz_coff_buf;
+ char *psz_cur;
+ struct cod_libraryobj *lib = NULL;
+ u32 ul_addr = 0; /* Used by cod_get_section */
+ u32 ul_len = 0; /* Used by cod_get_section */
+ char seps[] = ":, ";
+ char *token = NULL;
+ struct dsp_uuid dsp_uuid_obj;
+ s32 object_type;
+
+ DBC_REQUIRE(refs > 0);
+ if (!hdcd_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ /* Open DSP coff file, don't load symbols. */
+ status = cod_open(dcd_mgr_obj->cod_mgr, sz_coff_path, COD_NOLOAD, &lib);
+ if (status) {
+ status = -EACCES;
+ goto func_cont;
+ }
+
+	/* Get DCD_REGISTER_SECTION section information. */
+ status = cod_get_section(lib, DCD_REGISTER_SECTION, &ul_addr, &ul_len);
+ if (status || !(ul_len > 0)) {
+ status = -EACCES;
+ goto func_cont;
+ }
+
+	/* Allocate zeroed buffer. */
+	psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL);
+	if (!psz_coff_buf) {
+		status = -ENOMEM;
+		goto func_cont;
+	}
+#ifdef _DB_TIOMAP
+ if (strstr(sz_coff_path, "iva") == NULL) {
+ /* Locate section by objectID and read its content. */
+ status = cod_read_section(lib, DCD_REGISTER_SECTION,
+ psz_coff_buf, ul_len);
+ } else {
+ dev_dbg(bridge, "%s: Skipped Byte swap for IVA!!\n", __func__);
+ status = cod_read_section(lib, DCD_REGISTER_SECTION,
+ psz_coff_buf, ul_len);
+ }
+#else
+ status =
+ cod_read_section(lib, DCD_REGISTER_SECTION, psz_coff_buf, ul_len);
+#endif
+ if (!status) {
+ /* Compress DSP buffer to conform to PC format. */
+ if (strstr(sz_coff_path, "iva") == NULL) {
+ compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
+ } else {
+ compress_buf(psz_coff_buf, ul_len, 1);
+ dev_dbg(bridge, "%s: Compress COFF buffer with 1 word "
+ "for IVA!!\n", __func__);
+ }
+
+ /* Read from buffer and register object in buffer. */
+ psz_cur = psz_coff_buf;
+ while ((token = strsep(&psz_cur, seps)) && *token != '\0') {
+ /* Retrieve UUID string. */
+ uuid_uuid_from_string(token, &dsp_uuid_obj);
+
+ /* Retrieve object type */
+ token = strsep(&psz_cur, seps);
+
+ /* Retrieve object type */
+ object_type = atoi(token);
+
+ /*
+ * Apply register_fxn to the found DCD object.
+ * Possible actions include:
+ *
+ * 1) Register found DCD object.
+ * 2) Unregister found DCD object (when handle == NULL)
+ * 3) Add overlay node.
+ */
+ status =
+ register_fxn(&dsp_uuid_obj, object_type, handle);
+ if (status) {
+ /* if error occurs, break from while loop. */
+ break;
+ }
+ }
+ } else {
+ status = -EACCES;
+ }
+
+ /* Free the previously allocated dynamic buffer. */
+ kfree(psz_coff_buf);
+func_cont:
+ if (lib)
+ cod_close(lib);
+
+func_end:
+ return status;
+}
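+
+/*
+ * The DCD_REGISTER_SECTION payload parsed above is expected to be a flat
+ * list of UUID / object-type token pairs separated by the characters in
+ * seps (":, "), for example (illustrative only):
+ *
+ *	24BC8D90_BB45_11d4_B756_006008BDB66F:<obj_type>, ...
+ *
+ * where the numeric token is a dsp_dcdobjtype value.  Each pair is handed
+ * to register_fxn, so the same walk serves registration, unregistration
+ * (handle == NULL) and overlay-node bookkeeping.
+ */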
+
+/*
+ * ======== dcd_get_library_name ========
+ * Purpose:
+ * Retrieves the library name for the given UUID.
+ *
+ */
+int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ char *str_lib_name,
+ u32 *buff_size,
+ enum nldr_phase phase, bool *phase_split)
+{
+ char sz_reg_key[DCD_MAXPATHLENGTH];
+ char sz_uuid[MAXUUIDLEN];
+ u32 dw_key_len; /* Len of REG key. */
+ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
+ int status = 0;
+ struct dcd_key_elem *dcd_key = NULL;
+
+ DBC_REQUIRE(uuid_obj != NULL);
+ DBC_REQUIRE(str_lib_name != NULL);
+ DBC_REQUIRE(buff_size != NULL);
+ DBC_REQUIRE(hdcd_mgr);
+
+ dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p,"
+ " buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name,
+ buff_size);
+
+ /*
+	 * Pre-determine final key length. It's the length of DCD_REGKEY +
+ * "_\0" + length of sz_obj_type string + terminating NULL.
+ */
+ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
+ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
+
+ /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
+ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
+ if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, "_\0", 2);
+ else
+ status = -EPERM;
+
+ switch (phase) {
+ case NLDR_CREATE:
+ /* create phase type */
+ sprintf(sz_obj_type, "%d", DSP_DCDCREATELIBTYPE);
+ break;
+ case NLDR_EXECUTE:
+ /* execute phase type */
+ sprintf(sz_obj_type, "%d", DSP_DCDEXECUTELIBTYPE);
+ break;
+ case NLDR_DELETE:
+ /* delete phase type */
+ sprintf(sz_obj_type, "%d", DSP_DCDDELETELIBTYPE);
+ break;
+ case NLDR_NOPHASE:
+ /* known to be a dependent library */
+ sprintf(sz_obj_type, "%d", DSP_DCDLIBRARYTYPE);
+ break;
+ default:
+ status = -EINVAL;
+ DBC_ASSERT(false);
+ }
+ if (!status) {
+ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
+ DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, sz_obj_type,
+ strlen(sz_obj_type) + 1);
+ } else {
+ status = -EPERM;
+ }
+ /* Create UUID value to find match in registry. */
+ uuid_uuid_to_string(uuid_obj, sz_uuid, MAXUUIDLEN);
+ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
+ else
+ status = -EPERM;
+ }
+ if (!status) {
+ spin_lock(&dbdcd_lock);
+ list_for_each_entry(dcd_key, &reg_key_list, link) {
+ /* See if the name matches. */
+ if (!strncmp(dcd_key->name, sz_reg_key,
+ strlen(sz_reg_key) + 1))
+ break;
+ }
+ spin_unlock(&dbdcd_lock);
+ }
+
+ if (&dcd_key->link == &reg_key_list)
+ status = -ENOKEY;
+
+	/* If not found, the phases might be registered as generic LIBRARYTYPE */
+ if (status && phase != NLDR_NOPHASE) {
+ if (phase_split)
+ *phase_split = false;
+
+ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
+ if ((strlen(sz_reg_key) + strlen("_\0")) <
+ DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, "_\0", 2);
+ } else {
+ status = -EPERM;
+ }
+ sprintf(sz_obj_type, "%d", DSP_DCDLIBRARYTYPE);
+ if ((strlen(sz_reg_key) + strlen(sz_obj_type))
+ < DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, sz_obj_type,
+ strlen(sz_obj_type) + 1);
+ } else {
+ status = -EPERM;
+ }
+ uuid_uuid_to_string(uuid_obj, sz_uuid, MAXUUIDLEN);
+ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
+ else
+ status = -EPERM;
+
+ spin_lock(&dbdcd_lock);
+ list_for_each_entry(dcd_key, &reg_key_list, link) {
+ /* See if the name matches. */
+ if (!strncmp(dcd_key->name, sz_reg_key,
+ strlen(sz_reg_key) + 1))
+ break;
+ }
+ spin_unlock(&dbdcd_lock);
+
+ status = (&dcd_key->link != &reg_key_list) ?
+ 0 : -ENOKEY;
+ }
+
+ if (!status)
+ memcpy(str_lib_name, dcd_key->path, strlen(dcd_key->path) + 1);
+ return status;
+}
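+
+/*
+ * The registry keys built here (and in the other DCD entry points) are
+ * plain strings of the form DCD_REGKEY + "_" + <obj_type as decimal> +
+ * <UUID string>, matched against the dcd_key_elem list with strncmp().
+ * Using the node UUID quoted elsewhere in this file, a key would look
+ * roughly like
+ *
+ *	<DCD_REGKEY>_<obj_type>24BC8D90_BB45_11d4_B756_006008BDB66F
+ *
+ * with the matching dcd_key_elem carrying the COFF library path in
+ * dcd_key->path.
+ */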
+
+/*
+ * ======== dcd_init ========
+ * Purpose:
+ * Initialize the DCD module.
+ */
+bool dcd_init(void)
+{
+ bool init_cod;
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (refs == 0) {
+ /* Initialize required modules. */
+ init_cod = cod_init();
+
+		if (!init_cod)
+			ret = false;
+
+ INIT_LIST_HEAD(&reg_key_list);
+ }
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs == 0)));
+
+ return ret;
+}
+
+/*
+ * ======== dcd_register_object ========
+ * Purpose:
+ * Registers a node or a processor with the DCD.
+ * If psz_path_name == NULL, unregister the specified DCD object.
+ */
+int dcd_register_object(struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type,
+ char *psz_path_name)
+{
+ int status = 0;
+ char sz_reg_key[DCD_MAXPATHLENGTH];
+ char sz_uuid[MAXUUIDLEN + 1];
+ u32 dw_path_size = 0;
+ u32 dw_key_len; /* Len of REG key. */
+ char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
+ struct dcd_key_elem *dcd_key = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(uuid_obj != NULL);
+ DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
+ (obj_type == DSP_DCDPROCESSORTYPE) ||
+ (obj_type == DSP_DCDLIBRARYTYPE) ||
+ (obj_type == DSP_DCDCREATELIBTYPE) ||
+ (obj_type == DSP_DCDEXECUTELIBTYPE) ||
+ (obj_type == DSP_DCDDELETELIBTYPE));
+
+ dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n",
+ __func__, uuid_obj, obj_type, psz_path_name);
+
+ /*
+	 * Pre-determine final key length. It's the length of DCD_REGKEY +
+ * "_\0" + length of sz_obj_type string + terminating NULL.
+ */
+ dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
+ DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
+
+ /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
+ strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
+ if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, "_\0", 2);
+ else {
+ status = -EPERM;
+ goto func_end;
+ }
+
+ status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", obj_type);
+ if (status == -1) {
+ status = -EPERM;
+ } else {
+ status = 0;
+ if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
+ DCD_MAXPATHLENGTH) {
+ strncat(sz_reg_key, sz_obj_type,
+ strlen(sz_obj_type) + 1);
+ } else
+ status = -EPERM;
+
+ /* Create UUID value to set in registry. */
+ uuid_uuid_to_string(uuid_obj, sz_uuid, MAXUUIDLEN);
+ if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH)
+ strncat(sz_reg_key, sz_uuid, MAXUUIDLEN);
+ else
+ status = -EPERM;
+ }
+
+ if (status)
+ goto func_end;
+
+ /*
+ * If psz_path_name != NULL, perform registration, otherwise,
+ * perform unregistration.
+ */
+
+ if (psz_path_name) {
+ dw_path_size = strlen(psz_path_name) + 1;
+ spin_lock(&dbdcd_lock);
+ list_for_each_entry(dcd_key, &reg_key_list, link) {
+ /* See if the name matches. */
+ if (!strncmp(dcd_key->name, sz_reg_key,
+ strlen(sz_reg_key) + 1))
+ break;
+ }
+ spin_unlock(&dbdcd_lock);
+ if (&dcd_key->link == &reg_key_list) {
+ /*
+ * Add new reg value (UUID+obj_type)
+ * with COFF path info
+ */
+
+ dcd_key = kmalloc(sizeof(struct dcd_key_elem),
+ GFP_KERNEL);
+ if (!dcd_key) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+			dcd_key->path = kmalloc(dw_path_size, GFP_KERNEL);
+
+ if (!dcd_key->path) {
+ kfree(dcd_key);
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ strncpy(dcd_key->name, sz_reg_key,
+ strlen(sz_reg_key) + 1);
+			strncpy(dcd_key->path, psz_path_name,
+ dw_path_size);
+ spin_lock(&dbdcd_lock);
+ list_add_tail(&dcd_key->link, &reg_key_list);
+ spin_unlock(&dbdcd_lock);
+ } else {
+ /* Make sure the new data is the same. */
+ if (strncmp(dcd_key->path, psz_path_name,
+ dw_path_size)) {
+ /* The caller needs a different data size! */
+ kfree(dcd_key->path);
+ dcd_key->path = kmalloc(dw_path_size,
+ GFP_KERNEL);
+ if (dcd_key->path == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ }
+
+ /* We have a match! Copy out the data. */
+ memcpy(dcd_key->path, psz_path_name, dw_path_size);
+ }
+ dev_dbg(bridge, "%s: psz_path_name=%s, dw_path_size=%d\n",
+ __func__, psz_path_name, dw_path_size);
+ } else {
+ /* Deregister an existing object */
+ spin_lock(&dbdcd_lock);
+ list_for_each_entry(dcd_key, &reg_key_list, link) {
+ if (!strncmp(dcd_key->name, sz_reg_key,
+ strlen(sz_reg_key) + 1)) {
+ list_del(&dcd_key->link);
+ kfree(dcd_key->path);
+ kfree(dcd_key);
+ break;
+ }
+ }
+ spin_unlock(&dbdcd_lock);
+ if (&dcd_key->link == &reg_key_list)
+ status = -EPERM;
+ }
+
+ if (!status) {
+ /*
+ * Because the node database has been updated through a
+ * successful object registration/de-registration operation,
+ * we need to reset the object enumeration counter to allow
+ * current enumerations to reflect this update in the node
+ * database.
+ */
+ enum_refs = 0;
+ }
+func_end:
+ return status;
+}
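+
+/*
+ * A minimal usage sketch (the COFF path below is purely illustrative): an
+ * object is registered by passing the path of the file that contains its
+ * DCD section, and unregistered by passing NULL for the path, which is
+ * exactly what dcd_unregister_object() below does:
+ *
+ *	dcd_register_object(&uuid, DSP_DCDNODETYPE, "/lib/dsp/node.dof");
+ *	...
+ *	dcd_register_object(&uuid, DSP_DCDNODETYPE, NULL);
+ */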
+
+/*
+ * ======== dcd_unregister_object ========
+ * Call DCD_Register object with psz_path_name set to NULL to
+ * perform actual object de-registration.
+ */
+int dcd_unregister_object(struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(uuid_obj != NULL);
+ DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
+ (obj_type == DSP_DCDPROCESSORTYPE) ||
+ (obj_type == DSP_DCDLIBRARYTYPE) ||
+ (obj_type == DSP_DCDCREATELIBTYPE) ||
+ (obj_type == DSP_DCDEXECUTELIBTYPE) ||
+ (obj_type == DSP_DCDDELETELIBTYPE));
+
+ /*
+ * When dcd_register_object is called with NULL as pathname,
+ * it indicates an unregister object operation.
+ */
+ status = dcd_register_object(uuid_obj, obj_type, NULL);
+
+ return status;
+}
+
+/*
+ **********************************************************************
+ * DCD Helper Functions
+ **********************************************************************
+ */
+
+/*
+ * ======== atoi ========
+ * Purpose:
+ * This function converts strings in decimal or hex format to integers.
+ */
+static s32 atoi(char *psz_buf)
+{
+ char *pch = psz_buf;
+ s32 base = 0;
+ unsigned long res;
+ int ret_val;
+
+ while (isspace(*pch))
+ pch++;
+
+ if (*pch == '-' || *pch == '+') {
+ base = 10;
+ pch++;
+ } else if (*pch && tolower(pch[strlen(pch) - 1]) == 'h') {
+ base = 16;
+ }
+
+ ret_val = strict_strtoul(pch, base, &res);
+
+ return ret_val ? : res;
+}
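+
+/*
+ * Base selection in atoi() above: a leading '+' or '-' forces base 10, a
+ * trailing 'h'/'H' selects base 16, and anything else is handed to
+ * strict_strtoul() with base 0 so that a conventional "0x" prefix is
+ * auto-detected.  The "ret_val ? : res" return passes the strtoul error
+ * code back when the conversion fails and the converted value otherwise.
+ */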
+
+/*
+ * ======== get_attrs_from_buf ========
+ * Purpose:
+ * Parse the content of a buffer filled with DSP-side data and
+ * retrieve an object's attributes from it. IMPORTANT: Assume the
+ * buffer has been converted from DSP format to GPP format.
+ */
+static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
+ enum dsp_dcdobjtype obj_type,
+ struct dcd_genericobj *gen_obj)
+{
+ int status = 0;
+ char seps[] = ", ";
+ char *psz_cur;
+ char *token;
+ s32 token_len = 0;
+ u32 i = 0;
+#ifdef _DB_TIOMAP
+ s32 entry_id;
+#endif
+
+ DBC_REQUIRE(psz_buf != NULL);
+ DBC_REQUIRE(ul_buf_size != 0);
+ DBC_REQUIRE((obj_type == DSP_DCDNODETYPE)
+ || (obj_type == DSP_DCDPROCESSORTYPE));
+ DBC_REQUIRE(gen_obj != NULL);
+
+ switch (obj_type) {
+ case DSP_DCDNODETYPE:
+ /*
+ * Parse COFF sect buffer to retrieve individual tokens used
+ * to fill in object attrs.
+ */
+ psz_cur = psz_buf;
+ token = strsep(&psz_cur, seps);
+
+ /* u32 cb_struct */
+ gen_obj->obj_data.node_obj.ndb_props.cb_struct =
+ (u32) atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* dsp_uuid ui_node_id */
+ uuid_uuid_from_string(token,
+ &gen_obj->obj_data.node_obj.ndb_props.
+ ui_node_id);
+ token = strsep(&psz_cur, seps);
+
+ /* ac_name */
+ DBC_REQUIRE(token);
+ token_len = strlen(token);
+ if (token_len > DSP_MAXNAMELEN - 1)
+ token_len = DSP_MAXNAMELEN - 1;
+
+ strncpy(gen_obj->obj_data.node_obj.ndb_props.ac_name,
+ token, token_len);
+ gen_obj->obj_data.node_obj.ndb_props.ac_name[token_len] = '\0';
+ token = strsep(&psz_cur, seps);
+ /* u32 ntype */
+ gen_obj->obj_data.node_obj.ndb_props.ntype = atoi(token);
+ token = strsep(&psz_cur, seps);
+ /* u32 cache_on_gpp */
+ gen_obj->obj_data.node_obj.ndb_props.cache_on_gpp = atoi(token);
+ token = strsep(&psz_cur, seps);
+ /* dsp_resourcereqmts dsp_resource_reqmts */
+ gen_obj->obj_data.node_obj.ndb_props.dsp_resource_reqmts.
+ cb_struct = (u32) atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.static_data_size = atoi(token);
+ token = strsep(&psz_cur, seps);
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.global_data_size = atoi(token);
+ token = strsep(&psz_cur, seps);
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.program_mem_size = atoi(token);
+ token = strsep(&psz_cur, seps);
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.uwc_execution_time = atoi(token);
+ token = strsep(&psz_cur, seps);
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.uwc_period = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.uwc_deadline = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.avg_exection_time = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.node_obj.ndb_props.
+ dsp_resource_reqmts.minimum_period = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* s32 prio */
+ gen_obj->obj_data.node_obj.ndb_props.prio = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 stack_size */
+ gen_obj->obj_data.node_obj.ndb_props.stack_size = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 sys_stack_size */
+ gen_obj->obj_data.node_obj.ndb_props.sys_stack_size =
+ atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 stack_seg */
+ gen_obj->obj_data.node_obj.ndb_props.stack_seg = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 message_depth */
+ gen_obj->obj_data.node_obj.ndb_props.message_depth =
+ atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 num_input_streams */
+ gen_obj->obj_data.node_obj.ndb_props.num_input_streams =
+ atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 num_output_streams */
+ gen_obj->obj_data.node_obj.ndb_props.num_output_streams =
+ atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* u32 utimeout */
+ gen_obj->obj_data.node_obj.ndb_props.utimeout = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* char *pstr_create_phase_fxn */
+ DBC_REQUIRE(token);
+ token_len = strlen(token);
+ gen_obj->obj_data.node_obj.pstr_create_phase_fxn =
+ kzalloc(token_len + 1, GFP_KERNEL);
+ strncpy(gen_obj->obj_data.node_obj.pstr_create_phase_fxn,
+ token, token_len);
+ gen_obj->obj_data.node_obj.pstr_create_phase_fxn[token_len] =
+ '\0';
+ token = strsep(&psz_cur, seps);
+
+ /* char *pstr_execute_phase_fxn */
+ DBC_REQUIRE(token);
+ token_len = strlen(token);
+ gen_obj->obj_data.node_obj.pstr_execute_phase_fxn =
+ kzalloc(token_len + 1, GFP_KERNEL);
+ strncpy(gen_obj->obj_data.node_obj.pstr_execute_phase_fxn,
+ token, token_len);
+ gen_obj->obj_data.node_obj.pstr_execute_phase_fxn[token_len] =
+ '\0';
+ token = strsep(&psz_cur, seps);
+
+ /* char *pstr_delete_phase_fxn */
+ DBC_REQUIRE(token);
+ token_len = strlen(token);
+ gen_obj->obj_data.node_obj.pstr_delete_phase_fxn =
+ kzalloc(token_len + 1, GFP_KERNEL);
+ strncpy(gen_obj->obj_data.node_obj.pstr_delete_phase_fxn,
+ token, token_len);
+ gen_obj->obj_data.node_obj.pstr_delete_phase_fxn[token_len] =
+ '\0';
+ token = strsep(&psz_cur, seps);
+
+ /* Segment id for message buffers */
+ gen_obj->obj_data.node_obj.msg_segid = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* Message notification type */
+ gen_obj->obj_data.node_obj.msg_notify_type = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ /* char *pstr_i_alg_name */
+ if (token) {
+ token_len = strlen(token);
+ gen_obj->obj_data.node_obj.pstr_i_alg_name =
+ kzalloc(token_len + 1, GFP_KERNEL);
+ strncpy(gen_obj->obj_data.node_obj.pstr_i_alg_name,
+ token, token_len);
+ gen_obj->obj_data.node_obj.pstr_i_alg_name[token_len] =
+ '\0';
+ token = strsep(&psz_cur, seps);
+ }
+
+ /* Load type (static, dynamic, or overlay) */
+ if (token) {
+ gen_obj->obj_data.node_obj.us_load_type = atoi(token);
+ token = strsep(&psz_cur, seps);
+ }
+
+ /* Dynamic load data requirements */
+ if (token) {
+ gen_obj->obj_data.node_obj.ul_data_mem_seg_mask =
+ atoi(token);
+ token = strsep(&psz_cur, seps);
+ }
+
+ /* Dynamic load code requirements */
+ if (token) {
+ gen_obj->obj_data.node_obj.ul_code_mem_seg_mask =
+ atoi(token);
+ token = strsep(&psz_cur, seps);
+ }
+
+ /* Extract node profiles into node properties */
+ if (token) {
+
+ gen_obj->obj_data.node_obj.ndb_props.count_profiles =
+ atoi(token);
+ for (i = 0;
+ i <
+ gen_obj->obj_data.node_obj.
+ ndb_props.count_profiles; i++) {
+ token = strsep(&psz_cur, seps);
+ if (token) {
+ /* Heap Size for the node */
+ gen_obj->obj_data.node_obj.
+ ndb_props.node_profiles[i].
+ ul_heap_size = atoi(token);
+ }
+ }
+ }
+ token = strsep(&psz_cur, seps);
+ if (token) {
+ gen_obj->obj_data.node_obj.ndb_props.stack_seg_name =
+ (u32) (token);
+ }
+
+ break;
+
+ case DSP_DCDPROCESSORTYPE:
+ /*
+ * Parse COFF sect buffer to retrieve individual tokens used
+ * to fill in object attrs.
+ */
+ psz_cur = psz_buf;
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.cb_struct = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.processor_family = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.processor_type = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.clock_rate = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.ul_external_mem_size = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.processor_id = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.ty_running_rtos = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.node_min_priority = atoi(token);
+ token = strsep(&psz_cur, seps);
+
+ gen_obj->obj_data.proc_info.node_max_priority = atoi(token);
+
+#ifdef _DB_TIOMAP
+		/* Proc object may contain additional (extended) attributes. */
+ /* attr must match proc.hxx */
+ for (entry_id = 0; entry_id < 7; entry_id++) {
+ token = strsep(&psz_cur, seps);
+ gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
+ ul_gpp_phys = atoi(token);
+
+ token = strsep(&psz_cur, seps);
+ gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
+ ul_dsp_virt = atoi(token);
+ }
+#endif
+
+ break;
+
+ default:
+ status = -EPERM;
+ break;
+ }
+
+ return status;
+}
+
+/*
+ * ======== compress_buf ========
+ * Purpose:
+ * Compress the DSP buffer, if necessary, to conform to PC format.
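+ * Each DSP character occupies char_size bytes; it is collapsed to one
+ * host byte and backslash escapes (\t, \n, \r, \0) are decoded in place.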
+ */
+static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size)
+{
+ char *p;
+ char ch;
+ char *q;
+
+ p = psz_buf;
+ if (p == NULL)
+ return;
+
+ for (q = psz_buf; q < (psz_buf + ul_buf_size);) {
+ ch = dsp_char2_gpp_char(q, char_size);
+ if (ch == '\\') {
+ q += char_size;
+ ch = dsp_char2_gpp_char(q, char_size);
+ switch (ch) {
+ case 't':
+ *p = '\t';
+ break;
+
+ case 'n':
+ *p = '\n';
+ break;
+
+ case 'r':
+ *p = '\r';
+ break;
+
+ case '0':
+ *p = '\0';
+ break;
+
+ default:
+ *p = ch;
+ break;
+ }
+ } else {
+ *p = ch;
+ }
+ p++;
+ q += char_size;
+ }
+
+ /* NULL out remainder of buffer. */
+ while (p < q)
+ *p++ = '\0';
+}
+
+/*
+ * ======== dsp_char2_gpp_char ========
+ * Purpose:
+ * Convert DSP char to host GPP char in a portable manner
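+ * (e.g. with dsp_char_size == 2, a word stored as {0x41, 0x00} yields 'A')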
+ */
+static char dsp_char2_gpp_char(char *word, s32 dsp_char_size)
+{
+ char ch = '\0';
+ char *ch_src;
+ s32 i;
+
+ for (ch_src = word, i = dsp_char_size; i > 0; i--)
+ ch |= *ch_src++;
+
+ return ch;
+}
+
+/*
+ * ======== get_dep_lib_info ========
+ */
+static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
+ struct dsp_uuid *uuid_obj,
+ u16 *num_libs,
+ u16 *num_pers_libs,
+ struct dsp_uuid *dep_lib_uuids,
+ bool *prstnt_dep_libs,
+ enum nldr_phase phase)
+{
+ struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
+ char *psz_coff_buf = NULL;
+ char *psz_cur;
+ char *psz_file_name = NULL;
+ struct cod_libraryobj *lib = NULL;
+ u32 ul_addr = 0; /* Used by cod_get_section */
+ u32 ul_len = 0; /* Used by cod_get_section */
+ u32 dw_data_size = COD_MAXPATHLENGTH;
+ char seps[] = ", ";
+ char *token = NULL;
+ bool get_uuids = (dep_lib_uuids != NULL);
+ u16 dep_libs = 0;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ DBC_REQUIRE(hdcd_mgr);
+ DBC_REQUIRE(num_libs != NULL);
+ DBC_REQUIRE(uuid_obj != NULL);
+
+	/* If we are only counting dependent libraries, initialize the
+	 * counts to zero */
+ if (!get_uuids) {
+ *num_libs = 0;
+ *num_pers_libs = 0;
+ }
+
+ /* Allocate a buffer for file name */
+ psz_file_name = kzalloc(dw_data_size, GFP_KERNEL);
+ if (psz_file_name == NULL) {
+ status = -ENOMEM;
+ } else {
+ /* Get the name of the library */
+ status = dcd_get_library_name(hdcd_mgr, uuid_obj, psz_file_name,
+ &dw_data_size, phase, NULL);
+ }
+
+ /* Open the library */
+ if (!status) {
+ status = cod_open(dcd_mgr_obj->cod_mgr, psz_file_name,
+ COD_NOLOAD, &lib);
+ }
+ if (!status) {
+ /* Get dependent library section information. */
+ status = cod_get_section(lib, DEPLIBSECT, &ul_addr, &ul_len);
+
+ if (status) {
+ /* Ok, no dependent libraries */
+ ul_len = 0;
+ status = 0;
+ }
+ }
+
+ if (status || !(ul_len > 0))
+ goto func_cont;
+
+ /* Allocate zeroed buffer. */
+ psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL);
+	if (psz_coff_buf == NULL) {
+		status = -ENOMEM;
+		goto func_cont;
+	}
+
+ /* Read section contents. */
+ status = cod_read_section(lib, DEPLIBSECT, psz_coff_buf, ul_len);
+ if (status)
+ goto func_cont;
+
+ /* Compress and format DSP buffer to conform to PC format. */
+ compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
+
+ /* Read from buffer */
+ psz_cur = psz_coff_buf;
+ while ((token = strsep(&psz_cur, seps)) && *token != '\0') {
+ if (get_uuids) {
+ if (dep_libs >= *num_libs) {
+ /* Gone beyond the limit */
+ break;
+ } else {
+ /* Retrieve UUID string. */
+ uuid_uuid_from_string(token,
+ &(dep_lib_uuids
+ [dep_libs]));
+ /* Is this library persistent? */
+ token = strsep(&psz_cur, seps);
+ prstnt_dep_libs[dep_libs] = atoi(token);
+ dep_libs++;
+ }
+ } else {
+			/* Advance to next token */
+ token = strsep(&psz_cur, seps);
+ if (atoi(token))
+ (*num_pers_libs)++;
+
+ /* Just counting number of dependent libraries */
+ (*num_libs)++;
+ }
+ }
+func_cont:
+ if (lib)
+ cod_close(lib);
+
+ /* Free previously allocated dynamic buffers. */
+ kfree(psz_file_name);
+
+ kfree(psz_coff_buf);
+
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c
new file mode 100644
index 00000000000..b7ce4353e06
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/disp.c
@@ -0,0 +1,752 @@
+/*
+ * disp.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Node Dispatcher interface. Communicates with Resource Manager Server
+ * (RMS) on DSP. Access to RMS is synchronized in NODE.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Link Driver */
+#include <dspbridge/dspdefs.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/chnldefs.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/nodedefs.h>
+#include <dspbridge/nodepriv.h>
+#include <dspbridge/rms_sh.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/disp.h>
+
+/* Size of a reply from RMS */
+#define REPLYSIZE (3 * sizeof(rms_word))
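+/* Word 0 of a reply carries the RMS return status and word 1 the returned
+ * argument; see send_message(). */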
+
+/* Reserved channel offsets for communication with RMS */
+#define CHNLTORMSOFFSET 0
+#define CHNLFROMRMSOFFSET 1
+
+#define CHNLIOREQS 1
+
+/*
+ * ======== disp_object ========
+ */
+struct disp_object {
+ struct dev_object *hdev_obj; /* Device for this processor */
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+ struct chnl_mgr *hchnl_mgr; /* Channel manager */
+ struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */
+ struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */
+ u8 *pbuf; /* Buffer for commands, replies */
+ u32 ul_bufsize; /* pbuf size in bytes */
+ u32 ul_bufsize_rms; /* pbuf size in RMS words */
+ u32 char_size; /* Size of DSP character */
+ u32 word_size; /* Size of DSP word */
+ u32 data_mau_size; /* Size of DSP Data MAU */
+};
+
+static u32 refs;
+
+static void delete_disp(struct disp_object *disp_obj);
+static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
+ struct node_strmdef strm_def, u32 max,
+ u32 chars_in_rms_word);
+static int send_message(struct disp_object *disp_obj, u32 timeout,
+ u32 ul_bytes, u32 *pdw_arg);
+
+/*
+ * ======== disp_create ========
+ * Create a NODE Dispatcher object.
+ */
+int disp_create(struct disp_object **dispatch_obj,
+ struct dev_object *hdev_obj,
+ const struct disp_attr *disp_attrs)
+{
+ struct disp_object *disp_obj;
+ struct bridge_drv_interface *intf_fxns;
+ u32 ul_chnl_id;
+ struct chnl_attr chnl_attr_obj;
+ int status = 0;
+ u8 dev_type;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dispatch_obj != NULL);
+ DBC_REQUIRE(disp_attrs != NULL);
+ DBC_REQUIRE(hdev_obj != NULL);
+
+ *dispatch_obj = NULL;
+
+ /* Allocate Node Dispatcher object */
+ disp_obj = kzalloc(sizeof(struct disp_object), GFP_KERNEL);
+ if (disp_obj == NULL)
+ status = -ENOMEM;
+ else
+ disp_obj->hdev_obj = hdev_obj;
+
+ /* Get Channel manager and Bridge function interface */
+ if (!status) {
+ status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr));
+ if (!status) {
+ (void)dev_get_intf_fxns(hdev_obj, &intf_fxns);
+ disp_obj->intf_fxns = intf_fxns;
+ }
+ }
+
+	/* Check device type and decide if streams or messaging is used for
+ * RMS/EDS */
+ if (status)
+ goto func_cont;
+
+ status = dev_get_dev_type(hdev_obj, &dev_type);
+
+ if (status)
+ goto func_cont;
+
+ if (dev_type != DSP_UNIT) {
+ status = -EPERM;
+ goto func_cont;
+ }
+
+ disp_obj->char_size = DSPWORDSIZE;
+ disp_obj->word_size = DSPWORDSIZE;
+ disp_obj->data_mau_size = DSPWORDSIZE;
+ /* Open channels for communicating with the RMS */
+ chnl_attr_obj.uio_reqs = CHNLIOREQS;
+ chnl_attr_obj.event_obj = NULL;
+ ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLTORMSOFFSET;
+ status = (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_to_dsp),
+ disp_obj->hchnl_mgr,
+ CHNL_MODETODSP, ul_chnl_id,
+ &chnl_attr_obj);
+
+ if (!status) {
+ ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLFROMRMSOFFSET;
+ status =
+ (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_from_dsp),
+ disp_obj->hchnl_mgr,
+ CHNL_MODEFROMDSP, ul_chnl_id,
+ &chnl_attr_obj);
+ }
+ if (!status) {
+ /* Allocate buffer for commands, replies */
+ disp_obj->ul_bufsize = disp_attrs->ul_chnl_buf_size;
+ disp_obj->ul_bufsize_rms = RMS_COMMANDBUFSIZE;
+ disp_obj->pbuf = kzalloc(disp_obj->ul_bufsize, GFP_KERNEL);
+ if (disp_obj->pbuf == NULL)
+ status = -ENOMEM;
+ }
+func_cont:
+ if (!status)
+ *dispatch_obj = disp_obj;
+ else
+ delete_disp(disp_obj);
+
+ DBC_ENSURE((status && *dispatch_obj == NULL) ||
+ (!status && *dispatch_obj));
+ return status;
+}
+
+/*
+ * ======== disp_delete ========
+ * Delete the NODE Dispatcher.
+ */
+void disp_delete(struct disp_object *disp_obj)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(disp_obj);
+
+ delete_disp(disp_obj);
+}
+
+/*
+ * ======== disp_exit ========
+ * Discontinue usage of DISP module.
+ */
+void disp_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== disp_init ========
+ * Initialize the DISP module.
+ */
+bool disp_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+ return ret;
+}
+
+/*
+ * ======== disp_node_change_priority ========
+ * Change the priority of a node currently running on the target.
+ */
+int disp_node_change_priority(struct disp_object *disp_obj,
+ struct node_object *hnode,
+ u32 rms_fxn, nodeenv node_env, s32 prio)
+{
+ u32 dw_arg;
+ struct rms_command *rms_cmd;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(disp_obj);
+ DBC_REQUIRE(hnode != NULL);
+
+ /* Send message to RMS to change priority */
+ rms_cmd = (struct rms_command *)(disp_obj->pbuf);
+ rms_cmd->fxn = (rms_word) (rms_fxn);
+ rms_cmd->arg1 = (rms_word) node_env;
+ rms_cmd->arg2 = prio;
+ status = send_message(disp_obj, node_get_timeout(hnode),
+ sizeof(struct rms_command), &dw_arg);
+
+ return status;
+}
+
+/*
+ * ======== disp_node_create ========
+ * Create a node on the DSP by remotely calling the node's create function.
+ */
+int disp_node_create(struct disp_object *disp_obj,
+ struct node_object *hnode, u32 rms_fxn,
+ u32 ul_create_fxn,
+ const struct node_createargs *pargs,
+ nodeenv *node_env)
+{
+ struct node_msgargs node_msg_args;
+ struct node_taskargs task_arg_obj;
+ struct rms_command *rms_cmd;
+ struct rms_msg_args *pmsg_args;
+ struct rms_more_task_args *more_task_args;
+ enum node_type node_type;
+ u32 dw_length;
+ rms_word *pdw_buf = NULL;
+ u32 ul_bytes;
+ u32 i;
+ u32 total;
+ u32 chars_in_rms_word;
+ s32 task_args_offset;
+ s32 sio_in_def_offset;
+ s32 sio_out_def_offset;
+ s32 sio_defs_offset;
+ s32 args_offset = -1;
+ s32 offset;
+ struct node_strmdef strm_def;
+ u32 max;
+ int status = 0;
+ struct dsp_nodeinfo node_info;
+ u8 dev_type;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(disp_obj);
+ DBC_REQUIRE(hnode != NULL);
+ DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
+ DBC_REQUIRE(node_env != NULL);
+
+ status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
+
+ if (status)
+ goto func_end;
+
+ if (dev_type != DSP_UNIT) {
+ dev_dbg(bridge, "%s: unknown device type = 0x%x\n",
+ __func__, dev_type);
+ goto func_end;
+ }
+ DBC_REQUIRE(pargs != NULL);
+ node_type = node_get_type(hnode);
+ node_msg_args = pargs->asa.node_msg_args;
+	max = disp_obj->ul_bufsize_rms;	/* Max # of RMS words that can be sent */
+ DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
+ chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
+ /* Number of RMS words needed to hold arg data */
+ dw_length =
+ (node_msg_args.arg_length + chars_in_rms_word -
+ 1) / chars_in_rms_word;
+ /* Make sure msg args and command fit in buffer */
+ total = sizeof(struct rms_command) / sizeof(rms_word) +
+ sizeof(struct rms_msg_args)
+ / sizeof(rms_word) - 1 + dw_length;
+ if (total >= max) {
+ status = -EPERM;
+ dev_dbg(bridge, "%s: Message args too large for buffer! size "
+ "= %d, max = %d\n", __func__, total, max);
+ }
+ /*
+ * Fill in buffer to send to RMS.
+ * The buffer will have the following format:
+ *
+ * RMS command:
+ * Address of RMS_CreateNode()
+ * Address of node's create function
+ * dummy argument
+ * node type
+ *
+ * Message Args:
+ * max number of messages
+ * segid for message buffer allocation
+ * notification type to use when message is received
+ * length of message arg data
+ * message args data
+ *
+ * Task Args (if task or socket node):
+ * priority
+ * stack size
+ * system stack size
+ * stack segment
+ * misc
+ * number of input streams
+ * pSTRMInDef[] - offsets of STRM definitions for input streams
+ * number of output streams
+ * pSTRMOutDef[] - offsets of STRM definitions for output
+ * streams
+ * STRMInDef[] - array of STRM definitions for input streams
+ * STRMOutDef[] - array of STRM definitions for output streams
+ *
+ * Socket Args (if DAIS socket node):
+ *
+ */
+ if (!status) {
+ total = 0; /* Total number of words in buffer so far */
+ pdw_buf = (rms_word *) disp_obj->pbuf;
+ rms_cmd = (struct rms_command *)pdw_buf;
+ rms_cmd->fxn = (rms_word) (rms_fxn);
+ rms_cmd->arg1 = (rms_word) (ul_create_fxn);
+ if (node_get_load_type(hnode) == NLDR_DYNAMICLOAD) {
+ /* Flush ICACHE on Load */
+ rms_cmd->arg2 = 1; /* dummy argument */
+ } else {
+ /* Do not flush ICACHE */
+ rms_cmd->arg2 = 0; /* dummy argument */
+ }
+ rms_cmd->data = node_get_type(hnode);
+ /*
+ * args_offset is the offset of the data field in struct
+ * rms_command structure. We need this to calculate stream
+ * definition offsets.
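+		 * (fxn, arg1 and arg2 precede the data field, so it starts
+		 * three rms_words into the command.)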
+ */
+ args_offset = 3;
+ total += sizeof(struct rms_command) / sizeof(rms_word);
+ /* Message args */
+ pmsg_args = (struct rms_msg_args *)(pdw_buf + total);
+ pmsg_args->max_msgs = node_msg_args.max_msgs;
+ pmsg_args->segid = node_msg_args.seg_id;
+ pmsg_args->notify_type = node_msg_args.notify_type;
+ pmsg_args->arg_length = node_msg_args.arg_length;
+ total += sizeof(struct rms_msg_args) / sizeof(rms_word) - 1;
+ memcpy(pdw_buf + total, node_msg_args.pdata,
+ node_msg_args.arg_length);
+ total += dw_length;
+ }
+ if (status)
+ goto func_end;
+
+ /* If node is a task node, copy task create arguments into buffer */
+ if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
+ task_arg_obj = pargs->asa.task_arg_obj;
+ task_args_offset = total;
+ total += sizeof(struct rms_more_task_args) / sizeof(rms_word) +
+ 1 + task_arg_obj.num_inputs + task_arg_obj.num_outputs;
+ /* Copy task arguments */
+ if (total < max) {
+ total = task_args_offset;
+ more_task_args = (struct rms_more_task_args *)(pdw_buf +
+ total);
+ /*
+ * Get some important info about the node. Note that we
+ * don't just reach into the hnode struct because
+ * that would break the node object's abstraction.
+ */
+ get_node_info(hnode, &node_info);
+ more_task_args->priority = node_info.execution_priority;
+ more_task_args->stack_size = task_arg_obj.stack_size;
+ more_task_args->sysstack_size =
+ task_arg_obj.sys_stack_size;
+ more_task_args->stack_seg = task_arg_obj.stack_seg;
+ more_task_args->heap_addr = task_arg_obj.udsp_heap_addr;
+ more_task_args->heap_size = task_arg_obj.heap_size;
+ more_task_args->misc = task_arg_obj.ul_dais_arg;
+ more_task_args->num_input_streams =
+ task_arg_obj.num_inputs;
+ total +=
+ sizeof(struct rms_more_task_args) /
+ sizeof(rms_word);
+ dev_dbg(bridge, "%s: udsp_heap_addr %x, heap_size %x\n",
+ __func__, task_arg_obj.udsp_heap_addr,
+ task_arg_obj.heap_size);
+ /* Keep track of pSIOInDef[] and pSIOOutDef[]
+			 * positions in the buffer, since these need to be
+ * filled in later. */
+ sio_in_def_offset = total;
+ total += task_arg_obj.num_inputs;
+ pdw_buf[total++] = task_arg_obj.num_outputs;
+ sio_out_def_offset = total;
+ total += task_arg_obj.num_outputs;
+ sio_defs_offset = total;
+ /* Fill SIO defs and offsets */
+ offset = sio_defs_offset;
+ for (i = 0; i < task_arg_obj.num_inputs; i++) {
+ if (status)
+ break;
+
+ pdw_buf[sio_in_def_offset + i] =
+ (offset - args_offset)
+ * (sizeof(rms_word) / DSPWORDSIZE);
+ strm_def = task_arg_obj.strm_in_def[i];
+ status =
+ fill_stream_def(pdw_buf, &total, offset,
+ strm_def, max,
+ chars_in_rms_word);
+ offset = total;
+ }
+ for (i = 0; (i < task_arg_obj.num_outputs) &&
+ (!status); i++) {
+ pdw_buf[sio_out_def_offset + i] =
+ (offset - args_offset)
+ * (sizeof(rms_word) / DSPWORDSIZE);
+ strm_def = task_arg_obj.strm_out_def[i];
+ status =
+ fill_stream_def(pdw_buf, &total, offset,
+ strm_def, max,
+ chars_in_rms_word);
+ offset = total;
+ }
+ } else {
+ /* Args won't fit */
+ status = -EPERM;
+ }
+ }
+ if (!status) {
+ ul_bytes = total * sizeof(rms_word);
+ DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
+ status = send_message(disp_obj, node_get_timeout(hnode),
+ ul_bytes, node_env);
+ if (status >= 0) {
+ /*
+ * Message successfully received from RMS.
+ * Return the status of the Node's create function
+ * on the DSP-side
+ */
+ status = (((rms_word *) (disp_obj->pbuf))[0]);
+ if (status < 0)
+ dev_dbg(bridge, "%s: DSP-side failed: 0x%x\n",
+ __func__, status);
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== disp_node_delete ========
+ * Purpose:
+ * Delete a node on the DSP by remotely calling the node's delete function.
+ *
+ */
+int disp_node_delete(struct disp_object *disp_obj,
+ struct node_object *hnode, u32 rms_fxn,
+ u32 ul_delete_fxn, nodeenv node_env)
+{
+ u32 dw_arg;
+ struct rms_command *rms_cmd;
+ int status = 0;
+ u8 dev_type;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(disp_obj);
+ DBC_REQUIRE(hnode != NULL);
+
+ status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
+
+ if (!status) {
+
+ if (dev_type == DSP_UNIT) {
+
+ /*
+ * Fill in buffer to send to RMS
+ */
+ rms_cmd = (struct rms_command *)disp_obj->pbuf;
+ rms_cmd->fxn = (rms_word) (rms_fxn);
+ rms_cmd->arg1 = (rms_word) node_env;
+ rms_cmd->arg2 = (rms_word) (ul_delete_fxn);
+ rms_cmd->data = node_get_type(hnode);
+
+ status = send_message(disp_obj, node_get_timeout(hnode),
+ sizeof(struct rms_command),
+ &dw_arg);
+ if (status >= 0) {
+ /*
+ * Message successfully received from RMS.
+ * Return the status of the Node's delete
+ * function on the DSP-side
+ */
+ status = (((rms_word *) (disp_obj->pbuf))[0]);
+ if (status < 0)
+ dev_dbg(bridge, "%s: DSP-side failed: "
+ "0x%x\n", __func__, status);
+ }
+
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== disp_node_run ========
+ * Purpose:
+ * Start execution of a node's execute phase, or resume execution of a node
+ * that has been suspended (via DISP_NodePause()) on the DSP.
+ */
+int disp_node_run(struct disp_object *disp_obj,
+ struct node_object *hnode, u32 rms_fxn,
+ u32 ul_execute_fxn, nodeenv node_env)
+{
+ u32 dw_arg;
+ struct rms_command *rms_cmd;
+ int status = 0;
+ u8 dev_type;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(disp_obj);
+ DBC_REQUIRE(hnode != NULL);
+
+ status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
+
+ if (!status) {
+
+ if (dev_type == DSP_UNIT) {
+
+ /*
+ * Fill in buffer to send to RMS.
+ */
+ rms_cmd = (struct rms_command *)disp_obj->pbuf;
+ rms_cmd->fxn = (rms_word) (rms_fxn);
+ rms_cmd->arg1 = (rms_word) node_env;
+ rms_cmd->arg2 = (rms_word) (ul_execute_fxn);
+ rms_cmd->data = node_get_type(hnode);
+
+ status = send_message(disp_obj, node_get_timeout(hnode),
+ sizeof(struct rms_command),
+ &dw_arg);
+ if (status >= 0) {
+ /*
+ * Message successfully received from RMS.
+ * Return the status of the Node's execute
+ * function on the DSP-side
+ */
+ status = (((rms_word *) (disp_obj->pbuf))[0]);
+ if (status < 0)
+ dev_dbg(bridge, "%s: DSP-side failed: "
+ "0x%x\n", __func__, status);
+ }
+
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== delete_disp ========
+ * Purpose:
+ * Frees the resources allocated for the dispatcher.
+ */
+static void delete_disp(struct disp_object *disp_obj)
+{
+ int status = 0;
+ struct bridge_drv_interface *intf_fxns;
+
+ if (disp_obj) {
+ intf_fxns = disp_obj->intf_fxns;
+
+ /* Free Node Dispatcher resources */
+ if (disp_obj->chnl_from_dsp) {
+ /* Channel close can fail only if the channel handle
+ * is invalid. */
+ status = (*intf_fxns->pfn_chnl_close)
+ (disp_obj->chnl_from_dsp);
+ if (status) {
+ dev_dbg(bridge, "%s: Failed to close channel "
+ "from RMS: 0x%x\n", __func__, status);
+ }
+ }
+ if (disp_obj->chnl_to_dsp) {
+ status =
+ (*intf_fxns->pfn_chnl_close) (disp_obj->
+ chnl_to_dsp);
+ if (status) {
+ dev_dbg(bridge, "%s: Failed to close channel to"
+ " RMS: 0x%x\n", __func__, status);
+ }
+ }
+ kfree(disp_obj->pbuf);
+
+ kfree(disp_obj);
+ }
+}
+
+/*
+ * ======== fill_stream_def ========
+ * Purpose:
+ * Fills stream definitions.
+ */
+static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
+ struct node_strmdef strm_def, u32 max,
+ u32 chars_in_rms_word)
+{
+ struct rms_strm_def *strm_def_obj;
+ u32 total = *ptotal;
+ u32 name_len;
+ u32 dw_length;
+ int status = 0;
+
+ if (total + sizeof(struct rms_strm_def) / sizeof(rms_word) >= max) {
+ status = -EPERM;
+ } else {
+ strm_def_obj = (struct rms_strm_def *)(pdw_buf + total);
+ strm_def_obj->bufsize = strm_def.buf_size;
+ strm_def_obj->nbufs = strm_def.num_bufs;
+ strm_def_obj->segid = strm_def.seg_id;
+ strm_def_obj->align = strm_def.buf_alignment;
+ strm_def_obj->timeout = strm_def.utimeout;
+ }
+
+ if (!status) {
+ /*
+ * Since we haven't added the device name yet, subtract
+ * 1 from total.
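+		 * The device name is then copied starting at the struct's
+		 * final word.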
+ */
+ total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
+ DBC_REQUIRE(strm_def.sz_device);
+ dw_length = strlen(strm_def.sz_device) + 1;
+
+ /* Number of RMS_WORDS needed to hold device name */
+ name_len =
+ (dw_length + chars_in_rms_word - 1) / chars_in_rms_word;
+
+ if (total + name_len >= max) {
+ status = -EPERM;
+ } else {
+ /*
+ * Zero out last word, since the device name may not
+ * extend to completely fill this word.
+ */
+ pdw_buf[total + name_len - 1] = 0;
+			/* TODO: use services */
+ memcpy(pdw_buf + total, strm_def.sz_device, dw_length);
+ total += name_len;
+ *ptotal = total;
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== send_message ========
+ * Send command message to RMS, get reply from RMS.
+ */
+static int send_message(struct disp_object *disp_obj, u32 timeout,
+ u32 ul_bytes, u32 *pdw_arg)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct chnl_object *chnl_obj;
+ u32 dw_arg = 0;
+ u8 *pbuf;
+ struct chnl_ioc chnl_ioc_obj;
+ int status = 0;
+
+ DBC_REQUIRE(pdw_arg != NULL);
+
+ *pdw_arg = (u32) NULL;
+ intf_fxns = disp_obj->intf_fxns;
+ chnl_obj = disp_obj->chnl_to_dsp;
+ pbuf = disp_obj->pbuf;
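+	/* The same buffer holds the outgoing command and the incoming reply. */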
+
+ /* Send the command */
+ status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 0,
+ 0L, dw_arg);
+ if (status)
+ goto func_end;
+
+ status =
+ (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
+ if (!status) {
+ if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
+ if (CHNL_IS_TIMED_OUT(chnl_ioc_obj))
+ status = -ETIME;
+ else
+ status = -EPERM;
+ }
+ }
+ /* Get the reply */
+ if (status)
+ goto func_end;
+
+ chnl_obj = disp_obj->chnl_from_dsp;
+ ul_bytes = REPLYSIZE;
+ status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes,
+ 0, 0L, dw_arg);
+ if (status)
+ goto func_end;
+
+ status =
+ (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
+ if (!status) {
+ if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
+ status = -ETIME;
+ } else if (chnl_ioc_obj.byte_size < ul_bytes) {
+ /* Did not get all of the reply from the RMS */
+ status = -EPERM;
+ } else {
+ if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
+ DBC_ASSERT(chnl_ioc_obj.pbuf == pbuf);
+ status = (*((rms_word *) chnl_ioc_obj.pbuf));
+ *pdw_arg =
+ (((rms_word *) (chnl_ioc_obj.pbuf))[1]);
+ } else {
+ status = -EPERM;
+ }
+ }
+ }
+func_end:
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
new file mode 100644
index 00000000000..8a8dea6efed
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -0,0 +1,929 @@
+/*
+ * drv.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge resource allocation module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/list.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/drv.h>
+#include <dspbridge/dev.h>
+
+#include <dspbridge/node.h>
+#include <dspbridge/proc.h>
+#include <dspbridge/strm.h>
+#include <dspbridge/nodepriv.h>
+#include <dspbridge/dspchnl.h>
+#include <dspbridge/resourcecleanup.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+struct drv_object {
+ struct lst_list *dev_list;
+ struct lst_list *dev_node_string;
+};
+
+/*
+ * This is the Device Extension. Named with the prefix
+ * DRV_ since it lives in this module
+ */
+struct drv_ext {
+ struct list_head link;
+ char sz_string[MAXREGPATHLENGTH];
+};
+
+/* ----------------------------------- Globals */
+static s32 refs;
+static bool ext_phys_mem_pool_enabled;
+struct ext_phys_mem_pool {
+ u32 phys_mem_base;
+ u32 phys_mem_size;
+ u32 virt_mem_base;
+ u32 next_phys_alloc_ptr;
+};
+static struct ext_phys_mem_pool ext_mem_pool;
+
+/* ----------------------------------- Function Prototypes */
+static int request_bridge_resources(struct cfg_hostres *res);
+
+
+/* GPP PROCESS CLEANUP CODE */
+
+static int drv_proc_free_node_res(int id, void *p, void *data);
+
+/* Allocate and add a node resource element.
+ * This function is called from node_allocate(). */
+int drv_insert_node_res_element(void *hnode, void *node_resource,
+ void *process_ctxt)
+{
+ struct node_res_object **node_res_obj =
+ (struct node_res_object **)node_resource;
+ struct process_context *ctxt = (struct process_context *)process_ctxt;
+ int status = 0;
+ int retval;
+
+ *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
+ if (!*node_res_obj) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ (*node_res_obj)->hnode = hnode;
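+	/*
+	 * idr_get_new() returns -EAGAIN when the IDR's preallocated cache
+	 * is empty; refill it with idr_pre_get() and retry once.
+	 */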
+ retval = idr_get_new(ctxt->node_id, *node_res_obj,
+ &(*node_res_obj)->id);
+ if (retval == -EAGAIN) {
+ if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
+ pr_err("%s: OUT OF MEMORY\n", __func__);
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ retval = idr_get_new(ctxt->node_id, *node_res_obj,
+ &(*node_res_obj)->id);
+ }
+ if (retval) {
+ pr_err("%s: FAILED, IDR is FULL\n", __func__);
+ status = -EFAULT;
+ }
+func_end:
+ if (status)
+ kfree(*node_res_obj);
+
+ return status;
+}
+
+/* Release all node resources and their context.
+ * Actual node de-allocation. */
+static int drv_proc_free_node_res(int id, void *p, void *data)
+{
+ struct process_context *ctxt = data;
+ int status;
+ struct node_res_object *node_res_obj = p;
+ u32 node_state;
+
+ if (node_res_obj->node_allocated) {
+ node_state = node_get_state(node_res_obj->hnode);
+ if (node_state <= NODE_DELETING) {
+ if ((node_state == NODE_RUNNING) ||
+ (node_state == NODE_PAUSED) ||
+ (node_state == NODE_TERMINATING))
+ node_terminate
+ (node_res_obj->hnode, &status);
+
+ node_delete(node_res_obj, ctxt);
+ }
+ }
+
+ return 0;
+}
+
+/* Release all Mapped and Reserved DMM resources */
+int drv_remove_all_dmm_res_elements(void *process_ctxt)
+{
+ struct process_context *ctxt = (struct process_context *)process_ctxt;
+ int status = 0;
+ struct dmm_map_object *temp_map, *map_obj;
+ struct dmm_rsv_object *temp_rsv, *rsv_obj;
+
+ /* Free DMM mapped memory resources */
+ list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
+ status = proc_un_map(ctxt->hprocessor,
+ (void *)map_obj->dsp_addr, ctxt);
+ if (status)
+ pr_err("%s: proc_un_map failed!"
+			       " status = 0x%x\n", __func__, status);
+ }
+
+ /* Free DMM reserved memory resources */
+ list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
+ status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
+ rsv_obj->dsp_reserved_addr,
+ ctxt);
+ if (status)
+ pr_err("%s: proc_un_reserve_memory failed!"
+				" status = 0x%x\n", __func__, status);
+ }
+ return status;
+}
+
+/* Update Node allocation status */
+void drv_proc_node_update_status(void *node_resource, s32 status)
+{
+ struct node_res_object *node_res_obj =
+ (struct node_res_object *)node_resource;
+ DBC_ASSERT(node_resource != NULL);
+ node_res_obj->node_allocated = status;
+}
+
+/* Update Node Heap status */
+void drv_proc_node_update_heap_status(void *node_resource, s32 status)
+{
+ struct node_res_object *node_res_obj =
+ (struct node_res_object *)node_resource;
+ DBC_ASSERT(node_resource != NULL);
+ node_res_obj->heap_allocated = status;
+}
+
+/* Release all node resources and their context.
+ * This is called from bridge_release().
+ */
+int drv_remove_all_node_res_elements(void *process_ctxt)
+{
+ struct process_context *ctxt = process_ctxt;
+
+ idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
+ idr_destroy(ctxt->node_id);
+
+ return 0;
+}
+
+/* Allocate the STRM resource element.
+ * This is called after the actual resource is allocated.
+ */
+int drv_proc_insert_strm_res_element(void *stream_obj,
+ void *strm_res, void *process_ctxt)
+{
+ struct strm_res_object **pstrm_res =
+ (struct strm_res_object **)strm_res;
+ struct process_context *ctxt = (struct process_context *)process_ctxt;
+ int status = 0;
+ int retval;
+
+ *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
+ if (*pstrm_res == NULL) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ (*pstrm_res)->hstream = stream_obj;
+ retval = idr_get_new(ctxt->stream_id, *pstrm_res,
+ &(*pstrm_res)->id);
+ if (retval == -EAGAIN) {
+ if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
+ pr_err("%s: OUT OF MEMORY\n", __func__);
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ retval = idr_get_new(ctxt->stream_id, *pstrm_res,
+ &(*pstrm_res)->id);
+ }
+ if (retval) {
+ pr_err("%s: FAILED, IDR is FULL\n", __func__);
+ status = -EPERM;
+ }
+
+func_end:
+ return status;
+}
+
+static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
+{
+ struct process_context *ctxt = process_ctxt;
+ struct strm_res_object *strm_res = p;
+ struct stream_info strm_info;
+ struct dsp_streaminfo user;
+ u8 **ap_buffer = NULL;
+ u8 *buf_ptr;
+ u32 ul_bytes;
+ u32 dw_arg;
+ s32 ul_buf_size;
+
+ if (strm_res->num_bufs) {
+ ap_buffer = kmalloc((strm_res->num_bufs *
+ sizeof(u8 *)), GFP_KERNEL);
+ if (ap_buffer) {
+ strm_free_buffer(strm_res,
+ ap_buffer,
+ strm_res->num_bufs,
+ ctxt);
+ kfree(ap_buffer);
+ }
+ }
+ strm_info.user_strm = &user;
+ user.number_bufs_in_stream = 0;
+ strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
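+	/* Reclaim any buffers still queued on the stream before closing it. */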
+ while (user.number_bufs_in_stream--)
+ strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
+ (u32 *) &ul_buf_size, &dw_arg);
+ strm_close(strm_res, ctxt);
+ return 0;
+}
+
+/* Release all stream resources and their context.
+ * This is called from bridge_release().
+ */
+int drv_remove_all_strm_res_elements(void *process_ctxt)
+{
+ struct process_context *ctxt = process_ctxt;
+
+ idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
+ idr_destroy(ctxt->stream_id);
+
+ return 0;
+}
+
+/* Updating the stream resource element */
+int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
+{
+ int status = 0;
+ struct strm_res_object **strm_res =
+ (struct strm_res_object **)strm_resources;
+
+ (*strm_res)->num_bufs = num_bufs;
+ return status;
+}
+
+/* GPP PROCESS CLEANUP CODE END */
+
+/*
+ * ======== drv_create ========
+ * Purpose:
+ * DRV Object gets created only once during Driver Loading.
+ */
+int drv_create(struct drv_object **drv_obj)
+{
+ int status = 0;
+ struct drv_object *pdrv_object = NULL;
+
+ DBC_REQUIRE(drv_obj != NULL);
+ DBC_REQUIRE(refs > 0);
+
+ pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
+ if (pdrv_object) {
+ /* Create and Initialize List of device objects */
+ pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (pdrv_object->dev_list) {
+ /* Create and Initialize List of device Extension */
+ pdrv_object->dev_node_string =
+ kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+ if (!(pdrv_object->dev_node_string)) {
+ status = -EPERM;
+ } else {
+ INIT_LIST_HEAD(&pdrv_object->
+ dev_node_string->head);
+ INIT_LIST_HEAD(&pdrv_object->dev_list->head);
+ }
+ } else {
+ status = -ENOMEM;
+ }
+ } else {
+ status = -ENOMEM;
+ }
+ /* Store the DRV Object in the Registry */
+ if (!status)
+ status = cfg_set_object((u32) pdrv_object, REG_DRV_OBJECT);
+ if (!status) {
+ *drv_obj = pdrv_object;
+ } else {
+ kfree(pdrv_object->dev_list);
+ kfree(pdrv_object->dev_node_string);
+ /* Free the DRV Object */
+ kfree(pdrv_object);
+ }
+
+ DBC_ENSURE(status || pdrv_object);
+ return status;
+}
+
+/*
+ * ======== drv_exit ========
+ * Purpose:
+ * Discontinue usage of the DRV module.
+ */
+void drv_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== drv_destroy ========
+ * Purpose:
+ * Invoked during bridge de-initialization
+ */
+int drv_destroy(struct drv_object *driver_obj)
+{
+ int status = 0;
+ struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pdrv_object);
+
+ /*
+	 * Delete the lists if they exist. We should not get here,
+	 * as drv_remove_dev_object and the last drv_release_resources
+	 * remove the lists once they are empty.
+ */
+ kfree(pdrv_object->dev_list);
+ kfree(pdrv_object->dev_node_string);
+ kfree(pdrv_object);
+ /* Update the DRV Object in Registry to be 0 */
+ (void)cfg_set_object(0, REG_DRV_OBJECT);
+
+ return status;
+}
+
+/*
+ * ======== drv_get_dev_object ========
+ * Purpose:
+ * Given an index, returns a handle to a DevObject from the list.
+ */
+int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
+ struct dev_object **device_obj)
+{
+ int status = 0;
+#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+ /* used only for Assertions and debug messages */
+ struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
+#endif
+ struct dev_object *dev_obj;
+ u32 i;
+ DBC_REQUIRE(pdrv_obj);
+ DBC_REQUIRE(device_obj != NULL);
+ DBC_REQUIRE(index >= 0);
+ DBC_REQUIRE(refs > 0);
+ DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));
+
+ dev_obj = (struct dev_object *)drv_get_first_dev_object();
+ for (i = 0; i < index; i++) {
+ dev_obj =
+ (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
+ }
+ if (dev_obj) {
+ *device_obj = (struct dev_object *)dev_obj;
+ } else {
+ *device_obj = NULL;
+ status = -EPERM;
+ }
+
+ return status;
+}
+
+/*
+ * ======== drv_get_first_dev_object ========
+ * Purpose:
+ * Retrieve the first Device Object handle from an internal linked list of
+ * DEV_OBJECTs maintained by DRV.
+ */
+u32 drv_get_first_dev_object(void)
+{
+ u32 dw_dev_object = 0;
+ struct drv_object *pdrv_obj;
+
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
+ if ((pdrv_obj->dev_list != NULL) &&
+ !LST_IS_EMPTY(pdrv_obj->dev_list))
+ dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
+ }
+
+ return dw_dev_object;
+}
+
+/*
+ * ======== drv_get_first_dev_extension ========
+ * Purpose:
+ * Retrieve the first Device Extension from an internal linked list of
+ * pointers to dev_node strings maintained by DRV.
+ */
+u32 drv_get_first_dev_extension(void)
+{
+ u32 dw_dev_extension = 0;
+ struct drv_object *pdrv_obj;
+
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
+
+ if ((pdrv_obj->dev_node_string != NULL) &&
+ !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
+ dw_dev_extension =
+ (u32) lst_first(pdrv_obj->dev_node_string);
+ }
+ }
+
+ return dw_dev_extension;
+}
+
+/*
+ * ======== drv_get_next_dev_object ========
+ * Purpose:
+ * Retrieve the next Device Object handle from an internal linked list of
+ * DEV_OBJECTs maintained by DRV, after having previously called
+ * drv_get_first_dev_object() and zero or more drv_get_next_dev_object() calls.
+ */
+u32 drv_get_next_dev_object(u32 hdev_obj)
+{
+ u32 dw_next_dev_object = 0;
+ struct drv_object *pdrv_obj;
+
+ DBC_REQUIRE(hdev_obj != 0);
+
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
+
+ if ((pdrv_obj->dev_list != NULL) &&
+ !LST_IS_EMPTY(pdrv_obj->dev_list)) {
+ dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
+ (struct list_head *)
+ hdev_obj);
+ }
+ }
+ return dw_next_dev_object;
+}
+
+/*
+ * ======== drv_get_next_dev_extension ========
+ * Purpose:
+ * Retrieve the next Device Extension from an internal linked list of
+ * pointers to dev_node strings maintained by DRV, after having previously
+ * called drv_get_first_dev_extension() and zero or more
+ * drv_get_next_dev_extension().
+ */
+u32 drv_get_next_dev_extension(u32 dev_extension)
+{
+ u32 dw_dev_extension = 0;
+ struct drv_object *pdrv_obj;
+
+ DBC_REQUIRE(dev_extension != 0);
+
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
+ if ((pdrv_obj->dev_node_string != NULL) &&
+ !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
+ dw_dev_extension =
+ (u32) lst_next(pdrv_obj->dev_node_string,
+ (struct list_head *)dev_extension);
+ }
+ }
+
+ return dw_dev_extension;
+}
+
+/*
+ * ======== drv_init ========
+ * Purpose:
+ * Initialize DRV module private state.
+ */
+int drv_init(void)
+{
+ s32 ret = 1; /* function return value */
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
+
+/*
+ * ======== drv_insert_dev_object ========
+ * Purpose:
+ * Insert a DevObject into the DRV object's list of device objects.
+ */
+int drv_insert_dev_object(struct drv_object *driver_obj,
+ struct dev_object *hdev_obj)
+{
+ struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hdev_obj != NULL);
+ DBC_REQUIRE(pdrv_object);
+ DBC_ASSERT(pdrv_object->dev_list);
+
+ lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);
+
+ DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));
+
+ return 0;
+}
+
+/*
+ * ======== drv_remove_dev_object ========
+ * Purpose:
+ * Search for and remove a DeviceObject from the given DRV object's
+ * device list.
+ */
+int drv_remove_dev_object(struct drv_object *driver_obj,
+ struct dev_object *hdev_obj)
+{
+ int status = -EPERM;
+ struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
+ struct list_head *cur_elem;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pdrv_object);
+ DBC_REQUIRE(hdev_obj != NULL);
+
+ DBC_REQUIRE(pdrv_object->dev_list != NULL);
+ DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));
+
+	/* Search list for hdev_obj: */
+ for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
+ cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
+ /* If found, remove it. */
+ if ((struct dev_object *)cur_elem == hdev_obj) {
+ lst_remove_elem(pdrv_object->dev_list, cur_elem);
+ status = 0;
+ break;
+ }
+ }
+ /* Remove list if empty. */
+ if (LST_IS_EMPTY(pdrv_object->dev_list)) {
+ kfree(pdrv_object->dev_list);
+ pdrv_object->dev_list = NULL;
+ }
+ DBC_ENSURE((pdrv_object->dev_list == NULL) ||
+ !LST_IS_EMPTY(pdrv_object->dev_list));
+
+ return status;
+}
+
+/*
+ * ======== drv_request_resources ========
+ * Purpose:
+ * Requests resources from the OS.
+ */
+int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
+{
+ int status = 0;
+ struct drv_object *pdrv_object;
+ struct drv_ext *pszdev_node;
+
+ DBC_REQUIRE(dw_context != 0);
+ DBC_REQUIRE(dev_node_strg != NULL);
+
+ /*
+	 * Allocate memory to hold the string. This will live until
+	 * it is freed in drv_release_resources. Update the driver
+	 * object list.
+ */
+
+ status = cfg_get_object((u32 *) &pdrv_object, REG_DRV_OBJECT);
+ if (!status) {
+ pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
+ if (pszdev_node) {
+ lst_init_elem(&pszdev_node->link);
+ strncpy(pszdev_node->sz_string,
+ (char *)dw_context, MAXREGPATHLENGTH - 1);
+ pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
+ /* Update the Driver Object List */
+ *dev_node_strg = (u32) pszdev_node->sz_string;
+ lst_put_tail(pdrv_object->dev_node_string,
+ (struct list_head *)pszdev_node);
+ } else {
+ status = -ENOMEM;
+ *dev_node_strg = 0;
+ }
+ } else {
+ dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
+ __func__);
+ *dev_node_strg = 0;
+ }
+
+ DBC_ENSURE((!status && dev_node_strg != NULL &&
+ !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
+ (status && *dev_node_strg == 0));
+
+ return status;
+}
+
+/*
+ * ======== drv_release_resources ========
+ * Purpose:
+ * Releases resources from the OS.
+ */
+int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
+{
+ int status = 0;
+ struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
+ struct drv_ext *pszdev_node;
+
+ /*
+	 * Irrespective of the status, go ahead and clean up.
+	 * The following will overwrite the status.
+ */
+ for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
+ pszdev_node != NULL; pszdev_node = (struct drv_ext *)
+ drv_get_next_dev_extension((u32) pszdev_node)) {
+ if (!pdrv_object->dev_node_string) {
+			/* When could this happen? */
+ continue;
+ }
+ if ((u32) pszdev_node == dw_context) {
+ /* Found it */
+ /* Delete from the Driver object list */
+ lst_remove_elem(pdrv_object->dev_node_string,
+ (struct list_head *)pszdev_node);
+ kfree((void *)pszdev_node);
+ break;
+ }
+ /* Delete the List if it is empty */
+ if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
+ kfree(pdrv_object->dev_node_string);
+ pdrv_object->dev_node_string = NULL;
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== request_bridge_resources ========
+ * Purpose:
+ * Reserves shared memory for bridge.
+ */
+static int request_bridge_resources(struct cfg_hostres *res)
+{
+ struct cfg_hostres *host_res = res;
+
+ /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
+ host_res->num_mem_windows = 2;
+
+ /* First window is for DSP internal memory */
+ host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
+ dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
+ dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
+ dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
+
+	/* For the 24xx base port, the memory for the DSP internal memory
+	 * is not mapped. TODO: do an ioremap here */
+ /* Second window is for DSP external memory shared with MPU */
+
+ /* These are hard-coded values */
+ host_res->birq_registers = 0;
+ host_res->birq_attrib = 0;
+ host_res->dw_offset_for_monitor = 0;
+ host_res->dw_chnl_offset = 0;
+ /* CHNL_MAXCHANNELS */
+ host_res->dw_num_chnls = CHNL_MAXCHANNELS;
+ host_res->dw_chnl_buf_size = 0x400;
+
+ return 0;
+}
+
+/*
+ * ======== drv_request_bridge_res_dsp ========
+ * Purpose:
+ * Reserves shared memory for bridge.
+ */
+int drv_request_bridge_res_dsp(void **phost_resources)
+{
+ int status = 0;
+ struct cfg_hostres *host_res;
+ u32 dw_buff_size;
+ u32 dma_addr;
+ u32 shm_size;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ dw_buff_size = sizeof(struct cfg_hostres);
+
+ host_res = kzalloc(dw_buff_size, GFP_KERNEL);
+
+ if (host_res != NULL) {
+ request_bridge_resources(host_res);
+ /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
+ host_res->num_mem_windows = 4;
+
+ host_res->dw_mem_base[0] = 0;
+ host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
+ OMAP_DSP_MEM1_SIZE);
+ host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
+ OMAP_DSP_MEM2_SIZE);
+ host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
+ OMAP_DSP_MEM3_SIZE);
+ host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
+ OMAP_PER_CM_SIZE);
+ host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
+ OMAP_PER_PRM_SIZE);
+ host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
+ OMAP_CORE_PRM_SIZE);
+ host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
+ OMAP_DMMU_SIZE);
+
+ dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
+ host_res->dw_mem_base[0]);
+ dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
+ host_res->dw_mem_base[1]);
+ dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
+ host_res->dw_mem_base[2]);
+ dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
+ host_res->dw_mem_base[3]);
+ dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
+ host_res->dw_mem_base[4]);
+ dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
+
+ shm_size = drv_datap->shm_size;
+ if (shm_size >= 0x10000) {
+ /* Allocate Physically contiguous,
+ * non-cacheable memory */
+ host_res->dw_mem_base[1] =
+ (u32) mem_alloc_phys_mem(shm_size, 0x100000,
+ &dma_addr);
+ if (host_res->dw_mem_base[1] == 0) {
+ status = -ENOMEM;
+ pr_err("shm reservation Failed\n");
+ } else {
+ host_res->dw_mem_length[1] = shm_size;
+ host_res->dw_mem_phys[1] = dma_addr;
+
+ dev_dbg(bridge, "%s: Bridge shm address 0x%x "
+ "dma_addr %x size %x\n", __func__,
+ host_res->dw_mem_base[1],
+ dma_addr, shm_size);
+ }
+ }
+ if (!status) {
+ /* These are hard-coded values */
+ host_res->birq_registers = 0;
+ host_res->birq_attrib = 0;
+ host_res->dw_offset_for_monitor = 0;
+ host_res->dw_chnl_offset = 0;
+ /* CHNL_MAXCHANNELS */
+ host_res->dw_num_chnls = CHNL_MAXCHANNELS;
+ host_res->dw_chnl_buf_size = 0x400;
+ dw_buff_size = sizeof(struct cfg_hostres);
+ }
+ *phost_resources = host_res;
+ }
+ /* End Mem alloc */
+ return status;
+}
+
+void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
+{
+ u32 pool_virt_base;
+
+ /* get the virtual address for the physical memory pool passed */
+ pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);
+
+	if (!pool_virt_base) {
+ pr_err("%s: external physical memory map failed\n", __func__);
+ ext_phys_mem_pool_enabled = false;
+ } else {
+ ext_mem_pool.phys_mem_base = pool_phys_base;
+ ext_mem_pool.phys_mem_size = pool_size;
+ ext_mem_pool.virt_mem_base = pool_virt_base;
+ ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
+ ext_phys_mem_pool_enabled = true;
+ }
+}
+
+void mem_ext_phys_pool_release(void)
+{
+ if (ext_phys_mem_pool_enabled) {
+ iounmap((void *)(ext_mem_pool.virt_mem_base));
+ ext_phys_mem_pool_enabled = false;
+ }
+}
+
+/*
+ * ======== mem_ext_phys_mem_alloc ========
+ * Purpose:
+ * Allocate physically contiguous, uncached memory from external memory pool
+ */
+
+static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
+{
+ u32 new_alloc_ptr;
+ u32 offset;
+ u32 virt_addr;
+
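+	/* The mask arithmetic below assumes align is a power of two. */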
+ if (align == 0)
+ align = 1;
+
+ if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
+ - ext_mem_pool.next_phys_alloc_ptr)) {
+		*phys_addr = 0;
+ return NULL;
+ } else {
+ offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
+ if (offset == 0)
+ new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
+ else
+ new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
+ (align - offset);
+ if ((new_alloc_ptr + bytes) <=
+ (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
+ /* we can allocate */
+ *phys_addr = new_alloc_ptr;
+ ext_mem_pool.next_phys_alloc_ptr =
+ new_alloc_ptr + bytes;
+ virt_addr =
+ ext_mem_pool.virt_mem_base + (new_alloc_ptr -
+ ext_mem_pool.
+ phys_mem_base);
+ return (void *)virt_addr;
+ } else {
+ *phys_addr = 0;
+ return NULL;
+ }
+ }
+}
+
+/*
+ * ======== mem_alloc_phys_mem ========
+ * Purpose:
+ * Allocate physically contiguous, uncached memory
+ */
+void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
+ u32 *physical_address)
+{
+ void *va_mem = NULL;
+ dma_addr_t pa_mem;
+
+ if (byte_size > 0) {
+ if (ext_phys_mem_pool_enabled) {
+ va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
+ (u32 *) &pa_mem);
+ } else
+ va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
+ GFP_KERNEL);
+ if (va_mem == NULL)
+ *physical_address = 0;
+ else
+ *physical_address = pa_mem;
+ }
+ return va_mem;
+}
+
+/*
+ * ======== mem_free_phys_mem ========
+ * Purpose:
+ * Free the given block of physically contiguous memory.
+ */
+void mem_free_phys_mem(void *virtual_address, u32 physical_address,
+ u32 byte_size)
+{
+ DBC_REQUIRE(virtual_address != NULL);
+
+ if (!ext_phys_mem_pool_enabled)
+ dma_free_coherent(NULL, byte_size, virtual_address,
+ physical_address);
+}
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
new file mode 100644
index 00000000000..7ee89492a75
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -0,0 +1,656 @@
+/*
+ * drv_interface.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge driver interface.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+
+#include <dspbridge/host_os.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/services.h>
+#include <dspbridge/clk.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dspapi-ioctl.h>
+#include <dspbridge/dspapi.h>
+#include <dspbridge/dspdrv.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/pwr.h>
+
+/* ----------------------------------- This */
+#include <drv_interface.h>
+
+#include <dspbridge/cfg.h>
+#include <dspbridge/resourcecleanup.h>
+#include <dspbridge/chnl.h>
+#include <dspbridge/proc.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/drvdefs.h>
+#include <dspbridge/drv.h>
+
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+#include <mach-omap2/omap3-opp.h>
+#endif
+
+#define BRIDGE_NAME "C6410"
+/* ----------------------------------- Globals */
+#define DRIVER_NAME "DspBridge"
+#define DSPBRIDGE_VERSION "0.3"
+s32 dsp_debug;
+
+struct platform_device *omap_dspbridge_dev;
+struct device *bridge;
+
+/* This is a test variable used by Bridge to test different sleep states */
+s32 dsp_test_sleepstate;
+
+static struct cdev bridge_cdev;
+
+static struct class *bridge_class;
+
+static u32 driver_context;
+static s32 driver_major;
+static char *base_img;
+char *iva_img;
+static s32 shm_size = 0x500000; /* 5 MB */
+static int tc_wordswapon; /* Default value is always false */
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+#define REC_TIMEOUT 5000 /*recovery timeout in msecs */
+static atomic_t bridge_cref; /* number of bridge open handles */
+static struct workqueue_struct *bridge_rec_queue;
+static struct work_struct bridge_recovery_work;
+static DECLARE_COMPLETION(bridge_comp);
+static DECLARE_COMPLETION(bridge_open_comp);
+static bool recover;
+#endif
+
+#ifdef CONFIG_PM
+struct omap34_xx_bridge_suspend_data {
+ int suspended;
+ wait_queue_head_t suspend_wq;
+};
+
+static struct omap34_xx_bridge_suspend_data bridge_suspend_data;
+
+static int omap34_xxbridge_suspend_lockout(struct omap34_xx_bridge_suspend_data
+ *s, struct file *f)
+{
+ if ((s)->suspended) {
+ if ((f)->f_flags & O_NONBLOCK)
+ return -EPERM;
+ wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0);
+ }
+ return 0;
+}
+#endif
+
+module_param(dsp_debug, int, 0);
+MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false");
+
+module_param(dsp_test_sleepstate, int, 0);
+MODULE_PARM_DESC(dsp_test_sleepstate, "DSP Sleep state = 0");
+
+module_param(base_img, charp, 0);
+MODULE_PARM_DESC(base_img, "DSP base image, default = NULL");
+
+module_param(shm_size, int, 0);
+MODULE_PARM_DESC(shm_size, "shm size, default = 5 MB, minimum = 64 KB");
+
+module_param(tc_wordswapon, int, 0);
+MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. default = 0");
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DSPBRIDGE_VERSION);
+
+static char *driver_name = DRIVER_NAME;
+
+static const struct file_operations bridge_fops = {
+ .open = bridge_open,
+ .release = bridge_release,
+ .unlocked_ioctl = bridge_ioctl,
+ .mmap = bridge_mmap,
+};
+
+#ifdef CONFIG_PM
+static u32 time_out = 1000;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+s32 dsp_max_opps = VDD1_OPP5;
+#endif
+
+/* Maximum Opps that can be requested by IVA */
+/* vdd1 rate table */
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+const struct omap_opp vdd1_rate_table_bridge[] = {
+ {0, 0, 0},
+ /*OPP1 */
+ {S125M, VDD1_OPP1, 0},
+ /*OPP2 */
+ {S250M, VDD1_OPP2, 0},
+ /*OPP3 */
+ {S500M, VDD1_OPP3, 0},
+ /*OPP4 */
+ {S550M, VDD1_OPP4, 0},
+ /*OPP5 */
+ {S600M, VDD1_OPP5, 0},
+};
+#endif
+#endif
+
+struct dspbridge_platform_data *omap_dspbridge_pdata;
+
+u32 vdd1_dsp_freq[6][4] = {
+ {0, 0, 0, 0},
+ /*OPP1 */
+ {0, 90000, 0, 86000},
+ /*OPP2 */
+ {0, 180000, 80000, 170000},
+ /*OPP3 */
+ {0, 360000, 160000, 340000},
+ /*OPP4 */
+ {0, 396000, 325000, 376000},
+ /*OPP5 */
+ {0, 430000, 355000, 430000},
+};
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+static void bridge_recover(struct work_struct *work)
+{
+ struct dev_object *dev;
+ struct cfg_devnode *dev_node;
+ if (atomic_read(&bridge_cref)) {
+ INIT_COMPLETION(bridge_comp);
+ while (!wait_for_completion_timeout(&bridge_comp,
+ msecs_to_jiffies(REC_TIMEOUT)))
+ pr_info("%s:%d handle(s) still opened\n",
+ __func__, atomic_read(&bridge_cref));
+ }
+ dev = dev_get_first();
+ dev_get_dev_node(dev, &dev_node);
+ if (!dev_node || proc_auto_start(dev_node, dev))
+ pr_err("DSP could not be restarted\n");
+ recover = false;
+ complete_all(&bridge_open_comp);
+}
+
+void bridge_recover_schedule(void)
+{
+ INIT_COMPLETION(bridge_open_comp);
+ recover = true;
+ queue_work(bridge_rec_queue, &bridge_recovery_work);
+}
+#endif
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+static int dspbridge_scale_notification(struct notifier_block *op,
+ unsigned long val, void *ptr)
+{
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+
+ if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
+ pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
+
+ return 0;
+}
+
+static struct notifier_block iva_clk_notifier = {
+	.notifier_call = dspbridge_scale_notification,
+};
+#endif
+
+/**
+ * omap3_bridge_startup() - perform low level initializations
+ * @pdev: pointer to platform device
+ *
+ * Initializes recovery, PM and DVFS required data, before calling
+ * clk and memory init routines.
+ */
+static int omap3_bridge_startup(struct platform_device *pdev)
+{
+ struct dspbridge_platform_data *pdata = pdev->dev.platform_data;
+ struct drv_data *drv_datap = NULL;
+ u32 phys_membase, phys_memsize;
+ int err;
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ int i;
+#endif
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ bridge_rec_queue = create_workqueue("bridge_rec_queue");
+ INIT_WORK(&bridge_recovery_work, bridge_recover);
+ INIT_COMPLETION(bridge_comp);
+#endif
+
+#ifdef CONFIG_PM
+ /* Initialize the wait queue */
+ bridge_suspend_data.suspended = 0;
+ init_waitqueue_head(&bridge_suspend_data.suspend_wq);
+
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ for (i = 0; i < 6; i++)
+ pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate;
+
+ err = cpufreq_register_notifier(&iva_clk_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (err)
+ pr_err("%s: clk_notifier_register failed for iva2_ck\n",
+ __func__);
+#endif
+#endif
+
+ dsp_clk_init();
+ services_init();
+
+ drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL);
+ if (!drv_datap) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ drv_datap->shm_size = shm_size;
+ drv_datap->tc_wordswapon = tc_wordswapon;
+
+ if (base_img) {
+ drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL);
+ if (!drv_datap->base_img) {
+ err = -ENOMEM;
+ goto err2;
+ }
+ strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1);
+ }
+
+ dev_set_drvdata(bridge, drv_datap);
+
+ if (shm_size < 0x10000) { /* 64 KB */
+ err = -EINVAL;
+ pr_err("%s: shm size must be at least 64 KB\n", __func__);
+ goto err3;
+ }
+ dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size);
+
+ phys_membase = pdata->phys_mempool_base;
+ phys_memsize = pdata->phys_mempool_size;
+ if (phys_membase > 0 && phys_memsize > 0)
+ mem_ext_phys_pool_init(phys_membase, phys_memsize);
+
+ if (tc_wordswapon)
+ dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__);
+
+ driver_context = dsp_init(&err);
+ if (err) {
+ pr_err("DSP Bridge driver initialization failed\n");
+ goto err4;
+ }
+
+ return 0;
+
+err4:
+ mem_ext_phys_pool_release();
+err3:
+ kfree(drv_datap->base_img);
+err2:
+ kfree(drv_datap);
+err1:
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ cpufreq_unregister_notifier(&iva_clk_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+ dsp_clk_exit();
+ services_exit();
+
+ return err;
+}
+
+static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
+{
+ int err;
+ dev_t dev = 0;
+
+ omap_dspbridge_dev = pdev;
+
+ /* Global bridge device */
+ bridge = &omap_dspbridge_dev->dev;
+
+ /* Bridge low level initializations */
+ err = omap3_bridge_startup(pdev);
+ if (err)
+ goto err1;
+
+ /* use 2.6 device model */
+ err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ if (err) {
+ pr_err("%s: Can't get major %d\n", __func__, driver_major);
+ goto err1;
+ }
+
+ cdev_init(&bridge_cdev, &bridge_fops);
+ bridge_cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&bridge_cdev, dev, 1);
+ if (err) {
+ pr_err("%s: Failed to add bridge device\n", __func__);
+ goto err2;
+ }
+
+ /* udev support */
+ bridge_class = class_create(THIS_MODULE, "ti_bridge");
+ if (IS_ERR(bridge_class)) {
+ pr_err("%s: Error creating bridge class\n", __func__);
+ goto err3;
+ }
+
+ driver_major = MAJOR(dev);
+ device_create(bridge_class, NULL, MKDEV(driver_major, 0),
+ NULL, "DspBridge");
+ pr_info("DSP Bridge driver loaded\n");
+
+ return 0;
+
+err3:
+ cdev_del(&bridge_cdev);
+err2:
+ unregister_chrdev_region(dev, 1);
+err1:
+ return err;
+}
+
+static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
+{
+ dev_t devno;
+ bool ret;
+ int status = 0;
+ void *hdrv_obj = NULL;
+
+ status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
+ if (status)
+ goto func_cont;
+
+#ifdef CONFIG_TIDSPBRIDGE_DVFS
+ if (cpufreq_unregister_notifier(&iva_clk_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER))
+ pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
+ __func__);
+#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
+
+ if (driver_context) {
+ /* Put the DSP in reset state */
+ ret = dsp_deinit(driver_context);
+ driver_context = 0;
+ DBC_ASSERT(ret == true);
+ }
+
+func_cont:
+ mem_ext_phys_pool_release();
+
+ dsp_clk_exit();
+ services_exit();
+
+ devno = MKDEV(driver_major, 0);
+ cdev_del(&bridge_cdev);
+ unregister_chrdev_region(devno, 1);
+ if (bridge_class) {
+ /* remove the device from sysfs */
+ device_destroy(bridge_class, MKDEV(driver_major, 0));
+ class_destroy(bridge_class);
+
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
+{
+ u32 status;
+ u32 command = PWR_EMERGENCYDEEPSLEEP;
+
+ status = pwr_sleep_dsp(command, time_out);
+ if (status)
+ return -1;
+
+ bridge_suspend_data.suspended = 1;
+ return 0;
+}
+
+static int BRIDGE_RESUME(struct platform_device *pdev)
+{
+ u32 status;
+
+ status = pwr_wake_dsp(time_out);
+ if (status)
+ return -1;
+
+ bridge_suspend_data.suspended = 0;
+ wake_up(&bridge_suspend_data.suspend_wq);
+ return 0;
+}
+#else
+#define BRIDGE_SUSPEND NULL
+#define BRIDGE_RESUME NULL
+#endif
+
+static struct platform_driver bridge_driver = {
+ .driver = {
+ .name = BRIDGE_NAME,
+ },
+ .probe = omap34_xx_bridge_probe,
+ .remove = __devexit_p(omap34_xx_bridge_remove),
+ .suspend = BRIDGE_SUSPEND,
+ .resume = BRIDGE_RESUME,
+};
+
+static int __init bridge_init(void)
+{
+ return platform_driver_register(&bridge_driver);
+}
+
+static void __exit bridge_exit(void)
+{
+ platform_driver_unregister(&bridge_driver);
+}
+
+/*
+ * This function is called when an application opens a handle to the
+ * bridge driver.
+ */
+static int bridge_open(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt = NULL;
+
+ /*
+ * Allocate a new process context and insert it into global
+ * process context list.
+ */
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ if (filp->f_flags & O_NONBLOCK ||
+ wait_for_completion_interruptible(&bridge_open_comp))
+ return -EBUSY;
+ }
+#endif
+ pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
+ if (pr_ctxt) {
+ pr_ctxt->res_state = PROC_RES_ALLOCATED;
+ spin_lock_init(&pr_ctxt->dmm_map_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+ spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
+
+ pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (pr_ctxt->node_id) {
+ idr_init(pr_ctxt->node_id);
+ } else {
+ status = -ENOMEM;
+ goto err;
+ }
+
+ pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (pr_ctxt->stream_id)
+ idr_init(pr_ctxt->stream_id);
+ else
+ status = -ENOMEM;
+ } else {
+ status = -ENOMEM;
+ }
+err:
+ filp->private_data = pr_ctxt;
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (!status)
+ atomic_inc(&bridge_cref);
+#endif
+ return status;
+}
+
+/*
+ * This function is called when an application closes its handle to the bridge
+ * driver.
+ */
+static int bridge_release(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt;
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ pr_ctxt = filp->private_data;
+ flush_signals(current);
+ drv_remove_all_resources(pr_ctxt);
+ proc_detach(pr_ctxt);
+ kfree(pr_ctxt);
+
+ filp->private_data = NULL;
+
+err:
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (!atomic_dec_return(&bridge_cref))
+ complete(&bridge_comp);
+#endif
+ return status;
+}
+
+/* This function provides IO interface to the bridge driver. */
+static long bridge_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status;
+ u32 retval = 0;
+ union trapped_args buf_in;
+
+ DBC_REQUIRE(filp != NULL);
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ status = -EIO;
+ goto err;
+ }
+#endif
+#ifdef CONFIG_PM
+ status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
+ if (status != 0)
+ return status;
+#endif
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ status = copy_from_user(&buf_in, (union trapped_args *)args,
+    sizeof(union trapped_args));
+ if (status)
+  status = -EFAULT;
+
+ if (!status) {
+ status = api_call_dev_ioctl(code, &buf_in, &retval,
+ filp->private_data);
+
+ if (!status) {
+ status = retval;
+ } else {
+ dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
+ "status 0x%x\n", __func__, code, status);
+ status = -1;
+ }
+
+ }
+
+err:
+ return status;
+}
+
+/* This function maps kernel space memory to user space memory. */
+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ u32 offset = vma->vm_pgoff << PAGE_SHIFT;
+ u32 status;
+
+ DBC_ASSERT(vma->vm_start < vma->vm_end);
+
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
+ "%lx flags %lx\n", __func__, filp, offset,
+ vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
+
+ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (status != 0)
+ status = -EAGAIN;
+
+ return status;
+}
+
+/* To remove all process resources before removing the process from the
+ * process context list */
+int drv_remove_all_resources(void *process_ctxt)
+{
+ int status = 0;
+ struct process_context *ctxt = (struct process_context *)process_ctxt;
+ drv_remove_all_strm_res_elements(ctxt);
+ drv_remove_all_node_res_elements(ctxt);
+ drv_remove_all_dmm_res_elements(ctxt);
+ ctxt->res_state = PROC_RES_FREED;
+ return status;
+}
+
+/* Bridge driver initialization and de-initialization functions */
+module_init(bridge_init);
+module_exit(bridge_exit);
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.h b/drivers/staging/tidspbridge/rmgr/drv_interface.h
new file mode 100644
index 00000000000..ab070602adc
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.h
@@ -0,0 +1,28 @@
+/*
+ * drv_interface.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DRV_INTERFACE_H_
+#define _DRV_INTERFACE_H_
+
+/* Prototypes for all functions in this bridge */
+static int __init bridge_init(void); /* Initialize bridge */
+static void __exit bridge_exit(void); /* Opposite of initialize */
+static int bridge_open(struct inode *ip, struct file *filp); /* Open */
+static int bridge_release(struct inode *ip, struct file *filp); /* Release */
+static long bridge_ioctl(struct file *filp, unsigned int code,
+ unsigned long args);
+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
+#endif /* ifndef _DRV_INTERFACE_H_ */
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c
new file mode 100644
index 00000000000..714f348f526
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/dspdrv.c
@@ -0,0 +1,142 @@
+/*
+ * dspdrv.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Interface to allocate and free bridge resources.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+#include <linux/types.h>
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/drv.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/dspapi.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/mgr.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/dspdrv.h>
+
+/*
+ * ======== dsp_init ========
+ * Allocates bridge resources. Loads a base image onto DSP, if specified.
+ */
+u32 dsp_init(u32 *init_status)
+{
+ char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510";
+ int status = -EPERM;
+ struct drv_object *drv_obj = NULL;
+ u32 device_node;
+ u32 device_node_string;
+
+ if (!api_init())
+ goto func_cont;
+
+ status = drv_create(&drv_obj);
+ if (status) {
+ api_exit();
+ goto func_cont;
+ }
+
+ /* End drv_create */
+ /* Request Resources */
+ status = drv_request_resources((u32) &dev_node, &device_node_string);
+ if (!status) {
+ /* Attempt to Start the Device */
+ status = dev_start_device((struct cfg_devnode *)
+ device_node_string);
+ if (status)
+ (void)drv_release_resources
+ ((u32) device_node_string, drv_obj);
+ } else {
+ dev_dbg(bridge, "%s: drv_request_resources Failed\n", __func__);
+ status = -EPERM;
+ }
+
+ /* Unwind whatever was loaded */
+ if (status) {
+ /* Irrespective of the status of dev_remove_device we continue
+ * unloading. Get the driver object, iterate through, and remove.
+ * Reset the status to E_FAIL to avoid going through
+ * api_init_complete2. */
+ for (device_node = drv_get_first_dev_extension();
+ device_node != 0;
+ device_node = drv_get_next_dev_extension(device_node)) {
+ (void)dev_remove_device((struct cfg_devnode *)
+ device_node);
+ (void)drv_release_resources((u32) device_node, drv_obj);
+ }
+ /* Remove the Driver Object */
+ (void)drv_destroy(drv_obj);
+ drv_obj = NULL;
+ api_exit();
+ dev_dbg(bridge, "%s: Logical device failed init\n", __func__);
+ } /* Unwinding the loaded drivers */
+func_cont:
+ /* Attempt to Start the Board */
+ if (!status) {
+ /* BRD_AutoStart could fail if the dsp executable is not the
+ * correct one. We should not propagate that error
+ * into the device loader. */
+ (void)api_init_complete2();
+ } else {
+ dev_dbg(bridge, "%s: Failed\n", __func__);
+ } /* End api_init_complete2 */
+ DBC_ENSURE((!status && drv_obj != NULL) ||
+ (status && drv_obj == NULL));
+ *init_status = status;
+ /* Return the Driver Object */
+ return (u32) drv_obj;
+}
+
+/*
+ * ======== dsp_deinit ========
+ * Frees the resources allocated for bridge.
+ */
+bool dsp_deinit(u32 device_context)
+{
+ bool ret = true;
+ u32 device_node;
+ struct mgr_object *mgr_obj = NULL;
+
+ while ((device_node = drv_get_first_dev_extension()) != 0) {
+ (void)dev_remove_device((struct cfg_devnode *)device_node);
+
+ (void)drv_release_resources((u32) device_node,
+ (struct drv_object *)device_context);
+ }
+
+ (void)drv_destroy((struct drv_object *)device_context);
+
+ /* Get the Manager Object from Registry
+ * MGR Destroy will unload the DCD dll */
+ if (!cfg_get_object((u32 *) &mgr_obj, REG_MGR_OBJECT))
+ (void)mgr_destroy(mgr_obj);
+
+ api_exit();
+
+ return ret;
+}
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
new file mode 100644
index 00000000000..57a39b9c274
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -0,0 +1,375 @@
+/*
+ * mgr.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of Manager interface to the device object at the
+ * driver level. This queries the NDB database and retrieves the
+ * data about nodes and processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/dbdcd.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/mgr.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+#define ZLDLLNAME ""
+
+struct mgr_object {
+ struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
+};
+
+/* ----------------------------------- Globals */
+static u32 refs;
+
+/*
+ * ========= mgr_create =========
+ * Purpose:
+ * The MGR object gets created only once, during driver loading.
+ */
+int mgr_create(struct mgr_object **mgr_obj,
+ struct cfg_devnode *dev_node_obj)
+{
+ int status = 0;
+ struct mgr_object *pmgr_obj = NULL;
+
+ DBC_REQUIRE(mgr_obj != NULL);
+ DBC_REQUIRE(refs > 0);
+
+ pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
+ if (pmgr_obj) {
+ status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr);
+ if (!status) {
+ /* If succeeded store the handle in the MGR Object */
+ status = cfg_set_object((u32) pmgr_obj, REG_MGR_OBJECT);
+ if (!status) {
+ *mgr_obj = pmgr_obj;
+ } else {
+ dcd_destroy_manager(pmgr_obj->hdcd_mgr);
+ kfree(pmgr_obj);
+ }
+ } else {
+ /* failed to Create DCD Manager */
+ kfree(pmgr_obj);
+ }
+ } else {
+ status = -ENOMEM;
+ }
+
+ DBC_ENSURE(status || pmgr_obj);
+ return status;
+}
+
+/*
+ * ========= mgr_destroy =========
+ * This function is invoked during bridge driver unloading. Frees the MGR object.
+ */
+int mgr_destroy(struct mgr_object *hmgr_obj)
+{
+ int status = 0;
+ struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hmgr_obj);
+
+ /* Free resources */
+ if (hmgr_obj->hdcd_mgr)
+ dcd_destroy_manager(hmgr_obj->hdcd_mgr);
+
+ kfree(pmgr_obj);
+ /* Update the Registry with NULL for MGR Object */
+ (void)cfg_set_object(0, REG_MGR_OBJECT);
+
+ return status;
+}
+
+/*
+ * ======== mgr_enum_node_info ========
+ * Enumerate and get configuration information about nodes configured
+ * in the node database.
+ */
+int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
+ u32 undb_props_size, u32 *pu_num_nodes)
+{
+ int status = 0;
+ struct dsp_uuid node_uuid, temp_uuid;
+ u32 temp_index = 0;
+ u32 node_index = 0;
+ struct dcd_genericobj gen_obj;
+ struct mgr_object *pmgr_obj = NULL;
+
+ DBC_REQUIRE(pndb_props != NULL);
+ DBC_REQUIRE(pu_num_nodes != NULL);
+ DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
+ DBC_REQUIRE(refs > 0);
+
+ *pu_num_nodes = 0;
+ /* Get The Manager Object from the Registry */
+ status = cfg_get_object((u32 *) &pmgr_obj, REG_MGR_OBJECT);
+ if (status)
+ goto func_cont;
+
+ DBC_ASSERT(pmgr_obj);
+ /* Loop until the enumeration fails or runs out of items; any
+ * non-zero status ends the loop. */
+ while (status == 0) {
+ status = dcd_enumerate_object(temp_index++, DSP_DCDNODETYPE,
+ &temp_uuid);
+ if (status == 0) {
+ node_index++;
+ if (node_id == (node_index - 1))
+ node_uuid = temp_uuid;
+
+ }
+ }
+ if (!status) {
+ if (node_id > (node_index - 1)) {
+ status = -EINVAL;
+ } else {
+ status = dcd_get_object_def(pmgr_obj->hdcd_mgr,
+ (struct dsp_uuid *)
+ &node_uuid, DSP_DCDNODETYPE,
+ &gen_obj);
+ if (!status) {
+ /* Get the Obj def */
+ *pndb_props =
+ gen_obj.obj_data.node_obj.ndb_props;
+ *pu_num_nodes = node_index;
+ }
+ }
+ }
+
+func_cont:
+ DBC_ENSURE((!status && *pu_num_nodes > 0) ||
+ (status && *pu_num_nodes == 0));
+
+ return status;
+}
+
+/*
+ * ======== mgr_enum_processor_info ========
+ * Enumerate and get configuration information about available
+ * DSP processors.
+ */
+int mgr_enum_processor_info(u32 processor_id,
+ struct dsp_processorinfo *
+ processor_info, u32 processor_info_size,
+ u8 *pu_num_procs)
+{
+ int status = 0;
+ int status1 = 0;
+ int status2 = 0;
+ struct dsp_uuid temp_uuid;
+ u32 temp_index = 0;
+ u32 proc_index = 0;
+ struct dcd_genericobj gen_obj;
+ struct mgr_object *pmgr_obj = NULL;
+ struct mgr_processorextinfo *ext_info;
+ struct dev_object *hdev_obj;
+ struct drv_object *hdrv_obj;
+ u8 dev_type;
+ struct cfg_devnode *dev_node;
+ bool proc_detect = false;
+
+ DBC_REQUIRE(processor_info != NULL);
+ DBC_REQUIRE(pu_num_procs != NULL);
+ DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
+ DBC_REQUIRE(refs > 0);
+
+ *pu_num_procs = 0;
+ status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
+ if (!status) {
+ status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
+ if (!status) {
+ status = dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
+ status = dev_get_dev_node(hdev_obj, &dev_node);
+ if (dev_type != DSP_UNIT)
+ status = -EPERM;
+
+ if (!status)
+ processor_info->processor_type = DSPTYPE64;
+ }
+ }
+ if (status)
+ goto func_end;
+
+ /* Get The Manager Object from the Registry */
+ if (cfg_get_object((u32 *) &pmgr_obj, REG_MGR_OBJECT)) {
+ dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
+ goto func_end;
+ }
+ DBC_ASSERT(pmgr_obj);
+ /* Loop until the enumeration runs out of items; a non-zero
+ * status ends the loop. */
+ while (status1 == 0) {
+ status1 = dcd_enumerate_object(temp_index++,
+ DSP_DCDPROCESSORTYPE,
+ &temp_uuid);
+ if (status1 != 0)
+ break;
+
+ proc_index++;
+ /* Get the Object properties to find the Device/Processor
+ * Type */
+ if (proc_detect != false)
+ continue;
+
+ status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr,
+ (struct dsp_uuid *)&temp_uuid,
+ DSP_DCDPROCESSORTYPE, &gen_obj);
+ if (!status2) {
+ /* Get the Obj def */
+ if (processor_info_size <
+ sizeof(struct mgr_processorextinfo)) {
+ *processor_info = gen_obj.obj_data.proc_info;
+ } else {
+ /* extended info */
+ ext_info = (struct mgr_processorextinfo *)
+ processor_info;
+ *ext_info = gen_obj.obj_data.ext_proc_obj;
+ }
+ dev_dbg(bridge, "%s: Got proctype from DCD %x\n",
+ __func__, processor_info->processor_type);
+ /* See if we got the needed processor */
+ if (dev_type == DSP_UNIT) {
+ if (processor_info->processor_type ==
+ DSPPROCTYPE_C64)
+ proc_detect = true;
+ } else if (dev_type == IVA_UNIT) {
+ if (processor_info->processor_type ==
+ IVAPROCTYPE_ARM7)
+ proc_detect = true;
+ }
+ /* User applications only check for the chip type, so
+ * this clumsy overwrite */
+ processor_info->processor_type = DSPTYPE64;
+ } else {
+ dev_dbg(bridge, "%s: Failed to get DCD processor info "
+ "%x\n", __func__, status2);
+ status = -EPERM;
+ }
+ }
+ *pu_num_procs = proc_index;
+ if (proc_detect == false) {
+ dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use "
+ "CFG registry\n", __func__);
+ processor_info->processor_type = DSPTYPE64;
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== mgr_exit ========
+ * Decrement reference count, and free resources when reference count is
+ * 0.
+ */
+void mgr_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+ refs--;
+ if (refs == 0)
+ dcd_exit();
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== mgr_get_dcd_handle ========
+ * Retrieves the MGR handle. Accessor Function.
+ */
+int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
+ u32 *dcd_handle)
+{
+ int status = -EPERM;
+ struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dcd_handle != NULL);
+
+ *dcd_handle = (u32) NULL;
+ if (pmgr_obj) {
+ *dcd_handle = (u32) pmgr_obj->hdcd_mgr;
+ status = 0;
+ }
+ DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
+ (status && *dcd_handle == (u32) NULL));
+
+ return status;
+}
+
+/*
+ * ======== mgr_init ========
+ * Initialize MGR's private state, keeping a reference count on each call.
+ */
+bool mgr_init(void)
+{
+ bool ret = true;
+ bool init_dcd = false;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (refs == 0) {
+ init_dcd = dcd_init(); /* DCD Module */
+
+ if (!init_dcd)
+ ret = false;
+ }
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
+
+/*
+ * ======== mgr_wait_for_bridge_events ========
+ * Block on any Bridge event(s)
+ */
+int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
+ u32 count, u32 *pu_index,
+ u32 utimeout)
+{
+ int status;
+ struct sync_object *sync_events[MAX_EVENTS];
+ u32 i;
+
+ DBC_REQUIRE(count < MAX_EVENTS);
+
+ for (i = 0; i < count; i++)
+ sync_events[i] = anotifications[i]->handle;
+
+ status = sync_wait_on_multiple_events(sync_events, count, utimeout,
+ pu_index);
+
+ return status;
+
+}
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
new file mode 100644
index 00000000000..d8f4eebf742
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -0,0 +1,1974 @@
+/*
+ * nldr.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge dynamic + overlay Node loader.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+#include <dspbridge/host_os.h>
+
+#include <dspbridge/dbdefs.h>
+
+#include <dspbridge/dbc.h>
+
+/* Platform manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/dev.h>
+
+/* Resource manager */
+#include <dspbridge/dbll.h>
+#include <dspbridge/dbdcd.h>
+#include <dspbridge/rmm.h>
+#include <dspbridge/uuidutil.h>
+
+#include <dspbridge/nldr.h>
+#include <linux/gcd.h>
+
+/* Name of section containing dynamic load mem */
+#define DYNMEMSECT ".dspbridge_mem"
+
+/* Name of section containing dependent library information */
+#define DEPLIBSECT ".dspbridge_deplibs"
+
+/* Max depth of recursion for loading node's dependent libraries */
+#define MAXDEPTH 5
+
+/* Max number of persistent libraries kept by a node */
+#define MAXLIBS 5
+
+/*
+ * Defines for extracting packed dynamic load memory requirements from two
+ * masks.
+ * These defines must match node.cdb and dynm.cdb
+ * Format of data/code mask is:
+ * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
+ * where
+ * u = unused
+ * cccccc = preferred/required dynamic mem segid for create phase data/code
+ * dddddd = preferred/required dynamic mem segid for delete phase data/code
+ * eeeeee = preferred/required dynamic mem segid for execute phase data/code
+ * f = flag indicating if memory is preferred or required:
+ * f = 1 if required, f = 0 if preferred.
+ *
+ * The 6 bits of the segid are interpreted as follows:
+ *
+ * If the 6th bit (bit 5) is not set, then this specifies a memory segment
+ * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
+ * If the 6th bit (bit 5) is set, segid has the following interpretation:
+ * segid = 32 - Any internal memory segment can be used.
+ * segid = 33 - Any external memory segment can be used.
+ * segid = 63 - Any memory segment can be used (in this case the
+ * required/preferred flag is irrelevant).
+ *
+ */
+/* Maximum allowed dynamic loading memory segments */
+#define MAXMEMSEGS 32
+
+#define MAXSEGID 3 /* Largest possible (real) segid */
+#define MEMINTERNALID 32 /* Segid meaning use internal mem */
+#define MEMEXTERNALID 33 /* Segid meaning use external mem */
+#define NULLID 63 /* Segid meaning no memory req/pref */
+#define FLAGBIT 7 /* 7th bit is pref./req. flag */
+#define SEGMASK 0x3f /* Bits 0 - 5 */
+
+#define CREATEBIT 0 /* Create segid starts at bit 0 */
+#define DELETEBIT 8 /* Delete segid starts at bit 8 */
+#define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
+
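+/*
+ * Worked example of the packed format above (illustrative values only,
+ * mirroring the unpacking done in nldr_allocate() below): for a data mask
+ * of 0x000000a1, the create phase data segid is (0xa1 >> CREATEBIT) &
+ * SEGMASK = 0x21 = 33 (MEMEXTERNALID, i.e. any external segment), and the
+ * flag bit ((0xa1 >> (CREATEBIT + FLAGBIT)) & 1) is 1, so external memory
+ * is required rather than merely preferred for create phase data.
+ */
+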
+/*
+ * Masks that define memory type. Must match defines in dynm.cdb.
+ */
+#define DYNM_CODE 0x2
+#define DYNM_DATA 0x4
+#define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
+#define DYNM_INTERNAL 0x8
+#define DYNM_EXTERNAL 0x10
+
+/*
+ * Defines for packing memory requirement/preference flags for code and
+ * data of each of the node's phases into one mask.
+ * The bit is set if the segid is required for loading code/data of the
+ * given phase. The bit is not set, if the segid is preferred only.
+ *
+ * These defines are also used as indices into a segid array for the node.
+ * E.g. the node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
+ * create phase data is required or preferred to be loaded into.
+ */
+#define CREATEDATAFLAGBIT 0
+#define CREATECODEFLAGBIT 1
+#define EXECUTEDATAFLAGBIT 2
+#define EXECUTECODEFLAGBIT 3
+#define DELETEDATAFLAGBIT 4
+#define DELETECODEFLAGBIT 5
+#define MAXFLAGS 6
+
+ /*
+ * These names may be embedded in overlay sections to identify which
+ * node phase the section should be overlaid.
+ */
+#define PCREATE "create"
+#define PDELETE "delete"
+#define PEXECUTE "execute"
+
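+/*
+ * As parsed by add_ovly_info() below, an overlay section name carries the
+ * node name starting at its second character, followed by ':' and one of
+ * the phase strings above; a hypothetical example would be a section named
+ * ".mynode:create" (the node name "mynode" is made up for illustration).
+ */
+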
+static inline bool is_equal_uuid(struct dsp_uuid *uuid1,
+ struct dsp_uuid *uuid2)
+{
+ return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid));
+}
+
+ /*
+ * ======== mem_seg_info ========
+ * Format of dynamic loading memory segment info in coff file.
+ * Must match dynm.h55.
+ */
+struct mem_seg_info {
+ u32 segid; /* Dynamic loading memory segment number */
+ u32 base;
+ u32 len;
+ u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
+};
+
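+/*
+ * Layout sketch, inferred from the parsing in nldr_create() below: the
+ * DYNMEMSECT section starts with a u32 count of dynamic loading memory
+ * segments, immediately followed by that many struct mem_seg_info records
+ * (segid, base, len, type).
+ */
+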
+/*
+ * ======== lib_node ========
+ * For maintaining a tree of library dependencies.
+ */
+struct lib_node {
+ struct dbll_library_obj *lib; /* The library */
+ u16 dep_libs; /* Number of dependent libraries */
+ struct lib_node *dep_libs_tree; /* Dependent libraries of lib */
+};
+
+/*
+ * ======== ovly_sect ========
+ * Information needed to overlay a section.
+ */
+struct ovly_sect {
+ struct ovly_sect *next_sect;
+ u32 sect_load_addr; /* Load address of section */
+ u32 sect_run_addr; /* Run address of section */
+ u32 size; /* Size of section */
+ u16 page; /* DBL_CODE, DBL_DATA */
+};
+
+/*
+ * ======== ovly_node ========
+ * For maintaining a list of overlay nodes, with sections that need to be
+ * overlayed for each of the nodes phases.
+ */
+struct ovly_node {
+ struct dsp_uuid uuid;
+ char *node_name;
+ struct ovly_sect *create_sects_list;
+ struct ovly_sect *delete_sects_list;
+ struct ovly_sect *execute_sects_list;
+ struct ovly_sect *other_sects_list;
+ u16 create_sects;
+ u16 delete_sects;
+ u16 execute_sects;
+ u16 other_sects;
+ u16 create_ref;
+ u16 delete_ref;
+ u16 execute_ref;
+ u16 other_ref;
+};
+
+/*
+ * ======== nldr_object ========
+ * Overlay loader object.
+ */
+struct nldr_object {
+ struct dev_object *hdev_obj; /* Device object */
+ struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
+ struct dbll_tar_obj *dbll; /* The DBL loader */
+ struct dbll_library_obj *base_lib; /* Base image library */
+ struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
+ struct dbll_fxns ldr_fxns; /* Loader function table */
+ struct dbll_attrs ldr_attrs; /* attrs to pass to loader functions */
+ nldr_ovlyfxn ovly_fxn; /* "write" for overlay nodes */
+ nldr_writefxn write_fxn; /* "write" for dynamic nodes */
+ struct ovly_node *ovly_table; /* Table of overlay nodes */
+ u16 ovly_nodes; /* Number of overlay nodes in base */
+ u16 ovly_nid; /* Index for tracking overlay nodes */
+ u16 dload_segs; /* Number of dynamic load mem segs */
+ u32 *seg_table; /* memtypes of dynamic memory segs
+ * indexed by segid
+ */
+ u16 us_dsp_mau_size; /* Size of DSP MAU */
+ u16 us_dsp_word_size; /* Size of DSP word */
+};
+
+/*
+ * ======== nldr_nodeobject ========
+ * Dynamic node object. This object is created when a node is allocated.
+ */
+struct nldr_nodeobject {
+ struct nldr_object *nldr_obj; /* Dynamic loader handle */
+ void *priv_ref; /* Handle to pass to dbl_write_fxn */
+ struct dsp_uuid uuid; /* Node's UUID */
+ bool dynamic; /* Dynamically loaded node? */
+ bool overlay; /* Overlay node? */
+ bool *pf_phase_split; /* Multiple phase libraries? */
+ struct lib_node root; /* Library containing node phase */
+ struct lib_node create_lib; /* Library with create phase lib */
+ struct lib_node execute_lib; /* Library with execute phase lib */
+ struct lib_node delete_lib; /* Library with delete phase lib */
+ /* libs remain loaded until Delete */
+ struct lib_node pers_lib_table[MAXLIBS];
+ s32 pers_libs; /* Number of persistent libraries */
+ /* Path in lib dependency tree */
+ struct dbll_library_obj *lib_path[MAXDEPTH + 1];
+ enum nldr_phase phase; /* Node phase currently being loaded */
+
+ /*
+ * Dynamic loading memory segments for data and code of each phase.
+ */
+ u16 seg_id[MAXFLAGS];
+
+ /*
+ * Mask indicating whether each mem segment specified in seg_id[]
+ * is preferred or required.
+ * For example
+ * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
+ * then it is required to load execute phase data into the memory
+ * specified by seg_id[EXECUTEDATAFLAGBIT].
+ */
+ u32 code_data_flag_mask;
+};
+
+/* Dynamic loader function table */
+static struct dbll_fxns ldr_fxns = {
+ (dbll_close_fxn) dbll_close,
+ (dbll_create_fxn) dbll_create,
+ (dbll_delete_fxn) dbll_delete,
+ (dbll_exit_fxn) dbll_exit,
+ (dbll_get_attrs_fxn) dbll_get_attrs,
+ (dbll_get_addr_fxn) dbll_get_addr,
+ (dbll_get_c_addr_fxn) dbll_get_c_addr,
+ (dbll_get_sect_fxn) dbll_get_sect,
+ (dbll_init_fxn) dbll_init,
+ (dbll_load_fxn) dbll_load,
+ (dbll_load_sect_fxn) dbll_load_sect,
+ (dbll_open_fxn) dbll_open,
+ (dbll_read_sect_fxn) dbll_read_sect,
+ (dbll_set_attrs_fxn) dbll_set_attrs,
+ (dbll_unload_fxn) dbll_unload,
+ (dbll_unload_sect_fxn) dbll_unload_sect,
+};
+
+static u32 refs; /* module reference count */
+
+static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
+ u32 addr, u32 bytes);
+static int add_ovly_node(struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type, void *handle);
+static int add_ovly_sect(struct nldr_object *nldr_obj,
+ struct ovly_sect **lst,
+ struct dbll_sect_info *sect_inf,
+ bool *exists, u32 addr, u32 bytes);
+static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
+ s32 mtype);
+static void free_sects(struct nldr_object *nldr_obj,
+ struct ovly_sect *phase_sects, u16 alloc_num);
+static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
+ char *sym_name, struct dbll_sym_val **sym);
+static int load_lib(struct nldr_nodeobject *nldr_node_obj,
+ struct lib_node *root, struct dsp_uuid uuid,
+ bool root_prstnt,
+ struct dbll_library_obj **lib_path,
+ enum nldr_phase phase, u16 depth);
+static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase);
+static int remote_alloc(void **ref, u16 mem_sect, u32 size,
+ u32 align, u32 *dsp_address,
+ s32 segmnt_id,
+ s32 req, bool reserve);
+static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
+ bool reserve);
+
+static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
+ struct lib_node *root);
+static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase);
+static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
+ struct dbll_library_obj *lib);
+static u32 find_lcm(u32 a, u32 b);
+
+/*
+ * ======== nldr_allocate ========
+ */
+int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
+ const struct dcd_nodeprops *node_props,
+ struct nldr_nodeobject **nldr_nodeobj,
+ bool *pf_phase_split)
+{
+ struct nldr_nodeobject *nldr_node_obj = NULL;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(node_props != NULL);
+ DBC_REQUIRE(nldr_nodeobj != NULL);
+ DBC_REQUIRE(nldr_obj);
+
+ /* Initialize handle in case of failure */
+ *nldr_nodeobj = NULL;
+ /* Allocate node object */
+ nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);
+
+ if (nldr_node_obj == NULL) {
+ status = -ENOMEM;
+ } else {
+ nldr_node_obj->pf_phase_split = pf_phase_split;
+ nldr_node_obj->pers_libs = 0;
+ nldr_node_obj->nldr_obj = nldr_obj;
+ nldr_node_obj->priv_ref = priv_ref;
+ /* Save node's UUID. */
+ nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
+ /*
+ * Determine if node is a dynamically loaded node from
+ * ndb_props.
+ */
+ if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
+ /* Dynamic node */
+ nldr_node_obj->dynamic = true;
+ /*
+ * Extract memory requirements from ndb_props masks
+ */
+ /* Create phase */
+ nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
+ (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
+ SEGMASK;
+ nldr_node_obj->code_data_flag_mask |=
+ ((node_props->ul_data_mem_seg_mask >>
+ (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
+ nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
+ (node_props->ul_code_mem_seg_mask >>
+ CREATEBIT) & SEGMASK;
+ nldr_node_obj->code_data_flag_mask |=
+ ((node_props->ul_code_mem_seg_mask >>
+ (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
+ /* Execute phase */
+ nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
+ (node_props->ul_data_mem_seg_mask >>
+ EXECUTEBIT) & SEGMASK;
+ nldr_node_obj->code_data_flag_mask |=
+ ((node_props->ul_data_mem_seg_mask >>
+ (EXECUTEBIT + FLAGBIT)) & 1) <<
+ EXECUTEDATAFLAGBIT;
+ nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
+ (node_props->ul_code_mem_seg_mask >>
+ EXECUTEBIT) & SEGMASK;
+ nldr_node_obj->code_data_flag_mask |=
+ ((node_props->ul_code_mem_seg_mask >>
+ (EXECUTEBIT + FLAGBIT)) & 1) <<
+ EXECUTECODEFLAGBIT;
+ /* Delete phase */
+ nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
+ (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
+ SEGMASK;
+ nldr_node_obj->code_data_flag_mask |=
+ ((node_props->ul_data_mem_seg_mask >>
+ (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
+ nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
+ (node_props->ul_code_mem_seg_mask >>
+ DELETEBIT) & SEGMASK;
+ nldr_node_obj->code_data_flag_mask |=
+ ((node_props->ul_code_mem_seg_mask >>
+ (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
+ } else {
+ /* Non-dynamically loaded nodes are part of the
+ * base image */
+ nldr_node_obj->root.lib = nldr_obj->base_lib;
+ /* Check for overlay node */
+ if (node_props->us_load_type == NLDR_OVLYLOAD)
+ nldr_node_obj->overlay = true;
+
+ }
+ *nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
+ }
+ /* Cleanup on failure */
+ if (status && nldr_node_obj)
+ kfree(nldr_node_obj);
+
+ DBC_ENSURE((!status && *nldr_nodeobj)
+ || (status && *nldr_nodeobj == NULL));
+ return status;
+}
+
+/*
+ * ======== nldr_create ========
+ */
+int nldr_create(struct nldr_object **nldr,
+ struct dev_object *hdev_obj,
+ const struct nldr_attrs *pattrs)
+{
+ struct cod_manager *cod_mgr; /* COD manager */
+ char *psz_coff_buf = NULL;
+ char sz_zl_file[COD_MAXPATHLENGTH];
+ struct nldr_object *nldr_obj = NULL;
+ struct dbll_attrs save_attrs;
+ struct dbll_attrs new_attrs;
+ dbll_flags flags;
+ u32 ul_entry;
+ u16 dload_segs = 0;
+ struct mem_seg_info *mem_info_obj;
+ u32 ul_len = 0;
+ u32 ul_addr;
+ struct rmm_segment *rmm_segs = NULL;
+ u16 i;
+ int status = 0;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(nldr != NULL);
+ DBC_REQUIRE(hdev_obj != NULL);
+ DBC_REQUIRE(pattrs != NULL);
+ DBC_REQUIRE(pattrs->pfn_ovly != NULL);
+ DBC_REQUIRE(pattrs->pfn_write != NULL);
+
+ /* Allocate dynamic loader object */
+ nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
+ if (nldr_obj) {
+ nldr_obj->hdev_obj = hdev_obj;
+ /* warning, lazy status checking alert! */
+ dev_get_cod_mgr(hdev_obj, &cod_mgr);
+ if (cod_mgr) {
+ status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
+ DBC_ASSERT(!status);
+ status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
+ DBC_ASSERT(!status);
+ status =
+ cod_get_base_name(cod_mgr, sz_zl_file,
+ COD_MAXPATHLENGTH);
+ DBC_ASSERT(!status);
+ }
+ status = 0;
+ /* end lazy status checking */
+ nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
+ nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
+ nldr_obj->ldr_fxns = ldr_fxns;
+ if (!(nldr_obj->ldr_fxns.init_fxn()))
+ status = -ENOMEM;
+
+ } else {
+ status = -ENOMEM;
+ }
+ /* Create the DCD Manager */
+ if (!status)
+ status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
+
+ /* Get dynamic loading memory sections from base lib */
+ if (!status) {
+ status =
+ nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
+ DYNMEMSECT, &ul_addr,
+ &ul_len);
+ if (!status) {
+ psz_coff_buf =
+ kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
+ GFP_KERNEL);
+ if (!psz_coff_buf)
+ status = -ENOMEM;
+ } else {
+ /* Ok to not have dynamic loading memory */
+ status = 0;
+ ul_len = 0;
+ dev_dbg(bridge, "%s: failed - no dynamic loading mem "
+ "segments: 0x%x\n", __func__, status);
+ }
+ }
+ if (!status && ul_len > 0) {
+ /* Read section containing dynamic load mem segments */
+ status =
+ nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
+ DYNMEMSECT, psz_coff_buf,
+ ul_len);
+ }
+ if (!status && ul_len > 0) {
+ /* Parse memory segment data */
+ dload_segs = (u16) (*((u32 *) psz_coff_buf));
+ if (dload_segs > MAXMEMSEGS)
+ status = -EBADF;
+ }
+ /* Parse dynamic load memory segments */
+ if (!status && dload_segs > 0) {
+ rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
+ GFP_KERNEL);
+ nldr_obj->seg_table =
+ kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
+ if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
+ status = -ENOMEM;
+ } else {
+ nldr_obj->dload_segs = dload_segs;
+ mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
+ sizeof(u32));
+ for (i = 0; i < dload_segs; i++) {
+ rmm_segs[i].base = (mem_info_obj + i)->base;
+ rmm_segs[i].length = (mem_info_obj + i)->len;
+ rmm_segs[i].space = 0;
+ nldr_obj->seg_table[i] =
+ (mem_info_obj + i)->type;
+ dev_dbg(bridge,
+ "(proc) DLL MEMSEGMENT: %d, "
+ "Base: 0x%x, Length: 0x%x\n", i,
+ rmm_segs[i].base, rmm_segs[i].length);
+ }
+ }
+ }
+ /* Create Remote memory manager */
+ if (!status)
+ status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
+
+ if (!status) {
+ /* set the alloc, free, write functions for loader */
+ nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
+ new_attrs = save_attrs;
+ new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
+ new_attrs.free = (dbll_free_fxn) remote_free;
+ new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
+ new_attrs.sym_handle = nldr_obj;
+ new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
+ nldr_obj->ovly_fxn = pattrs->pfn_ovly;
+ nldr_obj->write_fxn = pattrs->pfn_write;
+ nldr_obj->ldr_attrs = new_attrs;
+ }
+ kfree(rmm_segs);
+
+ kfree(psz_coff_buf);
+
+ /* Get overlay nodes */
+ if (!status) {
+ status =
+ cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
+ /* lazy check */
+ DBC_ASSERT(!status);
+ /* First count number of overlay nodes */
+ status =
+ dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
+ add_ovly_node, (void *)nldr_obj);
+ /* Now build table of overlay nodes */
+ if (!status && nldr_obj->ovly_nodes > 0) {
+ /* Allocate table for overlay nodes */
+ nldr_obj->ovly_table =
+ kzalloc(sizeof(struct ovly_node) *
+ nldr_obj->ovly_nodes, GFP_KERNEL);
+ /* Put overlay nodes in the table */
+ nldr_obj->ovly_nid = 0;
+ status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
+ add_ovly_node,
+ (void *)nldr_obj);
+ }
+ }
+ /* Do a fake reload of the base image to get overlay section info */
+ if (!status && nldr_obj->ovly_nodes > 0) {
+ save_attrs.write = fake_ovly_write;
+ save_attrs.log_write = add_ovly_info;
+ save_attrs.log_write_handle = nldr_obj;
+ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
+ status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
+ &save_attrs, &ul_entry);
+ }
+ if (!status) {
+ *nldr = (struct nldr_object *)nldr_obj;
+ } else {
+ if (nldr_obj)
+ nldr_delete((struct nldr_object *)nldr_obj);
+
+ *nldr = NULL;
+ }
+ /* FIXME:Temp. Fix. Must be removed */
+ DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
+ return status;
+}
+
+/*
+ * ======== nldr_delete ========
+ */
+void nldr_delete(struct nldr_object *nldr_obj)
+{
+ struct ovly_sect *ovly_section;
+ struct ovly_sect *next;
+ u16 i;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(nldr_obj);
+
+ nldr_obj->ldr_fxns.exit_fxn();
+ if (nldr_obj->rmm)
+ rmm_delete(nldr_obj->rmm);
+
+ kfree(nldr_obj->seg_table);
+
+ if (nldr_obj->hdcd_mgr)
+ dcd_destroy_manager(nldr_obj->hdcd_mgr);
+
+ /* Free overlay node information */
+ if (nldr_obj->ovly_table) {
+ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
+ ovly_section =
+ nldr_obj->ovly_table[i].create_sects_list;
+ while (ovly_section) {
+ next = ovly_section->next_sect;
+ kfree(ovly_section);
+ ovly_section = next;
+ }
+ ovly_section =
+ nldr_obj->ovly_table[i].delete_sects_list;
+ while (ovly_section) {
+ next = ovly_section->next_sect;
+ kfree(ovly_section);
+ ovly_section = next;
+ }
+ ovly_section =
+ nldr_obj->ovly_table[i].execute_sects_list;
+ while (ovly_section) {
+ next = ovly_section->next_sect;
+ kfree(ovly_section);
+ ovly_section = next;
+ }
+ ovly_section = nldr_obj->ovly_table[i].other_sects_list;
+ while (ovly_section) {
+ next = ovly_section->next_sect;
+ kfree(ovly_section);
+ ovly_section = next;
+ }
+ }
+ kfree(nldr_obj->ovly_table);
+ }
+ kfree(nldr_obj);
+}
+
+/*
+ * ======== nldr_exit ========
+ * Discontinue usage of NLDR module.
+ */
+void nldr_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ if (refs == 0)
+ rmm_exit();
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== nldr_get_fxn_addr ========
+ */
+int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
+ char *str_fxn, u32 * addr)
+{
+ struct dbll_sym_val *dbll_sym;
+ struct nldr_object *nldr_obj;
+ int status = 0;
+ bool status1 = false;
+ s32 i = 0;
+ struct lib_node root = { NULL, 0, NULL };
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(nldr_node_obj);
+ DBC_REQUIRE(addr != NULL);
+ DBC_REQUIRE(str_fxn != NULL);
+
+ nldr_obj = nldr_node_obj->nldr_obj;
+ /* Called from node_create(), node_delete(), or node_run(). */
+ if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
+ switch (nldr_node_obj->phase) {
+ case NLDR_CREATE:
+ root = nldr_node_obj->create_lib;
+ break;
+ case NLDR_EXECUTE:
+ root = nldr_node_obj->execute_lib;
+ break;
+ case NLDR_DELETE:
+ root = nldr_node_obj->delete_lib;
+ break;
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+ } else {
+ /* for Overlay nodes or non-split Dynamic nodes */
+ root = nldr_node_obj->root;
+ }
+ status1 =
+ nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
+ if (!status1)
+ status1 =
+ nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
+ &dbll_sym);
+
+ /* If symbol not found, check dependent libraries */
+ if (!status1) {
+ for (i = 0; i < root.dep_libs; i++) {
+ status1 =
+ nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
+ [i].lib, str_fxn,
+ &dbll_sym);
+ if (!status1) {
+ status1 =
+ nldr_obj->ldr_fxns.
+ get_c_addr_fxn(root.dep_libs_tree[i].lib,
+ str_fxn, &dbll_sym);
+ }
+ if (status1) {
+ /* Symbol found */
+ break;
+ }
+ }
+ }
+ /* Check persistent libraries */
+ if (!status1) {
+ for (i = 0; i < nldr_node_obj->pers_libs; i++) {
+ status1 =
+ nldr_obj->ldr_fxns.
+ get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
+ str_fxn, &dbll_sym);
+ if (!status1) {
+ status1 =
+ nldr_obj->ldr_fxns.
+ get_c_addr_fxn(nldr_node_obj->pers_lib_table
+ [i].lib, str_fxn, &dbll_sym);
+ }
+ if (status1) {
+ /* Symbol found */
+ break;
+ }
+ }
+ }
+
+ if (status1)
+ *addr = dbll_sym->value;
+ else
+ status = -ESPIPE;
+
+ return status;
+}
+
+/*
+ * ======== nldr_get_rmm_manager ========
+ * Given a NLDR object, retrieve RMM Manager Handle
+ */
+int nldr_get_rmm_manager(struct nldr_object *nldr,
+ struct rmm_target_obj **rmm_mgr)
+{
+ int status = 0;
+ struct nldr_object *nldr_obj = nldr;
+ DBC_REQUIRE(rmm_mgr != NULL);
+
+ if (nldr) {
+ *rmm_mgr = nldr_obj->rmm;
+ } else {
+ *rmm_mgr = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
+
+ return status;
+}
+
+/*
+ * ======== nldr_init ========
+ * Initialize the NLDR module.
+ */
+bool nldr_init(void)
+{
+ DBC_REQUIRE(refs >= 0);
+
+ if (refs == 0)
+ rmm_init();
+
+ refs++;
+
+ DBC_ENSURE(refs > 0);
+ return true;
+}
+
+/*
+ * ======== nldr_load ========
+ */
+int nldr_load(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase)
+{
+ struct nldr_object *nldr_obj;
+ struct dsp_uuid lib_uuid;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(nldr_node_obj);
+
+ nldr_obj = nldr_node_obj->nldr_obj;
+
+ if (nldr_node_obj->dynamic) {
+ nldr_node_obj->phase = phase;
+
+ lib_uuid = nldr_node_obj->uuid;
+
+ /* At this point, we may not know if node is split into
+ * different libraries. So we'll go ahead and load the
+ * library, and then save the pointer to the appropriate
+ * location after we know. */
+
+ status =
+ load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
+ false, nldr_node_obj->lib_path, phase, 0);
+
+ if (!status) {
+ if (*nldr_node_obj->pf_phase_split) {
+ switch (phase) {
+ case NLDR_CREATE:
+ nldr_node_obj->create_lib =
+ nldr_node_obj->root;
+ break;
+
+ case NLDR_EXECUTE:
+ nldr_node_obj->execute_lib =
+ nldr_node_obj->root;
+ break;
+
+ case NLDR_DELETE:
+ nldr_node_obj->delete_lib =
+ nldr_node_obj->root;
+ break;
+
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+ }
+ }
+ } else {
+ if (nldr_node_obj->overlay)
+ status = load_ovly(nldr_node_obj, phase);
+
+ }
+
+ return status;
+}
+
+/*
+ * ======== nldr_unload ========
+ */
+int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase)
+{
+ int status = 0;
+ struct lib_node *root_lib = NULL;
+ s32 i = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(nldr_node_obj);
+
+ if (nldr_node_obj != NULL) {
+ if (nldr_node_obj->dynamic) {
+ if (*nldr_node_obj->pf_phase_split) {
+ switch (phase) {
+ case NLDR_CREATE:
+ root_lib = &nldr_node_obj->create_lib;
+ break;
+ case NLDR_EXECUTE:
+ root_lib = &nldr_node_obj->execute_lib;
+ break;
+ case NLDR_DELETE:
+ root_lib = &nldr_node_obj->delete_lib;
+ /* Unload persistent libraries */
+ for (i = 0;
+ i < nldr_node_obj->pers_libs;
+ i++) {
+ unload_lib(nldr_node_obj,
+ &nldr_node_obj->
+ pers_lib_table[i]);
+ }
+ nldr_node_obj->pers_libs = 0;
+ break;
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+ } else {
+ /* Unload main library */
+ root_lib = &nldr_node_obj->root;
+ }
+ if (root_lib)
+ unload_lib(nldr_node_obj, root_lib);
+ } else {
+ if (nldr_node_obj->overlay)
+ unload_ovly(nldr_node_obj, phase);
+
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== add_ovly_info ========
+ */
+static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
+ u32 addr, u32 bytes)
+{
+ char *node_name;
+ char *sect_name = (char *)sect_info->name;
+ bool sect_exists = false;
+ char seps = ':';
+ char *pch;
+ u16 i;
+ struct nldr_object *nldr_obj = (struct nldr_object *)handle;
+ int status = 0;
+
+ /* Is this an overlay section (load address != run address)? */
+ if (sect_info->sect_load_addr == sect_info->sect_run_addr)
+ goto func_end;
+
+ /* Find the node it belongs to */
+ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
+ node_name = nldr_obj->ovly_table[i].node_name;
+ DBC_REQUIRE(node_name);
+ if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
+ /* Found the node */
+ break;
+ }
+ }
+ if (!(i < nldr_obj->ovly_nodes))
+ goto func_end;
+
+ /* Determine which phase this section belongs to */
+ for (pch = sect_name + 1; *pch && *pch != seps; pch++)
+  ;
+
+ if (*pch) {
+ pch++; /* Skip over the ':' */
+ if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
+ status =
+ add_ovly_sect(nldr_obj,
+ &nldr_obj->
+ ovly_table[i].create_sects_list,
+ sect_info, &sect_exists, addr, bytes);
+ if (!status && !sect_exists)
+ nldr_obj->ovly_table[i].create_sects++;
+
+ } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
+ status =
+ add_ovly_sect(nldr_obj,
+ &nldr_obj->
+ ovly_table[i].delete_sects_list,
+ sect_info, &sect_exists, addr, bytes);
+ if (!status && !sect_exists)
+ nldr_obj->ovly_table[i].delete_sects++;
+
+ } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
+ status =
+ add_ovly_sect(nldr_obj,
+ &nldr_obj->
+ ovly_table[i].execute_sects_list,
+ sect_info, &sect_exists, addr, bytes);
+ if (!status && !sect_exists)
+ nldr_obj->ovly_table[i].execute_sects++;
+
+ } else {
+ /* Put in "other" sections */
+ status =
+ add_ovly_sect(nldr_obj,
+ &nldr_obj->
+ ovly_table[i].other_sects_list,
+ sect_info, &sect_exists, addr, bytes);
+ if (!status && !sect_exists)
+ nldr_obj->ovly_table[i].other_sects++;
+
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== add_ovly_node =========
+ * Callback function passed to dcd_get_objects.
+ */
+static int add_ovly_node(struct dsp_uuid *uuid_obj,
+ enum dsp_dcdobjtype obj_type, void *handle)
+{
+ struct nldr_object *nldr_obj = (struct nldr_object *)handle;
+ char *node_name = NULL;
+ char *pbuf = NULL;
+ u32 len;
+ struct dcd_genericobj obj_def;
+ int status = 0;
+
+ if (obj_type != DSP_DCDNODETYPE)
+ goto func_end;
+
+ status =
+ dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
+ &obj_def);
+ if (status)
+ goto func_end;
+
+ /* If overlay node, add to the list */
+ if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
+ if (nldr_obj->ovly_table == NULL) {
+ nldr_obj->ovly_nodes++;
+ } else {
+ /* Add node to table */
+ nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
+ *uuid_obj;
+ DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
+ ac_name);
+ len =
+ strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
+ node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
+ pbuf = kzalloc(len + 1, GFP_KERNEL);
+ if (pbuf == NULL) {
+ status = -ENOMEM;
+ } else {
+ strncpy(pbuf, node_name, len);
+ nldr_obj->ovly_table[nldr_obj->ovly_nid].
+ node_name = pbuf;
+ nldr_obj->ovly_nid++;
+ }
+ }
+ }
+ /* These were allocated in dcd_get_object_def */
+ kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);
+
+ kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);
+
+ kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);
+
+ kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);
+
+func_end:
+ return status;
+}
+
+/*
+ * ======== add_ovly_sect ========
+ */
+static int add_ovly_sect(struct nldr_object *nldr_obj,
+ struct ovly_sect **lst,
+ struct dbll_sect_info *sect_inf,
+ bool *exists, u32 addr, u32 bytes)
+{
+ struct ovly_sect *new_sect = NULL;
+ struct ovly_sect *last_sect;
+ struct ovly_sect *ovly_section;
+ int status = 0;
+
+ ovly_section = last_sect = *lst;
+ *exists = false;
+ while (ovly_section) {
+ /*
+ * Make sure section has not already been added. Multiple
+ * 'write' calls may be made to load the section.
+ */
+ if (ovly_section->sect_load_addr == addr) {
+ /* Already added */
+ *exists = true;
+ break;
+ }
+ last_sect = ovly_section;
+ ovly_section = ovly_section->next_sect;
+ }
+
+ if (!ovly_section) {
+ /* New section */
+ new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
+ if (new_sect == NULL) {
+ status = -ENOMEM;
+ } else {
+ new_sect->sect_load_addr = addr;
+ new_sect->sect_run_addr = sect_inf->sect_run_addr +
+ (addr - sect_inf->sect_load_addr);
+ new_sect->size = bytes;
+ new_sect->page = sect_inf->type;
+ }
+
+ /* Add to the list */
+ if (!status) {
+ if (*lst == NULL) {
+ /* First in the list */
+ *lst = new_sect;
+ } else {
+ last_sect->next_sect = new_sect;
+ }
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== fake_ovly_write ========
+ */
+static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
+ s32 mtype)
+{
+ return (s32) bytes;
+}
+
+/*
+ * ======== free_sects ========
+ */
+static void free_sects(struct nldr_object *nldr_obj,
+ struct ovly_sect *phase_sects, u16 alloc_num)
+{
+ struct ovly_sect *ovly_section = phase_sects;
+ u16 i = 0;
+ bool ret;
+
+ while (ovly_section && i < alloc_num) {
+ /* 'Deallocate' */
+ /* segid - page not supported yet */
+ /* Reserved memory */
+ ret =
+ rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
+ ovly_section->size, true);
+ DBC_ASSERT(ret);
+ ovly_section = ovly_section->next_sect;
+ i++;
+ }
+}
+
+/*
+ * ======== get_symbol_value ========
+ * Find symbol in library's base image. If not there, check dependent
+ * libraries.
+ */
+static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
+ char *sym_name, struct dbll_sym_val **sym)
+{
+ struct nldr_object *nldr_obj = (struct nldr_object *)handle;
+ struct nldr_nodeobject *nldr_node_obj =
+ (struct nldr_nodeobject *)rmm_handle;
+ struct lib_node *root = (struct lib_node *)parg;
+ u16 i;
+ bool status = false;
+
+ /* check the base image */
+ status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
+ sym_name, sym);
+ if (!status)
+ status =
+ nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
+ sym_name, sym);
+
+ /*
+ * Check in root lib itself. If the library consists of
+ * multiple object files linked together, some symbols in the
+ * library may need to be resolved.
+ */
+ if (!status) {
+ status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
+ sym);
+ if (!status) {
+ status =
+ nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
+ sym_name, sym);
+ }
+ }
+
+ /*
+ * Check in root lib's dependent libraries, but not dependent
+ * libraries' dependents.
+ */
+ if (!status) {
+ for (i = 0; i < root->dep_libs; i++) {
+ status =
+ nldr_obj->ldr_fxns.get_addr_fxn(root->
+ dep_libs_tree
+ [i].lib,
+ sym_name, sym);
+ if (!status) {
+ status =
+ nldr_obj->ldr_fxns.
+ get_c_addr_fxn(root->dep_libs_tree[i].lib,
+ sym_name, sym);
+ }
+ if (status) {
+ /* Symbol found */
+ break;
+ }
+ }
+ }
+ /*
+ * Check in persistent libraries
+ */
+ if (!status) {
+ for (i = 0; i < nldr_node_obj->pers_libs; i++) {
+ status =
+ nldr_obj->ldr_fxns.
+ get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
+ sym_name, sym);
+ if (!status) {
+ status = nldr_obj->ldr_fxns.get_c_addr_fxn
+ (nldr_node_obj->pers_lib_table[i].lib,
+ sym_name, sym);
+ }
+ if (status) {
+ /* Symbol found */
+ break;
+ }
+ }
+ }
+
+ return status;
+}
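+
+/*
+ * Note: at every level above, the plain symbol lookup (get_addr_fxn) is
+ * tried first and the C-symbol lookup (get_c_addr_fxn) is used as a
+ * fallback; the search stops at the first lookup that succeeds.
+ */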
+
+/*
+ * ======== load_lib ========
+ * Recursively load library and all its dependent libraries. The library
+ * we're loading is specified by a uuid.
+ */
+static int load_lib(struct nldr_nodeobject *nldr_node_obj,
+ struct lib_node *root, struct dsp_uuid uuid,
+ bool root_prstnt,
+ struct dbll_library_obj **lib_path,
+ enum nldr_phase phase, u16 depth)
+{
+ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
+ u16 nd_libs = 0; /* Number of dependent libraries */
+ u16 np_libs = 0; /* Number of persistent libraries */
+ u16 nd_libs_loaded = 0; /* Number of dep. libraries loaded */
+ u16 i;
+ u32 entry;
+ u32 dw_buf_size = NLDR_MAXPATHLENGTH;
+ dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
+ struct dbll_attrs new_attrs;
+ char *psz_file_name = NULL;
+ struct dsp_uuid *dep_lib_uui_ds = NULL;
+ bool *persistent_dep_libs = NULL;
+ int status = 0;
+ bool lib_status = false;
+ struct lib_node *dep_lib;
+
+ if (depth > MAXDEPTH) {
+ /* Error */
+ DBC_ASSERT(false);
+ }
+ root->lib = NULL;
+	/* Allocate a buffer for library file name of size DBLL_MAXPATHLENGTH */
+ psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
+ if (psz_file_name == NULL)
+ status = -ENOMEM;
+
+ if (!status) {
+ /* Get the name of the library */
+ if (depth == 0) {
+ status =
+ dcd_get_library_name(nldr_node_obj->nldr_obj->
+ hdcd_mgr, &uuid, psz_file_name,
+ &dw_buf_size, phase,
+ nldr_node_obj->pf_phase_split);
+ } else {
+ /* Dependent libraries are registered with a phase */
+ status =
+ dcd_get_library_name(nldr_node_obj->nldr_obj->
+ hdcd_mgr, &uuid, psz_file_name,
+ &dw_buf_size, NLDR_NOPHASE,
+ NULL);
+ }
+ }
+ if (!status) {
+ /* Open the library, don't load symbols */
+ status =
+ nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
+ DBLL_NOLOAD, &root->lib);
+ }
+ /* Done with file name */
+ kfree(psz_file_name);
+
+	/* Check whether the library is already loaded */
+ if (!status && root_prstnt) {
+ lib_status =
+ find_in_persistent_lib_array(nldr_node_obj, root->lib);
+ /* Close library */
+ if (lib_status) {
+ nldr_obj->ldr_fxns.close_fxn(root->lib);
+ return 0;
+ }
+ }
+ if (!status) {
+ /* Check for circular dependencies. */
+ for (i = 0; i < depth; i++) {
+ if (root->lib == lib_path[i]) {
+ /* This condition could be checked by a
+ * tool at build time. */
+ status = -EILSEQ;
+ }
+ }
+ }
+ if (!status) {
+ /* Add library to current path in dependency tree */
+ lib_path[depth] = root->lib;
+ depth++;
+ /* Get number of dependent libraries */
+ status =
+ dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
+ &uuid, &nd_libs, &np_libs, phase);
+ }
+ DBC_ASSERT(nd_libs >= np_libs);
+ if (!status) {
+ if (!(*nldr_node_obj->pf_phase_split))
+ np_libs = 0;
+
+ /* nd_libs = #of dependent libraries */
+ root->dep_libs = nd_libs - np_libs;
+ if (nd_libs > 0) {
+ dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
+ nd_libs, GFP_KERNEL);
+ persistent_dep_libs =
+ kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
+ if (!dep_lib_uui_ds || !persistent_dep_libs)
+ status = -ENOMEM;
+
+ if (root->dep_libs > 0) {
+ /* Allocate arrays for dependent lib UUIDs,
+ * lib nodes */
+ root->dep_libs_tree = kzalloc
+ (sizeof(struct lib_node) *
+ (root->dep_libs), GFP_KERNEL);
+ if (!(root->dep_libs_tree))
+ status = -ENOMEM;
+
+ }
+
+ if (!status) {
+ /* Get the dependent library UUIDs */
+ status =
+ dcd_get_dep_libs(nldr_node_obj->
+ nldr_obj->hdcd_mgr, &uuid,
+ nd_libs, dep_lib_uui_ds,
+ persistent_dep_libs,
+ phase);
+ }
+ }
+ }
+
+ /*
+ * Recursively load dependent libraries.
+ */
+ if (!status) {
+ for (i = 0; i < nd_libs; i++) {
+ /* If root library is NOT persistent, and dep library
+ * is, then record it. If root library IS persistent,
+ * the deplib is already included */
+ if (!root_prstnt && persistent_dep_libs[i] &&
+ *nldr_node_obj->pf_phase_split) {
+ if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
+ status = -EILSEQ;
+ break;
+ }
+
+ /* Allocate library outside of phase */
+ dep_lib =
+ &nldr_node_obj->pers_lib_table
+ [nldr_node_obj->pers_libs];
+ } else {
+ if (root_prstnt)
+ persistent_dep_libs[i] = true;
+
+ /* Allocate library within phase */
+ dep_lib = &root->dep_libs_tree[nd_libs_loaded];
+ }
+
+ status = load_lib(nldr_node_obj, dep_lib,
+ dep_lib_uui_ds[i],
+ persistent_dep_libs[i], lib_path,
+ phase, depth);
+
+ if (!status) {
+ if ((status != 0) &&
+ !root_prstnt && persistent_dep_libs[i] &&
+ *nldr_node_obj->pf_phase_split) {
+ (nldr_node_obj->pers_libs)++;
+ } else {
+ if (!persistent_dep_libs[i] ||
+ !(*nldr_node_obj->pf_phase_split)) {
+ nd_libs_loaded++;
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ }
+
+ /* Now we can load the root library */
+ if (!status) {
+ new_attrs = nldr_obj->ldr_attrs;
+ new_attrs.sym_arg = root;
+ new_attrs.rmm_handle = nldr_node_obj;
+ new_attrs.input_params = nldr_node_obj->priv_ref;
+ new_attrs.base_image = false;
+
+ status =
+ nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
+ &entry);
+ }
+
+ /*
+ * In case of failure, unload any dependent libraries that
+ * were loaded, and close the root library.
+ * (Persistent libraries are unloaded from the very top)
+ */
+ if (status) {
+ if (phase != NLDR_EXECUTE) {
+ for (i = 0; i < nldr_node_obj->pers_libs; i++)
+ unload_lib(nldr_node_obj,
+ &nldr_node_obj->pers_lib_table[i]);
+
+ nldr_node_obj->pers_libs = 0;
+ }
+ for (i = 0; i < nd_libs_loaded; i++)
+ unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
+
+ if (root->lib)
+ nldr_obj->ldr_fxns.close_fxn(root->lib);
+
+ }
+
+ /* Going up one node in the dependency tree */
+ depth--;
+
+ kfree(dep_lib_uui_ds);
+ dep_lib_uui_ds = NULL;
+
+ kfree(persistent_dep_libs);
+ persistent_dep_libs = NULL;
+
+ return status;
+}
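+
+/*
+ * Note on load_lib(): lib_path[] records the chain of libraries currently
+ * being loaded (indexed by recursion depth), which is what the circular
+ * dependency check above relies on.  On failure, dependent libraries that
+ * were loaded at this level are unloaded and the root library is closed;
+ * persistent libraries are only torn down here for non-execute phases,
+ * since they are otherwise unloaded from the very top of the tree.
+ */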
+
+/*
+ * ======== load_ovly ========
+ */
+static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase)
+{
+ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
+ struct ovly_node *po_node = NULL;
+ struct ovly_sect *phase_sects = NULL;
+ struct ovly_sect *other_sects_list = NULL;
+ u16 i;
+ u16 alloc_num = 0;
+ u16 other_alloc = 0;
+ u16 *ref_count = NULL;
+ u16 *other_ref = NULL;
+ u32 bytes;
+ struct ovly_sect *ovly_section;
+ int status = 0;
+
+ /* Find the node in the table */
+ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
+ if (is_equal_uuid
+ (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
+ /* Found it */
+ po_node = &(nldr_obj->ovly_table[i]);
+ break;
+ }
+ }
+
+ DBC_ASSERT(i < nldr_obj->ovly_nodes);
+
+ if (!po_node) {
+ status = -ENOENT;
+ goto func_end;
+ }
+
+ switch (phase) {
+ case NLDR_CREATE:
+ ref_count = &(po_node->create_ref);
+ other_ref = &(po_node->other_ref);
+ phase_sects = po_node->create_sects_list;
+ other_sects_list = po_node->other_sects_list;
+ break;
+
+ case NLDR_EXECUTE:
+ ref_count = &(po_node->execute_ref);
+ phase_sects = po_node->execute_sects_list;
+ break;
+
+ case NLDR_DELETE:
+ ref_count = &(po_node->delete_ref);
+ phase_sects = po_node->delete_sects_list;
+ break;
+
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+
+ if (ref_count == NULL)
+ goto func_end;
+
+ if (*ref_count != 0)
+ goto func_end;
+
+ /* 'Allocate' memory for overlay sections of this phase */
+ ovly_section = phase_sects;
+ while (ovly_section) {
+ /* allocate *//* page not supported yet */
+ /* reserve *//* align */
+ status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
+ &(ovly_section->sect_run_addr), true);
+ if (!status) {
+ ovly_section = ovly_section->next_sect;
+ alloc_num++;
+ } else {
+ break;
+ }
+ }
+ if (other_ref && *other_ref == 0) {
+ /* 'Allocate' memory for other overlay sections
+ * (create phase) */
+ if (!status) {
+ ovly_section = other_sects_list;
+ while (ovly_section) {
+ /* page not supported *//* align */
+ /* reserve */
+ status =
+ rmm_alloc(nldr_obj->rmm, 0,
+ ovly_section->size, 0,
+ &(ovly_section->sect_run_addr),
+ true);
+ if (!status) {
+ ovly_section = ovly_section->next_sect;
+ other_alloc++;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+ if (*ref_count == 0) {
+ if (!status) {
+ /* Load sections for this phase */
+ ovly_section = phase_sects;
+ while (ovly_section && !status) {
+ bytes =
+ (*nldr_obj->ovly_fxn) (nldr_node_obj->
+ priv_ref,
+ ovly_section->
+ sect_run_addr,
+ ovly_section->
+ sect_load_addr,
+ ovly_section->size,
+ ovly_section->page);
+ if (bytes != ovly_section->size)
+ status = -EPERM;
+
+ ovly_section = ovly_section->next_sect;
+ }
+ }
+ }
+ if (other_ref && *other_ref == 0) {
+ if (!status) {
+ /* Load other sections (create phase) */
+ ovly_section = other_sects_list;
+ while (ovly_section && !status) {
+ bytes =
+ (*nldr_obj->ovly_fxn) (nldr_node_obj->
+ priv_ref,
+ ovly_section->
+ sect_run_addr,
+ ovly_section->
+ sect_load_addr,
+ ovly_section->size,
+ ovly_section->page);
+ if (bytes != ovly_section->size)
+ status = -EPERM;
+
+ ovly_section = ovly_section->next_sect;
+ }
+ }
+ }
+ if (status) {
+ /* 'Deallocate' memory */
+ free_sects(nldr_obj, phase_sects, alloc_num);
+ free_sects(nldr_obj, other_sects_list, other_alloc);
+ }
+func_end:
+ if (!status && (ref_count != NULL)) {
+ *ref_count += 1;
+ if (other_ref)
+ *other_ref += 1;
+
+ }
+
+ return status;
+}
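+
+/*
+ * Note on load_ovly(): the per-phase reference counts ensure that overlay
+ * sections are reserved (rmm_alloc) and copied to the target only the
+ * first time a phase is entered; later calls for the same phase just bump
+ * the count at func_end.  The 'other' sections are loaded along with the
+ * create phase and, as unload_ovly() shows, freed in the delete phase via
+ * other_ref.
+ */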
+
+/*
+ * ======== remote_alloc ========
+ */
+static int remote_alloc(void **ref, u16 mem_sect, u32 size,
+ u32 align, u32 *dsp_address,
+ s32 segmnt_id, s32 req,
+ bool reserve)
+{
+ struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
+ struct nldr_object *nldr_obj;
+ struct rmm_target_obj *rmm;
+ u16 mem_phase_bit = MAXFLAGS;
+ u16 segid = 0;
+ u16 i;
+ u16 mem_sect_type;
+ u32 word_size;
+ struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
+ bool mem_load_req = false;
+ int status = -ENOMEM; /* Set to fail */
+ DBC_REQUIRE(hnode);
+ DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
+ mem_sect == DBLL_BSS);
+ nldr_obj = hnode->nldr_obj;
+ rmm = nldr_obj->rmm;
+ /* Convert size to DSP words */
+ word_size =
+ (size + nldr_obj->us_dsp_word_size -
+ 1) / nldr_obj->us_dsp_word_size;
+ /* Modify memory 'align' to account for DSP cache line size */
+ align = find_lcm(GEM_CACHE_LINE_SIZE, align);
+ dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
+ if (segmnt_id != -1) {
+ rmm_addr_obj->segid = segmnt_id;
+ segid = segmnt_id;
+ mem_load_req = req;
+ } else {
+ switch (hnode->phase) {
+ case NLDR_CREATE:
+ mem_phase_bit = CREATEDATAFLAGBIT;
+ break;
+ case NLDR_DELETE:
+ mem_phase_bit = DELETEDATAFLAGBIT;
+ break;
+ case NLDR_EXECUTE:
+ mem_phase_bit = EXECUTEDATAFLAGBIT;
+ break;
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+ if (mem_sect == DBLL_CODE)
+ mem_phase_bit++;
+
+ if (mem_phase_bit < MAXFLAGS)
+ segid = hnode->seg_id[mem_phase_bit];
+
+ /* Determine if there is a memory loading requirement */
+ if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
+ mem_load_req = true;
+
+ }
+ mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;
+
+ /* Find an appropriate segment based on mem_sect */
+ if (segid == NULLID) {
+		/* No memory requirements or preferences */
+ DBC_ASSERT(!mem_load_req);
+ goto func_cont;
+ }
+ if (segid <= MAXSEGID) {
+ DBC_ASSERT(segid < nldr_obj->dload_segs);
+ /* Attempt to allocate from segid first. */
+ rmm_addr_obj->segid = segid;
+ status =
+ rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
+ if (status) {
+ dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
+ __func__, segid);
+ }
+ } else {
+ /* segid > MAXSEGID ==> Internal or external memory */
+ DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
+ /* Check for any internal or external memory segment,
+ * depending on segid. */
+ mem_sect_type |= segid == MEMINTERNALID ?
+ DYNM_INTERNAL : DYNM_EXTERNAL;
+ for (i = 0; i < nldr_obj->dload_segs; i++) {
+ if ((nldr_obj->seg_table[i] & mem_sect_type) !=
+ mem_sect_type)
+ continue;
+
+ status = rmm_alloc(rmm, i, word_size, align,
+ dsp_address, false);
+ if (!status) {
+ /* Save segid for freeing later */
+ rmm_addr_obj->segid = i;
+ break;
+ }
+ }
+ }
+func_cont:
+ /* Haven't found memory yet, attempt to find any segment that works */
+ if (status == -ENOMEM && !mem_load_req) {
+ dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
+ "another\n", __func__);
+ for (i = 0; i < nldr_obj->dload_segs; i++) {
+ /* All bits of mem_sect_type must be set */
+ if ((nldr_obj->seg_table[i] & mem_sect_type) !=
+ mem_sect_type)
+ continue;
+
+ status = rmm_alloc(rmm, i, word_size, align,
+ dsp_address, false);
+ if (!status) {
+ /* Save segid */
+ rmm_addr_obj->segid = i;
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+static int remote_free(void **ref, u16 space, u32 dsp_address,
+ u32 size, bool reserve)
+{
+ struct nldr_object *nldr_obj = (struct nldr_object *)ref;
+ struct rmm_target_obj *rmm;
+ u32 word_size;
+ int status = -ENOMEM; /* Set to fail */
+
+ DBC_REQUIRE(nldr_obj);
+
+ rmm = nldr_obj->rmm;
+
+ /* Convert size to DSP words */
+ word_size =
+ (size + nldr_obj->us_dsp_word_size -
+ 1) / nldr_obj->us_dsp_word_size;
+
+ if (rmm_free(rmm, space, dsp_address, word_size, reserve))
+ status = 0;
+
+ return status;
+}
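+
+/*
+ * Both remote_alloc() and remote_free() round the byte count up to whole
+ * DSP words with (size + word_size - 1) / word_size.  For example, with a
+ * hypothetical 2-byte DSP word, a 5-byte request becomes (5 + 1) / 2 = 3
+ * DSP words, so partial words are never lost.
+ */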
+
+/*
+ * ======== unload_lib ========
+ */
+static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
+ struct lib_node *root)
+{
+ struct dbll_attrs new_attrs;
+ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
+ u16 i;
+
+ DBC_ASSERT(root != NULL);
+
+ /* Unload dependent libraries */
+ for (i = 0; i < root->dep_libs; i++)
+ unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
+
+ root->dep_libs = 0;
+
+ new_attrs = nldr_obj->ldr_attrs;
+ new_attrs.rmm_handle = nldr_obj->rmm;
+ new_attrs.input_params = nldr_node_obj->priv_ref;
+ new_attrs.base_image = false;
+ new_attrs.sym_arg = root;
+
+ if (root->lib) {
+ /* Unload the root library */
+ nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
+ nldr_obj->ldr_fxns.close_fxn(root->lib);
+ }
+
+ /* Free dependent library list */
+ kfree(root->dep_libs_tree);
+ root->dep_libs_tree = NULL;
+}
+
+/*
+ * ======== unload_ovly ========
+ */
+static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
+ enum nldr_phase phase)
+{
+ struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
+ struct ovly_node *po_node = NULL;
+ struct ovly_sect *phase_sects = NULL;
+ struct ovly_sect *other_sects_list = NULL;
+ u16 i;
+ u16 alloc_num = 0;
+ u16 other_alloc = 0;
+ u16 *ref_count = NULL;
+ u16 *other_ref = NULL;
+
+ /* Find the node in the table */
+ for (i = 0; i < nldr_obj->ovly_nodes; i++) {
+ if (is_equal_uuid
+ (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
+ /* Found it */
+ po_node = &(nldr_obj->ovly_table[i]);
+ break;
+ }
+ }
+
+ DBC_ASSERT(i < nldr_obj->ovly_nodes);
+
+ if (!po_node)
+ /* TODO: Should we print warning here? */
+ return;
+
+ switch (phase) {
+ case NLDR_CREATE:
+ ref_count = &(po_node->create_ref);
+ phase_sects = po_node->create_sects_list;
+ alloc_num = po_node->create_sects;
+ break;
+ case NLDR_EXECUTE:
+ ref_count = &(po_node->execute_ref);
+ phase_sects = po_node->execute_sects_list;
+ alloc_num = po_node->execute_sects;
+ break;
+ case NLDR_DELETE:
+ ref_count = &(po_node->delete_ref);
+ other_ref = &(po_node->other_ref);
+ phase_sects = po_node->delete_sects_list;
+ /* 'Other' overlay sections are unloaded in the delete phase */
+ other_sects_list = po_node->other_sects_list;
+ alloc_num = po_node->delete_sects;
+ other_alloc = po_node->other_sects;
+ break;
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+ DBC_ASSERT(ref_count && (*ref_count > 0));
+ if (ref_count && (*ref_count > 0)) {
+ *ref_count -= 1;
+ if (other_ref) {
+ DBC_ASSERT(*other_ref > 0);
+ *other_ref -= 1;
+ }
+ }
+
+ if (ref_count && *ref_count == 0) {
+ /* 'Deallocate' memory */
+ free_sects(nldr_obj, phase_sects, alloc_num);
+ }
+ if (other_ref && *other_ref == 0)
+ free_sects(nldr_obj, other_sects_list, other_alloc);
+}
+
+/*
+ * ======== find_in_persistent_lib_array ========
+ */
+static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
+ struct dbll_library_obj *lib)
+{
+ s32 i = 0;
+
+ for (i = 0; i < nldr_node_obj->pers_libs; i++) {
+ if (lib == nldr_node_obj->pers_lib_table[i].lib)
+ return true;
+
+ }
+
+ return false;
+}
+
+/*
+ * ======== find_lcm ========
+ * Find the least common multiple of two values.
+ */
+static u32 find_lcm(u32 a, u32 b)
+{
+ u32 ret;
+
+ ret = a * b / gcd(a, b);
+
+ return ret;
+}
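+
+/*
+ * find_lcm() is used by remote_alloc() to widen the requested alignment to
+ * the DSP cache line size.  For example, for a hypothetical 128-byte cache
+ * line and a requested alignment of 32: gcd(128, 32) = 32, so the result
+ * is 128 * 32 / 32 = 128, i.e. the allocation stays cache-line aligned.
+ */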
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/**
+ * nldr_find_addr() - Find the closest symbol to the given address based on
+ * dynamic node object.
+ *
+ * @nldr_node: Dynamic node object
+ * @sym_addr: Given address to find the dsp symbol
+ * @offset_range: offset range to look for dsp symbol
+ * @offset_output: Symbol Output address
+ * @sym_name: String with the dsp symbol
+ *
+ * This function finds the node library for a given address and
+ * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
+ */
+int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
+ u32 offset_range, void *offset_output, char *sym_name)
+{
+ int status = 0;
+ bool status1 = false;
+ s32 i = 0;
+ struct lib_node root = { NULL, 0, NULL };
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(offset_output != NULL);
+ DBC_REQUIRE(sym_name != NULL);
+ pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
+ sym_addr, offset_range, (u32) offset_output, sym_name);
+
+ if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
+ switch (nldr_node->phase) {
+ case NLDR_CREATE:
+ root = nldr_node->create_lib;
+ break;
+ case NLDR_EXECUTE:
+ root = nldr_node->execute_lib;
+ break;
+ case NLDR_DELETE:
+ root = nldr_node->delete_lib;
+ break;
+ default:
+ DBC_ASSERT(false);
+ break;
+ }
+ } else {
+ /* for Overlay nodes or non-split Dynamic nodes */
+ root = nldr_node->root;
+ }
+
+ status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
+ offset_range, offset_output, sym_name);
+
+ /* If symbol not found, check dependent libraries */
+ if (!status1)
+ for (i = 0; i < root.dep_libs; i++) {
+ status1 = dbll_find_dsp_symbol(
+ root.dep_libs_tree[i].lib, sym_addr,
+ offset_range, offset_output, sym_name);
+ if (status1)
+ /* Symbol found */
+ break;
+ }
+ /* Check persistent libraries */
+ if (!status1)
+ for (i = 0; i < nldr_node->pers_libs; i++) {
+ status1 = dbll_find_dsp_symbol(
+ nldr_node->pers_lib_table[i].lib, sym_addr,
+ offset_range, offset_output, sym_name);
+ if (status1)
+ /* Symbol found */
+ break;
+ }
+
+ if (!status1) {
+ pr_debug("%s: Address 0x%x not found in range %d.\n",
+ __func__, sym_addr, offset_range);
+ status = -ESPIPE;
+ }
+
+ return status;
+}
+#endif
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
new file mode 100644
index 00000000000..6e9441e2126
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -0,0 +1,3234 @@
+/*
+ * node.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Node Manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/list.h>
+#include <dspbridge/memdefs.h>
+#include <dspbridge/proc.h>
+#include <dspbridge/strm.h>
+#include <dspbridge/sync.h>
+#include <dspbridge/ntfy.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/cmm.h>
+#include <dspbridge/cod.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/msg.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/dbdcd.h>
+#include <dspbridge/disp.h>
+#include <dspbridge/rms_sh.h>
+
+/* ----------------------------------- Link Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspioctl.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/gb.h>
+#include <dspbridge/uuidutil.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/nodepriv.h>
+#include <dspbridge/node.h>
+#include <dspbridge/dmm.h>
+
+/* Static/Dynamic Loader includes */
+#include <dspbridge/dbll.h>
+#include <dspbridge/nldr.h>
+
+#include <dspbridge/drv.h>
+#include <dspbridge/drvdefs.h>
+#include <dspbridge/resourcecleanup.h>
+#include <_tiomap.h>
+
+#include <dspbridge/dspdeh.h>
+
+#define HOSTPREFIX "/host"
+#define PIPEPREFIX "/dbpipe"
+
+#define MAX_INPUTS(h) \
+ ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
+#define MAX_OUTPUTS(h) \
+ ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
+
+#define NODE_GET_PRIORITY(h) ((h)->prio)
+#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
+#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
+
+#define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
+#define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
+
+#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
+#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
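+/*
+ * sizeof(PIPEPREFIX) and sizeof(HOSTPREFIX) already include the
+ * terminating NUL, and MAXDEVSUFFIXLEN leaves room for a two-digit id,
+ * so e.g. "/dbpipe99" (9 characters plus NUL) fits in PIPENAMELEN bytes;
+ * the kzalloc() calls in node_connect() still add one extra byte of slack.
+ */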
+
+#define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
+#define CREATEPHASE 1
+#define EXECUTEPHASE 2
+#define DELETEPHASE 3
+
+/* Define default STRM parameters */
+/*
+ * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
+ * or make defaults configurable.
+ */
+#define DEFAULTBUFSIZE 32
+#define DEFAULTNBUFS 2
+#define DEFAULTSEGID 0
+#define DEFAULTALIGNMENT 0
+#define DEFAULTTIMEOUT 10000
+
+#define RMSQUERYSERVER 0
+#define RMSCONFIGURESERVER 1
+#define RMSCREATENODE 2
+#define RMSEXECUTENODE 3
+#define RMSDELETENODE 4
+#define RMSCHANGENODEPRIORITY 5
+#define RMSREADMEMORY 6
+#define RMSWRITEMEMORY 7
+#define RMSCOPY 8
+#define MAXTIMEOUT 2000
+
+#define NUMRMSFXNS 9
+
+#define PWR_TIMEOUT 500 /* default PWR timeout in msec */
+
+#define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
+
+/*
+ * ======== node_mgr ========
+ */
+struct node_mgr {
+ struct dev_object *hdev_obj; /* Device object */
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+ struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
+ struct disp_object *disp_obj; /* Node dispatcher */
+ struct lst_list *node_list; /* List of all allocated nodes */
+ u32 num_nodes; /* Number of nodes in node_list */
+ u32 num_created; /* Number of nodes *created* on DSP */
+ struct gb_t_map *pipe_map; /* Pipe connection bit map */
+ struct gb_t_map *pipe_done_map; /* Pipes that are half free */
+ struct gb_t_map *chnl_map; /* Channel allocation bit map */
+ struct gb_t_map *dma_chnl_map; /* DMA Channel allocation bit map */
+ struct gb_t_map *zc_chnl_map; /* Zero-Copy Channel alloc bit map */
+ struct ntfy_object *ntfy_obj; /* Manages registered notifications */
+ struct mutex node_mgr_lock; /* For critical sections */
+ u32 ul_fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
+ struct msg_mgr *msg_mgr_obj;
+
+ /* Processor properties needed by Node Dispatcher */
+ u32 ul_num_chnls; /* Total number of channels */
+ u32 ul_chnl_offset; /* Offset of chnl ids rsvd for RMS */
+ u32 ul_chnl_buf_size; /* Buffer size for data to RMS */
+ int proc_family; /* eg, 5000 */
+ int proc_type; /* eg, 5510 */
+ u32 udsp_word_size; /* Size of DSP word on host bytes */
+ u32 udsp_data_mau_size; /* Size of DSP data MAU */
+ u32 udsp_mau_size; /* Size of MAU */
+ s32 min_pri; /* Minimum runtime priority for node */
+ s32 max_pri; /* Maximum runtime priority for node */
+
+ struct strm_mgr *strm_mgr_obj; /* STRM manager */
+
+ /* Loader properties */
+ struct nldr_object *nldr_obj; /* Handle to loader */
+ struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
+ bool loader_init; /* Loader Init function succeeded? */
+};
+
+/*
+ * ======== connecttype ========
+ */
+enum connecttype {
+ NOTCONNECTED = 0,
+ NODECONNECT,
+ HOSTCONNECT,
+ DEVICECONNECT,
+};
+
+/*
+ * ======== stream_chnl ========
+ */
+struct stream_chnl {
+ enum connecttype type; /* Type of stream connection */
+ u32 dev_id; /* pipe or channel id */
+};
+
+/*
+ * ======== node_object ========
+ */
+struct node_object {
+ struct list_head list_elem;
+ struct node_mgr *hnode_mgr; /* The manager of this node */
+ struct proc_object *hprocessor; /* Back pointer to processor */
+ struct dsp_uuid node_uuid; /* Node's ID */
+ s32 prio; /* Node's current priority */
+ u32 utimeout; /* Timeout for blocking NODE calls */
+ u32 heap_size; /* Heap Size */
+	u32 udsp_heap_virt_addr;	/* Heap virtual address (DSP side) */
+	u32 ugpp_heap_virt_addr;	/* Heap virtual address (GPP side) */
+ enum node_type ntype; /* Type of node: message, task, etc */
+ enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
+ u32 num_inputs; /* Current number of inputs */
+ u32 num_outputs; /* Current number of outputs */
+ u32 max_input_index; /* Current max input stream index */
+ u32 max_output_index; /* Current max output stream index */
+ struct stream_chnl *inputs; /* Node's input streams */
+ struct stream_chnl *outputs; /* Node's output streams */
+ struct node_createargs create_args; /* Args for node create func */
+ nodeenv node_env; /* Environment returned by RMS */
+ struct dcd_genericobj dcd_props; /* Node properties from DCD */
+ struct dsp_cbdata *pargs; /* Optional args to pass to node */
+ struct ntfy_object *ntfy_obj; /* Manages registered notifications */
+ char *pstr_dev_name; /* device name, if device node */
+ struct sync_object *sync_done; /* Synchronize node_terminate */
+ s32 exit_status; /* execute function return status */
+
+ /* Information needed for node_get_attr() */
+ void *device_owner; /* If dev node, task that owns it */
+ u32 num_gpp_inputs; /* Current # of from GPP streams */
+ u32 num_gpp_outputs; /* Current # of to GPP streams */
+ /* Current stream connections */
+ struct dsp_streamconnect *stream_connect;
+
+ /* Message queue */
+ struct msg_queue *msg_queue_obj;
+
+ /* These fields used for SM messaging */
+ struct cmm_xlatorobject *xlator; /* Node's SM addr translator */
+
+ /* Handle to pass to dynamic loader */
+ struct nldr_nodeobject *nldr_node_obj;
+ bool loaded; /* Code is (dynamically) loaded */
+ bool phase_split; /* Phases split in many libs or ovly */
+
+};
+
+/* Default buffer attributes */
+static struct dsp_bufferattr node_dfltbufattrs = {
+ 0, /* cb_struct */
+ 1, /* segment_id */
+ 0, /* buf_alignment */
+};
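+
+/*
+ * These defaults (single SM segment 1, buf_alignment 0) satisfy the
+ * checks in node_alloc_msg_buf(), so callers that pass a NULL
+ * dsp_bufferattr always take the plain shared-memory allocation path.
+ */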
+
+static void delete_node(struct node_object *hnode,
+ struct process_context *pr_ctxt);
+static void delete_node_mgr(struct node_mgr *hnode_mgr);
+static void fill_stream_connect(struct node_object *node1,
+ struct node_object *node2, u32 stream1,
+ u32 stream2);
+static void fill_stream_def(struct node_object *hnode,
+ struct node_strmdef *pstrm_def,
+ struct dsp_strmattr *pattrs);
+static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
+static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
+ u32 phase);
+static int get_node_props(struct dcd_manager *hdcd_mgr,
+ struct node_object *hnode,
+ const struct dsp_uuid *node_uuid,
+ struct dcd_genericobj *dcd_prop);
+static int get_proc_props(struct node_mgr *hnode_mgr,
+ struct dev_object *hdev_obj);
+static int get_rms_fxns(struct node_mgr *hnode_mgr);
+static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
+ u32 ul_num_bytes, u32 mem_space);
+static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
+ u32 ul_num_bytes, u32 mem_space);
+
+static u32 refs; /* module reference count */
+
+/* Dynamic loader functions. */
+static struct node_ldr_fxns nldr_fxns = {
+ nldr_allocate,
+ nldr_create,
+ nldr_delete,
+ nldr_exit,
+ nldr_get_fxn_addr,
+ nldr_init,
+ nldr_load,
+ nldr_unload,
+};
+
+enum node_state node_get_state(void *hnode)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ if (!pnode)
+ return -1;
+ else
+ return pnode->node_state;
+}
+
+/*
+ * ======== node_allocate ========
+ * Purpose:
+ * Allocate GPP resources to manage a node on the DSP.
+ */
+int node_allocate(struct proc_object *hprocessor,
+ const struct dsp_uuid *node_uuid,
+ const struct dsp_cbdata *pargs,
+ const struct dsp_nodeattrin *attr_in,
+ struct node_res_object **noderes,
+ struct process_context *pr_ctxt)
+{
+ struct node_mgr *hnode_mgr;
+ struct dev_object *hdev_obj;
+ struct node_object *pnode = NULL;
+ enum node_type node_type = NODE_TASK;
+ struct node_msgargs *pmsg_args;
+ struct node_taskargs *ptask_args;
+ u32 num_streams;
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+ struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
+ u32 proc_id;
+ u32 pul_value;
+ u32 dynext_base;
+ u32 off_set = 0;
+ u32 ul_stack_seg_addr, ul_stack_seg_val;
+ u32 ul_gpp_mem_base;
+ struct cfg_hostres *host_res;
+ struct bridge_dev_context *pbridge_context;
+ u32 mapped_addr = 0;
+ u32 map_attrs = 0x0;
+ struct dsp_processorstate proc_state;
+#ifdef DSP_DMM_DEBUG
+ struct dmm_object *dmm_mgr;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+#endif
+
+ void *node_res;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hprocessor != NULL);
+ DBC_REQUIRE(noderes != NULL);
+ DBC_REQUIRE(node_uuid != NULL);
+
+ *noderes = NULL;
+
+ status = proc_get_processor_id(hprocessor, &proc_id);
+
+ if (proc_id != DSP_UNIT)
+ goto func_end;
+
+ status = proc_get_dev_object(hprocessor, &hdev_obj);
+ if (!status) {
+ status = dev_get_node_manager(hdev_obj, &hnode_mgr);
+ if (hnode_mgr == NULL)
+ status = -EPERM;
+
+ }
+
+ if (status)
+ goto func_end;
+
+ status = dev_get_bridge_context(hdev_obj, &pbridge_context);
+ if (!pbridge_context) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_end;
+	/* If processor is in error state then don't attempt
+	   to allocate a new node */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_end;
+ }
+
+ /* Assuming that 0 is not a valid function address */
+ if (hnode_mgr->ul_fxn_addrs[0] == 0) {
+ /* No RMS on target - we currently can't handle this */
+ pr_err("%s: Failed, no RMS in base image\n", __func__);
+ status = -EPERM;
+ } else {
+ /* Validate attr_in fields, if non-NULL */
+ if (attr_in) {
+ /* Check if attr_in->prio is within range */
+ if (attr_in->prio < hnode_mgr->min_pri ||
+ attr_in->prio > hnode_mgr->max_pri)
+ status = -EDOM;
+ }
+ }
+ /* Allocate node object and fill in */
+ if (status)
+ goto func_end;
+
+ pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
+ if (pnode == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ pnode->hnode_mgr = hnode_mgr;
+ /* This critical section protects get_node_props */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ /* Get dsp_ndbprops from node database */
+ status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
+ &(pnode->dcd_props));
+ if (status)
+ goto func_cont;
+
+ pnode->node_uuid = *node_uuid;
+ pnode->hprocessor = hprocessor;
+ pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
+ pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
+ pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
+
+	/* Currently only C64 DSP builds support Node Dynamic heaps */
+ /* Allocate memory for node heap */
+ pnode->create_args.asa.task_arg_obj.heap_size = 0;
+ pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
+ pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
+ pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
+ if (!attr_in)
+ goto func_cont;
+
+ /* Check if we have a user allocated node heap */
+ if (!(attr_in->pgpp_virt_addr))
+ goto func_cont;
+
+ /* check for page aligned Heap size */
+ if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
+ pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
+ __func__, attr_in->heap_size);
+ status = -EINVAL;
+ } else {
+ pnode->create_args.asa.task_arg_obj.heap_size =
+ attr_in->heap_size;
+ pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
+ (u32) attr_in->pgpp_virt_addr;
+ }
+ if (status)
+ goto func_cont;
+
+ status = proc_reserve_memory(hprocessor,
+ pnode->create_args.asa.task_arg_obj.
+ heap_size + PAGE_SIZE,
+ (void **)&(pnode->create_args.asa.
+ task_arg_obj.udsp_heap_res_addr),
+ pr_ctxt);
+ if (status) {
+ pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
+ __func__, status);
+ goto func_cont;
+ }
+#ifdef DSP_DMM_DEBUG
+ status = dmm_get_handle(p_proc_object, &dmm_mgr);
+ if (!dmm_mgr) {
+ status = DSP_EHANDLE;
+ goto func_cont;
+ }
+
+ dmm_mem_map_dump(dmm_mgr);
+#endif
+
+ map_attrs |= DSP_MAPLITTLEENDIAN;
+ map_attrs |= DSP_MAPELEMSIZE32;
+ map_attrs |= DSP_MAPVIRTUALADDR;
+ status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
+ pnode->create_args.asa.task_arg_obj.heap_size,
+ (void *)pnode->create_args.asa.task_arg_obj.
+ udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
+ pr_ctxt);
+ if (status)
+ pr_err("%s: Failed to map memory for Heap: 0x%x\n",
+ __func__, status);
+ else
+ pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
+ (u32) mapped_addr;
+
+func_cont:
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ if (attr_in != NULL) {
+		/* Overrides of NDB properties */
+ pnode->utimeout = attr_in->utimeout;
+ pnode->prio = attr_in->prio;
+ }
+ /* Create object to manage notifications */
+ if (!status) {
+ pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+ GFP_KERNEL);
+ if (pnode->ntfy_obj)
+ ntfy_init(pnode->ntfy_obj);
+ else
+ status = -ENOMEM;
+ }
+
+ if (!status) {
+ node_type = node_get_type(pnode);
+ /* Allocate dsp_streamconnect array for device, task, and
+ * dais socket nodes. */
+ if (node_type != NODE_MESSAGE) {
+ num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
+ pnode->stream_connect = kzalloc(num_streams *
+ sizeof(struct dsp_streamconnect),
+ GFP_KERNEL);
+ if (num_streams > 0 && pnode->stream_connect == NULL)
+ status = -ENOMEM;
+
+ }
+ if (!status && (node_type == NODE_TASK ||
+ node_type == NODE_DAISSOCKET)) {
+			/* Allocate arrays for maintaining stream connections */
+ pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
+ sizeof(struct stream_chnl), GFP_KERNEL);
+ pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
+ sizeof(struct stream_chnl), GFP_KERNEL);
+ ptask_args = &(pnode->create_args.asa.task_arg_obj);
+ ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
+ sizeof(struct node_strmdef),
+ GFP_KERNEL);
+ ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
+ sizeof(struct node_strmdef),
+ GFP_KERNEL);
+ if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
+ ptask_args->strm_in_def
+ == NULL))
+ || (MAX_OUTPUTS(pnode) > 0
+ && (pnode->outputs == NULL
+ || ptask_args->strm_out_def == NULL)))
+ status = -ENOMEM;
+ }
+ }
+ if (!status && (node_type != NODE_DEVICE)) {
+ /* Create an event that will be posted when RMS_EXIT is
+ * received. */
+ pnode->sync_done = kzalloc(sizeof(struct sync_object),
+ GFP_KERNEL);
+ if (pnode->sync_done)
+ sync_init_event(pnode->sync_done);
+ else
+ status = -ENOMEM;
+
+ if (!status) {
+			/* Get the shared mem mgr for this node's dev object */
+ status = cmm_get_handle(hprocessor, &hcmm_mgr);
+ if (!status) {
+ /* Allocate a SM addr translator for this node
+ * w/ deflt attr */
+ status = cmm_xlator_create(&pnode->xlator,
+ hcmm_mgr, NULL);
+ }
+ }
+ if (!status) {
+ /* Fill in message args */
+ if ((pargs != NULL) && (pargs->cb_data > 0)) {
+ pmsg_args =
+ &(pnode->create_args.asa.node_msg_args);
+ pmsg_args->pdata = kzalloc(pargs->cb_data,
+ GFP_KERNEL);
+ if (pmsg_args->pdata == NULL) {
+ status = -ENOMEM;
+ } else {
+ pmsg_args->arg_length = pargs->cb_data;
+ memcpy(pmsg_args->pdata,
+ pargs->node_data,
+ pargs->cb_data);
+ }
+ }
+ }
+ }
+
+ if (!status && node_type != NODE_DEVICE) {
+ /* Create a message queue for this node */
+ intf_fxns = hnode_mgr->intf_fxns;
+ status =
+ (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
+ &pnode->msg_queue_obj,
+ 0,
+ pnode->create_args.asa.
+ node_msg_args.max_msgs,
+ pnode);
+ }
+
+ if (!status) {
+ /* Create object for dynamic loading */
+
+ status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
+ (void *)pnode,
+ &pnode->dcd_props.
+ obj_data.node_obj,
+ &pnode->
+ nldr_node_obj,
+ &pnode->phase_split);
+ }
+
+	/* Compare the value read from the Node Properties with STACKSEGLABEL;
+	 * if they match, read the address of STACKSEGLABEL, calculate the
+	 * corresponding GPP address, read the value at that address and
+	 * override the stack_seg value in the task args */
+ if (!status &&
+ (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
+ stack_seg_name != NULL) {
+ if (strcmp((char *)
+ pnode->dcd_props.obj_data.node_obj.ndb_props.
+ stack_seg_name, STACKSEGLABEL) == 0) {
+ status =
+ hnode_mgr->nldr_fxns.
+ pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
+ &dynext_base);
+ if (status)
+ pr_err("%s: Failed to get addr for DYNEXT_BEG"
+ " status = 0x%x\n", __func__, status);
+
+ status =
+ hnode_mgr->nldr_fxns.
+ pfn_get_fxn_addr(pnode->nldr_node_obj,
+ "L1DSRAM_HEAP", &pul_value);
+
+ if (status)
+ pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
+ " status = 0x%x\n", __func__, status);
+
+ host_res = pbridge_context->resources;
+ if (!host_res)
+ status = -EPERM;
+
+ if (status) {
+ pr_err("%s: Failed to get host resource, status"
+ " = 0x%x\n", __func__, status);
+ goto func_end;
+ }
+
+ ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
+ off_set = pul_value - dynext_base;
+ ul_stack_seg_addr = ul_gpp_mem_base + off_set;
+ ul_stack_seg_val = readl(ul_stack_seg_addr);
+
+ dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
+ " 0x%x\n", __func__, ul_stack_seg_val,
+ ul_stack_seg_addr);
+
+ pnode->create_args.asa.task_arg_obj.stack_seg =
+ ul_stack_seg_val;
+
+ }
+ }
+
+ if (!status) {
+ /* Add the node to the node manager's list of allocated
+ * nodes. */
+ lst_init_elem((struct list_head *)pnode);
+ NODE_SET_STATE(pnode, NODE_ALLOCATED);
+
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
+ ++(hnode_mgr->num_nodes);
+
+ /* Exit critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+
+ /* Preset this to assume phases are split
+ * (for overlay and dll) */
+ pnode->phase_split = true;
+
+ /* Notify all clients registered for DSP_NODESTATECHANGE. */
+ proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
+ } else {
+ /* Cleanup */
+ if (pnode)
+ delete_node(pnode, pr_ctxt);
+
+ }
+
+ if (!status) {
+ status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
+ if (status) {
+ delete_node(pnode, pr_ctxt);
+ goto func_end;
+ }
+
+ *noderes = (struct node_res_object *)node_res;
+ drv_proc_node_update_heap_status(node_res, true);
+ drv_proc_node_update_status(node_res, true);
+ }
+ DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
+func_end:
+ dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
+ "node_res: %p status: 0x%x\n", __func__, hprocessor,
+ node_uuid, pargs, attr_in, noderes, status);
+ return status;
+}
+
+/*
+ * ======== node_alloc_msg_buf ========
+ * Purpose:
+ * Allocates buffer for zero copy messaging.
+ */
+DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
+ struct dsp_bufferattr *pattr,
+ u8 **pbuffer)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ int status = 0;
+ bool va_flag = false;
+ bool set_info;
+ u32 proc_id;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pbuffer != NULL);
+
+ DBC_REQUIRE(usize > 0);
+
+ if (!pnode)
+ status = -EFAULT;
+ else if (node_get_type(pnode) == NODE_DEVICE)
+ status = -EPERM;
+
+ if (status)
+ goto func_end;
+
+ if (pattr == NULL)
+ pattr = &node_dfltbufattrs; /* set defaults */
+
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+ if (proc_id != DSP_UNIT) {
+ DBC_ASSERT(NULL);
+ goto func_end;
+ }
+ /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
+ * virt address, so set this info in this node's translator
+ * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
+ * virtual address from node's translator. */
+ if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
+ (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
+ va_flag = true;
+ set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
+ true : false;
+ /* Clear mask bits */
+ pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
+ /* Set/get this node's translators virtual address base/size */
+ status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
+ pattr->segment_id, set_info);
+ }
+ if (!status && (!va_flag)) {
+ if (pattr->segment_id != 1) {
+ /* Node supports single SM segment only. */
+ status = -EBADR;
+ }
+ /* Arbitrary SM buffer alignment not supported for host side
+ * allocs, but guaranteed for the following alignment
+ * values. */
+ switch (pattr->buf_alignment) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+			/* alignment value not supported */
+ status = -EPERM;
+ break;
+ }
+ if (!status) {
+ /* allocate physical buffer from seg_id in node's
+ * translator */
+ (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
+ usize);
+ if (*pbuffer == NULL) {
+ pr_err("%s: error - Out of shared memory\n",
+ __func__);
+ status = -ENOMEM;
+ }
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== node_change_priority ========
+ * Purpose:
+ * Change the priority of a node in the allocated state, or that is
+ * currently running or paused on the target.
+ */
+int node_change_priority(struct node_object *hnode, s32 prio)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ struct node_mgr *hnode_mgr = NULL;
+ enum node_type node_type;
+ enum node_state state;
+ int status = 0;
+ u32 proc_id;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!hnode || !hnode->hnode_mgr) {
+ status = -EFAULT;
+ } else {
+ hnode_mgr = hnode->hnode_mgr;
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
+ status = -EPERM;
+ else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
+ status = -EDOM;
+ }
+ if (status)
+ goto func_end;
+
+ /* Enter critical section */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ state = node_get_state(hnode);
+ if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
+ NODE_SET_PRIORITY(hnode, prio);
+ } else {
+ if (state != NODE_RUNNING) {
+ status = -EBADR;
+ goto func_cont;
+ }
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+ if (proc_id == DSP_UNIT) {
+ status =
+ disp_node_change_priority(hnode_mgr->disp_obj,
+ hnode,
+ hnode_mgr->ul_fxn_addrs
+ [RMSCHANGENODEPRIORITY],
+ hnode->node_env, prio);
+ }
+ if (status >= 0)
+ NODE_SET_PRIORITY(hnode, prio);
+
+ }
+func_cont:
+ /* Leave critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+func_end:
+ return status;
+}
+
+/*
+ * ======== node_connect ========
+ * Purpose:
+ * Connect two nodes on the DSP, or a node on the DSP to the GPP.
+ */
+int node_connect(struct node_object *node1, u32 stream1,
+ struct node_object *node2,
+ u32 stream2, struct dsp_strmattr *pattrs,
+ struct dsp_cbdata *conn_param)
+{
+ struct node_mgr *hnode_mgr;
+ char *pstr_dev_name = NULL;
+ enum node_type node1_type = NODE_TASK;
+ enum node_type node2_type = NODE_TASK;
+ struct node_strmdef *pstrm_def;
+ struct node_strmdef *input = NULL;
+ struct node_strmdef *output = NULL;
+ struct node_object *dev_node_obj;
+ struct node_object *hnode;
+ struct stream_chnl *pstream;
+ u32 pipe_id = GB_NOBITS;
+ u32 chnl_id = GB_NOBITS;
+ s8 chnl_mode;
+ u32 dw_length;
+ int status = 0;
+ DBC_REQUIRE(refs > 0);
+
+ if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) ||
+ (node2 != (struct node_object *)DSP_HGPPNODE && !node2))
+ status = -EFAULT;
+
+ if (!status) {
+ /* The two nodes must be on the same processor */
+ if (node1 != (struct node_object *)DSP_HGPPNODE &&
+ node2 != (struct node_object *)DSP_HGPPNODE &&
+ node1->hnode_mgr != node2->hnode_mgr)
+ status = -EPERM;
+ /* Cannot connect a node to itself */
+ if (node1 == node2)
+ status = -EPERM;
+
+ }
+ if (!status) {
+ /* node_get_type() will return NODE_GPP if hnode =
+ * DSP_HGPPNODE. */
+ node1_type = node_get_type(node1);
+ node2_type = node_get_type(node2);
+ /* Check stream indices ranges */
+ if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
+ stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP
+ && node2_type !=
+ NODE_DEVICE
+ && stream2 >=
+ MAX_INPUTS(node2)))
+ status = -EINVAL;
+ }
+ if (!status) {
+ /*
+ * Only the following types of connections are allowed:
+ * task/dais socket < == > task/dais socket
+ * task/dais socket < == > device
+ * task/dais socket < == > GPP
+ *
+ * ie, no message nodes, and at least one task or dais
+ * socket node.
+ */
+ if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
+ (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
+ node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
+ status = -EPERM;
+ }
+ /*
+ * Check stream mode. Default is STRMMODE_PROCCOPY.
+ */
+ if (!status && pattrs) {
+ if (pattrs->strm_mode != STRMMODE_PROCCOPY)
+ status = -EPERM; /* illegal stream mode */
+
+ }
+ if (status)
+ goto func_end;
+
+ if (node1_type != NODE_GPP) {
+ hnode_mgr = node1->hnode_mgr;
+ } else {
+ DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
+ hnode_mgr = node2->hnode_mgr;
+ }
+ /* Enter critical section */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ /* Nodes must be in the allocated state */
+ if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED)
+ status = -EBADR;
+
+ if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
+ status = -EBADR;
+
+ if (!status) {
+ /* Check that stream indices for task and dais socket nodes
+		 * are not already in use. (Device nodes checked later) */
+ if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
+ output =
+ &(node1->create_args.asa.
+ task_arg_obj.strm_out_def[stream1]);
+ if (output->sz_device != NULL)
+ status = -EISCONN;
+
+ }
+ if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
+ input =
+ &(node2->create_args.asa.
+ task_arg_obj.strm_in_def[stream2]);
+ if (input->sz_device != NULL)
+ status = -EISCONN;
+
+ }
+ }
+ /* Connecting two task nodes? */
+ if (!status && ((node1_type == NODE_TASK ||
+ node1_type == NODE_DAISSOCKET)
+ && (node2_type == NODE_TASK
+ || node2_type == NODE_DAISSOCKET))) {
+ /* Find available pipe */
+ pipe_id = gb_findandset(hnode_mgr->pipe_map);
+ if (pipe_id == GB_NOBITS) {
+ status = -ECONNREFUSED;
+ } else {
+ node1->outputs[stream1].type = NODECONNECT;
+ node2->inputs[stream2].type = NODECONNECT;
+ node1->outputs[stream1].dev_id = pipe_id;
+ node2->inputs[stream2].dev_id = pipe_id;
+ output->sz_device = kzalloc(PIPENAMELEN + 1,
+ GFP_KERNEL);
+ input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
+ if (output->sz_device == NULL ||
+ input->sz_device == NULL) {
+ /* Undo the connection */
+ kfree(output->sz_device);
+
+ kfree(input->sz_device);
+
+ output->sz_device = NULL;
+ input->sz_device = NULL;
+ gb_clear(hnode_mgr->pipe_map, pipe_id);
+ status = -ENOMEM;
+ } else {
+				/* Copy "/dbpipe<pipe_id>" name to device names */
+ sprintf(output->sz_device, "%s%d",
+ PIPEPREFIX, pipe_id);
+ strcpy(input->sz_device, output->sz_device);
+ }
+ }
+ }
+ /* Connecting task node to host? */
+ if (!status && (node1_type == NODE_GPP ||
+ node2_type == NODE_GPP)) {
+ if (node1_type == NODE_GPP) {
+ chnl_mode = CHNL_MODETODSP;
+ } else {
+ DBC_ASSERT(node2_type == NODE_GPP);
+ chnl_mode = CHNL_MODEFROMDSP;
+ }
+ /* Reserve a channel id. We need to put the name "/host<id>"
+ * in the node's create_args, but the host
+ * side channel will not be opened until DSPStream_Open is
+ * called for this node. */
+ if (pattrs) {
+ if (pattrs->strm_mode == STRMMODE_RDMA) {
+ chnl_id =
+ gb_findandset(hnode_mgr->dma_chnl_map);
+ /* dma chans are 2nd transport chnl set
+				 * ids (e.g. 16-31) */
+ (chnl_id != GB_NOBITS) ?
+ (chnl_id =
+ chnl_id +
+ hnode_mgr->ul_num_chnls) : chnl_id;
+ } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
+ chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
+				/* zero-copy chans are 3rd transport set
+ * (e.g. 32-47) */
+ (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
+ (2 *
+ hnode_mgr->
+ ul_num_chnls))
+ : chnl_id;
+ } else { /* must be PROCCOPY */
+ DBC_ASSERT(pattrs->strm_mode ==
+ STRMMODE_PROCCOPY);
+ chnl_id = gb_findandset(hnode_mgr->chnl_map);
+ /* e.g. 0-15 */
+ }
+ } else {
+ /* default to PROCCOPY */
+ chnl_id = gb_findandset(hnode_mgr->chnl_map);
+ }
+ if (chnl_id == GB_NOBITS) {
+ status = -ECONNREFUSED;
+ goto func_cont2;
+ }
+ pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
+ if (pstr_dev_name != NULL)
+ goto func_cont2;
+
+ if (pattrs) {
+ if (pattrs->strm_mode == STRMMODE_RDMA) {
+ gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
+ hnode_mgr->ul_num_chnls);
+ } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
+ gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
+ (2 * hnode_mgr->ul_num_chnls));
+ } else {
+ DBC_ASSERT(pattrs->strm_mode ==
+ STRMMODE_PROCCOPY);
+ gb_clear(hnode_mgr->chnl_map, chnl_id);
+ }
+ } else {
+ gb_clear(hnode_mgr->chnl_map, chnl_id);
+ }
+ status = -ENOMEM;
+func_cont2:
+ if (!status) {
+ if (node1 == (struct node_object *)DSP_HGPPNODE) {
+ node2->inputs[stream2].type = HOSTCONNECT;
+ node2->inputs[stream2].dev_id = chnl_id;
+ input->sz_device = pstr_dev_name;
+ } else {
+ node1->outputs[stream1].type = HOSTCONNECT;
+ node1->outputs[stream1].dev_id = chnl_id;
+ output->sz_device = pstr_dev_name;
+ }
+ sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
+ }
+ }
+ /* Connecting task node to device node? */
+ if (!status && ((node1_type == NODE_DEVICE) ||
+ (node2_type == NODE_DEVICE))) {
+ if (node2_type == NODE_DEVICE) {
+ /* node1 == > device */
+ dev_node_obj = node2;
+ hnode = node1;
+ pstream = &(node1->outputs[stream1]);
+ pstrm_def = output;
+ } else {
+ /* device == > node2 */
+ dev_node_obj = node1;
+ hnode = node2;
+ pstream = &(node2->inputs[stream2]);
+ pstrm_def = input;
+ }
+ /* Set up create args */
+ pstream->type = DEVICECONNECT;
+ dw_length = strlen(dev_node_obj->pstr_dev_name);
+ if (conn_param != NULL) {
+ pstrm_def->sz_device = kzalloc(dw_length + 1 +
+ conn_param->cb_data,
+ GFP_KERNEL);
+ } else {
+ pstrm_def->sz_device = kzalloc(dw_length + 1,
+ GFP_KERNEL);
+ }
+ if (pstrm_def->sz_device == NULL) {
+ status = -ENOMEM;
+ } else {
+ /* Copy device name */
+ strncpy(pstrm_def->sz_device,
+ dev_node_obj->pstr_dev_name, dw_length);
+ if (conn_param != NULL) {
+ strncat(pstrm_def->sz_device,
+ (char *)conn_param->node_data,
+ (u32) conn_param->cb_data);
+ }
+ dev_node_obj->device_owner = hnode;
+ }
+ }
+ if (!status) {
+ /* Fill in create args */
+ if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
+ node1->create_args.asa.task_arg_obj.num_outputs++;
+ fill_stream_def(node1, output, pattrs);
+ }
+ if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
+ node2->create_args.asa.task_arg_obj.num_inputs++;
+ fill_stream_def(node2, input, pattrs);
+ }
+ /* Update node1 and node2 stream_connect */
+ if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
+ node1->num_outputs++;
+ if (stream1 > node1->max_output_index)
+ node1->max_output_index = stream1;
+
+ }
+ if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
+ node2->num_inputs++;
+ if (stream2 > node2->max_input_index)
+ node2->max_input_index = stream2;
+
+ }
+ fill_stream_connect(node1, node2, stream1, stream2);
+ }
+ /* end of sync_enter_cs */
+ /* Exit critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+func_end:
+	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d "
+ "pattrs: %p status: 0x%x\n", __func__, node1,
+ stream1, node2, stream2, pattrs, status);
+ return status;
+}
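+
+/*
+ * Note on node_connect(): three topologies are handled above.  Two
+ * task/dais nodes share a "/dbpipe<n>" pipe; a task/dais node connected to
+ * the GPP reserves a "/host<n>" channel id (shifted into the DMA or
+ * zero-copy ranges for those stream modes); and a connection to a device
+ * node reuses the device node's device name.  The host-side channel itself
+ * is only opened later, when the stream is opened.
+ */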
+
+/*
+ * ======== node_create ========
+ * Purpose:
+ * Create a node on the DSP by remotely calling the node's create function.
+ */
+int node_create(struct node_object *hnode)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ struct node_mgr *hnode_mgr;
+ struct bridge_drv_interface *intf_fxns;
+ u32 ul_create_fxn;
+ enum node_type node_type;
+ int status = 0;
+ int status1 = 0;
+ struct dsp_cbdata cb_data;
+ u32 proc_id = 255;
+ struct dsp_processorstate proc_state;
+ struct proc_object *hprocessor;
+#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+#endif
+
+ DBC_REQUIRE(refs > 0);
+ if (!pnode) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hprocessor = hnode->hprocessor;
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_end;
+ /* If processor is in error state then don't attempt to create
+ new node */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* create struct dsp_cbdata struct for PWR calls */
+ cb_data.cb_data = PWR_TIMEOUT;
+ node_type = node_get_type(hnode);
+ hnode_mgr = hnode->hnode_mgr;
+ intf_fxns = hnode_mgr->intf_fxns;
+ /* Get access to node dispatcher */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ /* Check node state */
+ if (node_get_state(hnode) != NODE_ALLOCATED)
+ status = -EBADR;
+
+ if (!status)
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+
+ if (status)
+ goto func_cont2;
+
+ if (proc_id != DSP_UNIT)
+ goto func_cont2;
+
+ /* Make sure streams are properly connected */
+ if ((hnode->num_inputs && hnode->max_input_index >
+ hnode->num_inputs - 1) ||
+ (hnode->num_outputs && hnode->max_output_index >
+ hnode->num_outputs - 1))
+ status = -ENOTCONN;
+
+ if (!status) {
+ /* If node's create function is not loaded, load it */
+ /* Boost the OPP level to max level that DSP can be requested */
+#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
+ if (pdata->cpu_set_freq)
+ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
+#endif
+ status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
+ NLDR_CREATE);
+ /* Get address of node's create function */
+ if (!status) {
+ hnode->loaded = true;
+ if (node_type != NODE_DEVICE) {
+ status = get_fxn_address(hnode, &ul_create_fxn,
+ CREATEPHASE);
+ }
+ } else {
+ pr_err("%s: failed to load create code: 0x%x\n",
+ __func__, status);
+ }
+ /* Request the lowest OPP level */
+#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
+ if (pdata->cpu_set_freq)
+ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
+#endif
+ /* Get address of iAlg functions, if socket node */
+ if (!status) {
+ if (node_type == NODE_DAISSOCKET) {
+ status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
+ (hnode->nldr_node_obj,
+ hnode->dcd_props.obj_data.node_obj.
+ pstr_i_alg_name,
+ &hnode->create_args.asa.
+ task_arg_obj.ul_dais_arg);
+ }
+ }
+ }
+ if (!status) {
+ if (node_type != NODE_DEVICE) {
+ status = disp_node_create(hnode_mgr->disp_obj, hnode,
+ hnode_mgr->ul_fxn_addrs
+ [RMSCREATENODE],
+ ul_create_fxn,
+ &(hnode->create_args),
+ &(hnode->node_env));
+ if (status >= 0) {
+ /* Set the message queue id to the node env
+ * pointer */
+ intf_fxns = hnode_mgr->intf_fxns;
+ (*intf_fxns->pfn_msg_set_queue_id) (hnode->
+ msg_queue_obj,
+ hnode->node_env);
+ }
+ }
+ }
+ /* Phase II/Overlays: Create, execute, delete phases possibly in
+ * different files/sections. */
+ if (hnode->loaded && hnode->phase_split) {
+ /* If create code was dynamically loaded, we can now unload
+ * it. */
+ status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
+ NLDR_CREATE);
+ hnode->loaded = false;
+ }
+ if (status1)
+ pr_err("%s: Failed to unload create code: 0x%x\n",
+ __func__, status1);
+func_cont2:
+ /* Update node state and node manager state */
+ if (status >= 0) {
+ NODE_SET_STATE(hnode, NODE_CREATED);
+ hnode_mgr->num_created++;
+ goto func_cont;
+ }
+ if (status != -EBADR) {
+ /* Put back in NODE_ALLOCATED state if error occurred */
+ NODE_SET_STATE(hnode, NODE_ALLOCATED);
+ }
+func_cont:
+ /* Free access to node dispatcher */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+func_end:
+ if (status >= 0) {
+ proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
+ ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
+ }
+
+ dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
+ hnode, status);
+ return status;
+}
+
+/*
+ * ======== node_create_mgr ========
+ * Purpose:
+ * Create a NODE Manager object.
+ */
+int node_create_mgr(struct node_mgr **node_man,
+ struct dev_object *hdev_obj)
+{
+ u32 i;
+ struct node_mgr *node_mgr_obj = NULL;
+ struct disp_attr disp_attr_obj;
+ char *sz_zl_file = "";
+ struct nldr_attrs nldr_attrs_obj;
+ int status = 0;
+ u8 dev_type;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(node_man != NULL);
+ DBC_REQUIRE(hdev_obj != NULL);
+
+ *node_man = NULL;
+ /* Allocate Node manager object */
+ node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
+ if (node_mgr_obj) {
+ node_mgr_obj->hdev_obj = hdev_obj;
+ node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ node_mgr_obj->pipe_map = gb_create(MAXPIPES);
+ node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
+ if (node_mgr_obj->node_list == NULL
+ || node_mgr_obj->pipe_map == NULL
+ || node_mgr_obj->pipe_done_map == NULL) {
+ status = -ENOMEM;
+ } else {
+ INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
+ node_mgr_obj->ntfy_obj = kmalloc(
+ sizeof(struct ntfy_object), GFP_KERNEL);
+ if (node_mgr_obj->ntfy_obj)
+ ntfy_init(node_mgr_obj->ntfy_obj);
+ else
+ status = -ENOMEM;
+ }
+ node_mgr_obj->num_created = 0;
+ } else {
+ status = -ENOMEM;
+ }
+ /* get devNodeType */
+ if (!status)
+ status = dev_get_dev_type(hdev_obj, &dev_type);
+
+ /* Create the DCD Manager */
+ if (!status) {
+ status =
+ dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
+ if (!status)
+ status = get_proc_props(node_mgr_obj, hdev_obj);
+
+ }
+ /* Create NODE Dispatcher */
+ if (!status) {
+ disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
+ disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
+ disp_attr_obj.proc_family = node_mgr_obj->proc_family;
+ disp_attr_obj.proc_type = node_mgr_obj->proc_type;
+ status =
+ disp_create(&node_mgr_obj->disp_obj, hdev_obj,
+ &disp_attr_obj);
+ }
+ /* Create a STRM Manager */
+ if (!status)
+ status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
+
+ if (!status) {
+ dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
+ /* Get msg_ctrl queue manager */
+ dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
+ mutex_init(&node_mgr_obj->node_mgr_lock);
+ node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
+ /* dma chnl map. ul_num_chnls is # per transport */
+ node_mgr_obj->dma_chnl_map =
+ gb_create(node_mgr_obj->ul_num_chnls);
+ node_mgr_obj->zc_chnl_map =
+ gb_create(node_mgr_obj->ul_num_chnls);
+ if ((node_mgr_obj->chnl_map == NULL)
+ || (node_mgr_obj->dma_chnl_map == NULL)
+ || (node_mgr_obj->zc_chnl_map == NULL)) {
+ status = -ENOMEM;
+ } else {
+ /* Block out reserved channels */
+ for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
+ gb_set(node_mgr_obj->chnl_map, i);
+
+ /* Block out channels reserved for RMS */
+ gb_set(node_mgr_obj->chnl_map,
+ node_mgr_obj->ul_chnl_offset);
+ gb_set(node_mgr_obj->chnl_map,
+ node_mgr_obj->ul_chnl_offset + 1);
+ }
+ }
+ if (!status) {
+ /* NO RM Server on the IVA */
+ if (dev_type != IVA_UNIT) {
+ /* Get addresses of any RMS functions loaded */
+ status = get_rms_fxns(node_mgr_obj);
+ }
+ }
+
+ /* Get loader functions and create loader */
+ if (!status)
+ node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
+
+ if (!status) {
+ nldr_attrs_obj.pfn_ovly = ovly;
+ nldr_attrs_obj.pfn_write = mem_write;
+ nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
+ nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
+ node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
+ status =
+ node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
+ hdev_obj,
+ &nldr_attrs_obj);
+ }
+ if (!status)
+ *node_man = node_mgr_obj;
+ else
+ delete_node_mgr(node_mgr_obj);
+
+ DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
+
+ return status;
+}
+
+/*
+ * ======== node_delete ========
+ * Purpose:
+ * Delete a node on the DSP by remotely calling the node's delete function.
+ * Loads the node's delete function if necessary. Free GPP side resources
+ * after node's delete function returns.
+ */
+int node_delete(struct node_res_object *noderes,
+ struct process_context *pr_ctxt)
+{
+ struct node_object *pnode = noderes->hnode;
+ struct node_mgr *hnode_mgr;
+ struct proc_object *hprocessor;
+ struct disp_object *disp_obj;
+ u32 ul_delete_fxn;
+ enum node_type node_type;
+ enum node_state state;
+ int status = 0;
+ int status1 = 0;
+ struct dsp_cbdata cb_data;
+ u32 proc_id;
+ struct bridge_drv_interface *intf_fxns;
+
+ void *node_res = noderes;
+
+ struct dsp_processorstate proc_state;
+ DBC_REQUIRE(refs > 0);
+
+ if (!pnode) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Create a struct dsp_cbdata for the PWR call */
+ cb_data.cb_data = PWR_TIMEOUT;
+ hnode_mgr = pnode->hnode_mgr;
+ hprocessor = pnode->hprocessor;
+ disp_obj = hnode_mgr->disp_obj;
+ node_type = node_get_type(pnode);
+ intf_fxns = hnode_mgr->intf_fxns;
+ /* Enter critical section */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ state = node_get_state(pnode);
+ /* Execute delete phase code for non-device node in all cases
+ * except when the node was only allocated. Delete phase must be
+ * executed even if create phase was executed, but failed.
+ * If the node environment pointer is non-NULL, the delete phase
+ * code must be executed. */
+ if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
+ node_type != NODE_DEVICE) {
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+ if (status)
+ goto func_cont1;
+
+ if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
+ /* If node has terminated, execute phase code will
+ * have already been unloaded in node_on_exit(). If the
+ * node is PAUSED, the execute phase is loaded, and it
+ * is now ok to unload it. If the node is running, we
+ * will unload the execute phase only after deleting
+ * the node. */
+ if (state == NODE_PAUSED && pnode->loaded &&
+ pnode->phase_split) {
+ /* OK to unload execute code as long as the
+ * node is not running */
+ status1 =
+ hnode_mgr->nldr_fxns.
+ pfn_unload(pnode->nldr_node_obj,
+ NLDR_EXECUTE);
+ pnode->loaded = false;
+ NODE_SET_STATE(pnode, NODE_DONE);
+ }
+ /* Load delete phase code if it is not loaded, or if the
+ * EXECUTE phase has not been unloaded yet */
+ if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
+ pnode->phase_split) {
+ status =
+ hnode_mgr->nldr_fxns.
+ pfn_load(pnode->nldr_node_obj, NLDR_DELETE);
+ if (!status)
+ pnode->loaded = true;
+ else
+ pr_err("%s: fail - load delete code:"
+ " 0x%x\n", __func__, status);
+ }
+ }
+func_cont1:
+ if (!status) {
+ /* Unblock a thread trying to terminate the node */
+ (void)sync_set_event(pnode->sync_done);
+ if (proc_id == DSP_UNIT) {
+ /* ul_delete_fxn = address of node's delete
+ * function */
+ status = get_fxn_address(pnode, &ul_delete_fxn,
+ DELETEPHASE);
+ } else if (proc_id == IVA_UNIT)
+ ul_delete_fxn = (u32) pnode->node_env;
+ if (!status) {
+ status = proc_get_state(hprocessor,
+ &proc_state,
+ sizeof(struct
+ dsp_processorstate));
+ if (proc_state.proc_state != PROC_ERROR) {
+ status =
+ disp_node_delete(disp_obj, pnode,
+ hnode_mgr->
+ ul_fxn_addrs
+ [RMSDELETENODE],
+ ul_delete_fxn,
+ pnode->node_env);
+ } else
+ NODE_SET_STATE(pnode, NODE_DONE);
+
+ /* Unload execute, if not unloaded, and delete
+ * function */
+ if (state == NODE_RUNNING &&
+ pnode->phase_split) {
+ status1 =
+ hnode_mgr->nldr_fxns.
+ pfn_unload(pnode->nldr_node_obj,
+ NLDR_EXECUTE);
+ }
+ if (status1)
+ pr_err("%s: fail - unload execute code:"
+ " 0x%x\n", __func__, status1);
+
+ status1 =
+ hnode_mgr->nldr_fxns.pfn_unload(pnode->
+ nldr_node_obj,
+ NLDR_DELETE);
+ pnode->loaded = false;
+ if (status1)
+ pr_err("%s: fail - unload delete code: "
+ "0x%x\n", __func__, status1);
+ }
+ }
+ }
+ /* Free host side resources even if a failure occurred */
+ /* Remove node from hnode_mgr->node_list */
+ lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode);
+ hnode_mgr->num_nodes--;
+ /* Decrement count of nodes created on DSP */
+ if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
+ (pnode->node_env != (u32) NULL)))
+ hnode_mgr->num_created--;
+ /* Free host-side resources allocated by node_create();
+ * delete_node() fails if SM buffers were not freed by the client! */
+ drv_proc_node_update_status(node_res, false);
+ delete_node(pnode, pr_ctxt);
+
+ /*
+ * Release all Node resources and its context
+ */
+ idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
+ kfree(node_res);
+
+ /* Exit critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
+func_end:
+ dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
+ return status;
+}
+
+/*
+ * ======== node_delete_mgr ========
+ * Purpose:
+ * Delete the NODE Manager.
+ */
+int node_delete_mgr(struct node_mgr *hnode_mgr)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (hnode_mgr)
+ delete_node_mgr(hnode_mgr);
+ else
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== node_enum_nodes ========
+ * Purpose:
+ * Enumerate currently allocated nodes.
+ */
+int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
+ u32 node_tab_size, u32 *pu_num_nodes,
+ u32 *pu_allocated)
+{
+ struct node_object *hnode;
+ u32 i;
+ int status = 0;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
+ DBC_REQUIRE(pu_num_nodes != NULL);
+ DBC_REQUIRE(pu_allocated != NULL);
+
+ if (!hnode_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Enter critical section */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ if (hnode_mgr->num_nodes > node_tab_size) {
+ *pu_allocated = hnode_mgr->num_nodes;
+ *pu_num_nodes = 0;
+ status = -EINVAL;
+ } else {
+ hnode = (struct node_object *)lst_first(hnode_mgr->
+ node_list);
+ for (i = 0; i < hnode_mgr->num_nodes; i++) {
+ DBC_ASSERT(hnode);
+ node_tab[i] = hnode;
+ hnode = (struct node_object *)lst_next
+ (hnode_mgr->node_list,
+ (struct list_head *)hnode);
+ }
+ *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
+ }
+ /* end of sync_enter_cs */
+ /* Exit critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+func_end:
+ return status;
+}
+
+/*
+ * ======== node_exit ========
+ * Purpose:
+ * Discontinue usage of NODE module.
+ */
+void node_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== node_free_msg_buf ========
+ * Purpose:
+ * Frees the message buffer.
+ */
+int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
+ struct dsp_bufferattr *pattr)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ int status = 0;
+ u32 proc_id;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pbuffer != NULL);
+ DBC_REQUIRE(pnode != NULL);
+ DBC_REQUIRE(pnode->xlator != NULL);
+
+ if (!hnode) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+ if (proc_id == DSP_UNIT) {
+ if (!status) {
+ if (pattr == NULL) {
+ /* set defaults */
+ pattr = &node_dfltbufattrs;
+ }
+ /* Node supports single SM segment only */
+ if (pattr->segment_id != 1)
+ status = -EBADR;
+
+ /* pbuffer is the client's virtual address */
+ status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
+ }
+ } else {
+ DBC_ASSERT(NULL); /* BUG */
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== node_get_attr ========
+ * Purpose:
+ * Copy the current attributes of the specified node into a dsp_nodeattr
+ * structure.
+ */
+int node_get_attr(struct node_object *hnode,
+ struct dsp_nodeattr *pattr, u32 attr_size)
+{
+ struct node_mgr *hnode_mgr;
+ int status = 0;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pattr != NULL);
+ DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
+
+ if (!hnode) {
+ status = -EFAULT;
+ } else {
+ hnode_mgr = hnode->hnode_mgr;
+ /* Enter hnode_mgr critical section (since we're accessing
+ * data that could be changed by node_change_priority() and
+ * node_connect()). */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+ pattr->cb_struct = sizeof(struct dsp_nodeattr);
+ /* dsp_nodeattrin */
+ pattr->in_node_attr_in.cb_struct =
+ sizeof(struct dsp_nodeattrin);
+ pattr->in_node_attr_in.prio = hnode->prio;
+ pattr->in_node_attr_in.utimeout = hnode->utimeout;
+ pattr->in_node_attr_in.heap_size =
+ hnode->create_args.asa.task_arg_obj.heap_size;
+ pattr->in_node_attr_in.pgpp_virt_addr = (void *)
+ hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
+ pattr->node_attr_inputs = hnode->num_gpp_inputs;
+ pattr->node_attr_outputs = hnode->num_gpp_outputs;
+ /* dsp_nodeinfo */
+ get_node_info(hnode, &(pattr->node_info));
+ /* end of sync_enter_cs */
+ /* Exit critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ }
+ return status;
+}
+
+/*
+ * ======== node_get_channel_id ========
+ * Purpose:
+ * Get the channel index reserved for a stream connection between the
+ * host and a node.
+ */
+int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
+ u32 *chan_id)
+{
+ enum node_type node_type;
+ int status = -EINVAL;
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
+ DBC_REQUIRE(chan_id != NULL);
+
+ if (!hnode) {
+ status = -EFAULT;
+ return status;
+ }
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
+ status = -EPERM;
+ return status;
+ }
+ if (dir == DSP_TONODE) {
+ if (index < MAX_INPUTS(hnode)) {
+ if (hnode->inputs[index].type == HOSTCONNECT) {
+ *chan_id = hnode->inputs[index].dev_id;
+ status = 0;
+ }
+ }
+ } else {
+ DBC_ASSERT(dir == DSP_FROMNODE);
+ if (index < MAX_OUTPUTS(hnode)) {
+ if (hnode->outputs[index].type == HOSTCONNECT) {
+ *chan_id = hnode->outputs[index].dev_id;
+ status = 0;
+ }
+ }
+ }
+ return status;
+}
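+
+/*
+ * Illustrative sketch only (the caller below is hypothetical and not part of
+ * this driver): looking up the host channel that backs a task node's first
+ * input stream. 'hnode' is assumed to be an already-allocated task or
+ * DAIS-socket node whose input 0 is a host connection.
+ *
+ *	u32 chan;
+ *	int err = node_get_channel_id(hnode, DSP_TONODE, 0, &chan);
+ *
+ *	if (!err)
+ *		pr_info("input stream 0 uses host channel %u\n", chan);
+ */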
+
+/*
+ * ======== node_get_message ========
+ * Purpose:
+ * Retrieve a message from a node on the DSP.
+ */
+int node_get_message(struct node_object *hnode,
+ struct dsp_msg *message, u32 utimeout)
+{
+ struct node_mgr *hnode_mgr;
+ enum node_type node_type;
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+ void *tmp_buf;
+ struct dsp_processorstate proc_state;
+ struct proc_object *hprocessor;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(message != NULL);
+
+ if (!hnode) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hprocessor = hnode->hprocessor;
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_end;
+ /* If processor is in error state then don't attempt to get the
+ message */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_end;
+ }
+ hnode_mgr = hnode->hnode_mgr;
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
+ node_type != NODE_DAISSOCKET) {
+ status = -EPERM;
+ goto func_end;
+ }
+ /* This function will block unless a message is available. Since
+ * DSPNode_RegisterNotify() allows notification when a message
+ * is available, the system can be designed so that
+ * DSPNode_GetMessage() is only called when a message is
+ * available. */
+ intf_fxns = hnode_mgr->intf_fxns;
+ status =
+ (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
+ /* Check if message contains SM descriptor */
+ if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
+ goto func_end;
+
+ /* Translate DSP byte addr to GPP Va. */
+ tmp_buf = cmm_xlator_translate(hnode->xlator,
+ (void *)(message->dw_arg1 *
+ hnode->hnode_mgr->
+ udsp_word_size), CMM_DSPPA2PA);
+ if (tmp_buf != NULL) {
+ /* now convert this GPP Pa to Va */
+ tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
+ CMM_PA2VA);
+ if (tmp_buf != NULL) {
+ /* Adjust SM size in msg */
+ message->dw_arg1 = (u32) tmp_buf;
+ message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
+ } else {
+ status = -ESRCH;
+ }
+ } else {
+ status = -ESRCH;
+ }
+func_end:
+ dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
+ hnode, message, utimeout);
+ return status;
+}
+
+/*
+ * ======== node_get_nldr_obj ========
+ */
+int node_get_nldr_obj(struct node_mgr *hnode_mgr,
+ struct nldr_object **nldr_ovlyobj)
+{
+ int status = 0;
+ struct node_mgr *node_mgr_obj = hnode_mgr;
+ DBC_REQUIRE(nldr_ovlyobj != NULL);
+
+ if (!hnode_mgr)
+ status = -EFAULT;
+ else
+ *nldr_ovlyobj = node_mgr_obj->nldr_obj;
+
+ DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
+ return status;
+}
+
+/*
+ * ======== node_get_strm_mgr ========
+ * Purpose:
+ * Returns the Stream manager.
+ */
+int node_get_strm_mgr(struct node_object *hnode,
+ struct strm_mgr **strm_man)
+{
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!hnode)
+ status = -EFAULT;
+ else
+ *strm_man = hnode->hnode_mgr->strm_mgr_obj;
+
+ return status;
+}
+
+/*
+ * ======== node_get_load_type ========
+ */
+enum nldr_loadtype node_get_load_type(struct node_object *hnode)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hnode);
+ if (!hnode) {
+ dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
+ return -1;
+ } else {
+ return hnode->dcd_props.obj_data.node_obj.us_load_type;
+ }
+}
+
+/*
+ * ======== node_get_timeout ========
+ * Purpose:
+ * Returns the timeout value for this node.
+ */
+u32 node_get_timeout(struct node_object *hnode)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hnode);
+ if (!hnode) {
+ dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
+ return 0;
+ } else {
+ return hnode->utimeout;
+ }
+}
+
+/*
+ * ======== node_get_type ========
+ * Purpose:
+ * Returns the node type.
+ */
+enum node_type node_get_type(struct node_object *hnode)
+{
+ enum node_type node_type;
+
+ if (hnode == (struct node_object *)DSP_HGPPNODE)
+ node_type = NODE_GPP;
+ else {
+ if (!hnode)
+ node_type = -1;
+ else
+ node_type = hnode->ntype;
+ }
+ return node_type;
+}
+
+/*
+ * ======== node_init ========
+ * Purpose:
+ * Initialize the NODE module.
+ */
+bool node_init(void)
+{
+ DBC_REQUIRE(refs >= 0);
+
+ refs++;
+
+ return true;
+}
+
+/*
+ * ======== node_on_exit ========
+ * Purpose:
+ * Gets called when RMS_EXIT is received for a node.
+ */
+void node_on_exit(struct node_object *hnode, s32 node_status)
+{
+ if (!hnode)
+ return;
+
+ /* Set node state to done */
+ NODE_SET_STATE(hnode, NODE_DONE);
+ hnode->exit_status = node_status;
+ if (hnode->loaded && hnode->phase_split) {
+ (void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
+ nldr_node_obj,
+ NLDR_EXECUTE);
+ hnode->loaded = false;
+ }
+ /* Unblock call to node_terminate */
+ (void)sync_set_event(hnode->sync_done);
+ /* Notify clients */
+ proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
+ ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
+}
+
+/*
+ * ======== node_pause ========
+ * Purpose:
+ * Suspend execution of a node currently running on the DSP.
+ */
+int node_pause(struct node_object *hnode)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ enum node_type node_type;
+ enum node_state state;
+ struct node_mgr *hnode_mgr;
+ int status = 0;
+ u32 proc_id;
+ struct dsp_processorstate proc_state;
+ struct proc_object *hprocessor;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!hnode) {
+ status = -EFAULT;
+ } else {
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
+ status = -EPERM;
+ }
+ if (status)
+ goto func_end;
+
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+
+ if (proc_id == IVA_UNIT)
+ status = -ENOSYS;
+
+ if (!status) {
+ hnode_mgr = hnode->hnode_mgr;
+
+ /* Enter critical section */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+ state = node_get_state(hnode);
+ /* Check node state */
+ if (state != NODE_RUNNING)
+ status = -EBADR;
+
+ if (status)
+ goto func_cont;
+ hprocessor = hnode->hprocessor;
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_cont;
+ /* If processor is in error state then don't attempt
+ to send the message */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_cont;
+ }
+
+ status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
+ hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
+ hnode->node_env, NODE_SUSPENDEDPRI);
+
+ /* Update state */
+ if (status >= 0)
+ NODE_SET_STATE(hnode, NODE_PAUSED);
+
+func_cont:
+ /* End of sync_enter_cs */
+ /* Leave critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ if (status >= 0) {
+ proc_notify_clients(hnode->hprocessor,
+ DSP_NODESTATECHANGE);
+ ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
+ }
+ }
+func_end:
+ dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
+ return status;
+}
+
+/*
+ * ======== node_put_message ========
+ * Purpose:
+ * Send a message to a message node, task node, or XDAIS socket node. This
+ * function will block until the message stream can accommodate the
+ * message, or a timeout occurs.
+ */
+int node_put_message(struct node_object *hnode,
+ const struct dsp_msg *pmsg, u32 utimeout)
+{
+ struct node_mgr *hnode_mgr = NULL;
+ enum node_type node_type;
+ struct bridge_drv_interface *intf_fxns;
+ enum node_state state;
+ int status = 0;
+ void *tmp_buf;
+ struct dsp_msg new_msg;
+ struct dsp_processorstate proc_state;
+ struct proc_object *hprocessor;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pmsg != NULL);
+
+ if (!hnode) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hprocessor = hnode->hprocessor;
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_end;
+ /* If processor is in bad state then don't attempt sending the
+ message */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_end;
+ }
+ hnode_mgr = hnode->hnode_mgr;
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
+ node_type != NODE_DAISSOCKET)
+ status = -EPERM;
+
+ if (!status) {
+ /* Check node state. Can't send messages to a node after
+ * we've sent the RMS_EXIT command. There is still the
+ * possibility that node_terminate can be called after we've
+ * checked the state. Could add another SYNC object to
+ * prevent this (can't use node_mgr_lock, since we don't
+ * want to block other NODE functions). However, the node may
+ * still exit on its own, before this message is sent. */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+ state = node_get_state(hnode);
+ if (state == NODE_TERMINATING || state == NODE_DONE)
+ status = -EBADR;
+
+ /* end of sync_enter_cs */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ }
+ if (status)
+ goto func_end;
+
+ /* assign pmsg values to new msg */
+ new_msg = *pmsg;
+ /* Now, check if message contains a SM buffer descriptor */
+ if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
+ /* Translate GPP Va to DSP physical buf Ptr. */
+ tmp_buf = cmm_xlator_translate(hnode->xlator,
+ (void *)new_msg.dw_arg1,
+ CMM_VA2DSPPA);
+ if (tmp_buf != NULL) {
+ /* got translation, convert to MAUs in msg */
+ if (hnode->hnode_mgr->udsp_word_size != 0) {
+ new_msg.dw_arg1 =
+ (u32) tmp_buf /
+ hnode->hnode_mgr->udsp_word_size;
+ /* MAUs */
+ new_msg.dw_arg2 /= hnode->hnode_mgr->
+ udsp_word_size;
+ } else {
+ pr_err("%s: udsp_word_size is zero!\n",
+ __func__);
+ status = -EPERM; /* bad DSPWordSize */
+ }
+ } else { /* failed to translate buffer address */
+ status = -ESRCH;
+ }
+ }
+ if (!status) {
+ intf_fxns = hnode_mgr->intf_fxns;
+ status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
+ &new_msg, utimeout);
+ }
+func_end:
+ dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
+ "status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
+ return status;
+}
+
+/*
+ * ======== node_register_notify ========
+ * Purpose:
+ * Register to be notified on specific events for this node.
+ */
+int node_register_notify(struct node_object *hnode, u32 event_mask,
+ u32 notify_type,
+ struct dsp_notification *hnotification)
+{
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hnotification != NULL);
+
+ if (!hnode) {
+ status = -EFAULT;
+ } else {
+ /* Check if event mask is a valid node related event */
+ if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
+ status = -EINVAL;
+
+ /* Check if notify type is valid */
+ if (notify_type != DSP_SIGNALEVENT)
+ status = -EINVAL;
+
+ /* Only one Notification can be registered at a
+ * time - Limitation */
+ if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
+ status = -EINVAL;
+ }
+ if (!status) {
+ if (event_mask == DSP_NODESTATECHANGE) {
+ status = ntfy_register(hnode->ntfy_obj, hnotification,
+ event_mask & DSP_NODESTATECHANGE,
+ notify_type);
+ } else {
+ /* Send Message part of event mask to msg_ctrl */
+ intf_fxns = hnode->hnode_mgr->intf_fxns;
+ status = (*intf_fxns->pfn_msg_register_notify)
+ (hnode->msg_queue_obj,
+ event_mask & DSP_NODEMESSAGEREADY, notify_type,
+ hnotification);
+ }
+
+ }
+ dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
+ "hnotification: %p status 0x%x\n", __func__, hnode,
+ event_mask, notify_type, hnotification, status);
+ return status;
+}
+
+/*
+ * ======== node_run ========
+ * Purpose:
+ * Start execution of a node's execute phase, or resume execution of a node
+ * that has been suspended (via node_pause()) on the DSP. Load the
+ * node's execute function if necessary.
+ */
+int node_run(struct node_object *hnode)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ struct node_mgr *hnode_mgr;
+ enum node_type node_type;
+ enum node_state state;
+ u32 ul_execute_fxn;
+ u32 ul_fxn_addr;
+ int status = 0;
+ u32 proc_id;
+ struct bridge_drv_interface *intf_fxns;
+ struct dsp_processorstate proc_state;
+ struct proc_object *hprocessor;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!hnode) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ hprocessor = hnode->hprocessor;
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_end;
+ /* If processor is in error state then don't attempt to run the node */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_end;
+ }
+ node_type = node_get_type(hnode);
+ if (node_type == NODE_DEVICE)
+ status = -EPERM;
+ if (status)
+ goto func_end;
+
+ hnode_mgr = hnode->hnode_mgr;
+ if (!hnode_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ intf_fxns = hnode_mgr->intf_fxns;
+ /* Enter critical section */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ state = node_get_state(hnode);
+ if (state != NODE_CREATED && state != NODE_PAUSED)
+ status = -EBADR;
+
+ if (!status)
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+
+ if (status)
+ goto func_cont1;
+
+ if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
+ goto func_cont1;
+
+ if (state == NODE_CREATED) {
+ /* If node's execute function is not loaded, load it */
+ if (!(hnode->loaded) && hnode->phase_split) {
+ status =
+ hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
+ NLDR_EXECUTE);
+ if (!status) {
+ hnode->loaded = true;
+ } else {
+ pr_err("%s: fail - load execute code: 0x%x\n",
+ __func__, status);
+ }
+ }
+ if (!status) {
+ /* Get address of node's execute function */
+ if (proc_id == IVA_UNIT)
+ ul_execute_fxn = (u32) hnode->node_env;
+ else {
+ status = get_fxn_address(hnode, &ul_execute_fxn,
+ EXECUTEPHASE);
+ }
+ }
+ if (!status) {
+ ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
+ status =
+ disp_node_run(hnode_mgr->disp_obj, hnode,
+ ul_fxn_addr, ul_execute_fxn,
+ hnode->node_env);
+ }
+ } else if (state == NODE_PAUSED) {
+ ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
+ status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
+ ul_fxn_addr, hnode->node_env,
+ NODE_GET_PRIORITY(hnode));
+ } else {
+ /* We should never get here */
+ DBC_ASSERT(false);
+ }
+func_cont1:
+ /* Update node state. */
+ if (status >= 0)
+ NODE_SET_STATE(hnode, NODE_RUNNING);
+ else /* Set state back to previous value */
+ NODE_SET_STATE(hnode, state);
+ /* End of sync_enter_cs */
+ /* Exit critical section */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ if (status >= 0) {
+ proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
+ ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
+ }
+func_end:
+ dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
+ return status;
+}
+
+/*
+ * ======== node_terminate ========
+ * Purpose:
+ * Signal a node running on the DSP that it should exit its execute phase
+ * function.
+ */
+int node_terminate(struct node_object *hnode, int *pstatus)
+{
+ struct node_object *pnode = (struct node_object *)hnode;
+ struct node_mgr *hnode_mgr = NULL;
+ enum node_type node_type;
+ struct bridge_drv_interface *intf_fxns;
+ enum node_state state;
+ struct dsp_msg msg, killmsg;
+ int status = 0;
+ u32 proc_id, kill_time_out;
+ struct deh_mgr *hdeh_mgr;
+ struct dsp_processorstate proc_state;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pstatus != NULL);
+
+ if (!hnode || !hnode->hnode_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ if (pnode->hprocessor == NULL) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ status = proc_get_processor_id(pnode->hprocessor, &proc_id);
+
+ if (!status) {
+ hnode_mgr = hnode->hnode_mgr;
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
+ status = -EPERM;
+ }
+ if (!status) {
+ /* Check node state */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+ state = node_get_state(hnode);
+ if (state != NODE_RUNNING) {
+ status = -EBADR;
+ /* Set the exit status if node terminated on
+ * its own. */
+ if (state == NODE_DONE)
+ *pstatus = hnode->exit_status;
+
+ } else {
+ NODE_SET_STATE(hnode, NODE_TERMINATING);
+ }
+ /* end of sync_enter_cs */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ }
+ if (!status) {
+ /*
+ * Send exit message. Do not change state to NODE_DONE
+ * here. That will be done in callback.
+ */
+ status = proc_get_state(pnode->hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_cont;
+ /* If the processor is in an error state then don't attempt to
+ * send a kill task command */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_cont;
+ }
+
+ msg.dw_cmd = RMS_EXIT;
+ msg.dw_arg1 = hnode->node_env;
+ killmsg.dw_cmd = RMS_KILLTASK;
+ killmsg.dw_arg1 = hnode->node_env;
+ intf_fxns = hnode_mgr->intf_fxns;
+
+ if (hnode->utimeout > MAXTIMEOUT)
+ kill_time_out = MAXTIMEOUT;
+ else
+ kill_time_out = (hnode->utimeout) * 2;
+
+ status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
+ hnode->utimeout);
+ if (status)
+ goto func_cont;
+
+ /*
+ * Wait on synchronization object that will be
+ * posted in the callback on receiving RMS_EXIT
+ * message, or by node_delete. Check for valid hnode,
+ * in case posted by node_delete().
+ */
+ status = sync_wait_on_event(hnode->sync_done,
+ kill_time_out / 2);
+ if (status != ETIME)
+ goto func_cont;
+
+ status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
+ &killmsg, hnode->utimeout);
+ if (status)
+ goto func_cont;
+ status = sync_wait_on_event(hnode->sync_done,
+ kill_time_out / 2);
+ if (status) {
+ /*
+ * The node did not acknowledge the kill request;
+ * report it as a DSP exception through the DEH
+ * manager.
+ */
+ dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
+ if (!hdeh_mgr)
+ goto func_cont;
+
+ bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
+ }
+ }
+func_cont:
+ if (!status) {
+ /* Enter CS before getting exit status, in case node was
+ * deleted. */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+ /* Make sure node wasn't deleted while we blocked */
+ if (!hnode) {
+ status = -EPERM;
+ } else {
+ *pstatus = hnode->exit_status;
+ dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
+ __func__, hnode, hnode->node_env, status);
+ }
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+ } /* End of sync_enter_cs */
+func_end:
+ return status;
+}
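+
+/*
+ * Summary of the node state transitions implemented in this file (node
+ * allocation itself happens in node_allocate(), elsewhere in this module):
+ *
+ *	NODE_ALLOCATED   --node_create()-->    NODE_CREATED
+ *	NODE_CREATED     --node_run()-->       NODE_RUNNING
+ *	NODE_RUNNING     --node_pause()-->     NODE_PAUSED
+ *	NODE_PAUSED      --node_run()-->       NODE_RUNNING
+ *	NODE_RUNNING     --node_terminate()--> NODE_TERMINATING
+ *	NODE_TERMINATING --RMS_EXIT ack-->     NODE_DONE (set in node_on_exit())
+ *
+ * A node that exits on its own goes straight to NODE_DONE via node_on_exit().
+ * node_delete() handles any of these states; it runs the non-device node's
+ * delete phase whenever a node environment exists and then frees the
+ * GPP-side resources.
+ */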
+
+/*
+ * ======== delete_node ========
+ * Purpose:
+ * Free GPP resources allocated in node_allocate() or node_connect().
+ */
+static void delete_node(struct node_object *hnode,
+ struct process_context *pr_ctxt)
+{
+ struct node_mgr *hnode_mgr;
+ struct cmm_xlatorobject *xlator;
+ struct bridge_drv_interface *intf_fxns;
+ u32 i;
+ enum node_type node_type;
+ struct stream_chnl stream;
+ struct node_msgargs node_msg_args;
+ struct node_taskargs task_arg_obj;
+#ifdef DSP_DMM_DEBUG
+ struct dmm_object *dmm_mgr;
+ struct proc_object *p_proc_object =
+ (struct proc_object *)hnode->hprocessor;
+#endif
+ int status;
+ if (!hnode)
+ goto func_end;
+ hnode_mgr = hnode->hnode_mgr;
+ if (!hnode_mgr)
+ goto func_end;
+ xlator = hnode->xlator;
+ node_type = node_get_type(hnode);
+ if (node_type != NODE_DEVICE) {
+ node_msg_args = hnode->create_args.asa.node_msg_args;
+ kfree(node_msg_args.pdata);
+
+ /* Free msg_ctrl queue */
+ if (hnode->msg_queue_obj) {
+ intf_fxns = hnode_mgr->intf_fxns;
+ (*intf_fxns->pfn_msg_delete_queue) (hnode->
+ msg_queue_obj);
+ hnode->msg_queue_obj = NULL;
+ }
+
+ kfree(hnode->sync_done);
+
+ /* Free all stream info */
+ if (hnode->inputs) {
+ for (i = 0; i < MAX_INPUTS(hnode); i++) {
+ stream = hnode->inputs[i];
+ free_stream(hnode_mgr, stream);
+ }
+ kfree(hnode->inputs);
+ hnode->inputs = NULL;
+ }
+ if (hnode->outputs) {
+ for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
+ stream = hnode->outputs[i];
+ free_stream(hnode_mgr, stream);
+ }
+ kfree(hnode->outputs);
+ hnode->outputs = NULL;
+ }
+ task_arg_obj = hnode->create_args.asa.task_arg_obj;
+ if (task_arg_obj.strm_in_def) {
+ for (i = 0; i < MAX_INPUTS(hnode); i++) {
+ kfree(task_arg_obj.strm_in_def[i].sz_device);
+ task_arg_obj.strm_in_def[i].sz_device = NULL;
+ }
+ kfree(task_arg_obj.strm_in_def);
+ task_arg_obj.strm_in_def = NULL;
+ }
+ if (task_arg_obj.strm_out_def) {
+ for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
+ kfree(task_arg_obj.strm_out_def[i].sz_device);
+ task_arg_obj.strm_out_def[i].sz_device = NULL;
+ }
+ kfree(task_arg_obj.strm_out_def);
+ task_arg_obj.strm_out_def = NULL;
+ }
+ if (task_arg_obj.udsp_heap_res_addr) {
+ status = proc_un_map(hnode->hprocessor, (void *)
+ task_arg_obj.udsp_heap_addr,
+ pr_ctxt);
+
+ status = proc_un_reserve_memory(hnode->hprocessor,
+ (void *)
+ task_arg_obj.
+ udsp_heap_res_addr,
+ pr_ctxt);
+#ifdef DSP_DMM_DEBUG
+ status = dmm_get_handle(p_proc_object, &dmm_mgr);
+ if (dmm_mgr)
+ dmm_mem_map_dump(dmm_mgr);
+ else
+ status = DSP_EHANDLE;
+#endif
+ }
+ }
+ if (node_type != NODE_MESSAGE) {
+ kfree(hnode->stream_connect);
+ hnode->stream_connect = NULL;
+ }
+ kfree(hnode->pstr_dev_name);
+ hnode->pstr_dev_name = NULL;
+
+ if (hnode->ntfy_obj) {
+ ntfy_delete(hnode->ntfy_obj);
+ kfree(hnode->ntfy_obj);
+ hnode->ntfy_obj = NULL;
+ }
+
+ /* These were allocated in dcd_get_object_def (via node_allocate) */
+ kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
+ hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;
+
+ kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
+ hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;
+
+ kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
+ hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;
+
+ kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
+ hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;
+
+ /* Free all SM address translator resources */
+ if (xlator) {
+ (void)cmm_xlator_delete(xlator, true); /* force free */
+ xlator = NULL;
+ }
+
+ kfree(hnode->nldr_node_obj);
+ hnode->nldr_node_obj = NULL;
+ hnode->hnode_mgr = NULL;
+ kfree(hnode);
+ hnode = NULL;
+func_end:
+ return;
+}
+
+/*
+ * ======== delete_node_mgr ========
+ * Purpose:
+ * Frees the node manager.
+ */
+static void delete_node_mgr(struct node_mgr *hnode_mgr)
+{
+ struct node_object *hnode;
+
+ if (hnode_mgr) {
+ /* Free resources */
+ if (hnode_mgr->hdcd_mgr)
+ dcd_destroy_manager(hnode_mgr->hdcd_mgr);
+
+ /* Remove any elements remaining in lists */
+ if (hnode_mgr->node_list) {
+ while ((hnode = (struct node_object *)
+ lst_get_head(hnode_mgr->node_list)))
+ delete_node(hnode, NULL);
+
+ DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
+ kfree(hnode_mgr->node_list);
+ }
+ mutex_destroy(&hnode_mgr->node_mgr_lock);
+ if (hnode_mgr->ntfy_obj) {
+ ntfy_delete(hnode_mgr->ntfy_obj);
+ kfree(hnode_mgr->ntfy_obj);
+ }
+
+ if (hnode_mgr->pipe_map)
+ gb_delete(hnode_mgr->pipe_map);
+
+ if (hnode_mgr->pipe_done_map)
+ gb_delete(hnode_mgr->pipe_done_map);
+
+ if (hnode_mgr->chnl_map)
+ gb_delete(hnode_mgr->chnl_map);
+
+ if (hnode_mgr->dma_chnl_map)
+ gb_delete(hnode_mgr->dma_chnl_map);
+
+ if (hnode_mgr->zc_chnl_map)
+ gb_delete(hnode_mgr->zc_chnl_map);
+
+ if (hnode_mgr->disp_obj)
+ disp_delete(hnode_mgr->disp_obj);
+
+ if (hnode_mgr->strm_mgr_obj)
+ strm_delete(hnode_mgr->strm_mgr_obj);
+
+ /* Delete the loader */
+ if (hnode_mgr->nldr_obj)
+ hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);
+
+ if (hnode_mgr->loader_init)
+ hnode_mgr->nldr_fxns.pfn_exit();
+
+ kfree(hnode_mgr);
+ }
+}
+
+/*
+ * ======== fill_stream_connect ========
+ * Purpose:
+ * Fills stream information.
+ */
+static void fill_stream_connect(struct node_object *node1,
+ struct node_object *node2,
+ u32 stream1, u32 stream2)
+{
+ u32 strm_index;
+ struct dsp_streamconnect *strm1 = NULL;
+ struct dsp_streamconnect *strm2 = NULL;
+ enum node_type node1_type = NODE_TASK;
+ enum node_type node2_type = NODE_TASK;
+
+ node1_type = node_get_type(node1);
+ node2_type = node_get_type(node2);
+ if (node1 != (struct node_object *)DSP_HGPPNODE) {
+
+ if (node1_type != NODE_DEVICE) {
+ strm_index = node1->num_inputs +
+ node1->num_outputs - 1;
+ strm1 = &(node1->stream_connect[strm_index]);
+ strm1->cb_struct = sizeof(struct dsp_streamconnect);
+ strm1->this_node_stream_index = stream1;
+ }
+
+ if (node2 != (struct node_object *)DSP_HGPPNODE) {
+ /* NODE == > NODE */
+ if (node1_type != NODE_DEVICE) {
+ strm1->connected_node = node2;
+ strm1->ui_connected_node_id = node2->node_uuid;
+ strm1->connected_node_stream_index = stream2;
+ strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
+ }
+ if (node2_type != NODE_DEVICE) {
+ strm_index = node2->num_inputs +
+ node2->num_outputs - 1;
+ strm2 = &(node2->stream_connect[strm_index]);
+ strm2->cb_struct =
+ sizeof(struct dsp_streamconnect);
+ strm2->this_node_stream_index = stream2;
+ strm2->connected_node = node1;
+ strm2->ui_connected_node_id = node1->node_uuid;
+ strm2->connected_node_stream_index = stream1;
+ strm2->connect_type = CONNECTTYPE_NODEINPUT;
+ }
+ } else if (node1_type != NODE_DEVICE)
+ strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
+ } else {
+ /* GPP == > NODE */
+ DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
+ strm_index = node2->num_inputs + node2->num_outputs - 1;
+ strm2 = &(node2->stream_connect[strm_index]);
+ strm2->cb_struct = sizeof(struct dsp_streamconnect);
+ strm2->this_node_stream_index = stream2;
+ strm2->connect_type = CONNECTTYPE_GPPINPUT;
+ }
+}
+
+/*
+ * ======== fill_stream_def ========
+ * Purpose:
+ * Fills Stream attributes.
+ */
+static void fill_stream_def(struct node_object *hnode,
+ struct node_strmdef *pstrm_def,
+ struct dsp_strmattr *pattrs)
+{
+ struct node_mgr *hnode_mgr = hnode->hnode_mgr;
+
+ if (pattrs != NULL) {
+ pstrm_def->num_bufs = pattrs->num_bufs;
+ pstrm_def->buf_size =
+ pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
+ pstrm_def->seg_id = pattrs->seg_id;
+ pstrm_def->buf_alignment = pattrs->buf_alignment;
+ pstrm_def->utimeout = pattrs->utimeout;
+ } else {
+ pstrm_def->num_bufs = DEFAULTNBUFS;
+ pstrm_def->buf_size =
+ DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
+ pstrm_def->seg_id = DEFAULTSEGID;
+ pstrm_def->buf_alignment = DEFAULTALIGNMENT;
+ pstrm_def->utimeout = DEFAULTTIMEOUT;
+ }
+}
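+
+/*
+ * Worked example with hypothetical numbers (not taken from this driver):
+ * if a client asks for pattrs->buf_size = 1024 GPP bytes and the node
+ * manager reports udsp_data_mau_size = 2, the stream definition handed to
+ * the DSP carries buf_size = 512 minimum addressable units. The DEFAULT*
+ * fallbacks above go through the same byte-to-MAU conversion.
+ */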
+
+/*
+ * ======== free_stream ========
+ * Purpose:
+ * Updates the channel mask and frees the pipe id.
+ */
+static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
+{
+ /* Free the pipe id only if the other node has already been deleted. */
+ if (stream.type == NODECONNECT) {
+ if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
+ /* The other node has already been deleted */
+ gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
+ gb_clear(hnode_mgr->pipe_map, stream.dev_id);
+ } else {
+ /* The other node has not been deleted yet */
+ gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
+ }
+ } else if (stream.type == HOSTCONNECT) {
+ if (stream.dev_id < hnode_mgr->ul_num_chnls) {
+ gb_clear(hnode_mgr->chnl_map, stream.dev_id);
+ } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
+ /* dsp-dma */
+ gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
+ (1 * hnode_mgr->ul_num_chnls));
+ } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
+ /* zero-copy */
+ gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
+ (2 * hnode_mgr->ul_num_chnls));
+ }
+ }
+}
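+
+/*
+ * The host-connect device id space is split into three consecutive ranges of
+ * ul_num_chnls entries each - plain channels, DSP-DMA channels and zero-copy
+ * channels - matching the three bitmaps allocated in node_create_mgr(). A
+ * minimal sketch of the reverse mapping used above (the helper name is
+ * hypothetical, not part of this driver):
+ *
+ *	static u32 chnl_slot(struct node_mgr *mgr, u32 dev_id)
+ *	{
+ *		return dev_id % mgr->ul_num_chnls;
+ *	}
+ */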
+
+/*
+ * ======== get_fxn_address ========
+ * Purpose:
+ * Retrieves the address for create, execute or delete phase for a node.
+ */
+static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
+ u32 phase)
+{
+ char *pstr_fxn_name = NULL;
+ struct node_mgr *hnode_mgr = hnode->hnode_mgr;
+ int status = 0;
+ DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
+ node_get_type(hnode) == NODE_DAISSOCKET ||
+ node_get_type(hnode) == NODE_MESSAGE);
+
+ switch (phase) {
+ case CREATEPHASE:
+ pstr_fxn_name =
+ hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
+ break;
+ case EXECUTEPHASE:
+ pstr_fxn_name =
+ hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
+ break;
+ case DELETEPHASE:
+ pstr_fxn_name =
+ hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
+ break;
+ default:
+ /* Should never get here */
+ DBC_ASSERT(false);
+ break;
+ }
+
+ status =
+ hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
+ pstr_fxn_name, fxn_addr);
+
+ return status;
+}
+
+/*
+ * ======== get_node_info ========
+ * Purpose:
+ * Retrieves the node information.
+ */
+void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
+{
+ u32 i;
+
+ DBC_REQUIRE(hnode);
+ DBC_REQUIRE(node_info != NULL);
+
+ node_info->cb_struct = sizeof(struct dsp_nodeinfo);
+ node_info->nb_node_database_props =
+ hnode->dcd_props.obj_data.node_obj.ndb_props;
+ node_info->execution_priority = hnode->prio;
+ node_info->device_owner = hnode->device_owner;
+ node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
+ node_info->node_env = hnode->node_env;
+
+ node_info->ns_execution_state = node_get_state(hnode);
+
+ /* Copy stream connect data */
+ for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
+ node_info->sc_stream_connection[i] = hnode->stream_connect[i];
+
+}
+
+/*
+ * ======== get_node_props ========
+ * Purpose:
+ * Retrieve node properties.
+ */
+static int get_node_props(struct dcd_manager *hdcd_mgr,
+ struct node_object *hnode,
+ const struct dsp_uuid *node_uuid,
+ struct dcd_genericobj *dcd_prop)
+{
+ u32 len;
+ struct node_msgargs *pmsg_args;
+ struct node_taskargs *task_arg_obj;
+ enum node_type node_type = NODE_TASK;
+ struct dsp_ndbprops *pndb_props =
+ &(dcd_prop->obj_data.node_obj.ndb_props);
+ int status = 0;
+ char sz_uuid[MAXUUIDLEN];
+
+ status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
+ DSP_DCDNODETYPE, dcd_prop);
+
+ if (!status) {
+ hnode->ntype = node_type = pndb_props->ntype;
+
+ /* Create UUID value to set in registry. */
+ uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
+ MAXUUIDLEN);
+ dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
+
+ /* Fill in message args that come from NDB */
+ if (node_type != NODE_DEVICE) {
+ pmsg_args = &(hnode->create_args.asa.node_msg_args);
+ pmsg_args->seg_id =
+ dcd_prop->obj_data.node_obj.msg_segid;
+ pmsg_args->notify_type =
+ dcd_prop->obj_data.node_obj.msg_notify_type;
+ pmsg_args->max_msgs = pndb_props->message_depth;
+ dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
+ pmsg_args->max_msgs);
+ } else {
+ /* Copy device name */
+ DBC_REQUIRE(pndb_props->ac_name);
+ len = strlen(pndb_props->ac_name);
+ DBC_ASSERT(len < MAXDEVNAMELEN);
+ hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
+ if (hnode->pstr_dev_name == NULL) {
+ status = -ENOMEM;
+ } else {
+ strncpy(hnode->pstr_dev_name,
+ pndb_props->ac_name, len);
+ }
+ }
+ }
+ if (!status) {
+ /* Fill in create args that come from NDB */
+ if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
+ task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
+ task_arg_obj->prio = pndb_props->prio;
+ task_arg_obj->stack_size = pndb_props->stack_size;
+ task_arg_obj->sys_stack_size =
+ pndb_props->sys_stack_size;
+ task_arg_obj->stack_seg = pndb_props->stack_seg;
+ dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
+ "0x%x words System Stack Size: 0x%x words "
+ "Stack Segment: 0x%x profile count : 0x%x\n",
+ task_arg_obj->prio, task_arg_obj->stack_size,
+ task_arg_obj->sys_stack_size,
+ task_arg_obj->stack_seg,
+ pndb_props->count_profiles);
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== get_proc_props ========
+ * Purpose:
+ * Retrieve the processor properties.
+ */
+static int get_proc_props(struct node_mgr *hnode_mgr,
+ struct dev_object *hdev_obj)
+{
+ struct cfg_hostres *host_res;
+ struct bridge_dev_context *pbridge_context;
+ int status = 0;
+
+ status = dev_get_bridge_context(hdev_obj, &pbridge_context);
+ if (!pbridge_context)
+ status = -EFAULT;
+
+ if (!status) {
+ host_res = pbridge_context->resources;
+ if (!host_res)
+ return -EPERM;
+ hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
+ hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
+ hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
+
+ /*
+ * PROC will add an API to get dsp_processorinfo.
+ * Fill in default values for now.
+ */
+ /* TODO -- Instead of hard coding, take from registry */
+ hnode_mgr->proc_family = 6000;
+ hnode_mgr->proc_type = 6410;
+ hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
+ hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
+ hnode_mgr->udsp_word_size = DSPWORDSIZE;
+ hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
+ hnode_mgr->udsp_mau_size = 1;
+
+ }
+ return status;
+}
+
+/*
+ * ======== node_get_uuid_props ========
+ * Purpose:
+ * Fetch Node UUID properties from DCD/DOF file.
+ */
+int node_get_uuid_props(void *hprocessor,
+ const struct dsp_uuid *node_uuid,
+ struct dsp_ndbprops *node_props)
+{
+ struct node_mgr *hnode_mgr = NULL;
+ struct dev_object *hdev_obj;
+ int status = 0;
+ struct dcd_nodeprops dcd_node_props;
+ struct dsp_processorstate proc_state;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hprocessor != NULL);
+ DBC_REQUIRE(node_uuid != NULL);
+
+ if (hprocessor == NULL || node_uuid == NULL) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ status = proc_get_state(hprocessor, &proc_state,
+ sizeof(struct dsp_processorstate));
+ if (status)
+ goto func_end;
+ /* If processor is in error state then don't attempt
+ to send the message */
+ if (proc_state.proc_state == PROC_ERROR) {
+ status = -EPERM;
+ goto func_end;
+ }
+
+ status = proc_get_dev_object(hprocessor, &hdev_obj);
+ if (hdev_obj) {
+ status = dev_get_node_manager(hdev_obj, &hnode_mgr);
+ if (hnode_mgr == NULL) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ }
+
+ /*
+ * Enter the critical section. This is needed because
+ * dcd_get_object_def will ultimately end up calling dbll_open/close,
+ * which needs to be protected in order to not corrupt the zlib manager
+ * (COD).
+ */
+ mutex_lock(&hnode_mgr->node_mgr_lock);
+
+ dcd_node_props.pstr_create_phase_fxn = NULL;
+ dcd_node_props.pstr_execute_phase_fxn = NULL;
+ dcd_node_props.pstr_delete_phase_fxn = NULL;
+ dcd_node_props.pstr_i_alg_name = NULL;
+
+ status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
+ (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
+ (struct dcd_genericobj *)&dcd_node_props);
+
+ if (!status) {
+ *node_props = dcd_node_props.ndb_props;
+ kfree(dcd_node_props.pstr_create_phase_fxn);
+
+ kfree(dcd_node_props.pstr_execute_phase_fxn);
+
+ kfree(dcd_node_props.pstr_delete_phase_fxn);
+
+ kfree(dcd_node_props.pstr_i_alg_name);
+ }
+ /* Leave the critical section, we're done. */
+ mutex_unlock(&hnode_mgr->node_mgr_lock);
+func_end:
+ return status;
+}
+
+/*
+ * ======== get_rms_fxns ========
+ * Purpose:
+ * Retrieve the RMS functions.
+ */
+static int get_rms_fxns(struct node_mgr *hnode_mgr)
+{
+ s32 i;
+ struct dev_object *dev_obj = hnode_mgr->hdev_obj;
+ int status = 0;
+
+ static char *psz_fxns[NUMRMSFXNS] = {
+ "RMS_queryServer", /* RMSQUERYSERVER */
+ "RMS_configureServer", /* RMSCONFIGURESERVER */
+ "RMS_createNode", /* RMSCREATENODE */
+ "RMS_executeNode", /* RMSEXECUTENODE */
+ "RMS_deleteNode", /* RMSDELETENODE */
+ "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
+ "RMS_readMemory", /* RMSREADMEMORY */
+ "RMS_writeMemory", /* RMSWRITEMEMORY */
+ "RMS_copy", /* RMSCOPY */
+ };
+
+ for (i = 0; i < NUMRMSFXNS; i++) {
+ status = dev_get_symbol(dev_obj, psz_fxns[i],
+ &(hnode_mgr->ul_fxn_addrs[i]));
+ if (status) {
+ if (status == -ESPIPE) {
+ /*
+ * May be loaded dynamically (in the future),
+ * but return an error for now.
+ */
+ dev_dbg(bridge, "%s: RMS function: %s currently"
+ " not loaded\n", __func__, psz_fxns[i]);
+ } else {
+ dev_dbg(bridge, "%s: Symbol not found: %s "
+ "status = 0x%x\n", __func__,
+ psz_fxns[i], status);
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ======== ovly ========
+ * Purpose:
+ * Called during overlay. Sends a command to RMS to copy a block of data.
+ */
+static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
+ u32 ul_num_bytes, u32 mem_space)
+{
+ struct node_object *hnode = (struct node_object *)priv_ref;
+ struct node_mgr *hnode_mgr;
+ u32 ul_bytes = 0;
+ u32 ul_size;
+ u32 ul_timeout;
+ int status = 0;
+ struct bridge_dev_context *hbridge_context;
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+
+ DBC_REQUIRE(hnode);
+
+ hnode_mgr = hnode->hnode_mgr;
+
+ ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
+ ul_timeout = hnode->utimeout;
+
+ /* Call new MemCopy function */
+ intf_fxns = hnode_mgr->intf_fxns;
+ status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
+ if (!status) {
+ status =
+ (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
+ dsp_run_addr, dsp_load_addr,
+ ul_num_bytes, (u32) mem_space);
+ if (!status)
+ ul_bytes = ul_num_bytes;
+ else
+ pr_debug("%s: failed to copy brd memory, status 0x%x\n",
+ __func__, status);
+ } else {
+ pr_debug("%s: failed to get Bridge context, status 0x%x\n",
+ __func__, status);
+ }
+
+ return ul_bytes;
+}
+
+/*
+ * ======== mem_write ========
+ */
+static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
+ u32 ul_num_bytes, u32 mem_space)
+{
+ struct node_object *hnode = (struct node_object *)priv_ref;
+ struct node_mgr *hnode_mgr;
+ u16 mem_sect_type;
+ u32 ul_timeout;
+ int status = 0;
+ struct bridge_dev_context *hbridge_context;
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+
+ DBC_REQUIRE(hnode);
+ DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
+
+ hnode_mgr = hnode->hnode_mgr;
+
+ ul_timeout = hnode->utimeout;
+ mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
+
+ /* Call new MemWrite function */
+ intf_fxns = hnode_mgr->intf_fxns;
+ status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
+ status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
+ dsp_add, ul_num_bytes, mem_sect_type);
+
+ return ul_num_bytes;
+}
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+/*
+ * ======== node_find_addr ========
+ */
+int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
+ u32 offset_range, void *sym_addr_output, char *sym_name)
+{
+ struct node_object *node_obj;
+ int status = -ENOENT;
+ u32 n;
+
+ pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
+ (unsigned int) node_mgr,
+ sym_addr, offset_range,
+ (unsigned int) sym_addr_output, sym_name);
+
+ node_obj = (struct node_object *)(node_mgr->node_list->head.next);
+
+ for (n = 0; n < node_mgr->num_nodes; n++) {
+ status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
+ offset_range, sym_addr_output, sym_name);
+
+ if (!status)
+ break;
+
+ node_obj = (struct node_object *) (node_obj->list_elem.next);
+ }
+
+ return status;
+}
+#endif
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
new file mode 100644
index 00000000000..44c26e11fc4
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -0,0 +1,1936 @@
+/*
+ * proc.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Processor interface at the driver level.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+/* ------------------------------------ Host OS */
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/list.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/sync.h>
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspdeh.h>
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/procpriv.h>
+#include <dspbridge/dmm.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/mgr.h>
+#include <dspbridge/node.h>
+#include <dspbridge/nldr.h>
+#include <dspbridge/rmm.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/dbdcd.h>
+#include <dspbridge/msg.h>
+#include <dspbridge/dspioctl.h>
+#include <dspbridge/drv.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/proc.h>
+#include <dspbridge/pwr.h>
+
+#include <dspbridge/resourcecleanup.h>
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+#define MAXCMDLINELEN 255
+#define PROC_ENVPROCID "PROC_ID=%d"
+#define MAXPROCIDLEN (8 + 5)
+#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
+#define PWR_TIMEOUT 500 /* Sleep/wake timeout in msec */
+#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
+
+#define DSP_CACHE_LINE 128
+
+#define BUFMODE_MASK (3 << 14)
+
+/* Buffer modes from DSP perspective */
+#define RBUF 0x4000 /* Input buffer */
+#define WBUF 0x8000 /* Output Buffer */
+
+extern struct device *bridge;
+
+/* ----------------------------------- Globals */
+
+/* The proc_object structure. */
+struct proc_object {
+ struct list_head link; /* Link to next proc_object */
+ struct dev_object *hdev_obj; /* Device this PROC represents */
+ u32 process; /* Process owning this Processor */
+ struct mgr_object *hmgr_obj; /* Manager Object Handle */
+ u32 attach_count; /* Processor attach count */
+ u32 processor_id; /* Processor number */
+ u32 utimeout; /* Time out count */
+ enum dsp_procstate proc_state; /* Processor state */
+ u32 ul_unit; /* DDSP unit number */
+ bool is_already_attached; /*
+ * True if the Device below has
+ * GPP Client attached
+ */
+ struct ntfy_object *ntfy_obj; /* Manages notifications */
+ /* Bridge Context Handle */
+ struct bridge_dev_context *hbridge_context;
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+ char *psz_last_coff;
+ struct list_head proc_list;
+};
+
+static u32 refs;
+
+DEFINE_MUTEX(proc_lock); /* For critical sections */
+
+/* ----------------------------------- Function Prototypes */
+static int proc_monitor(struct proc_object *proc_obj);
+static s32 get_envp_count(char **envp);
+static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
+ s32 cnew_envp, char *sz_var);
+
+/* remember mapping information */
+static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
+ u32 mpu_addr, u32 dsp_addr, u32 size)
+{
+ struct dmm_map_object *map_obj;
+
+ u32 num_usr_pgs = size / PG_SIZE4K;
+
+ pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+ __func__, mpu_addr,
+ dsp_addr, size);
+
+ map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
+ if (!map_obj) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&map_obj->link);
+
+ map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!map_obj->pages) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ kfree(map_obj);
+ return NULL;
+ }
+
+ map_obj->mpu_addr = mpu_addr;
+ map_obj->dsp_addr = dsp_addr;
+ map_obj->size = size;
+ map_obj->num_usr_pgs = num_usr_pgs;
+
+ spin_lock(&pr_ctxt->dmm_map_lock);
+ list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
+ spin_unlock(&pr_ctxt->dmm_map_lock);
+
+ return map_obj;
+}
+
+static int match_exact_map_obj(struct dmm_map_object *map_obj,
+ u32 dsp_addr, u32 size)
+{
+ if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
+ pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
+ __func__, dsp_addr, map_obj->size, size);
+
+ return map_obj->dsp_addr == dsp_addr &&
+ map_obj->size == size;
+}
+
+static void remove_mapping_information(struct process_context *pr_ctxt,
+ u32 dsp_addr, u32 size)
+{
+ struct dmm_map_object *map_obj;
+
+ pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
+ dsp_addr, size);
+
+ spin_lock(&pr_ctxt->dmm_map_lock);
+ list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
+ pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+ __func__,
+ map_obj->mpu_addr,
+ map_obj->dsp_addr,
+ map_obj->size);
+
+ if (match_exact_map_obj(map_obj, dsp_addr, size)) {
+ pr_debug("%s: match, deleting map info\n", __func__);
+ list_del(&map_obj->link);
+ kfree(map_obj->dma_info.sg);
+ kfree(map_obj->pages);
+ kfree(map_obj);
+ goto out;
+ }
+ pr_debug("%s: candidate didn't match\n", __func__);
+ }
+
+ pr_err("%s: failed to find given map info\n", __func__);
+out:
+ spin_unlock(&pr_ctxt->dmm_map_lock);
+}
+
+static int match_containing_map_obj(struct dmm_map_object *map_obj,
+ u32 mpu_addr, u32 size)
+{
+ u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
+
+ return mpu_addr >= map_obj->mpu_addr &&
+ mpu_addr + size <= map_obj_end;
+}
+
+static struct dmm_map_object *find_containing_mapping(
+ struct process_context *pr_ctxt,
+ u32 mpu_addr, u32 size)
+{
+ struct dmm_map_object *map_obj;
+ pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
+ mpu_addr, size);
+
+ spin_lock(&pr_ctxt->dmm_map_lock);
+ list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
+ pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+ __func__,
+ map_obj->mpu_addr,
+ map_obj->dsp_addr,
+ map_obj->size);
+ if (match_containing_map_obj(map_obj, mpu_addr, size)) {
+ pr_debug("%s: match!\n", __func__);
+ goto out;
+ }
+
+ pr_debug("%s: no match!\n", __func__);
+ }
+
+ map_obj = NULL;
+out:
+ spin_unlock(&pr_ctxt->dmm_map_lock);
+ return map_obj;
+}
+
+static int find_first_page_in_cache(struct dmm_map_object *map_obj,
+ unsigned long mpu_addr)
+{
+ u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
+ u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
+ int pg_index = requested_base_page - mapped_base_page;
+
+ if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
+ pr_err("%s: failed (got %d)\n", __func__, pg_index);
+ return -1;
+ }
+
+ pr_debug("%s: first page is %d\n", __func__, pg_index);
+ return pg_index;
+}
+
+static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
+ int pg_i)
+{
+ pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
+ pg_i, map_obj->num_usr_pgs);
+
+ if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
+ pr_err("%s: requested pg_i %d is out of mapped range\n",
+ __func__, pg_i);
+ return NULL;
+ }
+
+ return map_obj->pages[pg_i];
+}
+
+/*
+ * ======== proc_attach ========
+ * Purpose:
+ * Prepare for communication with a particular DSP processor, and return
+ * a handle to the processor object.
+ */
+int
+proc_attach(u32 processor_id,
+ const struct dsp_processorattrin *attr_in,
+ void **ph_processor, struct process_context *pr_ctxt)
+{
+ int status = 0;
+ struct dev_object *hdev_obj;
+ struct proc_object *p_proc_object = NULL;
+ struct mgr_object *hmgr_obj = NULL;
+ struct drv_object *hdrv_obj = NULL;
+ u8 dev_type;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(ph_processor != NULL);
+
+ if (pr_ctxt->hprocessor) {
+ *ph_processor = pr_ctxt->hprocessor;
+ return status;
+ }
+
+ /* Get the Driver and Manager Object Handles */
+ status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
+ if (!status)
+ status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
+
+ if (!status) {
+ /* Get the Device Object */
+ status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
+ }
+ if (!status)
+ status = dev_get_dev_type(hdev_obj, &dev_type);
+
+ if (status)
+ goto func_end;
+
+ /* If we made it this far, create the Processor object: */
+ p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
+ /* Fill out the Processor Object: */
+ if (p_proc_object == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ p_proc_object->hdev_obj = hdev_obj;
+ p_proc_object->hmgr_obj = hmgr_obj;
+ p_proc_object->processor_id = dev_type;
+ /* Store TGID instead of process handle */
+ p_proc_object->process = current->tgid;
+
+ INIT_LIST_HEAD(&p_proc_object->proc_list);
+
+ if (attr_in)
+ p_proc_object->utimeout = attr_in->utimeout;
+ else
+ p_proc_object->utimeout = PROC_DFLT_TIMEOUT;
+
+ status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
+ if (!status) {
+ status = dev_get_bridge_context(hdev_obj,
+ &p_proc_object->hbridge_context);
+ if (status)
+ kfree(p_proc_object);
+ } else
+ kfree(p_proc_object);
+
+ if (status)
+ goto func_end;
+
+ /* Create the Notification Object */
+ /* This is created with no event mask, no notify mask
+ * and no valid handle to the notification. They all get
+ * filled up when proc_register_notify is called */
+ p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+ GFP_KERNEL);
+ if (p_proc_object->ntfy_obj)
+ ntfy_init(p_proc_object->ntfy_obj);
+ else
+ status = -ENOMEM;
+
+ if (!status) {
+ /* Insert the Processor Object into the DEV List.
+ * Return handle to this Processor Object:
+ * Find out if the Device is already attached to a
+ * Processor. If so, return AlreadyAttached status */
+ lst_init_elem(&p_proc_object->link);
+ status = dev_insert_proc_object(p_proc_object->hdev_obj,
+ (u32) p_proc_object,
+ &p_proc_object->
+ is_already_attached);
+ if (!status) {
+ if (p_proc_object->is_already_attached)
+ status = 0;
+ } else {
+ if (p_proc_object->ntfy_obj) {
+ ntfy_delete(p_proc_object->ntfy_obj);
+ kfree(p_proc_object->ntfy_obj);
+ }
+
+ kfree(p_proc_object);
+ }
+ if (!status) {
+ *ph_processor = (void *)p_proc_object;
+ pr_ctxt->hprocessor = *ph_processor;
+ (void)proc_notify_clients(p_proc_object,
+ DSP_PROCESSORATTACH);
+ }
+ } else {
+ /* Don't leak memory if status is failed */
+ kfree(p_proc_object);
+ }
+func_end:
+ DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
+ (!status && p_proc_object));
+
+ return status;
+}
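+
+/*
+ * Illustrative sketch only (not part of this patch): a typical caller
+ * attaches to DSP unit 0 with default attributes; attr_in may be NULL,
+ * in which case proc_attach() falls back to PROC_DFLT_TIMEOUT as above.
+ */
+static int example_attach_default(struct process_context *pr_ctxt,
+ void **hproc)
+{
+ return proc_attach(0, NULL, hproc, pr_ctxt);
+}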
+
+static int get_exec_file(struct cfg_devnode *dev_node_obj,
+ struct dev_object *hdev_obj,
+ u32 size, char *exec_file)
+{
+ u8 dev_type;
+ s32 len;
+
+ dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
+ if (dev_type == DSP_UNIT) {
+ return cfg_get_exec_file(dev_node_obj, size, exec_file);
+ } else if (dev_type == IVA_UNIT) {
+ if (iva_img) {
+ len = strlen(iva_img);
+ strncpy(exec_file, iva_img, len + 1);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/*
+ * ======== proc_auto_start ========
+ * Purpose:
+ * A Particular device gets loaded with the default image
+ * if the AutoStart flag is set.
+ * Parameters:
+ * hdev_obj: Handle to the Device
+ * Returns:
+ * 0: On Successful Loading
+ * -EPERM: General failure.
+ * Requires:
+ * hdev_obj != NULL
+ * Ensures:
+ */
+int proc_auto_start(struct cfg_devnode *dev_node_obj,
+ struct dev_object *hdev_obj)
+{
+ int status = -EPERM;
+ struct proc_object *p_proc_object;
+ char sz_exec_file[MAXCMDLINELEN];
+ char *argv[2];
+ struct mgr_object *hmgr_obj = NULL;
+ u8 dev_type;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(dev_node_obj != NULL);
+ DBC_REQUIRE(hdev_obj != NULL);
+
+ /* Create a Dummy PROC Object */
+ status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
+ if (status)
+ goto func_end;
+
+ p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
+ if (p_proc_object == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+ p_proc_object->hdev_obj = hdev_obj;
+ p_proc_object->hmgr_obj = hmgr_obj;
+ status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
+ if (!status)
+ status = dev_get_bridge_context(hdev_obj,
+ &p_proc_object->hbridge_context);
+ if (status)
+ goto func_cont;
+
+ /* Stop the Device, put it into standby mode */
+ status = proc_stop(p_proc_object);
+
+ if (status)
+ goto func_cont;
+
+ /* Get the default executable for this board... */
+ dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
+ p_proc_object->processor_id = dev_type;
+ status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
+ sz_exec_file);
+ if (!status) {
+ argv[0] = sz_exec_file;
+ argv[1] = NULL;
+ /* ...and try to load it: */
+ status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
+ if (!status)
+ status = proc_start(p_proc_object);
+ }
+ kfree(p_proc_object->psz_last_coff);
+ p_proc_object->psz_last_coff = NULL;
+func_cont:
+ kfree(p_proc_object);
+func_end:
+ return status;
+}
+
+/*
+ * ======== proc_ctrl ========
+ * Purpose:
+ * Pass control information to the GPP device driver managing the
+ * DSP processor.
+ *
+ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
+ * application developer's API.
+ * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
+ * Operation. arg can be null.
+ */
+int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata *arg)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = hprocessor;
+ u32 timeout = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (p_proc_object) {
+ /* intercept PWR deep sleep command */
+ if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
+ timeout = arg->cb_data;
+ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
+ }
+ /* intercept PWR emergency sleep command */
+ else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
+ timeout = arg->cb_data;
+ status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
+ } else if (dw_cmd == PWR_DEEPSLEEP) {
+ /* timeout = arg->cb_data; */
+ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
+ }
+ /* intercept PWR wake commands */
+ else if (dw_cmd == BRDIOCTL_WAKEUP) {
+ timeout = arg->cb_data;
+ status = pwr_wake_dsp(timeout);
+ } else if (dw_cmd == PWR_WAKEUP) {
+ /* timeout = arg->cb_data; */
+ status = pwr_wake_dsp(timeout);
+ } else
+ if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl)
+ (p_proc_object->hbridge_context, dw_cmd,
+ arg))) {
+ status = 0;
+ } else {
+ status = -EPERM;
+ }
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
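+
+/*
+ * Illustrative sketch only (not part of this patch): requesting a timed
+ * deep sleep through proc_ctrl(). The single cb_data word of struct
+ * dsp_cbdata is assumed to carry the timeout, mirroring the intercept
+ * code above.
+ */
+static int example_request_deep_sleep(void *hprocessor)
+{
+ struct dsp_cbdata arg;
+
+ arg.cb_data = PWR_TIMEOUT; /* sleep/wake timeout in msec */
+ return proc_ctrl(hprocessor, BRDIOCTL_DEEPSLEEP, &arg);
+}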
+
+/*
+ * ======== proc_detach ========
+ * Purpose:
+ * Destroys the Processor Object. Removes the notification from the Dev
+ * List.
+ */
+int proc_detach(struct process_context *pr_ctxt)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = NULL;
+
+ DBC_REQUIRE(refs > 0);
+
+ p_proc_object = (struct proc_object *)pr_ctxt->hprocessor;
+
+ if (p_proc_object) {
+ /* Notify the Client */
+ ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
+ /* Remove the notification memory */
+ if (p_proc_object->ntfy_obj) {
+ ntfy_delete(p_proc_object->ntfy_obj);
+ kfree(p_proc_object->ntfy_obj);
+ }
+
+ kfree(p_proc_object->psz_last_coff);
+ p_proc_object->psz_last_coff = NULL;
+ /* Remove the Proc from the DEV List */
+ (void)dev_remove_proc_object(p_proc_object->hdev_obj,
+ (u32) p_proc_object);
+ /* Free the Processor Object */
+ kfree(p_proc_object);
+ pr_ctxt->hprocessor = NULL;
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/*
+ * ======== proc_enum_nodes ========
+ * Purpose:
+ * Enumerate and get configuration information about nodes allocated
+ * on a DSP processor.
+ */
+int proc_enum_nodes(void *hprocessor, void **node_tab,
+ u32 node_tab_size, u32 *pu_num_nodes,
+ u32 *pu_allocated)
+{
+ int status = -EPERM;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct node_mgr *hnode_mgr = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
+ DBC_REQUIRE(pu_num_nodes != NULL);
+ DBC_REQUIRE(pu_allocated != NULL);
+
+ if (p_proc_object) {
+ if (!(dev_get_node_manager(p_proc_object->hdev_obj,
+ &hnode_mgr))) {
+ if (hnode_mgr) {
+ status = node_enum_nodes(hnode_mgr, node_tab,
+ node_tab_size,
+ pu_num_nodes,
+ pu_allocated);
+ }
+ }
+ } else {
+ status = -EFAULT;
+ }
+
+ return status;
+}
+
+/* Cache operations use kernel addresses instead of user addresses */
+static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
+ ssize_t len, int pg_i)
+{
+ struct page *page;
+ unsigned long offset;
+ ssize_t rest;
+ int ret = 0, i = 0;
+ struct scatterlist *sg = map_obj->dma_info.sg;
+
+ while (len) {
+ page = get_mapping_page(map_obj, pg_i);
+ if (!page) {
+ pr_err("%s: no page for %08lx\n", __func__, start);
+ ret = -EINVAL;
+ goto out;
+ } else if (IS_ERR(page)) {
+ pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
+ PTR_ERR(page));
+ ret = PTR_ERR(page);
+ goto out;
+ }
+
+ offset = start & ~PAGE_MASK;
+ rest = min_t(ssize_t, PAGE_SIZE - offset, len);
+
+ sg_set_page(&sg[i], page, rest, offset);
+
+ len -= rest;
+ start += rest;
+ pg_i++, i++;
+ }
+
+ if (i != map_obj->dma_info.num_pages) {
+ pr_err("%s: bad number of sg iterations\n", __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int memory_regain_ownership(struct dmm_map_object *map_obj,
+ unsigned long start, ssize_t len, enum dma_data_direction dir)
+{
+ int ret = 0;
+ unsigned long first_data_page = start >> PAGE_SHIFT;
+ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
+ /* calculating the number of pages this area spans */
+ unsigned long num_pages = last_data_page - first_data_page + 1;
+ struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
+
+ if (!dma_info->sg)
+ goto out;
+
+ if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
+ pr_err("%s: dma info doesn't match given params\n", __func__);
+ return -EINVAL;
+ }
+
+ dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
+
+ pr_debug("%s: dma_map_sg unmapped\n", __func__);
+
+ kfree(dma_info->sg);
+
+ map_obj->dma_info.sg = NULL;
+
+out:
+ return ret;
+}
+
+/* Cache operations use kernel addresses instead of user addresses */
+static int memory_give_ownership(struct dmm_map_object *map_obj,
+ unsigned long start, ssize_t len, enum dma_data_direction dir)
+{
+ int pg_i, ret, sg_num;
+ struct scatterlist *sg;
+ unsigned long first_data_page = start >> PAGE_SHIFT;
+ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
+ /* calculating the number of pages this area spans */
+ unsigned long num_pages = last_data_page - first_data_page + 1;
+
+ pg_i = find_first_page_in_cache(map_obj, start);
+ if (pg_i < 0) {
+ pr_err("%s: failed to find first page in cache\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ pr_err("%s: kcalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sg_init_table(sg, num_pages);
+
+ /* cleanup a previous sg allocation */
+ /* this may happen if application doesn't signal for e/o DMA */
+ kfree(map_obj->dma_info.sg);
+
+ map_obj->dma_info.sg = sg;
+ map_obj->dma_info.dir = dir;
+ map_obj->dma_info.num_pages = num_pages;
+
+ ret = build_dma_sg(map_obj, start, len, pg_i);
+ if (ret)
+ goto kfree_sg;
+
+ sg_num = dma_map_sg(bridge, sg, num_pages, dir);
+ if (sg_num < 1) {
+ pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
+ ret = -EFAULT;
+ goto kfree_sg;
+ }
+
+ pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
+ map_obj->dma_info.sg_num = sg_num;
+
+ return 0;
+
+kfree_sg:
+ kfree(sg);
+ map_obj->dma_info.sg = NULL;
+out:
+ return ret;
+}
+
+int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir)
+{
+ /* Keep STATUS here for future additions to this function */
+ int status = 0;
+ struct process_context *pr_ctxt = (struct process_context *) hprocessor;
+ struct dmm_map_object *map_obj;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!pr_ctxt) {
+ status = -EFAULT;
+ goto err_out;
+ }
+
+ pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
+ (u32)pmpu_addr,
+ ul_size, dir);
+
+ /* find the requested memory area in the cached mapping information */
+ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
+ if (!map_obj) {
+ pr_err("%s: find_containing_mapping failed\n", __func__);
+ status = -EFAULT;
+ goto err_out;
+ }
+
+ if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
+ pr_err("%s: InValid address parameters %p %x\n",
+ __func__, pmpu_addr, ul_size);
+ status = -EFAULT;
+ }
+
+err_out:
+
+ return status;
+}
+
+int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ enum dma_data_direction dir)
+{
+ /* Keep STATUS here for future additions to this function */
+ int status = 0;
+ struct process_context *pr_ctxt = (struct process_context *) hprocessor;
+ struct dmm_map_object *map_obj;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!pr_ctxt) {
+ status = -EFAULT;
+ goto err_out;
+ }
+
+ pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
+ (u32)pmpu_addr,
+ ul_size, dir);
+
+ /* find the requested memory area in the cached mapping information */
+ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
+ if (!map_obj) {
+ pr_err("%s: find_containing_mapping failed\n", __func__);
+ status = -EFAULT;
+ goto err_out;
+ }
+
+ if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
+ pr_err("%s: InValid address parameters %p %x\n",
+ __func__, pmpu_addr, ul_size);
+ status = -EFAULT;
+ goto err_out;
+ }
+
+err_out:
+ return status;
+}
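+
+/*
+ * Illustrative sketch only (not part of this patch): the expected pairing
+ * of proc_begin_dma()/proc_end_dma() around a DSP transfer on a buffer
+ * that was previously mapped for this process context.
+ */
+static int example_dma_round_trip(struct process_context *pr_ctxt,
+ void *buf, u32 len)
+{
+ int status;
+
+ status = proc_begin_dma(pr_ctxt, buf, len, DMA_BIDIRECTIONAL);
+ if (status)
+ return status;
+
+ /* ... the DSP reads and writes the buffer here ... */
+
+ return proc_end_dma(pr_ctxt, buf, len, DMA_BIDIRECTIONAL);
+}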
+
+/*
+ * ======== proc_flush_memory ========
+ * Purpose:
+ * Flush cache
+ */
+int proc_flush_memory(void *hprocessor, void *pmpu_addr,
+ u32 ul_size, u32 ul_flags)
+{
+ enum dma_data_direction dir = DMA_BIDIRECTIONAL;
+
+ return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
+}
+
+/*
+ * ======== proc_invalidate_memory ========
+ * Purpose:
+ * Invalidates the memory specified
+ */
+int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
+{
+ enum dma_data_direction dir = DMA_FROM_DEVICE;
+
+ return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
+}
+
+/*
+ * ======== proc_get_resource_info ========
+ * Purpose:
+ * Enumerate the resources currently available on a processor.
+ */
+int proc_get_resource_info(void *hprocessor, u32 resource_type,
+ struct dsp_resourceinfo *resource_info,
+ u32 resource_info_size)
+{
+ int status = -EPERM;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct node_mgr *hnode_mgr = NULL;
+ struct nldr_object *nldr_obj = NULL;
+ struct rmm_target_obj *rmm = NULL;
+ struct io_mgr *hio_mgr = NULL; /* IO manager handle */
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(resource_info != NULL);
+ DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
+
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ switch (resource_type) {
+ case DSP_RESOURCE_DYNDARAM:
+ case DSP_RESOURCE_DYNSARAM:
+ case DSP_RESOURCE_DYNEXTERNAL:
+ case DSP_RESOURCE_DYNSRAM:
+ status = dev_get_node_manager(p_proc_object->hdev_obj,
+ &hnode_mgr);
+ if (!hnode_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
+ if (!status) {
+ status = nldr_get_rmm_manager(nldr_obj, &rmm);
+ if (rmm) {
+ if (!rmm_stat(rmm,
+ (enum dsp_memtype)resource_type,
+ (struct dsp_memstat *)
+ &(resource_info->result.
+ mem_stat)))
+ status = -EINVAL;
+ } else {
+ status = -EFAULT;
+ }
+ }
+ break;
+ case DSP_RESOURCE_PROCLOAD:
+ status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
+ if (hio_mgr)
+ status =
+ p_proc_object->intf_fxns->
+ pfn_io_get_proc_load(hio_mgr,
+ (struct dsp_procloadstat *)
+ &(resource_info->result.
+ proc_load_stat));
+ else
+ status = -EFAULT;
+ break;
+ default:
+ status = -EPERM;
+ break;
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== proc_exit ========
+ * Purpose:
+ * Decrement reference count, and free resources when reference count is
+ * 0.
+ */
+void proc_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== proc_get_dev_object ========
+ * Purpose:
+ * Return the Dev Object handle for a given Processor.
+ *
+ */
+int proc_get_dev_object(void *hprocessor,
+ struct dev_object **device_obj)
+{
+ int status = -EPERM;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(device_obj != NULL);
+
+ if (p_proc_object) {
+ *device_obj = p_proc_object->hdev_obj;
+ status = 0;
+ } else {
+ *device_obj = NULL;
+ status = -EFAULT;
+ }
+
+ DBC_ENSURE((!status && *device_obj != NULL) ||
+ (status && *device_obj == NULL));
+
+ return status;
+}
+
+/*
+ * ======== proc_get_state ========
+ * Purpose:
+ * Report the state of the specified DSP processor.
+ */
+int proc_get_state(void *hprocessor,
+ struct dsp_processorstate *proc_state_obj,
+ u32 state_info_size)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ int brd_status;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(proc_state_obj != NULL);
+ DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
+
+ if (p_proc_object) {
+ /* First, retrieve BRD state information */
+ status = (*p_proc_object->intf_fxns->pfn_brd_status)
+ (p_proc_object->hbridge_context, &brd_status);
+ if (!status) {
+ switch (brd_status) {
+ case BRD_STOPPED:
+ proc_state_obj->proc_state = PROC_STOPPED;
+ break;
+ case BRD_SLEEP_TRANSITION:
+ case BRD_DSP_HIBERNATION:
+ /* Fall through */
+ case BRD_RUNNING:
+ proc_state_obj->proc_state = PROC_RUNNING;
+ break;
+ case BRD_LOADED:
+ proc_state_obj->proc_state = PROC_LOADED;
+ break;
+ case BRD_ERROR:
+ proc_state_obj->proc_state = PROC_ERROR;
+ break;
+ default:
+ proc_state_obj->proc_state = 0xFF;
+ status = -EPERM;
+ break;
+ }
+ }
+ } else {
+ status = -EFAULT;
+ }
+ dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
+ __func__, status, proc_state_obj->proc_state);
+ return status;
+}
+
+/*
+ * ======== proc_get_trace ========
+ * Purpose:
+ * Retrieve the current contents of the trace buffer, located on the
+ * Processor. Predefined symbols for the trace buffer must have been
+ * configured into the DSP executable.
+ * Details:
+ * Only the symbols SYS_PUTCBEG and SYS_PUTCEND are supported for
+ * defining a trace buffer; treat this as an undocumented feature.
+ * This call is destructive, meaning the processor is placed in the monitor
+ * state as a result of this function.
+ */
+int proc_get_trace(void *hprocessor, u8 *pbuf, u32 max_size)
+{
+ return -ENOSYS;
+}
+
+/*
+ * ======== proc_init ========
+ * Purpose:
+ * Initialize PROC's private state, keeping a reference count on each call
+ */
+bool proc_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
+
+/*
+ * ======== proc_load ========
+ * Purpose:
+ * Reset a processor and load a new base program image.
+ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
+ * application developer's API.
+ */
+int proc_load(void *hprocessor, const s32 argc_index,
+ const char **user_args, const char **user_envp)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct io_mgr *hio_mgr; /* IO manager handle */
+ struct msg_mgr *hmsg_mgr;
+ struct cod_manager *cod_mgr; /* Code manager handle */
+ char *pargv0; /* temp argv[0] ptr */
+ char **new_envp; /* Updated envp[] array. */
+ char sz_proc_id[MAXPROCIDLEN]; /* Buffer for the "PROC_ID=<n>" string */
+ s32 envp_elems; /* Num elements in envp[]. */
+ s32 cnew_envp; /* Num elements in new_envp[]. */
+ s32 nproc_id = 0; /* Anticipate MP version. */
+ struct dcd_manager *hdcd_handle;
+ struct dmm_object *dmm_mgr;
+ u32 dw_ext_end;
+ u32 proc_id;
+ int brd_state;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+#ifdef OPT_LOAD_TIME_INSTRUMENTATION
+ struct timeval tv1;
+ struct timeval tv2;
+#endif
+
+#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
+ struct dspbridge_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
+#endif
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(argc_index > 0);
+ DBC_REQUIRE(user_args != NULL);
+
+#ifdef OPT_LOAD_TIME_INSTRUMENTATION
+ do_gettimeofday(&tv1);
+#endif
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
+ if (!cod_mgr) {
+ status = -EPERM;
+ goto func_end;
+ }
+ status = proc_stop(hprocessor);
+ if (status)
+ goto func_end;
+
+ /* Place the board in the monitor state. */
+ status = proc_monitor(hprocessor);
+ if (status)
+ goto func_end;
+
+ /* Save ptr to original argv[0]. */
+ pargv0 = (char *)user_args[0];
+ /*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
+ envp_elems = get_envp_count((char **)user_envp);
+ cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
+ new_envp = kzalloc(cnew_envp * sizeof(char *), GFP_KERNEL);
+ if (new_envp) {
+ status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
+ nproc_id);
+ if (status == -1) {
+ dev_dbg(bridge, "%s: Proc ID string overflow\n",
+ __func__);
+ status = -EPERM;
+ } else {
+ new_envp =
+ prepend_envp(new_envp, (char **)user_envp,
+ envp_elems, cnew_envp, sz_proc_id);
+ /* Get the DCD Handle */
+ status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
+ (u32 *) &hdcd_handle);
+ if (!status) {
+ /* Before proceeding with new load,
+ * check if a previously registered COFF
+ * exists.
+ * If yes, unregister nodes in previously
+ * registered COFF. If any error occurred,
+ * set previously registered COFF to NULL. */
+ if (p_proc_object->psz_last_coff != NULL) {
+ status =
+ dcd_auto_unregister(hdcd_handle,
+ p_proc_object->
+ psz_last_coff);
+ /* Regardless of auto unregister status,
+ * free previously allocated
+ * memory. */
+ kfree(p_proc_object->psz_last_coff);
+ p_proc_object->psz_last_coff = NULL;
+ }
+ }
+ /* On success, do cod_open_base() */
+ status = cod_open_base(cod_mgr, (char *)user_args[0],
+ COD_SYMB);
+ }
+ } else {
+ status = -ENOMEM;
+ }
+ if (!status) {
+ /* Auto-register data base */
+ /* Get the DCD Handle */
+ status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
+ (u32 *) &hdcd_handle);
+ if (!status) {
+ /* Auto register nodes in specified COFF
+ * file. If registration did not fail,
+ * (status = 0 or -EACCES)
+ * save the name of the COFF file for
+ * de-registration in the future. */
+ status =
+ dcd_auto_register(hdcd_handle,
+ (char *)user_args[0]);
+ if (status == -EACCES)
+ status = 0;
+
+ if (status) {
+ status = -EPERM;
+ } else {
+ DBC_ASSERT(p_proc_object->psz_last_coff ==
+ NULL);
+ /* Allocate memory for pszLastCoff */
+ p_proc_object->psz_last_coff =
+ kzalloc((strlen(user_args[0]) +
+ 1), GFP_KERNEL);
+ /* If memory allocated, save COFF file name */
+ if (p_proc_object->psz_last_coff) {
+ strncpy(p_proc_object->psz_last_coff,
+ (char *)user_args[0],
+ (strlen((char *)user_args[0]) +
+ 1));
+ }
+ }
+ }
+ }
+ /* Update shared memory address and size */
+ if (!status) {
+ /* Create the message manager. This must be done
+ * before calling the IOOnLoaded function. */
+ dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
+ if (!hmsg_mgr) {
+ status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
+ (msg_onexit) node_on_exit);
+ DBC_ASSERT(!status);
+ dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
+ }
+ }
+ if (!status) {
+ /* Notify the IO manager that the base image has been loaded */
+ status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
+ if (hio_mgr)
+ status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
+ (hio_mgr);
+ else
+ status = -EFAULT;
+ }
+ if (!status) {
+ /* Now, attempt to load an exec: */
+
+ /* Boost the OPP level to Maximum level supported by baseport */
+#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
+ if (pdata->cpu_set_freq)
+ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
+#endif
+ status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
+ dev_brd_write_fxn,
+ p_proc_object->hdev_obj, NULL);
+ if (status) {
+ if (status == -EBADF) {
+ dev_dbg(bridge, "%s: Failure to Load the EXE\n",
+ __func__);
+ }
+ if (status == -ESPIPE) {
+ pr_err("%s: Couldn't parse the file\n",
+ __func__);
+ }
+ }
+ /* Requesting the lowest opp supported */
+#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
+ if (pdata->cpu_set_freq)
+ (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
+#endif
+
+ }
+ if (!status) {
+ /* Update the Processor status to loaded */
+ status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
+ (p_proc_object->hbridge_context, BRD_LOADED);
+ if (!status) {
+ p_proc_object->proc_state = PROC_LOADED;
+ if (p_proc_object->ntfy_obj)
+ proc_notify_clients(p_proc_object,
+ DSP_PROCESSORSTATECHANGE);
+ }
+ }
+ if (!status) {
+ status = proc_get_processor_id(hprocessor, &proc_id);
+ if (proc_id == DSP_UNIT) {
+ /* Use all available DSP address space after EXTMEM
+ * for DMM */
+ if (!status)
+ status = cod_get_sym_value(cod_mgr, EXTEND,
+ &dw_ext_end);
+
+ /* Reset DMM structs and add an initial free chunk */
+ if (!status) {
+ status =
+ dev_get_dmm_mgr(p_proc_object->hdev_obj,
+ &dmm_mgr);
+ if (dmm_mgr) {
+ /* Set dw_ext_end to DMM START u8
+ * address */
+ dw_ext_end =
+ (dw_ext_end + 1) * DSPWORDSIZE;
+ /* DMM memory is from EXT_END */
+ status = dmm_create_tables(dmm_mgr,
+ dw_ext_end,
+ DMMPOOLSIZE);
+ } else {
+ status = -EFAULT;
+ }
+ }
+ }
+ }
+ /* Restore the original argv[0] */
+ kfree(new_envp);
+ user_args[0] = pargv0;
+ if (!status) {
+ if (!((*p_proc_object->intf_fxns->pfn_brd_status)
+ (p_proc_object->hbridge_context, &brd_state))) {
+ pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
+ kfree(drv_datap->base_img);
+ drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
+ GFP_KERNEL);
+ if (drv_datap->base_img)
+ strncpy(drv_datap->base_img, pargv0,
+ strlen(pargv0) + 1);
+ else
+ status = -ENOMEM;
+ DBC_ASSERT(brd_state == BRD_LOADED);
+ }
+ }
+
+func_end:
+ if (status) {
+ pr_err("%s: Processor failed to load\n", __func__);
+ proc_stop(p_proc_object);
+ }
+ DBC_ENSURE((!status
+ && p_proc_object->proc_state == PROC_LOADED)
+ || status);
+#ifdef OPT_LOAD_TIME_INSTRUMENTATION
+ do_gettimeofday(&tv2);
+ if (tv2.tv_usec < tv1.tv_usec) {
+ tv2.tv_usec += 1000000;
+ tv2.tv_sec--;
+ }
+ dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
+ tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
+#endif
+ return status;
+}
+
+/*
+ * ======== proc_map ========
+ * Purpose:
+ * Maps a MPU buffer to DSP address space.
+ */
+int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
+ void *req_addr, void **pp_map_addr, u32 ul_map_attr,
+ struct process_context *pr_ctxt)
+{
+ u32 va_align;
+ u32 pa_align;
+ struct dmm_object *dmm_mgr;
+ u32 size_align;
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct dmm_map_object *map_obj;
+ u32 tmp_addr = 0;
+
+#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
+ if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
+ if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
+ !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
+ pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
+ (u32)pmpu_addr, ul_size);
+ return -EFAULT;
+ }
+ }
+#endif
+
+ /* Calculate the page-aligned PA, VA and size */
+ va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
+ pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
+ size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
+ PG_SIZE4K);
+
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Critical section */
+ mutex_lock(&proc_lock);
+ dmm_get_handle(p_proc_object, &dmm_mgr);
+ if (dmm_mgr)
+ status = dmm_map_memory(dmm_mgr, va_align, size_align);
+ else
+ status = -EFAULT;
+
+ /* Add mapping to the page tables. */
+ if (!status) {
+
+ /* Mapped address = MSB of VA | LSB of PA */
+ tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
+ /* mapped memory resource tracking */
+ map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
+ size_align);
+ if (!map_obj)
+ status = -ENOMEM;
+ else
+ status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
+ (p_proc_object->hbridge_context, pa_align, va_align,
+ size_align, ul_map_attr, map_obj->pages);
+ }
+ if (!status) {
+ /* Mapped address = MSB of VA | LSB of PA */
+ *pp_map_addr = (void *) tmp_addr;
+ } else {
+ remove_mapping_information(pr_ctxt, tmp_addr, size_align);
+ dmm_un_map_memory(dmm_mgr, va_align, &size_align);
+ }
+ mutex_unlock(&proc_lock);
+
+func_end:
+ dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
+ "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
+ "pa_align %x, size_align %x status 0x%x\n", __func__,
+ hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
+ pp_map_addr, va_align, pa_align, size_align, status);
+
+ return status;
+}
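+
+/*
+ * Illustrative sketch only (not part of this patch): the DSP address
+ * returned by proc_map() combines the 4K-aligned reserved VA with the
+ * page offset of the MPU buffer, as computed above.
+ */
+static u32 example_mapped_addr(u32 va_align, u32 mpu_addr)
+{
+ /* MSBs from the aligned DSP VA, LSBs from the MPU page offset */
+ return va_align | (mpu_addr & (PG_SIZE4K - 1));
+}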
+
+/*
+ * ======== proc_register_notify ========
+ * Purpose:
+ * Register to be notified of specific processor events.
+ */
+int proc_register_notify(void *hprocessor, u32 event_mask,
+ u32 notify_type, struct dsp_notification *hnotification)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct deh_mgr *hdeh_mgr;
+
+ DBC_REQUIRE(hnotification != NULL);
+ DBC_REQUIRE(refs > 0);
+
+ /* Check processor handle */
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Check if event mask is a valid processor related event */
+ if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
+ DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
+ DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
+ DSP_WDTOVERFLOW))
+ status = -EINVAL;
+
+ /* Check if notify type is valid */
+ if (notify_type != DSP_SIGNALEVENT)
+ status = -EINVAL;
+
+ if (!status) {
+ /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
+ * or DSP_PWRERROR then register event immediately. */
+ if (event_mask &
+ ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
+ DSP_WDTOVERFLOW)) {
+ status = ntfy_register(p_proc_object->ntfy_obj,
+ hnotification, event_mask,
+ notify_type);
+ /* Special case alert, special case alert!
+ * If we're trying to *deregister* (i.e. event_mask
+ * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
+ * we have to deregister with the DEH manager.
+ * There's no way to know, based on event_mask which
+ * manager the notification event was registered with,
+ * so if we're trying to deregister and ntfy_register
+ * failed, we'll give the deh manager a shot.
+ */
+ if ((event_mask == 0) && status) {
+ status =
+ dev_get_deh_mgr(p_proc_object->hdev_obj,
+ &hdeh_mgr);
+ status =
+ bridge_deh_register_notify(hdeh_mgr,
+ event_mask,
+ notify_type,
+ hnotification);
+ }
+ } else {
+ status = dev_get_deh_mgr(p_proc_object->hdev_obj,
+ &hdeh_mgr);
+ status =
+ bridge_deh_register_notify(hdeh_mgr,
+ event_mask,
+ notify_type,
+ hnotification);
+
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== proc_reserve_memory ========
+ * Purpose:
+ * Reserve a virtually contiguous region of DSP address space.
+ */
+int proc_reserve_memory(void *hprocessor, u32 ul_size,
+ void **pp_rsv_addr,
+ struct process_context *pr_ctxt)
+{
+ struct dmm_object *dmm_mgr;
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct dmm_rsv_object *rsv_obj;
+
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = dmm_get_handle(p_proc_object, &dmm_mgr);
+ if (!dmm_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
+ if (status != 0)
+ goto func_end;
+
+ /*
+ * A successful reserve should be followed by insertion of rsv_obj
+ * into dmm_rsv_list, so that reserved memory resource tracking
+ * remains up to date
+ */
+ rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
+ if (rsv_obj) {
+ rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
+ spin_lock(&pr_ctxt->dmm_rsv_lock);
+ list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
+ spin_unlock(&pr_ctxt->dmm_rsv_lock);
+ }
+
+func_end:
+ dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
+ "status 0x%x\n", __func__, hprocessor,
+ ul_size, pp_rsv_addr, status);
+ return status;
+}
+
+/*
+ * ======== proc_start ========
+ * Purpose:
+ * Start a processor running.
+ */
+int proc_start(void *hprocessor)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct cod_manager *cod_mgr; /* Code manager handle */
+ u32 dw_dsp_addr; /* Loaded code's entry point. */
+ int brd_state;
+
+ DBC_REQUIRE(refs > 0);
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* Call the bridge_brd_start */
+ if (p_proc_object->proc_state != PROC_LOADED) {
+ status = -EBADR;
+ goto func_end;
+ }
+ status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
+ if (!cod_mgr) {
+ status = -EFAULT;
+ goto func_cont;
+ }
+
+ status = cod_get_entry(cod_mgr, &dw_dsp_addr);
+ if (status)
+ goto func_cont;
+
+ status = (*p_proc_object->intf_fxns->pfn_brd_start)
+ (p_proc_object->hbridge_context, dw_dsp_addr);
+ if (status)
+ goto func_cont;
+
+ /* Call dev_create2 */
+ status = dev_create2(p_proc_object->hdev_obj);
+ if (!status) {
+ p_proc_object->proc_state = PROC_RUNNING;
+ /* Deep sleep switches off the peripheral clocks.
+ * We just put the DSP CPU in idle in the idle loop,
+ * so there is no need to send a command to the DSP. */
+
+ if (p_proc_object->ntfy_obj) {
+ proc_notify_clients(p_proc_object,
+ DSP_PROCESSORSTATECHANGE);
+ }
+ } else {
+ /* Failed to create Node Manager and DISP Object.
+ * Stop the Processor from running and put it in the STOPPED state. */
+ (void)(*p_proc_object->intf_fxns->
+ pfn_brd_stop) (p_proc_object->hbridge_context);
+ p_proc_object->proc_state = PROC_STOPPED;
+ }
+func_cont:
+ if (!status) {
+ if (!((*p_proc_object->intf_fxns->pfn_brd_status)
+ (p_proc_object->hbridge_context, &brd_state))) {
+ pr_info("%s: dsp in running state\n", __func__);
+ DBC_ASSERT(brd_state != BRD_HIBERNATION);
+ }
+ } else {
+ pr_err("%s: Failed to start the dsp\n", __func__);
+ proc_stop(p_proc_object);
+ }
+
+func_end:
+ DBC_ENSURE((!status && p_proc_object->proc_state ==
+ PROC_RUNNING) || status);
+ return status;
+}
+
+/*
+ * ======== proc_stop ========
+ * Purpose:
+ * Stop a processor running.
+ */
+int proc_stop(void *hprocessor)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct msg_mgr *hmsg_mgr;
+ struct node_mgr *hnode_mgr;
+ void *hnode;
+ u32 node_tab_size = 1;
+ u32 num_nodes = 0;
+ u32 nodes_allocated = 0;
+ int brd_state;
+
+ DBC_REQUIRE(refs > 0);
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ /* check if there are any running nodes */
+ status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
+ if (!status && hnode_mgr) {
+ status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
+ &num_nodes, &nodes_allocated);
+ if ((status == -EINVAL) || (nodes_allocated > 0)) {
+ pr_err("%s: Can't stop device, active nodes = %d \n",
+ __func__, nodes_allocated);
+ return -EBADR;
+ }
+ }
+ /* Call the bridge_brd_stop */
+ /* It is OK to stop a device that does n't have nodes OR not started */
+ status =
+ (*p_proc_object->intf_fxns->
+ pfn_brd_stop) (p_proc_object->hbridge_context);
+ if (!status) {
+ dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
+ p_proc_object->proc_state = PROC_STOPPED;
+ /* Destroy the Node Manager, msg_ctrl Manager */
+ if (!(dev_destroy2(p_proc_object->hdev_obj))) {
+ /* Destroy the msg_ctrl by calling msg_delete */
+ dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
+ if (hmsg_mgr) {
+ msg_delete(hmsg_mgr);
+ dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
+ }
+ if (!((*p_proc_object->
+ intf_fxns->pfn_brd_status) (p_proc_object->
+ hbridge_context,
+ &brd_state)))
+ DBC_ASSERT(brd_state == BRD_STOPPED);
+ }
+ } else {
+ pr_err("%s: Failed to stop the processor\n", __func__);
+ }
+func_end:
+
+ return status;
+}
+
+/*
+ * ======== proc_un_map ========
+ * Purpose:
+ * Removes a MPU buffer mapping from the DSP address space.
+ */
+int proc_un_map(void *hprocessor, void *map_addr,
+ struct process_context *pr_ctxt)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct dmm_object *dmm_mgr;
+ u32 va_align;
+ u32 size_align;
+
+ va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = dmm_get_handle(hprocessor, &dmm_mgr);
+ if (!dmm_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ /* Critical section */
+ mutex_lock(&proc_lock);
+ /*
+ * Update DMM structures. Get the size to unmap.
+ * This function returns error if the VA is not mapped
+ */
+ status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
+ /* Remove mapping from the page tables. */
+ if (!status) {
+ status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
+ (p_proc_object->hbridge_context, va_align, size_align);
+ }
+
+ mutex_unlock(&proc_lock);
+ if (status)
+ goto func_end;
+
+ /*
+ * A successful unmap should be followed by removal of map_obj
+ * from dmm_map_list, so that mapped memory resource tracking
+ * remains up to date
+ */
+ remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
+
+func_end:
+ dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
+ __func__, hprocessor, map_addr, status);
+ return status;
+}
+
+/*
+ * ======== proc_un_reserve_memory ========
+ * Purpose:
+ * Frees a previously reserved region of DSP address space.
+ */
+int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
+ struct process_context *pr_ctxt)
+{
+ struct dmm_object *dmm_mgr;
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+ struct dmm_rsv_object *rsv_obj;
+
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = dmm_get_handle(p_proc_object, &dmm_mgr);
+ if (!dmm_mgr) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
+ if (status != 0)
+ goto func_end;
+
+ /*
+ * A successful unreserve should be followed by removal of rsv_obj
+ * from dmm_rsv_list, so that reserved memory resource tracking
+ * remains up to date
+ */
+ spin_lock(&pr_ctxt->dmm_rsv_lock);
+ list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
+ if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
+ list_del(&rsv_obj->link);
+ kfree(rsv_obj);
+ break;
+ }
+ }
+ spin_unlock(&pr_ctxt->dmm_rsv_lock);
+
+func_end:
+ dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
+ __func__, hprocessor, prsv_addr, status);
+ return status;
+}
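+
+/*
+ * Illustrative sketch only (not part of this patch): the expected DMM
+ * lifecycle built from the calls above - reserve a DSP virtual region,
+ * map an MPU buffer into it, then tear both down in reverse order.
+ * The map attribute value 0 is a placeholder.
+ */
+static int example_map_lifecycle(void *hproc, struct process_context *ctxt,
+ void *mpu_buf, u32 size)
+{
+ void *rsv_addr;
+ void *dsp_addr;
+ int status;
+
+ status = proc_reserve_memory(hproc, size, &rsv_addr, ctxt);
+ if (status)
+ return status;
+
+ status = proc_map(hproc, mpu_buf, size, rsv_addr, &dsp_addr, 0, ctxt);
+ if (!status) {
+ /* ... hand dsp_addr to the DSP side here ... */
+ proc_un_map(hproc, dsp_addr, ctxt);
+ }
+
+ proc_un_reserve_memory(hproc, rsv_addr, ctxt);
+ return status;
+}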
+
+/*
+ * ======== proc_monitor ========
+ * Purpose:
+ * Place the Processor in Monitor State. This is an internal
+ * function and a requirement before Processor is loaded.
+ * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
+ * In dev_destroy2 we delete the node manager.
+ * Parameters:
+ * p_proc_object: Pointer to Processor Object
+ * Returns:
+ * 0: Processor placed in monitor mode.
+ * !0: Failed to place processor in monitor mode.
+ * Requires:
+ * Valid Processor Handle
+ * Ensures:
+ * Success: ProcObject state is PROC_IDLE
+ */
+static int proc_monitor(struct proc_object *proc_obj)
+{
+ int status = -EPERM;
+ struct msg_mgr *hmsg_mgr;
+ int brd_state;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(proc_obj);
+
+ /* This is needed only when Device is loaded when it is
+ * already 'ACTIVE' */
+ /* Destroy the Node Manager, msg_ctrl Manager */
+ if (!dev_destroy2(proc_obj->hdev_obj)) {
+ /* Destroy the msg_ctrl by calling msg_delete */
+ dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
+ if (hmsg_mgr) {
+ msg_delete(hmsg_mgr);
+ dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
+ }
+ }
+ /* Place the Board in the Monitor State */
+ if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
+ (proc_obj->hbridge_context))) {
+ status = 0;
+ if (!((*proc_obj->intf_fxns->pfn_brd_status)
+ (proc_obj->hbridge_context, &brd_state)))
+ DBC_ASSERT(brd_state == BRD_IDLE);
+ }
+
+ DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
+ status);
+ return status;
+}
+
+/*
+ * ======== get_envp_count ========
+ * Purpose:
+ * Return the number of elements in the envp array, including the
+ * terminating NULL element.
+ */
+static s32 get_envp_count(char **envp)
+{
+ s32 ret = 0;
+ if (envp) {
+ while (*envp++)
+ ret++;
+
+ ret += 1; /* Include the terminating NULL in the count. */
+ }
+
+ return ret;
+}
+
+/*
+ * ======== prepend_envp ========
+ * Purpose:
+ * Prepend an environment variable=value pair to the new envp array, and
+ * copy in the existing var=value pairs in the old envp array.
+ */
+static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
+ s32 cnew_envp, char *sz_var)
+{
+ char **pp_envp = new_envp;
+
+ DBC_REQUIRE(new_envp);
+
+ /* Prepend new environ var=value string */
+ *new_envp++ = sz_var;
+
+ /* Copy user's environment into our own; a non-empty envp already
+ * carries its terminating NULL element. */
+ if (envp_elems)
+ while (envp_elems--)
+ *new_envp++ = *envp++;
+ else
+ /* No user environment: terminate the new array explicitly. */
+ *new_envp = NULL;
+
+ return pp_envp;
+}
+
+/*
+ * ======== proc_notify_clients ========
+ * Purpose:
+ * Notify registered clients of processor events.
+ */
+int proc_notify_clients(void *proc, u32 events)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)proc;
+
+ DBC_REQUIRE(p_proc_object);
+ DBC_REQUIRE(is_valid_proc_event(events));
+ DBC_REQUIRE(refs > 0);
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ ntfy_notify(p_proc_object->ntfy_obj, events);
+func_end:
+ return status;
+}
+
+/*
+ * ======== proc_notify_all_clients ========
+ * Purpose:
+ * Notify all clients attached to a particular DSP of processor events.
+ */
+int proc_notify_all_clients(void *proc, u32 events)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)proc;
+
+ DBC_REQUIRE(is_valid_proc_event(events));
+ DBC_REQUIRE(refs > 0);
+
+ if (!p_proc_object) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ dev_notify_clients(p_proc_object->hdev_obj, events);
+
+func_end:
+ return status;
+}
+
+/*
+ * ======== proc_get_processor_id ========
+ * Purpose:
+ * Retrieves the processor ID.
+ */
+int proc_get_processor_id(void *proc, u32 *proc_id)
+{
+ int status = 0;
+ struct proc_object *p_proc_object = (struct proc_object *)proc;
+
+ if (p_proc_object)
+ *proc_id = p_proc_object->processor_id;
+ else
+ status = -EFAULT;
+
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/rmgr/pwr.c b/drivers/staging/tidspbridge/rmgr/pwr.c
new file mode 100644
index 00000000000..85cb1a2bc0b
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/pwr.c
@@ -0,0 +1,176 @@
+/*
+ * pwr.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * PWR API for controlling DSP power states.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/pwr.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/devdefs.h>
+#include <dspbridge/drv.h>
+
+/* ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/* ----------------------------------- Link Driver */
+#include <dspbridge/dspioctl.h>
+
+/*
+ * ======== pwr_sleep_dsp ========
+ * Send command to DSP to enter sleep state.
+ */
+int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct bridge_dev_context *dw_context;
+ int status = -EPERM;
+ struct dev_object *hdev_obj = NULL;
+ u32 ioctlcode = 0;
+ u32 arg = timeout;
+
+ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
+ hdev_obj != NULL;
+ hdev_obj =
+ (struct dev_object *)drv_get_next_dev_object((u32) hdev_obj)) {
+ if (dev_get_bridge_context(hdev_obj,
+ (struct bridge_dev_context **)
+ &dw_context)) {
+ continue;
+ }
+ if (dev_get_intf_fxns(hdev_obj,
+ (struct bridge_drv_interface **)
+ &intf_fxns)) {
+ continue;
+ }
+ if (sleep_code == PWR_DEEPSLEEP)
+ ioctlcode = BRDIOCTL_DEEPSLEEP;
+ else if (sleep_code == PWR_EMERGENCYDEEPSLEEP)
+ ioctlcode = BRDIOCTL_EMERGENCYSLEEP;
+ else
+ status = -EINVAL;
+
+ if (status != -EINVAL) {
+ status = (*intf_fxns->pfn_dev_cntrl) (dw_context,
+ ioctlcode,
+ (void *)&arg);
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== pwr_wake_dsp ========
+ * Send command to DSP to wake it from sleep.
+ */
+int pwr_wake_dsp(const u32 timeout)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct bridge_dev_context *dw_context;
+ int status = -EPERM;
+ struct dev_object *hdev_obj = NULL;
+ u32 arg = timeout;
+
+ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
+ hdev_obj != NULL;
+ hdev_obj = (struct dev_object *)drv_get_next_dev_object
+ ((u32) hdev_obj)) {
+ if (!(dev_get_bridge_context(hdev_obj,
+ (struct bridge_dev_context
+ **)&dw_context))) {
+ if (!(dev_get_intf_fxns(hdev_obj,
+ (struct bridge_drv_interface **)&intf_fxns))) {
+ status =
+ (*intf_fxns->pfn_dev_cntrl) (dw_context,
+ BRDIOCTL_WAKEUP,
+ (void *)&arg);
+ }
+ }
+ }
+ return status;
+}
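+
+/*
+ * Illustrative sketch only (not part of this patch): a suspend/resume
+ * style pairing of the two calls above, using the same timeout in both
+ * directions.
+ */
+static int example_dsp_suspend_resume(u32 timeout_msec)
+{
+ int status;
+
+ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout_msec);
+ if (status)
+ return status;
+
+ /* ... system-level low power window ... */
+
+ return pwr_wake_dsp(timeout_msec);
+}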
+
+/*
+ * ======== pwr_pm_pre_scale ========
+ * Sends pre-notification message to DSP.
+ */
+int pwr_pm_pre_scale(u16 voltage_domain, u32 level)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct bridge_dev_context *dw_context;
+ int status = -EPERM;
+ struct dev_object *hdev_obj = NULL;
+ u32 arg[2];
+
+ arg[0] = voltage_domain;
+ arg[1] = level;
+
+ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
+ hdev_obj != NULL;
+ hdev_obj = (struct dev_object *)drv_get_next_dev_object
+ ((u32) hdev_obj)) {
+ if (!(dev_get_bridge_context(hdev_obj,
+ (struct bridge_dev_context
+ **)&dw_context))) {
+ if (!(dev_get_intf_fxns(hdev_obj,
+ (struct bridge_drv_interface **)&intf_fxns))) {
+ status =
+ (*intf_fxns->pfn_dev_cntrl) (dw_context,
+ BRDIOCTL_PRESCALE_NOTIFY,
+ (void *)&arg);
+ }
+ }
+ }
+ return status;
+}
+
+/*
+ * ======== pwr_pm_post_scale ========
+ * Sends post-notification message to DSP.
+ */
+int pwr_pm_post_scale(u16 voltage_domain, u32 level)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct bridge_dev_context *dw_context;
+ int status = -EPERM;
+ struct dev_object *hdev_obj = NULL;
+ u32 arg[2];
+
+ arg[0] = voltage_domain;
+ arg[1] = level;
+
+ for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
+ hdev_obj != NULL;
+ hdev_obj = (struct dev_object *)drv_get_next_dev_object
+ ((u32) hdev_obj)) {
+ if (!(dev_get_bridge_context(hdev_obj,
+ (struct bridge_dev_context
+ **)&dw_context))) {
+ if (!(dev_get_intf_fxns(hdev_obj,
+ (struct bridge_drv_interface **)&intf_fxns))) {
+ status =
+ (*intf_fxns->pfn_dev_cntrl) (dw_context,
+ BRDIOCTL_POSTSCALE_NOTIFY,
+ (void *)&arg);
+ }
+ }
+ }
+ return status;
+
+}
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c
new file mode 100644
index 00000000000..761e8f4fa46
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/rmm.c
@@ -0,0 +1,537 @@
+/*
+ * rmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * This memory manager provides general heap management and arbitrary
+ * alignment for any number of memory segments.
+ *
+ * Notes:
+ *
+ * Memory blocks are allocated from the end of the first free memory
+ * block large enough to satisfy the request. Alignment requirements
+ * are satisfied by "sliding" the block forward until its base satisfies
+ * the alignment specification; if this is not possible then the next
+ * free block large enough to hold the request is tried.
+ *
+ * Since alignment can cause the creation of a new free block - the
+ * unused memory formed between the start of the original free block
+ * and the start of the allocated block - the memory manager must free
+ * this memory to prevent a memory leak.
+ *
+ * Overlay memory is managed by reserving through rmm_alloc, and freeing
+ * it through rmm_free. The memory manager prevents DSP code/data that is
+ * overlaid from being overwritten as long as the memory it runs at has
+ * been allocated, and not yet freed.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/list.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/rmm.h>
+
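+/*
+ * Illustrative sketch only (not part of this patch): the alignment
+ * "slide" described in the notes above - the allocation base inside a
+ * free block is moved forward to the next multiple of the requested
+ * alignment, and the gap left in front is returned to the free list.
+ */
+static inline u32 example_aligned_base(u32 free_addr, u32 align)
+{
+ return align ? ((free_addr + align - 1) / align) * align : free_addr;
+}
+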
+/*
+ * ======== rmm_header ========
+ * This header is used to maintain a list of free memory blocks.
+ */
+struct rmm_header {
+ struct rmm_header *next; /* form a free memory link list */
+ u32 size; /* size of the free memory */
+ u32 addr; /* DSP address of memory block */
+};
+
+/*
+ * ======== rmm_ovly_sect ========
+ * Keeps track of memory occupied by overlay section.
+ */
+struct rmm_ovly_sect {
+ struct list_head list_elem;
+ u32 addr; /* Start of memory section */
+ u32 size; /* Length (target MAUs) of section */
+ s32 page; /* Memory page */
+};
+
+/*
+ * ======== rmm_target_obj ========
+ */
+struct rmm_target_obj {
+ struct rmm_segment *seg_tab;
+ struct rmm_header **free_list;
+ u32 num_segs;
+ struct lst_list *ovly_list; /* List of overlay memory in use */
+};
+
+static u32 refs; /* module reference count */
+
+static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
+ u32 align, u32 *dsp_address);
+static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
+ u32 size);
+
+/*
+ * ======== rmm_alloc ========
+ */
+int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
+ u32 align, u32 *dsp_address, bool reserve)
+{
+ struct rmm_ovly_sect *sect;
+ struct rmm_ovly_sect *prev_sect = NULL;
+ struct rmm_ovly_sect *new_sect;
+ u32 addr;
+ int status = 0;
+
+ DBC_REQUIRE(target);
+ DBC_REQUIRE(dsp_address != NULL);
+ DBC_REQUIRE(size > 0);
+ DBC_REQUIRE(reserve || (target->num_segs > 0));
+ DBC_REQUIRE(refs > 0);
+
+ if (!reserve) {
+ if (!alloc_block(target, segid, size, align, dsp_address)) {
+ status = -ENOMEM;
+ } else {
+ /* Increment the number of allocated blocks in this
+ * segment */
+ target->seg_tab[segid].number++;
+ }
+ goto func_end;
+ }
+ /* An overlay section - see if the block is already in use. If not,
+ * insert it into the list in ascending address order. */
+ addr = *dsp_address;
+ sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
+ /* Find place to insert new list element. List is sorted from
+ * smallest to largest address. */
+ while (sect != NULL) {
+ if (addr <= sect->addr) {
+ /* Check for overlap with sect */
+ if ((addr + size > sect->addr) || (prev_sect &&
+ (prev_sect->addr +
+ prev_sect->size >
+ addr))) {
+ status = -ENXIO;
+ }
+ break;
+ }
+ prev_sect = sect;
+ sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
+ (struct list_head *)
+ sect);
+ }
+ if (!status) {
+ /* No overlap - allocate list element for new section. */
+ new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
+ if (new_sect == NULL) {
+ status = -ENOMEM;
+ } else {
+ lst_init_elem((struct list_head *)new_sect);
+ new_sect->addr = addr;
+ new_sect->size = size;
+ new_sect->page = segid;
+ if (sect == NULL) {
+ /* Put new section at the end of the list */
+ lst_put_tail(target->ovly_list,
+ (struct list_head *)new_sect);
+ } else {
+ /* Put new section just before sect */
+ lst_insert_before(target->ovly_list,
+ (struct list_head *)new_sect,
+ (struct list_head *)sect);
+ }
+ }
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== rmm_create ========
+ */
+int rmm_create(struct rmm_target_obj **target_obj,
+ struct rmm_segment seg_tab[], u32 num_segs)
+{
+ struct rmm_header *hptr;
+ struct rmm_segment *sptr, *tmp;
+ struct rmm_target_obj *target;
+ s32 i;
+ int status = 0;
+
+ DBC_REQUIRE(target_obj != NULL);
+ DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
+
+ /* Allocate DBL target object */
+ target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
+
+ if (target == NULL)
+ status = -ENOMEM;
+
+ if (status)
+ goto func_cont;
+
+ target->num_segs = num_segs;
+ if (num_segs == 0)
+ goto func_cont;
+
+ /* Allocate the memory for freelist from host's memory */
+ target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
+ GFP_KERNEL);
+ if (target->free_list == NULL) {
+ status = -ENOMEM;
+ } else {
+ /* Allocate headers for each element on the free list */
+ for (i = 0; i < (s32) num_segs; i++) {
+ target->free_list[i] =
+ kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
+ if (target->free_list[i] == NULL) {
+ status = -ENOMEM;
+ break;
+ }
+ }
+ /* Allocate memory for initial segment table */
+ target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
+ GFP_KERNEL);
+ if (target->seg_tab == NULL) {
+ status = -ENOMEM;
+ } else {
+ /* Initialize segment table and free list */
+ sptr = target->seg_tab;
+ for (i = 0, tmp = seg_tab; num_segs > 0;
+ num_segs--, i++) {
+ *sptr = *tmp;
+ hptr = target->free_list[i];
+ hptr->addr = tmp->base;
+ hptr->size = tmp->length;
+ hptr->next = NULL;
+ tmp++;
+ sptr++;
+ }
+ }
+ }
+func_cont:
+ /* Initialize overlay memory list */
+ if (!status) {
+ target->ovly_list = kzalloc(sizeof(struct lst_list),
+ GFP_KERNEL);
+ if (target->ovly_list == NULL)
+ status = -ENOMEM;
+ else
+ INIT_LIST_HEAD(&target->ovly_list->head);
+ }
+
+ if (!status) {
+ *target_obj = target;
+ } else {
+ *target_obj = NULL;
+ if (target)
+ rmm_delete(target);
+
+ }
+
+ DBC_ENSURE((!status && *target_obj)
+ || (status && *target_obj == NULL));
+
+ return status;
+}
+
+/*
+ * ======== rmm_delete ========
+ */
+void rmm_delete(struct rmm_target_obj *target)
+{
+ struct rmm_ovly_sect *ovly_section;
+ struct rmm_header *hptr;
+ struct rmm_header *next;
+ u32 i;
+
+ DBC_REQUIRE(target);
+
+ kfree(target->seg_tab);
+
+ if (target->ovly_list) {
+ while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
+ (target->ovly_list))) {
+ kfree(ovly_section);
+ }
+ DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
+ kfree(target->ovly_list);
+ }
+
+ if (target->free_list != NULL) {
+ /* Free elements on freelist */
+ for (i = 0; i < target->num_segs; i++) {
+ hptr = next = target->free_list[i];
+ while (next) {
+ hptr = next;
+ next = hptr->next;
+ kfree(hptr);
+ }
+ }
+ kfree(target->free_list);
+ }
+
+ kfree(target);
+}
+
+/*
+ * ======== rmm_exit ========
+ */
+void rmm_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== rmm_free ========
+ */
+bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
+ bool reserved)
+{
+ struct rmm_ovly_sect *sect;
+ bool ret = true;
+
+ DBC_REQUIRE(target);
+
+ DBC_REQUIRE(reserved || segid < target->num_segs);
+ DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
+ (dsp_addr + size) <= (target->seg_tab[segid].
+ base +
+ target->seg_tab[segid].
+ length)));
+
+ /*
+ * Free or unreserve memory.
+ */
+ if (!reserved) {
+ ret = free_block(target, segid, dsp_addr, size);
+ if (ret)
+ target->seg_tab[segid].number--;
+
+ } else {
+ /* Unreserve memory */
+ sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
+ while (sect != NULL) {
+ if (dsp_addr == sect->addr) {
+ DBC_ASSERT(size == sect->size);
+ /* Remove from list */
+ lst_remove_elem(target->ovly_list,
+ (struct list_head *)sect);
+ kfree(sect);
+ break;
+ }
+ sect =
+ (struct rmm_ovly_sect *)lst_next(target->ovly_list,
+ (struct list_head
+ *)sect);
+ }
+ if (sect == NULL)
+ ret = false;
+
+ }
+ return ret;
+}
+
+/*
+ * ======== rmm_init ========
+ */
+bool rmm_init(void)
+{
+ DBC_REQUIRE(refs >= 0);
+
+ refs++;
+
+ return true;
+}
+
+/*
+ * ======== rmm_stat ========
+ */
+bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
+ struct dsp_memstat *mem_stat_buf)
+{
+ struct rmm_header *head;
+ bool ret = false;
+ u32 max_free_size = 0;
+ u32 total_free_size = 0;
+ u32 free_blocks = 0;
+
+ DBC_REQUIRE(mem_stat_buf != NULL);
+ DBC_ASSERT(target != NULL);
+
+ if ((u32) segid < target->num_segs) {
+ head = target->free_list[segid];
+
+ /* Collect data from free_list */
+ while (head != NULL) {
+ max_free_size = max(max_free_size, head->size);
+ total_free_size += head->size;
+ free_blocks++;
+ head = head->next;
+ }
+
+ /* ul_size */
+ mem_stat_buf->ul_size = target->seg_tab[segid].length;
+
+ /* ul_num_free_blocks */
+ mem_stat_buf->ul_num_free_blocks = free_blocks;
+
+ /* ul_total_free_size */
+ mem_stat_buf->ul_total_free_size = total_free_size;
+
+ /* ul_len_max_free_block */
+ mem_stat_buf->ul_len_max_free_block = max_free_size;
+
+ /* ul_num_alloc_blocks */
+ mem_stat_buf->ul_num_alloc_blocks =
+ target->seg_tab[segid].number;
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+/*
+ * ======== alloc_block ========
+ * This allocation function allocates memory from the lowest addresses
+ * first.
+ */
+static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
+ u32 align, u32 *dsp_address)
+{
+ struct rmm_header *head;
+ struct rmm_header *prevhead = NULL;
+ struct rmm_header *next;
+ u32 tmpalign;
+ u32 alignbytes;
+ u32 hsize;
+ u32 allocsize;
+ u32 addr;
+
+ alignbytes = (align == 0) ? 1 : align;
+ prevhead = NULL;
+ head = target->free_list[segid];
+
+ do {
+ hsize = head->size;
+ next = head->next;
+
+ addr = head->addr; /* alloc from the bottom */
+
+ /* align allocation */
+ tmpalign = (u32) addr % alignbytes;
+ if (tmpalign != 0)
+ tmpalign = alignbytes - tmpalign;
+
+ allocsize = size + tmpalign;
+
+ if (hsize >= allocsize) { /* big enough */
+ if (hsize == allocsize && prevhead != NULL) {
+ prevhead->next = next;
+ kfree(head);
+ } else {
+ head->size = hsize - allocsize;
+ head->addr += allocsize;
+ }
+
+ /* free up any hole created by alignment */
+ if (tmpalign)
+ free_block(target, segid, addr, tmpalign);
+
+ *dsp_address = addr + tmpalign;
+ return true;
+ }
+
+ prevhead = head;
+ head = next;
+
+ } while (head != NULL);
+
+ return false;
+}
+
+/*
+ * ======== free_block ========
+ * TO DO: free_block() allocates memory, which could result in failure.
+ * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
+ * free_block() could use an rmm_header from the pool, freeing as blocks
+ * are coalesced.
+ */
+static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
+ u32 size)
+{
+ struct rmm_header *head;
+ struct rmm_header *thead;
+ struct rmm_header *rhead;
+ bool ret = true;
+
+ /* Create a memory header to hold the newly free'd block. */
+ rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
+ if (rhead == NULL) {
+ ret = false;
+ } else {
+ /* search down the free list to find the right place for addr */
+ head = target->free_list[segid];
+
+ if (addr >= head->addr) {
+ while (head->next != NULL && addr > head->next->addr)
+ head = head->next;
+
+ thead = head->next;
+
+ head->next = rhead;
+ rhead->next = thead;
+ rhead->addr = addr;
+ rhead->size = size;
+ } else {
+ *rhead = *head;
+ head->next = rhead;
+ head->addr = addr;
+ head->size = size;
+ thead = rhead->next;
+ }
+
+ /* join with upper block, if possible */
+ if (thead != NULL && (rhead->addr + rhead->size) ==
+ thead->addr) {
+ head->next = rhead->next;
+ thead->size = size + thead->size;
+ thead->addr = addr;
+ kfree(rhead);
+ rhead = thead;
+ }
+
+ /* join with the lower block, if possible */
+ if ((head->addr + head->size) == rhead->addr) {
+ head->next = rhead->next;
+ head->size = head->size + rhead->size;
+ kfree(rhead);
+ }
+ }
+
+ return ret;
+}
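
The TO DO comment on free_block() above suggests pre-allocating rmm_header structures at rmm_alloc() time and keeping them in a pool, so that freeing a block can never fail on allocation. The following is a minimal userspace sketch of that pooled-header idea, not driver code; the names (struct hdr, hdr_pool, pool_reserve/pool_get/pool_put) are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct hdr {
	struct hdr *next;
	unsigned int addr;
	unsigned int size;
};

static struct hdr *hdr_pool;	/* singly linked list of spare headers */

/* Reserve a spare header on the allocation path, where failure can still
 * be reported to the caller. */
static int pool_reserve(void)
{
	struct hdr *h = malloc(sizeof(*h));

	if (!h)
		return -1;
	h->next = hdr_pool;
	hdr_pool = h;
	return 0;
}

/* Take a reserved header on the free path; cannot fail if a header was
 * reserved earlier. */
static struct hdr *pool_get(void)
{
	struct hdr *h = hdr_pool;

	if (h)
		hdr_pool = h->next;
	return h;
}

/* Return a header to the pool when two free blocks are coalesced. */
static void pool_put(struct hdr *h)
{
	h->next = hdr_pool;
	hdr_pool = h;
}

int main(void)
{
	struct hdr *h;

	if (pool_reserve())	/* done at allocation time in the idea above */
		return 1;
	h = pool_get();		/* done at free time, never fails here */
	h->addr = 0x1000;
	h->size = 0x200;
	printf("freed block at 0x%x, size 0x%x\n", h->addr, h->size);
	pool_put(h);		/* block coalesced, header goes back */
	free(pool_get());	/* clean up */
	return 0;
}
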
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c
new file mode 100644
index 00000000000..ef2ec9497b1
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/strm.c
@@ -0,0 +1,853 @@
+/*
+ * strm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge Stream Manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/* ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+
+/* ----------------------------------- Resource Manager */
+#include <dspbridge/nodepriv.h>
+
+/* ----------------------------------- Others */
+#include <dspbridge/cmm.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/strm.h>
+
+#include <dspbridge/cfg.h>
+#include <dspbridge/resourcecleanup.h>
+
+/* ----------------------------------- Defines, Data Structures, Typedefs */
+#define DEFAULTTIMEOUT 10000
+#define DEFAULTNUMBUFS 2
+
+/*
+ * ======== strm_mgr ========
+ * The strm_mgr contains device information needed to open the underlying
+ * channels of a stream.
+ */
+struct strm_mgr {
+ struct dev_object *dev_obj; /* Device for this processor */
+ struct chnl_mgr *hchnl_mgr; /* Channel manager */
+ /* Function interface to Bridge driver */
+ struct bridge_drv_interface *intf_fxns;
+};
+
+/*
+ * ======== strm_object ========
+ * This object is allocated in strm_open().
+ */
+struct strm_object {
+ struct strm_mgr *strm_mgr_obj;
+ struct chnl_object *chnl_obj;
+ u32 dir; /* DSP_TONODE or DSP_FROMNODE */
+ u32 utimeout;
+ u32 num_bufs; /* Max # of bufs allowed in stream */
+ u32 un_bufs_in_strm; /* Current # of bufs in stream */
+ u32 ul_n_bytes; /* bytes transferred since idled */
+ /* STREAM_IDLE, STREAM_READY, ... */
+ enum dsp_streamstate strm_state;
+ void *user_event; /* Saved for strm_get_info() */
+ enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */
+ u32 udma_chnl_id; /* DMA chnl id */
+ u32 udma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */
+ u32 segment_id; /* >0 is SM segment.=0 is local heap */
+ u32 buf_alignment; /* Alignment for stream bufs */
+ /* Stream's SM address translator */
+ struct cmm_xlatorobject *xlator;
+};
+
+/* ----------------------------------- Globals */
+static u32 refs; /* module reference count */
+
+/* ----------------------------------- Function Prototypes */
+static int delete_strm(struct strm_object *stream_obj);
+
+/*
+ * ======== strm_allocate_buffer ========
+ * Purpose:
+ * Allocates buffers for a stream.
+ */
+int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
+ u8 **ap_buffer, u32 num_bufs,
+ struct process_context *pr_ctxt)
+{
+ int status = 0;
+ u32 alloc_cnt = 0;
+ u32 i;
+ struct strm_object *stream_obj = strmres->hstream;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(ap_buffer != NULL);
+
+ if (stream_obj) {
+ /*
+ * Allocate from segment specified at time of stream open.
+ */
+ if (usize == 0)
+ status = -EINVAL;
+
+ } else {
+ status = -EFAULT;
+ }
+
+ if (status)
+ goto func_end;
+
+ for (i = 0; i < num_bufs; i++) {
+ DBC_ASSERT(stream_obj->xlator != NULL);
+ (void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i],
+ usize);
+ if (ap_buffer[i] == NULL) {
+ status = -ENOMEM;
+ alloc_cnt = i;
+ break;
+ }
+ }
+ if (status)
+ strm_free_buffer(strmres, ap_buffer, alloc_cnt, pr_ctxt);
+
+ if (status)
+ goto func_end;
+
+ drv_proc_update_strm_res(num_bufs, strmres);
+
+func_end:
+ return status;
+}
+
+/*
+ * ======== strm_close ========
+ * Purpose:
+ * Close a stream opened with strm_open().
+ */
+int strm_close(struct strm_res_object *strmres,
+ struct process_context *pr_ctxt)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct chnl_info chnl_info_obj;
+ int status = 0;
+ struct strm_object *stream_obj = strmres->hstream;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!stream_obj) {
+ status = -EFAULT;
+ } else {
+ /* Have all buffers been reclaimed? If not, return
+ * -EPIPE */
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+ status =
+ (*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj,
+ &chnl_info_obj);
+ DBC_ASSERT(!status);
+
+ if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
+ status = -EPIPE;
+ else
+ status = delete_strm(stream_obj);
+ }
+
+ if (status)
+ goto func_end;
+
+ idr_remove(pr_ctxt->stream_id, strmres->id);
+func_end:
+ DBC_ENSURE(status == 0 || status == -EFAULT ||
+ status == -EPIPE || status == -EPERM);
+
+ dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__,
+ stream_obj, status);
+ return status;
+}
+
+/*
+ * ======== strm_create ========
+ * Purpose:
+ * Create a STRM manager object.
+ */
+int strm_create(struct strm_mgr **strm_man,
+ struct dev_object *dev_obj)
+{
+ struct strm_mgr *strm_mgr_obj;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(strm_man != NULL);
+ DBC_REQUIRE(dev_obj != NULL);
+
+ *strm_man = NULL;
+ /* Allocate STRM manager object */
+ strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
+ if (strm_mgr_obj == NULL)
+ status = -ENOMEM;
+ else
+ strm_mgr_obj->dev_obj = dev_obj;
+
+ /* Get Channel manager and Bridge function interface */
+ if (!status) {
+ status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr));
+ if (!status) {
+ (void)dev_get_intf_fxns(dev_obj,
+ &(strm_mgr_obj->intf_fxns));
+ DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
+ }
+ }
+
+ if (!status)
+ *strm_man = strm_mgr_obj;
+ else
+ kfree(strm_mgr_obj);
+
+ DBC_ENSURE((!status && *strm_man) || (status && *strm_man == NULL));
+
+ return status;
+}
+
+/*
+ * ======== strm_delete ========
+ * Purpose:
+ * Delete the STRM Manager Object.
+ */
+void strm_delete(struct strm_mgr *strm_mgr_obj)
+{
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(strm_mgr_obj);
+
+ kfree(strm_mgr_obj);
+}
+
+/*
+ * ======== strm_exit ========
+ * Purpose:
+ * Discontinue usage of STRM module.
+ */
+void strm_exit(void)
+{
+ DBC_REQUIRE(refs > 0);
+
+ refs--;
+
+ DBC_ENSURE(refs >= 0);
+}
+
+/*
+ * ======== strm_free_buffer ========
+ * Purpose:
+ * Frees the buffers allocated for a stream.
+ */
+int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
+ u32 num_bufs, struct process_context *pr_ctxt)
+{
+ int status = 0;
+ u32 i = 0;
+ struct strm_object *stream_obj = strmres->hstream;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(ap_buffer != NULL);
+
+ if (!stream_obj)
+ status = -EFAULT;
+
+ if (!status) {
+ for (i = 0; i < num_bufs; i++) {
+ DBC_ASSERT(stream_obj->xlator != NULL);
+ status =
+ cmm_xlator_free_buf(stream_obj->xlator,
+ ap_buffer[i]);
+ if (status)
+ break;
+ ap_buffer[i] = NULL;
+ }
+ }
+ drv_proc_update_strm_res(num_bufs - i, strmres);
+
+ return status;
+}
+
+/*
+ * ======== strm_get_info ========
+ * Purpose:
+ * Retrieves information about a stream.
+ */
+int strm_get_info(struct strm_object *stream_obj,
+ struct stream_info *stream_info,
+ u32 stream_info_size)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct chnl_info chnl_info_obj;
+ int status = 0;
+ void *virt_base = NULL; /* NULL if no SM used */
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(stream_info != NULL);
+ DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
+
+ if (!stream_obj) {
+ status = -EFAULT;
+ } else {
+ if (stream_info_size < sizeof(struct stream_info)) {
+ /* size of users info */
+ status = -EINVAL;
+ }
+ }
+ if (status)
+ goto func_end;
+
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+ status =
+ (*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj,
+ &chnl_info_obj);
+ if (status)
+ goto func_end;
+
+ if (stream_obj->xlator) {
+ /* We have a translator */
+ DBC_ASSERT(stream_obj->segment_id > 0);
+ cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0,
+ stream_obj->segment_id, false);
+ }
+ stream_info->segment_id = stream_obj->segment_id;
+ stream_info->strm_mode = stream_obj->strm_mode;
+ stream_info->virt_base = virt_base;
+ stream_info->user_strm->number_bufs_allowed = stream_obj->num_bufs;
+ stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs +
+ chnl_info_obj.cio_reqs;
+ /* # of bytes transferred since last call to DSPStream_Idle() */
+ stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx;
+ stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
+ /* Determine stream state based on channel state and info */
+ if (chnl_info_obj.dw_state & CHNL_STATEEOS) {
+ stream_info->user_strm->ss_stream_state = STREAM_DONE;
+ } else {
+ if (chnl_info_obj.cio_cs > 0)
+ stream_info->user_strm->ss_stream_state = STREAM_READY;
+ else if (chnl_info_obj.cio_reqs > 0)
+ stream_info->user_strm->ss_stream_state =
+ STREAM_PENDING;
+ else
+ stream_info->user_strm->ss_stream_state = STREAM_IDLE;
+
+ }
+func_end:
+ return status;
+}
+
+/*
+ * ======== strm_idle ========
+ * Purpose:
+ * Idles a particular stream.
+ */
+int strm_idle(struct strm_object *stream_obj, bool flush_data)
+{
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+
+ if (!stream_obj) {
+ status = -EFAULT;
+ } else {
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+
+ status = (*intf_fxns->pfn_chnl_idle) (stream_obj->chnl_obj,
+ stream_obj->utimeout,
+ flush_data);
+ }
+
+ dev_dbg(bridge, "%s: stream_obj: %p flush_data: 0x%x status: 0x%x\n",
+ __func__, stream_obj, flush_data, status);
+ return status;
+}
+
+/*
+ * ======== strm_init ========
+ * Purpose:
+ * Initialize the STRM module.
+ */
+bool strm_init(void)
+{
+ bool ret = true;
+
+ DBC_REQUIRE(refs >= 0);
+
+ if (ret)
+ refs++;
+
+ DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+ return ret;
+}
+
+/*
+ * ======== strm_issue ========
+ * Purpose:
+ * Issues a buffer on a stream
+ */
+int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
+ u32 ul_buf_size, u32 dw_arg)
+{
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+ void *tmp_buf = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(pbuf != NULL);
+
+ if (!stream_obj) {
+ status = -EFAULT;
+ } else {
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+
+ if (stream_obj->segment_id != 0) {
+ tmp_buf = cmm_xlator_translate(stream_obj->xlator,
+ (void *)pbuf,
+ CMM_VA2DSPPA);
+ if (tmp_buf == NULL)
+ status = -ESRCH;
+
+ }
+ if (!status) {
+ status = (*intf_fxns->pfn_chnl_add_io_req)
+ (stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size,
+ (u32) tmp_buf, dw_arg);
+ }
+ if (status == -EIO)
+ status = -ENOSR;
+ }
+
+ dev_dbg(bridge, "%s: stream_obj: %p pbuf: %p ul_bytes: 0x%x dw_arg:"
+ " 0x%x status: 0x%x\n", __func__, stream_obj, pbuf,
+ ul_bytes, dw_arg, status);
+ return status;
+}
+
+/*
+ * ======== strm_open ========
+ * Purpose:
+ * Open a stream for sending/receiving data buffers to/from a task or
+ * XDAIS socket node on the DSP.
+ */
+int strm_open(struct node_object *hnode, u32 dir, u32 index,
+ struct strm_attr *pattr,
+ struct strm_res_object **strmres,
+ struct process_context *pr_ctxt)
+{
+ struct strm_mgr *strm_mgr_obj;
+ struct bridge_drv_interface *intf_fxns;
+ u32 ul_chnl_id;
+ struct strm_object *strm_obj = NULL;
+ s8 chnl_mode;
+ struct chnl_attr chnl_attr_obj;
+ int status = 0;
+ struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
+
+ void *stream_res;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(strmres != NULL);
+ DBC_REQUIRE(pattr != NULL);
+ *strmres = NULL;
+ if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
+ status = -EPERM;
+ } else {
+ /* Get the channel id from the node (set in node_connect()) */
+ status = node_get_channel_id(hnode, dir, index, &ul_chnl_id);
+ }
+ if (!status)
+ status = node_get_strm_mgr(hnode, &strm_mgr_obj);
+
+ if (!status) {
+ strm_obj = kzalloc(sizeof(struct strm_object), GFP_KERNEL);
+ if (strm_obj == NULL) {
+ status = -ENOMEM;
+ } else {
+ strm_obj->strm_mgr_obj = strm_mgr_obj;
+ strm_obj->dir = dir;
+ strm_obj->strm_state = STREAM_IDLE;
+ strm_obj->user_event = pattr->user_event;
+ if (pattr->stream_attr_in != NULL) {
+ strm_obj->utimeout =
+ pattr->stream_attr_in->utimeout;
+ strm_obj->num_bufs =
+ pattr->stream_attr_in->num_bufs;
+ strm_obj->strm_mode =
+ pattr->stream_attr_in->strm_mode;
+ strm_obj->segment_id =
+ pattr->stream_attr_in->segment_id;
+ strm_obj->buf_alignment =
+ pattr->stream_attr_in->buf_alignment;
+ strm_obj->udma_chnl_id =
+ pattr->stream_attr_in->udma_chnl_id;
+ strm_obj->udma_priority =
+ pattr->stream_attr_in->udma_priority;
+ chnl_attr_obj.uio_reqs =
+ pattr->stream_attr_in->num_bufs;
+ } else {
+ strm_obj->utimeout = DEFAULTTIMEOUT;
+ strm_obj->num_bufs = DEFAULTNUMBUFS;
+ strm_obj->strm_mode = STRMMODE_PROCCOPY;
+ strm_obj->segment_id = 0; /* local mem */
+ strm_obj->buf_alignment = 0;
+ strm_obj->udma_chnl_id = 0;
+ strm_obj->udma_priority = 0;
+ chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS;
+ }
+ chnl_attr_obj.reserved1 = NULL;
+ /* DMA chnl flush timeout */
+ chnl_attr_obj.reserved2 = strm_obj->utimeout;
+ chnl_attr_obj.event_obj = NULL;
+ if (pattr->user_event != NULL)
+ chnl_attr_obj.event_obj = pattr->user_event;
+
+ }
+ }
+ if (status)
+ goto func_cont;
+
+ if (pattr->virt_base == NULL || pattr->ul_virt_size == 0)
+ goto func_cont;
+
+ /* No System DMA */
+ DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
+ /* Get the shared mem mgr for this streams dev object */
+ status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
+ if (!status) {
+ /*Allocate a SM addr translator for this strm. */
+ status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
+ if (!status) {
+ DBC_ASSERT(strm_obj->segment_id > 0);
+ /* Set translators Virt Addr attributes */
+ status = cmm_xlator_info(strm_obj->xlator,
+ (u8 **) &pattr->virt_base,
+ pattr->ul_virt_size,
+ strm_obj->segment_id, true);
+ }
+ }
+func_cont:
+ if (!status) {
+ /* Open channel */
+ chnl_mode = (dir == DSP_TONODE) ?
+ CHNL_MODETODSP : CHNL_MODEFROMDSP;
+ intf_fxns = strm_mgr_obj->intf_fxns;
+ status = (*intf_fxns->pfn_chnl_open) (&(strm_obj->chnl_obj),
+ strm_mgr_obj->hchnl_mgr,
+ chnl_mode, ul_chnl_id,
+ &chnl_attr_obj);
+ if (status) {
+ /*
+ * override non-returnable status codes so we return
+ * something documented
+ */
+ if (status != -ENOMEM && status !=
+ -EINVAL && status != -EPERM) {
+ /*
+ * We got a status that's not return-able.
+ * Assert that we got something we were
+ * expecting (-EFAULT isn't acceptable,
+ * strm_mgr_obj->hchnl_mgr better be valid or we
+ * assert here), and then return -EPERM.
+ */
+ DBC_ASSERT(status == -ENOSR ||
+ status == -ECHRNG ||
+ status == -EALREADY ||
+ status == -EIO);
+ status = -EPERM;
+ }
+ }
+ }
+ if (!status) {
+ status = drv_proc_insert_strm_res_element(strm_obj,
+ &stream_res, pr_ctxt);
+ if (status)
+ delete_strm(strm_obj);
+ else
+ *strmres = (struct strm_res_object *)stream_res;
+ } else {
+ (void)delete_strm(strm_obj);
+ }
+
+ /* ensure we return a documented error code */
+ DBC_ENSURE((!status && strm_obj) ||
+ (*strmres == NULL && (status == -EFAULT ||
+ status == -EPERM
+ || status == -EINVAL)));
+
+ dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
+ "strmres: %p status: 0x%x\n", __func__,
+ hnode, dir, index, pattr, strmres, status);
+ return status;
+}
+
+/*
+ * ======== strm_reclaim ========
+ * Purpose:
+ * Reclaims a buffer from a stream.
+ */
+int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
+ u32 *nbytes, u32 *buff_size, u32 *pdw_arg)
+{
+ struct bridge_drv_interface *intf_fxns;
+ struct chnl_ioc chnl_ioc_obj;
+ int status = 0;
+ void *tmp_buf = NULL;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(buf_ptr != NULL);
+ DBC_REQUIRE(nbytes != NULL);
+ DBC_REQUIRE(pdw_arg != NULL);
+
+ if (!stream_obj) {
+ status = -EFAULT;
+ goto func_end;
+ }
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+
+ status =
+ (*intf_fxns->pfn_chnl_get_ioc) (stream_obj->chnl_obj,
+ stream_obj->utimeout,
+ &chnl_ioc_obj);
+ if (!status) {
+ *nbytes = chnl_ioc_obj.byte_size;
+ if (buff_size)
+ *buff_size = chnl_ioc_obj.buf_size;
+
+ *pdw_arg = chnl_ioc_obj.dw_arg;
+ if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
+ if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
+ status = -ETIME;
+ } else {
+ /* Allow reclaims after idle to succeed */
+ if (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
+ status = -EPERM;
+
+ }
+ }
+ /* Translate zerocopy buffer if channel not canceled. */
+ if (!status
+ && (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
+ && (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) {
+ /*
+ * This is a zero-copy channel so chnl_ioc_obj.pbuf
+ * contains the DSP address of SM. We need to
+ * translate it to a virtual address for the user
+ * thread to access.
+ * Note: Could add CMM_DSPPA2VA to CMM in the future.
+ */
+ tmp_buf = cmm_xlator_translate(stream_obj->xlator,
+ chnl_ioc_obj.pbuf,
+ CMM_DSPPA2PA);
+ if (tmp_buf != NULL) {
+ /* now convert this GPP Pa to Va */
+ tmp_buf = cmm_xlator_translate(stream_obj->
+ xlator,
+ tmp_buf,
+ CMM_PA2VA);
+ }
+ if (tmp_buf == NULL)
+ status = -ESRCH;
+
+ chnl_ioc_obj.pbuf = tmp_buf;
+ }
+ *buf_ptr = chnl_ioc_obj.pbuf;
+ }
+func_end:
+ /* ensure we return a documented return code */
+ DBC_ENSURE(!status || status == -EFAULT ||
+ status == -ETIME || status == -ESRCH ||
+ status == -EPERM);
+
+ dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p "
+ "pdw_arg: %p status 0x%x\n", __func__, stream_obj,
+ buf_ptr, nbytes, pdw_arg, status);
+ return status;
+}
+
+/*
+ * ======== strm_register_notify ========
+ * Purpose:
+ * Register to be notified on specific events for this stream.
+ */
+int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
+ u32 notify_type, struct dsp_notification
+ * hnotification)
+{
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(hnotification != NULL);
+
+ if (!stream_obj) {
+ status = -EFAULT;
+ } else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
+ DSP_STREAMDONE)) != 0) {
+ status = -EINVAL;
+ } else {
+ if (notify_type != DSP_SIGNALEVENT)
+ status = -ENOSYS;
+
+ }
+ if (!status) {
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+
+ status =
+ (*intf_fxns->pfn_chnl_register_notify) (stream_obj->
+ chnl_obj,
+ event_mask,
+ notify_type,
+ hnotification);
+ }
+ /* ensure we return a documented return code */
+ DBC_ENSURE(!status || status == -EFAULT ||
+ status == -ETIME || status == -ESRCH ||
+ status == -ENOSYS || status == -EPERM);
+ return status;
+}
+
+/*
+ * ======== strm_select ========
+ * Purpose:
+ * Selects a ready stream.
+ */
+int strm_select(struct strm_object **strm_tab, u32 strms,
+ u32 *pmask, u32 utimeout)
+{
+ u32 index;
+ struct chnl_info chnl_info_obj;
+ struct bridge_drv_interface *intf_fxns;
+ struct sync_object **sync_events = NULL;
+ u32 i;
+ int status = 0;
+
+ DBC_REQUIRE(refs > 0);
+ DBC_REQUIRE(strm_tab != NULL);
+ DBC_REQUIRE(pmask != NULL);
+ DBC_REQUIRE(strms > 0);
+
+ *pmask = 0;
+ for (i = 0; i < strms; i++) {
+ if (!strm_tab[i]) {
+ status = -EFAULT;
+ break;
+ }
+ }
+ if (status)
+ goto func_end;
+
+ /* Determine which channels have IO ready */
+ for (i = 0; i < strms; i++) {
+ intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns;
+ status = (*intf_fxns->pfn_chnl_get_info) (strm_tab[i]->chnl_obj,
+ &chnl_info_obj);
+ if (status) {
+ break;
+ } else {
+ if (chnl_info_obj.cio_cs > 0)
+ *pmask |= (1 << i);
+
+ }
+ }
+ if (!status && utimeout > 0 && *pmask == 0) {
+ /* Non-zero timeout */
+ sync_events = kmalloc(strms * sizeof(struct sync_object *),
+ GFP_KERNEL);
+
+ if (sync_events == NULL) {
+ status = -ENOMEM;
+ } else {
+ for (i = 0; i < strms; i++) {
+ intf_fxns =
+ strm_tab[i]->strm_mgr_obj->intf_fxns;
+ status = (*intf_fxns->pfn_chnl_get_info)
+ (strm_tab[i]->chnl_obj, &chnl_info_obj);
+ if (status)
+ break;
+ else
+ sync_events[i] =
+ chnl_info_obj.sync_event;
+
+ }
+ }
+ if (!status) {
+ status =
+ sync_wait_on_multiple_events(sync_events, strms,
+ utimeout, &index);
+ if (!status) {
+ /* Since we waited on the event, we have to
+ * reset it */
+ sync_set_event(sync_events[index]);
+ *pmask = 1 << index;
+ }
+ }
+ }
+func_end:
+ kfree(sync_events);
+
+ DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
+ (status && *pmask == 0));
+
+ return status;
+}
+
+/*
+ * ======== delete_strm ========
+ * Purpose:
+ * Frees the resources allocated for a stream.
+ */
+static int delete_strm(struct strm_object *stream_obj)
+{
+ struct bridge_drv_interface *intf_fxns;
+ int status = 0;
+
+ if (stream_obj) {
+ if (stream_obj->chnl_obj) {
+ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
+ /* Channel close can fail only if the channel handle
+ * is invalid. */
+ status = (*intf_fxns->pfn_chnl_close)
+ (stream_obj->chnl_obj);
+ /* Free all SM address translator resources */
+ if (!status) {
+ if (stream_obj->xlator) {
+ /* force free */
+ (void)cmm_xlator_delete(stream_obj->
+ xlator,
+ true);
+ }
+ }
+ }
+ kfree(stream_obj);
+ } else {
+ status = -EFAULT;
+ }
+ return status;
+}
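
strm_select() above builds a bitmask with one bit per stream that has completed I/O, and only falls back to sleeping on sync events when that mask comes out empty. A small userspace sketch of the mask construction follows; is_ready() stands in for the pfn_chnl_get_info() query and the data is made up.

#include <stdio.h>

#define NSTREAMS 4

/* Completed I/O count per stream; stands in for chnl_info_obj.cio_cs. */
static const unsigned int completed[NSTREAMS] = { 0, 2, 0, 1 };

static int is_ready(unsigned int i)
{
	return completed[i] > 0;
}

int main(void)
{
	unsigned int mask = 0;
	unsigned int i;

	for (i = 0; i < NSTREAMS; i++)
		if (is_ready(i))
			mask |= 1u << i;	/* bit i set => stream i is ready */

	printf("ready mask: 0x%x\n", mask);	/* streams 1 and 3 -> 0xa */
	return 0;
}
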
diff --git a/drivers/staging/tidspbridge/services/cfg.c b/drivers/staging/tidspbridge/services/cfg.c
new file mode 100644
index 00000000000..a7af74f482d
--- /dev/null
+++ b/drivers/staging/tidspbridge/services/cfg.c
@@ -0,0 +1,253 @@
+/*
+ * cfg.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of platform specific config services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+
+/* ----------------------------------- This */
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+
+struct drv_ext {
+ struct list_head link;
+ char sz_string[MAXREGPATHLENGTH];
+};
+
+/*
+ * ======== cfg_exit ========
+ * Purpose:
+ * Discontinue usage of the CFG module.
+ */
+void cfg_exit(void)
+{
+ /* Do nothing */
+}
+
+/*
+ * ======== cfg_get_auto_start ========
+ * Purpose:
+ * Retrieve the autostart mask, if any, for this board.
+ */
+int cfg_get_auto_start(struct cfg_devnode *dev_node_obj,
+ u32 *auto_start)
+{
+ int status = 0;
+ u32 dw_buf_size;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ dw_buf_size = sizeof(*auto_start);
+ if (!dev_node_obj)
+ status = -EFAULT;
+ if (!auto_start || !drv_datap)
+ status = -EFAULT;
+ if (!status)
+ *auto_start = (drv_datap->base_img) ? 1 : 0;
+
+ DBC_ENSURE((status == 0 &&
+ (*auto_start == 0 || *auto_start == 1))
+ || status != 0);
+ return status;
+}
+
+/*
+ * ======== cfg_get_dev_object ========
+ * Purpose:
+ * Retrieve the Device Object handle for a given devnode.
+ */
+int cfg_get_dev_object(struct cfg_devnode *dev_node_obj,
+ u32 *value)
+{
+ int status = 0;
+ u32 dw_buf_size;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ if (!drv_datap)
+ status = -EPERM;
+
+ if (!dev_node_obj)
+ status = -EFAULT;
+
+ if (!value)
+ status = -EFAULT;
+
+ dw_buf_size = sizeof(value);
+ if (!status) {
+
+ /* check the device string and then store dev object */
+ if (!strcmp((char *)((struct drv_ext *)dev_node_obj)->sz_string,
+ "TIOMAP1510"))
+ *value = (u32)drv_datap->dev_object;
+ }
+ if (status)
+ pr_err("%s: Failed, status 0x%x\n", __func__, status);
+ return status;
+}
+
+/*
+ * ======== cfg_get_exec_file ========
+ * Purpose:
+ * Retrieve the default executable, if any, for this board.
+ */
+int cfg_get_exec_file(struct cfg_devnode *dev_node_obj, u32 buf_size,
+ char *str_exec_file)
+{
+ int status = 0;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ if (!dev_node_obj)
+ status = -EFAULT;
+
+ else if (!str_exec_file || !drv_datap)
+ status = -EFAULT;
+
+ if (!status && drv_datap->base_img && strlen(drv_datap->base_img) > buf_size)
+ status = -EINVAL;
+
+ if (!status && drv_datap->base_img)
+ strcpy(str_exec_file, drv_datap->base_img);
+
+ if (status)
+ pr_err("%s: Failed, status 0x%x\n", __func__, status);
+ DBC_ENSURE(((status == 0) &&
+ (strlen(str_exec_file) <= buf_size))
+ || (status != 0));
+ return status;
+}
+
+/*
+ * ======== cfg_get_object ========
+ * Purpose:
+ * Retrieve the Object handle from the Registry
+ */
+int cfg_get_object(u32 *value, u8 dw_type)
+{
+ int status = -EINVAL;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ DBC_REQUIRE(value != NULL);
+
+ if (!drv_datap)
+ return -EPERM;
+
+ switch (dw_type) {
+ case (REG_DRV_OBJECT):
+ if (drv_datap->drv_object) {
+ *value = (u32)drv_datap->drv_object;
+ status = 0;
+ } else {
+ status = -ENODATA;
+ }
+ break;
+ case (REG_MGR_OBJECT):
+ if (drv_datap->mgr_object) {
+ *value = (u32)drv_datap->mgr_object;
+ status = 0;
+ } else {
+ status = -ENODATA;
+ }
+ break;
+
+ default:
+ break;
+ }
+ if (status) {
+ *value = 0;
+ pr_err("%s: Failed, status 0x%x\n", __func__, status);
+ }
+ DBC_ENSURE((!status && *value != 0) || (status && *value == 0));
+ return status;
+}
+
+/*
+ * ======== cfg_init ========
+ * Purpose:
+ * Initialize the CFG module's private state.
+ */
+bool cfg_init(void)
+{
+ return true;
+}
+
+/*
+ * ======== cfg_set_dev_object ========
+ * Purpose:
+ * Store the Device Object handle and dev_node pointer for a given devnode.
+ */
+int cfg_set_dev_object(struct cfg_devnode *dev_node_obj, u32 value)
+{
+ int status = 0;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ if (!drv_datap) {
+ pr_err("%s: Failed, status 0x%x\n", __func__, status);
+ return -EPERM;
+ }
+
+ if (!dev_node_obj)
+ status = -EFAULT;
+
+ if (!status) {
+ /* Store the Bridge device object in the Registry */
+
+ if (!(strcmp((char *)dev_node_obj, "TIOMAP1510")))
+ drv_datap->dev_object = (void *) value;
+ }
+ if (status)
+ pr_err("%s: Failed, status 0x%x\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * ======== cfg_set_object ========
+ * Purpose:
+ * Store the Driver Object handle
+ */
+int cfg_set_object(u32 value, u8 dw_type)
+{
+ int status = -EINVAL;
+ struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+ if (!drv_datap)
+ return -EPERM;
+
+ switch (dw_type) {
+ case (REG_DRV_OBJECT):
+ drv_datap->drv_object = (void *)value;
+ status = 0;
+ break;
+ case (REG_MGR_OBJECT):
+ drv_datap->mgr_object = (void *)value;
+ status = 0;
+ break;
+ default:
+ break;
+ }
+ if (status)
+ pr_err("%s: Failed, status 0x%x\n", __func__, status);
+ return status;
+}
diff --git a/drivers/staging/tidspbridge/services/ntfy.c b/drivers/staging/tidspbridge/services/ntfy.c
new file mode 100644
index 00000000000..a2ea698be24
--- /dev/null
+++ b/drivers/staging/tidspbridge/services/ntfy.c
@@ -0,0 +1,31 @@
+/*
+ * ntfy.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Manage lists of notification events.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- This */
+#include <dspbridge/ntfy.h>
+
+int dsp_notifier_event(struct notifier_block *this, unsigned long event,
+ void *data)
+{
+ struct ntfy_event *ne = container_of(this, struct ntfy_event,
+ noti_block);
+ if (ne->event & event)
+ sync_set_event(&ne->sync_obj);
+ return NOTIFY_OK;
+}
+
diff --git a/drivers/staging/tidspbridge/services/services.c b/drivers/staging/tidspbridge/services/services.c
new file mode 100644
index 00000000000..6a7dd6f3ecb
--- /dev/null
+++ b/drivers/staging/tidspbridge/services/services.c
@@ -0,0 +1,70 @@
+/*
+ * services.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Provide SERVICES loading.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/* ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/sync.h>
+#include <dspbridge/clk.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/services.h>
+
+/*
+ * ======== services_exit ========
+ * Purpose:
+ * Discontinue usage of module; free resources when reference count
+ * reaches 0.
+ */
+void services_exit(void)
+{
+ cfg_exit();
+}
+
+/*
+ * ======== services_init ========
+ * Purpose:
+ * Initializes SERVICES modules.
+ */
+bool services_init(void)
+{
+ bool ret = true;
+ bool fcfg;
+
+ /* Perform required initialization of SERVICES modules. */
+ fcfg = cfg_init();
+
+ ret = fcfg;
+
+ if (!ret) {
+ if (fcfg)
+ cfg_exit();
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/tidspbridge/services/sync.c b/drivers/staging/tidspbridge/services/sync.c
new file mode 100644
index 00000000000..9010b37bf5b
--- /dev/null
+++ b/drivers/staging/tidspbridge/services/sync.c
@@ -0,0 +1,104 @@
+/*
+ * sync.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Synchronization services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/* ----------------------------------- This */
+#include <dspbridge/sync.h>
+
+DEFINE_SPINLOCK(sync_lock);
+
+/**
+ * sync_set_event() - set or signal the specified event
+ * @event: Event to be set.
+ *
+ * Set the @event; if there is a thread waiting for the event,
+ * it will be woken up. This function only wakes one thread.
+ */
+
+void sync_set_event(struct sync_object *event)
+{
+ spin_lock_bh(&sync_lock);
+ complete(&event->comp);
+ if (event->multi_comp)
+ complete(event->multi_comp);
+ spin_unlock_bh(&sync_lock);
+}
+
+/**
+ * sync_wait_on_multiple_events() - waits for multiple events to be set.
+ * @events: Array of events to wait for.
+ * @count: Number of elements in the array.
+ * @timeout: Timeout on waiting for the events.
+ * @index: Index of the event that was set.
+ *
+ * This function will wait until any of the array elements is set or until
+ * the timeout expires. On success the function returns 0 and
+ * @index stores the index of the array element that was set; on
+ * timeout the function returns -ETIME; if interrupted by a signal
+ * it returns -EPERM.
+ */
+
+int sync_wait_on_multiple_events(struct sync_object **events,
+ unsigned count, unsigned timeout,
+ unsigned *index)
+{
+ unsigned i;
+ int status = -EPERM;
+ struct completion m_comp;
+
+ init_completion(&m_comp);
+
+ if (SYNC_INFINITE == timeout)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+
+ spin_lock_bh(&sync_lock);
+ for (i = 0; i < count; i++) {
+ if (completion_done(&events[i]->comp)) {
+ INIT_COMPLETION(events[i]->comp);
+ *index = i;
+ spin_unlock_bh(&sync_lock);
+ status = 0;
+ goto func_end;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ events[i]->multi_comp = &m_comp;
+
+ spin_unlock_bh(&sync_lock);
+
+ if (!wait_for_completion_interruptible_timeout(&m_comp,
+ msecs_to_jiffies(timeout)))
+ status = -ETIME;
+
+ spin_lock_bh(&sync_lock);
+ for (i = 0; i < count; i++) {
+ if (completion_done(&events[i]->comp)) {
+ INIT_COMPLETION(events[i]->comp);
+ *index = i;
+ status = 0;
+ }
+ events[i]->multi_comp = NULL;
+ }
+ spin_unlock_bh(&sync_lock);
+func_end:
+ return status;
+}
+
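
sync_wait_on_multiple_events() above lets one waiter sleep until any of N completions fires by pointing every event at one shared completion. The following is a userspace sketch of the same "wait on any" shape, using a POSIX semaphore as the shared wake-up and per-event flags; it is not kernel code and all names here are hypothetical.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NEVENTS 3

static sem_t any_event;			/* shared "something happened" signal */
static int fired[NEVENTS];		/* per-event state, guarded by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void set_event(int i)
{
	pthread_mutex_lock(&lock);
	fired[i] = 1;
	pthread_mutex_unlock(&lock);
	sem_post(&any_event);		/* wake the multi-event waiter */
}

static void *setter(void *arg)
{
	(void)arg;
	set_event(1);			/* pretend event 1 fires */
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i, index = -1;

	sem_init(&any_event, 0, 0);
	pthread_create(&t, NULL, setter, NULL);

	sem_wait(&any_event);		/* sleep until any event is set */
	pthread_mutex_lock(&lock);
	for (i = 0; i < NEVENTS; i++) {
		if (fired[i]) {
			fired[i] = 0;	/* consume it, as INIT_COMPLETION does */
			index = i;
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	printf("event %d fired\n", index);
	pthread_join(t, NULL);
	sem_destroy(&any_event);
	return 0;
}
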
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 022d0649ac5..30dbfb6d16f 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -25,6 +25,11 @@
#include <linux/module.h>
#include <linux/net.h>
+#define STUB_BUSID_OTHER 0
+#define STUB_BUSID_REMOV 1
+#define STUB_BUSID_ADDED 2
+#define STUB_BUSID_ALLOC 3
+
struct stub_device {
struct usb_interface *interface;
struct list_head list;
@@ -72,6 +77,14 @@ struct stub_unlink {
__u32 status;
};
+#define BUSID_SIZE 20
+struct bus_id_priv {
+ char name[BUSID_SIZE];
+ char status;
+ int interf_count;
+ struct stub_device *sdev;
+ char shutdown_busid;
+};
extern struct kmem_cache *stub_priv_cache;
@@ -91,5 +104,7 @@ void stub_rx_loop(struct usbip_task *);
void stub_enqueue_ret_unlink(struct stub_device *, __u32, __u32);
/* stub_main.c */
-int match_busid(const char *busid);
+struct bus_id_priv *get_busid_priv(const char *busid);
+int del_match_busid(char *busid);
+
void stub_device_cleanup_urbs(struct stub_device *sdev);
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 3f95605427a..b6b753a4934 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -393,11 +393,14 @@ static int stub_probe(struct usb_interface *interface,
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(interface->dev.parent);
int err = 0;
+ struct bus_id_priv *busid_priv;
dev_dbg(&interface->dev, "Enter\n");
/* check we should claim or not by busid_table */
- if (match_busid(udev_busid)) {
+ busid_priv = get_busid_priv(udev_busid);
+ if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
+ (busid_priv->status == STUB_BUSID_OTHER)) {
dev_info(&interface->dev,
"this device %s is not in match_busid table. skip!\n",
udev_busid);
@@ -422,28 +425,80 @@ static int stub_probe(struct usb_interface *interface,
return -ENODEV;
}
+
+ if (busid_priv->status == STUB_BUSID_ALLOC) {
+ busid_priv->interf_count++;
+ sdev = busid_priv->sdev;
+ if (!sdev)
+ return -ENODEV;
+
+ dev_info(&interface->dev,
+ "USB/IP Stub: register a new interface "
+ "(bus %u dev %u ifn %u)\n", udev->bus->busnum, udev->devnum,
+ interface->cur_altsetting->desc.bInterfaceNumber);
+
+ /* set private data to usb_interface */
+ usb_set_intfdata(interface, sdev);
+
+ err = stub_add_files(&interface->dev);
+ if (err) {
+ dev_err(&interface->dev, "create sysfs files for %s\n",
+ udev_busid);
+ usb_set_intfdata(interface, NULL);
+ busid_priv->interf_count--;
+
+ return err;
+ }
+
+ return 0;
+ }
+
/* ok. this is my device. */
sdev = stub_device_alloc(interface);
if (!sdev)
return -ENOMEM;
- dev_info(&interface->dev, "USB/IP Stub: register a new interface "
+ dev_info(&interface->dev, "USB/IP Stub: register a new device "
"(bus %u dev %u ifn %u)\n", udev->bus->busnum, udev->devnum,
interface->cur_altsetting->desc.bInterfaceNumber);
+ busid_priv->interf_count = 0;
+ busid_priv->shutdown_busid = 0;
+
/* set private data to usb_interface */
usb_set_intfdata(interface, sdev);
+ busid_priv->interf_count++;
+
+ busid_priv->sdev = sdev;
err = stub_add_files(&interface->dev);
if (err) {
dev_err(&interface->dev, "create sysfs files for %s\n",
udev_busid);
+ usb_set_intfdata(interface, NULL);
+ busid_priv->interf_count = 0;
+
+ busid_priv->sdev = NULL;
+ stub_device_free(sdev);
return err;
}
+ busid_priv->status = STUB_BUSID_ALLOC;
return 0;
}
+static void shutdown_busid(struct bus_id_priv *busid_priv)
+{
+ if (busid_priv->sdev && !busid_priv->shutdown_busid) {
+ busid_priv->shutdown_busid = 1;
+ usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED);
+
+ /* 2. wait for the stop of the event handler */
+ usbip_stop_eh(&busid_priv->sdev->ud);
+ }
+
+}
+
/*
* called in usb_disconnect() or usb_deregister()
@@ -451,10 +506,21 @@ static int stub_probe(struct usb_interface *interface,
*/
static void stub_disconnect(struct usb_interface *interface)
{
- struct stub_device *sdev = usb_get_intfdata(interface);
+ struct stub_device *sdev;
+ const char *udev_busid = dev_name(interface->dev.parent);
+ struct bus_id_priv *busid_priv;
+
+ busid_priv = get_busid_priv(udev_busid);
usbip_udbg("Enter\n");
+ if (!busid_priv) {
+ BUG();
+ return;
+ }
+
+ sdev = usb_get_intfdata(interface);
+
/* get stub_device */
if (!sdev) {
err(" could not get device from inteface data");
@@ -464,22 +530,39 @@ static void stub_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
-
/*
* NOTE:
* rx/tx threads are invoked for each usb_device.
*/
stub_remove_files(&interface->dev);
- /* 1. shutdown the current connection */
- usbip_event_add(&sdev->ud, SDEV_EVENT_REMOVED);
+ /* If usb reset was called from the event handler */
+ if (busid_priv->sdev->ud.eh.thread == current) {
+ busid_priv->interf_count--;
+ return;
+ }
+
+ if (busid_priv->interf_count > 1) {
+ busid_priv->interf_count--;
+ shutdown_busid(busid_priv);
+ return;
+ }
+
+ busid_priv->interf_count = 0;
- /* 2. wait for the stop of the event handler */
- usbip_stop_eh(&sdev->ud);
+
+ /* 1. shutdown the current connection */
+ shutdown_busid(busid_priv);
/* 3. free sdev */
+ busid_priv->sdev = NULL;
stub_device_free(sdev);
-
+ if (busid_priv->status == STUB_BUSID_ALLOC) {
+ busid_priv->status = STUB_BUSID_ADDED;
+ } else {
+ busid_priv->status = STUB_BUSID_OTHER;
+ del_match_busid((char *)udev_busid);
+ }
usbip_udbg("bye\n");
}
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index 6665cefe573..f3a40968aae 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -41,8 +41,7 @@ struct kmem_cache *stub_priv_cache;
* remote host.
*/
#define MAX_BUSID 16
-#define BUSID_SIZE 20
-static char busid_table[MAX_BUSID][BUSID_SIZE];
+static struct bus_id_priv busid_table[MAX_BUSID];
static spinlock_t busid_table_lock;
@@ -53,8 +52,8 @@ int match_busid(const char *busid)
spin_lock(&busid_table_lock);
for (i = 0; i < MAX_BUSID; i++)
- if (busid_table[i][0])
- if (!strncmp(busid_table[i], busid, BUSID_SIZE)) {
+ if (busid_table[i].name[0])
+ if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
/* already registerd */
spin_unlock(&busid_table_lock);
return 0;
@@ -65,6 +64,25 @@ int match_busid(const char *busid)
return 1;
}
+struct bus_id_priv *get_busid_priv(const char *busid)
+{
+ int i;
+
+ spin_lock(&busid_table_lock);
+
+ for (i = 0; i < MAX_BUSID; i++)
+ if (busid_table[i].name[0])
+ if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
+ /* already registered */
+ spin_unlock(&busid_table_lock);
+ return &(busid_table[i]);
+ }
+
+ spin_unlock(&busid_table_lock);
+
+ return NULL;
+}
+
static ssize_t show_match_busid(struct device_driver *drv, char *buf)
{
int i;
@@ -73,8 +91,8 @@ static ssize_t show_match_busid(struct device_driver *drv, char *buf)
spin_lock(&busid_table_lock);
for (i = 0; i < MAX_BUSID; i++)
- if (busid_table[i][0])
- out += sprintf(out, "%s ", busid_table[i]);
+ if (busid_table[i].name[0])
+ out += sprintf(out, "%s ", busid_table[i].name);
spin_unlock(&busid_table_lock);
@@ -93,8 +111,11 @@ static int add_match_busid(char *busid)
spin_lock(&busid_table_lock);
for (i = 0; i < MAX_BUSID; i++)
- if (!busid_table[i][0]) {
- strncpy(busid_table[i], busid, BUSID_SIZE);
+ if (!busid_table[i].name[0]) {
+ strncpy(busid_table[i].name, busid, BUSID_SIZE);
+ if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
+ (busid_table[i].status != STUB_BUSID_REMOV))
+ busid_table[i].status = STUB_BUSID_ADDED;
spin_unlock(&busid_table_lock);
return 0;
}
@@ -104,16 +125,21 @@ static int add_match_busid(char *busid)
return -1;
}
-static int del_match_busid(char *busid)
+int del_match_busid(char *busid)
{
int i;
spin_lock(&busid_table_lock);
for (i = 0; i < MAX_BUSID; i++)
- if (!strncmp(busid_table[i], busid, BUSID_SIZE)) {
+ if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
/* found */
- memset(busid_table[i], 0, BUSID_SIZE);
+ if (busid_table[i].status == STUB_BUSID_OTHER)
+ memset(busid_table[i].name, 0, BUSID_SIZE);
+ if ((busid_table[i].status != STUB_BUSID_OTHER) &&
+ (busid_table[i].status != STUB_BUSID_ADDED)) {
+ busid_table[i].status = STUB_BUSID_REMOV;
+ }
spin_unlock(&busid_table_lock);
return 0;
}
@@ -122,6 +148,20 @@ static int del_match_busid(char *busid)
return -1;
}
+static void init_busid_table(void)
+{
+ int i;
+
+
+ for (i = 0; i < MAX_BUSID; i++) {
+ memset(busid_table[i].name, 0, BUSID_SIZE);
+ busid_table[i].status = STUB_BUSID_OTHER;
+ busid_table[i].interf_count = 0;
+ busid_table[i].sdev = NULL;
+ busid_table[i].shutdown_busid = 0;
+ }
+ spin_lock_init(&busid_table_lock);
+}
static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
size_t count)
@@ -261,8 +301,7 @@ static int __init usb_stub_init(void)
printk(KERN_INFO KBUILD_MODNAME ":"
DRIVER_DESC ":" DRIVER_VERSION "\n");
- memset(busid_table, 0, sizeof(busid_table));
- spin_lock_init(&busid_table_lock);
+ init_busid_table();
ret = driver_create_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
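
The stub_dev.c and stub_main.c hunks above turn the flat busid string table into a small per-busid state machine (STUB_BUSID_OTHER/REMOV/ADDED/ALLOC). The sketch below models only the main add -> probe -> disconnect path as those hunks implement it; del_match_busid(), which clears an OTHER entry or defers removal via REMOV while a device is still attached, is left out, and the enum and function names here are illustrative rather than the driver's.

#include <stdio.h>

enum busid_status { BUSID_OTHER, BUSID_REMOV, BUSID_ADDED, BUSID_ALLOC };

/* Transition applied by "add <busid>" (add_match_busid in the hunk above). */
static enum busid_status on_add(enum busid_status s)
{
	if (s != BUSID_ALLOC && s != BUSID_REMOV)
		return BUSID_ADDED;
	return s;
}

/* Transition applied when stub_probe() creates the stub_device. */
static enum busid_status on_probe(enum busid_status s)
{
	if (s == BUSID_REMOV || s == BUSID_OTHER)
		return s;		/* device is skipped, no change */
	return BUSID_ALLOC;
}

/* Transition applied by stub_disconnect() once the last interface is gone. */
static enum busid_status on_disconnect(enum busid_status s)
{
	return (s == BUSID_ALLOC) ? BUSID_ADDED : BUSID_OTHER;
}

int main(void)
{
	enum busid_status s = BUSID_OTHER;

	s = on_add(s);		/* OTHER -> ADDED */
	s = on_probe(s);	/* ADDED -> ALLOC */
	s = on_disconnect(s);	/* ALLOC -> ADDED */
	printf("final state: %d (expected %d = ADDED)\n", s, BUSID_ADDED);
	return 0;
}
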
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 5972ae70e38..3de6fd2539d 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -362,54 +362,16 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
return priv;
}
-
-static struct usb_host_endpoint *get_ep_from_epnum(struct usb_device *udev,
- int epnum0)
-{
- struct usb_host_config *config;
- int i = 0, j = 0;
- struct usb_host_endpoint *ep = NULL;
- int epnum;
- int found = 0;
-
- if (epnum0 == 0)
- return &udev->ep0;
-
- config = udev->actconfig;
- if (!config)
- return NULL;
-
- for (i = 0; i < config->desc.bNumInterfaces; i++) {
- struct usb_host_interface *setting;
-
- setting = config->interface[i]->cur_altsetting;
-
- for (j = 0; j < setting->desc.bNumEndpoints; j++) {
- ep = &setting->endpoint[j];
- epnum = (ep->desc.bEndpointAddress & 0x7f);
-
- if (epnum == epnum0) {
- /* usbip_uinfo("found epnum %d\n", epnum0);*/
- found = 1;
- break;
- }
- }
- }
-
- if (found)
- return ep;
- else
- return NULL;
-}
-
-
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
struct usb_device *udev = interface_to_usbdev(sdev->interface);
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
- ep = get_ep_from_epnum(udev, epnum);
+ if (dir == USBIP_DIR_IN)
+ ep = udev->ep_in[epnum & 0x7f];
+ else
+ ep = udev->ep_out[epnum & 0x7f];
if (!ep) {
dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
epnum);
@@ -462,6 +424,60 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
return 0;
}
+static void masking_bogus_flags(struct urb *urb)
+{
+ int xfertype;
+ struct usb_device *dev;
+ struct usb_host_endpoint *ep;
+ int is_out;
+ unsigned int allowed;
+
+ if (!urb || urb->hcpriv || !urb->complete)
+ return;
+ dev = urb->dev;
+ if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
+ return;
+
+ ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
+ [usb_pipeendpoint(urb->pipe)];
+ if (!ep)
+ return;
+
+ xfertype = usb_endpoint_type(&ep->desc);
+ if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
+ struct usb_ctrlrequest *setup =
+ (struct usb_ctrlrequest *) urb->setup_packet;
+
+ if (!setup)
+ return;
+ is_out = !(setup->bRequestType & USB_DIR_IN) ||
+ !setup->wLength;
+ } else {
+ is_out = usb_endpoint_dir_out(&ep->desc);
+ }
+
+ /* enforce simple/standard policy */
+ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
+ URB_DIR_MASK | URB_FREE_BUFFER);
+ switch (xfertype) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (is_out)
+ allowed |= URB_ZERO_PACKET;
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_CONTROL:
+ allowed |= URB_NO_FSBR; /* only affects UHCI */
+ /* FALLTHROUGH */
+ default: /* all non-iso endpoints */
+ if (!is_out)
+ allowed |= URB_SHORT_NOT_OK;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ allowed |= URB_ISO_ASAP;
+ break;
+ }
+ urb->transfer_flags &= allowed;
+}
+
static void stub_recv_cmd_submit(struct stub_device *sdev,
struct usbip_header *pdu)
{
@@ -528,6 +544,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
/* no need to submit an intercepted request, but harmless? */
tweak_special_requests(priv->urb);
+ masking_bogus_flags(priv->urb);
/* urb is now ready to submit */
ret = usb_submit_urb(priv->urb, GFP_KERNEL);
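
masking_bogus_flags() above applies the same kind of sanity masking that usb_submit_urb() performs: it builds a per-transfer-type whitelist of acceptable flags and clears everything else a remote client may have sent in the URB. A minimal sketch of that whitelist-and-mask pattern follows, with made-up flag values rather than the real URB_* bits.

#include <stdio.h>

/* Illustrative flag bits; not the real URB_* values. */
#define FLAG_NO_INTERRUPT	0x01
#define FLAG_ZERO_PACKET	0x02
#define FLAG_SHORT_NOT_OK	0x04
#define FLAG_BOGUS		0x80	/* something a remote peer should not set */

int main(void)
{
	unsigned int flags = FLAG_NO_INTERRUPT | FLAG_SHORT_NOT_OK | FLAG_BOGUS;
	unsigned int allowed = FLAG_NO_INTERRUPT | FLAG_ZERO_PACKET;

	/* Pretend this is an OUT bulk transfer: SHORT_NOT_OK only makes sense
	 * for IN, so it gets stripped along with the bogus bit. */
	flags &= allowed;

	printf("sanitized flags: 0x%x\n", flags);	/* prints 0x1 */
	return 0;
}
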
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index e1bbd1287e2..d280e234e06 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -172,7 +172,7 @@ struct usbip_header_basic {
#define USBIP_RET_UNLINK 0x0004
__u32 command;
- /* sequencial number which identifies requests.
+ /* sequential number which identifies requests.
* incremented per connections */
__u32 seqnum;
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index be5d8db9816..0574d848b90 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -215,7 +215,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
vhci = hcd_to_vhci(hcd);
spin_lock_irqsave(&vhci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
usbip_dbg_vhci_rh("hw accessible flag in on?\n");
goto done;
}
@@ -269,7 +269,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u32 prev_port_status[VHCI_NPORTS];
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
return -ETIMEDOUT;
/*
@@ -1041,7 +1041,7 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irq(&vhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
rc = -ESHUTDOWN;
} else {
/* vhci->rh_state = DUMMY_RH_RUNNING;
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 0f9ea58ff71..06bd793c52b 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -900,7 +900,8 @@ unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
/* Address must be 4-byte aligned */
if (pci_addr & 0x3) {
dev_err(dev, "RMW Address not 4-byte aligned\n");
- return -EINVAL;
+ result = -EINVAL;
+ goto out;
}
/* Ensure RMW Disabled whilst configuring */
@@ -921,6 +922,7 @@ unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
/* Disable RMW */
iowrite32(0, bridge->base + SCYC_CTL);
+out:
spin_unlock(&(image->lock));
mutex_unlock(&(bridge->vme_rmw));
@@ -961,11 +963,11 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
- vme_attr = (struct vme_dma_vme *)dest->private;
- pci_attr = (struct vme_dma_pci *)src->private;
+ vme_attr = dest->private;
+ pci_attr = src->private;
} else {
- vme_attr = (struct vme_dma_vme *)src->private;
- pci_attr = (struct vme_dma_pci *)dest->private;
+ vme_attr = src->private;
+ pci_attr = dest->private;
}
/* Check we can do fullfill required attributes */
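
The ca91cx42_master_rmw() hunk above replaces an early "return -EINVAL" with "result = -EINVAL; goto out;" so the spinlock and mutex taken earlier are always released on the error path. Below is a minimal sketch of that single-exit locking pattern, using a hypothetical pthread mutex in place of the image lock.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Every exit, success or failure, goes through the same unlock at "out". */
static int do_rmw(unsigned long addr)
{
	int result = 0;

	pthread_mutex_lock(&lock);

	if (addr & 0x3) {	/* address must be 4-byte aligned */
		result = -EINVAL;
		goto out;	/* an early return here would leak the lock */
	}

	/* ... perform the read-modify-write cycle ... */

out:
	pthread_mutex_unlock(&lock);
	return result;
}

int main(void)
{
	printf("aligned:   %d\n", do_rmw(0x1000));	/* 0 */
	printf("unaligned: %d\n", do_rmw(0x1002));	/* -EINVAL */
	return 0;
}
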
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index f09cac16313..492ddb2d510 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -1649,7 +1649,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
/* Fill out source part */
switch (src->type) {
case VME_DMA_PATTERN:
- pattern_attr = (struct vme_dma_pattern *)src->private;
+ pattern_attr = src->private;
entry->descriptor.dsal = pattern_attr->pattern;
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
@@ -1663,7 +1663,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
break;
case VME_DMA_PCI:
- pci_attr = (struct vme_dma_pci *)src->private;
+ pci_attr = src->private;
reg_split((unsigned long long)pci_attr->address, &address_high,
&address_low);
@@ -1672,7 +1672,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
break;
case VME_DMA_VME:
- vme_attr = (struct vme_dma_vme *)src->private;
+ vme_attr = src->private;
reg_split((unsigned long long)vme_attr->address, &address_high,
&address_low);
@@ -1701,7 +1701,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
/* Fill out destination part */
switch (dest->type) {
case VME_DMA_PCI:
- pci_attr = (struct vme_dma_pci *)dest->private;
+ pci_attr = dest->private;
reg_split((unsigned long long)pci_attr->address, &address_high,
&address_low);
@@ -1710,7 +1710,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
break;
case VME_DMA_VME:
- vme_attr = (struct vme_dma_vme *)dest->private;
+ vme_attr = dest->private;
reg_split((unsigned long long)vme_attr->address, &address_high,
&address_low);
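
Both bridge drivers above also drop the explicit casts when reading the attribute structures' private pointer: private is a void *, and C converts void * to any object pointer type implicitly, so the casts only add noise. A small standalone illustration with made-up types (dma_attr and dma_pci_attr are placeholders, not the driver's structures):

#include <stdio.h>

struct dma_attr {
	void *private;			/* points at a type-specific block */
};

struct dma_pci_attr {
	unsigned long address;
};

int main(void)
{
	struct dma_pci_attr pci = { .address = 0xc0000000UL };
	struct dma_attr attr = { .private = &pci };
	struct dma_pci_attr *p;

	p = attr.private;	/* was: p = (struct dma_pci_attr *)attr.private; */
	printf("address = %#lx\n", p->address);
	return 0;
}
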
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index bc16fc070fd..8f77bd24630 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -31,15 +31,16 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/types.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include "../vme.h"
#include "vme_user.h"
+static DEFINE_MUTEX(vme_user_mutex);
static char driver_name[] = "vme_user";
static int bus[USER_BUS_MAX];
@@ -48,19 +49,19 @@ static int bus_num;
/* Currently Documentation/devices.txt defines the following for VME:
*
* 221 char VME bus
- * 0 = /dev/bus/vme/m0 First master image
- * 1 = /dev/bus/vme/m1 Second master image
- * 2 = /dev/bus/vme/m2 Third master image
- * 3 = /dev/bus/vme/m3 Fourth master image
- * 4 = /dev/bus/vme/s0 First slave image
- * 5 = /dev/bus/vme/s1 Second slave image
- * 6 = /dev/bus/vme/s2 Third slave image
- * 7 = /dev/bus/vme/s3 Fourth slave image
- * 8 = /dev/bus/vme/ctl Control
+ * 0 = /dev/bus/vme/m0 First master image
+ * 1 = /dev/bus/vme/m1 Second master image
+ * 2 = /dev/bus/vme/m2 Third master image
+ * 3 = /dev/bus/vme/m3 Fourth master image
+ * 4 = /dev/bus/vme/s0 First slave image
+ * 5 = /dev/bus/vme/s1 Second slave image
+ * 6 = /dev/bus/vme/s2 Third slave image
+ * 7 = /dev/bus/vme/s3 Fourth slave image
+ * 8 = /dev/bus/vme/ctl Control
*
- * It is expected that all VME bus drivers will use the
- * same interface. For interface documentation see
- * http://www.vmelinux.org/.
+ * It is expected that all VME bus drivers will use the
+ * same interface. For interface documentation see
+ * http://www.vmelinux.org/.
*
* However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
* even support the tsi148 chipset (which has 8 master and 8 slave windows).
@@ -137,12 +138,12 @@ static int __init vme_user_probe(struct device *, int, int);
static int __exit vme_user_remove(struct device *, int, int);
static struct file_operations vme_user_fops = {
- .open = vme_user_open,
- .release = vme_user_release,
- .read = vme_user_read,
- .write = vme_user_write,
- .llseek = vme_user_llseek,
- .unlocked_ioctl = vme_user_unlocked_ioctl,
+ .open = vme_user_open,
+ .release = vme_user_release,
+ .read = vme_user_read,
+ .write = vme_user_write,
+ .llseek = vme_user_llseek,
+ .unlocked_ioctl = vme_user_unlocked_ioctl,
};
@@ -151,13 +152,13 @@ static struct file_operations vme_user_fops = {
*/
static void reset_counters(void)
{
- statistics.reads = 0;
- statistics.writes = 0;
- statistics.ioctls = 0;
- statistics.irqs = 0;
- statistics.berrs = 0;
- statistics.dmaErrors = 0;
- statistics.timeouts = 0;
+ statistics.reads = 0;
+ statistics.writes = 0;
+ statistics.ioctls = 0;
+ statistics.irqs = 0;
+ statistics.berrs = 0;
+ statistics.dmaErrors = 0;
+ statistics.timeouts = 0;
}
static int vme_user_open(struct inode *inode, struct file *file)
@@ -216,21 +217,20 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
/* We copy to kernel buffer */
copied = vme_master_read(image[minor].resource,
image[minor].kern_buf, count, *ppos);
- if (copied < 0) {
+ if (copied < 0)
return (int)copied;
- }
retval = __copy_to_user(buf, image[minor].kern_buf,
(unsigned long)copied);
if (retval != 0) {
copied = (copied - retval);
- printk("User copy failed\n");
+ printk(KERN_INFO "User copy failed\n");
return -EINVAL;
}
} else {
/* XXX Need to write this */
- printk("Currently don't support large transfers\n");
+ printk(KERN_INFO "Currently don't support large transfers\n");
/* Map in pages from userspace */
/* Call vme_master_read to do the transfer */
@@ -264,7 +264,7 @@ static ssize_t resource_from_user(unsigned int minor, const char *buf,
image[minor].kern_buf, copied, *ppos);
} else {
/* XXX Need to write this */
- printk("Currently don't support large transfers\n");
+ printk(KERN_INFO "Currently don't support large transfers\n");
/* Map in pages from userspace */
/* Call vme_master_write to do the transfer */
@@ -313,7 +313,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char *buf,
}
static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
- loff_t * ppos)
+ loff_t *ppos)
{
unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
ssize_t retval;
@@ -337,7 +337,7 @@ static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
else
okcount = count;
- switch (type[minor]){
+ switch (type[minor]) {
case MASTER_MINOR:
retval = resource_to_user(minor, buf, okcount, ppos);
break;
@@ -380,7 +380,7 @@ static ssize_t vme_user_write(struct file *file, const char *buf, size_t count,
else
okcount = count;
- switch (type[minor]){
+ switch (type[minor]) {
case MASTER_MINOR:
retval = resource_from_user(minor, buf, okcount, ppos);
break;
@@ -560,9 +560,9 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
- lock_kernel();
+ mutex_lock(&vme_user_mutex);
ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- unlock_kernel();
+ mutex_unlock(&vme_user_mutex);
return ret;
}
@@ -571,7 +571,7 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/*
* Unallocate a previously allocated buffer
*/
-static void buf_unalloc (int num)
+static void buf_unalloc(int num)
{
if (image[num].kern_buf) {
#ifdef VME_DEBUG
@@ -594,8 +594,8 @@ static void buf_unalloc (int num)
}
static struct vme_driver vme_user_driver = {
- .name = driver_name,
- .probe = vme_user_probe,
+ .name = driver_name,
+ .probe = vme_user_probe,
.remove = vme_user_remove,
};
@@ -770,16 +770,16 @@ static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
}
/* Add sysfs Entries */
- for (i=0; i<VME_DEVS; i++) {
+ for (i = 0; i < VME_DEVS; i++) {
switch (type[i]) {
case MASTER_MINOR:
- sprintf(name,"bus/vme/m%%d");
+ sprintf(name, "bus/vme/m%%d");
break;
case CONTROL_MINOR:
- sprintf(name,"bus/vme/ctl");
+ sprintf(name, "bus/vme/ctl");
break;
case SLAVE_MINOR:
- sprintf(name,"bus/vme/s%%d");
+ sprintf(name, "bus/vme/s%%d");
break;
default:
err = -EINVAL;
@@ -790,9 +790,9 @@ static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
image[i].device =
device_create(vme_user_sysfs_class, NULL,
MKDEV(VME_MAJOR, i), NULL, name,
- (type[i] == SLAVE_MINOR)? i - (MASTER_MAX + 1) : i);
+ (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i);
if (IS_ERR(image[i].device)) {
- printk("%s: Error creating sysfs device\n",
+ printk(KERN_INFO "%s: Error creating sysfs device\n",
driver_name);
err = PTR_ERR(image[i].device);
goto err_sysfs;
@@ -804,7 +804,7 @@ static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
/* Ensure counter set correcty to destroy all sysfs devices */
i = VME_DEVS;
err_sysfs:
- while (i > 0){
+ while (i > 0) {
i--;
device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
}
@@ -845,9 +845,8 @@ static int __exit vme_user_remove(struct device *dev, int cur_bus, int cur_slot)
int i;
/* Remove sysfs Entries */
- for(i=0; i<VME_DEVS; i++) {
+ for (i = 0; i < VME_DEVS; i++)
device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
- }
class_destroy(vme_user_sysfs_class);
for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
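
The vme_user.c hunks swap the big kernel lock (lock_kernel()/unlock_kernel() from <linux/smp_lock.h>) for a file-scope DEFINE_MUTEX that serialises only this driver's ioctl path. A rough userspace analogue of that shape using pthreads; vme_user_mutex here is just a local pthread mutex and the ioctl body is a placeholder:

#include <pthread.h>
#include <stdio.h>

/* driver-private lock, analogous to: static DEFINE_MUTEX(vme_user_mutex); */
static pthread_mutex_t vme_user_mutex = PTHREAD_MUTEX_INITIALIZER;

static int vme_user_ioctl(unsigned int cmd, unsigned long arg)
{
	/* device-specific work would go here, protected by the private lock */
	(void)cmd;
	(void)arg;
	return 0;
}

static int vme_user_unlocked_ioctl(unsigned int cmd, unsigned long arg)
{
	int ret;

	pthread_mutex_lock(&vme_user_mutex);	/* was: lock_kernel() */
	ret = vme_user_ioctl(cmd, arg);
	pthread_mutex_unlock(&vme_user_mutex);	/* was: unlock_kernel() */
	return ret;
}

int main(void)
{
	printf("ioctl returned %d\n", vme_user_unlocked_ioctl(0, 0));
	return 0;
}

The design point is scope: a per-driver mutex only serialises callers of this driver, whereas the big kernel lock serialised unrelated code across the whole kernel.
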
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
index b7b170e19aa..f55283b8641 100644
--- a/drivers/staging/vt6655/80211hdr.h
+++ b/drivers/staging/vt6655/80211hdr.h
@@ -161,21 +161,21 @@
#ifdef __BIG_ENDIAN
/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) ((((WORD)(n) >> 8) & (BIT0 | BIT1))
-#define WLAN_GET_FC_FTYPE(n) ((((WORD)(n) >> 8) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((WORD)(n) << 8) & (BIT8)) >> 8)
-#define WLAN_GET_FC_FROMDS(n) ((((WORD)(n) << 8) & (BIT9)) >> 9)
-#define WLAN_GET_FC_MOREFRAG(n) ((((WORD)(n) << 8) & (BIT10)) >> 10)
-#define WLAN_GET_FC_RETRY(n) ((((WORD)(n) << 8) & (BIT11)) >> 11)
-#define WLAN_GET_FC_PWRMGT(n) ((((WORD)(n) << 8) & (BIT12)) >> 12)
-#define WLAN_GET_FC_MOREDATA(n) ((((WORD)(n) << 8) & (BIT13)) >> 13)
-#define WLAN_GET_FC_ISWEP(n) ((((WORD)(n) << 8) & (BIT14)) >> 14)
-#define WLAN_GET_FC_ORDER(n) ((((WORD)(n) << 8) & (BIT15)) >> 15)
+#define WLAN_GET_FC_PRVER(n) ((((unsigned short)(n) >> 8) & (BIT0 | BIT1))
+#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n) >> 8) & (BIT2 | BIT3)) >> 2)
+#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
+#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n) << 8) & (BIT8)) >> 8)
+#define WLAN_GET_FC_FROMDS(n) ((((unsigned short)(n) << 8) & (BIT9)) >> 9)
+#define WLAN_GET_FC_MOREFRAG(n) ((((unsigned short)(n) << 8) & (BIT10)) >> 10)
+#define WLAN_GET_FC_RETRY(n) ((((unsigned short)(n) << 8) & (BIT11)) >> 11)
+#define WLAN_GET_FC_PWRMGT(n) ((((unsigned short)(n) << 8) & (BIT12)) >> 12)
+#define WLAN_GET_FC_MOREDATA(n) ((((unsigned short)(n) << 8) & (BIT13)) >> 13)
+#define WLAN_GET_FC_ISWEP(n) ((((unsigned short)(n) << 8) & (BIT14)) >> 14)
+#define WLAN_GET_FC_ORDER(n) ((((unsigned short)(n) << 8) & (BIT15)) >> 15)
/* Sequence Field bit */
-#define WLAN_GET_SEQ_FRGNUM(n) (((WORD)(n) >> 8) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n) >> 8) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
+#define WLAN_GET_SEQ_FRGNUM(n) (((unsigned short)(n) >> 8) & (BIT0|BIT1|BIT2|BIT3))
+#define WLAN_GET_SEQ_SEQNUM(n) ((((unsigned short)(n) >> 8) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
/* Capability Field bit */
@@ -196,22 +196,22 @@
#else
/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) (((WORD)(n)) & (BIT0 | BIT1))
-#define WLAN_GET_FC_FTYPE(n) ((((WORD)(n)) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n)) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((WORD)(n)) & (BIT8)) >> 8)
-#define WLAN_GET_FC_FROMDS(n) ((((WORD)(n)) & (BIT9)) >> 9)
-#define WLAN_GET_FC_MOREFRAG(n) ((((WORD)(n)) & (BIT10)) >> 10)
-#define WLAN_GET_FC_RETRY(n) ((((WORD)(n)) & (BIT11)) >> 11)
-#define WLAN_GET_FC_PWRMGT(n) ((((WORD)(n)) & (BIT12)) >> 12)
-#define WLAN_GET_FC_MOREDATA(n) ((((WORD)(n)) & (BIT13)) >> 13)
-#define WLAN_GET_FC_ISWEP(n) ((((WORD)(n)) & (BIT14)) >> 14)
-#define WLAN_GET_FC_ORDER(n) ((((WORD)(n)) & (BIT15)) >> 15)
+#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n)) & (BIT0 | BIT1))
+#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n)) & (BIT2 | BIT3)) >> 2)
+#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n)) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
+#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n)) & (BIT8)) >> 8)
+#define WLAN_GET_FC_FROMDS(n) ((((unsigned short)(n)) & (BIT9)) >> 9)
+#define WLAN_GET_FC_MOREFRAG(n) ((((unsigned short)(n)) & (BIT10)) >> 10)
+#define WLAN_GET_FC_RETRY(n) ((((unsigned short)(n)) & (BIT11)) >> 11)
+#define WLAN_GET_FC_PWRMGT(n) ((((unsigned short)(n)) & (BIT12)) >> 12)
+#define WLAN_GET_FC_MOREDATA(n) ((((unsigned short)(n)) & (BIT13)) >> 13)
+#define WLAN_GET_FC_ISWEP(n) ((((unsigned short)(n)) & (BIT14)) >> 14)
+#define WLAN_GET_FC_ORDER(n) ((((unsigned short)(n)) & (BIT15)) >> 15)
/* Sequence Field bit */
-#define WLAN_GET_SEQ_FRGNUM(n) (((WORD)(n)) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n)) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
+#define WLAN_GET_SEQ_FRGNUM(n) (((unsigned short)(n)) & (BIT0|BIT1|BIT2|BIT3))
+#define WLAN_GET_SEQ_SEQNUM(n) ((((unsigned short)(n)) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
/* Capability Field bit */
@@ -246,20 +246,20 @@
#define WLAN_SET_CAP_INFO_GRPACK(n) ((n) << 14)
-#define WLAN_SET_FC_PRVER(n) ((WORD)(n))
-#define WLAN_SET_FC_FTYPE(n) (((WORD)(n)) << 2)
-#define WLAN_SET_FC_FSTYPE(n) (((WORD)(n)) << 4)
-#define WLAN_SET_FC_TODS(n) (((WORD)(n)) << 8)
-#define WLAN_SET_FC_FROMDS(n) (((WORD)(n)) << 9)
-#define WLAN_SET_FC_MOREFRAG(n) (((WORD)(n)) << 10)
-#define WLAN_SET_FC_RETRY(n) (((WORD)(n)) << 11)
-#define WLAN_SET_FC_PWRMGT(n) (((WORD)(n)) << 12)
-#define WLAN_SET_FC_MOREDATA(n) (((WORD)(n)) << 13)
-#define WLAN_SET_FC_ISWEP(n) (((WORD)(n)) << 14)
-#define WLAN_SET_FC_ORDER(n) (((WORD)(n)) << 15)
+#define WLAN_SET_FC_PRVER(n) ((unsigned short)(n))
+#define WLAN_SET_FC_FTYPE(n) (((unsigned short)(n)) << 2)
+#define WLAN_SET_FC_FSTYPE(n) (((unsigned short)(n)) << 4)
+#define WLAN_SET_FC_TODS(n) (((unsigned short)(n)) << 8)
+#define WLAN_SET_FC_FROMDS(n) (((unsigned short)(n)) << 9)
+#define WLAN_SET_FC_MOREFRAG(n) (((unsigned short)(n)) << 10)
+#define WLAN_SET_FC_RETRY(n) (((unsigned short)(n)) << 11)
+#define WLAN_SET_FC_PWRMGT(n) (((unsigned short)(n)) << 12)
+#define WLAN_SET_FC_MOREDATA(n) (((unsigned short)(n)) << 13)
+#define WLAN_SET_FC_ISWEP(n) (((unsigned short)(n)) << 14)
+#define WLAN_SET_FC_ORDER(n) (((unsigned short)(n)) << 15)
-#define WLAN_SET_SEQ_FRGNUM(n) ((WORD)(n))
-#define WLAN_SET_SEQ_SEQNUM(n) (((WORD)(n)) << 4)
+#define WLAN_SET_SEQ_FRGNUM(n) ((unsigned short)(n))
+#define WLAN_SET_SEQ_SEQNUM(n) (((unsigned short)(n)) << 4)
/* ERP Field bit */
@@ -282,50 +282,50 @@
#define WLAN_MGMT_GET_TIM_OFFSET(b) (((b) & ~BIT0) >> 1)
/* 3-Addr & 4-Addr */
-#define WLAN_HDR_A3_DATA_PTR(p) (((PBYTE)(p)) + WLAN_HDR_ADDR3_LEN)
-#define WLAN_HDR_A4_DATA_PTR(p) (((PBYTE)(p)) + WLAN_HDR_ADDR4_LEN)
+#define WLAN_HDR_A3_DATA_PTR(p) (((unsigned char *)(p)) + WLAN_HDR_ADDR3_LEN)
+#define WLAN_HDR_A4_DATA_PTR(p) (((unsigned char *)(p)) + WLAN_HDR_ADDR4_LEN)
/* IEEE ADDR */
#define IEEE_ADDR_UNIVERSAL 0x02
#define IEEE_ADDR_GROUP 0x01
typedef struct {
- BYTE abyAddr[6];
+ unsigned char abyAddr[6];
} IEEE_ADDR, *PIEEE_ADDR;
/* 802.11 Header Format */
typedef struct tagWLAN_80211HDR_A2 {
- WORD wFrameCtl;
- WORD wDurationID;
- BYTE abyAddr1[WLAN_ADDR_LEN];
- BYTE abyAddr2[WLAN_ADDR_LEN];
+ unsigned short wFrameCtl;
+ unsigned short wDurationID;
+ unsigned char abyAddr1[WLAN_ADDR_LEN];
+ unsigned char abyAddr2[WLAN_ADDR_LEN];
} __attribute__ ((__packed__))
WLAN_80211HDR_A2, *PWLAN_80211HDR_A2;
typedef struct tagWLAN_80211HDR_A3 {
- WORD wFrameCtl;
- WORD wDurationID;
- BYTE abyAddr1[WLAN_ADDR_LEN];
- BYTE abyAddr2[WLAN_ADDR_LEN];
- BYTE abyAddr3[WLAN_ADDR_LEN];
- WORD wSeqCtl;
+ unsigned short wFrameCtl;
+ unsigned short wDurationID;
+ unsigned char abyAddr1[WLAN_ADDR_LEN];
+ unsigned char abyAddr2[WLAN_ADDR_LEN];
+ unsigned char abyAddr3[WLAN_ADDR_LEN];
+ unsigned short wSeqCtl;
}__attribute__ ((__packed__))
WLAN_80211HDR_A3, *PWLAN_80211HDR_A3;
typedef struct tagWLAN_80211HDR_A4 {
- WORD wFrameCtl;
- WORD wDurationID;
- BYTE abyAddr1[WLAN_ADDR_LEN];
- BYTE abyAddr2[WLAN_ADDR_LEN];
- BYTE abyAddr3[WLAN_ADDR_LEN];
- WORD wSeqCtl;
- BYTE abyAddr4[WLAN_ADDR_LEN];
+ unsigned short wFrameCtl;
+ unsigned short wDurationID;
+ unsigned char abyAddr1[WLAN_ADDR_LEN];
+ unsigned char abyAddr2[WLAN_ADDR_LEN];
+ unsigned char abyAddr3[WLAN_ADDR_LEN];
+ unsigned short wSeqCtl;
+ unsigned char abyAddr4[WLAN_ADDR_LEN];
} __attribute__ ((__packed__))
WLAN_80211HDR_A4, *PWLAN_80211HDR_A4;
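
The 80211hdr.h hunks replace the Windows-style WORD/BYTE/PBYTE typedefs with plain unsigned short/unsigned char without changing the on-air layout of the packed header structs. A small standalone check of that size equivalence; the struct below is a trimmed stand-in for WLAN_80211HDR_A3, not the full definition:

#include <stdio.h>

#define WLAN_ADDR_LEN 6

/* trimmed 3-address header using plain C types, packed as in the driver */
struct wlan_hdr_a3 {
	unsigned short frame_ctl;
	unsigned short duration_id;
	unsigned char  addr1[WLAN_ADDR_LEN];
	unsigned char  addr2[WLAN_ADDR_LEN];
	unsigned char  addr3[WLAN_ADDR_LEN];
	unsigned short seq_ctl;
} __attribute__((__packed__));

int main(void)
{
	/* 2 + 2 + 6 + 6 + 6 + 2 = 24 bytes, exactly as with WORD/BYTE typedefs */
	printf("sizeof(struct wlan_hdr_a3) = %zu\n", sizeof(struct wlan_hdr_a3));
	return 0;
}

On Linux, unsigned short is 16 bits and unsigned char is 8 bits on every supported architecture, which is why the substitution is layout-neutral; unsigned long, as the TODO entry later in this series notes, is the type whose width differs between 32- and 64-bit builds.
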
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
index 38697c86248..1ed0f260b16 100644
--- a/drivers/staging/vt6655/80211mgr.c
+++ b/drivers/staging/vt6655/80211mgr.c
@@ -99,9 +99,9 @@ vMgrEncodeBeacon(
// Fixed Fields
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_CAPINFO);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_BEACON_OFF_SSID;
@@ -133,15 +133,15 @@ vMgrDecodeBeacon(
// Fixed Fields
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_CAPINFO);
// Information elements
- pItem = (PWLAN_IE)((PBYTE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)))
+ pItem = (PWLAN_IE)((unsigned char *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)))
+ WLAN_BEACON_OFF_SSID);
- while( ((PBYTE)pItem) < (pFrame->pBuf + pFrame->len) ){
+ while( ((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len) ){
switch (pItem->byElementID) {
case WLAN_EID_SSID:
@@ -179,7 +179,7 @@ vMgrDecodeBeacon(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -223,7 +223,7 @@ vMgrDecodeBeacon(
break;
}
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
}
return;
@@ -296,7 +296,7 @@ vMgrEncodeDisassociation(
// Fixed Fields
- pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
@@ -323,7 +323,7 @@ vMgrDecodeDisassociation(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
return;
@@ -348,9 +348,9 @@ vMgrEncodeAssocRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT + sizeof(*(pFrame->pwListenInterval));
return;
@@ -377,16 +377,16 @@ vMgrDecodeAssocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
// Information elements
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_SSID);
- while (((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
+ while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID){
case WLAN_EID_SSID:
if (pFrame->pSSID == NULL)
@@ -404,7 +404,7 @@ vMgrDecodeAssocRequest(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -418,7 +418,7 @@ vMgrDecodeAssocRequest(
pItem->byElementID);
break;
}
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
}
return;
}
@@ -442,11 +442,11 @@ vMgrEncodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID
+ sizeof(*(pFrame->pwAid));
@@ -476,11 +476,11 @@ vMgrDecodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_AID);
// Information elements
@@ -488,9 +488,10 @@ vMgrDecodeAssocResponse(
+ WLAN_ASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
+ if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
+ (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
}
@@ -520,9 +521,9 @@ vMgrEncodeReassocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_LISTEN_INT);
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
@@ -553,9 +554,9 @@ vMgrDecodeReassocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_LISTEN_INT);
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
@@ -564,7 +565,7 @@ vMgrDecodeReassocRequest(
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_SSID);
- while(((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
+ while(((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID){
case WLAN_EID_SSID:
@@ -583,7 +584,7 @@ vMgrDecodeReassocRequest(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -597,7 +598,7 @@ vMgrDecodeReassocRequest(
pItem->byElementID);
break;
}
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
}
return;
}
@@ -649,7 +650,7 @@ vMgrDecodeProbeRequest(
// Information elements
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)));
- while( ((PBYTE)pItem) < (pFrame->pBuf + pFrame->len) ) {
+ while( ((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len) ) {
switch (pItem->byElementID) {
case WLAN_EID_SSID:
@@ -672,7 +673,7 @@ vMgrDecodeProbeRequest(
break;
}
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
}
return;
}
@@ -700,9 +701,9 @@ vMgrEncodeProbeResponse(
// Fixed Fields
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_CAP_INFO);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
@@ -737,16 +738,16 @@ vMgrDecodeProbeResponse(
// Fixed Fields
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_CAP_INFO);
// Information elements
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_SSID);
- while( ((PBYTE)pItem) < (pFrame->pBuf + pFrame->len) ) {
+ while( ((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len) ) {
switch (pItem->byElementID) {
case WLAN_EID_SSID:
if (pFrame->pSSID == NULL)
@@ -778,7 +779,7 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -821,7 +822,7 @@ vMgrDecodeProbeResponse(
break;
}
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
}
return;
}
@@ -846,11 +847,11 @@ vMgrEncodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwAuthAlgorithm = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
@@ -879,18 +880,18 @@ vMgrDecodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwAuthAlgorithm = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
// Information elements
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_CHALLENGE);
- if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE)) {
+ if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE)) {
pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
}
@@ -917,7 +918,7 @@ vMgrEncodeDeauthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
@@ -944,7 +945,7 @@ vMgrDecodeDeauthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
return;
@@ -970,11 +971,11 @@ vMgrEncodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
@@ -1005,11 +1006,11 @@ vMgrDecodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
// Fixed Fields
- pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_AID);
//Information elements
@@ -1017,9 +1018,10 @@ vMgrDecodeReassocResponse(
+ WLAN_REASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
- pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
+ pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
+ if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
+ (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
}
return;
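
Most of the 80211mgr.c hunks recast the information-element walk from PBYTE to unsigned char * while keeping the same arithmetic: each element is a one-byte ID, a one-byte length, then len bytes of payload, and the loop advances by 2 + len. A self-contained sketch of that walk over a stack buffer (the element IDs and contents are invented, and the bounds check is slightly stricter than the driver's pItem < pBuf + len test):

#include <stdio.h>

struct wlan_ie {
	unsigned char id;
	unsigned char len;
	/* followed by len bytes of payload */
} __attribute__((__packed__));

static void walk_ies(const unsigned char *buf, size_t buflen)
{
	const struct wlan_ie *ie = (const struct wlan_ie *)buf;

	while ((const unsigned char *)ie + 2 <= buf + buflen &&
	       (const unsigned char *)ie + 2 + ie->len <= buf + buflen) {
		printf("IE id=%d len=%d\n", ie->id, ie->len);
		/* advance past the 2-byte header plus len bytes of payload */
		ie = (const struct wlan_ie *)((const unsigned char *)ie + 2 + ie->len);
	}
}

int main(void)
{
	/* two invented elements: id 0 len 3, then id 1 len 2 */
	unsigned char buf[] = { 0, 3, 'a', 'b', 'c', 1, 2, 0x82, 0x84 };

	walk_ies(buf, sizeof(buf));
	return 0;
}
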
diff --git a/drivers/staging/vt6655/80211mgr.h b/drivers/staging/vt6655/80211mgr.h
index 658fe144f89..3bdab3f56f1 100644
--- a/drivers/staging/vt6655/80211mgr.h
+++ b/drivers/staging/vt6655/80211mgr.h
@@ -19,7 +19,7 @@
*
* File: 80211mgr.h
*
- * Purpose: 802.11 managment frames pre-defines.
+ * Purpose: 802.11 management frames pre-defines.
*
*
* Author: Lyndon Chen
@@ -230,8 +230,8 @@
#pragma pack(1)
typedef struct tagWLAN_IE {
- BYTE byElementID;
- BYTE len;
+ unsigned char byElementID;
+ unsigned char len;
}__attribute__ ((__packed__))
WLAN_IE, *PWLAN_IE;
@@ -239,9 +239,9 @@ WLAN_IE, *PWLAN_IE;
// Service Set Identity (SSID)
#pragma pack(1)
typedef struct tagWLAN_IE_SSID {
- BYTE byElementID;
- BYTE len;
- BYTE abySSID[1];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abySSID[1];
}__attribute__ ((__packed__))
WLAN_IE_SSID, *PWLAN_IE_SSID;
@@ -249,9 +249,9 @@ WLAN_IE_SSID, *PWLAN_IE_SSID;
// Supported Rates
#pragma pack(1)
typedef struct tagWLAN_IE_SUPP_RATES {
- BYTE byElementID;
- BYTE len;
- BYTE abyRates[1];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abyRates[1];
}__attribute__ ((__packed__))
WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
@@ -260,20 +260,20 @@ WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
// FH Parameter Set
#pragma pack(1)
typedef struct _WLAN_IE_FH_PARMS {
- BYTE byElementID;
- BYTE len;
- WORD wDwellTime;
- BYTE byHopSet;
- BYTE byHopPattern;
- BYTE byHopIndex;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned short wDwellTime;
+ unsigned char byHopSet;
+ unsigned char byHopPattern;
+ unsigned char byHopIndex;
} WLAN_IE_FH_PARMS, *PWLAN_IE_FH_PARMS;
// DS Parameter Set
#pragma pack(1)
typedef struct tagWLAN_IE_DS_PARMS {
- BYTE byElementID;
- BYTE len;
- BYTE byCurrChannel;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byCurrChannel;
}__attribute__ ((__packed__))
WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
@@ -281,12 +281,12 @@ WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
// CF Parameter Set
#pragma pack(1)
typedef struct tagWLAN_IE_CF_PARMS {
- BYTE byElementID;
- BYTE len;
- BYTE byCFPCount;
- BYTE byCFPPeriod;
- WORD wCFPMaxDuration;
- WORD wCFPDurRemaining;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byCFPCount;
+ unsigned char byCFPPeriod;
+ unsigned short wCFPMaxDuration;
+ unsigned short wCFPDurRemaining;
}__attribute__ ((__packed__))
WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
@@ -294,12 +294,12 @@ WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
// TIM
#pragma pack(1)
typedef struct tagWLAN_IE_TIM {
- BYTE byElementID;
- BYTE len;
- BYTE byDTIMCount;
- BYTE byDTIMPeriod;
- BYTE byBitMapCtl;
- BYTE byVirtBitMap[1];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byDTIMCount;
+ unsigned char byDTIMPeriod;
+ unsigned char byBitMapCtl;
+ unsigned char byVirtBitMap[1];
}__attribute__ ((__packed__))
WLAN_IE_TIM, *PWLAN_IE_TIM;
@@ -307,9 +307,9 @@ WLAN_IE_TIM, *PWLAN_IE_TIM;
// IBSS Parameter Set
#pragma pack(1)
typedef struct tagWLAN_IE_IBSS_PARMS {
- BYTE byElementID;
- BYTE len;
- WORD wATIMWindow;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned short wATIMWindow;
}__attribute__ ((__packed__))
WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
@@ -317,84 +317,84 @@ WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
// Challenge Text
#pragma pack(1)
typedef struct tagWLAN_IE_CHALLENGE {
- BYTE byElementID;
- BYTE len;
- BYTE abyChallenge[1];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abyChallenge[1];
}__attribute__ ((__packed__))
WLAN_IE_CHALLENGE, *PWLAN_IE_CHALLENGE;
#pragma pack(1)
typedef struct tagWLAN_IE_RSN_EXT {
- BYTE byElementID;
- BYTE len;
- BYTE abyOUI[4];
- WORD wVersion;
- BYTE abyMulticast[4];
- WORD wPKCount;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abyOUI[4];
+ unsigned short wVersion;
+ unsigned char abyMulticast[4];
+ unsigned short wPKCount;
struct {
- BYTE abyOUI[4];
+ unsigned char abyOUI[4];
} PKSList[1]; // the rest is variable so need to
// overlay ieauth structure
} WLAN_IE_RSN_EXT, *PWLAN_IE_RSN_EXT;
#pragma pack(1)
typedef struct tagWLAN_IE_RSN_AUTH {
- WORD wAuthCount;
+ unsigned short wAuthCount;
struct {
- BYTE abyOUI[4];
+ unsigned char abyOUI[4];
} AuthKSList[1];
} WLAN_IE_RSN_AUTH, *PWLAN_IE_RSN_AUTH;
// RSN Identity
#pragma pack(1)
typedef struct tagWLAN_IE_RSN {
- BYTE byElementID;
- BYTE len;
- WORD wVersion;
- BYTE abyRSN[WLAN_MIN_ARRAY];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned short wVersion;
+ unsigned char abyRSN[WLAN_MIN_ARRAY];
} WLAN_IE_RSN, *PWLAN_IE_RSN;
// ERP
#pragma pack(1)
typedef struct tagWLAN_IE_ERP {
- BYTE byElementID;
- BYTE len;
- BYTE byContext;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byContext;
}__attribute__ ((__packed__))
WLAN_IE_ERP, *PWLAN_IE_ERP;
#pragma pack(1)
typedef struct _MEASEURE_REQ {
- BYTE byChannel;
- BYTE abyStartTime[8];
- BYTE abyDuration[2];
+ unsigned char byChannel;
+ unsigned char abyStartTime[8];
+ unsigned char abyDuration[2];
} MEASEURE_REQ, *PMEASEURE_REQ,
MEASEURE_REQ_BASIC, *PMEASEURE_REQ_BASIC,
MEASEURE_REQ_CCA, *PMEASEURE_REQ_CCA,
MEASEURE_REQ_RPI, *PMEASEURE_REQ_RPI;
typedef struct _MEASEURE_REP_BASIC {
- BYTE byChannel;
- BYTE abyStartTime[8];
- BYTE abyDuration[2];
- BYTE byMap;
+ unsigned char byChannel;
+ unsigned char abyStartTime[8];
+ unsigned char abyDuration[2];
+ unsigned char byMap;
} MEASEURE_REP_BASIC, *PMEASEURE_REP_BASIC;
typedef struct _MEASEURE_REP_CCA {
- BYTE byChannel;
- BYTE abyStartTime[8];
- BYTE abyDuration[2];
- BYTE byCCABusyFraction;
+ unsigned char byChannel;
+ unsigned char abyStartTime[8];
+ unsigned char abyDuration[2];
+ unsigned char byCCABusyFraction;
} MEASEURE_REP_CCA, *PMEASEURE_REP_CCA;
typedef struct _MEASEURE_REP_RPI {
- BYTE byChannel;
- BYTE abyStartTime[8];
- BYTE abyDuration[2];
- BYTE abyRPIdensity[8];
+ unsigned char byChannel;
+ unsigned char abyStartTime[8];
+ unsigned char abyDuration[2];
+ unsigned char abyRPIdensity[8];
} MEASEURE_REP_RPI, *PMEASEURE_REP_RPI;
typedef union _MEASEURE_REP {
@@ -406,85 +406,85 @@ typedef union _MEASEURE_REP {
} MEASEURE_REP, *PMEASEURE_REP;
typedef struct _WLAN_IE_MEASURE_REQ {
- BYTE byElementID;
- BYTE len;
- BYTE byToken;
- BYTE byMode;
- BYTE byType;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byToken;
+ unsigned char byMode;
+ unsigned char byType;
MEASEURE_REQ sReq;
} WLAN_IE_MEASURE_REQ, *PWLAN_IE_MEASURE_REQ;
typedef struct _WLAN_IE_MEASURE_REP {
- BYTE byElementID;
- BYTE len;
- BYTE byToken;
- BYTE byMode;
- BYTE byType;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byToken;
+ unsigned char byMode;
+ unsigned char byType;
MEASEURE_REP sRep;
} WLAN_IE_MEASURE_REP, *PWLAN_IE_MEASURE_REP;
typedef struct _WLAN_IE_CH_SW {
- BYTE byElementID;
- BYTE len;
- BYTE byMode;
- BYTE byChannel;
- BYTE byCount;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byMode;
+ unsigned char byChannel;
+ unsigned char byCount;
} WLAN_IE_CH_SW, *PWLAN_IE_CH_SW;
typedef struct _WLAN_IE_QUIET {
- BYTE byElementID;
- BYTE len;
- BYTE byQuietCount;
- BYTE byQuietPeriod;
- BYTE abyQuietDuration[2];
- BYTE abyQuietOffset[2];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byQuietCount;
+ unsigned char byQuietPeriod;
+ unsigned char abyQuietDuration[2];
+ unsigned char abyQuietOffset[2];
} WLAN_IE_QUIET, *PWLAN_IE_QUIET;
typedef struct _WLAN_IE_COUNTRY {
- BYTE byElementID;
- BYTE len;
- BYTE abyCountryString[3];
- BYTE abyCountryInfo[3];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abyCountryString[3];
+ unsigned char abyCountryInfo[3];
} WLAN_IE_COUNTRY, *PWLAN_IE_COUNTRY;
typedef struct _WLAN_IE_PW_CONST {
- BYTE byElementID;
- BYTE len;
- BYTE byPower;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byPower;
} WLAN_IE_PW_CONST, *PWLAN_IE_PW_CONST;
typedef struct _WLAN_IE_PW_CAP {
- BYTE byElementID;
- BYTE len;
- BYTE byMinPower;
- BYTE byMaxPower;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byMinPower;
+ unsigned char byMaxPower;
} WLAN_IE_PW_CAP, *PWLAN_IE_PW_CAP;
typedef struct _WLAN_IE_SUPP_CH {
- BYTE byElementID;
- BYTE len;
- BYTE abyChannelTuple[2];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abyChannelTuple[2];
} WLAN_IE_SUPP_CH, *PWLAN_IE_SUPP_CH;
typedef struct _WLAN_IE_TPC_REQ {
- BYTE byElementID;
- BYTE len;
+ unsigned char byElementID;
+ unsigned char len;
} WLAN_IE_TPC_REQ, *PWLAN_IE_TPC_REQ;
typedef struct _WLAN_IE_TPC_REP {
- BYTE byElementID;
- BYTE len;
- BYTE byTxPower;
- BYTE byLinkMargin;
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char byTxPower;
+ unsigned char byLinkMargin;
} WLAN_IE_TPC_REP, *PWLAN_IE_TPC_REP;
typedef struct _WLAN_IE_IBSS_DFS {
- BYTE byElementID;
- BYTE len;
- BYTE abyDFSOwner[6];
- BYTE byDFSRecovery;
- BYTE abyChannelMap[2];
+ unsigned char byElementID;
+ unsigned char len;
+ unsigned char abyDFSOwner[6];
+ unsigned char byDFSRecovery;
+ unsigned char abyChannelMap[2];
} WLAN_IE_IBSS_DFS, *PWLAN_IE_IBSS_DFS;
#pragma pack()
@@ -495,9 +495,9 @@ typedef struct _WLAN_IE_IBSS_DFS {
// prototype structure, all mgmt frame types will start with these members
typedef struct tagWLAN_FR_MGMT {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
} WLAN_FR_MGMT, *PWLAN_FR_MGMT;
@@ -505,14 +505,14 @@ typedef struct tagWLAN_FR_MGMT {
// Beacon frame
typedef struct tagWLAN_FR_BEACON {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
// fixed fields
PQWORD pqwTimestamp;
- PWORD pwBeaconInterval;
- PWORD pwCapInfo;
+ unsigned short *pwBeaconInterval;
+ unsigned short *pwCapInfo;
/*-- info elements ----------*/
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
@@ -537,9 +537,9 @@ typedef struct tagWLAN_FR_BEACON {
// IBSS ATIM frame
typedef struct tagWLAN_FR_IBSSATIM {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
// fixed fields
@@ -551,12 +551,12 @@ typedef struct tagWLAN_FR_IBSSATIM {
// Disassociation
typedef struct tagWLAN_FR_DISASSOC {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwReason;
+ unsigned short *pwReason;
/*-- info elements ----------*/
} WLAN_FR_DISASSOC, *PWLAN_FR_DISASSOC;
@@ -564,13 +564,13 @@ typedef struct tagWLAN_FR_DISASSOC {
// Association Request
typedef struct tagWLAN_FR_ASSOCREQ {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwCapInfo;
- PWORD pwListenInterval;
+ unsigned short *pwCapInfo;
+ unsigned short *pwListenInterval;
/*-- info elements ----------*/
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
@@ -585,14 +585,14 @@ typedef struct tagWLAN_FR_ASSOCREQ {
// Association Response
typedef struct tagWLAN_FR_ASSOCRESP {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwCapInfo;
- PWORD pwStatus;
- PWORD pwAid;
+ unsigned short *pwCapInfo;
+ unsigned short *pwStatus;
+ unsigned short *pwAid;
/*-- info elements ----------*/
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_SUPP_RATES pExtSuppRates;
@@ -602,14 +602,14 @@ typedef struct tagWLAN_FR_ASSOCRESP {
// Reassociation Request
typedef struct tagWLAN_FR_REASSOCREQ {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwCapInfo;
- PWORD pwListenInterval;
+ unsigned short *pwCapInfo;
+ unsigned short *pwListenInterval;
PIEEE_ADDR pAddrCurrAP;
/*-- info elements ----------*/
@@ -624,14 +624,14 @@ typedef struct tagWLAN_FR_REASSOCREQ {
// Reassociation Response
typedef struct tagWLAN_FR_REASSOCRESP {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwCapInfo;
- PWORD pwStatus;
- PWORD pwAid;
+ unsigned short *pwCapInfo;
+ unsigned short *pwStatus;
+ unsigned short *pwAid;
/*-- info elements ----------*/
PWLAN_IE_SUPP_RATES pSuppRates;
PWLAN_IE_SUPP_RATES pExtSuppRates;
@@ -641,9 +641,9 @@ typedef struct tagWLAN_FR_REASSOCRESP {
// Probe Request
typedef struct tagWLAN_FR_PROBEREQ {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
/*-- info elements ----------*/
@@ -656,14 +656,14 @@ typedef struct tagWLAN_FR_PROBEREQ {
// Probe Response
typedef struct tagWLAN_FR_PROBERESP {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
PQWORD pqwTimestamp;
- PWORD pwBeaconInterval;
- PWORD pwCapInfo;
+ unsigned short *pwBeaconInterval;
+ unsigned short *pwCapInfo;
/*-- info elements ----------*/
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
@@ -685,14 +685,14 @@ typedef struct tagWLAN_FR_PROBERESP {
// Authentication
typedef struct tagWLAN_FR_AUTHEN {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwAuthAlgorithm;
- PWORD pwAuthSequence;
- PWORD pwStatus;
+ unsigned short *pwAuthAlgorithm;
+ unsigned short *pwAuthSequence;
+ unsigned short *pwStatus;
/*-- info elements ----------*/
PWLAN_IE_CHALLENGE pChallenge;
@@ -701,12 +701,12 @@ typedef struct tagWLAN_FR_AUTHEN {
// Deauthenication
typedef struct tagWLAN_FR_DEAUTHEN {
- UINT uType;
- UINT len;
- PBYTE pBuf;
+ unsigned int uType;
+ unsigned int len;
+ unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
- PWORD pwReason;
+ unsigned short *pwReason;
/*-- info elements ----------*/
diff --git a/drivers/staging/vt6655/IEEE11h.c b/drivers/staging/vt6655/IEEE11h.c
index 22f12f5ef90..e07ebd578d4 100644
--- a/drivers/staging/vt6655/IEEE11h.c
+++ b/drivers/staging/vt6655/IEEE11h.c
@@ -38,6 +38,7 @@
#include "device.h"
#include "wmgr.h"
#include "rxtx.h"
+#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
static int msglevel =MSG_LEVEL_INFO;
@@ -46,40 +47,40 @@ static int msglevel =MSG_LEVEL_INFO;
typedef struct _WLAN_FRAME_ACTION {
WLAN_80211HDR_A3 Header;
- BYTE byCategory;
- BYTE byAction;
- BYTE abyVars[1];
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char abyVars[1];
} WLAN_FRAME_ACTION, *PWLAN_FRAME_ACTION;
typedef struct _WLAN_FRAME_MSRREQ {
WLAN_80211HDR_A3 Header;
- BYTE byCategory;
- BYTE byAction;
- BYTE byDialogToken;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
WLAN_IE_MEASURE_REQ sMSRReqEIDs[1];
} WLAN_FRAME_MSRREQ, *PWLAN_FRAME_MSRREQ;
typedef struct _WLAN_FRAME_MSRREP {
WLAN_80211HDR_A3 Header;
- BYTE byCategory;
- BYTE byAction;
- BYTE byDialogToken;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
WLAN_IE_MEASURE_REP sMSRRepEIDs[1];
} WLAN_FRAME_MSRREP, *PWLAN_FRAME_MSRREP;
typedef struct _WLAN_FRAME_TPCREQ {
WLAN_80211HDR_A3 Header;
- BYTE byCategory;
- BYTE byAction;
- BYTE byDialogToken;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
WLAN_IE_TPC_REQ sTPCReqEIDs;
} WLAN_FRAME_TPCREQ, *PWLAN_FRAME_TPCREQ;
typedef struct _WLAN_FRAME_TPCREP {
WLAN_80211HDR_A3 Header;
- BYTE byCategory;
- BYTE byAction;
- BYTE byDialogToken;
+ unsigned char byCategory;
+ unsigned char byAction;
+ unsigned char byDialogToken;
WLAN_IE_TPC_REP sTPCRepEIDs;
} WLAN_FRAME_TPCREP, *PWLAN_FRAME_TPCREP;
@@ -97,10 +98,11 @@ typedef struct _WLAN_FRAME_TPCREP {
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
-static BOOL s_bRxMSRReq(PSMgmtObject pMgmt, PWLAN_FRAME_MSRREQ pMSRReq, UINT uLength)
+static bool s_bRxMSRReq(PSMgmtObject pMgmt, PWLAN_FRAME_MSRREQ pMSRReq,
+ unsigned int uLength)
{
size_t uNumOfEIDs = 0;
- BOOL bResult = TRUE;
+ bool bResult = true;
if (uLength <= WLAN_A3FR_MAXLEN) {
memcpy(pMgmt->abyCurrentMSRReq, pMSRReq, uLength);
@@ -116,7 +118,7 @@ static BOOL s_bRxMSRReq(PSMgmtObject pMgmt, PWLAN_FRAME_MSRREQ pMSRReq, UINT uLe
}
-static BOOL s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, BYTE byRate, BYTE byRSSI)
+static bool s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, unsigned char byRate, unsigned char byRSSI)
{
PWLAN_FRAME_TPCREP pFrame;
PSTxMgmtPacket pTxPacket = NULL;
@@ -124,9 +126,9 @@ static BOOL s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, BYTE byR
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- pFrame = (PWLAN_FRAME_TPCREP)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pFrame = (PWLAN_FRAME_TPCREP)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
pFrame->Header.wFrameCtl = ( WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION)
@@ -174,8 +176,8 @@ static BOOL s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, BYTE byR
pTxPacket->cbMPDULen = sizeof(WLAN_FRAME_TPCREP);
pTxPacket->cbPayloadLen = sizeof(WLAN_FRAME_TPCREP) - WLAN_HDR_ADDR3_LEN;
if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
- return (FALSE);
- return (TRUE);
+ return (false);
+ return (true);
// return (CARDbSendPacket(pMgmt->pAdapter, pFrame, PKT_TYPE_802_11_MNG, sizeof(WLAN_FRAME_TPCREP)));
}
@@ -201,7 +203,7 @@ static BOOL s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, BYTE byR
* Return Value: None.
*
-*/
-BOOL
+bool
IEEE11hbMgrRxAction (
void *pMgmtHandle,
void *pRxPacket
@@ -209,14 +211,14 @@ IEEE11hbMgrRxAction (
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
PWLAN_FRAME_ACTION pAction = NULL;
- UINT uLength = 0;
+ unsigned int uLength = 0;
PWLAN_IE_CH_SW pChannelSwitch = NULL;
// decode the frame
uLength = ((PSRxMgmtPacket)pRxPacket)->cbMPDULen;
if (uLength > WLAN_A3FR_MAXLEN) {
- return (FALSE);
+ return (false);
}
@@ -233,7 +235,7 @@ IEEE11hbMgrRxAction (
return (s_bRxTPCReq(pMgmt,
(PWLAN_FRAME_TPCREQ) pAction,
((PSRxMgmtPacket)pRxPacket)->byRxRate,
- (BYTE) ((PSRxMgmtPacket)pRxPacket)->uRSSI));
+ (unsigned char) ((PSRxMgmtPacket)pRxPacket)->uRSSI));
break;
case ACTION_TPCREP:
break;
@@ -244,7 +246,7 @@ IEEE11hbMgrRxAction (
// valid element id
CARDbChannelSwitch( pMgmt->pAdapter,
pChannelSwitch->byMode,
- CARDbyGetChannelMapping(pMgmt->pAdapter, pChannelSwitch->byChannel, pMgmt->eCurrentPHYMode),
+ get_channel_mapping(pMgmt->pAdapter, pChannelSwitch->byChannel, pMgmt->eCurrentPHYMode),
pChannelSwitch->byCount
);
}
@@ -258,13 +260,13 @@ IEEE11hbMgrRxAction (
pAction->byCategory |= 0x80;
//return (CARDbSendPacket(pMgmt->pAdapter, pAction, PKT_TYPE_802_11_MNG, uLength));
- return (TRUE);
+ return (true);
}
- return (TRUE);
+ return (true);
}
-BOOL IEEE11hbMSRRepTx (
+bool IEEE11hbMSRRepTx (
void *pMgmtHandle
)
{
@@ -275,7 +277,7 @@ BOOL IEEE11hbMSRRepTx (
pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
pMSRRep->Header.wFrameCtl = ( WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
@@ -295,8 +297,8 @@ BOOL IEEE11hbMSRRepTx (
pTxPacket->cbMPDULen = uLength;
pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN;
if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
- return (FALSE);
- return (TRUE);
+ return (false);
+ return (true);
// return (CARDbSendPacket(pMgmt->pAdapter, pMSRRep, PKT_TYPE_802_11_MNG, uLength));
}
diff --git a/drivers/staging/vt6655/IEEE11h.h b/drivers/staging/vt6655/IEEE11h.h
index ae32498a511..542340b96e3 100644
--- a/drivers/staging/vt6655/IEEE11h.h
+++ b/drivers/staging/vt6655/IEEE11h.h
@@ -45,7 +45,7 @@
/*--------------------- Export Functions --------------------------*/
-BOOL IEEE11hbMSRRepTx (
+bool IEEE11hbMSRRepTx (
void *pMgmtHandle
);
diff --git a/drivers/staging/vt6655/Makefile b/drivers/staging/vt6655/Makefile
index 931deb109ee..824c9718787 100644
--- a/drivers/staging/vt6655/Makefile
+++ b/drivers/staging/vt6655/Makefile
@@ -4,6 +4,7 @@ EXTRA_CFLAGS += -DHOSTAP
vt6655_stage-y += device_main.o \
card.o \
+ channel.o \
mac.o \
baseband.o \
wctl.o \
diff --git a/drivers/staging/vt6655/TODO b/drivers/staging/vt6655/TODO
index cb04aaafc46..63607ef9c97 100644
--- a/drivers/staging/vt6655/TODO
+++ b/drivers/staging/vt6655/TODO
@@ -3,7 +3,6 @@ TODO:
- prepare for merge with vt6656 driver:
- rename DEVICE_PRT() to DBG_PRT() -- done
- share 80211*.h includes
- - move code for channel mapping from card.c to channel.c
- split rf.c
- remove dead code
- abstract VT3253 chipset specific code
@@ -11,6 +10,8 @@ TODO:
- kill ttype.h
- switch to use LIB80211
- switch to use MAC80211
+- verify unsigned long usage for x86-64 arch
+- reduce .data footprint
- use kernel coding style
- checkpatch.pl fixes
- sparse fixes
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
index fef1b91c292..e30168f2da2 100644
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ b/drivers/staging/vt6655/aes_ccmp.c
@@ -46,7 +46,7 @@
* SBOX Table
*/
-BYTE sbox_table[256] =
+unsigned char sbox_table[256] =
{
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
@@ -66,7 +66,7 @@ BYTE sbox_table[256] =
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
-BYTE dot2_table[256] = {
+unsigned char dot2_table[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
@@ -85,7 +85,7 @@ BYTE dot2_table[256] = {
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
-BYTE dot3_table[256] = {
+unsigned char dot3_table[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
@@ -110,11 +110,11 @@ BYTE dot3_table[256] = {
/*--------------------- Export Functions --------------------------*/
-void xor_128(BYTE *a, BYTE *b, BYTE *out)
+void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
{
-PDWORD dwPtrA = (PDWORD) a;
-PDWORD dwPtrB = (PDWORD) b;
-PDWORD dwPtrOut =(PDWORD) out;
+unsigned long *dwPtrA = (unsigned long *) a;
+unsigned long *dwPtrB = (unsigned long *) b;
+unsigned long *dwPtrOut =(unsigned long *) out;
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
@@ -123,19 +123,19 @@ PDWORD dwPtrOut =(PDWORD) out;
}
-void xor_32(BYTE *a, BYTE *b, BYTE *out)
+void xor_32(unsigned char *a, unsigned char *b, unsigned char *out)
{
-PDWORD dwPtrA = (PDWORD) a;
-PDWORD dwPtrB = (PDWORD) b;
-PDWORD dwPtrOut =(PDWORD) out;
+unsigned long *dwPtrA = (unsigned long *) a;
+unsigned long *dwPtrB = (unsigned long *) b;
+unsigned long *dwPtrOut =(unsigned long *) out;
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
-void AddRoundKey(BYTE *key, int round)
+void AddRoundKey(unsigned char *key, int round)
{
-BYTE sbox_key[4];
-BYTE rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
+unsigned char sbox_key[4];
+unsigned char rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
sbox_key[0] = sbox_table[key[13]];
sbox_key[1] = sbox_table[key[14]];
@@ -150,7 +150,7 @@ BYTE rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x
xor_32(&key[12], &key[8], &key[12]);
}
-void SubBytes(BYTE *in, BYTE *out)
+void SubBytes(unsigned char *in, unsigned char *out)
{
int i;
@@ -160,7 +160,7 @@ int i;
}
}
-void ShiftRows(BYTE *in, BYTE *out)
+void ShiftRows(unsigned char *in, unsigned char *out)
{
out[0] = in[0];
out[1] = in[5];
@@ -180,7 +180,7 @@ void ShiftRows(BYTE *in, BYTE *out)
out[15] = in[11];
}
-void MixColumns(BYTE *in, BYTE *out)
+void MixColumns(unsigned char *in, unsigned char *out)
{
out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
@@ -190,13 +190,13 @@ void MixColumns(BYTE *in, BYTE *out)
}
-void AESv128(BYTE *key, BYTE *data, BYTE *ciphertext)
+void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciphertext)
{
int i;
int round;
-BYTE TmpdataA[16];
-BYTE TmpdataB[16];
-BYTE abyRoundKey[16];
+unsigned char TmpdataA[16];
+unsigned char TmpdataB[16];
+unsigned char abyRoundKey[16];
for(i=0; i<16; i++)
abyRoundKey[i] = key[i];
@@ -243,33 +243,33 @@ BYTE abyRoundKey[16];
* Return Value: MIC compare result
*
*/
-BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
+bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize)
{
-BYTE abyNonce[13];
-BYTE MIC_IV[16];
-BYTE MIC_HDR1[16];
-BYTE MIC_HDR2[16];
-BYTE abyMIC[16];
-BYTE abyCTRPLD[16];
-BYTE abyTmp[16];
-BYTE abyPlainText[16];
-BYTE abyLastCipher[16];
+unsigned char abyNonce[13];
+unsigned char MIC_IV[16];
+unsigned char MIC_HDR1[16];
+unsigned char MIC_HDR2[16];
+unsigned char abyMIC[16];
+unsigned char abyCTRPLD[16];
+unsigned char abyTmp[16];
+unsigned char abyPlainText[16];
+unsigned char abyLastCipher[16];
PS802_11Header pMACHeader = (PS802_11Header) pbyFrame;
-PBYTE pbyIV;
-PBYTE pbyPayload;
-WORD wHLen = 22;
-WORD wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;//8 is IV, 8 is MIC, 4 is CRC
-BOOL bA4 = FALSE;
-BYTE byTmp;
-WORD wCnt;
+unsigned char *pbyIV;
+unsigned char *pbyPayload;
+unsigned short wHLen = 22;
+unsigned short wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;//8 is IV, 8 is MIC, 4 is CRC
+bool bA4 = false;
+unsigned char byTmp;
+unsigned short wCnt;
int ii,jj,kk;
pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if ( WLAN_GET_FC_TODS(*(PWORD)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(PWORD)pbyFrame) ) {
- bA4 = TRUE;
+ if ( WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
+ WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame) ) {
+ bA4 = true;
pbyIV += 6; // 6 is 802.11 address4
wHLen += 6;
wPayloadSize -= 6;
@@ -288,15 +288,15 @@ int ii,jj,kk;
//MIC_IV
MIC_IV[0] = 0x59;
memcpy(&(MIC_IV[1]), &(abyNonce[0]), 13);
- MIC_IV[14] = (BYTE)(wPayloadSize >> 8);
- MIC_IV[15] = (BYTE)(wPayloadSize & 0xff);
+ MIC_IV[14] = (unsigned char)(wPayloadSize >> 8);
+ MIC_IV[15] = (unsigned char)(wPayloadSize & 0xff);
//MIC_HDR1
- MIC_HDR1[0] = (BYTE)(wHLen >> 8);
- MIC_HDR1[1] = (BYTE)(wHLen & 0xff);
- byTmp = (BYTE)(pMACHeader->wFrameCtl & 0xff);
+ MIC_HDR1[0] = (unsigned char)(wHLen >> 8);
+ MIC_HDR1[1] = (unsigned char)(wHLen & 0xff);
+ byTmp = (unsigned char)(pMACHeader->wFrameCtl & 0xff);
MIC_HDR1[2] = byTmp & 0x8f;
- byTmp = (BYTE)(pMACHeader->wFrameCtl >> 8);
+ byTmp = (unsigned char)(pMACHeader->wFrameCtl >> 8);
byTmp &= 0x87;
MIC_HDR1[3] = byTmp | 0x40;
memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, ETH_ALEN);
@@ -304,7 +304,7 @@ int ii,jj,kk;
//MIC_HDR2
memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, ETH_ALEN);
- byTmp = (BYTE)(pMACHeader->wSeqCtl & 0xff);
+ byTmp = (unsigned char)(pMACHeader->wSeqCtl & 0xff);
MIC_HDR2[6] = byTmp & 0x0f;
MIC_HDR2[7] = 0;
if ( bA4 ) {
@@ -337,8 +337,8 @@ int ii,jj,kk;
for(jj=wPayloadSize; jj>16; jj=jj-16) {
- abyCTRPLD[14] = (BYTE) (wCnt >> 8);
- abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
+ abyCTRPLD[14] = (unsigned char) (wCnt >> 8);
+ abyCTRPLD[15] = (unsigned char) (wCnt & 0xff);
AESv128(pbyRxKey,abyCTRPLD,abyTmp);
@@ -361,8 +361,8 @@ int ii,jj,kk;
abyLastCipher[ii] = 0x00;
}
- abyCTRPLD[14] = (BYTE) (wCnt >> 8);
- abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
+ abyCTRPLD[14] = (unsigned char) (wCnt >> 8);
+ abyCTRPLD[15] = (unsigned char) (wCnt & 0xff);
AESv128(pbyRxKey,abyCTRPLD,abyTmp);
for ( kk=0; kk<16; kk++ ) {
@@ -384,8 +384,8 @@ int ii,jj,kk;
//--------------------------------------------
wCnt = 0;
- abyCTRPLD[14] = (BYTE) (wCnt >> 8);
- abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
+ abyCTRPLD[14] = (unsigned char) (wCnt >> 8);
+ abyCTRPLD[15] = (unsigned char) (wCnt & 0xff);
AESv128(pbyRxKey,abyCTRPLD,abyTmp);
for ( kk=0; kk<8; kk++ ) {
abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
@@ -394,9 +394,9 @@ int ii,jj,kk;
//--------------------------------------------
if ( !memcmp(abyMIC,abyTmp,8) ) {
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
}
diff --git a/drivers/staging/vt6655/aes_ccmp.h b/drivers/staging/vt6655/aes_ccmp.h
index f2ba1d5aa1e..c8b28b0e9bd 100644
--- a/drivers/staging/vt6655/aes_ccmp.h
+++ b/drivers/staging/vt6655/aes_ccmp.h
@@ -41,6 +41,6 @@
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize);
+bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize);
#endif //__AES_H__
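
For orientation, the substitutions in the hunks above reduce to one mechanical mapping from the driver's Windows-style typedefs to plain C types. A minimal sketch of that mapping follows; the legacy names on the left are assumptions about the driver's own ttype.h definitions (not shown in this excerpt), and only the right-hand types appear in the converted code.

/*
 * Sketch of the type substitutions applied throughout these hunks.
 * The left-hand typedefs are assumed, not quoted from the driver.
 */
typedef unsigned char      BYTE,  *PBYTE;	/* -> unsigned char,  unsigned char *  */
typedef unsigned short     WORD,  *PWORD;	/* -> unsigned short, unsigned short * */
typedef unsigned long      DWORD, *PDWORD;	/* -> unsigned long,  unsigned long *  */
typedef unsigned long      ULONG, DWORD_PTR;	/* -> unsigned long                    */
typedef unsigned long long ULONGLONG;		/* -> unsigned long long               */
typedef unsigned int       UINT,  *PUINT;	/* -> unsigned int,   unsigned int *   */
typedef long               LONG;		/* -> long                             */
typedef int                BOOL;		/* -> bool, with TRUE/FALSE -> true/false */
#define TRUE  1
#define FALSE 0
/* QWORD is left untouched by these hunks. */
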
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 5414c6c6c05..1e1c6e34f78 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -79,7 +79,7 @@ static int msglevel =MSG_LEVEL_INFO;
#define CB_VT3253_INIT_FOR_RFMD 446
-BYTE byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
+unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
{0x00, 0x30},
{0x01, 0x00},
{0x02, 0x00},
@@ -529,7 +529,7 @@ BYTE byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
};
#define CB_VT3253B0_INIT_FOR_RFMD 256
-BYTE byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
+unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -790,7 +790,7 @@ BYTE byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
#define CB_VT3253B0_AGC_FOR_RFMD2959 195
// For RFMD2959
-BYTE byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
+unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x80},
@@ -990,7 +990,7 @@ BYTE byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
// For AIROHA
-BYTE byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
+unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -1254,7 +1254,7 @@ BYTE byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
#define CB_VT3253B0_INIT_FOR_UW2451 256
//For UW2451
-BYTE byVT3253B0_UW2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
+unsigned char byVT3253B0_UW2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -1516,7 +1516,7 @@ BYTE byVT3253B0_UW2451[CB_VT3253B0_INIT_FOR_UW2451][2] = {
#define CB_VT3253B0_AGC 193
// For AIROHA
-BYTE byVT3253B0_AGC[CB_VT3253B0_AGC][2] = {
+unsigned char byVT3253B0_AGC[CB_VT3253B0_AGC][2] = {
{0xF0, 0x00},
{0xF1, 0x00},
{0xF0, 0x80},
@@ -1712,14 +1712,14 @@ BYTE byVT3253B0_AGC[CB_VT3253B0_AGC][2] = {
{0xF0, 0x00},
};
-const WORD awcFrameTime[MAX_RATE] =
+const unsigned short awcFrameTime[MAX_RATE] =
{10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216};
/*--------------------- Static Functions --------------------------*/
static
-ULONG
+unsigned long
s_ulGetRatio(PSDevice pDevice);
static
@@ -1740,13 +1740,13 @@ s_vChangeAntenna (
#endif
if ( pDevice->dwRxAntennaSel == 0) {
pDevice->dwRxAntennaSel=1;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
BBvSetRxAntennaMode(pDevice->PortOffset, ANT_A);
else
BBvSetRxAntennaMode(pDevice->PortOffset, ANT_B);
} else {
pDevice->dwRxAntennaSel=0;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
BBvSetRxAntennaMode(pDevice->PortOffset, ANT_B);
else
BBvSetRxAntennaMode(pDevice->PortOffset, ANT_A);
@@ -1776,19 +1776,19 @@ s_vChangeAntenna (
* Return Value: FrameTime
*
*/
-UINT
+unsigned int
BBuGetFrameTime (
- BYTE byPreambleType,
- BYTE byPktType,
- UINT cbFrameLength,
- WORD wRate
+ unsigned char byPreambleType,
+ unsigned char byPktType,
+ unsigned int cbFrameLength,
+ unsigned short wRate
)
{
- UINT uFrameTime;
- UINT uPreamble;
- UINT uTmp;
- UINT uRateIdx = (UINT)wRate;
- UINT uRate = 0;
+ unsigned int uFrameTime;
+ unsigned int uPreamble;
+ unsigned int uTmp;
+ unsigned int uRateIdx = (unsigned int) wRate;
+ unsigned int uRate = 0;
if (uRateIdx > RATE_54M) {
@@ -1796,7 +1796,7 @@ BBuGetFrameTime (
return 0;
}
- uRate = (UINT)awcFrameTime[uRateIdx];
+ uRate = (unsigned int) awcFrameTime[uRateIdx];
if (uRateIdx <= 3) { //CCK mode
@@ -1846,23 +1846,23 @@ BBuGetFrameTime (
void
BBvCaculateParameter (
PSDevice pDevice,
- UINT cbFrameLength,
- WORD wRate,
- BYTE byPacketType,
- PWORD pwPhyLen,
- PBYTE pbyPhySrv,
- PBYTE pbyPhySgn
+ unsigned int cbFrameLength,
+ unsigned short wRate,
+ unsigned char byPacketType,
+ unsigned short *pwPhyLen,
+ unsigned char *pbyPhySrv,
+ unsigned char *pbyPhySgn
)
{
- UINT cbBitCount;
- UINT cbUsCount = 0;
- UINT cbTmp;
- BOOL bExtBit;
- BYTE byPreambleType = pDevice->byPreambleType;
- BOOL bCCK = pDevice->bCCK;
+ unsigned int cbBitCount;
+ unsigned int cbUsCount = 0;
+ unsigned int cbTmp;
+ bool bExtBit;
+ unsigned char byPreambleType = pDevice->byPreambleType;
+ bool bCCK = pDevice->bCCK;
cbBitCount = cbFrameLength * 8;
- bExtBit = FALSE;
+ bExtBit = false;
switch (wRate) {
case RATE_1M :
@@ -1879,7 +1879,7 @@ BBvCaculateParameter (
break;
case RATE_5M :
- if (bCCK == FALSE)
+ if (bCCK == false)
cbBitCount ++;
cbUsCount = (cbBitCount * 10) / 55;
cbTmp = (cbUsCount * 55) / 10;
@@ -1893,14 +1893,14 @@ BBvCaculateParameter (
case RATE_11M :
- if (bCCK == FALSE)
+ if (bCCK == false)
cbBitCount ++;
cbUsCount = cbBitCount / 11;
cbTmp = cbUsCount * 11;
if (cbTmp != cbBitCount) {
cbUsCount ++;
if ((cbBitCount - cbTmp) <= 3)
- bExtBit = TRUE;
+ bExtBit = true;
}
if (byPreambleType == 1)
*pbyPhySgn = 0x0b;
@@ -1994,11 +1994,11 @@ BBvCaculateParameter (
*pbyPhySrv = 0x00;
if (bExtBit)
*pbyPhySrv = *pbyPhySrv | 0x80;
- *pwPhyLen = (WORD)cbUsCount;
+ *pwPhyLen = (unsigned short)cbUsCount;
}
else {
*pbyPhySrv = 0x00;
- *pwPhyLen = (WORD)cbFrameLength;
+ *pwPhyLen = (unsigned short)cbFrameLength;
}
}
@@ -2012,13 +2012,13 @@ BBvCaculateParameter (
* Out:
* pbyData - data read
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL BBbReadEmbeded (DWORD_PTR dwIoBase, BYTE byBBAddr, PBYTE pbyData)
+bool BBbReadEmbeded (unsigned long dwIoBase, unsigned char byBBAddr, unsigned char *pbyData)
{
- WORD ww;
- BYTE byValue;
+ unsigned short ww;
+ unsigned char byValue;
// BB reg offset
VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
@@ -2038,9 +2038,9 @@ BOOL BBbReadEmbeded (DWORD_PTR dwIoBase, BYTE byBBAddr, PBYTE pbyData)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x30);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x30)\n");
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
@@ -2055,13 +2055,13 @@ BOOL BBbReadEmbeded (DWORD_PTR dwIoBase, BYTE byBBAddr, PBYTE pbyData)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL BBbWriteEmbeded (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byData)
+bool BBbWriteEmbeded (unsigned long dwIoBase, unsigned char byBBAddr, unsigned char byData)
{
- WORD ww;
- BYTE byValue;
+ unsigned short ww;
+ unsigned char byValue;
// BB reg offset
VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
@@ -2080,9 +2080,9 @@ BOOL BBbWriteEmbeded (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byData)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x31);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x31)\n");
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
@@ -2097,12 +2097,12 @@ BOOL BBbWriteEmbeded (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byData)
* Out:
* none
*
- * Return Value: TRUE if all TestBits are set; FALSE otherwise.
+ * Return Value: true if all TestBits are set; false otherwise.
*
*/
-BOOL BBbIsRegBitsOn (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits)
+bool BBbIsRegBitsOn (unsigned long dwIoBase, unsigned char byBBAddr, unsigned char byTestBits)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
BBbReadEmbeded(dwIoBase, byBBAddr, &byOrgData);
return (byOrgData & byTestBits) == byTestBits;
@@ -2120,12 +2120,12 @@ BOOL BBbIsRegBitsOn (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits)
* Out:
* none
*
- * Return Value: TRUE if all TestBits are clear; FALSE otherwise.
+ * Return Value: true if all TestBits are clear; false otherwise.
*
*/
-BOOL BBbIsRegBitsOff (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits)
+bool BBbIsRegBitsOff (unsigned long dwIoBase, unsigned char byBBAddr, unsigned char byTestBits)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
BBbReadEmbeded(dwIoBase, byBBAddr, &byOrgData);
return (byOrgData & byTestBits) == 0;
@@ -2142,17 +2142,17 @@ BOOL BBbIsRegBitsOff (DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL BBbVT3253Init (PSDevice pDevice)
+bool BBbVT3253Init (PSDevice pDevice)
{
- BOOL bResult = TRUE;
+ bool bResult = true;
int ii;
- DWORD_PTR dwIoBase = pDevice->PortOffset;
- BYTE byRFType = pDevice->byRFType;
- BYTE byLocalID = pDevice->byLocalID;
+ unsigned long dwIoBase = pDevice->PortOffset;
+ unsigned char byRFType = pDevice->byRFType;
+ unsigned char byLocalID = pDevice->byLocalID;
if (byRFType == RF_RFMD2959) {
if (byLocalID <= REV_ID_VT3253_A1) {
@@ -2294,7 +2294,7 @@ BOOL BBbVT3253Init (PSDevice pDevice)
//}} RobertYu
} else {
// No VGA Table now
- pDevice->bUpdateBBVGA = FALSE;
+ pDevice->bUpdateBBVGA = false;
pDevice->abyBBVGA[0] = 0x1C;
}
@@ -2321,12 +2321,12 @@ BOOL BBbVT3253Init (PSDevice pDevice)
* Return Value: none
*
*/
-void BBvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyBBRegs)
+void BBvReadAllRegs (unsigned long dwIoBase, unsigned char *pbyBBRegs)
{
int ii;
- BYTE byBase = 1;
+ unsigned char byBase = 1;
for (ii = 0; ii < BB_MAX_CONTEXT_SIZE; ii++) {
- BBbReadEmbeded(dwIoBase, (BYTE)(ii*byBase), pbyBBRegs);
+ BBbReadEmbeded(dwIoBase, (unsigned char)(ii*byBase), pbyBBRegs);
pbyBBRegs += byBase;
}
}
@@ -2348,8 +2348,8 @@ void BBvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyBBRegs)
void BBvLoopbackOn (PSDevice pDevice)
{
- BYTE byData;
- DWORD_PTR dwIoBase = pDevice->PortOffset;
+ unsigned char byData;
+ unsigned long dwIoBase = pDevice->PortOffset;
//CR C9 = 0x00
BBbReadEmbeded(dwIoBase, 0xC9, &pDevice->byBBCRc9);//CR201
@@ -2363,7 +2363,7 @@ void BBvLoopbackOn (PSDevice pDevice)
if (pDevice->uConnectionRate <= RATE_11M) { //CCK
// Enable internal digital loopback: CR33 |= 0000 0001
BBbReadEmbeded(dwIoBase, 0x21, &byData);//CR33
- BBbWriteEmbeded(dwIoBase, 0x21, (BYTE)(byData | 0x01));//CR33
+ BBbWriteEmbeded(dwIoBase, 0x21, (unsigned char)(byData | 0x01));//CR33
// CR154 = 0x00
BBbWriteEmbeded(dwIoBase, 0x9A, 0); //CR154
@@ -2372,7 +2372,7 @@ void BBvLoopbackOn (PSDevice pDevice)
else { //OFDM
// Enable internal digital loopback:CR154 |= 0000 0001
BBbReadEmbeded(dwIoBase, 0x9A, &byData);//CR154
- BBbWriteEmbeded(dwIoBase, 0x9A, (BYTE)(byData | 0x01));//CR154
+ BBbWriteEmbeded(dwIoBase, 0x9A, (unsigned char)(byData | 0x01));//CR154
// CR33 = 0x00
BBbWriteEmbeded(dwIoBase, 0x21, 0); //CR33
@@ -2384,7 +2384,7 @@ void BBvLoopbackOn (PSDevice pDevice)
// Disable TX_IQUN
BBbReadEmbeded(pDevice->PortOffset, 0x09, &pDevice->byBBCR09);
- BBbWriteEmbeded(pDevice->PortOffset, 0x09, (BYTE)(pDevice->byBBCR09 & 0xDE));
+ BBbWriteEmbeded(pDevice->PortOffset, 0x09, (unsigned char)(pDevice->byBBCR09 & 0xDE));
}
/*
@@ -2402,8 +2402,8 @@ void BBvLoopbackOn (PSDevice pDevice)
*/
void BBvLoopbackOff (PSDevice pDevice)
{
- BYTE byData;
- DWORD_PTR dwIoBase = pDevice->PortOffset;
+ unsigned char byData;
+ unsigned long dwIoBase = pDevice->PortOffset;
BBbWriteEmbeded(dwIoBase, 0xC9, pDevice->byBBCRc9);//CR201
BBbWriteEmbeded(dwIoBase, 0x88, pDevice->byBBCR88);//CR136
@@ -2413,14 +2413,14 @@ void BBvLoopbackOff (PSDevice pDevice)
if (pDevice->uConnectionRate <= RATE_11M) { // CCK
// Set the CR33 Bit2 to disable internal Loopback.
BBbReadEmbeded(dwIoBase, 0x21, &byData);//CR33
- BBbWriteEmbeded(dwIoBase, 0x21, (BYTE)(byData & 0xFE));//CR33
+ BBbWriteEmbeded(dwIoBase, 0x21, (unsigned char)(byData & 0xFE));//CR33
}
else { // OFDM
BBbReadEmbeded(dwIoBase, 0x9A, &byData);//CR154
- BBbWriteEmbeded(dwIoBase, 0x9A, (BYTE)(byData & 0xFE));//CR154
+ BBbWriteEmbeded(dwIoBase, 0x9A, (unsigned char)(byData & 0xFE));//CR154
}
BBbReadEmbeded(dwIoBase, 0x0E, &byData);//CR14
- BBbWriteEmbeded(dwIoBase, 0x0E, (BYTE)(byData | 0x80));//CR14
+ BBbWriteEmbeded(dwIoBase, 0x0E, (unsigned char)(byData | 0x80));//CR14
}
@@ -2441,8 +2441,8 @@ void BBvLoopbackOff (PSDevice pDevice)
void
BBvSetShortSlotTime (PSDevice pDevice)
{
- BYTE byBBRxConf=0;
- BYTE byBBVGA=0;
+ unsigned char byBBRxConf=0;
+ unsigned char byBBVGA=0;
BBbReadEmbeded(pDevice->PortOffset, 0x0A, &byBBRxConf);//CR10
@@ -2462,9 +2462,9 @@ BBvSetShortSlotTime (PSDevice pDevice)
}
-void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
+void BBvSetVGAGainOffset(PSDevice pDevice, unsigned char byData)
{
- BYTE byBBRxConf=0;
+ unsigned char byBBRxConf=0;
BBbWriteEmbeded(pDevice->PortOffset, 0xE7, byData);
@@ -2495,7 +2495,7 @@ void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
*
*/
void
-BBvSoftwareReset (DWORD_PTR dwIoBase)
+BBvSoftwareReset (unsigned long dwIoBase)
{
BBbWriteEmbeded(dwIoBase, 0x50, 0x40);
BBbWriteEmbeded(dwIoBase, 0x50, 0);
@@ -2516,9 +2516,9 @@ BBvSoftwareReset (DWORD_PTR dwIoBase)
*
*/
void
-BBvPowerSaveModeON (DWORD_PTR dwIoBase)
+BBvPowerSaveModeON (unsigned long dwIoBase)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
BBbReadEmbeded(dwIoBase, 0x0D, &byOrgData);
byOrgData |= BIT0;
@@ -2538,9 +2538,9 @@ BBvPowerSaveModeON (DWORD_PTR dwIoBase)
*
*/
void
-BBvPowerSaveModeOFF (DWORD_PTR dwIoBase)
+BBvPowerSaveModeOFF (unsigned long dwIoBase)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
BBbReadEmbeded(dwIoBase, 0x0D, &byOrgData);
byOrgData &= ~(BIT0);
@@ -2562,9 +2562,9 @@ BBvPowerSaveModeOFF (DWORD_PTR dwIoBase)
*/
void
-BBvSetTxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
+BBvSetTxAntennaMode (unsigned long dwIoBase, unsigned char byAntennaMode)
{
- BYTE byBBTxConf;
+ unsigned char byBBTxConf;
#ifdef PLICE_DEBUG
//printk("Enter BBvSetTxAntennaMode\n");
@@ -2604,9 +2604,9 @@ BBvSetTxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
*/
void
-BBvSetRxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
+BBvSetRxAntennaMode (unsigned long dwIoBase, unsigned char byAntennaMode)
{
- BYTE byBBRxConf;
+ unsigned char byBBRxConf;
BBbReadEmbeded(dwIoBase, 0x0A, &byBBRxConf);//CR10
if (byAntennaMode == ANT_DIVERSITY) {
@@ -2635,14 +2635,14 @@ BBvSetRxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
*
*/
void
-BBvSetDeepSleep (DWORD_PTR dwIoBase, BYTE byLocalID)
+BBvSetDeepSleep (unsigned long dwIoBase, unsigned char byLocalID)
{
BBbWriteEmbeded(dwIoBase, 0x0C, 0x17);//CR12
BBbWriteEmbeded(dwIoBase, 0x0D, 0xB9);//CR13
}
void
-BBvExitDeepSleep (DWORD_PTR dwIoBase, BYTE byLocalID)
+BBvExitDeepSleep (unsigned long dwIoBase, unsigned char byLocalID)
{
BBbWriteEmbeded(dwIoBase, 0x0C, 0x00);//CR12
BBbWriteEmbeded(dwIoBase, 0x0D, 0x01);//CR13
@@ -2651,12 +2651,12 @@ BBvExitDeepSleep (DWORD_PTR dwIoBase, BYTE byLocalID)
static
-ULONG
+unsigned long
s_ulGetRatio (PSDevice pDevice)
{
-ULONG ulRatio = 0;
-ULONG ulMaxPacket;
-ULONG ulPacketNum;
+unsigned long ulRatio = 0;
+unsigned long ulMaxPacket;
+unsigned long ulPacketNum;
//This is a thousand-ratio
ulMaxPacket = pDevice->uNumSQ3[RATE_54M];
@@ -2762,7 +2762,7 @@ ULONG ulPacketNum;
void
BBvClearAntDivSQ3Value (PSDevice pDevice)
{
- UINT ii;
+ unsigned int ii;
pDevice->uDiversityCnt = 0;
for (ii = 0; ii < MAX_RATE; ii++) {
@@ -2787,7 +2787,7 @@ BBvClearAntDivSQ3Value (PSDevice pDevice)
*/
void
-BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
+BBvAntennaDiversity (PSDevice pDevice, unsigned char byRxRate, unsigned char bySQ3)
{
if ((byRxRate >= MAX_RATE) || (pDevice->wAntDiversityMaxRate >= MAX_RATE)) {
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index b236ff4139a..8294bdbb7b5 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -118,45 +118,45 @@
/*--------------------- Export Functions --------------------------*/
-UINT
+unsigned int
BBuGetFrameTime(
- BYTE byPreambleType,
- BYTE byPktType,
- UINT cbFrameLength,
- WORD wRate
+ unsigned char byPreambleType,
+ unsigned char byPktType,
+ unsigned int cbFrameLength,
+ unsigned short wRate
);
void
BBvCaculateParameter (
PSDevice pDevice,
- UINT cbFrameLength,
- WORD wRate,
- BYTE byPacketType,
- PWORD pwPhyLen,
- PBYTE pbyPhySrv,
- PBYTE pbyPhySgn
+ unsigned int cbFrameLength,
+ unsigned short wRate,
+ unsigned char byPacketType,
+ unsigned short *pwPhyLen,
+ unsigned char *pbyPhySrv,
+ unsigned char *pbyPhySgn
);
-BOOL BBbReadEmbeded(DWORD_PTR dwIoBase, BYTE byBBAddr, PBYTE pbyData);
-BOOL BBbWriteEmbeded(DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byData);
+bool BBbReadEmbeded(unsigned long dwIoBase, unsigned char byBBAddr, unsigned char *pbyData);
+bool BBbWriteEmbeded(unsigned long dwIoBase, unsigned char byBBAddr, unsigned char byData);
-void BBvReadAllRegs(DWORD_PTR dwIoBase, PBYTE pbyBBRegs);
+void BBvReadAllRegs(unsigned long dwIoBase, unsigned char *pbyBBRegs);
void BBvLoopbackOn(PSDevice pDevice);
void BBvLoopbackOff(PSDevice pDevice);
void BBvSetShortSlotTime(PSDevice pDevice);
-BOOL BBbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits);
-BOOL BBbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits);
-void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
+bool BBbIsRegBitsOn(unsigned long dwIoBase, unsigned char byBBAddr, unsigned char byTestBits);
+bool BBbIsRegBitsOff(unsigned long dwIoBase, unsigned char byBBAddr, unsigned char byTestBits);
+void BBvSetVGAGainOffset(PSDevice pDevice, unsigned char byData);
// VT3253 Baseband
-BOOL BBbVT3253Init(PSDevice pDevice);
-void BBvSoftwareReset(DWORD_PTR dwIoBase);
-void BBvPowerSaveModeON(DWORD_PTR dwIoBase);
-void BBvPowerSaveModeOFF(DWORD_PTR dwIoBase);
-void BBvSetTxAntennaMode(DWORD_PTR dwIoBase, BYTE byAntennaMode);
-void BBvSetRxAntennaMode(DWORD_PTR dwIoBase, BYTE byAntennaMode);
-void BBvSetDeepSleep(DWORD_PTR dwIoBase, BYTE byLocalID);
-void BBvExitDeepSleep(DWORD_PTR dwIoBase, BYTE byLocalID);
+bool BBbVT3253Init(PSDevice pDevice);
+void BBvSoftwareReset(unsigned long dwIoBase);
+void BBvPowerSaveModeON(unsigned long dwIoBase);
+void BBvPowerSaveModeOFF(unsigned long dwIoBase);
+void BBvSetTxAntennaMode(unsigned long dwIoBase, unsigned char byAntennaMode);
+void BBvSetRxAntennaMode(unsigned long dwIoBase, unsigned char byAntennaMode);
+void BBvSetDeepSleep(unsigned long dwIoBase, unsigned char byLocalID);
+void BBvExitDeepSleep(unsigned long dwIoBase, unsigned char byLocalID);
// timer for antenna diversity
@@ -170,7 +170,7 @@ TimerState1CallBack(
void *hDeviceContext
);
-void BBvAntennaDiversity(PSDevice pDevice, BYTE byRxRate, BYTE bySQ3);
+void BBvAntennaDiversity(PSDevice pDevice, unsigned char byRxRate, unsigned char bySQ3);
void
BBvClearAntDivSQ3Value (PSDevice pDevice);
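
As a usage sketch for the converted prototypes above: the driver's recurring read-modify-write of a baseband register, written against the new signatures. The helper name is hypothetical, and the CR33 offset (0x21) and bit-0 mask are taken from the BBvLoopbackOn/BBvLoopbackOff hunks; treat this as an illustration, not code from the patch.

#include "baseband.h"	/* converted BBbReadEmbeded()/BBbWriteEmbeded() prototypes */

/* Sketch: set bit 0 of baseband register CR33 (offset 0x21). dwIoBase is
 * the mapped I/O base, assumed to come from pDevice->PortOffset as in the
 * hunks above. Returns false if either register access times out. */
static bool bb_set_cr33_bit0(unsigned long dwIoBase)
{
	unsigned char byData;

	if (!BBbReadEmbeded(dwIoBase, 0x21, &byData))	/* CR33 */
		return false;
	return BBbWriteEmbeded(dwIoBase, 0x21,
			       (unsigned char)(byData | 0x01));
}
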
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 6312a55dab1..57c1cc90afc 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -53,6 +53,7 @@
#include "baseband.h"
#include "rf.h"
#include "card.h"
+#include "channel.h"
#include "mac.h"
#include "wpa2.h"
#include "iowpa.h"
@@ -71,14 +72,14 @@ static int msglevel =MSG_LEVEL_INFO;
-const WORD awHWRetry0[5][5] = {
+const unsigned short awHWRetry0[5][5] = {
{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
{RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
{RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
{RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
{RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
};
-const WORD awHWRetry1[5][5] = {
+const unsigned short awHWRetry1[5][5] = {
{RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
{RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
{RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
@@ -126,25 +127,25 @@ void s_vCheckPreEDThreshold(
PKnownBSS
BSSpSearchBSSList(
void *hDeviceContext,
- PBYTE pbyDesireBSSID,
- PBYTE pbyDesireSSID,
+ unsigned char *pbyDesireBSSID,
+ unsigned char *pbyDesireSSID,
CARD_PHY_TYPE ePhyType
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- PBYTE pbyBSSID = NULL;
+ unsigned char *pbyBSSID = NULL;
PWLAN_IE_SSID pSSID = NULL;
PKnownBSS pCurrBSS = NULL;
PKnownBSS pSelect = NULL;
-BYTE ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
- UINT ii = 0;
-// UINT jj = 0; //DavidWang
+ unsigned char ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
+ unsigned int ii = 0;
+
if (pbyDesireBSSID != NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList BSSID[%02X %02X %02X-%02X %02X %02X]\n",
*pbyDesireBSSID,*(pbyDesireBSSID+1),*(pbyDesireBSSID+2),
*(pbyDesireBSSID+3),*(pbyDesireBSSID+4),*(pbyDesireBSSID+5));
- if ((!IS_BROADCAST_ADDRESS(pbyDesireBSSID)) &&
+ if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
(memcmp(pbyDesireBSSID, ZeroBSSID, 6)!= 0)){
pbyBSSID = pbyDesireBSSID;
}
@@ -159,10 +160,10 @@ BYTE ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
// match BSSID first
for (ii = 0; ii <MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
-if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
+if(pDevice->bLinkPass==false) pCurrBSS->bSelected = false;
if ((pCurrBSS->bActive) &&
- (pCurrBSS->bSelected == FALSE)) {
- if (IS_ETH_ADDRESS_EQUAL(pCurrBSS->abyBSSID, pbyBSSID)) {
+ (pCurrBSS->bSelected == false)) {
+ if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) {
if (pSSID != NULL) {
// compare ssid
if ( !memcmp(pSSID->abySSID,
@@ -172,7 +173,7 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
) {
- pCurrBSS->bSelected = TRUE;
+ pCurrBSS->bSelected = true;
return(pCurrBSS);
}
}
@@ -181,7 +182,7 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
) {
- pCurrBSS->bSelected = TRUE;
+ pCurrBSS->bSelected = true;
return(pCurrBSS);
}
}
@@ -193,7 +194,7 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
for (ii = 0; ii <MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
//2007-0721-01<Add>by MikeLiu
- pCurrBSS->bSelected = FALSE;
+ pCurrBSS->bSelected = false;
if (pCurrBSS->bActive) {
if (pSSID != NULL) {
@@ -224,19 +225,19 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
}
/*
if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
- if (pCurrBSS->bWPAValid == TRUE) {
+ if (pCurrBSS->bWPAValid == true) {
// WPA AP will reject connection of station without WPA enable.
continue;
}
} else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- if (pCurrBSS->bWPAValid == FALSE) {
+ if (pCurrBSS->bWPAValid == false) {
// station with WPA enable can't join NonWPA AP.
continue;
}
} else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (pCurrBSS->bWPA2Valid == FALSE) {
+ if (pCurrBSS->bWPA2Valid == false) {
// station with WPA2 enable can't join NonWPA2 AP.
continue;
}
@@ -253,9 +254,9 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
}
}
if (pSelect != NULL) {
- pSelect->bSelected = TRUE;
+ pSelect->bSelected = true;
/*
- if (pDevice->bRoaming == FALSE) {
+ if (pDevice->bRoaming == false) {
// Einsn Add @20070907
memset(pbyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1) ;
@@ -283,18 +284,18 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
void
BSSvClearBSSList(
void *hDeviceContext,
- BOOL bKeepCurrBSSID
+ bool bKeepCurrBSSID
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (bKeepCurrBSSID) {
if (pMgmt->sBSSList[ii].bActive &&
- IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyCurrBSSID)) {
- // bKeepCurrBSSID = FALSE;
+ !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyCurrBSSID)) {
+ // bKeepCurrBSSID = false;
continue;
}
}
@@ -304,7 +305,7 @@ BSSvClearBSSList(
continue;
}
- pMgmt->sBSSList[ii].bActive = FALSE;
+ pMgmt->sBSSList[ii].bActive = false;
memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
}
BSSvClearAnyBSSJoinRecord(pDevice);
@@ -320,25 +321,25 @@ BSSvClearBSSList(
* search BSS list by BSSID & SSID if matched
*
* Return Value:
- * TRUE if found.
+ * true if found.
*
-*/
PKnownBSS
BSSpAddrIsInBSSList(
void *hDeviceContext,
- PBYTE abyBSSID,
+ unsigned char *abyBSSID,
PWLAN_IE_SSID pSSID
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
PKnownBSS pBSSList = NULL;
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSSList = &(pMgmt->sBSSList[ii]);
if (pBSSList->bActive) {
- if (IS_ETH_ADDRESS_EQUAL(pBSSList->abyBSSID, abyBSSID)) {
+ if (!compare_ether_addr(pBSSList->abyBSSID, abyBSSID)) {
// if (pSSID == NULL)
// return pBSSList;
if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){
@@ -362,18 +363,18 @@ BSSpAddrIsInBSSList(
* Insert a BSS set into known BSS list
*
* Return Value:
- * TRUE if success.
+ * true if success.
*
-*/
-BOOL
+bool
BSSbInsertToBSSList (
void *hDeviceContext,
- PBYTE abyBSSIDAddr,
+ unsigned char *abyBSSIDAddr,
QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
+ unsigned short wBeaconInterval,
+ unsigned short wCapInfo,
+ unsigned char byCurrChannel,
PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pSuppRates,
PWLAN_IE_SUPP_RATES pExtSuppRates,
@@ -382,8 +383,8 @@ BSSbInsertToBSSList (
PWLAN_IE_RSN_EXT pRSNWPA,
PWLAN_IE_COUNTRY pIE_Country,
PWLAN_IE_QUIET pIE_Quiet,
- UINT uIELength,
- PBYTE pbyIEs,
+ unsigned int uIELength,
+ unsigned char *pbyIEs,
void *pRxPacketContext
)
{
@@ -392,8 +393,8 @@ BSSbInsertToBSSList (
PSMgmtObject pMgmt = pDevice->pMgmt;
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
PKnownBSS pBSSList = NULL;
- UINT ii;
- BOOL bParsingQuiet = FALSE;
+ unsigned int ii;
+ bool bParsingQuiet = false;
PWLAN_IE_QUIET pQuiet = NULL;
@@ -408,10 +409,10 @@ BSSbInsertToBSSList (
if (ii == MAX_BSS_NUM){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
- return FALSE;
+ return false;
}
// save the BSS info
- pBSSList->bActive = TRUE;
+ pBSSList->bActive = true;
memcpy( pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
LODWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(LODWORD(qwTimestamp));
@@ -445,7 +446,7 @@ BSSbInsertToBSSList (
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == TRUE) {
+ if (pBSSList->sERP.bERPExist == true) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
} else {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
@@ -461,16 +462,16 @@ BSSbInsertToBSSList (
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// assoc with BSS
if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = TRUE;
+ bParsingQuiet = true;
}
}
WPA_ClearRSN(pBSSList);
if (pRSNWPA != NULL) {
- UINT uLen = pRSNWPA->len + 2;
+ unsigned int uLen = pRSNWPA->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSNWPA - pbyIEs))) {
+ if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
pBSSList->wWPALen = uLen;
memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
WPA_ParseRSN(pBSSList, pRSNWPA);
@@ -480,33 +481,33 @@ BSSbInsertToBSSList (
WPA2_ClearRSN(pBSSList);
if (pRSN != NULL) {
- UINT uLen = pRSN->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSN - pbyIEs))) {
+ unsigned int uLen = pRSN->len + 2;
+ if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
pBSSList->wRSNLen = uLen;
memcpy(pBSSList->byRSNIE, pRSN, uLen);
WPA2vParseRSN(pBSSList, pRSN);
}
}
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == TRUE)) {
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {
PSKeyItem pTransmitKey = NULL;
- BOOL bIs802_1x = FALSE;
+ bool bIs802_1x = false;
for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii ++) {
if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
- bIs802_1x = TRUE;
+ bIs802_1x = true;
break;
}
}
- if ((bIs802_1x == TRUE) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
+ if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
- if ((pDevice->bLinkPass == TRUE) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE) ||
- (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == TRUE)) {
+ if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
+ if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
+ (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
pDevice->gsPMKIDCandidate.Version = 1;
@@ -519,46 +520,45 @@ BSSbInsertToBSSList (
if (pDevice->bUpdateBBVGA) {
// Moniter if RSSI is too strong.
pBSSList->byRSSIStatCnt = 0;
- RFvRSSITodBm(pDevice, (BYTE)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
+ RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
pBSSList->ldBmAverage[ii] = 0;
}
if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == TRUE)) {
- CARDvSetCountryInfo(pMgmt->pAdapter,
- pBSSList->eNetworkTypeInUse,
+ (pMgmt->b11hEnable == true)) {
+ set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
pIE_Country);
}
- if ((bParsingQuiet == TRUE) && (pIE_Quiet != NULL)) {
+ if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
(((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
// valid EID
if (pQuiet == NULL) {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet( pMgmt->pAdapter,
- TRUE,
+ true,
pQuiet->byQuietCount,
pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
+ *((unsigned short *)pQuiet->abyQuietDuration),
+ *((unsigned short *)pQuiet->abyQuietOffset)
);
} else {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet( pMgmt->pAdapter,
- FALSE,
+ false,
pQuiet->byQuietCount,
pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
+ *((unsigned short *)pQuiet->abyQuietDuration),
+ *((unsigned short *)pQuiet->abyQuietOffset)
);
}
}
}
- if ((bParsingQuiet == TRUE) &&
+ if ((bParsingQuiet == true) &&
(pQuiet != NULL)) {
CARDbStartQuiet(pMgmt->pAdapter);
}
@@ -568,7 +568,7 @@ BSSbInsertToBSSList (
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
- return TRUE;
+ return true;
}
@@ -578,19 +578,19 @@ BSSbInsertToBSSList (
* Update BSS set in known BSS list
*
* Return Value:
- * TRUE if success.
+ * true if success.
*
-*/
// TODO: input structure modify
-BOOL
+bool
BSSbUpdateToBSSList (
void *hDeviceContext,
QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
- BOOL bChannelHit,
+ unsigned short wBeaconInterval,
+ unsigned short wCapInfo,
+ unsigned char byCurrChannel,
+ bool bChannelHit,
PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pSuppRates,
PWLAN_IE_SUPP_RATES pExtSuppRates,
@@ -600,8 +600,8 @@ BSSbUpdateToBSSList (
PWLAN_IE_COUNTRY pIE_Country,
PWLAN_IE_QUIET pIE_Quiet,
PKnownBSS pBSSList,
- UINT uIELength,
- PBYTE pbyIEs,
+ unsigned int uIELength,
+ unsigned char *pbyIEs,
void *pRxPacketContext
)
{
@@ -609,14 +609,14 @@ BSSbUpdateToBSSList (
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- LONG ldBm;
- BOOL bParsingQuiet = FALSE;
+ long ldBm;
+ bool bParsingQuiet = false;
PWLAN_IE_QUIET pQuiet = NULL;
if (pBSSList == NULL)
- return FALSE;
+ return false;
HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
@@ -646,7 +646,7 @@ BSSbUpdateToBSSList (
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == TRUE) {
+ if (pBSSList->sERP.bERPExist == true) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
} else {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
@@ -663,15 +663,15 @@ BSSbUpdateToBSSList (
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// assoc with BSS
if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = TRUE;
+ bParsingQuiet = true;
}
}
WPA_ClearRSN(pBSSList); //mike update
if (pRSNWPA != NULL) {
- UINT uLen = pRSNWPA->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSNWPA - pbyIEs))) {
+ unsigned int uLen = pRSNWPA->len + 2;
+ if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
pBSSList->wWPALen = uLen;
memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
WPA_ParseRSN(pBSSList, pRSNWPA);
@@ -681,8 +681,8 @@ BSSbUpdateToBSSList (
WPA2_ClearRSN(pBSSList); //mike update
if (pRSN != NULL) {
- UINT uLen = pRSN->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSN - pbyIEs))) {
+ unsigned int uLen = pRSN->len + 2;
+ if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
pBSSList->wRSNLen = uLen;
memcpy(pBSSList->byRSNIE, pRSN, uLen);
WPA2vParseRSN(pBSSList, pRSN);
@@ -690,7 +690,7 @@ BSSbUpdateToBSSList (
}
if (pRxPacket->uRSSI != 0) {
- RFvRSSITodBm(pDevice, (BYTE)(pRxPacket->uRSSI), &ldBm);
+ RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &ldBm);
// Moniter if RSSI is too strong.
pBSSList->byRSSIStatCnt++;
pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
@@ -703,39 +703,38 @@ BSSbUpdateToBSSList (
}
if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == TRUE)) {
- CARDvSetCountryInfo(pMgmt->pAdapter,
- pBSSList->eNetworkTypeInUse,
+ (pMgmt->b11hEnable == true)) {
+ set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
pIE_Country);
}
- if ((bParsingQuiet == TRUE) && (pIE_Quiet != NULL)) {
+ if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
(((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
// valid EID
if (pQuiet == NULL) {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet( pMgmt->pAdapter,
- TRUE,
+ true,
pQuiet->byQuietCount,
pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
+ *((unsigned short *)pQuiet->abyQuietDuration),
+ *((unsigned short *)pQuiet->abyQuietOffset)
);
} else {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet( pMgmt->pAdapter,
- FALSE,
+ false,
pQuiet->byQuietCount,
pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
+ *((unsigned short *)pQuiet->abyQuietDuration),
+ *((unsigned short *)pQuiet->abyQuietOffset)
);
}
}
}
- if ((bParsingQuiet == TRUE) &&
+ if ((bParsingQuiet == true) &&
(pQuiet != NULL)) {
CARDbStartQuiet(pMgmt->pAdapter);
}
@@ -745,7 +744,7 @@ BSSbUpdateToBSSList (
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
- return TRUE;
+ return true;
}
@@ -762,27 +761,24 @@ BSSbUpdateToBSSList (
*
-*/
-BOOL
-BSSDBbIsSTAInNodeDB(
- void *pMgmtObject,
- PBYTE abyDstAddr,
- PUINT puNodeIndex
- )
+bool
+BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
+ unsigned int *puNodeIndex)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- UINT ii;
+ unsigned int ii;
// Index = 0 reserved for AP Node
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- if (IS_ETH_ADDRESS_EQUAL(abyDstAddr, pMgmt->sNodeDBTable[ii].abyMACAddr)) {
+ if (!compare_ether_addr(abyDstAddr, pMgmt->sNodeDBTable[ii].abyMACAddr)) {
*puNodeIndex = ii;
- return TRUE;
+ return true;
}
}
}
- return FALSE;
+ return false;
};
@@ -798,17 +794,14 @@ BSSDBbIsSTAInNodeDB(
*
-*/
void
-BSSvCreateOneNode(
- void *hDeviceContext,
- PUINT puNodeIndex
- )
+BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT ii;
- UINT BigestCount = 0;
- UINT SelectIndex;
+ unsigned int ii;
+ unsigned int BigestCount = 0;
+ unsigned int SelectIndex;
struct sk_buff *skb;
// Index = 0 reserved for AP Node (In STA mode)
// Index = 0 reserved for Broadcast/MultiCast (In AP mode)
@@ -840,7 +833,7 @@ BSSvCreateOneNode(
}
memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[*puNodeIndex].bActive = TRUE;
+ pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
// for AP mode PS queue
skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
@@ -865,13 +858,13 @@ BSSvCreateOneNode(
void
BSSvRemoveOneNode(
void *hDeviceContext,
- UINT uNodeIndex
+ unsigned int uNodeIndex
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
struct sk_buff *skb;
@@ -898,18 +891,18 @@ BSSvRemoveOneNode(
void
BSSvUpdateAPNode(
void *hDeviceContext,
- PWORD pwCapInfo,
+ unsigned short *pwCapInfo,
PWLAN_IE_SUPP_RATES pSuppRates,
PWLAN_IE_SUPP_RATES pExtSuppRates
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uRateLen = WLAN_RATES_MAXLEN;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[0].bActive = TRUE;
+ pMgmt->sNodeDBTable[0].bActive = true;
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
uRateLen = WLAN_RATES_MAXLEN_11B;
}
@@ -922,7 +915,7 @@ BSSvUpdateAPNode(
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[0].wMaxBasicRate),
&(pMgmt->sNodeDBTable[0].wMaxSuppRate),
&(pMgmt->sNodeDBTable[0].wSuppRate),
@@ -969,13 +962,13 @@ BSSvAddMulticastNode(
if (!pDevice->bEnableHostWEP)
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].bActive = TRUE;
- pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = true;
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[0].wMaxBasicRate),
&(pMgmt->sNodeDBTable[0].wMaxSuppRate),
&(pMgmt->sNodeDBTable[0].wSuppRate),
@@ -1008,8 +1001,8 @@ BSSvAddMulticastNode(
-*/
//2008-4-14 <add> by chester for led issue
#ifdef FOR_LED_ON_NOTEBOOK
-BOOL cc=FALSE;
-UINT status;
+bool cc=false;
+unsigned int status;
#endif
void
BSSvSecondCallBack(
@@ -1018,11 +1011,11 @@ BSSvSecondCallBack(
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT ii;
+ unsigned int ii;
PWLAN_IE_SSID pItemSSID, pCurrSSID;
- UINT uSleepySTACnt = 0;
- UINT uNonShortSlotSTACnt = 0;
- UINT uLongPreambleSTACnt = 0;
+ unsigned int uSleepySTACnt = 0;
+ unsigned int uNonShortSlotSTACnt = 0;
+ unsigned int uLongPreambleSTACnt = 0;
viawget_wpa_header* wpahdr; //DavidWang
spin_lock_irq(&pDevice->lock);
@@ -1034,22 +1027,22 @@ BSSvSecondCallBack(
//2008-4-14 <add> by chester for led issue
#ifdef FOR_LED_ON_NOTEBOOK
MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
-if ((( !(pDevice->byGPIO & GPIO0_DATA)&&(pDevice->bHWRadioOff == FALSE))||((pDevice->byGPIO & GPIO0_DATA)&&(pDevice->bHWRadioOff == TRUE)))&&(cc==FALSE)){
-cc=TRUE;
+if ((( !(pDevice->byGPIO & GPIO0_DATA)&&(pDevice->bHWRadioOff == false))||((pDevice->byGPIO & GPIO0_DATA)&&(pDevice->bHWRadioOff == true)))&&(cc==false)){
+cc=true;
}
-else if(cc==TRUE){
+else if(cc==true){
-if(pDevice->bHWRadioOff == TRUE){
+if(pDevice->bHWRadioOff == true){
if ( !(pDevice->byGPIO & GPIO0_DATA))
//||( !(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
{if(status==1) goto start;
status=1;
CARDbRadioPowerOff(pDevice);
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
//netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
}
if (pDevice->byGPIO &GPIO0_DATA)
@@ -1064,11 +1057,11 @@ else{
{if(status==3) goto start;
status=3;
CARDbRadioPowerOff(pDevice);
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
//netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
}
if ( !(pDevice->byGPIO & GPIO0_DATA))
@@ -1092,11 +1085,11 @@ start:
{
pDevice->byReAssocCount++;
- if((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != TRUE)) { //10 sec timeout
+ if((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
printk("Re-association timeout!!!\n");
pDevice->byReAssocCount = 0;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1106,7 +1099,7 @@ start:
}
#endif
}
- else if(pDevice->bLinkPass == TRUE)
+ else if(pDevice->bLinkPass == true)
pDevice->byReAssocCount = 0;
}
@@ -1200,27 +1193,27 @@ start:
if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
if (!pDevice->bProtectMode) {
MACvEnableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = TRUE;
+ pDevice->bProtectMode = true;
}
}
else {
if (pDevice->bProtectMode) {
MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = FALSE;
+ pDevice->bProtectMode = false;
}
}
// on/off short slot time
if (uNonShortSlotSTACnt > 0) {
if (pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
BBvSetShortSlotTime(pDevice);
vUpdateIFS((void *)pDevice);
}
}
else {
if (!pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
BBvSetShortSlotTime(pDevice);
vUpdateIFS((void *)pDevice);
}
@@ -1231,13 +1224,13 @@ start:
if (uLongPreambleSTACnt > 0) {
if (!pDevice->bBarkerPreambleMd) {
MACvEnableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = TRUE;
+ pDevice->bBarkerPreambleMd = true;
}
}
else {
if (pDevice->bBarkerPreambleMd) {
MACvDisableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = FALSE;
+ pDevice->bBarkerPreambleMd = false;
}
}
@@ -1247,9 +1240,9 @@ start:
// Check if any STA in PS mode, enable DTIM multicast deliver
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (uSleepySTACnt > 0)
- pMgmt->sNodeDBTable[0].bPSEnable = TRUE;
+ pMgmt->sNodeDBTable[0].bPSEnable = true;
else
- pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
}
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
@@ -1276,12 +1269,12 @@ start:
}
if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
- pDevice->bRoaming = TRUE;
+ pDevice->bLinkPass = false;
+ pDevice->bRoaming = true;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
@@ -1298,7 +1291,7 @@ start:
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
};
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1314,7 +1307,7 @@ start:
pDevice->uAutoReConnectTime++;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
//network manager support need not do Roaming scan???
- if(pDevice->bWPASuppWextEnabled ==TRUE)
+ if(pDevice->bWPASuppWextEnabled ==true)
pDevice->uAutoReConnectTime = 0;
#endif
}
@@ -1358,7 +1351,7 @@ start:
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
pMgmt->eCurrState = WMAC_STATE_STARTED;
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
}
}
}
@@ -1391,23 +1384,23 @@ start:
void
BSSvUpdateNodeTxCounter(
void *hDeviceContext,
- BYTE byTsr0,
- BYTE byTsr1,
- PBYTE pbyBuffer,
- UINT uFIFOHeaderSize
+ unsigned char byTsr0,
+ unsigned char byTsr1,
+ unsigned char *pbyBuffer,
+ unsigned int uFIFOHeaderSize
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uNodeIndex = 0;
- BYTE byTxRetry = (byTsr0 & TSR0_NCR);
+ unsigned int uNodeIndex = 0;
+ unsigned char byTxRetry = (byTsr0 & TSR0_NCR);
PSTxBufHead pTxBufHead;
PS802_11Header pMACHeader;
- WORD wRate;
- WORD wFallBackRate = RATE_1M;
- BYTE byFallBack;
- UINT ii;
-// UINT txRetryTemp;
+ unsigned short wRate;
+ unsigned short wFallBackRate = RATE_1M;
+ unsigned char byFallBack;
+ unsigned int ii;
+// unsigned int txRetryTemp;
//PLICE_DEBUG->
//txRetryTemp = byTxRetry;
//if (txRetryTemp== 8)
@@ -1584,14 +1577,14 @@ BSSvUpdateNodeTxCounter(
void
BSSvClearNodeDBTable(
void *hDeviceContext,
- UINT uStartIndex
+ unsigned int uStartIndex
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
struct sk_buff *skb;
- UINT ii;
+ unsigned int ii;
for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
@@ -1629,8 +1622,8 @@ void s_vCheckSensitivity(
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
if (pBSSList != NULL) {
// Updata BB Reg if RSSI is too strong.
- LONG LocalldBmAverage = 0;
- LONG uNumofdBm = 0;
+ long LocalldBmAverage = 0;
+ long uNumofdBm = 0;
for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
if (pBSSList->ldBmAverage[ii] != 0) {
uNumofdBm ++;
@@ -1666,10 +1659,10 @@ BSSvClearAnyBSSJoinRecord (
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pMgmt->sBSSList[ii].bSelected = FALSE;
+ pMgmt->sBSSList[ii].bSelected = false;
}
return;
}
@@ -1680,9 +1673,9 @@ void s_uCalculateLinkQual(
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
- ULONG TxOkRatio, TxCnt;
- ULONG RxOkRatio,RxCnt;
- ULONG RssiRatio;
+ unsigned long TxOkRatio, TxCnt;
+ unsigned long RxOkRatio,RxCnt;
+ unsigned long RssiRatio;
long ldBm;
TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
@@ -1693,7 +1686,7 @@ RxCnt = pDevice->scStatistic.RxFcsErrCnt +
TxOkRatio = (TxCnt < 6) ? 4000:((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
//decide link quality
-if(pDevice->bLinkPass !=TRUE)
+if(pDevice->bLinkPass !=true)
{
// printk("s_uCalculateLinkQual-->Link disconnect and Poor quality**\n");
pDevice->scStatistic.LinkQuality = 0;
@@ -1701,7 +1694,7 @@ if(pDevice->bLinkPass !=TRUE)
}
else
{
- RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
+ RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
if(-ldBm < 50) {
RssiRatio = 4000;
}
@@ -1735,8 +1728,8 @@ void s_vCheckPreEDThreshold(
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
if (pBSSList != NULL) {
- pDevice->byBBPreEDRSSI = (BYTE) (~(pBSSList->ldBmAverRange) + 1);
- //BBvUpdatePreEDThreshold(pDevice, FALSE);
+ pDevice->byBBPreEDRSSI = (unsigned char) (~(pBSSList->ldBmAverRange) + 1);
+ //BBvUpdatePreEDThreshold(pDevice, false);
}
}
return;
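
A sketch of the MAC-address comparison swap made throughout bssdb.c, where the driver-local IS_ETH_ADDRESS_EQUAL()/IS_BROADCAST_ADDRESS() macros give way to the <linux/etherdevice.h> helpers. The wrapper below is hypothetical and only illustrates the inverted return convention: compare_ether_addr() returns 0 on a match, hence the leading '!' in the converted hunks.

#include <linux/etherdevice.h>

/* Sketch: true when 'a' is a concrete (non-broadcast) address equal to 'b',
 * mirroring how the converted BSS-list lookups treat a desired BSSID. */
static bool addr_matches(const unsigned char *a, const unsigned char *b)
{
	if (is_broadcast_ether_addr(a))
		return false;
	return !compare_ether_addr(a, b);	/* 0 means the addresses match */
}
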
diff --git a/drivers/staging/vt6655/bssdb.h b/drivers/staging/vt6655/bssdb.h
index e09ef876297..0af42118612 100644
--- a/drivers/staging/vt6655/bssdb.h
+++ b/drivers/staging/vt6655/bssdb.h
@@ -90,69 +90,69 @@ typedef enum _NDIS_802_11_NETWORK_TYPE
typedef struct tagSERPObject {
- BOOL bERPExist;
- BYTE byERP;
+ bool bERPExist;
+ unsigned char byERP;
}ERPObject, *PERPObject;
typedef struct tagSRSNCapObject {
- BOOL bRSNCapExist;
- WORD wRSNCap;
+ bool bRSNCapExist;
+ unsigned short wRSNCap;
}SRSNCapObject, *PSRSNCapObject;
// BSS info(AP)
#pragma pack(1)
typedef struct tagKnownBSS {
// BSS info
- BOOL bActive;
- BYTE abyBSSID[WLAN_BSSID_LEN];
- UINT uChannel;
- BYTE abySuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- UINT uRSSI;
- BYTE bySQ;
- WORD wBeaconInterval;
- WORD wCapInfo;
- BYTE abySSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE byRxRate;
-
-// WORD wATIMWindow;
- BYTE byRSSIStatCnt;
- LONG ldBmMAX;
- LONG ldBmAverage[RSSI_STAT_COUNT];
- LONG ldBmAverRange;
+ bool bActive;
+ unsigned char abyBSSID[WLAN_BSSID_LEN];
+ unsigned int uChannel;
+ unsigned char abySuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned char abyExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned int uRSSI;
+ unsigned char bySQ;
+ unsigned short wBeaconInterval;
+ unsigned short wCapInfo;
+ unsigned char abySSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned char byRxRate;
+
+// unsigned short wATIMWindow;
+ unsigned char byRSSIStatCnt;
+ long ldBmMAX;
+ long ldBmAverage[RSSI_STAT_COUNT];
+ long ldBmAverRange;
//For any BSSID selection improvment
- BOOL bSelected;
+ bool bSelected;
//++ WPA informations
- BOOL bWPAValid;
- BYTE byGKType;
- BYTE abyPKType[4];
- WORD wPKCount;
- BYTE abyAuthType[4];
- WORD wAuthCount;
- BYTE byDefaultK_as_PK;
- BYTE byReplayIdx;
+ bool bWPAValid;
+ unsigned char byGKType;
+ unsigned char abyPKType[4];
+ unsigned short wPKCount;
+ unsigned char abyAuthType[4];
+ unsigned short wAuthCount;
+ unsigned char byDefaultK_as_PK;
+ unsigned char byReplayIdx;
//--
//++ WPA2 informations
- BOOL bWPA2Valid;
- BYTE byCSSGK;
- WORD wCSSPKCount;
- BYTE abyCSSPK[4];
- WORD wAKMSSAuthCount;
- BYTE abyAKMSSAuthType[4];
+ bool bWPA2Valid;
+ unsigned char byCSSGK;
+ unsigned short wCSSPKCount;
+ unsigned char abyCSSPK[4];
+ unsigned short wAKMSSAuthCount;
+ unsigned char abyAKMSSAuthType[4];
//++ wpactl
- BYTE byWPAIE[MAX_WPA_IE_LEN];
- BYTE byRSNIE[MAX_WPA_IE_LEN];
- WORD wWPALen;
- WORD wRSNLen;
+ unsigned char byWPAIE[MAX_WPA_IE_LEN];
+ unsigned char byRSNIE[MAX_WPA_IE_LEN];
+ unsigned short wWPALen;
+ unsigned short wRSNLen;
// Clear count
- UINT uClearCount;
-// BYTE abyIEs[WLAN_BEACON_FR_MAXLEN];
- UINT uIELength;
+ unsigned int uClearCount;
+// unsigned char abyIEs[WLAN_BEACON_FR_MAXLEN];
+ unsigned int uIELength;
QWORD qwBSSTimestamp;
QWORD qwLocalTSF; // local TSF timer
@@ -161,7 +161,7 @@ typedef struct tagKnownBSS {
ERPObject sERP;
SRSNCapObject sRSNCapObj;
- BYTE abyIEs[1024]; // don't move this field !!
+ unsigned char abyIEs[1024]; // don't move this field !!
}__attribute__ ((__packed__))
KnownBSS , *PKnownBSS;
@@ -181,59 +181,59 @@ typedef enum tagNODE_STATE {
// STA node info
typedef struct tagKnownNodeDB {
// STA info
- BOOL bActive;
- BYTE abyMACAddr[WLAN_ADDR_LEN];
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- WORD wTxDataRate;
- BOOL bShortPreamble;
- BOOL bERPExist;
- BOOL bShortSlotTime;
- UINT uInActiveCount;
- WORD wMaxBasicRate; //Get from byTopOFDMBasicRate or byTopCCKBasicRate which depends on packetTyp.
- WORD wMaxSuppRate; //Records the highest supported rate getting from SuppRates IE and ExtSuppRates IE in Beacon.
- WORD wSuppRate;
- BYTE byTopOFDMBasicRate;//Records the highest basic rate in OFDM mode
- BYTE byTopCCKBasicRate; //Records the highest basic rate in CCK mode
+ bool bActive;
+ unsigned char abyMACAddr[WLAN_ADDR_LEN];
+ unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
+ unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
+ unsigned short wTxDataRate;
+ bool bShortPreamble;
+ bool bERPExist;
+ bool bShortSlotTime;
+ unsigned int uInActiveCount;
+ unsigned short wMaxBasicRate; //Get from byTopOFDMBasicRate or byTopCCKBasicRate which depends on packetTyp.
+ unsigned short wMaxSuppRate; //Records the highest supported rate getting from SuppRates IE and ExtSuppRates IE in Beacon.
+ unsigned short wSuppRate;
+ unsigned char byTopOFDMBasicRate;//Records the highest basic rate in OFDM mode
+ unsigned char byTopCCKBasicRate; //Records the highest basic rate in CCK mode
// For AP mode
struct sk_buff_head sTxPSQueue;
- WORD wCapInfo;
- WORD wListenInterval;
- WORD wAID;
+ unsigned short wCapInfo;
+ unsigned short wListenInterval;
+ unsigned short wAID;
NODE_STATE eNodeState;
- BOOL bPSEnable;
- BOOL bRxPSPoll;
- BYTE byAuthSequence;
- ULONG ulLastRxJiffer;
- BYTE bySuppRate;
- DWORD dwFlags;
- WORD wEnQueueCnt;
-
- BOOL bOnFly;
- ULONGLONG KeyRSC;
- BYTE byKeyIndex;
- DWORD dwKeyIndex;
- BYTE byCipherSuite;
- DWORD dwTSC47_16;
- WORD wTSC15_0;
- UINT uWepKeyLength;
- BYTE abyWepKey[WLAN_WEPMAX_KEYLEN];
+ bool bPSEnable;
+ bool bRxPSPoll;
+ unsigned char byAuthSequence;
+ unsigned long ulLastRxJiffer;
+ unsigned char bySuppRate;
+ unsigned long dwFlags;
+ unsigned short wEnQueueCnt;
+
+ bool bOnFly;
+ unsigned long long KeyRSC;
+ unsigned char byKeyIndex;
+ unsigned long dwKeyIndex;
+ unsigned char byCipherSuite;
+ unsigned long dwTSC47_16;
+ unsigned short wTSC15_0;
+ unsigned int uWepKeyLength;
+ unsigned char abyWepKey[WLAN_WEPMAX_KEYLEN];
//
// Auto rate fallback vars
- BOOL bIsInFallback;
- UINT uAverageRSSI;
- UINT uRateRecoveryTimeout;
- UINT uRatePollTimeout;
- UINT uTxFailures;
- UINT uTxAttempts;
-
- UINT uTxRetry;
- UINT uFailureRatio;
- UINT uRetryRatio;
- UINT uTxOk[MAX_RATE+1];
- UINT uTxFail[MAX_RATE+1];
- UINT uTimeCount;
+ bool bIsInFallback;
+ unsigned int uAverageRSSI;
+ unsigned int uRateRecoveryTimeout;
+ unsigned int uRatePollTimeout;
+ unsigned int uTxFailures;
+ unsigned int uTxAttempts;
+
+ unsigned int uTxRetry;
+ unsigned int uFailureRatio;
+ unsigned int uRetryRatio;
+ unsigned int uTxOk[MAX_RATE+1];
+ unsigned int uTxFail[MAX_RATE+1];
+ unsigned int uTimeCount;
} KnownNodeDB, *PKnownNodeDB;
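
One width-sensitive detail in the KnownNodeDB conversion above: the DWORD fields (dwFlags, dwKeyIndex, dwTSC47_16) become unsigned long, which is 64 bits on LP64 kernels even though the dw prefix suggests 32. A standalone illustration of the size difference (userspace, purely for comparison):

    #include <stdio.h>

    int main(void)
    {
        /* on LP64 (e.g. x86_64 Linux) this prints 4 and 8 */
        printf("sizeof(unsigned int)  = %zu\n", sizeof(unsigned int));
        printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
        return 0;
    }

Whether that widening matters depends on how the old DWORD typedef was defined, which is outside this hunk.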
@@ -245,32 +245,32 @@ typedef struct tagKnownNodeDB {
PKnownBSS
BSSpSearchBSSList(
void *hDeviceContext,
- PBYTE pbyDesireBSSID,
- PBYTE pbyDesireSSID,
+ unsigned char *pbyDesireBSSID,
+ unsigned char *pbyDesireSSID,
CARD_PHY_TYPE ePhyType
);
PKnownBSS
BSSpAddrIsInBSSList(
void *hDeviceContext,
- PBYTE abyBSSID,
+ unsigned char *abyBSSID,
PWLAN_IE_SSID pSSID
);
void
BSSvClearBSSList(
void *hDeviceContext,
- BOOL bKeepCurrBSSID
+ bool bKeepCurrBSSID
);
-BOOL
+bool
BSSbInsertToBSSList(
void *hDeviceContext,
- PBYTE abyBSSIDAddr,
+ unsigned char *abyBSSIDAddr,
QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
+ unsigned short wBeaconInterval,
+ unsigned short wCapInfo,
+ unsigned char byCurrChannel,
PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pSuppRates,
PWLAN_IE_SUPP_RATES pExtSuppRates,
@@ -279,20 +279,20 @@ BSSbInsertToBSSList(
PWLAN_IE_RSN_EXT pRSNWPA,
PWLAN_IE_COUNTRY pIE_Country,
PWLAN_IE_QUIET pIE_Quiet,
- UINT uIELength,
- PBYTE pbyIEs,
+ unsigned int uIELength,
+ unsigned char *pbyIEs,
void *pRxPacketContext
);
-BOOL
+bool
BSSbUpdateToBSSList(
void *hDeviceContext,
QWORD qwTimestamp,
- WORD wBeaconInterval,
- WORD wCapInfo,
- BYTE byCurrChannel,
- BOOL bChannelHit,
+ unsigned short wBeaconInterval,
+ unsigned short wCapInfo,
+ unsigned char byCurrChannel,
+ bool bChannelHit,
PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pSuppRates,
PWLAN_IE_SUPP_RATES pExtSuppRates,
@@ -302,29 +302,23 @@ BSSbUpdateToBSSList(
PWLAN_IE_COUNTRY pIE_Country,
PWLAN_IE_QUIET pIE_Quiet,
PKnownBSS pBSSList,
- UINT uIELength,
- PBYTE pbyIEs,
+ unsigned int uIELength,
+ unsigned char *pbyIEs,
void *pRxPacketContext
);
-BOOL
-BSSDBbIsSTAInNodeDB(
- void *hDeviceContext,
- PBYTE abyDstAddr,
- PUINT puNodeIndex
- );
+bool
+BSSDBbIsSTAInNodeDB(void *hDeviceContext, unsigned char *abyDstAddr,
+ unsigned int *puNodeIndex);
void
-BSSvCreateOneNode(
- void *hDeviceContext,
- PUINT puNodeIndex
- );
+BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex);
void
BSSvUpdateAPNode(
void *hDeviceContext,
- PWORD pwCapInfo,
+ unsigned short *pwCapInfo,
PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pExtSuppRates
);
@@ -339,16 +333,16 @@ BSSvSecondCallBack(
void
BSSvUpdateNodeTxCounter(
void *hDeviceContext,
- BYTE byTsr0,
- BYTE byTsr1,
- PBYTE pbyBuffer,
- UINT uFIFOHeaderSize
+ unsigned char byTsr0,
+ unsigned char byTsr1,
+ unsigned char *pbyBuffer,
+ unsigned int uFIFOHeaderSize
);
void
BSSvRemoveOneNode(
void *hDeviceContext,
- UINT uNodeIndex
+ unsigned int uNodeIndex
);
void
@@ -360,7 +354,7 @@ BSSvAddMulticastNode(
void
BSSvClearNodeDBTable(
void *hDeviceContext,
- UINT uStartIndex
+ unsigned int uStartIndex
);
void
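
The prototypes above keep their behaviour; only the parameter types change, and BSSDBbIsSTAInNodeDB/BSSvCreateOneNode are reflowed onto fewer lines. A hypothetical call site after the conversion (a fragment only: it assumes the driver's existing PSDevice context and WLAN_ADDR_LEN, so it does not compile on its own, and example_track_station is an illustrative name that does not exist in the driver):

    /* Look the transmitter up in the station table, creating an entry if it
     * is not there yet. uNodeIndex then selects the KnownNodeDB slot. */
    static void example_track_station(PSDevice pDevice, unsigned char *abyDstAddr)
    {
        unsigned int uNodeIndex = 0;

        if (!BSSDBbIsSTAInNodeDB(pDevice, abyDstAddr, &uNodeIndex))
            BSSvCreateOneNode(pDevice, &uNodeIndex);
    }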
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 7bc2d7654b0..32d095c4d51 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -56,6 +56,7 @@
#include "key.h"
#include "rc4.h"
#include "country.h"
+#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
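
The hunks below delete the static channel table, the per-country regulatory table and the channel helpers (ChannelValid, CARDbyGetChannelMapping, CARDbSetChannel, ...) from card.c; given the #include "channel.h" added above, they were presumably moved into the new channel.c/channel.h, though this diff does not show where. A sketch of the kind of declaration that would migrate, with the signature taken from the definition removed below and the same type conversion applied (the actual contents of channel.h are an assumption):

    /* channel.h (assumed) */
    bool ChannelValid(unsigned int CountryCode, unsigned int ChannelIndex);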
@@ -76,411 +77,39 @@ static int msglevel =MSG_LEVEL_INFO;
#define C_CWMAX 1023 // slot time
-#define CARD_MAX_CHANNEL_TBL 56
-
#define WAIT_BEACON_TX_DOWN_TMO 3 // Times
-typedef struct tagSChannelTblElement {
- BYTE byChannelNumber;
- UINT uFrequency;
- BOOL bValid;
- BYTE byMAP;
-}SChannelTblElement, *PSChannelTblElement;
-
//1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
-static BYTE abyDefaultSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
+static unsigned char abyDefaultSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
//6M, 9M, 12M, 48M
-static BYTE abyDefaultExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
+static unsigned char abyDefaultExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
//6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
-static BYTE abyDefaultSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+static unsigned char abyDefaultSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
//1M, 2M, 5M, 11M,
-static BYTE abyDefaultSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
-
-
+static unsigned char abyDefaultSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
-/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
-const WORD cwRXBCNTSFOff[MAX_RATE] =
+const unsigned short cwRXBCNTSFOff[MAX_RATE] =
{17, 17, 17, 17, 34, 23, 17, 11, 8, 5, 4, 3};
-static SChannelTblElement sChannelTbl[CARD_MAX_CHANNEL_TBL+1] =
-{
- {0, 0, FALSE, 0},
- {1, 2412, TRUE, 0},
- {2, 2417, TRUE, 0},
- {3, 2422, TRUE, 0},
- {4, 2427, TRUE, 0},
- {5, 2432, TRUE, 0},
- {6, 2437, TRUE, 0},
- {7, 2442, TRUE, 0},
- {8, 2447, TRUE, 0},
- {9, 2452, TRUE, 0},
- {10, 2457, TRUE, 0},
- {11, 2462, TRUE, 0},
- {12, 2467, TRUE, 0},
- {13, 2472, TRUE, 0},
- {14, 2484, TRUE, 0},
- {183, 4915, TRUE, 0},
- {184, 4920, TRUE, 0},
- {185, 4925, TRUE, 0},
- {187, 4935, TRUE, 0},
- {188, 4940, TRUE, 0},
- {189, 4945, TRUE, 0},
- {192, 4960, TRUE, 0},
- {196, 4980, TRUE, 0},
- {7, 5035, TRUE, 0},
- {8, 5040, TRUE, 0},
- {9, 5045, TRUE, 0},
- {11, 5055, TRUE, 0},
- {12, 5060, TRUE, 0},
- {16, 5080, TRUE, 0},
- {34, 5170, TRUE, 0},
- {36, 5180, TRUE, 0},
- {38, 5190, TRUE, 0},
- {40, 5200, TRUE, 0},
- {42, 5210, TRUE, 0},
- {44, 5220, TRUE, 0},
- {46, 5230, TRUE, 0},
- {48, 5240, TRUE, 0},
- {52, 5260, TRUE, 0},
- {56, 5280, TRUE, 0},
- {60, 5300, TRUE, 0},
- {64, 5320, TRUE, 0},
- {100, 5500, TRUE, 0},
- {104, 5520, TRUE, 0},
- {108, 5540, TRUE, 0},
- {112, 5560, TRUE, 0},
- {116, 5580, TRUE, 0},
- {120, 5600, TRUE, 0},
- {124, 5620, TRUE, 0},
- {128, 5640, TRUE, 0},
- {132, 5660, TRUE, 0},
- {136, 5680, TRUE, 0},
- {140, 5700, TRUE, 0},
- {149, 5745, TRUE, 0},
- {153, 5765, TRUE, 0},
- {157, 5785, TRUE, 0},
- {161, 5805, TRUE, 0},
- {165, 5825, TRUE, 0}
-};
-
-
-/************************************************************************
- * The Radar regulation rules for each country
- ************************************************************************/
-SCountryTable ChannelRuleTab[CCODE_MAX+1] =
-{
-/************************************************************************
- * This table is based on Athero driver rules
- ************************************************************************/
-/* Country Available channels, ended with 0 */
-/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
-{CCODE_FCC, {'U','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_TELEC, {'J','P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 23, 0, 0, 23, 0, 23, 23, 0, 23, 0, 0, 23, 23, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ETSI, {'E','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_RESV3, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESV4, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESV5, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESV6, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESV7, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESV8, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESV9, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESVa, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESVb, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESVc, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESVd, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RESVe, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ALLBAND, {' ',' '}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ALBANIA, {'A','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ALGERIA, {'D','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ARGENTINA, {'A','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
-{CCODE_ARMENIA, {'A','M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_AUSTRALIA, {'A','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_AUSTRIA, {'A','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 15, 0, 15, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_AZERBAIJAN, {'A','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_BAHRAIN, {'B','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_BELARUS, {'B','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_BELGIUM, {'B','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_BELIZE, {'B','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_BOLIVIA, {'B','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_BRAZIL, {'B','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_BRUNEI_DARUSSALAM, {'B','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_BULGARIA, {'B','G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 0, 0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0} },
-{CCODE_CANADA, {'C','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_CHILE, {'C','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 17, 17} },
-{CCODE_CHINA, {'C','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_COLOMBIA, {'C','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_COSTA_RICA, {'C','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_CROATIA, {'H','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_CYPRUS, {'C','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_CZECH, {'C','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_DENMARK, {'D','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_DOMINICAN_REPUBLIC, {'D','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_ECUADOR, {'E','C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_EGYPT, {'E','G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_EL_SALVADOR, {'S','V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ESTONIA, {'E','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_FINLAND, {'F','I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_FRANCE, {'F','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_GERMANY, {'D','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_GREECE, {'G','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_GEORGIA, {'G','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_GUATEMALA, {'G','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_HONDURAS, {'H','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_HONG_KONG, {'H','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_HUNGARY, {'H','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ICELAND, {'I','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_INDIA, {'I','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_INDONESIA, {'I','D'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_IRAN, {'I','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_IRELAND, {'I','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_ITALY, {'I','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_ISRAEL, {'I','L'}, { 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_JAPAN, {'J','P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_JORDAN, {'J','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_KAZAKHSTAN, {'K','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_KUWAIT, {'K','W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_LATVIA, {'L','V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_LEBANON, {'L','B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_LEICHTENSTEIN, {'L','I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_LITHUANIA, {'L','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_LUXEMBURG, {'L','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_MACAU, {'M','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_MACEDONIA, {'M','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_MALTA, {'M','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
-{CCODE_MALAYSIA, {'M','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_MEXICO, {'M','X'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_MONACO, {'M','C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_MOROCCO, {'M','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_NETHERLANDS, {'N','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_NEW_ZEALAND, {'N','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_NORTH_KOREA, {'K','P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
-{CCODE_NORWAY, {'N','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_OMAN, {'O','M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_PAKISTAN, {'P','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_PANAMA, {'P','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_PERU, {'P','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_PHILIPPINES, {'P','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_POLAND, {'P','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_PORTUGAL, {'P','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_PUERTO_RICO, {'P','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_QATAR, {'Q','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ROMANIA, {'R','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_RUSSIA, {'R','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_SAUDI_ARABIA, {'S','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_SINGAPORE, {'S','G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 20, 20, 20, 20} },
-{CCODE_SLOVAKIA, {'S','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
-{CCODE_SLOVENIA, {'S','I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_SOUTH_AFRICA, {'Z','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_SOUTH_KOREA, {'K','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
-{CCODE_SPAIN, {'E','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
-{CCODE_SWEDEN, {'S','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_SWITZERLAND, {'C','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_SYRIA, {'S','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_TAIWAN, {'T','W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
-{CCODE_THAILAND, {'T','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
-{CCODE_TRINIDAD_TOBAGO, {'T','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_TUNISIA, {'T','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_TURKEY, {'T','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_UK, {'G','B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
-{CCODE_UKRAINE, {'U','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_UNITED_ARAB_EMIRATES, {'A','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_UNITED_STATES, {'U','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
-{CCODE_URUGUAY, {'U','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
-{CCODE_UZBEKISTAN, {'U','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_VENEZUELA, {'V','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
-{CCODE_VIETNAM, {'V','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_YEMEN, {'Y','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_ZIMBABWE, {'Z','W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_JAPAN_W52_W53, {'J','J'}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-{CCODE_MAX, {'U','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
-/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
-};
-
/*--------------------- Static Functions --------------------------*/
static
void
s_vCaculateOFDMRParameter(
- BYTE byRate,
+ unsigned char byRate,
CARD_PHY_TYPE ePHYType,
- PBYTE pbyTxRate,
- PBYTE pbyRsvTime
+ unsigned char *pbyTxRate,
+ unsigned char *pbyRsvTime
);
-/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Functions --------------------------*/
-
-/*--------------------- Export function -------------------------*/
-/************************************************************************
- * Country Channel Valid
- * Input: CountryCode, ChannelNum
- * ChanneIndex is defined as VT3253 MAC channel:
- * 1 = 2.4G channel 1
- * 2 = 2.4G channel 2
- * ...
- * 14 = 2.4G channel 14
- * 15 = 4.9G channel 183
- * 16 = 4.9G channel 184
- * .....
- * Output: TRUE if the specified 5GHz band is allowed to be used.
- False otherwise.
-// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
-
-// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
- ************************************************************************/
-//2008-8-4 <add> by chester
-BOOL
-ChannelValid(UINT CountryCode, UINT ChannelIndex)
-{
- BOOL bValid;
-
- bValid = FALSE;
- /*
- * If Channel Index is invalid, return invalid
- */
- if ((ChannelIndex > CB_MAX_CHANNEL) ||
- (ChannelIndex == 0))
- {
- bValid = FALSE;
- goto exit;
- }
-
- bValid = sChannelTbl[ChannelIndex].bValid;
-
-exit:
- return (bValid);
-
-} /* end ChannelValid */
-
-
/*
* Description: Caculate TxRate and RsvTime fields for RSPINF in OFDM mode.
*
@@ -498,10 +127,10 @@ exit:
static
void
s_vCaculateOFDMRParameter (
- BYTE byRate,
+ unsigned char byRate,
CARD_PHY_TYPE ePHYType,
- PBYTE pbyTxRate,
- PBYTE pbyRsvTime
+ unsigned char *pbyTxRate,
+ unsigned char *pbyRsvTime
)
{
switch (byRate) {
@@ -614,9 +243,9 @@ static
void
s_vSetRSPINF (PSDevice pDevice, CARD_PHY_TYPE ePHYType, void *pvSupportRateIEs, void *pvExtSupportRateIEs)
{
- BYTE byServ = 0, bySignal = 0; // For CCK
- WORD wLen = 0;
- BYTE byTxRate = 0, byRsvTime = 0; // For OFDM
+ unsigned char byServ = 0, bySignal = 0; // For CCK
+ unsigned short wLen = 0;
+ unsigned char byTxRate = 0, byRsvTime = 0; // For OFDM
//Set to Page1
MACvSelectPage1(pDevice->PortOffset);
@@ -722,120 +351,7 @@ s_vSetRSPINF (PSDevice pDevice, CARD_PHY_TYPE ePHYType, void *pvSupportRateIEs,
MACvSelectPage0(pDevice->PortOffset);
}
-
-
-
-/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Functions --------------------------*/
-BYTE CARDbyGetChannelMapping (void *pDeviceHandler, BYTE byChannelNumber, CARD_PHY_TYPE ePhyType)
-{
- UINT ii;
-
- if ((ePhyType == PHY_TYPE_11B) || (ePhyType == PHY_TYPE_11G)) {
- return (byChannelNumber);
- }
-
- for(ii = (CB_MAX_CHANNEL_24G + 1); ii <= CB_MAX_CHANNEL; ) {
- if (sChannelTbl[ii].byChannelNumber == byChannelNumber) {
- return ((BYTE) ii);
- }
- ii++;
- }
- return (0);
-}
-
-
-BYTE CARDbyGetChannelNumber (void *pDeviceHandler, BYTE byChannelIndex)
-{
-// PSDevice pDevice = (PSDevice) pDeviceHandler;
- return(sChannelTbl[byChannelIndex].byChannelNumber);
-}
-
-/*
- * Description: Set NIC media channel
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * uConnectionChannel - Channel to be set
- * Out:
- * none
- *
- * Return Value: TRUE if succeeded; FALSE if failed.
- *
- */
-BOOL CARDbSetChannel (void *pDeviceHandler, UINT uConnectionChannel)
-{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bResult = TRUE;
-
-
- if (pDevice->byCurrentCh == uConnectionChannel) {
- return bResult;
- }
-
- if (sChannelTbl[uConnectionChannel].bValid == FALSE) {
- return (FALSE);
- }
-
- if ((uConnectionChannel > CB_MAX_CHANNEL_24G) &&
- (pDevice->eCurrentPHYType != PHY_TYPE_11A)) {
- CARDbSetPhyParameter(pDevice, PHY_TYPE_11A, 0, 0, NULL, NULL);
- } else if ((uConnectionChannel <= CB_MAX_CHANNEL_24G) &&
- (pDevice->eCurrentPHYType == PHY_TYPE_11A)) {
- CARDbSetPhyParameter(pDevice, PHY_TYPE_11G, 0, 0, NULL, NULL);
- }
- // clear NAV
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
-
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
-
- if ( pDevice->byRFType == RF_AIROHA7230 )
- {
- RFbAL7230SelectChannelPostProcess(pDevice->PortOffset, pDevice->byCurrentCh, (BYTE)uConnectionChannel);
- }
- //}} RobertYu
-
-
- pDevice->byCurrentCh = (BYTE)uConnectionChannel;
- bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (BYTE)uConnectionChannel);
-
- // Init Synthesizer Table
- if (pDevice->bEnablePSMode == TRUE)
- RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);
-
-
- //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDbSetMediaChannel: %d\n", (BYTE)uConnectionChannel);
- BBvSoftwareReset(pDevice->PortOffset);
-
- if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- // set HW default power register
- MACvSelectPage1(pDevice->PortOffset);
- RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK, pDevice->byCurPwr);
- RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM, pDevice->byCurPwr);
- MACvSelectPage0(pDevice->PortOffset);
- }
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
-#ifdef PLICE_DEBUG
- //printk("Func:CARDbSetChannel:call RFbSetPower:11B\n");
-#endif
- RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
- } else {
-#ifdef PLICE_DEBUG
- //printk("Func:CARDbSetChannel:call RFbSetPower\n");
-#endif
- RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
- }
-
- return(bResult);
-}
-
-
/*
* Description: Card Send packet function
@@ -849,11 +365,11 @@ BOOL CARDbSetChannel (void *pDeviceHandler, UINT uConnectionChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
/*
-BOOL CARDbSendPacket (void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, UINT uLength)
+bool CARDbSendPacket (void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, unsigned int uLength)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (ePktType == PKT_TYPE_802_11_MNG) {
@@ -864,7 +380,7 @@ BOOL CARDbSendPacket (void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktTyp
return TXbTD1Send(pDevice, pPacket, uLength);
}
- return (TRUE);
+ return (true);
}
*/
@@ -878,16 +394,16 @@ BOOL CARDbSendPacket (void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktTyp
* Out:
* none
*
- * Return Value: TRUE if short preamble; otherwise FALSE
+ * Return Value: true if short preamble; otherwise false
*
*/
-BOOL CARDbIsShortPreamble (void *pDeviceHandler)
+bool CARDbIsShortPreamble (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (pDevice->byPreambleType == 0) {
- return(FALSE);
+ return(false);
}
- return(TRUE);
+ return(true);
}
/*
@@ -899,10 +415,10 @@ BOOL CARDbIsShortPreamble (void *pDeviceHandler)
* Out:
* none
*
- * Return Value: TRUE if short slot time; otherwise FALSE
+ * Return Value: true if short slot time; otherwise false
*
*/
-BOOL CARDbIsShorSlotTime (void *pDeviceHandler)
+bool CARDbIsShorSlotTime (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
return(pDevice->bShortSlotTime);
@@ -921,14 +437,14 @@ BOOL CARDbIsShorSlotTime (void *pDeviceHandler)
* Return Value: None.
*
*/
-BOOL CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wCapInfo, BYTE byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs)
+bool CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, unsigned short wCapInfo, unsigned char byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BYTE byCWMaxMin = 0;
- BYTE bySlot = 0;
- BYTE bySIFS = 0;
- BYTE byDIFS = 0;
- BYTE byData;
+ unsigned char byCWMaxMin = 0;
+ unsigned char bySlot = 0;
+ unsigned char bySIFS = 0;
+ unsigned char byDIFS = 0;
+ unsigned char byData;
// PWLAN_IE_SUPP_RATES pRates = NULL;
PWLAN_IE_SUPP_RATES pSupportRates = (PWLAN_IE_SUPP_RATES) pvSupportRateIEs;
PWLAN_IE_SUPP_RATES pExtSupportRates = (PWLAN_IE_SUPP_RATES) pvExtSupportRateIEs;
@@ -1071,9 +587,9 @@ BOOL CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wC
pDevice->bySlot = bySlot;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, pDevice->bySlot);
if (pDevice->bySlot == C_SLOT_SHORT) {
- pDevice->bShortSlotTime = TRUE;
+ pDevice->bShortSlotTime = true;
} else {
- pDevice->bShortSlotTime = FALSE;
+ pDevice->bShortSlotTime = false;
}
BBvSetShortSlotTime(pDevice);
}
@@ -1089,7 +605,7 @@ BOOL CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wC
s_vSetRSPINF(pDevice, ePHYType, pSupportRates, pExtSupportRates);
pDevice->eCurrentPHYType = ePHYType;
// set for NDIS OID_802_11SUPPORTED_RATES
- return (TRUE);
+ return (true);
}
/*
@@ -1108,7 +624,7 @@ BOOL CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wC
* Return Value: none
*
*/
-BOOL CARDbUpdateTSF (void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF)
+bool CARDbUpdateTSF (void *pDeviceHandler, unsigned char byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
QWORD qwTSFOffset;
@@ -1125,7 +641,7 @@ BOOL CARDbUpdateTSF (void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp,
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, HIDWORD(qwTSFOffset));
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
}
- return(TRUE);
+ return(true);
}
@@ -1140,16 +656,16 @@ BOOL CARDbUpdateTSF (void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp,
* Out:
* none
*
- * Return Value: TRUE if succeed; otherwise FALSE
+ * Return Value: true if it succeeds; otherwise false
*
*/
-BOOL CARDbSetBeaconPeriod (void *pDeviceHandler, WORD wBeaconInterval)
+bool CARDbSetBeaconPeriod (void *pDeviceHandler, unsigned short wBeaconInterval)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT uBeaconInterval = 0;
- UINT uLowNextTBTT = 0;
- UINT uHighRemain = 0;
- UINT uLowRemain = 0;
+ unsigned int uBeaconInterval = 0;
+ unsigned int uLowNextTBTT = 0;
+ unsigned int uHighRemain = 0;
+ unsigned int uLowRemain = 0;
QWORD qwNextTBTT;
HIDWORD(qwNextTBTT) = 0;
@@ -1179,7 +695,7 @@ BOOL CARDbSetBeaconPeriod (void *pDeviceHandler, WORD wBeaconInterval)
VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT + 4, HIDWORD(qwNextTBTT));
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
- return(TRUE);
+ return(true);
}
@@ -1194,51 +710,51 @@ BOOL CARDbSetBeaconPeriod (void *pDeviceHandler, WORD wBeaconInterval)
* Out:
* none
*
- * Return Value: TRUE if all data packet complete; otherwise FALSE.
+ * Return Value: true if all data packets are complete; otherwise false.
*
*/
-BOOL CARDbStopTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
+bool CARDbStopTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (ePktType == PKT_TYPE_802_11_ALL) {
- pDevice->bStopBeacon = TRUE;
- pDevice->bStopTx0Pkt = TRUE;
- pDevice->bStopDataPkt = TRUE;
+ pDevice->bStopBeacon = true;
+ pDevice->bStopTx0Pkt = true;
+ pDevice->bStopDataPkt = true;
} else if (ePktType == PKT_TYPE_802_11_BCN) {
- pDevice->bStopBeacon = TRUE;
+ pDevice->bStopBeacon = true;
} else if (ePktType == PKT_TYPE_802_11_MNG) {
- pDevice->bStopTx0Pkt = TRUE;
+ pDevice->bStopTx0Pkt = true;
} else if (ePktType == PKT_TYPE_802_11_DATA) {
- pDevice->bStopDataPkt = TRUE;
+ pDevice->bStopDataPkt = true;
}
- if (pDevice->bStopBeacon == TRUE) {
- if (pDevice->bIsBeaconBufReadySet == TRUE) {
+ if (pDevice->bStopBeacon == true) {
+ if (pDevice->bIsBeaconBufReadySet == true) {
if (pDevice->cbBeaconBufReadySetCnt < WAIT_BEACON_TX_DOWN_TMO) {
pDevice->cbBeaconBufReadySetCnt ++;
- return(FALSE);
+ return(false);
}
}
- pDevice->bIsBeaconBufReadySet = FALSE;
+ pDevice->bIsBeaconBufReadySet = false;
pDevice->cbBeaconBufReadySetCnt = 0;
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
}
// wait all TD0 complete
- if (pDevice->bStopTx0Pkt == TRUE) {
+ if (pDevice->bStopTx0Pkt == true) {
if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){
- return(FALSE);
+ return(false);
}
}
// wait all Data TD complete
- if (pDevice->bStopDataPkt == TRUE) {
+ if (pDevice->bStopDataPkt == true) {
if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){
- return(FALSE);
+ return(false);
}
}
- return(TRUE);
+ return(true);
}
@@ -1252,33 +768,33 @@ BOOL CARDbStopTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
* Out:
* none
*
- * Return Value: TRUE if success; FALSE if failed.
+ * Return Value: true if success; false if failed.
*
*/
-BOOL CARDbStartTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
+bool CARDbStartTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (ePktType == PKT_TYPE_802_11_ALL) {
- pDevice->bStopBeacon = FALSE;
- pDevice->bStopTx0Pkt = FALSE;
- pDevice->bStopDataPkt = FALSE;
+ pDevice->bStopBeacon = false;
+ pDevice->bStopTx0Pkt = false;
+ pDevice->bStopDataPkt = false;
} else if (ePktType == PKT_TYPE_802_11_BCN) {
- pDevice->bStopBeacon = FALSE;
+ pDevice->bStopBeacon = false;
} else if (ePktType == PKT_TYPE_802_11_MNG) {
- pDevice->bStopTx0Pkt = FALSE;
+ pDevice->bStopTx0Pkt = false;
} else if (ePktType == PKT_TYPE_802_11_DATA) {
- pDevice->bStopDataPkt = FALSE;
+ pDevice->bStopDataPkt = false;
}
- if ((pDevice->bStopBeacon == FALSE) &&
- (pDevice->bBeaconBufReady == TRUE) &&
+ if ((pDevice->bStopBeacon == false) &&
+ (pDevice->bBeaconBufReady == true) &&
(pDevice->eOPMode == OP_MODE_ADHOC)) {
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
}
- return(TRUE);
+ return(true);
}
@@ -1294,10 +810,10 @@ BOOL CARDbStartTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
* Out:
* none
*
- * Return Value: TRUE if success; FALSE if failed.
+ * Return Value: true if success; false if failed.
*
*/
-BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
+bool CARDbSetBSSID(void *pDeviceHandler, unsigned char *pbyBSSID, CARD_OP_MODE eOPMode)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1315,20 +831,20 @@ BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
}
if (eOPMode == OP_MODE_UNKNOWN) {
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID);
- pDevice->bBSSIDFilter = FALSE;
+ pDevice->bBSSIDFilter = false;
pDevice->byRxMode &= ~RCR_BSSID;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode );
} else {
- if (IS_NULL_ADDRESS(pDevice->abyBSSID) == FALSE) {
+ if (is_zero_ether_addr(pDevice->abyBSSID) == false) {
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID);
- pDevice->bBSSIDFilter = TRUE;
+ pDevice->bBSSIDFilter = true;
pDevice->byRxMode |= RCR_BSSID;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wmgr: rx_mode = %x\n", pDevice->byRxMode );
}
// Adopt BSS state in Adapter Device Object
pDevice->eOPMode = eOPMode;
- return(TRUE);
+ return(true);
}
@@ -1342,7 +858,7 @@ BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
* Out:
* none
*
- * Return Value: TRUE if success; FALSE if failed.
+ * Return Value: true if success; false if failed.
*
*/
@@ -1363,18 +879,18 @@ BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
* Out:
* none
*
- * Return Value: TRUE if succeed; otherwise FALSE
+ * Return Value: true if it succeeds; otherwise false
*
*/
-BOOL CARDbSetTxDataRate(
+bool CARDbSetTxDataRate(
void *pDeviceHandler,
- WORD wDataRate
+ unsigned short wDataRate
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
pDevice->wCurrentRate = wDataRate;
- return(TRUE);
+ return(true);
}
/*+
@@ -1388,20 +904,20 @@ BOOL CARDbSetTxDataRate(
* Out:
* none
*
- * Return Value: TRUE if power down success; otherwise FALSE
+ * Return Value: true if power down success; otherwise false
*
-*/
-BOOL
+bool
CARDbPowerDown(
void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice)pDeviceHandler;
- UINT uIdx;
+ unsigned int uIdx;
// check if already in Doze mode
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- return TRUE;
+ return true;
 // Force PSEN on
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
@@ -1410,12 +926,12 @@ CARDbPowerDown(
for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx ++) {
if (pDevice->iTDUsed[uIdx] != 0)
- return FALSE;
+ return false;
}
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Go to Doze ZZZZZZZZZZZZZZZ\n");
- return TRUE;
+ return true;
}
/*
@@ -1427,16 +943,16 @@ CARDbPowerDown(
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbRadioPowerOff (void *pDeviceHandler)
+bool CARDbRadioPowerOff (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bResult = TRUE;
+ bool bResult = true;
- if (pDevice->bRadioOff == TRUE)
- return TRUE;
+ if (pDevice->bRadioOff == true)
+ return true;
switch (pDevice->byRFType) {
@@ -1459,7 +975,7 @@ BOOL CARDbRadioPowerOff (void *pDeviceHandler)
BBvSetDeepSleep(pDevice->PortOffset, pDevice->byLocalID);
- pDevice->bRadioOff = TRUE;
+ pDevice->bRadioOff = true;
//2007-0409-03,<Add> by chester
printk("chester power off\n");
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
@@ -1476,23 +992,23 @@ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbRadioPowerOn (void *pDeviceHandler)
+bool CARDbRadioPowerOn (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bResult = TRUE;
+ bool bResult = true;
printk("chester power on\n");
- if (pDevice->bRadioControlOff == TRUE){
-if (pDevice->bHWRadioOff == TRUE) printk("chester bHWRadioOff\n");
-if (pDevice->bRadioControlOff == TRUE) printk("chester bRadioControlOff\n");
- return FALSE;}
+ if (pDevice->bRadioControlOff == true){
+if (pDevice->bHWRadioOff == true) printk("chester bHWRadioOff\n");
+if (pDevice->bRadioControlOff == true) printk("chester bRadioControlOff\n");
+ return false;}
- if (pDevice->bRadioOff == FALSE)
+ if (pDevice->bRadioOff == false)
{
printk("chester pbRadioOff\n");
-return TRUE;}
+return true;}
BBvExitDeepSleep(pDevice->PortOffset, pDevice->byLocalID);
@@ -1514,7 +1030,7 @@ return TRUE;}
}
- pDevice->bRadioOff = FALSE;
+ pDevice->bRadioOff = false;
// 2007-0409-03,<Add> by chester
printk("chester power on\n");
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
@@ -1523,12 +1039,12 @@ MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
-BOOL CARDbRemoveKey (void *pDeviceHandler, PBYTE pbyBSSID)
+bool CARDbRemoveKey (void *pDeviceHandler, unsigned char *pbyBSSID)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
KeybRemoveAllKey(&(pDevice->sKey), pbyBSSID, pDevice->PortOffset);
- return (TRUE);
+ return (true);
}
@@ -1548,17 +1064,17 @@ BOOL CARDbRemoveKey (void *pDeviceHandler, PBYTE pbyBSSID)
* Return Value: none.
*
-*/
-BOOL
+bool
CARDbAdd_PMKID_Candidate (
void *pDeviceHandler,
- PBYTE pbyBSSID,
- BOOL bRSNCapExist,
- WORD wRSNCap
+ unsigned char *pbyBSSID,
+ bool bRSNCapExist,
+ unsigned short wRSNCap
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
PPMKID_CANDIDATE pCandidateList;
- UINT ii = 0;
+ unsigned int ii = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate START: (%d)\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
@@ -1577,18 +1093,18 @@ CARDbAdd_PMKID_Candidate (
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if ( !memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((bRSNCapExist == TRUE) && (wRSNCap & BIT0)) {
+ if ((bRSNCapExist == true) && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
}
- return TRUE;
+ return true;
}
}
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((bRSNCapExist == TRUE) && (wRSNCap & BIT0)) {
+ if ((bRSNCapExist == true) && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -1596,7 +1112,7 @@ CARDbAdd_PMKID_Candidate (
memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
pDevice->gsPMKIDCandidate.NumCandidates++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return TRUE;
+ return true;
}
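
The PMKID candidate handling above follows a simple update-or-append pattern: an existing entry for the same BSSID only has its pre-authentication flag refreshed, otherwise a new entry is appended. A minimal standalone sketch of that pattern, with illustrative types and a hypothetical table size rather than the driver's structures:

#include <string.h>
#include <stdbool.h>

#define ETH_ALEN       6
#define MAX_CANDIDATES 16
#define FLAG_PREAUTH   0x01

struct candidate {
	unsigned char bssid[ETH_ALEN];
	unsigned int flags;
};

/* Update the flag of an existing BSSID entry, or append a new one. */
static bool add_candidate(struct candidate *list, unsigned int *count,
			  const unsigned char *bssid, bool preauth)
{
	unsigned int i;

	for (i = 0; i < *count; i++) {
		if (!memcmp(list[i].bssid, bssid, ETH_ALEN)) {
			if (preauth)
				list[i].flags |= FLAG_PREAUTH;
			else
				list[i].flags &= ~FLAG_PREAUTH;
			return true;	/* existing entry refreshed */
		}
	}
	if (*count >= MAX_CANDIDATES)
		return false;		/* table full */
	memcpy(list[*count].bssid, bssid, ETH_ALEN);
	list[*count].flags = preauth ? FLAG_PREAUTH : 0;
	(*count)++;
	return true;
}

int main(void)
{
	struct candidate list[MAX_CANDIDATES];
	unsigned int count = 0;
	unsigned char bssid[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	add_candidate(list, &count, bssid, true);	/* appended */
	add_candidate(list, &count, bssid, false);	/* flag cleared, still one entry */
	return (int)count - 1;				/* 0 on success */
}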
void *
@@ -1609,89 +1125,6 @@ CARDpGetCurrentAddress (
return (pDevice->abyCurrentNetAddr);
}
-
-
-void CARDvInitChannelTable (void *pDeviceHandler)
-{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bMultiBand = FALSE;
- UINT ii;
-
- for(ii=1;ii<=CARD_MAX_CHANNEL_TBL;ii++) {
- sChannelTbl[ii].bValid = FALSE;
- }
-
- switch (pDevice->byRFType) {
- case RF_RFMD2959 :
- case RF_AIROHA :
- case RF_AL2230S:
- case RF_UW2451 :
- case RF_VT3226 :
- // printk("chester-false\n");
- bMultiBand = FALSE;
- break;
- case RF_AIROHA7230 :
- case RF_UW2452 :
- case RF_NOTHING :
- default :
- bMultiBand = TRUE;
- break;
- }
-
- if ((pDevice->dwDiagRefCount != 0) ||
- (pDevice->b11hEnable == TRUE)) {
- if (bMultiBand == TRUE) {
- for(ii=0;ii<CARD_MAX_CHANNEL_TBL;ii++) {
- sChannelTbl[ii+1].bValid = TRUE;
- pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
- pDevice->abyLocalPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
- }
- for(ii=0;ii<CHANNEL_MAX_24G;ii++) {
- pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
- pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
- }
- } else {
- for(ii=0;ii<CHANNEL_MAX_24G;ii++) {
-//2008-8-4 <add> by chester
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
- pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
- pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
- }
- }
- }
- } else if (pDevice->byZoneType <= CCODE_MAX) {
- if (bMultiBand == TRUE) {
- for(ii=0;ii<CARD_MAX_CHANNEL_TBL;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
- pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- } else {
- for(ii=0;ii<CHANNEL_MAX_24G;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
- pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- }
- }
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO"Zone=[%d][%c][%c]!!\n",pDevice->byZoneType,ChannelRuleTab[pDevice->byZoneType].chCountryCode[0],ChannelRuleTab[pDevice->byZoneType].chCountryCode[1]);
- for(ii=0;ii<CARD_MAX_CHANNEL_TBL;ii++) {
- if (pDevice->abyRegPwr[ii+1] == 0) {
- pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
- }
- if (pDevice->abyLocalPwr[ii+1] == 0) {
- pDevice->abyLocalPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
- }
- }
-}
-
-
-
/*
*
* Description:
@@ -1706,27 +1139,27 @@ void CARDvInitChannelTable (void *pDeviceHandler)
* Return Value: none.
*
-*/
-BOOL
+bool
CARDbStartMeasure (
void *pDeviceHandler,
void *pvMeasureEIDs,
- UINT uNumOfMeasureEIDs
+ unsigned int uNumOfMeasureEIDs
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
PWLAN_IE_MEASURE_REQ pEID = (PWLAN_IE_MEASURE_REQ) pvMeasureEIDs;
QWORD qwCurrTSF;
QWORD qwStartTSF;
- BOOL bExpired = TRUE;
- WORD wDuration = 0;
+ bool bExpired = true;
+ unsigned short wDuration = 0;
if ((pEID == NULL) ||
(uNumOfMeasureEIDs == 0)) {
- return (TRUE);
+ return (true);
}
CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- if (pDevice->bMeasureInProgress == TRUE) {
- pDevice->bMeasureInProgress = FALSE;
+ if (pDevice->bMeasureInProgress == true) {
+ pDevice->bMeasureInProgress = false;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
@@ -1734,7 +1167,7 @@ CARDbStartMeasure (
// clear measure control
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
MACvSelectPage0(pDevice->PortOffset);
- CARDbSetChannel(pDevice, pDevice->byOrgChannel);
+ set_channel(pDevice, pDevice->byOrgChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
@@ -1749,7 +1182,7 @@ CARDbStartMeasure (
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
HIDWORD(qwStartTSF) = HIDWORD(*((PQWORD) (pDevice->pCurrMeasureEID->sReq.abyStartTime)));
LODWORD(qwStartTSF) = LODWORD(*((PQWORD) (pDevice->pCurrMeasureEID->sReq.abyStartTime)));
- wDuration = *((PWORD) (pDevice->pCurrMeasureEID->sReq.abyDuration));
+ wDuration = *((unsigned short *) (pDevice->pCurrMeasureEID->sReq.abyDuration));
wDuration += 1; // 1 TU for channel switching
if ((LODWORD(qwStartTSF) == 0) && (HIDWORD(qwStartTSF) == 0)) {
@@ -1759,7 +1192,7 @@ CARDbStartMeasure (
if (LODWORD(qwCurrTSF) > LODWORD(qwStartTSF)) {
HIDWORD(qwStartTSF)++;
}
- bExpired = FALSE;
+ bExpired = false;
break;
} else {
// start at setting start TSF - 1TU(for channel switching)
@@ -1773,11 +1206,11 @@ CARDbStartMeasure (
((HIDWORD(qwCurrTSF) == HIDWORD(qwStartTSF)) &&
(LODWORD(qwCurrTSF) < LODWORD(qwStartTSF)))
) {
- bExpired = FALSE;
+ bExpired = false;
break;
}
VNTWIFIbMeasureReport( pDevice->pMgmt,
- FALSE,
+ false,
pDevice->pCurrMeasureEID,
MEASURE_MODE_LATE,
pDevice->byBasicMap,
@@ -1787,7 +1220,7 @@ CARDbStartMeasure (
} else {
// hardware do not support measure
VNTWIFIbMeasureReport( pDevice->pMgmt,
- FALSE,
+ false,
pDevice->pCurrMeasureEID,
MEASURE_MODE_INCAPABLE,
pDevice->byBasicMap,
@@ -1797,7 +1230,7 @@ CARDbStartMeasure (
}
} while (pDevice->uNumOfMeasureEIDs != 0);
- if (bExpired == FALSE) {
+ if (bExpired == false) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART, LODWORD(qwStartTSF));
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART + 4, HIDWORD(qwStartTSF));
@@ -1807,7 +1240,7 @@ CARDbStartMeasure (
} else {
// all measure start time expired we should complete action
VNTWIFIbMeasureReport( pDevice->pMgmt,
- TRUE,
+ true,
NULL,
0,
pDevice->byBasicMap,
@@ -1815,7 +1248,7 @@ CARDbStartMeasure (
pDevice->abyRPIs
);
}
- return (TRUE);
+ return (true);
}
@@ -1833,19 +1266,19 @@ CARDbStartMeasure (
* Return Value: none.
*
-*/
-BOOL
+bool
CARDbChannelSwitch (
void *pDeviceHandler,
- BYTE byMode,
- BYTE byNewChannel,
- BYTE byCount
+ unsigned char byMode,
+ unsigned char byNewChannel,
+ unsigned char byCount
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BOOL bResult = TRUE;
+ bool bResult = true;
if (byCount == 0) {
- bResult = CARDbSetChannel(pDevice, byNewChannel);
+ bResult = set_channel(pDevice, byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
@@ -1854,7 +1287,7 @@ CARDbChannelSwitch (
}
pDevice->byChannelSwitchCount = byCount;
pDevice->byNewChannel = byNewChannel;
- pDevice->bChannelSwitch = TRUE;
+ pDevice->bChannelSwitch = true;
if (byMode == 1) {
bResult=CARDbStopTxPacket(pDevice, PKT_TYPE_802_11_ALL);
}
@@ -1876,34 +1309,34 @@ CARDbChannelSwitch (
* Return Value: none.
*
-*/
-BOOL
+bool
CARDbSetQuiet (
void *pDeviceHandler,
- BOOL bResetQuiet,
- BYTE byQuietCount,
- BYTE byQuietPeriod,
- WORD wQuietDuration,
- WORD wQuietOffset
+ bool bResetQuiet,
+ unsigned char byQuietCount,
+ unsigned char byQuietPeriod,
+ unsigned short wQuietDuration,
+ unsigned short wQuietOffset
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii = 0;
+ unsigned int ii = 0;
- if (bResetQuiet == TRUE) {
+ if (bResetQuiet == true) {
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
for(ii=0;ii<MAX_QUIET_COUNT;ii++) {
- pDevice->sQuiet[ii].bEnable = FALSE;
+ pDevice->sQuiet[ii].bEnable = false;
}
pDevice->uQuietEnqueue = 0;
- pDevice->bEnableFirstQuiet = FALSE;
- pDevice->bQuietEnable = FALSE;
+ pDevice->bEnableFirstQuiet = false;
+ pDevice->bQuietEnable = false;
pDevice->byQuietStartCount = byQuietCount;
}
- if (pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable == FALSE) {
- pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable = TRUE;
+ if (pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable == false) {
+ pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable = true;
pDevice->sQuiet[pDevice->uQuietEnqueue].byPeriod = byQuietPeriod;
pDevice->sQuiet[pDevice->uQuietEnqueue].wDuration = wQuietDuration;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime = (DWORD) byQuietCount;
+ pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime = (unsigned long) byQuietCount;
pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime *= pDevice->wBeaconInterval;
pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime += wQuietOffset;
pDevice->uQuietEnqueue++;
@@ -1914,7 +1347,7 @@ CARDbSetQuiet (
} else {
// we can not handle Quiet EID more
}
- return (TRUE);
+ return (true);
}
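
The start time queued for each quiet element above is just the Quiet Count, measured in beacon intervals, converted to TUs and then shifted by the Quiet Offset. A minimal standalone sketch of that arithmetic, using hypothetical names rather than the driver's fields:

#include <stdio.h>

/* Quiet Count is in beacon intervals, Quiet Offset in TUs past that TBTT. */
static unsigned long quiet_start_time_tu(unsigned char quiet_count,
					 unsigned short beacon_interval_tu,
					 unsigned short quiet_offset_tu)
{
	unsigned long start = (unsigned long)quiet_count;

	start *= beacon_interval_tu;	/* beacons until the quiet interval */
	start += quiet_offset_tu;	/* offset within that beacon period */
	return start;
}

int main(void)
{
	/* Quiet Count 5, beacon interval 100 TU, offset 20 TU -> 520 TU */
	printf("%lu\n", quiet_start_time_tu(5, 100, 20));
	return 0;
}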
@@ -1932,21 +1365,21 @@ CARDbSetQuiet (
* Return Value: none.
*
-*/
-BOOL
+bool
CARDbStartQuiet (
void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii = 0;
- DWORD dwStartTime = 0xFFFFFFFF;
- UINT uCurrentQuietIndex = 0;
- DWORD dwNextTime = 0;
- DWORD dwGap = 0;
- DWORD dwDuration = 0;
+ unsigned int ii = 0;
+ unsigned long dwStartTime = 0xFFFFFFFF;
+ unsigned int uCurrentQuietIndex = 0;
+ unsigned long dwNextTime = 0;
+ unsigned long dwGap = 0;
+ unsigned long dwDuration = 0;
for(ii=0;ii<MAX_QUIET_COUNT;ii++) {
- if ((pDevice->sQuiet[ii].bEnable == TRUE) &&
+ if ((pDevice->sQuiet[ii].bEnable == true) &&
(dwStartTime > pDevice->sQuiet[ii].dwStartTime)) {
dwStartTime = pDevice->sQuiet[ii].dwStartTime;
uCurrentQuietIndex = ii;
@@ -1954,22 +1387,22 @@ CARDbStartQuiet (
}
if (dwStartTime == 0xFFFFFFFF) {
// no more quiet
- pDevice->bQuietEnable = FALSE;
+ pDevice->bQuietEnable = false;
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
} else {
- if (pDevice->bQuietEnable == FALSE) {
+ if (pDevice->bQuietEnable == false) {
// first quiet
pDevice->byQuietStartCount--;
dwNextTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
dwNextTime %= pDevice->wBeaconInterval;
MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETINIT, (WORD) dwNextTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (WORD) pDevice->sQuiet[uCurrentQuietIndex].wDuration);
+ VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETINIT, (unsigned short) dwNextTime);
+ VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) pDevice->sQuiet[uCurrentQuietIndex].wDuration);
if (pDevice->byQuietStartCount == 0) {
- pDevice->bEnableFirstQuiet = FALSE;
+ pDevice->bEnableFirstQuiet = false;
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
} else {
- pDevice->bEnableFirstQuiet = TRUE;
+ pDevice->bEnableFirstQuiet = true;
}
MACvSelectPage0(pDevice->PortOffset);
} else {
@@ -1977,8 +1410,8 @@ CARDbStartQuiet (
// overlap with previous Quiet
dwGap = pDevice->dwCurrentQuietEndTime - pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
if (dwGap >= pDevice->sQuiet[uCurrentQuietIndex].wDuration) {
- // return FALSE to indicate next quiet expired, should call this function again
- return (FALSE);
+ // return false to indicate next quiet expired, should call this function again
+ return (false);
}
dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration - dwGap;
dwGap = 0;
@@ -1988,94 +1421,34 @@ CARDbStartQuiet (
}
// set GAP and Next duration
MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETGAP, (WORD) dwGap);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (WORD) dwDuration);
+ VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETGAP, (unsigned short) dwGap);
+ VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) dwDuration);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_QUIETRPT);
MACvSelectPage0(pDevice->PortOffset);
}
- pDevice->bQuietEnable = TRUE;
+ pDevice->bQuietEnable = true;
pDevice->dwCurrentQuietEndTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
pDevice->dwCurrentQuietEndTime += pDevice->sQuiet[uCurrentQuietIndex].wDuration;
if (pDevice->sQuiet[uCurrentQuietIndex].byPeriod == 0) {
// not period disable current quiet element
- pDevice->sQuiet[uCurrentQuietIndex].bEnable = FALSE;
+ pDevice->sQuiet[uCurrentQuietIndex].bEnable = false;
} else {
// set next period start time
- dwNextTime = (DWORD) pDevice->sQuiet[uCurrentQuietIndex].byPeriod;
+ dwNextTime = (unsigned long) pDevice->sQuiet[uCurrentQuietIndex].byPeriod;
dwNextTime *= pDevice->wBeaconInterval;
pDevice->sQuiet[uCurrentQuietIndex].dwStartTime = dwNextTime;
}
if (pDevice->dwCurrentQuietEndTime > 0x80010000) {
 // decrement all times to avoid wrap-around
for(ii=0;ii<MAX_QUIET_COUNT;ii++) {
- if (pDevice->sQuiet[ii].bEnable == TRUE) {
+ if (pDevice->sQuiet[ii].bEnable == true) {
pDevice->sQuiet[ii].dwStartTime -= 0x80000000;
}
}
pDevice->dwCurrentQuietEndTime -= 0x80000000;
}
}
- return (TRUE);
-}
-
-
-/*
- *
- * Description:
- * Set Channel Info of Country
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
--*/
-void
-CARDvSetCountryInfo (
- void *pDeviceHandler,
- CARD_PHY_TYPE ePHYType,
- void *pIE
- )
-{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii = 0;
- UINT uu = 0;
- UINT step = 0;
- UINT uNumOfCountryInfo = 0;
- BYTE byCh = 0;
- PWLAN_IE_COUNTRY pIE_Country = (PWLAN_IE_COUNTRY) pIE;
-
-
- uNumOfCountryInfo = (pIE_Country->len - 3);
- uNumOfCountryInfo /= 3;
-
- if (ePHYType == PHY_TYPE_11A) {
- pDevice->bCountryInfo5G = TRUE;
- for(ii=CB_MAX_CHANNEL_24G+1;ii<=CARD_MAX_CHANNEL_TBL;ii++) {
- sChannelTbl[ii].bValid = FALSE;
- }
- step = 4;
- } else {
- pDevice->bCountryInfo24G = TRUE;
- for(ii=1;ii<=CB_MAX_CHANNEL_24G;ii++) {
- sChannelTbl[ii].bValid = FALSE;
- }
- step = 1;
- }
- pDevice->abyCountryCode[0] = pIE_Country->abyCountryString[0];
- pDevice->abyCountryCode[1] = pIE_Country->abyCountryString[1];
- pDevice->abyCountryCode[2] = pIE_Country->abyCountryString[2];
-
- for(ii=0;ii<uNumOfCountryInfo;ii++) {
- for(uu=0;uu<pIE_Country->abyCountryInfo[ii*3+1];uu++) {
- byCh = CARDbyGetChannelMapping(pDevice, (BYTE)(pIE_Country->abyCountryInfo[ii*3]+step*uu), ePHYType);
- sChannelTbl[byCh].bValid = TRUE;
- pDevice->abyRegPwr[byCh] = pIE_Country->abyCountryInfo[ii*3+2];
- }
- }
+ return (true);
}
/*
@@ -2095,18 +1468,18 @@ CARDvSetCountryInfo (
void
CARDvSetPowerConstraint (
void *pDeviceHandler,
- BYTE byChannel,
- I8 byPower
+ unsigned char byChannel,
+ char byPower
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (byChannel > CB_MAX_CHANNEL_24G) {
- if (pDevice->bCountryInfo5G == TRUE) {
+ if (pDevice->bCountryInfo5G == true) {
pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower;
}
} else {
- if (pDevice->bCountryInfo24G == TRUE) {
+ if (pDevice->bCountryInfo24G == true) {
pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower;
}
}
@@ -2130,12 +1503,12 @@ CARDvSetPowerConstraint (
void
CARDvGetPowerCapability (
void *pDeviceHandler,
- PBYTE pbyMinPower,
- PBYTE pbyMaxPower
+ unsigned char *pbyMinPower,
+ unsigned char *pbyMaxPower
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BYTE byDec = 0;
+ unsigned char byDec = 0;
*pbyMaxPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh];
byDec = pDevice->abyOFDMPwrTbl[pDevice->byCurrentCh];
@@ -2148,98 +1521,6 @@ CARDvGetPowerCapability (
*pbyMinPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh] - byDec;
}
-
-/*
- *
- * Description:
- * Set Support Channels IE defined in 802.11h
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
--*/
-BYTE
-CARDbySetSupportChannels (
- void *pDeviceHandler,
- PBYTE pbyIEs
- )
-{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii;
- BYTE byCount;
- PWLAN_IE_SUPP_CH pIE = (PWLAN_IE_SUPP_CH) pbyIEs;
- PBYTE pbyChTupple;
- BYTE byLen = 0;
-
-
- pIE->byElementID = WLAN_EID_SUPP_CH;
- pIE->len = 0;
- pbyChTupple = pIE->abyChannelTuple;
- byLen = 2;
- // lower band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[28] == TRUE) {
- for (ii=28;ii<36;ii+=2) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == TRUE) {
- byCount++;
- }
- }
- *pbyChTupple++ = 34;
- *pbyChTupple++ = byCount;
- byLen += 2;
- } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[29] == TRUE) {
- for (ii=29;ii<36;ii+=2) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == TRUE) {
- byCount++;
- }
- }
- *pbyChTupple++ = 36;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- // middle band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[36] == TRUE) {
- for (ii=36;ii<40;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == TRUE) {
- byCount++;
- }
- }
- *pbyChTupple++ = 52;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- // higher band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[40] == TRUE) {
- for (ii=40;ii<51;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == TRUE) {
- byCount++;
- }
- }
- *pbyChTupple++ = 100;
- *pbyChTupple++ = byCount;
- byLen += 2;
- } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[51] == TRUE) {
- for (ii=51;ii<56;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == TRUE) {
- byCount++;
- }
- }
- *pbyChTupple++ = 149;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- pIE->len += (byLen - 2);
- return (byLen);
-}
-
-
/*
*
* Description:
@@ -2253,8 +1534,8 @@ CARDbySetSupportChannels (
*
* Return Value: none.
*
--*/
-I8
+ */
+char
CARDbyGetTransmitPower (
void *pDeviceHandler
)
@@ -2264,161 +1545,6 @@ CARDbyGetTransmitPower (
return (pDevice->byCurPwrdBm);
}
-
-BOOL
-CARDbChannelGetList (
- UINT uCountryCodeIdx,
- PBYTE pbyChannelTable
- )
-{
- if (uCountryCodeIdx >= CCODE_MAX) {
- return (FALSE);
- }
- memcpy(pbyChannelTable, ChannelRuleTab[uCountryCodeIdx].bChannelIdxList, CB_MAX_CHANNEL);
- return (TRUE);
-}
-
-
-void
-CARDvSetCountryIE(
- void *pDeviceHandler,
- void *pIE
- )
-{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii;
- PWLAN_IE_COUNTRY pIECountry = (PWLAN_IE_COUNTRY) pIE;
-
- pIECountry->byElementID = WLAN_EID_COUNTRY;
- pIECountry->len = 0;
- pIECountry->abyCountryString[0] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[0];
- pIECountry->abyCountryString[1] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[1];
- pIECountry->abyCountryString[2] = ' ';
- for (ii = CB_MAX_CHANNEL_24G; ii < CB_MAX_CHANNEL; ii++ ) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- pIECountry->abyCountryInfo[pIECountry->len++] = sChannelTbl[ii+1].byChannelNumber;
- pIECountry->abyCountryInfo[pIECountry->len++] = 1;
- pIECountry->abyCountryInfo[pIECountry->len++] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- pIECountry->len += 3;
-}
-
-
-BOOL
-CARDbGetChannelMapInfo(
- void *pDeviceHandler,
- UINT uChannelIndex,
- PBYTE pbyChannelNumber,
- PBYTE pbyMap
- )
-{
-// PSDevice pDevice = (PSDevice) pDeviceHandler;
-
- if (uChannelIndex > CB_MAX_CHANNEL) {
- return FALSE;
- }
- *pbyChannelNumber = sChannelTbl[uChannelIndex].byChannelNumber;
- *pbyMap = sChannelTbl[uChannelIndex].byMAP;
- return sChannelTbl[uChannelIndex].bValid;
-}
-
-
-void
-CARDvSetChannelMapInfo(
- void *pDeviceHandler,
- UINT uChannelIndex,
- BYTE byMap
- )
-{
-// PSDevice pDevice = (PSDevice) pDeviceHandler;
-
- if (uChannelIndex > CB_MAX_CHANNEL) {
- return;
- }
- sChannelTbl[uChannelIndex].byMAP |= byMap;
-}
-
-
-void
-CARDvClearChannelMapInfo(
- void *pDeviceHandler
- )
-{
-// PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii = 0;
-
- for (ii = 1; ii <= CB_MAX_CHANNEL; ii++) {
- sChannelTbl[ii].byMAP = 0;
- }
-}
-
-
-BYTE
-CARDbyAutoChannelSelect(
- void *pDeviceHandler,
- CARD_PHY_TYPE ePHYType
- )
-{
-// PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii = 0;
- BYTE byOptionChannel = 0;
- INT aiWeight[CB_MAX_CHANNEL_24G+1] = {-1000,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-
- if (ePHYType == PHY_TYPE_11A) {
- for(ii=CB_MAX_CHANNEL_24G+1;ii<=CB_MAX_CHANNEL;ii++) {
- if (sChannelTbl[ii].bValid == TRUE) {
- if (byOptionChannel == 0) {
- byOptionChannel = (BYTE) ii;
- }
- if (sChannelTbl[ii].byMAP == 0) {
- return ((BYTE) ii);
- } else if ( !(sChannelTbl[ii].byMAP & 0x08)) {
- byOptionChannel = (BYTE) ii;
- }
- }
- }
- } else {
- byOptionChannel = 0;
- for(ii=1;ii<=CB_MAX_CHANNEL_24G;ii++) {
- if (sChannelTbl[ii].bValid == TRUE) {
- if (sChannelTbl[ii].byMAP == 0) {
- aiWeight[ii] += 100;
- } else if (sChannelTbl[ii].byMAP & 0x01) {
- if (ii > 3) {
- aiWeight[ii-3] -= 10;
- }
- if (ii > 2) {
- aiWeight[ii-2] -= 20;
- }
- if (ii > 1) {
- aiWeight[ii-1] -= 40;
- }
- aiWeight[ii] -= 80;
- if (ii < CB_MAX_CHANNEL_24G) {
- aiWeight[ii+1] -= 40;
- }
- if (ii < (CB_MAX_CHANNEL_24G - 1)) {
- aiWeight[ii+2] -= 20;
- }
- if (ii < (CB_MAX_CHANNEL_24G - 2)) {
- aiWeight[ii+3] -= 10;
- }
- }
- }
- }
- for(ii=1;ii<=CB_MAX_CHANNEL_24G;ii++) {
- if ((sChannelTbl[ii].bValid == TRUE) &&
- (aiWeight[ii] > aiWeight[byOptionChannel])) {
- byOptionChannel = (BYTE) ii;
- }
- }
- }
- return (byOptionChannel);
-}
-
-
-
//xxx
void
CARDvSafeResetTx (
@@ -2426,7 +1552,7 @@ CARDvSafeResetTx (
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT uu;
+ unsigned int uu;
PSTxDesc pCurrTD;
// initialize TD index
@@ -2482,7 +1608,7 @@ CARDvSafeResetRx (
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT uu;
+ unsigned int uu;
PSRxDesc pDesc;
@@ -2494,17 +1620,17 @@ CARDvSafeResetRx (
// init state, all RD is chip's
for (uu = 0; uu < pDevice->sOpts.nRxDescs0; uu++) {
pDesc =&(pDevice->aRD0Ring[uu]);
- pDesc->m_rd0RD0.wResCount = (WORD)(pDevice->rx_buf_sz);
+ pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
pDesc->m_rd0RD0.f1Owner=OWNED_BY_NIC;
- pDesc->m_rd1RD1.wReqCount = (WORD)(pDevice->rx_buf_sz);
+ pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
}
// init state, all RD is chip's
for (uu = 0; uu < pDevice->sOpts.nRxDescs1; uu++) {
pDesc =&(pDevice->aRD1Ring[uu]);
- pDesc->m_rd0RD0.wResCount = (WORD)(pDevice->rx_buf_sz);
+ pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
pDesc->m_rd0RD0.f1Owner=OWNED_BY_NIC;
- pDesc->m_rd1RD1.wReqCount = (WORD)(pDevice->rx_buf_sz);
+ pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
}
pDevice->cbDFCB = CB_MAX_RX_FRAG;
@@ -2537,18 +1663,18 @@ CARDvSafeResetRx (
* Return Value: response Control frame rate
*
*/
-WORD CARDwGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx)
+unsigned short CARDwGetCCKControlRate(void *pDeviceHandler, unsigned short wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ui = (UINT)wRateIdx;
+ unsigned int ui = (unsigned int) wRateIdx;
while (ui > RATE_1M) {
- if (pDevice->wBasicRate & ((WORD)1 << ui)) {
- return (WORD)ui;
+ if (pDevice->wBasicRate & ((unsigned short)1 << ui)) {
+ return (unsigned short)ui;
}
ui --;
}
- return (WORD)RATE_1M;
+ return (unsigned short)RATE_1M;
}
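
CARDwGetCCKControlRate walks down from the requested rate index until it finds a rate present in the adapter's basic-rate bitmask, falling back to 1 Mbps if none is set. A standalone sketch of that fallback, with illustrative rate indices (index 0 standing in for RATE_1M):

#include <stdio.h>

static unsigned short cck_control_rate(unsigned short basic_rate_mask,
				       unsigned short rate_idx)
{
	unsigned int ui = rate_idx;

	while (ui > 0) {			/* index 0 == 1 Mbps */
		if (basic_rate_mask & (1u << ui))
			return (unsigned short)ui;	/* highest basic rate <= request */
		ui--;
	}
	return 0;				/* default to 1 Mbps */
}

int main(void)
{
	/* basic rates at indices 0, 1 and 3; requesting index 2 falls back to 1 */
	printf("%u\n", cck_control_rate(0x000B, 2));
	return 0;
}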
/*
@@ -2564,10 +1690,10 @@ WORD CARDwGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx)
* Return Value: response Control frame rate
*
*/
-WORD CARDwGetOFDMControlRate (void *pDeviceHandler, WORD wRateIdx)
+unsigned short CARDwGetOFDMControlRate (void *pDeviceHandler, unsigned short wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ui = (UINT)wRateIdx;
+ unsigned int ui = (unsigned int) wRateIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate);
@@ -2578,14 +1704,14 @@ WORD CARDwGetOFDMControlRate (void *pDeviceHandler, WORD wRateIdx)
return wRateIdx;
}
while (ui > RATE_11M) {
- if (pDevice->wBasicRate & ((WORD)1 << ui)) {
+ if (pDevice->wBasicRate & ((unsigned short)1 << ui)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDwGetOFDMControlRate : %d\n", ui);
- return (WORD)ui;
+ return (unsigned short)ui;
}
ui --;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDwGetOFDMControlRate: 6M\n");
- return (WORD)RATE_24M;
+ return (unsigned short)RATE_24M;
}
@@ -2604,9 +1730,9 @@ WORD CARDwGetOFDMControlRate (void *pDeviceHandler, WORD wRateIdx)
void CARDvSetRSPINF (void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BYTE byServ = 0x00, bySignal = 0x00; //For CCK
- WORD wLen = 0x0000;
- BYTE byTxRate, byRsvTime; //For OFDM
+ unsigned char byServ = 0x00, bySignal = 0x00; //For CCK
+ unsigned short wLen = 0x0000;
+ unsigned char byTxRate, byRsvTime; //For OFDM
//Set to Page1
MACvSelectPage1(pDevice->PortOffset);
@@ -2731,7 +1857,7 @@ void vUpdateIFS (void *pDeviceHandler)
//Set SIFS, DIFS, EIFS, SlotTime, CwMin
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BYTE byMaxMin = 0;
+ unsigned char byMaxMin = 0;
if (pDevice->byPacketType==PK_TYPE_11A) {//0000 0000 0000 0000,11a
pDevice->uSlot = C_SLOT_SHORT;
pDevice->uSIFS = C_SIFS_A;
@@ -2768,27 +1894,27 @@ void vUpdateIFS (void *pDeviceHandler)
pDevice->uEIFS = C_EIFS;
if (pDevice->byRFType == RF_RFMD2959) {
// bcs TX_PE will reserve 3 us
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (BYTE)(pDevice->uSIFS - 3));
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (BYTE)(pDevice->uDIFS - 3));
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)(pDevice->uSIFS - 3));
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)(pDevice->uDIFS - 3));
} else {
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (BYTE)pDevice->uSIFS);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (BYTE)pDevice->uDIFS);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)pDevice->uSIFS);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)pDevice->uDIFS);
}
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_EIFS, (BYTE)pDevice->uEIFS);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, (BYTE)pDevice->uSlot);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_EIFS, (unsigned char)pDevice->uEIFS);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, (unsigned char)pDevice->uSlot);
byMaxMin |= 0xA0;//1010 1111,C_CWMAX = 1023
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, (BYTE)byMaxMin);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, (unsigned char)byMaxMin);
}
void CARDvUpdateBasicTopRate (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- BYTE byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
- BYTE ii;
+ unsigned char byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
+ unsigned char ii;
//Determines the highest basic rate.
for (ii = RATE_54M; ii >= RATE_6M; ii --) {
- if ( (pDevice->wBasicRate) & ((WORD)(1<<ii)) ) {
+ if ( (pDevice->wBasicRate) & ((unsigned short)(1<<ii)) ) {
byTopOFDM = ii;
break;
}
@@ -2796,7 +1922,7 @@ void CARDvUpdateBasicTopRate (void *pDeviceHandler)
pDevice->byTopOFDMBasicRate = byTopOFDM;
for (ii = RATE_11M;; ii --) {
- if ( (pDevice->wBasicRate) & ((WORD)(1<<ii)) ) {
+ if ( (pDevice->wBasicRate) & ((unsigned short)(1<<ii)) ) {
byTopCCK = ii;
break;
}
@@ -2817,40 +1943,40 @@ void CARDvUpdateBasicTopRate (void *pDeviceHandler)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL CARDbAddBasicRate (void *pDeviceHandler, WORD wRateIdx)
+bool CARDbAddBasicRate (void *pDeviceHandler, unsigned short wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- WORD wRate = (WORD)(1<<wRateIdx);
+ unsigned short wRate = (unsigned short)(1<<wRateIdx);
pDevice->wBasicRate |= wRate;
//Determines the highest basic rate.
CARDvUpdateBasicTopRate((void *)pDevice);
- return(TRUE);
+ return(true);
}
-BOOL CARDbIsOFDMinBasicRate (void *pDeviceHandler)
+bool CARDbIsOFDMinBasicRate (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int ii;
for (ii = RATE_54M; ii >= RATE_6M; ii --) {
- if ((pDevice->wBasicRate) & ((WORD)(1<<ii)))
- return TRUE;
+ if ((pDevice->wBasicRate) & ((unsigned short)(1<<ii)))
+ return true;
}
- return FALSE;
+ return false;
}
-BYTE CARDbyGetPktType (void *pDeviceHandler)
+unsigned char CARDbyGetPktType (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (pDevice->byBBType == BB_TYPE_11A || pDevice->byBBType == BB_TYPE_11B) {
- return (BYTE)pDevice->byBBType;
+ return (unsigned char)pDevice->byBBType;
}
else if (CARDbIsOFDMinBasicRate((void *)pDevice)) {
return PK_TYPE_11GA;
@@ -2873,7 +1999,7 @@ BYTE CARDbyGetPktType (void *pDeviceHandler)
* Return Value: none
*
*/
-void CARDvSetLoopbackMode (DWORD_PTR dwIoBase, WORD wLoopbackMode)
+void CARDvSetLoopbackMode (unsigned long dwIoBase, unsigned short wLoopbackMode)
{
switch(wLoopbackMode) {
case CARD_LB_NONE:
@@ -2881,7 +2007,7 @@ void CARDvSetLoopbackMode (DWORD_PTR dwIoBase, WORD wLoopbackMode)
case CARD_LB_PHY:
break;
default:
- ASSERT(FALSE);
+ ASSERT(false);
break;
}
// set MAC loopback
@@ -2902,15 +2028,15 @@ void CARDvSetLoopbackMode (DWORD_PTR dwIoBase, WORD wLoopbackMode)
* Return Value: none
*
*/
-BOOL CARDbSoftwareReset (void *pDeviceHandler)
+bool CARDbSoftwareReset (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
// reset MAC
if (!MACbSafeSoftwareReset(pDevice->PortOffset))
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
@@ -2929,16 +2055,16 @@ BOOL CARDbSoftwareReset (void *pDeviceHandler)
* Return Value: TSF Offset value
*
*/
-QWORD CARDqGetTSFOffset (BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2)
+QWORD CARDqGetTSFOffset (unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2)
{
QWORD qwTSFOffset;
- WORD wRxBcnTSFOffst= 0;;
+	unsigned short wRxBcnTSFOffst = 0;
HIDWORD(qwTSFOffset) = 0;
LODWORD(qwTSFOffset) = 0;
wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate%MAX_RATE];
- (qwTSF2).u.dwLowDword += (DWORD)(wRxBcnTSFOffst);
- if ((qwTSF2).u.dwLowDword < (DWORD)(wRxBcnTSFOffst)) {
+ (qwTSF2).u.dwLowDword += (unsigned long)(wRxBcnTSFOffst);
+ if ((qwTSF2).u.dwLowDword < (unsigned long)(wRxBcnTSFOffst)) {
(qwTSF2).u.dwHighDword++;
}
LODWORD(qwTSFOffset) = LODWORD(qwTSF1) - LODWORD(qwTSF2);
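
The hunk above first advances the beacon timestamp by a rate-dependent receive offset, propagating the carry into the high dword by hand, and then subtracts it from the local TSF. With a native 64-bit type the same computation collapses to one expression; a minimal sketch with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t tsf_offset(uint64_t local_tsf, uint64_t bss_tsf,
			   uint16_t rx_bcn_tsf_offset)
{
	/* uint64_t addition handles the low-dword carry implicitly */
	return local_tsf - (bss_tsf + rx_bcn_tsf_offset);
}

int main(void)
{
	/* local TSF 1000000 us, beacon TSF 999000 us, rx offset 192 us -> 808 */
	printf("%" PRIu64 "\n", tsf_offset(1000000, 999000, 192));
	return 0;
}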
@@ -2963,13 +2089,13 @@ QWORD CARDqGetTSFOffset (BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2)
* Out:
* qwCurrTSF - Current TSF counter
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL CARDbGetCurrentTSF (DWORD_PTR dwIoBase, PQWORD pqwCurrTSF)
+bool CARDbGetCurrentTSF (unsigned long dwIoBase, PQWORD pqwCurrTSF)
{
- WORD ww;
- BYTE byData;
+ unsigned short ww;
+ unsigned char byData;
MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
@@ -2978,11 +2104,11 @@ BOOL CARDbGetCurrentTSF (DWORD_PTR dwIoBase, PQWORD pqwCurrTSF)
break;
}
if (ww == W_MAX_TIMEOUT)
- return(FALSE);
+ return(false);
VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, &LODWORD(*pqwCurrTSF));
VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR + 4, &HIDWORD(*pqwCurrTSF));
- return(TRUE);
+ return(true);
}
@@ -3000,12 +2126,12 @@ BOOL CARDbGetCurrentTSF (DWORD_PTR dwIoBase, PQWORD pqwCurrTSF)
* Return Value: TSF value of next Beacon
*
*/
-QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
+QWORD CARDqGetNextTBTT (QWORD qwTSF, unsigned short wBeaconInterval)
{
- UINT uLowNextTBTT;
- UINT uHighRemain, uLowRemain;
- UINT uBeaconInterval;
+ unsigned int uLowNextTBTT;
+ unsigned int uHighRemain, uLowRemain;
+ unsigned int uBeaconInterval;
uBeaconInterval = wBeaconInterval * 1024;
// Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
@@ -3044,7 +2170,7 @@ QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
* Return Value: none
*
*/
-void CARDvSetFirstNextTBTT (DWORD_PTR dwIoBase, WORD wBeaconInterval)
+void CARDvSetFirstNextTBTT (unsigned long dwIoBase, unsigned short wBeaconInterval)
{
QWORD qwNextTBTT;
@@ -3077,7 +2203,7 @@ void CARDvSetFirstNextTBTT (DWORD_PTR dwIoBase, WORD wBeaconInterval)
* Return Value: none
*
*/
-void CARDvUpdateNextTBTT (DWORD_PTR dwIoBase, QWORD qwTSF, WORD wBeaconInterval)
+void CARDvUpdateNextTBTT (unsigned long dwIoBase, QWORD qwTSF, unsigned short wBeaconInterval)
{
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
@@ -3085,7 +2211,8 @@ void CARDvUpdateNextTBTT (DWORD_PTR dwIoBase, QWORD qwTSF, WORD wBeaconInterval)
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, LODWORD(qwTSF));
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, HIDWORD(qwTSF));
MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Card:Update Next TBTT[%8xh:%8xh] \n",(UINT)HIDWORD(qwTSF), (UINT)LODWORD(qwTSF));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Card:Update Next TBTT[%8xh:%8xh] \n",
+ (unsigned int) HIDWORD(qwTSF), (unsigned int) LODWORD(qwTSF));
return;
}
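
CARDqGetNextTBTT implements the formula quoted in its comment, next_TBTT = ((current_TSF / beacon_interval) + 1) * beacon_interval, with the beacon interval converted from TUs to microseconds (1 TU = 1024 us). A minimal 64-bit sketch of the same formula, using hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t next_tbtt(uint64_t tsf_us, uint16_t beacon_interval_tu)
{
	uint64_t interval_us = (uint64_t)beacon_interval_tu * 1024;

	/* round the current TSF up to the next beacon boundary */
	return (tsf_us / interval_us + 1) * interval_us;
}

int main(void)
{
	/* beacon interval 100 TU = 102400 us; TSF 250000 us -> 307200 us */
	printf("%" PRIu64 "\n", next_tbtt(250000, 100));
	return 0;
}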
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 76313462cf7..e0836e1d511 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -30,6 +30,7 @@
#define __CARD_H__
#include "ttype.h"
+#include <linux/types.h>
/*--------------------- Export Definitions -------------------------*/
//
@@ -86,57 +87,55 @@ typedef enum _CARD_OP_MODE {
/*--------------------- Export Functions --------------------------*/
-BOOL ChannelValid(UINT CountryCode, UINT ChannelIndex);
void CARDvSetRSPINF(void *pDeviceHandler, CARD_PHY_TYPE ePHYType);
void vUpdateIFS(void *pDeviceHandler);
void CARDvUpdateBasicTopRate(void *pDeviceHandler);
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
-BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
-void CARDvSetLoopbackMode(DWORD_PTR dwIoBase, WORD wLoopbackMode);
-BOOL CARDbSoftwareReset(void *pDeviceHandler);
-void CARDvSetFirstNextTBTT(DWORD_PTR dwIoBase, WORD wBeaconInterval);
-void CARDvUpdateNextTBTT(DWORD_PTR dwIoBase, QWORD qwTSF, WORD wBeaconInterval);
-BOOL CARDbGetCurrentTSF(DWORD_PTR dwIoBase, PQWORD pqwCurrTSF);
-QWORD CARDqGetNextTBTT(QWORD qwTSF, WORD wBeaconInterval);
-QWORD CARDqGetTSFOffset(BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2);
-BOOL CARDbSetTxPower(void *pDeviceHandler, ULONG ulTxPower);
-BYTE CARDbyGetPktType(void *pDeviceHandler);
+bool CARDbAddBasicRate(void *pDeviceHandler, unsigned short wRateIdx);
+bool CARDbIsOFDMinBasicRate(void *pDeviceHandler);
+void CARDvSetLoopbackMode(unsigned long dwIoBase, unsigned short wLoopbackMode);
+bool CARDbSoftwareReset(void *pDeviceHandler);
+void CARDvSetFirstNextTBTT(unsigned long dwIoBase, unsigned short wBeaconInterval);
+void CARDvUpdateNextTBTT(unsigned long dwIoBase, QWORD qwTSF, unsigned short wBeaconInterval);
+bool CARDbGetCurrentTSF(unsigned long dwIoBase, PQWORD pqwCurrTSF);
+QWORD CARDqGetNextTBTT(QWORD qwTSF, unsigned short wBeaconInterval);
+QWORD CARDqGetTSFOffset(unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2);
+bool CARDbSetTxPower(void *pDeviceHandler, unsigned long ulTxPower);
+unsigned char CARDbyGetPktType(void *pDeviceHandler);
void CARDvSafeResetTx(void *pDeviceHandler);
void CARDvSafeResetRx(void *pDeviceHandler);
//xxx
-BOOL CARDbRadioPowerOff(void *pDeviceHandler);
-BOOL CARDbRadioPowerOn(void *pDeviceHandler);
-BOOL CARDbSetChannel(void *pDeviceHandler, UINT uConnectionChannel);
-//BOOL CARDbSendPacket(void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, UINT uLength);
-BOOL CARDbIsShortPreamble(void *pDeviceHandler);
-BOOL CARDbIsShorSlotTime(void *pDeviceHandler);
-BOOL CARDbSetPhyParameter(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wCapInfo, BYTE byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs);
-BOOL CARDbUpdateTSF(void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF);
-BOOL CARDbStopTxPacket(void *pDeviceHandler, CARD_PKT_TYPE ePktType);
-BOOL CARDbStartTxPacket(void *pDeviceHandler, CARD_PKT_TYPE ePktType);
-BOOL CARDbSetBeaconPeriod(void *pDeviceHandler, WORD wBeaconInterval);
-BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode);
-
-BOOL
+bool CARDbRadioPowerOff(void *pDeviceHandler);
+bool CARDbRadioPowerOn(void *pDeviceHandler);
+//bool CARDbSendPacket(void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, unsigned int uLength);
+bool CARDbIsShortPreamble(void *pDeviceHandler);
+bool CARDbIsShorSlotTime(void *pDeviceHandler);
+bool CARDbSetPhyParameter(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, unsigned short wCapInfo, unsigned char byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs);
+bool CARDbUpdateTSF(void *pDeviceHandler, unsigned char byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF);
+bool CARDbStopTxPacket(void *pDeviceHandler, CARD_PKT_TYPE ePktType);
+bool CARDbStartTxPacket(void *pDeviceHandler, CARD_PKT_TYPE ePktType);
+bool CARDbSetBeaconPeriod(void *pDeviceHandler, unsigned short wBeaconInterval);
+bool CARDbSetBSSID(void *pDeviceHandler, unsigned char *pbyBSSID, CARD_OP_MODE eOPMode);
+
+bool
CARDbPowerDown(
void *pDeviceHandler
);
-BOOL CARDbSetTxDataRate(
+bool CARDbSetTxDataRate(
void *pDeviceHandler,
- WORD wDataRate
+ unsigned short wDataRate
);
-BOOL CARDbRemoveKey (void *pDeviceHandler, PBYTE pbyBSSID);
+bool CARDbRemoveKey (void *pDeviceHandler, unsigned char *pbyBSSID);
-BOOL
+bool
CARDbAdd_PMKID_Candidate (
void *pDeviceHandler,
- PBYTE pbyBSSID,
- BOOL bRSNCapExist,
- WORD wRSNCap
+ unsigned char *pbyBSSID,
+ bool bRSNCapExist,
+ unsigned short wRSNCap
);
void *
@@ -144,112 +143,55 @@ CARDpGetCurrentAddress (
void *pDeviceHandler
);
-
-void CARDvInitChannelTable(void *pDeviceHandler);
-BYTE CARDbyGetChannelMapping(void *pDeviceHandler, BYTE byChannelNumber, CARD_PHY_TYPE ePhyType);
-
-BOOL
+bool
CARDbStartMeasure (
void *pDeviceHandler,
void *pvMeasureEIDs,
- UINT uNumOfMeasureEIDs
+ unsigned int uNumOfMeasureEIDs
);
-BOOL
+bool
CARDbChannelSwitch (
void *pDeviceHandler,
- BYTE byMode,
- BYTE byNewChannel,
- BYTE byCount
+ unsigned char byMode,
+ unsigned char byNewChannel,
+ unsigned char byCount
);
-BOOL
+bool
CARDbSetQuiet (
void *pDeviceHandler,
- BOOL bResetQuiet,
- BYTE byQuietCount,
- BYTE byQuietPeriod,
- WORD wQuietDuration,
- WORD wQuietOffset
+ bool bResetQuiet,
+ unsigned char byQuietCount,
+ unsigned char byQuietPeriod,
+ unsigned short wQuietDuration,
+ unsigned short wQuietOffset
);
-BOOL
+bool
CARDbStartQuiet (
void *pDeviceHandler
);
void
-CARDvSetCountryInfo (
- void *pDeviceHandler,
- CARD_PHY_TYPE ePHYType,
- void *pIE
- );
-
-void
CARDvSetPowerConstraint (
void *pDeviceHandler,
- BYTE byChannel,
- I8 byPower
+ unsigned char byChannel,
+ char byPower
);
void
CARDvGetPowerCapability (
void *pDeviceHandler,
- PBYTE pbyMinPower,
- PBYTE pbyMaxPower
+ unsigned char *pbyMinPower,
+ unsigned char *pbyMaxPower
);
-BYTE
-CARDbySetSupportChannels (
- void *pDeviceHandler,
- PBYTE pbyIEs
- );
-
-I8
+char
CARDbyGetTransmitPower (
void *pDeviceHandler
);
-BOOL
-CARDbChannelGetList (
- UINT uCountryCodeIdx,
- PBYTE pbyChannelTable
- );
-
-void
-CARDvSetCountryIE(
- void *pDeviceHandler,
- void *pIE
- );
-
-BOOL
-CARDbGetChannelMapInfo(
- void *pDeviceHandler,
- UINT uChannelIndex,
- PBYTE pbyChannelNumber,
- PBYTE pbyMap
- );
-
-void
-CARDvSetChannelMapInfo(
- void *pDeviceHandler,
- UINT uChannelIndex,
- BYTE byMap
- );
-
-void
-CARDvClearChannelMapInfo(
- void *pDeviceHandler
- );
-
-BYTE
-CARDbyAutoChannelSelect(
- void *pDeviceHandler,
- CARD_PHY_TYPE ePHYType
- );
-
-BYTE CARDbyGetChannelNumber(void *pDeviceHandler, BYTE byChannelIndex);
-
#endif // __CARD_H__
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
new file mode 100644
index 00000000000..47c156bb70a
--- /dev/null
+++ b/drivers/staging/vt6655/channel.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * File: channel.c
+ *
+ */
+
+#include "baseband.h"
+#include "country.h"
+#include "channel.h"
+#include "device.h"
+#include "rf.h"
+
+/*--------------------- Static Definitions -------------------------*/
+
+#define CARD_MAX_CHANNEL_TBL 56
+
+//static int msglevel = MSG_LEVEL_DEBUG;
+static int msglevel = MSG_LEVEL_INFO;
+
+/*--------------------- Static Variables --------------------------*/
+
+static SChannelTblElement sChannelTbl[CARD_MAX_CHANNEL_TBL + 1] =
+{
+ {0, 0, false, 0},
+ {1, 2412, true, 0},
+ {2, 2417, true, 0},
+ {3, 2422, true, 0},
+ {4, 2427, true, 0},
+ {5, 2432, true, 0},
+ {6, 2437, true, 0},
+ {7, 2442, true, 0},
+ {8, 2447, true, 0},
+ {9, 2452, true, 0},
+ {10, 2457, true, 0},
+ {11, 2462, true, 0},
+ {12, 2467, true, 0},
+ {13, 2472, true, 0},
+ {14, 2484, true, 0},
+ {183, 4915, true, 0},
+ {184, 4920, true, 0},
+ {185, 4925, true, 0},
+ {187, 4935, true, 0},
+ {188, 4940, true, 0},
+ {189, 4945, true, 0},
+ {192, 4960, true, 0},
+ {196, 4980, true, 0},
+ {7, 5035, true, 0},
+ {8, 5040, true, 0},
+ {9, 5045, true, 0},
+ {11, 5055, true, 0},
+ {12, 5060, true, 0},
+ {16, 5080, true, 0},
+ {34, 5170, true, 0},
+ {36, 5180, true, 0},
+ {38, 5190, true, 0},
+ {40, 5200, true, 0},
+ {42, 5210, true, 0},
+ {44, 5220, true, 0},
+ {46, 5230, true, 0},
+ {48, 5240, true, 0},
+ {52, 5260, true, 0},
+ {56, 5280, true, 0},
+ {60, 5300, true, 0},
+ {64, 5320, true, 0},
+ {100, 5500, true, 0},
+ {104, 5520, true, 0},
+ {108, 5540, true, 0},
+ {112, 5560, true, 0},
+ {116, 5580, true, 0},
+ {120, 5600, true, 0},
+ {124, 5620, true, 0},
+ {128, 5640, true, 0},
+ {132, 5660, true, 0},
+ {136, 5680, true, 0},
+ {140, 5700, true, 0},
+ {149, 5745, true, 0},
+ {153, 5765, true, 0},
+ {157, 5785, true, 0},
+ {161, 5805, true, 0},
+ {165, 5825, true, 0}
+};
+
+/************************************************************************
+ * The Radar regulation rules for each country
+ ************************************************************************/
+static struct
+{
+ unsigned char byChannelCountryCode; /* The country code */
+ char chCountryCode[2];
+ unsigned char bChannelIdxList[CB_MAX_CHANNEL]; /* Available channels Index */
+ unsigned char byPower[CB_MAX_CHANNEL];
+} ChannelRuleTab[] =
+{
+/************************************************************************
+ * This table is based on Athero driver rules
+ ************************************************************************/
+/* Country Available channels, ended with 0 */
+/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
+{CCODE_FCC, {'U','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_TELEC, {'J','P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 23, 0, 0, 23, 0, 23, 23, 0, 23, 0, 0, 23, 23, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ETSI, {'E','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_RESV3, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESV4, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESV5, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESV6, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESV7, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESV8, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESV9, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESVa, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESVb, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESVc, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESVd, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RESVe, {' ',' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ALLBAND, {' ',' '}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ALBANIA, {'A','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ALGERIA, {'D','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ARGENTINA, {'A','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
+{CCODE_ARMENIA, {'A','M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_AUSTRALIA, {'A','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_AUSTRIA, {'A','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 15, 0, 15, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_AZERBAIJAN, {'A','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_BAHRAIN, {'B','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_BELARUS, {'B','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_BELGIUM, {'B','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_BELIZE, {'B','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_BOLIVIA, {'B','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_BRAZIL, {'B','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_BRUNEI_DARUSSALAM, {'B','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_BULGARIA, {'B','G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 0, 0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0} },
+{CCODE_CANADA, {'C','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_CHILE, {'C','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 17, 17} },
+{CCODE_CHINA, {'C','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_COLOMBIA, {'C','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_COSTA_RICA, {'C','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_CROATIA, {'H','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_CYPRUS, {'C','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_CZECH, {'C','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_DENMARK, {'D','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_DOMINICAN_REPUBLIC, {'D','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_ECUADOR, {'E','C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_EGYPT, {'E','G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_EL_SALVADOR, {'S','V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ESTONIA, {'E','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_FINLAND, {'F','I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_FRANCE, {'F','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_GERMANY, {'D','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_GREECE, {'G','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_GEORGIA, {'G','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_GUATEMALA, {'G','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_HONDURAS, {'H','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_HONG_KONG, {'H','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_HUNGARY, {'H','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ICELAND, {'I','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_INDIA, {'I','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_INDONESIA, {'I','D'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_IRAN, {'I','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_IRELAND, {'I','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_ITALY, {'I','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_ISRAEL, {'I','L'}, { 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_JAPAN, {'J','P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_JORDAN, {'J','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_KAZAKHSTAN, {'K','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_KUWAIT, {'K','W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_LATVIA, {'L','V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_LEBANON, {'L','B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_LEICHTENSTEIN, {'L','I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_LITHUANIA, {'L','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_LUXEMBURG, {'L','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_MACAU, {'M','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_MACEDONIA, {'M','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_MALTA, {'M','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
+{CCODE_MALAYSIA, {'M','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_MEXICO, {'M','X'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_MONACO, {'M','C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_MOROCCO, {'M','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_NETHERLANDS, {'N','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_NEW_ZEALAND, {'N','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_NORTH_KOREA, {'K','P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
+{CCODE_NORWAY, {'N','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_OMAN, {'O','M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_PAKISTAN, {'P','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_PANAMA, {'P','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_PERU, {'P','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_PHILIPPINES, {'P','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_POLAND, {'P','L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_PORTUGAL, {'P','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_PUERTO_RICO, {'P','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_QATAR, {'Q','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ROMANIA, {'R','O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_RUSSIA, {'R','U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_SAUDI_ARABIA, {'S','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_SINGAPORE, {'S','G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 20, 20, 20, 20} },
+{CCODE_SLOVAKIA, {'S','K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
+{CCODE_SLOVENIA, {'S','I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_SOUTH_AFRICA, {'Z','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_SOUTH_KOREA, {'K','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
+{CCODE_SPAIN, {'E','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
+{CCODE_SWEDEN, {'S','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_SWITZERLAND, {'C','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_SYRIA, {'S','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_TAIWAN, {'T','W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
+{CCODE_THAILAND, {'T','H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
+{CCODE_TRINIDAD_TOBAGO, {'T','T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_TUNISIA, {'T','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_TURKEY, {'T','R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_UK, {'G','B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
+{CCODE_UKRAINE, {'U','A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_UNITED_ARAB_EMIRATES, {'A','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_UNITED_STATES, {'U','S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
+ , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
+{CCODE_URUGUAY, {'U','Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
+{CCODE_UZBEKISTAN, {'U','Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_VENEZUELA, {'V','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
+ , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
+{CCODE_VIETNAM, {'V','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_YEMEN, {'Y','E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_ZIMBABWE, {'Z','W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_JAPAN_W52_W53, {'J','J'}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+{CCODE_MAX, {'U','N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
+/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
+};
+
+/*--------------------- Export Functions --------------------------*/
+
+/**
+ * is_channel_valid() - Is the channel valid for the current country
+ * @ChannelIndex: index into the VT3253 MAC channel table:
+ * 1 = 2.4G channel 1
+ * 2 = 2.4G channel 2
+ * ...
+ * 14 = 2.4G channel 14
+ * 15 = 4.9G channel 183
+ * 16 = 4.9G channel 184
+ * ...
+ * 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (index 15 ~ 22)
+ * 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (index 23 ~ 56)
+ *
+ * Output: true if the channel at the given index may be used,
+ * false otherwise.
+ */
+
+bool is_channel_valid(unsigned int ChannelIndex)
+{
+ bool bValid;
+
+ bValid = false;
+ /*
+ * If the channel index is out of range, the channel is not valid
+ */
+ if ((ChannelIndex > CB_MAX_CHANNEL) ||
+ (ChannelIndex == 0))
+ {
+ bValid = false;
+ goto exit;
+ }
+
+ bValid = sChannelTbl[ChannelIndex].bValid;
+
+exit:
+ return (bValid);
+
+}
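+
+/*
+ * Editorial sketch (not part of the original patch): a caller that has to
+ * iterate over the channel table would typically use is_channel_valid() as
+ * a guard.  The function name and the CHANNEL_EXAMPLE_SKETCH guard below
+ * are hypothetical and only illustrate the intended use of the helper.
+ */
+#ifdef CHANNEL_EXAMPLE_SKETCH
+static unsigned int example_count_valid_channels(void)
+{
+	unsigned int uIndex, uCount = 0;
+
+	for (uIndex = 1; uIndex <= CB_MAX_CHANNEL; uIndex++) {
+		/* skip channels that the current country rule disallows */
+		if (is_channel_valid(uIndex))
+			uCount++;
+	}
+	return uCount;
+}
+#endif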
+
+/**
+ * channel_get_list() - Get the available channel list for a given country
+ * @uCountryCodeIdx: country code index as defined in country.h
+ * @pbyChannelTable: output buffer of CB_MAX_CHANNEL bytes; entry ii is
+ * non-zero when the channel at table index ii + 1 is
+ * allowed for that country
+ *
+ * Return Value: true on success, false if the country code index is out
+ * of range.
+ */
+
+bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable)
+{
+ if (uCountryCodeIdx >= CCODE_MAX)
+ return (false);
+
+ memcpy(pbyChannelTable, ChannelRuleTab[uCountryCodeIdx].bChannelIdxList, CB_MAX_CHANNEL);
+
+ return (true);
+}
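+
+/*
+ * Editorial sketch (not part of the original patch): channel_get_list()
+ * fills a caller-supplied array of CB_MAX_CHANNEL flag bytes.  The example
+ * below, guarded by the hypothetical CHANNEL_EXAMPLE_SKETCH macro, counts
+ * the channels permitted by the FCC rule set.
+ */
+#ifdef CHANNEL_EXAMPLE_SKETCH
+static unsigned int example_count_fcc_channels(void)
+{
+	unsigned char abyChannels[CB_MAX_CHANNEL];
+	unsigned int ii, uCount = 0;
+
+	if (!channel_get_list(CCODE_FCC, abyChannels))
+		return 0;
+
+	for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
+		/* a non-zero entry means table index ii + 1 is allowed */
+		if (abyChannels[ii])
+			uCount++;
+	}
+	return uCount;
+}
+#endif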
+
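+/**
+ * init_channel_table() - Rebuild the channel validity and power tables
+ * @pDeviceHandler: the adapter to be initialized
+ *
+ * Marks the sChannelTbl entries permitted by the country rule selected
+ * through byZoneType as valid (every channel when diagnostics or 802.11h
+ * are enabled on a multi-band RF, and only the 2.4 GHz channels for
+ * single-band RF types) and fills the per-channel register and local power
+ * arrays from the rule table or the CCK/OFDM defaults; any power entry
+ * still left at zero falls back to the OFDM default.
+ */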
+void init_channel_table(void *pDeviceHandler)
+{
+ PSDevice pDevice = (PSDevice) pDeviceHandler;
+ bool bMultiBand = false;
+ unsigned int ii;
+
+ for(ii = 1 ; ii<=CARD_MAX_CHANNEL_TBL ; ii++) {
+ sChannelTbl[ii].bValid = false;
+ }
+
+ switch (pDevice->byRFType) {
+ case RF_RFMD2959 :
+ case RF_AIROHA :
+ case RF_AL2230S:
+ case RF_UW2451 :
+ case RF_VT3226 :
+ //printk("chester-false\n");
+ bMultiBand = false;
+ break;
+ case RF_AIROHA7230 :
+ case RF_UW2452 :
+ case RF_NOTHING :
+ default :
+ bMultiBand = true;
+ break;
+ }
+
+ if ((pDevice->dwDiagRefCount != 0) || (pDevice->b11hEnable == true)) {
+ if (bMultiBand == true) {
+ for(ii = 0 ; ii<CARD_MAX_CHANNEL_TBL ; ii++) {
+ sChannelTbl[ii+1].bValid = true;
+ pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
+ pDevice->abyLocalPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
+ }
+ for(ii = 0 ; ii<CHANNEL_MAX_24G ; ii++) {
+ pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
+ pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
+ }
+ } else {
+ for(ii = 0 ; ii<CHANNEL_MAX_24G ; ii++) {
+ //2008-8-4 <add> by chester
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
+ sChannelTbl[ii+1].bValid = true;
+ pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
+ pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
+ }
+ }
+ }
+ } else if (pDevice->byZoneType <= CCODE_MAX) {
+ if (bMultiBand == true) {
+ for(ii = 0 ; ii<CARD_MAX_CHANNEL_TBL ; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
+ sChannelTbl[ii+1].bValid = true;
+ pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
+ pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
+ }
+ }
+ } else {
+ for(ii = 0 ; ii<CHANNEL_MAX_24G ; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
+ sChannelTbl[ii+1].bValid = true;
+ pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
+ pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
+ }
+ }
+ }
+ }
+
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO"Zone=[%d][%c][%c]!!\n",pDevice->byZoneType,ChannelRuleTab[pDevice->byZoneType].chCountryCode[0],ChannelRuleTab[pDevice->byZoneType].chCountryCode[1]);
+
+ for(ii = 0 ; ii<CARD_MAX_CHANNEL_TBL ; ii++) {
+ if (pDevice->abyRegPwr[ii+1] == 0)
+ pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
+ if (pDevice->abyLocalPwr[ii+1] == 0)
+ pDevice->abyLocalPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
+ }
+}
+
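+/**
+ * get_channel_mapping() - Map an IEEE channel number to a channel table index
+ * @pDeviceHandler: the adapter (not used by this helper)
+ * @byChannelNumber: IEEE channel number (for example 1, 36 or 149)
+ * @ePhyType: PHY type the channel number refers to
+ *
+ * For 802.11b/g the channel number equals the table index and is returned
+ * unchanged; otherwise the 4.9 GHz/5 GHz part of sChannelTbl is searched
+ * and the matching index is returned, or 0 when no entry matches.
+ */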
+unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType)
+{
+ unsigned int ii;
+
+ if ((ePhyType == PHY_TYPE_11B) || (ePhyType == PHY_TYPE_11G))
+ return (byChannelNumber);
+
+ for(ii = (CB_MAX_CHANNEL_24G + 1); ii <= CB_MAX_CHANNEL; ) {
+ if (sChannelTbl[ii].byChannelNumber == byChannelNumber)
+ return ((unsigned char) ii);
+ ii++;
+ }
+ return 0;
+}
+
+unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex)
+{
+ //PSDevice pDevice = (PSDevice) pDeviceHandler;
+ return(sChannelTbl[byChannelIndex].byChannelNumber);
+}
+
+/**
+ * set_channel() - Set NIC media channel
+ *
+ * @pDeviceHandler: The adapter to be set
+ * @uConnectionChannel: Channel to be set
+ *
+ * Return Value: true if succeeded; false if failed.
+ *
+ */
+bool set_channel (void *pDeviceHandler, unsigned int uConnectionChannel)
+{
+ PSDevice pDevice = (PSDevice) pDeviceHandler;
+ bool bResult = true;
+
+
+ if (pDevice->byCurrentCh == uConnectionChannel) {
+ return bResult;
+ }
+
+ if (sChannelTbl[uConnectionChannel].bValid == false) {
+ return (false);
+ }
+
+ if ((uConnectionChannel > CB_MAX_CHANNEL_24G) &&
+ (pDevice->eCurrentPHYType != PHY_TYPE_11A)) {
+ CARDbSetPhyParameter(pDevice, PHY_TYPE_11A, 0, 0, NULL, NULL);
+ } else if ((uConnectionChannel <= CB_MAX_CHANNEL_24G) &&
+ (pDevice->eCurrentPHYType == PHY_TYPE_11A)) {
+ CARDbSetPhyParameter(pDevice, PHY_TYPE_11G, 0, 0, NULL, NULL);
+ }
+ // clear NAV
+ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
+
+ //{{ RobertYu: 20041202
+ //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
+
+ if ( pDevice->byRFType == RF_AIROHA7230 )
+ {
+ RFbAL7230SelectChannelPostProcess(pDevice->PortOffset, pDevice->byCurrentCh, (unsigned char)uConnectionChannel);
+ }
+ //}} RobertYu
+
+
+ pDevice->byCurrentCh = (unsigned char)uConnectionChannel;
+ bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (unsigned char)uConnectionChannel);
+
+ // Init Synthesizer Table
+ if (pDevice->bEnablePSMode == true)
+ RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);
+
+
+ //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDbSetMediaChannel: %d\n", (unsigned char)uConnectionChannel);
+ BBvSoftwareReset(pDevice->PortOffset);
+
+ if (pDevice->byLocalID > REV_ID_VT3253_B1) {
+ // set HW default power register
+ MACvSelectPage1(pDevice->PortOffset);
+ RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK, pDevice->byCurPwr);
+ RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
+ VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM, pDevice->byCurPwr);
+ MACvSelectPage0(pDevice->PortOffset);
+ }
+
+ if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
+#ifdef PLICE_DEBUG
+ //printk("Func:ChbSetChannel:call RFbSetPower:11B\n");
+#endif
+ RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
+ } else {
+#ifdef PLICE_DEBUG
+ //printk("Func:ChbSetChannel:call RFbSetPower\n");
+#endif
+ RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
+ }
+
+ return(bResult);
+}
+
+/**
+ * set_country_info() - Set Channel Info of Country
+ *
+ * Return Value: none.
+ *
+ */
+
+void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE)
+{
+ PSDevice pDevice = (PSDevice) pDeviceHandler;
+ unsigned int ii = 0;
+ unsigned int uu = 0;
+ unsigned int step = 0;
+ unsigned int uNumOfCountryInfo = 0;
+ unsigned char byCh = 0;
+ PWLAN_IE_COUNTRY pIE_Country = (PWLAN_IE_COUNTRY) pIE;
+
+
+ uNumOfCountryInfo = (pIE_Country->len - 3);
+ uNumOfCountryInfo /= 3;
+
+ if (ePHYType == PHY_TYPE_11A) {
+ pDevice->bCountryInfo5G = true;
+ for(ii = CB_MAX_CHANNEL_24G + 1 ; ii <= CARD_MAX_CHANNEL_TBL ; ii++) {
+ sChannelTbl[ii].bValid = false;
+ }
+ step = 4;
+ } else {
+ pDevice->bCountryInfo24G = true;
+ for(ii = 1 ; ii <= CB_MAX_CHANNEL_24G ; ii++) {
+ sChannelTbl[ii].bValid = false;
+ }
+ step = 1;
+ }
+ pDevice->abyCountryCode[0] = pIE_Country->abyCountryString[0];
+ pDevice->abyCountryCode[1] = pIE_Country->abyCountryString[1];
+ pDevice->abyCountryCode[2] = pIE_Country->abyCountryString[2];
+
+ for(ii = 0 ; ii < uNumOfCountryInfo ; ii++) {
+ for(uu = 0 ; uu < pIE_Country->abyCountryInfo[ii*3+1] ; uu++) {
+ byCh = get_channel_mapping(pDevice, (unsigned char)(pIE_Country->abyCountryInfo[ii*3]+step*uu), ePHYType);
+ sChannelTbl[byCh].bValid = true;
+ pDevice->abyRegPwr[byCh] = pIE_Country->abyCountryInfo[ii*3+2];
+ }
+ }
+}
+
+/**
+ *
+ * set_support_channels() - Set Support Channels IE defined in 802.11h
+ *
+ * @hDeviceContext: device structure point
+ *
+ * Return Value: none.
+ *
+ */
+
+unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs)
+{
+ PSDevice pDevice = (PSDevice) pDeviceHandler;
+ unsigned int ii;
+ unsigned char byCount;
+ PWLAN_IE_SUPP_CH pIE = (PWLAN_IE_SUPP_CH) pbyIEs;
+ unsigned char *pbyChTupple;
+ unsigned char byLen = 0;
+
+
+ pIE->byElementID = WLAN_EID_SUPP_CH;
+ pIE->len = 0;
+ pbyChTupple = pIE->abyChannelTuple;
+ byLen = 2;
+ // lower band
+ byCount = 0;
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[28] == true) {
+ for (ii = 28 ; ii < 36 ; ii+= 2) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
+ byCount++;
+ }
+ }
+ *pbyChTupple++ = 34;
+ *pbyChTupple++ = byCount;
+ byLen += 2;
+ } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[29] == true) {
+ for (ii = 29 ; ii < 36 ; ii+= 2) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
+ byCount++;
+ }
+ }
+ *pbyChTupple++ = 36;
+ *pbyChTupple++ = byCount;
+ byLen += 2;
+ }
+ // middle band
+ byCount = 0;
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[36] == true) {
+ for (ii = 36 ; ii < 40 ; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
+ byCount++;
+ }
+ }
+ *pbyChTupple++ = 52;
+ *pbyChTupple++ = byCount;
+ byLen += 2;
+ }
+ // higher band
+ byCount = 0;
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[40] == true) {
+ for (ii = 40 ; ii < 51 ; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
+ byCount++;
+ }
+ }
+ *pbyChTupple++ = 100;
+ *pbyChTupple++ = byCount;
+ byLen += 2;
+ } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[51] == true) {
+ for (ii = 51 ; ii < 56 ; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
+ byCount++;
+ }
+ }
+ *pbyChTupple++ = 149;
+ *pbyChTupple++ = byCount;
+ byLen += 2;
+ }
+ pIE->len += (byLen - 2);
+ return (byLen);
+}
+
+void set_country_IE(void *pDeviceHandler, void *pIE)
+{
+ PSDevice pDevice = (PSDevice) pDeviceHandler;
+ unsigned int ii;
+ PWLAN_IE_COUNTRY pIECountry = (PWLAN_IE_COUNTRY) pIE;
+
+ pIECountry->byElementID = WLAN_EID_COUNTRY;
+ pIECountry->len = 0;
+ pIECountry->abyCountryString[0] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[0];
+ pIECountry->abyCountryString[1] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[1];
+ pIECountry->abyCountryString[2] = ' ';
+ for (ii = CB_MAX_CHANNEL_24G; ii < CB_MAX_CHANNEL; ii++ ) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
+ pIECountry->abyCountryInfo[pIECountry->len++] = sChannelTbl[ii+1].byChannelNumber;
+ pIECountry->abyCountryInfo[pIECountry->len++] = 1;
+ pIECountry->abyCountryInfo[pIECountry->len++] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
+ }
+ }
+ pIECountry->len += 3;
+}
+
+bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
+ unsigned char *pbyChannelNumber, unsigned char *pbyMap)
+{
+
+ if (uChannelIndex > CB_MAX_CHANNEL) {
+ return false;
+ }
+ *pbyChannelNumber = sChannelTbl[uChannelIndex].byChannelNumber;
+ *pbyMap = sChannelTbl[uChannelIndex].byMAP;
+ return sChannelTbl[uChannelIndex].bValid;
+}
+
+void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
+ unsigned char byMap)
+{
+
+ if (uChannelIndex > CB_MAX_CHANNEL) {
+ return;
+ }
+ sChannelTbl[uChannelIndex].byMAP |= byMap;
+}
+
+void clear_channel_map_info(void *pDeviceHandler)
+{
+ unsigned int ii = 0;
+
+ for (ii = 1; ii <= CB_MAX_CHANNEL; ii++) {
+ sChannelTbl[ii].byMAP = 0;
+ }
+}
+
+unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
+{
+ unsigned int ii = 0;
+ unsigned char byOptionChannel = 0;
+ int aiWeight[CB_MAX_CHANNEL_24G+1] = {-1000,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+
+ if (ePHYType == PHY_TYPE_11A) {
+ for(ii = CB_MAX_CHANNEL_24G + 1 ; ii <= CB_MAX_CHANNEL ; ii++) {
+ if (sChannelTbl[ii].bValid == true) {
+ if (byOptionChannel == 0) {
+ byOptionChannel = (unsigned char) ii;
+ }
+ if (sChannelTbl[ii].byMAP == 0) {
+ return ((unsigned char) ii);
+ } else if ( !(sChannelTbl[ii].byMAP & 0x08)) {
+ byOptionChannel = (unsigned char) ii;
+ }
+ }
+ }
+ } else {
+ byOptionChannel = 0;
+ for(ii = 1 ; ii <= CB_MAX_CHANNEL_24G ; ii++) {
+ if (sChannelTbl[ii].bValid == true) {
+ if (sChannelTbl[ii].byMAP == 0) {
+ aiWeight[ii] += 100;
+ } else if (sChannelTbl[ii].byMAP & 0x01) {
+ if (ii > 3) {
+ aiWeight[ii-3] -= 10;
+ }
+ if (ii > 2) {
+ aiWeight[ii-2] -= 20;
+ }
+ if (ii > 1) {
+ aiWeight[ii-1] -= 40;
+ }
+ aiWeight[ii] -= 80;
+ if (ii < CB_MAX_CHANNEL_24G) {
+ aiWeight[ii+1] -= 40;
+ }
+ if (ii < (CB_MAX_CHANNEL_24G - 1)) {
+ aiWeight[ii+2] -= 20;
+ }
+ if (ii < (CB_MAX_CHANNEL_24G - 2)) {
+ aiWeight[ii+3] -= 10;
+ }
+ }
+ }
+ }
+ for(ii = 1 ; ii <= CB_MAX_CHANNEL_24G ; ii++) {
+ if ((sChannelTbl[ii].bValid == true) &&
+ (aiWeight[ii] > aiWeight[byOptionChannel])) {
+ byOptionChannel = (unsigned char) ii;
+ }
+ }
+ }
+ return (byOptionChannel);
+}
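
[Illustration, not part of the patch above.] The 2.4 GHz branch of auto_channel_select() scores channels by penalising each busy channel and its neighbours with decaying weights (80/40/20/10), rewarding quiet channels with +100, and then picking the best-scoring valid channel. The standalone sketch below mirrors that weighting using hypothetical stand-ins (MAX_CH_24G, valid[], busy[]) in place of the driver's sChannelTbl[] and byMAP bookkeeping.

#include <stdio.h>
#include <stdbool.h>

#define MAX_CH_24G 14

static int pick_24g_channel(const bool valid[MAX_CH_24G + 1], const bool busy[MAX_CH_24G + 1])
{
	int weight[MAX_CH_24G + 1] = { -1000 };	/* index 0 is a sentinel that always loses */
	int ch, best = 0;

	for (ch = 1; ch <= MAX_CH_24G; ch++) {
		if (!valid[ch])
			continue;
		if (!busy[ch]) {
			weight[ch] += 100;	/* quiet channel: strong bonus */
			continue;
		}
		/* Busy channel: penalise it and its neighbours, decaying with distance. */
		weight[ch] -= 80;
		if (ch > 1) weight[ch - 1] -= 40;
		if (ch > 2) weight[ch - 2] -= 20;
		if (ch > 3) weight[ch - 3] -= 10;
		if (ch < MAX_CH_24G)     weight[ch + 1] -= 40;
		if (ch < MAX_CH_24G - 1) weight[ch + 2] -= 20;
		if (ch < MAX_CH_24G - 2) weight[ch + 3] -= 10;
	}

	for (ch = 1; ch <= MAX_CH_24G; ch++)
		if (valid[ch] && weight[ch] > weight[best])
			best = ch;

	return best;	/* 0 means "no valid channel" */
}

int main(void)
{
	bool valid[MAX_CH_24G + 1] = { false };
	bool busy[MAX_CH_24G + 1] = { false };
	int ch;

	for (ch = 1; ch <= 11; ch++)
		valid[ch] = true;
	busy[1] = busy[6] = true;	/* pretend channels 1 and 6 are already occupied */

	printf("suggested channel: %d\n", pick_24g_channel(valid, busy));
	return 0;
}
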
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
new file mode 100644
index 00000000000..7038f0d3bde
--- /dev/null
+++ b/drivers/staging/vt6655/channel.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * File: channel.h
+ *
+ */
+
+#ifndef _CHANNEL_H_
+#define _CHANNEL_H_
+
+#include "ttype.h"
+#include "card.h"
+
+/*--------------------- Export Classes ----------------------------*/
+
+typedef struct tagSChannelTblElement {
+ unsigned char byChannelNumber;
+ unsigned int uFrequency;
+ bool bValid;
+ unsigned char byMAP;
+}SChannelTblElement, *PSChannelTblElement;
+
+
+/*--------------------- Export Functions --------------------------*/
+
+bool is_channel_valid(unsigned int CountryCode);
+void init_channel_table(void *pDeviceHandler);
+unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType);
+bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable);
+unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex);
+bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel);
+void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE);
+unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs);
+void set_country_IE(void *pDeviceHandler, void *pIE);
+bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
+ unsigned char *pbyChannelNumber, unsigned char *pbyMap);
+void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
+ unsigned char byMap);
+void clear_channel_map_info(void *pDeviceHandler);
+unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType);
+
+
+#endif /* _CHANNEL_H_ */
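
[Illustration, not part of the patch.] A rough sketch of how a caller might drive the exported API above, assuming the driver's usual flow: table init at start-up, busy-channel marks filled in during scanning via set_channel_map_info(), then selection and tuning. The helper name and its minimal error handling are hypothetical; PHY_TYPE_11G comes from the CARD_PHY_TYPE enum in card.h, which channel.h pulls in.

#include "channel.h"	/* also includes card.h for CARD_PHY_TYPE / PHY_TYPE_11G */

static void example_pick_and_tune(void *pDeviceHandler)
{
	unsigned char ch;

	init_channel_table(pDeviceHandler);	/* apply zone/country defaults */

	/* ... scanning marks busy channels through set_channel_map_info() ... */

	ch = auto_channel_select(pDeviceHandler, PHY_TYPE_11G);	/* least-busy 2.4 GHz channel */
	if (ch != 0)
		set_channel(pDeviceHandler, ch);

	clear_channel_map_info(pDeviceHandler);	/* reset the per-scan busy marks */
}
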
diff --git a/drivers/staging/vt6655/country.h b/drivers/staging/vt6655/country.h
index 2005d276868..05fda410420 100644
--- a/drivers/staging/vt6655/country.h
+++ b/drivers/staging/vt6655/country.h
@@ -159,19 +159,4 @@ typedef enum _COUNTRY_CODE {
CCODE_MAX
} COUNTRY_CODE;
-typedef struct tagSCountryTable
-{
- BYTE byChannelCountryCode; /* The country code */
- CHAR chCountryCode[2];
- BYTE bChannelIdxList[CB_MAX_CHANNEL]; /* Available channels Index */
- BYTE byPower[CB_MAX_CHANNEL];
-} SCountryTable, *PSCountryTable;
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-extern SCountryTable ChannelRuleTab[CCODE_MAX+1];
-
-/*--------------------- Export Functions --------------------------*/
-
#endif /* __COUNTRY_H__ */
diff --git a/drivers/staging/vt6655/datarate.c b/drivers/staging/vt6655/datarate.c
index 38b09a7fb53..efbb8f45f72 100644
--- a/drivers/staging/vt6655/datarate.c
+++ b/drivers/staging/vt6655/datarate.c
@@ -51,11 +51,11 @@
/*--------------------- Static Classes ----------------------------*/
- extern WORD TxRate_iwconfig; //2008-5-8 <add> by chester
+ extern unsigned short TxRate_iwconfig; //2008-5-8 <add> by chester
/*--------------------- Static Variables --------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-const BYTE acbyIERate[MAX_RATE] =
+const unsigned char acbyIERate[MAX_RATE] =
{0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
#define AUTORATE_TXOK_CNT 0x0400
@@ -75,7 +75,7 @@ s_vResetCounter (
PKnownNodeDB psNodeDBTable
)
{
- BYTE ii;
+ unsigned char ii;
// clear statistic counter for auto_rate
for(ii=0;ii<=MAX_RATE;ii++) {
@@ -97,19 +97,19 @@ s_vResetCounter (
*
* Parameters:
* In:
- * BYTE - Rate value in SuppRates IE or ExtSuppRates IE
+ * unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
* Out:
* none
*
* Return Value: RateIdx
*
-*/
-BYTE
+unsigned char
+unsigned char
DATARATEbyGetRateIdx (
- BYTE byRate
+ unsigned char byRate
)
{
- BYTE ii;
+ unsigned char ii;
//Erase basicRate flag.
byRate = byRate & 0x7F;//0111 1111
@@ -151,19 +151,19 @@ DATARATEbyGetRateIdx (
*
* Parameters:
* In:
- * BYTE - Rate value in SuppRates IE or ExtSuppRates IE
+ * unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
* Out:
* none
*
* Return Value: RateIdx
*
-*/
-WORD
+unsigned short
wGetRateIdx(
- BYTE byRate
+ unsigned char byRate
)
{
- WORD ii;
+ unsigned short ii;
//Erase basicRate flag.
byRate = byRate & 0x7F;//0111 1111
@@ -199,20 +199,20 @@ RATEvParseMaxRate (
void *pDeviceHandler,
PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pItemExtRates,
- BOOL bUpdateBasicRate,
- PWORD pwMaxBasicRate,
- PWORD pwMaxSuppRate,
- PWORD pwSuppRate,
- PBYTE pbyTopCCKRate,
- PBYTE pbyTopOFDMRate
+ bool bUpdateBasicRate,
+ unsigned short *pwMaxBasicRate,
+ unsigned short *pwMaxSuppRate,
+ unsigned short *pwSuppRate,
+ unsigned char *pbyTopCCKRate,
+ unsigned char *pbyTopOFDMRate
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
-UINT ii;
-BYTE byHighSuppRate = 0;
-BYTE byRate = 0;
-WORD wOldBasicRate = pDevice->wBasicRate;
-UINT uRateLen;
+unsigned int ii;
+unsigned char byHighSuppRate = 0;
+unsigned char byRate = 0;
+unsigned short wOldBasicRate = pDevice->wBasicRate;
+unsigned int uRateLen;
if (pItemRates == NULL)
@@ -231,14 +231,14 @@ UINT uRateLen;
}
for (ii = 0; ii < uRateLen; ii++) {
- byRate = (BYTE)(pItemRates->abyRates[ii]);
+ byRate = (unsigned char)(pItemRates->abyRates[ii]);
if (WLAN_MGMT_IS_BASICRATE(byRate) &&
- (bUpdateBasicRate == TRUE)) {
+ (bUpdateBasicRate == true)) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
}
- byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F);
+ byRate = (unsigned char)(pItemRates->abyRates[ii]&0x7F);
if (byHighSuppRate == 0)
byHighSuppRate = byRate;
if (byRate > byHighSuppRate)
@@ -248,20 +248,20 @@ UINT uRateLen;
if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
(pDevice->eCurrentPHYType != PHY_TYPE_11B)) {
- UINT uExtRateLen = pItemExtRates->len;
+ unsigned int uExtRateLen = pItemExtRates->len;
if (uExtRateLen > WLAN_RATES_MAXLEN)
uExtRateLen = WLAN_RATES_MAXLEN;
for (ii = 0; ii < uExtRateLen ; ii++) {
- byRate = (BYTE)(pItemExtRates->abyRates[ii]);
+ byRate = (unsigned char)(pItemExtRates->abyRates[ii]);
// select highest basic rate
if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
}
- byRate = (BYTE)(pItemExtRates->abyRates[ii]&0x7F);
+ byRate = (unsigned char)(pItemExtRates->abyRates[ii]&0x7F);
if (byHighSuppRate == 0)
byHighSuppRate = byRate;
if (byRate > byHighSuppRate)
@@ -314,14 +314,14 @@ RATEvTxRateFallBack (
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
-WORD wIdxDownRate = 0;
-UINT ii;
-//DWORD dwRateTable[MAX_RATE] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54};
-BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
-DWORD dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
-DWORD dwThroughput = 0;
-WORD wIdxUpRate = 0;
-DWORD dwTxDiff = 0;
+unsigned short wIdxDownRate = 0;
+unsigned int ii;
+//unsigned long dwRateTable[MAX_RATE] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54};
+bool bAutoRate[MAX_RATE] = {true,true,true,true,false,false,true,true,true,true,true,true};
+ unsigned long dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
+ unsigned long dwThroughput = 0;
+ unsigned short wIdxUpRate = 0;
+ unsigned long dwTxDiff = 0;
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) {
// Don't do Fallback when scanning Channel
@@ -346,11 +346,11 @@ DWORD dwTxDiff = 0;
for(ii=0;ii<MAX_RATE;ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii] == TRUE) {
- wIdxUpRate = (WORD) ii;
+ if (bAutoRate[ii] == true) {
+ wIdxUpRate = (unsigned short) ii;
}
} else {
- bAutoRate[ii] = FALSE;
+ bAutoRate[ii] = false;
}
}
@@ -372,9 +372,9 @@ DWORD dwTxDiff = 0;
for(ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
if ( (dwThroughputTbl[ii] > dwThroughput) &&
- (bAutoRate[ii]==TRUE) ) {
+ (bAutoRate[ii]==true) ) {
dwThroughput = dwThroughputTbl[ii];
- wIdxDownRate = (WORD) ii;
+ wIdxDownRate = (unsigned short) ii;
}
}
psNodeDBTable->wTxDataRate = wIdxDownRate;
@@ -409,14 +409,14 @@ TxRate_iwconfig=psNodeDBTable->wTxDataRate;
* Return Value: None
*
-*/
-BYTE
+unsigned char
RATEuSetIE (
PWLAN_IE_SUPP_RATES pSrcRates,
PWLAN_IE_SUPP_RATES pDstRates,
- UINT uRateLen
+ unsigned int uRateLen
)
{
- UINT ii, uu, uRateCnt = 0;
+ unsigned int ii, uu, uRateCnt = 0;
if ((pSrcRates == NULL) || (pDstRates == NULL))
return 0;
@@ -432,6 +432,6 @@ RATEuSetIE (
}
}
}
- return (BYTE)uRateCnt;
+ return (unsigned char)uRateCnt;
}
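
[Illustration, not part of the patch.] For context on the code these type changes touch: the downgrade step of RATEvTxRateFallBack() walks the throughput table downward from the current rate index and picks the eligible rate with the highest nominal throughput. The standalone sketch below isolates just that step, with eligible[] standing in for bAutoRate[] and the same table values as the driver.

#include <stdbool.h>
#include <stdio.h>

#define MAX_RATE 12	/* 1M ... 54M, as in the driver */

/* Nominal throughput per rate index, in units of 100 kb/s. */
static const unsigned long tput_tbl[MAX_RATE] =
	{ 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };

static unsigned short fallback_rate(unsigned short current, const bool eligible[MAX_RATE])
{
	unsigned long best_tput = 0;
	unsigned short best = 0, ii;

	for (ii = current; ii > 0; ) {
		ii--;
		if (eligible[ii] && tput_tbl[ii] > best_tput) {
			best_tput = tput_tbl[ii];
			best = ii;
		}
	}
	return best;
}

int main(void)
{
	/* CCK plus OFDM rates, with the 6M/9M entries disabled as in bAutoRate[]. */
	bool eligible[MAX_RATE] =
		{ true, true, true, true, false, false, true, true, true, true, true, true };

	printf("fall back from index 9 (36M) to index %u\n", fallback_rate(9, eligible));
	return 0;
}
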
diff --git a/drivers/staging/vt6655/datarate.h b/drivers/staging/vt6655/datarate.h
index b8ca792e9c6..4f8ea0b0532 100644
--- a/drivers/staging/vt6655/datarate.h
+++ b/drivers/staging/vt6655/datarate.h
@@ -59,12 +59,12 @@ RATEvParseMaxRate(
void *pDeviceHandler,
PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pItemExtRates,
- BOOL bUpdateBasicRate,
- PWORD pwMaxBasicRate,
- PWORD pwMaxSuppRate,
- PWORD pwSuppRate,
- PBYTE pbyTopCCKRate,
- PBYTE pbyTopOFDMRate
+ bool bUpdateBasicRate,
+ unsigned short *pwMaxBasicRate,
+ unsigned short *pwMaxSuppRate,
+ unsigned short *pwSuppRate,
+ unsigned char *pbyTopCCKRate,
+ unsigned char *pbyTopOFDMRate
);
void
@@ -73,22 +73,22 @@ RATEvTxRateFallBack(
PKnownNodeDB psNodeDBTable
);
-BYTE
+unsigned char
RATEuSetIE(
PWLAN_IE_SUPP_RATES pSrcRates,
PWLAN_IE_SUPP_RATES pDstRates,
- UINT uRateLen
+ unsigned int uRateLen
);
-WORD
+unsigned short
wGetRateIdx(
- BYTE byRate
+ unsigned char byRate
);
-BYTE
+unsigned char
DATARATEbyGetRateIdx(
- BYTE byRate
+ unsigned char byRate
);
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index cedb1e7df4f..138897a7932 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -244,10 +244,10 @@ static inline PDEVICE_RD_INFO alloc_rd_info(void) {
/*
typedef struct tagRDES0 {
- WORD wResCount;
- WORD wf1Owner ;
-// WORD f15Reserved : 15;
-// WORD f1Owner : 1;
+ unsigned short wResCount;
+ unsigned short wf1Owner ;
+// unsigned short f15Reserved : 15;
+// unsigned short f1Owner : 1;
} __attribute__ ((__packed__))
SRDES0;
*/
@@ -255,13 +255,13 @@ SRDES0;
#ifdef __BIG_ENDIAN
typedef struct tagRDES0 {
- volatile WORD wResCount;
+ volatile unsigned short wResCount;
union {
- volatile U16 f15Reserved;
+ volatile u16 f15Reserved;
struct {
- volatile U8 f8Reserved1;
- volatile U8 f1Owner:1;
- volatile U8 f7Reserved:7;
+ volatile u8 f8Reserved1;
+ volatile u8 f1Owner:1;
+ volatile u8 f7Reserved:7;
} __attribute__ ((__packed__));
} __attribute__ ((__packed__));
} __attribute__ ((__packed__))
@@ -270,9 +270,9 @@ SRDES0, *PSRDES0;
#else
typedef struct tagRDES0 {
- WORD wResCount;
- WORD f15Reserved : 15;
- WORD f1Owner : 1;
+ unsigned short wResCount;
+ unsigned short f15Reserved : 15;
+ unsigned short f1Owner : 1;
} __attribute__ ((__packed__))
SRDES0;
@@ -280,8 +280,8 @@ SRDES0;
#endif
typedef struct tagRDES1 {
- WORD wReqCount;
- WORD wReserved;
+ unsigned short wReqCount;
+ unsigned short wReserved;
} __attribute__ ((__packed__))
SRDES1;
@@ -291,11 +291,11 @@ SRDES1;
typedef struct tagSRxDesc {
volatile SRDES0 m_rd0RD0;
volatile SRDES1 m_rd1RD1;
- volatile U32 buff_addr;
- volatile U32 next_desc;
+ volatile u32 buff_addr;
+ volatile u32 next_desc;
struct tagSRxDesc *next;//4 bytes
volatile PDEVICE_RD_INFO pRDInfo;//4 bytes
- volatile U32 Reserved[2];//8 bytes
+ volatile u32 Reserved[2];//8 bytes
} __attribute__ ((__packed__))
SRxDesc, *PSRxDesc;
typedef const SRxDesc *PCSRxDesc;
@@ -304,24 +304,24 @@ typedef const SRxDesc *PCSRxDesc;
/*
typedef struct tagTDES0 {
- volatile BYTE byTSR0;
- volatile BYTE byTSR1;
- volatile WORD wOwner_Txtime;
-// volatile WORD f15Txtime : 15;
-// volatile WORD f1Owner:1;
+ volatile unsigned char byTSR0;
+ volatile unsigned char byTSR1;
+ volatile unsigned short wOwner_Txtime;
+// volatile unsigned short f15Txtime : 15;
+// volatile unsigned short f1Owner:1;
} __attribute__ ((__packed__))
STDES0;
*/
typedef struct tagTDES0 {
- volatile BYTE byTSR0;
- volatile BYTE byTSR1;
+ volatile unsigned char byTSR0;
+ volatile unsigned char byTSR1;
union {
- volatile U16 f15Txtime;
+ volatile u16 f15Txtime;
struct {
- volatile U8 f8Reserved1;
- volatile U8 f1Owner:1;
- volatile U8 f7Reserved:7;
+ volatile u8 f8Reserved1;
+ volatile u8 f1Owner:1;
+ volatile u8 f7Reserved:7;
} __attribute__ ((__packed__));
} __attribute__ ((__packed__));
} __attribute__ ((__packed__))
@@ -330,10 +330,10 @@ STDES0, PSTDES0;
#else
typedef struct tagTDES0 {
- volatile BYTE byTSR0;
- volatile BYTE byTSR1;
- volatile WORD f15Txtime : 15;
- volatile WORD f1Owner:1;
+ volatile unsigned char byTSR0;
+ volatile unsigned char byTSR1;
+ volatile unsigned short f15Txtime : 15;
+ volatile unsigned short f1Owner:1;
} __attribute__ ((__packed__))
STDES0;
@@ -341,22 +341,22 @@ STDES0;
typedef struct tagTDES1 {
- volatile WORD wReqCount;
- volatile BYTE byTCR;
- volatile BYTE byReserved;
+ volatile unsigned short wReqCount;
+ volatile unsigned char byTCR;
+ volatile unsigned char byReserved;
} __attribute__ ((__packed__))
STDES1;
typedef struct tagDEVICE_TD_INFO{
struct sk_buff* skb;
- PBYTE buf;
+ unsigned char *buf;
dma_addr_t skb_dma;
dma_addr_t buf_dma;
dma_addr_t curr_desc;
- DWORD dwReqCount;
- DWORD dwHeaderLength;
- BYTE byFlags;
+ unsigned long dwReqCount;
+ unsigned long dwHeaderLength;
+ unsigned char byFlags;
} DEVICE_TD_INFO, *PDEVICE_TD_INFO;
/*
@@ -378,11 +378,11 @@ static inline PDEVICE_TD_INFO alloc_td_info(void) {
typedef struct tagSTxDesc {
volatile STDES0 m_td0TD0;
volatile STDES1 m_td1TD1;
- volatile U32 buff_addr;
- volatile U32 next_desc;
+ volatile u32 buff_addr;
+ volatile u32 next_desc;
struct tagSTxDesc* next; //4 bytes
volatile PDEVICE_TD_INFO pTDInfo;//4 bytes
- volatile U32 Reserved[2];//8 bytes
+ volatile u32 Reserved[2];//8 bytes
} __attribute__ ((__packed__))
STxDesc, *PSTxDesc;
typedef const STxDesc *PCSTxDesc;
@@ -391,13 +391,13 @@ typedef const STxDesc *PCSTxDesc;
typedef struct tagSTxSyncDesc {
volatile STDES0 m_td0TD0;
volatile STDES1 m_td1TD1;
- volatile DWORD buff_addr; // pointer to logical buffer
- volatile DWORD next_desc; // pointer to next logical descriptor
- volatile WORD m_wFIFOCtl;
- volatile WORD m_wTimeStamp;
+ volatile u32 buff_addr; // pointer to logical buffer
+ volatile u32 next_desc; // pointer to next logical descriptor
+ volatile unsigned short m_wFIFOCtl;
+ volatile unsigned short m_wTimeStamp;
struct tagSTxSyncDesc* next; //4 bytes
volatile PDEVICE_TD_INFO pTDInfo;//4 bytes
- volatile DWORD m_dwReserved2;
+ volatile u32 m_dwReserved2;
} __attribute__ ((__packed__))
STxSyncDesc, *PSTxSyncDesc;
typedef const STxSyncDesc *PCSTxSyncDesc;
@@ -407,35 +407,35 @@ typedef const STxSyncDesc *PCSTxSyncDesc;
// RsvTime buffer header
//
typedef struct tagSRrvTime_gRTS {
- WORD wRTSTxRrvTime_ba;
- WORD wRTSTxRrvTime_aa;
- WORD wRTSTxRrvTime_bb;
- WORD wReserved;
- WORD wTxRrvTime_b;
- WORD wTxRrvTime_a;
+ unsigned short wRTSTxRrvTime_ba;
+ unsigned short wRTSTxRrvTime_aa;
+ unsigned short wRTSTxRrvTime_bb;
+ unsigned short wReserved;
+ unsigned short wTxRrvTime_b;
+ unsigned short wTxRrvTime_a;
}__attribute__ ((__packed__))
SRrvTime_gRTS, *PSRrvTime_gRTS;
typedef const SRrvTime_gRTS *PCSRrvTime_gRTS;
typedef struct tagSRrvTime_gCTS {
- WORD wCTSTxRrvTime_ba;
- WORD wReserved;
- WORD wTxRrvTime_b;
- WORD wTxRrvTime_a;
+ unsigned short wCTSTxRrvTime_ba;
+ unsigned short wReserved;
+ unsigned short wTxRrvTime_b;
+ unsigned short wTxRrvTime_a;
}__attribute__ ((__packed__))
SRrvTime_gCTS, *PSRrvTime_gCTS;
typedef const SRrvTime_gCTS *PCSRrvTime_gCTS;
typedef struct tagSRrvTime_ab {
- WORD wRTSTxRrvTime;
- WORD wTxRrvTime;
+ unsigned short wRTSTxRrvTime;
+ unsigned short wTxRrvTime;
}__attribute__ ((__packed__))
SRrvTime_ab, *PSRrvTime_ab;
typedef const SRrvTime_ab *PCSRrvTime_ab;
typedef struct tagSRrvTime_atim {
- WORD wCTSTxRrvTime_ba;
- WORD wTxRrvTime_a;
+ unsigned short wCTSTxRrvTime_ba;
+ unsigned short wTxRrvTime_a;
}__attribute__ ((__packed__))
SRrvTime_atim, *PSRrvTime_atim;
typedef const SRrvTime_atim *PCSRrvTime_atim;
@@ -444,25 +444,25 @@ typedef const SRrvTime_atim *PCSRrvTime_atim;
// RTS buffer header
//
typedef struct tagSRTSData {
- WORD wFrameControl;
- WORD wDurationID;
- BYTE abyRA[ETH_ALEN];
- BYTE abyTA[ETH_ALEN];
+ unsigned short wFrameControl;
+ unsigned short wDurationID;
+ unsigned char abyRA[ETH_ALEN];
+ unsigned char abyTA[ETH_ALEN];
}__attribute__ ((__packed__))
SRTSData, *PSRTSData;
typedef const SRTSData *PCSRTSData;
typedef struct tagSRTS_g {
- BYTE bySignalField_b;
- BYTE byServiceField_b;
- WORD wTransmitLength_b;
- BYTE bySignalField_a;
- BYTE byServiceField_a;
- WORD wTransmitLength_a;
- WORD wDuration_ba;
- WORD wDuration_aa;
- WORD wDuration_bb;
- WORD wReserved;
+ unsigned char bySignalField_b;
+ unsigned char byServiceField_b;
+ unsigned short wTransmitLength_b;
+ unsigned char bySignalField_a;
+ unsigned char byServiceField_a;
+ unsigned short wTransmitLength_a;
+ unsigned short wDuration_ba;
+ unsigned short wDuration_aa;
+ unsigned short wDuration_bb;
+ unsigned short wReserved;
SRTSData Data;
}__attribute__ ((__packed__))
SRTS_g, *PSRTS_g;
@@ -470,20 +470,20 @@ typedef const SRTS_g *PCSRTS_g;
typedef struct tagSRTS_g_FB {
- BYTE bySignalField_b;
- BYTE byServiceField_b;
- WORD wTransmitLength_b;
- BYTE bySignalField_a;
- BYTE byServiceField_a;
- WORD wTransmitLength_a;
- WORD wDuration_ba;
- WORD wDuration_aa;
- WORD wDuration_bb;
- WORD wReserved;
- WORD wRTSDuration_ba_f0;
- WORD wRTSDuration_aa_f0;
- WORD wRTSDuration_ba_f1;
- WORD wRTSDuration_aa_f1;
+ unsigned char bySignalField_b;
+ unsigned char byServiceField_b;
+ unsigned short wTransmitLength_b;
+ unsigned char bySignalField_a;
+ unsigned char byServiceField_a;
+ unsigned short wTransmitLength_a;
+ unsigned short wDuration_ba;
+ unsigned short wDuration_aa;
+ unsigned short wDuration_bb;
+ unsigned short wReserved;
+ unsigned short wRTSDuration_ba_f0;
+ unsigned short wRTSDuration_aa_f0;
+ unsigned short wRTSDuration_ba_f1;
+ unsigned short wRTSDuration_aa_f1;
SRTSData Data;
}__attribute__ ((__packed__))
SRTS_g_FB, *PSRTS_g_FB;
@@ -491,11 +491,11 @@ typedef const SRTS_g_FB *PCSRTS_g_FB;
typedef struct tagSRTS_ab {
- BYTE bySignalField;
- BYTE byServiceField;
- WORD wTransmitLength;
- WORD wDuration;
- WORD wReserved;
+ unsigned char bySignalField;
+ unsigned char byServiceField;
+ unsigned short wTransmitLength;
+ unsigned short wDuration;
+ unsigned short wReserved;
SRTSData Data;
}__attribute__ ((__packed__))
SRTS_ab, *PSRTS_ab;
@@ -503,13 +503,13 @@ typedef const SRTS_ab *PCSRTS_ab;
typedef struct tagSRTS_a_FB {
- BYTE bySignalField;
- BYTE byServiceField;
- WORD wTransmitLength;
- WORD wDuration;
- WORD wReserved;
- WORD wRTSDuration_f0;
- WORD wRTSDuration_f1;
+ unsigned char bySignalField;
+ unsigned char byServiceField;
+ unsigned short wTransmitLength;
+ unsigned short wDuration;
+ unsigned short wReserved;
+ unsigned short wRTSDuration_f0;
+ unsigned short wRTSDuration_f1;
SRTSData Data;
}__attribute__ ((__packed__))
SRTS_a_FB, *PSRTS_a_FB;
@@ -520,32 +520,32 @@ typedef const SRTS_a_FB *PCSRTS_a_FB;
// CTS buffer header
//
typedef struct tagSCTSData {
- WORD wFrameControl;
- WORD wDurationID;
- BYTE abyRA[ETH_ALEN];
- WORD wReserved;
+ unsigned short wFrameControl;
+ unsigned short wDurationID;
+ unsigned char abyRA[ETH_ALEN];
+ unsigned short wReserved;
}__attribute__ ((__packed__))
SCTSData, *PSCTSData;
typedef struct tagSCTS {
- BYTE bySignalField_b;
- BYTE byServiceField_b;
- WORD wTransmitLength_b;
- WORD wDuration_ba;
- WORD wReserved;
+ unsigned char bySignalField_b;
+ unsigned char byServiceField_b;
+ unsigned short wTransmitLength_b;
+ unsigned short wDuration_ba;
+ unsigned short wReserved;
SCTSData Data;
}__attribute__ ((__packed__))
SCTS, *PSCTS;
typedef const SCTS *PCSCTS;
typedef struct tagSCTS_FB {
- BYTE bySignalField_b;
- BYTE byServiceField_b;
- WORD wTransmitLength_b;
- WORD wDuration_ba;
- WORD wReserved;
- WORD wCTSDuration_ba_f0;
- WORD wCTSDuration_ba_f1;
+ unsigned char bySignalField_b;
+ unsigned char byServiceField_b;
+ unsigned short wTransmitLength_b;
+ unsigned short wDuration_ba;
+ unsigned short wReserved;
+ unsigned short wCTSDuration_ba_f0;
+ unsigned short wCTSDuration_ba_f1;
SCTSData Data;
}__attribute__ ((__packed__))
SCTS_FB, *PSCTS_FB;
@@ -556,19 +556,19 @@ typedef const SCTS_FB *PCSCTS_FB;
// Tx FIFO header
//
typedef struct tagSTxBufHead {
- DWORD adwTxKey[4];
- WORD wFIFOCtl;
- WORD wTimeStamp;
- WORD wFragCtl;
- BYTE byTxPower;
- BYTE wReserved;
+ u32 adwTxKey[4];
+ unsigned short wFIFOCtl;
+ unsigned short wTimeStamp;
+ unsigned short wFragCtl;
+ unsigned char byTxPower;
+ unsigned char wReserved;
}__attribute__ ((__packed__))
STxBufHead, *PSTxBufHead;
typedef const STxBufHead *PCSTxBufHead;
typedef struct tagSTxShortBufHead {
- WORD wFIFOCtl;
- WORD wTimeStamp;
+ unsigned short wFIFOCtl;
+ unsigned short wTimeStamp;
}__attribute__ ((__packed__))
STxShortBufHead, *PSTxShortBufHead;
typedef const STxShortBufHead *PCSTxShortBufHead;
@@ -577,57 +577,57 @@ typedef const STxShortBufHead *PCSTxShortBufHead;
// Tx data header
//
typedef struct tagSTxDataHead_g {
- BYTE bySignalField_b;
- BYTE byServiceField_b;
- WORD wTransmitLength_b;
- BYTE bySignalField_a;
- BYTE byServiceField_a;
- WORD wTransmitLength_a;
- WORD wDuration_b;
- WORD wDuration_a;
- WORD wTimeStampOff_b;
- WORD wTimeStampOff_a;
+ unsigned char bySignalField_b;
+ unsigned char byServiceField_b;
+ unsigned short wTransmitLength_b;
+ unsigned char bySignalField_a;
+ unsigned char byServiceField_a;
+ unsigned short wTransmitLength_a;
+ unsigned short wDuration_b;
+ unsigned short wDuration_a;
+ unsigned short wTimeStampOff_b;
+ unsigned short wTimeStampOff_a;
}__attribute__ ((__packed__))
STxDataHead_g, *PSTxDataHead_g;
typedef const STxDataHead_g *PCSTxDataHead_g;
typedef struct tagSTxDataHead_g_FB {
- BYTE bySignalField_b;
- BYTE byServiceField_b;
- WORD wTransmitLength_b;
- BYTE bySignalField_a;
- BYTE byServiceField_a;
- WORD wTransmitLength_a;
- WORD wDuration_b;
- WORD wDuration_a;
- WORD wDuration_a_f0;
- WORD wDuration_a_f1;
- WORD wTimeStampOff_b;
- WORD wTimeStampOff_a;
+ unsigned char bySignalField_b;
+ unsigned char byServiceField_b;
+ unsigned short wTransmitLength_b;
+ unsigned char bySignalField_a;
+ unsigned char byServiceField_a;
+ unsigned short wTransmitLength_a;
+ unsigned short wDuration_b;
+ unsigned short wDuration_a;
+ unsigned short wDuration_a_f0;
+ unsigned short wDuration_a_f1;
+ unsigned short wTimeStampOff_b;
+ unsigned short wTimeStampOff_a;
}__attribute__ ((__packed__))
STxDataHead_g_FB, *PSTxDataHead_g_FB;
typedef const STxDataHead_g_FB *PCSTxDataHead_g_FB;
typedef struct tagSTxDataHead_ab {
- BYTE bySignalField;
- BYTE byServiceField;
- WORD wTransmitLength;
- WORD wDuration;
- WORD wTimeStampOff;
+ unsigned char bySignalField;
+ unsigned char byServiceField;
+ unsigned short wTransmitLength;
+ unsigned short wDuration;
+ unsigned short wTimeStampOff;
}__attribute__ ((__packed__))
STxDataHead_ab, *PSTxDataHead_ab;
typedef const STxDataHead_ab *PCSTxDataHead_ab;
typedef struct tagSTxDataHead_a_FB {
- BYTE bySignalField;
- BYTE byServiceField;
- WORD wTransmitLength;
- WORD wDuration;
- WORD wTimeStampOff;
- WORD wDuration_f0;
- WORD wDuration_f1;
+ unsigned char bySignalField;
+ unsigned char byServiceField;
+ unsigned short wTransmitLength;
+ unsigned short wDuration;
+ unsigned short wTimeStampOff;
+ unsigned short wDuration_f0;
+ unsigned short wDuration_f1;
}__attribute__ ((__packed__))
STxDataHead_a_FB, *PSTxDataHead_a_FB;
typedef const STxDataHead_a_FB *PCSTxDataHead_a_FB;
@@ -636,37 +636,37 @@ typedef const STxDataHead_a_FB *PCSTxDataHead_a_FB;
// MICHDR data header
//
typedef struct tagSMICHDRHead {
- DWORD adwHDR0[4];
- DWORD adwHDR1[4];
- DWORD adwHDR2[4];
+ u32 adwHDR0[4];
+ u32 adwHDR1[4];
+ u32 adwHDR2[4];
}__attribute__ ((__packed__))
SMICHDRHead, *PSMICHDRHead;
typedef const SMICHDRHead *PCSMICHDRHead;
typedef struct tagSBEACONCtl {
- DWORD BufReady : 1;
- DWORD TSF : 15;
- DWORD BufLen : 11;
- DWORD Reserved : 5;
+ u32 BufReady : 1;
+ u32 TSF : 15;
+ u32 BufLen : 11;
+ u32 Reserved : 5;
}__attribute__ ((__packed__))
SBEACONCtl;
typedef struct tagSSecretKey {
- DWORD dwLowDword;
- BYTE byHighByte;
+ u32 dwLowDword;
+ unsigned char byHighByte;
}__attribute__ ((__packed__))
SSecretKey;
typedef struct tagSKeyEntry {
- BYTE abyAddrHi[2];
- WORD wKCTL;
- BYTE abyAddrLo[4];
- DWORD dwKey0[4];
- DWORD dwKey1[4];
- DWORD dwKey2[4];
- DWORD dwKey3[4];
- DWORD dwKey4[4];
+ unsigned char abyAddrHi[2];
+ unsigned short wKCTL;
+ unsigned char abyAddrLo[4];
+ u32 dwKey0[4];
+ u32 dwKey1[4];
+ u32 dwKey2[4];
+ u32 dwKey3[4];
+ u32 dwKey4[4];
}__attribute__ ((__packed__))
SKeyEntry;
/*--------------------- Export Macros ------------------------------*/
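
[Illustration, not part of the patch.] Worth noting about this desc.h hunk: the descriptors and Tx/Rx buffer headers are hardware-visible, so BYTE/WORD/DWORD/U32 are mapped onto fixed-width replacements (unsigned char, unsigned short, u32), whereas software-only fields elsewhere in the patch (e.g. in device.h) sometimes map DWORD to unsigned long instead. A userspace C11 sketch of the width assumptions behind that mapping, with a local u32 typedef standing in for the kernel's:

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;	/* stand-in for the kernel's u32 */

static_assert(sizeof(unsigned char)  == 1, "BYTE  -> unsigned char  must stay 8 bits");
static_assert(sizeof(unsigned short) == 2, "WORD  -> unsigned short must stay 16 bits");
static_assert(sizeof(u32)            == 4, "DWORD -> u32            must stay 32 bits");
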
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 40ee4e14237..2e7c2fd7b7e 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -48,10 +48,10 @@
#include <linux/wait.h>
#include <linux/if_arp.h>
#include <linux/sched.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/if.h>
//#include <linux/config.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
@@ -218,7 +218,7 @@ typedef enum __device_init_type {
#define NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED 0x01
// PMKID Structures
-typedef UCHAR NDIS_802_11_PMKID_VALUE[16];
+typedef unsigned char NDIS_802_11_PMKID_VALUE[16];
typedef enum _NDIS_802_11_WEP_STATUS
@@ -250,7 +250,7 @@ typedef enum _NDIS_802_11_STATUS_TYPE
//Added new types for PMKID Candidate lists.
typedef struct _PMKID_CANDIDATE {
NDIS_802_11_MAC_ADDRESS BSSID;
- ULONG Flags;
+ unsigned long Flags;
} PMKID_CANDIDATE, *PPMKID_CANDIDATE;
@@ -261,15 +261,15 @@ typedef struct _BSSID_INFO
} BSSID_INFO, *PBSSID_INFO;
typedef struct tagSPMKID {
- ULONG Length;
- ULONG BSSIDInfoCount;
+ unsigned long Length;
+ unsigned long BSSIDInfoCount;
BSSID_INFO BSSIDInfo[MAX_BSSIDINFO_4_PMKID];
} SPMKID, *PSPMKID;
typedef struct tagSPMKIDCandidateEvent {
NDIS_802_11_STATUS_TYPE StatusType;
- ULONG Version; // Version of the structure
- ULONG NumCandidates; // No. of pmkid candidates
+ unsigned long Version; // Version of the structure
+ unsigned long NumCandidates; // No. of pmkid candidates
PMKID_CANDIDATE CandidateList[MAX_PMKIDLIST];
} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
@@ -279,10 +279,10 @@ typedef struct tagSPMKIDCandidateEvent {
#define MAX_QUIET_COUNT 8
typedef struct tagSQuietControl {
- BOOL bEnable;
- DWORD dwStartTime;
- BYTE byPeriod;
- WORD wDuration;
+ bool bEnable;
+ unsigned long dwStartTime;
+ unsigned char byPeriod;
+ unsigned short wDuration;
} SQuietControl, *PSQuietControl;
//--
@@ -291,7 +291,7 @@ typedef struct __chip_info_tbl{
char* name;
int io_size;
int nTxQueue;
- U32 flags;
+ u32 flags;
} CHIP_INFO, *PCHIP_INFO;
@@ -303,15 +303,15 @@ typedef enum {
// The receive duplicate detection cache entry
typedef struct tagSCacheEntry{
- WORD wFmSequence;
- BYTE abyAddr2[ETH_ALEN];
+ unsigned short wFmSequence;
+ unsigned char abyAddr2[ETH_ALEN];
} SCacheEntry, *PSCacheEntry;
typedef struct tagSCache{
/* The receive cache is updated circularly. The next entry to be written is
* indexed by the "InPtr".
*/
- UINT uInPtr; // Place to use next
+ unsigned int uInPtr; // Place to use next
SCacheEntry asCacheEntry[DUPLICATE_RX_CACHE_LENGTH];
} SCache, *PSCache;
@@ -319,14 +319,14 @@ typedef struct tagSCache{
// DeFragment Control Block, used for collecting fragments prior to reassembly
typedef struct tagSDeFragControlBlock
{
- WORD wSequence;
- WORD wFragNum;
- BYTE abyAddr2[ETH_ALEN];
- UINT uLifetime;
+ unsigned short wSequence;
+ unsigned short wFragNum;
+ unsigned char abyAddr2[ETH_ALEN];
+ unsigned int uLifetime;
struct sk_buff* skb;
- PBYTE pbyRxBuffer;
- UINT cbFrameLength;
- BOOL bInUse;
+ unsigned char *pbyRxBuffer;
+ unsigned int cbFrameLength;
+ bool bInUse;
} SDeFragControlBlock, *PSDeFragControlBlock;
@@ -386,7 +386,7 @@ typedef struct __device_opt {
int short_retry;
int long_retry;
int bbp_type;
- U32 flags;
+ u32 flags;
} OPTIONS, *POPTIONS;
@@ -417,21 +417,21 @@ typedef struct __device_info {
dma_addr_t tx_bufs_dma1;
dma_addr_t tx_beacon_dma;
- PBYTE tx0_bufs;
- PBYTE tx1_bufs;
- PBYTE tx_beacon_bufs;
+ unsigned char *tx0_bufs;
+ unsigned char *tx1_bufs;
+ unsigned char *tx_beacon_bufs;
CHIP_TYPE chip_id;
- U32 PortOffset;
- DWORD dwIsr;
- U32 memaddr;
- U32 ioaddr;
- U32 io_size;
+ unsigned long PortOffset;
+ unsigned long dwIsr;
+ u32 memaddr;
+ u32 ioaddr;
+ u32 io_size;
- BYTE byRevId;
- WORD SubSystemID;
- WORD SubVendorID;
+ unsigned char byRevId;
+ unsigned short SubSystemID;
+ unsigned short SubVendorID;
int nTxQueues;
volatile int iTDUsed[TYPE_MAXTD];
@@ -448,17 +448,17 @@ typedef struct __device_info {
SCache sDupRxCache;
SDeFragControlBlock sRxDFCB[CB_MAX_RX_FRAG];
- UINT cbDFCB;
- UINT cbFreeDFCB;
- UINT uCurrentDFCBIdx;
+ unsigned int cbDFCB;
+ unsigned int cbFreeDFCB;
+ unsigned int uCurrentDFCBIdx;
OPTIONS sOpts;
- U32 flags;
+ u32 flags;
- U32 rx_buf_sz;
+ u32 rx_buf_sz;
int multicast_limit;
- BYTE byRxMode;
+ unsigned char byRxMode;
spinlock_t lock;
//PLICE_DEBUG->
@@ -472,19 +472,19 @@ typedef struct __device_info {
//PLICE_DEBUG <-
- U32 rx_bytes;
+ u32 rx_bytes;
// Version control
- BYTE byLocalID;
- BYTE byRFType;
+ unsigned char byLocalID;
+ unsigned char byRFType;
- BYTE byMaxPwrLevel;
- BYTE byZoneType;
- BOOL bZoneRegExist;
- BYTE byOriginalZonetype;
- BYTE abyMacContext[MAC_MAX_CONTEXT_REG];
- BOOL bLinkPass; // link status: OK or fail
- BYTE abyCurrentNetAddr[ETH_ALEN];
+ unsigned char byMaxPwrLevel;
+ unsigned char byZoneType;
+ bool bZoneRegExist;
+ unsigned char byOriginalZonetype;
+ unsigned char abyMacContext[MAC_MAX_CONTEXT_REG];
+ bool bLinkPass; // link status: OK or fail
+ unsigned char abyCurrentNetAddr[ETH_ALEN];
// Adapter statistics
SStatCounter scStatistic;
@@ -497,249 +497,249 @@ typedef struct __device_info {
SMgmtObject sMgmtObj;
// 802.11 MAC specific
- UINT uCurrRSSI;
- BYTE byCurrSQ;
-
- DWORD dwTxAntennaSel;
- DWORD dwRxAntennaSel;
- BYTE byAntennaCount;
- BYTE byRxAntennaMode;
- BYTE byTxAntennaMode;
- BOOL bTxRxAntInv;
-
- PBYTE pbyTmpBuff;
- UINT uSIFS; //Current SIFS
- UINT uDIFS; //Current DIFS
- UINT uEIFS; //Current EIFS
- UINT uSlot; //Current SlotTime
- UINT uCwMin; //Current CwMin
- UINT uCwMax; //CwMax is fixed on 1023.
+ unsigned int uCurrRSSI;
+ unsigned char byCurrSQ;
+
+ unsigned long dwTxAntennaSel;
+ unsigned long dwRxAntennaSel;
+ unsigned char byAntennaCount;
+ unsigned char byRxAntennaMode;
+ unsigned char byTxAntennaMode;
+ bool bTxRxAntInv;
+
+ unsigned char *pbyTmpBuff;
+ unsigned int uSIFS; //Current SIFS
+ unsigned int uDIFS; //Current DIFS
+ unsigned int uEIFS; //Current EIFS
+ unsigned int uSlot; //Current SlotTime
+ unsigned int uCwMin; //Current CwMin
+ unsigned int uCwMax; //CwMax is fixed on 1023.
// PHY parameter
- BYTE bySIFS;
- BYTE byDIFS;
- BYTE byEIFS;
- BYTE bySlot;
- BYTE byCWMaxMin;
+ unsigned char bySIFS;
+ unsigned char byDIFS;
+ unsigned char byEIFS;
+ unsigned char bySlot;
+ unsigned char byCWMaxMin;
CARD_PHY_TYPE eCurrentPHYType;
VIA_BB_TYPE byBBType; //0: 11A, 1:11B, 2:11G
VIA_PKT_TYPE byPacketType; //0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
- WORD wBasicRate;
- BYTE byACKRate;
- BYTE byTopOFDMBasicRate;
- BYTE byTopCCKBasicRate;
-
- BYTE byMinChannel;
- BYTE byMaxChannel;
- UINT uConnectionRate;
-
- BYTE byPreambleType;
- BYTE byShortPreamble;
-
- WORD wCurrentRate;
- WORD wRTSThreshold;
- WORD wFragmentationThreshold;
- BYTE byShortRetryLimit;
- BYTE byLongRetryLimit;
+ unsigned short wBasicRate;
+ unsigned char byACKRate;
+ unsigned char byTopOFDMBasicRate;
+ unsigned char byTopCCKBasicRate;
+
+ unsigned char byMinChannel;
+ unsigned char byMaxChannel;
+ unsigned int uConnectionRate;
+
+ unsigned char byPreambleType;
+ unsigned char byShortPreamble;
+
+ unsigned short wCurrentRate;
+ unsigned short wRTSThreshold;
+ unsigned short wFragmentationThreshold;
+ unsigned char byShortRetryLimit;
+ unsigned char byLongRetryLimit;
CARD_OP_MODE eOPMode;
- BYTE byOpMode;
- BOOL bBSSIDFilter;
- WORD wMaxTransmitMSDULifetime;
- BYTE abyBSSID[ETH_ALEN];
- BYTE abyDesireBSSID[ETH_ALEN];
- WORD wCTSDuration; // update while speed change
- WORD wACKDuration; // update while speed change
- WORD wRTSTransmitLen; // update while speed change
- BYTE byRTSServiceField; // update while speed change
- BYTE byRTSSignalField; // update while speed change
-
- DWORD dwMaxReceiveLifetime; // dot11MaxReceiveLifetime
-
- BOOL bCCK;
- BOOL bEncryptionEnable;
- BOOL bLongHeader;
- BOOL bShortSlotTime;
- BOOL bProtectMode;
- BOOL bNonERPPresent;
- BOOL bBarkerPreambleMd;
-
- BYTE byERPFlag;
- WORD wUseProtectCntDown;
-
- BOOL bRadioControlOff;
- BOOL bRadioOff;
- BOOL bEnablePSMode;
- WORD wListenInterval;
- BOOL bPWBitOn;
+ unsigned char byOpMode;
+ bool bBSSIDFilter;
+ unsigned short wMaxTransmitMSDULifetime;
+ unsigned char abyBSSID[ETH_ALEN];
+ unsigned char abyDesireBSSID[ETH_ALEN];
+ unsigned short wCTSDuration; // update while speed change
+ unsigned short wACKDuration; // update while speed change
+ unsigned short wRTSTransmitLen; // update while speed change
+ unsigned char byRTSServiceField; // update while speed change
+ unsigned char byRTSSignalField; // update while speed change
+
+ unsigned long dwMaxReceiveLifetime; // dot11MaxReceiveLifetime
+
+ bool bCCK;
+ bool bEncryptionEnable;
+ bool bLongHeader;
+ bool bShortSlotTime;
+ bool bProtectMode;
+ bool bNonERPPresent;
+ bool bBarkerPreambleMd;
+
+ unsigned char byERPFlag;
+ unsigned short wUseProtectCntDown;
+
+ bool bRadioControlOff;
+ bool bRadioOff;
+ bool bEnablePSMode;
+ unsigned short wListenInterval;
+ bool bPWBitOn;
WMAC_POWER_MODE ePSMode;
// GPIO Radio Control
- BYTE byRadioCtl;
- BYTE byGPIO;
- BOOL bHWRadioOff;
- BOOL bPrvActive4RadioOFF;
- BOOL bGPIOBlockRead;
+ unsigned char byRadioCtl;
+ unsigned char byGPIO;
+ bool bHWRadioOff;
+ bool bPrvActive4RadioOFF;
+ bool bGPIOBlockRead;
// Beacon releated
- WORD wSeqCounter;
- WORD wBCNBufLen;
- BOOL bBeaconBufReady;
- BOOL bBeaconSent;
- BOOL bIsBeaconBufReadySet;
- UINT cbBeaconBufReadySetCnt;
- BOOL bFixRate;
- BYTE byCurrentCh;
- UINT uScanTime;
+ unsigned short wSeqCounter;
+ unsigned short wBCNBufLen;
+ bool bBeaconBufReady;
+ bool bBeaconSent;
+ bool bIsBeaconBufReadySet;
+ unsigned int cbBeaconBufReadySetCnt;
+ bool bFixRate;
+ unsigned char byCurrentCh;
+ unsigned int uScanTime;
CMD_STATE eCommandState;
CMD_CODE eCommand;
- BOOL bBeaconTx;
+ bool bBeaconTx;
- BOOL bStopBeacon;
- BOOL bStopDataPkt;
- BOOL bStopTx0Pkt;
- UINT uAutoReConnectTime;
+ bool bStopBeacon;
+ bool bStopDataPkt;
+ bool bStopTx0Pkt;
+ unsigned int uAutoReConnectTime;
// 802.11 counter
CMD_ITEM eCmdQueue[CMD_Q_SIZE];
- UINT uCmdDequeueIdx;
- UINT uCmdEnqueueIdx;
- UINT cbFreeCmdQueue;
- BOOL bCmdRunning;
- BOOL bCmdClear;
+ unsigned int uCmdDequeueIdx;
+ unsigned int uCmdEnqueueIdx;
+ unsigned int cbFreeCmdQueue;
+ bool bCmdRunning;
+ bool bCmdClear;
- BOOL bRoaming;
+ bool bRoaming;
//WOW
- BYTE abyIPAddr[4];
+ unsigned char abyIPAddr[4];
- ULONG ulTxPower;
+ unsigned long ulTxPower;
NDIS_802_11_WEP_STATUS eEncryptionStatus;
- BOOL bTransmitKey;
+ bool bTransmitKey;
//2007-0925-01<Add>by MikeLiu
//mike add :save old Encryption
NDIS_802_11_WEP_STATUS eOldEncryptionStatus;
SKeyManagement sKey;
- DWORD dwIVCounter;
+ unsigned long dwIVCounter;
QWORD qwPacketNumber; //For CCMP and TKIP as TSC(6 bytes)
- UINT uCurrentWEPMode;
+ unsigned int uCurrentWEPMode;
RC4Ext SBox;
- BYTE abyPRNG[WLAN_WEPMAX_KEYLEN+3];
- BYTE byKeyIndex;
- UINT uKeyLength;
- BYTE abyKey[WLAN_WEP232_KEYLEN];
+ unsigned char abyPRNG[WLAN_WEPMAX_KEYLEN+3];
+ unsigned char byKeyIndex;
+ unsigned int uKeyLength;
+ unsigned char abyKey[WLAN_WEP232_KEYLEN];
- BOOL bAES;
- BYTE byCntMeasure;
+ bool bAES;
+ unsigned char byCntMeasure;
// for AP mode
- UINT uAssocCount;
- BOOL bMoreData;
+ unsigned int uAssocCount;
+ bool bMoreData;
// QoS
- BOOL bGrpAckPolicy;
+ bool bGrpAckPolicy;
// for OID_802_11_ASSOCIATION_INFORMATION
- BOOL bAssocInfoSet;
+ bool bAssocInfoSet;
- BYTE byAutoFBCtrl;
+ unsigned char byAutoFBCtrl;
- BOOL bTxMICFail;
- BOOL bRxMICFail;
+ bool bTxMICFail;
+ bool bRxMICFail;
- UINT uRATEIdx;
+ unsigned int uRATEIdx;
// For Update BaseBand VGA Gain Offset
- BOOL bUpdateBBVGA;
- UINT uBBVGADiffCount;
- BYTE byBBVGANew;
- BYTE byBBVGACurrent;
- BYTE abyBBVGA[BB_VGA_LEVEL];
- LONG ldBmThreshold[BB_VGA_LEVEL];
+ bool bUpdateBBVGA;
+ unsigned int uBBVGADiffCount;
+ unsigned char byBBVGANew;
+ unsigned char byBBVGACurrent;
+ unsigned char abyBBVGA[BB_VGA_LEVEL];
+ long ldBmThreshold[BB_VGA_LEVEL];
- BYTE byBBPreEDRSSI;
- BYTE byBBPreEDIndex;
+ unsigned char byBBPreEDRSSI;
+ unsigned char byBBPreEDIndex;
- BOOL bRadioCmd;
- DWORD dwDiagRefCount;
+ bool bRadioCmd;
+ unsigned long dwDiagRefCount;
// For FOE Tuning
- BYTE byFOETuning;
+ unsigned char byFOETuning;
// For Auto Power Tunning
- BYTE byAutoPwrTunning;
- SHORT sPSetPointCCK;
- SHORT sPSetPointOFDMG;
- SHORT sPSetPointOFDMA;
- LONG lPFormulaOffset;
- SHORT sPThreshold;
- CHAR cAdjustStep;
- CHAR cMinTxAGC;
+ unsigned char byAutoPwrTunning;
+ short sPSetPointCCK;
+ short sPSetPointOFDMG;
+ short sPSetPointOFDMA;
+ long lPFormulaOffset;
+ short sPThreshold;
+ char cAdjustStep;
+ char cMinTxAGC;
// For RF Power table
- BYTE byCCKPwr;
- BYTE byOFDMPwrG;
- BYTE byCurPwr;
- I8 byCurPwrdBm;
- BYTE abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1];
- BYTE abyOFDMPwrTbl[CB_MAX_CHANNEL+1];
- I8 abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1];
- I8 abyOFDMDefaultPwr[CB_MAX_CHANNEL+1];
- I8 abyRegPwr[CB_MAX_CHANNEL+1];
- I8 abyLocalPwr[CB_MAX_CHANNEL+1];
+ unsigned char byCCKPwr;
+ unsigned char byOFDMPwrG;
+ unsigned char byCurPwr;
+ char byCurPwrdBm;
+ unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1];
+ unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL+1];
+ char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1];
+ char abyOFDMDefaultPwr[CB_MAX_CHANNEL+1];
+ char abyRegPwr[CB_MAX_CHANNEL+1];
+ char abyLocalPwr[CB_MAX_CHANNEL+1];
// BaseBand Loopback Use
- BYTE byBBCR4d;
- BYTE byBBCRc9;
- BYTE byBBCR88;
- BYTE byBBCR09;
+ unsigned char byBBCR4d;
+ unsigned char byBBCRc9;
+ unsigned char byBBCR88;
+ unsigned char byBBCR09;
// command timer
struct timer_list sTimerCommand;
#ifdef TxInSleep
struct timer_list sTimerTxData;
- ULONG nTxDataTimeCout;
- BOOL fTxDataInSleep;
- BOOL IsTxDataTrigger;
+ unsigned long nTxDataTimeCout;
+ bool fTxDataInSleep;
+ bool IsTxDataTrigger;
#endif
#ifdef WPA_SM_Transtatus
- BOOL fWPA_Authened; //is WPA/WPA-PSK or WPA2/WPA2-PSK authen??
+ bool fWPA_Authened; //is WPA/WPA-PSK or WPA2/WPA2-PSK authen??
#endif
- BYTE byReAssocCount; //mike add:re-association retry times!
- BYTE byLinkWaitCount;
+ unsigned char byReAssocCount; //mike add:re-association retry times!
+ unsigned char byLinkWaitCount;
- BYTE abyNodeName[17];
+ unsigned char abyNodeName[17];
- BOOL bDiversityRegCtlON;
- BOOL bDiversityEnable;
- ULONG ulDiversityNValue;
- ULONG ulDiversityMValue;
- BYTE byTMax;
- BYTE byTMax2;
- BYTE byTMax3;
- ULONG ulSQ3TH;
+ bool bDiversityRegCtlON;
+ bool bDiversityEnable;
+ unsigned long ulDiversityNValue;
+ unsigned long ulDiversityMValue;
+ unsigned char byTMax;
+ unsigned char byTMax2;
+ unsigned char byTMax3;
+ unsigned long ulSQ3TH;
// ANT diversity
- ULONG uDiversityCnt;
- BYTE byAntennaState;
- ULONG ulRatio_State0;
- ULONG ulRatio_State1;
+ unsigned long uDiversityCnt;
+ unsigned char byAntennaState;
+ unsigned long ulRatio_State0;
+ unsigned long ulRatio_State1;
//SQ3 functions for antenna diversity
struct timer_list TimerSQ3Tmax1;
@@ -747,80 +747,80 @@ typedef struct __device_info {
struct timer_list TimerSQ3Tmax3;
- ULONG uNumSQ3[MAX_RATE];
- WORD wAntDiversityMaxRate;
+ unsigned long uNumSQ3[MAX_RATE];
+ unsigned short wAntDiversityMaxRate;
SEthernetHeader sTxEthHeader;
SEthernetHeader sRxEthHeader;
- BYTE abyBroadcastAddr[ETH_ALEN];
- BYTE abySNAP_RFC1042[ETH_ALEN];
- BYTE abySNAP_Bridgetunnel[ETH_ALEN];
- BYTE abyEEPROM[EEP_MAX_CONTEXT_SIZE]; //DWORD alignment
+ unsigned char abyBroadcastAddr[ETH_ALEN];
+ unsigned char abySNAP_RFC1042[ETH_ALEN];
+ unsigned char abySNAP_Bridgetunnel[ETH_ALEN];
+ unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; //unsigned long alignment
// Pre-Authentication & PMK cache
SPMKID gsPMKID;
SPMKIDCandidateEvent gsPMKIDCandidate;
// for 802.11h
- BOOL b11hEnable;
- BYTE abyCountryCode[3];
+ bool b11hEnable;
+ unsigned char abyCountryCode[3];
// for 802.11h DFS
- UINT uNumOfMeasureEIDs;
+ unsigned int uNumOfMeasureEIDs;
PWLAN_IE_MEASURE_REQ pCurrMeasureEID;
- BOOL bMeasureInProgress;
- BYTE byOrgChannel;
- BYTE byOrgRCR;
- DWORD dwOrgMAR0;
- DWORD dwOrgMAR4;
- BYTE byBasicMap;
- BYTE byCCAFraction;
- BYTE abyRPIs[8];
- DWORD dwRPIs[8];
- BOOL bChannelSwitch;
- BYTE byNewChannel;
- BYTE byChannelSwitchCount;
- BOOL bQuietEnable;
- BOOL bEnableFirstQuiet;
- BYTE byQuietStartCount;
- UINT uQuietEnqueue;
- DWORD dwCurrentQuietEndTime;
+ bool bMeasureInProgress;
+ unsigned char byOrgChannel;
+ unsigned char byOrgRCR;
+ unsigned long dwOrgMAR0;
+ unsigned long dwOrgMAR4;
+ unsigned char byBasicMap;
+ unsigned char byCCAFraction;
+ unsigned char abyRPIs[8];
+ unsigned long dwRPIs[8];
+ bool bChannelSwitch;
+ unsigned char byNewChannel;
+ unsigned char byChannelSwitchCount;
+ bool bQuietEnable;
+ bool bEnableFirstQuiet;
+ unsigned char byQuietStartCount;
+ unsigned int uQuietEnqueue;
+ unsigned long dwCurrentQuietEndTime;
SQuietControl sQuiet[MAX_QUIET_COUNT];
// for 802.11h TPC
- BOOL bCountryInfo5G;
- BOOL bCountryInfo24G;
+ bool bCountryInfo5G;
+ bool bCountryInfo24G;
- WORD wBeaconInterval;
+ unsigned short wBeaconInterval;
//WPA supplicant deamon
struct net_device *wpadev;
- BOOL bWPADEVUp;
+ bool bWPADEVUp;
struct sk_buff *skb;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
/*
- BOOL bwextstep0;
- BOOL bwextstep1;
- BOOL bwextstep2;
- BOOL bwextstep3;
+ bool bwextstep0;
+ bool bwextstep1;
+ bool bwextstep2;
+ bool bwextstep3;
*/
- UINT bwextcount;
- BOOL bWPASuppWextEnabled;
+ unsigned int bwextcount;
+ bool bWPASuppWextEnabled;
#endif
//--
#ifdef HOSTAP
// user space daemon: hostapd, is used for HOSTAP
- BOOL bEnableHostapd;
- BOOL bEnable8021x;
- BOOL bEnableHostWEP;
+ bool bEnableHostapd;
+ bool bEnable8021x;
+ bool bEnableHostWEP;
struct net_device *apdev;
int (*tx_80211)(struct sk_buff *skb, struct net_device *dev);
#endif
- UINT uChannel;
- BOOL bMACSuspend;
+ unsigned int uChannel;
+ bool bMACSuspend;
struct iw_statistics wstats; // wireless stats
- BOOL bCommit;
+ bool bCommit;
} DEVICE_INFO, *PSDevice;
@@ -880,7 +880,7 @@ void InitRxManagementQueue(PSDevice pDevice);
-inline static BOOL device_get_ip(PSDevice pInfo) {
+inline static bool device_get_ip(PSDevice pInfo) {
struct in_device* in_dev=(struct in_device*) pInfo->dev->ip_ptr;
struct in_ifaddr* ifa;
@@ -888,10 +888,10 @@ inline static BOOL device_get_ip(PSDevice pInfo) {
ifa=(struct in_ifaddr*) in_dev->ifa_list;
if (ifa!=NULL) {
memcpy(pInfo->abyIPAddr,&ifa->ifa_address,4);
- return TRUE;
+ return true;
}
}
- return FALSE;
+ return false;
}
@@ -920,9 +920,9 @@ static inline PDEVICE_TD_INFO alloc_td_info(void) {
/*--------------------- Export Functions --------------------------*/
-BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex);
-BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF);
-int Config_FileOperation(PSDevice pDevice,BOOL fwrite,unsigned char *Parameter);
+bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeIndex);
+bool device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF);
+int Config_FileOperation(PSDevice pDevice, bool fwrite, unsigned char *Parameter);
#endif
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index d1e9c1930bd..408edc27075 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -39,14 +39,6 @@ struct _version {
unsigned char build;
} version_t, *pversion_t;
-#ifndef FALSE
-#define FALSE (0)
-#endif
-
-#ifndef TRUE
-#define TRUE (!(FALSE))
-#endif
-
#define VID_TABLE_SIZE 64
#define MCAST_TABLE_SIZE 64
#define MCAM_SIZE 32
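
[Illustration, not part of the patch.] With the driver-local TRUE/FALSE macros removed here, the code relies on the kernel's C99-style bool/true/false (available through linux/types.h), which is what the BOOL-to-bool conversions throughout this patch assume. A minimal before/after sketch, borrowing the bLinkPass field name from device.h:

#include <linux/types.h>	/* bool, true, false for kernel code */

struct example_link_state {
	bool bLinkPass;		/* was: BOOL bLinkPass; */
};

static bool example_link_is_usable(const struct example_link_state *s)
{
	return s->bLinkPass;	/* was: return (s->bLinkPass == TRUE) ? TRUE : FALSE; */
}
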
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e49bb258b5c..4d6b66a4fd9 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -26,9 +26,9 @@
*
* Functions:
*
- * device_found1 - module initial (insmod) driver entry
- * device_remove1 - module remove entry
- * device_init_info - device structure resource allocation function
+ * vt6655_probe - module initial (insmod) driver entry
+ * vt6655_remove - module remove entry
+ * vt6655_init_info - device structure resource allocation function
* device_free_info - device structure resource free function
* device_get_pci_info - get allocated pci io/mem resource
* device_print_info - print out resource
@@ -62,6 +62,7 @@
#include "device.h"
#include "card.h"
+#include "channel.h"
#include "baseband.h"
#include "mac.h"
#include "tether.h"
@@ -133,10 +134,10 @@ DEVICE_PARAM(TxDescriptors1,"Number of transmit descriptors1");
#define IP_ALIG_DEF 0
-/* IP_byte_align[] is used for IP header DWORD byte aligned
- 0: indicate the IP header won't be DWORD byte aligned.(Default) .
- 1: indicate the IP header will be DWORD byte aligned.
- In some enviroment, the IP header should be DWORD byte aligned,
+/* IP_byte_align[] is used for IP header unsigned long byte aligned
+ 0: indicate the IP header won't be unsigned long byte aligned.(Default) .
+ 1: indicate the IP header will be unsigned long byte aligned.
+ In some enviroment, the IP header should be unsigned long byte aligned,
or the packet will be droped when we receive it. (eg: IPVS)
*/
DEVICE_PARAM(IP_byte_align,"Enable IP header dword aligned");
@@ -284,7 +285,7 @@ static CHIP_INFO chip_info_table[]= {
{0,NULL}
};
-DEFINE_PCI_DEVICE_TABLE(device_id_table) = {
+DEFINE_PCI_DEVICE_TABLE(vt6655_pci_id_table) = {
{ PCI_VDEVICE(VIA, 0x3253), (kernel_ulong_t)chip_info_table},
{ 0, }
};
@@ -292,10 +293,10 @@ DEFINE_PCI_DEVICE_TABLE(device_id_table) = {
/*--------------------- Static Functions --------------------------*/
-static int device_found1(struct pci_dev *pcid, const struct pci_device_id *ent);
-static BOOL device_init_info(struct pci_dev* pcid, PSDevice* ppDevice, PCHIP_INFO);
+static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
+static bool vt6655_init_info(struct pci_dev* pcid, PSDevice* ppDevice, PCHIP_INFO);
static void device_free_info(PSDevice pDevice);
-static BOOL device_get_pci_info(PSDevice, struct pci_dev* pcid);
+static bool device_get_pci_info(PSDevice, struct pci_dev* pcid);
static void device_print_info(PSDevice pDevice);
static struct net_device_stats *device_get_stats(struct net_device *dev);
static void device_init_diversity_timer(PSDevice pDevice);
@@ -326,12 +327,12 @@ static void device_init_td1_ring(PSDevice pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
//2008-0714<Add>by Mike Liu
-static BOOL device_release_WPADEV(PSDevice pDevice);
+static bool device_release_WPADEV(PSDevice pDevice);
static int ethtool_ioctl(struct net_device *dev, void *useraddr);
-static int device_rx_srv(PSDevice pDevice, UINT uIdx);
-static int device_tx_srv(PSDevice pDevice, UINT uIdx);
-static BOOL device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pDesc);
+static int device_rx_srv(PSDevice pDevice, unsigned int uIdx);
+static int device_tx_srv(PSDevice pDevice, unsigned int uIdx);
+static bool device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pDesc);
static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType);
static void device_free_tx_buf(PSDevice pDevice, PSTxDesc pDesc);
static void device_free_td0_ring(PSDevice pDevice);
@@ -340,7 +341,8 @@ static void device_free_rd0_ring(PSDevice pDevice);
static void device_free_rd1_ring(PSDevice pDevice);
static void device_free_rings(PSDevice pDevice);
static void device_free_frag_buf(PSDevice pDevice);
-static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source);
+static int Config_FileGetParameter(unsigned char *string,
+ unsigned char *dest, unsigned char *source);
/*--------------------- Export Variables --------------------------*/
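
Most of this patch is a mechanical substitution of the driver's private BOOL/BYTE/WORD/DWORD typedefs and TRUE/FALSE macros with the kernel-native types. A small sketch of the target style, assuming the staging typedefs were roughly as shown in the comment (names illustrative):

#include <linux/types.h>        /* bool, true, false, u8, u16, u32 */

/* The staging typedefs being removed were roughly:
 *   typedef int            BOOL;    (with TRUE/FALSE macros)
 *   typedef unsigned char  BYTE;
 *   typedef unsigned short WORD;
 *   typedef unsigned long  DWORD;
 * <linux/types.h> already provides bool/true/false and fixed-width types.
 */
static bool link_is_up(u32 status_reg)
{
        return (status_reg & 0x1) != 0;
}
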
@@ -357,7 +359,7 @@ static char* get_chip_name(int chip_id) {
return chip_info_table[i].name;
}
-static void device_remove1(struct pci_dev *pcid)
+static void __devexit vt6655_remove(struct pci_dev *pcid)
{
PSDevice pDevice=pci_get_drvdata(pcid);
@@ -384,7 +386,7 @@ device_set_int_opt(int *opt, int val, int min, int max, int def,char* name,char*
}
static void
-device_set_bool_opt(unsigned int *opt, int val,BOOL def,U32 flag, char* name,char* devname) {
+device_set_bool_opt(unsigned int *opt, int val,bool def,u32 flag, char* name,char* devname) {
(*opt)&=(~flag);
if (val==-1)
*opt|=(def ? flag : 0);
@@ -394,7 +396,7 @@ device_set_bool_opt(unsigned int *opt, int val,BOOL def,U32 flag, char* name,cha
*opt|=(def ? flag : 0);
} else {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: set parameter %s to %s\n",
- devname,name , val ? "TRUE" : "FALSE");
+ devname,name , val ? "true" : "false");
*opt|=(val ? flag : 0);
}
}
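
device_set_bool_opt above packs each boolean module parameter into a flags word. A compact, self-contained sketch of that mask-and-set pattern, with illustrative names rather than the driver's own:

#include <linux/types.h>

/* Clear the flag, then set it from either the user value or the default.
 * user_val == -1 means the module parameter was not given. */
static void set_bool_flag(unsigned int *flags, int user_val, bool def,
                          unsigned int flag)
{
        *flags &= ~flag;
        if (user_val == -1)
                *flags |= def ? flag : 0;
        else
                *flags |= user_val ? flag : 0;
}
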
@@ -429,9 +431,9 @@ pOpts->flags|=DEVICE_FLAGS_DiversityANT;
static void
device_set_options(PSDevice pDevice) {
- BYTE abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- BYTE abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
+ unsigned char abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned char abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
+ unsigned char abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
@@ -450,7 +452,7 @@ device_set_options(PSDevice pDevice) {
pDevice->b11hEnable = (pDevice->sOpts.flags & DEVICE_FLAGS_80211h_MODE) ? 1 : 0;
pDevice->bDiversityRegCtlON = (pDevice->sOpts.flags & DEVICE_FLAGS_DiversityANT) ? 1 : 0;
pDevice->uConnectionRate = pDevice->sOpts.data_rate;
- if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = TRUE;
+ if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
pDevice->byBBType = pDevice->sOpts.bbp_type;
pDevice->byPacketType = pDevice->byBBType;
@@ -458,45 +460,45 @@ device_set_options(PSDevice pDevice) {
pDevice->byAutoFBCtrl = AUTO_FB_0;
//pDevice->byAutoFBCtrl = AUTO_FB_1;
//PLICE_DEBUG<-
-pDevice->bUpdateBBVGA = TRUE;
+pDevice->bUpdateBBVGA = true;
pDevice->byFOETuning = 0;
pDevice->wCTSDuration = 0;
pDevice->byPreambleType = 0;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" uChannel= %d\n",(INT)pDevice->uChannel);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byOpMode= %d\n",(INT)pDevice->byOpMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" ePSMode= %d\n",(INT)pDevice->ePSMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" wRTSThreshold= %d\n",(INT)pDevice->wRTSThreshold);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byShortRetryLimit= %d\n",(INT)pDevice->byShortRetryLimit);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byLongRetryLimit= %d\n",(INT)pDevice->byLongRetryLimit);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byPreambleType= %d\n",(INT)pDevice->byPreambleType);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byShortPreamble= %d\n",(INT)pDevice->byShortPreamble);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" uConnectionRate= %d\n",(INT)pDevice->uConnectionRate);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byBBType= %d\n",(INT)pDevice->byBBType);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->b11hEnable= %d\n",(INT)pDevice->b11hEnable);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->bDiversityRegCtlON= %d\n",(INT)pDevice->bDiversityRegCtlON);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" uChannel= %d\n",(int)pDevice->uChannel);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byOpMode= %d\n",(int)pDevice->byOpMode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" ePSMode= %d\n",(int)pDevice->ePSMode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" wRTSThreshold= %d\n",(int)pDevice->wRTSThreshold);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byShortRetryLimit= %d\n",(int)pDevice->byShortRetryLimit);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byLongRetryLimit= %d\n",(int)pDevice->byLongRetryLimit);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byPreambleType= %d\n",(int)pDevice->byPreambleType);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byShortPreamble= %d\n",(int)pDevice->byShortPreamble);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" uConnectionRate= %d\n",(int)pDevice->uConnectionRate);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byBBType= %d\n",(int)pDevice->byBBType);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->b11hEnable= %d\n",(int)pDevice->b11hEnable);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->bDiversityRegCtlON= %d\n",(int)pDevice->bDiversityRegCtlON);
}
-static void s_vCompleteCurrentMeasure (PSDevice pDevice, BYTE byResult)
+static void s_vCompleteCurrentMeasure (PSDevice pDevice, unsigned char byResult)
{
- UINT ii;
- DWORD dwDuration = 0;
- BYTE byRPI0 = 0;
+ unsigned int ii;
+ unsigned long dwDuration = 0;
+ unsigned char byRPI0 = 0;
for(ii=1;ii<8;ii++) {
pDevice->dwRPIs[ii] *= 255;
- dwDuration |= *((PWORD) (pDevice->pCurrMeasureEID->sReq.abyDuration));
+ dwDuration |= *((unsigned short *) (pDevice->pCurrMeasureEID->sReq.abyDuration));
dwDuration <<= 10;
pDevice->dwRPIs[ii] /= dwDuration;
- pDevice->abyRPIs[ii] = (BYTE) pDevice->dwRPIs[ii];
+ pDevice->abyRPIs[ii] = (unsigned char) pDevice->dwRPIs[ii];
byRPI0 += pDevice->abyRPIs[ii];
}
pDevice->abyRPIs[0] = (0xFF - byRPI0);
if (pDevice->uNumOfMeasureEIDs == 0) {
VNTWIFIbMeasureReport( pDevice->pMgmt,
- TRUE,
+ true,
pDevice->pCurrMeasureEID,
byResult,
pDevice->byBasicMap,
@@ -505,7 +507,7 @@ static void s_vCompleteCurrentMeasure (PSDevice pDevice, BYTE byResult)
);
} else {
VNTWIFIbMeasureReport( pDevice->pMgmt,
- FALSE,
+ false,
pDevice->pCurrMeasureEID,
byResult,
pDevice->byBasicMap,
@@ -525,12 +527,12 @@ static void s_vCompleteCurrentMeasure (PSDevice pDevice, BYTE byResult)
static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
{
- UINT ii;
- BYTE byValue;
- BYTE byValue1;
- BYTE byCCKPwrdBm = 0;
- BYTE byOFDMPwrdBm = 0;
- INT zonetype=0;
+ unsigned int ii;
+ unsigned char byValue;
+ unsigned char byValue1;
+ unsigned char byCCKPwrdBm = 0;
+ unsigned char byOFDMPwrdBm = 0;
+ int zonetype=0;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
MACbShutdown(pDevice->PortOffset);
BBvSoftwareReset(pDevice->PortOffset);
@@ -540,11 +542,11 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
// Do MACbSoftwareReset in MACvInitialize
MACbSoftwareReset(pDevice->PortOffset);
// force CCK
- pDevice->bCCK = TRUE;
- pDevice->bAES = FALSE;
- pDevice->bProtectMode = FALSE; //Only used in 11g type, sync with ERP IE
- pDevice->bNonERPPresent = FALSE;
- pDevice->bBarkerPreambleMd = FALSE;
+ pDevice->bCCK = true;
+ pDevice->bAES = false;
+ pDevice->bProtectMode = false; //Only used in 11g type, sync with ERP IE
+ pDevice->bNonERPPresent = false;
+ pDevice->bBarkerPreambleMd = false;
pDevice->wCurrentRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_24M;
pDevice->byTopCCKBasicRate = RATE_1M;
@@ -570,9 +572,9 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
// Get Antena
byValue = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
if (byValue & EEP_ANTINV)
- pDevice->bTxRxAntInv = TRUE;
+ pDevice->bTxRxAntInv = true;
else
- pDevice->bTxRxAntInv = FALSE;
+ pDevice->bTxRxAntInv = false;
#ifdef PLICE_DEBUG
//printk("init_register:TxRxAntInv is %d,byValue is %d\n",pDevice->bTxRxAntInv,byValue);
#endif
@@ -587,7 +589,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->ulDiversityMValue = 100*16;//SROMbyReadEmbedded(pDevice->PortOffset, 0x52);
pDevice->byTMax = 1;//SROMbyReadEmbedded(pDevice->PortOffset, 0x53);
pDevice->byTMax2 = 4;//SROMbyReadEmbedded(pDevice->PortOffset, 0x54);
- pDevice->ulSQ3TH = 0;//(ULONG) SROMbyReadEmbedded(pDevice->PortOffset, 0x55);
+ pDevice->ulSQ3TH = 0;//(unsigned long) SROMbyReadEmbedded(pDevice->PortOffset, 0x55);
pDevice->byTMax3 = 64;//SROMbyReadEmbedded(pDevice->PortOffset, 0x56);
if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
@@ -595,7 +597,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byTxAntennaMode = ANT_B;
pDevice->dwTxAntennaSel = 1;
pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -603,26 +605,26 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
byValue1 = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
// if (pDevice->bDiversityRegCtlON)
if((byValue1&0x08)==0)
- pDevice->bDiversityEnable = FALSE;//SROMbyReadEmbedded(pDevice->PortOffset, 0x50);
+ pDevice->bDiversityEnable = false;//SROMbyReadEmbedded(pDevice->PortOffset, 0x50);
else
- pDevice->bDiversityEnable = TRUE;
+ pDevice->bDiversityEnable = true;
#ifdef PLICE_DEBUG
//printk("aux |main antenna: RxAntennaMode is %d\n",pDevice->byRxAntennaMode);
#endif
} else {
- pDevice->bDiversityEnable = FALSE;
+ pDevice->bDiversityEnable = false;
pDevice->byAntennaCount = 1;
pDevice->dwTxAntennaSel = 0;
pDevice->dwRxAntennaSel = 0;
if (byValue & EEP_ANTENNA_AUX) {
pDevice->byTxAntennaMode = ANT_A;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_B;
else
pDevice->byRxAntennaMode = ANT_A;
} else {
pDevice->byTxAntennaMode = ANT_B;
- if (pDevice->bTxRxAntInv == TRUE)
+ if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -638,7 +640,7 @@ byValue1 = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
//2008-8-4 <add> by chester
//zonetype initial
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- zonetype = Config_FileOperation(pDevice,FALSE,NULL);
+ zonetype = Config_FileOperation(pDevice,false,NULL);
if (zonetype >= 0) { //read zonetype file ok!
if ((zonetype == 0)&&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] !=0x00)){ //for USA
@@ -680,7 +682,7 @@ else
pDevice->byRFType &= RF_MASK;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRFType = %x\n", pDevice->byRFType);
- if (pDevice->bZoneRegExist == FALSE) {
+ if (pDevice->bZoneRegExist == false) {
pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byZoneType = %x\n", pDevice->byZoneType);
@@ -700,11 +702,11 @@ else
for (ii=0;ii<CB_MAX_CHANNEL_24G;ii++) {
- pDevice->abyCCKPwrTbl[ii+1] = SROMbyReadEmbedded(pDevice->PortOffset, (BYTE)(ii + EEP_OFS_CCK_PWR_TBL));
+ pDevice->abyCCKPwrTbl[ii+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
if (pDevice->abyCCKPwrTbl[ii+1] == 0) {
pDevice->abyCCKPwrTbl[ii+1] = pDevice->byCCKPwr;
}
- pDevice->abyOFDMPwrTbl[ii+1] = SROMbyReadEmbedded(pDevice->PortOffset, (BYTE)(ii + EEP_OFS_OFDM_PWR_TBL));
+ pDevice->abyOFDMPwrTbl[ii+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
if (pDevice->abyOFDMPwrTbl[ii+1] == 0) {
pDevice->abyOFDMPwrTbl[ii+1] = pDevice->byOFDMPwrG;
}
@@ -726,10 +728,10 @@ else
// Load OFDM A Power Table
for (ii=0;ii<CB_MAX_CHANNEL_5G;ii++) { //RobertYu:20041224, bug using CB_MAX_CHANNEL
- pDevice->abyOFDMPwrTbl[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (BYTE)(ii + EEP_OFS_OFDMA_PWR_TBL));
- pDevice->abyOFDMDefaultPwr[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (BYTE)(ii + EEP_OFS_OFDMA_PWR_dBm));
+ pDevice->abyOFDMPwrTbl[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
+ pDevice->abyOFDMDefaultPwr[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
}
- CARDvInitChannelTable((void *)pDevice);
+ init_channel_table((void *)pDevice);
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
@@ -773,38 +775,38 @@ else
if (pDevice->uConnectionRate == RATE_AUTO) {
pDevice->wCurrentRate = RATE_54M;
} else {
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
// default G Mode
VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_11G);
VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_AUTO);
- pDevice->bRadioOff = FALSE;
+ pDevice->bRadioOff = false;
pDevice->byRadioCtl = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RADIOCTL);
- pDevice->bHWRadioOff = FALSE;
+ pDevice->bHWRadioOff = false;
if (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) {
// Get GPIO
MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
//2008-4-14 <add> by chester for led issue
#ifdef FOR_LED_ON_NOTEBOOK
-if (pDevice->byGPIO & GPIO0_DATA){pDevice->bHWRadioOff = TRUE;}
-if ( !(pDevice->byGPIO & GPIO0_DATA)){pDevice->bHWRadioOff = FALSE;}
+if (pDevice->byGPIO & GPIO0_DATA){pDevice->bHWRadioOff = true;}
+if ( !(pDevice->byGPIO & GPIO0_DATA)){pDevice->bHWRadioOff = false;}
}
- if ( (pDevice->bRadioControlOff == TRUE)) {
+ if ( (pDevice->bRadioControlOff == true)) {
CARDbRadioPowerOff(pDevice);
}
else CARDbRadioPowerOn(pDevice);
#else
if (((pDevice->byGPIO & GPIO0_DATA) && !(pDevice->byRadioCtl & EEP_RADIOCTL_INV)) ||
( !(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV))) {
- pDevice->bHWRadioOff = TRUE;
+ pDevice->bHWRadioOff = true;
}
}
- if ((pDevice->bHWRadioOff == TRUE) || (pDevice->bRadioControlOff == TRUE)) {
+ if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
CARDbRadioPowerOff(pDevice);
}
@@ -850,17 +852,17 @@ else CARDbRadioPowerOn(pDevice);
static void device_init_diversity_timer(PSDevice pDevice) {
init_timer(&pDevice->TimerSQ3Tmax1);
- pDevice->TimerSQ3Tmax1.data = (ULONG)pDevice;
+ pDevice->TimerSQ3Tmax1.data = (unsigned long) pDevice;
pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax2);
- pDevice->TimerSQ3Tmax2.data = (ULONG)pDevice;
+ pDevice->TimerSQ3Tmax2.data = (unsigned long) pDevice;
pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax3);
- pDevice->TimerSQ3Tmax3.data = (ULONG)pDevice;
+ pDevice->TimerSQ3Tmax3.data = (unsigned long) pDevice;
pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerState1CallBack;
pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
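
The timer setup above keeps the legacy timer API of that era, where the callback context travels in an unsigned long .data field (hence the casts in the hunk). A minimal self-contained sketch of that idiom; the structure and callback names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

struct example_dev {
        struct timer_list watchdog;
};

static void example_timer_cb(unsigned long data)
{
        struct example_dev *dev = (struct example_dev *)data;

        pr_debug("timer fired for %p\n", dev);
}

static void example_timer_start(struct example_dev *dev)
{
        init_timer(&dev->watchdog);
        dev->watchdog.data = (unsigned long)dev;
        dev->watchdog.function = example_timer_cb;
        dev->watchdog.expires = jiffies + HZ;   /* about one second from now */
        add_timer(&dev->watchdog);
}
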
@@ -868,13 +870,13 @@ static void device_init_diversity_timer(PSDevice pDevice) {
}
-static BOOL device_release_WPADEV(PSDevice pDevice)
+static bool device_release_WPADEV(PSDevice pDevice)
{
viawget_wpa_header *wpahdr;
int ii=0;
// wait_queue_head_t Set_wait;
//send device close to wpa_supplicnat layer
- if (pDevice->bWPADEVUp==TRUE) {
+ if (pDevice->bWPADEVUp==true) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_DEVICECLOSE_MSG;
wpahdr->resp_ie_len = 0;
@@ -891,7 +893,7 @@ static BOOL device_release_WPADEV(PSDevice pDevice)
//wait release WPADEV
// init_waitqueue_head(&Set_wait);
// wait_event_timeout(Set_wait, ((pDevice->wpadev==NULL)&&(pDevice->skb == NULL)),5*HZ); //1s wait
- while((pDevice->bWPADEVUp==TRUE)) {
+ while((pDevice->bWPADEVUp==true)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout (HZ/20); //wait 50ms
ii++;
@@ -899,7 +901,7 @@ static BOOL device_release_WPADEV(PSDevice pDevice)
break;
}
};
- return TRUE;
+ return true;
}
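
The loop above polls bWPADEVUp every 50 ms with a retry cap, using set_current_state() plus schedule_timeout(). A sketch of the same bounded-wait pattern written with msleep(), which is the more common way to sleep in process context; the context structure and field are illustrative, not the driver's:

#include <linux/delay.h>
#include <linux/types.h>

struct wpa_ctx {
        bool dev_up;
};

/* Poll roughly every 50 ms, give up after about one second. */
static bool wait_for_wpadev_down(struct wpa_ctx *ctx)
{
        int tries = 20;

        while (ctx->dev_up && tries--)
                msleep(50);
        return !ctx->dev_up;
}
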
@@ -914,10 +916,10 @@ static const struct net_device_ops device_netdev_ops = {
-static int
-device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
+static int __devinit
+vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
- static BOOL bFirst = TRUE;
+ static bool bFirst = true;
struct net_device* dev = NULL;
PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
PSDevice pDevice;
@@ -944,10 +946,10 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
if (bFirst) {
printk(KERN_NOTICE "%s Ver. %s\n",DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
printk(KERN_NOTICE "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
- bFirst=FALSE;
+ bFirst=false;
}
- if (!device_init_info(pcid, &pDevice, pChip_info)) {
+ if (!vt6655_init_info(pcid, &pDevice, pChip_info)) {
return -ENOMEM;
}
pDevice->dev = dev;
@@ -962,7 +964,7 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
#ifdef DEBUG
printk("Before get pci_info memaddr is %x\n",pDevice->memaddr);
#endif
- if (device_get_pci_info(pDevice,pcid) == FALSE) {
+ if (device_get_pci_info(pDevice,pcid) == false) {
printk(KERN_ERR DEVICE_NAME ": Failed to find PCI device.\n");
device_free_info(pDevice);
return -ENODEV;
@@ -976,7 +978,7 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
printk("after get pci_info memaddr is %x, io addr is %x,io_size is %d\n",pDevice->memaddr,pDevice->ioaddr,pDevice->io_size);
{
int i;
- U32 bar,len;
+ u32 bar,len;
u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
@@ -1020,8 +1022,8 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
#ifdef DEBUG
//return 0 ;
#endif
- pDevice->PortOffset = (DWORD)ioremap(pDevice->memaddr & PCI_BASE_ADDRESS_MEM_MASK, pDevice->io_size);
- //pDevice->PortOffset = (DWORD)ioremap(pDevice->ioaddr & PCI_BASE_ADDRESS_IO_MASK, pDevice->io_size);
+ pDevice->PortOffset = (unsigned long)ioremap(pDevice->memaddr & PCI_BASE_ADDRESS_MEM_MASK, pDevice->io_size);
+ //pDevice->PortOffset = (unsigned long)ioremap(pDevice->ioaddr & PCI_BASE_ADDRESS_IO_MASK, pDevice->io_size);
if(pDevice->PortOffset == 0) {
printk(KERN_ERR DEVICE_NAME ": Failed to IO remapping ..\n");
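
The PortOffset assignment above stores the ioremap() return value in an unsigned long. For reference, the sparse-friendly form keeps it as void __iomem * and goes through the MMIO accessors; a sketch under that assumption (the 0x4F offset mirrors the PLICE_DEBUG peek nearby, everything else is illustrative):

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kernel.h>

static void __iomem *map_bar0(struct pci_dev *pdev, unsigned long len)
{
        void __iomem *regs = ioremap(pci_resource_start(pdev, 0), len);

        if (!regs)
                return NULL;
        /* MMIO accessors keep the __iomem annotation intact */
        pr_info("reg 0x4f = %#x\n", ioread8(regs + 0x4f));
        return regs;    /* caller releases with iounmap(regs) */
}
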
@@ -1041,7 +1043,7 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
dev->base_addr = pDevice->ioaddr;
#ifdef PLICE_DEBUG
- BYTE value;
+ unsigned char value;
VNSvInPortB(pDevice->PortOffset+0x4F, &value);
printk("Before write: value is %x\n",value);
@@ -1111,16 +1113,17 @@ static void device_print_info(PSDevice pDevice)
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: %s\n",dev->name, get_chip_name(pDevice->chip_id));
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: MAC=%pM", dev->name, dev->dev_addr);
#ifdef IO_MAP
- DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IO=0x%lx ",(ULONG) pDevice->ioaddr);
+ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IO=0x%lx ",(unsigned long) pDevice->ioaddr);
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IRQ=%d \n", pDevice->dev->irq);
#else
- DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IO=0x%lx Mem=0x%lx ",(ULONG) pDevice->ioaddr,(ULONG) pDevice->PortOffset);
+ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IO=0x%lx Mem=0x%lx ",
+ (unsigned long) pDevice->ioaddr,(unsigned long) pDevice->PortOffset);
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IRQ=%d \n", pDevice->dev->irq);
#endif
}
-static BOOL device_init_info(struct pci_dev* pcid, PSDevice* ppDevice,
+static bool __devinit vt6655_init_info(struct pci_dev* pcid, PSDevice* ppDevice,
PCHIP_INFO pChip_info) {
PSDevice p;
@@ -1145,19 +1148,19 @@ static BOOL device_init_info(struct pci_dev* pcid, PSDevice* ppDevice,
spin_lock_init(&((*ppDevice)->lock));
- return TRUE;
+ return true;
}
-static BOOL device_get_pci_info(PSDevice pDevice, struct pci_dev* pcid) {
+static bool device_get_pci_info(PSDevice pDevice, struct pci_dev* pcid) {
- U16 pci_cmd;
- U8 b;
- UINT cis_addr;
+ u16 pci_cmd;
+ u8 b;
+ unsigned int cis_addr;
#ifdef PLICE_DEBUG
- BYTE pci_config[256];
- BYTE value =0x00;
+ unsigned char pci_config[256];
+ unsigned char value =0x00;
int ii,j;
- U16 max_lat=0x0000;
+ u16 max_lat=0x0000;
memset(pci_config,0x00,256);
#endif
@@ -1211,7 +1214,7 @@ static BOOL device_get_pci_info(PSDevice pDevice, struct pci_dev* pcid) {
}
}
#endif
- return TRUE;
+ return true;
}
static void device_free_info(PSDevice pDevice) {
@@ -1263,7 +1266,7 @@ device_release_WPADEV(pDevice);
}
}
-static BOOL device_init_rings(PSDevice pDevice) {
+static bool device_init_rings(PSDevice pDevice) {
void* vir_pool;
@@ -1277,7 +1280,7 @@ static BOOL device_init_rings(PSDevice pDevice) {
if (vir_pool == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate desc dma memory failed\n", pDevice->dev->name);
- return FALSE;
+ return false;
}
memset(vir_pool, 0,
@@ -1312,7 +1315,7 @@ static BOOL device_init_rings(PSDevice pDevice) {
pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
vir_pool, pDevice->pool_dma
);
- return FALSE;
+ return false;
}
memset(pDevice->tx0_bufs, 0,
@@ -1358,7 +1361,7 @@ static BOOL device_init_rings(PSDevice pDevice) {
pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;
- return TRUE;
+ return true;
}
static void device_free_rings(PSDevice pDevice) {
@@ -1593,7 +1596,7 @@ static void device_free_td1_ring(PSDevice pDevice) {
/*-----------------------------------------------------------------*/
-static int device_rx_srv(PSDevice pDevice, UINT uIdx) {
+static int device_rx_srv(PSDevice pDevice, unsigned int uIdx) {
PSRxDesc pRD;
int works = 0;
@@ -1621,7 +1624,7 @@ static int device_rx_srv(PSDevice pDevice, UINT uIdx) {
}
-static BOOL device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pRD) {
+static bool device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pRD) {
PDEVICE_RD_INFO pRDInfo=pRD->pRDInfo;
@@ -1631,7 +1634,7 @@ static BOOL device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pRD) {
//printk("device_alloc_rx_buf:skb is %x\n",pRDInfo->skb);
#endif
if (pRDInfo->skb==NULL)
- return FALSE;
+ return false;
ASSERT(pRDInfo->skb);
pRDInfo->skb->dev = pDevice->dev;
pRDInfo->skb_dma = pci_map_single(pDevice->pcid, skb_tail_pointer(pRDInfo->skb),
@@ -1643,35 +1646,35 @@ static BOOL device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pRD) {
pRD->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
pRD->buff_addr = cpu_to_le32(pRDInfo->skb_dma);
- return TRUE;
+ return true;
}
-BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) {
+bool device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) {
pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDeF->skb == NULL)
- return FALSE;
+ return false;
ASSERT(pDeF->skb);
pDeF->skb->dev = pDevice->dev;
- return TRUE;
+ return true;
}
-static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
+static int device_tx_srv(PSDevice pDevice, unsigned int uIdx) {
PSTxDesc pTD;
- BOOL bFull=FALSE;
+ bool bFull=false;
int works = 0;
- BYTE byTsr0;
- BYTE byTsr1;
- UINT uFrameSize, uFIFOHeaderSize;
+ unsigned char byTsr0;
+ unsigned char byTsr1;
+ unsigned int uFrameSize, uFIFOHeaderSize;
PSTxBufHead pTxBufHead;
struct net_device_stats* pStats = &pDevice->stats;
struct sk_buff* skb;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
PSMgmtObject pMgmt = pDevice->pMgmt;
@@ -1697,20 +1700,20 @@ static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
STAvUpdateTDStatCounter(&pDevice->scStatistic,
byTsr0, byTsr1,
- (PBYTE)(pTD->pTDInfo->buf + uFIFOHeaderSize),
+ (unsigned char *)(pTD->pTDInfo->buf + uFIFOHeaderSize),
uFrameSize, uIdx);
BSSvUpdateNodeTxCounter(pDevice,
byTsr0, byTsr1,
- (PBYTE)(pTD->pTDInfo->buf),
+ (unsigned char *)(pTD->pTDInfo->buf),
uFIFOHeaderSize
);
if ( !(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X].\n",
- (INT)uIdx, byTsr1, byTsr0);
+ (int)uIdx, byTsr1, byTsr0);
}
if ((pTxBufHead->wFragCtl & FRAGCTL_ENDFRAG) != FRAGCTL_NONFRAG) {
pDevice->s802_11Counter.TransmittedFragmentCount ++;
@@ -1720,7 +1723,7 @@ static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] dropped & tsr1[%02X] tsr0[%02X].\n",
- (INT)uIdx, byTsr1, byTsr0);
+ (int)uIdx, byTsr1, byTsr0);
pStats->tx_errors++;
pStats->tx_dropped++;
}
@@ -1742,19 +1745,19 @@ static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
if (byTsr1 & TSR1_TERR) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X].\n",
- (INT)uIdx, byTsr1, byTsr0);
+ (int)uIdx, byTsr1, byTsr0);
}
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X].\n",
-// (INT)uIdx, byTsr1, byTsr0);
+// (int)uIdx, byTsr1, byTsr0);
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
(pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)) {
- WORD wAID;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ unsigned short wAID;
+ unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
skb = pTD->pTDInfo->skb;
- if (BSSDBbIsSTAInNodeDB(pMgmt, (PBYTE)(skb->data), &uNodeIndex)) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
@@ -1763,7 +1766,7 @@ static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
pTD->pTDInfo->byFlags &= ~(TD_FLAGS_NETIF_SKB);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "tx_srv:tx fail re-queue sta index= %d, QueCnt= %d\n"
- ,(INT)uNodeIndex, pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt);
+ ,(int)uNodeIndex, pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt);
pStats->tx_errors--;
pStats->tx_dropped--;
}
@@ -1780,10 +1783,10 @@ static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
// RESERV_AC0DMA reserved for relay
if (AVAIL_TD(pDevice, uIdx) < RESERV_AC0DMA) {
- bFull = TRUE;
+ bFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " AC0DMA is Full = %d\n", pDevice->iTDUsed[uIdx]);
}
- if (netif_queue_stopped(pDevice->dev) && (bFull==FALSE)){
+ if (netif_queue_stopped(pDevice->dev) && (bFull==false)){
netif_wake_queue(pDevice->dev);
}
}
@@ -1795,7 +1798,7 @@ static int device_tx_srv(PSDevice pDevice, UINT uIdx) {
}
-static void device_error(PSDevice pDevice, WORD status) {
+static void device_error(PSDevice pDevice, unsigned short status) {
if (status & ISR_FETALERR) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1804,7 +1807,7 @@ static void device_error(PSDevice pDevice, WORD status) {
netif_stop_queue(pDevice->dev);
del_timer(&pDevice->sTimerCommand);
del_timer(&(pDevice->pMgmt->sTimerSecondCallback));
- pDevice->bCmdRunning = FALSE;
+ pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
return;
}
@@ -1844,7 +1847,7 @@ void InitRxManagementQueue(PSDevice pDevice)
//PLICE_DEBUG ->
-INT MlmeThread(
+int MlmeThread(
void * Context)
{
PSDevice pDevice = (PSDevice) Context;
@@ -1914,8 +1917,8 @@ static int device_open(struct net_device *dev) {
wpa_Result.proto = 0;
wpa_Result.key_mgmt = 0;
wpa_Result.eap_type = 0;
- wpa_Result.authenticated = FALSE;
- pDevice->fWPA_Authened = FALSE;
+ wpa_Result.authenticated = false;
+ pDevice->fWPA_Authened = false;
#endif
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device init rd0 ring\n");
device_init_rd0_ring(pDevice);
@@ -1980,20 +1983,20 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device_init_registers\n");
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
/*
- pDevice->bwextstep0 = FALSE;
- pDevice->bwextstep1 = FALSE;
- pDevice->bwextstep2 = FALSE;
- pDevice->bwextstep3 = FALSE;
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
*/
pDevice->bwextcount=0;
- pDevice->bWPASuppWextEnabled = FALSE;
+ pDevice->bWPASuppWextEnabled = false;
#endif
pDevice->byReAssocCount = 0;
- pDevice->bWPADEVUp = FALSE;
+ pDevice->bWPADEVUp = false;
// Patch: if WEP key already set by iwconfig but device not yet open
- if ((pDevice->bEncryptionEnable == TRUE) && (pDevice->bTransmitKey == TRUE)) {
+ if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
KeybSetDefaultKey(&(pDevice->sKey),
- (DWORD)(pDevice->byKeyIndex | (1 << 31)),
+ (unsigned long)(pDevice->byKeyIndex | (1 << 31)),
pDevice->uKeyLength,
NULL,
pDevice->abyKey,
@@ -2052,12 +2055,12 @@ static int device_close(struct net_device *dev) {
tasklet_kill(&pDevice->RxMngWorkItem);
#endif
netif_stop_queue(dev);
- pDevice->bCmdRunning = FALSE;
+ pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
MACbSoftwareReset(pDevice->PortOffset);
CARDbRadioPowerOff(pDevice);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
device_free_td0_ring(pDevice);
@@ -2082,8 +2085,8 @@ device_release_WPADEV(pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
PSDevice pDevice=netdev_priv(dev);
- PBYTE pbMPDU;
- UINT cbMPDULen = 0;
+ unsigned char *pbMPDU;
+ unsigned int cbMPDULen = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211\n");
@@ -2096,7 +2099,7 @@ static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
return 0;
}
- if (pDevice->bStopTx0Pkt == TRUE) {
+ if (pDevice->bStopTx0Pkt == true) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -2115,36 +2118,36 @@ static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
-BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
+bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeIndex) {
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
- UINT cbFrameBodySize;
- UINT uMACfragNum;
- BYTE byPktType;
- BOOL bNeedEncryption = FALSE;
+ unsigned int cbFrameBodySize;
+ unsigned int uMACfragNum;
+ unsigned char byPktType;
+ bool bNeedEncryption = false;
PSKeyItem pTransmitKey = NULL;
- UINT cbHeaderSize;
- UINT ii;
+ unsigned int cbHeaderSize;
+ unsigned int ii;
SKeyItem STempKey;
-// BYTE byKeyIndex = 0;
+// unsigned char byKeyIndex = 0;
- if (pDevice->bStopTx0Pkt == TRUE) {
+ if (pDevice->bStopTx0Pkt == true) {
dev_kfree_skb_irq(skb);
- return FALSE;
+ return false;
};
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
dev_kfree_skb_irq(skb);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_xmit, td0 <=0\n");
- return FALSE;
+ return false;
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (pDevice->uAssocCount == 0) {
dev_kfree_skb_irq(skb);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_xmit, assocCount = 0\n");
- return FALSE;
+ return false;
}
}
@@ -2152,7 +2155,7 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), ETH_HLEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
@@ -2163,9 +2166,9 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
if ( uMACfragNum > AVAIL_TD(pDevice, TYPE_TXDMA0)) {
dev_kfree_skb_irq(skb);
- return FALSE;
+ return false;
}
- byPktType = (BYTE)pDevice->byPacketType;
+ byPktType = (unsigned char)pDevice->byPacketType;
if (pDevice->bFixRate) {
@@ -2173,13 +2176,13 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
else {
@@ -2202,15 +2205,15 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
- if (pDevice->bProtectMode == TRUE) {
+ if (pDevice->bProtectMode == true) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
- if (pDevice->bEncryptionEnable == TRUE)
- bNeedEncryption = TRUE;
+ if (pDevice->bEncryptionEnable == true)
+ bNeedEncryption = true;
if (pDevice->bEnableHostWEP) {
pTransmitKey = &STempKey;
@@ -2226,7 +2229,7 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
}
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_TXDMA0, pHeadTD,
- &pDevice->sTxEthHeader, (PBYTE)skb->data, pTransmitKey, uNodeIndex,
+ &pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
@@ -2236,7 +2239,7 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
MACbPSWakeup(pDevice->PortOffset);
}
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
for (ii = 0; ii < uMACfragNum; ii++) {
@@ -2260,7 +2263,7 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
MACvTransmit0(pDevice->PortOffset);
- return TRUE;
+ return true;
}
//TYPE_AC0DMA data tx
@@ -2269,26 +2272,26 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
- UINT uNodeIndex = 0;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- WORD wAID;
- UINT uMACfragNum = 1;
- UINT cbFrameBodySize;
- BYTE byPktType;
- UINT cbHeaderSize;
- BOOL bNeedEncryption = FALSE;
+ unsigned int uNodeIndex = 0;
+ unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ unsigned short wAID;
+ unsigned int uMACfragNum = 1;
+ unsigned int cbFrameBodySize;
+ unsigned char byPktType;
+ unsigned int cbHeaderSize;
+ bool bNeedEncryption = false;
PSKeyItem pTransmitKey = NULL;
SKeyItem STempKey;
- UINT ii;
- BOOL bTKIP_UseGTK = FALSE;
- BOOL bNeedDeAuth = FALSE;
- PBYTE pbyBSSID;
- BOOL bNodeExist = FALSE;
+ unsigned int ii;
+ bool bTKIP_UseGTK = false;
+ bool bNeedDeAuth = false;
+ unsigned char *pbyBSSID;
+ bool bNodeExist = false;
spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass == FALSE) {
+ if (pDevice->bLinkPass == false) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -2307,9 +2310,9 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
spin_unlock_irq(&pDevice->lock);
return 0;
}
- if (IS_MULTICAST_ADDRESS((PBYTE)(skb->data))) {
+ if (is_multicast_ether_addr((unsigned char *)(skb->data))) {
uNodeIndex = 0;
- bNodeExist = TRUE;
+ bNodeExist = true;
if (pMgmt->sNodeDBTable[0].bPSEnable) {
skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skb);
pMgmt->sNodeDBTable[0].wEnQueueCnt++;
@@ -2319,7 +2322,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
return 0;
}
}else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, (PBYTE)(skb->data), &uNodeIndex)) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
@@ -2338,12 +2341,12 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
}else {
pDevice->byPreambleType = PREAMBLE_LONG;
}
- bNodeExist = TRUE;
+ bNodeExist = true;
}
}
- if (bNodeExist == FALSE) {
+ if (bNodeExist == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Unknown STA not found in node DB \n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
@@ -2356,7 +2359,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), ETH_HLEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
@@ -2364,18 +2367,18 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
}
- if (pDevice->bEncryptionEnable == TRUE) {
- bNeedEncryption = TRUE;
+ if (pDevice->bEncryptionEnable == true) {
+ bNeedEncryption = true;
// get Transmit key
do {
if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == FALSE) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == TRUE) {
- bTKIP_UseGTK = TRUE;
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
+ bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
break;
}
@@ -2392,12 +2395,12 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"\n");
// get pairwise key
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE)
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == true)
break;
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"IBSS and KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
@@ -2405,15 +2408,15 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
else
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"NOT IBSS and KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
} else {
- bTKIP_UseGTK = TRUE;
+ bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
}
- } while(FALSE);
+ } while(false);
}
if (pDevice->bEnableHostWEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"acdma0: STA index %d\n", uNodeIndex);
- if (pDevice->bEncryptionEnable == TRUE) {
+ if (pDevice->bEncryptionEnable == true) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
@@ -2443,7 +2446,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
}
}
- byPktType = (BYTE)pDevice->byPacketType;
+ byPktType = (unsigned char)pDevice->byPacketType;
if (pDevice->bFixRate) {
#ifdef PLICE_DEBUG
@@ -2454,7 +2457,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
@@ -2464,11 +2467,11 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
- pDevice->byACKRate = (BYTE) pDevice->wCurrentRate;
+ pDevice->byACKRate = (unsigned char) pDevice->wCurrentRate;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
}
@@ -2521,7 +2524,7 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
- if (pDevice->bProtectMode == TRUE) {
+ if (pDevice->bProtectMode == true) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
@@ -2532,28 +2535,28 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
// printk("FIX RATE:CurrentRate is %d");
//#endif
- if (bNeedEncryption == TRUE) {
+ if (bNeedEncryption == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
- bNeedEncryption = FALSE;
+ bNeedEncryption = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Don't Find TX KEY\n");
}
else {
- if (bTKIP_UseGTK == TRUE) {
+ if (bTKIP_UseGTK == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
- bNeedEncryption = TRUE;
+ bNeedEncryption = true;
}
}
}
if (pDevice->byCntMeasure == 2) {
- bNeedDeAuth = TRUE;
+ bNeedDeAuth = true;
pDevice->s802_11Counter.TKIPCounterMeasuresInvoked++;
}
@@ -2561,7 +2564,7 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
if ((uNodeIndex != 0) &&
(pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
- bNeedEncryption = TRUE;
+ bNeedEncryption = true;
}
}
}
@@ -2584,7 +2587,7 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
#endif
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
- &pDevice->sTxEthHeader, (PBYTE)skb->data, pTransmitKey, uNodeIndex,
+ &pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
@@ -2593,7 +2596,7 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
for (ii = 0; ii < uMACfragNum; ii++) {
@@ -2631,11 +2634,11 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
//#endif
{
- BYTE Protocol_Version; //802.1x Authentication
- BYTE Packet_Type; //802.1x Authentication
- BYTE Descriptor_type;
- WORD Key_info;
-BOOL bTxeapol_key = FALSE;
+ unsigned char Protocol_Version; //802.1x Authentication
+ unsigned char Packet_Type; //802.1x Authentication
+ unsigned char Descriptor_type;
+ unsigned short Key_info;
+bool bTxeapol_key = false;
Protocol_Version = skb->data[ETH_HLEN];
Packet_Type = skb->data[ETH_HLEN+1];
Descriptor_type = skb->data[ETH_HLEN+1+1+2];
@@ -2643,11 +2646,11 @@ BOOL bTxeapol_key = FALSE;
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
(Packet_Type==3)) { //802.1x OR eapol-key challenge frame transfer
- bTxeapol_key = TRUE;
+ bTxeapol_key = true;
if((Descriptor_type==254)||(Descriptor_type==2)) { //WPA or RSN
if(!(Key_info & BIT3) && //group-key challenge
(Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
- pDevice->fWPA_Authened = TRUE;
+ pDevice->fWPA_Authened = true;
if(Descriptor_type==254)
printk("WPA ");
else
@@ -2674,13 +2677,13 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
PSDevice pDevice=(PSDevice) netdev_priv(dev);
int max_count=0;
- DWORD dwMIBCounter=0;
+ unsigned long dwMIBCounter=0;
PSMgmtObject pMgmt = pDevice->pMgmt;
- BYTE byOrgPageSel=0;
+ unsigned char byOrgPageSel=0;
int handled = 0;
- BYTE byData = 0;
+ unsigned char byData = 0;
int ii= 0;
-// BYTE byRSSI;
+// unsigned char byRSSI;
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -2697,7 +2700,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if ((pDevice->dwIsr & ISR_RXDMA0) &&
(pDevice->byLocalID != REV_ID_VT3253_B0) &&
- (pDevice->bBSSIDFilter == TRUE)) {
+ (pDevice->bBSSIDFilter == true)) {
// update RSSI
//BBbReadEmbeded(pDevice->PortOffset, 0x3E, &byRSSI);
//pDevice->uCurrRSSI = byRSSI;
@@ -2746,9 +2749,9 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR4, &(pDevice->dwOrgMAR4));
MACvSelectPage0(pDevice->PortOffset);
//xxxx
- // WCMDbFlushCommandQueue(pDevice->pMgmt, TRUE);
- if (CARDbSetChannel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel) == TRUE) {
- pDevice->bMeasureInProgress = TRUE;
+ // WCMDbFlushCommandQueue(pDevice->pMgmt, true);
+ if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel) == true) {
+ pDevice->bMeasureInProgress = true;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
MACvSelectPage0(pDevice->PortOffset);
@@ -2770,7 +2773,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
}
if (pDevice->dwIsr & ISR_MEASUREEND) {
// 802.11h measure end
- pDevice->bMeasureInProgress = FALSE;
+ pDevice->bMeasureInProgress = false;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
@@ -2782,7 +2785,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
// clear measure control
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
MACvSelectPage0(pDevice->PortOffset);
- CARDbSetChannel(pDevice, pDevice->byOrgChannel);
+ set_channel(pDevice, pDevice->byOrgChannel);
// WCMDbResetCommandQueue(pDevice->pMgmt);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
@@ -2798,26 +2801,26 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (pDevice->dwIsr & ISR_QUIETSTART) {
do {
;
- } while (CARDbStartQuiet(pDevice) == FALSE);
+ } while (CARDbStartQuiet(pDevice) == false);
}
}
if (pDevice->dwIsr & ISR_TBTT) {
- if (pDevice->bEnableFirstQuiet == TRUE) {
+ if (pDevice->bEnableFirstQuiet == true) {
pDevice->byQuietStartCount--;
if (pDevice->byQuietStartCount == 0) {
- pDevice->bEnableFirstQuiet = FALSE;
+ pDevice->bEnableFirstQuiet = false;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
MACvSelectPage0(pDevice->PortOffset);
}
}
- if ((pDevice->bChannelSwitch == TRUE) &&
+ if ((pDevice->bChannelSwitch == true) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
- pDevice->bChannelSwitch = FALSE;
- CARDbSetChannel(pDevice, pDevice->byNewChannel);
+ pDevice->bChannelSwitch = false;
+ set_channel(pDevice, pDevice->byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
@@ -2827,12 +2830,12 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
}
}
if (pDevice->eOPMode == OP_MODE_ADHOC) {
- //pDevice->bBeaconSent = FALSE;
+ //pDevice->bBeaconSent = false;
} else {
- if ((pDevice->bUpdateBBVGA) && (pDevice->bLinkPass == TRUE) && (pDevice->uCurrRSSI != 0)) {
- LONG ldBm;
+ if ((pDevice->bUpdateBBVGA) && (pDevice->bLinkPass == true) && (pDevice->uCurrRSSI != 0)) {
+ long ldBm;
- RFvRSSITodBm(pDevice, (BYTE) pDevice->uCurrRSSI, &ldBm);
+ RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
for (ii=0;ii<BB_VGA_LEVEL;ii++) {
if (ldBm < pDevice->ldBmThreshold[ii]) {
pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
@@ -2858,7 +2861,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
}
}
- pDevice->bBeaconSent = FALSE;
+ pDevice->bBeaconSent = false;
if (pDevice->bEnablePSMode) {
PSbIsNextTBTTWakeUp((void *)pDevice);
};
@@ -2879,31 +2882,31 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (pDevice->dwIsr & ISR_BNTX) {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
- pDevice->bIsBeaconBufReadySet = FALSE;
+ pDevice->bIsBeaconBufReadySet = false;
pDevice->cbBeaconBufReadySetCnt = 0;
};
if (pDevice->eOPMode == OP_MODE_AP) {
if(pMgmt->byDTIMCount > 0) {
pMgmt->byDTIMCount --;
- pMgmt->sNodeDBTable[0].bRxPSPoll = FALSE;
+ pMgmt->sNodeDBTable[0].bRxPSPoll = false;
}
else {
if(pMgmt->byDTIMCount == 0) {
// check if mutltcast tx bufferring
pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[0].bRxPSPoll = true;
bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
}
}
}
- pDevice->bBeaconSent = TRUE;
+ pDevice->bBeaconSent = true;
- if (pDevice->bChannelSwitch == TRUE) {
+ if (pDevice->bChannelSwitch == true) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
- pDevice->bChannelSwitch = FALSE;
- CARDbSetChannel(pDevice, pDevice->byNewChannel);
+ pDevice->bChannelSwitch = false;
+ set_channel(pDevice, pDevice->byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
@@ -2978,9 +2981,10 @@ static inline u32 ether_crc(int length, unsigned char *data)
}
//2008-8-4 <add> by chester
-static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source)
+static int Config_FileGetParameter(unsigned char *string,
+ unsigned char *dest, unsigned char *source)
{
- UCHAR buf1[100];
+ unsigned char buf1[100];
int source_len = strlen(source);
memset(buf1,0,100);
@@ -2989,13 +2993,13 @@ static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source)
source+=strlen(buf1);
memcpy(dest,source,source_len-strlen(buf1));
- return TRUE;
+ return true;
}
-int Config_FileOperation(PSDevice pDevice,BOOL fwrite,unsigned char *Parameter) {
- UCHAR *config_path=CONFIG_PATH;
- UCHAR *buffer=NULL;
- UCHAR tmpbuffer[20];
+int Config_FileOperation(PSDevice pDevice,bool fwrite,unsigned char *Parameter) {
+ unsigned char *config_path = CONFIG_PATH;
+ unsigned char *buffer = NULL;
+ unsigned char tmpbuffer[20];
struct file *filp=NULL;
mm_segment_t old_fs = get_fs();
//int oldfsuid=0,oldfsgid=0;
@@ -3038,7 +3042,7 @@ if(filp->f_op->read(filp, buffer, 1024, &filp->f_pos)<0) {
goto error1;
}
-if(Config_FileGetParameter("ZONETYPE",tmpbuffer,buffer)!=TRUE) {
+if(Config_FileGetParameter("ZONETYPE",tmpbuffer,buffer)!=true) {
printk("get parameter error?\n");
result = -1;
goto error1;
@@ -3555,19 +3559,19 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Commit the settings\n");
spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- if(pDevice->bWPASuppWextEnabled !=TRUE)
+ if(pDevice->bWPASuppWextEnabled !=true)
#endif
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
}
- pDevice->bCommit = FALSE;
+ pDevice->bCommit = false;
}
return rc;
@@ -3598,20 +3602,20 @@ static int ethtool_ioctl(struct net_device *dev, void *useraddr)
/*------------------------------------------------------------------*/
-MODULE_DEVICE_TABLE(pci, device_id_table);
+MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
static struct pci_driver device_driver = {
name: DEVICE_NAME,
- id_table: device_id_table,
- probe: device_found1,
- remove: device_remove1,
+ id_table: vt6655_pci_id_table,
+ probe: vt6655_probe,
+ remove: vt6655_remove,
#ifdef CONFIG_PM
suspend: viawget_suspend,
resume: viawget_resume,
#endif
};
-static int __init device_init_module(void)
+static int __init vt6655_init_module(void)
{
int ret;
@@ -3627,7 +3631,7 @@ static int __init device_init_module(void)
return ret;
}
-static void __exit device_cleanup_module(void)
+static void __exit vt6655_cleanup_module(void)
{
@@ -3638,8 +3642,8 @@ static void __exit device_cleanup_module(void)
}
-module_init(device_init_module);
-module_exit(device_cleanup_module);
+module_init(vt6655_init_module);
+module_exit(vt6655_cleanup_module);
#ifdef CONFIG_PM
@@ -3651,7 +3655,7 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
+ for_each_pci_dev(pdev) {
if(pci_dev_driver(pdev) == &device_driver) {
if (pci_get_drvdata(pdev))
viawget_suspend(pdev, PMSG_HIBERNATE);
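
for_each_pci_dev() above replaces the open-coded pci_get_device() loop. A standalone sketch of the iterator; note it takes and drops a reference on each device as it walks, so code that breaks out early needs a pci_dev_put():

#include <linux/pci.h>

static unsigned int count_vendor_devices(unsigned short vendor)
{
        struct pci_dev *pdev = NULL;    /* must start at NULL */
        unsigned int n = 0;

        for_each_pci_dev(pdev) {        /* wraps pci_get_device(ANY, ANY, pdev) */
                if (pdev->vendor == vendor)
                        n++;
        }
        return n;
}
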
@@ -3677,10 +3681,10 @@ viawget_suspend(struct pci_dev *pcid, pm_message_t state)
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
- pDevice->bCmdRunning = FALSE;
+ pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
MACvSaveContext(pDevice->PortOffset, pDevice->abyMacContext);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
pci_disable_device(pcid);
@@ -3704,9 +3708,9 @@ viawget_resume(struct pci_dev *pcid)
spin_lock_irq(&pDevice->lock);
MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
device_init_registers(pDevice, DEVICE_INIT_DXPL);
- if (pMgmt->sNodeDBTable[0].bActive == TRUE) { // Assoc with BSS
- pMgmt->sNodeDBTable[0].bActive = FALSE;
- pDevice->bLinkPass = FALSE;
+ if (pMgmt->sNodeDBTable[0].bActive == true) { // Assoc with BSS
+ pMgmt->sNodeDBTable[0].bActive = false;
+ pDevice->bLinkPass = false;
if(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
// In Adhoc, BSS state set back to started.
pMgmt->eCurrState = WMAC_STATE_STARTED;
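
Before moving on to dpc.c: the pci_driver structure earlier in this file still uses the old GNU "name:" initializer syntax. A minimal sketch of an equivalent registration written with C99 designated initializers and the __devinit/__devexit annotations of that era; all names and the stub probe/remove bodies are illustrative, not this driver's:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_ids) = {
        { PCI_VDEVICE(VIA, 0x3253) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int __devinit example_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void __devexit example_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
        .name           = "example_vt6655",
        .id_table       = example_ids,
        .probe          = example_probe,
        .remove         = __devexit_p(example_remove),
};

static int __init example_init(void)
{
        return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
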
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 6b758a8c1af..15130733693 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -66,7 +66,7 @@
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-const BYTE acbyRxRate[MAX_RATE] =
+const unsigned char acbyRxRate[MAX_RATE] =
{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
@@ -76,70 +76,60 @@ const BYTE acbyRxRate[MAX_RATE] =
/*--------------------- Static Functions --------------------------*/
-static BYTE s_byGetRateIdx(BYTE byRate);
+static unsigned char s_byGetRateIdx(unsigned char byRate);
-static
-void
-s_vGetDASA(
- PBYTE pbyRxBufferAddr,
- PUINT pcbHeaderSize,
- PSEthernetHeader psEthHeader
- );
+static void
+s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
+ PSEthernetHeader psEthHeader);
-static
-void
-s_vProcessRxMACHeader (
- PSDevice pDevice,
- PBYTE pbyRxBufferAddr,
- UINT cbPacketSize,
- BOOL bIsWEP,
- BOOL bExtIV,
- PUINT pcbHeadSize
- );
+static void
+s_vProcessRxMACHeader(PSDevice pDevice, unsigned char *pbyRxBufferAddr,
+ unsigned int cbPacketSize, bool bIsWEP, bool bExtIV,
+ unsigned int *pcbHeadSize);
-static BOOL s_bAPModeRxCtl(
+static bool s_bAPModeRxCtl(
PSDevice pDevice,
- PBYTE pbyFrame,
- INT iSANodeIndex
+ unsigned char *pbyFrame,
+ int iSANodeIndex
);
-static BOOL s_bAPModeRxData (
+static bool s_bAPModeRxData (
PSDevice pDevice,
struct sk_buff* skb,
- UINT FrameSize,
- UINT cbHeaderOffset,
- INT iSANodeIndex,
- INT iDANodeIndex
+ unsigned int FrameSize,
+ unsigned int cbHeaderOffset,
+ int iSANodeIndex,
+ int iDANodeIndex
);
-static BOOL s_bHandleRxEncryption(
+static bool s_bHandleRxEncryption(
PSDevice pDevice,
- PBYTE pbyFrame,
- UINT FrameSize,
- PBYTE pbyRsr,
- PBYTE pbyNewRsr,
+ unsigned char *pbyFrame,
+ unsigned int FrameSize,
+ unsigned char *pbyRsr,
+ unsigned char *pbyNewRsr,
PSKeyItem *pKeyOut,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
+ bool *pbExtIV,
+ unsigned short *pwRxTSC15_0,
+ unsigned long *pdwRxTSC47_16
);
-static BOOL s_bHostWepRxEncryption(
+static bool s_bHostWepRxEncryption(
PSDevice pDevice,
- PBYTE pbyFrame,
- UINT FrameSize,
- PBYTE pbyRsr,
- BOOL bOnFly,
+ unsigned char *pbyFrame,
+ unsigned int FrameSize,
+ unsigned char *pbyRsr,
+ bool bOnFly,
PSKeyItem pKey,
- PBYTE pbyNewRsr,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
+ unsigned char *pbyNewRsr,
+ bool *pbExtIV,
+ unsigned short *pwRxTSC15_0,
+ unsigned long *pdwRxTSC47_16
);
@@ -162,27 +152,21 @@ static BOOL s_bHostWepRxEncryption(
* Return Value: None
*
-*/
-static
-void
-s_vProcessRxMACHeader (
- PSDevice pDevice,
- PBYTE pbyRxBufferAddr,
- UINT cbPacketSize,
- BOOL bIsWEP,
- BOOL bExtIV,
- PUINT pcbHeadSize
- )
+static void
+s_vProcessRxMACHeader(PSDevice pDevice, unsigned char *pbyRxBufferAddr,
+ unsigned int cbPacketSize, bool bIsWEP, bool bExtIV,
+ unsigned int *pcbHeadSize)
{
- PBYTE pbyRxBuffer;
- UINT cbHeaderSize = 0;
- PWORD pwType;
+ unsigned char *pbyRxBuffer;
+ unsigned int cbHeaderSize = 0;
+ unsigned short *pwType;
PS802_11Header pMACHeader;
int ii;
pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
- s_vGetDASA((PBYTE)pMACHeader, &cbHeaderSize, &pDevice->sRxEthHeader);
+ s_vGetDASA((unsigned char *)pMACHeader, &cbHeaderSize, &pDevice->sRxEthHeader);
if (bIsWEP) {
if (bExtIV) {
@@ -197,18 +181,18 @@ s_vProcessRxMACHeader (
cbHeaderSize += WLAN_HDR_ADDR3_LEN;
};
- pbyRxBuffer = (PBYTE) (pbyRxBufferAddr + cbHeaderSize);
- if (IS_ETH_ADDRESS_EQUAL(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) {
+ pbyRxBuffer = (unsigned char *) (pbyRxBufferAddr + cbHeaderSize);
+ if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) {
cbHeaderSize += 6;
}
- else if (IS_ETH_ADDRESS_EQUAL(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
+ else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
cbHeaderSize += 6;
- pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+ pwType = (unsigned short *) (pbyRxBufferAddr + cbHeaderSize);
if ((*pwType!= TYPE_PKT_IPX) && (*pwType != cpu_to_le16(0xF380))) {
}
else {
cbHeaderSize -= 8;
- pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+ pwType = (unsigned short *) (pbyRxBufferAddr + cbHeaderSize);
if (bIsWEP) {
if (bExtIV) {
*pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV
@@ -223,7 +207,7 @@ s_vProcessRxMACHeader (
}
else {
cbHeaderSize -= 2;
- pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+ pwType = (unsigned short *) (pbyRxBufferAddr + cbHeaderSize);
if (bIsWEP) {
if (bExtIV) {
*pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV
@@ -237,7 +221,7 @@ s_vProcessRxMACHeader (
}
cbHeaderSize -= (ETH_ALEN * 2);
- pbyRxBuffer = (PBYTE) (pbyRxBufferAddr + cbHeaderSize);
+ pbyRxBuffer = (unsigned char *) (pbyRxBufferAddr + cbHeaderSize);
for(ii=0;ii<ETH_ALEN;ii++)
*pbyRxBuffer++ = pDevice->sRxEthHeader.abyDstAddr[ii];
for(ii=0;ii<ETH_ALEN;ii++)
@@ -249,9 +233,9 @@ s_vProcessRxMACHeader (
-static BYTE s_byGetRateIdx (BYTE byRate)
+static unsigned char s_byGetRateIdx (unsigned char byRate)
{
- BYTE byRateIdx;
+ unsigned char byRateIdx;
for (byRateIdx = 0; byRateIdx <MAX_RATE ; byRateIdx++) {
if (acbyRxRate[byRateIdx%MAX_RATE] == byRate)
@@ -261,15 +245,11 @@ static BYTE s_byGetRateIdx (BYTE byRate)
}
-static
-void
-s_vGetDASA (
- PBYTE pbyRxBufferAddr,
- PUINT pcbHeaderSize,
- PSEthernetHeader psEthHeader
- )
+static void
+s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
+ PSEthernetHeader psEthHeader)
{
- UINT cbHeaderSize = 0;
+ unsigned int cbHeaderSize = 0;
PS802_11Header pMACHeader;
int ii;
@@ -333,7 +313,7 @@ void MngWorkItem(void *Context)
-BOOL
+bool
device_receive_frame (
PSDevice pDevice,
PSRxDesc pCurrRD
@@ -349,36 +329,36 @@ device_receive_frame (
PSMgmtObject pMgmt = pDevice->pMgmt;
PSRxMgmtPacket pRxPacket = &(pDevice->pMgmt->sRxPacket);
PS802_11Header p802_11Header;
- PBYTE pbyRsr;
- PBYTE pbyNewRsr;
- PBYTE pbyRSSI;
+ unsigned char *pbyRsr;
+ unsigned char *pbyNewRsr;
+ unsigned char *pbyRSSI;
PQWORD pqwTSFTime;
- PWORD pwFrameSize;
- PBYTE pbyFrame;
- BOOL bDeFragRx = FALSE;
- BOOL bIsWEP = FALSE;
- UINT cbHeaderOffset;
- UINT FrameSize;
- WORD wEtherType = 0;
- INT iSANodeIndex = -1;
- INT iDANodeIndex = -1;
- UINT ii;
- UINT cbIVOffset;
- BOOL bExtIV = FALSE;
- PBYTE pbyRxSts;
- PBYTE pbyRxRate;
- PBYTE pbySQ;
- UINT cbHeaderSize;
+ unsigned short *pwFrameSize;
+ unsigned char *pbyFrame;
+ bool bDeFragRx = false;
+ bool bIsWEP = false;
+ unsigned int cbHeaderOffset;
+ unsigned int FrameSize;
+ unsigned short wEtherType = 0;
+ int iSANodeIndex = -1;
+ int iDANodeIndex = -1;
+ unsigned int ii;
+ unsigned int cbIVOffset;
+ bool bExtIV = false;
+ unsigned char *pbyRxSts;
+ unsigned char *pbyRxRate;
+ unsigned char *pbySQ;
+ unsigned int cbHeaderSize;
PSKeyItem pKey = NULL;
- WORD wRxTSC15_0 = 0;
- DWORD dwRxTSC47_16 = 0;
+ unsigned short wRxTSC15_0 = 0;
+ unsigned long dwRxTSC47_16 = 0;
SKeyItem STempKey;
// 802.11h RPI
- DWORD dwDuration = 0;
- LONG ldBm = 0;
- LONG ldBmThreshold = 0;
+ unsigned long dwDuration = 0;
+ long ldBm = 0;
+ long ldBmThreshold = 0;
PS802_11Header pMACHeader;
- BOOL bRxeapol_key = FALSE;
+ bool bRxeapol_key = false;
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---------- device_receive_frame---\n");
@@ -391,7 +371,7 @@ device_receive_frame (
pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
#endif
//PLICE_DEBUG<-
- pwFrameSize = (PWORD)(skb->data + 2);
+ pwFrameSize = (unsigned short *)(skb->data + 2);
FrameSize = cpu_to_le16(pCurrRD->m_rd1RD1.wReqCount) - cpu_to_le16(pCurrRD->m_rd0RD0.wResCount);
// Max: 2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
@@ -399,17 +379,17 @@ device_receive_frame (
if ((FrameSize > 2364)||(FrameSize <= 32)) {
// Frame Size error drop this packet.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---------- WRONG Length 1 \n");
- return FALSE;
+ return false;
}
- pbyRxSts = (PBYTE) (skb->data);
- pbyRxRate = (PBYTE) (skb->data + 1);
- pbyRsr = (PBYTE) (skb->data + FrameSize - 1);
- pbyRSSI = (PBYTE) (skb->data + FrameSize - 2);
- pbyNewRsr = (PBYTE) (skb->data + FrameSize - 3);
- pbySQ = (PBYTE) (skb->data + FrameSize - 4);
+ pbyRxSts = (unsigned char *) (skb->data);
+ pbyRxRate = (unsigned char *) (skb->data + 1);
+ pbyRsr = (unsigned char *) (skb->data + FrameSize - 1);
+ pbyRSSI = (unsigned char *) (skb->data + FrameSize - 2);
+ pbyNewRsr = (unsigned char *) (skb->data + FrameSize - 3);
+ pbySQ = (unsigned char *) (skb->data + FrameSize - 4);
pqwTSFTime = (PQWORD) (skb->data + FrameSize - 12);
- pbyFrame = (PBYTE)(skb->data + 4);
+ pbyFrame = (unsigned char *)(skb->data + 4);
// get packet size
FrameSize = cpu_to_le16(*pwFrameSize);
@@ -417,7 +397,7 @@ device_receive_frame (
if ((FrameSize > 2346)|(FrameSize < 14)) { // Max: 2312Payload + 30HD +4CRC
// Min: 14 bytes ACK
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---------- WRONG Length 2 \n");
- return FALSE;
+ return false;
}
//PLICE_DEBUG->
#if 1
@@ -431,9 +411,9 @@ device_receive_frame (
#endif
- pMACHeader=(PS802_11Header)((PBYTE) (skb->data)+8);
+ pMACHeader=(PS802_11Header)((unsigned char *) (skb->data)+8);
//PLICE_DEBUG<-
- if (pDevice->bMeasureInProgress == TRUE) {
+ if (pDevice->bMeasureInProgress == true) {
if ((*pbyRsr & RSR_CRCOK) != 0) {
pDevice->byBasicMap |= 0x01;
}
@@ -460,13 +440,13 @@ device_receive_frame (
ii--;
}
pDevice->dwRPIs[ii] += dwDuration;
- return FALSE;
+ return false;
}
- if (!IS_MULTICAST_ADDRESS(pbyFrame) && !IS_BROADCAST_ADDRESS(pbyFrame)) {
+ if (!is_multicast_ether_addr(pbyFrame)) {
if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header) (skb->data + 4))) {
pDevice->s802_11Counter.FrameDuplicateCount++;
- return FALSE;
+ return false;
}
}
@@ -475,14 +455,14 @@ device_receive_frame (
s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader);
// filter packet send from myself
- if (IS_ETH_ADDRESS_EQUAL((PBYTE)&(pDevice->sRxEthHeader.abySrcAddr[0]), pDevice->abyCurrentNetAddr))
- return FALSE;
+ if (!compare_ether_addr((unsigned char *)&(pDevice->sRxEthHeader.abySrcAddr[0]), pDevice->abyCurrentNetAddr))
+ return false;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
p802_11Header = (PS802_11Header) (pbyFrame);
// get SA NodeIndex
- if (BSSDBbIsSTAInNodeDB(pMgmt, (PBYTE)(p802_11Header->abyAddr2), &iSANodeIndex)) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(p802_11Header->abyAddr2), &iSANodeIndex)) {
pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies;
pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0;
}
@@ -490,17 +470,17 @@ device_receive_frame (
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == TRUE) {
- return FALSE;
+ if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == true) {
+ return false;
}
}
if (IS_FC_WEP(pbyFrame)) {
- BOOL bRxDecryOK = FALSE;
+ bool bRxDecryOK = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"rx WEP pkt\n");
- bIsWEP = TRUE;
+ bIsWEP = true;
if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) {
pKey = &STempKey;
pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite;
@@ -552,11 +532,11 @@ device_receive_frame (
// pDevice->s802_11Counter.WEPICVErrorCount.QuadPart++;
}
}
- return FALSE;
+ return false;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WEP Func Fail\n");
- return FALSE;
+ return false;
}
if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
FrameSize -= 8; // Message Integrity Code
@@ -584,18 +564,18 @@ device_receive_frame (
}
else {
- return FALSE;
+ return false;
}
}
// Management & Control frame Handle
- if ((IS_TYPE_DATA((skb->data+4))) == FALSE) {
+ if ((IS_TYPE_DATA((skb->data+4))) == false) {
// Handle Control & Manage Frame
if (IS_TYPE_MGMT((skb->data+4))) {
- PBYTE pbyData1;
- PBYTE pbyData2;
+ unsigned char *pbyData1;
+ unsigned char *pbyData2;
pRxPacket->p80211Header = (PUWLAN_80211HDR)(skb->data+4);
pRxPacket->cbMPDULen = FrameSize;
@@ -649,13 +629,13 @@ device_receive_frame (
skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
- return TRUE;
+ return true;
}
}
else {
// Control Frame
};
- return FALSE;
+ return false;
}
else {
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -667,12 +647,12 @@ device_receive_frame (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
}
else {
// discard DATA packet while not associate || BSSID error
- if ((pDevice->bLinkPass == FALSE) ||
+ if ((pDevice->bLinkPass == false) ||
!(*pbyRsr & RSR_BSSIDOK)) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
@@ -680,12 +660,12 @@ device_receive_frame (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
//mike add:station mode check eapol-key challenge--->
{
- BYTE Protocol_Version; //802.1x Authentication
- BYTE Packet_Type; //802.1x Authentication
+ unsigned char Protocol_Version; //802.1x Authentication
+ unsigned char Packet_Type; //802.1x Authentication
if (bIsWEP)
cbIVOffset = 8;
else
@@ -697,7 +677,7 @@ device_receive_frame (
if (wEtherType == ETH_P_PAE) { //Protocol Type in LLC-Header
if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
(Packet_Type==3)) { //802.1x OR eapol-key challenge frame receive
- bRxeapol_key = TRUE;
+ bRxeapol_key = true;
}
}
}
@@ -716,8 +696,8 @@ device_receive_frame (
}
}
else {
- if (pDevice->pMgmt->bInTIMWake == TRUE) {
- pDevice->pMgmt->bInTIMWake = FALSE;
+ if (pDevice->pMgmt->bInTIMWake == true) {
+ pDevice->pMgmt->bInTIMWake = false;
}
}
};
@@ -725,7 +705,7 @@ device_receive_frame (
// Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
if (pDevice->bDiversityEnable && (FrameSize>50) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == TRUE)) {
+ (pDevice->bLinkPass == true)) {
//printk("device_receive_frame: RxRate is %d\n",*pbyRxRate);
BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
}
@@ -752,8 +732,8 @@ device_receive_frame (
// -----------------------------------------------
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == TRUE)){
- BYTE abyMacHdr[24];
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == true)){
+ unsigned char abyMacHdr[24];
// Only 802.1x packet incoming allowed
if (bIsWEP)
@@ -767,7 +747,7 @@ device_receive_frame (
if (wEtherType == ETH_P_PAE) {
skb->dev = pDevice->apdev;
- if (bIsWEP == TRUE) {
+ if (bIsWEP == true) {
// strip IV header(8)
memcpy(&abyMacHdr[0], (skb->data + 4), 24);
memcpy((skb->data + 4 + cbIVOffset), &abyMacHdr[0], 24);
@@ -781,12 +761,12 @@ device_receive_frame (
skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
- return TRUE;
+ return true;
}
// check if 802.1x authorized
if (!(pMgmt->sNodeDBTable[iSANodeIndex].dwFlags & WLAN_STA_AUTHORIZED))
- return FALSE;
+ return false;
}
@@ -800,53 +780,53 @@ device_receive_frame (
// Soft MIC
if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
if (bIsWEP) {
- PDWORD pdwMIC_L;
- PDWORD pdwMIC_R;
- DWORD dwMIC_Priority;
- DWORD dwMICKey0 = 0, dwMICKey1 = 0;
- DWORD dwLocalMIC_L = 0;
- DWORD dwLocalMIC_R = 0;
+ unsigned long *pdwMIC_L;
+ unsigned long *pdwMIC_R;
+ unsigned long dwMIC_Priority;
+ unsigned long dwMICKey0 = 0, dwMICKey1 = 0;
+ unsigned long dwLocalMIC_L = 0;
+ unsigned long dwLocalMIC_R = 0;
viawget_wpa_header *wpahdr;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- dwMICKey0 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[28]));
+ dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[24]));
+ dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[28]));
}
else {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[20]));
+ dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[16]));
+ dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[20]));
} else if ((pKey->dwKeyIndex & BIT28) == 0) {
- dwMICKey0 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[20]));
+ dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[16]));
+ dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[20]));
} else {
- dwMICKey0 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(PDWORD)(&pKey->abyKey[28]));
+ dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[24]));
+ dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[28]));
}
}
MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((PBYTE)&(pDevice->sRxEthHeader.abyDstAddr[0]), 12);
+ MIC_vAppend((unsigned char *)&(pDevice->sRxEthHeader.abyDstAddr[0]), 12);
dwMIC_Priority = 0;
- MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+ MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
// 4 is Rcv buffer header, 24 is MAC Header, and 8 is IV and Ext IV.
- MIC_vAppend((PBYTE)(skb->data + 4 + WLAN_HDR_ADDR3_LEN + 8),
+ MIC_vAppend((unsigned char *)(skb->data + 4 + WLAN_HDR_ADDR3_LEN + 8),
FrameSize - WLAN_HDR_ADDR3_LEN - 8);
MIC_vGetMIC(&dwLocalMIC_L, &dwLocalMIC_R);
MIC_vUnInit();
- pdwMIC_L = (PDWORD)(skb->data + 4 + FrameSize);
- pdwMIC_R = (PDWORD)(skb->data + 4 + FrameSize + 4);
+ pdwMIC_L = (unsigned long *)(skb->data + 4 + FrameSize);
+ pdwMIC_R = (unsigned long *)(skb->data + 4 + FrameSize + 4);
//DBG_PRN_GRP12(("RxL: %lx, RxR: %lx\n", *pdwMIC_L, *pdwMIC_R));
//DBG_PRN_GRP12(("LocalL: %lx, LocalR: %lx\n", dwLocalMIC_L, dwLocalMIC_R));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwMICKey0= %lx,dwMICKey1= %lx \n", dwMICKey0, dwMICKey1);
if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) || (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
- (pDevice->bRxMICFail == TRUE)) {
+ (pDevice->bRxMICFail == true)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC comparison is fail!\n");
- pDevice->bRxMICFail = FALSE;
+ pDevice->bRxMICFail = false;
//pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++;
pDevice->s802_11Counter.TKIPLocalMICFailures++;
if (bDeFragRx) {
@@ -858,7 +838,7 @@ device_receive_frame (
//2008-0409-07, <Add> by Einsn Liu
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
//send event to wpa_supplicant
- //if(pDevice->bWPADevEnable == TRUE)
+ //if(pDevice->bWPADevEnable == true)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
@@ -906,7 +886,7 @@ device_receive_frame (
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
};
- return FALSE;
+ return false;
}
}
@@ -917,13 +897,13 @@ device_receive_frame (
if ((pKey != NULL) && ((pKey->byCipherSuite == KEY_CTL_TKIP) ||
(pKey->byCipherSuite == KEY_CTL_CCMP))) {
if (bIsWEP) {
- WORD wLocalTSC15_0 = 0;
- DWORD dwLocalTSC47_16 = 0;
- ULONGLONG RSC = 0;
+ unsigned short wLocalTSC15_0 = 0;
+ unsigned long dwLocalTSC47_16 = 0;
+ unsigned long long RSC = 0;
// endian issues
- RSC = *((ULONGLONG *) &(pKey->KeyRSC));
- wLocalTSC15_0 = (WORD) RSC;
- dwLocalTSC47_16 = (DWORD) (RSC>>16);
+ RSC = *((unsigned long long *) &(pKey->KeyRSC));
+ wLocalTSC15_0 = (unsigned short) RSC;
+ dwLocalTSC47_16 = (unsigned long) (RSC>>16);
RSC = dwRxTSC47_16;
RSC <<= 16;
@@ -950,7 +930,7 @@ device_receive_frame (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
}
}
@@ -963,13 +943,13 @@ device_receive_frame (
}
- s_vProcessRxMACHeader(pDevice, (PBYTE)(skb->data+4), FrameSize, bIsWEP, bExtIV, &cbHeaderOffset);
+ s_vProcessRxMACHeader(pDevice, (unsigned char *)(skb->data+4), FrameSize, bIsWEP, bExtIV, &cbHeaderOffset);
FrameSize -= cbHeaderOffset;
cbHeaderOffset += 4; // 4 is Rcv buffer header
// Null data, framesize = 14
if (FrameSize < 15)
- return FALSE;
+ return false;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (s_bAPModeRxData(pDevice,
@@ -978,7 +958,7 @@ device_receive_frame (
cbHeaderOffset,
iSANodeIndex,
iDANodeIndex
- ) == FALSE) {
+ ) == false) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
@@ -986,10 +966,10 @@ device_receive_frame (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
-// if(pDevice->bRxMICFail == FALSE) {
+// if(pDevice->bRxMICFail == false) {
// for (ii =0; ii < 100; ii++)
// printk(" %02x", *(skb->data + ii));
// printk("\n");
@@ -1016,7 +996,7 @@ device_receive_frame (
pDevice->dev->name);
}
}
- return FALSE;
+ return false;
}
}
*/
@@ -1031,17 +1011,17 @@ device_receive_frame (
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
pDevice->dev->name);
}
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
-static BOOL s_bAPModeRxCtl (
+static bool s_bAPModeRxCtl (
PSDevice pDevice,
- PBYTE pbyFrame,
- INT iSANodeIndex
+ unsigned char *pbyFrame,
+ int iSANodeIndex
)
{
PS802_11Header p802_11Header;
@@ -1063,30 +1043,30 @@ static BOOL s_bAPModeRxCtl (
// reason = (6) class 2 received from nonauth sta
vMgrDeAuthenBeginSta(pDevice,
pMgmt,
- (PBYTE)(p802_11Header->abyAddr2),
+ (unsigned char *)(p802_11Header->abyAddr2),
(WLAN_MGMT_REASON_CLASS2_NONAUTH),
&Status
);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDeAuthenBeginSta 1\n");
- return TRUE;
+ return true;
};
if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_ASSOC) {
// send deassoc notification
// reason = (7) class 3 received from nonassoc sta
vMgrDisassocBeginSta(pDevice,
pMgmt,
- (PBYTE)(p802_11Header->abyAddr2),
+ (unsigned char *)(p802_11Header->abyAddr2),
(WLAN_MGMT_REASON_CLASS3_NONASSOC),
&Status
);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDisassocBeginSta 2\n");
- return TRUE;
+ return true;
};
if (pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable) {
// delcare received ps-poll event
if (IS_CTL_PSPOLL(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 1\n");
}
@@ -1094,8 +1074,8 @@ static BOOL s_bAPModeRxCtl (
// check Data PS state
// if PW bit off, send out all PS bufferring packets.
if (!IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
+ pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 2\n");
}
@@ -1103,15 +1083,15 @@ static BOOL s_bAPModeRxCtl (
}
else {
if (IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = true;
// Once if STA in PS state, enable multicast bufferring
- pMgmt->sNodeDBTable[0].bPSEnable = TRUE;
+ pMgmt->sNodeDBTable[0].bPSEnable = true;
}
else {
// clear all pending PS frame.
if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
+ pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 3\n");
@@ -1122,7 +1102,7 @@ static BOOL s_bAPModeRxCtl (
else {
vMgrDeAuthenBeginSta(pDevice,
pMgmt,
- (PBYTE)(p802_11Header->abyAddr2),
+ (unsigned char *)(p802_11Header->abyAddr2),
(WLAN_MGMT_REASON_CLASS2_NONAUTH),
&Status
);
@@ -1154,31 +1134,31 @@ static BOOL s_bAPModeRxCtl (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: wFrameCtl= %x\n", p802_11Header->wFrameCtl );
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc:pDevice->byRxMode = %x\n", pDevice->byRxMode );
- return TRUE;
+ return true;
}
}
}
- return FALSE;
+ return false;
}
-static BOOL s_bHandleRxEncryption (
+static bool s_bHandleRxEncryption (
PSDevice pDevice,
- PBYTE pbyFrame,
- UINT FrameSize,
- PBYTE pbyRsr,
- PBYTE pbyNewRsr,
+ unsigned char *pbyFrame,
+ unsigned int FrameSize,
+ unsigned char *pbyRsr,
+ unsigned char *pbyNewRsr,
PSKeyItem *pKeyOut,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
+ bool *pbExtIV,
+ unsigned short *pwRxTSC15_0,
+ unsigned long *pdwRxTSC47_16
)
{
- UINT PayloadLen = FrameSize;
- PBYTE pbyIV;
- BYTE byKeyIdx;
+ unsigned int PayloadLen = FrameSize;
+ unsigned char *pbyIV;
+ unsigned char byKeyIdx;
PSKeyItem pKey = NULL;
- BYTE byDecMode = KEY_CTL_WEP;
+ unsigned char byDecMode = KEY_CTL_WEP;
PSMgmtObject pMgmt = pDevice->pMgmt;
@@ -1186,8 +1166,8 @@ static BOOL s_bHandleRxEncryption (
*pdwRxTSC47_16 = 0;
pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if ( WLAN_GET_FC_TODS(*(PWORD)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(PWORD)pbyFrame) ) {
+ if ( WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
+ WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame) ) {
pbyIV += 6; // 6 is 802.11 address4
PayloadLen -= 6;
}
@@ -1204,7 +1184,7 @@ static BOOL s_bHandleRxEncryption (
(pDevice->pMgmt->byCSSPK != KEY_CTL_NONE)) {
// unicast pkt use pairwise key
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"unicast pkt\n");
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF, &pKey) == TRUE) {
+ if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF, &pKey) == true) {
if (pDevice->pMgmt->byCSSPK == KEY_CTL_TKIP)
byDecMode = KEY_CTL_TKIP;
else if (pDevice->pMgmt->byCSSPK == KEY_CTL_CCMP)
@@ -1238,24 +1218,24 @@ static BOOL s_bHandleRxEncryption (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey == NULL\n");
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == TRUE) {
+ } else if (pDevice->bLinkPass == true) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
- return FALSE;
+ return false;
}
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == TRUE) {
+ } else if (pDevice->bLinkPass == true) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
*pKeyOut = NULL;
- return FALSE;
+ return false;
}
if (byDecMode == KEY_CTL_WEP) {
// handle WEP
if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == TRUE)) {
+ (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true)) {
// Software WEP
// 1. 3253A
// 2. WEP 256
@@ -1275,12 +1255,12 @@ static BOOL s_bHandleRxEncryption (
// TKIP/AES
PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
- *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+ *pdwRxTSC47_16 = cpu_to_le32(*(unsigned long *)(pbyIV + 4));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
if (byDecMode == KEY_CTL_TKIP) {
*pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
} else {
- *pwRxTSC15_0 = cpu_to_le16(*(PWORD)pbyIV);
+ *pwRxTSC15_0 = cpu_to_le16(*(unsigned short *)pbyIV);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC0_15: %x\n", *pwRxTSC15_0);
@@ -1303,28 +1283,28 @@ static BOOL s_bHandleRxEncryption (
}// end of TKIP/AES
if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = TRUE;
- return TRUE;
+ *pbExtIV = true;
+ return true;
}
-static BOOL s_bHostWepRxEncryption (
+static bool s_bHostWepRxEncryption (
PSDevice pDevice,
- PBYTE pbyFrame,
- UINT FrameSize,
- PBYTE pbyRsr,
- BOOL bOnFly,
+ unsigned char *pbyFrame,
+ unsigned int FrameSize,
+ unsigned char *pbyRsr,
+ bool bOnFly,
PSKeyItem pKey,
- PBYTE pbyNewRsr,
- int * pbExtIV,
- PWORD pwRxTSC15_0,
- PDWORD pdwRxTSC47_16
+ unsigned char *pbyNewRsr,
+ bool *pbExtIV,
+ unsigned short *pwRxTSC15_0,
+ unsigned long *pdwRxTSC47_16
)
{
- UINT PayloadLen = FrameSize;
- PBYTE pbyIV;
- BYTE byKeyIdx;
- BYTE byDecMode = KEY_CTL_WEP;
+ unsigned int PayloadLen = FrameSize;
+ unsigned char *pbyIV;
+ unsigned char byKeyIdx;
+ unsigned char byDecMode = KEY_CTL_WEP;
PS802_11Header pMACHeader;
@@ -1333,8 +1313,8 @@ static BOOL s_bHostWepRxEncryption (
*pdwRxTSC47_16 = 0;
pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if ( WLAN_GET_FC_TODS(*(PWORD)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(PWORD)pbyFrame) ) {
+ if ( WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
+ WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame) ) {
pbyIV += 6; // 6 is 802.11 address4
PayloadLen -= 6;
}
@@ -1353,18 +1333,18 @@ static BOOL s_bHostWepRxEncryption (
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == TRUE) {
+ } else if (pDevice->bLinkPass == true) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
- return FALSE;
+ return false;
}
if (byDecMode == KEY_CTL_WEP) {
// handle WEP
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"byDecMode == KEY_CTL_WEP \n");
if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == TRUE) ||
- (bOnFly == FALSE)) {
+ (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
+ (bOnFly == false)) {
// Software WEP
// 1. 3253A
// 2. WEP 256
@@ -1385,19 +1365,19 @@ static BOOL s_bHostWepRxEncryption (
// TKIP/AES
PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
- *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+ *pdwRxTSC47_16 = cpu_to_le32(*(unsigned long *)(pbyIV + 4));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
if (byDecMode == KEY_CTL_TKIP) {
*pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
} else {
- *pwRxTSC15_0 = cpu_to_le16(*(PWORD)pbyIV);
+ *pwRxTSC15_0 = cpu_to_le16(*(unsigned short *)pbyIV);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC0_15: %x\n", *pwRxTSC15_0);
if (byDecMode == KEY_CTL_TKIP) {
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || (bOnFly == FALSE)) {
+ if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || (bOnFly == false)) {
// Software TKIP
// 1. 3253 A
// 2. NotOnFly
@@ -1417,7 +1397,7 @@ static BOOL s_bHostWepRxEncryption (
}
if (byDecMode == KEY_CTL_CCMP) {
- if (bOnFly == FALSE) {
+ if (bOnFly == false) {
// Software CCMP
// NotOnFly
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"soft KEY_CTL_CCMP\n");
@@ -1433,34 +1413,34 @@ static BOOL s_bHostWepRxEncryption (
}// end of TKIP/AES
if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = TRUE;
- return TRUE;
+ *pbExtIV = true;
+ return true;
}
-static BOOL s_bAPModeRxData (
+static bool s_bAPModeRxData (
PSDevice pDevice,
struct sk_buff* skb,
- UINT FrameSize,
- UINT cbHeaderOffset,
- INT iSANodeIndex,
- INT iDANodeIndex
+ unsigned int FrameSize,
+ unsigned int cbHeaderOffset,
+ int iSANodeIndex,
+ int iDANodeIndex
)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
- BOOL bRelayAndForward = FALSE;
- BOOL bRelayOnly = FALSE;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- WORD wAID;
+ bool bRelayAndForward = false;
+ bool bRelayOnly = false;
+ unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ unsigned short wAID;
struct sk_buff* skbcpy = NULL;
if (FrameSize > CB_MAX_BUF_SIZE)
- return FALSE;
+ return false;
// check DA
- if(IS_MULTICAST_ADDRESS((PBYTE)(skb->data+cbHeaderOffset))) {
+ if(is_multicast_ether_addr((unsigned char *)(skb->data+cbHeaderOffset))) {
if (pMgmt->sNodeDBTable[0].bPSEnable) {
skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
@@ -1481,12 +1461,12 @@ static BOOL s_bAPModeRxData (
}
}
else {
- bRelayAndForward = TRUE;
+ bRelayAndForward = true;
}
}
else {
// check if relay
- if (BSSDBbIsSTAInNodeDB(pMgmt, (PBYTE)(skb->data+cbHeaderOffset), &iDANodeIndex)) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data+cbHeaderOffset), &iDANodeIndex)) {
if (pMgmt->sNodeDBTable[iDANodeIndex].eNodeState >= NODE_ASSOC) {
if (pMgmt->sNodeDBTable[iDANodeIndex].bPSEnable) {
// queue this skb until next PS tx, and then release.
@@ -1500,10 +1480,10 @@ static BOOL s_bAPModeRxData (
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "relay: index= %d, pMgmt->abyPSTxMap[%d]= %d\n",
iDANodeIndex, (wAID >> 3), pMgmt->abyPSTxMap[wAID >> 3]);
- return TRUE;
+ return true;
}
else {
- bRelayOnly = TRUE;
+ bRelayOnly = true;
}
}
};
@@ -1515,16 +1495,16 @@ static BOOL s_bAPModeRxData (
iDANodeIndex = 0;
if ((pDevice->uAssocCount > 1) && (iDANodeIndex >= 0)) {
- ROUTEbRelay(pDevice, (PBYTE)(skb->data + cbHeaderOffset), FrameSize, (UINT)iDANodeIndex);
+ ROUTEbRelay(pDevice, (unsigned char *)(skb->data + cbHeaderOffset), FrameSize, (unsigned int)iDANodeIndex);
}
if (bRelayOnly)
- return FALSE;
+ return false;
}
// none associate, don't forward
if (pDevice->uAssocCount == 0)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index e574963fee0..c1b6e76a421 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -41,7 +41,7 @@
/*--------------------- Export Functions --------------------------*/
-BOOL
+bool
device_receive_frame (
PSDevice pDevice,
PSRxDesc pCurrRD
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 195cc36654a..5b83f942cda 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -154,9 +154,9 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
}
kfree(pDevice->apdev);
pDevice->apdev = NULL;
- pDevice->bEnable8021x = FALSE;
- pDevice->bEnableHostWEP = FALSE;
- pDevice->bEncryptionEnable = FALSE;
+ pDevice->bEnable8021x = false;
+ pDevice->bEnableHostWEP = false;
+ pDevice->bEncryptionEnable = false;
//4.2007-0118-03,<Add> by EinsnLiu
//execute some clear work
@@ -215,7 +215,7 @@ int vt6655_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
static int hostap_remove_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, param->sta_addr, &uNodeIndex)) {
@@ -244,7 +244,7 @@ static int hostap_add_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (!BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
@@ -255,7 +255,7 @@ static int hostap_add_sta(PSDevice pDevice,
pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = param->u.add_sta.capability;
// TODO listenInterval
// pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = 1;
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = FALSE;
+ pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = false;
pMgmt->sNodeDBTable[uNodeIndex].bySuppRate = param->u.add_sta.tx_supp_rates;
// set max tx rate
@@ -267,7 +267,7 @@ static int hostap_add_sta(PSDevice pDevice,
pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
WLAN_GET_CAP_INFO_SHORTPREAMBLE(pMgmt->sNodeDBTable[uNodeIndex].wCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (WORD)param->u.add_sta.aid;
+ pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)param->u.add_sta.aid;
pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer = jiffies;
@@ -304,7 +304,7 @@ static int hostap_get_info_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
param->u.get_info_sta.inactive_sec =
@@ -328,7 +328,7 @@ static int hostap_get_info_sta(PSDevice pDevice,
* pDevice -
* param -
* Out:
- * TURE, FALSE
+ * true, false
*
* Return Value:
*
@@ -338,7 +338,7 @@ static int hostap_reset_txexc_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts = 0;
@@ -368,13 +368,13 @@ static int hostap_set_flags_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].dwFlags |= param->u.set_flags_sta.flags_or;
pMgmt->sNodeDBTable[uNodeIndex].dwFlags &= param->u.set_flags_sta.flags_and;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " dwFlags = %x \n",
- (UINT)pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
+ (unsigned int)pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
}
else {
return -ENOENT;
@@ -471,16 +471,16 @@ static int hostap_set_encryption(PSDevice pDevice,
int param_len)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
- DWORD dwKeyIndex = 0;
- BYTE abyKey[MAX_KEY_LEN];
- BYTE abySeq[MAX_KEY_LEN];
+ unsigned long dwKeyIndex = 0;
+ unsigned char abyKey[MAX_KEY_LEN];
+ unsigned char abySeq[MAX_KEY_LEN];
NDIS_802_11_KEY_RSC KeyRSC;
- BYTE byKeyDecMode = KEY_CTL_WEP;
+ unsigned char byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int iNodeIndex = -1;
int ii;
- BOOL bKeyTableFull = FALSE;
- WORD wKeyCtl = 0;
+ bool bKeyTableFull = false;
+ unsigned short wKeyCtl = 0;
param->u.crypt.err = 0;
@@ -509,7 +509,7 @@ static int hostap_set_encryption(PSDevice pDevice,
iNodeIndex = 0;
} else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == FALSE) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
return -EINVAL;
@@ -520,14 +520,14 @@ static int hostap_set_encryption(PSDevice pDevice,
if (param->u.crypt.alg == WPA_ALG_NONE) {
- if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == TRUE) {
+ if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == true) {
if (KeybRemoveKey(&(pDevice->sKey),
param->sta_addr,
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex,
- pDevice->PortOffset) == FALSE) {
+ pDevice->PortOffset) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybRemoveKey fail \n");
}
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
}
pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = 0;
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = 0;
@@ -553,16 +553,16 @@ static int hostap_set_encryption(PSDevice pDevice,
param->u.crypt.key_len
);
- dwKeyIndex = (DWORD)(param->u.crypt.idx);
+ dwKeyIndex = (unsigned long)(param->u.crypt.idx);
if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
+ pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
+ pDevice->bTransmitKey = true;
dwKeyIndex |= (1 << 31);
}
if (param->u.crypt.alg == WPA_ALG_WEP) {
- if ((pDevice->bEnable8021x == FALSE) || (iNodeIndex == 0)) {
+ if ((pDevice->bEnable8021x == false) || (iNodeIndex == 0)) {
KeybSetDefaultKey(&(pDevice->sKey),
dwKeyIndex & ~(BIT30 | USE_KEYRSC),
param->u.crypt.key_len,
@@ -580,21 +580,21 @@ static int hostap_set_encryption(PSDevice pDevice,
dwKeyIndex & ~(USE_KEYRSC),
param->u.crypt.key_len,
(PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
+ (unsigned char *)abyKey,
KEY_CTL_WEP,
pDevice->PortOffset,
- pDevice->byLocalID) == TRUE) {
+ pDevice->byLocalID) == true) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
// Key Table Full
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
- bKeyTableFull = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
+ bKeyTableFull = true;
}
}
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
pMgmt->byCSSPK = KEY_CTL_WEP;
pMgmt->byCSSGK = KEY_CTL_WEP;
pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = KEY_CTL_WEP;
@@ -640,7 +640,7 @@ static int hostap_set_encryption(PSDevice pDevice,
byKeyDecMode,
pDevice->PortOffset,
pDevice->byLocalID);
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
dwKeyIndex |= (1 << 30); // set pairwise key
@@ -649,23 +649,23 @@ static int hostap_set_encryption(PSDevice pDevice,
dwKeyIndex,
param->u.crypt.key_len,
(PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
+ (unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == TRUE) {
+ pDevice->byLocalID) == true) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
// Key Table Full
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = FALSE;
- bKeyTableFull = TRUE;
+ pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
+ bKeyTableFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Key Table Full\n");
}
}
- if (bKeyTableFull == TRUE) {
+ if (bKeyTableFull == true) {
wKeyCtl &= 0x7F00; // clear all key control filed
wKeyCtl |= (byKeyDecMode << 4);
wKeyCtl |= (byKeyDecMode);
@@ -686,7 +686,7 @@ static int hostap_set_encryption(PSDevice pDevice,
);
// set wep key
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = byKeyDecMode;
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
@@ -727,7 +727,7 @@ static int hostap_get_encryption(PSDevice pDevice,
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
iNodeIndex = 0;
} else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == FALSE) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_get_encryption: HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
return -EINVAL;
@@ -736,7 +736,7 @@ static int hostap_get_encryption(PSDevice pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_get_encryption: %d\n", iNodeIndex);
memset(param->u.crypt.seq, 0, 8);
for (ii = 0 ; ii < 8 ; ii++) {
- param->u.crypt.seq[ii] = (BYTE)pMgmt->sNodeDBTable[iNodeIndex].KeyRSC >> (ii * 8);
+ param->u.crypt.seq[ii] = (unsigned char)pMgmt->sNodeDBTable[iNodeIndex].KeyRSC >> (ii * 8);
}
return ret;
diff --git a/drivers/staging/vt6655/iocmd.h b/drivers/staging/vt6655/iocmd.h
index 60c0a362361..53c50c0fc81 100644
--- a/drivers/staging/vt6655/iocmd.h
+++ b/drivers/staging/vt6655/iocmd.h
@@ -37,7 +37,6 @@
#define DEF
#endif
-//typedef int BOOL;
//typedef uint32_t u32;
//typedef uint16_t u16;
//typedef uint8_t u8;
@@ -109,10 +108,10 @@ typedef enum tagWZONETYPE {
//
#pragma pack(1)
typedef struct tagSCmdRequest {
- U8 name[16];
+ u8 name[16];
void *data;
- U16 wResult;
- U16 wCmdCode;
+ u16 wResult;
+ u16 wCmdCode;
} SCmdRequest, *PSCmdRequest;
//
@@ -121,7 +120,7 @@ typedef struct tagSCmdRequest {
typedef struct tagSCmdScan {
- U8 ssid[SSID_MAXLEN + 2];
+ u8 ssid[SSID_MAXLEN + 2];
} SCmdScan, *PSCmdScan;
@@ -132,12 +131,12 @@ typedef struct tagSCmdScan {
typedef struct tagSCmdBSSJoin {
- U16 wBSSType;
- U16 wBBPType;
- U8 ssid[SSID_MAXLEN + 2];
- U32 uChannel;
- BOOL bPSEnable;
- BOOL bShareKeyAuth;
+ u16 wBSSType;
+ u16 wBBPType;
+ u8 ssid[SSID_MAXLEN + 2];
+ u32 uChannel;
+ bool bPSEnable;
+ bool bShareKeyAuth;
} SCmdBSSJoin, *PSCmdBSSJoin;
@@ -147,7 +146,7 @@ typedef struct tagSCmdBSSJoin {
typedef struct tagSCmdZoneTypeSet {
- BOOL bWrite;
+ bool bWrite;
WZONETYPE ZoneType;
} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
@@ -155,33 +154,33 @@ typedef struct tagSCmdZoneTypeSet {
#ifdef WPA_SM_Transtatus
typedef struct tagSWPAResult {
char ifname[100];
- U8 proto;
- U8 key_mgmt;
- U8 eap_type;
- BOOL authenticated;
+ u8 proto;
+ u8 key_mgmt;
+ u8 eap_type;
+ bool authenticated;
} SWPAResult, *PSWPAResult;
#endif
typedef struct tagSCmdStartAP {
- U16 wBSSType;
- U16 wBBPType;
- U8 ssid[SSID_MAXLEN + 2];
- U32 uChannel;
- U32 uBeaconInt;
- BOOL bShareKeyAuth;
- U8 byBasicRate;
+ u16 wBSSType;
+ u16 wBBPType;
+ u8 ssid[SSID_MAXLEN + 2];
+ u32 uChannel;
+ u32 uBeaconInt;
+ bool bShareKeyAuth;
+ u8 byBasicRate;
} SCmdStartAP, *PSCmdStartAP;
typedef struct tagSCmdSetWEP {
- BOOL bEnableWep;
- U8 byKeyIndex;
- U8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
- BOOL bWepKeyAvailable[WEP_NKEYS];
- U32 auWepKeyLength[WEP_NKEYS];
+ bool bEnableWep;
+ u8 byKeyIndex;
+ u8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
+ bool bWepKeyAvailable[WEP_NKEYS];
+ u32 auWepKeyLength[WEP_NKEYS];
} SCmdSetWEP, *PSCmdSetWEP;
@@ -189,39 +188,39 @@ typedef struct tagSCmdSetWEP {
typedef struct tagSBSSIDItem {
- U32 uChannel;
- U8 abyBSSID[BSSID_LEN];
- U8 abySSID[SSID_MAXLEN + 1];
+ u32 uChannel;
+ u8 abyBSSID[BSSID_LEN];
+ u8 abySSID[SSID_MAXLEN + 1];
//2006-1116-01,<Modify> by NomadZhao
- //U16 wBeaconInterval;
- //U16 wCapInfo;
- //U8 byNetType;
- U8 byNetType;
- U16 wBeaconInterval;
- U16 wCapInfo; // for address of byNetType at align 4
+ //u16 wBeaconInterval;
+ //u16 wCapInfo;
+ //u8 byNetType;
+ u8 byNetType;
+ u16 wBeaconInterval;
+ u16 wCapInfo; // for address of byNetType at align 4
- BOOL bWEPOn;
- U32 uRSSI;
+ bool bWEPOn;
+ u32 uRSSI;
} SBSSIDItem;
typedef struct tagSBSSIDList {
- U32 uItem;
+ u32 uItem;
SBSSIDItem sBSSIDList[0];
} SBSSIDList, *PSBSSIDList;
typedef struct tagSCmdLinkStatus {
- BOOL bLink;
- U16 wBSSType;
- U8 byState;
- U8 abyBSSID[BSSID_LEN];
- U8 abySSID[SSID_MAXLEN + 2];
- U32 uChannel;
- U32 uLinkRate;
+ bool bLink;
+ u16 wBSSType;
+ u8 byState;
+ u8 abyBSSID[BSSID_LEN];
+ u8 abySSID[SSID_MAXLEN + 2];
+ u32 uChannel;
+ u32 uLinkRate;
} SCmdLinkStatus, *PSCmdLinkStatus;
@@ -229,18 +228,18 @@ typedef struct tagSCmdLinkStatus {
// 802.11 counter
//
typedef struct tagSDot11MIBCount {
- U32 TransmittedFragmentCount;
- U32 MulticastTransmittedFrameCount;
- U32 FailedCount;
- U32 RetryCount;
- U32 MultipleRetryCount;
- U32 RTSSuccessCount;
- U32 RTSFailureCount;
- U32 ACKFailureCount;
- U32 FrameDuplicateCount;
- U32 ReceivedFragmentCount;
- U32 MulticastReceivedFrameCount;
- U32 FCSErrorCount;
+ u32 TransmittedFragmentCount;
+ u32 MulticastTransmittedFrameCount;
+ u32 FailedCount;
+ u32 RetryCount;
+ u32 MultipleRetryCount;
+ u32 RTSSuccessCount;
+ u32 RTSFailureCount;
+ u32 ACKFailureCount;
+ u32 FrameDuplicateCount;
+ u32 ReceivedFragmentCount;
+ u32 MulticastReceivedFrameCount;
+ u32 FCSErrorCount;
} SDot11MIBCount, *PSDot11MIBCount;
@@ -252,129 +251,129 @@ typedef struct tagSStatMIBCount {
//
// ISR status count
//
- U32 dwIsrTx0OK;
- U32 dwIsrTx1OK;
- U32 dwIsrBeaconTxOK;
- U32 dwIsrRxOK;
- U32 dwIsrTBTTInt;
- U32 dwIsrSTIMERInt;
- U32 dwIsrUnrecoverableError;
- U32 dwIsrSoftInterrupt;
- U32 dwIsrRxNoBuf;
+ u32 dwIsrTx0OK;
+ u32 dwIsrTx1OK;
+ u32 dwIsrBeaconTxOK;
+ u32 dwIsrRxOK;
+ u32 dwIsrTBTTInt;
+ u32 dwIsrSTIMERInt;
+ u32 dwIsrUnrecoverableError;
+ u32 dwIsrSoftInterrupt;
+ u32 dwIsrRxNoBuf;
/////////////////////////////////////
- U32 dwIsrUnknown; // unknown interrupt count
+ u32 dwIsrUnknown; // unknown interrupt count
// RSR status count
//
- U32 dwRsrFrmAlgnErr;
- U32 dwRsrErr;
- U32 dwRsrCRCErr;
- U32 dwRsrCRCOk;
- U32 dwRsrBSSIDOk;
- U32 dwRsrADDROk;
- U32 dwRsrICVOk;
- U32 dwNewRsrShortPreamble;
- U32 dwRsrLong;
- U32 dwRsrRunt;
-
- U32 dwRsrRxControl;
- U32 dwRsrRxData;
- U32 dwRsrRxManage;
-
- U32 dwRsrRxPacket;
- U32 dwRsrRxOctet;
- U32 dwRsrBroadcast;
- U32 dwRsrMulticast;
- U32 dwRsrDirected;
+ u32 dwRsrFrmAlgnErr;
+ u32 dwRsrErr;
+ u32 dwRsrCRCErr;
+ u32 dwRsrCRCOk;
+ u32 dwRsrBSSIDOk;
+ u32 dwRsrADDROk;
+ u32 dwRsrICVOk;
+ u32 dwNewRsrShortPreamble;
+ u32 dwRsrLong;
+ u32 dwRsrRunt;
+
+ u32 dwRsrRxControl;
+ u32 dwRsrRxData;
+ u32 dwRsrRxManage;
+
+ u32 dwRsrRxPacket;
+ u32 dwRsrRxOctet;
+ u32 dwRsrBroadcast;
+ u32 dwRsrMulticast;
+ u32 dwRsrDirected;
// 64-bit OID
- U32 ullRsrOK;
+ u32 ullRsrOK;
// for some optional OIDs (64 bits) and DMI support
- U32 ullRxBroadcastBytes;
- U32 ullRxMulticastBytes;
- U32 ullRxDirectedBytes;
- U32 ullRxBroadcastFrames;
- U32 ullRxMulticastFrames;
- U32 ullRxDirectedFrames;
-
- U32 dwRsrRxFragment;
- U32 dwRsrRxFrmLen64;
- U32 dwRsrRxFrmLen65_127;
- U32 dwRsrRxFrmLen128_255;
- U32 dwRsrRxFrmLen256_511;
- U32 dwRsrRxFrmLen512_1023;
- U32 dwRsrRxFrmLen1024_1518;
+ u32 ullRxBroadcastBytes;
+ u32 ullRxMulticastBytes;
+ u32 ullRxDirectedBytes;
+ u32 ullRxBroadcastFrames;
+ u32 ullRxMulticastFrames;
+ u32 ullRxDirectedFrames;
+
+ u32 dwRsrRxFragment;
+ u32 dwRsrRxFrmLen64;
+ u32 dwRsrRxFrmLen65_127;
+ u32 dwRsrRxFrmLen128_255;
+ u32 dwRsrRxFrmLen256_511;
+ u32 dwRsrRxFrmLen512_1023;
+ u32 dwRsrRxFrmLen1024_1518;
// TSR0,1 status count
//
- U32 dwTsrTotalRetry[2]; // total collision retry count
- U32 dwTsrOnceRetry[2]; // this packet only occur one collision
- U32 dwTsrMoreThanOnceRetry[2]; // this packet occur more than one collision
- U32 dwTsrRetry[2]; // this packet has ever occur collision,
+ u32 dwTsrTotalRetry[2]; // total collision retry count
+ u32 dwTsrOnceRetry[2]; // this packet only occur one collision
+ u32 dwTsrMoreThanOnceRetry[2]; // this packet occur more than one collision
+ u32 dwTsrRetry[2]; // this packet has ever occur collision,
// that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- U32 dwTsrACKData[2];
- U32 dwTsrErr[2];
- U32 dwAllTsrOK[2];
- U32 dwTsrRetryTimeout[2];
- U32 dwTsrTransmitTimeout[2];
-
- U32 dwTsrTxPacket[2];
- U32 dwTsrTxOctet[2];
- U32 dwTsrBroadcast[2];
- U32 dwTsrMulticast[2];
- U32 dwTsrDirected[2];
+ u32 dwTsrACKData[2];
+ u32 dwTsrErr[2];
+ u32 dwAllTsrOK[2];
+ u32 dwTsrRetryTimeout[2];
+ u32 dwTsrTransmitTimeout[2];
+
+ u32 dwTsrTxPacket[2];
+ u32 dwTsrTxOctet[2];
+ u32 dwTsrBroadcast[2];
+ u32 dwTsrMulticast[2];
+ u32 dwTsrDirected[2];
// RD/TD count
- U32 dwCntRxFrmLength;
- U32 dwCntTxBufLength;
+ u32 dwCntRxFrmLength;
+ u32 dwCntTxBufLength;
- U8 abyCntRxPattern[16];
- U8 abyCntTxPattern[16];
+ u8 abyCntRxPattern[16];
+ u8 abyCntTxPattern[16];
// Software check....
- U32 dwCntRxDataErr; // rx buffer data software compare CRC err count
- U32 dwCntDecryptErr; // rx buffer data software compare CRC err count
- U32 dwCntRxICVErr; // rx buffer data software compare CRC err count
- U32 idxRxErrorDesc; // index for rx data error RD
+ u32 dwCntRxDataErr; // rx buffer data software compare CRC err count
+ u32 dwCntDecryptErr; // rx buffer data software compare CRC err count
+ u32 dwCntRxICVErr; // rx buffer data software compare CRC err count
+ u32 idxRxErrorDesc; // index for rx data error RD
// 64-bit OID
- U32 ullTsrOK[2];
+ u32 ullTsrOK[2];
// for some optional OIDs (64 bits) and DMI support
- U32 ullTxBroadcastFrames[2];
- U32 ullTxMulticastFrames[2];
- U32 ullTxDirectedFrames[2];
- U32 ullTxBroadcastBytes[2];
- U32 ullTxMulticastBytes[2];
- U32 ullTxDirectedBytes[2];
+ u32 ullTxBroadcastFrames[2];
+ u32 ullTxMulticastFrames[2];
+ u32 ullTxDirectedFrames[2];
+ u32 ullTxBroadcastBytes[2];
+ u32 ullTxMulticastBytes[2];
+ u32 ullTxDirectedBytes[2];
} SStatMIBCount, *PSStatMIBCount;
typedef struct tagSNodeItem {
// STA info
- U16 wAID;
- U8 abyMACAddr[6];
- U16 wTxDataRate;
- U16 wInActiveCount;
- U16 wEnQueueCnt;
- U16 wFlags;
- BOOL bPWBitOn;
- U8 byKeyIndex;
- U16 wWepKeyLength;
- U8 abyWepKey[WEP_KEYMAXLEN];
+ u16 wAID;
+ u8 abyMACAddr[6];
+ u16 wTxDataRate;
+ u16 wInActiveCount;
+ u16 wEnQueueCnt;
+ u16 wFlags;
+ bool bPWBitOn;
+ u8 byKeyIndex;
+ u16 wWepKeyLength;
+ u8 abyWepKey[WEP_KEYMAXLEN];
// Auto rate fallback vars
- BOOL bIsInFallback;
- U32 uTxFailures;
- U32 uTxAttempts;
- U16 wFailureRatio;
+ bool bIsInFallback;
+ u32 uTxFailures;
+ u32 uTxAttempts;
+ u16 wFailureRatio;
} SNodeItem;
typedef struct tagSNodeList {
- U32 uItem;
+ u32 uItem;
SNodeItem sNodeList[0];
} SNodeList, *PSNodeList;
@@ -383,7 +382,7 @@ typedef struct tagSNodeList {
typedef struct tagSCmdValue {
- U32 dwValue;
+ u32 dwValue;
} SCmdValue, *PSCmdValue;
@@ -418,46 +417,46 @@ enum {
struct viawget_hostapd_param {
- U32 cmd;
- U8 sta_addr[6];
+ u32 cmd;
+ u8 sta_addr[6];
union {
struct {
- U16 aid;
- U16 capability;
- U8 tx_supp_rates;
+ u16 aid;
+ u16 capability;
+ u8 tx_supp_rates;
} add_sta;
struct {
- U32 inactive_sec;
+ u32 inactive_sec;
} get_info_sta;
struct {
- U8 alg;
- U32 flags;
- U32 err;
- U8 idx;
- U8 seq[8];
- U16 key_len;
- U8 key[0];
+ u8 alg;
+ u32 flags;
+ u32 err;
+ u8 idx;
+ u8 seq[8];
+ u16 key_len;
+ u8 key[0];
} crypt;
struct {
- U32 flags_and;
- U32 flags_or;
+ u32 flags_and;
+ u32 flags_or;
} set_flags_sta;
struct {
- U16 rid;
- U16 len;
- U8 data[0];
+ u16 rid;
+ u16 len;
+ u8 data[0];
} rid;
struct {
- U8 len;
- U8 data[0];
+ u8 len;
+ u8 data[0];
} generic_elem;
struct {
- U16 cmd;
- U16 reason_code;
+ u16 cmd;
+ u16 reason_code;
} mlme;
struct {
- U8 ssid_len;
- U8 ssid[32];
+ u8 ssid_len;
+ u8 ssid[32];
} scan_req;
} u;
};
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 404287c6025..5624a41e3d5 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -70,16 +70,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
SNodeList sNodeList;
PSBSSIDList pList;
PSNodeList pNodeList;
- UINT cbListCount;
+ unsigned int cbListCount;
PKnownBSS pBSS;
PKnownNodeDB pNode;
- UINT ii, jj;
+ unsigned int ii, jj;
SCmdLinkStatus sLinkStatus;
- BYTE abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- DWORD dwKeyIndex= 0;
- BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- LONG ldBm;
+ unsigned char abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+ unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned long dwKeyIndex= 0;
+ unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ long ldBm;
pReq->wResult = 0;
@@ -99,17 +99,17 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
}
- if (pDevice->bMACSuspend == TRUE) {
- if (pDevice->bRadioOff == TRUE)
+ if (pDevice->bMACSuspend == true) {
+ if (pDevice->bRadioOff == true)
CARDbRadioPowerOn(pDevice);
vMgrTimerInit(pDevice);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = FALSE;
+ pDevice->bMACSuspend = false;
}
spin_lock_irq(&pDevice->lock);
if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((void *)pDevice, FALSE);
+ BSSvClearBSSList((void *)pDevice, false);
else
BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
@@ -130,7 +130,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
break;
};
- if(sZoneTypeCmd.bWrite==TRUE) {
+ if(sZoneTypeCmd.bWrite==true) {
//////write zonetype
if(sZoneTypeCmd.ZoneType == ZoneType_USA) {
//set to USA
@@ -147,7 +147,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
}
else {
///////read zonetype
- BYTE zonetype=0;
+ unsigned char zonetype=0;
if(zonetype == 0x00) { //USA
@@ -174,13 +174,13 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
case WLAN_CMD_BSS_JOIN:
- if (pDevice->bMACSuspend == TRUE) {
- if (pDevice->bRadioOff == TRUE)
+ if (pDevice->bMACSuspend == true) {
+ if (pDevice->bRadioOff == true)
CARDbRadioPowerOn(pDevice);
vMgrTimerInit(pDevice);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = FALSE;
+ pDevice->bMACSuspend = false;
}
if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
@@ -199,7 +199,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ioct set to STA mode\n");
}
- if (sJoinCmd.bPSEnable == TRUE) {
+ if (sJoinCmd.bPSEnable == true) {
pDevice->ePSMode = WMAC_POWER_FAST;
// pDevice->ePSMode = WMAC_POWER_MAX;
pMgmt->wListenInterval = 2;
@@ -211,12 +211,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Power Saving Off \n");
}
- if (sJoinCmd.bShareKeyAuth == TRUE){
- pMgmt->bShareKeyAlgorithm = TRUE;
+ if (sJoinCmd.bShareKeyAuth == true){
+ pMgmt->bShareKeyAlgorithm = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
}
else {
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
}
pDevice->uChannel = sJoinCmd.uChannel;
@@ -235,8 +235,8 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
result = -EFAULT;
break;
};
- if (sWEPCmd.bEnableWep != TRUE) {
- pDevice->bEncryptionEnable = FALSE;
+ if (sWEPCmd.bEnableWep != true) {
+ pDevice->bEncryptionEnable = false;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
MACvDisableDefaultKey(pDevice->PortOffset);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable. \n");
@@ -257,15 +257,15 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
dwKeyIndex,
sWEPCmd.auWepKeyLength[ii],
NULL,
- (PBYTE)&sWEPCmd.abyWepKey[ii][0],
+ (unsigned char *)&sWEPCmd.abyWepKey[ii][0],
KEY_CTL_WEP,
pDevice->PortOffset,
pDevice->byLocalID);
}
}
pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
- pDevice->bTransmitKey = TRUE;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bTransmitKey = true;
+ pDevice->bEncryptionEnable = true;
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
break;
@@ -286,8 +286,8 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
sLinkStatus.byState = ADHOC_STARTED;
sLinkStatus.uChannel = pMgmt->uCurrChannel;
- if (pDevice->bLinkPass == TRUE) {
- sLinkStatus.bLink = TRUE;
+ if (pDevice->bLinkPass == true) {
+ sLinkStatus.bLink = true;
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
@@ -295,7 +295,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Link Success ! \n");
}
else {
- sLinkStatus.bLink = FALSE;
+ sLinkStatus.bLink = false;
}
if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
result = -EFAULT;
@@ -340,8 +340,8 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
// pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI;
- RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
- pList->sBSSIDList[ii].uRSSI = (UINT)ldBm;
+ RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
+ pList->sBSSIDList[ii].uRSSI = (unsigned int)ldBm;
memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
@@ -353,10 +353,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pList->sBSSIDList[ii].byNetType = ADHOC;
}
if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
- pList->sBSSIDList[ii].bWEPOn = TRUE;
+ pList->sBSSIDList[ii].bWEPOn = true;
}
else {
- pList->sBSSIDList[ii].bWEPOn = FALSE;
+ pList->sBSSIDList[ii].bWEPOn = false;
}
ii ++;
if (ii >= pList->uItem)
@@ -391,16 +391,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == FALSE) {
+ if (pDevice->bRadioOff == false) {
CARDbRadioPowerOff(pDevice);
}
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
del_timer(&pDevice->sTimerCommand);
del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bCmdRunning = FALSE;
- pDevice->bMACSuspend = TRUE;
+ pDevice->bCmdRunning = false;
+ pDevice->bMACSuspend = true;
MACvIntDisable(pDevice->PortOffset);
spin_unlock_irq(&pDevice->lock);
@@ -410,13 +410,13 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_START_MAC\n");
- if (pDevice->bMACSuspend == TRUE) {
- if (pDevice->bRadioOff == TRUE)
+ if (pDevice->bMACSuspend == true) {
+ if (pDevice->bRadioOff == true)
CARDbRadioPowerOn(pDevice);
vMgrTimerInit(pDevice);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = FALSE;
+ pDevice->bMACSuspend = false;
}
break;
@@ -458,11 +458,11 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
};
if (sValue.dwValue == 1) {
- pDevice->bEnable8021x = TRUE;
+ pDevice->bEnable8021x = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable 802.1x\n");
}
else {
- pDevice->bEnable8021x = FALSE;
+ pDevice->bEnable8021x = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable 802.1x\n");
}
@@ -478,11 +478,11 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
};
if (sValue.dwValue == 1) {
- pDevice->bEnableHostWEP = TRUE;
+ pDevice->bEnableHostWEP = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HostWEP\n");
}
else {
- pDevice->bEnableHostWEP = FALSE;
+ pDevice->bEnableHostWEP = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HostWEP\n");
}
@@ -498,11 +498,11 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
if (sValue.dwValue == 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
- pDevice->bWPADEVUp = TRUE;
+ pDevice->bWPADEVUp = true;
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
- pDevice->bWPADEVUp = FALSE;
+ pDevice->bWPADEVUp = false;
}
break;
@@ -510,7 +510,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
case WLAN_CMD_AP_START:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_AP_START\n");
- if (pDevice->bRadioOff == TRUE) {
+ if (pDevice->bRadioOff == true) {
CARDbRadioPowerOn(pDevice);
vMgrTimerInit(pDevice);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
@@ -554,12 +554,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
else
pMgmt->wIBSSBeaconPeriod = 100;
- if (sStartAPCmd.bShareKeyAuth == TRUE){
- pMgmt->bShareKeyAlgorithm = TRUE;
+ if (sStartAPCmd.bShareKeyAuth == true){
+ pMgmt->bShareKeyAlgorithm = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Share Key \n");
}
else {
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Open System \n");
}
memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
@@ -635,9 +635,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pNodeList->sNodeList[jj].wAID = pNode->wAID;
memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
- pNodeList->sNodeList[jj].wInActiveCount = (WORD)pNode->uInActiveCount;
- pNodeList->sNodeList[jj].wEnQueueCnt = (WORD)pNode->wEnQueueCnt;
- pNodeList->sNodeList[jj].wFlags = (WORD)pNode->dwFlags;
+ pNodeList->sNodeList[jj].wInActiveCount = (unsigned short)pNode->uInActiveCount;
+ pNodeList->sNodeList[jj].wEnQueueCnt = (unsigned short)pNode->wEnQueueCnt;
+ pNodeList->sNodeList[jj].wFlags = (unsigned short)pNode->dwFlags;
pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
@@ -652,7 +652,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
- pNodeList->sNodeList[jj].wFailureRatio = (WORD)pNode->uFailureRatio;
+ pNodeList->sNodeList[jj].wFailureRatio = (unsigned short)pNode->uFailureRatio;
jj ++;
if (jj >= pNodeList->uItem)
break;
@@ -672,14 +672,14 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
wpa_Result.proto = 0;
wpa_Result.key_mgmt = 0;
wpa_Result.eap_type = 0;
- wpa_Result.authenticated = FALSE;
- pDevice->fWPA_Authened = FALSE;
+ wpa_Result.authenticated = false;
+ pDevice->fWPA_Authened = false;
if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
result = -EFAULT;
break;
}
-if(wpa_Result.authenticated==TRUE) {
+if(wpa_Result.authenticated==true) {
#ifdef SndEvt_ToAPI
{
union iwreq_data wrqu;
@@ -692,7 +692,7 @@ if(wpa_Result.authenticated==TRUE) {
wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
}
#endif
- pDevice->fWPA_Authened = TRUE; //is successful peer to wpa_Result.authenticated?
+ pDevice->fWPA_Authened = true; //is successful peer to wpa_Result.authenticated?
}
//printk("get private wpa_supplicant announce WPA SM\n");
@@ -700,7 +700,7 @@ if(wpa_Result.authenticated==TRUE) {
//printk("wpa-->proto=%d\n",wpa_Result.proto);
//printk("wpa-->key-mgmt=%d\n",wpa_Result.key_mgmt);
//printk("wpa-->eap_type=%d\n",wpa_Result.eap_type);
- //printk("wpa-->authenticated is %s\n",(wpa_Result.authenticated==TRUE)?"TRUE":"FALSE");
+ //printk("wpa-->authenticated is %s\n",(wpa_Result.authenticated==true)?"true":"false");
pReq->wResult = 0;
break;
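
The SndEvt_ToAPI branch above reports a completed WPA handshake to user space via an IWEVCUSTOM wireless event. A minimal sketch of that notification path (the wrapper name and the 'msg' argument are hypothetical; the driver itself passes the current SSID buffer as the event payload):

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/iw_handler.h>

/* Hypothetical wrapper around the notification used above. */
static void notify_wpa_authenticated(struct net_device *dev, const char *msg)
{
	union iwreq_data wrqu;

	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.length = strlen(msg);
	wireless_send_event(dev, IWEVCUSTOM, &wrqu, (char *)msg);
}
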
@@ -717,9 +717,9 @@ if(wpa_Result.authenticated==TRUE) {
void
vConfigWEPKey (
PSDevice pDevice,
- DWORD dwKeyIndex,
- PBYTE pbyKey,
- ULONG uKeyLength
+ unsigned long dwKeyIndex,
+ unsigned char *pbyKey,
+ unsigned long uKeyLength
)
{
int ii;
@@ -728,15 +728,15 @@ vConfigWEPKey (
memset(&pDevice->abyWepKey[dwKeyIndex][0], 0, WLAN_WEPMAX_KEYLEN);
memcpy(&pDevice->abyWepKey[dwKeyIndex][0], pbyKey, uKeyLength);
- pDevice->bWepKeyAvailable[dwKeyIndex] = TRUE;
+ pDevice->bWepKeyAvailable[dwKeyIndex] = true;
pDevice->auWepKeyLength[dwKeyIndex] = uKeyLength;
MACvSetDefaultKeyEntry(pDevice->PortOffset, uKeyLength, dwKeyIndex,
- (PDWORD) &(pDevice->abyWepKey[dwKeyIndex][0]), pDevice->byLocalID);
+ (unsigned long *) &(pDevice->abyWepKey[dwKeyIndex][0]), pDevice->byLocalID);
if (pDevice->eEncryptionStatus < Ndis802_11EncryptionNotSupported) {
for(ii=0; ii<MAX_GROUP_KEY; ii++) {
- if ((pDevice->bWepKeyAvailable[ii] == TRUE) &&
+ if ((pDevice->bWepKeyAvailable[ii] == true) &&
(pDevice->auWepKeyLength[ii] == WLAN_WEP232_KEYLEN)) {
pDevice->uCurrentWEPMode = TX_WEP_SW232;
MACvDisableDefaultKey(pDevice->PortOffset);
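
The ioctl.c hunks above are a mechanical type cleanup: the driver's Windows-style typedefs give way to plain C types, and BOOL/TRUE/FALSE to the kernel's bool/true/false. A minimal sketch of the mapping these substitutions imply (the legacy typedef definitions live in the driver's private headers and are not quoted here, so the left-hand column is inferred from the hunks rather than copied):

#include <linux/types.h>

/*
 * Mapping inferred from the substitutions above -- a sketch, not the
 * literal contents of the driver's private type header:
 *
 *   BOOL / TRUE / FALSE  ->  bool / true / false
 *   BYTE / PBYTE         ->  unsigned char / unsigned char *
 *   WORD                 ->  unsigned short
 *   DWORD / PDWORD       ->  unsigned long / unsigned long *
 *   ULONG                ->  unsigned long
 *   UINT                 ->  unsigned int
 */

/* Shape of the resulting boolean code (hypothetical helper, not from
 * this patch); assigning the condition directly also drops the explicit
 * "== true"/"== TRUE" comparisons the converted code still carries. */
static bool privacy_enabled(unsigned short cap_info, unsigned short privacy_bit)
{
	return (cap_info & privacy_bit) != 0;
}
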
diff --git a/drivers/staging/vt6655/ioctl.h b/drivers/staging/vt6655/ioctl.h
index 0d10c2a923c..ba85015c11b 100644
--- a/drivers/staging/vt6655/ioctl.h
+++ b/drivers/staging/vt6655/ioctl.h
@@ -45,9 +45,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq);
/*
void vConfigWEPKey (
PSDevice pDevice,
- DWORD dwKeyIndex,
- PBYTE pbyKey,
- ULONG uKeyLength
+ unsigned long dwKeyIndex,
+ unsigned char *pbyKey,
+ unsigned long uKeyLength
);
*/
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index cf69034fc0f..43227617aab 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -45,7 +45,7 @@
#endif
#include <net/iw_handler.h>
-extern WORD TxRate_iwconfig;//2008-5-8 <add> by chester
+extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
/*--------------------- Static Definitions -------------------------*/
@@ -99,16 +99,16 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
else
pDevice->scStatistic.LinkQuality = (96-pDevice->byCurrSQ)*100/76;
}
- if(pDevice->bLinkPass !=TRUE)
+ if(pDevice->bLinkPass !=true)
pDevice->scStatistic.LinkQuality = 0;
#endif
if(pDevice->scStatistic.LinkQuality > 100)
pDevice->scStatistic.LinkQuality = 100;
- pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
+ pDevice->wstats.qual.qual =(unsigned char) pDevice->scStatistic.LinkQuality;
#else
pDevice->wstats.qual.qual = pDevice->byCurrSQ;
#endif
- RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
+ RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
//pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI;
pDevice->wstats.qual.noise = 0;
@@ -116,7 +116,7 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.discard.nwid = 0;
pDevice->wstats.discard.code = 0;
pDevice->wstats.discard.fragment = 0;
- pDevice->wstats.discard.retries = (U32)pDevice->scStatistic.dwTsrErr;
+ pDevice->wstats.discard.retries = (unsigned long)pDevice->scStatistic.dwTsrErr;
pDevice->wstats.discard.misc = 0;
pDevice->wstats.miss.beacon = 0;
@@ -175,7 +175,7 @@ int iwctl_siwscan(struct net_device *dev,
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct iw_scan_req *req = (struct iw_scan_req *)extra;
- BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
PWLAN_IE_SSID pItemSSID=NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSCAN \n");
@@ -309,7 +309,7 @@ int iwctl_giwscan(struct net_device *dev,
//ADD quality
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVQUAL;
- RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
+ RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
iwe.u.qual.level = ldBm;
iwe.u.qual.noise = 0;
//2008-0409-01, <Add> by Einsn Liu
@@ -426,7 +426,7 @@ int iwctl_siwfreq(struct net_device *dev,
pDevice->uChannel = channel;
//2007-0207-04,<Add> by EinsnLiu
//Make change effect at once
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
}
@@ -489,7 +489,7 @@ int iwctl_siwmode(struct net_device *dev,
if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n");
@@ -499,7 +499,7 @@ int iwctl_siwmode(struct net_device *dev,
if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n");
@@ -513,7 +513,7 @@ int iwctl_siwmode(struct net_device *dev,
if (pMgmt->eConfigMode != WMAC_CONFIG_AP) {
pMgmt->eConfigMode = WMAC_CONFIG_AP;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to Access Point \n");
@@ -577,7 +577,7 @@ int iwctl_giwrange(struct net_device *dev,
{
struct iw_range *range = (struct iw_range *) extra;
int i,k;
- BYTE abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
+ unsigned char abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRANGE \n");
@@ -688,7 +688,7 @@ int iwctl_siwap(struct net_device *dev,
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
int rc = 0;
- BYTE ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
+ unsigned char ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAP \n");
if (pMgmt->eScanState == WMAC_IS_SCANNING) {
@@ -701,12 +701,12 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
else {
memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);
//2008-0409-05, <Add> by Einsn Liu
- if((pDevice->bLinkPass == TRUE) &&
+ if((pDevice->bLinkPass == true) &&
(memcmp(pMgmt->abyDesireBSSID, pMgmt->abyCurrBSSID, 6)== 0)){
return rc;
}
//mike :add
- if ((IS_BROADCAST_ADDRESS(pMgmt->abyDesireBSSID)) ||
+ if ((is_broadcast_ether_addr(pMgmt->abyDesireBSSID)) ||
(memcmp(pMgmt->abyDesireBSSID, ZeroBSSID, 6) == 0)){
PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n");
return rc;
@@ -714,10 +714,10 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
//mike add: if the desired AP uses a hidden ssid (there are two identical BSSIDs in the list),
// then ignore it, because we don't know which one to connect with
{
- UINT ii , uSameBssidNum=0;
+ unsigned int ii , uSameBssidNum=0;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID,pMgmt->abyDesireBSSID)) {
+ !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyDesireBSSID)) {
uSameBssidNum++;
}
}
@@ -728,7 +728,7 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
}
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
}
return rc;
@@ -751,7 +751,7 @@ int iwctl_giwap(struct net_device *dev,
memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
//2008-0410,<Modify> by Einsn Liu
- if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
+ if ((pDevice->bLinkPass == false) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
memset(wrq->sa_data, 0, 6);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -830,11 +830,11 @@ int iwctl_siwessid(struct net_device *dev,
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
PWLAN_IE_SSID pItemSSID;
//2008-0409-05, <Add> by Einsn Liu
- BYTE len;
+ unsigned char len;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWESSID \n");
- pDevice->fWPA_Authened = FALSE;
+ pDevice->fWPA_Authened = false;
if (pMgmt->eScanState == WMAC_IS_SCANNING) {
// In scanning..
printk("SIOCSIWESSID(??)-->In scanning...\n");
@@ -848,7 +848,7 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
PRINT_K("set essid to 'any' \n");
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
//Unknown desired AP, so there should be no need to associate here
- //if(pDevice->bWPASuppWextEnabled == TRUE) {
+ //if(pDevice->bWPASuppWextEnabled == true) {
return 0;
// }
#endif
@@ -868,7 +868,7 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
printk("set essid to %s \n",pItemSSID->abySSID);
//2008-0409-05, <Add> by Einsn Liu
len=(pItemSSID->len > ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len)?pItemSSID->len:((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len;
- if((pDevice->bLinkPass == TRUE) &&
+ if((pDevice->bLinkPass == true) &&
(memcmp(pItemSSID->abySSID,((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,len)==0))
return 0;
@@ -881,12 +881,12 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
//Wext will issue another siwap command to link with the desired AP,
//so there should be no need to associate here
- if(pDevice->bWPASuppWextEnabled == TRUE) {
+ if(pDevice->bWPASuppWextEnabled == true) {
/*******search if in hidden ssid mode ****/
{
PKnownBSS pCurr = NULL;
- BYTE abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- UINT ii , uSameBssidNum=0;
+ unsigned char abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned int ii , uSameBssidNum=0;
memcpy(abyTmpDesireSSID,pMgmt->abyDesireSSID,sizeof(abyTmpDesireSSID));
pCurr = BSSpSearchBSSList(pDevice,
@@ -906,7 +906,7 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
// by means of judging if there are two same BSSID exist in list ?
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
+ !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
uSameBssidNum++;
}
}
@@ -927,7 +927,7 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
}
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- pDevice->bCommit = TRUE;
+ pDevice->bCommit = true;
}
@@ -981,7 +981,7 @@ int iwctl_siwrate(struct net_device *dev,
int rc = 0;
u8 brate = 0;
int i;
- BYTE abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
+ unsigned char abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRATE \n");
@@ -1033,7 +1033,7 @@ int iwctl_siwrate(struct net_device *dev,
// Fixed mode
// One rate, fixed
printk("Rate Fix\n");
- pDevice->bFixRate = TRUE;
+ pDevice->bFixRate = true;
if ((pDevice->byBBType == BB_TYPE_11B)&& (brate > 3)) {
pDevice->uConnectionRate = 3;
}
@@ -1044,7 +1044,7 @@ int iwctl_siwrate(struct net_device *dev,
}
else {
- pDevice->bFixRate = FALSE;
+ pDevice->bFixRate = false;
pDevice->uConnectionRate = 13;
printk("auto rate:connection_rate is 13\n");
}
@@ -1068,11 +1068,11 @@ int iwctl_giwrate(struct net_device *dev,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRATE \n");
{
- BYTE abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
+ unsigned char abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
int brate = 0;
//2008-5-8 <modify> by chester
if(pDevice->bLinkPass){
-if(pDevice->bFixRate == TRUE){
+if(pDevice->bFixRate == true){
if (pDevice->uConnectionRate < 13) {
brate = abySupportedRates[pDevice->uConnectionRate];
}else {
@@ -1108,8 +1108,8 @@ else brate =0;
// brate = abySupportedRates[pDevice->wCurrentRate];
wrq->value = brate * 500000;
// If more than one rate, set auto
- if (pDevice->bFixRate == TRUE)
- wrq->fixed = TRUE;
+ if (pDevice->bFixRate == true)
+ wrq->fixed = true;
}
@@ -1294,7 +1294,7 @@ int iwctl_siwencode(struct net_device *dev,
{
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- DWORD dwKeyIndex = (DWORD)(wrq->flags & IW_ENCODE_INDEX);
+ unsigned long dwKeyIndex = (unsigned long)(wrq->flags & IW_ENCODE_INDEX);
int ii,uu, rc = 0;
int index = (wrq->flags & IW_ENCODE_INDEX);
@@ -1358,7 +1358,7 @@ if((wrq->flags & IW_ENCODE_DISABLED)==0){
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
KeybSetDefaultKey(&(pDevice->sKey),
- (DWORD)(dwKeyIndex | (1 << 31)),
+ (unsigned long)(dwKeyIndex | (1 << 31)),
wrq->length,
NULL,
pDevice->abyKey,
@@ -1368,38 +1368,38 @@ if((wrq->flags & IW_ENCODE_DISABLED)==0){
);
spin_unlock_irq(&pDevice->lock);
}
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
+ pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
pDevice->uKeyLength = wrq->length;
- pDevice->bTransmitKey = TRUE;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bTransmitKey = true;
+ pDevice->bEncryptionEnable = true;
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
}else if(index>0){
//when the length is 0 the request only changes the default transmit key index
//check that the new key has a non-zero length
- if(pDevice->bEncryptionEnable==FALSE)
+ if(pDevice->bEncryptionEnable==false)
{
rc = -EINVAL;
return rc;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Just set Default key Index:\n");
pkeytab=&(pDevice->sKey.KeyTable[MAX_KEY_TABLE-1]);
- if(pkeytab->GroupKey[(BYTE)dwKeyIndex].uKeyLength==0){
+ if(pkeytab->GroupKey[(unsigned char)dwKeyIndex].uKeyLength==0){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Default key len is 0\n");
rc = -EINVAL;
return rc;
}
- pDevice->byKeyIndex =(BYTE)dwKeyIndex;
+ pDevice->byKeyIndex =(unsigned char)dwKeyIndex;
pkeytab->dwGTKeyIndex =dwKeyIndex | (1 << 31);
- pkeytab->GroupKey[(BYTE)dwKeyIndex].dwKeyIndex=dwKeyIndex | (1 << 31);
+ pkeytab->GroupKey[(unsigned char)dwKeyIndex].dwKeyIndex=dwKeyIndex | (1 << 31);
}
}else {//disable the key
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n");
- if(pDevice->bEncryptionEnable==FALSE)
+ if(pDevice->bEncryptionEnable==false)
return 0;
- pMgmt->bShareKeyAlgorithm = FALSE;
- pDevice->bEncryptionEnable = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
+ pDevice->bEncryptionEnable = false;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
@@ -1450,7 +1450,7 @@ if((wrq->flags & IW_ENCODE_DISABLED)==0){
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
KeybSetDefaultKey(&(pDevice->sKey),
- (DWORD)(pDevice->byKeyIndex | (1 << 31)),
+ (unsigned long)(pDevice->byKeyIndex | (1 << 31)),
pDevice->uKeyLength,
NULL,
pDevice->abyKey,
@@ -1460,10 +1460,10 @@ if((wrq->flags & IW_ENCODE_DISABLED)==0){
);
spin_unlock_irq(&pDevice->lock);
}
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
+ pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
pDevice->uKeyLength = wrq->length;
- pDevice->bTransmitKey = TRUE;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bTransmitKey = true;
+ pDevice->bEncryptionEnable = true;
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
// Do we want to just set the transmit key index ?
@@ -1479,8 +1479,8 @@ if((wrq->flags & IW_ENCODE_DISABLED)==0){
if(wrq->flags & IW_ENCODE_DISABLED){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n");
- pMgmt->bShareKeyAlgorithm = FALSE;
- pDevice->bEncryptionEnable = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
+ pDevice->bEncryptionEnable = false;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
@@ -1493,11 +1493,11 @@ if((wrq->flags & IW_ENCODE_DISABLED)==0){
if(wrq->flags & IW_ENCODE_RESTRICTED) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & ShareKey System\n");
- pMgmt->bShareKeyAlgorithm = TRUE;
+ pMgmt->bShareKeyAlgorithm = true;
}
if(wrq->flags & IW_ENCODE_OPEN) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & Open System\n");
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
}
return rc;
}
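
Both siwencode paths above tag the index handed to KeybSetDefaultKey() (and the matching group-key bookkeeping) by OR-ing in (1 << 31). Because the literal 1 is a signed int, shifting it into bit 31 formally overflows; an unsigned spelling yields the same bit without that wrinkle. A sketch, with a hypothetical macro name:

#include <linux/bitops.h>

/* Same value as (1 << 31) in the calls above, but computed as an
 * unsigned shift: BIT(31) == (1UL << 31). */
#define VNT_KEY_MARK_DEFAULT_TX		BIT(31)
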
@@ -1515,7 +1515,7 @@ int iwctl_giwencode(struct net_device *dev,
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
int rc = 0;
char abyKey[WLAN_WEP232_KEYLEN];
- UINT index = (UINT)(wrq->flags & IW_ENCODE_INDEX);
+ unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
PSKeyItem pKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
@@ -1549,7 +1549,7 @@ int iwctl_giwencode(struct net_device *dev,
else
wrq->flags |= IW_ENCODE_OPEN;
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (BYTE)index , &pKey)){
+ if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)){
wrq->length = pKey->uKeyLength;
memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
//2007-0207-06,<Modify> by EinsnLiu
@@ -1584,7 +1584,7 @@ int iwctl_giwencode(struct net_device *dev,
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
char abyKey[WLAN_WEP232_KEYLEN];
- UINT index = (UINT)(wrq->flags & IW_ENCODE_INDEX);
+ unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
PSKeyItem pKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
@@ -1622,7 +1622,7 @@ int iwctl_giwencode(struct net_device *dev,
memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
}
- }else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (BYTE)index , &pKey)){
+ }else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)){
wrq->length = pKey->uKeyLength;
memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
@@ -1729,8 +1729,8 @@ int iwctl_giwsens(struct net_device *dev,
long ldBm;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSENS \n");
- if (pDevice->bLinkPass == TRUE) {
- RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
+ if (pDevice->bLinkPass == true) {
+ RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
wrq->value = ldBm;
}
else {
@@ -1763,7 +1763,7 @@ int iwctl_siwauth(struct net_device *dev,
wpa_version = wrq->value;
if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) {
PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
- //pDevice->bWPADevEnable = FALSE;
+ //pDevice->bWPADevEnable = false;
}
else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) {
PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
@@ -1771,7 +1771,7 @@ int iwctl_siwauth(struct net_device *dev,
else {
PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
}
- //pDevice->bWPASuppWextEnabled =TRUE;
+ //pDevice->bWPASuppWextEnabled =true;
break;
case IW_AUTH_CIPHER_PAIRWISE:
pairwise = wrq->value;
@@ -1818,9 +1818,9 @@ int iwctl_siwauth(struct net_device *dev,
break;
case IW_AUTH_80211_AUTH_ALG:
if(wrq->value==IW_AUTH_ALG_OPEN_SYSTEM){
- pMgmt->bShareKeyAlgorithm=FALSE;
+ pMgmt->bShareKeyAlgorithm=false;
}else if(wrq->value==IW_AUTH_ALG_SHARED_KEY){
- pMgmt->bShareKeyAlgorithm=TRUE;
+ pMgmt->bShareKeyAlgorithm=true;
}
break;
case IW_AUTH_WPA_ENABLED:
@@ -1833,13 +1833,13 @@ int iwctl_siwauth(struct net_device *dev,
break;
case IW_AUTH_PRIVACY_INVOKED:
pDevice->bEncryptionEnable = !!wrq->value;
- if(pDevice->bEncryptionEnable == FALSE){
+ if(pDevice->bEncryptionEnable == false){
wpa_version = 0;
pairwise = 0;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pMgmt->bShareKeyAlgorithm = FALSE;
- pMgmt->eAuthenMode = FALSE;
- //pDevice->bWPADevEnable = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
+ pMgmt->eAuthenMode = false;
+ //pDevice->bWPADevEnable = false;
}
break;
@@ -1852,9 +1852,9 @@ int iwctl_siwauth(struct net_device *dev,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise = %d\n",pairwise);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->eEncryptionStatus = %d\n",pDevice->eEncryptionStatus);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->eAuthenMode = %d\n",pMgmt->eAuthenMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADevEnable = %s\n",pDevice->bWPADevEnable?"TRUE":"FALSE");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"true":"false");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"true":"false");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADevEnable = %s\n",pDevice->bWPADevEnable?"true":"false");
*/
return ret;
}
@@ -2055,12 +2055,12 @@ if(param->u.wpa_key.alg_name == WPA_ALG_NONE) {
if( pDevice->bwextcount == 4) {
printk("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n");
pDevice->bwextcount=0;
- pDevice->bWPASuppWextEnabled = TRUE;
+ pDevice->bWPASuppWextEnabled = true;
}
//******
spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, TRUE);
+ ret = wpa_set_keys(pDevice, param, true);
spin_unlock_irq(&pDevice->lock);
error:
@@ -2096,10 +2096,10 @@ int iwctl_siwmlme(struct net_device *dev,
switch(mlme->cmd){
case IW_MLME_DEAUTH:
//this command seems to be not complete,please test it --einsnliu
- //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (PBYTE)&reason);
+ //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (unsigned char *)&reason);
break;
case IW_MLME_DISASSOC:
- if(pDevice->bLinkPass == TRUE){
+ if(pDevice->bLinkPass == true){
printk("iwctl_siwmlme--->send DISASSOCIATE\n");
//clear related flags
memset(pMgmt->abyDesireBSSID, 0xFF,6);
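
Besides the type changes, the iwctl.c hunks above retire two driver-local MAC-address macros in favor of the generic helpers from <linux/etherdevice.h>: IS_BROADCAST_ADDRESS() becomes is_broadcast_ether_addr(), and IS_ETH_ADDRESS_EQUAL(a, b) becomes !compare_ether_addr(a, b), since compare_ether_addr() returns 0 on a match. A small usage sketch (the wrapper names are hypothetical):

#include <linux/types.h>
#include <linux/etherdevice.h>

static bool bssid_matches(const unsigned char *a, const unsigned char *b)
{
	/* compare_ether_addr() returns 0 when the two addresses are
	 * equal, hence the negation used throughout this patch. */
	return !compare_ether_addr(a, b);
}

static bool bssid_is_wildcard(const unsigned char *bssid)
{
	/* true for ff:ff:ff:ff:ff:ff */
	return is_broadcast_ether_addr(bssid);
}
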
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index bfc5c509d90..0ff8d7bbf2a 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -59,22 +59,22 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
static void
-s_vCheckKeyTableValid (PSKeyManagement pTable, DWORD_PTR dwIoBase)
+s_vCheckKeyTableValid (PSKeyManagement pTable, unsigned long dwIoBase)
{
int i;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[0].bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[1].bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[2].bKeyValid == FALSE) &&
- (pTable->KeyTable[i].GroupKey[3].bKeyValid == FALSE)
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ (pTable->KeyTable[i].PairwiseKey.bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[0].bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[1].bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[2].bKeyValid == false) &&
+ (pTable->KeyTable[i].GroupKey[3].bKeyValid == false)
) {
- pTable->KeyTable[i].bInUse = FALSE;
+ pTable->KeyTable[i].bInUse = false;
pTable->KeyTable[i].wKeyCtl = 0;
- pTable->KeyTable[i].bSoftWEP = FALSE;
+ pTable->KeyTable[i].bSoftWEP = false;
MACvDisableKeyEntry(dwIoBase, i);
}
}
@@ -96,22 +96,22 @@ s_vCheckKeyTableValid (PSKeyManagement pTable, DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-void KeyvInitTable (PSKeyManagement pTable, DWORD_PTR dwIoBase)
+void KeyvInitTable (PSKeyManagement pTable, unsigned long dwIoBase)
{
int i;
int jj;
for (i=0;i<MAX_KEY_TABLE;i++) {
- pTable->KeyTable[i].bInUse = FALSE;
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ pTable->KeyTable[i].bInUse = false;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
pTable->KeyTable[i].PairwiseKey.pvKeyTable = (void *)&pTable->KeyTable[i];
for (jj=0; jj < MAX_GROUP_KEY; jj++) {
- pTable->KeyTable[i].GroupKey[jj].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[jj].bKeyValid = false;
pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (void *)&pTable->KeyTable[i];
}
pTable->KeyTable[i].wKeyCtl = 0;
pTable->KeyTable[i].dwGTKeyIndex = 0;
- pTable->KeyTable[i].bSoftWEP = FALSE;
+ pTable->KeyTable[i].bSoftWEP = false;
MACvDisableKeyEntry(dwIoBase, i);
}
}
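
KeyvInitTable() above now takes the I/O base as a plain unsigned long; it clears every software key slot and disables the matching hardware entry via MACvDisableKeyEntry(). A usage sketch with the retyped signature (the wrapper is hypothetical; in the driver the arguments are &pDevice->sKey and pDevice->PortOffset):

#include "key.h"

/* Wipe the whole key table, e.g. when the interface is reconfigured. */
static void reset_keys(PSKeyManagement key_table, unsigned long io_base)
{
	KeyvInitTable(key_table, io_base);
}
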
@@ -128,13 +128,13 @@ void KeyvInitTable (PSKeyManagement pTable, DWORD_PTR dwIoBase)
* Out:
* pKey - Key return
*
- * Return Value: TRUE if found otherwise FALSE
+ * Return Value: true if found otherwise false
*
*/
-BOOL KeybGetKey (
+bool KeybGetKey (
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyIndex,
PSKeyItem *pKey
)
{
@@ -144,31 +144,31 @@ BOOL KeybGetKey (
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
- return (TRUE);
+ return (true);
}
else {
- return (FALSE);
+ return (false);
}
} else if (dwKeyIndex < MAX_GROUP_KEY) {
- if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
- return (TRUE);
+ return (true);
}
else {
- return (FALSE);
+ return (false);
}
}
else {
- return (FALSE);
+ return (false);
}
}
}
- return (FALSE);
+ return (false);
}
@@ -186,37 +186,37 @@ BOOL KeybGetKey (
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybSetKey (
+bool KeybSetKey (
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyIndex,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode,
- DWORD_PTR dwIoBase,
- BYTE byLocalID
+ unsigned char *pbyKey,
+ unsigned char byKeyDecMode,
+ unsigned long dwIoBase,
+ unsigned char byLocalID
)
{
int i,j;
- UINT ii;
+ unsigned int ii;
PSKeyItem pKey;
- UINT uKeyIdx;
+ unsigned int uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
j = (MAX_KEY_TABLE-1);
for (i=0;i<(MAX_KEY_TABLE-1);i++) {
- if ((pTable->KeyTable[i].bInUse == FALSE) &&
+ if ((pTable->KeyTable[i].bInUse == false) &&
(j == (MAX_KEY_TABLE-1))) {
// found empty table
j = i;
}
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
@@ -227,7 +227,7 @@ BOOL KeybSetKey (
} else {
// Group key
if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return (FALSE);
+ return (false);
pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
// Group transmit key
@@ -241,7 +241,7 @@ BOOL KeybSetKey (
}
pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -252,7 +252,7 @@ BOOL KeybSetKey (
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -277,12 +277,12 @@ BOOL KeybSetKey (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
- return (TRUE);
+ return (true);
}
}
if (j < (MAX_KEY_TABLE-1)) {
memcpy(pTable->KeyTable[j].abyBSSID,pbyBSSID,ETH_ALEN);
- pTable->KeyTable[j].bInUse = TRUE;
+ pTable->KeyTable[j].bInUse = true;
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
pKey = &(pTable->KeyTable[j].PairwiseKey);
@@ -292,7 +292,7 @@ BOOL KeybSetKey (
} else {
// Group key
if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return (FALSE);
+ return (false);
pKey = &(pTable->KeyTable[j].GroupKey[dwKeyIndex & 0x000000FF]);
if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
// Group transmit key
@@ -306,7 +306,7 @@ BOOL KeybSetKey (
}
pTable->KeyTable[j].wKeyCtl |= 0x8000; // enable on-fly
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -317,7 +317,7 @@ BOOL KeybSetKey (
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -342,9 +342,9 @@ BOOL KeybSetKey (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
- return (TRUE);
+ return (true);
}
- return (FALSE);
+ return (false);
}
@@ -359,66 +359,66 @@ BOOL KeybSetKey (
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybRemoveKey (
+bool KeybRemoveKey (
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
- DWORD_PTR dwIoBase
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyIndex,
+ unsigned long dwIoBase
)
{
int i;
- if (IS_BROADCAST_ADDRESS(pbyBSSID)) {
+ if (is_broadcast_ether_addr(pbyBSSID)) {
// delete all keys
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
for (i=0;i<MAX_KEY_TABLE;i++) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
}
s_vCheckKeyTableValid(pTable, dwIoBase);
- return TRUE;
+ return true;
}
else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
for (i=0;i<MAX_KEY_TABLE;i++) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
// remove Group transmit key
pTable->KeyTable[i].dwGTKeyIndex = 0;
}
}
s_vCheckKeyTableValid(pTable, dwIoBase);
- return TRUE;
+ return true;
}
else {
- return FALSE;
+ return false;
}
}
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
s_vCheckKeyTableValid(pTable, dwIoBase);
- return (TRUE);
+ return (true);
}
else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
// remove Group transmit key
pTable->KeyTable[i].dwGTKeyIndex = 0;
}
s_vCheckKeyTableValid(pTable, dwIoBase);
- return (TRUE);
+ return (true);
}
else {
- return (FALSE);
+ return (false);
}
}
}
- return (FALSE);
+ return (false);
}
@@ -432,30 +432,30 @@ BOOL KeybRemoveKey (
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybRemoveAllKey (
+bool KeybRemoveAllKey (
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD_PTR dwIoBase
+ unsigned char *pbyBSSID,
+ unsigned long dwIoBase
)
{
int i,u;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
+ pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
for(u=0;u<MAX_GROUP_KEY;u++) {
- pTable->KeyTable[i].GroupKey[u].bKeyValid = FALSE;
+ pTable->KeyTable[i].GroupKey[u].bKeyValid = false;
}
pTable->KeyTable[i].dwGTKeyIndex = 0;
s_vCheckKeyTableValid(pTable, dwIoBase);
- return (TRUE);
+ return (true);
}
}
- return (FALSE);
+ return (false);
}
/*
@@ -467,20 +467,20 @@ BOOL KeybRemoveAllKey (
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
void KeyvRemoveWEPKey (
PSKeyManagement pTable,
- DWORD dwKeyIndex,
- DWORD_PTR dwIoBase
+ unsigned long dwKeyIndex,
+ unsigned long dwIoBase
)
{
if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == TRUE) {
+ if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == true) {
if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
- pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
+ pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
// remove Group transmit key
pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = 0;
@@ -494,7 +494,7 @@ void KeyvRemoveWEPKey (
void KeyvRemoveAllWEPKey (
PSKeyManagement pTable,
- DWORD_PTR dwIoBase
+ unsigned long dwIoBase
)
{
int i;
@@ -514,13 +514,13 @@ void KeyvRemoveAllWEPKey (
* Out:
* pKey - Key return
*
- * Return Value: TRUE if found otherwise FALSE
+ * Return Value: true if found otherwise false
*
*/
-BOOL KeybGetTransmitKey (
+bool KeybGetTransmitKey (
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyType,
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyType,
PSKeyItem *pKey
)
{
@@ -528,12 +528,12 @@ BOOL KeybGetTransmitKey (
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetTransmitKey:");
@@ -544,19 +544,19 @@ BOOL KeybGetTransmitKey (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
- return (TRUE);
+ return (true);
}
else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PairwiseKey.bKeyValid == FALSE\n");
- return (FALSE);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PairwiseKey.bKeyValid == false\n");
+ return (false);
}
} // End of Type == PAIRWISE
else {
if (pTable->KeyTable[i].dwGTKeyIndex == 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ERROR: dwGTKeyIndex == 0 !!!\n");
- return FALSE;
+ return false;
}
- if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == TRUE) {
+ if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == true) {
*pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetTransmitKey:");
@@ -567,11 +567,11 @@ BOOL KeybGetTransmitKey (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex);
- return (TRUE);
+ return (true);
}
else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GroupKey.bKeyValid == FALSE\n");
- return (FALSE);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GroupKey.bKeyValid == false\n");
+ return (false);
}
} // End of Type = GROUP
} // BSSID match
@@ -581,7 +581,7 @@ BOOL KeybGetTransmitKey (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(pbyBSSID+ii));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
- return (FALSE);
+ return (false);
}
@@ -594,10 +594,10 @@ BOOL KeybGetTransmitKey (
* Out:
* none
*
- * Return Value: TRUE if found otherwise FALSE
+ * Return Value: true if found otherwise false
*
*/
-BOOL KeybCheckPairewiseKey (
+bool KeybCheckPairewiseKey (
PSKeyManagement pTable,
PSKeyItem *pKey
)
@@ -606,13 +606,13 @@ BOOL KeybCheckPairewiseKey (
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
- if ((pTable->KeyTable[i].bInUse == TRUE) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE)) {
+ if ((pTable->KeyTable[i].bInUse == true) &&
+ (pTable->KeyTable[i].PairwiseKey.bKeyValid == true)) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
- return (TRUE);
+ return (true);
}
}
- return (FALSE);
+ return (false);
}
/*
@@ -628,34 +628,34 @@ BOOL KeybCheckPairewiseKey (
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybSetDefaultKey (
+bool KeybSetDefaultKey (
PSKeyManagement pTable,
- DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long dwKeyIndex,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode,
- DWORD_PTR dwIoBase,
- BYTE byLocalID
+ unsigned char *pbyKey,
+ unsigned char byKeyDecMode,
+ unsigned long dwIoBase,
+ unsigned char byLocalID
)
{
- UINT ii;
+ unsigned int ii;
PSKeyItem pKey;
- UINT uKeyIdx;
+ unsigned int uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetDefaultKey: %1x, %d \n", (int)dwKeyIndex, (int)uKeyLength);
if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
- return (FALSE);
+ return (false);
} else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) {
- return (FALSE);
+ return (false);
}
- pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
+ pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = true;
for(ii=0;ii<ETH_ALEN;ii++)
pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
@@ -676,13 +676,13 @@ BOOL KeybSetDefaultKey (
if ((uKeyLength == WLAN_WEP232_KEYLEN) &&
(byKeyDecMode == KEY_CTL_WEP)) {
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match
- pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = TRUE;
+ pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = true;
} else {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == FALSE)
+ if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == false)
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match
}
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -693,7 +693,7 @@ BOOL KeybSetDefaultKey (
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (PDWORD)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -718,7 +718,7 @@ BOOL KeybSetDefaultKey (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
- return (TRUE);
+ return (true);
}
@@ -735,36 +735,36 @@ BOOL KeybSetDefaultKey (
* Out:
* none
*
- * Return Value: TRUE if success otherwise FALSE
+ * Return Value: true if success otherwise false
*
*/
-BOOL KeybSetAllGroupKey (
+bool KeybSetAllGroupKey (
PSKeyManagement pTable,
- DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long dwKeyIndex,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode,
- DWORD_PTR dwIoBase,
- BYTE byLocalID
+ unsigned char *pbyKey,
+ unsigned char byKeyDecMode,
+ unsigned long dwIoBase,
+ unsigned char byLocalID
)
{
int i;
- UINT ii;
+ unsigned int ii;
PSKeyItem pKey;
- UINT uKeyIdx;
+ unsigned int uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
- return (FALSE);
+ return (false);
} else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) {
- return (FALSE);
+ return (false);
}
for (i=0; i < MAX_KEY_TABLE-1; i++) {
- if (pTable->KeyTable[i].bInUse == TRUE) {
+ if (pTable->KeyTable[i].bInUse == true) {
// found table already exist
// Group key
pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
@@ -781,7 +781,7 @@ BOOL KeybSetAllGroupKey (
pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
- pKey->bKeyValid = TRUE;
+ pKey->bKeyValid = true;
pKey->uKeyLength = uKeyLength;
pKey->dwKeyIndex = dwKeyIndex;
pKey->byCipherSuite = byKeyDecMode;
@@ -792,7 +792,7 @@ BOOL KeybSetAllGroupKey (
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (PDWORD)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -817,7 +817,7 @@ BOOL KeybSetAllGroupKey (
//DBG_PRN_GRP12(("pKey->wTSC15_0: %X\n ", pKey->wTSC15_0));
//DBG_PRN_GRP12(("pKey->dwKeyIndex: %lX\n ", pKey->dwKeyIndex));
- } // (pTable->KeyTable[i].bInUse == TRUE)
+ } // (pTable->KeyTable[i].bInUse == true)
}
- return (TRUE);
+ return (true);
}
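
The key.c routines above all decode the same dwKeyIndex convention: the low byte selects a group-key slot, PAIRWISE_KEY, TRANSMIT_KEY and USE_KEYRSC are flag bits from the driver's key header, and callers in ioctl.c/iwctl.c additionally OR in bit 31 (masked back off with 0x7FFFFFFF when dwGTKeyIndex is compared). A sketch of that decoding, keeping the flags symbolic since their numeric values are not shown in this excerpt:

#include <linux/types.h>
#include "key.h"

/* Hypothetical helper mirroring the range checks in KeybSetKey() and
 * KeybSetDefaultKey(). */
static bool key_index_is_valid_group(unsigned long dwKeyIndex)
{
	unsigned long group_slot = dwKeyIndex & 0x000000FF;

	if (dwKeyIndex & PAIRWISE_KEY)
		return false;		/* pairwise key, not a group slot */
	if (group_slot >= MAX_GROUP_KEY)
		return false;		/* out-of-range group slot        */
	/* TRANSMIT_KEY marks the slot as the group transmit key and
	 * USE_KEYRSC means the caller supplies the RSC; neither affects
	 * the range check itself. */
	return true;
}
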
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index 39403d93aeb..6b2dad331a5 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -57,33 +57,33 @@
typedef struct tagSKeyItem
{
- BOOL bKeyValid;
- ULONG uKeyLength;
- BYTE abyKey[MAX_KEY_LEN];
+ bool bKeyValid;
+ unsigned long uKeyLength;
+ unsigned char abyKey[MAX_KEY_LEN];
QWORD KeyRSC;
- DWORD dwTSC47_16;
- WORD wTSC15_0;
- BYTE byCipherSuite;
- BYTE byReserved0;
- DWORD dwKeyIndex;
+ unsigned long dwTSC47_16;
+ unsigned short wTSC15_0;
+ unsigned char byCipherSuite;
+ unsigned char byReserved0;
+ unsigned long dwKeyIndex;
void *pvKeyTable;
} SKeyItem, *PSKeyItem; //64
typedef struct tagSKeyTable
{
- BYTE abyBSSID[ETH_ALEN]; //6
- BYTE byReserved0[2]; //8
+ unsigned char abyBSSID[ETH_ALEN]; //6
+ unsigned char byReserved0[2]; //8
SKeyItem PairwiseKey;
SKeyItem GroupKey[MAX_GROUP_KEY]; //64*5 = 320, 320+8=328
- DWORD dwGTKeyIndex; // GroupTransmitKey Index
- BOOL bInUse;
+ unsigned long dwGTKeyIndex; // GroupTransmitKey Index
+ bool bInUse;
//2006-1116-01,<Modify> by NomadZhao
- //WORD wKeyCtl;
- //BOOL bSoftWEP;
- BOOL bSoftWEP;
- WORD wKeyCtl; // for address of wKeyCtl at align 4
+ //unsigned short wKeyCtl;
+ //bool bSoftWEP;
+ bool bSoftWEP;
+ unsigned short wKeyCtl; // for address of wKeyCtl at align 4
- BYTE byReserved1[6];
+ unsigned char byReserved1[6];
} SKeyTable, *PSKeyTable; //348
typedef struct tagSKeyManagement
@@ -101,83 +101,83 @@ typedef struct tagSKeyManagement
/*--------------------- Export Functions --------------------------*/
-void KeyvInitTable(PSKeyManagement pTable, DWORD_PTR dwIoBase);
+void KeyvInitTable(PSKeyManagement pTable, unsigned long dwIoBase);
-BOOL KeybGetKey(
+bool KeybGetKey(
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyIndex,
PSKeyItem *pKey
);
-BOOL KeybSetKey(
+bool KeybSetKey(
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyIndex,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode,
- DWORD_PTR dwIoBase,
- BYTE byLocalID
+ unsigned char *pbyKey,
+ unsigned char byKeyDecMode,
+ unsigned long dwIoBase,
+ unsigned char byLocalID
);
-BOOL KeybSetDefaultKey(
+bool KeybSetDefaultKey(
PSKeyManagement pTable,
- DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long dwKeyIndex,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode,
- DWORD_PTR dwIoBase,
- BYTE byLocalID
+ unsigned char *pbyKey,
+ unsigned char byKeyDecMode,
+ unsigned long dwIoBase,
+ unsigned char byLocalID
);
-BOOL KeybRemoveKey(
+bool KeybRemoveKey(
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyIndex,
- DWORD_PTR dwIoBase
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyIndex,
+ unsigned long dwIoBase
);
-BOOL KeybGetTransmitKey(
+bool KeybGetTransmitKey(
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD dwKeyType,
+ unsigned char *pbyBSSID,
+ unsigned long dwKeyType,
PSKeyItem *pKey
);
-BOOL KeybCheckPairewiseKey(
+bool KeybCheckPairewiseKey(
PSKeyManagement pTable,
PSKeyItem *pKey
);
-BOOL KeybRemoveAllKey(
+bool KeybRemoveAllKey(
PSKeyManagement pTable,
- PBYTE pbyBSSID,
- DWORD_PTR dwIoBase
+ unsigned char *pbyBSSID,
+ unsigned long dwIoBase
);
void KeyvRemoveWEPKey(
PSKeyManagement pTable,
- DWORD dwKeyIndex,
- DWORD_PTR dwIoBase
+ unsigned long dwKeyIndex,
+ unsigned long dwIoBase
);
void KeyvRemoveAllWEPKey(
PSKeyManagement pTable,
- DWORD_PTR dwIoBase
+ unsigned long dwIoBase
);
-BOOL KeybSetAllGroupKey (
+bool KeybSetAllGroupKey (
PSKeyManagement pTable,
- DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long dwKeyIndex,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
- PBYTE pbyKey,
- BYTE byKeyDecMode,
- DWORD_PTR dwIoBase,
- BYTE byLocalID
+ unsigned char *pbyKey,
+ unsigned char byKeyDecMode,
+ unsigned long dwIoBase,
+ unsigned char byLocalID
);
#endif // __KEY_H__
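
With the prototypes above retyped, the lookup API reads as plain C. A usage sketch for fetching the current pairwise transmit key (the wrapper is hypothetical; in the driver the table is pDevice->sKey and the BSSID comes from the management object):

#include <linux/types.h>
#include "key.h"

static bool get_pairwise_tx_key(PSKeyManagement table, unsigned char *bssid,
				PSKeyItem *key_out)
{
	/* PAIRWISE_KEY selects KeyTable[i].PairwiseKey for the matching
	 * BSSID; false means no valid key is installed. */
	return KeybGetTransmitKey(table, bssid, PAIRWISE_KEY, key_out);
}
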
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index f1ef7da75c2..f8d1651341f 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -72,7 +72,7 @@
#include "tether.h"
#include "mac.h"
-WORD TxRate_iwconfig;//2008-5-8 <add> by chester
+unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
/*--------------------- Static Definitions -------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
@@ -103,7 +103,7 @@ static int msglevel =MSG_LEVEL_INFO;
* Return Value: none
*
*/
-void MACvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyMacRegs)
+void MACvReadAllRegs (unsigned long dwIoBase, unsigned char *pbyMacRegs)
{
int ii;
@@ -137,12 +137,12 @@ void MACvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyMacRegs)
* Out:
* none
*
- * Return Value: TRUE if all test bits On; otherwise FALSE
+ * Return Value: true if all test bits On; otherwise false
*
*/
-BOOL MACbIsRegBitsOn (DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits)
+bool MACbIsRegBitsOn (unsigned long dwIoBase, unsigned char byRegOfs, unsigned char byTestBits)
{
- BYTE byData;
+ unsigned char byData;
VNSvInPortB(dwIoBase + byRegOfs, &byData);
return (byData & byTestBits) == byTestBits;
@@ -160,12 +160,12 @@ BOOL MACbIsRegBitsOn (DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits)
* Out:
* none
*
- * Return Value: TRUE if all test bits Off; otherwise FALSE
+ * Return Value: true if all test bits Off; otherwise false
*
*/
-BOOL MACbIsRegBitsOff (DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits)
+bool MACbIsRegBitsOff (unsigned long dwIoBase, unsigned char byRegOfs, unsigned char byTestBits)
{
- BYTE byData;
+ unsigned char byData;
VNSvInPortB(dwIoBase + byRegOfs, &byData);
return !(byData & byTestBits);
@@ -181,18 +181,18 @@ BOOL MACbIsRegBitsOff (DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits)
* Out:
* none
*
- * Return Value: TRUE if interrupt is disable; otherwise FALSE
+ * Return Value: true if interrupt is disable; otherwise false
*
*/
-BOOL MACbIsIntDisable (DWORD_PTR dwIoBase)
+bool MACbIsIntDisable (unsigned long dwIoBase)
{
- DWORD dwData;
+ unsigned long dwData;
VNSvInPortD(dwIoBase + MAC_REG_IMR, &dwData);
if (dwData != 0)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
/*
@@ -209,9 +209,9 @@ BOOL MACbIsIntDisable (DWORD_PTR dwIoBase)
* Return Value: Mask Value read
*
*/
-BYTE MACbyReadMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx)
+unsigned char MACbyReadMultiAddr (unsigned long dwIoBase, unsigned int uByteIdx)
{
- BYTE byData;
+ unsigned char byData;
MACvSelectPage1(dwIoBase);
VNSvInPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, &byData);
@@ -234,7 +234,7 @@ BYTE MACbyReadMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx)
* Return Value: none
*
*/
-void MACvWriteMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData)
+void MACvWriteMultiAddr (unsigned long dwIoBase, unsigned int uByteIdx, unsigned char byData)
{
MACvSelectPage1(dwIoBase);
VNSvOutPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, byData);
@@ -255,11 +255,11 @@ void MACvWriteMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData)
* Return Value: none
*
*/
-void MACvSetMultiAddrByHash (DWORD_PTR dwIoBase, BYTE byHashIdx)
+void MACvSetMultiAddrByHash (unsigned long dwIoBase, unsigned char byHashIdx)
{
- UINT uByteIdx;
- BYTE byBitMask;
- BYTE byOrgValue;
+ unsigned int uByteIdx;
+ unsigned char byBitMask;
+ unsigned char byOrgValue;
// calculate byte position
uByteIdx = byHashIdx / 8;
@@ -269,7 +269,7 @@ void MACvSetMultiAddrByHash (DWORD_PTR dwIoBase, BYTE byHashIdx)
byBitMask <<= (byHashIdx % 8);
// turn on the bit
byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
- MACvWriteMultiAddr(dwIoBase, uByteIdx, (BYTE)(byOrgValue | byBitMask));
+ MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue | byBitMask));
}
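
MACvSetMultiAddrByHash() above (and MACvResetMultiAddrByHash() just below) turn a hash value into a bit position inside the MAC's multicast address registers: byte index = hash / 8, mask = 1 << (hash % 8), then a read-modify-write through MACbyReadMultiAddr()/MACvWriteMultiAddr(). The same arithmetic as a stand-alone sketch:

/* Locate the byte and bit corresponding to a multicast hash value
 * (hash_idx is expected to be 0..63 for an 8-byte MAR block). */
static inline void mar_bit_position(unsigned char hash_idx,
				    unsigned int *byte_idx,
				    unsigned char *bit_mask)
{
	*byte_idx = hash_idx / 8;
	*bit_mask = 1u << (hash_idx % 8);
}
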
/*
@@ -286,11 +286,11 @@ void MACvSetMultiAddrByHash (DWORD_PTR dwIoBase, BYTE byHashIdx)
* Return Value: none
*
*/
-void MACvResetMultiAddrByHash (DWORD_PTR dwIoBase, BYTE byHashIdx)
+void MACvResetMultiAddrByHash (unsigned long dwIoBase, unsigned char byHashIdx)
{
- UINT uByteIdx;
- BYTE byBitMask;
- BYTE byOrgValue;
+ unsigned int uByteIdx;
+ unsigned char byBitMask;
+ unsigned char byOrgValue;
// calculate byte position
uByteIdx = byHashIdx / 8;
@@ -300,7 +300,7 @@ void MACvResetMultiAddrByHash (DWORD_PTR dwIoBase, BYTE byHashIdx)
byBitMask <<= (byHashIdx % 8);
// turn off the bit
byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
- MACvWriteMultiAddr(dwIoBase, uByteIdx, (BYTE)(byOrgValue & (~byBitMask)));
+ MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue & (~byBitMask)));
}
/*
@@ -317,9 +317,9 @@ void MACvResetMultiAddrByHash (DWORD_PTR dwIoBase, BYTE byHashIdx)
* Return Value: none
*
*/
-void MACvSetRxThreshold (DWORD_PTR dwIoBase, BYTE byThreshold)
+void MACvSetRxThreshold (unsigned long dwIoBase, unsigned char byThreshold)
{
- BYTE byOrgValue;
+ unsigned char byOrgValue;
ASSERT(byThreshold < 4);
@@ -342,7 +342,7 @@ void MACvSetRxThreshold (DWORD_PTR dwIoBase, BYTE byThreshold)
* Return Value: none
*
*/
-void MACvGetRxThreshold (DWORD_PTR dwIoBase, PBYTE pbyThreshold)
+void MACvGetRxThreshold (unsigned long dwIoBase, unsigned char *pbyThreshold)
{
// get FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
@@ -363,9 +363,9 @@ void MACvGetRxThreshold (DWORD_PTR dwIoBase, PBYTE pbyThreshold)
* Return Value: none
*
*/
-void MACvSetTxThreshold (DWORD_PTR dwIoBase, BYTE byThreshold)
+void MACvSetTxThreshold (unsigned long dwIoBase, unsigned char byThreshold)
{
- BYTE byOrgValue;
+ unsigned char byOrgValue;
ASSERT(byThreshold < 4);
@@ -388,7 +388,7 @@ void MACvSetTxThreshold (DWORD_PTR dwIoBase, BYTE byThreshold)
* Return Value: none
*
*/
-void MACvGetTxThreshold (DWORD_PTR dwIoBase, PBYTE pbyThreshold)
+void MACvGetTxThreshold (unsigned long dwIoBase, unsigned char *pbyThreshold)
{
// get FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
@@ -409,9 +409,9 @@ void MACvGetTxThreshold (DWORD_PTR dwIoBase, PBYTE pbyThreshold)
* Return Value: none
*
*/
-void MACvSetDmaLength (DWORD_PTR dwIoBase, BYTE byDmaLength)
+void MACvSetDmaLength (unsigned long dwIoBase, unsigned char byDmaLength)
{
- BYTE byOrgValue;
+ unsigned char byOrgValue;
ASSERT(byDmaLength < 4);
@@ -434,7 +434,7 @@ void MACvSetDmaLength (DWORD_PTR dwIoBase, BYTE byDmaLength)
* Return Value: none
*
*/
-void MACvGetDmaLength (DWORD_PTR dwIoBase, PBYTE pbyDmaLength)
+void MACvGetDmaLength (unsigned long dwIoBase, unsigned char *pbyDmaLength)
{
// get FCR0
VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyDmaLength);
@@ -455,7 +455,7 @@ void MACvGetDmaLength (DWORD_PTR dwIoBase, PBYTE pbyDmaLength)
* Return Value: none
*
*/
-void MACvSetShortRetryLimit (DWORD_PTR dwIoBase, BYTE byRetryLimit)
+void MACvSetShortRetryLimit (unsigned long dwIoBase, unsigned char byRetryLimit)
{
// set SRT
VNSvOutPortB(dwIoBase + MAC_REG_SRT, byRetryLimit);
@@ -474,7 +474,7 @@ void MACvSetShortRetryLimit (DWORD_PTR dwIoBase, BYTE byRetryLimit)
* Return Value: none
*
*/
-void MACvGetShortRetryLimit (DWORD_PTR dwIoBase, PBYTE pbyRetryLimit)
+void MACvGetShortRetryLimit (unsigned long dwIoBase, unsigned char *pbyRetryLimit)
{
// get SRT
VNSvInPortB(dwIoBase + MAC_REG_SRT, pbyRetryLimit);
@@ -494,7 +494,7 @@ void MACvGetShortRetryLimit (DWORD_PTR dwIoBase, PBYTE pbyRetryLimit)
* Return Value: none
*
*/
-void MACvSetLongRetryLimit (DWORD_PTR dwIoBase, BYTE byRetryLimit)
+void MACvSetLongRetryLimit (unsigned long dwIoBase, unsigned char byRetryLimit)
{
// set LRT
VNSvOutPortB(dwIoBase + MAC_REG_LRT, byRetryLimit);
@@ -513,7 +513,7 @@ void MACvSetLongRetryLimit (DWORD_PTR dwIoBase, BYTE byRetryLimit)
* Return Value: none
*
*/
-void MACvGetLongRetryLimit (DWORD_PTR dwIoBase, PBYTE pbyRetryLimit)
+void MACvGetLongRetryLimit (unsigned long dwIoBase, unsigned char *pbyRetryLimit)
{
// get LRT
VNSvInPortB(dwIoBase + MAC_REG_LRT, pbyRetryLimit);
@@ -533,9 +533,9 @@ void MACvGetLongRetryLimit (DWORD_PTR dwIoBase, PBYTE pbyRetryLimit)
* Return Value: none
*
*/
-void MACvSetLoopbackMode (DWORD_PTR dwIoBase, BYTE byLoopbackMode)
+void MACvSetLoopbackMode (unsigned long dwIoBase, unsigned char byLoopbackMode)
{
- BYTE byOrgValue;
+ unsigned char byOrgValue;
ASSERT(byLoopbackMode < 3);
byLoopbackMode <<= 6;
@@ -556,17 +556,17 @@ void MACvSetLoopbackMode (DWORD_PTR dwIoBase, BYTE byLoopbackMode)
* Out:
* none
*
- * Return Value: TRUE if in Loopback mode; otherwise FALSE
+ * Return Value: true if in Loopback mode; otherwise false
*
*/
-BOOL MACbIsInLoopbackMode (DWORD_PTR dwIoBase)
+bool MACbIsInLoopbackMode (unsigned long dwIoBase)
{
- BYTE byOrgValue;
+ unsigned char byOrgValue;
VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
if (byOrgValue & (TEST_LBINT | TEST_LBEXT))
- return TRUE;
- return FALSE;
+ return true;
+ return false;
}
/*
@@ -583,10 +583,10 @@ BOOL MACbIsInLoopbackMode (DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-void MACvSetPacketFilter (DWORD_PTR dwIoBase, WORD wFilterType)
+void MACvSetPacketFilter (unsigned long dwIoBase, unsigned short wFilterType)
{
- BYTE byOldRCR;
- BYTE byNewRCR = 0;
+ unsigned char byOldRCR;
+ unsigned char byNewRCR = 0;
// if only in DIRECTED mode, multicast-address will set to zero,
// but if other mode exist (e.g. PROMISCUOUS), multicast-address
@@ -595,7 +595,7 @@ void MACvSetPacketFilter (DWORD_PTR dwIoBase, WORD wFilterType)
// set multicast address to accept none
MACvSelectPage1(dwIoBase);
VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0L);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(DWORD), 0L);
+ VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0L);
MACvSelectPage0(dwIoBase);
}
@@ -603,7 +603,7 @@ void MACvSetPacketFilter (DWORD_PTR dwIoBase, WORD wFilterType)
// set multicast address to accept all
MACvSelectPage1(dwIoBase);
VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0xFFFFFFFFL);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(DWORD), 0xFFFFFFFFL);
+ VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0xFFFFFFFFL);
MACvSelectPage0(dwIoBase);
}
@@ -643,7 +643,7 @@ void MACvSetPacketFilter (DWORD_PTR dwIoBase, WORD wFilterType)
* Return Value: none
*
*/
-void MACvSaveContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
+void MACvSaveContext (unsigned long dwIoBase, unsigned char *pbyCxtBuf)
{
int ii;
@@ -676,7 +676,7 @@ void MACvSaveContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
* Return Value: none
*
*/
-void MACvRestoreContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
+void MACvRestoreContext (unsigned long dwIoBase, unsigned char *pbyCxtBuf)
{
int ii;
@@ -703,14 +703,14 @@ void MACvRestoreContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
}
// restore CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, *(PDWORD)(pbyCxtBuf + MAC_REG_TXDMAPTR0));
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, *(PDWORD)(pbyCxtBuf + MAC_REG_AC0DMAPTR));
- VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, *(PDWORD)(pbyCxtBuf + MAC_REG_BCNDMAPTR));
+ VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0));
+ VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR));
+ VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, *(unsigned long *)(pbyCxtBuf + MAC_REG_BCNDMAPTR));
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, *(PDWORD)(pbyCxtBuf + MAC_REG_RXDMAPTR0));
+ VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0));
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, *(PDWORD)(pbyCxtBuf + MAC_REG_RXDMAPTR1));
+ VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1));
}
@@ -725,39 +725,39 @@ void MACvRestoreContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
* Out:
* none
*
- * Return Value: TRUE if all values are the same; otherwise FALSE
+ * Return Value: true if all values are the same; otherwise false
*
*/
-BOOL MACbCompareContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
+bool MACbCompareContext (unsigned long dwIoBase, unsigned char *pbyCxtBuf)
{
- DWORD dwData;
+ unsigned long dwData;
// compare MAC context to determine if this is a power lost init,
- // return TRUE for power remaining init, return FALSE for power lost init
+ // return true for power remaining init, return false for power lost init
// compare CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR
VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, &dwData);
- if (dwData != *(PDWORD)(pbyCxtBuf + MAC_REG_TXDMAPTR0)) {
- return FALSE;
+ if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0)) {
+ return false;
}
VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, &dwData);
- if (dwData != *(PDWORD)(pbyCxtBuf + MAC_REG_AC0DMAPTR)) {
- return FALSE;
+ if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR)) {
+ return false;
}
VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, &dwData);
- if (dwData != *(PDWORD)(pbyCxtBuf + MAC_REG_RXDMAPTR0)) {
- return FALSE;
+ if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0)) {
+ return false;
}
VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, &dwData);
- if (dwData != *(PDWORD)(pbyCxtBuf + MAC_REG_RXDMAPTR1)) {
- return FALSE;
+ if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1)) {
+ return false;
}
- return TRUE;
+ return true;
}
/*
@@ -770,13 +770,13 @@ BOOL MACbCompareContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
* Out:
* none
*
- * Return Value: TRUE if Reset Success; otherwise FALSE
+ * Return Value: true if Reset Success; otherwise false
*
*/
-BOOL MACbSoftwareReset (DWORD_PTR dwIoBase)
+bool MACbSoftwareReset (unsigned long dwIoBase)
{
- BYTE byData;
- WORD ww;
+ unsigned char byData;
+ unsigned short ww;
// turn on HOSTCR_SOFTRST, just write 0x01 to reset
//MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_SOFTRST);
@@ -788,8 +788,8 @@ BOOL MACbSoftwareReset (DWORD_PTR dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT)
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
@@ -803,13 +803,13 @@ BOOL MACbSoftwareReset (DWORD_PTR dwIoBase)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL MACbSafeSoftwareReset (DWORD_PTR dwIoBase)
+bool MACbSafeSoftwareReset (unsigned long dwIoBase)
{
- BYTE abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
- BOOL bRetVal;
+ unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
+ bool bRetVal;
// PATCH....
// save some important register's value, then do
@@ -836,14 +836,14 @@ BOOL MACbSafeSoftwareReset (DWORD_PTR dwIoBase)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL MACbSafeRxOff (DWORD_PTR dwIoBase)
+bool MACbSafeRxOff (unsigned long dwIoBase)
{
- WORD ww;
- DWORD dwData;
- BYTE byData;
+ unsigned short ww;
+ unsigned long dwData;
+ unsigned char byData;
// turn off wow temp for turn off Rx safely
@@ -858,7 +858,7 @@ BOOL MACbSafeRxOff (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x10);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x10)\n");
- return(FALSE);
+ return(false);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData);
@@ -868,7 +868,7 @@ BOOL MACbSafeRxOff (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x11);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x11)\n");
- return(FALSE);
+ return(false);
}
// try to safe shutdown RX
@@ -882,9 +882,9 @@ BOOL MACbSafeRxOff (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x12);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x12)\n");
- return(FALSE);
+ return(false);
}
- return TRUE;
+ return true;
}
/*
@@ -897,14 +897,14 @@ BOOL MACbSafeRxOff (DWORD_PTR dwIoBase)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL MACbSafeTxOff (DWORD_PTR dwIoBase)
+bool MACbSafeTxOff (unsigned long dwIoBase)
{
- WORD ww;
- DWORD dwData;
- BYTE byData;
+ unsigned short ww;
+ unsigned long dwData;
+ unsigned char byData;
// Clear TX DMA
//Tx0
@@ -921,7 +921,7 @@ BOOL MACbSafeTxOff (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x20);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x20)\n");
- return(FALSE);
+ return(false);
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData);
@@ -931,7 +931,7 @@ BOOL MACbSafeTxOff (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x21);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x21)\n");
- return(FALSE);
+ return(false);
}
// try to safe shutdown TX
@@ -946,9 +946,9 @@ BOOL MACbSafeTxOff (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x24);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x24)\n");
- return(FALSE);
+ return(false);
}
- return TRUE;
+ return true;
}
/*
@@ -961,29 +961,29 @@ BOOL MACbSafeTxOff (DWORD_PTR dwIoBase)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL MACbSafeStop (DWORD_PTR dwIoBase)
+bool MACbSafeStop (unsigned long dwIoBase)
{
MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
- if (MACbSafeRxOff(dwIoBase) == FALSE) {
+ if (MACbSafeRxOff(dwIoBase) == false) {
DBG_PORT80(0xA1);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" MACbSafeRxOff == FALSE)\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" MACbSafeRxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
- return FALSE;
+ return false;
}
- if (MACbSafeTxOff(dwIoBase) == FALSE) {
+ if (MACbSafeTxOff(dwIoBase) == false) {
DBG_PORT80(0xA2);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" MACbSafeTxOff == FALSE)\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" MACbSafeTxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
- return FALSE;
+ return false;
}
MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_MACEN);
- return TRUE;
+ return true;
}
/*
@@ -996,10 +996,10 @@ BOOL MACbSafeStop (DWORD_PTR dwIoBase)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL MACbShutdown (DWORD_PTR dwIoBase)
+bool MACbShutdown (unsigned long dwIoBase)
{
// disable MAC IMR
MACvIntDisable(dwIoBase);
@@ -1007,10 +1007,10 @@ BOOL MACbShutdown (DWORD_PTR dwIoBase)
// stop the adapter
if (!MACbSafeStop(dwIoBase)) {
MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
- return FALSE;
+ return false;
}
MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
- return TRUE;
+ return true;
}
/*
@@ -1026,7 +1026,7 @@ BOOL MACbShutdown (DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-void MACvInitialize (DWORD_PTR dwIoBase)
+void MACvInitialize (unsigned long dwIoBase)
{
// clear sticky bits
MACvClearStckDS(dwIoBase);
@@ -1045,8 +1045,8 @@ void MACvInitialize (DWORD_PTR dwIoBase)
// issue AUTOLD in EECSR to reload eeprom
//MACvRegBitsOn(dwIoBase, MAC_REG_I2MCSR, I2MCSR_AUTOLD);
// wait until EEPROM loading complete
- //while (TRUE) {
- // U8 u8Data;
+ //while (true) {
+ // u8 u8Data;
// VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &u8Data);
// if ( !(u8Data & I2MCSR_AUTOLD))
// break;
@@ -1079,11 +1079,11 @@ void MACvInitialize (DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-void MACvSetCurrRx0DescAddr (DWORD_PTR dwIoBase, DWORD dwCurrDescAddr)
+void MACvSetCurrRx0DescAddr (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
-WORD ww;
-BYTE byData;
-BYTE byOrgDMACtl;
+unsigned short ww;
+unsigned char byData;
+unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
@@ -1117,11 +1117,11 @@ BYTE byOrgDMACtl;
* Return Value: none
*
*/
-void MACvSetCurrRx1DescAddr (DWORD_PTR dwIoBase, DWORD dwCurrDescAddr)
+void MACvSetCurrRx1DescAddr (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
-WORD ww;
-BYTE byData;
-BYTE byOrgDMACtl;
+unsigned short ww;
+unsigned char byData;
+unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
@@ -1155,11 +1155,11 @@ BYTE byOrgDMACtl;
* Return Value: none
*
*/
-void MACvSetCurrTx0DescAddrEx (DWORD_PTR dwIoBase, DWORD dwCurrDescAddr)
+void MACvSetCurrTx0DescAddrEx (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
-WORD ww;
-BYTE byData;
-BYTE byOrgDMACtl;
+unsigned short ww;
+unsigned char byData;
+unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
@@ -1194,11 +1194,11 @@ BYTE byOrgDMACtl;
*
*/
//TxDMA1 = AC0DMA
-void MACvSetCurrAC0DescAddrEx (DWORD_PTR dwIoBase, DWORD dwCurrDescAddr)
+void MACvSetCurrAC0DescAddrEx (unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
-WORD ww;
-BYTE byData;
-BYTE byOrgDMACtl;
+unsigned short ww;
+unsigned char byData;
+unsigned char byOrgDMACtl;
VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byOrgDMACtl);
if (byOrgDMACtl & DMACTL_RUN) {
@@ -1221,7 +1221,7 @@ BYTE byOrgDMACtl;
-void MACvSetCurrTXDescAddr (int iTxType, DWORD_PTR dwIoBase, DWORD dwCurrDescAddr)
+void MACvSetCurrTXDescAddr (int iTxType, unsigned long dwIoBase, unsigned long dwCurrDescAddr)
{
if(iTxType == TYPE_AC0DMA){
MACvSetCurrAC0DescAddrEx(dwIoBase, dwCurrDescAddr);
@@ -1244,10 +1244,10 @@ void MACvSetCurrTXDescAddr (int iTxType, DWORD_PTR dwIoBase, DWORD dwCurrDescAdd
* Return Value: none
*
*/
-void MACvTimer0MicroSDelay (DWORD_PTR dwIoBase, UINT uDelay)
+void MACvTimer0MicroSDelay (unsigned long dwIoBase, unsigned int uDelay)
{
-BYTE byValue;
-UINT uu,ii;
+unsigned char byValue;
+unsigned int uu,ii;
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelay);
@@ -1280,7 +1280,7 @@ UINT uu,ii;
* Return Value: none
*
*/
-void MACvOneShotTimer0MicroSec (DWORD_PTR dwIoBase, UINT uDelayTime)
+void MACvOneShotTimer0MicroSec (unsigned long dwIoBase, unsigned int uDelayTime)
{
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelayTime);
@@ -1301,7 +1301,7 @@ void MACvOneShotTimer0MicroSec (DWORD_PTR dwIoBase, UINT uDelayTime)
* Return Value: none
*
*/
-void MACvOneShotTimer1MicroSec (DWORD_PTR dwIoBase, UINT uDelayTime)
+void MACvOneShotTimer1MicroSec (unsigned long dwIoBase, unsigned int uDelayTime)
{
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0);
VNSvOutPortD(dwIoBase + MAC_REG_TMDATA1, uDelayTime);
@@ -1309,7 +1309,7 @@ void MACvOneShotTimer1MicroSec (DWORD_PTR dwIoBase, UINT uDelayTime)
}
-void MACvSetMISCFifo (DWORD_PTR dwIoBase, WORD wOffset, DWORD dwData)
+void MACvSetMISCFifo (unsigned long dwIoBase, unsigned short wOffset, unsigned long dwData)
{
if (wOffset > 273)
return;
@@ -1319,10 +1319,10 @@ void MACvSetMISCFifo (DWORD_PTR dwIoBase, WORD wOffset, DWORD dwData)
}
-BOOL MACbTxDMAOff (DWORD_PTR dwIoBase, UINT idx)
+bool MACbTxDMAOff (unsigned long dwIoBase, unsigned int idx)
{
-BYTE byData;
-UINT ww = 0;
+unsigned char byData;
+unsigned int ww = 0;
if (idx == TYPE_TXDMA0) {
VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN);
@@ -1342,15 +1342,15 @@ UINT ww = 0;
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x29);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x29)\n");
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
-void MACvClearBusSusInd (DWORD_PTR dwIoBase)
+void MACvClearBusSusInd (unsigned long dwIoBase)
{
- DWORD dwOrgValue;
- UINT ww;
+ unsigned long dwOrgValue;
+ unsigned int ww;
// check if BcnSusInd enabled
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
if( !(dwOrgValue & EnCFG_BcnSusInd))
@@ -1369,11 +1369,11 @@ void MACvClearBusSusInd (DWORD_PTR dwIoBase)
}
}
-void MACvEnableBusSusEn (DWORD_PTR dwIoBase)
+void MACvEnableBusSusEn (unsigned long dwIoBase)
{
- BYTE byOrgValue;
- DWORD dwOrgValue;
- UINT ww;
+ unsigned char byOrgValue;
+ unsigned long dwOrgValue;
+ unsigned int ww;
// check if BcnSusInd enabled
VNSvInPortB(dwIoBase + MAC_REG_CFG , &byOrgValue);
@@ -1391,10 +1391,10 @@ void MACvEnableBusSusEn (DWORD_PTR dwIoBase)
}
}
-BOOL MACbFlushSYNCFifo (DWORD_PTR dwIoBase)
+bool MACbFlushSYNCFifo (unsigned long dwIoBase)
{
- BYTE byOrgValue;
- UINT ww;
+ unsigned char byOrgValue;
+ unsigned int ww;
// Read MACCR
VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
@@ -1412,16 +1412,16 @@ BOOL MACbFlushSYNCFifo (DWORD_PTR dwIoBase)
DBG_PORT80(0x35);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x33)\n");
}
- return TRUE;
+ return true;
}
-BOOL MACbPSWakeup (DWORD_PTR dwIoBase)
+bool MACbPSWakeup (unsigned long dwIoBase)
{
- BYTE byOrgValue;
- UINT ww;
+ unsigned char byOrgValue;
+ unsigned int ww;
// Read PSCTL
if (MACbIsRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PS)) {
- return TRUE;
+ return true;
}
// Disable PS
MACvRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PSEN);
@@ -1435,9 +1435,9 @@ BOOL MACbPSWakeup (DWORD_PTR dwIoBase)
if (ww == W_MAX_TIMEOUT) {
DBG_PORT80(0x36);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" DBG_PORT80(0x33)\n");
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
/*
@@ -1455,10 +1455,11 @@ BOOL MACbPSWakeup (DWORD_PTR dwIoBase)
*
*/
-void MACvSetKeyEntry (DWORD_PTR dwIoBase, WORD wKeyCtl, UINT uEntryIdx, UINT uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey, BYTE byLocalID)
+void MACvSetKeyEntry (unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
+ unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID)
{
-WORD wOffset;
-DWORD dwData;
+unsigned short wOffset;
+unsigned long dwData;
int ii;
if (byLocalID <= 1)
@@ -1521,9 +1522,9 @@ int ii;
* Return Value: none
*
*/
-void MACvDisableKeyEntry (DWORD_PTR dwIoBase, UINT uEntryIdx)
+void MACvDisableKeyEntry (unsigned long dwIoBase, unsigned int uEntryIdx)
{
-WORD wOffset;
+unsigned short wOffset;
wOffset = MISCFIFO_KEYETRY0;
wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
@@ -1549,10 +1550,11 @@ WORD wOffset;
*
*/
-void MACvSetDefaultKeyEntry (DWORD_PTR dwIoBase, UINT uKeyLen, UINT uKeyIdx, PDWORD pdwKey, BYTE byLocalID)
+void MACvSetDefaultKeyEntry (unsigned long dwIoBase, unsigned int uKeyLen,
+ unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
{
-WORD wOffset;
-DWORD dwData;
+unsigned short wOffset;
+unsigned long dwData;
int ii;
if (byLocalID <= 1)
@@ -1599,10 +1601,10 @@ int ii;
*
*/
/*
-void MACvEnableDefaultKey (DWORD_PTR dwIoBase, BYTE byLocalID)
+void MACvEnableDefaultKey (unsigned long dwIoBase, unsigned char byLocalID)
{
-WORD wOffset;
-DWORD dwData;
+unsigned short wOffset;
+unsigned long dwData;
if (byLocalID <= 1)
@@ -1634,10 +1636,10 @@ DWORD dwData;
* Return Value: none
*
*/
-void MACvDisableDefaultKey (DWORD_PTR dwIoBase)
+void MACvDisableDefaultKey (unsigned long dwIoBase)
{
-WORD wOffset;
-DWORD dwData;
+unsigned short wOffset;
+unsigned long dwData;
wOffset = MISCFIFO_KEYETRY0;
@@ -1664,10 +1666,11 @@ DWORD dwData;
* Return Value: none
*
*/
-void MACvSetDefaultTKIPKeyEntry (DWORD_PTR dwIoBase, UINT uKeyLen, UINT uKeyIdx, PDWORD pdwKey, BYTE byLocalID)
+void MACvSetDefaultTKIPKeyEntry (unsigned long dwIoBase, unsigned int uKeyLen,
+ unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
{
-WORD wOffset;
-DWORD dwData;
+unsigned short wOffset;
+unsigned long dwData;
int ii;
if (byLocalID <= 1)
@@ -1720,10 +1723,10 @@ int ii;
*
*/
-void MACvSetDefaultKeyCtl (DWORD_PTR dwIoBase, WORD wKeyCtl, UINT uEntryIdx, BYTE byLocalID)
+void MACvSetDefaultKeyCtl (unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID)
{
-WORD wOffset;
-DWORD dwData;
+unsigned short wOffset;
+unsigned long dwData;
if (byLocalID <= 1)
return;
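For reference, every conversion in mac.c above follows one consistent mapping from the driver's legacy Windows-style typedefs to plain C types. A minimal equivalence sketch, assuming the historical typedef definitions elsewhere in the driver, shown only to document the mapping the patch applies:

#include <linux/types.h>	/* kernel bool type */

/* Legacy names on the left, the standard types substituted throughout this patch on the right. */
typedef unsigned char		BYTE,  *PBYTE;
typedef unsigned short		WORD;
typedef unsigned long		DWORD, *PDWORD;
typedef unsigned long		DWORD_PTR;	/* I/O base addresses */
typedef unsigned int		UINT;
typedef unsigned long long	ULONGLONG;
/* BOOL with TRUE/FALSE becomes the kernel's bool with true/false. */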
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 5eb7f57f718..b96d27ee254 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -663,28 +663,28 @@
#define MACvRegBitsOn(dwIoBase, byRegOfs, byBits) \
{ \
- BYTE byData; \
+ unsigned char byData; \
VNSvInPortB(dwIoBase + byRegOfs, &byData); \
VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \
}
#define MACvWordRegBitsOn(dwIoBase, byRegOfs, wBits) \
{ \
- WORD wData; \
+ unsigned short wData; \
VNSvInPortW(dwIoBase + byRegOfs, &wData); \
VNSvOutPortW(dwIoBase + byRegOfs, wData | (wBits)); \
}
#define MACvDWordRegBitsOn(dwIoBase, byRegOfs, dwBits) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + byRegOfs, &dwData); \
VNSvOutPortD(dwIoBase + byRegOfs, dwData | (dwBits)); \
}
#define MACvRegBitsOnEx(dwIoBase, byRegOfs, byMask, byBits) \
{ \
- BYTE byData; \
+ unsigned char byData; \
VNSvInPortB(dwIoBase + byRegOfs, &byData); \
byData &= byMask; \
VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \
@@ -692,21 +692,21 @@
#define MACvRegBitsOff(dwIoBase, byRegOfs, byBits) \
{ \
- BYTE byData; \
+ unsigned char byData; \
VNSvInPortB(dwIoBase + byRegOfs, &byData); \
VNSvOutPortB(dwIoBase + byRegOfs, byData & ~(byBits)); \
}
#define MACvWordRegBitsOff(dwIoBase, byRegOfs, wBits) \
{ \
- WORD wData; \
+ unsigned short wData; \
VNSvInPortW(dwIoBase + byRegOfs, &wData); \
VNSvOutPortW(dwIoBase + byRegOfs, wData & ~(wBits)); \
}
#define MACvDWordRegBitsOff(dwIoBase, byRegOfs, dwBits) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + byRegOfs, &dwData); \
VNSvOutPortD(dwIoBase + byRegOfs, dwData & ~(dwBits)); \
}
@@ -714,37 +714,37 @@
#define MACvGetCurrRx0DescAddr(dwIoBase, pdwCurrDescAddr) \
{ \
VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, \
- (PDWORD)pdwCurrDescAddr); \
+ (unsigned long *)pdwCurrDescAddr); \
}
#define MACvGetCurrRx1DescAddr(dwIoBase, pdwCurrDescAddr) \
{ \
VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, \
- (PDWORD)pdwCurrDescAddr); \
+ (unsigned long *)pdwCurrDescAddr); \
}
#define MACvGetCurrTx0DescAddr(dwIoBase, pdwCurrDescAddr) \
{ \
VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, \
- (PDWORD)pdwCurrDescAddr); \
+ (unsigned long *)pdwCurrDescAddr); \
}
#define MACvGetCurrAC0DescAddr(dwIoBase, pdwCurrDescAddr) \
{ \
VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, \
- (PDWORD)pdwCurrDescAddr); \
+ (unsigned long *)pdwCurrDescAddr); \
}
#define MACvGetCurrSyncDescAddr(dwIoBase, pdwCurrDescAddr) \
{ \
VNSvInPortD(dwIoBase + MAC_REG_SYNCDMAPTR, \
- (PDWORD)pdwCurrDescAddr); \
+ (unsigned long *)pdwCurrDescAddr); \
}
#define MACvGetCurrATIMDescAddr(dwIoBase, pdwCurrDescAddr) \
{ \
VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR, \
- (PDWORD)pdwCurrDescAddr); \
+ (unsigned long *)pdwCurrDescAddr); \
} \
// set the chip with current BCN tx descriptor address
@@ -765,7 +765,7 @@
{ \
VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
VNSvInPortB(dwIoBase + MAC_REG_BSSID0, \
- (PBYTE)pbyEtherAddr); \
+ (unsigned char *)pbyEtherAddr); \
VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 1, \
pbyEtherAddr + 1); \
VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 2, \
@@ -801,7 +801,7 @@
{ \
VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
VNSvInPortB(dwIoBase + MAC_REG_PAR0, \
- (PBYTE)pbyEtherAddr); \
+ (unsigned char *)pbyEtherAddr); \
VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 1, \
pbyEtherAddr + 1); \
VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 2, \
@@ -873,7 +873,7 @@
#define MACvReceive0(dwIoBase) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData); \
if (dwData & DMACTL_RUN) { \
VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_WAKE);\
@@ -885,7 +885,7 @@
#define MACvReceive1(dwIoBase) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData); \
if (dwData & DMACTL_RUN) { \
VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_WAKE);\
@@ -902,7 +902,7 @@
#define MACvTransmit0(dwIoBase) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData); \
if (dwData & DMACTL_RUN) { \
VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_WAKE);\
@@ -914,7 +914,7 @@
#define MACvTransmitAC0(dwIoBase) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData); \
if (dwData & DMACTL_RUN) { \
VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_WAKE);\
@@ -926,7 +926,7 @@
#define MACvTransmitSYNC(dwIoBase) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + MAC_REG_SYNCDMACTL, &dwData); \
if (dwData & DMACTL_RUN) { \
VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_WAKE);\
@@ -938,7 +938,7 @@
#define MACvTransmitATIM(dwIoBase) \
{ \
- DWORD dwData; \
+ unsigned long dwData; \
VNSvInPortD(dwIoBase + MAC_REG_ATIMDMACTL, &dwData); \
if (dwData & DMACTL_RUN) { \
VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_WAKE);\
@@ -955,7 +955,7 @@
#define MACvClearStckDS(dwIoBase) \
{ \
- BYTE byOrgValue; \
+ unsigned char byOrgValue; \
VNSvInPortB(dwIoBase + MAC_REG_STICKHW, &byOrgValue); \
byOrgValue = byOrgValue & 0xFC; \
VNSvOutPortB(dwIoBase + MAC_REG_STICKHW, byOrgValue); \
@@ -1002,7 +1002,7 @@
#define MACvEnableProtectMD(dwIoBase) \
{ \
- DWORD dwOrgValue; \
+ unsigned long dwOrgValue; \
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
@@ -1010,7 +1010,7 @@
#define MACvDisableProtectMD(dwIoBase) \
{ \
- DWORD dwOrgValue; \
+ unsigned long dwOrgValue; \
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
@@ -1018,7 +1018,7 @@
#define MACvEnableBarkerPreambleMd(dwIoBase) \
{ \
- DWORD dwOrgValue; \
+ unsigned long dwOrgValue; \
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
@@ -1026,7 +1026,7 @@
#define MACvDisableBarkerPreambleMd(dwIoBase) \
{ \
- DWORD dwOrgValue; \
+ unsigned long dwOrgValue; \
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
@@ -1034,10 +1034,10 @@
#define MACvSetBBType(dwIoBase, byTyp) \
{ \
- DWORD dwOrgValue; \
+ unsigned long dwOrgValue; \
VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
- dwOrgValue = dwOrgValue | (DWORD) byTyp; \
+ dwOrgValue = dwOrgValue | (unsigned long) byTyp; \
VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
}
@@ -1074,78 +1074,81 @@
/*--------------------- Export Functions --------------------------*/
-extern WORD TxRate_iwconfig;//2008-5-8 <add> by chester
-void MACvReadAllRegs(DWORD_PTR dwIoBase, PBYTE pbyMacRegs);
-
-BOOL MACbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits);
-BOOL MACbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits);
-
-BOOL MACbIsIntDisable(DWORD_PTR dwIoBase);
-
-BYTE MACbyReadMultiAddr(DWORD_PTR dwIoBase, UINT uByteIdx);
-void MACvWriteMultiAddr(DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData);
-void MACvSetMultiAddrByHash(DWORD_PTR dwIoBase, BYTE byHashIdx);
-void MACvResetMultiAddrByHash(DWORD_PTR dwIoBase, BYTE byHashIdx);
-
-void MACvSetRxThreshold(DWORD_PTR dwIoBase, BYTE byThreshold);
-void MACvGetRxThreshold(DWORD_PTR dwIoBase, PBYTE pbyThreshold);
-
-void MACvSetTxThreshold(DWORD_PTR dwIoBase, BYTE byThreshold);
-void MACvGetTxThreshold(DWORD_PTR dwIoBase, PBYTE pbyThreshold);
-
-void MACvSetDmaLength(DWORD_PTR dwIoBase, BYTE byDmaLength);
-void MACvGetDmaLength(DWORD_PTR dwIoBase, PBYTE pbyDmaLength);
-
-void MACvSetShortRetryLimit(DWORD_PTR dwIoBase, BYTE byRetryLimit);
-void MACvGetShortRetryLimit(DWORD_PTR dwIoBase, PBYTE pbyRetryLimit);
-
-void MACvSetLongRetryLimit(DWORD_PTR dwIoBase, BYTE byRetryLimit);
-void MACvGetLongRetryLimit(DWORD_PTR dwIoBase, PBYTE pbyRetryLimit);
-
-void MACvSetLoopbackMode(DWORD_PTR dwIoBase, BYTE byLoopbackMode);
-BOOL MACbIsInLoopbackMode(DWORD_PTR dwIoBase);
-
-void MACvSetPacketFilter(DWORD_PTR dwIoBase, WORD wFilterType);
-
-void MACvSaveContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
-void MACvRestoreContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
-BOOL MACbCompareContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
-
-BOOL MACbSoftwareReset(DWORD_PTR dwIoBase);
-BOOL MACbSafeSoftwareReset(DWORD_PTR dwIoBase);
-BOOL MACbSafeRxOff(DWORD_PTR dwIoBase);
-BOOL MACbSafeTxOff(DWORD_PTR dwIoBase);
-BOOL MACbSafeStop(DWORD_PTR dwIoBase);
-BOOL MACbShutdown(DWORD_PTR dwIoBase);
-void MACvInitialize(DWORD_PTR dwIoBase);
-void MACvSetCurrRx0DescAddr(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvSetCurrRx1DescAddr(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvSetCurrTXDescAddr(int iTxType, DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvSetCurrTx0DescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvSetCurrAC0DescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvSetCurrSyncDescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvSetCurrATIMDescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-void MACvTimer0MicroSDelay(DWORD_PTR dwIoBase, UINT uDelay);
-void MACvOneShotTimer0MicroSec(DWORD_PTR dwIoBase, UINT uDelayTime);
-void MACvOneShotTimer1MicroSec(DWORD_PTR dwIoBase, UINT uDelayTime);
-
-void MACvSetMISCFifo(DWORD_PTR dwIoBase, WORD wOffset, DWORD dwData);
-
-BOOL MACbTxDMAOff (DWORD_PTR dwIoBase, UINT idx);
-
-void MACvClearBusSusInd(DWORD_PTR dwIoBase);
-void MACvEnableBusSusEn(DWORD_PTR dwIoBase);
-
-BOOL MACbFlushSYNCFifo(DWORD_PTR dwIoBase);
-BOOL MACbPSWakeup(DWORD_PTR dwIoBase);
-
-void MACvSetKeyEntry(DWORD_PTR dwIoBase, WORD wKeyCtl, UINT uEntryIdx, UINT uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey, BYTE byLocalID);
-void MACvDisableKeyEntry(DWORD_PTR dwIoBase, UINT uEntryIdx);
-void MACvSetDefaultKeyEntry(DWORD_PTR dwIoBase, UINT uKeyLen, UINT uKeyIdx, PDWORD pdwKey, BYTE byLocalID);
-//void MACvEnableDefaultKey(DWORD_PTR dwIoBase, BYTE byLocalID);
-void MACvDisableDefaultKey(DWORD_PTR dwIoBase);
-void MACvSetDefaultTKIPKeyEntry(DWORD_PTR dwIoBase, UINT uKeyLen, UINT uKeyIdx, PDWORD pdwKey, BYTE byLocalID);
-void MACvSetDefaultKeyCtl(DWORD_PTR dwIoBase, WORD wKeyCtl, UINT uEntryIdx, BYTE byLocalID);
+extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
+void MACvReadAllRegs(unsigned long dwIoBase, unsigned char *pbyMacRegs);
+
+bool MACbIsRegBitsOn(unsigned long dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
+bool MACbIsRegBitsOff(unsigned long dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
+
+bool MACbIsIntDisable(unsigned long dwIoBase);
+
+unsigned char MACbyReadMultiAddr(unsigned long dwIoBase, unsigned int uByteIdx);
+void MACvWriteMultiAddr(unsigned long dwIoBase, unsigned int uByteIdx, unsigned char byData);
+void MACvSetMultiAddrByHash(unsigned long dwIoBase, unsigned char byHashIdx);
+void MACvResetMultiAddrByHash(unsigned long dwIoBase, unsigned char byHashIdx);
+
+void MACvSetRxThreshold(unsigned long dwIoBase, unsigned char byThreshold);
+void MACvGetRxThreshold(unsigned long dwIoBase, unsigned char *pbyThreshold);
+
+void MACvSetTxThreshold(unsigned long dwIoBase, unsigned char byThreshold);
+void MACvGetTxThreshold(unsigned long dwIoBase, unsigned char *pbyThreshold);
+
+void MACvSetDmaLength(unsigned long dwIoBase, unsigned char byDmaLength);
+void MACvGetDmaLength(unsigned long dwIoBase, unsigned char *pbyDmaLength);
+
+void MACvSetShortRetryLimit(unsigned long dwIoBase, unsigned char byRetryLimit);
+void MACvGetShortRetryLimit(unsigned long dwIoBase, unsigned char *pbyRetryLimit);
+
+void MACvSetLongRetryLimit(unsigned long dwIoBase, unsigned char byRetryLimit);
+void MACvGetLongRetryLimit(unsigned long dwIoBase, unsigned char *pbyRetryLimit);
+
+void MACvSetLoopbackMode(unsigned long dwIoBase, unsigned char byLoopbackMode);
+bool MACbIsInLoopbackMode(unsigned long dwIoBase);
+
+void MACvSetPacketFilter(unsigned long dwIoBase, unsigned short wFilterType);
+
+void MACvSaveContext(unsigned long dwIoBase, unsigned char *pbyCxtBuf);
+void MACvRestoreContext(unsigned long dwIoBase, unsigned char *pbyCxtBuf);
+bool MACbCompareContext(unsigned long dwIoBase, unsigned char *pbyCxtBuf);
+
+bool MACbSoftwareReset(unsigned long dwIoBase);
+bool MACbSafeSoftwareReset(unsigned long dwIoBase);
+bool MACbSafeRxOff(unsigned long dwIoBase);
+bool MACbSafeTxOff(unsigned long dwIoBase);
+bool MACbSafeStop(unsigned long dwIoBase);
+bool MACbShutdown(unsigned long dwIoBase);
+void MACvInitialize(unsigned long dwIoBase);
+void MACvSetCurrRx0DescAddr(unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrRx1DescAddr(unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrTXDescAddr(int iTxType, unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrTx0DescAddrEx(unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrAC0DescAddrEx(unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrSyncDescAddrEx(unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvSetCurrATIMDescAddrEx(unsigned long dwIoBase, unsigned long dwCurrDescAddr);
+void MACvTimer0MicroSDelay(unsigned long dwIoBase, unsigned int uDelay);
+void MACvOneShotTimer0MicroSec(unsigned long dwIoBase, unsigned int uDelayTime);
+void MACvOneShotTimer1MicroSec(unsigned long dwIoBase, unsigned int uDelayTime);
+
+void MACvSetMISCFifo(unsigned long dwIoBase, unsigned short wOffset, unsigned long dwData);
+
+bool MACbTxDMAOff (unsigned long dwIoBase, unsigned int idx);
+
+void MACvClearBusSusInd(unsigned long dwIoBase);
+void MACvEnableBusSusEn(unsigned long dwIoBase);
+
+bool MACbFlushSYNCFifo(unsigned long dwIoBase);
+bool MACbPSWakeup(unsigned long dwIoBase);
+
+void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
+ unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID);
+void MACvDisableKeyEntry(unsigned long dwIoBase, unsigned int uEntryIdx);
+void MACvSetDefaultKeyEntry(unsigned long dwIoBase, unsigned int uKeyLen,
+ unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
+//void MACvEnableDefaultKey(unsigned long dwIoBase, unsigned char byLocalID);
+void MACvDisableDefaultKey(unsigned long dwIoBase);
+void MACvSetDefaultTKIPKeyEntry(unsigned long dwIoBase, unsigned int uKeyLen,
+ unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
+void MACvSetDefaultKeyCtl(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID);
#endif // __MAC_H__
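The mac.h helpers converted above all follow the same read-modify-write pattern over the adapter's I/O space. A minimal usage sketch of that pattern with the new types, assuming the converted mac.h and the driver's VNSvInPortB/VNSvOutPortB port wrappers; the function name is illustrative only:

#include "mac.h"

/* Equivalent to MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_SOFTRST):
 * read the register, OR in the requested bits, write the result back. */
static inline void example_hostcr_softrst_on(unsigned long dwIoBase)
{
	unsigned char byData;

	VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
	VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, byData | HOSTCR_SOFTRST);
}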
diff --git a/drivers/staging/vt6655/mib.c b/drivers/staging/vt6655/mib.c
index 4ca7877075b..1b91a837095 100644
--- a/drivers/staging/vt6655/mib.c
+++ b/drivers/staging/vt6655/mib.c
@@ -90,7 +90,7 @@ void STAvClearAllCounter (PSStatCounter pStatistic)
* Return Value: none
*
*/
-void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, DWORD dwIsr)
+void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, unsigned long dwIsr)
{
/**********************/
/* ABNORMAL interrupt */
@@ -177,8 +177,8 @@ void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, DWORD dwIsr)
*
*/
void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
- BYTE byRSR, BYTE byNewRSR, BYTE byRxRate,
- PBYTE pbyBuffer, UINT cbFrameLength)
+ unsigned char byRSR, unsigned char byNewRSR, unsigned char byRxRate,
+ unsigned char *pbyBuffer, unsigned int cbFrameLength)
{
//need change
PS802_11Header pHeader = (PS802_11Header)pbyBuffer;
@@ -194,15 +194,15 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
// update counters in case that successful transmit
if (byRSR & RSR_ADDRBROAD) {
pStatistic->ullRxBroadcastFrames++;
- pStatistic->ullRxBroadcastBytes += (ULONGLONG)cbFrameLength;
+ pStatistic->ullRxBroadcastBytes += (unsigned long long) cbFrameLength;
}
else if (byRSR & RSR_ADDRMULTI) {
pStatistic->ullRxMulticastFrames++;
- pStatistic->ullRxMulticastBytes += (ULONGLONG)cbFrameLength;
+ pStatistic->ullRxMulticastBytes += (unsigned long long) cbFrameLength;
}
else {
pStatistic->ullRxDirectedFrames++;
- pStatistic->ullRxDirectedBytes += (ULONGLONG)cbFrameLength;
+ pStatistic->ullRxDirectedBytes += (unsigned long long) cbFrameLength;
}
}
}
@@ -212,87 +212,87 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr11MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"11M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr11M, (INT)pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"11M: ALL[%d], OK[%d]:[%02x]\n", (int)pStatistic->CustomStat.ullRsr11M, (int)pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
}
else if(byRxRate==11) {
pStatistic->CustomStat.ullRsr5M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr5MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 5M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr5M, (INT)pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 5M: ALL[%d], OK[%d]:[%02x]\n", (int)pStatistic->CustomStat.ullRsr5M, (int)pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
}
else if(byRxRate==4) {
pStatistic->CustomStat.ullRsr2M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr2MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 2M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr2M, (INT)pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 2M: ALL[%d], OK[%d]:[%02x]\n", (int)pStatistic->CustomStat.ullRsr2M, (int)pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
}
else if(byRxRate==2){
pStatistic->CustomStat.ullRsr1M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr1MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 1M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr1M, (INT)pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 1M: ALL[%d], OK[%d]:[%02x]\n", (int)pStatistic->CustomStat.ullRsr1M, (int)pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
}
else if(byRxRate==12){
pStatistic->CustomStat.ullRsr6M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr6MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 6M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr6M, (INT)pStatistic->CustomStat.ullRsr6MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 6M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr6M, (int)pStatistic->CustomStat.ullRsr6MCRCOk);
}
else if(byRxRate==18){
pStatistic->CustomStat.ullRsr9M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr9MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 9M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr9M, (INT)pStatistic->CustomStat.ullRsr9MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 9M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr9M, (int)pStatistic->CustomStat.ullRsr9MCRCOk);
}
else if(byRxRate==24){
pStatistic->CustomStat.ullRsr12M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr12MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"12M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr12M, (INT)pStatistic->CustomStat.ullRsr12MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"12M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr12M, (int)pStatistic->CustomStat.ullRsr12MCRCOk);
}
else if(byRxRate==36){
pStatistic->CustomStat.ullRsr18M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr18MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"18M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr18M, (INT)pStatistic->CustomStat.ullRsr18MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"18M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr18M, (int)pStatistic->CustomStat.ullRsr18MCRCOk);
}
else if(byRxRate==48){
pStatistic->CustomStat.ullRsr24M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr24MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"24M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr24M, (INT)pStatistic->CustomStat.ullRsr24MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"24M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr24M, (int)pStatistic->CustomStat.ullRsr24MCRCOk);
}
else if(byRxRate==72){
pStatistic->CustomStat.ullRsr36M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr36MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"36M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr36M, (INT)pStatistic->CustomStat.ullRsr36MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"36M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr36M, (int)pStatistic->CustomStat.ullRsr36MCRCOk);
}
else if(byRxRate==96){
pStatistic->CustomStat.ullRsr48M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr48MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"48M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr48M, (INT)pStatistic->CustomStat.ullRsr48MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"48M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr48M, (int)pStatistic->CustomStat.ullRsr48MCRCOk);
}
else if(byRxRate==108){
pStatistic->CustomStat.ullRsr54M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr54MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"54M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr54M, (INT)pStatistic->CustomStat.ullRsr54MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"54M: ALL[%d], OK[%d]\n", (int)pStatistic->CustomStat.ullRsr54M, (int)pStatistic->CustomStat.ullRsr54MCRCOk);
}
else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown: Total[%d], CRCOK[%d]\n", (INT)pStatistic->dwRsrRxPacket+1, (INT)pStatistic->dwRsrCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown: Total[%d], CRCOK[%d]\n", (int)pStatistic->dwRsrRxPacket+1, (int)pStatistic->dwRsrCRCOk);
}
if (byRSR & RSR_BSSIDOK)
@@ -341,10 +341,10 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
if (WLAN_GET_FC_MOREFRAG(pHeader->wFrameCtl))
pStatistic->dwRsrRxFragment++;
- if (cbFrameLength < MIN_PACKET_LEN + 4) {
+ if (cbFrameLength < ETH_ZLEN + 4) {
pStatistic->dwRsrRunt++;
}
- else if (cbFrameLength == MIN_PACKET_LEN + 4) {
+ else if (cbFrameLength == ETH_ZLEN + 4) {
pStatistic->dwRsrRxFrmLen64++;
}
else if ((65 <= cbFrameLength) && (cbFrameLength <= 127)) {
@@ -389,11 +389,11 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
void
STAvUpdateRDStatCounterEx (
PSStatCounter pStatistic,
- BYTE byRSR,
- BYTE byNewRSR,
- BYTE byRxRate,
- PBYTE pbyBuffer,
- UINT cbFrameLength
+ unsigned char byRSR,
+ unsigned char byNewRSR,
+ unsigned char byRxRate,
+ unsigned char *pbyBuffer,
+ unsigned int cbFrameLength
)
{
STAvUpdateRDStatCounter(
@@ -408,7 +408,7 @@ STAvUpdateRDStatCounterEx (
// rx length
pStatistic->dwCntRxFrmLength = cbFrameLength;
// rx pattern, we just see 10 bytes for sample
- memcpy(pStatistic->abyCntRxPattern, (PBYTE)pbyBuffer, 10);
+ memcpy(pStatistic->abyCntRxPattern, (unsigned char *)pbyBuffer, 10);
}
@@ -432,16 +432,16 @@ STAvUpdateRDStatCounterEx (
void
STAvUpdateTDStatCounter (
PSStatCounter pStatistic,
- BYTE byTSR0,
- BYTE byTSR1,
- PBYTE pbyBuffer,
- UINT cbFrameLength,
- UINT uIdx
+ unsigned char byTSR0,
+ unsigned char byTSR1,
+ unsigned char *pbyBuffer,
+ unsigned int cbFrameLength,
+ unsigned int uIdx
)
{
PWLAN_80211HDR_A4 pHeader;
- PBYTE pbyDestAddr;
- BYTE byTSR0_NCR = byTSR0 & TSR0_NCR;
+ unsigned char *pbyDestAddr;
+ unsigned char byTSR0_NCR = byTSR0 & TSR0_NCR;
@@ -471,17 +471,17 @@ STAvUpdateTDStatCounter (
pStatistic->CustomStat.ullTsrAllOK =
(pStatistic->ullTsrOK[TYPE_AC0DMA] + pStatistic->ullTsrOK[TYPE_TXDMA0]);
// update counters in case that successful transmit
- if (IS_BROADCAST_ADDRESS(pbyDestAddr)) {
+ if (is_broadcast_ether_addr(pbyDestAddr)) {
pStatistic->ullTxBroadcastFrames[uIdx]++;
- pStatistic->ullTxBroadcastBytes[uIdx] += (ULONGLONG)cbFrameLength;
+ pStatistic->ullTxBroadcastBytes[uIdx] += (unsigned long long) cbFrameLength;
}
- else if (IS_MULTICAST_ADDRESS(pbyDestAddr)) {
+ else if (is_multicast_ether_addr(pbyDestAddr)) {
pStatistic->ullTxMulticastFrames[uIdx]++;
- pStatistic->ullTxMulticastBytes[uIdx] += (ULONGLONG)cbFrameLength;
+ pStatistic->ullTxMulticastBytes[uIdx] += (unsigned long long) cbFrameLength;
}
else {
pStatistic->ullTxDirectedFrames[uIdx]++;
- pStatistic->ullTxDirectedBytes[uIdx] += (ULONGLONG)cbFrameLength;
+ pStatistic->ullTxDirectedBytes[uIdx] += (unsigned long long) cbFrameLength;
}
}
else {
@@ -495,9 +495,9 @@ STAvUpdateTDStatCounter (
pStatistic->dwTsrACKData[uIdx]++;
}
- if (IS_BROADCAST_ADDRESS(pbyDestAddr))
+ if (is_broadcast_ether_addr(pbyDestAddr))
pStatistic->dwTsrBroadcast[uIdx]++;
- else if (IS_MULTICAST_ADDRESS(pbyDestAddr))
+ else if (is_multicast_ether_addr(pbyDestAddr))
pStatistic->dwTsrMulticast[uIdx]++;
else
pStatistic->dwTsrDirected[uIdx]++;
@@ -522,13 +522,13 @@ STAvUpdateTDStatCounter (
void
STAvUpdateTDStatCounterEx (
PSStatCounter pStatistic,
- PBYTE pbyBuffer,
- DWORD cbFrameLength
+ unsigned char *pbyBuffer,
+ unsigned long cbFrameLength
)
{
- UINT uPktLength;
+ unsigned int uPktLength;
- uPktLength = (UINT)cbFrameLength;
+ uPktLength = (unsigned int)cbFrameLength;
// tx length
pStatistic->dwCntTxBufLength = uPktLength;
@@ -555,25 +555,25 @@ void
STAvUpdate802_11Counter(
PSDot11Counters p802_11Counter,
PSStatCounter pStatistic,
- DWORD dwCounter
+ unsigned long dwCounter
)
{
//p802_11Counter->TransmittedFragmentCount
- p802_11Counter->MulticastTransmittedFrameCount = (ULONGLONG) (pStatistic->dwTsrBroadcast[TYPE_AC0DMA] +
+ p802_11Counter->MulticastTransmittedFrameCount = (unsigned long long) (pStatistic->dwTsrBroadcast[TYPE_AC0DMA] +
pStatistic->dwTsrBroadcast[TYPE_TXDMA0] +
pStatistic->dwTsrMulticast[TYPE_AC0DMA] +
pStatistic->dwTsrMulticast[TYPE_TXDMA0]);
- p802_11Counter->FailedCount = (ULONGLONG) (pStatistic->dwTsrErr[TYPE_AC0DMA] + pStatistic->dwTsrErr[TYPE_TXDMA0]);
- p802_11Counter->RetryCount = (ULONGLONG) (pStatistic->dwTsrRetry[TYPE_AC0DMA] + pStatistic->dwTsrRetry[TYPE_TXDMA0]);
- p802_11Counter->MultipleRetryCount = (ULONGLONG) (pStatistic->dwTsrMoreThanOnceRetry[TYPE_AC0DMA] +
+ p802_11Counter->FailedCount = (unsigned long long) (pStatistic->dwTsrErr[TYPE_AC0DMA] + pStatistic->dwTsrErr[TYPE_TXDMA0]);
+ p802_11Counter->RetryCount = (unsigned long long) (pStatistic->dwTsrRetry[TYPE_AC0DMA] + pStatistic->dwTsrRetry[TYPE_TXDMA0]);
+ p802_11Counter->MultipleRetryCount = (unsigned long long) (pStatistic->dwTsrMoreThanOnceRetry[TYPE_AC0DMA] +
pStatistic->dwTsrMoreThanOnceRetry[TYPE_TXDMA0]);
//p802_11Counter->FrameDuplicateCount
- p802_11Counter->RTSSuccessCount += (ULONGLONG) (dwCounter & 0x000000ff);
- p802_11Counter->RTSFailureCount += (ULONGLONG) ((dwCounter & 0x0000ff00) >> 8);
- p802_11Counter->ACKFailureCount += (ULONGLONG) ((dwCounter & 0x00ff0000) >> 16);
- p802_11Counter->FCSErrorCount += (ULONGLONG) ((dwCounter & 0xff000000) >> 24);
+ p802_11Counter->RTSSuccessCount += (unsigned long long) (dwCounter & 0x000000ff);
+ p802_11Counter->RTSFailureCount += (unsigned long long) ((dwCounter & 0x0000ff00) >> 8);
+ p802_11Counter->ACKFailureCount += (unsigned long long) ((dwCounter & 0x00ff0000) >> 16);
+ p802_11Counter->FCSErrorCount += (unsigned long long) ((dwCounter & 0xff000000) >> 24);
//p802_11Counter->ReceivedFragmentCount
- p802_11Counter->MulticastReceivedFrameCount = (ULONGLONG) (pStatistic->dwRsrBroadcast +
+ p802_11Counter->MulticastReceivedFrameCount = (unsigned long long) (pStatistic->dwRsrBroadcast +
pStatistic->dwRsrMulticast);
}
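The STAvUpdateRDStatCounter() branches above bucket received frames by the hardware rate code, which, as the 2/4/11/12/.../108 comparisons suggest, encodes the PHY rate in 500 kbit/s units. A minimal helper sketch of that relationship; the function name is illustrative and not part of the driver:

/* Illustrative only: byRxRate is in units of 500 kbit/s,
 * e.g. 2 -> 1 Mbit/s, 11 -> 5.5 Mbit/s, 108 -> 54 Mbit/s. */
static inline unsigned int rxrate_code_to_100kbps(unsigned char byRxRate)
{
	return byRxRate * 5;	/* 108 -> 540 (54.0 Mbit/s), 11 -> 55 (5.5 Mbit/s) */
}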
diff --git a/drivers/staging/vt6655/mib.h b/drivers/staging/vt6655/mib.h
index 2308319a405..009f3a4d29f 100644
--- a/drivers/staging/vt6655/mib.h
+++ b/drivers/staging/vt6655/mib.h
@@ -39,32 +39,32 @@
//
typedef struct tagSDot11Counters {
- ULONG Length; // Length of structure
- ULONGLONG TransmittedFragmentCount;
- ULONGLONG MulticastTransmittedFrameCount;
- ULONGLONG FailedCount;
- ULONGLONG RetryCount;
- ULONGLONG MultipleRetryCount;
- ULONGLONG RTSSuccessCount;
- ULONGLONG RTSFailureCount;
- ULONGLONG ACKFailureCount;
- ULONGLONG FrameDuplicateCount;
- ULONGLONG ReceivedFragmentCount;
- ULONGLONG MulticastReceivedFrameCount;
- ULONGLONG FCSErrorCount;
- ULONGLONG TKIPLocalMICFailures;
- ULONGLONG TKIPRemoteMICFailures;
- ULONGLONG TKIPICVErrors;
- ULONGLONG TKIPCounterMeasuresInvoked;
- ULONGLONG TKIPReplays;
- ULONGLONG CCMPFormatErrors;
- ULONGLONG CCMPReplays;
- ULONGLONG CCMPDecryptErrors;
- ULONGLONG FourWayHandshakeFailures;
-// ULONGLONG WEPUndecryptableCount;
-// ULONGLONG WEPICVErrorCount;
-// ULONGLONG DecryptSuccessCount;
-// ULONGLONG DecryptFailureCount;
+ unsigned long Length; // Length of structure
+ unsigned long long TransmittedFragmentCount;
+ unsigned long long MulticastTransmittedFrameCount;
+ unsigned long long FailedCount;
+ unsigned long long RetryCount;
+ unsigned long long MultipleRetryCount;
+ unsigned long long RTSSuccessCount;
+ unsigned long long RTSFailureCount;
+ unsigned long long ACKFailureCount;
+ unsigned long long FrameDuplicateCount;
+ unsigned long long ReceivedFragmentCount;
+ unsigned long long MulticastReceivedFrameCount;
+ unsigned long long FCSErrorCount;
+ unsigned long long TKIPLocalMICFailures;
+ unsigned long long TKIPRemoteMICFailures;
+ unsigned long long TKIPICVErrors;
+ unsigned long long TKIPCounterMeasuresInvoked;
+ unsigned long long TKIPReplays;
+ unsigned long long CCMPFormatErrors;
+ unsigned long long CCMPReplays;
+ unsigned long long CCMPDecryptErrors;
+ unsigned long long FourWayHandshakeFailures;
+// unsigned long long WEPUndecryptableCount;
+// unsigned long long WEPICVErrorCount;
+// unsigned long long DecryptSuccessCount;
+// unsigned long long DecryptFailureCount;
} SDot11Counters, *PSDot11Counters;
@@ -72,29 +72,29 @@ typedef struct tagSDot11Counters {
// MIB2 counter
//
typedef struct tagSMib2Counter {
- LONG ifIndex;
+ long ifIndex;
char ifDescr[256]; // max size 255 plus zero ending
// e.g. "interface 1"
- LONG ifType;
- LONG ifMtu;
- DWORD ifSpeed;
- BYTE ifPhysAddress[ETH_ALEN];
- LONG ifAdminStatus;
- LONG ifOperStatus;
- DWORD ifLastChange;
- DWORD ifInOctets;
- DWORD ifInUcastPkts;
- DWORD ifInNUcastPkts;
- DWORD ifInDiscards;
- DWORD ifInErrors;
- DWORD ifInUnknownProtos;
- DWORD ifOutOctets;
- DWORD ifOutUcastPkts;
- DWORD ifOutNUcastPkts;
- DWORD ifOutDiscards;
- DWORD ifOutErrors;
- DWORD ifOutQLen;
- DWORD ifSpecific;
+ long ifType;
+ long ifMtu;
+ unsigned long ifSpeed;
+ unsigned char ifPhysAddress[ETH_ALEN];
+ long ifAdminStatus;
+ long ifOperStatus;
+ unsigned long ifLastChange;
+ unsigned long ifInOctets;
+ unsigned long ifInUcastPkts;
+ unsigned long ifInNUcastPkts;
+ unsigned long ifInDiscards;
+ unsigned long ifInErrors;
+ unsigned long ifInUnknownProtos;
+ unsigned long ifOutOctets;
+ unsigned long ifOutUcastPkts;
+ unsigned long ifOutNUcastPkts;
+ unsigned long ifOutDiscards;
+ unsigned long ifOutErrors;
+ unsigned long ifOutQLen;
+ unsigned long ifSpecific;
} SMib2Counter, *PSMib2Counter;
// Value in the ifType entry
@@ -111,64 +111,64 @@ typedef struct tagSMib2Counter {
// RMON counter
//
typedef struct tagSRmonCounter {
- LONG etherStatsIndex;
- DWORD etherStatsDataSource;
- DWORD etherStatsDropEvents;
- DWORD etherStatsOctets;
- DWORD etherStatsPkts;
- DWORD etherStatsBroadcastPkts;
- DWORD etherStatsMulticastPkts;
- DWORD etherStatsCRCAlignErrors;
- DWORD etherStatsUndersizePkts;
- DWORD etherStatsOversizePkts;
- DWORD etherStatsFragments;
- DWORD etherStatsJabbers;
- DWORD etherStatsCollisions;
- DWORD etherStatsPkt64Octets;
- DWORD etherStatsPkt65to127Octets;
- DWORD etherStatsPkt128to255Octets;
- DWORD etherStatsPkt256to511Octets;
- DWORD etherStatsPkt512to1023Octets;
- DWORD etherStatsPkt1024to1518Octets;
- DWORD etherStatsOwners;
- DWORD etherStatsStatus;
+ long etherStatsIndex;
+ unsigned long etherStatsDataSource;
+ unsigned long etherStatsDropEvents;
+ unsigned long etherStatsOctets;
+ unsigned long etherStatsPkts;
+ unsigned long etherStatsBroadcastPkts;
+ unsigned long etherStatsMulticastPkts;
+ unsigned long etherStatsCRCAlignErrors;
+ unsigned long etherStatsUndersizePkts;
+ unsigned long etherStatsOversizePkts;
+ unsigned long etherStatsFragments;
+ unsigned long etherStatsJabbers;
+ unsigned long etherStatsCollisions;
+ unsigned long etherStatsPkt64Octets;
+ unsigned long etherStatsPkt65to127Octets;
+ unsigned long etherStatsPkt128to255Octets;
+ unsigned long etherStatsPkt256to511Octets;
+ unsigned long etherStatsPkt512to1023Octets;
+ unsigned long etherStatsPkt1024to1518Octets;
+ unsigned long etherStatsOwners;
+ unsigned long etherStatsStatus;
} SRmonCounter, *PSRmonCounter;
//
// Custom counter
//
typedef struct tagSCustomCounters {
- ULONG Length;
-
- ULONGLONG ullTsrAllOK;
-
- ULONGLONG ullRsr11M;
- ULONGLONG ullRsr5M;
- ULONGLONG ullRsr2M;
- ULONGLONG ullRsr1M;
-
- ULONGLONG ullRsr11MCRCOk;
- ULONGLONG ullRsr5MCRCOk;
- ULONGLONG ullRsr2MCRCOk;
- ULONGLONG ullRsr1MCRCOk;
-
- ULONGLONG ullRsr54M;
- ULONGLONG ullRsr48M;
- ULONGLONG ullRsr36M;
- ULONGLONG ullRsr24M;
- ULONGLONG ullRsr18M;
- ULONGLONG ullRsr12M;
- ULONGLONG ullRsr9M;
- ULONGLONG ullRsr6M;
-
- ULONGLONG ullRsr54MCRCOk;
- ULONGLONG ullRsr48MCRCOk;
- ULONGLONG ullRsr36MCRCOk;
- ULONGLONG ullRsr24MCRCOk;
- ULONGLONG ullRsr18MCRCOk;
- ULONGLONG ullRsr12MCRCOk;
- ULONGLONG ullRsr9MCRCOk;
- ULONGLONG ullRsr6MCRCOk;
+ unsigned long Length;
+
+ unsigned long long ullTsrAllOK;
+
+ unsigned long long ullRsr11M;
+ unsigned long long ullRsr5M;
+ unsigned long long ullRsr2M;
+ unsigned long long ullRsr1M;
+
+ unsigned long long ullRsr11MCRCOk;
+ unsigned long long ullRsr5MCRCOk;
+ unsigned long long ullRsr2MCRCOk;
+ unsigned long long ullRsr1MCRCOk;
+
+ unsigned long long ullRsr54M;
+ unsigned long long ullRsr48M;
+ unsigned long long ullRsr36M;
+ unsigned long long ullRsr24M;
+ unsigned long long ullRsr18M;
+ unsigned long long ullRsr12M;
+ unsigned long long ullRsr9M;
+ unsigned long long ullRsr6M;
+
+ unsigned long long ullRsr54MCRCOk;
+ unsigned long long ullRsr48MCRCOk;
+ unsigned long long ullRsr36MCRCOk;
+ unsigned long long ullRsr24MCRCOk;
+ unsigned long long ullRsr18MCRCOk;
+ unsigned long long ullRsr12MCRCOk;
+ unsigned long long ullRsr9MCRCOk;
+ unsigned long long ullRsr6MCRCOk;
} SCustomCounters, *PSCustomCounters;
@@ -177,29 +177,29 @@ typedef struct tagSCustomCounters {
// Custom counter
//
typedef struct tagSISRCounters {
- ULONG Length;
-
- DWORD dwIsrTx0OK;
- DWORD dwIsrAC0TxOK;
- DWORD dwIsrBeaconTxOK;
- DWORD dwIsrRx0OK;
- DWORD dwIsrTBTTInt;
- DWORD dwIsrSTIMERInt;
- DWORD dwIsrWatchDog;
- DWORD dwIsrUnrecoverableError;
- DWORD dwIsrSoftInterrupt;
- DWORD dwIsrMIBNearfull;
- DWORD dwIsrRxNoBuf;
-
- DWORD dwIsrUnknown; // unknown interrupt count
-
- DWORD dwIsrRx1OK;
- DWORD dwIsrATIMTxOK;
- DWORD dwIsrSYNCTxOK;
- DWORD dwIsrCFPEnd;
- DWORD dwIsrATIMEnd;
- DWORD dwIsrSYNCFlushOK;
- DWORD dwIsrSTIMER1Int;
+ unsigned long Length;
+
+ unsigned long dwIsrTx0OK;
+ unsigned long dwIsrAC0TxOK;
+ unsigned long dwIsrBeaconTxOK;
+ unsigned long dwIsrRx0OK;
+ unsigned long dwIsrTBTTInt;
+ unsigned long dwIsrSTIMERInt;
+ unsigned long dwIsrWatchDog;
+ unsigned long dwIsrUnrecoverableError;
+ unsigned long dwIsrSoftInterrupt;
+ unsigned long dwIsrMIBNearfull;
+ unsigned long dwIsrRxNoBuf;
+
+ unsigned long dwIsrUnknown; // unknown interrupt count
+
+ unsigned long dwIsrRx1OK;
+ unsigned long dwIsrATIMTxOK;
+ unsigned long dwIsrSYNCTxOK;
+ unsigned long dwIsrCFPEnd;
+ unsigned long dwIsrATIMEnd;
+ unsigned long dwIsrSYNCFlushOK;
+ unsigned long dwIsrSTIMER1Int;
/////////////////////////////////////
} SISRCounters, *PSISRCounters;
@@ -222,99 +222,99 @@ typedef struct tagSStatCounter {
// RSR status count
//
- DWORD dwRsrFrmAlgnErr;
- DWORD dwRsrErr;
- DWORD dwRsrCRCErr;
- DWORD dwRsrCRCOk;
- DWORD dwRsrBSSIDOk;
- DWORD dwRsrADDROk;
- DWORD dwRsrBCNSSIDOk;
- DWORD dwRsrLENErr;
- DWORD dwRsrTYPErr;
-
- DWORD dwNewRsrDECRYPTOK;
- DWORD dwNewRsrCFP;
- DWORD dwNewRsrUTSF;
- DWORD dwNewRsrHITAID;
- DWORD dwNewRsrHITAID0;
-
- DWORD dwRsrLong;
- DWORD dwRsrRunt;
-
- DWORD dwRsrRxControl;
- DWORD dwRsrRxData;
- DWORD dwRsrRxManage;
-
- DWORD dwRsrRxPacket;
- DWORD dwRsrRxOctet;
- DWORD dwRsrBroadcast;
- DWORD dwRsrMulticast;
- DWORD dwRsrDirected;
+ unsigned long dwRsrFrmAlgnErr;
+ unsigned long dwRsrErr;
+ unsigned long dwRsrCRCErr;
+ unsigned long dwRsrCRCOk;
+ unsigned long dwRsrBSSIDOk;
+ unsigned long dwRsrADDROk;
+ unsigned long dwRsrBCNSSIDOk;
+ unsigned long dwRsrLENErr;
+ unsigned long dwRsrTYPErr;
+
+ unsigned long dwNewRsrDECRYPTOK;
+ unsigned long dwNewRsrCFP;
+ unsigned long dwNewRsrUTSF;
+ unsigned long dwNewRsrHITAID;
+ unsigned long dwNewRsrHITAID0;
+
+ unsigned long dwRsrLong;
+ unsigned long dwRsrRunt;
+
+ unsigned long dwRsrRxControl;
+ unsigned long dwRsrRxData;
+ unsigned long dwRsrRxManage;
+
+ unsigned long dwRsrRxPacket;
+ unsigned long dwRsrRxOctet;
+ unsigned long dwRsrBroadcast;
+ unsigned long dwRsrMulticast;
+ unsigned long dwRsrDirected;
// 64-bit OID
- ULONGLONG ullRsrOK;
+ unsigned long long ullRsrOK;
// for some optional OIDs (64 bits) and DMI support
- ULONGLONG ullRxBroadcastBytes;
- ULONGLONG ullRxMulticastBytes;
- ULONGLONG ullRxDirectedBytes;
- ULONGLONG ullRxBroadcastFrames;
- ULONGLONG ullRxMulticastFrames;
- ULONGLONG ullRxDirectedFrames;
-
- DWORD dwRsrRxFragment;
- DWORD dwRsrRxFrmLen64;
- DWORD dwRsrRxFrmLen65_127;
- DWORD dwRsrRxFrmLen128_255;
- DWORD dwRsrRxFrmLen256_511;
- DWORD dwRsrRxFrmLen512_1023;
- DWORD dwRsrRxFrmLen1024_1518;
+ unsigned long long ullRxBroadcastBytes;
+ unsigned long long ullRxMulticastBytes;
+ unsigned long long ullRxDirectedBytes;
+ unsigned long long ullRxBroadcastFrames;
+ unsigned long long ullRxMulticastFrames;
+ unsigned long long ullRxDirectedFrames;
+
+ unsigned long dwRsrRxFragment;
+ unsigned long dwRsrRxFrmLen64;
+ unsigned long dwRsrRxFrmLen65_127;
+ unsigned long dwRsrRxFrmLen128_255;
+ unsigned long dwRsrRxFrmLen256_511;
+ unsigned long dwRsrRxFrmLen512_1023;
+ unsigned long dwRsrRxFrmLen1024_1518;
// TSR status count
//
- DWORD dwTsrTotalRetry[TYPE_MAXTD]; // total collision retry count
- DWORD dwTsrOnceRetry[TYPE_MAXTD]; // this packet only occur one collision
- DWORD dwTsrMoreThanOnceRetry[TYPE_MAXTD]; // this packet occur more than one collision
- DWORD dwTsrRetry[TYPE_MAXTD]; // this packet has ever occur collision,
+ unsigned long dwTsrTotalRetry[TYPE_MAXTD]; // total collision retry count
+ unsigned long dwTsrOnceRetry[TYPE_MAXTD]; // this packet only occur one collision
+ unsigned long dwTsrMoreThanOnceRetry[TYPE_MAXTD]; // this packet occur more than one collision
+ unsigned long dwTsrRetry[TYPE_MAXTD]; // this packet has ever occur collision,
// that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- DWORD dwTsrACKData[TYPE_MAXTD];
- DWORD dwTsrErr[TYPE_MAXTD];
- DWORD dwAllTsrOK[TYPE_MAXTD];
- DWORD dwTsrRetryTimeout[TYPE_MAXTD];
- DWORD dwTsrTransmitTimeout[TYPE_MAXTD];
-
- DWORD dwTsrTxPacket[TYPE_MAXTD];
- DWORD dwTsrTxOctet[TYPE_MAXTD];
- DWORD dwTsrBroadcast[TYPE_MAXTD];
- DWORD dwTsrMulticast[TYPE_MAXTD];
- DWORD dwTsrDirected[TYPE_MAXTD];
+ unsigned long dwTsrACKData[TYPE_MAXTD];
+ unsigned long dwTsrErr[TYPE_MAXTD];
+ unsigned long dwAllTsrOK[TYPE_MAXTD];
+ unsigned long dwTsrRetryTimeout[TYPE_MAXTD];
+ unsigned long dwTsrTransmitTimeout[TYPE_MAXTD];
+
+ unsigned long dwTsrTxPacket[TYPE_MAXTD];
+ unsigned long dwTsrTxOctet[TYPE_MAXTD];
+ unsigned long dwTsrBroadcast[TYPE_MAXTD];
+ unsigned long dwTsrMulticast[TYPE_MAXTD];
+ unsigned long dwTsrDirected[TYPE_MAXTD];
// RD/TD count
- DWORD dwCntRxFrmLength;
- DWORD dwCntTxBufLength;
+ unsigned long dwCntRxFrmLength;
+ unsigned long dwCntTxBufLength;
- BYTE abyCntRxPattern[16];
- BYTE abyCntTxPattern[16];
+ unsigned char abyCntRxPattern[16];
+ unsigned char abyCntTxPattern[16];
// Software check....
- DWORD dwCntRxDataErr; // rx buffer data software compare CRC err count
- DWORD dwCntDecryptErr; // rx buffer data software compare CRC err count
- DWORD dwCntRxICVErr; // rx buffer data software compare CRC err count
- UINT idxRxErrorDesc[TYPE_MAXRD]; // index for rx data error RD
+ unsigned long dwCntRxDataErr; // rx buffer data software compare CRC err count
+ unsigned long dwCntDecryptErr; // rx buffer data software compare CRC err count
+ unsigned long dwCntRxICVErr; // rx buffer data software compare CRC err count
+ unsigned int idxRxErrorDesc[TYPE_MAXRD]; // index for rx data error RD
// 64-bit OID
- ULONGLONG ullTsrOK[TYPE_MAXTD];
+ unsigned long long ullTsrOK[TYPE_MAXTD];
// for some optional OIDs (64 bits) and DMI support
- ULONGLONG ullTxBroadcastFrames[TYPE_MAXTD];
- ULONGLONG ullTxMulticastFrames[TYPE_MAXTD];
- ULONGLONG ullTxDirectedFrames[TYPE_MAXTD];
- ULONGLONG ullTxBroadcastBytes[TYPE_MAXTD];
- ULONGLONG ullTxMulticastBytes[TYPE_MAXTD];
- ULONGLONG ullTxDirectedBytes[TYPE_MAXTD];
-
-// DWORD dwTxRetryCount[8];
+ unsigned long long ullTxBroadcastFrames[TYPE_MAXTD];
+ unsigned long long ullTxMulticastFrames[TYPE_MAXTD];
+ unsigned long long ullTxDirectedFrames[TYPE_MAXTD];
+ unsigned long long ullTxBroadcastBytes[TYPE_MAXTD];
+ unsigned long long ullTxMulticastBytes[TYPE_MAXTD];
+ unsigned long long ullTxDirectedBytes[TYPE_MAXTD];
+
+// unsigned long dwTxRetryCount[8];
//
// ISR status count
//
@@ -324,15 +324,15 @@ typedef struct tagSStatCounter {
#ifdef Calcu_LinkQual
//Tx count:
- ULONG TxNoRetryOkCount; //success tx no retry !
- ULONG TxRetryOkCount; //success tx but retry !
- ULONG TxFailCount; //fail tx ?
+ unsigned long TxNoRetryOkCount; //success tx no retry !
+ unsigned long TxRetryOkCount; //success tx but retry !
+ unsigned long TxFailCount; //fail tx ?
//Rx count:
- ULONG RxOkCnt; //success rx !
- ULONG RxFcsErrCnt; //fail rx ?
+ unsigned long RxOkCnt; //success rx !
+ unsigned long RxFcsErrCnt; //fail rx ?
//statistic
- ULONG SignalStren;
- ULONG LinkQuality;
+ unsigned long SignalStren;
+ unsigned long LinkQuality;
#endif
} SStatCounter, *PSStatCounter;
@@ -344,30 +344,29 @@ typedef struct tagSStatCounter {
void STAvClearAllCounter(PSStatCounter pStatistic);
-void STAvUpdateIsrStatCounter(PSStatCounter pStatistic, DWORD dwIsr);
+void STAvUpdateIsrStatCounter(PSStatCounter pStatistic, unsigned long dwIsr);
void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- BYTE byRSR, BYTE byNewRSR, BYTE byRxRate,
- PBYTE pbyBuffer, UINT cbFrameLength);
+ unsigned char byRSR, unsigned char byNewRSR, unsigned char byRxRate,
+ unsigned char *pbyBuffer, unsigned int cbFrameLength);
void STAvUpdateRDStatCounterEx(PSStatCounter pStatistic,
- BYTE byRSR, BYTE byNewRsr, BYTE byRxRate,
- PBYTE pbyBuffer, UINT cbFrameLength);
+ unsigned char byRSR, unsigned char byNewRsr, unsigned char byRxRate,
+ unsigned char *pbyBuffer, unsigned int cbFrameLength);
-void STAvUpdateTDStatCounter(PSStatCounter pStatistic,
- BYTE byTSR0, BYTE byTSR1,
- PBYTE pbyBuffer, UINT cbFrameLength, UINT uIdx );
+void STAvUpdateTDStatCounter(PSStatCounter pStatistic, unsigned char byTSR0, unsigned char byTSR1,
+ unsigned char *pbyBuffer, unsigned int cbFrameLength, unsigned int uIdx);
void STAvUpdateTDStatCounterEx(
PSStatCounter pStatistic,
- PBYTE pbyBuffer,
- DWORD cbFrameLength
+ unsigned char *pbyBuffer,
+ unsigned long cbFrameLength
);
void STAvUpdate802_11Counter(
PSDot11Counters p802_11Counter,
PSStatCounter pStatistic,
- DWORD dwCounter
+ unsigned long dwCounter
);
void STAvClear802_11Counter(PSDot11Counters p802_11Counter);
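The hunks in this file, and in every file below, apply one mechanical substitution of the driver's Windows-style typedefs for plain C types. For reference, the mapping visible in the replacements is sketched below as an illustrative struct; this is not part of the patch, the authoritative definitions lived in the driver's own type header, and note that unsigned long is 64 bits on 64-bit kernels while the original DWORD counters held 32-bit values.

#include <linux/types.h>	/* bool, true, false for kernel code */

/* Illustrative only -- field kinds mirror the declarations rewritten above. */
struct vt6655_type_map_example {
	unsigned char		by;	/* was BYTE                        */
	unsigned short		w;	/* was WORD                        */
	unsigned long		dw;	/* was DWORD / ULONG / DWORD_PTR   */
	unsigned long long	ull;	/* was ULONGLONG                   */
	unsigned int		u;	/* was UINT                        */
	bool			b;	/* was BOOL (TRUE/FALSE -> true/false) */
	unsigned char		*pby;	/* was PBYTE                       */
	unsigned long		*pdw;	/* was PDWORD                      */
};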
diff --git a/drivers/staging/vt6655/michael.c b/drivers/staging/vt6655/michael.c
index 0bf57efdede..67618f069d0 100644
--- a/drivers/staging/vt6655/michael.c
+++ b/drivers/staging/vt6655/michael.c
@@ -26,8 +26,8 @@
* Date: Sep 4, 2002
*
* Functions:
- * s_dwGetUINT32 - Convert from BYTE[] to DWORD in a portable way
- * s_vPutUINT32 - Convert from DWORD to BYTE[] in a portable way
+ * s_dwGetUINT32 - Convert from unsigned char [] to unsigned long in a portable way
+ * s_vPutUINT32 - Convert from unsigned long to unsigned char [] in a portable way
* s_vClear - Reset the state to the empty message.
* s_vSetKey - Set the key.
* MIC_vInit - Set the key.
@@ -48,29 +48,29 @@
/*--------------------- Static Functions --------------------------*/
/*
-static DWORD s_dwGetUINT32(BYTE * p); // Get DWORD from 4 bytes LSByte first
-static void s_vPutUINT32(BYTE* p, DWORD val); // Put DWORD into 4 bytes LSByte first
+static unsigned long s_dwGetUINT32(unsigned char *p); // Get unsigned long from 4 bytes LSByte first
+static void s_vPutUINT32(unsigned char *p, unsigned long val); // Put unsigned long into 4 bytes LSByte first
*/
static void s_vClear(void); // Clear the internal message,
// resets the object to the state just after construction.
-static void s_vSetKey(DWORD dwK0, DWORD dwK1);
-static void s_vAppendByte(BYTE b); // Add a single byte to the internal message
+static void s_vSetKey(unsigned long dwK0, unsigned long dwK1);
+static void s_vAppendByte(unsigned char b); // Add a single byte to the internal message
/*--------------------- Export Variables --------------------------*/
-static DWORD L, R; // Current state
+static unsigned long L, R; // Current state
-static DWORD K0, K1; // Key
-static DWORD M; // Message accumulator (single word)
-static UINT nBytesInM; // # bytes in M
+static unsigned long K0, K1; // Key
+static unsigned long M; // Message accumulator (single word)
+static unsigned int nBytesInM; // # bytes in M
/*--------------------- Export Functions --------------------------*/
/*
-static DWORD s_dwGetUINT32 (BYTE * p)
-// Convert from BYTE[] to DWORD in a portable way
+static unsigned long s_dwGetUINT32 (unsigned char *p)
+// Convert from unsigned char [] to unsigned long in a portable way
{
- DWORD res = 0;
- UINT i;
+ unsigned long res = 0;
+ unsigned int i;
for(i=0; i<4; i++ )
{
res |= (*p++) << (8*i);
@@ -78,13 +78,13 @@ static DWORD s_dwGetUINT32 (BYTE * p)
return res;
}
-static void s_vPutUINT32 (BYTE* p, DWORD val)
-// Convert from DWORD to BYTE[] in a portable way
+static void s_vPutUINT32 (unsigned char *p, unsigned long val)
+// Convert from unsigned long to unsigned char [] in a portable way
{
- UINT i;
+ unsigned int i;
for(i=0; i<4; i++ )
{
- *p++ = (BYTE) (val & 0xff);
+ *p++ = (unsigned char) (val & 0xff);
val >>= 8;
}
}
@@ -99,7 +99,7 @@ static void s_vClear (void)
M = 0;
}
-static void s_vSetKey (DWORD dwK0, DWORD dwK1)
+static void s_vSetKey (unsigned long dwK0, unsigned long dwK1)
{
// Set the key
K0 = dwK0;
@@ -108,7 +108,7 @@ static void s_vSetKey (DWORD dwK0, DWORD dwK1)
s_vClear();
}
-static void s_vAppendByte (BYTE b)
+static void s_vAppendByte (unsigned char b)
{
// Append the byte to our word-sized buffer
M |= b << (8*nBytesInM);
@@ -131,7 +131,7 @@ static void s_vAppendByte (BYTE b)
}
}
-void MIC_vInit (DWORD dwK0, DWORD dwK1)
+void MIC_vInit (unsigned long dwK0, unsigned long dwK1)
{
// Set the key
s_vSetKey(dwK0, dwK1);
@@ -149,7 +149,7 @@ void MIC_vUnInit (void)
s_vClear();
}
-void MIC_vAppend (PBYTE src, UINT nBytes)
+void MIC_vAppend (unsigned char *src, unsigned int nBytes)
{
// This is simple
while (nBytes > 0)
@@ -159,7 +159,7 @@ void MIC_vAppend (PBYTE src, UINT nBytes)
}
}
-void MIC_vGetMIC (PDWORD pdwL, PDWORD pdwR)
+void MIC_vGetMIC (unsigned long *pdwL, unsigned long *pdwR)
{
// Append the minimum padding
s_vAppendByte(0x5a);
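The commented-out helpers above pack and unpack a 32-bit word LSByte first. A minimal standalone sketch of the same packing, useful for checking the byte order outside the driver (function names here are mine, not the driver's):

#include <stdio.h>

static unsigned long get_u32_lsb_first(const unsigned char *p)
{
	unsigned long res = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		res |= (unsigned long)p[i] << (8 * i);
	return res;
}

static void put_u32_lsb_first(unsigned char *p, unsigned long val)
{
	unsigned int i;

	for (i = 0; i < 4; i++) {
		p[i] = (unsigned char)(val & 0xff);
		val >>= 8;
	}
}

int main(void)
{
	unsigned char buf[4];

	put_u32_lsb_first(buf, 0x11223344UL);
	printf("%02x %02x %02x %02x -> 0x%lx\n",
	       buf[0], buf[1], buf[2], buf[3], get_u32_lsb_first(buf));
	return 0;	/* prints: 44 33 22 11 -> 0x11223344 */
}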
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
index 97de77b4da2..3131b161d6a 100644
--- a/drivers/staging/vt6655/michael.h
+++ b/drivers/staging/vt6655/michael.h
@@ -35,16 +35,16 @@
/*--------------------- Export Types ------------------------------*/
-void MIC_vInit(DWORD dwK0, DWORD dwK1);
+void MIC_vInit(unsigned long dwK0, unsigned long dwK1);
void MIC_vUnInit(void);
// Append bytes to the message to be MICed
-void MIC_vAppend(PBYTE src, UINT nBytes);
+void MIC_vAppend(unsigned char *src, unsigned int nBytes);
// Get the MIC result. Destination should accept 8 bytes of result.
// This also resets the message to empty.
-void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
+void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR);
/*--------------------- Export Macros ------------------------------*/
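A hedged usage sketch of the MIC interface declared above, showing call order only; key and payload values are placeholders, and because michael.c keeps its state in file-scope statics the interface serves one message at a time.

#include "michael.h"	/* assumes the declarations shown above */

void example_mic(void)
{
	unsigned long dwMIC_L, dwMIC_R;
	unsigned char abyPayload[4] = { 0xde, 0xad, 0xbe, 0xef };

	MIC_vInit(0x01234567UL, 0x89abcdefUL);		/* load K0/K1          */
	MIC_vAppend(abyPayload, sizeof(abyPayload));	/* feed message bytes  */
	MIC_vGetMIC(&dwMIC_L, &dwMIC_R);		/* 8-byte MIC; this also resets the state */
	MIC_vUnInit();
}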
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 64c22c3e4bb..7207aca1301 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -77,12 +77,12 @@ static int msglevel =MSG_LEVEL_INFO;
void
PSvEnablePowerSaving(
void *hDeviceContext,
- WORD wListenInterval
+ unsigned short wListenInterval
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- WORD wAID = pMgmt->wCurrAID | BIT14 | BIT15;
+ unsigned short wAID = pMgmt->wCurrAID | BIT14 | BIT15;
// set period of power up before TBTT
VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
@@ -115,7 +115,7 @@ PSvEnablePowerSaving(
// enable power saving hw function
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
- pDevice->bEnablePSMode = TRUE;
+ pDevice->bEnablePSMode = true;
if (pDevice->eOPMode == OP_MODE_ADHOC) {
// bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
@@ -124,7 +124,7 @@ PSvEnablePowerSaving(
else if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
PSbSendNullPacket(pDevice);
}
- pDevice->bPWBitOn = TRUE;
+ pDevice->bPWBitOn = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS:Power Saving Mode Enable... \n");
return;
}
@@ -161,12 +161,12 @@ PSvDisablePowerSaving(
// set always listen beacon
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
- pDevice->bEnablePSMode = FALSE;
+ pDevice->bEnablePSMode = false;
if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
PSbSendNullPacket(pDevice);
}
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
return;
}
@@ -177,35 +177,35 @@ PSvDisablePowerSaving(
* Consider to power down when no more packets to tx or rx.
*
* Return Value:
- * TRUE, if power down success
- * FALSE, if fail
+ * true, if power down success
+ * false, if fail
-*/
-BOOL
+bool
PSbConsiderPowerDown(
void *hDeviceContext,
- BOOL bCheckRxDMA,
- BOOL bCheckCountToWakeUp
+ bool bCheckRxDMA,
+ bool bCheckCountToWakeUp
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uIdx;
+ unsigned int uIdx;
// check if already in Doze mode
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- return TRUE;
+ return true;
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
// check if in TIM wake period
if (pMgmt->bInTIMWake)
- return FALSE;
+ return false;
}
// check scan state
if (pDevice->bCmdRunning)
- return FALSE;
+ return false;
// Froce PSEN on
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
@@ -213,27 +213,27 @@ PSbConsiderPowerDown(
// check if all TD are empty,
for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx ++) {
if (pDevice->iTDUsed[uIdx] != 0)
- return FALSE;
+ return false;
}
// check if rx isr is clear
if (bCheckRxDMA &&
((pDevice->dwIsr& ISR_RXDMA0) != 0) &&
((pDevice->dwIsr & ISR_RXDMA1) != 0)){
- return FALSE;
+ return false;
};
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
if (bCheckCountToWakeUp &&
(pMgmt->wCountToWakeUp == 0 || pMgmt->wCountToWakeUp == 1)) {
- return FALSE;
+ return false;
}
}
// no Tx, no Rx isr, now go to Doze
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Go to Doze ZZZZZZZZZZZZZZZ\n");
- return TRUE;
+ return true;
}
@@ -262,7 +262,7 @@ PSvSendPSPOLL(
memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN);
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16(
(
WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) |
@@ -296,7 +296,7 @@ PSvSendPSPOLL(
* None.
*
-*/
-BOOL
+bool
PSbSendNullPacket(
void *hDeviceContext
)
@@ -304,32 +304,32 @@ PSbSendNullPacket(
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket = NULL;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT uIdx;
+ unsigned int uIdx;
- if (pDevice->bLinkPass == FALSE) {
- return FALSE;
+ if (pDevice->bLinkPass == false) {
+ return false;
}
#ifdef TxInSleep
- if ((pDevice->bEnablePSMode == FALSE) &&
- (pDevice->fTxDataInSleep == FALSE)){
- return FALSE;
+ if ((pDevice->bEnablePSMode == false) &&
+ (pDevice->fTxDataInSleep == false)){
+ return false;
}
#else
- if (pDevice->bEnablePSMode == FALSE) {
- return FALSE;
+ if (pDevice->bEnablePSMode == false) {
+ return false;
}
#endif
if (pDevice->bEnablePSMode) {
for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx ++) {
if (pDevice->iTDUsed[uIdx] != 0)
- return FALSE;
+ return false;
}
}
memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
if (pDevice->bEnablePSMode) {
@@ -350,7 +350,7 @@ PSbSendNullPacket(
}
if(pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
- pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((WORD)WLAN_SET_FC_TODS(1));
+ pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_TODS(1));
}
memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
@@ -361,7 +361,7 @@ PSbSendNullPacket(
// send the frame
if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n");
- return FALSE;
+ return false;
}
else {
@@ -369,7 +369,7 @@ PSbSendNullPacket(
}
- return TRUE ;
+ return true ;
}
/*+
@@ -382,7 +382,7 @@ PSbSendNullPacket(
*
-*/
-BOOL
+bool
PSbIsNextTBTTWakeUp(
void *hDeviceContext
)
@@ -390,7 +390,7 @@ PSbIsNextTBTTWakeUp(
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- BOOL bWakeUp = FALSE;
+ bool bWakeUp = false;
if (pMgmt->wListenInterval >= 2) {
if (pMgmt->wCountToWakeUp == 0) {
@@ -402,7 +402,7 @@ PSbIsNextTBTTWakeUp(
if (pMgmt->wCountToWakeUp == 1) {
// Turn on wake up to listen next beacon
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
- bWakeUp = TRUE;
+ bWakeUp = true;
}
}
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index c0dbe216e97..01013b59228 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -48,11 +48,11 @@
// PSDevice pDevice
// PSDevice hDeviceContext
-BOOL
+bool
PSbConsiderPowerDown(
void *hDeviceContext,
- BOOL bCheckRxDMA,
- BOOL bCheckCountToWakeUp
+ bool bCheckRxDMA,
+ bool bCheckCountToWakeUp
);
void
@@ -63,7 +63,7 @@ PSvDisablePowerSaving(
void
PSvEnablePowerSaving(
void *hDeviceContext,
- WORD wListenInterval
+ unsigned short wListenInterval
);
void
@@ -71,12 +71,12 @@ PSvSendPSPOLL(
void *hDeviceContext
);
-BOOL
+bool
PSbSendNullPacket(
void *hDeviceContext
);
-BOOL
+bool
PSbIsNextTBTTWakeUp(
void *hDeviceContext
);
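The doze decision in PSbConsiderPowerDown() above reads as a chain of early false returns. A condensed sketch of that gating order, with the driver state flattened into plain parameters; this is not the driver's interface, and it folds the IBSS-mode and bCheck* qualifiers into single flags.

#include <stdbool.h>

bool consider_power_down(bool already_dozing, bool in_tim_wake,
			 bool cmd_running, unsigned int td_in_use,
			 bool rx_isr_pending, bool wake_soon)
{
	if (already_dozing)
		return true;			/* PSCTL_PS already set          */
	if (in_tim_wake || cmd_running)
		return false;			/* stay awake for TIM / scan     */
	if (td_in_use || rx_isr_pending || wake_soon)
		return false;			/* traffic pending or beacon due */
	/* the real code now sets PSCTL_GO2DOZE on the MAC */
	return true;
}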
diff --git a/drivers/staging/vt6655/rc4.c b/drivers/staging/vt6655/rc4.c
index 4a53f159cb3..9856c08b3d7 100644
--- a/drivers/staging/vt6655/rc4.c
+++ b/drivers/staging/vt6655/rc4.c
@@ -32,38 +32,38 @@
#include "rc4.h"
-void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len)
+void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len)
{
- UINT ust1, ust2;
- UINT keyindex;
- UINT stateindex;
- PBYTE pbyst;
- UINT idx;
+ unsigned int ust1, ust2;
+ unsigned int keyindex;
+ unsigned int stateindex;
+ unsigned char *pbyst;
+ unsigned int idx;
pbyst = pRC4->abystate;
pRC4->ux = 0;
pRC4->uy = 0;
for (idx = 0; idx < 256; idx++)
- pbyst[idx] = (BYTE)idx;
+ pbyst[idx] = (unsigned char)idx;
keyindex = 0;
stateindex = 0;
for (idx = 0; idx < 256; idx++) {
ust1 = pbyst[idx];
stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff;
ust2 = pbyst[stateindex];
- pbyst[stateindex] = (BYTE)ust1;
- pbyst[idx] = (BYTE)ust2;
+ pbyst[stateindex] = (unsigned char)ust1;
+ pbyst[idx] = (unsigned char)ust2;
if (++keyindex >= cbKey_len)
keyindex = 0;
}
}
-UINT rc4_byte(PRC4Ext pRC4)
+unsigned int rc4_byte(PRC4Ext pRC4)
{
- UINT ux;
- UINT uy;
- UINT ustx, usty;
- PBYTE pbyst;
+ unsigned int ux;
+ unsigned int uy;
+ unsigned int ustx, usty;
+ unsigned char *pbyst;
pbyst = pRC4->abystate;
ux = (pRC4->ux + 1) & 0xff;
@@ -72,16 +72,16 @@ UINT rc4_byte(PRC4Ext pRC4)
usty = pbyst[uy];
pRC4->ux = ux;
pRC4->uy = uy;
- pbyst[uy] = (BYTE)ustx;
- pbyst[ux] = (BYTE)usty;
+ pbyst[uy] = (unsigned char)ustx;
+ pbyst[ux] = (unsigned char)usty;
return pbyst[(ustx + usty) & 0xff];
}
-void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest,
- PBYTE pbySrc, UINT cbData_len)
+void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest,
+ unsigned char *pbySrc, unsigned int cbData_len)
{
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < cbData_len; ii++)
- pbyDest[ii] = (BYTE)(pbySrc[ii] ^ rc4_byte(pRC4));
+ pbyDest[ii] = (unsigned char)(pbySrc[ii] ^ rc4_byte(pRC4));
}
diff --git a/drivers/staging/vt6655/rc4.h b/drivers/staging/vt6655/rc4.h
index e65cae69efa..ad04e351365 100644
--- a/drivers/staging/vt6655/rc4.h
+++ b/drivers/staging/vt6655/rc4.h
@@ -35,13 +35,13 @@
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Types ------------------------------*/
typedef struct {
- UINT ux;
- UINT uy;
- BYTE abystate[256];
+ unsigned int ux;
+ unsigned int uy;
+ unsigned char abystate[256];
} RC4Ext, *PRC4Ext;
-void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len);
-UINT rc4_byte(PRC4Ext pRC4);
-void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest, PBYTE pbySrc, UINT cbData_len);
+void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len);
+unsigned int rc4_byte(PRC4Ext pRC4);
+void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest, unsigned char *pbySrc, unsigned int cbData_len);
#endif //__RC4_H__
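A usage sketch of the RC4 helpers declared above. The key-scheduling loop shown in rc4.c matches textbook RC4, so, assuming the partially elided output stage does as well, the commonly cited test vector (key "Key", plaintext "Plaintext") should encrypt to bb f3 16 e8 d9 40 af 0a d3.

#include <stdio.h>
#include "rc4.h"	/* assumes the declarations shown above */

int main(void)
{
	RC4Ext rc4;
	unsigned char abyKey[] = "Key";		/* 3 key bytes, NUL unused */
	unsigned char abyIn[]  = "Plaintext";
	unsigned char abyOut[9];
	unsigned int ii;

	rc4_init(&rc4, abyKey, 3);
	rc4_encrypt(&rc4, abyOut, abyIn, 9);

	for (ii = 0; ii < 9; ii++)
		printf("%02x ", abyOut[ii]);
	printf("\n");
	return 0;
}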
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 7cb86fe2eeb..b8ec783e55e 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -94,7 +94,7 @@
-const DWORD dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
+const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
@@ -112,7 +112,7 @@ const DWORD dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};
-const DWORD dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
+const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -129,7 +129,7 @@ const DWORD dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M
};
-const DWORD dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
+const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -146,7 +146,7 @@ const DWORD dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M
};
-DWORD dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
+unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
@@ -216,7 +216,7 @@ DWORD dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
//{{ RobertYu:20050104
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
-const DWORD dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
+const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 451FE2
@@ -239,7 +239,7 @@ const DWORD dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11a: 12BACF
};
-const DWORD dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
+const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
@@ -259,7 +259,7 @@ const DWORD dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
};
-const DWORD dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
+const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -325,7 +325,7 @@ const DWORD dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
};
-const DWORD dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
+const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -389,7 +389,7 @@ const DWORD dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
};
-const DWORD dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
+const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -471,15 +471,15 @@ const DWORD dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL s_bAL7230Init (DWORD_PTR dwIoBase)
+bool s_bAL7230Init (unsigned long dwIoBase)
{
int ii;
- BOOL bResult;
+ bool bResult;
- bResult = TRUE;
+ bResult = true;
//3-wire control for normal mode
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
@@ -517,11 +517,11 @@ BOOL s_bAL7230Init (DWORD_PTR dwIoBase)
}
// Need to Pull PLLON low when writing channel registers through 3-wire interface
-BOOL s_bAL7230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
+bool s_bAL7230SelectChannel (unsigned long dwIoBase, unsigned char byChannel)
{
- BOOL bResult;
+ bool bResult;
- bResult = TRUE;
+ bResult = true;
// PLLON Off
MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
@@ -552,7 +552,7 @@ BOOL s_bAL7230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -567,7 +567,7 @@ BOOL s_bAL7230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -585,7 +585,7 @@ BOOL s_bAL7230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -599,7 +599,7 @@ BOOL s_bAL7230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -619,13 +619,13 @@ BOOL s_bAL7230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL IFRFbWriteEmbeded (DWORD_PTR dwIoBase, DWORD dwData)
+bool IFRFbWriteEmbeded (unsigned long dwIoBase, unsigned long dwData)
{
- WORD ww;
- DWORD dwValue;
+ unsigned short ww;
+ unsigned long dwValue;
VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);
@@ -638,9 +638,9 @@ BOOL IFRFbWriteEmbeded (DWORD_PTR dwIoBase, DWORD dwData)
if (ww == W_MAX_TIMEOUT) {
// DBG_PORT80_ALWAYS(0x32);
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
@@ -654,7 +654,7 @@ BOOL IFRFbWriteEmbeded (DWORD_PTR dwIoBase, DWORD dwData)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -668,7 +668,7 @@ BOOL IFRFbWriteEmbeded (DWORD_PTR dwIoBase, DWORD dwData)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -681,15 +681,15 @@ BOOL IFRFbWriteEmbeded (DWORD_PTR dwIoBase, DWORD dwData)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbAL2230Init (DWORD_PTR dwIoBase)
+bool RFbAL2230Init (unsigned long dwIoBase)
{
int ii;
- BOOL bResult;
+ bool bResult;
- bResult = TRUE;
+ bResult = true;
//3-wire control for normal mode
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
@@ -734,11 +734,11 @@ MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us
return bResult;
}
-BOOL RFbAL2230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
+bool RFbAL2230SelectChannel (unsigned long dwIoBase, unsigned char byChannel)
{
- BOOL bResult;
+ bool bResult;
- bResult = TRUE;
+ bResult = true;
bResult &= IFRFbWriteEmbeded (dwIoBase, dwAL2230ChannelTable0[byChannel-1]);
bResult &= IFRFbWriteEmbeded (dwIoBase, dwAL2230ChannelTable1[byChannel-1]);
@@ -761,7 +761,7 @@ BOOL RFbAL2230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -776,7 +776,7 @@ BOOL RFbAL2230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -790,7 +790,7 @@ BOOL RFbAL2230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
@@ -804,14 +804,14 @@ BOOL RFbAL2230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbInit (
+bool RFbInit (
PSDevice pDevice
)
{
-BOOL bResult = TRUE;
+bool bResult = true;
switch (pDevice->byRFType) {
case RF_AIROHA :
case RF_AL2230S:
@@ -823,10 +823,10 @@ BOOL bResult = TRUE;
bResult = s_bAL7230Init(pDevice->PortOffset);
break;
case RF_NOTHING :
- bResult = TRUE;
+ bResult = true;
break;
default :
- bResult = FALSE;
+ bResult = false;
break;
}
return bResult;
@@ -842,21 +842,21 @@ BOOL bResult = TRUE;
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbShutDown (
+bool RFbShutDown (
PSDevice pDevice
)
{
-BOOL bResult = TRUE;
+bool bResult = true;
switch (pDevice->byRFType) {
case RF_AIROHA7230 :
bResult = IFRFbWriteEmbeded (pDevice->PortOffset, 0x1ABAEF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW);
break;
default :
- bResult = TRUE;
+ bResult = true;
break;
}
return bResult;
@@ -872,12 +872,12 @@ BOOL bResult = TRUE;
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbSelectChannel (DWORD_PTR dwIoBase, BYTE byRFType, BYTE byChannel)
+bool RFbSelectChannel (unsigned long dwIoBase, unsigned char byRFType, unsigned char byChannel)
{
-BOOL bResult = TRUE;
+bool bResult = true;
switch (byRFType) {
case RF_AIROHA :
@@ -890,10 +890,10 @@ BOOL bResult = TRUE;
break;
//}} RobertYu
case RF_NOTHING :
- bResult = TRUE;
+ bResult = true;
break;
default:
- bResult = FALSE;
+ bResult = false;
break;
}
return bResult;
@@ -911,11 +911,11 @@ BOOL bResult = TRUE;
* Return Value: None.
*
*/
-BOOL RFvWriteWakeProgSyn (DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel)
+bool RFvWriteWakeProgSyn (unsigned long dwIoBase, unsigned char byRFType, unsigned int uChannel)
{
int ii;
- BYTE byInitCount = 0;
- BYTE bySleepCount = 0;
+ unsigned char byInitCount = 0;
+ unsigned char bySleepCount = 0;
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0);
switch (byRFType) {
@@ -923,20 +923,20 @@ BOOL RFvWriteWakeProgSyn (DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel)
case RF_AL2230S:
if (uChannel > CB_MAX_CHANNEL_24G)
- return FALSE;
+ return false;
byInitCount = CB_AL2230_INIT_SEQ + 2; // Init Reg + Channel Reg (2)
bySleepCount = 0;
if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount)) {
- return FALSE;
+ return false;
}
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++ ) {
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
}
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
ii ++;
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
break;
//{{ RobertYu: 20050104
@@ -945,42 +945,42 @@ BOOL RFvWriteWakeProgSyn (DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel)
byInitCount = CB_AL7230_INIT_SEQ + 3; // Init Reg + Channel Reg (3)
bySleepCount = 0;
if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount)) {
- return FALSE;
+ return false;
}
if (uChannel <= CB_MAX_CHANNEL_24G)
{
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++ ) {
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
}
}
else
{
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++ ) {
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
}
}
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
ii ++;
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
ii ++;
- MACvSetMISCFifo(dwIoBase, (WORD)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
+ MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
break;
//}} RobertYu
case RF_NOTHING :
- return TRUE;
+ return true;
break;
default:
- return FALSE;
+ return false;
break;
}
- MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (DWORD)MAKEWORD(bySleepCount, byInitCount));
+ MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (unsigned long )MAKEWORD(bySleepCount, byInitCount));
- return TRUE;
+ return true;
}
/*
@@ -993,25 +993,25 @@ BOOL RFvWriteWakeProgSyn (DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbSetPower (
+bool RFbSetPower (
PSDevice pDevice,
- UINT uRATE,
- UINT uCH
+ unsigned int uRATE,
+ unsigned int uCH
)
{
-BOOL bResult = TRUE;
-BYTE byPwr = 0;
-BYTE byDec = 0;
-BYTE byPwrdBm = 0;
+bool bResult = true;
+unsigned char byPwr = 0;
+unsigned char byDec = 0;
+unsigned char byPwrdBm = 0;
if (pDevice->dwDiagRefCount != 0) {
- return TRUE;
+ return true;
}
if ((uCH < 1) || (uCH > CB_MAX_CHANNEL)) {
- return FALSE;
+ return false;
}
switch (uRATE) {
@@ -1070,7 +1070,7 @@ BYTE byPwrdBm = 0;
#if 0
// 802.11h TPC
- if (pDevice->bLinkPass == TRUE) {
+ if (pDevice->bLinkPass == true) {
// do not over local constraint
if (byPwrdBm > pDevice->abyLocalPwr[uCH]) {
pDevice->byCurPwrdBm = pDevice->abyLocalPwr[uCH];
@@ -1111,11 +1111,11 @@ BYTE byPwrdBm = 0;
// if (pDevice->byLocalID <= REV_ID_VT3253_B1) {
if (pDevice->byCurPwr == byPwr) {
- return TRUE;
+ return true;
}
bResult = RFbRawSetPower(pDevice, byPwr, uRATE);
// }
- if (bResult == TRUE) {
+ if (bResult == true) {
pDevice->byCurPwr = byPwr;
}
return bResult;
@@ -1131,21 +1131,21 @@ BYTE byPwrdBm = 0;
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL RFbRawSetPower (
+bool RFbRawSetPower (
PSDevice pDevice,
- BYTE byPwr,
- UINT uRATE
+ unsigned char byPwr,
+ unsigned int uRATE
)
{
-BOOL bResult = TRUE;
-DWORD dwMax7230Pwr = 0;
+bool bResult = true;
+unsigned long dwMax7230Pwr = 0;
if (byPwr >= pDevice->byMaxPwrLevel) {
- return (FALSE);
+ return (false);
}
switch (pDevice->byRFType) {
@@ -1204,14 +1204,14 @@ DWORD dwMax7230Pwr = 0;
void
RFvRSSITodBm (
PSDevice pDevice,
- BYTE byCurrRSSI,
+ unsigned char byCurrRSSI,
long * pldBm
)
{
- BYTE byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
- LONG b = (byCurrRSSI & 0x3F);
- LONG a = 0;
- BYTE abyAIROHARF[4] = {0, 18, 0, 40};
+ unsigned char byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
+ long b = (byCurrRSSI & 0x3F);
+ long a = 0;
+ unsigned char abyAIROHARF[4] = {0, 18, 0, 40};
switch (pDevice->byRFType) {
case RF_AIROHA:
@@ -1232,11 +1232,11 @@ RFvRSSITodBm (
// Post processing for the 11b/g and 11a.
// for save time on changing Reg2,3,5,7,10,12,15
-BOOL RFbAL7230SelectChannelPostProcess (DWORD_PTR dwIoBase, BYTE byOldChannel, BYTE byNewChannel)
+bool RFbAL7230SelectChannelPostProcess (unsigned long dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel)
{
- BOOL bResult;
+ bool bResult;
- bResult = TRUE;
+ bResult = true;
// if change between 11 b/g and 11a need to update the following register
// Channel Index 1~14
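The RF init paths above accumulate success with bResult &= IFRFbWriteEmbeded(...). That idiom survives the BOOL-to-bool switch because both operands are always 0 or 1, so bitwise AND behaves as a running logical AND. A standalone sketch of the pattern, with write_reg standing in for IFRFbWriteEmbeded and the table values taken from the AL2230 init sequence above:

#include <stdbool.h>
#include <stdio.h>

static bool write_reg(unsigned long dwData)
{
	return dwData != 0;	/* stand-in: pretend a zero word fails */
}

int main(void)
{
	static const unsigned long adwTable[] = { 0x03F79000, 0x03333100, 0x0 };
	bool bResult = true;
	unsigned int ii;

	for (ii = 0; ii < sizeof(adwTable) / sizeof(adwTable[0]); ii++)
		bResult &= write_reg(adwTable[ii]);

	printf("init %s\n", bResult ? "ok" : "failed");	/* prints "failed" */
	return 0;
}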
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 25dfc7942f6..1f8d82e1304 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -76,28 +76,28 @@
/*--------------------- Export Functions --------------------------*/
-BOOL IFRFbWriteEmbeded(DWORD_PTR dwIoBase, DWORD dwData);
-BOOL RFbSelectChannel(DWORD_PTR dwIoBase, BYTE byRFType, BYTE byChannel);
-BOOL RFbInit (
+bool IFRFbWriteEmbeded(unsigned long dwIoBase, unsigned long dwData);
+bool RFbSelectChannel(unsigned long dwIoBase, unsigned char byRFType, unsigned char byChannel);
+bool RFbInit (
PSDevice pDevice
);
-BOOL RFvWriteWakeProgSyn(DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel);
-BOOL RFbSetPower(PSDevice pDevice, UINT uRATE, UINT uCH);
-BOOL RFbRawSetPower(
+bool RFvWriteWakeProgSyn(unsigned long dwIoBase, unsigned char byRFType, unsigned int uChannel);
+bool RFbSetPower(PSDevice pDevice, unsigned int uRATE, unsigned int uCH);
+bool RFbRawSetPower(
PSDevice pDevice,
- BYTE byPwr,
- UINT uRATE
+ unsigned char byPwr,
+ unsigned int uRATE
);
void
RFvRSSITodBm(
PSDevice pDevice,
- BYTE byCurrRSSI,
+ unsigned char byCurrRSSI,
long *pldBm
);
//{{ RobertYu: 20050104
-BOOL RFbAL7230SelectChannelPostProcess(DWORD_PTR dwIoBase, BYTE byOldChannel, BYTE byNewChannel);
+bool RFbAL7230SelectChannelPostProcess(unsigned long dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel);
//}} RobertYu
#endif // __RF_H__
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index a0445c3427e..c920cf69405 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -80,16 +80,16 @@ static int msglevel =MSG_LEVEL_INFO;
#define CRITICAL_PACKET_LEN 256 // if packet size < 256 -> in-direct send
// packet size >= 256 -> direct send
-const WORD wTimeStampOff[2][MAX_RATE] = {
+const unsigned short wTimeStampOff[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, // Long Preamble
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, // Short Preamble
};
-const WORD wFB_Opt0[2][5] = {
+const unsigned short wFB_Opt0[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, // fallback_rate0
{RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, // fallback_rate1
};
-const WORD wFB_Opt1[2][5] = {
+const unsigned short wFB_Opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, // fallback_rate0
{RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, // fallback_rate1
};
@@ -118,12 +118,12 @@ static
void
s_vFillTxKey(
PSDevice pDevice,
- PBYTE pbyBuf,
- PBYTE pbyIVHead,
+ unsigned char *pbyBuf,
+ unsigned char *pbyIVHead,
PSKeyItem pTransmitKey,
- PBYTE pbyHdrBuf,
- WORD wPayloadLen,
- PBYTE pMICHDR
+ unsigned char *pbyHdrBuf,
+ unsigned short wPayloadLen,
+ unsigned char *pMICHDR
);
@@ -132,76 +132,65 @@ static
void
s_vFillRTSHead(
PSDevice pDevice,
- BYTE byPktType,
+ unsigned char byPktType,
void * pvRTS,
- UINT cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
+ unsigned int cbFrameLength,
+ bool bNeedAck,
+ bool bDisCRC,
PSEthernetHeader psEthHeader,
- WORD wCurrentRate,
- BYTE byFBOption
+ unsigned short wCurrentRate,
+ unsigned char byFBOption
);
static
void
s_vGenerateTxParameter(
PSDevice pDevice,
- BYTE byPktType,
+ unsigned char byPktType,
void * pTxBufHead,
void * pvRrvTime,
void * pvRTS,
void * pvCTS,
- UINT cbFrameSize,
- BOOL bNeedACK,
- UINT uDMAIdx,
+ unsigned int cbFrameSize,
+ bool bNeedACK,
+ unsigned int uDMAIdx,
PSEthernetHeader psEthHeader,
- WORD wCurrentRate
+ unsigned short wCurrentRate
);
static void s_vFillFragParameter(
PSDevice pDevice,
- PBYTE pbyBuffer,
- UINT uTxType,
+ unsigned char *pbyBuffer,
+ unsigned int uTxType,
void * pvtdCurr,
- WORD wFragType,
- UINT cbReqCount
+ unsigned short wFragType,
+ unsigned int cbReqCount
);
-static
-UINT
-s_cbFillTxBufHead (
- PSDevice pDevice,
- BYTE byPktType,
- PBYTE pbyTxBufferAddr,
- UINT cbFrameBodySize,
- UINT uDMAIdx,
- PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader,
- PBYTE pPacket,
- BOOL bNeedEncrypt,
- PSKeyItem pTransmitKey,
- UINT uNodeIndex,
- PUINT puMACfragNum
- );
+static unsigned int
+s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
+ unsigned int cbFrameBodySize, unsigned int uDMAIdx, PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader, unsigned char *pPacket, bool bNeedEncrypt,
+ PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum);
static
-UINT
+unsigned int
s_uFillDataHead (
PSDevice pDevice,
- BYTE byPktType,
+ unsigned char byPktType,
void * pTxDataHead,
- UINT cbFrameLength,
- UINT uDMAIdx,
- BOOL bNeedAck,
- UINT uFragIdx,
- UINT cbLastFragmentSize,
- UINT uMACfragNum,
- BYTE byFBOption,
- WORD wCurrentRate
+ unsigned int cbFrameLength,
+ unsigned int uDMAIdx,
+ bool bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ unsigned char byFBOption,
+ unsigned short wCurrentRate
);
@@ -213,20 +202,20 @@ static
void
s_vFillTxKey (
PSDevice pDevice,
- PBYTE pbyBuf,
- PBYTE pbyIVHead,
+ unsigned char *pbyBuf,
+ unsigned char *pbyIVHead,
PSKeyItem pTransmitKey,
- PBYTE pbyHdrBuf,
- WORD wPayloadLen,
- PBYTE pMICHDR
+ unsigned char *pbyHdrBuf,
+ unsigned short wPayloadLen,
+ unsigned char *pMICHDR
)
{
- PDWORD pdwIV = (PDWORD) pbyIVHead;
- PDWORD pdwExtIV = (PDWORD) ((PBYTE)pbyIVHead+4);
- WORD wValue;
+ unsigned long *pdwIV = (unsigned long *) pbyIVHead;
+ unsigned long *pdwExtIV = (unsigned long *) ((unsigned char *)pbyIVHead+4);
+ unsigned short wValue;
PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
- DWORD dwRevIVCounter;
- BYTE byKeyIndex = 0;
+ unsigned long dwRevIVCounter;
+ unsigned char byKeyIndex = 0;
@@ -240,13 +229,13 @@ s_vFillTxKey (
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN ){
- memcpy(pDevice->abyPRNG, (PBYTE)&(dwRevIVCounter), 3);
+ memcpy(pDevice->abyPRNG, (unsigned char *)&(dwRevIVCounter), 3);
memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
} else {
- memcpy(pbyBuf, (PBYTE)&(dwRevIVCounter), 3);
+ memcpy(pbyBuf, (unsigned char *)&(dwRevIVCounter), 3);
memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
if(pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
- memcpy(pbyBuf+8, (PBYTE)&(dwRevIVCounter), 3);
+ memcpy(pbyBuf+8, (unsigned char *)&(dwRevIVCounter), 3);
memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
}
memcpy(pDevice->abyPRNG, pbyBuf, 16);
@@ -270,7 +259,7 @@ s_vFillTxKey (
// Make IV
memcpy(pdwIV, pDevice->abyPRNG, 3);
- *(pbyIVHead+3) = (BYTE)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
+ *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
// Append IV&ExtIV after Mac Header
*pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
@@ -284,33 +273,33 @@ s_vFillTxKey (
// Make IV
*pdwIV = 0;
- *(pbyIVHead+3) = (BYTE)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- *pdwIV |= cpu_to_le16((WORD)(pTransmitKey->wTSC15_0));
+ *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
+ *pdwIV |= cpu_to_le16((unsigned short)(pTransmitKey->wTSC15_0));
//Append IV&ExtIV after Mac Header
*pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
//Fill MICHDR0
*pMICHDR = 0x59;
- *((PBYTE)(pMICHDR+1)) = 0; // TxPriority
+ *((unsigned char *)(pMICHDR+1)) = 0; // TxPriority
memcpy(pMICHDR+2, &(pMACHeader->abyAddr2[0]), 6);
- *((PBYTE)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16));
- *((PBYTE)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16));
- *((PBYTE)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16));
- *((PBYTE)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16));
- *((PBYTE)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0);
- *((PBYTE)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0);
- *((PBYTE)(pMICHDR+14)) = HIBYTE(wPayloadLen);
- *((PBYTE)(pMICHDR+15)) = LOBYTE(wPayloadLen);
+ *((unsigned char *)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16));
+ *((unsigned char *)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16));
+ *((unsigned char *)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16));
+ *((unsigned char *)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16));
+ *((unsigned char *)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0);
+ *((unsigned char *)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0);
+ *((unsigned char *)(pMICHDR+14)) = HIBYTE(wPayloadLen);
+ *((unsigned char *)(pMICHDR+15)) = LOBYTE(wPayloadLen);
//Fill MICHDR1
- *((PBYTE)(pMICHDR+16)) = 0; // HLEN[15:8]
+ *((unsigned char *)(pMICHDR+16)) = 0; // HLEN[15:8]
if (pDevice->bLongHeader) {
- *((PBYTE)(pMICHDR+17)) = 28; // HLEN[7:0]
+ *((unsigned char *)(pMICHDR+17)) = 28; // HLEN[7:0]
} else {
- *((PBYTE)(pMICHDR+17)) = 22; // HLEN[7:0]
+ *((unsigned char *)(pMICHDR+17)) = 22; // HLEN[7:0]
}
wValue = cpu_to_le16(pMACHeader->wFrameCtl & 0xC78F);
- memcpy(pMICHDR+18, (PBYTE)&wValue, 2); // MSKFRACTL
+ memcpy(pMICHDR+18, (unsigned char *)&wValue, 2); // MSKFRACTL
memcpy(pMICHDR+20, &(pMACHeader->abyAddr1[0]), 6);
memcpy(pMICHDR+26, &(pMACHeader->abyAddr2[0]), 6);
@@ -319,7 +308,7 @@ s_vFillTxKey (
wValue = pMACHeader->wSeqCtl;
wValue &= 0x000F;
wValue = cpu_to_le16(wValue);
- memcpy(pMICHDR+38, (PBYTE)&wValue, 2); // MSKSEQCTL
+ memcpy(pMICHDR+38, (unsigned char *)&wValue, 2); // MSKSEQCTL
if (pDevice->bLongHeader) {
memcpy(pMICHDR+40, &(pMACHeader->abyAddr4[0]), 6);
}
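In the IV construction above, the byte written at pbyIVHead+3 packs the key index into bits 7:6 and sets 0x20 as the ExtIV flag, as the code comments note. A small standalone check of the resulting values:

#include <stdio.h>

int main(void)
{
	unsigned char byKeyIndex;

	/* bits 7:6 = key index, bit 0x20 = ExtIV, per s_vFillTxKey() above */
	for (byKeyIndex = 0; byKeyIndex < 4; byKeyIndex++)
		printf("key index %u -> IV[3] = 0x%02x\n",
		       (unsigned int)byKeyIndex,
		       (unsigned int)(((byKeyIndex << 6) & 0xc0) | 0x20));
	return 0;	/* 0x20, 0x60, 0xa0, 0xe0 */
}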
@@ -332,13 +321,13 @@ void
s_vSWencryption (
PSDevice pDevice,
PSKeyItem pTransmitKey,
- PBYTE pbyPayloadHead,
- WORD wPayloadSize
+ unsigned char *pbyPayloadHead,
+ unsigned short wPayloadSize
)
{
- UINT cbICVlen = 4;
- DWORD dwICV = 0xFFFFFFFFL;
- PDWORD pdwICV;
+ unsigned int cbICVlen = 4;
+ unsigned long dwICV = 0xFFFFFFFFL;
+ unsigned long *pdwICV;
if (pTransmitKey == NULL)
return;
@@ -347,7 +336,7 @@ s_vSWencryption (
//=======================================================================
// Append ICV after payload
dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
- pdwICV = (PDWORD)(pbyPayloadHead + wPayloadSize);
+ pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
// finally, we must invert dwCRC to get the correct answer
*pdwICV = cpu_to_le32(~dwICV);
// RC4 encryption
@@ -358,7 +347,7 @@ s_vSWencryption (
//=======================================================================
//Append ICV after payload
dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
- pdwICV = (PDWORD)(pbyPayloadHead + wPayloadSize);
+ pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
// finally, we must invert dwCRC to get the correct answer
*pdwICV = cpu_to_le32(~dwICV);
// RC4 encryption
@@ -377,25 +366,25 @@ s_vSWencryption (
PK_TYPE_11GA 3
*/
static
-UINT
+unsigned int
s_uGetTxRsvTime (
PSDevice pDevice,
- BYTE byPktType,
- UINT cbFrameLength,
- WORD wRate,
- BOOL bNeedAck
+ unsigned char byPktType,
+ unsigned int cbFrameLength,
+ unsigned short wRate,
+ bool bNeedAck
)
{
- UINT uDataTime, uAckTime;
+ unsigned int uDataTime, uAckTime;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
#ifdef PLICE_DEBUG
//printk("s_uGetTxRsvTime is %d\n",uDataTime);
#endif
if (byPktType == PK_TYPE_11B) {//llb,CCK mode
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (WORD)pDevice->byTopCCKBasicRate);
+ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopCCKBasicRate);
} else {//11g 2.4G OFDM mode & 11a 5G OFDM mode
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (WORD)pDevice->byTopOFDMBasicRate);
+ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopOFDMBasicRate);
}
if (bNeedAck) {
@@ -408,16 +397,16 @@ s_uGetTxRsvTime (
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
-UINT
+unsigned int
s_uGetRTSCTSRsvTime (
PSDevice pDevice,
- BYTE byRTSRsvType,
- BYTE byPktType,
- UINT cbFrameLength,
- WORD wCurrentRate
+ unsigned char byRTSRsvType,
+ unsigned char byPktType,
+ unsigned int cbFrameLength,
+ unsigned short wCurrentRate
)
{
- UINT uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
+ unsigned int uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
@@ -450,22 +439,22 @@ s_uGetRTSCTSRsvTime (
//byFreqType 0: 5GHz, 1:2.4Ghz
static
-UINT
+unsigned int
s_uGetDataDuration (
PSDevice pDevice,
- BYTE byDurType,
- UINT cbFrameLength,
- BYTE byPktType,
- WORD wRate,
- BOOL bNeedAck,
- UINT uFragIdx,
- UINT cbLastFragmentSize,
- UINT uMACfragNum,
- BYTE byFBOption
+ unsigned char byDurType,
+ unsigned int cbFrameLength,
+ unsigned char byPktType,
+ unsigned short wRate,
+ bool bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ unsigned char byFBOption
)
{
- BOOL bLastFrag = 0;
- UINT uAckTime =0, uNextPktTime = 0;
+ bool bLastFrag = 0;
+ unsigned int uAckTime =0, uNextPktTime = 0;
@@ -614,25 +603,25 @@ s_uGetDataDuration (
break;
}
- ASSERT(FALSE);
+ ASSERT(false);
return 0;
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
-UINT
+unsigned int
s_uGetRTSCTSDuration (
PSDevice pDevice,
- BYTE byDurType,
- UINT cbFrameLength,
- BYTE byPktType,
- WORD wRate,
- BOOL bNeedAck,
- BYTE byFBOption
+ unsigned char byDurType,
+ unsigned int cbFrameLength,
+ unsigned char byPktType,
+ unsigned short wRate,
+ bool bNeedAck,
+ unsigned char byFBOption
)
{
- UINT uCTSTime = 0, uDurTime = 0;
+ unsigned int uCTSTime = 0, uDurTime = 0;
switch (byDurType) {
@@ -719,22 +708,22 @@ s_uGetRTSCTSDuration (
static
-UINT
+unsigned int
s_uFillDataHead (
PSDevice pDevice,
- BYTE byPktType,
+ unsigned char byPktType,
void * pTxDataHead,
- UINT cbFrameLength,
- UINT uDMAIdx,
- BOOL bNeedAck,
- UINT uFragIdx,
- UINT cbLastFragmentSize,
- UINT uMACfragNum,
- BYTE byFBOption,
- WORD wCurrentRate
+ unsigned int cbFrameLength,
+ unsigned int uDMAIdx,
+ bool bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ unsigned char byFBOption,
+ unsigned short wCurrentRate
)
{
- WORD wLen = 0x0000;
+ unsigned short wLen = 0x0000;
if (pTxDataHead == NULL) {
return 0;
@@ -745,19 +734,19 @@ s_uFillDataHead (
PSTxDataHead_g pBuf = (PSTxDataHead_g)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get Duration and TimeStamp
- pBuf->wDuration_a = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
+ pBuf->wDuration_a = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
byPktType, wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption)); //1: 2.4GHz
- pBuf->wDuration_b = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
+ pBuf->wDuration_b = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
PK_TYPE_11B, pDevice->byTopCCKBasicRate,
bNeedAck, uFragIdx, cbLastFragmentSize,
uMACfragNum, byFBOption)); //1: 2.4
@@ -771,21 +760,21 @@ s_uFillDataHead (
PSTxDataHead_g_FB pBuf = (PSTxDataHead_g_FB)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get Duration and TimeStamp
- pBuf->wDuration_a = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
+ pBuf->wDuration_a = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
- pBuf->wDuration_b = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
+ pBuf->wDuration_b = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
- pBuf->wDuration_a_f0 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
+ pBuf->wDuration_a_f0 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
- pBuf->wDuration_a_f1 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
+ pBuf->wDuration_a_f1 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
@@ -800,16 +789,16 @@ s_uFillDataHead (
PSTxDataHead_a_FB pBuf = (PSTxDataHead_a_FB)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
- pBuf->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
+ pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
- pBuf->wDuration_f0 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
+ pBuf->wDuration_f0 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
- pBuf->wDuration_f1 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
+ pBuf->wDuration_f1 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return (pBuf->wDuration);
@@ -817,12 +806,12 @@ s_uFillDataHead (
PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
- pBuf->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
+ pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
@@ -835,11 +824,11 @@ s_uFillDataHead (
PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
- pBuf->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
+ pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
@@ -854,18 +843,18 @@ static
void
s_vFillRTSHead (
PSDevice pDevice,
- BYTE byPktType,
+ unsigned char byPktType,
void * pvRTS,
- UINT cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
+ unsigned int cbFrameLength,
+ bool bNeedAck,
+ bool bDisCRC,
PSEthernetHeader psEthHeader,
- WORD wCurrentRate,
- BYTE byFBOption
+ unsigned short wCurrentRate,
+ unsigned char byFBOption
)
{
- UINT uRTSFrameLen = 20;
- WORD wLen = 0x0000;
+ unsigned int uRTSFrameLen = 20;
+ unsigned short wLen = 0x0000;
if (pvRTS == NULL)
return;
@@ -883,17 +872,17 @@ s_vFillRTSHead (
PSRTS_g pBuf = (PSRTS_g)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
//Get Duration
- pBuf->wDuration_bb = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->wDuration_aa = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData
- pBuf->wDuration_ba = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
+ pBuf->wDuration_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
+ pBuf->wDuration_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData
+ pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
@@ -916,22 +905,22 @@ s_vFillRTSHead (
PSRTS_g_FB pBuf = (PSRTS_g_FB)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
//Get Duration
- pBuf->wDuration_bb = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->wDuration_aa = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData
- pBuf->wDuration_ba = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData
- pBuf->wRTSDuration_ba_f0 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_aa_f0 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_ba_f1 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_aa_f1 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData
+ pBuf->wDuration_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
+ pBuf->wDuration_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData
+ pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData
+ pBuf->wRTSDuration_ba_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData
+ pBuf->wRTSDuration_aa_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData
+ pBuf->wRTSDuration_ba_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData
+ pBuf->wRTSDuration_aa_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
@@ -958,11 +947,11 @@ s_vFillRTSHead (
PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
- pBuf->wDuration = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
+ pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
@@ -987,13 +976,13 @@ s_vFillRTSHead (
PSRTS_a_FB pBuf = (PSRTS_a_FB)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
- pBuf->wDuration = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
- pBuf->wRTSDuration_f0 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData
- pBuf->wRTSDuration_f1 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0:
+ pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
+ pBuf->wRTSDuration_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData
+ pBuf->wRTSDuration_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0:
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
@@ -1017,11 +1006,11 @@ s_vFillRTSHead (
PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
- pBuf->wDuration = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
+ pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
@@ -1048,18 +1037,18 @@ static
void
s_vFillCTSHead (
PSDevice pDevice,
- UINT uDMAIdx,
- BYTE byPktType,
+ unsigned int uDMAIdx,
+ unsigned char byPktType,
void * pvCTS,
- UINT cbFrameLength,
- BOOL bNeedAck,
- BOOL bDisCRC,
- WORD wCurrentRate,
- BYTE byFBOption
+ unsigned int cbFrameLength,
+ bool bNeedAck,
+ bool bDisCRC,
+ unsigned short wCurrentRate,
+ unsigned char byFBOption
)
{
- UINT uCTSFrameLen = 14;
- WORD wLen = 0x0000;
+ unsigned int uCTSFrameLen = 14;
+ unsigned short wLen = 0x0000;
if (pvCTS == NULL) {
return;
@@ -1077,21 +1066,21 @@ s_vFillCTSHead (
PSCTS_FB pBuf = (PSCTS_FB)pvCTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- pBuf->wDuration_ba = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
+ pBuf->wDuration_ba = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wDuration_ba += pDevice->wCTSDuration;
pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
//Get CTSDuration_ba_f0
- pBuf->wCTSDuration_ba_f0 = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
+ pBuf->wCTSDuration_ba_f0 = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wCTSDuration_ba_f0 += pDevice->wCTSDuration;
pBuf->wCTSDuration_ba_f0 = cpu_to_le16(pBuf->wCTSDuration_ba_f0);
//Get CTSDuration_ba_f1
- pBuf->wCTSDuration_ba_f1 = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
+ pBuf->wCTSDuration_ba_f1 = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wCTSDuration_ba_f1 += pDevice->wCTSDuration;
pBuf->wCTSDuration_ba_f1 = cpu_to_le16(pBuf->wCTSDuration_ba_f1);
//Get CTS Frame body
@@ -1104,11 +1093,11 @@ s_vFillCTSHead (
PSCTS pBuf = (PSCTS)pvCTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
+ (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get CTSDuration_ba
- pBuf->wDuration_ba = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
+ pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wDuration_ba += pDevice->wCTSDuration;
pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
@@ -1148,28 +1137,28 @@ s_vFillCTSHead (
* Return Value: none
*
-*/
-// UINT cbFrameSize,//Hdr+Payload+FCS
+// unsigned int cbFrameSize,//Hdr+Payload+FCS
static
void
s_vGenerateTxParameter (
PSDevice pDevice,
- BYTE byPktType,
+ unsigned char byPktType,
void * pTxBufHead,
void * pvRrvTime,
void * pvRTS,
void * pvCTS,
- UINT cbFrameSize,
- BOOL bNeedACK,
- UINT uDMAIdx,
+ unsigned int cbFrameSize,
+ bool bNeedACK,
+ unsigned int uDMAIdx,
PSEthernetHeader psEthHeader,
- WORD wCurrentRate
+ unsigned short wCurrentRate
)
{
- UINT cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
- WORD wFifoCtl;
- BOOL bDisCRC = FALSE;
- BYTE byFBOption = AUTO_FB_NONE;
-// WORD wCurrentRate = pDevice->wCurrentRate;
+ unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
+ unsigned short wFifoCtl;
+ bool bDisCRC = false;
+ unsigned char byFBOption = AUTO_FB_NONE;
+// unsigned short wCurrentRate = pDevice->wCurrentRate;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
PSTxBufHead pFifoHead = (PSTxBufHead)pTxBufHead;
@@ -1177,7 +1166,7 @@ s_vGenerateTxParameter (
wFifoCtl = pFifoHead->wFIFOCtl;
if (wFifoCtl & FIFOCTL_CRCDIS) {
- bDisCRC = TRUE;
+ bDisCRC = true;
}
if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
@@ -1196,11 +1185,11 @@ s_vGenerateTxParameter (
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_gRTS pBuf = (PSRrvTime_gRTS)pvRrvTime;
- pBuf->wRTSTxRrvTime_aa = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
- pBuf->wRTSTxRrvTime_ba = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
- pBuf->wRTSTxRrvTime_bb = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
- pBuf->wTxRrvTime_a = cpu_to_le16((WORD) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
- pBuf->wTxRrvTime_b = cpu_to_le16((WORD) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
+ pBuf->wRTSTxRrvTime_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
+ pBuf->wRTSTxRrvTime_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
+ pBuf->wRTSTxRrvTime_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
+ pBuf->wTxRrvTime_a = cpu_to_le16((unsigned short) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
+ pBuf->wTxRrvTime_b = cpu_to_le16((unsigned short) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
@@ -1210,9 +1199,9 @@ s_vGenerateTxParameter (
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_gCTS pBuf = (PSRrvTime_gCTS)pvRrvTime;
- pBuf->wTxRrvTime_a = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
- pBuf->wTxRrvTime_b = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
- pBuf->wCTSTxRrvTime_ba = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
+ pBuf->wTxRrvTime_a = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
+ pBuf->wTxRrvTime_b = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
+ pBuf->wCTSTxRrvTime_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
}
@@ -1226,8 +1215,8 @@ s_vGenerateTxParameter (
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wRTSTxRrvTime = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
- pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
+ pBuf->wRTSTxRrvTime = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
+ pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
@@ -1236,7 +1225,7 @@ s_vGenerateTxParameter (
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
+ pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
}
}
}
@@ -1246,8 +1235,8 @@ s_vGenerateTxParameter (
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wRTSTxRrvTime = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
- pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
+ pBuf->wRTSTxRrvTime = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
+ pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
@@ -1256,26 +1245,26 @@ s_vGenerateTxParameter (
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
+ pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
}
}
}
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter END.\n");
}
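/*
 * Sketch of the reservation-block selection made in the branches of
 * s_vGenerateTxParameter() above.  The struct names are the driver's own;
 * the helper itself is illustrative and assumes the driver's descriptor
 * definitions are in scope.
 */
static size_t rrv_time_size(bool is_11g, bool use_rts)
{
	if (is_11g)
		return use_rts ? sizeof(SRrvTime_gRTS) : sizeof(SRrvTime_gCTS);
	return sizeof(SRrvTime_ab);	/* 11a/11b use the same block with or without RTS */
}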
/*
- PBYTE pbyBuffer,//point to pTxBufHead
- WORD wFragType,//00:Non-Frag, 01:Start, 02:Mid, 03:Last
- UINT cbFragmentSize,//Hdr+payoad+FCS
+ unsigned char *pbyBuffer,//point to pTxBufHead
+ unsigned short wFragType,//00:Non-Frag, 01:Start, 02:Mid, 03:Last
+ unsigned int cbFragmentSize,//Hdr+payoad+FCS
*/
static
void
s_vFillFragParameter(
PSDevice pDevice,
- PBYTE pbyBuffer,
- UINT uTxType,
+ unsigned char *pbyBuffer,
+ unsigned int uTxType,
void * pvtdCurr,
- WORD wFragType,
- UINT cbReqCount
+ unsigned short wFragType,
+ unsigned int cbReqCount
)
{
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
@@ -1289,7 +1278,7 @@ s_vFillFragParameter(
ptdCurr->m_wFIFOCtl = pTxBufHead->wFIFOCtl;
ptdCurr->m_wTimeStamp = pTxBufHead->wTimeStamp;
//Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
+ ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
}
@@ -1301,7 +1290,7 @@ s_vFillFragParameter(
//PSTxDesc ptdCurr = (PSTxDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
PSTxDesc ptdCurr = (PSTxDesc)pvtdCurr;
//Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
+ ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
}
@@ -1310,81 +1299,70 @@ s_vFillFragParameter(
}
}
- pTxBufHead->wFragCtl |= (WORD)wFragType;//0x0001; //0000 0000 0000 0001
+ pTxBufHead->wFragCtl |= (unsigned short)wFragType;//0x0001; //0000 0000 0000 0001
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vFillFragParameter END\n");
}
-static
-UINT
-s_cbFillTxBufHead (
- PSDevice pDevice,
- BYTE byPktType,
- PBYTE pbyTxBufferAddr,
- UINT cbFrameBodySize,
- UINT uDMAIdx,
- PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader,
- PBYTE pPacket,
- BOOL bNeedEncrypt,
- PSKeyItem pTransmitKey,
- UINT uNodeIndex,
- PUINT puMACfragNum
- )
+static unsigned int
+s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
+ unsigned int cbFrameBodySize, unsigned int uDMAIdx, PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader, unsigned char *pPacket, bool bNeedEncrypt,
+ PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum)
{
- UINT cbMACHdLen;
- UINT cbFrameSize;
- UINT cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- UINT cbFragPayloadSize;
- UINT cbLastFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- UINT cbLastFragPayloadSize;
- UINT uFragIdx;
- PBYTE pbyPayloadHead;
- PBYTE pbyIVHead;
- PBYTE pbyMacHdr;
- WORD wFragType; //00:Non-Frag, 01:Start, 10:Mid, 11:Last
- UINT uDuration;
- PBYTE pbyBuffer;
-// UINT uKeyEntryIdx = NUM_KEY_ENTRY+1;
-// BYTE byKeySel = 0xFF;
- UINT cbIVlen = 0;
- UINT cbICVlen = 0;
- UINT cbMIClen = 0;
- UINT cbFCSlen = 4;
- UINT cb802_1_H_len = 0;
- UINT uLength = 0;
- UINT uTmpLen = 0;
-// BYTE abyTmp[8];
-// DWORD dwCRC;
- UINT cbMICHDR = 0;
- DWORD dwMICKey0, dwMICKey1;
- DWORD dwMIC_Priority;
- PDWORD pdwMIC_L;
- PDWORD pdwMIC_R;
- DWORD dwSafeMIC_L, dwSafeMIC_R; //Fix "Last Frag Size" < "MIC length".
- BOOL bMIC2Frag = FALSE;
- UINT uMICFragLen = 0;
- UINT uMACfragNum = 1;
- UINT uPadding = 0;
- UINT cbReqCount = 0;
-
- BOOL bNeedACK;
- BOOL bRTS;
- BOOL bIsAdhoc;
- PBYTE pbyType;
+ unsigned int cbMACHdLen;
+ unsigned int cbFrameSize;
+ unsigned int cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
+ unsigned int cbFragPayloadSize;
+ unsigned int cbLastFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
+ unsigned int cbLastFragPayloadSize;
+ unsigned int uFragIdx;
+ unsigned char *pbyPayloadHead;
+ unsigned char *pbyIVHead;
+ unsigned char *pbyMacHdr;
+ unsigned short wFragType; //00:Non-Frag, 01:Start, 10:Mid, 11:Last
+ unsigned int uDuration;
+ unsigned char *pbyBuffer;
+// unsigned int uKeyEntryIdx = NUM_KEY_ENTRY+1;
+// unsigned char byKeySel = 0xFF;
+ unsigned int cbIVlen = 0;
+ unsigned int cbICVlen = 0;
+ unsigned int cbMIClen = 0;
+ unsigned int cbFCSlen = 4;
+ unsigned int cb802_1_H_len = 0;
+ unsigned int uLength = 0;
+ unsigned int uTmpLen = 0;
+// unsigned char abyTmp[8];
+// unsigned long dwCRC;
+ unsigned int cbMICHDR = 0;
+ unsigned long dwMICKey0, dwMICKey1;
+ unsigned long dwMIC_Priority;
+ unsigned long *pdwMIC_L;
+ unsigned long *pdwMIC_R;
+ unsigned long dwSafeMIC_L, dwSafeMIC_R; //Fix "Last Frag Size" < "MIC length".
+ bool bMIC2Frag = false;
+ unsigned int uMICFragLen = 0;
+ unsigned int uMACfragNum = 1;
+ unsigned int uPadding = 0;
+ unsigned int cbReqCount = 0;
+
+ bool bNeedACK;
+ bool bRTS;
+ bool bIsAdhoc;
+ unsigned char *pbyType;
PSTxDesc ptdCurr;
PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr;
-// UINT tmpDescIdx;
- UINT cbHeaderLength = 0;
+// unsigned int tmpDescIdx;
+ unsigned int cbHeaderLength = 0;
void * pvRrvTime;
PSMICHDRHead pMICHDR;
void * pvRTS;
void * pvCTS;
void * pvTxDataHd;
- WORD wTxBufSize; // FFinfo size
- UINT uTotalCopyLength = 0;
- BYTE byFBOption = AUTO_FB_NONE;
- BOOL bIsWEP256 = FALSE;
+ unsigned short wTxBufSize; // FFinfo size
+ unsigned int uTotalCopyLength = 0;
+ unsigned char byFBOption = AUTO_FB_NONE;
+ bool bIsWEP256 = false;
PSMgmtObject pMgmt = pDevice->pMgmt;
@@ -1394,19 +1372,16 @@ s_cbFillTxBufHead (
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
- IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
- bNeedACK = FALSE;
- }
- else {
- bNeedACK = TRUE;
- }
- bIsAdhoc = TRUE;
+ if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
+ bNeedACK = false;
+ else
+ bNeedACK = true;
+ bIsAdhoc = true;
}
else {
// MSDUs in Infra mode always need ACK
- bNeedACK = TRUE;
- bIsAdhoc = FALSE;
+ bNeedACK = true;
+ bIsAdhoc = false;
}
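/*
 * The old IS_MULTICAST_ADDRESS || IS_BROADCAST_ADDRESS pair collapses into
 * a single is_multicast_ether_addr() call because the broadcast address
 * ff:ff:ff:ff:ff:ff already has the group (I/G) bit set.  Stand-alone
 * illustration of the test; the real helper lives in <linux/etherdevice.h>.
 */
static inline int addr_is_group(const unsigned char *addr)
{
	return addr[0] & 0x01;	/* bit 0 of the first octet = group bit */
}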
if (pDevice->bLongHeader)
@@ -1415,12 +1390,12 @@ s_cbFillTxBufHead (
cbMACHdLen = WLAN_HDR_ADDR3_LEN;
- if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL)) {
+ if ((bNeedEncrypt == true) && (pTransmitKey != NULL)) {
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
cbIVlen = 4;
cbICVlen = 4;
if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
- bIsWEP256 = TRUE;
+ bIsWEP256 = true;
}
}
if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
@@ -1443,14 +1418,14 @@ s_cbFillTxBufHead (
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
- if ((bNeedACK == FALSE) ||
+ if ((bNeedACK == false) ||
(cbFrameSize < pDevice->wRTSThreshold) ||
((cbFrameSize >= pDevice->wFragmentationThreshold) && (pDevice->wFragmentationThreshold <= pDevice->wRTSThreshold))
) {
- bRTS = FALSE;
+ bRTS = false;
}
else {
- bRTS = TRUE;
+ bRTS = true;
psTxBufHd->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY);
}
//
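/*
 * Minimal sketch of the RTS decision made just above: after summing the
 * on-air frame size (header + IV + body + MIC + ICV + FCS), RTS/CTS
 * protection is armed only for frames that need an ACK, reach
 * wRTSThreshold, and will not be fragmented first by a lower (or equal)
 * fragmentation threshold.  Names here are illustrative.
 */
#include <stdbool.h>

static bool want_rts(bool need_ack, unsigned int frame_size,
		     unsigned int rts_thresh, unsigned int frag_thresh)
{
	if (!need_ack)
		return false;
	if (frame_size < rts_thresh)
		return false;
	if (frame_size >= frag_thresh && frag_thresh <= rts_thresh)
		return false;	/* fragmentation takes over instead of RTS */
	return true;
}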
@@ -1469,7 +1444,7 @@ s_cbFillTxBufHead (
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
@@ -1487,7 +1462,7 @@ s_cbFillTxBufHead (
}
} else {
// Auto Fall Back
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
@@ -1508,7 +1483,7 @@ s_cbFillTxBufHead (
else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
- if (bRTS == TRUE) {
+ if (bRTS == true) {
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
@@ -1526,7 +1501,7 @@ s_cbFillTxBufHead (
}
} else {
// Auto Fall Back
- if (bRTS == TRUE) {//RTS_need
+ if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
@@ -1547,40 +1522,40 @@ s_cbFillTxBufHead (
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
//////////////////////////////////////////////////////////////////
- if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
+ if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
}
else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
- dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
}
else {
- dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[24]);
- dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[28]);
+ dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[24]);
+ dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[28]);
}
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
+ MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12);
dwMIC_Priority = 0;
- MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+ MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
}
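/*
 * Sketch of the 32-byte TKIP key blob layout the Michael setup above
 * indexes into; the struct is illustrative, the offsets are the ones the
 * code uses.  Bytes 16-23 are selected for WPA-None or when the
 * AUTHENTICATOR_KEY flag is set, bytes 24-31 otherwise, and the MIC is
 * then seeded with DA||SA (12 bytes) plus a 4-byte priority of zero
 * before the payload is appended later on.
 */
struct tkip_key_blob {
	unsigned char cipher_key[16];	/* abyKey[ 0..15] */
	unsigned char mic_key_hi[8];	/* abyKey[16..23]: authenticator / WPA-None Tx MIC key */
	unsigned char mic_key_lo[8];	/* abyKey[24..31]: supplicant Tx MIC key */
};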
///////////////////////////////////////////////////////////////////
- pbyMacHdr = (PBYTE)(pbyTxBufferAddr + cbHeaderLength);
- pbyPayloadHead = (PBYTE)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen);
- pbyIVHead = (PBYTE)(pbyMacHdr + cbMACHdLen + uPadding);
+ pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderLength);
+ pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen);
+ pbyIVHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding);
- if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == TRUE) && (bIsWEP256 == FALSE)) {
+ if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true) && (bIsWEP256 == false)) {
// Fragmentation
// FragThreshold = Fragment size(Hdr+(IV)+fragment payload+(MIC)+(ICV)+FCS)
cbFragmentSize = pDevice->wFragmentationThreshold;
cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
//FragNum = (FrameSize-(Hdr+FCS))/(Fragment Size -(Hrd+FCS)))
- uMACfragNum = (WORD) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
+ uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
if (cbLastFragPayloadSize == 0) {
cbLastFragPayloadSize = cbFragPayloadSize;
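/*
 * Minimal worked example of the fragment-count arithmetic above, using
 * illustrative numbers (not taken from the driver): fragmentation
 * threshold 1024, 24-byte header, 8-byte IV, 4-byte ICV, 4-byte FCS
 * gives a per-fragment payload of 1024 - 24 - 8 - 4 - 4 = 984 bytes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int frag_payload = 984;
	unsigned int body_plus_mic = 2500;

	unsigned int nfrag = body_plus_mic / frag_payload;	/* 2   */
	unsigned int last  = body_plus_mic % frag_payload;	/* 532 */

	if (last == 0)
		last = frag_payload;	/* body is an exact multiple of the payload size */
	else
		nfrag++;		/* the remainder needs one more MPDU             */

	printf("%u fragments, last payload %u bytes\n", nfrag, last);	/* 3, 532 */
	return 0;
}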
@@ -1606,13 +1581,13 @@ s_cbFillTxBufHead (
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
+ vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
- if (bNeedEncrypt == TRUE) {
+ if (bNeedEncrypt == true) {
//Fill TXKEY
- s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (WORD)cbFragPayloadSize, (PBYTE)pMICHDR);
+ s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
+ pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
//Fill IV(ExtIV,RSNHDR)
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -1625,13 +1600,13 @@ s_cbFillTxBufHead (
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
- memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
+ memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
}
else {
- memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
+ memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
}
- pbyType = (PBYTE) (pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->wType), sizeof(WORD));
+ pbyType = (unsigned char *) (pbyPayloadHead + 6);
+ memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
cb802_1_H_len = 8;
}
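/*
 * Sketch of the 8-byte LLC/SNAP (802.1H) header built above whenever the
 * EtherType exceeds ETH_DATA_LEN (1500): a 6-byte SNAP prefix followed by
 * the original 2-byte EtherType.  The prefixes below are the standard
 * RFC 1042 and Bridge-Tunnel values; the driver keeps its own copies in
 * abySNAP_RFC1042 and abySNAP_Bridgetunnel.
 */
static const unsigned char snap_rfc1042[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const unsigned char snap_bridge_tunnel[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* payload[0..5] = prefix, payload[6..7] = EtherType  =>  cb802_1_H_len == 8 */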
@@ -1641,15 +1616,15 @@ s_cbFillTxBufHead (
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
- // s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFragPayloadSize);
+ // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, pbyMacHdr, (unsigned short)cbFragPayloadSize);
//}
//cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel,
- // pbyPayloadHead, (WORD)cbFragPayloadSize, uDMAIdx);
+ // pbyPayloadHead, (unsigned short)cbFragPayloadSize, uDMAIdx);
- //pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr;
- pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
+ //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr;
+ pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
@@ -1661,7 +1636,7 @@ s_cbFillTxBufHead (
uTotalCopyLength += cbFragPayloadSize - cb802_1_H_len;
- if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
+ if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Start MIC: %d\n", cbFragPayloadSize);
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFragPayloadSize);
@@ -1672,7 +1647,7 @@ s_cbFillTxBufHead (
//---------------------------
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (WORD)cbFragPayloadSize);
+ s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (unsigned short)cbFragPayloadSize);
cbReqCount += cbICVlen;
}
}
@@ -1711,13 +1686,13 @@ s_cbFillTxBufHead (
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
+ vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
- if (bNeedEncrypt == TRUE) {
+ if (bNeedEncrypt == true) {
//Fill TXKEY
- s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (WORD)cbLastFragPayloadSize, (PBYTE)pMICHDR);
+ s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
+ pbyMacHdr, (unsigned short)cbLastFragPayloadSize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -1734,8 +1709,8 @@ s_cbFillTxBufHead (
- pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
- //pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
+ pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+ //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
@@ -1743,7 +1718,7 @@ s_cbFillTxBufHead (
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
- if (bMIC2Frag == FALSE) {
+ if (bMIC2Frag == false) {
memcpy((pbyBuffer + uLength),
(pPacket + 14 + uTotalCopyLength),
@@ -1753,36 +1728,36 @@ s_cbFillTxBufHead (
uTmpLen = cbLastFragPayloadSize - cbMIClen;
}
- if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
+ if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen:%d, cbLastFragPayloadSize:%d, uTmpLen:%d\n",
uMICFragLen, cbLastFragPayloadSize, uTmpLen);
- if (bMIC2Frag == FALSE) {
+ if (bMIC2Frag == false) {
if (uTmpLen != 0)
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
- pdwMIC_L = (PDWORD)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (PDWORD)(pbyBuffer + uLength + uTmpLen + 4);
+ pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
+ pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Last MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
} else {
if (uMICFragLen >= 4) {
- memcpy((pbyBuffer + uLength), ((PBYTE)&dwSafeMIC_R + (uMICFragLen - 4)),
+ memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
(cbMIClen - uMICFragLen));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen >= 4: %X, %d\n",
- *(PBYTE)((PBYTE)&dwSafeMIC_R + (uMICFragLen - 4)),
+ *(unsigned char *)((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
(cbMIClen - uMICFragLen));
} else {
- memcpy((pbyBuffer + uLength), ((PBYTE)&dwSafeMIC_L + uMICFragLen),
+ memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_L + uMICFragLen),
(4 - uMICFragLen));
memcpy((pbyBuffer + uLength + (4 - uMICFragLen)), &dwSafeMIC_R, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen < 4: %X, %d\n",
- *(PBYTE)((PBYTE)&dwSafeMIC_R + uMICFragLen - 4),
+ *(unsigned char *)((unsigned char *)&dwSafeMIC_R + uMICFragLen - 4),
(cbMIClen - uMICFragLen));
}
/*
for (ii = 0; ii < cbLastFragPayloadSize + 8 + 24; ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength) + ii - 8 - 24)));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii - 8 - 24)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n\n");
*/
@@ -1798,7 +1773,7 @@ s_cbFillTxBufHead (
//---------------------------
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (WORD)cbLastFragPayloadSize);
+ s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbLastFragPayloadSize);
cbReqCount += cbICVlen;
}
}
@@ -1841,14 +1816,14 @@ s_cbFillTxBufHead (
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
+ vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
- if (bNeedEncrypt == TRUE) {
+ if (bNeedEncrypt == true) {
//Fill TXKEY
- s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (WORD)cbFragPayloadSize, (PBYTE)pMICHDR);
+ s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
+ pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -1862,14 +1837,14 @@ s_cbFillTxBufHead (
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
- // s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFragPayloadSize);
+ // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, pbyMacHdr, (unsigned short)cbFragPayloadSize);
//}
//cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel,
- // pbyPayloadHead, (WORD)cbFragPayloadSize, uDMAIdx);
+ // pbyPayloadHead, (unsigned short)cbFragPayloadSize, uDMAIdx);
- pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
- //pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
+ pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+ //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
@@ -1886,17 +1861,17 @@ s_cbFillTxBufHead (
uTotalCopyLength += uTmpLen;
- if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
+ if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
if (uTmpLen < cbFragPayloadSize) {
- bMIC2Frag = TRUE;
+ bMIC2Frag = true;
uMICFragLen = cbFragPayloadSize - uTmpLen;
ASSERT(uMICFragLen < cbMIClen);
- pdwMIC_L = (PDWORD)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (PDWORD)(pbyBuffer + uLength + uTmpLen + 4);
+ pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
+ pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
dwSafeMIC_L = *pdwMIC_L;
dwSafeMIC_R = *pdwMIC_R;
@@ -1906,7 +1881,7 @@ s_cbFillTxBufHead (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Fill MIC in Middle frag [%d]\n", uMICFragLen);
/*
for (ii = 0; ii < uMICFragLen; ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength + uTmpLen) + ii)));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength + uTmpLen) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
*/
@@ -1915,7 +1890,7 @@ s_cbFillTxBufHead (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Middle frag len: %d\n", uTmpLen);
/*
for (ii = 0; ii < uTmpLen; ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength) + ii)));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n\n");
*/
@@ -1926,7 +1901,7 @@ s_cbFillTxBufHead (
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (WORD)cbFragPayloadSize);
+ s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbFragPayloadSize);
cbReqCount += cbICVlen;
}
}
@@ -1961,7 +1936,7 @@ s_cbFillTxBufHead (
wFragType = FRAGCTL_NONFRAG;
//Set FragCtl in TxBufferHead
- psTxBufHd->wFragCtl |= (WORD)wFragType;
+ psTxBufHd->wFragCtl |= (unsigned short)wFragType;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
@@ -1971,13 +1946,13 @@ s_cbFillTxBufHead (
0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
+ vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, 0);
- if (bNeedEncrypt == TRUE) {
+ if (bNeedEncrypt == true) {
//Fill TXKEY
- s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (WORD)cbFrameBodySize, (PBYTE)pMICHDR);
+ s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
+ pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -1989,13 +1964,13 @@ s_cbFillTxBufHead (
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
- memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
+ memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
}
else {
- memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
+ memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
}
- pbyType = (PBYTE) (pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->wType), sizeof(WORD));
+ pbyType = (unsigned char *) (pbyPayloadHead + 6);
+ memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
cb802_1_H_len = 8;
}
@@ -2006,11 +1981,11 @@ s_cbFillTxBufHead (
//Fill MICHDR
//if (pDevice->bAES) {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Fill MICHDR...\n");
- // s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFrameBodySize);
+ // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, pbyMacHdr, (unsigned short)cbFrameBodySize);
//}
- pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
- //pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr;
+ pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+ //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
@@ -2023,29 +1998,29 @@ s_cbFillTxBufHead (
cbFrameBodySize - cb802_1_H_len
);
- if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)){
+ if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Length:%d, %d\n", cbFrameBodySize - cb802_1_H_len, uLength);
/*
for (ii = 0; ii < (cbFrameBodySize - cb802_1_H_len); ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength) + ii)));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
*/
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
- pdwMIC_L = (PDWORD)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
- pdwMIC_R = (PDWORD)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
+ pdwMIC_L = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
+ pdwMIC_R = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
- if (pDevice->bTxMICFail == TRUE) {
+ if (pDevice->bTxMICFail == true) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
- pDevice->bTxMICFail = FALSE;
+ pDevice->bTxMICFail = false;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
@@ -2053,7 +2028,7 @@ s_cbFillTxBufHead (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
/*
for (ii = 0; ii < 8; ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(((PBYTE)(pdwMIC_L) + ii)));
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(((unsigned char *)(pdwMIC_L) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
*/
@@ -2064,7 +2039,7 @@ s_cbFillTxBufHead (
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)){
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len),
- (WORD)(cbFrameBodySize + cbMIClen));
+ (unsigned short)(cbFrameBodySize + cbMIClen));
cbReqCount += cbICVlen;
}
}
@@ -2078,7 +2053,7 @@ s_cbFillTxBufHead (
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
+ ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
pDevice->iTDUsed[uDMAIdx]++;
@@ -2094,26 +2069,16 @@ s_cbFillTxBufHead (
void
-vGenerateFIFOHeader (
- PSDevice pDevice,
- BYTE byPktType,
- PBYTE pbyTxBufferAddr,
- BOOL bNeedEncrypt,
- UINT cbPayloadSize,
- UINT uDMAIdx,
- PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader,
- PBYTE pPacket,
- PSKeyItem pTransmitKey,
- UINT uNodeIndex,
- PUINT puMACfragNum,
- PUINT pcbHeaderSize
- )
+vGenerateFIFOHeader(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
+ bool bNeedEncrypt, unsigned int cbPayloadSize, unsigned int uDMAIdx,
+ PSTxDesc pHeadTD, PSEthernetHeader psEthHeader, unsigned char *pPacket,
+ PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum,
+ unsigned int *pcbHeaderSize)
{
- UINT wTxBufSize; // FFinfo size
- BOOL bNeedACK;
- BOOL bIsAdhoc;
- WORD cbMacHdLen;
+ unsigned int wTxBufSize; // FFinfo size
+ bool bNeedACK;
+ bool bIsAdhoc;
+ unsigned short cbMacHdLen;
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
@@ -2123,22 +2088,21 @@ vGenerateFIFOHeader (
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
- IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
- bNeedACK = FALSE;
+ if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0]))) {
+ bNeedACK = false;
pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
}
else {
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
- bIsAdhoc = TRUE;
+ bIsAdhoc = true;
}
else {
// MSDUs in Infra mode always need ACK
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- bIsAdhoc = FALSE;
+ bIsAdhoc = false;
}
@@ -2165,7 +2129,7 @@ vGenerateFIFOHeader (
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
- pTxBufHead->wFragCtl |= cpu_to_le16((WORD)(cbMacHdLen << 10));
+ pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
@@ -2181,7 +2145,7 @@ vGenerateFIFOHeader (
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
//Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
+ if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
@@ -2195,7 +2159,7 @@ vGenerateFIFOHeader (
}
//Set FRAGCTL_WEPTYP
- pDevice->bAES = FALSE;
+ pDevice->bAES = false;
//Set FRAGCTL_WEPTYP
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
@@ -2267,13 +2231,13 @@ vGenerateFIFOHeader (
void
vGenerateMACHeader (
PSDevice pDevice,
- PBYTE pbyBufferAddr,
- WORD wDuration,
+ unsigned char *pbyBufferAddr,
+ unsigned short wDuration,
PSEthernetHeader psEthHeader,
- BOOL bNeedEncrypt,
- WORD wFragType,
- UINT uDMAIdx,
- UINT uFragIdx
+ bool bNeedEncrypt,
+ unsigned short wFragType,
+ unsigned int uDMAIdx,
+ unsigned int uFragIdx
)
{
PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
@@ -2307,7 +2271,7 @@ vGenerateMACHeader (
}
if (bNeedEncrypt)
- pMACHeader->wFrameCtl |= cpu_to_le16((WORD)WLAN_SET_FC_ISWEP(1));
+ pMACHeader->wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_ISWEP(1));
pMACHeader->wDurationID = cpu_to_le16(wDuration);
@@ -2319,7 +2283,7 @@ vGenerateMACHeader (
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
//Set FragNumber in Sequence Control
- pMACHeader->wSeqCtl |= cpu_to_le16((WORD)uFragIdx);
+ pMACHeader->wSeqCtl |= cpu_to_le16((unsigned short)uFragIdx);
if ((wFragType == FRAGCTL_ENDFRAG) || (wFragType == FRAGCTL_NONFRAG)) {
pDevice->wSeqCounter++;
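/*
 * Sketch of the IEEE 802.11 Sequence Control packing done just above: the
 * low four bits carry the fragment number and bits 4-15 the sequence
 * number; the driver applies cpu_to_le16() to each part before OR-ing.
 */
static unsigned short pack_seq_ctl(unsigned short seq_num, unsigned short frag_num)
{
	return (unsigned short)((seq_num << 4) | (frag_num & 0x0f));	/* host order */
}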
@@ -2340,32 +2304,32 @@ vGenerateMACHeader (
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
PSTxDesc pFrstTD;
- BYTE byPktType;
- PBYTE pbyTxBufferAddr;
+ unsigned char byPktType;
+ unsigned char *pbyTxBufferAddr;
void * pvRTS;
PSCTS pCTS;
void * pvTxDataHd;
- UINT uDuration;
- UINT cbReqCount;
+ unsigned int uDuration;
+ unsigned int cbReqCount;
PS802_11Header pMACHeader;
- UINT cbHeaderSize;
- UINT cbFrameBodySize;
- BOOL bNeedACK;
- BOOL bIsPSPOLL = FALSE;
+ unsigned int cbHeaderSize;
+ unsigned int cbFrameBodySize;
+ bool bNeedACK;
+ bool bIsPSPOLL = false;
PSTxBufHead pTxBufHead;
- UINT cbFrameSize;
- UINT cbIVlen = 0;
- UINT cbICVlen = 0;
- UINT cbMIClen = 0;
- UINT cbFCSlen = 4;
- UINT uPadding = 0;
- WORD wTxBufSize;
- UINT cbMacHdLen;
+ unsigned int cbFrameSize;
+ unsigned int cbIVlen = 0;
+ unsigned int cbICVlen = 0;
+ unsigned int cbMIClen = 0;
+ unsigned int cbFCSlen = 4;
+ unsigned int uPadding = 0;
+ unsigned short wTxBufSize;
+ unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
void * pvRrvTime;
void * pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
- WORD wCurrentRate = RATE_1M;
+ unsigned short wCurrentRate = RATE_1M;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
@@ -2373,7 +2337,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
}
pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
- pbyTxBufferAddr = (PBYTE)pFrstTD->pTDInfo->buf;
+ pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
@@ -2424,12 +2388,10 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
- if (IS_MULTICAST_ADDRESS(&(pPacket->p80211Header->sA3.abyAddr1[0])) ||
- IS_BROADCAST_ADDRESS(&(pPacket->p80211Header->sA3.abyAddr1[0]))) {
- bNeedACK = FALSE;
- }
+ if (is_multicast_ether_addr(&(pPacket->p80211Header->sA3.abyAddr1[0])))
+ bNeedACK = false;
else {
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
};
@@ -2441,7 +2403,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
//pDevice->byPreambleType = PREAMBLE_LONG;
// probe-response don't retry
//if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) {
- // bNeedACK = FALSE;
+ // bNeedACK = false;
// pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK);
//}
}
@@ -2449,19 +2411,19 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = TRUE;
+ bIsPSPOLL = true;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
//Set FRAGCTL_MACHDCNT
- pTxBufHead->wFragCtl |= cpu_to_le16((WORD)(cbMacHdLen << 10));
+ pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = FALSE; //Set FRAGCTL_WEPTYP
+ pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
@@ -2482,7 +2444,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = TRUE;
+ pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
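/*
 * Sketch of rounding the MAC header up to a 4-byte (DW) boundary, as the
 * comment above requires.  The closed form below is the usual idiom; the
 * driver's expression yields 4 for an already-aligned header, which is
 * assumed to be folded back to 0 by the surrounding code.
 */
static unsigned int dword_pad(unsigned int len)
{
	return (4 - (len & 3)) & 3;	/* 0 when len is already a multiple of 4 */
}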
@@ -2492,7 +2454,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen;
//Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
+ if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
@@ -2523,7 +2485,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
//=========================
// No Fragmentation
//=========================
- pTxBufHead->wFragCtl |= (WORD)FRAGCTL_NONFRAG;
+ pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
@@ -2539,17 +2501,17 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize;
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
- PBYTE pbyIVHead;
- PBYTE pbyPayloadHead;
- PBYTE pbyBSSID;
+ unsigned char *pbyIVHead;
+ unsigned char *pbyPayloadHead;
+ unsigned char *pbyBSSID;
PSKeyItem pTransmitKey = NULL;
- pbyIVHead = (PBYTE)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
- pbyPayloadHead = (PBYTE)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
+ pbyIVHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
+ pbyPayloadHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
//Fill TXKEY
//Kyle: Need fix: TKIP and AES did't encryt Mnt Packet.
- //s_vFillTxKey(pDevice, (PBYTE)pTxBufHead->adwTxKey, NULL);
+ //s_vFillTxKey(pDevice, (unsigned char *)pTxBufHead->adwTxKey, NULL);
//Fill IV(ExtIV,RSNHDR)
//s_vFillPrePayload(pDevice, pbyIVHead, NULL);
@@ -2558,16 +2520,16 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
- // s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, (PBYTE)pMACHeader, (WORD)cbFrameBodySize);
+ // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, (unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize);
//}
do {
if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == TRUE)) {
+ (pDevice->bLinkPass == true)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == FALSE) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == TRUE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
break;
}
@@ -2578,19 +2540,19 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
}
- } while(FALSE);
+ } while(false);
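/*
 * The do { ... break; } while (false) above is only a structured way of
 * stopping at the first usable key.  An illustrative flattening of the
 * same "pairwise key, else group key, else none" fallback, assuming the
 * driver's PSKeyItem type is in scope:
 */
static PSKeyItem pick_tx_key(PSKeyItem pairwise, PSKeyItem group)
{
	if (pairwise)
		return pairwise;	/* infrastructure, link up: prefer the PTK */
	if (group)
		return group;		/* otherwise fall back to the GTK          */
	return NULL;			/* no usable key was found                 */
}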
//Fill TXKEY
- s_vFillTxKey(pDevice, (PBYTE)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- (PBYTE)pMACHeader, (WORD)cbFrameBodySize, NULL);
+ s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
+ (unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize, NULL);
memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen);
- memcpy(pbyPayloadHead, ((PBYTE)(pPacket->p80211Header) + cbMacHdLen),
+ memcpy(pbyPayloadHead, ((unsigned char *)(pPacket->p80211Header) + cbMacHdLen),
cbFrameBodySize);
}
else {
@@ -2622,7 +2584,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
//Set TSR1 & ReqCount in TxDescHead
pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
- pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
+ pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
pFrstTD->pTDInfo->byFlags = 0;
@@ -2630,7 +2592,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
wmb();
pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
@@ -2661,16 +2623,16 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
- BYTE byPktType;
- PBYTE pbyBuffer = (PBYTE)pDevice->tx_beacon_bufs;
- UINT cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
- UINT cbHeaderSize = 0;
- WORD wTxBufSize = sizeof(STxShortBufHead);
+ unsigned char byPktType;
+ unsigned char *pbyBuffer = (unsigned char *)pDevice->tx_beacon_bufs;
+ unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
+ unsigned int cbHeaderSize = 0;
+ unsigned short wTxBufSize = sizeof(STxShortBufHead);
PSTxShortBufHead pTxBufHead = (PSTxShortBufHead) pbyBuffer;
PSTxDataHead_ab pTxDataHead = (PSTxDataHead_ab) (pbyBuffer + wTxBufSize);
PS802_11Header pMACHeader;
- WORD wCurrentRate;
- WORD wLen = 0x0000;
+ unsigned short wCurrentRate;
+ unsigned short wLen = 0x0000;
memset(pTxBufHead, 0, wTxBufSize);
@@ -2693,17 +2655,17 @@ CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
//Set packet type & Get Duration
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- pTxDataHead->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, byPktType,
- wCurrentRate, FALSE, 0, 0, 1, AUTO_FB_NONE));
+ pTxDataHead->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, byPktType,
+ wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
}
else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- pTxDataHead->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, byPktType,
- wCurrentRate, FALSE, 0, 0, 1, AUTO_FB_NONE));
+ pTxDataHead->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, byPktType,
+ wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
}
BBvCaculateParameter(pDevice, cbFrameSize, wCurrentRate, byPktType,
- (PWORD)&(wLen), (PBYTE)&(pTxDataHead->byServiceField), (PBYTE)&(pTxDataHead->bySignalField)
+ (unsigned short *)&(wLen), (unsigned char *)&(pTxDataHead->byServiceField), (unsigned char *)&(pTxDataHead->bySignalField)
);
pTxDataHead->wTransmitLength = cpu_to_le16(wLen);
//Get TimeStampOff
@@ -2736,41 +2698,38 @@ CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
-UINT
+unsigned int
cbGetFragCount (
PSDevice pDevice,
PSKeyItem pTransmitKey,
- UINT cbFrameBodySize,
+ unsigned int cbFrameBodySize,
PSEthernetHeader psEthHeader
)
{
- UINT cbMACHdLen;
- UINT cbFrameSize;
- UINT cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- UINT cbFragPayloadSize;
- UINT cbLastFragPayloadSize;
- UINT cbIVlen = 0;
- UINT cbICVlen = 0;
- UINT cbMIClen = 0;
- UINT cbFCSlen = 4;
- UINT uMACfragNum = 1;
- BOOL bNeedACK;
+ unsigned int cbMACHdLen;
+ unsigned int cbFrameSize;
+ unsigned int cbFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
+ unsigned int cbFragPayloadSize;
+ unsigned int cbLastFragPayloadSize;
+ unsigned int cbIVlen = 0;
+ unsigned int cbICVlen = 0;
+ unsigned int cbMIClen = 0;
+ unsigned int cbFCSlen = 4;
+ unsigned int uMACfragNum = 1;
+ bool bNeedACK;
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
- IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
- bNeedACK = FALSE;
- }
- else {
- bNeedACK = TRUE;
- }
+ if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
+ bNeedACK = false;
+ else
+ bNeedACK = true;
}
else {
// MSDUs in Infra mode always need ACK
- bNeedACK = TRUE;
+ bNeedACK = true;
}
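[Note on the conversion above, not part of the patch: is_multicast_ether_addr() from <linux/etherdevice.h> also matches the broadcast address ff:ff:ff:ff:ff:ff, whose group bit is set, so the old multicast-or-broadcast pair collapses into a single test. A minimal sketch of the same decision, with a hypothetical destination pointer dst:]

#include <linux/etherdevice.h>

/* illustrative only -- mirrors the bNeedACK logic above */
static bool frame_needs_ack(const unsigned char *dst)
{
	/* group-addressed frames (multicast and broadcast) are never ACKed */
	return !is_multicast_ether_addr(dst);
}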
if (pDevice->bLongHeader)
@@ -2779,7 +2738,7 @@ cbGetFragCount (
cbMACHdLen = WLAN_HDR_ADDR3_LEN;
- if (pDevice->bEncryptionEnable == TRUE) {
+ if (pDevice->bEncryptionEnable == true) {
if (pTransmitKey == NULL) {
if ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) ||
@@ -2809,11 +2768,11 @@ cbGetFragCount (
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
- if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == TRUE)) {
+ if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true)) {
// Fragmentation
cbFragmentSize = pDevice->wFragmentationThreshold;
cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
- uMACfragNum = (WORD) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
+ uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
if (cbLastFragPayloadSize == 0) {
cbLastFragPayloadSize = cbFragPayloadSize;
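[Worked example for the split above, with made-up numbers: if cbFrameBodySize + cbMIClen is 300 bytes and cbFragPayloadSize is 100 bytes, the division yields uMACfragNum = 3 and the modulo yields 0, so cbLastFragPayloadSize is reset to the full 100-byte payload and the last fragment is not short.]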
@@ -2826,51 +2785,51 @@ cbGetFragCount (
void
-vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDULen) {
+vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, unsigned int cbMPDULen) {
PSTxDesc pFrstTD;
- BYTE byPktType;
- PBYTE pbyTxBufferAddr;
+ unsigned char byPktType;
+ unsigned char *pbyTxBufferAddr;
void * pvRTS;
void * pvCTS;
void * pvTxDataHd;
- UINT uDuration;
- UINT cbReqCount;
+ unsigned int uDuration;
+ unsigned int cbReqCount;
PS802_11Header pMACHeader;
- UINT cbHeaderSize;
- UINT cbFrameBodySize;
- BOOL bNeedACK;
- BOOL bIsPSPOLL = FALSE;
+ unsigned int cbHeaderSize;
+ unsigned int cbFrameBodySize;
+ bool bNeedACK;
+ bool bIsPSPOLL = false;
PSTxBufHead pTxBufHead;
- UINT cbFrameSize;
- UINT cbIVlen = 0;
- UINT cbICVlen = 0;
- UINT cbMIClen = 0;
- UINT cbFCSlen = 4;
- UINT uPadding = 0;
- UINT cbMICHDR = 0;
- UINT uLength = 0;
- DWORD dwMICKey0, dwMICKey1;
- DWORD dwMIC_Priority;
- PDWORD pdwMIC_L;
- PDWORD pdwMIC_R;
- WORD wTxBufSize;
- UINT cbMacHdLen;
+ unsigned int cbFrameSize;
+ unsigned int cbIVlen = 0;
+ unsigned int cbICVlen = 0;
+ unsigned int cbMIClen = 0;
+ unsigned int cbFCSlen = 4;
+ unsigned int uPadding = 0;
+ unsigned int cbMICHDR = 0;
+ unsigned int uLength = 0;
+ unsigned long dwMICKey0, dwMICKey1;
+ unsigned long dwMIC_Priority;
+ unsigned long *pdwMIC_L;
+ unsigned long *pdwMIC_R;
+ unsigned short wTxBufSize;
+ unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
void * pvRrvTime;
void * pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
- WORD wCurrentRate = RATE_1M;
+ unsigned short wCurrentRate = RATE_1M;
PUWLAN_80211HDR p80211Header;
- UINT uNodeIndex = 0;
- BOOL bNodeExist = FALSE;
+ unsigned int uNodeIndex = 0;
+ bool bNodeExist = false;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
- PBYTE pbyIVHead;
- PBYTE pbyPayloadHead;
- PBYTE pbyMacHdr;
+ unsigned char *pbyIVHead;
+ unsigned char *pbyPayloadHead;
+ unsigned char *pbyMacHdr;
- UINT cbExtSuppRate = 0;
+ unsigned int cbExtSuppRate = 0;
// PWLAN_IE pItem;
@@ -2886,7 +2845,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
- pbyTxBufferAddr = (PBYTE)pFrstTD->pTDInfo->buf;
+ pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
@@ -2938,20 +2897,19 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
- if (IS_MULTICAST_ADDRESS(&(p80211Header->sA3.abyAddr1[0])) ||
- IS_BROADCAST_ADDRESS(&(p80211Header->sA3.abyAddr1[0]))) {
- bNeedACK = FALSE;
+ if (is_multicast_ether_addr(&(p80211Header->sA3.abyAddr1[0]))) {
+ bNeedACK = false;
if (pDevice->bEnableHostWEP) {
uNodeIndex = 0;
- bNodeExist = TRUE;
+ bNodeExist = true;
};
}
else {
if (pDevice->bEnableHostWEP) {
- if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (PBYTE)(p80211Header->sA3.abyAddr1), &uNodeIndex))
- bNodeExist = TRUE;
+ if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (unsigned char *)(p80211Header->sA3.abyAddr1), &uNodeIndex))
+ bNodeExist = true;
};
- bNeedACK = TRUE;
+ bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
};
@@ -2964,7 +2922,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
// probe-response don't retry
//if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) {
- // bNeedACK = FALSE;
+ // bNeedACK = false;
// pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK);
//}
}
@@ -2972,7 +2930,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = TRUE;
+ bIsPSPOLL = true;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
@@ -2996,12 +2954,12 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
//Set FRAGCTL_MACHDCNT
- pTxBufHead->wFragCtl |= cpu_to_le16((WORD)cbMacHdLen << 10);
+ pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)cbMacHdLen << 10);
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = FALSE; //Set FRAGCTL_WEPTYP
+ pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
@@ -3024,7 +2982,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = TRUE;
+ pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -3034,7 +2992,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate;
//Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
+ if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
@@ -3067,7 +3025,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
//=========================
// No Fragmentation
//=========================
- pTxBufHead->wFragCtl |= (WORD)FRAGCTL_NONFRAG;
+ pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
@@ -3082,9 +3040,9 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate;
- pbyMacHdr = (PBYTE)(pbyTxBufferAddr + cbHeaderSize);
- pbyPayloadHead = (PBYTE)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen);
- pbyIVHead = (PBYTE)(pbyMacHdr + cbMacHdLen + uPadding);
+ pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize);
+ pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen);
+ pbyIVHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding);
// Copy the Packet into a tx Buffer
memcpy(pbyMacHdr, pbMPDU, cbMacHdLen);
@@ -3127,30 +3085,30 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
+ MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12);
dwMIC_Priority = 0;
- MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+ MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
- pdwMIC_L = (PDWORD)(pbyTxBufferAddr + uLength + cbFrameBodySize);
- pdwMIC_R = (PDWORD)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
+ pdwMIC_L = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
+ pdwMIC_R = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
- if (pDevice->bTxMICFail == TRUE) {
+ if (pDevice->bTxMICFail == true) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
- pDevice->bTxMICFail = FALSE;
+ pDevice->bTxMICFail = false;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
@@ -3160,8 +3118,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
}
- s_vFillTxKey(pDevice, (PBYTE)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (WORD)cbFrameBodySize, (PBYTE)pMICHDR);
+ s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
+ pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -3169,7 +3127,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
}
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (WORD)(cbFrameBodySize + cbMIClen));
+ s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (unsigned short)(cbFrameBodySize + cbMIClen));
}
}
@@ -3208,7 +3166,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
wmb();
pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index b008fc23adb..fa827b828a3 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -40,67 +40,47 @@
/*--------------------- Export Functions --------------------------*/
/*
-void vGenerateMACHeader(
- PSDevice pDevice,
- DWORD dwTxBufferAddr,
- PBYTE pbySkbData,
- UINT cbPacketSize,
- BOOL bDMA0Used,
- PUINT pcbHeadSize,
- PUINT pcbAppendPayload
- );
-
-void vProcessRxMACHeader (
- PSDevice pDevice,
- DWORD dwRxBufferAddr,
- UINT cbPacketSize,
- BOOL bIsWEP,
- PUINT pcbHeadSize
- );
+void
+vGenerateMACHeader(PSDevice pDevice, unsigned long dwTxBufferAddr, unsigned char *pbySkbData,
+ unsigned int cbPacketSize, bool bDMA0Used, unsigned int *pcbHeadSize,
+ unsigned int *pcbAppendPayload);
+
+void
+vProcessRxMACHeader(PSDevice pDevice, unsigned long dwRxBufferAddr, unsigned int cbPacketSize,
+ bool bIsWEP, unsigned int *pcbHeadSize);
*/
void
vGenerateMACHeader (
PSDevice pDevice,
- PBYTE pbyBufferAddr,
- WORD wDuration,
+ unsigned char *pbyBufferAddr,
+ unsigned short wDuration,
PSEthernetHeader psEthHeader,
- BOOL bNeedEncrypt,
- WORD wFragType,
- UINT uDMAIdx,
- UINT uFragIdx
+ bool bNeedEncrypt,
+ unsigned short wFragType,
+ unsigned int uDMAIdx,
+ unsigned int uFragIdx
);
-UINT
+unsigned int
cbGetFragCount(
PSDevice pDevice,
PSKeyItem pTransmitKey,
- UINT cbFrameBodySize,
+ unsigned int cbFrameBodySize,
PSEthernetHeader psEthHeader
);
void
-vGenerateFIFOHeader (
- PSDevice pDevice,
- BYTE byPktTyp,
- PBYTE pbyTxBufferAddr,
- BOOL bNeedEncrypt,
- UINT cbPayloadSize,
- UINT uDMAIdx,
- PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader,
- PBYTE pPacket,
- PSKeyItem pTransmitKey,
- UINT uNodeIndex,
- PUINT puMACfragNum,
- PUINT pcbHeaderSize
- );
+vGenerateFIFOHeader(PSDevice pDevice, unsigned char byPktTyp, unsigned char *pbyTxBufferAddr,
+ bool bNeedEncrypt, unsigned int cbPayloadSize, unsigned int uDMAIdx, PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader, unsigned char *pPacket, PSKeyItem pTransmitKey,
+ unsigned int uNodeIndex, unsigned int *puMACfragNum, unsigned int *pcbHeaderSize);
-void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDULen);
+void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, unsigned int cbMPDULen);
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 418575fdc2c..6a0a232d1a0 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -76,12 +76,12 @@
* Return Value: data read
*
*/
-BYTE SROMbyReadEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset)
+unsigned char SROMbyReadEmbedded(unsigned long dwIoBase, unsigned char byContntOffset)
{
- WORD wDelay, wNoACK;
- BYTE byWait;
- BYTE byData;
- BYTE byOrg;
+ unsigned short wDelay, wNoACK;
+ unsigned char byWait;
+ unsigned char byData;
+ unsigned char byOrg;
byData = 0xFF;
VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
@@ -122,15 +122,15 @@ BYTE SROMbyReadEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset)
* Out:
* none
*
- * Return Value: TRUE if succeeded; FALSE if failed.
+ * Return Value: true if succeeded; false if failed.
*
*/
-BOOL SROMbWriteEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
+bool SROMbWriteEmbedded(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byData)
{
- WORD wDelay, wNoACK;
- BYTE byWait;
+ unsigned short wDelay, wNoACK;
+ unsigned char byWait;
- BYTE byOrg;
+ unsigned char byOrg;
VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
/* turn off hardware retry for getting NACK */
@@ -157,10 +157,10 @@ BOOL SROMbWriteEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
}
if (wNoACK == W_MAX_I2CRETRY) {
VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
- return FALSE;
+ return false;
}
VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
- return TRUE;
+ return true;
}
@@ -178,12 +178,12 @@ BOOL SROMbWriteEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
* Return Value: none
*
*/
-void SROMvRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
+void SROMvRegBitsOn(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byBits)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- SROMbWriteEmbedded(dwIoBase, byContntOffset,(BYTE)(byOrgData | byBits));
+ SROMbWriteEmbedded(dwIoBase, byContntOffset,(unsigned char)(byOrgData | byBits));
}
@@ -199,12 +199,12 @@ void SROMvRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
* none
*
*/
-void SROMvRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
+void SROMvRegBitsOff(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byBits)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- SROMbWriteEmbedded(dwIoBase, byContntOffset,(BYTE)(byOrgData & (~byBits)));
+ SROMbWriteEmbedded(dwIoBase, byContntOffset,(unsigned char)(byOrgData & (~byBits)));
}
@@ -219,12 +219,12 @@ void SROMvRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
* Out:
* none
*
- * Return Value: TRUE if all test bits on; otherwise FALSE
+ * Return Value: true if all test bits on; otherwise false
*
*/
-BOOL SROMbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
+bool SROMbIsRegBitsOn(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byTestBits)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
return (byOrgData & byTestBits) == byTestBits;
@@ -242,12 +242,12 @@ BOOL SROMbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
* Out:
* none
*
- * Return Value: TRUE if all test bits off; otherwise FALSE
+ * Return Value: true if all test bits off; otherwise false
*
*/
-BOOL SROMbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
+bool SROMbIsRegBitsOff(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byTestBits)
{
- BYTE byOrgData;
+ unsigned char byOrgData;
byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
return !(byOrgData & byTestBits);
@@ -266,13 +266,13 @@ BOOL SROMbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
* Return Value: none
*
*/
-void SROMvReadAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
+void SROMvReadAllContents(unsigned long dwIoBase, unsigned char *pbyEepromRegs)
{
int ii;
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,(BYTE) ii);
+ *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,(unsigned char) ii);
pbyEepromRegs++;
}
}
@@ -291,13 +291,13 @@ void SROMvReadAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvWriteAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
+void SROMvWriteAllContents(unsigned long dwIoBase, unsigned char *pbyEepromRegs)
{
int ii;
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- SROMbWriteEmbedded(dwIoBase,(BYTE) ii, *pbyEepromRegs);
+ SROMbWriteEmbedded(dwIoBase,(unsigned char) ii, *pbyEepromRegs);
pbyEepromRegs++;
}
}
@@ -315,9 +315,9 @@ void SROMvWriteAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvReadEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
+void SROMvReadEtherAddress(unsigned long dwIoBase, unsigned char *pbyEtherAddress)
{
- BYTE ii;
+ unsigned char ii;
/* ii = Rom Address */
for (ii = 0; ii < ETH_ALEN; ii++) {
@@ -340,9 +340,9 @@ void SROMvReadEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
* Return Value: none
*
*/
-void SROMvWriteEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
+void SROMvWriteEtherAddress(unsigned long dwIoBase, unsigned char *pbyEtherAddress)
{
- BYTE ii;
+ unsigned char ii;
/* ii = Rom Address */
for (ii = 0; ii < ETH_ALEN; ii++) {
@@ -364,11 +364,11 @@ void SROMvWriteEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
* Return Value: none
*
*/
-void SROMvReadSubSysVenId(DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId)
+void SROMvReadSubSysVenId(unsigned long dwIoBase, unsigned long *pdwSubSysVenId)
{
- PBYTE pbyData;
+ unsigned char *pbyData;
- pbyData = (PBYTE)pdwSubSysVenId;
+ pbyData = (unsigned char *)pdwSubSysVenId;
/* sub vendor */
*pbyData = SROMbyReadEmbedded(dwIoBase, 6);
*(pbyData+1) = SROMbyReadEmbedded(dwIoBase, 7);
@@ -386,15 +386,15 @@ void SROMvReadSubSysVenId(DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL SROMbAutoLoad(DWORD_PTR dwIoBase)
+bool SROMbAutoLoad(unsigned long dwIoBase)
{
- BYTE byWait;
+ unsigned char byWait;
int ii;
- BYTE byOrg;
+ unsigned char byOrg;
VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
/* turn on hardware retry */
@@ -413,8 +413,8 @@ BOOL SROMbAutoLoad(DWORD_PTR dwIoBase)
VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
if (ii == EEP_MAX_CONTEXT_SIZE)
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index dbb3f5efe97..4c261dac01b 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -97,34 +97,34 @@
// 2048 bits = 256 bytes = 128 words
//
typedef struct tagSSromReg {
- BYTE abyPAR[6]; // 0x00 (WORD)
-
- WORD wSUB_VID; // 0x03 (WORD)
- WORD wSUB_SID;
-
- BYTE byBCFG0; // 0x05 (WORD)
- BYTE byBCFG1;
-
- BYTE byFCR0; // 0x06 (WORD)
- BYTE byFCR1;
- BYTE byPMC0; // 0x07 (WORD)
- BYTE byPMC1;
- BYTE byMAXLAT; // 0x08 (WORD)
- BYTE byMINGNT;
- BYTE byCFG0; // 0x09 (WORD)
- BYTE byCFG1;
- WORD wCISPTR; // 0x0A (WORD)
- WORD wRsv0; // 0x0B (WORD)
- WORD wRsv1; // 0x0C (WORD)
- BYTE byBBPAIR; // 0x0D (WORD)
- BYTE byRFTYPE;
- BYTE byMinChannel; // 0x0E (WORD)
- BYTE byMaxChannel;
- BYTE bySignature; // 0x0F (WORD)
- BYTE byCheckSum;
-
- BYTE abyReserved0[96]; // 0x10 (WORD)
- BYTE abyCIS[128]; // 0x80 (WORD)
+ unsigned char abyPAR[6]; // 0x00 (unsigned short)
+
+ unsigned short wSUB_VID; // 0x03 (unsigned short)
+ unsigned short wSUB_SID;
+
+ unsigned char byBCFG0; // 0x05 (unsigned short)
+ unsigned char byBCFG1;
+
+ unsigned char byFCR0; // 0x06 (unsigned short)
+ unsigned char byFCR1;
+ unsigned char byPMC0; // 0x07 (unsigned short)
+ unsigned char byPMC1;
+ unsigned char byMAXLAT; // 0x08 (unsigned short)
+ unsigned char byMINGNT;
+ unsigned char byCFG0; // 0x09 (unsigned short)
+ unsigned char byCFG1;
+ unsigned short wCISPTR; // 0x0A (unsigned short)
+ unsigned short wRsv0; // 0x0B (unsigned short)
+ unsigned short wRsv1; // 0x0C (unsigned short)
+ unsigned char byBBPAIR; // 0x0D (unsigned short)
+ unsigned char byRFTYPE;
+ unsigned char byMinChannel; // 0x0E (unsigned short)
+ unsigned char byMaxChannel;
+ unsigned char bySignature; // 0x0F (unsigned short)
+ unsigned char byCheckSum;
+
+ unsigned char abyReserved0[96]; // 0x10 (unsigned short)
+ unsigned char abyCIS[128]; // 0x80 (unsigned short)
} SSromReg, *PSSromReg;
/*--------------------- Export Macros ------------------------------*/
@@ -135,23 +135,23 @@ typedef struct tagSSromReg {
/*--------------------- Export Functions --------------------------*/
-BYTE SROMbyReadEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset);
-BOOL SROMbWriteEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData);
+unsigned char SROMbyReadEmbedded(unsigned long dwIoBase, unsigned char byContntOffset);
+bool SROMbWriteEmbedded(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byData);
-void SROMvRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits);
-void SROMvRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits);
+void SROMvRegBitsOn(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byBits);
+void SROMvRegBitsOff(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byBits);
-BOOL SROMbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits);
-BOOL SROMbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits);
+bool SROMbIsRegBitsOn(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byTestBits);
+bool SROMbIsRegBitsOff(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byTestBits);
-void SROMvReadAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs);
-void SROMvWriteAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs);
+void SROMvReadAllContents(unsigned long dwIoBase, unsigned char *pbyEepromRegs);
+void SROMvWriteAllContents(unsigned long dwIoBase, unsigned char *pbyEepromRegs);
-void SROMvReadEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress);
-void SROMvWriteEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress);
+void SROMvReadEtherAddress(unsigned long dwIoBase, unsigned char *pbyEtherAddress);
+void SROMvWriteEtherAddress(unsigned long dwIoBase, unsigned char *pbyEtherAddress);
-void SROMvReadSubSysVenId(DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId);
+void SROMvReadSubSysVenId(unsigned long dwIoBase, unsigned long *pdwSubSysVenId);
-BOOL SROMbAutoLoad (DWORD_PTR dwIoBase);
+bool SROMbAutoLoad (unsigned long dwIoBase);
#endif // __EEPROM_H__
diff --git a/drivers/staging/vt6655/tcrc.c b/drivers/staging/vt6655/tcrc.c
index 5f0c74763f8..f9c28bf8a6a 100644
--- a/drivers/staging/vt6655/tcrc.c
+++ b/drivers/staging/vt6655/tcrc.c
@@ -42,7 +42,7 @@
/*--------------------- Static Variables --------------------------*/
// 32-bit CRC table
-static const DWORD s_adwCrc32Table[256] = {
+static const unsigned long s_adwCrc32Table[256] = {
0x00000000L, 0x77073096L, 0xEE0E612CL, 0x990951BAL,
0x076DC419L, 0x706AF48FL, 0xE963A535L, 0x9E6495A3L,
0x0EDB8832L, 0x79DCB8A4L, 0xE0D5E91EL, 0x97D2D988L,
@@ -132,13 +132,13 @@ static const DWORD s_adwCrc32Table[256] = {
* Return Value: CRC-32
*
-*/
-DWORD CRCdwCrc32 (PBYTE pbyData, UINT cbByte, DWORD dwCrcSeed)
+unsigned long CRCdwCrc32 (unsigned char *pbyData, unsigned int cbByte, unsigned long dwCrcSeed)
{
- DWORD dwCrc;
+ unsigned long dwCrc;
dwCrc = dwCrcSeed;
while (cbByte--) {
- dwCrc = s_adwCrc32Table[(BYTE)((dwCrc ^ (*pbyData)) & 0xFF)] ^ (dwCrc >> 8);
+ dwCrc = s_adwCrc32Table[(unsigned char)((dwCrc ^ (*pbyData)) & 0xFF)] ^ (dwCrc >> 8);
pbyData++;
}
@@ -164,7 +164,7 @@ DWORD CRCdwCrc32 (PBYTE pbyData, UINT cbByte, DWORD dwCrcSeed)
* Return Value: CRC-32
*
-*/
-DWORD CRCdwGetCrc32 (PBYTE pbyData, UINT cbByte)
+unsigned long CRCdwGetCrc32 (unsigned char *pbyData, unsigned int cbByte)
{
return ~CRCdwCrc32(pbyData, cbByte, 0xFFFFFFFFL);
}
@@ -190,7 +190,7 @@ DWORD CRCdwGetCrc32 (PBYTE pbyData, UINT cbByte)
* Return Value: CRC-32
*
-*/
-DWORD CRCdwGetCrc32Ex(PBYTE pbyData, UINT cbByte, DWORD dwPreCRC)
+unsigned long CRCdwGetCrc32Ex(unsigned char *pbyData, unsigned int cbByte, unsigned long dwPreCRC)
{
return CRCdwCrc32(pbyData, cbByte, dwPreCRC);
}
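[Usage sketch, not from the driver: the one-shot and incremental entry points agree; the buffer name and split point below are made up for illustration.]

unsigned long crc_whole, crc_run;

crc_whole = CRCdwGetCrc32(pbyFrame, cbLen);              /* ~CRCdwCrc32(..., 0xFFFFFFFFL) */

crc_run = CRCdwCrc32(pbyFrame, cbSplit, 0xFFFFFFFFL);    /* first piece, seeded manually */
crc_run = ~CRCdwGetCrc32Ex(pbyFrame + cbSplit, cbLen - cbSplit, crc_run);

/* crc_whole == crc_run for any 0 <= cbSplit <= cbLen */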
diff --git a/drivers/staging/vt6655/tcrc.h b/drivers/staging/vt6655/tcrc.h
index 5faa48b0a74..d0449855beb 100644
--- a/drivers/staging/vt6655/tcrc.h
+++ b/drivers/staging/vt6655/tcrc.h
@@ -43,9 +43,9 @@
/*--------------------- Export Functions --------------------------*/
-DWORD CRCdwCrc32(PBYTE pbyData, UINT cbByte, DWORD dwCrcSeed);
-DWORD CRCdwGetCrc32(PBYTE pbyData, UINT cbByte);
-DWORD CRCdwGetCrc32Ex(PBYTE pbyData, UINT cbByte, DWORD dwPreCRC);
+unsigned long CRCdwCrc32(unsigned char *pbyData, unsigned int cbByte, unsigned long dwCrcSeed);
+unsigned long CRCdwGetCrc32(unsigned char *pbyData, unsigned int cbByte);
+unsigned long CRCdwGetCrc32Ex(unsigned char *pbyData, unsigned int cbByte, unsigned long dwPreCRC);
#endif // __TCRC_H__
diff --git a/drivers/staging/vt6655/tether.c b/drivers/staging/vt6655/tether.c
index d8ba67395cb..1cf8508e407 100644
--- a/drivers/staging/vt6655/tether.c
+++ b/drivers/staging/vt6655/tether.c
@@ -61,14 +61,14 @@
* Return Value: Hash value
*
*/
-BYTE ETHbyGetHashIndexByCrc32 (PBYTE pbyMultiAddr)
+unsigned char ETHbyGetHashIndexByCrc32 (unsigned char *pbyMultiAddr)
{
int ii;
- BYTE byTmpHash;
- BYTE byHash = 0;
+ unsigned char byTmpHash;
+ unsigned char byHash = 0;
// get the least 6-bits from CRC generator
- byTmpHash = (BYTE)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
+ byTmpHash = (unsigned char)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
0xFFFFFFFFL) & 0x3F);
// reverse most bit to least bit
for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
@@ -93,17 +93,17 @@ BYTE ETHbyGetHashIndexByCrc32 (PBYTE pbyMultiAddr)
* Out:
* none
*
- * Return Value: TRUE if ok; FALSE if error.
+ * Return Value: true if ok; false if error.
*
*/
-BOOL ETHbIsBufferCrc32Ok (PBYTE pbyBuffer, UINT cbFrameLength)
+bool ETHbIsBufferCrc32Ok (unsigned char *pbyBuffer, unsigned int cbFrameLength)
{
- DWORD dwCRC;
+ unsigned long dwCRC;
dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
- if (cpu_to_le32(*((PDWORD)(pbyBuffer + cbFrameLength - 4))) != dwCRC) {
- return FALSE;
+ if (cpu_to_le32(*((unsigned long *)(pbyBuffer + cbFrameLength - 4))) != dwCRC) {
+ return false;
}
- return TRUE;
+ return true;
}
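[Hedged usage sketch; the receive-path names are hypothetical. The helper expects the frame to still carry its trailing 4-byte FCS, so a caller would check it before stripping the CRC:]

if (!ETHbIsBufferCrc32Ok(pbyRxBuffer, cbFrameLength)) {
	/* FCS mismatch -- drop the frame */
	return;
}
/* frame is CRC-clean, continue processing */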
diff --git a/drivers/staging/vt6655/tether.h b/drivers/staging/vt6655/tether.h
index 3c9acd7903a..787d885deee 100644
--- a/drivers/staging/vt6655/tether.h
+++ b/drivers/staging/vt6655/tether.h
@@ -29,7 +29,7 @@
#ifndef __TETHER_H__
#define __TETHER_H__
-#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
@@ -39,12 +39,6 @@
#define U_ETHER_ADDR_STR_LEN (ETH_ALEN * 2 + 1)
// Ethernet address string length
-#define MIN_DATA_LEN 46 // min data length
-
-#define MIN_PACKET_LEN (MIN_DATA_LEN + ETH_HLEN)
- // 60
- // min total packet length (tx)
-
#define MAX_LOOKAHEAD_SIZE ETH_FRAME_LEN
#define U_MULTI_ADDR_LEN 8 // multicast address length
@@ -160,9 +154,9 @@
// Ethernet packet
//
typedef struct tagSEthernetHeader {
- BYTE abyDstAddr[ETH_ALEN];
- BYTE abySrcAddr[ETH_ALEN];
- WORD wType;
+ unsigned char abyDstAddr[ETH_ALEN];
+ unsigned char abySrcAddr[ETH_ALEN];
+ unsigned short wType;
}__attribute__ ((__packed__))
SEthernetHeader, *PSEthernetHeader;
@@ -171,9 +165,9 @@ SEthernetHeader, *PSEthernetHeader;
// 802_3 packet
//
typedef struct tagS802_3Header {
- BYTE abyDstAddr[ETH_ALEN];
- BYTE abySrcAddr[ETH_ALEN];
- WORD wLen;
+ unsigned char abyDstAddr[ETH_ALEN];
+ unsigned char abySrcAddr[ETH_ALEN];
+ unsigned short wLen;
}__attribute__ ((__packed__))
S802_3Header, *PS802_3Header;
@@ -181,37 +175,17 @@ S802_3Header, *PS802_3Header;
// 802_11 packet
//
typedef struct tagS802_11Header {
- WORD wFrameCtl;
- WORD wDurationID;
- BYTE abyAddr1[ETH_ALEN];
- BYTE abyAddr2[ETH_ALEN];
- BYTE abyAddr3[ETH_ALEN];
- WORD wSeqCtl;
- BYTE abyAddr4[ETH_ALEN];
+ unsigned short wFrameCtl;
+ unsigned short wDurationID;
+ unsigned char abyAddr1[ETH_ALEN];
+ unsigned char abyAddr2[ETH_ALEN];
+ unsigned char abyAddr3[ETH_ALEN];
+ unsigned short wSeqCtl;
+ unsigned char abyAddr4[ETH_ALEN];
}__attribute__ ((__packed__))
S802_11Header, *PS802_11Header;
/*--------------------- Export Macros ------------------------------*/
-// Frame type macro
-
-#define IS_MULTICAST_ADDRESS(pbyEtherAddr) \
- ((*(PBYTE)(pbyEtherAddr) & 0x01) == 1)
-
-#define IS_BROADCAST_ADDRESS(pbyEtherAddr) ( \
- (*(PDWORD)(pbyEtherAddr) == 0xFFFFFFFFL) && \
- (*(PWORD)((PBYTE)(pbyEtherAddr) + 4) == 0xFFFF) \
-)
-
-#define IS_NULL_ADDRESS(pbyEtherAddr) ( \
- (*(PDWORD)(pbyEtherAddr) == 0L) && \
- (*(PWORD)((PBYTE)(pbyEtherAddr) + 4) == 0) \
-)
-
-#define IS_ETH_ADDRESS_EQUAL(pbyAddr1, pbyAddr2) ( \
- (*(PDWORD)(pbyAddr1) == *(PDWORD)(pbyAddr2)) && \
- (*(PWORD)((PBYTE)(pbyAddr1) + 4) == \
- *(PWORD)((PBYTE)(pbyAddr2) + 4)) \
-)
/*--------------------- Export Classes ----------------------------*/
@@ -219,9 +193,9 @@ S802_11Header, *PS802_11Header;
/*--------------------- Export Functions --------------------------*/
-BYTE ETHbyGetHashIndexByCrc32(PBYTE pbyMultiAddr);
-//BYTE ETHbyGetHashIndexByCrc(PBYTE pbyMultiAddr);
-BOOL ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, UINT cbFrameLength);
+unsigned char ETHbyGetHashIndexByCrc32(unsigned char *pbyMultiAddr);
+//unsigned char ETHbyGetHashIndexByCrc(unsigned char *pbyMultiAddr);
+bool ETHbIsBufferCrc32Ok(unsigned char *pbyBuffer, unsigned int cbFrameLength);
#endif // __TETHER_H__
diff --git a/drivers/staging/vt6655/tkip.c b/drivers/staging/vt6655/tkip.c
index f83af5913aa..ed3eac17ae8 100644
--- a/drivers/staging/vt6655/tkip.c
+++ b/drivers/staging/vt6655/tkip.c
@@ -55,7 +55,7 @@
/* The 2nd table is the same as the 1st but with the upper and lower */
/* bytes swapped. To allow an endian tolerant implementation, the byte */
/* halves have been expressed independently here. */
-const BYTE TKIP_Sbox_Lower[256] = {
+const unsigned char TKIP_Sbox_Lower[256] = {
0xA5,0x84,0x99,0x8D,0x0D,0xBD,0xB1,0x54,
0x50,0x03,0xA9,0x7D,0x19,0x62,0xE6,0x9A,
0x45,0x9D,0x40,0x87,0x15,0xEB,0xC9,0x0B,
@@ -90,7 +90,7 @@ const BYTE TKIP_Sbox_Lower[256] = {
0xC3,0xB0,0x77,0x11,0xCB,0xFC,0xD6,0x3A
};
-const BYTE TKIP_Sbox_Upper[256] = {
+const unsigned char TKIP_Sbox_Upper[256] = {
0xC6,0xF8,0xEE,0xF6,0xFF,0xD6,0xDE,0x91,
0x60,0x02,0xCE,0x56,0xE7,0xB5,0x4D,0xEC,
0x8F,0x1F,0x89,0xFA,0xEF,0xB2,0x8E,0xFB,
@@ -184,11 +184,11 @@ unsigned int rotr1(unsigned int a)
*
*/
void TKIPvMixKey(
- PBYTE pbyTKey,
- PBYTE pbyTA,
- WORD wTSC15_0,
- DWORD dwTSC47_16,
- PBYTE pbyRC4Key
+ unsigned char *pbyTKey,
+ unsigned char *pbyTA,
+ unsigned short wTSC15_0,
+ unsigned long dwTSC47_16,
+ unsigned char *pbyRC4Key
)
{
unsigned int p1k[5];
diff --git a/drivers/staging/vt6655/tkip.h b/drivers/staging/vt6655/tkip.h
index 3dfa7f5ee7e..eb5951d726e 100644
--- a/drivers/staging/vt6655/tkip.h
+++ b/drivers/staging/vt6655/tkip.h
@@ -47,11 +47,11 @@
/*--------------------- Export Functions --------------------------*/
void TKIPvMixKey(
- PBYTE pbyTKey,
- PBYTE pbyTA,
- WORD wTSC15_0,
- DWORD dwTSC47_16,
- PBYTE pbyRC4Key
+ unsigned char *pbyTKey,
+ unsigned char *pbyTA,
+ unsigned short wTSC15_0,
+ unsigned long dwTSC47_16,
+ unsigned char *pbyRC4Key
);
#endif // __TKIP_H__
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index e96c140de05..e8b177d4128 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -34,27 +34,27 @@
/****** Common helper macros ***********************************************/
#if !defined(LOBYTE)
-#define LOBYTE(w) ((BYTE)(w))
+#define LOBYTE(w) ((unsigned char)(w))
#endif
#if !defined(HIBYTE)
-#define HIBYTE(w) ((BYTE)(((WORD)(w) >> 8) & 0xFF))
+#define HIBYTE(w) ((unsigned char)(((unsigned short)(w) >> 8) & 0xFF))
#endif
#if !defined(LOWORD)
-#define LOWORD(d) ((WORD)(d))
+#define LOWORD(d) ((unsigned short)(d))
#endif
#if !defined(HIWORD)
-#define HIWORD(d) ((WORD)((((DWORD)(d)) >> 16) & 0xFFFF))
+#define HIWORD(d) ((unsigned short)((((unsigned long)(d)) >> 16) & 0xFFFF))
#endif
#define LODWORD(q) ((q).u.dwLowDword)
#define HIDWORD(q) ((q).u.dwHighDword)
#if !defined(MAKEWORD)
-#define MAKEWORD(lb, hb) ((WORD)(((BYTE)(lb)) | (((WORD)((BYTE)(hb))) << 8)))
+#define MAKEWORD(lb, hb) ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
#endif
#if !defined(MAKEDWORD)
-#define MAKEDWORD(lw, hw) ((DWORD)(((WORD)(lw)) | (((DWORD)((WORD)(hw))) << 16)))
+#define MAKEDWORD(lw, hw) ((unsigned long)(((unsigned short)(lw)) | (((unsigned long)((unsigned short)(hw))) << 16)))
#endif
#endif // __TMACRO_H__
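[The converted helpers keep their old semantics; a few illustrative values, not part of the header:]

unsigned short w  = MAKEWORD(0x34, 0x12);   /* 0x1234: low byte first, high byte second */
unsigned char  l  = LOBYTE(w);              /* 0x34 */
unsigned char  h  = HIBYTE(w);              /* 0x12 */
unsigned long  d  = MAKEDWORD(w, 0xABCD);   /* 0xABCD1234 */
unsigned short lw = LOWORD(d);              /* 0x1234 */
unsigned short hw = HIWORD(d);              /* 0xABCD */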
diff --git a/drivers/staging/vt6655/ttype.h b/drivers/staging/vt6655/ttype.h
index 2921083a9f2..37c8fba1fd1 100644
--- a/drivers/staging/vt6655/ttype.h
+++ b/drivers/staging/vt6655/ttype.h
@@ -33,23 +33,10 @@
/******* Common definitions and typedefs ***********************************/
-#ifndef OUT
-#define OUT
-#endif
-
#ifndef TxInSleep
#define TxInSleep
#endif
-typedef int BOOL;
-
-#if !defined(TRUE)
-#define TRUE 1
-#endif
-#if !defined(FALSE)
-#define FALSE 0
-#endif
-
//2007-0809-01<Add>by MikeLiu
#ifndef update_BssList
#define update_BssList
@@ -65,10 +52,6 @@ typedef int BOOL;
#define Calcu_LinkQual
#endif
-#ifndef Calcu_LinkQual
-#define Calcu_LinkQual
-#endif
-
/****** Simple typedefs ***************************************************/
/* These lines assume that your compiler's longs are 32 bits and
@@ -76,37 +59,13 @@ typedef int BOOL;
* but it doesn't matter if they're signed or unsigned.
*/
-typedef signed char I8; /* 8-bit signed integer */
-
-typedef unsigned char U8; /* 8-bit unsigned integer */
-typedef unsigned short U16; /* 16-bit unsigned integer */
-typedef unsigned long U32; /* 32-bit unsigned integer */
-
-
-typedef char CHAR;
-typedef signed short SHORT;
-typedef signed int INT;
-typedef signed long LONG;
-
-typedef unsigned char UCHAR;
-typedef unsigned short USHORT;
-typedef unsigned int UINT;
-typedef unsigned long ULONG;
-typedef unsigned long long ULONGLONG; //64 bit
-
-
-
-typedef unsigned char BYTE; // 8-bit
-typedef unsigned short WORD; // 16-bit
-typedef unsigned long DWORD; // 32-bit
-
// QWORD is for those situation that we want
// an 8-byte-aligned 8 byte long structure
// which is NOT really a floating point number.
typedef union tagUQuadWord {
struct {
- DWORD dwLowDword;
- DWORD dwHighDword;
+ unsigned int dwLowDword;
+ unsigned int dwHighDword;
} u;
double DoNotUseThisField;
} UQuadWord;
@@ -114,18 +73,6 @@ typedef UQuadWord QWORD; // 64-bit
/****** Common pointer types ***********************************************/
-typedef unsigned long ULONG_PTR; // 32-bit
-typedef unsigned long DWORD_PTR; // 32-bit
-
-// boolean pointer
-typedef unsigned int * PUINT;
-
-typedef BYTE * PBYTE;
-
-typedef WORD * PWORD;
-
-typedef DWORD * PDWORD;
-
typedef QWORD * PQWORD;
#endif // __TTYPE_H__
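[For reference while reading the rest of the series, the replacements visible in these hunks follow a fixed mapping from the removed typedefs to standard C types; summary only, not part of the patch:]

/*
 * BYTE  / PBYTE       ->  unsigned char   / unsigned char *
 * WORD  / PWORD       ->  unsigned short  / unsigned short *
 * DWORD / PDWORD      ->  unsigned long   / unsigned long *
 *                         (unsigned int inside the UQuadWord union above)
 * UINT  / PUINT       ->  unsigned int    / unsigned int *
 * ULONG / DWORD_PTR   ->  unsigned long
 * BOOL, TRUE, FALSE   ->  bool, true, false
 */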
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index acd1b661490..9596fdef0e3 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -76,36 +76,36 @@
#define VNSvInPortB(dwIOAddress, pbyData) { \
- volatile BYTE* pbyAddr = ((PBYTE)(dwIOAddress)); \
+ volatile unsigned char * pbyAddr = ((unsigned char *)(dwIOAddress)); \
*(pbyData) = readb(pbyAddr); \
}
#define VNSvInPortW(dwIOAddress, pwData) { \
- volatile WORD* pwAddr = ((PWORD)(dwIOAddress)); \
+ volatile unsigned short *pwAddr = ((unsigned short *)(dwIOAddress)); \
*(pwData) = readw(pwAddr); \
}
#define VNSvInPortD(dwIOAddress, pdwData) { \
- volatile DWORD* pdwAddr = ((PDWORD)(dwIOAddress)); \
+ volatile unsigned long *pdwAddr = ((unsigned long *)(dwIOAddress)); \
*(pdwData) = readl(pdwAddr); \
}
#define VNSvOutPortB(dwIOAddress, byData) { \
- volatile BYTE* pbyAddr = ((PBYTE)(dwIOAddress)); \
- writeb((BYTE)byData, pbyAddr); \
+ volatile unsigned char * pbyAddr = ((unsigned char *)(dwIOAddress)); \
+ writeb((unsigned char)byData, pbyAddr); \
}
#define VNSvOutPortW(dwIOAddress, wData) { \
- volatile WORD* pwAddr = ((PWORD)(dwIOAddress)); \
- writew((WORD)wData, pwAddr); \
+ volatile unsigned short *pwAddr = ((unsigned short *)(dwIOAddress)); \
+ writew((unsigned short)wData, pwAddr); \
}
#define VNSvOutPortD(dwIOAddress, dwData) { \
- volatile DWORD* pdwAddr = ((PDWORD)(dwIOAddress)); \
- writel((DWORD)dwData, pdwAddr); \
+ volatile unsigned long *pdwAddr = ((unsigned long *)(dwIOAddress)); \
+ writel((unsigned long)dwData, pdwAddr); \
}
#endif
@@ -140,8 +140,8 @@
#define PCAvDelayByIO(uDelayUnit) { \
- BYTE byData; \
- ULONG ii; \
+ unsigned char byData; \
+ unsigned long ii; \
\
if (uDelayUnit <= 50) { \
udelay(uDelayUnit); \
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
index b527a019188..fcf26ab920d 100644
--- a/drivers/staging/vt6655/vntwifi.c
+++ b/drivers/staging/vt6655/vntwifi.c
@@ -101,9 +101,9 @@ VNTWIFIvSetOPMode (
void
VNTWIFIvSetIBSSParameter (
void *pMgmtHandle,
- WORD wBeaconPeriod,
- WORD wATIMWindow,
- UINT uChannel
+ unsigned short wBeaconPeriod,
+ unsigned short wATIMWindow,
+ unsigned int uChannel
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -150,7 +150,7 @@ VNTWIFIpGetCurrentSSID (
* Return Value: current Channel.
*
-*/
-UINT
+unsigned int
VNTWIFIpGetCurrentChannel (
void *pMgmtHandle
)
@@ -176,7 +176,7 @@ VNTWIFIpGetCurrentChannel (
* Return Value: current Assoc ID
*
-*/
-WORD
+unsigned short
VNTWIFIwGetAssocID (
void *pMgmtHandle
)
@@ -202,15 +202,15 @@ VNTWIFIwGetAssocID (
* Return Value: max support rate
*
-*/
-BYTE
+unsigned char
VNTWIFIbyGetMaxSupportRate (
PWLAN_IE_SUPP_RATES pSupportRateIEs,
PWLAN_IE_SUPP_RATES pExtSupportRateIEs
)
{
- BYTE byMaxSupportRate = RATE_1M;
- BYTE bySupportRate = RATE_1M;
- UINT ii = 0;
+ unsigned char byMaxSupportRate = RATE_1M;
+ unsigned char bySupportRate = RATE_1M;
+ unsigned int ii = 0;
if (pSupportRateIEs) {
for (ii = 0; ii < pSupportRateIEs->len; ii++) {
@@ -248,16 +248,16 @@ VNTWIFIbyGetMaxSupportRate (
* Return Value: max support rate
*
-*/
-BYTE
+unsigned char
VNTWIFIbyGetACKTxRate (
- BYTE byRxDataRate,
+ unsigned char byRxDataRate,
PWLAN_IE_SUPP_RATES pSupportRateIEs,
PWLAN_IE_SUPP_RATES pExtSupportRateIEs
)
{
- BYTE byMaxAckRate;
- BYTE byBasicRate;
- UINT ii;
+ unsigned char byMaxAckRate;
+ unsigned char byBasicRate;
+ unsigned int ii;
if (byRxDataRate <= RATE_11M) {
byMaxAckRate = RATE_1M;
@@ -317,9 +317,9 @@ VNTWIFIvSetAuthenticationMode (
pMgmt->eAuthenMode = eAuthMode;
if ((eAuthMode == WMAC_AUTH_SHAREKEY) ||
(eAuthMode == WMAC_AUTH_AUTO)) {
- pMgmt->bShareKeyAlgorithm = TRUE;
+ pMgmt->bShareKeyAlgorithm = true;
} else {
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
}
}
@@ -350,15 +350,15 @@ VNTWIFIvSetEncryptionMode (
if ((eEncryptionMode == WMAC_ENCRYPTION_WEPEnabled) ||
(eEncryptionMode == WMAC_ENCRYPTION_TKIPEnabled) ||
(eEncryptionMode == WMAC_ENCRYPTION_AESEnabled) ) {
- pMgmt->bPrivacyInvoked = TRUE;
+ pMgmt->bPrivacyInvoked = true;
} else {
- pMgmt->bPrivacyInvoked = FALSE;
+ pMgmt->bPrivacyInvoked = false;
}
}
-BOOL
+bool
VNTWIFIbConfigPhyMode (
void *pMgmtHandle,
CARD_PHY_TYPE ePhyType
@@ -368,14 +368,14 @@ VNTWIFIbConfigPhyMode (
if ((ePhyType != PHY_TYPE_AUTO) &&
(ePhyType != pMgmt->eCurrentPHYMode)) {
- if (CARDbSetPhyParameter(pMgmt->pAdapter, ePhyType, 0, 0, NULL, NULL)==TRUE) {
+ if (CARDbSetPhyParameter(pMgmt->pAdapter, ePhyType, 0, 0, NULL, NULL)==true) {
pMgmt->eCurrentPHYMode = ePhyType;
} else {
- return(FALSE);
+ return(false);
}
}
pMgmt->eConfigPHYMode = ePhyType;
- return(TRUE);
+ return(true);
}
@@ -425,16 +425,12 @@ VNTWIFIbGetConfigPhyMode (
-*/
void
-VNTWIFIvQueryBSSList (
- void *pMgmtHandle,
- PUINT puBSSCount,
- void **pvFirstBSS
- )
+VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount, void **pvFirstBSS)
{
- UINT ii = 0;
+ unsigned int ii = 0;
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
PKnownBSS pBSS = NULL;
- UINT uCount = 0;
+ unsigned int uCount = 0;
*pvFirstBSS = NULL;
@@ -471,7 +467,7 @@ VNTWIFIvGetNextBSS (
if (pBSS > &(pMgmt->sBSSList[MAX_BSS_NUM])) {
return;
}
- if (pBSS->bActive == TRUE) {
+ if (pBSS->bActive == true) {
*pvNextBSS = pBSS;
return;
}
@@ -497,24 +493,24 @@ VNTWIFIvGetNextBSS (
void
VNTWIFIvUpdateNodeTxCounter(
void *pMgmtHandle,
- PBYTE pbyDestAddress,
- BOOL bTxOk,
- WORD wRate,
- PBYTE pbyTxFailCount
+ unsigned char *pbyDestAddress,
+ bool bTxOk,
+ unsigned short wRate,
+ unsigned char *pbyTxFailCount
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- UINT uNodeIndex = 0;
- UINT ii;
+ unsigned int uNodeIndex = 0;
+ unsigned int ii;
if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
(pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex) == FALSE) {
+ if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex) == false) {
return;
}
}
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++;
- if (bTxOk == TRUE) {
+ if (bTxOk == true) {
// transmit success, TxAttempts at least plus one
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++;
@@ -532,19 +528,19 @@ VNTWIFIvUpdateNodeTxCounter(
void
VNTWIFIvGetTxRate(
void *pMgmtHandle,
- PBYTE pbyDestAddress,
- PWORD pwTxDataRate,
- PBYTE pbyACKRate,
- PBYTE pbyCCKBasicRate,
- PBYTE pbyOFDMBasicRate
+ unsigned char *pbyDestAddress,
+ unsigned short *pwTxDataRate,
+ unsigned char *pbyACKRate,
+ unsigned char *pbyCCKBasicRate,
+ unsigned char *pbyOFDMBasicRate
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- UINT uNodeIndex = 0;
- WORD wTxDataRate = RATE_1M;
- BYTE byACKRate = RATE_1M;
- BYTE byCCKBasicRate = RATE_1M;
- BYTE byOFDMBasicRate = RATE_24M;
+ unsigned int uNodeIndex = 0;
+ unsigned short wTxDataRate = RATE_1M;
+ unsigned char byACKRate = RATE_1M;
+ unsigned char byCCKBasicRate = RATE_1M;
+ unsigned char byOFDMBasicRate = RATE_24M;
PWLAN_IE_SUPP_RATES pSupportRateIEs = NULL;
PWLAN_IE_SUPP_RATES pExtSupportRateIEs = NULL;
@@ -579,12 +575,12 @@ VNTWIFIvGetTxRate(
pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
}
- byACKRate = VNTWIFIbyGetACKTxRate( (BYTE) wTxDataRate,
+ byACKRate = VNTWIFIbyGetACKTxRate( (unsigned char) wTxDataRate,
pSupportRateIEs,
pExtSupportRateIEs
);
- if (byACKRate > (BYTE) wTxDataRate) {
- byACKRate = (BYTE) wTxDataRate;
+ if (byACKRate > (unsigned char) wTxDataRate) {
+ byACKRate = (unsigned char) wTxDataRate;
}
byCCKBasicRate = VNTWIFIbyGetACKTxRate( RATE_11M,
pSupportRateIEs,
@@ -601,15 +597,15 @@ VNTWIFIvGetTxRate(
return;
}
-BYTE
+unsigned char
VNTWIFIbyGetKeyCypher(
void *pMgmtHandle,
- BOOL bGroupKey
+ bool bGroupKey
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- if (bGroupKey == TRUE) {
+ if (bGroupKey == true) {
return (pMgmt->byCSSGK);
} else {
return (pMgmt->byCSSPK);
@@ -618,7 +614,7 @@ VNTWIFIbyGetKeyCypher(
/*
-BOOL
+bool
VNTWIFIbInit(
void *pAdapterHandler,
void **pMgmtHandler
@@ -626,13 +622,13 @@ VNTWIFIbInit(
{
PSMgmtObject pMgmt = NULL;
- UINT ii;
+ unsigned int ii;
pMgmt = (PSMgmtObject)kmalloc(sizeof(SMgmtObject), (int)GFP_ATOMIC);
if (pMgmt == NULL) {
*pMgmtHandler = NULL;
- return FALSE;
+ return false;
}
memset(pMgmt, 0, sizeof(SMgmtObject));
@@ -652,41 +648,41 @@ VNTWIFIbInit(
pMgmt->uCmdDequeueIdx = 0;
pMgmt->uCmdEnqueueIdx = 0;
pMgmt->eCommandState = WLAN_CMD_STATE_IDLE;
- pMgmt->bCmdStop = FALSE;
- pMgmt->bCmdRunning = FALSE;
+ pMgmt->bCmdStop = false;
+ pMgmt->bCmdRunning = false;
*pMgmtHandler = pMgmt;
- return TRUE;
+ return true;
}
*/
-BOOL
+bool
VNTWIFIbSetPMKIDCache (
void *pMgmtObject,
- ULONG ulCount,
+ unsigned long ulCount,
void *pPMKIDInfo
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
if (ulCount > MAX_PMKID_CACHE) {
- return (FALSE);
+ return (false);
}
pMgmt->gsPMKIDCache.BSSIDInfoCount = ulCount;
memcpy(pMgmt->gsPMKIDCache.BSSIDInfo, pPMKIDInfo, (ulCount*sizeof(PMKIDInfo)));
- return (TRUE);
+ return (true);
}
-WORD
+unsigned short
VNTWIFIwGetMaxSupportRate(
void *pMgmtObject
)
{
- WORD wRate = RATE_54M;
+ unsigned short wRate = RATE_54M;
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
for(wRate = RATE_54M; wRate > RATE_1M; wRate--) {
@@ -705,7 +701,7 @@ VNTWIFIwGetMaxSupportRate(
void
VNTWIFIvSet11h (
void *pMgmtObject,
- BOOL b11hEnable
+ bool b11hEnable
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
@@ -713,19 +709,19 @@ VNTWIFIvSet11h (
pMgmt->b11hEnable = b11hEnable;
}
-BOOL
+bool
VNTWIFIbMeasureReport(
void *pMgmtObject,
- BOOL bEndOfReport,
+ bool bEndOfReport,
void *pvMeasureEID,
- BYTE byReportMode,
- BYTE byBasicMap,
- BYTE byCCAFraction,
- PBYTE pbyRPIs
+ unsigned char byReportMode,
+ unsigned char byBasicMap,
+ unsigned char byCCAFraction,
+ unsigned char *pbyRPIs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- PBYTE pbyCurrentEID = (PBYTE) (pMgmt->pCurrMeasureEIDRep);
+ unsigned char *pbyCurrentEID = (unsigned char *) (pMgmt->pCurrMeasureEIDRep);
//spin_lock_irq(&pDevice->lock);
if ((pvMeasureEID != NULL) &&
@@ -765,49 +761,49 @@ VNTWIFIbMeasureReport(
pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len);
pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID;
}
- if (bEndOfReport == TRUE) {
+ if (bEndOfReport == true) {
IEEE11hbMSRRepTx(pMgmt);
}
//spin_unlock_irq(&pDevice->lock);
- return (TRUE);
+ return (true);
}
-BOOL
+bool
VNTWIFIbChannelSwitch(
void *pMgmtObject,
- BYTE byNewChannel
+ unsigned char byNewChannel
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
//spin_lock_irq(&pDevice->lock);
pMgmt->uCurrChannel = byNewChannel;
- pMgmt->bSwitchChannel = FALSE;
+ pMgmt->bSwitchChannel = false;
//spin_unlock_irq(&pDevice->lock);
- return TRUE;
+ return true;
}
/*
-BOOL
+bool
VNTWIFIbRadarPresent(
void *pMgmtObject,
- BYTE byChannel
+ unsigned char byChannel
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (byChannel == (BYTE) pMgmt->uCurrChannel) &&
- (pMgmt->bSwitchChannel != TRUE) &&
- (pMgmt->b11hEnable == TRUE)) {
- if (IS_ETH_ADDRESS_EQUAL(pMgmt->abyIBSSDFSOwner, CARDpGetCurrentAddress(pMgmt->pAdapter))) {
- pMgmt->byNewChannel = CARDbyAutoChannelSelect(pMgmt->pAdapter,(BYTE) pMgmt->uCurrChannel);
- pMgmt->bSwitchChannel = TRUE;
+ (byChannel == (unsigned char) pMgmt->uCurrChannel) &&
+ (pMgmt->bSwitchChannel != true) &&
+ (pMgmt->b11hEnable == true)) {
+ if (!compare_ether_addr(pMgmt->abyIBSSDFSOwner, CARDpGetCurrentAddress(pMgmt->pAdapter))) {
+ pMgmt->byNewChannel = CARDbyAutoChannelSelect(pMgmt->pAdapter,(unsigned char) pMgmt->uCurrChannel);
+ pMgmt->bSwitchChannel = true;
}
BEACONbSendBeacon(pMgmt);
CARDbChannelSwitch(pMgmt->pAdapter, 0, pMgmt->byNewChannel, 10);
}
- return TRUE;
+ return true;
}
*/
diff --git a/drivers/staging/vt6655/vntwifi.h b/drivers/staging/vt6655/vntwifi.h
index c91dfd79adc..f4327abaa77 100644
--- a/drivers/staging/vt6655/vntwifi.h
+++ b/drivers/staging/vt6655/vntwifi.h
@@ -143,9 +143,9 @@ typedef enum tagWMAC_POWER_MODE {
void
VNTWIFIvSetIBSSParameter (
void *pMgmtHandle,
- WORD wBeaconPeriod,
- WORD wATIMWindow,
- UINT uChannel
+ unsigned short wBeaconPeriod,
+ unsigned short wATIMWindow,
+ unsigned int uChannel
);
void
@@ -159,25 +159,25 @@ VNTWIFIpGetCurrentSSID(
void *pMgmtHandle
);
-UINT
+unsigned int
VNTWIFIpGetCurrentChannel(
void *pMgmtHandle
);
-WORD
+unsigned short
VNTWIFIwGetAssocID (
void *pMgmtHandle
);
-BYTE
+unsigned char
VNTWIFIbyGetMaxSupportRate (
PWLAN_IE_SUPP_RATES pSupportRateIEs,
PWLAN_IE_SUPP_RATES pExtSupportRateIEs
);
-BYTE
+unsigned char
VNTWIFIbyGetACKTxRate (
- BYTE byRxDataRate,
+ unsigned char byRxDataRate,
PWLAN_IE_SUPP_RATES pSupportRateIEs,
PWLAN_IE_SUPP_RATES pExtSupportRateIEs
);
@@ -195,7 +195,7 @@ VNTWIFIvSetEncryptionMode (
);
-BOOL
+bool
VNTWIFIbConfigPhyMode(
void *pMgmtHandle,
CARD_PHY_TYPE ePhyType
@@ -208,14 +208,8 @@ VNTWIFIbGetConfigPhyMode(
);
void
-VNTWIFIvQueryBSSList(
- void *pMgmtHandle,
- PUINT puBSSCount,
- void **pvFirstBSS
- );
-
-
-
+VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount,
+ void **pvFirstBSS);
void
VNTWIFIvGetNextBSS (
@@ -229,52 +223,52 @@ VNTWIFIvGetNextBSS (
void
VNTWIFIvUpdateNodeTxCounter(
void *pMgmtHandle,
- PBYTE pbyDestAddress,
- BOOL bTxOk,
- WORD wRate,
- PBYTE pbyTxFailCount
+ unsigned char *pbyDestAddress,
+ bool bTxOk,
+ unsigned short wRate,
+ unsigned char *pbyTxFailCount
);
void
VNTWIFIvGetTxRate(
void *pMgmtHandle,
- PBYTE pbyDestAddress,
- PWORD pwTxDataRate,
- PBYTE pbyACKRate,
- PBYTE pbyCCKBasicRate,
- PBYTE pbyOFDMBasicRate
+ unsigned char *pbyDestAddress,
+ unsigned short *pwTxDataRate,
+ unsigned char *pbyACKRate,
+ unsigned char *pbyCCKBasicRate,
+ unsigned char *pbyOFDMBasicRate
);
/*
-BOOL
+bool
VNTWIFIbInit(
void *pAdapterHandler,
void **pMgmtHandler
);
*/
-BYTE
+unsigned char
VNTWIFIbyGetKeyCypher(
void *pMgmtHandle,
- BOOL bGroupKey
+ bool bGroupKey
);
-BOOL
+bool
VNTWIFIbSetPMKIDCache (
void *pMgmtObject,
- ULONG ulCount,
+ unsigned long ulCount,
void *pPMKIDInfo
);
-BOOL
+bool
VNTWIFIbCommandRunning (
void *pMgmtObject
);
-WORD
+unsigned short
VNTWIFIwGetMaxSupportRate(
void *pMgmtObject
);
@@ -283,30 +277,30 @@ VNTWIFIwGetMaxSupportRate(
void
VNTWIFIvSet11h (
void *pMgmtObject,
- BOOL b11hEnable
+ bool b11hEnable
);
-BOOL
+bool
VNTWIFIbMeasureReport(
void *pMgmtObject,
- BOOL bEndOfReport,
+ bool bEndOfReport,
void *pvMeasureEID,
- BYTE byReportMode,
- BYTE byBasicMap,
- BYTE byCCAFraction,
- PBYTE pbyRPIs
+ unsigned char byReportMode,
+ unsigned char byBasicMap,
+ unsigned char byCCAFraction,
+ unsigned char *pbyRPIs
);
-BOOL
+bool
VNTWIFIbChannelSwitch(
void *pMgmtObject,
- BYTE byNewChannel
+ unsigned char byNewChannel
);
/*
-BOOL
+bool
VNTWIFIbRadarPresent(
void *pMgmtObject,
- BYTE byChannel
+ unsigned char byChannel
);
*/
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
index 28665d870f5..abd6745bc3f 100644
--- a/drivers/staging/vt6655/wcmd.c
+++ b/drivers/staging/vt6655/wcmd.c
@@ -52,6 +52,7 @@
#include "rxtx.h"
#include "rf.h"
#include "iowpa.h"
+#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
@@ -77,7 +78,7 @@ PSTxMgmtPacket
s_MgrMakeProbeRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
- PBYTE pScanBSSID,
+ unsigned char *pScanBSSID,
PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
@@ -85,7 +86,7 @@ s_MgrMakeProbeRequest(
static
-BOOL
+bool
s_bCommandComplete (
PSDevice pDevice
);
@@ -116,7 +117,7 @@ vAdHocBeaconStop(PSDevice pDevice)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- BOOL bStop;
+ bool bStop;
/*
* temporarily stop Beacon packet for AdHoc Server
@@ -129,18 +130,18 @@ vAdHocBeaconStop(PSDevice pDevice)
* or
* (3.2) AdHoc channel is in A mode
*/
- bStop = FALSE;
+ bStop = false;
if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
(pMgmt->eCurrState >= WMAC_STATE_STARTED))
{
if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
(pMgmt->uScanChannel > CB_MAX_CHANNEL_24G))
{
- bStop = TRUE;
+ bStop = true;
}
if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
{
- bStop = TRUE;
+ bStop = true;
}
}
@@ -208,15 +209,15 @@ s_vProbeChannel(
)
{
//1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
- BYTE abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- BYTE abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
+ unsigned char abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
+ unsigned char abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
//6M, 9M, 12M, 48M
- BYTE abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- BYTE abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- PBYTE pbyRate;
+ unsigned char abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+ unsigned char abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+ unsigned char *pbyRate;
PSTxMgmtPacket pTxPacket;
PSMgmtObject pMgmt = pDevice->pMgmt;
- UINT ii;
+ unsigned int ii;
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
@@ -269,7 +270,7 @@ PSTxMgmtPacket
s_MgrMakeProbeRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
- PBYTE pScanBSSID,
+ unsigned char *pScanBSSID,
PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
@@ -282,8 +283,8 @@ s_MgrMakeProbeRequest(
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBEREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
vMgrEncodeProbeRequest(&sFrame);
sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
@@ -320,16 +321,16 @@ s_MgrMakeProbeRequest(
void
vCommandTimerWait(
void *hDeviceContext,
- UINT MSecond
+ unsigned int MSecond
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (ULONG)pDevice;
+ pDevice->sTimerCommand.data = (unsigned long) pDevice;
pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
// RUN_AT :1 msec ~= (HZ/1024)
- pDevice->sTimerCommand.expires = (UINT)RUN_AT((MSecond * HZ) >> 10);
+ pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
add_timer(&pDevice->sTimerCommand);
return;
}
@@ -347,14 +348,14 @@ vCommandTimer (
PWLAN_IE_SSID pItemSSID;
PWLAN_IE_SSID pItemSSIDCurr;
CMD_STATUS Status;
- UINT ii;
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ unsigned int ii;
+ unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
struct sk_buff *skb;
if (pDevice->dwDiagRefCount != 0)
return;
- if (pDevice->bCmdRunning != TRUE)
+ if (pDevice->bCmdRunning != true)
return;
spin_lock_irq(&pDevice->lock);
@@ -364,7 +365,7 @@ vCommandTimer (
case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == TRUE) {
+ if (pDevice->bRadioOff == true) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -396,7 +397,7 @@ vCommandTimer (
// Set Baseband's sensitivity back.
// Set channel back
- CARDbSetChannel(pMgmt->pAdapter, pMgmt->uCurrChannel);
+ set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC);
@@ -408,7 +409,7 @@ vCommandTimer (
} else {
//2008-8-4 <add> by chester
- if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
+ if (!is_channel_valid(pMgmt->uScanChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel);
s_bCommandComplete(pDevice);
return;
@@ -431,7 +432,7 @@ vCommandTimer (
vAdHocBeaconStop(pDevice);
- if (CARDbSetChannel(pMgmt->pAdapter, pMgmt->uScanChannel) == TRUE) {
+ if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel) == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SCAN Channel: %d\n", pMgmt->uScanChannel);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"SET SCAN Channel Fail: %d\n", pMgmt->uScanChannel);
@@ -441,7 +442,7 @@ vCommandTimer (
// printk("chester-ch=%d\n",pMgmt->uScanChannel);
pMgmt->uScanChannel++;
//2008-8-4 <modify> by chester
- if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
+ if (!is_channel_valid(pMgmt->uScanChannel) &&
pMgmt->uScanChannel <= pDevice->byMaxChannel ){
pMgmt->uScanChannel=pDevice->byMaxChannel+1;
pMgmt->eCommandState = WLAN_CMD_SCAN_END;
@@ -449,7 +450,7 @@ vCommandTimer (
}
- if ((pMgmt->b11hEnable == FALSE) ||
+ if ((pMgmt->b11hEnable == false) ||
(pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
s_vProbeChannel(pDevice);
spin_unlock_irq(&pDevice->lock);
@@ -469,7 +470,7 @@ vCommandTimer (
// Set Baseband's sensitivity back.
// Set channel back
- CARDbSetChannel(pMgmt->pAdapter, pMgmt->uCurrChannel);
+ set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, OP_MODE_ADHOC);
@@ -502,14 +503,14 @@ vCommandTimer (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
// reason = 8 : disassoc because sta has left
vMgrDisassocBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
// unlock command busy
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
pItemSSID->len = 0;
memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = FALSE;
-// pDevice->bBeaconBufReady = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
+// pDevice->bBeaconBufReady = false;
}
netif_stop_queue(pDevice->dev);
pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT;
@@ -539,7 +540,7 @@ vCommandTimer (
case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == TRUE) {
+ if (pDevice->bRadioOff == true) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -573,7 +574,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
}
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
}
// set initial state
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -607,9 +608,9 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
- pMgmt->sNodeDBTable[0].bActive = TRUE;
+ pMgmt->sNodeDBTable[0].bActive = true;
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
bClearBSSID_SCAN(pDevice);
}
@@ -635,12 +636,12 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -685,7 +686,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
pDevice->byLinkWaitCount = 0;
#if 0
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -707,7 +708,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset);
}
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
pDevice->byLinkWaitCount = 0;
pDevice->byReAssocCount = 0;
bClearBSSID_SCAN(pDevice);
@@ -719,20 +720,20 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
netif_wake_queue(pDevice->dev);
}
#ifdef TxInSleep
- if(pDevice->IsTxDataTrigger != FALSE) { //TxDataTimer is not triggered at the first time
+ if(pDevice->IsTxDataTrigger != false) { //TxDataTimer is not triggered at the first time
// printk("Re-initial TxDataTimer****\n");
del_timer(&pDevice->sTimerTxData);
init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (ULONG)pDevice;
+ pDevice->sTimerTxData.data = (unsigned long) pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = FALSE;
+ pDevice->fTxDataInSleep = false;
pDevice->nTxDataTimeCout = 0;
}
else {
// printk("mike:-->First time triger TimerTxData InSleep\n");
}
- pDevice->IsTxDataTrigger = TRUE;
+ pDevice->IsTxDataTrigger = true;
add_timer(&pDevice->sTimerTxData);
#endif
}
@@ -749,7 +750,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
pDevice->byLinkWaitCount = 0;
#if 0
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -770,14 +771,14 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
del_timer(&pMgmt->sTimerSecondCallback);
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pDevice->bLinkPass = FALSE;
- if (pDevice->bEnableHostWEP == TRUE)
+ pDevice->bLinkPass = false;
+ if (pDevice->bEnableHostWEP == true)
BSSvClearNodeDBTable(pDevice, 1);
else
BSSvClearNodeDBTable(pDevice, 0);
pDevice->uAssocCount = 0;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bFixRate = FALSE;
+ pDevice->bFixRate = false;
vMgrCreateOwnIBSS((void *)pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS){
@@ -791,7 +792,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
add_timer(&pMgmt->sTimerSecondCallback);
}
s_bCommandComplete(pDevice);
@@ -803,10 +804,10 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
pMgmt->abyPSTxMap[0] &= ~byMask[0];
- pDevice->bMoreData = FALSE;
+ pDevice->bMoreData = false;
}
else {
- pDevice->bMoreData = TRUE;
+ pDevice->bMoreData = true;
}
if (!device_dma0_xmit(pDevice, skb, 0)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n");
@@ -826,10 +827,10 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
// clear tx map
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pDevice->bMoreData = FALSE;
+ pDevice->bMoreData = false;
}
else {
- pDevice->bMoreData = TRUE;
+ pDevice->bMoreData = true;
}
if (!device_dma0_xmit(pDevice, skb, ii)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n");
@@ -846,7 +847,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii);
}
- pMgmt->sNodeDBTable[ii].bRxPSPoll = FALSE;
+ pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
}
}
@@ -856,7 +857,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
case WLAN_CMD_RADIO_START :
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_RADIO_START\n");
- if (pDevice->bRadioCmd == TRUE)
+ if (pDevice->bRadioCmd == true)
CARDbRadioPowerOn(pDevice);
else
CARDbRadioPowerOff(pDevice);
@@ -896,23 +897,23 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
static
-BOOL
+bool
s_bCommandComplete (
PSDevice pDevice
)
{
PWLAN_IE_SSID pSSID;
- BOOL bRadioCmd = FALSE;
- //WORD wDeAuthenReason = 0;
- BOOL bForceSCAN = TRUE;
+ bool bRadioCmd = false;
+ //unsigned short wDeAuthenReason = 0;
+ bool bForceSCAN = true;
PSMgmtObject pMgmt = pDevice->pMgmt;
pDevice->eCommandState = WLAN_CMD_IDLE;
if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
//Command Queue Empty
- pDevice->bCmdRunning = FALSE;
- return TRUE;
+ pDevice->bCmdRunning = false;
+ return true;
}
else {
pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
@@ -921,7 +922,7 @@ s_bCommandComplete (
bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
pDevice->cbFreeCmdQueue++;
- pDevice->bCmdRunning = TRUE;
+ pDevice->bCmdRunning = true;
switch ( pDevice->eCommand ) {
case WLAN_CMD_BSSID_SCAN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_BSSID_SCAN\n");
@@ -933,7 +934,7 @@ s_bCommandComplete (
memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
}
/*
- if ((bForceSCAN == FALSE) && (pDevice->bLinkPass == TRUE)) {
+ if ((bForceSCAN == false) && (pDevice->bLinkPass == true)) {
if ((pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) &&
( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, pSSID->len))) {
pDevice->eCommandState = WLAN_CMD_IDLE;
@@ -974,25 +975,25 @@ s_bCommandComplete (
vCommandTimerWait((void *)pDevice, 0);
}
- return TRUE;
+ return true;
}
-BOOL bScheduleCommand (
+bool bScheduleCommand (
void *hDeviceContext,
CMD_CODE eCommand,
- PBYTE pbyItem0
+ unsigned char *pbyItem0
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
if (pDevice->cbFreeCmdQueue == 0) {
- return (FALSE);
+ return (false);
}
pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = TRUE;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
if (pbyItem0 != NULL) {
@@ -1001,7 +1002,7 @@ BOOL bScheduleCommand (
case WLAN_CMD_BSSID_SCAN:
memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = FALSE;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
break;
case WLAN_CMD_SSID:
@@ -1014,7 +1015,7 @@ BOOL bScheduleCommand (
break;
/*
case WLAN_CMD_DEAUTH:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].wDeAuthenReason = *((PWORD)pbyItem0);
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].wDeAuthenReason = *((unsigned short *)pbyItem0);
break;
*/
@@ -1037,12 +1038,12 @@ BOOL bScheduleCommand (
ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
pDevice->cbFreeCmdQueue--;
- if (pDevice->bCmdRunning == FALSE) {
+ if (pDevice->bCmdRunning == false) {
s_bCommandComplete(pDevice);
}
else {
}
- return (TRUE);
+ return (true);
}
@@ -1057,16 +1058,16 @@ BOOL bScheduleCommand (
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
+ * Return Value: true if success; otherwise false
*
*/
-BOOL bClearBSSID_SCAN (
+bool bClearBSSID_SCAN (
void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
- UINT uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
- UINT ii;
+ unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
+ unsigned int ii;
if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii ++) {
@@ -1077,7 +1078,7 @@ BOOL bClearBSSID_SCAN (
break;
}
}
- return TRUE;
+ return true;
}
//mike add:reset command timer
@@ -1092,15 +1093,15 @@ vResetCommandTimer(
del_timer(&pDevice->sTimerCommand);
//init timer
init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (ULONG)pDevice;
+ pDevice->sTimerCommand.data = (unsigned long) pDevice;
pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
pDevice->sTimerCommand.expires = RUN_AT(HZ);
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
pDevice->eCommandState = WLAN_CMD_IDLE;
- pDevice->bCmdRunning = FALSE;
- pDevice->bCmdClear = FALSE;
+ pDevice->bCmdRunning = false;
+ pDevice->bCmdClear = false;
}
@@ -1125,16 +1126,16 @@ BSSvSecondTxData(
spin_lock_irq(&pDevice->lock);
#if 1
- if(((pDevice->bLinkPass ==TRUE)&&(pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
- (pDevice->fWPA_Authened == TRUE)) { //wpa linking
+ if(((pDevice->bLinkPass ==true)&&(pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
+ (pDevice->fWPA_Authened == true)) { //wpa linking
#else
- if(pDevice->bLinkPass ==TRUE) {
+ if(pDevice->bLinkPass ==true) {
#endif
// printk("mike:%s-->InSleep Tx Data Procedure\n",__FUNCTION__);
- pDevice->fTxDataInSleep = TRUE;
+ pDevice->fTxDataInSleep = true;
PSbSendNullPacket(pDevice); //send null packet
- pDevice->fTxDataInSleep = FALSE;
+ pDevice->fTxDataInSleep = false;
}
spin_unlock_irq(&pDevice->lock);
diff --git a/drivers/staging/vt6655/wcmd.h b/drivers/staging/vt6655/wcmd.h
index c3c41808951..69d4fc55b84 100644
--- a/drivers/staging/vt6655/wcmd.h
+++ b/drivers/staging/vt6655/wcmd.h
@@ -75,11 +75,11 @@ typedef enum tagCMD_STATUS {
typedef struct tagCMD_ITEM {
CMD_CODE eCmd;
- BYTE abyCmdDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BOOL bNeedRadioOFF;
- WORD wDeAuthenReason;
- BOOL bRadioCmd;
- BOOL bForceSCAN;
+ unsigned char abyCmdDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ bool bNeedRadioOFF;
+ unsigned short wDeAuthenReason;
+ bool bRadioCmd;
+ bool bForceSCAN;
} CMD_ITEM, *PCMD_ITEM;
// Command state
@@ -119,21 +119,21 @@ vCommandTimer (
void *hDeviceContext
);
-BOOL bClearBSSID_SCAN(
+bool bClearBSSID_SCAN(
void *hDeviceContext
);
-BOOL
+bool
bScheduleCommand(
void *hDeviceContext,
CMD_CODE eCommand,
- PBYTE pbyItem0
+ unsigned char *pbyItem0
);
void
vCommandTimerWait(
void *hDeviceContext,
- UINT MSecond
+ unsigned int MSecond
);
#ifdef TxInSleep
void
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
index 64a66b2f1fc..c096583a772 100644
--- a/drivers/staging/vt6655/wctl.c
+++ b/drivers/staging/vt6655/wctl.c
@@ -52,8 +52,8 @@
/*
* Description:
- * Scan Rx cache. Return TRUE if packet is duplicate, else
- * inserts in receive cache and returns FALSE.
+ * Scan Rx cache. Return true if packet is duplicate, else
+ * inserts in receive cache and returns false.
*
* Parameters:
* In:
@@ -62,14 +62,14 @@
* Out:
* none
*
- * Return Value: TRUE if packet duplicate; otherwise FALSE
+ * Return Value: true if packet duplicate; otherwise false
*
*/
-BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
+bool WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
{
- UINT uIndex;
- UINT ii;
+ unsigned int uIndex;
+ unsigned int ii;
PSCacheEntry pCacheEntry;
if (IS_FC_RETRY(pMACHeader)) {
@@ -78,10 +78,10 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
for (ii = 0; ii < DUPLICATE_RX_CACHE_LENGTH; ii++) {
pCacheEntry = &(pCache->asCacheEntry[uIndex]);
if ((pCacheEntry->wFmSequence == pMACHeader->wSeqCtl) &&
- (IS_ETH_ADDRESS_EQUAL (&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
+ (!compare_ether_addr(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
) {
/* Duplicate match */
- return TRUE;
+ return true;
}
ADD_ONE_WITH_WRAP_AROUND(uIndex, DUPLICATE_RX_CACHE_LENGTH);
}
@@ -91,7 +91,7 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
pCacheEntry->wFmSequence = pMACHeader->wSeqCtl;
memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
ADD_ONE_WITH_WRAP_AROUND(pCache->uInPtr, DUPLICATE_RX_CACHE_LENGTH);
- return FALSE;
+ return false;
}
/*
@@ -108,13 +108,13 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
* Return Value: index number in Defragment Database
*
*/
-UINT WCTLuSearchDFCB (PSDevice pDevice, PS802_11Header pMACHeader)
+unsigned int WCTLuSearchDFCB (PSDevice pDevice, PS802_11Header pMACHeader)
{
-UINT ii;
+unsigned int ii;
for(ii=0;ii<pDevice->cbDFCB;ii++) {
- if ((pDevice->sRxDFCB[ii].bInUse == TRUE) &&
- (IS_ETH_ADDRESS_EQUAL (&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
+ if ((pDevice->sRxDFCB[ii].bInUse == true) &&
+ (!compare_ether_addr(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
) {
//
return(ii);
@@ -138,17 +138,17 @@ UINT ii;
* Return Value: index number in Defragment Database
*
*/
-UINT WCTLuInsertDFCB (PSDevice pDevice, PS802_11Header pMACHeader)
+unsigned int WCTLuInsertDFCB (PSDevice pDevice, PS802_11Header pMACHeader)
{
-UINT ii;
+unsigned int ii;
if (pDevice->cbFreeDFCB == 0)
return(pDevice->cbDFCB);
for(ii=0;ii<pDevice->cbDFCB;ii++) {
- if (pDevice->sRxDFCB[ii].bInUse == FALSE) {
+ if (pDevice->sRxDFCB[ii].bInUse == false) {
pDevice->cbFreeDFCB--;
pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
- pDevice->sRxDFCB[ii].bInUse = TRUE;
+ pDevice->sRxDFCB[ii].bInUse = true;
pDevice->sRxDFCB[ii].wSequence = (pMACHeader->wSeqCtl >> 4);
pDevice->sRxDFCB[ii].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
@@ -172,15 +172,15 @@ UINT ii;
* Out:
* none
*
- * Return Value: TRUE if it is valid fragment packet and we have resource to defragment; otherwise FALSE
+ * Return Value: true if it is valid fragment packet and we have resource to defragment; otherwise false
*
*/
-BOOL WCTLbHandleFragment (PSDevice pDevice, PS802_11Header pMACHeader, UINT cbFrameLength, BOOL bWEP, BOOL bExtIV)
+bool WCTLbHandleFragment (PSDevice pDevice, PS802_11Header pMACHeader, unsigned int cbFrameLength, bool bWEP, bool bExtIV)
{
-UINT uHeaderSize;
+unsigned int uHeaderSize;
- if (bWEP == TRUE) {
+ if (bWEP == true) {
uHeaderSize = 28;
if (bExtIV)
// ExtIV
@@ -201,17 +201,17 @@ UINT uHeaderSize;
else {
pDevice->uCurrentDFCBIdx = WCTLuInsertDFCB(pDevice, pMACHeader);
if (pDevice->uCurrentDFCBIdx == pDevice->cbDFCB) {
- return(FALSE);
+ return(false);
}
}
// reserve 4 byte to match MAC RX Buffer
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer = (PBYTE) (pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb->data + 4);
+ pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer = (unsigned char *) (pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb->data + 4);
memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, pMACHeader, cbFrameLength);
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength = cbFrameLength;
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += cbFrameLength;
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "First pDevice->uCurrentDFCBIdx= %d\n", pDevice->uCurrentDFCBIdx);
- return(FALSE);
+ return(false);
}
else {
pDevice->uCurrentDFCBIdx = WCTLuSearchDFCB(pDevice, pMACHeader);
@@ -220,7 +220,7 @@ UINT uHeaderSize;
(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum == (pMACHeader->wSeqCtl & 0x000F)) &&
((pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength + cbFrameLength - uHeaderSize) < 2346)) {
- memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, ((PBYTE) (pMACHeader) + uHeaderSize), (cbFrameLength - uHeaderSize));
+ memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, ((unsigned char *) (pMACHeader) + uHeaderSize), (cbFrameLength - uHeaderSize));
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength += (cbFrameLength - uHeaderSize);
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += (cbFrameLength - uHeaderSize);
pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
@@ -229,21 +229,21 @@ UINT uHeaderSize;
else {
// seq error or frag # error flush DFCB
pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = FALSE;
- return(FALSE);
+ pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
+ return(false);
}
}
else {
- return(FALSE);
+ return(false);
}
if (IS_LAST_FRAGMENT_PKT(pMACHeader)) {
//enq defragcontrolblock
pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = FALSE;
+ pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last pDevice->uCurrentDFCBIdx= %d\n", pDevice->uCurrentDFCBIdx);
- return(TRUE);
+ return(true);
}
- return(FALSE);
+ return(false);
}
}
diff --git a/drivers/staging/vt6655/wctl.h b/drivers/staging/vt6655/wctl.h
index a1ac4791bfd..a92bb6d2b3f 100644
--- a/drivers/staging/vt6655/wctl.h
+++ b/drivers/staging/vt6655/wctl.h
@@ -97,10 +97,11 @@
/*--------------------- Export Functions --------------------------*/
-BOOL WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
-BOOL WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader, UINT cbFrameLength, BOOL bWEP, BOOL bExtIV);
-UINT WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
-UINT WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
+bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
+bool WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader,
+ unsigned int cbFrameLength, bool bWEP, bool bExtIV);
+unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
+unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
#endif // __WCTL_H__
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index 8af356fd139..e540110a430 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -65,6 +65,7 @@
#include "desc.h"
#include "device.h"
#include "card.h"
+#include "channel.h"
#include "80211hdr.h"
#include "80211mgr.h"
#include "wmgr.h"
@@ -93,9 +94,9 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
//2008-8-4 <add> by chester
-static BOOL ChannelExceedZoneType(
+static bool ChannelExceedZoneType(
PSDevice pDevice,
- BYTE byCurrChannel
+ unsigned char byCurrChannel
);
// Association/diassociation functions
@@ -104,9 +105,9 @@ PSTxMgmtPacket
s_MgrMakeAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
+ unsigned char *pDAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wListenInterval,
PWLAN_IE_SSID pCurrSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
@@ -118,7 +119,7 @@ s_vMgrRxAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- UINT uNodeIndex
+ unsigned int uNodeIndex
);
static
@@ -126,9 +127,9 @@ PSTxMgmtPacket
s_MgrMakeReAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
+ unsigned char *pDAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wListenInterval,
PWLAN_IE_SSID pCurrSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
@@ -140,7 +141,7 @@ s_vMgrRxAssocResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- BOOL bReAssocType
+ bool bReAssocType
);
static
@@ -225,7 +226,7 @@ s_vMgrRxBeacon(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- BOOL bInScan
+ bool bInScan
);
static
@@ -240,12 +241,12 @@ PSTxMgmtPacket
s_MgrMakeBeacon(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- UINT uCurrChannel,
- WORD wCurrATIMWinodw,
+ unsigned short wCurrCapInfo,
+ unsigned short wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ unsigned short wCurrATIMWinodw,
PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
+ unsigned char *pCurrBSSID,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -257,10 +258,10 @@ PSTxMgmtPacket
s_MgrMakeAssocResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wAssocStatus,
+ unsigned short wAssocAID,
+ unsigned char *pDstAddr,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -271,10 +272,10 @@ PSTxMgmtPacket
s_MgrMakeReAssocResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wAssocStatus,
+ unsigned short wAssocAID,
+ unsigned char *pDstAddr,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -285,16 +286,16 @@ PSTxMgmtPacket
s_MgrMakeProbeResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- UINT uCurrChannel,
- WORD wCurrATIMWinodw,
- PBYTE pDstAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ unsigned short wCurrATIMWinodw,
+ unsigned char *pDstAddr,
PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
+ unsigned char *pCurrBSSID,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- BYTE byPHYType
+ unsigned char byPHYType
);
// received status
@@ -302,7 +303,7 @@ static
void
s_vMgrLogStatus(
PSMgmtObject pMgmt,
- WORD wStatus
+ unsigned short wStatus
);
@@ -310,18 +311,18 @@ static
void
s_vMgrSynchBSS (
PSDevice pDevice,
- UINT uBSSMode,
+ unsigned int uBSSMode,
PKnownBSS pCurr,
PCMD_STATUS pStatus
);
-static BOOL
+static bool
s_bCipherMatch (
PKnownBSS pBSSNode,
NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- PBYTE pbyCCSPK,
- PBYTE pbyCCSGK
+ unsigned char *pbyCCSPK,
+ unsigned char *pbyCCSGK
);
static void Encyption_Rebuild(
@@ -368,7 +369,7 @@ vMgrObjectInit(
pMgmt->byCSSPK = KEY_CTL_NONE;
pMgmt->byCSSGK = KEY_CTL_NONE;
pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
- BSSvClearBSSList((void *)pDevice, FALSE);
+ BSSvClearBSSList((void *)pDevice, false);
return;
}
@@ -393,22 +394,22 @@ vMgrTimerInit(
init_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->sTimerSecondCallback.data = (ULONG)pDevice;
+ pMgmt->sTimerSecondCallback.data = (unsigned long) pDevice;
pMgmt->sTimerSecondCallback.function = (TimerFunction)BSSvSecondCallBack;
pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (ULONG)pDevice;
+ pDevice->sTimerCommand.data = (unsigned long) pDevice;
pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
pDevice->sTimerCommand.expires = RUN_AT(HZ);
#ifdef TxInSleep
init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (ULONG)pDevice;
+ pDevice->sTimerTxData.data = (unsigned long) pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = FALSE;
- pDevice->IsTxDataTrigger = FALSE;
+ pDevice->fTxDataInSleep = false;
+ pDevice->IsTxDataTrigger = false;
pDevice->nTxDataTimeCout = 0;
#endif
@@ -441,7 +442,7 @@ vMgrObjectReset(
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bEnablePSMode = FALSE;
+ pDevice->bEnablePSMode = false;
// TODO: timer
return;
@@ -487,15 +488,15 @@ vMgrAssocBeginSta(
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter) == TRUE) {
+ if (CARDbIsShorSlotTime(pMgmt->pAdapter) == true) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
}
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter) == TRUE) {
+ if (CARDbIsShortPreamble(pMgmt->pAdapter) == true) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == TRUE)
+ if (pMgmt->b11hEnable == true)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
/* build an assocreq frame and send it */
@@ -566,15 +567,15 @@ vMgrReAssocBeginSta(
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter) == TRUE) {
+ if (CARDbIsShorSlotTime(pMgmt->pAdapter) == true) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
}
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter) == TRUE) {
+ if (CARDbIsShortPreamble(pMgmt->pAdapter) == true) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == TRUE)
+ if (pMgmt->b11hEnable == true)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
@@ -619,8 +620,8 @@ void
vMgrDisassocBeginSta(
void *hDeviceContext,
PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
+ unsigned char *abyDestAddress,
+ unsigned short wReason,
PCMD_STATUS pStatus
)
{
@@ -630,10 +631,10 @@ vMgrDisassocBeginSta(
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DISASSOC_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
// Setup the sFrame structure
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_DISASSOC_FR_MAXLEN;
// format fixed field frame structure
@@ -683,17 +684,17 @@ s_vMgrRxAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- UINT uNodeIndex
+ unsigned int uNodeIndex
)
{
WLAN_FR_ASSOCREQ sFrame;
CMD_STATUS Status;
PSTxMgmtPacket pTxPacket;
- WORD wAssocStatus = 0;
- WORD wAssocAID = 0;
- UINT uRateLen = WLAN_RATES_MAXLEN;
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned short wAssocStatus = 0;
+ unsigned short wAssocAID = 0;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
+ unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
@@ -708,7 +709,7 @@ s_vMgrRxAssocRequest(
memset(abyCurrSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
memset(abyCurrExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeAssocRequest(&sFrame);
@@ -717,7 +718,7 @@ s_vMgrRxAssocRequest(
pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? TRUE : FALSE;
+ WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
// Todo: check sta basic rate, if ap can't support, set status code
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
uRateLen = WLAN_RATES_MAXLEN_11B;
@@ -739,7 +740,7 @@ s_vMgrRxAssocRequest(
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- FALSE, // do not change our basic rate
+ false, // do not change our basic rate
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -758,20 +759,20 @@ s_vMgrRxAssocRequest(
WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (WORD)uNodeIndex;
+ pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)uNodeIndex;
wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
- wAssocAID = (WORD)uNodeIndex;
+ wAssocAID = (unsigned short)uNodeIndex;
// check if ERP support
if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;
+ pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
// B only STA join
- pDevice->bProtectMode = TRUE;
- pDevice->bNonERPPresent = TRUE;
+ pDevice->bProtectMode = true;
+ pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == FALSE) {
- pDevice->bBarkerPreambleMd = TRUE;
+ if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ pDevice->bBarkerPreambleMd = true;
}
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Associate AID= %d \n", wAssocAID);
@@ -845,17 +846,17 @@ s_vMgrRxReAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- UINT uNodeIndex
+ unsigned int uNodeIndex
)
{
WLAN_FR_REASSOCREQ sFrame;
CMD_STATUS Status;
PSTxMgmtPacket pTxPacket;
- WORD wAssocStatus = 0;
- WORD wAssocAID = 0;
- UINT uRateLen = WLAN_RATES_MAXLEN;
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned short wAssocStatus = 0;
+ unsigned short wAssocAID = 0;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
+ unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
return;
@@ -866,7 +867,7 @@ s_vMgrRxReAssocRequest(
//decode the frame
memset(&sFrame, 0, sizeof(WLAN_FR_REASSOCREQ));
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeReassocRequest(&sFrame);
if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
@@ -874,7 +875,7 @@ s_vMgrRxReAssocRequest(
pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? TRUE : FALSE;
+ WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
// Todo: check sta basic rate, if ap can't support, set status code
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
@@ -898,7 +899,7 @@ s_vMgrRxReAssocRequest(
RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- FALSE, // do not change our basic rate
+ false, // do not change our basic rate
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -917,21 +918,21 @@ s_vMgrRxReAssocRequest(
WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (WORD)uNodeIndex;
+ pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)uNodeIndex;
wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
- wAssocAID = (WORD)uNodeIndex;
+ wAssocAID = (unsigned short)uNodeIndex;
// if suppurt ERP
if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;
+ pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
// B only STA join
- pDevice->bProtectMode = TRUE;
- pDevice->bNonERPPresent = TRUE;
+ pDevice->bProtectMode = true;
+ pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == FALSE) {
- pDevice->bBarkerPreambleMd = TRUE;
+ if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ pDevice->bBarkerPreambleMd = true;
}
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Rx ReAssociate AID= %d \n", wAssocAID);
@@ -995,12 +996,12 @@ s_vMgrRxAssocResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- BOOL bReAssocType
+ bool bReAssocType
)
{
WLAN_FR_ASSOCRESP sFrame;
PWLAN_IE_SSID pItemSSID;
- PBYTE pbyIEs;
+ unsigned char *pbyIEs;
viawget_wpa_header *wpahdr;
@@ -1009,7 +1010,7 @@ s_vMgrRxAssocResponse(
pMgmt->eCurrState == WMAC_STATE_ASSOC) {
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
// decode the frame
vMgrDecodeAssocResponse(&sFrame);
if ((sFrame.pwCapInfo == 0) ||
@@ -1044,7 +1045,7 @@ s_vMgrRxAssocResponse(
BSSvUpdateAPNode((void *)pDevice, sFrame.pwCapInfo, sFrame.pSuppRates, sFrame.pExtSuppRates);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Link with AP(SSID): %s\n", pItemSSID->abySSID);
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
pDevice->uBBVGADiffCount = 0;
if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
if(skb_tailroom(pDevice->skb) <(sizeof(viawget_wpa_header)+pMgmt->sAssocInfo.AssocInfo.ResponseIELength+
@@ -1073,9 +1074,9 @@ s_vMgrRxAssocResponse(
//2008-0409-07, <Add> by Einsn Liu
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- //if(pDevice->bWPADevEnable == TRUE)
+ //if(pDevice->bWPADevEnable == true)
{
- BYTE buf[512];
+ unsigned char buf[512];
size_t len;
union iwreq_data wrqu;
int we_event;
@@ -1128,7 +1129,7 @@ s_vMgrRxAssocResponse(
//need clear flags related to Networkmanager
pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = FALSE;
+ pDevice->bWPASuppWextEnabled = false;
#endif
@@ -1163,8 +1164,8 @@ vMgrAuthenBeginSta(
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
vMgrEncodeAuthen(&sFrame);
/* insert values */
@@ -1212,8 +1213,8 @@ void
vMgrDeAuthenBeginSta(
void *hDeviceContext,
PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
+ unsigned char *abyDestAddress,
+ unsigned short wReason,
PCMD_STATUS pStatus
)
{
@@ -1224,8 +1225,8 @@ vMgrDeAuthenBeginSta(
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DEAUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_DEAUTHEN_FR_MAXLEN;
vMgrEncodeDeauthen(&sFrame);
/* insert values */
@@ -1282,7 +1283,7 @@ s_vMgrRxAuthentication(
// decode the frame
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeAuthen(&sFrame);
switch (cpu_to_le16((*(sFrame.pwAuthSequence )))){
case 1:
@@ -1331,7 +1332,7 @@ s_vMgrRxAuthenSequence_1(
)
{
PSTxMgmtPacket pTxPacket = NULL;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
WLAN_FR_AUTHEN sFrame;
PSKeyItem pTransmitKey;
@@ -1353,8 +1354,8 @@ s_vMgrRxAuthenSequence_1(
// send auth reply
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
// format buffer structure
vMgrEncodeAuthen(&sFrame);
@@ -1393,7 +1394,7 @@ s_vMgrRxAuthenSequence_1(
sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
memset(pMgmt->abyChallenge, 0, WLAN_CHALLENGE_LEN);
// get group key
- if(KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == TRUE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == true) {
rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength+3);
rc4_encrypt(&pDevice->SBox, pMgmt->abyChallenge, pMgmt->abyChallenge, WLAN_CHALLENGE_LEN);
}
@@ -1466,8 +1467,8 @@ s_vMgrRxAuthenSequence_2(
if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
// format buffer structure
vMgrEncodeAuthen(&sFrame);
@@ -1539,8 +1540,8 @@ s_vMgrRxAuthenSequence_3(
)
{
PSTxMgmtPacket pTxPacket = NULL;
- UINT uStatusCode = 0 ;
- UINT uNodeIndex = 0;
+ unsigned int uStatusCode = 0 ;
+ unsigned int uNodeIndex = 0;
WLAN_FR_AUTHEN sFrame;
if (!WLAN_GET_FC_ISWEP(pFrame->pHdr->sA3.wFrameCtl)) {
@@ -1573,8 +1574,8 @@ reply:
// send auth reply
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
// format buffer structure
vMgrEncodeAuthen(&sFrame);
@@ -1666,7 +1667,7 @@ s_vMgrRxDisassociation(
)
{
WLAN_FR_DISASSOC sFrame;
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
// CMD_STATUS CmdStatus;
viawget_wpa_header *wpahdr;
@@ -1674,7 +1675,7 @@ s_vMgrRxDisassociation(
// if is acting an AP..
// a STA is leaving this BSS..
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex)) {
BSSvRemoveOneNode(pDevice, uNodeIndex);
}
@@ -1684,7 +1685,7 @@ s_vMgrRxDisassociation(
}
else if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA ){
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeDisassociation(&sFrame);
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP disassociated me, reason=%d.\n", cpu_to_le16(*(sFrame.pwReason)));
//TODO: do something let upper layer know or
@@ -1709,7 +1710,7 @@ s_vMgrRxDisassociation(
};
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1745,7 +1746,7 @@ s_vMgrRxDeauthentication(
)
{
WLAN_FR_DEAUTHEN sFrame;
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
viawget_wpa_header *wpahdr;
@@ -1754,7 +1755,7 @@ s_vMgrRxDeauthentication(
// if is acting an AP..
// a STA is leaving this BSS..
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex)) {
BSSvRemoveOneNode(pDevice, uNodeIndex);
}
@@ -1765,17 +1766,17 @@ s_vMgrRxDeauthentication(
else {
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA ) {
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeDeauthen(&sFrame);
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP deauthed me, reason=%d.\n", cpu_to_le16((*(sFrame.pwReason))));
// TODO: update BSS list for specific BSSID if pre-authentication case
- if (IS_ETH_ADDRESS_EQUAL(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID)) {
+ if (!compare_ether_addr(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID)) {
if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
- pMgmt->sNodeDBTable[0].bActive = FALSE;
+ pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
}
};
@@ -1795,7 +1796,7 @@ s_vMgrRxDeauthentication(
};
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
+ // if(pDevice->bWPASuppWextEnabled == true)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1825,23 +1826,23 @@ s_vMgrRxDeauthentication(
* True:exceed;
* False:normal case
-*/
-static BOOL
+static bool
ChannelExceedZoneType(
PSDevice pDevice,
- BYTE byCurrChannel
+ unsigned char byCurrChannel
)
{
- BOOL exceed=FALSE;
+ bool exceed=false;
switch(pDevice->byZoneType) {
case 0x00: //USA:1~11
if((byCurrChannel<1) ||(byCurrChannel>11))
- exceed = TRUE;
+ exceed = true;
break;
case 0x01: //Japan:1~13
case 0x02: //Europe:1~13
if((byCurrChannel<1) ||(byCurrChannel>13))
- exceed = TRUE;
+ exceed = true;
break;
default: //reserve for other zonetype
break;
@@ -1868,39 +1869,39 @@ s_vMgrRxBeacon(
PSDevice pDevice,
PSMgmtObject pMgmt,
PSRxMgmtPacket pRxPacket,
- BOOL bInScan
+ bool bInScan
)
{
PKnownBSS pBSSList;
WLAN_FR_BEACON sFrame;
QWORD qwTSFOffset;
- BOOL bIsBSSIDEqual = FALSE;
- BOOL bIsSSIDEqual = FALSE;
- BOOL bTSFLargeDiff = FALSE;
- BOOL bTSFOffsetPostive = FALSE;
- BOOL bUpdateTSF = FALSE;
- BOOL bIsAPBeacon = FALSE;
- BOOL bIsChannelEqual = FALSE;
- UINT uLocateByteIndex;
- BYTE byTIMBitOn = 0;
- WORD wAIDNumber = 0;
- UINT uNodeIndex;
+ bool bIsBSSIDEqual = false;
+ bool bIsSSIDEqual = false;
+ bool bTSFLargeDiff = false;
+ bool bTSFOffsetPostive = false;
+ bool bUpdateTSF = false;
+ bool bIsAPBeacon = false;
+ bool bIsChannelEqual = false;
+ unsigned int uLocateByteIndex;
+ unsigned char byTIMBitOn = 0;
+ unsigned short wAIDNumber = 0;
+ unsigned int uNodeIndex;
QWORD qwTimestamp, qwLocalTSF;
QWORD qwCurrTSF;
- WORD wStartIndex = 0;
- WORD wAIDIndex = 0;
- BYTE byCurrChannel = pRxPacket->byRxChannel;
+ unsigned short wStartIndex = 0;
+ unsigned short wAIDIndex = 0;
+ unsigned char byCurrChannel = pRxPacket->byRxChannel;
ERPObject sERP;
- UINT uRateLen = WLAN_RATES_MAXLEN;
- BOOL bChannelHit = FALSE;
- BOOL bUpdatePhyParameter = FALSE;
- BYTE byIEChannel = 0;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
+ bool bChannelHit = false;
+ bool bUpdatePhyParameter = false;
+ unsigned char byIEChannel = 0;
memset(&sFrame, 0, sizeof(WLAN_FR_BEACON));
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
// decode the beacon frame
vMgrDecodeBeacon(&sFrame);
@@ -1917,29 +1918,29 @@ s_vMgrRxBeacon(
if (sFrame.pDSParms != NULL) {
if (byCurrChannel > CB_MAX_CHANNEL_24G) {
// channel remapping to
- byIEChannel = CARDbyGetChannelMapping(pDevice, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
+ byIEChannel = get_channel_mapping(pDevice, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
} else {
byIEChannel = sFrame.pDSParms->byCurrChannel;
}
if (byCurrChannel != byIEChannel) {
// adjust channel info. bcs we rcv adjcent channel pakckets
- bChannelHit = FALSE;
+ bChannelHit = false;
byCurrChannel = byIEChannel;
}
} else {
// no DS channel info
- bChannelHit = TRUE;
+ bChannelHit = true;
}
//2008-0730-01<Add>by MikeLiu
-if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
+if(ChannelExceedZoneType(pDevice,byCurrChannel)==true)
return;
if (sFrame.pERP != NULL) {
sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = TRUE;
+ sERP.bERPExist = true;
} else {
- sERP.bERPExist = FALSE;
+ sERP.bERPExist = false;
sERP.byERP = 0;
}
@@ -1993,8 +1994,8 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
return;
}
- if(byCurrChannel == (BYTE)pMgmt->uCurrChannel)
- bIsChannelEqual = TRUE;
+ if(byCurrChannel == (unsigned char)pMgmt->uCurrChannel)
+ bIsChannelEqual = true;
if (bIsChannelEqual && (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
@@ -2021,7 +2022,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)){
if (!pDevice->bProtectMode) {
MACvEnableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = TRUE;
+ pDevice->bProtectMode = true;
}
}
}
@@ -2035,7 +2036,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrBSSID,
WLAN_BSSID_LEN) == 0) {
- bIsBSSIDEqual = TRUE;
+ bIsBSSIDEqual = true;
// 2008-05-21 <add> by Richardtai
pDevice->uCurrRSSI = pRxPacket->uRSSI;
@@ -2052,30 +2053,30 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,
sFrame.pSSID->len
) == 0) {
- bIsSSIDEqual = TRUE;
+ bIsSSIDEqual = true;
};
}
- if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)== TRUE) &&
- (bIsBSSIDEqual == TRUE) &&
- (bIsSSIDEqual == TRUE) &&
+ if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)== true) &&
+ (bIsBSSIDEqual == true) &&
+ (bIsSSIDEqual == true) &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// add state check to prevent reconnect fail since we'll receive Beacon
- bIsAPBeacon = TRUE;
+ bIsAPBeacon = true;
if (pBSSList != NULL) {
// Compare PHY paramater setting
if (pMgmt->wCurrCapInfo != pBSSList->wCapInfo) {
- bUpdatePhyParameter = TRUE;
+ bUpdatePhyParameter = true;
pMgmt->wCurrCapInfo = pBSSList->wCapInfo;
}
if (sFrame.pERP != NULL) {
if ((sFrame.pERP->byElementID == WLAN_EID_ERP) &&
(pMgmt->byERPContext != sFrame.pERP->byContext)) {
- bUpdatePhyParameter = TRUE;
+ bUpdatePhyParameter = true;
pMgmt->byERPContext = sFrame.pERP->byContext;
}
}
@@ -2094,7 +2095,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
RATEvParseMaxRate( (void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[0].wMaxBasicRate),
&(pMgmt->sNodeDBTable[0].wMaxSuppRate),
&(pMgmt->sNodeDBTable[0].wSuppRate),
@@ -2104,7 +2105,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
#ifdef PLICE_DEBUG
//printk("RxBeacon:MaxSuppRate is %d\n",pMgmt->sNodeDBTable[0].wMaxSuppRate);
#endif
- if (bUpdatePhyParameter == TRUE) {
+ if (bUpdatePhyParameter == true) {
CARDbSetPhyParameter( pMgmt->pAdapter,
pMgmt->eCurrentPHYMode,
pMgmt->wCurrCapInfo,
@@ -2115,19 +2116,19 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
}
if (sFrame.pIE_PowerConstraint != NULL) {
CARDvSetPowerConstraint(pMgmt->pAdapter,
- (BYTE) pBSSList->uChannel,
+ (unsigned char) pBSSList->uChannel,
sFrame.pIE_PowerConstraint->byPower
);
}
if (sFrame.pIE_CHSW != NULL) {
CARDbChannelSwitch( pMgmt->pAdapter,
sFrame.pIE_CHSW->byMode,
- CARDbyGetChannelMapping(pMgmt->pAdapter, sFrame.pIE_CHSW->byMode, pMgmt->eCurrentPHYMode),
+ get_channel_mapping(pMgmt->pAdapter, sFrame.pIE_CHSW->byMode, pMgmt->eCurrentPHYMode),
sFrame.pIE_CHSW->byCount
);
- } else if (bIsChannelEqual == FALSE) {
- CARDbSetChannel(pMgmt->pAdapter, pBSSList->uChannel);
+ } else if (bIsChannelEqual == false) {
+ set_channel(pMgmt->pAdapter, pBSSList->uChannel);
}
}
}
@@ -2148,17 +2149,17 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// check if beacon TSF larger or small than our local TSF
if (HIDWORD(qwTimestamp) == HIDWORD(qwLocalTSF)) {
if (LODWORD(qwTimestamp) >= LODWORD(qwLocalTSF)) {
- bTSFOffsetPostive = TRUE;
+ bTSFOffsetPostive = true;
}
else {
- bTSFOffsetPostive = FALSE;
+ bTSFOffsetPostive = false;
}
}
else if (HIDWORD(qwTimestamp) > HIDWORD(qwLocalTSF)) {
- bTSFOffsetPostive = TRUE;
+ bTSFOffsetPostive = true;
}
else if (HIDWORD(qwTimestamp) < HIDWORD(qwLocalTSF)) {
- bTSFOffsetPostive = FALSE;
+ bTSFOffsetPostive = false;
};
if (bTSFOffsetPostive) {
@@ -2170,21 +2171,21 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
if (HIDWORD(qwTSFOffset) != 0 ||
(LODWORD(qwTSFOffset) > TRIVIAL_SYNC_DIFFERENCE )) {
- bTSFLargeDiff = TRUE;
+ bTSFLargeDiff = true;
}
// if infra mode
- if (bIsAPBeacon == TRUE) {
+ if (bIsAPBeacon == true) {
// Infra mode: Local TSF always follow AP's TSF if Difference huge.
if (bTSFLargeDiff)
- bUpdateTSF = TRUE;
+ bUpdateTSF = true;
- if ((pDevice->bEnablePSMode == TRUE) &&(sFrame.pTIM != 0)) {
+ if ((pDevice->bEnablePSMode == true) &&(sFrame.pTIM != 0)) {
// deal with DTIM, analysis TIM
- pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? TRUE : FALSE ;
+ pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false ;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
pMgmt->byDTIMPeriod = sFrame.pTIM->byDTIMPeriod;
wAIDNumber = pMgmt->wCurrAID & ~(BIT14|BIT15);
@@ -2199,19 +2200,19 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// len = byDTIMCount + byDTIMPeriod + byDTIMPeriod + byVirtBitMap[0~250]
if (sFrame.pTIM->len >= (uLocateByteIndex + 4)) {
byTIMBitOn = (0x01) << ((wAIDNumber) % 8);
- pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? TRUE : FALSE;
+ pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? true : false;
}
else {
- pMgmt->bInTIM = FALSE;
+ pMgmt->bInTIM = false;
};
}
else {
- pMgmt->bInTIM = FALSE;
+ pMgmt->bInTIM = false;
};
if (pMgmt->bInTIM ||
(pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) {
- pMgmt->bInTIMWake = TRUE;
+ pMgmt->bInTIMWake = true;
// send out ps-poll packet
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN:In TIM\n");
if (pMgmt->bInTIM) {
@@ -2221,14 +2222,14 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
}
else {
- pMgmt->bInTIMWake = FALSE;
+ pMgmt->bInTIMWake = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Not In TIM..\n");
- if (pDevice->bPWBitOn == FALSE) {
+ if (pDevice->bPWBitOn == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Send Null Packet\n");
if (PSbSendNullPacket(pDevice))
- pDevice->bPWBitOn = TRUE;
+ pDevice->bPWBitOn = true;
}
- if(PSbConsiderPowerDown(pDevice, FALSE, FALSE)) {
+ if(PSbConsiderPowerDown(pDevice, false, false)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Power down now...\n");
};
}
@@ -2246,7 +2247,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// adhoc mode:TSF updated only when beacon larger then local TSF
if (bTSFLargeDiff && bTSFOffsetPostive &&
(pMgmt->eCurrState == WMAC_STATE_JOINTED))
- bUpdateTSF = TRUE;
+ bUpdateTSF = true;
// During DPC, the spinlock is already held.
if (BSSDBbIsSTAInNodeDB(pMgmt, sFrame.pHdr->sA3.abyAddr2, &uNodeIndex)) {
@@ -2259,7 +2260,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
RATEvParseMaxRate( (void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -2280,7 +2281,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
RATEvParseMaxRate( (void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
- TRUE,
+ true,
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
&(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
@@ -2300,7 +2301,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
/*
pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;
+ pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
*/
}
@@ -2308,11 +2309,11 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
if (pMgmt->eCurrState == WMAC_STATE_STARTED) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Current IBSS State: [Started]........to: [Jointed] \n");
pMgmt->eCurrState = WMAC_STATE_JOINTED;
- pDevice->bLinkPass = TRUE;
+ pDevice->bLinkPass = true;
if (netif_queue_stopped(pDevice->dev)){
netif_wake_queue(pDevice->dev);
}
- pMgmt->sNodeDBTable[0].bActive = TRUE;
+ pMgmt->sNodeDBTable[0].bActive = true;
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
};
@@ -2392,16 +2393,16 @@ vMgrCreateOwnIBSS(
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
- WORD wMaxBasicRate;
- WORD wMaxSuppRate;
- BYTE byTopCCKBasicRate;
- BYTE byTopOFDMBasicRate;
+ unsigned short wMaxBasicRate;
+ unsigned short wMaxSuppRate;
+ unsigned char byTopCCKBasicRate;
+ unsigned char byTopOFDMBasicRate;
QWORD qwCurrTSF;
- UINT ii;
- BYTE abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
- BYTE abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
- BYTE abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- WORD wSuppRate;
+ unsigned int ii;
+ unsigned char abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
+ unsigned char abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
+ unsigned char abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+ unsigned short wSuppRate;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create Basic Service Set .......\n");
@@ -2486,7 +2487,7 @@ vMgrCreateOwnIBSS(
// set basic rate
RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE,
+ (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, true,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
@@ -2533,12 +2534,12 @@ vMgrCreateOwnIBSS(
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
// BSSID selected must be randomized per spec 11.1.3
- pMgmt->abyCurrBSSID[5] = (BYTE) (LODWORD(qwCurrTSF)& 0x000000ff);
- pMgmt->abyCurrBSSID[4] = (BYTE)((LODWORD(qwCurrTSF)& 0x0000ff00) >> 8);
- pMgmt->abyCurrBSSID[3] = (BYTE)((LODWORD(qwCurrTSF)& 0x00ff0000) >> 16);
- pMgmt->abyCurrBSSID[2] = (BYTE)((LODWORD(qwCurrTSF)& 0x00000ff0) >> 4);
- pMgmt->abyCurrBSSID[1] = (BYTE)((LODWORD(qwCurrTSF)& 0x000ff000) >> 12);
- pMgmt->abyCurrBSSID[0] = (BYTE)((LODWORD(qwCurrTSF)& 0x0ff00000) >> 20);
+ pMgmt->abyCurrBSSID[5] = (unsigned char) (LODWORD(qwCurrTSF)& 0x000000ff);
+ pMgmt->abyCurrBSSID[4] = (unsigned char)((LODWORD(qwCurrTSF)& 0x0000ff00) >> 8);
+ pMgmt->abyCurrBSSID[3] = (unsigned char)((LODWORD(qwCurrTSF)& 0x00ff0000) >> 16);
+ pMgmt->abyCurrBSSID[2] = (unsigned char)((LODWORD(qwCurrTSF)& 0x00000ff0) >> 4);
+ pMgmt->abyCurrBSSID[1] = (unsigned char)((LODWORD(qwCurrTSF)& 0x000ff000) >> 12);
+ pMgmt->abyCurrBSSID[0] = (unsigned char)((LODWORD(qwCurrTSF)& 0x0ff00000) >> 20);
pMgmt->abyCurrBSSID[5] ^= pMgmt->abyMACAddr[0];
pMgmt->abyCurrBSSID[4] ^= pMgmt->abyMACAddr[1];
pMgmt->abyCurrBSSID[3] ^= pMgmt->abyMACAddr[2];
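For the IBSS case above, the random BSSID required by spec 11.1.3 is built from nibbles of the low 32 bits of the current TSF and then XORed with the local MAC address. A self-contained sketch of the visible part of that construction; the remaining XORs are assumed to continue the same byte-reversed pattern:

        /* Illustrative only: mirrors the abyCurrBSSID assignments in the hunk above. */
        static void make_ibss_bssid(unsigned char bssid[6], unsigned long tsf_low,
                                    const unsigned char mac[6])
        {
                bssid[5] = (unsigned char)( tsf_low & 0x000000ff);
                bssid[4] = (unsigned char)((tsf_low & 0x0000ff00) >> 8);
                bssid[3] = (unsigned char)((tsf_low & 0x00ff0000) >> 16);
                bssid[2] = (unsigned char)((tsf_low & 0x00000ff0) >> 4);
                bssid[1] = (unsigned char)((tsf_low & 0x000ff000) >> 12);
                bssid[0] = (unsigned char)((tsf_low & 0x0ff00000) >> 20);
                for (int i = 0; i < 6; i++)     /* assumed continuation of the */
                        bssid[5 - i] ^= mac[i]; /* XOR pattern shown above     */
        }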
@@ -2611,7 +2612,7 @@ vMgrCreateOwnIBSS(
CARDbSetBeaconPeriod(pMgmt->pAdapter, pMgmt->wIBSSBeaconPeriod);
// set channel and clear NAV
- CARDbSetChannel(pMgmt->pAdapter, pMgmt->uIBSSChannel);
+ set_channel(pMgmt->pAdapter, pMgmt->uIBSSChannel);
pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
if (CARDbIsShortPreamble(pMgmt->pAdapter)) {
@@ -2620,7 +2621,7 @@ vMgrCreateOwnIBSS(
pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SHORTPREAMBLE(1));
}
- if ((pMgmt->b11hEnable == TRUE) &&
+ if ((pMgmt->b11hEnable == true) &&
(pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
} else {
@@ -2661,20 +2662,20 @@ vMgrJoinBSSBegin(
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = pDevice->pMgmt;
PKnownBSS pCurr = NULL;
- UINT ii, uu;
+ unsigned int ii, uu;
PWLAN_IE_SUPP_RATES pItemRates = NULL;
PWLAN_IE_SUPP_RATES pItemExtRates = NULL;
PWLAN_IE_SSID pItemSSID;
- UINT uRateLen = WLAN_RATES_MAXLEN;
- WORD wMaxBasicRate = RATE_1M;
- WORD wMaxSuppRate = RATE_1M;
- WORD wSuppRate;
- BYTE byTopCCKBasicRate = RATE_1M;
- BYTE byTopOFDMBasicRate = RATE_1M;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
+ unsigned short wMaxBasicRate = RATE_1M;
+ unsigned short wMaxSuppRate = RATE_1M;
+ unsigned short wSuppRate;
+ unsigned char byTopCCKBasicRate = RATE_1M;
+ unsigned char byTopOFDMBasicRate = RATE_1M;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive == TRUE)
+ if (pMgmt->sBSSList[ii].bActive == true)
break;
}
@@ -2708,14 +2709,14 @@ vMgrJoinBSSBegin(
// patch for CISCO migration mode
/*
if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No match RSN info. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
}
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No match RSN info. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -2726,7 +2727,7 @@ vMgrJoinBSSBegin(
}
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- //if(pDevice->bWPASuppWextEnabled == TRUE)
+ //if(pDevice->bWPASuppWextEnabled == true)
Encyption_Rebuild(pDevice, pCurr);
#endif
// Infrastructure BSS
@@ -2764,15 +2765,15 @@ vMgrJoinBSSBegin(
uRateLen);
// Stuffing Rate IE
if ((pItemExtRates->len > 0) && (pItemRates->len < 8)) {
- for (ii = 0; ii < (UINT)(8 - pItemRates->len); ) {
+ for (ii = 0; ii < (unsigned int)(8 - pItemRates->len); ) {
pItemRates->abyRates[pItemRates->len + ii] = pItemExtRates->abyRates[ii];
ii ++;
if (pItemExtRates->len <= ii)
break;
}
- pItemRates->len += (BYTE)ii;
+ pItemRates->len += (unsigned char)ii;
if (pItemExtRates->len - ii > 0) {
- pItemExtRates->len -= (BYTE)ii;
+ pItemExtRates->len -= (unsigned char)ii;
for (uu = 0; uu < pItemExtRates->len; uu ++) {
pItemExtRates->abyRates[uu] = pItemExtRates->abyRates[uu + ii];
}
@@ -2781,7 +2782,7 @@ vMgrJoinBSSBegin(
}
}
- RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, TRUE,
+ RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, true,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
@@ -2802,9 +2803,9 @@ vMgrJoinBSSBegin(
// Add current BSS to Candidate list
// This should only work for a WPA2 BSS, and the WPA2 BSS check must be done beforehand.
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
- BOOL bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
+ bool bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate: 1(%d)\n", bResult);
- if (bResult == FALSE) {
+ if (bResult == false) {
vFlush_PMKID_Candidate((void *)pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFlush_PMKID_Candidate: 4\n");
bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
@@ -2831,13 +2832,13 @@ vMgrJoinBSSBegin(
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == false) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
}
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == FALSE) {
+ if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == false) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
@@ -2868,7 +2869,7 @@ vMgrJoinBSSBegin(
WLAN_RATES_MAXLEN_11B);
// set basic rate
RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL, TRUE, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
+ NULL, true, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
pMgmt->wCurrCapInfo = pCurr->wCapInfo;
@@ -2883,7 +2884,7 @@ vMgrJoinBSSBegin(
pMgmt->eCurrState = WMAC_STATE_STARTED;
// Adopt BSS state in Adapter Device Object
//pDevice->byOpMode = OP_MODE_ADHOC;
-// pDevice->bLinkPass = TRUE;
+// pDevice->bLinkPass = true;
// memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join IBSS ok:%02x-%02x-%02x-%02x-%02x-%02x \n",
@@ -2923,7 +2924,7 @@ static
void
s_vMgrSynchBSS (
PSDevice pDevice,
- UINT uBSSMode,
+ unsigned int uBSSMode,
PKnownBSS pCurr,
PCMD_STATUS pStatus
)
@@ -2932,11 +2933,11 @@ s_vMgrSynchBSS (
PSMgmtObject pMgmt = pDevice->pMgmt;
// int ii;
//1M, 2M, 5.5M, 11M, 18M, 24M, 36M, 54M
- BYTE abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- BYTE abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
+ unsigned char abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
+ unsigned char abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
//6M, 9M, 12M, 48M
- BYTE abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- BYTE abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+ unsigned char abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
+ unsigned char abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
*pStatus = CMD_STATUS_FAILURE;
@@ -2944,7 +2945,7 @@ s_vMgrSynchBSS (
if (s_bCipherMatch(pCurr,
pDevice->eEncryptionStatus,
&(pMgmt->byCSSPK),
- &(pMgmt->byCSSGK)) == FALSE) {
+ &(pMgmt->byCSSGK)) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_bCipherMatch Fail .......\n");
return;
}
@@ -2958,12 +2959,12 @@ s_vMgrSynchBSS (
}
// Init the BSS information
- pDevice->bCCK = TRUE;
- pDevice->bProtectMode = FALSE;
+ pDevice->bCCK = true;
+ pDevice->bProtectMode = false;
MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = FALSE;
+ pDevice->bBarkerPreambleMd = false;
MACvDisableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bNonERPPresent = FALSE;
+ pDevice->bNonERPPresent = false;
pDevice->byPreambleType = 0;
pDevice->wBasicRate = 0;
// Set Basic Rate
@@ -3046,12 +3047,12 @@ s_vMgrSynchBSS (
pCurr->sERP.byERP,
pMgmt->abyCurrSuppRates,
pMgmt->abyCurrExtSuppRates
- ) != TRUE) {
+ ) != true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Phy Mode Fail [%d]\n", ePhyType);
return;
}
// set channel and clear NAV
- if (CARDbSetChannel(pMgmt->pAdapter, pCurr->uChannel) == FALSE) {
+ if (set_channel(pMgmt->pAdapter, pCurr->uChannel) == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Channel [%d]\n", pCurr->uChannel);
return;
}
@@ -3077,7 +3078,7 @@ s_vMgrSynchBSS (
pMgmt->uCurrChannel = pCurr->uChannel;
pMgmt->eCurrentPHYMode = ePhyType;
pMgmt->byERPContext = pCurr->sERP.byERP;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:Set to channel = [%d]\n", (INT)pCurr->uChannel);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:Set to channel = [%d]\n", (int)pCurr->uChannel);
*pStatus = CMD_STATUS_SUCCESS;
@@ -3094,18 +3095,18 @@ s_vMgrSynchBSS (
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- // UINT ii , uSameBssidNum=0;
+ // unsigned int ii , uSameBssidNum=0;
// for (ii = 0; ii < MAX_BSS_NUM; ii++) {
// if (pMgmt->sBSSList[ii].bActive &&
- // IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
+ // !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
// uSameBssidNum++;
// }
// }
// if( uSameBssidNum>=2) { //we only check AP in hidden ssid mode
if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || //networkmanager 0.7.0 does not give the pairwise-key selection,
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need to re-select it according to the real pairwise-key info.
- if(pCurr->bWPAValid == TRUE) { //WPA-PSK
+ if(pCurr->bWPAValid == true) { //WPA-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
if(pCurr->abyPKType[0] == WPA_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -3116,7 +3117,7 @@ s_vMgrSynchBSS (
PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n");
}
}
- else if(pCurr->bWPA2Valid == TRUE) { //WPA2-PSK
+ else if(pCurr->bWPA2Valid == true) { //WPA2-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
if(pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -3151,13 +3152,13 @@ s_vMgrFormatTIM(
PWLAN_IE_TIM pTIM
)
{
- BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- BYTE byMap;
- UINT ii, jj;
- BOOL bStartFound = FALSE;
- BOOL bMulticast = FALSE;
- WORD wStartIndex = 0;
- WORD wEndIndex = 0;
+ unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
+ unsigned char byMap;
+ unsigned int ii, jj;
+ bool bStartFound = false;
+ bool bMulticast = false;
+ unsigned short wStartIndex = 0;
+ unsigned short wEndIndex = 0;
// Find size of partial virtual bitmap
@@ -3167,13 +3168,13 @@ s_vMgrFormatTIM(
// Mask out the broadcast bit which is indicated separately.
bMulticast = (byMap & byMask[0]) != 0;
if(bMulticast) {
- pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
+ pMgmt->sNodeDBTable[0].bRxPSPoll = true;
}
byMap = 0;
}
if (byMap) {
if (!bStartFound) {
- bStartFound = TRUE;
+ bStartFound = true;
wStartIndex = ii;
}
wEndIndex = ii;
@@ -3224,30 +3225,30 @@ PSTxMgmtPacket
s_MgrMakeBeacon(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- UINT uCurrChannel,
- WORD wCurrATIMWinodw,
+ unsigned short wCurrCapInfo,
+ unsigned short wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ unsigned short wCurrATIMWinodw,
PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
+ unsigned char *pCurrBSSID,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
WLAN_FR_BEACON sFrame;
- BYTE abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- PBYTE pbyBuffer;
- UINT uLength = 0;
+ unsigned char abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned char *pbyBuffer;
+ unsigned int uLength = 0;
PWLAN_IE_IBSS_DFS pIBSSDFS = NULL;
- UINT ii;
+ unsigned int ii;
// prepare beacon frame
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_BEACON_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
// Setup the sFrame structure.
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_BEACON_FR_MAXLEN;
vMgrEncodeBeacon(&sFrame);
// Setup the header
@@ -3258,7 +3259,7 @@ s_MgrMakeBeacon(
));
if (pDevice->bEnablePSMode) {
- sFrame.pHdr->sA3.wFrameCtl |= cpu_to_le16((WORD)WLAN_SET_FC_PWRMGT(1));
+ sFrame.pHdr->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_PWRMGT(1));
}
memcpy( sFrame.pHdr->sA3.abyAddr1, abyBroadcastAddr, WLAN_ADDR_LEN);
@@ -3286,7 +3287,7 @@ s_MgrMakeBeacon(
sFrame.len += (1) + WLAN_IEHDR_LEN;
sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS;
sFrame.pDSParms->len = 1;
- sFrame.pDSParms->byCurrChannel = (BYTE)uCurrChannel;
+ sFrame.pDSParms->byCurrChannel = (unsigned char)uCurrChannel;
}
// TIM field
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -3329,22 +3330,22 @@ s_MgrMakeBeacon(
// Pairwise Key Cipher Suite
sFrame.pRSNWPA->wPKCount = 0;
// Auth Key Management Suite
- *((PWORD)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len))=0;
+ *((unsigned short *)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len))=0;
sFrame.pRSNWPA->len +=2;
// RSN Capabilities
- *((PWORD)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len))=0;
+ *((unsigned short *)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len))=0;
sFrame.pRSNWPA->len +=2;
sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
}
}
- if ((pMgmt->b11hEnable == TRUE) &&
+ if ((pMgmt->b11hEnable == true) &&
(pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
// Country IE
- pbyBuffer = (PBYTE)(sFrame.pBuf + sFrame.len);
- CARDvSetCountryIE(pMgmt->pAdapter, pbyBuffer);
- CARDvSetCountryInfo(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
+ pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
+ set_country_IE(pMgmt->pAdapter, pbyBuffer);
+ set_country_info(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
uLength += ((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN;
pbyBuffer += (((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN);
// Power Constraint IE
@@ -3353,12 +3354,12 @@ s_MgrMakeBeacon(
((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
pbyBuffer += (1) + WLAN_IEHDR_LEN;
uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel == TRUE) {
+ if (pMgmt->bSwitchChannel == true) {
// Channel Switch IE
((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
((PWLAN_IE_CH_SW) pbyBuffer)->byMode = 1;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = CARDbyGetChannelNumber(pMgmt->pAdapter, pMgmt->byNewChannel);
+ ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = get_channel_number(pMgmt->pAdapter, pMgmt->byNewChannel);
((PWLAN_IE_CH_SW) pbyBuffer)->byCount = 0;
pbyBuffer += (3) + WLAN_IEHDR_LEN;
uLength += (3) + WLAN_IEHDR_LEN;
@@ -3382,7 +3383,7 @@ s_MgrMakeBeacon(
pbyBuffer += (7) + WLAN_IEHDR_LEN;
uLength += (7) + WLAN_IEHDR_LEN;
for(ii=CB_MAX_CHANNEL_24G+1; ii<=CB_MAX_CHANNEL; ii++ ) {
- if (CARDbGetChannelMapInfo(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == TRUE) {
+ if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == true) {
pbyBuffer += 2;
uLength += 2;
pIBSSDFS->len += 2;
@@ -3398,11 +3399,11 @@ s_MgrMakeBeacon(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == TRUE)
+ if (pDevice->bProtectMode == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == TRUE)
+ if (pDevice->bNonERPPresent == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == TRUE)
+ if (pDevice->bBarkerPreambleMd == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
@@ -3414,7 +3415,7 @@ s_MgrMakeBeacon(
);
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == TRUE)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3453,31 +3454,31 @@ PSTxMgmtPacket
s_MgrMakeProbeResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wCurrBeaconPeriod,
- UINT uCurrChannel,
- WORD wCurrATIMWinodw,
- PBYTE pDstAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ unsigned short wCurrATIMWinodw,
+ unsigned char *pDstAddr,
PWLAN_IE_SSID pCurrSSID,
- PBYTE pCurrBSSID,
+ unsigned char *pCurrBSSID,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- BYTE byPHYType
+ unsigned char byPHYType
)
{
PSTxMgmtPacket pTxPacket = NULL;
WLAN_FR_PROBERESP sFrame;
- PBYTE pbyBuffer;
- UINT uLength = 0;
+ unsigned char *pbyBuffer;
+ unsigned int uLength = 0;
PWLAN_IE_IBSS_DFS pIBSSDFS = NULL;
- UINT ii;
+ unsigned int ii;
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBERESP_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
// Setup the sFrame structure.
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_PROBERESP_FR_MAXLEN;
vMgrEncodeProbeResponse(&sFrame);
// Setup the header
@@ -3493,7 +3494,7 @@ s_MgrMakeProbeResponse(
*sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
if (byPHYType == BB_TYPE_11B) {
- *sFrame.pwCapInfo &= cpu_to_le16((WORD)~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1)));
+ *sFrame.pwCapInfo &= cpu_to_le16((unsigned short)~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1)));
}
// Copy SSID
@@ -3518,7 +3519,7 @@ s_MgrMakeProbeResponse(
sFrame.len += (1) + WLAN_IEHDR_LEN;
sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS;
sFrame.pDSParms->len = 1;
- sFrame.pDSParms->byCurrChannel = (BYTE)uCurrChannel;
+ sFrame.pDSParms->byCurrChannel = (unsigned char)uCurrChannel;
}
if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
@@ -3535,20 +3536,20 @@ s_MgrMakeProbeResponse(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == TRUE)
+ if (pDevice->bProtectMode == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == TRUE)
+ if (pDevice->bNonERPPresent == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == TRUE)
+ if (pDevice->bBarkerPreambleMd == true)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
- if ((pMgmt->b11hEnable == TRUE) &&
+ if ((pMgmt->b11hEnable == true) &&
(pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
// Country IE
- pbyBuffer = (PBYTE)(sFrame.pBuf + sFrame.len);
- CARDvSetCountryIE(pMgmt->pAdapter, pbyBuffer);
- CARDvSetCountryInfo(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
+ pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
+ set_country_IE(pMgmt->pAdapter, pbyBuffer);
+ set_country_info(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
uLength += ((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN;
pbyBuffer += (((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN);
// Power Constraint IE
@@ -3557,12 +3558,12 @@ s_MgrMakeProbeResponse(
((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
pbyBuffer += (1) + WLAN_IEHDR_LEN;
uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel == TRUE) {
+ if (pMgmt->bSwitchChannel == true) {
// Channel Switch IE
((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
((PWLAN_IE_CH_SW) pbyBuffer)->byMode = 1;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = CARDbyGetChannelNumber(pMgmt->pAdapter, pMgmt->byNewChannel);
+ ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = get_channel_number(pMgmt->pAdapter, pMgmt->byNewChannel);
((PWLAN_IE_CH_SW) pbyBuffer)->byCount = 0;
pbyBuffer += (3) + WLAN_IEHDR_LEN;
uLength += (3) + WLAN_IEHDR_LEN;
@@ -3586,7 +3587,7 @@ s_MgrMakeProbeResponse(
pbyBuffer += (7) + WLAN_IEHDR_LEN;
uLength += (7) + WLAN_IEHDR_LEN;
for(ii=CB_MAX_CHANNEL_24G+1; ii<=CB_MAX_CHANNEL; ii++ ) {
- if (CARDbGetChannelMapInfo(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == TRUE) {
+ if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == true) {
pbyBuffer += 2;
uLength += 2;
pIBSSDFS->len += 2;
@@ -3607,7 +3608,7 @@ s_MgrMakeProbeResponse(
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == TRUE)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3642,9 +3643,9 @@ PSTxMgmtPacket
s_MgrMakeAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
+ unsigned char *pDAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wListenInterval,
PWLAN_IE_SSID pCurrSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
@@ -3652,15 +3653,15 @@ s_MgrMakeAssocRequest(
{
PSTxMgmtPacket pTxPacket = NULL;
WLAN_FR_ASSOCREQ sFrame;
- PBYTE pbyIEs;
- PBYTE pbyRSN;
+ unsigned char *pbyIEs;
+ unsigned char *pbyRSN;
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
// Setup the sFrame structure.
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_ASSOCREQ_FR_MAXLEN;
// format fixed field frame structure
vMgrEncodeAssocRequest(&sFrame);
@@ -3709,7 +3710,7 @@ s_MgrMakeAssocRequest(
pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
// for 802.11h
- if (pMgmt->b11hEnable == TRUE) {
+ if (pMgmt->b11hEnable == true) {
if (sFrame.pCurrPowerCap == NULL) {
sFrame.pCurrPowerCap = (PWLAN_IE_PW_CAP)(sFrame.pBuf + sFrame.len);
sFrame.len += (2 + WLAN_IEHDR_LEN);
@@ -3722,7 +3723,7 @@ s_MgrMakeAssocRequest(
}
if (sFrame.pCurrSuppCh == NULL) {
sFrame.pCurrSuppCh = (PWLAN_IE_SUPP_CH)(sFrame.pBuf + sFrame.len);
- sFrame.len += CARDbySetSupportChannels(pMgmt->pAdapter,(PBYTE)sFrame.pCurrSuppCh);
+ sFrame.len += set_support_channels(pMgmt->pAdapter,(unsigned char *)sFrame.pCurrSuppCh);
}
}
@@ -3765,7 +3766,7 @@ s_MgrMakeAssocRequest(
sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE;
}
// Auth Key Management Suite
- pbyRSN = (PBYTE)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
+ pbyRSN = (unsigned char *)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
*pbyRSN++=0x01;
*pbyRSN++=0x00;
*pbyRSN++=0x00;
@@ -3799,8 +3800,8 @@ s_MgrMakeAssocRequest(
} else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
- UINT ii;
- PWORD pwPMKID;
+ unsigned int ii;
+ unsigned short *pwPMKID;
// WPA IE
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3854,7 +3855,7 @@ s_MgrMakeAssocRequest(
sFrame.pRSN->len +=6;
// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3862,10 +3863,10 @@ s_MgrMakeAssocRequest(
}
sFrame.pRSN->len +=2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == TRUE) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
- pwPMKID = (PWORD)pbyRSN; // Point to PMKID count
+ pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
*pwPMKID = 0; // Initialize PMKID count
pbyRSN += 2; // Point to PMKID list
for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
@@ -3917,9 +3918,9 @@ PSTxMgmtPacket
s_MgrMakeReAssocRequest(
PSDevice pDevice,
PSMgmtObject pMgmt,
- PBYTE pDAddr,
- WORD wCurrCapInfo,
- WORD wListenInterval,
+ unsigned char *pDAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wListenInterval,
PWLAN_IE_SSID pCurrSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
@@ -3927,15 +3928,15 @@ s_MgrMakeReAssocRequest(
{
PSTxMgmtPacket pTxPacket = NULL;
WLAN_FR_REASSOCREQ sFrame;
- PBYTE pbyIEs;
- PBYTE pbyRSN;
+ unsigned char *pbyIEs;
+ unsigned char *pbyRSN;
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset( pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_REASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
/* Setup the sFrame structure. */
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_REASSOCREQ_FR_MAXLEN;
// format fixed field frame structure
@@ -4024,7 +4025,7 @@ s_MgrMakeReAssocRequest(
sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE;
}
// Auth Key Management Suite
- pbyRSN = (PBYTE)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
+ pbyRSN = (unsigned char *)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
*pbyRSN++=0x01;
*pbyRSN++=0x00;
*pbyRSN++=0x00;
@@ -4055,8 +4056,8 @@ s_MgrMakeReAssocRequest(
} else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
- UINT ii;
- PWORD pwPMKID;
+ unsigned int ii;
+ unsigned short *pwPMKID;
/* WPA IE */
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -4110,7 +4111,7 @@ s_MgrMakeReAssocRequest(
sFrame.pRSN->len +=6;
// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -4118,10 +4119,10 @@ s_MgrMakeReAssocRequest(
}
sFrame.pRSN->len +=2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == TRUE) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
- pwPMKID = (PWORD)pbyRSN; // Point to PMKID count
+ pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
*pwPMKID = 0; // Initialize PMKID count
pbyRSN += 2; // Point to PMKID list
for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
@@ -4169,10 +4170,10 @@ PSTxMgmtPacket
s_MgrMakeAssocResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wAssocStatus,
+ unsigned short wAssocAID,
+ unsigned char *pDstAddr,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
@@ -4183,9 +4184,9 @@ s_MgrMakeAssocResponse(
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
// Setup the sFrame structure
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
vMgrEncodeAssocResponse(&sFrame);
// Setup the header
@@ -4200,7 +4201,7 @@ s_MgrMakeAssocResponse(
*sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
*sFrame.pwStatus = cpu_to_le16(wAssocStatus);
- *sFrame.pwAid = cpu_to_le16((WORD)(wAssocAID | BIT14 | BIT15));
+ *sFrame.pwAid = cpu_to_le16((unsigned short)(wAssocAID | BIT14 | BIT15));
// Copy the rate set
sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
@@ -4243,10 +4244,10 @@ PSTxMgmtPacket
s_MgrMakeReAssocResponse(
PSDevice pDevice,
PSMgmtObject pMgmt,
- WORD wCurrCapInfo,
- WORD wAssocStatus,
- WORD wAssocAID,
- PBYTE pDstAddr,
+ unsigned short wCurrCapInfo,
+ unsigned short wAssocStatus,
+ unsigned short wAssocAID,
+ unsigned char *pDstAddr,
PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
@@ -4257,9 +4258,9 @@ s_MgrMakeReAssocResponse(
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
+ pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
// Setup the sFrame structure
- sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
vMgrEncodeReassocResponse(&sFrame);
// Setup the header
@@ -4274,7 +4275,7 @@ s_MgrMakeReAssocResponse(
*sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
*sFrame.pwStatus = cpu_to_le16(wAssocStatus);
- *sFrame.pwAid = cpu_to_le16((WORD)(wAssocAID | BIT14 | BIT15));
+ *sFrame.pwAid = cpu_to_le16((unsigned short)(wAssocAID | BIT14 | BIT15));
// Copy the rate set
sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
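Both (re)association responses above encode the AID the same way: the association ID occupies the low 14 bits of the field with the two most significant bits forced to 1, which is what the BIT14 | BIT15 OR does before the little-endian conversion. A one-line sketch, illustrative only:

        /* Illustrative only: equivalent of (wAssocAID | BIT14 | BIT15) above. */
        static unsigned short encode_aid_field(unsigned short aid)
        {
                return (unsigned short)(aid | 0xC000);  /* 0xC000 == BIT14 | BIT15 */
        }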
@@ -4322,16 +4323,16 @@ s_vMgrRxProbeResponse(
{
PKnownBSS pBSSList = NULL;
WLAN_FR_PROBERESP sFrame;
- BYTE byCurrChannel = pRxPacket->byRxChannel;
+ unsigned char byCurrChannel = pRxPacket->byRxChannel;
ERPObject sERP;
- BYTE byIEChannel = 0;
- BOOL bChannelHit = TRUE;
+ unsigned char byIEChannel = 0;
+ bool bChannelHit = true;
memset(&sFrame, 0, sizeof(WLAN_FR_PROBERESP));
// decode the frame
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeProbeResponse(&sFrame);
if ((sFrame.pqwTimestamp == 0) ||
@@ -4350,29 +4351,29 @@ s_vMgrRxProbeResponse(
if (sFrame.pDSParms != 0) {
if (byCurrChannel > CB_MAX_CHANNEL_24G) {
// channel remapping to
- byIEChannel = CARDbyGetChannelMapping(pMgmt->pAdapter, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
+ byIEChannel = get_channel_mapping(pMgmt->pAdapter, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
} else {
byIEChannel = sFrame.pDSParms->byCurrChannel;
}
if (byCurrChannel != byIEChannel) {
// adjust channel info because we received adjacent-channel packets
- bChannelHit = FALSE;
+ bChannelHit = false;
byCurrChannel = byIEChannel;
}
} else {
// no DS channel info
- bChannelHit = TRUE;
+ bChannelHit = true;
}
//2008-0730-01<Add>by MikeLiu
-if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
+if(ChannelExceedZoneType(pDevice,byCurrChannel)==true)
return;
if (sFrame.pERP != NULL) {
sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = TRUE;
+ sERP.bERPExist = true;
} else {
- sERP.bERPExist = FALSE;
+ sERP.bERPExist = false;
sERP.byERP = 0;
}
@@ -4448,7 +4449,7 @@ s_vMgrRxProbeRequest(
WLAN_FR_PROBEREQ sFrame;
CMD_STATUS Status;
PSTxMgmtPacket pTxPacket;
- BYTE byPHYType = BB_TYPE_11B;
+ unsigned char byPHYType = BB_TYPE_11B;
// STA in Ad-hoc mode: when the latest TBTT beacon was transmitted successfully,
// the STA has to respond to this request.
@@ -4458,7 +4459,7 @@ s_vMgrRxProbeRequest(
memset(&sFrame, 0, sizeof(WLAN_FR_PROBEREQ));
// decode the frame
sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
+ sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeProbeRequest(&sFrame);
/*
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request rx:MAC addr:%02x-%02x-%02x=%02x-%02x-%02x \n",
@@ -4495,7 +4496,7 @@ s_vMgrRxProbeRequest(
0,
sFrame.pHdr->sA3.abyAddr2,
(PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (PBYTE)pMgmt->abyCurrBSSID,
+ (unsigned char *)pMgmt->abyCurrBSSID,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
byPHYType
@@ -4542,8 +4543,8 @@ vMgrRxManagePacket(
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
- BOOL bInScan = FALSE;
- UINT uNodeIndex = 0;
+ bool bInScan = false;
+ unsigned int uNodeIndex = 0;
NODE_STATE eNodeState = 0;
CMD_STATUS Status;
@@ -4577,7 +4578,7 @@ vMgrRxManagePacket(
case WLAN_FSTYPE_ASSOCRESP:
// Frame Class = 2
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocresp1\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, FALSE);
+ s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, false);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocresp2\n");
break;
@@ -4603,7 +4604,7 @@ vMgrRxManagePacket(
case WLAN_FSTYPE_REASSOCRESP:
// Frame Class = 2
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx reassocresp\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, TRUE);
+ s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, true);
break;
case WLAN_FSTYPE_PROBEREQ:
@@ -4623,7 +4624,7 @@ vMgrRxManagePacket(
// Frame Class = 0
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx beacon\n");
if (pMgmt->eScanState != WMAC_NO_SCANNING) {
- bInScan = TRUE;
+ bInScan = true;
};
s_vMgrRxBeacon(pDevice, pMgmt, pRxPacket, bInScan);
break;
@@ -4680,10 +4681,10 @@ vMgrRxManagePacket(
* Prepare beacon to send
*
* Return Value:
- * TRUE if success; FALSE if failed.
+ * true if success; false if failed.
*
-*/
-BOOL
+bool
bMgrPrepareBeaconToSend(
void *hDeviceContext,
PSMgmtObject pMgmt
@@ -4692,7 +4693,7 @@ bMgrPrepareBeaconToSend(
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket;
-// pDevice->bBeaconBufReady = FALSE;
+// pDevice->bBeaconBufReady = false;
if (pDevice->bEncryptionEnable || pDevice->bEnable8021x){
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
}
@@ -4708,18 +4709,18 @@ bMgrPrepareBeaconToSend(
pMgmt->uCurrChannel,
pMgmt->wCurrATIMWindow, //0,
(PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (PBYTE)pMgmt->abyCurrBSSID,
+ (unsigned char *)pMgmt->abyCurrBSSID,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
);
if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
(pMgmt->abyCurrBSSID[0] == 0))
- return FALSE;
+ return false;
csBeacon_xmit(pDevice, pTxPacket);
- return TRUE;
+ return true;
}
@@ -4741,7 +4742,7 @@ static
void
s_vMgrLogStatus(
PSMgmtObject pMgmt,
- WORD wStatus
+ unsigned short wStatus
)
{
switch( wStatus ){
@@ -4807,24 +4808,24 @@ s_vMgrLogStatus(
* Return Value: none.
*
-*/
-BOOL
+bool
bAdd_PMKID_Candidate (
void *hDeviceContext,
- PBYTE pbyBSSID,
+ unsigned char *pbyBSSID,
PSRSNCapObject psRSNCapObj
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PPMKID_CANDIDATE pCandidateList;
- UINT ii = 0;
+ unsigned int ii = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate START: (%d)\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
if ((pDevice == NULL) || (pbyBSSID == NULL) || (psRSNCapObj == NULL))
- return FALSE;
+ return false;
if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST)
- return FALSE;
+ return false;
@@ -4832,18 +4833,18 @@ bAdd_PMKID_Candidate (
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if ( !memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
}
- return TRUE;
+ return true;
}
}
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -4851,7 +4852,7 @@ bAdd_PMKID_Candidate (
memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
pDevice->gsPMKIDCandidate.NumCandidates++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return TRUE;
+ return true;
}
/*
@@ -4881,20 +4882,20 @@ vFlush_PMKID_Candidate (
memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
}
-static BOOL
+static bool
s_bCipherMatch (
PKnownBSS pBSSNode,
NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- PBYTE pbyCCSPK,
- PBYTE pbyCCSGK
+ unsigned char *pbyCCSPK,
+ unsigned char *pbyCCSGK
)
{
- BYTE byMulticastCipher = KEY_CTL_INVALID;
- BYTE byCipherMask = 0x00;
+ unsigned char byMulticastCipher = KEY_CTL_INVALID;
+ unsigned char byCipherMask = 0x00;
int i;
if (pBSSNode == NULL)
- return FALSE;
+ return false;
// check cap. of BSS
if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
@@ -4904,7 +4905,7 @@ s_bCipherMatch (
}
if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPA2Valid == TRUE) &&
+ (pBSSNode->bWPA2Valid == true) &&
//20080123-01,<Add> by Einsn Liu
((EncStatus == Ndis802_11Encryption3Enabled)||(EncStatus == Ndis802_11Encryption2Enabled))) {
//WPA2
@@ -4938,7 +4939,7 @@ s_bCipherMatch (
}
} else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPAValid == TRUE) &&
+ (pBSSNode->bWPAValid == true) &&
((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
//WPA
// check Group Key Cipher
@@ -4978,9 +4979,9 @@ s_bCipherMatch (
(byCipherMask == 0)) {
*pbyCCSGK = KEY_CTL_WEP;
*pbyCCSPK = KEY_CTL_NONE;
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
} else if (EncStatus == Ndis802_11Encryption2Enabled) {
@@ -4988,45 +4989,45 @@ s_bCipherMatch (
(byCipherMask == 0)) {
*pbyCCSGK = KEY_CTL_TKIP;
*pbyCCSPK = KEY_CTL_NONE;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_WEP) &&
((byCipherMask & 0x02) != 0)) {
*pbyCCSGK = KEY_CTL_WEP;
*pbyCCSPK = KEY_CTL_TKIP;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_TKIP) &&
((byCipherMask & 0x02) != 0)) {
*pbyCCSGK = KEY_CTL_TKIP;
*pbyCCSPK = KEY_CTL_TKIP;
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
} else if (EncStatus == Ndis802_11Encryption3Enabled) {
if ((byMulticastCipher == KEY_CTL_CCMP) &&
(byCipherMask == 0)) {
// When CCMP is enabled, "Use group cipher suite" shall not be a valid option.
- return FALSE;
+ return false;
} else if ((byMulticastCipher == KEY_CTL_WEP) &&
((byCipherMask & 0x04) != 0)) {
*pbyCCSGK = KEY_CTL_WEP;
*pbyCCSPK = KEY_CTL_CCMP;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_TKIP) &&
((byCipherMask & 0x04) != 0)) {
*pbyCCSGK = KEY_CTL_TKIP;
*pbyCCSPK = KEY_CTL_CCMP;
- return TRUE;
+ return true;
} else if ((byMulticastCipher == KEY_CTL_CCMP) &&
((byCipherMask & 0x04) != 0)) {
*pbyCCSGK = KEY_CTL_CCMP;
*pbyCCSPK = KEY_CTL_CCMP;
- return TRUE;
+ return true;
} else {
- return FALSE;
+ return false;
}
}
- return TRUE;
+ return true;
}
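Taken as a whole, the wmgr.c hunks above are a mechanical type cleanup rather than a behavioural change: the driver's Windows-style typedefs are replaced by the plain C types they alias. A small sketch of the substitution pattern (declarations only, names hypothetical, illustrative only):

        #include <stdbool.h>
        #include <stddef.h>

        /* Illustrative only: the replacements applied throughout the file.
         *   BOOL/TRUE/FALSE -> bool/true/false      BYTE -> unsigned char
         *   WORD            -> unsigned short       UINT -> unsigned int
         *   PBYTE/PWORD     -> unsigned char * / unsigned short *            */
        static bool           bExample   = false;  /* was: BOOL  bExample = FALSE; */
        static unsigned char  byExample  = 0;      /* was: BYTE  byExample;        */
        static unsigned short wExample   = 0;      /* was: WORD  wExample;         */
        static unsigned int   uExample   = 0;      /* was: UINT  uExample;         */
        static unsigned char *pbyExample = NULL;   /* was: PBYTE pbyExample;       */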
diff --git a/drivers/staging/vt6655/wmgr.h b/drivers/staging/vt6655/wmgr.h
index 9ae7e0d55bc..141e80b843a 100644
--- a/drivers/staging/vt6655/wmgr.h
+++ b/drivers/staging/vt6655/wmgr.h
@@ -83,47 +83,47 @@
/*--------------------- Export Types ------------------------------*/
#define timer_expire(timer,next_tick) mod_timer(&timer, RUN_AT(next_tick))
-typedef void (*TimerFunction)(ULONG);
+typedef void (*TimerFunction)(unsigned long);
//+++ NDIS related
-typedef UCHAR NDIS_802_11_MAC_ADDRESS[6];
+typedef unsigned char NDIS_802_11_MAC_ADDRESS[6];
typedef struct _NDIS_802_11_AI_REQFI
{
- USHORT Capabilities;
- USHORT ListenInterval;
+ unsigned short Capabilities;
+ unsigned short ListenInterval;
NDIS_802_11_MAC_ADDRESS CurrentAPAddress;
} NDIS_802_11_AI_REQFI, *PNDIS_802_11_AI_REQFI;
typedef struct _NDIS_802_11_AI_RESFI
{
- USHORT Capabilities;
- USHORT StatusCode;
- USHORT AssociationId;
+ unsigned short Capabilities;
+ unsigned short StatusCode;
+ unsigned short AssociationId;
} NDIS_802_11_AI_RESFI, *PNDIS_802_11_AI_RESFI;
typedef struct _NDIS_802_11_ASSOCIATION_INFORMATION
{
- ULONG Length;
- USHORT AvailableRequestFixedIEs;
+ unsigned long Length;
+ unsigned short AvailableRequestFixedIEs;
NDIS_802_11_AI_REQFI RequestFixedIEs;
- ULONG RequestIELength;
- ULONG OffsetRequestIEs;
- USHORT AvailableResponseFixedIEs;
+ unsigned long RequestIELength;
+ unsigned long OffsetRequestIEs;
+ unsigned short AvailableResponseFixedIEs;
NDIS_802_11_AI_RESFI ResponseFixedIEs;
- ULONG ResponseIELength;
- ULONG OffsetResponseIEs;
+ unsigned long ResponseIELength;
+ unsigned long OffsetResponseIEs;
} NDIS_802_11_ASSOCIATION_INFORMATION, *PNDIS_802_11_ASSOCIATION_INFORMATION;
typedef struct tagSAssocInfo {
NDIS_802_11_ASSOCIATION_INFORMATION AssocInfo;
- BYTE abyIEs[WLAN_BEACON_FR_MAXLEN+WLAN_BEACON_FR_MAXLEN];
+ unsigned char abyIEs[WLAN_BEACON_FR_MAXLEN+WLAN_BEACON_FR_MAXLEN];
// store ReqIEs set by OID_802_11_ASSOCIATION_INFORMATION
- ULONG RequestIELength;
- BYTE abyReqIEs[WLAN_BEACON_FR_MAXLEN];
+ unsigned long RequestIELength;
+ unsigned char abyReqIEs[WLAN_BEACON_FR_MAXLEN];
} SAssocInfo, *PSAssocInfo;
//---
@@ -224,8 +224,8 @@ typedef enum tagWMAC_POWER_MODE {
typedef struct tagSTxMgmtPacket {
PUWLAN_80211HDR p80211Header;
- UINT cbMPDULen;
- UINT cbPayloadLen;
+ unsigned int cbMPDULen;
+ unsigned int cbPayloadLen;
} STxMgmtPacket, *PSTxMgmtPacket;
@@ -235,12 +235,12 @@ typedef struct tagSRxMgmtPacket {
PUWLAN_80211HDR p80211Header;
QWORD qwLocalTSF;
- UINT cbMPDULen;
- UINT cbPayloadLen;
- UINT uRSSI;
- BYTE bySQ;
- BYTE byRxRate;
- BYTE byRxChannel;
+ unsigned int cbMPDULen;
+ unsigned int cbPayloadLen;
+ unsigned int uRSSI;
+ unsigned char bySQ;
+ unsigned char byRxRate;
+ unsigned char byRxChannel;
} SRxMgmtPacket, *PSRxMgmtPacket;
@@ -251,7 +251,7 @@ typedef struct tagSMgmtObject
void * pAdapter;
// MAC address
- BYTE abyMACAddr[WLAN_ADDR_LEN];
+ unsigned char abyMACAddr[WLAN_ADDR_LEN];
// Configuration Mode
WMAC_CONFIG_MODE eConfigMode; // MAC pre-configed mode
@@ -264,86 +264,86 @@ typedef struct tagSMgmtObject
WMAC_BSS_STATE eCurrState; // MAC current BSS state
PKnownBSS pCurrBSS;
- BYTE byCSSGK;
- BYTE byCSSPK;
+ unsigned char byCSSGK;
+ unsigned char byCSSPK;
-// BYTE abyNewSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
-// BYTE abyNewExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
+// unsigned char abyNewSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
+// unsigned char abyNewExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
// Current state vars
- UINT uCurrChannel;
- BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE abyCurrBSSID[WLAN_BSSID_LEN];
- WORD wCurrCapInfo;
- WORD wCurrAID;
- WORD wCurrATIMWindow;
- WORD wCurrBeaconPeriod;
- BOOL bIsDS;
- BYTE byERPContext;
+ unsigned int uCurrChannel;
+ unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned char abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned char abyCurrBSSID[WLAN_BSSID_LEN];
+ unsigned short wCurrCapInfo;
+ unsigned short wCurrAID;
+ unsigned short wCurrATIMWindow;
+ unsigned short wCurrBeaconPeriod;
+ bool bIsDS;
+ unsigned char byERPContext;
CMD_STATE eCommandState;
- UINT uScanChannel;
+ unsigned int uScanChannel;
// Desired joining BSS vars
- BYTE abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE abyDesireBSSID[WLAN_BSSID_LEN];
+ unsigned char abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned char abyDesireBSSID[WLAN_BSSID_LEN];
// Adhoc or AP configuration vars
- //BYTE abyAdHocSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- WORD wIBSSBeaconPeriod;
- WORD wIBSSATIMWindow;
- UINT uIBSSChannel;
- BYTE abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- BYTE byAPBBType;
- BYTE abyWPAIE[MAX_WPA_IE_LEN];
- WORD wWPAIELen;
-
- UINT uAssocCount;
- BOOL bMoreData;
+ //unsigned char abyAdHocSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned short wIBSSBeaconPeriod;
+ unsigned short wIBSSATIMWindow;
+ unsigned int uIBSSChannel;
+ unsigned char abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
+ unsigned char byAPBBType;
+ unsigned char abyWPAIE[MAX_WPA_IE_LEN];
+ unsigned short wWPAIELen;
+
+ unsigned int uAssocCount;
+ bool bMoreData;
// Scan state vars
WMAC_SCAN_STATE eScanState;
WMAC_SCAN_TYPE eScanType;
- UINT uScanStartCh;
- UINT uScanEndCh;
- WORD wScanSteps;
- UINT uScanBSSType;
+ unsigned int uScanStartCh;
+ unsigned int uScanEndCh;
+ unsigned short wScanSteps;
+ unsigned int uScanBSSType;
// Desired scanning vars
- BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- BYTE abyScanBSSID[WLAN_BSSID_LEN];
+ unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
+ unsigned char abyScanBSSID[WLAN_BSSID_LEN];
// Privacy
WMAC_AUTHENTICATION_MODE eAuthenMode;
WMAC_ENCRYPTION_MODE eEncryptionMode;
- BOOL bShareKeyAlgorithm;
- BYTE abyChallenge[WLAN_CHALLENGE_LEN];
- BOOL bPrivacyInvoked;
+ bool bShareKeyAlgorithm;
+ unsigned char abyChallenge[WLAN_CHALLENGE_LEN];
+ bool bPrivacyInvoked;
// Received beacon state vars
- BOOL bInTIM;
- BOOL bMulticastTIM;
- BYTE byDTIMCount;
- BYTE byDTIMPeriod;
+ bool bInTIM;
+ bool bMulticastTIM;
+ unsigned char byDTIMCount;
+ unsigned char byDTIMPeriod;
// Power saving state vars
WMAC_POWER_MODE ePSMode;
- WORD wListenInterval;
- WORD wCountToWakeUp;
- BOOL bInTIMWake;
- PBYTE pbyPSPacketPool;
- BYTE byPSPacketPool[sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN];
- BOOL bRxBeaconInTBTTWake;
- BYTE abyPSTxMap[MAX_NODE_NUM + 1];
+ unsigned short wListenInterval;
+ unsigned short wCountToWakeUp;
+ bool bInTIMWake;
+ unsigned char *pbyPSPacketPool;
+ unsigned char byPSPacketPool[sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN];
+ bool bRxBeaconInTBTTWake;
+ unsigned char abyPSTxMap[MAX_NODE_NUM + 1];
// management command related
- UINT uCmdBusy;
- UINT uCmdHostAPBusy;
+ unsigned int uCmdBusy;
+ unsigned int uCmdHostAPBusy;
// management packet pool
- PBYTE pbyMgmtPacketPool;
- BYTE byMgmtPacketPool[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
+ unsigned char *pbyMgmtPacketPool;
+ unsigned char byMgmtPacketPool[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
// One second callback timer
@@ -366,7 +366,7 @@ typedef struct tagSMgmtObject
// WPA2 PMKID Cache
SPMKIDCache gsPMKIDCache;
- BOOL bRoaming;
+ bool bRoaming;
// rate fall back vars
@@ -377,16 +377,16 @@ typedef struct tagSMgmtObject
// for 802.11h
- BOOL b11hEnable;
- BOOL bSwitchChannel;
- BYTE byNewChannel;
+ bool b11hEnable;
+ bool bSwitchChannel;
+ unsigned char byNewChannel;
PWLAN_IE_MEASURE_REP pCurrMeasureEIDRep;
- UINT uLengthOfRepEIDs;
- BYTE abyCurrentMSRReq[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- BYTE abyCurrentMSRRep[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- BYTE abyIECountry[WLAN_A3FR_MAXLEN];
- BYTE abyIBSSDFSOwner[6];
- BYTE byIBSSDFSRecovery;
+ unsigned int uLengthOfRepEIDs;
+ unsigned char abyCurrentMSRReq[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
+ unsigned char abyCurrentMSRRep[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
+ unsigned char abyIECountry[WLAN_A3FR_MAXLEN];
+ unsigned char abyIBSSDFSOwner[6];
+ unsigned char byIBSSDFSRecovery;
struct sk_buff skb;
@@ -432,8 +432,8 @@ void
vMgrDisassocBeginSta(
void *hDeviceContext,
PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
+ unsigned char *abyDestAddress,
+ unsigned short wReason,
PCMD_STATUS pStatus
);
@@ -475,22 +475,22 @@ void
vMgrDeAuthenBeginSta(
void *hDeviceContext,
PSMgmtObject pMgmt,
- PBYTE abyDestAddress,
- WORD wReason,
+ unsigned char *abyDestAddress,
+ unsigned short wReason,
PCMD_STATUS pStatus
);
-BOOL
+bool
bMgrPrepareBeaconToSend(
void *hDeviceContext,
PSMgmtObject pMgmt
);
-BOOL
+bool
bAdd_PMKID_Candidate (
void *hDeviceContext,
- PBYTE pbyBSSID,
+ unsigned char *pbyBSSID,
PSRSNCapObject psRSNCapObj
);
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
index da5c814e200..61ac46fa505 100644
--- a/drivers/staging/vt6655/wpa.c
+++ b/drivers/staging/vt6655/wpa.c
@@ -45,12 +45,12 @@
/*--------------------- Static Variables --------------------------*/
static int msglevel =MSG_LEVEL_INFO;
-const BYTE abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
-const BYTE abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
-const BYTE abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
-const BYTE abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
-const BYTE abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
-const BYTE abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
+const unsigned char abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
+const unsigned char abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
+const unsigned char abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
+const unsigned char abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
+const unsigned char abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
+const unsigned char abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
/*+
@@ -83,9 +83,9 @@ WPA_ClearRSN (
pBSSList->wAuthCount = 0;
pBSSList->byDefaultK_as_PK = 0;
pBSSList->byReplayIdx = 0;
- pBSSList->sRSNCapObj.bRSNCapExist = FALSE;
+ pBSSList->sRSNCapObj.bRSNCapExist = false;
pBSSList->sRSNCapObj.wRSNCap = 0;
- pBSSList->bWPAValid = FALSE;
+ pBSSList->bWPAValid = false;
}
@@ -112,7 +112,7 @@ WPA_ParseRSN (
{
PWLAN_IE_RSN_AUTH pIE_RSN_Auth = NULL;
int i, j, m, n = 0;
- PBYTE pbyCaps;
+ unsigned char *pbyCaps;
WPA_ClearRSN(pBSSList);
@@ -148,7 +148,7 @@ WPA_ParseRSN (
{
j = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wPKCount: %d, sizeof(pBSSList->abyPKType): %zu\n", pRSN->wPKCount, sizeof(pBSSList->abyPKType));
- for(i = 0; (i < pRSN->wPKCount) && (j < sizeof(pBSSList->abyPKType)/sizeof(BYTE)); i++) {
+ for(i = 0; (i < pRSN->wPKCount) && (j < sizeof(pBSSList->abyPKType)/sizeof(unsigned char)); i++) {
if(pRSN->len >= 12+i*4+4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*i)
if ( !memcmp(pRSN->PKSList[i].abyOUI, abyOUI00, 4))
pBSSList->abyPKType[j++] = WPA_NONE;
@@ -166,7 +166,7 @@ WPA_ParseRSN (
break;
//DBG_PRN_GRP14(("abyPKType[%d]: %X\n", j-1, pBSSList->abyPKType[j-1]));
} //for
- pBSSList->wPKCount = (WORD)j;
+ pBSSList->wPKCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wPKCount: %d\n", pBSSList->wPKCount);
}
@@ -180,7 +180,7 @@ WPA_ParseRSN (
j = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAuthCount: %d, sizeof(pBSSList->abyAuthType): %zu\n",
pIE_RSN_Auth->wAuthCount, sizeof(pBSSList->abyAuthType));
- for(i = 0; (i < pIE_RSN_Auth->wAuthCount) && (j < sizeof(pBSSList->abyAuthType)/sizeof(BYTE)); i++) {
+ for(i = 0; (i < pIE_RSN_Auth->wAuthCount) && (j < sizeof(pBSSList->abyAuthType)/sizeof(unsigned char)); i++) {
if(pRSN->len >= 14+4+(m+i)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*i)
if ( !memcmp(pIE_RSN_Auth->AuthKSList[i].abyOUI, abyOUI01, 4))
pBSSList->abyAuthType[j++] = WPA_AUTH_IEEE802_1X;
@@ -195,7 +195,7 @@ WPA_ParseRSN (
//DBG_PRN_GRP14(("abyAuthType[%d]: %X\n", j-1, pBSSList->abyAuthType[j-1]));
}
if(j > 0)
- pBSSList->wAuthCount = (WORD)j;
+ pBSSList->wAuthCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAuthCount: %d\n", pBSSList->wAuthCount);
}
@@ -207,17 +207,17 @@ WPA_ParseRSN (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"14+4+(m+n)*4: %d\n", 14+4+(m+n)*4);
if(pRSN->len+2 >= 14+4+(m+n)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*n)+Cap(2)
- pbyCaps = (PBYTE)pIE_RSN_Auth->AuthKSList[n].abyOUI;
+ pbyCaps = (unsigned char *)pIE_RSN_Auth->AuthKSList[n].abyOUI;
pBSSList->byDefaultK_as_PK = (*pbyCaps) & WPA_GROUPFLAG;
pBSSList->byReplayIdx = 2 << ((*pbyCaps >> WPA_REPLAYBITSSHIFT) & WPA_REPLAYBITS);
- pBSSList->sRSNCapObj.bRSNCapExist = TRUE;
- pBSSList->sRSNCapObj.wRSNCap = *(PWORD)pbyCaps;
+ pBSSList->sRSNCapObj.bRSNCapExist = true;
+ pBSSList->sRSNCapObj.wRSNCap = *(unsigned short *)pbyCaps;
//DBG_PRN_GRP14(("pbyCaps: %X\n", *pbyCaps));
//DBG_PRN_GRP14(("byDefaultK_as_PK: %X\n", pBSSList->byDefaultK_as_PK));
//DBG_PRN_GRP14(("byReplayIdx: %X\n", pBSSList->byReplayIdx));
}
}
- pBSSList->bWPAValid = TRUE;
+ pBSSList->bWPAValid = true;
}
}
@@ -237,24 +237,24 @@ WPA_ParseRSN (
* Return Value: none.
*
-*/
-BOOL
+bool
WPA_SearchRSN (
- BYTE byCmd,
- BYTE byEncrypt,
+ unsigned char byCmd,
+ unsigned char byEncrypt,
PKnownBSS pBSSList
)
{
int ii;
- BYTE byPKType = WPA_NONE;
+ unsigned char byPKType = WPA_NONE;
- if (pBSSList->bWPAValid == FALSE)
- return FALSE;
+ if (pBSSList->bWPAValid == false)
+ return false;
switch(byCmd) {
case 0:
if (byEncrypt != pBSSList->byGKType)
- return FALSE;
+ return false;
if (pBSSList->wPKCount > 0) {
for (ii = 0; ii < pBSSList->wPKCount; ii ++) {
@@ -268,9 +268,9 @@ WPA_SearchRSN (
byPKType = WPA_WEP104;
}
if (byEncrypt != byPKType)
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
// if (pBSSList->wAuthCount > 0)
// for (ii=0; ii < pBSSList->wAuthCount; ii ++)
// if (byAuth == pBSSList->abyAuthType[ii])
@@ -280,7 +280,7 @@ WPA_SearchRSN (
default:
break;
}
- return FALSE;
+ return false;
}
/*+
@@ -297,20 +297,20 @@ WPA_SearchRSN (
* Return Value: none.
*
-*/
-BOOL
+bool
WPAb_Is_RSN (
PWLAN_IE_RSN_EXT pRSN
)
{
if (pRSN == NULL)
- return FALSE;
+ return false;
if ((pRSN->len >= 6) && // oui1(4)+ver(2)
(pRSN->byElementID == WLAN_EID_RSN_WPA) && !memcmp(pRSN->abyOUI, abyOUI01, 4) &&
(pRSN->wVersion == 1)) {
- return TRUE;
+ return true;
}
else
- return FALSE;
+ return false;
}
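The wpa.c hunks above, like the rest of this series, perform a mechanical type substitution: the driver's Windows-style aliases (BYTE, WORD, DWORD, PBYTE, PWORD, BOOL with TRUE/FALSE, UINT, ULONG, ULONGLONG) become plain C types plus the kernel's bool/true/false from <linux/types.h>. A minimal sketch of what a converted helper looks like after the substitution; the function name and OUI value below are illustrative, not taken from the patch:

#include <linux/types.h>        /* bool, true, false */
#include <linux/string.h>       /* memcmp() */

/* was roughly: static BOOL is_wpa_oui(PBYTE pbyOUI) { ... return TRUE/FALSE; } */
static bool is_wpa_oui(const unsigned char *oui)
{
        static const unsigned char wpa_oui[4] = { 0x00, 0x50, 0xF2, 0x01 };

        return memcmp(oui, wpa_oui, 4) == 0;
}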
diff --git a/drivers/staging/vt6655/wpa.h b/drivers/staging/vt6655/wpa.h
index 80d990b09d2..921fd7ae9d3 100644
--- a/drivers/staging/vt6655/wpa.h
+++ b/drivers/staging/vt6655/wpa.h
@@ -69,14 +69,14 @@ WPA_ParseRSN(
PWLAN_IE_RSN_EXT pRSN
);
-BOOL
+bool
WPA_SearchRSN(
- BYTE byCmd,
- BYTE byEncrypt,
+ unsigned char byCmd,
+ unsigned char byEncrypt,
PKnownBSS pBSSList
);
-BOOL
+bool
WPAb_Is_RSN(
PWLAN_IE_RSN_EXT pRSN
);
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
index 7a42a0aad7d..805164bed7e 100644
--- a/drivers/staging/vt6655/wpa2.c
+++ b/drivers/staging/vt6655/wpa2.c
@@ -42,14 +42,14 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
-const BYTE abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-const BYTE abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const BYTE abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-const BYTE abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-const BYTE abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
+const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
+const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
+const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-const BYTE abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const BYTE abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
/*--------------------- Static Functions --------------------------*/
@@ -79,7 +79,7 @@ WPA2_ClearRSN (
{
int ii;
- pBSSNode->bWPA2Valid = FALSE;
+ pBSSNode->bWPA2Valid = false;
pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
for (ii=0; ii < 4; ii ++)
@@ -88,7 +88,7 @@ WPA2_ClearRSN (
for (ii=0; ii < 4; ii ++)
pBSSNode->abyAKMSSAuthType[ii] = WLAN_11i_AKMSS_802_1X;
pBSSNode->wAKMSSAuthCount = 1;
- pBSSNode->sRSNCapObj.bRSNCapExist = FALSE;
+ pBSSNode->sRSNCapObj.bRSNCapExist = false;
pBSSNode->sRSNCapObj.wRSNCap = 0;
}
@@ -114,9 +114,9 @@ WPA2vParseRSN (
)
{
int i, j;
- WORD m = 0, n = 0;
- PBYTE pbyOUI;
- BOOL bUseGK = FALSE;
+ unsigned short m = 0, n = 0;
+ unsigned char *pbyOUI;
+ bool bUseGK = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WPA2_ParseRSN: [%d]\n", pRSN->len);
@@ -124,7 +124,7 @@ WPA2vParseRSN (
if (pRSN->len == 2) { // ver(2)
if ((pRSN->byElementID == WLAN_EID_RSN) && (pRSN->wVersion == 1)) {
- pBSSNode->bWPA2Valid = TRUE;
+ pBSSNode->bWPA2Valid = true;
}
return;
}
@@ -159,21 +159,21 @@ WPA2vParseRSN (
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"802.11i CSS: %X\n", pBSSNode->byCSSGK);
if (pRSN->len == 6) {
- pBSSNode->bWPA2Valid = TRUE;
+ pBSSNode->bWPA2Valid = true;
return;
}
if (pRSN->len >= 8) { // ver(2) + GK(4) + PK count(2)
- pBSSNode->wCSSPKCount = *((PWORD) &(pRSN->abyRSN[4]));
+ pBSSNode->wCSSPKCount = *((unsigned short *) &(pRSN->abyRSN[4]));
j = 0;
pbyOUI = &(pRSN->abyRSN[6]);
- for (i = 0; (i < pBSSNode->wCSSPKCount) && (j < sizeof(pBSSNode->abyCSSPK)/sizeof(BYTE)); i++) {
+ for (i = 0; (i < pBSSNode->wCSSPKCount) && (j < sizeof(pBSSNode->abyCSSPK)/sizeof(unsigned char)); i++) {
if (pRSN->len >= 8+i*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*i)
if ( !memcmp(pbyOUI, abyOUIGK, 4)) {
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_USE_GROUP;
- bUseGK = TRUE;
+ bUseGK = true;
} else if ( !memcmp(pbyOUI, abyOUIWEP40, 4)) {
// Invalid CSS, continue parsing
} else if ( !memcmp(pbyOUI, abyOUITKIP, 4)) {
@@ -195,7 +195,7 @@ WPA2vParseRSN (
break;
} //for
- if (bUseGK == TRUE) {
+ if (bUseGK == true) {
if (j != 1) {
// invalid CSS, This should be only PK CSS.
return;
@@ -209,17 +209,17 @@ WPA2vParseRSN (
// invalid CSS, No valid PK.
return;
}
- pBSSNode->wCSSPKCount = (WORD)j;
+ pBSSNode->wCSSPKCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wCSSPKCount: %d\n", pBSSNode->wCSSPKCount);
}
- m = *((PWORD) &(pRSN->abyRSN[4]));
+ m = *((unsigned short *) &(pRSN->abyRSN[4]));
if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
- pBSSNode->wAKMSSAuthCount = *((PWORD) &(pRSN->abyRSN[6+4*m]));;
+ pBSSNode->wAKMSSAuthCount = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));;
j = 0;
pbyOUI = &(pRSN->abyRSN[8+4*m]);
- for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(BYTE)); i++) {
+ for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(unsigned char)); i++) {
if (pRSN->len >= 10+(m+i)*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSS(2)+AKS(4*i)
if ( !memcmp(pbyOUI, abyOUI8021X, 4))
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_802_1X;
@@ -232,17 +232,17 @@ WPA2vParseRSN (
} else
break;
}
- pBSSNode->wAKMSSAuthCount = (WORD)j;
+ pBSSNode->wAKMSSAuthCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
- n = *((PWORD) &(pRSN->abyRSN[6+4*m]));;
+ n = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));;
if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
- pBSSNode->sRSNCapObj.bRSNCapExist = TRUE;
- pBSSNode->sRSNCapObj.wRSNCap = *((PWORD) &(pRSN->abyRSN[8+4*m+4*n]));
+ pBSSNode->sRSNCapObj.bRSNCapExist = true;
+ pBSSNode->sRSNCapObj.wRSNCap = *((unsigned short *) &(pRSN->abyRSN[8+4*m+4*n]));
}
}
//ignore PMKID lists because only (Re)AssocRequest carries this field
- pBSSNode->bWPA2Valid = TRUE;
+ pBSSNode->bWPA2Valid = true;
}
}
@@ -261,16 +261,16 @@ WPA2vParseRSN (
* Return Value: length of IEs.
*
-*/
-UINT
+unsigned int
WPA2uSetIEs(
void *pMgmtHandle,
PWLAN_IE_RSN pRSNIEs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- PBYTE pbyBuffer = NULL;
- UINT ii = 0;
- PWORD pwPMKID = NULL;
+ unsigned char *pbyBuffer = NULL;
+ unsigned int ii = 0;
+ unsigned short *pwPMKID = NULL;
if (pRSNIEs == NULL) {
return(0);
@@ -279,7 +279,7 @@ WPA2uSetIEs(
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
/* WPA2 IE */
- pbyBuffer = (PBYTE) pRSNIEs;
+ pbyBuffer = (unsigned char *) pRSNIEs;
pRSNIEs->byElementID = WLAN_EID_RSN;
pRSNIEs->len = 6; //Version(2)+GK(4)
pRSNIEs->wVersion = 1;
@@ -330,7 +330,7 @@ WPA2uSetIEs(
pRSNIEs->len +=6;
// RSN Capabilites
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&pRSNIEs->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
pRSNIEs->abyRSN[16] = 0;
@@ -339,10 +339,10 @@ WPA2uSetIEs(
pRSNIEs->len +=2;
if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- (pMgmt->bRoaming == TRUE) &&
+ (pMgmt->bRoaming == true) &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
- pwPMKID = (PWORD)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
+ pwPMKID = (unsigned short *)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
*pwPMKID = 0; // Initialize PMKID count
pbyBuffer = &pRSNIEs->abyRSN[20]; // Point to PMKID list
for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
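The WPA2vParseRSN()/WPA2uSetIEs() hunks above keep the original access pattern of casting into the IE byte buffer (for example *((unsigned short *)&(pRSN->abyRSN[4]))); only the typedef changes. As an aside, the unaligned-safe way such little-endian counts are usually read in kernel code is get_unaligned_le16(); a sketch with a hypothetical helper, not part of this patch:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: the RSN counts sit at odd byte offsets inside the IE,
 * so an explicit unaligned little-endian load avoids alignment faults. */
static u16 rsn_le16_at(const unsigned char *rsn_body, unsigned int offset)
{
        return get_unaligned_le16(rsn_body + offset);
}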
diff --git a/drivers/staging/vt6655/wpa2.h b/drivers/staging/vt6655/wpa2.h
index 7200db37f43..718208beb72 100644
--- a/drivers/staging/vt6655/wpa2.h
+++ b/drivers/staging/vt6655/wpa2.h
@@ -40,12 +40,12 @@
#define MAX_PMKID_CACHE 16
typedef struct tagsPMKIDInfo {
- BYTE abyBSSID[6];
- BYTE abyPMKID[16];
+ unsigned char abyBSSID[6];
+ unsigned char abyPMKID[16];
} PMKIDInfo, *PPMKIDInfo;
typedef struct tagSPMKIDCache {
- ULONG BSSIDInfoCount;
+ unsigned long BSSIDInfoCount;
PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
} SPMKIDCache, *PSPMKIDCache;
@@ -69,7 +69,7 @@ WPA2vParseRSN (
PWLAN_IE_RSN pRSN
);
-UINT
+unsigned int
WPA2uSetIEs(
void *pMgmtHandle,
PWLAN_IE_RSN pRSNIEs
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 22c2fab3f32..4bdb8362de8 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -199,16 +199,16 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
*
*/
- int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
+ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
{
struct viawget_wpa_param *param=ctx;
PSMgmtObject pMgmt = pDevice->pMgmt;
- DWORD dwKeyIndex = 0;
- BYTE abyKey[MAX_KEY_LEN];
- BYTE abySeq[MAX_KEY_LEN];
+ unsigned long dwKeyIndex = 0;
+ unsigned char abyKey[MAX_KEY_LEN];
+ unsigned char abySeq[MAX_KEY_LEN];
QWORD KeyRSC;
// NDIS_802_11_KEY_RSC KeyRSC;
- BYTE byKeyDecMode = KEY_CTL_WEP;
+ unsigned char byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int uu, ii;
@@ -219,9 +219,9 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = FALSE;
+ pDevice->bEncryptionEnable = false;
pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = FALSE;
+ pDevice->bTransmitKey = false;
KeyvRemoveAllWEPKey(&(pDevice->sKey), pDevice->PortOffset);
for (uu=0; uu<MAX_KEY_TABLE; uu++) {
MACvDisableKeyEntry(pDevice->PortOffset, uu);
@@ -243,7 +243,7 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
spin_lock_irq(&pDevice->lock);
}
- dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
+ dwKeyIndex = (unsigned long)(param->u.wpa_key.key_index);
if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
if (dwKeyIndex > 3) {
@@ -251,8 +251,8 @@ spin_lock_irq(&pDevice->lock);
}
else {
if (param->u.wpa_key.set_tx) {
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
+ pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
+ pDevice->bTransmitKey = true;
dwKeyIndex |= (1 << 31);
}
KeybSetDefaultKey(&(pDevice->sKey),
@@ -266,7 +266,7 @@ spin_lock_irq(&pDevice->lock);
}
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
return ret;
}
@@ -351,26 +351,26 @@ spin_lock_irq(&pDevice->lock);
}
// spin_lock_irq(&pDevice->lock);
- if (IS_BROADCAST_ADDRESS(&param->addr[0]) || (param->addr == NULL)) {
- // If IS_BROADCAST_ADDRESS, set the key as every key entry's group key.
+ if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
+ // If is_broadcast_ether_addr, set the key as every key entry's group key.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
if ((KeybSetAllGroupKey(&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
+ (unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == TRUE) &&
+ pDevice->byLocalID) == true) &&
(KeybSetDefaultKey(&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
+ (unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == TRUE) ) {
+ pDevice->byLocalID) == true) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
} else {
@@ -400,15 +400,15 @@ spin_lock_irq(&pDevice->lock);
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
+ (unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == TRUE) {
+ pDevice->byLocalID) == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
// Key Table Full
- if (IS_ETH_ADDRESS_EQUAL(&param->addr[0], pDevice->abyBSSID)) {
+ if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
//spin_unlock_irq(&pDevice->lock);
return -EINVAL;
@@ -422,10 +422,10 @@ spin_lock_irq(&pDevice->lock);
}
} // BSSID not 0xffffffffffff
if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
- pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = TRUE;
+ pDevice->byKeyIndex = (unsigned char)param->u.wpa_key.key_index;
+ pDevice->bTransmitKey = true;
}
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
//spin_unlock_irq(&pDevice->lock);
/*
@@ -465,7 +465,7 @@ static int wpa_set_wpa(PSDevice pDevice,
int ret = 0;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->bShareKeyAlgorithm = false;
return ret;
}
@@ -613,13 +613,13 @@ static int wpa_get_scan(PSDevice pDevice,
PSMgmtObject pMgmt = pDevice->pMgmt;
PWLAN_IE_SSID pItemSSID;
PKnownBSS pBSS;
- PBYTE pBuf;
+ unsigned char *pBuf;
int ret = 0;
u16 count = 0;
u16 ii, jj;
#if 1
- PBYTE ptempBSS;
+ unsigned char *ptempBSS;
@@ -639,9 +639,9 @@ static int wpa_get_scan(PSDevice pDevice,
for(jj=0;jj<MAX_BSS_NUM-ii-1;jj++) {
- if((pMgmt->sBSSList[jj].bActive!=TRUE) ||
+ if((pMgmt->sBSSList[jj].bActive!=true) ||
- ((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=FALSE))) {
+ ((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=false))) {
memcpy(ptempBSS,&pMgmt->sBSSList[jj],sizeof(KnownBSS));
@@ -713,7 +713,7 @@ static int wpa_get_scan(PSDevice pDevice,
scan_buf->rsn_ie_len = pBSS->wRSNLen;
memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
}
- scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
+ scan_buf = (struct viawget_scan_result *)((unsigned char *)scan_buf + sizeof(struct viawget_scan_result));
jj ++;
}
}
@@ -752,10 +752,10 @@ static int wpa_set_associate(PSDevice pDevice,
{
PSMgmtObject pMgmt = pDevice->pMgmt;
PWLAN_IE_SSID pItemSSID;
- BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- BYTE abyWPAIE[64];
+ unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned char abyWPAIE[64];
int ret = 0;
- BOOL bWepEnabled=FALSE;
+ bool bWepEnabled=false;
// set key type & algorithm
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- if (param->u.wpa_associate.wpa_ie &&
- copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len) {
+ if (!param->u.wpa_associate.wpa_ie)
+ return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+ return -EINVAL;
+ if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+ return -EFAULT;
+ }
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
@@ -817,7 +822,7 @@ else
case CIPHER_WEP40:
case CIPHER_WEP104:
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- bWepEnabled=TRUE;
+ bWepEnabled=true;
break;
case CIPHER_NONE:
if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
@@ -834,26 +839,26 @@ else
if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) {
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
//pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
- pMgmt->bShareKeyAlgorithm = TRUE;
+ pMgmt->bShareKeyAlgorithm = true;
}
else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
if(!bWepEnabled) pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
else pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
//pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- //pMgmt->bShareKeyAlgorithm = FALSE; //20080717-06,<Modify> by chester//Fix Open mode, WEP encrytion
+ //pMgmt->bShareKeyAlgorithm = false; //20080717-06,<Modify> by chester//Fix Open mode, WEP encryption
}
//mike save old encryption status
pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
- pDevice->bEncryptionEnable = TRUE;
+ pDevice->bEncryptionEnable = true;
else
- pDevice->bEncryptionEnable = FALSE;
+ pDevice->bEncryptionEnable = false;
if (!((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bWepEnabled==TRUE))) ) //DavidWang //20080717-06,<Modify> by chester//Not to initial WEP
+ ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bWepEnabled==true))) ) //DavidWang //20080717-06,<Modify> by chester//Not to initial WEP
KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = FALSE;
+ pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
@@ -922,7 +927,7 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
case VIAWGET_SET_KEY:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, FALSE);
+ ret = wpa_set_keys(pDevice, param, false);
spin_unlock_irq(&pDevice->lock);
break;
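Besides the type renames, the wpa_set_associate() hunk above hardens the user-space copy of the WPA IE: a zero length is now allowed, a missing pointer or a length larger than the 64-byte abyWPAIE[] buffer is rejected with -EINVAL, and a failed copy_from_user() returns -EFAULT rather than -EINVAL. A self-contained sketch of that validation pattern, with hypothetical names standing in for the driver's buffers:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>      /* copy_from_user() */

/* Sketch only: 'dst'/'dst_len' stand in for abyWPAIE[] and its size. */
static int copy_ie_from_user(unsigned char *dst, size_t dst_len,
                             const void __user *src, size_t src_len)
{
        if (!src_len)
                return 0;               /* no IE supplied: nothing to copy */
        if (!src || src_len > dst_len)
                return -EINVAL;         /* malformed request */
        if (copy_from_user(dst, src, src_len))
                return -EFAULT;         /* fault while copying */
        return 0;
}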
diff --git a/drivers/staging/vt6655/wpactl.h b/drivers/staging/vt6655/wpactl.h
index b0d92d51a2a..dbe8e861d99 100644
--- a/drivers/staging/vt6655/wpactl.h
+++ b/drivers/staging/vt6655/wpactl.h
@@ -54,7 +54,7 @@ typedef enum { KEY_MGMT_802_1X, KEY_MGMT_CCKM,KEY_MGMT_PSK, KEY_MGMT_NONE,
-typedef ULONGLONG NDIS_802_11_KEY_RSC;
+typedef unsigned long long NDIS_802_11_KEY_RSC;
/*--------------------- Export Classes ----------------------------*/
@@ -64,7 +64,7 @@ typedef ULONGLONG NDIS_802_11_KEY_RSC;
int wpa_set_wpadev(PSDevice pDevice, int val);
int wpa_ioctl(PSDevice pDevice, struct iw_point *p);
-int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel);
+int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel);
#endif // __WPACL_H__
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
index bf92fb9908f..66e2eeae628 100644
--- a/drivers/staging/vt6655/wroute.c
+++ b/drivers/staging/vt6655/wroute.c
@@ -53,45 +53,45 @@ static int msglevel =MSG_LEVEL_INFO;
/*
* Description:
- * Relay packet. Return TRUE if packet is copy to DMA1
+ * Relay packet. Return true if the packet is copied to DMA1
*
* Parameters:
* In:
* pDevice -
* pbySkbData - rx packet skb data
* Out:
- * TURE, FALSE
+ * true, false
*
- * Return Value: TRUE if packet duplicate; otherwise FALSE
+ * Return Value: true if the packet was duplicated; otherwise false
*
*/
-BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeIndex)
+bool ROUTEbRelay (PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
- UINT cbFrameBodySize;
- UINT uMACfragNum;
- BYTE byPktType;
- BOOL bNeedEncryption = FALSE;
+ unsigned int cbFrameBodySize;
+ unsigned int uMACfragNum;
+ unsigned char byPktType;
+ bool bNeedEncryption = false;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
- UINT cbHeaderSize;
- UINT ii;
- PBYTE pbyBSSID;
+ unsigned int cbHeaderSize;
+ unsigned int ii;
+ unsigned char *pbyBSSID;
if (AVAIL_TD(pDevice, TYPE_AC0DMA)<=0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Relay can't allocate TD1..\n");
- return FALSE;
+ return false;
}
pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)pbySkbData, ETH_HLEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)pbySkbData, ETH_HLEN);
cbFrameBodySize = uDataLen - ETH_HLEN;
@@ -99,12 +99,12 @@ BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeI
cbFrameBodySize += 8;
}
- if (pDevice->bEncryptionEnable == TRUE) {
- bNeedEncryption = TRUE;
+ if (pDevice->bEncryptionEnable == true) {
+ bNeedEncryption = true;
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
+ if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
} else {
@@ -130,16 +130,16 @@ BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeI
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
if (uMACfragNum > AVAIL_TD(pDevice,TYPE_AC0DMA)) {
- return FALSE;
+ return false;
}
- byPktType = (BYTE)pDevice->byPacketType;
+ byPktType = (unsigned char)pDevice->byPacketType;
if (pDevice->bFixRate) {
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
@@ -149,7 +149,7 @@ BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeI
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
- pDevice->wCurrentRate = (WORD)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
}
@@ -172,7 +172,7 @@ BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeI
MACbPSWakeup(pDevice->PortOffset);
}
- pDevice->bPWBitOn = FALSE;
+ pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
for (ii = 0; ii < uMACfragNum; ii++) {
@@ -192,7 +192,7 @@ BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeI
MACvTransmitAC0(pDevice->PortOffset);
- return TRUE;
+ return true;
}
diff --git a/drivers/staging/vt6655/wroute.h b/drivers/staging/vt6655/wroute.h
index 295cdc5b8e9..34f9e43a6cc 100644
--- a/drivers/staging/vt6655/wroute.h
+++ b/drivers/staging/vt6655/wroute.h
@@ -39,7 +39,7 @@
/*--------------------- Export Functions --------------------------*/
-BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeIndex);
+bool ROUTEbRelay (PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex);
#endif // __WROUTE_H__
diff --git a/drivers/staging/vt6656/80211mgr.c b/drivers/staging/vt6656/80211mgr.c
index f24dc55e68f..fceec4999c3 100644
--- a/drivers/staging/vt6656/80211mgr.c
+++ b/drivers/staging/vt6656/80211mgr.c
@@ -18,7 +18,7 @@
*
* File: 80211mgr.c
*
- * Purpose: Handles the 802.11 managment support functions
+ * Purpose: Handles the 802.11 management support functions
*
* Author: Lyndon Chen
*
@@ -67,8 +67,8 @@
/*--------------------- Static Variables --------------------------*/
-static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
+static int msglevel = MSG_LEVEL_INFO;
+/*static int msglevel =MSG_LEVEL_DEBUG;*/
/*--------------------- Static Functions --------------------------*/
@@ -96,7 +96,7 @@ vMgrEncodeBeacon(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -130,7 +130,7 @@ vMgrDecodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -138,88 +138,87 @@ vMgrDecodeBeacon(
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_CAPINFO);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)((PBYTE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)))
+ WLAN_BEACON_OFF_SSID);
- while( ((PBYTE)pItem) < (pFrame->pBuf + pFrame->len) ){
+ while (((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
- case WLAN_EID_FH_PARMS:
- //pFrame->pFHParms = (PWLAN_IE_FH_PARMS)pItem;
- break;
- case WLAN_EID_DS_PARMS:
- if (pFrame->pDSParms == NULL)
- pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
- break;
- case WLAN_EID_CF_PARMS:
- if (pFrame->pCFParms == NULL)
- pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
- break;
- case WLAN_EID_IBSS_PARMS:
- if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
- break;
- case WLAN_EID_TIM:
- if (pFrame->pTIM == NULL)
- pFrame->pTIM = (PWLAN_IE_TIM)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
-
- case WLAN_EID_ERP:
- if (pFrame->pERP == NULL)
- pFrame->pERP = (PWLAN_IE_ERP)pItem;
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_COUNTRY: //7
- if (pFrame->pIE_Country == NULL)
- pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
- break;
-
- case WLAN_EID_PWR_CONSTRAINT: //32
- if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
- break;
-
- case WLAN_EID_CH_SWITCH: //37
- if (pFrame->pIE_CHSW == NULL)
- pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
- break;
-
- case WLAN_EID_QUIET: //40
- if (pFrame->pIE_Quiet == NULL)
- pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
- break;
-
- case WLAN_EID_IBSS_DFS:
- if (pFrame->pIE_IBSSDFS == NULL)
- pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
- break;
-
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in beacon decode.\n", pItem->byElementID);
+ case WLAN_EID_SSID:
+ if (pFrame->pSSID == NULL)
+ pFrame->pSSID = (PWLAN_IE_SSID)pItem;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ if (pFrame->pSuppRates == NULL)
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+ case WLAN_EID_FH_PARMS:
+ /* pFrame->pFHParms = (PWLAN_IE_FH_PARMS)pItem; */
+ break;
+ case WLAN_EID_DS_PARMS:
+ if (pFrame->pDSParms == NULL)
+ pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
+ break;
+ case WLAN_EID_CF_PARMS:
+ if (pFrame->pCFParms == NULL)
+ pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
+ break;
+ case WLAN_EID_IBSS_PARMS:
+ if (pFrame->pIBSSParms == NULL)
+ pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
+ break;
+ case WLAN_EID_TIM:
+ if (pFrame->pTIM == NULL)
+ pFrame->pTIM = (PWLAN_IE_TIM)pItem;
+ break;
+
+ case WLAN_EID_RSN:
+ if (pFrame->pRSN == NULL)
+ pFrame->pRSN = (PWLAN_IE_RSN)pItem;
+ break;
+ case WLAN_EID_RSN_WPA:
+ if (pFrame->pRSNWPA == NULL) {
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ }
+ break;
+
+ case WLAN_EID_ERP:
+ if (pFrame->pERP == NULL)
+ pFrame->pERP = (PWLAN_IE_ERP)pItem;
+ break;
+ case WLAN_EID_EXTSUPP_RATES:
+ if (pFrame->pExtSuppRates == NULL)
+ pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+
+ case WLAN_EID_COUNTRY: /* 7 */
+ if (pFrame->pIE_Country == NULL)
+ pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
+ break;
+
+ case WLAN_EID_PWR_CONSTRAINT: /* 32 */
+ if (pFrame->pIE_PowerConstraint == NULL)
+ pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
+ break;
+
+ case WLAN_EID_CH_SWITCH: /* 37 */
+ if (pFrame->pIE_CHSW == NULL)
+ pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
+ break;
+
+ case WLAN_EID_QUIET: /* 40 */
+ if (pFrame->pIE_Quiet == NULL)
+ pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
+ break;
+
+ case WLAN_EID_IBSS_DFS:
+ if (pFrame->pIE_IBSSDFS == NULL)
+ pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
+ break;
+
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in beacon decode.\n", pItem->byElementID);
break;
}
@@ -295,7 +294,7 @@ vMgrEncodeDisassociation(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
@@ -322,7 +321,7 @@ vMgrDecodeDisassociation(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
@@ -347,7 +346,7 @@ vMgrEncodeAssocRequest(
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -376,47 +375,46 @@ vMgrDecodeAssocRequest(
PWLAN_IE pItem;
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_SSID);
while (((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID){
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in assocreq decode.\n",
- pItem->byElementID);
- break;
+ switch (pItem->byElementID) {
+ case WLAN_EID_SSID:
+ if (pFrame->pSSID == NULL)
+ pFrame->pSSID = (PWLAN_IE_SSID)pItem;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ if (pFrame->pSuppRates == NULL)
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+
+ case WLAN_EID_RSN:
+ if (pFrame->pRSN == NULL)
+ pFrame->pRSN = (PWLAN_IE_RSN)pItem;
+ break;
+ case WLAN_EID_RSN_WPA:
+ if (pFrame->pRSNWPA == NULL) {
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ }
+ break;
+ case WLAN_EID_EXTSUPP_RATES:
+ if (pFrame->pExtSuppRates == NULL)
+ pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in assocreq decode.\n",
+ pItem->byElementID);
+ break;
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
@@ -441,7 +439,7 @@ vMgrEncodeAssocResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -475,7 +473,7 @@ vMgrDecodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -483,7 +481,7 @@ vMgrDecodeAssocResponse(
pFrame->pwAid = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_AID);
- // Information elements
+ /* Information elements */
pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_SUPP_RATES);
@@ -493,8 +491,7 @@ vMgrDecodeAssocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
- }
- else {
+ } else {
pFrame->pExtSuppRates = NULL;
}
return;
@@ -519,7 +516,7 @@ vMgrEncodeReassocRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -552,7 +549,7 @@ vMgrDecodeReassocRequest(
PWLAN_IE pItem;
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -560,42 +557,41 @@ vMgrDecodeReassocRequest(
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_SSID);
- while(((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
-
- switch (pItem->byElementID){
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
+ while (((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in reassocreq decode.\n",
- pItem->byElementID);
- break;
+ switch (pItem->byElementID) {
+ case WLAN_EID_SSID:
+ if (pFrame->pSSID == NULL)
+ pFrame->pSSID = (PWLAN_IE_SSID)pItem;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ if (pFrame->pSuppRates == NULL)
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+
+ case WLAN_EID_RSN:
+ if (pFrame->pRSN == NULL)
+ pFrame->pRSN = (PWLAN_IE_RSN)pItem;
+ break;
+ case WLAN_EID_RSN_WPA:
+ if (pFrame->pRSNWPA == NULL) {
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ }
+ break;
+
+ case WLAN_EID_EXTSUPP_RATES:
+ if (pFrame->pExtSuppRates == NULL)
+ pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in reassocreq decode.\n",
+ pItem->byElementID);
+ break;
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
@@ -646,30 +642,30 @@ vMgrDecodeProbeRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)));
- while( ((PBYTE)pItem) < (pFrame->pBuf + pFrame->len) ) {
+ while (((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
+ case WLAN_EID_SSID:
+ if (pFrame->pSSID == NULL)
+ pFrame->pSSID = (PWLAN_IE_SSID)pItem;
+ break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
+ case WLAN_EID_SUPP_RATES:
+ if (pFrame->pSuppRates == NULL)
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
+ case WLAN_EID_EXTSUPP_RATES:
+ if (pFrame->pExtSuppRates == NULL)
+ pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in probereq\n", pItem->byElementID);
- break;
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in probereq\n", pItem->byElementID);
+ break;
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
@@ -697,7 +693,7 @@ vMgrEncodeProbeResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -734,7 +730,7 @@ vMgrDecodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_TS);
pFrame->pwBeaconInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -742,83 +738,82 @@ vMgrDecodeProbeResponse(
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_CAP_INFO);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_SSID);
- while( ((PBYTE)pItem) < (pFrame->pBuf + pFrame->len) ) {
+ while (((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
+ case WLAN_EID_SSID:
+ if (pFrame->pSSID == NULL)
pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
+ break;
+ case WLAN_EID_SUPP_RATES:
+ if (pFrame->pSuppRates == NULL)
pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
- case WLAN_EID_FH_PARMS:
- break;
- case WLAN_EID_DS_PARMS:
- if (pFrame->pDSParms == NULL)
- pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
- break;
- case WLAN_EID_CF_PARMS:
- if (pFrame->pCFParms == NULL)
- pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
- break;
- case WLAN_EID_IBSS_PARMS:
- if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL) {
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- }
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
- case WLAN_EID_ERP:
- if (pFrame->pERP == NULL)
- pFrame->pERP = (PWLAN_IE_ERP)pItem;
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_COUNTRY: //7
- if (pFrame->pIE_Country == NULL)
- pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
- break;
-
- case WLAN_EID_PWR_CONSTRAINT: //32
- if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
- break;
-
- case WLAN_EID_CH_SWITCH: //37
- if (pFrame->pIE_CHSW == NULL)
- pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
- break;
-
- case WLAN_EID_QUIET: //40
- if (pFrame->pIE_Quiet == NULL)
- pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
- break;
-
- case WLAN_EID_IBSS_DFS:
- if (pFrame->pIE_IBSSDFS == NULL)
- pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
- break;
-
- default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in proberesp\n", pItem->byElementID);
- break;
+ break;
+ case WLAN_EID_FH_PARMS:
+ break;
+ case WLAN_EID_DS_PARMS:
+ if (pFrame->pDSParms == NULL)
+ pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
+ break;
+ case WLAN_EID_CF_PARMS:
+ if (pFrame->pCFParms == NULL)
+ pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
+ break;
+ case WLAN_EID_IBSS_PARMS:
+ if (pFrame->pIBSSParms == NULL)
+ pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
+ break;
+
+ case WLAN_EID_RSN:
+ if (pFrame->pRSN == NULL)
+ pFrame->pRSN = (PWLAN_IE_RSN)pItem;
+ break;
+ case WLAN_EID_RSN_WPA:
+ if (pFrame->pRSNWPA == NULL) {
+ if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
+ pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ }
+ break;
+ case WLAN_EID_ERP:
+ if (pFrame->pERP == NULL)
+ pFrame->pERP = (PWLAN_IE_ERP)pItem;
+ break;
+ case WLAN_EID_EXTSUPP_RATES:
+ if (pFrame->pExtSuppRates == NULL)
+ pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ break;
+
+ case WLAN_EID_COUNTRY: /* 7 */
+ if (pFrame->pIE_Country == NULL)
+ pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
+ break;
+
+ case WLAN_EID_PWR_CONSTRAINT: /* 32 */
+ if (pFrame->pIE_PowerConstraint == NULL)
+ pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
+ break;
+
+ case WLAN_EID_CH_SWITCH: /* 37 */
+ if (pFrame->pIE_CHSW == NULL)
+ pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
+ break;
+
+ case WLAN_EID_QUIET: /* 40 */
+ if (pFrame->pIE_Quiet == NULL)
+ pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
+ break;
+
+ case WLAN_EID_IBSS_DFS:
+ if (pFrame->pIE_IBSSDFS == NULL)
+ pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
+ break;
+
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in proberesp\n", pItem->byElementID);
+ break;
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
@@ -845,7 +840,7 @@ vMgrEncodeAuthen(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwAuthAlgorithm = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_ALG);
pFrame->pwAuthSequence = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -853,7 +848,6 @@ vMgrEncodeAuthen(
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
-
return;
}
@@ -878,7 +872,7 @@ vMgrDecodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwAuthAlgorithm = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_ALG);
pFrame->pwAuthSequence = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -886,14 +880,12 @@ vMgrDecodeAuthen(
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_CHALLENGE);
- if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE)) {
+ if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE))
pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
- }
-
return;
}
@@ -916,11 +908,10 @@ vMgrEncodeDeauthen(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
-
return;
}
@@ -943,10 +934,9 @@ vMgrDecodeDeauthen(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
-
return;
}
@@ -969,7 +959,7 @@ vMgrEncodeReassocResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -978,7 +968,6 @@ vMgrEncodeReassocResponse(
+ WLAN_REASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
-
return;
}
@@ -1004,7 +993,7 @@ vMgrDecodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -1012,15 +1001,14 @@ vMgrDecodeReassocResponse(
pFrame->pwAid = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_AID);
- //Information elements
+ /* Information elements */
pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
- if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
+ if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES))
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- }
return;
}
diff --git a/drivers/staging/vt6656/80211mgr.h b/drivers/staging/vt6656/80211mgr.h
index c140a957d9d..3d57f793986 100644
--- a/drivers/staging/vt6656/80211mgr.h
+++ b/drivers/staging/vt6656/80211mgr.h
@@ -19,7 +19,7 @@
*
* File: 80211mgr.h
*
- * Purpose: 802.11 managment frames pre-defines.
+ * Purpose: 802.11 management frames pre-defines.
*
*
* Author: Lyndon Chen
@@ -222,46 +222,39 @@
#define MEASURE_MODE_INCAPABLE 0x02
#define MEASURE_MODE_REFUSED 0x04
-
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Types ------------------------------*/
-
// Information Element Types
#pragma pack(1)
typedef struct tagWLAN_IE {
BYTE byElementID;
BYTE len;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE, *PWLAN_IE;
-
// Service Set Identity (SSID)
#pragma pack(1)
typedef struct tagWLAN_IE_SSID {
BYTE byElementID;
BYTE len;
BYTE abySSID[1];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_SSID, *PWLAN_IE_SSID;
-
// Supported Rates
#pragma pack(1)
typedef struct tagWLAN_IE_SUPP_RATES {
BYTE byElementID;
BYTE len;
BYTE abyRates[1];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
-
-
// FH Parameter Set
#pragma pack(1)
typedef struct _WLAN_IE_FH_PARMS {
@@ -279,10 +272,9 @@ typedef struct tagWLAN_IE_DS_PARMS {
BYTE byElementID;
BYTE len;
BYTE byCurrChannel;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
-
// CF Parameter Set
#pragma pack(1)
typedef struct tagWLAN_IE_CF_PARMS {
@@ -292,10 +284,9 @@ typedef struct tagWLAN_IE_CF_PARMS {
BYTE byCFPPeriod;
WORD wCFPMaxDuration;
WORD wCFPDurRemaining;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
-
// TIM
#pragma pack(1)
typedef struct tagWLAN_IE_TIM {
@@ -305,30 +296,27 @@ typedef struct tagWLAN_IE_TIM {
BYTE byDTIMPeriod;
BYTE byBitMapCtl;
BYTE byVirtBitMap[1];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_TIM, *PWLAN_IE_TIM;
-
// IBSS Parameter Set
#pragma pack(1)
typedef struct tagWLAN_IE_IBSS_PARMS {
BYTE byElementID;
BYTE len;
WORD wATIMWindow;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
-
// Challenge Text
#pragma pack(1)
typedef struct tagWLAN_IE_CHALLENGE {
BYTE byElementID;
BYTE len;
BYTE abyChallenge[1];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_CHALLENGE, *PWLAN_IE_CHALLENGE;
-
#pragma pack(1)
typedef struct tagWLAN_IE_RSN_EXT {
BYTE byElementID;
@@ -391,10 +379,9 @@ typedef struct tagWLAN_IE_ERP {
BYTE byElementID;
BYTE len;
BYTE byContext;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_IE_ERP, *PWLAN_IE_ERP;
-
#pragma pack(1)
typedef struct _MEASEURE_REQ {
BYTE byChannel;
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index b3d367b9bdc..f7a3b8f8da7 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -106,7 +106,7 @@ BYTE dot3_table[256] = {
/*--------------------- Export Functions --------------------------*/
-void xor_128(BYTE *a, BYTE *b, BYTE *out)
+static void xor_128(BYTE *a, BYTE *b, BYTE *out)
{
PDWORD dwPtrA = (PDWORD) a;
PDWORD dwPtrB = (PDWORD) b;
@@ -119,7 +119,7 @@ void xor_128(BYTE *a, BYTE *b, BYTE *out)
}
-void xor_32(BYTE *a, BYTE *b, BYTE *out)
+static void xor_32(BYTE *a, BYTE *b, BYTE *out)
{
PDWORD dwPtrA = (PDWORD) a;
PDWORD dwPtrB = (PDWORD) b;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index d3de94f36c6..29902492975 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -989,10 +989,10 @@ BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
* Return Value: none
*
*/
-BOOL
-BBbVT3184Init (PSDevice pDevice)
+
+BOOL BBbVT3184Init(PSDevice pDevice)
{
- NTSTATUS ntStatus;
+ int ntStatus;
WORD wLength;
PBYTE pbyAddr;
PBYTE pbyAgc;
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index bc4633d5fea..8db8cd07d5f 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -104,16 +104,13 @@ BBuGetFrameTime(
WORD wRate
);
-void
-BBvCaculateParameter (
- PSDevice pDevice,
- unsigned int cbFrameLength,
- WORD wRate,
- BYTE byPacketType,
- PWORD pwPhyLen,
- PBYTE pbyPhySrv,
- PBYTE pbyPhySgn
- );
+void BBvCaculateParameter(PSDevice pDevice,
+ unsigned int cbFrameLength,
+ WORD wRate,
+ BYTE byPacketType,
+ PWORD pwPhyLen,
+ PBYTE pbyPhySrv,
+ PBYTE pbyPhySgn);
// timer for antenna diversity
@@ -128,7 +125,7 @@ void BBvSoftwareReset(PSDevice pDevice);
void BBvSetShortSlotTime(PSDevice pDevice);
void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
void BBvSetAntennaMode(PSDevice pDevice, BYTE byAntennaMode);
-BOOL BBbVT3184Init (PSDevice pDevice);
+BOOL BBbVT3184Init(PSDevice pDevice);
void BBvSetDeepSleep(PSDevice pDevice);
void BBvExitDeepSleep(PSDevice pDevice);
void BBvUpdatePreEDThreshold(
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index 36ed61b595c..a9f68bd5afa 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -93,10 +93,7 @@ const WORD awHWRetry1[5][5] = {
void s_vCheckSensitivity(void *hDeviceContext);
void s_vCheckPreEDThreshold(void *hDeviceContext);
-
-#ifdef Calcu_LinkQual
void s_uCalculateLinkQual(void *hDeviceContext);
-#endif
/*--------------------- Export Variables --------------------------*/
@@ -135,7 +132,7 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList BSSID[%02X %02X %02X-%02X %02X %02X]\n",
*pbyDesireBSSID,*(pbyDesireBSSID+1),*(pbyDesireBSSID+2),
*(pbyDesireBSSID+3),*(pbyDesireBSSID+4),*(pbyDesireBSSID+5));
- if ((!IS_BROADCAST_ADDRESS(pbyDesireBSSID)) &&
+ if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
(memcmp(pbyDesireBSSID, ZeroBSSID, 6)!= 0)){
pbyBSSID = pbyDesireBSSID;
}
@@ -156,7 +153,7 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
if ((pCurrBSS->bActive) &&
(pCurrBSS->bSelected == FALSE)) {
- if (IS_ETH_ADDRESS_EQUAL(pCurrBSS->abyBSSID, pbyBSSID)) {
+ if (!compare_ether_addr(pCurrBSS->abyBSSID, pbyBSSID)) {
if (pSSID != NULL) {
// compare ssid
if ( !memcmp(pSSID->abySSID,
@@ -296,7 +293,8 @@ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (bKeepCurrBSSID) {
if (pMgmt->sBSSList[ii].bActive &&
- IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyCurrBSSID)) {
+ !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyCurrBSSID)) {
//mike mark: the same BSSID can show up twice in the list when the AP runs in
// hidden-SSID mode (one entry's SSID is null, the other's is visible); if that
// AP is the one this STA associates with, both entries must be kept!
@@ -341,7 +339,7 @@ PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext,
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSSList = &(pMgmt->sBSSList[ii]);
if (pBSSList->bActive) {
- if (IS_ETH_ADDRESS_EQUAL(pBSSList->abyBSSID, abyBSSID)) {
+ if (!compare_ether_addr(pBSSList->abyBSSID, abyBSSID)) {
if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){
if (memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
@@ -699,12 +697,14 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
ldBmSum = 0;
- for(ii=0, jj=0;ii<RSSI_STAT_COUNT;ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm);
- ldBmSum += pBSSList->ldBmAverage[ii];
- jj++;
- }
+ for (ii = 0, jj = 0; ii < RSSI_STAT_COUNT; ii++) {
+ if (pBSSList->ldBmAverage[ii] != 0) {
+ pBSSList->ldBmMAX =
+ max(pBSSList->ldBmAverage[ii], ldBm);
+ ldBmSum +=
+ pBSSList->ldBmAverage[ii];
+ jj++;
+ }
}
pBSSList->ldBmAverRange = ldBmSum /jj;
}
@@ -714,28 +714,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-//mike add: if the AP in this pBSSList is hidden ssid and we can find two of them,
-// you need upgrade the other related pBSSList of which ssid is obvious,
-// for these two AP is the same one!!!!
-/********judge by:BSSID is the same,but ssid is different!*****************/
-#if 0
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pBSSList->abyBSSID)) { //BSSID is the same!
- if (memcmp(((PWLAN_IE_SSID)pMgmt->sBSSList[ii].abySSID)->abySSID, //ssid is different??
- ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
- ((PWLAN_IE_SSID)pBSSList->abySSID)->len) != 0) {
- //reserve temp
- memset(abyTmpSSID,0,sizeof(abyTmpSSID));
- memcpy(abyTmpSSID,pMgmt->sBSSList[ii].abySSID,sizeof(abyTmpSSID));
- //upgrade the other one pBSSList
- memcpy(&(pMgmt->sBSSList[ii]),pBSSList,sizeof(KnownBSS));
- //recover ssid info
- memcpy(pMgmt->sBSSList[ii].abySSID,abyTmpSSID,sizeof(abyTmpSSID));
- }
- }
- }
-#endif
-
return TRUE;
}
@@ -755,7 +733,7 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
PBYTE abyDstAddr,
- PUINT puNodeIndex)
+ unsigned int *puNodeIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -764,7 +742,8 @@ BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
// Index = 0 reserved for AP Node
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- if (IS_ETH_ADDRESS_EQUAL(abyDstAddr, pMgmt->sNodeDBTable[ii].abyMACAddr)) {
+ if (!compare_ether_addr(abyDstAddr,
+ pMgmt->sNodeDBTable[ii].abyMACAddr)) {
*puNodeIndex = ii;
return TRUE;
}
@@ -786,7 +765,7 @@ BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
* None
*
-*/
-void BSSvCreateOneNode(void *hDeviceContext, PUINT puNodeIndex)
+void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1023,7 +1002,6 @@ if(pDevice->byReAssocCount > 0) {
pDevice->byReAssocCount = 0;
}
-#ifdef SndEvt_ToAPI
if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
(pMgmt->eLastState==WMAC_STATE_ASSOC))
{
@@ -1033,11 +1011,8 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
}
pMgmt->eLastState = pMgmt->eCurrState ;
-#endif
-#ifdef Calcu_LinkQual
s_uCalculateLinkQual((void *)pDevice);
-#endif
for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
@@ -1422,21 +1397,25 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
(wRate < RATE_18M) ) {
pMgmt->sNodeDBTable[0].uTxFail[wRate]+=byTxRetry;
} else if (byFallBack == AUTO_FB_0) {
- for(ii=0;ii<byTxRetry;ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
- }
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ }
} else if (byFallBack == AUTO_FB_1) {
- for(ii=0;ii<byTxRetry;ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
- }
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ }
}
}
};
@@ -1476,21 +1455,23 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
(wRate < RATE_18M) ) {
pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate]+=byTxRetry;
} else if (byFallBack == AUTO_FB_0) {
- for(ii=0;ii<byTxRetry;ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
}
} else if (byFallBack == AUTO_FB_1) {
- for(ii=0;ii<byTxRetry;ii++) {
- if (ii < 5)
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
+ else
wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
+ }
}
}
};
@@ -1587,7 +1568,6 @@ void s_vCheckSensitivity(void *hDeviceContext)
}
}
-#ifdef Calcu_LinkQual
void s_uCalculateLinkQual(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1632,7 +1612,6 @@ else
pDevice->scStatistic.TxRetryOkCount = 0;
return;
}
-#endif
void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
{
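The bssdb.c hunks above drop the driver's private IS_BROADCAST_ADDRESS()/IS_ETH_ADDRESS_EQUAL() macros in favour of the generic helpers from <linux/etherdevice.h>. Note the inverted sense: compare_ether_addr() returns 0 when the two addresses are equal, which is why every converted test gains a leading '!'. A small usage sketch (the helper below is hypothetical):

#include <linux/types.h>
#include <linux/etherdevice.h>

/* Hypothetical helper showing the semantics the converted tests rely on. */
static bool bss_addr_matches(const unsigned char *dst, const unsigned char *entry)
{
        if (is_broadcast_ether_addr(dst))       /* ff:ff:ff:ff:ff:ff */
                return true;
        /* compare_ether_addr() is memcmp-like: 0 means "equal" */
        return !compare_ether_addr(dst, entry);
}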
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index 9686d8600d6..a8f97ebb659 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -40,7 +40,7 @@
#define MAX_NODE_NUM 64
#define MAX_BSS_NUM 42
-#define LOST_BEACON_COUNT 10 // 10 sec, XP defined
+#define LOST_BEACON_COUNT 10 /* 10 sec, XP defined */
#define MAX_PS_TX_BUF 32 // sta max power saving tx buf
#define ADHOC_LOST_BEACON_COUNT 30 // 30 sec, beacon lost for adhoc only
#define MAX_INACTIVE_COUNT 300 // 300 sec, inactive STA node refresh
@@ -83,13 +83,13 @@
typedef struct tagSERPObject {
BOOL bERPExist;
BYTE byERP;
-}ERPObject, *PERPObject;
+} ERPObject, *PERPObject;
typedef struct tagSRSNCapObject {
BOOL bRSNCapExist;
WORD wRSNCap;
-}SRSNCapObject, *PSRSNCapObject;
+} SRSNCapObject, *PSRSNCapObject;
// BSS info(AP)
#pragma pack(1)
@@ -153,7 +153,7 @@ typedef struct tagKnownBSS {
SRSNCapObject sRSNCapObj;
BYTE abyIEs[1024]; // don't move this field !!
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
KnownBSS , *PKnownBSS;
@@ -278,9 +278,9 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
PBYTE abyDstAddr,
- PUINT puNodeIndex);
+ unsigned int *puNodeIndex);
-void BSSvCreateOneNode(void *hDeviceContext, PUINT puNodeIndex);
+void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex);
void BSSvUpdateAPNode(void *hDeviceContext,
PWORD pwCapInfo,
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index fe4ec913ffe..35bf4fda330 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -457,12 +457,11 @@ void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType)
abyData[14] = abySignal[3];
abyData[15] = abyServ[3];
- for(i=0;i<9;i++) {
- abyData[16+i*2] = abyTxRate[i];
- abyData[16+i*2+1] = abyRsvTime[i];
+ for (i = 0; i < 9; i++) {
+ abyData[16+i*2] = abyTxRate[i];
+ abyData[16+i*2+1] = abyRsvTime[i];
}
-
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
MAC_REG_RSPINF_B_1,
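The CARDvSetRSPINF() loop above packs two parallel tables into one buffer, placing each rate byte at an even offset and its reservation-time byte at the following odd offset before the block is written out over the control pipe. A stand-alone sketch of that interleaving; the array names, sizes and values here are illustrative, not the driver's tables.

#include <stdio.h>

int main(void)
{
	unsigned char tx_rate[9]  = { 1, 2, 5, 11, 6, 9, 12, 18, 24 };
	unsigned char rsv_time[9] = { 20, 18, 16, 14, 12, 10, 8, 6, 4 };
	unsigned char data[16 + 9 * 2] = { 0 };	/* 16-byte header + 9 pairs */
	int i;

	/* even slot: rate, odd slot: the matching reservation time */
	for (i = 0; i < 9; i++) {
		data[16 + i * 2]     = tx_rate[i];
		data[16 + i * 2 + 1] = rsv_time[i];
	}

	for (i = 16; i < (int)sizeof(data); i += 2)
		printf("rate %u, rsv %u\n", data[i], data[i + 1]);
	return 0;
}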
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index f49b6e13339..6ad03e492ed 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -441,11 +441,10 @@ void CHvInitChannelTable(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bMultiBand = FALSE;
- unsigned int ii;
+ unsigned int ii;
- for(ii=1;ii<=CB_MAX_CHANNEL;ii++) {
- sChannelTbl[ii].bValid = FALSE;
- }
+ for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
+ sChannelTbl[ii].bValid = FALSE;
switch (pDevice->byRFType) {
case RF_AL2230:
@@ -464,43 +463,43 @@ void CHvInitChannelTable(void *pDeviceHandler)
if ((pDevice->dwDiagRefCount != 0) ||
(pDevice->b11hEable == TRUE)) {
if (bMultiBand == TRUE) {
- for(ii=0;ii<CB_MAX_CHANNEL;ii++) {
- sChannelTbl[ii+1].bValid = TRUE;
+ for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
+ sChannelTbl[ii+1].bValid = TRUE;
//pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
//pDevice->abyLocalPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
- }
- for(ii=0;ii<CB_MAX_CHANNEL_24G;ii++) {
+ }
+ for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
//pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
//pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
- }
+ }
} else {
- for(ii=0;ii<CB_MAX_CHANNEL_24G;ii++) {
- sChannelTbl[ii+1].bValid = TRUE;
+ for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
+ sChannelTbl[ii+1].bValid = TRUE;
//pDevice->abyRegPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
//pDevice->abyLocalPwr[ii+1] = pDevice->abyCCKDefaultPwr[ii+1];
- }
+ }
}
} else if (pDevice->byZoneType <= CCODE_MAX) {
if (bMultiBand == TRUE) {
- for(ii=0;ii<CB_MAX_CHANNEL;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
+ for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
+ sChannelTbl[ii+1].bValid = TRUE;
//pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
//pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
+ }
+ }
} else {
- for(ii=0;ii<CB_MAX_CHANNEL_24G;ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii+1].bValid = TRUE;
+ for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
+ if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
+ sChannelTbl[ii+1].bValid = TRUE;
//pDevice->abyRegPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
//pDevice->abyLocalPwr[ii+1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
+ }
+ }
}
}
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO"Zone=[%d][%c][%c]!!\n",pDevice->byZoneType,ChannelRuleTab[pDevice->byZoneType].chCountryCode[0],ChannelRuleTab[pDevice->byZoneType].chCountryCode[1]);
- for(ii=0;ii<CB_MAX_CHANNEL;ii++) {
+ for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Channel[%d] is [%d]\n",sChannelTbl[ii].byChannelNumber,sChannelTbl[ii+1].bValid);
/*if (pDevice->abyRegPwr[ii+1] == 0) {
pDevice->abyRegPwr[ii+1] = pDevice->abyOFDMDefaultPwr[ii+1];
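CHvInitChannelTable() first clears every entry, then marks channels valid either unconditionally (diagnostic or 802.11h mode) or from the per-zone ChannelRuleTab list, with the 2.4 GHz-only path stopping at CB_MAX_CHANNEL_24G. A compact sketch of the rule-table idea; the zone names, channel count and table contents below are invented for illustration.

#include <stdio.h>

#define MAX_CH      14		/* 2.4 GHz channels in this sketch */
#define ZONE_USA     0
#define ZONE_EUROPE  1

struct channel_entry {
	unsigned char number;
	int valid;
};

/* 1 = channel allowed in that zone, 0 = not allowed (illustrative data) */
static const unsigned char zone_rules[2][MAX_CH] = {
	[ZONE_USA]    = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
	[ZONE_EUROPE] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
};

int main(void)
{
	struct channel_entry tbl[MAX_CH + 1];	/* index 0 unused, as in the driver */
	int zone = ZONE_EUROPE, ii;

	for (ii = 1; ii <= MAX_CH; ii++)
		tbl[ii].valid = 0;

	for (ii = 0; ii < MAX_CH; ii++) {
		tbl[ii + 1].number = (unsigned char)(ii + 1);
		if (zone_rules[zone][ii])
			tbl[ii + 1].valid = 1;
	}

	for (ii = 1; ii <= MAX_CH; ii++)
		printf("channel %2u valid=%d\n", tbl[ii].number, tbl[ii].valid);
	return 0;
}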
diff --git a/drivers/staging/vt6656/channel.h b/drivers/staging/vt6656/channel.h
index 91c2ffc6f1f..e7b3c123182 100644
--- a/drivers/staging/vt6656/channel.h
+++ b/drivers/staging/vt6656/channel.h
@@ -35,23 +35,21 @@
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Classes ----------------------------*/
+
typedef struct tagSChannelTblElement {
BYTE byChannelNumber;
unsigned int uFrequency;
BOOL bValid;
-}SChannelTblElement, *PSChannelTblElement;
+} SChannelTblElement, *PSChannelTblElement;
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
+
BOOL ChannelValid(unsigned int CountryCode, unsigned int ChannelNum);
void CHvInitChannelTable(void *pDeviceHandler);
BYTE CHbyGetChannelMapping(BYTE byChannelNumber);
-BOOL
-CHvChannelGetList (
- unsigned int uCountryCodeIdx,
- PBYTE pbyChannelTable
- );
+BOOL CHvChannelGetList(unsigned int uCountryCodeIdx, PBYTE pbyChannelTable);
-#endif /* _REGULATE_H_ */
+#endif /* _CHANNEL_H_ */
diff --git a/drivers/staging/vt6656/control.c b/drivers/staging/vt6656/control.c
index 8aab6718ff4..5d8c5719419 100644
--- a/drivers/staging/vt6656/control.c
+++ b/drivers/staging/vt6656/control.c
@@ -72,7 +72,7 @@ void ControlvWriteByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
void ControlvReadByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
PBYTE pbyData)
{
- NTSTATUS ntStatus;
+ int ntStatus;
BYTE byData1;
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
diff --git a/drivers/staging/vt6656/control.h b/drivers/staging/vt6656/control.h
index 146b450e13d..bbe610fd8b5 100644
--- a/drivers/staging/vt6656/control.h
+++ b/drivers/staging/vt6656/control.h
@@ -36,16 +36,14 @@
/*--------------------- Export Definitions -------------------------*/
+#define CONTROLnsRequestOut(Device, Request, Value, Index, Length, Buffer) \
+ PIPEnsControlOut(Device, Request, Value, Index, Length, Buffer)
-#define CONTROLnsRequestOut( Device,Request,Value,Index,Length,Buffer) \
- PIPEnsControlOut( Device,Request,Value,Index,Length,Buffer)
-
-#define CONTROLnsRequestOutAsyn( Device,Request,Value,Index,Length,Buffer) \
- PIPEnsControlOutAsyn( Device,Request,Value,Index,Length,Buffer)
-
-#define CONTROLnsRequestIn( Device,Request,Value,Index,Length,Buffer) \
- PIPEnsControlIn( Device,Request,Value,Index,Length,Buffer)
+#define CONTROLnsRequestOutAsyn(Device, Request, Value, Index, Length, Buffer) \
+ PIPEnsControlOutAsyn(Device, Request, Value, Index, Length, Buffer)
+#define CONTROLnsRequestIn(Device, Request, Value, Index, Length, Buffer) \
+ PIPEnsControlIn(Device, Request, Value, Index, Length, Buffer)
/*--------------------- Export Classes ----------------------------*/
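The control.h change only reflows the pass-through macros that map CONTROLnsRequestOut/In onto the PIPEnsControl* transfer routines, adding the conventional space after each comma. A generic sketch of such a thin forwarding macro; the function and macro names below are placeholders, not the driver's USB pipe API.

#include <stdio.h>

/* pretend low-level transfer routine standing in for PIPEnsControlOut() */
static int pipe_control_out_sketch(int request, int value, int index,
				   int length, void *buffer)
{
	printf("req=%d val=%d idx=%d len=%d buf=%p\n",
	       request, value, index, length, buffer);
	return 0;
}

/* thin forwarding macro in the same style as CONTROLnsRequestOut() */
#define CONTROL_REQUEST_OUT_SKETCH(Request, Value, Index, Length, Buffer) \
	pipe_control_out_sketch(Request, Value, Index, Length, Buffer)

int main(void)
{
	unsigned char payload[4] = { 1, 2, 3, 4 };

	return CONTROL_REQUEST_OUT_SKETCH(0x40, 0, 0, sizeof(payload), payload);
}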
diff --git a/drivers/staging/vt6656/datarate.c b/drivers/staging/vt6656/datarate.c
index 2e183ddbfd0..5c2719fa72f 100644
--- a/drivers/staging/vt6656/datarate.c
+++ b/drivers/staging/vt6656/datarate.c
@@ -72,7 +72,7 @@ void s_vResetCounter(PKnownNodeDB psNodeDBTable)
BYTE ii;
// clear statistic counter for auto_rate
- for(ii=0;ii<=MAX_RATE;ii++) {
+ for (ii = 0; ii <= MAX_RATE; ii++) {
psNodeDBTable->uTxOk[ii] = 0;
psNodeDBTable->uTxFail[ii] = 0;
}
@@ -309,7 +309,6 @@ RATEvTxRateFallBack(
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-#if 1 //mike fixed old: use packet lose ratio algorithm to control rate
WORD wIdxDownRate = 0;
unsigned int ii;
BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
@@ -337,7 +336,7 @@ DWORD dwTxDiff = 0;
psNodeDBTable->uTimeCount = 0;
}
- for(ii=0;ii<MAX_RATE;ii++) {
+ for (ii = 0; ii < MAX_RATE; ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
if (bAutoRate[ii] == TRUE) {
wIdxUpRate = (WORD) ii;
@@ -347,7 +346,7 @@ DWORD dwTxDiff = 0;
}
}
- for(ii=0;ii<=psNodeDBTable->wTxDataRate;ii++) {
+ for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
if ( (psNodeDBTable->uTxOk[ii] != 0) ||
(psNodeDBTable->uTxFail[ii] != 0) ) {
dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
@@ -362,7 +361,7 @@ DWORD dwTxDiff = 0;
dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
wIdxDownRate = psNodeDBTable->wTxDataRate;
- for(ii = psNodeDBTable->wTxDataRate; ii > 0;) {
+ for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
if ( (dwThroughputTbl[ii] > dwThroughput) &&
(bAutoRate[ii]==TRUE) ) {
@@ -389,66 +388,6 @@ DWORD dwTxDiff = 0;
s_vResetCounter(psNodeDBTable);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", (int)psNodeDBTable->wTxDataRate, (int)wIdxUpRate, (int)wIdxDownRate);
return;
-#else //mike fixed new: use differ-signal strength to control rate
-WORD wIdxUpRate = 0;
-BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
-unsigned int ii;
-long ldBm;
-
- if (pMgmt->eScanState != WMAC_NO_SCANNING) {
- // Don't do Fallback when scanning Channel
- return;
- }
-
- for(ii=0;ii<MAX_RATE;ii++) {
- if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii] == TRUE) {
- wIdxUpRate = (WORD) ii;
- }
- } else {
- bAutoRate[ii] = FALSE;
- }
- }
-
- RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
-
- if (ldBm > -55) {
- if ( psNodeDBTable->wSuppRate & (0x0001<<RATE_54M) ) //11a/g
- {
- psNodeDBTable->wTxDataRate = RATE_54M;
- }
- else{ //11b
- psNodeDBTable->wTxDataRate = RATE_11M;
- }
- }
-
-if (wIdxUpRate == RATE_54M ) { //11a/g
- if (ldBm > -56 )
- psNodeDBTable->wTxDataRate = RATE_54M;
- else if (ldBm > -61 )
- psNodeDBTable->wTxDataRate = RATE_48M;
- else if (ldBm > -66 )
- psNodeDBTable->wTxDataRate = RATE_36M;
- else if (ldBm > -72 )
- psNodeDBTable->wTxDataRate = RATE_24M;
- else if (ldBm > -80 )
- psNodeDBTable->wTxDataRate = RATE_5M;
- else {
- psNodeDBTable->wTxDataRate = RATE_1M;
- //increasingVGA = TRUE;
- }
- }
- else { //11b
- if (ldBm > -65 )
- psNodeDBTable->wTxDataRate = RATE_11M;
- else if (ldBm > -75 )
- psNodeDBTable->wTxDataRate = RATE_5M;
- else
- psNodeDBTable->wTxDataRate = RATE_1M;
- }
-
- return;
-#endif
}
/*+
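RATEvTxRateFallBack() keeps the packet-loss algorithm: each supported rate accumulates uTxOk/uTxFail counters, an estimated throughput is derived from the success ratio, and the driver moves to whichever rate scores best; the removed #else branch instead picked rates from fixed RSSI thresholds. A simplified, self-contained sketch of success-ratio weighting follows — the weights, counters and rate values are illustrative, not the driver's dwThroughputTbl contents.

#include <stdio.h>

#define NUM_RATES 4

int main(void)
{
	/* nominal rate value in units of 500 kbit/s (illustrative) */
	const unsigned int rate_val[NUM_RATES] = { 2, 4, 11, 22 };
	const unsigned int tx_ok[NUM_RATES]    = { 50, 48, 30, 5 };
	const unsigned int tx_fail[NUM_RATES]  = { 0, 2, 20, 45 };
	unsigned int best = 0, best_score = 0, ii;

	for (ii = 0; ii < NUM_RATES; ii++) {
		unsigned int total = tx_ok[ii] + tx_fail[ii];
		/* weight the nominal rate by its observed success ratio */
		unsigned int score = total ? rate_val[ii] * tx_ok[ii] / total : 0;

		printf("rate[%u]: score %u\n", ii, score);
		if (score > best_score) {
			best_score = score;
			best = ii;
		}
	}

	printf("select rate index %u\n", best);
	return 0;
}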
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 07f794ec6db..767112b3c4a 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -51,7 +51,6 @@
#define MAX_INTERRUPT_SIZE 32
-
#define RX_BLOCKS 64 // form 0x60 to 0xA0
#define TX_BLOCKS 32 // from 0xA0 to 0xC0
@@ -63,8 +62,6 @@
#define CB_RD_NUM 64 // default # of RD
#define CB_TD_NUM 64 // default # of TD
-
-
//
// Bits in the RSR register
//
@@ -87,7 +84,6 @@
#define NEWRSR_BCNHITAID 0x02 // 0000 0010
#define NEWRSR_BCNHITAID0 0x01 // 0000 0001
-
//
// Bits in the TSR register
//
@@ -96,17 +92,13 @@
#define TSR_ACKDATA 0x02 // 0000 0010
#define TSR_VALID 0x01 // 0000 0001
-
#define CB_PROTOCOL_RESERVED_SECTION 16
-
-
// if retrys excess 15 times , tx will abort, and
// if tx fifo underflow, tx will fail
// we should try to resend it
#define CB_MAX_TX_ABORT_RETRY 3
-
#define FIFOCTL_AUTO_FB_1 0x1000 // 0001 0000 0000 0000
#define FIFOCTL_AUTO_FB_0 0x0800 // 0000 1000 0000 0000
#define FIFOCTL_GRPACK 0x0400 // 0000 0100 0000 0000
@@ -137,7 +129,6 @@
#define FRAGCTL_STAFRAG 0x0001 // 0000 0000 0000 0001
#define FRAGCTL_NONFRAG 0x0000 // 0000 0000 0000 0000
-
//#define TYPE_AC0DMA 0
//#define TYPE_TXDMA0 1
#define TYPE_TXDMA0 0
@@ -152,8 +143,6 @@
#define TYPE_RXDMA1 1
#define TYPE_MAXRD 2
-
-
// TD_INFO flags control bit
#define TD_FLAGS_NETIF_SKB 0x01 // check if need release skb
#define TD_FLAGS_PRIV_SKB 0x02 // check if called from private skb(hostap)
@@ -162,7 +151,6 @@
/*--------------------- Export Types ------------------------------*/
-
//
// RsvTime buffer header
//
@@ -173,8 +161,9 @@ typedef struct tagSRrvTime_gRTS {
WORD wReserved;
WORD wTxRrvTime_b;
WORD wTxRrvTime_a;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRrvTime_gRTS, *PSRrvTime_gRTS;
+
typedef const SRrvTime_gRTS *PCSRrvTime_gRTS;
typedef struct tagSRrvTime_gCTS {
@@ -182,22 +171,25 @@ typedef struct tagSRrvTime_gCTS {
WORD wReserved;
WORD wTxRrvTime_b;
WORD wTxRrvTime_a;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRrvTime_gCTS, *PSRrvTime_gCTS;
+
typedef const SRrvTime_gCTS *PCSRrvTime_gCTS;
typedef struct tagSRrvTime_ab {
WORD wRTSTxRrvTime;
WORD wTxRrvTime;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRrvTime_ab, *PSRrvTime_ab;
+
typedef const SRrvTime_ab *PCSRrvTime_ab;
typedef struct tagSRrvTime_atim {
WORD wCTSTxRrvTime_ba;
WORD wTxRrvTime_a;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRrvTime_atim, *PSRrvTime_atim;
+
typedef const SRrvTime_atim *PCSRrvTime_atim;
//
@@ -208,8 +200,9 @@ typedef struct tagSRTSData {
WORD wDurationID;
BYTE abyRA[ETH_ALEN];
BYTE abyTA[ETH_ALEN];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRTSData, *PSRTSData;
+
typedef const SRTSData *PCSRTSData;
typedef struct tagSRTS_g {
@@ -224,11 +217,10 @@ typedef struct tagSRTS_g {
WORD wDuration_bb;
WORD wReserved;
SRTSData Data;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRTS_g, *PSRTS_g;
typedef const SRTS_g *PCSRTS_g;
-
typedef struct tagSRTS_g_FB {
BYTE bySignalField_b;
BYTE byServiceField_b;
@@ -245,10 +237,10 @@ typedef struct tagSRTS_g_FB {
WORD wRTSDuration_ba_f1;
WORD wRTSDuration_aa_f1;
SRTSData Data;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRTS_g_FB, *PSRTS_g_FB;
-typedef const SRTS_g_FB *PCSRTS_g_FB;
+typedef const SRTS_g_FB *PCSRTS_g_FB;
typedef struct tagSRTS_ab {
BYTE bySignalField;
@@ -257,10 +249,10 @@ typedef struct tagSRTS_ab {
WORD wDuration;
WORD wReserved;
SRTSData Data;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRTS_ab, *PSRTS_ab;
-typedef const SRTS_ab *PCSRTS_ab;
+typedef const SRTS_ab *PCSRTS_ab;
typedef struct tagSRTS_a_FB {
BYTE bySignalField;
@@ -271,8 +263,9 @@ typedef struct tagSRTS_a_FB {
WORD wRTSDuration_f0;
WORD wRTSDuration_f1;
SRTSData Data;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SRTS_a_FB, *PSRTS_a_FB;
+
typedef const SRTS_a_FB *PCSRTS_a_FB;
@@ -284,7 +277,7 @@ typedef struct tagSCTSData {
WORD wDurationID;
BYTE abyRA[ETH_ALEN];
WORD wReserved;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SCTSData, *PSCTSData;
typedef struct tagSCTS {
@@ -294,8 +287,9 @@ typedef struct tagSCTS {
WORD wDuration_ba;
WORD wReserved;
SCTSData Data;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SCTS, *PSCTS;
+
typedef const SCTS *PCSCTS;
typedef struct tagSCTS_FB {
@@ -307,10 +301,10 @@ typedef struct tagSCTS_FB {
WORD wCTSDuration_ba_f0;
WORD wCTSDuration_ba_f1;
SCTSData Data;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SCTS_FB, *PSCTS_FB;
-typedef const SCTS_FB *PCSCTS_FB;
+typedef const SCTS_FB *PCSCTS_FB;
//
// Tx FIFO header
@@ -321,14 +315,14 @@ typedef struct tagSTxBufHead {
WORD wTimeStamp;
WORD wFragCtl;
WORD wReserved;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
STxBufHead, *PSTxBufHead;
typedef const STxBufHead *PCSTxBufHead;
typedef struct tagSTxShortBufHead {
WORD wFIFOCtl;
WORD wTimeStamp;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
STxShortBufHead, *PSTxShortBufHead;
typedef const STxShortBufHead *PCSTxShortBufHead;
@@ -346,8 +340,9 @@ typedef struct tagSTxDataHead_g {
WORD wDuration_a;
WORD wTimeStampOff_b;
WORD wTimeStampOff_a;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
STxDataHead_g, *PSTxDataHead_g;
+
typedef const STxDataHead_g *PCSTxDataHead_g;
typedef struct tagSTxDataHead_g_FB {
@@ -363,22 +358,20 @@ typedef struct tagSTxDataHead_g_FB {
WORD wDuration_a_f1;
WORD wTimeStampOff_b;
WORD wTimeStampOff_a;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
STxDataHead_g_FB, *PSTxDataHead_g_FB;
typedef const STxDataHead_g_FB *PCSTxDataHead_g_FB;
-
typedef struct tagSTxDataHead_ab {
BYTE bySignalField;
BYTE byServiceField;
WORD wTransmitLength;
WORD wDuration;
WORD wTimeStampOff;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
STxDataHead_ab, *PSTxDataHead_ab;
typedef const STxDataHead_ab *PCSTxDataHead_ab;
-
typedef struct tagSTxDataHead_a_FB {
BYTE bySignalField;
BYTE byServiceField;
@@ -387,7 +380,7 @@ typedef struct tagSTxDataHead_a_FB {
WORD wTimeStampOff;
WORD wDuration_f0;
WORD wDuration_f1;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
STxDataHead_a_FB, *PSTxDataHead_a_FB;
typedef const STxDataHead_a_FB *PCSTxDataHead_a_FB;
@@ -398,23 +391,23 @@ typedef struct tagSMICHDRHead {
DWORD adwHDR0[4];
DWORD adwHDR1[4];
DWORD adwHDR2[4];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SMICHDRHead, *PSMICHDRHead;
+
typedef const SMICHDRHead *PCSMICHDRHead;
typedef struct tagSBEACONCtl {
DWORD BufReady : 1;
- DWORD TSF : 15;
- DWORD BufLen : 11;
+ DWORD TSF : 15;
+ DWORD BufLen : 11;
DWORD Reserved : 5;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SBEACONCtl;
-
typedef struct tagSSecretKey {
DWORD dwLowDword;
BYTE byHighByte;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SSecretKey;
typedef struct tagSKeyEntry {
@@ -426,7 +419,7 @@ typedef struct tagSKeyEntry {
DWORD dwKey2[4];
DWORD dwKey3[4];
DWORD dwKey4[4];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SKeyEntry;
/*--------------------- Export Macros ------------------------------*/
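All of the descriptor structures in desc.h are declared __attribute__ ((__packed__)) because they mirror byte-exact hardware layouts; the hunks above only normalize the brace spacing around that attribute. A minimal sketch of why packing matters, using the same two fields as the SSecretKey struct in the hunk; the 8-versus-5-byte result assumes a typical ABI with 4-byte alignment for 32-bit integers.

#include <stdio.h>

typedef unsigned char BYTE;
typedef unsigned int  DWORD;

/* same fields as the driver's SSecretKey, but without the attribute */
struct secret_key_plain {
	DWORD dwLowDword;
	BYTE  byHighByte;
};

/* packed, as desc.h declares it */
struct __attribute__((__packed__)) secret_key_packed {
	DWORD dwLowDword;
	BYTE  byHighByte;
};

int main(void)
{
	/* the plain form is padded out to 8 bytes; the packed form is the
	 * 5 bytes the hardware layout expects */
	printf("plain:  %zu bytes\n", sizeof(struct secret_key_plain));
	printf("packed: %zu bytes\n", sizeof(struct secret_key_packed));
	return 0;
}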
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index ef9fd97d3ca..b9852aa22c0 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -71,9 +71,6 @@
#define WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#endif
-//2007-0920-01<Add>by MikeLiu
-#ifndef SndEvt_ToAPI
-#define SndEvt_ToAPI
//please copy below macro to driver_event.c for API
#define RT_INSMOD_EVENT_FLAG 0x0101
#define RT_UPDEV_EVENT_FLAG 0x0102
@@ -81,7 +78,6 @@
#define RT_WPACONNECTED_EVENT_FLAG 0x0104
#define RT_DOWNDEV_EVENT_FLAG 0x0105
#define RT_RMMOD_EVENT_FLAG 0x0106
-#endif
//
// device specific
@@ -109,7 +105,6 @@
#define MAX_MULTICAST_ADDRESS_NUM 32
#define MULTICAST_ADDRESS_LIST_SIZE (MAX_MULTICAST_ADDRESS_NUM * ETH_ALEN)
-
//#define OP_MODE_INFRASTRUCTURE 0
//#define OP_MODE_ADHOC 1
//#define OP_MODE_AP 2
@@ -130,8 +125,6 @@
#define KEYSEL_TKIP 2
#define KEYSEL_CCMP 3
-
-
#define AUTO_FB_NONE 0
#define AUTO_FB_0 1
#define AUTO_FB_1 2
@@ -162,8 +155,6 @@
#define BB_VGA_LEVEL 4
#define BB_VGA_CHANGE_THRESHOLD 3
-
-
#ifndef RUN_AT
#define RUN_AT(x) (jiffies+(x))
#endif
@@ -175,24 +166,23 @@
/*--------------------- Export Types ------------------------------*/
-#define DBG_PRT(l, p, args...) {if (l<=msglevel) printk( p ,##args);}
-#define PRINT_K(p, args...) {if (PRIVATE_Message) printk( p ,##args);}
+#define DBG_PRT(l, p, args...) { if (l <= msglevel) printk(p, ##args); }
+#define PRINT_K(p, args...) { if (PRIVATE_Message) printk(p, ##args); }
typedef enum __device_msg_level {
- MSG_LEVEL_ERR=0, //Errors that will cause abnormal operation.
- MSG_LEVEL_NOTICE=1, //Some errors need users to be notified.
- MSG_LEVEL_INFO=2, //Normal message.
- MSG_LEVEL_VERBOSE=3, //Will report all trival errors.
- MSG_LEVEL_DEBUG=4 //Only for debug purpose.
+ MSG_LEVEL_ERR = 0, /* Errors causing abnormal operation */
+ MSG_LEVEL_NOTICE = 1, /* Errors needing user notification */
+ MSG_LEVEL_INFO = 2, /* Normal message. */
+ MSG_LEVEL_VERBOSE = 3, /* Will report all trival errors. */
+ MSG_LEVEL_DEBUG = 4 /* Only for debug purpose. */
} DEVICE_MSG_LEVEL, *PDEVICE_MSG_LEVEL;
typedef enum __device_init_type {
- DEVICE_INIT_COLD=0, // cold init
- DEVICE_INIT_RESET, // reset init or Dx to D0 power remain init
- DEVICE_INIT_DXPL // Dx to D0 power lost init
+ DEVICE_INIT_COLD = 0, /* cold init */
+ DEVICE_INIT_RESET, /* reset init or Dx to D0 power remain */
+ DEVICE_INIT_DXPL /* Dx to D0 power lost init */
} DEVICE_INIT_TYPE, *PDEVICE_INIT_TYPE;
-
//USB
//
@@ -203,9 +193,6 @@ typedef enum _CONTEXT_TYPE {
CONTEXT_MGMT_PACKET
} CONTEXT_TYPE;
-
-
-
// RCB (Receive Control Block)
typedef struct _RCB
{
@@ -219,7 +206,6 @@ typedef struct _RCB
} RCB, *PRCB;
-
// used to track bulk out irps
typedef struct _USB_SEND_CONTEXT {
void *pDevice;
@@ -233,7 +219,6 @@ typedef struct _USB_SEND_CONTEXT {
unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
} USB_SEND_CONTEXT, *PUSB_SEND_CONTEXT;
-
/* structure got from configuration file as user-desired default settings */
typedef struct _DEFAULT_CONFIG {
signed int ZoneType;
@@ -254,12 +239,10 @@ typedef struct {
BOOL bInUse;
} INT_BUFFER, *PINT_BUFFER;
-
-
//0:11A 1:11B 2:11G
typedef enum _VIA_BB_TYPE
{
- BB_TYPE_11A=0,
+ BB_TYPE_11A = 0,
BB_TYPE_11B,
BB_TYPE_11G
} VIA_BB_TYPE, *PVIA_BB_TYPE;
@@ -267,22 +250,18 @@ typedef enum _VIA_BB_TYPE
//0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
typedef enum _VIA_PKT_TYPE
{
- PK_TYPE_11A=0,
+ PK_TYPE_11A = 0,
PK_TYPE_11B,
PK_TYPE_11GB,
PK_TYPE_11GA
} VIA_PKT_TYPE, *PVIA_PKT_TYPE;
-
-
-
//++ NDIS related
#define NDIS_STATUS int
-#define NTSTATUS int
typedef enum __DEVICE_NDIS_STATUS {
- STATUS_SUCCESS=0,
+ STATUS_SUCCESS = 0,
STATUS_FAILURE,
STATUS_RESOURCES,
STATUS_PENDING,
@@ -810,17 +789,12 @@ typedef struct __device_info {
// command timer
struct timer_list sTimerCommand;
-//2007-0115-01<Add>by MikeLiu
-#ifdef TxInSleep
struct timer_list sTimerTxData;
unsigned long nTxDataTimeCout;
BOOL fTxDataInSleep;
BOOL IsTxDataTrigger;
-#endif
-#ifdef WPA_SM_Transtatus
BOOL fWPA_Authened; //is WPA/WPA-PSK or WPA2/WPA2-PSK authen??
-#endif
BYTE byReAssocCount; //mike add:re-association retry times!
BYTE byLinkWaitCount;
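The reworked DBG_PRT()/PRINT_K() macros in device.h gate each printk() on the module-wide msglevel, so raising the level enables progressively noisier output. A user-space sketch of that pattern, with printf standing in for printk and shortened level names; unlike the driver's macro, the sketch wraps the body in do { } while (0), which is the safer form for use inside if/else.

#include <stdio.h>

enum {
	LVL_ERR = 0,
	LVL_NOTICE,
	LVL_INFO,
	LVL_VERBOSE,
	LVL_DEBUG
};

static int msglevel = LVL_INFO;	/* would normally come from a module parameter */

/* print only when the message level is at or below the configured one */
#define DBG_PRT_SKETCH(l, fmt, args...) \
	do { if ((l) <= msglevel) printf(fmt, ##args); } while (0)

int main(void)
{
	DBG_PRT_SKETCH(LVL_ERR, "always shown: error %d\n", -5);
	DBG_PRT_SKETCH(LVL_INFO, "shown at the default level\n");
	DBG_PRT_SKETCH(LVL_DEBUG, "hidden unless msglevel >= %d\n", LVL_DEBUG);
	return 0;
}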
diff --git a/drivers/staging/vt6656/device_cfg.h b/drivers/staging/vt6656/device_cfg.h
index c816901882a..a0b82169dad 100644
--- a/drivers/staging/vt6656/device_cfg.h
+++ b/drivers/staging/vt6656/device_cfg.h
@@ -77,25 +77,20 @@ struct _version {
//Max: 2378=2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
#define PKT_BUF_SZ 2390
-
#define MAX_UINTS 8
#define OPTION_DEFAULT { [0 ... MAX_UINTS-1] = -1}
-
-
-typedef enum _chip_type{
- VT3184=1
+typedef enum _chip_type {
+ VT3184 = 1
} CHIP_TYPE, *PCHIP_TYPE;
-
-
#ifdef VIAWET_DEBUG
#define ASSERT(x) { \
if (!(x)) { \
- printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
+ printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x, \
__FUNCTION__, __LINE__);\
- *(int*) 0=0;\
- }\
+ *(int *) 0 = 0; \
+ } \
}
#define DBG_PORT80(value) outb(value, 0x80)
#else
@@ -103,5 +98,4 @@ typedef enum _chip_type{
#define DBG_PORT80(value)
#endif
-
#endif
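The VIAWET_DEBUG build of ASSERT() in device_cfg.h prints the failed expression and then deliberately writes through a NULL pointer, turning an assertion failure into an immediate, debuggable crash. A user-space sketch of the same idea that aborts cleanly instead of faulting; the macro name and the sample condition are invented.

#include <stdio.h>
#include <stdlib.h>

/* print the failed expression with its location, then stop hard */
#define ASSERT_SKETCH(x) \
	do { \
		if (!(x)) { \
			fprintf(stderr, "assertion %s failed: %s line %d\n", \
				#x, __FILE__, __LINE__); \
			abort(); \
		} \
	} while (0)

int main(void)
{
	int channel = 7;

	ASSERT_SKETCH(channel >= 1 && channel <= 14);	/* passes */
	printf("channel %d accepted\n", channel);

	ASSERT_SKETCH(channel == 0);	/* fails and aborts the program */
	return 0;
}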
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 9afe76cacef..5e88349d3b9 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -80,7 +80,7 @@ static
void
s_vGetDASA(
PBYTE pbyRxBufferAddr,
- PUINT pcbHeaderSize,
+ unsigned int *pcbHeaderSize,
PSEthernetHeader psEthHeader
);
@@ -92,7 +92,7 @@ s_vProcessRxMACHeader (
unsigned int cbPacketSize,
BOOL bIsWEP,
BOOL bExtIV,
- PUINT pcbHeadSize
+ unsigned int *pcbHeadSize
);
static BOOL s_bAPModeRxCtl(
@@ -167,7 +167,7 @@ s_vProcessRxMACHeader (
unsigned int cbPacketSize,
BOOL bIsWEP,
BOOL bExtIV,
- PUINT pcbHeadSize
+ unsigned int *pcbHeadSize
)
{
PBYTE pbyRxBuffer;
@@ -195,10 +195,9 @@ s_vProcessRxMACHeader (
};
pbyRxBuffer = (PBYTE) (pbyRxBufferAddr + cbHeaderSize);
- if (IS_ETH_ADDRESS_EQUAL(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) {
+ if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) {
cbHeaderSize += 6;
- }
- else if (IS_ETH_ADDRESS_EQUAL(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
+ } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
cbHeaderSize += 6;
pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
if ((*pwType!= TYPE_PKT_IPX) && (*pwType != cpu_to_le16(0xF380))) {
@@ -262,7 +261,7 @@ static
void
s_vGetDASA (
PBYTE pbyRxBufferAddr,
- PUINT pcbHeaderSize,
+ unsigned int *pcbHeaderSize,
PSEthernetHeader psEthHeader
)
{
@@ -343,9 +342,7 @@ RXbBulkInProcessData (
PBYTE pbyRxSts;
PBYTE pbyRxRate;
PBYTE pbySQ;
-#ifdef Calcu_LinkQual
PBYTE pby3SQ;
-#endif
unsigned int cbHeaderSize;
PSKeyItem pKey = NULL;
WORD wRxTSC15_0 = 0;
@@ -416,7 +413,6 @@ RXbBulkInProcessData (
wPLCPwithPadding = ( (*pwPLCP_Length / 4) + ( (*pwPLCP_Length % 4) ? 1:0 ) ) *4;
pqwTSFTime = (PQWORD) (pbyDAddress + 8 + wPLCPwithPadding);
-#ifdef Calcu_LinkQual
if(pDevice->byBBType == BB_TYPE_11G) {
pby3SQ = pbyDAddress + 8 + wPLCPwithPadding + 12;
pbySQ = pby3SQ;
@@ -425,9 +421,6 @@ RXbBulkInProcessData (
pbySQ = pbyDAddress + 8 + wPLCPwithPadding + 8;
pby3SQ = pbySQ;
}
-#else
- pbySQ = pbyDAddress + 8 + wPLCPwithPadding + 8;
-#endif
pbyNewRsr = pbyDAddress + 8 + wPLCPwithPadding + 9;
pbyRSSI = pbyDAddress + 8 + wPLCPwithPadding + 10;
pbyRsr = pbyDAddress + 8 + wPLCPwithPadding + 11;
@@ -453,21 +446,22 @@ RXbBulkInProcessData (
if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
if (pMgmt->sNodeDBTable[0].bActive) {
- if(IS_ETH_ADDRESS_EQUAL (pMgmt->abyCurrBSSID, pMACHeader->abyAddr2) ) {
+ if (!compare_ether_addr(pMgmt->abyCurrBSSID, pMACHeader->abyAddr2)) {
if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
}
}
}
- if (!IS_MULTICAST_ADDRESS(pMACHeader->abyAddr1) && !IS_BROADCAST_ADDRESS(pMACHeader->abyAddr1)) {
+ if (!is_multicast_ether_addr(pMACHeader->abyAddr1) && !is_broadcast_ether_addr(pMACHeader->abyAddr1)) {
if ( WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header) pbyFrame) ) {
pDevice->s802_11Counter.FrameDuplicateCount++;
return FALSE;
}
- if ( !IS_ETH_ADDRESS_EQUAL (pDevice->abyCurrentNetAddr, pMACHeader->abyAddr1) ) {
- return FALSE;
+ if (compare_ether_addr(pDevice->abyCurrentNetAddr,
+ pMACHeader->abyAddr1)) {
+ return FALSE;
}
}
@@ -475,7 +469,8 @@ RXbBulkInProcessData (
// Use for TKIP MIC
s_vGetDASA(pbyFrame, &cbHeaderSize, &pDevice->sRxEthHeader);
- if (IS_ETH_ADDRESS_EQUAL((PBYTE)&(pDevice->sRxEthHeader.abySrcAddr[0]), pDevice->abyCurrentNetAddr))
+ if (!compare_ether_addr((PBYTE)&(pDevice->sRxEthHeader.abySrcAddr[0]),
+ pDevice->abyCurrentNetAddr))
return FALSE;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
@@ -568,8 +563,8 @@ RXbBulkInProcessData (
//
// RX OK
//
- //remove the CRC length
- FrameSize -= U_CRC_LEN;
+ /* remove the FCS/CRC length */
+ FrameSize -= ETH_FCS_LEN;
if ( !(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) && // unicast address
(IS_FRAGMENT_PKT((pbyFrame)))
@@ -758,10 +753,11 @@ RXbBulkInProcessData (
pMgmt->pCurrBSS->byRSSIStatCnt++;
pMgmt->pCurrBSS->byRSSIStatCnt %= RSSI_STAT_COUNT;
pMgmt->pCurrBSS->ldBmAverage[pMgmt->pCurrBSS->byRSSIStatCnt] = ldBm;
- for(ii=0;ii<RSSI_STAT_COUNT;ii++) {
- if (pMgmt->pCurrBSS->ldBmAverage[ii] != 0) {
- pMgmt->pCurrBSS->ldBmMAX = max(pMgmt->pCurrBSS->ldBmAverage[ii], ldBm);
- }
+ for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
+ if (pMgmt->pCurrBSS->ldBmAverage[ii] != 0) {
+ pMgmt->pCurrBSS->ldBmMAX =
+ max(pMgmt->pCurrBSS->ldBmAverage[ii], ldBm);
+ }
}
}
*/
@@ -1448,7 +1444,7 @@ static BOOL s_bAPModeRxData (
if (FrameSize > CB_MAX_BUF_SIZE)
return FALSE;
// check DA
- if(IS_MULTICAST_ADDRESS((PBYTE)(skb->data+cbHeaderOffset))) {
+ if (is_multicast_ether_addr((PBYTE)(skb->data+cbHeaderOffset))) {
if (pMgmt->sNodeDBTable[0].bPSEnable) {
skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
@@ -1523,7 +1519,7 @@ static BOOL s_bAPModeRxData (
void RXvWorkItem(void *Context)
{
PSDevice pDevice = (PSDevice) Context;
- NTSTATUS ntStatus;
+ int ntStatus;
PRCB pRCB=NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
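In RXbBulkInProcessData() the received frame length now drops ETH_FCS_LEN, the 4-byte CRC-32 frame check sequence carried at the end of every frame, instead of the driver-private U_CRC_LEN constant. A tiny sketch of trimming the FCS before handing a payload up the stack; the buffer contents are invented.

#include <stdio.h>

#define ETH_FCS_LEN 4	/* same value the kernel header provides */

int main(void)
{
	/* pretend this arrived from the hardware: payload plus 4 FCS bytes */
	unsigned char frame[] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE,
				  0x12, 0x34, 0x56, 0x78 };
	unsigned int frame_size = sizeof(frame);

	/* remove the FCS/CRC length, as the dpc.c hunk does */
	frame_size -= ETH_FCS_LEN;

	printf("payload length after stripping FCS: %u bytes\n", frame_size);
	return 0;
}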
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index e1f96d7086f..ebb9c99df70 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -848,7 +848,7 @@ FIRMWAREbCheckVersion(
PSDevice pDevice
)
{
- NTSTATUS ntStatus;
+ int ntStatus;
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index 89f5b18bdf1..c95833ac58e 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -82,7 +82,7 @@ static int msglevel = MSG_LEVEL_INFO;
void INTvWorkItem(void *Context)
{
PSDevice pDevice = (PSDevice) Context;
- NTSTATUS ntStatus;
+ int ntStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n");
@@ -92,10 +92,9 @@ void INTvWorkItem(void *Context)
spin_unlock_irq(&pDevice->lock);
}
-NTSTATUS
-INTnsProcessData(PSDevice pDevice)
+int INTnsProcessData(PSDevice pDevice)
{
- NTSTATUS status = STATUS_SUCCESS;
+ int status = STATUS_SUCCESS;
PSINTData pINTData;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct net_device_stats *pStats = &pDevice->stats;
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index cdf355130de..3176c8d08d6 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -57,7 +57,7 @@ typedef struct tagSINTData {
BYTE byACKFail;
BYTE byFCSErr;
BYTE abySW[2];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SINTData, *PSINTData;
@@ -68,10 +68,6 @@ SINTData, *PSINTData;
/*--------------------- Export Functions --------------------------*/
void INTvWorkItem(void *Context);
-
-NTSTATUS
-INTnsProcessData(
- PSDevice pDevice
- );
+int INTnsProcessData(PSDevice pDevice);
#endif /* __INT_H__ */
diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
index fbba1d53e49..1ce39a4ba2f 100644
--- a/drivers/staging/vt6656/iocmd.h
+++ b/drivers/staging/vt6656/iocmd.h
@@ -70,10 +70,10 @@ typedef enum tagWMAC_CMD {
} WMAC_CMD, *PWMAC_CMD;
typedef enum tagWZONETYPE {
- ZoneType_USA=0,
- ZoneType_Japan=1,
- ZoneType_Europe=2
-}WZONETYPE;
+ ZoneType_USA = 0,
+ ZoneType_Japan = 1,
+ ZoneType_Europe = 2
+} WZONETYPE;
#define ADHOC 0
#define INFRA 1
@@ -83,9 +83,9 @@ typedef enum tagWZONETYPE {
#define ADHOC_STARTED 1
#define ADHOC_JOINTED 2
-#define PHY80211a 0
-#define PHY80211b 1
-#define PHY80211g 2
+#define PHY80211a 0
+#define PHY80211b 1
+#define PHY80211g 2
#define SSID_ID 0
#define SSID_MAXLEN 32
@@ -143,7 +143,6 @@ typedef struct tagSCmdZoneTypeSet {
} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
-#ifdef WPA_SM_Transtatus
typedef struct tagSWPAResult {
char ifname[100];
u8 proto;
@@ -151,7 +150,6 @@ typedef struct tagSWPAResult {
u8 eap_type;
BOOL authenticated;
} SWPAResult, *PSWPAResult;
-#endif
typedef struct tagSCmdStartAP {
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 19a84b66b09..d532618639b 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -48,9 +48,7 @@
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-#ifdef WPA_SM_Transtatus
SWPAResult wpa_Result;
-#endif
/*--------------------- Static Functions --------------------------*/
@@ -232,10 +230,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pDevice->bEncryptionEnable = FALSE;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
spin_lock_irq(&pDevice->lock);
- for(uu=0;uu<MAX_KEY_TABLE;uu++)
- MACvDisableKeyEntry(pDevice,uu);
+ for (uu = 0; uu < MAX_KEY_TABLE; uu++)
+ MACvDisableKeyEntry(pDevice, uu);
spin_unlock_irq(&pDevice->lock);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable. \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WEP function disable.\n");
break;
}
@@ -656,7 +654,6 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pReq->wResult = 0;
break;
-#ifdef WPA_SM_Transtatus
case 0xFF:
memset(wpa_Result.ifname,0,sizeof(wpa_Result.ifname));
wpa_Result.proto = 0;
@@ -676,7 +673,6 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
//DavidWang
if(wpa_Result.authenticated==TRUE) {
- #ifdef SndEvt_ToAPI
{
union iwreq_data wrqu;
@@ -687,7 +683,6 @@ if(wpa_Result.authenticated==TRUE) {
wrqu.data.length =pItemSSID->len;
wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
}
- #endif
pDevice->fWPA_Authened = TRUE; //is successful peer to wpa_Result.authenticated?
}
@@ -700,7 +695,6 @@ if(wpa_Result.authenticated==TRUE) {
pReq->wResult = 0;
break;
-#endif
default:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Private command not support..\n");
diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
index da03edcbacb..959c8868f6e 100644
--- a/drivers/staging/vt6656/iowpa.h
+++ b/drivers/staging/vt6656/iowpa.h
@@ -31,10 +31,8 @@
/*--------------------- Export Definitions -------------------------*/
-
#define WPA_IE_LEN 64
-
//WPA related
/*
typedef enum { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP } wpa_alg;
@@ -54,7 +52,7 @@ enum {
VIAWGET_SET_DROP_UNENCRYPT = 7,
VIAWGET_SET_DEAUTHENTICATE = 8,
VIAWGET_SET_ASSOCIATE = 9,
- VIAWGET_SET_DISASSOCIATE= 10
+ VIAWGET_SET_DISASSOCIATE = 10
};
@@ -76,8 +74,6 @@ typedef struct viawget_wpa_header {
u16 resp_ie_len;
} viawget_wpa_header;
-
-
struct viawget_wpa_param {
u32 cmd;
u8 addr[6];
@@ -86,43 +82,37 @@ struct viawget_wpa_param {
u8 len;
u8 data[0];
} generic_elem;
-
struct {
- u8 bssid[6];
+ u8 bssid[6];
u8 ssid[32];
u8 ssid_len;
- u8 *wpa_ie;
- u16 wpa_ie_len;
- int pairwise_suite;
- int group_suite;
- int key_mgmt_suite;
- int auth_alg;
- int mode;
- u8 roam_dbm; //DavidWang
+ u8 *wpa_ie;
+ u16 wpa_ie_len;
+ int pairwise_suite;
+ int group_suite;
+ int key_mgmt_suite;
+ int auth_alg;
+ int mode;
+ u8 roam_dbm;
} wpa_associate;
-
struct {
- int alg_name;
- u16 key_index;
- u16 set_tx;
- u8 *seq;
- u16 seq_len;
- u8 *key;
- u16 key_len;
+ int alg_name;
+ u16 key_index;
+ u16 set_tx;
+ u8 *seq;
+ u16 seq_len;
+ u8 *key;
+ u16 key_len;
} wpa_key;
-
struct {
u8 ssid_len;
u8 ssid[32];
} scan_req;
-
struct {
u16 scan_count;
u8 *buf;
} scan_results;
-
} u;
-
};
#pragma pack(1)
@@ -142,15 +132,12 @@ struct viawget_scan_result {
int maxrate;
};
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Types ------------------------------*/
-
/*--------------------- Export Functions --------------------------*/
#endif /* __IOWPA_H__ */
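struct viawget_wpa_param carries one command code plus a union of per-command payloads (associate parameters, key material, scan request, scan results), so a single ioctl structure serves every VIAWGET_SET_* operation; the hunks above only re-indent it. A reduced sketch of that command-plus-union layout; the member set is trimmed and the names are invented where they differ from the header.

#include <stdio.h>
#include <string.h>

typedef unsigned char  u8;
typedef unsigned short u16;
typedef unsigned int   u32;

enum { CMD_SCAN_REQ_SKETCH = 1, CMD_SET_KEY_SKETCH = 2 };

struct wpa_param_sketch {
	u32 cmd;
	u8 addr[6];
	union {
		struct {
			u8 ssid_len;
			u8 ssid[32];
		} scan_req;
		struct {
			u16 key_index;
			u16 key_len;
			u8 key[32];
		} wpa_key;
	} u;
};

int main(void)
{
	struct wpa_param_sketch p;

	memset(&p, 0, sizeof(p));
	p.cmd = CMD_SCAN_REQ_SKETCH;
	p.u.scan_req.ssid_len = 4;
	memcpy(p.u.scan_req.ssid, "test", 4);

	printf("cmd %u, ssid_len %u, union size %zu bytes\n",
	       p.cmd, p.u.scan_req.ssid_len, sizeof(p.u));
	return 0;
}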
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index fa40522d4a9..016b8e7766f 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -83,31 +83,9 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
long ldBm;
pDevice->wstats.status = pDevice->eOPMode;
- #ifdef Calcu_LinkQual
- #if 0
- if(pDevice->byBBType == BB_TYPE_11B) {
- if(pDevice->byCurrSQ > 120)
- pDevice->scStatistic.LinkQuality = 100;
- else
- pDevice->scStatistic.LinkQuality = pDevice->byCurrSQ*100/120;
- }
- else if(pDevice->byBBType == BB_TYPE_11G) {
- if(pDevice->byCurrSQ < 20)
- pDevice->scStatistic.LinkQuality = 100;
- else if(pDevice->byCurrSQ >96)
- pDevice->scStatistic.LinkQuality = 0;
- else
- pDevice->scStatistic.LinkQuality = (96-pDevice->byCurrSQ)*100/76;
- }
- if(pDevice->bLinkPass !=TRUE)
- pDevice->scStatistic.LinkQuality = 0;
- #endif
if(pDevice->scStatistic.LinkQuality > 100)
pDevice->scStatistic.LinkQuality = 100;
pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
- #else
- pDevice->wstats.qual.qual = pDevice->byCurrSQ;
- #endif
RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
//pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI;
@@ -133,18 +111,9 @@ static int iwctl_commit(struct net_device *dev,
void *wrq,
char *extra)
{
-//2008-0409-02, <Mark> by Einsn Liu
-/*
-#ifdef Safe_Close
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
- return -EINVAL;
-#endif
-*/
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT\n");
return 0;
-
}
/*
@@ -209,9 +178,7 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
spin_lock_irq(&pDevice->lock);
-#ifdef update_BssList
BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
-#endif
//mike add: active scan OR passive scan OR desire_ssid scan
if(wrq->length == sizeof(struct iw_scan_req)) {
@@ -273,14 +240,7 @@ int iwctl_giwscan(struct net_device *dev,
long ldBm;
char buf[MAX_WPA_IE_LEN * 2 + 30];
-//2008-0409-02, <Mark> by Einsn Liu
-/*
-#ifdef Safe_Close
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
- return -EINVAL;
-#endif
-*/
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSCAN \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSCAN\n");
if (pMgmt->eScanState == WMAC_IS_SCANNING) {
// In scanning..
@@ -349,39 +309,6 @@ int iwctl_giwscan(struct net_device *dev,
}
iwe.u.qual.updated=7;
-//2008-0409-01, <Mark> by Einsn Liu
-/*
-//2008-0220-03, <Modify> by Einsn Liu
- if(pDevice->bLinkPass== TRUE && IS_ETH_ADDRESS_EQUAL(pBSS->abyBSSID, pMgmt->abyCurrBSSID)){
- #ifdef Calcu_LinkQual
- #if 0
- if(pDevice->byBBType == BB_TYPE_11B) {
- if(pDevice->byCurrSQ > 120)
- pDevice->scStatistic.LinkQuality = 100;
- else
- pDevice->scStatistic.LinkQuality = pDevice->byCurrSQ*100/120;
- }
- else if(pDevice->byBBType == BB_TYPE_11G) {
- if(pDevice->byCurrSQ < 20)
- pDevice->scStatistic.LinkQuality = 100;
- else if(pDevice->byCurrSQ >96)
- pDevice->scStatistic.LinkQuality = 0;
- else
- pDevice->scStatistic.LinkQuality = (96-pDevice->byCurrSQ)*100/76;
- }
- if(pDevice->bLinkPass !=TRUE)
- pDevice->scStatistic.LinkQuality = 0;
- #endif
- if(pDevice->scStatistic.LinkQuality > 100)
- pDevice->scStatistic.LinkQuality = 100;
- iwe.u.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
- #else
- iwe.u.qual.qual = pDevice->byCurrSQ;
- #endif
- }else {
- iwe.u.qual.qual = 0;
- }
-*/
current_ev = iwe_stream_add_event(info,current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
//ADD encryption
memset(&iwe, 0, sizeof(iwe));
@@ -634,16 +561,8 @@ int iwctl_giwrange(struct net_device *dev,
struct iw_range *range = (struct iw_range *) extra;
int i,k;
BYTE abySupportedRates[13]= {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
-//2008-0409-02, <Mark> by Einsn Liu
-/*
- #ifdef Safe_Close
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
- return -EINVAL;
-#endif
- */
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRANGE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWRANGE\n");
if (wrq->pointer) {
wrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
@@ -653,23 +572,19 @@ int iwctl_giwrange(struct net_device *dev,
// Should be based on cap_rid.country to give only
// what the current card support
k = 0;
- for(i = 0; i < 14; i++) {
+ for (i = 0; i < 14; i++) {
range->freq[k].i = i + 1; // List index
range->freq[k].m = frequency_list[i] * 100000;
range->freq[k++].e = 1; // Values in table in MHz -> * 10^5 * 10
}
range->num_frequency = k;
// Hum... Should put the right values there
- #ifdef Calcu_LinkQual
range->max_qual.qual = 100;
- #else
- range->max_qual.qual = 255;
- #endif
range->max_qual.level = 0;
range->max_qual.noise = 0;
range->sensitivity = 255;
- for(i = 0 ; i < 13 ; i++) {
+ for (i = 0 ; i < 13 ; i++) {
range->bitrate[i] = abySupportedRates[i] * 500000;
if(range->bitrate[i] == 0)
break;
@@ -761,7 +676,7 @@ int iwctl_siwap(struct net_device *dev,
memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);
//mike :add
- if ((IS_BROADCAST_ADDRESS(pMgmt->abyDesireBSSID)) ||
+ if ((is_broadcast_ether_addr(pMgmt->abyDesireBSSID)) ||
(memcmp(pMgmt->abyDesireBSSID, ZeroBSSID, 6) == 0)){
PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n");
return rc;
@@ -772,7 +687,8 @@ int iwctl_siwap(struct net_device *dev,
unsigned int ii, uSameBssidNum = 0;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID,pMgmt->abyDesireBSSID)) {
+ !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyDesireBSSID)) {
uSameBssidNum++;
}
}
@@ -957,7 +873,8 @@ int iwctl_siwessid(struct net_device *dev,
// by means of judging if there are two same BSSID exist in list ?
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
- IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
+ !compare_ether_addr(pMgmt->sBSSList[ii].abyBSSID,
+ pCurr->abyBSSID)) {
uSameBssidNum++;
}
}
@@ -1057,7 +974,7 @@ int iwctl_siwrate(struct net_device *dev,
u8 normvalue = (u8) (wrq->value/500000);
// Check if rate is valid
- for(i = 0 ; i < 13 ; i++) {
+ for (i = 0 ; i < 13 ; i++) {
if(normvalue == abySupportedRates[i]) {
brate = i;
break;
@@ -1067,7 +984,7 @@ int iwctl_siwrate(struct net_device *dev,
// -1 designed the max rate (mostly auto mode)
if(wrq->value == -1) {
// Get the highest available rate
- for(i = 0 ; i < 13 ; i++) {
+ for (i = 0 ; i < 13 ; i++) {
if(abySupportedRates[i] == 0)
break;
}
@@ -1405,8 +1322,8 @@ int iwctl_siwencode(struct net_device *dev,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
if (pDevice->flags & DEVICE_FLAGS_OPENED) {
spin_lock_irq(&pDevice->lock);
- for(uu=0;uu<MAX_KEY_TABLE;uu++)
- MACvDisableKeyEntry(pDevice,uu);
+ for (uu = 0; uu < MAX_KEY_TABLE; uu++)
+ MACvDisableKeyEntry(pDevice, uu);
spin_unlock_irq(&pDevice->lock);
}
}
@@ -1926,26 +1843,6 @@ param->u.wpa_key.key = (u8 *)key_array;
param->u.wpa_key.seq = (u8 *)seq;
param->u.wpa_key.seq_len = seq_len;
-#if 0
-printk("param->u.wpa_key.alg_name =%d\n",param->u.wpa_key.alg_name);
-printk("param->addr=%02x:%02x:%02x:%02x:%02x:%02x\n",
- param->addr[0],param->addr[1],param->addr[2],
- param->addr[3],param->addr[4],param->addr[5]);
-printk("param->u.wpa_key.set_tx =%d\n",param->u.wpa_key.set_tx);
-printk("param->u.wpa_key.key_index =%d\n",param->u.wpa_key.key_index);
-printk("param->u.wpa_key.key_len =%d\n",param->u.wpa_key.key_len);
-printk("param->u.wpa_key.key =");
-for(ii=0;ii<param->u.wpa_key.key_len;ii++)
- printk("%02x:",param->u.wpa_key.key[ii]);
- printk("\n");
-printk("param->u.wpa_key.seq_len =%d\n",param->u.wpa_key.seq_len);
-printk("param->u.wpa_key.seq =");
-for(ii=0;ii<param->u.wpa_key.seq_len;ii++)
- printk("%02x:",param->u.wpa_key.seq[ii]);
- printk("\n");
-
-printk("...........\n");
-#endif
//****set if current action is Network Manager count??
//****this method is so foolish,but there is no other way???
if(param->u.wpa_key.alg_name == WPA_ALG_NONE) {
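With the Calcu_LinkQual conditionals gone, iwctl_get_wireless_stats() always reports scStatistic.LinkQuality clamped to a 0-100 scale, and iwctl_giwrange() advertises max_qual.qual = 100 to match. A one-line sketch of the clamp; the helper name is illustrative.

#include <stdio.h>

/* clamp a raw link-quality estimate to the 0..100 range reported
 * through the wireless extensions */
static unsigned int clamp_quality(unsigned int raw)
{
	return raw > 100 ? 100 : raw;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_quality(42), clamp_quality(100), clamp_quality(137));
	return 0;
}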
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index df9a4cf3baa..d601e922021 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -33,15 +33,13 @@
/*--------------------- Export Definitions -------------------------*/
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-struct iw_statistics *iwctl_get_wireless_stats (struct net_device *dev);
-
+struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev);
int iwctl_siwap(struct net_device *dev,
struct iw_request_info *info,
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index b0890c181e7..d181a2f6626 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -174,7 +174,7 @@ BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
@@ -245,7 +245,7 @@ BOOL KeybSetKey(
j = i;
}
if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
@@ -402,7 +402,7 @@ BOOL KeybRemoveKey(
int i;
BOOL bReturnValue = FALSE;
- if (IS_BROADCAST_ADDRESS(pbyBSSID)) {
+ if (is_broadcast_ether_addr(pbyBSSID)) {
// dealte all key
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
for (i=0;i<MAX_KEY_TABLE;i++) {
@@ -427,7 +427,7 @@ BOOL KeybRemoveKey(
} else {
for (i=0;i<MAX_KEY_TABLE;i++) {
if ( (pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
@@ -483,11 +483,11 @@ BOOL KeybRemoveAllKey(
for (i=0;i<MAX_KEY_TABLE;i++) {
if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
- for(u=0;u<MAX_GROUP_KEY;u++) {
- pTable->KeyTable[i].GroupKey[u].bKeyValid = FALSE;
- }
+ for (u = 0; u < MAX_GROUP_KEY; u++)
+ pTable->KeyTable[i].GroupKey[u].bKeyValid = FALSE;
+
pTable->KeyTable[i].dwGTKeyIndex = 0;
s_vCheckKeyTableValid(pDevice, pTable);
return (TRUE);
@@ -531,19 +531,13 @@ void KeyvRemoveWEPKey(
return;
}
-void KeyvRemoveAllWEPKey(
- void *pDeviceHandler,
- PSKeyManagement pTable
- )
+void KeyvRemoveAllWEPKey(void *pDeviceHandler, PSKeyManagement pTable)
{
- PSDevice pDevice = (PSDevice) pDeviceHandler;
-
- int i;
-
- for(i=0;i<MAX_GROUP_KEY;i++) {
- KeyvRemoveWEPKey(pDevice,pTable, i);
- }
+ PSDevice pDevice = (PSDevice) pDeviceHandler;
+ int i;
+ for (i = 0; i < MAX_GROUP_KEY; i++)
+ KeyvRemoveWEPKey(pDevice, pTable, i);
}
/*
@@ -567,7 +561,7 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
*pKey = NULL;
for (i=0;i<MAX_KEY_TABLE;i++) {
if ((pTable->KeyTable[i].bInUse == TRUE) &&
- IS_ETH_ADDRESS_EQUAL(pTable->KeyTable[i].abyBSSID,pbyBSSID)) {
+ !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 0ab3db025f3..33698edde4f 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -306,8 +306,8 @@ BYTE pbyData[24];
pbyData[5] = (BYTE)(dwData2>>8);
pbyData[6] = (BYTE)(dwData2>>16);
pbyData[7] = (BYTE)(dwData2>>24);
- for(ii=8;ii<24;ii++)
- pbyData[ii] = *pbyKey++;
+ for (ii = 8; ii < 24; ii++)
+ pbyData[ii] = *pbyKey++;
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_SETKEY,
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 775c70928ec..491ff5ecd04 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -420,11 +420,11 @@
/*--------------------- Export Functions --------------------------*/
-void MACvSetMultiAddrByHash (PSDevice pDevice, BYTE byHashIdx);
+void MACvSetMultiAddrByHash(PSDevice pDevice, BYTE byHashIdx);
void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData);
-BOOL MACbShutdown(PSDevice pDevice);;
-void MACvSetBBType(PSDevice pDevice,BYTE byType);
-void MACvSetMISCFifo (PSDevice pDevice, WORD wOffset, DWORD dwData);
+BOOL MACbShutdown(PSDevice pDevice);
+void MACvSetBBType(PSDevice pDevice, BYTE byType);
+void MACvSetMISCFifo(PSDevice pDevice, WORD wOffset, DWORD dwData);
void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx);
void MACvSetKeyEntry(PSDevice pDevice, WORD wKeyCtl, unsigned int uEntryIdx,
unsigned int uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 098b0455e32..c528ef0f8ed 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -366,7 +366,7 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
BYTE byAntenna;
unsigned int ii;
CMD_CARD_INIT sInitCmd;
- NTSTATUS ntStatus = STATUS_SUCCESS;
+ int ntStatus = STATUS_SUCCESS;
RSP_CARD_INIT sInitRsp;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
BYTE byTmp;
@@ -407,8 +407,8 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
sInitCmd.byInitClass = (BYTE)InitType;
sInitCmd.bExistSWNetAddr = (BYTE) pDevice->bExistSWNetAddr;
- for(ii=0;ii<6;ii++)
- sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii];
+ for (ii = 0; ii < 6; ii++)
+ sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii];
sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit;
sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit;
@@ -487,10 +487,10 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&&
(pDevice->byOriginalZonetype == ZoneType_USA)) {
- for(ii=11;ii<14;ii++) {
- pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
- pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
- }
+ for (ii = 11; ii < 14; ii++) {
+ pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
+ pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
+ }
}
//{{ RobertYu: 20041124
@@ -718,33 +718,32 @@ static BOOL device_release_WPADEV(PSDevice pDevice)
static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
{
- PSDevice pDevice = usb_get_intfdata(intf);
- struct net_device *dev = pDevice->dev;
+ PSDevice device = usb_get_intfdata(intf);
- printk("VNTWUSB Suspend Start======>\n");
-if(dev != NULL) {
- if(pDevice->flags & DEVICE_FLAGS_OPENED)
- device_close(dev);
-}
+ if (!device || !device->dev)
+ return -ENODEV;
- usb_put_dev(interface_to_usbdev(intf));
- return 0;
+ if (device->flags & DEVICE_FLAGS_OPENED)
+ device_close(device->dev);
+
+ usb_put_dev(interface_to_usbdev(intf));
+
+ return 0;
}
static int vt6656_resume(struct usb_interface *intf)
{
- PSDevice pDevice = usb_get_intfdata(intf);
- struct net_device *dev = pDevice->dev;
-
- printk("VNTWUSB Resume Start======>\n");
- if(dev != NULL) {
- usb_get_dev(interface_to_usbdev(intf));
- if(!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- if(device_open(dev)!=0)
- printk("VNTWUSB Resume Start======>open fail\n");
- }
- }
- return 0;
+ PSDevice device = usb_get_intfdata(intf);
+
+ if (!device || !device->dev)
+ return -ENODEV;
+
+ usb_get_dev(interface_to_usbdev(intf));
+
+ if (!(device->flags & DEVICE_FLAGS_OPENED))
+ device_open(device->dev);
+
+ return 0;
}
#endif /* CONFIG_PM */
@@ -758,93 +757,75 @@ static const struct net_device_ops device_netdev_ops = {
.ndo_set_multicast_list = device_set_multi,
};
-
static int __devinit
vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
u8 fake_mac[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
struct usb_device *udev = interface_to_usbdev(intf);
- int rc = 0;
- struct net_device *netdev = NULL;
- PSDevice pDevice = NULL;
-
+ int rc = 0;
+ struct net_device *netdev = NULL;
+ PSDevice pDevice = NULL;
- printk(KERN_NOTICE "%s Ver. %s\n",DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
- printk(KERN_NOTICE "Copyright (c) 2004 VIA Networking Technologies, Inc.\n");
+ printk(KERN_NOTICE "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
+ printk(KERN_NOTICE "Copyright (c) 2004 VIA Networking Technologies, Inc.\n");
- udev = usb_get_dev(udev);
-
- netdev = alloc_etherdev(sizeof(DEVICE_INFO));
-
- if (netdev == NULL) {
- printk(KERN_ERR DEVICE_NAME ": allocate net device failed \n");
- kfree(pDevice);
- goto err_nomem;
- }
+ udev = usb_get_dev(udev);
+ netdev = alloc_etherdev(sizeof(DEVICE_INFO));
- pDevice = netdev_priv(netdev);
- memset(pDevice, 0, sizeof(DEVICE_INFO));
+ if (!netdev) {
+ printk(KERN_ERR DEVICE_NAME ": allocate net device failed\n");
+ kfree(pDevice);
+ goto err_nomem;
+ }
- pDevice->dev = netdev;
- pDevice->usb = udev;
+ pDevice = netdev_priv(netdev);
+ memset(pDevice, 0, sizeof(DEVICE_INFO));
- // Set initial settings
- device_set_options(pDevice);
- spin_lock_init(&pDevice->lock);
+ pDevice->dev = netdev;
+ pDevice->usb = udev;
- pDevice->tx_80211 = device_dma0_tx_80211;
- pDevice->sMgmtObj.pAdapter = (void *)pDevice;
+ device_set_options(pDevice);
+ spin_lock_init(&pDevice->lock);
- netdev->netdev_ops = &device_netdev_ops;
+ pDevice->tx_80211 = device_dma0_tx_80211;
+ pDevice->sMgmtObj.pAdapter = (void *) pDevice;
- netdev->wireless_handlers = (struct iw_handler_def *)&iwctl_handler_def;
+ netdev->netdev_ops = &device_netdev_ops;
+ netdev->wireless_handlers =
+ (struct iw_handler_def *) &iwctl_handler_def;
- //2008-0623-01<Remark>by MikeLiu
- //2007-0821-01<Add>by MikeLiu
- usb_set_intfdata(intf, pDevice);
+ usb_set_intfdata(intf, pDevice);
SET_NETDEV_DEV(netdev, &intf->dev);
- memcpy(pDevice->dev->dev_addr, fake_mac, ETH_ALEN);
- rc = register_netdev(netdev);
- if (rc != 0) {
- printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
+ memcpy(pDevice->dev->dev_addr, fake_mac, ETH_ALEN);
+ rc = register_netdev(netdev);
+ if (rc) {
+ printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
free_netdev(netdev);
- kfree(pDevice);
- return -ENODEV;
- }
-
-//2008-07-21-01<Add>by MikeLiu
-//register wpadev
-#if 0
- if(wpa_set_wpadev(pDevice, 1)!=0) {
- printk("Fail to Register WPADEV?\n");
- unregister_netdev(pDevice->dev);
- free_netdev(netdev);
- kfree(pDevice);
- }
-#endif
- usb_device_reset(pDevice);
+ kfree(pDevice);
+ return -ENODEV;
+ }
-#ifdef SndEvt_ToAPI
-{
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.flags = RT_INSMOD_EVENT_FLAG;
- wrqu.data.length =IFNAMSIZ;
- wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pDevice->dev->name);
-}
-#endif
+ usb_device_reset(pDevice);
+
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.flags = RT_INSMOD_EVENT_FLAG;
+ wrqu.data.length = IFNAMSIZ;
+ wireless_send_event(pDevice->dev,
+ IWEVCUSTOM,
+ &wrqu,
+ pDevice->dev->name);
+ }
return 0;
-
err_nomem:
- //2008-0922-01<Add>by MikeLiu, decrease usb counter.
- usb_put_dev(udev);
+ usb_put_dev(udev);
- return -ENOMEM;
+ return -ENOMEM;
}
-
static void device_free_tx_bufs(PSDevice pDevice)
{
PUSB_SEND_CONTEXT pTxContext;
@@ -1065,7 +1046,6 @@ BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) {
static int device_open(struct net_device *dev) {
PSDevice pDevice=(PSDevice) netdev_priv(dev);
-#ifdef WPA_SM_Transtatus
extern SWPAResult wpa_Result;
memset(wpa_Result.ifname,0,sizeof(wpa_Result.ifname));
wpa_Result.proto = 0;
@@ -1073,7 +1053,6 @@ static int device_open(struct net_device *dev) {
wpa_Result.eap_type = 0;
wpa_Result.authenticated = FALSE;
pDevice->fWPA_Authened = FALSE;
-#endif
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_open...\n");
@@ -1172,14 +1151,12 @@ static int device_open(struct net_device *dev) {
netif_stop_queue(pDevice->dev);
pDevice->flags |= DEVICE_FLAGS_OPENED;
-#ifdef SndEvt_ToAPI
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.flags = RT_UPDEV_EVENT_FLAG;
wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
}
-#endif
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open success.. \n");
return 0;
@@ -1211,14 +1188,12 @@ static int device_close(struct net_device *dev) {
if (pDevice == NULL)
return -ENODEV;
-#ifdef SndEvt_ToAPI
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.flags = RT_DOWNDEV_EVENT_FLAG;
wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
}
-#endif
//2007-1121-02<Add>by EinsnLiu
if (pDevice->bLinkPass) {
@@ -1234,10 +1209,10 @@ device_release_WPADEV(pDevice);
pMgmt->bShareKeyAlgorithm = FALSE;
pDevice->bEncryptionEnable = FALSE;
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- spin_lock_irq(&pDevice->lock);
- for(uu=0;uu<MAX_KEY_TABLE;uu++)
+ spin_lock_irq(&pDevice->lock);
+ for (uu = 0; uu < MAX_KEY_TABLE; uu++)
MACvDisableKeyEntry(pDevice,uu);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irq(&pDevice->lock);
if ((pDevice->flags & DEVICE_FLAGS_UNPLUG) == FALSE) {
MACbShutdown(pDevice);
@@ -1250,10 +1225,7 @@ device_release_WPADEV(pDevice);
del_timer(&pDevice->sTimerCommand);
del_timer(&pMgmt->sTimerSecondCallback);
-//2007-0115-02<Add>by MikeLiu
-#ifdef TxInSleep
del_timer(&pDevice->sTimerTxData);
-#endif
if (pDevice->bDiversityRegCtlON) {
del_timer(&pDevice->TimerSQ3Tmax1);
@@ -1290,112 +1262,81 @@ device_release_WPADEV(pDevice);
return 0;
}
-
static void __devexit vt6656_disconnect(struct usb_interface *intf)
{
+ PSDevice device = usb_get_intfdata(intf);
- PSDevice pDevice = usb_get_intfdata(intf);
+ if (!device)
+ return;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect1.. \n");
- if (pDevice == NULL)
- return;
-
-#ifdef SndEvt_ToAPI
-{
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.flags = RT_RMMOD_EVENT_FLAG;
- wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
-}
-#endif
+ {
+ union iwreq_data req;
+ memset(&req, 0, sizeof(req));
+ req.data.flags = RT_RMMOD_EVENT_FLAG;
+ wireless_send_event(device->dev, IWEVCUSTOM, &req, NULL);
+ }
-//2008-0714-01<Add>by MikeLiu
-device_release_WPADEV(pDevice);
+ device_release_WPADEV(device);
usb_set_intfdata(intf, NULL);
-//2008-0922-01<Add>by MikeLiu, decrease usb counter.
- usb_put_dev(interface_to_usbdev(intf));
+ usb_put_dev(interface_to_usbdev(intf));
- pDevice->flags |= DEVICE_FLAGS_UNPLUG;
- if (pDevice->dev != NULL) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "unregister_netdev..\n");
- unregister_netdev(pDevice->dev);
+ device->flags |= DEVICE_FLAGS_UNPLUG;
-//2008-07-21-01<Add>by MikeLiu
-//unregister wpadev
- if(wpa_set_wpadev(pDevice, 0)!=0)
- printk("unregister wpadev fail?\n");
-
- free_netdev(pDevice->dev);
- }
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect3.. \n");
+ if (device->dev) {
+ unregister_netdev(device->dev);
+ wpa_set_wpadev(device, 0);
+ free_netdev(device->dev);
+ }
}
+static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
+{
+ PSDevice pDevice = netdev_priv(dev);
+ spin_lock_irq(&pDevice->lock);
+ if (unlikely(pDevice->bStopTx0Pkt))
+ dev_kfree_skb_irq(skb);
+ else
+ vDMA0_tx_80211(pDevice, skb);
-static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
- PSDevice pDevice=netdev_priv(dev);
- PBYTE pbMPDU;
- unsigned int cbMPDULen = 0;
-
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211\n");
- spin_lock_irq(&pDevice->lock);
-
- if (pDevice->bStopTx0Pkt == TRUE) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- };
-
-
- cbMPDULen = skb->len;
- pbMPDU = skb->data;
-
- vDMA0_tx_80211(pDevice, skb);
-
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
+ spin_unlock_irq(&pDevice->lock);
+ return NETDEV_TX_OK;
}
+static int device_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ PSDevice pDevice = netdev_priv(dev);
+ struct net_device_stats *stats = &pDevice->stats;
-static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
- PSDevice pDevice=netdev_priv(dev);
- struct net_device_stats* pStats = &pDevice->stats;
-
+ spin_lock_irq(&pDevice->lock);
- spin_lock_irq(&pDevice->lock);
+ netif_stop_queue(dev);
- netif_stop_queue(pDevice->dev);
+ if (!pDevice->bLinkPass) {
+ dev_kfree_skb_irq(skb);
+ goto out;
+ }
- if (pDevice->bLinkPass == FALSE) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- if (pDevice->bStopDataPkt == TRUE) {
- dev_kfree_skb_irq(skb);
- pStats->tx_dropped++;
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
+ if (pDevice->bStopDataPkt) {
+ dev_kfree_skb_irq(skb);
+ stats->tx_dropped++;
+ goto out;
+ }
- if(nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) !=0) { //mike add:xmit fail!
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
- }
+ if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb)) {
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
- spin_unlock_irq(&pDevice->lock);
+out:
+ spin_unlock_irq(&pDevice->lock);
- return 0;
+ return NETDEV_TX_OK;
}
-
-
static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc(int length, unsigned char *data)
{
@@ -1447,12 +1388,12 @@ static int Config_FileGetParameter(unsigned char *string,
return FALSE;
//check if current config line is marked by "#" ??
-for(ii=1;;ii++) {
- if(memcmp(start_p-ii,"\n",1)==0)
- break;
- if(memcmp(start_p-ii,"#",1)==0)
- return FALSE;
-}
+ for (ii = 1; ; ii++) {
+ if (memcmp(start_p - ii, "\n", 1) == 0)
+ break;
+ if (memcmp(start_p - ii, "#", 1) == 0)
+ return FALSE;
+ }
//find target string end point
end_p = kstrstr(start_p,"\n");
@@ -1585,7 +1526,6 @@ static int Read_config_file(PSDevice pDevice) {
}
}
-#if 1
//get other parameter
{
memset(tmpbuffer,0,sizeof(tmpbuffer));
@@ -1598,7 +1538,6 @@ static int Read_config_file(PSDevice pDevice) {
pDevice->config_file.eEncryptionStatus= (int) simple_strtol(tmpbuffer, NULL, 10);
}
}
-#endif
kfree(buffer);
return result;
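
A minimal sketch of the ndo_start_xmit contract the rewritten device_xmit() above now follows: take the driver lock, consume the skb one way or another, and report NETDEV_TX_OK. The example_priv structure and example_queue_tx() helper are hypothetical placeholders, not anything from this driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical private data */

	spin_lock_irq(&priv->lock);
	netif_stop_queue(dev);

	if (!priv->link_up) {
		dev_kfree_skb_irq(skb);			/* the skb is consumed either way */
		goto out;
	}

	if (example_queue_tx(priv, skb))		/* hypothetical helper, non-zero on failure */
		netif_wake_queue(dev);			/* could not queue, let the stack retry */

out:
	spin_unlock_irq(&priv->lock);
	return NETDEV_TX_OK;
}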
diff --git a/drivers/staging/vt6656/mib.c b/drivers/staging/vt6656/mib.c
index b694fc86d74..8a6ee72f440 100644
--- a/drivers/staging/vt6656/mib.c
+++ b/drivers/staging/vt6656/mib.c
@@ -347,10 +347,9 @@ void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
if (WLAN_GET_FC_MOREFRAG(pHeader->wFrameCtl))
pStatistic->dwRsrRxFragment++;
- if (cbFrameLength < MIN_PACKET_LEN + 4) {
+ if (cbFrameLength < ETH_ZLEN + 4) {
pStatistic->dwRsrRunt++;
- }
- else if (cbFrameLength == MIN_PACKET_LEN + 4) {
+ } else if (cbFrameLength == ETH_ZLEN + 4) {
pStatistic->dwRsrRxFrmLen64++;
}
else if ((65 <= cbFrameLength) && (cbFrameLength <= 127)) {
@@ -364,17 +363,14 @@ void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
}
else if ((512 <= cbFrameLength) && (cbFrameLength <= 1023)) {
pStatistic->dwRsrRxFrmLen512_1023++;
- }
- else if ((1024 <= cbFrameLength) && (cbFrameLength <= MAX_PACKET_LEN + 4)) {
+ } else if ((1024 <= cbFrameLength) &&
+ (cbFrameLength <= ETH_FRAME_LEN + 4)) {
pStatistic->dwRsrRxFrmLen1024_1518++;
- } else if (cbFrameLength > MAX_PACKET_LEN + 4) {
+ } else if (cbFrameLength > ETH_FRAME_LEN + 4) {
pStatistic->dwRsrLong++;
}
-
}
-
-
/*
* Description: Update Rx Statistic Counter and copy Rx buffer
*
@@ -467,12 +463,10 @@ STAvUpdateTDStatCounter (
}
if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
-#ifdef Calcu_LinkQual
if (byRetyCnt < 2)
pStatistic->TxNoRetryOkCount ++;
else
pStatistic->TxRetryOkCount ++;
-#endif
pStatistic->ullTsrOK++;
pStatistic->CustomStat.ullTsrAllOK++;
@@ -493,9 +487,7 @@ STAvUpdateTDStatCounter (
}
else {
-#ifdef Calcu_LinkQual
pStatistic->TxFailCount ++;
-#endif
pStatistic->dwTsrErr++;
if (byTSR & TSR_RETRYTMO)
@@ -591,10 +583,7 @@ STAvClear802_11Counter(PSDot11Counters p802_11Counter)
*
*/
-void
-STAvUpdateUSBCounter(PSUSBCounter pUsbCounter,
- NTSTATUS ntStatus
- )
+void STAvUpdateUSBCounter(PSUSBCounter pUsbCounter, int ntStatus)
{
// if ( ntStatus == USBD_STATUS_CRC ) {
@@ -602,5 +591,3 @@ STAvUpdateUSBCounter(PSUSBCounter pUsbCounter,
// }
}
-
-
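
The length buckets above now use the generic <linux/if_ether.h> constants instead of the driver-local MIN_PACKET_LEN/MAX_PACKET_LEN. A reduced sketch of the same bucketing, assuming a hypothetical rx_len_stats structure; the "+ 4" accounts for the FCS.

#include <linux/if_ether.h>

struct rx_len_stats {			/* hypothetical counters */
	unsigned long runt, in_range, oversize;
};

static void count_rx_len(struct rx_len_stats *st, unsigned int len)
{
	if (len < ETH_ZLEN + 4)			/* ETH_ZLEN == 60 */
		st->runt++;
	else if (len > ETH_FRAME_LEN + 4)	/* ETH_FRAME_LEN == 1514 */
		st->oversize++;
	else
		st->in_range++;
}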
diff --git a/drivers/staging/vt6656/mib.h b/drivers/staging/vt6656/mib.h
index 0455ec9d327..a89cca0c5ec 100644
--- a/drivers/staging/vt6656/mib.h
+++ b/drivers/staging/vt6656/mib.h
@@ -356,7 +356,6 @@ typedef struct tagSStatCounter {
SCustomCounters CustomStat;
- #ifdef Calcu_LinkQual
//Tx count:
unsigned long TxNoRetryOkCount; /* success tx no retry ! */
unsigned long TxRetryOkCount; /* success tx but retry ! */
@@ -367,12 +366,9 @@ typedef struct tagSStatCounter {
//statistic
unsigned long SignalStren;
unsigned long LinkQuality;
- #endif
} SStatCounter, *PSStatCounter;
-#define NTSTATUS int
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
@@ -381,7 +377,9 @@ typedef struct tagSStatCounter {
void STAvClearAllCounter(PSStatCounter pStatistic);
-void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, BYTE byIsr0, BYTE byIsr1);
+void STAvUpdateIsrStatCounter(PSStatCounter pStatistic,
+ BYTE byIsr0,
+ BYTE byIsr1);
void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
BYTE byRSR, BYTE byNewRSR, BYTE byRxSts,
@@ -393,14 +391,8 @@ void STAvUpdateRDStatCounterEx(PSStatCounter pStatistic,
BYTE byRxRate, PBYTE pbyBuffer,
unsigned int cbFrameLength);
-void
-STAvUpdateTDStatCounter (
- PSStatCounter pStatistic,
- BYTE byPktNum,
- BYTE byRate,
- BYTE byTSR
- );
-
+void STAvUpdateTDStatCounter(PSStatCounter pStatistic, BYTE byPktNum,
+ BYTE byRate, BYTE byTSR);
void
STAvUpdate802_11Counter(
@@ -413,11 +405,6 @@ STAvUpdate802_11Counter(
);
void STAvClear802_11Counter(PSDot11Counters p802_11Counter);
-
-void
-STAvUpdateUSBCounter(
- PSUSBCounter pUsbCounter,
- NTSTATUS ntStatus
- );
+void STAvUpdateUSBCounter(PSUSBCounter pUsbCounter, int ntStatus);
#endif /* __MIB_H__ */
diff --git a/drivers/staging/vt6656/michael.c b/drivers/staging/vt6656/michael.c
index 671a8cf33e2..4d419814f27 100644
--- a/drivers/staging/vt6656/michael.c
+++ b/drivers/staging/vt6656/michael.c
@@ -74,7 +74,7 @@ static DWORD s_dwGetUINT32 (BYTE * p)
{
DWORD res = 0;
unsigned int i;
- for(i=0; i<4; i++ )
+ for (i = 0; i < 4; i++)
res |= (*p++) << (8*i);
return res;
}
@@ -83,7 +83,7 @@ static void s_vPutUINT32(BYTE *p, DWORD val)
// Convert from DWORD to BYTE[] in a portable way
{
unsigned int i;
- for(i=0; i<4; i++ ) {
+ for (i = 0; i < 4; i++) {
*p++ = (BYTE) (val & 0xff);
val >>= 8;
}
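
s_dwGetUINT32() and s_vPutUINT32() above hand-roll little-endian (de)serialisation byte by byte. A sketch of the same conversions written with the kernel's unaligned little-endian accessors, offered only as an equivalent formulation, not as part of this patch:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_le32(const u8 *p)
{
	return get_unaligned_le32(p);	/* byte 0 is least significant, as in s_dwGetUINT32() */
}

static void write_le32(u8 *p, u32 val)
{
	put_unaligned_le32(val, p);
}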
diff --git a/drivers/staging/vt6656/michael.h b/drivers/staging/vt6656/michael.h
index 3ab60928ef3..81351f50623 100644
--- a/drivers/staging/vt6656/michael.h
+++ b/drivers/staging/vt6656/michael.h
@@ -49,8 +49,8 @@ void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
/*--------------------- Export Macros ------------------------------*/
// Rotation functions on 32 bit values
-#define ROL32( A, n ) \
- ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
-#define ROR32( A, n ) ROL32( (A), 32-(n) )
+#define ROL32(A, n) \
+ (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
#endif /* __MICHAEL_H__ */
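
A quick sanity check for the cleaned-up rotation macros, assuming the definitions above; WARN_ON() is used only to state the expected identities:

#include <linux/kernel.h>

static void rol_ror_selftest(void)
{
	u32 v = 0x80000001UL;

	WARN_ON(ROL32(v, 1) != 0x00000003UL);	/* top bit wraps around to bit 0 */
	WARN_ON(ROR32(v, 1) != 0xC0000000UL);	/* bit 0 wraps around to bit 31 */
	WARN_ON(ROL32(v, 5) != ROR32(v, 27));	/* rotate left by n == rotate right by 32 - n */
}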
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index 766c5be6fd2..4d7d4e014d0 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -19,7 +19,7 @@
*
* File: power.c
*
- * Purpose: Handles 802.11 power managment functions
+ * Purpose: Handles 802.11 power management functions
*
* Author: Lyndon Chen
*
@@ -290,17 +290,11 @@ BOOL PSbSendNullPacket(void *hDeviceContext)
return FALSE;
}
-//2007-0115-03<Add>by MikeLiu
-#ifdef TxInSleep
if ((pDevice->bEnablePSMode == FALSE) &&
(pDevice->fTxDataInSleep == FALSE)){
return FALSE;
}
-#else
- if (pDevice->bEnablePSMode == FALSE) {
- return FALSE;
- }
-#endif
+
memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index 50792bb8c97..41bffe528b4 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -18,7 +18,7 @@
*
* File: power.h
*
- * Purpose: Handles 802.11 power managment functions
+ * Purpose: Handles 802.11 power management functions
*
* Author: Lyndon Chen
*
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index d4f8b94132b..f5ba8fd7f81 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -64,11 +64,7 @@ extern const BYTE RFaby11aChannelIndex[200];
/*--------------------- Export Functions --------------------------*/
BOOL IFRFbWriteEmbeded(PSDevice pDevice, DWORD dwData);
-BOOL RFbSetPower (
- PSDevice pDevice,
- unsigned int uRATE,
- unsigned int uCH
- );
+BOOL RFbSetPower(PSDevice pDevice, unsigned int uRATE, unsigned int uCH);
BOOL RFbRawSetPower(
PSDevice pDevice,
@@ -76,17 +72,8 @@ BOOL RFbRawSetPower(
unsigned int uRATE
);
-void
-RFvRSSITodBm (
- PSDevice pDevice,
- BYTE byCurrRSSI,
- long * pldBm
- );
-
-void
-RFbRFTableDownload (
- PSDevice pDevice
- );
+void RFvRSSITodBm(PSDevice pDevice, BYTE byCurrRSSI, long *pldBm);
+void RFbRFTableDownload(PSDevice pDevice);
BOOL s_bVT3226D0_11bLoCurrentAdjust(
PSDevice pDevice,
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index ac842dd13a6..fccf7e98eb6 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -152,7 +152,7 @@ typedef struct _CMD_CHANGE_BBTYPE
/*--------------------- Export Macros -------------------------*/
-#define EXCH_WORD(w) ( (WORD)((WORD)(w)<<8) | (WORD)((WORD)(w)>>8) )
+#define EXCH_WORD(w) ((WORD)((WORD)(w)<<8) | (WORD)((WORD)(w)>>8))
/*--------------------- Export Variables --------------------------*/
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 3e7e56649a5..deca2137d92 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -61,10 +61,7 @@
#include "rf.h"
#include "datarate.h"
#include "usbpipe.h"
-
-#ifdef WPA_SM_Transtatus
#include "iocmd.h"
-#endif
/*--------------------- Static Definitions -------------------------*/
@@ -304,10 +301,9 @@ s_vSaveTxPktInfo(PSDevice pDevice, BYTE byPktNum, PBYTE pbyDestAddr, WORD wPktLe
{
PSStatCounter pStatistic=&(pDevice->scStatistic);
-
- if (IS_BROADCAST_ADDRESS(pbyDestAddr))
+ if (is_broadcast_ether_addr(pbyDestAddr))
pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_BROAD;
- else if (IS_MULTICAST_ADDRESS(pbyDestAddr))
+ else if (is_multicast_ether_addr(pbyDestAddr))
pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_MULTI;
else
pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_UNI;
@@ -319,9 +315,6 @@ s_vSaveTxPktInfo(PSDevice pDevice, BYTE byPktNum, PBYTE pbyDestAddr, WORD wPktLe
ETH_ALEN);
}
-
-
-
static
void
s_vFillTxKey (
@@ -1473,7 +1466,7 @@ s_bPacketToWirelessUsb(
memset(pTxBufHead, 0, sizeof(TX_BUFFER));
// Get pkt type
- if (ntohs(psEthHeader->wType) > MAX_DATA_LEN) {
+ if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if (pDevice->dwDiagRefCount == 0) {
cb802_1_H_len = 8;
} else {
@@ -1492,17 +1485,16 @@ s_bPacketToWirelessUsb(
bNeedACK = FALSE;
pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
} else { //if (pDevice->dwDiagRefCount != 0) {
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
- IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
- bNeedACK = FALSE;
- pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
- }
- else {
- bNeedACK = TRUE;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
+ if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
+ (pDevice->eOPMode == OP_MODE_AP)) {
+ if (is_multicast_ether_addr(psEthHeader->abyDstAddr)) {
+ bNeedACK = FALSE;
+ pTxBufHead->wFIFOCtl =
+ pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
+ } else {
+ bNeedACK = TRUE;
+ pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
+ }
}
else {
// MSDUs in Infra mode always need ACK
@@ -1708,7 +1700,7 @@ s_bPacketToWirelessUsb(
}
// 802.1H
- if (ntohs(psEthHeader->wType) > MAX_DATA_LEN) {
+ if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if (pDevice->dwDiagRefCount == 0) {
if ( (psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
@@ -2037,9 +2029,7 @@ CMD_STATUS csMgmt_xmit(
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
-
- if (IS_MULTICAST_ADDRESS(&(pPacket->p80211Header->sA3.abyAddr1[0])) ||
- IS_BROADCAST_ADDRESS(&(pPacket->p80211Header->sA3.abyAddr1[0]))) {
+ if (is_multicast_ether_addr(pPacket->p80211Header->sA3.abyAddr1)) {
bNeedACK = FALSE;
}
else {
@@ -2446,9 +2436,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
-
- if (IS_MULTICAST_ADDRESS(&(p80211Header->sA3.abyAddr1[0])) ||
- IS_BROADCAST_ADDRESS(&(p80211Header->sA3.abyAddr1[0]))) {
+ if (is_multicast_ether_addr(p80211Header->sA3.abyAddr1)) {
bNeedACK = FALSE;
if (pDevice->bEnableHostWEP) {
uNodeIndex = 0;
@@ -2741,14 +2729,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
* Return Value: NULL
*/
-
-
-NTSTATUS
-nsDMA_tx_packet(
- PSDevice pDevice,
- unsigned int uDMAIdx,
- struct sk_buff *skb
- )
+int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
unsigned int BytesToWrite = 0, uHeaderLen = 0;
@@ -2770,9 +2751,6 @@ nsDMA_tx_packet(
unsigned int status;
WORD wKeepRate = pDevice->wCurrentRate;
struct net_device_stats* pStats = &pDevice->stats;
-//#ifdef WPA_SM_Transtatus
- // extern SWPAResult wpa_Result;
-//#endif
BOOL bTxeapol_key = FALSE;
@@ -2783,7 +2761,7 @@ nsDMA_tx_packet(
return 0;
}
- if (IS_MULTICAST_ADDRESS((PBYTE)(skb->data))) {
+ if (is_multicast_ether_addr((PBYTE)(skb->data))) {
uNodeIndex = 0;
bNodeExist = TRUE;
if (pMgmt->sNodeDBTable[0].bPSEnable) {
@@ -2975,7 +2953,7 @@ nsDMA_tx_packet(
else {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
// Adhoc Tx rate decided from node DB
- if (IS_MULTICAST_ADDRESS(&(pDevice->sTxEthHeader.abyDstAddr[0]))) {
+ if (is_multicast_ether_addr(pDevice->sTxEthHeader.abyDstAddr)) {
// Multicast use highest data rate
pDevice->wCurrentRate = pMgmt->sNodeDBTable[0].wTxDataRate;
// preamble type
@@ -3071,28 +3049,12 @@ nsDMA_tx_packet(
}
else {
-#if 0
- if((pDevice->fWPA_Authened == FALSE) &&
- ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)||(pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK))){
- dev_kfree_skb_irq(skb);
- pStats->tx_dropped++;
- return STATUS_FAILURE;
- }
- else if (pTransmitKey == NULL) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"return no tx key\n");
- dev_kfree_skb_irq(skb);
- pStats->tx_dropped++;
- return STATUS_FAILURE;
- }
-#else
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"return no tx key\n");
dev_kfree_skb_irq(skb);
pStats->tx_dropped++;
return STATUS_FAILURE;
}
-#endif
-
}
}
@@ -3261,7 +3223,8 @@ bRelayPacketSend (
if (pDevice->wCurrentRate <= RATE_11M)
byPktType = PK_TYPE_11B;
- BytesToWrite = uDataLen + U_CRC_LEN;
+ BytesToWrite = uDataLen + ETH_FCS_LEN;
+
// Convert the packet to an usb frame and copy into our buffer
// and send the irp.
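
The ntohs(psEthHeader->wType) > ETH_DATA_LEN checks above rely on the Ethernet convention that the 16-bit field after the addresses is a length when it is at most ETH_DATA_LEN (1500) and an EtherType otherwise; that is what decides whether an 802.1H/RFC 1042 SNAP header gets prepended. A one-line sketch of the test:

#include <linux/types.h>
#include <linux/if_ether.h>

static int is_ethertype(__be16 type_or_len)
{
	return ntohs(type_or_len) > ETH_DATA_LEN;	/* EtherType, so SNAP encapsulation applies */
}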
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index f90de42d7ab..f99acf1d8eb 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -683,9 +683,9 @@ bPacketToWirelessUsb(
);
void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb);
-NTSTATUS nsDMA_tx_packet(PSDevice pDevice,
- unsigned int uDMAIdx,
- struct sk_buff *skb);
+int nsDMA_tx_packet(PSDevice pDevice,
+ unsigned int uDMAIdx,
+ struct sk_buff *skb);
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
BOOL bRelayPacketSend(PSDevice pDevice, PBYTE pbySkbData,
diff --git a/drivers/staging/vt6656/tether.h b/drivers/staging/vt6656/tether.h
index d63586d5cdb..be87020d532 100644
--- a/drivers/staging/vt6656/tether.h
+++ b/drivers/staging/vt6656/tether.h
@@ -36,25 +36,10 @@
//
// constants
//
-#define U_CRC_LEN 4 //
#define U_ETHER_ADDR_STR_LEN (ETH_ALEN * 2 + 1)
// Ethernet address string length
-
-#define MIN_DATA_LEN 46 // min data length
-#define MAX_DATA_LEN 1500 // max data length
-
-#define MIN_PACKET_LEN (MIN_DATA_LEN + ETH_HLEN)
- // 60
- // min total packet length (tx)
-#define MAX_PACKET_LEN (MAX_DATA_LEN + ETH_HLEN)
- // 1514
- // max total packet length (tx)
-
-#define MAX_LOOKAHEAD_SIZE MAX_PACKET_LEN
-
#define U_MULTI_ADDR_LEN 8 // multicast address length
-
#ifdef __BIG_ENDIAN
#define TYPE_PKT_IP 0x0800 //
@@ -168,7 +153,7 @@ typedef struct tagSEthernetHeader {
BYTE abyDstAddr[ETH_ALEN];
BYTE abySrcAddr[ETH_ALEN];
WORD wType;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
SEthernetHeader, *PSEthernetHeader;
@@ -179,7 +164,7 @@ typedef struct tagS802_3Header {
BYTE abyDstAddr[ETH_ALEN];
BYTE abySrcAddr[ETH_ALEN];
WORD wLen;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
S802_3Header, *PS802_3Header;
//
@@ -193,30 +178,10 @@ typedef struct tagS802_11Header {
BYTE abyAddr3[ETH_ALEN];
WORD wSeqCtl;
BYTE abyAddr4[ETH_ALEN];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
S802_11Header, *PS802_11Header;
/*--------------------- Export Macros ------------------------------*/
-// Frame type macro
-
-#define IS_MULTICAST_ADDRESS(pbyEtherAddr) \
- ((*(PBYTE)(pbyEtherAddr) & 0x01) == 1)
-
-#define IS_BROADCAST_ADDRESS(pbyEtherAddr) ( \
- (*(PDWORD)(pbyEtherAddr) == 0xFFFFFFFFL) && \
- (*(PWORD)((PBYTE)(pbyEtherAddr) + 4) == 0xFFFF) \
-)
-
-#define IS_NULL_ADDRESS(pbyEtherAddr) ( \
- (*(PDWORD)(pbyEtherAddr) == 0L) && \
- (*(PWORD)((PBYTE)(pbyEtherAddr) + 4) == 0) \
-)
-
-#define IS_ETH_ADDRESS_EQUAL(pbyAddr1, pbyAddr2) ( \
- (*(PDWORD)(pbyAddr1) == *(PDWORD)(pbyAddr2)) && \
- (*(PWORD)((PBYTE)(pbyAddr1) + 4) == \
- *(PWORD)((PBYTE)(pbyAddr2) + 4)) \
-)
/*--------------------- Export Classes ----------------------------*/
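
The address macros removed above are replaced throughout the patch by the generic helpers from <linux/etherdevice.h>. A sketch of the mapping; note that compare_ether_addr() returns 0 on a match, hence the negation, and that the is_zero_ether_addr() line is the natural counterpart rather than a change shown in this diff.

#include <linux/etherdevice.h>

/*
 *   IS_MULTICAST_ADDRESS(a)     ->  is_multicast_ether_addr(a)
 *   IS_BROADCAST_ADDRESS(a)     ->  is_broadcast_ether_addr(a)
 *   IS_NULL_ADDRESS(a)          ->  is_zero_ether_addr(a)   (counterpart, not in this diff)
 *   IS_ETH_ADDRESS_EQUAL(a, b)  ->  !compare_ether_addr(a, b)
 */
static int same_unicast_addr(const u8 *a, const u8 *b)
{
	return !is_multicast_ether_addr(a) && !compare_ether_addr(a, b);
}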
diff --git a/drivers/staging/vt6656/tkip.c b/drivers/staging/vt6656/tkip.c
index f83af5913aa..a6bd533f957 100644
--- a/drivers/staging/vt6656/tkip.c
+++ b/drivers/staging/vt6656/tkip.c
@@ -129,8 +129,6 @@ const BYTE TKIP_Sbox_Upper[256] = {
//STKIPKeyManagement sTKIPKeyTable[MAX_TKIP_KEY];
/*--------------------- Static Functions --------------------------*/
-unsigned int tkip_sbox(unsigned int index);
-unsigned int rotr1(unsigned int a);
/*--------------------- Export Variables --------------------------*/
@@ -139,7 +137,7 @@ unsigned int rotr1(unsigned int a);
/* Returns a 16 bit value from a 64K entry table. The Table */
/* is synthesized from two 256 entry byte wide tables. */
/************************************************************/
-unsigned int tkip_sbox(unsigned int index)
+static unsigned int tkip_sbox(unsigned int index)
{
unsigned int index_low;
unsigned int index_high;
@@ -155,7 +153,7 @@ unsigned int tkip_sbox(unsigned int index)
};
-unsigned int rotr1(unsigned int a)
+static unsigned int rotr1(unsigned int a)
{
unsigned int b;
diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
index c27f9858e2e..8e9450ef399 100644
--- a/drivers/staging/vt6656/ttype.h
+++ b/drivers/staging/vt6656/ttype.h
@@ -31,23 +31,6 @@
/******* Common definitions and typedefs ***********************************/
-//2007-0115-05<Add>by MikeLiu
-#ifndef TxInSleep
-#define TxInSleep
-#endif
-
-//DavidWang
-
-//2007-0814-01<Add>by MikeLiu
-#ifndef Safe_Close
-#define Safe_Close
-#endif
-
-//2008-0131-02<Add>by MikeLiu
-#ifndef Adhoc_STA
-#define Adhoc_STA
-#endif
-
typedef int BOOL;
#if !defined(TRUE)
@@ -57,19 +40,6 @@ typedef int BOOL;
#define FALSE 0
#endif
-//2007-0809-01<Add>by MikeLiu
-#ifndef update_BssList
-#define update_BssList
-#endif
-
-#ifndef WPA_SM_Transtatus
-#define WPA_SM_Transtatus
-#endif
-
-#ifndef Calcu_LinkQual
-#define Calcu_LinkQual
-#endif
-
/****** Simple typedefs ***************************************************/
typedef unsigned char BYTE; // 8-bit
@@ -94,7 +64,6 @@ typedef unsigned long ULONG_PTR; // 32-bit
typedef unsigned long DWORD_PTR; // 32-bit
// boolean pointer
-typedef unsigned int * PUINT;
typedef BYTE * PBYTE;
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index fd2355e34fb..a32785cb9d1 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -107,10 +107,7 @@ s_nsControlInUsbIoCompleteWrite(
/*--------------------- Export Functions --------------------------*/
-
-
-NTSTATUS
-PIPEnsControlOutAsyn(
+int PIPEnsControlOutAsyn(
PSDevice pDevice,
BYTE byRequest,
WORD wValue,
@@ -119,8 +116,7 @@ PIPEnsControlOutAsyn(
PBYTE pbyBuffer
)
{
- NTSTATUS ntStatus;
-
+ int ntStatus;
if (MP_TEST_FLAG(pDevice, fMP_DISCONNECTED))
return STATUS_FAILURE;
@@ -156,12 +152,7 @@ PIPEnsControlOutAsyn(
return ntStatus;
}
-
-
-
-
-NTSTATUS
-PIPEnsControlOut(
+int PIPEnsControlOut(
PSDevice pDevice,
BYTE byRequest,
WORD wValue,
@@ -170,10 +161,9 @@ PIPEnsControlOut(
PBYTE pbyBuffer
)
{
- NTSTATUS ntStatus = 0;
+ int ntStatus = 0;
int ii;
-
if (MP_TEST_FLAG(pDevice, fMP_DISCONNECTED))
return STATUS_FAILURE;
@@ -219,11 +209,7 @@ PIPEnsControlOut(
return STATUS_SUCCESS;
}
-
-
-
-NTSTATUS
-PIPEnsControlIn(
+int PIPEnsControlIn(
PSDevice pDevice,
BYTE byRequest,
WORD wValue,
@@ -232,7 +218,7 @@ PIPEnsControlIn(
PBYTE pbyBuffer
)
{
- NTSTATUS ntStatus = 0;
+ int ntStatus = 0;
int ii;
if (MP_TEST_FLAG(pDevice, fMP_DISCONNECTED))
@@ -360,13 +346,9 @@ s_nsControlInUsbIoCompleteRead(
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-NTSTATUS
-PIPEnsInterruptRead(
- PSDevice pDevice
- )
+int PIPEnsInterruptRead(PSDevice pDevice)
{
- NTSTATUS ntStatus = STATUS_FAILURE;
-
+ int ntStatus = STATUS_FAILURE;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");
@@ -381,29 +363,6 @@ PIPEnsInterruptRead(
// Now that we have created the urb, we will send a
// request to the USB device object.
//
-#if 0 //reserve int URB submit
- usb_fill_int_urb(pDevice->pInterruptURB,
- pDevice->usb,
- usb_rcvintpipe(pDevice->usb, 1),
- (void *) pDevice->intBuf.pDataBuf,
- MAX_INTERRUPT_SIZE,
- s_nsInterruptUsbIoCompleteRead,
- pDevice,
- pDevice->int_interval
- );
-#else //replace int URB submit by bulk transfer
-#ifndef Safe_Close
- usb_fill_int_urb(pDevice->pInterruptURB,
- pDevice->usb,
- usb_rcvintpipe(pDevice->usb, 1),
- (void *) pDevice->intBuf.pDataBuf,
- MAX_INTERRUPT_SIZE,
- s_nsInterruptUsbIoCompleteRead,
- pDevice,
- pDevice->int_interval
- );
-#else
-
pDevice->pInterruptURB->interval = pDevice->int_interval;
usb_fill_bulk_urb(pDevice->pInterruptURB,
@@ -413,8 +372,6 @@ usb_fill_bulk_urb(pDevice->pInterruptURB,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
pDevice);
-#endif
-#endif
ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
if (ntStatus != 0) {
@@ -448,8 +405,7 @@ s_nsInterruptUsbIoCompleteRead(
{
PSDevice pDevice;
- NTSTATUS ntStatus;
-
+ int ntStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
//
@@ -495,13 +451,6 @@ s_nsInterruptUsbIoCompleteRead(
if (pDevice->fKillEventPollingThread != TRUE) {
- #if 0 //reserve int URB submit
- ntStatus = usb_submit_urb(urb, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Re-Submit int URB failed %d\n", ntStatus);
- }
- #else //replace int URB submit by bulk transfer
- #ifdef Safe_Close
usb_fill_bulk_urb(pDevice->pInterruptURB,
pDevice->usb,
usb_rcvbulkpipe(pDevice->usb, 1),
@@ -514,11 +463,6 @@ s_nsInterruptUsbIoCompleteRead(
if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
}
-
- #else
- tasklet_schedule(&pDevice->EventWorkItem);
- #endif
-#endif
}
//
// We return STATUS_MORE_PROCESSING_REQUIRED so that the completion
@@ -540,13 +484,9 @@ s_nsInterruptUsbIoCompleteRead(
* Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
*
*/
-NTSTATUS
-PIPEnsBulkInUsbRead(
- PSDevice pDevice,
- PRCB pRCB
- )
+int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
{
- NTSTATUS ntStatus= 0;
+ int ntStatus = 0;
struct urb *pUrb;
@@ -616,9 +556,7 @@ s_nsBulkInUsbIoCompleteRead(
unsigned long bytesRead;
BOOL bIndicateReceive = FALSE;
BOOL bReAllocSkb = FALSE;
- NTSTATUS status;
-
-
+ int status;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
status = urb->status;
@@ -628,9 +566,7 @@ s_nsBulkInUsbIoCompleteRead(
pDevice->ulBulkInError++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);
- #ifdef Calcu_LinkQual
pDevice->scStatistic.RxFcsErrCnt ++;
- #endif
//todo...xxxxxx
// if (status == USBD_STATUS_CRC) {
// pDevice->ulBulkInContCRCError++;
@@ -644,9 +580,7 @@ s_nsBulkInUsbIoCompleteRead(
pDevice->ulBulkInContCRCError = 0;
pDevice->ulBulkInBytesRead += bytesRead;
- #ifdef Calcu_LinkQual
pDevice->scStatistic.RxOkCnt ++;
- #endif
}
@@ -690,7 +624,7 @@ PIPEnsSendBulkOut(
PUSB_SEND_CONTEXT pContext
)
{
- NTSTATUS status;
+ int status;
struct urb *pUrb;
@@ -771,7 +705,7 @@ s_nsBulkOutIoCompleteWrite(
)
{
PSDevice pDevice;
- NTSTATUS status;
+ int status;
CONTEXT_TYPE ContextType;
unsigned long ulBufLen;
PUSB_SEND_CONTEXT pContext;
@@ -803,10 +737,7 @@ s_nsBulkOutIoCompleteWrite(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
pDevice->ulBulkOutBytesWrite += ulBufLen;
pDevice->ulBulkOutContCRCError = 0;
- //2007-0115-06<Add>by MikeLiu
- #ifdef TxInSleep
- pDevice->nTxDataTimeCout = 0;
- #endif
+ pDevice->nTxDataTimeCout = 0;
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
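
PIPEnsInterruptRead() above now always drives endpoint 1 as a bulk pipe. A minimal sketch of that submission pattern; the example_* names are hypothetical, the USB calls are the standard core API.

#include <linux/usb.h>

static int example_submit_bulk_in(struct usb_device *udev, struct urb *urb,
				  void *buf, int len,
				  usb_complete_t complete_fn, void *context)
{
	usb_fill_bulk_urb(urb, udev,
			  usb_rcvbulkpipe(udev, 1),	/* bulk IN on endpoint 1 */
			  buf, len, complete_fn, context);

	return usb_submit_urb(urb, GFP_ATOMIC);	/* 0 on success, negative errno otherwise */
}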
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index f852b39027a..b3673474a9e 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -41,8 +41,7 @@
/*--------------------- Export Functions --------------------------*/
-NTSTATUS
-PIPEnsControlOut(
+int PIPEnsControlOut(
PSDevice pDevice,
BYTE byRequest,
WORD wValue,
@@ -51,10 +50,7 @@ PIPEnsControlOut(
PBYTE pbyBuffer
);
-
-
-NTSTATUS
-PIPEnsControlOutAsyn(
+int PIPEnsControlOutAsyn(
PSDevice pDevice,
BYTE byRequest,
WORD wValue,
@@ -63,8 +59,7 @@ PIPEnsControlOutAsyn(
PBYTE pbyBuffer
);
-NTSTATUS
-PIPEnsControlIn(
+int PIPEnsControlIn(
PSDevice pDevice,
BYTE byRequest,
WORD wValue,
@@ -73,24 +68,8 @@ PIPEnsControlIn(
PBYTE pbyBuffer
);
-
-
-
-NTSTATUS
-PIPEnsInterruptRead(
- PSDevice pDevice
- );
-
-NTSTATUS
-PIPEnsBulkInUsbRead(
- PSDevice pDevice,
- PRCB pRCB
- );
-
-NTSTATUS
-PIPEnsSendBulkOut(
- PSDevice pDevice,
- PUSB_SEND_CONTEXT pContext
- );
+int PIPEnsInterruptRead(PSDevice pDevice);
+int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB);
+int PIPEnsSendBulkOut(PSDevice pDevice, PUSB_SEND_CONTEXT pContext);
#endif /* __USBPIPE_H__ */
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 72e21b6f0e8..686747a0929 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -565,11 +565,9 @@ void vRunCommand(void *hDeviceContext)
return;
}
-//20080131-03,<Add> by Mike Liu
- #ifdef Adhoc_STA
memcpy(pMgmt->abyAdHocSSID,pMgmt->abyDesireSSID,
((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
- #endif
+
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
@@ -716,18 +714,6 @@ void vRunCommand(void *hDeviceContext)
return;
}
pDevice->byLinkWaitCount = 0;
- #if 0
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- printk("wireless_send_event--->SIOCGIWAP(disassociated:AUTHENTICATE_WAIT_timeout)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- #endif
- #endif
s_bCommandComplete(pDevice);
break;
@@ -754,8 +740,6 @@ void vRunCommand(void *hDeviceContext)
netif_wake_queue(pDevice->dev);
}
- //2007-0115-07<Add>by MikeLiu
- #ifdef TxInSleep
if(pDevice->IsTxDataTrigger != FALSE) { //TxDataTimer is not triggered at the first time
// printk("Re-initial TxDataTimer****\n");
del_timer(&pDevice->sTimerTxData);
@@ -771,7 +755,6 @@ void vRunCommand(void *hDeviceContext)
}
pDevice->IsTxDataTrigger = TRUE;
add_timer(&pDevice->sTimerTxData);
- #endif
}
else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
@@ -785,18 +768,6 @@ void vRunCommand(void *hDeviceContext)
return;
}
pDevice->byLinkWaitCount = 0;
- #if 0
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- printk("wireless_send_event--->SIOCGIWAP(disassociated:ASSOCIATE_WAIT_timeout)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- #endif
- #endif
s_bCommandComplete(pDevice);
break;
@@ -907,7 +878,7 @@ void vRunCommand(void *hDeviceContext)
// CARDbRadioPowerOff(pDevice);
//2008-09-09<Add> BY Mike:Hot Key for Radio On/Off
{
- NTSTATUS ntStatus = STATUS_SUCCESS;
+ int ntStatus = STATUS_SUCCESS;
BYTE byTmp;
ntStatus = CONTROLnsRequestIn(pDevice,
@@ -1300,8 +1271,6 @@ void vResetCommandTimer(void *hDeviceContext)
pDevice->bCmdClear = FALSE;
}
-//2007-0115-08<Add>by MikeLiu
-#ifdef TxInSleep
void BSSvSecondTxData(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1320,12 +1289,8 @@ void BSSvSecondTxData(void *hDeviceContext)
spin_lock_irq(&pDevice->lock);
//is wap_supplicant running successful OR only open && sharekey mode!
- #if 1
if(((pDevice->bLinkPass ==TRUE)&&(pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
(pDevice->fWPA_Authened == TRUE)) { //wpa linking
- #else
- if(pDevice->bLinkPass ==TRUE) {
- #endif
// printk("mike:%s-->InSleep Tx Data Procedure\n",__FUNCTION__);
pDevice->fTxDataInSleep = TRUE;
PSbSendNullPacket(pDevice); //send null packet
@@ -1337,5 +1302,3 @@ void BSSvSecondTxData(void *hDeviceContext)
add_timer(&pDevice->sTimerTxData);
return;
}
-#endif
-
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index 09c4411c689..d24a79dce61 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -128,9 +128,6 @@ WCMDvCommandThread(
);
*/
-//2007-0115-09<Add>by MikeLiu
-#ifdef TxInSleep
void BSSvSecondTxData(void *hDeviceContext);
-#endif
#endif /* __WCMD_H__ */
diff --git a/drivers/staging/vt6656/wctl.c b/drivers/staging/vt6656/wctl.c
index 857ce0bc00a..c231ae7176f 100644
--- a/drivers/staging/vt6656/wctl.c
+++ b/drivers/staging/vt6656/wctl.c
@@ -79,7 +79,8 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
for (ii = 0; ii < DUPLICATE_RX_CACHE_LENGTH; ii++) {
pCacheEntry = &(pCache->asCacheEntry[uIndex]);
if ((pCacheEntry->wFmSequence == pMACHeader->wSeqCtl) &&
- (IS_ETH_ADDRESS_EQUAL (&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]))) &&
+ (!compare_ether_addr(&(pCacheEntry->abyAddr2[0]),
+ &(pMACHeader->abyAddr2[0]))) &&
(LOBYTE(pCacheEntry->wFrameCtl) == LOBYTE(pMACHeader->wFrameCtl))
) {
/* Duplicate match */
@@ -111,22 +112,21 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
* Return Value: index number in Defragment Database
*
*/
+
unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
{
unsigned int ii;
- for(ii=0;ii<pDevice->cbDFCB;ii++) {
- if ((pDevice->sRxDFCB[ii].bInUse == TRUE) &&
- (IS_ETH_ADDRESS_EQUAL (&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0])))
- ) {
- //
- return(ii);
- }
- }
- return(pDevice->cbDFCB);
+ for (ii = 0; ii < pDevice->cbDFCB; ii++) {
+ if ((pDevice->sRxDFCB[ii].bInUse == TRUE) &&
+ (!compare_ether_addr(&(pDevice->sRxDFCB[ii].abyAddr2[0]),
+ &(pMACHeader->abyAddr2[0])))) {
+ return ii;
+ }
+ }
+ return pDevice->cbDFCB;
}
-
/*
* Description:
* Insert received fragment packet in Defragment Database
@@ -147,7 +147,7 @@ unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
if (pDevice->cbFreeDFCB == 0)
return(pDevice->cbDFCB);
- for(ii=0;ii<pDevice->cbDFCB;ii++) {
+ for (ii = 0; ii < pDevice->cbDFCB; ii++) {
if (pDevice->sRxDFCB[ii].bInUse == FALSE) {
pDevice->cbFreeDFCB--;
pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
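
WCTLbIsDuplicate() above keys its duplicate cache on the transmitter address plus the sequence-control word. A compressed sketch of that test, with rx_cache_entry standing in (hypothetically) for the driver's cache entry type:

#include <linux/etherdevice.h>

struct rx_cache_entry {		/* hypothetical */
	u8  addr2[ETH_ALEN];
	u16 seq_ctl;
};

static int is_retransmission(const struct rx_cache_entry *e,
			     const u8 *addr2, u16 seq_ctl)
{
	return e->seq_ctl == seq_ctl &&
	       !compare_ether_addr(e->addr2, addr2);	/* compare_ether_addr() is 0 on match */
}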
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index 93c15f0580f..e4eca9b060b 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -353,9 +353,9 @@ void vMgrObjectInit(void *hDeviceContext)
pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0];
pMgmt->pbyMgmtPacketPool = &pMgmt->byMgmtPacketPool[0];
pMgmt->uCurrChannel = pDevice->uChannel;
- for(ii=0;ii<WLAN_BSSID_LEN;ii++) {
- pMgmt->abyDesireBSSID[ii] = 0xFF;
- }
+ for (ii = 0; ii < WLAN_BSSID_LEN; ii++)
+ pMgmt->abyDesireBSSID[ii] = 0xFF;
+
pMgmt->sAssocInfo.AssocInfo.Length = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
//memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN +1);
pMgmt->byCSSPK = KEY_CTL_NONE;
@@ -373,8 +373,6 @@ void vMgrObjectInit(void *hDeviceContext)
pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
pDevice->sTimerCommand.expires = RUN_AT(HZ);
-//2007-0115-10<Add>by MikeLiu
- #ifdef TxInSleep
init_timer(&pDevice->sTimerTxData);
pDevice->sTimerTxData.data = (unsigned long)pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
@@ -382,7 +380,6 @@ void vMgrObjectInit(void *hDeviceContext)
pDevice->fTxDataInSleep = FALSE;
pDevice->IsTxDataTrigger = FALSE;
pDevice->nTxDataTimeCout = 0;
- #endif
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
@@ -1056,7 +1053,6 @@ s_vMgrRxAssocResponse(
}
-#if 1
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
//need clear flags related to Networkmanager
pDevice->bwextstep0 = FALSE;
@@ -1065,7 +1061,6 @@ s_vMgrRxAssocResponse(
pDevice->bwextstep3 = FALSE;
pDevice->bWPASuppWextEnabled = FALSE;
#endif
-#endif
if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
timer_expire(pDevice->sTimerCommand, 0);
@@ -1705,7 +1700,8 @@ s_vMgrRxDeauthentication(
pDevice->fWPA_Authened = FALSE;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP deauthed me, reason=%d.\n", cpu_to_le16((*(sFrame.pwReason))));
// TODO: update BSS list for specific BSSID if pre-authentication case
- if (IS_ETH_ADDRESS_EQUAL(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID)) {
+ if (!compare_ether_addr(sFrame.pHdr->sA3.abyAddr3,
+ pMgmt->abyCurrBSSID)) {
if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
pMgmt->sNodeDBTable[0].bActive = FALSE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
@@ -2471,11 +2467,8 @@ void vMgrCreateOwnIBSS(void *hDeviceContext,
pDevice->uCurrRSSI = 0;
pDevice->byCurrSQ = 0;
-//20080131-04,<Add> by Mike Liu
-#ifdef Adhoc_STA
memcpy(pMgmt->abyDesireSSID,pMgmt->abyAdHocSSID,
((PWLAN_IE_SSID)pMgmt->abyAdHocSSID)->len + WLAN_IEHDR_LEN);
-#endif
memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
memcpy(pMgmt->abyCurrSSID,
@@ -3099,12 +3092,6 @@ s_vMgrSynchBSS (
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
/* unsigned int ii, uSameBssidNum=0; */
- // for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- // if (pMgmt->sBSSList[ii].bActive &&
- // IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) {
- // uSameBssidNum++;
- // }
- // }
// if( uSameBssidNum>=2) { //we only check AP in hidden sssid mode
if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || //networkmanager 0.7.0 does not give the pairwise-key selsection,
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need re-selsect it according to real pairwise-key info.
@@ -4795,21 +4782,21 @@ s_bCipherMatch (
byMulticastCipher = KEY_CTL_INVALID;
}
- // check Pairwise Key Cipher
- for(i=0;i<pBSSNode->wCSSPKCount;i++) {
- if ((pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP40) ||
- (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP104)) {
- // this should not happen as defined 802.11i
- byCipherMask |= 0x01;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_TKIP) {
- byCipherMask |= 0x02;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_CCMP) {
- byCipherMask |= 0x04;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_USE_GROUP) {
- // use group key only ignore all others
- byCipherMask = 0;
- i = pBSSNode->wCSSPKCount;
- }
+ /* check Pairwise Key Cipher */
+ for (i = 0; i < pBSSNode->wCSSPKCount; i++) {
+ if ((pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP40) ||
+ (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP104)) {
+ /* this should not happen as defined in 802.11i */
+ byCipherMask |= 0x01;
+ } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_TKIP) {
+ byCipherMask |= 0x02;
+ } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_CCMP) {
+ byCipherMask |= 0x04;
+ } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_USE_GROUP) {
+ /* use group key only ignore all others */
+ byCipherMask = 0;
+ i = pBSSNode->wCSSPKCount;
+ }
}
} else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
@@ -4828,17 +4815,17 @@ s_bCipherMatch (
byMulticastCipher = KEY_CTL_INVALID;
}
- // check Pairwise Key Cipher
- for(i=0;i<pBSSNode->wPKCount;i++) {
- if (pBSSNode->abyPKType[i] == WPA_TKIP) {
- byCipherMask |= 0x02;
- } else if (pBSSNode->abyPKType[i] == WPA_AESCCMP) {
- byCipherMask |= 0x04;
- } else if (pBSSNode->abyPKType[i] == WPA_NONE) {
- // use group key only ignore all others
- byCipherMask = 0;
- i = pBSSNode->wPKCount;
- }
+ /* check Pairwise Key Cipher */
+ for (i = 0; i < pBSSNode->wPKCount; i++) {
+ if (pBSSNode->abyPKType[i] == WPA_TKIP) {
+ byCipherMask |= 0x02;
+ } else if (pBSSNode->abyPKType[i] == WPA_AESCCMP) {
+ byCipherMask |= 0x04;
+ } else if (pBSSNode->abyPKType[i] == WPA_NONE) {
+ /* use group key only ignore all others */
+ byCipherMask = 0;
+ i = pBSSNode->wPKCount;
+ }
}
}
diff --git a/drivers/staging/vt6656/wmgr.h b/drivers/staging/vt6656/wmgr.h
index 1e5b916aea1..683840c0ac4 100644
--- a/drivers/staging/vt6656/wmgr.h
+++ b/drivers/staging/vt6656/wmgr.h
@@ -82,7 +82,7 @@
/*--------------------- Export Types ------------------------------*/
//mike define: make timer to expire after desired times
-#define timer_expire(timer,next_tick) mod_timer(&timer, RUN_AT(next_tick))
+#define timer_expire(timer, next_tick) mod_timer(&timer, RUN_AT(next_tick))
typedef void (*TimerFunction)(unsigned long);
@@ -259,9 +259,7 @@ typedef struct tagSMgmtObject
// Operation state variables
WMAC_CURRENT_MODE eCurrMode; // MAC current connection mode
WMAC_BSS_STATE eCurrState; // MAC current BSS state
- #ifdef SndEvt_ToAPI
WMAC_BSS_STATE eLastState; // MAC last BSS state
- #endif
PKnownBSS pCurrBSS;
BYTE byCSSGK;
@@ -293,10 +291,7 @@ typedef struct tagSMgmtObject
BYTE abyDesireBSSID[WLAN_BSSID_LEN];
//restore BSS info for Ad-Hoc mode
-//20080131-05,<Add> by Mike Liu
-#ifdef Adhoc_STA
BYTE abyAdHocSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
-#endif
// Adhoc or AP configuration vars
WORD wIBSSBeaconPeriod;
@@ -343,11 +338,11 @@ typedef struct tagSMgmtObject
BOOL bRxBeaconInTBTTWake;
BYTE abyPSTxMap[MAX_NODE_NUM + 1];
- // managment command related
+ // management command related
unsigned int uCmdBusy;
unsigned int uCmdHostAPBusy;
- // managment packet pool
+ // management packet pool
PBYTE pbyMgmtPacketPool;
BYTE byMgmtPacketPool[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
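
timer_expire() above wraps mod_timer(); the timers it retriggers are armed with the old init_timer()/add_timer() interface, as in vMgrObjectInit(). A generic sketch of that pattern, assuming RUN_AT(x) expands to jiffies + (x); example_timer_fn is hypothetical.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_timer_fn(unsigned long data);	/* hypothetical callback */

static void example_arm(struct timer_list *t, unsigned long data)
{
	init_timer(t);
	t->function = example_timer_fn;
	t->data = data;
	t->expires = jiffies + HZ;		/* fire in roughly one second */
	add_timer(t);
}

static void example_retrigger(struct timer_list *t, unsigned long ticks)
{
	mod_timer(t, jiffies + ticks);		/* roughly what timer_expire(timer, ticks) boils down to */
}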
diff --git a/drivers/staging/vt6656/wpa.c b/drivers/staging/vt6656/wpa.c
index 1fa6c9b88ed..f492778ee8b 100644
--- a/drivers/staging/vt6656/wpa.c
+++ b/drivers/staging/vt6656/wpa.c
@@ -148,7 +148,8 @@ WPA_ParseRSN (
{
j = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wPKCount: %d, sizeof(pBSSList->abyPKType): %zu\n", pRSN->wPKCount, sizeof(pBSSList->abyPKType));
- for(i = 0; (i < pRSN->wPKCount) && (j < sizeof(pBSSList->abyPKType)/sizeof(BYTE)); i++) {
+ for (i = 0; (i < pRSN->wPKCount) &&
+ (j < sizeof(pBSSList->abyPKType)/sizeof(BYTE)); i++) {
if(pRSN->len >= 12+i*4+4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*i)
if ( !memcmp(pRSN->PKSList[i].abyOUI, abyOUI00, 4))
pBSSList->abyPKType[j++] = WPA_NONE;
@@ -180,7 +181,8 @@ WPA_ParseRSN (
j = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAuthCount: %d, sizeof(pBSSList->abyAuthType): %zu\n",
pIE_RSN_Auth->wAuthCount, sizeof(pBSSList->abyAuthType));
- for(i = 0; (i < pIE_RSN_Auth->wAuthCount) && (j < sizeof(pBSSList->abyAuthType)/sizeof(BYTE)); i++) {
+ for (i = 0; (i < pIE_RSN_Auth->wAuthCount) &&
+ (j < sizeof(pBSSList->abyAuthType)/sizeof(BYTE)); i++) {
if(pRSN->len >= 14+4+(m+i)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*i)
if ( !memcmp(pIE_RSN_Auth->AuthKSList[i].abyOUI, abyOUI01, 4))
pBSSList->abyAuthType[j++] = WPA_AUTH_IEEE802_1X;
diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
index 429a910a5c5..46c295905b4 100644
--- a/drivers/staging/vt6656/wpa2.h
+++ b/drivers/staging/vt6656/wpa2.h
@@ -58,21 +58,9 @@ typedef struct tagSPMKIDCache {
/*--------------------- Export Functions --------------------------*/
-void
-WPA2_ClearRSN (
- PKnownBSS pBSSNode
- );
+void WPA2_ClearRSN(PKnownBSS pBSSNode);
+void WPA2vParseRSN(PKnownBSS pBSSNode, PWLAN_IE_RSN pRSN);
-void
-WPA2vParseRSN (
- PKnownBSS pBSSNode,
- PWLAN_IE_RSN pRSN
- );
-
-unsigned int
-WPA2uSetIEs(
- void *pMgmtHandle,
- PWLAN_IE_RSN pRSNIEs
- );
+unsigned int WPA2uSetIEs(void *pMgmtHandle, PWLAN_IE_RSN pRSNIEs);
#endif /* __WPA2_H__ */
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 961f583368a..b407ae536bf 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -186,7 +186,6 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
return wpa_release_wpadev(pDevice);
}
-
/*
* Description:
* Set WPA algorithm & keys
@@ -349,9 +348,8 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
return -EINVAL;
}
-
- if (IS_BROADCAST_ADDRESS(&param->addr[0]) || (param->addr == NULL)) {
- // If IS_BROADCAST_ADDRESS, set the key as every key entry's group key.
+ if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
+ /* if broadcast, set the key as every key entry's group key */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
if ((KeybSetAllGroupKey(pDevice,
@@ -404,7 +402,7 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
} else {
// Key Table Full
- if (IS_ETH_ADDRESS_EQUAL(&param->addr[0], pDevice->abyBSSID)) {
+ if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
return -EINVAL;
@@ -647,9 +645,9 @@ static int wpa_get_scan(PSDevice pDevice,
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- for(jj=0;jj<MAX_BSS_NUM-ii-1;jj++) {
+ for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
- if((pMgmt->sBSSList[jj].bActive!=TRUE) ||
+ if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=FALSE))) {
diff --git a/drivers/staging/winbond/mac_structures.h b/drivers/staging/winbond/mac_structures.h
index 7441015cb18..415256f69c3 100644
--- a/drivers/staging/winbond/mac_structures.h
+++ b/drivers/staging/winbond/mac_structures.h
@@ -41,8 +41,10 @@
#define DOT_11_MAC_HEADER_SIZE 24
#define DOT_11_SNAP_SIZE 6
#define DOT_11_DURATION_OFFSET 2
-#define DOT_11_SEQUENCE_OFFSET 22 /* Sequence control offset */
-#define DOT_11_TYPE_OFFSET 30 /* The start offset of 802.11 Frame// */
+/* Sequence control offset */
+#define DOT_11_SEQUENCE_OFFSET 22
+/* The start offset of 802.11 Frame */
+#define DOT_11_TYPE_OFFSET 30
#define DOT_11_DATA_OFFSET 24
#define DOT_11_DA_OFFSET 4
#define DOT_3_TYPE_ARP 0x80F3
@@ -98,28 +100,28 @@
#define ELEMENT_ID_CF_PARAMETER_SET 4
#define ELEMENT_ID_TIM 5
#define ELEMENT_ID_IBSS_PARAMETER_SET 6
-// 7~15 reserverd
+/* 7~15 reserved */
#define ELEMENT_ID_CHALLENGE_TEXT 16
-// 17~31 reserved for challenge text extension
-// 32~255 reserved
-//-- 11G --
+/* 17~31 reserved for challenge text extension */
+/* 32~255 reserved */
+/* -- 11G -- */
#define ELEMENT_ID_ERP_INFORMATION 42
#define ELEMENT_ID_EXTENDED_SUPPORTED_RATES 50
-//-- WPA --
+/* -- WPA -- */
#define ELEMENT_ID_RSN_WPA 221
#ifdef _WPA2_
#define ELEMENT_ID_RSN_WPA2 48
-#endif //endif WPA2
+#endif /* endif WPA2 */
#define WLAN_MAX_PAIRWISE_CIPHER_SUITE_COUNT ((u16) 6)
#define WLAN_MAX_AUTH_KEY_MGT_SUITE_LIST_COUNT ((u16) 2)
-//===================================================================
-// Reason Code (Table 18): indicate the reason of DisAssoc, DeAuthen
-// length of ReasonCode is 2 Octs.
-//===================================================================
+/* ===================================================================
+* Reason Code (Table 18): indicates the reason for DisAssoc/DeAuthen frames;
+* the ReasonCode field is 2 octets long.
+* =================================================================== */
#define REASON_REASERED 0
#define REASON_UNSPECIDIED 1
#define REASON_PREAUTH_INVALID 2
@@ -385,9 +387,11 @@ struct Extended_Supported_Rates_Element {
#ifdef _WPA2_
#define VERSION_WPA2 1
#endif /* end def _WPA2_ */
-#define OUI_WPA 0x00F25000 /* WPA2.0 OUI=00:50:F2, the MSB is reserved for suite type */
+/* WPA2.0 OUI=00:50:F2, the MSB is reserved for suite type */
+#define OUI_WPA 0x00F25000
#ifdef _WPA2_
-#define OUI_WPA2 0x00AC0F00 /* for wpa2 change to 0x00ACOF04 by Ws 26/04/04 */
+/* for wpa2 change to 0x00ACOF04 by Ws 26/04/04 */
+#define OUI_WPA2 0x00AC0F00
#endif /* end def _WPA2_ */
#define OUI_WPA_ADDITIONAL 0x01
@@ -400,8 +404,8 @@ struct Extended_Supported_Rates_Element {
#define WPA_OUI_BIG ((u32) 0x01F25000)/* added by ws 09/23/04 */
#define WPA_OUI_LITTLE ((u32) 0x01F25001)/* added by ws 09/23/04 */
-
-#define WPA_WPS_OUI cpu_to_le32(0x04F25000) /* 20061108 For WPS. It's little endian. Big endian is 0x0050F204 */
+/* 20061108 For WPS. It's little endian. Big endian is 0x0050F204 */
+#define WPA_WPS_OUI cpu_to_le32(0x04F25000)
/* -----WPA2----- */
#ifdef _WPA2_
@@ -420,75 +424,65 @@ struct Extended_Supported_Rates_Element {
#define OUI_CIPHER_CCMP 0x04
#define OUI_CIPHER_WEP_104 0x05
-struct suite_selector
-{
- union
- {
+struct suite_selector{
+ union{
u8 Value[4];
- struct _SUIT_
- {
+ struct _SUIT_ {
u8 OUI[3];
u8 Type;
- }SuitSelector;
+ } SuitSelector;
};
};
-//-- WPA --
-struct RSN_Information_Element
-{
+/* -- WPA -- */
+struct RSN_Information_Element{
u8 Element_ID;
u8 Length;
- struct suite_selector OuiWPAAdditional; /* WPA version 2.0 additional field, and should be 00:50:F2:01 */
+ /* WPA version 2.0 additional field, and should be 00:50:F2:01 */
+ struct suite_selector OuiWPAAdditional;
u16 Version;
struct suite_selector GroupKeySuite;
u16 PairwiseKeySuiteCount;
struct suite_selector PairwiseKeySuite[1];
-}__attribute__ ((packed));
-struct RSN_Auth_Sub_Information_Element
-{
+} __attribute__ ((packed));
+struct RSN_Auth_Sub_Information_Element {
u16 AuthKeyMngtSuiteCount;
struct suite_selector AuthKeyMngtSuite[1];
-}__attribute__ ((packed));
+} __attribute__ ((packed));
/* -- WPA2 -- */
-struct RSN_Capability_Element
-{
- union
- {
+struct RSN_Capability_Element {
+ union {
u16 __attribute__ ((packed)) wValue;
#ifdef _BIG_ENDIAN_ /* 20060927 add by anson's endian */
- struct _RSN_Capability
- {
- u16 __attribute__ ((packed)) Reserved2 : 8; /* 20051201 */
- u16 __attribute__ ((packed)) Reserved1 : 2;
- u16 __attribute__ ((packed)) GTK_Replay_Counter : 2;
- u16 __attribute__ ((packed)) PTK_Replay_Counter : 2;
- u16 __attribute__ ((packed)) No_Pairwise : 1;
- u16 __attribute__ ((packed)) Pre_Auth : 1;
- }__attribute__ ((packed)) RSN_Capability;
+ struct _RSN_Capability {
+ u16 __attribute__ ((packed)) Reserved2:8; /* 20051201 */
+ u16 __attribute__ ((packed)) Reserved1:2;
+ u16 __attribute__ ((packed)) GTK_Replay_Counter:2;
+ u16 __attribute__ ((packed)) PTK_Replay_Counter:2;
+ u16 __attribute__ ((packed)) No_Pairwise:1;
+ u16 __attribute__ ((packed)) Pre_Auth:1;
+ } __attribute__ ((packed)) RSN_Capability;
#else
- struct _RSN_Capability
- {
- u16 __attribute__ ((packed)) Pre_Auth : 1;
- u16 __attribute__ ((packed)) No_Pairwise : 1;
- u16 __attribute__ ((packed)) PTK_Replay_Counter : 2;
- u16 __attribute__ ((packed)) GTK_Replay_Counter : 2;
- u16 __attribute__ ((packed)) Reserved1 : 2;
- u16 __attribute__ ((packed)) Reserved2 : 8; /* 20051201 */
- }__attribute__ ((packed)) RSN_Capability;
+ struct _RSN_Capability {
+ u16 __attribute__ ((packed)) Pre_Auth:1;
+ u16 __attribute__ ((packed)) No_Pairwise:1;
+ u16 __attribute__ ((packed)) PTK_Replay_Counter:2;
+ u16 __attribute__ ((packed)) GTK_Replay_Counter:2;
+ u16 __attribute__ ((packed)) Reserved1:2;
+ u16 __attribute__ ((packed)) Reserved2:8; /* 20051201 */
+ } __attribute__ ((packed)) RSN_Capability;
#endif
- }__attribute__ ((packed)) ;
-}__attribute__ ((packed)) ;
+ } __attribute__ ((packed)) ;
+} __attribute__ ((packed)) ;
#ifdef _WPA2_
-struct pmkid
-{
+struct pmkid {
u8 pValue[16];
};
-struct WPA2_RSN_Information_Element
-{
+struct WPA2_RSN_Information_Element {
u8 Element_ID;
u8 Length;
u16 Version;
@@ -496,29 +490,28 @@ struct WPA2_RSN_Information_Element
u16 PairwiseKeySuiteCount;
struct suite_selector PairwiseKeySuite[1];
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-struct WPA2_RSN_Auth_Sub_Information_Element
-{
+struct WPA2_RSN_Auth_Sub_Information_Element {
u16 AuthKeyMngtSuiteCount;
struct suite_selector AuthKeyMngtSuite[1];
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-struct PMKID_Information_Element
-{
+struct PMKID_Information_Element {
u16 PMKID_Count;
struct pmkid pmkid[16];
-}__attribute__ ((packed));
+} __attribute__ ((packed));
#endif /* enddef _WPA2_ */
/*============================================================
// MAC Frame structure (different type) and subfield structure
//============================================================*/
-struct MAC_frame_control
-{
- u8 mac_frame_info; /* a combination of the [Protocol Version, Control Type, Control Subtype]*/
- #ifdef _BIG_ENDIAN_ /* 20060927 add by anson's endian */
+struct MAC_frame_control {
+/* a combination of the [Protocol Version, Control Type, Control Subtype] */
+ u8 mac_frame_info;
+/* 20060927 add by anson's endian */
+ #ifdef _BIG_ENDIAN_
u8 order:1;
u8 WEP:1;
u8 more_data:1;
@@ -540,7 +533,8 @@ struct MAC_frame_control
} __attribute__ ((packed));
struct Management_Frame {
- struct MAC_frame_control frame_control; /* 2B, ToDS,FromDS,MoreFrag,MoreData,Order=0 */
+/* 2B, ToDS,FromDS,MoreFrag,MoreData,Order=0 */
+ struct MAC_frame_control frame_control;
u16 duration;
u8 DA[MAC_ADDR_LENGTH]; /* Addr1 */
u8 SA[MAC_ADDR_LENGTH]; /* Addr2 */
@@ -552,7 +546,8 @@ struct Management_Frame {
/* SW-MAC don't Tx/Rx Control-Frame, HW-MAC do it. */
struct Control_Frame {
- struct MAC_frame_control frame_control; /* ToDS,FromDS,MoreFrag,Retry,MoreData,WEP,Order=0 */
+/* ToDS,FromDS,MoreFrag,Retry,MoreData,WEP,Order=0 */
+ struct MAC_frame_control frame_control;
u16 duration;
u8 RA[MAC_ADDR_LENGTH];
u8 TA[MAC_ADDR_LENGTH];
@@ -627,8 +622,9 @@ struct Authentication_Frame_Body {
u16 algorithmNumber;
u16 sequenceNumber;
u16 statusCode;
- /* NB: don't include ChallengeText in this structure
- // struct Challenge_Text_Element sChallengeTextElement; // wkchen added */
+ /* NB: don't include ChallengeText in this structure
+ // struct Challenge_Text_Element sChallengeTextElement;
+ // wkchen added */
} __attribute__ ((packed));
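
The RSN_Capability bitfields above need the #ifdef _BIG_ENDIAN_ split because C bitfield layout is compiler- and endian-dependent. A sketch of an alternative that reads the two-byte capability field as little-endian and masks it; the bit positions follow the 802.11i RSN Capabilities definition and are assumptions, not values taken from this header.

#include <linux/types.h>
#include <asm/unaligned.h>

#define RSN_CAP_PRE_AUTH	0x0001	/* assumed per 802.11i */
#define RSN_CAP_NO_PAIRWISE	0x0002
#define RSN_CAP_PTK_REPLAY_CTR	0x000C	/* bits 2-3 */
#define RSN_CAP_GTK_REPLAY_CTR	0x0030	/* bits 4-5 */

static int rsn_supports_preauth(const u8 *cap_field)	/* points at the 2-byte field */
{
	u16 cap = get_unaligned_le16(cap_field);

	return (cap & RSN_CAP_PRE_AUTH) != 0;
}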
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index 78935865df1..5c1f05392db 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -19,23 +19,25 @@
/****************** LOCAL CONSTANT AND MACRO SECTION ************************/
#define LOOP_TIMES 20
-#define US 1000//MICROSECOND
+#define US 1000 /* MICROSECOND */
#define AG_CONST 0.6072529350
#define FIXED(X) ((s32)((X) * 32768.0))
#define DEG2RAD(X) 0.017453 * (X)
-static const s32 Angles[] =
-{
- FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)), FIXED(DEG2RAD(14.0362)),
- FIXED(DEG2RAD(7.12502)), FIXED(DEG2RAD(3.57633)), FIXED(DEG2RAD(1.78991)),
- FIXED(DEG2RAD(0.895174)),FIXED(DEG2RAD(0.447614)),FIXED(DEG2RAD(0.223811)),
- FIXED(DEG2RAD(0.111906)),FIXED(DEG2RAD(0.055953)),FIXED(DEG2RAD(0.027977))
+static const s32 Angles[] = {
+ FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)), FIXED(DEG2RAD(14.0362)),
+ FIXED(DEG2RAD(7.12502)), FIXED(DEG2RAD(3.57633)), FIXED(DEG2RAD(1.78991)),
+ FIXED(DEG2RAD(0.895174)), FIXED(DEG2RAD(0.447614)), FIXED(DEG2RAD(0.223811)),
+ FIXED(DEG2RAD(0.111906)), FIXED(DEG2RAD(0.055953)), FIXED(DEG2RAD(0.027977))
};
/****************** LOCAL FUNCTION DECLARATION SECTION **********************/
-//void _phy_rf_write_delay(struct hw_data *phw_data);
-//void phy_init_rf(struct hw_data *phw_data);
+
+/*
+ * void _phy_rf_write_delay(struct hw_data *phw_data);
+ * void phy_init_rf(struct hw_data *phw_data);
+ */
/****************** FUNCTION DEFINITION SECTION *****************************/
@@ -46,9 +48,7 @@ s32 _s13_to_s32(u32 data)
val = (data & 0x0FFF);
if ((data & BIT(12)) != 0)
- {
val |= 0xFFFFF000;
- }
return ((s32) val);
}
@@ -58,13 +58,9 @@ u32 _s32_to_s13(s32 data)
u32 val;
if (data > 4095)
- {
data = 4095;
- }
else if (data < -4096)
- {
data = -4096;
- }
val = data & 0x1FFF;
@@ -79,9 +75,7 @@ s32 _s4_to_s32(u32 data)
val = (data & 0x0007);
if ((data & BIT(3)) != 0)
- {
val |= 0xFFFFFFF8;
- }
return val;
}
@@ -91,13 +85,9 @@ u32 _s32_to_s4(s32 data)
u32 val;
if (data > 7)
- {
data = 7;
- }
else if (data < -8)
- {
data = -8;
- }
val = data & 0x000F;
@@ -112,9 +102,7 @@ s32 _s5_to_s32(u32 data)
val = (data & 0x000F);
if ((data & BIT(4)) != 0)
- {
val |= 0xFFFFFFF0;
- }
return val;
}
@@ -124,13 +112,9 @@ u32 _s32_to_s5(s32 data)
u32 val;
if (data > 15)
- {
data = 15;
- }
else if (data < -16)
- {
data = -16;
- }
val = data & 0x001F;
@@ -145,9 +129,7 @@ s32 _s6_to_s32(u32 data)
val = (data & 0x001F);
if ((data & BIT(5)) != 0)
- {
val |= 0xFFFFFFE0;
- }
return val;
}
@@ -157,13 +139,9 @@ u32 _s32_to_s6(s32 data)
u32 val;
if (data > 31)
- {
data = 31;
- }
else if (data < -32)
- {
data = -32;
- }
val = data & 0x003F;
@@ -178,9 +156,7 @@ s32 _s9_to_s32(u32 data)
val = data & 0x00FF;
if ((data & BIT(8)) != 0)
- {
val |= 0xFFFFFF00;
- }
return val;
}
@@ -190,13 +166,9 @@ u32 _s32_to_s9(s32 data)
u32 val;
if (data > 255)
- {
data = 255;
- }
else if (data < -256)
- {
data = -256;
- }
val = data & 0x01FF;
@@ -207,21 +179,19 @@ u32 _s32_to_s9(s32 data)
s32 _floor(s32 n)
{
if (n > 0)
- {
- n += 5;
- }
+ n += 5;
else
- {
n -= 5;
- }
return (n/10);
}
/****************************************************************************/
-// The following code is sqare-root function.
-// sqsum is the input and the output is sq_rt;
-// The maximum of sqsum = 2^27 -1;
+/*
+ * The following code is a square-root function.
+ * sqsum is the input and sq_rt is the output;
+ * the maximum value of sqsum is 2^27 - 1.
+ */
u32 _sqrt(u32 sqsum)
{
u32 sq_rt;
@@ -232,18 +202,17 @@ u32 _sqrt(u32 sqsum)
int step;
g4 = sqsum / 100000000;
- g3 = (sqsum - g4*100000000) /1000000;
- g2 = (sqsum - g4*100000000 - g3*1000000) /10000;
- g1 = (sqsum - g4*100000000 - g3*1000000 - g2*10000) /100;
+ g3 = (sqsum - g4*100000000) / 1000000;
+ g2 = (sqsum - g4*100000000 - g3*1000000) / 10000;
+ g1 = (sqsum - g4*100000000 - g3*1000000 - g2*10000) / 100;
g0 = (sqsum - g4*100000000 - g3*1000000 - g2*10000 - g1*100);
next = g4;
step = 0;
seed = 0;
- while (((seed+1)*(step+1)) <= next)
- {
- step++;
- seed++;
+ while (((seed+1)*(step+1)) <= next) {
+ step++;
+ seed++;
}
sq_rt = seed * 10000;
@@ -251,20 +220,18 @@ u32 _sqrt(u32 sqsum)
step = 0;
seed = 2 * seed * 10;
- while (((seed+1)*(step+1)) <= next)
- {
+ while (((seed+1)*(step+1)) <= next) {
step++;
- seed++;
+ seed++;
}
sq_rt = sq_rt + step * 1000;
next = (next - seed * step) * 100 + g2;
seed = (seed + step) * 10;
step = 0;
- while (((seed+1)*(step+1)) <= next)
- {
+ while (((seed+1)*(step+1)) <= next) {
step++;
- seed++;
+ seed++;
}
sq_rt = sq_rt + step * 100;
@@ -272,21 +239,19 @@ u32 _sqrt(u32 sqsum)
seed = (seed + step) * 10;
step = 0;
- while (((seed+1)*(step+1)) <= next)
- {
+ while (((seed+1)*(step+1)) <= next) {
step++;
- seed++;
+ seed++;
}
sq_rt = sq_rt + step * 10;
- next = (next - seed* step) * 100 + g0;
+ next = (next - seed * step) * 100 + g0;
seed = (seed + step) * 10;
step = 0;
- while (((seed+1)*(step+1)) <= next)
- {
+ while (((seed+1)*(step+1)) <= next) {
step++;
- seed++;
+ seed++;
}
sq_rt = sq_rt + step;
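Annotation: _sqrt() above is the classical longhand (digit-by-digit) square root in base 10: the input is split into the decimal digit groups g4..g0 and one decimal digit of the root is produced per group, so for inputs up to 2^27 - 1 the result is the integer floor of the square root. For comparison only, not part of this patch (isqrt is a hypothetical name), the more common binary digit-by-digit method should give the same result without the divisions by powers of ten:

	/* Sketch only: binary digit-by-digit integer square root; for
	 * sqsum <= 2^27 - 1 it should return the same floor(sqrt) value. */
	static u32 isqrt(u32 x)
	{
		u32 res = 0;
		u32 bit = 1U << 26;	/* largest power of four <= 2^27 - 1 */

		while (bit > x)
			bit >>= 2;
		while (bit != 0) {
			if (x >= res + bit) {
				x -= res + bit;
				res = (res >> 1) + bit;
			} else {
				res >>= 1;
			}
			bit >>= 2;
		}
		return res;
	}

The patch deliberately leaves the existing algorithm untouched and only reflows its braces and spacing.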
@@ -300,38 +265,31 @@ void _sin_cos(s32 angle, s32 *sin, s32 *cos)
s32 X, Y, TargetAngle, CurrAngle;
unsigned Step;
- X=FIXED(AG_CONST); // AG_CONST * cos(0)
- Y=0; // AG_CONST * sin(0)
- TargetAngle=abs(angle);
- CurrAngle=0;
+ X = FIXED(AG_CONST); /* AG_CONST * cos(0) */
+ Y = 0; /* AG_CONST * sin(0) */
+ TargetAngle = abs(angle);
+ CurrAngle = 0;
- for (Step=0; Step < 12; Step++)
- {
+ for (Step = 0; Step < 12; Step++) {
s32 NewX;
- if(TargetAngle > CurrAngle)
- {
- NewX=X - (Y >> Step);
- Y=(X >> Step) + Y;
- X=NewX;
+ if (TargetAngle > CurrAngle) {
+ NewX = X - (Y >> Step);
+ Y = (X >> Step) + Y;
+ X = NewX;
CurrAngle += Angles[Step];
- }
- else
- {
- NewX=X + (Y >> Step);
- Y=-(X >> Step) + Y;
- X=NewX;
+ } else {
+ NewX = X + (Y >> Step);
+ Y = -(X >> Step) + Y;
+ X = NewX;
CurrAngle -= Angles[Step];
}
}
- if (angle > 0)
- {
+ if (angle > 0) {
*cos = X;
*sin = Y;
- }
- else
- {
+ } else {
*cos = X;
*sin = -Y;
}
@@ -343,7 +301,7 @@ static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 *
number += 0x1000;
return Wb35Reg_ReadSync(pHwData, number, pValue);
}
-#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (u32 *)_C )
+#define hw_get_dxx_reg(_A, _B, _C) hal_get_dxx_reg(_A, _B, (u32 *)_C)
static unsigned char hal_set_dxx_reg(struct hw_data *pHwData, u16 number, u32 value)
{
@@ -354,7 +312,7 @@ static unsigned char hal_set_dxx_reg(struct hw_data *pHwData, u16 number, u32 va
ret = Wb35Reg_WriteSync(pHwData, number, value);
return ret;
}
-#define hw_set_dxx_reg( _A, _B, _C ) hal_set_dxx_reg( _A, _B, (u32)_C )
+#define hw_set_dxx_reg(_A, _B, _C) hal_set_dxx_reg(_A, _B, (u32)_C)
void _reset_rx_cal(struct hw_data *phw_data)
@@ -363,25 +321,20 @@ void _reset_rx_cal(struct hw_data *phw_data)
hw_get_dxx_reg(phw_data, 0x54, &val);
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */
val &= 0xFFFF0000;
- }
- else // 2nd-cut
- {
+ else /* 2nd-cut */
val &= 0x000003FF;
- }
hw_set_dxx_reg(phw_data, 0x54, val);
}
-// ************for winbond calibration*********
-//
+/**************for winbond calibration*********/
+
+
-//
-//
-// *********************************************
+/**********************************************/
void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequency)
{
u32 reg_agc_ctrl3;
@@ -392,35 +345,31 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
PHY_DEBUG(("[CAL] -> [1]_rxadc_dc_offset_cancellation()\n"));
phy_init_rf(phw_data);
- // set calibration channel
- if( (RF_WB_242 == phw_data->phy_type) ||
- (RF_WB_242_1 == phw_data->phy_type) ) // 20060619.5 Add
- {
- if ((frequency >= 2412) && (frequency <= 2484))
- {
- // w89rf242 change frequency to 2390Mhz
+ /* set calibration channel */
+ if ((RF_WB_242 == phw_data->phy_type) ||
+ (RF_WB_242_1 == phw_data->phy_type)) /* 20060619.5 Add */{
+ if ((frequency >= 2412) && (frequency <= 2484)) {
+ /* w89rf242 change frequency to 2390Mhz */
PHY_DEBUG(("[CAL] W89RF242/11G/Channel=2390Mhz\n"));
phy_set_rf_data(phw_data, 3, (3<<24)|0x025586);
}
- }
- else
- {
+ } else {
}
- // reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel
+ /* reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel */
hw_get_dxx_reg(phw_data, 0x5C, &val);
val &= ~(0x03FF);
hw_set_dxx_reg(phw_data, 0x5C, val);
- // reset the TX and RX IQ calibration data
+ /* reset the TX and RX IQ calibration data */
hw_set_dxx_reg(phw_data, 0x3C, 0);
hw_set_dxx_reg(phw_data, 0x54, 0);
- hw_set_dxx_reg(phw_data, 0x58, 0x30303030); // IQ_Alpha Changed
+ hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */
- // a. Disable AGC
+ /* a. Disable AGC */
hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
reg_agc_ctrl3 &= ~BIT(2);
reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
@@ -430,7 +379,7 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
val |= MASK_AGC_FIX_GAIN;
hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);
- // b. Turn off BB RX
+ /* b. Turn off BB RX */
hw_get_dxx_reg(phw_data, REG_A_ACQ_CTRL, &reg_a_acq_ctrl);
reg_a_acq_ctrl |= MASK_AMER_OFF_REG;
hw_set_dxx_reg(phw_data, REG_A_ACQ_CTRL, reg_a_acq_ctrl);
@@ -439,9 +388,9 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
reg_b_acq_ctrl |= MASK_BMER_OFF_REG;
hw_set_dxx_reg(phw_data, REG_B_ACQ_CTRL, reg_b_acq_ctrl);
- // c. Make sure MAC is in receiving mode
- // d. Turn ON ADC calibration
- // - ADC calibrator is triggered by this signal rising from 0 to 1
+ /* c. Make sure MAC is in receiving mode
+ * d. Turn ON ADC calibration
+ * - ADC calibrator is triggered by this signal rising from 0 to 1 */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val);
val &= ~MASK_ADC_DC_CAL_STR;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, val);
@@ -449,7 +398,7 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
val |= MASK_ADC_DC_CAL_STR;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, val);
- // e. The result are shown in "adc_dc_cal_i[8:0] and adc_dc_cal_q[8:0]"
+ /* e. The result are shown in "adc_dc_cal_i[8:0] and adc_dc_cal_q[8:0]" */
#ifdef _DEBUG
hw_get_dxx_reg(phw_data, REG_OFFSET_READ, &val);
PHY_DEBUG(("[CAL] REG_OFFSET_READ = 0x%08X\n", val));
@@ -464,23 +413,23 @@ void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequen
val &= ~MASK_ADC_DC_CAL_STR;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, val);
- // f. Turn on BB RX
- //hw_get_dxx_reg(phw_data, REG_A_ACQ_CTRL, &reg_a_acq_ctrl);
+ /* f. Turn on BB RX */
+ /* hw_get_dxx_reg(phw_data, REG_A_ACQ_CTRL, &reg_a_acq_ctrl); */
reg_a_acq_ctrl &= ~MASK_AMER_OFF_REG;
hw_set_dxx_reg(phw_data, REG_A_ACQ_CTRL, reg_a_acq_ctrl);
- //hw_get_dxx_reg(phw_data, REG_B_ACQ_CTRL, &reg_b_acq_ctrl);
+ /* hw_get_dxx_reg(phw_data, REG_B_ACQ_CTRL, &reg_b_acq_ctrl); */
reg_b_acq_ctrl &= ~MASK_BMER_OFF_REG;
hw_set_dxx_reg(phw_data, REG_B_ACQ_CTRL, reg_b_acq_ctrl);
- // g. Enable AGC
- //hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &val);
+ /* g. Enable AGC */
+ /* hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &val); */
reg_agc_ctrl3 |= BIT(2);
reg_agc_ctrl3 &= ~(MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);
}
-////////////////////////////////////////////////////////
+/****************************************************************/
void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
{
u32 reg_agc_ctrl3;
@@ -497,22 +446,22 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] -> [2]_txidac_dc_offset_cancellation()\n"));
- // a. Set to "TX calibration mode"
+ /* a. Set to "TX calibration mode" */
- //0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits
+ /* 0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
- //0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit
+ /* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
- //0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized
+ /* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
- //0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized
+ /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
- //0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode
+ /* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
- hw_set_dxx_reg(phw_data, 0x58, 0x30303030); // IQ_Alpha Changed
+ hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */
- // a. Disable AGC
+ /* a. Disable AGC */
hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
reg_agc_ctrl3 &= ~BIT(2);
reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
@@ -522,19 +471,19 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
val |= MASK_AGC_FIX_GAIN;
hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);
- // b. set iqcal_mode[1:0] to 0x2 and set iqcal_tone[3:2] to 0
+ /* b. set iqcal_mode[1:0] to 0x2 and set iqcal_tone[3:2] to 0 */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
- // mode=2, tone=0
- //reg_mode_ctrl |= (MASK_CALIB_START|2);
+ /* mode=2, tone=0 */
+ /* reg_mode_ctrl |= (MASK_CALIB_START|2); */
- // mode=2, tone=1
- //reg_mode_ctrl |= (MASK_CALIB_START|2|(1<<2));
+ /* mode=2, tone=1 */
+ /* reg_mode_ctrl |= (MASK_CALIB_START|2|(1<<2)); */
- // mode=2, tone=2
+ /* mode=2, tone=2 */
reg_mode_ctrl |= (MASK_CALIB_START|2|(2<<2));
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
@@ -542,12 +491,10 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
hw_get_dxx_reg(phw_data, 0x5C, &reg_dc_cancel);
PHY_DEBUG(("[CAL] DC_CANCEL (read) = 0x%08X\n", reg_dc_cancel));
- for (loop = 0; loop < LOOP_TIMES; loop++)
- {
+ for (loop = 0; loop < LOOP_TIMES; loop++) {
PHY_DEBUG(("[CAL] [%d.] ==================================\n", loop));
- // c.
- // reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel
+ /* c. reset cancel_dc_i[9:5] and cancel_dc_q[4:0] in register DC_Cancel */
reg_dc_cancel &= ~(0x03FF);
PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
@@ -562,7 +509,7 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] mag_0=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
mag_0, iqcal_image_i, iqcal_image_q));
- // d.
+ /* d. */
reg_dc_cancel |= (1 << CANCEL_DC_I_SHIFT);
PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
@@ -577,18 +524,12 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] mag_1=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
mag_1, iqcal_image_i, iqcal_image_q));
- // e. Calculate the correct DC offset cancellation value for I
+ /* e. Calculate the correct DC offset cancellation value for I */
if (mag_0 != mag_1)
- {
fix_cancel_dc_i = (mag_0*10000) / (mag_0*10000 - mag_1*10000);
- }
- else
- {
+ else {
if (mag_0 == mag_1)
- {
PHY_DEBUG(("[CAL] ***** mag_0 = mag_1 !!\n"));
- }
-
fix_cancel_dc_i = 0;
}
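Annotation: the I-channel estimate above is a one-step linear extrapolation. mag_0 is the image magnitude with the current cancel_dc_i code and mag_1 the magnitude after bumping that code by one LSB; assuming the magnitude varies roughly linearly with the code, m(c) ~= mag_0 + (mag_1 - mag_0)*c crosses zero at

	c* = mag_0 / (mag_0 - mag_1)

which is exactly what fix_cancel_dc_i computes (the *10000 scaling cancels under integer division), and _s32_to_s5() maps the estimate into the 5-bit cancel_dc_i field. The Q-channel routine further down reuses the same estimate for cancel_dc_q.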
@@ -596,12 +537,10 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
fix_cancel_dc_i, _s32_to_s5(fix_cancel_dc_i)));
if ((abs(mag_1-mag_0)*6) > mag_0)
- {
break;
- }
}
- if ( loop >= 19 )
+ if (loop >= 19)
fix_cancel_dc_i = 0;
reg_dc_cancel &= ~(0x03FF);
@@ -609,13 +548,13 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
- // g.
+ /* g. */
reg_mode_ctrl &= ~MASK_CALIB_START;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
}
-///////////////////////////////////////////////////////
+/*****************************************************/
void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
{
u32 reg_agc_ctrl3;
@@ -631,20 +570,20 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
int loop;
PHY_DEBUG(("[CAL] -> [3]_txqdac_dc_offset_cacellation()\n"));
- //0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits
+ /*0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
- //0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit
+ /* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
phy_set_rf_data(phw_data, 11, (11<<24)|0x1901D6);
- //0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized
+ /* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
phy_set_rf_data(phw_data, 5, (5<<24)|0x24C48A);
- //0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized
+ /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
phy_set_rf_data(phw_data, 6, (6<<24)|0x06890C);
- //0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode
+ /* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
- hw_set_dxx_reg(phw_data, 0x58, 0x30303030); // IQ_Alpha Changed
+ hw_set_dxx_reg(phw_data, 0x58, 0x30303030); /* IQ_Alpha Changed */
- // a. Disable AGC
+ /* a. Disable AGC */
hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
reg_agc_ctrl3 &= ~BIT(2);
reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
@@ -654,11 +593,11 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
val |= MASK_AGC_FIX_GAIN;
hw_set_dxx_reg(phw_data, REG_AGC_CTRL5, val);
- // a. set iqcal_mode[1:0] to 0x3 and set iqcal_tone[3:2] to 0
+ /* a. set iqcal_mode[1:0] to 0x3 and set iqcal_tone[3:2] to 0 */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
- //reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
+ /* reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE); */
reg_mode_ctrl &= ~(MASK_IQCAL_MODE);
reg_mode_ctrl |= (MASK_CALIB_START|3);
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
@@ -667,12 +606,10 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
hw_get_dxx_reg(phw_data, 0x5C, &reg_dc_cancel);
PHY_DEBUG(("[CAL] DC_CANCEL (read) = 0x%08X\n", reg_dc_cancel));
- for (loop = 0; loop < LOOP_TIMES; loop++)
- {
+ for (loop = 0; loop < LOOP_TIMES; loop++) {
PHY_DEBUG(("[CAL] [%d.] ==================================\n", loop));
- // b.
- // reset cancel_dc_q[4:0] in register DC_Cancel
+ /* b. reset cancel_dc_q[4:0] in register DC_Cancel */
reg_dc_cancel &= ~(0x001F);
PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
@@ -687,7 +624,7 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] mag_0=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
mag_0, iqcal_image_i, iqcal_image_q));
- // c.
+ /* c. */
reg_dc_cancel |= (1 << CANCEL_DC_Q_SHIFT);
PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
hw_set_dxx_reg(phw_data, 0x5C, reg_dc_cancel);
@@ -702,18 +639,12 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] mag_1=%d (iqcal_image_i=%d, iqcal_image_q=%d)\n",
mag_1, iqcal_image_i, iqcal_image_q));
- // d. Calculate the correct DC offset cancellation value for I
+ /* d. Calculate the correct DC offset cancellation value for I */
if (mag_0 != mag_1)
- {
fix_cancel_dc_q = (mag_0*10000) / (mag_0*10000 - mag_1*10000);
- }
- else
- {
+ else {
if (mag_0 == mag_1)
- {
PHY_DEBUG(("[CAL] ***** mag_0 = mag_1 !!\n"));
- }
-
fix_cancel_dc_q = 0;
}
@@ -721,12 +652,10 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
fix_cancel_dc_q, _s32_to_s5(fix_cancel_dc_q)));
if ((abs(mag_1-mag_0)*6) > mag_0)
- {
break;
- }
}
- if ( loop >= 19 )
+ if (loop >= 19)
fix_cancel_dc_q = 0;
reg_dc_cancel &= ~(0x001F);
@@ -735,13 +664,13 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] DC_CANCEL (write) = 0x%08X\n", reg_dc_cancel));
- // f.
+ /* f. */
reg_mode_ctrl &= ~MASK_CALIB_START;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
}
-//20060612.1.a 20060718.1 Modify
+/* 20060612.1.a 20060718.1 Modify */
u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
s32 a_2_threshold,
s32 b_2_threshold)
@@ -765,7 +694,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
s32 temp1, temp2;
u32 val;
u16 loop;
- s32 iqcal_tone_i_avg,iqcal_tone_q_avg;
+ s32 iqcal_tone_i_avg, iqcal_tone_q_avg;
u8 verify_count;
int capture_time;
@@ -780,18 +709,18 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
loop = LOOP_TIMES;
- while (loop > 0)
- {
+ while (loop > 0) {
PHY_DEBUG(("[CAL] [%d.] <_tx_iq_calibration_loop>\n", (LOOP_TIMES-loop+1)));
- iqcal_tone_i_avg=0;
- iqcal_tone_q_avg=0;
- if( !hw_set_dxx_reg(phw_data, 0x3C, 0x00) ) // 20060718.1 modify
+ iqcal_tone_i_avg = 0;
+ iqcal_tone_q_avg = 0;
+ if (!hw_set_dxx_reg(phw_data, 0x3C, 0x00)) /* 20060718.1 modify */
return 0;
- for(capture_time=0;capture_time<10;capture_time++)
- {
- // a. Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to
- // enable "IQ alibration Mode II"
+ for (capture_time = 0; capture_time < 10; capture_time++) {
+ /*
+ * a. Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to
+ * enable "IQ calibration Mode II"
+ */
reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
reg_mode_ctrl &= ~MASK_IQCAL_MODE;
reg_mode_ctrl |= (MASK_CALIB_START|0x02);
@@ -799,7 +728,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- // b.
+ /* b. */
hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));
@@ -813,21 +742,23 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
iq_mag_0_tx = (s32) _sqrt(sqsum);
PHY_DEBUG(("[CAL] ** iq_mag_0_tx=%d\n", iq_mag_0_tx));
- // c. Set "calib_start" to 0x0
+ /* c. Set "calib_start" to 0x0 */
reg_mode_ctrl &= ~MASK_CALIB_START;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- // d. Set iqcal_mode[1:0] to 0x3 and set "calib_start" to 0x1 to
- // enable "IQ alibration Mode II"
- //hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val);
+ /*
+ * d. Set iqcal_mode[1:0] to 0x3 and set "calib_start" to 0x1 to
+ * enable "IQ calibration Mode II"
+ */
+ /* hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &val); */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
reg_mode_ctrl &= ~MASK_IQCAL_MODE;
reg_mode_ctrl |= (MASK_CALIB_START|0x03);
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- // e.
+ /* e. */
hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));
@@ -835,14 +766,11 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
iqcal_tone_q = _s13_to_s32((val & 0x03FFE000) >> 13);
PHY_DEBUG(("[CAL] ** iqcal_tone_i = %d, iqcal_tone_q = %d\n",
iqcal_tone_i, iqcal_tone_q));
- if( capture_time == 0)
- {
+ if (capture_time == 0)
continue;
- }
- else
- {
- iqcal_tone_i_avg=( iqcal_tone_i_avg*(capture_time-1) +iqcal_tone_i)/capture_time;
- iqcal_tone_q_avg=( iqcal_tone_q_avg*(capture_time-1) +iqcal_tone_q)/capture_time;
+ else {
+ iqcal_tone_i_avg = (iqcal_tone_i_avg*(capture_time-1) + iqcal_tone_i)/capture_time;
+ iqcal_tone_q_avg = (iqcal_tone_q_avg*(capture_time-1) + iqcal_tone_q)/capture_time;
}
}
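Annotation: the capture loop keeps a running mean instead of storing the ten samples. The first capture (capture_time == 0) is discarded, and for n = 1..9 the update

	avg_n = (avg_(n-1)*(n-1) + x_n) / n

is the standard incremental-mean recurrence, so nine tone readings are averaged in place. Because the update uses integer division at every step it truncates, so the result is only an approximation of the exact mean. The RX calibration loop further down applies the same recurrence to four running averages.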
@@ -857,11 +785,10 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
PHY_DEBUG(("[CAL] ** rot_i_b = %d, rot_q_b = %d\n",
rot_i_b, rot_q_b));
- // f.
+ /* f. */
divisor = ((iq_mag_0_tx * iq_mag_0_tx * 2)/1024 - rot_i_b) * 2;
- if (divisor == 0)
- {
+ if (divisor == 0) {
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> ERROR *******\n"));
PHY_DEBUG(("[CAL] ** divisor=0 to calculate EPS and THETA !!\n"));
PHY_DEBUG(("[CAL] ******************************************\n"));
@@ -876,18 +803,16 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
phw_data->iq_rsdl_gain_tx_d2 = a_2;
phw_data->iq_rsdl_phase_tx_d2 = b_2;
- //if ((abs(a_2) < 150) && (abs(b_2) < 100))
- //if ((abs(a_2) < 200) && (abs(b_2) < 200))
- if ((abs(a_2) < a_2_threshold) && (abs(b_2) < b_2_threshold))
- {
+ /* if ((abs(a_2) < 150) && (abs(b_2) < 100)) */
+ /* if ((abs(a_2) < 200) && (abs(b_2) < 200)) */
+ if ((abs(a_2) < a_2_threshold) && (abs(b_2) < b_2_threshold)) {
verify_count++;
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *************\n"));
PHY_DEBUG(("[CAL] ** VERIFY OK # %d !!\n", verify_count));
PHY_DEBUG(("[CAL] ******************************************\n"));
- if (verify_count > 2)
- {
+ if (verify_count > 2) {
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *********\n"));
PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION (EPS,THETA) OK !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
@@ -895,37 +820,29 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
}
continue;
- }
- else
- {
+ } else
verify_count = 0;
- }
_sin_cos(b_2, &sin_b, &cos_b);
_sin_cos(b_2*2, &sin_2b, &cos_2b);
PHY_DEBUG(("[CAL] ** sin(b/2)=%d, cos(b/2)=%d\n", sin_b, cos_b));
PHY_DEBUG(("[CAL] ** sin(b)=%d, cos(b)=%d\n", sin_2b, cos_2b));
- if (cos_2b == 0)
- {
+ if (cos_2b == 0) {
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> ERROR *******\n"));
PHY_DEBUG(("[CAL] ** cos(b)=0 !!\n"));
PHY_DEBUG(("[CAL] ******************************************\n"));
break;
}
- // 1280 * 32768 = 41943040
+ /* 1280 * 32768 = 41943040 */
temp1 = (41943040/cos_2b)*cos_b;
- //temp2 = (41943040/cos_2b)*sin_b*(-1);
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ /* temp2 = (41943040/cos_2b)*sin_b*(-1); */
+ if (phw_data->revision == 0x2002) /* 1st-cut */
temp2 = (41943040/cos_2b)*sin_b*(-1);
- }
- else // 2nd-cut
- {
+ else /* 2nd-cut */
temp2 = (41943040*4/cos_2b)*sin_b*(-1);
- }
tx_cal_flt_b[0] = _floor(temp1/(32768+a_2));
tx_cal_flt_b[1] = _floor(temp2/(32768+a_2));
@@ -937,37 +854,34 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
PHY_DEBUG(("[CAL] tx_cal_flt_b[3] = %d\n", tx_cal_flt_b[3]));
tx_cal[2] = tx_cal_flt_b[2];
- tx_cal[2] = tx_cal[2] +3;
+ tx_cal[2] = tx_cal[2] + 3;
tx_cal[1] = tx_cal[2];
tx_cal[3] = tx_cal_flt_b[3] - 128;
- tx_cal[0] = -tx_cal[3]+1;
+ tx_cal[0] = -tx_cal[3] + 1;
PHY_DEBUG(("[CAL] tx_cal[0] = %d\n", tx_cal[0]));
PHY_DEBUG(("[CAL] tx_cal[1] = %d\n", tx_cal[1]));
PHY_DEBUG(("[CAL] tx_cal[2] = %d\n", tx_cal[2]));
PHY_DEBUG(("[CAL] tx_cal[3] = %d\n", tx_cal[3]));
- //if ((tx_cal[0] == 0) && (tx_cal[1] == 0) &&
- // (tx_cal[2] == 0) && (tx_cal[3] == 0))
- //{
- // PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *************\n"));
- // PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION COMPLETE !!\n"));
- // PHY_DEBUG(("[CAL] ******************************************\n"));
- // return 0;
- //}
-
- // g.
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ /* if ((tx_cal[0] == 0) && (tx_cal[1] == 0) &&
+  *     (tx_cal[2] == 0) && (tx_cal[3] == 0)) {
+  *     PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *************\n"));
+  *     PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION COMPLETE !!\n"));
+  *     PHY_DEBUG(("[CAL] ******************************************\n"));
+  *     return 0;
+  * }
+  */
+
+ /* g. */
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
hw_get_dxx_reg(phw_data, 0x54, &val);
PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));
tx_cal_reg[0] = _s4_to_s32((val & 0xF0000000) >> 28);
tx_cal_reg[1] = _s4_to_s32((val & 0x0F000000) >> 24);
tx_cal_reg[2] = _s4_to_s32((val & 0x00F00000) >> 20);
tx_cal_reg[3] = _s4_to_s32((val & 0x000F0000) >> 16);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
hw_get_dxx_reg(phw_data, 0x3C, &val);
PHY_DEBUG(("[CAL] ** 0x3C = 0x%08X\n", val));
tx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
@@ -982,22 +896,17 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
PHY_DEBUG(("[CAL] tx_cal_reg[2] = %d\n", tx_cal_reg[2]));
PHY_DEBUG(("[CAL] tx_cal_reg[3] = %d\n", tx_cal_reg[3]));
- if (phw_data->revision == 0x2002) // 1st-cut
- {
- if (((tx_cal_reg[0]==7) || (tx_cal_reg[0]==(-8))) &&
- ((tx_cal_reg[3]==7) || (tx_cal_reg[3]==(-8))))
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
+ if (((tx_cal_reg[0] == 7) || (tx_cal_reg[0] == (-8))) &&
+ ((tx_cal_reg[3] == 7) || (tx_cal_reg[3] == (-8)))) {
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *********\n"));
PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION SATUATION !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
break;
}
- }
- else // 2nd-cut
- {
- if (((tx_cal_reg[0]==31) || (tx_cal_reg[0]==(-32))) &&
- ((tx_cal_reg[3]==31) || (tx_cal_reg[3]==(-32))))
- {
+ } else /* 2nd-cut */{
+ if (((tx_cal_reg[0] == 31) || (tx_cal_reg[0] == (-32))) &&
+ ((tx_cal_reg[3] == 31) || (tx_cal_reg[3] == (-32)))) {
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration_loop> *********\n"));
PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION SATUATION !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
@@ -1014,8 +923,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
PHY_DEBUG(("[CAL] apply tx_cal[2] = %d\n", tx_cal[2]));
PHY_DEBUG(("[CAL] apply tx_cal[3] = %d\n", tx_cal[3]));
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
val &= 0x0000FFFF;
val |= ((_s32_to_s4(tx_cal[0]) << 28)|
(_s32_to_s4(tx_cal[1]) << 24)|
@@ -1024,9 +932,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
hw_set_dxx_reg(phw_data, 0x54, val);
PHY_DEBUG(("[CAL] ** CALIB_DATA = 0x%08X\n", val));
return 0;
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
val &= 0x000003FF;
val |= ((_s32_to_s5(tx_cal[0]) << 27)|
(_s32_to_s6(tx_cal[1]) << 21)|
@@ -1037,7 +943,7 @@ u8 _tx_iq_calibration_loop_winbond(struct hw_data *phw_data,
return 0;
}
- // i. Set "calib_start" to 0x0
+ /* i. Set "calib_start" to 0x0 */
reg_mode_ctrl &= ~MASK_CALIB_START;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
@@ -1061,26 +967,26 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
PHY_DEBUG(("[CAL] -> [4]_tx_iq_calibration()\n"));
- //0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits
+ /* 0x01 0xEE3FC2 ; 3B8FF ; Calibration (6a). enable TX IQ calibration loop circuits */
phy_set_rf_data(phw_data, 1, (1<<24)|0xEE3FC2);
- //0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit
- phy_set_rf_data(phw_data, 11, (11<<24)|0x19BDD6); // 20060612.1.a 0x1905D6);
- //0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized
- phy_set_rf_data(phw_data, 5, (5<<24)|0x24C60A); //0x24C60A (high temperature)
- //0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized
- phy_set_rf_data(phw_data, 6, (6<<24)|0x34880C); // 20060612.1.a 0x06890C);
- //0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode
+ /* 0x0B 0x1905D6 ; 06417 ; Calibration (6b). enable TX I/Q cal loop squaring circuit */
+ phy_set_rf_data(phw_data, 11, (11<<24)|0x19BDD6); /* 20060612.1.a 0x1905D6); */
+ /* 0x05 0x24C60A ; 09318 ; Calibration (6c). setting TX-VGA gain: TXGCH=2 & GPK=110 --> to be optimized */
+ phy_set_rf_data(phw_data, 5, (5<<24)|0x24C60A); /* 0x24C60A (high temperature) */
+ /* 0x06 0x06880C ; 01A20 ; Calibration (6d). RXGCH=00; RXGCL=100 000 (RXVGA=32) --> to be optimized */
+ phy_set_rf_data(phw_data, 6, (6<<24)|0x34880C); /* 20060612.1.a 0x06890C); */
+ /* 0x00 0xFDF1C0 ; 3F7C7 ; Calibration (6e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFDF1C0);
- //; [BB-chip]: Calibration (6f).Send test pattern
- //; [BB-chip]: Calibration (6g). Search RXGCL optimal value
- //; [BB-chip]: Calibration (6h). Caculate TX-path IQ imbalance and setting TX path IQ compensation table
- //phy_set_rf_data(phw_data, 3, (3<<24)|0x025586);
+ /* ; [BB-chip]: Calibration (6f).Send test pattern */
+ /* ; [BB-chip]: Calibration (6g). Search RXGCL optimal value */
+ /* ; [BB-chip]: Calibration (6h). Calculate TX-path IQ imbalance and set the TX path IQ compensation table */
+ /* phy_set_rf_data(phw_data, 3, (3<<24)|0x025586); */
- msleep(30); // 20060612.1.a 30ms delay. Add the follow 2 lines
- //To adjust TXVGA to fit iq_mag_0 range from 1250 ~ 1750
- adjust_TXVGA_for_iq_mag( phw_data );
+ msleep(30); /* 20060612.1.a 30ms delay. Add the following 2 lines */
+ /* To adjust TXVGA to fit iq_mag_0 range from 1250 ~ 1750 */
+ adjust_TXVGA_for_iq_mag(phw_data);
- // a. Disable AGC
+ /* a. Disable AGC */
hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &reg_agc_ctrl3);
reg_agc_ctrl3 &= ~BIT(2);
reg_agc_ctrl3 |= (MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
@@ -1092,16 +998,12 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
result = _tx_iq_calibration_loop_winbond(phw_data, 150, 100);
- if (result > 0)
- {
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (result > 0) {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
hw_get_dxx_reg(phw_data, 0x54, &val);
val &= 0x0000FFFF;
hw_set_dxx_reg(phw_data, 0x54, val);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut*/{
hw_get_dxx_reg(phw_data, 0x3C, &val);
val &= 0x000003FF;
hw_set_dxx_reg(phw_data, 0x3C, val);
@@ -1109,32 +1011,24 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
result = _tx_iq_calibration_loop_winbond(phw_data, 300, 200);
- if (result > 0)
- {
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (result > 0) {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
hw_get_dxx_reg(phw_data, 0x54, &val);
val &= 0x0000FFFF;
hw_set_dxx_reg(phw_data, 0x54, val);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut*/{
hw_get_dxx_reg(phw_data, 0x3C, &val);
val &= 0x000003FF;
hw_set_dxx_reg(phw_data, 0x3C, val);
}
result = _tx_iq_calibration_loop_winbond(phw_data, 500, 400);
- if (result > 0)
- {
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (result > 0) {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
hw_get_dxx_reg(phw_data, 0x54, &val);
val &= 0x0000FFFF;
hw_set_dxx_reg(phw_data, 0x54, val);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
hw_get_dxx_reg(phw_data, 0x3C, &val);
val &= 0x000003FF;
hw_set_dxx_reg(phw_data, 0x3C, val);
@@ -1143,20 +1037,16 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
result = _tx_iq_calibration_loop_winbond(phw_data, 700, 500);
- if (result > 0)
- {
+ if (result > 0) {
PHY_DEBUG(("[CAL] ** <_tx_iq_calibration> **************\n"));
PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION FAILURE !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
hw_get_dxx_reg(phw_data, 0x54, &val);
val &= 0x0000FFFF;
hw_set_dxx_reg(phw_data, 0x54, val);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
hw_get_dxx_reg(phw_data, 0x3C, &val);
val &= 0x000003FF;
hw_set_dxx_reg(phw_data, 0x3C, val);
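Annotation: _tx_iq_calibration_winbond() retries the calibration with progressively looser thresholds, (150,100) -> (300,200) -> (500,400) -> (700,500), clearing the stale calibration data in 0x54 (1st-cut) or 0x3C (2nd-cut) between attempts; in the driver this is written as four nested if (result > 0) blocks, which the patch keeps and only reformats. A flattened, table-driven sketch of the same flow, not part of this patch (the helper name and the use of ARRAY_SIZE() are illustrative assumptions), could look like:

	/* Sketch only: same widening-threshold retry, expressed as a loop */
	static void _tx_iq_calibration_retry_sketch(struct hw_data *phw_data)
	{
		static const struct { s32 a_2, b_2; } thr[] = {
			{ 150, 100 }, { 300, 200 }, { 500, 400 }, { 700, 500 },
		};
		u32 val;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(thr); i++) {
			if (_tx_iq_calibration_loop_winbond(phw_data,
					thr[i].a_2, thr[i].b_2) == 0)
				return;	/* calibration converged */
			/* clear stale calibration data before the looser retry */
			if (phw_data->revision == 0x2002) {	/* 1st-cut: 0x54 */
				hw_get_dxx_reg(phw_data, 0x54, &val);
				hw_set_dxx_reg(phw_data, 0x54, val & 0x0000FFFF);
			} else {				/* 2nd-cut: 0x3C */
				hw_get_dxx_reg(phw_data, 0x3C, &val);
				hw_set_dxx_reg(phw_data, 0x3C, val & 0x000003FF);
			}
		}
		PHY_DEBUG(("[CAL] ** TX_IQ_CALIBRATION FAILURE !!\n"));
	}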
@@ -1166,30 +1056,27 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
}
}
- // i. Set "calib_start" to 0x0
+ /* i. Set "calib_start" to 0x0 */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
reg_mode_ctrl &= ~MASK_CALIB_START;
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- // g. Enable AGC
- //hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &val);
+ /* g. Enable AGC */
+ /* hw_get_dxx_reg(phw_data, REG_AGC_CTRL3, &val); */
reg_agc_ctrl3 |= BIT(2);
reg_agc_ctrl3 &= ~(MASK_LNA_FIX_GAIN|MASK_AGC_FIX);
hw_set_dxx_reg(phw_data, REG_AGC_CTRL3, reg_agc_ctrl3);
#ifdef _DEBUG
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
hw_get_dxx_reg(phw_data, 0x54, &val);
PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));
tx_cal_reg[0] = _s4_to_s32((val & 0xF0000000) >> 28);
tx_cal_reg[1] = _s4_to_s32((val & 0x0F000000) >> 24);
tx_cal_reg[2] = _s4_to_s32((val & 0x00F00000) >> 20);
tx_cal_reg[3] = _s4_to_s32((val & 0x000F0000) >> 16);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */ {
hw_get_dxx_reg(phw_data, 0x3C, &val);
PHY_DEBUG(("[CAL] ** 0x3C = 0x%08X\n", val));
tx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
@@ -1206,11 +1093,13 @@ void _tx_iq_calibration_winbond(struct hw_data *phw_data)
#endif
- // for test - BEN
- // RF Control Override
+ /*
+ * for test - BEN
+ * RF Control Override
+ */
}
-/////////////////////////////////////////////////////////////////////////////////////////
+/*****************************************************/
u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 frequency)
{
u32 reg_mode_ctrl;
@@ -1236,51 +1125,49 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
u32 pwr_image;
u8 verify_count;
- s32 iqcal_tone_i_avg,iqcal_tone_q_avg;
- s32 iqcal_image_i_avg,iqcal_image_q_avg;
- u16 capture_time;
+ s32 iqcal_tone_i_avg, iqcal_tone_q_avg;
+ s32 iqcal_image_i_avg, iqcal_image_q_avg;
+ u16 capture_time;
PHY_DEBUG(("[CAL] -> [5]_rx_iq_calibration_loop()\n"));
PHY_DEBUG(("[CAL] ** factor = %d\n", factor));
-// RF Control Override
+/* RF Control Override */
hw_get_cxx_reg(phw_data, 0x80, &val);
val |= BIT(19);
hw_set_cxx_reg(phw_data, 0x80, val);
-// RF_Ctrl
+/* RF_Ctrl */
hw_get_cxx_reg(phw_data, 0xE4, &val);
val |= BIT(0);
hw_set_cxx_reg(phw_data, 0xE4, val);
PHY_DEBUG(("[CAL] ** RF_CTRL(0xE4) = 0x%08X", val));
- hw_set_dxx_reg(phw_data, 0x58, 0x44444444); // IQ_Alpha
+ hw_set_dxx_reg(phw_data, 0x58, 0x44444444); /* IQ_Alpha */
- // b.
+ /* b. */
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
verify_count = 0;
- //for (loop = 0; loop < 1; loop++)
- //for (loop = 0; loop < LOOP_TIMES; loop++)
+ /* for (loop = 0; loop < 1; loop++) */
+ /* for (loop = 0; loop < LOOP_TIMES; loop++) */
loop = LOOP_TIMES;
- while (loop > 0)
- {
+ while (loop > 0) {
PHY_DEBUG(("[CAL] [%d.] <_rx_iq_calibration_loop>\n", (LOOP_TIMES-loop+1)));
- iqcal_tone_i_avg=0;
- iqcal_tone_q_avg=0;
- iqcal_image_i_avg=0;
- iqcal_image_q_avg=0;
- capture_time=0;
-
- for(capture_time=0; capture_time<10; capture_time++)
- {
- // i. Set "calib_start" to 0x0
+ iqcal_tone_i_avg = 0;
+ iqcal_tone_q_avg = 0;
+ iqcal_image_i_avg = 0;
+ iqcal_image_q_avg = 0;
+ capture_time = 0;
+
+ for (capture_time = 0; capture_time < 10; capture_time++) {
+ /* i. Set "calib_start" to 0x0 */
reg_mode_ctrl &= ~MASK_CALIB_START;
- if( !hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl) )//20060718.1 modify
+ if (!hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl))/*20060718.1 modify */
return 0;
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
@@ -1289,7 +1176,7 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- // c.
+ /* c. */
hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));
@@ -1305,16 +1192,13 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
PHY_DEBUG(("[CAL] ** iqcal_image_i = %d, iqcal_image_q = %d\n",
iqcal_image_i, iqcal_image_q));
- if( capture_time == 0)
- {
+ if (capture_time == 0)
continue;
- }
- else
- {
- iqcal_image_i_avg=( iqcal_image_i_avg*(capture_time-1) +iqcal_image_i)/capture_time;
- iqcal_image_q_avg=( iqcal_image_q_avg*(capture_time-1) +iqcal_image_q)/capture_time;
- iqcal_tone_i_avg=( iqcal_tone_i_avg*(capture_time-1) +iqcal_tone_i)/capture_time;
- iqcal_tone_q_avg=( iqcal_tone_q_avg*(capture_time-1) +iqcal_tone_q)/capture_time;
+ else {
+ iqcal_image_i_avg = (iqcal_image_i_avg*(capture_time-1) + iqcal_image_i)/capture_time;
+ iqcal_image_q_avg = (iqcal_image_q_avg*(capture_time-1) + iqcal_image_q)/capture_time;
+ iqcal_tone_i_avg = (iqcal_tone_i_avg*(capture_time-1) + iqcal_tone_i)/capture_time;
+ iqcal_tone_q_avg = (iqcal_tone_q_avg*(capture_time-1) + iqcal_tone_q)/capture_time;
}
}
@@ -1324,7 +1208,7 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
iqcal_tone_i = iqcal_tone_i_avg;
iqcal_tone_q = iqcal_tone_q_avg;
- // d.
+ /* d. */
rot_tone_i_b = (iqcal_tone_i * iqcal_tone_i +
iqcal_tone_q * iqcal_tone_q) / 1024;
rot_tone_q_b = (iqcal_tone_i * iqcal_tone_q * (-1) +
@@ -1339,9 +1223,8 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
PHY_DEBUG(("[CAL] ** rot_image_i_b = %d\n", rot_image_i_b));
PHY_DEBUG(("[CAL] ** rot_image_q_b = %d\n", rot_image_q_b));
- // f.
- if (rot_tone_i_b == 0)
- {
+ /* f. */
+ if (rot_tone_i_b == 0) {
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> ERROR *******\n"));
PHY_DEBUG(("[CAL] ** rot_tone_i_b=0 to calculate EPS and THETA !!\n"));
PHY_DEBUG(("[CAL] ******************************************\n"));
@@ -1363,26 +1246,21 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
PHY_DEBUG(("[CAL] ** sin(b/2)=%d, cos(b/2)=%d\n", sin_b, cos_b));
PHY_DEBUG(("[CAL] ** sin(b)=%d, cos(b)=%d\n", sin_2b, cos_2b));
- if (cos_2b == 0)
- {
+ if (cos_2b == 0) {
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> ERROR *******\n"));
PHY_DEBUG(("[CAL] ** cos(b)=0 !!\n"));
PHY_DEBUG(("[CAL] ******************************************\n"));
break;
}
- // 1280 * 32768 = 41943040
+ /* 1280 * 32768 = 41943040 */
temp1 = (41943040/cos_2b)*cos_b;
- //temp2 = (41943040/cos_2b)*sin_b*(-1);
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ /* temp2 = (41943040/cos_2b)*sin_b*(-1); */
+ if (phw_data->revision == 0x2002)/* 1st-cut */
temp2 = (41943040/cos_2b)*sin_b*(-1);
- }
- else // 2nd-cut
- {
+ else/* 2nd-cut */
temp2 = (41943040*4/cos_2b)*sin_b*(-1);
- }
rx_cal_flt_b[0] = _floor(temp1/(32768+a_2));
rx_cal_flt_b[1] = _floor(temp2/(32768-a_2));
@@ -1403,23 +1281,21 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
PHY_DEBUG(("[CAL] rx_cal[2] = %d\n", rx_cal[2]));
PHY_DEBUG(("[CAL] rx_cal[3] = %d\n", rx_cal[3]));
- // e.
+ /* e. */
pwr_tone = (iqcal_tone_i*iqcal_tone_i + iqcal_tone_q*iqcal_tone_q);
pwr_image = (iqcal_image_i*iqcal_image_i + iqcal_image_q*iqcal_image_q)*factor;
PHY_DEBUG(("[CAL] ** pwr_tone = %d\n", pwr_tone));
PHY_DEBUG(("[CAL] ** pwr_image = %d\n", pwr_image));
- if (pwr_tone > pwr_image)
- {
+ if (pwr_tone > pwr_image) {
verify_count++;
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *************\n"));
PHY_DEBUG(("[CAL] ** VERIFY OK # %d !!\n", verify_count));
PHY_DEBUG(("[CAL] ******************************************\n"));
- if (verify_count > 2)
- {
+ if (verify_count > 2) {
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *********\n"));
PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION OK !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
@@ -1428,19 +1304,16 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
continue;
}
- // g.
+ /* g. */
hw_get_dxx_reg(phw_data, 0x54, &val);
PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
rx_cal_reg[0] = _s4_to_s32((val & 0x0000F000) >> 12);
rx_cal_reg[1] = _s4_to_s32((val & 0x00000F00) >> 8);
rx_cal_reg[2] = _s4_to_s32((val & 0x000000F0) >> 4);
rx_cal_reg[3] = _s4_to_s32((val & 0x0000000F));
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
rx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
rx_cal_reg[1] = _s6_to_s32((val & 0x07E00000) >> 21);
rx_cal_reg[2] = _s6_to_s32((val & 0x001F8000) >> 15);
@@ -1452,22 +1325,17 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
PHY_DEBUG(("[CAL] rx_cal_reg[2] = %d\n", rx_cal_reg[2]));
PHY_DEBUG(("[CAL] rx_cal_reg[3] = %d\n", rx_cal_reg[3]));
- if (phw_data->revision == 0x2002) // 1st-cut
- {
- if (((rx_cal_reg[0]==7) || (rx_cal_reg[0]==(-8))) &&
- ((rx_cal_reg[3]==7) || (rx_cal_reg[3]==(-8))))
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
+ if (((rx_cal_reg[0] == 7) || (rx_cal_reg[0] == (-8))) &&
+ ((rx_cal_reg[3] == 7) || (rx_cal_reg[3] == (-8)))) {
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *********\n"));
PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION SATUATION !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
break;
}
- }
- else // 2nd-cut
- {
- if (((rx_cal_reg[0]==31) || (rx_cal_reg[0]==(-32))) &&
- ((rx_cal_reg[3]==31) || (rx_cal_reg[3]==(-32))))
- {
+ } else /* 2nd-cut */{
+ if (((rx_cal_reg[0] == 31) || (rx_cal_reg[0] == (-32))) &&
+ ((rx_cal_reg[3] == 31) || (rx_cal_reg[3] == (-32)))) {
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration_loop> *********\n"));
PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION SATUATION !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
@@ -1485,17 +1353,14 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
PHY_DEBUG(("[CAL] apply rx_cal[3] = %d\n", rx_cal[3]));
hw_get_dxx_reg(phw_data, 0x54, &val);
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
val &= 0x0000FFFF;
val |= ((_s32_to_s4(rx_cal[0]) << 12)|
(_s32_to_s4(rx_cal[1]) << 8)|
(_s32_to_s4(rx_cal[2]) << 4)|
(_s32_to_s4(rx_cal[3])));
hw_set_dxx_reg(phw_data, 0x54, val);
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
val &= 0x000003FF;
val |= ((_s32_to_s5(rx_cal[0]) << 27)|
(_s32_to_s6(rx_cal[1]) << 21)|
@@ -1503,7 +1368,7 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
(_s32_to_s5(rx_cal[3]) << 10));
hw_set_dxx_reg(phw_data, 0x54, val);
- if( loop == 3 )
+ if (loop == 3)
return 0;
}
PHY_DEBUG(("[CAL] ** CALIB_DATA = 0x%08X\n", val));
@@ -1514,12 +1379,12 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
return 1;
}
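Annotation: the factor argument sets the required tone-to-image power ratio; a pass is accepted only when iqcal_tone_i^2 + iqcal_tone_q^2 exceeds factor * (iqcal_image_i^2 + iqcal_image_q^2). The values used by _rx_iq_calibration_winbond() below, 12589, 7943 and 5011, are very close to 10^4.1, 10^3.9 and 10^3.7, i.e. image-rejection targets of roughly 41 dB, 39 dB and 37 dB that are relaxed on each retry; that interpretation is inferred from the numbers and is not stated in the driver.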
-//////////////////////////////////////////////////////////
+/*************************************************/
-//////////////////////////////////////////////////////////////////////////
+/***************************************************************/
void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
{
-// figo 20050523 marked thsi flag for can't compile for relesase
+/* figo 20050523 marked this flag because it can't compile for release */
#ifdef _DEBUG
s32 rx_cal_reg[4];
u32 val;
@@ -1528,37 +1393,34 @@ void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
u8 result;
PHY_DEBUG(("[CAL] -> [5]_rx_iq_calibration()\n"));
-// a. Set RFIC to "RX calibration mode"
- //; ----- Calibration (7). RX path IQ imbalance calibration loop
- // 0x01 0xFFBFC2 ; 3FEFF ; Calibration (7a). enable RX IQ calibration loop circuits
+/* a. Set RFIC to "RX calibration mode" */
+ /* ; ----- Calibration (7). RX path IQ imbalance calibration loop */
+ /* 0x01 0xFFBFC2 ; 3FEFF ; Calibration (7a). enable RX IQ calibration loop circuits */
phy_set_rf_data(phw_data, 1, (1<<24)|0xEFBFC2);
- // 0x0B 0x1A01D6 ; 06817 ; Calibration (7b). enable RX I/Q cal loop SW1 circuit
+ /* 0x0B 0x1A01D6 ; 06817 ; Calibration (7b). enable RX I/Q cal loop SW1 circuits */
phy_set_rf_data(phw_data, 11, (11<<24)|0x1A05D6);
- //0x05 0x24848A ; 09212 ; Calibration (7c). setting TX-VGA gain (TXGCH) to 2 --> to be optimized
- phy_set_rf_data(phw_data, 5, (5<<24)| phw_data->txvga_setting_for_cal);
- //0x06 0x06840C ; 01A10 ; Calibration (7d). RXGCH=00; RXGCL=010 000 (RXVGA) --> to be optimized
+ /* 0x05 0x24848A ; 09212 ; Calibration (7c). setting TX-VGA gain (TXGCH) to 2 --> to be optimized */
+ phy_set_rf_data(phw_data, 5, (5<<24) | phw_data->txvga_setting_for_cal);
+ /* 0x06 0x06840C ; 01A10 ; Calibration (7d). RXGCH=00; RXGCL=010 000 (RXVGA) --> to be optimized */
phy_set_rf_data(phw_data, 6, (6<<24)|0x06834C);
- //0x00 0xFFF1C0 ; 3F7C7 ; Calibration (7e). turn on IQ imbalance/Test mode
+ /* 0x00 0xFFF1C0 ; 3F7C7 ; Calibration (7e). turn on IQ imbalance/Test mode */
phy_set_rf_data(phw_data, 0, (0<<24)|0xFFF1C0);
- // ; [BB-chip]: Calibration (7f). Send test pattern
- // ; [BB-chip]: Calibration (7g). Search RXGCL optimal value
- // ; [BB-chip]: Calibration (7h). Caculate RX-path IQ imbalance and setting RX path IQ compensation table
+ /* ; [BB-chip]: Calibration (7f). Send test pattern */
+ /* ; [BB-chip]: Calibration (7g). Search RXGCL optimal value */
+ /* ; [BB-chip]: Calibration (7h). Calculate RX-path IQ imbalance and set the RX path IQ compensation table */
result = _rx_iq_calibration_loop_winbond(phw_data, 12589, frequency);
- if (result > 0)
- {
+ if (result > 0) {
_reset_rx_cal(phw_data);
result = _rx_iq_calibration_loop_winbond(phw_data, 7943, frequency);
- if (result > 0)
- {
+ if (result > 0) {
_reset_rx_cal(phw_data);
result = _rx_iq_calibration_loop_winbond(phw_data, 5011, frequency);
- if (result > 0)
- {
+ if (result > 0) {
PHY_DEBUG(("[CAL] ** <_rx_iq_calibration> **************\n"));
PHY_DEBUG(("[CAL] ** RX_IQ_CALIBRATION FAILURE !!\n"));
PHY_DEBUG(("[CAL] **************************************\n"));
@@ -1571,15 +1433,12 @@ void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
hw_get_dxx_reg(phw_data, 0x54, &val);
PHY_DEBUG(("[CAL] ** 0x54 = 0x%08X\n", val));
- if (phw_data->revision == 0x2002) // 1st-cut
- {
+ if (phw_data->revision == 0x2002) /* 1st-cut */{
rx_cal_reg[0] = _s4_to_s32((val & 0x0000F000) >> 12);
rx_cal_reg[1] = _s4_to_s32((val & 0x00000F00) >> 8);
rx_cal_reg[2] = _s4_to_s32((val & 0x000000F0) >> 4);
rx_cal_reg[3] = _s4_to_s32((val & 0x0000000F));
- }
- else // 2nd-cut
- {
+ } else /* 2nd-cut */{
rx_cal_reg[0] = _s5_to_s32((val & 0xF8000000) >> 27);
rx_cal_reg[1] = _s6_to_s32((val & 0x07E00000) >> 21);
rx_cal_reg[2] = _s6_to_s32((val & 0x001F8000) >> 15);
@@ -1594,7 +1453,7 @@ void _rx_iq_calibration_winbond(struct hw_data *phw_data, u32 frequency)
}
-////////////////////////////////////////////////////////////////////////
+/*******************************************************/
void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency)
{
u32 reg_mode_ctrl;
@@ -1602,7 +1461,7 @@ void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency)
PHY_DEBUG(("[CAL] -> phy_calibration_winbond()\n"));
- // 20040701 1.1.25.1000 kevin
+ /* 20040701 1.1.25.1000 kevin */
hw_get_cxx_reg(phw_data, 0x80, &mac_ctrl);
hw_get_cxx_reg(phw_data, 0xE4, &rf_ctrl);
hw_get_dxx_reg(phw_data, 0x58, &iq_alpha);
@@ -1610,72 +1469,71 @@ void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency)
_rxadc_dc_offset_cancellation_winbond(phw_data, frequency);
- //_txidac_dc_offset_cancellation_winbond(phw_data);
- //_txqdac_dc_offset_cacellation_winbond(phw_data);
+ /* _txidac_dc_offset_cancellation_winbond(phw_data); */
+ /* _txqdac_dc_offset_cacellation_winbond(phw_data); */
_tx_iq_calibration_winbond(phw_data);
_rx_iq_calibration_winbond(phw_data, frequency);
- //------------------------------------------------------------------------
+ /*********************************************************************/
hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl);
- reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE|MASK_CALIB_START); // set when finish
+ reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE|MASK_CALIB_START); /* set when finish */
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- // i. Set RFIC to "Normal mode"
+ /* i. Set RFIC to "Normal mode" */
hw_set_cxx_reg(phw_data, 0x80, mac_ctrl);
hw_set_cxx_reg(phw_data, 0xE4, rf_ctrl);
hw_set_dxx_reg(phw_data, 0x58, iq_alpha);
- //------------------------------------------------------------------------
+ /*********************************************************************/
phy_init_rf(phw_data);
}
-//===========================
-void phy_set_rf_data( struct hw_data * pHwData, u32 index, u32 value )
+/******************/
+void phy_set_rf_data(struct hw_data *pHwData, u32 index, u32 value)
{
- u32 ltmp=0;
-
- switch( pHwData->phy_type )
- {
- case RF_MAXIM_2825:
- case RF_MAXIM_V1: // 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331)
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( value, 18 );
- break;
-
- case RF_MAXIM_2827:
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( value, 18 );
- break;
-
- case RF_MAXIM_2828:
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( value, 18 );
- break;
-
- case RF_MAXIM_2829:
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( value, 18 );
- break;
-
- case RF_AIROHA_2230:
- case RF_AIROHA_2230S: // 20060420 Add this
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( value, 20 );
- break;
-
- case RF_AIROHA_7230:
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | (value&0xffffff);
- break;
-
- case RF_WB_242:
- case RF_WB_242_1: // 20060619.5 Add
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( value, 24 );
- break;
- }
+ u32 ltmp = 0;
+
+ switch (pHwData->phy_type) {
+ case RF_MAXIM_2825:
+ case RF_MAXIM_V1: /* 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331) */
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_MAXIM_2827:
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_MAXIM_2828:
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_MAXIM_2829:
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_AIROHA_2230:
+ case RF_AIROHA_2230S: /* 20060420 Add this */
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(value, 20);
+ break;
+
+ case RF_AIROHA_7230:
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | (value&0xffffff);
+ break;
+
+ case RF_WB_242:
+ case RF_WB_242_1:/* 20060619.5 Add */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(value, 24);
+ break;
+ }
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
}
-// 20060717 modify as Bruce's mail
+/* 20060717 modify as Bruce's mail */
unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *phw_data)
{
int init_txvga = 0;
@@ -1685,26 +1543,27 @@ unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *phw_data)
s32 iqcal_tone_q0;
u32 sqsum;
s32 iq_mag_0_tx;
- u8 reg_state;
- int current_txvga;
+ u8 reg_state;
+ int current_txvga;
reg_state = 0;
- for( init_txvga=0; init_txvga<10; init_txvga++)
- {
- current_txvga = ( 0x24C40A|(init_txvga<<6) );
- phy_set_rf_data(phw_data, 5, ((5<<24)|current_txvga) );
+ for (init_txvga = 0; init_txvga < 10; init_txvga++) {
+ current_txvga = (0x24C40A|(init_txvga<<6));
+ phy_set_rf_data(phw_data, 5, ((5<<24)|current_txvga));
phw_data->txvga_setting_for_cal = current_txvga;
- msleep(30); // 20060612.1.a
+ msleep(30);/* 20060612.1.a */
- if( !hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl) ) // 20060718.1 modify
+ if (!hw_get_dxx_reg(phw_data, REG_MODE_CTRL, &reg_mode_ctrl))/* 20060718.1 modify */
return false;
PHY_DEBUG(("[CAL] MODE_CTRL (read) = 0x%08X\n", reg_mode_ctrl));
- // a. Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to
- // enable "IQ alibration Mode II"
+ /*
+ * a. Set iqcal_mode[1:0] to 0x2 and set "calib_start" to 0x1 to
+ * enable "IQ alibration Mode II"
+ */
reg_mode_ctrl &= ~(MASK_IQCAL_TONE_SEL|MASK_IQCAL_MODE);
reg_mode_ctrl &= ~MASK_IQCAL_MODE;
reg_mode_ctrl |= (MASK_CALIB_START|0x02);
@@ -1712,15 +1571,15 @@ unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *phw_data)
hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- udelay(1); // 20060612.1.a
+ udelay(1);/* 20060612.1.a */
- udelay(300); // 20060612.1.a
+ udelay(300);/* 20060612.1.a */
- // b.
+ /* b. */
hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));
- udelay(300); // 20060612.1.a
+ udelay(300);/* 20060612.1.a */
iqcal_tone_i0 = _s13_to_s32(val & 0x00001FFF);
iqcal_tone_q0 = _s13_to_s32((val & 0x03FFE000) >> 13);
@@ -1731,23 +1590,18 @@ unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *phw_data)
iq_mag_0_tx = (s32) _sqrt(sqsum);
PHY_DEBUG(("[CAL] ** auto_adjust_txvga_for_iq_mag_0_tx=%d\n", iq_mag_0_tx));
- if( iq_mag_0_tx>=700 && iq_mag_0_tx<=1750 )
+ if (iq_mag_0_tx >= 700 && iq_mag_0_tx <= 1750)
break;
- else if(iq_mag_0_tx > 1750)
- {
- init_txvga=-2;
+ else if (iq_mag_0_tx > 1750) {
+ init_txvga = -2;
continue;
- }
- else
+ } else
continue;
}
- if( iq_mag_0_tx>=700 && iq_mag_0_tx<=1750 )
+ if (iq_mag_0_tx >= 700 && iq_mag_0_tx <= 1750)
return true;
else
return false;
}
-
-
-
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index d9a8128b21f..990f9d4bdbb 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -966,42 +966,42 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
switch (pHwData->phy_type) {
case RF_MAXIM_2825:
case RF_MAXIM_V1: /* 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331) */
- number = sizeof(max2825_rf_data) / sizeof(max2825_rf_data[0]);
+ number = ARRAY_SIZE(max2825_rf_data);
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2825_rf_data[i]; /* Backup Rf parameter */
pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2825_rf_data[i], 18);
}
break;
case RF_MAXIM_2827:
- number = sizeof(max2827_rf_data) / sizeof(max2827_rf_data[0]);
+ number = ARRAY_SIZE(max2827_rf_data);
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2827_rf_data[i];
pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2827_rf_data[i], 18);
}
break;
case RF_MAXIM_2828:
- number = sizeof(max2828_rf_data) / sizeof(max2828_rf_data[0]);
+ number = ARRAY_SIZE(max2828_rf_data);
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2828_rf_data[i];
pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2828_rf_data[i], 18);
}
break;
case RF_MAXIM_2829:
- number = sizeof(max2829_rf_data) / sizeof(max2829_rf_data[0]);
+ number = ARRAY_SIZE(max2829_rf_data);
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2829_rf_data[i];
pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2829_rf_data[i], 18);
}
break;
case RF_AIROHA_2230:
- number = sizeof(al2230_rf_data) / sizeof(al2230_rf_data[0]);
+ number = ARRAY_SIZE(al2230_rf_data);
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al2230_rf_data[i];
pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230_rf_data[i], 20);
}
break;
case RF_AIROHA_2230S:
- number = sizeof(al2230s_rf_data) / sizeof(al2230s_rf_data[0]);
+ number = ARRAY_SIZE(al2230s_rf_data);
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al2230s_rf_data[i];
pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230s_rf_data[i], 20);
@@ -1013,12 +1013,12 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
#ifdef _PE_STATE_DUMP_
printk("* PLL_ON low\n");
#endif
- number = sizeof(al7230_rf_data_24) / sizeof(al7230_rf_data_24[0]);
+ number = ARRAY_SIZE(al7230_rf_data_24);
Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
break;
case RF_WB_242:
case RF_WB_242_1:
- number = sizeof(w89rf242_rf_data) / sizeof(w89rf242_rf_data[0]);
+ number = ARRAY_SIZE(w89rf242_rf_data);
for (i = 0; i < number; i++) {
ltmp = w89rf242_rf_data[i];
if (i == 4) { /* Update the VCO trim from EEPROM */
@@ -1119,7 +1119,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
printk("* PLL_ON low\n");
#endif
- number = sizeof(al7230_rf_data_50) / sizeof(al7230_rf_data_50[0]);
+ number = ARRAY_SIZE(al7230_rf_data_50);
Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
	/* Write to register. number must be less than or equal to 16 */
for (i = 0; i < number; i++)
@@ -1747,7 +1747,7 @@ void RFSynthesizer_SwitchingChannel(struct hw_data *pHwData, struct chan_info C
pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2829_channel_data_24[Channel.ChanNo-1][i], 18);
Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
} else if (Channel.band == BAND_TYPE_OFDM_5) {
- count = sizeof(max2829_channel_data_50) / sizeof(max2829_channel_data_50[0]);
+ count = ARRAY_SIZE(max2829_channel_data_50);
for (i = 0; i < count; i++) {
if (max2829_channel_data_50[i][0] == Channel.ChanNo) {
@@ -1783,13 +1783,13 @@ void RFSynthesizer_SwitchingChannel(struct hw_data *pHwData, struct chan_info C
/* Update BB register */
BBProcessor_AL7230_2400(pHwData);
- number = sizeof(al7230_rf_data_24) / sizeof(al7230_rf_data_24[0]);
+ number = ARRAY_SIZE(al7230_rf_data_24);
Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
} else {
/* Update BB register */
BBProcessor_AL7230_5000(pHwData);
- number = sizeof(al7230_rf_data_50) / sizeof(al7230_rf_data_50[0]);
+ number = ARRAY_SIZE(al7230_rf_data_50);
Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
}
@@ -1814,7 +1814,7 @@ void RFSynthesizer_SwitchingChannel(struct hw_data *pHwData, struct chan_info C
Wb35Reg_Write(pHwData, 0x0864, ltmp);
}
- count = sizeof(al7230_channel_data_5) / sizeof(al7230_channel_data_5[0]);
+ count = ARRAY_SIZE(al7230_channel_data_5);
for (i = 0; i < count; i++) {
if (al7230_channel_data_5[i][0] == Channel.ChanNo) {
@@ -1978,7 +1978,7 @@ u8 RFSynthesizer_SetAiroha2230Power(struct hw_data *pHwData, u8 index)
u32 PowerData;
u8 i, count;
- count = sizeof(al2230_txvga_data) / sizeof(al2230_txvga_data[0]);
+ count = ARRAY_SIZE(al2230_txvga_data);
for (i = 0; i < count; i++) {
if (al2230_txvga_data[i][1] >= index)
break;
@@ -1996,7 +1996,7 @@ u8 RFSynthesizer_SetAiroha7230Power(struct hw_data *pHwData, u8 index)
u32 PowerData;
u8 i, count;
- count = sizeof(al7230_txvga_data) / sizeof(al7230_txvga_data[0]);
+ count = ARRAY_SIZE(al7230_txvga_data);
for (i = 0; i < count; i++) {
if (al7230_txvga_data[i][1] >= index)
break;
@@ -2013,7 +2013,7 @@ u8 RFSynthesizer_SetWinbond242Power(struct hw_data *pHwData, u8 index)
u32 PowerData;
u8 i, count;
- count = sizeof(w89rf242_txvga_data) / sizeof(w89rf242_txvga_data[0]);
+ count = ARRAY_SIZE(w89rf242_txvga_data);
for (i = 0; i < count; i++) {
if (w89rf242_txvga_data[i][1] >= index)
break;
@@ -2184,14 +2184,14 @@ void GetTxVgaFromEEPROM(struct hw_data *pHwData)
/* Adjust WB_242 to WB_242_1 TxVga scale */
if (pHwData->phy_type == RF_WB_242) {
for (i = 0; i < 4; i++) { /* Only 2412 2437 2462 2484 case must be modified */
- for (j = 0; j < (sizeof(w89rf242_txvga_old_mapping) / sizeof(w89rf242_txvga_old_mapping[0])); j++) {
+ for (j = 0; j < ARRAY_SIZE(w89rf242_txvga_old_mapping); j++) {
if (pctmp[i] < (u8) w89rf242_txvga_old_mapping[j][1]) {
pctmp[i] = (u8) w89rf242_txvga_old_mapping[j][0];
break;
}
}
- if (j == (sizeof(w89rf242_txvga_old_mapping) / sizeof(w89rf242_txvga_old_mapping[0])))
+ if (j == ARRAY_SIZE(w89rf242_txvga_old_mapping))
pctmp[i] = (u8)w89rf242_txvga_old_mapping[j-1][0];
}
}
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 251caa052ee..abaa05a630f 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -700,7 +700,7 @@ static int wb35_hw_init(struct ieee80211_hw *hw)
Mds_initial(priv);
/*
- * If no user-defined address in the registry, use the addresss
+ * If no user-defined address in the registry, use the address
* "burned" on the NIC instead.
*/
pMacAddr = priv->sLocalPara.ThisMacAddress;
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 390628c6c1e..c4fe0ec9507 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -502,7 +502,7 @@ HCF_STATIC hcf_16* BASED xxxx[ ] = {
#endif // MSF_COMPONENT_ID
NULL //endsentinel
};
-#define xxxx_PRI_IDENTITY_OFFSET (sizeof(xxxx)/sizeof(xxxx[0]) - 3)
+#define xxxx_PRI_IDENTITY_OFFSET (ARRAY_SIZE(xxxx) - 3)
#endif // MSF_COMPONENT_ID / HCF_EXT_MB
diff --git a/drivers/staging/wlags49_h2/mdd.h b/drivers/staging/wlags49_h2/mdd.h
index b45c7ddd92e..b50b7b0a5ca 100644
--- a/drivers/staging/wlags49_h2/mdd.h
+++ b/drivers/staging/wlags49_h2/mdd.h
@@ -727,10 +727,10 @@ XX1( CFG_SCAN, SCAN_RS_STRCT, scan_result[32] ) /*Scan results *
#define CFG_FCBE 0xFCBE //FW codes ahead of available documentation, so ???????
#define CFG_FCBF 0xFCBF //FW codes ahead of available documentation, so ???????
-#define CFG_HANDOVER_ADDR 0xFCC0 //[AP] Station MAC Adrress re-associated with other AP
+#define CFG_HANDOVER_ADDR 0xFCC0 //[AP] Station MAC Address re-associated with other AP
#define CFG_SCAN_CHANNEL 0xFCC2 //Channel set for host requested scan
//;?#define CFG_SCAN_CHANNEL_MASK 0xFCC2 // contains
-#define CFG_DISASSOCIATE_ADDR 0xFCC4 //[AP] Station MAC Adrress to be disassociated
+#define CFG_DISASSOCIATE_ADDR 0xFCC4 //[AP] Station MAC Address to be disassociated
#define CFG_PROBE_DATA_RATE 0xFCC5 //WARP connection control
#define CFG_FRAME_BURST_LIMIT 0xFCC6 //
#define CFG_COEXISTENSE_BEHAVIOUR 0xFCC7 //[AP]
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 10abd406b09..19c33545865 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright (c) 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
@@ -83,7 +83,6 @@
#include <linux/if_arp.h>
#include <linux/ioport.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -133,36 +132,35 @@ extern dbg_info_t *DbgInfo;
******************************************************************************/
static int wl_adapter_attach(struct pcmcia_device *link)
{
- struct net_device *dev;
- struct wl_private *lp;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_attach" );
- DBG_ENTER( DbgInfo );
-
- dev = wl_device_alloc();
- if(dev == NULL) {
- DBG_ERROR( DbgInfo, "wl_device_alloc returned NULL\n");
- return -ENOMEM;
- }
-
- link->io.NumPorts1 = HCF_NUM_IO_PORTS;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->io.IOAddrLines = 6;
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.ConfigIndex = 5;
- link->conf.Present = PRESENT_OPTION;
-
- link->priv = dev;
- lp = wl_priv(dev);
- lp->link = link;
-
- wl_adapter_insert(link);
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_adapter_attach
+ struct net_device *dev;
+ struct wl_private *lp;
+ /*--------------------------------------------------------------------*/
+
+ DBG_FUNC("wl_adapter_attach");
+ DBG_ENTER(DbgInfo);
+
+ dev = wl_device_alloc();
+ if (dev == NULL) {
+ DBG_ERROR(DbgInfo, "wl_device_alloc returned NULL\n");
+ return -ENOMEM;
+ }
+
+ link->resource[0]->end = HCF_NUM_IO_PORTS;
+	link->resource[0]->flags = IO_DATA_PATH_WIDTH_16;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 5;
+ link->conf.Present = PRESENT_OPTION;
+
+ link->priv = dev;
+ lp = wl_priv(dev);
+ lp->link = link;
+
+ wl_adapter_insert(link);
+
+ DBG_LEAVE(DbgInfo);
+ return 0;
+} /* wl_adapter_attach */
/*============================================================================*/
@@ -190,25 +188,24 @@ static int wl_adapter_attach(struct pcmcia_device *link)
******************************************************************************/
static void wl_adapter_detach(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
- /*------------------------------------------------------------------------*/
-
+ struct net_device *dev = link->priv;
+ /*--------------------------------------------------------------------*/
- DBG_FUNC( "wl_adapter_detach" );
- DBG_ENTER( DbgInfo );
- DBG_PARAM( DbgInfo, "link", "0x%p", link );
+ DBG_FUNC("wl_adapter_detach");
+ DBG_ENTER(DbgInfo);
+ DBG_PARAM(DbgInfo, "link", "0x%p", link);
- wl_adapter_release(link);
+ wl_adapter_release(link);
- if (dev) {
- unregister_wlags_sysfs(dev);
- unregister_netdev(dev);
- }
+ if (dev) {
+ unregister_wlags_sysfs(dev);
+ unregister_netdev(dev);
+ }
- wl_device_dealloc(dev);
+ wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
-} // wl_adapter_detach
+ DBG_LEAVE(DbgInfo);
+} /* wl_adapter_detach */
/*============================================================================*/
@@ -232,33 +229,33 @@ static void wl_adapter_detach(struct pcmcia_device *link)
* N/A
*
******************************************************************************/
-void wl_adapter_release( struct pcmcia_device *link )
+void wl_adapter_release(struct pcmcia_device *link)
{
- DBG_FUNC( "wl_adapter_release" );
- DBG_ENTER( DbgInfo );
- DBG_PARAM( DbgInfo, "link", "0x%p", link);
+ DBG_FUNC("wl_adapter_release");
+ DBG_ENTER(DbgInfo);
+ DBG_PARAM(DbgInfo, "link", "0x%p", link);
- /* Stop hardware */
- wl_remove(link->priv);
+ /* Stop hardware */
+ wl_remove(link->priv);
- pcmcia_disable_device(link);
+ pcmcia_disable_device(link);
- DBG_LEAVE( DbgInfo );
-} // wl_adapter_release
+ DBG_LEAVE(DbgInfo);
+} /* wl_adapter_release */
/*============================================================================*/
static int wl_adapter_suspend(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
+ struct net_device *dev = link->priv;
- //if (link->open) {
+ /* if (link->open) { */
netif_device_detach(dev);
wl_suspend(dev);
-//// CHECK! pcmcia_release_configuration(link->handle);
- //}
+ /* CHECK! pcmcia_release_configuration(link->handle); */
+ /* } */
- return 0;
-} // wl_adapter_suspend
+ return 0;
+} /* wl_adapter_suspend */
static int wl_adapter_resume(struct pcmcia_device *link)
{
@@ -266,10 +263,10 @@ static int wl_adapter_resume(struct pcmcia_device *link)
wl_resume(dev);
- netif_device_attach( dev );
+ netif_device_attach(dev);
return 0;
-} // wl_adapter_resume
+} /* wl_adapter_resume */
/*******************************************************************************
* wl_adapter_insert()
@@ -291,60 +288,60 @@ static int wl_adapter_resume(struct pcmcia_device *link)
* N/A
*
******************************************************************************/
-void wl_adapter_insert( struct pcmcia_device *link )
+void wl_adapter_insert(struct pcmcia_device *link)
{
- struct net_device *dev;
- int i;
- int ret;
- /*------------------------------------------------------------------------*/
+ struct net_device *dev;
+ int i;
+ int ret;
+ /*--------------------------------------------------------------------*/
- DBG_FUNC( "wl_adapter_insert" );
- DBG_ENTER( DbgInfo );
- DBG_PARAM( DbgInfo, "link", "0x%p", link );
+ DBG_FUNC("wl_adapter_insert");
+ DBG_ENTER(DbgInfo);
+ DBG_PARAM(DbgInfo, "link", "0x%p", link);
- dev = link->priv;
+ dev = link->priv;
- /* Do we need to allocate an interrupt? */
- link->conf.Attributes |= CONF_ENABLE_IRQ;
+ /* Do we need to allocate an interrupt? */
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+ link->io_lines = 6;
- ret = pcmcia_request_io(link, &link->io);
- if (ret != 0)
- goto failed;
+ ret = pcmcia_request_io(link);
+ if (ret != 0)
+ goto failed;
- ret = pcmcia_request_irq(link, (void *) wl_isr);
- if (ret != 0)
- goto failed;
+ ret = pcmcia_request_irq(link, (void *) wl_isr);
+ if (ret != 0)
+ goto failed;
- ret = pcmcia_request_configuration(link, &link->conf);
- if (ret != 0)
- goto failed;
+ ret = pcmcia_request_configuration(link, &link->conf);
+ if (ret != 0)
+ goto failed;
- dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
- SET_NETDEV_DEV(dev, &link->dev);
- if (register_netdev(dev) != 0) {
- printk("%s: register_netdev() failed\n", MODULE_NAME);
- goto failed;
- }
+ SET_NETDEV_DEV(dev, &link->dev);
+ if (register_netdev(dev) != 0) {
+ printk("%s: register_netdev() failed\n", MODULE_NAME);
+ goto failed;
+ }
- register_wlags_sysfs(dev);
+ register_wlags_sysfs(dev);
- printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, ""mac_address ",
- dev->name, dev->base_addr, dev->irq);
- for( i = 0; i < ETH_ALEN; i++ ) {
- printk("%02X%c", dev->dev_addr[i], ((i < (ETH_ALEN-1)) ? ':' : '\n'));
- }
+ printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, ""mac_address ",
+ dev->name, dev->base_addr, dev->irq);
+ for (i = 0; i < ETH_ALEN; i++)
+ printk("%02X%c", dev->dev_addr[i], ((i < (ETH_ALEN-1)) ? ':' : '\n'));
- DBG_LEAVE( DbgInfo );
- return;
+ DBG_LEAVE(DbgInfo);
+ return;
failed:
- wl_adapter_release( link );
+ wl_adapter_release(link);
- DBG_LEAVE(DbgInfo);
- return;
-} // wl_adapter_insert
+ DBG_LEAVE(DbgInfo);
+ return;
+} /* wl_adapter_insert */
/*============================================================================*/
@@ -367,38 +364,36 @@ failed:
* errno value otherwise
*
******************************************************************************/
-int wl_adapter_open( struct net_device *dev )
+int wl_adapter_open(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- struct pcmcia_device *link = lp->link;
- int result = 0;
- int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_adapter_open" );
- DBG_ENTER( DbgInfo );
- DBG_PRINT( "%s\n", VERSION_INFO );
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
-
- if(!pcmcia_dev_present(link))
- {
- DBG_LEAVE( DbgInfo );
- return -ENODEV;
- }
-
- link->open++;
-
- hcf_status = wl_open( dev );
-
- if( hcf_status != HCF_SUCCESS ) {
- link->open--;
- result = -ENODEV;
- }
-
- DBG_LEAVE( DbgInfo );
- return result;
-} // wl_adapter_open
+ struct wl_private *lp = wl_priv(dev);
+ struct pcmcia_device *link = lp->link;
+ int result = 0;
+ int hcf_status = HCF_SUCCESS;
+ /*--------------------------------------------------------------------*/
+
+ DBG_FUNC("wl_adapter_open");
+ DBG_ENTER(DbgInfo);
+ DBG_PRINT("%s\n", VERSION_INFO);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+
+ if (!pcmcia_dev_present(link)) {
+ DBG_LEAVE(DbgInfo);
+ return -ENODEV;
+ }
+
+ link->open++;
+
+ hcf_status = wl_open(dev);
+
+ if (hcf_status != HCF_SUCCESS) {
+ link->open--;
+ result = -ENODEV;
+ }
+
+ DBG_LEAVE(DbgInfo);
+ return result;
+} /* wl_adapter_open */
/*============================================================================*/
@@ -421,56 +416,55 @@ int wl_adapter_open( struct net_device *dev )
* errno value otherwise
*
******************************************************************************/
-int wl_adapter_close( struct net_device *dev )
+int wl_adapter_close(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- struct pcmcia_device *link = lp->link;
- /*------------------------------------------------------------------------*/
+ struct wl_private *lp = wl_priv(dev);
+ struct pcmcia_device *link = lp->link;
+ /*--------------------------------------------------------------------*/
+ DBG_FUNC("wl_adapter_close");
+ DBG_ENTER(DbgInfo);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- DBG_FUNC( "wl_adapter_close" );
- DBG_ENTER( DbgInfo );
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ if (link == NULL) {
+ DBG_LEAVE(DbgInfo);
+ return -ENODEV;
+ }
- if( link == NULL ) {
- DBG_LEAVE( DbgInfo );
- return -ENODEV;
- }
+ DBG_TRACE(DbgInfo, "%s: Shutting down adapter.\n", dev->name);
+ wl_close(dev);
- DBG_TRACE( DbgInfo, "%s: Shutting down adapter.\n", dev->name );
- wl_close( dev );
+ link->open--;
- link->open--;
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_adapter_close
+ DBG_LEAVE(DbgInfo);
+ return 0;
+} /* wl_adapter_close */
/*============================================================================*/
static struct pcmcia_device_id wl_adapter_ids[] = {
-#if ! ((HCF_TYPE) & HCF_TYPE_HII5)
+#if !((HCF_TYPE) & HCF_TYPE_HII5)
PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0003),
PCMCIA_DEVICE_PROD_ID12("Agere Systems", "Wireless PC Card Model 0110",
- 0x33103a9b, 0xe175b0dd),
+ 0x33103a9b, 0xe175b0dd),
#else
PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0004),
PCMCIA_DEVICE_PROD_ID12("Linksys", "WCF54G_Wireless-G_CompactFlash_Card",
- 0x0733cc81, 0x98a599e1),
-#endif // (HCF_TYPE) & HCF_TYPE_HII5
+ 0x0733cc81, 0x98a599e1),
+#endif /* (HCF_TYPE) & HCF_TYPE_HII5 */
PCMCIA_DEVICE_NULL,
- };
+};
MODULE_DEVICE_TABLE(pcmcia, wl_adapter_ids);
static struct pcmcia_driver wlags49_driver = {
- .owner = THIS_MODULE,
- .drv = {
- .name = DRIVER_NAME,
- },
- .probe = wl_adapter_attach,
- .remove = wl_adapter_detach,
- .id_table = wl_adapter_ids,
- .suspend = wl_adapter_suspend,
- .resume = wl_adapter_resume,
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = DRIVER_NAME,
+ },
+ .probe = wl_adapter_attach,
+ .remove = wl_adapter_detach,
+ .id_table = wl_adapter_ids,
+ .suspend = wl_adapter_suspend,
+ .resume = wl_adapter_resume,
};
@@ -493,21 +487,20 @@ static struct pcmcia_driver wlags49_driver = {
* -1 on error
*
******************************************************************************/
-int wl_adapter_init_module( void )
+int wl_adapter_init_module(void)
{
- int ret;
- /*------------------------------------------------------------------------*/
+ int ret;
+ /*--------------------------------------------------------------------*/
+ DBG_FUNC("wl_adapter_init_module");
+ DBG_ENTER(DbgInfo);
+ DBG_TRACE(DbgInfo, "wl_adapter_init_module() -- PCMCIA\n");
- DBG_FUNC( "wl_adapter_init_module" );
- DBG_ENTER( DbgInfo );
- DBG_TRACE( DbgInfo, "wl_adapter_init_module() -- PCMCIA\n" );
+ ret = pcmcia_register_driver(&wlags49_driver);
- ret = pcmcia_register_driver(&wlags49_driver);
-
- DBG_LEAVE( DbgInfo );
- return ret;
-} // wl_adapter_init_module
+ DBG_LEAVE(DbgInfo);
+ return ret;
+} /* wl_adapter_init_module */
/*============================================================================*/
@@ -528,18 +521,18 @@ int wl_adapter_init_module( void )
* N/A
*
******************************************************************************/
-void wl_adapter_cleanup_module( void )
+void wl_adapter_cleanup_module(void)
{
- DBG_FUNC( "wl_adapter_cleanup_module" );
- DBG_ENTER( DbgInfo );
- DBG_TRACE( DbgInfo, "wl_adapter_cleanup_module() -- PCMCIA\n" );
+ DBG_FUNC("wl_adapter_cleanup_module");
+ DBG_ENTER(DbgInfo);
+ DBG_TRACE(DbgInfo, "wl_adapter_cleanup_module() -- PCMCIA\n");
- pcmcia_unregister_driver(&wlags49_driver);
+ pcmcia_unregister_driver(&wlags49_driver);
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_adapter_cleanup_module
+ DBG_LEAVE(DbgInfo);
+ return;
+} /* wl_adapter_cleanup_module */
/*============================================================================*/
@@ -562,17 +555,16 @@ void wl_adapter_cleanup_module( void )
* 0 otherwise
*
******************************************************************************/
-int wl_adapter_is_open( struct net_device *dev )
+int wl_adapter_is_open(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- struct pcmcia_device *link = lp->link;
+ struct wl_private *lp = wl_priv(dev);
+ struct pcmcia_device *link = lp->link;
- if(!pcmcia_dev_present(link)) {
- return 0;
- }
+ if (!pcmcia_dev_present(link))
+ return 0;
- return( link->open );
-} // wl_adapter_is_open
+ return link->open;
+} /* wl_adapter_is_open */
/*============================================================================*/
@@ -596,97 +588,95 @@ int wl_adapter_is_open( struct net_device *dev )
* a pointer to a string describing the error(s)
*
******************************************************************************/
-const char* DbgEvent( int mask )
+const char *DbgEvent(int mask)
{
- static char DbgBuffer[256];
- char *pBuf;
- /*------------------------------------------------------------------------*/
-
+ static char DbgBuffer[256];
+ char *pBuf;
+ /*--------------------------------------------------------------------*/
- pBuf = DbgBuffer;
- *pBuf = '\0';
+ pBuf = DbgBuffer;
+ *pBuf = '\0';
- if( mask & CS_EVENT_WRITE_PROTECT )
- strcat( pBuf, "WRITE_PROTECT " );
+ if (mask & CS_EVENT_WRITE_PROTECT)
+ strcat(pBuf, "WRITE_PROTECT ");
- if(mask & CS_EVENT_CARD_LOCK)
- strcat( pBuf, "CARD_LOCK " );
+ if (mask & CS_EVENT_CARD_LOCK)
+ strcat(pBuf, "CARD_LOCK ");
- if(mask & CS_EVENT_CARD_INSERTION)
- strcat( pBuf, "CARD_INSERTION " );
+ if (mask & CS_EVENT_CARD_INSERTION)
+ strcat(pBuf, "CARD_INSERTION ");
- if(mask & CS_EVENT_CARD_REMOVAL)
- strcat( pBuf, "CARD_REMOVAL " );
+ if (mask & CS_EVENT_CARD_REMOVAL)
+ strcat(pBuf, "CARD_REMOVAL ");
- if(mask & CS_EVENT_BATTERY_DEAD)
- strcat( pBuf, "BATTERY_DEAD " );
+ if (mask & CS_EVENT_BATTERY_DEAD)
+ strcat(pBuf, "BATTERY_DEAD ");
- if(mask & CS_EVENT_BATTERY_LOW)
- strcat( pBuf, "BATTERY_LOW " );
+ if (mask & CS_EVENT_BATTERY_LOW)
+ strcat(pBuf, "BATTERY_LOW ");
- if(mask & CS_EVENT_READY_CHANGE)
- strcat( pBuf, "READY_CHANGE " );
+ if (mask & CS_EVENT_READY_CHANGE)
+ strcat(pBuf, "READY_CHANGE ");
- if(mask & CS_EVENT_CARD_DETECT)
- strcat( pBuf, "CARD_DETECT " );
+ if (mask & CS_EVENT_CARD_DETECT)
+ strcat(pBuf, "CARD_DETECT ");
- if(mask & CS_EVENT_RESET_REQUEST)
- strcat( pBuf, "RESET_REQUEST " );
+ if (mask & CS_EVENT_RESET_REQUEST)
+ strcat(pBuf, "RESET_REQUEST ");
- if(mask & CS_EVENT_RESET_PHYSICAL)
- strcat( pBuf, "RESET_PHYSICAL " );
+ if (mask & CS_EVENT_RESET_PHYSICAL)
+ strcat(pBuf, "RESET_PHYSICAL ");
- if(mask & CS_EVENT_CARD_RESET)
- strcat( pBuf, "CARD_RESET " );
+ if (mask & CS_EVENT_CARD_RESET)
+ strcat(pBuf, "CARD_RESET ");
- if(mask & CS_EVENT_REGISTRATION_COMPLETE)
- strcat( pBuf, "REGISTRATION_COMPLETE " );
+ if (mask & CS_EVENT_REGISTRATION_COMPLETE)
+ strcat(pBuf, "REGISTRATION_COMPLETE ");
- // if(mask & CS_EVENT_RESET_COMPLETE)
- // strcat( pBuf, "RESET_COMPLETE " );
+ /* if (mask & CS_EVENT_RESET_COMPLETE)
+ strcat(pBuf, "RESET_COMPLETE "); */
- if(mask & CS_EVENT_PM_SUSPEND)
- strcat( pBuf, "PM_SUSPEND " );
+ if (mask & CS_EVENT_PM_SUSPEND)
+ strcat(pBuf, "PM_SUSPEND ");
- if(mask & CS_EVENT_PM_RESUME)
- strcat( pBuf, "PM_RESUME " );
+ if (mask & CS_EVENT_PM_RESUME)
+ strcat(pBuf, "PM_RESUME ");
- if(mask & CS_EVENT_INSERTION_REQUEST)
- strcat( pBuf, "INSERTION_REQUEST " );
+ if (mask & CS_EVENT_INSERTION_REQUEST)
+ strcat(pBuf, "INSERTION_REQUEST ");
- if(mask & CS_EVENT_EJECTION_REQUEST)
- strcat( pBuf, "EJECTION_REQUEST " );
+ if (mask & CS_EVENT_EJECTION_REQUEST)
+ strcat(pBuf, "EJECTION_REQUEST ");
- if(mask & CS_EVENT_MTD_REQUEST)
- strcat( pBuf, "MTD_REQUEST " );
+ if (mask & CS_EVENT_MTD_REQUEST)
+ strcat(pBuf, "MTD_REQUEST ");
- if(mask & CS_EVENT_ERASE_COMPLETE)
- strcat( pBuf, "ERASE_COMPLETE " );
+ if (mask & CS_EVENT_ERASE_COMPLETE)
+ strcat(pBuf, "ERASE_COMPLETE ");
- if(mask & CS_EVENT_REQUEST_ATTENTION)
- strcat( pBuf, "REQUEST_ATTENTION " );
+ if (mask & CS_EVENT_REQUEST_ATTENTION)
+ strcat(pBuf, "REQUEST_ATTENTION ");
- if(mask & CS_EVENT_CB_DETECT)
- strcat( pBuf, "CB_DETECT " );
+ if (mask & CS_EVENT_CB_DETECT)
+ strcat(pBuf, "CB_DETECT ");
- if(mask & CS_EVENT_3VCARD)
- strcat( pBuf, "3VCARD " );
+ if (mask & CS_EVENT_3VCARD)
+ strcat(pBuf, "3VCARD ");
- if(mask & CS_EVENT_XVCARD)
- strcat( pBuf, "XVCARD " );
+ if (mask & CS_EVENT_XVCARD)
+ strcat(pBuf, "XVCARD ");
- if( *pBuf ) {
- pBuf[strlen(pBuf) - 1] = '\0';
- } else {
- if( mask != 0x0 ) {
- sprintf( pBuf, "<<0x%08x>>", mask );
- }
- }
+ if (*pBuf) {
+ pBuf[strlen(pBuf) - 1] = '\0';
+ } else {
+ if (mask != 0x0)
+ sprintf(pBuf, "<<0x%08x>>", mask);
+ }
- return pBuf;
-} // DbgEvent
+ return pBuf;
+} /* DbgEvent */
/*============================================================================*/
#endif /* DBG */
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index a9b8828a1a2..21f17be4f02 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -72,8 +72,6 @@ void wl_adapter_insert(struct pcmcia_device *link);
void wl_adapter_release(struct pcmcia_device *link);
-int wl_adapter_event(event_t event, int priority, event_callback_args_t *args );
-
int wl_adapter_init_module( void );
void wl_adapter_cleanup_module( void );
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index d9a0ad039c1..02f0a20e178 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -69,7 +69,6 @@
******************************************************************************/
#include <linux/version.h>
#ifdef BUS_PCMCIA
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 1aa61dbdb79..e2a7ad05e54 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1905,8 +1905,8 @@ int wl_rx_dma( struct net_device *dev )
DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if((( lp = (struct wl_private *)dev->priv ) != NULL ) &&
- !( lp->flags & WVLAN2_UIL_BUSY )) {
+ if((( lp = dev->priv ) != NULL ) &&
+ !( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 6751b4bad2e..020b17adee2 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -117,9 +117,13 @@ enum hermes_pci_versions {
};
static struct pci_device_id wl_pci_tbl[] __devinitdata = {
- { WL_LKM_PCI_VENDOR_ID, WL_LKM_PCI_DEVICE_ID_0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { WL_LKM_PCI_VENDOR_ID, WL_LKM_PCI_DEVICE_ID_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { WL_LKM_PCI_VENDOR_ID, WL_LKM_PCI_DEVICE_ID_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+ { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+ { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+ { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+
{ } /* Terminating entry */
};
@@ -465,7 +469,7 @@ void __devexit wl_pci_remove(struct pci_dev *pdev)
free_irq( dev->irq, dev );
#ifdef ENABLE_DMA
- wl_pci_dma_free( pdev, (struct wl_private *)dev->priv );
+ wl_pci_dma_free( pdev, dev->priv );
#endif
wl_device_dealloc( dev );
@@ -534,7 +538,7 @@ int wl_pci_setup( struct pci_dev *pdev )
#ifdef ENABLE_DMA
/* Allocate DMA Descriptors */
- if( wl_pci_dma_alloc( pdev, (struct wl_private *)dev->priv ) < 0 ) {
+ if( wl_pci_dma_alloc( pdev, dev->priv ) < 0 ) {
DBG_ERROR( DbgInfo, "Could not allocate DMA descriptor memory!!!\n" );
DBG_LEAVE( DbgInfo );
return -ENOMEM;
@@ -570,7 +574,7 @@ int wl_pci_setup( struct pci_dev *pdev )
}
/* Make sure interrupts are enabled properly for CardBus */
- lp = (struct wl_private *)dev->priv;
+ lp = dev->priv;
if( lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_CARDBUS ||
lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_PCI ) {
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
index 18d7b514ea6..cea04c44ec4 100644
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ b/drivers/staging/wlags49_h2/wl_pci.h
@@ -67,10 +67,10 @@
/*******************************************************************************
* constant definitions
******************************************************************************/
-#define WL_LKM_PCI_VENDOR_ID 0x11C1 // Lucent Microelectronics
-#define WL_LKM_PCI_DEVICE_ID_0 0xAB30 // Mini PCI
-#define WL_LKM_PCI_DEVICE_ID_1 0xAB34 // Mini PCI
-#define WL_LKM_PCI_DEVICE_ID_2 0xAB11 // WARP CardBus
+#define PCI_VENDOR_ID_WL_LKM		0x11C1	/* Lucent Microelectronics */
+#define PCI_DEVICE_ID_WL_LKM_0 0xAB30 /* Mini PCI */
+#define PCI_DEVICE_ID_WL_LKM_1 0xAB34 /* Mini PCI */
+#define PCI_DEVICE_ID_WL_LKM_2 0xAB11 /* WARP CardBus */
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index 292d5792dd7..7a1337db7aa 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -169,7 +169,7 @@ void parse_config(struct net_device *dev)
DBG_ENTER(DbgInfo);
/* Get the wavelan specific info for this device */
- wvlan_config = (struct wl_private *)dev->priv;
+ wvlan_config = dev->priv;
if (wvlan_config == NULL) {
DBG_ERROR(DbgInfo, "Wavelan specific info struct not present?\n");
return;
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index bbdb9973d1e..ce8ed410a7e 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -259,41 +259,6 @@ int is_valid_key_string( char *s )
/*******************************************************************************
- * hexdigit2int()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Converts a hexadecimal digit character to an integer
- *
- * PARAMETERS:
- *
- * c - the hexadecimal digit character
- *
- * RETURNS:
- *
- * the converted integer
- *
- ******************************************************************************/
-int hexdigit2int( char c )
-{
- if( c >= '0' && c <= '9' )
- return c - '0';
-
- if( c >= 'A' && c <= 'F' )
- return c - 'A' + 10;
-
- if( c >= 'a' && c <= 'f' )
- return c - 'a' + 10;
-
- return 0;
-} // hexdigit2int
-/*============================================================================*/
-
-
-
-
-/*******************************************************************************
* key_string2key()
*******************************************************************************
*
@@ -328,7 +293,7 @@ void key_string2key( char *ks, KEY_STRCT *key )
p = (char *)key->key;
for( i = 2; i < l; i+=2 ) {
- *p++ = ( hexdigit2int( ks[i] ) << 4 ) + hexdigit2int (ks[i+1] );
+ *p++ = (hex_to_bin(ks[i]) << 4) + hex_to_bin(ks[i+1]);
n++;
}
diff --git a/drivers/staging/wlags49_h2/wl_util.h b/drivers/staging/wlags49_h2/wl_util.h
index 561e85b5c9b..ba537a60059 100644
--- a/drivers/staging/wlags49_h2/wl_util.h
+++ b/drivers/staging/wlags49_h2/wl_util.h
@@ -71,8 +71,6 @@ int is_valid_key_string( char *s );
void key_string2key( char *ks, KEY_STRCT *key );
-int hexdigit2int( char c );
-
void wl_hcf_error( struct net_device *dev, int hcfStatus );
void wl_endian_translate_event( ltv_t *pLtv );
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 82fcc1665e9..426d4efbabc 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,6 +1,6 @@
config PRISM2_USB
tristate "Prism2.5/3 USB driver"
- depends on WLAN && USB
+ depends on WLAN && USB && CFG80211
select WIRELESS_EXT
select WEXT_PRIV
default n
diff --git a/drivers/staging/wlan-ng/Makefile b/drivers/staging/wlan-ng/Makefile
index 5edac5c8d4e..db5d597563f 100644
--- a/drivers/staging/wlan-ng/Makefile
+++ b/drivers/staging/wlan-ng/Makefile
@@ -4,5 +4,4 @@ prism2_usb-objs := prism2usb.o \
p80211conv.o \
p80211req.o \
p80211wep.o \
- p80211wext.o \
p80211netdev.o
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
new file mode 100644
index 00000000000..4af83d5318f
--- /dev/null
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -0,0 +1,763 @@
+/* cfg80211 Interface for prism2_usb module */
+
+
+/* Prism2 channel/frequency/bitrate declarations */
+static const struct ieee80211_channel prism2_channels[] = {
+ { .center_freq = 2412 },
+ { .center_freq = 2417 },
+ { .center_freq = 2422 },
+ { .center_freq = 2427 },
+ { .center_freq = 2432 },
+ { .center_freq = 2437 },
+ { .center_freq = 2442 },
+ { .center_freq = 2447 },
+ { .center_freq = 2452 },
+ { .center_freq = 2457 },
+ { .center_freq = 2462 },
+ { .center_freq = 2467 },
+ { .center_freq = 2472 },
+ { .center_freq = 2484 },
+};
+
+static const struct ieee80211_rate prism2_rates[] = {
+ { .bitrate = 10 },
+ { .bitrate = 20 },
+ { .bitrate = 55 },
+ { .bitrate = 110 }
+};
+
+#define PRISM2_NUM_CIPHER_SUITES 2
+static const u32 prism2_cipher_suites[PRISM2_NUM_CIPHER_SUITES] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104
+};
+
+
+/* prism2 device private data */
+struct prism2_wiphy_private {
+ wlandevice_t *wlandev;
+
+ struct ieee80211_supported_band band;
+ struct ieee80211_channel channels[ARRAY_SIZE(prism2_channels)];
+ struct ieee80211_rate rates[ARRAY_SIZE(prism2_rates)];
+
+ struct cfg80211_scan_request *scan_request;
+};
+
+static const void * const prism2_wiphy_privid = &prism2_wiphy_privid;
+
+
+/* Helper Functions */
+static int prism2_result2err(int prism2_result)
+{
+ int err = 0;
+
+ switch (prism2_result) {
+ case P80211ENUM_resultcode_invalid_parameters:
+ err = -EINVAL;
+ break;
+ case P80211ENUM_resultcode_implementation_failure:
+ err = -EIO;
+ break;
+ case P80211ENUM_resultcode_not_supported:
+ err = -EOPNOTSUPP;
+ break;
+ default:
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
+{
+ struct p80211msg_dot11req_mibset msg;
+ p80211item_uint32_t *mibitem = (p80211item_uint32_t *) &msg.mibattribute.data;
+
+ msg.msgcode = DIDmsg_dot11req_mibset;
+ mibitem->did = did;
+ mibitem->data = data;
+
+ return p80211req_dorequest(wlandev, (u8 *) &msg);
+}
+
+static int prism2_domibset_pstr32(wlandevice_t *wlandev,
+ u32 did, u8 len, u8 *data)
+{
+ struct p80211msg_dot11req_mibset msg;
+ p80211item_pstr32_t *mibitem = (p80211item_pstr32_t *) &msg.mibattribute.data;
+
+ msg.msgcode = DIDmsg_dot11req_mibset;
+ mibitem->did = did;
+ mibitem->data.len = len;
+ memcpy(mibitem->data.data, data, len);
+
+ return p80211req_dorequest(wlandev, (u8 *) &msg);
+}
+
+
+/* The interface functions, called by the cfg80211 layer */
+int prism2_change_virtual_intf(struct wiphy *wiphy,
+ struct net_device *dev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ u32 data;
+ int result;
+ int err = 0;
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ if (wlandev->macmode == WLAN_MACMODE_IBSS_STA)
+ goto exit;
+ wlandev->macmode = WLAN_MACMODE_IBSS_STA;
+ data = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (wlandev->macmode == WLAN_MACMODE_ESS_STA)
+ goto exit;
+ wlandev->macmode = WLAN_MACMODE_ESS_STA;
+ data = 1;
+ break;
+ default:
+		printk(KERN_WARNING "Operation mode: %d not supported\n", type);
+ return -EOPNOTSUPP;
+ }
+
+ /* Set Operation mode to the PORT TYPE RID */
+ result = prism2_domibset_uint32(wlandev, DIDmib_p2_p2Static_p2CnfPortType, data);
+
+ if (result)
+ err = -EFAULT;
+
+ dev->ieee80211_ptr->iftype = type;
+
+exit:
+ return err;
+}
+
+int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, const u8 *mac_addr,
+ struct key_params *params)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ u32 did;
+
+ int err = 0;
+ int result = 0;
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
+ key_index);
+ if (result)
+ goto exit;
+
+ /* send key to driver */
+ switch (key_index) {
+ case 0:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
+ break;
+
+ case 1:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
+ break;
+
+ case 2:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
+ break;
+
+ case 3:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ result = prism2_domibset_pstr32(wlandev, did, params->key_len, params->key);
+ if (result)
+ goto exit;
+ break;
+
+ default:
+ pr_debug("Unsupported cipher suite\n");
+ result = 1;
+ }
+
+exit:
+ if (result)
+ err = -EFAULT;
+
+ return err;
+}
+
+int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, const u8 *mac_addr, void *cookie,
+ void (*callback)(void *cookie, struct key_params*))
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ struct key_params params;
+ int len;
+
+ if (key_index >= NUM_WEPKEYS)
+ return -EINVAL;
+
+ len = wlandev->wep_keylens[key_index];
+ memset(&params, 0, sizeof(params));
+
+ if (len == 13)
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ else if (len == 5)
+		params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ else
+ return -ENOENT;
+ params.key_len = len;
+ params.key = wlandev->wep_keys[key_index];
+ params.seq_len = 0;
+
+ callback(cookie, &params);
+
+ return 0;
+}
+
+int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, const u8 *mac_addr)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ u32 did;
+ int err = 0;
+ int result = 0;
+
+ /* There is no direct way in the hardware (AFAIK) of removing
+ a key, so we will cheat by setting the key to a bogus value */
+ /* send key to driver */
+ switch (key_index) {
+ case 0:
+ did =
+ DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
+ break;
+
+ case 1:
+ did =
+ DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
+ break;
+
+ case 2:
+ did =
+ DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
+ break;
+
+ case 3:
+ did =
+ DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ result = prism2_domibset_pstr32(wlandev, did, 13, "0000000000000");
+
+exit:
+ if (result)
+ err = -EFAULT;
+
+ return err;
+}
+
+int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+
+ int err = 0;
+ int result = 0;
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
+ key_index);
+
+ if (result)
+ err = -EFAULT;
+
+ return err;
+}
+
+
+int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ struct p80211msg_lnxreq_commsquality quality;
+ int result;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+
+ if ((wlandev == NULL) || (wlandev->msdstate != WLAN_MSD_RUNNING))
+ return -EOPNOTSUPP;
+
+ /* build request message */
+ quality.msgcode = DIDmsg_lnxreq_commsquality;
+ quality.dbm.data = P80211ENUM_truth_true;
+ quality.dbm.status = P80211ENUM_msgitem_status_data_ok;
+
+ /* send message to nsd */
+ if (wlandev->mlmerequest == NULL)
+ return -EOPNOTSUPP;
+
+ result = wlandev->mlmerequest(wlandev, (struct p80211msg *) &quality);
+
+
+ if (result == 0) {
+ sinfo->txrate.legacy = quality.txrate.data;
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->signal = quality.level.data;
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ }
+
+ return result;
+}
+
+int prism2_scan(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_scan_request *request)
+{
+ struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
+ wlandevice_t *wlandev = dev->ml_priv;
+ struct p80211msg_dot11req_scan msg1;
+ struct p80211msg_dot11req_scan_results msg2;
+ int result;
+ int err = 0;
+ int numbss = 0;
+ int i = 0;
+ u8 ie_buf[46];
+ int ie_len;
+
+ if (!request)
+ return -EINVAL;
+
+ if (priv->scan_request && priv->scan_request != request)
+ return -EBUSY;
+
+ if (wlandev->macmode == WLAN_MACMODE_ESS_AP) {
+ printk(KERN_ERR "Can't scan in AP mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->scan_request = request;
+
+ memset(&msg1, 0x00, sizeof(struct p80211msg_dot11req_scan));
+ msg1.msgcode = DIDmsg_dot11req_scan;
+ msg1.bsstype.data = P80211ENUM_bsstype_any;
+
+ memset(&(msg1.bssid.data), 0xFF, sizeof(p80211item_pstr6_t));
+ msg1.bssid.data.len = 6;
+
+ if (request->n_ssids > 0) {
+ msg1.scantype.data = P80211ENUM_scantype_active;
+ msg1.ssid.data.len = request->ssids->ssid_len;
+ memcpy(msg1.ssid.data.data, request->ssids->ssid, request->ssids->ssid_len);
+ } else {
+ msg1.scantype.data = 0;
+ }
+ msg1.probedelay.data = 0;
+
+ for (i = 0;
+ (i < request->n_channels) && i < ARRAY_SIZE(prism2_channels);
+ i++)
+ msg1.channellist.data.data[i] =
+ ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ msg1.channellist.data.len = request->n_channels;
+
+ msg1.maxchanneltime.data = 250;
+ msg1.minchanneltime.data = 200;
+
+ result = p80211req_dorequest(wlandev, (u8 *) &msg1);
+ if (result) {
+ err = prism2_result2err(msg1.resultcode.data);
+ goto exit;
+ }
+ /* Now retrieve scan results */
+ numbss = msg1.numbss.data;
+
+ for (i = 0; i < numbss; i++) {
+ memset(&msg2, 0, sizeof(msg2));
+ msg2.msgcode = DIDmsg_dot11req_scan_results;
+ msg2.bssindex.data = i;
+
+ result = p80211req_dorequest(wlandev, (u8 *) &msg2);
+ if ((result != 0) ||
+ (msg2.resultcode.data != P80211ENUM_resultcode_success)) {
+ break;
+ }
+
+ ie_buf[0] = WLAN_EID_SSID;
+ ie_buf[1] = msg2.ssid.data.len;
+ ie_len = ie_buf[1] + 2;
+ memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
+ cfg80211_inform_bss(wiphy,
+ ieee80211_get_channel(wiphy, ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
+ (const u8 *) &(msg2.bssid.data.data),
+ msg2.timestamp.data, msg2.capinfo.data,
+ msg2.beaconperiod.data,
+ ie_buf,
+ ie_len,
+ (msg2.signal.data - 65536) * 100, /* Conversion to signed type */
+ GFP_KERNEL
+ );
+ }
+
+ if (result)
+ err = prism2_result2err(msg2.resultcode.data);
+
+exit:
+ cfg80211_scan_done(request, err ? 1 : 0);
+ priv->scan_request = NULL;
+ return err;
+}
+
+int prism2_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
+ wlandevice_t *wlandev = priv->wlandev;
+ u32 data;
+ int result;
+ int err = 0;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ if (wiphy->rts_threshold == -1)
+ data = 2347;
+ else
+ data = wiphy->rts_threshold;
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold,
+ data);
+ if (result) {
+ err = -EFAULT;
+ goto exit;
+ }
+ }
+
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ if (wiphy->frag_threshold == -1)
+ data = 2346;
+ else
+ data = wiphy->frag_threshold;
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold,
+ data);
+ if (result) {
+ err = -EFAULT;
+ goto exit;
+ }
+ }
+
+exit:
+ return err;
+}
+
+int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ struct ieee80211_channel *channel = sme->channel;
+ struct p80211msg_lnxreq_autojoin msg_join;
+ u32 did;
+ int length = sme->ssid_len;
+ int chan = -1;
+ int is_wep = (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) ||
+ (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104);
+ int result;
+ int err = 0;
+
+ /* Set the channel */
+ if (channel) {
+ chan = ieee80211_frequency_to_channel(channel->center_freq);
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel,
+ chan);
+ if (result)
+ goto exit;
+ }
+
+ /* Set the authorisation */
+ if ((sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) ||
+ ((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && !is_wep))
+ msg_join.authtype.data = P80211ENUM_authalg_opensystem;
+ else if ((sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY) ||
+ ((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && is_wep))
+ msg_join.authtype.data = P80211ENUM_authalg_sharedkey;
+ else
+ printk(KERN_WARNING
+ "Unhandled authorisation type for connect (%d)\n",
+ sme->auth_type);
+
+ /* Set the encryption - we only support wep */
+ if (is_wep) {
+ if (sme->key) {
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
+ sme->key_idx);
+ if (result)
+ goto exit;
+
+ /* send key to driver */
+ switch (sme->key_idx) {
+ case 0:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
+ break;
+
+ case 1:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
+ break;
+
+ case 2:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
+ break;
+
+ case 3:
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ result = prism2_domibset_pstr32(wlandev, did, sme->key_len, (u8 *) sme->key);
+ if (result)
+ goto exit;
+
+ }
+
+ /* Assume we should set privacy invoked and exclude unencrypted
+ We could possibly use sme->privacy here, but the assumption
+ seems reasonable anyway */
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
+ P80211ENUM_truth_true);
+ if (result)
+ goto exit;
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
+ P80211ENUM_truth_true);
+ if (result)
+ goto exit;
+
+ } else {
+ /* Assume we should unset privacy invoked
+ and exclude unencrypted */
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
+ P80211ENUM_truth_false);
+ if (result)
+ goto exit;
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
+ P80211ENUM_truth_false);
+ if (result)
+ goto exit;
+
+ }
+
+ /* Now do the actual join. Note there is no way that I can
+ see to request a specific bssid */
+ msg_join.msgcode = DIDmsg_lnxreq_autojoin;
+
+ memcpy(msg_join.ssid.data.data, sme->ssid, length);
+ msg_join.ssid.data.len = length;
+
+ result = p80211req_dorequest(wlandev, (u8 *) &msg_join);
+
+exit:
+ if (result)
+ err = -EFAULT;
+
+ return err;
+}
+
+int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ wlandevice_t *wlandev = dev->ml_priv;
+ struct p80211msg_lnxreq_autojoin msg_join;
+ int result;
+ int err = 0;
+
+
+	/* Do a join, with a bogus SSID. That's the only way I can think of */
+ msg_join.msgcode = DIDmsg_lnxreq_autojoin;
+
+ memcpy(msg_join.ssid.data.data, "---", 3);
+ msg_join.ssid.data.len = 3;
+
+ result = p80211req_dorequest(wlandev, (u8 *) &msg_join);
+
+ if (result)
+ err = -EFAULT;
+
+ return err;
+}
+
+
+int prism2_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
+int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+
+int prism2_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type,
+ int mbm)
+{
+ struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
+ wlandevice_t *wlandev = priv->wlandev;
+ u32 data;
+ int result;
+ int err = 0;
+
+ if (type == NL80211_TX_POWER_AUTOMATIC)
+ data = 30;
+ else
+ data = MBM_TO_DBM(mbm);
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel,
+ data);
+
+ if (result) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return err;
+}
+
+int prism2_get_tx_power(struct wiphy *wiphy, int *dbm)
+{
+ struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
+ wlandevice_t *wlandev = priv->wlandev;
+ struct p80211msg_dot11req_mibget msg;
+ p80211item_uint32_t *mibitem = (p80211item_uint32_t *) &msg.mibattribute.data;
+ int result;
+ int err = 0;
+
+ msg.msgcode = DIDmsg_dot11req_mibget;
+ mibitem->did =
+ DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel;
+
+ result = p80211req_dorequest(wlandev, (u8 *) &msg);
+
+ if (result) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *dbm = mibitem->data;
+
+exit:
+ return err;
+}
+
+
+
+
+/* Interface callback functions, passing data back up to the cfg80211 layer */
+void prism2_connect_result(wlandevice_t *wlandev, u8 failed)
+{
+ u16 status = failed ? WLAN_STATUS_UNSPECIFIED_FAILURE : WLAN_STATUS_SUCCESS;
+
+ cfg80211_connect_result(wlandev->netdev, wlandev->bssid,
+ NULL, 0, NULL, 0, status, GFP_KERNEL);
+}
+
+void prism2_disconnected(wlandevice_t *wlandev)
+{
+ cfg80211_disconnected(wlandev->netdev, 0, NULL,
+ 0, GFP_KERNEL);
+}
+
+void prism2_roamed(wlandevice_t *wlandev)
+{
+ cfg80211_roamed(wlandev->netdev, wlandev->bssid,
+ NULL, 0, NULL, 0, GFP_KERNEL);
+}
+
+
+/* Structures for declaring wiphy interface */
+static const struct cfg80211_ops prism2_usb_cfg_ops = {
+ .change_virtual_intf = prism2_change_virtual_intf,
+ .add_key = prism2_add_key,
+ .get_key = prism2_get_key,
+ .del_key = prism2_del_key,
+ .set_default_key = prism2_set_default_key,
+ .get_station = prism2_get_station,
+ .scan = prism2_scan,
+ .set_wiphy_params = prism2_set_wiphy_params,
+ .connect = prism2_connect,
+ .disconnect = prism2_disconnect,
+ .join_ibss = prism2_join_ibss,
+ .leave_ibss = prism2_leave_ibss,
+ .set_tx_power = prism2_set_tx_power,
+ .get_tx_power = prism2_get_tx_power,
+};
+
+
+/* Functions to create/free wiphy interface */
+struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
+{
+ struct wiphy *wiphy;
+ struct prism2_wiphy_private *priv;
+ wiphy = wiphy_new(&prism2_usb_cfg_ops, sizeof(struct prism2_wiphy_private));
+ if (!wiphy)
+ return NULL;
+
+ priv = wiphy_priv(wiphy);
+ priv->wlandev = wlandev;
+ memcpy(priv->channels, prism2_channels, sizeof(prism2_channels));
+ memcpy(priv->rates, prism2_rates, sizeof(prism2_rates));
+ priv->band.channels = priv->channels;
+ priv->band.n_channels = ARRAY_SIZE(prism2_channels);
+ priv->band.bitrates = priv->rates;
+ priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
+ priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.ht_cap.ht_supported = false;
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+
+ set_wiphy_dev(wiphy, dev);
+ wiphy->privid = prism2_wiphy_privid;
+ wiphy->max_scan_ssids = 1;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_ADHOC);
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->n_cipher_suites = PRISM2_NUM_CIPHER_SUITES;
+ wiphy->cipher_suites = prism2_cipher_suites;
+
+ if (wiphy_register(wiphy) < 0)
+ return NULL;
+
+ return wiphy;
+}
+
+
+void wlan_free_wiphy(struct wiphy *wiphy)
+{
+ wiphy_unregister(wiphy);
+ wiphy_free(wiphy);
+}
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 1fa42e01e8c..fa94a7cc86c 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1284,6 +1284,8 @@ typedef struct hfa384x {
u16 link_status_new;
struct sk_buff_head authq;
+ u32 txrate;
+
/* And here we have stuff that used to be in priv */
/* State variables */
@@ -1407,7 +1409,7 @@ int hfa384x_drvr_start(hfa384x_t *hw);
int hfa384x_drvr_stop(hfa384x_t *hw);
int
hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
- p80211_hdr_t *p80211_hdr, p80211_metawep_t *p80211_wep);
+ union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep);
void hfa384x_tx_timeout(wlandevice_t *wlandev);
int hfa384x_cmd_initialize(hfa384x_t *hw);
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index a41db5dc8c7..ea81cb547bb 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -2706,8 +2706,8 @@ int hfa384x_drvr_stop(hfa384x_t *hw)
* interrupt
----------------------------------------------------------------*/
int hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
- p80211_hdr_t *p80211_hdr,
- p80211_metawep_t *p80211_wep)
+ union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep)
{
int usbpktlen = sizeof(hfa384x_tx_frame_t);
int result;
@@ -2752,7 +2752,7 @@ int hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
/* copy the header over to the txdesc */
memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr,
- sizeof(p80211_hdr_t));
+ sizeof(union p80211_hdr));
/* if we're using host WEP, increase size by IV+ICV */
if (p80211_wep->data) {
@@ -2805,11 +2805,13 @@ void hfa384x_tx_timeout(wlandevice_t *wlandev)
spin_lock_irqsave(&hw->ctlxq.lock, flags);
- if (!hw->wlandev->hwremoved &&
- /* Note the bitwise OR, not the logical OR. */
- (!test_and_set_bit(WORK_TX_HALT, &hw->usb_flags) |
- !test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))) {
- schedule_work(&hw->usb_work);
+ if (!hw->wlandev->hwremoved) {
+ int sched;
+
+ sched = !test_and_set_bit(WORK_TX_HALT, &hw->usb_flags);
+ sched |= !test_and_set_bit(WORK_RX_HALT, &hw->usb_flags);
+ if (sched)
+ schedule_work(&hw->usb_work);
}
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
@@ -3471,7 +3473,7 @@ static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb)
hfa384x_usbin_t *usbin = (hfa384x_usbin_t *) skb->data;
hfa384x_t *hw = wlandev->priv;
int hdrlen;
- p80211_rxmeta_t *rxmeta;
+ struct p80211_rxmeta *rxmeta;
u16 data_len;
u16 fc;
@@ -3588,14 +3590,14 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
datalen = le16_to_cpu(rxdesc->data_len);
/* Allocate an ind message+framesize skb */
- skblen = sizeof(p80211_caphdr_t) + hdrlen + datalen + WLAN_CRC_LEN;
+ skblen = sizeof(struct p80211_caphdr) + hdrlen + datalen + WLAN_CRC_LEN;
/* sanity check the length */
if (skblen >
- (sizeof(p80211_caphdr_t) +
+ (sizeof(struct p80211_caphdr) +
WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) {
pr_debug("overlen frm: len=%zd\n",
- skblen - sizeof(p80211_caphdr_t));
+ skblen - sizeof(struct p80211_caphdr));
}
skb = dev_alloc_skb(skblen);
@@ -3609,13 +3611,13 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
/* only prepend the prism header if in the right mode */
if ((wlandev->netdev->type == ARPHRD_IEEE80211_PRISM) &&
(hw->sniffhdr != 0)) {
- p80211_caphdr_t *caphdr;
+ struct p80211_caphdr *caphdr;
/* The NEW header format! */
- datap = skb_put(skb, sizeof(p80211_caphdr_t));
- caphdr = (p80211_caphdr_t *) datap;
+ datap = skb_put(skb, sizeof(struct p80211_caphdr));
+ caphdr = (struct p80211_caphdr *) datap;
caphdr->version = htonl(P80211CAPTURE_VERSION);
- caphdr->length = htonl(sizeof(p80211_caphdr_t));
+ caphdr->length = htonl(sizeof(struct p80211_caphdr));
caphdr->mactime = __cpu_to_be64(rxdesc->time) * 1000;
caphdr->hosttime = __cpu_to_be64(jiffies);
caphdr->phytype = htonl(4); /* dss_dot11_b */
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 059e15055b7..83879f9a0b7 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -103,15 +103,15 @@ static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
* May be called in interrupt or non-interrupt context
----------------------------------------------------------------*/
int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
- struct sk_buff *skb, p80211_hdr_t *p80211_hdr,
- p80211_metawep_t *p80211_wep)
+ struct sk_buff *skb, union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep)
{
u16 fc;
u16 proto;
- wlan_ethhdr_t e_hdr;
- wlan_llc_t *e_llc;
- wlan_snap_t *e_snap;
+ struct wlan_ethhdr e_hdr;
+ struct wlan_llc *e_llc;
+ struct wlan_snap *e_snap;
int foo;
memcpy(&e_hdr, skb->data, sizeof(e_hdr));
@@ -148,7 +148,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* tack on SNAP */
e_snap =
- (wlan_snap_t *) skb_push(skb, sizeof(wlan_snap_t));
+ (struct wlan_snap *) skb_push(skb, sizeof(struct wlan_snap));
e_snap->type = htons(proto);
if (ethconv == WLAN_ETHCONV_8021h
&& p80211_stt_findproto(proto)) {
@@ -161,7 +161,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* tack on llc */
e_llc =
- (wlan_llc_t *) skb_push(skb, sizeof(wlan_llc_t));
+ (struct wlan_llc *) skb_push(skb, sizeof(struct wlan_llc));
e_llc->dsap = 0xAA; /* SNAP, see IEEE 802 */
e_llc->ssap = 0xAA;
e_llc->ctl = 0x03;
@@ -230,7 +230,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* jkriegl: from orinoco, modified */
static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
- p80211_rxmeta_t *rxmeta)
+ struct p80211_rxmeta *rxmeta)
{
int i;
@@ -280,17 +280,17 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
unsigned int payload_offset;
u8 daddr[WLAN_ETHADDR_LEN];
u8 saddr[WLAN_ETHADDR_LEN];
- p80211_hdr_t *w_hdr;
- wlan_ethhdr_t *e_hdr;
- wlan_llc_t *e_llc;
- wlan_snap_t *e_snap;
+ union p80211_hdr *w_hdr;
+ struct wlan_ethhdr *e_hdr;
+ struct wlan_llc *e_llc;
+ struct wlan_snap *e_snap;
int foo;
payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
payload_offset = WLAN_HDR_A3_LEN;
- w_hdr = (p80211_hdr_t *) skb->data;
+ w_hdr = (union p80211_hdr *) skb->data;
/* setup some vars for convenience */
fc = le16_to_cpu(w_hdr->a3.fc);
@@ -345,14 +345,14 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
wlandev->rx.decrypt++;
}
- e_hdr = (wlan_ethhdr_t *) (skb->data + payload_offset);
+ e_hdr = (struct wlan_ethhdr *) (skb->data + payload_offset);
- e_llc = (wlan_llc_t *) (skb->data + payload_offset);
+ e_llc = (struct wlan_llc *) (skb->data + payload_offset);
e_snap =
- (wlan_snap_t *) (skb->data + payload_offset + sizeof(wlan_llc_t));
+ (struct wlan_snap *) (skb->data + payload_offset + sizeof(struct wlan_llc));
/* Test for the various encodings */
- if ((payload_length >= sizeof(wlan_ethhdr_t)) &&
+ if ((payload_length >= sizeof(struct wlan_ethhdr)) &&
(e_llc->dsap != 0xaa || e_llc->ssap != 0xaa) &&
((memcmp(daddr, e_hdr->daddr, WLAN_ETHADDR_LEN) == 0) ||
(memcmp(saddr, e_hdr->saddr, WLAN_ETHADDR_LEN) == 0))) {
@@ -372,7 +372,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
- } else if ((payload_length >= sizeof(wlan_llc_t) + sizeof(wlan_snap_t))
+ } else if ((payload_length >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap))
&& (e_llc->dsap == 0xaa) && (e_llc->ssap == 0xaa)
&& (e_llc->ctl == 0x03)
&&
@@ -398,7 +398,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
skb_pull(skb, payload_offset);
/* create 802.3 header at beginning of skb. */
- e_hdr = (wlan_ethhdr_t *) skb_push(skb, WLAN_ETHHDR_LEN);
+ e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN);
memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
e_hdr->type = htons(payload_length);
@@ -406,7 +406,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
- } else if ((payload_length >= sizeof(wlan_llc_t) + sizeof(wlan_snap_t))
+ } else if ((payload_length >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap))
&& (e_llc->dsap == 0xaa) && (e_llc->ssap == 0xaa)
&& (e_llc->ctl == 0x03)) {
pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
@@ -414,13 +414,13 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* build a DIXII + RFC894 */
/* Test for an overlength frame */
- if ((payload_length - sizeof(wlan_llc_t) - sizeof(wlan_snap_t))
+ if ((payload_length - sizeof(struct wlan_llc) - sizeof(struct wlan_snap))
> netdev->mtu) {
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
printk(KERN_ERR "DIXII frame too large (%ld > %d)\n",
- (long int)(payload_length - sizeof(wlan_llc_t) -
- sizeof(wlan_snap_t)), netdev->mtu);
+ (long int)(payload_length - sizeof(struct wlan_llc) -
+ sizeof(struct wlan_snap)), netdev->mtu);
return 1;
}
@@ -428,13 +428,13 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
skb_pull(skb, payload_offset);
/* chop llc header from skb. */
- skb_pull(skb, sizeof(wlan_llc_t));
+ skb_pull(skb, sizeof(struct wlan_llc));
/* chop snap header from skb. */
- skb_pull(skb, sizeof(wlan_snap_t));
+ skb_pull(skb, sizeof(struct wlan_snap));
/* create 802.3 header at beginning of skb. */
- e_hdr = (wlan_ethhdr_t *) skb_push(skb, WLAN_ETHHDR_LEN);
+ e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN);
e_hdr->type = e_snap->type;
memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
@@ -461,7 +461,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
skb_pull(skb, payload_offset);
/* create 802.3 header at beginning of skb. */
- e_hdr = (wlan_ethhdr_t *) skb_push(skb, WLAN_ETHHDR_LEN);
+ e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN);
memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
e_hdr->type = htons(payload_length);
@@ -542,8 +542,8 @@ int p80211_stt_findproto(u16 proto)
----------------------------------------------------------------*/
void p80211skb_rxmeta_detach(struct sk_buff *skb)
{
- p80211_rxmeta_t *rxmeta;
- p80211_frmmeta_t *frmmeta;
+ struct p80211_rxmeta *rxmeta;
+ struct p80211_frmmeta *frmmeta;
/* Sanity checks */
if (skb == NULL) { /* bad skb */
@@ -589,8 +589,8 @@ exit:
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
{
int result = 0;
- p80211_rxmeta_t *rxmeta;
- p80211_frmmeta_t *frmmeta;
+ struct p80211_rxmeta *rxmeta;
+ struct p80211_frmmeta *frmmeta;
/* If these already have metadata, we error out! */
if (P80211SKB_RXMETA(skb) != NULL) {
@@ -601,7 +601,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
}
/* Allocate the rxmeta */
- rxmeta = kzalloc(sizeof(p80211_rxmeta_t), GFP_ATOMIC);
+ rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
if (rxmeta == NULL) {
printk(KERN_ERR "%s: Failed to allocate rxmeta.\n",
@@ -615,8 +615,8 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
rxmeta->hosttime = jiffies;
/* Overlay a frmmeta_t onto skb->cb */
- memset(skb->cb, 0, sizeof(p80211_frmmeta_t));
- frmmeta = (p80211_frmmeta_t *) (skb->cb);
+ memset(skb->cb, 0, sizeof(struct p80211_frmmeta));
+ frmmeta = (struct p80211_frmmeta *) (skb->cb);
frmmeta->magic = P80211_FRMMETA_MAGIC;
frmmeta->rx = rxmeta;
exit:
@@ -641,7 +641,7 @@ exit:
----------------------------------------------------------------*/
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
- p80211_frmmeta_t *meta;
+ struct p80211_frmmeta *meta;
meta = P80211SKB_FRMMETA(skb);
if (meta && meta->rx)
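
The skb_p80211_to_ether() hunks above all branch on what follows the 802.11 header: a raw 802.3 frame repeats one of the MAC addresses, while RFC 1042/802.1h encapsulation starts with an LLC header of AA AA 03 followed by a SNAP OUI and type. Below is a self-contained sketch of that classification test only; the struct names mirror the ones converted above, the sample buffer is hypothetical, and the 802.3 address-repeat branch is left out.

/* Sketch: classify a payload with the LLC (AA AA 03) + SNAP length test
 * used by skb_p80211_to_ether() in the diff above. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct wlan_llc  { uint8_t dsap, ssap, ctl; } __attribute__ ((packed));
struct wlan_snap { uint8_t oui[3]; uint16_t type; } __attribute__ ((packed));

enum encoding { ENC_LLC_SNAP, ENC_OTHER };

static enum encoding classify(const uint8_t *payload, size_t len)
{
	const struct wlan_llc *llc = (const struct wlan_llc *)payload;

	/* The driver first checks for a raw 802.3 frame by looking for a
	 * repeated MAC address; that branch is omitted here. */
	if (len >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap) &&
	    llc->dsap == 0xaa && llc->ssap == 0xaa && llc->ctl == 0x03)
		return ENC_LLC_SNAP;	/* DIXII/RFC1042 or 802.1h */
	return ENC_OTHER;
}

int main(void)
{
	static const uint8_t rfc1042[] = {
		0xaa, 0xaa, 0x03,		/* LLC: dsap, ssap, ctl */
		0x00, 0x00, 0x00, 0x08, 0x00	/* SNAP: OUI + type (IPv4) */
	};

	printf("llc+snap? %d\n", classify(rfc1042, sizeof(rfc1042)) == ENC_LLC_SNAP);
	return 0;
}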
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 6fe163be24f..eca0391c676 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -63,17 +63,17 @@
#define P80211CAPTURE_VERSION 0x80211001
-#define P80211_FRMMETA_MAGIC 0x802110
+#define P80211_FRMMETA_MAGIC 0x802110
#define P80211SKB_FRMMETA(s) \
- (((((p80211_frmmeta_t *)((s)->cb))->magic) == P80211_FRMMETA_MAGIC) ? \
- ((p80211_frmmeta_t *)((s)->cb)) : \
+ (((((struct p80211_frmmeta *)((s)->cb))->magic) == P80211_FRMMETA_MAGIC) ? \
+ ((struct p80211_frmmeta *)((s)->cb)) : \
(NULL))
#define P80211SKB_RXMETA(s) \
- (P80211SKB_FRMMETA((s)) ? P80211SKB_FRMMETA((s))->rx : ((p80211_rxmeta_t *)(NULL)))
+ (P80211SKB_FRMMETA((s)) ? P80211SKB_FRMMETA((s))->rx : ((struct p80211_rxmeta *)(NULL)))
-typedef struct p80211_rxmeta {
+struct p80211_rxmeta {
struct wlandevice *wlandev;
u64 mactime; /* Hi-rez MAC-supplied time value */
@@ -87,12 +87,12 @@ typedef struct p80211_rxmeta {
unsigned int preamble; /* P80211ENUM_preambletype_* */
unsigned int encoding; /* P80211ENUM_encoding_* */
-} p80211_rxmeta_t;
+};
-typedef struct p80211_frmmeta {
+struct p80211_frmmeta {
unsigned int magic;
- p80211_rxmeta_t *rx;
-} p80211_frmmeta_t;
+ struct p80211_rxmeta *rx;
+};
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb);
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb);
@@ -101,7 +101,7 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb);
/*
* Frame capture header. (See doc/capturefrm.txt)
*/
-typedef struct p80211_caphdr {
+struct p80211_caphdr {
u32 version;
u32 length;
u64 mactime;
@@ -116,36 +116,36 @@ typedef struct p80211_caphdr {
s32 ssi_noise;
u32 preamble;
u32 encoding;
-} p80211_caphdr_t;
+};
/* buffer free method pointer type */
typedef void (*freebuf_method_t) (void *buf, int size);
-typedef struct p80211_metawep {
+struct p80211_metawep {
void *data;
u8 iv[4];
u8 icv[4];
-} p80211_metawep_t;
+};
/* local ether header type */
-typedef struct wlan_ethhdr {
+struct wlan_ethhdr {
u8 daddr[WLAN_ETHADDR_LEN];
u8 saddr[WLAN_ETHADDR_LEN];
u16 type;
-} __attribute__ ((packed)) wlan_ethhdr_t;
+} __attribute__ ((packed));
/* local llc header type */
-typedef struct wlan_llc {
+struct wlan_llc {
u8 dsap;
u8 ssap;
u8 ctl;
-} __attribute__ ((packed)) wlan_llc_t;
+} __attribute__ ((packed));
/* local snap header type */
-typedef struct wlan_snap {
+struct wlan_snap {
u8 oui[WLAN_IEEE_OUI_LEN];
u16 type;
-} __attribute__ ((packed)) wlan_snap_t;
+} __attribute__ ((packed));
/* Circular include trick */
struct wlandevice;
@@ -153,8 +153,8 @@ struct wlandevice;
int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb);
int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
- struct sk_buff *skb, p80211_hdr_t *p80211_hdr,
- p80211_metawep_t *p80211_wep);
+ struct sk_buff *skb, union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep);
int p80211_stt_findproto(u16 proto);
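
The P80211SKB_FRMMETA()/P80211SKB_RXMETA() macros above work by overlaying struct p80211_frmmeta on the skb's 48-byte cb[] scratch area and checking a magic value before trusting the pointer stored there. A minimal standalone sketch of the same pattern follows; the magic value and field names come from the code above, while fake_skb is a stand-in for struct sk_buff and the rest is illustrative.

/* Sketch: tag a fixed per-packet scratch buffer with a magic number,
 * mirroring p80211skb_rxmeta_attach() and the P80211SKB_* macros. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define FRMMETA_MAGIC 0x802110		/* as P80211_FRMMETA_MAGIC above */

struct rxmeta  { uint64_t hosttime; unsigned int signal; };
struct frmmeta { unsigned int magic; struct rxmeta *rx; };

struct fake_skb { char cb[48]; };	/* stand-in for sk_buff::cb */

static void rxmeta_attach(struct fake_skb *skb, struct rxmeta *rx)
{
	struct frmmeta *fm;

	memset(skb->cb, 0, sizeof(skb->cb));
	fm = (struct frmmeta *)skb->cb;
	fm->magic = FRMMETA_MAGIC;
	fm->rx = rx;
}

static struct rxmeta *rxmeta_get(struct fake_skb *skb)
{
	struct frmmeta *fm = (struct frmmeta *)skb->cb;

	return fm->magic == FRMMETA_MAGIC ? fm->rx : NULL; /* reject untagged cb */
}

int main(void)
{
	struct fake_skb skb = { { 0 } };
	struct rxmeta rx = { 1234, 42 };

	rxmeta_attach(&skb, &rx);
	printf("signal=%u\n", rxmeta_get(&skb) ? rxmeta_get(&skb)->signal : 0u);
	return 0;
}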
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 419de4dee56..1f6e4ebc6eb 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -94,7 +94,7 @@
/* Control */
#define WLAN_FSTYPE_BLOCKACKREQ 0x8
-#define WLAN_FSTYPE_BLOCKACK 0x9
+#define WLAN_FSTYPE_BLOCKACK 0x9
#define WLAN_FSTYPE_PSPOLL 0x0a
#define WLAN_FSTYPE_RTS 0x0b
#define WLAN_FSTYPE_CTS 0x0c
@@ -133,13 +133,13 @@
#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & (BIT(2) | BIT(3))) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & (BIT(4)|BIT(5)|BIT(6)|BIT(7))) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8)
+#define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8)
#define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9)
#define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14)
#define WLAN_SET_FC_FTYPE(n) (((u16)(n)) << 2)
#define WLAN_SET_FC_FSTYPE(n) (((u16)(n)) << 4)
-#define WLAN_SET_FC_TODS(n) (((u16)(n)) << 8)
+#define WLAN_SET_FC_TODS(n) (((u16)(n)) << 8)
#define WLAN_SET_FC_FROMDS(n) (((u16)(n)) << 9)
#define WLAN_SET_FC_ISWEP(n) (((u16)(n)) << 14)
@@ -147,16 +147,16 @@
/* Generic 802.11 Header types */
-typedef struct p80211_hdr_a3 {
+struct p80211_hdr_a3 {
u16 fc;
u16 dur;
u8 a1[ETH_ALEN];
u8 a2[ETH_ALEN];
u8 a3[ETH_ALEN];
u16 seq;
-} __attribute__ ((packed)) p80211_hdr_a3_t;
+} __attribute__ ((packed));
-typedef struct p80211_hdr_a4 {
+struct p80211_hdr_a4 {
u16 fc;
u16 dur;
u8 a1[ETH_ALEN];
@@ -164,18 +164,18 @@ typedef struct p80211_hdr_a4 {
u8 a3[ETH_ALEN];
u16 seq;
u8 a4[ETH_ALEN];
-} __attribute__ ((packed)) p80211_hdr_a4_t;
+} __attribute__ ((packed));
-typedef union p80211_hdr {
- p80211_hdr_a3_t a3;
- p80211_hdr_a4_t a4;
-} __attribute__ ((packed)) p80211_hdr_t;
+union p80211_hdr {
+ struct p80211_hdr_a3 a3;
+ struct p80211_hdr_a4 a4;
+} __attribute__ ((packed));
/* Frame and header length macros */
#define WLAN_CTL_FRAMELEN(fstype) (\
(fstype) == WLAN_FSTYPE_BLOCKACKREQ ? 24 : \
- (fstype) == WLAN_FSTYPE_BLOCKACK ? 152 : \
+ (fstype) == WLAN_FSTYPE_BLOCKACK ? 152 : \
(fstype) == WLAN_FSTYPE_PSPOLL ? 20 : \
(fstype) == WLAN_FSTYPE_RTS ? 20 : \
(fstype) == WLAN_FSTYPE_CTS ? 14 : \
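
The frame-control macros touched above shift bit fields out of the little-endian fc word after it has been converted to host order. A quick standalone illustration of the same shifts, with simplified stand-in names for the WLAN_GET_FC_* macros and a hypothetical sample value:

/* Sketch: decode an 802.11 frame-control word with the masks and shifts
 * used by WLAN_GET_FC_FTYPE()/WLAN_GET_FC_FSTYPE()/WLAN_GET_FC_TODS(). */
#include <stdint.h>
#include <stdio.h>

#define GET_FC_FTYPE(n)		((((uint16_t)(n)) & 0x000c) >> 2)	/* bits 2-3 */
#define GET_FC_FSTYPE(n)	((((uint16_t)(n)) & 0x00f0) >> 4)	/* bits 4-7 */
#define GET_FC_TODS(n)		((((uint16_t)(n)) & 0x0100) >> 8)	/* bit 8 */

int main(void)
{
	uint16_t fc = 0x0080;	/* host order, i.e. after le16_to_cpu():
				 * type 0 (management), subtype 8 (beacon) */

	printf("ftype=%u fstype=%u tods=%u\n",
	       (unsigned)GET_FC_FTYPE(fc),
	       (unsigned)GET_FC_FSTYPE(fc),
	       (unsigned)GET_FC_TODS(fc));
	return 0;
}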
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index 64ca7f95262..0d47765452e 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -78,12 +78,12 @@
/* argument to the ioctl system call when issuing a request to */
/* the p80211 module. */
-typedef struct p80211ioctl_req {
+struct p80211ioctl_req {
char name[WLAN_DEVNAMELEN_MAX];
caddr_t data;
u32 magic;
u16 len;
u32 result;
-} __attribute__ ((packed)) p80211ioctl_req_t;
+} __attribute__ ((packed));
#endif /* _P80211IOCTL_H */
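
Note that the conversion above keeps __attribute__ ((packed)) on struct p80211ioctl_req, as it does on the other structures that cross the user/kernel boundary byte for byte: without it the compiler may insert padding and shift every later field as seen from user space. A tiny demonstration of the effect, with made-up struct names:

/* Sketch: why the ioctl/on-the-wire structs in this patch stay packed. */
#include <stdint.h>
#include <stdio.h>

struct padded   { uint8_t a; uint32_t b; uint16_t c; };
struct packed_s { uint8_t a; uint32_t b; uint16_t c; } __attribute__ ((packed));

int main(void)
{
	/* Typically prints 12 vs 7: the padding would change the field
	 * offsets that a user-space caller of the ioctl expects. */
	printf("padded=%zu packed=%zu\n",
	       sizeof(struct padded), sizeof(struct packed_s));
	return 0;
}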
diff --git a/drivers/staging/wlan-ng/p80211meta.h b/drivers/staging/wlan-ng/p80211meta.h
index b9badcff681..c5f1a63add9 100644
--- a/drivers/staging/wlan-ng/p80211meta.h
+++ b/drivers/staging/wlan-ng/p80211meta.h
@@ -62,7 +62,7 @@
/* representation of category list metadata, group list metadata, */
/* and data item metadata for both Mib and Messages. */
-typedef struct p80211meta {
+struct p80211meta {
char *name; /* data item name */
u32 did; /* partial did */
u32 flags; /* set of various flag bits */
@@ -75,16 +75,16 @@ typedef struct p80211meta {
p80211_totext_t totextptr; /* ptr to totext conversion function */
p80211_fromtext_t fromtextptr; /* ptr to totext conversion function */
p80211_valid_t validfunptr; /* ptr to totext conversion function */
-} p80211meta_t;
+};
-typedef struct grplistitem {
+struct grplistitem {
char *name;
- p80211meta_t *itemlist;
-} grplistitem_t;
+ struct p80211meta *itemlist;
+};
-typedef struct catlistitem {
+struct catlistitem {
char *name;
- grplistitem_t *grplist;
-} catlistitem_t;
+ struct grplistitem *grplist;
+};
#endif /* _P80211META_H */
diff --git a/drivers/staging/wlan-ng/p80211metastruct.h b/drivers/staging/wlan-ng/p80211metastruct.h
index db12713eeaa..a8a4e3b5ffe 100644
--- a/drivers/staging/wlan-ng/p80211metastruct.h
+++ b/drivers/staging/wlan-ng/p80211metastruct.h
@@ -47,23 +47,23 @@
#ifndef _P80211MKMETASTRUCT_H
#define _P80211MKMETASTRUCT_H
-typedef struct p80211msg_dot11req_mibget {
+struct p80211msg_dot11req_mibget {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_unk392_t mibattribute;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_dot11req_mibget_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_dot11req_mibset {
+struct p80211msg_dot11req_mibset {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_unk392_t mibattribute;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_dot11req_mibset_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_dot11req_scan {
+struct p80211msg_dot11req_scan {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -81,9 +81,9 @@ typedef struct p80211msg_dot11req_scan {
p80211item_uint32_t resultcode;
p80211item_uint32_t numbss;
p80211item_uint32_t append;
-} __attribute__ ((packed)) p80211msg_dot11req_scan_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_dot11req_scan_results {
+struct p80211msg_dot11req_scan_results {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -113,6 +113,7 @@ typedef struct p80211msg_dot11req_scan_results {
p80211item_uint32_t cfpollable;
p80211item_uint32_t cfpollreq;
p80211item_uint32_t privacy;
+ p80211item_uint32_t capinfo;
p80211item_uint32_t basicrate1;
p80211item_uint32_t basicrate2;
p80211item_uint32_t basicrate3;
@@ -129,9 +130,9 @@ typedef struct p80211msg_dot11req_scan_results {
p80211item_uint32_t supprate6;
p80211item_uint32_t supprate7;
p80211item_uint32_t supprate8;
-} __attribute__ ((packed)) p80211msg_dot11req_scan_results_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_dot11req_start {
+struct p80211msg_dot11req_start {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -167,17 +168,17 @@ typedef struct p80211msg_dot11req_start {
p80211item_uint32_t operationalrate7;
p80211item_uint32_t operationalrate8;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_dot11req_start_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_lnxreq_ifstate {
+struct p80211msg_lnxreq_ifstate {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_uint32_t ifstate;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_lnxreq_ifstate_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_lnxreq_wlansniff {
+struct p80211msg_lnxreq_wlansniff {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -189,18 +190,18 @@ typedef struct p80211msg_lnxreq_wlansniff {
p80211item_uint32_t stripfcs;
p80211item_uint32_t packet_trunc;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_lnxreq_wlansniff_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_lnxreq_hostwep {
+struct p80211msg_lnxreq_hostwep {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_uint32_t resultcode;
p80211item_uint32_t decrypt;
p80211item_uint32_t encrypt;
-} __attribute__ ((packed)) p80211msg_lnxreq_hostwep_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_lnxreq_commsquality {
+struct p80211msg_lnxreq_commsquality {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -209,9 +210,10 @@ typedef struct p80211msg_lnxreq_commsquality {
p80211item_uint32_t link;
p80211item_uint32_t level;
p80211item_uint32_t noise;
-} __attribute__ ((packed)) p80211msg_lnxreq_commsquality_t;
+ p80211item_uint32_t txrate;
+} __attribute__ ((packed));
-typedef struct p80211msg_lnxreq_autojoin {
+struct p80211msg_lnxreq_autojoin {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -219,26 +221,26 @@ typedef struct p80211msg_lnxreq_autojoin {
u8 pad_19D[3];
p80211item_uint32_t authtype;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_lnxreq_autojoin_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_p2req_readpda {
+struct p80211msg_p2req_readpda {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_unk1024_t pda;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_p2req_readpda_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_p2req_ramdl_state {
+struct p80211msg_p2req_ramdl_state {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_uint32_t enable;
p80211item_uint32_t exeaddr;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_p2req_ramdl_state_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_p2req_ramdl_write {
+struct p80211msg_p2req_ramdl_write {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -246,17 +248,17 @@ typedef struct p80211msg_p2req_ramdl_write {
p80211item_uint32_t len;
p80211item_unk4096_t data;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_p2req_ramdl_write_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_p2req_flashdl_state {
+struct p80211msg_p2req_flashdl_state {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
p80211item_uint32_t enable;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_p2req_flashdl_state_t;
+} __attribute__ ((packed));
-typedef struct p80211msg_p2req_flashdl_write {
+struct p80211msg_p2req_flashdl_write {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
@@ -264,6 +266,6 @@ typedef struct p80211msg_p2req_flashdl_write {
p80211item_uint32_t len;
p80211item_unk4096_t data;
p80211item_uint32_t resultcode;
-} __attribute__ ((packed)) p80211msg_p2req_flashdl_write_t;
+} __attribute__ ((packed));
#endif
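
Every p80211msg_* structure above begins with the same three fields as struct p80211msg (msgcode, msglen, devname), which is what lets p80211req_handlemsg() later in this patch inspect msgcode first and only then cast to the specific message type. A minimal sketch of that dispatch-by-common-header idiom follows; the message codes and payloads are invented, and the common fields are factored into a nested header here for brevity, whereas the driver repeats them at the top of each struct.

/* Sketch: dispatch on a shared leading header, as the p80211 request
 * path does with struct p80211msg / struct p80211msg_*. */
#include <stdint.h>
#include <stdio.h>

#define DEVNAMELEN	16
#define MSG_IFSTATE	1	/* hypothetical msgcode values */
#define MSG_HOSTWEP	2

struct msg_hdr     { uint32_t msgcode, msglen; char devname[DEVNAMELEN]; };
struct msg_ifstate { struct msg_hdr hdr; uint32_t ifstate; };
struct msg_hostwep { struct msg_hdr hdr; uint32_t decrypt, encrypt; };

static void handle_msg(void *buf)
{
	struct msg_hdr *hdr = buf;	/* the common prefix is always valid */

	switch (hdr->msgcode) {
	case MSG_IFSTATE:
		printf("ifstate=%u\n",
		       (unsigned)((struct msg_ifstate *)buf)->ifstate);
		break;
	case MSG_HOSTWEP:
		printf("wep decrypt=%u\n",
		       (unsigned)((struct msg_hostwep *)buf)->decrypt);
		break;
	default:
		printf("unknown msgcode %u\n", (unsigned)hdr->msgcode);
	}
}

int main(void)
{
	struct msg_ifstate m = { { MSG_IFSTATE, sizeof(m), "wlan0" }, 1 };

	handle_msg(&m);
	return 0;
}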
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index deb52f5fd78..3b5e8113ad1 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -298,7 +298,7 @@ typedef struct wlan_fr_mgmt {
u16 type;
u16 len; /* DOES NOT include CRC !!!! */
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -310,7 +310,7 @@ typedef struct wlan_fr_beacon {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -333,7 +333,7 @@ typedef struct wlan_fr_ibssatim {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
@@ -349,7 +349,7 @@ typedef struct wlan_fr_disassoc {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -364,7 +364,7 @@ typedef struct wlan_fr_assocreq {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -381,7 +381,7 @@ typedef struct wlan_fr_assocresp {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -398,7 +398,7 @@ typedef struct wlan_fr_reassocreq {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -416,7 +416,7 @@ typedef struct wlan_fr_reassocresp {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -433,7 +433,7 @@ typedef struct wlan_fr_probereq {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -448,7 +448,7 @@ typedef struct wlan_fr_proberesp {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -469,7 +469,7 @@ typedef struct wlan_fr_authen {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
@@ -486,7 +486,7 @@ typedef struct wlan_fr_deauthen {
u16 type;
u16 len;
u8 *buf;
- p80211_hdr_t *hdr;
+ union p80211_hdr *hdr;
/* used for target specific data, skb in Linux */
void *priv;
/*-- fixed fields -----------*/
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index c691d3eeb9d..8e0f9a0cd74 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -50,10 +50,10 @@
#define WLAN_DEVNAMELEN_MAX 16
-typedef struct p80211msg {
+struct p80211msg {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
-} __attribute__ ((packed)) p80211msg_t;
+} __attribute__ ((packed));
#endif /* _P80211MSG_H */
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 763ab1187a1..aa1792c8429 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -75,6 +75,7 @@
#include <net/iw_handler.h>
#include <net/net_namespace.h>
+#include <net/cfg80211.h>
#include "p80211types.h"
#include "p80211hdr.h"
@@ -87,6 +88,8 @@
#include "p80211metastruct.h"
#include "p80211metadef.h"
+#include "cfg80211.c"
+
/* Support functions */
static void p80211netdev_rx_bh(unsigned long arg);
@@ -261,7 +264,7 @@ static void p80211netdev_rx_bh(unsigned long arg)
wlandevice_t *wlandev = (wlandevice_t *) arg;
struct sk_buff *skb = NULL;
netdevice_t *dev = wlandev->netdev;
- p80211_hdr_a3_t *hdr;
+ struct p80211_hdr_a3 *hdr;
u16 fc;
/* Let's empty our our queue */
@@ -285,7 +288,7 @@ static void p80211netdev_rx_bh(unsigned long arg)
netif_rx_ni(skb);
continue;
} else {
- hdr = (p80211_hdr_a3_t *) skb->data;
+ hdr = (struct p80211_hdr_a3 *) skb->data;
fc = le16_to_cpu(hdr->fc);
if (p80211_rx_typedrop(wlandev, fc)) {
dev_kfree_skb(skb);
@@ -347,8 +350,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
int result = 0;
int txresult = -1;
wlandevice_t *wlandev = netdev->ml_priv;
- p80211_hdr_t p80211_hdr;
- p80211_metawep_t p80211_wep;
+ union p80211_hdr p80211_hdr;
+ struct p80211_metawep p80211_wep;
if (skb == NULL)
return NETDEV_TX_OK;
@@ -358,8 +361,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
- memset(&p80211_hdr, 0, sizeof(p80211_hdr_t));
- memset(&p80211_wep, 0, sizeof(p80211_metawep_t));
+ memset(&p80211_hdr, 0, sizeof(union p80211_hdr));
+ memset(&p80211_wep, 0, sizeof(struct p80211_metawep));
if (netif_queue_stopped(netdev)) {
pr_debug("called when queue stopped.\n");
@@ -398,8 +401,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
/* move the header over */
- memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr_t));
- skb_pull(skb, sizeof(p80211_hdr_t));
+ memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr));
+ skb_pull(skb, sizeof(union p80211_hdr));
} else {
if (skb_ether_to_p80211
(wlandev, wlandev->ethconv, skb, &p80211_hdr,
@@ -557,7 +560,7 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd)
{
int result = 0;
- p80211ioctl_req_t *req = (p80211ioctl_req_t *) ifr;
+ struct p80211ioctl_req *req = (struct p80211ioctl_req *) ifr;
wlandevice_t *wlandev = dev->ml_priv;
u8 *msgbuf;
@@ -604,7 +607,8 @@ static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd)
result = -ENOMEM;
}
bail:
- return result; /* If allocate,copyfrom or copyto fails, return errno */
+ /* If allocate,copyfrom or copyto fails, return errno */
+ return result;
}
/*----------------------------------------------------------------
@@ -635,7 +639,7 @@ bail:
static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
{
struct sockaddr *new_addr = addr;
- p80211msg_dot11req_mibset_t dot11req;
+ struct p80211msg_dot11req_mibset dot11req;
p80211item_unk392_t *mibattr;
p80211item_pstr6_t *macaddr;
p80211item_uint32_t *resultcode;
@@ -651,9 +655,9 @@ static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
resultcode = &dot11req.resultcode;
/* Set up a dot11req_mibset */
- memset(&dot11req, 0, sizeof(p80211msg_dot11req_mibset_t));
+ memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset));
dot11req.msgcode = DIDmsg_dot11req_mibset;
- dot11req.msglen = sizeof(p80211msg_dot11req_mibset_t);
+ dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset);
memcpy(dot11req.devname,
((wlandevice_t *) dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
@@ -732,6 +736,7 @@ static const struct net_device_ops p80211_netdev_ops = {
* Arguments:
* wlandev ptr to the wlandev structure for the
* interface.
+* physdev ptr to usb device
* Returns:
* zero on success, non-zero otherwise.
* Call Context:
@@ -740,10 +745,12 @@ static const struct net_device_ops p80211_netdev_ops = {
* compiled drivers, this function will be called in the
* context of the kernel startup code.
----------------------------------------------------------------*/
-int wlan_setup(wlandevice_t *wlandev)
+int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
{
int result = 0;
- netdevice_t *dev;
+ netdevice_t *netdev;
+ struct wiphy *wiphy;
+ struct wireless_dev *wdev;
/* Set up the wlandev */
wlandev->state = WLAN_DEVICE_CLOSED;
@@ -755,20 +762,30 @@ int wlan_setup(wlandevice_t *wlandev)
tasklet_init(&wlandev->rx_bh,
p80211netdev_rx_bh, (unsigned long)wlandev);
+ /* Allocate and initialize the wiphy struct */
+ wiphy = wlan_create_wiphy(physdev, wlandev);
+ if (wiphy == NULL) {
+ printk(KERN_ERR "Failed to alloc wiphy.\n");
+ return 1;
+ }
+
/* Allocate and initialize the struct device */
- dev = alloc_netdev(0, "wlan%d", ether_setup);
- if (dev == NULL) {
+ netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d", ether_setup);
+ if (netdev == NULL) {
printk(KERN_ERR "Failed to alloc netdev.\n");
+ wlan_free_wiphy(wiphy);
result = 1;
} else {
- wlandev->netdev = dev;
- dev->ml_priv = wlandev;
- dev->netdev_ops = &p80211_netdev_ops;
-
- dev->wireless_handlers = &p80211wext_handler_def;
-
- netif_stop_queue(dev);
- netif_carrier_off(dev);
+ wlandev->netdev = netdev;
+ netdev->ml_priv = wlandev;
+ netdev->netdev_ops = &p80211_netdev_ops;
+ wdev = netdev_priv(netdev);
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_STATION;
+ netdev->ieee80211_ptr = wdev;
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
}
return result;
@@ -797,14 +814,14 @@ int wlan_setup(wlandevice_t *wlandev)
----------------------------------------------------------------*/
int wlan_unsetup(wlandevice_t *wlandev)
{
- int result = 0;
+ struct wireless_dev *wdev;
tasklet_kill(&wlandev->rx_bh);
- if (wlandev->netdev == NULL) {
- printk(KERN_ERR "called without wlandev->netdev set.\n");
- result = 1;
- } else {
+ if (wlandev->netdev) {
+ wdev = netdev_priv(wlandev->netdev);
+ if (wdev->wiphy)
+ wlan_free_wiphy(wdev->wiphy);
free_netdev(wlandev->netdev);
wlandev->netdev = NULL;
}
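
In the wlan_setup() hunk above, alloc_netdev() is now asked for sizeof(struct wireless_dev) of private space, so the wireless_dev lives in memory appended to the net_device and netdev_priv() simply returns a pointer just past the device structure. The following is a userspace sketch of that allocation pattern only; the types are stand-ins, not the kernel's, and the real netdev_priv() additionally rounds the offset up for alignment.

/* Sketch: the "private area appended to the device struct" pattern behind
 * alloc_netdev(sizeof(struct wireless_dev), ...) and netdev_priv(). */
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>

struct fake_netdev { char name[16]; void *ieee80211_ptr; };
struct fake_wdev   { int iftype; void *wiphy; };

static struct fake_netdev *alloc_netdev_sketch(size_t priv_size)
{
	/* one allocation: device struct followed by priv_size bytes */
	return calloc(1, sizeof(struct fake_netdev) + priv_size);
}

static void *netdev_priv_sketch(struct fake_netdev *dev)
{
	return (char *)dev + sizeof(struct fake_netdev);
}

int main(void)
{
	struct fake_netdev *netdev = alloc_netdev_sketch(sizeof(struct fake_wdev));
	struct fake_wdev *wdev;

	if (!netdev)
		return 1;
	wdev = netdev_priv_sketch(netdev);
	wdev->iftype = 2;		/* e.g. station */
	netdev->ieee80211_ptr = wdev;	/* as wlan_setup() does with wdev */
	printf("iftype=%d\n", ((struct fake_wdev *)netdev->ieee80211_ptr)->iftype);
	free(netdev);
	return 0;
}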
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 3c8c6480056..1ec33740f10 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -148,6 +148,7 @@ int p80211wext_event_associated(struct wlandevice *wlandev, int assoc);
#define MAX_KEYLEN 32
#define HOSTWEP_DEFAULTKEY_MASK (BIT(1)|BIT(0))
+#define HOSTWEP_SHAREDKEY BIT(3)
#define HOSTWEP_DECRYPT BIT(4)
#define HOSTWEP_ENCRYPT BIT(5)
#define HOSTWEP_PRIVACYINVOKED BIT(6)
@@ -183,9 +184,9 @@ typedef struct wlandevice {
int (*close) (struct wlandevice *wlandev);
void (*reset) (struct wlandevice *wlandev);
int (*txframe) (struct wlandevice *wlandev, struct sk_buff *skb,
- p80211_hdr_t *p80211_hdr,
- p80211_metawep_t *p80211_wep);
- int (*mlmerequest) (struct wlandevice *wlandev, p80211msg_t *msg);
+ union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep);
+ int (*mlmerequest) (struct wlandevice *wlandev, struct p80211msg *msg);
int (*set_multicast_list) (struct wlandevice *wlandev,
netdevice_t *dev);
void (*tx_timeout) (struct wlandevice *wlandev);
@@ -233,7 +234,7 @@ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
u8 *iv, u8 *icv);
-int wlan_setup(wlandevice_t *wlandev);
+int wlan_setup(wlandevice_t *wlandev, struct device *physdev);
int wlan_unsetup(wlandevice_t *wlandev);
int register_wlandev(wlandevice_t *wlandev);
int unregister_wlandev(wlandevice_t *wlandev);
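
HOSTWEP_SHAREDKEY moves into this header because the WEP state is kept as a single bitmask: the low two bits hold the default key index and individual BIT(n) flags track shared-key auth, host decrypt and host encrypt, which the request handler below toggles with the usual clear-then-set sequence. A small standalone illustration, with the flag values taken from the header above and the usage values purely illustrative:

/* Sketch: the hostwep bitmask handling used by p80211req_handlemsg(). */
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define HOSTWEP_DEFAULTKEY_MASK	(BIT(1) | BIT(0))
#define HOSTWEP_SHAREDKEY	BIT(3)
#define HOSTWEP_DECRYPT		BIT(4)
#define HOSTWEP_ENCRYPT		BIT(5)

int main(void)
{
	unsigned int hostwep = 0;

	hostwep &= ~(HOSTWEP_DECRYPT | HOSTWEP_ENCRYPT);	/* clear both */
	hostwep |= HOSTWEP_DECRYPT;				/* host decrypts */
	hostwep |= HOSTWEP_SHAREDKEY;				/* shared-key auth */
	hostwep = (hostwep & ~HOSTWEP_DEFAULTKEY_MASK) | 2;	/* default key 2 */

	printf("sharedkey=%u key=%u\n",
	       !!(hostwep & HOSTWEP_SHAREDKEY),
	       hostwep & HOSTWEP_DEFAULTKEY_MASK);
	return 0;
}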
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index 207f080cfc9..179194e7d2a 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -72,9 +72,9 @@
#include "p80211metastruct.h"
#include "p80211req.h"
-static void p80211req_handlemsg(wlandevice_t *wlandev, p80211msg_t *msg);
+static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg);
static int p80211req_mibset_mibget(wlandevice_t *wlandev,
- p80211msg_dot11req_mibget_t *mib_msg,
+ struct p80211msg_dot11req_mibget *mib_msg,
int isget);
/*----------------------------------------------------------------
@@ -96,7 +96,7 @@ static int p80211req_mibset_mibget(wlandevice_t *wlandev,
int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
{
int result = 0;
- p80211msg_t *msg = (p80211msg_t *) msgbuf;
+ struct p80211msg *msg = (struct p80211msg *) msgbuf;
/* Check to make sure the MSD is running */
if (!((wlandev->msdstate == WLAN_MSD_HWPRESENT &&
@@ -150,13 +150,13 @@ int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
* Call context:
* Process thread
----------------------------------------------------------------*/
-static void p80211req_handlemsg(wlandevice_t *wlandev, p80211msg_t *msg)
+static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg)
{
switch (msg->msgcode) {
case DIDmsg_lnxreq_hostwep:{
- p80211msg_lnxreq_hostwep_t *req =
- (p80211msg_lnxreq_hostwep_t *) msg;
+ struct p80211msg_lnxreq_hostwep *req =
+ (struct p80211msg_lnxreq_hostwep *) msg;
wlandev->hostwep &=
~(HOSTWEP_DECRYPT | HOSTWEP_ENCRYPT);
if (req->decrypt.data == P80211ENUM_truth_true)
@@ -169,8 +169,8 @@ static void p80211req_handlemsg(wlandevice_t *wlandev, p80211msg_t *msg)
case DIDmsg_dot11req_mibget:
case DIDmsg_dot11req_mibset:{
int isget = (msg->msgcode == DIDmsg_dot11req_mibget);
- p80211msg_dot11req_mibget_t *mib_msg =
- (p80211msg_dot11req_mibget_t *) msg;
+ struct p80211msg_dot11req_mibget *mib_msg =
+ (struct p80211msg_dot11req_mibget *) msg;
p80211req_mibset_mibget(wlandev, mib_msg, isget);
}
default:
@@ -181,7 +181,7 @@ static void p80211req_handlemsg(wlandevice_t *wlandev, p80211msg_t *msg)
}
static int p80211req_mibset_mibget(wlandevice_t *wlandev,
- p80211msg_dot11req_mibget_t *mib_msg,
+ struct p80211msg_dot11req_mibget *mib_msg,
int isget)
{
p80211itemd_t *mibitem = (p80211itemd_t *) mib_msg->mibattribute.data;
diff --git a/drivers/staging/wlan-ng/p80211wext.c b/drivers/staging/wlan-ng/p80211wext.c
deleted file mode 100644
index 387194d4a6e..00000000000
--- a/drivers/staging/wlan-ng/p80211wext.c
+++ /dev/null
@@ -1,1690 +0,0 @@
-/* src/p80211/p80211wext.c
-*
-* Glue code to make linux-wlan-ng a happy wireless extension camper.
-*
-* original author: Reyk Floeter <reyk@synack.de>
-* Completely re-written by Solomon Peachy <solomon@linux-wlan.com>
-*
-* Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*/
-
-/*================================================================*/
-/* System Includes */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <linux/if_arp.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/if_ether.h>
-
-#include "p80211types.h"
-#include "p80211hdr.h"
-#include "p80211conv.h"
-#include "p80211mgmt.h"
-#include "p80211msg.h"
-#include "p80211metastruct.h"
-#include "p80211metadef.h"
-#include "p80211netdev.h"
-#include "p80211ioctl.h"
-#include "p80211req.h"
-
-static int p80211wext_giwrate(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rrq, char *extra);
-static int p80211wext_giwessid(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *essid);
-
-static u8 p80211_mhz_to_channel(u16 mhz)
-{
- if (mhz >= 5000)
- return (mhz - 5000) / 5;
-
- if (mhz == 2484)
- return 14;
-
- if (mhz >= 2407)
- return (mhz - 2407) / 5;
-
- return 0;
-}
-
-static u16 p80211_channel_to_mhz(u8 ch, int dot11a)
-{
-
- if (ch == 0)
- return 0;
- if (ch > 200)
- return 0;
-
- /* 5G */
- if (dot11a)
- return 5000 + (5 * ch);
-
- /* 2.4G */
- if (ch == 14)
- return 2484;
-
- if ((ch < 14) && (ch > 0))
- return 2407 + (5 * ch);
-
- return 0;
-}
-
-/* taken from orinoco.c ;-) */
-static const long p80211wext_channel_freq[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-
-#define NUM_CHANNELS ARRAY_SIZE(p80211wext_channel_freq)
-
-/* steal a spare bit to store the shared/opensystems state.
- should default to open if not set */
-#define HOSTWEP_SHAREDKEY BIT(3)
-
-static int qual_as_percent(int snr)
-{
- if (snr <= 0)
- return 0;
- if (snr <= 40)
- return snr * 5 / 2;
- return 100;
-}
-
-static int p80211wext_setmib(wlandevice_t *wlandev, u32 did, u32 data)
-{
- p80211msg_dot11req_mibset_t msg;
- p80211item_uint32_t *mibitem =
- (p80211item_uint32_t *)&msg.mibattribute.data;
- int result;
-
- msg.msgcode = DIDmsg_dot11req_mibset;
- memset(mibitem, 0, sizeof(*mibitem));
- mibitem->did = did;
- mibitem->data = data;
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
- return result;
-}
-
-/*
- * get a 32 bit mib value
- */
-static int p80211wext_getmib(wlandevice_t *wlandev, u32 did, u32 *data)
-{
- p80211msg_dot11req_mibset_t msg;
- p80211item_uint32_t *mibitem =
- (p80211item_uint32_t *)&msg.mibattribute.data;
- int result;
-
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(mibitem, 0, sizeof(*mibitem));
- mibitem->did = did;
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
- if (!result)
- *data = mibitem->data;
-
- return result;
-}
-
-static int p80211wext_autojoin(wlandevice_t *wlandev)
-{
- p80211msg_lnxreq_autojoin_t msg;
- struct iw_point data;
- char ssid[IW_ESSID_MAX_SIZE];
-
- int result;
- int err = 0;
-
- /* Get ESSID */
- result = p80211wext_giwessid(wlandev->netdev, NULL, &data, ssid);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- if (wlandev->hostwep & HOSTWEP_SHAREDKEY)
- msg.authtype.data = P80211ENUM_authalg_sharedkey;
- else
- msg.authtype.data = P80211ENUM_authalg_opensystem;
-
- msg.msgcode = DIDmsg_lnxreq_autojoin;
-
- /* Trim the last '\0' to fit the SSID format */
-
- if (data.length && ssid[data.length - 1] == '\0')
- data.length = data.length - 1;
-
- memcpy(msg.ssid.data.data, ssid, data.length);
- msg.ssid.data.len = data.length;
-
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
-
- return err;
-
-}
-
-/* called by /proc/net/wireless */
-struct iw_statistics *p80211wext_get_wireless_stats(netdevice_t *dev)
-{
- p80211msg_lnxreq_commsquality_t quality;
- wlandevice_t *wlandev = dev->ml_priv;
- struct iw_statistics *wstats = &wlandev->wstats;
- int retval;
-
- /* Check */
- if ((wlandev == NULL) || (wlandev->msdstate != WLAN_MSD_RUNNING))
- return NULL;
-
- /* XXX Only valid in station mode */
- wstats->status = 0;
-
- /* build request message */
- quality.msgcode = DIDmsg_lnxreq_commsquality;
- quality.dbm.data = P80211ENUM_truth_true;
- quality.dbm.status = P80211ENUM_msgitem_status_data_ok;
-
- /* send message to nsd */
- if (wlandev->mlmerequest == NULL)
- return NULL;
-
- retval = wlandev->mlmerequest(wlandev, (p80211msg_t *) &quality);
-
- wstats->qual.qual = qual_as_percent(quality.link.data); /* overall link quality */
- wstats->qual.level = quality.level.data; /* instant signal level */
- wstats->qual.noise = quality.noise.data; /* instant noise level */
-
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- wstats->discard.code = wlandev->rx.decrypt_err;
- wstats->discard.nwid = 0;
- wstats->discard.misc = 0;
-
- wstats->discard.fragment = 0; /* incomplete fragments */
- wstats->discard.retries = 0; /* tx retries. */
- wstats->miss.beacon = 0;
-
- return wstats;
-}
-
-static int p80211wext_giwname(netdevice_t *dev,
- struct iw_request_info *info,
- char *name, char *extra)
-{
- struct iw_param rate;
- int result;
- int err = 0;
-
- result = p80211wext_giwrate(dev, NULL, &rate, NULL);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- switch (rate.value) {
- case 1000000:
- case 2000000:
- strcpy(name, "IEEE 802.11-DS");
- break;
- case 5500000:
- case 11000000:
- strcpy(name, "IEEE 802.11-b");
- break;
- }
-exit:
- return err;
-}
-
-static int p80211wext_giwfreq(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel,
- &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- if (value > NUM_CHANNELS) {
- err = -EFAULT;
- goto exit;
- }
-
- /* convert into frequency instead of a channel */
- freq->e = 1;
- freq->m = p80211_channel_to_mhz(value, 0) * 100000;
-
-exit:
- return err;
-}
-
-static int p80211wext_siwfreq(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- if (!wlan_wext_write) {
- err = -EOPNOTSUPP;
- goto exit;
- }
-
- if ((freq->e == 0) && (freq->m <= 1000))
- value = freq->m;
- else
- value = p80211_mhz_to_channel(freq->m);
-
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel,
- value);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
- return err;
-}
-
-static int p80211wext_giwmode(netdevice_t *dev,
- struct iw_request_info *info,
- __u32 *mode, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
-
- switch (wlandev->macmode) {
- case WLAN_MACMODE_IBSS_STA:
- *mode = IW_MODE_ADHOC;
- break;
- case WLAN_MACMODE_ESS_STA:
- *mode = IW_MODE_INFRA;
- break;
- case WLAN_MACMODE_ESS_AP:
- *mode = IW_MODE_MASTER;
- break;
- default:
- /* Not set yet. */
- *mode = IW_MODE_AUTO;
- }
-
- return 0;
-}
-
-static int p80211wext_siwmode(netdevice_t *dev,
- struct iw_request_info *info,
- __u32 *mode, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
-
- if (!wlan_wext_write) {
- err = -EOPNOTSUPP;
- goto exit;
- }
-
- if (*mode != IW_MODE_ADHOC && *mode != IW_MODE_INFRA &&
- *mode != IW_MODE_MASTER) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- /* Operation mode is the same with current mode */
- if (*mode == wlandev->macmode)
- goto exit;
-
- switch (*mode) {
- case IW_MODE_ADHOC:
- wlandev->macmode = WLAN_MACMODE_IBSS_STA;
- break;
- case IW_MODE_INFRA:
- wlandev->macmode = WLAN_MACMODE_ESS_STA;
- break;
- case IW_MODE_MASTER:
- wlandev->macmode = WLAN_MACMODE_ESS_AP;
- break;
- default:
- /* Not set yet. */
- printk(KERN_INFO "Operation mode: %d not support\n", *mode);
- return -EOPNOTSUPP;
- }
-
- /* Set Operation mode to the PORT TYPE RID */
- result = p80211wext_setmib(wlandev,
- DIDmib_p2_p2Static_p2CnfPortType,
- (*mode == IW_MODE_ADHOC) ? 0 : 1);
- if (result)
- err = -EFAULT;
-exit:
- return err;
-}
-
-static int p80211wext_giwrange(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- int i, val;
-
- /* for backward compatability set size and zero everything we don't understand */
- data->length = sizeof(*range);
- memset(range, 0, sizeof(*range));
-
- range->txpower_capa = IW_TXPOW_DBM;
- /* XXX what about min/max_pmp, min/max_pmt, etc. */
-
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 13;
-
- range->retry_capa = IW_RETRY_LIMIT;
- range->retry_flags = IW_RETRY_LIMIT;
- range->min_retry = 0;
- range->max_retry = 255;
-
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 | /* mode/freq/ssid */
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1; /* encode */
- range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVQUAL) |
- IW_EVENT_CAPA_MASK(IWEVCUSTOM));
-
- range->num_channels = NUM_CHANNELS;
-
- /* XXX need to filter against the regulatory domain &| active set */
- val = 0;
- for (i = 0; i < NUM_CHANNELS; i++) {
- range->freq[val].i = i + 1;
- range->freq[val].m = p80211wext_channel_freq[i] * 100000;
- range->freq[val].e = 1;
- val++;
- }
-
- range->num_frequency = val;
-
- /* Max of /proc/net/wireless */
- range->max_qual.qual = 100;
- range->max_qual.level = 0;
- range->max_qual.noise = 0;
- range->sensitivity = 3;
- /* XXX these need to be nsd-specific! */
-
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
-
- range->max_encoding_tokens = NUM_WEPKEYS;
- range->num_encoding_sizes = 2;
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
-
- /* XXX what about num_bitrates/throughput? */
- range->num_bitrates = 0;
-
- /* estimated max throughput */
- /* XXX need to cap it if we're running at ~2Mbps.. */
- range->throughput = 5500000;
-
- return 0;
-}
-
-static int p80211wext_giwap(netdevice_t *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
-{
-
- wlandevice_t *wlandev = dev->ml_priv;
-
- memcpy(ap_addr->sa_data, wlandev->bssid, WLAN_BSSID_LEN);
- ap_addr->sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-static int p80211wext_giwencode(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *key)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int err = 0;
- int i;
-
- i = (erq->flags & IW_ENCODE_INDEX) - 1;
- erq->flags = 0;
-
- if (wlandev->hostwep & HOSTWEP_PRIVACYINVOKED)
- erq->flags |= IW_ENCODE_ENABLED;
- else
- erq->flags |= IW_ENCODE_DISABLED;
-
- if (wlandev->hostwep & HOSTWEP_EXCLUDEUNENCRYPTED)
- erq->flags |= IW_ENCODE_RESTRICTED;
- else
- erq->flags |= IW_ENCODE_OPEN;
-
- i = (erq->flags & IW_ENCODE_INDEX) - 1;
-
- if (i == -1)
- i = wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK;
-
- if ((i < 0) || (i >= NUM_WEPKEYS)) {
- err = -EINVAL;
- goto exit;
- }
-
- erq->flags |= i + 1;
-
- /* copy the key from the driver cache as the keys are read-only MIBs */
- erq->length = wlandev->wep_keylens[i];
- memcpy(key, wlandev->wep_keys[i], erq->length);
-
-exit:
- return err;
-}
-
-static int p80211wext_siwencode(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *key)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- p80211msg_dot11req_mibset_t msg;
- p80211item_pstr32_t pstr;
-
- int err = 0;
- int result = 0;
- int i;
-
- if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- /* Check the Key index first. */
- i = (erq->flags & IW_ENCODE_INDEX);
- if (i) {
- if ((i < 1) || (i > NUM_WEPKEYS)) {
- err = -EINVAL;
- goto exit;
- } else {
- i--;
- }
- /* Set current key number only if no keys are given */
- if (erq->flags & IW_ENCODE_NOKEY) {
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
- i);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
- }
-
- } else {
- /* Use defaultkey if no Key Index */
- i = wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK;
- }
-
- /* Check if there is no key information in the iwconfig request */
- if ((erq->flags & IW_ENCODE_NOKEY) == 0) {
-
- /*------------------------------------------------------------
- * If there is WEP Key for setting, check the Key Information
- * and then set it to the firmware.
- -------------------------------------------------------------*/
-
- if (erq->length > 0) {
- /* copy the key from the driver cache as the keys are read-only MIBs */
- wlandev->wep_keylens[i] = erq->length;
- memcpy(wlandev->wep_keys[i], key, erq->length);
-
- /* Prepare data struture for p80211req_dorequest. */
- memcpy(pstr.data.data, key, erq->length);
- pstr.data.len = erq->length;
-
- switch (i) {
- case 0:
- pstr.did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
- break;
-
- case 1:
- pstr.did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
- break;
-
- case 2:
- pstr.did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
- break;
-
- case 3:
- pstr.did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
- break;
-
- default:
- err = -EINVAL;
- goto exit;
- }
-
- msg.msgcode = DIDmsg_dot11req_mibset;
- memcpy(&msg.mibattribute.data, &pstr, sizeof(pstr));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
- }
-
- }
-
- /* Check the PrivacyInvoked flag */
- if (erq->flags & IW_ENCODE_DISABLED) {
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
- P80211ENUM_truth_false);
- } else {
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
- P80211ENUM_truth_true);
- }
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- /* The security mode may be open or restricted, and its meaning
- depends on the card used. With most cards, in open mode no
- authentication is used and the card may also accept non-
- encrypted sessions, whereas in restricted mode only encrypted
- sessions are accepted and the card will use authentication if
- available.
- */
- if (erq->flags & IW_ENCODE_RESTRICTED) {
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
- P80211ENUM_truth_true);
- } else if (erq->flags & IW_ENCODE_OPEN) {
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
- P80211ENUM_truth_false);
- }
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
-
- return err;
-}
-
-static int p80211wext_giwessid(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *essid)
-{
- wlandevice_t *wlandev = dev->ml_priv;
-
- if (wlandev->ssid.len) {
- data->length = wlandev->ssid.len;
- data->flags = 1;
- memcpy(essid, wlandev->ssid.data, data->length);
- essid[data->length] = 0;
- } else {
- memset(essid, 0, sizeof(wlandev->ssid.data));
- data->length = 0;
- data->flags = 0;
- }
-
- return 0;
-}
-
-static int p80211wext_siwessid(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *essid)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- p80211msg_lnxreq_autojoin_t msg;
-
- int result;
- int err = 0;
- int length = data->length;
-
- if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- if (wlandev->hostwep & HOSTWEP_SHAREDKEY)
- msg.authtype.data = P80211ENUM_authalg_sharedkey;
- else
- msg.authtype.data = P80211ENUM_authalg_opensystem;
-
- msg.msgcode = DIDmsg_lnxreq_autojoin;
-
- /* Trim the last '\0' to fit the SSID format */
- if (length && essid[length - 1] == '\0')
- length--;
-
- memcpy(msg.ssid.data.data, essid, length);
- msg.ssid.data.len = length;
-
- pr_debug("autojoin_ssid for %s \n", essid);
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
- pr_debug("autojoin_ssid %d\n", result);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
- return err;
-}
-
-static int p80211wext_siwcommit(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *essid)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int err = 0;
-
- if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- /* Auto Join */
- err = p80211wext_autojoin(wlandev);
-
-exit:
- return err;
-}
-
-static int p80211wext_giwrate(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- result = p80211wext_getmib(wlandev, DIDmib_p2_p2MAC_p2CurrentTxRate, &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- rrq->fixed = 0; /* can it change? */
- rrq->disabled = 0;
- rrq->value = 0;
-
-#define HFA384x_RATEBIT_1 ((u16)1)
-#define HFA384x_RATEBIT_2 ((u16)2)
-#define HFA384x_RATEBIT_5dot5 ((u16)4)
-#define HFA384x_RATEBIT_11 ((u16)8)
-
- switch (value) {
- case HFA384x_RATEBIT_1:
- rrq->value = 1000000;
- break;
- case HFA384x_RATEBIT_2:
- rrq->value = 2000000;
- break;
- case HFA384x_RATEBIT_5dot5:
- rrq->value = 5500000;
- break;
- case HFA384x_RATEBIT_11:
- rrq->value = 11000000;
- break;
- default:
- err = -EINVAL;
- }
-exit:
- return err;
-}
-
-static int p80211wext_giwrts(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rts, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold,
- &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- rts->value = value;
- rts->disabled = (rts->value == 2347);
- rts->fixed = 1;
-
-exit:
- return err;
-}
-
-static int p80211wext_siwrts(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rts, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- if (!wlan_wext_write) {
- err = -EOPNOTSUPP;
- goto exit;
- }
-
- if (rts->disabled)
- value = 2347;
- else
- value = rts->value;
-
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold,
- value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
- return err;
-}
-
-static int p80211wext_giwfrag(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *frag, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold,
- &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- frag->value = value;
- frag->disabled = (frag->value == 2346);
- frag->fixed = 1;
-
-exit:
- return err;
-}
-
-static int p80211wext_siwfrag(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *frag, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- int value;
-
- if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- if (frag->disabled)
- value = 2346;
- else
- value = frag->value;
-
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold,
- value);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
- return err;
-}
-
-#ifndef IW_RETRY_LONG
-#define IW_RETRY_LONG IW_RETRY_MAX
-#endif
-
-#ifndef IW_RETRY_SHORT
-#define IW_RETRY_SHORT IW_RETRY_MIN
-#endif
-
-static int p80211wext_giwretry(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- u16 shortretry, longretry, lifetime;
- unsigned int value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11ShortRetryLimit,
- &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- shortretry = value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11LongRetryLimit,
- &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- longretry = value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11MaxTransmitMSDULifetime,
- &value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- lifetime = value;
-
- rrq->disabled = 0;
-
- if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- rrq->flags = IW_RETRY_LIFETIME;
- rrq->value = lifetime * 1024;
- } else {
- if (rrq->flags & IW_RETRY_LONG) {
- rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- rrq->value = longretry;
- } else {
- rrq->flags = IW_RETRY_LIMIT;
- rrq->value = shortretry;
- if (shortretry != longretry)
- rrq->flags |= IW_RETRY_SHORT;
- }
- }
-
-exit:
- return err;
-
-}
-
-static int p80211wext_siwretry(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
- int result;
- int err = 0;
- unsigned int value;
-
- if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- if (rrq->disabled) {
- err = -EINVAL;
- goto exit;
- }
-
- msg.msgcode = DIDmsg_dot11req_mibset;
-
- if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
-
- value = rrq->value /= 1024;
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11MaxTransmitMSDULifetime,
- value);
- if (result) {
- err = -EFAULT;
- goto exit;
- }
- } else {
- if (rrq->flags & IW_RETRY_LONG) {
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11LongRetryLimit,
- rrq->value);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
- }
-
- if (rrq->flags & IW_RETRY_SHORT) {
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11mac_dot11OperationTable_dot11ShortRetryLimit,
- rrq->value);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
- }
- }
-
-exit:
- return err;
-
-}
-
-static int p80211wext_siwtxpow(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
- int result;
- int err = 0;
- unsigned int value;
-
- if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- if (rrq->fixed == 0)
- value = 30;
- else
- value = rrq->value;
- result = p80211wext_setmib(wlandev,
- DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel,
- value);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
-exit:
- return err;
-}
-
-static int p80211wext_giwtxpow(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_param *rrq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- int result;
- int err = 0;
- unsigned int value;
-
- result = p80211wext_getmib(wlandev,
- DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel,
- &value);
-
- if (result) {
- err = -EFAULT;
- goto exit;
- }
-
- /* XXX handle OFF by setting disabled = 1; */
-
- rrq->flags = 0; /* IW_TXPOW_DBM; */
- rrq->disabled = 0;
- rrq->fixed = 0;
- rrq->value = value;
-
-exit:
- return err;
-}
-
-static int p80211wext_siwspy(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *srq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- struct sockaddr address[IW_MAX_SPY];
- int number = srq->length;
- int i;
-
- /* Copy the data from the input buffer */
- memcpy(address, extra, sizeof(struct sockaddr) * number);
-
- wlandev->spy_number = 0;
-
- if (number > 0) {
-
- /* extract the addresses */
- for (i = 0; i < number; i++) {
-
- memcpy(wlandev->spy_address[i], address[i].sa_data,
- ETH_ALEN);
- }
-
- /* reset stats */
- memset(wlandev->spy_stat, 0,
- sizeof(struct iw_quality) * IW_MAX_SPY);
-
- /* set number of addresses */
- wlandev->spy_number = number;
- }
-
- return 0;
-}
-
-/* jkriegl: from orinoco, modified */
-static int p80211wext_giwspy(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *srq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
-
- struct sockaddr address[IW_MAX_SPY];
- struct iw_quality spy_stat[IW_MAX_SPY];
- int number;
- int i;
-
- number = wlandev->spy_number;
-
- if (number > 0) {
-
- /* populate address and spy struct's */
- for (i = 0; i < number; i++) {
- memcpy(address[i].sa_data, wlandev->spy_address[i],
- ETH_ALEN);
- address[i].sa_family = AF_UNIX;
- memcpy(&spy_stat[i], &wlandev->spy_stat[i],
- sizeof(struct iw_quality));
- }
-
- /* reset update flag */
- for (i = 0; i < number; i++)
- wlandev->spy_stat[i].updated = 0;
- }
-
- /* push stuff to user space */
- srq->length = number;
- memcpy(extra, address, sizeof(struct sockaddr) * number);
- memcpy(extra + sizeof(struct sockaddr) * number, spy_stat,
- sizeof(struct iw_quality) * number);
-
- return 0;
-}
-
-static int prism2_result2err(int prism2_result)
-{
- int err = 0;
-
- switch (prism2_result) {
- case P80211ENUM_resultcode_invalid_parameters:
- err = -EINVAL;
- break;
- case P80211ENUM_resultcode_implementation_failure:
- err = -EIO;
- break;
- case P80211ENUM_resultcode_not_supported:
- err = -EOPNOTSUPP;
- break;
- default:
- err = 0;
- break;
- }
-
- return err;
-}
-
-static int p80211wext_siwscan(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *srq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- p80211msg_dot11req_scan_t msg;
- int result;
- int err = 0;
- int i = 0;
-
- if (wlandev->macmode == WLAN_MACMODE_ESS_AP) {
- printk(KERN_ERR "Can't scan in AP mode\n");
- err = (-EOPNOTSUPP);
- goto exit;
- }
-
- memset(&msg, 0x00, sizeof(p80211msg_dot11req_scan_t));
- msg.msgcode = DIDmsg_dot11req_scan;
- msg.bsstype.data = P80211ENUM_bsstype_any;
-
- memset(&(msg.bssid.data), 0xFF, sizeof(p80211item_pstr6_t));
- msg.bssid.data.len = 6;
-
- msg.scantype.data = P80211ENUM_scantype_active;
- msg.probedelay.data = 0;
-
- for (i = 1; i <= 14; i++)
- msg.channellist.data.data[i - 1] = i;
- msg.channellist.data.len = 14;
-
- msg.maxchanneltime.data = 250;
- msg.minchanneltime.data = 200;
-
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
- if (result)
- err = prism2_result2err(msg.resultcode.data);
-
-exit:
- return err;
-}
-
-/* Helper to translate scan into Wireless Extensions scan results.
- * Inspired by the prism54 code, which was in turn inspired by the
- * airo driver code.
- */
-static char *wext_translate_bss(struct iw_request_info *info, char *current_ev,
- char *end_buf,
- p80211msg_dot11req_scan_results_t *bss)
-{
- struct iw_event iwe; /* Temporary buffer */
-
- /* The first entry must be the MAC address */
- memcpy(iwe.u.ap_addr.sa_data, bss->bssid.data.data, WLAN_BSSID_LEN);
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- iwe.cmd = SIOCGIWAP;
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_ADDR_LEN);
-
- /* The following entries will be displayed in the same order we give them */
-
- /* The ESSID. */
- if (bss->ssid.data.len > 0) {
- char essid[IW_ESSID_MAX_SIZE + 1];
- int size;
-
- size =
- min_t(unsigned short, IW_ESSID_MAX_SIZE,
- bss->ssid.data.len);
- memset(&essid, 0, sizeof(essid));
- memcpy(&essid, bss->ssid.data.data, size);
- pr_debug(" essid size = %d\n", size);
- iwe.u.data.length = size;
- iwe.u.data.flags = 1;
- iwe.cmd = SIOCGIWESSID;
- current_ev =
- iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- &essid[0]);
- pr_debug(" essid size OK.\n");
- }
-
- switch (bss->bsstype.data) {
- case P80211ENUM_bsstype_infrastructure:
- iwe.u.mode = IW_MODE_MASTER;
- break;
-
- case P80211ENUM_bsstype_independent:
- iwe.u.mode = IW_MODE_ADHOC;
- break;
-
- default:
- iwe.u.mode = 0;
- break;
- }
- iwe.cmd = SIOCGIWMODE;
- if (iwe.u.mode)
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_UINT_LEN);
-
- /* Encryption capability */
- if (bss->privacy.data == P80211ENUM_truth_true)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- iwe.cmd = SIOCGIWENCODE;
- current_ev =
- iwe_stream_add_point(info, current_ev, end_buf, &iwe, NULL);
-
- /* Add frequency. (short) bss->channel is the frequency in MHz */
- iwe.u.freq.m = bss->dschannel.data;
- iwe.u.freq.e = 0;
- iwe.cmd = SIOCGIWFREQ;
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_FREQ_LEN);
-
- /* Add quality statistics */
- iwe.u.qual.level = bss->signal.data;
- iwe.u.qual.noise = bss->noise.data;
- /* do a simple SNR for quality */
- iwe.u.qual.qual = qual_as_percent(bss->signal.data - bss->noise.data);
- iwe.cmd = IWEVQUAL;
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_QUAL_LEN);
-
- return current_ev;
-}
-
-static int p80211wext_giwscan(netdevice_t *dev,
- struct iw_request_info *info,
- struct iw_point *srq, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- p80211msg_dot11req_scan_results_t msg;
- int result = 0;
- int err = 0;
- int i = 0;
- int scan_good = 0;
- char *current_ev = extra;
-
- /* Since wireless tools doesn't really have a way of passing how
-	 * many scan results there were back here, keep grabbing them
- * until we fail.
- */
- do {
- memset(&msg, 0, sizeof(msg));
- msg.msgcode = DIDmsg_dot11req_scan_results;
- msg.bssindex.data = i;
-
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
- if ((result != 0) ||
- (msg.resultcode.data != P80211ENUM_resultcode_success)) {
- break;
- }
-
- current_ev =
- wext_translate_bss(info, current_ev,
- extra + IW_SCAN_MAX_DATA, &msg);
- scan_good = 1;
- i++;
- } while (i < IW_MAX_AP);
-
- srq->length = (current_ev - extra);
- srq->flags = 0; /* todo */
-
- if (result && !scan_good)
- err = prism2_result2err(msg.resultcode.data);
-
- return err;
-}
-
-/* extra wireless extensions stuff to support NetworkManager (I hope) */
-
-/* SIOCSIWENCODEEXT */
-static int p80211wext_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- p80211msg_dot11req_mibset_t msg;
- p80211item_pstr32_t *pstr;
-
- int result = 0;
- struct iw_point *encoding = &wrqu->encoding;
- int idx = encoding->flags & IW_ENCODE_INDEX;
-
- pr_debug("set_encode_ext flags[%d] alg[%d] keylen[%d]\n",
- ext->ext_flags, (int)ext->alg, (int)ext->key_len);
-
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
-		/* set default key? I'm not sure if this is the correct thing to do here */
-
- if (idx) {
- if (idx < 1 || idx > NUM_WEPKEYS)
- return -EINVAL;
- else
- idx--;
- }
- pr_debug("setting default key (%d)\n", idx);
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
- idx);
- if (result)
- return -EFAULT;
- }
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- if (ext->alg != IW_ENCODE_ALG_WEP) {
- pr_debug("asked to set a non wep key :(\n");
- return -EINVAL;
- }
- if (idx) {
- if (idx < 1 || idx > NUM_WEPKEYS)
- return -EINVAL;
- else
- idx--;
- }
- pr_debug("Set WEP key (%d)\n", idx);
- wlandev->wep_keylens[idx] = ext->key_len;
- memcpy(wlandev->wep_keys[idx], ext->key, ext->key_len);
-
- memset(&msg, 0, sizeof(msg));
- pstr = (p80211item_pstr32_t *) &msg.mibattribute.data;
- memcpy(pstr->data.data, ext->key, ext->key_len);
- pstr->data.len = ext->key_len;
- switch (idx) {
- case 0:
- pstr->did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
- break;
- case 1:
- pstr->did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
- break;
- case 2:
- pstr->did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
- break;
- case 3:
- pstr->did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
- break;
- default:
- break;
- }
- msg.msgcode = DIDmsg_dot11req_mibset;
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
- pr_debug("result (%d)\n", result);
- }
- return result;
-}
-
-/* SIOCGIWENCODEEXT */
-static int p80211wext_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
-
- struct iw_point *encoding = &wrqu->encoding;
- int result = 0;
- int max_len;
- int idx;
-
- pr_debug("get_encode_ext flags[%d] alg[%d] keylen[%d]\n",
- ext->ext_flags, (int)ext->alg, (int)ext->key_len);
-
- max_len = encoding->length - sizeof(*ext);
- if (max_len <= 0) {
- pr_debug("get_encodeext max_len [%d] invalid\n", max_len);
- result = -EINVAL;
- goto exit;
- }
- idx = encoding->flags & IW_ENCODE_INDEX;
-
- pr_debug("get_encode_ext index [%d]\n", idx);
-
- if (idx) {
- if (idx < 1 || idx > NUM_WEPKEYS) {
- pr_debug("get_encode_ext invalid key index [%d]\n",
- idx);
- result = -EINVAL;
- goto exit;
- }
- idx--;
- } else {
- /* default key ? not sure what to do */
- /* will just use key[0] for now ! FIX ME */
- }
-
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- ext->alg = IW_ENCODE_ALG_WEP;
- ext->key_len = wlandev->wep_keylens[idx];
- memcpy(ext->key, wlandev->wep_keys[idx], ext->key_len);
-
- encoding->flags |= IW_ENCODE_ENABLED;
-exit:
- return result;
-}
-
-/* SIOCSIWAUTH */
-static int p80211_wext_set_iwauth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- struct iw_param *param = &wrqu->param;
- int result = 0;
-
- pr_debug("set_iwauth flags[%d]\n", (int)param->flags & IW_AUTH_INDEX);
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_DROP_UNENCRYPTED:
- pr_debug("drop_unencrypted %d\n", param->value);
- if (param->value)
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
- P80211ENUM_truth_true);
- else
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
- P80211ENUM_truth_false);
- break;
-
- case IW_AUTH_PRIVACY_INVOKED:
- pr_debug("privacy invoked %d\n", param->value);
- if (param->value)
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
- P80211ENUM_truth_true);
- else
- result =
- p80211wext_setmib(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
- P80211ENUM_truth_false);
-
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
- pr_debug("set open_system\n");
- wlandev->hostwep &= ~HOSTWEP_SHAREDKEY;
- } else if (param->value & IW_AUTH_ALG_SHARED_KEY) {
- pr_debug("set shared key\n");
- wlandev->hostwep |= HOSTWEP_SHAREDKEY;
- } else {
-			/* don't know what to do now */
- pr_debug("unknown AUTH_ALG (%d)\n", param->value);
- result = -EINVAL;
- }
- break;
-
- default:
- break;
- }
-
- return result;
-}
-
-/* SIOCSIWAUTH */
-static int p80211_wext_get_iwauth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wlandevice_t *wlandev = dev->ml_priv;
- struct iw_param *param = &wrqu->param;
- int result = 0;
-
- pr_debug("get_iwauth flags[%d]\n", (int)param->flags & IW_AUTH_INDEX);
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_DROP_UNENCRYPTED:
- param->value =
- wlandev->hostwep & HOSTWEP_EXCLUDEUNENCRYPTED ? 1 : 0;
- break;
-
- case IW_AUTH_PRIVACY_INVOKED:
- param->value =
- wlandev->hostwep & HOSTWEP_PRIVACYINVOKED ? 1 : 0;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- param->value =
- wlandev->hostwep & HOSTWEP_SHAREDKEY ?
- IW_AUTH_ALG_SHARED_KEY : IW_AUTH_ALG_OPEN_SYSTEM;
- break;
-
- default:
- break;
- }
-
- return result;
-}
-
-#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
-
-static iw_handler p80211wext_handlers[] = {
- IW_IOCTL(SIOCSIWCOMMIT) = (iw_handler) p80211wext_siwcommit,
- IW_IOCTL(SIOCGIWNAME) = (iw_handler) p80211wext_giwname,
-/* SIOCSIWNWID,SIOCGIWNWID */
- IW_IOCTL(SIOCSIWFREQ) = (iw_handler) p80211wext_siwfreq,
- IW_IOCTL(SIOCGIWFREQ) = (iw_handler) p80211wext_giwfreq,
- IW_IOCTL(SIOCSIWMODE) = (iw_handler) p80211wext_siwmode,
- IW_IOCTL(SIOCGIWMODE) = (iw_handler) p80211wext_giwmode,
-/* SIOCSIWSENS,SIOCGIWSENS,SIOCSIWRANGE */
- IW_IOCTL(SIOCGIWRANGE) = (iw_handler) p80211wext_giwrange,
-/* SIOCSIWPRIV,SIOCGIWPRIV,SIOCSIWSTATS,SIOCGIWSTATS */
- IW_IOCTL(SIOCSIWSPY) = (iw_handler) p80211wext_siwspy,
- IW_IOCTL(SIOCGIWSPY) = (iw_handler) p80211wext_giwspy,
-/* SIOCSIWAP */
- IW_IOCTL(SIOCGIWAP) = (iw_handler) p80211wext_giwap,
-/* SIOCGIWAPLIST */
- IW_IOCTL(SIOCSIWSCAN) = (iw_handler) p80211wext_siwscan,
- IW_IOCTL(SIOCGIWSCAN) = (iw_handler) p80211wext_giwscan,
- IW_IOCTL(SIOCSIWESSID) = (iw_handler) p80211wext_siwessid,
- IW_IOCTL(SIOCGIWESSID) = (iw_handler) p80211wext_giwessid,
-/* SIOCSIWNICKN */
- IW_IOCTL(SIOCGIWNICKN) = (iw_handler) p80211wext_giwessid,
-/* SIOCSIWRATE */
- IW_IOCTL(SIOCGIWRATE) = (iw_handler) p80211wext_giwrate,
- IW_IOCTL(SIOCSIWRTS) = (iw_handler) p80211wext_siwrts,
- IW_IOCTL(SIOCGIWRTS) = (iw_handler) p80211wext_giwrts,
- IW_IOCTL(SIOCSIWFRAG) = (iw_handler) p80211wext_siwfrag,
- IW_IOCTL(SIOCGIWFRAG) = (iw_handler) p80211wext_giwfrag,
- IW_IOCTL(SIOCSIWTXPOW) = (iw_handler) p80211wext_siwtxpow,
- IW_IOCTL(SIOCGIWTXPOW) = (iw_handler) p80211wext_giwtxpow,
- IW_IOCTL(SIOCSIWRETRY) = (iw_handler) p80211wext_siwretry,
- IW_IOCTL(SIOCGIWRETRY) = (iw_handler) p80211wext_giwretry,
- IW_IOCTL(SIOCSIWENCODE) = (iw_handler) p80211wext_siwencode,
- IW_IOCTL(SIOCGIWENCODE) = (iw_handler) p80211wext_giwencode,
-/* SIOCSIWPOWER,SIOCGIWPOWER */
-/* WPA operations */
-/* SIOCSIWGENIE,SIOCGIWGENIE generic IE */
- IW_IOCTL(SIOCSIWAUTH) = (iw_handler) p80211_wext_set_iwauth, /*set authentication mode params */
- IW_IOCTL(SIOCGIWAUTH) = (iw_handler) p80211_wext_get_iwauth, /*get authentication mode params */
- IW_IOCTL(SIOCSIWENCODEEXT) = (iw_handler) p80211wext_set_encodeext, /*set encoding token & mode */
- IW_IOCTL(SIOCGIWENCODEEXT) = (iw_handler) p80211wext_get_encodeext, /*get encoding token & mode */
-/* SIOCSIWPMKSA PMKSA cache operation */
-};
-
-struct iw_handler_def p80211wext_handler_def = {
- .num_standard = ARRAY_SIZE(p80211wext_handlers),
- .standard = p80211wext_handlers,
- .get_wireless_stats = p80211wext_get_wireless_stats
-};
-
-int p80211wext_event_associated(wlandevice_t *wlandev, int assoc)
-{
- union iwreq_data data;
-
- /* Send the association state first */
- data.ap_addr.sa_family = ARPHRD_ETHER;
- if (assoc)
- memcpy(data.ap_addr.sa_data, wlandev->bssid, ETH_ALEN);
- else
- memset(data.ap_addr.sa_data, 0, ETH_ALEN);
-
- if (wlan_wext_write)
- wireless_send_event(wlandev->netdev, SIOCGIWAP, &data, NULL);
-
- if (!assoc)
- goto done;
-
- /* XXX send association data, like IEs, etc etc. */
-
-done:
- return 0;
-}
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index d20c8797bcc..fd5ddb29436 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -73,26 +73,26 @@ MODULE_FIRMWARE(PRISM2_USB_FWFILE);
/*================================================================*/
/* Local Types */
-typedef struct s3datarec {
+struct s3datarec {
u32 len;
u32 addr;
u8 checksum;
u8 *data;
-} s3datarec_t;
+};
-typedef struct s3plugrec {
+struct s3plugrec {
u32 itemcode;
u32 addr;
u32 len;
-} s3plugrec_t;
+};
-typedef struct s3crcrec {
+struct s3crcrec {
u32 addr;
u32 len;
unsigned int dowrite;
-} s3crcrec_t;
+};
-typedef struct s3inforec {
+struct s3inforec {
u16 len;
u16 type;
union {
@@ -101,20 +101,20 @@ typedef struct s3inforec {
u16 buildseq;
hfa384x_compident_t platform;
} info;
-} s3inforec_t;
+};
-typedef struct pda {
+struct pda {
u8 buf[HFA384x_PDA_LEN_MAX];
hfa384x_pdrec_t *rec[HFA384x_PDA_RECS_MAX];
unsigned int nrec;
-} pda_t;
+};
-typedef struct imgchunk {
+struct imgchunk {
u32 addr; /* start address */
u32 len; /* in bytes */
u16 crc; /* CRC value (if it falls at a chunk boundary) */
u8 *data;
-} imgchunk_t;
+};
/*================================================================*/
/* Local Static Definitions */
@@ -124,26 +124,26 @@ typedef struct imgchunk {
/* Data records */
unsigned int ns3data;
-s3datarec_t s3data[S3DATA_MAX];
+struct s3datarec s3data[S3DATA_MAX];
/* Plug records */
unsigned int ns3plug;
-s3plugrec_t s3plug[S3PLUG_MAX];
+struct s3plugrec s3plug[S3PLUG_MAX];
/* CRC records */
unsigned int ns3crc;
-s3crcrec_t s3crc[S3CRC_MAX];
+struct s3crcrec s3crc[S3CRC_MAX];
/* Info records */
unsigned int ns3info;
-s3inforec_t s3info[S3INFO_MAX];
+struct s3inforec s3info[S3INFO_MAX];
/* S7 record (there _better_ be only one) */
u32 startaddr;
/* Load image chunks */
unsigned int nfchunks;
-imgchunk_t fchunk[CHUNKS_MAX];
+struct imgchunk fchunk[CHUNKS_MAX];
/* Note that for the following pdrec_t arrays, the len and code */
/* fields are stored in HOST byte order. The mkpdrlist() function */
@@ -151,7 +151,7 @@ imgchunk_t fchunk[CHUNKS_MAX];
/*----------------------------------------------------------------*/
/* PDA, built from [card|newfile]+[addfile1+addfile2...] */
-pda_t pda;
+struct pda pda;
hfa384x_compident_t nicid;
hfa384x_caplevel_t rfid;
hfa384x_caplevel_t macid;
@@ -165,21 +165,21 @@ wlandevice_t *wlandev);
static int read_fwfile(const struct ihex_binrec *rfptr);
-static int mkimage(imgchunk_t *clist, unsigned int *ccnt);
+static int mkimage(struct imgchunk *clist, unsigned int *ccnt);
-static int read_cardpda(pda_t *pda, wlandevice_t *wlandev);
+static int read_cardpda(struct pda *pda, wlandevice_t *wlandev);
-static int mkpdrlist(pda_t *pda);
+static int mkpdrlist(struct pda *pda);
-static int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
- s3plugrec_t *s3plug, unsigned int ns3plug, pda_t * pda);
+static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
+ struct s3plugrec *s3plug, unsigned int ns3plug, struct pda * pda);
-static int crcimage(imgchunk_t *fchunk, unsigned int nfchunks,
- s3crcrec_t *s3crc, unsigned int ns3crc);
+static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
+ struct s3crcrec *s3crc, unsigned int ns3crc);
-static int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
+static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
unsigned int nfchunks);
-static void free_chunks(imgchunk_t *fchunk, unsigned int *nfchunks);
+static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
static void free_srecs(void);
@@ -239,7 +239,7 @@ int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
int prism2_fwapply(const struct ihex_binrec *rfptr, wlandevice_t *wlandev)
{
signed int result = 0;
- p80211msg_dot11req_mibget_t getmsg;
+ struct p80211msg_dot11req_mibget getmsg;
p80211itemd_t *item;
u32 *data;
@@ -375,8 +375,8 @@ int prism2_fwapply(const struct ihex_binrec *rfptr, wlandevice_t *wlandev)
* 0 success
* ~0 failure
----------------------------------------------------------------*/
-int crcimage(imgchunk_t *fchunk, unsigned int nfchunks, s3crcrec_t *s3crc,
- unsigned int ns3crc)
+int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
+ struct s3crcrec *s3crc, unsigned int ns3crc)
{
int result = 0;
int i;
@@ -397,15 +397,14 @@ int crcimage(imgchunk_t *fchunk, unsigned int nfchunks, s3crcrec_t *s3crc,
for (c = 0; c < nfchunks; c++) {
cstart = fchunk[c].addr;
cend = fchunk[c].addr + fchunk[c].len;
- /* the line below does an address & len match search */
- /* unfortunately, I've found that the len fields of */
- /* some crc records don't match with the length of */
- /* the actual data, so we're not checking right */
- /* now */
- /* if ( crcstart-2 >= cstart && crcend <= cend ) break; */
+ /* the line below does an address & len match search */
+ /* unfortunately, I've found that the len fields of */
+ /* some crc records don't match with the length of */
+ /* the actual data, so we're not checking right now */
+ /* if (crcstart-2 >= cstart && crcend <= cend) break; */
/* note the -2 below, it's to make sure the chunk has */
- /* space for the CRC value */
+ /* space for the CRC value */
if (crcstart - 2 >= cstart && crcstart < cend)
break;
}
@@ -440,7 +439,7 @@ int crcimage(imgchunk_t *fchunk, unsigned int nfchunks, s3crcrec_t *s3crc,
* Returns:
* nothing
----------------------------------------------------------------*/
-void free_chunks(imgchunk_t *fchunk, unsigned int *nfchunks)
+void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
{
int i;
for (i = 0; i < *nfchunks; i++) {
@@ -490,7 +489,7 @@ void free_srecs(void)
* 0 - success
* ~0 - failure (probably an errno)
----------------------------------------------------------------*/
-int mkimage(imgchunk_t *clist, unsigned int *ccnt)
+int mkimage(struct imgchunk *clist, unsigned int *ccnt)
{
int result = 0;
int i;
@@ -583,7 +582,7 @@ int mkimage(imgchunk_t *clist, unsigned int *ccnt)
* 0 - success
* ~0 - failure (probably an errno)
----------------------------------------------------------------*/
-int mkpdrlist(pda_t *pda)
+int mkpdrlist(struct pda *pda)
{
int result = 0;
u16 *pda16 = (u16 *) pda->buf;
@@ -656,8 +655,8 @@ int mkpdrlist(pda_t *pda)
* 0 success
* ~0 failure
----------------------------------------------------------------*/
-int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
- s3plugrec_t *s3plug, unsigned int ns3plug, pda_t * pda)
+int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
+ struct s3plugrec *s3plug, unsigned int ns3plug, struct pda * pda)
{
int result = 0;
int i; /* plug index */
@@ -675,7 +674,7 @@ int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
pstart = s3plug[i].addr;
pend = s3plug[i].addr + s3plug[i].len;
/* find the matching PDR (or filename) */
- if (s3plug[i].itemcode != 0xffffffffUL) { /* not filename */
+ if (s3plug[i].itemcode != 0xffffffffUL) { /* not filename */
for (j = 0; j < pda->nrec; j++) {
if (s3plug[i].itemcode ==
le16_to_cpu(pda->rec[j]->code))
@@ -684,7 +683,7 @@ int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
} else {
j = -1;
}
- if (j >= pda->nrec && j != -1) { /* if no matching PDR, fail */
+ if (j >= pda->nrec && j != -1) { /* if no matching PDR, fail */
printk(KERN_WARNING
"warning: Failed to find PDR for "
"plugrec 0x%04x.\n", s3plug[i].itemcode);
@@ -764,10 +763,10 @@ int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
* 0 - success
* ~0 - failure (probably an errno)
----------------------------------------------------------------*/
-int read_cardpda(pda_t *pda, wlandevice_t *wlandev)
+int read_cardpda(struct pda *pda, wlandevice_t *wlandev)
{
int result = 0;
- p80211msg_p2req_readpda_t msg;
+ struct p80211msg_p2req_readpda msg;
/* set up the msg */
msg.msgcode = DIDmsg_p2req_readpda;
@@ -839,7 +838,7 @@ int read_cardpda(pda_t *pda, wlandevice_t *wlandev)
* ssssttttdd..dd
* s - Size in words (little endian)
* t - Info type (little endian), see #defines and
-* s3inforec_t for details about types.
+* struct s3inforec for details about types.
* d - (s - 1) little endian words giving the contents of
* the given info type.
*
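
The record layout documented above (a size word, a type word, then (s - 1) data words, all little-endian) can be unpacked with a small helper along these lines. This is only an illustrative sketch; the helper and parameter names are invented here and are not taken from read_fwfile():

    /* Sketch: unpack one S3 info record ("ssssttttdd..dd").  All fields
     * are little-endian 16-bit words.  Returns bytes consumed, or 0 if
     * the buffer is too short.  Names are illustrative only. */
    #include <stddef.h>
    #include <stdint.h>

    static uint16_t sketch_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    static size_t sketch_parse_inforec(const uint8_t *buf, size_t buflen,
                                       uint16_t *len, uint16_t *type,
                                       const uint8_t **data)
    {
            size_t total;

            if (buflen < 4)
                    return 0;
            *len = sketch_le16(buf);         /* size in words, incl. type */
            *type = sketch_le16(buf + 2);
            *data = buf + 4;                 /* (*len - 1) data words     */
            total = 2u + (size_t)*len * 2u;  /* size word + *len words    */
            return total <= buflen ? total : 0;
    }
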
@@ -978,13 +977,13 @@ int read_fwfile(const struct ihex_binrec *record)
* 0 success
* ~0 failure
----------------------------------------------------------------*/
-int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
+int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
unsigned int nfchunks)
{
int result = 0;
- p80211msg_p2req_ramdl_state_t rstatemsg;
- p80211msg_p2req_ramdl_write_t rwritemsg;
- p80211msg_t *msgp;
+ struct p80211msg_p2req_ramdl_state rstatemsg;
+ struct p80211msg_p2req_ramdl_write rwritemsg;
+ struct p80211msg *msgp;
u32 resultcode;
int i;
int j;
@@ -1030,7 +1029,7 @@ int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
rstatemsg.enable.data = P80211ENUM_truth_true;
rstatemsg.exeaddr.data = startaddr;
- msgp = (p80211msg_t *) &rstatemsg;
+ msgp = (struct p80211msg *) &rstatemsg;
result = prism2mgmt_ramdl_state(wlandev, msgp);
if (result) {
printk(KERN_ERR
@@ -1052,11 +1051,12 @@ int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
nwrites += (fchunk[i].len % WRITESIZE_MAX) ? 1 : 0;
curroff = 0;
for (j = 0; j < nwrites; j++) {
- currlen =
- (fchunk[i].len - (WRITESIZE_MAX * j)) >
- WRITESIZE_MAX ? WRITESIZE_MAX : (fchunk[i].len -
- (WRITESIZE_MAX *
- j));
+ /* TODO Move this to a separate function */
+ int lenleft = fchunk[i].len - (WRITESIZE_MAX * j);
+			if (lenleft > WRITESIZE_MAX)
+ currlen = WRITESIZE_MAX;
+ else
+ currlen = lenleft;
curroff = j * WRITESIZE_MAX;
currdaddr = fchunk[i].addr + curroff;
/* Setup the message */
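
The TODO above hints at pulling the per-write length calculation into its own function. One possible shape for that helper (the name and placement are hypothetical, not part of this patch) is simply the minimum of the bytes remaining in the chunk and WRITESIZE_MAX, which also covers the final short write:

    /* Hypothetical helper for the TODO above: bytes to send in write j
     * of a chunk that is chunklen bytes long, capped at WRITESIZE_MAX
     * per xxxdl_write message. */
    static unsigned int writeimage_currlen(unsigned int chunklen,
                                           unsigned int j)
    {
            unsigned int lenleft = chunklen - (WRITESIZE_MAX * j);

            return lenleft > WRITESIZE_MAX ? WRITESIZE_MAX : lenleft;
    }

With such a helper the loop body above would reduce to currlen = writeimage_currlen(fchunk[i].len, j).
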
@@ -1070,7 +1070,7 @@ int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
("Sending xxxdl_write message addr=%06x len=%d.\n",
currdaddr, currlen);
- msgp = (p80211msg_t *) &rwritemsg;
+ msgp = (struct p80211msg *) &rwritemsg;
result = prism2mgmt_ramdl_write(wlandev, msgp);
/* Check the results */
@@ -1097,7 +1097,7 @@ int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
rstatemsg.enable.data = P80211ENUM_truth_false;
rstatemsg.exeaddr.data = 0;
- msgp = (p80211msg_t *) &rstatemsg;
+ msgp = (struct p80211msg *) &rstatemsg;
result = prism2mgmt_ramdl_state(wlandev, msgp);
if (result) {
printk(KERN_ERR
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 4d1cdfc3542..04514a85d10 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -117,7 +117,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
hfa384x_t *hw = wlandev->priv;
- p80211msg_dot11req_scan_t *msg = msgp;
+ struct p80211msg_dot11req_scan *msg = msgp;
u16 roamingmode, word;
int i, timeout;
int istmpenable = 0;
@@ -361,13 +361,13 @@ exit:
int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
- p80211msg_dot11req_scan_results_t *req;
+ struct p80211msg_dot11req_scan_results *req;
hfa384x_t *hw = wlandev->priv;
hfa384x_HScanResultSub_t *item = NULL;
int count;
- req = (p80211msg_dot11req_scan_results_t *) msgp;
+ req = (struct p80211msg_dot11req_scan_results *) msgp;
req->resultcode.status = P80211ENUM_msgitem_status_data_ok;
@@ -463,6 +463,8 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
/* capinfo bits */
count = le16_to_cpu(item->capinfo);
+ req->capinfo.status = P80211ENUM_msgitem_status_data_ok;
+ req->capinfo.data = count;
/* privacy flag */
req->privacy.status = P80211ENUM_msgitem_status_data_ok;
@@ -511,7 +513,7 @@ int prism2mgmt_start(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
hfa384x_t *hw = wlandev->priv;
- p80211msg_dot11req_start_t *msg = msgp;
+ struct p80211msg_dot11req_start *msg = msgp;
p80211pstrd_t *pstr;
u8 bytebuf[80];
@@ -687,7 +689,7 @@ done:
int prism2mgmt_readpda(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
- p80211msg_p2req_readpda_t *msg = msgp;
+ struct p80211msg_p2req_readpda *msg = msgp;
int result;
/* We only support collecting the PDA when in the FWLOAD
@@ -753,7 +755,7 @@ int prism2mgmt_readpda(wlandevice_t *wlandev, void *msgp)
int prism2mgmt_ramdl_state(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
- p80211msg_p2req_ramdl_state_t *msg = msgp;
+ struct p80211msg_p2req_ramdl_state *msg = msgp;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
printk(KERN_ERR
@@ -809,7 +811,7 @@ int prism2mgmt_ramdl_state(wlandevice_t *wlandev, void *msgp)
int prism2mgmt_ramdl_write(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
- p80211msg_p2req_ramdl_write_t *msg = msgp;
+ struct p80211msg_p2req_ramdl_write *msg = msgp;
u32 addr;
u32 len;
u8 *buf;
@@ -872,7 +874,7 @@ int prism2mgmt_flashdl_state(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
hfa384x_t *hw = wlandev->priv;
- p80211msg_p2req_flashdl_state_t *msg = msgp;
+ struct p80211msg_p2req_flashdl_state *msg = msgp;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
printk(KERN_ERR
@@ -942,7 +944,7 @@ int prism2mgmt_flashdl_state(wlandevice_t *wlandev, void *msgp)
int prism2mgmt_flashdl_write(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
- p80211msg_p2req_flashdl_write_t *msg = msgp;
+ struct p80211msg_p2req_flashdl_write *msg = msgp;
u32 addr;
u32 len;
u8 *buf;
@@ -1006,7 +1008,7 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
int result = 0;
u16 reg;
u16 port_type;
- p80211msg_lnxreq_autojoin_t *msg = msgp;
+ struct p80211msg_lnxreq_autojoin *msg = msgp;
p80211pstrd_t *pstr;
u8 bytebuf[256];
hfa384x_bytestr_t *p2bytestr = (hfa384x_bytestr_t *) bytebuf;
@@ -1074,7 +1076,7 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
- p80211msg_lnxreq_wlansniff_t *msg = msgp;
+ struct p80211msg_lnxreq_wlansniff *msg = msgp;
hfa384x_t *hw = wlandev->priv;
u16 word;
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 0b0ec9c59a5..d3a06fa0b4f 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -79,7 +79,7 @@
#define F_READ 0x2 /* MIB may be read. */
#define F_WRITE 0x4 /* MIB may be written. */
-typedef struct mibrec {
+struct mibrec {
u32 did;
u16 flag;
u16 parm1;
@@ -89,63 +89,63 @@ typedef struct mibrec {
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data);
-} mibrec_t;
+ struct p80211msg_dot11req_mibset *msg, void *data);
+};
-static int prism2mib_bytearea2pstr(mibrec_t *mib,
+static int prism2mib_bytearea2pstr(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data);
-static int prism2mib_uint32(mibrec_t *mib,
+static int prism2mib_uint32(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data);
+ struct p80211msg_dot11req_mibset *msg, void *data);
-static int prism2mib_flag(mibrec_t *mib,
+static int prism2mib_flag(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data);
+ struct p80211msg_dot11req_mibset *msg, void *data);
-static int prism2mib_wepdefaultkey(mibrec_t *mib,
+static int prism2mib_wepdefaultkey(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data);
-static int prism2mib_privacyinvoked(mibrec_t *mib,
+static int prism2mib_privacyinvoked(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data);
-static int prism2mib_excludeunencrypted(mibrec_t *mib,
+static int prism2mib_excludeunencrypted(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data);
-static int prism2mib_fragmentationthreshold(mibrec_t *mib,
+static int prism2mib_fragmentationthreshold(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data);
-static int prism2mib_priv(mibrec_t *mib,
+static int prism2mib_priv(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data);
+ struct p80211msg_dot11req_mibset *msg, void *data);
-static mibrec_t mibtab[] = {
+static struct mibrec mibtab[] = {
/* dot11smt MIB's */
{DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0,
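
Each mibtab[] entry ties a MIB DID to flags and an access function with the prototype shown above; the mibset/mibget path presumably just scans this table for the DID named in the request and dispatches through the function pointer. A minimal sketch of that lookup (the loop, the field name func, and the assumption that the table ends with an all-zero entry are illustrative guesses, not copied from prism2mgmt_mibset_mibget()):

    /* Illustrative only: find the mibtab entry matching a request DID
     * and call its handler. */
    static int sketch_mib_dispatch(struct mibrec *tab, u32 did, int isget,
                                   wlandevice_t *wlandev, hfa384x_t *hw,
                                   struct p80211msg_dot11req_mibset *msg,
                                   void *data)
    {
            struct mibrec *mib;

            for (mib = tab; mib->did != 0; mib++)
                    if (mib->did == did)
                            return mib->func(mib, isget, wlandev, hw,
                                             msg, data);

            return -ENODEV;  /* no matching MIB record (sketch only) */
    }
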
@@ -261,11 +261,11 @@ int prism2mgmt_mibset_mibget(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
int result, isget;
- mibrec_t *mib;
+ struct mibrec *mib;
u16 which;
- p80211msg_dot11req_mibset_t *msg = msgp;
+ struct p80211msg_dot11req_mibset *msg = msgp;
p80211itemd_t *mibitem;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
@@ -371,11 +371,11 @@ done:
*
----------------------------------------------------------------*/
-static int prism2mib_bytearea2pstr(mibrec_t *mib,
+static int prism2mib_bytearea2pstr(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
@@ -421,11 +421,11 @@ static int prism2mib_bytearea2pstr(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_uint32(mibrec_t *mib,
+static int prism2mib_uint32(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data)
+ struct p80211msg_dot11req_mibset *msg, void *data)
{
int result;
u32 *uint32 = (u32 *) data;
@@ -468,11 +468,11 @@ static int prism2mib_uint32(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_flag(mibrec_t *mib,
+static int prism2mib_flag(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data)
+ struct p80211msg_dot11req_mibset *msg, void *data)
{
int result;
u32 *uint32 = (u32 *) data;
@@ -525,11 +525,11 @@ static int prism2mib_flag(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_wepdefaultkey(mibrec_t *mib,
+static int prism2mib_wepdefaultkey(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
@@ -575,11 +575,11 @@ static int prism2mib_wepdefaultkey(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_privacyinvoked(mibrec_t *mib,
+static int prism2mib_privacyinvoked(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
@@ -621,11 +621,11 @@ static int prism2mib_privacyinvoked(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_excludeunencrypted(mibrec_t *mib,
+static int prism2mib_excludeunencrypted(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
@@ -660,11 +660,11 @@ static int prism2mib_excludeunencrypted(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_fragmentationthreshold(mibrec_t *mib,
+static int prism2mib_fragmentationthreshold(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg,
+ struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
@@ -709,11 +709,11 @@ static int prism2mib_fragmentationthreshold(mibrec_t *mib,
*
----------------------------------------------------------------*/
-static int prism2mib_priv(mibrec_t *mib,
+static int prism2mib_priv(struct mibrec *mib,
int isget,
wlandevice_t *wlandev,
hfa384x_t *hw,
- p80211msg_dot11req_mibset_t *msg, void *data)
+ struct p80211msg_dot11req_mibset *msg, void *data)
{
p80211pstrd_t *pstr = (p80211pstrd_t *) data;
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 6cd09352f89..ed751f418db 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -83,8 +83,6 @@
#include "hfa384x.h"
#include "prism2mgmt.h"
-#define wlan_hexchar(x) (((x) < 0x0a) ? ('0' + (x)) : ('a' + ((x) - 0x0a)))
-
/* Create a string of printable chars from something that might not be */
/* It's recommended that the str be 4*len + 1 bytes long */
#define wlan_mkprintstr(buf, buflen, str, strlen) \
@@ -99,8 +97,8 @@
} else { \
(str)[j] = '\\'; \
(str)[j+1] = 'x'; \
- (str)[j+2] = wlan_hexchar(((buf)[i] & 0xf0) >> 4); \
- (str)[j+3] = wlan_hexchar(((buf)[i] & 0x0f)); \
+ (str)[j+2] = hex_asc_hi((buf)[i]); \
+ (str)[j+3] = hex_asc_lo((buf)[i]); \
j += 4; \
} \
} \
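
hex_asc_hi() and hex_asc_lo() are the kernel's stock helpers for turning the high and low nibble of a byte into its ASCII hex digit, so they are drop-in replacements for the removed wlan_hexchar() macro. For reading purposes, the escape produced for one non-printable byte is equivalent to this open-coded form (a sketch, not part of the patch):

    /* What the macro now emits for a non-printable byte b. */
    static void sketch_escape_byte(unsigned char b, char out[4])
    {
            static const char hex[] = "0123456789abcdef";

            out[0] = '\\';
            out[1] = 'x';
            out[2] = hex[(b & 0xf0) >> 4];  /* hex_asc_hi(b) */
            out[3] = hex[b & 0x0f];         /* hex_asc_lo(b) */
    }
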
@@ -124,13 +122,17 @@ MODULE_PARM_DESC(prism2_reset_settletime, "reset settle time in ms");
MODULE_LICENSE("Dual MPL/GPL");
+void prism2_connect_result(wlandevice_t *wlandev, u8 failed);
+void prism2_disconnected(wlandevice_t *wlandev);
+void prism2_roamed(wlandevice_t *wlandev);
+
static int prism2sta_open(wlandevice_t *wlandev);
static int prism2sta_close(wlandevice_t *wlandev);
static void prism2sta_reset(wlandevice_t *wlandev);
static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
- p80211_hdr_t *p80211_hdr,
- p80211_metawep_t *p80211_wep);
-static int prism2sta_mlmerequest(wlandevice_t *wlandev, p80211msg_t *msg);
+ union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep);
+static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg);
static int prism2sta_getcardinfo(wlandevice_t *wlandev);
static int prism2sta_globalsetup(wlandevice_t *wlandev);
static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev);
@@ -266,8 +268,8 @@ static void prism2sta_reset(wlandevice_t *wlandev)
* process thread
----------------------------------------------------------------*/
static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
- p80211_hdr_t *p80211_hdr,
- p80211_metawep_t *p80211_wep)
+ union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep)
{
hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
int result;
@@ -307,7 +309,7 @@ static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
* Call context:
* process thread
----------------------------------------------------------------*/
-static int prism2sta_mlmerequest(wlandevice_t *wlandev, p80211msg_t *msg)
+static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
{
hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
@@ -364,9 +366,9 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, p80211msg_t *msg)
break; /* ignore me. */
case DIDmsg_lnxreq_ifstate:
{
- p80211msg_lnxreq_ifstate_t *ifstatemsg;
+ struct p80211msg_lnxreq_ifstate *ifstatemsg;
pr_debug("Received mlme ifstate request\n");
- ifstatemsg = (p80211msg_lnxreq_ifstate_t *) msg;
+ ifstatemsg = (struct p80211msg_lnxreq_ifstate *) msg;
result =
prism2sta_ifstate(wlandev,
ifstatemsg->ifstate.data);
@@ -385,11 +387,11 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, p80211msg_t *msg)
result = prism2mgmt_autojoin(wlandev, msg);
break;
case DIDmsg_lnxreq_commsquality:{
- p80211msg_lnxreq_commsquality_t *qualmsg;
+ struct p80211msg_lnxreq_commsquality *qualmsg;
pr_debug("Received commsquality request\n");
- qualmsg = (p80211msg_lnxreq_commsquality_t *) msg;
+ qualmsg = (struct p80211msg_lnxreq_commsquality *) msg;
qualmsg->link.status =
P80211ENUM_msgitem_status_data_ok;
@@ -401,6 +403,7 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, p80211msg_t *msg)
qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS);
qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS);
qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC);
+ qualmsg->txrate.data = hw->txrate;
break;
}
@@ -1300,6 +1303,9 @@ void prism2sta_processing_defer(struct work_struct *data)
(portstatus == HFA384x_PSTATUS_CONN_IBSS) ?
WLAN_MACMODE_IBSS_STA : WLAN_MACMODE_ESS_STA;
+ /* signal back up to cfg80211 layer */
+ prism2_connect_result(wlandev, P80211ENUM_truth_false);
+
/* Get the ball rolling on the comms quality stuff */
prism2sta_commsqual_defer(&hw->commsqual_bh);
}
@@ -1315,25 +1321,16 @@ void prism2sta_processing_defer(struct work_struct *data)
* Indicate Deauthentication
* Block Transmits, Ignore receives of data frames
*/
- if (hw->join_ap == 2) {
- hfa384x_JoinRequest_data_t joinreq;
- joinreq = hw->joinreq;
- /* Send the join request */
- hfa384x_drvr_setconfig(hw,
- HFA384x_RID_JOINREQUEST,
- &joinreq,
- HFA384x_RID_JOINREQUEST_LEN);
+ if (wlandev->netdev->type == ARPHRD_ETHER)
printk(KERN_INFO
- "linkstatus=DISCONNECTED (re-submitting join)\n");
- } else {
- if (wlandev->netdev->type == ARPHRD_ETHER)
- printk(KERN_INFO
- "linkstatus=DISCONNECTED (unhandled)\n");
- }
+ "linkstatus=DISCONNECTED (unhandled)\n");
wlandev->macmode = WLAN_MACMODE_NONE;
netif_carrier_off(wlandev->netdev);
+ /* signal back up to cfg80211 layer */
+ prism2_disconnected(wlandev);
+
break;
case HFA384x_LINK_AP_CHANGE:
@@ -1376,6 +1373,9 @@ void prism2sta_processing_defer(struct work_struct *data)
hw->link_status = HFA384x_LINK_CONNECTED;
netif_carrier_on(wlandev->netdev);
+ /* signal back up to cfg80211 layer */
+ prism2_roamed(wlandev);
+
break;
case HFA384x_LINK_AP_OUTOFRANGE:
@@ -1435,6 +1435,9 @@ void prism2sta_processing_defer(struct work_struct *data)
netif_carrier_off(wlandev->netdev);
+ /* signal back up to cfg80211 layer */
+ prism2_connect_result(wlandev, P80211ENUM_truth_true);
+
break;
default:
@@ -1446,7 +1449,6 @@ void prism2sta_processing_defer(struct work_struct *data)
}
wlandev->linkstatus = (hw->link_status == HFA384x_LINK_CONNECTED);
- p80211wext_event_associated(wlandev, wlandev->linkstatus);
failed:
return;
@@ -1985,6 +1987,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
hfa384x_t *hw = container_of(data, struct hfa384x, commsqual_bh);
wlandevice_t *wlandev = hw->wlandev;
hfa384x_bytestr32_t ssid;
+ struct p80211msg_dot11req_mibget msg;
+ p80211item_uint32_t *mibitem = (p80211item_uint32_t *)
+ &msg.mibattribute.data;
int result = 0;
if (hw->wlandev->hwremoved)
@@ -2013,6 +2018,34 @@ void prism2sta_commsqual_defer(struct work_struct *data)
le16_to_cpu(hw->qual.ANL_currFC));
}
+ /* Get the signal rate */
+ msg.msgcode = DIDmsg_dot11req_mibget;
+ mibitem->did = DIDmib_p2_p2MAC_p2CurrentTxRate;
+ result = p80211req_dorequest(wlandev, (u8 *) &msg);
+
+ if (result) {
+ pr_debug("get signal rate failed, result = %d\n",
+ result);
+ goto done;
+ }
+
+ switch (mibitem->data) {
+ case HFA384x_RATEBIT_1:
+ hw->txrate = 10;
+ break;
+ case HFA384x_RATEBIT_2:
+ hw->txrate = 20;
+ break;
+ case HFA384x_RATEBIT_5dot5:
+ hw->txrate = 55;
+ break;
+ case HFA384x_RATEBIT_11:
+ hw->txrate = 110;
+ break;
+ default:
+ pr_debug("Bad ratebit (%d)\n", mibitem->data);
+ }
+
/* Lastly, we need to make sure the BSSID didn't change on us */
result = hfa384x_drvr_getconfig(hw,
HFA384x_RID_CURRENTBSSID,
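
The txrate values set here (10, 20, 55, 110) are just the 802.11b rates expressed in units of 100 kbit/s (1, 2, 5.5 and 11 Mbit/s); the same unscaled value is what gets copied into qualmsg->txrate.data in the commsquality handler earlier in this patch. Converting it for display is a one-liner, for example (sketch only):

    /* hw->txrate is in units of 100 kbit/s; show it as Mbit/s. */
    pr_debug("current tx rate: %u.%u Mbit/s\n",
             hw->txrate / 10, hw->txrate % 10);  /* 55 -> "5.5" */
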
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index f5cff751db2..4efa027a81e 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -119,7 +119,7 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
}
hw = wlandev->priv;
- if (wlan_setup(wlandev) != 0) {
+ if (wlan_setup(wlandev, &(interface->dev)) != 0) {
printk(KERN_ERR "%s: wlan_setup() failed.\n", dev_info);
result = -EIO;
goto failed;
diff --git a/drivers/staging/xgifb/XGI.h b/drivers/staging/xgifb/XGI.h
deleted file mode 100644
index 87803dd032d..00000000000
--- a/drivers/staging/xgifb/XGI.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _XGI_H
-#define _XGI_H
-
-#if 1
-#define TWDEBUG(x)
-#else
-#define TWDEBUG(x) printk(KERN_INFO x "\n");
-#endif
-
-#endif
diff --git a/drivers/staging/xgifb/XGI_accel.c b/drivers/staging/xgifb/XGI_accel.c
index 86ec3421942..79549742cff 100644
--- a/drivers/staging/xgifb/XGI_accel.c
+++ b/drivers/staging/xgifb/XGI_accel.c
@@ -37,37 +37,17 @@
#include <linux/agp_backend.h>
#include <linux/types.h>
-/*
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#include <linux/XGIfb.h>
-#else
-#include <video/XGIfb.h>
-#endif
-*/
#include <asm/io.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#include <video/fbcon.h>
-#include <video/fbcon-cfb8.h>
-#include <video/fbcon-cfb16.h>
-#include <video/fbcon-cfb24.h>
-#include <video/fbcon-cfb32.h>
-#endif
-
-#include "osdef.h"
#include "vgatypes.h"
#include "vb_struct.h"
#include "XGIfb.h"
#include "XGI_accel.h"
-
-extern struct video_info xgi_video_info;
-extern int XGIfb_accel;
-
static const int XGIALUConv[] =
{
0x00, /* dest = 0; 0, GXclear, 0 */
@@ -108,109 +88,17 @@ static const int XGIPatALUConv[] =
0xFF, /* dest = 0xFF; 1, GXset, 0xF */
};
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,34)
static const unsigned char myrops[] = {
3, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
};
-#endif
/* 300 series */
-#if 0
-static void
-XGI300Sync(void)
-{
- XGI300Idle
-}
-#endif
static void
XGI310Sync(void)
{
XGI310Idle
}
-#if 0
-static void
-XGI300SetupForScreenToScreenCopy(int xdir, int ydir, int rop,
- unsigned int planemask, int trans_color)
-{
- XGI300SetupDSTColorDepth(xgi_video_info.DstColor);
- XGI300SetupSRCPitch(xgi_video_info.video_linelength)
- XGI300SetupDSTRect(xgi_video_info.video_linelength, 0xFFF)
-
- if(trans_color != -1) {
- XGI300SetupROP(0x0A)
- XGI300SetupSRCTrans(trans_color)
- XGI300SetupCMDFlag(TRANSPARENT_BITBLT)
- } else {
- XGI300SetupROP(XGIALUConv[rop])
- }
- if(xdir > 0) {
- XGI300SetupCMDFlag(X_INC)
- }
- if(ydir > 0) {
- XGI300SetupCMDFlag(Y_INC)
- }
-}
-
-static void
-XGI300SubsequentScreenToScreenCopy(int src_x, int src_y, int dst_x, int dst_y,
- int width, int height)
-{
- long srcbase, dstbase;
-
- srcbase = dstbase = 0;
- if (src_y >= 2048) {
- srcbase = xgi_video_info.video_linelength * src_y;
- src_y = 0;
- }
- if (dst_y >= 2048) {
- dstbase = xgi_video_info.video_linelength * dst_y;
- dst_y = 0;
- }
-
- XGI300SetupSRCBase(srcbase);
- XGI300SetupDSTBase(dstbase);
-
- if(!(xgi_video_info.CommandReg & X_INC)) {
- src_x += width-1;
- dst_x += width-1;
- }
- if(!(xgi_video_info.CommandReg & Y_INC)) {
- src_y += height-1;
- dst_y += height-1;
- }
- XGI300SetupRect(width, height)
- XGI300SetupSRCXY(src_x, src_y)
- XGI300SetupDSTXY(dst_x, dst_y)
- XGI300DoCMD
-}
-
-static void
-XGI300SetupForSolidFill(int color, int rop, unsigned int planemask)
-{
- XGI300SetupPATFG(color)
- XGI300SetupDSTRect(xgi_video_info.video_linelength, 0xFFF)
- XGI300SetupDSTColorDepth(xgi_video_info.DstColor);
- XGI300SetupROP(XGIPatALUConv[rop])
- XGI300SetupCMDFlag(PATFG)
-}
-static void
-XGI300SubsequentSolidFillRect(int x, int y, int w, int h)
-{
- long dstbase;
-
- dstbase = 0;
- if(y >= 2048) {
- dstbase = xgi_video_info.video_linelength * y;
- y = 0;
- }
- XGI300SetupDSTBase(dstbase)
- XGI300SetupDSTXY(x,y)
- XGI300SetupRect(w,h)
- XGI300SetupCMDFlag(X_INC | Y_INC | BITBLT)
- XGI300DoCMD
-}
-#endif
/* 310/325 series ------------------------------------------------ */
static void
@@ -326,8 +214,6 @@ void XGIfb_syncaccel(void)
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,34) /* --- KERNEL 2.5.34 and later --- */
-
int fbcon_XGI_sync(struct fb_info *info)
{
if(!XGIfb_accel) return 0;
@@ -399,198 +285,5 @@ void fbcon_XGI_copyarea(struct fb_info *info, const struct fb_copyarea *area)
}
-#endif
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,33) /* ------ KERNEL <2.5.34 ------ */
-
-void fbcon_XGI_bmove(struct display *p, int srcy, int srcx,
- int dsty, int dstx, int height, int width)
-{
- int xdir, ydir;
- CRITFLAGS
-
- if(!xgi_video_info.accel) {
- switch(xgi_video_info.video_bpp) {
- case 8:
-#ifdef FBCON_HAS_CFB8
- fbcon_cfb8_bmove(p, srcy, srcx, dsty, dstx, height, width);
-#endif
- break;
- case 16:
-#ifdef FBCON_HAS_CFB16
- fbcon_cfb16_bmove(p, srcy, srcx, dsty, dstx, height, width);
-#endif
- break;
- case 32:
-#ifdef FBCON_HAS_CFB32
- fbcon_cfb32_bmove(p, srcy, srcx, dsty, dstx, height, width);
-#endif
- break;
- }
- return;
- }
-
- srcx *= fontwidth(p);
- srcy *= fontheight(p);
- dstx *= fontwidth(p);
- dsty *= fontheight(p);
- width *= fontwidth(p);
- height *= fontheight(p);
-
- if(srcx < dstx) xdir = 0;
- else xdir = 1;
- if(srcy < dsty) ydir = 0;
- else ydir = 1;
-
-
- CRITBEGIN
- XGI310SetupForScreenToScreenCopy(xdir, ydir, 3, 0, -1);
- XGI310SubsequentScreenToScreenCopy(srcx, srcy, dstx, dsty, width, height);
- CRITEND
- XGI310Sync();
-#if 0
- printk(KERN_INFO "XGI_bmove sx %d sy %d dx %d dy %d w %d h %d\n",
- srcx, srcy, dstx, dsty, width, height);
-#endif
-
-}
-
-
-static void fbcon_XGI_clear(struct vc_data *conp, struct display *p,
- int srcy, int srcx, int height, int width, int color)
-{
- CRITFLAGS
-
- srcx *= fontwidth(p);
- srcy *= fontheight(p);
- width *= fontwidth(p);
- height *= fontheight(p);
-
-
- CRITBEGIN
- XGI310SetupForSolidFill(color, 3, 0);
- XGI310SubsequentSolidFillRect(srcx, srcy, width, height);
- CRITEND
- XGI310Sync();
-
-}
-
-void fbcon_XGI_clear8(struct vc_data *conp, struct display *p,
- int srcy, int srcx, int height, int width)
-{
- u32 bgx;
-
- if(!xgi_video_info.accel) {
-#ifdef FBCON_HAS_CFB8
- fbcon_cfb8_clear(conp, p, srcy, srcx, height, width);
-#endif
- return;
- }
-
- bgx = attr_bgcol_ec(p, conp);
- fbcon_XGI_clear(conp, p, srcy, srcx, height, width, bgx);
-}
-
-void fbcon_XGI_clear16(struct vc_data *conp, struct display *p,
- int srcy, int srcx, int height, int width)
-{
- u32 bgx;
- if(!xgi_video_info.accel) {
-#ifdef FBCON_HAS_CFB16
- fbcon_cfb16_clear(conp, p, srcy, srcx, height, width);
-#endif
- return;
- }
-
- bgx = ((u_int16_t*)p->dispsw_data)[attr_bgcol_ec(p, conp)];
- fbcon_XGI_clear(conp, p, srcy, srcx, height, width, bgx);
-}
-
-void fbcon_XGI_clear32(struct vc_data *conp, struct display *p,
- int srcy, int srcx, int height, int width)
-{
- u32 bgx;
-
- if(!xgi_video_info.accel) {
-#ifdef FBCON_HAS_CFB32
- fbcon_cfb32_clear(conp, p, srcy, srcx, height, width);
-#endif
- return;
- }
-
- bgx = ((u_int32_t*)p->dispsw_data)[attr_bgcol_ec(p, conp)];
- fbcon_XGI_clear(conp, p, srcy, srcx, height, width, bgx);
-}
-
-void fbcon_XGI_revc(struct display *p, int srcx, int srcy)
-{
- CRITFLAGS
-
- if(!xgi_video_info.accel) {
- switch(xgi_video_info.video_bpp) {
- case 16:
-#ifdef FBCON_HAS_CFB16
- fbcon_cfb16_revc(p, srcx, srcy);
-#endif
- break;
- case 32:
-#ifdef FBCON_HAS_CFB32
- fbcon_cfb32_revc(p, srcx, srcy);
-#endif
- break;
- }
- return;
- }
-
- srcx *= fontwidth(p);
- srcy *= fontheight(p);
-
-
- CRITBEGIN
- XGI310SetupForSolidFill(0, 0x0a, 0);
- XGI310SubsequentSolidFillRect(srcx, srcy, fontwidth(p), fontheight(p));
- CRITEND
- XGI310Sync();
-
-}
-
-#ifdef FBCON_HAS_CFB8
-struct display_switch fbcon_XGI8 = {
- setup: fbcon_cfb8_setup,
- bmove: fbcon_XGI_bmove,
- clear: fbcon_XGI_clear8,
- putc: fbcon_cfb8_putc,
- putcs: fbcon_cfb8_putcs,
- revc: fbcon_cfb8_revc,
- clear_margins: fbcon_cfb8_clear_margins,
- fontwidthmask: FONTWIDTH(4)|FONTWIDTH(8)|FONTWIDTH(12)|FONTWIDTH(16)
-};
-#endif
-#ifdef FBCON_HAS_CFB16
-struct display_switch fbcon_XGI16 = {
- setup: fbcon_cfb16_setup,
- bmove: fbcon_XGI_bmove,
- clear: fbcon_XGI_clear16,
- putc: fbcon_cfb16_putc,
- putcs: fbcon_cfb16_putcs,
- revc: fbcon_XGI_revc,
- clear_margins: fbcon_cfb16_clear_margins,
- fontwidthmask: FONTWIDTH(4)|FONTWIDTH(8)|FONTWIDTH(12)|FONTWIDTH(16)
-};
-#endif
-#ifdef FBCON_HAS_CFB32
-struct display_switch fbcon_XGI32 = {
- setup: fbcon_cfb32_setup,
- bmove: fbcon_XGI_bmove,
- clear: fbcon_XGI_clear32,
- putc: fbcon_cfb32_putc,
- putcs: fbcon_cfb32_putcs,
- revc: fbcon_XGI_revc,
- clear_margins: fbcon_cfb32_clear_margins,
- fontwidthmask: FONTWIDTH(4)|FONTWIDTH(8)|FONTWIDTH(12)|FONTWIDTH(16)
-};
-#endif
-
-#endif /* KERNEL VERSION */
diff --git a/drivers/staging/xgifb/XGI_accel.h b/drivers/staging/xgifb/XGI_accel.h
index 04e126772bb..28c057994b3 100644
--- a/drivers/staging/xgifb/XGI_accel.h
+++ b/drivers/staging/xgifb/XGI_accel.h
@@ -491,21 +491,9 @@ void XGIfb_syncaccel(void);
extern struct video_info xgi_video_info;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,33)
-void fbcon_XGI_bmove(struct display *p, int srcy, int srcx, int dsty,
- int dstx, int height, int width);
-void fbcon_XGI_revc(struct display *p, int srcy, int srcx);
-void fbcon_XGI_clear8(struct vc_data *conp, struct display *p, int srcy,
- int srcx, int height, int width);
-void fbcon_XGI_clear16(struct vc_data *conp, struct display *p, int srcy,
- int srcx, int height, int width);
-void fbcon_XGI_clear32(struct vc_data *conp, struct display *p, int srcy,
- int srcx, int height, int width);
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,34)
extern int XGIfb_accel;
void fbcon_XGI_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
void fbcon_XGI_copyarea(struct fb_info *info, const struct fb_copyarea *area);
-#endif
+
#endif
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 4f4171e8a68..fd1152eb2c9 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -42,17 +42,10 @@
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
#define XGI_IOTYPE1 void __iomem
#define XGI_IOTYPE2 __iomem
#define XGIINITSTATIC static
-#else
-#define XGI_IOTYPE1 unsigned char
-#define XGI_IOTYPE2
-#define XGIINITSTATIC
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static struct pci_device_id __devinitdata xgifb_pci_table[] = {
{ PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -63,7 +56,7 @@ static struct pci_device_id __devinitdata xgifb_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
-#endif
+
/* To be included in fb.h */
#ifndef FB_ACCEL_XGI_GLAMOUR_2
#define FB_ACCEL_XGI_GLAMOUR_2 40 /* XGI 315, 650, 740 */
@@ -255,9 +248,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define BRI_DRAM_SIZE_32MB 0x04
#define BRI_DRAM_SIZE_64MB 0x05
-#define HW_DEVICE_EXTENSION XGI_HW_DEVICE_INFO
-#define PHW_DEVICE_EXTENSION PXGI_HW_DEVICE_INFO
-
#define SR_BUFFER_SIZE 5
#define CR_BUFFER_SIZE 5
@@ -300,11 +290,7 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* ------------------- Global Variables ----------------------------- */
/* Fbcon variables */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static struct fb_info* fb_info;
-#else
-static struct fb_info XGI_fb_info;
-#endif
static int video_type = FB_TYPE_PACKED_PIXELS;
@@ -336,12 +322,8 @@ static struct fb_var_screeninfo default_var = {
.vsync_len = 0,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- .reserved = {0, 0, 0, 0, 0, 0}
-#endif
};
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static struct fb_fix_screeninfo XGIfb_fix = {
.id = "XGI",
.type = FB_TYPE_PACKED_PIXELS,
@@ -350,28 +332,7 @@ static struct fb_fix_screeninfo XGIfb_fix = {
};
static char myid[20];
static u32 pseudo_palette[17];
-#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-static struct display XGI_disp;
-
-static struct display_switch XGIfb_sw;
-
-static struct {
- u16 blue, green, red, pad;
-} XGI_palette[256];
-
-static union {
-#ifdef FBCON_HAS_CFB16
- u16 cfb16[16];
-#endif
-#ifdef FBCON_HAS_CFB32
- u32 cfb32[16];
-#endif
-} XGI_fbcon_cmap;
-
-static int XGIfb_inverse = 0;
-#endif
/* display status */
static int XGIfb_off = 0;
@@ -380,9 +341,6 @@ static int XGIfb_forcecrt1 = -1;
static int XGIvga_enabled = 0;
static int XGIfb_userom = 0;
//static int XGIfb_useoem = -1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-static int currcon = 0;
-#endif
/* global flags */
static int XGIfb_registered;
@@ -415,10 +373,10 @@ unsigned char XGIfb_detectedlcda = 0xff;
/* XGIfb_info XGIfbinfo; */
/* TW: Hardware extension; contains data on hardware */
-HW_DEVICE_EXTENSION XGIhw_ext;
+struct xgi_hw_device_info XGIhw_ext;
/* TW: XGI private structure */
-VB_DEVICE_INFO XGI_Pr;
+struct vb_device_info XGI_Pr;
/* card parameters */
static unsigned long XGIfb_mmio_size = 0;
@@ -530,29 +488,21 @@ struct _XGIbios_mode {
/* mode-related variables */
#ifdef MODULE
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static int xgifb_mode_idx = 1;
#else
-static int XGIfb_mode_idx = MODE_INDEX_NONE; /* Don't use a mode by default if we are a module */
-#endif
-#else
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static int xgifb_mode_idx = -1; /* Use a default mode if we are inside the kernel */
-#else
-static int XGIfb_mode_idx = -1;
-#endif
#endif
u8 XGIfb_mode_no = 0;
u8 XGIfb_rate_idx = 0;
/* TW: CR36 evaluation */
-const USHORT XGI300paneltype[] =
+const unsigned short XGI300paneltype[] =
{ LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
LCD_1280x960, LCD_640x480, LCD_1024x600, LCD_1152x768,
LCD_1024x768, LCD_1024x768, LCD_1024x768,
LCD_1024x768, LCD_1024x768, LCD_1024x768, LCD_1024x768 };
-const USHORT XGI310paneltype[] =
+const unsigned short XGI310paneltype[] =
{ LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
LCD_1152x768, LCD_1400x1050,LCD_1280x768, LCD_1600x1200,
@@ -648,17 +598,6 @@ static const struct _chswtable {
{ 0, 0, "" , "" }
};
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-/* Offscreen layout */
-typedef struct _XGI_GLYINFO {
- unsigned char ch;
- int fontwidth;
- int fontheight;
- u8 gmask[72];
- int ngmask;
-} XGI_GLYINFO;
-#endif
-
typedef struct _XGI_OH {
struct _XGI_OH *poh_next;
struct _XGI_OH *poh_prev;
@@ -852,50 +791,6 @@ XGIINITSTATIC int __init XGIfb_setup(char *options);
/* fbdev routines */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- int XGIfb_init(void);
-static int XGIfb_get_fix(struct fb_fix_screeninfo *fix,
- int con,
- struct fb_info *info);
-static int XGIfb_get_var(struct fb_var_screeninfo *var,
- int con,
- struct fb_info *info);
-static int XGIfb_set_var(struct fb_var_screeninfo *var,
- int con,
- struct fb_info *info);
-static void XGIfb_crtc_to_var(struct fb_var_screeninfo *var);
-static int XGIfb_get_cmap(struct fb_cmap *cmap,
- int kspc,
- int con,
- struct fb_info *info);
-static int XGIfb_set_cmap(struct fb_cmap *cmap,
- int kspc,
- int con,
- struct fb_info *info);
-static int XGIfb_update_var(int con,
- struct fb_info *info);
-static int XGIfb_switch(int con,
- struct fb_info *info);
-static void XGIfb_blank(int blank,
- struct fb_info *info);
-static void XGIfb_set_disp(int con,
- struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int XGI_getcolreg(unsigned regno, unsigned *red, unsigned *green,
- unsigned *blue, unsigned *transp,
- struct fb_info *fb_info);
-static void XGIfb_do_install_cmap(int con,
- struct fb_info *info);
-static void XGI_get_glyph(struct fb_info *info,
- XGI_GLYINFO *gly);
-static int XGIfb_mmap(struct fb_info *info, struct file *file,
- struct vm_area_struct *vma);
-static int XGIfb_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int con,
- struct fb_info *info);
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
XGIINITSTATIC int __init xgifb_init(void);
static int XGIfb_set_par(struct fb_info *info);
static int XGIfb_blank(int blank,
@@ -907,36 +802,25 @@ extern void fbcon_XGI_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
extern void fbcon_XGI_copyarea(struct fb_info *info,
const struct fb_copyarea *area);
-#if 0
-extern void cfb_imageblit(struct fb_info *info,
- const struct fb_image *image);
-#endif
extern int fbcon_XGI_sync(struct fb_info *info);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
static int XGIfb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
-#else
-static int XGIfb_ioctl(struct inode *inode,
- struct file *file,
- unsigned int cmd,
- unsigned long arg,
- struct fb_info *info);
-#endif
/*
extern int XGIfb_mode_rate_to_dclock(VB_DEVICE_INFO *XGI_Pr,
- PXGI_HW_DEVICE_INFO HwDeviceExtension,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned char modeno, unsigned char rateindex);
-extern int XGIfb_mode_rate_to_ddata(VB_DEVICE_INFO *XGI_Pr, PXGI_HW_DEVICE_INFO HwDeviceExtension,
+extern int XGIfb_mode_rate_to_ddata(VB_DEVICE_INFO *XGI_Pr, struct xgi_hw_device_info *HwDeviceExtension,
unsigned char modeno, unsigned char rateindex,
unsigned int *left_margin, unsigned int *right_margin,
unsigned int *upper_margin, unsigned int *lower_margin,
unsigned int *hsync_len, unsigned int *vsync_len,
unsigned int *sync, unsigned int *vmode);
*/
-#endif
- extern BOOLEAN XGI_SearchModeID( USHORT ModeNo,USHORT *ModeIdIndex, PVB_DEVICE_INFO );
+extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
+ unsigned short *ModeIdIndex,
+ struct vb_device_info *);
static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
struct fb_info *info);
@@ -956,10 +840,10 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
static void XGIfb_pre_setmode(void);
static void XGIfb_post_setmode(void);
-static BOOLEAN XGIfb_CheckVBRetrace(void);
-static BOOLEAN XGIfbcheckvretracecrt2(void);
-static BOOLEAN XGIfbcheckvretracecrt1(void);
-static BOOLEAN XGIfb_bridgeisslave(void);
+static unsigned char XGIfb_CheckVBRetrace(void);
+static unsigned char XGIfbcheckvretracecrt2(void);
+static unsigned char XGIfbcheckvretracecrt1(void);
+static unsigned char XGIfb_bridgeisslave(void);
struct XGI_memreq {
unsigned long offset;
@@ -994,30 +878,40 @@ static XGI_OH *XGIfb_poh_free(unsigned long base);
static void XGIfb_free_node(XGI_OH *poh);
/* Internal routines to access PCI configuration space */
-BOOLEAN XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
- unsigned long offset, unsigned long set, unsigned long *value);
+unsigned char XGIfb_query_VGA_config_space(struct xgi_hw_device_info *pXGIhw_ext,
+ unsigned long offset,
+ unsigned long set,
+ unsigned long *value);
//BOOLEAN XGIfb_query_north_bridge_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
// unsigned long offset, unsigned long set, unsigned long *value);
/* Routines from init.c/init301.c */
-extern void InitTo330Pointer(UCHAR,PVB_DEVICE_INFO pVBInfo);
-extern BOOLEAN XGIInitNew(PXGI_HW_DEVICE_INFO HwDeviceExtension);
-extern BOOLEAN XGISetModeNew(PXGI_HW_DEVICE_INFO HwDeviceExtension, USHORT ModeNo);
+extern void InitTo330Pointer(unsigned char, struct vb_device_info *pVBInfo);
+extern unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension);
+extern unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo);
//extern void XGI_SetEnableDstn(VB_DEVICE_INFO *XGI_Pr);
-extern void XGI_LongWait(VB_DEVICE_INFO *XGI_Pr);
-extern USHORT XGI_GetRatePtrCRT2( PXGI_HW_DEVICE_INFO pXGIHWDE, USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo );
+extern void XGI_LongWait(struct vb_device_info *XGI_Pr);
+extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
/* TW: Chrontel TV functions */
-extern USHORT XGI_GetCH700x(VB_DEVICE_INFO *XGI_Pr, USHORT tempbx);
-extern void XGI_SetCH700x(VB_DEVICE_INFO *XGI_Pr, USHORT tempbx);
-extern USHORT XGI_GetCH701x(VB_DEVICE_INFO *XGI_Pr, USHORT tempbx);
-extern void XGI_SetCH701x(VB_DEVICE_INFO *XGI_Pr, USHORT tempbx);
-extern void XGI_SetCH70xxANDOR(VB_DEVICE_INFO *XGI_Pr, USHORT tempax,USHORT tempbh);
-extern void XGI_DDC2Delay(VB_DEVICE_INFO *XGI_Pr, USHORT delaytime);
+extern unsigned short XGI_GetCH700x(struct vb_device_info *XGI_Pr,
+ unsigned short tempbx);
+extern void XGI_SetCH700x(struct vb_device_info *XGI_Pr, unsigned short tempbx);
+extern unsigned short XGI_GetCH701x(struct vb_device_info *XGI_Pr,
+ unsigned short tempbx);
+extern void XGI_SetCH701x(struct vb_device_info *XGI_Pr, unsigned short tempbx);
+extern void XGI_SetCH70xxANDOR(struct vb_device_info *XGI_Pr,
+ unsigned short tempax,
+ unsigned short tempbh);
+extern void XGI_DDC2Delay(struct vb_device_info *XGI_Pr, unsigned short delaytime);
/* TW: Sensing routines */
void XGI_Sense30x(void);
int XGIDoSense(int tempbl, int tempbh, int tempcl, int tempch);
-extern XGI21_LVDSCapStruct XGI21_LCDCapList[13];
+extern struct XGI21_LVDSCapStruct XGI21_LCDCapList[13];
#endif
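
Note on the header conversions above: the old BOOLEAN helpers returned TRUE/FALSE, while the converted ones return unsigned char 0/1, so truth-value tests at the call sites keep working unchanged. A minimal sketch of that pattern (example_bridge_is_slave() and example_caller() are illustrative stand-ins, not functions in this driver; the register test mirrors the one XGIfb_bridgeisslave() performs):

#include <linux/kernel.h>

/* Returns 1 when the bridge reports slave mode, 0 otherwise -- the same
 * 0/1 semantics the converted unsigned char predicates use. */
static unsigned char example_bridge_is_slave(unsigned char part1_00)
{
	return (part1_00 & 0x50) == 0x10;
}

static void example_caller(unsigned char part1_00)
{
	if (example_bridge_is_slave(part1_00))	/* truth test unchanged by the type switch */
		pr_info("XGIfb: video bridge is in slave mode\n");
}
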
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 867012b48a0..976c39bb286 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -28,9 +28,6 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
-#include <linux/kernel.h>
-
-#include "osdef.h"
#ifndef XGIFB_PAN
@@ -164,16 +161,15 @@ struct video_info xgi_video_info;
/* --------------- Hardware Access Routines -------------------------- */
-#ifdef LINUX_KERNEL
int
-XGIfb_mode_rate_to_dclock(VB_DEVICE_INFO *XGI_Pr, PXGI_HW_DEVICE_INFO HwDeviceExtension,
+XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr, struct xgi_hw_device_info *HwDeviceExtension,
unsigned char modeno, unsigned char rateindex)
{
- USHORT ModeNo = modeno;
- USHORT ModeIdIndex = 0, ClockIndex = 0;
- USHORT RefreshRateTableIndex = 0;
+ unsigned short ModeNo = modeno;
+ unsigned short ModeIdIndex = 0, ClockIndex = 0;
+ unsigned short RefreshRateTableIndex = 0;
- /*ULONG temp = 0;*/
+ /*unsigned long temp = 0;*/
int Clock;
XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer( HwDeviceExtension->jChipType, XGI_Pr ) ;
@@ -201,16 +197,16 @@ XGIfb_mode_rate_to_dclock(VB_DEVICE_INFO *XGI_Pr, PXGI_HW_DEVICE_INFO HwDeviceEx
}
int
-XGIfb_mode_rate_to_ddata(VB_DEVICE_INFO *XGI_Pr, PXGI_HW_DEVICE_INFO HwDeviceExtension,
+XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, struct xgi_hw_device_info *HwDeviceExtension,
unsigned char modeno, unsigned char rateindex,
u32 *left_margin, u32 *right_margin,
u32 *upper_margin, u32 *lower_margin,
u32 *hsync_len, u32 *vsync_len,
u32 *sync, u32 *vmode)
{
- USHORT ModeNo = modeno;
- USHORT ModeIdIndex = 0, index = 0;
- USHORT RefreshRateTableIndex = 0;
+ unsigned short ModeNo = modeno;
+ unsigned short ModeIdIndex = 0, index = 0;
+ unsigned short RefreshRateTableIndex = 0;
unsigned short VRE, VBE, VRS, VBS, VDE, VT;
unsigned short HRE, HBE, HRS, HBS, HDE, HT;
@@ -375,26 +371,13 @@ XGIfb_mode_rate_to_ddata(VB_DEVICE_INFO *XGI_Pr, PXGI_HW_DEVICE_INFO HwDeviceExt
}
}
-#if 0 /* That's bullshit, only the resolution needs to be shifted */
- if((*vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
- *upper_margin <<= 1;
- *lower_margin <<= 1;
- *vsync_len <<= 1;
- } else if((*vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
- *upper_margin >>= 1;
- *lower_margin >>= 1;
- *vsync_len >>= 1;
- }
-#endif
-
return 1;
}
-#endif
-void XGIRegInit(VB_DEVICE_INFO *XGI_Pr, ULONG BaseAddr)
+void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
{
XGI_Pr->RelIO = BaseAddr;
XGI_Pr->P3c4 = BaseAddr + 0x14;
@@ -432,8 +415,8 @@ u32 XGIfb_get_reg3(u16 port)
/* ------------ Interface for init & mode switching code ------------- */
-BOOLEAN
-XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
+unsigned char
+XGIfb_query_VGA_config_space(struct xgi_hw_device_info *pXGIhw_ext,
unsigned long offset, unsigned long set, unsigned long *value)
{
static struct pci_dev *pdev = NULL;
@@ -445,10 +428,10 @@ XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
DPRINTK("XGIfb: Set offset 0x%lx to 0x%lx\n", offset, *value);
if (!init) {
- init = TRUE;
+ init = 1;
pdev = pci_get_device(PCI_VENDOR_ID_XG, xgi_video_info.chip_id, pdev);
if (pdev) {
- valid_pdev = TRUE;
+ valid_pdev = 1;
pci_dev_put(pdev);
}
}
@@ -456,7 +439,7 @@ XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
if (!valid_pdev) {
printk(KERN_DEBUG "XGIfb: Can't find XGI %d VGA device.\n",
xgi_video_info.chip_id);
- return FALSE;
+ return 0;
}
if (set == 0)
@@ -464,10 +447,10 @@ XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
else
pci_write_config_dword(pdev, offset, (u32)(*value));
- return TRUE;
+ return 1;
}
-/*BOOLEAN XGIfb_query_north_bridge_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
+/*unsigned char XGIfb_query_north_bridge_space(struct xgi_hw_device_info *pXGIhw_ext,
unsigned long offset, unsigned long set, unsigned long *value)
{
static struct pci_dev *pdev = NULL;
@@ -475,7 +458,7 @@ XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
u16 nbridge_id = 0;
if (!init) {
- init = TRUE;
+ init = 1;
switch (xgi_video_info.chip) {
case XGI_540:
nbridge_id = PCI_DEVICE_ID_XG_540;
@@ -502,13 +485,13 @@ XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
pdev = pci_find_device(PCI_VENDOR_ID_SI, nbridge_id, pdev);
if (pdev)
- valid_pdev = TRUE;
+ valid_pdev = 1;
}
if (!valid_pdev) {
printk(KERN_DEBUG "XGIfb: Can't find XGI %d North Bridge device.\n",
nbridge_id);
- return FALSE;
+ return 0;
}
if (set == 0)
@@ -516,7 +499,7 @@ XGIfb_query_VGA_config_space(PXGI_HW_DEVICE_INFO pXGIhw_ext,
else
pci_write_config_dword(pdev, offset, (u32)(*value));
- return TRUE;
+ return 1;
}
*/
/* ------------------ Internal helper routines ----------------- */
@@ -627,7 +610,8 @@ int XGIfb_GetXG21LVDSData(void)
i += 25;
j--;
k++;
- } while ( (j>0) && ( k < (sizeof(XGI21_LCDCapList)/sizeof(XGI21_LVDSCapStruct)) ) );
+ } while ((j > 0) &&
+ (k < (sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct))));
return 1;
}
return 0;
@@ -954,46 +938,52 @@ static void XGIfb_search_tvstd(const char *name)
}
}
-static BOOLEAN XGIfb_bridgeisslave(void)
+static unsigned char XGIfb_bridgeisslave(void)
{
unsigned char usScratchP1_00;
- if(xgi_video_info.hasVB == HASVB_NONE) return FALSE;
+ if (xgi_video_info.hasVB == HASVB_NONE)
+ return 0;
inXGIIDXREG(XGIPART1,0x00,usScratchP1_00);
- if( (usScratchP1_00 & 0x50) == 0x10) {
- return TRUE;
- } else {
- return FALSE;
- }
+ if ((usScratchP1_00 & 0x50) == 0x10)
+ return 1;
+ else
+ return 0;
}
-static BOOLEAN XGIfbcheckvretracecrt1(void)
+static unsigned char XGIfbcheckvretracecrt1(void)
{
unsigned char temp;
inXGIIDXREG(XGICR,0x17,temp);
- if(!(temp & 0x80)) return FALSE;
+ if (!(temp & 0x80))
+ return 0;
inXGIIDXREG(XGISR,0x1f,temp);
- if(temp & 0xc0) return FALSE;
+ if (temp & 0xc0)
+ return 0;
-
- if(inXGIREG(XGIINPSTAT) & 0x08) return TRUE;
- else return FALSE;
+ if (inXGIREG(XGIINPSTAT) & 0x08)
+ return 1;
+ else
+ return 0;
}
-static BOOLEAN XGIfbcheckvretracecrt2(void)
+static unsigned char XGIfbcheckvretracecrt2(void)
{
unsigned char temp;
- if(xgi_video_info.hasVB == HASVB_NONE) return FALSE;
+ if (xgi_video_info.hasVB == HASVB_NONE)
+ return 0;
inXGIIDXREG(XGIPART1, 0x30, temp);
- if(temp & 0x02) return FALSE;
- else return TRUE;
+ if (temp & 0x02)
+ return 0;
+ else
+ return 1;
}
-static BOOLEAN XGIfb_CheckVBRetrace(void)
+static unsigned char XGIfb_CheckVBRetrace(void)
{
if(xgi_video_info.disp_state & DISPTYPE_DISP2) {
if(XGIfb_bridgeisslave()) {
@@ -1350,11 +1340,7 @@ static int XGIfb_set_par(struct fb_info *info)
// printk("XGIfb: inside set_par\n");
if((err = XGIfb_do_set_var(&info->var, 1, info)))
return err;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
- XGIfb_get_fix(&info->fix, info->currcon, info);
-#else
XGIfb_get_fix(&info->fix, -1, info);
-#endif
// printk("XGIfb:end of set_par\n");
return 0;
}
@@ -1540,58 +1526,7 @@ static int XGIfb_pan_display( struct fb_var_screeninfo *var,
}
#endif
-#if 0
-static int XGIfb_mmap(struct fb_info *info, struct file *file,
- struct vm_area_struct *vma)
-{
- unsigned long start;
- unsigned long off;
- u32 len, mmio_off;
-
- DEBUGPRN("inside mmap");
- if(vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL;
-
- off = vma->vm_pgoff << PAGE_SHIFT;
-
- start = (unsigned long) xgi_video_info.video_base;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + xgi_video_info.video_size);
- start &= PAGE_MASK;
-#if 0
- if (off >= len) {
- off -= len;
-#endif
- /* By Jake Page: Treat mmap request with offset beyond heapstart
- * as request for mapping the mmio area
- */
- #if 1
- mmio_off = PAGE_ALIGN((start & ~PAGE_MASK) + xgi_video_info.heapstart);
- if(off >= mmio_off) {
- off -= mmio_off;
- if(info->var.accel_flags) return -EINVAL;
-
- start = (unsigned long) xgi_video_info.mmio_base;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + XGIfb_mmio_size);
- }
- start &= PAGE_MASK;
- #endif
- if((vma->vm_end - vma->vm_start + off) > len) return -EINVAL;
-
- off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
- vma->vm_flags |= VM_IO; /* by Jake Page; is that really needed? */
-
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3)
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-#endif
- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- DEBUGPRN("end of mmap");
- return 0;
-}
-#endif
static int XGIfb_blank(int blank, struct fb_info *info)
{
u8 reg;
@@ -1610,15 +1545,8 @@ static int XGIfb_blank(int blank, struct fb_info *info)
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
static int XGIfb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
-#else
-static int XGIfb_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg,
- struct fb_info *info)
-#endif
-
{
DEBUGPRN("inside ioctl");
switch (cmd) {
@@ -1687,7 +1615,7 @@ static int XGIfb_ioctl(struct inode *inode, struct file *file,
break;
case XGIFB_GET_INFO: /* TW: New for communication with X driver */
{
- XGIfb_info *x = (XGIfb_info *)arg;
+ struct XGIfb_info *x = (struct XGIfb_info *)arg;
//x->XGIfb_id = XGIFB_ID;
x->XGIfb_version = VER_MAJOR;
@@ -1786,9 +1714,6 @@ static struct fb_ops XGIfb_ops = {
.fb_fillrect = fbcon_XGI_fillrect,
.fb_copyarea = fbcon_XGI_copyarea,
.fb_imageblit = cfb_imageblit,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
- .fb_cursor = soft_cursor,
-#endif
.fb_sync = fbcon_XGI_sync,
.fb_ioctl = XGIfb_ioctl,
// .fb_mmap = XGIfb_mmap,
@@ -2008,9 +1933,9 @@ static int XGIfb_has_VB(void)
break;
default:
xgi_video_info.hasVB = HASVB_NONE;
- return FALSE;
+ return 0;
}
- return TRUE;
+ return 1;
}
@@ -2664,13 +2589,7 @@ static void XGIfb_pre_setmode(void)
static void XGIfb_post_setmode(void)
{
u8 reg;
- BOOLEAN doit = TRUE;
-#if 0 /* TW: Wrong: Is not in MMIO space, but in RAM */
- /* Backup mode number to MMIO space */
- if(xgi_video_info.mmio_vbase) {
- *(volatile u8 *)(((u8*)xgi_video_info.mmio_vbase) + 0x449) = (unsigned char)XGIfb_mode_no;
- }
-#endif
+ unsigned char doit = 1;
/* outXGIIDXREG(XGISR,IND_XGI_PASSWORD,XGI_PASSWORD);
outXGIIDXREG(XGICR,0x13,0x00);
setXGIIDXREG(XGISR,0x0E,0xF0,0x01);
@@ -2678,11 +2597,11 @@ static void XGIfb_post_setmode(void)
if (xgi_video_info.video_bpp == 8) {
/* TW: We can't switch off CRT1 on LVDS/Chrontel in 8bpp Modes */
if ((xgi_video_info.hasVB == HASVB_LVDS) || (xgi_video_info.hasVB == HASVB_LVDS_CHRONTEL)) {
- doit = FALSE;
+ doit = 0;
}
/* TW: We can't switch off CRT1 on 301B-DH in 8bpp Modes if using LCD */
if (xgi_video_info.disp_state & DISPTYPE_LCD) {
- doit = FALSE;
+ doit = 0;
}
}
@@ -2691,14 +2610,15 @@ static void XGIfb_post_setmode(void)
inXGIIDXREG(XGIPART1, 0x00, reg);
- if((reg & 0x50) == 0x10) {
- doit = FALSE;
- }
+ if ((reg & 0x50) == 0x10)
+ doit = 0;
- } else XGIfb_crt1off = 0;
+
+ } else
+ XGIfb_crt1off = 0;
inXGIIDXREG(XGICR, 0x17, reg);
- if((XGIfb_crt1off) && (doit))
+ if ((XGIfb_crt1off) && (doit))
reg &= ~0x80;
else
reg |= 0x80;
@@ -2907,7 +2827,7 @@ XGIINITSTATIC int __init XGIfb_setup(char *options)
static unsigned char VBIOS_BUF[65535];
-unsigned char* attempt_map_rom(struct pci_dev *dev,void *copy_address)
+unsigned char *attempt_map_rom(struct pci_dev *dev, void *copy_address)
{
u32 rom_size = 0;
u32 rom_address = 0;
@@ -2962,15 +2882,9 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
XGIfb_registered = 0;
- memset(&XGIhw_ext, 0, sizeof(HW_DEVICE_EXTENSION));
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,3))
+ memset(&XGIhw_ext, 0, sizeof(struct xgi_hw_device_info));
fb_info = framebuffer_alloc(sizeof(struct fb_info), &pdev->dev);
if(!fb_info) return -ENOMEM;
-#else
- XGI_fb_info = kmalloc( sizeof(struct fb_info), GFP_KERNEL);
- if(!XGI_fb_info) return -ENOMEM;
- memset(XGI_fb_info, 0, sizeof(struct fb_info));
-#endif
xgi_video_info.chip_id = pdev->device;
pci_read_config_byte(pdev, PCI_REVISION_ID,&xgi_video_info.revision_id);
@@ -2988,14 +2902,15 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xgi_video_info.mmio_base = pci_resource_start(pdev, 1);
XGIfb_mmio_size = pci_resource_len(pdev, 1);
xgi_video_info.vga_base = pci_resource_start(pdev, 2) + 0x30;
- XGIhw_ext.pjIOAddress = (PUCHAR)xgi_video_info.vga_base;
+ XGIhw_ext.pjIOAddress = (unsigned char *)xgi_video_info.vga_base;
//XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30;
- printk("XGIfb: Relocate IO address: %lx [%08lx] \n", (unsigned long)pci_resource_start(pdev, 2), XGI_Pr.RelIO);
+ printk("XGIfb: Relocate IO address: %lx [%08lx]\n",
+ (unsigned long)pci_resource_start(pdev, 2), XGI_Pr.RelIO);
if (pci_enable_device(pdev))
return -EIO;
- XGIRegInit(&XGI_Pr, (ULONG)XGIhw_ext.pjIOAddress);
+ XGIRegInit(&XGI_Pr, (unsigned long)XGIhw_ext.pjIOAddress);
outXGIIDXREG(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
inXGIIDXREG(XGISR, IND_XGI_PASSWORD, reg1);
@@ -3052,7 +2967,7 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case XG20:
case XG21:
case XG27:
- XGIhw_ext.bIntegratedMMEnabled = TRUE;
+ XGIhw_ext.bIntegratedMMEnabled = 1;
break;
default:
@@ -3080,7 +2995,7 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strcpy(XGIhw_ext.szVBIOSVer, "0.84");
- XGIhw_ext.pSR = vmalloc(sizeof(XGI_DSReg) * SR_BUFFER_SIZE);
+ XGIhw_ext.pSR = vmalloc(sizeof(struct XGI_DSReg) * SR_BUFFER_SIZE);
if (XGIhw_ext.pSR == NULL)
{
printk(KERN_ERR "XGIfb: Fatal error: Allocating SRReg space failed.\n");
@@ -3088,7 +3003,7 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
XGIhw_ext.pSR[0].jIdx = XGIhw_ext.pSR[0].jVal = 0xFF;
- XGIhw_ext.pCR = vmalloc(sizeof(XGI_DSReg) * CR_BUFFER_SIZE);
+ XGIhw_ext.pCR = vmalloc(sizeof(struct XGI_DSReg) * CR_BUFFER_SIZE);
if (XGIhw_ext.pCR == NULL)
{
vfree(XGIhw_ext.pSR);
@@ -3218,7 +3133,7 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xgi_video_info.disp_state = DISPTYPE_LCD;
if (!XGIfb_GetXG21LVDSData()) {
int m;
- for (m=0; m < sizeof(XGI21_LCDCapList)/sizeof(XGI21_LVDSCapStruct); m++) {
+ for (m = 0; m < sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct); m++) {
if ((XGI21_LCDCapList[m].LVDSHDE == XGIbios_mode[xgifb_mode_idx].xres) &&
(XGI21_LCDCapList[m].LVDSVDE == XGIbios_mode[xgifb_mode_idx].yres)) {
XGINew_SetReg1( XGI_Pr.P3d4 , 0x36, m) ;
@@ -3341,14 +3256,14 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
inXGIIDXREG(XGICR,0x38,tmp);
if((tmp & 0x03) == 0x03)
{
-// XGI_Pr.XGI_UseLCDA = TRUE;
+/* XGI_Pr.XGI_UseLCDA = 1; */
}else
{
// Currently on LCDA? (Some newer BIOSes set D0 in CR35)
inXGIIDXREG(XGICR,0x35,tmp);
if(tmp & 0x01)
{
-// XGI_Pr.XGI_UseLCDA = TRUE;
+/* XGI_Pr.XGI_UseLCDA = 1; */
}else
{
inXGIIDXREG(XGICR,0x30,tmp);
@@ -3357,7 +3272,7 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
inXGIIDXREG(XGIPART1,0x13,tmp);
if(tmp & 0x04)
{
-// XGI_Pr.XGI_UseLCDA = TRUE;
+/* XGI_Pr.XGI_UseLCDA = 1; */
}
}
}
@@ -3462,20 +3377,6 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
-
-#if 0
-#ifdef XGIFB_PAN
- if(XGIfb_ypan) {
- default_var.yres_virtual =
- xgi_video_info.heapstart / (default_var.xres * (default_var.bits_per_pixel >> 3));
- if(default_var.yres_virtual <= default_var.yres) {
- default_var.yres_virtual = default_var.yres;
- }
- }
-#endif
-#endif
-
-
xgi_video_info.accel = 0;
if(XGIfb_accel) {
xgi_video_info.accel = -1;
@@ -3511,7 +3412,8 @@ int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
XGIfb_registered = 1;
- printk(KERN_INFO "XGIfb: Installed XGIFB_GET_INFO ioctl (%x)\n", XGIFB_GET_INFO);
+ printk(KERN_INFO "XGIfb: Installed XGIFB_GET_INFO ioctl (%lx)\n",
+ XGIFB_GET_INFO);
/* printk(KERN_INFO "XGIfb: 2D acceleration is %s, scrolling mode %s\n",
XGIfb_accel ? "enabled" : "disabled",
@@ -3538,11 +3440,7 @@ static void __devexit xgifb_remove(struct pci_dev *pdev)
/* Unregister the framebuffer */
// if(xgi_video_info.registered) {
unregister_framebuffer(fb_info);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,3))
framebuffer_release(fb_info);
-#else
- kfree(fb_info);
-#endif
// }
pci_set_drvdata(pdev, NULL);
@@ -3558,7 +3456,6 @@ static struct pci_driver xgifb_driver = {
XGIINITSTATIC int __init xgifb_init(void)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
#ifndef MODULE
char *option = NULL;
@@ -3566,15 +3463,13 @@ XGIINITSTATIC int __init xgifb_init(void)
return -ENODEV;
XGIfb_setup(option);
#endif
-#endif
return(pci_register_driver(&xgifb_driver));
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
+
#ifndef MODULE
module_init(xgifb_init);
#endif
-#endif
/*****************************************************/
/* MODULE */
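
The loops above still open-code the element count as sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct). A plausible follow-up cleanup (not part of this patch) is the kernel's ARRAY_SIZE() helper from <linux/kernel.h>; sketch below, assuming the driver's XGI21_LVDSCapStruct/XGI21_LCDCapList declarations are in scope, with example_find_panel() as a made-up illustration:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

/* Look up the LVDS capability entry matching a native panel resolution. */
static int example_find_panel(unsigned short xres, unsigned short yres)
{
	unsigned int m;

	for (m = 0; m < ARRAY_SIZE(XGI21_LCDCapList); m++)
		if (XGI21_LCDCapList[m].LVDSHDE == xres &&
		    XGI21_LCDCapList[m].LVDSVDE == yres)
			return m;	/* index into XGI21_LCDCapList */
	return -1;			/* no matching panel entry */
}
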
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 41bf163d4e6..ef86a64d699 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -27,7 +27,7 @@
#define XGIFB_ID 0x53495346 /* Identify myself with 'XGIF' */
#endif
-typedef enum _XGI_CHIP_TYPE {
+enum XGI_CHIP_TYPE {
XGI_VGALegacy = 0,
XGI_300,
XGI_630,
@@ -53,9 +53,9 @@ typedef enum _XGI_CHIP_TYPE {
XG21,
XG27,
MAX_XGI_CHIP
-} XGI_CHIP_TYPE;
+};
-typedef enum _TVTYPE {
+enum xgi_tvtype {
TVMODE_NTSC = 0,
TVMODE_PAL,
TVMODE_HIVISION,
@@ -63,13 +63,11 @@ typedef enum _TVTYPE {
TVTYPE_PALN, // vicki@030226
TVTYPE_NTSCJ, // vicki@030226
TVMODE_TOTAL
-} XGI_TV_TYPE;
-
+};
-typedef struct _XGIFB_INFO XGIfb_info;
-struct _XGIFB_INFO {
-unsigned long XGIfb_id;
+struct XGIfb_info {
+ unsigned long XGIfb_id;
int chip_id; /* PCI ID of detected chip */
int memory; /* video memory in KB which XGIfb manages */
int heapstart; /* heap start (= XGIfb "mem" argument) in KB */
@@ -97,7 +95,7 @@ unsigned long XGIfb_id;
-typedef enum _TVPLUGTYPE { // vicki@030226
+enum xgi_tv_plug { /* vicki@030226 */
// TVPLUG_Legacy = 0,
// TVPLUG_COMPOSITE,
// TVPLUG_SVIDEO,
@@ -113,7 +111,7 @@ typedef enum _TVPLUGTYPE { // vicki@030226
TVPLUG_YPBPR_750P = 7,
TVPLUG_YPBPR_1080i = 8,
TVPLUG_TOTAL
-} XGI_TV_PLUG;
+};
struct mode_info {
@@ -132,10 +130,10 @@ struct ap_data {
unsigned long iobase;
unsigned int mem_size;
unsigned long disp_state;
- XGI_CHIP_TYPE chip;
+ enum XGI_CHIP_TYPE chip;
unsigned char hasVB;
- XGI_TV_TYPE TV_type;
- XGI_TV_PLUG TV_plug;
+ enum xgi_tvtype TV_type;
+ enum xgi_tv_plug TV_plug;
unsigned long version;
char reserved[256];
};
@@ -184,7 +182,7 @@ struct video_info{
unsigned char TV_type;
unsigned char TV_plug;
- XGI_CHIP_TYPE chip;
+ enum XGI_CHIP_TYPE chip;
unsigned char revision_id;
unsigned short DstColor;
@@ -207,9 +205,4 @@ struct video_info{
extern struct video_info xgi_video_info;
-#ifdef __KERNEL__
-//extern void xgi_malloc(struct xgi_memreq *req);
-extern void xgi_free(unsigned long base);
-extern void xgi_dispinfo(struct ap_data *rec);
-#endif
#endif
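
With the typedef names gone from XGIfb.h, every use site now spells out the enum keyword, as the struct ap_data and struct video_info hunks above show. A small illustrative sketch (example_describe_chip() is not part of the driver):

#include "XGIfb.h"	/* enum XGI_CHIP_TYPE */

static const char *example_describe_chip(enum XGI_CHIP_TYPE chip)
{
	switch (chip) {
	case XG21:
		return "XG21";
	case XG27:
		return "XG27";
	default:
		return "other XGI chip";
	}
}
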
diff --git a/drivers/staging/xgifb/osdef.h b/drivers/staging/xgifb/osdef.h
deleted file mode 100644
index 4bc7d3a7440..00000000000
--- a/drivers/staging/xgifb/osdef.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#ifndef _OSDEF_H_
-#define _OSDEF_H_
-
-/* #define WINCE_HEADER*/
-/*#define WIN2000*/
-/* #define TC */
-#define LINUX_KERNEL
-/* #define LINUX_XF86 */
-
-/**********************************************************************/
-#ifdef LINUX_KERNEL
-//#include <linux/config.h>
-#endif
-
-
-/**********************************************************************/
-#ifdef TC
-#endif
-#ifdef WIN2000
-#endif
-#ifdef WINCE_HEADER
-#endif
-#ifdef LINUX_XF86
-#define LINUX
-#endif
-#ifdef LINUX_KERNEL
-#define LINUX
-#endif
-
-/**********************************************************************/
-#ifdef TC
-#define XGI_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize);
-#endif
-#ifdef WIN2000
-#define XGI_SetMemory(MemoryAddress,MemorySize,value) MemFill((PVOID) MemoryAddress,(ULONG) MemorySize,(UCHAR) value);
-#endif
-#ifdef WINCE_HEADER
-#define XGI_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize);
-#endif
-#ifdef LINUX_XF86
-#define XGI_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize)
-#endif
-#ifdef LINUX_KERNEL
-#define XGI_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize)
-#endif
-/**********************************************************************/
-
-/**********************************************************************/
-
-#ifdef TC
-#define XGI_MemoryCopy(Destination,Soruce,Length) memmove(Destination, Soruce, Length);
-#endif
-#ifdef WIN2000
-#define XGI_MemoryCopy(Destination,Soruce,Length) /*VideoPortMoveMemory((PUCHAR)Destination , Soruce,length);*/
-#endif
-#ifdef WINCE_HEADER
-#define XGI_MemoryCopy(Destination,Soruce,Length) memmove(Destination, Soruce, Length);
-#endif
-#ifdef LINUX_XF86
-#define XGI_MemoryCopy(Destination,Soruce,Length) memcpy(Destination,Soruce,Length)
-#endif
-#ifdef LINUX_KERNEL
-#define XGI_MemoryCopy(Destination,Soruce,Length) memcpy(Destination,Soruce,Length)
-#endif
-
-/**********************************************************************/
-
-#ifdef OutPortByte
-#undef OutPortByte
-#endif /* OutPortByte */
-
-#ifdef OutPortWord
-#undef OutPortWord
-#endif /* OutPortWord */
-
-#ifdef OutPortLong
-#undef OutPortLong
-#endif /* OutPortLong */
-
-#ifdef InPortByte
-#undef InPortByte
-#endif /* InPortByte */
-
-#ifdef InPortWord
-#undef InPortWord
-#endif /* InPortWord */
-
-#ifdef InPortLong
-#undef InPortLong
-#endif /* InPortLong */
-
-/**********************************************************************/
-/* TC */
-/**********************************************************************/
-
-#ifdef TC
-#define OutPortByte(p,v) outp((unsigned short)(p),(unsigned char)(v))
-#define OutPortWord(p,v) outp((unsigned short)(p),(unsigned short)(v))
-#define OutPortLong(p,v) outp((unsigned short)(p),(unsigned long)(v))
-#define InPortByte(p) inp((unsigned short)(p))
-#define InPortWord(p) inp((unsigned short)(p))
-#define InPortLong(p) ((inp((unsigned short)(p+2))<<16) | inp((unsigned short)(p)))
-#endif
-
-/**********************************************************************/
-/* LINUX XF86 */
-/**********************************************************************/
-
-#ifdef LINUX_XF86
-#define OutPortByte(p,v) outb((CARD16)(p),(CARD8)(v))
-#define OutPortWord(p,v) outw((CARD16)(p),(CARD16)(v))
-#define OutPortLong(p,v) outl((CARD16)(p),(CARD32)(v))
-#define InPortByte(p) inb((CARD16)(p))
-#define InPortWord(p) inw((CARD16)(p))
-#define InPortLong(p) inl((CARD16)(p))
-#endif
-
-#ifdef LINUX_KERNEL
-#define OutPortByte(p,v) outb((u8)(v),(p))
-#define OutPortWord(p,v) outw((u16)(v),(p))
-#define OutPortLong(p,v) outl((u32)(v),(p))
-#define InPortByte(p) inb(p)
-#define InPortWord(p) inw(p)
-#define InPortLong(p) inl(p)
-#endif
-
-/**********************************************************************/
-/* WIN 2000 */
-/**********************************************************************/
-
-#ifdef WIN2000
-#define OutPortByte(p,v) VideoPortWritePortUchar ((PUCHAR) (p), (UCHAR) (v))
-#define OutPortWord(p,v) VideoPortWritePortUshort((PUSHORT) (p), (USHORT) (v))
-#define OutPortLong(p,v) VideoPortWritePortUlong ((PULONG) (p), (ULONG) (v))
-#define InPortByte(p) VideoPortReadPortUchar ((PUCHAR) (p))
-#define InPortWord(p) VideoPortReadPortUshort ((PUSHORT) (p))
-#define InPortLong(p) VideoPortReadPortUlong ((PULONG) (p))
-#endif
-
-
-/**********************************************************************/
-/* WIN CE */
-/**********************************************************************/
-
-#ifdef WINCE_HEADER
-#define OutPortByte(p,v) WRITE_PORT_UCHAR ((PUCHAR) (p), (UCHAR) (v))
-#define OutPortWord(p,v) WRITE_PORT_USHORT((PUSHORT) (p), (USHORT) (v))
-#define OutPortLong(p,v) WRITE_PORT_ULONG ((PULONG) (p), (ULONG) (v))
-#define InPortByte(p) READ_PORT_UCHAR ((PUCHAR) (p))
-#define InPortWord(p) READ_PORT_USHORT ((PUSHORT) (p))
-#define InPortLong(p) READ_PORT_ULONG ((PULONG) (p))
-#endif
-#endif // _OSDEF_H_
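
Deleting osdef.h removes the multi-OS port-I/O wrappers; only their LINUX_KERNEL branch is meaningful here, so callers can use the <asm/io.h> accessors directly. Note that the argument order flips: the legacy OutPortByte(p,v) expanded to outb((u8)(v),(p)), i.e. outb() takes (value, port). A minimal sketch (XGI_EXAMPLE_PORT and example_seq_write() are made up for illustration; 0x3c4 is the standard VGA sequencer index port):

#include <linux/types.h>
#include <asm/io.h>

#define XGI_EXAMPLE_PORT	0x3c4	/* VGA sequencer index port */

static void example_seq_write(u8 index, u8 data)
{
	outb(index, XGI_EXAMPLE_PORT);		/* was OutPortByte(XGI_EXAMPLE_PORT, index) */
	outb(data, XGI_EXAMPLE_PORT + 1);	/* sequencer data port follows the index port */
}
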
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 17a7ada4926..4de182b23d4 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -6,7 +6,7 @@
#define NewScratch
#endif
/* shampoo */
-#ifdef LINUX_KERNEL
+
#define SEQ_ADDRESS_PORT 0x0014
#define SEQ_DATA_PORT 0x0015
#define MISC_OUTPUT_REG_READ_PORT 0x001C
@@ -17,7 +17,7 @@
#define CRTC_ADDRESS_PORT_COLOR 0x0024
#define VIDEO_SUBSYSTEM_ENABLE_PORT 0x0013
#define PCI_COMMAND 0x04
-#endif
+
/* ~shampoo */
diff --git a/drivers/staging/xgifb/vb_ext.c b/drivers/staging/xgifb/vb_ext.c
index 49b39ee93a8..1ecf9e3e85f 100644
--- a/drivers/staging/xgifb/vb_ext.c
+++ b/drivers/staging/xgifb/vb_ext.c
@@ -1,40 +1,7 @@
-#include "osdef.h"
-
-
-
-
-#ifdef WIN2000
-
-#include <dderror.h>
-#include <devioctl.h>
-#include <miniport.h>
-#include <ntddvdeo.h>
-#include <video.h>
-#include "xgiv.h"
-#include "dd_i2c.h"
-#include "tools.h"
-#endif /* WIN2000 */
-
-#ifdef LINUX_XF86
-#include "xf86.h"
-#include "xf86PciInfo.h"
-#include "xgi.h"
-#include "xgi_regs.h"
-#endif
-
-#ifdef LINUX_KERNEL
#include <linux/version.h>
#include <asm/io.h>
#include <linux/types.h>
#include "XGIfb.h"
-/*#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
-#include <video/XGIfb.h>
-#else
-#include <linux/XGIfb.h>
-#endif*/
-#endif
-
-
#include "vb_def.h"
#include "vgatypes.h"
@@ -42,43 +9,33 @@
#include "vb_util.h"
#include "vb_setmode.h"
#include "vb_ext.h"
-extern UCHAR XGI330_SoftSetting;
-extern UCHAR XGI330_OutputSelect;
-extern USHORT XGI330_RGBSenseData2;
-extern USHORT XGI330_YCSenseData2;
-extern USHORT XGI330_VideoSenseData2;
-#ifdef WIN2000
-extern UCHAR SenseCHTV(PHW_DEVICE_EXTENSION pHWDE); /* 2007/05/17 Billy */
-#endif
-void XGI_GetSenseStatus( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo );
-BOOLEAN XGINew_GetPanelID(PVB_DEVICE_INFO pVBInfo);
-USHORT XGINew_SenseLCD(PXGI_HW_DEVICE_INFO,PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGINew_GetLCDDDCInfo(PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE ) ;
-BOOLEAN XGINew_BridgeIsEnable(PXGI_HW_DEVICE_INFO,PVB_DEVICE_INFO pVBInfo );
-BOOLEAN XGINew_Sense(USHORT tempbx,USHORT tempcx, PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGINew_SenseHiTV( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo ) ;
+extern unsigned char XGI330_SoftSetting;
+extern unsigned char XGI330_OutputSelect;
+extern unsigned short XGI330_RGBSenseData2;
+extern unsigned short XGI330_YCSenseData2;
+extern unsigned short XGI330_VideoSenseData2;
+void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+unsigned char XGINew_GetPanelID(struct vb_device_info *pVBInfo);
+unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *,
+ struct vb_device_info *pVBInfo);
+unsigned char XGINew_GetLCDDDCInfo(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
+void XGISetDPMS(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned long VESA_POWER_STATE);
+unsigned char XGINew_BridgeIsEnable(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+unsigned char XGINew_Sense(unsigned short tempbx, unsigned short tempcx,
+ struct vb_device_info *pVBInfo);
+unsigned char XGINew_SenseHiTV(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
/**************************************************************
Dynamic Sense
*************************************************************/
void XGI_WaitDisplay(void);
-BOOLEAN XGI_Is301C(PVB_DEVICE_INFO);
-BOOLEAN XGI_Is301LV(PVB_DEVICE_INFO);
-
-#ifdef WIN2000
-UCHAR XGI_SenseLCD(PHW_DEVICE_EXTENSION, PVB_DEVICE_INFO);
-UCHAR XGI_GetLCDDDCInfo(PHW_DEVICE_EXTENSION,PVB_DEVICE_INFO);
+unsigned char XGI_Is301C(struct vb_device_info *);
+unsigned char XGI_Is301LV(struct vb_device_info *);
-extern BOOL bGetDdcInfo(
-PHW_DEVICE_EXTENSION pHWDE,
-ULONG ulWhichOne,
-PUCHAR pjQueryBuffer,
-ULONG ulBufferSize
- );
-
-#endif
/* --------------------------------------------------------------------- */
@@ -87,9 +44,9 @@ ULONG ulBufferSize
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGINew_Is301B( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGINew_Is301B(struct vb_device_info *pVBInfo)
{
- USHORT flag ;
+ unsigned short flag ;
flag = XGINew_GetReg1( pVBInfo->Part4Port , 0x01 ) ;
@@ -105,7 +62,7 @@ BOOLEAN XGINew_Is301B( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_Is301C( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_Is301C(struct vb_device_info *pVBInfo)
{
if ( ( XGINew_GetReg1( pVBInfo->Part4Port , 0x01 ) & 0xF0 ) == 0xC0 )
return( 1 ) ;
@@ -126,7 +83,7 @@ BOOLEAN XGI_Is301C( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_Is301LV( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_Is301LV(struct vb_device_info *pVBInfo)
{
if ( XGINew_GetReg1( pVBInfo->Part4Port , 0x01 ) >= 0xD0 )
{
@@ -145,9 +102,11 @@ BOOLEAN XGI_Is301LV( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGINew_Sense( USHORT tempbx , USHORT tempcx, PVB_DEVICE_INFO pVBInfo )
+unsigned char XGINew_Sense(unsigned short tempbx,
+ unsigned short tempcx,
+ struct vb_device_info *pVBInfo)
{
- USHORT temp , i , tempch ;
+ unsigned short temp, i, tempch;
temp = tempbx & 0xFF ;
XGINew_SetReg1( pVBInfo->Part4Port , 0x11 , temp ) ;
@@ -169,284 +128,6 @@ BOOLEAN XGINew_Sense( USHORT tempbx , USHORT tempcx, PVB_DEVICE_INFO pVBInfo )
return( 0 ) ;
}
-#ifdef WIN2000
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SenseLCD */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-UCHAR XGI_SenseLCD( PHW_DEVICE_EXTENSION pHWDE, PVB_DEVICE_INFO pVBInfo)
-{
- USHORT tempax , tempbx , tempcx ;
- UCHAR SoftSetting = XGI330_SoftSetting ;
-
- if ( pVBInfo->VBType & ( VB_XGI301LV | VB_XGI302LV ) )
- return( 1 ) ;
-
-
- if ( SoftSetting & HotPlugFunction ) /* Hot Plug Detection */
- {
- XGINew_SetRegAND( pVBInfo->Part4Port , 0x0F , 0x3F ) ;
- tempbx = 0 ;
- tempcx = 0x9010 ;
- if ( XGINew_Sense( tempbx , tempcx, pVBInfo ) )
- return( 1 ) ;
-
- return( 0 ) ;
- }
- else /* Get LCD Info from EDID */
- return(XGI_GetLCDDDCInfo(pHWDE, pVBInfo));
-}
-
-
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetLCDDDCInfo */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-UCHAR XGI_GetLCDDDCInfo( PHW_DEVICE_EXTENSION pHWDE , PVB_DEVICE_INFO pVBInfo)
-{
- UCHAR tempah , tempbl , tempbh ;
- USHORT tempbx , temp ;
- UCHAR pjEDIDBuf[ 256 ] ;
- ULONG ulBufferSize = 256 ;
- UCHAR bMASK_OUTPUTSTATE_CRT2LCD = 2 ; /* 0423 shampoo */
-
- bGetDdcInfo( pHWDE , MASK_OUTPUTSTATE_CRT2LCD , pjEDIDBuf , ulBufferSize ) ;
- if ( ( *( ( PULONG )pjEDIDBuf ) == 0xFFFFFF00 ) && ( *( ( PULONG )( pjEDIDBuf + 4 ) ) == 0x00FFFFFF ) )
- {
- tempah = Panel1024x768 ;
- tempbl=( *( pjEDIDBuf + 0x3A ) ) & 0xf0 ;
-
- if ( tempbl != 0x40 )
- {
- tempah = Panel1600x1200 ;
- if ( tempbl != 0x60 )
- {
- tempah = Panel1280x1024 ;
- tempbh = ( *( pjEDIDBuf + 0x3B ) ) ;
- if ( tempbh != 0x00 )
- {
- tempah = Panel1280x960 ;
- if ( tempbh != 0x0C0 )
- {
- tempbx = ( ( *( pjEDIDBuf + 0x24 ) ) << 8 ) | ( *( pjEDIDBuf + 0x23 ) ) ;
- tempah = Panel1280x1024 ;
- if ( !( tempbx & 0x0100 ) )
- {
- tempah = Panel1024x768 ;
- if ( !( tempbx & 0x0E00 ) )
- {
- tempah = Panel1280x1024 ;
- }
- }
- }
-
- if ( tempbx & 0x00FF )
- {
- temp = ScalingLCD ;
- XGINew_SetRegOR( pVBInfo->P3d4 , 0x37 , temp ) ;
- }
- }
- }
- }
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x36 , ( ~0x07 ) , tempah ) ;
- tempah = ( ( *( pjEDIDBuf + 0x47 ) ) & 0x06 ) ; /* Polarity */
- tempah = ( tempah ^ 0x06 ) << 4 ;
- tempah |= LCDSync ;
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x37 , ( ~LCDSyncBit ) , tempah ) ;
- tempbh= XGINew_GetReg1( pVBInfo->P3d4 , 0x36 ) ;
- tempbh &= 0x07 ;
- if ( tempbh == Panel1280x960 )
- XGINew_SetRegAND( pVBInfo->P3d4 , 0x37 , 0x0E ) ;
- }
- else if ( *pjEDIDBuf == 0x20 )
- {
- tempah = Panel1024x768 ;
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x36 , ( ~0x07 ) , tempah ) ;
- }
- else
- {
- return( 0 ) ;
- }
-
- return( 1 ) ;
-}
-
-
-/* --------------------------------------------------------------------- */
-/* Function : XGI_DySense */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-BOOLEAN XGI_DySense( PHW_DEVICE_EXTENSION pHWDE , PUCHAR ujConnectStatus)
-{
- UCHAR pre_CRD,pre_SR1E , pre_Part2_0 , pre_Part4_D ;
- USHORT tempax , tempbx , tempcx , pushax , temp ;
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
- UCHAR OutputSelect = XGI330_OutputSelect ;
- PXGI_HW_DEVICE_INFO HwDeviceExtension= pHWDE->pXGIHWDE ;
- UCHAR bConnectStatus = 0 ;
- pVBInfo->BaseAddr = HwDeviceExtension->pjIOAddress ;
- pVBInfo->ROMAddr = pHWDE->pjVirtualRomBase ;
-
- pVBInfo->P3c2 = pVBInfo->BaseAddr + 0x12 ;
- pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14 ;
- pVBInfo->P3d4 = pVBInfo->BaseAddr + 0x24 ;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10 ;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 ;
- pushax = XGINew_GetReg1( pVBInfo->P3d4 , 0x17 ) ; /* 0512 Fix Dysense hanged */
- temp = ( pushax & 0x00FF ) | 0x80 ;
- XGINew_SetRegOR( pVBInfo->P3d4 , 0x17 , temp ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x05 , 0x86 ) ;
- /* beginning of dynamic sense CRT1 */
-
- pVBInfo->IF_DEF_CH7007 = 0;
- if (pHWDE->bCH7007)
- {
- InitTo330Pointer( pHWDE->pXGIHWDE->jChipType, pVBInfo ) ;
- HwDeviceExtension->pDevice = (PVOID)pHWDE;
- pVBInfo->IF_DEF_CH7007 = 1;
- /* [Billy] 2007/05/14 For CH7007 */
- if ( pVBInfo->IF_DEF_CH7007 == 1 )
- {
- bConnectStatus = SenseCHTV(HwDeviceExtension->pDevice) ; /* 07/05/28 */
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x32 , ~0x03 , (UCHAR)bConnectStatus ) ;
- }
- }
- if(( pHWDE->jChipID >= XG40 ) || ( pHWDE->jChipID >= XG20 ))
- {
-
- if ( pHWDE->jChipID >= XG40 )
- XGINew_SetReg1( pVBInfo->P3d4 , 0x57 , 0x4A ) ; /* write sense pattern 30->4a */
- else
- XGINew_SetReg1( pVBInfo->P3d4 , 0x57 , 0x5F ) ; /* write sense pattern */
-
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x53 , 0xFF , 0x02 ) ; /* enable sense DAC */
- XGI_WaitDisply(pVBInfo) ;
-
- if(XGINew_GetReg2( pVBInfo->P3c2 ) & 0x10 )
- bConnectStatus |= Monitor1Sense ;
-
- XGINew_SetRegAND( pVBInfo->P3d4 , 0x53 , 0xFD ) ; /* disable sense DAC */
- XGINew_SetRegAND( pVBInfo->P3d4 , 0x57 , 0x00 ) ; /* clear sense pattern */
-
-
- /* ---------- End of dynamic sense CRT1 ----------- */
-
- /* ---------- beginning of dynamic sense VB ------------ */
- pre_SR1E = XGINew_GetReg1( pVBInfo->P3c4 , 0x1E ) ;
- XGINew_SetRegOR( pVBInfo->P3c4 , 0x1E , 0x20 ) ; /* Enable CRT2,work-a-round for 301B/301LV/302LV */
- pre_Part2_0 = XGINew_GetReg1( pVBInfo->Part2Port , 0x00 ) ;
- pre_Part4_D = XGINew_GetReg1( pVBInfo->Part4Port , 0x0D ) ;
-
- if ( XGI_Is301C( pVBInfo ) ) /* 301C only */
- XGINew_SetRegANDOR( pVBInfo->Part4Port , 0x0D , ~0x07 , 0x01 ) ; /* Set Part4 0x0D D[2:0] to 001b */
-
- /* tempax = 0 ; */
- if ( !XGI_Is301LV( pVBInfo ) )
- {
- tempbx = XGI330_RGBSenseData2 ;
- tempcx = 0x0E08 ;
- if(XGINew_Sense( tempbx , tempcx, pVBInfo ) )
- {
- bConnectStatus |= Monitor2Sense ;
- if ( OutputSelect & SetSCARTOutput )
- {
- bConnectStatus ^= ( Monitor2Sense | SCARTSense ) ;
- }
- }
- }
- if ( XGI_Is301C( pVBInfo ) ) /* 301C only */
- XGINew_SetRegOR( pVBInfo->Part4Port , 0x0D , 0x04 ) ; /* Set Part4 0x0D D[2]=1 for dynamic sense */
-
- if ( ( XGINew_Is301B( pVBInfo ) ) )
- XGINew_SetRegOR( pVBInfo->Part2Port , 0x00 , 0x0C ) ; /* ????????? */
-
- if ( XGINew_SenseHiTV( HwDeviceExtension , pVBInfo) ) /* add by kuku for Dysense HiTV //start */
- {
- bConnectStatus|= YPbPrSense ;
- }
- else
- {
- tempbx = XGI330_YCSenseData2 ; /* Y/C Sense Data Ptr */
- tempcx = 0x0604 ;
- if ( XGINew_Sense( tempbx , tempcx , pVBInfo) )
- bConnectStatus |= SVIDEOSense ;
-
- if ( OutputSelect & BoardTVType )
- {
- tempbx = XGI330_VideoSenseData2 ;
- tempcx = 0x0804 ;
- if ( XGINew_Sense(tempbx , tempcx, pVBInfo) )
- bConnectStatus|= AVIDEOSense ;
- }
- else
- {
- if ( !( bConnectStatus & SVIDEOSense ) )
- {
- tempbx = XGI330_VideoSenseData2 ;
- tempcx = 0x0804 ;
- if ( XGINew_Sense( tempbx , tempcx, pVBInfo ) )
- bConnectStatus |= AVIDEOSense ;
- }
- }
- } /* end */
- /* DySenseVBCnt */
-
- tempbx = 0 ;
- tempcx = 0 ;
- XGINew_Sense(tempbx , tempcx, pVBInfo ) ;
-
- if ( !( bConnectStatus & Monitor2Sense ) )
- {
- if ( XGI_SenseLCD( pHWDE , pVBInfo ) )
- bConnectStatus |= LCDSense ;
- }
-
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x32 , ~( AVIDEOSense | SVIDEOSense | LCDSense | Monitor2Sense | Monitor1Sense ) , bConnectStatus ) ;
-
- XGINew_SetReg1( pVBInfo->Part4Port , 0x0D , pre_Part4_D ) ;
- XGINew_SetReg1( pVBInfo->Part2Port , 0x00 , pre_Part2_0 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1E , pre_SR1E ) ;
-
- if ( XGI_Is301C( pVBInfo ) ) /* 301C only */
- {
- tempax = XGINew_GetReg1( pVBInfo->Part2Port , 0x00 ) ;
- if ( tempax & 0x20 )
- {
- /* Reset VBPro */
- for( tempcx = 2 ; tempcx > 0 ; tempcx-- )
- {
- tempax ^= 0x20 ;
- XGINew_SetReg1( pVBInfo->Part2Port , 0x00 , tempax ) ;
- }
- }
- }
- /* End of dynamic sense VB */
- }
- else
- {
- XGI_SenseCRT1(pVBInfo) ;
- XGI_GetSenseStatus( HwDeviceExtension, pVBInfo ) ; /* sense CRT2 */
- bConnectStatus = XGINew_GetReg1( pVBInfo->P3d4 , 0x32 ) ;
- }
- temp = pushax & 0x00FF ; /* 0512 Fix Dysense hanged */
- XGINew_SetReg1( pVBInfo->P3d4 , 0x17 , temp ) ;
- if ( bConnectStatus )
- {
- *ujConnectStatus = bConnectStatus ;
- return( 1 ) ;
- }
- else
- return( 0 ) ;
-}
-
-#endif /* WIN2000 */
/* --------------------------------------------------------------------- */
/* Function : XGISetDPMS */
@@ -454,13 +135,14 @@ BOOLEAN XGI_DySense( PHW_DEVICE_EXTENSION pHWDE , PUCHAR ujConnectStatus)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-VOID XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE )
+void XGISetDPMS(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned long VESA_POWER_STATE)
{
- USHORT ModeNo, ModeIdIndex ;
- UCHAR temp ;
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
- pVBInfo->BaseAddr = (ULONG)pXGIHWDE->pjIOAddress ;
+ unsigned short ModeNo, ModeIdIndex;
+ unsigned char temp;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
+ pVBInfo->BaseAddr = (unsigned long)pXGIHWDE->pjIOAddress ;
pVBInfo->ROMAddr = pXGIHWDE->pjVirtualRomBase ;
@@ -527,18 +209,18 @@ VOID XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE )
}
if ( VESA_POWER_STATE == 0x00000400 )
- XGINew_SetReg1( pVBInfo->Part4Port , 0x31 , ( UCHAR )( XGINew_GetReg1( pVBInfo->Part4Port , 0x31 ) & 0xFE ) ) ;
+ XGINew_SetReg1(pVBInfo->Part4Port, 0x31, (unsigned char)(XGINew_GetReg1(pVBInfo->Part4Port, 0x31) & 0xFE));
else
- XGINew_SetReg1( pVBInfo->Part4Port , 0x31 , ( UCHAR )( XGINew_GetReg1( pVBInfo->Part4Port , 0x31 ) | 0x01 ) ) ;
+ XGINew_SetReg1(pVBInfo->Part4Port, 0x31, (unsigned char)(XGINew_GetReg1(pVBInfo->Part4Port, 0x31) | 0x01));
- temp = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x1f ) ;
+ temp = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x1f);
temp &= 0x3f ;
switch ( VESA_POWER_STATE )
{
case 0x00000000: /* on */
if ( ( pXGIHWDE->ujVBChipID == VB_CHIP_301 ) || ( pXGIHWDE->ujVBChipID == VB_CHIP_302 ) )
{
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1f , ( UCHAR )( temp | 0x00 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x1f, (unsigned char)(temp | 0x00));
XGI_EnableBridge( pXGIHWDE, pVBInfo ) ;
}
else
@@ -596,7 +278,7 @@ VOID XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE )
XGI_DisplayOff( pXGIHWDE, pVBInfo );
}
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1f , ( UCHAR )( temp | 0x40 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x1f, (unsigned char)(temp | 0x40));
break ;
case 0x00000200: /* suspend */
if ( pXGIHWDE->jChipType == XG21 )
@@ -609,12 +291,12 @@ VOID XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE )
XGI_DisplayOff( pXGIHWDE, pVBInfo );
XGI_XG27BLSignalVDD( 0x20 , 0x00, pVBInfo ) ; /* LVDS signal off */
}
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1f , ( UCHAR )( temp | 0x80 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x1f, (unsigned char)(temp | 0x80));
break ;
case 0x00000400: /* off */
if ( (pXGIHWDE->ujVBChipID == VB_CHIP_301 ) || ( pXGIHWDE->ujVBChipID == VB_CHIP_302 ) )
{
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1f , ( UCHAR )( temp | 0xc0 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x1f, (unsigned char)(temp | 0xc0));
XGI_DisableBridge( pXGIHWDE, pVBInfo ) ;
}
else
@@ -677,12 +359,12 @@ VOID XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetSenseStatus( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempax = 0 , tempbx , tempcx , temp ,
+ unsigned short tempax = 0 , tempbx , tempcx , temp ,
P2reg0 = 0 , SenseModeNo = 0 , OutputSelect = *pVBInfo->pOutputSelect ,
ModeIdIndex , i ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
if ( pVBInfo->IF_DEF_LVDS == 1 )
{
@@ -876,10 +558,11 @@ void XGI_GetSenseStatus( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGINew_SenseLCD( PXGI_HW_DEVICE_INFO HwDeviceExtension ,PVB_DEVICE_INFO pVBInfo)
+unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- /* USHORT SoftSetting ; */
- USHORT temp ;
+ /* unsigned short SoftSetting ; */
+ unsigned short temp ;
if ( ( HwDeviceExtension->jChipType >= XG20 ) || ( HwDeviceExtension->jChipType >= XG40 ) )
temp = 0 ;
@@ -899,9 +582,9 @@ USHORT XGINew_SenseLCD( PXGI_HW_DEVICE_INFO HwDeviceExtension ,PVB_DEVICE_INFO p
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGINew_GetLCDDDCInfo( PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo)
+unsigned char XGINew_GetLCDDDCInfo(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT temp ;
+ unsigned short temp ;
/* add lcd sense */
if ( HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN )
@@ -910,7 +593,7 @@ BOOLEAN XGINew_GetLCDDDCInfo( PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_I
}
else
{
- temp = ( USHORT )HwDeviceExtension->ulCRT2LCDType ;
+ temp = (unsigned short)HwDeviceExtension->ulCRT2LCDType ;
switch( HwDeviceExtension->ulCRT2LCDType )
{
case LCD_INVALID:
@@ -952,26 +635,27 @@ BOOLEAN XGINew_GetLCDDDCInfo( PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_I
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGINew_GetPanelID(PVB_DEVICE_INFO pVBInfo )
+unsigned char XGINew_GetPanelID(struct vb_device_info *pVBInfo)
{
- USHORT PanelTypeTable[ 16 ] = { SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType00 ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType01 ,
- SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType02 ,
- SyncNN | PanelRGB18Bit | Panel640x480 | _PanelType03 ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType04 ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType05 ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType06 ,
- SyncNN | PanelRGB24Bit | Panel1024x768 | _PanelType07 ,
- SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType08 ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType09 ,
- SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType0A ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0B ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0C ,
- SyncNN | PanelRGB24Bit | Panel1024x768 | _PanelType0D ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0E ,
- SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0F } ;
- USHORT tempax , tempbx , temp ;
- /* USHORT return_flag ; */
+ unsigned short PanelTypeTable[16] = {
+ SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType00,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType01,
+ SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType02,
+ SyncNN | PanelRGB18Bit | Panel640x480 | _PanelType03,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType04,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType05,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType06,
+ SyncNN | PanelRGB24Bit | Panel1024x768 | _PanelType07,
+ SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType08,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType09,
+ SyncNN | PanelRGB18Bit | Panel800x600 | _PanelType0A,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0B,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0C,
+ SyncNN | PanelRGB24Bit | Panel1024x768 | _PanelType0D,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0E,
+ SyncNN | PanelRGB18Bit | Panel1024x768 | _PanelType0F };
+ unsigned short tempax , tempbx, temp;
+ /* unsigned short return_flag ; */
tempax = XGINew_GetReg1( pVBInfo->P3c4 , 0x1A ) ;
tempbx = tempax & 0x1E ;
@@ -1024,9 +708,9 @@ BOOLEAN XGINew_GetPanelID(PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGINew_BridgeIsEnable( PXGI_HW_DEVICE_INFO HwDeviceExtension ,PVB_DEVICE_INFO pVBInfo)
+unsigned char XGINew_BridgeIsEnable(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT flag ;
+ unsigned short flag ;
if ( XGI_BridgeIsOn( pVBInfo ) == 0 )
{
@@ -1051,9 +735,9 @@ BOOLEAN XGINew_BridgeIsEnable( PXGI_HW_DEVICE_INFO HwDeviceExtension ,PVB_DEVICE
/* Output : */
/* Description : */
/* ------------------------------------------------------ */
-BOOLEAN XGINew_SenseHiTV( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo )
+unsigned char XGINew_SenseHiTV(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempbx , tempcx , temp , i , tempch;
+ unsigned short tempbx , tempcx , temp , i , tempch;
tempbx = *pVBInfo->pYCSenseData2 ;
@@ -1132,14 +816,14 @@ BOOLEAN XGINew_SenseHiTV( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INF
; DX: PAnel V. resolution
;-----------------------------------------------------------------------------
*/
-void XGI_XG21Fun14Sub70( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
+void XGI_XG21Fun14Sub70(struct vb_device_info *pVBInfo, PX86_REGS pBiosArguments)
{
- USHORT ModeIdIndex;
- USHORT ModeNo;
+ unsigned short ModeIdIndex;
+ unsigned short ModeNo;
- USHORT EModeCount;
- USHORT lvdstableindex;
+ unsigned short EModeCount;
+ unsigned short lvdstableindex;
lvdstableindex = XGI_GetLVDSOEMTableIndex( pVBInfo );
pBiosArguments->h.bl = 0x81;
@@ -1153,7 +837,7 @@ void XGI_XG21Fun14Sub70( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
ModeNo = pVBInfo->EModeIDTable[ ModeIdIndex ].Ext_ModeID;
if ( pVBInfo->EModeIDTable[ ModeIdIndex ].Ext_ModeID == 0xFF )
{
- pBiosArguments->h.bh = (UCHAR) EModeCount;
+ pBiosArguments->h.bh = (unsigned char) EModeCount;
return;
}
if ( !XGI_XG21CheckLVDSMode( ModeNo , ModeIdIndex, pVBInfo) )
@@ -1175,13 +859,13 @@ void XGI_XG21Fun14Sub70( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
;
;-----------------------------------------------------------------------------
*/
-void XGI_XG21Fun14Sub71( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
+void XGI_XG21Fun14Sub71(struct vb_device_info *pVBInfo, PX86_REGS pBiosArguments)
{
- USHORT EModeCount;
- USHORT ModeIdIndex,resindex;
- USHORT ModeNo;
- USHORT EModeIndex = pBiosArguments->h.bh;
+ unsigned short EModeCount;
+ unsigned short ModeIdIndex, resindex;
+ unsigned short ModeNo;
+ unsigned short EModeIndex = pBiosArguments->h.bh;
EModeCount = 0;
for( ModeIdIndex = 0 ; ; ModeIdIndex ++ )
@@ -1199,7 +883,7 @@ void XGI_XG21Fun14Sub71( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
if (EModeCount == EModeIndex)
{
resindex = XGI_GetResInfo( ModeNo , ModeIdIndex, pVBInfo ) ;
- pBiosArguments->h.bl = (UCHAR) ModeNo;
+ pBiosArguments->h.bl = (unsigned char) ModeNo;
pBiosArguments->x.cx = pVBInfo->ModeResInfo[ resindex ].HTotal ; /* xres->ax */
pBiosArguments->x.dx = pVBInfo->ModeResInfo[ resindex ].VTotal ; /* yres->bx */
pBiosArguments->x.ax = 0x0014;
@@ -1221,10 +905,10 @@ void XGI_XG21Fun14Sub71( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
;
;-----------------------------------------------------------------------------
*/
-void XGI_XG21Fun14Sub72( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
+void XGI_XG21Fun14Sub72(struct vb_device_info *pVBInfo, PX86_REGS pBiosArguments)
{
- USHORT ModeIdIndex,resindex;
- USHORT ModeNo;
+ unsigned short ModeIdIndex, resindex;
+ unsigned short ModeNo;
ModeNo = pBiosArguments->h.bl ;
@@ -1280,11 +964,11 @@ void XGI_XG21Fun14Sub72( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
; BX[6]: *Value1 D[6] Panel H. Polarity
;-----------------------------------------------------------------------------
*/
-void XGI_XG21Fun14Sub73( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
+void XGI_XG21Fun14Sub73(struct vb_device_info *pVBInfo, PX86_REGS pBiosArguments)
{
- UCHAR Select;
+ unsigned char Select;
- USHORT lvdstableindex;
+ unsigned short lvdstableindex;
lvdstableindex = XGI_GetLVDSOEMTableIndex( pVBInfo );
Select = pBiosArguments->h.bl;
@@ -1314,10 +998,10 @@ void XGI_XG21Fun14Sub73( PVB_DEVICE_INFO pVBInfo , PX86_REGS pBiosArguments )
}
-void XGI_XG21Fun14( PXGI_HW_DEVICE_INFO pXGIHWDE, PX86_REGS pBiosArguments)
+void XGI_XG21Fun14(struct xgi_hw_device_info *pXGIHWDE, PX86_REGS pBiosArguments)
{
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->IF_DEF_LVDS = 0 ;
pVBInfo->IF_DEF_CH7005 = 0 ;
diff --git a/drivers/staging/xgifb/vb_ext.h b/drivers/staging/xgifb/vb_ext.h
index 9a72f5ecb71..5cc4d12c225 100644
--- a/drivers/staging/xgifb/vb_ext.h
+++ b/drivers/staging/xgifb/vb_ext.h
@@ -2,15 +2,17 @@
#define _VBEXT_
struct DWORDREGS {
- ULONG Eax, Ebx, Ecx, Edx, Esi, Edi, Ebp;
+ unsigned long Eax, Ebx, Ecx, Edx, Esi, Edi, Ebp;
};
struct WORDREGS {
- USHORT ax, hi_ax, bx, hi_bx, cx, hi_cx, dx, hi_dx, si, hi_si, di ,hi_di, bp, hi_bp;
+ unsigned short ax, hi_ax, bx, hi_bx, cx, hi_cx, dx, hi_dx, si,
+ hi_si, di, hi_di, bp, hi_bp;
};
struct BYTEREGS {
- UCHAR al, ah, hi_al, hi_ah, bl, bh, hi_bl, hi_bh, cl, ch, hi_cl, hi_ch, dl, dh, hi_dl, hi_dh;
+ unsigned char al, ah, hi_al, hi_ah, bl, bh, hi_bl, hi_bh, cl, ch,
+ hi_cl, hi_ch, dl, dh, hi_dl, hi_dh;
};
typedef union _X86_REGS {
@@ -19,14 +21,14 @@ typedef union _X86_REGS {
struct BYTEREGS h;
} X86_REGS, *PX86_REGS;
-extern void XGI_XG21Fun14( PXGI_HW_DEVICE_INFO pXGIHWDE, PX86_REGS pBiosArguments);
-extern void XGISetDPMS( PXGI_HW_DEVICE_INFO pXGIHWDE , ULONG VESA_POWER_STATE ) ;
-extern void XGI_GetSenseStatus( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo );
-extern void XGINew_SetModeScratch ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo ) ;
-extern void ReadVBIOSTablData( UCHAR ChipType , PVB_DEVICE_INFO pVBInfo);
-extern USHORT XGINew_SenseLCD(PXGI_HW_DEVICE_INFO,PVB_DEVICE_INFO pVBInfo);
-#ifdef WIN2000
-extern BOOLEAN XGI_DySense( PHW_DEVICE_EXTENSION pHWDE , PUCHAR ujConnectStatus );
-#endif /* WIN2000 */
+extern void XGI_XG21Fun14(struct xgi_hw_device_info *pXGIHWDE, PX86_REGS pBiosArguments);
+extern void XGISetDPMS(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned long VESA_POWER_STATE);
+extern void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+extern void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) ;
+extern void ReadVBIOSTablData(unsigned char ChipType,
+ struct vb_device_info *pVBInfo);
+extern unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *,
+ struct vb_device_info *pVBInfo);
#endif
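
The header keeps the BIOS-call register block as a union of dword/word/byte views; the hi_* members appear to exist so the narrower views pad out to the width of the wider one, and the aliasing of al/ah with the low bytes of Eax relies on a little-endian layout. Note that with unsigned long the dword view is 64-bit on 64-bit builds, so the three views only line up exactly on 32-bit ABIs. Below is a self-contained sketch of the overlay idea using fixed-width types; it illustrates the technique and is not the driver's actual definition.

/* Minimal sketch of the register-overlay idea with fixed-width types
 * (uint32_t instead of unsigned long), so the views cover the same bytes
 * regardless of ABI. Little-endian is assumed, as the original header
 * appears to assume. */
#include <stdint.h>
#include <stdio.h>

union x86_regs_sketch {
	struct { uint32_t eax, ebx, ecx, edx; } e;	/* dword view */
	struct { uint16_t ax, hi_ax, bx, hi_bx,
		 cx, hi_cx, dx, hi_dx; } x;		/* word view  */
	struct { uint8_t al, ah, hi_al, hi_ah,
		 bl, bh, hi_bl, hi_bh; } h;		/* byte view  */
};

int main(void)
{
	union x86_regs_sketch r = { .e = { .eax = 0x12345678 } };

	/* On a little-endian machine the word/byte views alias the
	 * low-order bytes of eax: ax=0x5678, al=0x78, ah=0x56. */
	printf("eax=%#x ax=%#x al=%#x ah=%#x\n",
	       (unsigned)r.e.eax, (unsigned)r.x.ax,
	       (unsigned)r.h.al, (unsigned)r.h.ah);
	return 0;
}
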
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index b85ca9ba807..e02722d05f6 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,29 +1,9 @@
-#include "osdef.h"
#include "vgatypes.h"
-
-#ifdef LINUX_KERNEL
#include <linux/version.h>
#include <linux/types.h>
#include <linux/delay.h> /* udelay */
#include "XGIfb.h"
-/*#if LINUX_VERSxION_CODE >= KERNEL_VERSION(2,5,0)
-#include <video/XGIfb.h>
-#else
-#include <linux/XGIfb.h>
-#endif */
-#endif
-
-#ifdef WIN2000
-#include <dderror.h>
-#include <devioctl.h>
-#include <miniport.h>
-#include <ntddvdeo.h>
-#include <video.h>
-#include "xgiv.h"
-#include "dd_i2c.h"
-#include "tools.h"
-#endif
#include "vb_def.h"
#include "vb_struct.h"
@@ -32,123 +12,105 @@
#include "vb_init.h"
#include "vb_ext.h"
-#ifdef LINUX_XF86
-#include "xf86.h"
-#include "xf86PciInfo.h"
-#include "xgi.h"
-#include "xgi_regs.h"
-#endif
-#ifdef LINUX_KERNEL
#include <asm/io.h>
-#include <linux/types.h>
-#endif
-UCHAR XGINew_ChannelAB,XGINew_DataBusWidth;
-
-USHORT XGINew_DRAMType[17][5]={{0x0C,0x0A,0x02,0x40,0x39},{0x0D,0x0A,0x01,0x40,0x48},
- {0x0C,0x09,0x02,0x20,0x35},{0x0D,0x09,0x01,0x20,0x44},
- {0x0C,0x08,0x02,0x10,0x31},{0x0D,0x08,0x01,0x10,0x40},
- {0x0C,0x0A,0x01,0x20,0x34},{0x0C,0x09,0x01,0x08,0x32},
- {0x0B,0x08,0x02,0x08,0x21},{0x0C,0x08,0x01,0x08,0x30},
- {0x0A,0x08,0x02,0x04,0x11},{0x0B,0x0A,0x01,0x10,0x28},
- {0x09,0x08,0x02,0x02,0x01},{0x0B,0x09,0x01,0x08,0x24},
- {0x0B,0x08,0x01,0x04,0x20},{0x0A,0x08,0x01,0x02,0x10},
- {0x09,0x08,0x01,0x01,0x00}};
-
-USHORT XGINew_SDRDRAM_TYPE[13][5]=
-{
-{ 2,12, 9,64,0x35},
-{ 1,13, 9,64,0x44},
-{ 2,12, 8,32,0x31},
-{ 2,11, 9,32,0x25},
-{ 1,12, 9,32,0x34},
-{ 1,13, 8,32,0x40},
-{ 2,11, 8,16,0x21},
-{ 1,12, 8,16,0x30},
-{ 1,11, 9,16,0x24},
-{ 1,11, 8, 8,0x20},
-{ 2, 9, 8, 4,0x01},
-{ 1,10, 8, 4,0x10},
-{ 1, 9, 8, 2,0x00}
-};
-
-USHORT XGINew_DDRDRAM_TYPE[4][5]=
-{
-{ 2,12, 9,64,0x35},
-{ 2,12, 8,32,0x31},
-{ 2,11, 8,16,0x21},
-{ 2, 9, 8, 4,0x01}
-};
-USHORT XGINew_DDRDRAM_TYPE340[4][5]=
-{
-{ 2,13, 9,64,0x45},
-{ 2,12, 9,32,0x35},
-{ 2,12, 8,16,0x31},
-{ 2,11, 8, 8,0x21}
-};
-USHORT XGINew_DDRDRAM_TYPE20[12][5]=
-{
-{ 2,14,11,128,0x5D},
-{ 2,14,10,64,0x59},
-{ 2,13,11,64,0x4D},
-{ 2,14, 9,32,0x55},
-{ 2,13,10,32,0x49},
-{ 2,12,11,32,0x3D},
-{ 2,14, 8,16,0x51},
-{ 2,13, 9,16,0x45},
-{ 2,12,10,16,0x39},
-{ 2,13, 8, 8,0x41},
-{ 2,12, 9, 8,0x35},
-{ 2,12, 8, 4,0x31}
-};
-
-void XGINew_SetDRAMSize_340(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO);
-void XGINew_SetDRAMSize_310(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO);
-void XGINew_SetMemoryClock(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-void XGINew_SetDRAMModeRegister(PVB_DEVICE_INFO );
-void XGINew_SetDRAMModeRegister340( PXGI_HW_DEVICE_INFO HwDeviceExtension );
-void XGINew_SetDRAMDefaultRegister340(PXGI_HW_DEVICE_INFO HwDeviceExtension, ULONG, PVB_DEVICE_INFO );
-UCHAR XGINew_GetXG20DRAMType( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGIInitNew( PXGI_HW_DEVICE_INFO HwDeviceExtension) ;
-
-int XGINew_DDRSizing340( PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO );
-void XGINew_DisableRefresh( PXGI_HW_DEVICE_INFO ,PVB_DEVICE_INFO) ;
-void XGINew_CheckBusWidth_310( PVB_DEVICE_INFO) ;
-int XGINew_SDRSizing(PVB_DEVICE_INFO);
-int XGINew_DDRSizing( PVB_DEVICE_INFO );
-void XGINew_EnableRefresh( PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO);
+unsigned char XGINew_ChannelAB, XGINew_DataBusWidth;
+
+unsigned short XGINew_DRAMType[17][5] = {
+ {0x0C, 0x0A, 0x02, 0x40, 0x39}, {0x0D, 0x0A, 0x01, 0x40, 0x48},
+ {0x0C, 0x09, 0x02, 0x20, 0x35}, {0x0D, 0x09, 0x01, 0x20, 0x44},
+ {0x0C, 0x08, 0x02, 0x10, 0x31}, {0x0D, 0x08, 0x01, 0x10, 0x40},
+ {0x0C, 0x0A, 0x01, 0x20, 0x34}, {0x0C, 0x09, 0x01, 0x08, 0x32},
+ {0x0B, 0x08, 0x02, 0x08, 0x21}, {0x0C, 0x08, 0x01, 0x08, 0x30},
+ {0x0A, 0x08, 0x02, 0x04, 0x11}, {0x0B, 0x0A, 0x01, 0x10, 0x28},
+ {0x09, 0x08, 0x02, 0x02, 0x01}, {0x0B, 0x09, 0x01, 0x08, 0x24},
+ {0x0B, 0x08, 0x01, 0x04, 0x20}, {0x0A, 0x08, 0x01, 0x02, 0x10},
+ {0x09, 0x08, 0x01, 0x01, 0x00} };
+
+unsigned short XGINew_SDRDRAM_TYPE[13][5] = {
+ { 2, 12, 9, 64, 0x35},
+ { 1, 13, 9, 64, 0x44},
+ { 2, 12, 8, 32, 0x31},
+ { 2, 11, 9, 32, 0x25},
+ { 1, 12, 9, 32, 0x34},
+ { 1, 13, 8, 32, 0x40},
+ { 2, 11, 8, 16, 0x21},
+ { 1, 12, 8, 16, 0x30},
+ { 1, 11, 9, 16, 0x24},
+ { 1, 11, 8, 8, 0x20},
+ { 2, 9, 8, 4, 0x01},
+ { 1, 10, 8, 4, 0x10},
+ { 1, 9, 8, 2, 0x00} };
+
+unsigned short XGINew_DDRDRAM_TYPE[4][5] = {
+ { 2, 12, 9, 64, 0x35},
+ { 2, 12, 8, 32, 0x31},
+ { 2, 11, 8, 16, 0x21},
+ { 2, 9, 8, 4, 0x01} };
+
+unsigned short XGINew_DDRDRAM_TYPE340[4][5] = {
+ { 2, 13, 9, 64, 0x45},
+ { 2, 12, 9, 32, 0x35},
+ { 2, 12, 8, 16, 0x31},
+ { 2, 11, 8, 8, 0x21} };
+
+unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
+ { 2, 14, 11, 128, 0x5D},
+ { 2, 14, 10, 64, 0x59},
+ { 2, 13, 11, 64, 0x4D},
+ { 2, 14, 9, 32, 0x55},
+ { 2, 13, 10, 32, 0x49},
+ { 2, 12, 11, 32, 0x3D},
+ { 2, 14, 8, 16, 0x51},
+ { 2, 13, 9, 16, 0x45},
+ { 2, 12, 10, 16, 0x39},
+ { 2, 13, 8, 8, 0x41},
+ { 2, 12, 9, 8, 0x35},
+ { 2, 12, 8, 4, 0x31} };
+
+void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *, struct vb_device_info *);
+void XGINew_SetDRAMSize_310(struct xgi_hw_device_info *, struct vb_device_info *);
+void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+void XGINew_SetDRAMModeRegister(struct vb_device_info *);
+void XGINew_SetDRAMModeRegister340(struct xgi_hw_device_info *HwDeviceExtension);
+void XGINew_SetDRAMDefaultRegister340(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long, struct vb_device_info *);
+unsigned char XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
+unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension);
+
+int XGINew_DDRSizing340(struct xgi_hw_device_info *, struct vb_device_info *);
+void XGINew_DisableRefresh(struct xgi_hw_device_info *, struct vb_device_info *) ;
+void XGINew_CheckBusWidth_310(struct vb_device_info *) ;
+int XGINew_SDRSizing(struct vb_device_info *);
+int XGINew_DDRSizing(struct vb_device_info *);
+void XGINew_EnableRefresh(struct xgi_hw_device_info *, struct vb_device_info *);
int XGINew_RAMType; /*int ModeIDOffset,StandTable,CRT1Table,ScreenOffset,REFIndex;*/
-ULONG UNIROM; /* UNIROM */
-BOOLEAN ChkLFB( PVB_DEVICE_INFO );
-void XGINew_Delay15us(ULONG);
-void SetPowerConsume (PXGI_HW_DEVICE_INFO HwDeviceExtension,ULONG XGI_P3d4Port);
-void ReadVBIOSTablData( UCHAR ChipType , PVB_DEVICE_INFO pVBInfo);
-void XGINew_DDR1x_MRS_XG20( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo);
-void XGINew_SetDRAMModeRegister_XG20( PXGI_HW_DEVICE_INFO HwDeviceExtension );
-void XGINew_SetDRAMModeRegister_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension );
-void XGINew_ChkSenseStatus ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo ) ;
-void XGINew_SetModeScratch ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo ) ;
-void XGINew_GetXG21Sense(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo) ;
-UCHAR GetXG21FPBits(PVB_DEVICE_INFO pVBInfo);
-void XGINew_GetXG27Sense(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo) ;
-UCHAR GetXG27FPBits(PVB_DEVICE_INFO pVBInfo);
-
-#ifdef WIN2000
-/* [Billy] 2007/05/20 For CH7007 */
-extern UCHAR CH7007TVReg_UNTSC[][8],CH7007TVReg_ONTSC[][8],CH7007TVReg_UPAL[][8],CH7007TVReg_OPAL[][8];
-extern UCHAR XGI7007_CHTVVCLKUNTSC[],XGI7007_CHTVVCLKONTSC[],XGI7007_CHTVVCLKUPAL[],XGI7007_CHTVVCLKOPAL[];
-#endif
-
-#ifdef LINUX_KERNEL
-void DelayUS(ULONG MicroSeconds)
+unsigned long UNIROM; /* UNIROM */
+unsigned char ChkLFB(struct vb_device_info *);
+void XGINew_Delay15us(unsigned long);
+void SetPowerConsume(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long XGI_P3d4Port);
+void ReadVBIOSTablData(unsigned char ChipType, struct vb_device_info *pVBInfo);
+void XGINew_DDR1x_MRS_XG20(unsigned long P3c4, struct vb_device_info *pVBInfo);
+void XGINew_SetDRAMModeRegister_XG20(struct xgi_hw_device_info *HwDeviceExtension);
+void XGINew_SetDRAMModeRegister_XG27(struct xgi_hw_device_info *HwDeviceExtension);
+void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) ;
+void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) ;
+void XGINew_GetXG21Sense(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) ;
+unsigned char GetXG21FPBits(struct vb_device_info *pVBInfo);
+void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) ;
+unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo);
+
+void DelayUS(unsigned long MicroSeconds)
{
udelay(MicroSeconds);
}
-#endif
+
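
With the osdef.h guards gone, DelayUS() is now an unconditional wrapper around udelay(). One general kernel caveat, independent of this patch: udelay() busy-waits and is intended for short delays (the sizing code further down calls DelayUS(500)); for millisecond-scale waits mdelay(), or a sleeping primitive where the calling context allows it, is the usual choice. A minimal sketch of a variant that splits long requests follows; this is a suggestion only, and the driver keeps the plain wrapper shown above.

/* Sketch only: a DelayUS-style helper that hands millisecond-scale
 * requests to mdelay() and keeps udelay() for the short tail. The
 * driver itself keeps the plain udelay() wrapper above. */
#include <linux/delay.h>

static void xgi_delay_us_sketch(unsigned long microseconds)
{
	if (microseconds >= 1000) {
		mdelay(microseconds / 1000);
		udelay(microseconds % 1000);
	} else {
		udelay(microseconds);
	}
}
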
/* --------------------------------------------------------------------- */
/* Function : XGIInitNew */
@@ -156,46 +118,44 @@ void DelayUS(ULONG MicroSeconds)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGIInitNew( PXGI_HW_DEVICE_INFO HwDeviceExtension )
+unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension)
{
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
- UCHAR i , temp = 0 , temp1 ;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
+ unsigned char i, temp = 0, temp1 ;
// VBIOSVersion[ 5 ] ;
- PUCHAR volatile pVideoMemory;
+ volatile unsigned char *pVideoMemory;
- /* ULONG j, k ; */
+ /* unsigned long j, k ; */
- PXGI_DSReg pSR ;
+ struct XGI_DSReg *pSR ;
- ULONG Temp ;
+ unsigned long Temp ;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
- pVideoMemory = ( PUCHAR )pVBInfo->ROMAddr;
+ pVideoMemory = (unsigned char *)pVBInfo->ROMAddr;
// Newdebugcode( 0x99 ) ;
/* if ( pVBInfo->ROMAddr == 0 ) */
- /* return( FALSE ) ; */
+ /* return( 0 ) ; */
- if ( pVBInfo->FBAddr == 0 )
-{
+ if (pVBInfo->FBAddr == 0) {
printk("\n pVBInfo->FBAddr == 0 ");
- return( FALSE ) ;
-}
+ return 0;
+ }
printk("1");
- if ( pVBInfo->BaseAddr == 0 )
-{
- printk("\npVBInfo->BaseAddr == 0 ");
- return( FALSE ) ;
+if (pVBInfo->BaseAddr == 0) {
+ printk("\npVBInfo->BaseAddr == 0 ");
+ return 0;
}
printk("2");
@@ -205,12 +165,9 @@ printk("2");
printk("3");
if ( !HwDeviceExtension->bIntegratedMMEnabled )
-{
- return( FALSE ) ; /* alan */
-}
-printk("4");
+ return 0; /* alan */
-// XGI_MemoryCopy( VBIOSVersion , HwDeviceExtension->szVBIOSVer , 4 ) ;
+printk("4");
// VBIOSVersion[ 4 ] = 0x0 ;
@@ -407,8 +364,8 @@ printk("15");
XGI_UnLockCRT2( HwDeviceExtension, pVBInfo) ;
XGINew_SetRegANDOR( pVBInfo->Part0Port , 0x3F , 0xEF , 0x00 ) ; /* alan, disable VideoCapture */
XGINew_SetReg1( pVBInfo->Part1Port , 0x00 , 0x00 ) ;
- temp1 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x7B ) ; /* chk if BCLK>=100MHz */
- temp = ( UCHAR )( ( temp1 >> 4 ) & 0x0F ) ;
+ temp1 = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x7B); /* chk if BCLK>=100MHz */
+ temp = (unsigned char)((temp1 >> 4) & 0x0F);
XGINew_SetReg1( pVBInfo->Part1Port , 0x02 , ( *pVBInfo->pCRT2Data_1_2 ) ) ;
@@ -460,15 +417,14 @@ printk("18");
XGINew_SetReg1( pVBInfo->P3d4 , 0x83 , 0x00 ) ;
printk("181");
- if ( HwDeviceExtension->bSkipSense == FALSE )
- {
-printk("182");
+if (HwDeviceExtension->bSkipSense == 0) {
+ printk("182");
XGI_SenseCRT1(pVBInfo) ;
-printk("183");
+ printk("183");
/* XGINew_DetectMonitor( HwDeviceExtension ) ; */
-pVBInfo->IF_DEF_CH7007 = 0;
+ pVBInfo->IF_DEF_CH7007 = 0;
if ( ( HwDeviceExtension->jChipType == XG21 ) && (pVBInfo->IF_DEF_CH7007) )
{
printk("184");
@@ -504,8 +460,7 @@ printk("19");
XGINew_SetDRAMDefaultRegister340( HwDeviceExtension , pVBInfo->P3d4, pVBInfo ) ;
- if ( HwDeviceExtension->bSkipDramSizing == TRUE )
- {
+ if (HwDeviceExtension->bSkipDramSizing == 1) {
pSR = HwDeviceExtension->pSR ;
if ( pSR!=NULL )
{
@@ -519,15 +474,6 @@ printk("19");
} /* SkipDramSizing */
else
{
-#if 0
- if ( HwDeviceExtension->jChipType == XG20 )
- {
- XGINew_SetReg1( pVBInfo->P3c4 , 0x13 , pVBInfo->SR15[0][XGINew_RAMType] ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x14 , pVBInfo->SR15[1][XGINew_RAMType] ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x20 , 0x20 ) ;
- }
- else
-#endif
{
printk("20");
@@ -544,7 +490,7 @@ printk("22");
/* SetDefExt2Regs begin */
/*
AGP = 1 ;
- temp =( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x3A ) ;
+ temp =(unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x3A) ;
temp &= 0x30 ;
if ( temp == 0x30 )
AGP = 0 ;
@@ -563,7 +509,7 @@ printk("22");
// Temp = ( InPortLong( 0xcfc ) & 0xFFFF ) ;
// if ( Temp == 0x1039 )
// {
- XGINew_SetReg1( pVBInfo->P3c4 , 0x22 , ( UCHAR )( ( *pVBInfo->pSR22 ) & 0xFE ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x22, (unsigned char)((*pVBInfo->pSR22) & 0xFE));
// }
// else
// {
@@ -585,7 +531,7 @@ XGINew_SetReg1( pVBInfo->P3d4 , 0x8c , 0x87);
XGINew_SetReg1( pVBInfo->P3c4 , 0x14 , 0x31);
printk("25");
- return( TRUE ) ;
+return 1;
} /* end of init */
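
XGIInitNew() still traces its progress with bare printk("1") ... printk("25") markers and now returns 1/0 where it used to return TRUE/FALSE. The markers survive this conversion untouched; the usual follow-up is to give them a log level via pr_debug()/pr_info() so they can be filtered. A small sketch of that shape follows, as a suggested later cleanup rather than something this patch does.

/* Sketch: a levelled replacement for the numbered printk() progress
 * markers. pr_debug() compiles away unless DEBUG or dynamic debug is
 * enabled; using it here is a suggestion, not part of this patch. */
#include <linux/kernel.h>

static void xgi_init_step_sketch(int step)
{
	pr_debug("XGIInitNew: step %d\n", step);
}
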
@@ -600,9 +546,10 @@ printk("25");
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGINew_GetXG20DRAMType( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+unsigned char XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- UCHAR data, temp ;
+ unsigned char data, temp;
if ( HwDeviceExtension->jChipType < XG20 )
{
@@ -670,9 +617,9 @@ UCHAR XGINew_GetXG20DRAMType( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGINew_Get310DRAMType(PVB_DEVICE_INFO pVBInfo)
+unsigned char XGINew_Get310DRAMType(struct vb_device_info *pVBInfo)
{
- UCHAR data ;
+ unsigned char data ;
/* index = XGINew_GetReg1( pVBInfo->P3c4 , 0x1A ) ; */
/* index &= 07 ; */
@@ -694,7 +641,7 @@ UCHAR XGINew_Get310DRAMType(PVB_DEVICE_INFO pVBInfo)
/* Description : */
/* --------------------------------------------------------------------- */
/*
-void XGINew_Delay15us(ULONG ulMicrsoSec)
+void XGINew_Delay15us(unsigned long ulMicrsoSec)
{
}
*/
@@ -706,9 +653,9 @@ void XGINew_Delay15us(ULONG ulMicrsoSec)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SDR_MRS( PVB_DEVICE_INFO pVBInfo )
+void XGINew_SDR_MRS(struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x16 ) ;
data &= 0x3F ; /* SR16 D7=0,D6=0 */
@@ -726,7 +673,7 @@ void XGINew_SDR_MRS( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR1x_MRS_340( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR1x_MRS_340(unsigned long P3c4, struct vb_device_info *pVBInfo)
{
XGINew_SetReg1( P3c4 , 0x18 , 0x01 ) ;
XGINew_SetReg1( P3c4 , 0x19 , 0x20 ) ;
@@ -764,7 +711,7 @@ void XGINew_DDR1x_MRS_340( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR2x_MRS_340( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR2x_MRS_340(unsigned long P3c4, struct vb_device_info *pVBInfo)
{
XGINew_SetReg1( P3c4 , 0x18 , 0x00 ) ;
XGINew_SetReg1( P3c4 , 0x19 , 0x20 ) ;
@@ -793,9 +740,10 @@ void XGINew_DDR2x_MRS_340( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDRII_Bootup_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDRII_Bootup_XG27(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long P3c4, struct vb_device_info *pVBInfo)
{
- ULONG P3d4 = P3c4 + 0x10 ;
+ unsigned long P3d4 = P3c4 + 0x10 ;
XGINew_RAMType = ( int )XGINew_GetXG20DRAMType( HwDeviceExtension , pVBInfo ) ;
XGINew_SetMemoryClock( HwDeviceExtension , pVBInfo ) ;
@@ -871,9 +819,10 @@ void XGINew_DDRII_Bootup_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG P3
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR2_MRS_XG20( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long P3c4, struct vb_device_info *pVBInfo)
{
- ULONG P3d4 = P3c4 + 0x10 ;
+ unsigned long P3d4 = P3c4 + 0x10 ;
XGINew_RAMType = ( int )XGINew_GetXG20DRAMType( HwDeviceExtension , pVBInfo ) ;
XGINew_SetMemoryClock( HwDeviceExtension , pVBInfo ) ;
@@ -923,9 +872,10 @@ void XGINew_DDR2_MRS_XG20( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG P3c4 ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR2_MRS_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR2_MRS_XG27(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long P3c4, struct vb_device_info *pVBInfo)
{
- ULONG P3d4 = P3c4 + 0x10 ;
+ unsigned long P3d4 = P3c4 + 0x10 ;
XGINew_RAMType = ( int )XGINew_GetXG20DRAMType( HwDeviceExtension , pVBInfo ) ;
XGINew_SetMemoryClock( HwDeviceExtension , pVBInfo ) ;
@@ -1001,9 +951,10 @@ void XGINew_DDR2_MRS_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG P3c4 ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR1x_DefaultRegister( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG Port , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR1x_DefaultRegister(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long Port, struct vb_device_info *pVBInfo)
{
- ULONG P3d4 = Port ,
+ unsigned long P3d4 = Port ,
P3c4 = Port - 0x10 ;
if ( HwDeviceExtension->jChipType >= XG20 )
@@ -1061,9 +1012,10 @@ void XGINew_DDR1x_DefaultRegister( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULON
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR2x_DefaultRegister( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG Port ,PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR2x_DefaultRegister(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long Port, struct vb_device_info *pVBInfo)
{
- ULONG P3d4 = Port ,
+ unsigned long P3d4 = Port ,
P3c4 = Port - 0x10 ;
XGINew_SetMemoryClock( HwDeviceExtension , pVBInfo ) ;
@@ -1112,9 +1064,10 @@ void XGINew_DDR2x_DefaultRegister( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULON
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR2_DefaultRegister( PXGI_HW_DEVICE_INFO HwDeviceExtension, ULONG Port , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR2_DefaultRegister(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long Port, struct vb_device_info *pVBInfo)
{
- ULONG P3d4 = Port ,
+ unsigned long P3d4 = Port ,
P3c4 = Port - 0x10 ;
/* keep following setting sequence, each setting in the same reg insert idle */
@@ -1150,12 +1103,13 @@ void XGINew_DDR2_DefaultRegister( PXGI_HW_DEVICE_INFO HwDeviceExtension, ULONG P
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMDefaultRegister340( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG Port , PVB_DEVICE_INFO pVBInfo)
+void XGINew_SetDRAMDefaultRegister340(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long Port, struct vb_device_info *pVBInfo)
{
- UCHAR temp , temp1 , temp2 , temp3 ,
+ unsigned char temp, temp1, temp2, temp3 ,
i , j , k ;
- ULONG P3d4 = Port ,
+ unsigned long P3d4 = Port ,
P3c4 = Port - 0x10 ;
XGINew_SetReg1( P3d4 , 0x6D , pVBInfo->CR40[ 8 ][ XGINew_RAMType ] ) ;
@@ -1293,11 +1247,11 @@ void XGINew_SetDRAMDefaultRegister340( PXGI_HW_DEVICE_INFO HwDeviceExtension ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR_MRS(PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR_MRS(struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data ;
- PUCHAR volatile pVideoMemory = ( PUCHAR )pVBInfo->ROMAddr ;
+ volatile unsigned char *pVideoMemory = (unsigned char *)pVBInfo->ROMAddr;
/* SR16 <- 1F,DF,2F,AF */
/* yriver modified SR16 <- 0F,DF,0F,AF */
@@ -1361,11 +1315,11 @@ void XGINew_DDR_MRS(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_VerifyMclk( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGINew_VerifyMclk(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- PUCHAR pVideoMemory = pVBInfo->FBAddr ;
- UCHAR i , j ;
- USHORT Temp , SR21 ;
+ unsigned char *pVideoMemory = pVBInfo->FBAddr ;
+ unsigned char i, j ;
+ unsigned short Temp , SR21 ;
pVideoMemory[ 0 ] = 0xaa ; /* alan */
pVideoMemory[ 16 ] = 0x55 ; /* note: PCI read cache is off */
@@ -1407,9 +1361,9 @@ void XGINew_VerifyMclk( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMSize_340( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data ;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
@@ -1418,7 +1372,7 @@ void XGINew_SetDRAMSize_340( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x21 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x21 , ( USHORT )( data & 0xDF ) ) ; /* disable read cache */
+ XGINew_SetReg1(pVBInfo->P3c4, 0x21, (unsigned short)(data & 0xDF)); /* disable read cache */
XGI_DisplayOff( HwDeviceExtension, pVBInfo );
/*data = XGINew_GetReg1( pVBInfo->P3c4 , 0x1 ) ;*/
@@ -1426,8 +1380,7 @@ void XGINew_SetDRAMSize_340( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
/*XGINew_SetReg1( pVBInfo->P3c4 , 0x01 , data ) ;*/ /* Turn OFF Display */
XGINew_DDRSizing340( HwDeviceExtension, pVBInfo ) ;
data=XGINew_GetReg1( pVBInfo->P3c4 , 0x21 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x21 , ( USHORT )( data | 0x20 ) ) ; /* enable read cache */
-
+ XGINew_SetReg1(pVBInfo->P3c4, 0x21, (unsigned short)(data | 0x20)); /* enable read cache */
}
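
XGINew_SetDRAMSize_340() brackets the sizing pass by clearing and then restoring bit 5 of SR21 (the read-cache enable noted in the comments above), using the XGINew_GetReg1()/XGINew_SetReg1() helpers on pVBInfo->P3c4. Assuming those helpers are the usual VGA index/data accessors, and that P3c4 (set to BaseAddr + 0x14 elsewhere in this file) is the relocated 0x3C4 sequencer index port, the read-modify-write pattern looks like the sketch below; this illustrates the pattern and is not the driver's helper implementation.

/* Sketch of the SR21 read-cache bracket, assuming XGINew_GetReg1()/
 * XGINew_SetReg1() are plain VGA index/data accessors (index written to
 * the base port, data at base + 1). Illustration only. */
#include <asm/io.h>

static unsigned char sr_read_sketch(unsigned long p3c4, unsigned char idx)
{
	outb(idx, p3c4);		/* select sequencer register */
	return inb(p3c4 + 1);		/* read its data port */
}

static void sr_write_sketch(unsigned long p3c4, unsigned char idx,
			    unsigned char val)
{
	outb(idx, p3c4);
	outb(val, p3c4 + 1);
}

static void sizing_bracket_sketch(unsigned long p3c4)
{
	unsigned char sr21 = sr_read_sketch(p3c4, 0x21);

	sr_write_sketch(p3c4, 0x21, sr21 & 0xDF);	/* disable read cache */
	/* ... probe DRAM geometry here ... */
	sr_write_sketch(p3c4, 0x21, sr21 | 0x20);	/* re-enable read cache */
}
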
@@ -1437,9 +1390,9 @@ void XGINew_SetDRAMSize_340( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMSize_310( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGINew_SetDRAMSize_310(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data ;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ,
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
#ifdef XGI301
@@ -1455,7 +1408,7 @@ void XGINew_SetDRAMSize_310( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
XGISetModeNew( HwDeviceExtension , 0x2e ) ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x21 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x21 , ( USHORT )( data & 0xDF ) ) ; /* disable read cache */
+ XGINew_SetReg1(pVBInfo->P3c4, 0x21, (unsigned short)(data & 0xDF)); /* disable read cache */
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x1 ) ;
data |= 0x20 ;
@@ -1464,7 +1417,7 @@ void XGINew_SetDRAMSize_310( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x16 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x16 , ( USHORT )( data | 0x0F ) ) ; /* assume lowest speed DRAM */
+ XGINew_SetReg1(pVBInfo->P3c4, 0x16, (unsigned short)(data | 0x0F)); /* assume lowest speed DRAM */
XGINew_SetDRAMModeRegister( pVBInfo ) ;
XGINew_DisableRefresh( HwDeviceExtension, pVBInfo ) ;
@@ -1485,11 +1438,11 @@ void XGINew_SetDRAMSize_310( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
- XGINew_SetReg1( pVBInfo->P3c4 , 0x16 , pVBInfo->SR15[ 1 ][ XGINew_RAMType ] ) ; /* restore SR16 */
+ XGINew_SetReg1(pVBInfo->P3c4, 0x16, pVBInfo->SR15[1][XGINew_RAMType]); /* restore SR16 */
XGINew_EnableRefresh( HwDeviceExtension, pVBInfo ) ;
data=XGINew_GetReg1( pVBInfo->P3c4 ,0x21 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x21 , ( USHORT )( data | 0x20 ) ) ; /* enable read cache */
+ XGINew_SetReg1(pVBInfo->P3c4, 0x21, (unsigned short)(data | 0x20)); /* enable read cache */
}
@@ -1501,14 +1454,14 @@ void XGINew_SetDRAMSize_310( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMModeRegister340( PXGI_HW_DEVICE_INFO HwDeviceExtension )
+void XGINew_SetDRAMModeRegister340(struct xgi_hw_device_info *HwDeviceExtension)
{
- UCHAR data ;
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
+ unsigned char data ;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
pVBInfo->ISXPDOS = 0 ;
pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14 ;
@@ -1555,7 +1508,7 @@ void XGINew_SetDRAMModeRegister340( PXGI_HW_DEVICE_INFO HwDeviceExtension )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMModeRegister( PVB_DEVICE_INFO pVBInfo)
+void XGINew_SetDRAMModeRegister(struct vb_device_info *pVBInfo)
{
if ( XGINew_Get310DRAMType( pVBInfo ) < 2 )
{
@@ -1575,9 +1528,9 @@ void XGINew_SetDRAMModeRegister( PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DisableRefresh( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DisableRefresh(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x1B ) ;
@@ -1593,7 +1546,7 @@ void XGINew_DisableRefresh( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_I
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_EnableRefresh( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGINew_EnableRefresh(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
XGINew_SetReg1( pVBInfo->P3c4 , 0x1B , pVBInfo->SR15[ 3 ][ XGINew_RAMType ] ) ; /* SR1B */
@@ -1608,9 +1561,11 @@ void XGINew_EnableRefresh( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_IN
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DisableChannelInterleaving( int index , USHORT XGINew_DDRDRAM_TYPE[][ 5 ] , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DisableChannelInterleaving(int index,
+ unsigned short XGINew_DDRDRAM_TYPE[][5],
+ struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x15 ) ;
data &= 0x1F ;
@@ -1642,9 +1597,11 @@ void XGINew_DisableChannelInterleaving( int index , USHORT XGINew_DDRDRAM_TYPE[]
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMSizingType( int index , USHORT DRAMTYPE_TABLE[][ 5 ] ,PVB_DEVICE_INFO pVBInfo)
+void XGINew_SetDRAMSizingType(int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data;
data = DRAMTYPE_TABLE[ index ][ 4 ] ;
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x13 , 0x80 , data ) ;
@@ -1659,12 +1616,12 @@ void XGINew_SetDRAMSizingType( int index , USHORT DRAMTYPE_TABLE[][ 5 ] ,PVB_DEV
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_CheckBusWidth_310( PVB_DEVICE_INFO pVBInfo)
+void XGINew_CheckBusWidth_310(struct vb_device_info *pVBInfo)
{
- USHORT data ;
- PULONG volatile pVideoMemory ;
+ unsigned short data ;
+ volatile unsigned long *pVideoMemory ;
- pVideoMemory = (PULONG) pVBInfo->FBAddr;
+ pVideoMemory = (unsigned long *) pVBInfo->FBAddr;
if ( XGINew_Get310DRAMType( pVBInfo ) < 2 )
{
@@ -1690,7 +1647,7 @@ void XGINew_CheckBusWidth_310( PVB_DEVICE_INFO pVBInfo)
XGINew_DataBusWidth = 64 ;
XGINew_ChannelAB = 0 ;
data=XGINew_GetReg1( pVBInfo->P3c4 , 0x14 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x14 , ( USHORT )( data & 0xFD ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x14, (unsigned short)(data & 0xFD));
}
if ( ( pVideoMemory[ 1 ] != 0x456789ABL ) || ( pVideoMemory[ 0 ] != 0x01234567L ) )
@@ -1699,7 +1656,8 @@ void XGINew_CheckBusWidth_310( PVB_DEVICE_INFO pVBInfo)
XGINew_DataBusWidth = 64 ;
XGINew_ChannelAB = 1 ;
data=XGINew_GetReg1( pVBInfo->P3c4 , 0x14 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x14 , ( USHORT )( ( data & 0xFD ) | 0x01 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x14,
+ (unsigned short)((data & 0xFD) | 0x01));
}
return ;
@@ -1792,9 +1750,13 @@ void XGINew_CheckBusWidth_310( PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_SetRank( int index , UCHAR RankNo , UCHAR XGINew_ChannelAB , USHORT DRAMTYPE_TABLE[][ 5 ] , PVB_DEVICE_INFO pVBInfo)
+int XGINew_SetRank(int index,
+ unsigned char RankNo,
+ unsigned char XGINew_ChannelAB,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data;
int RankSize ;
if ( ( RankNo == 2 ) && ( DRAMTYPE_TABLE[ index ][ 0 ] == 2 ) )
@@ -1829,9 +1791,13 @@ int XGINew_SetRank( int index , UCHAR RankNo , UCHAR XGINew_ChannelAB , USHORT D
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_SetDDRChannel( int index , UCHAR ChannelNo , UCHAR XGINew_ChannelAB , USHORT DRAMTYPE_TABLE[][ 5 ] , PVB_DEVICE_INFO pVBInfo)
+int XGINew_SetDDRChannel(int index,
+ unsigned char ChannelNo,
+ unsigned char XGINew_ChannelAB,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data;
int RankSize ;
RankSize = DRAMTYPE_TABLE[index][3]/2 * XGINew_DataBusWidth/32;
@@ -1865,30 +1831,29 @@ int XGINew_SetDDRChannel( int index , UCHAR ChannelNo , UCHAR XGINew_ChannelAB ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_CheckColumn( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+int XGINew_CheckColumn(int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
int i ;
- ULONG Increment , Position ;
+ unsigned long Increment , Position ;
/* Increment = 1 << ( DRAMTYPE_TABLE[ index ][ 2 ] + XGINew_DataBusWidth / 64 + 1 ) ; */
Increment = 1 << ( 10 + XGINew_DataBusWidth / 64 ) ;
for( i = 0 , Position = 0 ; i < 2 ; i++ )
{
- *( ( PULONG )( pVBInfo->FBAddr + Position ) ) = Position ;
- Position += Increment ;
+ *((unsigned long *)(pVBInfo->FBAddr + Position)) = Position;
+ Position += Increment ;
}
-#ifdef WIN2000 /* chiawen for linux solution */
- DelayUS( 100 ) ;
-#endif
for( i = 0 , Position = 0 ; i < 2 ; i++ )
{
/* if ( pVBInfo->FBAddr[ Position ] != Position ) */
- if ( ( *( PULONG )( pVBInfo->FBAddr + Position ) ) != Position )
- return( 0 ) ;
- Position += Increment ;
+ if ((*(unsigned long *)(pVBInfo->FBAddr + Position)) != Position)
+ return 0;
+ Position += Increment;
}
return( 1 ) ;
}
@@ -1900,26 +1865,28 @@ int XGINew_CheckColumn( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INF
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_CheckBanks( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+int XGINew_CheckBanks(int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
int i ;
- ULONG Increment , Position ;
+ unsigned long Increment , Position ;
Increment = 1 << ( DRAMTYPE_TABLE[ index ][ 2 ] + XGINew_DataBusWidth / 64 + 2 ) ;
for( i = 0 , Position = 0 ; i < 4 ; i++ )
{
/* pVBInfo->FBAddr[ Position ] = Position ; */
- *( ( PULONG )( pVBInfo->FBAddr + Position ) ) = Position ;
- Position += Increment ;
+ *((unsigned long *)(pVBInfo->FBAddr + Position)) = Position;
+ Position += Increment ;
}
for( i = 0 , Position = 0 ; i < 4 ; i++ )
{
/* if (pVBInfo->FBAddr[ Position ] != Position ) */
- if ( ( *( PULONG )( pVBInfo->FBAddr + Position ) ) != Position )
- return( 0 ) ;
- Position += Increment ;
+ if ((*(unsigned long *)(pVBInfo->FBAddr + Position)) != Position)
+ return 0;
+ Position += Increment;
}
return( 1 ) ;
}
@@ -1931,10 +1898,12 @@ int XGINew_CheckBanks( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_CheckRank( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+int XGINew_CheckRank(int RankNo, int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
int i ;
- ULONG Increment , Position ;
+ unsigned long Increment , Position ;
Increment = 1 << ( DRAMTYPE_TABLE[ index ][ 2 ] + DRAMTYPE_TABLE[ index ][ 1 ] +
DRAMTYPE_TABLE[ index ][ 0 ] + XGINew_DataBusWidth / 64 + RankNo ) ;
@@ -1942,18 +1911,18 @@ int XGINew_CheckRank( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB
for( i = 0 , Position = 0 ; i < 2 ; i++ )
{
/* pVBInfo->FBAddr[ Position ] = Position ; */
- /* *( ( PULONG )( pVBInfo->FBAddr ) ) = Position ; */
- *( ( PULONG )( pVBInfo->FBAddr + Position ) ) = Position ;
- Position += Increment ;
+ /* *( (unsigned long *)( pVBInfo->FBAddr ) ) = Position ; */
+ *((unsigned long *)(pVBInfo->FBAddr + Position)) = Position;
+ Position += Increment;
}
for( i = 0 , Position = 0 ; i < 2 ; i++ )
{
/* if ( pVBInfo->FBAddr[ Position ] != Position ) */
- /* if ( ( *( PULONG )( pVBInfo->FBAddr ) ) != Position ) */
- if ( ( *( PULONG )( pVBInfo->FBAddr + Position ) ) != Position )
- return( 0 ) ;
- Position += Increment ;
+ /* if ( ( *(unsigned long *)( pVBInfo->FBAddr ) ) != Position ) */
+ if ((*(unsigned long *)(pVBInfo->FBAddr + Position)) != Position)
+ return 0;
+ Position += Increment;
}
return( 1 );
}
@@ -1965,10 +1934,12 @@ int XGINew_CheckRank( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_CheckDDRRank( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+int XGINew_CheckDDRRank(int RankNo, int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
- ULONG Increment , Position ;
- USHORT data ;
+ unsigned long Increment , Position ;
+ unsigned short data ;
Increment = 1 << ( DRAMTYPE_TABLE[ index ][ 2 ] + DRAMTYPE_TABLE[ index ][ 1 ] +
DRAMTYPE_TABLE[ index ][ 0 ] + XGINew_DataBusWidth / 64 + RankNo ) ;
@@ -1976,18 +1947,18 @@ int XGINew_CheckDDRRank( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ],
Increment += Increment / 2 ;
Position = 0;
- *( ( PULONG )( pVBInfo->FBAddr + Position + 0 ) ) = 0x01234567 ;
- *( ( PULONG )( pVBInfo->FBAddr + Position + 1 ) ) = 0x456789AB ;
- *( ( PULONG )( pVBInfo->FBAddr + Position + 2 ) ) = 0x55555555 ;
- *( ( PULONG )( pVBInfo->FBAddr + Position + 3 ) ) = 0x55555555 ;
- *( ( PULONG )( pVBInfo->FBAddr + Position + 4 ) ) = 0xAAAAAAAA ;
- *( ( PULONG )( pVBInfo->FBAddr + Position + 5 ) ) = 0xAAAAAAAA ;
-
- if ( ( *( PULONG )( pVBInfo->FBAddr + 1 ) ) == 0x456789AB )
- return( 1 ) ;
+ *((unsigned long *)(pVBInfo->FBAddr + Position + 0)) = 0x01234567;
+ *((unsigned long *)(pVBInfo->FBAddr + Position + 1)) = 0x456789AB;
+ *((unsigned long *)(pVBInfo->FBAddr + Position + 2)) = 0x55555555;
+ *((unsigned long *)(pVBInfo->FBAddr + Position + 3)) = 0x55555555;
+ *((unsigned long *)(pVBInfo->FBAddr + Position + 4)) = 0xAAAAAAAA;
+ *((unsigned long *)(pVBInfo->FBAddr + Position + 5)) = 0xAAAAAAAA;
- if ( ( *( PULONG )( pVBInfo->FBAddr + 0 ) ) == 0x01234567 )
- return( 0 ) ;
+ if ((*(unsigned long *)(pVBInfo->FBAddr + 1)) == 0x456789AB)
+ return 1;
+
+ if ((*(unsigned long *)(pVBInfo->FBAddr + 0)) == 0x01234567)
+ return 0;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x14 ) ;
data &= 0xF3 ;
@@ -2007,7 +1978,9 @@ int XGINew_CheckDDRRank( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ],
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_CheckRanks( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+int XGINew_CheckRanks(int RankNo, int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
int r ;
@@ -2033,7 +2006,9 @@ int XGINew_CheckRanks( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PV
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_CheckDDRRanks( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+int XGINew_CheckDDRRanks(int RankNo, int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
int r ;
@@ -2059,10 +2034,10 @@ int XGINew_CheckDDRRanks( int RankNo , int index , USHORT DRAMTYPE_TABLE[][ 5 ],
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_SDRSizing(PVB_DEVICE_INFO pVBInfo)
+int XGINew_SDRSizing(struct vb_device_info *pVBInfo)
{
int i ;
- UCHAR j ;
+ unsigned char j ;
for( i = 0 ; i < 13 ; i++ )
{
@@ -2070,7 +2045,8 @@ int XGINew_SDRSizing(PVB_DEVICE_INFO pVBInfo)
for( j = 2 ; j > 0 ; j-- )
{
- if ( !XGINew_SetRank( i , ( UCHAR )j , XGINew_ChannelAB , XGINew_SDRDRAM_TYPE , pVBInfo) )
+ if (!XGINew_SetRank(i, (unsigned char)j, XGINew_ChannelAB,
+ XGINew_SDRDRAM_TYPE, pVBInfo))
continue ;
else
{
@@ -2089,11 +2065,13 @@ int XGINew_SDRSizing(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGINew_SetDRAMSizeReg( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+unsigned short XGINew_SetDRAMSizeReg(int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
- USHORT data = 0 , memsize = 0 ;
+ unsigned short data = 0 , memsize = 0;
int RankSize ;
- UCHAR ChannelNo ;
+ unsigned char ChannelNo ;
RankSize = DRAMTYPE_TABLE[ index ][ 3 ] * XGINew_DataBusWidth / 32 ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x13 ) ;
@@ -2138,11 +2116,13 @@ USHORT XGINew_SetDRAMSizeReg( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVI
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGINew_SetDRAMSize20Reg( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DEVICE_INFO pVBInfo)
+unsigned short XGINew_SetDRAMSize20Reg(int index,
+ unsigned short DRAMTYPE_TABLE[][5],
+ struct vb_device_info *pVBInfo)
{
- USHORT data = 0 , memsize = 0 ;
+ unsigned short data = 0 , memsize = 0;
int RankSize ;
- UCHAR ChannelNo ;
+ unsigned char ChannelNo ;
RankSize = DRAMTYPE_TABLE[ index ][ 3 ] * XGINew_DataBusWidth / 8 ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x13 ) ;
@@ -2188,31 +2168,32 @@ USHORT XGINew_SetDRAMSize20Reg( int index , USHORT DRAMTYPE_TABLE[][ 5 ], PVB_DE
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_ReadWriteRest( USHORT StopAddr , USHORT StartAddr, PVB_DEVICE_INFO pVBInfo)
+int XGINew_ReadWriteRest(unsigned short StopAddr, unsigned short StartAddr,
+ struct vb_device_info *pVBInfo)
{
int i ;
- ULONG Position = 0 ;
+ unsigned long Position = 0 ;
- *( ( PULONG )( pVBInfo->FBAddr + Position ) ) = Position ;
+ *((unsigned long *)(pVBInfo->FBAddr + Position)) = Position;
for( i = StartAddr ; i <= StopAddr ; i++ )
{
Position = 1 << i ;
- *( ( PULONG )( pVBInfo->FBAddr + Position ) ) = Position ;
+ *((unsigned long *)(pVBInfo->FBAddr + Position)) = Position;
}
DelayUS( 500 ) ; /* [Vicent] 2004/04/16. Fix #1759 Memory Size error in Multi-Adapter. */
Position = 0 ;
- if ( ( *( PULONG )( pVBInfo->FBAddr + Position ) ) != Position )
- return( 0 ) ;
+ if ((*(unsigned long *)(pVBInfo->FBAddr + Position)) != Position)
+ return 0;
for( i = StartAddr ; i <= StopAddr ; i++ )
{
Position = 1 << i ;
- if ( ( *( PULONG )( pVBInfo->FBAddr + Position ) ) != Position )
- return( 0 ) ;
+ if ((*(unsigned long *)(pVBInfo->FBAddr + Position)) != Position)
+ return 0;
}
return( 1 ) ;
}
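
XGINew_ReadWriteRest() is the core probe behind the sizing code: it writes each candidate offset's own value at byte offset 1 << i, waits briefly, then reads everything back. If the installed DRAM decodes fewer address bits than the geometry under test assumes, two offsets alias onto the same cell and a mismatch shows up, so XGINew_DDRSizing340() below walks the DRAM-type tables (ordered largest-first) until one geometry passes. A standalone sketch of the same aliasing test against an ordinary mapped buffer follows; the names and the buffer parameter are illustrative, not the driver's.

/* Sketch of the address-bit aliasing probe: store a unique value at
 * offset 1 << i for each candidate bit, then read everything back.
 * If the backing memory decodes fewer bits than assumed, two offsets
 * alias and a mismatch appears. 'fb' stands in for the mapped
 * framebuffer; this is not the driver's code. */
#include <stdint.h>
#include <stddef.h>

static int probe_address_bits_sketch(volatile uint32_t *fb,
				     unsigned int start_bit,
				     unsigned int stop_bit)
{
	unsigned int i;

	fb[0] = 0;
	for (i = start_bit; i <= stop_bit; i++)
		fb[(size_t)1 << i] = (uint32_t)(1u << i);

	if (fb[0] != 0)
		return 0;	/* offset 0 was clobbered: aliasing */
	for (i = start_bit; i <= stop_bit; i++)
		if (fb[(size_t)1 << i] != (uint32_t)(1u << i))
			return 0;	/* this address bit is not decoded */

	return 1;		/* all candidate bits map to distinct cells */
}
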
@@ -2224,9 +2205,9 @@ int XGINew_ReadWriteRest( USHORT StopAddr , USHORT StartAddr, PVB_DEVICE_INFO pV
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGINew_CheckFrequence( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
{
- UCHAR data ;
+ unsigned char data ;
data = XGINew_GetReg1( pVBInfo->P3d4 , 0x97 ) ;
@@ -2247,9 +2228,9 @@ UCHAR XGINew_CheckFrequence( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_CheckChannel( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- UCHAR data;
+ unsigned char data;
switch( HwDeviceExtension->jChipType )
{
@@ -2528,10 +2509,10 @@ void XGINew_CheckChannel( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_DDRSizing340( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
int i ;
- USHORT memsize , addr ;
+ unsigned short memsize , addr ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x15 , 0x00 ) ; /* noninterleaving */
XGINew_SetReg1( pVBInfo->P3c4 , 0x1C , 0x00 ) ; /* nontiling */
@@ -2548,7 +2529,7 @@ int XGINew_DDRSizing340( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
continue ;
addr = memsize + ( XGINew_ChannelAB - 2 ) + 20 ;
- if ( ( HwDeviceExtension->ulVideoMemorySize - 1 ) < ( ULONG )( 1 << addr ) )
+ if ((HwDeviceExtension->ulVideoMemorySize - 1) < (unsigned long)(1 << addr))
continue ;
if ( XGINew_ReadWriteRest( addr , 5, pVBInfo ) == 1 )
@@ -2566,7 +2547,7 @@ int XGINew_DDRSizing340( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
continue ;
addr = memsize + ( XGINew_ChannelAB - 2 ) + 20 ;
- if ( ( HwDeviceExtension->ulVideoMemorySize - 1 ) < ( ULONG )( 1 << addr ) )
+ if ((HwDeviceExtension->ulVideoMemorySize - 1) < (unsigned long)(1 << addr))
continue ;
if ( XGINew_ReadWriteRest( addr , 9, pVBInfo ) == 1 )
@@ -2583,10 +2564,10 @@ int XGINew_DDRSizing340( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-int XGINew_DDRSizing(PVB_DEVICE_INFO pVBInfo)
+int XGINew_DDRSizing(struct vb_device_info *pVBInfo)
{
int i ;
- UCHAR j ;
+ unsigned char j ;
for( i = 0 ; i < 4 ; i++ )
{
@@ -2595,7 +2576,8 @@ int XGINew_DDRSizing(PVB_DEVICE_INFO pVBInfo)
for( j = 2 ; j > 0 ; j-- )
{
XGINew_SetDDRChannel( i , j , XGINew_ChannelAB , XGINew_DDRDRAM_TYPE , pVBInfo ) ;
- if ( !XGINew_SetRank( i , ( UCHAR )j , XGINew_ChannelAB , XGINew_DDRDRAM_TYPE, pVBInfo ) )
+ if (!XGINew_SetRank(i, (unsigned char)j, XGINew_ChannelAB,
+ XGINew_DDRDRAM_TYPE, pVBInfo))
continue ;
else
{
@@ -2613,7 +2595,7 @@ int XGINew_DDRSizing(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetMemoryClock( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
@@ -2634,9 +2616,7 @@ void XGINew_SetMemoryClock( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_IN
if ( ( pVBInfo->MCLKData[ XGINew_RAMType ].SR28 == 0x1C ) && ( pVBInfo->MCLKData[ XGINew_RAMType ].SR29 == 0x01 )
&& ( ( ( pVBInfo->ECLKData[ XGINew_RAMType ].SR2E == 0x1C ) && ( pVBInfo->ECLKData[ XGINew_RAMType ].SR2F == 0x01 ) )
|| ( ( pVBInfo->ECLKData[ XGINew_RAMType ].SR2E == 0x22 ) && ( pVBInfo->ECLKData[ XGINew_RAMType ].SR2F == 0x01 ) ) ) )
- {
- XGINew_SetReg1( pVBInfo->P3c4 , 0x32 , ( ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x32 ) & 0xFC ) | 0x02 ) ;
- }
+ XGINew_SetReg1(pVBInfo->P3c4, 0x32, ((unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x32) & 0xFC) | 0x02);
}
}
@@ -2647,12 +2627,12 @@ void XGINew_SetMemoryClock( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_IN
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN ChkLFB( PVB_DEVICE_INFO pVBInfo )
+unsigned char ChkLFB(struct vb_device_info *pVBInfo)
{
- if ( LFBDRAMTrap & XGINew_GetReg1( pVBInfo->P3d4 , 0x78 ) )
- return( TRUE ) ;
- else
- return( FALSE );
+ if (LFBDRAMTrap & XGINew_GetReg1(pVBInfo->P3d4 , 0x78))
+ return 1;
+ else
+ return 0;
}
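
ChkLFB() is a straight BOOLEAN/TRUE/FALSE to unsigned char/1/0 conversion, matching the return-type changes in XGIInitNew() above. The natural end point for predicates like this in kernel code is bool with true/false from <linux/types.h>; a minimal sketch of that form follows, assuming the driver's XGINew_GetReg1() helper and the LFBDRAMTrap mask remain in scope. It is a suggested follow-up, not part of this patch.

/* Sketch only: ChkLFB() written with the kernel bool type. Assumes the
 * driver's XGINew_GetReg1() helper and LFBDRAMTrap constant are visible
 * here; a suggested follow-up, not part of the conversion. */
#include <linux/types.h>

static bool chk_lfb_sketch(struct vb_device_info *pVBInfo)
{
	return (XGINew_GetReg1(pVBInfo->P3d4, 0x78) & LFBDRAMTrap) != 0;
}
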
@@ -2664,17 +2644,18 @@ BOOLEAN ChkLFB( PVB_DEVICE_INFO pVBInfo )
/* in second chip, assume CR A1 D[6]="1" in this case */
/* output : none */
/* --------------------------------------------------------------------- */
-void SetPowerConsume ( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG XGI_P3d4Port )
+void SetPowerConsume(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned long XGI_P3d4Port)
{
- ULONG lTemp ;
- UCHAR bTemp;
+ unsigned long lTemp ;
+ unsigned char bTemp;
HwDeviceExtension->pQueryVGAConfigSpace( HwDeviceExtension , 0x08 , 0 , &lTemp ) ; /* Get */
if ((lTemp&0xFF)==0)
{
/* set CR58 D[5]=0 D[3]=0 */
XGINew_SetRegAND( XGI_P3d4Port , 0x58 , 0xD7 ) ;
- bTemp = (UCHAR) XGINew_GetReg1( XGI_P3d4Port , 0xCB ) ;
+ bTemp = (unsigned char) XGINew_GetReg1(XGI_P3d4Port, 0xCB);
if (bTemp&0x20)
{
if (!(bTemp&0x10))
@@ -2692,15 +2673,13 @@ void SetPowerConsume ( PXGI_HW_DEVICE_INFO HwDeviceExtension , ULONG XGI_P3d4Por
}
-
-#if defined(LINUX_XF86)||defined(LINUX_KERNEL)
-void XGINew_InitVBIOSData(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGINew_InitVBIOSData(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- /* ULONG ROMAddr = (ULONG)HwDeviceExtension->pjVirtualRomBase; */
+ /* unsigned long ROMAddr = (unsigned long)HwDeviceExtension->pjVirtualRomBase; */
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
pVBInfo->ISXPDOS = 0 ;
pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14 ;
@@ -2736,7 +2715,6 @@ void XGINew_InitVBIOSData(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
}
}
-#endif /* For Linux */
/* --------------------------------------------------------------------- */
/* Function : ReadVBIOSTablData */
@@ -2744,200 +2722,11 @@ void XGINew_InitVBIOSData(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void ReadVBIOSTablData( UCHAR ChipType , PVB_DEVICE_INFO pVBInfo)
+void ReadVBIOSTablData(unsigned char ChipType, struct vb_device_info *pVBInfo)
{
- PUCHAR volatile pVideoMemory = ( PUCHAR )pVBInfo->ROMAddr ;
- ULONG i ;
- UCHAR j , k ;
-#if 0
- ULONG ii , jj ;
- i = pVideoMemory[ 0x1CF ] | ( pVideoMemory[ 0x1D0 ] << 8 ) ; /* UniROM */
- if ( i != 0 )
- UNIROM = 1 ;
-
- ii = 0x90 ;
- for( jj = 0x00 ; jj < 0x08 ; jj++ )
- {
- pVBInfo->MCLKData[ jj ].SR28 = pVideoMemory[ ii ] ;
- pVBInfo->MCLKData[ jj ].SR29 = pVideoMemory[ ii + 1] ;
- pVBInfo->MCLKData[ jj ].SR2A = pVideoMemory[ ii + 2] ;
- pVBInfo->MCLKData[ jj ].CLOCK = pVideoMemory[ ii + 3 ] | ( pVideoMemory[ ii + 4 ] << 8 ) ;
- ii += 0x05 ;
- }
-
- ii = 0xB8 ;
- for( jj = 0x00 ; jj < 0x08 ; jj++ )
- {
- pVBInfo->ECLKData[ jj ].SR2E = pVideoMemory[ ii ] ;
- pVBInfo->ECLKData[ jj ].SR2F=pVideoMemory[ ii + 1 ] ;
- pVBInfo->ECLKData[ jj ].SR30= pVideoMemory[ ii + 2 ] ;
- pVBInfo->ECLKData[ jj ].CLOCK= pVideoMemory[ ii + 3 ] | ( pVideoMemory[ ii + 4 ] << 8 ) ;
- ii += 0x05 ;
- }
-
- /* Volari customize data area start */
- /* if ( ChipType == XG40 ) */
- if ( ChipType >= XG40 )
- {
- ii = 0xE0 ;
- for( jj = 0x00 ; jj < 0x03 ; jj++ )
- {
- pVBInfo->SR15[ jj ][ 0 ] = pVideoMemory[ ii ] ; /* SR13, SR14, and SR18 */
- pVBInfo->SR15[ jj ][ 1 ] = pVideoMemory[ ii + 1 ] ;
- pVBInfo->SR15[ jj ][ 2 ] = pVideoMemory[ ii + 2 ] ;
- pVBInfo->SR15[ jj ][ 3 ] = pVideoMemory[ ii + 3 ] ;
- pVBInfo->SR15[ jj ][ 4 ] = pVideoMemory[ ii + 4 ] ;
- pVBInfo->SR15[ jj ][ 5 ] = pVideoMemory[ ii + 5 ] ;
- pVBInfo->SR15[ jj ][ 6 ] = pVideoMemory[ ii + 6 ] ;
- pVBInfo->SR15[ jj ][ 7 ] = pVideoMemory[ ii + 7 ] ;
- ii += 0x08 ;
- }
- ii = 0x110 ;
- jj = 0x03 ;
- pVBInfo->SR15[ jj ][ 0 ] = pVideoMemory[ ii ] ; /* SR1B */
- pVBInfo->SR15[ jj ][ 1 ] = pVideoMemory[ ii + 1 ] ;
- pVBInfo->SR15[ jj ][ 2 ] = pVideoMemory[ ii + 2 ] ;
- pVBInfo->SR15[ jj ][ 3 ] = pVideoMemory[ ii + 3 ] ;
- pVBInfo->SR15[ jj ][ 4 ] = pVideoMemory[ ii + 4 ] ;
- pVBInfo->SR15[ jj ][ 5 ] = pVideoMemory[ ii + 5 ] ;
- pVBInfo->SR15[ jj ][ 6 ] = pVideoMemory[ ii + 6 ] ;
- pVBInfo->SR15[ jj ][ 7 ] = pVideoMemory[ ii + 7 ] ;
-
- *pVBInfo->pSR07 = pVideoMemory[ 0x74 ] ;
- *pVBInfo->pSR1F = pVideoMemory[ 0x75 ] ;
- *pVBInfo->pSR21 = pVideoMemory[ 0x76 ] ;
- *pVBInfo->pSR22 = pVideoMemory[ 0x77 ] ;
- *pVBInfo->pSR23 = pVideoMemory[ 0x78 ] ;
- *pVBInfo->pSR24 = pVideoMemory[ 0x79 ] ;
- pVBInfo->SR25[ 0 ] = pVideoMemory[ 0x7A ] ;
- *pVBInfo->pSR31 = pVideoMemory[ 0x7B ] ;
- *pVBInfo->pSR32 = pVideoMemory[ 0x7C ] ;
- *pVBInfo->pSR33 = pVideoMemory[ 0x7D ] ;
- ii = 0xF8 ;
-
- for( jj = 0 ; jj < 3 ; jj++ )
- {
- pVBInfo->CR40[ jj ][ 0 ] = pVideoMemory[ ii ] ;
- pVBInfo->CR40[ jj ][ 1 ] = pVideoMemory[ ii + 1 ] ;
- pVBInfo->CR40[ jj ][ 2 ] = pVideoMemory[ ii + 2 ] ;
- pVBInfo->CR40[ jj ][ 3 ] = pVideoMemory[ ii + 3 ] ;
- pVBInfo->CR40[ jj ][ 4 ] = pVideoMemory[ ii + 4 ] ;
- pVBInfo->CR40[ jj ][ 5 ] = pVideoMemory[ ii + 5 ] ;
- pVBInfo->CR40[ jj ][ 6 ] = pVideoMemory[ ii + 6 ] ;
- pVBInfo->CR40[ jj ][ 7 ] = pVideoMemory[ ii + 7 ] ;
- ii += 0x08 ;
- }
-
- ii = 0x118 ;
- for( j = 3 ; j < 24 ; j++ )
- {
- pVBInfo->CR40[ j ][ 0 ] = pVideoMemory[ ii ] ;
- pVBInfo->CR40[ j ][ 1 ] = pVideoMemory[ ii + 1 ] ;
- pVBInfo->CR40[ j ][ 2 ] = pVideoMemory[ ii + 2 ] ;
- pVBInfo->CR40[ j ][ 3 ] = pVideoMemory[ ii + 3 ] ;
- pVBInfo->CR40[ j ][ 4 ] = pVideoMemory[ ii + 4 ] ;
- pVBInfo->CR40[ j ][ 5 ] = pVideoMemory[ ii + 5 ] ;
- pVBInfo->CR40[ j ][ 6 ] = pVideoMemory[ ii + 6 ] ;
- pVBInfo->CR40[ j ][ 7 ] = pVideoMemory[ ii + 7 ] ;
- ii += 0x08 ;
- }
-
- i = pVideoMemory[ 0x1C0 ] | ( pVideoMemory[ 0x1C1 ] << 8 ) ;
-
- for( j = 0 ; j < 8 ; j++ )
- {
- for( k = 0 ; k < 4 ; k++ )
- pVBInfo->CR6B[ j ][ k ] = pVideoMemory[ i + 4 * j + k ] ;
- }
-
- i = pVideoMemory[ 0x1C2 ] | ( pVideoMemory[ 0x1C3 ] << 8 ) ;
-
- for( j = 0 ; j < 8 ; j++ )
- {
- for( k = 0 ; k < 4 ; k++ )
- pVBInfo->CR6E[ j ][ k ] = pVideoMemory[ i + 4 * j + k ] ;
- }
-
- i = pVideoMemory[ 0x1C4 ] | ( pVideoMemory[ 0x1C5 ] << 8 ) ;
- for( j = 0 ; j < 8 ; j++ )
- {
- for( k = 0 ; k < 32 ; k++ )
- pVBInfo->CR6F[ j ][ k ] = pVideoMemory[ i + 32 * j + k ] ;
- }
-
- i = pVideoMemory[ 0x1C6 ] | ( pVideoMemory[ 0x1C7 ] << 8 ) ;
-
- for( j = 0 ; j < 8 ; j++ )
- {
- for( k = 0 ; k < 2 ; k++ )
- pVBInfo->CR89[ j ][ k ] = pVideoMemory[ i + 2 * j + k ] ;
- }
-
- i = pVideoMemory[ 0x1C8 ] | ( pVideoMemory[ 0x1C9 ] << 8 ) ;
- for( j = 0 ; j < 12 ; j++ )
- pVBInfo->AGPReg[ j ] = pVideoMemory[ i + j ] ;
-
- i = pVideoMemory[ 0x1CF ] | ( pVideoMemory[ 0x1D0 ] << 8 ) ;
- for( j = 0 ; j < 4 ; j++ )
- pVBInfo->SR16[ j ] = pVideoMemory[ i + j ] ;
-
- if ( ChipType == XG21 )
- {
- if (pVideoMemory[ 0x67 ] & 0x80)
- {
- *pVBInfo->pDVOSetting = pVideoMemory[ 0x67 ];
- }
- if ( (pVideoMemory[ 0x67 ] & 0xC0) == 0xC0 )
- {
- *pVBInfo->pCR2E = pVideoMemory[ i + 4 ] ;
- *pVBInfo->pCR2F = pVideoMemory[ i + 5 ] ;
- *pVBInfo->pCR46 = pVideoMemory[ i + 6 ] ;
- *pVBInfo->pCR47 = pVideoMemory[ i + 7 ] ;
- }
- }
-
- if ( ChipType == XG27 )
- {
- jj = i+j;
- for( i = 0 ; i <= 0xB ; i++,jj++ )
- pVBInfo->pCRD0[i] = pVideoMemory[ jj ] ;
- for( i = 0x0 ; i <= 0x1 ; i++,jj++ )
- pVBInfo->pCRDE[i] = pVideoMemory[ jj ] ;
-
- *pVBInfo->pSR40 = pVideoMemory[ jj ] ;
- jj++;
- *pVBInfo->pSR41 = pVideoMemory[ jj ] ;
-
- if (pVideoMemory[ 0x67 ] & 0x80)
- {
- *pVBInfo->pDVOSetting = pVideoMemory[ 0x67 ];
- }
- if ( (pVideoMemory[ 0x67 ] & 0xC0) == 0xC0 )
- {
- jj++;
- *pVBInfo->pCR2E = pVideoMemory[ jj ] ;
- *pVBInfo->pCR2F = pVideoMemory[ jj + 1 ] ;
- *pVBInfo->pCR46 = pVideoMemory[ jj + 2 ] ;
- *pVBInfo->pCR47 = pVideoMemory[ jj + 3 ] ;
- }
-
- }
-
- *pVBInfo->pCRCF = pVideoMemory[ 0x1CA ] ;
- *pVBInfo->pXGINew_DRAMTypeDefinition = pVideoMemory[ 0x1CB ] ;
- *pVBInfo->pXGINew_I2CDefinition = pVideoMemory[ 0x1D1 ] ;
- if ( ChipType >= XG20 )
- {
- *pVBInfo->pXGINew_CR97 = pVideoMemory[ 0x1D2 ] ;
- if ( ChipType == XG27 )
- {
- *pVBInfo->pSR36 = pVideoMemory[ 0x1D3 ] ;
- *pVBInfo->pCR8F = pVideoMemory[ 0x1D5 ] ;
- }
- }
-
- }
-#endif
+ volatile unsigned char *pVideoMemory = (unsigned char *)pVBInfo->ROMAddr;
+ unsigned long i ;
+ unsigned char j, k ;
/* Volari customize data area end */
if ( ChipType == XG21 )
@@ -2972,7 +2761,8 @@ void ReadVBIOSTablData( UCHAR ChipType , PVB_DEVICE_INFO pVBInfo)
i += 25;
j--;
k++;
- } while ( (j>0) && ( k < (sizeof(XGI21_LCDCapList)/sizeof(XGI21_LVDSCapStruct)) ) );
+ } while ((j > 0) &&
+ (k < (sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct))));
}
else
{
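
The reflowed loop bound a few lines up computes the LCD capability count as sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct). The kernel's ARRAY_SIZE() macro expresses the same quantity without repeating the element type, which keeps the bound correct if the array ever changes. A small self-contained sketch of the idiom follows; it is a suggested cleanup, and the patch keeps the explicit division.

/* Sketch: element counting with ARRAY_SIZE(), defined in <linux/kernel.h>
 * as sizeof(arr) / sizeof((arr)[0]). The struct and array here are
 * stand-ins, not the driver's tables. */
#include <linux/kernel.h>

struct lvds_cap_sketch {
	unsigned short h_total;
	unsigned short v_total;
};

static struct lvds_cap_sketch lcd_caps_sketch[4];

static unsigned int lcd_cap_count_sketch(void)
{
	return ARRAY_SIZE(lcd_caps_sketch);	/* == 4, however the struct grows */
}
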
@@ -3003,7 +2793,7 @@ void ReadVBIOSTablData( UCHAR ChipType , PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_DDR1x_MRS_XG20( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
+void XGINew_DDR1x_MRS_XG20(unsigned long P3c4, struct vb_device_info *pVBInfo)
{
XGINew_SetReg1( P3c4 , 0x18 , 0x01 ) ;
@@ -3039,13 +2829,13 @@ void XGINew_DDR1x_MRS_XG20( ULONG P3c4 , PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetDRAMModeRegister_XG20( PXGI_HW_DEVICE_INFO HwDeviceExtension )
+void XGINew_SetDRAMModeRegister_XG20(struct xgi_hw_device_info *HwDeviceExtension)
{
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
pVBInfo->ISXPDOS = 0 ;
pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14 ;
@@ -3078,13 +2868,13 @@ void XGINew_SetDRAMModeRegister_XG20( PXGI_HW_DEVICE_INFO HwDeviceExtension )
XGINew_SetReg1( pVBInfo->P3c4 , 0x1B , 0x03 ) ;
}
-void XGINew_SetDRAMModeRegister_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension )
+void XGINew_SetDRAMModeRegister_XG27(struct xgi_hw_device_info *HwDeviceExtension)
{
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
pVBInfo->ISXPDOS = 0 ;
pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14 ;
@@ -3120,13 +2910,12 @@ void XGINew_SetDRAMModeRegister_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension )
}
/*
-void XGINew_SetDRAMModeRegister_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension )
+void XGINew_SetDRAMModeRegister_XG27(struct xgi_hw_device_info *HwDeviceExtension)
{
-#ifndef LINUX_XF86
- UCHAR data ;
-#endif
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
+
+ unsigned char data ;
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ;
pVBInfo->BaseAddr = HwDeviceExtension->pjIOAddress ;
@@ -3168,9 +2957,9 @@ void XGINew_SetDRAMModeRegister_XG27( PXGI_HW_DEVICE_INFO HwDeviceExtension )
/* Output : */
/* Description : */
/* -------------------------------------------------------- */
-void XGINew_ChkSenseStatus ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempbx=0 , temp , tempcx , CR3CData;
+ unsigned short tempbx = 0, temp, tempcx, CR3CData;
temp = XGINew_GetReg1( pVBInfo->P3d4 , 0x32 ) ;
@@ -3229,9 +3018,9 @@ void XGINew_ChkSenseStatus ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
/* Output : */
/* Description : */
/* -------------------------------------------------------- */
-void XGINew_SetModeScratch ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo )
+void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT temp , tempcl = 0 , tempch = 0 , CR31Data , CR38Data;
+ unsigned short temp , tempcl = 0 , tempch = 0 , CR31Data , CR38Data;
temp = XGINew_GetReg1( pVBInfo->P3d4 , 0x3d ) ;
temp |= XGINew_GetReg1( pVBInfo->P3d4 , 0x3e ) << 8 ;
@@ -3326,23 +3115,13 @@ void XGINew_SetModeScratch ( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_
/* Output : */
/* Description : */
/* -------------------------------------------------------- */
-void XGINew_GetXG21Sense(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGINew_GetXG21Sense(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- UCHAR Temp;
- PUCHAR volatile pVideoMemory = ( PUCHAR )pVBInfo->ROMAddr ;
+ unsigned char Temp;
+ volatile unsigned char *pVideoMemory = (unsigned char *)pVBInfo->ROMAddr;
pVBInfo->IF_DEF_LVDS = 0 ;
-#ifdef WIN2000
- pVBInfo->IF_DEF_CH7007 = 0 ;
- if ( ( pVideoMemory[ 0x65 ] & 0x02 ) ) /* For XG21 CH7007 */
- {
- /* VideoDebugPrint((0, "ReadVBIOSTablData: pVideoMemory[ 0x65 ] =%x\n",pVideoMemory[ 0x65 ])); */
- pVBInfo->IF_DEF_CH7007 = 1 ; /* [Billy] 07/05/03 */
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x38 , ~0xE0 , 0x60 ) ; /* CH7007 on chip */
- }
- else
-#endif
#if 1
if (( pVideoMemory[ 0x65 ] & 0x01 ) ) /* For XG21 LVDS */
{
@@ -3378,9 +3157,9 @@ void XGINew_GetXG21Sense(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* -------------------------------------------------------- */
-void XGINew_GetXG27Sense(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- UCHAR Temp,bCR4A;
+ unsigned char Temp, bCR4A;
pVBInfo->IF_DEF_LVDS = 0 ;
bCR4A = XGINew_GetReg1( pVBInfo->P3d4 , 0x4A ) ;
@@ -3402,9 +3181,9 @@ void XGINew_GetXG27Sense(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
}
-UCHAR GetXG21FPBits(PVB_DEVICE_INFO pVBInfo)
+unsigned char GetXG21FPBits(struct vb_device_info *pVBInfo)
{
- UCHAR CR38,CR4A,temp;
+ unsigned char CR38, CR4A, temp;
CR4A = XGINew_GetReg1( pVBInfo->P3d4 , 0x4A ) ;
XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x4A , ~0x10 , 0x10 ) ; /* enable GPIOE read */
@@ -3422,9 +3201,9 @@ UCHAR GetXG21FPBits(PVB_DEVICE_INFO pVBInfo)
return temp;
}
-UCHAR GetXG27FPBits(PVB_DEVICE_INFO pVBInfo)
+unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo)
{
- UCHAR CR4A,temp;
+ unsigned char CR4A, temp;
CR4A = XGINew_GetReg1( pVBInfo->P3d4 , 0x4A ) ;
XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x4A , ~0x03 , 0x03 ) ; /* enable GPIOA/B/C read */
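The sense and FP-bit helpers above all go through the driver's indexed-register accessors (XGINew_GetReg1, XGINew_SetRegANDOR), whose definitions are not part of these hunks. A minimal sketch of what the call sites assume, using the usual VGA index/data port pairing (index written to the base port, value accessed at base + 1); the *_sketch names are illustrative, not the driver's own symbols:

#include <asm/io.h>

/* Read one indexed register: select it, then read the data port. */
static unsigned char XGINew_GetReg1_sketch(unsigned long port, unsigned short index)
{
	outb(index, port);	/* select register `index` */
	return inb(port + 1);	/* read its current value  */
}

/* Read-modify-write: AND the current value with and_mask, then OR in or_mask. */
static void XGINew_SetRegANDOR_sketch(unsigned long port, unsigned short index,
				      unsigned short and_mask, unsigned short or_mask)
{
	unsigned char val;

	outb(index, port);
	val = inb(port + 1);
	val = (val & and_mask) | or_mask;
	outb(val, port + 1);
}

Under that reading, a call such as XGINew_SetRegANDOR(pVBInfo->P3d4, 0x4A, ~0x10, 0x10) leaves every other bit of CR4A untouched and forces bit 4 to 1 — the "enable GPIOE read" step in GetXG21FPBits above.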
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index 1f39d9c74cd..b47352b8e34 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,7 +1,7 @@
#ifndef _VBINIT_
#define _VBINIT_
-extern BOOLEAN XGIInitNew( PXGI_HW_DEVICE_INFO HwDeviceExtension ) ;
-extern XGI21_LVDSCapStruct XGI21_LCDCapList[13];
+extern unsigned char XGIInitNew(struct xgi_hw_device_info *HwDeviceExtension) ;
+extern struct XGI21_LVDSCapStruct XGI21_LCDCapList[13];
#endif
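The vb_setmode.c diff below carries out the same type cleanup as the vb_init.c and vb_init.h hunks above. For reference, the substitutions boil down to the mapping sketched here — reconstructed from the hunks themselves, not copied from the headers being removed — illustrated with the XGISetModeNew prototype taken from the hunk below:

/*
 * UCHAR               -> unsigned char
 * USHORT              -> unsigned short
 * SHORT               -> short
 * ULONG               -> unsigned long
 * BOOLEAN             -> unsigned char   (TRUE/FALSE -> 1/0)
 * PUCHAR              -> unsigned char *
 * PVB_DEVICE_INFO     -> struct vb_device_info *
 * PXGI_HW_DEVICE_INFO -> struct xgi_hw_device_info *
 */
struct xgi_hw_device_info;	/* forward declaration, for this sketch only */

/* was: BOOLEAN XGISetModeNew(PXGI_HW_DEVICE_INFO HwDeviceExtension, USHORT ModeNo); */
unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
			    unsigned short ModeNo);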
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index bd7f7389864..d90bf06bf62 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -1,43 +1,9 @@
-#include "osdef.h"
-#ifdef TC
-#include <stdio.h>
-#include <string.h>
-#include <conio.h>
-#include <dos.h>
-#endif
-
-
-#ifdef LINUX_XF86
-#include "xf86.h"
-#include "xf86PciInfo.h"
-#include "xgi.h"
-#include "xgi_regs.h"
-#endif
-
-#ifdef LINUX_KERNEL
#include <asm/io.h>
#include <linux/types.h>
#include <linux/version.h>
#include "XGIfb.h"
-/*#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
-#include <video/XGIfb.h>
-#else
-#include <linux/XGIfb.h>
-#endif*/
-#endif
-
-#ifdef WIN2000
-#include <dderror.h>
-#include <devioctl.h>
-#include <miniport.h>
-#include <ntddvdeo.h>
-#include <video.h>
-#include "xgiv.h"
-#include "dd_i2c.h"
-#include "tools.h"
-#endif
#include "vb_def.h"
#include "vgatypes.h"
@@ -54,194 +20,218 @@
-BOOLEAN XGI_IsLCDDualLink(PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_SetCRT2Group301(USHORT ModeNo, PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_BacklightByDrv(PVB_DEVICE_INFO pVBInfo);
-
-BOOLEAN XGI_IsLCDON(PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_DisableChISLCD(PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_EnableChISLCD(PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_AjustCRT2Rate(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,USHORT *i, PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_SearchModeID( USHORT ModeNo,USHORT *ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_GetLCDInfo(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo ) ;
-BOOLEAN XGI_BridgeIsOn(PVB_DEVICE_INFO pVBInfo);
-UCHAR XGI_GetModePtr( USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetOffset(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetRatePtrCRT2( PXGI_HW_DEVICE_INFO pXGIHWDE, USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo );
-USHORT XGI_GetResInfo(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetColorDepth(USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetVGAHT2(PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetVCLK2Ptr(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGI_VBLongWait(PVB_DEVICE_INFO pVBInfo);
-void XGI_SaveCRT2Info(USHORT ModeNo, PVB_DEVICE_INFO pVBInfo);
-void XGI_GetCRT2Data(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_GetCRT2ResInfo(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_PreSetGroup1(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetGroup1(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetLockRegs(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetLCDRegs(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetGroup2(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetGroup3(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetGroup4(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetGroup5(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void* XGI_GetLcdPtr(USHORT BX, USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void* XGI_GetTVPtr(USHORT BX, USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_FirePWDEnable(PVB_DEVICE_INFO pVBInfo);
-void XGI_EnableGatingCRT(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_DisableGatingCRT(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetPanelPower(USHORT tempah,USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-void XGI_EnablePWD( PVB_DEVICE_INFO pVBInfo);
-void XGI_DisablePWD( PVB_DEVICE_INFO pVBInfo);
-void XGI_AutoThreshold( PVB_DEVICE_INFO pVBInfo);
-void XGI_SetTap4Regs( PVB_DEVICE_INFO pVBInfo);
-
-void XGI_DisplayOn(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo);
-void XGI_DisplayOff( PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo );
-void XGI_SetCRT1Group(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetXG21CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetXG21LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT ModeNo);
-void XGI_SetXG27CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetXG27LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT ModeNo);
-void XGI_UpdateXG21CRTC(USHORT ModeNo, PVB_DEVICE_INFO pVBInfo, USHORT RefreshRateTableIndex);
-void XGI_WaitDisply(PVB_DEVICE_INFO pVBInfo);
-void XGI_SenseCRT1(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetSeqRegs(USHORT ModeNo,USHORT StandTableIndex,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetMiscRegs(USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRTCRegs(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetATTRegs(USHORT ModeNo,USHORT StandTableIndex,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo );
-void XGI_SetGRCRegs(USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_ClearExt1Regs(PVB_DEVICE_INFO pVBInfo);
-
-void XGI_SetSync(USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT1CRTC(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo,PXGI_HW_DEVICE_INFO HwDeviceExtension);
-void XGI_SetCRT1Timing_H(PVB_DEVICE_INFO pVBInfo,PXGI_HW_DEVICE_INFO HwDeviceExtension);
-void XGI_SetCRT1Timing_V(USHORT ModeIdIndex,USHORT ModeNo,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT1DE(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT1VCLK(USHORT ModeNo,USHORT ModeIdIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT1FIFO(USHORT ModeNo,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT1ModeRegs(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetVCLKState(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-
-void XGI_LoadDAC(USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_WriteDAC(USHORT dl, USHORT ah, USHORT al, USHORT dh, PVB_DEVICE_INFO pVBInfo);
-/*void XGI_ClearBuffer(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,PVB_DEVICE_INFO pVBInfo);*/
-void XGI_SetLCDAGroup(USHORT ModeNo,USHORT ModeIdIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGI_GetLVDSResInfo( USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_GetLVDSData(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_ModCRT1Regs(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetLVDSRegs(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_UpdateModeInfo(PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGI_GetVGAType(PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGI_GetVBType(PVB_DEVICE_INFO pVBInfo);
-void XGI_GetVBInfo(USHORT ModeNo,USHORT ModeIdIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo);
-void XGI_GetTVInfo(USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT2ECLK( USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo);
-void InitTo330Pointer(UCHAR,PVB_DEVICE_INFO pVBInfo);
-void XGI_GetLCDSync(USHORT* HSyncWidth, USHORT* VSyncWidth, PVB_DEVICE_INFO pVBInfo);
-void XGI_DisableBridge(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_EnableBridge(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT2VCLK(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_OEM310Setting(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetDelayComp(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetLCDCap(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetLCDCap_A(USHORT tempcx,PVB_DEVICE_INFO pVBInfo);
-void XGI_SetLCDCap_B(USHORT tempcx,PVB_DEVICE_INFO pVBInfo);
-void SetSpectrum(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetAntiFlicker(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetEdgeEnhance(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetPhaseIncr(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetYFilter(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_GetTVPtrIndex2(USHORT* tempbx,UCHAR* tempcl,UCHAR* tempch, PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetTVPtrIndex( PVB_DEVICE_INFO pVBInfo );
-void XGI_SetCRT2ModeRegs(USHORT ModeNo,PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo );
-void XGI_CloseCRTC(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo);
-void XGI_OpenCRTC(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo);
-void XGI_GetRAMDAC2DATA(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_UnLockCRT2(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo);
-void XGI_LockCRT2(PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO pVBInfo);
-void XGINew_EnableCRT2(PVB_DEVICE_INFO pVBInfo);
-void XGINew_LCD_Wait_Time(UCHAR DelayTime, PVB_DEVICE_INFO pVBInfo);
-void XGI_LongWait(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetCRT1Offset( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo );
-void XGI_GetLCDVCLKPtr(UCHAR* di_0,UCHAR *di_1, PVB_DEVICE_INFO pVBInfo);
-UCHAR XGI_GetVCLKPtr(USHORT RefreshRateTableIndex,USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
-void XGI_GetVCLKLen(UCHAR tempal,UCHAR* di_0,UCHAR* di_1, PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetLCDCapPtr(PVB_DEVICE_INFO pVBInfo);
-USHORT XGI_GetLCDCapPtr1(PVB_DEVICE_INFO pVBInfo);
-XGI301C_Tap4TimingStruct* XGI_GetTap4Ptr(USHORT tempcx, PVB_DEVICE_INFO pVBInfo);
-void XGI_SetXG21FPBits(PVB_DEVICE_INFO pVBInfo);
-void XGI_SetXG27FPBits(PVB_DEVICE_INFO pVBInfo);
-UCHAR XGI_XG21GetPSCValue(PVB_DEVICE_INFO pVBInfo);
-UCHAR XGI_XG27GetPSCValue(PVB_DEVICE_INFO pVBInfo);
-void XGI_XG21BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-void XGI_XG27BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-void XGI_XG21SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-BOOLEAN XGI_XG21CheckLVDSMode(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo );
-void XGI_SetXG21LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo );
-void XGI_SetXG27LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo );
-UCHAR XGI_SetDefaultVCLK( PVB_DEVICE_INFO pVBInfo );
-
-extern void ReadVBIOSTablData( UCHAR ChipType , PVB_DEVICE_INFO pVBInfo);
-#ifdef WIN2000
-/* [Billy] 2007/05/17 For CH7007 */
-extern UCHAR CH7007TVReg_UNTSC[][8],CH7007TVReg_ONTSC[][8],CH7007TVReg_UPAL[][8],CH7007TVReg_OPAL[][8];
-extern UCHAR CH7007TVCRT1UNTSC_H[][10],CH7007TVCRT1ONTSC_H[][10],CH7007TVCRT1UPAL_H[][10],CH7007TVCRT1OPAL_H[][10] ;
-extern UCHAR CH7007TVCRT1UNTSC_V[][10],CH7007TVCRT1ONTSC_V[][10],CH7007TVCRT1UPAL_V[][10],CH7007TVCRT1OPAL_V[][10] ;
-extern UCHAR XGI7007_CHTVVCLKUNTSC[],XGI7007_CHTVVCLKONTSC[],XGI7007_CHTVVCLKUPAL[],XGI7007_CHTVVCLKOPAL[];
-
-extern BOOLEAN XGI_XG21CheckCH7007TVMode(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo ) ;
-extern void SetCH7007Regs(PXGI_HW_DEVICE_INFO HwDeviceExtension, USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo ) ;
-extern VP_STATUS TurnOnCH7007(PHW_DEVICE_EXTENSION pHWDE) ;
-extern VP_STATUS TurnOffCH7007(PHW_DEVICE_EXTENSION pHWDE) ;
-extern BOOLEAN IsCH7007TVMode(PVB_DEVICE_INFO pVBInfo) ;
-#endif
-
-/* USHORT XGINew_flag_clearbuffer; 0: no clear frame buffer 1:clear frame buffer */
-
-
-
-
-
-USHORT XGINew_MDA_DAC[]={0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15,
- 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15,
- 0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15,
- 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15,
- 0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F};
-
-USHORT XGINew_CGA_DAC[]={0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15,
- 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15,
- 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F,
- 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F,
- 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15,
- 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15,
- 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F,
- 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F};
-
-USHORT XGINew_EGA_DAC[]={0x00,0x10,0x04,0x14,0x01,0x11,0x05,0x15,
- 0x20,0x30,0x24,0x34,0x21,0x31,0x25,0x35,
- 0x08,0x18,0x0C,0x1C,0x09,0x19,0x0D,0x1D,
- 0x28,0x38,0x2C,0x3C,0x29,0x39,0x2D,0x3D,
- 0x02,0x12,0x06,0x16,0x03,0x13,0x07,0x17,
- 0x22,0x32,0x26,0x36,0x23,0x33,0x27,0x37,
- 0x0A,0x1A,0x0E,0x1E,0x0B,0x1B,0x0F,0x1F,
- 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F};
-
-USHORT XGINew_VGA_DAC[]={0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15,
- 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F,
- 0x00,0x05,0x08,0x0B,0x0E,0x11,0x14,0x18,
- 0x1C,0x20,0x24,0x28,0x2D,0x32,0x38,0x3F,
-
- 0x00,0x10,0x1F,0x2F,0x3F,0x1F,0x27,0x2F,
- 0x37,0x3F,0x2D,0x31,0x36,0x3A,0x3F,0x00,
- 0x07,0x0E,0x15,0x1C,0x0E,0x11,0x15,0x18,
- 0x1C,0x14,0x16,0x18,0x1A,0x1C,0x00,0x04,
- 0x08,0x0C,0x10,0x08,0x0A,0x0C,0x0E,0x10,
- 0x0B,0x0C,0x0D,0x0F,0x10};
+unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo);
+unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
+unsigned char XGI_BacklightByDrv(struct vb_device_info *pVBInfo);
+
+unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo);
+unsigned char XGI_DisableChISLCD(struct vb_device_info *pVBInfo);
+unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo);
+unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ unsigned short *i, struct vb_device_info *pVBInfo);
+unsigned char XGI_SearchModeID(unsigned short ModeNo,
+ unsigned short *ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo);
+unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo);
+unsigned char XGI_GetModePtr(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+unsigned short XGI_GetOffset(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
+unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+unsigned short XGI_GetResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+unsigned short XGI_GetColorDepth(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo);
+unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
+void XGI_VBLongWait(struct vb_device_info *pVBInfo);
+void XGI_SaveCRT2Info(unsigned short ModeNo, struct vb_device_info *pVBInfo);
+void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_GetCRT2ResInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_FirePWDEnable(struct vb_device_info *pVBInfo);
+void XGI_EnableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_SetPanelDelay(unsigned short tempbl, struct vb_device_info *pVBInfo);
+void XGI_SetPanelPower(unsigned short tempah, unsigned short tempbl, struct vb_device_info *pVBInfo);
+void XGI_EnablePWD(struct vb_device_info *pVBInfo);
+void XGI_DisablePWD(struct vb_device_info *pVBInfo);
+void XGI_AutoThreshold(struct vb_device_info *pVBInfo);
+void XGI_SetTap4Regs(struct vb_device_info *pVBInfo);
+
+void XGI_DisplayOn(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGI_DisplayOff(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetXG21LCD(struct vb_device_info *pVBInfo, unsigned short RefreshRateTableIndex, unsigned short ModeNo);
+void XGI_SetXG27CRTC(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetXG27LCD(struct vb_device_info *pVBInfo, unsigned short RefreshRateTableIndex, unsigned short ModeNo);
+void XGI_UpdateXG21CRTC(unsigned short ModeNo, struct vb_device_info *pVBInfo, unsigned short RefreshRateTableIndex);
+void XGI_WaitDisply(struct vb_device_info *pVBInfo);
+void XGI_SenseCRT1(struct vb_device_info *pVBInfo);
+void XGI_SetSeqRegs(unsigned short ModeNo, unsigned short StandTableIndex, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetMiscRegs(unsigned short StandTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension, unsigned short StandTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetATTRegs(unsigned short ModeNo, unsigned short StandTableIndex, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetGRCRegs(unsigned short StandTableIndex, struct vb_device_info *pVBInfo);
+void XGI_ClearExt1Regs(struct vb_device_info *pVBInfo);
+
+void XGI_SetSync(unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo, struct xgi_hw_device_info *HwDeviceExtension);
+void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo, struct xgi_hw_device_info *HwDeviceExtension);
+void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex, unsigned short ModeNo, struct vb_device_info *pVBInfo);
+void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetCRT1VCLK(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetCRT1FIFO(unsigned short ModeNo, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+
+void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_WriteDAC(unsigned short dl, unsigned short ah, unsigned short al, unsigned short dh, struct vb_device_info *pVBInfo);
+/*void XGI_ClearBuffer(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo, struct vb_device_info *pVBInfo);*/
+void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_GetLVDSResInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo);
+void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_GetVBType(struct vb_device_info *pVBInfo);
+void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void InitTo330Pointer(unsigned char, struct vb_device_info *pVBInfo);
+void XGI_GetLCDSync(unsigned short *HSyncWidth, unsigned short *VSyncWidth, struct vb_device_info *pVBInfo);
+void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_OEM310Setting(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetDelayComp(struct vb_device_info *pVBInfo);
+void XGI_SetLCDCap(struct vb_device_info *pVBInfo);
+void XGI_SetLCDCap_A(unsigned short tempcx, struct vb_device_info *pVBInfo);
+void XGI_SetLCDCap_B(unsigned short tempcx, struct vb_device_info *pVBInfo);
+void SetSpectrum(struct vb_device_info *pVBInfo);
+void XGI_SetAntiFlicker(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetEdgeEnhance(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo);
+void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char* tempcl,
+ unsigned char *tempch, struct vb_device_info *pVBInfo);
+unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo);
+void XGI_SetCRT2ModeRegs(unsigned short ModeNo, struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGI_CloseCRTC(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGI_OpenCRTC(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGI_GetRAMDAC2DATA(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo);
+void XGI_UnLockCRT2(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGI_LockCRT2(struct xgi_hw_device_info *, struct vb_device_info *pVBInfo);
+void XGINew_EnableCRT2(struct vb_device_info *pVBInfo);
+void XGINew_LCD_Wait_Time(unsigned char DelayTime, struct vb_device_info *pVBInfo);
+void XGI_LongWait(struct vb_device_info *pVBInfo);
+void XGI_SetCRT1Offset(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo);
+void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
+ struct vb_device_info *pVBInfo);
+unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo);
+void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
+ unsigned char *di_1, struct vb_device_info *pVBInfo);
+unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo);
+unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo);
+struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo);
+void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo);
+void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo);
+unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo);
+unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo);
+void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl, struct vb_device_info *pVBInfo);
+void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl, struct vb_device_info *pVBInfo);
+void XGI_XG21SetPanelDelay(unsigned short tempbl, struct vb_device_info *pVBInfo);
+unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetXG21LVDSPara(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+void XGI_SetXG27LVDSPara(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo);
+
+extern void ReadVBIOSTablData(unsigned char ChipType, struct vb_device_info *pVBInfo);
+
+/* unsigned short XGINew_flag_clearbuffer; 0: no clear frame buffer 1:clear frame buffer */
+
+
+unsigned short XGINew_MDA_DAC[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
+ 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
+ 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F};
+
+unsigned short XGINew_CGA_DAC[] = {
+ 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
+ 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
+ 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
+ 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
+ 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
+ 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
+ 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
+ 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F};
+
+unsigned short XGINew_EGA_DAC[] = {
+ 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x05, 0x15,
+ 0x20, 0x30, 0x24, 0x34, 0x21, 0x31, 0x25, 0x35,
+ 0x08, 0x18, 0x0C, 0x1C, 0x09, 0x19, 0x0D, 0x1D,
+ 0x28, 0x38, 0x2C, 0x3C, 0x29, 0x39, 0x2D, 0x3D,
+ 0x02, 0x12, 0x06, 0x16, 0x03, 0x13, 0x07, 0x17,
+ 0x22, 0x32, 0x26, 0x36, 0x23, 0x33, 0x27, 0x37,
+ 0x0A, 0x1A, 0x0E, 0x1E, 0x0B, 0x1B, 0x0F, 0x1F,
+ 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F};
+
+unsigned short XGINew_VGA_DAC[] = {
+ 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
+ 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
+ 0x00, 0x05, 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x18,
+ 0x1C, 0x20, 0x24, 0x28, 0x2D, 0x32, 0x38, 0x3F,
+ 0x00, 0x10, 0x1F, 0x2F, 0x3F, 0x1F, 0x27, 0x2F,
+ 0x37, 0x3F, 0x2D, 0x31, 0x36, 0x3A, 0x3F, 0x00,
+ 0x07, 0x0E, 0x15, 0x1C, 0x0E, 0x11, 0x15, 0x18,
+ 0x1C, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x00, 0x04,
+ 0x08, 0x0C, 0x10, 0x08, 0x0A, 0x0C, 0x0E, 0x10,
+ 0x0B, 0x0C, 0x0D, 0x0F, 0x10};
/* --------------------------------------------------------------------- */
@@ -250,35 +240,35 @@ USHORT XGINew_VGA_DAC[]={0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void InitTo330Pointer( UCHAR ChipType ,PVB_DEVICE_INFO pVBInfo)
+void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
{
- pVBInfo->SModeIDTable = (XGI_StStruct *) XGI330_SModeIDTable ;
- pVBInfo->StandTable = (XGI_StandTableStruct *) XGI330_StandTable ;
- pVBInfo->EModeIDTable = (XGI_ExtStruct *) XGI330_EModeIDTable ;
- pVBInfo->RefIndex = (XGI_Ext2Struct *) XGI330_RefIndex ;
- pVBInfo->XGINEWUB_CRT1Table = (XGI_CRT1TableStruct *) XGI_CRT1Table ;
+ pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable ;
+ pVBInfo->StandTable = (struct XGI_StandTableStruct *) XGI330_StandTable ;
+ pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable ;
+ pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex ;
+ pVBInfo->XGINEWUB_CRT1Table = (struct XGI_CRT1TableStruct *) XGI_CRT1Table ;
/* add for new UNIVGABIOS */
- /* XGINew_UBLCDDataTable = (XGI_LCDDataTablStruct *) XGI_LCDDataTable ; */
+ /* XGINew_UBLCDDataTable = (struct XGI_LCDDataTablStruct *) XGI_LCDDataTable ; */
/* XGINew_UBTVDataTable = (XGI_TVDataTablStruct *) XGI_TVDataTable ; */
if ( ChipType >= XG40 )
{
- pVBInfo->MCLKData = (XGI_MCLKDataStruct *) XGI340New_MCLKData ;
- pVBInfo->ECLKData = (XGI_ECLKDataStruct *) XGI340_ECLKData ;
+ pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
+ pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
}
else
{
- pVBInfo->MCLKData = (XGI_MCLKDataStruct *) XGI330New_MCLKData ;
- pVBInfo->ECLKData = (XGI_ECLKDataStruct *) XGI330_ECLKData ;
+ pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI330New_MCLKData;
+ pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI330_ECLKData;
}
- pVBInfo->VCLKData = (XGI_VCLKDataStruct *) XGI_VCLKData ;
- pVBInfo->VBVCLKData = (XGI_VBVCLKDataStruct *) XGI_VBVCLKData ;
+ pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData ;
+ pVBInfo->VBVCLKData = (struct XGI_VBVCLKDataStruct *) XGI_VBVCLKData ;
pVBInfo->ScreenOffset = XGI330_ScreenOffset ;
- pVBInfo->StResInfo = (XGI_StResInfoStruct *) XGI330_StResInfo ;
- pVBInfo->ModeResInfo = (XGI_ModeResInfoStruct *) XGI330_ModeResInfo ;
+ pVBInfo->StResInfo = (struct XGI_StResInfoStruct *) XGI330_StResInfo ;
+ pVBInfo->ModeResInfo = (struct XGI_ModeResInfoStruct *) XGI330_ModeResInfo ;
pVBInfo->pOutputSelect = &XGI330_OutputSelect ;
pVBInfo->pSoftSetting = &XGI330_SoftSetting ;
@@ -342,9 +332,9 @@ void InitTo330Pointer( UCHAR ChipType ,PVB_DEVICE_INFO pVBInfo)
pVBInfo->Ren750pGroup3 = XGI330_Ren750pGroup3 ;
- pVBInfo->TimingH = (XGI_TimingHStruct *) XGI_TimingH ;
- pVBInfo->TimingV = (XGI_TimingVStruct *) XGI_TimingV ;
- pVBInfo->UpdateCRT1 = (XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table ;
+ pVBInfo->TimingH = (struct XGI_TimingHStruct *) XGI_TimingH ;
+ pVBInfo->TimingV = (struct XGI_TimingVStruct *) XGI_TimingV ;
+ pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table ;
pVBInfo->CHTVVCLKUNTSC = XGI330_CHTVVCLKUNTSC ;
pVBInfo->CHTVVCLKONTSC = XGI330_CHTVVCLKONTSC ;
@@ -371,7 +361,7 @@ void InitTo330Pointer( UCHAR ChipType ,PVB_DEVICE_INFO pVBInfo)
if ( ChipType == XG27 )
{
- pVBInfo->MCLKData = (XGI_MCLKDataStruct *) XGI27New_MCLKData ;
+ pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI27New_MCLKData;
pVBInfo->CR40 = XGI27_cr41 ;
pVBInfo->pXGINew_CR97 = &XG27_CR97 ;
pVBInfo->pSR36 = &XG27_SR36 ;
@@ -405,14 +395,15 @@ void InitTo330Pointer( UCHAR ChipType ,PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo )
+unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo)
{
- USHORT ModeIdIndex ;
- /* PUCHAR pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ; */
- VB_DEVICE_INFO VBINF;
- PVB_DEVICE_INFO pVBInfo = &VBINF;
+ unsigned short ModeIdIndex ;
+ /* unsigned char *pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress ; */
+ struct vb_device_info VBINF;
+ struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase ;
- pVBInfo->BaseAddr = (ULONG)HwDeviceExtension->pjIOAddress ;
+ pVBInfo->BaseAddr = (unsigned long)HwDeviceExtension->pjIOAddress ;
pVBInfo->IF_DEF_LVDS = 0 ;
pVBInfo->IF_DEF_CH7005 = 0 ;
pVBInfo->IF_DEF_LCDA = 1 ;
@@ -485,9 +476,6 @@ BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo )
XGI_GetVBType( pVBInfo ) ;
InitTo330Pointer( HwDeviceExtension->jChipType, pVBInfo ) ;
-#ifdef WIN2000
- ReadVBIOSTablData( HwDeviceExtension->jChipType , pVBInfo) ;
-#endif
if ( ModeNo & 0x80 )
{
ModeNo = ModeNo & 0x7F ;
@@ -560,30 +548,9 @@ BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo )
} /* !XG20 */
else
{
-#ifdef WIN2000
- if ( pVBInfo->IF_DEF_CH7007 == 1 )
- {
-
- VideoDebugPrint((0, "XGISetModeNew: pVBIfo->IF_DEF_CH7007==1\n"));
- pVBInfo->VBType = VB_CH7007 ;
- XGI_GetVBInfo(ModeNo , ModeIdIndex , HwDeviceExtension, pVBInfo ) ;
- XGI_GetTVInfo(ModeNo , ModeIdIndex, pVBInfo ) ;
- XGI_GetLCDInfo(ModeNo , ModeIdIndex, pVBInfo ) ;
- if( !(XGI_XG21CheckCH7007TVMode(ModeNo, ModeIdIndex, pVBInfo )) )
- {
- return FALSE;
- }
- }
-#endif
-
-
- if ( pVBInfo->IF_DEF_LVDS == 1 )
- {
- if ( !XGI_XG21CheckLVDSMode(ModeNo , ModeIdIndex, pVBInfo) )
- {
- return FALSE;
- }
- }
+ if (pVBInfo->IF_DEF_LVDS == 1)
+ if (!XGI_XG21CheckLVDSMode(ModeNo , ModeIdIndex, pVBInfo))
+ return 0;
if ( ModeNo <= 0x13 )
{
@@ -642,7 +609,7 @@ BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo )
XGI_LockCRT2( HwDeviceExtension, pVBInfo ) ;
}
- return( TRUE ) ;
+ return 1;
}
@@ -652,14 +619,16 @@ BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1Group( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT StandTableIndex ,
+ unsigned short StandTableIndex ,
RefreshRateTableIndex ,
b3CC ,
temp ;
- USHORT XGINew_P3cc = pVBInfo->P3cc;
+ unsigned short XGINew_P3cc = pVBInfo->P3cc;
/* XGINew_CRT1Mode = ModeNo ; // SaveModeID */
StandTableIndex = XGI_GetModePtr( ModeNo , ModeIdIndex, pVBInfo ) ;
@@ -710,14 +679,14 @@ void XGI_SetCRT1Group( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo , U
{
XGINew_SetReg1( pVBInfo->P3c4 , 0x2B , 0x4E) ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x2C , 0xE9) ;
- b3CC =(UCHAR) XGINew_GetReg2(XGINew_P3cc) ;
+ b3CC = (unsigned char) XGINew_GetReg2(XGINew_P3cc) ;
XGINew_SetReg3(XGINew_P3cc , (b3CC |= 0x0C) ) ;
}
else if ( ( ModeNo == 0x04) | ( ModeNo == 0x05) | ( ModeNo == 0x0D) )
{
XGINew_SetReg1( pVBInfo->P3c4 , 0x2B , 0x1B) ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x2C , 0xE3) ;
- b3CC = (UCHAR)XGINew_GetReg2(XGINew_P3cc) ;
+ b3CC = (unsigned char)XGINew_GetReg2(XGINew_P3cc) ;
XGINew_SetReg3(XGINew_P3cc , (b3CC |= 0x0C) ) ;
}
}
@@ -763,13 +732,6 @@ void XGI_SetCRT1Group( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo , U
XGI_LoadDAC( ModeNo , ModeIdIndex, pVBInfo ) ;
/* XGI_ClearBuffer( HwDeviceExtension , ModeNo, pVBInfo ) ; */
-#ifdef WIN2000
- if ( pVBInfo->IF_DEF_CH7007 == 1 ) /* [Billy] 2007/05/14 */
- {
- VideoDebugPrint((0, "XGI_SetCRT1Group: VBInfo->IF_DEF_CH7007==1\n"));
- SetCH7007Regs(HwDeviceExtension, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo ) ; /* 07/05/28 */
- }
-#endif
}
@@ -779,9 +741,10 @@ void XGI_SetCRT1Group( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo , U
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGI_GetModePtr( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_GetModePtr(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- UCHAR index ;
+ unsigned char index ;
if ( ModeNo <= 0x13 )
index = pVBInfo->SModeIDTable[ ModeIdIndex ].St_StTableIndex ;
@@ -802,7 +765,7 @@ UCHAR XGI_GetModePtr( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInf
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-/*UCHAR XGI_SetBIOSData( USHORT ModeNo , USHORT ModeIdIndex )
+/*unsigned char XGI_SetBIOSData(unsigned short ModeNo, unsigned short ModeIdIndex)
{
return( 0 ) ;
}
@@ -814,7 +777,7 @@ UCHAR XGI_GetModePtr( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInf
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-/*UCHAR XGI_ClearBankRegs( USHORT ModeNo , USHORT ModeIdIndex )
+/*unsigned char XGI_ClearBankRegs(unsigned short ModeNo, unsigned short ModeIdIndex)
{
return( 0 ) ;
}
@@ -826,12 +789,13 @@ UCHAR XGI_GetModePtr( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInf
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetSeqRegs( USHORT ModeNo , USHORT StandTableIndex , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetSeqRegs(unsigned short ModeNo, unsigned short StandTableIndex,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- UCHAR tempah ,
+ unsigned char tempah ,
SRdata ;
- USHORT i ,
+ unsigned short i ,
modeflag ;
if ( ModeNo <= 0x13 )
@@ -873,9 +837,9 @@ void XGI_SetSeqRegs( USHORT ModeNo , USHORT StandTableIndex , USHORT ModeIdInde
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetMiscRegs( USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetMiscRegs(unsigned short StandTableIndex, struct vb_device_info *pVBInfo)
{
- UCHAR Miscdata ;
+ unsigned char Miscdata ;
Miscdata = pVBInfo->StandTable[ StandTableIndex ].MISC ; /* Get Misc from file */
/*
@@ -898,12 +862,13 @@ void XGI_SetMiscRegs( USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRTCRegs( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short StandTableIndex, struct vb_device_info *pVBInfo)
{
- UCHAR CRTCdata ;
- USHORT i ;
+ unsigned char CRTCdata ;
+ unsigned short i ;
- CRTCdata = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ;
+ CRTCdata = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11);
CRTCdata &= 0x7f ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x11 , CRTCdata ) ; /* Unlock CRTC */
@@ -933,11 +898,11 @@ void XGI_SetCRTCRegs( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT StandTableI
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetATTRegs( USHORT ModeNo , USHORT StandTableIndex , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetATTRegs(unsigned short ModeNo, unsigned short StandTableIndex,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- UCHAR ARdata ;
- USHORT i ,
- modeflag ;
+ unsigned char ARdata ;
+ unsigned short i, modeflag;
if ( ModeNo <= 0x13 )
modeflag = pVBInfo->SModeIDTable[ ModeIdIndex ].St_ModeFlag ;
@@ -983,10 +948,10 @@ void XGI_SetATTRegs( USHORT ModeNo , USHORT StandTableIndex , USHORT ModeIdIndex
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetGRCRegs( USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetGRCRegs(unsigned short StandTableIndex, struct vb_device_info *pVBInfo)
{
- UCHAR GRdata ;
- USHORT i ;
+ unsigned char GRdata ;
+ unsigned short i ;
for( i = 0 ; i <= 0x08 ; i++ )
{
@@ -996,7 +961,7 @@ void XGI_SetGRCRegs( USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo )
if ( pVBInfo->ModeType > ModeVGA )
{
- GRdata = ( UCHAR )XGINew_GetReg1( pVBInfo->P3ce , 0x05 ) ;
+ GRdata = (unsigned char)XGINew_GetReg1(pVBInfo->P3ce, 0x05);
GRdata &= 0xBF ; /* 256 color disable */
XGINew_SetReg1( pVBInfo->P3ce , 0x05 , GRdata ) ;
}
@@ -1009,9 +974,9 @@ void XGI_SetGRCRegs( USHORT StandTableIndex, PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_ClearExt1Regs(PVB_DEVICE_INFO pVBInfo)
+void XGI_ClearExt1Regs(struct vb_device_info *pVBInfo)
{
- USHORT i ;
+ unsigned short i ;
for( i = 0x0A ; i <= 0x0E ; i++ )
XGINew_SetReg1( pVBInfo->P3c4 , i , 0x00 ) ; /* Clear SR0A-SR0E */
@@ -1024,7 +989,7 @@ void XGI_ClearExt1Regs(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGI_SetDefaultVCLK( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x31 , ~0x30 , 0x20 ) ;
@@ -1046,13 +1011,15 @@ UCHAR XGI_SetDefaultVCLK( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetRatePtrCRT2( PXGI_HW_DEVICE_INFO pXGIHWDE, USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- SHORT LCDRefreshIndex[] = { 0x00 , 0x00 , 0x03 , 0x01 } ,
+ short LCDRefreshIndex[] = { 0x00 , 0x00 , 0x03 , 0x01 } ,
LCDARefreshIndex[] = { 0x00 , 0x00 , 0x03 , 0x01 , 0x01 , 0x01 , 0x01 } ;
- USHORT RefreshRateTableIndex , i ,
- modeflag , index , temp ;
+ unsigned short RefreshRateTableIndex, i, modeflag, index, temp;
if ( ModeNo <= 0x13 )
{
@@ -1183,13 +1150,11 @@ USHORT XGI_GetRatePtrCRT2( PXGI_HW_DEVICE_INFO pXGIHWDE, USHORT ModeNo , USHORT
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_AjustCRT2Rate( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex , USHORT *i, PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ unsigned short *i, struct vb_device_info *pVBInfo)
{
- USHORT tempax ,
- tempbx ,
- resinfo ,
- modeflag ,
- infoflag ;
+ unsigned short tempax, tempbx, resinfo, modeflag, infoflag;
if ( ModeNo <= 0x13 )
{
@@ -1359,9 +1324,9 @@ BOOLEAN XGI_AjustCRT2Rate( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRa
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetSync(USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetSync(unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo)
{
- USHORT sync ,
+ unsigned short sync ,
temp ;
sync = pVBInfo->RefIndex[ RefreshRateTableIndex ].Ext_InfoFlag >> 8 ; /* di+0x00 */
@@ -1378,17 +1343,18 @@ void XGI_SetSync(USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1CRTC( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo, PXGI_HW_DEVICE_INFO HwDeviceExtension )
+void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo,
+ struct xgi_hw_device_info *HwDeviceExtension)
{
- UCHAR index ,
- data ;
-
- USHORT i ;
+ unsigned char index, data;
+ unsigned short i;
index = pVBInfo->RefIndex[ RefreshRateTableIndex ].Ext_CRT1CRTC ; /* Get index */
index = index&IndexMask ;
- data =( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ;
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11);
data &= 0x7F ;
XGINew_SetReg1(pVBInfo->P3d4,0x11,data); /* Unlock CRTC */
@@ -1416,16 +1382,16 @@ void XGI_SetCRT1CRTC( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1Timing_H( PVB_DEVICE_INFO pVBInfo, PXGI_HW_DEVICE_INFO HwDeviceExtension )
+void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo, struct xgi_hw_device_info *HwDeviceExtension)
{
- UCHAR data , data1, pushax;
- USHORT i , j ;
+ unsigned char data, data1, pushax;
+ unsigned short i, j;
/* XGINew_SetReg1( pVBInfo->P3d4 , 0x51 , 0 ) ; */
/* XGINew_SetReg1( pVBInfo->P3d4 , 0x56 , 0 ) ; */
/* XGINew_SetRegANDOR( pVBInfo->P3d4 ,0x11 , 0x7f , 0x00 ) ; */
- data = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ; /* unlock cr0-7 */
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11); /* unlock cr0-7 */
data &= 0x7F ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x11 , data ) ;
@@ -1435,16 +1401,16 @@ void XGI_SetCRT1Timing_H( PVB_DEVICE_INFO pVBInfo, PXGI_HW_DEVICE_INFO HwDeviceE
for( i = 0x01 ; i <= 0x04 ; i++ )
{
data = pVBInfo->TimingH[ 0 ].data[ i ] ;
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )( i + 1 ) , data ) ;
+ XGINew_SetReg1( pVBInfo->P3d4, (unsigned short)(i + 1), data);
}
for( i = 0x05 ; i <= 0x06 ; i++ )
{
data = pVBInfo->TimingH[ 0 ].data[ i ];
- XGINew_SetReg1( pVBInfo->P3c4 ,( USHORT )( i + 6 ) , data ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, (unsigned short)(i + 6), data);
}
- j = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x0e ) ;
+ j = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x0e);
j &= 0x1F ;
data = pVBInfo->TimingH[ 0 ].data[ 7 ] ;
data &= 0xE0 ;
@@ -1453,17 +1419,17 @@ void XGI_SetCRT1Timing_H( PVB_DEVICE_INFO pVBInfo, PXGI_HW_DEVICE_INFO HwDeviceE
if ( HwDeviceExtension->jChipType >= XG20 )
{
- data = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x04 ) ;
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x04);
data = data - 1 ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x04 , data ) ;
- data = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x05 ) ;
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x05);
data1 = data ;
data1 &= 0xE0 ;
data &= 0x1F ;
if ( data == 0 )
{
pushax = data ;
- data = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x0c ) ;
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x0c);
data &= 0xFB ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x0c , data ) ;
data = pushax ;
@@ -1471,7 +1437,7 @@ void XGI_SetCRT1Timing_H( PVB_DEVICE_INFO pVBInfo, PXGI_HW_DEVICE_INFO HwDeviceE
data = data - 1 ;
data |= data1 ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x05 , data ) ;
- data = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x0e ) ;
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x0e);
data = data >> 5 ;
data = data + 3 ;
if ( data > 7 )
@@ -1488,10 +1454,12 @@ void XGI_SetCRT1Timing_H( PVB_DEVICE_INFO pVBInfo, PXGI_HW_DEVICE_INFO HwDeviceE
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1Timing_V( USHORT ModeIdIndex , USHORT ModeNo,PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
+ unsigned short ModeNo,
+ struct vb_device_info *pVBInfo)
{
- UCHAR data ;
- USHORT i , j ;
+ unsigned char data;
+ unsigned short i, j;
/* XGINew_SetReg1( pVBInfo->P3d4 , 0x51 , 0 ) ; */
/* XGINew_SetReg1( pVBInfo->P3d4 , 0x56 , 0 ) ; */
@@ -1500,22 +1468,22 @@ void XGI_SetCRT1Timing_V( USHORT ModeIdIndex , USHORT ModeNo,PVB_DEVICE_INFO pVB
for( i = 0x00 ; i <= 0x01 ; i++ )
{
data = pVBInfo->TimingV[ 0 ].data[ i ] ;
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )( i + 6 ) , data ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, (unsigned short)(i + 6), data);
}
for( i = 0x02 ; i <= 0x03 ; i++ )
{
data = pVBInfo->TimingV[ 0 ].data[ i ] ;
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )( i + 0x0e ) , data ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, (unsigned short)(i + 0x0e), data);
}
for( i = 0x04 ; i <= 0x05 ; i++ )
{
data = pVBInfo->TimingV[ 0 ].data[ i ] ;
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )( i + 0x11 ) , data ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, (unsigned short)(i + 0x11), data);
}
- j = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x0a ) ;
+ j = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x0a);
j &= 0xC0 ;
data = pVBInfo->TimingV[ 0 ].data[ 6 ] ;
data &= 0x3F ;
@@ -1535,7 +1503,7 @@ void XGI_SetCRT1Timing_V( USHORT ModeIdIndex , USHORT ModeNo,PVB_DEVICE_INFO pVB
if ( i )
data |= 0x80 ;
- j = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x09 ) ;
+ j = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x09);
j &= 0x5F ;
data |= j ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x09 , data ) ;
@@ -1548,10 +1516,12 @@ void XGI_SetCRT1Timing_V( USHORT ModeIdIndex , USHORT ModeNo,PVB_DEVICE_INFO pVB
/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
/* Description : Set LCD timing */
/* --------------------------------------------------------------------- */
-void XGI_SetXG21CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- UCHAR StandTableIndex, index, Tempax, Tempbx, Tempcx, Tempdx ;
- USHORT Temp1, Temp2, Temp3 ;
+ unsigned char StandTableIndex, index, Tempax, Tempbx, Tempcx, Tempdx;
+ unsigned short Temp1, Temp2, Temp3;
if ( ModeNo <= 0x13 )
{
@@ -1580,7 +1550,7 @@ void XGI_SetXG21CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableI
Tempdx |= Tempcx ; /* Tempdx: VRS[8:1] */
XGINew_SetReg1( pVBInfo->P3c4 , 0x34 , Tempdx ) ; /* SR34[7:0]: VRS[8:1] */
- Temp1 = Tempcx << 1 ; /* Temp1[8]: VRS[8] UCHAR -> USHORT */
+ Temp1 = Tempcx << 1 ; /* Temp1[8]: VRS[8] unsigned char -> unsigned short */
Temp1 |= Tempbx ; /* Temp1[8:0]: VRS[8:0] */
Tempax &= 0x80 ; /* Tempax[7]: CR7[7] */
Temp2 = Tempax << 2 ; /* Temp2[9]: VRS[9] */
@@ -1594,11 +1564,11 @@ void XGI_SetXG21CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableI
if ( Tempax < Temp3 ) /* VRE[3:0]<VRS[3:0] */
Temp2 |= 0x10 ; /* Temp2: VRE + 0x10 */
Temp2 &= 0xFF ; /* Temp2[7:0]: VRE[7:0] */
- Tempax = (UCHAR)Temp2 ; /* Tempax[7:0]: VRE[7:0] */
+ Tempax = (unsigned char)Temp2; /* Tempax[7:0]: VRE[7:0] */
Tempax <<= 2 ; /* Tempax << 2: VRE[5:0] */
Temp1 &= 0x600 ; /* Temp1[10:9]: VRS[10:9] */
Temp1 >>= 9 ; /* [10:9]->[1:0] */
- Tempbx = (UCHAR)Temp1 ; /* Tempbx[1:0]: VRS[10:9] */
+ Tempbx = (unsigned char)Temp1; /* Tempbx[1:0]: VRS[10:9] */
Tempax |= Tempbx ; /* VRE[5:0]VRS[10:9] */
Tempax &= 0x7F ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x3F , Tempax ) ; /* SR3F D[7:2]->VRE D[1:0]->VRS */
@@ -1632,7 +1602,7 @@ void XGI_SetXG21CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableI
Temp2 |= 0x40 ; /* Temp2 + 0x40 */
Temp2 &= 0xFF ;
- Tempax = (UCHAR)Temp2 ; /* Tempax: HRE[7:0] */
+ Tempax = (unsigned char)Temp2; /* Tempax: HRE[7:0] */
Tempax <<= 2 ; /* Tempax[7:2]: HRE[5:0] */
Tempdx >>= 6 ; /* Tempdx[7:6]->[1:0] HRS[9:8] */
Tempax |= Tempdx ; /* HRE[5:0]HRS[9:8] */
@@ -1676,20 +1646,22 @@ void XGI_SetXG21CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableI
Temp2 |= 0x20 ; /* VRE + 0x20 */
Temp2 &= 0xFF ;
- Tempax = (UCHAR)Temp2 ; /* Tempax: VRE[7:0] */
+ Tempax = (unsigned char)Temp2; /* Tempax: VRE[7:0] */
Tempax <<= 2 ; /* Tempax[7:0]; VRE[5:0]00 */
Temp1 &= 0x600 ; /* Temp1[10:9]: VRS[10:9] */
Temp1 >>= 9 ; /* Temp1[1:0]: VRS[10:9] */
- Tempbx = (UCHAR)Temp1 ;
+ Tempbx = (unsigned char)Temp1;
Tempax |= Tempbx ; /* Tempax[7:0]: VRE[5:0]VRS[10:9] */
Tempax &= 0x7F ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x3F , Tempax ) ; /* SR3F D[7:2]->VRE D[1:0]->VRS */
}
}
-void XGI_SetXG27CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetXG27CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT StandTableIndex, index, Tempax, Tempbx, Tempcx, Tempdx ;
+ unsigned short StandTableIndex, index, Tempax, Tempbx, Tempcx, Tempdx;
if ( ModeNo <= 0x13 )
{
@@ -1726,7 +1698,7 @@ void XGI_SetXG27CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableI
Tempbx |= Tempax ; /* Tempbx[9:0]: VRE[9:0] */
if ( Tempax <= (Tempcx & 0x0F) ) /* VRE[3:0]<=VRS[3:0] */
Tempbx |= 0x10 ; /* Tempbx: VRE + 0x10 */
- Tempax = (UCHAR)Tempbx & 0xFF; /* Tempax[7:0]: VRE[7:0] */
+ Tempax = (unsigned char)Tempbx & 0xFF; /* Tempax[7:0]: VRE[7:0] */
Tempax <<= 2 ; /* Tempax << 2: VRE[5:0] */
Tempcx = (Tempcx&0x600)>>8; /* Tempcx VRS[10:9] */
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x3F , ~0xFC, Tempax ) ; /* SR3F D[7:2]->VRE D[5:0] */
@@ -1810,10 +1782,12 @@ void XGI_SetXG27CRTC(USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableI
/* Output : FCLK duty cycle, FCLK delay compensation */
/* Description : All values set zero */
/* --------------------------------------------------------------------- */
-void XGI_SetXG21LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT ModeNo)
+void XGI_SetXG21LCD(struct vb_device_info *pVBInfo,
+ unsigned short RefreshRateTableIndex,
+ unsigned short ModeNo)
{
- USHORT Data , Temp , b3CC ;
- USHORT XGI_P3cc ;
+ unsigned short Data, Temp, b3CC;
+ unsigned short XGI_P3cc;
XGI_P3cc = pVBInfo->P3cc ;
@@ -1844,7 +1818,7 @@ void XGI_SetXG21LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT
if ( ModeNo <= 0x13 )
{
- b3CC = (UCHAR) XGINew_GetReg2( XGI_P3cc ) ;
+ b3CC = (unsigned char) XGINew_GetReg2(XGI_P3cc);
if ( b3CC & 0x40 )
XGINew_SetRegOR( pVBInfo->P3c4 , 0x30 , 0x20 ) ; /* Hsync polarity */
if ( b3CC & 0x80 )
@@ -1860,10 +1834,12 @@ void XGI_SetXG21LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT
}
}
-void XGI_SetXG27LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT ModeNo)
+void XGI_SetXG27LCD(struct vb_device_info *pVBInfo,
+ unsigned short RefreshRateTableIndex,
+ unsigned short ModeNo)
{
- USHORT Data , Temp , b3CC ;
- USHORT XGI_P3cc ;
+ unsigned short Data, Temp, b3CC;
+ unsigned short XGI_P3cc;
XGI_P3cc = pVBInfo->P3cc ;
@@ -1896,7 +1872,7 @@ void XGI_SetXG27LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT
if ( ModeNo <= 0x13 )
{
- b3CC = (UCHAR) XGINew_GetReg2( XGI_P3cc ) ;
+ b3CC = (unsigned char) XGINew_GetReg2(XGI_P3cc);
if ( b3CC & 0x40 )
XGINew_SetRegOR( pVBInfo->P3c4 , 0x30 , 0x20 ) ; /* Hsync polarity */
if ( b3CC & 0x80 )
@@ -1918,7 +1894,9 @@ void XGI_SetXG27LCD(PVB_DEVICE_INFO pVBInfo,USHORT RefreshRateTableIndex,USHORT
/* Output : CRT1 CRTC */
/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */
/* --------------------------------------------------------------------- */
-void XGI_UpdateXG21CRTC( USHORT ModeNo , PVB_DEVICE_INFO pVBInfo , USHORT RefreshRateTableIndex )
+void XGI_UpdateXG21CRTC(unsigned short ModeNo,
+ struct vb_device_info *pVBInfo,
+ unsigned short RefreshRateTableIndex)
{
int i , index = -1;
@@ -1961,16 +1939,15 @@ void XGI_UpdateXG21CRTC( USHORT ModeNo , PVB_DEVICE_INFO pVBInfo , USHORT Refres
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1DE( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo,USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT resindex ,
- tempax ,
- tempbx ,
- tempcx ,
- temp ,
- modeflag ;
+ unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
- UCHAR data ;
+ unsigned char data;
resindex = XGI_GetResInfo( ModeNo , ModeIdIndex, pVBInfo ) ;
@@ -2013,13 +1990,13 @@ void XGI_SetCRT1DE( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo,USHORT
tempax -= 1 ;
tempbx -= 1 ;
tempcx = tempax ;
- temp = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ;
- data = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ;
+ temp = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11);
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11);
data &= 0x7F ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x11 , data ) ; /* Unlock CRTC */
- XGINew_SetReg1( pVBInfo->P3d4 , 0x01 , ( USHORT )( tempcx & 0xff ) ) ;
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x0b , ~0x0c , ( USHORT )( ( tempcx & 0x0ff00 ) >> 10 ) ) ;
- XGINew_SetReg1( pVBInfo->P3d4 , 0x12 , ( USHORT )( tempbx & 0xff ) ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
+ XGINew_SetRegANDOR(pVBInfo->P3d4, 0x0b, ~0x0c, (unsigned short)((tempcx & 0x0ff00) >> 10));
+ XGINew_SetReg1(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
tempax = 0 ;
tempbx = tempbx >> 8 ;
@@ -2030,7 +2007,7 @@ void XGI_SetCRT1DE( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo,USHORT
tempax |= 0x40 ;
XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x07 , ~0x42 , tempax ) ;
- data =( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x07 ) ;
+ data = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x07);
data &= 0xFF ;
tempax = 0 ;
@@ -2048,9 +2025,11 @@ void XGI_SetCRT1DE( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo,USHORT
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetResInfo(USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+unsigned short XGI_GetResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT resindex ;
+ unsigned short resindex;
if ( ModeNo <= 0x13 )
{
@@ -2070,9 +2049,13 @@ USHORT XGI_GetResInfo(USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBIn
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1Offset( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1Offset(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT temp ,
+ unsigned short temp ,
ah ,
al ,
temp2 ,
@@ -2131,7 +2114,7 @@ void XGI_SetCRT1Offset( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRate
i |= temp ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x0E , i ) ;
- temp =( UCHAR )temp2 ;
+ temp = (unsigned char)temp2;
temp &= 0xFF ; /* al */
XGINew_SetReg1( pVBInfo->P3d4 , 0x13 , temp ) ;
@@ -2163,11 +2146,13 @@ void XGI_SetCRT1Offset( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRate
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1VCLK( USHORT ModeNo , USHORT ModeIdIndex ,
- PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- UCHAR index , data ;
- USHORT vclkindex ;
+ unsigned char index, data;
+ unsigned short vclkindex;
if ( pVBInfo->IF_DEF_LVDS == 1 )
{
@@ -2224,9 +2209,11 @@ void XGI_SetCRT1VCLK( USHORT ModeNo , USHORT ModeIdIndex ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1FIFO( USHORT ModeNo , PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1FIFO(unsigned short ModeNo,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT data ;
+ unsigned short data;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x3D ) ;
data &= 0xfe ;
@@ -2273,10 +2260,12 @@ void XGI_SetCRT1FIFO( USHORT ModeNo , PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT1ModeRegs( PXGI_HW_DEVICE_INFO HwDeviceExtension ,
- USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT data ,
+ unsigned short data ,
data2 ,
data3 ,
infoflag = 0 ,
@@ -2411,13 +2400,16 @@ void XGI_SetCRT1ModeRegs( PXGI_HW_DEVICE_INFO HwDeviceExtension ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetVCLKState( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo , USHORT RefreshRateTableIndex,PVB_DEVICE_INFO pVBInfo )
+void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT data ,
+ unsigned short data ,
data2 = 0 ;
- SHORT VCLK ;
+ short VCLK;
- UCHAR index ;
+ unsigned char index;
if ( ModeNo <= 0x13 )
VCLK = 0 ;
@@ -2475,9 +2467,9 @@ void XGI_SetVCLKState( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-/*void XGI_VesaLowResolution( USHORT ModeNo , USHORT ModeIdIndex ,PVB_DEVICE_INFO pVBInfo)
+/*void XGI_VesaLowResolution(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT modeflag;
+ unsigned short modeflag;
if ( ModeNo > 0x13 )
modeflag = pVBInfo->EModeIDTable[ ModeIdIndex ].Ext_ModeFlag ;
@@ -2518,9 +2510,11 @@ void XGI_SetVCLKState( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_LoadDAC( USHORT ModeNo , USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo )
+void XGI_LoadDAC(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT data , data2 , time ,
+ unsigned short data , data2 , time ,
i , j , k , m , n , o ,
si , di , bx , dl , al , ah , dh ,
*table = NULL ;
@@ -2627,9 +2621,11 @@ void XGI_LoadDAC( USHORT ModeNo , USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_WriteDAC( USHORT dl , USHORT ah , USHORT al , USHORT dh,PVB_DEVICE_INFO pVBInfo )
+void XGI_WriteDAC(unsigned short dl, unsigned short ah,
+ unsigned short al, unsigned short dh,
+ struct vb_device_info *pVBInfo)
{
- USHORT temp , bh , bl ;
+ unsigned short temp, bh, bl;
bh = ah ;
bl = al ;
@@ -2652,70 +2648,24 @@ void XGI_WriteDAC( USHORT dl , USHORT ah , USHORT al , USHORT dh,PVB_DEVICE_INFO
bh = temp ;
}
}
- XGINew_SetReg3( pVBInfo->P3c9 , ( USHORT )dh ) ;
- XGINew_SetReg3( pVBInfo->P3c9 , ( USHORT )bh ) ;
- XGINew_SetReg3( pVBInfo->P3c9 , ( USHORT )bl ) ;
+ XGINew_SetReg3(pVBInfo->P3c9, (unsigned short)dh);
+ XGINew_SetReg3(pVBInfo->P3c9, (unsigned short)bh);
+ XGINew_SetReg3(pVBInfo->P3c9, (unsigned short)bl);
}
-#if 0
-/* --------------------------------------------------------------------- */
-/* Function : XGI_ClearBuffer */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-void XGI_ClearBuffer( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo, PVB_DEVICE_INFO pVBInfo)
-{
- PVOID VideoMemoryAddress = ( PVOID )HwDeviceExtension->pjVideoMemoryAddress ;
- ULONG AdapterMemorySize = ( ULONG )HwDeviceExtension->ulVideoMemorySize ;
- PUSHORT pBuffer ;
-#ifndef LINUX_XF86
- int i ;
-#endif
-
- if ( pVBInfo->ModeType >= ModeEGA )
- {
- if ( ModeNo > 0x13 )
- {
- AdapterMemorySize = 0x40000 ; /* clear 256k */
- /* GetDRAMSize( HwDeviceExtension ) ; */
- XGI_SetMemory( VideoMemoryAddress , AdapterMemorySize , 0 ) ;
- }
- else
- {
-/*
- pBuffer = VideoMemoryAddress ;
- for( i = 0 ; i < 0x4000 ; i++ )
- pBuffer[ i ] = 0x0000 ;
-*/
- }
- }
- else
- {
- pBuffer = VideoMemoryAddress ;
- if ( pVBInfo->ModeType < ModeCGA )
- {
-/*
- for ( i = 0 ; i < 0x4000 ; i++ )
- pBuffer[ i ] = 0x0720 ;
-*/
- }
- else
- XGI_SetMemory( VideoMemoryAddress , 0x8000 , 0 ) ;
- }
-}
-
-#endif
/* --------------------------------------------------------------------- */
/* Function : XGI_SetLCDAGroup */
/* Input : */
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLCDAGroup( USHORT ModeNo , USHORT ModeIdIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetLCDAGroup(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT RefreshRateTableIndex ;
- /* USHORT temp ; */
+ unsigned short RefreshRateTableIndex;
+ /* unsigned short temp ; */
/* pVBInfo->SelectCRT2Rate = 0 ; */
@@ -2735,9 +2685,11 @@ void XGI_SetLCDAGroup( USHORT ModeNo , USHORT ModeIdIndex , PXGI_HW_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetLVDSResInfo( USHORT ModeNo , USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo )
+void XGI_GetLVDSResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT resindex , xres , yres , modeflag ;
+ unsigned short resindex, xres, yres, modeflag;
if ( ModeNo <= 0x13 )
{
@@ -2803,17 +2755,20 @@ void XGI_GetLVDSResInfo( USHORT ModeNo , USHORT ModeIdIndex,PVB_DEVICE_INFO pVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetLVDSData( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetLVDSData(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbx ;
- XGI330_LVDSDataStruct *LCDPtr = NULL ;
- XGI330_CHTVDataStruct *TVPtr = NULL ;
+ unsigned short tempbx;
+ struct XGI330_LVDSDataStruct *LCDPtr = NULL;
+ struct XGI330_CHTVDataStruct *TVPtr = NULL;
tempbx = 2 ;
if ( pVBInfo->VBInfo & ( SetCRT2ToLCD | SetCRT2ToLCDA ) )
{
- LCDPtr = ( XGI330_LVDSDataStruct * )XGI_GetLcdPtr( tempbx, ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo) ;
+ LCDPtr = (struct XGI330_LVDSDataStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
pVBInfo->VGAHT = LCDPtr->VGAHT ;
pVBInfo->VGAVT = LCDPtr->VGAVT ;
pVBInfo->HT = LCDPtr->LCDHT ;
@@ -2823,7 +2778,7 @@ void XGI_GetLVDSData( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTa
{
if ( pVBInfo->VBInfo & SetCRT2ToTV )
{
- TVPtr = ( XGI330_CHTVDataStruct * )XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ TVPtr = (struct XGI330_CHTVDataStruct *)XGI_GetTVPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
pVBInfo->VGAHT = TVPtr->VGAHT ;
pVBInfo->VGAVT = TVPtr->VGAVT ;
pVBInfo->HT = TVPtr->LCDHT ;
@@ -2866,16 +2821,18 @@ void XGI_GetLVDSData( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTa
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
- USHORT RefreshRateTableIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo )
+void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- UCHAR index ;
- USHORT tempbx , i ;
- XGI_LVDSCRT1HDataStruct *LCDPtr = NULL ;
- XGI_LVDSCRT1VDataStruct *LCDPtr1 =NULL ;
- /* XGI330_CHTVDataStruct *TVPtr = NULL ; */
- XGI_CH7007TV_TimingHStruct *CH7007TV_TimingHPtr = NULL;
- XGI_CH7007TV_TimingVStruct *CH7007TV_TimingVPtr = NULL;
+ unsigned char index;
+ unsigned short tempbx, i;
+ struct XGI_LVDSCRT1HDataStruct *LCDPtr = NULL;
+ struct XGI_LVDSCRT1VDataStruct *LCDPtr1 = NULL;
+ /* struct XGI330_CHTVDataStruct *TVPtr = NULL ; */
+ struct XGI_CH7007TV_TimingHStruct *CH7007TV_TimingHPtr = NULL;
+ struct XGI_CH7007TV_TimingVStruct *CH7007TV_TimingVPtr = NULL;
if( ModeNo <= 0x13 )
index = pVBInfo->SModeIDTable[ ModeIdIndex ].St_CRT2CRTC ;
@@ -2890,7 +2847,7 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
if ( pVBInfo->VBInfo & ( SetCRT2ToLCD | SetCRT2ToLCDA ) )
{
- LCDPtr = ( XGI_LVDSCRT1HDataStruct * )XGI_GetLcdPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
for( i = 0 ; i < 8 ; i++ )
pVBInfo->TimingH[ 0 ].data[ i ] = LCDPtr[ 0 ].Reg[ i ] ;
@@ -2900,7 +2857,7 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
{
if ( pVBInfo->VBInfo & SetCRT2ToTV )
{
- CH7007TV_TimingHPtr = ( XGI_CH7007TV_TimingHStruct *)XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ CH7007TV_TimingHPtr = (struct XGI_CH7007TV_TimingHStruct *)XGI_GetTVPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
for( i = 0 ; i < 8 ; i++ )
pVBInfo->TimingH[ 0 ].data[ i ] = CH7007TV_TimingHPtr[ 0 ].data[ i ] ;
@@ -2910,7 +2867,7 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
/* if ( pVBInfo->IF_DEF_CH7017 == 1 )
{
if ( pVBInfo->VBInfo & SetCRT2ToTV )
- TVPtr = ( XGI330_CHTVDataStruct *)XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ TVPtr = ( struct XGI330_CHTVDataStruct *)XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
} */
XGI_SetCRT1Timing_H(pVBInfo,HwDeviceExtension) ;
@@ -2925,7 +2882,7 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
if ( pVBInfo->VBInfo & ( SetCRT2ToLCD | SetCRT2ToLCDA ) )
{
- LCDPtr1 = ( XGI_LVDSCRT1VDataStruct * )XGI_GetLcdPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
for( i = 0 ; i < 7 ; i++ )
pVBInfo->TimingV[ 0 ].data[ i ] = LCDPtr1[ 0 ].Reg[ i ] ;
}
@@ -2934,7 +2891,7 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
{
if ( pVBInfo->VBInfo & SetCRT2ToTV )
{
- CH7007TV_TimingVPtr = ( XGI_CH7007TV_TimingVStruct *)XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ CH7007TV_TimingVPtr = (struct XGI_CH7007TV_TimingVStruct *)XGI_GetTVPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
for( i = 0 ; i < 7 ; i++ )
pVBInfo->TimingV[ 0 ].data[ i ] = CH7007TV_TimingVPtr[ 0 ].data[ i ] ;
@@ -2943,7 +2900,7 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
/* if ( pVBInfo->IF_DEF_CH7017 == 1 )
{
if ( pVBInfo->VBInfo & SetCRT2ToTV )
- TVPtr = ( XGI330_CHTVDataStruct *)XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ TVPtr = ( struct XGI330_CHTVDataStruct *)XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
} */
XGI_SetCRT1Timing_V( ModeIdIndex , ModeNo , pVBInfo) ;
@@ -2966,12 +2923,14 @@ void XGI_ModCRT1Regs( USHORT ModeNo , USHORT ModeIdIndex ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbx , tempax , tempcx , tempdx , push1 , push2 , modeflag ;
+ unsigned short tempbx, tempax, tempcx, tempdx, push1, push2, modeflag;
unsigned long temp , temp1 , temp2 , temp3 , push3 ;
- XGI330_LCDDataDesStruct *LCDPtr = NULL ;
- XGI330_LCDDataDesStruct2 *LCDPtr1 = NULL ;
+ struct XGI330_LCDDataDesStruct *LCDPtr = NULL;
+ struct XGI330_LCDDataDesStruct2 *LCDPtr1 = NULL;
if ( ModeNo > 0x13 )
modeflag = pVBInfo->EModeIDTable[ ModeIdIndex ].Ext_ModeFlag ;
@@ -2985,16 +2944,16 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
if ( pVBInfo->IF_DEF_OEMUtil == 1 )
{
tempbx = 8 ;
- LCDPtr = ( XGI330_LCDDataDesStruct * )XGI_GetLcdPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDPtr = (struct XGI330_LCDDataDesStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
if ( ( pVBInfo->IF_DEF_OEMUtil == 0 ) || ( LCDPtr == 0 ) )
{
tempbx = 3 ;
if ( pVBInfo->LCDInfo & EnableScalingLCD )
- LCDPtr1 = ( XGI330_LCDDataDesStruct2 * )XGI_GetLcdPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDPtr1 = (struct XGI330_LCDDataDesStruct2 *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
else
- LCDPtr = ( XGI330_LCDDataDesStruct * )XGI_GetLcdPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDPtr = (struct XGI330_LCDDataDesStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
XGI_GetLCDSync( &tempax , &tempbx ,pVBInfo) ;
@@ -3056,8 +3015,8 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
tempcx = tempcx >> 3 ;
tempbx = tempbx >> 3 ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x16 , ( USHORT )( tempbx & 0xff ) ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x17 , ( USHORT )( tempcx & 0xff ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x16, (unsigned short)(tempbx & 0xff));
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x17, (unsigned short)(tempcx & 0xff));
tempax = pVBInfo->HT ;
@@ -3085,7 +3044,7 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
tempax |= tempcx ;
XGINew_SetReg1( pVBInfo->Part1Port , 0x15 , tempax ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x14 , ( USHORT )( tempbx & 0xff ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x14, (unsigned short)(tempbx & 0xff));
tempax = pVBInfo->VT ;
if ( pVBInfo->LCDInfo & EnableScalingLCD )
@@ -3099,13 +3058,13 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
if ( tempcx >= tempax )
tempcx -= tempax ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x1b , ( USHORT )( tempbx & 0xff ) ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x1c , ( USHORT )( tempcx & 0xff ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x1b, (unsigned short)(tempbx & 0xff));
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x1c, (unsigned short)(tempcx & 0xff));
tempbx = ( tempbx >> 8 ) & 0x07 ;
tempcx = ( tempcx >> 8 ) & 0x07 ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x1d , ( USHORT )( ( tempcx << 3 ) | tempbx ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x1d, (unsigned short)((tempcx << 3) | tempbx));
tempax = pVBInfo->VT ;
if ( pVBInfo->LCDInfo & EnableScalingLCD )
@@ -3123,8 +3082,8 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
if ( tempcx >= tempax )
tempcx -= tempax ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x18 , ( USHORT )( tempbx & 0xff ) ) ;
- XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x19 , ~0x0f , ( USHORT )( tempcx & 0x0f ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x18, (unsigned short)(tempbx & 0xff));
+ XGINew_SetRegANDOR(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f));
tempax = ( ( tempbx >> 8 ) & 0x07 ) << 3 ;
@@ -3145,7 +3104,7 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
temp = tempax ; /* 0430 ylshieh */
temp1 = ( temp << 18 ) / tempbx ;
- tempdx = ( USHORT )( ( temp << 18 ) % tempbx ) ;
+ tempdx = (unsigned short)((temp << 18) % tempbx);
if ( tempdx != 0 )
temp1 += 1 ;
@@ -3153,10 +3112,10 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
temp2 = temp1 ;
push3 = temp2 ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x37 , ( USHORT )( temp2 & 0xff ) ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x36 , ( USHORT )( ( temp2 >> 8 ) & 0xff ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x37, (unsigned short)(temp2 & 0xff));
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x36, (unsigned short)((temp2 >> 8) & 0xff));
- tempbx = ( USHORT )( temp2 >> 16 ) ;
+ tempbx = (unsigned short)(temp2 >> 16);
tempax = tempbx & 0x03 ;
tempbx = pVBInfo->VGAVDE ;
@@ -3168,10 +3127,10 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
if ( pVBInfo->VBType & VB_XGI301C )
{
temp2 = push3 ;
- XGINew_SetReg1( pVBInfo->Part4Port , 0x3c , ( USHORT )( temp2 & 0xff ) ) ;
- XGINew_SetReg1( pVBInfo->Part4Port , 0x3b , ( USHORT )( ( temp2 >> 8 ) & 0xff ) ) ;
- tempbx = ( USHORT )( temp2 >> 16 ) ;
- XGINew_SetRegANDOR( pVBInfo->Part4Port , 0x3a , ~0xc0 , ( USHORT )( ( tempbx & 0xff ) << 6 ) ) ;
+ XGINew_SetReg1(pVBInfo->Part4Port, 0x3c, (unsigned short)(temp2 & 0xff));
+ XGINew_SetReg1(pVBInfo->Part4Port, 0x3b, (unsigned short)((temp2 >> 8) & 0xff));
+ tempbx = (unsigned short)(temp2 >> 16);
+ XGINew_SetRegANDOR(pVBInfo->Part4Port, 0x3a, ~0xc0, (unsigned short)((tempbx & 0xff) << 6));
tempcx = pVBInfo->VGAVDE ;
if ( tempcx == pVBInfo->VDE )
@@ -3185,7 +3144,7 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
temp1 = tempcx << 16 ;
- tempax = ( USHORT )( temp1 / tempbx ) ;
+ tempax = (unsigned short)(temp1 / tempbx);
if ( ( tempbx & 0xffff ) == ( tempcx & 0xffff ) )
tempax = 65535 ;
@@ -3199,28 +3158,28 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
temp3 = ( temp3 & 0xffff0000 ) + ( temp1 & 0xffff ) ;
- tempax = ( USHORT )( temp3 & 0xff ) ;
+ tempax = (unsigned short)(temp3 & 0xff);
XGINew_SetReg1( pVBInfo->Part1Port , 0x1f , tempax ) ;
temp1 = pVBInfo->VGAVDE << 18 ;
temp1 = temp1 / push3 ;
- tempbx = ( USHORT )( temp1 & 0xffff ) ;
+ tempbx = (unsigned short)(temp1 & 0xffff);
if ( pVBInfo->LCDResInfo == Panel1024x768 )
tempbx -= 1 ;
tempax = ( ( tempbx >> 8 ) & 0xff ) << 3 ;
- tempax |= ( USHORT )( ( temp3 >> 8 ) & 0x07 ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x20 , ( USHORT )( tempax & 0xff ) ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x21 , ( USHORT )( tempbx & 0xff ) ) ;
+ tempax |= (unsigned short)((temp3 >> 8) & 0x07);
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff));
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff));
temp3 = temp3 >> 16 ;
if ( modeflag & HalfDCLK )
temp3 = temp3 >> 1 ;
- XGINew_SetReg1(pVBInfo->Part1Port , 0x22 , ( USHORT )( ( temp3 >> 8 ) & 0xff ) ) ;
- XGINew_SetReg1(pVBInfo->Part1Port , 0x23 , ( USHORT )( temp3 & 0xff ) ) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x22, (unsigned short)((temp3 >> 8) & 0xff));
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x23, (unsigned short)(temp3 & 0xff));
}
}
}
@@ -3232,9 +3191,9 @@ void XGI_SetLVDSRegs( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2ECLK( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo)
{
- UCHAR di_0 , di_1 , tempal ;
+ unsigned char di_0, di_1, tempal;
int i ;
tempal = XGI_GetVCLKPtr( RefreshRateTableIndex , ModeNo , ModeIdIndex, pVBInfo ) ;
@@ -3243,7 +3202,7 @@ void XGI_SetCRT2ECLK( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
for( i = 0 ; i < 4 ; i++ )
{
- XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x31 , ~0x30 , ( USHORT )( 0x10 * i ) ) ;
+ XGINew_SetRegANDOR(pVBInfo->P3d4, 0x31, ~0x30, (unsigned short)(0x10 * i));
if ( pVBInfo->IF_DEF_CH7007 == 1 )
{
XGINew_SetReg1( pVBInfo->P3c4 , 0x2b , di_0 ) ;
@@ -3269,9 +3228,9 @@ void XGI_SetCRT2ECLK( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_UpdateModeInfo( PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo )
+void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempcl ,
+ unsigned short tempcl ,
tempch ,
temp ,
tempbl ,
@@ -3377,7 +3336,7 @@ void XGI_UpdateModeInfo( PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetVGAType( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
/*
if ( HwDeviceExtension->jChipType >= XG20 )
@@ -3399,9 +3358,9 @@ void XGI_GetVGAType( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetVBType(PVB_DEVICE_INFO pVBInfo)
+void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
- USHORT flag , tempbx , tempah ;
+ unsigned short flag, tempbx, tempah;
if ( pVBInfo->IF_DEF_CH7007 == 1 )
{
@@ -3462,9 +3421,9 @@ void XGI_GetVBType(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetVBInfo( USHORT ModeNo , USHORT ModeIdIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempax ,
+ unsigned short tempax ,
push ,
tempbx ,
temp ,
@@ -3703,9 +3662,9 @@ void XGI_GetVBInfo( USHORT ModeNo , USHORT ModeIdIndex , PXGI_HW_DEVICE_INFO HwD
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetTVInfo( USHORT ModeNo , USHORT ModeIdIndex ,PVB_DEVICE_INFO pVBInfo )
+void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT temp ,
+ unsigned short temp ,
tempbx = 0 ,
resinfo = 0 ,
modeflag ,
@@ -3838,9 +3797,10 @@ void XGI_GetTVInfo( USHORT ModeNo , USHORT ModeIdIndex ,PVB_DEVICE_INFO pVBInfo
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_GetLCDInfo( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT temp ,
+ unsigned short temp ,
tempax ,
tempbx ,
modeflag ,
@@ -4047,96 +4007,12 @@ BOOLEAN XGI_GetLCDInfo( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBI
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_SearchModeID( USHORT ModeNo , USHORT *ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_SearchModeID(unsigned short ModeNo,
+ unsigned short *ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
-#ifdef TC
-
- if ( ModeNo <= 5 )
- ModeNo |= 1 ;
-
- if ( ModeNo <= 0x13 )
- {
- /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->SModeIDTable)/sizeof(XGI_StStruct);(*ModeIdIndex)++) */
- for( *ModeIdIndex = 0 ; ; ( *ModeIdIndex )++ )
- {
- if ( pVBInfo->SModeIDTable[ *ModeIdIndex ].St_ModeID == ModeNo )
- break ;
- if ( pVBInfo->SModeIDTable[ *ModeIdIndex ].St_ModeID == 0xFF )
- return( FALSE ) ;
- }
-
- VGA_INFO = ( PUCHAR )MK_FP( 0 , 0x489 ) ;
-
- if ( ModeNo == 0x07 )
- {
- if ( ( *VGA_INFO & 0x10 ) != 0 )
- ( *ModeIdIndex )++ ; /* 400 lines */
- /* else 350 lines */
- }
-
- if ( ModeNo <= 3 )
- {
- if ( ( *VGA_INFO & 0x80 ) == 0 )
- {
- ( *ModeIdIndex )++ ;
- if ( ( *VGA_INFO & 0x10 ) != 0 )
- ( *ModeIdIndex )++ ; /* 400 lines */
- /* else 350 lines */
- }
- /* else 200 lines */
- }
- }
- else
- {
- /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->EModeIDTable)/sizeof(XGI_ExtStruct);(*ModeIdIndex)++) */
- for( *ModeIdIndex = 0 ; ; ( *ModeIdIndex )++ )
- {
- if ( pVBInfo->EModeIDTable[ *ModeIdIndex ].Ext_ModeID == ModeNo )
- break ;
- if ( pVBInfo->EModeIDTable[ *ModeIdIndex ].Ext_ModeID == 0xFF )
- return( FALSE ) ;
- }
- }
-
-
-#endif
-
-#ifdef WIN2000
-
- if ( ModeNo <= 5 )
- ModeNo |= 1 ;
- if ( ModeNo <= 0x13 )
- {
- /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->SModeIDTable)/sizeof(XGI_StStruct);(*ModeIdIndex)++) */
- for( *ModeIdIndex = 0 ; ; ( *ModeIdIndex )++ )
- {
- if ( pVBInfo->SModeIDTable[ *ModeIdIndex ].St_ModeID == ModeNo )
- break ;
- if ( pVBInfo->SModeIDTable[ *ModeIdIndex ].St_ModeID == 0xFF )
- return( FALSE ) ;
- }
-
- if ( ModeNo == 0x07 )
- ( *ModeIdIndex )++ ; /* 400 lines */
-
- if ( ModeNo <=3 )
- ( *ModeIdIndex ) += 2 ; /* 400 lines */
- /* else 350 lines */
- }
- else
- {
- /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->EModeIDTable)/sizeof(XGI_ExtStruct);(*ModeIdIndex)++) */
- for( *ModeIdIndex = 0 ; ; ( *ModeIdIndex )++ )
- {
- if ( pVBInfo->EModeIDTable[ *ModeIdIndex ].Ext_ModeID == ModeNo )
- break ;
- if ( pVBInfo->EModeIDTable[ *ModeIdIndex ].Ext_ModeID == 0xFF )
- return( FALSE ) ;
- }
- }
-#endif
#ifdef LINUX /* chiawen for linux solution */
@@ -4144,13 +4020,13 @@ BOOLEAN XGI_SearchModeID( USHORT ModeNo , USHORT *ModeIdIndex, PVB_DEVICE_INFO p
ModeNo |= 1 ;
if ( ModeNo <= 0x13 )
{
- /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->SModeIDTable)/sizeof(XGI_StStruct);(*ModeIdIndex)++) */
+ /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->SModeIDTable)/sizeof(struct XGI_StStruct);(*ModeIdIndex)++) */
for( *ModeIdIndex = 0 ; ; ( *ModeIdIndex )++ )
{
- if ( pVBInfo->SModeIDTable[ *ModeIdIndex ].St_ModeID == ModeNo )
- break ;
- if ( pVBInfo->SModeIDTable[ *ModeIdIndex ].St_ModeID == 0xFF )
- return( FALSE ) ;
+ if (pVBInfo->SModeIDTable[*ModeIdIndex].St_ModeID == ModeNo)
+ break;
+ if (pVBInfo->SModeIDTable[*ModeIdIndex].St_ModeID == 0xFF)
+ return 0;
}
if ( ModeNo == 0x07 )
@@ -4162,19 +4038,19 @@ BOOLEAN XGI_SearchModeID( USHORT ModeNo , USHORT *ModeIdIndex, PVB_DEVICE_INFO p
}
else
{
- /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->EModeIDTable)/sizeof(XGI_ExtStruct);(*ModeIdIndex)++) */
+ /* for (*ModeIdIndex=0;*ModeIdIndex<sizeof(pVBInfo->EModeIDTable)/sizeof(struct XGI_ExtStruct);(*ModeIdIndex)++) */
for( *ModeIdIndex = 0 ; ; ( *ModeIdIndex )++ )
{
- if ( pVBInfo->EModeIDTable[ *ModeIdIndex ].Ext_ModeID == ModeNo )
- break ;
- if ( pVBInfo->EModeIDTable[ *ModeIdIndex ].Ext_ModeID == 0xFF )
- return( FALSE ) ;
+ if (pVBInfo->EModeIDTable[*ModeIdIndex].Ext_ModeID == ModeNo)
+ break;
+ if (pVBInfo->EModeIDTable[*ModeIdIndex].Ext_ModeID == 0xFF)
+ return 0;
}
}
#endif
- return( TRUE ) ;
+ return 1;
}
@@ -4188,9 +4064,12 @@ BOOLEAN XGI_SearchModeID( USHORT ModeNo , USHORT *ModeIdIndex, PVB_DEVICE_INFO p
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGINew_CheckMemorySize(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo)
+unsigned char XGINew_CheckMemorySize(struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT memorysize ,
+ unsigned short memorysize ,
modeflag ,
temp ,
temp1 ,
@@ -4199,7 +4078,7 @@ BOOLEAN XGINew_CheckMemorySize(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT Mode
/* if ( ( HwDeviceExtension->jChipType == XGI_650 ) ||
( HwDeviceExtension->jChipType == XGI_650M ) )
{
- return( TRUE ) ;
+ return 1;
} */
if ( ModeNo <= 0x13 )
@@ -4257,10 +4136,10 @@ BOOLEAN XGINew_CheckMemorySize(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT Mode
temp <<= 1 ;
}
}
- if ( temp < memorysize )
- return( FALSE ) ;
+ if (temp < memorysize)
+ return 0;
else
- return( TRUE ) ;
+ return 1;
}
@@ -4270,10 +4149,10 @@ BOOLEAN XGINew_CheckMemorySize(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT Mode
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-/*void XGINew_IsLowResolution( USHORT ModeNo , USHORT ModeIdIndex, BOOLEAN XGINew_CheckMemorySize(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO pVBInfo)
+/*void XGINew_IsLowResolution(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned char XGINew_CheckMemorySize(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT data ;
- USHORT ModeFlag ;
+ unsigned short data ;
+ unsigned short ModeFlag ;
data = XGINew_GetReg1( pVBInfo->P3c4 , 0x0F ) ;
data &= 0x7F ;
@@ -4302,7 +4181,7 @@ BOOLEAN XGINew_CheckMemorySize(PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT Mode
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_DisplayOn( PXGI_HW_DEVICE_INFO pXGIHWDE , PVB_DEVICE_INFO pVBInfo )
+void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE, struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR(pVBInfo->P3c4,0x01,0xDF,0x00);
@@ -4331,12 +4210,6 @@ void XGI_DisplayOn( PXGI_HW_DEVICE_INFO pXGIHWDE , PVB_DEVICE_INFO pVBInfo )
if (pVBInfo->IF_DEF_CH7007 == 1) /* [Billy] 07/05/23 For CH7007 */
{
-#ifdef WIN2000
- if ( IsCH7007TVMode( pVBInfo ) )
- {
- TurnOnCH7007(pXGIHWDE->pDevice) ; /* 07/05/28 */
- }
-#endif
}
@@ -4372,7 +4245,7 @@ void XGI_DisplayOn( PXGI_HW_DEVICE_INFO pXGIHWDE , PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_DisplayOff( PXGI_HW_DEVICE_INFO pXGIHWDE , PVB_DEVICE_INFO pVBInfo )
+void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE, struct vb_device_info *pVBInfo)
{
if ( pXGIHWDE->jChipType == XG21 )
@@ -4392,9 +4265,6 @@ void XGI_DisplayOff( PXGI_HW_DEVICE_INFO pXGIHWDE , PVB_DEVICE_INFO pVBInfo )
{
/* if( IsCH7007TVMode( pVBInfo ) == 0 ) */
{
-#ifdef WIN2000
- TurnOffCH7007(pXGIHWDE->pDevice) ; /* 07/05/28 */
-#endif
}
}
@@ -4423,7 +4293,7 @@ void XGI_DisplayOff( PXGI_HW_DEVICE_INFO pXGIHWDE , PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : chiawen for sensecrt1 */
/* --------------------------------------------------------------------- */
-void XGI_WaitDisply( PVB_DEVICE_INFO pVBInfo )
+void XGI_WaitDisply(struct vb_device_info *pVBInfo)
{
while( ( XGINew_GetReg2( pVBInfo->P3da ) & 0x01 ) )
break ;
@@ -4439,58 +4309,59 @@ void XGI_WaitDisply( PVB_DEVICE_INFO pVBInfo )
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SenseCRT1( PVB_DEVICE_INFO pVBInfo )
+void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
{
- UCHAR CRTCData[ 17 ] = { 0x5F , 0x4F , 0x50 , 0x82 , 0x55 , 0x81 ,
- 0x0B , 0x3E , 0xE9 , 0x0B , 0xDF , 0xE7 ,
- 0x04 , 0x00 , 0x00 , 0x05 , 0x00 } ;
+ unsigned char CRTCData[17] = {
0x5F, 0x4F, 0x50, 0x82, 0x55, 0x81,
0x0B, 0x3E, 0xE9, 0x0B, 0xDF, 0xE7,
0x04, 0x00, 0x00, 0x05, 0x00 };
- UCHAR SR01 = 0 , SR1F = 0 , SR07 = 0 , SR06 = 0 ;
+ unsigned char SR01 = 0, SR1F = 0, SR07 = 0, SR06 = 0;
- UCHAR CR17 , CR63 , SR31 ;
- USHORT temp ;
- UCHAR DAC_TEST_PARMS[ 3 ] = { 0x0F , 0x0F , 0x0F } ;
+ unsigned char CR17, CR63, SR31;
+ unsigned short temp;
+ unsigned char DAC_TEST_PARMS[3] = { 0x0F, 0x0F, 0x0F };
int i ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x05 , 0x86 ) ;
/* [2004/05/06] Vicent to fix XG42 single LCD sense to CRT+LCD */
XGINew_SetReg1( pVBInfo->P3d4 , 0x57 , 0x4A ) ;
- XGINew_SetReg1( pVBInfo->P3d4 , 0x53 , ( UCHAR )( XGINew_GetReg1( pVBInfo->P3d4 , 0x53 ) | 0x02 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, 0x53, (unsigned char)(XGINew_GetReg1(pVBInfo->P3d4, 0x53) | 0x02));
- SR31 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x31 ) ;
- CR63 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x63 ) ;
- SR01 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x01 ) ;
+ SR31 = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x31);
+ CR63 = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x63);
+ SR01 = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x01);
- XGINew_SetReg1( pVBInfo->P3c4 , 0x01 , ( UCHAR )( SR01 & 0xDF ) ) ;
- XGINew_SetReg1( pVBInfo->P3d4 , 0x63 , ( UCHAR )( CR63 & 0xBF ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF));
+ XGINew_SetReg1(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF));
- CR17 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x17 ) ;
- XGINew_SetReg1( pVBInfo->P3d4 , 0x17 , ( UCHAR )( CR17 | 0x80 ) ) ;
+ CR17 = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x17);
+ XGINew_SetReg1(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80));
- SR1F = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x1F ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1F , ( UCHAR )( SR1F | 0x04 ) ) ;
+ SR1F = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x1F);
+ XGINew_SetReg1(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04));
- SR07 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x07 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x07 , ( UCHAR )( SR07 & 0xFB ) ) ;
- SR06 = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x06 ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x06 , ( UCHAR )( SR06 & 0xC3 ) ) ;
+ SR07 = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x07);
+ XGINew_SetReg1(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB));
+ SR06 = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x06);
+ XGINew_SetReg1(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3));
XGINew_SetReg1( pVBInfo->P3d4 , 0x11 , 0x00 ) ;
for( i = 0 ; i < 8 ; i++ )
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )i , CRTCData[ i ] ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]);
for( i = 8 ; i < 11 ; i++ )
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )( i + 8 ) , CRTCData[ i ] ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, (unsigned short)(i + 8), CRTCData[i]);
for( i = 11 ; i < 13 ; i++ )
- XGINew_SetReg1( pVBInfo->P3d4 , ( USHORT )( i + 4 ) , CRTCData[ i ] ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, (unsigned short)(i + 4), CRTCData[i]);
for( i = 13 ; i < 16 ; i++ )
- XGINew_SetReg1( pVBInfo->P3c4 , ( USHORT )( i - 3 ) , CRTCData[ i ] ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, (unsigned short)(i - 3), CRTCData[i]);
- XGINew_SetReg1( pVBInfo->P3c4 , 0x0E , ( UCHAR )( CRTCData[ 16 ] & 0xE0 ) ) ;
+ XGINew_SetReg1(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16] & 0xE0));
XGINew_SetReg1( pVBInfo->P3c4 , 0x31 , 0x00 ) ;
XGINew_SetReg1( pVBInfo->P3c4 , 0x2B , 0x1B ) ;
@@ -4500,9 +4371,9 @@ void XGI_SenseCRT1( PVB_DEVICE_INFO pVBInfo )
for( i = 0 ; i < 256 ; i++ )
{
- XGINew_SetReg3( ( pVBInfo->P3c8 + 1 ) , ( UCHAR )DAC_TEST_PARMS[ 0 ] ) ;
- XGINew_SetReg3( ( pVBInfo->P3c8 + 1 ) , ( UCHAR )DAC_TEST_PARMS[ 1 ] ) ;
- XGINew_SetReg3( ( pVBInfo->P3c8 + 1 ) , ( UCHAR )DAC_TEST_PARMS[ 2 ] ) ;
+ XGINew_SetReg3((pVBInfo->P3c8 + 1), (unsigned char)DAC_TEST_PARMS[0]);
+ XGINew_SetReg3((pVBInfo->P3c8 + 1), (unsigned char)DAC_TEST_PARMS[1]);
+ XGINew_SetReg3((pVBInfo->P3c8 + 1), (unsigned char)DAC_TEST_PARMS[2]);
}
XGI_VBLongWait( pVBInfo ) ;
@@ -4538,148 +4409,18 @@ void XGI_SenseCRT1( PVB_DEVICE_INFO pVBInfo )
XGINew_SetReg1( pVBInfo->P3c4 , 0x31 , SR31 ) ;
/* [2004/05/11] Vicent */
- XGINew_SetReg1( pVBInfo->P3d4 , 0x53 , ( UCHAR )( XGINew_GetReg1( pVBInfo->P3d4 , 0x53 ) & 0xFD ) ) ;
- XGINew_SetReg1( pVBInfo->P3c4 , 0x1F , ( UCHAR ) SR1F ) ;
+ XGINew_SetReg1(pVBInfo->P3d4, 0x53,
+ (unsigned char)(XGINew_GetReg1(pVBInfo->P3d4, 0x53) & 0xFD));
+ XGINew_SetReg1(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F);
}
-#ifdef TC
-/* --------------------------------------------------------------------- */
-/* Function : INT1AReturnCode */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-int INT1AReturnCode( union REGS regs )
-{
- if ( regs.x.cflag )
- {
- /* printf( "Error to find pci device!\n" ) ; */
- return( 1 ) ;
- }
- switch(regs.h.ah)
- {
- case 0: return 0;
- break ;
- case 0x81:
- printf( "Function not support\n" ) ;
- break ;
- case 0x83:
- printf( "bad vendor id\n" ) ;
- break ;
- case 0x86:
- printf( "device not found\n" ) ;
- break ;
- case 0x87:
- printf( "bad register number\n" ) ;
- break ;
- case 0x88:
- printf( "set failed\n" ) ;
- break ;
- case 0x89:
- printf( "buffer too small" ) ;
- break ;
- default:
- break ;
- }
- return( 1 ) ;
-}
-/* --------------------------------------------------------------------- */
-/* Function : FindPCIIOBase */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-unsigned FindPCIIOBase( unsigned index , unsigned deviceid )
-{
- union REGS regs ;
-
- regs.h.ah = 0xb1 ; /* PCI_FUNCTION_ID */
- regs.h.al = 0x02 ; /* FIND_PCI_DEVICE */
- regs.x.cx = deviceid ;
- regs.x.dx = 0x1039 ;
- regs.x.si = index ; /* find n-th device */
-
- int86( 0x1A , &regs , &regs ) ;
-
- if ( INT1AReturnCode( regs ) != 0 )
- return( 0 ) ;
-
- /* regs.h.bh bus number */
- /* regs.h.bl device number */
- regs.h.ah = 0xb1 ; /* PCI_FUNCTION_ID */
- regs.h.al = 0x09 ; /* READ_CONFIG_WORD */
- regs.x.cx = deviceid ;
- regs.x.dx = 0x1039 ;
- regs.x.di = 0x18 ; /* register number */
- int86( 0x1A , &regs , &regs ) ;
-
- if ( INT1AReturnCode( regs ) != 0 )
- return( 0 ) ;
-
- return( regs.x.cx ) ;
-}
-
-#endif
-
-
-
-#ifdef TC
-/* --------------------------------------------------------------------- */
-/* Function : main */
-/* Input : */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
-void main(int argc, char *argv[])
-{
- XGI_HW_DEVICE_INFO HwDeviceExtension ;
- USHORT temp ;
- USHORT ModeNo ;
-
- /* HwDeviceExtension.pjVirtualRomBase =(PUCHAR) MK_FP(0xC000,0); */
- /* HwDeviceExtension.pjVideoMemoryAddress = (PUCHAR)MK_FP(0xA000,0); */
-
-
- HwDeviceExtension.pjIOAddress = ( FindPCIIOBase( 0 ,0x6300 ) & 0xFF80 ) + 0x30 ;
- HwDeviceExtension.jChipType = XGI_340 ;
-
-
-
- /* HwDeviceExtension.pjIOAddress = ( FindPCIIOBase( 0 , 0x5315 ) & 0xFF80 ) + 0x30 ; */
-
- HwDeviceExtension.pjIOAddress = ( FindPCIIOBase( 0 , 0x330 ) & 0xFF80 ) + 0x30 ;
- HwDeviceExtension.jChipType = XGI_340 ;
-
-
- HwDeviceExtension.ujVBChipID = VB_CHIP_301 ;
- StrCpy(HwDeviceExtension.szVBIOSVer , "0.84" ) ;
- HwDeviceExtension.bSkipDramSizing = FALSE ;
- HwDeviceExtension.ulVideoMemorySize = 0 ;
-
- if ( argc == 2 )
- {
- ModeNo = atoi( argv[ 1 ] ) ;
- }
- else
- {
- ModeNo = 0x2e ;
- /* ModeNo = 0x37 ; 1024x768x 4bpp */
- /* ModeNo = 0x38 ; 1024x768x 8bpp */
- /* ModeNo = 0x4A ; 1024x768x 16bpp */
- /* ModeNo = 0x47 ; 800x600x 16bpp */
- }
-
- /* XGIInitNew( &HwDeviceExtension ) ; */
- XGISetModeNew( &HwDeviceExtension , ModeNo ) ;
-}
-#endif
/* --------------------------------------------------------------------- */
@@ -4688,7 +4429,7 @@ void main(int argc, char *argv[])
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_WaitDisplay( PVB_DEVICE_INFO pVBInfo )
+void XGI_WaitDisplay(struct vb_device_info *pVBInfo)
{
while( !( XGINew_GetReg2( pVBInfo->P3da ) & 0x01 ) ) ;
@@ -4704,9 +4445,11 @@ void XGI_WaitDisplay( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_SetCRT2Group301( USHORT ModeNo , PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbx ,
+ unsigned short tempbx ,
ModeIdIndex ,
RefreshRateTableIndex ;
@@ -4739,7 +4482,7 @@ BOOLEAN XGI_SetCRT2Group301( USHORT ModeNo , PXGI_HW_DEVICE_INFO HwDeviceExtensi
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_AutoThreshold( PVB_DEVICE_INFO pVBInfo )
+void XGI_AutoThreshold(struct vb_device_info *pVBInfo)
{
if ( !( pVBInfo->SetFlag & Win9xDOSMode ) )
XGINew_SetRegOR( pVBInfo->Part1Port , 0x01 , 0x40 ) ;
@@ -4752,9 +4495,9 @@ void XGI_AutoThreshold( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SaveCRT2Info( USHORT ModeNo , PVB_DEVICE_INFO pVBInfo)
+void XGI_SaveCRT2Info(unsigned short ModeNo, struct vb_device_info *pVBInfo)
{
- USHORT temp1 ,
+ unsigned short temp1 ,
temp2 ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x34 , ModeNo ) ; /* reserve CR34 for CRT1 Mode No */
@@ -4770,9 +4513,11 @@ void XGI_SaveCRT2Info( USHORT ModeNo , PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetCRT2ResInfo( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetCRT2ResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT xres ,
+ unsigned short xres ,
yres ,
modeflag ,
resindex ;
@@ -4867,7 +4612,7 @@ void XGI_GetCRT2ResInfo( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_IsLCDDualLink( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
if ( ( ( ( pVBInfo->VBInfo & SetCRT2ToLCD ) | SetCRT2ToLCDA ) ) && ( pVBInfo->LCDInfo & SetLCDDualLink ) ) /* shampoo0129 */
@@ -4883,15 +4628,15 @@ BOOLEAN XGI_IsLCDDualLink( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetCRT2Data( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo)
{
- USHORT tempax = 0,
+ unsigned short tempax = 0,
tempbx ,
modeflag ,
resinfo ;
- XGI_LCDDataStruct *LCDPtr = NULL ;
- XGI_TVDataStruct *TVPtr = NULL ;
+ struct XGI_LCDDataStruct *LCDPtr = NULL;
+ struct XGI_TVDataStruct *TVPtr = NULL;
if ( ModeNo <= 0x13 )
{
@@ -4917,7 +4662,7 @@ void XGI_GetCRT2Data( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTa
if ( pVBInfo->VBInfo & ( SetCRT2ToLCD | SetCRT2ToLCDA ) )
{
- LCDPtr = (XGI_LCDDataStruct* )XGI_GetLcdPtr( tempbx, ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDPtr = (struct XGI_LCDDataStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
pVBInfo->RVBHCMAX = LCDPtr->RVBHCMAX ;
pVBInfo->RVBHCFACT = LCDPtr->RVBHCFACT ;
@@ -5021,7 +4766,7 @@ void XGI_GetCRT2Data( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTa
if ( pVBInfo->VBInfo & ( SetCRT2ToTV ) )
{
tempbx = 4 ;
- TVPtr = ( XGI_TVDataStruct * )XGI_GetTVPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ TVPtr = (struct XGI_TVDataStruct *)XGI_GetTVPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
pVBInfo->RVBHCMAX = TVPtr->RVBHCMAX ;
pVBInfo->RVBHCFACT = TVPtr->RVBHCFACT ;
@@ -5109,11 +4854,9 @@ void XGI_GetCRT2Data( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTa
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2VCLK( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo)
{
- UCHAR di_0 ,
- di_1 ,
- tempal ;
+ unsigned char di_0, di_1, tempal;
tempal = XGI_GetVCLKPtr( RefreshRateTableIndex , ModeNo , ModeIdIndex, pVBInfo ) ;
XGI_GetVCLKLen( tempal, &di_0 , &di_1, pVBInfo ) ;
@@ -5146,9 +4889,10 @@ void XGI_SetCRT2VCLK( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTab
/* Output : al -> VCLK Index */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetLCDVCLKPtr( UCHAR* di_0 , UCHAR *di_1, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
+ struct vb_device_info *pVBInfo)
{
- USHORT index ;
+ unsigned short index;
if ( pVBInfo->VBInfo & ( SetCRT2ToLCD | SetCRT2ToLCDA ) )
{
@@ -5182,17 +4926,16 @@ void XGI_GetLCDVCLKPtr( UCHAR* di_0 , UCHAR *di_1, PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGI_GetVCLKPtr(USHORT RefreshRateTableIndex,USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT index ,
+ unsigned short index ,
modeflag ;
-#ifndef LINUX_XF86
- USHORT tempbx ;
-#endif
-
- UCHAR tempal ;
- UCHAR *CHTVVCLKPtr = NULL ;
+ unsigned short tempbx;
+ unsigned char tempal;
+ unsigned char *CHTVVCLKPtr = NULL;
if ( ModeNo <= 0x13 )
modeflag = pVBInfo->SModeIDTable[ ModeIdIndex ].St_ModeFlag ; /* si+St_ResInfo */
@@ -5344,7 +5087,7 @@ UCHAR XGI_GetVCLKPtr(USHORT RefreshRateTableIndex,USHORT ModeNo,USHORT ModeIdInd
}
- tempal = ( UCHAR )XGINew_GetReg2( ( pVBInfo->P3ca + 0x02 ) ) ;
+ tempal = (unsigned char)XGINew_GetReg2((pVBInfo->P3ca + 0x02));
tempal = tempal >> 2 ;
tempal &= 0x03 ;
@@ -5365,19 +5108,20 @@ UCHAR XGI_GetVCLKPtr(USHORT RefreshRateTableIndex,USHORT ModeNo,USHORT ModeIdInd
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetVCLKLen(UCHAR tempal,UCHAR* di_0,UCHAR* di_1, PVB_DEVICE_INFO pVBInfo)
+void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
+ unsigned char *di_1, struct vb_device_info *pVBInfo)
{
if ( pVBInfo->IF_DEF_CH7007 == 1 ) /* [Billy] 2007/05/16 */
{
/* VideoDebugPrint((0, "XGI_GetVCLKLen: pVBInfo->IF_DEF_CH7007==1\n")); */
- *di_0 = ( UCHAR )XGI_CH7007VCLKData[ tempal ].SR2B ;
- *di_1 = ( UCHAR )XGI_CH7007VCLKData[ tempal ].SR2C ;
+ *di_0 = (unsigned char)XGI_CH7007VCLKData[tempal].SR2B;
+ *di_1 = (unsigned char)XGI_CH7007VCLKData[tempal].SR2C;
}
else if ( pVBInfo->VBType & ( VB_XGI301 | VB_XGI301B | VB_XGI302B | VB_XGI301LV | VB_XGI302LV | VB_XGI301C ) )
{
if ( ( !( pVBInfo->VBInfo & SetCRT2ToLCDA ) ) && ( pVBInfo->SetFlag & ProgrammingCRT2 ) )
{
- *di_0 = ( UCHAR )XGI_VBVCLKData[ tempal ].SR2B ;
+ *di_0 = (unsigned char)XGI_VBVCLKData[tempal].SR2B;
*di_1 = XGI_VBVCLKData[ tempal ].SR2C ;
}
}
@@ -5395,11 +5139,14 @@ void XGI_GetVCLKLen(UCHAR tempal,UCHAR* di_0,UCHAR* di_1, PVB_DEVICE_INFO pVBInf
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2Offset( USHORT ModeNo ,
- USHORT ModeIdIndex , USHORT RefreshRateTableIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetCRT2Offset(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT offset ;
- UCHAR temp ;
+ unsigned short offset ;
+ unsigned char temp;
if ( pVBInfo->VBInfo & SetInSlaveMode )
{
@@ -5407,12 +5154,12 @@ void XGI_SetCRT2Offset( USHORT ModeNo ,
}
offset = XGI_GetOffset( ModeNo , ModeIdIndex , RefreshRateTableIndex , HwDeviceExtension, pVBInfo ) ;
- temp = ( UCHAR )( offset & 0xFF ) ;
+ temp = (unsigned char)(offset & 0xFF);
XGINew_SetReg1( pVBInfo->Part1Port , 0x07 , temp ) ;
- temp =( UCHAR)( ( offset & 0xFF00 ) >> 8 ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x09 , temp ) ;
- temp =( UCHAR )( ( ( offset >> 3 ) & 0xFF ) + 1 ) ;
- XGINew_SetReg1( pVBInfo->Part1Port , 0x03 , temp ) ;
+ temp = (unsigned char)((offset & 0xFF00) >> 8);
+ XGINew_SetReg1(pVBInfo->Part1Port , 0x09 , temp);
+ temp = (unsigned char)(((offset >> 3) & 0xFF) + 1) ;
+ XGINew_SetReg1(pVBInfo->Part1Port, 0x03, temp);
}
@@ -5422,9 +5169,9 @@ void XGI_SetCRT2Offset( USHORT ModeNo ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetOffset(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension,PVB_DEVICE_INFO pVBInfo)
+unsigned short XGI_GetOffset(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT temp ,
+ unsigned short temp ,
colordepth ,
modeinfo ,
index ,
@@ -5471,7 +5218,7 @@ USHORT XGI_GetOffset(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableInd
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2FIFO( PVB_DEVICE_INFO pVBInfo)
+void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
{
XGINew_SetReg1( pVBInfo->Part1Port , 0x01 , 0x3B ) ; /* threshold high ,disable auto threshold */
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x02 , ~( 0x3F ) , 0x04 ) ; /* threshold low default 04h */
@@ -5484,10 +5231,12 @@ void XGI_SetCRT2FIFO( PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_PreSetGroup1(USHORT ModeNo , USHORT ModeIdIndex ,PXGI_HW_DEVICE_INFO HwDeviceExtension,
- USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempcx = 0 ,
+ unsigned short tempcx = 0 ,
CRT1Index = 0 ,
resinfo = 0 ;
@@ -5518,10 +5267,12 @@ void XGI_PreSetGroup1(USHORT ModeNo , USHORT ModeIdIndex ,PXGI_HW_DEVICE_INFO Hw
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetGroup1( USHORT ModeNo , USHORT ModeIdIndex ,
- PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT temp = 0 ,
+ unsigned short temp = 0 ,
tempax = 0 ,
tempbx = 0 ,
tempcx = 0 ,
@@ -5694,10 +5445,12 @@ void XGI_SetGroup1( USHORT ModeNo , USHORT ModeIdIndex ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLockRegs( USHORT ModeNo , USHORT ModeIdIndex ,
- PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT push1 ,
+ unsigned short push1 ,
push2 ,
tempax ,
tempbx = 0 ,
@@ -6141,10 +5894,10 @@ void XGI_SetLockRegs( USHORT ModeNo , USHORT ModeIdIndex ,
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIndex,
- PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT i ,
+ unsigned short i ,
j ,
tempax ,
tempbx ,
@@ -6155,9 +5908,9 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
modeflag ,
resinfo ,
crt2crtc ;
- UCHAR *TimingPoint ;
+ unsigned char *TimingPoint ;
- ULONG longtemp ,
+ unsigned long longtemp ,
tempeax ,
tempebx ,
temp2 ,
@@ -6266,7 +6019,7 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
tempax = ( tempax & 0x00FF ) | ( ( tempax & 0x00FF ) << 8 ) ;
push1 = tempax ;
temp = ( tempax & 0xFF00 ) >> 8 ;
- temp += ( USHORT )TimingPoint[ 0 ] ;
+ temp += (unsigned short)TimingPoint[0];
if ( pVBInfo->VBType & ( VB_XGI301B | VB_XGI302B | VB_XGI301LV | VB_XGI302LV | VB_XGI301C ) )
{
@@ -6543,7 +6296,7 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
tempeax += 1 ;
}
- tempax = ( USHORT )tempeax ;
+ tempax = (unsigned short)tempeax;
/* 301b */
if ( pVBInfo->VBType & ( VB_XGI301B | VB_XGI302B | VB_XGI301LV | VB_XGI302LV | VB_XGI301C ) )
@@ -6553,8 +6306,8 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
/* end 301b */
tempbx = push1 ;
- tempbx =( USHORT )( ( ( tempeax & 0x0000FF00 ) & 0x1F00 ) | ( tempbx & 0x00FF ) ) ;
- tempax =( USHORT )( ( ( tempeax & 0x000000FF ) << 8 ) | ( tempax & 0x00FF ) ) ;
+ tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00) | (tempbx & 0x00FF));
+ tempax = (unsigned short)(((tempeax & 0x000000FF) << 8) | (tempax & 0x00FF));
temp = ( tempax & 0xFF00 ) >> 8 ;
}
else
@@ -6607,7 +6360,7 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
XGINew_SetReg1( pVBInfo->Part2Port , 0x4d , temp ) ;
temp=XGINew_GetReg1( pVBInfo->Part2Port , 0x43 ) ; /* 301b change */
- XGINew_SetReg1( pVBInfo->Part2Port , 0x43 , ( USHORT )( temp - 3 ) ) ;
+ XGINew_SetReg1( pVBInfo->Part2Port , 0x43, (unsigned short)( temp - 3 ) ) ;
if ( !( pVBInfo->TVInfo & ( SetYPbPrMode525p | SetYPbPrMode750p ) ) )
{
@@ -6631,7 +6384,7 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
if ( pVBInfo->TVInfo & SetPALMTV )
{
- tempax = ( UCHAR )XGINew_GetReg1( pVBInfo->Part2Port , 0x01 ) ;
+ tempax = (unsigned char)XGINew_GetReg1(pVBInfo->Part2Port, 0x01);
tempax-- ;
XGINew_SetRegAND( pVBInfo->Part2Port , 0x01 , tempax ) ;
@@ -6660,9 +6413,9 @@ void XGI_SetGroup2( USHORT ModeNo, USHORT ModeIdIndex, USHORT RefreshRateTableIn
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLCDRegs(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDeviceExtension,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo)
{
- USHORT push1 ,
+ unsigned short push1 ,
push2 ,
pushbx ,
tempax ,
@@ -6676,7 +6429,7 @@ void XGI_SetLCDRegs(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDev
modeflag ,
CRT1Index ;
- XGI_LCDDesStruct *LCDBDesPtr = NULL ;
+ struct XGI_LCDDesStruct *LCDBDesPtr = NULL ;
if ( ModeNo <= 0x13 )
@@ -6746,7 +6499,7 @@ void XGI_SetLCDRegs(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDev
/* Customized LCDB Des no add */
tempbx = 5 ;
- LCDBDesPtr = ( XGI_LCDDesStruct * )XGI_GetLcdPtr( tempbx , ModeNo , ModeIdIndex , RefreshRateTableIndex, pVBInfo ) ;
+ LCDBDesPtr = (struct XGI_LCDDesStruct *)XGI_GetLcdPtr(tempbx, ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
tempah = pVBInfo->LCDResInfo ;
tempah &= PanelResInfo ;
@@ -6914,13 +6667,14 @@ void XGI_SetLCDRegs(USHORT ModeNo,USHORT ModeIdIndex, PXGI_HW_DEVICE_INFO HwDev
/* Output : di -> Tap4 Reg. Setting Pointer */
/* Description : */
/* --------------------------------------------------------------------- */
-XGI301C_Tap4TimingStruct* XGI_GetTap4Ptr(USHORT tempcx, PVB_DEVICE_INFO pVBInfo)
+struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempax ,
+ unsigned short tempax ,
tempbx ,
i ;
- XGI301C_Tap4TimingStruct *Tap4TimingPtr ;
+ struct XGI301C_Tap4TimingStruct *Tap4TimingPtr ;
if ( tempcx == 0 )
{
@@ -6974,12 +6728,12 @@ XGI301C_Tap4TimingStruct* XGI_GetTap4Ptr(USHORT tempcx, PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetTap4Regs( PVB_DEVICE_INFO pVBInfo)
+void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
{
- USHORT i ,
+ unsigned short i ,
j ;
- XGI301C_Tap4TimingStruct *Tap4TimingPtr ;
+ struct XGI301C_Tap4TimingStruct *Tap4TimingPtr ;
if ( !( pVBInfo->VBType & VB_XGI301C ) )
return ;
@@ -7012,11 +6766,11 @@ void XGI_SetTap4Regs( PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetGroup3(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT i;
- UCHAR *tempdi;
- USHORT modeflag;
+ unsigned short i;
+ unsigned char *tempdi;
+ unsigned short modeflag;
if(ModeNo<=0x13)
{
@@ -7099,16 +6853,16 @@ void XGI_SetGroup3(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetGroup4(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempax ,
+ unsigned short tempax ,
tempcx ,
tempbx ,
modeflag ,
temp ,
temp2 ;
- ULONG tempebx ,
+ unsigned long tempebx ,
tempeax ,
templong ;
@@ -7230,12 +6984,12 @@ void XGI_SetGroup4(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex
}
- temp = ( USHORT )( tempebx & 0x000000FF ) ;
+ temp = (unsigned short)(tempebx & 0x000000FF);
XGINew_SetReg1( pVBInfo->Part4Port , 0x1B , temp ) ;
- temp = ( USHORT )( ( tempebx & 0x0000FF00 ) >> 8 ) ;
+ temp = (unsigned short)((tempebx & 0x0000FF00) >> 8);
XGINew_SetReg1( pVBInfo->Part4Port , 0x1A , temp ) ;
- tempbx = ( USHORT )( tempebx >> 16 ) ;
+ tempbx = (unsigned short)(tempebx >> 16);
temp = tempbx & 0x00FF ;
temp = temp << 4 ;
temp |= ( ( tempcx & 0xFF00 ) >> 8 ) ;
@@ -7350,9 +7104,9 @@ void XGI_SetGroup4(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetGroup5( USHORT ModeNo , USHORT ModeIdIndex , PVB_DEVICE_INFO pVBInfo)
+void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT Pindex ,
+ unsigned short Pindex ,
Pdata ;
Pindex = pVBInfo->Part5Port ;
@@ -7375,9 +7129,13 @@ void XGI_SetGroup5( USHORT ModeNo , USHORT ModeIdIndex , PVB_DEVICE_INFO pVBInfo
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void* XGI_GetLcdPtr( USHORT BX , USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void *XGI_GetLcdPtr(unsigned short BX,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT i ,
+ unsigned short i ,
tempdx ,
tempcx ,
tempbx ,
@@ -7385,7 +7143,7 @@ void* XGI_GetLcdPtr( USHORT BX , USHORT ModeNo , USHORT ModeIdIndex , USHORT Ref
modeflag ,
table ;
- XGI330_LCDDataTablStruct *tempdi = 0 ;
+ struct XGI330_LCDDataTablStruct *tempdi = 0 ;
tempbx = BX;
@@ -7877,10 +7635,13 @@ void* XGI_GetLcdPtr( USHORT BX , USHORT ModeNo , USHORT ModeIdIndex , USHORT Ref
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void* XGI_GetTVPtr (USHORT BX,USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT i , tempdx , tempbx , tempal , modeflag , table ;
- XGI330_TVDataTablStruct *tempdi = 0 ;
+ unsigned short i , tempdx , tempbx , tempal , modeflag , table ;
+ struct XGI330_TVDataTablStruct *tempdi = 0 ;
tempbx = BX ;
@@ -7955,53 +7716,9 @@ void* XGI_GetTVPtr (USHORT BX,USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRat
if ( table == 0x00 ) /* 07/05/22 */
{
-#ifdef WIN2000
- if ( pVBInfo->IF_DEF_CH7007 == 1 )
- {
- switch( tempdi[ i ].DATAPTR )
- {
- case 0:
- return &CH7007TVCRT1UNTSC_H[ tempal ] ;
- break ;
- case 1:
- return &CH7007TVCRT1ONTSC_H[ tempal ] ;
- break ;
- case 2:
- return &CH7007TVCRT1UPAL_H[ tempal ] ;
- break ;
- case 3:
- return &CH7007TVCRT1OPAL_H[ tempal ] ;
- break ;
- default:
- break ;
- }
- }
-#endif
}
else if ( table == 0x01 )
{
-#ifdef WIN2000
- if ( pVBInfo->IF_DEF_CH7007 == 1 )
- {
- switch( tempdi[ i ].DATAPTR )
- {
- case 0:
- return &CH7007TVCRT1UNTSC_V[ tempal ] ;
- break ;
- case 1:
- return &CH7007TVCRT1ONTSC_V[ tempal ] ;
- break ;
- case 2:
- return &CH7007TVCRT1UPAL_V[ tempal ] ;
- break ;
- case 3:
- return &CH7007TVCRT1OPAL_V[ tempal ] ;
- break ;
- default:
- break ;
- }
- }
-#endif
}
else if ( table == 0x04 )
{
@@ -8075,49 +7792,6 @@ void* XGI_GetTVPtr (USHORT BX,USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRat
}
else if( table == 0x06 )
{
-#ifdef WIN2000
- if ( pVBInfo->IF_DEF_CH7007 == 1 )
- {
- /* VideoDebugPrint((0, "XGI_GetTVPtr: pVBInfo->IF_DEF_CH7007==1\n")); */
- switch( tempdi[ i ].DATAPTR )
- {
- case 0:
- return &CH7007TVReg_UNTSC[ tempal ] ;
- break ;
- case 1:
- return &CH7007TVReg_ONTSC[ tempal ] ;
- break ;
- case 2:
- return &CH7007TVReg_UPAL[ tempal ] ;
- break ;
- case 3:
- return &CH7007TVReg_OPAL[ tempal ] ;
- break ;
- default:
- break ;
- }
- }
- else
- {
- switch( tempdi[ i ].DATAPTR )
- {
- case 0:
- return &XGI_CHTVRegUNTSC[ tempal ] ;
- break ;
- case 1:
- return &XGI_CHTVRegONTSC[ tempal ] ;
- break ;
- case 2:
- return &XGI_CHTVRegUPAL[ tempal ] ;
- break ;
- case 3:
- return &XGI_CHTVRegOPAL[ tempal ] ;
- break ;
- default:
- break ;
- }
- }
-#endif
}
return( 0 ) ;
}
@@ -8126,18 +7800,18 @@ void* XGI_GetTVPtr (USHORT BX,USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRat
/* --------------------------------------------------------------------- */
/* Function : XGI_BacklightByDrv */
/* Input : */
-/* Output : TRUE -> Skip backlight control */
+/* Output : 1 -> Skip backlight control */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_BacklightByDrv( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_BacklightByDrv(struct vb_device_info *pVBInfo)
{
- UCHAR tempah ;
+ unsigned char tempah ;
- tempah = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x3A ) ;
- if ( tempah & BacklightControlBit )
- return TRUE ;
+ tempah = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x3A) ;
+ if (tempah & BacklightControlBit)
+ return 1;
else
- return FALSE ;
+ return 0;
}
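
The hunk above is representative of how this patch retires the BOOLEAN/TRUE/FALSE macros: the function now returns unsigned char 1 or 0. As a minimal sketch only (not part of the patch, and with a made-up function name), the same predicate written with the kernel's bool type from <linux/types.h> would look like this; all other identifiers are taken from the hunk above:

#include <linux/types.h>	/* bool, true, false */

/* hypothetical bool variant of XGI_BacklightByDrv() */
static bool xgi_backlight_by_drv_sketch(struct vb_device_info *pVBInfo)
{
	unsigned char tempah;

	tempah = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x3A);
	/* non-zero means "skip backlight control", mirroring the 1/0 return */
	return (tempah & BacklightControlBit) != 0;
}
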
@@ -8148,7 +7822,7 @@ BOOLEAN XGI_BacklightByDrv( PVB_DEVICE_INFO pVBInfo )
/* Description : Turn off VDD & Backlight : Fire disable procedure */
/* --------------------------------------------------------------------- */
/*
-void XGI_FirePWDDisable( PVB_DEVICE_INFO pVBInfo )
+void XGI_FirePWDDisable(struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x26 , 0x00 , 0xFC ) ;
}
@@ -8160,7 +7834,7 @@ void XGI_FirePWDDisable( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : Turn on VDD & Backlight : Fire enable procedure */
/* --------------------------------------------------------------------- */
-void XGI_FirePWDEnable(PVB_DEVICE_INFO pVBInfo )
+void XGI_FirePWDEnable(struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x26 , 0x03 , 0xFC ) ;
}
@@ -8172,7 +7846,7 @@ void XGI_FirePWDEnable(PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_EnableGatingCRT(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGI_EnableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x63 , 0xBF , 0x40 ) ;
}
@@ -8184,7 +7858,7 @@ void XGI_EnableGatingCRT(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_DisableGatingCRT(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->P3d4 , 0x63 , 0xBF , 0x00 ) ;
@@ -8201,9 +7875,9 @@ void XGI_DisableGatingCRT(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO
/* : bl : 3 ; T3 : the duration between CPL off and signal off */
/* : bl : 4 ; T4 : the duration signal off and Vdd off */
/* --------------------------------------------------------------------- */
-void XGI_SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetPanelDelay(unsigned short tempbl, struct vb_device_info *pVBInfo)
{
- USHORT index ;
+ unsigned short index ;
index = XGI_GetLCDCapPtr(pVBInfo) ;
@@ -8231,7 +7905,7 @@ void XGI_SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
/* = 1011b = 0Bh ; Backlight off, Power on */
/* = 1111b = 0Fh ; Backlight off, Power off */
/* --------------------------------------------------------------------- */
-void XGI_SetPanelPower(USHORT tempah,USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetPanelPower(unsigned short tempah, unsigned short tempbl, struct vb_device_info *pVBInfo)
{
if ( pVBInfo->VBType & ( VB_XGI301LV | VB_XGI302LV | VB_XGI301C ) )
XGINew_SetRegANDOR( pVBInfo->Part4Port , 0x26 , tempbl , tempah ) ;
@@ -8239,10 +7913,10 @@ void XGI_SetPanelPower(USHORT tempah,USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x11 , tempbl , tempah ) ;
}
-UCHAR XG21GPIODataTransfer(UCHAR ujDate)
+unsigned char XG21GPIODataTransfer(unsigned char ujDate)
{
- UCHAR ujRet = 0;
- UCHAR i = 0;
+ unsigned char ujRet = 0;
+ unsigned char i = 0;
for (i=0; i<8; i++)
{
@@ -8260,9 +7934,9 @@ UCHAR XG21GPIODataTransfer(UCHAR ujDate)
/* bl[1] : LVDS backlight */
/* bl[0] : LVDS VDD */
/*----------------------------------------------------------------------------*/
-UCHAR XGI_XG21GetPSCValue(PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
{
- UCHAR CR4A,temp;
+ unsigned char CR4A, temp;
CR4A = XGINew_GetReg1( pVBInfo->P3d4 , 0x4A ) ;
XGINew_SetRegAND( pVBInfo->P3d4 , 0x4A , ~0x23 ) ; /* enable GPIO write */
@@ -8281,9 +7955,9 @@ UCHAR XGI_XG21GetPSCValue(PVB_DEVICE_INFO pVBInfo)
/* bl[1] : LVDS backlight */
/* bl[0] : LVDS VDD */
/*----------------------------------------------------------------------------*/
-UCHAR XGI_XG27GetPSCValue(PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
{
- UCHAR CR4A,CRB4,temp;
+ unsigned char CR4A, CRB4, temp;
CR4A = XGINew_GetReg1( pVBInfo->P3d4 , 0x4A ) ;
XGINew_SetRegAND( pVBInfo->P3d4 , 0x4A , ~0x0C ) ; /* enable GPIO write */
@@ -8306,9 +7980,9 @@ UCHAR XGI_XG27GetPSCValue(PVB_DEVICE_INFO pVBInfo)
/* 000010b : clear bit 1, to set bit1 */
/* 000001b : clear bit 0, to set bit0 */
/*----------------------------------------------------------------------------*/
-void XGI_XG21BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
+void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl, struct vb_device_info *pVBInfo)
{
- UCHAR CR4A,temp;
+ unsigned char CR4A, temp;
CR4A = XGINew_GetReg1( pVBInfo->P3d4 , 0x4A ) ;
tempbh &= 0x23;
@@ -8331,10 +8005,10 @@ void XGI_XG21BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
XGINew_SetReg1( pVBInfo->P3d4 , 0x48 , temp ) ;
}
-void XGI_XG27BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
+void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl, struct vb_device_info *pVBInfo)
{
- UCHAR CR4A,temp;
- USHORT tempbh0,tempbl0;
+ unsigned char CR4A, temp;
+ unsigned short tempbh0, tempbl0;
tempbh0 = tempbh;
tempbl0 = tempbl;
@@ -8362,15 +8036,13 @@ void XGI_XG27BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
}
/* --------------------------------------------------------------------- */
-USHORT XGI_GetLVDSOEMTableIndex(PVB_DEVICE_INFO pVBInfo)
+unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo)
{
- USHORT index ;
+ unsigned short index ;
index = XGINew_GetReg1( pVBInfo->P3d4 , 0x36 ) ;
- if (index<sizeof(XGI21_LCDCapList)/sizeof(XGI21_LVDSCapStruct))
- {
- return index;
- }
+ if (index < sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct))
+ return index;
return 0;
}
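
The bounds check kept by this hunk is the open-coded form of the kernel's ARRAY_SIZE() macro (sizeof the array divided by sizeof one element). A sketch of the same function using that helper, assuming XGI21_LCDCapList remains a plain array of struct XGI21_LVDSCapStruct as the hunk implies; the function name here is invented:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static unsigned short xgi_get_lvds_oem_index_sketch(struct vb_device_info *pVBInfo)
{
	unsigned short index = XGINew_GetReg1(pVBInfo->P3d4, 0x36);

	/* CR36 selects an OEM table entry; fall back to entry 0 if out of range */
	if (index < ARRAY_SIZE(XGI21_LCDCapList))
		return index;
	return 0;
}
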
@@ -8384,9 +8056,9 @@ USHORT XGI_GetLVDSOEMTableIndex(PVB_DEVICE_INFO pVBInfo)
/* : bl : 3 ; T3 : the duration between CPL off and signal off */
/* : bl : 4 ; T4 : the duration signal off and Vdd off */
/* --------------------------------------------------------------------- */
-void XGI_XG21SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
+void XGI_XG21SetPanelDelay(unsigned short tempbl, struct vb_device_info *pVBInfo)
{
- USHORT index ;
+ unsigned short index ;
index = XGI_GetLVDSOEMTableIndex( pVBInfo );
if ( tempbl == 1 )
@@ -8402,9 +8074,11 @@ void XGI_XG21SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo)
XGINew_LCD_Wait_Time( pVBInfo->XG21_LVDSCapList[ index ].PSC_S4, pVBInfo ) ;
}
-BOOLEAN XGI_XG21CheckLVDSMode(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT xres ,
+ unsigned short xres ,
yres ,
colordepth ,
modeflag ,
@@ -8445,10 +8119,10 @@ BOOLEAN XGI_XG21CheckLVDSMode(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO
lvdstableindex = XGI_GetLVDSOEMTableIndex( pVBInfo );
if ( xres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE) )
- return FALSE;
+ return 0;
if ( yres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE) )
- return FALSE;
+ return 0;
if ( ModeNo > 0x13 )
{
@@ -8456,18 +8130,17 @@ BOOLEAN XGI_XG21CheckLVDSMode(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO
( yres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE)) )
{
colordepth = XGI_GetColorDepth( ModeNo , ModeIdIndex, pVBInfo ) ;
- if ( colordepth > 2 )
- {
- return FALSE;
- }
+ if (colordepth > 2)
+ return 0;
+
}
}
- return TRUE;
+ return 1;
}
-void XGI_SetXG21FPBits(PVB_DEVICE_INFO pVBInfo)
+void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
{
- UCHAR temp;
+ unsigned char temp;
temp = XGINew_GetReg1( pVBInfo->P3d4 , 0x37 ) ; /* D[0] 1: 18bit */
temp = ( temp & 1 ) << 6;
@@ -8476,9 +8149,9 @@ void XGI_SetXG21FPBits(PVB_DEVICE_INFO pVBInfo)
}
-void XGI_SetXG27FPBits(PVB_DEVICE_INFO pVBInfo)
+void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
{
- UCHAR temp;
+ unsigned char temp;
temp = XGINew_GetReg1( pVBInfo->P3d4 , 0x37 ) ; /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
temp = ( temp & 3 ) << 6;
@@ -8487,27 +8160,28 @@ void XGI_SetXG27FPBits(PVB_DEVICE_INFO pVBInfo)
}
-void XGI_SetXG21LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetXG21LVDSPara(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- UCHAR temp,Miscdata;
- USHORT xres ,
+ unsigned char temp, Miscdata;
+ unsigned short xres ,
yres ,
modeflag ,
resindex ,
lvdstableindex ;
- USHORT LVDSHT,LVDSHBS,LVDSHRS,LVDSHRE,LVDSHBE;
- USHORT LVDSVT,LVDSVBS,LVDSVRS,LVDSVRE,LVDSVBE;
- USHORT value;
+ unsigned short LVDSHT,LVDSHBS,LVDSHRS,LVDSHRE,LVDSHBE;
+ unsigned short LVDSVT,LVDSVBS,LVDSVRS,LVDSVRE,LVDSVBE;
+ unsigned short value;
lvdstableindex = XGI_GetLVDSOEMTableIndex( pVBInfo );
- temp = (UCHAR) ( ( pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & (LCDPolarity << 8 ) ) >> 8 );
+ temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & (LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
- Miscdata =(UCHAR) XGINew_GetReg2(pVBInfo->P3cc) ;
+ Miscdata = (unsigned char) XGINew_GetReg2(pVBInfo->P3cc) ;
XGINew_SetReg3( pVBInfo->P3c2 , (Miscdata & 0x3F) | temp ) ;
- temp = (UCHAR) ( pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & LCDPolarity ) ;
+ temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & LCDPolarity) ;
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x35 , ~0x80 , temp&0x80 ) ; /* SR35[7] FP VSync polarity */
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x30 , ~0x20 , (temp&0x40)>>1 ) ; /* SR30[5] FP HSync polarity */
@@ -8563,7 +8237,7 @@ void XGI_SetXG21LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBIn
LVDSVBE = LVDSVBS + LVDSVT - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE ;
- temp = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ;
+ temp = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11) ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x11 , temp & 0x7f ) ; /* Unlock CRTC */
if (!( modeflag & Charx8Dot ))
@@ -8670,26 +8344,27 @@ void XGI_SetXG21LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBIn
}
/* no shadow case */
-void XGI_SetXG27LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetXG27LVDSPara(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- UCHAR temp,Miscdata;
- USHORT xres ,
+ unsigned char temp, Miscdata;
+ unsigned short xres ,
yres ,
modeflag ,
resindex ,
lvdstableindex ;
- USHORT LVDSHT,LVDSHBS,LVDSHRS,LVDSHRE,LVDSHBE;
- USHORT LVDSVT,LVDSVBS,LVDSVRS,LVDSVRE,LVDSVBE;
- USHORT value;
+ unsigned short LVDSHT,LVDSHBS,LVDSHRS,LVDSHRE,LVDSHBE;
+ unsigned short LVDSVT,LVDSVBS,LVDSVRS,LVDSVRE,LVDSVBE;
+ unsigned short value;
lvdstableindex = XGI_GetLVDSOEMTableIndex( pVBInfo );
- temp = (UCHAR) ( ( pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & (LCDPolarity << 8 ) ) >> 8 );
+ temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & (LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
- Miscdata =(UCHAR) XGINew_GetReg2(pVBInfo->P3cc) ;
+ Miscdata = (unsigned char) XGINew_GetReg2(pVBInfo->P3cc);
XGINew_SetReg3( pVBInfo->P3c2 , (Miscdata & 0x3F) | temp ) ;
- temp = (UCHAR) ( pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & LCDPolarity ) ;
+ temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDS_Capability & LCDPolarity) ;
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x35 , ~0x80 , temp&0x80 ) ; /* SR35[7] FP VSync polarity */
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x30 , ~0x20 , (temp&0x40)>>1 ) ; /* SR30[5] FP HSync polarity */
@@ -8745,7 +8420,7 @@ void XGI_SetXG27LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBIn
LVDSVBE = LVDSVBS + LVDSVT - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE ;
- temp = ( UCHAR )XGINew_GetReg1( pVBInfo->P3d4 , 0x11 ) ;
+ temp = (unsigned char)XGINew_GetReg1(pVBInfo->P3d4, 0x11) ;
XGINew_SetReg1( pVBInfo->P3d4 , 0x11 , temp & 0x7f ) ; /* Unlock CRTC */
if (!( modeflag & Charx8Dot ))
@@ -8853,21 +8528,21 @@ void XGI_SetXG27LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBIn
/* --------------------------------------------------------------------- */
/* Function : XGI_IsLCDON */
/* Input : */
-/* Output : FALSE : Skip PSC Control */
-/* TRUE: Disable PSC */
+/* Output : 0 : Skip PSC Control */
+/* 1: Disable PSC */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_IsLCDON(PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
{
- USHORT tempax ;
+ unsigned short tempax ;
tempax = pVBInfo->VBInfo ;
if ( tempax & SetCRT2ToDualEdge )
- return FALSE ;
+ return 0;
else if ( tempax & ( DisableCRT2Display | SwitchToCRT2 | SetSimuScanMode ) )
- return TRUE ;
+ return 1;
- return FALSE ;
+ return 0;
}
@@ -8877,9 +8552,9 @@ BOOLEAN XGI_IsLCDON(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_EnablePWD( PVB_DEVICE_INFO pVBInfo )
+void XGI_EnablePWD(struct vb_device_info *pVBInfo)
{
- USHORT index ,
+ unsigned short index ,
temp ;
index = XGI_GetLCDCapPtr(pVBInfo) ;
@@ -8899,7 +8574,7 @@ void XGI_EnablePWD( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_DisablePWD( PVB_DEVICE_INFO pVBInfo )
+void XGI_DisablePWD(struct vb_device_info *pVBInfo)
{
XGINew_SetRegAND( pVBInfo->Part4Port , 0x27 , 0x7F ) ; /* disable PWD */
}
@@ -8908,30 +8583,30 @@ void XGI_DisablePWD( PVB_DEVICE_INFO pVBInfo )
/* --------------------------------------------------------------------- */
/* Function : XGI_DisableChISLCD */
/* Input : */
-/* Output : FALSE -> Not LCD Mode */
+/* Output : 0 -> Not LCD Mode */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_DisableChISLCD(PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_DisableChISLCD(struct vb_device_info *pVBInfo)
{
- USHORT tempbx ,
+ unsigned short tempbx ,
tempah ;
tempbx = pVBInfo->SetFlag & ( DisableChA | DisableChB ) ;
- tempah = ~( ( USHORT )XGINew_GetReg1( pVBInfo->Part1Port , 0x2E ) ) ;
+ tempah = ~((unsigned short) XGINew_GetReg1(pVBInfo->Part1Port, 0x2E));
if ( tempbx & ( EnableChA | DisableChA ) )
{
if ( !( tempah & 0x08 ) ) /* Chk LCDA Mode */
- return FALSE ;
+ return 0 ;
}
if ( !( tempbx & ( EnableChB | DisableChB ) ) )
- return FALSE ;
+ return 0;
if ( tempah & 0x01 ) /* Chk LCDB Mode */
- return TRUE ;
+ return 1;
- return FALSE ;
+ return 0;
}
@@ -8941,28 +8616,28 @@ BOOLEAN XGI_DisableChISLCD(PVB_DEVICE_INFO pVBInfo)
/* Output : 0 -> Not LCD mode */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_EnableChISLCD(PVB_DEVICE_INFO pVBInfo)
+unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo)
{
- USHORT tempbx ,
+ unsigned short tempbx ,
tempah ;
tempbx = pVBInfo->SetFlag & ( EnableChA | EnableChB ) ;
- tempah = ~( ( USHORT )XGINew_GetReg1( pVBInfo->Part1Port , 0x2E ) ) ;
+ tempah = ~( (unsigned short)XGINew_GetReg1( pVBInfo->Part1Port , 0x2E ) ) ;
if ( tempbx & ( EnableChA | DisableChA ) )
{
if ( !( tempah & 0x08 ) ) /* Chk LCDA Mode */
- return FALSE ;
+ return 0;
}
if ( !( tempbx & ( EnableChB | DisableChB ) ) )
- return FALSE ;
+ return 0;
if ( tempah & 0x01 ) /* Chk LCDB Mode */
- return TRUE ;
+ return 1;
- return FALSE ;
+ return 0;
}
@@ -8972,9 +8647,9 @@ BOOLEAN XGI_EnableChISLCD(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetLCDCapPtr( PVB_DEVICE_INFO pVBInfo )
+unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo)
{
- UCHAR tempal ,
+ unsigned char tempal ,
tempah ,
tempbl ,
i ;
@@ -9011,9 +8686,9 @@ USHORT XGI_GetLCDCapPtr( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetLCDCapPtr1( PVB_DEVICE_INFO pVBInfo )
+unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
{
- USHORT tempah ,
+ unsigned short tempah ,
tempal ,
tempbl ,
i ;
@@ -9056,9 +8731,10 @@ USHORT XGI_GetLCDCapPtr1( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetLCDSync( USHORT* HSyncWidth , USHORT* VSyncWidth, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetLCDSync(unsigned short *HSyncWidth , unsigned short *VSyncWidth,
+ struct vb_device_info *pVBInfo)
{
- USHORT Index ;
+ unsigned short Index ;
Index = XGI_GetLCDCapPtr(pVBInfo) ;
*HSyncWidth = pVBInfo->LCDCapList[ Index ].LCD_HSyncWidth ;
@@ -9075,9 +8751,9 @@ void XGI_GetLCDSync( USHORT* HSyncWidth , USHORT* VSyncWidth, PVB_DEVICE_INFO pV
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_EnableBridge( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO pVBInfo)
+void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempbl ,
+ unsigned short tempbl ,
tempah ;
if ( pVBInfo->SetFlag == Win9xDOSMode )
@@ -9146,7 +8822,7 @@ void XGI_EnableBridge( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO p
{
if ( ( pVBInfo->SetFlag & EnableChB ) || ( pVBInfo->VBInfo & ( SetCRT2ToLCD | SetCRT2ToTV | SetCRT2ToRAMDAC ) ) )
{
- tempah = ( UCHAR )XGINew_GetReg1( pVBInfo->P3c4 , 0x32 ) ;
+ tempah = (unsigned char)XGINew_GetReg1(pVBInfo->P3c4, 0x32);
tempah &= 0xDF;
if ( pVBInfo->VBInfo & SetInSlaveMode )
{
@@ -9156,8 +8832,7 @@ void XGI_EnableBridge( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO p
XGINew_SetReg1( pVBInfo->P3c4 , 0x32 , tempah ) ;
XGINew_SetRegOR( pVBInfo->P3c4 , 0x1E , 0x20 ) ;
-
- tempah = ( UCHAR )XGINew_GetReg1( pVBInfo->Part1Port , 0x2E ) ;
+ tempah = (unsigned char)XGINew_GetReg1(pVBInfo->Part1Port, 0x2E);
if ( !( tempah & 0x80 ) )
XGINew_SetRegOR( pVBInfo->Part1Port , 0x2E , 0x80 ) ; /* BVBDOENABLE = 1 */
@@ -9238,7 +8913,7 @@ void XGI_EnableBridge( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO p
- tempah = ( UCHAR )XGINew_GetReg1( pVBInfo->Part1Port , 0x2E ) ;
+ tempah = (unsigned char)XGINew_GetReg1(pVBInfo->Part1Port, 0x2E);
if ( !( tempah & 0x80 ) )
XGINew_SetRegOR( pVBInfo->Part1Port , 0x2E , 0x80 ) ; /* BVBDOENABLE = 1 */
@@ -9289,9 +8964,9 @@ void XGI_EnableBridge( PXGI_HW_DEVICE_INFO HwDeviceExtension , PVB_DEVICE_INFO p
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_DisableBridge(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempax ,
+ unsigned short tempax ,
tempbx ,
tempah = 0 ,
tempbl = 0 ;
@@ -9470,9 +9145,9 @@ void XGI_DisableBridge(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pV
/* A : Ext750p */
/* B : St750p */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetTVPtrIndex( PVB_DEVICE_INFO pVBInfo )
+unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
- USHORT tempbx = 0 ;
+ unsigned short tempbx = 0 ;
if ( pVBInfo->TVInfo & SetPALTV )
tempbx = 2 ;
@@ -9497,7 +9172,7 @@ USHORT XGI_GetTVPtrIndex( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : Customized Param. for 301 */
/* --------------------------------------------------------------------- */
-void XGI_OEM310Setting( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
+void XGI_OEM310Setting(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
if ( pVBInfo->SetFlag & Win9xDOSMode )
return ;
@@ -9527,11 +9202,11 @@ void XGI_OEM310Setting( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBI
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetDelayComp( PVB_DEVICE_INFO pVBInfo )
+void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
{
- USHORT index ;
+ unsigned short index ;
- UCHAR tempah ,
+ unsigned char tempah ,
tempbl ,
tempbh ;
@@ -9605,9 +9280,9 @@ void XGI_SetDelayComp( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLCDCap( PVB_DEVICE_INFO pVBInfo )
+void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
{
- USHORT tempcx ;
+ unsigned short tempcx ;
tempcx = pVBInfo->LCDCapList[ XGI_GetLCDCapPtr(pVBInfo) ].LCD_Capability ;
@@ -9616,10 +9291,12 @@ void XGI_SetLCDCap( PVB_DEVICE_INFO pVBInfo )
if ( pVBInfo->VBType & ( VB_XGI301LV | VB_XGI302LV | VB_XGI301C ) )
{ /* 301LV/302LV only */
/* Set 301LV Capability */
- XGINew_SetReg1( pVBInfo->Part4Port , 0x24 , ( UCHAR )( tempcx & 0x1F ) ) ;
+ XGINew_SetReg1(pVBInfo->Part4Port, 0x24, (unsigned char)(tempcx & 0x1F));
}
/* VB Driving */
- XGINew_SetRegANDOR( pVBInfo->Part4Port , 0x0D , ~( ( EnableVBCLKDRVLOW | EnablePLLSPLOW ) >> 8 ) , ( USHORT )( ( tempcx & ( EnableVBCLKDRVLOW | EnablePLLSPLOW ) ) >> 8 ) ) ;
+ XGINew_SetRegANDOR(pVBInfo->Part4Port, 0x0D,
+ ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
+ (unsigned short)((tempcx & (EnableVBCLKDRVLOW | EnablePLLSPLOW)) >> 8));
}
if ( pVBInfo->VBType & ( VB_XGI301B | VB_XGI302B | VB_XGI301LV | VB_XGI302LV | VB_XGI301C ) )
@@ -9646,32 +9323,34 @@ void XGI_SetLCDCap( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLCDCap_A(USHORT tempcx,PVB_DEVICE_INFO pVBInfo)
+void XGI_SetLCDCap_A(unsigned short tempcx, struct vb_device_info *pVBInfo)
{
- USHORT temp ;
+ unsigned short temp ;
temp = XGINew_GetReg1( pVBInfo->P3d4 , 0x37 ) ;
if ( temp & LCDRGB18Bit )
{
- XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x19 , 0x0F , ( USHORT )( 0x20 | ( tempcx & 0x00C0 ) ) ) ; /* Enable Dither */
+ XGINew_SetRegANDOR(pVBInfo->Part1Port, 0x19, 0x0F,
+ (unsigned short)(0x20 | (tempcx & 0x00C0))); /* Enable Dither */
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x1A , 0x7F , 0x80 ) ;
}
else
{
- XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x19 , 0x0F , ( USHORT )( 0x30 | ( tempcx & 0x00C0 ) ) ) ;
+ XGINew_SetRegANDOR(pVBInfo->Part1Port, 0x19, 0x0F,
+ (unsigned short)(0x30 | (tempcx & 0x00C0)));
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x1A , 0x7F , 0x00 ) ;
}
/*
if ( tempcx & EnableLCD24bpp ) // 24bits
{
- XGINew_SetRegANDOR(pVBInfo->Part1Port,0x19, 0x0F,(USHORT)(0x30|(tempcx&0x00C0)) );
+ XGINew_SetRegANDOR(pVBInfo->Part1Port,0x19, 0x0F,(unsigned short)(0x30|(tempcx&0x00C0)) );
XGINew_SetRegANDOR(pVBInfo->Part1Port,0x1A,0x7F,0x00);
}
else
{
- XGINew_SetRegANDOR(pVBInfo->Part1Port,0x19, 0x0F,(USHORT)(0x20|(tempcx&0x00C0)) );//Enable Dither
+ XGINew_SetRegANDOR(pVBInfo->Part1Port,0x19, 0x0F,(unsigned short)(0x20|(tempcx&0x00C0)) ); // Enable Dither
XGINew_SetRegANDOR(pVBInfo->Part1Port,0x1A,0x7F,0x80);
}
*/
@@ -9684,12 +9363,14 @@ void XGI_SetLCDCap_A(USHORT tempcx,PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetLCDCap_B(USHORT tempcx,PVB_DEVICE_INFO pVBInfo)
+void XGI_SetLCDCap_B(unsigned short tempcx, struct vb_device_info *pVBInfo)
{
if ( tempcx & EnableLCD24bpp ) /* 24bits */
- XGINew_SetRegANDOR( pVBInfo->Part2Port , 0x1A , 0xE0 , ( USHORT )( ( ( tempcx & 0x00ff ) >> 6 ) | 0x0c ) ) ;
+ XGINew_SetRegANDOR(pVBInfo->Part2Port, 0x1A, 0xE0,
+ (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
else
- XGINew_SetRegANDOR( pVBInfo->Part2Port , 0x1A , 0xE0 , ( USHORT )( ( ( tempcx & 0x00ff ) >> 6 ) | 0x18 ) ) ; /* Enable Dither */
+ XGINew_SetRegANDOR(pVBInfo->Part2Port, 0x1A, 0xE0,
+ (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18)); /* Enable Dither */
}
@@ -9699,9 +9380,9 @@ void XGI_SetLCDCap_B(USHORT tempcx,PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void SetSpectrum( PVB_DEVICE_INFO pVBInfo )
+void SetSpectrum(struct vb_device_info *pVBInfo)
{
- USHORT index ;
+ unsigned short index ;
index = XGI_GetLCDCapPtr(pVBInfo) ;
@@ -9725,12 +9406,13 @@ void SetSpectrum( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : Set TV Customized Param. */
/* --------------------------------------------------------------------- */
-void XGI_SetAntiFlicker( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_SetAntiFlicker(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbx ,
+ unsigned short tempbx ,
index ;
- UCHAR tempah ;
+ unsigned char tempah ;
if (pVBInfo->TVInfo & ( SetYPbPrMode525p | SetYPbPrMode750p ) )
return ;
@@ -9761,12 +9443,12 @@ void XGI_SetAntiFlicker( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetEdgeEnhance( USHORT ModeNo , USHORT ModeIdIndex , PVB_DEVICE_INFO pVBInfo)
+void XGI_SetEdgeEnhance(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT tempbx ,
+ unsigned short tempbx ,
index ;
- UCHAR tempah ;
+ unsigned char tempah ;
tempbx = XGI_GetTVPtrIndex(pVBInfo ) ;
@@ -9795,22 +9477,26 @@ void XGI_SetEdgeEnhance( USHORT ModeNo , USHORT ModeIdIndex , PVB_DEVICE_INFO pV
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetPhaseIncr( PVB_DEVICE_INFO pVBInfo )
+void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
{
- USHORT tempbx ;
+ unsigned short tempbx ;
- UCHAR tempcl ,
+ unsigned char tempcl ,
tempch ;
- ULONG tempData ;
+ unsigned long tempData ;
XGI_GetTVPtrIndex2( &tempbx , &tempcl , &tempch, pVBInfo ) ; /* bx, cl, ch */
tempData = TVPhaseList[ tempbx ] ;
- XGINew_SetReg1( pVBInfo->Part2Port , 0x31 , ( USHORT )( tempData & 0x000000FF ) ) ;
- XGINew_SetReg1( pVBInfo->Part2Port , 0x32 , ( USHORT )( ( tempData & 0x0000FF00 ) >> 8 ) ) ;
- XGINew_SetReg1( pVBInfo->Part2Port , 0x33 , ( USHORT )( ( tempData & 0x00FF0000 ) >> 16 ) ) ;
- XGINew_SetReg1( pVBInfo->Part2Port , 0x34 , ( USHORT )( ( tempData & 0xFF000000 ) >> 24 ) ) ;
+ XGINew_SetReg1(pVBInfo->Part2Port, 0x31,
+ (unsigned short)(tempData & 0x000000FF));
+ XGINew_SetReg1(pVBInfo->Part2Port, 0x32,
+ (unsigned short)((tempData & 0x0000FF00) >> 8));
+ XGINew_SetReg1(pVBInfo->Part2Port, 0x33,
+ (unsigned short)((tempData & 0x00FF0000) >> 16));
+ XGINew_SetReg1(pVBInfo->Part2Port, 0x34,
+ (unsigned short)((tempData & 0xFF000000) >> 24));
}
@@ -9820,12 +9506,13 @@ void XGI_SetPhaseIncr( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_SetYFilter( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbx ,
+ unsigned short tempbx ,
index ;
- UCHAR tempcl ,
+ unsigned char tempcl ,
tempch ,
tempal ,
*filterPtr ;
@@ -9924,7 +9611,8 @@ void XGI_SetYFilter( USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo
/* 1 : 301B/302B/301LV/302LV */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetTVPtrIndex2(USHORT* tempbx,UCHAR* tempcl,UCHAR* tempch, PVB_DEVICE_INFO pVBInfo)
+void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
+ unsigned char *tempch, struct vb_device_info *pVBInfo)
{
*tempbx = 0 ;
*tempcl = 0 ;
@@ -9966,12 +9654,14 @@ void XGI_GetTVPtrIndex2(USHORT* tempbx,UCHAR* tempcl,UCHAR* tempch, PVB_DEVICE_I
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2ModeRegs(USHORT ModeNo,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo)
+void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbl ;
- SHORT tempcl ;
+ unsigned short tempbl ;
+ short tempcl ;
- UCHAR tempah ;
+ unsigned char tempah ;
/* XGINew_SetReg1( pVBInfo->Part1Port , 0x03 , 0x00 ) ; // fix write part1 index 0 BTDRAM bit Bug */
tempah=0;
@@ -10195,9 +9885,9 @@ void XGI_SetCRT2ModeRegs(USHORT ModeNo,PXGI_HW_DEVICE_INFO HwDeviceExtension, PV
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_CloseCRTC( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempbx ;
+ unsigned short tempbx ;
tempbx = 0 ;
@@ -10214,9 +9904,9 @@ void XGI_CloseCRTC( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBIn
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_OpenCRTC( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_OpenCRTC(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
- USHORT tempbx ;
+ unsigned short tempbx ;
tempbx = 0 ;
@@ -10230,9 +9920,9 @@ void XGI_OpenCRTC( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInf
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_GetRAMDAC2DATA(USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex, PVB_DEVICE_INFO pVBInfo )
+void XGI_GetRAMDAC2DATA(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct vb_device_info *pVBInfo)
{
- USHORT tempax ,
+ unsigned short tempax ,
tempbx ,
temp1 ,
temp2 ,
@@ -10257,15 +9947,15 @@ void XGI_GetRAMDAC2DATA(USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateT
modeflag = pVBInfo->EModeIDTable[ ModeIdIndex ].Ext_ModeFlag ;
CRT1Index = pVBInfo->RefIndex[ RefreshRateTableIndex ].Ext_CRT1CRTC ;
CRT1Index &= IndexMask ;
- temp1 = ( USHORT )pVBInfo->XGINEWUB_CRT1Table[ CRT1Index ].CR[ 0 ] ;
- temp2 = ( USHORT )pVBInfo->XGINEWUB_CRT1Table[ CRT1Index ].CR[ 5 ] ;
+ temp1 = (unsigned short)pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[0];
+ temp2 = (unsigned short)pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[5];
tempax = ( temp1 & 0xFF ) | ( ( temp2 & 0x03 ) << 8 ) ;
- tempbx = ( USHORT )pVBInfo->XGINEWUB_CRT1Table[ CRT1Index ].CR[ 8 ] ;
- tempcx = ( USHORT )pVBInfo->XGINEWUB_CRT1Table[ CRT1Index ].CR[ 14 ] << 8 ;
+ tempbx = (unsigned short)pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[8];
+ tempcx = (unsigned short)pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[14] << 8;
tempcx &= 0x0100 ;
tempcx = tempcx << 2 ;
tempbx |= tempcx;
- temp1 = ( USHORT )pVBInfo->XGINEWUB_CRT1Table[ CRT1Index ].CR[ 9 ] ;
+ temp1 = (unsigned short)pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[9];
}
if ( temp1 & 0x01 )
@@ -10295,11 +9985,11 @@ void XGI_GetRAMDAC2DATA(USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateT
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetColorDepth(USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo)
+unsigned short XGI_GetColorDepth(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
- USHORT ColorDepth[ 6 ] = { 1 , 2 , 4 , 4 , 6 , 8 } ;
- SHORT index ;
- USHORT modeflag ;
+ unsigned short ColorDepth[ 6 ] = { 1 , 2 , 4 , 4 , 6 , 8 } ;
+ short index ;
+ unsigned short modeflag ;
if ( ModeNo <= 0x13 )
{
@@ -10326,7 +10016,7 @@ USHORT XGI_GetColorDepth(USHORT ModeNo , USHORT ModeIdIndex, PVB_DEVICE_INFO pVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_UnLockCRT2( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x2f , 0xFF , 0x01 ) ;
@@ -10340,7 +10030,7 @@ void XGI_UnLockCRT2( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVB
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_LockCRT2( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBInfo )
+void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->Part1Port , 0x2F , 0xFE , 0x00 ) ;
@@ -10355,7 +10045,7 @@ void XGI_LockCRT2( PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO pVBIn
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_EnableCRT2( PVB_DEVICE_INFO pVBInfo)
+void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
{
XGINew_SetRegANDOR( pVBInfo->P3c4 , 0x1E , 0xFF , 0x20 ) ;
}
@@ -10368,12 +10058,12 @@ void XGINew_EnableCRT2( PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_LCD_Wait_Time(UCHAR DelayTime, PVB_DEVICE_INFO pVBInfo)
+void XGINew_LCD_Wait_Time(unsigned char DelayTime, struct vb_device_info *pVBInfo)
{
- USHORT i ,
+ unsigned short i ,
j ;
- ULONG temp ,
+ unsigned long temp ,
flag ;
flag = 0 ;
@@ -10405,9 +10095,9 @@ void XGINew_LCD_Wait_Time(UCHAR DelayTime, PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-BOOLEAN XGI_BridgeIsOn( PVB_DEVICE_INFO pVBInfo )
+unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo)
{
- USHORT flag ;
+ unsigned short flag ;
if ( pVBInfo->IF_DEF_LVDS == 1 )
{
@@ -10431,9 +10121,9 @@ BOOLEAN XGI_BridgeIsOn( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_LongWait(PVB_DEVICE_INFO pVBInfo)
+void XGI_LongWait(struct vb_device_info *pVBInfo)
{
- USHORT i ;
+ unsigned short i ;
i = XGINew_GetReg1( pVBInfo->P3c4 , 0x1F ) ;
@@ -10460,9 +10150,9 @@ void XGI_LongWait(PVB_DEVICE_INFO pVBInfo)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGI_VBLongWait( PVB_DEVICE_INFO pVBInfo )
+void XGI_VBLongWait(struct vb_device_info *pVBInfo)
{
- USHORT tempal ,
+ unsigned short tempal ,
temp ,
i ,
j ;
@@ -10519,16 +10209,16 @@ return ;
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetVGAHT2( PVB_DEVICE_INFO pVBInfo )
+unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
{
- ULONG tempax ,
+ unsigned long tempax ,
tempbx ;
tempbx = ( ( pVBInfo->VGAVT - pVBInfo->VGAVDE ) * pVBInfo->RVBHCMAX ) & 0xFFFF ;
tempax = ( pVBInfo->VT - pVBInfo->VDE ) * pVBInfo->RVBHCFACT ;
tempax = ( tempax * pVBInfo->HT ) /tempbx ;
- return( ( USHORT )tempax ) ;
+ return( (unsigned short)tempax ) ;
}
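
For readers following the arithmetic: the hunk above only changes the types, not the computation. Reading the pVBInfo fields as named in the code, with the denominator masked to 16 bits before the divide,

	VGAHT2 = HT * (VT - VDE) * RVBHCFACT / (((VGAVT - VGAVDE) * RVBHCMAX) & 0xFFFF)

and the result is truncated to unsigned short on return.
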
@@ -10538,19 +10228,23 @@ USHORT XGI_GetVGAHT2( PVB_DEVICE_INFO pVBInfo )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-USHORT XGI_GetVCLK2Ptr( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateTableIndex , PXGI_HW_DEVICE_INFO HwDeviceExtension ,PVB_DEVICE_INFO pVBInfo)
+unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
- USHORT tempbx ;
+ unsigned short tempbx ;
- USHORT LCDXlat1VCLK[ 4 ] = { VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 } ;
- USHORT LCDXlat2VCLK[ 4 ] = { VCLK108_2 + 5 , VCLK108_2 + 5 , VCLK108_2 + 5 , VCLK108_2 + 5 } ;
- USHORT LVDSXlat1VCLK[ 4 ] = { VCLK40 , VCLK40 , VCLK40 , VCLK40 } ;
- USHORT LVDSXlat2VCLK[ 4 ] = { VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 } ;
- USHORT LVDSXlat3VCLK[ 4 ] = { VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 } ;
+ unsigned short LCDXlat1VCLK[ 4 ] = { VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 } ;
+ unsigned short LCDXlat2VCLK[ 4 ] = { VCLK108_2 + 5 , VCLK108_2 + 5 , VCLK108_2 + 5 , VCLK108_2 + 5 } ;
+ unsigned short LVDSXlat1VCLK[ 4 ] = { VCLK40 , VCLK40 , VCLK40 , VCLK40 } ;
+ unsigned short LVDSXlat2VCLK[ 4 ] = { VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 } ;
+ unsigned short LVDSXlat3VCLK[ 4 ] = { VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 , VCLK65 + 2 } ;
- USHORT CRT2Index , VCLKIndex ;
- USHORT modeflag , resinfo ;
- UCHAR *CHTVVCLKPtr = NULL ;
+ unsigned short CRT2Index , VCLKIndex ;
+ unsigned short modeflag , resinfo ;
+ unsigned char *CHTVVCLKPtr = NULL ;
if ( ModeNo <= 0x13 )
{
@@ -10665,7 +10359,7 @@ USHORT XGI_GetVCLK2Ptr( USHORT ModeNo , USHORT ModeIdIndex , USHORT RefreshRateT
}
else
{ /* for CRT2 */
- VCLKIndex = ( UCHAR )XGINew_GetReg2( ( pVBInfo->P3ca + 0x02 ) ) ; /* Port 3cch */
+ VCLKIndex = (unsigned char)XGINew_GetReg2((pVBInfo->P3ca + 0x02)); /* Port 3cch */
VCLKIndex = ( ( VCLKIndex >> 2 ) & 0x03 ) ;
if ( ModeNo > 0x13 )
{
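
That ends the vb_setmode.c portion. The change repeated throughout it is mechanical: the Windows-style USHORT/UCHAR/ULONG names and the PVB_DEVICE_INFO/PXGI_HW_DEVICE_INFO pointer typedefs are spelled out as standard C types and explicit struct pointers, in line with the kernel coding-style preference against such typedefs. Taking one prototype from the hunks above as the pattern:

/* before: void XGI_SetCRT2FIFO(PVB_DEVICE_INFO pVBInfo); */
void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo);
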
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 09753d70666..0dcc2979617 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -1,40 +1,42 @@
#ifndef _VBSETMODE_
#define _VBSETMODE_
-extern void InitTo330Pointer(UCHAR,PVB_DEVICE_INFO);
-extern void XGI_UnLockCRT2(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_LockCRT2(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_LongWait( PVB_DEVICE_INFO );
-extern void XGI_SetCRT2ModeRegs(USHORT ModeNo,PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO );
-extern void XGI_DisableBridge(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_EnableBridge(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_DisplayOff( PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO );
-extern void XGI_DisplayOn( PXGI_HW_DEVICE_INFO, PVB_DEVICE_INFO );
-extern void XGI_GetVBType(PVB_DEVICE_INFO);
-extern void XGI_SenseCRT1(PVB_DEVICE_INFO );
-extern void XGI_GetVGAType(PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_GetVBInfo(USHORT ModeNo,USHORT ModeIdIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_GetTVInfo(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO );
-extern void XGI_SetCRT1Offset(USHORT ModeNo,USHORT ModeIdIndex,USHORT RefreshRateTableIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_SetLCDAGroup(USHORT ModeNo,USHORT ModeIdIndex,PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO );
-extern void XGI_WaitDisply( PVB_DEVICE_INFO );
-extern USHORT XGI_GetResInfo(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo);
+extern void InitTo330Pointer(unsigned char, struct vb_device_info *);
+extern void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_LongWait(struct vb_device_info *);
+extern void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
+ struct xgi_hw_device_info *,
+ struct vb_device_info *);
+extern void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_DisplayOff(struct xgi_hw_device_info *, struct vb_device_info *);
+extern void XGI_DisplayOn(struct xgi_hw_device_info *, struct vb_device_info *);
+extern void XGI_GetVBType(struct vb_device_info *);
+extern void XGI_SenseCRT1(struct vb_device_info *);
+extern void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *);
+extern void XGI_SetCRT1Offset(unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern void XGI_WaitDisply(struct vb_device_info *);
+extern unsigned short XGI_GetResInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
-extern BOOLEAN XGISetModeNew( PXGI_HW_DEVICE_INFO HwDeviceExtension , USHORT ModeNo ) ;
+extern unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension, unsigned short ModeNo);
-extern BOOLEAN XGI_SearchModeID( USHORT ModeNo,USHORT *ModeIdIndex, PVB_DEVICE_INFO );
-extern BOOLEAN XGI_GetLCDInfo(USHORT ModeNo,USHORT ModeIdIndex,PVB_DEVICE_INFO );
-extern BOOLEAN XGI_BridgeIsOn( PVB_DEVICE_INFO );
-extern BOOLEAN XGI_SetCRT2Group301(USHORT ModeNo, PXGI_HW_DEVICE_INFO HwDeviceExtension, PVB_DEVICE_INFO);
-extern USHORT XGI_GetRatePtrCRT2( PXGI_HW_DEVICE_INFO pXGIHWDE, USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO );
+extern unsigned char XGI_SearchModeID(unsigned short ModeNo, unsigned short *ModeIdIndex, struct vb_device_info *);
+extern unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *);
+extern unsigned char XGI_BridgeIsOn(struct vb_device_info *);
+extern unsigned char XGI_SetCRT2Group301(unsigned short ModeNo, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *);
+extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE, unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *);
-extern void XGI_SetXG21FPBits(PVB_DEVICE_INFO pVBInfo);
-extern void XGI_SetXG27FPBits(PVB_DEVICE_INFO pVBInfo);
-extern void XGI_XG21BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-extern void XGI_XG27BLSignalVDD(USHORT tempbh,USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-extern void XGI_XG21SetPanelDelay(USHORT tempbl, PVB_DEVICE_INFO pVBInfo);
-extern BOOLEAN XGI_XG21CheckLVDSMode(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo );
-extern void XGI_SetXG21LVDSPara(USHORT ModeNo,USHORT ModeIdIndex, PVB_DEVICE_INFO pVBInfo );
-extern USHORT XGI_GetLVDSOEMTableIndex(PVB_DEVICE_INFO pVBInfo);
+extern void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo);
+extern void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo);
+extern void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl, struct vb_device_info *pVBInfo);
+extern void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl, struct vb_device_info *pVBInfo);
+extern void XGI_XG21SetPanelDelay(unsigned short tempbl, struct vb_device_info *pVBInfo);
+extern unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+extern void XGI_SetXG21LVDSPara(unsigned short ModeNo, unsigned short ModeIdIndex, struct vb_device_info *pVBInfo);
+extern unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo);
#endif
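The prototypes above keep their call shape; only the Windows-style type names (UCHAR, USHORT, BOOLEAN, PVB_DEVICE_INFO, PXGI_HW_DEVICE_INFO) are spelled out as plain C types, and the former BOOLEAN returns become unsigned char. A minimal sketch of a caller against the converted header, assuming only <linux/errno.h> for the error codes and the struct xgi_hw_device_info declaration from the driver's existing headers; xgifb_try_modeset() is hypothetical and not added by this patch:

#include <linux/errno.h>	/* -EINVAL, -EIO */
#include "vb_struct.h"		/* struct vb_device_info (converted below) */
#include "vb_setmode.h"		/* the prototypes shown in this hunk */

/*
 * Hypothetical caller (not part of this patch): the truth tests on the
 * former BOOLEAN returns are unchanged now that they are unsigned char.
 */
static int xgifb_try_modeset(struct xgi_hw_device_info *hw,
			     struct vb_device_info *vb,
			     unsigned char chip_type,	/* was UCHAR */
			     unsigned short mode_no)	/* was USHORT */
{
	unsigned short mode_idx = 0;

	InitTo330Pointer(chip_type, vb);

	if (!XGI_SearchModeID(mode_no, &mode_idx, vb))
		return -EINVAL;	/* mode number not in the mode tables */

	return XGISetModeNew(hw, mode_no) ? 0 : -EIO;
}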
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index bb25c0e2785..9c6e0c7ac78 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -10,525 +10,518 @@
-typedef struct _XGI_PanelDelayTblStruct
+struct XGI_PanelDelayTblStruct
{
- UCHAR timer[2];
-} XGI_PanelDelayTblStruct;
+ unsigned char timer[2];
+};
-typedef struct _XGI_LCDDataStruct
+struct XGI_LCDDataStruct
{
- USHORT RVBHCMAX;
- USHORT RVBHCFACT;
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT LCDHT;
- USHORT LCDVT;
-} XGI_LCDDataStruct;
+ unsigned short RVBHCMAX;
+ unsigned short RVBHCFACT;
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short LCDHT;
+ unsigned short LCDVT;
+};
-typedef struct _XGI_LVDSCRT1HDataStruct
+struct XGI_LVDSCRT1HDataStruct
{
- UCHAR Reg[8];
-} XGI_LVDSCRT1HDataStruct;
-typedef struct _XGI_LVDSCRT1VDataStruct
-{
- UCHAR Reg[7];
-} XGI_LVDSCRT1VDataStruct;
-
-
-typedef struct _XGI_TVDataStruct
-{
- USHORT RVBHCMAX;
- USHORT RVBHCFACT;
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT TVHDE;
- USHORT TVVDE;
- USHORT RVBHRS;
- UCHAR FlickerMode;
- USHORT HALFRVBHRS;
- UCHAR RY1COE;
- UCHAR RY2COE;
- UCHAR RY3COE;
- UCHAR RY4COE;
-} XGI_TVDataStruct;
-
-typedef struct _XGI_LVDSDataStruct
-{
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT LCDHT;
- USHORT LCDVT;
-} XGI_LVDSDataStruct;
-
-typedef struct _XGI_LVDSDesStruct
-{
- USHORT LCDHDES;
- USHORT LCDVDES;
-} XGI_LVDSDesStruct;
-
-typedef struct _XGI_LVDSCRT1DataStruct
-{
- UCHAR CR[15];
-} XGI_LVDSCRT1DataStruct;
-
-/*add for LCDA*/
+ unsigned char Reg[8];
+};
-
-typedef struct _XGI_StStruct
-{
- UCHAR St_ModeID;
- USHORT St_ModeFlag;
- UCHAR St_StTableIndex;
- UCHAR St_CRT2CRTC;
- UCHAR St_CRT2CRTC2;
- UCHAR St_ResInfo;
- UCHAR VB_StTVFlickerIndex;
- UCHAR VB_StTVEdgeIndex;
- UCHAR VB_StTVYFilterIndex;
-} XGI_StStruct;
-
-typedef struct _XGI_StandTableStruct
-{
- UCHAR CRT_COLS;
- UCHAR ROWS;
- UCHAR CHAR_HEIGHT;
- USHORT CRT_LEN;
- UCHAR SR[4];
- UCHAR MISC;
- UCHAR CRTC[0x19];
- UCHAR ATTR[0x14];
- UCHAR GRC[9];
-} XGI_StandTableStruct;
-
-typedef struct _XGI_ExtStruct
-{
- UCHAR Ext_ModeID;
- USHORT Ext_ModeFlag;
- USHORT Ext_ModeInfo;
- USHORT Ext_Point;
- USHORT Ext_VESAID;
- UCHAR Ext_VESAMEMSize;
- UCHAR Ext_RESINFO;
- UCHAR VB_ExtTVFlickerIndex;
- UCHAR VB_ExtTVEdgeIndex;
- UCHAR VB_ExtTVYFilterIndex;
- UCHAR REFindex;
-} XGI_ExtStruct;
-
-typedef struct _XGI_Ext2Struct
+struct XGI_LVDSCRT1VDataStruct
{
- USHORT Ext_InfoFlag;
- UCHAR Ext_CRT1CRTC;
- UCHAR Ext_CRTVCLK;
- UCHAR Ext_CRT2CRTC;
- UCHAR Ext_CRT2CRTC2;
- UCHAR ModeID;
- USHORT XRes;
- USHORT YRes;
- /* USHORT ROM_OFFSET; */
-} XGI_Ext2Struct;
-
+ unsigned char Reg[7];
+};
-typedef struct _XGI_MCLKDataStruct
-{
- UCHAR SR28,SR29,SR2A;
- USHORT CLOCK;
-} XGI_MCLKDataStruct;
-typedef struct _XGI_ECLKDataStruct
+struct XGI_TVDataStruct
{
- UCHAR SR2E,SR2F,SR30;
- USHORT CLOCK;
-} XGI_ECLKDataStruct;
+ unsigned short RVBHCMAX;
+ unsigned short RVBHCFACT;
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short TVHDE;
+ unsigned short TVVDE;
+ unsigned short RVBHRS;
+ unsigned char FlickerMode;
+ unsigned short HALFRVBHRS;
+ unsigned char RY1COE;
+ unsigned char RY2COE;
+ unsigned char RY3COE;
+ unsigned char RY4COE;
+};
-typedef struct _XGI_VCLKDataStruct
+struct XGI_LVDSDataStruct
{
- UCHAR SR2B,SR2C;
- USHORT CLOCK;
-} XGI_VCLKDataStruct;
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short LCDHT;
+ unsigned short LCDVT;
+};
-typedef struct _XGI_VBVCLKDataStruct
+struct XGI_LVDSDesStruct
{
- UCHAR Part4_A,Part4_B;
- USHORT CLOCK;
-} XGI_VBVCLKDataStruct;
+ unsigned short LCDHDES;
+ unsigned short LCDVDES;
+};
-typedef struct _XGI_StResInfoStruct
+struct XGI_LVDSCRT1DataStruct
{
- USHORT HTotal;
- USHORT VTotal;
-} XGI_StResInfoStruct;
+ unsigned char CR[15];
+};
-typedef struct _XGI_ModeResInfoStruct
-{
- USHORT HTotal;
- USHORT VTotal;
- UCHAR XChar;
- UCHAR YChar;
-} XGI_ModeResInfoStruct;
+/*add for LCDA*/
-typedef struct _XGI_LCDNBDesStruct
-{
- UCHAR NB[12];
-} XGI_LCDNBDesStruct;
+struct XGI_StStruct
+{
+ unsigned char St_ModeID;
+ unsigned short St_ModeFlag;
+ unsigned char St_StTableIndex;
+ unsigned char St_CRT2CRTC;
+ unsigned char St_CRT2CRTC2;
+ unsigned char St_ResInfo;
+ unsigned char VB_StTVFlickerIndex;
+ unsigned char VB_StTVEdgeIndex;
+ unsigned char VB_StTVYFilterIndex;
+};
+
+struct XGI_StandTableStruct
+{
+ unsigned char CRT_COLS;
+ unsigned char ROWS;
+ unsigned char CHAR_HEIGHT;
+ unsigned short CRT_LEN;
+ unsigned char SR[4];
+ unsigned char MISC;
+ unsigned char CRTC[0x19];
+ unsigned char ATTR[0x14];
+ unsigned char GRC[9];
+};
+
+struct XGI_ExtStruct
+{
+ unsigned char Ext_ModeID;
+ unsigned short Ext_ModeFlag;
+ unsigned short Ext_ModeInfo;
+ unsigned short Ext_Point;
+ unsigned short Ext_VESAID;
+ unsigned char Ext_VESAMEMSize;
+ unsigned char Ext_RESINFO;
+ unsigned char VB_ExtTVFlickerIndex;
+ unsigned char VB_ExtTVEdgeIndex;
+ unsigned char VB_ExtTVYFilterIndex;
+ unsigned char REFindex;
+};
+
+struct XGI_Ext2Struct
+{
+ unsigned short Ext_InfoFlag;
+ unsigned char Ext_CRT1CRTC;
+ unsigned char Ext_CRTVCLK;
+ unsigned char Ext_CRT2CRTC;
+ unsigned char Ext_CRT2CRTC2;
+ unsigned char ModeID;
+ unsigned short XRes;
+ unsigned short YRes;
+ /* unsigned short ROM_OFFSET; */
+};
+
+
+struct XGI_MCLKDataStruct
+{
+ unsigned char SR28, SR29, SR2A;
+ unsigned short CLOCK;
+};
+
+struct XGI_ECLKDataStruct
+{
+ unsigned char SR2E, SR2F, SR30;
+ unsigned short CLOCK;
+};
+
+struct XGI_VCLKDataStruct
+{
+ unsigned char SR2B, SR2C;
+ unsigned short CLOCK;
+};
+
+struct XGI_VBVCLKDataStruct
+{
+ unsigned char Part4_A, Part4_B;
+ unsigned short CLOCK;
+};
+
+struct XGI_StResInfoStruct
+{
+ unsigned short HTotal;
+ unsigned short VTotal;
+};
+
+struct XGI_ModeResInfoStruct
+{
+ unsigned short HTotal;
+ unsigned short VTotal;
+ unsigned char XChar;
+ unsigned char YChar;
+};
+
+struct XGI_LCDNBDesStruct
+{
+ unsigned char NB[12];
+};
/*add for new UNIVGABIOS*/
-typedef struct _XGI_LCDDesStruct
+struct XGI_LCDDesStruct
{
- USHORT LCDHDES;
- USHORT LCDHRS;
- USHORT LCDVDES;
- USHORT LCDVRS;
-} XGI_LCDDesStruct;
+ unsigned short LCDHDES;
+ unsigned short LCDHRS;
+ unsigned short LCDVDES;
+ unsigned short LCDVRS;
+};
-typedef struct _XGI_LCDDataTablStruct
+struct XGI_LCDDataTablStruct
{
- UCHAR PANELID;
- USHORT MASK;
- USHORT CAP;
- USHORT DATAPTR;
-} XGI_LCDDataTablStruct;
+ unsigned char PANELID;
+ unsigned short MASK;
+ unsigned short CAP;
+ unsigned short DATAPTR;
+};
-typedef struct _XGI_TVTablDataStruct
+struct XGI_TVTablDataStruct
{
- USHORT MASK;
- USHORT CAP;
- USHORT DATAPTR;
-} XGI_TVDataTablStruct;
+ unsigned short MASK;
+ unsigned short CAP;
+ unsigned short DATAPTR;
+};
-typedef struct _XGI330_LCDDesDataStruct
+struct XGI330_LCDDataDesStruct
{
- USHORT LCDHDES;
- USHORT LCDHRS;
- USHORT LCDVDES;
- USHORT LCDVRS;
-} XGI330_LCDDataDesStruct;
+ unsigned short LCDHDES;
+ unsigned short LCDHRS;
+ unsigned short LCDVDES;
+ unsigned short LCDVRS;
+};
-typedef struct _XGI330_LVDSDataStruct
+struct XGI330_LVDSDataStruct
{
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT LCDHT;
- USHORT LCDVT;
-} XGI330_LVDSDataStruct;
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short LCDHT;
+ unsigned short LCDVT;
+};
-typedef struct _XGI330_LCDDesDataStruct2
+struct XGI330_LCDDataDesStruct2
{
- USHORT LCDHDES;
- USHORT LCDHRS;
- USHORT LCDVDES;
- USHORT LCDVRS;
- USHORT LCDHSync;
- USHORT LCDVSync;
-} XGI330_LCDDataDesStruct2;
+ unsigned short LCDHDES;
+ unsigned short LCDHRS;
+ unsigned short LCDVDES;
+ unsigned short LCDVRS;
+ unsigned short LCDHSync;
+ unsigned short LCDVSync;
+};
-typedef struct _XGI330_LCDDataStruct
+struct XGI330_LCDDataStruct
{
- USHORT RVBHCMAX;
- USHORT RVBHCFACT;
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT LCDHT;
- USHORT LCDVT;
-} XGI330_LCDDataStruct;
+ unsigned short RVBHCMAX;
+ unsigned short RVBHCFACT;
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short LCDHT;
+ unsigned short LCDVT;
+};
-typedef struct _XGI330_TVDataStruct
+struct XGI330_TVDataStruct
{
- USHORT RVBHCMAX;
- USHORT RVBHCFACT;
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT TVHDE;
- USHORT TVVDE;
- USHORT RVBHRS;
- UCHAR FlickerMode;
- USHORT HALFRVBHRS;
-} XGI330_TVDataStruct;
-
-typedef struct _XGI330_LCDDataTablStruct
-{
- UCHAR PANELID;
- USHORT MASK;
- USHORT CAP;
- USHORT DATAPTR;
-} XGI330_LCDDataTablStruct;
-
-typedef struct _XGI330_TVDataTablStruct
-{
- USHORT MASK;
- USHORT CAP;
- USHORT DATAPTR;
-} XGI330_TVDataTablStruct;
-
-
-typedef struct _XGI330_CHTVDataStruct
-{
- USHORT VGAHT;
- USHORT VGAVT;
- USHORT LCDHT;
- USHORT LCDVT;
-} XGI330_CHTVDataStruct;
+ unsigned short RVBHCMAX;
+ unsigned short RVBHCFACT;
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short TVHDE;
+ unsigned short TVVDE;
+ unsigned short RVBHRS;
+ unsigned char FlickerMode;
+ unsigned short HALFRVBHRS;
+};
+
+struct XGI330_LCDDataTablStruct
+{
+ unsigned char PANELID;
+ unsigned short MASK;
+ unsigned short CAP;
+ unsigned short DATAPTR;
+};
+
+struct XGI330_TVDataTablStruct
+{
+ unsigned short MASK;
+ unsigned short CAP;
+ unsigned short DATAPTR;
+};
+
+
+struct XGI330_CHTVDataStruct
+{
+ unsigned short VGAHT;
+ unsigned short VGAVT;
+ unsigned short LCDHT;
+ unsigned short LCDVT;
+};
-typedef struct _XGI_TimingHStruct
-{
- UCHAR data[8];
-} XGI_TimingHStruct;
-
-typedef struct _XGI_TimingVStruct
-{
- UCHAR data[7];
-} XGI_TimingVStruct;
-
-typedef struct _XGI_CH7007TV_TimingHStruct
-{
- UCHAR data[10];
-} XGI_CH7007TV_TimingHStruct;
-
-typedef struct _XGI_CH7007TV_TimingVStruct
-{
- UCHAR data[10];
-} XGI_CH7007TV_TimingVStruct;
-
-typedef struct _XGI_XG21CRT1Struct
-{
- UCHAR ModeID,CR02,CR03,CR15,CR16;
-} XGI_XG21CRT1Struct;
-
-typedef struct _XGI330_CHTVRegDataStruct
-{
- UCHAR Reg[16];
-} XGI330_CHTVRegDataStruct;
-
-typedef struct _XGI330_LCDCapStruct
-{
- UCHAR LCD_ID;
- USHORT LCD_Capability;
- UCHAR LCD_SetFlag;
- UCHAR LCD_DelayCompensation;
- UCHAR LCD_HSyncWidth;
- UCHAR LCD_VSyncWidth;
- UCHAR LCD_VCLK;
- UCHAR LCDA_VCLKData1;
- UCHAR LCDA_VCLKData2;
- UCHAR LCUCHAR_VCLKData1;
- UCHAR LCUCHAR_VCLKData2;
- UCHAR PSC_S1;
- UCHAR PSC_S2;
- UCHAR PSC_S3;
- UCHAR PSC_S4;
- UCHAR PSC_S5;
- UCHAR PWD_2B;
- UCHAR PWD_2C;
- UCHAR PWD_2D;
- UCHAR PWD_2E;
- UCHAR PWD_2F;
- UCHAR Spectrum_31;
- UCHAR Spectrum_32;
- UCHAR Spectrum_33;
- UCHAR Spectrum_34;
-} XGI330_LCDCapStruct;
-
-typedef struct _XGI21_LVDSCapStruct
-{
- USHORT LVDS_Capability;
- USHORT LVDSHT;
- USHORT LVDSVT;
- USHORT LVDSHDE;
- USHORT LVDSVDE;
- USHORT LVDSHFP;
- USHORT LVDSVFP;
- USHORT LVDSHSYNC;
- USHORT LVDSVSYNC;
- UCHAR VCLKData1;
- UCHAR VCLKData2;
- UCHAR PSC_S1;
- UCHAR PSC_S2;
- UCHAR PSC_S3;
- UCHAR PSC_S4;
- UCHAR PSC_S5;
-} XGI21_LVDSCapStruct;
-
-typedef struct _XGI_CRT1TableStruct
-{
- UCHAR CR[16];
-} XGI_CRT1TableStruct;
-
-
-typedef struct _XGI330_VCLKDataStruct
-{
- UCHAR SR2B,SR2C;
- USHORT CLOCK;
-} XGI330_VCLKDataStruct;
-
-typedef struct _XGI301C_Tap4TimingStruct
-{
- USHORT DE;
- UCHAR Reg[64]; /* C0-FF */
-} XGI301C_Tap4TimingStruct;
-
-typedef struct _XGI_New_StandTableStruct
-{
- UCHAR CRT_COLS;
- UCHAR ROWS;
- UCHAR CHAR_HEIGHT;
- USHORT CRT_LEN;
- UCHAR SR[4];
- UCHAR MISC;
- UCHAR CRTC[0x19];
- UCHAR ATTR[0x14];
- UCHAR GRC[9];
-} XGI_New_StandTableStruct;
-
-typedef UCHAR DRAM8Type[8];
-typedef UCHAR DRAM4Type[4];
-typedef UCHAR DRAM32Type[32];
-typedef UCHAR DRAM2Type[2];
-
-typedef struct _VB_DEVICE_INFO VB_DEVICE_INFO;
-typedef VB_DEVICE_INFO * PVB_DEVICE_INFO;
-
-struct _VB_DEVICE_INFO
-{
- BOOLEAN ISXPDOS;
- ULONG P3c4,P3d4,P3c0,P3ce,P3c2,P3cc;
- ULONG P3ca,P3c6,P3c7,P3c8,P3c9,P3da;
- ULONG Part0Port,Part1Port,Part2Port;
- ULONG Part3Port,Part4Port,Part5Port;
- USHORT RVBHCFACT,RVBHCMAX,RVBHRS;
- USHORT VGAVT,VGAHT,VGAVDE,VGAHDE;
- USHORT VT,HT,VDE,HDE;
- USHORT LCDHRS,LCDVRS,LCDHDES,LCDVDES;
-
- USHORT ModeType;
- USHORT IF_DEF_LVDS,IF_DEF_TRUMPION,IF_DEF_DSTN;/* ,IF_DEF_FSTN; add for dstn */
- USHORT IF_DEF_CRT2Monitor,IF_DEF_VideoCapture;
- USHORT IF_DEF_LCDA,IF_DEF_CH7017,IF_DEF_YPbPr,IF_DEF_ScaleLCD,IF_DEF_OEMUtil,IF_DEF_PWD;
- USHORT IF_DEF_ExpLink;
- USHORT IF_DEF_CH7005,IF_DEF_HiVision;
- USHORT IF_DEF_CH7007; /* Billy 2007/05/03 */
- USHORT LCDResInfo,LCDTypeInfo, VBType;/*301b*/
- USHORT VBInfo,TVInfo,LCDInfo, Set_VGAType;
- USHORT VBExtInfo;/*301lv*/
- USHORT SetFlag;
- USHORT NewFlickerMode;
- USHORT SelectCRT2Rate;
-
- PUCHAR ROMAddr;
- PUCHAR FBAddr;
- ULONG BaseAddr;
- ULONG RelIO;
-
- DRAM4Type *CR6B;
- DRAM4Type *CR6E;
- DRAM32Type *CR6F;
- DRAM2Type *CR89;
-
- DRAM8Type *SR15; /* pointer : point to array */
- DRAM8Type *CR40;
- UCHAR *pSoftSetting;
- UCHAR *pOutputSelect;
-
- USHORT *pRGBSenseData;
- USHORT *pRGBSenseData2; /*301b*/
- USHORT *pVideoSenseData;
- USHORT *pVideoSenseData2;
- USHORT *pYCSenseData;
- USHORT *pYCSenseData2;
-
- UCHAR *pSR07;
- UCHAR *CR49;
- UCHAR *pSR1F;
- UCHAR *AGPReg;
- UCHAR *SR16;
- UCHAR *pSR21;
- UCHAR *pSR22;
- UCHAR *pSR23;
- UCHAR *pSR24;
- UCHAR *SR25;
- UCHAR *pSR31;
- UCHAR *pSR32;
- UCHAR *pSR33;
- UCHAR *pSR36; /* alan 12/07/2006 */
- UCHAR *pCRCF;
- UCHAR *pCRD0; /* alan 12/07/2006 */
- UCHAR *pCRDE; /* alan 12/07/2006 */
- UCHAR *pCR8F; /* alan 12/07/2006 */
- UCHAR *pSR40; /* alan 12/07/2006 */
- UCHAR *pSR41; /* alan 12/07/2006 */
- UCHAR *pDVOSetting;
- UCHAR *pCR2E;
- UCHAR *pCR2F;
- UCHAR *pCR46;
- UCHAR *pCR47;
- UCHAR *pCRT2Data_1_2;
- UCHAR *pCRT2Data_4_D;
- UCHAR *pCRT2Data_4_E;
- UCHAR *pCRT2Data_4_10;
- XGI_MCLKDataStruct *MCLKData;
- XGI_ECLKDataStruct *ECLKData;
-
- UCHAR *XGI_TVDelayList;
- UCHAR *XGI_TVDelayList2;
- UCHAR *CHTVVCLKUNTSC;
- UCHAR *CHTVVCLKONTSC;
- UCHAR *CHTVVCLKUPAL;
- UCHAR *CHTVVCLKOPAL;
- UCHAR *NTSCTiming;
- UCHAR *PALTiming;
- UCHAR *HiTVExtTiming;
- UCHAR *HiTVSt1Timing;
- UCHAR *HiTVSt2Timing;
- UCHAR *HiTVTextTiming;
- UCHAR *YPbPr750pTiming;
- UCHAR *YPbPr525pTiming;
- UCHAR *YPbPr525iTiming;
- UCHAR *HiTVGroup3Data;
- UCHAR *HiTVGroup3Simu;
- UCHAR *HiTVGroup3Text;
- UCHAR *Ren525pGroup3;
- UCHAR *Ren750pGroup3;
- UCHAR *ScreenOffset;
- UCHAR *pXGINew_DRAMTypeDefinition;
- UCHAR *pXGINew_I2CDefinition ;
- UCHAR *pXGINew_CR97 ;
-
- XGI330_LCDCapStruct *LCDCapList;
- XGI21_LVDSCapStruct *XG21_LVDSCapList;
-
- XGI_TimingHStruct *TimingH;
- XGI_TimingVStruct *TimingV;
-
- XGI_StStruct *SModeIDTable;
- XGI_StandTableStruct *StandTable;
- XGI_ExtStruct *EModeIDTable;
- XGI_Ext2Struct *RefIndex;
+struct XGI_TimingHStruct
+{
+ unsigned char data[8];
+};
+
+struct XGI_TimingVStruct
+{
+ unsigned char data[7];
+};
+
+struct XGI_CH7007TV_TimingHStruct
+{
+ unsigned char data[10];
+};
+
+struct XGI_CH7007TV_TimingVStruct
+{
+ unsigned char data[10];
+};
+
+struct XGI_XG21CRT1Struct
+{
+ unsigned char ModeID, CR02, CR03, CR15, CR16;
+};
+
+struct XGI330_CHTVRegDataStruct
+{
+ unsigned char Reg[16];
+};
+
+struct XGI330_LCDCapStruct
+{
+ unsigned char LCD_ID;
+ unsigned short LCD_Capability;
+ unsigned char LCD_SetFlag;
+ unsigned char LCD_DelayCompensation;
+ unsigned char LCD_HSyncWidth;
+ unsigned char LCD_VSyncWidth;
+ unsigned char LCD_VCLK;
+ unsigned char LCDA_VCLKData1;
+ unsigned char LCDA_VCLKData2;
+ unsigned char LCUCHAR_VCLKData1;
+ unsigned char LCUCHAR_VCLKData2;
+ unsigned char PSC_S1;
+ unsigned char PSC_S2;
+ unsigned char PSC_S3;
+ unsigned char PSC_S4;
+ unsigned char PSC_S5;
+ unsigned char PWD_2B;
+ unsigned char PWD_2C;
+ unsigned char PWD_2D;
+ unsigned char PWD_2E;
+ unsigned char PWD_2F;
+ unsigned char Spectrum_31;
+ unsigned char Spectrum_32;
+ unsigned char Spectrum_33;
+ unsigned char Spectrum_34;
+};
+
+struct XGI21_LVDSCapStruct
+{
+ unsigned short LVDS_Capability;
+ unsigned short LVDSHT;
+ unsigned short LVDSVT;
+ unsigned short LVDSHDE;
+ unsigned short LVDSVDE;
+ unsigned short LVDSHFP;
+ unsigned short LVDSVFP;
+ unsigned short LVDSHSYNC;
+ unsigned short LVDSVSYNC;
+ unsigned char VCLKData1;
+ unsigned char VCLKData2;
+ unsigned char PSC_S1;
+ unsigned char PSC_S2;
+ unsigned char PSC_S3;
+ unsigned char PSC_S4;
+ unsigned char PSC_S5;
+};
+
+struct XGI_CRT1TableStruct
+{
+ unsigned char CR[16];
+};
+
+
+struct XGI330_VCLKDataStruct
+{
+ unsigned char SR2B, SR2C;
+ unsigned short CLOCK;
+};
+
+struct XGI301C_Tap4TimingStruct
+{
+ unsigned short DE;
+ unsigned char Reg[64]; /* C0-FF */
+};
+
+struct XGI_New_StandTableStruct
+{
+ unsigned char CRT_COLS;
+ unsigned char ROWS;
+ unsigned char CHAR_HEIGHT;
+ unsigned short CRT_LEN;
+ unsigned char SR[4];
+ unsigned char MISC;
+ unsigned char CRTC[0x19];
+ unsigned char ATTR[0x14];
+ unsigned char GRC[9];
+};
+
+struct vb_device_info
+{
+ unsigned char ISXPDOS;
+ unsigned long P3c4, P3d4, P3c0, P3ce, P3c2, P3cc;
+ unsigned long P3ca, P3c6, P3c7, P3c8, P3c9, P3da;
+ unsigned long Part0Port, Part1Port, Part2Port;
+ unsigned long Part3Port, Part4Port, Part5Port;
+ unsigned short RVBHCFACT, RVBHCMAX, RVBHRS;
+ unsigned short VGAVT, VGAHT, VGAVDE, VGAHDE;
+ unsigned short VT, HT, VDE, HDE;
+ unsigned short LCDHRS, LCDVRS, LCDHDES, LCDVDES;
+
+ unsigned short ModeType;
+ unsigned short IF_DEF_LVDS, IF_DEF_TRUMPION, IF_DEF_DSTN; /* ,IF_DEF_FSTN; add for dstn */
+ unsigned short IF_DEF_CRT2Monitor, IF_DEF_VideoCapture;
+ unsigned short IF_DEF_LCDA, IF_DEF_CH7017, IF_DEF_YPbPr, IF_DEF_ScaleLCD, IF_DEF_OEMUtil, IF_DEF_PWD;
+ unsigned short IF_DEF_ExpLink;
+ unsigned short IF_DEF_CH7005, IF_DEF_HiVision;
+ unsigned short IF_DEF_CH7007; /* Billy 2007/05/03 */
+ unsigned short LCDResInfo, LCDTypeInfo, VBType; /* 301b */
+ unsigned short VBInfo, TVInfo, LCDInfo, Set_VGAType;
+ unsigned short VBExtInfo; /* 301lv */
+ unsigned short SetFlag;
+ unsigned short NewFlickerMode;
+ unsigned short SelectCRT2Rate;
+
+ unsigned char *ROMAddr;
+ unsigned char *FBAddr;
+ unsigned long BaseAddr;
+ unsigned long RelIO;
+
+ unsigned char (*CR6B)[4];
+ unsigned char (*CR6E)[4];
+ unsigned char (*CR6F)[32];
+ unsigned char (*CR89)[2];
+
+ unsigned char (*SR15)[8];
+ unsigned char (*CR40)[8];
+
+ unsigned char *pSoftSetting;
+ unsigned char *pOutputSelect;
+
+ unsigned short *pRGBSenseData;
+ unsigned short *pRGBSenseData2; /*301b*/
+ unsigned short *pVideoSenseData;
+ unsigned short *pVideoSenseData2;
+ unsigned short *pYCSenseData;
+ unsigned short *pYCSenseData2;
+
+ unsigned char *pSR07;
+ unsigned char *CR49;
+ unsigned char *pSR1F;
+ unsigned char *AGPReg;
+ unsigned char *SR16;
+ unsigned char *pSR21;
+ unsigned char *pSR22;
+ unsigned char *pSR23;
+ unsigned char *pSR24;
+ unsigned char *SR25;
+ unsigned char *pSR31;
+ unsigned char *pSR32;
+ unsigned char *pSR33;
+ unsigned char *pSR36; /* alan 12/07/2006 */
+ unsigned char *pCRCF;
+ unsigned char *pCRD0; /* alan 12/07/2006 */
+ unsigned char *pCRDE; /* alan 12/07/2006 */
+ unsigned char *pCR8F; /* alan 12/07/2006 */
+ unsigned char *pSR40; /* alan 12/07/2006 */
+ unsigned char *pSR41; /* alan 12/07/2006 */
+ unsigned char *pDVOSetting;
+ unsigned char *pCR2E;
+ unsigned char *pCR2F;
+ unsigned char *pCR46;
+ unsigned char *pCR47;
+ unsigned char *pCRT2Data_1_2;
+ unsigned char *pCRT2Data_4_D;
+ unsigned char *pCRT2Data_4_E;
+ unsigned char *pCRT2Data_4_10;
+ struct XGI_MCLKDataStruct *MCLKData;
+ struct XGI_ECLKDataStruct *ECLKData;
+
+ unsigned char *XGI_TVDelayList;
+ unsigned char *XGI_TVDelayList2;
+ unsigned char *CHTVVCLKUNTSC;
+ unsigned char *CHTVVCLKONTSC;
+ unsigned char *CHTVVCLKUPAL;
+ unsigned char *CHTVVCLKOPAL;
+ unsigned char *NTSCTiming;
+ unsigned char *PALTiming;
+ unsigned char *HiTVExtTiming;
+ unsigned char *HiTVSt1Timing;
+ unsigned char *HiTVSt2Timing;
+ unsigned char *HiTVTextTiming;
+ unsigned char *YPbPr750pTiming;
+ unsigned char *YPbPr525pTiming;
+ unsigned char *YPbPr525iTiming;
+ unsigned char *HiTVGroup3Data;
+ unsigned char *HiTVGroup3Simu;
+ unsigned char *HiTVGroup3Text;
+ unsigned char *Ren525pGroup3;
+ unsigned char *Ren750pGroup3;
+ unsigned char *ScreenOffset;
+ unsigned char *pXGINew_DRAMTypeDefinition;
+ unsigned char *pXGINew_I2CDefinition;
+ unsigned char *pXGINew_CR97;
+
+ struct XGI330_LCDCapStruct *LCDCapList;
+ struct XGI21_LVDSCapStruct *XG21_LVDSCapList;
+
+ struct XGI_TimingHStruct *TimingH;
+ struct XGI_TimingVStruct *TimingV;
+
+ struct XGI_StStruct *SModeIDTable;
+ struct XGI_StandTableStruct *StandTable;
+ struct XGI_ExtStruct *EModeIDTable;
+ struct XGI_Ext2Struct *RefIndex;
/* XGINew_CRT1TableStruct *CRT1Table; */
- XGI_CRT1TableStruct *XGINEWUB_CRT1Table;
- XGI_VCLKDataStruct *VCLKData;
- XGI_VBVCLKDataStruct *VBVCLKData;
- XGI_StResInfoStruct *StResInfo;
- XGI_ModeResInfoStruct *ModeResInfo;
- XGI_XG21CRT1Struct *UpdateCRT1;
-}; /* _VB_DEVICE_INFO */
-
-
-typedef struct
-{
- USHORT Horizontal_ACTIVE;
- USHORT Horizontal_FP;
- USHORT Horizontal_SYNC;
- USHORT Horizontal_BP;
- USHORT Vertical_ACTIVE;
- USHORT Vertical_FP;
- USHORT Vertical_SYNC;
- USHORT Vertical_BP;
+ struct XGI_CRT1TableStruct *XGINEWUB_CRT1Table;
+ struct XGI_VCLKDataStruct *VCLKData;
+ struct XGI_VBVCLKDataStruct *VBVCLKData;
+ struct XGI_StResInfoStruct *StResInfo;
+ struct XGI_ModeResInfoStruct *ModeResInfo;
+ struct XGI_XG21CRT1Struct *UpdateCRT1;
+}; /* vb_device_info */
+
+
+struct TimingInfo
+{
+ unsigned short Horizontal_ACTIVE;
+ unsigned short Horizontal_FP;
+ unsigned short Horizontal_SYNC;
+ unsigned short Horizontal_BP;
+ unsigned short Vertical_ACTIVE;
+ unsigned short Vertical_FP;
+ unsigned short Vertical_SYNC;
+ unsigned short Vertical_BP;
double DCLK;
- UCHAR FrameRate;
- UCHAR Interlace;
- USHORT Margin;
-} TimingInfo;
+ unsigned char FrameRate;
+ unsigned char Interlace;
+ unsigned short Margin;
+};
#define _VB_STRUCT_
#endif /* _VB_STRUCT_ */
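With the typedefs removed, users of these types now name them with an explicit struct tag, and the old DRAM8Type/DRAM4Type/DRAM32Type/DRAM2Type helpers become pointer-to-array members, so the fixed-size register tables decay to exactly the member type. A minimal sketch of wiring a vb_device_info to a few of the tables defined in vb_table.h below; vb_init_tables() and the particular table choices are illustrative assumptions, not code from this patch:

#include "vb_struct.h"

/* Tables defined in vb_table.h after the conversion (declarations only). */
extern struct XGI_MCLKDataStruct XGI340New_MCLKData[];
extern unsigned char XGI330_SR15_1[8][8];
extern unsigned char XGI340_CR6B[8][4];

/*
 * Hypothetical helper (not part of this patch): point a vb_device_info at
 * a few of the converted tables, purely to show the new member types.
 */
static void vb_init_tables(struct vb_device_info *vb)
{
	/* Former "XGI_MCLKDataStruct *" member, now a tagged struct pointer. */
	vb->MCLKData = XGI340New_MCLKData;

	/*
	 * SR15 and CR6B are now pointers to fixed-size arrays, so an [8][8]
	 * or [8][4] table decays to the exact member type:
	 * unsigned char (*)[8] and unsigned char (*)[4].
	 */
	vb->SR15 = XGI330_SR15_1;
	vb->CR6B = XGI340_CR6B;
}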
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 781caefc56b..510ef767868 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,7 +1,7 @@
#define Tap4
-XGI_MCLKDataStruct XGI330New_MCLKData[]=
+struct XGI_MCLKDataStruct XGI330New_MCLKData[] =
{
{ 0x5c,0x23,0x01,166},
{ 0x5c,0x23,0x01,166},
@@ -13,7 +13,7 @@ XGI_MCLKDataStruct XGI330New_MCLKData[]=
{ 0x29,0x01,0x81,300}
};
//yilin modify for xgi20
-XGI_MCLKDataStruct XGI340New_MCLKData[]=
+struct XGI_MCLKDataStruct XGI340New_MCLKData[] =
{
{ 0x16,0x01,0x01,166},
{ 0x19,0x02,0x01,124},
@@ -25,7 +25,7 @@ XGI_MCLKDataStruct XGI340New_MCLKData[]=
{ 0x5c,0x23,0x01,166}
};
-XGI_MCLKDataStruct XGI27New_MCLKData[]=
+struct XGI_MCLKDataStruct XGI27New_MCLKData[] =
{
{ 0x5c,0x23,0x01,166},
{ 0x19,0x02,0x01,124},
@@ -37,7 +37,7 @@ XGI_MCLKDataStruct XGI27New_MCLKData[]=
{ 0x5c,0x23,0x01,166}
};
-XGI_ECLKDataStruct XGI330_ECLKData[]=
+struct XGI_ECLKDataStruct XGI330_ECLKData[] =
{
{ 0x7c,0x08,0x01,200},
{ 0x7c,0x08,0x01,200},
@@ -49,7 +49,7 @@ XGI_ECLKDataStruct XGI330_ECLKData[]=
{ 0x29,0x01,0x81,300}
};
//yilin modify for xgi20
-XGI_ECLKDataStruct XGI340_ECLKData[]=
+struct XGI_ECLKDataStruct XGI340_ECLKData[] =
{
{ 0x5c,0x23,0x01,166},
{ 0x55,0x84,0x01,123},
@@ -63,14 +63,14 @@ XGI_ECLKDataStruct XGI340_ECLKData[]=
-UCHAR XGI340_SR13[4][8]={
+unsigned char XGI340_SR13[4][8] = {
{0x35,0x45,0xb1,0x00,0x00,0x00,0x00,0x00},/* SR13 */
{0x41,0x51,0x5c,0x00,0x00,0x00,0x00,0x00},/* SR14 */
{0x31,0x42,0x42,0x00,0x00,0x00,0x00,0x00},/* SR18 */
{0x03,0x03,0x03,0x00,0x00,0x00,0x00,0x00}/* SR1B */
};
-UCHAR XGI340_cr41[24][8]=
+unsigned char XGI340_cr41[24][8] =
{{0x20,0x50,0x60,0x00,0x00,0x00,0x00,0x00},/* 0 CR41 */
{0xc4,0x40,0x84,0x00,0x00,0x00,0x00,0x00},/* 1 CR8A */
{0xc4,0x40,0x84,0x00,0x00,0x00,0x00,0x00},/* 2 CR8B */
@@ -98,7 +98,7 @@ UCHAR XGI340_cr41[24][8]=
};
-UCHAR XGI27_cr41[24][8]=
+unsigned char XGI27_cr41[24][8] =
{
{0x20,0x40,0x60,0x00,0x00,0x00,0x00,0x00},/* 0 CR41 */
{0xC4,0x40,0x84,0x00,0x00,0x00,0x00,0x00},/* 1 CR8A */
@@ -126,37 +126,7 @@ UCHAR XGI27_cr41[24][8]=
{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}/* 23 CRC5 */
};
-
-#if 0
-UCHAR XGI27_cr41[24][8]=
-{
-{0x20,0x60,0x60,0x00,0x00,0x00,0x00,0x00},/* 0 CR41 */
-{0x04,0x44,0x84,0x00,0x00,0x00,0x00,0x00},/* 1 CR8A */
-{0x04,0x40,0x84,0x00,0x00,0x00,0x00,0x00},/* 2 CR8B */
-{0xb5,0x03,0xa4,0x00,0x00,0x00,0x00,0x00},/* 3 CR40[7],CR99[2:0],CR45[3:0]*/
-{0xf0,0xf5,0xf0,0x00,0x00,0x00,0x00,0x00},/* 4 CR59 */
-{0xa4,0x1C,0x24,0x00,0x00,0x00,0x00,0x00},/* 5 CR68 */
-{0x77,0x77,0x44,0x00,0x00,0x00,0x00,0x00},/* 6 CR69 */
-{0x77,0x77,0x44,0x00,0x00,0x00,0x00,0x00},/* 7 CR6A */
-{0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00},/* 8 CR6D */
-{0x55,0x55,0x55,0x00,0x00,0x00,0x00,0x00},/* 9 CR80 */
-{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},/* 10 CR81 */
-{0x48,0xa8,0x48,0x00,0x00,0x00,0x00,0x00},/* 11 CR82 */
-{0x77,0x88,0x77,0x00,0x00,0x00,0x00,0x00},/* 12 CR85 */
-{0x88,0x88,0x88,0x00,0x00,0x00,0x00,0x00},/* 13 CR86 */
-{0x44,0x32,0x44,0x00,0x00,0x00,0x00,0x00},/* 14 CR90 */
-{0x44,0x33,0x44,0x00,0x00,0x00,0x00,0x00},/* 15 CR91 */
-{0x07,0x07,0x07,0x00,0x00,0x00,0x00,0x00},/* 16 CR92 */
-{0x44,0x63,0x44,0x00,0x00,0x00,0x00,0x00},/* 17 CR93 */
-{0x0A,0x14,0x0A,0x00,0x00,0x00,0x00,0x00},/* 18 CR94 */
-{0x0C,0x0B,0x0C,0x00,0x00,0x00,0x00,0x00},/* 19 CR95 */
-{0x05,0x22,0x05,0x00,0x00,0x00,0x00,0x00},/* 20 CR96 */
-{0xf0,0xf0,0xf0,0x00,0x00,0x00,0x00,0x00},/* 21 CRC3 */
-{0x03,0x00,0x02,0x00,0x00,0x00,0x00,0x00},/* 22 CRC4 */
-{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}/* 23 CRC5 */
-};
-#endif
-UCHAR XGI340_CR6B[8][4]={
+unsigned char XGI340_CR6B[8][4] = {
{0xaa,0xaa,0xaa,0xaa},
{0xaa,0xaa,0xaa,0xaa},
{0xaa,0xaa,0xaa,0xaa},
@@ -167,7 +137,7 @@ UCHAR XGI340_CR6B[8][4]={
{0x00,0x00,0x00,0x00}
};
-UCHAR XGI340_CR6E[8][4]={
+unsigned char XGI340_CR6E[8][4] = {
{0x00,0x00,0x00,0x00},
{0x00,0x00,0x00,0x00},
{0x00,0x00,0x00,0x00},
@@ -178,7 +148,7 @@ UCHAR XGI340_CR6E[8][4]={
{0x00,0x00,0x00,0x00}
};
-UCHAR XGI340_CR6F[8][32]={
+unsigned char XGI340_CR6F[8][32] = {
{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
@@ -189,7 +159,7 @@ UCHAR XGI340_CR6F[8][32]={
{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
};
-UCHAR XGI340_CR89[8][2]={
+unsigned char XGI340_CR89[8][2] = {
{0x00,0x00},
{0x00,0x00},
{0x00,0x00},
@@ -200,11 +170,12 @@ UCHAR XGI340_CR89[8][2]={
{0x00,0x00}
};
/* CR47,CR48,CR49,CR4A,CR4B,CR4C,CR70,CR71,CR74,CR75,CR76,CR77 */
-UCHAR XGI340_AGPReg[12]={0x28,0x23,0x00,0x20,0x00,0x20,0x00,0x05,0xd0,0x10,0x10,0x00};
+unsigned char XGI340_AGPReg[12] = {0x28, 0x23, 0x00, 0x20, 0x00, 0x20, 0x00,
+ 0x05, 0xd0, 0x10, 0x10, 0x00};
-UCHAR XGI340_SR16[4]={0x03,0x83,0x03,0x83};
+unsigned char XGI340_SR16[4] = {0x03, 0x83, 0x03, 0x83};
-UCHAR XGI330_SR15_1[8][8]={
+unsigned char XGI330_SR15_1[8][8] = {
{0x0,0x0,0x00,0x00,0x20,0x20,0x00,0x00},
{0x5,0x15,0x15,0x15,0x15,0x15,0x00,0x00},
{0xba,0xba,0xba,0xba,0xBA,0xBA,0x00,0x00},
@@ -215,7 +186,7 @@ UCHAR XGI330_SR15_1[8][8]={
{0x0,0xa5,0xfb,0xf6,0xF6,0xF6,0x00,0x00}
};
-UCHAR XGI330_cr40_1[15][8]={
+unsigned char XGI330_cr40_1[15][8] = {
{0x66,0x40,0x40,0x28,0x24,0x24,0x00,0x00},
{0x66,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
@@ -233,14 +204,14 @@ UCHAR XGI330_cr40_1[15][8]={
{0x00,0xA2,0x00,0x00,0xA2,0xA2,0x00,0x00},
};
-UCHAR XGI330_sr25[]={0x00,0x0};
-UCHAR XGI330_sr31=0xc0;
-UCHAR XGI330_sr32=0x11;
-UCHAR XGI330_SR33=0x00;
-UCHAR XG40_CRCF=0x13;
-UCHAR XG40_DRAMTypeDefinition=0xFF ;
+unsigned char XGI330_sr25[] = {0x00, 0x0};
+unsigned char XGI330_sr31 = 0xc0;
+unsigned char XGI330_sr32 = 0x11;
+unsigned char XGI330_SR33 = 0x00;
+unsigned char XG40_CRCF = 0x13;
+unsigned char XG40_DRAMTypeDefinition = 0xFF;
-XGI_StStruct XGI330_SModeIDTable[]=
+struct XGI_StStruct XGI330_SModeIDTable[] =
{
{0x01,0x9208,0x01,0x00,0x10,0x00,0x00,0x01,0x00},
{0x01,0x1210,0x14,0x01,0x00,0x01,0x00,0x01,0x00},
@@ -265,7 +236,7 @@ XGI_StStruct XGI330_SModeIDTable[]=
};
-XGI_ExtStruct XGI330_EModeIDTable[]=
+struct XGI_ExtStruct XGI330_EModeIDTable[] =
{
{0x6a,0x2212,0x0407,0x3a81,0x0102,0x08,0x07,0x00,0x00,0x07,0x0e},
{0x2e,0x0a1b,0x0306,0x3a57,0x0101,0x08,0x06,0x00,0x00,0x05,0x06},
@@ -337,7 +308,7 @@ XGI_ExtStruct XGI330_EModeIDTable[]=
{0xff,0x0000,0x0000,0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00}
};
-XGI_StandTableStruct XGI330_StandTable[]=
+struct XGI_StandTableStruct XGI330_StandTable[] =
{
/* MD_0_200 */
{
@@ -775,13 +746,13 @@ XGI_StandTableStruct XGI330_StandTable[]=
}
};
-XGI_TimingHStruct XGI_TimingH[]=
+struct XGI_TimingHStruct XGI_TimingH[] =
{{{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}}};
-XGI_TimingVStruct XGI_TimingV[]=
+struct XGI_TimingVStruct XGI_TimingV[] =
{{{0x00,0x00,0x00,0x00,0x00,0x00,0x00}}};
-XGI_XG21CRT1Struct XGI_UpdateCRT1Table[]=
+struct XGI_XG21CRT1Struct XGI_UpdateCRT1Table[] =
{
{0x01,0x27,0x91,0x8f,0xc0}, /* 00 */
{0x03,0x4f,0x83,0x8f,0xc0}, /* 01 */
@@ -802,7 +773,7 @@ XGI_XG21CRT1Struct XGI_UpdateCRT1Table[]=
{0x59,0x27,0x91,0x8f,0xc0} /* 16 */
};
-XGI_CRT1TableStruct XGI_CRT1Table[]=
+struct XGI_CRT1TableStruct XGI_CRT1Table[] =
{
{{0x2d,0x28,0x90,0x2c,0x90,0x00,0x04,0x00,
0xbf,0x1f,0x9c,0x8e,0x96,0xb9,0x30}}, /* 0x0 */
@@ -950,7 +921,7 @@ XGI_CRT1TableStruct XGI_CRT1Table[]=
0x03,0xDE,0xC0,0x84,0xBF,0x04,0x90}} /* 0x47 */
};
-XGI330_CHTVRegDataStruct XGI_CHTVRegUNTSC[] = {
+struct XGI330_CHTVRegDataStruct XGI_CHTVRegUNTSC[] = {
/* Index:000h,001h,002h,004h,003h,005h,006h,007h,008h,015h,01Fh,00Ch,00Dh,00Eh,00Fh,010h */
{{ 0x4A,0x77,0xBB,0x94,0x84,0x48,0xFE,0x50,0x04,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* 00 (640x200,640x400) */
{{ 0x4A,0x77,0xBB,0x94,0x84,0x48,0xFE,0x50,0x04,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* 01 (640x350) */
@@ -961,7 +932,7 @@ XGI330_CHTVRegDataStruct XGI_CHTVRegUNTSC[] = {
{{ 0xEE,0x77,0xBB,0x66,0x87,0x32,0x01,0x5A,0x04,0x00,0x80,0x1B,0xD4,0x2F,0x6F,0x00 }}/* 06 (1024x768) ;;5/6/02 */
};
-XGI330_CHTVRegDataStruct XGI_CHTVRegONTSC[]= {
+struct XGI330_CHTVRegDataStruct XGI_CHTVRegONTSC[] = {
/* Index:000h,001h,002h,004h,003h,005h,006h,007h,008h,015h,01Fh,00Ch,00Dh,00Eh,00Fh,010h */
{{ 0x49,0x77,0xBB,0x7B,0x84,0x34,0x00,0x50,0x04,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* 00 (640x200,640x400) */
{{ 0x49,0x77,0xBB,0x7B,0x84,0x34,0x00,0x50,0x04,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* 01 (640x350) */
@@ -972,7 +943,7 @@ XGI330_CHTVRegDataStruct XGI_CHTVRegONTSC[]= {
{{ 0xED,0x77,0xBB,0x66,0x8C,0x21,0x02,0x5A,0x04,0x00,0x80,0x1F,0xA0,0x7E,0x73,0x00 }}/* 06 (1024x768) ;;5/6/02 */
};
-XGI330_CHTVRegDataStruct XGI_CHTVRegUPAL[]= {
+struct XGI330_CHTVRegDataStruct XGI_CHTVRegUPAL[] = {
/* Index:000h,001h,002h,004h,003h,005h,006h,007h,008h,015h,01Fh,00Ch,00Dh,00Eh,00Fh,010h */
{{ 0x41,0x7F,0xB7,0x34,0xAD,0x50,0x34,0x83,0x05,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* ; 00 (640x200,640x400) */
{{ 0x41,0x7F,0xB7,0x80,0x85,0x50,0x00,0x83,0x05,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* ; 01 (640x350) */
@@ -983,7 +954,7 @@ XGI330_CHTVRegDataStruct XGI_CHTVRegUPAL[]= {
{{ 0xE5,0x7F,0xB7,0x1D,0xA7,0x3E,0x04,0x5A,0x05,0x00,0x80,0x20,0x3E,0xE4,0x22,0x00 }}/* ; 06 (1024x768) ;;1/12/02 */
};
-XGI330_CHTVRegDataStruct XGI_CHTVRegOPAL[]={
+struct XGI330_CHTVRegDataStruct XGI_CHTVRegOPAL[] = {
/* Index:000,0x01,0x02,0x04,0x03,0x05,0x06,0x07,0x08,0x15,0x1F,0x0C,0x0D,0x0E,0x0F,0x10h */
{{ 0x41,0x7F,0xB7,0x36,0xAD,0x50,0x34,0x83,0x05,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* 00 (640x200,640x400) */
{{ 0x41,0x7F,0xB7,0x86,0x85,0x50,0x00,0x83,0x05,0x00,0x80,0x00,0x00,0x00,0x00,0x01 }},/* 01 (640x350) */
@@ -994,14 +965,14 @@ XGI330_CHTVRegDataStruct XGI_CHTVRegOPAL[]={
{{ 0xE4,0x7F,0xB7,0x1E,0xAF,0x29,0x37,0x5A,0x05,0x00,0x80,0x25,0x8C,0xB2,0x2A,0x00 }}/* 06 (1024x768) ;;1/12/02 */
};
-UCHAR XGI_CH7017LV1024x768[]={0x60,0x02,0x00,0x07,0x40,0xED,0xA3,
- 0xC8,0xC7,0xAC,0xE0,0x02};
-UCHAR XGI_CH7017LV1400x1050[]={0x60,0x03,0x11,0x00,0x40,0xE3,0xAD,
- 0xDB,0xF6,0xAC,0xE0,0x02};
+unsigned char XGI_CH7017LV1024x768[] = {0x60, 0x02, 0x00, 0x07, 0x40, 0xED, 0xA3,
+ 0xC8, 0xC7, 0xAC, 0xE0, 0x02};
+unsigned char XGI_CH7017LV1400x1050[] = {0x60, 0x03, 0x11, 0x00, 0x40, 0xE3, 0xAD,
+ 0xDB, 0xF6, 0xAC, 0xE0, 0x02};
/*add for new UNIVGABIOS*/
-XGI330_LCDDataStruct XGI_StLCD1024x768Data[]=
+struct XGI330_LCDDataStruct XGI_StLCD1024x768Data[] =
{
{ 62, 25, 800, 546,1344, 806},
{ 32, 15, 930, 546,1344, 806},
@@ -1012,7 +983,7 @@ XGI330_LCDDataStruct XGI_StLCD1024x768Data[]=
{ 1, 1,1344, 806,1344, 806}
};
-XGI330_LCDDataStruct XGI_ExtLCD1024x768Data[]=
+struct XGI330_LCDDataStruct XGI_ExtLCD1024x768Data[] =
{
{ 42, 25,1536, 419,1344, 806}, /* { 12, 5, 896, 512,1344, 806}, // alan 09/12/2003 */
{ 48, 25,1536, 369,1344, 806}, /* { 12, 5, 896, 510,1344, 806}, // alan 09/12/2003 */
@@ -1029,7 +1000,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1024x768Data[]=
{ 1, 1,1344, 806,1344, 806}
};
-/*XGI330_LCDDataStruct XGI_St2LCD1024x768Data[]=
+/*struct XGI330_LCDDataStruct XGI_St2LCD1024x768Data[] =
{
{ 62, 25, 800, 546,1344, 806},
{ 32, 15, 930, 546,1344, 806},
@@ -1040,7 +1011,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1024x768Data[]=
{ 1, 1,1344, 806,1344, 806}
};*/
-XGI330_LCDDataStruct XGI_CetLCD1024x768Data[]=
+struct XGI330_LCDDataStruct XGI_CetLCD1024x768Data[] =
{
{ 1,1,1344,806,1344,806 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 1,1,1344,806,1344,806 }, /* 01 (320x350,640x350) */
@@ -1051,7 +1022,7 @@ XGI330_LCDDataStruct XGI_CetLCD1024x768Data[]=
{ 1,1,1344,806,1344,806 } /* 06 (1024x768x60Hz) */
};
-XGI330_LCDDataStruct XGI_StLCD1280x1024Data[]=
+struct XGI330_LCDDataStruct XGI_StLCD1280x1024Data[] =
{
{ 22, 5, 800, 510,1650,1088},
{ 22, 5, 800, 510,1650,1088},
@@ -1063,7 +1034,7 @@ XGI330_LCDDataStruct XGI_StLCD1280x1024Data[]=
{ 1, 1,1688,1066,1688,1066}
};
-XGI330_LCDDataStruct XGI_ExtLCD1280x1024Data[]=
+struct XGI330_LCDDataStruct XGI_ExtLCD1280x1024Data[] =
{
{ 211, 60,1024, 501,1688,1066},
{ 211, 60,1024, 508,1688,1066},
@@ -1075,7 +1046,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1280x1024Data[]=
{ 1, 1,1688,1066,1688,1066}
};
-XGI330_LCDDataStruct XGI_St2LCD1280x1024Data[]=
+struct XGI330_LCDDataStruct XGI_St2LCD1280x1024Data[] =
{
{ 22, 5, 800, 510,1650,1088},
{ 22, 5, 800, 510,1650,1088},
@@ -1087,7 +1058,7 @@ XGI330_LCDDataStruct XGI_St2LCD1280x1024Data[]=
{ 1, 1,1688,1066,1688,1066}
};
-XGI330_LCDDataStruct XGI_CetLCD1280x1024Data[]=
+struct XGI330_LCDDataStruct XGI_CetLCD1280x1024Data[] =
{
{ 1,1,1688,1066,1688,1066 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1,1,1688,1066,1688,1066 }, /* 01 (320x350,640x350) */
@@ -1100,7 +1071,7 @@ XGI330_LCDDataStruct XGI_CetLCD1280x1024Data[]=
{ 1,1,1688,1066,1688,1066 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataStruct XGI_StLCD1400x1050Data[]=
+struct XGI330_LCDDataStruct XGI_StLCD1400x1050Data[] =
{
{ 211,100,2100,408,1688,1066 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 211,64,1536,358,1688,1066 }, /* 01 (320x350,640x350) */
@@ -1113,7 +1084,7 @@ XGI330_LCDDataStruct XGI_StLCD1400x1050Data[]=
{ 1,1,1688,1066,1688,1066 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataStruct XGI_ExtLCD1400x1050Data[]=
+struct XGI330_LCDDataStruct XGI_ExtLCD1400x1050Data[] =
{
{ 211,100,2100,408,1688,1066 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 211,64,1536,358,1688,1066 }, /* 01 (320x350,640x350) */
@@ -1126,7 +1097,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1400x1050Data[]=
{ 1,1,1688,1066,1688,1066 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataStruct XGI_ExtLCD1600x1200Data[]=
+struct XGI330_LCDDataStruct XGI_ExtLCD1600x1200Data[] =
{
{ 4,1,1620,420,2160,1250 }, /* { 3,1,2160,425,2160,1250 }, // 00 (320x200,320x400,640x200,640x400) // alan 10/14/2003 */
{ 27,7,1920,375,2160,1250 }, /* 01 (320x350,640x350) */
@@ -1140,7 +1111,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1600x1200Data[]=
{ 1,1,2160,1250,2160,1250 } /* 09 (1600x1200x60Hz) ;302lv */
};
-XGI330_LCDDataStruct XGI_StLCD1600x1200Data[]=
+struct XGI330_LCDDataStruct XGI_StLCD1600x1200Data[] =
{
{ 27,4,800,500,2160,1250 },/* 00 (320x200,320x400,640x200,640x400) */
{ 27,4,800,500,2160,1250 },/* 01 (320x350,640x350) */
@@ -1154,7 +1125,7 @@ XGI330_LCDDataStruct XGI_StLCD1600x1200Data[]=
{ 1,1,2160,1250,2160,1250 } /* 09 (1600x1200) */
};
-XGI330_LCDDataStruct XGI_CetLCD1400x1050Data[]=
+struct XGI330_LCDDataStruct XGI_CetLCD1400x1050Data[] =
{
{ 1,1,1688,1066,1688,1066 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1,1,1688,1066,1688,1066 }, /* 01 (320x350,640x350) */
@@ -1167,7 +1138,7 @@ XGI330_LCDDataStruct XGI_CetLCD1400x1050Data[]=
{ 1,1,1688,1066,1688,1066 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataStruct XGI_NoScalingData[]=
+struct XGI330_LCDDataStruct XGI_NoScalingData[] =
{
{ 1, 1, 800, 449, 800, 449},
{ 1, 1, 800, 449, 800, 449},
@@ -1179,7 +1150,7 @@ XGI330_LCDDataStruct XGI_NoScalingData[]=
{ 1, 1,1688,1066,1688,1066}
};
-XGI330_LCDDataStruct XGI_ExtLCD1024x768x75Data[]=
+struct XGI330_LCDDataStruct XGI_ExtLCD1024x768x75Data[] =
{
{42,25,1536,419,1344,806 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{48,25,1536,369,1344,806 }, /* ; 01 (320x350,640x350) */
@@ -1190,7 +1161,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1024x768x75Data[]=
{1,1,1312,800,1312,800 } /* ; 06 (1024x768x75Hz) */
};
-XGI330_LCDDataStruct XGI_StLCD1024x768x75Data[]=
+struct XGI330_LCDDataStruct XGI_StLCD1024x768x75Data[] =
{
{42,25,1536,419,1344,806 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{48,25,1536,369,1344,806 }, /* ; 01 (320x350,640x350) */
@@ -1201,7 +1172,7 @@ XGI330_LCDDataStruct XGI_StLCD1024x768x75Data[]=
{1,1,1312,800,1312,800 } /* ; 06 (1024x768x75Hz) */
};
-XGI330_LCDDataStruct XGI_CetLCD1024x768x75Data[]=
+struct XGI330_LCDDataStruct XGI_CetLCD1024x768x75Data[] =
{
{1,1,1312,800,1312,800}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1,1,1312,800,1312,800}, /* ; 01 (320x350,640x350) */
@@ -1212,7 +1183,7 @@ XGI330_LCDDataStruct XGI_CetLCD1024x768x75Data[]=
{1,1,1312,800,1312,800} /* ; 06 (1024x768x75Hz) */
};
-XGI330_LCDDataStruct XGI_ExtLCD1280x1024x75Data[]=
+struct XGI330_LCDDataStruct XGI_ExtLCD1280x1024x75Data[] =
{
{211,60,1024,501,1688,1066 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{211,60,1024,508,1688,1066 }, /* ; 01 (320x350,640x350) */
@@ -1224,7 +1195,7 @@ XGI330_LCDDataStruct XGI_ExtLCD1280x1024x75Data[]=
{1,1,1688,1066,1688,1066 } /* ; 07 (1280x1024x75Hz) */
};
-XGI330_LCDDataStruct XGI_StLCD1280x1024x75Data[]=
+struct XGI330_LCDDataStruct XGI_StLCD1280x1024x75Data[] =
{
{211,60,1024,501,1688,1066 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{211,60,1024,508,1688,1066 }, /* ; 01 (320x350,640x350) */
@@ -1236,7 +1207,7 @@ XGI330_LCDDataStruct XGI_StLCD1280x1024x75Data[]=
{1,1,1688,1066,1688,1066 } /* ; 07 (1280x1024x75Hz) */
};
-XGI330_LCDDataStruct XGI_CetLCD1280x1024x75Data[]=
+struct XGI330_LCDDataStruct XGI_CetLCD1280x1024x75Data[] =
{
{1,1,1688,1066,1688,1066}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1,1,1688,1066,1688,1066}, /* ; 01 (320x350,640x350) */
@@ -1248,7 +1219,7 @@ XGI330_LCDDataStruct XGI_CetLCD1280x1024x75Data[]=
{1,1,1688,1066,1688,1066} /* ; 07 (1280x1024x75Hz) */
};
-XGI330_LCDDataStruct XGI_NoScalingDatax75[]=
+struct XGI330_LCDDataStruct XGI_NoScalingDatax75[] =
{
{1,1,800,449,800,449 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{1,1,800,449,800,449 }, /* ; 01 (320x350,640x350) */
@@ -1263,7 +1234,7 @@ XGI330_LCDDataStruct XGI_NoScalingDatax75[]=
{1,1,1688,806,1688,806 } /* ; 0A (1280x768x75Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768Data[] =
{
{ 9,1057,0, 771 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 9,1057,0, 771 }, /* ; 01 (320x350,640x350) */
@@ -1274,7 +1245,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768Data[]=
{ 9,1057,805, 770 } /* ; 06 (1024x768x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDes1024x768Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDes1024x768Data[] =
{
{ 9,1057,737,703 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 9,1057,686,651 }, /* ; 01 (320x350,640x350) */
@@ -1285,7 +1256,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDes1024x768Data[]=
{ 9,1057,805,770 } /* ; 06 (1024x768x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768Data[]=
+struct XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768Data[] =
{
{ 1152,856,622,587 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 1152,856,597,562 }, /* ; 01 (320x350,640x350) */
@@ -1296,7 +1267,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768Data[]=
{ 0,1048,805,770 } /* ; 06 (1024x768x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024Data[] =
{
{ 18,1346,981,940 },/* 00 (320x200,320x400,640x200,640x400) */
{ 18,1346,926,865 },/* 01 (320x350,640x350) */
@@ -1308,7 +1279,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024Data[]=
{ 18,1346,1065,1024 }/* 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024Data[] =
{
{ 18,1346,970,907 },/* 00 (320x200,320x400,640x200,640x400) */
{ 18,1346,917,854 },/* 01 (320x350,640x350) */
@@ -1320,7 +1291,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024Data[]=
{ 18,1346,1065,1024 }/* 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024Data[]=
+struct XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024Data[] =
{
{ 1368,1008,752,711 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1368,1008,729,688 }, /* 01 (320x350,640x350) */
@@ -1332,7 +1303,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024Data[]=
{ 18,1346,1065,1024 } /* 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024Data[] =
{
{ 9,1337,981,940 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 9,1337,926,884 }, /* ; 01 (320x350,640x350) alan, 2003/09/30 */
@@ -1344,7 +1315,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024Data[]=
{ 9,1337,1065,1024 } /* ; 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024Data[] =
{
{ 9,1337,970,907 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 9,1337,917,854 }, /* ; 01 (320x350,640x350) */
@@ -1356,7 +1327,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024Data[]=
{ 9,1337,1065,1024 } /* ; 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024Data[]=
+struct XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024Data[] =
{
{ 1368,1008,752,711 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1368,1008,729,688 }, /* 01 (320x350,640x350) */
@@ -1368,7 +1339,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024Data[]=
{ 9,1337,1065,1024 } /* 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDLDes1400x1050Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1400x1050Data[] =
{
{ 18,1464,0,1051 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 18,1464,0,1051 }, /* 01 (320x350,640x350) */
@@ -1381,7 +1352,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDLDes1400x1050Data[]=
{ 18,1464,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1400x1050Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1400x1050Data[] =
{
{ 18,1464,0,1051 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 18,1464,0,1051 }, /* 01 (320x350,640x350) */
@@ -1394,7 +1365,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1400x1050Data[]=
{ 18,1464,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDes1400x1050Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDes1400x1050Data[] =
{
{ 9,1455,0,1051 },/* 00 (320x200,320x400,640x200,640x400) */
{ 9,1455,0,1051 },/* 01 (320x350,640x350) */
@@ -1407,7 +1378,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDes1400x1050Data[]=
{ 9,1455,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDes1400x1050Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1400x1050Data[] =
{
{ 9,1455,0,1051 },/* 00 (320x200,320x400,640x200,640x400) */
{ 9,1455,0,1051 },/* 01 (320x350,640x350) */
@@ -1420,7 +1391,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDes1400x1050Data[]=
{ 9,1455,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDes1400x1050Data[]=
+struct XGI330_LCDDataDesStruct XGI_CetLCDDes1400x1050Data[] =
{
{ 1308,1068,781,766 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1308,1068,781,766 }, /* 01 (320x350,640x350) */
@@ -1433,7 +1404,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDes1400x1050Data[]=
{ 18,1464,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDes1400x1050Data2[]=
+struct XGI330_LCDDataDesStruct XGI_CetLCDDes1400x1050Data2[] =
{
{ 0,1448,0,1051 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 0,1448,0,1051 }, /* 01 (320x350,640x350) */
@@ -1444,7 +1415,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDes1400x1050Data2[]=
-XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1600x1200Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1600x1200Data[] =
{
{ 18,1682,0,1201 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 18,1682,0,1201 }, /* 01 (320x350,640x350) */
@@ -1458,7 +1429,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1600x1200Data[]=
{ 18,1682,0,1201 } /* 09 (1600x1200x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDLDes1600x1200Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1600x1200Data[] =
{
{ 18,1682,1150,1101 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 18,1682,1083,1034 }, /* 01 (320x350,640x350) */
@@ -1472,7 +1443,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDLDes1600x1200Data[]=
{ 18,1682,0,1201 } /* 09 (1600x1200x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDes1600x1200Data[]=
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1600x1200Data[] =
{
{ 9,1673,0,1201 },/* 00 (320x200,320x400,640x200,640x400) */
{ 9,1673,0,1201 },/* 01 (320x350,640x350) */
@@ -1486,7 +1457,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDes1600x1200Data[]=
{ 9,1673,0,1201 } /* 09 (1600x1200x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDes1600x1200Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDes1600x1200Data[] =
{
{ 9,1673,1150,1101 },/* 00 (320x200,320x400,640x200,640x400) */
{ 9,1673,1083,1034 },/* 01 (320x350,640x350) */
@@ -1500,7 +1471,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDes1600x1200Data[]=
{ 9,1673,0,1201 } /* 09 (1600x1200x60Hz) */
};
-XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[]=
+struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] =
{
{ 9,657,448,405,96,2 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 9,657,448,355,96,2 }, /* 01 (320x350,640x350) */
@@ -1515,7 +1486,7 @@ XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[]=
{ 9,1337,0,771,112,6 } /* 0A (1280x768x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768x75Data[]= /* ;;1024x768x75Hz */
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768x75Data[] = /* ;;1024x768x75Hz */
{
{9,1049,0,769}, /* ; 00 (320x200,320x400,640x200,640x400) */
{9,1049,0,769}, /* ; 01 (320x350,640x350) */
@@ -1526,7 +1497,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDes1024x768x75Data[]= /* ;;1024x768x75Hz */
{9,1049,0,769} /* ; 06 (1024x768x75Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDes1024x768x75Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDes1024x768x75Data[] =
{
{9,1049,0,769}, /* ; 00 (320x200,320x400,640x200,640x400) */
{9,1049,0,769}, /* ; 01 (320x350,640x350) */
@@ -1537,7 +1508,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDes1024x768x75Data[]=
{9,1049,0,769} /* ; 06 (1024x768x75Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768x75Data[]= /* ;;1024x768x75Hz */
+struct XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768x75Data[] = /* ;;1024x768x75Hz */
{
{1152,856,622,587}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1152,856,597,562}, /* ; 01 (320x350,640x350) */
@@ -1548,7 +1519,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDes1024x768x75Data[]= /* ;;1024x768x75Hz */
{9,1049,0,769} /* ; 06 (1024x768x75Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024x75Data[]= /* ;;1280x1024x75Hz */
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024x75Data[] = /* ;;1280x1024x75Hz */
{
{18,1314,0,1025 },/* ; 00 (320x200,320x400,640x200,640x400) */
{18,1314,0,1025 },/* ; 01 (320x350,640x350) */
@@ -1560,7 +1531,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDLDes1280x1024x75Data[]= /* ;;1280x10
{18,1314,0,1025 }/* ; 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024x75Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024x75Data[] =
{
{18,1314,0,1025 },/* ; 00 (320x200,320x400,640x200,640x400) */
{18,1314,0,1025 },/* ; 01 (320x350,640x350) */
@@ -1572,7 +1543,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDLDes1280x1024x75Data[]=
{18,1314,0,1025 }/* ; 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024x75Data[]= /* 1280x1024x75Hz */
+struct XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024x75Data[] = /* 1280x1024x75Hz */
{
{1368,1008,752,711}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1368,1008,729,688}, /* ; 01 (320x350,640x350) */
@@ -1584,7 +1555,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDLDes1280x1024x75Data[]= /* 1280x1024x75Hz */
{18,1314,0,1025} /* ; 07 (1280x1024x75Hz) */
};
-XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024x75Data[]= /* ;;1280x1024x75Hz */
+struct XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024x75Data[] = /* ;;1280x1024x75Hz */
{
{9,1305,0,1025},/* ; 00 (320x200,320x400,640x200,640x400) */
{9,1305,0,1025},/* ; 01 (320x350,640x350) */
@@ -1596,7 +1567,7 @@ XGI330_LCDDataDesStruct XGI_ExtLCDDes1280x1024x75Data[]= /* ;;1280x1024
{9,1305,0,1025} /* ; 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024x75Data[]=
+struct XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024x75Data[] =
{
{9,1305,0,1025},/* ; 00 (320x200,320x400,640x200,640x400) */
{9,1305,0,1025},/* ; 01 (320x350,640x350) */
@@ -1608,7 +1579,7 @@ XGI330_LCDDataDesStruct XGI_StLCDDes1280x1024x75Data[]=
{9,1305,0,1025} /* ; 07 (1280x1024x60Hz) */
};
-XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024x75Data[]= /* 1280x1024x75Hz */
+struct XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024x75Data[] = /* 1280x1024x75Hz */
{
{1368,1008,752,711}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1368,1008,729,688}, /* ; 01 (320x350,640x350) */
@@ -1620,7 +1591,7 @@ XGI330_LCDDataDesStruct XGI_CetLCDDes1280x1024x75Data[]= /* 1280x1024x75Hz */
{9,1305,0,1025} /* ; 07 (1280x1024x75Hz) */
};
-XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[]= /* Scaling LCD 75Hz */
+struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = /* Scaling LCD 75Hz */
{
{9,657,448,405,96,2}, /* ; 00 (320x200,320x400,640x200,640x400) */
{9,657,448,355,96,2}, /* ; 01 (320x350,640x350) */
@@ -1635,7 +1606,7 @@ XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[]= /* Scaling LCD 75Hz */
{9,1337,0,771,112,6} /* ; 0A (1280x768x60Hz) */
};
-XGI330_TVDataStruct XGI_StPALData[]=
+struct XGI330_TVDataStruct XGI_StPALData[] =
{
{ 1, 1, 864, 525,1270, 400, 100, 0, 760},
{ 1, 1, 864, 525,1270, 350, 100, 0, 760},
@@ -1645,7 +1616,7 @@ XGI330_TVDataStruct XGI_StPALData[]=
{ 1, 1, 864, 525,1270, 600, 50, 0, 0}
};
-XGI330_TVDataStruct XGI_ExtPALData[]=
+struct XGI330_TVDataStruct XGI_ExtPALData[] =
{
{ 2, 1,1080, 463,1270, 500, 50, 0, 50},
{ 15, 7,1152, 413,1270, 500, 50, 0, 50},
@@ -1657,7 +1628,7 @@ XGI330_TVDataStruct XGI_ExtPALData[]=
{ 3, 2,1080, 619,1270, 540, 438, 0, 438}
};
-XGI330_TVDataStruct XGI_StNTSCData[]=
+struct XGI330_TVDataStruct XGI_StNTSCData[] =
{
{ 1, 1, 858, 525,1270, 400, 50, 0, 760},
{ 1, 1, 858, 525,1270, 350, 50, 0, 640},
@@ -1666,7 +1637,7 @@ XGI330_TVDataStruct XGI_StNTSCData[]=
{ 1, 1, 858, 525,1270, 480, 0, 0, 760}
};
-XGI330_TVDataStruct XGI_ExtNTSCData[]=
+struct XGI330_TVDataStruct XGI_ExtNTSCData[] =
{
{ 9, 5, 1001, 453,1270, 420, 171, 0, 171},
{ 12, 5, 858, 403,1270, 420, 171, 0, 171},
@@ -1679,7 +1650,7 @@ XGI330_TVDataStruct XGI_ExtNTSCData[]=
{ 3, 2,1001, 533,1270, 420, 0, 0, 0}
};
-XGI330_TVDataStruct XGI_St1HiTVData[]=
+struct XGI330_TVDataStruct XGI_St1HiTVData[] =
{
{ 1,1,892,563,690,800,0,0,0 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1,1,892,563,690,700,0,0,0 }, /* 01 (320x350,640x350) */
@@ -1689,7 +1660,7 @@ XGI330_TVDataStruct XGI_St1HiTVData[]=
{ 8,5,1050,683,1648,960,0x150,1,0 } /* 05 (400x300,800x600) */
};
-XGI330_TVDataStruct XGI_St2HiTVData[]=
+struct XGI330_TVDataStruct XGI_St2HiTVData[] =
{
{ 3,1,840,483,1648,960,0x032,0,0 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1,1,892,563,690,700,0,0,0 }, /* 01 (320x350,640x350) */
@@ -1700,7 +1671,7 @@ XGI330_TVDataStruct XGI_St2HiTVData[]=
};
-XGI330_TVDataStruct XGI_ExtHiTVData[]=
+struct XGI330_TVDataStruct XGI_ExtHiTVData[] =
{
{ 6,1,840,563,1632,960,0,0,0 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 3,1,960,563,1632,960,0,0,0 }, /* 01 (320x350,640x350) */
@@ -1716,7 +1687,7 @@ XGI330_TVDataStruct XGI_ExtHiTVData[]=
};
-XGI330_TVDataStruct XGI_ExtYPbPr525iData[]=
+struct XGI330_TVDataStruct XGI_ExtYPbPr525iData[] =
{
{ 9, 5, 1001, 453,1270, 420, 171, 0, 171},
{ 12, 5, 858, 403,1270, 420, 171, 0, 171},
@@ -1729,7 +1700,7 @@ XGI330_TVDataStruct XGI_ExtYPbPr525iData[]=
{ 3, 2,1001, 533,1250, 420, 0, 0, 0}
};
-XGI330_TVDataStruct XGI_StYPbPr525iData[]=
+struct XGI330_TVDataStruct XGI_StYPbPr525iData[] =
{
{ 1, 1, 858, 525,1270, 400, 50, 0, 760},
{ 1, 1, 858, 525,1270, 350, 50, 0, 640},
@@ -1738,7 +1709,7 @@ XGI330_TVDataStruct XGI_StYPbPr525iData[]=
{ 1, 1, 858, 525,1270, 480, 0, 0, 760},
};
-XGI330_TVDataStruct XGI_ExtYPbPr525pData[]=
+struct XGI330_TVDataStruct XGI_ExtYPbPr525pData[] =
{
{ 9, 5, 1001, 453,1270, 420, 171, 0, 171},
{ 12, 5, 858, 403,1270, 420, 171, 0, 171},
@@ -1751,7 +1722,7 @@ XGI330_TVDataStruct XGI_ExtYPbPr525pData[]=
{ 3, 2,1001, 533,1270, 420, 0, 0, 0}
};
-XGI330_TVDataStruct XGI_StYPbPr525pData[]=
+struct XGI330_TVDataStruct XGI_StYPbPr525pData[] =
{
{ 1, 1,1716, 525,1270, 400, 50, 0, 760},
{ 1, 1,1716, 525,1270, 350, 50, 0, 640},
@@ -1760,7 +1731,7 @@ XGI330_TVDataStruct XGI_StYPbPr525pData[]=
{ 1, 1,1716, 525,1270, 480, 0, 0, 760},
};
-XGI330_TVDataStruct XGI_ExtYPbPr750pData[]=
+struct XGI330_TVDataStruct XGI_ExtYPbPr750pData[] =
{
{ 3, 1, 935, 470,1130, 680, 50, 0, 0}, /* 00 (320x200,320x400,640x200,640x400) */
{ 24, 7, 935, 420,1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */
@@ -1775,7 +1746,7 @@ XGI330_TVDataStruct XGI_ExtYPbPr750pData[]=
{ 10, 9,1320, 830,1130, 640, 50, 0, 0}
};
-XGI330_TVDataStruct XGI_StYPbPr750pData[]=
+struct XGI330_TVDataStruct XGI_StYPbPr750pData[] =
{
{ 1, 1,1650, 750,1280, 400, 50, 0, 760},
{ 1, 1,1650, 750,1280, 350, 50, 0, 640},
@@ -1784,7 +1755,7 @@ XGI330_TVDataStruct XGI_StYPbPr750pData[]=
{ 1, 1,1650, 750,1280, 480, 0, 0, 760},
};
-UCHAR XGI330_NTSCTiming[] = {
+unsigned char XGI330_NTSCTiming[] = {
0x17,0x1d,0x03,0x09,0x05,0x06,0x0c,0x0c,
0x94,0x49,0x01,0x0a,0x06,0x0d,0x04,0x0a,
0x06,0x14,0x0d,0x04,0x0a,0x00,0x85,0x1b,
@@ -1794,7 +1765,7 @@ UCHAR XGI330_NTSCTiming[] = {
0x60,0x80,0x14,0x90,0x8c,0x60,0x14,0x50,
0x00,0x40,0x44,0x00,0xdb,0x02,0x3b,0x00};
-UCHAR XGI330_PALTiming[] = {
+unsigned char XGI330_PALTiming[] = {
0x21,0x5A,0x35,0x6e,0x04,0x38,0x3d,0x70,
0x94,0x49,0x01,0x12,0x06,0x3e,0x35,0x6d,
0x06,0x14,0x3e,0x35,0x6d,0x00,0x45,0x2b,
@@ -1804,7 +1775,7 @@ UCHAR XGI330_PALTiming[] = {
0x60,0x80,0x14,0x90,0x8c,0x60,0x14,0x63,
0x00,0x40,0x3e,0x00,0xe1,0x02,0x28,0x00};
-UCHAR XGI330_HiTVExtTiming[] =
+unsigned char XGI330_HiTVExtTiming[] =
{
0x2D,0x60,0x2C,0x5F,0x08,0x31,0x3A,0x64,
0x28,0x02,0x01,0x3D,0x06,0x3E,0x35,0x6D,
@@ -1818,7 +1789,7 @@ UCHAR XGI330_HiTVExtTiming[] =
};
-UCHAR XGI330_HiTVSt1Timing[] =
+unsigned char XGI330_HiTVSt1Timing[] =
{
0x32,0x65,0x2C,0x5F,0x08,0x31,0x3A,0x65,
0x28,0x02,0x01,0x3D,0x06,0x3E,0x35,0x6D,
@@ -1831,7 +1802,7 @@ UCHAR XGI330_HiTVSt1Timing[] =
0x0E,0x00,0xfc,0xff,0x2d,0x00
};
-UCHAR XGI330_HiTVSt2Timing[] =
+unsigned char XGI330_HiTVSt2Timing[] =
{
0x32,0x65,0x2C,0x5F,0x08,0x31,0x3A,0x64,
0x28,0x02,0x01,0x3D,0x06,0x3E,0x35,0x6D,
@@ -1844,7 +1815,7 @@ UCHAR XGI330_HiTVSt2Timing[] =
0x27,0x00,0xFC,0xff,0x6a,0x00
};
-UCHAR XGI330_HiTVTextTiming[] =
+unsigned char XGI330_HiTVTextTiming[] =
{
0x32,0x65,0x2C,0x5F,0x08,0x31,0x3A,0x65,
0x28,0x02,0x01,0x3D,0x06,0x3E,0x35,0x6D,
@@ -1857,7 +1828,7 @@ UCHAR XGI330_HiTVTextTiming[] =
0x11,0x00,0xFC,0xFF,0x32,0x00
};
-UCHAR XGI330_YPbPr750pTiming[] =
+unsigned char XGI330_YPbPr750pTiming[] =
{
0x30,0x1d,0xe8,0x09,0x09,0xed,0x0c,0x0c,
0x98,0x0a,0x01,0x0c,0x06,0x0d,0x04,0x0a,
@@ -1870,7 +1841,7 @@ UCHAR XGI330_YPbPr750pTiming[] =
0x11,0x00,0xfc,0xff,0x32,0x00
};
-UCHAR XGI330_YPbPr525pTiming[] =
+unsigned char XGI330_YPbPr525pTiming[] =
{
0x3E,0x11,0x06,0x09,0x0b,0x0c,0x0c,0x0c,
0x98,0x0a,0x01,0x0d,0x06,0x0d,0x04,0x0a,
@@ -1883,7 +1854,7 @@ UCHAR XGI330_YPbPr525pTiming[] =
0x11,0x00,0xFC,0xFF,0x32,0x00
};
-UCHAR XGI330_YPbPr525iTiming[] =
+unsigned char XGI330_YPbPr525iTiming[] =
{
0x1B,0x21,0x03,0x09,0x05,0x06,0x0C,0x0C,
0x94,0x49,0x01,0x0A,0x06,0x0D,0x04,0x0A,
@@ -1897,7 +1868,7 @@ UCHAR XGI330_YPbPr525iTiming[] =
};
-UCHAR XGI330_HiTVGroup3Data[] =
+unsigned char XGI330_HiTVGroup3Data[] =
{
0x00,0x1A,0x22,0x63,0x62,0x22,0x08,0x5F,
0x05,0x21,0xB2,0xB2,0x55,0x77,0x2A,0xA6,
@@ -1909,7 +1880,7 @@ UCHAR XGI330_HiTVGroup3Data[] =
0x18,0x05,0x18,0x05,0x4C,0xA8,0x01
};
-UCHAR XGI330_HiTVGroup3Simu[] =
+unsigned char XGI330_HiTVGroup3Simu[] =
{
0x00,0x1A,0x22,0x63,0x62,0x22,0x08,0x95,
0xDB,0x20,0xB8,0xB8,0x55,0x47,0x2A,0xA6,
@@ -1921,7 +1892,7 @@ UCHAR XGI330_HiTVGroup3Simu[] =
0x18,0x05,0x18,0x05,0x4C,0xA8,0x01
};
-UCHAR XGI330_HiTVGroup3Text[] =
+unsigned char XGI330_HiTVGroup3Text[] =
{
0x00,0x1A,0x22,0x63,0x62,0x22,0x08,0xA7,
0xF5,0x20,0xCE,0xCE,0x55,0x47,0x2A,0xA6,
@@ -1933,7 +1904,7 @@ UCHAR XGI330_HiTVGroup3Text[] =
0x18,0x05,0x18,0x05,0x4C,0xA8,0x01
};
-UCHAR XGI330_Ren525pGroup3[] =
+unsigned char XGI330_Ren525pGroup3[] =
{
0x00,0x14,0x15,0x25,0x55,0x15,0x0b,0x13,
0xB1,0x41,0x62,0x62,0xFF,0xF4,0x45,0xa6,
@@ -1945,7 +1916,7 @@ UCHAR XGI330_Ren525pGroup3[] =
0x1a,0x1F,0x25,0x2a,0x4C,0xAA,0x01
};
-UCHAR XGI330_Ren750pGroup3[] =
+unsigned char XGI330_Ren750pGroup3[] =
{
0x00,0x14,0x15,0x25,0x55,0x15,0x0b,0x7a,
0x54,0x41,0xE7,0xE7,0xFF,0xF4,0x45,0xa6,
@@ -1957,7 +1928,7 @@ UCHAR XGI330_Ren750pGroup3[] =
0x18,0x1D,0x23,0x28,0x4C,0xAA,0x01
};
-XGI_PanelDelayTblStruct XGI330_PanelDelayTbl[]=
+struct XGI_PanelDelayTblStruct XGI330_PanelDelayTbl[] =
{
{{0x00,0x00}},
{{0x00,0x00}},
@@ -1977,7 +1948,7 @@ XGI_PanelDelayTblStruct XGI330_PanelDelayTbl[]=
{{0x00,0x00}}
};
-XGI330_LVDSDataStruct XGI330_LVDS320x480Data_1[]=
+struct XGI330_LVDSDataStruct XGI330_LVDS320x480Data_1[] =
{
{848, 433,400,525},
{848, 389,400,525},
@@ -1990,7 +1961,7 @@ XGI330_LVDSDataStruct XGI330_LVDS320x480Data_1[]=
{800, 525,1000, 635}
};
-XGI330_LVDSDataStruct XGI330_LVDS800x600Data_1[]=
+struct XGI330_LVDSDataStruct XGI330_LVDS800x600Data_1[] =
{
{848, 433,1060, 629},
{848, 389,1060, 629},
@@ -2003,7 +1974,7 @@ XGI330_LVDSDataStruct XGI330_LVDS800x600Data_1[]=
{800, 525,1000, 635}
};
-XGI330_LVDSDataStruct XGI330_LVDS800x600Data_2[]=
+struct XGI330_LVDSDataStruct XGI330_LVDS800x600Data_2[] =
{
{1056, 628,1056, 628},
{1056, 628,1056, 628},
@@ -2016,7 +1987,7 @@ XGI330_LVDSDataStruct XGI330_LVDS800x600Data_2[]=
{800, 525,1000, 635}
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1[] =
{
{ 960 , 438 , 1344 , 806 } , /* 00 (320x200,320x400,640x200,640x400) */
{ 960 , 388 , 1344 , 806 } , /* 01 (320x350,640x350) */
@@ -2028,7 +1999,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1[]=
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Data_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Data_2[] =
{
{1344, 806,1344, 806},
{1344, 806,1344, 806},
@@ -2041,7 +2012,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Data_2[]=
{800, 525,1280, 813}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_1[] =
{
{1048, 442,1688, 1066},
{1048, 392,1688, 1066},
@@ -2053,7 +2024,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_1[]=
{1688, 1066,1688, 1066}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2[] =
{
{1344, 806,1344, 806},
{1344, 806,1344, 806},
@@ -2066,7 +2037,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2[]=
{800, 525,1280, 813}
};
/*
-XGI330_LVDSDataStruct XGI_LVDS1280x768Data_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x768Data_1[] =
{
{768,438,1408,806},
{768,388,1408,806},
@@ -2079,7 +2050,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x768Data_1[]=
{1408,806,1408,806}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x768Data_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x768Data_2[] =
{
{1408, 806,1408, 806},
{1408, 806,1408, 806},
@@ -2092,7 +2063,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x768Data_2[]=
{1408, 806,1408, 806}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x768NData_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x768NData_1[] =
{
{704, 438,1344, 806},
{704, 388,1344, 806},
@@ -2105,7 +2076,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x768NData_1[]=
{1344, 806,1344, 806}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x768NData_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x768NData_2[] =
{
{1344, 806,1344, 806},
{1344, 806,1344, 806},
@@ -2118,7 +2089,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x768NData_2[]=
{1344, 806,1344, 806}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x768SData_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x768SData_1[] =
{
{1048,438,1688,806},
{1048,388,1688,806},
@@ -2131,7 +2102,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x768SData_1[]=
{1688,806,1688,806}
};
-XGI330_LVDSDataStruct XGI_LVDS1280x768SData_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x768SData_2[] =
{
{1688,806,1688,806},
{1688,806,1688,806},
@@ -2144,7 +2115,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x768SData_2[]=
{1688,806,1688,806}
};
*/
-XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_1[] =
{
{928,416,1688,1066},
{928,366,1688,1066},
@@ -2157,7 +2128,7 @@ XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_1[]=
{1688,1066,1688,1066}
};
-XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_2[] =
{
{1688,1066,1688,1066},
{1688,1066,1688,1066},
@@ -2170,7 +2141,7 @@ XGI330_LVDSDataStruct XGI_LVDS1400x1050Data_2[]=
{1688,1066,1688,1066}
};
-XGI330_LVDSDataStruct XGI_LVDS1600x1200Data_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1600x1200Data_1[] =
{ /* ;;[ycchen] 12/05/02 LCDHTxLCDVT=2048x1320 */
{ 1088,520,2048,1320 },/* 00 (320x200,320x400,640x200,640x400) */
{ 1088,470,2048,1320 },/* 01 (320x350,640x350) */
@@ -2184,7 +2155,7 @@ XGI330_LVDSDataStruct XGI_LVDS1600x1200Data_1[]=
{ 2048,1320,2048,1320 } /* 09 (1600x1200) */
};
-XGI330_LVDSDataStruct XGI_LVDSNoScalingData[]=
+struct XGI330_LVDSDataStruct XGI_LVDSNoScalingData[] =
{
{ 800,449,800,449 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 800,449,800,449 }, /* 01 (320x350,640x350) */
@@ -2199,7 +2170,7 @@ XGI330_LVDSDataStruct XGI_LVDSNoScalingData[]=
{ 1688,806,1688,806 } /* 0A (1280x768x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1x75[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1x75[] =
{
{960,438,1312,800 }, /* 00 (320x200,320x400,640x200,640x400) */
{960,388,1312,800 }, /* 01 (320x350,640x350) */
@@ -2211,7 +2182,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Data_1x75[]=
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Data_2x75[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Data_2x75[] =
{
{1312,800,1312,800}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1312,800,1312,800}, /* ; 01 (320x350,640x350) */
@@ -2222,7 +2193,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Data_2x75[]=
{1312,800,1312,800}, /* ; 06 (512x384,1024x768) */
};
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_1x75[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_1x75[] =
{
{1048,442,1688,1066 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{1048,392,1688,1066 }, /* ; 01 (320x350,640x350) */
@@ -2234,7 +2205,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_1x75[]=
{1688,1066,1688,1066 }, /* ; 06; 07 (640x512,1280x1024) */
};
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2x75[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2x75[] =
{
{1688,1066,1688,1066 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{1688,1066,1688,1066 }, /* ; 01 (320x350,640x350) */
@@ -2246,7 +2217,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Data_2x75[]=
{1688,1066,1688,1066 }, /* ; 06; 07 (640x512,1280x1024) */
};
-XGI330_LVDSDataStruct XGI_LVDSNoScalingDatax75[]=
+struct XGI330_LVDSDataStruct XGI_LVDSNoScalingDatax75[] =
{
{800,449,800,449 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{800,449,800,449 }, /* ; 01 (320x350,640x350) */
@@ -2261,7 +2232,7 @@ XGI330_LVDSDataStruct XGI_LVDSNoScalingDatax75[]=
{1688,806,1688,806 }, /* ; 0A (1280x768x75Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Des_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Des_1[] =
{
{ 0,1048, 0, 771 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 0,1048, 0, 771 }, /* 01 (320x350,640x350) */
@@ -2272,7 +2243,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Des_1[]=
{ 0,1048, 805, 770 } /* 06 (1024x768x60Hz) */
} ;
-XGI330_LVDSDataStruct XGI_LVDS1024x768Des_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Des_2[] =
{
{ 1142, 856, 622, 587 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1142, 856, 597, 562 }, /* 01 (320x350,640x350) */
@@ -2283,7 +2254,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Des_2[]=
{ 0,1048, 805, 771 } /* 06 (1024x768x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Des_3[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Des_3[] =
{
{ 320, 24, 622, 587 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 320, 24, 597, 562 }, /* 01 (320x350,640x350) */
@@ -2292,7 +2263,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Des_3[]=
{ 320, 24, 722, 687 } /* 04 (640x480x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_1[] =
{
{ 0,1328, 0, 1025 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 0,1328, 0, 1025 }, /* 01 (320x350,640x350) */
@@ -2305,7 +2276,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_1[]=
};
/* The Display setting for DE Mode Panel */
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_2[] =
{
{ 1368,1008,752,711 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1368,1008,729,688 }, /* 01 (320x350,640x350) */
@@ -2317,7 +2288,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_2[]=
{ 0000,1328,0,1025 } /* 07 (1280x1024x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1400x1050Des_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1400x1050Des_1[] =
{
{ 0,1448,0,1051 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 0,1448,0,1051 }, /* 01 (320x350,640x350) */
@@ -2330,7 +2301,7 @@ XGI330_LVDSDataStruct XGI_LVDS1400x1050Des_1[]=
{ 0,1448,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1400x1050Des_2[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1400x1050Des_2[] =
{
{ 1308,1068, 781, 766 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 1308,1068, 781, 766 }, /* 01 (320x350,640x350) */
@@ -2343,7 +2314,7 @@ XGI330_LVDSDataStruct XGI_LVDS1400x1050Des_2[]=
{ 0,1448,0,1051 } /* 08 (1400x1050x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1600x1200Des_1[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1600x1200Des_1[] =
{
{ 0,1664,0,1201 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 0,1664,0,1201 }, /* 01 (320x350,640x350) */
@@ -2359,7 +2330,7 @@ XGI330_LVDSDataStruct XGI_LVDS1600x1200Des_1[]=
-XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[]=
+struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] =
{
{ 0, 648, 448, 405, 96, 2 }, /* 00 (320x200,320x400,640x200,640x400) */
{ 0, 648, 448, 355, 96, 2 }, /* 01 (320x350,640x350) */
@@ -2374,7 +2345,7 @@ XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[]=
{ 0,1328,0,0771, 112, 6 } /* 0A (1280x768x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Des_1x75[]= /* ; 1024x768 Full-screen */
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Des_1x75[] = /* ; 1024x768 Full-screen */
{
{0,1040,0,769}, /* ; 00 (320x200,320x400,640x200,640x400) */
{0,1040,0,769}, /* ; 01 (320x350,640x350) */
@@ -2385,7 +2356,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Des_1x75[]= /* ; 1024x768 Full-screen */
{0,1040,0,769} /* ; 06 (1024x768x75Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Des_2x75[]= /* ; 1024x768 center-screen (Enh. Mode) */
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Des_2x75[] = /* ; 1024x768 center-screen (Enh. Mode) */
{
{1142, 856,622,587 }, /* 00 (320x200,320x400,640x200,640x400) */
{1142, 856,597,562 }, /* 01 (320x350,640x350) */
@@ -2396,7 +2367,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Des_2x75[]= /* ; 1024x768 center-screen (E
{ 0,1048,805,771 } /* 06 (1024x768x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1024x768Des_3x75[]= /* ; 1024x768 center-screen (St.Mode) */
+struct XGI330_LVDSDataStruct XGI_LVDS1024x768Des_3x75[] = /* ; 1024x768 center-screen (St.Mode) */
{
{320,24,622,587 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{320,24,597,562 }, /* ; 01 (320x350,640x350) */
@@ -2405,7 +2376,7 @@ XGI330_LVDSDataStruct XGI_LVDS1024x768Des_3x75[]= /* ; 1024x768 center-screen (S
{320,24,722,687 } /* ; 04 (640x480x60Hz) */
};
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_1x75[]=
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_1x75[] =
{
{0,1296,0,1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
{0,1296,0,1025}, /* ; 01 (320x350,640x350) */
@@ -2418,7 +2389,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_1x75[]=
};
/* The Display setting for DE Mode Panel */
-XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_2x75[]= /* [ycchen] 02/18/03 Set DE as default */
+struct XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_2x75[] = /* [ycchen] 02/18/03 Set DE as default */
{
{1368,976,752,711 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{1368,976,729,688 }, /* ; 01 (320x350,640x350) */
@@ -2430,7 +2401,7 @@ XGI330_LVDSDataStruct XGI_LVDS1280x1024Des_2x75[]= /* [ycchen] 02/18/03 Set DE
{0,1296,0,1025 } /* ; 07 (1280x1024x75Hz) */
};
-XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[]= /* Scaling LCD 75Hz */
+struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = /* Scaling LCD 75Hz */
{
{ 0,648,448,405,96,2 }, /* ; 00 (320x200,320x400,640x200,640x400) */
{ 0,648,448,355,96,2 }, /* ; 01 (320x350,640x350) */
@@ -2445,7 +2416,7 @@ XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[]= /* Scaling LCD 75Hz */
{ 0,1328,0,771,112,6 } /* ; 0A (1280x768x75Hz) */
};
-XGI330_LVDSDataStruct XGI330_LVDS640x480Data_1[]=
+struct XGI330_LVDSDataStruct XGI330_LVDS640x480Data_1[] =
{
{800, 449, 800, 449},
{800, 449, 800, 449},
@@ -2458,7 +2429,7 @@ XGI330_LVDSDataStruct XGI330_LVDS640x480Data_1[]=
{1056, 628,1056, 628}
};
-XGI330_CHTVDataStruct XGI_CHTVUNTSCData[]=
+struct XGI330_CHTVDataStruct XGI_CHTVUNTSCData[] =
{
{840, 600, 840, 600},
{840, 600, 840, 600},
@@ -2468,7 +2439,7 @@ XGI330_CHTVDataStruct XGI_CHTVUNTSCData[]=
{1064, 750,1064, 750}
};
-XGI330_CHTVDataStruct XGI_CHTVONTSCData[]=
+struct XGI330_CHTVDataStruct XGI_CHTVONTSCData[] =
{
{840, 525, 840, 525},
{840, 525, 840, 525},
@@ -2478,7 +2449,7 @@ XGI330_CHTVDataStruct XGI_CHTVONTSCData[]=
{1040, 700,1040, 700}
};
-XGI330_CHTVDataStruct XGI_CHTVUPALData[]=
+struct XGI330_CHTVDataStruct XGI_CHTVUPALData[] =
{
{1008, 625,1008, 625},
{1008, 625,1008, 625},
@@ -2488,7 +2459,7 @@ XGI330_CHTVDataStruct XGI_CHTVUPALData[]=
{936, 836, 936, 836}
};
-XGI330_CHTVDataStruct XGI_CHTVOPALData[]=
+struct XGI330_CHTVDataStruct XGI_CHTVOPALData[] =
{
{1008, 625,1008, 625},
{1008, 625,1008, 625},
@@ -2498,7 +2469,7 @@ XGI330_CHTVDataStruct XGI_CHTVOPALData[]=
{960, 750, 960, 750}
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_H[] =
{
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x4B,0x27,0x8F,0x32,0x1B,0x00,0x45,0x00 }}, /* 00 (320x) */
@@ -2511,7 +2482,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_H[]=
{{ 0xA3,0x7F,0x87,0x86,0x97,0x00,0x02,0x00 }} /* 07 (1024x) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_H[] =
{
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x56,0x27,0x9A,0x30,0x1E,0x00,0x05,0x00 }}, /* 00 (320x) */
@@ -2525,7 +2496,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_H[]=
{{ 0xCE,0x9F,0x92,0xA8,0x16,0x00,0x07,0x00 }} /* 08 (1280x) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_H[] =
{
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x63,0x27,0x87,0x3B,0x8C,0x00,0x01,0x00 }}, /* 00 (320x) */
@@ -2538,7 +2509,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_H[]=
{{ 0xA3,0x7F,0x87,0x86,0x97,0x00,0x02,0x00 }} /* 07 (1024x) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_H[] =
{
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x7E,0x3B,0x9A,0x44,0x12,0x00,0x01,0x00 }}, /* 00 (320x) */
@@ -2552,7 +2523,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_H[]=
{{ 0xCE,0x9F,0x92,0xA8,0x16,0x00,0x07,0x00 }} /* 08 (1280x) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_1_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_1_H[] =
{ /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x47,0x27,0x8B,0x2C,0x1A,0x00,0x05,0x00 }}, /* 00 (320x) */
{{ 0x47,0x27,0x8B,0x30,0x1E,0x00,0x05,0x00 }}, /* 01 (360x) */
@@ -2566,7 +2537,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_1_H[]=
{{ 0xCE,0xAE,0x92,0xB3,0x01,0x00,0x03,0x00 }} /* 09 (1400x) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_2_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_2_H[] =
{ /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x76,0x3F,0x83,0x45,0x8C,0x00,0x41,0x00 }}, /* 00 (320x) */
{{ 0x76,0x3F,0x83,0x45,0x8C,0x00,0x41,0x00 }}, /* 01 (360x) */
@@ -2580,7 +2551,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_2_H[]=
{{ 0xCE,0xAE,0x92,0xBC,0x0A,0x00,0x03,0x00 }} /* 09 (1400x) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11600x1200_1_H[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11600x1200_1_H[] =
/* ;302lv channelA [ycchen] 12/05/02 LCDHT=2048 */
{ /* ; CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x5B,0x27,0x9F,0x32,0x0A,0x00,0x01,0x00 }},/* 00 (320x) */
@@ -2596,7 +2567,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11600x1200_1_H[]=
{{ 0xFB,0xC7,0x9F,0xC9,0x81,0x00,0x07,0x00 }} /* 0A (1600x) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_V[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A+CR09(5->7) */
{{ 0x97,0x1F,0x60,0x87,0x5D,0x83,0x10 }}, /* 00 (x350) */
{{ 0xB4,0x1F,0x92,0x89,0x8F,0xB5,0x30 }}, /* 01 (x400) */
@@ -2605,7 +2576,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_V[]=
{{ 0x24,0xF5,0x02,0x88,0xFF,0x25,0x90 }} /* 04 (x768) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_V[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x24,0xBB,0x31,0x87,0x5D,0x25,0x30 }}, /* 00 (x350) */
{{ 0x24,0xBB,0x4A,0x80,0x8F,0x25,0x30 }}, /* 01 (x400) */
@@ -2614,7 +2585,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_V[]=
{{ 0x24,0xF5,0x02,0x88,0xFF,0x25,0x90 }} /* 04 (x768) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_V[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x86,0x1F,0x5E,0x82,0x5D,0x87,0x00 }}, /* 00 (x350) */
{{ 0xB8,0x1F,0x90,0x84,0x8F,0xB9,0x30 }}, /* 01 (x400) */
@@ -2624,7 +2595,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_V[]=
{{ 0x28,0x5A,0x13,0x87,0xFF,0x29,0xA9 }} /* 05 (x1024) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_V[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x28,0xD2,0xAF,0x83,0xAE,0xD8,0xA1 }}, /* 00 (x350) */
{{ 0x28,0xD2,0xC8,0x8C,0xC7,0xF2,0x81 }}, /* 01 (x400) */
@@ -2634,7 +2605,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_V[]=
{{ 0x28,0x5A,0x13,0x87,0xFF,0x29,0xA9 }} /* 05 (x1024) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_1_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_1_V[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x6C,0x1F,0x60,0x84,0x5D,0x6D,0x10 }}, /* 00 (x350) */
{{ 0x9E,0x1F,0x93,0x86,0x8F,0x9F,0x30 }}, /* 01 (x400) */
@@ -2645,7 +2616,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_1_V[]=
{{ 0x28,0x10,0x1A,0x80,0x19,0x29,0x0F }} /* 06 (x1050) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_2_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_2_V[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x28,0x92,0xB6,0x83,0xB5,0xCF,0x81 }}, /* 00 (x350) */
{{ 0x28,0x92,0xD5,0x82,0xD4,0xEE,0x81 }}, /* 01 (x400) */
@@ -2656,7 +2627,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_2_V[]=
{{ 0x28,0x10,0x1A,0x87,0x19,0x29,0x8F }} /* 06 (x1050) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11600x1200_1_V[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11600x1200_1_V[] =
{
/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A+CR09(5->7) */
{{ 0xd4,0x1F,0x81,0x84,0x5D,0xd5,0x10 }}, /* 00 (x350) */
@@ -2669,7 +2640,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11600x1200_1_V[]=
{{ 0x26,0x11,0xd3,0x86,0xaF,0x27,0x3f }} /* 07 (x1200) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_Hx75[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_Hx75[] =
{ /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x4B,0x27,0x8F,0x32,0x1B,0x00,0x45,0x00 }},/* ; 00 (320x) */
{{ 0x4B,0x27,0x8F,0x2B,0x03,0x00,0x44,0x00 }},/* ; 01 (360x) */
@@ -2681,7 +2652,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_Hx75[]=
{{ 0x9F,0x7F,0x83,0x85,0x91,0x00,0x02,0x00 }} /* ; 07 (1024x) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_Vx75[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_Vx75[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A+CR09(5->7) */
{{ 0x97,0x1F,0x60,0x87,0x5D,0x83,0x10 }},/* ; 00 (x350) */
{{ 0xB4,0x1F,0x92,0x89,0x8F,0xB5,0x30 }},/* ; 01 (x400) */
@@ -2690,7 +2661,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_Vx75[]=
{{ 0x1E,0xF5,0x00,0x83,0xFF,0x1F,0x90 }} /* ; 04 (x768) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_Hx75[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_Hx75[] =
{ /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x63,0x27,0x87,0x3B,0x8C,0x00,0x01,0x00 }},/* ; 00 (320x) */
{{ 0x63,0x27,0x87,0x3B,0x8C,0x00,0x01,0x00 }},/* ; 01 (360x) */
@@ -2702,7 +2673,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_Hx75[]=
{{ 0xA3,0x7F,0x87,0x86,0x97,0x00,0x02,0x00 }} /* ; 07 (1024x) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_Vx75[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_Vx75[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x24,0xBB,0x31,0x87,0x5D,0x25,0x30 }},/* ; 00 (x350) */
{{ 0x24,0xBB,0x4A,0x80,0x8F,0x25,0x30 }},/* ; 01 (x400) */
@@ -2711,7 +2682,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_Vx75[]=
{{ 0x24,0xF5,0x02,0x88,0xFF,0x25,0x90 }} /* ; 04 (x768) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_Hx75[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_Hx75[] =
{ /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x56,0x27,0x9A,0x30,0x1E,0x00,0x05,0x00 }},/* ; 00 (320x) */
{{ 0x56,0x27,0x9A,0x30,0x1E,0x00,0x05,0x00 }},/* ; 01 (360x) */
@@ -2724,7 +2695,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_Hx75[]=
{{ 0xCE,0x9F,0x92,0xA5,0x17,0x00,0x07,0x00 }} /* ; 08 (1280x) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] =
{ /* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x86,0xD1,0xBC,0x80,0xBB,0xE5,0x00 }},/* ; 00 (x350) */
{{ 0xB8,0x1F,0x90,0x84,0x8F,0xB9,0x30 }},/* ; 01 (x400) */
@@ -2734,7 +2705,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[]=
{{ 0x28,0x5A,0x13,0x87,0xFF,0x29,0xA9 }} /* ; 05 (x1024) */
};
-XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[]=
+struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] =
{
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
{{ 0x7E,0x3B,0x9A,0x44,0x12,0x00,0x01,0x00 }},/* ; 00 (320x) */
@@ -2748,7 +2719,7 @@ XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[]=
{{ 0xCE,0x9F,0x92,0xA8,0x16,0x00,0x07,0x00 }} /* ; 08 (1280x) */
};
-XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[]=
+struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] =
{
/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
{{ 0x28,0xD2,0xAF,0x83,0xAE,0xD8,0xA1 }},/* ; 00 (x350) */
@@ -2759,7 +2730,7 @@ XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[]=
{{ 0x28,0x5A,0x13,0x87,0xFF,0x29,0xA9 }} /* ; 05 (x1024) */
};
-XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UNTSC[]=
+struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UNTSC[] =
{
{{0x64,0x4f,0x88,0x56,0x9f,0x56,0x3e,
0xe8,0x84,0x8f,0x57,0x20,0x00,0x01,0x00 }},
@@ -2775,7 +2746,7 @@ XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UNTSC[]=
0x90,0x8c,0x57,0xed,0x20,0x00,0x06,0x01 }}
};
-XGI_LVDSCRT1DataStruct XGI_CHTVCRT1ONTSC[]=
+struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1ONTSC[] =
{
{{0x64,0x4f,0x88,0x5a,0x9f,0x0b,0x3e,
0xc0,0x84,0x8f,0x0c,0x20,0x00,0x01,0x00 }},
@@ -2791,7 +2762,7 @@ XGI_LVDSCRT1DataStruct XGI_CHTVCRT1ONTSC[]=
0x7f,0x86,0x57,0xbb,0x00,0x00,0x06,0x01 }}
};
-XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UPAL[]=
+struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UPAL[] =
{
{{0x79,0x4f,0x9d,0x5a,0x90,0x6f,0x3e,
0xf8,0x83,0x8f,0x70,0x20,0x00,0x05,0x00 }},
@@ -2807,7 +2778,7 @@ XGI_LVDSCRT1DataStruct XGI_CHTVCRT1UPAL[]=
0xc8,0x8c,0x57,0xe9,0x20,0x00,0x05,0x01 }}
};
-XGI_LVDSCRT1DataStruct XGI_CHTVCRT1OPAL[]=
+struct XGI_LVDSCRT1DataStruct XGI_CHTVCRT1OPAL[] =
{
{{0x79,0x4f,0x9d,0x5a,0x90,0x6f,0x3e,
0xf0,0x83,0x8f,0x70,0x20,0x00,0x05,0x00 }},
@@ -2824,7 +2795,7 @@ XGI_LVDSCRT1DataStruct XGI_CHTVCRT1OPAL[]=
};
/*add for new UNIVGABIOS*/
-XGI330_LCDDataTablStruct XGI_LCDDataTable[]=
+struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] =
{
{Panel1024x768,0x0019,0x0001,0}, /* XGI_ExtLCD1024x768Data */
{Panel1024x768,0x0019,0x0000,1}, /* XGI_StLCD1024x768Data */
@@ -2848,7 +2819,7 @@ XGI330_LCDDataTablStruct XGI_LCDDataTable[]=
{0xFF,0x0000,0x0000,0} /* End of table */
};
-XGI330_LCDDataTablStruct XGI_LCDDesDataTable[]=
+struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] =
{
{Panel1024x768,0x0019,0x0001,0}, /* XGI_ExtLCDDes1024x768Data */
{Panel1024x768,0x0019,0x0000,1}, /* XGI_StLCDDes1024x768Data */
@@ -2873,7 +2844,7 @@ XGI330_LCDDataTablStruct XGI_LCDDesDataTable[]=
{0xFF,0x0000,0x0000,0}
};
-XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_H[]=
+struct XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_H[] =
{
{Panel1024x768,0x0018,0x0000,0}, /* XGI_LVDSCRT11024x768_1_H */
{Panel1024x768,0x0018,0x0010,1}, /* XGI_LVDSCRT11024x768_2_H */
@@ -2889,7 +2860,7 @@ XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_H[]=
{0xFF,0x0000,0x0000,0}
};
-XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_V[]=
+struct XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_V[] =
{
{Panel1024x768,0x0018,0x0000,0}, /* XGI_LVDSCRT11024x768_1_V */
{Panel1024x768,0x0018,0x0010,1}, /* XGI_LVDSCRT11024x768_2_V */
@@ -2905,7 +2876,7 @@ XGI330_LCDDataTablStruct XGI_EPLLCDCRT1Ptr_V[]=
{0xFF,0x0000,0x0000,0}
};
-XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[]=
+struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] =
{
{Panel1024x768,0x0018,0x0000,0}, /* XGI_LVDS1024x768Data_1 */
{Panel1024x768,0x0018,0x0010,1}, /* XGI_LVDS1024x768Data_2 */
@@ -2923,7 +2894,7 @@ XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[]=
{0xFF,0x0000,0x0000,0}
};
-XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[]=
+struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] =
{
{Panel1024x768,0x0018,0x0000,0}, /* XGI_LVDS1024x768Des_1 */
{Panel1024x768,0x0618,0x0410,1}, /* XGI_LVDS1024x768Des_3 */
@@ -2943,14 +2914,14 @@ XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[]=
{0xFF,0x0000,0x0000,0}
};
-XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[]=
+struct XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[] =
{
{Panel1024x768,0x0000,0x0000,0}, /* XGI_CH7017LV1024x768 */
{Panel1400x1050,0x0000,0x0000,1}, /* XGI_CH7017LV1400x1050 */
{0xFF,0x0000,0x0000,0}
};
-XGI330_TVDataTablStruct XGI_TVDataTable[]=
+struct XGI330_TVDataTablStruct XGI_TVDataTable[] =
{
{0x09E1,0x0001,0}, /* XGI_ExtPALData */
{0x09E1,0x0000,1}, /* XGI_ExtNTSCData */
@@ -2968,7 +2939,7 @@ XGI330_TVDataTablStruct XGI_TVDataTable[]=
{0xffff,0x0000,12} /* END */
};
-USHORT TVLenList[]=
+unsigned short TVLenList[] =
{
LVDSCRT1Len_H,
LVDSCRT1Len_V,
@@ -2981,7 +2952,7 @@ USHORT TVLenList[]=
} ;
/* Chrontel 7017 TV CRT1 Timing List */
-XGI330_TVDataTablStruct XGI_EPLCHTVCRT1Ptr[]=
+struct XGI330_TVDataTablStruct XGI_EPLCHTVCRT1Ptr[] =
{
{0x0011,0x0000,0}, /* XGI_CHTVCRT1UNTSC */
{0x0011,0x0010,1}, /* XGI_CHTVCRT1ONTSC */
@@ -2991,7 +2962,7 @@ XGI330_TVDataTablStruct XGI_EPLCHTVCRT1Ptr[]=
};
/* ;;Chrontel 7017 TV Timing List */
-XGI330_TVDataTablStruct XGI_EPLCHTVDataPtr[]=
+struct XGI330_TVDataTablStruct XGI_EPLCHTVDataPtr[] =
{
{0x0011,0x0000,0}, /* XGI_CHTVUNTSCData */
{0x0011,0x0010,1}, /* XGI_CHTVONTSCData */
@@ -3001,7 +2972,7 @@ XGI330_TVDataTablStruct XGI_EPLCHTVDataPtr[]=
};
/* ;;Chrontel 7017 TV Reg. List */
-XGI330_TVDataTablStruct XGI_EPLCHTVRegPtr[]=
+struct XGI330_TVDataTablStruct XGI_EPLCHTVRegPtr[] =
{
{0x0011,0x0000,0}, /* XGI_CHTVRegUNTSC */
{0x0011,0x0010,1}, /* XGI_CHTVRegONTSC */
@@ -3010,7 +2981,7 @@ XGI330_TVDataTablStruct XGI_EPLCHTVRegPtr[]=
{0xFFFF,0x0000,4}
};
-USHORT LCDLenList[]=
+unsigned short LCDLenList[] =
{
LVDSCRT1Len_H,
LVDSCRT1Len_V,
@@ -3024,7 +2995,7 @@ USHORT LCDLenList[]=
0
} ;
-XGI330_LCDCapStruct XGI660_LCDDLCapList[]= /* 660, Dual link */
+struct XGI330_LCDCapStruct XGI660_LCDDLCapList[] = /* 660, Dual link */
{
/* LCDCap1024x768 */
{Panel1024x768, DefaultLCDCap, 0, 0x014, 0x88, 0x06, VCLK65,
@@ -3056,7 +3027,7 @@ XGI330_LCDCapStruct XGI660_LCDDLCapList[]= /* 660, Dual link */
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-XGI330_LCDCapStruct XGI_LCDDLCapList[]= /* Dual link only */
+struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = /* Dual link only */
{
/* LCDCap1024x768 */
{Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
@@ -3088,7 +3059,7 @@ XGI330_LCDCapStruct XGI_LCDDLCapList[]= /* Dual link only */
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-XGI330_LCDCapStruct XGI660_LCDCapList[]=
+struct XGI330_LCDCapStruct XGI660_LCDCapList[] =
{
/* LCDCap1024x768 */
{Panel1024x768, DefaultLCDCap, 0, 0x014, 0x88, 0x06, VCLK65,
@@ -3120,7 +3091,7 @@ XGI330_LCDCapStruct XGI660_LCDCapList[]=
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-XGI330_LCDCapStruct XGI_LCDCapList[]=
+struct XGI330_LCDCapStruct XGI_LCDCapList[] =
{
/* LCDCap1024x768 */
{Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
@@ -3152,7 +3123,7 @@ XGI330_LCDCapStruct XGI_LCDCapList[]=
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-XGI21_LVDSCapStruct XGI21_LCDCapList[]=
+struct XGI21_LVDSCapStruct XGI21_LCDCapList[] =
{
{DisableLCD24bpp + LCDPolarity,
2160,1250,1600,1200, 64, 1, 192, 3,
@@ -3181,7 +3152,7 @@ XGI21_LVDSCapStruct XGI21_LCDCapList[]=
};
-XGI_Ext2Struct XGI330_RefIndex[]=
+struct XGI_Ext2Struct XGI330_RefIndex[] =
{
{Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175, 0x00,0x10,0x59, 320, 200},/* 00 */
{Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175, 0x00,0x10,0x00, 320, 400},/* 01 */
@@ -3260,7 +3231,7 @@ XGI_Ext2Struct XGI330_RefIndex[]=
-XGI330_VCLKDataStruct XGI330_VCLKData[]=
+struct XGI330_VCLKDataStruct XGI330_VCLKData[] =
{
{ 0x1b,0xe1, 25}, /* 0x0 */
{ 0x4e,0xe4, 28}, /* 0x1 */
@@ -3344,7 +3315,7 @@ XGI330_VCLKDataStruct XGI330_VCLKData[]=
{ 0x3b,0x61,108} /* 0x4f */
};
-XGI_VBVCLKDataStruct XGI330_VBVCLKData[]=
+struct XGI_VBVCLKDataStruct XGI330_VBVCLKData[] =
{
{ 0x1b,0xe1, 25}, /* 0x0 */
{ 0x4e,0xe4, 28}, /* 0x1 */
@@ -3422,9 +3393,11 @@ XGI_VBVCLKDataStruct XGI330_VBVCLKData[]=
{ 0x70,0x44,108}, /* 0x49 chiawen for 1400x1050*/
};
-UCHAR XGI330_ScreenOffset[]={ 0x14,0x19,0x20,0x28,0x32,0x40,0x50,0x64,0x78,0x80,0x2d,0x35,0x57,0x48 };
+unsigned char XGI330_ScreenOffset[] = { 0x14, 0x19, 0x20, 0x28, 0x32, 0x40,
+ 0x50, 0x64, 0x78, 0x80, 0x2d, 0x35,
+ 0x57, 0x48};
-XGI_StResInfoStruct XGI330_StResInfo[]=
+struct XGI_StResInfoStruct XGI330_StResInfo[] =
{
{ 640,400},
{ 640,350},
@@ -3433,7 +3406,7 @@ XGI_StResInfoStruct XGI330_StResInfo[]=
{ 640,480}
};
-XGI_ModeResInfoStruct XGI330_ModeResInfo[]=
+struct XGI_ModeResInfoStruct XGI330_ModeResInfo[] =
{
{ 320, 200, 8, 8},
{ 320, 240, 8, 8},
@@ -3460,10 +3433,10 @@ XGI_ModeResInfoStruct XGI330_ModeResInfo[]=
{ 1152, 864, 8,16}
};
-UCHAR XGI330_OutputSelect =0x40;
-UCHAR XGI330_SoftSetting = 0x30;
-UCHAR XGI330_SR07=0x18;
-UCHAR XGI330New_SR15[8][8]={
+unsigned char XGI330_OutputSelect = 0x40;
+unsigned char XGI330_SoftSetting = 0x30;
+unsigned char XGI330_SR07 = 0x18;
+unsigned char XGI330New_SR15[8][8] = {
{0x0,0x4,0x60,0x60},
{0xf,0xf,0xf,0xf},
{0xba,0xba,0xba,0xba},
@@ -3474,7 +3447,7 @@ UCHAR XGI330New_SR15[8][8]={
{0x0,0xa5,0xfb,0xf6}
};
-UCHAR XGI330New_CR40[5][8]={
+unsigned char XGI330New_CR40[5][8] = {
{0x77,0x77,0x44,0x44},
{0x77,0x77,0x44,0x44},
{0x0,0x0,0x0,0x0},
@@ -3482,63 +3455,63 @@ UCHAR XGI330New_CR40[5][8]={
{0x0,0x0,0xf0,0xf8}
};
-UCHAR XGI330_CR49[]={0xaa,0x88};
-UCHAR XGI330_SR1F=0x0;
-UCHAR XGI330_SR21=0xa3;
-UCHAR XGI330_650_SR21=0xa7;
-UCHAR XGI330_SR22=0xfb;
-UCHAR XGI330_SR23=0xf6;
-UCHAR XGI330_SR24=0xd;
-
-UCHAR XGI660_SR21=0xa3;/* 2003.0312 */
-UCHAR XGI660_SR22=0xf3;/* 2003.0312 */
-
-UCHAR XGI330_LVDS_SR32=0x00; /* ynlai for 650 LVDS */
-UCHAR XGI330_LVDS_SR33=0x00; /* chiawen for 650 LVDS */
-UCHAR XGI330_650_SR31=0x40;
-UCHAR XGI330_650_SR33=0x04;
-UCHAR XGI330_CRT2Data_1_2 = 0x0;
-UCHAR XGI330_CRT2Data_4_D = 0x0;
-UCHAR XGI330_CRT2Data_4_E = 0x0;
-UCHAR XGI330_CRT2Data_4_10 = 0x80;
-USHORT XGI330_RGBSenseData = 0xd1;
-USHORT XGI330_VideoSenseData = 0xb9;
-USHORT XGI330_YCSenseData = 0xb3;
-USHORT XGI330_RGBSenseData2 = 0x0190; /*301b*/
-USHORT XGI330_VideoSenseData2 = 0x0110;
-USHORT XGI330_YCSenseData2 = 0x016B;
-UCHAR XGI330_NTSCPhase[] = {0x21,0xed,0x8a,0x8};
-UCHAR XGI330_PALPhase[] = {0x2a,0x5,0xd3,0x0};
-UCHAR XGI330_NTSCPhase2[] = {0x21,0xF0,0x7B,0xD6};/*301b*/
-UCHAR XGI330_PALPhase2[] = {0x2a,0x09,0x86,0xe9};
-UCHAR XGI330_PALMPhase[] = {0x21,0xE4,0x2E,0x9B}; /*palmn*/
-UCHAR XGI330_PALNPhase[] = {0x21,0xF4,0x3E,0xBA};
-UCHAR XG40_I2CDefinition = 0x00 ;
-UCHAR XG20_CR97 = 0x10 ;
-
-UCHAR XG21_DVOSetting = 0x00 ;
-UCHAR XG21_CR2E = 0x00 ;
-UCHAR XG21_CR2F = 0x00 ;
-UCHAR XG21_CR46 = 0x00 ;
-UCHAR XG21_CR47 = 0x00 ;
-
-UCHAR XG27_CR97 = 0xC1 ;
-UCHAR XG27_SR36 = 0x30 ;
-UCHAR XG27_CR8F = 0x0C ;
-UCHAR XG27_CRD0[] = {0,0,0,0,0,0,0,0x82,0x00,0x66,0x01,0x00} ;
-UCHAR XG27_CRDE[] = {0,0} ;
-UCHAR XG27_SR40 = 0x04 ;
-UCHAR XG27_SR41 = 0x00 ;
-
-UCHAR XGI330_CHTVVCLKUNTSC[]={0x00 };
-
-UCHAR XGI330_CHTVVCLKONTSC[]={0x00 };
-
-UCHAR XGI330_CHTVVCLKUPAL[]={0x00 };
-
-UCHAR XGI330_CHTVVCLKOPAL[]={0x00 };
-
-UCHAR XGI7007_CHTVVCLKUNTSC[]={CH7007TVVCLK30_2,
+unsigned char XGI330_CR49[] = {0xaa, 0x88};
+unsigned char XGI330_SR1F = 0x0;
+unsigned char XGI330_SR21 = 0xa3;
+unsigned char XGI330_650_SR21 = 0xa7;
+unsigned char XGI330_SR22 = 0xfb;
+unsigned char XGI330_SR23 = 0xf6;
+unsigned char XGI330_SR24 = 0xd;
+
+unsigned char XGI660_SR21 = 0xa3;/* 2003.0312 */
+unsigned char XGI660_SR22 = 0xf3;/* 2003.0312 */
+
+unsigned char XGI330_LVDS_SR32 = 0x00; /* ynlai for 650 LVDS */
+unsigned char XGI330_LVDS_SR33 = 0x00; /* chiawen for 650 LVDS */
+unsigned char XGI330_650_SR31 = 0x40;
+unsigned char XGI330_650_SR33 = 0x04;
+unsigned char XGI330_CRT2Data_1_2 = 0x0;
+unsigned char XGI330_CRT2Data_4_D = 0x0;
+unsigned char XGI330_CRT2Data_4_E = 0x0;
+unsigned char XGI330_CRT2Data_4_10 = 0x80;
+unsigned short XGI330_RGBSenseData = 0xd1;
+unsigned short XGI330_VideoSenseData = 0xb9;
+unsigned short XGI330_YCSenseData = 0xb3;
+unsigned short XGI330_RGBSenseData2 = 0x0190; /*301b*/
+unsigned short XGI330_VideoSenseData2 = 0x0110;
+unsigned short XGI330_YCSenseData2 = 0x016B;
+unsigned char XGI330_NTSCPhase[] = {0x21, 0xed, 0x8a, 0x8};
+unsigned char XGI330_PALPhase[] = {0x2a, 0x5, 0xd3, 0x0};
+unsigned char XGI330_NTSCPhase2[] = {0x21, 0xF0, 0x7B, 0xD6};/*301b*/
+unsigned char XGI330_PALPhase2[] = {0x2a, 0x09, 0x86, 0xe9};
+unsigned char XGI330_PALMPhase[] = {0x21, 0xE4, 0x2E, 0x9B}; /*palmn*/
+unsigned char XGI330_PALNPhase[] = {0x21, 0xF4, 0x3E, 0xBA};
+unsigned char XG40_I2CDefinition = 0x00 ;
+unsigned char XG20_CR97 = 0x10 ;
+
+unsigned char XG21_DVOSetting = 0x00 ;
+unsigned char XG21_CR2E = 0x00 ;
+unsigned char XG21_CR2F = 0x00 ;
+unsigned char XG21_CR46 = 0x00 ;
+unsigned char XG21_CR47 = 0x00 ;
+
+unsigned char XG27_CR97 = 0xC1 ;
+unsigned char XG27_SR36 = 0x30 ;
+unsigned char XG27_CR8F = 0x0C ;
+unsigned char XG27_CRD0[] = {0, 0, 0, 0, 0, 0, 0, 0x82, 0x00, 0x66, 0x01, 0x00};
+unsigned char XG27_CRDE[] = {0, 0};
+unsigned char XG27_SR40 = 0x04 ;
+unsigned char XG27_SR41 = 0x00 ;
+
+unsigned char XGI330_CHTVVCLKUNTSC[] = {0x00};
+
+unsigned char XGI330_CHTVVCLKONTSC[] = {0x00};
+
+unsigned char XGI330_CHTVVCLKUPAL[] = {0x00};
+
+unsigned char XGI330_CHTVVCLKOPAL[] = {0x00};
+
+unsigned char XGI7007_CHTVVCLKUNTSC[] = {CH7007TVVCLK30_2,
CH7007TVVCLK30_2,
CH7007TVVCLK30_2,
CH7007TVVCLK30_2,
@@ -3546,7 +3519,7 @@ UCHAR XGI7007_CHTVVCLKUNTSC[]={CH7007TVVCLK30_2,
CH7007TVVCLK47_8
};
-UCHAR XGI7007_CHTVVCLKONTSC[]={CH7007TVVCLK26_4,
+unsigned char XGI7007_CHTVVCLKONTSC[] = {CH7007TVVCLK26_4,
CH7007TVVCLK26_4,
CH7007TVVCLK26_4,
CH7007TVVCLK26_4,
@@ -3554,7 +3527,7 @@ UCHAR XGI7007_CHTVVCLKONTSC[]={CH7007TVVCLK26_4,
CH7007TVVCLK43_6
};
-UCHAR XGI7007_CHTVVCLKUPAL[]={CH7007TVVCLK31_5,
+unsigned char XGI7007_CHTVVCLKUPAL[] = {CH7007TVVCLK31_5,
CH7007TVVCLK31_5,
CH7007TVVCLK31_5,
CH7007TVVCLK31_5,
@@ -3562,7 +3535,7 @@ UCHAR XGI7007_CHTVVCLKUPAL[]={CH7007TVVCLK31_5,
CH7007TVVCLK39
};
-UCHAR XGI7007_CHTVVCLKOPAL[]={CH7007TVVCLK31_5,
+unsigned char XGI7007_CHTVVCLKOPAL[] = {CH7007TVVCLK31_5,
CH7007TVVCLK31_5,
CH7007TVVCLK31_5,
CH7007TVVCLK31_5,
@@ -3570,7 +3543,7 @@ UCHAR XGI7007_CHTVVCLKOPAL[]={CH7007TVVCLK31_5,
CH7007TVVCLK36
};
-XGI330_VCLKDataStruct XGI_CH7007VCLKData[]=
+struct XGI330_VCLKDataStruct XGI_CH7007VCLKData[] =
{
{ 0x60,0x36,30}, /* 0 30.2 MHZ */
{ 0x40,0x4A,28}, /* 1 28.19 MHZ */
@@ -3585,7 +3558,7 @@ XGI330_VCLKDataStruct XGI_CH7007VCLKData[]=
{ 0xFF,0x00,0 } /* End mark */
};
-XGI330_VCLKDataStruct XGI_VCLKData[]=
+struct XGI330_VCLKDataStruct XGI_VCLKData[] =
{
/* SR2B,SR2C,SR2D */
{ 0x1B,0xE1,25 },/* 00 (25.175MHz) */
@@ -3786,7 +3759,7 @@ XGI330_VCLKDataStruct XGI_VCLKData[]=
{ 0xFF,0x00,0 }/* End mark */
} ;
-XGI330_VCLKDataStruct XGI_VBVCLKData[]=
+struct XGI330_VCLKDataStruct XGI_VBVCLKData[] =
{
{ 0x1B,0xE1,25 },/* 00 (25.175MHz) */
@@ -3987,7 +3960,7 @@ XGI330_VCLKDataStruct XGI_VBVCLKData[]=
{ 0xFF,0x00,0 } /* End mark */
};
-UCHAR XGI660_TVDelayList[]=
+unsigned char XGI660_TVDelayList[] =
{
0x44, /* ; 0 ExtNTSCDelay */
0x44, /* ; 1 StNTSCDelay */
@@ -4003,7 +3976,7 @@ UCHAR XGI660_TVDelayList[]=
0x44 /* ; B StYPbPrDealy(750p) */
};
-UCHAR XGI660_TVDelayList2[]=
+unsigned char XGI660_TVDelayList2[] =
{
0x44, /* ; 0 ExtNTSCDelay */
0x44, /* ; 1 StNTSCDelay */
@@ -4019,7 +3992,7 @@ UCHAR XGI660_TVDelayList2[]=
0x44 /* ; B StYPbPrDealy(750p) */
};
-UCHAR XGI301TVDelayList[]=
+unsigned char XGI301TVDelayList[] =
{
0x22, /* ; 0 ExtNTSCDelay */
0x22, /* ; 1 StNTSCDelay */
@@ -4035,7 +4008,7 @@ UCHAR XGI301TVDelayList[]=
0x22 /* B StYPbPrDealy(750p) */
};
-UCHAR XGI301TVDelayList2[]=
+unsigned char XGI301TVDelayList2[] =
{
0x22, /* ; 0 ExtNTSCDelay */
0x22, /* ; 1 StNTSCDelay */
@@ -4052,7 +4025,7 @@ UCHAR XGI301TVDelayList2[]=
};
-UCHAR TVAntiFlickList[]=
+unsigned char TVAntiFlickList[] =
{/* NTSCAntiFlicker */
0x04, /* ; 0 Adaptive */
0x00, /* ; 1 new anti-flicker ? */
@@ -4065,7 +4038,7 @@ UCHAR TVAntiFlickList[]=
};
-UCHAR TVEdgeList[]=
+unsigned char TVEdgeList[] =
{
0x00, /* ; 0 NTSC No Edge enhance */
0x04, /* ; 1 NTSC Adaptive Edge enhance */
@@ -4075,7 +4048,7 @@ UCHAR TVEdgeList[]=
0x00 /* ; 1 HiTV */
};
-ULONG TVPhaseList[]=
+unsigned long TVPhaseList[] =
{ 0x08BAED21, /* ; 0 NTSC phase */
0x00E3052A, /* ; 1 PAL phase */
0x9B2EE421, /* ; 2 PAL-M phase */
@@ -4092,7 +4065,7 @@ ULONG TVPhaseList[]=
0xE00A831E /* ; D PAL-M 1024x768 */
};
-UCHAR NTSCYFilter1[]=
+unsigned char NTSCYFilter1[] =
{
0x00,0xF4,0x10,0x38 ,/* 0 : 320x text mode */
0x00,0xF4,0x10,0x38 ,/* 1 : 360x text mode */
@@ -4103,7 +4076,7 @@ UCHAR NTSCYFilter1[]=
0xEB,0x15,0x25,0xF6 /* 6 : 800x gra. mode */
};
-UCHAR PALYFilter1[]=
+unsigned char PALYFilter1[] =
{
0x00,0xF4,0x10,0x38, /* 0 : 320x text mode */
0x00,0xF4,0x10,0x38 ,/* 1 : 360x text mode */
@@ -4114,7 +4087,7 @@ UCHAR PALYFilter1[]=
0xFC,0xFB,0x14,0x2A /* 6 : 800x gra. mode */
};
-UCHAR PALMYFilter1[]=
+unsigned char PALMYFilter1[] =
{
0x00,0xF4,0x10,0x38, /* 0 : 320x text mode */
0x00,0xF4,0x10,0x38, /* 1 : 360x text mode */
@@ -4126,7 +4099,7 @@ UCHAR PALMYFilter1[]=
0xFF,0xFF,0xFF,0xFF /* End of Table */
};
-UCHAR PALNYFilter1[]=
+unsigned char PALNYFilter1[] =
{
0x00,0xF4,0x10,0x38, /* 0 : 320x text mode */
0x00,0xF4,0x10,0x38, /* 1 : 360x text mode */
@@ -4138,7 +4111,7 @@ UCHAR PALNYFilter1[]=
0xFF,0xFF,0xFF,0xFF /* End of Table */
};
-UCHAR NTSCYFilter2[]=
+unsigned char NTSCYFilter2[] =
{
0xFF,0x03,0x02,0xF6,0xFC,0x27,0x46, /* 0 : 320x text mode */
0x01,0x02,0xFE,0xF7,0x03,0x27,0x3C, /* 1 : 360x text mode */
@@ -4150,7 +4123,7 @@ UCHAR NTSCYFilter2[]=
0xFF,0xFF,0xFC,0x00,0x0F,0x22,0x28 /* 7 : 1024xgra. mode */
};
-UCHAR PALYFilter2[]=
+unsigned char PALYFilter2[] =
{
0xFF,0x03,0x02,0xF6,0xFC,0x27,0x46, /* 0 : 320x text mode */
0x01,0x02,0xFE,0xF7,0x03,0x27,0x3C, /* 1 : 360x text mode */
@@ -4162,7 +4135,7 @@ UCHAR PALYFilter2[]=
0xFF,0xFF,0xFC,0x00,0x0F,0x22,0x28 /* 7 : 1024xgra. mode */
};
-UCHAR PALMYFilter2[]=
+unsigned char PALMYFilter2[] =
{
0xFF,0x03,0x02,0xF6,0xFC,0x27,0x46, /* 0 : 320x text mode */
0x01,0x02,0xFE,0xF7,0x03,0x27,0x3C, /* 1 : 360x text mode */
@@ -4174,7 +4147,7 @@ UCHAR PALMYFilter2[]=
0xFF,0xFF,0xFC,0x00,0x0F,0x22,0x28 /* 7 : 1024xgra. mode */
};
-UCHAR PALNYFilter2[]=
+unsigned char PALNYFilter2[] =
{
0xFF,0x03,0x02,0xF6,0xFC,0x27,0x46, /* 0 : 320x text mode */
0x01,0x02,0xFE,0xF7,0x03,0x27,0x3C, /* 1 : 360x text mode */
@@ -4186,14 +4159,14 @@ UCHAR PALNYFilter2[]=
0xFF,0xFF,0xFC,0x00,0x0F,0x22,0x28 /* 7 : 1024xgra. mode */
};
-UCHAR XGI_NTSC1024AdjTime[]=
+unsigned char XGI_NTSC1024AdjTime[] =
{
0xa7,0x07,0xf2,0x6e,0x17,0x8b,0x73,0x53,
0x13,0x40,0x34,0xF4,0x63,0xBB,0xCC,0x7A,
0x58,0xe4,0x73,0xd0,0x13
};
-XGI301C_Tap4TimingStruct HiTVTap4Timing[]=
+struct XGI301C_Tap4TimingStruct HiTVTap4Timing[] =
{
{0,{
0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F, /* ; C0-C7 */
@@ -4208,7 +4181,7 @@ XGI301C_Tap4TimingStruct HiTVTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct EnlargeTap4Timing[]=
+struct XGI301C_Tap4TimingStruct EnlargeTap4Timing[] =
{
{0,{
0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F, /* ; C0-C7 */
@@ -4223,7 +4196,7 @@ XGI301C_Tap4TimingStruct EnlargeTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct NoScaleTap4Timing[]=
+struct XGI301C_Tap4TimingStruct NoScaleTap4Timing[] =
{
{0,{
0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F, /* ; C0-C7 */
@@ -4238,7 +4211,7 @@ XGI301C_Tap4TimingStruct NoScaleTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct PALTap4Timing[]=
+struct XGI301C_Tap4TimingStruct PALTap4Timing[] =
{
{600, {
0x05,0x19,0x05,0x7D,0x03,0x19,0x06,0x7E, /* ; C0-C7 */
@@ -4276,7 +4249,7 @@ XGI301C_Tap4TimingStruct PALTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct NTSCTap4Timing[]=
+struct XGI301C_Tap4TimingStruct NTSCTap4Timing[] =
{
{480, {
0x04,0x1A,0x04,0x7E,0x03,0x1A,0x06,0x7D, /* ; C0-C7 */
@@ -4314,7 +4287,7 @@ XGI301C_Tap4TimingStruct NTSCTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct YPbPr525pTap4Timing[]=
+struct XGI301C_Tap4TimingStruct YPbPr525pTap4Timing[] =
{
{480, {
0x04,0x1A,0x04,0x7E,0x03,0x1A,0x06,0x7D, /* ; C0-C7 */
@@ -4352,7 +4325,7 @@ XGI301C_Tap4TimingStruct YPbPr525pTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct YPbPr525iTap4Timing[]=
+struct XGI301C_Tap4TimingStruct YPbPr525iTap4Timing[] =
{
{480, {
0x04,0x1A,0x04,0x7E,0x03,0x1A,0x06,0x7D, /* ; C0-C7 */
@@ -4390,7 +4363,7 @@ XGI301C_Tap4TimingStruct YPbPr525iTap4Timing[]=
}
};
-XGI301C_Tap4TimingStruct YPbPr750pTap4Timing[]=
+struct XGI301C_Tap4TimingStruct YPbPr750pTap4Timing[] =
{ {0xFFFF,
{
0x05,0x19,0x05,0x7D,0x03,0x19,0x06,0x7E, /* ; C0-C7 */
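The whole table file above is converted with the same two mechanical substitutions: the osdef.h scalar typedefs (UCHAR, USHORT, ULONG) become the plain C types unsigned char, unsigned short and unsigned long, and arrays of typedef'd structures gain an explicit struct keyword. A minimal sketch of that pattern, using an invented two-byte table and a hypothetical struct example_entry so the snippet stands on its own:

/* old style, relying on the osdef.h typedefs:
 *   UCHAR ExampleTiming[] = {0x17, 0x1d};
 *   ExampleEntryStruct ExampleTable[] = { {1, 1} };
 */

/* hypothetical record type standing in for the vb_struct.h typedefs */
struct example_entry {
	unsigned short num;
	unsigned short den;
};

/* new style: plain C scalar types and explicit struct tags */
unsigned char ExampleTiming[] = {0x17, 0x1d};

struct example_entry ExampleTable[] = {
	{1, 1},
};
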
diff --git a/drivers/staging/xgifb/vb_util.c b/drivers/staging/xgifb/vb_util.c
index 87531b49b73..2c40368ceee 100644
--- a/drivers/staging/xgifb/vb_util.c
+++ b/drivers/staging/xgifb/vb_util.c
@@ -1,54 +1,25 @@
-#include "osdef.h"
#include "vb_def.h"
#include "vgatypes.h"
#include "vb_struct.h"
-#ifdef LINUX_KERNEL
#include "XGIfb.h"
#include <asm/io.h>
#include <linux/types.h>
-#endif
-
-#ifdef TC
-#include <stdio.h>
-#include <string.h>
-#include <conio.h>
-#include <dos.h>
-#endif
-
-#ifdef WIN2000
-#include <dderror.h>
-#include <devioctl.h>
-#include <miniport.h>
-#include <ntddvdeo.h>
-#include <video.h>
-
-#include "xgiv.h"
-#include "dd_i2c.h"
-#include "tools.h"
-#endif
-
-#ifdef LINUX_XF86
-#include "xf86.h"
-#include "xf86PciInfo.h"
-#include "xgi.h"
-#include "xgi_regs.h"
-#endif
-
-
-
-
-void XGINew_SetReg1( ULONG , USHORT , USHORT ) ;
-void XGINew_SetReg2( ULONG , USHORT , USHORT ) ;
-void XGINew_SetReg3( ULONG , USHORT ) ;
-void XGINew_SetReg4( ULONG , ULONG ) ;
-UCHAR XGINew_GetReg1( ULONG , USHORT) ;
-UCHAR XGINew_GetReg2( ULONG ) ;
-ULONG XGINew_GetReg3( ULONG ) ;
-void XGINew_ClearDAC( PUCHAR ) ;
-void XGINew_SetRegANDOR(ULONG Port,USHORT Index,USHORT DataAND,USHORT DataOR);
-void XGINew_SetRegOR(ULONG Port,USHORT Index,USHORT DataOR);
-void XGINew_SetRegAND(ULONG Port,USHORT Index,USHORT DataAND);
+
+void XGINew_SetReg1(unsigned long,unsigned short,unsigned short);
+void XGINew_SetReg2(unsigned long,unsigned short,unsigned short);
+void XGINew_SetReg3(unsigned long,unsigned short);
+void XGINew_SetReg4(unsigned long,unsigned long);
+unsigned char XGINew_GetReg1(unsigned long, unsigned short);
+unsigned char XGINew_GetReg2(unsigned long);
+unsigned long XGINew_GetReg3(unsigned long);
+void XGINew_ClearDAC(unsigned char *);
+void XGINew_SetRegANDOR(unsigned long Port,unsigned short Index,
+ unsigned short DataAND,unsigned short DataOR);
+void XGINew_SetRegOR(unsigned long Port,unsigned short Index,
+ unsigned short DataOR);
+void XGINew_SetRegAND(unsigned long Port,unsigned short Index,
+ unsigned short DataAND);
/* --------------------------------------------------------------------- */
@@ -57,15 +28,10 @@ void XGINew_SetRegAND(ULONG Port,USHORT Index,USHORT DataAND);
/* Output : */
/* Description : SR CRTC GR */
/* --------------------------------------------------------------------- */
-void XGINew_SetReg1( ULONG port , USHORT index , USHORT data )
+void XGINew_SetReg1( unsigned long port , unsigned short index , unsigned short data )
{
-#ifdef LINUX_XF86
- OutPortByte( ( PUCHAR )(ULONG)port , index ) ;
- OutPortByte( ( PUCHAR )(ULONG)port + 1 , data ) ;
-#else
- OutPortByte( port , index ) ;
- OutPortByte( port + 1 , data ) ;
-#endif
+ outb(index, port);
+ outb(data, port + 1);
}
@@ -75,9 +41,9 @@ void XGINew_SetReg1( ULONG port , USHORT index , USHORT data )
/* Output : */
/* Description : AR( 3C0 ) */
/* --------------------------------------------------------------------- */
-/*void XGINew_SetReg2( ULONG port , USHORT index , USHORT data )
+/*void XGINew_SetReg2( unsigned long port , unsigned short index , unsigned short data )
{
- InPortByte( ( PUCHAR )port + 0x3da - 0x3c0 ) ;
+ InPortByte((P unsigned char )port + 0x3da - 0x3c0) ;
OutPortByte( XGINew_P3c0 , index ) ;
OutPortByte( XGINew_P3c0 , data ) ;
OutPortByte( XGINew_P3c0 , 0x20 ) ;
@@ -90,9 +56,9 @@ void XGINew_SetReg1( ULONG port , USHORT index , USHORT data )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetReg3( ULONG port , USHORT data )
+void XGINew_SetReg3( unsigned long port , unsigned short data )
{
- OutPortByte( port , data ) ;
+ outb(data, port);
}
@@ -102,9 +68,9 @@ void XGINew_SetReg3( ULONG port , USHORT data )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetReg4( ULONG port , ULONG data )
+void XGINew_SetReg4( unsigned long port , unsigned long data )
{
- OutPortLong( port , data ) ;
+ outl(data, port);
}
@@ -114,18 +80,12 @@ void XGINew_SetReg4( ULONG port , ULONG data )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGINew_GetReg1( ULONG port , USHORT index )
+unsigned char XGINew_GetReg1(unsigned long port, unsigned short index)
{
- UCHAR data ;
-
-#ifdef LINUX_XF86
- OutPortByte( ( PUCHAR )(ULONG)port , index ) ;
- data = InPortByte( ( PUCHAR )(ULONG)port + 1 ) ;
-#else
- OutPortByte( port , index ) ;
- data = InPortByte( port + 1 ) ;
-#endif
+ unsigned char data ;
+ outb(index, port);
+ data = inb(port + 1) ;
return( data ) ;
}
@@ -136,11 +96,11 @@ UCHAR XGINew_GetReg1( ULONG port , USHORT index )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-UCHAR XGINew_GetReg2( ULONG port )
+unsigned char XGINew_GetReg2(unsigned long port)
{
- UCHAR data ;
+ unsigned char data ;
- data = InPortByte( port ) ;
+ data = inb(port) ;
return( data ) ;
}
@@ -152,11 +112,11 @@ UCHAR XGINew_GetReg2( ULONG port )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-ULONG XGINew_GetReg3( ULONG port )
+unsigned long XGINew_GetReg3( unsigned long port )
{
- ULONG data ;
+ unsigned long data ;
- data = InPortLong( port ) ;
+ data = inl(port) ;
return( data ) ;
}
@@ -169,9 +129,9 @@ ULONG XGINew_GetReg3( ULONG port )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetRegANDOR( ULONG Port , USHORT Index , USHORT DataAND , USHORT DataOR )
+void XGINew_SetRegANDOR( unsigned long Port , unsigned short Index , unsigned short DataAND , unsigned short DataOR )
{
- USHORT temp ;
+ unsigned short temp ;
temp = XGINew_GetReg1( Port , Index ) ; /* XGINew_Part1Port index 02 */
temp = ( temp & ( DataAND ) ) | DataOR ;
@@ -185,9 +145,9 @@ void XGINew_SetRegANDOR( ULONG Port , USHORT Index , USHORT DataAND , USHORT Dat
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetRegAND(ULONG Port,USHORT Index,USHORT DataAND)
+void XGINew_SetRegAND(unsigned long Port,unsigned short Index,unsigned short DataAND)
{
- USHORT temp ;
+ unsigned short temp ;
temp = XGINew_GetReg1( Port , Index ) ; /* XGINew_Part1Port index 02 */
temp &= DataAND ;
@@ -201,9 +161,9 @@ void XGINew_SetRegAND(ULONG Port,USHORT Index,USHORT DataAND)
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void XGINew_SetRegOR( ULONG Port , USHORT Index , USHORT DataOR )
+void XGINew_SetRegOR( unsigned long Port , unsigned short Index , unsigned short DataOR )
{
- USHORT temp ;
+ unsigned short temp ;
temp = XGINew_GetReg1( Port , Index ) ; /* XGINew_Part1Port index 02 */
temp |= DataOR ;
@@ -219,29 +179,14 @@ void XGINew_SetRegOR( ULONG Port , USHORT Index , USHORT DataOR )
/* --------------------------------------------------------------------- */
void NewDelaySeconds( int seconds )
{
-#ifdef WIN2000
- int j ;
-#endif
int i ;
for( i = 0 ; i < seconds ; i++ )
{
-#ifdef TC
- delay( 1000 ) ;
-#endif
-
-#ifdef WIN2000
- for ( j = 0 ; j < 20000 ; j++ )
- VideoPortStallExecution( 50 ) ;
-#endif
-#ifdef WINCE_HEADER
-#endif
-#ifdef LINUX_KERNEL
-#endif
}
}
@@ -252,7 +197,7 @@ void NewDelaySeconds( int seconds )
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
-void Newdebugcode( UCHAR code )
+void Newdebugcode(unsigned char code)
{
// OutPortByte ( 0x80 , code ) ;
/* OutPortByte ( 0x300 , code ) ; */
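With the osdef.h wrappers gone, the register helpers in vb_util.c talk to the hardware through the kernel's own port I/O accessors (outb, inb, outl, inl from <asm/io.h>) while keeping the indexed SR/CR access pattern. A minimal sketch of that pattern under hypothetical example_* names, mirroring XGINew_SetReg1, XGINew_GetReg1 and XGINew_SetRegANDOR from the hunks above:

#include <asm/io.h>

/* indexed write: select the register through the index port, then write
 * the data byte to port + 1 (the XGINew_SetReg1 pattern) */
static void example_set_reg(unsigned long port, unsigned short index,
			    unsigned short data)
{
	outb(index, port);
	outb(data, port + 1);
}

/* indexed read: select the register, then read the value back from port + 1 */
static unsigned char example_get_reg(unsigned long port, unsigned short index)
{
	outb(index, port);
	return inb(port + 1);
}

/* read-modify-write built on the two helpers above (XGINew_SetRegANDOR) */
static void example_set_reg_andor(unsigned long port, unsigned short index,
				  unsigned short and_mask, unsigned short or_val)
{
	unsigned short temp = example_get_reg(port, index);

	example_set_reg(port, index, (temp & and_mask) | or_val);
}
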
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 91779d8cfdc..156f6445c88 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -1,15 +1,15 @@
#ifndef _VBUTIL_
#define _VBUTIL_
extern void NewDelaySeconds( int );
-extern void Newdebugcode( UCHAR );
-extern void XGINew_SetReg1(ULONG, USHORT, USHORT);
-extern void XGINew_SetReg3(ULONG, USHORT);
-extern UCHAR XGINew_GetReg1(ULONG, USHORT);
-extern UCHAR XGINew_GetReg2(ULONG);
-extern void XGINew_SetReg4(ULONG, ULONG);
-extern ULONG XGINew_GetReg3(ULONG);
-extern void XGINew_SetRegOR(ULONG Port,USHORT Index,USHORT DataOR);
-extern void XGINew_SetRegAND(ULONG Port,USHORT Index,USHORT DataAND);
-extern void XGINew_SetRegANDOR(ULONG Port,USHORT Index,USHORT DataAND,USHORT DataOR);
+extern void Newdebugcode(unsigned char);
+extern void XGINew_SetReg1(unsigned long, unsigned short, unsigned short);
+extern void XGINew_SetReg3(unsigned long, unsigned short);
+extern unsigned char XGINew_GetReg1(unsigned long, unsigned short);
+extern unsigned char XGINew_GetReg2(unsigned long);
+extern void XGINew_SetReg4(unsigned long, unsigned long);
+extern unsigned long XGINew_GetReg3(unsigned long);
+extern void XGINew_SetRegOR(unsigned long Port,unsigned short Index,unsigned short DataOR);
+extern void XGINew_SetRegAND(unsigned long Port,unsigned short Index,unsigned short DataAND);
+extern void XGINew_SetRegANDOR(unsigned long Port,unsigned short Index,unsigned short DataAND,unsigned short DataOR);
#endif
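The header now exports the same helpers with plain C types, so a caller only needs vb_util.h. A hypothetical call site for illustration; the sequencer index port 0x3c4 and register 0x1e are example values, not taken from the driver:

#include "vb_util.h"

static void example_tweak_sr(void)
{
	/* clear bit 6 and set bit 5 of SR 0x1e, preserving the other bits */
	XGINew_SetRegANDOR(0x3c4, 0x1e, 0xBF, 0x20);
}
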
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 295ea860ae4..df839eeb5ef 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -2,136 +2,14 @@
#ifndef _VGATYPES_
#define _VGATYPES_
-#include "osdef.h"
-
-#ifdef LINUX_XF86
-#include "xf86Version.h"
-#include "xf86Pci.h"
-#endif
-
-#ifdef LINUX_KERNEL /* We don't want the X driver to depend on kernel source */
#include <linux/ioctl.h>
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef NULL
-#define NULL 0
-#endif
-
-#ifndef CHAR
-typedef char CHAR;
-#endif
-
-#ifndef SHORT
-typedef short SHORT;
-#endif
-
-#ifndef LONG
-typedef long LONG;
-#endif
-
-#ifndef UCHAR
-typedef unsigned char UCHAR;
-#endif
-
-#ifndef USHORT
-typedef unsigned short USHORT;
-#endif
-
-#ifndef ULONG
-typedef unsigned long ULONG;
-#endif
-
-#ifndef PUCHAR
-typedef UCHAR *PUCHAR;
-#endif
-
-#ifndef PUSHORT
-typedef USHORT *PUSHORT;
-#endif
-
-#ifndef PLONGU
-typedef ULONG *PULONG;
-#endif
-
-#ifndef VOID
-typedef void VOID;
-#endif
-
-#ifndef PVOID
-typedef void *PVOID;
-#endif
-
-#ifndef BOOLEAN
-typedef UCHAR BOOLEAN;
-#endif
-/*
-#ifndef bool
-typedef UCHAR bool;
-#endif
-*/
-#ifdef LINUX_KERNEL
-typedef unsigned long XGIIOADDRESS;
-#endif
-
-#ifdef LINUX_XF86
-#if XF86_VERSION_CURRENT < XF86_VERSION_NUMERIC(4,2,0,0,0)
-typedef unsigned char IOADDRESS;
-typedef unsigned char XGIIOADDRESS;
-#else
-typedef IOADDRESS XGIIOADDRESS;
-#endif
-#endif
#ifndef VBIOS_VER_MAX_LENGTH
-#define VBIOS_VER_MAX_LENGTH 4
-#endif
-
-#ifndef WIN2000
-
-#ifndef LINUX_KERNEL /* For the linux kernel, this is defined in xgifb.h */
-#ifndef XGI_CHIP_TYPE
-typedef enum _XGI_CHIP_TYPE {
- XGI_VGALegacy = 0,
-#ifdef LINUX_XF86
- XGI_530,
- XGI_OLD,
-#endif
- XGI_300,
- XGI_630,
- XGI_640,
- XGI_315H,
- XGI_315,
- XGI_315PRO,
- XGI_550,
- XGI_650,
- XGI_650M,
- XGI_740,
- XGI_330,
- XGI_661,
- XGI_660,
- XGI_760,
- XG40 = 32,
- XG41,
- XG42,
- XG45,
- XG20 = 48,
- XG21,
- XG27,
- MAX_XGI_CHIP
-} XGI_CHIP_TYPE;
-#endif
+#define VBIOS_VER_MAX_LENGTH 5
#endif
#ifndef XGI_VB_CHIP_TYPE
-typedef enum _XGI_VB_CHIP_TYPE {
+enum XGI_VB_CHIP_TYPE {
VB_CHIP_Legacy = 0,
VB_CHIP_301,
VB_CHIP_301B,
@@ -143,11 +21,11 @@ typedef enum _XGI_VB_CHIP_TYPE {
VB_CHIP_302ELV,
VB_CHIP_UNKNOWN, /* other video bridge or no video bridge */
MAX_VB_CHIP
-} XGI_VB_CHIP_TYPE;
+};
#endif
#ifndef XGI_LCD_TYPE
-typedef enum _XGI_LCD_TYPE {
+enum XGI_LCD_TYPE {
LCD_INVALID = 0,
LCD_320x480, /* FSTN, DSTN */
LCD_640x480,
@@ -171,155 +49,90 @@ typedef enum _XGI_LCD_TYPE {
LCD_2048x1536,
LCD_CUSTOM,
LCD_UNKNOWN
-} XGI_LCD_TYPE;
+};
#endif
-#endif /* not WIN2000 */
-
-#ifndef PXGI_DSReg
-typedef struct _XGI_DSReg
+struct XGI_DSReg
{
- UCHAR jIdx;
- UCHAR jVal;
-} XGI_DSReg, *PXGI_DSReg;
-#endif
-
-#ifndef XGI_HW_DEVICE_INFO
-
-typedef struct _XGI_HW_DEVICE_INFO XGI_HW_DEVICE_INFO, *PXGI_HW_DEVICE_INFO;
-
-typedef BOOLEAN (*PXGI_QUERYSPACE) (PXGI_HW_DEVICE_INFO, ULONG, ULONG, ULONG *);
+ unsigned char jIdx;
+ unsigned char jVal;
+};
-struct _XGI_HW_DEVICE_INFO
+struct xgi_hw_device_info
{
- ULONG ulExternalChip; /* NO VB or other video bridge*/
+ unsigned long ulExternalChip; /* NO VB or other video bridge*/
/* if ujVBChipID = VB_CHIP_UNKNOWN, */
-#ifdef LINUX_XF86
- PCITAG PciTag; /* PCI Tag */
-#endif
- PUCHAR pjVirtualRomBase; /* ROM image */
+ unsigned char *pjVirtualRomBase; /* ROM image */
- BOOLEAN UseROM; /* Use the ROM image if provided */
+ unsigned char UseROM; /* Use the ROM image if provided */
- PVOID pDevice;
+ void *pDevice;
- PUCHAR pjVideoMemoryAddress;/* base virtual memory address */
+ unsigned char *pjVideoMemoryAddress;/* base virtual memory address */
/* of Linear VGA memory */
- ULONG ulVideoMemorySize; /* size, in bytes, of the memory on the board */
+ unsigned long ulVideoMemorySize; /* size, in bytes, of the memory on the board */
- PUCHAR pjIOAddress; /* base I/O address of VGA ports (0x3B0) */
+ unsigned char *pjIOAddress; /* base I/O address of VGA ports (0x3B0) */
- PUCHAR pjCustomizedROMImage;
+ unsigned char *pjCustomizedROMImage;
- PUCHAR pj2ndVideoMemoryAddress;
- ULONG ul2ndVideoMemorySize;
+ unsigned char *pj2ndVideoMemoryAddress;
+ unsigned long ul2ndVideoMemorySize;
- PUCHAR pj2ndIOAddress;
-/*#ifndef WIN2000
- XGIIOADDRESS pjIOAddress; // base I/O address of VGA ports (0x3B0)
-#endif */
- UCHAR jChipType; /* Used to Identify Graphics Chip */
+ unsigned char *pj2ndIOAddress;
+ unsigned char jChipType; /* Used to Identify Graphics Chip */
/* defined in the data structure type */
/* "XGI_CHIP_TYPE" */
- UCHAR jChipRevision; /* Used to Identify Graphics Chip Revision */
+ unsigned char jChipRevision; /* Used to Identify Graphics Chip Revision */
- UCHAR ujVBChipID; /* the ID of video bridge */
+ unsigned char ujVBChipID; /* the ID of video bridge */
/* defined in the data structure type */
/* "XGI_VB_CHIP_TYPE" */
- BOOLEAN bNewScratch;
+ unsigned char bNewScratch;
- ULONG ulCRT2LCDType; /* defined in the data structure type */
+ unsigned long ulCRT2LCDType; /* defined in the data structure type */
- ULONG usExternalChip; /* NO VB or other video bridge (other than */
+ unsigned long usExternalChip; /* NO VB or other video bridge (other than */
/* video bridge) */
- BOOLEAN bIntegratedMMEnabled;/* supporting integration MM enable */
+ unsigned char bIntegratedMMEnabled;/* supporting integration MM enable */
- BOOLEAN bSkipDramSizing; /* True: Skip video memory sizing. */
+ unsigned char bSkipDramSizing; /* True: Skip video memory sizing. */
- BOOLEAN bSkipSense;
+ unsigned char bSkipSense;
- BOOLEAN bIsPowerSaving; /* True: XGIInit() is invoked by power management,
+ unsigned char bIsPowerSaving; /* True: XGIInit() is invoked by power management,
otherwise by 2nd adapter's initialzation */
- PXGI_DSReg pSR; /* restore SR registers in initial function. */
+ struct XGI_DSReg *pSR; /* restore SR registers in initial function. */
/* end data :(idx, val) = (FF, FF). */
/* Note : restore SR registers if */
- /* bSkipDramSizing = TRUE */
+ /* bSkipDramSizing = 1 */
- PXGI_DSReg pCR; /* restore CR registers in initial function. */
+ struct XGI_DSReg *pCR; /* restore CR registers in initial function. */
/* end data :(idx, val) = (FF, FF) */
/* Note : restore cR registers if */
- /* bSkipDramSizing = TRUE */
-/*
-#endif
-*/
+ /* bSkipDramSizing = 1 */
- PXGI_QUERYSPACE pQueryVGAConfigSpace;
+ unsigned char(*pQueryVGAConfigSpace)(struct xgi_hw_device_info *,
+ unsigned long, unsigned long,
+ unsigned long *);
- PXGI_QUERYSPACE pQueryNorthBridgeSpace;
+ unsigned char(*pQueryNorthBridgeSpace)(struct xgi_hw_device_info *,
+ unsigned long, unsigned long,
+ unsigned long *);
- UCHAR szVBIOSVer[VBIOS_VER_MAX_LENGTH];
+ unsigned char szVBIOSVer[VBIOS_VER_MAX_LENGTH];
};
-#endif
/* Addtional IOCTL for communication xgifb <> X driver */
/* If changing this, xgifb.h must also be changed (for xgifb) */
-#ifdef LINUX_XF86 /* We don't want the X driver to depend on the kernel source */
-
-/* ioctl for identifying and giving some info (esp. memory heap start) */
-#define XGIFB_GET_INFO 0x80046ef8 /* Wow, what a terrible hack... */
-
-/* Structure argument for XGIFB_GET_INFO ioctl */
-typedef struct _XGIFB_INFO xgifb_info, *pxgifb_info;
-
-struct _XGIFB_INFO {
- CARD32 xgifb_id; /* for identifying xgifb */
-#ifndef XGIFB_ID
-#define XGIFB_ID 0x53495346 /* Identify myself with 'XGIF' */
-#endif
- CARD32 chip_id; /* PCI ID of detected chip */
- CARD32 memory; /* video memory in KB which xgifb manages */
- CARD32 heapstart; /* heap start (= xgifb "mem" argument) in KB */
- CARD8 fbvidmode; /* current xgifb mode */
-
- CARD8 xgifb_version;
- CARD8 xgifb_revision;
- CARD8 xgifb_patchlevel;
-
- CARD8 xgifb_caps; /* xgifb's capabilities */
-
- CARD32 xgifb_tqlen; /* turbo queue length (in KB) */
-
- CARD32 xgifb_pcibus; /* The card's PCI ID */
- CARD32 xgifb_pcislot;
- CARD32 xgifb_pcifunc;
-
- CARD8 xgifb_lcdpdc;
-
- CARD8 xgifb_lcda;
-
- CARD32 xgifb_vbflags;
- CARD32 xgifb_currentvbflags;
-
- CARD32 xgifb_scalelcd;
- CARD32 xgifb_specialtiming;
-
- CARD8 xgifb_haveemi;
- CARD8 xgifb_emi30,xgifb_emi31,xgifb_emi32,xgifb_emi33;
- CARD8 xgifb_haveemilcd;
-
- CARD8 xgifb_lcdpdca;
-
- CARD8 reserved[212]; /* for future use */
-};
-#endif
#endif
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
new file mode 100644
index 00000000000..4654ae2eb42
--- /dev/null
+++ b/drivers/staging/zram/Kconfig
@@ -0,0 +1,29 @@
+config ZRAM
+ tristate "Compressed RAM block device support"
+ depends on BLOCK
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ default n
+ help
+ Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
+ Pages written to these disks are compressed and stored in memory
+ itself. These disks allow very fast I/O, and the compression provides
+ significant memory savings.
+
+ It has several use cases: for example, /tmp storage, swap disks,
+ and many more.
+
+ See zram.txt for more information.
+ Project home: http://compcache.googlecode.com/
+
+config ZRAM_STATS
+ bool "Enable statistics for compressed RAM disks"
+ depends on ZRAM
+ default y
+ help
+ Enable statistics collection for compressed RAM devices. Statistics
+ are exported through the ioctl interface, so you have to use the
+ zramconfig program to read them. This adds only minimal overhead.
+
+ If unsure, say Y.
+
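
The help text above notes that statistics are exported through an ioctl interface rather than sysfs, with zramconfig as the documented front end. As a rough, hypothetical sketch of what such a query reduces to, the snippet below issues ZRAMIO_GET_STATS (from zram_ioctl.h, added later in this patch) against an already-initialized device; the device path, the userspace typedefs for the kernel integer types and the error handling are illustrative assumptions, not part of the patch.

/* zram_stats_sketch.c - illustration only, not part of this patch.
 * Assumes the staging header zram_ioctl.h is on the include path and
 * that /dev/zram0 has already been initialized (ZRAMIO_INIT). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

typedef __u64 u64;    /* zram_ioctl.h uses kernel integer types */
typedef __u32 u32;
#include "zram_ioctl.h"

int main(void)
{
    struct zram_ioctl_stats s;
    int fd = open("/dev/zram0", O_RDONLY);

    if (fd < 0 || ioctl(fd, ZRAMIO_GET_STATS, &s) < 0) {
        perror("ZRAMIO_GET_STATS");
        return 1;
    }
    printf("disksize=%llu reads=%llu writes=%llu mem_used=%llu\n",
           (unsigned long long)s.disksize,
           (unsigned long long)s.num_reads,
           (unsigned long long)s.num_writes,
           (unsigned long long)s.mem_used_total);
    close(fd);
    return 0;
}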
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
new file mode 100644
index 00000000000..b2c087aa105
--- /dev/null
+++ b/drivers/staging/zram/Makefile
@@ -0,0 +1,3 @@
+zram-objs := zram_drv.o xvmalloc.o
+
+obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/staging/ramzswap/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
index 3fdbb8ada82..3fdbb8ada82 100644
--- a/drivers/staging/ramzswap/xvmalloc.c
+++ b/drivers/staging/zram/xvmalloc.c
diff --git a/drivers/staging/ramzswap/xvmalloc.h b/drivers/staging/zram/xvmalloc.h
index 5b1a81aa5fa..5b1a81aa5fa 100644
--- a/drivers/staging/ramzswap/xvmalloc.h
+++ b/drivers/staging/zram/xvmalloc.h
diff --git a/drivers/staging/ramzswap/xvmalloc_int.h b/drivers/staging/zram/xvmalloc_int.h
index e23ed5c8b8e..e23ed5c8b8e 100644
--- a/drivers/staging/ramzswap/xvmalloc_int.h
+++ b/drivers/staging/zram/xvmalloc_int.h
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
new file mode 100644
index 00000000000..520edc1bea7
--- /dev/null
+++ b/drivers/staging/zram/zram.txt
@@ -0,0 +1,62 @@
+zram: Compressed RAM based block devices
+----------------------------------------
+
+Project home: http://compcache.googlecode.com/
+
+* Introduction
+
+The zram module creates RAM based block devices: /dev/zramX (X = 0, 1, ...).
+Pages written to these disks are compressed and stored in memory itself.
+These disks allow very fast I/O, and the compression provides significant
+memory savings.
+
+See project home for use cases, performance numbers and a lot more.
+
+Individual zram devices are configured and initialized using the zramconfig
+userspace utility, as shown in the examples below. See the zramconfig man
+page for more details.
+
+* Usage
+
+The following shows a typical sequence of steps for using zram.
+
+1) Load Modules:
+ modprobe zram num_devices=4
+ This creates 4 (uninitialized) devices: /dev/zram{0,1,2,3}
+ (num_devices parameter is optional. Default: 1)
+
+2) Initialize:
+ Use zramconfig utility to configure and initialize individual
+ zram devices. For example:
+ zramconfig /dev/zram0 --init # uses default value of disksize_kb
+ zramconfig /dev/zram1 --disksize_kb=102400 # 100MB /dev/zram1
+
+ *See zramconfig man page for more details and examples*
+
+3) Activate:
+ mkswap /dev/zram0
+ swapon /dev/zram0
+
+ mkfs.ext4 /dev/zram1
+ mount /dev/zram1 /tmp
+
+4) Stats:
+ zramconfig /dev/zram0 --stats
+ zramconfig /dev/zram1 --stats
+
+5) Deactivate:
+ swapoff /dev/zram0
+ umount /dev/zram1
+
+6) Reset:
+ zramconfig /dev/zram0 --reset
+ zramconfig /dev/zram1 --reset
+ (This frees memory allocated for the given device).
+
+
+Please report any problems at:
+ - Mailing list: linux-mm-cc at laptop dot org
+ - Issue tracker: http://code.google.com/p/compcache/issues/list
+
+Nitin Gupta
+ngupta@vflare.org
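
The steps above drive everything through zramconfig. As a minimal sketch of what step 2 boils down to at the ioctl level, the hypothetical snippet below sets a 100 MB disksize on /dev/zram1 and initializes it using ZRAMIO_SET_DISKSIZE_KB and ZRAMIO_INIT from zram_ioctl.h (added later in this patch); the path, the size and the userspace typedefs are assumptions for illustration only, and real users should stick with zramconfig.

/* zram_init_sketch.c - illustration only; mirrors
 * "zramconfig /dev/zram1 --disksize_kb=102400 --init" at the ioctl level. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

typedef __u64 u64;    /* zram_ioctl.h uses kernel integer types */
typedef __u32 u32;
#include "zram_ioctl.h"

int main(void)
{
    size_t disksize_kb = 102400;    /* 100 MB, as in the example above */
    int fd = open("/dev/zram1", O_RDWR);

    if (fd < 0) {
        perror("open /dev/zram1");
        return 1;
    }
    /* The size must be set before ZRAMIO_INIT; afterwards it returns -EBUSY. */
    if (ioctl(fd, ZRAMIO_SET_DISKSIZE_KB, &disksize_kb) < 0 ||
        ioctl(fd, ZRAMIO_INIT) < 0) {
        perror("zram ioctl");
        close(fd);
        return 1;
    }
    close(fd);
    return 0;    /* the device is now ready for mkswap/mkfs */
}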
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
new file mode 100644
index 00000000000..722c840ac63
--- /dev/null
+++ b/drivers/staging/zram/zram_drv.c
@@ -0,0 +1,806 @@
+/*
+ * Compressed RAM block device
+ *
+ * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ *
+ * Project home: http://compcache.googlecode.com
+ */
+
+#define KMSG_COMPONENT "zram"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/device.h>
+#include <linux/genhd.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include "zram_drv.h"
+
+/* Globals */
+static int zram_major;
+static struct zram *devices;
+
+/* Module params (documentation at end) */
+static unsigned int num_devices;
+
+static int zram_test_flag(struct zram *zram, u32 index,
+ enum zram_pageflags flag)
+{
+ return zram->table[index].flags & BIT(flag);
+}
+
+static void zram_set_flag(struct zram *zram, u32 index,
+ enum zram_pageflags flag)
+{
+ zram->table[index].flags |= BIT(flag);
+}
+
+static void zram_clear_flag(struct zram *zram, u32 index,
+ enum zram_pageflags flag)
+{
+ zram->table[index].flags &= ~BIT(flag);
+}
+
+static int page_zero_filled(void *ptr)
+{
+ unsigned int pos;
+ unsigned long *page;
+
+ page = (unsigned long *)ptr;
+
+ for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
+ if (page[pos])
+ return 0;
+ }
+
+ return 1;
+}
+
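page_zero_filled() scans the page one long word at a time so that an all-zero page can be recorded with just the ZRAM_ZERO flag and no allocation at all. A standalone sketch of the same check, with PAGE_SIZE fixed at 4096 purely for illustration:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096    /* assumed for the sketch; the kernel uses the arch value */

static int page_zero_filled(const void *ptr)
{
    const unsigned long *page = ptr;
    unsigned int pos;

    for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++)
        if (page[pos])
            return 0;
    return 1;
}

int main(void)
{
    unsigned long buf[PAGE_SIZE / sizeof(unsigned long)];

    memset(buf, 0, sizeof(buf));
    printf("%d\n", page_zero_filled(buf));    /* 1: all zeroes */
    buf[10] = 0xdeadbeef;
    printf("%d\n", page_zero_filled(buf));    /* 0: one non-zero word */
    return 0;
}
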
+static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
+{
+ if (!zram->disksize) {
+ pr_info(
+ "disk size not provided. You can use disksize_kb module "
+ "param to specify size.\nUsing default: (%u%% of RAM).\n",
+ default_disksize_perc_ram
+ );
+ zram->disksize = default_disksize_perc_ram *
+ (totalram_bytes / 100);
+ }
+
+ if (zram->disksize > 2 * (totalram_bytes)) {
+ pr_info(
+ "There is little point creating a zram of greater than "
+ "twice the size of memory since we expect a 2:1 compression "
+ "ratio. Note that zram uses about 0.1%% of the size of "
+ "the disk when not in use so a huge zram is "
+ "wasteful.\n"
+ "\tMemory Size: %zu kB\n"
+ "\tSize you selected: %zu kB\n"
+ "Continuing anyway ...\n",
+ totalram_bytes >> 10, zram->disksize
+ );
+ }
+
+ zram->disksize &= PAGE_MASK;
+}
+
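To make the default sizing concrete: with default_disksize_perc_ram = 25, a machine with (say) 2 GiB of RAM ends up with a default disksize of roughly 512 MiB, and the final PAGE_MASK step rounds the byte count down to a page boundary. A small sketch of that arithmetic (the 2 GiB figure and the 4 KiB page size are assumptions for the example):

#include <stdio.h>

int main(void)
{
    unsigned long long totalram_bytes = 2ULL << 30;    /* example: 2 GiB of RAM */
    unsigned int default_disksize_perc_ram = 25;       /* from zram_drv.h */
    unsigned long long page_mask = ~0xfffULL;          /* 4 KiB pages assumed */
    unsigned long long disksize;

    disksize = default_disksize_perc_ram * (totalram_bytes / 100);
    disksize &= page_mask;    /* same rounding as zram_set_disksize() */
    printf("%llu MiB\n", disksize >> 20);    /* prints 511, i.e. just under 25% of 2 GiB after rounding down */
    return 0;
}
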
+static void zram_ioctl_get_stats(struct zram *zram,
+ struct zram_ioctl_stats *s)
+{
+ s->disksize = zram->disksize;
+
+#if defined(CONFIG_ZRAM_STATS)
+ {
+ struct zram_stats *rs = &zram->stats;
+ size_t succ_writes, mem_used;
+ unsigned int good_compress_perc = 0, no_compress_perc = 0;
+
+ mem_used = xv_get_total_size_bytes(zram->mem_pool)
+ + (rs->pages_expand << PAGE_SHIFT);
+ succ_writes = zram_stat64_read(zram, &rs->num_writes) -
+ zram_stat64_read(zram, &rs->failed_writes);
+
+ if (succ_writes && rs->pages_stored) {
+ good_compress_perc = rs->good_compress * 100
+ / rs->pages_stored;
+ no_compress_perc = rs->pages_expand * 100
+ / rs->pages_stored;
+ }
+
+ s->num_reads = zram_stat64_read(zram, &rs->num_reads);
+ s->num_writes = zram_stat64_read(zram, &rs->num_writes);
+ s->failed_reads = zram_stat64_read(zram, &rs->failed_reads);
+ s->failed_writes = zram_stat64_read(zram, &rs->failed_writes);
+ s->invalid_io = zram_stat64_read(zram, &rs->invalid_io);
+ s->notify_free = zram_stat64_read(zram, &rs->notify_free);
+ s->pages_zero = rs->pages_zero;
+
+ s->good_compress_pct = good_compress_perc;
+ s->pages_expand_pct = no_compress_perc;
+
+ s->pages_stored = rs->pages_stored;
+ s->pages_used = mem_used >> PAGE_SHIFT;
+ s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
+ s->compr_data_size = rs->compr_size;
+ s->mem_used_total = mem_used;
+ }
+#endif /* CONFIG_ZRAM_STATS */
+}
+
+static void zram_free_page(struct zram *zram, size_t index)
+{
+ u32 clen;
+ void *obj;
+
+ struct page *page = zram->table[index].page;
+ u32 offset = zram->table[index].offset;
+
+ if (unlikely(!page)) {
+ /*
+ * No memory is allocated for zero filled pages.
+ * Simply clear zero page flag.
+ */
+ if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+ zram_clear_flag(zram, index, ZRAM_ZERO);
+ zram_stat_dec(&zram->stats.pages_zero);
+ }
+ return;
+ }
+
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ clen = PAGE_SIZE;
+ __free_page(page);
+ zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
+ zram_stat_dec(&zram->stats.pages_expand);
+ goto out;
+ }
+
+ obj = kmap_atomic(page, KM_USER0) + offset;
+ clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
+ kunmap_atomic(obj, KM_USER0);
+
+ xv_free(zram->mem_pool, page, offset);
+ if (clen <= PAGE_SIZE / 2)
+ zram_stat_dec(&zram->stats.good_compress);
+
+out:
+ zram->stats.compr_size -= clen;
+ zram_stat_dec(&zram->stats.pages_stored);
+
+ zram->table[index].page = NULL;
+ zram->table[index].offset = 0;
+}
+
+static void handle_zero_page(struct page *page)
+{
+ void *user_mem;
+
+ user_mem = kmap_atomic(page, KM_USER0);
+ memset(user_mem, 0, PAGE_SIZE);
+ kunmap_atomic(user_mem, KM_USER0);
+
+ flush_dcache_page(page);
+}
+
+static void handle_uncompressed_page(struct zram *zram,
+ struct page *page, u32 index)
+{
+ unsigned char *user_mem, *cmem;
+
+ user_mem = kmap_atomic(page, KM_USER0);
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
+
+ memcpy(user_mem, cmem, PAGE_SIZE);
+ kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(cmem, KM_USER1);
+
+ flush_dcache_page(page);
+}
+
+static int zram_read(struct zram *zram, struct bio *bio)
+{
+
+ int i;
+ u32 index;
+ struct bio_vec *bvec;
+
+ zram_stat64_inc(zram, &zram->stats.num_reads);
+
+ index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ bio_for_each_segment(bvec, bio, i) {
+ int ret;
+ size_t clen;
+ struct page *page;
+ struct zobj_header *zheader;
+ unsigned char *user_mem, *cmem;
+
+ page = bvec->bv_page;
+
+ if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+ handle_zero_page(page);
+ continue;
+ }
+
+ /* Requested page is not present in compressed area */
+ if (unlikely(!zram->table[index].page)) {
+ pr_debug("Read before write: sector=%lu, size=%u",
+ (ulong)(bio->bi_sector), bio->bi_size);
+ /* Do nothing */
+ continue;
+ }
+
+ /* Page is stored uncompressed since it's incompressible */
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ handle_uncompressed_page(zram, page, index);
+ continue;
+ }
+
+ user_mem = kmap_atomic(page, KM_USER0);
+ clen = PAGE_SIZE;
+
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
+
+ ret = lzo1x_decompress_safe(
+ cmem + sizeof(*zheader),
+ xv_get_object_size(cmem) - sizeof(*zheader),
+ user_mem, &clen);
+
+ kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(cmem, KM_USER1);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n",
+ ret, index);
+ zram_stat64_inc(zram, &zram->stats.failed_reads);
+ goto out;
+ }
+
+ flush_dcache_page(page);
+ index++;
+ }
+
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio_endio(bio, 0);
+ return 0;
+
+out:
+ bio_io_error(bio);
+ return 0;
+}
+
+static int zram_write(struct zram *zram, struct bio *bio)
+{
+ int i;
+ u32 index;
+ struct bio_vec *bvec;
+
+ zram_stat64_inc(zram, &zram->stats.num_writes);
+
+ index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+
+ bio_for_each_segment(bvec, bio, i) {
+ int ret;
+ u32 offset;
+ size_t clen;
+ struct zobj_header *zheader;
+ struct page *page, *page_store;
+ unsigned char *user_mem, *cmem, *src;
+
+ page = bvec->bv_page;
+ src = zram->compress_buffer;
+
+ /*
+ * System overwrites unused sectors. Free memory associated
+ * with this sector now.
+ */
+ if (zram->table[index].page ||
+ zram_test_flag(zram, index, ZRAM_ZERO))
+ zram_free_page(zram, index);
+
+ mutex_lock(&zram->lock);
+
+ user_mem = kmap_atomic(page, KM_USER0);
+ if (page_zero_filled(user_mem)) {
+ kunmap_atomic(user_mem, KM_USER0);
+ mutex_unlock(&zram->lock);
+ zram_stat_inc(&zram->stats.pages_zero);
+ zram_set_flag(zram, index, ZRAM_ZERO);
+ continue;
+ }
+
+ ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
+ zram->compress_workmem);
+
+ kunmap_atomic(user_mem, KM_USER0);
+
+ if (unlikely(ret != LZO_E_OK)) {
+ mutex_unlock(&zram->lock);
+ pr_err("Compression failed! err=%d\n", ret);
+ zram_stat64_inc(zram, &zram->stats.failed_writes);
+ goto out;
+ }
+
+ /*
+ * Page is incompressible. Store it as-is (uncompressed),
+ * since returning too many disk write errors has the side
+ * effect of hanging the system.
+ */
+ if (unlikely(clen > max_zpage_size)) {
+ clen = PAGE_SIZE;
+ page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+ if (unlikely(!page_store)) {
+ mutex_unlock(&zram->lock);
+ pr_info("Error allocating memory for "
+ "incompressible page: %u\n", index);
+ zram_stat64_inc(zram,
+ &zram->stats.failed_writes);
+ goto out;
+ }
+
+ offset = 0;
+ zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+ zram_stat_inc(&zram->stats.pages_expand);
+ zram->table[index].page = page_store;
+ src = kmap_atomic(page, KM_USER0);
+ goto memstore;
+ }
+
+ if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
+ &zram->table[index].page, &offset,
+ GFP_NOIO | __GFP_HIGHMEM)) {
+ mutex_unlock(&zram->lock);
+ pr_info("Error allocating memory for compressed "
+ "page: %u, size=%zu\n", index, clen);
+ zram_stat64_inc(zram, &zram->stats.failed_writes);
+ goto out;
+ }
+
+memstore:
+ zram->table[index].offset = offset;
+
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
+
+#if 0
+ /* Back-reference needed for memory defragmentation */
+ if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
+ zheader = (struct zobj_header *)cmem;
+ zheader->table_idx = index;
+ cmem += sizeof(*zheader);
+ }
+#endif
+
+ memcpy(cmem, src, clen);
+
+ kunmap_atomic(cmem, KM_USER1);
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ kunmap_atomic(src, KM_USER0);
+
+ /* Update stats */
+ zram->stats.compr_size += clen;
+ zram_stat_inc(&zram->stats.pages_stored);
+ if (clen <= PAGE_SIZE / 2)
+ zram_stat_inc(&zram->stats.good_compress);
+
+ mutex_unlock(&zram->lock);
+ index++;
+ }
+
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio_endio(bio, 0);
+ return 0;
+
+out:
+ bio_io_error(bio);
+ return 0;
+}
+
+/*
+ * Check if request is within bounds and page aligned.
+ */
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
+{
+ if (unlikely(
+ (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
+ (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
+ (bio->bi_size & (PAGE_SIZE - 1)))) {
+
+ return 0;
+ }
+
+ /* I/O request is valid */
+ return 1;
+}
+
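valid_io_request() enforces that every bio is fully page-aligned: with 512-byte sectors and 4 KiB pages SECTORS_PER_PAGE is 8, so the start sector must be a multiple of 8, the length a multiple of PAGE_SIZE, and the request must start inside the disk. A small sketch of the same predicate with example values (the sector and page sizes are assumed, and the 100 MiB disksize is arbitrary):

#include <stdio.h>

#define SECTOR_SHIFT            9       /* 512-byte sectors */
#define PAGE_SIZE               4096    /* assumed for the sketch */
#define SECTORS_PER_PAGE_SHIFT  (12 - SECTOR_SHIFT)    /* PAGE_SHIFT - SECTOR_SHIFT */
#define SECTORS_PER_PAGE        (1 << SECTORS_PER_PAGE_SHIFT)

static int valid_io_request(unsigned long long disksize,
                            unsigned long long sector, unsigned int size)
{
    if (sector >= (disksize >> SECTOR_SHIFT) ||
        (sector & (SECTORS_PER_PAGE - 1)) ||
        (size & (PAGE_SIZE - 1)))
        return 0;
    return 1;
}

int main(void)
{
    unsigned long long disksize = 100ULL << 20;    /* 100 MiB example device */

    printf("%d\n", valid_io_request(disksize, 0, 4096));    /* 1: page aligned */
    printf("%d\n", valid_io_request(disksize, 3, 4096));    /* 0: sector not on a page boundary */
    printf("%d\n", valid_io_request(disksize, 8, 512));     /* 0: partial page */
    return 0;
}
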
+/*
+ * Handler function for all zram I/O requests.
+ */
+static int zram_make_request(struct request_queue *queue, struct bio *bio)
+{
+ int ret = 0;
+ struct zram *zram = queue->queuedata;
+
+ if (unlikely(!zram->init_done)) {
+ bio_io_error(bio);
+ return 0;
+ }
+
+ if (!valid_io_request(zram, bio)) {
+ zram_stat64_inc(zram, &zram->stats.invalid_io);
+ bio_io_error(bio);
+ return 0;
+ }
+
+ switch (bio_data_dir(bio)) {
+ case READ:
+ ret = zram_read(zram, bio);
+ break;
+
+ case WRITE:
+ ret = zram_write(zram, bio);
+ break;
+ }
+
+ return ret;
+}
+
+static void reset_device(struct zram *zram)
+{
+ size_t index;
+
+ /* Do not accept any new I/O request */
+ zram->init_done = 0;
+
+ /* Free various per-device buffers */
+ kfree(zram->compress_workmem);
+ free_pages((unsigned long)zram->compress_buffer, 1);
+
+ zram->compress_workmem = NULL;
+ zram->compress_buffer = NULL;
+
+ /* Free all pages that are still in this zram device */
+ for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
+ struct page *page;
+ u16 offset;
+
+ page = zram->table[index].page;
+ offset = zram->table[index].offset;
+
+ if (!page)
+ continue;
+
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ __free_page(page);
+ else
+ xv_free(zram->mem_pool, page, offset);
+ }
+
+ vfree(zram->table);
+ zram->table = NULL;
+
+ xv_destroy_pool(zram->mem_pool);
+ zram->mem_pool = NULL;
+
+ /* Reset stats */
+ memset(&zram->stats, 0, sizeof(zram->stats));
+
+ zram->disksize = 0;
+}
+
+static int zram_ioctl_init_device(struct zram *zram)
+{
+ int ret;
+ size_t num_pages;
+
+ if (zram->init_done) {
+ pr_info("Device already initialized!\n");
+ return -EBUSY;
+ }
+
+ zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
+
+ zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!zram->compress_workmem) {
+ pr_err("Error allocating compressor working memory!\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
+ if (!zram->compress_buffer) {
+ pr_err("Error allocating compressor buffer space\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ num_pages = zram->disksize >> PAGE_SHIFT;
+ zram->table = vmalloc(num_pages * sizeof(*zram->table));
+ if (!zram->table) {
+ pr_err("Error allocating zram address table\n");
+ /* To prevent accessing table entries during cleanup */
+ zram->disksize = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+ memset(zram->table, 0, num_pages * sizeof(*zram->table));
+
+ set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+
+ /* zram devices sort of resemble non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+ zram->mem_pool = xv_create_pool();
+ if (!zram->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ zram->init_done = 1;
+
+ pr_debug("Initialization done!\n");
+ return 0;
+
+fail:
+ reset_device(zram);
+
+ pr_err("Initialization failed: err=%d\n", ret);
+ return ret;
+}
+
+static int zram_ioctl_reset_device(struct zram *zram)
+{
+ if (zram->init_done)
+ reset_device(zram);
+
+ return 0;
+}
+
+static int zram_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ size_t disksize_kb;
+
+ struct zram *zram = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case ZRAMIO_SET_DISKSIZE_KB:
+ if (zram->init_done) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&disksize_kb, (void *)arg,
+ _IOC_SIZE(cmd))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ zram->disksize = disksize_kb << 10;
+ pr_info("Disk size set to %zu kB\n", disksize_kb);
+ break;
+
+ case ZRAMIO_GET_STATS:
+ {
+ struct zram_ioctl_stats *stats;
+ if (!zram->init_done) {
+ ret = -ENOTTY;
+ goto out;
+ }
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zram_ioctl_get_stats(zram, stats);
+ if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
+ kfree(stats);
+ ret = -EFAULT;
+ goto out;
+ }
+ kfree(stats);
+ break;
+ }
+ case ZRAMIO_INIT:
+ ret = zram_ioctl_init_device(zram);
+ break;
+
+ case ZRAMIO_RESET:
+ /* Do not reset an active device! */
+ if (bdev->bd_holders) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Make sure all pending I/O is finished */
+ if (bdev)
+ fsync_bdev(bdev);
+
+ ret = zram_ioctl_reset_device(zram);
+ break;
+
+ default:
+ pr_info("Invalid ioctl %u\n", cmd);
+ ret = -ENOTTY;
+ }
+
+out:
+ return ret;
+}
+
+void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
+{
+ struct zram *zram;
+
+ zram = bdev->bd_disk->private_data;
+ zram_free_page(zram, index);
+ zram_stat64_inc(zram, &zram->stats.notify_free);
+}
+
+static const struct block_device_operations zram_devops = {
+ .ioctl = zram_ioctl,
+ .swap_slot_free_notify = zram_slot_free_notify,
+ .owner = THIS_MODULE
+};
+
+static int create_device(struct zram *zram, int device_id)
+{
+ int ret = 0;
+
+ mutex_init(&zram->lock);
+ spin_lock_init(&zram->stat64_lock);
+
+ zram->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!zram->queue) {
+ pr_err("Error allocating disk queue for device %d\n",
+ device_id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ blk_queue_make_request(zram->queue, zram_make_request);
+ zram->queue->queuedata = zram;
+
+ /* gendisk structure */
+ zram->disk = alloc_disk(1);
+ if (!zram->disk) {
+ blk_cleanup_queue(zram->queue);
+ pr_warning("Error allocating disk structure for device %d\n",
+ device_id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ zram->disk->major = zram_major;
+ zram->disk->first_minor = device_id;
+ zram->disk->fops = &zram_devops;
+ zram->disk->queue = zram->queue;
+ zram->disk->private_data = zram;
+ snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+
+ /* Actual capacity set using ZRAMIO_SET_DISKSIZE_KB ioctl */
+ set_capacity(zram->disk, 0);
+
+ /*
+ * To ensure that we always get PAGE_SIZE-aligned
+ * and n*PAGE_SIZE-sized I/O requests.
+ */
+ blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
+ blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
+ blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
+ blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
+
+ add_disk(zram->disk);
+
+ zram->init_done = 0;
+
+out:
+ return ret;
+}
+
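Because create_device() advertises PAGE_SIZE as both the logical and physical block size, the block layer only ever hands the driver page-aligned, page-multiple requests, which is exactly what valid_io_request() expects. That choice is visible from userspace; a hypothetical check with the standard BLKSSZGET block-device ioctl (device path assumed, and the device must already exist):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>    /* BLKSSZGET */

int main(void)
{
    int fd = open("/dev/zram0", O_RDONLY);
    int lbs = 0;

    if (fd < 0 || ioctl(fd, BLKSSZGET, &lbs) < 0) {
        perror("BLKSSZGET");
        return 1;
    }
    printf("logical block size: %d\n", lbs);    /* PAGE_SIZE, e.g. 4096 */
    close(fd);
    return 0;
}
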
+static void destroy_device(struct zram *zram)
+{
+ if (zram->disk) {
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+ }
+
+ if (zram->queue)
+ blk_cleanup_queue(zram->queue);
+}
+
+static int __init zram_init(void)
+{
+ int ret, dev_id;
+
+ if (num_devices > max_num_devices) {
+ pr_warning("Invalid value for num_devices: %u\n",
+ num_devices);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ zram_major = register_blkdev(0, "zram");
+ if (zram_major <= 0) {
+ pr_warning("Unable to get major number\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!num_devices) {
+ pr_info("num_devices not specified. Using default: 1\n");
+ num_devices = 1;
+ }
+
+ /* Allocate the device array and initialize each one */
+ pr_info("Creating %u devices ...\n", num_devices);
+ devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
+ if (!devices) {
+ ret = -ENOMEM;
+ goto unregister;
+ }
+
+ for (dev_id = 0; dev_id < num_devices; dev_id++) {
+ ret = create_device(&devices[dev_id], dev_id);
+ if (ret)
+ goto free_devices;
+ }
+
+ return 0;
+
+free_devices:
+ while (dev_id)
+ destroy_device(&devices[--dev_id]);
+ kfree(devices);
+unregister:
+ unregister_blkdev(zram_major, "zram");
+out:
+ return ret;
+}
+
+static void __exit zram_exit(void)
+{
+ int i;
+ struct zram *zram;
+
+ for (i = 0; i < num_devices; i++) {
+ zram = &devices[i];
+
+ destroy_device(zram);
+ if (zram->init_done)
+ reset_device(zram);
+ }
+
+ unregister_blkdev(zram_major, "zram");
+
+ kfree(devices);
+ pr_debug("Cleanup done!\n");
+}
+
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
+
+module_init(zram_init);
+module_exit(zram_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
+MODULE_DESCRIPTION("Compressed RAM Block Device");
diff --git a/drivers/staging/ramzswap/ramzswap_drv.h b/drivers/staging/zram/zram_drv.h
index 63c30420df2..945f9740442 100644
--- a/drivers/staging/ramzswap/ramzswap_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -1,5 +1,5 @@
/*
- * Compressed RAM based swap device
+ * Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
*
@@ -12,13 +12,13 @@
* Project home: http://compcache.googlecode.com
*/
-#ifndef _RAMZSWAP_DRV_H_
-#define _RAMZSWAP_DRV_H_
+#ifndef _ZRAM_DRV_H_
+#define _ZRAM_DRV_H_
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include "ramzswap_ioctl.h"
+#include "zram_ioctl.h"
#include "xvmalloc.h"
/*
@@ -41,7 +41,7 @@ struct zobj_header {
/*-- Configurable parameters */
-/* Default ramzswap disk size: 25% of total RAM */
+/* Default zram disk size: 25% of total RAM */
static const unsigned default_disksize_perc_ram = 25;
/*
@@ -63,23 +63,20 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
-/* Flags for ramzswap pages (table[page_no].flags) */
-enum rzs_pageflags {
+/* Flags for zram pages (table[page_no].flags) */
+enum zram_pageflags {
/* Page is stored uncompressed */
- RZS_UNCOMPRESSED,
+ ZRAM_UNCOMPRESSED,
/* Page consists entirely of zeros */
- RZS_ZERO,
+ ZRAM_ZERO,
- __NR_RZS_PAGEFLAGS,
+ __NR_ZRAM_PAGEFLAGS,
};
/*-- Data structures */
-/*
- * Allocated for each swap slot, indexed by page no.
- * These table entries must fit exactly in a page.
- */
+/* Allocated for each disk page */
struct table {
struct page *page;
u16 offset;
@@ -87,17 +84,17 @@ struct table {
u8 flags;
} __attribute__((aligned(4)));
-struct ramzswap_stats {
+struct zram_stats {
/* basic stats */
size_t compr_size; /* compressed size of pages stored -
* needed to enforce memlimit */
/* more stats */
-#if defined(CONFIG_RAMZSWAP_STATS)
+#if defined(CONFIG_ZRAM_STATS)
u64 num_reads; /* failed + successful */
u64 num_writes; /* --do-- */
u64 failed_reads; /* should NEVER! happen */
u64 failed_writes; /* can happen when memory is too low */
- u64 invalid_io; /* non-swap I/O requests */
+ u64 invalid_io; /* non-page-aligned I/O requests */
u64 notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
@@ -106,62 +103,62 @@ struct ramzswap_stats {
#endif
};
-struct ramzswap {
+struct zram {
struct xv_pool *mem_pool;
void *compress_workmem;
void *compress_buffer;
struct table *table;
spinlock_t stat64_lock; /* protect 64-bit stats */
- struct mutex lock;
+ struct mutex lock; /* protect compression buffers against
+ * concurrent writes */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
/*
- * This is limit on amount of *uncompressed* worth of data
- * we can hold. When backing swap device is provided, it is
- * set equal to device size.
+ * This is the limit on amount of *uncompressed* worth of data
+ * we can store in a disk.
*/
size_t disksize; /* bytes */
- struct ramzswap_stats stats;
+ struct zram_stats stats;
};
/*-- */
/* Debugging and Stats */
-#if defined(CONFIG_RAMZSWAP_STATS)
-static void rzs_stat_inc(u32 *v)
+#if defined(CONFIG_ZRAM_STATS)
+static void zram_stat_inc(u32 *v)
{
*v = *v + 1;
}
-static void rzs_stat_dec(u32 *v)
+static void zram_stat_dec(u32 *v)
{
*v = *v - 1;
}
-static void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
+static void zram_stat64_inc(struct zram *zram, u64 *v)
{
- spin_lock(&rzs->stat64_lock);
+ spin_lock(&zram->stat64_lock);
*v = *v + 1;
- spin_unlock(&rzs->stat64_lock);
+ spin_unlock(&zram->stat64_lock);
}
-static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
+static u64 zram_stat64_read(struct zram *zram, u64 *v)
{
u64 val;
- spin_lock(&rzs->stat64_lock);
+ spin_lock(&zram->stat64_lock);
val = *v;
- spin_unlock(&rzs->stat64_lock);
+ spin_unlock(&zram->stat64_lock);
return val;
}
#else
-#define rzs_stat_inc(v)
-#define rzs_stat_dec(v)
-#define rzs_stat64_inc(r, v)
-#define rzs_stat64_read(r, v)
-#endif /* CONFIG_RAMZSWAP_STATS */
+#define zram_stat_inc(v)
+#define zram_stat_dec(v)
+#define zram_stat64_inc(r, v)
+#define zram_stat64_read(r, v)
+#endif /* CONFIG_ZRAM_STATS */
#endif
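
The zram_stat*() helpers above compile away completely when CONFIG_ZRAM_STATS is not set, which is why the call sites in zram_drv.c can use them unconditionally. A minimal standalone illustration of that pattern (names shortened; this is not the actual header):

#include <stdio.h>

#define CONFIG_STATS 1    /* toggle to mimic CONFIG_ZRAM_STATS */

#if CONFIG_STATS
static void stat_inc(unsigned int *v)
{
    *v = *v + 1;
}
#else
#define stat_inc(v)    /* expands to nothing: no code, argument not evaluated */
#endif

int main(void)
{
    unsigned int pages_zero = 0;

    stat_inc(&pages_zero);    /* becomes a no-op when stats are compiled out */
    printf("pages_zero=%u\n", pages_zero);
    return 0;
}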
diff --git a/drivers/staging/ramzswap/ramzswap_ioctl.h b/drivers/staging/zram/zram_ioctl.h
index db94bcb4296..5c415fa4f17 100644
--- a/drivers/staging/ramzswap/ramzswap_ioctl.h
+++ b/drivers/staging/zram/zram_ioctl.h
@@ -1,5 +1,5 @@
/*
- * Compressed RAM based swap device
+ * Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
*
@@ -12,17 +12,16 @@
* Project home: http://compcache.googlecode.com
*/
-#ifndef _RAMZSWAP_IOCTL_H_
-#define _RAMZSWAP_IOCTL_H_
+#ifndef _ZRAM_IOCTL_H_
+#define _ZRAM_IOCTL_H_
-struct ramzswap_ioctl_stats {
- u64 disksize; /* user specified or equal to backing swap
- * size (if present) */
+struct zram_ioctl_stats {
+ u64 disksize; /* disksize in bytes (user specifies in KB) */
u64 num_reads; /* failed + successful */
u64 num_writes; /* --do-- */
u64 failed_reads; /* should NEVER! happen */
u64 failed_writes; /* can happen when memory is too low */
- u64 invalid_io; /* non-swap I/O requests */
+ u64 invalid_io; /* non-page-aligned I/O requests */
u64 notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 good_compress_pct; /* no. of pages with compression ratio<=50% */
@@ -34,9 +33,9 @@ struct ramzswap_ioctl_stats {
u64 mem_used_total;
} __attribute__ ((packed, aligned(4)));
-#define RZSIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
-#define RZSIO_GET_STATS _IOR('z', 1, struct ramzswap_ioctl_stats)
-#define RZSIO_INIT _IO('z', 2)
-#define RZSIO_RESET _IO('z', 3)
+#define ZRAMIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
+#define ZRAMIO_GET_STATS _IOR('z', 1, struct zram_ioctl_stats)
+#define ZRAMIO_INIT _IO('z', 2)
+#define ZRAMIO_RESET _IO('z', 3)
#endif
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index 99cb2246ac7..a1900e50251 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -8,7 +8,6 @@
#include <linux/errno.h> /* error codes */
#include <linux/slab.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -33,9 +32,8 @@ static int ixj_probe(struct pcmcia_device *p_dev)
{
dev_dbg(&p_dev->dev, "ixj_attach()\n");
/* Create new ixj device */
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = 3;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
if (!p_dev->priv) {
@@ -121,13 +119,14 @@ static int ixj_config_check(struct pcmcia_device *p_dev,
{
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
+ p_dev->io_lines = 3;
if (io->nwin == 2) {
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -151,7 +150,8 @@ static int ixj_config(struct pcmcia_device * link)
/*
* Register the card with the core.
*/
- j = ixj_pcmcia_probe(link->io.BasePort1, link->io.BasePort1 + 0x10);
+ j = ixj_pcmcia_probe(link->resource[0]->start,
+ link->resource[0]->start + 0x10);
info->ndev = 1;
ixj_get_serial(link, j);
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index 371f87f8bc2..a8ea2f19a0c 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -79,7 +79,7 @@ static int __devinit hilscher_pci_probe(struct pci_dev *dev,
}
info->version = "0.0.1";
info->irq = dev->irq;
- info->irq_flags = IRQF_DISABLED | IRQF_SHARED;
+ info->irq_flags = IRQF_SHARED;
info->handler = hilscher_handler;
if (uio_register_device(&dev->dev, info))
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 61e569df2bb..7174d518b8a 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -155,7 +155,6 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
* Interrupt sharing is not supported.
*/
- uioinfo->irq_flags |= IRQF_DISABLED;
uioinfo->handler = uio_pdrv_genirq_handler;
uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
uioinfo->open = uio_pdrv_genirq_open;
diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c
index 3d461cd73e6..a187fa14c5c 100644
--- a/drivers/uio/uio_sercos3.c
+++ b/drivers/uio/uio_sercos3.c
@@ -154,7 +154,7 @@ static int __devinit sercos3_pci_probe(struct pci_dev *dev,
info->name = "Sercos_III_PCI";
info->version = "0.0.1";
info->irq = dev->irq;
- info->irq_flags = IRQF_DISABLED | IRQF_SHARED;
+ info->irq_flags = IRQF_SHARED;
info->handler = sercos3_handler;
info->irqcontrol = sercos3_irqcontrol;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 6a58cb1330c..4aa00e6e57a 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -45,7 +45,8 @@ config USB_ARCH_HAS_OHCI
default y if STB03xxx
default y if PPC_MPC52xx
# MIPS:
- default y if SOC_AU1X00
+ default y if MIPS_ALCHEMY
+ default y if MACH_JZ4740
# SH:
default y if CPU_SUBTYPE_SH7720
default y if CPU_SUBTYPE_SH7721
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 80b4008c89b..239f050efa3 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
-obj-y += early/
+obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/
obj-$(CONFIG_USB_ATM) += atm/
obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 101ffc965ee..f383cb42b1d 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -564,7 +564,7 @@ static void cxacru_timeout_kill(unsigned long data)
}
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
- int* actual_length)
+ int *actual_length)
{
struct timer_list timer;
@@ -952,7 +952,7 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
put_unaligned(cpu_to_le32(addr), (__le32 *)(buf + offb));
offb += 4;
addr += l;
- if(l)
+ if (l)
memcpy(buf + offb, data + offd, l);
if (l < stride)
memset(buf + offb + l, 0, stride - l);
@@ -967,7 +967,7 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
}
offb = 0;
}
- } while(offd < size);
+ } while (offd < size);
dbg("sent fw %#x", fw);
ret = 0;
@@ -1043,8 +1043,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
if (instance->modem_type->boot_rom_patch) {
val = cpu_to_le32(BR_ADDR);
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
- }
- else {
+ } else {
ret = cxacru_fw(usb_dev, FW_GOTO_MEM, 0x0, 0x0, FW_ADDR, NULL, 0);
}
if (ret) {
@@ -1068,7 +1067,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
}
static int cxacru_find_firmware(struct cxacru_data *instance,
- char* phase, const struct firmware **fw_p)
+ char *phase, const struct firmware **fw_p)
{
struct usbatm_data *usbatm = instance->usbatm;
struct device *dev = &usbatm->usb_intf->dev;
@@ -1128,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
{
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
int ret;
/* instance init */
@@ -1172,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
goto fail;
}
- usb_fill_int_urb(instance->rcv_urb,
+ if (!cmd_ep) {
+ dbg("cxacru_bind: no command endpoint");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_INT) {
+ usb_fill_int_urb(instance->rcv_urb,
usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
instance->rcv_buf, PAGE_SIZE,
cxacru_blocking_completion, &instance->rcv_done, 1);
- usb_fill_int_urb(instance->snd_urb,
+ usb_fill_int_urb(instance->snd_urb,
usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
instance->snd_buf, PAGE_SIZE,
cxacru_blocking_completion, &instance->snd_done, 4);
+ } else {
+ usb_fill_bulk_urb(instance->rcv_urb,
+ usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
+ instance->rcv_buf, PAGE_SIZE,
+ cxacru_blocking_completion, &instance->rcv_done);
+
+ usb_fill_bulk_urb(instance->snd_urb,
+ usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
+ instance->snd_buf, PAGE_SIZE,
+ cxacru_blocking_completion, &instance->snd_done);
+ }
mutex_init(&instance->cm_serialize);
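
The rewritten cxacru_bind() above now checks bmAttributes of the command endpoint and picks interrupt or bulk URBs accordingly, instead of always assuming an interrupt endpoint. The transfer type lives in the low two bits of bmAttributes (0 control, 1 isochronous, 2 bulk, 3 interrupt, as encoded in <linux/usb/ch9.h>). A tiny sketch of that classification, with the constant values written out for illustration:

#include <stdio.h>

#define USB_ENDPOINT_XFERTYPE_MASK  0x03    /* values as in <linux/usb/ch9.h> */
#define USB_ENDPOINT_XFER_BULK      2
#define USB_ENDPOINT_XFER_INT       3

static const char *xfer_type(unsigned char bmAttributes)
{
    switch (bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
    case USB_ENDPOINT_XFER_INT:
        return "interrupt";
    case USB_ENDPOINT_XFER_BULK:
        return "bulk";
    default:
        return "control/isochronous";
    }
}

int main(void)
{
    printf("%s\n", xfer_type(0x03));    /* interrupt: cxacru uses usb_fill_int_urb() */
    printf("%s\n", xfer_type(0x02));    /* bulk: cxacru falls back to usb_fill_bulk_urb() */
    return 0;
}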
@@ -1325,8 +1344,24 @@ static struct usbatm_driver cxacru_driver = {
.tx_padding = 11,
};
-static int cxacru_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int cxacru_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ char buf[15];
+
+ /* Avoid ADSL routers (cx82310_eth).
+ * Abort if bDeviceClass is 0xff and iProduct is "USB NET CARD".
+ */
+ if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC
+ && usb_string(usb_dev, usb_dev->descriptor.iProduct,
+ buf, sizeof(buf)) > 0) {
+ if (!strcmp(buf, "USB NET CARD")) {
+ dev_info(&intf->dev, "ignoring cx82310_eth device\n");
+ return -ENODEV;
+ }
+ }
+
return usbatm_usb_probe(intf, id, &cxacru_driver);
}
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 80f9617d3a1..4716e707de5 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -753,11 +753,13 @@ static struct usb_driver speedtch_usb_driver = {
.id_table = speedtch_usb_ids
};
-static void speedtch_release_interfaces(struct usb_device *usb_dev, int num_interfaces) {
+static void speedtch_release_interfaces(struct usb_device *usb_dev,
+ int num_interfaces)
+{
struct usb_interface *cur_intf;
int i;
- for(i = 0; i < num_interfaces; i++)
+ for (i = 0; i < num_interfaces; i++)
if ((cur_intf = usb_ifnum_to_if(usb_dev, i))) {
usb_set_intfdata(cur_intf, NULL);
usb_driver_release_interface(&speedtch_usb_driver, cur_intf);
@@ -792,7 +794,7 @@ static int speedtch_bind(struct usbatm_data *usbatm,
/* claim all interfaces */
- for (i=0; i < num_interfaces; i++) {
+ for (i = 0; i < num_interfaces; i++) {
cur_intf = usb_ifnum_to_if(usb_dev, i);
if ((i != ifnum) && cur_intf) {
@@ -842,7 +844,7 @@ static int speedtch_bind(struct usbatm_data *usbatm,
use_isoc = 0; /* fall back to bulk if endpoint not found */
- for (i=0; i<desc->desc.bNumEndpoints; i++) {
+ for (i = 0; i < desc->desc.bNumEndpoints; i++) {
const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
if ((endpoint_desc->bEndpointAddress == target_address)) {
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index ebae9448014..ea071a5b6ee 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -67,6 +67,7 @@
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <asm/unaligned.h>
@@ -1576,6 +1577,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
char file_arr[] = "CMVxy.bin";
char *file;
+ kparam_block_sysfs_write(cmv_file);
/* set proper name corresponding modem version and line type */
if (cmv_file[sc->modem_index] == NULL) {
if (UEA_CHIP_VERSION(sc) == ADI930)
@@ -1594,6 +1596,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
strlcat(cmv_name, file, UEA_FW_NAME_MAX);
if (ver == 2)
strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
+ kparam_unblock_sysfs_write(cmv_file);
}
static int request_cmvs_old(struct uea_softc *sc,
@@ -2436,7 +2439,6 @@ UEA_ATTR(firmid, 0);
/* Retrieve the device End System Identifier (MAC) */
-#define htoi(x) (isdigit(x) ? x-'0' : toupper(x)-'A'+10)
static int uea_getesi(struct uea_softc *sc, u_char * esi)
{
unsigned char mac_str[2 * ETH_ALEN + 1];
@@ -2447,7 +2449,8 @@ static int uea_getesi(struct uea_softc *sc, u_char * esi)
return 1;
for (i = 0; i < ETH_ALEN; i++)
- esi[i] = htoi(mac_str[2 * i]) * 16 + htoi(mac_str[2 * i + 1]);
+ esi[i] = hex_to_bin(mac_str[2 * i]) * 16 +
+ hex_to_bin(mac_str[2 * i + 1]);
return 0;
}
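
hex_to_bin() returns the value of a single hex digit (negative on invalid input), so uea_getesi() rebuilds the ESI/MAC two characters per byte as shown above. A userspace sketch of the same conversion, with a local stand-in for the kernel helper (which is not available outside the kernel); the sample string is made up:

#include <stdio.h>
#include <ctype.h>

/* local stand-in for the kernel's hex_to_bin(): one hex digit, -1 on error */
static int hex_to_bin(char ch)
{
    if (ch >= '0' && ch <= '9')
        return ch - '0';
    ch = tolower((unsigned char)ch);
    if (ch >= 'a' && ch <= 'f')
        return ch - 'a' + 10;
    return -1;
}

int main(void)
{
    const char mac_str[] = "00a0c9deadbe";    /* example ESI string */
    unsigned char esi[6];
    int i;

    for (i = 0; i < 6; i++)
        esi[i] = hex_to_bin(mac_str[2 * i]) * 16 +
                 hex_to_bin(mac_str[2 * i + 1]);

    for (i = 0; i < 6; i++)
        printf("%02x%c", esi[i], i == 5 ? '\n' : ':');
    return 0;
}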
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 9b53e8df464..05bf5a27b5b 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -84,8 +84,8 @@
#ifdef VERBOSE_DEBUG
static int usbatm_print_packet(const unsigned char *data, int len);
-#define PACKETDEBUG(arg...) usbatm_print_packet (arg)
-#define vdbg(arg...) dbg (arg)
+#define PACKETDEBUG(arg...) usbatm_print_packet(arg)
+#define vdbg(arg...) dbg(arg)
#else
#define PACKETDEBUG(arg...)
#define vdbg(arg...)
@@ -273,8 +273,7 @@ static void usbatm_complete(struct urb *urb)
if (unlikely(status) &&
(!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) ||
- status != -EILSEQ ))
- {
+ status != -EILSEQ)) {
if (status == -ESHUTDOWN)
return;
@@ -494,7 +493,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
ptr += data_len;
__skb_pull(skb, data_len);
- if(!left)
+ if (!left)
continue;
memset(ptr, 0, left);
@@ -506,7 +505,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
trailer[2] = ctrl->len >> 8;
trailer[3] = ctrl->len;
- ctrl->crc = ~ crc32_be(ctrl->crc, ptr, left - 4);
+ ctrl->crc = ~crc32_be(ctrl->crc, ptr, left - 4);
trailer[4] = ctrl->crc >> 24;
trailer[5] = ctrl->crc >> 16;
@@ -516,8 +515,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
target[3] |= 0x2; /* adjust PTI */
ctrl->len = 0; /* tag this skb finished */
- }
- else
+ } else
ctrl->crc = crc32_be(ctrl->crc, ptr, left);
}
@@ -1146,7 +1144,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
instance->tx_channel.endpoint = usb_sndbulkpipe(usb_dev, driver->bulk_out);
/* tx buffer size must be a positive multiple of the stride */
- instance->tx_channel.buf_size = max (instance->tx_channel.stride,
+ instance->tx_channel.buf_size = max(instance->tx_channel.stride,
snd_buf_bytes - (snd_buf_bytes % instance->tx_channel.stride));
/* rx buffer size must be a positive multiple of the endpoint maxpacket */
@@ -1159,7 +1157,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
goto fail_unbind;
}
- num_packets = max (1U, (rcv_buf_bytes + maxpacket / 2) / maxpacket); /* round */
+ num_packets = max(1U, (rcv_buf_bytes + maxpacket / 2) / maxpacket); /* round */
if (num_packets * maxpacket > UDSL_MAX_BUF_SIZE)
num_packets--;
@@ -1262,7 +1260,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
usb_free_urb(instance->urbs[i]);
}
- kfree (instance);
+ kfree(instance);
return error;
}
@@ -1390,9 +1388,8 @@ static int usbatm_print_packet(const unsigned char *data, int len)
for (i = 0; i < len;) {
buffer[0] = '\0';
sprintf(buffer, "%.3d :", i);
- for (j = 0; (j < 16) && (i < len); j++, i++) {
+ for (j = 0; (j < 16) && (i < len); j++, i++)
sprintf(buffer, "%s %2.2x", buffer, data[i]);
- }
dbg("%s", buffer);
}
return i;
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index 0863f85fcc2..5fc48940521 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -48,7 +48,7 @@
dev_warn(&(instance)->usb_intf->dev, \
"failed assertion '%s' at line %d", \
__stringify(x), __LINE__); \
- } while(0)
+ } while (0)
#endif
#define usb_err(instance, format, arg...) \
@@ -59,7 +59,7 @@
dev_warn(&(instance)->usb_intf->dev , format , ## arg)
#ifdef DEBUG
#define usb_dbg(instance, format, arg...) \
- dev_printk(KERN_DEBUG , &(instance)->usb_intf->dev , format , ## arg)
+ dev_printk(KERN_DEBUG , &(instance)->usb_intf->dev , format , ## arg)
#else
#define usb_dbg(instance, format, arg...) \
do {} while (0)
@@ -104,21 +104,21 @@ struct usbatm_data;
/*
* Assuming all methods exist and succeed, they are called in this order:
*
-* bind, heavy_init, atm_start, ..., atm_stop, unbind
+* bind, heavy_init, atm_start, ..., atm_stop, unbind
*/
struct usbatm_driver {
const char *driver_name;
/* init device ... can sleep, or cause probe() failure */
- int (*bind) (struct usbatm_data *, struct usb_interface *,
+ int (*bind) (struct usbatm_data *, struct usb_interface *,
const struct usb_device_id *id);
/* additional device initialization that is too slow to be done in probe() */
- int (*heavy_init) (struct usbatm_data *, struct usb_interface *);
+ int (*heavy_init) (struct usbatm_data *, struct usb_interface *);
/* cleanup device ... can sleep, but can't fail */
- void (*unbind) (struct usbatm_data *, struct usb_interface *);
+ void (*unbind) (struct usbatm_data *, struct usb_interface *);
/* init ATM device ... can sleep, or cause ATM initialization failure */
int (*atm_start) (struct usbatm_data *, struct atm_dev *);
@@ -126,9 +126,9 @@ struct usbatm_driver {
/* cleanup ATM device ... can sleep, but can't fail */
void (*atm_stop) (struct usbatm_data *, struct atm_dev *);
- int bulk_in; /* bulk rx endpoint */
- int isoc_in; /* isochronous rx endpoint */
- int bulk_out; /* bulk tx endpoint */
+ int bulk_in; /* bulk rx endpoint */
+ int isoc_in; /* isochronous rx endpoint */
+ int bulk_out; /* bulk tx endpoint */
unsigned rx_padding;
unsigned tx_padding;
@@ -156,7 +156,7 @@ struct usbatm_channel {
struct usbatm_data {
/******************
* public fields *
- ******************/
+ ******************/
/* mini driver */
struct usbatm_driver *driver;
@@ -174,7 +174,7 @@ struct usbatm_data {
/********************************
* private fields - do not use *
- ********************************/
+ ********************************/
struct kref refcount;
struct mutex serialize;
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index 17d167bbd2d..48ee0c5ff28 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -49,13 +49,13 @@ static struct usbatm_driver xusbatm_drivers[XUSBATM_DRIVERS_MAX];
static struct usb_device_id xusbatm_usb_ids[XUSBATM_DRIVERS_MAX + 1];
static struct usb_driver xusbatm_usb_driver;
-static struct usb_interface *xusbatm_find_intf (struct usb_device *usb_dev, int altsetting, u8 ep)
+static struct usb_interface *xusbatm_find_intf(struct usb_device *usb_dev, int altsetting, u8 ep)
{
struct usb_host_interface *alt;
struct usb_interface *intf;
int i, j;
- for(i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++)
+ for (i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++)
if ((intf = usb_dev->actconfig->interface[i]) && (alt = usb_altnum_to_altsetting(intf, altsetting)))
for (j = 0; j < alt->desc.bNumEndpoints; j++)
if (alt->endpoint[j].desc.bEndpointAddress == ep)
@@ -63,7 +63,7 @@ static struct usb_interface *xusbatm_find_intf (struct usb_device *usb_dev, int
return NULL;
}
-static int xusbatm_capture_intf (struct usbatm_data *usbatm, struct usb_device *usb_dev,
+static int xusbatm_capture_intf(struct usbatm_data *usbatm, struct usb_device *usb_dev,
struct usb_interface *intf, int altsetting, int claim)
{
int ifnum = intf->altsetting->desc.bInterfaceNumber;
@@ -80,7 +80,7 @@ static int xusbatm_capture_intf (struct usbatm_data *usbatm, struct usb_device *
return 0;
}
-static void xusbatm_release_intf (struct usb_device *usb_dev, struct usb_interface *intf, int claimed)
+static void xusbatm_release_intf(struct usb_device *usb_dev, struct usb_interface *intf, int claimed)
{
if (claimed) {
usb_set_intfdata(intf, NULL);
@@ -147,7 +147,7 @@ static void xusbatm_unbind(struct usbatm_data *usbatm,
usb_dbg(usbatm, "%s entered\n", __func__);
- for(i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++) {
+ for (i = 0; i < usb_dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *cur_intf = usb_dev->actconfig->interface[i];
if (cur_intf && (usb_get_intfdata(cur_intf) == usbatm)) {
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index a22b887f4e9..d3e1356d091 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -264,7 +264,7 @@ static void c67x00_hcd_irq(struct c67x00_sie *sie, u16 int_status, u16 msg)
if (unlikely(hcd->state == HC_STATE_HALT))
return;
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
return;
/* Handle Start of frame events */
@@ -282,7 +282,7 @@ static int c67x00_hcd_start(struct usb_hcd *hcd)
{
hcd->uses_new_polling = 1;
hcd->state = HC_STATE_RUNNING;
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 89d260d6b03..bc62fae0680 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -636,19 +636,13 @@ static void acm_tty_unregister(struct acm *acm)
static int acm_tty_chars_in_buffer(struct tty_struct *tty);
-static void acm_port_down(struct acm *acm, int drain)
+static void acm_port_down(struct acm *acm)
{
int i, nr = acm->rx_buflimit;
mutex_lock(&open_mutex);
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
- /* try letting the last writes drain naturally */
- if (drain) {
- wait_event_interruptible_timeout(acm->drain_wait,
- (ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
- ACM_CLOSE_TIMEOUT * HZ);
- }
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
@@ -664,7 +658,7 @@ static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
tty_port_hangup(&acm->port);
- acm_port_down(acm, 0);
+ acm_port_down(acm);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -685,7 +679,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&open_mutex);
return;
}
- acm_port_down(acm, 0);
+ acm_port_down(acm);
tty_port_close_end(&acm->port, tty);
tty_port_tty_set(&acm->port, NULL);
}
@@ -971,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
}
if (!buflen) {
- if (intf->cur_altsetting->endpoint->extralen &&
+ if (intf->cur_altsetting->endpoint &&
+ intf->cur_altsetting->endpoint->extralen &&
intf->cur_altsetting->endpoint->extra) {
dev_dbg(&intf->dev,
"Seeking extra descriptors on endpoint\n");
@@ -1487,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
USB_CDC_ACM_PROTO_VENDOR)
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+
/*
* USB driver structure.
*/
@@ -1597,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
{ NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
{ NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
{ NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+ { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+ { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+ { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
@@ -1605,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = NOT_A_MODEM,
},
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
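The new SAMSUNG_PCSUITE_ACM_INFO() macro and the extra Nokia entries all expand through USB_DEVICE_AND_INTERFACE_INFO(), matching on vendor/product plus the COMM/ACM/vendor-protocol interface triple. A hedged sketch of the same pattern with made-up IDs (EXAMPLE_PCSUITE_ACM_INFO and the 0x1234 vendor are placeholders, not real devices):

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

#define EXAMPLE_PCSUITE_ACM_INFO(prod) \
	USB_DEVICE_AND_INTERFACE_INFO(0x1234, (prod), \
		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
		USB_CDC_ACM_PROTO_VENDOR)

static const struct usb_device_id example_acm_ids[] = {
	{ EXAMPLE_PCSUITE_ACM_INFO(0x0001), },	/* placeholder product */
	{ EXAMPLE_PCSUITE_ACM_INFO(0x0002), },	/* placeholder product */
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_acm_ids);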
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 84f9e52327f..e325162859b 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -135,7 +135,7 @@ MFG:HEWLETT-PACKARD;MDL:DESKJET 970C;CMD:MLC,PCL,PML;CLASS:PRINTER;DESCRIPTION:H
* ->lock locks what interrupt accesses.
*/
struct usblp {
- struct usb_device *dev; /* USB device */
+ struct usb_device *dev; /* USB device */
struct mutex wmut;
struct mutex mut;
spinlock_t lock; /* locks rcomplete, wcomplete */
@@ -169,7 +169,8 @@ struct usblp {
};
#ifdef DEBUG
-static void usblp_dump(struct usblp *usblp) {
+static void usblp_dump(struct usblp *usblp)
+{
int p;
dbg("usblp=0x%p", usblp);
@@ -216,8 +217,8 @@ static const struct quirk_printer_struct quirk_printers[] = {
{ 0x03f0, 0x0304, USBLP_QUIRK_BIDIR }, /* HP DeskJet 810C/812C */
{ 0x03f0, 0x0404, USBLP_QUIRK_BIDIR }, /* HP DeskJet 830C */
{ 0x03f0, 0x0504, USBLP_QUIRK_BIDIR }, /* HP DeskJet 885C */
- { 0x03f0, 0x0604, USBLP_QUIRK_BIDIR }, /* HP DeskJet 840C */
- { 0x03f0, 0x0804, USBLP_QUIRK_BIDIR }, /* HP DeskJet 816C */
+ { 0x03f0, 0x0604, USBLP_QUIRK_BIDIR }, /* HP DeskJet 840C */
+ { 0x03f0, 0x0804, USBLP_QUIRK_BIDIR }, /* HP DeskJet 816C */
{ 0x03f0, 0x1104, USBLP_QUIRK_BIDIR }, /* HP Deskjet 959C */
{ 0x0409, 0xefbe, USBLP_QUIRK_BIDIR }, /* NEC Picty900 (HP OEM) */
{ 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */
@@ -254,9 +255,8 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
/* High byte has the interface index.
Low byte has the alternate setting.
*/
- if ((request == USBLP_REQ_GET_ID) && (type == USB_TYPE_CLASS)) {
- index = (usblp->ifnum<<8)|usblp->protocol[usblp->current_protocol].alt_setting;
- }
+ if ((request == USBLP_REQ_GET_ID) && (type == USB_TYPE_CLASS))
+ index = (usblp->ifnum<<8)|usblp->protocol[usblp->current_protocol].alt_setting;
retval = usb_control_msg(usblp->dev,
dir ? usb_rcvctrlpipe(usblp->dev, 0) : usb_sndctrlpipe(usblp->dev, 0),
@@ -372,7 +372,7 @@ static int usblp_check_status(struct usblp *usblp, int err)
return newerr;
}
-static int handle_bidir (struct usblp *usblp)
+static int handle_bidir(struct usblp *usblp)
{
if (usblp->bidir && usblp->used) {
if (usblp_submit_read(usblp) < 0)
@@ -395,14 +395,13 @@ static int usblp_open(struct inode *inode, struct file *file)
if (minor < 0)
return -ENODEV;
- mutex_lock (&usblp_mutex);
+ mutex_lock(&usblp_mutex);
retval = -ENODEV;
intf = usb_find_interface(&usblp_driver, minor);
- if (!intf) {
+ if (!intf)
goto out;
- }
- usblp = usb_get_intfdata (intf);
+ usblp = usb_get_intfdata(intf);
if (!usblp || !usblp->dev || !usblp->present)
goto out;
@@ -433,18 +432,18 @@ static int usblp_open(struct inode *inode, struct file *file)
retval = -EIO;
}
out:
- mutex_unlock (&usblp_mutex);
+ mutex_unlock(&usblp_mutex);
return retval;
}
-static void usblp_cleanup (struct usblp *usblp)
+static void usblp_cleanup(struct usblp *usblp)
{
printk(KERN_INFO "usblp%d: removed\n", usblp->minor);
kfree(usblp->readbuf);
- kfree (usblp->device_id_string);
- kfree (usblp->statusbuf);
- kfree (usblp);
+ kfree(usblp->device_id_string);
+ kfree(usblp->statusbuf);
+ kfree(usblp);
}
static void usblp_unlink_urbs(struct usblp *usblp)
@@ -458,14 +457,14 @@ static int usblp_release(struct inode *inode, struct file *file)
usblp->flags &= ~LP_ABORT;
- mutex_lock (&usblp_mutex);
+ mutex_lock(&usblp_mutex);
usblp->used = 0;
if (usblp->present) {
usblp_unlink_urbs(usblp);
usb_autopm_put_interface(usblp->intf);
- } else /* finish cleanup from disconnect */
- usblp_cleanup (usblp);
- mutex_unlock (&usblp_mutex);
+ } else /* finish cleanup from disconnect */
+ usblp_cleanup(usblp);
+ mutex_unlock(&usblp_mutex);
return 0;
}
@@ -495,190 +494,190 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int twoints[2];
int retval = 0;
- mutex_lock (&usblp->mut);
+ mutex_lock(&usblp->mut);
if (!usblp->present) {
retval = -ENODEV;
goto done;
}
dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
- _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) );
+ _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd));
if (_IOC_TYPE(cmd) == 'P') /* new-style ioctl number */
switch (_IOC_NR(cmd)) {
- case IOCNR_GET_DEVICE_ID: /* get the DEVICE_ID string */
- if (_IOC_DIR(cmd) != _IOC_READ) {
- retval = -EINVAL;
- goto done;
- }
+ case IOCNR_GET_DEVICE_ID: /* get the DEVICE_ID string */
+ if (_IOC_DIR(cmd) != _IOC_READ) {
+ retval = -EINVAL;
+ goto done;
+ }
- length = usblp_cache_device_id_string(usblp);
- if (length < 0) {
- retval = length;
- goto done;
- }
- if (length > _IOC_SIZE(cmd))
- length = _IOC_SIZE(cmd); /* truncate */
-
- if (copy_to_user((void __user *) arg,
- usblp->device_id_string,
- (unsigned long) length)) {
- retval = -EFAULT;
- goto done;
- }
+ length = usblp_cache_device_id_string(usblp);
+ if (length < 0) {
+ retval = length;
+ goto done;
+ }
+ if (length > _IOC_SIZE(cmd))
+ length = _IOC_SIZE(cmd); /* truncate */
+
+ if (copy_to_user((void __user *) arg,
+ usblp->device_id_string,
+ (unsigned long) length)) {
+ retval = -EFAULT;
+ goto done;
+ }
- break;
+ break;
- case IOCNR_GET_PROTOCOLS:
- if (_IOC_DIR(cmd) != _IOC_READ ||
- _IOC_SIZE(cmd) < sizeof(twoints)) {
- retval = -EINVAL;
- goto done;
- }
+ case IOCNR_GET_PROTOCOLS:
+ if (_IOC_DIR(cmd) != _IOC_READ ||
+ _IOC_SIZE(cmd) < sizeof(twoints)) {
+ retval = -EINVAL;
+ goto done;
+ }
- twoints[0] = usblp->current_protocol;
- twoints[1] = 0;
- for (i = USBLP_FIRST_PROTOCOL;
- i <= USBLP_LAST_PROTOCOL; i++) {
- if (usblp->protocol[i].alt_setting >= 0)
- twoints[1] |= (1<<i);
- }
+ twoints[0] = usblp->current_protocol;
+ twoints[1] = 0;
+ for (i = USBLP_FIRST_PROTOCOL;
+ i <= USBLP_LAST_PROTOCOL; i++) {
+ if (usblp->protocol[i].alt_setting >= 0)
+ twoints[1] |= (1<<i);
+ }
- if (copy_to_user((void __user *)arg,
- (unsigned char *)twoints,
- sizeof(twoints))) {
- retval = -EFAULT;
- goto done;
- }
+ if (copy_to_user((void __user *)arg,
+ (unsigned char *)twoints,
+ sizeof(twoints))) {
+ retval = -EFAULT;
+ goto done;
+ }
- break;
+ break;
- case IOCNR_SET_PROTOCOL:
- if (_IOC_DIR(cmd) != _IOC_WRITE) {
- retval = -EINVAL;
- goto done;
- }
+ case IOCNR_SET_PROTOCOL:
+ if (_IOC_DIR(cmd) != _IOC_WRITE) {
+ retval = -EINVAL;
+ goto done;
+ }
#ifdef DEBUG
- if (arg == -10) {
- usblp_dump(usblp);
- break;
- }
+ if (arg == -10) {
+ usblp_dump(usblp);
+ break;
+ }
#endif
- usblp_unlink_urbs(usblp);
- retval = usblp_set_protocol(usblp, arg);
- if (retval < 0) {
- usblp_set_protocol(usblp,
- usblp->current_protocol);
- }
- break;
+ usblp_unlink_urbs(usblp);
+ retval = usblp_set_protocol(usblp, arg);
+ if (retval < 0) {
+ usblp_set_protocol(usblp,
+ usblp->current_protocol);
+ }
+ break;
- case IOCNR_HP_SET_CHANNEL:
- if (_IOC_DIR(cmd) != _IOC_WRITE ||
- le16_to_cpu(usblp->dev->descriptor.idVendor) != 0x03F0 ||
- usblp->quirks & USBLP_QUIRK_BIDIR) {
- retval = -EINVAL;
- goto done;
- }
+ case IOCNR_HP_SET_CHANNEL:
+ if (_IOC_DIR(cmd) != _IOC_WRITE ||
+ le16_to_cpu(usblp->dev->descriptor.idVendor) != 0x03F0 ||
+ usblp->quirks & USBLP_QUIRK_BIDIR) {
+ retval = -EINVAL;
+ goto done;
+ }
- err = usblp_hp_channel_change_request(usblp,
- arg, &newChannel);
- if (err < 0) {
- dev_err(&usblp->dev->dev,
- "usblp%d: error = %d setting "
- "HP channel\n",
- usblp->minor, err);
- retval = -EIO;
- goto done;
- }
+ err = usblp_hp_channel_change_request(usblp,
+ arg, &newChannel);
+ if (err < 0) {
+ dev_err(&usblp->dev->dev,
+ "usblp%d: error = %d setting "
+ "HP channel\n",
+ usblp->minor, err);
+ retval = -EIO;
+ goto done;
+ }
- dbg("usblp%d requested/got HP channel %ld/%d",
- usblp->minor, arg, newChannel);
- break;
+ dbg("usblp%d requested/got HP channel %ld/%d",
+ usblp->minor, arg, newChannel);
+ break;
- case IOCNR_GET_BUS_ADDRESS:
- if (_IOC_DIR(cmd) != _IOC_READ ||
- _IOC_SIZE(cmd) < sizeof(twoints)) {
- retval = -EINVAL;
- goto done;
- }
+ case IOCNR_GET_BUS_ADDRESS:
+ if (_IOC_DIR(cmd) != _IOC_READ ||
+ _IOC_SIZE(cmd) < sizeof(twoints)) {
+ retval = -EINVAL;
+ goto done;
+ }
- twoints[0] = usblp->dev->bus->busnum;
- twoints[1] = usblp->dev->devnum;
- if (copy_to_user((void __user *)arg,
- (unsigned char *)twoints,
- sizeof(twoints))) {
- retval = -EFAULT;
- goto done;
- }
+ twoints[0] = usblp->dev->bus->busnum;
+ twoints[1] = usblp->dev->devnum;
+ if (copy_to_user((void __user *)arg,
+ (unsigned char *)twoints,
+ sizeof(twoints))) {
+ retval = -EFAULT;
+ goto done;
+ }
- dbg("usblp%d is bus=%d, device=%d",
- usblp->minor, twoints[0], twoints[1]);
- break;
+ dbg("usblp%d is bus=%d, device=%d",
+ usblp->minor, twoints[0], twoints[1]);
+ break;
- case IOCNR_GET_VID_PID:
- if (_IOC_DIR(cmd) != _IOC_READ ||
- _IOC_SIZE(cmd) < sizeof(twoints)) {
- retval = -EINVAL;
- goto done;
- }
+ case IOCNR_GET_VID_PID:
+ if (_IOC_DIR(cmd) != _IOC_READ ||
+ _IOC_SIZE(cmd) < sizeof(twoints)) {
+ retval = -EINVAL;
+ goto done;
+ }
- twoints[0] = le16_to_cpu(usblp->dev->descriptor.idVendor);
- twoints[1] = le16_to_cpu(usblp->dev->descriptor.idProduct);
- if (copy_to_user((void __user *)arg,
- (unsigned char *)twoints,
- sizeof(twoints))) {
- retval = -EFAULT;
- goto done;
- }
+ twoints[0] = le16_to_cpu(usblp->dev->descriptor.idVendor);
+ twoints[1] = le16_to_cpu(usblp->dev->descriptor.idProduct);
+ if (copy_to_user((void __user *)arg,
+ (unsigned char *)twoints,
+ sizeof(twoints))) {
+ retval = -EFAULT;
+ goto done;
+ }
- dbg("usblp%d is VID=0x%4.4X, PID=0x%4.4X",
- usblp->minor, twoints[0], twoints[1]);
- break;
+ dbg("usblp%d is VID=0x%4.4X, PID=0x%4.4X",
+ usblp->minor, twoints[0], twoints[1]);
+ break;
- case IOCNR_SOFT_RESET:
- if (_IOC_DIR(cmd) != _IOC_NONE) {
- retval = -EINVAL;
- goto done;
- }
- retval = usblp_reset(usblp);
- break;
- default:
- retval = -ENOTTY;
+ case IOCNR_SOFT_RESET:
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
+ retval = -EINVAL;
+ goto done;
+ }
+ retval = usblp_reset(usblp);
+ break;
+ default:
+ retval = -ENOTTY;
}
else /* old-style ioctl value */
switch (cmd) {
- case LPGETSTATUS:
- if ((retval = usblp_read_status(usblp, usblp->statusbuf))) {
- if (printk_ratelimit())
- printk(KERN_ERR "usblp%d:"
- "failed reading printer status (%d)\n",
- usblp->minor, retval);
- retval = -EIO;
- goto done;
- }
- status = *usblp->statusbuf;
- if (copy_to_user ((void __user *)arg, &status, sizeof(int)))
- retval = -EFAULT;
- break;
+ case LPGETSTATUS:
+ if ((retval = usblp_read_status(usblp, usblp->statusbuf))) {
+ if (printk_ratelimit())
+ printk(KERN_ERR "usblp%d:"
+ "failed reading printer status (%d)\n",
+ usblp->minor, retval);
+ retval = -EIO;
+ goto done;
+ }
+ status = *usblp->statusbuf;
+ if (copy_to_user((void __user *)arg, &status, sizeof(int)))
+ retval = -EFAULT;
+ break;
- case LPABORT:
- if (arg)
- usblp->flags |= LP_ABORT;
- else
- usblp->flags &= ~LP_ABORT;
- break;
+ case LPABORT:
+ if (arg)
+ usblp->flags |= LP_ABORT;
+ else
+ usblp->flags &= ~LP_ABORT;
+ break;
- default:
- retval = -ENOTTY;
+ default:
+ retval = -ENOTTY;
}
done:
- mutex_unlock (&usblp->mut);
+ mutex_unlock(&usblp->mut);
return retval;
}
@@ -840,7 +839,7 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo
}
done:
- mutex_unlock (&usblp->mut);
+ mutex_unlock(&usblp->mut);
return count;
}
@@ -1023,7 +1022,7 @@ raise_urb:
* while you are sending print data, and you don't try to query the
* printer status every couple of milliseconds, you will probably be OK.
*/
-static unsigned int usblp_quirks (__u16 vendor, __u16 product)
+static unsigned int usblp_quirks(__u16 vendor, __u16 product)
{
int i;
@@ -1031,7 +1030,7 @@ static unsigned int usblp_quirks (__u16 vendor, __u16 product)
if (vendor == quirk_printers[i].vendorId &&
product == quirk_printers[i].productId)
return quirk_printers[i].quirks;
- }
+ }
return 0;
}
@@ -1061,7 +1060,7 @@ static struct usb_class_driver usblp_class = {
static ssize_t usblp_show_ieee1284_id(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
- struct usblp *usblp = usb_get_intfdata (intf);
+ struct usblp *usblp = usb_get_intfdata(intf);
if (usblp->device_id_string[0] == 0 &&
usblp->device_id_string[1] == 0)
@@ -1075,7 +1074,7 @@ static DEVICE_ATTR(ieee1284_id, S_IRUGO, usblp_show_ieee1284_id, NULL);
static int usblp_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *dev = interface_to_usbdev (intf);
+ struct usb_device *dev = interface_to_usbdev(intf);
struct usblp *usblp;
int protocol;
int retval;
@@ -1089,7 +1088,7 @@ static int usblp_probe(struct usb_interface *intf,
}
usblp->dev = dev;
mutex_init(&usblp->wmut);
- mutex_init (&usblp->mut);
+ mutex_init(&usblp->mut);
spin_lock_init(&usblp->lock);
init_waitqueue_head(&usblp->rwait);
init_waitqueue_head(&usblp->wwait);
@@ -1153,7 +1152,7 @@ static int usblp_probe(struct usb_interface *intf,
usblp_check_status(usblp, 0);
#endif
- usb_set_intfdata (intf, usblp);
+ usb_set_intfdata(intf, usblp);
usblp->present = 1;
@@ -1177,7 +1176,7 @@ static int usblp_probe(struct usb_interface *intf,
return 0;
abort_intfdata:
- usb_set_intfdata (intf, NULL);
+ usb_set_intfdata(intf, NULL);
device_remove_file(&intf->dev, &dev_attr_ieee1284_id);
abort:
kfree(usblp->readbuf);
@@ -1340,35 +1339,35 @@ static int usblp_cache_device_id_string(struct usblp *usblp)
static void usblp_disconnect(struct usb_interface *intf)
{
- struct usblp *usblp = usb_get_intfdata (intf);
+ struct usblp *usblp = usb_get_intfdata(intf);
usb_deregister_dev(intf, &usblp_class);
if (!usblp || !usblp->dev) {
dev_err(&intf->dev, "bogus disconnect\n");
- BUG ();
+ BUG();
}
device_remove_file(&intf->dev, &dev_attr_ieee1284_id);
- mutex_lock (&usblp_mutex);
- mutex_lock (&usblp->mut);
+ mutex_lock(&usblp_mutex);
+ mutex_lock(&usblp->mut);
usblp->present = 0;
wake_up(&usblp->wwait);
wake_up(&usblp->rwait);
- usb_set_intfdata (intf, NULL);
+ usb_set_intfdata(intf, NULL);
usblp_unlink_urbs(usblp);
- mutex_unlock (&usblp->mut);
+ mutex_unlock(&usblp->mut);
if (!usblp->used)
- usblp_cleanup (usblp);
- mutex_unlock (&usblp_mutex);
+ usblp_cleanup(usblp);
+ mutex_unlock(&usblp_mutex);
}
static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct usblp *usblp = usb_get_intfdata (intf);
+ struct usblp *usblp = usb_get_intfdata(intf);
usblp_unlink_urbs(usblp);
#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
@@ -1382,10 +1381,10 @@ static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
static int usblp_resume(struct usb_interface *intf)
{
- struct usblp *usblp = usb_get_intfdata (intf);
+ struct usblp *usblp = usb_get_intfdata(intf);
int r;
- r = handle_bidir (usblp);
+ r = handle_bidir(usblp);
return r;
}
@@ -1401,7 +1400,7 @@ static const struct usb_device_id usblp_ids[] = {
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, usblp_ids);
+MODULE_DEVICE_TABLE(usb, usblp_ids);
static struct usb_driver usblp_driver = {
.name = "usblp",
@@ -1426,8 +1425,8 @@ static void __exit usblp_exit(void)
module_init(usblp_init);
module_exit(usblp_exit);
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
module_param(proto_bias, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(proto_bias, "Favourite protocol number");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449600..9eed5b52d9d 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
If you are unsure about this, say N here.
config USB_SUSPEND
- bool "USB runtime power management (suspend/resume and wakeup)"
+ bool "USB runtime power management (autosuspend) and wakeup"
depends on USB && PM_RUNTIME
help
If you say Y here, you can use driver calls or the sysfs
- "power/level" file to suspend or resume individual USB
- peripherals and to enable or disable autosuspend (see
+ "power/control" file to enable or disable autosuspend for
+ individual USB peripherals (see
Documentation/usb/power-management.txt for more details).
Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index c2f62a3993d..f1aaff6202a 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1668,13 +1668,10 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
default:
if (intf->dev.driver)
driver = to_usb_driver(intf->dev.driver);
- if (driver == NULL || driver->ioctl == NULL) {
+ if (driver == NULL || driver->unlocked_ioctl == NULL) {
retval = -ENOTTY;
} else {
- /* keep API that guarantees BKL */
- lock_kernel();
- retval = driver->ioctl(intf, ctl->ioctl_code, buf);
- unlock_kernel();
+ retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf);
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index a6bd53ace03..d7a4401ef01 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1742,9 +1742,8 @@ static int usb_runtime_suspend(struct device *dev)
}
/* Prevent the parent from suspending immediately after */
- else if (udev->parent) {
+ else if (udev->parent)
udev->parent->last_busy = jiffies;
- }
}
/* Runtime suspend for a USB interface doesn't mean anything. */
@@ -1786,21 +1785,19 @@ static int usb_runtime_idle(struct device *dev)
return 0;
}
-static struct dev_pm_ops usb_bus_pm_ops = {
+static const struct dev_pm_ops usb_bus_pm_ops = {
.runtime_suspend = usb_runtime_suspend,
.runtime_resume = usb_runtime_resume,
.runtime_idle = usb_runtime_idle,
};
-#else
-
-#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL)
-
#endif /* CONFIG_USB_SUSPEND */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
+#ifdef CONFIG_USB_SUSPEND
.pm = &usb_bus_pm_ops,
+#endif
};
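The driver.c hunk drops the old `(*(struct dev_pm_ops *) NULL)` fallback and instead compiles the .pm pointer in only when CONFIG_USB_SUSPEND is set. A hedged sketch of that pattern for a generic bus (the CONFIG_EXAMPLE_PM symbol and the callbacks are placeholders):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_EXAMPLE_PM
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops example_bus_pm_ops = {
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume	 = example_runtime_resume,
};
#endif

struct bus_type example_bus_type = {
	.name	= "example",
	/* .pm is simply left NULL when PM support is compiled out */
#ifdef CONFIG_EXAMPLE_PM
	.pm	= &example_bus_pm_ops,
#endif
};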
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 4f84a41ee7a..3788e738e26 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -96,16 +96,21 @@ static ssize_t show_ep_interval(struct device *dev,
switch (usb_endpoint_type(ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
- if (ep->udev->speed == USB_SPEED_HIGH) /* uframes per NAK */
+ if (ep->udev->speed == USB_SPEED_HIGH)
+ /* uframes per NAK */
interval = ep->desc->bInterval;
break;
+
case USB_ENDPOINT_XFER_ISOC:
interval = 1 << (ep->desc->bInterval - 1);
break;
+
case USB_ENDPOINT_XFER_BULK:
- if (ep->udev->speed == USB_SPEED_HIGH && !in) /* uframes per NAK */
+ if (ep->udev->speed == USB_SPEED_HIGH && !in)
+ /* uframes per NAK */
interval = ep->desc->bInterval;
break;
+
case USB_ENDPOINT_XFER_INT:
if (ep->udev->speed == USB_SPEED_HIGH)
interval = 1 << (ep->desc->bInterval - 1);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cd..1e6ccef2cf0 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
- int retval = -EINVAL;
+ int retval;
int minor_base = class_driver->minor_base;
- int minor = 0;
+ int minor;
char name[20];
char *temp;
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
*/
minor_base = 0;
#endif
- intf->minor = -1;
-
- dbg ("looking for a minor, starting at %d", minor_base);
if (class_driver->fops == NULL)
- goto exit;
+ return -EINVAL;
+ if (intf->minor >= 0)
+ return -EADDRINUSE;
+
+ retval = init_usb_class();
+ if (retval)
+ return retval;
+
+ dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
continue;
usb_minors[minor] = class_driver->fops;
-
- retval = 0;
+ intf->minor = minor;
break;
}
up_write(&minor_rwsem);
-
- if (retval)
- goto exit;
-
- retval = init_usb_class();
- if (retval)
- goto exit;
-
- intf->minor = minor;
+ if (intf->minor < 0)
+ return -EXFULL;
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
"%s", temp);
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
- usb_minors[intf->minor] = NULL;
+ usb_minors[minor] = NULL;
+ intf->minor = -1;
up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
-exit:
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
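After the rework above, usb_register_dev() fails fast with -EINVAL (no fops), -EADDRINUSE (interface already has a minor) or -EXFULL (no free minor) and only then creates the class device. A hedged sketch of the usual caller side in a class driver's probe/disconnect (driver name, fops and minor base are placeholders):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/usb.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct usb_class_driver example_class = {
	.name		= "example%d",
	.fops		= &example_fops,
	.minor_base	= 192,		/* placeholder minor base */
};

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	/* Allocates a minor, creates the class device, sets intf->minor */
	int retval = usb_register_dev(intf, &example_class);

	if (retval)
		dev_err(&intf->dev, "unable to get a minor: %d\n", retval);
	return retval;
}

static void example_disconnect(struct usb_interface *intf)
{
	usb_deregister_dev(intf, &example_class);	/* frees the minor */
}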
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 9a34ccb0a1c..69ecd3c9231 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -105,8 +105,10 @@ int usb_choose_configuration(struct usb_device *udev)
/* When the first config's first interface is one of Microsoft's
* pet nonstandard Ethernet-over-USB protocols, ignore it unless
* this kernel has enabled the necessary host side driver.
+ * But: Don't ignore it if it's the only config.
*/
- if (i == 0 && desc && (is_rndis(desc) || is_activesync(desc))) {
+ if (i == 0 && num_configs > 1 && desc &&
+ (is_rndis(desc) || is_activesync(desc))) {
#if !defined(CONFIG_USB_NET_RNDIS_HOST) && !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
continue;
#else
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 1cf2d1e79a5..c3f98543caa 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -66,10 +66,7 @@ static void companion_common(struct pci_dev *pdev, struct usb_hcd *hcd,
* vice versa.
*/
companion = NULL;
- for (;;) {
- companion = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, companion);
- if (!companion)
- break;
+ for_each_pci_dev(companion) {
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
@@ -250,6 +247,9 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (retval != 0)
goto err4;
set_hs_companion(dev, hcd);
+
+ if (pci_dev_run_wake(dev))
+ pm_runtime_put_noidle(&dev->dev);
return retval;
err4:
@@ -292,6 +292,17 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
if (!hcd)
return;
+ if (pci_dev_run_wake(dev))
+ pm_runtime_get_noresume(&dev->dev);
+
+ /* Fake an interrupt request in order to give the driver a chance
+ * to test whether the controller hardware has been removed (e.g.,
+ * cardbus physical eject).
+ */
+ local_irq_disable();
+ usb_hcd_irq(0, hcd);
+ local_irq_enable();
+
usb_remove_hcd(hcd);
if (hcd->driver->flags & HCD_MEMORY) {
iounmap(hcd->regs);
@@ -317,12 +328,34 @@ void usb_hcd_pci_shutdown(struct pci_dev *dev)
if (!hcd)
return;
- if (hcd->driver->shutdown)
+ if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) &&
+ hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_OPS
+
+#ifdef CONFIG_PPC_PMAC
+static void powermac_set_asic(struct pci_dev *pci_dev, int enable)
+{
+ /* Enable or disable ASIC clocks for USB */
+ if (machine_is(powermac)) {
+ struct device_node *of_node;
+
+ of_node = pci_device_to_OF_node(pci_dev);
+ if (of_node)
+ pmac_call_feature(PMAC_FTR_USB_ENABLE,
+ of_node, 0, enable);
+ }
+}
+
+#else
+
+static inline void powermac_set_asic(struct pci_dev *pci_dev, int enable)
+{}
+
+#endif /* CONFIG_PPC_PMAC */
static int check_root_hub_suspended(struct device *dev)
{
@@ -337,7 +370,7 @@ static int check_root_hub_suspended(struct device *dev)
return 0;
}
-static int hcd_pci_suspend(struct device *dev)
+static int suspend_common(struct device *dev, bool do_wakeup)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
@@ -352,13 +385,21 @@ static int hcd_pci_suspend(struct device *dev)
if (retval)
return retval;
- /* We might already be suspended (runtime PM -- not yet written) */
- if (pci_dev->current_state != PCI_D0)
- return retval;
-
if (hcd->driver->pci_suspend) {
- retval = hcd->driver->pci_suspend(hcd);
+ /* Optimization: Don't suspend if a root-hub wakeup is
+ * pending and it would cause the HCD to wake up anyway.
+ */
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd))
+ return -EBUSY;
+ retval = hcd->driver->pci_suspend(hcd, do_wakeup);
suspend_report_result(hcd->driver->pci_suspend, retval);
+
+ /* Check again in case wakeup raced with pci_suspend */
+ if (retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ if (hcd->driver->pci_resume)
+ hcd->driver->pci_resume(hcd, false);
+ retval = -EBUSY;
+ }
if (retval)
return retval;
}
@@ -374,6 +415,48 @@ static int hcd_pci_suspend(struct device *dev)
return retval;
}
+static int resume_common(struct device *dev, int event)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+ int retval;
+
+ if (hcd->state != HC_STATE_SUSPENDED) {
+ dev_dbg(dev, "can't resume, not suspended!\n");
+ return 0;
+ }
+
+ retval = pci_enable_device(pci_dev);
+ if (retval < 0) {
+ dev_err(dev, "can't re-enable after resume, %d!\n", retval);
+ return retval;
+ }
+
+ pci_set_master(pci_dev);
+
+ clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ if (hcd->driver->pci_resume) {
+ if (event != PM_EVENT_AUTO_RESUME)
+ wait_for_companions(pci_dev, hcd);
+
+ retval = hcd->driver->pci_resume(hcd,
+ event == PM_EVENT_RESTORE);
+ if (retval) {
+ dev_err(dev, "PCI post-resume error %d!\n", retval);
+ usb_hc_died(hcd);
+ }
+ }
+ return retval;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int hcd_pci_suspend(struct device *dev)
+{
+ return suspend_common(dev, device_may_wakeup(dev));
+}
+
static int hcd_pci_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -408,16 +491,7 @@ static int hcd_pci_suspend_noirq(struct device *dev)
return retval;
}
-#ifdef CONFIG_PPC_PMAC
- /* Disable ASIC clocks for USB */
- if (machine_is(powermac)) {
- struct device_node *of_node;
-
- of_node = pci_device_to_OF_node(pci_dev);
- if (of_node)
- pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
- }
-#endif
+ powermac_set_asic(pci_dev, 0);
return retval;
}
@@ -425,69 +499,63 @@ static int hcd_pci_resume_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
-#ifdef CONFIG_PPC_PMAC
- /* Reenable ASIC clocks for USB */
- if (machine_is(powermac)) {
- struct device_node *of_node;
-
- of_node = pci_device_to_OF_node(pci_dev);
- if (of_node)
- pmac_call_feature(PMAC_FTR_USB_ENABLE,
- of_node, 0, 1);
- }
-#endif
+ powermac_set_asic(pci_dev, 1);
/* Go back to D0 and disable remote wakeup */
pci_back_from_sleep(pci_dev);
return 0;
}
-static int resume_common(struct device *dev, bool hibernated)
+static int hcd_pci_resume(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
- int retval;
+ return resume_common(dev, PM_EVENT_RESUME);
+}
- if (hcd->state != HC_STATE_SUSPENDED) {
- dev_dbg(dev, "can't resume, not suspended!\n");
- return 0;
- }
+static int hcd_pci_restore(struct device *dev)
+{
+ return resume_common(dev, PM_EVENT_RESTORE);
+}
- retval = pci_enable_device(pci_dev);
- if (retval < 0) {
- dev_err(dev, "can't re-enable after resume, %d!\n", retval);
- return retval;
- }
+#else
- pci_set_master(pci_dev);
+#define hcd_pci_suspend NULL
+#define hcd_pci_suspend_noirq NULL
+#define hcd_pci_resume_noirq NULL
+#define hcd_pci_resume NULL
+#define hcd_pci_restore NULL
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+#endif /* CONFIG_PM_SLEEP */
- if (hcd->driver->pci_resume) {
- /* This call should be made only during system resume,
- * not during runtime resume.
- */
- wait_for_companions(pci_dev, hcd);
+#ifdef CONFIG_PM_RUNTIME
- retval = hcd->driver->pci_resume(hcd, hibernated);
- if (retval) {
- dev_err(dev, "PCI post-resume error %d!\n", retval);
- usb_hc_died(hcd);
- }
- }
+static int hcd_pci_runtime_suspend(struct device *dev)
+{
+ int retval;
+
+ retval = suspend_common(dev, true);
+ if (retval == 0)
+ powermac_set_asic(to_pci_dev(dev), 0);
+ dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
return retval;
}
-static int hcd_pci_resume(struct device *dev)
+static int hcd_pci_runtime_resume(struct device *dev)
{
- return resume_common(dev, false);
-}
+ int retval;
-static int hcd_pci_restore(struct device *dev)
-{
- return resume_common(dev, true);
+ powermac_set_asic(to_pci_dev(dev), 1);
+ retval = resume_common(dev, PM_EVENT_AUTO_RESUME);
+ dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+ return retval;
}
+#else
+
+#define hcd_pci_runtime_suspend NULL
+#define hcd_pci_runtime_resume NULL
+
+#endif /* CONFIG_PM_RUNTIME */
+
const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend = hcd_pci_suspend,
.suspend_noirq = hcd_pci_suspend_noirq,
@@ -501,7 +569,9 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.poweroff_noirq = hcd_pci_suspend_noirq,
.restore_noirq = hcd_pci_resume_noirq,
.restore = hcd_pci_restore,
+ .runtime_suspend = hcd_pci_runtime_suspend,
+ .runtime_resume = hcd_pci_runtime_resume,
};
EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_OPS */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 12742f152f4..5cca00a6d09 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -667,7 +667,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
unsigned long flags;
char buffer[6]; /* Any root hubs with > 31 ports? */
- if (unlikely(!hcd->rh_registered))
+ if (unlikely(!hcd->rh_pollable))
return;
if (!hcd->uses_new_polling && !hcd->status_urb)
return;
@@ -679,7 +679,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
spin_lock_irqsave(&hcd_root_hub_lock, flags);
urb = hcd->status_urb;
if (urb) {
- hcd->poll_pending = 0;
+ clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
hcd->status_urb = NULL;
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
@@ -690,7 +690,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
spin_lock(&hcd_root_hub_lock);
} else {
length = 0;
- hcd->poll_pending = 1;
+ set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
}
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
}
@@ -699,7 +699,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
* exceed that limit if HZ is 100. The math is more clunky than
* maybe expected, this is to make sure that all timers for USB devices
* fire at the same time to give the CPU a break in between */
- if (hcd->uses_new_polling ? hcd->poll_rh :
+ if (hcd->uses_new_polling ? HCD_POLL_RH(hcd) :
(length == 0 && hcd->status_urb != NULL))
mod_timer (&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
}
@@ -736,7 +736,7 @@ static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb)
mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
/* If a status change has already occurred, report it ASAP */
- else if (hcd->poll_pending)
+ else if (HCD_POLL_PENDING(hcd))
mod_timer(&hcd->rh_timer, jiffies);
retval = 0;
done:
@@ -1150,8 +1150,7 @@ int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
* finish unlinking the initial failed usb_set_address()
* or device descriptor fetch.
*/
- if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) &&
- !is_root_hub(urb->dev)) {
+ if (!HCD_SAW_IRQ(hcd) && !is_root_hub(urb->dev)) {
dev_warn(hcd->self.controller, "Unlink after no-IRQ? "
"Controller is probably using the wrong IRQ.\n");
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
@@ -1219,6 +1218,11 @@ static int hcd_alloc_coherent(struct usb_bus *bus,
{
unsigned char *vaddr;
+ if (*vaddr_handle == NULL) {
+ WARN_ON_ONCE(1);
+ return -EFAULT;
+ }
+
vaddr = hcd_buffer_alloc(bus, size + sizeof(vaddr),
mem_flags, dma_handle);
if (!vaddr)
@@ -1941,6 +1945,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
dev_dbg(&rhdev->dev, "usb %s%s\n",
(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
+ clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
if (!hcd->driver->bus_resume)
return -ENOENT;
if (hcd->state == HC_STATE_RUNNING)
@@ -1994,8 +1999,10 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
unsigned long flags;
spin_lock_irqsave (&hcd_root_hub_lock, flags);
- if (hcd->rh_registered)
+ if (hcd->rh_registered) {
+ set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
+ }
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
@@ -2063,8 +2070,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
*/
local_irq_save(flags);
- if (unlikely(hcd->state == HC_STATE_HALT ||
- !test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ if (unlikely(hcd->state == HC_STATE_HALT || !HCD_HW_ACCESSIBLE(hcd))) {
rc = IRQ_NONE;
} else if (hcd->driver->irq(hcd) == IRQ_NONE) {
rc = IRQ_NONE;
@@ -2079,6 +2085,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
local_irq_restore(flags);
return rc;
}
+EXPORT_SYMBOL_GPL(usb_hcd_irq);
/*-------------------------------------------------------------------------*/
@@ -2098,7 +2105,7 @@ void usb_hc_died (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
- hcd->poll_rh = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* make khubd clean up old urbs and devices */
usb_set_device_state (hcd->self.root_hub,
@@ -2217,6 +2224,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
retval = -ENOMEM;
goto err_allocate_root_hub;
}
+ hcd->self.root_hub = rhdev;
switch (hcd->driver->flags & HCD_MASK) {
case HCD_USB11:
@@ -2229,9 +2237,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
rhdev->speed = USB_SPEED_SUPER;
break;
default:
- goto err_allocate_root_hub;
+ goto err_set_rh_speed;
}
- hcd->self.root_hub = rhdev;
/* wakeup flag init defaults to "everything works" for root hubs,
* but drivers can override it in reset() if needed, along with
@@ -2246,6 +2253,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
dev_err(hcd->self.controller, "can't setup\n");
goto err_hcd_driver_setup;
}
+ hcd->rh_pollable = 1;
/* NOTE: root hub and controller capabilities may not be the same */
if (device_can_wakeup(hcd->self.controller)
@@ -2300,23 +2308,38 @@ int usb_add_hcd(struct usb_hcd *hcd,
retval);
goto error_create_attr_group;
}
- if (hcd->uses_new_polling && hcd->poll_rh)
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
usb_hcd_poll_rh_status(hcd);
return retval;
error_create_attr_group:
+ if (HC_IS_RUNNING(hcd->state))
+ hcd->state = HC_STATE_QUIESCING;
+ spin_lock_irq(&hcd_root_hub_lock);
+ hcd->rh_registered = 0;
+ spin_unlock_irq(&hcd_root_hub_lock);
+
+#ifdef CONFIG_USB_SUSPEND
+ cancel_work_sync(&hcd->wakeup_work);
+#endif
mutex_lock(&usb_bus_list_lock);
- usb_disconnect(&hcd->self.root_hub);
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_list_lock);
err_register_root_hub:
+ hcd->rh_pollable = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
hcd->driver->stop(hcd);
+ hcd->state = HC_STATE_HALT;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
err_hcd_driver_start:
if (hcd->irq >= 0)
free_irq(irqnum, hcd);
err_request_irq:
err_hcd_driver_setup:
- hcd->self.root_hub = NULL;
- usb_put_dev(rhdev);
+err_set_rh_speed:
+ usb_put_dev(hcd->self.root_hub);
err_allocate_root_hub:
usb_deregister_bus(&hcd->self);
err_register_bus:
@@ -2335,8 +2358,13 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
*/
void usb_remove_hcd(struct usb_hcd *hcd)
{
+ struct usb_device *rhdev = hcd->self.root_hub;
+
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
+ usb_get_dev(rhdev);
+ sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group);
+
if (HC_IS_RUNNING (hcd->state))
hcd->state = HC_STATE_QUIESCING;
@@ -2349,19 +2377,30 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->wakeup_work);
#endif
- sysfs_remove_group(&hcd->self.root_hub->dev.kobj, &usb_bus_attr_group);
mutex_lock(&usb_bus_list_lock);
- usb_disconnect(&hcd->self.root_hub);
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_list_lock);
+ /* Prevent any more root-hub status calls from the timer.
+ * The HCD might still restart the timer (if a port status change
+ * interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
+ * the hub_status_data() callback.
+ */
+ hcd->rh_pollable = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
hcd->driver->stop(hcd);
hcd->state = HC_STATE_HALT;
- hcd->poll_rh = 0;
+ /* In case the HCD restarted the timer, stop it again. */
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
if (hcd->irq >= 0)
free_irq(hcd->irq, hcd);
+
+ usb_put_dev(hcd->self.root_hub);
usb_deregister_bus(&hcd->self);
hcd_buffer_destroy(hcd);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 70cccc75a36..84c1897188d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -20,6 +20,7 @@
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/quirks.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
@@ -1294,6 +1295,7 @@ descriptor_error:
return -ENODEV;
}
+/* No BKL needed */
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
@@ -1801,7 +1803,6 @@ int usb_new_device(struct usb_device *udev)
pm_runtime_set_active(&udev->dev);
pm_runtime_enable(&udev->dev);
- usb_detect_quirks(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
@@ -2880,7 +2881,9 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
}
retval = 0;
-
+ /* notify HCD that we have a device connected and addressed */
+ if (hcd->driver->update_device)
+ hcd->driver->update_device(hcd, udev);
fail:
if (retval) {
hub_port_disable(hub, port1, 0);
@@ -3111,6 +3114,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (status < 0)
goto loop;
+ usb_detect_quirks(udev);
+ if (udev->quirks & USB_QUIRK_DELAY_INIT)
+ msleep(1000);
+
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has
* a "powered" LED, users should notice we didn't enable it
@@ -3463,7 +3470,7 @@ static struct usb_driver hub_driver = {
.reset_resume = hub_reset_resume,
.pre_reset = hub_pre_reset,
.post_reset = hub_post_reset,
- .ioctl = hub_ioctl,
+ .unlocked_ioctl = hub_ioctl,
.id_table = hub_id_table,
.supports_autosuspend = 1,
};
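hub_port_init() now calls an optional hcd->driver->update_device(hcd, udev) hook once the device is connected and addressed. A hedged sketch of an HCD opting in (the int return type is assumed here from the call site, and only the relevant hc_driver fields are shown):

#include <linux/usb.h>
#include <linux/usb/hcd.h>

/* Assumed signature, mirroring the call in the hub.c hunk above */
static int example_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	/* e.g. reprogram controller state that depends on this device */
	return 0;
}

static const struct hc_driver example_hc_driver = {
	.description	= "example-hcd",
	.update_device	= example_update_device,
};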
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 1a27618b67d..095fa536669 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -265,13 +265,9 @@ static int remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
- lock_kernel();
-
if (usbfs_mount && usbfs_mount->mnt_sb)
update_sb(usbfs_mount->mnt_sb);
- unlock_kernel();
-
return 0;
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index fd4c36ea5e4..9f0ce7de0e3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1724,6 +1724,15 @@ free_interfaces:
if (ret)
goto free_interfaces;
+ /* if it's already configured, clear out old state first.
+ * getting rid of old interfaces means unbinding their drivers.
+ */
+ if (dev->state != USB_STATE_ADDRESS)
+ usb_disable_device(dev, 1); /* Skip ep0 */
+
+ /* Get rid of pending async Set-Config requests for this device */
+ cancel_async_set_config(dev);
+
/* Make sure we have bandwidth (and available HCD resources) for this
* configuration. Remove endpoints from the schedule if we're dropping
* this configuration to set configuration 0. After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
mutex_lock(&hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
- usb_autosuspend_device(dev);
mutex_unlock(&hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
- /* if it's already configured, clear out old state first.
- * getting rid of old interfaces means unbinding their drivers.
- */
- if (dev->state != USB_STATE_ADDRESS)
- usb_disable_device(dev, 1); /* Skip ep0 */
-
- /* Get rid of pending async Set-Config requests for this device */
- cancel_async_set_config(dev);
-
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- usb_autosuspend_device(dev);
mutex_unlock(&hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
mutex_unlock(&hcd->bandwidth_mutex);
@@ -1802,6 +1802,7 @@ free_interfaces:
intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+ intf->minor = -1;
device_initialize(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index db99c084df9..25719da45e3 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -38,6 +38,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Harmony 700-series */
+ { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
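The quirks.c entry above works together with the hub.c change that moves usb_detect_quirks() before enumeration and sleeps when USB_QUIRK_DELAY_INIT is set. A hedged sketch of how a quirk entry and its consumer fit together (the vendor/product IDs are placeholders):

#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* Placeholder IDs: a device that needs extra settle time after connect */
static const struct usb_device_id example_quirk_list[] = {
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_DELAY_INIT },
	{ }	/* terminating entry */
};

static void example_post_detect(struct usb_device *udev)
{
	/* Mirrors the new hub_port_connect_change() logic above */
	if (udev->quirks & USB_QUIRK_DELAY_INIT)
		msleep(1000);
}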
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 7c0555548ac..419e6b34e2f 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -137,6 +137,16 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
+/* Callers must hold anchor->lock */
+static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
+{
+ urb->anchor = NULL;
+ list_del(&urb->anchor_list);
+ usb_put_urb(urb);
+ if (list_empty(&anchor->urb_list))
+ wake_up(&anchor->wait);
+}
+
/**
* usb_unanchor_urb - unanchors an URB
* @urb: pointer to the urb to anchor
@@ -156,17 +166,14 @@ void usb_unanchor_urb(struct urb *urb)
return;
spin_lock_irqsave(&anchor->lock, flags);
- if (unlikely(anchor != urb->anchor)) {
- /* we've lost the race to another thread */
- spin_unlock_irqrestore(&anchor->lock, flags);
- return;
- }
- urb->anchor = NULL;
- list_del(&urb->anchor_list);
+ /*
+ * At this point, we could be competing with another thread which
+ * has the same intention. To protect the urb from being unanchored
+ * twice, only the winner of the race gets the job.
+ */
+ if (likely(anchor == urb->anchor))
+ __usb_unanchor_urb(urb, anchor);
spin_unlock_irqrestore(&anchor->lock, flags);
- usb_put_urb(urb);
- if (list_empty(&anchor->urb_list))
- wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
@@ -749,20 +756,11 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
- unsigned long flags;
- spin_lock_irqsave(&anchor->lock, flags);
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- usb_get_urb(victim);
- spin_unlock_irqrestore(&anchor->lock, flags);
- /* this will unanchor the URB */
+ while ((victim = usb_get_from_anchor(anchor)) != NULL) {
usb_unlink_urb(victim);
usb_put_urb(victim);
- spin_lock_irqsave(&anchor->lock, flags);
}
- spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
@@ -799,12 +797,11 @@ struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
victim = list_entry(anchor->urb_list.next, struct urb,
anchor_list);
usb_get_urb(victim);
- spin_unlock_irqrestore(&anchor->lock, flags);
- usb_unanchor_urb(victim);
+ __usb_unanchor_urb(victim, anchor);
} else {
- spin_unlock_irqrestore(&anchor->lock, flags);
victim = NULL;
}
+ spin_unlock_irqrestore(&anchor->lock, flags);
return victim;
}
@@ -826,12 +823,7 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev, struct urb,
anchor_list);
- usb_get_urb(victim);
- spin_unlock_irqrestore(&anchor->lock, flags);
- /* this may free the URB */
- usb_unanchor_urb(victim);
- usb_put_urb(victim);
- spin_lock_irqsave(&anchor->lock, flags);
+ __usb_unanchor_urb(victim, anchor);
}
spin_unlock_irqrestore(&anchor->lock, flags);
}
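The urb.c hunks fold the unanchoring logic into a lock-held __usb_unanchor_urb() helper and make usb_unlink_anchored_urbs() drain through usb_get_from_anchor(); the driver-facing anchor API is unchanged. A hedged sketch of typical driver-side usage:

#include <linux/gfp.h>
#include <linux/usb.h>

struct example_dev {
	struct usb_anchor submitted;	/* all in-flight URBs */
};

static void example_init(struct example_dev *dev)
{
	init_usb_anchor(&dev->submitted);
}

static int example_submit(struct example_dev *dev, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, &dev->submitted);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);	/* submission failed, drop it again */
	return ret;
}

static void example_disconnect(struct example_dev *dev)
{
	/* Internally walks the anchor via usb_get_from_anchor(), as above */
	usb_unlink_anchored_urbs(&dev->submitted);
}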
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 5ae14f6c1e7..fdd4130fbb7 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -317,10 +317,6 @@ static const struct dev_pm_ops usb_device_pm_ops = {
.restore = usb_dev_restore,
};
-#else
-
-#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL)
-
#endif /* CONFIG_PM */
@@ -338,7 +334,9 @@ struct device_type usb_device_type = {
.release = usb_release_dev,
.uevent = usb_dev_uevent,
.devnode = usb_devnode,
+#ifdef CONFIG_PM
.pm = &usb_device_pm_ops,
+#endif
};
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 591ae9fde19..cd27f9bde2c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -714,6 +714,7 @@ config USB_GADGETFS
config USB_FUNCTIONFS
tristate "Function Filesystem (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ select USB_FUNCTIONFS_GENERIC if !(USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
help
The Function Filesystem (FunctionFS) lets one create USB
composite functions in user space in the same way as GadgetFS
@@ -722,31 +723,31 @@ config USB_FUNCTIONFS
implemented in kernel space (for instance Ethernet, serial or
mass storage) and other are implemented in user space.
+ If you say "y" or "m" here you will be able what kind of
+ configurations the gadget will provide.
+
Say "y" to link the driver statically, or "m" to build
a dynamically linked module called "g_ffs".
config USB_FUNCTIONFS_ETH
- bool "Include CDC ECM (Ethernet) function"
+ bool "Include configuration with CDC ECM (Ethernet)"
depends on USB_FUNCTIONFS && NET
help
- Include an CDC ECM (Ethernet) funcion in the CDC ECM (Funcion)
- Filesystem. If you also say "y" to the RNDIS query below the
- gadget will have two configurations.
+ Include a configuration with the CDC ECM function (Ethernet) and
+ the Function Filesystem.
config USB_FUNCTIONFS_RNDIS
- bool "Include RNDIS (Ethernet) function"
+ bool "Include configuration with RNDIS (Ethernet)"
depends on USB_FUNCTIONFS && NET
help
- Include an RNDIS (Ethernet) funcion in the Funcion Filesystem.
- If you also say "y" to the CDC ECM query above the gadget will
- have two configurations.
+ Include a configuration with the RNDIS function (Ethernet) and the
+ Function Filesystem.
config USB_FUNCTIONFS_GENERIC
bool "Include 'pure' configuration"
- depends on USB_FUNCTIONFS && (USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
+ depends on USB_FUNCTIONFS
help
- Include a configuration with FunctionFS and no Ethernet
- configuration.
+ Include a configuration with the Function Filesystem alone,
+ without any Ethernet interface.
config USB_FILE_STORAGE
tristate "File-backed Storage Gadget"
@@ -863,6 +864,7 @@ config USB_G_NOKIA
config USB_G_MULTI
tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
depends on BLOCK && NET
+ select USB_G_MULTI_CDC if !USB_G_MULTI_RNDIS
help
The Multifunction Composite Gadget provides Ethernet (RNDIS
and/or CDC Ethernet), mass storage and ACM serial link
@@ -913,6 +915,34 @@ config USB_G_HID
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_hid".
+config USB_G_DBGP
+ tristate "EHCI Debug Device Gadget"
+ help
+ This gadget emulates an EHCI Debug device. This is useful when you want
+ to interact with an EHCI Debug Port.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_dbgp".
+
+if USB_G_DBGP
+choice
+ prompt "EHCI Debug Device mode"
+ default USB_G_DBGP_SERIAL
+
+config USB_G_DBGP_PRINTK
+ depends on USB_G_DBGP
+ bool "printk"
+ help
+ Directly printk() received data. No interaction.
+
+config USB_G_DBGP_SERIAL
+ depends on USB_G_DBGP
+ bool "serial"
+ help
+ Userland can interact using /dev/ttyGSxxx.
+endchoice
+endif
+
# put drivers that need isochronous transfer support (for audio
# or video class gadget drivers), or specific hardware, here.
config USB_G_WEBCAM
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9bcde110feb..27283df37d0 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -44,6 +44,7 @@ g_printer-objs := printer.o
g_cdc-objs := cdc2.o
g_multi-objs := multi.o
g_hid-objs := hid.o
+g_dbgp-objs := dbgp.o
g_nokia-objs := nokia.o
g_webcam-objs := webcam.o
@@ -52,7 +53,6 @@ obj-$(CONFIG_USB_AUDIO) += g_audio.o
obj-$(CONFIG_USB_ETH) += g_ether.o
obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
obj-$(CONFIG_USB_FUNCTIONFS) += g_ffs.o
-obj-$(CONFIG_USB_ETH_FUNCTIONFS) += g_eth_ffs.o
obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
obj-$(CONFIG_USB_MASS_STORAGE) += g_mass_storage.o
obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
@@ -60,6 +60,7 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
obj-$(CONFIG_USB_G_HID) += g_hid.o
+obj-$(CONFIG_USB_G_DBGP) += g_dbgp.o
obj-$(CONFIG_USB_G_MULTI) += g_multi.o
obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index a62af7b5909..b744ccd0f34 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -89,7 +89,7 @@ static const struct usb_descriptor_header *otg_desc[] = {
/*-------------------------------------------------------------------------*/
-static int __init audio_do_config(struct usb_configuration *c)
+static int __ref audio_do_config(struct usb_configuration *c)
{
/* FIXME alloc iConfiguration string, set it in c->strings */
@@ -113,7 +113,7 @@ static struct usb_configuration audio_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init audio_bind(struct usb_composite_dev *cdev)
+static int __ref audio_bind(struct usb_composite_dev *cdev)
{
int gcnum;
int status;
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 928137d3dbd..1f5ba2fd4c1 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -129,7 +129,7 @@ static u8 hostaddr[ETH_ALEN];
/*
* We _always_ have both CDC ECM and CDC ACM functions.
*/
-static int __init cdc_do_config(struct usb_configuration *c)
+static int __ref cdc_do_config(struct usb_configuration *c)
{
int status;
@@ -159,7 +159,7 @@ static struct usb_configuration cdc_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init cdc_bind(struct usb_composite_dev *cdev)
+static int __ref cdc_bind(struct usb_composite_dev *cdev)
{
int gcnum;
struct usb_gadget *gadget = cdev->gadget;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 391d169f8d0..1160c55de7f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -673,20 +673,83 @@ static int get_string(struct usb_composite_dev *cdev,
* string IDs. Drivers for functions, configurations, or gadgets will
* then store that ID in the appropriate descriptors and string table.
*
- * All string identifier should be allocated using this routine, to
- * ensure that for example different functions don't wrongly assign
- * different meanings to the same identifier.
+ * All string identifiers should be allocated using this,
+ * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
+ * that for example different functions don't wrongly assign different
+ * meanings to the same identifier.
*/
int usb_string_id(struct usb_composite_dev *cdev)
{
if (cdev->next_string_id < 254) {
- /* string id 0 is reserved */
+ /* string id 0 is reserved by USB spec for list of
+ * supported languages */
+ /* 255 reserved as well? -- mina86 */
cdev->next_string_id++;
return cdev->next_string_id;
}
return -ENODEV;
}
+/**
+ * usb_string_ids_tab() - allocate unused string IDs in batch
+ * @cdev: the device whose string descriptor IDs are being allocated
+ * @str: an array of usb_string objects to assign numbers to
+ * Context: single threaded during gadget setup
+ *
+ * @usb_string_ids_tab() is called from bind() callbacks to allocate
+ * string IDs. Drivers for functions, configurations, or gadgets will
+ * then copy IDs from the string table to the appropriate descriptors
+ * and string table for other languages.
+ *
+ * All string identifiers should be allocated using this,
+ * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
+ * example different functions don't wrongly assign different meanings
+ * to the same identifier.
+ */
+int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
+{
+ int next = cdev->next_string_id;
+
+ for (; str->s; ++str) {
+ if (unlikely(next >= 254))
+ return -ENODEV;
+ str->id = ++next;
+ }
+
+ cdev->next_string_id = next;
+
+ return 0;
+}
+
+/**
+ * usb_string_ids_n() - allocate unused string IDs in batch
+ * @c: the device whose string descriptor IDs are being allocated
+ * @n: number of string IDs to allocate
+ * Context: single threaded during gadget setup
+ *
+ * Returns the first of the requested IDs; this ID and the next @n-1
+ * IDs are then valid. Note that @n must be non-zero: if it is zero,
+ * the value returned is merely the last previously allocated ID,
+ * which is of no use.
+ *
+ * @usb_string_ids_n() is called from bind() callbacks to allocate
+ * string IDs. Drivers for functions, configurations, or gadgets will
+ * then store that ID in the appropriate descriptors and string table.
+ *
+ * All string identifiers should be allocated using this,
+ * @usb_string_id() or @usb_string_ids_tab() routine, to ensure that for
+ * example different functions don't wrongly assign different meanings
+ * to the same identifier.
+ */
+int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
+{
+ unsigned next = c->next_string_id;
+ if (unlikely(n > 254 || (unsigned)next + n > 254))
+ return -ENODEV;
+ c->next_string_id += n;
+ return next + 1;
+}
+
+
/*-------------------------------------------------------------------------*/
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
@@ -893,6 +956,8 @@ static void composite_disconnect(struct usb_gadget *gadget)
spin_lock_irqsave(&cdev->lock, flags);
if (cdev->config)
reset_config(cdev);
+ if (composite->disconnect)
+ composite->disconnect(cdev);
spin_unlock_irqrestore(&cdev->lock, flags);
}
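The two helpers above batch what usb_string_id() hands out one at a time: usb_string_ids_tab() walks a NULL-terminated usb_string table filling in each .id, and usb_string_ids_n() reserves a contiguous block and returns its first ID. A hedged sketch of the intended call pattern from a gadget bind() (the descriptor and string contents are placeholders):

#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>

enum { EXAMPLE_STR_MANUFACTURER, EXAMPLE_STR_PRODUCT };

static struct usb_string example_strings[] = {
	[EXAMPLE_STR_MANUFACTURER].s = "Example Manufacturer",
	[EXAMPLE_STR_PRODUCT].s      = "Example Product",
	{  }	/* a NULL .s entry terminates the table */
};

static struct usb_device_descriptor example_device_desc = {
	.bLength	 = sizeof example_device_desc,
	.bDescriptorType = USB_DT_DEVICE,
};

static int example_bind(struct usb_composite_dev *cdev)
{
	int status = usb_string_ids_tab(cdev, example_strings);

	if (status < 0)
		return status;

	/* The helper filled in .id for every entry; copy into descriptors */
	example_device_desc.iManufacturer =
		example_strings[EXAMPLE_STR_MANUFACTURER].id;
	example_device_desc.iProduct =
		example_strings[EXAMPLE_STR_PRODUCT].id;
	return 0;
}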
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
new file mode 100644
index 00000000000..0ed50a2c0a3
--- /dev/null
+++ b/drivers/usb/gadget/dbgp.c
@@ -0,0 +1,434 @@
+/*
+ * dbgp.c -- EHCI Debug Port device gadget
+ *
+ * Copyright (C) 2010 Stephane Duverger
+ *
+ * Released under the GPLv2.
+ *
+ */
+
+/* verbose messages */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* See comments in "zero.c" */
+#include "epautoconf.c"
+
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+#include "u_serial.c"
+#endif
+
+#define DRIVER_VENDOR_ID 0x0525 /* NetChip */
+#define DRIVER_PRODUCT_ID 0xc0de /* undefined */
+
+#define USB_DEBUG_MAX_PACKET_SIZE 8
+#define DBGP_REQ_EP0_LEN 128
+#define DBGP_REQ_LEN 512
+
+static struct dbgp {
+ struct usb_gadget *gadget;
+ struct usb_request *req;
+ struct usb_ep *i_ep;
+ struct usb_ep *o_ep;
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+ struct gserial *serial;
+#endif
+} dbgp;
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_VENDOR_SPEC,
+ .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID),
+ .bNumConfigurations = 1,
+};
+
+static struct usb_debug_descriptor dbg_desc = {
+ .bLength = sizeof dbg_desc,
+ .bDescriptorType = USB_DT_DEBUG,
+};
+
+static struct usb_endpoint_descriptor i_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .bEndpointAddress = USB_DIR_IN,
+};
+
+static struct usb_endpoint_descriptor o_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .bEndpointAddress = USB_DIR_OUT,
+};
+
+#ifdef CONFIG_USB_G_DBGP_PRINTK
+static int dbgp_consume(char *buf, unsigned len)
+{
+ char c;
+
+ if (!len)
+ return 0;
+
+ c = buf[len-1];
+ if (c != 0)
+ buf[len-1] = 0;
+
+ printk(KERN_NOTICE "%s%c", buf, c);
+ return 0;
+}
+
+static void __disable_ep(struct usb_ep *ep)
+{
+ if (ep && ep->driver_data == dbgp.gadget) {
+ usb_ep_disable(ep);
+ ep->driver_data = NULL;
+ }
+}
+
+static void dbgp_disable_ep(void)
+{
+ __disable_ep(dbgp.i_ep);
+ __disable_ep(dbgp.o_ep);
+}
+
+static void dbgp_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ int stp;
+ int err = 0;
+ int status = req->status;
+
+ if (ep == dbgp.i_ep) {
+ stp = 1;
+ goto fail;
+ }
+
+ if (status != 0) {
+ stp = 2;
+ goto release_req;
+ }
+
+ dbgp_consume(req->buf, req->actual);
+
+ req->length = DBGP_REQ_LEN;
+ err = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (err < 0) {
+ stp = 3;
+ goto release_req;
+ }
+
+ return;
+
+release_req:
+ kfree(req->buf);
+ usb_ep_free_request(dbgp.o_ep, req);
+ dbgp_disable_ep();
+fail:
+ dev_dbg(&dbgp.gadget->dev,
+ "complete: failure (%d:%d) ==> %d\n", stp, err, status);
+}
+
+static int dbgp_enable_ep_req(struct usb_ep *ep)
+{
+ int err, stp;
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ stp = 1;
+ goto fail_1;
+ }
+
+ req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+ if (!req->buf) {
+ err = -ENOMEM;
+ stp = 2;
+ goto fail_2;
+ }
+
+ req->complete = dbgp_complete;
+ req->length = DBGP_REQ_LEN;
+ err = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (err < 0) {
+ stp = 3;
+ goto fail_3;
+ }
+
+ return 0;
+
+fail_3:
+ kfree(req->buf);
+fail_2:
+ usb_ep_free_request(dbgp.o_ep, req);
+fail_1:
+ dev_dbg(&dbgp.gadget->dev,
+ "enable ep req: failure (%d:%d)\n", stp, err);
+ return err;
+}
+
+static int __enable_ep(struct usb_ep *ep, struct usb_endpoint_descriptor *desc)
+{
+ int err = usb_ep_enable(ep, desc);
+ ep->driver_data = dbgp.gadget;
+ return err;
+}
+
+static int dbgp_enable_ep(void)
+{
+ int err, stp;
+
+ err = __enable_ep(dbgp.i_ep, &i_desc);
+ if (err < 0) {
+ stp = 1;
+ goto fail_1;
+ }
+
+ err = __enable_ep(dbgp.o_ep, &o_desc);
+ if (err < 0) {
+ stp = 2;
+ goto fail_2;
+ }
+
+ err = dbgp_enable_ep_req(dbgp.o_ep);
+ if (err < 0) {
+ stp = 3;
+ goto fail_3;
+ }
+
+ return 0;
+
+fail_3:
+ __disable_ep(dbgp.o_ep);
+fail_2:
+ __disable_ep(dbgp.i_ep);
+fail_1:
+ dev_dbg(&dbgp.gadget->dev, "enable ep: failure (%d:%d)\n", stp, err);
+ return err;
+}
+#endif
+
+static void dbgp_disconnect(struct usb_gadget *gadget)
+{
+#ifdef CONFIG_USB_G_DBGP_PRINTK
+ dbgp_disable_ep();
+#else
+ gserial_disconnect(dbgp.serial);
+#endif
+}
+
+static void dbgp_unbind(struct usb_gadget *gadget)
+{
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+ kfree(dbgp.serial);
+#endif
+ if (dbgp.req) {
+ kfree(dbgp.req->buf);
+ usb_ep_free_request(gadget->ep0, dbgp.req);
+ }
+
+ gadget->ep0->driver_data = NULL;
+}
+
+static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
+{
+ int stp;
+
+ usb_ep_autoconfig_reset(gadget);
+
+ dbgp.i_ep = usb_ep_autoconfig(gadget, &i_desc);
+ if (!dbgp.i_ep) {
+ stp = 1;
+ goto fail_1;
+ }
+
+ dbgp.i_ep->driver_data = gadget;
+ i_desc.wMaxPacketSize =
+ __constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+
+ dbgp.o_ep = usb_ep_autoconfig(gadget, &o_desc);
+ if (!dbgp.o_ep) {
+ dbgp.i_ep->driver_data = NULL;
+ stp = 2;
+ goto fail_2;
+ }
+
+ dbgp.o_ep->driver_data = gadget;
+ o_desc.wMaxPacketSize =
+ __constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+
+ dbg_desc.bDebugInEndpoint = i_desc.bEndpointAddress & 0x7f;
+ dbg_desc.bDebugOutEndpoint = o_desc.bEndpointAddress & 0x7f;
+
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+ dbgp.serial->in = dbgp.i_ep;
+ dbgp.serial->out = dbgp.o_ep;
+
+ dbgp.serial->in_desc = &i_desc;
+ dbgp.serial->out_desc = &o_desc;
+
+ if (gserial_setup(gadget, 1) < 0) {
+ stp = 3;
+ goto fail_3;
+ }
+
+ return 0;
+
+fail_3:
+ dbgp.o_ep->driver_data = NULL;
+#else
+ return 0;
+#endif
+fail_2:
+ dbgp.i_ep->driver_data = NULL;
+fail_1:
+ dev_dbg(&dbgp.gadget->dev, "ep config: failure (%d)\n", stp);
+ return -ENODEV;
+}
+
+static int __init dbgp_bind(struct usb_gadget *gadget)
+{
+ int err, stp;
+
+ dbgp.gadget = gadget;
+
+ dbgp.req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
+ if (!dbgp.req) {
+ err = -ENOMEM;
+ stp = 1;
+ goto fail;
+ }
+
+ dbgp.req->buf = kmalloc(DBGP_REQ_EP0_LEN, GFP_KERNEL);
+ if (!dbgp.req->buf) {
+ err = -ENOMEM;
+ stp = 2;
+ goto fail;
+ }
+
+ dbgp.req->length = DBGP_REQ_EP0_LEN;
+ gadget->ep0->driver_data = gadget;
+
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+ dbgp.serial = kzalloc(sizeof(struct gserial), GFP_KERNEL);
+ if (!dbgp.serial) {
+ stp = 3;
+ err = -ENOMEM;
+ goto fail;
+ }
+#endif
+ err = dbgp_configure_endpoints(gadget);
+ if (err < 0) {
+ stp = 4;
+ goto fail;
+ }
+
+ dev_dbg(&dbgp.gadget->dev, "bind: success\n");
+ return 0;
+
+fail:
+ dev_dbg(&gadget->dev, "bind: failure (%d:%d)\n", stp, err);
+ dbgp_unbind(gadget);
+ return err;
+}
+
+static void dbgp_setup_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ dev_dbg(&dbgp.gadget->dev, "setup complete: %d, %d/%d\n",
+ req->status, req->actual, req->length);
+}
+
+static int dbgp_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_request *req = dbgp.req;
+ u8 request = ctrl->bRequest;
+ u16 value = le16_to_cpu(ctrl->wValue);
+ u16 length = le16_to_cpu(ctrl->wLength);
+ int err = 0;
+ void *data;
+ u16 len;
+
+ gadget->ep0->driver_data = gadget;
+
+ if (request == USB_REQ_GET_DESCRIPTOR) {
+ switch (value>>8) {
+ case USB_DT_DEVICE:
+ dev_dbg(&dbgp.gadget->dev, "setup: desc device\n");
+ len = sizeof device_desc;
+ data = &device_desc;
+ break;
+ case USB_DT_DEBUG:
+ dev_dbg(&dbgp.gadget->dev, "setup: desc debug\n");
+ len = sizeof dbg_desc;
+ data = &dbg_desc;
+ break;
+ default:
+ goto fail;
+ }
+ } else if (request == USB_REQ_SET_FEATURE &&
+ value == USB_DEVICE_DEBUG_MODE) {
+ len = 0;
+ data = NULL;
+ dev_dbg(&dbgp.gadget->dev, "setup: feat debug\n");
+#ifdef CONFIG_USB_G_DBGP_PRINTK
+ err = dbgp_enable_ep();
+#else
+ err = gserial_connect(dbgp.serial, 0);
+#endif
+ if (err < 0)
+ goto fail;
+ } else
+ goto fail;
+
+ if (len >= 0) {
+ req->length = min(length, len);
+ req->zero = len < req->length;
+ if (data && req->length)
+ memcpy(req->buf, data, req->length);
+
+ req->complete = dbgp_setup_complete;
+ return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+ }
+
+fail:
+ dev_dbg(&dbgp.gadget->dev,
+ "setup: failure req %x v %x\n", request, value);
+ return err;
+}
+
+static struct usb_gadget_driver dbgp_driver = {
+ .function = "dbgp",
+ .speed = USB_SPEED_HIGH,
+ .bind = dbgp_bind,
+ .unbind = dbgp_unbind,
+ .setup = dbgp_setup,
+ .disconnect = dbgp_disconnect,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dbgp"
+ },
+};
+
+static int __init dbgp_init(void)
+{
+ return usb_gadget_register_driver(&dbgp_driver);
+}
+
+static void __exit dbgp_exit(void)
+{
+ usb_gadget_unregister_driver(&dbgp_driver);
+#ifdef CONFIG_USB_G_DBGP_SERIAL
+ gserial_cleanup();
+#endif
+}
+
+MODULE_AUTHOR("Stephane Duverger");
+MODULE_LICENSE("GPL");
+module_init(dbgp_init);
+module_exit(dbgp_exit);
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 4f9e578cde9..dc6546248ed 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1542,7 +1542,7 @@ static int dummy_hub_status (struct usb_hcd *hcd, char *buf)
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
if (dum->resuming && time_after_eq (jiffies, dum->re_timeout)) {
@@ -1588,7 +1588,7 @@ static int dummy_hub_control (
int retval = 0;
unsigned long flags;
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
return -ETIMEDOUT;
dum = hcd_to_dummy (hcd);
@@ -1739,7 +1739,7 @@ static int dummy_bus_resume (struct usb_hcd *hcd)
dev_dbg (&hcd->self.root_hub->dev, "%s\n", __func__);
spin_lock_irq (&dum->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
rc = -ESHUTDOWN;
} else {
dum->rh_state = DUMMY_RH_RUNNING;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 400f80372d9..114fa024c22 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -237,7 +237,7 @@ static u8 hostaddr[ETH_ALEN];
* the first one present. That's to make Microsoft's drivers happy,
* and to follow DOCSIS 1.0 (cable modem standard).
*/
-static int __init rndis_do_config(struct usb_configuration *c)
+static int __ref rndis_do_config(struct usb_configuration *c)
{
/* FIXME alloc iConfiguration string, set it in c->strings */
@@ -270,7 +270,7 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
/*
* We _always_ have an ECM, CDC Subset, or EEM configuration.
*/
-static int __init eth_do_config(struct usb_configuration *c)
+static int __ref eth_do_config(struct usb_configuration *c)
{
/* FIXME alloc iConfiguration string, set it in c->strings */
@@ -297,7 +297,7 @@ static struct usb_configuration eth_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init eth_bind(struct usb_composite_dev *cdev)
+static int __ref eth_bind(struct usb_composite_dev *cdev)
{
int gcnum;
struct usb_gadget *gadget = cdev->gadget;
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index b91115f84b1..1f48ceb55a7 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -61,7 +61,7 @@ DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+ UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac_ac_header_descriptor_v1_2 ac_header_desc = {
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
@@ -125,7 +125,7 @@ static struct usb_audio_control_selector feature_unit = {
};
#define OUTPUT_TERMINAL_ID 3
-static struct uac_output_terminal_descriptor_v1 output_terminal_desc = {
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
@@ -155,7 +155,7 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
};
/* B.4.2 Class-Specific AS Interface Descriptor */
-static struct uac_as_header_descriptor_v1 as_header_desc = {
+static struct uac1_as_header_descriptor as_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 2aaa0f75c6c..e4f59505520 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -714,9 +714,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
} else if (gadget->ops->ioctl) {
- lock_kernel();
ret = gadget->ops->ioctl(gadget, code, value);
- unlock_kernel();
} else {
ret = -ENOTTY;
}
@@ -1377,7 +1375,8 @@ static void ffs_data_reset(struct ffs_data *ffs)
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
{
- unsigned i, count;
+ struct usb_gadget_strings **lang;
+ int first_id;
ENTER();
@@ -1385,7 +1384,9 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
- ffs_data_get(ffs);
+ first_id = usb_string_ids_n(cdev, ffs->strings_count);
+ if (unlikely(first_id < 0))
+ return first_id;
ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
if (unlikely(!ffs->ep0req))
@@ -1393,25 +1394,16 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
ffs->ep0req->complete = ffs_ep0_complete;
ffs->ep0req->context = ffs;
- /* Get strings identifiers */
- for (count = ffs->strings_count, i = 0; i < count; ++i) {
- struct usb_gadget_strings **lang;
-
- int id = usb_string_id(cdev);
- if (unlikely(id < 0)) {
- usb_ep_free_request(cdev->gadget->ep0, ffs->ep0req);
- ffs->ep0req = NULL;
- return id;
- }
-
- lang = ffs->stringtabs;
- do {
- (*lang)->strings[i].id = id;
- ++lang;
- } while (*lang);
+ lang = ffs->stringtabs;
+ for (lang = ffs->stringtabs; *lang; ++lang) {
+ struct usb_string *str = (*lang)->strings;
+ int id = first_id;
+ for (; str->s; ++id, ++str)
+ str->id = id;
}
ffs->gadget = cdev->gadget;
+ ffs_data_get(ffs);
return 0;
}
@@ -1480,9 +1472,9 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
}
-static int functionfs_add(struct usb_composite_dev *cdev,
- struct usb_configuration *c,
- struct ffs_data *ffs)
+static int functionfs_bind_config(struct usb_composite_dev *cdev,
+ struct usb_configuration *c,
+ struct ffs_data *ffs)
{
struct ffs_function *func;
int ret;
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 1e00ff9866a..53e120208e9 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -142,7 +142,7 @@ static struct usb_descriptor_header *hidg_fs_descriptors[] = {
static ssize_t f_hidg_read(struct file *file, char __user *buffer,
size_t count, loff_t *ptr)
{
- struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ struct f_hidg *hidg = file->private_data;
char *tmp_buff = NULL;
unsigned long flags;
@@ -200,7 +200,7 @@ static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
- struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ struct f_hidg *hidg = file->private_data;
ssize_t status = -ENOMEM;
if (!access_ok(VERIFY_READ, buffer, count))
@@ -257,7 +257,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
{
- struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ struct f_hidg *hidg = file->private_data;
unsigned int ret = 0;
poll_wait(file, &hidg->read_queue, wait);
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index e91d1b16d9b..43225879c3c 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -324,7 +324,7 @@ static void loopback_disable(struct usb_function *f)
/*-------------------------------------------------------------------------*/
-static int __init loopback_bind_config(struct usb_configuration *c)
+static int __ref loopback_bind_config(struct usb_configuration *c)
{
struct f_loopback *loop;
int status;
@@ -346,7 +346,7 @@ static int __init loopback_bind_config(struct usb_configuration *c)
return status;
}
-static struct usb_configuration loopback_driver = {
+static struct usb_configuration loopback_driver = {
.label = "loopback",
.strings = loopback_strings,
.bind = loopback_bind_config,
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 4ce899c9b16..32cce029f65 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -316,6 +316,27 @@ static const char fsg_string_interface[] = "Mass Storage";
/*-------------------------------------------------------------------------*/
struct fsg_dev;
+struct fsg_common;
+
+/* FSF callback functions */
+struct fsg_operations {
+ /* Callback function to call when thread exits. If no
+ * callback is set or it returns a value lower than zero, MSF
+ * will force eject all LUNs it operates on (including those
+ * marked as non-removable or with prevent_medium_removal flag
+ * set). */
+ int (*thread_exits)(struct fsg_common *common);
+
+ /* Called prior to ejection. Negative return means error,
+ * zero means to continue with ejection, positive means not to
+ * eject. */
+ int (*pre_eject)(struct fsg_common *common,
+ struct fsg_lun *lun, int num);
+ /* Called after ejection. Negative return means error, zero
+ * or positive means success. */
+ int (*post_eject)(struct fsg_common *common,
+ struct fsg_lun *lun, int num);
+};
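
A minimal sketch, with invented my_* names, of how a gadget could populate these callbacks and pass them in through fsg_config; the mass_storage.c change later in this patch wires up .thread_exits in the same way:

static int my_thread_exits(struct fsg_common *common)
{
	/* A non-negative return suppresses the forced eject of all LUNs
	 * described in the comment above. */
	return 0;
}

static int my_pre_eject(struct fsg_common *common,
			struct fsg_lun *lun, int num)
{
	return 0;	/* zero: continue with the ejection */
}

static const struct fsg_operations my_ops = {
	.thread_exits	= my_thread_exits,
	.pre_eject	= my_pre_eject,
	/* .post_eject left NULL; the ejection then simply succeeds */
};

/* Later, when building the configuration:
 *	fsg_config_from_params(&config, &mod_data);
 *	config.ops = &my_ops;
 *	fsg_common_init(&common, c->cdev, &config);
 */
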
/* Data shared by all the FSG instances. */
@@ -333,7 +354,6 @@ struct fsg_common {
struct usb_ep *ep0; /* Copy of gadget->ep0 */
struct usb_request *ep0req; /* Copy of cdev->req */
unsigned int ep0_req_tag;
- const char *ep0req_name;
struct fsg_buffhd *next_buffhd_to_fill;
struct fsg_buffhd *next_buffhd_to_drain;
@@ -369,8 +389,8 @@ struct fsg_common {
struct completion thread_notifier;
struct task_struct *thread_task;
- /* Callback function to call when thread exits. */
- int (*thread_exits)(struct fsg_common *common);
+ /* Callback functions. */
+ const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
@@ -394,12 +414,8 @@ struct fsg_config {
const char *lun_name_format;
const char *thread_name;
- /* Callback function to call when thread exits. If no
- * callback is set or it returns value lower then zero MSF
- * will force eject all LUNs it operates on (including those
- * marked as non-removable or with prevent_medium_removal flag
- * set). */
- int (*thread_exits)(struct fsg_common *common);
+ /* Callback functions. */
+ const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
@@ -435,6 +451,7 @@ static inline int __fsg_is_set(struct fsg_common *common,
if (common->fsg)
return 1;
ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
+ WARN_ON(1);
return 0;
}
@@ -623,8 +640,6 @@ static int fsg_setup(struct usb_function *f,
/* Respond with data/status */
req->length = min((u16)1, w_length);
- fsg->common->ep0req_name =
- ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
return ep0_queue(fsg->common);
}
@@ -1395,43 +1410,55 @@ static int do_start_stop(struct fsg_common *common)
} else if (!curlun->removable) {
curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
- }
-
- loej = common->cmnd[4] & 0x02;
- start = common->cmnd[4] & 0x01;
-
- /* eject code from file_storage.c:do_start_stop() */
-
- if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
- (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
+ } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
+ (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
- if (!start) {
- /* Are we allowed to unload the media? */
- if (curlun->prevent_medium_removal) {
- LDBG(curlun, "unload attempt prevented\n");
- curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
- return -EINVAL;
- }
- if (loej) { /* Simulate an unload/eject */
- up_read(&common->filesem);
- down_write(&common->filesem);
- fsg_lun_close(curlun);
- up_write(&common->filesem);
- down_read(&common->filesem);
- }
- } else {
+ loej = common->cmnd[4] & 0x02;
+ start = common->cmnd[4] & 0x01;
- /* Our emulation doesn't support mounting; the medium is
- * available for use as soon as it is loaded. */
+ /* Our emulation doesn't support mounting; the medium is
+ * available for use as soon as it is loaded. */
+ if (start) {
if (!fsg_lun_is_open(curlun)) {
curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
return -EINVAL;
}
+ return 0;
}
- return 0;
+
+ /* Are we allowed to unload the media? */
+ if (curlun->prevent_medium_removal) {
+ LDBG(curlun, "unload attempt prevented\n");
+ curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+ return -EINVAL;
+ }
+
+ if (!loej)
+ return 0;
+
+ /* Simulate an unload/eject */
+ if (common->ops && common->ops->pre_eject) {
+ int r = common->ops->pre_eject(common, curlun,
+ curlun - common->luns);
+ if (unlikely(r < 0))
+ return r;
+ else if (r)
+ return 0;
+ }
+
+ up_read(&common->filesem);
+ down_write(&common->filesem);
+ fsg_lun_close(curlun);
+ up_write(&common->filesem);
+ down_read(&common->filesem);
+
+ return common->ops && common->ops->post_eject
+ ? min(0, common->ops->post_eject(common, curlun,
+ curlun - common->luns))
+ : 0;
}
@@ -2610,7 +2637,8 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (!common->thread_exits || common->thread_exits(common) < 0) {
+ if (!common->ops || !common->ops->thread_exits
+ || common->ops->thread_exits(common) < 0) {
struct fsg_lun *curlun = common->luns;
unsigned i = common->nluns;
@@ -2686,6 +2714,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
common->free_storage_on_release = 0;
}
+ common->ops = cfg->ops;
common->private_data = cfg->private_data;
common->gadget = gadget;
@@ -2807,7 +2836,6 @@ buffhds_first_it:
/* Tell the thread to start working */
- common->thread_exits = cfg->thread_exits;
common->thread_task =
kthread_create(fsg_main_thread, common,
OR(cfg->thread_name, "file-storage"));
@@ -2990,9 +3018,9 @@ static struct usb_gadget_strings *fsg_strings_array[] = {
NULL,
};
-static int fsg_add(struct usb_composite_dev *cdev,
- struct usb_configuration *c,
- struct fsg_common *common)
+static int fsg_bind_config(struct usb_composite_dev *cdev,
+ struct usb_configuration *c,
+ struct fsg_common *common)
{
struct fsg_dev *fsg;
int rc;
@@ -3024,6 +3052,13 @@ static int fsg_add(struct usb_composite_dev *cdev,
return rc;
}
+static inline int __deprecated __maybe_unused
+fsg_add(struct usb_composite_dev *cdev,
+ struct usb_configuration *c,
+ struct fsg_common *common)
+{
+ return fsg_bind_config(cdev, c, common);
+}
/************************* Module parameters *************************/
@@ -3096,8 +3131,8 @@ fsg_config_from_params(struct fsg_config *cfg,
cfg->product_name = 0;
cfg->release = 0xffff;
- cfg->thread_exits = 0;
- cfg->private_data = 0;
+ cfg->ops = NULL;
+ cfg->private_data = NULL;
/* Finalise */
cfg->can_stall = params->stall;
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index 6d3cc443d91..685d768f336 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -404,7 +404,7 @@ static void sourcesink_disable(struct usb_function *f)
/*-------------------------------------------------------------------------*/
-static int __init sourcesink_bind_config(struct usb_configuration *c)
+static int __ref sourcesink_bind_config(struct usb_configuration *c)
{
struct f_sourcesink *ss;
int status;
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index b49d86e3e45..a857b7ac238 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -56,7 +56,7 @@
* following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
* UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
* the optional "protocol" module parameter. In addition, the default
- * Vendor ID, Product ID, and release number can be overridden.
+ * Vendor ID, Product ID, release number and serial number can be overridden.
*
* There is support for multiple logical units (LUNs), each of which has
* its own backing file. The number of LUNs can be set using the optional
@@ -93,6 +93,8 @@
* removable Default false, boolean for removable media
* luns=N Default N = number of filenames, number of
* LUNs to support
+ * nofua=b[,b...] Default false, booleans to ignore the FUA flag
+ * in SCSI WRITE(10,12) commands
* stall Default determined according to the type of
* USB device controller (usually true),
* boolean to permit the driver to halt
@@ -106,17 +108,18 @@
* vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID
* product=0xPPPP Default 0xa4a5 (FSG), USB Product ID
* release=0xRRRR Override the USB release number (bcdDevice)
+ * serial=HHHH... Override serial number (string of hex chars)
* buflen=N Default N=16384, buffer size used (will be
* rounded down to a multiple of
* PAGE_CACHE_SIZE)
*
* If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro",
- * "removable", "luns", "stall", and "cdrom" options are available; default
- * values are used for everything else.
+ * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
+ * default values are used for everything else.
*
* The pathnames of the backing files and the ro settings are available in
- * the attribute files "file" and "ro" in the lun<n> subdirectory of the
- * gadget's sysfs directory. If the "removable" option is set, writing to
+ * the attribute files "file", "nofua", and "ro" in the lun<n> subdirectory of
+ * the gadget's sysfs directory. If the "removable" option is set, writing to
* these files will simulate ejecting/loading the medium (writing an empty
* line means eject) and adjusting a write-enable tab. Changes to the ro
* setting are not allowed when the medium is loaded or if CD-ROM emulation
@@ -270,6 +273,8 @@
#define DRIVER_DESC "File-backed Storage Gadget"
#define DRIVER_NAME "g_file_storage"
+/* DRIVER_VERSION must be at least 6 characters long, as it is used
+ * to generate a fallback serial number. */
#define DRIVER_VERSION "20 November 2008"
static char fsg_string_manufacturer[64];
@@ -301,8 +306,10 @@ MODULE_LICENSE("Dual BSD/GPL");
static struct {
char *file[FSG_MAX_LUNS];
int ro[FSG_MAX_LUNS];
+ int nofua[FSG_MAX_LUNS];
unsigned int num_filenames;
unsigned int num_ros;
+ unsigned int num_nofuas;
unsigned int nluns;
int removable;
@@ -314,6 +321,7 @@ static struct {
unsigned short vendor;
unsigned short product;
unsigned short release;
+ char *serial;
unsigned int buflen;
int transport_type;
@@ -341,6 +349,10 @@ MODULE_PARM_DESC(file, "names of backing files or devices");
module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
MODULE_PARM_DESC(ro, "true to force read-only");
+module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas,
+ S_IRUGO);
+MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit");
+
module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
MODULE_PARM_DESC(luns, "number of LUNs");
@@ -353,6 +365,8 @@ MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
+module_param_named(serial, mod_data.serial, charp, S_IRUGO);
+MODULE_PARM_DESC(serial, "USB serial number");
/* In the non-TEST version, only the module parameters listed above
* are available. */
@@ -1272,7 +1286,8 @@ static int do_write(struct fsg_dev *fsg)
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
- if (fsg->cmnd[1] & 0x08) { // FUA
+ /* FUA */
+ if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) {
spin_lock(&curlun->filp->f_lock);
curlun->filp->f_flags |= O_DSYNC;
spin_unlock(&curlun->filp->f_lock);
@@ -3126,6 +3141,7 @@ static int fsg_main_thread(void *fsg_)
/* The write permissions and store_xxx pointers are set in fsg_bind() */
static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
+static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL);
static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
@@ -3197,6 +3213,7 @@ static int __init check_parameters(struct fsg_dev *fsg)
{
int prot;
int gcnum;
+ int i;
/* Store the default values */
mod_data.transport_type = USB_PR_BULK;
@@ -3272,13 +3289,65 @@ static int __init check_parameters(struct fsg_dev *fsg)
ERROR(fsg, "invalid buflen\n");
return -ETOOSMALL;
}
+
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
+ /* Serial string handling.
+ * On a real device, the serial string would be loaded
+ * from permanent storage. */
+ if (mod_data.serial) {
+ const char *ch;
+ unsigned len = 0;
+
+ /* Sanity check:
+ * The CB[I] specification limits the serial string to
+ * 12 uppercase hexadecimal characters.
+ * BBB needs at least 12 uppercase hexadecimal characters,
+ * with a maximum of 126. */
+ for (ch = mod_data.serial; *ch; ++ch) {
+ ++len;
+ if ((*ch < '0' || *ch > '9') &&
+ (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
+ WARNING(fsg,
+ "Invalid serial string character: %c; "
+ "Failing back to default\n",
+ *ch);
+ goto fill_serial;
+ }
+ }
+ if (len > 126 ||
+ (mod_data.transport_type == USB_PR_BULK && len < 12) ||
+ (mod_data.transport_type != USB_PR_BULK && len > 12)) {
+ WARNING(fsg,
+ "Invalid serial string length; "
+ "Failing back to default\n");
+ goto fill_serial;
+ }
+ fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
+ } else {
+ WARNING(fsg,
+ "Userspace failed to provide serial number; "
+ "Failing back to default\n");
+fill_serial:
+ /* Serial number not specified or invalid, make our own.
+ * We just encode it from the driver version string,
+ * 12 characters to comply with both CB[I] and BBB spec.
+ * Warning: Two devices running the same kernel will have
+ * the same fallback serial number. */
+ for (i = 0; i < 12; i += 2) {
+ unsigned char c = DRIVER_VERSION[i / 2];
+
+ if (!c)
+ break;
+ sprintf(&fsg_string_serial[i], "%02X", c);
+ }
+ }
+
return 0;
}
-static int __init fsg_bind(struct usb_gadget *gadget)
+static int __ref fsg_bind(struct usb_gadget *gadget)
{
struct fsg_dev *fsg = the_fsg;
int rc;
@@ -3305,6 +3374,10 @@ static int __init fsg_bind(struct usb_gadget *gadget)
}
}
+ /* Only for removable media? */
+ dev_attr_nofua.attr.mode = 0644;
+ dev_attr_nofua.store = fsg_store_nofua;
+
/* Find out how many LUNs there should be */
i = mod_data.nluns;
if (i == 0)
@@ -3330,6 +3403,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
curlun->ro = mod_data.cdrom || mod_data.ro[i];
curlun->initially_ro = curlun->ro;
curlun->removable = mod_data.removable;
+ curlun->nofua = mod_data.nofua[i];
curlun->dev.release = lun_release;
curlun->dev.parent = &gadget->dev;
curlun->dev.driver = &fsg_driver.driver;
@@ -3344,6 +3418,8 @@ static int __init fsg_bind(struct usb_gadget *gadget)
if ((rc = device_create_file(&curlun->dev,
&dev_attr_ro)) != 0 ||
(rc = device_create_file(&curlun->dev,
+ &dev_attr_nofua)) != 0 ||
+ (rc = device_create_file(&curlun->dev,
&dev_attr_file)) != 0) {
device_unregister(&curlun->dev);
goto out;
@@ -3447,16 +3523,6 @@ static int __init fsg_bind(struct usb_gadget *gadget)
init_utsname()->sysname, init_utsname()->release,
gadget->name);
- /* On a real device, serial[] would be loaded from permanent
- * storage. We just encode it from the driver version string. */
- for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
- unsigned char c = DRIVER_VERSION[i / 2];
-
- if (!c)
- break;
- sprintf(&fsg_string_serial[i], "%02X", c);
- }
-
fsg->thread_task = kthread_create(fsg_main_thread, fsg,
"file-storage-gadget");
if (IS_ERR(fsg->thread_task)) {
@@ -3478,8 +3544,8 @@ static int __init fsg_bind(struct usb_gadget *gadget)
if (IS_ERR(p))
p = NULL;
}
- LINFO(curlun, "ro=%d, file: %s\n",
- curlun->ro, (p ? p : "(error)"));
+ LINFO(curlun, "ro=%d, nofua=%d, file: %s\n",
+ curlun->ro, curlun->nofua, (p ? p : "(error)"));
}
}
kfree(pathbuf);
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 82506ca297d..a5ea2c1d8c9 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
@@ -2397,7 +2398,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
EXPORT_SYMBOL(usb_gadget_unregister_driver);
/* udc structure's alloc and setup, include ep-param alloc */
-static struct qe_udc __devinit *qe_udc_config(struct of_device *ofdev)
+static struct qe_udc __devinit *qe_udc_config(struct platform_device *ofdev)
{
struct qe_udc *udc;
struct device_node *np = ofdev->dev.of_node;
@@ -2522,7 +2523,7 @@ static void qe_udc_release(struct device *dev)
}
/* Driver probe functions */
-static int __devinit qe_udc_probe(struct of_device *ofdev,
+static int __devinit qe_udc_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -2678,18 +2679,18 @@ err1:
}
#ifdef CONFIG_PM
-static int qe_udc_suspend(struct of_device *dev, pm_message_t state)
+static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
{
return -ENOTSUPP;
}
-static int qe_udc_resume(struct of_device *dev)
+static int qe_udc_resume(struct platform_device *dev)
{
return -ENOTSUPP;
}
#endif
-static int __devexit qe_udc_remove(struct of_device *ofdev)
+static int __devexit qe_udc_remove(struct platform_device *ofdev)
{
struct qe_ep *ep;
unsigned int size;
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index d1af253a910..a9474f8d532 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -32,12 +32,13 @@
# include "u_ether.c"
static u8 gfs_hostaddr[ETH_ALEN];
-#else
-# if !defined CONFIG_USB_FUNCTIONFS_GENERIC
-# define CONFIG_USB_FUNCTIONFS_GENERIC
+# ifdef CONFIG_USB_FUNCTIONFS_ETH
+static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
# endif
+#else
# define gether_cleanup() do { } while (0)
# define gether_setup(gadget, hostaddr) ((int)0)
+# define gfs_hostaddr NULL
#endif
#include "f_fs.c"
@@ -107,15 +108,7 @@ static const struct usb_descriptor_header *gfs_otg_desc[] = {
enum {
GFS_STRING_MANUFACTURER_IDX,
GFS_STRING_PRODUCT_IDX,
-#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
- GFS_STRING_RNDIS_CONFIG_IDX,
-#endif
-#ifdef CONFIG_USB_FUNCTIONFS_ETH
- GFS_STRING_ECM_CONFIG_IDX,
-#endif
-#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
- GFS_STRING_GENERIC_CONFIG_IDX,
-#endif
+ GFS_STRING_FIRST_CONFIG_IDX,
};
static char gfs_manufacturer[50];
@@ -126,13 +119,13 @@ static struct usb_string gfs_strings[] = {
[GFS_STRING_MANUFACTURER_IDX].s = gfs_manufacturer,
[GFS_STRING_PRODUCT_IDX].s = gfs_driver_desc,
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
- [GFS_STRING_RNDIS_CONFIG_IDX].s = "FunctionFS + RNDIS",
+ { .s = "FunctionFS + RNDIS" },
#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
- [GFS_STRING_ECM_CONFIG_IDX].s = "FunctionFS + ECM",
+ { .s = "FunctionFS + ECM" },
#endif
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
- [GFS_STRING_GENERIC_CONFIG_IDX].s = "FunctionFS",
+ { .s = "FunctionFS" },
#endif
{ } /* end of list */
};
@@ -146,59 +139,33 @@ static struct usb_gadget_strings *gfs_dev_strings[] = {
};
+
+struct gfs_configuration {
+ struct usb_configuration c;
+ int (*eth)(struct usb_configuration *c, u8 *ethaddr);
+} gfs_configurations[] = {
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
-static int gfs_do_rndis_config(struct usb_configuration *c);
-
-static struct usb_configuration gfs_rndis_config_driver = {
- .label = "FunctionFS + RNDIS",
- .bind = gfs_do_rndis_config,
- .bConfigurationValue = 1,
- /* .iConfiguration = DYNAMIC */
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
-};
-# define gfs_add_rndis_config(cdev) \
- usb_add_config(cdev, &gfs_rndis_config_driver)
-#else
-# define gfs_add_rndis_config(cdev) 0
+ {
+ .eth = rndis_bind_config,
+ },
#endif
-
#ifdef CONFIG_USB_FUNCTIONFS_ETH
-static int gfs_do_ecm_config(struct usb_configuration *c);
-
-static struct usb_configuration gfs_ecm_config_driver = {
- .label = "FunctionFS + ECM",
- .bind = gfs_do_ecm_config,
- .bConfigurationValue = 1,
- /* .iConfiguration = DYNAMIC */
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
-};
-# define gfs_add_ecm_config(cdev) \
- usb_add_config(cdev, &gfs_ecm_config_driver)
-#else
-# define gfs_add_ecm_config(cdev) 0
+ {
+ .eth = eth_bind_config,
+ },
#endif
-
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
-static int gfs_do_generic_config(struct usb_configuration *c);
-
-static struct usb_configuration gfs_generic_config_driver = {
- .label = "FunctionFS",
- .bind = gfs_do_generic_config,
- .bConfigurationValue = 2,
- /* .iConfiguration = DYNAMIC */
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
-};
-# define gfs_add_generic_config(cdev) \
- usb_add_config(cdev, &gfs_generic_config_driver)
-#else
-# define gfs_add_generic_config(cdev) 0
+ {
+ },
#endif
+};
static int gfs_bind(struct usb_composite_dev *cdev);
static int gfs_unbind(struct usb_composite_dev *cdev);
+static int gfs_do_config(struct usb_configuration *c);
static struct usb_composite_driver gfs_driver = {
.name = gfs_short_name,
@@ -267,7 +234,7 @@ static int functionfs_check_dev_callback(const char *dev_name)
static int gfs_bind(struct usb_composite_dev *cdev)
{
- int ret;
+ int ret, i;
ENTER();
@@ -284,57 +251,32 @@ static int gfs_bind(struct usb_composite_dev *cdev)
snprintf(gfs_manufacturer, sizeof gfs_manufacturer, "%s %s with %s",
init_utsname()->sysname, init_utsname()->release,
cdev->gadget->name);
- ret = usb_string_id(cdev);
- if (unlikely(ret < 0))
- goto error;
- gfs_strings[GFS_STRING_MANUFACTURER_IDX].id = ret;
- gfs_dev_desc.iManufacturer = ret;
-
- ret = usb_string_id(cdev);
- if (unlikely(ret < 0))
- goto error;
- gfs_strings[GFS_STRING_PRODUCT_IDX].id = ret;
- gfs_dev_desc.iProduct = ret;
-
-#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
- ret = usb_string_id(cdev);
- if (unlikely(ret < 0))
- goto error;
- gfs_strings[GFS_STRING_RNDIS_CONFIG_IDX].id = ret;
- gfs_rndis_config_driver.iConfiguration = ret;
-#endif
-#ifdef CONFIG_USB_FUNCTIONFS_ETH
- ret = usb_string_id(cdev);
+ ret = usb_string_ids_tab(cdev, gfs_strings);
if (unlikely(ret < 0))
goto error;
- gfs_strings[GFS_STRING_ECM_CONFIG_IDX].id = ret;
- gfs_ecm_config_driver.iConfiguration = ret;
-#endif
-#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
- ret = usb_string_id(cdev);
- if (unlikely(ret < 0))
- goto error;
- gfs_strings[GFS_STRING_GENERIC_CONFIG_IDX].id = ret;
- gfs_generic_config_driver.iConfiguration = ret;
-#endif
+ gfs_dev_desc.iManufacturer = gfs_strings[GFS_STRING_MANUFACTURER_IDX].id;
+ gfs_dev_desc.iProduct = gfs_strings[GFS_STRING_PRODUCT_IDX].id;
ret = functionfs_bind(gfs_ffs_data, cdev);
if (unlikely(ret < 0))
goto error;
- ret = gfs_add_rndis_config(cdev);
- if (unlikely(ret < 0))
- goto error_unbind;
+ for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
+ struct gfs_configuration *c = gfs_configurations + i;
- ret = gfs_add_ecm_config(cdev);
- if (unlikely(ret < 0))
- goto error_unbind;
+ ret = GFS_STRING_FIRST_CONFIG_IDX + i;
+ c->c.label = gfs_strings[ret].s;
+ c->c.iConfiguration = gfs_strings[ret].id;
+ c->c.bind = gfs_do_config;
+ c->c.bConfigurationValue = 1 + i;
+ c->c.bmAttributes = USB_CONFIG_ATT_SELFPOWER;
- ret = gfs_add_generic_config(cdev);
- if (unlikely(ret < 0))
- goto error_unbind;
+ ret = usb_add_config(cdev, &c->c);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+ }
return 0;
@@ -368,10 +310,10 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
}
-static int __gfs_do_config(struct usb_configuration *c,
- int (*eth)(struct usb_configuration *c, u8 *ethaddr),
- u8 *ethaddr)
+static int gfs_do_config(struct usb_configuration *c)
{
+ struct gfs_configuration *gc =
+ container_of(c, struct gfs_configuration, c);
int ret;
if (WARN_ON(!gfs_ffs_data))
@@ -382,13 +324,13 @@ static int __gfs_do_config(struct usb_configuration *c,
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- if (eth) {
- ret = eth(c, ethaddr);
+ if (gc->eth) {
+ ret = gc->eth(c, gfs_hostaddr);
if (unlikely(ret < 0))
return ret;
}
- ret = functionfs_add(c->cdev, c, gfs_ffs_data);
+ ret = functionfs_bind_config(c->cdev, c, gfs_ffs_data);
if (unlikely(ret < 0))
return ret;
@@ -406,32 +348,12 @@ static int __gfs_do_config(struct usb_configuration *c,
return 0;
}
-#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
-static int gfs_do_rndis_config(struct usb_configuration *c)
-{
- ENTER();
-
- return __gfs_do_config(c, rndis_bind_config, gfs_hostaddr);
-}
-#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
-static int gfs_do_ecm_config(struct usb_configuration *c)
-{
- ENTER();
-
- return __gfs_do_config(c,
- can_support_ecm(c->cdev->gadget)
- ? ecm_bind_config : geth_bind_config,
- gfs_hostaddr);
-}
-#endif
-
-#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
-static int gfs_do_generic_config(struct usb_configuration *c)
+static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
- ENTER();
-
- return __gfs_do_config(c, NULL, NULL);
+ return can_support_ecm(c->cdev->gadget)
+ ? ecm_bind_config(c, ethaddr)
+ : geth_bind_config(c, ethaddr);
}
#endif
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 2b56ce62185..1b413a5cc3f 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -238,7 +238,7 @@ static const struct usb_interface_descriptor ac_interface_desc = {
};
/* B.3.2 Class-Specific AC Interface Descriptor */
-static const struct uac_ac_header_descriptor_v1_1 ac_header_desc = {
+static const struct uac1_ac_header_descriptor_1 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
@@ -1157,7 +1157,7 @@ fail:
/*
* Creates an output endpoint, and initializes output ports.
*/
-static int __init gmidi_bind(struct usb_gadget *gadget)
+static int __ref gmidi_bind(struct usb_gadget *gadget)
{
struct gmidi_device *dev;
struct usb_ep *in_ep, *out_ep;
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
index 775722686ed..735495bf841 100644
--- a/drivers/usb/gadget/hid.c
+++ b/drivers/usb/gadget/hid.c
@@ -127,7 +127,7 @@ static struct usb_gadget_strings *dev_strings[] = {
/****************************** Configurations ******************************/
-static int __init do_config(struct usb_configuration *c)
+static int __ref do_config(struct usb_configuration *c)
{
struct hidg_func_node *e;
int func = 0, status = 0;
@@ -156,7 +156,7 @@ static struct usb_configuration config_driver = {
/****************************** Gadget Bind ******************************/
-static int __init hid_bind(struct usb_composite_dev *cdev)
+static int __ref hid_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct list_head *tmp;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index de8a8380350..fc35406fc80 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1299,11 +1299,9 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
struct usb_gadget *gadget = dev->gadget;
long ret = -ENOTTY;
- if (gadget->ops->ioctl) {
- lock_kernel();
+ if (gadget->ops->ioctl)
ret = gadget->ops->ioctl (gadget, code, value);
- unlock_kernel();
- }
+
return ret;
}
@@ -1867,13 +1865,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
buf += 4;
length -= 4;
- kbuf = kmalloc (length, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- if (copy_from_user (kbuf, buf, length)) {
- kfree (kbuf);
- return -EFAULT;
- }
+ kbuf = memdup_user(buf, length);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
spin_lock_irq (&dev->lock);
value = -EINVAL;
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index a3913519fd5..c2d2a201f84 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -842,9 +842,9 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
VDBG(dev, "req->mapped = 0\n");
}
- DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
- _ep->name,
- _req, _req->length, _req->buf, _req->dma);
+ DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08llx\n",
+ _ep->name,
+ _req, _req->length, _req->buf, (unsigned long long)_req->dma);
_req->status = -EINPROGRESS;
_req->actual = 0;
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 166bf71fd34..e03058fe23c 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev)
/* initialize udc */
m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
if (m66592 == NULL) {
+ ret = -ENOMEM;
pr_err("kzalloc error\n");
goto clean_up;
}
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 705cc1f7632..585f2559484 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -141,9 +141,14 @@ static int msg_thread_exits(struct fsg_common *common)
return 0;
}
-static int __init msg_do_config(struct usb_configuration *c)
+static int __ref msg_do_config(struct usb_configuration *c)
{
- struct fsg_common *common;
+ static const struct fsg_operations ops = {
+ .thread_exits = msg_thread_exits,
+ };
+ static struct fsg_common common;
+
+ struct fsg_common *retp;
struct fsg_config config;
int ret;
@@ -153,13 +158,14 @@ static int __init msg_do_config(struct usb_configuration *c)
}
fsg_config_from_params(&config, &mod_data);
- config.thread_exits = msg_thread_exits;
- common = fsg_common_init(0, c->cdev, &config);
- if (IS_ERR(common))
- return PTR_ERR(common);
+ config.ops = &ops;
+
+ retp = fsg_common_init(&common, c->cdev, &config);
+ if (IS_ERR(retp))
+ return PTR_ERR(retp);
- ret = fsg_add(c->cdev, c, common);
- fsg_common_put(common);
+ ret = fsg_bind_config(c->cdev, c, &common);
+ fsg_common_put(&common);
return ret;
}
@@ -176,7 +182,7 @@ static struct usb_configuration msg_config_driver = {
/****************************** Gadget Bind ******************************/
-static int __init msg_bind(struct usb_composite_dev *cdev)
+static int __ref msg_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
int status;
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index a930d7fd7e7..795d7623216 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/utsname.h>
+#include <linux/module.h>
#if defined USB_ETH_RNDIS
@@ -35,14 +36,13 @@
#define DRIVER_DESC "Multifunction Composite Gadget"
-#define DRIVER_VERSION "2009/07/21"
-/*-------------------------------------------------------------------------*/
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
-#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */
-#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */
-/*-------------------------------------------------------------------------*/
+/***************************** All the files... *****************************/
/*
* kbuild is not very cooperative with respect to linking separately
@@ -57,6 +57,8 @@
#include "config.c"
#include "epautoconf.c"
+#include "f_mass_storage.c"
+
#include "u_serial.c"
#include "f_acm.c"
@@ -68,13 +70,24 @@
#endif
#include "u_ether.c"
-#undef DBG /* u_ether.c has broken idea about macros */
-#undef VDBG /* so clean up after it */
-#undef ERROR
-#undef INFO
-#include "f_mass_storage.c"
-/*-------------------------------------------------------------------------*/
+
+/***************************** Device Descriptor ****************************/
+
+#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */
+#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */
+
+
+enum {
+ __MULTI_NO_CONFIG,
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+ MULTI_RNDIS_CONFIG_NUM,
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+ MULTI_CDC_CONFIG_NUM,
+#endif
+};
+
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
@@ -82,80 +95,82 @@ static struct usb_device_descriptor device_desc = {
.bcdUSB = cpu_to_le16(0x0200),
- /* .bDeviceClass = USB_CLASS_COMM, */
- /* .bDeviceSubClass = 0, */
- /* .bDeviceProtocol = 0, */
- .bDeviceClass = 0xEF,
+ .bDeviceClass = USB_CLASS_MISC /* 0xEF */,
.bDeviceSubClass = 2,
.bDeviceProtocol = 1,
- /* .bMaxPacketSize0 = f(hardware) */
/* Vendor and product id can be overridden by module parameters. */
.idVendor = cpu_to_le16(MULTI_VENDOR_NUM),
.idProduct = cpu_to_le16(MULTI_PRODUCT_NUM),
- /* .bcdDevice = f(hardware) */
- /* .iManufacturer = DYNAMIC */
- /* .iProduct = DYNAMIC */
- /* NO SERIAL NUMBER */
- .bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
+ (struct usb_descriptor_header *) &(struct usb_otg_descriptor){
+ .bLength = sizeof(struct usb_otg_descriptor),
+ .bDescriptorType = USB_DT_OTG,
+
+ /*
+ * REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+ },
NULL,
};
-/* string IDs are assigned dynamically */
-
-#define STRING_MANUFACTURER_IDX 0
-#define STRING_PRODUCT_IDX 1
+enum {
+ MULTI_STRING_MANUFACTURER_IDX,
+ MULTI_STRING_PRODUCT_IDX,
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+ MULTI_STRING_RNDIS_CONFIG_IDX,
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+ MULTI_STRING_CDC_CONFIG_IDX,
+#endif
+};
static char manufacturer[50];
static struct usb_string strings_dev[] = {
- [STRING_MANUFACTURER_IDX].s = manufacturer,
- [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ [MULTI_STRING_MANUFACTURER_IDX].s = manufacturer,
+ [MULTI_STRING_PRODUCT_IDX].s = DRIVER_DESC,
+#ifdef CONFIG_USB_G_MULTI_RNDIS
+ [MULTI_STRING_RNDIS_CONFIG_IDX].s = "Multifunction with RNDIS",
+#endif
+#ifdef CONFIG_USB_G_MULTI_CDC
+ [MULTI_STRING_CDC_CONFIG_IDX].s = "Multifunction with CDC ECM",
+#endif
{ } /* end of list */
};
-static struct usb_gadget_strings stringtab_dev = {
- .language = 0x0409, /* en-us */
- .strings = strings_dev,
-};
-
static struct usb_gadget_strings *dev_strings[] = {
- &stringtab_dev,
+ &(struct usb_gadget_strings){
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+ },
NULL,
};
-static u8 hostaddr[ETH_ALEN];
/****************************** Configurations ******************************/
-static struct fsg_module_parameters mod_data = {
- .stall = 1
-};
-FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+static struct fsg_module_parameters fsg_mod_data = { .stall = 1 };
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+
+static struct fsg_common fsg_common;
+
+static u8 hostaddr[ETH_ALEN];
-static struct fsg_common *fsg_common;
+/********** RNDIS **********/
#ifdef USB_ETH_RNDIS
-static int __init rndis_do_config(struct usb_configuration *c)
+static __ref int rndis_do_config(struct usb_configuration *c)
{
int ret;
@@ -172,26 +187,42 @@ static int __init rndis_do_config(struct usb_configuration *c)
if (ret < 0)
return ret;
- ret = fsg_add(c->cdev, c, fsg_common);
+ ret = fsg_bind_config(c->cdev, c, &fsg_common);
if (ret < 0)
return ret;
return 0;
}
-static struct usb_configuration rndis_config_driver = {
- .label = "Multifunction Composite (RNDIS + MS + ACM)",
- .bind = rndis_do_config,
- .bConfigurationValue = 2,
- /* .iConfiguration = DYNAMIC */
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
-};
+static int rndis_config_register(struct usb_composite_dev *cdev)
+{
+ static struct usb_configuration config = {
+ .bind = rndis_do_config,
+ .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM,
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+ };
+
+ config.label = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].s;
+ config.iConfiguration = strings_dev[MULTI_STRING_RNDIS_CONFIG_IDX].id;
+
+ return usb_add_config(cdev, &config);
+}
+
+#else
+
+static int rndis_config_register(struct usb_composite_dev *cdev)
+{
+ return 0;
+}
#endif
+
+/********** CDC ECM **********/
+
#ifdef CONFIG_USB_G_MULTI_CDC
-static int __init cdc_do_config(struct usb_configuration *c)
+static __ref int cdc_do_config(struct usb_configuration *c)
{
int ret;
@@ -208,20 +239,33 @@ static int __init cdc_do_config(struct usb_configuration *c)
if (ret < 0)
return ret;
- ret = fsg_add(c->cdev, c, fsg_common);
+ ret = fsg_bind_config(c->cdev, c, &fsg_common);
if (ret < 0)
return ret;
return 0;
}
-static struct usb_configuration cdc_config_driver = {
- .label = "Multifunction Composite (CDC + MS + ACM)",
- .bind = cdc_do_config,
- .bConfigurationValue = 1,
- /* .iConfiguration = DYNAMIC */
- .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
-};
+static int cdc_config_register(struct usb_composite_dev *cdev)
+{
+ static struct usb_configuration config = {
+ .bind = cdc_do_config,
+ .bConfigurationValue = MULTI_CDC_CONFIG_NUM,
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+ };
+
+ config.label = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].s;
+ config.iConfiguration = strings_dev[MULTI_STRING_CDC_CONFIG_IDX].id;
+
+ return usb_add_config(cdev, &config);
+}
+
+#else
+
+static int cdc_config_register(struct usb_composite_dev *cdev)
+{
+ return 0;
+}
#endif
@@ -230,7 +274,7 @@ static struct usb_configuration cdc_config_driver = {
/****************************** Gadget Bind ******************************/
-static int __init multi_bind(struct usb_composite_dev *cdev)
+static int __ref multi_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
int status, gcnum;
@@ -252,67 +296,56 @@ static int __init multi_bind(struct usb_composite_dev *cdev)
goto fail0;
/* set up mass storage function */
- fsg_common = fsg_common_from_params(0, cdev, &mod_data);
- if (IS_ERR(fsg_common)) {
- status = PTR_ERR(fsg_common);
- goto fail1;
+ {
+ void *retp;
+ retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data);
+ if (IS_ERR(retp)) {
+ status = PTR_ERR(retp);
+ goto fail1;
+ }
}
-
+ /* set bcdDevice */
gcnum = usb_gadget_controller_number(gadget);
- if (gcnum >= 0)
+ if (gcnum >= 0) {
device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
- else {
- /* We assume that can_support_ecm() tells the truth;
- * but if the controller isn't recognized at all then
- * that assumption is a bit more likely to be wrong.
- */
- WARNING(cdev, "controller '%s' not recognized\n",
- gadget->name);
+ } else {
+ WARNING(cdev, "controller '%s' not recognized\n", gadget->name);
device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
}
-
- /* Allocate string descriptor numbers ... note that string
- * contents can be overridden by the composite_dev glue.
- */
-
- /* device descriptor strings: manufacturer, product */
+ /* allocate string descriptor numbers */
snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
init_utsname()->sysname, init_utsname()->release,
gadget->name);
- status = usb_string_id(cdev);
- if (status < 0)
- goto fail2;
- strings_dev[STRING_MANUFACTURER_IDX].id = status;
- device_desc.iManufacturer = status;
- status = usb_string_id(cdev);
- if (status < 0)
+ status = usb_string_ids_tab(cdev, strings_dev);
+ if (unlikely(status < 0))
goto fail2;
- strings_dev[STRING_PRODUCT_IDX].id = status;
- device_desc.iProduct = status;
-#ifdef USB_ETH_RNDIS
- /* register our first configuration */
- status = usb_add_config(cdev, &rndis_config_driver);
- if (status < 0)
+ device_desc.iManufacturer =
+ strings_dev[MULTI_STRING_MANUFACTURER_IDX].id;
+ device_desc.iProduct =
+ strings_dev[MULTI_STRING_PRODUCT_IDX].id;
+
+ /* register configurations */
+ status = rndis_config_register(cdev);
+ if (unlikely(status < 0))
goto fail2;
-#endif
-#ifdef CONFIG_USB_G_MULTI_CDC
- /* register our second configuration */
- status = usb_add_config(cdev, &cdc_config_driver);
- if (status < 0)
+ status = cdc_config_register(cdev);
+ if (unlikely(status < 0))
goto fail2;
-#endif
- dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
- fsg_common_put(fsg_common);
+ /* we're done */
+ dev_info(&gadget->dev, DRIVER_DESC "\n");
+ fsg_common_put(&fsg_common);
return 0;
+
+ /* error recovery */
fail2:
- fsg_common_put(fsg_common);
+ fsg_common_put(&fsg_common);
fail1:
gserial_cleanup();
fail0:
@@ -339,18 +372,15 @@ static struct usb_composite_driver multi_driver = {
.unbind = __exit_p(multi_unbind),
};
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Michal Nazarewicz");
-MODULE_LICENSE("GPL");
-static int __init g_multi_init(void)
+static int __init multi_init(void)
{
return usb_composite_register(&multi_driver);
}
-module_init(g_multi_init);
+module_init(multi_init);
-static void __exit g_multi_cleanup(void)
+static void __exit multi_exit(void)
{
usb_composite_unregister(&multi_driver);
}
-module_exit(g_multi_cleanup);
+module_exit(multi_exit);
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 4c3ac5c4223..cf241c371a7 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -25,7 +25,7 @@
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
@@ -70,6 +70,7 @@
#define DRIVER_DESC "Printer Gadget"
#define DRIVER_VERSION "2007 OCT 06"
+static DEFINE_MUTEX(printer_mutex);
static const char shortname [] = "printer";
static const char driver_desc [] = DRIVER_DESC;
@@ -476,7 +477,7 @@ printer_open(struct inode *inode, struct file *fd)
unsigned long flags;
int ret = -EBUSY;
- lock_kernel();
+ mutex_lock(&printer_mutex);
dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev);
spin_lock_irqsave(&dev->lock, flags);
@@ -492,7 +493,7 @@ printer_open(struct inode *inode, struct file *fd)
spin_unlock_irqrestore(&dev->lock, flags);
DBG(dev, "printer_open returned %x\n", ret);
- unlock_kernel();
+ mutex_unlock(&printer_mutex);
return ret;
}
@@ -1346,7 +1347,7 @@ printer_unbind(struct usb_gadget *gadget)
set_gadget_data(gadget, NULL);
}
-static int __init
+static int __ref
printer_bind(struct usb_gadget *gadget)
{
struct printer_dev *dev;
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 70a81784275..2456ccd9965 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
/* initialize udc */
r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
if (r8a66597 == NULL) {
+ ret = -ENOMEM;
printk(KERN_ERR "kzalloc error\n");
goto clean_up;
}
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 020fa5a25fd..972d5ddd1e1 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_VENDOR_DESCRIPTION:
pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
- length = strlen (rndis_per_dev_params [configNr].vendorDescr);
- memcpy (outbuf,
- rndis_per_dev_params [configNr].vendorDescr, length);
+ if ( rndis_per_dev_params [configNr].vendorDescr ) {
+ length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+ memcpy (outbuf,
+ rndis_per_dev_params [configNr].vendorDescr, length);
+ } else {
+ outbuf[0] = 0;
+ }
retval = 0;
break;
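
The OID_GEN_VENDOR_DESCRIPTION change above answers with an empty string when no vendor description was ever set, instead of calling strlen() on a NULL pointer. A sketch of the same defensive copy, with hypothetical names:

    #include <linux/types.h>
    #include <linux/string.h>

    /* Copy an optional, possibly-NULL C string into a response buffer;
     * returns the number of bytes placed there.
     */
    static unsigned int copy_optional_descr(u8 *outbuf, const char *descr)
    {
            unsigned int length = 0;

            if (descr) {
                    length = strlen(descr);
                    memcpy(outbuf, descr, length);
            } else {
                    outbuf[0] = 0;          /* report an empty string */
            }
            return length;
    }
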
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-int __init rndis_init (void)
+int rndis_init(void)
{
u8 i;
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index c236aaa9dcd..907c3300811 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr);
int rndis_state (int configNr);
extern void rndis_set_host_mac (int configNr, const u8 *addr);
-int __devinit rndis_init (void);
+int rndis_init(void);
void rndis_exit (void);
#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 26193eceb32..a229744a8c7 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -33,6 +34,7 @@
#include <plat/regs-usb-hsotg.h>
#include <mach/regs-sys.h>
#include <plat/udc-hs.h>
+#include <plat/cpu.h>
#define DMA_ADDR_INVALID (~((dma_addr_t)0))
@@ -91,7 +93,9 @@ struct s3c_hsotg_req;
* For periodic IN endpoints, we have fifo_size and fifo_load to try
* and keep track of the amount of data in the periodic FIFO for each
* of these as we don't have a status register that tells us how much
- * is in each of them.
+ * is in each of them. (Note: this may actually be useless information,
+ * as in shared-fifo mode a periodic IN endpoint acts like a
+ * single-frame packet buffer rather than a fifo.)
*/
struct s3c_hsotg_ep {
struct usb_ep ep;
@@ -128,6 +132,7 @@ struct s3c_hsotg_ep {
* @regs: The memory area mapped for accessing registers.
* @regs_res: The resource that was allocated when claiming register space.
* @irq: The IRQ number we are using
+ * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
* @debug_root: root directory for debugfs.
* @debug_file: main status file for debugfs.
* @debug_fifo: FIFO status file for debugfs.
@@ -145,6 +150,9 @@ struct s3c_hsotg {
void __iomem *regs;
struct resource *regs_res;
int irq;
+ struct clk *clk;
+
+ unsigned int dedicated_fifos:1;
struct dentry *debug_root;
struct dentry *debug_file;
@@ -310,11 +318,11 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
hsotg->regs + S3C_GNPTXFSIZ);
*/
- /* set FIFO sizes to 2048/0x1C0 */
+ /* set FIFO sizes to 2048/1024 */
writel(2048, hsotg->regs + S3C_GRXFSIZ);
writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
- S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+ S3C_GNPTXFSIZ_NPTxFDep(1024),
hsotg->regs + S3C_GNPTXFSIZ);
/* arrange all the rest of the TX FIFOs, as some versions of this
@@ -464,7 +472,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
if (to_write == 0)
return 0;
- if (periodic) {
+ if (periodic && !hsotg->dedicated_fifos) {
u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
int size_left;
int size_done;
@@ -474,6 +482,14 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+ /* if shared fifo, we cannot write anything until the
+ * previous data has been completely sent.
+ */
+ if (hs_ep->fifo_load != 0) {
+ s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+ return -ENOSPC;
+ }
+
dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
__func__, size_left,
hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
@@ -494,6 +510,11 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
return -ENOSPC;
}
+ } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
+ can_write = readl(hsotg->regs + S3C_DTXFSTS(hs_ep->index));
+
+ can_write &= 0xffff;
+ can_write *= 4;
} else {
if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
dev_dbg(hsotg->dev,
@@ -505,6 +526,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
}
can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
+ can_write *= 4; /* fifo size is in 32bit quantities. */
}
dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
@@ -517,6 +539,17 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
if (can_write > 512)
can_write = 512;
+ /* limit the write to one max-packet size worth of data, but allow
+ * the transfer to report that it did not run out of fifo space
+ * while doing so. */
+ if (to_write > hs_ep->ep.maxpacket) {
+ to_write = hs_ep->ep.maxpacket;
+
+ s3c_hsotg_en_gsint(hsotg,
+ periodic ? S3C_GINTSTS_PTxFEmp :
+ S3C_GINTSTS_NPTxFEmp);
+ }
+
/* see if we can write data */
if (to_write > can_write) {
@@ -579,12 +612,10 @@ static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
} else {
+ maxsize = 64+64;
if (hs_ep->dir_in) {
- /* maxsize = S3C_DIEPTSIZ0_XferSize_LIMIT + 1; */
- maxsize = 64+64+1;
maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
} else {
- maxsize = 0x3f;
maxpkt = 2;
}
}
@@ -1353,6 +1384,9 @@ static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
read_ptr = hs_req->req.actual;
max_req = hs_req->req.length - read_ptr;
+ dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
+ __func__, to_read, max_req, read_ptr, hs_req->req.length);
+
if (to_read > max_req) {
/* more data appeared than we were willing
* to deal with in this request.
@@ -1362,9 +1396,6 @@ static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
WARN_ON_ONCE(1);
}
- dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
- __func__, to_read, max_req, read_ptr, hs_req->req.length);
-
hs_ep->total_data += to_read;
hs_req->req.actual += to_read;
to_read = DIV_ROUND_UP(to_read, 4);
@@ -1433,9 +1464,11 @@ static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
int epnum, bool was_setup)
{
+ u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
struct s3c_hsotg_req *hs_req = hs_ep->req;
struct usb_request *req = &hs_req->req;
+ unsigned size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
int result = 0;
if (!hs_req) {
@@ -1444,9 +1477,7 @@ static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
}
if (using_dma(hsotg)) {
- u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
unsigned size_done;
- unsigned size_left;
/* Calculate the size of the transfer by checking how much
* is left in the endpoint size register and then working it
@@ -1456,14 +1487,18 @@ static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
* so may overshoot/undershoot the transfer.
*/
- size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
-
size_done = hs_ep->size_loaded - size_left;
size_done += hs_ep->last_load;
req->actual = size_done;
}
+ /* if there is more request to do, schedule new transfer */
+ if (req->actual < req->length && size_left == 0) {
+ s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+ return;
+ }
+
if (req->actual < req->length && req->short_not_ok) {
dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
__func__, req->actual, req->length);
@@ -1758,7 +1793,7 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
if (dir_in) {
s3c_hsotg_complete_in(hsotg, hs_ep);
- if (idx == 0)
+ if (idx == 0 && !hs_ep->req)
s3c_hsotg_enqueue_setup(hsotg);
} else if (using_dma(hsotg)) {
/* We're using DMA, we need to fire an OutDone here
@@ -1818,6 +1853,15 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
__func__, idx);
clear |= S3C_DIEPMSK_INTknEPMisMsk;
}
+
+ /* FIFO has space or is empty (see GAHBCFG) */
+ if (hsotg->dedicated_fifos &&
+ ints & S3C_DIEPMSK_TxFIFOEmpty) {
+ dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
+ __func__, idx);
+ s3c_hsotg_trytx(hsotg, hs_ep);
+ clear |= S3C_DIEPMSK_TxFIFOEmpty;
+ }
}
writel(clear, hsotg->regs + epint_reg);
@@ -2071,17 +2115,12 @@ irq_retry:
kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
/* it seems after a reset we can end up with a situation
- * where the TXFIFO still has data in it... try flushing
- * it to remove anything that may still be in it.
+ * where the TXFIFO still has data in it... the docs
+ * suggest resetting all the fifos, so use the init_fifo
+ * code to relayout and flush the fifos.
*/
- if (1) {
- writel(S3C_GRSTCTL_TxFNum(0) | S3C_GRSTCTL_TxFFlsh,
- hsotg->regs + S3C_GRSTCTL);
-
- dev_info(hsotg->dev, "GNPTXSTS=%08x\n",
- readl(hsotg->regs + S3C_GNPTXSTS));
- }
+ s3c_hsotg_init_fifo(hsotg);
s3c_hsotg_enqueue_setup(hsotg);
@@ -2274,6 +2313,12 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
break;
}
+ /* if the hardware has dedicated fifos, we must give each IN EP
+ * a unique tx-fifo even if it is non-periodic.
+ */
+ if (dir_in && hsotg->dedicated_fifos)
+ epctrl |= S3C_DxEPCTL_TxFNum(index);
+
/* for non control endpoints, set PID to D0 */
if (index)
epctrl |= S3C_DxEPCTL_SetD0PID;
@@ -2563,7 +2608,8 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
S3C_DIEPMSK_INTknEPMisMsk |
- S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+ S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk |
+ ((hsotg->dedicated_fifos) ? S3C_DIEPMSK_TxFIFOEmpty : 0),
hsotg->regs + S3C_DIEPMSK);
/* don't need XferCompl, we get that from RXFIFO in slave mode. In
@@ -2732,7 +2778,7 @@ static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
*/
ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
- hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo);
+ hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;
/* if we're using dma, we need to set the next-endpoint pointer
* to be something valid.
@@ -2753,13 +2799,33 @@ static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
*/
static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
{
- u32 osc;
+ struct clk *xusbxti;
+ u32 pwr, osc;
- writel(0, S3C_PHYPWR);
+ pwr = readl(S3C_PHYPWR);
+ pwr &= ~0x19;
+ writel(pwr, S3C_PHYPWR);
mdelay(1);
osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
+ xusbxti = clk_get(hsotg->dev, "xusbxti");
+ if (xusbxti && !IS_ERR(xusbxti)) {
+ switch (clk_get_rate(xusbxti)) {
+ case 12*MHZ:
+ osc |= S3C_PHYCLK_CLKSEL_12M;
+ break;
+ case 24*MHZ:
+ osc |= S3C_PHYCLK_CLKSEL_24M;
+ break;
+ default:
+ case 48*MHZ:
+ /* default reference clock */
+ break;
+ }
+ clk_put(xusbxti);
+ }
+
writel(osc | 0x10, S3C_PHYCLK);
/* issue a full set of resets to the otg and core */
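
The s3c_hsotg_otgreset() hunk above clears only the power-down bits in S3C_PHYPWR instead of writing zero, then looks up the "xusbxti" reference clock and picks the PHY clock-select value from its rate (12 MHz, 24 MHz, or the 48 MHz default). A sketch of that rate-based selection; the EXAMPLE_PHYCLK_CLKSEL_* encodings below are placeholders, not the real S3C register values:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/device.h>

    /* hypothetical clock-select encodings for illustration only */
    #define EXAMPLE_PHYCLK_CLKSEL_12M       0x2
    #define EXAMPLE_PHYCLK_CLKSEL_24M       0x3

    static u32 example_phy_clksel(struct device *dev, u32 osc)
    {
            struct clk *ref = clk_get(dev, "xusbxti");

            if (!IS_ERR_OR_NULL(ref)) {
                    switch (clk_get_rate(ref)) {
                    case 12 * 1000 * 1000:
                            osc |= EXAMPLE_PHYCLK_CLKSEL_12M;
                            break;
                    case 24 * 1000 * 1000:
                            osc |= EXAMPLE_PHYCLK_CLKSEL_24M;
                            break;
                    default:
                            /* 48 MHz reference: nothing extra to set */
                            break;
                    }
                    clk_put(ref);
            }
            return osc;
    }
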
@@ -2772,6 +2838,8 @@ static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
{
+ u32 cfg4;
+
/* unmask subset of endpoint interrupts */
writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
@@ -2807,6 +2875,14 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
hsotg->regs + S3C_GAHBCFG);
+
+ /* check hardware configuration */
+
+ cfg4 = readl(hsotg->regs + 0x50);
+ hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
+
+ dev_info(hsotg->dev, "%s fifos\n",
+ hsotg->dedicated_fifos ? "dedicated" : "shared");
}
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
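
The s3c_hsotg_init() hunk above reads the core's synthesis-time configuration register at offset 0x50 and latches bit 25 as the dedicated-FIFO flag that the FIFO-write and endpoint-enable paths then test. A sketch of the probe, reusing the patch's offset and bit position but with hypothetical macro names:

    #include <linux/io.h>
    #include <linux/device.h>

    #define EXAMPLE_GHWCFG4                 0x50
    #define EXAMPLE_GHWCFG4_DED_FIFO_EN     (1 << 25)

    static void example_detect_fifo_mode(struct s3c_hsotg *hsotg)
    {
            u32 cfg4 = readl(hsotg->regs + EXAMPLE_GHWCFG4);

            hsotg->dedicated_fifos = !!(cfg4 & EXAMPLE_GHWCFG4_DED_FIFO_EN);

            dev_info(hsotg->dev, "%s fifos\n",
                     hsotg->dedicated_fifos ? "dedicated" : "shared");
    }
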
@@ -3181,13 +3257,20 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
hsotg->dev = dev;
hsotg->plat = plat;
+ hsotg->clk = clk_get(&pdev->dev, "otg");
+ if (IS_ERR(hsotg->clk)) {
+ dev_err(dev, "cannot get otg clock\n");
+ ret = -EINVAL;
+ goto err_mem;
+ }
+
platform_set_drvdata(pdev, hsotg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "cannot find register resource 0\n");
ret = -EINVAL;
- goto err_mem;
+ goto err_clk;
}
hsotg->regs_res = request_mem_region(res->start, resource_size(res),
@@ -3195,7 +3278,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
if (!hsotg->regs_res) {
dev_err(dev, "cannot reserve registers\n");
ret = -ENOENT;
- goto err_mem;
+ goto err_clk;
}
hsotg->regs = ioremap(res->start, resource_size(res));
@@ -3248,6 +3331,8 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
/* reset the system */
+ clk_enable(hsotg->clk);
+
s3c_hsotg_gate(pdev, true);
s3c_hsotg_otgreset(hsotg);
@@ -3271,7 +3356,8 @@ err_regs:
err_regs_res:
release_resource(hsotg->regs_res);
kfree(hsotg->regs_res);
-
+err_clk:
+ clk_put(hsotg->clk);
err_mem:
kfree(hsotg);
return ret;
@@ -3293,6 +3379,9 @@ static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
s3c_hsotg_gate(pdev, false);
+ clk_disable(hsotg->clk);
+ clk_put(hsotg->clk);
+
kfree(hsotg);
return 0;
}
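
Taken together, the probe/remove hunks above give the controller an "otg" clock: clk_get() early in probe, clk_enable() before the core is reset, a new err_clk unwind label for the failure paths, and clk_disable()/clk_put() in remove. A condensed sketch of that lifecycle for a generic platform driver, with hypothetical names:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk = clk_get(&pdev->dev, "otg");

            if (IS_ERR(clk))
                    return PTR_ERR(clk);    /* nothing to unwind yet */

            clk_enable(clk);                /* hardware may now be touched */

            /* ... map registers, request the IRQ, register the UDC; any
             * failure from here on must clk_disable(clk); clk_put(clk);
             */

            platform_set_drvdata(pdev, clk);
            return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
            struct clk *clk = platform_get_drvdata(pdev);

            clk_disable(clk);
            clk_put(clk);
            return 0;
    }
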
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index f46a60962da..b22eedbc7dc 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -137,7 +137,7 @@ MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
/*-------------------------------------------------------------------------*/
-static int __init serial_bind_config(struct usb_configuration *c)
+static int __ref serial_bind_config(struct usb_configuration *c)
{
unsigned i;
int status = 0;
@@ -161,7 +161,7 @@ static struct usb_configuration serial_config_driver = {
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
-static int __init gs_bind(struct usb_composite_dev *cdev)
+static int __ref gs_bind(struct usb_composite_dev *cdev)
{
int gcnum;
struct usb_gadget *gadget = cdev->gadget;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 04c462ff0ea..484acfb1a7c 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -57,10 +57,12 @@
#include <asm/unaligned.h>
-/* Thanks to NetChip Technologies for donating this product ID.
+/*
+ * Thanks to NetChip Technologies for donating this product ID.
*
* DO NOT REUSE THESE IDs with any other driver!! Ever!!
- * Instead: allocate your own, using normal USB-IF procedures. */
+ * Instead: allocate your own, using normal USB-IF procedures.
+ */
#define FSG_VENDOR_ID 0x0525 /* NetChip */
#define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
@@ -84,14 +86,27 @@
#define LWARN(lun, fmt, args...) dev_warn(&(lun)->dev, fmt, ## args)
#define LINFO(lun, fmt, args...) dev_info(&(lun)->dev, fmt, ## args)
-/* Keep those macros in sync with thos in
- * include/linux/ubs/composite.h or else GCC will complain. If they
+/*
+ * Keep those macros in sync with those in
+ * include/linux/usb/composite.h or else GCC will complain. If they
* are identical (the same names of arguments, white spaces in the
* same places) GCC will allow redefinition otherwise (even if some
- * white space is removed or added) warning will be issued. No
- * checking if those symbols is defined is performed because warning
- * is desired when those macros were defined by someone else to mean
- * something else. */
+ * white space is removed or added) warning will be issued.
+ *
+ * Those macros are needed here because File Storage Gadget does not
+ * include the composite.h header. For composite gadgets those macros
+ * are redundant since composite.h is included any way.
+ *
+ * One could check whether those macros are already defined (which
+ * would indicate composite.h had been included) or not (which would
+ * indicate we were in FSG) but this is not done because a warning is
+ * desired if definitions here differ from the ones in composite.h.
+ *
+ * We want the definitions to match and be the same in File Storage
+ * Gadget as well as Mass Storage Function (and so composite gadgets
+ * using MSF). If someone changes them in composite.h it will produce
+ * a warning in this file when building MSF.
+ */
#define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args)
#define VDBG(d, fmt, args...) dev_vdbg(&(d)->gadget->dev , fmt , ## args)
#define ERROR(d, fmt, args...) dev_err(&(d)->gadget->dev , fmt , ## args)
@@ -269,6 +284,7 @@ struct fsg_lun {
unsigned int prevent_medium_removal:1;
unsigned int registered:1;
unsigned int info_valid:1;
+ unsigned int nofua:1;
u32 sense_data;
u32 sense_data_info;
@@ -313,9 +329,11 @@ struct fsg_buffhd {
enum fsg_buffer_state state;
struct fsg_buffhd *next;
- /* The NetChip 2280 is faster, and handles some protocol faults
+ /*
+ * The NetChip 2280 is faster, and handles some protocol faults
* better, if we don't submit any short bulk-out read requests.
- * So we will record the intended request length here. */
+ * So we will record the intended request length here.
+ */
unsigned int bulk_out_intended_length;
struct usb_request *inreq;
@@ -395,8 +413,10 @@ fsg_intf_desc = {
.iInterface = FSG_STRING_INTERFACE,
};
-/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
- * and interrupt-in. */
+/*
+ * Three full-speed endpoint descriptors: bulk-in, bulk-out, and
+ * interrupt-in.
+ */
static struct usb_endpoint_descriptor
fsg_fs_bulk_in_desc = {
@@ -459,7 +479,7 @@ static struct usb_descriptor_header *fsg_fs_function[] = {
*
* That means alternate endpoint descriptors (bigger packets)
* and a "device qualifier" ... plus more construction options
- * for the config descriptor.
+ * for the configuration descriptor.
*/
static struct usb_endpoint_descriptor
fsg_hs_bulk_in_desc = {
@@ -547,8 +567,10 @@ static struct usb_gadget_strings fsg_stringtab = {
/*-------------------------------------------------------------------------*/
-/* If the next two routines are called while the gadget is registered,
- * the caller must own fsg->filesem for writing. */
+/*
+ * If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing.
+ */
static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
{
@@ -587,8 +609,10 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
goto out;
}
- /* If we can't read the file, it's no good.
- * If we can't write the file, use it read-only. */
+ /*
+ * If we can't read the file, it's no good.
+ * If we can't write the file, use it read-only.
+ */
if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
LINFO(curlun, "file not readable: %s\n", filename);
goto out;
@@ -646,8 +670,10 @@ static void fsg_lun_close(struct fsg_lun *curlun)
/*-------------------------------------------------------------------------*/
-/* Sync the file data, don't bother with the metadata.
- * This code was copied from fs/buffer.c:sys_fdatasync(). */
+/*
+ * Sync the file data, don't bother with the metadata.
+ * This code was copied from fs/buffer.c:sys_fdatasync().
+ */
static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
@@ -689,6 +715,14 @@ static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
: curlun->initially_ro);
}
+static ssize_t fsg_show_nofua(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+
+ return sprintf(buf, "%u\n", curlun->nofua);
+}
+
static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -723,26 +757,47 @@ static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
ssize_t rc = count;
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
struct rw_semaphore *filesem = dev_get_drvdata(dev);
- int i;
+ unsigned long ro;
- if (sscanf(buf, "%d", &i) != 1)
+ if (strict_strtoul(buf, 2, &ro))
return -EINVAL;
- /* Allow the write-enable status to change only while the backing file
- * is closed. */
+ /*
+ * Allow the write-enable status to change only while the
+ * backing file is closed.
+ */
down_read(filesem);
if (fsg_lun_is_open(curlun)) {
LDBG(curlun, "read-only status change prevented\n");
rc = -EBUSY;
} else {
- curlun->ro = !!i;
- curlun->initially_ro = !!i;
+ curlun->ro = ro;
+ curlun->initially_ro = ro;
LDBG(curlun, "read-only status set to %d\n", curlun->ro);
}
up_read(filesem);
return rc;
}
+static ssize_t fsg_store_nofua(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ unsigned long nofua;
+
+ if (strict_strtoul(buf, 2, &nofua))
+ return -EINVAL;
+
+ /* Sync data when switching from async mode to sync */
+ if (!nofua && curlun->nofua)
+ fsg_lun_fsync_sub(curlun);
+
+ curlun->nofua = nofua;
+
+ return count;
+}
+
static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
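
The storage_common.c hunks above switch the boolean sysfs attributes from sscanf("%d") to strict_strtoul() and add a nofua flag whose store handler fsyncs the backing file when nofua is cleared. A sketch of the store-side parsing only, assuming the pre-kstrtoul strict_strtoul() API that this patch itself uses:

    #include <linux/kernel.h>
    #include <linux/device.h>

    static ssize_t example_store_flag(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
    {
            unsigned long val;

            /* parse as an unsigned number in base 2, so "0"/"1" work */
            if (strict_strtoul(buf, 2, &val))
                    return -EINVAL;

            /* ... take whatever lock the flag needs, store !!val, and
             * perform any side effect (e.g. fsync when clearing nofua) ...
             */

            return count;
    }
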
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 1da755a1c85..6bb876d6525 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -704,17 +704,6 @@ static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
-
-static u8 __init nibble(unsigned char c)
-{
- if (isdigit(c))
- return c - '0';
- c = toupper(c);
- if (isxdigit(c))
- return 10 + c - 'A';
- return 0;
-}
-
static int get_ether_addr(const char *str, u8 *dev_addr)
{
if (str) {
@@ -725,8 +714,8 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
if ((*str == '.') || (*str == ':'))
str++;
- num = nibble(*str++) << 4;
- num |= (nibble(*str++));
+ num = hex_to_bin(*str++) << 4;
+ num |= hex_to_bin(*str++);
dev_addr [i] = num;
}
if (is_valid_ether_addr(dev_addr))
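
The u_ether.c hunk above drops the driver-local nibble() helper and parses the MAC-address string with the generic hex_to_bin(). A sketch of the whole parse, close to the patched code but with a hypothetical name and only minimal error handling (hex_to_bin() returns -1 for a non-hex character, which this code, like the old helper, does not treat specially):

    #include <linux/kernel.h>       /* hex_to_bin() */
    #include <linux/etherdevice.h>  /* is_valid_ether_addr() */

    static int example_parse_mac(const char *str, u8 *dev_addr)
    {
            int i;

            for (i = 0; i < 6; i++) {
                    unsigned char num;

                    if (*str == '.' || *str == ':')
                            str++;
                    num = hex_to_bin(*str++) << 4;
                    num |= hex_to_bin(*str++);
                    dev_addr[i] = num;
            }
            return is_valid_ether_addr(dev_addr) ? 0 : -EINVAL;
    }
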
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 3e8dcb5455e..01e5354a4c2 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -18,6 +18,7 @@
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 2dcffdac86d..5e807f083bc 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
break;
}
- if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+ if (i == ARRAY_SIZE(uvc_formats)) {
printk(KERN_INFO "Unsupported format 0x%08x.\n",
fmt->fmt.pix.pixelformat);
return -EINVAL;
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
index 288d21155ab..de1deb7a3c6 100644
--- a/drivers/usb/gadget/webcam.c
+++ b/drivers/usb/gadget/webcam.c
@@ -308,7 +308,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
* USB configuration
*/
-static int __init
+static int __ref
webcam_config_bind(struct usb_configuration *c)
{
return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
@@ -330,7 +330,7 @@ webcam_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static int __init
+static int __ref
webcam_bind(struct usb_composite_dev *cdev)
{
int ret;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 807280d069f..cf353920bb1 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -264,7 +264,7 @@ static void zero_resume(struct usb_composite_dev *cdev)
/*-------------------------------------------------------------------------*/
-static int __init zero_bind(struct usb_composite_dev *cdev)
+static int __ref zero_bind(struct usb_composite_dev *cdev)
{
int gcnum;
struct usb_gadget *gadget = cdev->gadget;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f865be2276d..2d926cec072 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -72,8 +72,9 @@ config USB_EHCI_ROOT_HUB_TT
from ARC, and has since changed hands a few times.
config USB_EHCI_TT_NEWSCHED
- bool "Improved Transaction Translator scheduling (EXPERIMENTAL)"
- depends on USB_EHCI_HCD && EXPERIMENTAL
+ bool "Improved Transaction Translator scheduling"
+ depends on USB_EHCI_HCD
+ default y
---help---
This changes the periodic scheduling code to fill more of the low
and full speed bandwidth available from the Transaction Translator
@@ -84,9 +85,11 @@ config USB_EHCI_TT_NEWSCHED
If you have multiple periodic low/fullspeed devices connected to a
highspeed USB hub which is connected to a highspeed USB Host
Controller, and some of those devices will not work correctly
- (possibly due to "ENOSPC" or "-28" errors), say Y.
+ (possibly due to "ENOSPC" or "-28" errors), say Y. Conversely, if
+ you have only one such device and it doesn't work, you could try
+ saying N.
- If unsure, say N.
+ If unsure, say Y.
config USB_EHCI_BIG_ENDIAN_MMIO
bool
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index faa61748db7..2baf8a84908 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -228,7 +228,7 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
* the root hub is either suspended or stopped.
*/
spin_lock_irqsave(&ehci->lock, flags);
- ehci_prepare_ports_for_controller_suspend(ehci);
+ ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 874d2000bf9..76b7fd2d838 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -98,13 +98,18 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
} else {
ehci_dbg (ehci,
- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
+ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
label,
params,
HCC_ISOC_THRES(params),
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "",
- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
+ HCC_LPM(params) ? " LPM" : "",
+ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
+ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
+ HCC_32FRAME_PERIODIC_LIST(params) ?
+ " 32 periodic list" : "");
}
}
#else
@@ -191,8 +196,9 @@ static int __maybe_unused
dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf (buf, len,
- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
label, label [0] ? " " : "", status,
+ (status & STS_PPCE_MASK) ? " PPCE" : "",
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
@@ -210,8 +216,9 @@ static int __maybe_unused
dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf (buf, len,
- "%s%sintrenable %02x%s%s%s%s%s%s",
+ "%s%sintrenable %02x%s%s%s%s%s%s%s",
label, label [0] ? " " : "", enable,
+ (enable & STS_PPCE_MASK) ? " PPCE" : "",
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
@@ -228,9 +235,15 @@ static int
dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
{
return scnprintf (buf, len,
- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
+ "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
+ "period=%s%s %s",
label, label [0] ? " " : "", command,
- (command & CMD_PARK) ? "park" : "(park)",
+ (command & CMD_HIRD) ? " HIRD" : "",
+ (command & CMD_PPCEE) ? " PPCEE" : "",
+ (command & CMD_FSP) ? " FSP" : "",
+ (command & CMD_ASPE) ? " ASPE" : "",
+ (command & CMD_PSPE) ? " PSPE" : "",
+ (command & CMD_PARK) ? " park" : "(park)",
CMD_PARK_CNT (command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
@@ -257,11 +270,22 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
}
return scnprintf (buf, len,
- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
+ "%s%sport:%d status %06x %d %s%s%s%s%s%s "
+ "sig=%s%s%s%s%s%s%s%s%s%s%s",
label, label [0] ? " " : "", port, status,
+ status>>25,/*device address */
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
+ " ACK" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
+ " NYET" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
+ " STALL" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
+ " ERR" : "",
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
+ (status & PORT_LPM) ? " LPM" : "",
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
@@ -330,6 +354,13 @@ static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
static int debug_async_open(struct inode *, struct file *);
+static int debug_lpm_open(struct inode *, struct file *);
+static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static int debug_lpm_close(struct inode *inode, struct file *file);
+
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
@@ -351,6 +382,13 @@ static const struct file_operations debug_registers_fops = {
.read = debug_output,
.release = debug_close,
};
+static const struct file_operations debug_lpm_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_lpm_open,
+ .read = debug_lpm_read,
+ .write = debug_lpm_write,
+ .release = debug_lpm_close,
+};
static struct dentry *ehci_debug_root;
@@ -674,7 +712,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
spin_lock_irqsave (&ehci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
size = scnprintf (next, size,
"bus %s, device %s\n"
"%s\n"
@@ -917,51 +955,127 @@ static int debug_registers_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
+static int debug_lpm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int debug_lpm_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ /* TODO: show lpm stats */
+ return 0;
+}
+
+static ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ char buf[50];
+ size_t len;
+ u32 temp;
+ unsigned long port;
+ u32 __iomem *portsc;
+ u32 params;
+
+ hcd = bus_to_hcd(file->private_data);
+ ehci = hcd_to_ehci(hcd);
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ if (strncmp(buf, "enable", 5) == 0) {
+ if (strict_strtoul(buf + 7, 10, &port))
+ return -EINVAL;
+ params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ if (port > HCS_N_PORTS(params)) {
+ ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
+ return -ENODEV;
+ }
+ portsc = &ehci->regs->port_status[port-1];
+ temp = ehci_readl(ehci, portsc);
+ if (!(temp & PORT_DEV_ADDR)) {
+ ehci_dbg(ehci, "LPM: no device attached\n");
+ return -ENODEV;
+ }
+ temp |= PORT_LPM;
+ ehci_writel(ehci, temp, portsc);
+ printk(KERN_INFO "force enable LPM for port %lu\n", port);
+ } else if (strncmp(buf, "hird=", 5) == 0) {
+ unsigned long hird;
+ if (strict_strtoul(buf + 5, 16, &hird))
+ return -EINVAL;
+ printk(KERN_INFO "setting hird %s %lu\n", buf + 6, hird);
+ temp = ehci_readl(ehci, &ehci->regs->command);
+ temp &= ~CMD_HIRD;
+ temp |= hird << 24;
+ ehci_writel(ehci, temp, &ehci->regs->command);
+ } else if (strncmp(buf, "disable", 7) == 0) {
+ if (strict_strtoul(buf + 8, 10, &port))
+ return -EINVAL;
+ params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ if (port > HCS_N_PORTS(params)) {
+ ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
+ return -ENODEV;
+ }
+ portsc = &ehci->regs->port_status[port-1];
+ temp = ehci_readl(ehci, portsc);
+ if (!(temp & PORT_DEV_ADDR)) {
+ ehci_dbg(ehci, "ERR: no device attached\n");
+ return -ENODEV;
+ }
+ temp &= ~PORT_LPM;
+ ehci_writel(ehci, temp, portsc);
+ printk(KERN_INFO "disabled LPM for port %lu\n", port);
+ } else
+ return -EOPNOTSUPP;
+ return count;
+}
+
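
debug_lpm_write() above follows the common debugfs write-handler recipe: bound the copy to a small stack buffer, NUL-terminate it, strip a trailing newline, then dispatch on a command prefix and parse the argument with strict_strtoul(). A stripped-down sketch of just that framing, with hypothetical names and commands:

    #include <linux/kernel.h>
    #include <linux/fs.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static ssize_t example_debug_write(struct file *file,
                                       const char __user *user_buf,
                                       size_t count, loff_t *ppos)
    {
            char buf[50];
            size_t len = min(count, sizeof(buf) - 1);

            if (copy_from_user(buf, user_buf, len))
                    return -EFAULT;
            buf[len] = '\0';
            if (len > 0 && buf[len - 1] == '\n')
                    buf[len - 1] = '\0';

            if (strncmp(buf, "enable ", 7) == 0) {
                    /* ... parse the port number after the space ... */
            } else if (strncmp(buf, "disable ", 8) == 0) {
                    /* ... */
            } else {
                    return -EOPNOTSUPP;
            }
            return count;                   /* consumed the whole write */
    }
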
static inline void create_debug_files (struct ehci_hcd *ehci)
{
struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root);
if (!ehci->debug_dir)
- goto dir_error;
-
- ehci->debug_async = debugfs_create_file("async", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_async_fops);
- if (!ehci->debug_async)
- goto async_error;
-
- ehci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_periodic_fops);
- if (!ehci->debug_periodic)
- goto periodic_error;
-
- ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_registers_fops);
- if (!ehci->debug_registers)
- goto registers_error;
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus,
+ &debug_lpm_fops))
+ goto file_error;
+
return;
-registers_error:
- debugfs_remove(ehci->debug_periodic);
-periodic_error:
- debugfs_remove(ehci->debug_async);
-async_error:
- debugfs_remove(ehci->debug_dir);
-dir_error:
- ehci->debug_periodic = NULL;
- ehci->debug_async = NULL;
- ehci->debug_dir = NULL;
+file_error:
+ debugfs_remove_recursive(ehci->debug_dir);
}
static inline void remove_debug_files (struct ehci_hcd *ehci)
{
- debugfs_remove(ehci->debug_registers);
- debugfs_remove(ehci->debug_periodic);
- debugfs_remove(ehci->debug_async);
- debugfs_remove(ehci->debug_dir);
+ debugfs_remove_recursive(ehci->debug_dir);
}
#endif /* STUB_DEBUG_FILES */
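
The create_debug_files()/remove_debug_files() rewrite above drops the per-file dentry bookkeeping and error-unwind ladder: on any failure, and in remove, a single debugfs_remove_recursive() tears down the whole directory. A sketch of the pattern with hypothetical names; example_status_fops stands in for whatever file_operations the files expose:

    #include <linux/debugfs.h>

    static const struct file_operations example_status_fops; /* hypothetical fops */
    static struct dentry *example_debug_dir;

    static void example_create_debug_files(void *priv)
    {
            example_debug_dir = debugfs_create_dir("example", NULL);
            if (!example_debug_dir)
                    return;

            if (!debugfs_create_file("status", S_IRUGO, example_debug_dir,
                                     priv, &example_status_fops))
                    goto file_error;
            return;

    file_error:
            debugfs_remove_recursive(example_debug_dir); /* dir and children */
            example_debug_dir = NULL;
    }

    static void example_remove_debug_files(void)
    {
            debugfs_remove_recursive(example_debug_dir); /* NULL-safe */
    }
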
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 5cd967d2893..a416421abfa 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -313,7 +313,8 @@ static int ehci_fsl_drv_suspend(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;
- ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+ device_may_wakeup(dev));
if (!fsl_deep_sleep())
return 0;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index a3ef2a9d9dc..34a928d3b7d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -36,6 +36,7 @@
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -78,7 +79,13 @@ static const char hcd_name [] = "ehci_hcd";
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
-#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
#define EHCI_IAA_MSECS 10 /* arbitrary */
#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
@@ -100,6 +107,11 @@ static int ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
+
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
/*-------------------------------------------------------------------------*/
@@ -304,6 +316,7 @@ static void end_unlink_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
#include "ehci-hub.c"
+#include "ehci-lpm.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
@@ -577,6 +590,11 @@ static int ehci_init(struct usb_hcd *hcd)
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
+ ehci->has_ppcd = 1;
+ ehci_dbg(ehci, "enable per-port change event\n");
+ temp |= CMD_PPCEE;
+ }
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
@@ -603,10 +621,22 @@ static int ehci_init(struct usb_hcd *hcd)
default: BUG();
}
}
+ if (HCC_LPM(hcc_params)) {
+ /* support link power management EHCI 1.1 addendum */
+ ehci_dbg(ehci, "support lpm\n");
+ ehci->has_lpm = 1;
+ if (hird > 0xf) {
+ ehci_dbg(ehci, "hird %d invalid, use default 0",
+ hird);
+ hird = 0;
+ }
+ temp |= hird << 24;
+ }
ehci->command = temp;
/* Accept arbitrarily long scatter-gather lists */
- hcd->self.sg_tablesize = ~0;
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
return 0;
}
@@ -619,7 +649,6 @@ static int ehci_run (struct usb_hcd *hcd)
u32 hcc_params;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
/* EHCI spec section 4.1 */
if ((retval = ehci_reset(ehci)) != 0) {
@@ -764,6 +793,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
+ u32 ppcd = 0;
/* kick root hub later */
pcd_status = status;
@@ -772,9 +802,18 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
if (!(cmd & CMD_RUN))
usb_hcd_resume_root_hub(hcd);
+ /* get per-port change detect bits */
+ if (ehci->has_ppcd)
+ ppcd = status >> 16;
+
while (i--) {
- int pstatus = ehci_readl(ehci,
- &ehci->regs->port_status [i]);
+ int pstatus;
+
+ /* leverage per-port change bits feature */
+ if (ehci->has_ppcd && !(ppcd & (1 << i)))
+ continue;
+ pstatus = ehci_readl(ehci,
+ &ehci->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index e7d3d8def28..796ea0c8900 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -107,7 +107,7 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
}
static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
- bool suspending)
+ bool suspending, bool do_wakeup)
{
int port;
u32 temp;
@@ -117,8 +117,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
* when the controller is suspended or resumed. In all other
* cases they don't need to be changed.
*/
- if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
- device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
+ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
return;
/* clear phy low-power mode before changing wakeup flags */
@@ -167,6 +166,10 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
}
}
+
+ /* Does the root hub have a port wakeup pending? */
+ if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
+ usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
@@ -316,7 +319,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
spin_unlock_irq(&ehci->lock);
return -ESHUTDOWN;
}
@@ -603,6 +606,7 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
+ u32 ppcd = 0;
/* if !USB_SUSPEND, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
@@ -632,7 +636,15 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
/* port N changes (bit N)? */
spin_lock_irqsave (&ehci->lock, flags);
+
+ /* get per-port change detect bits */
+ if (ehci->has_ppcd)
+ ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
+
for (i = 0; i < ports; i++) {
+ /* leverage per-port change bits feature */
+ if (ehci->has_ppcd && !(ppcd & (1 << i)))
+ continue;
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
/*
@@ -790,6 +802,11 @@ static int ehci_hub_control (
status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
+ if (ehci->has_lpm) {
+ /* clear PORTSC bits on disconnect */
+ temp &= ~PORT_LPM;
+ temp &= ~PORT_DEV_ADDR;
+ }
ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
status_reg);
break;
diff --git a/drivers/usb/host/ehci-lpm.c b/drivers/usb/host/ehci-lpm.c
new file mode 100644
index 00000000000..b4d4d63c13e
--- /dev/null
+++ b/drivers/usb/host/ehci-lpm.c
@@ -0,0 +1,83 @@
+/* ehci-lpm.c EHCI HCD LPM support code
+ * Copyright (c) 2008 - 2010, Intel Corporation.
+ * Author: Jacob Pan <jacob.jun.pan@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/* this file is part of ehci-hcd.c */
+static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
+{
+ u32 portsc;
+
+ ehci_dbg(ehci, "set dev address %d for port %d\n", dev_addr, port_num);
+ if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
+ ehci_dbg(ehci, "invalid port number %d\n", port_num);
+ return -ENODEV;
+ }
+ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
+ portsc &= ~PORT_DEV_ADDR;
+ portsc |= dev_addr<<25;
+ ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
+ return 0;
+}
+
+/*
+ * this function is used to check if the device supports LPM;
+ * if yes, mark the PORTSC register with the PORT_LPM bit
+ */
+static int ehci_lpm_check(struct ehci_hcd *ehci, int port)
+{
+ u32 __iomem *portsc;
+ u32 val32;
+ int retval;
+
+ portsc = &ehci->regs->port_status[port-1];
+ val32 = ehci_readl(ehci, portsc);
+ if (!(val32 & PORT_DEV_ADDR)) {
+ ehci_dbg(ehci, "LPM: no device attached\n");
+ return -ENODEV;
+ }
+ val32 |= PORT_LPM;
+ ehci_writel(ehci, val32, portsc);
+ msleep(5);
+ val32 |= PORT_SUSPEND;
+ ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
+ ehci_writel(ehci, val32, portsc);
+ /* wait for ACK */
+ msleep(10);
+ retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
+ PORTSC_SUSPEND_STS_ACK, 125);
+ dbg_port(ehci, "LPM", port, val32);
+ if (retval != -ETIMEDOUT) {
+ ehci_dbg(ehci, "LPM: device ACK for LPM\n");
+ val32 |= PORT_LPM;
+ /*
+ * now device should be in L1 sleep, let's wake up the device
+ * so that we can complete enumeration.
+ */
+ ehci_writel(ehci, val32, portsc);
+ msleep(10);
+ val32 |= PORT_RESUME;
+ ehci_writel(ehci, val32, portsc);
+ } else {
+ ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
+ retval);
+ val32 &= ~PORT_LPM;
+ retval = -ETIMEDOUT;
+ ehci_writel(ehci, val32, portsc);
+ }
+
+ return retval;
+}
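
ehci_lpm_check() above leans on the driver's existing handshake() helper to poll PORTSC until the suspend-status field reports an ACK or the wait times out. A generic sketch of that poll-with-timeout idiom (not the driver's actual helper), assuming a memory-mapped 32-bit register:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Poll (*reg & mask) until it equals 'done' or 'usec' microseconds elapse. */
    static int example_handshake(void __iomem *reg, u32 mask, u32 done, int usec)
    {
            u32 result;

            do {
                    result = readl(reg);
                    if ((result & mask) == done)
                            return 0;
                    udelay(1);
                    usec--;
            } while (usec > 0);

            return -ETIMEDOUT;
    }
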
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 5450e628157..116ae280053 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -38,6 +38,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/usb/ulpi.h>
#include <plat/usb.h>
/*
@@ -236,6 +237,35 @@ static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
/*-------------------------------------------------------------------------*/
+static void omap_ehci_soft_phy_reset(struct ehci_hcd_omap *omap, u8 port)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned reg = 0;
+
+ reg = ULPI_FUNC_CTRL_RESET
+ /* FUNCTION_CTRL_SET register */
+ | (ULPI_SET(ULPI_FUNC_CTRL) << EHCI_INSNREG05_ULPI_REGADD_SHIFT)
+ /* Write */
+ | (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT)
+ /* PORTn */
+ | ((port + 1) << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT)
+ /* start ULPI access*/
+ | (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT);
+
+ ehci_omap_writel(omap->ehci_base, EHCI_INSNREG05_ULPI, reg);
+
+ /* Wait for ULPI access completion */
+ while ((ehci_omap_readl(omap->ehci_base, EHCI_INSNREG05_ULPI)
+ & (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(omap->dev, "phy reset operation timed out\n");
+ break;
+ }
+ }
+}
+
/* omap_start_ehc
* - Start the TI USBHOST controller
*/
@@ -425,6 +455,12 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
gpio_set_value(omap->reset_gpio_port[1], 1);
}
+ /* Soft reset the PHY using PHY reset command over ULPI */
+ if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
+ omap_ehci_soft_phy_reset(omap, 0);
+ if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
+ omap_ehci_soft_phy_reset(omap, 1);
+
return 0;
err_sys_status:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index d43d176161a..a1e8d273103 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -114,10 +114,16 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_INTEL:
ehci->need_io_watchdog = 0;
+ ehci->fs_i_thresh = 1;
if (pdev->device == 0x27cc) {
ehci->broken_periodic = 1;
ehci_info(ehci, "using broken periodic workaround\n");
}
+ if (pdev->device == 0x0806 || pdev->device == 0x0811
+ || pdev->device == 0x0829) {
+ ehci_info(ehci, "disable lpm for langwell/penwell\n");
+ ehci->has_lpm = 0;
+ }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
@@ -277,7 +283,7 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_suspend(struct usb_hcd *hcd)
+static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
unsigned long flags;
@@ -291,7 +297,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
* the root hub is either suspended or stopped.
*/
spin_lock_irqsave (&ehci->lock, flags);
- ehci_prepare_ports_for_controller_suspend(ehci);
+ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
@@ -361,6 +367,22 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
}
#endif
+static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int rc = 0;
+
+ if (!udev->parent) /* udev is root hub itself, impossible */
+ rc = -1;
+ /* we only support lpm device connected to root hub yet */
+ if (ehci->has_lpm && !udev->parent->parent) {
+ rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum);
+ if (!rc)
+ rc = ehci_lpm_check(ehci, udev->portnum);
+ }
+ return rc;
+}
+
static const struct hc_driver ehci_pci_hc_driver = {
.description = hcd_name,
.product_desc = "EHCI Host Controller",
@@ -407,6 +429,11 @@ static const struct hc_driver ehci_pci_hc_driver = {
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+ /*
+ * call back when device connected and addressed
+ */
+ .update_device = ehci_update_device,
+
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 5aec92866ab..ba52be47302 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -106,7 +106,7 @@ ppc44x_enable_bmt(struct device_node *dn)
static int __devinit
-ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
+ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
}
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
- return 0;
+ if (rv)
+ goto err_ehci;
+
+ return 0;
+err_ehci:
+ if (ehci->has_amcc_usb23)
+ iounmap(ehci->ohci_hcctrl_reg);
iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
err_rmr:
usb_put_hcd(hcd);
@@ -210,7 +212,7 @@ err_rmr:
}
-static int ehci_hcd_ppc_of_remove(struct of_device *op)
+static int ehci_hcd_ppc_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -253,7 +255,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
}
-static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
+static int ehci_hcd_ppc_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 11a79c4f4a9..233c288e3f9 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1126,8 +1126,7 @@ submit_async (
#endif
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
rc = -ESHUTDOWN;
goto done;
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 805ec633a65..a92526d6e5a 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -880,8 +880,7 @@ static int intr_submit (
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -1075,15 +1074,6 @@ iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
if (stream->ep)
stream->ep->hcpriv = NULL;
- if (stream->rescheduled) {
- ehci_info (ehci, "ep%d%s-iso rescheduled "
- "%lu times in %lu seconds\n",
- stream->bEndpointAddress, is_in ? "in" : "out",
- stream->rescheduled,
- ((jiffies - stream->start)/HZ)
- );
- }
-
kfree(stream);
}
}
@@ -1396,30 +1386,25 @@ iso_stream_schedule (
struct ehci_iso_stream *stream
)
{
- u32 now, next, start, period;
+ u32 now, next, start, period, span;
int status;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
- struct pci_dev *pdev;
- if (sched->span > (mod - SCHEDULE_SLOP)) {
- ehci_dbg (ehci, "iso request %p too long\n", urb);
- status = -EFBIG;
- goto fail;
+ period = urb->interval;
+ span = sched->span;
+ if (!stream->highspeed) {
+ period <<= 3;
+ span <<= 3;
}
- if ((stream->depth + sched->span) > mod) {
- ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
- urb, stream->depth, sched->span, mod);
+ if (span > mod - SCHEDULE_SLOP) {
+ ehci_dbg (ehci, "iso request %p too long\n", urb);
status = -EFBIG;
goto fail;
}
- period = urb->interval;
- if (!stream->highspeed)
- period <<= 3;
-
- now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
+ now = ehci_readl(ehci, &ehci->regs->frame_index) & (mod - 1);
/* Typical case: reuse current schedule, stream is still active.
* Hopefully there are no gaps from the host falling behind
@@ -1427,34 +1412,35 @@ iso_stream_schedule (
* slot in the schedule, implicitly assuming URB_ISO_ASAP.
*/
if (likely (!list_empty (&stream->td_list))) {
- pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
- start = stream->next_uframe;
+ u32 excess;
/* For high speed devices, allow scheduling within the
- * isochronous scheduling threshold. For full speed devices,
- * don't. (Work around for Intel ICH9 bug.)
+ * isochronous scheduling threshold. For full speed devices
+ * and Intel PCI-based controllers, don't (work around for
+ * Intel ICH9 bug).
*/
- if (!stream->highspeed &&
- pdev->vendor == PCI_VENDOR_ID_INTEL)
+ if (!stream->highspeed && ehci->fs_i_thresh)
next = now + ehci->i_thresh;
else
next = now;
- /* Fell behind (by up to twice the slop amount)? */
- if (((start - next) & (mod - 1)) >=
- mod - 2 * SCHEDULE_SLOP)
- start += period * DIV_ROUND_UP(
- (next - start) & (mod - 1),
- period);
-
- /* Tried to schedule too far into the future? */
- if (unlikely(((start - now) & (mod - 1)) + sched->span
- >= mod - 2 * SCHEDULE_SLOP)) {
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
status = -EFBIG;
goto fail;
}
- stream->next_uframe = start;
- goto ready;
}
/* need to schedule; when's the next (u)frame we could start?
@@ -1463,51 +1449,60 @@ iso_stream_schedule (
* can also help high bandwidth if the dma and irq loads don't
* jump until after the queue is primed.
*/
- start = SCHEDULE_SLOP + (now & ~0x07);
- start %= mod;
- stream->next_uframe = start;
-
- /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
-
- /* find a uframe slot with enough bandwidth */
- for (; start < (stream->next_uframe + period); start++) {
- int enough_space;
-
- /* check schedule: enough space? */
- if (stream->highspeed)
- enough_space = itd_slot_ok (ehci, mod, start,
- stream->usecs, period);
- else {
- if ((start % 8) >= 6)
- continue;
- enough_space = sitd_slot_ok (ehci, mod, stream,
- start, sched, period);
+ else {
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth */
+ next = start + period;
+ for (; start < next; start++) {
+
+ /* check schedule: enough space? */
+ if (stream->highspeed) {
+ if (itd_slot_ok(ehci, mod, start,
+ stream->usecs, period))
+ break;
+ } else {
+ if ((start % 8) >= 6)
+ continue;
+ if (sitd_slot_ok(ehci, mod, stream,
+ start, sched, period))
+ break;
+ }
}
- /* schedule it here if there's enough bandwidth */
- if (enough_space) {
- stream->next_uframe = start % mod;
- goto ready;
+ /* no room in the schedule */
+ if (start == next) {
+ ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
}
}
- /* no room in the schedule */
- ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
- list_empty (&stream->td_list) ? "" : "re",
- urb, now, now + mod);
- status = -ENOSPC;
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
-fail:
- iso_sched_free (stream, sched);
- urb->hcpriv = NULL;
- return status;
+ stream->next_uframe = start & (mod - 1);
-ready:
/* report high speed start in uframes; full speed, in frames */
urb->start_frame = stream->next_uframe;
if (!stream->highspeed)
urb->start_frame >>= 3;
return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -1602,7 +1597,7 @@ itd_link_urb (
struct ehci_iso_sched *iso_sched = urb->hcpriv;
struct ehci_itd *itd;
- next_uframe = stream->next_uframe % mod;
+ next_uframe = stream->next_uframe & (mod - 1);
if (unlikely (list_empty(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
@@ -1613,7 +1608,6 @@ itd_link_urb (
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
urb->interval,
next_uframe >> 3, next_uframe & 0x7);
- stream->start = jiffies;
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -1639,14 +1633,13 @@ itd_link_urb (
itd_patch(ehci, itd, iso_sched, packet, uframe);
next_uframe += stream->interval;
- stream->depth += stream->interval;
- next_uframe %= mod;
+ next_uframe &= mod - 1;
packet++;
/* link completed itds into the schedule */
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
- itd_link (ehci, frame % ehci->periodic_size, itd);
+ itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
itd = NULL;
}
}
@@ -1695,7 +1688,6 @@ itd_complete (
t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
itd->hw_transaction [uframe] = 0;
- stream->depth -= stream->interval;
/* report transfer status */
if (unlikely (t & ISO_ERRS)) {
@@ -1815,8 +1807,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -2024,9 +2015,8 @@ sitd_link_urb (
"sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
urb->dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- (next_uframe >> 3) % ehci->periodic_size,
+ (next_uframe >> 3) & (ehci->periodic_size - 1),
stream->interval, hc32_to_cpu(ehci, stream->splits));
- stream->start = jiffies;
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -2047,13 +2037,12 @@ sitd_link_urb (
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
- sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
+ sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
next_uframe += stream->interval << 3;
- stream->depth += stream->interval << 3;
}
- stream->next_uframe = next_uframe % mod;
+ stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
iso_sched_free (stream, sched);
@@ -2111,7 +2100,6 @@ sitd_complete (
desc->actual_length = desc->length - SITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
- stream->depth -= stream->interval << 3;
/* handle completion now? */
if ((urb_index + 1) != urb->number_of_packets)
@@ -2201,8 +2189,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -2263,7 +2250,7 @@ scan_periodic (struct ehci_hcd *ehci)
now_uframe = ehci->next_uframe;
if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
clock = ehci_readl(ehci, &ehci->regs->frame_index);
- clock_frame = (clock >> 3) % ehci->periodic_size;
+ clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
} else {
clock = now_uframe + mod - 1;
clock_frame = -1;
@@ -2272,7 +2259,7 @@ scan_periodic (struct ehci_hcd *ehci)
free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
- clock %= mod;
+ clock &= mod - 1;
clock_frame = clock >> 3;
for (;;) {
@@ -2361,7 +2348,7 @@ restart:
* frame is current.
*/
if (((frame == clock_frame) ||
- (((frame + 1) % ehci->periodic_size)
+ (((frame + 1) & (ehci->periodic_size - 1))
== clock_frame))
&& live
&& (q.sitd->hw_results &
@@ -2428,7 +2415,8 @@ restart:
|| ehci->periodic_sched == 0)
break;
ehci->next_uframe = now_uframe;
- now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
+ now = ehci_readl(ehci, &ehci->regs->frame_index) &
+ (mod - 1);
if (now_uframe == now)
break;
@@ -2441,7 +2429,7 @@ restart:
}
} else {
now_uframe++;
- now_uframe %= mod;
+ now_uframe &= mod - 1;
}
}
}
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 4899f451add..6c8076ad821 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -140,7 +140,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
/**
* ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
- * @op: pointer to the of_device to which the host controller bound
+ * @op: pointer to the platform_device bound to the host controller
* @match: pointer to of_device_id structure, not used
*
* This function requests resources and sets up appropriate properties for the
@@ -149,7 +149,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* entry, and sets an appropriate value for hcd->has_tt.
*/
static int __devinit
-ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
+ehci_hcd_xilinx_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -242,12 +242,12 @@ err_rmr:
/**
* ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
- * @op: pointer to of_device structure that is to be removed
+ * @op: pointer to platform_device structure that is to be removed
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
*/
-static int ehci_hcd_xilinx_of_remove(struct of_device *op)
+static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
dev_set_drvdata(&op->dev, NULL);
@@ -266,11 +266,11 @@ static int ehci_hcd_xilinx_of_remove(struct of_device *op)
/**
* ehci_hcd_xilinx_of_shutdown - shutdown the hcd
- * @op: pointer to of_device structure that is to be removed
+ * @op: pointer to platform_device structure that is to be removed
*
* Properly shutdown the hcd, call driver's shutdown routine.
*/
-static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
+static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 650a687f285..bde823f704e 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -130,6 +130,7 @@ struct ehci_hcd { /* one per controller */
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -140,7 +141,8 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
unsigned has_hostpc:1;
-
+ unsigned has_lpm:1; /* support link power management */
+ unsigned has_ppcd:1; /* support per-port change bits */
u8 sbrn; /* packed release number */
/* irq statistics */
@@ -154,9 +156,6 @@ struct ehci_hcd { /* one per controller */
/* debug files */
#ifdef DEBUG
struct dentry *debug_dir;
- struct dentry *debug_async;
- struct dentry *debug_periodic;
- struct dentry *debug_registers;
#endif
};
@@ -401,15 +400,12 @@ struct ehci_iso_stream {
u32 refcount;
u8 bEndpointAddress;
u8 highspeed;
- u16 depth; /* depth in uframes */
struct list_head td_list; /* queued itds/sitds */
struct list_head free_list; /* list of unused itds/sitds */
struct usb_device *udev;
struct usb_host_endpoint *ep;
/* output of (re)scheduling */
- unsigned long start; /* jiffies */
- unsigned long rescheduled;
int next_uframe;
__hc32 splits;
@@ -538,11 +534,11 @@ struct ehci_fstn {
/* Prepare the PORTSC wakeup flags during controller suspend/resume */
-#define ehci_prepare_ports_for_controller_suspend(ehci) \
- ehci_adjust_port_wakeup_flags(ehci, true);
+#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \
+ ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);
-#define ehci_prepare_ports_for_controller_resume(ehci) \
- ehci_adjust_port_wakeup_flags(ehci, false);
+#define ehci_prepare_ports_for_controller_resume(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, false, false);
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index c7c8392a88b..20092a27a1e 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -561,7 +561,7 @@ static const struct hc_driver fhci_driver = {
.hub_control = fhci_hub_control,
};
-static int __devinit of_fhci_probe(struct of_device *ofdev,
+static int __devinit of_fhci_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
@@ -801,7 +801,7 @@ static int __devexit fhci_remove(struct device *dev)
return 0;
}
-static int __devexit of_fhci_remove(struct of_device *ofdev)
+static int __devexit of_fhci_remove(struct platform_device *ofdev)
{
return fhci_remove(&ofdev->dev);
}
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 35742f8c7cd..9bfac657572 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -159,7 +159,7 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
goto error_set_cluster_id;
usb_hcd->uses_new_polling = 1;
- usb_hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
result = 0;
out:
@@ -776,7 +776,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
+ set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
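
Editor's note: the hwa-hc.c hunk above, like the EHCI/OHCI/UHCI hunks elsewhere in this patch, stops poking hcd->poll_rh and hcd->flags directly and goes through bit numbers, atomic bitops and accessor macros instead. A rough sketch of the pattern assumed here; the authoritative definitions live in include/linux/usb/hcd.h and may differ in detail:

/* Rough sketch of the hcd->flags accessors relied on above
 * (see include/linux/usb/hcd.h for the real definitions).
 */
#define HCD_HW_ACCESSIBLE(hcd)	((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE))
#define HCD_POLL_RH(hcd)	((hcd)->flags & (1U << HCD_FLAG_POLL_RH))

static inline void example_start_rh_polling(struct usb_hcd *hcd)
{
	/* atomic replacement for the old "hcd->poll_rh = 1" assignment */
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
}
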
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index ca0e98d8e1f..3e5630369c3 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1521,7 +1521,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd)
return -ETIMEDOUT;
}
spin_unlock_irq(&imx21->lock);
- schedule_timeout(1);
+ schedule_timeout_uninterruptible(1);
spin_lock_irq(&imx21->lock);
}
spin_unlock_irqrestore(&imx21->lock, flags);
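
Editor's note: the imx21 change above is more than cosmetic. Bare schedule_timeout() only sleeps if the caller has already set a non-running task state, so inside this polling loop it returned immediately and busy-waited. Roughly what the _uninterruptible variant does, shown only to make the fix self-explanatory:

/* Roughly equivalent to schedule_timeout_uninterruptible(1). */
static signed long example_sleep_one_jiffy(void)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(1);	/* now really sleeps about one jiffy */
}
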
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index d995351f9be..0f97820e65b 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -8,29 +8,7 @@
/*
* Platform specific compile time options
*/
-#if defined(CONFIG_ARCH_KARO)
-#include <asm/arch/hardware.h>
-#include <asm/arch/pxa-regs.h>
-#include <asm/arch/karo.h>
-
-#define USE_32BIT 1
-
-
-/* These options are mutually eclusive */
-#define USE_PLATFORM_DELAY 1
-#define USE_NDELAY 0
-/*
- * MAX_ROOT_PORTS: Number of downstream ports
- *
- * The chip has two USB ports, one of which can be configured as
- * an USB device port, so the value of this constant is implementation
- * specific.
- */
-#define MAX_ROOT_PORTS 2
-#define DUMMY_DELAY_ACCESS do {} while (0)
-
-/* insert platform specific definitions for other machines here */
-#elif defined(CONFIG_BLACKFIN)
+#if defined(CONFIG_BLACKFIN)
#include <linux/io.h>
#define USE_32BIT 0
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index dbcafa29c77..bdba8c5d844 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -482,7 +482,6 @@ static int isp1760_run(struct usb_hcd *hcd)
u32 chipid;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
hcd->state = HC_STATE_RUNNING;
isp1760_enable_interrupts(hcd);
@@ -830,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
* almost immediately. With ISP1761, this register requires a delay of
* 195ns between a write and subsequent read (see section 15.1.1.3).
*/
+ mmiowb();
ndelay(195);
skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
@@ -871,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
* almost immediately. With ISP1761, this register requires a delay of
* 195ns between a write and subsequent read (see section 15.1.1.3).
*/
+ mmiowb();
ndelay(195);
skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
@@ -1450,7 +1451,7 @@ static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&priv->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &priv_to_hcd(priv)->flags)) {
+ if (!HCD_HW_ACCESSIBLE(priv_to_hcd(priv))) {
rc = -ESHUTDOWN;
goto done;
}
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index ec85d0c3cc3..3b28dbfca05 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -27,7 +27,7 @@
#endif
#ifdef CONFIG_PPC_OF
-static int of_isp1760_probe(struct of_device *dev,
+static int of_isp1760_probe(struct platform_device *dev,
const struct of_device_id *match)
{
struct usb_hcd *hcd;
@@ -95,7 +95,7 @@ release_reg:
return ret;
}
-static int of_isp1760_remove(struct of_device *dev)
+static int of_isp1760_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(&dev->dev);
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 8ad2441b028..36abd2baa3e 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -645,7 +645,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
hcd->product_desc,
hcd_name);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
size -= scnprintf (next, size,
"SUSPENDED (no register access)\n");
goto done;
@@ -687,7 +687,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
temp = scnprintf (next, size, "hub poll timer %s\n",
- ohci_to_hcd(ohci)->poll_rh ? "ON" : "off");
+ HCD_POLL_RH(ohci_to_hcd(ohci)) ? "ON" : "off");
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index fc576557d8a..c3b4ccc7337 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -212,7 +212,7 @@ static int ohci_urb_enqueue (
spin_lock_irqsave (&ohci->lock, flags);
/* don't submit to a dead HC */
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
retval = -ENODEV;
goto fail;
}
@@ -685,7 +685,7 @@ retry:
}
/* use rhsc irqs after khubd is fully initialized */
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
hcd->uses_new_polling = 1;
/* start controller operations */
@@ -822,7 +822,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
else if (ints & OHCI_INTR_RD) {
ohci_vdbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
spin_lock (&ohci->lock);
ohci_rh_resume (ohci);
@@ -1031,7 +1031,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
#endif
-#ifdef CONFIG_SOC_AU1X00
+#ifdef CONFIG_MIPS_ALCHEMY
#include "ohci-au1xxx.c"
#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
#endif
@@ -1095,6 +1095,11 @@ MODULE_LICENSE ("GPL");
#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
#endif
+#ifdef CONFIG_MACH_JZ4740
+#include "ohci-jz4740.c"
+#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
+#endif
+
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OMAP1_PLATFORM_DRIVER) && \
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 65cac8cc892..cddcda95b57 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -284,7 +284,7 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_suspend (ohci, 0);
@@ -302,7 +302,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_resume (ohci);
@@ -355,6 +355,11 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
ohci_readl(ohci, &ohci->regs->intrenable);
msleep(20);
}
+
+ /* Does the root hub have a port wakeup pending? */
+ if (ohci_readl(ohci, &ohci->regs->intrstatus) &
+ (OHCI_INTR_RD | OHCI_INTR_RHSC))
+ usb_hcd_resume_root_hub(hcd);
}
/* Carry out polling-, autostop-, and autoresume-related state changes */
@@ -364,7 +369,7 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int poll_rh = 1;
int rhsc_enable;
- /* Some broken controllers never turn off RHCS in the interrupt
+ /* Some broken controllers never turn off RHSC in the interrupt
* status register. For their sake we won't re-enable RHSC
* interrupts if the interrupt bit is already active.
*/
@@ -489,7 +494,7 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
unsigned long flags;
spin_lock_irqsave (&ohci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
/* undocumented erratum seen on at least rev D */
@@ -533,8 +538,12 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
}
}
- hcd->poll_rh = ohci_root_hub_state_changes(ohci, changed,
- any_connected, rhsc_status);
+ if (ohci_root_hub_state_changes(ohci, changed,
+ any_connected, rhsc_status))
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
done:
spin_unlock_irqrestore (&ohci->lock, flags);
@@ -701,7 +710,7 @@ static int ohci_hub_control (
u32 temp;
int retval = 0;
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
return -ESHUTDOWN;
switch (typeReq) {
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
new file mode 100644
index 00000000000..10e1872f3ab
--- /dev/null
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+struct jz4740_ohci_hcd {
+ struct ohci_hcd ohci_hcd;
+
+ struct regulator *vbus;
+ bool vbus_enabled;
+ struct clk *clk;
+};
+
+static inline struct jz4740_ohci_hcd *hcd_to_jz4740_hcd(struct usb_hcd *hcd)
+{
+ return (struct jz4740_ohci_hcd *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *jz4740_hcd_to_hcd(struct jz4740_ohci_hcd *jz4740_ohci)
+{
+ return container_of((void *)jz4740_ohci, struct usb_hcd, hcd_priv);
+}
+
+static int ohci_jz4740_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ohci->num_ports = 1;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "Can not start %s",
+ hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+ return 0;
+}
+
+static int ohci_jz4740_set_vbus_power(struct jz4740_ohci_hcd *jz4740_ohci,
+ bool enabled)
+{
+ int ret = 0;
+
+ if (!jz4740_ohci->vbus)
+ return 0;
+
+ if (enabled && !jz4740_ohci->vbus_enabled) {
+ ret = regulator_enable(jz4740_ohci->vbus);
+ if (ret)
+ dev_err(jz4740_hcd_to_hcd(jz4740_ohci)->self.controller,
+ "Could not power vbus\n");
+ } else if (!enabled && jz4740_ohci->vbus_enabled) {
+ ret = regulator_disable(jz4740_ohci->vbus);
+ }
+
+ if (ret == 0)
+ jz4740_ohci->vbus_enabled = enabled;
+
+ return ret;
+}
+
+static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
+ int ret = 0;
+
+ switch (typeReq) {
+ case SetHubFeature:
+ if (wValue == USB_PORT_FEAT_POWER)
+ ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true);
+ break;
+ case ClearHubFeature:
+ if (wValue == USB_PORT_FEAT_POWER)
+ ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+
+static const struct hc_driver ohci_jz4740_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "JZ4740 OHCI",
+ .hcd_priv_size = sizeof(struct jz4740_ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .start = ohci_jz4740_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_jz4740_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static __devinit int jz4740_ohci_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct usb_hcd *hcd;
+ struct jz4740_ohci_hcd *jz4740_ohci;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get platform irq\n");
+ return irq;
+ }
+
+ hcd = usb_create_hcd(&ohci_jz4740_hc_driver, &pdev->dev, "jz4740");
+ if (!hcd) {
+ dev_err(&pdev->dev, "Failed to create hcd.\n");
+ return -ENOMEM;
+ }
+
+ jz4740_ohci = hcd_to_jz4740_hcd(hcd);
+
+ res = request_mem_region(res->start, resource_size(res), hcd_name);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to request mem region.\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to ioremap registers.\n");
+ ret = -EBUSY;
+ goto err_release_mem;
+ }
+
+ jz4740_ohci->clk = clk_get(&pdev->dev, "uhc");
+ if (IS_ERR(jz4740_ohci->clk)) {
+ ret = PTR_ERR(jz4740_ohci->clk);
+ dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
+ goto err_iounmap;
+ }
+
+ jz4740_ohci->vbus = regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(jz4740_ohci->vbus))
+ jz4740_ohci->vbus = NULL;
+
+ clk_set_rate(jz4740_ohci->clk, 48000000);
+ clk_enable(jz4740_ohci->clk);
+ if (jz4740_ohci->vbus)
+ ohci_jz4740_set_vbus_power(jz4740_ohci, true);
+
+ platform_set_drvdata(pdev, hcd);
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ ret = usb_add_hcd(hcd, irq, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret);
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ platform_set_drvdata(pdev, NULL);
+ if (jz4740_ohci->vbus) {
+ regulator_disable(jz4740_ohci->vbus);
+ regulator_put(jz4740_ohci->vbus);
+ }
+ clk_disable(jz4740_ohci->clk);
+
+ clk_put(jz4740_ohci->clk);
+err_iounmap:
+ iounmap(hcd->regs);
+err_release_mem:
+ release_mem_region(res->start, resource_size(res));
+err_free:
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+static __devexit int jz4740_ohci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
+
+ usb_remove_hcd(hcd);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (jz4740_ohci->vbus) {
+ regulator_disable(jz4740_ohci->vbus);
+ regulator_put(jz4740_ohci->vbus);
+ }
+
+ clk_disable(jz4740_ohci->clk);
+ clk_put(jz4740_ohci->clk);
+
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver ohci_hcd_jz4740_driver = {
+ .probe = jz4740_ohci_probe,
+ .remove = __devexit_p(jz4740_ohci_remove),
+ .driver = {
+ .name = "jz4740-ohci",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_ALIAS("platform:jz4740-ohci");
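
Editor's note: the glue file above is pulled in from ohci-hcd.c through the PLATFORM_DRIVER define added earlier in this patch, so ohci_hcd_jz4740_driver is registered by the generic OHCI module init. For completeness, a hypothetical board-code sketch of the matching platform device; the register base and IRQ number below are placeholders, not values taken from the patch:

/* Hypothetical board-file sketch; MMIO base and IRQ are placeholders. */
static struct resource jz4740_uhc_resources[] = {
	{ .start = 0x13030000, .end = 0x13030fff, .flags = IORESOURCE_MEM },
	{ .start = 5,          .end = 5,          .flags = IORESOURCE_IRQ },
};

static struct platform_device jz4740_uhc_device = {
	.name		= "jz4740-ohci",	/* matches the driver name above */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_uhc_resources),
	.resource	= jz4740_uhc_resources,
};
/* board init would then call platform_device_register(&jz4740_uhc_device); */
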
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index b8a1148f248..6bdc8b25a6a 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -392,7 +392,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
#ifdef CONFIG_PM
-static int ohci_pci_suspend(struct usb_hcd *hcd)
+static int ohci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index cd74bbdd007..653d6a60edb 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -329,7 +329,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
- normal_i2c);
+ normal_i2c, NULL);
i2c_put_adapter(i2c_adap);
if (!isp1301_i2c_client) {
err("failed to connect I2C to ISP1301 USB Transceiver");
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index df165917412..b2c2dbf0876 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -81,7 +81,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
static int __devinit
-ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
+ohci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -183,7 +183,7 @@ err_rmr:
return rv;
}
-static int ohci_hcd_ppc_of_remove(struct of_device *op)
+static int ohci_hcd_ppc_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
dev_set_drvdata(&op->dev, NULL);
@@ -201,7 +201,7 @@ static int ohci_hcd_ppc_of_remove(struct of_device *op)
return 0;
}
-static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
+static int ohci_hcd_ppc_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
index 23fd6a886bd..48ee6943bf3 100644
--- a/drivers/usb/host/ohci-ssb.c
+++ b/drivers/usb/host/ohci-ssb.c
@@ -93,8 +93,11 @@ static void ssb_ohci_detach(struct ssb_device *dev)
{
struct usb_hcd *hcd = ssb_get_drvdata(dev);
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
usb_remove_hcd(hcd);
iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
ssb_device_disable(dev, 0);
}
@@ -106,10 +109,52 @@ static int ssb_ohci_attach(struct ssb_device *dev)
int err = -ENOMEM;
u32 tmp, flags = 0;
- if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV)
- flags |= SSB_OHCI_TMSLOW_HOSTMODE;
+ if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ return -EOPNOTSUPP;
- ssb_device_enable(dev, flags);
+ if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV) {
+ /* Put the device into host-mode. */
+ flags |= SSB_OHCI_TMSLOW_HOSTMODE;
+ ssb_device_enable(dev, flags);
+ } else if (dev->id.coreid == SSB_DEV_USB20_HOST) {
+ /*
+ * USB 2.0 special considerations:
+ *
+ * In addition to the standard SSB reset sequence, the Host
+ * Control Register must be programmed to bring the USB core
+ * and various phy components out of reset.
+ */
+ ssb_device_enable(dev, 0);
+ ssb_write32(dev, 0x200, 0x7ff);
+
+ /* Change Flush control reg */
+ tmp = ssb_read32(dev, 0x400);
+ tmp &= ~8;
+ ssb_write32(dev, 0x400, tmp);
+ tmp = ssb_read32(dev, 0x400);
+
+ /* Change Shim control reg */
+ tmp = ssb_read32(dev, 0x304);
+ tmp &= ~0x100;
+ ssb_write32(dev, 0x304, tmp);
+ tmp = ssb_read32(dev, 0x304);
+
+ udelay(1);
+
+ /* Work around for 5354 failures */
+ if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
+ /* Change syn01 reg */
+ tmp = 0x00fe00fe;
+ ssb_write32(dev, 0x894, tmp);
+
+ /* Change syn03 reg */
+ tmp = ssb_read32(dev, 0x89c);
+ tmp |= 0x1;
+ ssb_write32(dev, 0x89c, tmp);
+ }
+ } else
+ ssb_device_enable(dev, 0);
hcd = usb_create_hcd(&ssb_ohci_hc_driver, dev->dev,
dev_name(dev->dev));
@@ -200,6 +245,7 @@ static int ssb_ohci_resume(struct ssb_device *dev)
static const struct ssb_device_id ssb_ohci_table[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, ssb_ohci_table);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index f608dfd09a8..d9c85a29273 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1641,8 +1641,7 @@ static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
#endif
spin_lock_irqsave(&oxu->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &oxu_to_hcd(oxu)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
rc = -ESHUTDOWN;
goto done;
}
@@ -2209,8 +2208,7 @@ static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
spin_lock_irqsave(&oxu->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &oxu_to_hcd(oxu)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
status = -ESHUTDOWN;
goto done;
}
@@ -2715,7 +2713,6 @@ static int oxu_run(struct usb_hcd *hcd)
u32 temp, hcc_params;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
/* EHCI spec section 4.1 */
retval = ehci_reset(oxu);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index bcf9f0e809d..990f06b89ea 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -813,8 +813,11 @@ static int sl811h_urb_enqueue(
#endif
/* avoid all allocations within spinlocks */
- if (!hep->hcpriv)
+ if (!hep->hcpriv) {
ep = kzalloc(sizeof *ep, mem_flags);
+ if (ep == NULL)
+ return -ENOMEM;
+ }
spin_lock_irqsave(&sl811->lock, flags);
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 58cb73c8420..0e13a00eb2e 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -20,7 +20,6 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -43,8 +42,6 @@ MODULE_LICENSE("GPL");
/* VARIABLES */
/*====================================================================*/
-static const char driver_name[DEV_NAME_LEN] = "sl811_cs";
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
} local_info_t;
@@ -165,16 +162,16 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
pcmcia_disable_device(p_dev);
return -ENODEV;
@@ -192,7 +189,7 @@ static int sl811_cs_config(struct pcmcia_device *link)
goto failed;
/* require an IRQ and two registers */
- if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
+ if (resource_size(link->resource[0]) < 2)
goto failed;
if (!link->irq)
@@ -207,11 +204,10 @@ static int sl811_cs_config(struct pcmcia_device *link)
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
printk(", irq %d", link->irq);
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
+ printk(", io %pR", link->resource[0]);
printk("\n");
- if (sl811_hc_init(parent, link->io.BasePort1, link->irq)
+ if (sl811_hc_init(parent, link->resource[0]->start, link->irq)
< 0) {
failed:
printk(KERN_WARNING "sl811_cs_config failed\n");
@@ -246,7 +242,7 @@ MODULE_DEVICE_TABLE(pcmcia, sl811_ids);
static struct pcmcia_driver sl811_cs_driver = {
.owner = THIS_MODULE,
.drv = {
- .name = (char *)driver_name,
+ .name = "sl811_cs",
},
.probe = sl811_cs_probe,
.remove = sl811_cs_detach,
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 98cf0b26b96..6e7fb5f38db 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -17,7 +17,6 @@
#include "uhci-hcd.h"
-#define uhci_debug_operations (* (const struct file_operations *) NULL)
static struct dentry *uhci_debugfs_root;
#ifdef DEBUG
@@ -495,18 +494,16 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
{
struct uhci_hcd *uhci = inode->i_private;
struct uhci_debug *up;
- int ret = -ENOMEM;
unsigned long flags;
- lock_kernel();
up = kmalloc(sizeof(*up), GFP_KERNEL);
if (!up)
- goto out;
+ return -ENOMEM;
up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
if (!up->data) {
kfree(up);
- goto out;
+ return -ENOMEM;
}
up->size = 0;
@@ -517,10 +514,7 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
file->private_data = up;
- ret = 0;
-out:
- unlock_kernel();
- return ret;
+ return 0;
}
static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
@@ -528,9 +522,9 @@ static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
struct uhci_debug *up;
loff_t new = -1;
- lock_kernel();
up = file->private_data;
+ /* XXX: atomic 64bit seek access, but that needs to be fixed in the VFS */
switch (whence) {
case 0:
new = off;
@@ -539,11 +533,10 @@ static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
new = file->f_pos + off;
break;
}
- if (new < 0 || new > up->size) {
- unlock_kernel();
+
+ if (new < 0 || new > up->size)
return -EINVAL;
- }
- unlock_kernel();
+
return (file->f_pos = new);
}
@@ -564,7 +557,6 @@ static int uhci_debug_release(struct inode *inode, struct file *file)
return 0;
}
-#undef uhci_debug_operations
static const struct file_operations uhci_debug_operations = {
.owner = THIS_MODULE,
.open = uhci_debug_open,
@@ -572,6 +564,7 @@ static const struct file_operations uhci_debug_operations = {
.read = uhci_debug_read,
.release = uhci_debug_release,
};
+#define UHCI_DEBUG_OPS
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6637e52736d..f52d04db28f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -140,7 +140,7 @@ static void finish_reset(struct uhci_hcd *uhci)
uhci->rh_state = UHCI_RH_RESET;
uhci->is_stopped = UHCI_IS_STOPPED;
uhci_to_hcd(uhci)->state = HC_STATE_HALT;
- uhci_to_hcd(uhci)->poll_rh = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
uhci->dead = 0; /* Full reset resurrects the controller */
}
@@ -176,6 +176,8 @@ static void check_and_reset_hc(struct uhci_hcd *uhci)
*/
static void configure_hc(struct uhci_hcd *uhci)
{
+ struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
+
/* Set the frame length to the default: 1 ms exactly */
outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);
@@ -191,8 +193,11 @@ static void configure_hc(struct uhci_hcd *uhci)
mb();
/* Enable PIRQ */
- pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
- USBLEGSUP_DEFAULT);
+ pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
+
+ /* Disable platform-specific non-PME# wakeup */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ pci_write_config_byte(pdev, USBRES_INTEL, 0);
}
@@ -344,7 +349,10 @@ __acquires(uhci->lock)
/* If interrupts don't work and remote wakeup is enabled then
* the suspended root hub needs to be polled.
*/
- uhci_to_hcd(uhci)->poll_rh = (!int_enable && wakeup_enable);
+ if (!int_enable && wakeup_enable)
+ set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
+ else
+ clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
uhci_scan_schedule(uhci);
uhci_fsbr_off(uhci);
@@ -363,7 +371,7 @@ static void start_rh(struct uhci_hcd *uhci)
uhci->io_addr + USBINTR);
mb();
uhci->rh_state = UHCI_RH_RUNNING;
- uhci_to_hcd(uhci)->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
}
static void wakeup_rh(struct uhci_hcd *uhci)
@@ -589,7 +597,7 @@ static int uhci_start(struct usb_hcd *hcd)
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int retval = -EBUSY;
int i;
- struct dentry *dentry;
+ struct dentry __maybe_unused *dentry;
hcd->uses_new_polling = 1;
@@ -599,18 +607,16 @@ static int uhci_start(struct usb_hcd *hcd)
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
- if (DEBUG_CONFIGURED) {
- dentry = debugfs_create_file(hcd->self.bus_name,
- S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
- uhci, &uhci_debug_operations);
- if (!dentry) {
- dev_err(uhci_dev(uhci), "couldn't create uhci "
- "debugfs entry\n");
- retval = -ENOMEM;
- goto err_create_debug_entry;
- }
- uhci->dentry = dentry;
+#ifdef UHCI_DEBUG_OPS
+ dentry = debugfs_create_file(hcd->self.bus_name,
+ S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
+ uhci, &uhci_debug_operations);
+ if (!dentry) {
+ dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n");
+ return -ENOMEM;
}
+ uhci->dentry = dentry;
+#endif
uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
UHCI_NUMFRAMES * sizeof(*uhci->frame),
@@ -691,7 +697,9 @@ static int uhci_start(struct usb_hcd *hcd)
configure_hc(uhci);
uhci->is_initialized = 1;
+ spin_lock_irq(&uhci->lock);
start_rh(uhci);
+ spin_unlock_irq(&uhci->lock);
return 0;
/*
@@ -722,7 +730,6 @@ err_alloc_frame_cpu:
err_alloc_frame:
debugfs_remove(uhci->dentry);
-err_create_debug_entry:
return retval;
}
@@ -731,7 +738,7 @@ static void uhci_stop(struct usb_hcd *hcd)
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
spin_lock_irq(&uhci->lock);
- if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) && !uhci->dead)
+ if (HCD_HW_ACCESSIBLE(hcd) && !uhci->dead)
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
@@ -748,7 +755,7 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
int rc = 0;
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else if (uhci->dead)
; /* Dead controllers tell no tales */
@@ -775,7 +782,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
int rc = 0;
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else if (!uhci->dead)
wakeup_rh(uhci);
@@ -783,15 +790,16 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
return rc;
}
-static int uhci_pci_suspend(struct usb_hcd *hcd)
+static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
int rc = 0;
dev_dbg(uhci_dev(uhci), "%s\n", __func__);
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done_okay; /* Already suspended or dead */
if (uhci->rh_state > UHCI_RH_SUSPENDED) {
@@ -803,11 +811,15 @@ static int uhci_pci_suspend(struct usb_hcd *hcd)
/* All PCI host controllers are required to disable IRQ generation
* at the source, so we must turn off PIRQ.
*/
- pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
- mb();
- hcd->poll_rh = 0;
-
- /* FIXME: Enable non-PME# remote wakeup? */
+ pci_write_config_word(pdev, USBLEGSUP, 0);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* Enable platform-specific non-PME# wakeup */
+ if (do_wakeup) {
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ pci_write_config_byte(pdev, USBRES_INTEL,
+ USBPORT1EN | USBPORT2EN);
+ }
done_okay:
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -826,7 +838,6 @@ static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
* even if the controller was dead.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- mb();
spin_lock_irq(&uhci->lock);
@@ -834,8 +845,6 @@ static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
if (hibernated)
uhci_hc_died(uhci);
- /* FIXME: Disable non-PME# remote wakeup? */
-
/* The firmware or a boot kernel may have changed the controller
* settings during a system wakeup. Check it and reconfigure
* to avoid problems.
@@ -845,22 +854,20 @@ static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
/* If the controller was dead before, it's back alive now */
configure_hc(uhci);
- if (uhci->rh_state == UHCI_RH_RESET) {
-
- /* The controller had to be reset */
+ /* Tell the core if the controller had to be reset */
+ if (uhci->rh_state == UHCI_RH_RESET)
usb_root_hub_lost_power(hcd->self.root_hub);
- suspend_rh(uhci, UHCI_RH_SUSPENDED);
- }
spin_unlock_irq(&uhci->lock);
/* If interrupts don't work and remote wakeup is enabled then
* the suspended root hub needs to be polled.
*/
- if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup) {
- hcd->poll_rh = 1;
- usb_hcd_poll_rh_status(hcd);
- }
+ if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup)
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* Does the root hub have a port wakeup pending? */
+ usb_hcd_poll_rh_status(hcd);
return 0;
}
#endif
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 26bd1b2bcbf..49bf2790f9c 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -67,12 +67,17 @@
#define USBPORTSC_RES3 0x4000 /* reserved, write zeroes */
#define USBPORTSC_RES4 0x8000 /* reserved, write zeroes */
-/* Legacy support register */
+/* PCI legacy support register */
#define USBLEGSUP 0xc0
#define USBLEGSUP_DEFAULT 0x2000 /* only PIRQ enable set */
#define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
#define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
+/* PCI Intel-specific resume-enable register */
+#define USBRES_INTEL 0xc4
+#define USBPORT1EN 0x01
+#define USBPORT2EN 0x02
+
#define UHCI_PTR_BITS cpu_to_le32(0x000F)
#define UHCI_PTR_TERM cpu_to_le32(0x0001)
#define UHCI_PTR_QH cpu_to_le32(0x0002)
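
Editor's note: the new USBRES_INTEL definitions pair with the uhci-hcd.c suspend/resume hunks above. PIRQ generation is always turned off on suspend, and on Intel controllers the per-port resume-enable bits are set only when wakeup was requested; configure_hc() clears them again at resume. A condensed restatement of the suspend-side logic, using only the calls that appear in those hunks:

/* Condensed restatement of the suspend-side logic from uhci_pci_suspend(). */
static void example_uhci_arm_wakeup(struct pci_dev *pdev, bool do_wakeup)
{
	pci_write_config_word(pdev, USBLEGSUP, 0);	/* stop PIRQ generation */
	if (do_wakeup && pdev->vendor == PCI_VENDOR_ID_INTEL)
		pci_write_config_byte(pdev, USBRES_INTEL,
				      USBPORT1EN | USBPORT2EN);
}
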
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 8270055848c..6d59c0f77f2 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -190,7 +190,7 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
spin_lock_irqsave(&uhci->lock, flags);
uhci_scan_schedule(uhci);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done;
uhci_check_ports(uhci);
@@ -200,7 +200,7 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
case UHCI_RH_SUSPENDING:
case UHCI_RH_SUSPENDED:
/* if port change, ask to be resumed */
- if (status)
+ if (status || uhci->resuming_ports)
usb_hcd_resume_root_hub(hcd);
break;
@@ -246,7 +246,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wPortChange, wPortStatus;
unsigned long flags;
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
return -ETIMEDOUT;
spin_lock_irqsave(&uhci->lock, flags);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index acd582c0280..d3ade401848 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -565,7 +565,7 @@ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
qh->unlink_frame = uhci->frame_number;
/* Force an interrupt so we know when the QH is fully unlinked */
- if (list_empty(&uhci->skel_unlink_qh->node))
+ if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
uhci_set_next_interrupt(uhci);
/* Move the QH from its old list to the end of the unlinking list */
@@ -1667,7 +1667,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
qh->advance_jiffies = jiffies;
goto done;
}
- ret = 0;
+ ret = uhci->is_stopped;
}
/* The queue hasn't advanced; check for timeout */
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index e0d3401285c..72b6892fda6 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -68,7 +68,7 @@ static int whc_start(struct usb_hcd *usb_hcd)
whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
usb_hcd->uses_new_polling = 1;
- usb_hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
out:
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index ab5a14fbfee..dc0ab8382f5 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -475,7 +475,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
|| (prev_end & (WHCI_PAGE_SIZE-1))
|| (dma_addr & (WHCI_PAGE_SIZE-1))
|| std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
- if (std->len % qset->max_packet != 0)
+ if (std && std->len % qset->max_packet != 0)
return -EINVAL;
std = qset_new_std(whc, qset, urb, mem_flags);
if (std == NULL) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 2eb658d2639..4e51343ddff 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -391,49 +391,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
return ep->stream_info->stream_rings[stream_id];
}
-struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id)
-{
- struct xhci_virt_ep *ep;
-
- ep = &xhci->devs[slot_id]->eps[ep_index];
- /* Common case: no streams */
- if (!(ep->ep_state & EP_HAS_STREAMS))
- return ep->ring;
-
- if (stream_id == 0) {
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has streams, "
- "but URB has no stream ID.\n",
- slot_id, ep_index);
- return NULL;
- }
-
- if (stream_id < ep->stream_info->num_streams)
- return ep->stream_info->stream_rings[stream_id];
-
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has "
- "stream IDs 1 to %u allocated, "
- "but stream ID %u is requested.\n",
- slot_id, ep_index,
- ep->stream_info->num_streams - 1,
- stream_id);
- return NULL;
-}
-
-/* Get the right ring for the given URB.
- * If the endpoint supports streams, boundary check the URB's stream ID.
- * If the endpoint doesn't support streams, return the singular endpoint ring.
- */
-struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb)
-{
- return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
- xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
-}
-
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
unsigned int num_streams,
@@ -1112,8 +1069,18 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
/* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ /*
+ * Isochronous endpoint ring needs bigger size because one isoc URB
+ * carries multiple packets and it will insert multiple tds to the
+ * ring.
+ * This should be replaced with dynamic ring resizing in the future.
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 8, true, mem_flags);
+ else
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1124,6 +1091,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
}
+ virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
@@ -1389,6 +1357,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
return command;
}
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+{
+ int last;
+
+ if (!urb_priv)
+ return;
+
+ last = urb_priv->length - 1;
+ if (last >= 0) {
+ int i;
+ for (i = 0; i <= last; i++)
+ kfree(urb_priv->td[i]);
+ }
+ kfree(urb_priv);
+}
+
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
@@ -1588,7 +1572,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
unsigned int num_tests;
int i, ret;
- num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
+ num_tests = ARRAY_SIZE(simple_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
xhci->event_ring->first_seg,
@@ -1601,7 +1585,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
return ret;
}
- num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
+ num_tests = ARRAY_SIZE(complex_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
complex_test_vector[i].input_seg,
@@ -1617,6 +1601,29 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
return 0;
}
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+ u64 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+ /* Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+ xhci_dbg(xhci, "// Write event ring dequeue pointer, "
+ "preserving EHB bit\n");
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ &xhci->ir_set->erst_dequeue);
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 11482b6b938..f7efe025bed 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -53,6 +53,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ u32 temp;
hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
@@ -93,6 +94,14 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
return retval;
xhci_dbg(xhci, "Reset complete\n");
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ if (HCC_64BIT_ADDR(temp)) {
+ xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+ } else {
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ }
+
xhci_dbg(xhci, "Calling HCD init\n");
/* Initialize HCD and host controller data structures. */
retval = xhci_init(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bfc99a93945..48e60d166ff 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
- *trb = (*trb)++;
+ (*trb)++;
}
}
@@ -301,28 +301,6 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
return 1;
}
-void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
- u64 temp;
- dma_addr_t deq;
-
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0 && !in_interrupt())
- xhci_warn(xhci, "WARN something wrong with SW event ring "
- "dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp &= ERST_PTR_MASK;
- /* Don't clear the EHB bit (which is RW1C) because
- * there might be more events to service.
- */
- temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
-}
-
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
@@ -359,11 +337,6 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
field = xhci_readl(xhci, db_addr) & DB_MASK;
field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
xhci_writel(xhci, field, db_addr);
- /* Flush PCI posted writes - FIXME Matthew Wilcox says this
- * isn't time-critical and we shouldn't make the CPU wait for
- * the flush.
- */
- xhci_readl(xhci, db_addr);
}
}
@@ -419,6 +392,50 @@ static struct xhci_segment *find_trb_seg(
return cur_seg;
}
+
+static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+ xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
/*
* Move the xHC's endpoint ring dequeue pointer past cur_td.
* Record the new state of the xHC's endpoint ring dequeue segment,
@@ -578,16 +595,24 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status, char *adjective)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct urb *urb;
+ struct urb_priv *urb_priv;
- cur_td->urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+ urb = cur_td->urb;
+ urb_priv = urb->hcpriv;
+ urb_priv->td_cnt++;
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(hcd, cur_td->urb, status);
- kfree(cur_td);
- spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
+ /* Only giveback urb when this is the last td in urb */
+ if (urb_priv->td_cnt == urb_priv->length) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ xhci_urb_free_priv(xhci, urb_priv);
+ spin_lock(&xhci->lock);
+ xhci_dbg(xhci, "%s URB given back\n", adjective);
+ }
}
/*
@@ -1132,7 +1157,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);
spin_unlock(&xhci->lock);
/* Pass this up to the core */
@@ -1258,6 +1282,425 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
}
/*
+ * Finish the td processing, remove the td from td list;
+ * Return 1 if the urb can be given back.
+ */
+static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status, bool skip)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct urb *urb = NULL;
+ struct xhci_ep_ctx *ep_ctx;
+ int ret = 0;
+ struct urb_priv *urb_priv;
+ u32 trb_comp_code;
+
+ slot_id = TRB_TO_SLOT_ID(event->flags);
+ xdev = xhci->devs[slot_id];
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+ if (skip)
+ goto td_cleanup;
+
+ if (trb_comp_code == COMP_STOP_INVAL ||
+ trb_comp_code == COMP_STOP) {
+ /* The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ return 0;
+ } else {
+ if (trb_comp_code == COMP_STALL) {
+ /* The transfer is completed from the driver's
+ * perspective, but we need to issue a set dequeue
+ * command for this stalled endpoint to move the dequeue
+ * pointer past the TD. We can't do that here because
+ * the halt condition must be cleared first. Let the
+ * USB class driver clear the stall later.
+ */
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+ /* Other types of errors halt the endpoint, but the
+ * class driver doesn't call usb_reset_endpoint() unless
+ * the error is -EPIPE. Clear the halted status in the
+ * xHCI hardware manually.
+ */
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, ep_ring->stream_id,
+ td, event_trb);
+ } else {
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+ }
+
+td_cleanup:
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than
+ * the buffer length, urb->actual_length will be a very big
+ * number (since it's unsigned). Play it safe and say we didn't
+ * transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB transfer length is wrong, "
+ "xHC issue? req. len = %u, "
+ "act. len = %u\n",
+ urb->transfer_buffer_length,
+ urb->actual_length);
+ urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ list_del(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del(&td->cancelled_td_list);
+
+ urb_priv->td_cnt++;
+ /* Give the URB back when all of its TDs have completed */
+ if (urb_priv->td_cnt == urb_priv->length)
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * Process control tds, update urb status and actual_length.
+ */
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 trb_comp_code;
+
+ slot_id = TRB_TO_SLOT_ID(event->flags);
+ xdev = xhci->devs[slot_id];
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+ xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ if (event_trb == ep_ring->dequeue) {
+ xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
+ "without IOC set??\n");
+ *status = -ESHUTDOWN;
+ } else if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN: Success on ctrl data TRB "
+ "without IOC set??\n");
+ *status = -ESHUTDOWN;
+ } else {
+ xhci_dbg(xhci, "Successful control transfer!\n");
+ *status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ break;
+ default:
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
+ break;
+ xhci_dbg(xhci, "TRB error code %u, "
+ "halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
+ /* else fall through */
+ case COMP_STALL:
+ /* Did we transfer part of the data (middle) phase? */
+ if (event_trb != ep_ring->dequeue &&
+ event_trb != td->last_trb)
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length
+ - TRB_LEN(event->transfer_len);
+ else
+ td->urb->actual_length = 0;
+
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, 0, td, event_trb);
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
+ }
+ /*
+ * Did we transfer any data, despite the errors that might have
+ * happened? I.e. did we get past the setup stage?
+ */
+ if (event_trb != ep_ring->dequeue) {
+ /* The event was for the status stage */
+ if (event_trb == td->last_trb) {
+ if (td->urb->actual_length != 0) {
+ /* Don't overwrite a previously set error code
+ */
+ if ((*status == -EINPROGRESS || *status == 0) &&
+ (td->urb->transfer_flags
+ & URB_SHORT_NOT_OK))
+ /* Did we already see a short data
+ * stage? */
+ *status = -EREMOTEIO;
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ }
+ } else {
+ /* Maybe the event was for the data stage? */
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ /* We didn't stop on a link TRB in the middle */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ xhci_dbg(xhci, "Waiting for status "
+ "stage event\n");
+ return 0;
+ }
+ }
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
+ * Process isochronous tds, update urb packet status and actual_length.
+ */
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ int idx;
+ int len = 0;
+ int skip_td = 0;
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+ u32 trb_comp_code;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ urb_priv = td->urb->hcpriv;
+ idx = urb_priv->td_cnt;
+
+ if (ep->skip) {
+ /* The transfer is partly done */
+ *status = -EXDEV;
+ td->urb->iso_frame_desc[idx].status = -EXDEV;
+ } else {
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ td->urb->iso_frame_desc[idx].status = 0;
+ xhci_dbg(xhci, "Successful isoc transfer!\n");
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ td->urb->iso_frame_desc[idx].status =
+ -EREMOTEIO;
+ else
+ td->urb->iso_frame_desc[idx].status = 0;
+ break;
+ case COMP_BW_OVER:
+ td->urb->iso_frame_desc[idx].status = -ECOMM;
+ skip_td = 1;
+ break;
+ case COMP_BUFF_OVER:
+ case COMP_BABBLE:
+ td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
+ skip_td = 1;
+ break;
+ case COMP_STALL:
+ td->urb->iso_frame_desc[idx].status = -EPROTO;
+ skip_td = 1;
+ break;
+ case COMP_STOP:
+ case COMP_STOP_INVAL:
+ break;
+ default:
+ td->urb->iso_frame_desc[idx].status = -1;
+ break;
+ }
+ }
+
+ /* calc actual length */
+ if (ep->skip) {
+ td->urb->iso_frame_desc[idx].actual_length = 0;
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
+ }
+
+ if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
+ td->urb->iso_frame_desc[idx].actual_length =
+ td->urb->iso_frame_desc[idx].length;
+ td->urb->actual_length +=
+ td->urb->iso_frame_desc[idx].length;
+ } else {
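+ /* Walk the TRBs that completed before the event TRB and
+ * add up the bytes they transferred.
+ */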
+ for (cur_trb = ep_ring->dequeue,
+ cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ len +=
+ TRB_LEN(cur_trb->generic.field[2]);
+ }
+ len += TRB_LEN(cur_trb->generic.field[2]) -
+ TRB_LEN(event->transfer_len);
+
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ td->urb->iso_frame_desc[idx].actual_length = len;
+ td->urb->actual_length += len;
+ }
+ }
+
+ if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
+ *status = 0;
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
+ * Process bulk and interrupt tds, update urb status and actual_length.
+ */
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+ u32 trb_comp_code;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ } else {
+ if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+ xhci_dbg(xhci, "Successful bulk "
+ "transfer!\n");
+ else
+ xhci_dbg(xhci, "Successful interrupt "
+ "transfer!\n");
+ *status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ dev_dbg(&td->urb->dev->dev,
+ "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(event->transfer_len));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+ if (TRB_LEN(event->transfer_len) != 0) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+ TRB_LEN(event->transfer_len));
+ td->urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ /* Don't overwrite a previously set error code */
+ if (*status == -EINPROGRESS) {
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ /* Ignore a short packet completion if the
+ * untransferred length was zero.
+ */
+ if (*status == -EREMOTEIO)
+ *status = 0;
+ }
+ } else {
+ /* Slow path - walk the list, starting from the dequeue
+ * pointer, to get the actual length transferred.
+ */
+ td->urb->actual_length = 0;
+ for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+ cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]);
+ }
+ /* If the ring didn't stop on a Link or No-op TRB, add
+ * in the actual bytes transferred from the Normal TRB
+ */
+ if (trb_comp_code != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]) -
+ TRB_LEN(event->transfer_len);
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
* At this point, the host controller is probably hosed and should be reset.
@@ -1276,10 +1719,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
union xhci_trb *event_trb;
struct urb *urb = NULL;
int status = -EINPROGRESS;
+ struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ int ret = 0;
- xhci_dbg(xhci, "In %s\n", __func__);
slot_id = TRB_TO_SLOT_ID(event->flags);
xdev = xhci->devs[slot_id];
if (!xdev) {
@@ -1293,51 +1737,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+ if (!ep_ring ||
+ (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
return -ENODEV;
}
event_dma = event->buffer;
- /* This TRB should be in the TD at the head of this ring's TD list */
- xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
- if (list_empty(&ep_ring->td_list)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
- TRB_TO_SLOT_ID(event->flags), ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
- urb = NULL;
- goto cleanup;
- }
- xhci_dbg(xhci, "%s - getting list entry\n", __func__);
- td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
-
- /* Is this a TRB in the currently executing TD? */
- xhci_dbg(xhci, "%s - looking for TD\n", __func__);
- event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
- td->last_trb, event_dma);
- xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
- if (!event_seg) {
- /* HC is busted, give up! */
- xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
- return -ESHUTDOWN;
- }
- event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
- xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
- lower_32_bits(event->buffer));
- xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
- upper_32_bits(event->buffer));
- xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
- (unsigned int) event->transfer_len);
- xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
- (unsigned int) event->flags);
-
- /* Look for common error cases */
trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ /* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
@@ -1373,278 +1782,156 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
status = -ENOSR;
break;
+ case COMP_BW_OVER:
+ xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
+ break;
+ case COMP_BUFF_OVER:
+ xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
+ break;
+ case COMP_UNDERRUN:
+ /*
+ * When the isoch ring is empty, the xHC will generate
+ * a Ring Overrun Event for an IN isoch endpoint or a Ring
+ * Underrun Event for an OUT isoch endpoint.
+ */
+ xhci_dbg(xhci, "underrun event on endpoint\n");
+ if (!list_empty(&ep_ring->td_list))
+ xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
+ "still with TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ goto cleanup;
+ case COMP_OVERRUN:
+ xhci_dbg(xhci, "overrun event on endpoint\n");
+ if (!list_empty(&ep_ring->td_list))
+ xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
+ "still with TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ goto cleanup;
+ case COMP_MISSED_INT:
+ /*
+ * When a Missed Service Error is encountered, the xHC may have
+ * skipped one or more isoc TDs on the ring.
+ * Set the endpoint's skip flag; the missed TDs will be completed
+ * as short transfers the next time this ring is processed.
+ */
+ ep->skip = true;
+ xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+ goto cleanup;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
- xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
- urb = NULL;
+ xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
+ "busted\n");
goto cleanup;
}
- /* Now update the urb's actual_length and give back to the core */
- /* Was this a control transfer? */
- if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
- switch (trb_comp_code) {
- case COMP_SUCCESS:
- if (event_trb == ep_ring->dequeue) {
- xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
- status = -ESHUTDOWN;
- } else if (event_trb != td->last_trb) {
- xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
- status = -ESHUTDOWN;
- } else {
- xhci_dbg(xhci, "Successful control transfer!\n");
- status = 0;
- }
- break;
- case COMP_SHORT_TX:
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- break;
-
- default:
- if (!xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code))
- break;
- xhci_dbg(xhci, "TRB error code %u, "
- "halted endpoint index = %u\n",
- trb_comp_code, ep_index);
- /* else fall through */
- case COMP_STALL:
- /* Did we transfer part of the data (middle) phase? */
- if (event_trb != ep_ring->dequeue &&
- event_trb != td->last_trb)
- td->urb->actual_length =
- td->urb->transfer_buffer_length
- - TRB_LEN(event->transfer_len);
- else
- td->urb->actual_length = 0;
- xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, 0, td, event_trb);
- goto td_cleanup;
- }
- /*
- * Did we transfer any data, despite the errors that might have
- * happened? I.e. did we get past the setup stage?
+ do {
+ /* This TRB should be in the TD at the head of this ring's
+ * TD list.
*/
- if (event_trb != ep_ring->dequeue) {
- /* The event was for the status stage */
- if (event_trb == td->last_trb) {
- if (td->urb->actual_length != 0) {
- /* Don't overwrite a previously set error code */
- if ((status == -EINPROGRESS ||
- status == 0) &&
- (td->urb->transfer_flags
- & URB_SHORT_NOT_OK))
- /* Did we already see a short data stage? */
- status = -EREMOTEIO;
- } else {
- td->urb->actual_length =
- td->urb->transfer_buffer_length;
- }
- } else {
- /* Maybe the event was for the data stage? */
- if (trb_comp_code != COMP_STOP_INVAL) {
- /* We didn't stop on a link TRB in the middle */
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- TRB_LEN(event->transfer_len);
- xhci_dbg(xhci, "Waiting for status stage event\n");
- urb = NULL;
- goto cleanup;
- }
+ if (list_empty(&ep_ring->td_list)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+ "with no TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ if (ep->skip) {
+ ep->skip = false;
+ xhci_dbg(xhci, "td_list is empty while skip "
+ "flag set. Clear skip flag.\n");
}
+ ret = 0;
+ goto cleanup;
}
- } else {
- switch (trb_comp_code) {
- case COMP_SUCCESS:
- /* Double check that the HW transferred everything. */
- if (event_trb != td->last_trb) {
- xhci_warn(xhci, "WARN Successful completion "
- "on short TX\n");
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- } else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
- status = 0;
- }
- break;
- case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- break;
- default:
- /* Others already handled above */
- break;
+
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ /* Is this a TRB in the currently executing TD? */
+ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, event_dma);
+ if (event_seg && ep->skip) {
+ xhci_dbg(xhci, "Found td. Clear skip flag.\n");
+ ep->skip = false;
+ }
+ if (!event_seg &&
+ (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
+ /* HC is busted, give up! */
+ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
+ "part of current TD\n");
+ return -ESHUTDOWN;
}
- dev_dbg(&td->urb->dev->dev,
- "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(event->transfer_len));
- /* Fast path - was this the last TRB in the TD for this URB? */
- if (event_trb == td->last_trb) {
- if (TRB_LEN(event->transfer_len) != 0) {
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- TRB_LEN(event->transfer_len);
- if (td->urb->transfer_buffer_length <
- td->urb->actual_length) {
- xhci_warn(xhci, "HC gave bad length "
- "of %d bytes left\n",
- TRB_LEN(event->transfer_len));
- td->urb->actual_length = 0;
- if (td->urb->transfer_flags &
- URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- }
- /* Don't overwrite a previously set error code */
- if (status == -EINPROGRESS) {
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- }
- } else {
- td->urb->actual_length = td->urb->transfer_buffer_length;
- /* Ignore a short packet completion if the
- * untransferred length was zero.
- */
- if (status == -EREMOTEIO)
- status = 0;
- }
- } else {
- /* Slow path - walk the list, starting from the dequeue
- * pointer, to get the actual length transferred.
- */
- union xhci_trb *cur_trb;
- struct xhci_segment *cur_seg;
- td->urb->actual_length = 0;
- for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
- cur_trb != event_trb;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
- (cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
- td->urb->actual_length +=
- TRB_LEN(cur_trb->generic.field[2]);
- }
- /* If the ring didn't stop on a Link or No-op TRB, add
- * in the actual bytes transferred from the Normal TRB
+ if (event_seg) {
+ event_trb = &event_seg->trbs[(event_dma -
+ event_seg->dma) / sizeof(*event_trb)];
+ /*
+ * No-op TRB should not trigger interrupts.
+ * If event_trb is a no-op TRB, it means the
+ * corresponding TD has been cancelled. Just ignore
+ * the TD.
*/
- if (trb_comp_code != COMP_STOP_INVAL)
- td->urb->actual_length +=
- TRB_LEN(cur_trb->generic.field[2]) -
- TRB_LEN(event->transfer_len);
+ if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+ == TRB_TYPE(TRB_TR_NOOP)) {
+ xhci_dbg(xhci, "event_trb is a no-op TRB. "
+ "Skip it\n");
+ goto cleanup;
+ }
}
- }
- if (trb_comp_code == COMP_STOP_INVAL ||
- trb_comp_code == COMP_STOP) {
- /* The Endpoint Stop Command completion will take care of any
- * stopped TDs. A stopped TD may be restarted, so don't update
- * the ring dequeue pointer or take this TD off any lists yet.
+
+ /* Now update the urb's actual_length and give back to
+ * the core
*/
- ep->stopped_td = td;
- ep->stopped_trb = event_trb;
- } else {
- if (trb_comp_code == COMP_STALL) {
- /* The transfer is completed from the driver's
- * perspective, but we need to issue a set dequeue
- * command for this stalled endpoint to move the dequeue
- * pointer past the TD. We can't do that here because
- * the halt condition must be cleared first. Let the
- * USB class driver clear the stall later.
- */
- ep->stopped_td = td;
- ep->stopped_trb = event_trb;
- ep->stopped_stream = ep_ring->stream_id;
- } else if (xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code)) {
- /* Other types of errors halt the endpoint, but the
- * class driver doesn't call usb_reset_endpoint() unless
- * the error is -EPIPE. Clear the halted status in the
- * xHCI hardware manually.
- */
- xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, ep_ring->stream_id, td, event_trb);
- } else {
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
- }
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+ ret = process_ctrl_td(xhci, td, event_trb, event, ep,
+ &status);
+ else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+ ret = process_isoc_td(xhci, td, event_trb, event, ep,
+ &status);
+ else
+ ret = process_bulk_intr_td(xhci, td, event_trb, event,
+ ep, &status);
-td_cleanup:
- /* Clean up the endpoint's TD list */
- urb = td->urb;
- /* Do one last check of the actual transfer length.
- * If the host controller said we transferred more data than
- * the buffer length, urb->actual_length will be a very big
- * number (since it's unsigned). Play it safe and say we didn't
- * transfer anything.
+cleanup:
+ /*
+ * Do not update the event ring dequeue pointer if ep->skip is set.
+ * We will roll back to this event to continue processing the missed TDs.
*/
- if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB transfer length is wrong, "
- "xHC issue? req. len = %u, "
- "act. len = %u\n",
- urb->transfer_buffer_length,
- urb->actual_length);
- urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
+ if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+ inc_deq(xhci, xhci->event_ring, true);
}
- list_del(&td->td_list);
- /* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list))
- list_del(&td->cancelled_td_list);
- /* Leave the TD around for the reset endpoint function to use
- * (but only if it's not a control endpoint, since we already
- * queued the Set TR dequeue pointer command for stalled
- * control endpoints).
- */
- if (usb_endpoint_xfer_control(&urb->ep->desc) ||
- (trb_comp_code != COMP_STALL &&
- trb_comp_code != COMP_BABBLE)) {
- kfree(td);
+ if (ret) {
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+ /* Leave the TD around for the reset endpoint function
+ * to use (but only if it's not a control endpoint,
+ * since we already queued the Set TR dequeue pointer
+ * command for stalled control endpoints).
+ */
+ if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE))
+ xhci_urb_free_priv(xhci, urb_priv);
+
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "status = %d\n",
+ urb, urb->actual_length, status);
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ spin_lock(&xhci->lock);
}
- urb->hcpriv = NULL;
- }
-cleanup:
- inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);
- /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
- if (urb) {
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
- urb, urb->actual_length, status);
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
- spin_lock(&xhci->lock);
- }
+ /*
+ * If ep->skip is set, there are missed TDs on the endpoint ring
+ * that still need to be handled.
+ * Process them as short transfers until we reach the TD that this
+ * event points to.
+ */
+ } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+
return 0;
}
@@ -1652,7 +1939,7 @@ cleanup:
* This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
*/
-void xhci_handle_event(struct xhci_hcd *xhci)
+static void xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
@@ -1710,15 +1997,130 @@ void xhci_handle_event(struct xhci_hcd *xhci)
return;
}
- if (update_ptrs) {
- /* Update SW and HC event ring dequeue pointer */
+ if (update_ptrs)
+ /* Update SW event ring dequeue pointer */
inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);
- }
+
/* Are there more items on the event ring? */
xhci_handle_event(xhci);
}
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 status;
+ union xhci_trb *trb;
+ u64 temp_64;
+ union xhci_trb *event_ring_deq;
+ dma_addr_t deq;
+
+ spin_lock(&xhci->lock);
+ trb = xhci->event_ring->dequeue;
+ /* Check if the xHC generated the interrupt, or the irq is shared */
+ status = xhci_readl(xhci, &xhci->op_regs->status);
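+ /* A read of all ones means the host controller is gone
+ * (e.g. it was hot-removed) or otherwise inaccessible.
+ */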
+ if (status == 0xffffffff)
+ goto hw_died;
+
+ if (!(status & STS_EINT)) {
+ spin_unlock(&xhci->lock);
+ xhci_warn(xhci, "Spurious interrupt.\n");
+ return IRQ_NONE;
+ }
+ xhci_dbg(xhci, "op reg status = %08x\n", status);
+ xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+ xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+ (unsigned long long)
+ xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+ lower_32_bits(trb->link.segment_ptr),
+ upper_32_bits(trb->link.segment_ptr),
+ (unsigned int) trb->link.intr_target,
+ (unsigned int) trb->link.control);
+
+ if (status & STS_FATAL) {
+ xhci_warn(xhci, "WARNING: Host System Error\n");
+ xhci_halt(xhci);
+hw_died:
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ spin_unlock(&xhci->lock);
+ return -ESHUTDOWN;
+ }
+
+ /*
+ * Clear the op reg interrupt status first,
+ * so we can receive interrupts from other MSI-X interrupters.
+ * Write 1 to clear the interrupt status.
+ */
+ status |= STS_EINT;
+ xhci_writel(xhci, status, &xhci->op_regs->status);
+ /* FIXME when MSI-X is supported and there are multiple vectors */
+ /* Clear the MSI-X event interrupt status */
+
+ if (hcd->irq != -1) {
+ u32 irq_pending;
+ /* Acknowledge the PCI interrupt */
+ irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ irq_pending |= 0x3;
+ xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+ "Shouldn't IRQs be disabled?\n");
+ /* Clear the event handler busy flag (RW1C);
+ * the event ring should be empty.
+ */
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
+ &xhci->ir_set->erst_dequeue);
+ spin_unlock(&xhci->lock);
+
+ return IRQ_HANDLED;
+ }
+
+ event_ring_deq = xhci->event_ring->dequeue;
+ /* FIXME this should be a delayed service routine
+ * that clears the EHB.
+ */
+ xhci_handle_event(xhci);
+
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != xhci->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event "
+ "ring dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C); event ring is empty. */
+ temp_64 |= ERST_EHB;
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+
+ spin_unlock(&xhci->lock);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
+{
+ irqreturn_t ret;
+
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ ret = xhci_irq(hcd);
+
+ return ret;
+}
+
/**** Endpoint Ring Operations ****/
/*
@@ -1827,10 +2229,12 @@ static int prepare_transfer(struct xhci_hcd *xhci,
unsigned int stream_id,
unsigned int num_trbs,
struct urb *urb,
- struct xhci_td **td,
+ unsigned int td_index,
gfp_t mem_flags)
{
int ret;
+ struct urb_priv *urb_priv;
+ struct xhci_td *td;
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -1846,24 +2250,29 @@ static int prepare_transfer(struct xhci_hcd *xhci,
num_trbs, mem_flags);
if (ret)
return ret;
- *td = kzalloc(sizeof(struct xhci_td), mem_flags);
- if (!*td)
- return -ENOMEM;
- INIT_LIST_HEAD(&(*td)->td_list);
- INIT_LIST_HEAD(&(*td)->cancelled_td_list);
- ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
- if (unlikely(ret)) {
- kfree(*td);
- return ret;
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[td_index];
+
+ INIT_LIST_HEAD(&td->td_list);
+ INIT_LIST_HEAD(&td->cancelled_td_list);
+
+ if (td_index == 0) {
+ ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+ if (unlikely(ret)) {
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
+ return ret;
+ }
}
- (*td)->urb = urb;
- urb->hcpriv = (void *) (*td);
+ td->urb = urb;
/* Add this TD to the tail of the endpoint ring's TD list */
- list_add_tail(&(*td)->td_list, &ep_ring->td_list);
- (*td)->start_seg = ep_ring->enq_seg;
- (*td)->first_trb = ep_ring->enqueue;
+ list_add_tail(&td->td_list, &ep_ring->td_list);
+ td->start_seg = ep_ring->enq_seg;
+ td->first_trb = ep_ring->enqueue;
+
+ urb_priv->td[td_index] = td;
return 0;
}
@@ -2002,6 +2411,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
{
struct xhci_ring *ep_ring;
unsigned int num_trbs;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
struct scatterlist *sg;
int num_sgs;
@@ -2022,9 +2432,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, &td, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
+
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[0];
+
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
@@ -2145,6 +2559,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
int num_trbs;
struct xhci_generic_trb *start_trb;
@@ -2190,10 +2605,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, &td, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[0];
+
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
@@ -2279,6 +2697,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_generic_trb *start_trb;
int start_cycle;
u32 field, length_field;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -2306,10 +2725,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, &td, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[0];
+
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
@@ -2366,6 +2788,224 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return 0;
}
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
+ struct urb *urb, int i)
+{
+ int num_trbs = 0;
+ u64 addr, td_len, running_total;
+
+ addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+ td_len = urb->iso_frame_desc[i].length;
+
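+ /* Bytes that fit in the first TRB before the buffer crosses
+ * a TRB_MAX_BUFF_SIZE boundary.
+ */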
+ running_total = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (running_total != 0)
+ num_trbs++;
+
+ while (running_total < td_len) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+
+ return num_trbs;
+}
+
+/* Queue the TRBs for an isochronous transfer: one TD per isoc packet */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ struct xhci_td *td;
+ int num_tds, trbs_per_td;
+ struct xhci_generic_trb *start_trb;
+ bool first_trb;
+ int start_cycle;
+ u32 field, length_field;
+ int running_total, trb_buff_len, td_len, td_remain_len, ret;
+ u64 start_addr, addr;
+ int i, j;
+
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+ num_tds = urb->number_of_packets;
+ if (num_tds < 1) {
+ xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+ return -EINVAL;
+ }
+
+ if (!in_interrupt())
+ dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+ " addr = %#llx, num_tds = %d\n",
+ urb->ep->desc.bEndpointAddress,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length,
+ (unsigned long long)urb->transfer_dma,
+ num_tds);
+
+ start_addr = (u64) urb->transfer_dma;
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ /* Queue the first TRB, even if it's zero-length */
+ for (i = 0; i < num_tds; i++) {
+ first_trb = true;
+
+ running_total = 0;
+ addr = start_addr + urb->iso_frame_desc[i].offset;
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+
+ trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+ ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+ urb->stream_id, trbs_per_td, urb, i, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[i];
+
+ for (j = 0; j < trbs_per_td; j++) {
+ u32 remainder = 0;
+ field = 0;
+
+ if (first_trb) {
+ /* Queue the isoc TRB */
+ field |= TRB_TYPE(TRB_ISOC);
+ /* Assume URB_ISO_ASAP is set */
+ field |= TRB_SIA;
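+ /* The cycle bit of the very first TRB is written last
+ * (after the wmb below) so the xHC does not start on
+ * this URB until all of its TRBs are queued.
+ */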
+ if (i > 0)
+ field |= ep_ring->cycle_state;
+ first_trb = false;
+ } else {
+ /* Queue other normal TRBs */
+ field |= TRB_TYPE(TRB_NORMAL);
+ field |= ep_ring->cycle_state;
+ }
+
+ /* Chain all the TRBs together; clear the chain bit in
+ * the last TRB to indicate it's the last TRB in the
+ * chain.
+ */
+ if (j < trbs_per_td - 1) {
+ field |= TRB_CHAIN;
+ } else {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+
+ /* Calculate TRB length */
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (trb_buff_len > td_remain_len)
+ trb_buff_len = td_remain_len;
+
+ remainder = xhci_td_remainder(td_len - running_total);
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+ TRB_INTR_TARGET(0);
+ queue_trb(xhci, ep_ring, false, false,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ length_field,
+ /* We always want to know if the TRB was short,
+ * or we won't get an event when it completes.
+ * (Unless we use event data TRBs, which are a
+ * waste of space and HC resources.)
+ */
+ field | TRB_ISP);
+ running_total += trb_buff_len;
+
+ addr += trb_buff_len;
+ td_remain_len -= trb_buff_len;
+ }
+
+ /* Check TD length */
+ if (running_total != td_len) {
+ xhci_err(xhci, "ISOC TD length unmatch\n");
+ return -EINVAL;
+ }
+ }
+
+ wmb();
+ start_trb->field[3] |= start_cycle;
+
+ ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+ return 0;
+}
+
+/*
+ * Check the transfer ring to guarantee there is enough room for the URB.
+ * Update the ISO URB's start_frame and interval.
+ * The interval is updated the same way xhci_queue_intr_tx does it; for now
+ * urb->start_frame is simply taken from the xHC's frame index.
+ * URB_ISO_ASAP is always assumed, and urb->start_frame is NEVER used as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ struct xhci_ep_ctx *ep_ctx;
+ int start_frame;
+ int xhci_interval;
+ int ep_interval;
+ int num_tds, num_trbs, i;
+ int ret;
+
+ xdev = xhci->devs[slot_id];
+ ep_ring = xdev->eps[ep_index].ring;
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+ num_trbs = 0;
+ num_tds = urb->number_of_packets;
+ for (i = 0; i < num_tds; i++)
+ num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+
+ /* Check the ring to guarantee there is enough room for the whole URB.
+ * If the check fails, do not insert any of the URB's TDs onto the ring.
+ */
+ ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+ num_trbs, mem_flags);
+ if (ret)
+ return ret;
+
+ start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ start_frame &= 0x3fff;
+
+ urb->start_frame = start_frame;
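+ /* MFINDEX counts microframes; convert to frames for LS/FS devices */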
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ urb->start_frame >>= 3;
+
+ xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+ ep_interval = urb->interval;
+ /* Convert to microframes */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ ep_interval *= 8;
+ /* FIXME change this to a warning and a suggestion to use the new API
+ * to set the polling interval (once the API is added).
+ */
+ if (xhci_interval != ep_interval) {
+ if (!printk_ratelimit())
+ dev_dbg(&urb->dev->dev, "Driver uses different interval"
+ " (%d microframe%s) than xHCI "
+ "(%d microframe%s)\n",
+ ep_interval,
+ ep_interval == 1 ? "" : "s",
+ xhci_interval,
+ xhci_interval == 1 ? "" : "s");
+ urb->interval = xhci_interval;
+ /* Convert back to frames for LS/FS devices */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ urb->interval /= 8;
+ }
+ return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
/**** Command Ring Operations ****/
/* Generic function for queueing a command TRB on the command ring.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3998f72cd0c..d5c550ea3e6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
@@ -171,22 +172,84 @@ int xhci_reset(struct xhci_hcd *xhci)
return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
+/*
+ * Free IRQs
+ * Free all the IRQs that have been requested.
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+ int i;
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-#if 0
-/* Set up MSI-X table for entry 0 (may claim other entries later) */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
+ /* return if using legacy interrupt */
+ if (xhci_to_hcd(xhci)->irq >= 0)
+ return;
+
+ if (xhci->msix_entries) {
+ for (i = 0; i < xhci->msix_count; i++)
+ if (xhci->msix_entries[i].vector)
+ free_irq(xhci->msix_entries[i].vector,
+ xhci_to_hcd(xhci));
+ } else if (pdev->irq >= 0)
+ free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+ return;
+}
+
+/*
+ * Set up MSI
+ */
+static int xhci_setup_msi(struct xhci_hcd *xhci)
{
int ret;
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ xhci_err(xhci, "failed to allocate MSI entry\n");
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+ 0, "xhci_hcd", xhci_to_hcd(xhci));
+ if (ret) {
+ xhci_err(xhci, "disable MSI interrupt\n");
+ pci_disable_msi(pdev);
+ }
+
+ return ret;
+}
+
+/*
+ * Set up MSI-X
+ */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+ int i, ret = 0;
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- xhci->msix_count = 0;
- /* XXX: did I do this right? ixgbe does kcalloc for more than one */
- xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+ /*
+ * Calculate the number of MSI-X vectors supported:
+ * - HCS_MAX_INTRS: the maximum number of interrupts the host can
+ * handle, taken from the max-interrupters field of HCSPARAMS1.
+ * - num_online_cpus: one MSI-X vector per online CPU core.
+ * Add one extra vector so an interrupt is always available.
+ */
+ xhci->msix_count = min(num_online_cpus() + 1,
+ HCS_MAX_INTRS(xhci->hcs_params1));
+
+ xhci->msix_entries =
+ kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
+ GFP_KERNEL);
if (!xhci->msix_entries) {
xhci_err(xhci, "Failed to allocate MSI-X entries\n");
return -ENOMEM;
}
- xhci->msix_entries[0].entry = 0;
+
+ for (i = 0; i < xhci->msix_count; i++) {
+ xhci->msix_entries[i].entry = i;
+ xhci->msix_entries[i].vector = 0;
+ }
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
@@ -194,20 +257,19 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
goto free_entries;
}
- /*
- * Pass the xhci pointer value as the request_irq "cookie".
- * If more irqs are added, this will need to be unique for each one.
- */
- ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
- "xHCI", xhci_to_hcd(xhci));
- if (ret) {
- xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
- goto disable_msix;
+ for (i = 0; i < xhci->msix_count; i++) {
+ ret = request_irq(xhci->msix_entries[i].vector,
+ (irq_handler_t)xhci_msi_irq,
+ 0, "xhci_hcd", xhci_to_hcd(xhci));
+ if (ret)
+ goto disable_msix;
}
- xhci_dbg(xhci, "Finished setting up MSI-X\n");
- return 0;
+
+ return ret;
disable_msix:
+ xhci_err(xhci, "disable MSI-X interrupt\n");
+ xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
kfree(xhci->msix_entries);
@@ -215,21 +277,23 @@ free_entries:
return ret;
}
-/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- if (!xhci->msix_entries)
- return;
- free_irq(xhci->msix_entries[0].vector, xhci);
- pci_disable_msix(pdev);
- kfree(xhci->msix_entries);
- xhci->msix_entries = NULL;
- xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+ xhci_free_irq(xhci);
+
+ if (xhci->msix_entries) {
+ pci_disable_msix(pdev);
+ kfree(xhci->msix_entries);
+ xhci->msix_entries = NULL;
+ } else {
+ pci_disable_msi(pdev);
+ }
+
+ return;
}
-#endif
/*
* Initialize memory for HCD and xHC (one-time init).
@@ -257,100 +321,8 @@ int xhci_init(struct usb_hcd *hcd)
return retval;
}
-/*
- * Called in interrupt context when there might be work
- * queued on the event ring
- *
- * xhci->lock must be held by caller.
- */
-static void xhci_work(struct xhci_hcd *xhci)
-{
- u32 temp;
- u64 temp_64;
-
- /*
- * Clear the op reg interrupt status first,
- * so we can receive interrupts from other MSI-X interrupters.
- * Write 1 to clear the interrupt status.
- */
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- temp |= STS_EINT;
- xhci_writel(xhci, temp, &xhci->op_regs->status);
- /* FIXME when MSI-X is supported and there are multiple vectors */
- /* Clear the MSI-X event interrupt status */
-
- /* Acknowledge the interrupt */
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- temp |= 0x3;
- xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
- /* Flush posted writes */
- xhci_readl(xhci, &xhci->ir_set->irq_pending);
-
- if (xhci->xhc_state & XHCI_STATE_DYING)
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- else
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- xhci_handle_event(xhci);
-
- /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
- /* Flush posted writes -- FIXME is this necessary? */
- xhci_readl(xhci, &xhci->ir_set->irq_pending);
-}
-
/*-------------------------------------------------------------------------*/
-/*
- * xHCI spec says we can get an interrupt, and if the HC has an error condition,
- * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
- * indicators of an event TRB error, but we check the status *first* to be safe.
- */
-irqreturn_t xhci_irq(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- u32 temp, temp2;
- union xhci_trb *trb;
-
- spin_lock(&xhci->lock);
- trb = xhci->event_ring->dequeue;
- /* Check if the xHC generated the interrupt, or the irq is shared */
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- if (temp == 0xffffffff && temp2 == 0xffffffff)
- goto hw_died;
-
- if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
- spin_unlock(&xhci->lock);
- return IRQ_NONE;
- }
- xhci_dbg(xhci, "op reg status = %08x\n", temp);
- xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(trb->link.segment_ptr),
- upper_32_bits(trb->link.segment_ptr),
- (unsigned int) trb->link.intr_target,
- (unsigned int) trb->link.control);
-
- if (temp & STS_FATAL) {
- xhci_warn(xhci, "WARNING: Host System Error\n");
- xhci_halt(xhci);
-hw_died:
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
- spin_unlock(&xhci->lock);
- return -ESHUTDOWN;
- }
-
- xhci_work(xhci);
- spin_unlock(&xhci->lock);
-
- return IRQ_HANDLED;
-}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
@@ -423,21 +395,36 @@ int xhci_run(struct usb_hcd *hcd)
{
u32 temp;
u64 temp_64;
+ u32 ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
void (*doorbell)(struct xhci_hcd *) = NULL;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
xhci_dbg(xhci, "xhci_run\n");
-#if 0 /* FIXME: MSI not setup yet */
- /* Do this at the very last minute */
+ /* unregister the legacy interrupt */
+ if (hcd->irq)
+ free_irq(hcd->irq, hcd);
+ hcd->irq = -1;
+
ret = xhci_setup_msix(xhci);
- if (!ret)
- return ret;
+ if (ret)
+ /* fall back to MSI */
+ ret = xhci_setup_msi(xhci);
+
+ if (ret) {
+ /* fall back to a legacy interrupt */
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+ if (ret) {
+ xhci_err(xhci, "request interrupt %d failed\n",
+ pdev->irq);
+ return ret;
+ }
+ hcd->irq = pdev->irq;
+ }
- return -ENOSYS;
-#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
init_timer(&xhci->event_ring_timer);
xhci->event_ring_timer.data = (unsigned long) xhci;
@@ -495,7 +482,6 @@ int xhci_run(struct usb_hcd *hcd)
return -ENODEV;
}
- xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
if (doorbell)
(*doorbell)(xhci);
if (xhci->quirks & XHCI_NEC_HOST)
@@ -522,11 +508,9 @@ void xhci_stop(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
xhci_reset(xhci);
+ xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
-#if 0 /* No MSI yet */
- xhci_cleanup_msix(xhci);
-#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Tell the event ring poll function not to reschedule */
xhci->zombie = 1;
@@ -560,11 +544,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
- spin_unlock_irq(&xhci->lock);
-
-#if 0
xhci_cleanup_msix(xhci);
-#endif
+ spin_unlock_irq(&xhci->lock);
xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
xhci_readl(xhci, &xhci->op_regs->status));
@@ -720,7 +701,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
unsigned long flags;
int ret = 0;
unsigned int slot_id, ep_index;
-
+ struct urb_priv *urb_priv;
+ int size, i;
if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
return -EINVAL;
@@ -734,12 +716,36 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = -EINVAL;
goto exit;
}
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
if (!in_interrupt())
xhci_dbg(xhci, "urb submitted during PCI suspend\n");
ret = -ESHUTDOWN;
goto exit;
}
+
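+ /* Isochronous URBs get one TD per packet; other transfer
+ * types use a single TD.
+ */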
+ if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+ size = urb->number_of_packets;
+ else
+ size = 1;
+
+ urb_priv = kzalloc(sizeof(struct urb_priv) +
+ size * sizeof(struct xhci_td *), mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
+ if (!urb_priv->td[i]) {
+ urb_priv->length = i;
+ xhci_urb_free_priv(xhci, urb_priv);
+ return -ENOMEM;
+ }
+ }
+
+ urb_priv->length = size;
+ urb_priv->td_cnt = 0;
+ urb->hcpriv = urb_priv;
+
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
/* Check to see if the max packet size for the default control
* endpoint changed during FS device enumeration
@@ -788,11 +794,18 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
- ret = -EINVAL;
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ goto dying;
+ ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
+ slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
}
exit:
return ret;
dying:
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
@@ -800,6 +813,47 @@ dying:
return -ESHUTDOWN;
}
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ unsigned int stream_id;
+ struct xhci_virt_ep *ep;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ stream_id = urb->stream_id;
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
/*
* Remove the URB's TD from the endpoint ring. This may cause the HC to stop
* USB transfers, potentially stopping in the middle of a TRB buffer. The HC
@@ -834,9 +888,10 @@ dying:
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
- int ret;
+ int ret, i;
u32 temp;
struct xhci_hcd *xhci;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
unsigned int ep_index;
struct xhci_ring *ep_ring;
@@ -851,12 +906,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
temp = xhci_readl(xhci, &xhci->op_regs->status);
if (temp == 0xffffffff) {
xhci_dbg(xhci, "HW died, freeing TD.\n");
- td = (struct xhci_td *) urb->hcpriv;
+ urb_priv = urb->hcpriv;
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
- kfree(td);
+ xhci_urb_free_priv(xhci, urb_priv);
return ret;
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -884,9 +939,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
- td = (struct xhci_td *) urb->hcpriv;
- list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ urb_priv = urb->hcpriv;
+
+ for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ td = urb_priv->td[i];
+ list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ }
+
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
*/
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6c7e3430ec9..34a60d9f056 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -720,6 +720,14 @@ struct xhci_virt_ep {
struct timer_list stop_cmd_timer;
int stop_cmds_pending;
struct xhci_hcd *xhci;
+ /*
+ * Sometimes the xHC cannot process the isochronous endpoint ring
+ * quickly enough; it then skips some isoc TDs on the ring and
+ * generates a Missed Service Error Event.
+ * Set the skip flag when such an event is received, and process
+ * the missed TDs on the endpoint ring afterwards.
+ */
+ bool skip;
};
struct xhci_virt_device {
@@ -911,6 +919,9 @@ struct xhci_event_cmd {
/* Control transfer TRB specific fields */
#define TRB_DIR_IN (1<<16)
+/* Isochronous TRB specific fields */
+#define TRB_SIA (1<<31)
+
struct xhci_generic_trb {
u32 field[4];
};
@@ -1082,6 +1093,12 @@ struct xhci_scratchpad {
dma_addr_t *sp_dma_buffers;
};
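+/*
+ * Per-URB private data: one xhci_td pointer per isochronous packet
+ * (a single TD for other transfer types). td_cnt tracks how many of
+ * the length TDs have been processed so far.
+ */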
+struct urb_priv {
+ int length;
+ int td_cnt;
+ struct xhci_td *td[0];
+};
+
/*
* Each segment table entry is 4*32bits long. 1K seems like an ok size:
* (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
@@ -1130,7 +1147,7 @@ struct xhci_hcd {
int page_size;
/* Valid values are 12 to 20, inclusive */
int page_shift;
- /* only one MSI vector for now, but might need more later */
+ /* msi-x vectors */
int msix_count;
struct msix_entry *msix_entries;
/* data structures */
@@ -1327,11 +1344,6 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
-struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb);
-struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id);
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
@@ -1339,6 +1351,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags);
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
@@ -1358,6 +1371,7 @@ void xhci_stop(struct usb_hcd *hcd);
void xhci_shutdown(struct usb_hcd *hcd);
int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
+irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
@@ -1386,8 +1400,6 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
void *xhci_setup_one_noop(struct xhci_hcd *xhci);
-void xhci_handle_event(struct xhci_hcd *xhci);
-void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
@@ -1401,6 +1413,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
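Note on the new struct urb_priv above: td[0] is a C flexible-array member, so the fixed header and the per-URB array of TD pointers come from one allocation sized at runtime. A minimal sketch of that allocation pattern follows; num_tds and mem_flags are illustrative names, not identifiers taken from the xHCI code.

	struct urb_priv *urb_priv;

	/* one allocation covers the fixed header plus num_tds pointer slots */
	urb_priv = kzalloc(sizeof(struct urb_priv) +
			   num_tds * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->length = num_tds;	/* slots allocated */
	urb_priv->td_cnt = 0;		/* slots consumed so far */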
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index d240de097c6..801324af947 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
/* drain secondary buffer */
int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
- if (i < 0) {
+ if (i) {
retval = -EFAULT;
goto exit;
}
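The adutux change from "if (i < 0)" to "if (i)" reflects the return convention of copy_to_user(): it returns the number of bytes that could not be copied (0 on success), never a negative errno, so the old test could never fire. A minimal sketch of the usual pattern, using the names from the hunk above:

	/* copy_to_user() returns the number of bytes NOT copied, never < 0 */
	if (copy_to_user(buffer, dev->read_buffer_secondary + dev->secondary_head,
			 amount)) {
		retval = -EFAULT;
		goto exit;
	}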
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 82e16630a78..aecf380f6ec 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -650,7 +650,7 @@ static int ftdi_elan_open(struct inode *inode, struct file *file)
static int ftdi_elan_release(struct inode *inode, struct file *file)
{
- struct usb_ftdi *ftdi = (struct usb_ftdi *)file->private_data;
+ struct usb_ftdi *ftdi = file->private_data;
if (ftdi == NULL)
return -ENODEV;
up(&ftdi->sw_lock); /* decrement the count on our device */
@@ -673,7 +673,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
int bytes_read = 0;
int retry_on_empty = 10;
int retry_on_timeout = 5;
- struct usb_ftdi *ftdi = (struct usb_ftdi *)file->private_data;
+ struct usb_ftdi *ftdi = file->private_data;
if (ftdi->disconnected > 0) {
return -ENODEV;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 7dc9d3c6998..bc88c79875a 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/usb/iowarrior.h>
@@ -61,6 +61,7 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* Module parameters */
+static DEFINE_MUTEX(iowarrior_mutex);
static int debug = 0;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "debug=1 enables debugging messages");
@@ -282,7 +283,7 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer,
int read_idx;
int offset;
- dev = (struct iowarrior *)file->private_data;
+ dev = file->private_data;
/* verify that the device wasn't unplugged */
if (dev == NULL || !dev->present)
@@ -348,7 +349,7 @@ static ssize_t iowarrior_write(struct file *file,
char *buf = NULL; /* for IOW24 and IOW56 we need a buffer */
struct urb *int_out_urb = NULL;
- dev = (struct iowarrior *)file->private_data;
+ dev = file->private_data;
mutex_lock(&dev->mutex);
/* verify that the device wasn't unplugged */
@@ -483,7 +484,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
int retval;
int io_res; /* checks for bytes read/written and copy_to/from_user results */
- dev = (struct iowarrior *)file->private_data;
+ dev = file->private_data;
if (dev == NULL) {
return -ENODEV;
}
@@ -493,7 +494,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
return -ENOMEM;
/* lock this object */
- lock_kernel();
+ mutex_lock(&iowarrior_mutex);
mutex_lock(&dev->mutex);
/* verify that the device wasn't unplugged */
@@ -541,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
retval = io_res;
else {
io_res = copy_to_user(user_buffer, buffer, dev->report_size);
- if (io_res < 0)
+ if (io_res)
retval = -EFAULT;
}
break;
@@ -573,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
}
io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
sizeof(struct iowarrior_info));
- if (io_res < 0)
+ if (io_res)
retval = -EFAULT;
break;
}
@@ -585,7 +586,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
error_out:
/* unlock the device */
mutex_unlock(&dev->mutex);
- unlock_kernel();
+ mutex_unlock(&iowarrior_mutex);
kfree(buffer);
return retval;
}
@@ -602,12 +603,12 @@ static int iowarrior_open(struct inode *inode, struct file *file)
dbg("%s", __func__);
- lock_kernel();
+ mutex_lock(&iowarrior_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
- unlock_kernel();
+ mutex_unlock(&iowarrior_mutex);
err("%s - error, can't find device for minor %d", __func__,
subminor);
return -ENODEV;
@@ -617,7 +618,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&iowarrior_open_disc_lock);
- unlock_kernel();
+ mutex_unlock(&iowarrior_mutex);
return -ENODEV;
}
@@ -644,7 +645,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&dev->mutex);
- unlock_kernel();
+ mutex_unlock(&iowarrior_mutex);
return retval;
}
@@ -656,7 +657,7 @@ static int iowarrior_release(struct inode *inode, struct file *file)
struct iowarrior *dev;
int retval = 0;
- dev = (struct iowarrior *)file->private_data;
+ dev = file->private_data;
if (dev == NULL) {
return -ENODEV;
}
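The iowarrior changes follow the tree-wide Big Kernel Lock removal: each lock_kernel()/unlock_kernel() pair becomes a per-driver mutex with the same coarse scope, declared with DEFINE_MUTEX(). A rough sketch of the conversion shape (open path only, details elided):

	static DEFINE_MUTEX(iowarrior_mutex);	/* takes over the old BKL role */

	static int iowarrior_open(struct inode *inode, struct file *file)
	{
		mutex_lock(&iowarrior_mutex);	/* was: lock_kernel() */
		/* ... minor lookup and per-device setup ... */
		mutex_unlock(&iowarrior_mutex);	/* was: unlock_kernel() */
		return 0;
	}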
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 8547bf9e317..6482c6e2e6b 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -448,7 +448,7 @@ static int tower_release (struct inode *inode, struct file *file)
dbg(2, "%s: enter", __func__);
- dev = (struct lego_usb_tower *)file->private_data;
+ dev = file->private_data;
if (dev == NULL) {
dbg(1, "%s: object is NULL", __func__);
@@ -597,7 +597,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
dbg(2, "%s: enter, count = %Zd", __func__, count);
- dev = (struct lego_usb_tower *)file->private_data;
+ dev = file->private_data;
/* lock this object */
if (mutex_lock_interruptible(&dev->lock)) {
@@ -686,7 +686,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dbg(2, "%s: enter, count = %Zd", __func__, count);
- dev = (struct lego_usb_tower *)file->private_data;
+ dev = file->private_data;
/* lock this object */
if (mutex_lock_interruptible(&dev->lock)) {
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index a85771b1563..cc13ae61712 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -32,7 +32,7 @@
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/poll.h>
@@ -72,6 +72,7 @@ struct rio_usb_data {
struct mutex lock; /* general race avoidance */
};
+static DEFINE_MUTEX(rio500_mutex);
static struct rio_usb_data rio_instance;
static int open_rio(struct inode *inode, struct file *file)
@@ -79,12 +80,12 @@ static int open_rio(struct inode *inode, struct file *file)
struct rio_usb_data *rio = &rio_instance;
/* against disconnect() */
- lock_kernel();
+ mutex_lock(&rio500_mutex);
mutex_lock(&(rio->lock));
if (rio->isopen || !rio->present) {
mutex_unlock(&(rio->lock));
- unlock_kernel();
+ mutex_unlock(&rio500_mutex);
return -EBUSY;
}
rio->isopen = 1;
@@ -94,7 +95,7 @@ static int open_rio(struct inode *inode, struct file *file)
mutex_unlock(&(rio->lock));
dev_info(&rio->rio_dev->dev, "Rio opened.\n");
- unlock_kernel();
+ mutex_unlock(&rio500_mutex);
return 0;
}
@@ -491,7 +492,7 @@ static void disconnect_rio(struct usb_interface *intf)
struct rio_usb_data *rio = usb_get_intfdata (intf);
usb_set_intfdata (intf, NULL);
- lock_kernel();
+ mutex_lock(&rio500_mutex);
if (rio) {
usb_deregister_dev(intf, &usb_rio_class);
@@ -501,7 +502,7 @@ static void disconnect_rio(struct usb_interface *intf)
/* better let it finish - the release will do whats needed */
rio->rio_dev = NULL;
mutex_unlock(&(rio->lock));
- unlock_kernel();
+ mutex_unlock(&rio500_mutex);
return;
}
kfree(rio->ibuf);
@@ -512,7 +513,7 @@ static void disconnect_rio(struct usb_interface *intf)
rio->present = 0;
mutex_unlock(&(rio->lock));
}
- unlock_kernel();
+ mutex_unlock(&rio500_mutex);
}
static const struct usb_device_id rio_table[] = {
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index d25814c172b..70d00e99a4b 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2487,7 +2487,7 @@ sisusb_release(struct inode *inode, struct file *file)
{
struct sisusb_usb_data *sisusb;
- if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
+ if (!(sisusb = file->private_data))
return -ENODEV;
mutex_lock(&sisusb->lock);
@@ -2519,7 +2519,7 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
u16 buf16;
u32 buf32, address;
- if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
+ if (!(sisusb = file->private_data))
return -ENODEV;
mutex_lock(&sisusb->lock);
@@ -2661,7 +2661,7 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
u16 buf16;
u32 buf32, address;
- if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
+ if (!(sisusb = file->private_data))
return -ENODEV;
mutex_lock(&sisusb->lock);
@@ -2804,7 +2804,7 @@ sisusb_lseek(struct file *file, loff_t offset, int orig)
struct sisusb_usb_data *sisusb;
loff_t ret;
- if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
+ if (!(sisusb = file->private_data))
return -ENODEV;
mutex_lock(&sisusb->lock);
@@ -2969,7 +2969,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long retval = 0;
u32 __user *argp = (u32 __user *)arg;
- if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
+ if (!(sisusb = file->private_data))
return -ENODEV;
mutex_lock(&sisusb->lock);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 7828c764b32..d00dde19194 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -30,6 +29,7 @@
#define IOCTL_GET_DRV_VERSION 2
+static DEFINE_MUTEX(lcd_mutex);
static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
@@ -74,12 +74,12 @@ static int lcd_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor, r;
- lock_kernel();
+ mutex_lock(&lcd_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
- unlock_kernel();
+ mutex_unlock(&lcd_mutex);
err ("USBLCD: %s - error, can't find device for minor %d",
__func__, subminor);
return -ENODEV;
@@ -89,7 +89,7 @@ static int lcd_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&open_disc_mutex);
- unlock_kernel();
+ mutex_unlock(&lcd_mutex);
return -ENODEV;
}
@@ -101,13 +101,13 @@ static int lcd_open(struct inode *inode, struct file *file)
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
- unlock_kernel();
+ mutex_unlock(&lcd_mutex);
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
- unlock_kernel();
+ mutex_unlock(&lcd_mutex);
return 0;
}
@@ -116,7 +116,7 @@ static int lcd_release(struct inode *inode, struct file *file)
{
struct usb_lcd *dev;
- dev = (struct usb_lcd *)file->private_data;
+ dev = file->private_data;
if (dev == NULL)
return -ENODEV;
@@ -132,7 +132,7 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, size_t count, l
int retval = 0;
int bytes_read;
- dev = (struct usb_lcd *)file->private_data;
+ dev = file->private_data;
/* do a blocking bulk read to get data from the device */
retval = usb_bulk_msg(dev->udev,
@@ -158,20 +158,20 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
u16 bcdDevice;
char buf[30];
- dev = (struct usb_lcd *)file->private_data;
+ dev = file->private_data;
if (dev == NULL)
return -ENODEV;
switch (cmd) {
case IOCTL_GET_HARD_VERSION:
- lock_kernel();
+ mutex_lock(&lcd_mutex);
bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
sprintf(buf,"%1d%1d.%1d%1d",
(bcdDevice & 0xF000)>>12,
(bcdDevice & 0xF00)>>8,
(bcdDevice & 0xF0)>>4,
(bcdDevice & 0xF));
- unlock_kernel();
+ mutex_unlock(&lcd_mutex);
if (copy_to_user((void __user *)arg,buf,strlen(buf))!=0)
return -EFAULT;
break;
@@ -217,7 +217,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, siz
struct urb *urb = NULL;
char *buf = NULL;
- dev = (struct usb_lcd *)file->private_data;
+ dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 16dffe99d9f..eef370eb7a5 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -136,7 +136,7 @@ try_iso:
iso_out = e;
}
}
- if ((in && out) || (iso_in && iso_out))
+ if ((in && out) || iso_in || iso_out)
goto found;
}
return -EINVAL;
@@ -162,6 +162,9 @@ found:
dev->in_iso_pipe = usb_rcvisocpipe (udev,
iso_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
+ }
+
+ if (iso_out) {
dev->iso_out = &iso_out->desc;
dev->out_iso_pipe = usb_sndisocpipe (udev,
iso_out->desc.bEndpointAddress
@@ -1378,7 +1381,6 @@ static void iso_callback (struct urb *urb)
break;
}
}
- simple_free_urb (urb);
ctx->pending--;
if (ctx->pending == 0) {
@@ -1495,6 +1497,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
}
simple_free_urb (urbs [i]);
+ urbs[i] = NULL;
context.pending--;
context.submit_error = 1;
break;
@@ -1504,6 +1507,10 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
wait_for_completion (&context.done);
+ for (i = 0; i < param->sglen; i++) {
+ if (urbs[i])
+ simple_free_urb(urbs[i]);
+ }
/*
* Isochronous transfers are expected to fail sometimes. As an
* arbitrary limit, we will report an error if any submissions
@@ -1548,6 +1555,7 @@ fail:
* off just killing the userspace task and waiting for it to exit.
*/
+/* No BKL needed */
static int
usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
{
@@ -2170,7 +2178,7 @@ static struct usb_driver usbtest_driver = {
.name = "usbtest",
.id_table = id_table,
.probe = usbtest_probe,
- .ioctl = usbtest_ioctl,
+ .unlocked_ioctl = usbtest_ioctl,
.disconnect = usbtest_disconnect,
.suspend = usbtest_suspend,
.resume = usbtest_resume,
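The usbtest iso changes move URB freeing out of iso_callback() and into test_iso_queue() after wait_for_completion(), with slots freed early on a submit error set to NULL so nothing is freed twice. In outline (a sketch of the resulting flow, not the full function):

	wait_for_completion(&context.done);

	/* the callback no longer frees urbs; free whatever is still allocated */
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}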
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 61c76b13f0f..44cb37b5a4d 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -646,17 +646,14 @@ static int mon_bin_open(struct inode *inode, struct file *file)
size_t size;
int rc;
- lock_kernel();
mutex_lock(&mon_lock);
if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
mutex_unlock(&mon_lock);
- unlock_kernel();
return -ENODEV;
}
if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
printk(KERN_ERR TAG ": consistency error on open\n");
mutex_unlock(&mon_lock);
- unlock_kernel();
return -ENODEV;
}
@@ -689,7 +686,6 @@ static int mon_bin_open(struct inode *inode, struct file *file)
file->private_data = rp;
mutex_unlock(&mon_lock);
- unlock_kernel();
return 0;
err_allocbuff:
@@ -698,7 +694,6 @@ err_allocvec:
kfree(rp);
err_alloc:
mutex_unlock(&mon_lock);
- unlock_kernel();
return rc;
}
@@ -954,7 +949,7 @@ static int mon_bin_queued(struct mon_reader_bin *rp)
/*
*/
-static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct mon_reader_bin *rp = file->private_data;
// struct mon_bus* mbus = rp->r.m_bus;
@@ -1009,7 +1004,7 @@ static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
- mon_free_buff(rp->b_vec, size/CHUNK_SIZE);
+ mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
kfree(rp->b_vec);
rp->b_vec = vec;
rp->b_size = size;
@@ -1094,19 +1089,6 @@ static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static long mon_bin_unlocked_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- lock_kernel();
- ret = mon_bin_ioctl(file, cmd, arg);
- unlock_kernel();
-
- return ret;
-}
-
-
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
@@ -1250,7 +1232,7 @@ static const struct file_operations mon_fops_binary = {
.read = mon_bin_read,
/* .write = mon_text_write, */
.poll = mon_bin_poll,
- .unlocked_ioctl = mon_bin_unlocked_ioctl,
+ .unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mon_bin_compat_ioctl,
#endif
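With the mon_bin_unlocked_ioctl() wrapper gone, mon_bin_ioctl() itself (now returning long) is wired up as .unlocked_ioctl and relies on the driver's own mon_lock/fetch_lock rather than the BKL. Roughly, the resulting file_operations looks like this (other members unchanged and omitted here):

	static const struct file_operations mon_fops_binary = {
		.owner		= THIS_MODULE,
		.open		= mon_bin_open,
		.read		= mon_bin_read,
		.poll		= mon_bin_poll,
		.unlocked_ioctl	= mon_bin_ioctl,	/* takes its own locks */
	#ifdef CONFIG_COMPAT
		.compat_ioctl	= mon_bin_compat_ioctl,
	#endif
	};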
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d351b6..5ab5bb89bae 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->channel.max_len = 0x7fffffff;
DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 3b795c56221..540c766c4f8 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -704,7 +704,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (int_usb & MUSB_INTR_CONNECT) {
struct usb_hcd *hcd = musb_to_hcd(musb);
- void __iomem *mbase = musb->mregs;
handled = IRQ_HANDLED;
musb->is_active = 1;
@@ -717,9 +716,9 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (is_peripheral_active(musb)) {
/* REVISIT HNP; just force disconnect */
}
- musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
- musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
- musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+ musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask);
+ musb_writew(musb->mregs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
#endif
musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
|USB_PORT_STAT_HIGH_SPEED
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index bba76af0c0c..9e8639d4e86 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -92,29 +92,29 @@ static const struct musb_register_map musb_regmap[] = {
{ "LS_EOF1", 0x7E, 8 },
{ "SOFT_RST", 0x7F, 8 },
{ "DMA_CNTLch0", 0x204, 16 },
- { "DMA_ADDRch0", 0x208, 16 },
- { "DMA_COUNTch0", 0x20C, 16 },
+ { "DMA_ADDRch0", 0x208, 32 },
+ { "DMA_COUNTch0", 0x20C, 32 },
{ "DMA_CNTLch1", 0x214, 16 },
- { "DMA_ADDRch1", 0x218, 16 },
- { "DMA_COUNTch1", 0x21C, 16 },
+ { "DMA_ADDRch1", 0x218, 32 },
+ { "DMA_COUNTch1", 0x21C, 32 },
{ "DMA_CNTLch2", 0x224, 16 },
- { "DMA_ADDRch2", 0x228, 16 },
- { "DMA_COUNTch2", 0x22C, 16 },
+ { "DMA_ADDRch2", 0x228, 32 },
+ { "DMA_COUNTch2", 0x22C, 32 },
{ "DMA_CNTLch3", 0x234, 16 },
- { "DMA_ADDRch3", 0x238, 16 },
- { "DMA_COUNTch3", 0x23C, 16 },
+ { "DMA_ADDRch3", 0x238, 32 },
+ { "DMA_COUNTch3", 0x23C, 32 },
{ "DMA_CNTLch4", 0x244, 16 },
- { "DMA_ADDRch4", 0x248, 16 },
- { "DMA_COUNTch4", 0x24C, 16 },
+ { "DMA_ADDRch4", 0x248, 32 },
+ { "DMA_COUNTch4", 0x24C, 32 },
{ "DMA_CNTLch5", 0x254, 16 },
- { "DMA_ADDRch5", 0x258, 16 },
- { "DMA_COUNTch5", 0x25C, 16 },
+ { "DMA_ADDRch5", 0x258, 32 },
+ { "DMA_COUNTch5", 0x25C, 32 },
{ "DMA_CNTLch6", 0x264, 16 },
- { "DMA_ADDRch6", 0x268, 16 },
- { "DMA_COUNTch6", 0x26C, 16 },
+ { "DMA_ADDRch6", 0x268, 32 },
+ { "DMA_COUNTch6", 0x26C, 32 },
{ "DMA_CNTLch7", 0x274, 16 },
- { "DMA_ADDRch7", 0x278, 16 },
- { "DMA_COUNTch7", 0x27C, 16 },
+ { "DMA_ADDRch7", 0x278, 32 },
+ { "DMA_COUNTch7", 0x27C, 32 },
{ } /* Terminating Entry */
};
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
static int musb_test_mode_open(struct inode *inode, struct file *file)
{
- file->private_data = inode->i_private;
-
return single_open(file, musb_test_mode_show, inode->i_private);
}
static ssize_t musb_test_mode_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
- struct musb *musb = file->private_data;
+ struct seq_file *s = file->private_data;
+ struct musb *musb = s->private;
u8 test = 0;
char buf[18];
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870e957..d065e23f123 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min_t(size_t, request->length - request->actual,
+ musb_ep->dma->max_len);
use_dma = (request->dma != DMA_ADDR_INVALID);
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
#ifdef CONFIG_USB_INVENTRA_DMA
{
- size_t request_size;
-
- /* setup DMA, then program endpoint CSR */
- request_size = min_t(size_t, request->length,
- musb_ep->dma->max_len);
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
- request->dma,
- request->length);
+ request->dma + request->actual,
+ request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
- request->dma,
- request->length);
+ request->dma + request->actual,
+ request_size);
#endif
}
#endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
request->zero = 0;
}
- /* ... or if not, then complete it. */
- musb_g_giveback(musb_ep, request, 0);
-
- /*
- * Kickstart next transfer if appropriate;
- * the packet that just completed might not
- * be transmitted for hours or days.
- * REVISIT for double buffering...
- * FIXME revisit for stalls too...
- */
- musb_ep_select(mbase, epnum);
- csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY)
- return;
-
- request = musb_ep->desc ? next_request(musb_ep) : NULL;
- if (!request) {
- DBG(4, "%s idle now\n",
- musb_ep->end_point.name);
- return;
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
+ request = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!request) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ return;
+ }
}
}
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned fifo_count = 0;
- u16 len = musb_ep->packet_sz;
+ u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
+
+ len = musb_ep->packet_sz;
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
*/
csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
/* csr |= MUSB_RXCSR_DMAMODE; */
/* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (request->actual < request->length) {
int transfer_size = 0;
#ifdef USE_MODE1
- transfer_size = min(request->length,
+ transfer_size = min(request->length - request->actual,
channel->max_len);
#else
- transfer_size = len;
+ transfer_size = min(request->length - request->actual,
+ (unsigned)len);
#endif
if (transfer_size <= musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
u16 csr;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
musb_ep_select(mbase, epnum);
@@ -1081,7 +1084,7 @@ struct free_record {
/*
* Context: controller locked, IRQs blocked.
*/
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
DBG(3, "<== %s request %p len %u on hw_ep%d\n",
req->tx ? "TX/IN" : "RX/OUT",
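The txstate()/rxstate() changes above program DMA from the current position in the request rather than from its start, and cap each programming at the channel's max_len. Gathered in one place, the resulting pattern is roughly the following sketch (names as in the hunks):

	size_t request_size;

	/* bytes remaining in this request, capped by what the channel accepts */
	request_size = min_t(size_t, request->length - request->actual,
			     musb_ep->dma->max_len);

	use_dma = use_dma && c->channel_program(musb_ep->dma,
						musb_ep->packet_sz,
						request->zero,
						request->dma + request->actual,
						request_size);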
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b140325d8..572b1da7f2d 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 21b9788d024..6dd03f4c5f4 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@ __acquires(musb->lock)
ctrlrequest->wIndex & 0x0f;
struct musb_ep *musb_ep;
struct musb_hw_ep *ep;
+ struct musb_request *request;
void __iomem *regs;
int is_in;
u16 csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
musb_writew(regs, MUSB_RXCSR, csr);
}
+ /* Maybe start the first request in the queue */
+ request = to_musb_request(
+ next_request(musb_ep));
+ if (!musb_ep->busy && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
/* select ep0 again */
musb_ep_select(mbase, 0);
} break;
@@ -402,6 +411,9 @@ __acquires(musb->lock)
musb->g.a_alt_hnp_support = 1;
break;
#endif
+ case USB_DEVICE_DEBUG_MODE:
+ handled = 0;
+ break;
stall:
default:
handled = -EINVAL;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b1dff..9e65c47cc98 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
qh->segsize = length;
+ /*
+ * Ensure the data reaches main memory before starting
+ * the DMA transfer.
+ */
+ wmb();
+
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
dma->channel_release(channel);
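The new wmb() in musb_tx_dma_program() is a write memory barrier: it orders the preceding stores (the buffer contents and qh->segsize) before the register writes inside channel_program() that start the transfer, so the DMA engine cannot see stale data. In outline (error handling abbreviated):

	qh->segsize = length;

	/* flush prior stores before the write that kicks off DMA */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
				  urb->transfer_dma + offset, length))
		dma->channel_release(channel);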
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 92e85e027cf..43233c397b6 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -244,7 +244,7 @@ int musb_hub_control(
spin_lock_irqsave(&musb->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
spin_unlock_irqrestore(&musb->lock, flags);
return -ESHUTDOWN;
}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index dc66e4376d4..6dc107f2524 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -173,10 +173,7 @@ static int dma_channel_program(struct dma_channel *channel,
musb_channel->max_packet_sz = packet_sz;
channel->status = MUSB_DMA_STATUS_BUSY;
- if ((mode == 1) && (len >= packet_sz))
- configure_channel(channel, packet_sz, 1, dma_addr, len);
- else
- configure_channel(channel, packet_sz, 0, dma_addr, len);
+ configure_channel(channel, packet_sz, mode, dma_addr, len);
return true;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index e06d65e36bf..2111a241dd0 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -32,8 +32,6 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <plat/mux.h>
-
#include "musb_core.h"
#include "omap2430.h"
@@ -194,10 +192,6 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
u32 l;
struct omap_musb_board_data *data = board_data;
-#if defined(CONFIG_ARCH_OMAP2430)
- omap_cfg_reg(AE5_2430_USB0HS_STP);
-#endif
-
/* We require some kind of external transceiver, hooked
* up through ULPI. TWL4030-family PMICs include one,
* which needs a driver, drivers aren't always needed.
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 3d2d3e549bd..3b1289572d7 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -49,8 +49,6 @@ config USB_ULPI
Enable this to support ULPI connected USB OTG transceivers which
are likely found on embedded boards.
- The only chip currently supported is NXP's ISP1504
-
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 0e8888588d4..0bc97698af1 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
}
}
-static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
{
- u8 pwr;
+ u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+ if (on)
+ pwr &= ~PHY_PWR_PHYPWD;
+ else
+ pwr |= PHY_PWR_PHYPWD;
- pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+ WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+}
+
+static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
if (on) {
regulator_enable(twl->usb3v1);
regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
VUSB_DEDICATED2);
regulator_enable(twl->usb1v5);
- pwr &= ~PHY_PWR_PHYPWD;
- WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+ __twl4030_phy_power(twl, 1);
twl4030_usb_write(twl, PHY_CLK_CTRL,
twl4030_usb_read(twl, PHY_CLK_CTRL) |
(PHY_CLK_CTRL_CLOCKGATING_EN |
PHY_CLK_CTRL_CLK32K_EN));
- } else {
- pwr |= PHY_PWR_PHYPWD;
- WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+ } else {
+ __twl4030_phy_power(twl, 0);
regulator_disable(twl->usb1v5);
regulator_disable(twl->usb1v8);
regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
twl4030_phy_power(twl, 0);
twl->asleep = 1;
+ dev_dbg(twl->dev, "%s\n", __func__);
}
-static void twl4030_phy_resume(struct twl4030_usb *twl)
+static void __twl4030_phy_resume(struct twl4030_usb *twl)
{
- if (!twl->asleep)
- return;
-
twl4030_phy_power(twl, 1);
twl4030_i2c_access(twl, 1);
twl4030_usb_set_mode(twl, twl->usb_mode);
if (twl->usb_mode == T2_USB_MODE_ULPI)
twl4030_i2c_access(twl, 0);
+}
+
+static void twl4030_phy_resume(struct twl4030_usb *twl)
+{
+ if (!twl->asleep)
+ return;
+ __twl4030_phy_resume(twl);
twl->asleep = 0;
+ dev_dbg(twl->dev, "%s\n", __func__);
}
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
- /* put VUSB3V1 LDO in active state */
- twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+ /* Keep VUSB3V1 LDO in sleep state until a VBUS/ID change is detected */
+ /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
/* input to VUSB3V1 LDO is from VBAT, not VBUS */
twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
return IRQ_HANDLED;
}
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+ int status;
+
+ status = twl4030_usb_linkstat(twl);
+ if (status >= 0) {
+ if (status == USB_EVENT_NONE) {
+ __twl4030_phy_power(twl, 0);
+ twl->asleep = 1;
+ } else {
+ __twl4030_phy_resume(twl);
+ twl->asleep = 0;
+ }
+
+ blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->otg.gadget);
+ }
+ sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+}
+
static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
{
struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -568,7 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
twl->otg.set_peripheral = twl4030_set_peripheral;
twl->otg.set_suspend = twl4030_set_suspend;
twl->usb_mode = pdata->usb_mode;
- twl->asleep = 1;
+ twl->asleep = 1;
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -606,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
return status;
}
- /* The IRQ handler just handles changes from the previous states
- * of the ID and VBUS pins ... in probe() we must initialize that
- * previous state. The easy way: fake an IRQ.
- *
- * REVISIT: a real IRQ might have happened already, if PREEMPT is
- * enabled. Else the IRQ may not yet be configured or enabled,
- * because of scheduling delays.
+ /* Power the phy down or bring it up according to the
+ * current link state.
*/
- twl4030_usb_irq(twl->irq, twl);
+ twl4030_usb_phy_init(twl);
dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
return 0;
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index d331b222ad2..ccc81950822 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -31,30 +31,110 @@
#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
-#define TR_FLAG(flags, a, b) (((flags) & a) ? b : 0)
-
/* ULPI hardcoded IDs, used for probing */
static unsigned int ulpi_ids[] = {
ULPI_ID(0x04cc, 0x1504), /* NXP ISP1504 */
+ ULPI_ID(0x0424, 0x0006), /* SMSC USB3319 */
};
-static int ulpi_set_flags(struct otg_transceiver *otg)
+static int ulpi_set_otg_flags(struct otg_transceiver *otg)
{
- unsigned int flags = 0;
+ unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN |
+ ULPI_OTG_CTRL_DM_PULLDOWN;
- if (otg->flags & USB_OTG_PULLUP_ID)
+ if (otg->flags & ULPI_OTG_ID_PULLUP)
flags |= ULPI_OTG_CTRL_ID_PULLUP;
- if (otg->flags & USB_OTG_PULLDOWN_DM)
- flags |= ULPI_OTG_CTRL_DM_PULLDOWN;
+ /*
+ * ULPI Specification rev.1.1 default
+ * for Dp/DmPulldown is enabled.
+ */
+ if (otg->flags & ULPI_OTG_DP_PULLDOWN_DIS)
+ flags &= ~ULPI_OTG_CTRL_DP_PULLDOWN;
- if (otg->flags & USB_OTG_PULLDOWN_DP)
- flags |= ULPI_OTG_CTRL_DP_PULLDOWN;
+ if (otg->flags & ULPI_OTG_DM_PULLDOWN_DIS)
+ flags &= ~ULPI_OTG_CTRL_DM_PULLDOWN;
- if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR)
+ if (otg->flags & ULPI_OTG_EXTVBUSIND)
flags |= ULPI_OTG_CTRL_EXTVBUSIND;
- return otg_io_write(otg, flags, ULPI_SET(ULPI_OTG_CTRL));
+ return otg_io_write(otg, flags, ULPI_OTG_CTRL);
+}
+
+static int ulpi_set_fc_flags(struct otg_transceiver *otg)
+{
+ unsigned int flags = 0;
+
+ /*
+ * ULPI Specification rev.1.1 default
+ * for XcvrSelect is Full Speed.
+ */
+ if (otg->flags & ULPI_FC_HS)
+ flags |= ULPI_FUNC_CTRL_HIGH_SPEED;
+ else if (otg->flags & ULPI_FC_LS)
+ flags |= ULPI_FUNC_CTRL_LOW_SPEED;
+ else if (otg->flags & ULPI_FC_FS4LS)
+ flags |= ULPI_FUNC_CTRL_FS4LS;
+ else
+ flags |= ULPI_FUNC_CTRL_FULL_SPEED;
+
+ if (otg->flags & ULPI_FC_TERMSEL)
+ flags |= ULPI_FUNC_CTRL_TERMSELECT;
+
+ /*
+ * ULPI Specification rev.1.1 default
+ * for OpMode is Normal Operation.
+ */
+ if (otg->flags & ULPI_FC_OP_NODRV)
+ flags |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ else if (otg->flags & ULPI_FC_OP_DIS_NRZI)
+ flags |= ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI;
+ else if (otg->flags & ULPI_FC_OP_NSYNC_NEOP)
+ flags |= ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP;
+ else
+ flags |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+
+ /*
+ * ULPI Specification rev.1.1 default
+ * for SuspendM is Powered.
+ */
+ flags |= ULPI_FUNC_CTRL_SUSPENDM;
+
+ return otg_io_write(otg, flags, ULPI_FUNC_CTRL);
+}
+
+static int ulpi_set_ic_flags(struct otg_transceiver *otg)
+{
+ unsigned int flags = 0;
+
+ if (otg->flags & ULPI_IC_AUTORESUME)
+ flags |= ULPI_IFC_CTRL_AUTORESUME;
+
+ if (otg->flags & ULPI_IC_EXTVBUS_INDINV)
+ flags |= ULPI_IFC_CTRL_EXTERNAL_VBUS;
+
+ if (otg->flags & ULPI_IC_IND_PASSTHRU)
+ flags |= ULPI_IFC_CTRL_PASSTHRU;
+
+ if (otg->flags & ULPI_IC_PROTECT_DIS)
+ flags |= ULPI_IFC_CTRL_PROTECT_IFC_DISABLE;
+
+ return otg_io_write(otg, flags, ULPI_IFC_CTRL);
+}
+
+static int ulpi_set_flags(struct otg_transceiver *otg)
+{
+ int ret;
+
+ ret = ulpi_set_otg_flags(otg);
+ if (ret)
+ return ret;
+
+ ret = ulpi_set_ic_flags(otg);
+ if (ret)
+ return ret;
+
+ return ulpi_set_fc_flags(otg);
}
static int ulpi_init(struct otg_transceiver *otg)
@@ -81,6 +161,31 @@ static int ulpi_init(struct otg_transceiver *otg)
return -ENODEV;
}
+static int ulpi_set_host(struct otg_transceiver *otg, struct usb_bus *host)
+{
+ unsigned int flags = otg_io_read(otg, ULPI_IFC_CTRL);
+
+ if (!host) {
+ otg->host = NULL;
+ return 0;
+ }
+
+ otg->host = host;
+
+ flags &= ~(ULPI_IFC_CTRL_6_PIN_SERIAL_MODE |
+ ULPI_IFC_CTRL_3_PIN_SERIAL_MODE |
+ ULPI_IFC_CTRL_CARKITMODE);
+
+ if (otg->flags & ULPI_IC_6PIN_SERIAL)
+ flags |= ULPI_IFC_CTRL_6_PIN_SERIAL_MODE;
+ else if (otg->flags & ULPI_IC_3PIN_SERIAL)
+ flags |= ULPI_IFC_CTRL_3_PIN_SERIAL_MODE;
+ else if (otg->flags & ULPI_IC_CARKIT)
+ flags |= ULPI_IFC_CTRL_CARKITMODE;
+
+ return otg_io_write(otg, flags, ULPI_IFC_CTRL);
+}
+
static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
{
unsigned int flags = otg_io_read(otg, ULPI_OTG_CTRL);
@@ -88,14 +193,14 @@ static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
if (on) {
- if (otg->flags & USB_OTG_DRV_VBUS)
+ if (otg->flags & ULPI_OTG_DRVVBUS)
flags |= ULPI_OTG_CTRL_DRVVBUS;
- if (otg->flags & USB_OTG_DRV_VBUS_EXT)
+ if (otg->flags & ULPI_OTG_DRVVBUS_EXT)
flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
}
- return otg_io_write(otg, flags, ULPI_SET(ULPI_OTG_CTRL));
+ return otg_io_write(otg, flags, ULPI_OTG_CTRL);
}
struct otg_transceiver *
@@ -112,6 +217,7 @@ otg_ulpi_create(struct otg_io_access_ops *ops,
otg->flags = flags;
otg->io_ops = ops;
otg->init = ulpi_init;
+ otg->set_host = ulpi_set_host;
otg->set_vbus = ulpi_set_vbus;
return otg;
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index bd8aab0ef1c..916b2b6d765 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -642,6 +642,15 @@ config USB_SERIAL_ZIO
To compile this driver as a module, choose M here: the
module will be called zio.
+config USB_SERIAL_SSU100
+ tristate "USB Quatech SSU-100 Single Port Serial Driver"
+ help
+ Say Y here if you want to use the Quatech SSU-100 single
+ port usb to serial adapter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ssu100.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index e54c728c016..40ebe17b6ea 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
+obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o
obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o
obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8b8c7976b4c..4f1744c5871 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static int debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,6 +125,10 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
+ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
@@ -218,8 +225,8 @@ static struct usb_serial_driver cp210x_device = {
#define BITS_STOP_2 0x0002
/* CP210X_SET_BREAK */
-#define BREAK_ON 0x0000
-#define BREAK_OFF 0x0001
+#define BREAK_ON 0x0001
+#define BREAK_OFF 0x0000
/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index fd35f73b572..b92070c103c 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -609,8 +609,10 @@ static void digi_wakeup_write_lock(struct work_struct *work)
static void digi_wakeup_write(struct usb_serial_port *port)
{
struct tty_struct *tty = tty_port_tty_get(&port->port);
- tty_wakeup(tty);
- tty_kref_put(tty);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
}
@@ -1682,7 +1684,7 @@ static int digi_read_inb_callback(struct urb *urb)
priv->dp_throttle_restart = 1;
/* receive data */
- if (opcode == DIGI_CMD_RECEIVE_DATA) {
+ if (tty && opcode == DIGI_CMD_RECEIVE_DATA) {
/* get flag from port_status */
flag = 0;
@@ -1763,10 +1765,12 @@ static int digi_read_oob_callback(struct urb *urb)
return -1;
tty = tty_port_tty_get(&port->port);
+
rts = 0;
- rts = tty->termios->c_cflag & CRTSCTS;
+ if (tty)
+ rts = tty->termios->c_cflag & CRTSCTS;
- if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
+ if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
spin_lock(&priv->dp_port_lock);
/* convert from digi flags to termiox flags */
if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e298dc4baed..97cc87d654c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -157,6 +157,9 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
@@ -177,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -746,6 +750,17 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+ { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1372,7 +1387,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
}
/* set max packet size based on descriptor */
- priv->max_packet_size = ep_desc->wMaxPacketSize;
+ priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -1827,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
} else {
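ftdi_set_max_packet_size() now converts wMaxPacketSize with le16_to_cpu(): USB descriptor fields are little-endian on the wire, so reading the raw __le16 field is only correct on little-endian hosts. The fix is the usual one-line conversion:

	/* wMaxPacketSize is __le16 in the descriptor; convert to CPU byte order */
	priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);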
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index d01946db8fa..15a4583775a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -40,6 +40,11 @@
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+/* US Interface Navigator (http://www.usinterface.com/) */
+#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
+#define FTDI_USINT_WKEY_PID 0xb811 /* Navigator WKEY and FSK lines */
+#define FTDI_USINT_RS232_PID 0xb812 /* Navigator RS232 and CONFIG lines */
+
/* OOCDlink by Joern Kaipf <joernk@web.de>
* (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
@@ -105,6 +110,9 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID 0xD780
+
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
@@ -127,6 +135,18 @@
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
/*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
+#define FTDI_CHAMSYS_WING_PID 0xDAFF
+
+/*
* Westrex International devices submitted by Cory Lee
*/
#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
@@ -984,6 +1004,12 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID 0x1c0c
+#define IONICS_PLUGCOMPUTER_PID 0x0102
+
+/*
* Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
@@ -1032,3 +1058,8 @@
#define XVERVE_SIGNALYZER_SH2_PID 0xBCA2
#define XVERVE_SIGNALYZER_SH4_PID 0xBCA4
+/*
+ * Segway Robotic Mobility Platform USB interface (using VID 0x0403)
+ * Submitted by John G. Rogers
+ */
+#define SEGWAY_RMP200_PID 0xe729
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index a817ced8283..e6833e216fc 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -208,18 +208,23 @@ retry:
urb->transfer_buffer_length = count;
usb_serial_debug_data(debug, &port->dev, __func__, count,
urb->transfer_buffer);
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes += count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ clear_bit(i, &port->write_urbs_free);
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s - error submitting urb: %d\n",
__func__, result);
+ set_bit(i, &port->write_urbs_free);
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes -= count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
return result;
}
- clear_bit(i, &port->write_urbs_free);
-
- spin_lock_irqsave(&port->lock, flags);
- port->tx_bytes += count;
- spin_unlock_irqrestore(&port->lock, flags);
/* Try sending off another urb, unless in irq context (in which case
* there will be no free urb). */
@@ -338,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
tty_insert_flip_string(tty, ch, urb->actual_length);
else {
for (i = 0; i < urb->actual_length; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, TTY_NORMAL);
}
}
@@ -443,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
#ifdef CONFIG_MAGIC_SYSRQ
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
if (port->sysrq && port->port.console) {
if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch, tty);
+ handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
@@ -457,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
return 0;
}
#else
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
return 0;
}
@@ -513,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i)
generic_cleanup(serial->port[i]);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
void usb_serial_generic_release(struct usb_serial *serial)
{
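The usb-serial generic write path now accounts tx_bytes and clears the write_urbs_free bit before usb_submit_urb(), because the completion handler may run (and undo that accounting) before submit even returns; on a failed submit the accounting is rolled back. In outline, following the hunk above:

	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		/* submission failed: undo the accounting done above */
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);
	}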
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 0fca2659206..a7cfc595293 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
/* Check if we have an old version in the I2C and
update if necessary */
- if (download_cur_ver != download_new_ver) {
+ if (download_cur_ver < download_new_ver) {
dbg("%s - Update I2C dld from %d.%d to %d.%d",
__func__,
firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial)
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
- return status;
+ return -EINVAL;
}
/* Update I2C with type 0xf2 record with correct
@@ -1298,7 +1298,7 @@ static int download_fw(struct edgeport_serial *serial)
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
- return status;
+ return -EINVAL;
}
/* verify the write -- must do this in order for
@@ -1321,7 +1321,7 @@ static int download_fw(struct edgeport_serial *serial)
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
- return status;
+ return -EINVAL;
}
kfree(vheader);
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 28913fa95fb..4735931b4c7 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -534,7 +534,6 @@ static struct usb_device_id ipaq_id_table [] = {
{ USB_DEVICE(0x413C, 0x4009) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x4505, 0x0010) }, /* Smartphone */
{ USB_DEVICE(0x5E04, 0xCE00) }, /* SAGEM Wireless Assistant */
- { USB_DEVICE(0x0BB4, 0x00CF) }, /* HTC smartphone modems */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 74551cb2e8e..efc72113216 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1,6 +1,8 @@
/*
* Infinity Unlimited USB Phoenix driver
*
+ * Copyright (C) 2010 James Courtier-Dutton (James@superbug.co.uk)
+
* Copyright (C) 2007 Alain Degreffe (eczema@ecze.com)
*
* Original code taken from iuutool (Copyright (C) 2006 Juan Carlos Borrás)
@@ -40,7 +42,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.11"
+#define DRIVER_VERSION "v0.12"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
static const struct usb_device_id id_table[] = {
@@ -81,6 +83,9 @@ struct iuu_private {
u8 *dbgbuf; /* debug buffer */
u8 len;
int vcc; /* vcc (either 3 or 5 V) */
+ u32 baud;
+ u32 boost;
+ u32 clk;
};
@@ -157,13 +162,14 @@ static int iuu_tiocmset(struct tty_struct *tty, struct file *file,
port->number, set, clear);
spin_lock_irqsave(&priv->lock, flags);
- if (set & TIOCM_RTS)
- priv->tiostatus = TIOCM_RTS;
- if (!(set & TIOCM_RTS) && priv->tiostatus == TIOCM_RTS) {
+ if ((set & TIOCM_RTS) && !(priv->tiostatus == TIOCM_RTS)) {
dbg("%s TIOCMSET RESET called !!!", __func__);
priv->reset = 1;
}
+ if (set & TIOCM_RTS)
+ priv->tiostatus = TIOCM_RTS;
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
@@ -851,20 +857,24 @@ static int iuu_uart_off(struct usb_serial_port *port)
return status;
}
-static int iuu_uart_baud(struct usb_serial_port *port, u32 baud,
+static int iuu_uart_baud(struct usb_serial_port *port, u32 baud_base,
u32 *actual, u8 parity)
{
int status;
+ u32 baud;
u8 *dataout;
u8 DataCount = 0;
u8 T1Frekvens = 0;
u8 T1reload = 0;
unsigned int T1FrekvensHZ = 0;
+ dbg("%s - enter baud_base=%d", __func__, baud_base);
dataout = kmalloc(sizeof(u8) * 5, GFP_KERNEL);
if (!dataout)
return -ENOMEM;
+ /*baud = (((priv->clk / 35) * baud_base) / 100000); */
+ baud = baud_base;
if (baud < 1200 || baud > 230400) {
kfree(dataout);
@@ -948,15 +958,20 @@ static void iuu_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
const u32 supported_mask = CMSPAR|PARENB|PARODD;
-
+ struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned int cflag = tty->termios->c_cflag;
int status;
u32 actual;
u32 parity;
int csize = CS7;
- int baud = 9600; /* Fixed for the moment */
+ int baud;
u32 newval = cflag & supported_mask;
+ /* Just use the ospeed. ispeed should be the same. */
+ baud = tty->termios->c_ospeed;
+
+ dbg("%s - enter c_ospeed or baud=%d", __func__, baud);
+
/* compute the parity parameter */
parity = 0;
if (cflag & CMSPAR) { /* Using mark space */
@@ -976,15 +991,15 @@ static void iuu_set_termios(struct tty_struct *tty,
/* set it */
status = iuu_uart_baud(port,
- (clockmode == 2) ? 16457 : 9600 * boost / 100,
+ baud * priv->boost / 100,
&actual, parity);
/* set the termios value to the real one, so the user now what has
* changed. We support few fields so its easies to copy the old hw
* settings back over and then adjust them
*/
- if (old_termios)
- tty_termios_copy_hw(tty->termios, old_termios);
+ if (old_termios)
+ tty_termios_copy_hw(tty->termios, old_termios);
if (status != 0) /* Set failed - return old bits */
return;
/* Re-encode speed, parity and csize */
@@ -1018,6 +1033,7 @@ static void iuu_close(struct usb_serial_port *port)
static void iuu_init_termios(struct tty_struct *tty)
{
+ dbg("%s - enter", __func__);
*(tty->termios) = tty_std_termios;
tty->termios->c_cflag = CLOCAL | CREAD | CS8 | B9600
| TIOCM_CTS | CSTOPB | PARENB;
@@ -1033,10 +1049,16 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
u8 *buf;
int result;
+ int baud;
u32 actual;
struct iuu_private *priv = usb_get_serial_port_data(port);
- dbg("%s - port %d", __func__, port->number);
+ baud = tty->termios->c_ospeed;
+ tty->termios->c_ispeed = baud;
+ /* Re-encode speed */
+ tty_encode_baud_rate(tty, baud, baud);
+
+ dbg("%s - port %d, baud %d", __func__, port->number, baud);
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
@@ -1071,23 +1093,29 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
iuu_uart_on(port);
if (boost < 100)
boost = 100;
+ priv->boost = boost;
+ priv->baud = baud;
switch (clockmode) {
case 2: /* 3.680 MHz */
+ priv->clk = IUU_CLK_3680000;
iuu_clk(port, IUU_CLK_3680000 * boost / 100);
result =
- iuu_uart_baud(port, 9600 * boost / 100, &actual,
+ iuu_uart_baud(port, baud * boost / 100, &actual,
IUU_PARITY_EVEN);
break;
case 3: /* 6.00 MHz */
iuu_clk(port, IUU_CLK_6000000 * boost / 100);
+ priv->clk = IUU_CLK_6000000;
+ /* Ratio of 6000000 to 3500000 for baud 9600 */
result =
iuu_uart_baud(port, 16457 * boost / 100, &actual,
IUU_PARITY_EVEN);
break;
default: /* 3.579 MHz */
iuu_clk(port, IUU_CLK_3579000 * boost / 100);
+ priv->clk = IUU_CLK_3579000;
result =
- iuu_uart_baud(port, 9600 * boost / 100, &actual,
+ iuu_uart_baud(port, baud * boost / 100, &actual,
IUU_PARITY_EVEN);
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7e334..aa665817a27 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
case TIOCGICOUNT:
cnow = mos7720_port->icount;
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 585b7e66374..1a42bc21379 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -119,16 +119,20 @@
* by making a change here, in moschip_port_id_table, and in
* moschip_id_table_combined
*/
-#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
+#define USB_VENDOR_ID_BANDB 0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -2273,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
case TIOCGICOUNT:
cnow = mos7840_port->icount;
smp_rmb();
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
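The mos7720 and mos7840 hunks above apply the same fix: zero the on-stack serial_icounter_struct before filling it, so structure padding and the unset reserved[] tail are not copied out to userspace. A hedged sketch of the pattern in isolation (icount_to_user() is an illustrative name, not a helper from these drivers, and only a few of the counters are shown):

#include <linux/serial.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Copy driver counters to userspace without leaking uninitialised stack data. */
static int icount_to_user(const struct async_icount *cnow,
			  struct serial_icounter_struct __user *uarg)
{
	struct serial_icounter_struct icount;

	memset(&icount, 0, sizeof(icount));	/* clears padding and reserved[] */
	icount.cts = cnow->cts;
	icount.dsr = cnow->dsr;
	icount.rng = cnow->rng;
	icount.dcd = cnow->dcd;

	if (copy_to_user(uarg, &icount, sizeof(icount)))
		return -EFAULT;
	return 0;
}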
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index a6b207c8491..1f00f243c26 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -25,6 +25,7 @@ static int debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
+ { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5cd30e4345c..c46911af282 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -145,7 +145,10 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_E143D 0x143D
#define HUAWEI_PRODUCT_E143E 0x143E
#define HUAWEI_PRODUCT_E143F 0x143F
+#define HUAWEI_PRODUCT_K4505 0x1464
+#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_E14AC 0x14AC
+#define HUAWEI_PRODUCT_ETS1220 0x1803
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -161,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
#define YISO_VENDOR_ID 0x0EAB
#define YISO_PRODUCT_U893 0xC893
+/*
+ * NOVATEL WIRELESS PRODUCTS
+ *
+ * Note from Novatel Wireless:
+ * If your Novatel modem does not work on Linux, don't
+ * change the option module, but check our website. If
+ * that does not help, contact ddeschepper@nvtl.com
+*/
/* MERLIN EVDO PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_V640 0x1100
#define NOVATELWIRELESS_PRODUCT_V620 0x1110
@@ -182,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
-
/* OVATION PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
-#define NOVATELWIRELESS_PRODUCT_U727 0x5010
-#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
-#define NOVATELWIRELESS_PRODUCT_MC760 0x6000
+/*
+ * Note from Novatel Wireless:
+ * All PIDs in the 5xxx range are currently reserved for
+ * auto-install CDROMs, and should not be added to this
+ * module.
+ *
+ * #define NOVATELWIRELESS_PRODUCT_U727 0x5010
+ * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
+*/
#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
-
-/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
-#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001
+#define NOVATELWIRELESS_PRODUCT_MC780 0x6010
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007
+#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030
+#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041
+#define NOVATELWIRELESS_PRODUCT_MC547 0x7042
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+#define NOVATELWIRELESS_PRODUCT_G1 0xA001
+#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+#define NOVATELWIRELESS_PRODUCT_G2 0xA010
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -264,9 +290,6 @@ static void option_instat_callback(struct urb *urb);
#define BANDRICH_PRODUCT_1011 0x1011
#define BANDRICH_PRODUCT_1012 0x1012
-#define AMOI_VENDOR_ID 0x1614
-#define AMOI_PRODUCT_9508 0x0800
-
#define QUALCOMM_VENDOR_ID 0x05C6
#define CMOTECH_VENDOR_ID 0x16d8
@@ -365,6 +388,10 @@ static void option_instat_callback(struct urb *urb);
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+/* Celot products */
+#define CELOT_VENDOR_ID 0x211f
+#define CELOT_PRODUCT_CT680M 0x6801
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -482,38 +509,48 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
- { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -885,10 +922,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-
{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1017,6 +1053,13 @@ static int option_probe(struct usb_serial *serial,
serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
return -ENODEV;
+ /* Don't bind network interfaces on Huawei K3765 & K4505 */
+ if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
+ (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
+ serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
+ serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
+ return -ENODEV;
+
data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
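The new Huawei check in option_probe() above uses the standard trick for composite modems: a sub-driver probe that returns -ENODEV leaves the interface unclaimed, so another driver (here, the network function on interface 1 of the K3765/K4505) can bind it instead. A hedged sketch of that pattern on its own (example_probe() is illustrative; the vendor/product macros are the ones added in this hunk, and the le16_to_cpu() conversions are an addition for byte-order clarity, not taken from the original):

/* Decline one interface of a composite device so another driver may claim it. */
static int example_probe(struct usb_serial *serial,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = serial->dev;
	u8 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;

	if (le16_to_cpu(udev->descriptor.idVendor) == HUAWEI_VENDOR_ID &&
	    (le16_to_cpu(udev->descriptor.idProduct) == HUAWEI_PRODUCT_K3765 ||
	     le16_to_cpu(udev->descriptor.idProduct) == HUAWEI_PRODUCT_K4505) &&
	    ifnum == 1)
		return -ENODEV;	/* not a serial function; leave it for the net driver */

	return 0;	/* otherwise accept the interface */
}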
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 6b600182227..8ae4c6cbc38 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
- if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
+ if (!usb_serial_handle_sysrq_char(port, data[i]))
tty_insert_flip_char(tty, data[i], tty_flag);
} else {
tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a871645389d..43eb9bdad42 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -128,6 +128,10 @@
#define CRESSI_VENDOR_ID 0x04b8
#define CRESSI_EDY_PRODUCT_ID 0x0521
+/* Zeagle dive computer interface */
+#define ZEAGLE_VENDOR_ID 0x04b8
+#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
+
/* Sony, USB data cable for CMD-Jxx mobile phones */
#define SONY_VENDOR_ID 0x054c
#define SONY_QN3USB_PRODUCT_ID 0x0437
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
new file mode 100644
index 00000000000..e986002b384
--- /dev/null
+++ b/drivers/usb/serial/ssu100.c
@@ -0,0 +1,774 @@
+/*
+ * usb-serial driver for Quatech SSU-100
+ *
+ * based on ftdi_sio.c and the original serqt_usb.c from Quatech
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/uaccess.h>
+
+#define QT_OPEN_CLOSE_CHANNEL 0xca
+#define QT_SET_GET_DEVICE 0xc2
+#define QT_SET_GET_REGISTER 0xc0
+#define QT_GET_SET_PREBUF_TRIG_LVL 0xcc
+#define QT_SET_ATF 0xcd
+#define QT_GET_SET_UART 0xc1
+#define QT_TRANSFER_IN 0xc0
+#define QT_HW_FLOW_CONTROL_MASK 0xc5
+#define QT_SW_FLOW_CONTROL_MASK 0xc6
+
+#define SERIAL_MSR_MASK 0xf0
+
+#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
+
+#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
+
+#define MAX_BAUD_RATE 460800
+
+#define ATC_DISABLED 0x00
+#define DUPMODE_BITS 0xc0
+#define RR_BITS 0x03
+#define LOOPMODE_BITS 0x41
+#define RS232_MODE 0x00
+#define RTSCTS_TO_CONNECTOR 0x40
+#define CLKS_X4 0x02
+#define FULLPWRBIT 0x00000080
+#define NEXT_BOARD_POWER_BIT 0x00000004
+
+static int debug;
+
+/* Version Information */
+#define DRIVER_VERSION "v0.1"
+#define DRIVER_DESC "Quatech SSU-100 USB to Serial Driver"
+
+#define USB_VENDOR_ID_QUATECH 0x061d /* Quatech VID */
+#define QUATECH_SSU100 0xC020 /* SSU100 */
+
+static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+
+static struct usb_driver ssu100_driver = {
+ .name = "ssu100",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .suspend = usb_serial_suspend,
+ .resume = usb_serial_resume,
+ .no_dynamic_id = 1,
+ .supports_autosuspend = 1,
+};
+
+struct ssu100_port_private {
+ spinlock_t status_lock;
+ u8 shadowLSR;
+ u8 shadowMSR;
+ wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+ unsigned short max_packet_size;
+ struct async_icount icount;
+};
+
+static void ssu100_release(struct usb_serial *serial)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(*serial->port);
+
+ dbg("%s", __func__);
+ kfree(priv);
+}
+
+static inline int ssu100_control_msg(struct usb_device *dev,
+ u8 request, u16 data, u16 index)
+{
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ request, 0x40, data, index,
+ NULL, 0, 300);
+}
+
+static inline int ssu100_setdevice(struct usb_device *dev, u8 *data)
+{
+ u16 x = ((u16)(data[1] << 8) | (u16)(data[0]));
+
+ return ssu100_control_msg(dev, QT_SET_GET_DEVICE, x, 0);
+}
+
+
+static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
+{
+ return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_DEVICE, 0xc0, 0, 0,
+ data, 3, 300);
+}
+
+static inline int ssu100_getregister(struct usb_device *dev,
+ unsigned short uart,
+ unsigned short reg,
+ u8 *data)
+{
+ return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0xc0, reg,
+ uart, data, sizeof(*data), 300);
+
+}
+
+
+static inline int ssu100_setregister(struct usb_device *dev,
+ unsigned short uart,
+ unsigned short reg,
+ u16 data)
+{
+ u16 value = (data << 8) | reg;
+
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0x40, value, uart,
+ NULL, 0, 300);
+
+}
+
+#define set_mctrl(dev, set) update_mctrl((dev), (set), 0)
+#define clear_mctrl(dev, clear) update_mctrl((dev), 0, (clear))
+
+/* these do not deal with device that have more than 1 port */
+static inline int update_mctrl(struct usb_device *dev, unsigned int set,
+ unsigned int clear)
+{
+ unsigned urb_value;
+ int result;
+
+ if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) {
+ dbg("%s - DTR|RTS not being set|cleared", __func__);
+ return 0; /* no change */
+ }
+
+ clear &= ~set; /* 'set' takes precedence over 'clear' */
+ urb_value = 0;
+ if (set & TIOCM_DTR)
+ urb_value |= UART_MCR_DTR;
+ if (set & TIOCM_RTS)
+ urb_value |= UART_MCR_RTS;
+
+ result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
+ if (result < 0)
+ dbg("%s Error from MODEM_CTRL urb", __func__);
+
+ return result;
+}
+
+static int ssu100_initdevice(struct usb_device *dev)
+{
+ u8 *data;
+ int result = 0;
+
+ dbg("%s", __func__);
+
+ data = kzalloc(3, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ result = ssu100_getdevice(dev, data);
+ if (result < 0) {
+ dbg("%s - get_device failed %i", __func__, result);
+ goto out;
+ }
+
+ data[1] &= ~FULLPWRBIT;
+
+ result = ssu100_setdevice(dev, data);
+ if (result < 0) {
+ dbg("%s - setdevice failed %i", __func__, result);
+ goto out;
+ }
+
+ result = ssu100_control_msg(dev, QT_GET_SET_PREBUF_TRIG_LVL, 128, 0);
+ if (result < 0) {
+ dbg("%s - set prebuffer level failed %i", __func__, result);
+ goto out;
+ }
+
+ result = ssu100_control_msg(dev, QT_SET_ATF, ATC_DISABLED, 0);
+ if (result < 0) {
+ dbg("%s - set ATF failed %i", __func__, result);
+ goto out;
+ }
+
+ result = ssu100_getdevice(dev, data);
+ if (result < 0) {
+ dbg("%s - get_device failed %i", __func__, result);
+ goto out;
+ }
+
+ data[0] &= ~(RR_BITS | DUPMODE_BITS);
+ data[0] |= CLKS_X4;
+ data[1] &= ~(LOOPMODE_BITS);
+ data[1] |= RS232_MODE;
+
+ result = ssu100_setdevice(dev, data);
+ if (result < 0) {
+ dbg("%s - setdevice failed %i", __func__, result);
+ goto out;
+ }
+
+out: kfree(data);
+ return result;
+
+}
+
+
+static void ssu100_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ struct usb_device *dev = port->serial->dev;
+ struct ktermios *termios = tty->termios;
+ u16 baud, divisor, remainder;
+ unsigned int cflag = termios->c_cflag;
+ u16 urb_value = 0; /* will hold the new flags */
+ int result;
+
+ dbg("%s", __func__);
+
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ urb_value |= UART_LCR_PARITY;
+ else
+ urb_value |= SERIAL_EVEN_PARITY;
+ }
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ urb_value |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ urb_value |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ urb_value |= UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ urb_value |= UART_LCR_WLEN8;
+ break;
+ }
+
+ baud = tty_get_baud_rate(tty);
+ if (!baud)
+ baud = 9600;
+
+ dbg("%s - got baud = %d\n", __func__, baud);
+
+
+ divisor = MAX_BAUD_RATE / baud;
+ remainder = MAX_BAUD_RATE % baud;
+ if (((remainder * 2) >= baud) && (baud != 110))
+ divisor++;
+
+ urb_value = urb_value << 8;
+
+ result = ssu100_control_msg(dev, QT_GET_SET_UART, divisor, urb_value);
+ if (result < 0)
+ dbg("%s - set uart failed", __func__);
+
+ if (cflag & CRTSCTS)
+ result = ssu100_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
+ SERIAL_CRTSCTS, 0);
+ else
+ result = ssu100_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
+ 0, 0);
+ if (result < 0)
+ dbg("%s - set HW flow control failed", __func__);
+
+ if (I_IXOFF(tty) || I_IXON(tty)) {
+ u16 x = ((u16)(START_CHAR(tty) << 8) | (u16)(STOP_CHAR(tty)));
+
+ result = ssu100_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
+ x, 0);
+ } else
+ result = ssu100_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
+ 0, 0);
+
+ if (result < 0)
+ dbg("%s - set SW flow control failed", __func__);
+
+}
+
+
+static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ struct usb_device *dev = port->serial->dev;
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ u8 *data;
+ int result;
+ unsigned long flags;
+
+ dbg("%s - port %d", __func__, port->number);
+
+ data = kzalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_OPEN_CLOSE_CHANNEL,
+ QT_TRANSFER_IN, 0x01,
+ 0, data, 2, 300);
+ if (result < 0) {
+ dbg("%s - open failed %i", __func__, result);
+ kfree(data);
+ return result;
+ }
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowLSR = data[0];
+ priv->shadowMSR = data[1];
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ kfree(data);
+
+/* set to 9600 */
+ result = ssu100_control_msg(dev, QT_GET_SET_UART, 0x30, 0x0300);
+ if (result < 0)
+ dbg("%s - set uart failed", __func__);
+
+ if (tty)
+ ssu100_set_termios(tty, port, tty->termios);
+
+ return usb_serial_generic_open(tty, port);
+}
+
+static void ssu100_close(struct usb_serial_port *port)
+{
+ dbg("%s", __func__);
+ usb_serial_generic_close(port);
+}
+
+static int get_serial_info(struct usb_serial_port *port,
+ struct serial_struct __user *retinfo)
+{
+ struct serial_struct tmp;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.line = port->serial->minor;
+ tmp.port = 0;
+ tmp.irq = 0;
+ tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
+ tmp.xmit_fifo_size = port->bulk_out_size;
+ tmp.baud_base = 9600;
+ tmp.close_delay = 5*HZ;
+ tmp.closing_wait = 30*HZ;
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ struct async_icount prev, cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ prev = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ while (1) {
+ wait_event_interruptible(priv->delta_msr_wait,
+ ((priv->icount.rng != prev.rng) ||
+ (priv->icount.dsr != prev.dsr) ||
+ (priv->icount.dcd != prev.dcd) ||
+ (priv->icount.cts != prev.cts)));
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ cur = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if ((prev.rng == cur.rng) &&
+ (prev.dsr == cur.dsr) &&
+ (prev.dcd == cur.dcd) &&
+ (prev.cts == cur.cts))
+ return -EIO;
+
+ if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
+ (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
+ (arg & TIOCM_CD && (prev.dcd != cur.dcd)) ||
+ (arg & TIOCM_CTS && (prev.cts != cur.cts)))
+ return 0;
+ }
+ return 0;
+}
+
+static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ void __user *user_arg = (void __user *)arg;
+
+ dbg("%s cmd 0x%04x", __func__, cmd);
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return get_serial_info(port,
+ (struct serial_struct __user *) arg);
+
+ case TIOCMIWAIT:
+ return wait_modem_info(port, arg);
+
+ case TIOCGICOUNT:
+ {
+ struct serial_icounter_struct icount;
+ struct async_icount cnow = priv->icount;
+ memset(&icount, 0, sizeof(icount));
+ icount.cts = cnow.cts;
+ icount.dsr = cnow.dsr;
+ icount.rng = cnow.rng;
+ icount.dcd = cnow.dcd;
+ icount.rx = cnow.rx;
+ icount.tx = cnow.tx;
+ icount.frame = cnow.frame;
+ icount.overrun = cnow.overrun;
+ icount.parity = cnow.parity;
+ icount.brk = cnow.brk;
+ icount.buf_overrun = cnow.buf_overrun;
+ if (copy_to_user(user_arg, &icount, sizeof(icount)))
+ return -EFAULT;
+ return 0;
+ }
+
+ default:
+ break;
+ }
+
+ dbg("%s arg not supported", __func__);
+
+ return -ENOIOCTLCMD;
+}
+
+static void ssu100_set_max_packet_size(struct usb_serial_port *port)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ struct usb_device *udev = serial->dev;
+
+ struct usb_interface *interface = serial->interface;
+ struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+
+ unsigned num_endpoints;
+ int i;
+ unsigned long flags;
+
+ num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+ dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+
+ for (i = 0; i < num_endpoints; i++) {
+ dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
+ interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
+ ep_desc = &interface->cur_altsetting->endpoint[i].desc;
+ }
+
+ /* set max packet size based on descriptor */
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->max_packet_size = ep_desc->wMaxPacketSize;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
+}
+
+static int ssu100_attach(struct usb_serial *serial)
+{
+ struct ssu100_port_private *priv;
+ struct usb_serial_port *port = *serial->port;
+
+ dbg("%s", __func__);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&port->dev, "%s - kzalloc(%Zd) failed.\n", __func__,
+ sizeof(*priv));
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&priv->status_lock);
+ init_waitqueue_head(&priv->delta_msr_wait);
+ usb_set_serial_port_data(port, priv);
+ ssu100_set_max_packet_size(port);
+
+ return ssu100_initdevice(serial->dev);
+}
+
+static int ssu100_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_device *dev = port->serial->dev;
+ u8 *d;
+ int r;
+
+ dbg("%s\n", __func__);
+
+ d = kzalloc(2, GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ r = ssu100_getregister(dev, 0, UART_MCR, d);
+ if (r < 0)
+ goto mget_out;
+
+ r = ssu100_getregister(dev, 0, UART_MSR, d+1);
+ if (r < 0)
+ goto mget_out;
+
+ r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
+ (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
+ (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
+ (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
+ (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
+ (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
+
+mget_out:
+ kfree(d);
+ return r;
+}
+
+static int ssu100_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_device *dev = port->serial->dev;
+
+ dbg("%s\n", __func__);
+ return update_mctrl(dev, set, clear);
+}
+
+static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_device *dev = port->serial->dev;
+
+ dbg("%s\n", __func__);
+
+ mutex_lock(&port->serial->disc_mutex);
+ if (!port->serial->disconnected) {
+ /* Disable flow control */
+ if (!on &&
+ ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
+ dev_err(&port->dev, "error from flowcontrol urb\n");
+ /* drop RTS and DTR */
+ if (on)
+ set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ else
+ clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+ mutex_unlock(&port->serial->disc_mutex);
+}
+
+static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowMSR = msr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (msr & UART_MSR_ANY_DELTA) {
+ /* update input line counters */
+ if (msr & UART_MSR_DCTS)
+ priv->icount.cts++;
+ if (msr & UART_MSR_DDSR)
+ priv->icount.dsr++;
+ if (msr & UART_MSR_DDCD)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+ wake_up_interruptible(&priv->delta_msr_wait);
+ }
+}
+
+static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+ char *tty_flag)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowLSR = lsr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ *tty_flag = TTY_NORMAL;
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ /* we always want to update icount, but we only want to
+ * update tty_flag for one case */
+ if (lsr & UART_LSR_BI) {
+ priv->icount.brk++;
+ *tty_flag = TTY_BREAK;
+ usb_serial_handle_break(port);
+ }
+ if (lsr & UART_LSR_PE) {
+ priv->icount.parity++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_PARITY;
+ }
+ if (lsr & UART_LSR_FE) {
+ priv->icount.frame++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_FRAME;
+ }
+ if (lsr & UART_LSR_OE) {
+ priv->icount.overrun++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_OVERRUN;
+ }
+ }
+
+}
+
+static int ssu100_process_packet(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ssu100_port_private *priv,
+ char *packet, int len)
+{
+ int i;
+ char flag = TTY_NORMAL;
+ char *ch;
+
+ dbg("%s - port %d", __func__, port->number);
+
+ if ((len >= 4) &&
+ (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+ ((packet[2] == 0x00) || (packet[2] == 0x01))) {
+ if (packet[2] == 0x00) {
+ ssu100_update_lsr(port, packet[3], &flag);
+ if (flag == TTY_OVERRUN)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+ if (packet[2] == 0x01)
+ ssu100_update_msr(port, packet[3]);
+
+ len -= 4;
+ ch = packet + 4;
+ } else
+ ch = packet;
+
+ if (!len)
+ return 0; /* status only */
+
+ if (port->port.console && port->sysrq) {
+ for (i = 0; i < len; i++, ch++) {
+ if (!usb_serial_handle_sysrq_char(port, *ch))
+ tty_insert_flip_char(tty, *ch, flag);
+ }
+ } else
+ tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
+
+ return len;
+}
+
+static void ssu100_process_read_urb(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ char *data = (char *)urb->transfer_buffer;
+ struct tty_struct *tty;
+ int count = 0;
+ int i;
+ int len;
+
+ dbg("%s", __func__);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ for (i = 0; i < urb->actual_length; i += priv->max_packet_size) {
+ len = min_t(int, urb->actual_length - i, priv->max_packet_size);
+ count += ssu100_process_packet(tty, port, priv, &data[i], len);
+ }
+
+ if (count)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
+
+static struct usb_serial_driver ssu100_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ssu100",
+ },
+ .description = DRIVER_DESC,
+ .id_table = id_table,
+ .usb_driver = &ssu100_driver,
+ .num_ports = 1,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
+ .open = ssu100_open,
+ .close = ssu100_close,
+ .attach = ssu100_attach,
+ .release = ssu100_release,
+ .dtr_rts = ssu100_dtr_rts,
+ .process_read_urb = ssu100_process_read_urb,
+ .tiocmget = ssu100_tiocmget,
+ .tiocmset = ssu100_tiocmset,
+ .ioctl = ssu100_ioctl,
+ .set_termios = ssu100_set_termios,
+ .disconnect = usb_serial_generic_disconnect,
+};
+
+static int __init ssu100_init(void)
+{
+ int retval;
+
+ dbg("%s", __func__);
+
+ /* register with usb-serial */
+ retval = usb_serial_register(&ssu100_device);
+
+ if (retval)
+ goto failed_usb_sio_register;
+
+ retval = usb_register(&ssu100_driver);
+ if (retval)
+ goto failed_usb_register;
+
+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+ DRIVER_DESC "\n");
+
+ return 0;
+
+failed_usb_register:
+ usb_serial_deregister(&ssu100_device);
+failed_usb_sio_register:
+ return retval;
+}
+
+static void __exit ssu100_exit(void)
+{
+ usb_deregister(&ssu100_driver);
+ usb_serial_deregister(&ssu100_device);
+}
+
+module_init(ssu100_init);
+module_exit(ssu100_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
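One detail of the new ssu100 driver worth spelling out: ssu100_set_termios() derives the UART divisor as MAX_BAUD_RATE / baud with round-to-nearest (and a carve-out so 110 baud is never rounded up). A standalone sketch of that rounding, assuming the driver's 460800 ceiling:

#include <stdio.h>

#define MAX_BAUD_RATE 460800

/* Same rounding as ssu100_set_termios(): nearest divisor, never bumped at 110. */
static unsigned int ssu100_divisor(unsigned int baud)
{
	unsigned int divisor = MAX_BAUD_RATE / baud;
	unsigned int remainder = MAX_BAUD_RATE % baud;

	if ((remainder * 2 >= baud) && baud != 110)
		divisor++;
	return divisor;
}

int main(void)
{
	printf("115200 -> %u\n", ssu100_divisor(115200)); /* 4 (exact) */
	printf("  9600 -> %u\n", ssu100_divisor(9600));   /* 48 (exact) */
	printf("250000 -> %u\n", ssu100_divisor(250000)); /* 2 (1.84 rounded up) */
	return 0;
}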
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 941c2d409f8..7a2177c79bd 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -653,6 +653,7 @@ exit:
return id;
}
+/* Caller must hold table_lock */
static struct usb_serial_driver *search_serial_device(
struct usb_interface *iface)
{
@@ -718,17 +719,24 @@ int usb_serial_probe(struct usb_interface *interface,
int num_ports = 0;
int max_endpoints;
- lock_kernel(); /* guard against unloading a serial driver module */
+ mutex_lock(&table_lock);
type = search_serial_device(interface);
if (!type) {
- unlock_kernel();
+ mutex_unlock(&table_lock);
dbg("none matched");
return -ENODEV;
}
+ if (!try_module_get(type->driver.owner)) {
+ mutex_unlock(&table_lock);
+ dev_err(&interface->dev, "module get failed, exiting\n");
+ return -EIO;
+ }
+ mutex_unlock(&table_lock);
+
serial = create_serial(dev, interface, type);
if (!serial) {
- unlock_kernel();
+ module_put(type->driver.owner);
dev_err(&interface->dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
@@ -737,22 +745,13 @@ int usb_serial_probe(struct usb_interface *interface,
if (type->probe) {
const struct usb_device_id *id;
- if (!try_module_get(type->driver.owner)) {
- unlock_kernel();
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- kfree(serial);
- return -EIO;
- }
-
id = get_iface_id(type, interface);
retval = type->probe(serial, id);
- module_put(type->driver.owner);
if (retval) {
- unlock_kernel();
dbg("sub driver rejected device");
kfree(serial);
+ module_put(type->driver.owner);
return retval;
}
}
@@ -822,9 +821,9 @@ int usb_serial_probe(struct usb_interface *interface,
* properly during a later invocation of usb_serial_probe
*/
if (num_bulk_in == 0 || num_bulk_out == 0) {
- unlock_kernel();
dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
kfree(serial);
+ module_put(type->driver.owner);
return -ENODEV;
}
}
@@ -835,27 +834,18 @@ int usb_serial_probe(struct usb_interface *interface,
if (type == &usb_serial_generic_device) {
num_ports = num_bulk_out;
if (num_ports == 0) {
- unlock_kernel();
dev_err(&interface->dev,
"Generic device with no bulk out, not allowed.\n");
kfree(serial);
+ module_put(type->driver.owner);
return -EIO;
}
}
#endif
if (!num_ports) {
/* if this device type has a calc_num_ports function, call it */
- if (type->calc_num_ports) {
- if (!try_module_get(type->driver.owner)) {
- unlock_kernel();
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- kfree(serial);
- return -EIO;
- }
+ if (type->calc_num_ports)
num_ports = type->calc_num_ports(serial);
- module_put(type->driver.owner);
- }
if (!num_ports)
num_ports = type->num_ports;
}
@@ -878,7 +868,6 @@ int usb_serial_probe(struct usb_interface *interface,
max_endpoints = max(max_endpoints, num_interrupt_out);
max_endpoints = max(max_endpoints, (int)serial->num_ports);
serial->num_port_pointers = max_endpoints;
- unlock_kernel();
dbg("%s - setting up %d port structures for this device",
__func__, max_endpoints);
@@ -1045,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface,
/* if this device type has an attach function, call it */
if (type->attach) {
- if (!try_module_get(type->driver.owner)) {
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- goto probe_error;
- }
retval = type->attach(serial);
- module_put(type->driver.owner);
if (retval < 0)
goto probe_error;
serial->attached = 1;
@@ -1077,6 +1060,8 @@ int usb_serial_probe(struct usb_interface *interface,
dev_set_name(&port->dev, "ttyUSB%d", port->number);
dbg ("%s - registering %s", __func__, dev_name(&port->dev));
port->dev_state = PORT_REGISTERING;
+ device_enable_async_suspend(&port->dev);
+
retval = device_add(&port->dev);
if (retval) {
dev_err(&port->dev, "Error registering port device, "
@@ -1092,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface,
exit:
/* success */
usb_set_intfdata(interface, serial);
+ module_put(type->driver.owner);
return 0;
probe_error:
usb_serial_put(serial);
+ module_put(type->driver.owner);
return -EIO;
}
EXPORT_SYMBOL_GPL(usb_serial_probe);
@@ -1349,6 +1336,7 @@ int usb_serial_register(struct usb_serial_driver *driver)
driver->description = driver->driver.name;
/* Add this device to our list of devices */
+ mutex_lock(&table_lock);
list_add(&driver->driver_list, &usb_serial_driver_list);
retval = usb_serial_bus_register(driver);
@@ -1360,6 +1348,7 @@ int usb_serial_register(struct usb_serial_driver *driver)
printk(KERN_INFO "USB Serial support registered for %s\n",
driver->description);
+ mutex_unlock(&table_lock);
return retval;
}
EXPORT_SYMBOL_GPL(usb_serial_register);
@@ -1370,8 +1359,10 @@ void usb_serial_deregister(struct usb_serial_driver *device)
/* must be called with BKL held */
printk(KERN_INFO "USB Serial deregistering driver %s\n",
device->description);
+ mutex_lock(&table_lock);
list_del(&device->driver_list);
usb_serial_bus_deregister(device);
+ mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(usb_serial_deregister);
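The usb-serial core rework above swaps the big kernel lock for table_lock and simplifies module lifetime handling: a single try_module_get() on the sub-driver is taken right after the table lookup, and a single module_put() is issued on every exit path, instead of get/put pairs around each callback. A hedged sketch of that ownership shape (probe_sketch() is illustrative and omits the real function's allocation, endpoint scanning and registration steps):

/* Sketch of the reference pattern now used by usb_serial_probe(). */
static int probe_sketch(struct usb_interface *interface)
{
	struct usb_serial_driver *type;
	int retval = 0;

	mutex_lock(&table_lock);
	type = search_serial_device(interface);
	if (!type) {
		mutex_unlock(&table_lock);
		return -ENODEV;
	}
	if (!try_module_get(type->driver.owner)) {
		mutex_unlock(&table_lock);
		return -EIO;		/* driver is being unloaded */
	}
	mutex_unlock(&table_lock);

	/* ... create_serial(), type->probe(), type->attach(), registration ... */

	module_put(type->driver.owner);	/* exactly one put, success or failure */
	return retval;
}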
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 54cc94277ac..6542ca40d50 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -269,7 +269,7 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
/* The firmware will time-out commands after 20 seconds. Some commands
* can legitimately take longer than this, so we use a different
* command that only waits for the interrupt and then sends status,
- * without having to send a new ATAPI command to the device.
+ * without having to send a new ATAPI command to the device.
*
* NOTE: There is some indication that a data transfer after a timeout
* may not work, but that is a condition that should never happen.
@@ -324,14 +324,14 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
/* Find the length we desire to read. */
switch (srb->cmnd[0]) {
- case INQUIRY:
- case REQUEST_SENSE: /* 16 or 18 bytes? spec says 18, lots of devices only have 16 */
- case MODE_SENSE:
- case MODE_SENSE_10:
- length = le16_to_cpu(fst->Count);
- break;
- default:
- length = scsi_bufflen(srb);
+ case INQUIRY:
+ case REQUEST_SENSE: /* 16 or 18 bytes? spec says 18, lots of devices only have 16 */
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ length = le16_to_cpu(fst->Count);
+ break;
+ default:
+ length = scsi_bufflen(srb);
}
/* verify that this amount is legal */
@@ -414,7 +414,7 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
/* should never hit here -- filtered in usb.c */
US_DEBUGP ("freecom unimplemented direction: %d\n",
us->srb->sc_data_direction);
- // Return fail, SCSI seems to handle this better.
+ /* Return fail, SCSI seems to handle this better. */
return USB_STOR_TRANSPORT_FAILED;
break;
}
@@ -494,8 +494,7 @@ static void pdump (void *ibuffer, int length)
offset = 0;
}
offset += sprintf (line+offset, "%08x:", i);
- }
- else if ((i & 7) == 0) {
+ } else if ((i & 7) == 0) {
offset += sprintf (line+offset, " -");
}
offset += sprintf (line+offset, " %02x", buffer[i] & 0xff);
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index e9cbc1467f7..6b9982cd542 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1456,8 +1456,7 @@ static int isd200_init_info(struct us_data *us)
int retStatus = ISD200_GOOD;
struct isd200_info *info;
- info = (struct isd200_info *)
- kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
if (!info)
retStatus = ISD200_ERROR;
else {
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index a7d0bf9d92a..90bb0175a15 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -336,6 +336,7 @@ static int usb_stor_control_thread(void * __us)
else {
US_DEBUG(usb_stor_show_command(us->srb));
us->proto_handler(us->srb, us);
+ usb_mark_last_busy(us->pusb_dev);
}
/* lock access to the state */
@@ -845,6 +846,7 @@ static int usb_stor_scan_thread(void * __us)
/* Should we unbind if no devices were detected? */
}
+ usb_autopm_put_interface(us->pusb_intf);
complete_and_exit(&us->scanning_done, 0);
}
@@ -968,6 +970,7 @@ int usb_stor_probe2(struct us_data *us)
goto BadDevice;
}
+ usb_autopm_get_interface_no_resume(us->pusb_intf);
wake_up_process(th);
return 0;
@@ -1040,6 +1043,7 @@ static struct usb_driver usb_storage_driver = {
.pre_reset = usb_stor_pre_reset,
.post_reset = usb_stor_post_reset,
.id_table = usb_storage_usb_ids,
+ .supports_autosuspend = 1,
.soft_unbind = 1,
};
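The usb-storage changes above enable autosuspend and keep the device awake only while it is genuinely busy: a PM reference is taken with usb_autopm_get_interface_no_resume() before the scan thread is woken, released with usb_autopm_put_interface() when scanning finishes, and usb_mark_last_busy() restarts the idle timer after each command completes. A hedged sketch of the get/put pairing (the helper names here are illustrative, not functions from usb.c):

/* Hold a PM reference across the initial scan (sketch, not the real code). */
static void start_scan_sketch(struct us_data *us, struct task_struct *scan_thread)
{
	usb_autopm_get_interface_no_resume(us->pusb_intf);
	wake_up_process(scan_thread);
}

static void scan_finished_sketch(struct us_data *us)
{
	/* called by the scan thread just before it exits */
	usb_autopm_put_interface(us->pusb_intf);
}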
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index d110588b56f..552679b8dbd 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -142,7 +142,7 @@ static int skel_release(struct inode *inode, struct file *file)
{
struct usb_skel *dev;
- dev = (struct usb_skel *)file->private_data;
+ dev = file->private_data;
if (dev == NULL)
return -ENODEV;
@@ -162,7 +162,7 @@ static int skel_flush(struct file *file, fl_owner_t id)
struct usb_skel *dev;
int res;
- dev = (struct usb_skel *)file->private_data;
+ dev = file->private_data;
if (dev == NULL)
return -ENODEV;
@@ -246,7 +246,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
int rv;
bool ongoing_io;
- dev = (struct usb_skel *)file->private_data;
+ dev = file->private_data;
/* if we cannot read at all, return EOF */
if (!dev->bulk_in_urb || !count)
@@ -401,7 +401,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
char *buf = NULL;
size_t writesize = min(count, (size_t)MAX_TRANSFER);
- dev = (struct usb_skel *)file->private_data;
+ dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
index 973321327c4..8739c4f4d01 100644
--- a/drivers/uwb/address.c
+++ b/drivers/uwb/address.c
@@ -363,10 +363,7 @@ size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
{
size_t result;
if (type)
- result = scnprintf(buf, buf_size,
- "%02x:%02x:%02x:%02x:%02x:%02x",
- addr[0], addr[1], addr[2],
- addr[3], addr[4], addr[5]);
+ result = scnprintf(buf, buf_size, "%pM", addr);
else
result = scnprintf(buf, buf_size, "%02x:%02x",
addr[1], addr[0]);
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
index a005d2a03b5..67872c83b67 100644
--- a/drivers/uwb/wlp/wss-lc.c
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -791,11 +791,8 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
} else {
if (printk_ratelimit())
dev_err(dev, "WLP: Destination neighbor (Ethernet: "
- "%02x:%02x:%02x:%02x:%02x:%02x, Dev: "
- "%02x:%02x) is not connected. \n", eth_addr[0],
- eth_addr[1], eth_addr[2], eth_addr[3],
- eth_addr[4], eth_addr[5], dev_addr->data[1],
- dev_addr->data[0]);
+ "%pM, Dev: %02x:%02x) is not connected.\n",
+ eth_addr, dev_addr->data[1], dev_addr->data[0]);
result = -EINVAL;
}
return result;
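Both UWB hunks above replace hand-rolled "%02x:%02x:..." formatting with the kernel's %pM extension, which prints a six-byte EUI-48/MAC address from a pointer. A minimal hedged usage sketch (print_addr_example() is illustrative):

#include <linux/kernel.h>

/* %pM expands a pointer to 6 bytes as aa:bb:cc:dd:ee:ff. */
static void print_addr_example(const unsigned char addr[6])
{
	char buf[18];			/* 17 characters plus NUL */

	scnprintf(buf, sizeof(buf), "%pM", addr);
	pr_info("address is %s\n", buf);
}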
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a7a2f..72ab71fdf05 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
int r, nlogs = 0;
while (datalen > 0) {
- if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+ if (unlikely(seg >= UIO_MAXIOV)) {
r = -ENOBUFS;
goto err;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d5299..344019774dd 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+ INIT_LIST_HEAD(&work->node);
+ work->fn = fn;
+ init_waitqueue_head(&work->done);
+ work->flushing = 0;
+ work->queue_seq = work->done_seq = 0;
+}
+
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev)
{
- struct vhost_work *work = &poll->work;
-
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
- INIT_LIST_HEAD(&work->node);
- work->fn = fn;
- init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
+ vhost_work_init(&poll->work, fn);
}
/* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
left;
}));
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
BUG_ON(flushing < 0);
}
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+ vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+ struct vhost_work *work)
{
- struct vhost_dev *dev = poll->dev;
- struct vhost_work *work = &poll->work;
unsigned long flags;
spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
spin_unlock_irqrestore(&dev->work_lock, flags);
}
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -201,6 +212,45 @@ static int vhost_worker(void *data)
}
}
+/* Helper to allocate iovec buffers for all vqs. */
+static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
+{
+ int i;
+ for (i = 0; i < dev->nvqs; ++i) {
+ dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+ UIO_MAXIOV, GFP_KERNEL);
+ dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+ GFP_KERNEL);
+ dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+ UIO_MAXIOV, GFP_KERNEL);
+
+ if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
+ !dev->vqs[i].heads)
+ goto err_nomem;
+ }
+ return 0;
+err_nomem:
+ for (; i >= 0; --i) {
+ kfree(dev->vqs[i].indirect);
+ kfree(dev->vqs[i].log);
+ kfree(dev->vqs[i].heads);
+ }
+ return -ENOMEM;
+}
+
+static void vhost_dev_free_iovecs(struct vhost_dev *dev)
+{
+ int i;
+ for (i = 0; i < dev->nvqs; ++i) {
+ kfree(dev->vqs[i].indirect);
+ dev->vqs[i].indirect = NULL;
+ kfree(dev->vqs[i].log);
+ dev->vqs[i].log = NULL;
+ kfree(dev->vqs[i].heads);
+ dev->vqs[i].heads = NULL;
+ }
+}
+
long vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue *vqs, int nvqs)
{
@@ -218,6 +268,9 @@ long vhost_dev_init(struct vhost_dev *dev,
dev->worker = NULL;
for (i = 0; i < dev->nvqs; ++i) {
+ dev->vqs[i].log = NULL;
+ dev->vqs[i].indirect = NULL;
+ dev->vqs[i].heads = NULL;
dev->vqs[i].dev = dev;
mutex_init(&dev->vqs[i].mutex);
vhost_vq_reset(dev, dev->vqs + i);
@@ -236,6 +289,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
return dev->mm == current->mm ? 0 : -EPERM;
}
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_attach_cgroups_struct attach;
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_work_flush(dev, &attach.work);
+ return attach.ret;
+}
+
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
@@ -255,14 +331,20 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
}
dev->worker = worker;
- err = cgroup_attach_task_current_cg(worker);
+ wake_up_process(worker); /* avoid contributing to loadavg */
+
+ err = vhost_attach_cgroups(dev);
+ if (err)
+ goto err_cgroup;
+
+ err = vhost_dev_alloc_iovecs(dev);
if (err)
goto err_cgroup;
- wake_up_process(worker); /* avoid contributing to loadavg */
return 0;
err_cgroup:
kthread_stop(worker);
+ dev->worker = NULL;
err_worker:
if (dev->mm)
mmput(dev->mm);
@@ -309,6 +391,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
fput(dev->vqs[i].call);
vhost_vq_reset(dev, dev->vqs + i);
}
+ vhost_dev_free_iovecs(dev);
if (dev->log_ctx)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
@@ -323,7 +406,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
dev->mm = NULL;
WARN_ON(!list_empty(&dev->work_list));
- kthread_stop(dev->worker);
+ if (dev->worker) {
+ kthread_stop(dev->worker);
+ dev->worker = NULL;
+ }
}
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -819,11 +905,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
if (r < 0)
return r;
len -= l;
- if (!len)
+ if (!len) {
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
return 0;
+ }
}
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
/* Length written exceeds what we have stored. This is a bug. */
BUG();
return 0;
@@ -907,7 +994,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
}
ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
- ARRAY_SIZE(vq->indirect));
+ UIO_MAXIOV);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index afd77295971..edc89298999 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -15,11 +15,6 @@
struct vhost_device;
-enum {
- /* Enough place for all fragments, head, and virtio net header. */
- VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
-};
-
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -93,12 +88,15 @@ struct vhost_virtqueue {
bool log_used;
u64 log_addr;
- struct iovec indirect[VHOST_NET_MAX_SG];
- struct iovec iov[VHOST_NET_MAX_SG];
- struct iovec hdr[VHOST_NET_MAX_SG];
+ struct iovec iov[UIO_MAXIOV];
+ /* hdr is used to store the virtio header.
+ * Since each iovec has >= 1 byte length, we never need more than
+ * header length entries to store the header. */
+ struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
+ struct iovec *indirect;
size_t vhost_hlen;
size_t sock_hlen;
- struct vring_used_elem heads[VHOST_NET_MAX_SG];
+ struct vring_used_elem *heads;
/* We use a kind of RCU to access private pointer.
* All readers access it from worker, which makes it possible to
* flush the vhost_work instead of synchronize_rcu. Therefore readers do
@@ -109,7 +107,7 @@ struct vhost_virtqueue {
void *private_data;
/* Log write descriptors */
void __user *log_base;
- struct vhost_log log[VHOST_NET_MAX_SG];
+ struct vhost_log *log;
};
struct vhost_dev {
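The hdr[] sizing comment above rests on a simple counting argument: each iovec entry describes at least one byte, so a header of N bytes can be scattered across at most N entries. A minimal illustration of the bound; the macro name is hypothetical, and the 12-byte figure is the usual layout of struct virtio_net_hdr_mrg_rxbuf rather than anything stated in the patch:

	/* Worst case is one byte per iovec entry, so the bound is simply the
	 * header size in bytes.  struct virtio_net_hdr_mrg_rxbuf is normally
	 * 12 bytes (10-byte virtio_net_hdr plus a 16-bit num_buffers field),
	 * so the hdr[] array above never needs more than 12 slots. */
	#define VHOST_MAX_HDR_IOVECS	sizeof(struct virtio_net_hdr_mrg_rxbuf)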
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 3d94a147172..8b31fdfefc9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1505,7 +1505,7 @@ config FB_SIS_315
config FB_VIA
tristate "VIA UniChrome (Pro) and Chrome9 display support"
- depends on FB && PCI
+ depends on FB && PCI && X86
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1871,6 +1871,7 @@ config FB_MBX_DEBUG
config FB_FSL_DIU
tristate "Freescale DIU framebuffer support"
depends on FB && FSL_SOC
+ select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1895,6 +1896,13 @@ config FB_W100
If unsure, say N.
+config SH_MIPI_DSI
+ tristate
+ depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
+
+config SH_LCD_MIPI_DSI
+ bool
+
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
@@ -1903,9 +1911,17 @@ config FB_SH_MOBILE_LCDC
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
+ select SH_MIPI_DSI if SH_LCD_MIPI_DSI
---help---
Frame buffer driver for the on-chip SH-Mobile LCD controller.
+config FB_SH_MOBILE_HDMI
+ tristate "SuperH Mobile HDMI controller support"
+ depends on FB_SH_MOBILE_LCDC
+ select FB_MODE_HELPERS
+ ---help---
+ Driver for the on-chip SH-Mobile HDMI controller.
+
config FB_TMIO
tristate "Toshiba Mobile IO FrameBuffer support"
depends on FB && MFD_CORE
@@ -1930,7 +1946,7 @@ config FB_TMIO_ACCELL
config FB_S3C
tristate "Samsung S3C framebuffer support"
- depends on FB && ARCH_S3C64XX
+ depends on FB && S3C_DEV_FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2229,6 +2245,15 @@ config FB_BROADSHEET
and could also have been called by other names when coupled with
a bridge adapter.
+config FB_JZ4740
+ tristate "JZ4740 LCD framebuffer support"
+ depends on FB && MACH_JZ4740
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ help
+ Framebuffer support for the JZ4740 SoC.
+
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index ddc2af2ba45..485e8ed1318 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -123,6 +123,8 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
obj-$(CONFIG_FB_PS3) += ps3fb.o
obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
+obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
+obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
obj-y += omap2/
@@ -131,6 +133,7 @@ obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
+obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index afe21e6eb54..1c2c68356ea 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -80,7 +80,10 @@ static void clcdfb_disable(struct clcd_fb *fb)
/*
* Disable CLCD clock source.
*/
- clk_disable(fb->clk);
+ if (fb->clk_enabled) {
+ fb->clk_enabled = false;
+ clk_disable(fb->clk);
+ }
}
static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
@@ -88,7 +91,10 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
/*
* Enable the CLCD clock source.
*/
- clk_enable(fb->clk);
+ if (!fb->clk_enabled) {
+ fb->clk_enabled = true;
+ clk_enable(fb->clk);
+ }
/*
* Bring up by first enabling..
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 09f1b9b462f..4dc13467281 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -273,7 +273,7 @@ static int __devinit bw2_do_default_mode(struct bw2_par *par,
return 0;
}
-static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit bw2_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -350,7 +350,7 @@ out_err:
return err;
}
-static int __devexit bw2_remove(struct of_device *op)
+static int __devexit bw2_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct bw2_par *par = info->par;
@@ -390,12 +390,12 @@ static int __init bw2_init(void)
if (fb_get_options("bw2fb", NULL))
return -ENODEV;
- return of_register_driver(&bw2_driver, &of_bus_type);
+ return of_register_platform_driver(&bw2_driver);
}
static void __exit bw2_exit(void)
{
- of_unregister_driver(&bw2_driver);
+ of_unregister_platform_driver(&bw2_driver);
}
module_init(bw2_init);
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index e5dc2241194..24249535ac8 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -446,7 +446,7 @@ static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] __devinitdata = {
{ .size = 0 }
};
-static void cg14_unmap_regs(struct of_device *op, struct fb_info *info,
+static void cg14_unmap_regs(struct platform_device *op, struct fb_info *info,
struct cg14_par *par)
{
if (par->regs)
@@ -463,7 +463,7 @@ static void cg14_unmap_regs(struct of_device *op, struct fb_info *info,
info->screen_base, info->fix.smem_len);
}
-static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit cg14_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -570,7 +570,7 @@ out_err:
return err;
}
-static int __devexit cg14_remove(struct of_device *op)
+static int __devexit cg14_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct cg14_par *par = info->par;
@@ -610,12 +610,12 @@ static int __init cg14_init(void)
if (fb_get_options("cg14fb", NULL))
return -ENODEV;
- return of_register_driver(&cg14_driver, &of_bus_type);
+ return of_register_platform_driver(&cg14_driver);
}
static void __exit cg14_exit(void)
{
- of_unregister_driver(&cg14_driver);
+ of_unregister_platform_driver(&cg14_driver);
}
module_init(cg14_init);
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 558d73a948a..09c0c3c4248 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -346,7 +346,7 @@ static int __devinit cg3_do_default_mode(struct cg3_par *par)
return 0;
}
-static int __devinit cg3_probe(struct of_device *op,
+static int __devinit cg3_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -433,7 +433,7 @@ out_err:
return err;
}
-static int __devexit cg3_remove(struct of_device *op)
+static int __devexit cg3_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct cg3_par *par = info->par;
@@ -477,12 +477,12 @@ static int __init cg3_init(void)
if (fb_get_options("cg3fb", NULL))
return -ENODEV;
- return of_register_driver(&cg3_driver, &of_bus_type);
+ return of_register_platform_driver(&cg3_driver);
}
static void __exit cg3_exit(void)
{
- of_unregister_driver(&cg3_driver);
+ of_unregister_platform_driver(&cg3_driver);
}
module_init(cg3_init);
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 480d761a27a..2b5a97058b0 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -718,7 +718,7 @@ static void __devinit cg6_chip_init(struct fb_info *info)
sbus_writel(info->var.yres - 1, &fbc->clipmaxy);
}
-static void cg6_unmap_regs(struct of_device *op, struct fb_info *info,
+static void cg6_unmap_regs(struct platform_device *op, struct fb_info *info,
struct cg6_par *par)
{
if (par->fbc)
@@ -737,7 +737,7 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info,
info->fix.smem_len);
}
-static int __devinit cg6_probe(struct of_device *op,
+static int __devinit cg6_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -827,7 +827,7 @@ out_err:
return err;
}
-static int __devexit cg6_remove(struct of_device *op)
+static int __devexit cg6_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct cg6_par *par = info->par;
@@ -870,12 +870,12 @@ static int __init cg6_init(void)
if (fb_get_options("cg6fb", NULL))
return -ENODEV;
- return of_register_driver(&cg6_driver, &of_bus_type);
+ return of_register_platform_driver(&cg6_driver);
}
static void __exit cg6_exit(void)
{
- of_unregister_driver(&cg6_driver);
+ of_unregister_platform_driver(&cg6_driver);
}
module_init(cg6_init);
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 8e8f18d29d7..5a35f22372b 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -6,7 +6,7 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EMBEDDED || !X86
- depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !ARCH_VERSATILE && !SUPERH && !BLACKFIN && !AVR32 && !MN10300
+ depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index af88651b073..28b1a834906 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -22,7 +22,7 @@
/*
* Accelerated handlers.
*/
-static inline void update_attr(u8 *dst, u8 *src, int attribute,
+static void update_attr(u8 *dst, u8 *src, int attribute,
struct vc_data *vc)
{
int i, offset = (vc->vc_font.height < 10) ? 1 : 2;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index b0a3fa00706..7ccc967831f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -283,10 +283,11 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
struct fbcon_ops *ops = info->fbcon_par;
return (info->state != FBINFO_STATE_RUNNING ||
- vc->vc_mode != KD_TEXT || ops->graphics);
+ vc->vc_mode != KD_TEXT || ops->graphics) &&
+ !vt_force_oops_output(vc);
}
-static inline int get_color(struct vc_data *vc, struct fb_info *info,
+static int get_color(struct vc_data *vc, struct fb_info *info,
u16 c, int is_fg)
{
int depth = fb_get_color_depth(&info->var, &info->fix);
@@ -1073,6 +1074,7 @@ static void fbcon_init(struct vc_data *vc, int init)
if (p->userfont)
charcnt = FNTCHARCNT(p->fontdata);
+ vc->vc_panic_force_write = !!(info->flags & FBINFO_CAN_FORCE_OUTPUT);
vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
if (charcnt == 256) {
@@ -2342,6 +2344,30 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
return 0;
}
+static int fbcon_debug_enter(struct vc_data *vc)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ ops->save_graphics = ops->graphics;
+ ops->graphics = 0;
+ if (info->fbops->fb_debug_enter)
+ info->fbops->fb_debug_enter(info);
+ fbcon_set_palette(vc, color_table);
+ return 0;
+}
+
+static int fbcon_debug_leave(struct vc_data *vc)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ ops->graphics = ops->save_graphics;
+ if (info->fbops->fb_debug_leave)
+ info->fbops->fb_debug_leave(info);
+ return 0;
+}
+
static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
{
u8 *fontdata = vc->vc_font.data;
@@ -3276,6 +3302,8 @@ static const struct consw fb_con = {
.con_screen_pos = fbcon_screen_pos,
.con_getxy = fbcon_getxy,
.con_resize = fbcon_resize,
+ .con_debug_enter = fbcon_debug_enter,
+ .con_debug_leave = fbcon_debug_leave,
};
static struct notifier_block fbcon_event_notifier = {
@@ -3480,7 +3508,7 @@ static void fbcon_exit(void)
softback_buf = 0UL;
for (i = 0; i < FB_MAX; i++) {
- int pending;
+ int pending = 0;
mapped = 0;
info = registered_fb[i];
@@ -3488,7 +3516,8 @@ static void fbcon_exit(void)
if (info == NULL)
continue;
- pending = cancel_work_sync(&info->queue);
+ if (info->queue.func)
+ pending = cancel_work_sync(&info->queue);
DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
"no"));
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 89a346880ec..6bd2e0c7f20 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -74,6 +74,7 @@ struct fbcon_ops {
int cursor_reset;
int blank_state;
int graphics;
+ int save_graphics; /* for debug enter/leave */
int flags;
int rotate;
int cur_rotate;
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
index d135671d996..41b32ae23da 100644
--- a/drivers/video/console/fbcon_ccw.c
+++ b/drivers/video/console/fbcon_ccw.c
@@ -22,7 +22,7 @@
* Rotation 270 degrees
*/
-static inline void ccw_update_attr(u8 *dst, u8 *src, int attribute,
+static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
struct vc_data *vc)
{
int i, j, offset = (vc->vc_font.height < 10) ? 1 : 2;
diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
index 126110f8454..6a737827beb 100644
--- a/drivers/video/console/fbcon_cw.c
+++ b/drivers/video/console/fbcon_cw.c
@@ -22,7 +22,7 @@
* Rotation 90 degrees
*/
-static inline void cw_update_attr(u8 *dst, u8 *src, int attribute,
+static void cw_update_attr(u8 *dst, u8 *src, int attribute,
struct vc_data *vc)
{
int i, j, offset = (vc->vc_font.height < 10) ? 1 : 2;
diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
index 93a3e7381b5..ff0872c0498 100644
--- a/drivers/video/console/fbcon_ud.c
+++ b/drivers/video/console/fbcon_ud.c
@@ -22,7 +22,7 @@
* Rotation 180 degrees
*/
-static inline void ud_update_attr(u8 *dst, u8 *src, int attribute,
+static void ud_update_attr(u8 *dst, u8 *src, int attribute,
struct vc_data *vc)
{
int i, offset = (vc->vc_font.height < 10) ? 1 : 2;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 182dd6f8aad..54e32c51361 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1108,7 +1108,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
charmap += 4 * cmapsz;
#endif
- unlock_kernel();
spin_lock_irq(&vga_lock);
/* First, the Sequencer */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
@@ -1192,7 +1191,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
}
spin_unlock_irq(&vga_lock);
- lock_kernel();
return 0;
}
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 49fcbe8f18a..c225dcce89e 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -40,6 +40,8 @@
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 4a56f46af40..70477c2e4b6 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,10 +13,10 @@
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
-
+#include <linux/pci.h>
#include <video/vga.h>
-static struct fb_var_screeninfo efifb_defined __initdata = {
+static struct fb_var_screeninfo efifb_defined __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
@@ -27,7 +27,7 @@ static struct fb_var_screeninfo efifb_defined __initdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo efifb_fix __initdata = {
+static struct fb_fix_screeninfo efifb_fix __devinitdata = {
.id = "EFI VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
@@ -39,17 +39,31 @@ enum {
M_I20, /* 20-Inch iMac */
M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
+ M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
+ M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, 3,1th gen */
+ M_MINI_4_1, /* Mac Mini, 4,1th gen */
M_MB, /* MacBook */
M_MB_2, /* MacBook, 2nd rev. */
M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
M_MBA, /* MacBook Air */
M_MBP, /* MacBook Pro */
M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro 2,2nd gen */
M_MBP_SR, /* MacBook Pro (Santa Rosa) */
M_MBP_4, /* MacBook Pro, 4th gen */
M_MBP_5_1, /* MacBook Pro, 5,1th gen */
+ M_MBP_5_2, /* MacBook Pro, 5,2th gen */
+ M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
+ M_MBP_6_1, /* MacBook Pro, 6,1th gen */
+ M_MBP_6_2, /* MacBook Pro, 6,2th gen */
+ M_MBP_7_1, /* MacBook Pro, 7,1th gen */
M_UNKNOWN /* placeholder */
};
@@ -59,19 +73,33 @@ static struct efifb_dmi_info {
int stride;
int width;
int height;
-} dmi_list[] = {
+} dmi_list[] __initdata = {
[M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900 },
[M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
[M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
[M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+ [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+ [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
[M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+ [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+ [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
[M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+ [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
[M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
[M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+ [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
[M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+ [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+ [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
};
@@ -83,7 +111,7 @@ static int set_system(const struct dmi_system_id *id);
DMI_MATCH(DMI_PRODUCT_NAME, name) }, \
&dmi_list[enumid] }
-static struct dmi_system_id __initdata dmi_system_table[] = {
+static const struct dmi_system_id dmi_system_table[] __initconst = {
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20),
@@ -92,7 +120,12 @@ static struct dmi_system_id __initdata dmi_system_table[] = {
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static struct dmi_system_id __initdata dmi_system_table[] = {
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
{},
};
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
if (info->base == 0)
- return -ENODEV;
+ return 0;
printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
"(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
info->stride);
/* Trust the bootloader over the DMI tables */
- if (screen_info.lfb_base == 0)
+ if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+ int found_bar = 0;
+#endif
screen_info.lfb_base = info->base;
- if (screen_info.lfb_linelength == 0)
- screen_info.lfb_linelength = info->stride;
- if (screen_info.lfb_width == 0)
- screen_info.lfb_width = info->width;
- if (screen_info.lfb_height == 0)
- screen_info.lfb_height = info->height;
- if (screen_info.orig_video_isVGA == 0)
- screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
- return 0;
+#if defined(CONFIG_PCI)
+ /* make sure that the address in the table is actually on a
+ * VGA device's PCI BAR */
+
+ for_each_pci_dev(dev) {
+ int i;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ start = pci_resource_start(dev, i);
+ if (start == 0)
+ break;
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
+ }
+ }
+ }
+ if (!found_bar)
+ screen_info.lfb_base = 0;
+#endif
+ }
+ if (screen_info.lfb_base) {
+ if (screen_info.lfb_linelength == 0)
+ screen_info.lfb_linelength = info->stride;
+ if (screen_info.lfb_width == 0)
+ screen_info.lfb_width = info->width;
+ if (screen_info.lfb_height == 0)
+ screen_info.lfb_height = info->height;
+ if (screen_info.orig_video_isVGA == 0)
+ screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+ } else {
+ screen_info.lfb_linelength = 0;
+ screen_info.lfb_width = 0;
+ screen_info.lfb_height = 0;
+ screen_info.orig_video_isVGA = 0;
+ return 0;
+ }
+ return 1;
}
static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
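The quirk table above is consumed through the standard DMI matching helper, and the new return values matter there: dmi_check_system() walks the table and, for each matching entry, calls set_system(); a non-zero return stops the walk. Returning 1 once screen_info has been validated (and 0 for placeholder entries) therefore ends the scan only after a usable framebuffer base is in place. A minimal sketch of that call path with a hypothetical wrapper name (efifb's real probe code performs this during driver init):

	#include <linux/dmi.h>

	static int __init efifb_apply_dmi_quirks(void)	/* hypothetical */
	{
		/* Walks dmi_system_table; set_system() is invoked for each
		 * matching entry, and a non-zero return ends the walk. */
		dmi_check_system(dmi_system_table);
		return 0;
	}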
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 731fce64df9..b06647517c0 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1362,6 +1362,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
vma->vm_pgoff = off >> PAGE_SHIFT;
/* This is an IO map - tell maydump to skip this VMA */
vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
fb_pgprotect(file, vma, off);
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
@@ -1786,7 +1787,7 @@ static int ofonly __read_mostly;
int fb_get_options(char *name, char **option)
{
char *opt, *options = NULL;
- int opt_len, retval = 0;
+ int retval = 0;
int name_len = strlen(name), i;
if (name_len && ofonly && strncmp(name, "offb", 4))
@@ -1796,8 +1797,7 @@ int fb_get_options(char *name, char **option)
for (i = 0; i < FB_MAX; i++) {
if (video_options[i] == NULL)
continue;
- opt_len = strlen(video_options[i]);
- if (!opt_len)
+ if (!video_options[i][0])
continue;
opt = video_options[i];
if (!strncmp(name, opt, name_len) &&
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 95c0227f47f..6739b2af3bc 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -893,7 +893,7 @@ static void ffb_init_fix(struct fb_info *info)
info->fix.accel = FB_ACCEL_SUN_CREATOR;
}
-static int __devinit ffb_probe(struct of_device *op,
+static int __devinit ffb_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -1023,7 +1023,7 @@ out_err:
return err;
}
-static int __devexit ffb_remove(struct of_device *op)
+static int __devexit ffb_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct ffb_par *par = info->par;
@@ -1067,12 +1067,12 @@ static int __init ffb_init(void)
if (fb_get_options("ffb", NULL))
return -ENODEV;
- return of_register_driver(&ffb_driver, &of_bus_type);
+ return of_register_platform_driver(&ffb_driver);
}
static void __exit ffb_exit(void)
{
- of_unregister_driver(&ffb_driver);
+ of_unregister_platform_driver(&ffb_driver);
}
module_init(ffb_init);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 27455ce298b..8bbbf08fa3c 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -34,7 +34,8 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
-#include "fsl-diu-fb.h"
+#include <linux/fsl-diu-fb.h>
+#include "edid.h"
/*
* These parameters give default parameters
@@ -217,6 +218,7 @@ struct mfb_info {
int x_aoi_d; /* aoi display x offset to physical screen */
int y_aoi_d; /* aoi display y offset to physical screen */
struct fsl_diu_data *parent;
+ u8 *edid_data;
};
@@ -317,6 +319,17 @@ static void fsl_diu_free(void *virt, size_t size)
free_pages_exact(virt, size);
}
+/*
+ * Workaround for writes to the plane desc registers that fail to take effect.
+ * Needed with MPC5121 DIU rev 2.0 silicon.
+ */
+void wr_reg_wa(u32 *reg, u32 val)
+{
+ do {
+ out_be32(reg, val);
+ } while (in_be32(reg) != val);
+}
+
static int fsl_diu_enable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
@@ -330,7 +343,7 @@ static int fsl_diu_enable_panel(struct fb_info *info)
switch (mfbi->index) {
case 0: /* plane 0 */
if (hw->desc[0] != ad->paddr)
- out_be32(&hw->desc[0], ad->paddr);
+ wr_reg_wa(&hw->desc[0], ad->paddr);
break;
case 1: /* plane 1 AOI 0 */
cmfbi = machine_data->fsl_diu_info[2]->par;
@@ -340,7 +353,7 @@ static int fsl_diu_enable_panel(struct fb_info *info)
cpu_to_le32(cmfbi->ad->paddr);
else
ad->next_ad = 0;
- out_be32(&hw->desc[1], ad->paddr);
+ wr_reg_wa(&hw->desc[1], ad->paddr);
}
break;
case 3: /* plane 2 AOI 0 */
@@ -351,14 +364,14 @@ static int fsl_diu_enable_panel(struct fb_info *info)
cpu_to_le32(cmfbi->ad->paddr);
else
ad->next_ad = 0;
- out_be32(&hw->desc[2], ad->paddr);
+ wr_reg_wa(&hw->desc[2], ad->paddr);
}
break;
case 2: /* plane 1 AOI 1 */
pmfbi = machine_data->fsl_diu_info[1]->par;
ad->next_ad = 0;
if (hw->desc[1] == machine_data->dummy_ad->paddr)
- out_be32(&hw->desc[1], ad->paddr);
+ wr_reg_wa(&hw->desc[1], ad->paddr);
else /* AOI0 open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
break;
@@ -366,7 +379,7 @@ static int fsl_diu_enable_panel(struct fb_info *info)
pmfbi = machine_data->fsl_diu_info[3]->par;
ad->next_ad = 0;
if (hw->desc[2] == machine_data->dummy_ad->paddr)
- out_be32(&hw->desc[2], ad->paddr);
+ wr_reg_wa(&hw->desc[2], ad->paddr);
else /* AOI0 was open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
break;
@@ -390,27 +403,24 @@ static int fsl_diu_disable_panel(struct fb_info *info)
switch (mfbi->index) {
case 0: /* plane 0 */
if (hw->desc[0] != machine_data->dummy_ad->paddr)
- out_be32(&hw->desc[0],
- machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[0], machine_data->dummy_ad->paddr);
break;
case 1: /* plane 1 AOI 0 */
cmfbi = machine_data->fsl_diu_info[2]->par;
if (cmfbi->count > 0) /* AOI1 is open */
- out_be32(&hw->desc[1], cmfbi->ad->paddr);
+ wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- out_be32(&hw->desc[1],
- machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
/* close AOI 0 */
break;
case 3: /* plane 2 AOI 0 */
cmfbi = machine_data->fsl_diu_info[4]->par;
if (cmfbi->count > 0) /* AOI1 is open */
- out_be32(&hw->desc[2], cmfbi->ad->paddr);
+ wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- out_be32(&hw->desc[2],
- machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
/* close AOI 0 */
break;
case 2: /* plane 1 AOI 1 */
@@ -421,7 +431,7 @@ static int fsl_diu_disable_panel(struct fb_info *info)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- out_be32(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
/* close AOI 1 */
break;
case 4: /* plane 2 AOI 1 */
@@ -432,7 +442,7 @@ static int fsl_diu_disable_panel(struct fb_info *info)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- out_be32(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
/* close AOI 1 */
break;
default:
@@ -1100,6 +1110,10 @@ static int fsl_diu_open(struct fb_info *info, int user)
struct mfb_info *mfbi = info->par;
int res = 0;
+ /* free boot splash memory on first /dev/fb0 open */
+ if (!mfbi->index && diu_ops.release_bootmem)
+ diu_ops.release_bootmem();
+
spin_lock(&diu_lock);
mfbi->count++;
if (mfbi->count == 1) {
@@ -1173,18 +1187,30 @@ static int __devinit install_fb(struct fb_info *info)
int rc;
struct mfb_info *mfbi = info->par;
const char *aoi_mode, *init_aoi_mode = "320x240";
+ struct fb_videomode *db = fsl_diu_mode_db;
+ unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
+ int has_default_mode = 1;
if (init_fbinfo(info))
return -EINVAL;
- if (mfbi->index == 0) /* plane 0 */
+ if (mfbi->index == 0) { /* plane 0 */
+ if (mfbi->edid_data) {
+ /* Now build modedb from EDID */
+ fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs);
+ fb_videomode_to_modelist(info->monspecs.modedb,
+ info->monspecs.modedb_len,
+ &info->modelist);
+ db = info->monspecs.modedb;
+ dbsize = info->monspecs.modedb_len;
+ }
aoi_mode = fb_mode;
- else
+ } else {
aoi_mode = init_aoi_mode;
+ }
pr_debug("mode used = %s\n", aoi_mode);
- rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db,
- ARRAY_SIZE(fsl_diu_mode_db), &fsl_diu_default_mode, default_bpp);
-
+ rc = fb_find_mode(&info->var, info, aoi_mode, db, dbsize,
+ &fsl_diu_default_mode, default_bpp);
switch (rc) {
case 1:
pr_debug("using mode specified in @mode\n");
@@ -1202,10 +1228,50 @@ static int __devinit install_fb(struct fb_info *info)
default:
pr_debug("rc = %d\n", rc);
pr_debug("failed to find mode\n");
- return -EINVAL;
+ /*
+ * For plane 0 we continue and look into
+ * driver's internal modedb.
+ */
+ if (mfbi->index == 0 && mfbi->edid_data)
+ has_default_mode = 0;
+ else
+ return -EINVAL;
break;
}
+ if (!has_default_mode) {
+ rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db,
+ ARRAY_SIZE(fsl_diu_mode_db),
+ &fsl_diu_default_mode,
+ default_bpp);
+ if (rc > 0 && rc < 5)
+ has_default_mode = 1;
+ }
+
+ /* Still not found, use preferred mode from database if any */
+ if (!has_default_mode && info->monspecs.modedb) {
+ struct fb_monspecs *specs = &info->monspecs;
+ struct fb_videomode *modedb = &specs->modedb[0];
+
+ /*
+ * Get the preferred timing. If none is found,
+ * the first mode in the database is used.
+ */
+ if (specs->misc & FB_MISC_1ST_DETAIL) {
+ int i;
+
+ for (i = 0; i < specs->modedb_len; i++) {
+ if (specs->modedb[i].flag & FB_MODE_IS_FIRST) {
+ modedb = &specs->modedb[i];
+ break;
+ }
+ }
+ }
+
+ info->var.bits_per_pixel = default_bpp;
+ fb_videomode_to_var(&info->var, modedb);
+ }
+
pr_debug("xres_virtual %d\n", info->var.xres_virtual);
pr_debug("bits_per_pixel %d\n", info->var.bits_per_pixel);
@@ -1244,6 +1310,9 @@ static void uninstall_fb(struct fb_info *info)
if (!mfbi->registered)
return;
+ if (mfbi->index == 0)
+ kfree(mfbi->edid_data);
+
unregister_framebuffer(info);
unmap_video_memory(info);
if (&info->cmap)
@@ -1324,7 +1393,7 @@ static void free_irq_local(int irq)
* Power management hooks. Note that we won't be called from IRQ context,
* unlike the blank functions above, so we may sleep.
*/
-static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
+static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct fsl_diu_data *machine_data;
@@ -1334,7 +1403,7 @@ static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
return 0;
}
-static int fsl_diu_resume(struct of_device *ofdev)
+static int fsl_diu_resume(struct platform_device *ofdev)
{
struct fsl_diu_data *machine_data;
@@ -1418,7 +1487,7 @@ static ssize_t show_monitor(struct device *device,
return diu_ops.show_monitor_port(machine_data->monitor_port, buf);
}
-static int __devinit fsl_diu_probe(struct of_device *ofdev,
+static int __devinit fsl_diu_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -1427,6 +1496,7 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev,
int ret, i, error = 0;
struct resource res;
struct fsl_diu_data *machine_data;
+ int diu_mode;
machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL);
if (!machine_data)
@@ -1443,6 +1513,17 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev,
mfbi = machine_data->fsl_diu_info[i]->par;
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
mfbi->parent = machine_data;
+
+ if (mfbi->index == 0) {
+ const u8 *prop;
+ int len;
+
+ /* Get EDID */
+ prop = of_get_property(np, "edid", &len);
+ if (prop && len == EDID_LENGTH)
+ mfbi->edid_data = kmemdup(prop, EDID_LENGTH,
+ GFP_KERNEL);
+ }
}
ret = of_address_to_resource(np, 0, &res);
@@ -1463,7 +1544,9 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev,
goto error2;
}
- out_be32(&dr.diu_reg->diu_mode, 0); /* disable DIU anyway*/
+ diu_mode = in_be32(&dr.diu_reg->diu_mode);
+ if (diu_mode != MFB_MODE1)
+ out_be32(&dr.diu_reg->diu_mode, 0); /* disable DIU */
/* Get the IRQ of the DIU */
machine_data->irq = irq_of_parse_and_map(np, 0);
@@ -1511,7 +1594,13 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev,
machine_data->dummy_ad->offset_xyd = 0;
machine_data->dummy_ad->next_ad = 0;
- out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr);
+ /*
+ * Let DIU display splash screen if it was pre-initialized
+ * by the bootloader, set dummy area descriptor otherwise.
+ */
+ if (diu_mode != MFB_MODE1)
+ out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr);
+
out_be32(&dr.diu_reg->desc[1], machine_data->dummy_ad->paddr);
out_be32(&dr.diu_reg->desc[2], machine_data->dummy_ad->paddr);
@@ -1578,7 +1667,7 @@ error2:
}
-static int fsl_diu_remove(struct of_device *ofdev)
+static int fsl_diu_remove(struct platform_device *ofdev)
{
struct fsl_diu_data *machine_data;
int i;
diff --git a/drivers/video/fsl-diu-fb.h b/drivers/video/fsl-diu-fb.h
deleted file mode 100644
index fc295d7ea46..00000000000
--- a/drivers/video/fsl-diu-fb.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * Freescale DIU Frame Buffer device driver
- *
- * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
- * Paul Widmer <paul.widmer@freescale.com>
- * Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
- * York Sun <yorksun@freescale.com>
- *
- * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __FSL_DIU_FB_H__
-#define __FSL_DIU_FB_H__
-
-/* Arbitrary threshold to determine the allocation method
- * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
- */
-#define MEM_ALLOC_THRESHOLD (1024*768*4+32)
-/* Minimum value that the pixel clock can be set to in pico seconds
- * This is determined by platform clock/3 where the minimum platform
- * clock is 533MHz. This gives 5629 pico seconds.
- */
-#define MIN_PIX_CLK 5629
-#define MAX_PIX_CLK 96096
-
-#include <linux/types.h>
-
-struct mfb_alpha {
- int enable;
- int alpha;
-};
-
-struct mfb_chroma_key {
- int enable;
- __u8 red_max;
- __u8 green_max;
- __u8 blue_max;
- __u8 red_min;
- __u8 green_min;
- __u8 blue_min;
-};
-
-struct aoi_display_offset {
- int x_aoi_d;
- int y_aoi_d;
-};
-
-#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key)
-#define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
-#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8)
-
-#define MFB_SET_ALPHA 0x80014d00
-#define MFB_GET_ALPHA 0x40014d00
-#define MFB_SET_AOID 0x80084d04
-#define MFB_GET_AOID 0x40084d04
-#define MFB_SET_PIXFMT 0x80014d08
-#define MFB_GET_PIXFMT 0x40014d08
-
-#define FBIOGET_GWINFO 0x46E0
-#define FBIOPUT_GWINFO 0x46E1
-
-#ifdef __KERNEL__
-#include <linux/spinlock.h>
-
-/*
- * These are the fields of area descriptor(in DDR memory) for every plane
- */
-struct diu_ad {
- /* Word 0(32-bit) in DDR memory */
-/* __u16 comp; */
-/* __u16 pixel_s:2; */
-/* __u16 pallete:1; */
-/* __u16 red_c:2; */
-/* __u16 green_c:2; */
-/* __u16 blue_c:2; */
-/* __u16 alpha_c:3; */
-/* __u16 byte_f:1; */
-/* __u16 res0:3; */
-
- __be32 pix_fmt; /* hard coding pixel format */
-
- /* Word 1(32-bit) in DDR memory */
- __le32 addr;
-
- /* Word 2(32-bit) in DDR memory */
-/* __u32 delta_xs:11; */
-/* __u32 res1:1; */
-/* __u32 delta_ys:11; */
-/* __u32 res2:1; */
-/* __u32 g_alpha:8; */
- __le32 src_size_g_alpha;
-
- /* Word 3(32-bit) in DDR memory */
-/* __u32 delta_xi:11; */
-/* __u32 res3:5; */
-/* __u32 delta_yi:11; */
-/* __u32 res4:3; */
-/* __u32 flip:2; */
- __le32 aoi_size;
-
- /* Word 4(32-bit) in DDR memory */
- /*__u32 offset_xi:11;
- __u32 res5:5;
- __u32 offset_yi:11;
- __u32 res6:5;
- */
- __le32 offset_xyi;
-
- /* Word 5(32-bit) in DDR memory */
- /*__u32 offset_xd:11;
- __u32 res7:5;
- __u32 offset_yd:11;
- __u32 res8:5; */
- __le32 offset_xyd;
-
-
- /* Word 6(32-bit) in DDR memory */
- __u8 ckmax_r;
- __u8 ckmax_g;
- __u8 ckmax_b;
- __u8 res9;
-
- /* Word 7(32-bit) in DDR memory */
- __u8 ckmin_r;
- __u8 ckmin_g;
- __u8 ckmin_b;
- __u8 res10;
-/* __u32 res10:8; */
-
- /* Word 8(32-bit) in DDR memory */
- __le32 next_ad;
-
- /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */
- __u32 paddr;
-} __attribute__ ((packed));
-
-/* DIU register map */
-struct diu {
- __be32 desc[3];
- __be32 gamma;
- __be32 pallete;
- __be32 cursor;
- __be32 curs_pos;
- __be32 diu_mode;
- __be32 bgnd;
- __be32 bgnd_wb;
- __be32 disp_size;
- __be32 wb_size;
- __be32 wb_mem_addr;
- __be32 hsyn_para;
- __be32 vsyn_para;
- __be32 syn_pol;
- __be32 thresholds;
- __be32 int_status;
- __be32 int_mask;
- __be32 colorbar[8];
- __be32 filling;
- __be32 plut;
-} __attribute__ ((packed));
-
-struct diu_hw {
- struct diu *diu_reg;
- spinlock_t reg_lock;
-
- __u32 mode; /* DIU operation mode */
-};
-
-struct diu_addr {
- __u8 __iomem *vaddr; /* Virtual address */
- dma_addr_t paddr; /* Physical address */
- __u32 offset;
-};
-
-struct diu_pool {
- struct diu_addr ad;
- struct diu_addr gamma;
- struct diu_addr pallete;
- struct diu_addr cursor;
-};
-
-#define FSL_DIU_BASE_OFFSET 0x2C000 /* Offset of DIU */
-#define INT_LCDC 64 /* DIU interrupt number */
-
-#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
- /* 1 for plane 0, 2 for plane 1&2 each */
-
-/* Minimum X and Y resolutions */
-#define MIN_XRES 64
-#define MIN_YRES 64
-
-/* HW cursor parameters */
-#define MAX_CURS 32
-
-/* Modes of operation of DIU */
-#define MFB_MODE0 0 /* DIU off */
-#define MFB_MODE1 1 /* All three planes output to display */
-#define MFB_MODE2 2 /* Plane 1 to display, planes 2+3 written back*/
-#define MFB_MODE3 3 /* All three planes written back to memory */
-#define MFB_MODE4 4 /* Color bar generation */
-
-/* INT_STATUS/INT_MASK field descriptions */
-#define INT_VSYNC 0x01 /* Vsync interrupt */
-#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */
-#define INT_UNDRUN 0x04 /* Under run exception interrupt */
-#define INT_PARERR 0x08 /* Display parameters error interrupt */
-#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
-
-/* Panels'operation modes */
-#define MFB_TYPE_OUTPUT 0 /* Panel output to display */
-#define MFB_TYPE_OFF 1 /* Panel off */
-#define MFB_TYPE_WB 2 /* Panel written back to memory */
-#define MFB_TYPE_TEST 3 /* Panel generate color bar */
-
-#endif /* __KERNEL__ */
-#endif /* __FSL_DIU_FB_H__ */
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 15d20010944..d885c770eb8 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -368,7 +368,7 @@ static int __init iga_init(struct fb_info *info, struct iga_par *par)
return 1;
}
-int __init igafb_init(void)
+static int __init igafb_init(void)
{
struct fb_info *info;
struct pci_dev *pdev;
@@ -531,6 +531,7 @@ int __init igafb_init(void)
iounmap(info->screen_base);
kfree(par->mmap_map);
kfree(info);
+ return -ENODEV;
}
#ifdef CONFIG_SPARC
@@ -556,7 +557,7 @@ int __init igafb_init(void)
return 0;
}
-int __init igafb_setup(char *options)
+static int __init igafb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 43f0639b1c1..5c363d026f6 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -40,6 +40,12 @@
*/
#define DEBUG_VAR 1
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) && \
+ defined(CONFIG_FB_IMX_MODULE))
+#define PWMR_BACKLIGHT_AVAILABLE
+#endif
+
#define DRIVER_NAME "imx-fb"
#define LCDC_SSA 0x00
@@ -175,7 +181,9 @@ struct imxfb_info {
struct imx_fb_videomode *mode;
int num_modes;
+#ifdef PWMR_BACKLIGHT_AVAILABLE
struct backlight_device *bl;
+#endif
void (*lcd_power)(int);
void (*backlight_power)(int);
@@ -450,8 +458,7 @@ static int imxfb_set_par(struct fb_info *info)
return 0;
}
-
-
+#ifdef PWMR_BACKLIGHT_AVAILABLE
static int imxfb_bl_get_brightness(struct backlight_device *bl)
{
struct imxfb_info *fbi = bl_get_data(bl);
@@ -516,6 +523,7 @@ static void imxfb_exit_backlight(struct imxfb_info *fbi)
if (fbi->bl)
backlight_device_unregister(fbi->bl);
}
+#endif
static void imxfb_enable_controller(struct imxfb_info *fbi)
{
@@ -647,6 +655,9 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
fbi->regs + LCDC_SIZE);
writel(fbi->pcr, fbi->regs + LCDC_PCR);
+#ifndef PWMR_BACKLIGHT_AVAILABLE
+ writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
+#endif
writel(fbi->lscr1, fbi->regs + LCDC_LSCR1);
writel(fbi->dmacr, fbi->regs + LCDC_DMACR);
@@ -847,7 +858,9 @@ static int __init imxfb_probe(struct platform_device *pdev)
imxfb_enable_controller(fbi);
fbi->pdev = pdev;
+#ifdef PWMR_BACKLIGHT_AVAILABLE
imxfb_init_backlight(fbi);
+#endif
return 0;
@@ -885,7 +898,9 @@ static int __devexit imxfb_remove(struct platform_device *pdev)
imxfb_disable_controller(fbi);
+#ifdef PWMR_BACKLIGHT_AVAILABLE
imxfb_exit_backlight(fbi);
+#endif
unregister_framebuffer(info);
pdata = pdev->dev.platform_data;
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
new file mode 100644
index 00000000000..670ecaa0385
--- /dev/null
+++ b/drivers/video/jz4740_fb.c
@@ -0,0 +1,847 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC LCD framebuffer driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <linux/console.h>
+#include <linux/fb.h>
+
+#include <linux/dma-mapping.h>
+
+#include <asm/mach-jz4740/jz4740_fb.h>
+#include <asm/mach-jz4740/gpio.h>
+
+#define JZ_REG_LCD_CFG 0x00
+#define JZ_REG_LCD_VSYNC 0x04
+#define JZ_REG_LCD_HSYNC 0x08
+#define JZ_REG_LCD_VAT 0x0C
+#define JZ_REG_LCD_DAH 0x10
+#define JZ_REG_LCD_DAV 0x14
+#define JZ_REG_LCD_PS 0x18
+#define JZ_REG_LCD_CLS 0x1C
+#define JZ_REG_LCD_SPL 0x20
+#define JZ_REG_LCD_REV 0x24
+#define JZ_REG_LCD_CTRL 0x30
+#define JZ_REG_LCD_STATE 0x34
+#define JZ_REG_LCD_IID 0x38
+#define JZ_REG_LCD_DA0 0x40
+#define JZ_REG_LCD_SA0 0x44
+#define JZ_REG_LCD_FID0 0x48
+#define JZ_REG_LCD_CMD0 0x4C
+#define JZ_REG_LCD_DA1 0x50
+#define JZ_REG_LCD_SA1 0x54
+#define JZ_REG_LCD_FID1 0x58
+#define JZ_REG_LCD_CMD1 0x5C
+
+#define JZ_LCD_CFG_SLCD BIT(31)
+#define JZ_LCD_CFG_PS_DISABLE BIT(23)
+#define JZ_LCD_CFG_CLS_DISABLE BIT(22)
+#define JZ_LCD_CFG_SPL_DISABLE BIT(21)
+#define JZ_LCD_CFG_REV_DISABLE BIT(20)
+#define JZ_LCD_CFG_HSYNCM BIT(19)
+#define JZ_LCD_CFG_PCLKM BIT(18)
+#define JZ_LCD_CFG_INV BIT(17)
+#define JZ_LCD_CFG_SYNC_DIR BIT(16)
+#define JZ_LCD_CFG_PS_POLARITY BIT(15)
+#define JZ_LCD_CFG_CLS_POLARITY BIT(14)
+#define JZ_LCD_CFG_SPL_POLARITY BIT(13)
+#define JZ_LCD_CFG_REV_POLARITY BIT(12)
+#define JZ_LCD_CFG_HSYNC_ACTIVE_LOW BIT(11)
+#define JZ_LCD_CFG_PCLK_FALLING_EDGE BIT(10)
+#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9)
+#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8)
+#define JZ_LCD_CFG_18_BIT BIT(7)
+#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4))
+#define JZ_LCD_CFG_MODE_MASK 0xf
+
+#define JZ_LCD_CTRL_BURST_4 (0x0 << 28)
+#define JZ_LCD_CTRL_BURST_8 (0x1 << 28)
+#define JZ_LCD_CTRL_BURST_16 (0x2 << 28)
+#define JZ_LCD_CTRL_RGB555 BIT(27)
+#define JZ_LCD_CTRL_OFUP BIT(26)
+#define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24)
+#define JZ_LCD_CTRL_FRC_GRAYSCALE_4 (0x1 << 24)
+#define JZ_LCD_CTRL_FRC_GRAYSCALE_2 (0x2 << 24)
+#define JZ_LCD_CTRL_PDD_MASK (0xff << 16)
+#define JZ_LCD_CTRL_EOF_IRQ BIT(13)
+#define JZ_LCD_CTRL_SOF_IRQ BIT(12)
+#define JZ_LCD_CTRL_OFU_IRQ BIT(11)
+#define JZ_LCD_CTRL_IFU0_IRQ BIT(10)
+#define JZ_LCD_CTRL_IFU1_IRQ BIT(9)
+#define JZ_LCD_CTRL_DD_IRQ BIT(8)
+#define JZ_LCD_CTRL_QDD_IRQ BIT(7)
+#define JZ_LCD_CTRL_REVERSE_ENDIAN BIT(6)
+#define JZ_LCD_CTRL_LSB_FISRT BIT(5)
+#define JZ_LCD_CTRL_DISABLE BIT(4)
+#define JZ_LCD_CTRL_ENABLE BIT(3)
+#define JZ_LCD_CTRL_BPP_1 0x0
+#define JZ_LCD_CTRL_BPP_2 0x1
+#define JZ_LCD_CTRL_BPP_4 0x2
+#define JZ_LCD_CTRL_BPP_8 0x3
+#define JZ_LCD_CTRL_BPP_15_16 0x4
+#define JZ_LCD_CTRL_BPP_18_24 0x5
+
+#define JZ_LCD_CMD_SOF_IRQ BIT(15)
+#define JZ_LCD_CMD_EOF_IRQ BIT(16)
+#define JZ_LCD_CMD_ENABLE_PAL BIT(12)
+
+#define JZ_LCD_SYNC_MASK 0x3ff
+
+#define JZ_LCD_STATE_DISABLED BIT(0)
+
+struct jzfb_framedesc {
+ uint32_t next;
+ uint32_t addr;
+ uint32_t id;
+ uint32_t cmd;
+} __packed;
+
+struct jzfb {
+ struct fb_info *fb;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct resource *mem;
+ struct jz4740_fb_platform_data *pdata;
+
+ size_t vidmem_size;
+ void *vidmem;
+ dma_addr_t vidmem_phys;
+ struct jzfb_framedesc *framedesc;
+ dma_addr_t framedesc_phys;
+
+ struct clk *ldclk;
+ struct clk *lpclk;
+
+ unsigned is_enabled:1;
+ struct mutex lock;
+
+ uint32_t pseudo_palette[16];
+};
+
+static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
+ .id = "JZ4740 FB",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct jz_gpio_bulk_request jz_lcd_ctrl_pins[] = {
+ JZ_GPIO_BULK_PIN(LCD_PCLK),
+ JZ_GPIO_BULK_PIN(LCD_HSYNC),
+ JZ_GPIO_BULK_PIN(LCD_VSYNC),
+ JZ_GPIO_BULK_PIN(LCD_DE),
+ JZ_GPIO_BULK_PIN(LCD_PS),
+ JZ_GPIO_BULK_PIN(LCD_REV),
+ JZ_GPIO_BULK_PIN(LCD_CLS),
+ JZ_GPIO_BULK_PIN(LCD_SPL),
+};
+
+static const struct jz_gpio_bulk_request jz_lcd_data_pins[] = {
+ JZ_GPIO_BULK_PIN(LCD_DATA0),
+ JZ_GPIO_BULK_PIN(LCD_DATA1),
+ JZ_GPIO_BULK_PIN(LCD_DATA2),
+ JZ_GPIO_BULK_PIN(LCD_DATA3),
+ JZ_GPIO_BULK_PIN(LCD_DATA4),
+ JZ_GPIO_BULK_PIN(LCD_DATA5),
+ JZ_GPIO_BULK_PIN(LCD_DATA6),
+ JZ_GPIO_BULK_PIN(LCD_DATA7),
+ JZ_GPIO_BULK_PIN(LCD_DATA8),
+ JZ_GPIO_BULK_PIN(LCD_DATA9),
+ JZ_GPIO_BULK_PIN(LCD_DATA10),
+ JZ_GPIO_BULK_PIN(LCD_DATA11),
+ JZ_GPIO_BULK_PIN(LCD_DATA12),
+ JZ_GPIO_BULK_PIN(LCD_DATA13),
+ JZ_GPIO_BULK_PIN(LCD_DATA14),
+ JZ_GPIO_BULK_PIN(LCD_DATA15),
+ JZ_GPIO_BULK_PIN(LCD_DATA16),
+ JZ_GPIO_BULK_PIN(LCD_DATA17),
+};
+
+static unsigned int jzfb_num_ctrl_pins(struct jzfb *jzfb)
+{
+ unsigned int num;
+
+ switch (jzfb->pdata->lcd_type) {
+ case JZ_LCD_TYPE_GENERIC_16_BIT:
+ num = 4;
+ break;
+ case JZ_LCD_TYPE_GENERIC_18_BIT:
+ num = 4;
+ break;
+ case JZ_LCD_TYPE_8BIT_SERIAL:
+ num = 3;
+ break;
+ case JZ_LCD_TYPE_SPECIAL_TFT_1:
+ case JZ_LCD_TYPE_SPECIAL_TFT_2:
+ case JZ_LCD_TYPE_SPECIAL_TFT_3:
+ num = 8;
+ break;
+ default:
+ num = 0;
+ break;
+ }
+ return num;
+}
+
+static unsigned int jzfb_num_data_pins(struct jzfb *jzfb)
+{
+ unsigned int num;
+
+ switch (jzfb->pdata->lcd_type) {
+ case JZ_LCD_TYPE_GENERIC_16_BIT:
+ num = 16;
+ break;
+ case JZ_LCD_TYPE_GENERIC_18_BIT:
+ num = 18;
+ break;
+ case JZ_LCD_TYPE_8BIT_SERIAL:
+ num = 8;
+ break;
+ case JZ_LCD_TYPE_SPECIAL_TFT_1:
+ case JZ_LCD_TYPE_SPECIAL_TFT_2:
+ case JZ_LCD_TYPE_SPECIAL_TFT_3:
+ if (jzfb->pdata->bpp == 18)
+ num = 18;
+ else
+ num = 16;
+ break;
+ default:
+ num = 0;
+ break;
+ }
+ return num;
+}
+
+/* Based on CNVT_TOHW macro from skeletonfb.c */
+static inline uint32_t jzfb_convert_color_to_hw(unsigned val,
+ struct fb_bitfield *bf)
+{
+ return (((val << bf->length) + 0x7FFF - val) >> 16) << bf->offset;
+}
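jzfb_convert_color_to_hw() scales a 16-bit colour component down to bf->length bits with rounding and shifts it into position, roughly (val * (2^length - 1) + 0x7fff) / 0x10000 << offset. A worked example, assuming a standard RGB565 layout (not spelled out at this point in the driver):

	/* Full-scale red, val = 0xffff, red.length = 5, red.offset = 11:
	 *   (0xffff << 5) + 0x7fff - 0xffff = 0x1f7fe0
	 *   0x1f7fe0 >> 16                  = 0x1f    (5-bit maximum)
	 *   0x1f << 11                      = 0xf800
	 * Doing the same for green (6 bits at offset 5) and blue (5 bits at
	 * offset 0) gives 0x07e0 and 0x001f, so a full-white triple packs to
	 * 0xf800 | 0x07e0 | 0x001f = 0xffff. */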
+
+static int jzfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp, struct fb_info *fb)
+{
+ uint32_t color;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ color = jzfb_convert_color_to_hw(red, &fb->var.red);
+ color |= jzfb_convert_color_to_hw(green, &fb->var.green);
+ color |= jzfb_convert_color_to_hw(blue, &fb->var.blue);
+ color |= jzfb_convert_color_to_hw(transp, &fb->var.transp);
+
+ ((uint32_t *)(fb->pseudo_palette))[regno] = color;
+
+ return 0;
+}
+
+static int jzfb_get_controller_bpp(struct jzfb *jzfb)
+{
+ switch (jzfb->pdata->bpp) {
+ case 18:
+ case 24:
+ return 32;
+ case 15:
+ return 16;
+ default:
+ return jzfb->pdata->bpp;
+ }
+}
+
+static struct fb_videomode *jzfb_get_mode(struct jzfb *jzfb,
+ struct fb_var_screeninfo *var)
+{
+ size_t i;
+ struct fb_videomode *mode = jzfb->pdata->modes;
+
+ for (i = 0; i < jzfb->pdata->num_modes; ++i, ++mode) {
+ if (mode->xres == var->xres && mode->yres == var->yres)
+ return mode;
+ }
+
+ return NULL;
+}
+
+static int jzfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fb)
+{
+ struct jzfb *jzfb = fb->par;
+ struct fb_videomode *mode;
+
+ if (var->bits_per_pixel != jzfb_get_controller_bpp(jzfb) &&
+ var->bits_per_pixel != jzfb->pdata->bpp)
+ return -EINVAL;
+
+ mode = jzfb_get_mode(jzfb, var);
+ if (mode == NULL)
+ return -EINVAL;
+
+ fb_videomode_to_var(var, mode);
+
+ switch (jzfb->pdata->bpp) {
+ case 8:
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->red.length = 5;
+ var->green.offset = 6;
+ var->green.length = 5;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ break;
+ case 18:
+ var->red.offset = 16;
+ var->red.length = 6;
+ var->green.offset = 8;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 6;
+ var->bits_per_pixel = 32;
+ break;
+ case 32:
+ case 24:
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int jzfb_set_par(struct fb_info *info)
+{
+ struct jzfb *jzfb = info->par;
+ struct jz4740_fb_platform_data *pdata = jzfb->pdata;
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_videomode *mode;
+ uint16_t hds, vds;
+ uint16_t hde, vde;
+ uint16_t ht, vt;
+ uint32_t ctrl;
+ uint32_t cfg;
+ unsigned long rate;
+
+ mode = jzfb_get_mode(jzfb, var);
+ if (mode == NULL)
+ return -EINVAL;
+
+ if (mode == info->mode)
+ return 0;
+
+ info->mode = mode;
+
+ hds = mode->hsync_len + mode->left_margin;
+ hde = hds + mode->xres;
+ ht = hde + mode->right_margin;
+
+ vds = mode->vsync_len + mode->upper_margin;
+ vde = vds + mode->yres;
+ vt = vde + mode->lower_margin;
+
+ ctrl = JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_16;
+
+ switch (pdata->bpp) {
+ case 1:
+ ctrl |= JZ_LCD_CTRL_BPP_1;
+ break;
+ case 2:
+ ctrl |= JZ_LCD_CTRL_BPP_2;
+ break;
+ case 4:
+ ctrl |= JZ_LCD_CTRL_BPP_4;
+ break;
+ case 8:
+ ctrl |= JZ_LCD_CTRL_BPP_8;
+ break;
+ case 15:
+ ctrl |= JZ_LCD_CTRL_RGB555; /* Fallthrough */
+ case 16:
+ ctrl |= JZ_LCD_CTRL_BPP_15_16;
+ break;
+ case 18:
+ case 24:
+ case 32:
+ ctrl |= JZ_LCD_CTRL_BPP_18_24;
+ break;
+ default:
+ break;
+ }
+
+ cfg = pdata->lcd_type & 0xf;
+
+ if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT))
+ cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
+
+ if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT))
+ cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW;
+
+ if (pdata->pixclk_falling_edge)
+ cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;
+
+ if (pdata->date_enable_active_low)
+ cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW;
+
+ if (pdata->lcd_type == JZ_LCD_TYPE_GENERIC_18_BIT)
+ cfg |= JZ_LCD_CFG_18_BIT;
+
+ if (mode->pixclock) {
+ rate = PICOS2KHZ(mode->pixclock) * 1000;
+ mode->refresh = rate / vt / ht;
+ } else {
+ if (pdata->lcd_type == JZ_LCD_TYPE_8BIT_SERIAL)
+ rate = mode->refresh * (vt + 2 * mode->xres) * ht;
+ else
+ rate = mode->refresh * vt * ht;
+
+ mode->pixclock = KHZ2PICOS(rate / 1000);
+ }
+
+ mutex_lock(&jzfb->lock);
+ if (!jzfb->is_enabled)
+ clk_enable(jzfb->ldclk);
+ else
+ ctrl |= JZ_LCD_CTRL_ENABLE;
+
+ switch (pdata->lcd_type) {
+ case JZ_LCD_TYPE_SPECIAL_TFT_1:
+ case JZ_LCD_TYPE_SPECIAL_TFT_2:
+ case JZ_LCD_TYPE_SPECIAL_TFT_3:
+ writel(pdata->special_tft_config.spl, jzfb->base + JZ_REG_LCD_SPL);
+ writel(pdata->special_tft_config.cls, jzfb->base + JZ_REG_LCD_CLS);
+ writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_PS);
+ writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_REV);
+ break;
+ default:
+ cfg |= JZ_LCD_CFG_PS_DISABLE;
+ cfg |= JZ_LCD_CFG_CLS_DISABLE;
+ cfg |= JZ_LCD_CFG_SPL_DISABLE;
+ cfg |= JZ_LCD_CFG_REV_DISABLE;
+ break;
+ }
+
+ writel(mode->hsync_len, jzfb->base + JZ_REG_LCD_HSYNC);
+ writel(mode->vsync_len, jzfb->base + JZ_REG_LCD_VSYNC);
+
+ writel((ht << 16) | vt, jzfb->base + JZ_REG_LCD_VAT);
+
+ writel((hds << 16) | hde, jzfb->base + JZ_REG_LCD_DAH);
+ writel((vds << 16) | vde, jzfb->base + JZ_REG_LCD_DAV);
+
+ writel(cfg, jzfb->base + JZ_REG_LCD_CFG);
+
+ writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
+
+ if (!jzfb->is_enabled)
+ clk_disable(jzfb->ldclk);
+
+ mutex_unlock(&jzfb->lock);
+
+ clk_set_rate(jzfb->lpclk, rate);
+ clk_set_rate(jzfb->ldclk, rate * 3);
+
+ return 0;
+}
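
jzfb_set_par() above converts between the fbdev pixclock convention (picoseconds per pixel) and a rate in Hz, with refresh = rate / (vt * ht). A self-contained sketch of the same arithmetic, with PICOS2KHZ()/KHZ2PICOS() expanded to their usual <linux/fb.h> definitions and purely hypothetical timing values:

#include <stdio.h>

/* Expanded forms of the fbdev helpers: kHz <-> picoseconds per pixel. */
#define PICOS2KHZ(ps)  (1000000000UL / (ps))
#define KHZ2PICOS(khz) (1000000000UL / (khz))

int main(void)
{
	unsigned long pixclock_ps = 33333;   /* ~30 MHz pixel clock, hypothetical */
	unsigned long ht = 1056, vt = 525;   /* totals including porches, hypothetical */

	unsigned long rate = PICOS2KHZ(pixclock_ps) * 1000;  /* Hz */
	unsigned long refresh = rate / vt / ht;              /* frames per second */

	printf("rate=%lu Hz refresh=%lu Hz pixclock=%lu ps\n",
	       rate, refresh, KHZ2PICOS(rate / 1000));
	return 0;
}
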
+
+static void jzfb_enable(struct jzfb *jzfb)
+{
+ uint32_t ctrl;
+
+ clk_enable(jzfb->ldclk);
+
+ jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
+ jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+
+ writel(0, jzfb->base + JZ_REG_LCD_STATE);
+
+ writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
+
+ ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL);
+ ctrl |= JZ_LCD_CTRL_ENABLE;
+ ctrl &= ~JZ_LCD_CTRL_DISABLE;
+ writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
+}
+
+static void jzfb_disable(struct jzfb *jzfb)
+{
+ uint32_t ctrl;
+
+ ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL);
+ ctrl |= JZ_LCD_CTRL_DISABLE;
+ writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
+ do {
+ ctrl = readl(jzfb->base + JZ_REG_LCD_STATE);
+ } while (!(ctrl & JZ_LCD_STATE_DISABLED));
+
+ jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
+ jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+
+ clk_disable(jzfb->ldclk);
+}
+
+static int jzfb_blank(int blank_mode, struct fb_info *info)
+{
+ struct jzfb *jzfb = info->par;
+
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ mutex_lock(&jzfb->lock);
+ if (jzfb->is_enabled) {
+ mutex_unlock(&jzfb->lock);
+ return 0;
+ }
+
+ jzfb_enable(jzfb);
+ jzfb->is_enabled = 1;
+
+ mutex_unlock(&jzfb->lock);
+ break;
+ default:
+ mutex_lock(&jzfb->lock);
+ if (!jzfb->is_enabled) {
+ mutex_unlock(&jzfb->lock);
+ return 0;
+ }
+
+ jzfb_disable(jzfb);
+ jzfb->is_enabled = 0;
+
+ mutex_unlock(&jzfb->lock);
+ break;
+ }
+
+ return 0;
+}
+
+static int jzfb_alloc_devmem(struct jzfb *jzfb)
+{
+ int max_videosize = 0;
+ struct fb_videomode *mode = jzfb->pdata->modes;
+ void *page;
+ int i;
+
+ for (i = 0; i < jzfb->pdata->num_modes; ++mode, ++i) {
+ if (max_videosize < mode->xres * mode->yres)
+ max_videosize = mode->xres * mode->yres;
+ }
+
+ max_videosize *= jzfb_get_controller_bpp(jzfb) >> 3;
+
+ jzfb->framedesc = dma_alloc_coherent(&jzfb->pdev->dev,
+ sizeof(*jzfb->framedesc),
+ &jzfb->framedesc_phys, GFP_KERNEL);
+
+ if (!jzfb->framedesc)
+ return -ENOMEM;
+
+ jzfb->vidmem_size = PAGE_ALIGN(max_videosize);
+ jzfb->vidmem = dma_alloc_coherent(&jzfb->pdev->dev,
+ jzfb->vidmem_size,
+ &jzfb->vidmem_phys, GFP_KERNEL);
+
+ if (!jzfb->vidmem)
+ goto err_free_framedesc;
+
+ for (page = jzfb->vidmem;
+ page < jzfb->vidmem + PAGE_ALIGN(jzfb->vidmem_size);
+ page += PAGE_SIZE) {
+ SetPageReserved(virt_to_page(page));
+ }
+
+ jzfb->framedesc->next = jzfb->framedesc_phys;
+ jzfb->framedesc->addr = jzfb->vidmem_phys;
+ jzfb->framedesc->id = 0xdeafbead;
+ jzfb->framedesc->cmd = 0;
+ jzfb->framedesc->cmd |= max_videosize / 4;
+
+ return 0;
+
+err_free_framedesc:
+ dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc),
+ jzfb->framedesc, jzfb->framedesc_phys);
+ return -ENOMEM;
+}
+
+static void jzfb_free_devmem(struct jzfb *jzfb)
+{
+ dma_free_coherent(&jzfb->pdev->dev, jzfb->vidmem_size,
+ jzfb->vidmem, jzfb->vidmem_phys);
+ dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc),
+ jzfb->framedesc, jzfb->framedesc_phys);
+}
+
+static struct fb_ops jzfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = jzfb_check_var,
+ .fb_set_par = jzfb_set_par,
+ .fb_blank = jzfb_blank,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_setcolreg = jzfb_setcolreg,
+};
+
+static int __devinit jzfb_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jzfb *jzfb;
+ struct fb_info *fb;
+ struct jz4740_fb_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *mem;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ return -ENXIO;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "Failed to get register memory resource\n");
+ return -ENXIO;
+ }
+
+ mem = request_mem_region(mem->start, resource_size(mem), pdev->name);
+ if (!mem) {
+ dev_err(&pdev->dev, "Failed to request register memory region\n");
+ return -EBUSY;
+ }
+
+ fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev);
+ if (!fb) {
+ dev_err(&pdev->dev, "Failed to allocate framebuffer device\n");
+ ret = -ENOMEM;
+ goto err_release_mem_region;
+ }
+
+ fb->fbops = &jzfb_ops;
+ fb->flags = FBINFO_DEFAULT;
+
+ jzfb = fb->par;
+ jzfb->pdev = pdev;
+ jzfb->pdata = pdata;
+ jzfb->mem = mem;
+
+ jzfb->ldclk = clk_get(&pdev->dev, "lcd");
+ if (IS_ERR(jzfb->ldclk)) {
+ ret = PTR_ERR(jzfb->ldclk);
+ dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret);
+ goto err_framebuffer_release;
+ }
+
+ jzfb->lpclk = clk_get(&pdev->dev, "lcd_pclk");
+ if (IS_ERR(jzfb->lpclk)) {
+ ret = PTR_ERR(jzfb->lpclk);
+ dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret);
+ goto err_put_ldclk;
+ }
+
+ jzfb->base = ioremap(mem->start, resource_size(mem));
+ if (!jzfb->base) {
+ dev_err(&pdev->dev, "Failed to ioremap register memory region\n");
+ ret = -EBUSY;
+ goto err_put_lpclk;
+ }
+
+ platform_set_drvdata(pdev, jzfb);
+
+ mutex_init(&jzfb->lock);
+
+ fb_videomode_to_modelist(pdata->modes, pdata->num_modes,
+ &fb->modelist);
+ fb_videomode_to_var(&fb->var, pdata->modes);
+ fb->var.bits_per_pixel = pdata->bpp;
+ jzfb_check_var(&fb->var, fb);
+
+ ret = jzfb_alloc_devmem(jzfb);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to allocate video memory\n");
+ goto err_iounmap;
+ }
+
+ fb->fix = jzfb_fix;
+ fb->fix.line_length = fb->var.bits_per_pixel * fb->var.xres / 8;
+ fb->fix.mmio_start = mem->start;
+ fb->fix.mmio_len = resource_size(mem);
+ fb->fix.smem_start = jzfb->vidmem_phys;
+ fb->fix.smem_len = fb->fix.line_length * fb->var.yres;
+ fb->screen_base = jzfb->vidmem;
+ fb->pseudo_palette = jzfb->pseudo_palette;
+
+ fb_alloc_cmap(&fb->cmap, 256, 0);
+
+ clk_enable(jzfb->ldclk);
+ jzfb->is_enabled = 1;
+
+ writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
+
+ fb->mode = NULL;
+ jzfb_set_par(fb);
+
+ jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
+ jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+
+ ret = register_framebuffer(fb);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret);
+ goto err_free_devmem;
+ }
+
+ jzfb->fb = fb;
+
+ return 0;
+
+err_free_devmem:
+ jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
+ jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+
+ fb_dealloc_cmap(&fb->cmap);
+ jzfb_free_devmem(jzfb);
+err_iounmap:
+ iounmap(jzfb->base);
+err_put_lpclk:
+ clk_put(jzfb->lpclk);
+err_put_ldclk:
+ clk_put(jzfb->ldclk);
+err_framebuffer_release:
+ framebuffer_release(fb);
+err_release_mem_region:
+ release_mem_region(mem->start, resource_size(mem));
+ return ret;
+}
+
+static int __devexit jzfb_remove(struct platform_device *pdev)
+{
+ struct jzfb *jzfb = platform_get_drvdata(pdev);
+
+ jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb);
+
+ jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
+ jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+
+ iounmap(jzfb->base);
+ release_mem_region(jzfb->mem->start, resource_size(jzfb->mem));
+
+ fb_dealloc_cmap(&jzfb->fb->cmap);
+ jzfb_free_devmem(jzfb);
+
+ platform_set_drvdata(pdev, NULL);
+
+ clk_put(jzfb->lpclk);
+ clk_put(jzfb->ldclk);
+
+ framebuffer_release(jzfb->fb);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int jzfb_suspend(struct device *dev)
+{
+ struct jzfb *jzfb = dev_get_drvdata(dev);
+
+ acquire_console_sem();
+ fb_set_suspend(jzfb->fb, 1);
+ release_console_sem();
+
+ mutex_lock(&jzfb->lock);
+ if (jzfb->is_enabled)
+ jzfb_disable(jzfb);
+ mutex_unlock(&jzfb->lock);
+
+ return 0;
+}
+
+static int jzfb_resume(struct device *dev)
+{
+ struct jzfb *jzfb = dev_get_drvdata(dev);
+ clk_enable(jzfb->ldclk);
+
+ mutex_lock(&jzfb->lock);
+ if (jzfb->is_enabled)
+ jzfb_enable(jzfb);
+ mutex_unlock(&jzfb->lock);
+
+ acquire_console_sem();
+ fb_set_suspend(jzfb->fb, 0);
+ release_console_sem();
+
+ return 0;
+}
+
+static const struct dev_pm_ops jzfb_pm_ops = {
+ .suspend = jzfb_suspend,
+ .resume = jzfb_resume,
+ .poweroff = jzfb_suspend,
+ .restore = jzfb_resume,
+};
+
+#define JZFB_PM_OPS (&jzfb_pm_ops)
+
+#else
+#define JZFB_PM_OPS NULL
+#endif
+
+static struct platform_driver jzfb_driver = {
+ .probe = jzfb_probe,
+ .remove = __devexit_p(jzfb_remove),
+ .driver = {
+ .name = "jz4740-fb",
+ .pm = JZFB_PM_OPS,
+ },
+};
+
+static int __init jzfb_init(void)
+{
+ return platform_driver_register(&jzfb_driver);
+}
+module_init(jzfb_init);
+
+static void __exit jzfb_exit(void)
+{
+ platform_driver_unregister(&jzfb_driver);
+}
+module_exit(jzfb_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("JZ4740 SoC LCD framebuffer driver");
+MODULE_ALIAS("platform:jz4740-fb");
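
The probe path above consumes a struct jz4740_fb_platform_data supplied by board code. As a minimal, hypothetical sketch of such board data (the field names mirror those dereferenced in jzfb_probe()/jzfb_set_par() above; the mode values, the JZ_LCD_TYPE_GENERIC_16_BIT constant and how the data is attached to the "jz4740-fb" platform device are assumptions, not part of this patch):

/* Hypothetical board-support sketch -- illustrative values only. */
static struct fb_videomode board_lcd_modes[] = {
	{
		.name    = "320x240",
		.xres    = 320,
		.yres    = 240,
		.refresh = 60,
		/* pixclock left 0: jzfb_set_par() derives it from refresh, ht and vt */
	},
};

static struct jz4740_fb_platform_data board_fb_pdata = {
	.modes     = board_lcd_modes,
	.num_modes = ARRAY_SIZE(board_lcd_modes),
	.bpp       = 16,
	.lcd_type  = JZ_LCD_TYPE_GENERIC_16_BIT,   /* assumed constant name */
	.pixclk_falling_edge    = 1,
	.date_enable_active_low = 0,               /* field name as spelled in the driver */
};
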
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 9e8bf7d5e24..b599e5e36ce 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -529,7 +529,7 @@ static void leo_fixup_var_rgb(struct fb_var_screeninfo *var)
var->transp.length = 0;
}
-static void leo_unmap_regs(struct of_device *op, struct fb_info *info,
+static void leo_unmap_regs(struct platform_device *op, struct fb_info *info,
struct leo_par *par)
{
if (par->lc_ss0_usr)
@@ -547,7 +547,7 @@ static void leo_unmap_regs(struct of_device *op, struct fb_info *info,
of_iounmap(&op->resource[0], info->screen_base, 0x800000);
}
-static int __devinit leo_probe(struct of_device *op,
+static int __devinit leo_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -637,7 +637,7 @@ out_err:
return err;
}
-static int __devexit leo_remove(struct of_device *op)
+static int __devexit leo_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct leo_par *par = info->par;
@@ -677,12 +677,12 @@ static int __init leo_init(void)
if (fb_get_options("leofb", NULL))
return -ENODEV;
- return of_register_driver(&leo_driver, &of_bus_type);
+ return of_register_platform_driver(&leo_driver);
}
static void __exit leo_exit(void)
{
- of_unregister_driver(&leo_driver);
+ of_unregister_platform_driver(&leo_driver);
}
module_init(leo_init);
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index 403b14445a7..0fb280ead3d 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -191,7 +191,7 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
};
i2c_new_probed_device(&m2info->maven.adapter,
- &maven_info, addr_list);
+ &maven_info, addr_list, NULL);
}
}
return m2info;
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index f3a4e15672d..f96a471cb1a 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
#if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
/*
- * memcpy_toio works for us if:
+ * iowrite32_rep works for us if:
* (1) Copies data as 32bit quantities, not byte after byte,
* (2) Performs LE ordered stores, and
* (3) It copes with unaligned source (destination is guaranteed to be page
* aligned and length is guaranteed to be multiple of 4).
*/
- memcpy_toio(va.vaddr, src, len);
+ iowrite32_rep(va.vaddr, src, len >> 2);
#else
u_int32_t __iomem* addr = va.vaddr;
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 4e2b8cc3d46..b1c4374cf94 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -550,7 +550,7 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
return 0;
}
-static int __devinit of_platform_mb862xx_probe(struct of_device *ofdev,
+static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev,
const struct of_device_id *id)
{
struct device_node *np = ofdev->dev.of_node;
@@ -669,7 +669,7 @@ fbrel:
return ret;
}
-static int __devexit of_platform_mb862xx_remove(struct of_device *ofdev)
+static int __devexit of_platform_mb862xx_remove(struct platform_device *ofdev)
{
struct fb_info *fbi = dev_get_drvdata(&ofdev->dev);
struct mb862xxfb_par *par = fbi->par;
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index c1ff271017a..7c316c34dfc 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -187,10 +187,8 @@ static void mddi_wait_interrupt(struct mddi_info *mddi, uint32_t intmask);
static void mddi_handle_rev_data_avail(struct mddi_info *mddi)
{
- union mddi_rev *rev = mddi->rev_data;
uint32_t rev_data_count;
uint32_t rev_crc_err_count;
- int i;
struct reg_read_info *ri;
size_t prev_offset;
uint16_t length;
@@ -670,7 +668,7 @@ static int __init mddi_rev_data_setup(struct mddi_info *mddi)
return 0;
}
-static int __init mddi_probe(struct platform_device *pdev)
+static int __devinit mddi_probe(struct platform_device *pdev)
{
struct msm_mddi_platform_data *pdata = pdev->dev.platform_data;
struct mddi_info *mddi = &mddi_info[pdev->id];
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 19c01c6208e..3c28db03ad3 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -258,7 +258,6 @@ int get_img(struct mdp_img *img, struct fb_info *info,
{
int put_needed, ret = 0;
struct file *file;
- unsigned long vstart;
file = fget_light(img->memory_id, &put_needed);
if (file == NULL)
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 46dda7d8aae..cb163a5397b 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -19,13 +19,14 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>
-#include <asm/prom.h>
#ifdef CONFIG_PPC64
#include <asm/pci-bridge.h>
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
index 2be94eb3bbf..10459d8bd9a 100644
--- a/drivers/video/omap/lcd_apollon.c
+++ b/drivers/video/omap/lcd_apollon.c
@@ -25,7 +25,6 @@
#include <linux/platform_device.h>
#include <mach/gpio.h>
-#include <plat/mux.h>
#include "omapfb.h"
@@ -34,8 +33,6 @@
static int apollon_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
{
- /* configure LCD PWR_EN */
- omap_cfg_reg(M21_242X_GPIO11);
return 0;
}
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index aaf5d308a04..e1c765d1141 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -28,12 +28,13 @@
#include <linux/fb.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
-#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <plat/display.h>
+#include <plat/nokia-dsi-panel.h>
/* DSI Virtual channel. Hardcoded for now. */
#define TCH 0
@@ -62,11 +63,136 @@
#define DCS_GET_ID2 0xdb
#define DCS_GET_ID3 0xdc
-/* #define TAAL_USE_ESD_CHECK */
#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
+static irqreturn_t taal_te_isr(int irq, void *data);
+static void taal_te_timeout_work_callback(struct work_struct *work);
static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
+struct panel_regulator {
+ struct regulator *regulator;
+ const char *name;
+ int min_uV;
+ int max_uV;
+};
+
+static void free_regulators(struct panel_regulator *regulators, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ /* disable/put in reverse order */
+ regulator_disable(regulators[n - i - 1].regulator);
+ regulator_put(regulators[n - i - 1].regulator);
+ }
+}
+
+static int init_regulators(struct omap_dss_device *dssdev,
+ struct panel_regulator *regulators, int n)
+{
+ int r, i, v;
+
+ for (i = 0; i < n; i++) {
+ struct regulator *reg;
+
+ reg = regulator_get(&dssdev->dev, regulators[i].name);
+ if (IS_ERR(reg)) {
+ dev_err(&dssdev->dev, "failed to get regulator %s\n",
+ regulators[i].name);
+ r = PTR_ERR(reg);
+ goto err;
+ }
+
+ /* FIXME: better handling of fixed vs. variable regulators */
+ v = regulator_get_voltage(reg);
+ if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
+ r = regulator_set_voltage(reg, regulators[i].min_uV,
+ regulators[i].max_uV);
+ if (r) {
+ dev_err(&dssdev->dev,
+ "failed to set regulator %s voltage\n",
+ regulators[i].name);
+ regulator_put(reg);
+ goto err;
+ }
+ }
+
+ r = regulator_enable(reg);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable regulator %s\n",
+ regulators[i].name);
+ regulator_put(reg);
+ goto err;
+ }
+
+ regulators[i].regulator = reg;
+ }
+
+ return 0;
+
+err:
+ free_regulators(regulators, i);
+
+ return r;
+}
+
+/**
+ * struct panel_config - panel configuration
+ * @name: panel name
+ * @type: panel type
+ * @timings: panel resolution
+ * @sleep: various panel specific delays, passed to msleep() if non-zero
+ * @reset_sequence: reset sequence timings, passed to udelay() if non-zero
+ * @regulators: array of panel regulators
+ * @num_regulators: number of regulators in the array
+ */
+struct panel_config {
+ const char *name;
+ int type;
+
+ struct omap_video_timings timings;
+
+ struct {
+ unsigned int sleep_in;
+ unsigned int sleep_out;
+ unsigned int hw_reset;
+ unsigned int enable_te;
+ } sleep;
+
+ struct {
+ unsigned int high;
+ unsigned int low;
+ } reset_sequence;
+
+ struct panel_regulator *regulators;
+ int num_regulators;
+};
+
+enum {
+ PANEL_TAAL,
+};
+
+static struct panel_config panel_configs[] = {
+ {
+ .name = "taal",
+ .type = PANEL_TAAL,
+ .timings = {
+ .x_res = 864,
+ .y_res = 480,
+ },
+ .sleep = {
+ .sleep_in = 5,
+ .sleep_out = 5,
+ .hw_reset = 5,
+ .enable_te = 100, /* possible panel bug */
+ },
+ .reset_sequence = {
+ .high = 10,
+ .low = 10,
+ },
+ },
+};
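
Supporting a further panel through this table amounts to adding an entry (and, if needed, a new enum value next to PANEL_TAAL); the entry below is purely hypothetical and uses only fields declared in struct panel_config above:

	/* Hypothetical second entry -- not part of this patch. */
	{
		.name = "example-panel",        /* matched against nokia_dsi_panel_data->name */
		.type = PANEL_TAAL,             /* or a new enum value */
		.timings = {
			.x_res = 480,
			.y_res = 800,
		},
		.sleep = {
			.sleep_in  = 5,
			.sleep_out = 5,
			.hw_reset  = 5,
			.enable_te = 0,         /* 0 = skip the msleep() */
		},
		.reset_sequence = {
			.high = 10,
			.low  = 10,
		},
		.regulators = NULL,             /* or an array of struct panel_regulator */
		.num_regulators = 0,
	},
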
+
struct taal_data {
struct mutex lock;
@@ -84,8 +210,15 @@ struct taal_data {
bool mirror;
bool te_enabled;
- bool use_ext_te;
- struct completion te_completion;
+
+ atomic_t do_update;
+ struct {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } update_region;
+ struct delayed_work te_timeout_work;
bool use_dsi_bl;
@@ -96,8 +229,16 @@ struct taal_data {
struct workqueue_struct *esd_wq;
struct delayed_work esd_work;
+
+ struct panel_config *panel_config;
};
+static inline struct nokia_dsi_panel_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+ return (struct nokia_dsi_panel_data *) dssdev->data;
+}
+
static void taal_esd_work(struct work_struct *work);
static void hw_guard_start(struct taal_data *td, int guard_msec)
@@ -159,7 +300,8 @@ static int taal_sleep_in(struct taal_data *td)
hw_guard_start(td, 120);
- msleep(5);
+ if (td->panel_config->sleep.sleep_in)
+ msleep(td->panel_config->sleep.sleep_in);
return 0;
}
@@ -176,7 +318,8 @@ static int taal_sleep_out(struct taal_data *td)
hw_guard_start(td, 120);
- msleep(5);
+ if (td->panel_config->sleep.sleep_out)
+ msleep(td->panel_config->sleep.sleep_out);
return 0;
}
@@ -279,6 +422,7 @@ static int taal_bl_update_status(struct backlight_device *dev)
{
struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
int r;
int level;
@@ -290,24 +434,26 @@ static int taal_bl_update_status(struct backlight_device *dev)
dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
+ mutex_lock(&td->lock);
+
if (td->use_dsi_bl) {
if (td->enabled) {
dsi_bus_lock();
r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
dsi_bus_unlock();
- if (r)
- return r;
+ } else {
+ r = 0;
}
} else {
- if (!dssdev->set_backlight)
- return -EINVAL;
-
- r = dssdev->set_backlight(dssdev, level);
- if (r)
- return r;
+ if (!panel_data->set_backlight)
+ r = -EINVAL;
+ else
+ r = panel_data->set_backlight(dssdev, level);
}
- return 0;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_bl_get_intensity(struct backlight_device *dev)
@@ -344,16 +490,6 @@ static void taal_get_resolution(struct omap_dss_device *dssdev,
}
}
-static irqreturn_t taal_te_isr(int irq, void *data)
-{
- struct omap_dss_device *dssdev = data;
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-
- complete_all(&td->te_completion);
-
- return IRQ_HANDLED;
-}
-
static ssize_t taal_num_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -362,6 +498,8 @@ static ssize_t taal_num_errors_show(struct device *dev,
u8 errors;
int r;
+ mutex_lock(&td->lock);
+
if (td->enabled) {
dsi_bus_lock();
r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
@@ -370,6 +508,8 @@ static ssize_t taal_num_errors_show(struct device *dev,
r = -ENODEV;
}
+ mutex_unlock(&td->lock);
+
if (r)
return r;
@@ -384,6 +524,8 @@ static ssize_t taal_hw_revision_show(struct device *dev,
u8 id1, id2, id3;
int r;
+ mutex_lock(&td->lock);
+
if (td->enabled) {
dsi_bus_lock();
r = taal_get_id(&id1, &id2, &id3);
@@ -392,6 +534,8 @@ static ssize_t taal_hw_revision_show(struct device *dev,
r = -ENODEV;
}
+ mutex_unlock(&td->lock);
+
if (r)
return r;
@@ -441,6 +585,8 @@ static ssize_t store_cabc_mode(struct device *dev,
if (i == ARRAY_SIZE(cabc_modes))
return -EINVAL;
+ mutex_lock(&td->lock);
+
if (td->enabled) {
dsi_bus_lock();
if (!td->cabc_broken)
@@ -450,6 +596,8 @@ static ssize_t store_cabc_mode(struct device *dev,
td->cabc_mode = i;
+ mutex_unlock(&td->lock);
+
return count;
}
@@ -488,47 +636,93 @@ static struct attribute_group taal_attr_group = {
.attrs = taal_attrs,
};
+static void taal_hw_reset(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
+
+ if (panel_data->reset_gpio == -1)
+ return;
+
+ gpio_set_value(panel_data->reset_gpio, 1);
+ if (td->panel_config->reset_sequence.high)
+ udelay(td->panel_config->reset_sequence.high);
+ /* reset the panel */
+ gpio_set_value(panel_data->reset_gpio, 0);
+ /* assert reset */
+ if (td->panel_config->reset_sequence.low)
+ udelay(td->panel_config->reset_sequence.low);
+ gpio_set_value(panel_data->reset_gpio, 1);
+ /* wait after releasing reset */
+ if (td->panel_config->sleep.hw_reset)
+ msleep(td->panel_config->sleep.hw_reset);
+}
+
static int taal_probe(struct omap_dss_device *dssdev)
{
struct backlight_properties props;
struct taal_data *td;
struct backlight_device *bldev;
- int r;
-
- const struct omap_video_timings taal_panel_timings = {
- .x_res = 864,
- .y_res = 480,
- };
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
+ struct panel_config *panel_config = NULL;
+ int r, i;
dev_dbg(&dssdev->dev, "probe\n");
+ if (!panel_data || !panel_data->name) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(panel_configs); i++) {
+ if (strcmp(panel_data->name, panel_configs[i].name) == 0) {
+ panel_config = &panel_configs[i];
+ break;
+ }
+ }
+
+ if (!panel_config) {
+ r = -EINVAL;
+ goto err;
+ }
+
dssdev->panel.config = OMAP_DSS_LCD_TFT;
- dssdev->panel.timings = taal_panel_timings;
+ dssdev->panel.timings = panel_config->timings;
dssdev->ctrl.pixel_size = 24;
td = kzalloc(sizeof(*td), GFP_KERNEL);
if (!td) {
r = -ENOMEM;
- goto err0;
+ goto err;
}
td->dssdev = dssdev;
+ td->panel_config = panel_config;
mutex_init(&td->lock);
+ atomic_set(&td->do_update, 0);
+
+ r = init_regulators(dssdev, panel_config->regulators,
+ panel_config->num_regulators);
+ if (r)
+ goto err_reg;
+
td->esd_wq = create_singlethread_workqueue("taal_esd");
if (td->esd_wq == NULL) {
dev_err(&dssdev->dev, "can't create ESD workqueue\n");
r = -ENOMEM;
- goto err1;
+ goto err_wq;
}
INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
dev_set_drvdata(&dssdev->dev, td);
+ taal_hw_reset(dssdev);
+
/* if no platform set_backlight() defined, presume DSI backlight
* control */
memset(&props, 0, sizeof(struct backlight_properties));
- if (!dssdev->set_backlight)
+ if (!panel_data->set_backlight)
td->use_dsi_bl = true;
if (td->use_dsi_bl)
@@ -539,7 +733,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
&taal_bl_ops, &props);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
- goto err2;
+ goto err_bl;
}
td->bldev = bldev;
@@ -553,13 +747,13 @@ static int taal_probe(struct omap_dss_device *dssdev)
taal_bl_update_status(bldev);
- if (dssdev->phy.dsi.ext_te) {
- int gpio = dssdev->phy.dsi.ext_te_gpio;
+ if (panel_data->use_ext_te) {
+ int gpio = panel_data->ext_te_gpio;
r = gpio_request(gpio, "taal irq");
if (r) {
dev_err(&dssdev->dev, "GPIO request failed\n");
- goto err3;
+ goto err_gpio;
}
gpio_direction_input(gpio);
@@ -571,49 +765,52 @@ static int taal_probe(struct omap_dss_device *dssdev)
if (r) {
dev_err(&dssdev->dev, "IRQ request failed\n");
gpio_free(gpio);
- goto err3;
+ goto err_irq;
}
- init_completion(&td->te_completion);
+ INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work,
+ taal_te_timeout_work_callback);
- td->use_ext_te = true;
+ dev_dbg(&dssdev->dev, "Using GPIO TE\n");
}
r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
if (r) {
dev_err(&dssdev->dev, "failed to create sysfs files\n");
- goto err4;
+ goto err_sysfs;
}
return 0;
-err4:
- if (td->use_ext_te) {
- int gpio = dssdev->phy.dsi.ext_te_gpio;
- free_irq(gpio_to_irq(gpio), dssdev);
- gpio_free(gpio);
- }
-err3:
+err_sysfs:
+ if (panel_data->use_ext_te)
+ free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev);
+err_irq:
+ if (panel_data->use_ext_te)
+ gpio_free(panel_data->ext_te_gpio);
+err_gpio:
backlight_device_unregister(bldev);
-err2:
- cancel_delayed_work_sync(&td->esd_work);
+err_bl:
destroy_workqueue(td->esd_wq);
-err1:
+err_wq:
+ free_regulators(panel_config->regulators, panel_config->num_regulators);
+err_reg:
kfree(td);
-err0:
+err:
return r;
}
static void taal_remove(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
struct backlight_device *bldev;
dev_dbg(&dssdev->dev, "remove\n");
sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
- if (td->use_ext_te) {
- int gpio = dssdev->phy.dsi.ext_te_gpio;
+ if (panel_data->use_ext_te) {
+ int gpio = panel_data->ext_te_gpio;
free_irq(gpio_to_irq(gpio), dssdev);
gpio_free(gpio);
}
@@ -623,9 +820,15 @@ static void taal_remove(struct omap_dss_device *dssdev)
taal_bl_update_status(bldev);
backlight_device_unregister(bldev);
- cancel_delayed_work_sync(&td->esd_work);
+ cancel_delayed_work(&td->esd_work);
destroy_workqueue(td->esd_wq);
+ /* reset, to be sure that the panel is in a valid state */
+ taal_hw_reset(dssdev);
+
+ free_regulators(td->panel_config->regulators,
+ td->panel_config->num_regulators);
+
kfree(td);
}
@@ -635,23 +838,14 @@ static int taal_power_on(struct omap_dss_device *dssdev)
u8 id1, id2, id3;
int r;
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- return r;
- }
-
- /* it seems we have to wait a bit until taal is ready */
- msleep(5);
-
- dsi_bus_lock();
-
r = omapdss_dsi_display_enable(dssdev);
if (r) {
dev_err(&dssdev->dev, "failed to enable DSI\n");
goto err0;
}
+ taal_hw_reset(dssdev);
+
omapdss_dsi_vc_enable_hs(TCH, false);
r = taal_sleep_out(td);
@@ -662,34 +856,47 @@ static int taal_power_on(struct omap_dss_device *dssdev)
if (r)
goto err;
- /* on early revisions CABC is broken */
- if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
+ /* on early Taal revisions CABC is broken */
+ if (td->panel_config->type == PANEL_TAAL &&
+ (id2 == 0x00 || id2 == 0xff || id2 == 0x81))
td->cabc_broken = true;
- taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
- taal_dcs_write_1(DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */
+ r = taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
+ if (r)
+ goto err;
+
+ r = taal_dcs_write_1(DCS_CTRL_DISPLAY,
+ (1<<2) | (1<<5)); /* BL | BCTRL */
+ if (r)
+ goto err;
+
+ r = taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+ if (r)
+ goto err;
- taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+ r = taal_set_addr_mode(td->rotate, td->mirror);
+ if (r)
+ goto err;
- taal_set_addr_mode(td->rotate, td->mirror);
- if (!td->cabc_broken)
- taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+ if (!td->cabc_broken) {
+ r = taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+ if (r)
+ goto err;
+ }
- taal_dcs_write_0(DCS_DISPLAY_ON);
+ r = taal_dcs_write_0(DCS_DISPLAY_ON);
+ if (r)
+ goto err;
r = _taal_enable_te(dssdev, td->te_enabled);
if (r)
goto err;
-#ifdef TAAL_USE_ESD_CHECK
- queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
-#endif
-
td->enabled = 1;
if (!td->intro_printed) {
- dev_info(&dssdev->dev, "revision %02x.%02x.%02x\n",
- id1, id2, id3);
+ dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n",
+ td->panel_config->name, id1, id2, id3);
if (td->cabc_broken)
dev_info(&dssdev->dev,
"old Taal version, CABC disabled\n");
@@ -698,46 +905,44 @@ static int taal_power_on(struct omap_dss_device *dssdev)
omapdss_dsi_vc_enable_hs(TCH, true);
- dsi_bus_unlock();
-
return 0;
err:
+ dev_err(&dssdev->dev, "error while enabling panel, issuing HW reset\n");
+
+ taal_hw_reset(dssdev);
+
omapdss_dsi_display_disable(dssdev);
err0:
- dsi_bus_unlock();
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
return r;
}
static void taal_power_off(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
- dsi_bus_lock();
-
- cancel_delayed_work(&td->esd_work);
-
- taal_dcs_write_0(DCS_DISPLAY_OFF);
- taal_sleep_in(td);
+ r = taal_dcs_write_0(DCS_DISPLAY_OFF);
+ if (!r) {
+ r = taal_sleep_in(td);
+ /* HACK: wait a bit so that the message goes through */
+ msleep(10);
+ }
- /* wait a bit so that the message goes through */
- msleep(10);
+ if (r) {
+ dev_err(&dssdev->dev,
+ "error disabling panel, issuing HW reset\n");
+ taal_hw_reset(dssdev);
+ }
omapdss_dsi_display_disable(dssdev);
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
td->enabled = 0;
-
- dsi_bus_unlock();
}
static int taal_enable(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
int r;
dev_dbg(&dssdev->dev, "enable\n");
@@ -749,10 +954,19 @@ static int taal_enable(struct omap_dss_device *dssdev)
goto err;
}
+ dsi_bus_lock();
+
r = taal_power_on(dssdev);
+
+ dsi_bus_unlock();
+
if (r)
goto err;
+ if (panel_data->use_esd_check)
+ queue_delayed_work(td->esd_wq, &td->esd_work,
+ TAAL_ESD_CHECK_PERIOD);
+
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
mutex_unlock(&td->lock);
@@ -772,9 +986,15 @@ static void taal_disable(struct omap_dss_device *dssdev)
mutex_lock(&td->lock);
+ cancel_delayed_work(&td->esd_work);
+
+ dsi_bus_lock();
+
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
taal_power_off(dssdev);
+ dsi_bus_unlock();
+
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
mutex_unlock(&td->lock);
@@ -794,7 +1014,14 @@ static int taal_suspend(struct omap_dss_device *dssdev)
goto err;
}
+ cancel_delayed_work(&td->esd_work);
+
+ dsi_bus_lock();
+
taal_power_off(dssdev);
+
+ dsi_bus_unlock();
+
dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
mutex_unlock(&td->lock);
@@ -808,6 +1035,7 @@ err:
static int taal_resume(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
int r;
dev_dbg(&dssdev->dev, "resume\n");
@@ -819,8 +1047,20 @@ static int taal_resume(struct omap_dss_device *dssdev)
goto err;
}
+ dsi_bus_lock();
+
r = taal_power_on(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ dsi_bus_unlock();
+
+ if (r) {
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ } else {
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ if (panel_data->use_esd_check)
+ queue_delayed_work(td->esd_wq, &td->esd_work,
+ TAAL_ESD_CHECK_PERIOD);
+ }
mutex_unlock(&td->lock);
@@ -837,10 +1077,52 @@ static void taal_framedone_cb(int err, void *data)
dsi_bus_unlock();
}
+static irqreturn_t taal_te_isr(int irq, void *data)
+{
+ struct omap_dss_device *dssdev = data;
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int old;
+ int r;
+
+ old = atomic_cmpxchg(&td->do_update, 1, 0);
+
+ if (old) {
+ cancel_delayed_work(&td->te_timeout_work);
+
+ r = omap_dsi_update(dssdev, TCH,
+ td->update_region.x,
+ td->update_region.y,
+ td->update_region.w,
+ td->update_region.h,
+ taal_framedone_cb, dssdev);
+ if (r)
+ goto err;
+ }
+
+ return IRQ_HANDLED;
+err:
+ dev_err(&dssdev->dev, "start update failed\n");
+ dsi_bus_unlock();
+ return IRQ_HANDLED;
+}
+
+static void taal_te_timeout_work_callback(struct work_struct *work)
+{
+ struct taal_data *td = container_of(work, struct taal_data,
+ te_timeout_work.work);
+ struct omap_dss_device *dssdev = td->dssdev;
+
+ dev_err(&dssdev->dev, "TE not received for 250ms!\n");
+
+ atomic_set(&td->do_update, 0);
+ dsi_bus_unlock();
+}
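
Together with taal_update() just below, the do_update flag forms a one-shot handshake: the update path publishes the region, arms a 250 ms timeout and sets the flag, and whichever of the TE interrupt or the timeout work runs first consumes it. A minimal userspace model of that handshake, using C11 atomics in place of the kernel's atomic_t (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int do_update;
static struct { int x, y, w, h; } update_region;

/* Producer: publish the region first, then set the flag
 * (cf. the barrier() before atomic_set() in taal_update()). */
static void request_update(int x, int y, int w, int h)
{
	update_region.x = x; update_region.y = y;
	update_region.w = w; update_region.h = h;
	atomic_store(&do_update, 1);
}

/* Consumer: only the first of "TE arrived" / "timeout" wins the flag. */
static bool consume_update(const char *who)
{
	int expected = 1;

	if (atomic_compare_exchange_strong(&do_update, &expected, 0)) {
		printf("%s starts update %dx%d@%d,%d\n", who,
		       update_region.w, update_region.h,
		       update_region.x, update_region.y);
		return true;
	}
	return false;
}

int main(void)
{
	request_update(0, 0, 864, 480);
	consume_update("TE isr");    /* wins the flag */
	consume_update("timeout");   /* loses: flag already cleared */
	return 0;
}
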
+
static int taal_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
int r;
dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
@@ -853,7 +1135,7 @@ static int taal_update(struct omap_dss_device *dssdev,
goto err;
}
- r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h);
+ r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, true);
if (r)
goto err;
@@ -861,10 +1143,21 @@ static int taal_update(struct omap_dss_device *dssdev,
if (r)
goto err;
- r = omap_dsi_update(dssdev, TCH, x, y, w, h,
- taal_framedone_cb, dssdev);
- if (r)
- goto err;
+ if (td->te_enabled && panel_data->use_ext_te) {
+ td->update_region.x = x;
+ td->update_region.y = y;
+ td->update_region.w = w;
+ td->update_region.h = h;
+ barrier();
+ schedule_delayed_work(&td->te_timeout_work,
+ msecs_to_jiffies(250));
+ atomic_set(&td->do_update, 1);
+ } else {
+ r = omap_dsi_update(dssdev, TCH, x, y, w, h,
+ taal_framedone_cb, dssdev);
+ if (r)
+ goto err;
+ }
/* note: no bus_unlock here. unlock is in framedone_cb */
mutex_unlock(&td->lock);
@@ -894,20 +1187,19 @@ static int taal_sync(struct omap_dss_device *dssdev)
static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
int r;
- td->te_enabled = enable;
-
if (enable)
r = taal_dcs_write_1(DCS_TEAR_ON, 0);
else
r = taal_dcs_write_0(DCS_TEAR_OFF);
- omapdss_dsi_enable_te(dssdev, enable);
+ if (!panel_data->use_ext_te)
+ omapdss_dsi_enable_te(dssdev, enable);
- /* XXX for some reason, DSI TE breaks if we don't wait here.
- * Panel bug? Needs more studying */
- msleep(100);
+ if (td->panel_config->sleep.enable_te)
+ msleep(td->panel_config->sleep.enable_te);
return r;
}
@@ -918,10 +1210,26 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
int r;
mutex_lock(&td->lock);
+
+ if (td->te_enabled == enable)
+ goto end;
+
dsi_bus_lock();
- r = _taal_enable_te(dssdev, enable);
+ if (td->enabled) {
+ r = _taal_enable_te(dssdev, enable);
+ if (r)
+ goto err;
+ }
+
+ td->te_enabled = enable;
+
+ dsi_bus_unlock();
+end:
+ mutex_unlock(&td->lock);
+ return 0;
+err:
dsi_bus_unlock();
mutex_unlock(&td->lock);
@@ -948,6 +1256,10 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
mutex_lock(&td->lock);
+
+ if (td->rotate == rotate)
+ goto end;
+
dsi_bus_lock();
if (td->enabled) {
@@ -959,6 +1271,7 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
td->rotate = rotate;
dsi_bus_unlock();
+end:
mutex_unlock(&td->lock);
return 0;
err:
@@ -987,6 +1300,10 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
dev_dbg(&dssdev->dev, "mirror %d\n", enable);
mutex_lock(&td->lock);
+
+ if (td->mirror == enable)
+ goto end;
+
dsi_bus_lock();
if (td->enabled) {
r = taal_set_addr_mode(td->rotate, enable);
@@ -997,6 +1314,7 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
td->mirror = enable;
dsi_bus_unlock();
+end:
mutex_unlock(&td->lock);
return 0;
err:
@@ -1024,23 +1342,30 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
int r;
mutex_lock(&td->lock);
+
+ if (!td->enabled) {
+ r = -ENODEV;
+ goto err1;
+ }
+
dsi_bus_lock();
r = taal_dcs_read_1(DCS_GET_ID1, &id1);
if (r)
- goto err;
+ goto err2;
r = taal_dcs_read_1(DCS_GET_ID2, &id2);
if (r)
- goto err;
+ goto err2;
r = taal_dcs_read_1(DCS_GET_ID3, &id3);
if (r)
- goto err;
+ goto err2;
dsi_bus_unlock();
mutex_unlock(&td->lock);
return 0;
-err:
+err2:
dsi_bus_unlock();
+err1:
mutex_unlock(&td->lock);
return r;
}
@@ -1128,6 +1453,7 @@ static void taal_esd_work(struct work_struct *work)
struct taal_data *td = container_of(work, struct taal_data,
esd_work.work);
struct omap_dss_device *dssdev = td->dssdev;
+ struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
u8 state1, state2;
int r;
@@ -1168,7 +1494,7 @@ static void taal_esd_work(struct work_struct *work)
}
/* Self-diagnostics result is also shown on TE GPIO line. We need
* to re-enable TE after self diagnostics */
- if (td->use_ext_te && td->te_enabled) {
+ if (td->te_enabled && panel_data->use_ext_te) {
r = taal_dcs_write_1(DCS_TEAR_ON, 0);
if (r)
goto err;
@@ -1184,6 +1510,7 @@ err:
dev_err(&dssdev->dev, "performing LCD reset\n");
taal_power_off(dssdev);
+ taal_hw_reset(dssdev);
taal_power_on(dssdev);
dsi_bus_unlock();
diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
index fa434ca6e4b..e320e67d06f 100644
--- a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
+++ b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
@@ -73,8 +73,12 @@ static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev)
static int toppoly_tdo_panel_probe(struct omap_dss_device *dssdev)
{
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS;
+ dssdev->panel.config = OMAP_DSS_LCD_TFT |
+ OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS |
+ OMAP_DSS_LCD_IPC |
+ OMAP_DSS_LCD_ONOFF;
+
dssdev->panel.timings = toppoly_tdo_panel_timings;
return 0;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index e777e352dbc..5ecdc000409 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -31,6 +31,7 @@
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <linux/hardirq.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -335,7 +336,7 @@ void dispc_save_context(void)
void dispc_restore_context(void)
{
RR(SYSCONFIG);
- RR(IRQENABLE);
+ /*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
RR(DEFAULT_COLOR0);
@@ -472,6 +473,15 @@ void dispc_restore_context(void)
/* enable last, because LCD & DIGIT enable are here */
RR(CONTROL);
+
+ /* clear spurious SYNC_LOST_DIGIT interrupts */
+ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
+
+ /*
+ * enable last so IRQs won't trigger before
+ * the context is fully restored
+ */
+ RR(IRQENABLE);
}
#undef SR
@@ -3019,7 +3029,7 @@ void dispc_fake_vsync_irq(void)
u32 irqstatus = DISPC_IRQ_VSYNC;
int i;
- local_irq_disable();
+ WARN_ON(!in_interrupt());
for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
struct omap_dispc_isr_data *isr_data;
@@ -3031,8 +3041,6 @@ void dispc_fake_vsync_irq(void)
if (isr_data->mask & irqstatus)
isr_data->isr(isr_data->arg, irqstatus);
}
-
- local_irq_enable();
}
#endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index ef8c8529dda..22dd7a474f7 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -82,6 +82,9 @@ static ssize_t display_upd_mode_store(struct device *dev,
int val, r;
enum omap_dss_update_mode mode;
+ if (!dssdev->driver->set_update_mode)
+ return -EINVAL;
+
val = simple_strtoul(buf, NULL, 10);
switch (val) {
@@ -343,7 +346,6 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
return 24;
- return 24;
default:
BUG();
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 3af207b2bde..b3fa3a7db91 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -165,6 +165,14 @@ struct dsi_reg { u16 idx; };
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
+#define DSI_CIO_IRQ_ERROR_MASK \
+ (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
+ DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
+ DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRCONTROL1 | \
+ DSI_CIO_IRQ_ERRCONTROL2 | DSI_CIO_IRQ_ERRCONTROL3 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3)
#define DSI_DT_DCS_SHORT_WRITE_0 0x05
#define DSI_DT_DCS_SHORT_WRITE_1 0x15
@@ -232,13 +240,15 @@ static struct
unsigned pll_locked;
struct completion bta_completion;
+ void (*bta_callback)(void);
int update_channel;
struct dsi_update_region update_region;
bool te_enabled;
- struct work_struct framedone_work;
+ struct workqueue_struct *workqueue;
+
void (*framedone_callback)(int, void *);
void *framedone_data;
@@ -509,9 +519,13 @@ void dsi_irq_handler(void)
dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
#endif
- if (vcstatus & DSI_VC_IRQ_BTA)
+ if (vcstatus & DSI_VC_IRQ_BTA) {
complete(&dsi.bta_completion);
+ if (dsi.bta_callback)
+ dsi.bta_callback();
+ }
+
if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
i, vcstatus);
@@ -536,8 +550,12 @@ void dsi_irq_handler(void)
/* flush posted write */
dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
- DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
- print_irq_status_cio(ciostatus);
+ if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
+ DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+ print_irq_status_cio(ciostatus);
+ } else if (debug_irq) {
+ print_irq_status_cio(ciostatus);
+ }
}
dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
@@ -584,11 +602,8 @@ static void _dsi_initialize_irq(void)
for (i = 0; i < 4; ++i)
dsi_write_reg(DSI_VC_IRQENABLE(i), l);
- /* XXX zonda responds incorrectly, causing control error:
- Exit from LP-ESC mode to LP11 uses wrong transition states on the
- data lines LP0 and LN0. */
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
- -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
+ l = DSI_CIO_IRQ_ERROR_MASK;
+ dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, l);
}
static u32 dsi_get_errors(void)
@@ -1098,6 +1113,7 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
DSSERR("PLL not coming out of reset.\n");
r = -ENODEV;
+ dispc_pck_free_enable(0);
goto err1;
}
@@ -1740,42 +1756,52 @@ static void dsi_vc_initial_config(int channel)
dsi.vc[channel].mode = DSI_VC_MODE_L4;
}
-static void dsi_vc_config_l4(int channel)
+static int dsi_vc_config_l4(int channel)
{
if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
- return;
+ return 0;
DSSDBGF("%d", channel);
dsi_vc_enable(channel, 0);
- if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+ /* VC_BUSY */
+ if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) {
DSSERR("vc(%d) busy when trying to config for L4\n", channel);
+ return -EIO;
+ }
REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
dsi_vc_enable(channel, 1);
dsi.vc[channel].mode = DSI_VC_MODE_L4;
+
+ return 0;
}
-static void dsi_vc_config_vp(int channel)
+static int dsi_vc_config_vp(int channel)
{
if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
- return;
+ return 0;
DSSDBGF("%d", channel);
dsi_vc_enable(channel, 0);
- if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+ /* VC_BUSY */
+ if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) {
DSSERR("vc(%d) busy when trying to config for VP\n", channel);
+ return -EIO;
+ }
REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
dsi_vc_enable(channel, 1);
dsi.vc[channel].mode = DSI_VC_MODE_VP;
+
+ return 0;
}
@@ -1854,19 +1880,19 @@ static u16 dsi_vc_flush_receive_data(int channel)
u32 val;
u8 dt;
val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
- DSSDBG("\trawval %#08x\n", val);
+ DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == DSI_DT_RX_ACK_WITH_ERR) {
u16 err = FLD_GET(val, 23, 8);
dsi_show_rx_ack_with_err(err);
} else if (dt == DSI_DT_RX_SHORT_READ_1) {
- DSSDBG("\tDCS short response, 1 byte: %#x\n",
+ DSSERR("\tDCS short response, 1 byte: %#x\n",
FLD_GET(val, 23, 8));
} else if (dt == DSI_DT_RX_SHORT_READ_2) {
- DSSDBG("\tDCS short response, 2 byte: %#x\n",
+ DSSERR("\tDCS short response, 2 byte: %#x\n",
FLD_GET(val, 23, 8));
} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
- DSSDBG("\tDCS long response, len %d\n",
+ DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
dsi_vc_flush_long_data(channel);
} else {
@@ -2087,6 +2113,13 @@ int dsi_vc_dcs_write(int channel, u8 *data, int len)
if (r)
goto err;
+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
+ DSSERR("rx fifo not empty after write, dumping data:\n");
+ dsi_vc_flush_receive_data(channel);
+ r = -EIO;
+ goto err;
+ }
+
return 0;
err:
DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
@@ -2233,11 +2266,12 @@ int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data)
}
EXPORT_SYMBOL(dsi_vc_dcs_read_1);
-int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u16 *data)
+int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2)
{
+ u8 buf[2];
int r;
- r = dsi_vc_dcs_read(channel, dcs_cmd, (u8 *)data, 2);
+ r = dsi_vc_dcs_read(channel, dcs_cmd, buf, 2);
if (r < 0)
return r;
@@ -2245,231 +2279,122 @@ int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u16 *data)
if (r != 2)
return -EIO;
+ *data1 = buf[0];
+ *data2 = buf[1];
+
return 0;
}
EXPORT_SYMBOL(dsi_vc_dcs_read_2);
int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
{
- int r;
- r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
+ return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
len, 0);
-
- if (r)
- return r;
-
- r = dsi_vc_send_bta_sync(channel);
-
- return r;
}
EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
-static void dsi_set_lp_rx_timeout(unsigned long ns)
+static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16)
{
- u32 r;
- unsigned x4, x16;
unsigned long fck;
- unsigned long ticks;
+ unsigned long total_ticks;
+ u32 r;
- /* ticks in DSI_FCK */
+ BUG_ON(ticks > 0x1fff);
+ /* ticks in DSI_FCK */
fck = dsi_fclk_rate();
- ticks = (fck / 1000 / 1000) * ns / 1000;
- x4 = 0;
- x16 = 0;
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
- x4 = 1;
- x16 = 0;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
- x4 = 0;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
- x4 = 1;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- DSSWARN("LP_TX_TO over limit, setting it to max\n");
- ticks = 0x1fff;
- x4 = 1;
- x16 = 1;
- }
r = dsi_read_reg(DSI_TIMING2);
r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
- r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
- r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
+ r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
+ r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
dsi_write_reg(DSI_TIMING2, r);
- DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+ total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
+
+ DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
+ total_ticks,
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "",
+ (total_ticks * 1000) / (fck / 1000 / 1000));
}
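
The reworked helpers take raw counter ticks plus the x4/x16 prescaler flags, and the debug print converts back to nanoseconds as total_ticks * 1000 / (fck / 1000 / 1000). A short worked example of that conversion, assuming fck = 100 MHz purely for illustration (the driver reads the real rate via dsi_fclk_rate() or dsi_get_txbyteclkhs()):

#include <stdio.h>

int main(void)
{
	unsigned long fck = 100000000UL;   /* Hz, assumed for illustration */
	unsigned ticks = 0x1fff;           /* maximum 13-bit counter value */
	int x4 = 1, x16 = 1;

	unsigned long total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
	unsigned long ns = (total_ticks * 1000) / (fck / 1000 / 1000);

	printf("%lu ticks -> %lu ns\n", total_ticks, ns);  /* 524224 ticks -> 5242240 ns */
	return 0;
}
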
-static void dsi_set_ta_timeout(unsigned long ns)
+static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16)
{
- u32 r;
- unsigned x8, x16;
unsigned long fck;
- unsigned long ticks;
+ unsigned long total_ticks;
+ u32 r;
+
+ BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate();
- ticks = (fck / 1000 / 1000) * ns / 1000;
- x8 = 0;
- x16 = 0;
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
- x8 = 1;
- x16 = 0;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
- x8 = 0;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
- x8 = 1;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- DSSWARN("TA_TO over limit, setting it to max\n");
- ticks = 0x1fff;
- x8 = 1;
- x16 = 1;
- }
r = dsi_read_reg(DSI_TIMING1);
r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
- r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
- r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
+ r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
+ r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
dsi_write_reg(DSI_TIMING1, r);
- DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x8 ? " x8" : "", x16 ? " x16" : "");
+ total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
+
+ DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
+ total_ticks,
+ ticks, x8 ? " x8" : "", x16 ? " x16" : "",
+ (total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_stop_state_counter(unsigned long ns)
+static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16)
{
- u32 r;
- unsigned x4, x16;
unsigned long fck;
- unsigned long ticks;
+ unsigned long total_ticks;
+ u32 r;
- /* ticks in DSI_FCK */
+ BUG_ON(ticks > 0x1fff);
+ /* ticks in DSI_FCK */
fck = dsi_fclk_rate();
- ticks = (fck / 1000 / 1000) * ns / 1000;
- x4 = 0;
- x16 = 0;
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
- x4 = 1;
- x16 = 0;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
- x4 = 0;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
- x4 = 1;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- DSSWARN("STOP_STATE_COUNTER_IO over limit, "
- "setting it to max\n");
- ticks = 0x1fff;
- x4 = 1;
- x16 = 1;
- }
r = dsi_read_reg(DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
- r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
- r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
+ r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
+ r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(DSI_TIMING1, r);
- DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+ total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
+
+ DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
+ total_ticks,
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "",
+ (total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_hs_tx_timeout(unsigned long ns)
+static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16)
{
- u32 r;
- unsigned x4, x16;
unsigned long fck;
- unsigned long ticks;
+ unsigned long total_ticks;
+ u32 r;
- /* ticks in TxByteClkHS */
+ BUG_ON(ticks > 0x1fff);
+ /* ticks in TxByteClkHS */
fck = dsi_get_txbyteclkhs();
- ticks = (fck / 1000 / 1000) * ns / 1000;
- x4 = 0;
- x16 = 0;
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
- x4 = 1;
- x16 = 0;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
- x4 = 0;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
- x4 = 1;
- x16 = 1;
- }
-
- if (ticks > 0x1fff) {
- DSSWARN("HS_TX_TO over limit, setting it to max\n");
- ticks = 0x1fff;
- x4 = 1;
- x16 = 1;
- }
r = dsi_read_reg(DSI_TIMING2);
r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
- r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
- r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */
+ r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
+ r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
dsi_write_reg(DSI_TIMING2, r);
- DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
- (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
- (fck / 1000 / 1000),
- ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+ total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
+
+ DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
+ total_ticks,
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "",
+ (total_ticks * 1000) / (fck / 1000 / 1000));
}
static int dsi_proto_config(struct omap_dss_device *dssdev)
{
@@ -2487,10 +2412,10 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
- dsi_set_stop_state_counter(1000);
- dsi_set_ta_timeout(6400000);
- dsi_set_lp_rx_timeout(48000);
- dsi_set_hs_tx_timeout(1000000);
+ dsi_set_stop_state_counter(0x1000, false, false);
+ dsi_set_ta_timeout(0x1fff, true, true);
+ dsi_set_lp_rx_timeout(0x1fff, true, true);
+ dsi_set_hs_tx_timeout(0x1fff, true, true);
switch (dssdev->ctrl.pixel_size) {
case 16:
@@ -2759,6 +2684,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
unsigned packet_payload;
unsigned packet_len;
u32 l;
+ int r;
const unsigned channel = dsi.update_channel;
/* line buffer is 1024 x 24bits */
/* XXX: for some reason using full buffer size causes considerable TX
@@ -2809,8 +2735,9 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
dsi_perf_mark_start();
- schedule_delayed_work(&dsi.framedone_timeout_work,
+ r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work,
msecs_to_jiffies(250));
+ BUG_ON(r == 0);
dss_start_update(dssdev);
@@ -2834,62 +2761,70 @@ static void dsi_te_timeout(unsigned long arg)
}
#endif
-static void dsi_framedone_timeout_work_callback(struct work_struct *work)
+static void dsi_handle_framedone(int error)
{
- int r;
const int channel = dsi.update_channel;
- DSSERR("Framedone not received for 250ms!\n");
+ cancel_delayed_work(&dsi.framedone_timeout_work);
+
+ dsi_vc_disable_bta_irq(channel);
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle();
+ dsi.bta_callback = NULL;
+
if (dsi.te_enabled) {
/* enable LP_RX_TO again after the TE */
REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
- /* Send BTA after the frame. We need this for the TE to work, as TE
- * trigger is only sent for BTAs without preceding packet. Thus we need
- * to BTA after the pixel packets so that next BTA will cause TE
- * trigger.
- *
- * This is not needed when TE is not in use, but we do it anyway to
- * make sure that the transfer has been completed. It would be more
- * optimal, but more complex, to wait only just before starting next
- * transfer. */
- r = dsi_vc_send_bta_sync(channel);
- if (r)
- DSSERR("BTA after framedone failed\n");
-
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("Received error during frame transfer:\n");
dsi_vc_flush_receive_data(channel);
+ if (!error)
+ error = -EIO;
}
- dsi.framedone_callback(-ETIMEDOUT, dsi.framedone_data);
+ dsi.framedone_callback(error, dsi.framedone_data);
+
+ if (!error)
+ dsi_perf_show("DISPC");
}
-static void dsi_framedone_irq_callback(void *data, u32 mask)
+static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
- /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
- * turns itself off. However, DSI still has the pixels in its buffers,
- * and is sending the data.
- */
+ /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
+ * 250ms which would conflict with this timeout work. What should be
+ * done is first cancel the transfer on the HW, and then cancel the
+ * possibly scheduled framedone work. However, cancelling the transfer
+ * on the HW is buggy, and would probably require resetting the whole
+ * DSI */
- /* SIDLEMODE back to smart-idle */
- dispc_enable_sidle();
+ DSSERR("Framedone not received for 250ms!\n");
- schedule_work(&dsi.framedone_work);
+ dsi_handle_framedone(-ETIMEDOUT);
}
-static void dsi_handle_framedone(void)
+static void dsi_framedone_bta_callback(void)
+{
+ dsi_handle_framedone(0);
+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+ dispc_fake_vsync_irq();
+#endif
+}
+
+static void dsi_framedone_irq_callback(void *data, u32 mask)
{
- int r;
const int channel = dsi.update_channel;
+ int r;
- DSSDBG("FRAMEDONE\n");
+ /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
+ * turns itself off. However, DSI still has the pixels in its buffers,
+ * and is sending the data.
+ */
if (dsi.te_enabled) {
/* enable LP_RX_TO again after the TE */
@@ -2904,37 +2839,30 @@ static void dsi_handle_framedone(void)
* This is not needed when TE is not in use, but we do it anyway to
* make sure that the transfer has been completed. It would be more
* optimal, but more complex, to wait only just before starting next
- * transfer. */
- r = dsi_vc_send_bta_sync(channel);
- if (r)
- DSSERR("BTA after framedone failed\n");
-
- /* RX_FIFO_NOT_EMPTY */
- if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
- DSSERR("Received error during frame transfer:\n");
- dsi_vc_flush_receive_data(channel);
- }
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
- dispc_fake_vsync_irq();
-#endif
-}
-
-static void dsi_framedone_work_callback(struct work_struct *work)
-{
- DSSDBGF();
+ * transfer.
+ *
+ * Also, as there's no interrupt telling when the transfer has been
+ * done and the channel could be reconfigured, the only way is to
+ * busyloop until TE_SIZE is zero. With BTA we can do this
+ * asynchronously.
+ */
- cancel_delayed_work_sync(&dsi.framedone_timeout_work);
+ dsi.bta_callback = dsi_framedone_bta_callback;
- dsi_handle_framedone();
+ barrier();
- dsi_perf_show("DISPC");
+ dsi_vc_enable_bta_irq(channel);
- dsi.framedone_callback(0, dsi.framedone_data);
+ r = dsi_vc_send_bta(channel);
+ if (r) {
+ DSSERR("BTA after framedone failed\n");
+ dsi_handle_framedone(-EIO);
+ }
}
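
The framedone path above now completes asynchronously: dsi_framedone_irq_callback() publishes dsi.bta_callback (with a barrier) before enabling the BTA interrupt and sending the BTA. For illustration only, a minimal sketch of how the interrupt side could consume that callback; the helper name is an assumption, not taken from the patch:

static void dsi_bta_complete_sketch(void)
{
        void (*cb)(void) = dsi.bta_callback;

        /* dsi_handle_framedone() later clears the pointer and the IRQ */
        if (cb)
                cb();   /* ends up in dsi_framedone_bta_callback() above */
}
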
int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
- u16 *x, u16 *y, u16 *w, u16 *h)
+ u16 *x, u16 *y, u16 *w, u16 *h,
+ bool enlarge_update_area)
{
u16 dw, dh;
@@ -2958,7 +2886,8 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
dsi_perf_mark_setup();
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dss_setup_partial_planes(dssdev, x, y, w, h);
+ dss_setup_partial_planes(dssdev, x, y, w, h,
+ enlarge_update_area);
dispc_set_lcd_size(*w, *h);
}
@@ -2973,6 +2902,12 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
{
dsi.update_channel = channel;
+ /* OMAP DSS cannot send updates of odd widths.
+ * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
+ * here to make sure we catch erroneous updates. Otherwise we'll only
+ * see a rather obscure HW error happening, as DSS halts. */
+ BUG_ON(x % 2 == 1);
+
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
dsi.framedone_callback = callback;
dsi.framedone_data = data;
@@ -2985,7 +2920,12 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
dsi_update_screen_dispc(dssdev, x, y, w, h);
} else {
- dsi_update_screen_l4(dssdev, x, y, w, h);
+ int r;
+
+ r = dsi_update_screen_l4(dssdev, x, y, w, h);
+ if (r)
+ return r;
+
dsi_perf_show("L4");
callback(0, data);
}
@@ -3048,8 +2988,10 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
cinfo.regm3 = dssdev->phy.dsi.div.regm3;
cinfo.regm4 = dssdev->phy.dsi.div.regm4;
r = dsi_calc_clock_rates(&cinfo);
- if (r)
+ if (r) {
+ DSSERR("Failed to calc dsi clocks\n");
return r;
+ }
r = dsi_pll_set_clock_div(&cinfo);
if (r) {
@@ -3147,6 +3089,13 @@ err0:
static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
{
+ /* disable interface */
+ dsi_if_enable(0);
+ dsi_vc_enable(0, 0);
+ dsi_vc_enable(1, 0);
+ dsi_vc_enable(2, 0);
+ dsi_vc_enable(3, 0);
+
dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
dsi_complexio_uninit();
@@ -3257,7 +3206,7 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
burst_size_bytes = 16 * 32 / 8;
*fifo_high = fifo_size - burst_size_bytes;
- *fifo_low = fifo_size - burst_size_bytes * 8;
+ *fifo_low = fifo_size - burst_size_bytes * 2;
}
int dsi_init_display(struct omap_dss_device *dssdev)
@@ -3274,6 +3223,18 @@ int dsi_init_display(struct omap_dss_device *dssdev)
return 0;
}
+void dsi_wait_dsi1_pll_active(void)
+{
+ if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1)
+ DSSERR("DSI1 PLL clock not active\n");
+}
+
+void dsi_wait_dsi2_pll_active(void)
+{
+ if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1)
+ DSSERR("DSI2 PLL clock not active\n");
+}
+
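
dsi_wait_dsi1_pll_active()/dsi_wait_dsi2_pll_active() rely on the driver's existing wait_for_bit_change() helper to poll DSI_PLL_STATUS. A rough sketch of such a bounded poll, purely to show the intent; the real helper's loop bound and return convention may differ:

static int dsi_wait_pll_bit_sketch(int bitnum)
{
        int t = 100000;

        while (REG_GET(DSI_PLL_STATUS, bitnum, bitnum) != 1) {
                if (--t == 0)
                        return 0;       /* gave up, bit never went high */
        }

        return 1;
}
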
int dsi_init(struct platform_device *pdev)
{
u32 rev;
@@ -3292,7 +3253,10 @@ int dsi_init(struct platform_device *pdev)
mutex_init(&dsi.lock);
sema_init(&dsi.bus_lock, 1);
- INIT_WORK(&dsi.framedone_work, dsi_framedone_work_callback);
+ dsi.workqueue = create_singlethread_workqueue("dsi");
+ if (dsi.workqueue == NULL)
+ return -ENOMEM;
+
INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work,
dsi_framedone_timeout_work_callback);
@@ -3328,6 +3292,7 @@ int dsi_init(struct platform_device *pdev)
err2:
iounmap(dsi.base);
err1:
+ destroy_workqueue(dsi.workqueue);
return r;
}
@@ -3335,6 +3300,8 @@ void dsi_exit(void)
{
iounmap(dsi.base);
+ destroy_workqueue(dsi.workqueue);
+
DSSDBG("omap_dsi_exit\n");
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 24b18258654..77c3621c917 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -265,6 +265,9 @@ void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
+ if (clk_src == DSS_SRC_DSI1_PLL_FCLK)
+ dsi_wait_dsi1_pll_active();
+
REG_FLD_MOD(DSS_CONTROL, b, 0, 0); /* DISPC_CLK_SWITCH */
dss.dispc_clk_source = clk_src;
@@ -279,6 +282,9 @@ void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
+ if (clk_src == DSS_SRC_DSI2_PLL_FCLK)
+ dsi_wait_dsi2_pll_active();
+
REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
dss.dsi_clk_source = clk_src;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 786f433fd57..5c7940d5f28 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -199,7 +199,8 @@ int dss_init_overlay_managers(struct platform_device *pdev);
void dss_uninit_overlay_managers(struct platform_device *pdev);
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
void dss_setup_partial_planes(struct omap_dss_device *dssdev,
- u16 *x, u16 *y, u16 *w, u16 *h);
+ u16 *x, u16 *y, u16 *w, u16 *h,
+ bool enlarge_update_area);
void dss_start_update(struct omap_dss_device *dssdev);
/* overlay */
@@ -281,6 +282,8 @@ void dsi_pll_uninit(void);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
u32 *fifo_low, u32 *fifo_high);
+void dsi_wait_dsi1_pll_active(void);
+void dsi_wait_dsi2_pll_active(void);
#else
static inline int dsi_init(struct platform_device *pdev)
{
@@ -289,6 +292,12 @@ static inline int dsi_init(struct platform_device *pdev)
static inline void dsi_exit(void)
{
}
+static inline void dsi_wait_dsi1_pll_active(void)
+{
+}
+static inline void dsi_wait_dsi2_pll_active(void)
+{
+}
#endif
/* DPI */
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 9e1fbe531bf..6a649ab5539 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -440,6 +440,10 @@ struct manager_cache_data {
/* manual update region */
u16 x, y, w, h;
+
+ /* enlarge the update area if the update area contains scaled
+ * overlays */
+ bool enlarge_update_area;
};
static struct {
@@ -525,7 +529,7 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
int i;
struct omap_dss_device *dssdev = mgr->device;
- if (!dssdev)
+ if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
@@ -596,11 +600,14 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
int r;
int i;
- if (!ovl->manager || !ovl->manager->device)
+ if (!ovl->manager)
return 0;
dssdev = ovl->manager->device;
+ if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
channel = OMAP_DSS_CHANNEL_DIGIT;
@@ -718,6 +725,7 @@ static int configure_overlay(enum omap_plane plane)
u16 x, y, w, h;
u32 paddr;
int r;
+ u16 orig_w, orig_h, orig_outw, orig_outh;
DSSDBGF("%d", plane);
@@ -738,8 +746,16 @@ static int configure_overlay(enum omap_plane plane)
outh = c->out_height == 0 ? c->height : c->out_height;
paddr = c->paddr;
+ orig_w = w;
+ orig_h = h;
+ orig_outw = outw;
+ orig_outh = outh;
+
if (c->manual_update && mc->do_manual_update) {
unsigned bpp;
+ unsigned scale_x_m = w, scale_x_d = outw;
+ unsigned scale_y_m = h, scale_y_d = outh;
+
/* If the overlay is outside the update region, disable it */
if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
x, y, outw, outh)) {
@@ -770,38 +786,47 @@ static int configure_overlay(enum omap_plane plane)
BUG();
}
- if (dispc_is_overlay_scaled(c)) {
- /* If the overlay is scaled, the update area has
- * already been enlarged to cover the whole overlay. We
- * only need to adjust x/y here */
- x = c->pos_x - mc->x;
- y = c->pos_y - mc->y;
+ if (mc->x > c->pos_x) {
+ x = 0;
+ outw -= (mc->x - c->pos_x);
+ paddr += (mc->x - c->pos_x) *
+ scale_x_m / scale_x_d * bpp / 8;
} else {
- if (mc->x > c->pos_x) {
- x = 0;
- w -= (mc->x - c->pos_x);
- paddr += (mc->x - c->pos_x) * bpp / 8;
- } else {
- x = c->pos_x - mc->x;
- }
-
- if (mc->y > c->pos_y) {
- y = 0;
- h -= (mc->y - c->pos_y);
- paddr += (mc->y - c->pos_y) * c->screen_width *
- bpp / 8;
- } else {
- y = c->pos_y - mc->y;
- }
-
- if (mc->w < (x+w))
- w -= (x+w) - (mc->w);
+ x = c->pos_x - mc->x;
+ }
- if (mc->h < (y+h))
- h -= (y+h) - (mc->h);
+ if (mc->y > c->pos_y) {
+ y = 0;
+ outh -= (mc->y - c->pos_y);
+ paddr += (mc->y - c->pos_y) *
+ scale_y_m / scale_y_d *
+ c->screen_width * bpp / 8;
+ } else {
+ y = c->pos_y - mc->y;
+ }
- outw = w;
- outh = h;
+ if (mc->w < (x + outw))
+ outw -= (x + outw) - (mc->w);
+
+ if (mc->h < (y + outh))
+ outh -= (y + outh) - (mc->h);
+
+ w = w * outw / orig_outw;
+ h = h * outh / orig_outh;
+
+ /* YUV mode overlay's input width has to be even and the
+ * algorithm above may adjust the width to be odd.
+ *
+ * Here we adjust the width if needed, preferring to increase
+ * the width if the original width was bigger.
+ */
+ if ((w & 1) &&
+ (c->color_mode == OMAP_DSS_COLOR_YUV2 ||
+ c->color_mode == OMAP_DSS_COLOR_UYVY)) {
+ if (orig_w > w)
+ w += 1;
+ else
+ w -= 1;
}
}
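
The width fixup above keeps YUV overlay input widths even after cropping; for illustration, the same rule as a standalone helper (not part of the patch):

static u16 yuv_even_width_sketch(u16 w, u16 orig_w)
{
        if (!(w & 1))
                return w;

        /* prefer growing back toward the original width when it was larger */
        return orig_w > w ? w + 1 : w - 1;
}
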
@@ -960,7 +985,7 @@ static void make_even(u16 *x, u16 *w)
/* Configure dispc for partial update. Return possibly modified update
* area */
void dss_setup_partial_planes(struct omap_dss_device *dssdev,
- u16 *xi, u16 *yi, u16 *wi, u16 *hi)
+ u16 *xi, u16 *yi, u16 *wi, u16 *hi, bool enlarge_update_area)
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
@@ -969,6 +994,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
int i;
u16 x, y, w, h;
unsigned long flags;
+ bool area_changed;
x = *xi;
y = *yi;
@@ -989,73 +1015,91 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
spin_lock_irqsave(&dss_cache.lock, flags);
- /* We need to show the whole overlay if it is scaled. So look for
- * those, and make the update area larger if found.
- * Also mark the overlay cache dirty */
- for (i = 0; i < num_ovls; ++i) {
- unsigned x1, y1, x2, y2;
- unsigned outw, outh;
+ /*
+ * Execute the outer loop until the inner loop has completed
+ * once without increasing the update area. This will ensure that
+ * all scaled overlays end up completely within the update area.
+ */
+ do {
+ area_changed = false;
- oc = &dss_cache.overlay_cache[i];
+ /* We need to show the whole overlay if it is scaled. So look
+ * for those, and make the update area larger if found.
+ * Also mark the overlay cache dirty */
+ for (i = 0; i < num_ovls; ++i) {
+ unsigned x1, y1, x2, y2;
+ unsigned outw, outh;
- if (oc->channel != mgr->id)
- continue;
+ oc = &dss_cache.overlay_cache[i];
- oc->dirty = true;
+ if (oc->channel != mgr->id)
+ continue;
- if (!oc->enabled)
- continue;
+ oc->dirty = true;
- if (!dispc_is_overlay_scaled(oc))
- continue;
+ if (!enlarge_update_area)
+ continue;
- outw = oc->out_width == 0 ? oc->width : oc->out_width;
- outh = oc->out_height == 0 ? oc->height : oc->out_height;
+ if (!oc->enabled)
+ continue;
- /* is the overlay outside the update region? */
- if (!rectangle_intersects(x, y, w, h,
- oc->pos_x, oc->pos_y,
- outw, outh))
- continue;
+ if (!dispc_is_overlay_scaled(oc))
+ continue;
- /* if the overlay totally inside the update region? */
- if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
- x, y, w, h))
- continue;
+ outw = oc->out_width == 0 ?
+ oc->width : oc->out_width;
+ outh = oc->out_height == 0 ?
+ oc->height : oc->out_height;
+
+ /* is the overlay outside the update region? */
+ if (!rectangle_intersects(x, y, w, h,
+ oc->pos_x, oc->pos_y,
+ outw, outh))
+ continue;
- if (x > oc->pos_x)
- x1 = oc->pos_x;
- else
- x1 = x;
+ /* is the overlay totally inside the update region? */
+ if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
+ x, y, w, h))
+ continue;
- if (y > oc->pos_y)
- y1 = oc->pos_y;
- else
- y1 = y;
+ if (x > oc->pos_x)
+ x1 = oc->pos_x;
+ else
+ x1 = x;
- if ((x + w) < (oc->pos_x + outw))
- x2 = oc->pos_x + outw;
- else
- x2 = x + w;
+ if (y > oc->pos_y)
+ y1 = oc->pos_y;
+ else
+ y1 = y;
- if ((y + h) < (oc->pos_y + outh))
- y2 = oc->pos_y + outh;
- else
- y2 = y + h;
+ if ((x + w) < (oc->pos_x + outw))
+ x2 = oc->pos_x + outw;
+ else
+ x2 = x + w;
- x = x1;
- y = y1;
- w = x2 - x1;
- h = y2 - y1;
+ if ((y + h) < (oc->pos_y + outh))
+ y2 = oc->pos_y + outh;
+ else
+ y2 = y + h;
- make_even(&x, &w);
+ x = x1;
+ y = y1;
+ w = x2 - x1;
+ h = y2 - y1;
- DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
+ make_even(&x, &w);
+
+ DSSDBG("changing upd area due to ovl(%d) "
+ "scaling %d,%d %dx%d\n",
i, x, y, w, h);
- }
+
+ area_changed = true;
+ }
+ } while (area_changed);
mc = &dss_cache.manager_cache[mgr->id];
mc->do_manual_update = true;
+ mc->enlarge_update_area = enlarge_update_area;
mc->x = x;
mc->y = y;
mc->w = w;
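
The new do/while loop in dss_setup_partial_planes() iterates until a full pass over the overlays no longer grows the update rectangle, so every scaled overlay ends up fully inside it. A condensed sketch of that fixed-point idea with illustrative types (min/max from <linux/kernel.h>); not the driver's code:

struct upd_rect { u16 x, y, w, h; };

static bool grow_to_cover(struct upd_rect *u, const struct upd_rect *ovl)
{
        u16 x1 = min(u->x, ovl->x);
        u16 y1 = min(u->y, ovl->y);
        u16 x2 = max(u->x + u->w, ovl->x + ovl->w);
        u16 y2 = max(u->y + u->h, ovl->y + ovl->h);
        bool changed;

        changed = x1 != u->x || y1 != u->y ||
                  x2 - x1 != u->w || y2 - y1 != u->h;

        u->x = x1;
        u->y = y1;
        u->w = x2 - x1;
        u->h = y2 - y1;

        return changed;
}

dss_setup_partial_planes() repeats such a pass, re-applying make_even(), until no scaled overlay reports a change.
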
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 82336583ade..244dca81a39 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -65,7 +65,7 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
mgr = omap_dss_get_overlay_manager(i);
- if (strncmp(buf, mgr->name, len) == 0)
+ if (sysfs_streq(buf, mgr->name))
break;
mgr = NULL;
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index cc23f53cc62..bbe62464e92 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -886,7 +886,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
return -EINVAL;
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dss_setup_partial_planes(dssdev, x, y, w, h);
+ dss_setup_partial_planes(dssdev, x, y, w, h, true);
dispc_set_lcd_size(*w, *h);
}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 9c7361871d7..6f435450987 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -34,12 +34,37 @@
#include "omapfb.h"
+static u8 get_mem_idx(struct omapfb_info *ofbi)
+{
+ if (ofbi->id == ofbi->region->id)
+ return 0;
+
+ return OMAPFB_MEM_IDX_ENABLED | ofbi->region->id;
+}
+
+static struct omapfb2_mem_region *get_mem_region(struct omapfb_info *ofbi,
+ u8 mem_idx)
+{
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+
+ if (mem_idx & OMAPFB_MEM_IDX_ENABLED)
+ mem_idx &= OMAPFB_MEM_IDX_MASK;
+ else
+ mem_idx = ofbi->id;
+
+ if (mem_idx >= fbdev->num_fbs)
+ return NULL;
+
+ return &fbdev->regions[mem_idx];
+}
+
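
get_mem_idx()/get_mem_region() define the mem_idx encoding in struct omapfb_plane_info: 0 means the plane uses its own framebuffer's region, otherwise OMAPFB_MEM_IDX_ENABLED is set alongside the region number. A hedged user-space sketch of selecting region 1 for a plane; it assumes the updated omapfb UAPI header is installed and skips error handling:

#include <sys/ioctl.h>
#include <linux/omapfb.h>

static int plane_use_region_one(int fd)
{
        struct omapfb_plane_info pi;

        if (ioctl(fd, OMAPFB_QUERY_PLANE, &pi) < 0)
                return -1;

        pi.mem_idx = OMAPFB_MEM_IDX_ENABLED | 1;        /* borrow region 1 */
        pi.enabled = 1;

        return ioctl(fd, OMAPFB_SETUP_PLANE, &pi);
}
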
static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_overlay *ovl;
- struct omap_overlay_info info;
+ struct omap_overlay_info old_info;
+ struct omapfb2_mem_region *old_rg, *new_rg;
int r = 0;
DBG("omapfb_setup_plane\n");
@@ -52,36 +77,106 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
/* XXX uses only the first overlay */
ovl = ofbi->overlays[0];
- if (pi->enabled && !ofbi->region.size) {
+ old_rg = ofbi->region;
+ new_rg = get_mem_region(ofbi, pi->mem_idx);
+ if (!new_rg) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* Take the locks in a specific order to keep lockdep happy */
+ if (old_rg->id < new_rg->id) {
+ omapfb_get_mem_region(old_rg);
+ omapfb_get_mem_region(new_rg);
+ } else if (new_rg->id < old_rg->id) {
+ omapfb_get_mem_region(new_rg);
+ omapfb_get_mem_region(old_rg);
+ } else
+ omapfb_get_mem_region(old_rg);
+
+ if (pi->enabled && !new_rg->size) {
/*
* This plane's memory was freed, can't enable it
* until it's reallocated.
*/
r = -EINVAL;
- goto out;
+ goto put_mem;
}
- ovl->get_overlay_info(ovl, &info);
+ ovl->get_overlay_info(ovl, &old_info);
- info.pos_x = pi->pos_x;
- info.pos_y = pi->pos_y;
- info.out_width = pi->out_width;
- info.out_height = pi->out_height;
- info.enabled = pi->enabled;
+ if (old_rg != new_rg) {
+ ofbi->region = new_rg;
+ set_fb_fix(fbi);
+ }
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- goto out;
+ if (pi->enabled) {
+ struct omap_overlay_info info;
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
+ r = omapfb_setup_overlay(fbi, ovl, pi->pos_x, pi->pos_y,
+ pi->out_width, pi->out_height);
if (r)
- goto out;
+ goto undo;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ if (!info.enabled) {
+ info.enabled = pi->enabled;
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ goto undo;
+ }
+ } else {
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.enabled = pi->enabled;
+ info.pos_x = pi->pos_x;
+ info.pos_y = pi->pos_y;
+ info.out_width = pi->out_width;
+ info.out_height = pi->out_height;
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ goto undo;
}
-out:
- if (r)
- dev_err(fbdev->dev, "setup_plane failed\n");
+ if (ovl->manager)
+ ovl->manager->apply(ovl->manager);
+
+ /* Release the locks in a specific order to keep lockdep happy */
+ if (old_rg->id > new_rg->id) {
+ omapfb_put_mem_region(old_rg);
+ omapfb_put_mem_region(new_rg);
+ } else if (new_rg->id > old_rg->id) {
+ omapfb_put_mem_region(new_rg);
+ omapfb_put_mem_region(old_rg);
+ } else
+ omapfb_put_mem_region(old_rg);
+
+ return 0;
+
+ undo:
+ if (old_rg != new_rg) {
+ ofbi->region = old_rg;
+ set_fb_fix(fbi);
+ }
+
+ ovl->set_overlay_info(ovl, &old_info);
+ put_mem:
+ /* Release the locks in a specific order to keep lockdep happy */
+ if (old_rg->id > new_rg->id) {
+ omapfb_put_mem_region(old_rg);
+ omapfb_put_mem_region(new_rg);
+ } else if (new_rg->id > old_rg->id) {
+ omapfb_put_mem_region(new_rg);
+ omapfb_put_mem_region(old_rg);
+ } else
+ omapfb_put_mem_region(old_rg);
+ out:
+ dev_err(fbdev->dev, "setup_plane failed\n");
+
return r;
}
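
omapfb_setup_plane() above always takes (and releases) the two region semaphores in ascending id order so lockdep's nesting annotations stay consistent; condensed into an illustrative helper that is not present in the patch:

static void get_two_regions_sketch(struct omapfb2_mem_region *a,
                struct omapfb2_mem_region *b)
{
        if (a == b) {
                omapfb_get_mem_region(a);
        } else if (a->id < b->id) {
                omapfb_get_mem_region(a);
                omapfb_get_mem_region(b);
        } else {
                omapfb_get_mem_region(b);
                omapfb_get_mem_region(a);
        }
}
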
@@ -92,8 +187,8 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
if (ofbi->num_overlays != 1) {
memset(pi, 0, sizeof(*pi));
} else {
- struct omap_overlay_info *ovli;
struct omap_overlay *ovl;
+ struct omap_overlay_info *ovli;
ovl = ofbi->overlays[0];
ovli = &ovl->info;
@@ -103,6 +198,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
pi->enabled = ovli->enabled;
pi->channel_out = 0; /* xxx */
pi->mirror = 0;
+ pi->mem_idx = get_mem_idx(ofbi);
pi->out_width = ovli->out_width;
pi->out_height = ovli->out_height;
}
@@ -115,7 +211,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
- int r, i;
+ int r = 0, i;
size_t size;
if (mi->type > OMAPFB_MEMTYPE_MAX)
@@ -123,22 +219,44 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
size = PAGE_ALIGN(mi->size);
- rg = &ofbi->region;
+ rg = ofbi->region;
- for (i = 0; i < ofbi->num_overlays; i++) {
- if (ofbi->overlays[i]->info.enabled)
- return -EBUSY;
+ down_write_nested(&rg->lock, rg->id);
+ atomic_inc(&rg->lock_count);
+
+ if (atomic_read(&rg->map_count)) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]);
+ int j;
+
+ if (ofbi2->region != rg)
+ continue;
+
+ for (j = 0; j < ofbi2->num_overlays; j++) {
+ if (ofbi2->overlays[j]->info.enabled) {
+ r = -EBUSY;
+ goto out;
+ }
+ }
}
if (rg->size != size || rg->type != mi->type) {
r = omapfb_realloc_fbmem(fbi, size, mi->type);
if (r) {
dev_err(fbdev->dev, "realloc fbmem failed\n");
- return r;
+ goto out;
}
}
- return 0;
+ out:
+ atomic_dec(&rg->lock_count);
+ up_write(&rg->lock);
+
+ return r;
}
static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
@@ -146,12 +264,14 @@ static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_mem_region *rg;
- rg = &ofbi->region;
+ rg = omapfb_get_mem_region(ofbi->region);
memset(mi, 0, sizeof(*mi));
mi->size = rg->size;
mi->type = rg->type;
+ omapfb_put_mem_region(rg);
+
return 0;
}
@@ -490,6 +610,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
struct omapfb_vram_info vram_info;
struct omapfb_tearsync_info tearsync_info;
struct omapfb_display_info display_info;
+ u32 crt;
} p;
int r = 0;
@@ -648,6 +769,17 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
r = -EFAULT;
break;
+ case FBIO_WAITFORVSYNC:
+ if (get_user(p.crt, (__u32 __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (p.crt != 0) {
+ r = -ENODEV;
+ break;
+ }
+ /* FALLTHROUGH */
+
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
if (!display) {
@@ -738,7 +870,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- if (!display->driver->enable_te) {
+ if (!display || !display->driver->enable_te) {
r = -ENODEV;
break;
}
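
With the FBIO_WAITFORVSYNC case added a few hunks above, user space can use the generic vsync ioctl on omapfb as long as the CRT argument is 0. A minimal usage sketch, assuming FBIO_WAITFORVSYNC comes from <linux/fb.h> on the target kernel:

#include <sys/ioctl.h>
#include <linux/fb.h>

static int wait_vsync(int fd)
{
        __u32 crt = 0;          /* omapfb only accepts CRT 0 */

        return ioctl(fd, FBIO_WAITFORVSYNC, &crt);
}
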
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 4b4506da96d..04034d410d6 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -157,7 +157,7 @@ static void fill_fb(struct fb_info *fbi)
static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
{
- const struct vrfb *vrfb = &ofbi->region.vrfb;
+ const struct vrfb *vrfb = &ofbi->region->vrfb;
unsigned offset;
switch (rot) {
@@ -185,27 +185,27 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
static u32 omapfb_get_region_rot_paddr(const struct omapfb_info *ofbi, int rot)
{
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
- return ofbi->region.vrfb.paddr[rot]
+ return ofbi->region->vrfb.paddr[rot]
+ omapfb_get_vrfb_offset(ofbi, rot);
} else {
- return ofbi->region.paddr;
+ return ofbi->region->paddr;
}
}
static u32 omapfb_get_region_paddr(const struct omapfb_info *ofbi)
{
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
- return ofbi->region.vrfb.paddr[0];
+ return ofbi->region->vrfb.paddr[0];
else
- return ofbi->region.paddr;
+ return ofbi->region->paddr;
}
static void __iomem *omapfb_get_region_vaddr(const struct omapfb_info *ofbi)
{
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
- return ofbi->region.vrfb.vaddr[0];
+ return ofbi->region->vrfb.vaddr[0];
else
- return ofbi->region.vaddr;
+ return ofbi->region->vaddr;
}
static struct omapfb_colormode omapfb_colormodes[] = {
@@ -450,7 +450,7 @@ static int check_vrfb_fb_size(unsigned long region_size,
static int check_fb_size(const struct omapfb_info *ofbi,
struct fb_var_screeninfo *var)
{
- unsigned long max_frame_size = ofbi->region.size;
+ unsigned long max_frame_size = ofbi->region->size;
int bytespp = var->bits_per_pixel >> 3;
unsigned long line_size = var->xres_virtual * bytespp;
@@ -497,7 +497,7 @@ static int check_fb_size(const struct omapfb_info *ofbi,
static int setup_vrfb_rotation(struct fb_info *fbi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
- struct omapfb2_mem_region *rg = &ofbi->region;
+ struct omapfb2_mem_region *rg = ofbi->region;
struct vrfb *vrfb = &rg->vrfb;
struct fb_var_screeninfo *var = &fbi->var;
struct fb_fix_screeninfo *fix = &fbi->fix;
@@ -558,9 +558,9 @@ static int setup_vrfb_rotation(struct fb_info *fbi)
return r;
/* used by open/write in fbmem.c */
- fbi->screen_base = ofbi->region.vrfb.vaddr[0];
+ fbi->screen_base = ofbi->region->vrfb.vaddr[0];
- fix->smem_start = ofbi->region.vrfb.paddr[0];
+ fix->smem_start = ofbi->region->vrfb.paddr[0];
switch (var->nonstd) {
case OMAPFB_COLOR_YUV422:
@@ -599,7 +599,7 @@ void set_fb_fix(struct fb_info *fbi)
struct fb_fix_screeninfo *fix = &fbi->fix;
struct fb_var_screeninfo *var = &fbi->var;
struct omapfb_info *ofbi = FB2OFB(fbi);
- struct omapfb2_mem_region *rg = &ofbi->region;
+ struct omapfb2_mem_region *rg = ofbi->region;
DBG("set_fb_fix\n");
@@ -668,8 +668,7 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
DBG("check_fb_var %d\n", ofbi->id);
- if (ofbi->region.size == 0)
- return 0;
+ WARN_ON(!atomic_read(&ofbi->region->lock_count));
r = fb_mode_to_dss_mode(var, &mode);
if (r) {
@@ -684,13 +683,14 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
}
}
- if (var->rotate < 0 || var->rotate > 3)
+ if (var->rotate > 3)
return -EINVAL;
if (check_fb_res_bounds(var))
return -EINVAL;
- if (check_fb_size(ofbi, var))
+ /* When no memory is allocated ignore the size check */
+ if (ofbi->region->size != 0 && check_fb_size(ofbi, var))
return -EINVAL;
if (var->xres + var->xoffset > var->xres_virtual)
@@ -822,9 +822,43 @@ static unsigned calc_rotation_offset_vrfb(const struct fb_var_screeninfo *var,
return offset;
}
+static void omapfb_calc_addr(const struct omapfb_info *ofbi,
+ const struct fb_var_screeninfo *var,
+ const struct fb_fix_screeninfo *fix,
+ int rotation, u32 *paddr, void __iomem **vaddr)
+{
+ u32 data_start_p;
+ void __iomem *data_start_v;
+ int offset;
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
+ data_start_v = NULL;
+ } else {
+ data_start_p = omapfb_get_region_paddr(ofbi);
+ data_start_v = omapfb_get_region_vaddr(ofbi);
+ }
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ offset = calc_rotation_offset_vrfb(var, fix, rotation);
+ else
+ offset = calc_rotation_offset_dma(var, fix, rotation);
+
+ data_start_p += offset;
+ data_start_v += offset;
+
+ if (offset)
+ DBG("offset %d, %d = %d\n",
+ var->xoffset, var->yoffset, offset);
+
+ DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v);
+
+ *paddr = data_start_p;
+ *vaddr = data_start_v;
+}
/* setup overlay according to the fb */
-static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
+int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
u16 posx, u16 posy, u16 outw, u16 outh)
{
int r = 0;
@@ -832,9 +866,8 @@ static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
struct fb_var_screeninfo *var = &fbi->var;
struct fb_fix_screeninfo *fix = &fbi->fix;
enum omap_color_mode mode = 0;
- int offset;
- u32 data_start_p;
- void __iomem *data_start_v;
+ u32 data_start_p = 0;
+ void __iomem *data_start_v = NULL;
struct omap_overlay_info info;
int xres, yres;
int screen_width;
@@ -842,6 +875,8 @@ static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
int rotation = var->rotate;
int i;
+ WARN_ON(!atomic_read(&ofbi->region->lock_count));
+
for (i = 0; i < ofbi->num_overlays; i++) {
if (ovl != ofbi->overlays[i])
continue;
@@ -861,28 +896,9 @@ static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
yres = var->yres;
}
-
- if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
- data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
- data_start_v = NULL;
- } else {
- data_start_p = omapfb_get_region_paddr(ofbi);
- data_start_v = omapfb_get_region_vaddr(ofbi);
- }
-
- if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
- offset = calc_rotation_offset_vrfb(var, fix, rotation);
- else
- offset = calc_rotation_offset_dma(var, fix, rotation);
-
- data_start_p += offset;
- data_start_v += offset;
-
- if (offset)
- DBG("offset %d, %d = %d\n",
- var->xoffset, var->yoffset, offset);
-
- DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v);
+ if (ofbi->region->size)
+ omapfb_calc_addr(ofbi, var, fix, rotation,
+ &data_start_p, &data_start_v);
r = fb_mode_to_dss_mode(var, &mode);
if (r) {
@@ -954,12 +970,14 @@ int omapfb_apply_changes(struct fb_info *fbi, int init)
fill_fb(fbi);
#endif
+ WARN_ON(!atomic_read(&ofbi->region->lock_count));
+
for (i = 0; i < ofbi->num_overlays; i++) {
ovl = ofbi->overlays[i];
DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id);
- if (ofbi->region.size == 0) {
+ if (ofbi->region->size == 0) {
/* the fb is not available. disable the overlay */
omapfb_overlay_enable(ovl, 0);
if (!init && ovl->manager)
@@ -1007,36 +1025,48 @@ err:
* DO NOT MODIFY PAR */
static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
int r;
DBG("check_var(%d)\n", FB2OFB(fbi)->id);
+ omapfb_get_mem_region(ofbi->region);
+
r = check_fb_var(fbi, var);
+ omapfb_put_mem_region(ofbi->region);
+
return r;
}
/* set the video mode according to info->var */
static int omapfb_set_par(struct fb_info *fbi)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
int r;
DBG("set_par(%d)\n", FB2OFB(fbi)->id);
+ omapfb_get_mem_region(ofbi->region);
+
set_fb_fix(fbi);
r = setup_vrfb_rotation(fbi);
if (r)
- return r;
+ goto out;
r = omapfb_apply_changes(fbi, 0);
+ out:
+ omapfb_put_mem_region(ofbi->region);
+
return r;
}
static int omapfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
struct fb_var_screeninfo new_var;
int r;
@@ -1052,23 +1082,31 @@ static int omapfb_pan_display(struct fb_var_screeninfo *var,
fbi->var = new_var;
+ omapfb_get_mem_region(ofbi->region);
+
r = omapfb_apply_changes(fbi, 0);
+ omapfb_put_mem_region(ofbi->region);
+
return r;
}
static void mmap_user_open(struct vm_area_struct *vma)
{
- struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
+ struct omapfb2_mem_region *rg = vma->vm_private_data;
- atomic_inc(&ofbi->map_count);
+ omapfb_get_mem_region(rg);
+ atomic_inc(&rg->map_count);
+ omapfb_put_mem_region(rg);
}
static void mmap_user_close(struct vm_area_struct *vma)
{
- struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
+ struct omapfb2_mem_region *rg = vma->vm_private_data;
- atomic_dec(&ofbi->map_count);
+ omapfb_get_mem_region(rg);
+ atomic_dec(&rg->map_count);
+ omapfb_put_mem_region(rg);
}
static struct vm_operations_struct mmap_user_ops = {
@@ -1080,9 +1118,11 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
struct fb_fix_screeninfo *fix = &fbi->fix;
+ struct omapfb2_mem_region *rg;
unsigned long off;
unsigned long start;
u32 len;
+ int r = -EINVAL;
if (vma->vm_end - vma->vm_start == 0)
return 0;
@@ -1090,12 +1130,14 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
+ rg = omapfb_get_mem_region(ofbi->region);
+
start = omapfb_get_region_paddr(ofbi);
len = fix->smem_len;
if (off >= len)
- return -EINVAL;
+ goto error;
if ((vma->vm_end - vma->vm_start + off) > len)
- return -EINVAL;
+ goto error;
off += start;
@@ -1105,13 +1147,25 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_RESERVED;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &mmap_user_ops;
- vma->vm_private_data = ofbi;
+ vma->vm_private_data = rg;
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ r = -EAGAIN;
+ goto error;
+ }
+
/* vm_ops.open won't be called for mmap itself. */
- atomic_inc(&ofbi->map_count);
+ atomic_inc(&rg->map_count);
+
+ omapfb_put_mem_region(rg);
+
return 0;
+
+ error:
+ omapfb_put_mem_region(ofbi->region);
+
+ return r;
}
/* Store a single color palette entry into a pseudo palette or the hardware
@@ -1154,11 +1208,6 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
if (r != 0)
break;
- if (regno < 0) {
- r = -EINVAL;
- break;
- }
-
if (regno < 16) {
u16 pal;
pal = ((red >> (16 - var->red.length)) <<
@@ -1217,6 +1266,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
int do_update = 0;
int r = 0;
+ if (!display)
+ return -EINVAL;
+
omapfb_lock(fbdev);
switch (blank) {
@@ -1300,7 +1352,9 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
- rg = &ofbi->region;
+ rg = ofbi->region;
+
+ WARN_ON(atomic_read(&rg->map_count));
if (rg->paddr)
if (omap_vram_free(rg->paddr, rg->size))
@@ -1355,8 +1409,15 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
void __iomem *vaddr;
int r;
- rg = &ofbi->region;
- memset(rg, 0, sizeof(*rg));
+ rg = ofbi->region;
+
+ rg->paddr = 0;
+ rg->vaddr = NULL;
+ memset(&rg->vrfb, 0, sizeof rg->vrfb);
+ rg->size = 0;
+ rg->type = 0;
+ rg->alloc = false;
+ rg->map = false;
size = PAGE_ALIGN(size);
@@ -1609,7 +1670,7 @@ static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
for (i = 0; i < fbdev->num_fbs; i++) {
struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
struct omapfb2_mem_region *rg;
- rg = &ofbi->region;
+ rg = ofbi->region;
DBG("region%d phys %08x virt %p size=%lu\n",
i,
@@ -1626,7 +1687,7 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_dss_device *display = fb2display(fbi);
- struct omapfb2_mem_region *rg = &ofbi->region;
+ struct omapfb2_mem_region *rg = ofbi->region;
unsigned long old_size = rg->size;
unsigned long old_paddr = rg->paddr;
int old_type = rg->type;
@@ -1709,7 +1770,7 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->pseudo_palette = fbdev->pseudo_palette;
- if (ofbi->region.size == 0) {
+ if (ofbi->region->size == 0) {
clear_fb_info(fbi);
return 0;
}
@@ -1871,6 +1932,10 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
ofbi->fbdev = fbdev;
ofbi->id = i;
+ ofbi->region = &fbdev->regions[i];
+ ofbi->region->id = i;
+ init_rwsem(&ofbi->region->lock);
+
/* assign these early, so that fb alloc can use them */
ofbi->rotation_type = def_vrfb ? OMAP_DSS_ROT_VRFB :
OMAP_DSS_ROT_DMA;
@@ -1900,7 +1965,13 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
/* setup fb_infos */
for (i = 0; i < fbdev->num_fbs; i++) {
- r = omapfb_fb_init(fbdev, fbdev->fbs[i]);
+ struct fb_info *fbi = fbdev->fbs[i];
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ omapfb_get_mem_region(ofbi->region);
+ r = omapfb_fb_init(fbdev, fbi);
+ omapfb_put_mem_region(ofbi->region);
+
if (r) {
dev_err(fbdev->dev, "failed to setup fb_info\n");
return r;
@@ -1921,20 +1992,19 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
DBG("framebuffers registered\n");
for (i = 0; i < fbdev->num_fbs; i++) {
- r = omapfb_apply_changes(fbdev->fbs[i], 1);
+ struct fb_info *fbi = fbdev->fbs[i];
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ omapfb_get_mem_region(ofbi->region);
+ r = omapfb_apply_changes(fbi, 1);
+ omapfb_put_mem_region(ofbi->region);
+
if (r) {
dev_err(fbdev->dev, "failed to change mode\n");
return r;
}
}
- DBG("create sysfs for fbs\n");
- r = omapfb_create_sysfs(fbdev);
- if (r) {
- dev_err(fbdev->dev, "failed to create sysfs entries\n");
- return r;
- }
-
/* Enable fb0 */
if (fbdev->num_fbs > 0) {
struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[0]);
@@ -1968,11 +2038,11 @@ static int omapfb_mode_to_timings(const char *mode_str,
#ifdef CONFIG_OMAP2_DSS_VENC
if (strcmp(mode_str, "pal") == 0) {
*timings = omap_dss_pal_timings;
- *bpp = 0;
+ *bpp = 24;
return 0;
} else if (strcmp(mode_str, "ntsc") == 0) {
*timings = omap_dss_ntsc_timings;
- *bpp = 0;
+ *bpp = 24;
return 0;
}
#endif
@@ -2220,6 +2290,13 @@ static int omapfb_probe(struct platform_device *pdev)
}
}
+ DBG("create sysfs for fbs\n");
+ r = omapfb_create_sysfs(fbdev);
+ if (r) {
+ dev_err(fbdev->dev, "failed to create sysfs entries\n");
+ goto cleanup;
+ }
+
return 0;
cleanup:
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 5179219128b..6f9c72cd6bb 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -49,6 +49,7 @@ static ssize_t store_rotate_type(struct device *dev,
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_mem_region *rg;
enum omap_dss_rotation_type rot_type;
int r;
@@ -64,9 +65,11 @@ static ssize_t store_rotate_type(struct device *dev,
if (rot_type == ofbi->rotation_type)
goto out;
- if (ofbi->region.size) {
+ rg = omapfb_get_mem_region(ofbi->region);
+
+ if (rg->size) {
r = -EBUSY;
- goto out;
+ goto put_region;
}
ofbi->rotation_type = rot_type;
@@ -75,6 +78,8 @@ static ssize_t store_rotate_type(struct device *dev,
* Since the VRAM for this FB is not allocated at the moment we don't
* need to do any further parameter checking at this point.
*/
+put_region:
+ omapfb_put_mem_region(rg);
out:
unlock_fb_info(fbi);
@@ -97,7 +102,7 @@ static ssize_t store_mirror(struct device *dev,
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- bool mirror;
+ unsigned long mirror;
int r;
struct fb_var_screeninfo new_var;
@@ -111,6 +116,8 @@ static ssize_t store_mirror(struct device *dev,
ofbi->mirror = mirror;
+ omapfb_get_mem_region(ofbi->region);
+
memcpy(&new_var, &fbi->var, sizeof(new_var));
r = check_fb_var(fbi, &new_var);
if (r)
@@ -125,6 +132,8 @@ static ssize_t store_mirror(struct device *dev,
r = count;
out:
+ omapfb_put_mem_region(ofbi->region);
+
unlock_fb_info(fbi);
return r;
@@ -263,11 +272,15 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
DBG("detaching %d\n", ofbi->overlays[i]->id);
+ omapfb_get_mem_region(ofbi->region);
+
omapfb_overlay_enable(ovl, 0);
if (ovl->manager)
ovl->manager->apply(ovl->manager);
+ omapfb_put_mem_region(ofbi->region);
+
for (t = i + 1; t < ofbi->num_overlays; t++) {
ofbi->rotation[t-1] = ofbi->rotation[t];
ofbi->overlays[t-1] = ofbi->overlays[t];
@@ -300,7 +313,12 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
}
if (added) {
+ omapfb_get_mem_region(ofbi->region);
+
r = omapfb_apply_changes(fbi, 0);
+
+ omapfb_put_mem_region(ofbi->region);
+
if (r)
goto out;
}
@@ -388,7 +406,12 @@ static ssize_t store_overlays_rotate(struct device *dev,
for (i = 0; i < num_ovls; ++i)
ofbi->rotation[i] = rotation[i];
+ omapfb_get_mem_region(ofbi->region);
+
r = omapfb_apply_changes(fbi, 0);
+
+ omapfb_put_mem_region(ofbi->region);
+
if (r)
goto out;
@@ -408,7 +431,7 @@ static ssize_t show_size(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region.size);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size);
}
static ssize_t store_size(struct device *dev, struct device_attribute *attr,
@@ -416,6 +439,8 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb2_mem_region *rg;
unsigned long size;
int r;
int i;
@@ -425,15 +450,33 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
if (!lock_fb_info(fbi))
return -ENODEV;
- for (i = 0; i < ofbi->num_overlays; i++) {
- if (ofbi->overlays[i]->info.enabled) {
- r = -EBUSY;
- goto out;
+ rg = ofbi->region;
+
+ down_write_nested(&rg->lock, rg->id);
+ atomic_inc(&rg->lock_count);
+
+ if (atomic_read(&rg->map_count)) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]);
+ int j;
+
+ if (ofbi2->region != rg)
+ continue;
+
+ for (j = 0; j < ofbi2->num_overlays; j++) {
+ if (ofbi2->overlays[j]->info.enabled) {
+ r = -EBUSY;
+ goto out;
+ }
}
}
- if (size != ofbi->region.size) {
- r = omapfb_realloc_fbmem(fbi, size, ofbi->region.type);
+ if (size != ofbi->region->size) {
+ r = omapfb_realloc_fbmem(fbi, size, ofbi->region->type);
if (r) {
dev_err(dev, "realloc fbmem failed\n");
goto out;
@@ -442,6 +485,9 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
r = count;
out:
+ atomic_dec(&rg->lock_count);
+ up_write(&rg->lock);
+
unlock_fb_info(fbi);
return r;
@@ -453,7 +499,7 @@ static ssize_t show_phys(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region.paddr);
+ return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr);
}
static ssize_t show_virt(struct device *dev,
@@ -462,7 +508,7 @@ static ssize_t show_virt(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region.vaddr);
+ return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
}
static struct device_attribute omapfb_attrs[] = {
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index cd54fdbfd8b..1305fc9880b 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -27,6 +27,8 @@
#define DEBUG
#endif
+#include <linux/rwsem.h>
+
#include <plat/display.h>
#ifdef DEBUG
@@ -44,6 +46,7 @@ extern unsigned int omapfb_debug;
#define OMAPFB_MAX_OVL_PER_FB 3
struct omapfb2_mem_region {
+ int id;
u32 paddr;
void __iomem *vaddr;
struct vrfb vrfb;
@@ -51,13 +54,15 @@ struct omapfb2_mem_region {
u8 type; /* OMAPFB_PLANE_MEM_* */
bool alloc; /* allocated by the driver */
bool map; /* kernel mapped by the driver */
+ atomic_t map_count;
+ struct rw_semaphore lock;
+ atomic_t lock_count;
};
/* appended to fb_info */
struct omapfb_info {
int id;
- struct omapfb2_mem_region region;
- atomic_t map_count;
+ struct omapfb2_mem_region *region;
int num_overlays;
struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB];
struct omapfb2_device *fbdev;
@@ -76,6 +81,7 @@ struct omapfb2_device {
unsigned num_fbs;
struct fb_info *fbs[10];
+ struct omapfb2_mem_region regions[10];
unsigned num_displays;
struct omap_dss_device *displays[10];
@@ -117,6 +123,9 @@ int omapfb_update_window(struct fb_info *fbi,
int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
struct fb_var_screeninfo *var);
+int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
+ u16 posx, u16 posy, u16 outw, u16 outh);
+
/* find the display connected to this fb, if any */
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
@@ -148,8 +157,24 @@ static inline int omapfb_overlay_enable(struct omap_overlay *ovl,
struct omap_overlay_info info;
ovl->get_overlay_info(ovl, &info);
+ if (info.enabled == enable)
+ return 0;
info.enabled = enable;
return ovl->set_overlay_info(ovl, &info);
}
+static inline struct omapfb2_mem_region *
+omapfb_get_mem_region(struct omapfb2_mem_region *rg)
+{
+ down_read_nested(&rg->lock, rg->id);
+ atomic_inc(&rg->lock_count);
+ return rg;
+}
+
+static inline void omapfb_put_mem_region(struct omapfb2_mem_region *rg)
+{
+ atomic_dec(&rg->lock_count);
+ up_read(&rg->lock);
+}
+
#endif
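
The new omapfb_get_mem_region()/omapfb_put_mem_region() helpers are used throughout the driver to bracket any access to a region's size/paddr/vaddr with a read lock, so omapfb_setup_mem()'s write lock can safely reallocate memory. A small usage sketch, illustrative only:

static unsigned long region_size_locked(struct omapfb_info *ofbi)
{
        struct omapfb2_mem_region *rg;
        unsigned long size;

        rg = omapfb_get_mem_region(ofbi->region);
        size = rg->size;
        omapfb_put_mem_region(rg);

        return size;
}
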
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 6552751e81a..b6c3fc2db63 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -249,7 +249,7 @@ static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_no
info->fix.accel = FB_ACCEL_SUN_CGTHREE;
}
-static int __devinit p9100_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit p9100_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
@@ -326,7 +326,7 @@ out_err:
return err;
}
-static int __devexit p9100_remove(struct of_device *op)
+static int __devexit p9100_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct p9100_par *par = info->par;
@@ -367,12 +367,12 @@ static int __init p9100_init(void)
if (fb_get_options("p9100fb", NULL))
return -ENODEV;
- return of_register_driver(&p9100_driver, &of_bus_type);
+ return of_register_platform_driver(&p9100_driver);
}
static void __exit p9100_exit(void)
{
- of_unregister_driver(&p9100_driver);
+ of_unregister_platform_driver(&p9100_driver);
}
module_init(p9100_init);
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 72a1f4c0473..a50e1977b31 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -533,7 +533,7 @@ static int __init platinumfb_setup(char *options)
#define invalidate_cache(addr)
#endif
-static int __devinit platinumfb_probe(struct of_device* odev,
+static int __devinit platinumfb_probe(struct platform_device* odev,
const struct of_device_id *match)
{
struct device_node *dp = odev->dev.of_node;
@@ -646,7 +646,7 @@ static int __devinit platinumfb_probe(struct of_device* odev,
return rc;
}
-static int __devexit platinumfb_remove(struct of_device* odev)
+static int __devexit platinumfb_remove(struct platform_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f70f7b..a31a77ff6f3 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
* Set bit to enable graphics DMA.
*/
x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
- x |= fbi->active ? 0x00000100 : 0;
- fbi->active = 0;
+ x &= ~CFG_GRA_ENA_MASK;
+ x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
/*
* If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
struct pxa168fb_mach_info *mi)
{
struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
return ret;
}
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
{
struct pxa168fb_mach_info *mi;
struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
.probe = pxa168fb_probe,
};
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
{
return platform_driver_register(&pxa168fb_driver);
}
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 9682ecc60e1..f9aca9d13d1 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -1,7 +1,7 @@
/* linux/drivers/video/s3c-fb.c
*
* Copyright 2008 Openmoko Inc.
- * Copyright 2008 Simtec Electronics
+ * Copyright 2008-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
@@ -9,7 +9,7 @@
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -21,9 +21,11 @@
#include <linux/clk.h>
#include <linux/fb.h>
#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
+#include <plat/regs-fb-v4.h>
#include <plat/fb.h>
/* This driver will export a number of framebuffer interfaces depending
@@ -36,9 +38,9 @@
* output timings and as the control for the output power-down state.
*/
-/* note, some of the functions that get called are derived from including
- * <mach/regs-fb.h> as they are specific to the architecture that the code
- * is being built for.
+/* note, the previous use of <mach/regs-fb.h> to get platform specific data
+ * has been replaced by using the platform device name to pick the correct
+ * configuration data for the system.
*/
#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
@@ -48,13 +50,108 @@
__raw_writel(v, r); } while(0)
#endif /* FB_S3C_DEBUG_REGWRITE */
+/* irq_flags bits */
+#define S3C_FB_VSYNC_IRQ_EN 0
+
+#define VSYNC_TIMEOUT_MSEC 50
+
struct s3c_fb;
+#define VALID_BPP(x) (1 << ((x) - 1))
+
+#define OSD_BASE(win, variant) ((variant).osd + ((win) * (variant).osd_stride))
+#define VIDOSD_A(win, variant) (OSD_BASE(win, variant) + 0x00)
+#define VIDOSD_B(win, variant) (OSD_BASE(win, variant) + 0x04)
+#define VIDOSD_C(win, variant) (OSD_BASE(win, variant) + 0x08)
+#define VIDOSD_D(win, variant) (OSD_BASE(win, variant) + 0x0C)
+
+/**
+ * struct s3c_fb_variant - fb variant information
+ * @is_2443: Set if S3C2443/S3C2416 style hardware.
+ * @nr_windows: The number of windows.
+ * @vidtcon: The base for the VIDTCONx registers
+ * @wincon: The base for the WINxCON registers.
+ * @winmap: The base for the WINxMAP registers.
+ * @keycon: The base for the WxKEYCON registers.
+ * @buf_start: Offset of buffer start registers.
+ * @buf_size: Offset of buffer size registers.
+ * @buf_end: Offset of buffer end registers.
+ * @osd: The base for the OSD registers.
+ * @palette: Address of palette memory, or 0 if none.
+ * @has_prtcon: Set if has PRTCON register.
+ * @has_shadowcon: Set if has SHADOWCON register.
+ */
+struct s3c_fb_variant {
+ unsigned int is_2443:1;
+ unsigned short nr_windows;
+ unsigned short vidtcon;
+ unsigned short wincon;
+ unsigned short winmap;
+ unsigned short keycon;
+ unsigned short buf_start;
+ unsigned short buf_end;
+ unsigned short buf_size;
+ unsigned short osd;
+ unsigned short osd_stride;
+ unsigned short palette[S3C_FB_MAX_WIN];
+
+ unsigned int has_prtcon:1;
+ unsigned int has_shadowcon:1;
+};
+
+/**
+ * struct s3c_fb_win_variant
+ * @has_osd_c: Set if has OSD C register.
+ * @has_osd_d: Set if has OSD D register.
+ * @has_osd_alpha: Set if can change alpha transparency for a window.
+ * @palette_sz: Size of palette in entries.
+ * @palette_16bpp: Set if palette is 16bits wide.
+ * @osd_size_off: If != 0, supports setting up OSD for a window; the appropriate
+ * register is located at the given offset from OSD_BASE.
+ * @valid_bpp: 1 bit per BPP setting to show valid bits-per-pixel.
+ *
+ * valid_bpp bit x is set if (x+1)BPP is supported.
+ */
+struct s3c_fb_win_variant {
+ unsigned int has_osd_c:1;
+ unsigned int has_osd_d:1;
+ unsigned int has_osd_alpha:1;
+ unsigned int palette_16bpp:1;
+ unsigned short osd_size_off;
+ unsigned short palette_sz;
+ u32 valid_bpp;
+};
+
+/**
+ * struct s3c_fb_driverdata - per-device type driver data for init time.
+ * @variant: The variant information for this driver.
+ * @win: The window information for each window.
+ */
+struct s3c_fb_driverdata {
+ struct s3c_fb_variant variant;
+ struct s3c_fb_win_variant *win[S3C_FB_MAX_WIN];
+};
+
+/**
+ * struct s3c_fb_palette - palette information
+ * @r: Red bitfield.
+ * @g: Green bitfield.
+ * @b: Blue bitfield.
+ * @a: Alpha bitfield.
+ */
+struct s3c_fb_palette {
+ struct fb_bitfield r;
+ struct fb_bitfield g;
+ struct fb_bitfield b;
+ struct fb_bitfield a;
+};
+
/**
* struct s3c_fb_win - per window private data for each framebuffer.
* @windata: The platform data supplied for the window configuration.
* @parent: The hardware that this window is part of.
* @fbinfo: Pointer pack to the framebuffer info for this window.
+ * @varint: The variant information for this window.
* @palette_buffer: Buffer/cache to hold palette entries.
* @pseudo_palette: For use in TRUECOLOUR modes for entries 0..15.
* @index: The window number of this window.
@@ -65,6 +162,7 @@ struct s3c_fb_win {
struct s3c_fb *parent;
struct fb_info *fbinfo;
struct s3c_fb_palette palette;
+ struct s3c_fb_win_variant variant;
u32 *palette_buffer;
u32 pseudo_palette[16];
@@ -72,37 +170,54 @@ struct s3c_fb_win {
};
/**
+ * struct s3c_fb_vsync - vsync information
+ * @wait: a queue for processes waiting for vsync
+ * @count: vsync interrupt count
+ */
+struct s3c_fb_vsync {
+ wait_queue_head_t wait;
+ unsigned int count;
+};
+
+/**
* struct s3c_fb - overall hardware state of the hardware
* @dev: The device that we bound to, for printing, etc.
* @regs_res: The resource we claimed for the IO registers.
* @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
* @regs: The mapped hardware registers.
+ * @variant: Variant information for this hardware.
* @enabled: A bitmask of enabled hardware windows.
* @pdata: The platform configuration data passed with the device.
* @windows: The hardware windows that have been claimed.
+ * @irq_no: IRQ line number
+ * @irq_flags: irq flags
+ * @vsync_info: VSYNC-related information (count, queues...)
*/
struct s3c_fb {
struct device *dev;
struct resource *regs_res;
struct clk *bus_clk;
void __iomem *regs;
+ struct s3c_fb_variant variant;
unsigned char enabled;
struct s3c_fb_platdata *pdata;
struct s3c_fb_win *windows[S3C_FB_MAX_WIN];
+
+ int irq_no;
+ unsigned long irq_flags;
+ struct s3c_fb_vsync vsync_info;
};
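
The new vsync_info fields (a wait queue plus an interrupt count) suggest the usual wait-for-vsync pattern: the IRQ handler increments count and wakes the queue, and a blocking path waits for the count to change. A sketch of that consumer, assuming wait_event_interruptible_timeout() and the VSYNC_TIMEOUT_MSEC constant above; the driver's real ioctl path may differ:

static int s3c_fb_wait_for_vsync_sketch(struct s3c_fb *sfb)
{
        unsigned int start = sfb->vsync_info.count;
        long ret;

        ret = wait_event_interruptible_timeout(sfb->vsync_info.wait,
                        sfb->vsync_info.count != start,
                        msecs_to_jiffies(VSYNC_TIMEOUT_MSEC));
        if (ret == 0)
                return -ETIMEDOUT;

        return ret < 0 ? ret : 0;
}
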
/**
- * s3c_fb_win_has_palette() - determine if a mode has a palette
- * @win: The window number being queried.
- * @bpp: The number of bits per pixel to test.
- *
- * Work out if the given window supports palletised data at the specified bpp.
+ * s3c_fb_validate_win_bpp - validate the bits-per-pixel for this mode.
+ * @win: The device window.
+ * @bpp: The bit depth.
*/
-static int s3c_fb_win_has_palette(unsigned int win, unsigned int bpp)
+static bool s3c_fb_validate_win_bpp(struct s3c_fb_win *win, unsigned int bpp)
{
- return s3c_fb_win_pal_size(win) <= (1 << bpp);
+ return win->variant.valid_bpp & VALID_BPP(bpp);
}
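
VALID_BPP() encodes supported depths as a bitmask where bit x means (x+1) bpp is valid, and s3c_fb_validate_win_bpp() simply tests that mask. An illustrative (not SoC-accurate) window variant showing how a driver data table would fill it in:

static struct s3c_fb_win_variant example_win_sketch = {
        .has_osd_c      = 1,
        .palette_sz     = 256,
        .valid_bpp      = VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(4) |
                          VALID_BPP(8) | VALID_BPP(16) | VALID_BPP(24) |
                          VALID_BPP(32),
};
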
/**
@@ -125,7 +240,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
var->xres_virtual = max((unsigned int)windata->virtual_x, var->xres);
var->yres_virtual = max((unsigned int)windata->virtual_y, var->yres);
- if (!s3c_fb_validate_win_bpp(win->index, var->bits_per_pixel)) {
+ if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) {
dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n",
win->index, var->bits_per_pixel);
return -EINVAL;
@@ -140,7 +255,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
case 2:
case 4:
case 8:
- if (!s3c_fb_win_has_palette(win->index, var->bits_per_pixel)) {
+ if (sfb->variant.palette[win->index] != 0) {
/* non palletised, A:1,R:2,G:3,B:2 mode */
var->red.offset = 4;
var->green.offset = 2;
@@ -255,6 +370,66 @@ static int s3c_fb_align_word(unsigned int bpp, unsigned int pix)
}
/**
+ * vidosd_set_size() - set OSD size for a window
+ *
+ * @win: the window to set OSD size for
+ * @size: OSD size register value
+ */
+static void vidosd_set_size(struct s3c_fb_win *win, u32 size)
+{
+ struct s3c_fb *sfb = win->parent;
+
+ /* OSD can be set up if osd_size_off != 0 for this window */
+ if (win->variant.osd_size_off)
+ writel(size, sfb->regs + OSD_BASE(win->index, sfb->variant)
+ + win->variant.osd_size_off);
+}
+
+/**
+ * vidosd_set_alpha() - set alpha transparency for a window
+ *
+ * @win: the window to set alpha transparency for
+ * @alpha: alpha register value
+ */
+static void vidosd_set_alpha(struct s3c_fb_win *win, u32 alpha)
+{
+ struct s3c_fb *sfb = win->parent;
+
+ if (win->variant.has_osd_alpha)
+ writel(alpha, sfb->regs + VIDOSD_C(win->index, sfb->variant));
+}
+
+/**
+ * shadow_protect_win() - disable updating values from shadow registers at vsync
+ *
+ * @win: window to protect registers for
+ * @protect: 1 to protect (disable updates)
+ */
+static void shadow_protect_win(struct s3c_fb_win *win, bool protect)
+{
+ struct s3c_fb *sfb = win->parent;
+ u32 reg;
+
+ if (protect) {
+ if (sfb->variant.has_prtcon) {
+ writel(PRTCON_PROTECT, sfb->regs + PRTCON);
+ } else if (sfb->variant.has_shadowcon) {
+ reg = readl(sfb->regs + SHADOWCON);
+ writel(reg | SHADOWCON_WINx_PROTECT(win->index),
+ sfb->regs + SHADOWCON);
+ }
+ } else {
+ if (sfb->variant.has_prtcon) {
+ writel(0, sfb->regs + PRTCON);
+ } else if (sfb->variant.has_shadowcon) {
+ reg = readl(sfb->regs + SHADOWCON);
+ writel(reg & ~SHADOWCON_WINx_PROTECT(win->index),
+ sfb->regs + SHADOWCON);
+ }
+ }
+}
+
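
shadow_protect_win() freezes shadow-register updates (via PRTCON or per-window SHADOWCON bits) while a window is reprogrammed; s3c_fb_set_par() below takes the protection before touching the registers. The intended bracketing, shown as a sketch; the matching unprotect call falls outside the hunks quoted here:

static void s3c_fb_reprogram_win_sketch(struct s3c_fb_win *win)
{
        shadow_protect_win(win, 1);

        /* ... program WINCONx, VIDOSDx and the buffer registers ... */

        shadow_protect_win(win, 0);
}
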
+/**
* s3c_fb_set_par() - framebuffer request to set new framebuffer state.
* @info: The framebuffer to change.
*
@@ -266,14 +441,17 @@ static int s3c_fb_set_par(struct fb_info *info)
struct s3c_fb_win *win = info->par;
struct s3c_fb *sfb = win->parent;
void __iomem *regs = sfb->regs;
+ void __iomem *buf = regs;
int win_no = win->index;
- u32 osdc_data = 0;
+ u32 alpha = 0;
u32 data;
u32 pagewidth;
int clkdiv;
dev_dbg(sfb->dev, "setting framebuffer parameters\n");
+ shadow_protect_win(win, 1);
+
switch (var->bits_per_pixel) {
case 32:
case 24:
@@ -282,7 +460,7 @@ static int s3c_fb_set_par(struct fb_info *info)
info->fix.visual = FB_VISUAL_TRUECOLOR;
break;
case 8:
- if (s3c_fb_win_has_palette(win_no, 8))
+ if (win->variant.palette_sz >= 256)
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
else
info->fix.visual = FB_VISUAL_TRUECOLOR;
@@ -297,12 +475,15 @@ static int s3c_fb_set_par(struct fb_info *info)
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8;
+ info->fix.xpanstep = info->var.xres_virtual > info->var.xres ? 1 : 0;
+ info->fix.ypanstep = info->var.yres_virtual > info->var.yres ? 1 : 0;
+
/* disable the window whilst we update it */
writel(0, regs + WINCON(win_no));
- /* use window 0 as the basis for the lcd output timings */
+ /* use platform specified window as the basis for the lcd timings */
- if (win_no == 0) {
+ if (win_no == sfb->pdata->default_win) {
clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
data = sfb->pdata->vidcon0;
@@ -315,6 +496,9 @@ static int s3c_fb_set_par(struct fb_info *info)
/* write the timing data to the panel */
+ if (sfb->variant.is_2443)
+ data |= (1 << 5);
+
data |= VIDCON0_ENVID | VIDCON0_ENVID_F;
writel(data, regs + VIDCON0);
@@ -322,53 +506,54 @@ static int s3c_fb_set_par(struct fb_info *info)
VIDTCON0_VFPD(var->lower_margin - 1) |
VIDTCON0_VSPW(var->vsync_len - 1);
- writel(data, regs + VIDTCON0);
+ writel(data, regs + sfb->variant.vidtcon);
data = VIDTCON1_HBPD(var->left_margin - 1) |
VIDTCON1_HFPD(var->right_margin - 1) |
VIDTCON1_HSPW(var->hsync_len - 1);
- writel(data, regs + VIDTCON1);
+ /* VIDTCON1 */
+ writel(data, regs + sfb->variant.vidtcon + 4);
data = VIDTCON2_LINEVAL(var->yres - 1) |
VIDTCON2_HOZVAL(var->xres - 1);
- writel(data, regs + VIDTCON2);
+ writel(data, regs + sfb->variant.vidtcon + 8);
}
/* write the buffer address */
- writel(info->fix.smem_start, regs + VIDW_BUF_START(win_no));
+ /* start and end registers stride is 8 */
+ buf = regs + win_no * 8;
+
+ writel(info->fix.smem_start, buf + sfb->variant.buf_start);
data = info->fix.smem_start + info->fix.line_length * var->yres;
- writel(data, regs + VIDW_BUF_END(win_no));
+ writel(data, buf + sfb->variant.buf_end);
pagewidth = (var->xres * var->bits_per_pixel) >> 3;
data = VIDW_BUF_SIZE_OFFSET(info->fix.line_length - pagewidth) |
VIDW_BUF_SIZE_PAGEWIDTH(pagewidth);
- writel(data, regs + VIDW_BUF_SIZE(win_no));
+ writel(data, regs + sfb->variant.buf_size + (win_no * 4));
/* write 'OSD' registers to control position of framebuffer */
data = VIDOSDxA_TOPLEFT_X(0) | VIDOSDxA_TOPLEFT_Y(0);
- writel(data, regs + VIDOSD_A(win_no));
+ writel(data, regs + VIDOSD_A(win_no, sfb->variant));
data = VIDOSDxB_BOTRIGHT_X(s3c_fb_align_word(var->bits_per_pixel,
var->xres - 1)) |
VIDOSDxB_BOTRIGHT_Y(var->yres - 1);
- writel(data, regs + VIDOSD_B(win_no));
+ writel(data, regs + VIDOSD_B(win_no, sfb->variant));
data = var->xres * var->yres;
- osdc_data = VIDISD14C_ALPHA1_R(0xf) |
+ alpha = VIDISD14C_ALPHA1_R(0xf) |
VIDISD14C_ALPHA1_G(0xf) |
VIDISD14C_ALPHA1_B(0xf);
- if (s3c_fb_has_osd_d(win_no)) {
- writel(data, regs + VIDOSD_D(win_no));
- writel(osdc_data, regs + VIDOSD_C(win_no));
- } else
- writel(data, regs + VIDOSD_C(win_no));
+ vidosd_set_alpha(win, alpha);
+ vidosd_set_size(win, data);
data = WINCONx_ENWIN;
@@ -424,13 +609,15 @@ static int s3c_fb_set_par(struct fb_info *info)
else
data |= WINCON0_BPPMODE_24BPP_888;
+ data |= WINCONx_WSWP;
data |= WINCONx_BURSTLEN_16WORD;
break;
}
- /* It has no color key control register for window0 */
+ /* Enable the colour keying for the window below this one */
if (win_no > 0) {
u32 keycon0_data = 0, keycon1_data = 0;
+ void __iomem *keycon = regs + sfb->variant.keycon;
keycon0_data = ~(WxKEYCON0_KEYBL_EN |
WxKEYCON0_KEYEN_F |
@@ -438,12 +625,23 @@ static int s3c_fb_set_par(struct fb_info *info)
keycon1_data = WxKEYCON1_COLVAL(0xffffff);
- writel(keycon0_data, regs + WxKEYCONy(win_no-1, 0));
- writel(keycon1_data, regs + WxKEYCONy(win_no-1, 1));
+ keycon += (win_no - 1) * 8;
+
+ writel(keycon0_data, keycon + WKEYCON0);
+ writel(keycon1_data, keycon + WKEYCON1);
+ }
+
+ writel(data, regs + sfb->variant.wincon + (win_no * 4));
+ writel(0x0, regs + sfb->variant.winmap + (win_no * 4));
+
+ /* Enable DMA channel for this window */
+ if (sfb->variant.has_shadowcon) {
+ data = readl(sfb->regs + SHADOWCON);
+ data |= SHADOWCON_CHx_ENABLE(win_no);
+ writel(data, sfb->regs + SHADOWCON);
}
- writel(data, regs + WINCON(win_no));
- writel(0x0, regs + WINxMAP(win_no));
+ shadow_protect_win(win, 0);
return 0;
}
@@ -470,7 +668,7 @@ static void s3c_fb_update_palette(struct s3c_fb *sfb,
void __iomem *palreg;
u32 palcon;
- palreg = sfb->regs + s3c_fb_pal_reg(win->index, reg);
+ palreg = sfb->regs + sfb->variant.palette[win->index];
dev_dbg(sfb->dev, "%s: win %d, reg %d (%p): %08x\n",
__func__, win->index, reg, palreg, value);
@@ -480,10 +678,10 @@ static void s3c_fb_update_palette(struct s3c_fb *sfb,
palcon = readl(sfb->regs + WPALCON);
writel(palcon | WPALCON_PAL_UPDATE, sfb->regs + WPALCON);
- if (s3c_fb_pal_is16(win->index))
- writew(value, palreg);
+ if (win->variant.palette_16bpp)
+ writew(value, palreg + (reg * 2));
else
- writel(value, palreg);
+ writel(value, palreg + (reg * 4));
writel(palcon, sfb->regs + WPALCON);
}
@@ -532,7 +730,7 @@ static int s3c_fb_setcolreg(unsigned regno,
break;
case FB_VISUAL_PSEUDOCOLOR:
- if (regno < s3c_fb_win_pal_size(win->index)) {
+ if (regno < win->variant.palette_sz) {
val = chan_to_field(red, &win->palette.r);
val |= chan_to_field(green, &win->palette.g);
val |= chan_to_field(blue, &win->palette.b);
@@ -591,7 +789,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
- wincon = readl(sfb->regs + WINCON(index));
+ wincon = readl(sfb->regs + sfb->variant.wincon + (index * 4));
switch (blank_mode) {
case FB_BLANK_POWERDOWN:
@@ -602,11 +800,11 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
writel(WINxMAP_MAP | WINxMAP_MAP_COLOUR(0x0),
- sfb->regs + WINxMAP(index));
+ sfb->regs + sfb->variant.winmap + (index * 4));
break;
case FB_BLANK_UNBLANK:
- writel(0x0, sfb->regs + WINxMAP(index));
+ writel(0x0, sfb->regs + sfb->variant.winmap + (index * 4));
wincon |= WINCONx_ENWIN;
sfb->enabled |= (1 << index);
break;
@@ -617,7 +815,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
return 1;
}
- writel(wincon, sfb->regs + WINCON(index));
+ writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
/* Check the enabled state to see if we need to be running the
* main LCD interface, as if there are no active windows then
@@ -636,12 +834,185 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
/* we're stuck with this until we can do something about overriding
* the power control using the blanking event for a single fb.
*/
- if (index == 0)
+ if (index == sfb->pdata->default_win)
s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
return 0;
}
+/**
+ * s3c_fb_pan_display() - Pan the display.
+ *
+ * Note that the offsets can be written to the device at any time, as their
+ * values are latched at each vsync automatically. This also means that only
+ * the last call to this function will have any effect on next vsync, but
+ * there is no need to sleep waiting for it to prevent tearing.
+ *
+ * @var: The screen information to verify.
+ * @info: The framebuffer device.
+ */
+static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct s3c_fb_win *win = info->par;
+ struct s3c_fb *sfb = win->parent;
+ void __iomem *buf = sfb->regs + win->index * 8;
+ unsigned int start_boff, end_boff;
+
+ /* Offset in bytes to the start of the displayed area */
+ start_boff = var->yoffset * info->fix.line_length;
+ /* X offset depends on the current bpp */
+ if (info->var.bits_per_pixel >= 8) {
+ start_boff += var->xoffset * (info->var.bits_per_pixel >> 3);
+ } else {
+ switch (info->var.bits_per_pixel) {
+ case 4:
+ start_boff += var->xoffset >> 1;
+ break;
+ case 2:
+ start_boff += var->xoffset >> 2;
+ break;
+ case 1:
+ start_boff += var->xoffset >> 3;
+ break;
+ default:
+ dev_err(sfb->dev, "invalid bpp\n");
+ return -EINVAL;
+ }
+ }
+ /* Offset in bytes to the end of the displayed area */
+ end_boff = start_boff + var->yres * info->fix.line_length;
+
+ /* Temporarily turn off per-vsync update from shadow registers until
+ * both start and end addresses are updated to prevent corruption */
+ shadow_protect_win(win, 1);
+
+ writel(info->fix.smem_start + start_boff, buf + sfb->variant.buf_start);
+ writel(info->fix.smem_start + end_boff, buf + sfb->variant.buf_end);
+
+ shadow_protect_win(win, 0);
+
+ return 0;
+}
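
Editor's note: for depths of 8bpp and above, the panning offsets above reduce to two multiplications. A self-contained recomputation with made-up screen parameters (values are illustrative only, not taken from any board or from this patch):

    #include <stdio.h>

    /* Hypothetical example values, for illustration only */
    #define LINE_LENGTH     (800 * 4)       /* xres_virtual = 800 at 32bpp */
    #define YRES            480

    int main(void)
    {
            unsigned int xoffset = 0, yoffset = 480;   /* pan to the second page */
            unsigned int bpp = 32;

            /* byte offset of the first and last visible line, as in the driver */
            unsigned int start_boff = yoffset * LINE_LENGTH + xoffset * (bpp >> 3);
            unsigned int end_boff = start_boff + YRES * LINE_LENGTH;

            printf("start %u bytes, end %u bytes into the framebuffer\n",
                   start_boff, end_boff);
            return 0;
    }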
+
+/**
+ * s3c_fb_enable_irq() - enable framebuffer interrupts
+ * @sfb: main hardware state
+ */
+static void s3c_fb_enable_irq(struct s3c_fb *sfb)
+{
+ void __iomem *regs = sfb->regs;
+ u32 irq_ctrl_reg;
+
+ if (!test_and_set_bit(S3C_FB_VSYNC_IRQ_EN, &sfb->irq_flags)) {
+ /* IRQ disabled, enable it */
+ irq_ctrl_reg = readl(regs + VIDINTCON0);
+
+ irq_ctrl_reg |= VIDINTCON0_INT_ENABLE;
+ irq_ctrl_reg |= VIDINTCON0_INT_FRAME;
+
+ irq_ctrl_reg &= ~VIDINTCON0_FRAMESEL0_MASK;
+ irq_ctrl_reg |= VIDINTCON0_FRAMESEL0_VSYNC;
+ irq_ctrl_reg &= ~VIDINTCON0_FRAMESEL1_MASK;
+ irq_ctrl_reg |= VIDINTCON0_FRAMESEL1_NONE;
+
+ writel(irq_ctrl_reg, regs + VIDINTCON0);
+ }
+}
+
+/**
+ * s3c_fb_disable_irq() - disable framebuffer interrupts
+ * @sfb: main hardware state
+ */
+static void s3c_fb_disable_irq(struct s3c_fb *sfb)
+{
+ void __iomem *regs = sfb->regs;
+ u32 irq_ctrl_reg;
+
+ if (test_and_clear_bit(S3C_FB_VSYNC_IRQ_EN, &sfb->irq_flags)) {
+ /* IRQ enabled, disable it */
+ irq_ctrl_reg = readl(regs + VIDINTCON0);
+
+ irq_ctrl_reg &= ~VIDINTCON0_INT_FRAME;
+ irq_ctrl_reg &= ~VIDINTCON0_INT_ENABLE;
+
+ writel(irq_ctrl_reg, regs + VIDINTCON0);
+ }
+}
+
+static irqreturn_t s3c_fb_irq(int irq, void *dev_id)
+{
+ struct s3c_fb *sfb = dev_id;
+ void __iomem *regs = sfb->regs;
+ u32 irq_sts_reg;
+
+ irq_sts_reg = readl(regs + VIDINTCON1);
+
+ if (irq_sts_reg & VIDINTCON1_INT_FRAME) {
+
+ /* VSYNC interrupt, accept it */
+ writel(VIDINTCON1_INT_FRAME, regs + VIDINTCON1);
+
+ sfb->vsync_info.count++;
+ wake_up_interruptible(&sfb->vsync_info.wait);
+ }
+
+ /* We only support waiting for VSYNC for now, so it's safe
+ * to always disable irqs here.
+ */
+ s3c_fb_disable_irq(sfb);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * s3c_fb_wait_for_vsync() - sleep until next VSYNC interrupt or timeout
+ * @sfb: main hardware state
+ * @crtc: head index.
+ */
+static int s3c_fb_wait_for_vsync(struct s3c_fb *sfb, u32 crtc)
+{
+ unsigned long count;
+ int ret;
+
+ if (crtc != 0)
+ return -ENODEV;
+
+ count = sfb->vsync_info.count;
+ s3c_fb_enable_irq(sfb);
+ ret = wait_event_interruptible_timeout(sfb->vsync_info.wait,
+ count != sfb->vsync_info.count,
+ msecs_to_jiffies(VSYNC_TIMEOUT_MSEC));
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ struct s3c_fb_win *win = info->par;
+ struct s3c_fb *sfb = win->parent;
+ int ret;
+ u32 crtc;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ if (get_user(crtc, (u32 __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = s3c_fb_wait_for_vsync(sfb, crtc);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
static struct fb_ops s3c_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = s3c_fb_check_var,
@@ -651,9 +1022,33 @@ static struct fb_ops s3c_fb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_pan_display = s3c_fb_pan_display,
+ .fb_ioctl = s3c_fb_ioctl,
};
/**
+ * s3c_fb_missing_pixclock() - calculates pixel clock
+ * @mode: The video mode to change.
+ *
+ * Calculate the pixel clock when none has been given through platform data.
+ */
+static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode)
+{
+ u64 pixclk = 1000000000000ULL;
+ u32 div;
+
+ div = mode->left_margin + mode->hsync_len + mode->right_margin +
+ mode->xres;
+ div *= mode->upper_margin + mode->vsync_len + mode->lower_margin +
+ mode->yres;
+ div *= mode->refresh ? : 60;
+
+ do_div(pixclk, div);
+
+ mode->pixclock = pixclk;
+}
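
Editor's note: the pixclock value being computed is the fbdev pixel period in picoseconds, i.e. 10^12 divided by (horizontal total * vertical total * refresh rate). A standalone version of the same arithmetic with hypothetical panel timings (numbers chosen only to show the scale of the result):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical 800x480 panel timings, for illustration only */
            uint64_t htotal = 8 + 13 + 3 + 800;     /* left + hsync + right + xres */
            uint64_t vtotal = 7 + 1 + 5 + 480;      /* upper + vsync + lower + yres */
            uint64_t refresh = 60;

            /* pixclock in the fbdev sense: pixel period in picoseconds */
            uint64_t pixclock = 1000000000000ULL / (htotal * vtotal * refresh);

            printf("pixclock = %llu ps (~%llu kHz pixel clock)\n",
                   (unsigned long long)pixclock,
                   (unsigned long long)(1000000000ULL / pixclock));
            return 0;
    }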
+
+/**
* s3c_fb_alloc_memory() - allocate display memory for framebuffer window
* @sfb: The base resources for the hardware.
* @win: The window to initialise memory for.
@@ -711,7 +1106,8 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
{
struct fb_info *fbi = win->fbinfo;
- dma_free_writecombine(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
+ if (fbi->screen_base)
+ dma_free_writecombine(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
fbi->screen_base, fbi->fix.smem_start);
}
@@ -724,9 +1120,18 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
*/
static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
{
+ u32 data;
+
if (win->fbinfo) {
+ if (sfb->variant.has_shadowcon) {
+ data = readl(sfb->regs + SHADOWCON);
+ data &= ~SHADOWCON_CHx_ENABLE(win->index);
+ data &= ~SHADOWCON_CHx_LOCAL_ENABLE(win->index);
+ writel(data, sfb->regs + SHADOWCON);
+ }
unregister_framebuffer(win->fbinfo);
- fb_dealloc_cmap(&win->fbinfo->cmap);
+ if (win->fbinfo->cmap.len)
+ fb_dealloc_cmap(&win->fbinfo->cmap);
s3c_fb_free_memory(sfb, win);
framebuffer_release(win->fbinfo);
}
@@ -735,12 +1140,14 @@ static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
/**
* s3c_fb_probe_win() - register a hardware window
* @sfb: The base resources for the hardware
+ * @variant: The variant information for this window.
* @res: Pointer to where to place the resultant window.
*
* Allocate and do the basic initialisation for one of the hardware's graphics
* windows.
*/
static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
+ struct s3c_fb_win_variant *variant,
struct s3c_fb_win **res)
{
struct fb_var_screeninfo *var;
@@ -751,9 +1158,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
int palette_size;
int ret;
- dev_dbg(sfb->dev, "probing window %d\n", win_no);
+ dev_dbg(sfb->dev, "probing window %d, variant %p\n", win_no, variant);
+
+ init_waitqueue_head(&sfb->vsync_info.wait);
- palette_size = s3c_fb_win_pal_size(win_no);
+ palette_size = variant->palette_sz * 4;
fbinfo = framebuffer_alloc(sizeof(struct s3c_fb_win) +
palette_size * sizeof(u32), sfb->dev);
@@ -770,7 +1179,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
WARN_ON(windata->win_mode.yres == 0);
win = fbinfo->par;
+ *res = win;
var = &fbinfo->var;
+ win->variant = *variant;
win->fbinfo = fbinfo;
win->parent = sfb;
win->windata = windata;
@@ -784,7 +1195,24 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
/* setup the r/b/g positions for the window's palette */
- s3c_fb_init_palette(win_no, &win->palette);
+ if (win->variant.palette_16bpp) {
+ /* Set RGB 5:6:5 as default */
+ win->palette.r.offset = 11;
+ win->palette.r.length = 5;
+ win->palette.g.offset = 5;
+ win->palette.g.length = 6;
+ win->palette.b.offset = 0;
+ win->palette.b.length = 5;
+
+ } else {
+ /* Set 8bpp or 8bpp and 1bit alpha */
+ win->palette.r.offset = 16;
+ win->palette.r.length = 8;
+ win->palette.g.offset = 8;
+ win->palette.g.length = 8;
+ win->palette.b.offset = 0;
+ win->palette.b.length = 8;
+ }
/* setup the initial video mode from the window */
fb_videomode_to_var(&fbinfo->var, initmode);
@@ -808,7 +1236,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
/* create initial colour map */
- ret = fb_alloc_cmap(&fbinfo->cmap, s3c_fb_win_pal_size(win_no), 1);
+ ret = fb_alloc_cmap(&fbinfo->cmap, win->variant.palette_sz, 1);
if (ret == 0)
fb_set_cmap(&fbinfo->cmap, fbinfo);
else
@@ -826,7 +1254,6 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
return ret;
}
- *res = win;
dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id);
return 0;
@@ -842,18 +1269,19 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
{
void __iomem *regs = sfb->regs;
-
- writel(0, regs + WINCON(win));
- writel(0xffffff, regs + WxKEYCONy(win, 0));
- writel(0xffffff, regs + WxKEYCONy(win, 1));
-
- writel(0, regs + VIDOSD_A(win));
- writel(0, regs + VIDOSD_B(win));
- writel(0, regs + VIDOSD_C(win));
+ u32 reg;
+
+ writel(0, regs + sfb->variant.wincon + (win * 4));
+ writel(0, regs + VIDOSD_A(win, sfb->variant));
+ writel(0, regs + VIDOSD_B(win, sfb->variant));
+ writel(0, regs + VIDOSD_C(win, sfb->variant));
+ reg = readl(regs + SHADOWCON);
+ writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
}
static int __devinit s3c_fb_probe(struct platform_device *pdev)
{
+ struct s3c_fb_driverdata *fbdrv;
struct device *dev = &pdev->dev;
struct s3c_fb_platdata *pd;
struct s3c_fb *sfb;
@@ -861,6 +1289,13 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
int win;
int ret = 0;
+ fbdrv = (struct s3c_fb_driverdata *)platform_get_device_id(pdev)->driver_data;
+
+ if (fbdrv->variant.nr_windows > S3C_FB_MAX_WIN) {
+ dev_err(dev, "too many windows, cannot attach\n");
+ return -EINVAL;
+ }
+
pd = pdev->dev.platform_data;
if (!pd) {
dev_err(dev, "no platform data specified\n");
@@ -873,8 +1308,11 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ dev_dbg(dev, "allocate new framebuffer %p\n", sfb);
+
sfb->dev = dev;
sfb->pdata = pd;
+ sfb->variant = fbdrv->variant;
sfb->bus_clk = clk_get(dev, "lcd");
if (IS_ERR(sfb->bus_clk)) {
@@ -906,6 +1344,20 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
goto err_req_region;
}
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to acquire irq resource\n");
+ ret = -ENOENT;
+ goto err_ioremap;
+ }
+ sfb->irq_no = res->start;
+ ret = request_irq(sfb->irq_no, s3c_fb_irq,
+ 0, "s3c_fb", sfb);
+ if (ret) {
+ dev_err(dev, "irq request failed\n");
+ goto err_ioremap;
+ }
+
dev_dbg(dev, "got resources (regs %p), probing windows\n", sfb->regs);
/* setup gpio and output polarity controls */
@@ -916,21 +1368,34 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
/* zero all windows before we do anything */
- for (win = 0; win < S3C_FB_MAX_WIN; win++)
+ for (win = 0; win < fbdrv->variant.nr_windows; win++)
s3c_fb_clear_win(sfb, win);
+ /* initialise colour key controls */
+ for (win = 0; win < (fbdrv->variant.nr_windows - 1); win++) {
+ void __iomem *regs = sfb->regs + sfb->variant.keycon;
+
+ regs += (win * 8);
+ writel(0xffffff, regs + WKEYCON0);
+ writel(0xffffff, regs + WKEYCON1);
+ }
+
/* we have the register setup, start allocating framebuffers */
- for (win = 0; win < S3C_FB_MAX_WIN; win++) {
+ for (win = 0; win < fbdrv->variant.nr_windows; win++) {
if (!pd->win[win])
continue;
- ret = s3c_fb_probe_win(sfb, win, &sfb->windows[win]);
+ if (!pd->win[win]->win_mode.pixclock)
+ s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
+
+ ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
+ &sfb->windows[win]);
if (ret < 0) {
dev_err(dev, "failed to create window %d\n", win);
for (; win >= 0; win--)
s3c_fb_release_win(sfb, sfb->windows[win]);
- goto err_ioremap;
+ goto err_irq;
}
}
@@ -938,6 +1403,9 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
return 0;
+err_irq:
+ free_irq(sfb->irq_no, sfb);
+
err_ioremap:
iounmap(sfb->regs);
@@ -970,6 +1438,8 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
if (sfb->windows[win])
s3c_fb_release_win(sfb, sfb->windows[win]);
+ free_irq(sfb->irq_no, sfb);
+
iounmap(sfb->regs);
clk_disable(sfb->bus_clk);
@@ -1016,9 +1486,17 @@ static int s3c_fb_resume(struct platform_device *pdev)
writel(pd->vidcon1, sfb->regs + VIDCON1);
/* zero all windows before we do anything */
- for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++)
+ for (win_no = 0; win_no < sfb->variant.nr_windows; win_no++)
s3c_fb_clear_win(sfb, win_no);
+ for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
+ void __iomem *regs = sfb->regs + sfb->variant.keycon;
+
+ regs += (win_no * 8);
+ writel(0xffffff, regs + WKEYCON0);
+ writel(0xffffff, regs + WKEYCON1);
+ }
+
/* restore framebuffers */
for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
win = sfb->windows[win_no];
@@ -1036,11 +1514,208 @@ static int s3c_fb_resume(struct platform_device *pdev)
#define s3c_fb_resume NULL
#endif
+
+#define VALID_BPP124 (VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(4))
+#define VALID_BPP1248 (VALID_BPP124 | VALID_BPP(8))
+
+static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
+ [0] = {
+ .has_osd_c = 1,
+ .osd_size_off = 0x8,
+ .palette_sz = 256,
+ .valid_bpp = VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(24),
+ },
+ [1] = {
+ .has_osd_c = 1,
+ .has_osd_d = 1,
+ .osd_size_off = 0x12,
+ .has_osd_alpha = 1,
+ .palette_sz = 256,
+ .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
+ VALID_BPP(18) | VALID_BPP(19) |
+ VALID_BPP(24) | VALID_BPP(25)),
+ },
+ [2] = {
+ .has_osd_c = 1,
+ .has_osd_d = 1,
+ .osd_size_off = 0x12,
+ .has_osd_alpha = 1,
+ .palette_sz = 16,
+ .palette_16bpp = 1,
+ .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
+ VALID_BPP(18) | VALID_BPP(19) |
+ VALID_BPP(24) | VALID_BPP(25)),
+ },
+ [3] = {
+ .has_osd_c = 1,
+ .has_osd_alpha = 1,
+ .palette_sz = 16,
+ .palette_16bpp = 1,
+ .valid_bpp = (VALID_BPP124 | VALID_BPP(16) |
+ VALID_BPP(18) | VALID_BPP(19) |
+ VALID_BPP(24) | VALID_BPP(25)),
+ },
+ [4] = {
+ .has_osd_c = 1,
+ .has_osd_alpha = 1,
+ .palette_sz = 4,
+ .palette_16bpp = 1,
+ .valid_bpp = (VALID_BPP(1) | VALID_BPP(2) |
+ VALID_BPP(16) | VALID_BPP(18) |
+ VALID_BPP(24) | VALID_BPP(25)),
+ },
+};
+
+static struct s3c_fb_driverdata s3c_fb_data_64xx = {
+ .variant = {
+ .nr_windows = 5,
+ .vidtcon = VIDTCON0,
+ .wincon = WINCON(0),
+ .winmap = WINxMAP(0),
+ .keycon = WKEYCON,
+ .osd = VIDOSD_BASE,
+ .osd_stride = 16,
+ .buf_start = VIDW_BUF_START(0),
+ .buf_size = VIDW_BUF_SIZE(0),
+ .buf_end = VIDW_BUF_END(0),
+
+ .palette = {
+ [0] = 0x400,
+ [1] = 0x800,
+ [2] = 0x300,
+ [3] = 0x320,
+ [4] = 0x340,
+ },
+
+ .has_prtcon = 1,
+ },
+ .win[0] = &s3c_fb_data_64xx_wins[0],
+ .win[1] = &s3c_fb_data_64xx_wins[1],
+ .win[2] = &s3c_fb_data_64xx_wins[2],
+ .win[3] = &s3c_fb_data_64xx_wins[3],
+ .win[4] = &s3c_fb_data_64xx_wins[4],
+};
+
+static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = {
+ .variant = {
+ .nr_windows = 5,
+ .vidtcon = VIDTCON0,
+ .wincon = WINCON(0),
+ .winmap = WINxMAP(0),
+ .keycon = WKEYCON,
+ .osd = VIDOSD_BASE,
+ .osd_stride = 16,
+ .buf_start = VIDW_BUF_START(0),
+ .buf_size = VIDW_BUF_SIZE(0),
+ .buf_end = VIDW_BUF_END(0),
+
+ .palette = {
+ [0] = 0x2400,
+ [1] = 0x2800,
+ [2] = 0x2c00,
+ [3] = 0x3000,
+ [4] = 0x3400,
+ },
+
+ .has_prtcon = 1,
+ },
+ .win[0] = &s3c_fb_data_64xx_wins[0],
+ .win[1] = &s3c_fb_data_64xx_wins[1],
+ .win[2] = &s3c_fb_data_64xx_wins[2],
+ .win[3] = &s3c_fb_data_64xx_wins[3],
+ .win[4] = &s3c_fb_data_64xx_wins[4],
+};
+
+static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
+ .variant = {
+ .nr_windows = 5,
+ .vidtcon = VIDTCON0,
+ .wincon = WINCON(0),
+ .winmap = WINxMAP(0),
+ .keycon = WKEYCON,
+ .osd = VIDOSD_BASE,
+ .osd_stride = 16,
+ .buf_start = VIDW_BUF_START(0),
+ .buf_size = VIDW_BUF_SIZE(0),
+ .buf_end = VIDW_BUF_END(0),
+
+ .palette = {
+ [0] = 0x2400,
+ [1] = 0x2800,
+ [2] = 0x2c00,
+ [3] = 0x3000,
+ [4] = 0x3400,
+ },
+
+ .has_shadowcon = 1,
+ },
+ .win[0] = &s3c_fb_data_64xx_wins[0],
+ .win[1] = &s3c_fb_data_64xx_wins[1],
+ .win[2] = &s3c_fb_data_64xx_wins[2],
+ .win[3] = &s3c_fb_data_64xx_wins[3],
+ .win[4] = &s3c_fb_data_64xx_wins[4],
+};
+
+/* S3C2443/S3C2416 style hardware */
+static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = {
+ .variant = {
+ .nr_windows = 2,
+ .is_2443 = 1,
+
+ .vidtcon = 0x08,
+ .wincon = 0x14,
+ .winmap = 0xd0,
+ .keycon = 0xb0,
+ .osd = 0x28,
+ .osd_stride = 12,
+ .buf_start = 0x64,
+ .buf_size = 0x94,
+ .buf_end = 0x7c,
+
+ .palette = {
+ [0] = 0x400,
+ [1] = 0x800,
+ },
+ },
+ .win[0] = &(struct s3c_fb_win_variant) {
+ .palette_sz = 256,
+ .valid_bpp = VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(24),
+ },
+ .win[1] = &(struct s3c_fb_win_variant) {
+ .has_osd_c = 1,
+ .has_osd_alpha = 1,
+ .palette_sz = 256,
+ .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
+ VALID_BPP(18) | VALID_BPP(19) |
+ VALID_BPP(24) | VALID_BPP(25) |
+ VALID_BPP(28)),
+ },
+};
+
+static struct platform_device_id s3c_fb_driver_ids[] = {
+ {
+ .name = "s3c-fb",
+ .driver_data = (unsigned long)&s3c_fb_data_64xx,
+ }, {
+ .name = "s5pc100-fb",
+ .driver_data = (unsigned long)&s3c_fb_data_s5pc100,
+ }, {
+ .name = "s5pv210-fb",
+ .driver_data = (unsigned long)&s3c_fb_data_s5pv210,
+ }, {
+ .name = "s3c2443-fb",
+ .driver_data = (unsigned long)&s3c_fb_data_s3c2443,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, s3c_fb_driver_ids);
+
static struct platform_driver s3c_fb_driver = {
.probe = s3c_fb_probe,
.remove = __devexit_p(s3c_fb_remove),
.suspend = s3c_fb_suspend,
.resume = s3c_fb_resume,
+ .id_table = s3c_fb_driver_ids,
.driver = {
.name = "s3c-fb",
.owner = THIS_MODULE,
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
new file mode 100644
index 00000000000..5699ce0c178
--- /dev/null
+++ b/drivers/video/sh_mipi_dsi.c
@@ -0,0 +1,505 @@
+/*
+ * Renesas SH-mobile MIPI DSI support
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <video/mipi_display.h>
+#include <video/sh_mipi_dsi.h>
+#include <video/sh_mobile_lcdc.h>
+
+#define CMTSRTCTR 0x80d0
+#define CMTSRTREQ 0x8070
+
+#define DSIINTE 0x0060
+
+/* E.g., sh7372 has 2 MIPI-DSIs - one for each LCDC */
+#define MAX_SH_MIPI_DSI 2
+
+struct sh_mipi {
+ void __iomem *base;
+ struct clk *dsit_clk;
+ struct clk *dsip_clk;
+};
+
+static struct sh_mipi *mipi_dsi[MAX_SH_MIPI_DSI];
+
+/* Protect the above array */
+static DEFINE_MUTEX(array_lock);
+
+static struct sh_mipi *sh_mipi_by_handle(int handle)
+{
+ if (handle >= ARRAY_SIZE(mipi_dsi) || handle < 0)
+ return NULL;
+
+ return mipi_dsi[handle];
+}
+
+static int sh_mipi_send_short(struct sh_mipi *mipi, u8 dsi_cmd,
+ u8 cmd, u8 param)
+{
+ u32 data = (dsi_cmd << 24) | (cmd << 16) | (param << 8);
+ int cnt = 100;
+
+ /* transmit a short packet to LCD panel */
+ iowrite32(1 | data, mipi->base + CMTSRTCTR);
+ iowrite32(1, mipi->base + CMTSRTREQ);
+
+ while ((ioread32(mipi->base + CMTSRTREQ) & 1) && --cnt)
+ udelay(1);
+
+ return cnt ? 0 : -ETIMEDOUT;
+}
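
Editor's note: the transmit word packs the DSI data type, the DCS command and its parameter into bits 31..8, with bit 0 left for the hardware trigger. A small sketch of the same packing with values taken from the MIPI DSI/DCS specifications (0x15 = DCS short write with one parameter, 0x36 = set_address_mode), shown only to make the bit layout concrete:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a DSI short-packet word the same way sh_mipi_send_short() does:
     * [31:24] DSI data type, [23:16] DCS command, [15:8] parameter.
     */
    static uint32_t pack_short(uint8_t dsi_cmd, uint8_t cmd, uint8_t param)
    {
            return ((uint32_t)dsi_cmd << 24) | ((uint32_t)cmd << 16) |
                   ((uint32_t)param << 8);
    }

    int main(void)
    {
            uint32_t word = pack_short(0x15, 0x36, 0x00);

            printf("register word: 0x%08x\n", (unsigned int)word);
            return 0;
    }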
+
+#define LCD_CHAN2MIPI(c) ((c) < LCDC_CHAN_MAINLCD || (c) > LCDC_CHAN_SUBLCD ? \
+ -EINVAL : (c) - 1)
+
+static int sh_mipi_dcs(int handle, u8 cmd)
+{
+ struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle));
+ if (!mipi)
+ return -ENODEV;
+ return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE, cmd, 0);
+}
+
+static int sh_mipi_dcs_param(int handle, u8 cmd, u8 param)
+{
+ struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle));
+ if (!mipi)
+ return -ENODEV;
+ return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE_PARAM, cmd,
+ param);
+}
+
+static void sh_mipi_dsi_enable(struct sh_mipi *mipi, bool enable)
+{
+ /*
+ * enable LCDC data tx, transition to LPS after completion of each HS
+ * packet
+ */
+ iowrite32(0x00000002 | enable, mipi->base + 0x8000); /* DTCTR */
+}
+
+static void sh_mipi_shutdown(struct platform_device *pdev)
+{
+ struct sh_mipi *mipi = platform_get_drvdata(pdev);
+
+ sh_mipi_dsi_enable(mipi, false);
+}
+
+static void mipi_display_on(void *arg, struct fb_info *info)
+{
+ struct sh_mipi *mipi = arg;
+
+ sh_mipi_dsi_enable(mipi, true);
+}
+
+static void mipi_display_off(void *arg)
+{
+ struct sh_mipi *mipi = arg;
+
+ sh_mipi_dsi_enable(mipi, false);
+}
+
+static int __init sh_mipi_setup(struct sh_mipi *mipi,
+ struct sh_mipi_dsi_info *pdata)
+{
+ void __iomem *base = mipi->base;
+ struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
+ u32 pctype, datatype, pixfmt;
+ u32 linelength;
+ bool yuv;
+
+ /* Select data format */
+ switch (pdata->data_format) {
+ case MIPI_RGB888:
+ pctype = 0;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
+ linelength = ch->lcd_cfg.xres * 3;
+ yuv = false;
+ break;
+ case MIPI_RGB565:
+ pctype = 1;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
+ linelength = ch->lcd_cfg.xres * 2;
+ yuv = false;
+ break;
+ case MIPI_RGB666_LP:
+ pctype = 2;
+ datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
+ linelength = ch->lcd_cfg.xres * 3;
+ yuv = false;
+ break;
+ case MIPI_RGB666:
+ pctype = 3;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
+ linelength = (ch->lcd_cfg.xres * 18 + 7) / 8;
+ yuv = false;
+ break;
+ case MIPI_BGR888:
+ pctype = 8;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
+ linelength = ch->lcd_cfg.xres * 3;
+ yuv = false;
+ break;
+ case MIPI_BGR565:
+ pctype = 9;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
+ linelength = ch->lcd_cfg.xres * 2;
+ yuv = false;
+ break;
+ case MIPI_BGR666_LP:
+ pctype = 0xa;
+ datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
+ linelength = ch->lcd_cfg.xres * 3;
+ yuv = false;
+ break;
+ case MIPI_BGR666:
+ pctype = 0xb;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
+ linelength = (ch->lcd_cfg.xres * 18 + 7) / 8;
+ yuv = false;
+ break;
+ case MIPI_YUYV:
+ pctype = 4;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
+ pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
+ linelength = ch->lcd_cfg.xres * 2;
+ yuv = true;
+ break;
+ case MIPI_UYVY:
+ pctype = 5;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
+ pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
+ linelength = ch->lcd_cfg.xres * 2;
+ yuv = true;
+ break;
+ case MIPI_YUV420_L:
+ pctype = 6;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
+ pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
+ linelength = (ch->lcd_cfg.xres * 12 + 7) / 8;
+ yuv = true;
+ break;
+ case MIPI_YUV420:
+ pctype = 7;
+ datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
+ pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
+ /* Length of U/V line */
+ linelength = (ch->lcd_cfg.xres + 1) / 2;
+ yuv = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((yuv && ch->interface_type != YUV422) ||
+ (!yuv && ch->interface_type != RGB24))
+ return -EINVAL;
+
+ /* reset DSI link */
+ iowrite32(0x00000001, base); /* SYSCTRL */
+ /* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */
+ udelay(50);
+ iowrite32(0x00000000, base); /* SYSCTRL */
+
+ /* setup DSI link */
+
+ /*
+ * Default = ULPS enable |
+ * Contention detection enabled |
+ * EoT packet transmission enable |
+ * CRC check enable |
+ * ECC check enable
+ * additionally enable first two lanes
+ */
+ iowrite32(0x00003703, base + 0x04); /* SYSCONF */
+ /*
+ * T_wakeup = 0x7000
+ * T_hs-trail = 3
+ * T_hs-prepare = 3
+ * T_clk-trail = 3
+ * T_clk-prepare = 2
+ */
+ iowrite32(0x70003332, base + 0x08); /* TIMSET */
+ /* no responses requested */
+ iowrite32(0x00000000, base + 0x18); /* RESREQSET0 */
+ /* request response to packets of type 0x28 */
+ iowrite32(0x00000100, base + 0x1c); /* RESREQSET1 */
+ /* High-speed transmission timeout, default 0xffffffff */
+ iowrite32(0x0fffffff, base + 0x20); /* HSTTOVSET */
+ /* LP reception timeout, default 0xffffffff */
+ iowrite32(0x0fffffff, base + 0x24); /* LPRTOVSET */
+ /* Turn-around timeout, default 0xffffffff */
+ iowrite32(0x0fffffff, base + 0x28); /* TATOVSET */
+ /* Peripheral reset timeout, default 0xffffffff */
+ iowrite32(0x0fffffff, base + 0x2c); /* PRTOVSET */
+ /* Enable timeout counters */
+ iowrite32(0x00000f00, base + 0x30); /* DSICTRL */
+ /* Interrupts not used, disable all */
+ iowrite32(0, base + DSIINTE);
+ /* DSI-Tx bias on */
+ iowrite32(0x00000001, base + 0x70); /* PHYCTRL */
+ udelay(200);
+ /* Deassert resets, power on, set multiplier */
+ iowrite32(0x03070b01, base + 0x70); /* PHYCTRL */
+
+ /* setup l-bridge */
+
+ /*
+ * Enable transmission of all packets,
+ * transmit LPS after each HS packet completion
+ */
+ iowrite32(0x00000006, base + 0x8000); /* DTCTR */
+ /* VSYNC width = 2 (<< 17) */
+ iowrite32(0x00040000 | (pctype << 12) | datatype, base + 0x8020); /* VMCTR1 */
+ /*
+ * Non-burst mode with sync pulses: VSE and HSE are output,
+ * HSA period allowed, no commands in LP
+ */
+ iowrite32(0x00e00000, base + 0x8024); /* VMCTR2 */
+ /*
+ * 0x660 = 1632 bytes per line (RGB24, 544 pixels: see
+ * sh_mobile_lcdc_info.ch[0].lcd_cfg.xres), HSALEN = 1 - default
+ * (unused, since VMCTR2[HSABM] = 0)
+ */
+ iowrite32(1 | (linelength << 16), base + 0x8028); /* VMLEN1 */
+
+ msleep(5);
+
+ /* setup LCD panel */
+
+ /* cf. drivers/video/omap/lcd_mipid.c */
+ sh_mipi_dcs(ch->chan, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(120);
+ /*
+ * [7] - Page Address Mode
+ * [6] - Column Address Mode
+ * [5] - Page / Column Address Mode
+ * [4] - Display Device Line Refresh Order
+ * [3] - RGB/BGR Order
+ * [2] - Display Data Latch Data Order
+ * [1] - Flip Horizontal
+ * [0] - Flip Vertical
+ */
+ sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ /* cf. set_data_lines() */
+ sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_PIXEL_FORMAT,
+ pixfmt << 4);
+ sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
+
+ return 0;
+}
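
Editor's note: the linelength values programmed into VMLEN1 above are simply bytes per line for each packing. A quick standalone check of that arithmetic, using xres = 544 to match the example mentioned in the comment above (any width works; this is not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int xres = 544;        /* hypothetical panel width */

            /* bytes per line for the packed formats handled in the switch above */
            unsigned int rgb888 = xres * 3;                 /* 24 bits/pixel */
            unsigned int rgb565 = xres * 2;                 /* 16 bits/pixel */
            unsigned int rgb666 = (xres * 18 + 7) / 8;      /* 18 bits/pixel, rounded up */

            printf("RGB888 %u, RGB565 %u, packed RGB666 %u bytes/line\n",
                   rgb888, rgb565, rgb666);
            return 0;
    }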
+
+static int __init sh_mipi_probe(struct platform_device *pdev)
+{
+ struct sh_mipi *mipi;
+ struct sh_mipi_dsi_info *pdata = pdev->dev.platform_data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ unsigned long rate, f_current;
+ int idx = pdev->id, ret;
+ char dsip_clk[] = "dsi.p_clk";
+
+ if (!res || idx >= ARRAY_SIZE(mipi_dsi) || !pdata)
+ return -ENODEV;
+
+ mutex_lock(&array_lock);
+ if (idx < 0)
+ for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++)
+ ;
+
+ if (idx == ARRAY_SIZE(mipi_dsi)) {
+ ret = -EBUSY;
+ goto efindslot;
+ }
+
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
+ if (!mipi) {
+ ret = -ENOMEM;
+ goto ealloc;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "MIPI register region already claimed\n");
+ ret = -EBUSY;
+ goto ereqreg;
+ }
+
+ mipi->base = ioremap(res->start, resource_size(res));
+ if (!mipi->base) {
+ ret = -ENOMEM;
+ goto emap;
+ }
+
+ mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk");
+ if (IS_ERR(mipi->dsit_clk)) {
+ ret = PTR_ERR(mipi->dsit_clk);
+ goto eclktget;
+ }
+
+ f_current = clk_get_rate(mipi->dsit_clk);
+ /* 80MHz required by the datasheet */
+ rate = clk_round_rate(mipi->dsit_clk, 80000000);
+ if (rate > 0 && rate != f_current)
+ ret = clk_set_rate(mipi->dsit_clk, rate);
+ else
+ ret = rate;
+ if (ret < 0)
+ goto esettrate;
+
+ dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate);
+
+ sprintf(dsip_clk, "dsi%1.1dp_clk", idx);
+ mipi->dsip_clk = clk_get(&pdev->dev, dsip_clk);
+ if (IS_ERR(mipi->dsip_clk)) {
+ ret = PTR_ERR(mipi->dsip_clk);
+ goto eclkpget;
+ }
+
+ f_current = clk_get_rate(mipi->dsip_clk);
+ /* Between 10 and 50MHz */
+ rate = clk_round_rate(mipi->dsip_clk, 24000000);
+ if (rate > 0 && rate != f_current)
+ ret = clk_set_rate(mipi->dsip_clk, rate);
+ else
+ ret = rate;
+ if (ret < 0)
+ goto esetprate;
+
+ dev_dbg(&pdev->dev, "DSI-P clk %lu -> %lu\n", f_current, rate);
+
+ msleep(10);
+
+ ret = clk_enable(mipi->dsit_clk);
+ if (ret < 0)
+ goto eclkton;
+
+ ret = clk_enable(mipi->dsip_clk);
+ if (ret < 0)
+ goto eclkpon;
+
+ mipi_dsi[idx] = mipi;
+
+ ret = sh_mipi_setup(mipi, pdata);
+ if (ret < 0)
+ goto emipisetup;
+
+ mutex_unlock(&array_lock);
+ platform_set_drvdata(pdev, mipi);
+
+ /* Set up LCDC callbacks */
+ pdata->lcd_chan->board_cfg.board_data = mipi;
+ pdata->lcd_chan->board_cfg.display_on = mipi_display_on;
+ pdata->lcd_chan->board_cfg.display_off = mipi_display_off;
+
+ return 0;
+
+emipisetup:
+ mipi_dsi[idx] = NULL;
+ clk_disable(mipi->dsip_clk);
+eclkpon:
+ clk_disable(mipi->dsit_clk);
+eclkton:
+esetprate:
+ clk_put(mipi->dsip_clk);
+eclkpget:
+esettrate:
+ clk_put(mipi->dsit_clk);
+eclktget:
+ iounmap(mipi->base);
+emap:
+ release_mem_region(res->start, resource_size(res));
+ereqreg:
+ kfree(mipi);
+ealloc:
+efindslot:
+ mutex_unlock(&array_lock);
+
+ return ret;
+}
+
+static int __exit sh_mipi_remove(struct platform_device *pdev)
+{
+ struct sh_mipi_dsi_info *pdata = pdev->dev.platform_data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct sh_mipi *mipi = platform_get_drvdata(pdev);
+ int i, ret;
+
+ mutex_lock(&array_lock);
+
+ for (i = 0; i < ARRAY_SIZE(mipi_dsi) && mipi_dsi[i] != mipi; i++)
+ ;
+
+ if (i == ARRAY_SIZE(mipi_dsi)) {
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ mipi_dsi[i] = NULL;
+ }
+
+ mutex_unlock(&array_lock);
+
+ if (ret < 0)
+ return ret;
+
+ pdata->lcd_chan->board_cfg.display_on = NULL;
+ pdata->lcd_chan->board_cfg.display_off = NULL;
+ pdata->lcd_chan->board_cfg.board_data = NULL;
+
+ clk_disable(mipi->dsip_clk);
+ clk_disable(mipi->dsit_clk);
+ clk_put(mipi->dsit_clk);
+ clk_put(mipi->dsip_clk);
+ iounmap(mipi->base);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(mipi);
+
+ return 0;
+}
+
+static struct platform_driver sh_mipi_driver = {
+ .remove = __exit_p(sh_mipi_remove),
+ .shutdown = sh_mipi_shutdown,
+ .driver = {
+ .name = "sh-mipi-dsi",
+ },
+};
+
+static int __init sh_mipi_init(void)
+{
+ return platform_driver_probe(&sh_mipi_driver, sh_mipi_probe);
+}
+module_init(sh_mipi_init);
+
+static void __exit sh_mipi_exit(void)
+{
+ platform_driver_unregister(&sh_mipi_driver);
+}
+module_exit(sh_mipi_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("SuperH / ARM-shmobile MIPI DSI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
new file mode 100644
index 00000000000..2fde08cc66b
--- /dev/null
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -0,0 +1,1028 @@
+/*
+ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
+ * for SLISHDMI13T and SLIPHDMIT IP cores
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <video/sh_mobile_hdmi.h>
+#include <video/sh_mobile_lcdc.h>
+
+#define HDMI_SYSTEM_CTRL 0x00 /* System control */
+#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control,
+ bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
+#define HDMI_20_BIT_N_FOR_AUDIO_RPKT_15_8 0x02 /* bits 15..8 of 20-bit N for Audio Clock Regeneration packet */
+#define HDMI_20_BIT_N_FOR_AUDIO_RPKT_7_0 0x03 /* bits 7..0 of 20-bit N for Audio Clock Regeneration packet */
+#define HDMI_SPDIF_AUDIO_SAMP_FREQ_CTS 0x04 /* SPDIF audio sampling frequency,
+ bits 19..16 of Internal CTS */
+#define HDMI_INTERNAL_CTS_15_8 0x05 /* bits 15..8 of Internal CTS */
+#define HDMI_INTERNAL_CTS_7_0 0x06 /* bits 7..0 of Internal CTS */
+#define HDMI_EXTERNAL_CTS_19_16 0x07 /* External CTS */
+#define HDMI_EXTERNAL_CTS_15_8 0x08 /* External CTS */
+#define HDMI_EXTERNAL_CTS_7_0 0x09 /* External CTS */
+#define HDMI_AUDIO_SETTING_1 0x0A /* Audio setting.1 */
+#define HDMI_AUDIO_SETTING_2 0x0B /* Audio setting.2 */
+#define HDMI_I2S_AUDIO_SET 0x0C /* I2S audio setting */
+#define HDMI_DSD_AUDIO_SET 0x0D /* DSD audio setting */
+#define HDMI_DEBUG_MONITOR_1 0x0E /* Debug monitor.1 */
+#define HDMI_DEBUG_MONITOR_2 0x0F /* Debug monitor.2 */
+#define HDMI_I2S_INPUT_PIN_SWAP 0x10 /* I2S input pin swap */
+#define HDMI_AUDIO_STATUS_BITS_SETTING_1 0x11 /* Audio status bits setting.1 */
+#define HDMI_AUDIO_STATUS_BITS_SETTING_2 0x12 /* Audio status bits setting.2 */
+#define HDMI_CATEGORY_CODE 0x13 /* Category code */
+#define HDMI_SOURCE_NUM_AUDIO_WORD_LEN 0x14 /* Source number/Audio word length */
+#define HDMI_AUDIO_VIDEO_SETTING_1 0x15 /* Audio/Video setting.1 */
+#define HDMI_VIDEO_SETTING_1 0x16 /* Video setting.1 */
+#define HDMI_DEEP_COLOR_MODES 0x17 /* Deep Color Modes */
+
+/* 12 16- and 10-bit Color space conversion parameters: 0x18..0x2f */
+#define HDMI_COLOR_SPACE_CONVERSION_PARAMETERS 0x18
+
+#define HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS 0x30 /* External video parameter settings */
+#define HDMI_EXTERNAL_H_TOTAL_7_0 0x31 /* External horizontal total (LSB) */
+#define HDMI_EXTERNAL_H_TOTAL_11_8 0x32 /* External horizontal total (MSB) */
+#define HDMI_EXTERNAL_H_BLANK_7_0 0x33 /* External horizontal blank (LSB) */
+#define HDMI_EXTERNAL_H_BLANK_9_8 0x34 /* External horizontal blank (MSB) */
+#define HDMI_EXTERNAL_H_DELAY_7_0 0x35 /* External horizontal delay (LSB) */
+#define HDMI_EXTERNAL_H_DELAY_9_8 0x36 /* External horizontal delay (MSB) */
+#define HDMI_EXTERNAL_H_DURATION_7_0 0x37 /* External horizontal duration (LSB) */
+#define HDMI_EXTERNAL_H_DURATION_9_8 0x38 /* External horizontal duration (MSB) */
+#define HDMI_EXTERNAL_V_TOTAL_7_0 0x39 /* External vertical total (LSB) */
+#define HDMI_EXTERNAL_V_TOTAL_9_8 0x3A /* External vertical total (MSB) */
+#define HDMI_AUDIO_VIDEO_SETTING_2 0x3B /* Audio/Video setting.2 */
+#define HDMI_EXTERNAL_V_BLANK 0x3D /* External vertical blank */
+#define HDMI_EXTERNAL_V_DELAY 0x3E /* External vertical delay */
+#define HDMI_EXTERNAL_V_DURATION 0x3F /* External vertical duration */
+#define HDMI_CTRL_PKT_MANUAL_SEND_CONTROL 0x40 /* Control packet manual send control */
+#define HDMI_CTRL_PKT_AUTO_SEND 0x41 /* Control packet auto send with VSYNC control */
+#define HDMI_AUTO_CHECKSUM_OPTION 0x42 /* Auto checksum option */
+#define HDMI_VIDEO_SETTING_2 0x45 /* Video setting.2 */
+#define HDMI_OUTPUT_OPTION 0x46 /* Output option */
+#define HDMI_SLIPHDMIT_PARAM_OPTION 0x51 /* SLIPHDMIT parameter option */
+#define HDMI_HSYNC_PMENT_AT_EMB_7_0 0x52 /* HSYNC placement at embedded sync (LSB) */
+#define HDMI_HSYNC_PMENT_AT_EMB_15_8 0x53 /* HSYNC placement at embedded sync (MSB) */
+#define HDMI_VSYNC_PMENT_AT_EMB_7_0 0x54 /* VSYNC placement at embedded sync (LSB) */
+#define HDMI_VSYNC_PMENT_AT_EMB_14_8 0x55 /* VSYNC placement at embedded sync (MSB) */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_1 0x56 /* SLIPHDMIT parameter settings.1 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_2 0x57 /* SLIPHDMIT parameter settings.2 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_3 0x58 /* SLIPHDMIT parameter settings.3 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_5 0x59 /* SLIPHDMIT parameter settings.5 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_6 0x5A /* SLIPHDMIT parameter settings.6 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_7 0x5B /* SLIPHDMIT parameter settings.7 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_8 0x5C /* SLIPHDMIT parameter settings.8 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_9 0x5D /* SLIPHDMIT parameter settings.9 */
+#define HDMI_SLIPHDMIT_PARAM_SETTINGS_10 0x5E /* SLIPHDMIT parameter settings.10 */
+#define HDMI_CTRL_PKT_BUF_INDEX 0x5F /* Control packet buffer index */
+#define HDMI_CTRL_PKT_BUF_ACCESS_HB0 0x60 /* Control packet data buffer access window - HB0 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_HB1 0x61 /* Control packet data buffer access window - HB1 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_HB2 0x62 /* Control packet data buffer access window - HB2 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB0 0x63 /* Control packet data buffer access window - PB0 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB1 0x64 /* Control packet data buffer access window - PB1 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB2 0x65 /* Control packet data buffer access window - PB2 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB3 0x66 /* Control packet data buffer access window - PB3 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB4 0x67 /* Control packet data buffer access window - PB4 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB5 0x68 /* Control packet data buffer access window - PB5 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB6 0x69 /* Control packet data buffer access window - PB6 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB7 0x6A /* Control packet data buffer access window - PB7 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB8 0x6B /* Control packet data buffer access window - PB8 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB9 0x6C /* Control packet data buffer access window - PB9 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB10 0x6D /* Control packet data buffer access window - PB10 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB11 0x6E /* Control packet data buffer access window - PB11 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB12 0x6F /* Control packet data buffer access window - PB12 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB13 0x70 /* Control packet data buffer access window - PB13 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB14 0x71 /* Control packet data buffer access window - PB14 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB15 0x72 /* Control packet data buffer access window - PB15 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB16 0x73 /* Control packet data buffer access window - PB16 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB17 0x74 /* Control packet data buffer access window - PB17 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB18 0x75 /* Control packet data buffer access window - PB18 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB19 0x76 /* Control packet data buffer access window - PB19 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB20 0x77 /* Control packet data buffer access window - PB20 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB21 0x78 /* Control packet data buffer access window - PB21 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB22 0x79 /* Control packet data buffer access window - PB22 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB23 0x7A /* Control packet data buffer access window - PB23 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB24 0x7B /* Control packet data buffer access window - PB24 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB25 0x7C /* Control packet data buffer access window - PB25 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB26 0x7D /* Control packet data buffer access window - PB26 */
+#define HDMI_CTRL_PKT_BUF_ACCESS_PB27 0x7E /* Control packet data buffer access window - PB27 */
+#define HDMI_EDID_KSV_FIFO_ACCESS_WINDOW 0x80 /* EDID/KSV FIFO access window */
+#define HDMI_DDC_BUS_ACCESS_FREQ_CTRL_7_0 0x81 /* DDC bus access frequency control (LSB) */
+#define HDMI_DDC_BUS_ACCESS_FREQ_CTRL_15_8 0x82 /* DDC bus access frequency control (MSB) */
+#define HDMI_INTERRUPT_MASK_1 0x92 /* Interrupt mask.1 */
+#define HDMI_INTERRUPT_MASK_2 0x93 /* Interrupt mask.2 */
+#define HDMI_INTERRUPT_STATUS_1 0x94 /* Interrupt status.1 */
+#define HDMI_INTERRUPT_STATUS_2 0x95 /* Interrupt status.2 */
+#define HDMI_INTERRUPT_MASK_3 0x96 /* Interrupt mask.3 */
+#define HDMI_INTERRUPT_MASK_4 0x97 /* Interrupt mask.4 */
+#define HDMI_INTERRUPT_STATUS_3 0x98 /* Interrupt status.3 */
+#define HDMI_INTERRUPT_STATUS_4 0x99 /* Interrupt status.4 */
+#define HDMI_SOFTWARE_HDCP_CONTROL_1 0x9A /* Software HDCP control.1 */
+#define HDMI_FRAME_COUNTER 0x9C /* Frame counter */
+#define HDMI_FRAME_COUNTER_FOR_RI_CHECK 0x9D /* Frame counter for Ri check */
+#define HDMI_HDCP_CONTROL 0xAF /* HDCP control */
+#define HDMI_RI_FRAME_COUNT_REGISTER 0xB2 /* Ri frame count register */
+#define HDMI_DDC_BUS_CONTROL 0xB7 /* DDC bus control */
+#define HDMI_HDCP_STATUS 0xB8 /* HDCP status */
+#define HDMI_SHA0 0xB9 /* sha0 */
+#define HDMI_SHA1 0xBA /* sha1 */
+#define HDMI_SHA2 0xBB /* sha2 */
+#define HDMI_SHA3 0xBC /* sha3 */
+#define HDMI_SHA4 0xBD /* sha4 */
+#define HDMI_BCAPS_READ 0xBE /* BCAPS read / debug */
+#define HDMI_AKSV_BKSV_7_0_MONITOR 0xBF /* AKSV/BKSV[7:0] monitor */
+#define HDMI_AKSV_BKSV_15_8_MONITOR 0xC0 /* AKSV/BKSV[15:8] monitor */
+#define HDMI_AKSV_BKSV_23_16_MONITOR 0xC1 /* AKSV/BKSV[23:16] monitor */
+#define HDMI_AKSV_BKSV_31_24_MONITOR 0xC2 /* AKSV/BKSV[31:24] monitor */
+#define HDMI_AKSV_BKSV_39_32_MONITOR 0xC3 /* AKSV/BKSV[39:32] monitor */
+#define HDMI_EDID_SEGMENT_POINTER 0xC4 /* EDID segment pointer */
+#define HDMI_EDID_WORD_ADDRESS 0xC5 /* EDID word address */
+#define HDMI_EDID_DATA_FIFO_ADDRESS 0xC6 /* EDID data FIFO address */
+#define HDMI_NUM_OF_HDMI_DEVICES 0xC7 /* Number of HDMI devices */
+#define HDMI_HDCP_ERROR_CODE 0xC8 /* HDCP error code */
+#define HDMI_100MS_TIMER_SET 0xC9 /* 100ms timer setting */
+#define HDMI_5SEC_TIMER_SET 0xCA /* 5sec timer setting */
+#define HDMI_RI_READ_COUNT 0xCB /* Ri read count */
+#define HDMI_AN_SEED 0xCC /* An seed */
+#define HDMI_MAX_NUM_OF_RCIVRS_ALLOWED 0xCD /* Maximum number of receivers allowed */
+#define HDMI_HDCP_MEMORY_ACCESS_CONTROL_1 0xCE /* HDCP memory access control.1 */
+#define HDMI_HDCP_MEMORY_ACCESS_CONTROL_2 0xCF /* HDCP memory access control.2 */
+#define HDMI_HDCP_CONTROL_2 0xD0 /* HDCP Control 2 */
+#define HDMI_HDCP_KEY_MEMORY_CONTROL 0xD2 /* HDCP Key Memory Control */
+#define HDMI_COLOR_SPACE_CONV_CONFIG_1 0xD3 /* Color space conversion configuration.1 */
+#define HDMI_VIDEO_SETTING_3 0xD4 /* Video setting.3 */
+#define HDMI_RI_7_0 0xD5 /* Ri[7:0] */
+#define HDMI_RI_15_8 0xD6 /* Ri[15:8] */
+#define HDMI_PJ 0xD7 /* Pj */
+#define HDMI_SHA_RD 0xD8 /* sha_rd */
+#define HDMI_RI_7_0_SAVED 0xD9 /* Ri[7:0] saved */
+#define HDMI_RI_15_8_SAVED 0xDA /* Ri[15:8] saved */
+#define HDMI_PJ_SAVED 0xDB /* Pj saved */
+#define HDMI_NUM_OF_DEVICES 0xDC /* Number of devices */
+#define HDMI_HOT_PLUG_MSENS_STATUS 0xDF /* Hot plug/MSENS status */
+#define HDMI_BCAPS_WRITE 0xE0 /* bcaps */
+#define HDMI_BSTAT_7_0 0xE1 /* bstat[7:0] */
+#define HDMI_BSTAT_15_8 0xE2 /* bstat[15:8] */
+#define HDMI_BKSV_7_0 0xE3 /* bksv[7:0] */
+#define HDMI_BKSV_15_8 0xE4 /* bksv[15:8] */
+#define HDMI_BKSV_23_16 0xE5 /* bksv[23:16] */
+#define HDMI_BKSV_31_24 0xE6 /* bksv[31:24] */
+#define HDMI_BKSV_39_32 0xE7 /* bksv[39:32] */
+#define HDMI_AN_7_0 0xE8 /* An[7:0] */
+#define HDMI_AN_15_8 0xE9 /* An [15:8] */
+#define HDMI_AN_23_16 0xEA /* An [23:16] */
+#define HDMI_AN_31_24 0xEB /* An [31:24] */
+#define HDMI_AN_39_32 0xEC /* An [39:32] */
+#define HDMI_AN_47_40 0xED /* An [47:40] */
+#define HDMI_AN_55_48 0xEE /* An [55:48] */
+#define HDMI_AN_63_56 0xEF /* An [63:56] */
+#define HDMI_PRODUCT_ID 0xF0 /* Product ID */
+#define HDMI_REVISION_ID 0xF1 /* Revision ID */
+#define HDMI_TEST_MODE 0xFE /* Test mode */
+
+enum hotplug_state {
+ HDMI_HOTPLUG_DISCONNECTED,
+ HDMI_HOTPLUG_CONNECTED,
+ HDMI_HOTPLUG_EDID_DONE,
+};
+
+struct sh_hdmi {
+ void __iomem *base;
+ enum hotplug_state hp_state;
+ struct clk *hdmi_clk;
+ struct device *dev;
+ struct fb_info *info;
+ struct delayed_work edid_work;
+ struct fb_var_screeninfo var;
+};
+
+static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+ iowrite8(data, hdmi->base + reg);
+}
+
+static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+{
+ return ioread8(hdmi->base + reg);
+}
+
+/* External video parameter settings */
+static void hdmi_external_video_param(struct sh_hdmi *hdmi)
+{
+ struct fb_var_screeninfo *var = &hdmi->var;
+ u16 htotal, hblank, hdelay, vtotal, vblank, vdelay, voffset;
+ u8 sync = 0;
+
+ htotal = var->xres + var->right_margin + var->left_margin + var->hsync_len;
+
+ hdelay = var->hsync_len + var->left_margin;
+ hblank = var->right_margin + hdelay;
+
+ /*
+ * Vertical timing looks a bit different in Figure 18,
+ * but let's try the same first by setting offset = 0
+ */
+ vtotal = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
+
+ vdelay = var->vsync_len + var->upper_margin;
+ vblank = var->lower_margin + vdelay;
+ voffset = min(var->upper_margin / 2, 6U);
+
+ /*
+ * [3]: VSYNC polarity: Positive
+ * [2]: HSYNC polarity: Positive
+ * [1]: Interlace/Progressive: Progressive
+ * [0]: External video settings enable: used.
+ */
+ if (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ sync |= 4;
+ if (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ sync |= 8;
+
+ pr_debug("H: %u, %u, %u, %u; V: %u, %u, %u, %u; sync 0x%x\n",
+ htotal, hblank, hdelay, var->hsync_len,
+ vtotal, vblank, vdelay, var->vsync_len, sync);
+
+ hdmi_write(hdmi, sync | (voffset << 4), HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS);
+
+ hdmi_write(hdmi, htotal, HDMI_EXTERNAL_H_TOTAL_7_0);
+ hdmi_write(hdmi, htotal >> 8, HDMI_EXTERNAL_H_TOTAL_11_8);
+
+ hdmi_write(hdmi, hblank, HDMI_EXTERNAL_H_BLANK_7_0);
+ hdmi_write(hdmi, hblank >> 8, HDMI_EXTERNAL_H_BLANK_9_8);
+
+ hdmi_write(hdmi, hdelay, HDMI_EXTERNAL_H_DELAY_7_0);
+ hdmi_write(hdmi, hdelay >> 8, HDMI_EXTERNAL_H_DELAY_9_8);
+
+ hdmi_write(hdmi, var->hsync_len, HDMI_EXTERNAL_H_DURATION_7_0);
+ hdmi_write(hdmi, var->hsync_len >> 8, HDMI_EXTERNAL_H_DURATION_9_8);
+
+ hdmi_write(hdmi, vtotal, HDMI_EXTERNAL_V_TOTAL_7_0);
+ hdmi_write(hdmi, vtotal >> 8, HDMI_EXTERNAL_V_TOTAL_9_8);
+
+ hdmi_write(hdmi, vblank, HDMI_EXTERNAL_V_BLANK);
+
+ hdmi_write(hdmi, vdelay, HDMI_EXTERNAL_V_DELAY);
+
+ hdmi_write(hdmi, var->vsync_len, HDMI_EXTERNAL_V_DURATION);
+
+ /* Set bit 0 of HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS here for manual mode */
+}
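
Editor's note: the blanking arithmetic above maps fbdev margins onto the HDMI core's total/blank/delay registers. A standalone recomputation with roughly CEA 1280x720p60 numbers (illustrative values only, not taken from any particular board):

    #include <stdio.h>

    struct mode_sketch {
            unsigned int xres, yres;
            unsigned int left_margin, right_margin, hsync_len;
            unsigned int upper_margin, lower_margin, vsync_len;
    };

    int main(void)
    {
            struct mode_sketch m = {
                    .xres = 1280, .yres = 720,
                    .left_margin = 220, .right_margin = 110, .hsync_len = 40,
                    .upper_margin = 20, .lower_margin = 5, .vsync_len = 5,
            };

            /* same derivation as hdmi_external_video_param() */
            unsigned int hdelay = m.hsync_len + m.left_margin;
            unsigned int hblank = m.right_margin + hdelay;
            unsigned int htotal = m.xres + hblank;

            unsigned int vdelay = m.vsync_len + m.upper_margin;
            unsigned int vblank = m.lower_margin + vdelay;
            unsigned int vtotal = m.yres + vblank;

            printf("H: total %u blank %u delay %u; V: total %u blank %u delay %u\n",
                   htotal, hblank, hdelay, vtotal, vblank, vdelay);
            return 0;
    }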
+
+/**
+ * sh_hdmi_video_config()
+ */
+static void sh_hdmi_video_config(struct sh_hdmi *hdmi)
+{
+ /*
+ * [7:4]: Audio sampling frequency: 48kHz
+ * [3:1]: Input video format: RGB and YCbCr 4:4:4 (Y on Green)
+ * [0]: Internal/External DE select: internal
+ */
+ hdmi_write(hdmi, 0x20, HDMI_AUDIO_VIDEO_SETTING_1);
+
+ /*
+ * [7:6]: Video output format: RGB 4:4:4
+ * [5:4]: Input video data width: 8 bit
+ * [3:1]: EAV/SAV location: channel 1
+ * [0]: Video input color space: RGB
+ */
+ hdmi_write(hdmi, 0x34, HDMI_VIDEO_SETTING_1);
+
+ /*
+ * [7:6]: Together with bit [6] of HDMI_AUDIO_VIDEO_SETTING_2, which is
+ * left at 0 by default, this configures 24bpp and sets the Color Depth
+ * (CD) field in the General Control Packet
+ */
+ hdmi_write(hdmi, 0x20, HDMI_DEEP_COLOR_MODES);
+}
+
+/**
+ * sh_hdmi_audio_config()
+ */
+static void sh_hdmi_audio_config(struct sh_hdmi *hdmi)
+{
+ /*
+ * [7:4] L/R data swap control
+ * [3:0] appropriate N[19:16]
+ */
+ hdmi_write(hdmi, 0x00, HDMI_L_R_DATA_SWAP_CTRL_RPKT);
+ /* appropriate N[15:8] */
+ hdmi_write(hdmi, 0x18, HDMI_20_BIT_N_FOR_AUDIO_RPKT_15_8);
+ /* appropriate N[7:0] */
+ hdmi_write(hdmi, 0x00, HDMI_20_BIT_N_FOR_AUDIO_RPKT_7_0);
+
+ /* [7:4] 48 kHz SPDIF not used */
+ hdmi_write(hdmi, 0x20, HDMI_SPDIF_AUDIO_SAMP_FREQ_CTS);
+
+ /*
+ * [6:5] set required down sampling rate if required
+ * [4:3] set required audio source
+ */
+ hdmi_write(hdmi, 0x00, HDMI_AUDIO_SETTING_1);
+
+ /* [3:0] set sending channel number for channel status */
+ hdmi_write(hdmi, 0x40, HDMI_AUDIO_SETTING_2);
+
+ /*
+ * [5:2] set valid I2S source input pin
+ * [1:0] set input I2S source mode
+ */
+ hdmi_write(hdmi, 0x04, HDMI_I2S_AUDIO_SET);
+
+ /* [7:4] set valid DSD source input pin */
+ hdmi_write(hdmi, 0x00, HDMI_DSD_AUDIO_SET);
+
+ /* [7:0] set appropriate I2S input pin swap settings if required */
+ hdmi_write(hdmi, 0x00, HDMI_I2S_INPUT_PIN_SWAP);
+
+ /*
+ * [7] set validity bit for channel status
+ * [3:0] set original sample frequency for channel status
+ */
+ hdmi_write(hdmi, 0x00, HDMI_AUDIO_STATUS_BITS_SETTING_1);
+
+ /*
+ * [7] set value for channel status
+ * [6] set value for channel status
+ * [5] set copyright bit for channel status
+ * [4:2] set additional information for channel status
+ * [1:0] set clock accuracy for channel status
+ */
+ hdmi_write(hdmi, 0x00, HDMI_AUDIO_STATUS_BITS_SETTING_2);
+
+ /* [7:0] set category code for channel status */
+ hdmi_write(hdmi, 0x00, HDMI_CATEGORY_CODE);
+
+ /*
+ * [7:4] set source number for channel status
+ * [3:0] set word length for channel status
+ */
+ hdmi_write(hdmi, 0x00, HDMI_SOURCE_NUM_AUDIO_WORD_LEN);
+
+ /* [7:4] set sample frequency for channel status */
+ hdmi_write(hdmi, 0x20, HDMI_AUDIO_VIDEO_SETTING_1);
+}
+
+/**
+ * sh_hdmi_phy_config()
+ */
+static void sh_hdmi_phy_config(struct sh_hdmi *hdmi)
+{
+ /* 720p, 8bit, 74.25MHz. Might need to be adjusted for other formats */
+ hdmi_write(hdmi, 0x19, HDMI_SLIPHDMIT_PARAM_SETTINGS_1);
+ hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_2);
+ hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_3);
+ /* PLLA_CONFIG[7:0]: VCO gain, VCO offset, LPF resistance[0] */
+ hdmi_write(hdmi, 0x44, HDMI_SLIPHDMIT_PARAM_SETTINGS_5);
+ hdmi_write(hdmi, 0x32, HDMI_SLIPHDMIT_PARAM_SETTINGS_6);
+ hdmi_write(hdmi, 0x4A, HDMI_SLIPHDMIT_PARAM_SETTINGS_7);
+ hdmi_write(hdmi, 0x0E, HDMI_SLIPHDMIT_PARAM_SETTINGS_8);
+ hdmi_write(hdmi, 0x25, HDMI_SLIPHDMIT_PARAM_SETTINGS_9);
+ hdmi_write(hdmi, 0x04, HDMI_SLIPHDMIT_PARAM_SETTINGS_10);
+}
+
+/**
+ * sh_hdmi_avi_infoframe_setup() - Auxiliary Video Information InfoFrame CONTROL PACKET
+ */
+static void sh_hdmi_avi_infoframe_setup(struct sh_hdmi *hdmi)
+{
+ /* AVI InfoFrame */
+ hdmi_write(hdmi, 0x06, HDMI_CTRL_PKT_BUF_INDEX);
+
+ /* Packet Type = 0x82 */
+ hdmi_write(hdmi, 0x82, HDMI_CTRL_PKT_BUF_ACCESS_HB0);
+
+ /* Version = 0x02 */
+ hdmi_write(hdmi, 0x02, HDMI_CTRL_PKT_BUF_ACCESS_HB1);
+
+ /* Length = 13 (0x0D) */
+ hdmi_write(hdmi, 0x0D, HDMI_CTRL_PKT_BUF_ACCESS_HB2);
+
+ /* N. A. Checksum */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0);
+
+ /*
+ * Y = RGB
+ * A0 = No Data
+ * B = Bar Data not valid
+ * S = No Data
+ */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB1);
+
+ /*
+ * C = No Data
+ * M = 16:9 Picture Aspect Ratio
+ * R = Same as picture aspect ratio
+ */
+ hdmi_write(hdmi, 0x28, HDMI_CTRL_PKT_BUF_ACCESS_PB2);
+
+ /*
+ * ITC = No Data
+ * EC = xvYCC601
+ * Q = Default (depends on video format)
+ * SC = No Known non_uniform Scaling
+ */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB3);
+
+ /*
+ * VIC = 1280 x 720p: ignored if external config is used
+ * Send 2 for 720 x 480p, 16 for 1080p
+ */
+ hdmi_write(hdmi, 4, HDMI_CTRL_PKT_BUF_ACCESS_PB4);
+
+ /* PR = No Repetition */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB5);
+
+ /* Line Number of End of Top Bar (lower 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB6);
+
+ /* Line Number of End of Top Bar (upper 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB7);
+
+ /* Line Number of Start of Bottom Bar (lower 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB8);
+
+ /* Line Number of Start of Bottom Bar (upper 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB9);
+
+ /* Pixel Number of End of Left Bar (lower 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB10);
+
+ /* Pixel Number of End of Left Bar (upper 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB11);
+
+ /* Pixel Number of Start of Right Bar (lower 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB12);
+
+ /* Pixel Number of Start of Right Bar (upper 8 bits) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB13);
+}
+
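/*
 * For reference (not part of the patch): PB0 is left at zero above, so the
 * checksum is presumably filled in or ignored by the transmitter hardware --
 * an assumption, the patch itself does not say. A CEA-861 InfoFrame checksum
 * simply makes header plus payload sum to zero modulo 256; a minimal,
 * hypothetical helper would look like this:
 */
static u8 infoframe_checksum(const u8 *hb, size_t hb_len,
			     const u8 *pb, size_t pb_len)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < hb_len; i++)
		sum += hb[i];
	for (i = 0; i < pb_len; i++)
		sum += pb[i];
	/* two's complement, so that the complete packet sums to 0x00 */
	return (u8)(0x100 - sum);
}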
+/**
+ * sh_hdmi_audio_infoframe_setup() - Audio InfoFrame of CONTROL PACKET
+ */
+static void sh_hdmi_audio_infoframe_setup(struct sh_hdmi *hdmi)
+{
+ /* Audio InfoFrame */
+ hdmi_write(hdmi, 0x08, HDMI_CTRL_PKT_BUF_INDEX);
+
+ /* Packet Type = 0x84 */
+ hdmi_write(hdmi, 0x84, HDMI_CTRL_PKT_BUF_ACCESS_HB0);
+
+ /* Version Number = 0x01 */
+ hdmi_write(hdmi, 0x01, HDMI_CTRL_PKT_BUF_ACCESS_HB1);
+
+ /* Length = 10 (0x0A) */
+ hdmi_write(hdmi, 0x0A, HDMI_CTRL_PKT_BUF_ACCESS_HB2);
+
+ /* n. a. Checksum */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0);
+
+ /* Audio Channel Count = Refer to Stream Header */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB1);
+
+ /* Refer to Stream Header */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB2);
+
+ /* Format depends on coding type (i.e. CT0...CT3) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB3);
+
+ /* Speaker Channel Allocation = Front Right + Front Left */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB4);
+
+ /* Level Shift Value = 0 dB, down-mix is permitted or no information */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB5);
+
+ /* Reserved (0) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB6);
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB7);
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB8);
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB9);
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB10);
+}
+
+/**
+ * sh_hdmi_gamut_metadata_setup() - Gamut Metadata Packet of CONTROL PACKET
+ */
+static void sh_hdmi_gamut_metadata_setup(struct sh_hdmi *hdmi)
+{
+ int i;
+
+ /* Gamut Metadata Packet */
+ hdmi_write(hdmi, 0x04, HDMI_CTRL_PKT_BUF_INDEX);
+
+ /* Packet Type = 0x0A */
+ hdmi_write(hdmi, 0x0A, HDMI_CTRL_PKT_BUF_ACCESS_HB0);
+ /* Gamut Packet is not used, so default value */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1);
+ /* Gamut Packet is not used, so default value */
+ hdmi_write(hdmi, 0x10, HDMI_CTRL_PKT_BUF_ACCESS_HB2);
+
+ /* GBD bytes 0 through 27 */
+ for (i = 0; i <= 27; i++)
+ /* HDMI_CTRL_PKT_BUF_ACCESS_PB0_63H - PB27_7EH */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i);
+}
+
+/**
+ * sh_hdmi_acp_setup() - Audio Content Protection Packet (ACP)
+ */
+static void sh_hdmi_acp_setup(struct sh_hdmi *hdmi)
+{
+ int i;
+
+ /* Audio Content Protection Packet (ACP) */
+ hdmi_write(hdmi, 0x01, HDMI_CTRL_PKT_BUF_INDEX);
+
+ /* Packet Type = 0x04 */
+ hdmi_write(hdmi, 0x04, HDMI_CTRL_PKT_BUF_ACCESS_HB0);
+ /* ACP_Type */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1);
+ /* Reserved (0) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB2);
+
+ /* ACP packet bytes PB0 through PB27 */
+ for (i = 0; i <= 27; i++)
+ /* HDMI_CTRL_PKT_BUF_ACCESS_PB0 - PB27 */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i);
+}
+
+/**
+ * sh_hdmi_isrc1_setup() - ISRC1 Packet
+ */
+static void sh_hdmi_isrc1_setup(struct sh_hdmi *hdmi)
+{
+ int i;
+
+ /* ISRC1 Packet */
+ hdmi_write(hdmi, 0x02, HDMI_CTRL_PKT_BUF_INDEX);
+
+ /* Packet Type = 0x05 */
+ hdmi_write(hdmi, 0x05, HDMI_CTRL_PKT_BUF_ACCESS_HB0);
+ /* ISRC_Cont, ISRC_Valid, Reserved (0), ISRC_Status */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1);
+ /* Reserved (0) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB2);
+
+ /* PB0-PB15 carry UPC_EAN_ISRC_0 through UPC_EAN_ISRC_15 (left at 0 here) */
+ /* Bytes PB16-PB27 shall be set to a value of 0 */
+ for (i = 0; i <= 27; i++)
+ /* HDMI_CTRL_PKT_BUF_ACCESS_PB0 - PB27 */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i);
+}
+
+/**
+ * sh_hdmi_isrc2_setup() - ISRC2 Packet
+ */
+static void sh_hdmi_isrc2_setup(struct sh_hdmi *hdmi)
+{
+ int i;
+
+ /* ISRC2 Packet */
+ hdmi_write(hdmi, 0x03, HDMI_CTRL_PKT_BUF_INDEX);
+
+ /* HB0 Packet Type = 0x06 */
+ hdmi_write(hdmi, 0x06, HDMI_CTRL_PKT_BUF_ACCESS_HB0);
+ /* Reserved (0) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1);
+ /* Reserved (0) */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB2);
+
+ /* PB0-PB15 carry UPC_EAN_ISRC_16 through UPC_EAN_ISRC_31 (left at 0 here) */
+ /* Bytes PB16-PB27 shall be set to a value of 0 */
+ for (i = 0; i <= 27; i++)
+ /* HDMI_CTRL_PKT_BUF_ACCESS_PB0 - PB27 */
+ hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i);
+}
+
+/**
+ * sh_hdmi_configure() - Initialise HDMI for output
+ */
+static void sh_hdmi_configure(struct sh_hdmi *hdmi)
+{
+ /* Configure video format */
+ sh_hdmi_video_config(hdmi);
+
+ /* Configure audio format */
+ sh_hdmi_audio_config(hdmi);
+
+ /* Configure PHY */
+ sh_hdmi_phy_config(hdmi);
+
+ /* Auxiliary Video Information (AVI) InfoFrame */
+ sh_hdmi_avi_infoframe_setup(hdmi);
+
+ /* Audio InfoFrame */
+ sh_hdmi_audio_infoframe_setup(hdmi);
+
+ /* Gamut Metadata packet */
+ sh_hdmi_gamut_metadata_setup(hdmi);
+
+ /* Audio Content Protection (ACP) Packet */
+ sh_hdmi_acp_setup(hdmi);
+
+ /* ISRC1 Packet */
+ sh_hdmi_isrc1_setup(hdmi);
+
+ /* ISRC2 Packet */
+ sh_hdmi_isrc2_setup(hdmi);
+
+ /*
+ * Control packet auto send with VSYNC control: auto send
+ * General control, Gamut metadata, ISRC, and ACP packets
+ */
+ hdmi_write(hdmi, 0x8E, HDMI_CTRL_PKT_AUTO_SEND);
+
+ /* FIXME */
+ msleep(10);
+
+ /* PS mode b->d, reset PLLA and PLLB */
+ hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL);
+
+ udelay(10);
+
+ hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL);
+}
+
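/*
 * For reference (not part of the patch): hdmi_write()/hdmi_read(), used by
 * every helper above, are defined earlier in this file (outside this
 * excerpt). They are most likely thin 8-bit MMIO accessors along these
 * lines -- a sketch, not the verbatim implementation:
 */
static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
{
	iowrite8(data, hdmi->base + reg);
}

static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
{
	return ioread8(hdmi->base + reg);
}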
+static void sh_hdmi_read_edid(struct sh_hdmi *hdmi)
+{
+ struct fb_var_screeninfo *var = &hdmi->var;
+ struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+ struct fb_videomode *lcd_cfg = &pdata->lcd_chan->lcd_cfg;
+ unsigned long height = var->height, width = var->width;
+ int i;
+ u8 edid[128];
+
+ /* Read EDID */
+ pr_debug("Read back EDID code:");
+ for (i = 0; i < 128; i++) {
+ edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
+#ifdef DEBUG
+ if ((i % 16) == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "%02X | %02X", i, edid[i]);
+ } else {
+ printk(KERN_CONT " %02X", edid[i]);
+ }
+#endif
+ }
+#ifdef DEBUG
+ printk(KERN_CONT "\n");
+#endif
+ fb_parse_edid(edid, var);
+ pr_debug("%u-%u-%u-%u x %u-%u-%u-%u @ %lu kHz monitor detected\n",
+ var->left_margin, var->xres, var->right_margin, var->hsync_len,
+ var->upper_margin, var->yres, var->lower_margin, var->vsync_len,
+ PICOS2KHZ(var->pixclock));
+
+ /* FIXME: Use user-provided configuration instead of EDID */
+ var->width = width;
+ var->xres = lcd_cfg->xres;
+ var->xres_virtual = lcd_cfg->xres;
+ var->left_margin = lcd_cfg->left_margin;
+ var->right_margin = lcd_cfg->right_margin;
+ var->hsync_len = lcd_cfg->hsync_len;
+ var->height = height;
+ var->yres = lcd_cfg->yres;
+ var->yres_virtual = lcd_cfg->yres * 2;
+ var->upper_margin = lcd_cfg->upper_margin;
+ var->lower_margin = lcd_cfg->lower_margin;
+ var->vsync_len = lcd_cfg->vsync_len;
+ var->sync = lcd_cfg->sync;
+ var->pixclock = lcd_cfg->pixclock;
+
+ hdmi_external_video_param(hdmi);
+}
+
+static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
+{
+ struct sh_hdmi *hdmi = dev_id;
+ u8 status1, status2, mask1, mask2;
+
+ /* mode_b and PLLA and PLLB reset */
+ hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL);
+
+ /* How long shall reset be held? */
+ udelay(10);
+
+ /* mode_b and PLLA and PLLB reset release */
+ hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL);
+
+ status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
+ status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
+
+ mask1 = hdmi_read(hdmi, HDMI_INTERRUPT_MASK_1);
+ mask2 = hdmi_read(hdmi, HDMI_INTERRUPT_MASK_2);
+
+ /* Correct would be to ack only set bits, but the datasheet requires 0xff */
+ hdmi_write(hdmi, 0xFF, HDMI_INTERRUPT_STATUS_1);
+ hdmi_write(hdmi, 0xFF, HDMI_INTERRUPT_STATUS_2);
+
+ if (printk_ratelimit())
+ pr_debug("IRQ #%d: Status #1: 0x%x & 0x%x, #2: 0x%x & 0x%x\n",
+ irq, status1, mask1, status2, mask2);
+
+ if (!((status1 & mask1) | (status2 & mask2))) {
+ return IRQ_NONE;
+ } else if (status1 & 0xc0) {
+ u8 msens;
+
+ /* Datasheet specifies 10ms... */
+ udelay(500);
+
+ msens = hdmi_read(hdmi, HDMI_HOT_PLUG_MSENS_STATUS);
+ pr_debug("MSENS 0x%x\n", msens);
+ /* Check if hot plug & MSENS pin status are both high */
+ if ((msens & 0xC0) == 0xC0) {
+ /* Display plug in */
+ hdmi->hp_state = HDMI_HOTPLUG_CONNECTED;
+
+ /* Set EDID word address */
+ hdmi_write(hdmi, 0x00, HDMI_EDID_WORD_ADDRESS);
+ /* Set EDID segment pointer */
+ hdmi_write(hdmi, 0x00, HDMI_EDID_SEGMENT_POINTER);
+ /* Enable EDID interrupt */
+ hdmi_write(hdmi, 0xC6, HDMI_INTERRUPT_MASK_1);
+ } else if (!(status1 & 0x80)) {
+ /* Display unplug, beware multiple interrupts */
+ if (hdmi->hp_state != HDMI_HOTPLUG_DISCONNECTED)
+ schedule_delayed_work(&hdmi->edid_work, 0);
+
+ hdmi->hp_state = HDMI_HOTPLUG_DISCONNECTED;
+ /* display_off will switch back to mode_a */
+ }
+ } else if (status1 & 2) {
+ /* EDID error interrupt: retry */
+ /* Set EDID word address */
+ hdmi_write(hdmi, 0x00, HDMI_EDID_WORD_ADDRESS);
+ /* Set EDID segment pointer */
+ hdmi_write(hdmi, 0x00, HDMI_EDID_SEGMENT_POINTER);
+ } else if (status1 & 4) {
+ /* Disable EDID interrupt */
+ hdmi_write(hdmi, 0xC0, HDMI_INTERRUPT_MASK_1);
+ hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
+ schedule_delayed_work(&hdmi->edid_work, msecs_to_jiffies(10));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void hdmi_display_on(void *arg, struct fb_info *info)
+{
+ struct sh_hdmi *hdmi = arg;
+ struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+
+ if (info->var.xres != 1280 || info->var.yres != 720) {
+ dev_warn(info->device, "Unsupported framebuffer geometry %ux%u\n",
+ info->var.xres, info->var.yres);
+ return;
+ }
+
+ pr_debug("%s(%p): state %x\n", __func__, pdata->lcd_dev, info->state);
+ /*
+ * FIXME: not a good place to store fb_info. And we cannot nullify it
+ * even on monitor disconnect. What should the lifecycle be?
+ */
+ hdmi->info = info;
+ switch (hdmi->hp_state) {
+ case HDMI_HOTPLUG_EDID_DONE:
+ /* PS mode d->e. All functions are active */
+ hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL);
+ pr_debug("HDMI running\n");
+ break;
+ case HDMI_HOTPLUG_DISCONNECTED:
+ info->state = FBINFO_STATE_SUSPENDED;
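+ /* fall through */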
+ default:
+ hdmi->var = info->var;
+ }
+}
+
+static void hdmi_display_off(void *arg)
+{
+ struct sh_hdmi *hdmi = arg;
+ struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+
+ pr_debug("%s(%p)\n", __func__, pdata->lcd_dev);
+ /* PS mode e->a */
+ hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL);
+}
+
+/* Hotplug interrupt occurred, read EDID */
+static void edid_work_fn(struct work_struct *work)
+{
+ struct sh_hdmi *hdmi = container_of(work, struct sh_hdmi, edid_work.work);
+ struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
+
+ pr_debug("%s(%p): begin, hotplug status %d\n", __func__,
+ pdata->lcd_dev, hdmi->hp_state);
+
+ if (!pdata->lcd_dev)
+ return;
+
+ if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
+ pm_runtime_get_sync(hdmi->dev);
+ /* A device has been plugged in */
+ sh_hdmi_read_edid(hdmi);
+ msleep(10);
+ sh_hdmi_configure(hdmi);
+ /* Switched to another (d) power-save mode */
+ msleep(10);
+
+ if (!hdmi->info)
+ return;
+
+ acquire_console_sem();
+
+ /* HDMI plug in */
+ hdmi->info->var = hdmi->var;
+ if (hdmi->info->state != FBINFO_STATE_RUNNING)
+ fb_set_suspend(hdmi->info, 0);
+ else
+ hdmi_display_on(hdmi, hdmi->info);
+
+ release_console_sem();
+ } else {
+ if (!hdmi->info)
+ return;
+
+ acquire_console_sem();
+
+ /* HDMI disconnect */
+ fb_set_suspend(hdmi->info, 1);
+
+ release_console_sem();
+ pm_runtime_put(hdmi->dev);
+ }
+
+ pr_debug("%s(%p): end\n", __func__, pdata->lcd_dev);
+}
+
+static int __init sh_hdmi_probe(struct platform_device *pdev)
+{
+ struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0), ret;
+ struct sh_hdmi *hdmi;
+ long rate;
+
+ if (!res || !pdata || irq < 0)
+ return -ENODEV;
+
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi) {
+ dev_err(&pdev->dev, "Cannot allocate device data\n");
+ return -ENOMEM;
+ }
+
+ hdmi->dev = &pdev->dev;
+
+ hdmi->hdmi_clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(hdmi->hdmi_clk)) {
+ ret = PTR_ERR(hdmi->hdmi_clk);
+ dev_err(&pdev->dev, "Unable to get clock: %d\n", ret);
+ goto egetclk;
+ }
+
+ rate = PICOS2KHZ(pdata->lcd_chan->lcd_cfg.pixclock) * 1000;
+
+ rate = clk_round_rate(hdmi->hdmi_clk, rate);
+ if (rate < 0) {
+ ret = rate;
+ dev_err(&pdev->dev, "Cannot get suitable rate: %ld\n", rate);
+ goto erate;
+ }
+
+ ret = clk_set_rate(hdmi->hdmi_clk, rate);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot set rate %ld: %d\n", rate, ret);
+ goto erate;
+ }
+
+ pr_debug("HDMI set frequency %lu\n", rate);
+
+ ret = clk_enable(hdmi->hdmi_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot enable clock: %d\n", ret);
+ goto eclkenable;
+ }
+
+ dev_info(&pdev->dev, "Enabled HDMI clock at %luHz\n", rate);
+
+ if (!request_mem_region(res->start, resource_size(res), dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "HDMI register region already claimed\n");
+ ret = -EBUSY;
+ goto ereqreg;
+ }
+
+ hdmi->base = ioremap(res->start, resource_size(res));
+ if (!hdmi->base) {
+ dev_err(&pdev->dev, "Cannot ioremap HDMI register region\n");
+ ret = -ENOMEM;
+ goto emap;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+#if 1
+ /* Product and revision IDs are 0 in sh-mobile version */
+ dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
+ hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
+#endif
+
+ /* Set up LCDC callbacks */
+ pdata->lcd_chan->board_cfg.board_data = hdmi;
+ pdata->lcd_chan->board_cfg.display_on = hdmi_display_on;
+ pdata->lcd_chan->board_cfg.display_off = hdmi_display_off;
+
+ INIT_DELAYED_WORK(&hdmi->edid_work, edid_work_fn);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ ret = request_irq(irq, sh_hdmi_hotplug, 0,
+ dev_name(&pdev->dev), hdmi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to request irq: %d\n", ret);
+ goto ereqirq;
+ }
+
+ return 0;
+
+ereqirq:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(hdmi->base);
+emap:
+ release_mem_region(res->start, resource_size(res));
+ereqreg:
+ clk_disable(hdmi->hdmi_clk);
+eclkenable:
+erate:
+ clk_put(hdmi->hdmi_clk);
+egetclk:
+ kfree(hdmi);
+
+ return ret;
+}
+
+static int __exit sh_hdmi_remove(struct platform_device *pdev)
+{
+ struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
+ struct sh_hdmi *hdmi = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ pdata->lcd_chan->board_cfg.display_on = NULL;
+ pdata->lcd_chan->board_cfg.display_off = NULL;
+ pdata->lcd_chan->board_cfg.board_data = NULL;
+
+ free_irq(irq, hdmi);
+ pm_runtime_disable(&pdev->dev);
+ cancel_delayed_work_sync(&hdmi->edid_work);
+ clk_disable(hdmi->hdmi_clk);
+ clk_put(hdmi->hdmi_clk);
+ iounmap(hdmi->base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(hdmi);
+
+ return 0;
+}
+
+static struct platform_driver sh_hdmi_driver = {
+ .remove = __exit_p(sh_hdmi_remove),
+ .driver = {
+ .name = "sh-mobile-hdmi",
+ },
+};
+
+static int __init sh_hdmi_init(void)
+{
+ return platform_driver_probe(&sh_hdmi_driver, sh_hdmi_probe);
+}
+module_init(sh_hdmi_init);
+
+static void __exit sh_hdmi_exit(void)
+{
+ platform_driver_unregister(&sh_hdmi_driver);
+}
+module_exit(sh_hdmi_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("SuperH / ARM-shmobile HDMI driver");
+MODULE_LICENSE("GPL v2");
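For context (not part of the patch): the probe routine expects sh_mobile_hdmi_info platform data whose lcd_chan points at the LCDC channel the HDMI block drives, and the driver installs its display_on/display_off hooks into that channel's board_cfg. A board file would therefore wire the two devices roughly as sketched below; the base address, IRQ number, the lcdc_device instance and the sh_mobile_lcdc_info field layout are illustrative assumptions, not taken from this patch.

static struct sh_mobile_lcdc_info lcdc_info = {
	.ch[0] = {
		.chan		= LCDC_CHAN_MAINLCD,
		.bpp		= 16,
		/* .lcd_cfg carries the 1280x720 timing the HDMI driver copies */
	},
};

static struct sh_mobile_hdmi_info hdmi_info = {
	/* point the HDMI driver at the LCDC channel it should configure */
	.lcd_chan	= &lcdc_info.ch[0],
	.lcd_dev	= &lcdc_device.dev,	/* hypothetical LCDC platform device */
};

static struct resource hdmi_resources[] = {
	[0] = {
		.name	= "HDMI",
		.start	= 0xe6be0000,	/* illustrative base address */
		.end	= 0xe6be00ff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 99,		/* illustrative IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device hdmi_device = {
	.name		= "sh-mobile-hdmi",
	.num_resources	= ARRAY_SIZE(hdmi_resources),
	.resource	= hdmi_resources,
	.dev		= {
		.platform_data	= &hdmi_info,
	},
};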
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 12c451a711e..d72075a9f01 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -56,6 +56,7 @@ static int lcdc_shared_regs[] = {
/* per-channel registers */
enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
LDSM2R, LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR,
+ LDHAJR,
NR_CH_REGS };
static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = {
@@ -74,6 +75,7 @@ static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = {
[LDVLNR] = 0x450,
[LDVSYNR] = 0x454,
[LDPMR] = 0x460,
+ [LDHAJR] = 0x4a0,
};
static unsigned long lcdc_offs_sublcd[NR_CH_REGS] = {
@@ -137,6 +139,7 @@ struct sh_mobile_lcdc_priv {
struct clk *dot_clk;
unsigned long lddckr;
struct sh_mobile_lcdc_chan ch[2];
+ struct notifier_block notifier;
unsigned long saved_shared_regs[NR_SHARED_REGS];
int started;
};
@@ -404,6 +407,56 @@ static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv,
lcdc_write(priv, _LDDCKSTPR, 1); /* stop dotclock */
}
+static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
+{
+ struct fb_var_screeninfo *var = &ch->info->var;
+ unsigned long h_total, hsync_pos;
+ u32 tmp;
+
+ tmp = ch->ldmt1r_value;
+ tmp |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28;
+ tmp |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? 1 << 26 : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? 1 << 25 : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? 1 << 24 : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? 1 << 17 : 0;
+ tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? 1 << 16 : 0;
+ lcdc_write_chan(ch, LDMT1R, tmp);
+
+ /* setup SYS bus */
+ lcdc_write_chan(ch, LDMT2R, ch->cfg.sys_bus_cfg.ldmt2r);
+ lcdc_write_chan(ch, LDMT3R, ch->cfg.sys_bus_cfg.ldmt3r);
+
+ /* horizontal configuration */
+ h_total = var->xres + var->hsync_len +
+ var->left_margin + var->right_margin;
+ tmp = h_total / 8; /* HTCN */
+ tmp |= (var->xres / 8) << 16; /* HDCN */
+ lcdc_write_chan(ch, LDHCNR, tmp);
+
+ hsync_pos = var->xres + var->right_margin;
+ tmp = hsync_pos / 8; /* HSYNP */
+ tmp |= (var->hsync_len / 8) << 16; /* HSYNW */
+ lcdc_write_chan(ch, LDHSYNR, tmp);
+
+ /* vertical configuration */
+ tmp = var->yres + var->vsync_len +
+ var->upper_margin + var->lower_margin; /* VTLN */
+ tmp |= var->yres << 16; /* VDLN */
+ lcdc_write_chan(ch, LDVLNR, tmp);
+
+ tmp = var->yres + var->lower_margin; /* VSYNP */
+ tmp |= var->vsync_len << 16; /* VSYNW */
+ lcdc_write_chan(ch, LDVSYNR, tmp);
+
+ /* Adjust horizontal synchronisation for HDMI */
+ tmp = ((var->xres & 7) << 24) |
+ ((h_total & 7) << 16) |
+ ((var->hsync_len & 7) << 8) |
+ hsync_pos;
+ lcdc_write_chan(ch, LDHAJR, tmp);
+}
+
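/*
 * For reference (not part of the patch): with an illustrative CEA 720p60
 * timing (xres = 1280, hsync_len = 40, left_margin = 220, right_margin = 110)
 * the packing above gives h_total = 1650, LDHCNR = (1280/8) << 16 | 1650/8 =
 * 0x00A000CE and LDHSYNR = (40/8) << 16 | (1280 + 110)/8 = 0x000500AD; the
 * new LDHAJR write then supplies the sub-8-pixel remainders that those /8
 * divisions discard, so that timings which are not multiples of 8 pixels are
 * not rounded away.
 */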
static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
{
struct sh_mobile_lcdc_chan *ch;
@@ -470,49 +523,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
if (!ch->enabled)
continue;
- tmp = ch->ldmt1r_value;
- tmp |= (lcd_cfg->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28;
- tmp |= (lcd_cfg->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? 1 << 26 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? 1 << 25 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? 1 << 24 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? 1 << 17 : 0;
- tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? 1 << 16 : 0;
- lcdc_write_chan(ch, LDMT1R, tmp);
-
- /* setup SYS bus */
- lcdc_write_chan(ch, LDMT2R, ch->cfg.sys_bus_cfg.ldmt2r);
- lcdc_write_chan(ch, LDMT3R, ch->cfg.sys_bus_cfg.ldmt3r);
-
- /* horizontal configuration */
- tmp = lcd_cfg->xres + lcd_cfg->hsync_len;
- tmp += lcd_cfg->left_margin;
- tmp += lcd_cfg->right_margin;
- tmp /= 8; /* HTCN */
- tmp |= (lcd_cfg->xres / 8) << 16; /* HDCN */
- lcdc_write_chan(ch, LDHCNR, tmp);
-
- tmp = lcd_cfg->xres;
- tmp += lcd_cfg->right_margin;
- tmp /= 8; /* HSYNP */
- tmp |= (lcd_cfg->hsync_len / 8) << 16; /* HSYNW */
- lcdc_write_chan(ch, LDHSYNR, tmp);
+ sh_mobile_lcdc_geometry(ch);
/* power supply */
lcdc_write_chan(ch, LDPMR, 0);
- /* vertical configuration */
- tmp = lcd_cfg->yres + lcd_cfg->vsync_len;
- tmp += lcd_cfg->upper_margin;
- tmp += lcd_cfg->lower_margin; /* VTLN */
- tmp |= lcd_cfg->yres << 16; /* VDLN */
- lcdc_write_chan(ch, LDVLNR, tmp);
-
- tmp = lcd_cfg->yres;
- tmp += lcd_cfg->lower_margin; /* VSYNP */
- tmp |= lcd_cfg->vsync_len << 16; /* VSYNW */
- lcdc_write_chan(ch, LDVSYNR, tmp);
-
board_cfg = &ch->cfg.board_cfg;
if (board_cfg->setup_sys)
ret = board_cfg->setup_sys(board_cfg->board_data, ch,
@@ -577,7 +592,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
board_cfg = &ch->cfg.board_cfg;
if (board_cfg->display_on)
- board_cfg->display_on(board_cfg->board_data);
+ board_cfg->display_on(board_cfg->board_data, ch->info);
}
return 0;
@@ -943,6 +958,62 @@ static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
.runtime_resume = sh_mobile_lcdc_runtime_resume,
};
+static int sh_mobile_lcdc_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct fb_event *event = data;
+ struct fb_info *info = event->info;
+ struct sh_mobile_lcdc_chan *ch = info->par;
+ struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg;
+ struct fb_var_screeninfo *var;
+
+ if (&ch->lcdc->notifier != nb)
+ return 0;
+
+ dev_dbg(info->dev, "%s(): action = %lu, data = %p\n",
+ __func__, action, event->data);
+
+ switch(action) {
+ case FB_EVENT_SUSPEND:
+ if (board_cfg->display_off)
+ board_cfg->display_off(board_cfg->board_data);
+ pm_runtime_put(info->device);
+ break;
+ case FB_EVENT_RESUME:
+ var = &info->var;
+
+ /* HDMI must be enabled before LCDC configuration */
+ if (board_cfg->display_on)
+ board_cfg->display_on(board_cfg->board_data, ch->info);
+
+ /* Check if the new display is not in our modelist */
+ if (ch->info->modelist.next &&
+ !fb_match_mode(var, &ch->info->modelist)) {
+ struct fb_videomode mode;
+ int ret;
+
+ /* Can we handle this display? */
+ if (var->xres > ch->cfg.lcd_cfg.xres ||
+ var->yres > ch->cfg.lcd_cfg.yres)
+ return -ENOMEM;
+
+ /* Add to the modelist */
+ fb_var_to_videomode(&mode, var);
+ ret = fb_add_videomode(&mode, &ch->info->modelist);
+ if (ret < 0)
+ return ret;
+ }
+
+ pm_runtime_get_sync(info->device);
+
+ sh_mobile_lcdc_geometry(ch);
+
+ break;
+ }
+
+ return 0;
+}
+
static int sh_mobile_lcdc_remove(struct platform_device *pdev);
static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
@@ -1020,15 +1091,19 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
goto err1;
}
+ priv->base = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->base)
+ goto err1;
+
error = sh_mobile_lcdc_setup_clocks(pdev, pdata->clock_source, priv);
if (error) {
dev_err(&pdev->dev, "unable to setup clocks\n");
goto err1;
}
- priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1);
-
for (i = 0; i < j; i++) {
+ struct fb_var_screeninfo *var;
+ struct fb_videomode *lcd_cfg;
cfg = &priv->ch[i].cfg;
priv->ch[i].info = framebuffer_alloc(0, &pdev->dev);
@@ -1039,22 +1114,33 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
}
info = priv->ch[i].info;
+ var = &info->var;
+ lcd_cfg = &cfg->lcd_cfg;
info->fbops = &sh_mobile_lcdc_ops;
- info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres;
- info->var.yres = cfg->lcd_cfg.yres;
+ var->xres = var->xres_virtual = lcd_cfg->xres;
+ var->yres = lcd_cfg->yres;
/* Default Y virtual resolution is 2x panel size */
- info->var.yres_virtual = info->var.yres * 2;
- info->var.width = cfg->lcd_size_cfg.width;
- info->var.height = cfg->lcd_size_cfg.height;
- info->var.activate = FB_ACTIVATE_NOW;
- error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp);
+ var->yres_virtual = var->yres * 2;
+ var->width = cfg->lcd_size_cfg.width;
+ var->height = cfg->lcd_size_cfg.height;
+ var->activate = FB_ACTIVATE_NOW;
+ var->left_margin = lcd_cfg->left_margin;
+ var->right_margin = lcd_cfg->right_margin;
+ var->upper_margin = lcd_cfg->upper_margin;
+ var->lower_margin = lcd_cfg->lower_margin;
+ var->hsync_len = lcd_cfg->hsync_len;
+ var->vsync_len = lcd_cfg->vsync_len;
+ var->sync = lcd_cfg->sync;
+ var->pixclock = lcd_cfg->pixclock;
+
+ error = sh_mobile_lcdc_set_bpp(var, cfg->bpp);
if (error)
break;
info->fix = sh_mobile_lcdc_fix;
- info->fix.line_length = cfg->lcd_cfg.xres * (cfg->bpp / 8);
+ info->fix.line_length = lcd_cfg->xres * (cfg->bpp / 8);
info->fix.smem_len = info->fix.line_length *
- info->var.yres_virtual;
+ var->yres_virtual;
buf = dma_alloc_coherent(&pdev->dev, info->fix.smem_len,
&priv->ch[i].dma_handle, GFP_KERNEL);
@@ -1119,10 +1205,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
ch->cfg.bpp);
/* deferred io mode: disable clock to save power */
- if (info->fbdefio)
+ if (info->fbdefio || info->state == FBINFO_STATE_SUSPENDED)
sh_mobile_lcdc_clk_off(priv);
}
+ /* Failure ignored */
+ priv->notifier.notifier_call = sh_mobile_lcdc_notify;
+ fb_register_client(&priv->notifier);
+
return 0;
err1:
sh_mobile_lcdc_remove(pdev);
@@ -1136,6 +1226,8 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
struct fb_info *info;
int i;
+ fb_unregister_client(&priv->notifier);
+
for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
if (priv->ch[i].info && priv->ch[i].info->dev)
unregister_framebuffer(priv->ch[i].info);
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf1727a2..b52f8e4ef1f 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
break;
case FBIOGET_VBLANK:
+
+ memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
sisvbblank.count = 0;
sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index 489b44e8db8..5dbe06af222 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -111,7 +111,7 @@ static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
return 0;
}
-static int __devinit gfb_probe(struct of_device *op,
+static int __devinit gfb_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -172,7 +172,7 @@ err_out:
return err;
}
-static int __devexit gfb_remove(struct of_device *op)
+static int __devexit gfb_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct gfb_info *gp = info->par;
@@ -213,12 +213,12 @@ static int __init gfb_init(void)
if (fb_get_options("gfb", NULL))
return -ENODEV;
- return of_register_driver(&gfb_driver, &of_bus_type);
+ return of_register_platform_driver(&gfb_driver);
}
static void __exit gfb_exit(void)
{
- of_unregister_driver(&gfb_driver);
+ of_unregister_platform_driver(&gfb_driver);
}
module_init(gfb_init);
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index cc039b33d2d..77ad27955cf 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -342,7 +342,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
info->fix.accel = FB_ACCEL_SUN_TCX;
}
-static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
+static void tcx_unmap_regs(struct platform_device *op, struct fb_info *info,
struct tcx_par *par)
{
if (par->tec)
@@ -362,7 +362,7 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
info->screen_base, info->fix.smem_len);
}
-static int __devinit tcx_probe(struct of_device *op,
+static int __devinit tcx_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
@@ -486,7 +486,7 @@ out_err:
return err;
}
-static int __devexit tcx_remove(struct of_device *op)
+static int __devexit tcx_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct tcx_par *par = info->par;
@@ -526,12 +526,12 @@ static int __init tcx_init(void)
if (fb_get_options("tcxfb", NULL))
return -ENODEV;
- return of_register_driver(&tcx_driver, &of_bus_type);
+ return of_register_platform_driver(&tcx_driver);
}
static void __exit tcx_exit(void)
{
- of_unregister_driver(&tcx_driver);
+ of_unregister_platform_driver(&tcx_driver);
}
module_init(tcx_init);
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 98054839004..3ee5e63cfa4 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1571,8 +1571,8 @@ out_err_iobase:
if (default_par->mtrr_handle >= 0)
mtrr_del(default_par->mtrr_handle, info->fix.smem_start,
info->fix.smem_len);
- release_mem_region(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
+ release_region(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
out_err_screenbase:
if (info->screen_base)
iounmap(info->screen_base);
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7b8839ebf3c..52ec0959d46 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -1977,8 +1977,7 @@ static void __devexit uvesafb_exit(void)
module_exit(uvesafb_exit);
-#define param_get_scroll NULL
-static int param_set_scroll(const char *val, struct kernel_param *kp)
+static int param_set_scroll(const char *val, const struct kernel_param *kp)
{
ypan = 0;
@@ -1993,7 +1992,9 @@ static int param_set_scroll(const char *val, struct kernel_param *kp)
return 0;
}
-
+static struct kernel_param_ops param_ops_scroll = {
+ .set = param_set_scroll,
+};
#define param_check_scroll(name, p) __param_check(name, p, void)
module_param_named(scroll, ypan, scroll, 0);
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index d9b6e06e070..ef1f3de2e05 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -160,7 +160,6 @@ struct lvds_setting_information {
int v_active;
int bpp;
int refresh_rate;
- int get_lcd_size_method;
int lcd_panel_id;
int lcd_panel_hres;
int lcd_panel_vres;
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index b996803ae2c..7dcb4d5bb9c 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -23,143 +23,341 @@
#include "global.h"
static struct pll_map pll_value[] = {
- {CLK_25_175M, CLE266_PLL_25_175M, K800_PLL_25_175M,
- CX700_25_175M, VX855_25_175M},
- {CLK_29_581M, CLE266_PLL_29_581M, K800_PLL_29_581M,
- CX700_29_581M, VX855_29_581M},
- {CLK_26_880M, CLE266_PLL_26_880M, K800_PLL_26_880M,
- CX700_26_880M, VX855_26_880M},
- {CLK_31_490M, CLE266_PLL_31_490M, K800_PLL_31_490M,
- CX700_31_490M, VX855_31_490M},
- {CLK_31_500M, CLE266_PLL_31_500M, K800_PLL_31_500M,
- CX700_31_500M, VX855_31_500M},
- {CLK_31_728M, CLE266_PLL_31_728M, K800_PLL_31_728M,
- CX700_31_728M, VX855_31_728M},
- {CLK_32_668M, CLE266_PLL_32_668M, K800_PLL_32_668M,
- CX700_32_668M, VX855_32_668M},
- {CLK_36_000M, CLE266_PLL_36_000M, K800_PLL_36_000M,
- CX700_36_000M, VX855_36_000M},
- {CLK_40_000M, CLE266_PLL_40_000M, K800_PLL_40_000M,
- CX700_40_000M, VX855_40_000M},
- {CLK_41_291M, CLE266_PLL_41_291M, K800_PLL_41_291M,
- CX700_41_291M, VX855_41_291M},
- {CLK_43_163M, CLE266_PLL_43_163M, K800_PLL_43_163M,
- CX700_43_163M, VX855_43_163M},
- {CLK_45_250M, CLE266_PLL_45_250M, K800_PLL_45_250M,
- CX700_45_250M, VX855_45_250M},
- {CLK_46_000M, CLE266_PLL_46_000M, K800_PLL_46_000M,
- CX700_46_000M, VX855_46_000M},
- {CLK_46_996M, CLE266_PLL_46_996M, K800_PLL_46_996M,
- CX700_46_996M, VX855_46_996M},
- {CLK_48_000M, CLE266_PLL_48_000M, K800_PLL_48_000M,
- CX700_48_000M, VX855_48_000M},
- {CLK_48_875M, CLE266_PLL_48_875M, K800_PLL_48_875M,
- CX700_48_875M, VX855_48_875M},
- {CLK_49_500M, CLE266_PLL_49_500M, K800_PLL_49_500M,
- CX700_49_500M, VX855_49_500M},
- {CLK_52_406M, CLE266_PLL_52_406M, K800_PLL_52_406M,
- CX700_52_406M, VX855_52_406M},
- {CLK_52_977M, CLE266_PLL_52_977M, K800_PLL_52_977M,
- CX700_52_977M, VX855_52_977M},
- {CLK_56_250M, CLE266_PLL_56_250M, K800_PLL_56_250M,
- CX700_56_250M, VX855_56_250M},
- {CLK_57_275M, 0, 0, 0, VX855_57_275M},
- {CLK_60_466M, CLE266_PLL_60_466M, K800_PLL_60_466M,
- CX700_60_466M, VX855_60_466M},
- {CLK_61_500M, CLE266_PLL_61_500M, K800_PLL_61_500M,
- CX700_61_500M, VX855_61_500M},
- {CLK_65_000M, CLE266_PLL_65_000M, K800_PLL_65_000M,
- CX700_65_000M, VX855_65_000M},
- {CLK_65_178M, CLE266_PLL_65_178M, K800_PLL_65_178M,
- CX700_65_178M, VX855_65_178M},
- {CLK_66_750M, CLE266_PLL_66_750M, K800_PLL_66_750M,
- CX700_66_750M, VX855_66_750M},
- {CLK_68_179M, CLE266_PLL_68_179M, K800_PLL_68_179M,
- CX700_68_179M, VX855_68_179M},
- {CLK_69_924M, CLE266_PLL_69_924M, K800_PLL_69_924M,
- CX700_69_924M, VX855_69_924M},
- {CLK_70_159M, CLE266_PLL_70_159M, K800_PLL_70_159M,
- CX700_70_159M, VX855_70_159M},
- {CLK_72_000M, CLE266_PLL_72_000M, K800_PLL_72_000M,
- CX700_72_000M, VX855_72_000M},
- {CLK_78_750M, CLE266_PLL_78_750M, K800_PLL_78_750M,
- CX700_78_750M, VX855_78_750M},
- {CLK_80_136M, CLE266_PLL_80_136M, K800_PLL_80_136M,
- CX700_80_136M, VX855_80_136M},
- {CLK_83_375M, CLE266_PLL_83_375M, K800_PLL_83_375M,
- CX700_83_375M, VX855_83_375M},
- {CLK_83_950M, CLE266_PLL_83_950M, K800_PLL_83_950M,
- CX700_83_950M, VX855_83_950M},
- {CLK_84_750M, CLE266_PLL_84_750M, K800_PLL_84_750M,
- CX700_84_750M, VX855_84_750M},
- {CLK_85_860M, CLE266_PLL_85_860M, K800_PLL_85_860M,
- CX700_85_860M, VX855_85_860M},
- {CLK_88_750M, CLE266_PLL_88_750M, K800_PLL_88_750M,
- CX700_88_750M, VX855_88_750M},
- {CLK_94_500M, CLE266_PLL_94_500M, K800_PLL_94_500M,
- CX700_94_500M, VX855_94_500M},
- {CLK_97_750M, CLE266_PLL_97_750M, K800_PLL_97_750M,
- CX700_97_750M, VX855_97_750M},
- {CLK_101_000M, CLE266_PLL_101_000M, K800_PLL_101_000M,
- CX700_101_000M, VX855_101_000M},
- {CLK_106_500M, CLE266_PLL_106_500M, K800_PLL_106_500M,
- CX700_106_500M, VX855_106_500M},
- {CLK_108_000M, CLE266_PLL_108_000M, K800_PLL_108_000M,
- CX700_108_000M, VX855_108_000M},
- {CLK_113_309M, CLE266_PLL_113_309M, K800_PLL_113_309M,
- CX700_113_309M, VX855_113_309M},
- {CLK_118_840M, CLE266_PLL_118_840M, K800_PLL_118_840M,
- CX700_118_840M, VX855_118_840M},
- {CLK_119_000M, CLE266_PLL_119_000M, K800_PLL_119_000M,
- CX700_119_000M, VX855_119_000M},
- {CLK_121_750M, CLE266_PLL_121_750M, K800_PLL_121_750M,
- CX700_121_750M, 0},
- {CLK_125_104M, CLE266_PLL_125_104M, K800_PLL_125_104M,
- CX700_125_104M, 0},
- {CLK_133_308M, CLE266_PLL_133_308M, K800_PLL_133_308M,
- CX700_133_308M, 0},
- {CLK_135_000M, CLE266_PLL_135_000M, K800_PLL_135_000M,
- CX700_135_000M, VX855_135_000M},
- {CLK_136_700M, CLE266_PLL_136_700M, K800_PLL_136_700M,
- CX700_136_700M, VX855_136_700M},
- {CLK_138_400M, CLE266_PLL_138_400M, K800_PLL_138_400M,
- CX700_138_400M, VX855_138_400M},
- {CLK_146_760M, CLE266_PLL_146_760M, K800_PLL_146_760M,
- CX700_146_760M, VX855_146_760M},
- {CLK_153_920M, CLE266_PLL_153_920M, K800_PLL_153_920M,
- CX700_153_920M, VX855_153_920M},
- {CLK_156_000M, CLE266_PLL_156_000M, K800_PLL_156_000M,
- CX700_156_000M, VX855_156_000M},
- {CLK_157_500M, CLE266_PLL_157_500M, K800_PLL_157_500M,
- CX700_157_500M, VX855_157_500M},
- {CLK_162_000M, CLE266_PLL_162_000M, K800_PLL_162_000M,
- CX700_162_000M, VX855_162_000M},
- {CLK_187_000M, CLE266_PLL_187_000M, K800_PLL_187_000M,
- CX700_187_000M, VX855_187_000M},
- {CLK_193_295M, CLE266_PLL_193_295M, K800_PLL_193_295M,
- CX700_193_295M, VX855_193_295M},
- {CLK_202_500M, CLE266_PLL_202_500M, K800_PLL_202_500M,
- CX700_202_500M, VX855_202_500M},
- {CLK_204_000M, CLE266_PLL_204_000M, K800_PLL_204_000M,
- CX700_204_000M, VX855_204_000M},
- {CLK_218_500M, CLE266_PLL_218_500M, K800_PLL_218_500M,
- CX700_218_500M, VX855_218_500M},
- {CLK_234_000M, CLE266_PLL_234_000M, K800_PLL_234_000M,
- CX700_234_000M, VX855_234_000M},
- {CLK_267_250M, CLE266_PLL_267_250M, K800_PLL_267_250M,
- CX700_267_250M, VX855_267_250M},
- {CLK_297_500M, CLE266_PLL_297_500M, K800_PLL_297_500M,
- CX700_297_500M, VX855_297_500M},
- {CLK_74_481M, CLE266_PLL_74_481M, K800_PLL_74_481M,
- CX700_74_481M, VX855_74_481M},
- {CLK_172_798M, CLE266_PLL_172_798M, K800_PLL_172_798M,
- CX700_172_798M, VX855_172_798M},
- {CLK_122_614M, CLE266_PLL_122_614M, K800_PLL_122_614M,
- CX700_122_614M, VX855_122_614M},
- {CLK_74_270M, CLE266_PLL_74_270M, K800_PLL_74_270M,
- CX700_74_270M, 0},
- {CLK_148_500M, CLE266_PLL_148_500M, K800_PLL_148_500M,
- CX700_148_500M, VX855_148_500M}
+ {25175000,
+ {99, 7, 3},
+ {85, 3, 4}, /* ignoring bit difference: 0x00008000 */
+ {141, 5, 4},
+ {141, 5, 4} },
+ {29581000,
+ {33, 4, 2},
+ {66, 2, 4}, /* ignoring bit difference: 0x00808000 */
+ {166, 5, 4}, /* ignoring bit difference: 0x00008000 */
+ {165, 5, 4} },
+ {26880000,
+ {15, 4, 1},
+ {30, 2, 3}, /* ignoring bit difference: 0x00808000 */
+ {150, 5, 4},
+ {150, 5, 4} },
+ {31500000,
+ {53, 3, 3}, /* ignoring bit difference: 0x00008000 */
+ {141, 4, 4}, /* ignoring bit difference: 0x00008000 */
+ {176, 5, 4},
+ {176, 5, 4} },
+ {31728000,
+ {31, 7, 1},
+ {177, 5, 4}, /* ignoring bit difference: 0x00008000 */
+ {177, 5, 4},
+ {142, 4, 4} },
+ {32688000,
+ {73, 4, 3},
+ {146, 4, 4}, /* ignoring bit difference: 0x00008000 */
+ {183, 5, 4},
+ {146, 4, 4} },
+ {36000000,
+ {101, 5, 3}, /* ignoring bit difference: 0x00008000 */
+ {161, 4, 4}, /* ignoring bit difference: 0x00008000 */
+ {202, 5, 4},
+ {161, 4, 4} },
+ {40000000,
+ {89, 4, 3},
+ {89, 4, 3}, /* ignoring bit difference: 0x00008000 */
+ {112, 5, 3},
+ {112, 5, 3} },
+ {41291000,
+ {23, 4, 1},
+ {69, 3, 3}, /* ignoring bit difference: 0x00008000 */
+ {115, 5, 3},
+ {115, 5, 3} },
+ {43163000,
+ {121, 5, 3},
+ {121, 5, 3}, /* ignoring bit difference: 0x00008000 */
+ {121, 5, 3},
+ {121, 5, 3} },
+ {45250000,
+ {127, 5, 3},
+ {127, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {127, 5, 3},
+ {127, 5, 3} },
+ {46000000,
+ {90, 7, 2},
+ {103, 4, 3}, /* ignoring bit difference: 0x00008000 */
+ {129, 5, 3},
+ {103, 4, 3} },
+ {46996000,
+ {105, 4, 3}, /* ignoring bit difference: 0x00008000 */
+ {131, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {131, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {105, 4, 3} },
+ {48000000,
+ {67, 20, 0},
+ {134, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {134, 5, 3},
+ {134, 5, 3} },
+ {48875000,
+ {99, 29, 0},
+ {82, 3, 3}, /* ignoring bit difference: 0x00808000 */
+ {82, 3, 3}, /* ignoring bit difference: 0x00808000 */
+ {137, 5, 3} },
+ {49500000,
+ {83, 6, 2},
+ {83, 3, 3}, /* ignoring bit difference: 0x00008000 */
+ {138, 5, 3},
+ {83, 3, 3} },
+ {52406000,
+ {117, 4, 3},
+ {117, 4, 3}, /* ignoring bit difference: 0x00008000 */
+ {117, 4, 3},
+ {88, 3, 3} },
+ {52977000,
+ {37, 5, 1},
+ {148, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {148, 5, 3},
+ {148, 5, 3} },
+ {56250000,
+ {55, 7, 1}, /* ignoring bit difference: 0x00008000 */
+ {126, 4, 3}, /* ignoring bit difference: 0x00008000 */
+ {157, 5, 3},
+ {157, 5, 3} },
+ {57275000,
+ {0, 0, 0},
+ {2, 2, 0},
+ {2, 2, 0},
+ {157, 5, 3} }, /* ignoring bit difference: 0x00808000 */
+ {60466000,
+ {76, 9, 1},
+ {169, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {169, 5, 3}, /* FIXED: old = {72, 2, 3} */
+ {169, 5, 3} },
+ {61500000,
+ {86, 20, 0},
+ {172, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {172, 5, 3},
+ {172, 5, 3} },
+ {65000000,
+ {109, 6, 2}, /* ignoring bit difference: 0x00008000 */
+ {109, 3, 3}, /* ignoring bit difference: 0x00008000 */
+ {109, 3, 3},
+ {109, 3, 3} },
+ {65178000,
+ {91, 5, 2},
+ {182, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {109, 3, 3},
+ {182, 5, 3} },
+ {66750000,
+ {75, 4, 2},
+ {150, 4, 3}, /* ignoring bit difference: 0x00808000 */
+ {150, 4, 3},
+ {112, 3, 3} },
+ {68179000,
+ {19, 4, 0},
+ {114, 3, 3}, /* ignoring bit difference: 0x00008000 */
+ {190, 5, 3},
+ {191, 5, 3} },
+ {69924000,
+ {83, 17, 0},
+ {195, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {195, 5, 3},
+ {195, 5, 3} },
+ {70159000,
+ {98, 20, 0},
+ {196, 5, 3}, /* ignoring bit difference: 0x00808000 */
+ {196, 5, 3},
+ {195, 5, 3} },
+ {72000000,
+ {121, 24, 0},
+ {161, 4, 3}, /* ignoring bit difference: 0x00808000 */
+ {161, 4, 3},
+ {161, 4, 3} },
+ {78750000,
+ {33, 3, 1},
+ {66, 3, 2}, /* ignoring bit difference: 0x00008000 */
+ {110, 5, 2},
+ {110, 5, 2} },
+ {80136000,
+ {28, 5, 0},
+ {68, 3, 2}, /* ignoring bit difference: 0x00008000 */
+ {112, 5, 2},
+ {112, 5, 2} },
+ {83375000,
+ {93, 2, 3},
+ {93, 4, 2}, /* ignoring bit difference: 0x00800000 */
+ {93, 4, 2}, /* ignoring bit difference: 0x00800000 */
+ {117, 5, 2} },
+ {83950000,
+ {41, 7, 0},
+ {117, 5, 2}, /* ignoring bit difference: 0x00008000 */
+ {117, 5, 2},
+ {117, 5, 2} },
+ {84750000,
+ {118, 5, 2},
+ {118, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {118, 5, 2},
+ {118, 5, 2} },
+ {85860000,
+ {84, 7, 1},
+ {120, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {120, 5, 2},
+ {118, 5, 2} },
+ {88750000,
+ {31, 5, 0},
+ {124, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {174, 7, 2}, /* ignoring bit difference: 0x00808000 */
+ {124, 5, 2} },
+ {94500000,
+ {33, 5, 0},
+ {132, 5, 2}, /* ignoring bit difference: 0x00008000 */
+ {132, 5, 2},
+ {132, 5, 2} },
+ {97750000,
+ {82, 6, 1},
+ {137, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {137, 5, 2},
+ {137, 5, 2} },
+ {101000000,
+ {127, 9, 1},
+ {141, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {141, 5, 2},
+ {141, 5, 2} },
+ {106500000,
+ {119, 4, 2},
+ {119, 4, 2}, /* ignoring bit difference: 0x00808000 */
+ {119, 4, 2},
+ {149, 5, 2} },
+ {108000000,
+ {121, 4, 2},
+ {121, 4, 2}, /* ignoring bit difference: 0x00808000 */
+ {151, 5, 2},
+ {151, 5, 2} },
+ {113309000,
+ {95, 12, 0},
+ {95, 3, 2}, /* ignoring bit difference: 0x00808000 */
+ {95, 3, 2},
+ {159, 5, 2} },
+ {118840000,
+ {83, 5, 1},
+ {166, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {166, 5, 2},
+ {166, 5, 2} },
+ {119000000,
+ {108, 13, 0},
+ {133, 4, 2}, /* ignoring bit difference: 0x00808000 */
+ {133, 4, 2},
+ {167, 5, 2} },
+ {121750000,
+ {85, 5, 1},
+ {170, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {68, 2, 2},
+ {0, 0, 0} },
+ {125104000,
+ {53, 6, 0}, /* ignoring bit difference: 0x00008000 */
+ {106, 3, 2}, /* ignoring bit difference: 0x00008000 */
+ {175, 5, 2},
+ {0, 0, 0} },
+ {135000000,
+ {94, 5, 1},
+ {28, 3, 0}, /* ignoring bit difference: 0x00804000 */
+ {151, 4, 2},
+ {189, 5, 2} },
+ {136700000,
+ {115, 12, 0},
+ {191, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {191, 5, 2},
+ {191, 5, 2} },
+ {138400000,
+ {87, 9, 0},
+ {116, 3, 2}, /* ignoring bit difference: 0x00808000 */
+ {116, 3, 2},
+ {194, 5, 2} },
+ {146760000,
+ {103, 5, 1},
+ {206, 5, 2}, /* ignoring bit difference: 0x00808000 */
+ {206, 5, 2},
+ {206, 5, 2} },
+ {153920000,
+ {86, 8, 0},
+ {86, 4, 1}, /* ignoring bit difference: 0x00808000 */
+ {86, 4, 1},
+ {86, 4, 1} }, /* FIXED: old = {84, 2, 1} */
+ {156000000,
+ {109, 5, 1},
+ {109, 5, 1}, /* ignoring bit difference: 0x00808000 */
+ {109, 5, 1},
+ {108, 5, 1} },
+ {157500000,
+ {55, 5, 0}, /* ignoring bit difference: 0x00008000 */
+ {22, 2, 0}, /* ignoring bit difference: 0x00802000 */
+ {110, 5, 1},
+ {110, 5, 1} },
+ {162000000,
+ {113, 5, 1},
+ {113, 5, 1}, /* ignoring bit difference: 0x00808000 */
+ {113, 5, 1},
+ {113, 5, 1} },
+ {187000000,
+ {118, 9, 0},
+ {131, 5, 1}, /* ignoring bit difference: 0x00808000 */
+ {131, 5, 1},
+ {131, 5, 1} },
+ {193295000,
+ {108, 8, 0},
+ {81, 3, 1}, /* ignoring bit difference: 0x00808000 */
+ {135, 5, 1},
+ {135, 5, 1} },
+ {202500000,
+ {99, 7, 0},
+ {85, 3, 1}, /* ignoring bit difference: 0x00808000 */
+ {142, 5, 1},
+ {142, 5, 1} },
+ {204000000,
+ {100, 7, 0},
+ {143, 5, 1}, /* ignoring bit difference: 0x00808000 */
+ {143, 5, 1},
+ {143, 5, 1} },
+ {218500000,
+ {92, 6, 0},
+ {153, 5, 1}, /* ignoring bit difference: 0x00808000 */
+ {153, 5, 1},
+ {153, 5, 1} },
+ {234000000,
+ {98, 6, 0},
+ {98, 3, 1}, /* ignoring bit difference: 0x00008000 */
+ {98, 3, 1},
+ {164, 5, 1} },
+ {267250000,
+ {112, 6, 0},
+ {112, 3, 1}, /* ignoring bit difference: 0x00808000 */
+ {187, 5, 1},
+ {187, 5, 1} },
+ {297500000,
+ {102, 5, 0}, /* ignoring bit difference: 0x00008000 */
+ {166, 4, 1}, /* ignoring bit difference: 0x00008000 */
+ {208, 5, 1},
+ {208, 5, 1} },
+ {74481000,
+ {26, 5, 0},
+ {125, 3, 3}, /* ignoring bit difference: 0x00808000 */
+ {208, 5, 3},
+ {209, 5, 3} },
+ {172798000,
+ {121, 5, 1},
+ {121, 5, 1}, /* ignoring bit difference: 0x00808000 */
+ {121, 5, 1},
+ {121, 5, 1} },
+ {122614000,
+ {60, 7, 0},
+ {137, 4, 2}, /* ignoring bit difference: 0x00808000 */
+ {137, 4, 2},
+ {172, 5, 2} },
+ {74270000,
+ {83, 8, 1},
+ {208, 5, 3},
+ {208, 5, 3},
+ {0, 0, 0} },
+ {148500000,
+ {83, 8, 0},
+ {208, 5, 2},
+ {166, 4, 2},
+ {208, 5, 2} }
};
static struct fifo_depth_select display_fifo_depth_reg = {
@@ -1360,40 +1558,70 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
}
+static u32 cle266_encode_pll(struct pll_config pll)
+{
+ return (pll.multiplier << 8)
+ | (pll.rshift << 6)
+ | pll.divisor;
+}
+
+static u32 k800_encode_pll(struct pll_config pll)
+{
+ return ((pll.divisor - 2) << 16)
+ | (pll.rshift << 10)
+ | (pll.multiplier - 2);
+}
+
+static u32 vx855_encode_pll(struct pll_config pll)
+{
+ return (pll.divisor << 16)
+ | (pll.rshift << 10)
+ | pll.multiplier;
+}
+
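/*
 * For reference (not part of the patch): a quick check of the new encoding
 * against the old constants -- the 25.175 MHz entry stores {multiplier 99,
 * divisor 7, rshift 3} for CLE266, so cle266_encode_pll() yields
 * (99 << 8) | (3 << 6) | 7 = 0x63C7. The removed CLE266_PLL_25_175M define
 * was 0x0000C763; since the old code wrote CLK / 0x100 to SR46 and
 * CLK % 0x100 to SR47 while the new code writes the low byte to SR46 and the
 * high byte to SR47, both program SR46 = 0xC7 and SR47 = 0x63.
 */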
u32 viafb_get_clk_value(int clk)
{
- int i;
+ u32 value = 0;
+ int i = 0;
- for (i = 0; i < NUM_TOTAL_PLL_TABLE; i++) {
- if (clk == pll_value[i].clk) {
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_CLE266:
- case UNICHROME_K400:
- return pll_value[i].cle266_pll;
-
- case UNICHROME_K800:
- case UNICHROME_PM800:
- case UNICHROME_CN700:
- return pll_value[i].k800_pll;
-
- case UNICHROME_CX700:
- case UNICHROME_K8M890:
- case UNICHROME_P4M890:
- case UNICHROME_P4M900:
- case UNICHROME_VX800:
- return pll_value[i].cx700_pll;
- case UNICHROME_VX855:
- return pll_value[i].vx855_pll;
- }
+ while (i < NUM_TOTAL_PLL_TABLE && clk != pll_value[i].clk)
+ i++;
+
+ if (i == NUM_TOTAL_PLL_TABLE) {
+ printk(KERN_WARNING "viafb_get_clk_value: PLL lookup failed!\n");
+ } else {
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_CLE266:
+ case UNICHROME_K400:
+ value = cle266_encode_pll(pll_value[i].cle266_pll);
+ break;
+
+ case UNICHROME_K800:
+ case UNICHROME_PM800:
+ case UNICHROME_CN700:
+ value = k800_encode_pll(pll_value[i].k800_pll);
+ break;
+
+ case UNICHROME_CX700:
+ case UNICHROME_CN750:
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M890:
+ case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ value = k800_encode_pll(pll_value[i].cx700_pll);
+ break;
+
+ case UNICHROME_VX855:
+ value = vx855_encode_pll(pll_value[i].vx855_pll);
+ break;
}
}
- DEBUG_MSG(KERN_INFO "Can't find match PLL value\n\n");
- return 0;
+ return value;
}
/* Set VCLK*/
-void viafb_set_vclock(u32 CLK, int set_iga)
+void viafb_set_vclock(u32 clk, int set_iga)
{
/* H.W. Reset : ON */
viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
@@ -1403,26 +1631,23 @@ void viafb_set_vclock(u32 CLK, int set_iga)
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
- viafb_write_reg(SR46, VIASR, CLK / 0x100);
- viafb_write_reg(SR47, VIASR, CLK % 0x100);
+ via_write_reg(VIASR, SR46, (clk & 0x00FF));
+ via_write_reg(VIASR, SR47, (clk & 0xFF00) >> 8);
break;
case UNICHROME_K800:
case UNICHROME_PM800:
case UNICHROME_CN700:
case UNICHROME_CX700:
+ case UNICHROME_CN750:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
case UNICHROME_P4M900:
case UNICHROME_VX800:
case UNICHROME_VX855:
- viafb_write_reg(SR44, VIASR, CLK / 0x10000);
- DEBUG_MSG(KERN_INFO "\nSR44=%x", CLK / 0x10000);
- viafb_write_reg(SR45, VIASR, (CLK & 0xFFFF) / 0x100);
- DEBUG_MSG(KERN_INFO "\nSR45=%x",
- (CLK & 0xFFFF) / 0x100);
- viafb_write_reg(SR46, VIASR, CLK % 0x100);
- DEBUG_MSG(KERN_INFO "\nSR46=%x", CLK % 0x100);
+ via_write_reg(VIASR, SR44, (clk & 0x0000FF));
+ via_write_reg(VIASR, SR45, (clk & 0x00FF00) >> 8);
+ via_write_reg(VIASR, SR46, (clk & 0xFF0000) >> 16);
break;
}
}
@@ -1432,22 +1657,23 @@ void viafb_set_vclock(u32 CLK, int set_iga)
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
- viafb_write_reg(SR44, VIASR, CLK / 0x100);
- viafb_write_reg(SR45, VIASR, CLK % 0x100);
+ via_write_reg(VIASR, SR44, (clk & 0x00FF));
+ via_write_reg(VIASR, SR45, (clk & 0xFF00) >> 8);
break;
case UNICHROME_K800:
case UNICHROME_PM800:
case UNICHROME_CN700:
case UNICHROME_CX700:
+ case UNICHROME_CN750:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
case UNICHROME_P4M900:
case UNICHROME_VX800:
case UNICHROME_VX855:
- viafb_write_reg(SR4A, VIASR, CLK / 0x10000);
- viafb_write_reg(SR4B, VIASR, (CLK & 0xFFFF) / 0x100);
- viafb_write_reg(SR4C, VIASR, CLK % 0x100);
+ via_write_reg(VIASR, SR4A, (clk & 0x0000FF));
+ via_write_reg(VIASR, SR4B, (clk & 0x00FF00) >> 8);
+ via_write_reg(VIASR, SR4C, (clk & 0xFF0000) >> 16);
break;
}
}
@@ -1791,8 +2017,6 @@ void viafb_init_chip_info(int chip_type)
viafb_set_iga_path();
viaparinfo->lvds_setting_info->display_method = viafb_lcd_dsp_method;
- viaparinfo->lvds_setting_info->get_lcd_size_method =
- GET_LCD_SIZE_BY_USER_SETTING;
viaparinfo->lvds_setting_info->lcd_mode = viafb_lcd_mode;
viaparinfo->lvds_setting_info2->display_method =
viaparinfo->lvds_setting_info->display_method;
@@ -1946,13 +2170,6 @@ static void init_tmds_chip_info(void)
static void init_lvds_chip_info(void)
{
- if (viafb_lcd_panel_id > LCD_PANEL_ID_MAXIMUM)
- viaparinfo->lvds_setting_info->get_lcd_size_method =
- GET_LCD_SIZE_BY_VGA_BIOS;
- else
- viaparinfo->lvds_setting_info->get_lcd_size_method =
- GET_LCD_SIZE_BY_USER_SETTING;
-
viafb_lvds_trasmitter_identify();
viafb_init_lcd_size();
viafb_init_lvds_output_interface(&viaparinfo->chip_info->lvds_chip_info,
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index a109de37981..c4439989529 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -700,12 +700,18 @@ struct _lcd_scaling_factor {
struct _lcd_ver_scaling_factor lcd_ver_scaling_factor;
};
+struct pll_config {
+ u16 multiplier;
+ u8 divisor;
+ u8 rshift;
+};
+
struct pll_map {
u32 clk;
- u32 cle266_pll;
- u32 k800_pll;
- u32 cx700_pll;
- u32 vx855_pll;
+ struct pll_config cle266_pll;
+ struct pll_config k800_pll;
+ struct pll_config cx700_pll;
+ struct pll_config vx855_pll;
};
struct rgbLUT {
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c074e32..4d553d0b8d7 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
{
struct viafb_ioctl_info viainfo;
+ memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
+
viainfo.viafb_id = VIAID;
viainfo.vendor_id = PCI_VIA_VENDOR_ID;
diff --git a/drivers/video/via/ioctl.h b/drivers/video/via/ioctl.h
index c430fa23008..6010d10b59e 100644
--- a/drivers/video/via/ioctl.h
+++ b/drivers/video/via/ioctl.h
@@ -35,11 +35,9 @@
#define VIAFB_GET_SAMM_INFO 0x56494107 /* 'VIA\07' */
#define VIAFB_TURN_ON_OUTPUT_DEVICE 0x56494108 /* 'VIA\08' */
#define VIAFB_TURN_OFF_OUTPUT_DEVICE 0x56494109 /* 'VIA\09' */
-#define VIAFB_SET_DEVICE 0x5649410A
#define VIAFB_GET_DEVICE 0x5649410B
#define VIAFB_GET_DRIVER_VERSION 0x56494112 /* 'VIA\12' */
#define VIAFB_GET_CHIP_INFO 0x56494113 /* 'VIA\13' */
-#define VIAFB_SET_DEVICE_INFO 0x56494114
#define VIAFB_GET_DEVICE_INFO 0x56494115
#define VIAFB_GET_DEVICE_SUPPORT 0x56494118
@@ -50,7 +48,6 @@
#define VIAFB_GET_GAMMA_LUT 0x56494124
#define VIAFB_SET_GAMMA_LUT 0x56494125
#define VIAFB_GET_GAMMA_SUPPORT_STATE 0x56494126
-#define VIAFB_SET_SECOND_MODE 0x56494129
#define VIAFB_SYNC_SURFACE 0x56494130
#define VIAFB_GET_DRIVER_CAPS 0x56494131
#define VIAFB_GET_IGA_SCALING_INFO 0x56494132
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 2ab0f156439..fc25ae30c5f 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -75,8 +75,6 @@ static void check_diport_of_integrated_lvds(
static struct display_timing lcd_centering_timging(struct display_timing
mode_crt_reg,
struct display_timing panel_crt_reg);
-static void viafb_load_scaling_factor_for_p4m900(int set_hres,
- int set_vres, int panel_hres, int panel_vres);
static int check_lvds_chip(int device_id_subaddr, int device_id)
{
@@ -89,33 +87,8 @@ static int check_lvds_chip(int device_id_subaddr, int device_id)
void viafb_init_lcd_size(void)
{
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
- DEBUG_MSG(KERN_INFO
- "viaparinfo->lvds_setting_info->get_lcd_size_method %d\n",
- viaparinfo->lvds_setting_info->get_lcd_size_method);
- switch (viaparinfo->lvds_setting_info->get_lcd_size_method) {
- case GET_LCD_SIZE_BY_SYSTEM_BIOS:
- break;
- case GET_LCD_SZIE_BY_HW_STRAPPING:
- break;
- case GET_LCD_SIZE_BY_VGA_BIOS:
- DEBUG_MSG(KERN_INFO "Get LCD Size method by VGA BIOS !!\n");
- fp_id_to_vindex(viafb_lcd_panel_id);
- DEBUG_MSG(KERN_INFO "LCD Panel_ID = %d\n",
- viaparinfo->lvds_setting_info->lcd_panel_id);
- break;
- case GET_LCD_SIZE_BY_USER_SETTING:
- DEBUG_MSG(KERN_INFO "Get LCD Size method by user setting !!\n");
- fp_id_to_vindex(viafb_lcd_panel_id);
- DEBUG_MSG(KERN_INFO "LCD Panel_ID = %d\n",
- viaparinfo->lvds_setting_info->lcd_panel_id);
- break;
- default:
- DEBUG_MSG(KERN_INFO "viafb_init_lcd_size fail\n");
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID1_800X600;
- fp_id_to_vindex(LCD_PANEL_ID1_800X600);
- }
+ fp_id_to_vindex(viafb_lcd_panel_id);
viaparinfo->lvds_setting_info2->lcd_panel_id =
viaparinfo->lvds_setting_info->lcd_panel_id;
viaparinfo->lvds_setting_info2->lcd_panel_hres =
@@ -437,14 +410,9 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
/* LCD Scaling Enable */
viafb_write_reg_mask(CR79, VIACR, 0x07, BIT0 + BIT1 + BIT2);
- if (UNICHROME_P4M900 == viaparinfo->chip_info->gfx_chip_name) {
- viafb_load_scaling_factor_for_p4m900(set_hres, set_vres,
- panel_hres, panel_vres);
- return;
- }
/* Check if expansion for horizontal */
- if (set_hres != panel_hres) {
+ if (set_hres < panel_hres) {
/* Load Horizontal Scaling Factor */
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
@@ -464,6 +432,10 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
case UNICHROME_CX700:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
+ case UNICHROME_P4M900:
+ case UNICHROME_CN750:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
reg_value =
K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
/* Horizontal scaling enabled */
@@ -483,7 +455,7 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
}
/* Check if expansion for vertical */
- if (set_vres != panel_vres) {
+ if (set_vres < panel_vres) {
/* Load Vertical Scaling Factor */
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
@@ -503,6 +475,10 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
case UNICHROME_CX700:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
+ case UNICHROME_P4M900:
+ case UNICHROME_CN750:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
reg_value =
K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
/* Vertical scaling enabled */
@@ -648,9 +624,8 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
(mode_crt_reg, panel_crt_reg), IGA1);
} else {
/* Expansion */
- if ((plvds_setting_info->display_method ==
- LCD_EXPANDSION) & ((set_hres != panel_hres)
- || (set_vres != panel_vres))) {
+ if (plvds_setting_info->display_method == LCD_EXPANDSION
+ && (set_hres < panel_hres || set_vres < panel_vres)) {
/* expansion timing IGA2 loaded panel set timing*/
viafb_load_crtc_timing(panel_crt_reg, IGA2);
DEBUG_MSG(KERN_INFO "viafb_load_crtc_timing!!\n");
@@ -1139,69 +1114,3 @@ bool viafb_lcd_get_mobile_state(bool *mobile)
return false;
}
}
-
-static void viafb_load_scaling_factor_for_p4m900(int set_hres,
- int set_vres, int panel_hres, int panel_vres)
-{
- int h_scaling_factor;
- int v_scaling_factor;
- u8 cra2 = 0;
- u8 cr77 = 0;
- u8 cr78 = 0;
- u8 cr79 = 0;
- u8 cr9f = 0;
- /* Check if expansion for horizontal */
- if (set_hres < panel_hres) {
- /* Load Horizontal Scaling Factor */
-
- /* For VIA_K8M800 or later chipsets. */
- h_scaling_factor =
- K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
- /* HSCaleFactor[1:0] at CR9F[1:0] */
- cr9f = h_scaling_factor & 0x0003;
- /* HSCaleFactor[9:2] at CR77[7:0] */
- cr77 = (h_scaling_factor & 0x03FC) >> 2;
- /* HSCaleFactor[11:10] at CR79[5:4] */
- cr79 = (h_scaling_factor & 0x0C00) >> 10;
- cr79 <<= 4;
-
- /* Horizontal scaling enabled */
- cra2 = 0xC0;
-
- DEBUG_MSG(KERN_INFO "Horizontal Scaling value = %d\n",
- h_scaling_factor);
- } else {
- /* Horizontal scaling disabled */
- cra2 = 0x00;
- }
-
- /* Check if expansion for vertical */
- if (set_vres < panel_vres) {
- /* Load Vertical Scaling Factor */
-
- /* For VIA_K8M800 or later chipsets. */
- v_scaling_factor =
- K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
-
- /* Vertical scaling enabled */
- cra2 |= 0x08;
- /* VSCaleFactor[0] at CR79[3] */
- cr79 |= ((v_scaling_factor & 0x0001) << 3);
- /* VSCaleFactor[8:1] at CR78[7:0] */
- cr78 |= (v_scaling_factor & 0x01FE) >> 1;
- /* VSCaleFactor[10:9] at CR79[7:6] */
- cr79 |= ((v_scaling_factor & 0x0600) >> 9) << 6;
-
- DEBUG_MSG(KERN_INFO "Vertical Scaling value = %d\n",
- v_scaling_factor);
- } else {
- /* Vertical scaling disabled */
- cra2 |= 0x00;
- }
-
- viafb_write_reg_mask(CRA2, VIACR, cra2, BIT3 + BIT6 + BIT7);
- viafb_write_reg_mask(CR77, VIACR, cr77, 0xFF);
- viafb_write_reg_mask(CR78, VIACR, cr78, 0xFF);
- viafb_write_reg_mask(CR79, VIACR, cr79, 0xF8);
- viafb_write_reg_mask(CR9F, VIACR, cr9f, BIT0 + BIT1);
-}
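
Note: the viafb_load_scaling_factor_for_p4m900() helper removed above duplicated the generic K800 scaling path, which now also covers P4M900/CN750/VX800/VX855. For reference, a rough illustrative sketch of the bit layout it programmed (standalone C, not driver code; the struct and function names here are made up for this note and assume both directions are being scaled):

#include <string.h>

struct via_scale_regs {
	unsigned char cr77, cr78, cr79, cr9f, cra2;
};

static void pack_scaling_factors(int h, int v, struct via_scale_regs *r)
{
	memset(r, 0, sizeof(*r));

	r->cr9f  = h & 0x0003;			/* HSCaleFactor[1:0]   -> CR9F[1:0] */
	r->cr77  = (h & 0x03FC) >> 2;		/* HSCaleFactor[9:2]   -> CR77[7:0] */
	r->cr79 |= ((h & 0x0C00) >> 10) << 4;	/* HSCaleFactor[11:10] -> CR79[5:4] */
	r->cra2 |= 0xC0;			/* horizontal scaling enable */

	r->cr79 |= (v & 0x0001) << 3;		/* VSCaleFactor[0]     -> CR79[3]   */
	r->cr78  = (v & 0x01FE) >> 1;		/* VSCaleFactor[8:1]   -> CR78[7:0] */
	r->cr79 |= ((v & 0x0600) >> 9) << 6;	/* VSCaleFactor[10:9]  -> CR79[7:6] */
	r->cra2 |= 0x08;			/* vertical scaling enable */
}

The removed code then wrote these values out through viafb_write_reg_mask() with the masks shown above.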
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index 9762ec62b49..b348efc360b 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -28,11 +28,6 @@
#define VT3271_DEVICE_ID_REG 0x02
#define VT3271_DEVICE_ID 0x71
-#define GET_LCD_SIZE_BY_SYSTEM_BIOS 0x01
-#define GET_LCD_SIZE_BY_VGA_BIOS 0x02
-#define GET_LCD_SZIE_BY_HW_STRAPPING 0x03
-#define GET_LCD_SIZE_BY_USER_SETTING 0x04
-
/* Definition DVI Panel ID*/
/* Resolution: 640x480, Channel: single, Dithering: Enable */
#define LCD_PANEL_ID0_640X480 0x00
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 7f0de7f006a..2cbe1031b42 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -631,7 +631,6 @@
#define CLK_25_175M 25175000
#define CLK_26_880M 26880000
#define CLK_29_581M 29581000
-#define CLK_31_490M 31490000
#define CLK_31_500M 31500000
#define CLK_31_728M 31728000
#define CLK_32_668M 32688000
@@ -676,7 +675,6 @@
#define CLK_119_000M 119000000
#define CLK_121_750M 121750000 /* 121.704MHz */
#define CLK_125_104M 125104000
-#define CLK_133_308M 133308000
#define CLK_135_000M 135000000
#define CLK_136_700M 136700000
#define CLK_138_400M 138400000
@@ -699,313 +697,6 @@
#define CLK_172_798M 172798000
#define CLK_122_614M 122614000
-/* CLE266 PLL value
-*/
-#define CLE266_PLL_25_175M 0x0000C763
-#define CLE266_PLL_26_880M 0x0000440F
-#define CLE266_PLL_29_581M 0x00008421
-#define CLE266_PLL_31_490M 0x00004721
-#define CLE266_PLL_31_500M 0x0000C3B5
-#define CLE266_PLL_31_728M 0x0000471F
-#define CLE266_PLL_32_668M 0x0000C449
-#define CLE266_PLL_36_000M 0x0000C5E5
-#define CLE266_PLL_40_000M 0x0000C459
-#define CLE266_PLL_41_291M 0x00004417
-#define CLE266_PLL_43_163M 0x0000C579
-#define CLE266_PLL_45_250M 0x0000C57F /* 45.46MHz */
-#define CLE266_PLL_46_000M 0x0000875A
-#define CLE266_PLL_46_996M 0x0000C4E9
-#define CLE266_PLL_48_000M 0x00001443
-#define CLE266_PLL_48_875M 0x00001D63
-#define CLE266_PLL_49_500M 0x00008653
-#define CLE266_PLL_52_406M 0x0000C475
-#define CLE266_PLL_52_977M 0x00004525
-#define CLE266_PLL_56_250M 0x000047B7
-#define CLE266_PLL_60_466M 0x0000494C
-#define CLE266_PLL_61_500M 0x00001456
-#define CLE266_PLL_65_000M 0x000086ED
-#define CLE266_PLL_65_178M 0x0000855B
-#define CLE266_PLL_66_750M 0x0000844B /* 67.116MHz */
-#define CLE266_PLL_68_179M 0x00000413
-#define CLE266_PLL_69_924M 0x00001153
-#define CLE266_PLL_70_159M 0x00001462
-#define CLE266_PLL_72_000M 0x00001879
-#define CLE266_PLL_74_270M 0x00004853
-#define CLE266_PLL_78_750M 0x00004321
-#define CLE266_PLL_80_136M 0x0000051C
-#define CLE266_PLL_83_375M 0x0000C25D
-#define CLE266_PLL_83_950M 0x00000729
-#define CLE266_PLL_84_750M 0x00008576 /* 84.537MHz */
-#define CLE266_PLL_85_860M 0x00004754
-#define CLE266_PLL_88_750M 0x0000051F
-#define CLE266_PLL_94_500M 0x00000521
-#define CLE266_PLL_97_750M 0x00004652
-#define CLE266_PLL_101_000M 0x0000497F
-#define CLE266_PLL_106_500M 0x00008477 /* 106.491463 MHz */
-#define CLE266_PLL_108_000M 0x00008479
-#define CLE266_PLL_113_309M 0x00000C5F
-#define CLE266_PLL_118_840M 0x00004553
-#define CLE266_PLL_119_000M 0x00000D6C
-#define CLE266_PLL_121_750M 0x00004555 /* 121.704MHz */
-#define CLE266_PLL_125_104M 0x000006B5
-#define CLE266_PLL_133_308M 0x0000465F
-#define CLE266_PLL_135_000M 0x0000455E
-#define CLE266_PLL_136_700M 0x00000C73
-#define CLE266_PLL_138_400M 0x00000957
-#define CLE266_PLL_146_760M 0x00004567
-#define CLE266_PLL_148_500M 0x00000853
-#define CLE266_PLL_153_920M 0x00000856
-#define CLE266_PLL_156_000M 0x0000456D
-#define CLE266_PLL_157_500M 0x000005B7
-#define CLE266_PLL_162_000M 0x00004571
-#define CLE266_PLL_187_000M 0x00000976
-#define CLE266_PLL_193_295M 0x0000086C
-#define CLE266_PLL_202_500M 0x00000763
-#define CLE266_PLL_204_000M 0x00000764
-#define CLE266_PLL_218_500M 0x0000065C
-#define CLE266_PLL_234_000M 0x00000662
-#define CLE266_PLL_267_250M 0x00000670
-#define CLE266_PLL_297_500M 0x000005E6
-#define CLE266_PLL_74_481M 0x0000051A
-#define CLE266_PLL_172_798M 0x00004579
-#define CLE266_PLL_122_614M 0x0000073C
-
-/* K800 PLL value
-*/
-#define K800_PLL_25_175M 0x00539001
-#define K800_PLL_26_880M 0x001C8C80
-#define K800_PLL_29_581M 0x00409080
-#define K800_PLL_31_490M 0x006F9001
-#define K800_PLL_31_500M 0x008B9002
-#define K800_PLL_31_728M 0x00AF9003
-#define K800_PLL_32_668M 0x00909002
-#define K800_PLL_36_000M 0x009F9002
-#define K800_PLL_40_000M 0x00578C02
-#define K800_PLL_41_291M 0x00438C01
-#define K800_PLL_43_163M 0x00778C03
-#define K800_PLL_45_250M 0x007D8C83 /* 45.46MHz */
-#define K800_PLL_46_000M 0x00658C02
-#define K800_PLL_46_996M 0x00818C83
-#define K800_PLL_48_000M 0x00848C83
-#define K800_PLL_48_875M 0x00508C81
-#define K800_PLL_49_500M 0x00518C01
-#define K800_PLL_52_406M 0x00738C02
-#define K800_PLL_52_977M 0x00928C83
-#define K800_PLL_56_250M 0x007C8C02
-#define K800_PLL_60_466M 0x00A78C83
-#define K800_PLL_61_500M 0x00AA8C83
-#define K800_PLL_65_000M 0x006B8C01
-#define K800_PLL_65_178M 0x00B48C83
-#define K800_PLL_66_750M 0x00948C82 /* 67.116MHz */
-#define K800_PLL_68_179M 0x00708C01
-#define K800_PLL_69_924M 0x00C18C83
-#define K800_PLL_70_159M 0x00C28C83
-#define K800_PLL_72_000M 0x009F8C82
-#define K800_PLL_74_270M 0x00ce0c03
-#define K800_PLL_78_750M 0x00408801
-#define K800_PLL_80_136M 0x00428801
-#define K800_PLL_83_375M 0x005B0882
-#define K800_PLL_83_950M 0x00738803
-#define K800_PLL_84_750M 0x00748883 /* 84.477MHz */
-#define K800_PLL_85_860M 0x00768883
-#define K800_PLL_88_750M 0x007A8883
-#define K800_PLL_94_500M 0x00828803
-#define K800_PLL_97_750M 0x00878883
-#define K800_PLL_101_000M 0x008B8883
-#define K800_PLL_106_500M 0x00758882 /* 106.491463 MHz */
-#define K800_PLL_108_000M 0x00778882
-#define K800_PLL_113_309M 0x005D8881
-#define K800_PLL_118_840M 0x00A48883
-#define K800_PLL_119_000M 0x00838882
-#define K800_PLL_121_750M 0x00A88883 /* 121.704MHz */
-#define K800_PLL_125_104M 0x00688801
-#define K800_PLL_133_308M 0x005D8801
-#define K800_PLL_135_000M 0x001A4081
-#define K800_PLL_136_700M 0x00BD8883
-#define K800_PLL_138_400M 0x00728881
-#define K800_PLL_146_760M 0x00CC8883
-#define K800_PLL_148_500M 0x00ce0803
-#define K800_PLL_153_920M 0x00548482
-#define K800_PLL_156_000M 0x006B8483
-#define K800_PLL_157_500M 0x00142080
-#define K800_PLL_162_000M 0x006F8483
-#define K800_PLL_187_000M 0x00818483
-#define K800_PLL_193_295M 0x004F8481
-#define K800_PLL_202_500M 0x00538481
-#define K800_PLL_204_000M 0x008D8483
-#define K800_PLL_218_500M 0x00978483
-#define K800_PLL_234_000M 0x00608401
-#define K800_PLL_267_250M 0x006E8481
-#define K800_PLL_297_500M 0x00A48402
-#define K800_PLL_74_481M 0x007B8C81
-#define K800_PLL_172_798M 0x00778483
-#define K800_PLL_122_614M 0x00878882
-
-/* PLL for VT3324 */
-#define CX700_25_175M 0x008B1003
-#define CX700_26_719M 0x00931003
-#define CX700_26_880M 0x00941003
-#define CX700_29_581M 0x00A49003
-#define CX700_31_490M 0x00AE1003
-#define CX700_31_500M 0x00AE1003
-#define CX700_31_728M 0x00AF1003
-#define CX700_32_668M 0x00B51003
-#define CX700_36_000M 0x00C81003
-#define CX700_40_000M 0x006E0C03
-#define CX700_41_291M 0x00710C03
-#define CX700_43_163M 0x00770C03
-#define CX700_45_250M 0x007D0C03 /* 45.46MHz */
-#define CX700_46_000M 0x007F0C03
-#define CX700_46_996M 0x00818C83
-#define CX700_48_000M 0x00840C03
-#define CX700_48_875M 0x00508C81
-#define CX700_49_500M 0x00880C03
-#define CX700_52_406M 0x00730C02
-#define CX700_52_977M 0x00920C03
-#define CX700_56_250M 0x009B0C03
-#define CX700_60_466M 0x00460C00
-#define CX700_61_500M 0x00AA0C03
-#define CX700_65_000M 0x006B0C01
-#define CX700_65_178M 0x006B0C01
-#define CX700_66_750M 0x00940C02 /*67.116MHz */
-#define CX700_68_179M 0x00BC0C03
-#define CX700_69_924M 0x00C10C03
-#define CX700_70_159M 0x00C20C03
-#define CX700_72_000M 0x009F0C02
-#define CX700_74_270M 0x00CE0C03
-#define CX700_74_481M 0x00CE0C03
-#define CX700_78_750M 0x006C0803
-#define CX700_80_136M 0x006E0803
-#define CX700_83_375M 0x005B0882
-#define CX700_83_950M 0x00730803
-#define CX700_84_750M 0x00740803 /* 84.537Mhz */
-#define CX700_85_860M 0x00760803
-#define CX700_88_750M 0x00AC8885
-#define CX700_94_500M 0x00820803
-#define CX700_97_750M 0x00870803
-#define CX700_101_000M 0x008B0803
-#define CX700_106_500M 0x00750802
-#define CX700_108_000M 0x00950803
-#define CX700_113_309M 0x005D0801
-#define CX700_118_840M 0x00A40803
-#define CX700_119_000M 0x00830802
-#define CX700_121_750M 0x00420800 /* 121.704MHz */
-#define CX700_125_104M 0x00AD0803
-#define CX700_133_308M 0x00930802
-#define CX700_135_000M 0x00950802
-#define CX700_136_700M 0x00BD0803
-#define CX700_138_400M 0x00720801
-#define CX700_146_760M 0x00CC0803
-#define CX700_148_500M 0x00a40802
-#define CX700_153_920M 0x00540402
-#define CX700_156_000M 0x006B0403
-#define CX700_157_500M 0x006C0403
-#define CX700_162_000M 0x006F0403
-#define CX700_172_798M 0x00770403
-#define CX700_187_000M 0x00810403
-#define CX700_193_295M 0x00850403
-#define CX700_202_500M 0x008C0403
-#define CX700_204_000M 0x008D0403
-#define CX700_218_500M 0x00970403
-#define CX700_234_000M 0x00600401
-#define CX700_267_250M 0x00B90403
-#define CX700_297_500M 0x00CE0403
-#define CX700_122_614M 0x00870802
-
-/* PLL for VX855 */
-#define VX855_22_000M 0x007B1005
-#define VX855_25_175M 0x008D1005
-#define VX855_26_719M 0x00961005
-#define VX855_26_880M 0x00961005
-#define VX855_27_000M 0x00971005
-#define VX855_29_581M 0x00A51005
-#define VX855_29_829M 0x00641003
-#define VX855_31_490M 0x00B01005
-#define VX855_31_500M 0x00B01005
-#define VX855_31_728M 0x008E1004
-#define VX855_32_668M 0x00921004
-#define VX855_36_000M 0x00A11004
-#define VX855_40_000M 0x00700C05
-#define VX855_41_291M 0x00730C05
-#define VX855_43_163M 0x00790C05
-#define VX855_45_250M 0x007F0C05 /* 45.46MHz */
-#define VX855_46_000M 0x00670C04
-#define VX855_46_996M 0x00690C04
-#define VX855_48_000M 0x00860C05
-#define VX855_48_875M 0x00890C05
-#define VX855_49_500M 0x00530C03
-#define VX855_52_406M 0x00580C03
-#define VX855_52_977M 0x00940C05
-#define VX855_56_250M 0x009D0C05
-#define VX855_57_275M 0x009D8C85 /* Used by XO panel */
-#define VX855_60_466M 0x00A90C05
-#define VX855_61_500M 0x00AC0C05
-#define VX855_65_000M 0x006D0C03
-#define VX855_65_178M 0x00B60C05
-#define VX855_66_750M 0x00700C03 /*67.116MHz */
-#define VX855_67_295M 0x00BC0C05
-#define VX855_68_179M 0x00BF0C05
-#define VX855_68_369M 0x00BF0C05
-#define VX855_69_924M 0x00C30C05
-#define VX855_70_159M 0x00C30C05
-#define VX855_72_000M 0x00A10C04
-#define VX855_73_023M 0x00CC0C05
-#define VX855_74_481M 0x00D10C05
-#define VX855_78_750M 0x006E0805
-#define VX855_79_466M 0x006F0805
-#define VX855_80_136M 0x00700805
-#define VX855_81_627M 0x00720805
-#define VX855_83_375M 0x00750805
-#define VX855_83_527M 0x00750805
-#define VX855_83_950M 0x00750805
-#define VX855_84_537M 0x00760805
-#define VX855_84_750M 0x00760805 /* 84.537Mhz */
-#define VX855_85_500M 0x00760805 /* 85.909080 MHz*/
-#define VX855_85_860M 0x00760805
-#define VX855_85_909M 0x00760805
-#define VX855_88_750M 0x007C0805
-#define VX855_89_489M 0x007D0805
-#define VX855_94_500M 0x00840805
-#define VX855_96_648M 0x00870805
-#define VX855_97_750M 0x00890805
-#define VX855_101_000M 0x008D0805
-#define VX855_106_500M 0x00950805
-#define VX855_108_000M 0x00970805
-#define VX855_110_125M 0x00990805
-#define VX855_112_000M 0x009D0805
-#define VX855_113_309M 0x009F0805
-#define VX855_115_000M 0x00A10805
-#define VX855_118_840M 0x00A60805
-#define VX855_119_000M 0x00A70805
-#define VX855_121_750M 0x00AA0805 /* 121.704MHz */
-#define VX855_122_614M 0x00AC0805
-#define VX855_126_266M 0x00B10805
-#define VX855_130_250M 0x00B60805 /* 130.250 */
-#define VX855_135_000M 0x00BD0805
-#define VX855_136_700M 0x00BF0805
-#define VX855_137_750M 0x00C10805
-#define VX855_138_400M 0x00C20805
-#define VX855_144_300M 0x00CA0805
-#define VX855_146_760M 0x00CE0805
-#define VX855_148_500M 0x00D00805
-#define VX855_153_920M 0x00540402
-#define VX855_156_000M 0x006C0405
-#define VX855_156_867M 0x006E0405
-#define VX855_157_500M 0x006E0405
-#define VX855_162_000M 0x00710405
-#define VX855_172_798M 0x00790405
-#define VX855_187_000M 0x00830405
-#define VX855_193_295M 0x00870405
-#define VX855_202_500M 0x008E0405
-#define VX855_204_000M 0x008F0405
-#define VX855_218_500M 0x00990405
-#define VX855_229_500M 0x00A10405
-#define VX855_234_000M 0x00A40405
-#define VX855_267_250M 0x00BB0405
-#define VX855_297_500M 0x00D00405
-#define VX855_339_500M 0x00770005
-#define VX855_340_772M 0x00770005
-
/* Definition CRTC Timing Index */
#define H_TOTAL_INDEX 0
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
index e8cfe839211..66f40303311 100644
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/via/via-core.c
@@ -64,7 +64,7 @@ static inline int viafb_mmio_read(int reg)
*/
static u32 viafb_enabled_ints;
-static void viafb_int_init(void)
+static void __devinit viafb_int_init(void)
{
viafb_enabled_ints = 0;
@@ -489,7 +489,7 @@ out_unmap:
return ret;
}
-static void __devexit via_pci_teardown_mmio(struct viafb_dev *vdev)
+static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
iounmap(vdev->fbmem);
iounmap(vdev->engine_mmio);
@@ -548,7 +548,7 @@ static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
return 0;
}
-static void __devexit via_teardown_subdevs(void)
+static void via_teardown_subdevs(void)
{
int i;
@@ -613,22 +613,24 @@ static void __devexit via_pci_remove(struct pci_dev *pdev)
static struct pci_device_id via_pci_table[] __devinitdata = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
.driver_data = UNICHROME_CLE266 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
- .driver_data = UNICHROME_PM800 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
.driver_data = UNICHROME_K400 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
.driver_data = UNICHROME_K800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
+ .driver_data = UNICHROME_PM800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
.driver_data = UNICHROME_CN700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
- .driver_data = UNICHROME_K8M890 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
.driver_data = UNICHROME_CX700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
- .driver_data = UNICHROME_P4M900 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
.driver_data = UNICHROME_CN750 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
+ .driver_data = UNICHROME_K8M890 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
+ .driver_data = UNICHROME_P4M890 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
+ .driver_data = UNICHROME_P4M900 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
.driver_data = UNICHROME_VX800 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
index 595516aea69..39acb37e7a1 100644
--- a/drivers/video/via/via-gpio.c
+++ b/drivers/video/via/via-gpio.c
@@ -73,7 +73,7 @@ struct viafb_gpio_cfg {
struct gpio_chip gpio_chip;
struct viafb_dev *vdev;
struct viafb_gpio *active_gpios[VIAFB_NUM_GPIOS];
- char *gpio_names[VIAFB_NUM_GPIOS];
+ const char *gpio_names[VIAFB_NUM_GPIOS];
};
/*
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 1082541358f..bdd0e4130f4 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -49,11 +49,6 @@ char *viafb_active_dev;
char *viafb_lcd_port = "";
char *viafb_dvi_port = "";
-static void viafb_set_device(struct device_t active_dev);
-static int apply_device_setting(struct viafb_ioctl_setting setting_info,
- struct fb_info *info);
-static void apply_second_mode_setting(struct fb_var_screeninfo
- *sec_var);
static void retrieve_device_setting(struct viafb_ioctl_setting
*setting_info);
static int viafb_pan_display(struct fb_var_screeninfo *var,
@@ -221,9 +216,9 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
/* Adjust var according to our driver's own table */
viafb_fill_var_timing_info(var, viafb_refresh, vmode_entry);
- if (info->var.accel_flags & FB_ACCELF_TEXT &&
+ if (var->accel_flags & FB_ACCELF_TEXT &&
!ppar->shared->vdev->engine_mmio)
- info->var.accel_flags = 0;
+ var->accel_flags = 0;
return 0;
}
@@ -234,6 +229,7 @@ static int viafb_set_par(struct fb_info *info)
struct VideoModeTable *vmode_entry, *vmode_entry1 = NULL;
DEBUG_MSG(KERN_INFO "viafb_set_par!\n");
+ viafb_update_fix(info);
viapar->depth = fb_get_color_depth(&info->var, &info->fix);
viafb_update_device_setting(viafbinfo->var.xres, viafbinfo->var.yres,
viafbinfo->var.bits_per_pixel, viafb_refresh, 0);
@@ -257,7 +253,6 @@ static int viafb_set_par(struct fb_info *info)
}
if (vmode_entry) {
- viafb_update_fix(info);
if (viafb_dual_fb && viapar->iga_path == IGA2)
viafb_bpp1 = info->var.bits_per_pixel;
else
@@ -478,13 +473,6 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
if (gpu32 & LCD_Device)
viafb_lcd_disable();
break;
- case VIAFB_SET_DEVICE:
- if (copy_from_user(&u.active_dev, (void *)argp,
- sizeof(u.active_dev)))
- return -EFAULT;
- viafb_set_device(u.active_dev);
- viafb_set_par(info);
- break;
case VIAFB_GET_DEVICE:
u.active_dev.crt = viafb_CRT_ON;
u.active_dev.dvi = viafb_DVI_ON;
@@ -527,21 +515,6 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
break;
- case VIAFB_SET_DEVICE_INFO:
- if (copy_from_user(&u.viafb_setting,
- argp, sizeof(u.viafb_setting)))
- return -EFAULT;
- if (apply_device_setting(u.viafb_setting, info) < 0)
- return -EINVAL;
-
- break;
-
- case VIAFB_SET_SECOND_MODE:
- if (copy_from_user(&u.sec_var, argp, sizeof(u.sec_var)))
- return -EFAULT;
- apply_second_mode_setting(&u.sec_var);
- break;
-
case VIAFB_GET_DEVICE_INFO:
retrieve_device_setting(&u.viafb_setting);
@@ -913,112 +886,6 @@ static int viafb_sync(struct fb_info *info)
return 0;
}
-static void check_available_device_to_enable(int device_id)
-{
- int device_num = 0;
-
- /* Initialize: */
- viafb_CRT_ON = STATE_OFF;
- viafb_DVI_ON = STATE_OFF;
- viafb_LCD_ON = STATE_OFF;
- viafb_LCD2_ON = STATE_OFF;
- viafb_DeviceStatus = None_Device;
-
- if ((device_id & CRT_Device) && (device_num < MAX_ACTIVE_DEV_NUM)) {
- viafb_CRT_ON = STATE_ON;
- device_num++;
- viafb_DeviceStatus |= CRT_Device;
- }
-
- if ((device_id & DVI_Device) && (device_num < MAX_ACTIVE_DEV_NUM)) {
- viafb_DVI_ON = STATE_ON;
- device_num++;
- viafb_DeviceStatus |= DVI_Device;
- }
-
- if ((device_id & LCD_Device) && (device_num < MAX_ACTIVE_DEV_NUM)) {
- viafb_LCD_ON = STATE_ON;
- device_num++;
- viafb_DeviceStatus |= LCD_Device;
- }
-
- if ((device_id & LCD2_Device) && (device_num < MAX_ACTIVE_DEV_NUM)) {
- viafb_LCD2_ON = STATE_ON;
- device_num++;
- viafb_DeviceStatus |= LCD2_Device;
- }
-
- if (viafb_DeviceStatus == None_Device) {
- /* Use CRT as default active device: */
- viafb_CRT_ON = STATE_ON;
- viafb_DeviceStatus = CRT_Device;
- }
- DEBUG_MSG(KERN_INFO "Device Status:%x", viafb_DeviceStatus);
-}
-
-static void viafb_set_device(struct device_t active_dev)
-{
- /* Check available device to enable: */
- int device_id = None_Device;
- if (active_dev.crt)
- device_id |= CRT_Device;
- if (active_dev.dvi)
- device_id |= DVI_Device;
- if (active_dev.lcd)
- device_id |= LCD_Device;
-
- check_available_device_to_enable(device_id);
-
- /* Check property of LCD: */
- if (viafb_LCD_ON) {
- if (active_dev.lcd_dsp_cent) {
- viaparinfo->lvds_setting_info->display_method =
- viafb_lcd_dsp_method = LCD_CENTERING;
- } else {
- viaparinfo->lvds_setting_info->display_method =
- viafb_lcd_dsp_method = LCD_EXPANDSION;
- }
-
- if (active_dev.lcd_mode == LCD_SPWG) {
- viaparinfo->lvds_setting_info->lcd_mode =
- viafb_lcd_mode = LCD_SPWG;
- } else {
- viaparinfo->lvds_setting_info->lcd_mode =
- viafb_lcd_mode = LCD_OPENLDI;
- }
-
- if (active_dev.lcd_panel_id <= LCD_PANEL_ID_MAXIMUM) {
- viafb_lcd_panel_id = active_dev.lcd_panel_id;
- viafb_init_lcd_size();
- }
- }
-
- /* Check property of mode: */
- if (!active_dev.xres1)
- viafb_second_xres = 640;
- else
- viafb_second_xres = active_dev.xres1;
- if (!active_dev.yres1)
- viafb_second_yres = 480;
- else
- viafb_second_yres = active_dev.yres1;
- if (active_dev.bpp != 0)
- viafb_bpp = active_dev.bpp;
- if (active_dev.bpp1 != 0)
- viafb_bpp1 = active_dev.bpp1;
- if (active_dev.refresh != 0)
- viafb_refresh = active_dev.refresh;
- if (active_dev.refresh1 != 0)
- viafb_refresh1 = active_dev.refresh1;
- if ((active_dev.samm == STATE_OFF) || (active_dev.samm == STATE_ON))
- viafb_SAMM_ON = active_dev.samm;
- viafb_primary_dev = active_dev.primary_dev;
-
- via_set_primary_address(0);
- via_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
- viafb_set_iga_path();
-}
-
static int get_primary_device(void)
{
int primary_device = 0;
@@ -1060,124 +927,6 @@ static int get_primary_device(void)
return primary_device;
}
-static void apply_second_mode_setting(struct fb_var_screeninfo
- *sec_var)
-{
- u32 htotal, vtotal, long_refresh;
-
- htotal = sec_var->xres + sec_var->left_margin +
- sec_var->right_margin + sec_var->hsync_len;
- vtotal = sec_var->yres + sec_var->upper_margin +
- sec_var->lower_margin + sec_var->vsync_len;
- if ((sec_var->xres_virtual * (sec_var->bits_per_pixel >> 3)) & 0x1F) {
- /*Is 32 bytes alignment? */
- /*32 pixel alignment */
- sec_var->xres_virtual = (sec_var->xres_virtual + 31) & ~31;
- }
-
- htotal = sec_var->xres + sec_var->left_margin +
- sec_var->right_margin + sec_var->hsync_len;
- vtotal = sec_var->yres + sec_var->upper_margin +
- sec_var->lower_margin + sec_var->vsync_len;
- long_refresh = 1000000000UL / sec_var->pixclock * 1000;
- long_refresh /= (htotal * vtotal);
-
- viafb_second_xres = sec_var->xres;
- viafb_second_yres = sec_var->yres;
- viafb_second_virtual_xres = sec_var->xres_virtual;
- viafb_second_virtual_yres = sec_var->yres_virtual;
- viafb_bpp1 = sec_var->bits_per_pixel;
- viafb_refresh1 = viafb_get_refresh(sec_var->xres, sec_var->yres,
- long_refresh);
-}
-
-static int apply_device_setting(struct viafb_ioctl_setting setting_info,
- struct fb_info *info)
-{
- int need_set_mode = 0;
- DEBUG_MSG(KERN_INFO "apply_device_setting\n");
-
- if (setting_info.device_flag) {
- need_set_mode = 1;
- check_available_device_to_enable(setting_info.device_status);
- }
-
- /* Unlock LCD's operation according to LCD flag
- and check if the setting value is valid. */
- /* If the value is valid, apply the new setting value to the device. */
- if (viafb_LCD_ON) {
- if (setting_info.lcd_operation_flag & OP_LCD_CENTERING) {
- need_set_mode = 1;
- if (setting_info.lcd_attributes.display_center) {
- /* Centering */
- viaparinfo->lvds_setting_info->display_method =
- LCD_CENTERING;
- viafb_lcd_dsp_method = LCD_CENTERING;
- viaparinfo->lvds_setting_info2->display_method =
- viafb_lcd_dsp_method = LCD_CENTERING;
- } else {
- /* expandsion */
- viaparinfo->lvds_setting_info->display_method =
- LCD_EXPANDSION;
- viafb_lcd_dsp_method = LCD_EXPANDSION;
- viaparinfo->lvds_setting_info2->display_method =
- LCD_EXPANDSION;
- viafb_lcd_dsp_method = LCD_EXPANDSION;
- }
- }
-
- if (setting_info.lcd_operation_flag & OP_LCD_MODE) {
- need_set_mode = 1;
- if (setting_info.lcd_attributes.lcd_mode ==
- LCD_SPWG) {
- viaparinfo->lvds_setting_info->lcd_mode =
- viafb_lcd_mode = LCD_SPWG;
- } else {
- viaparinfo->lvds_setting_info->lcd_mode =
- viafb_lcd_mode = LCD_OPENLDI;
- }
- viaparinfo->lvds_setting_info2->lcd_mode =
- viaparinfo->lvds_setting_info->lcd_mode;
- }
-
- if (setting_info.lcd_operation_flag & OP_LCD_PANEL_ID) {
- need_set_mode = 1;
- if (setting_info.lcd_attributes.panel_id <=
- LCD_PANEL_ID_MAXIMUM) {
- viafb_lcd_panel_id =
- setting_info.lcd_attributes.panel_id;
- viafb_init_lcd_size();
- }
- }
- }
-
- if (0 != (setting_info.samm_status & OP_SAMM)) {
- setting_info.samm_status =
- setting_info.samm_status & (~OP_SAMM);
- if (setting_info.samm_status == 0
- || setting_info.samm_status == 1) {
- viafb_SAMM_ON = setting_info.samm_status;
-
- if (viafb_SAMM_ON)
- viafb_primary_dev = setting_info.primary_device;
-
- via_set_primary_address(0);
- via_set_secondary_address(viafb_SAMM_ON ?
- viafb_second_offset : 0);
- viafb_set_iga_path();
- }
- need_set_mode = 1;
- }
-
- if (!need_set_mode) {
- ;
- } else {
- viafb_set_iga_path();
- viafb_set_par(info);
- }
- return true;
-}
-
static void retrieve_device_setting(struct viafb_ioctl_setting
*setting_info)
{
@@ -1776,10 +1525,6 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
parse_lcd_port();
parse_dvi_port();
- /* for dual-fb must viafb_SAMM_ON=1 and viafb_dual_fb=1 */
- if (!viafb_SAMM_ON)
- viafb_dual_fb = 0;
-
viafb_init_chip_info(vdev->chip_type);
/*
* The framebuffer will have been successfully mapped by
@@ -1823,30 +1568,13 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
parse_mode(viafb_mode1, &viafb_second_xres,
&viafb_second_yres);
- if (0 == viafb_second_virtual_xres) {
- switch (viafb_second_xres) {
- case 1400:
- viafb_second_virtual_xres = 1408;
- break;
- default:
- viafb_second_virtual_xres = viafb_second_xres;
- break;
- }
- }
- if (0 == viafb_second_virtual_yres)
- viafb_second_virtual_yres = viafb_second_yres;
+ viafb_second_virtual_xres = viafb_second_xres;
+ viafb_second_virtual_yres = viafb_second_yres;
}
default_var.xres = default_xres;
default_var.yres = default_yres;
- switch (default_xres) {
- case 1400:
- default_var.xres_virtual = 1408;
- break;
- default:
- default_var.xres_virtual = default_xres;
- break;
- }
+ default_var.xres_virtual = default_xres;
default_var.yres_virtual = default_yres;
default_var.bits_per_pixel = viafb_bpp;
default_var.pixclock =
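
Note: the removed apply_second_mode_setting() derived the secondary head's refresh rate from the fb_var_screeninfo timings before passing it to viafb_get_refresh(). A standalone restatement of that arithmetic, for illustration only (the function name is made up; pixclock is assumed to be in picoseconds per pixel, as usual for fbdev):

#include <linux/fb.h>

static unsigned long refresh_from_var(const struct fb_var_screeninfo *v)
{
	unsigned long htotal = v->xres + v->left_margin +
			       v->right_margin + v->hsync_len;
	unsigned long vtotal = v->yres + v->upper_margin +
			       v->lower_margin + v->vsync_len;
	/* 1e9 / pixclock gives kHz; * 1000 gives the pixel clock in Hz */
	unsigned long pixel_clock_hz = 1000000000UL / v->pixclock * 1000;

	return pixel_clock_hz / (htotal * vtotal);	/* frames per second */
}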
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index d31dc96f838..85d76ec4c63 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -726,7 +726,9 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
/* Prepare startup mode */
+ kparam_block_sysfs_write(mode_option);
rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
+ kparam_unblock_sysfs_write(mode_option);
if (! ((rc == 1) || (rc == 2))) {
rc = -EINVAL;
dev_err(info->device, "mode %s not found\n", mode_option);
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index e66b8b19ce5..d8b12c32e3e 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -858,9 +858,9 @@ unsigned long w100fb_gpio_read(int port)
void w100fb_gpio_write(int port, unsigned long value)
{
if (port==W100_GPIO_PORT_A)
- value = writel(value, remapped_regs + mmGPIO_DATA);
+ writel(value, remapped_regs + mmGPIO_DATA);
else
- value = writel(value, remapped_regs + mmGPIO_DATA2);
+ writel(value, remapped_regs + mmGPIO_DATA2);
}
EXPORT_SYMBOL(w100fb_gpio_read);
EXPORT_SYMBOL(w100fb_gpio_write);
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index fa97d3e7c21..7c7f42a1279 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -684,7 +684,7 @@ static struct xenbus_driver xenfb_driver = {
static int __init xenfb_init(void)
{
- if (!xen_domain())
+ if (!xen_pv_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 574dc54e12d..0c9ce88e95e 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -397,7 +397,7 @@ static int xilinxfb_release(struct device *dev)
*/
static int __devinit
-xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
+xilinxfb_of_probe(struct platform_device *op, const struct of_device_id *match)
{
const u32 *prop;
u32 *p;
@@ -477,7 +477,7 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
return -ENODEV;
}
-static int __devexit xilinxfb_of_remove(struct of_device *op)
+static int __devexit xilinxfb_of_remove(struct platform_device *op)
{
return xilinxfb_release(&op->dev);
}
@@ -485,6 +485,8 @@ static int __devexit xilinxfb_of_remove(struct of_device *op)
/* Match table for of_platform binding */
static struct of_device_id xilinxfb_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-tft-1.00.a", },
+ { .compatible = "xlnx,xps-tft-2.00.a", },
+ { .compatible = "xlnx,xps-tft-2.01.a", },
{ .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
{ .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", },
{},
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index afcfacc9bbe..24efd8ea41b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -73,6 +73,13 @@ config WM8350_WATCHDOG
# ARM Architecture
+config ARM_SP805_WATCHDOG
+ tristate "ARM SP805 Watchdog"
+ depends on ARM_AMBA
+ help
+ ARM Primecell SP805 Watchdog timer. This will reboot your system when
+ the timeout is reached.
+
config AT91RM9200_WATCHDOG
tristate "AT91RM9200 watchdog"
depends on ARCH_AT91RM9200
@@ -206,11 +213,11 @@ config OMAP_WATCHDOG
here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
config PNX4008_WATCHDOG
- tristate "PNX4008 Watchdog"
- depends on ARCH_PNX4008
+ tristate "PNX4008 and LPC32XX Watchdog"
+ depends on ARCH_PNX4008 || ARCH_LPC32XX
help
	  Say Y here to include support for the watchdog timer
- in the PNX4008 processor.
+ in the PNX4008 or LPC32XX processor.
This driver can be built as a module by choosing M. The module
will be called pnx4008_wdt.
@@ -401,6 +408,17 @@ config ALIM7101_WDT
Most people will say N.
+config F71808E_WDT
+ tristate "Fintek F71808E and F71882FG Watchdog"
+ depends on X86 && EXPERIMENTAL
+ help
+ This is the driver for the hardware watchdog on the Fintek
+ F71808E and F71882FG Super I/O controllers.
+
+ You can compile this driver directly into the kernel, or use
+ it as a module. The module will be called f71808e_wdt.
+
+
config GEODE_WDT
tristate "AMD Geode CS5535/CS5536 Watchdog"
depends on CS5535_MFGPT
@@ -556,16 +574,21 @@ config IT87_WDT
be called it87_wdt.
config HP_WATCHDOG
- tristate "HP Proliant iLO 2 Hardware Watchdog Timer"
+ tristate "HP Proliant iLO2+ Hardware Watchdog Timer"
depends on X86
help
A software monitoring watchdog and NMI sourcing driver. This driver
- will detect lockups and provide stack trace. Also, when an NMI
- occurs this driver will make the necessary BIOS calls to log
- the cause of the NMI. This is a driver that will only load on a
- HP ProLiant system with a minimum of iLO2 support.
- To compile this driver as a module, choose M here: the
- module will be called hpwdt.
+ will detect lockups and provide a stack trace. This is a driver that
+	  will only load on an HP ProLiant system with a minimum of iLO2 support.
+ To compile this driver as a module, choose M here: the module will be
+ called hpwdt.
+
+config HPWDT_NMI_DECODING
+ bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
+ depends on HP_WATCHDOG
+ help
+ When an NMI occurs this feature will make the necessary BIOS calls to
+ log the cause of the NMI.
config SC1200_WDT
tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
@@ -875,6 +898,24 @@ config TXX9_WDT
help
Hardware driver for the built-in watchdog timer on TXx9 MIPS SoCs.
+config OCTEON_WDT
+ tristate "Cavium OCTEON SOC family Watchdog Timer"
+ depends on CPU_CAVIUM_OCTEON
+ default y
+ select EXPORT_UASM if OCTEON_WDT = m
+ help
+	  Hardware driver for OCTEON's on-chip watchdog timer.
+	  Enables the watchdog for all cores running Linux. It
+	  installs an NMI handler and pokes the watchdog based on an
+	  interrupt. On the first expiration of the watchdog, the
+	  interrupt handler pokes it. The second expiration causes an
+	  NMI that prints a message. The third expiration causes a
+	  global soft reset.
+
+	  When userspace has /dev/watchdog open, no poking is done
+	  from the first interrupt; the watchdog is then only poked
+	  when the device is written.
+
# PARISC Architecture
# POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 72f3e2073f8..8374503fcc6 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
# ALPHA Architecture
# ARM Architecture
+obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o
obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o
@@ -66,6 +67,7 @@ obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
+obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
obj-$(CONFIG_GEODE_WDT) += geodewdt.o
obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
obj-$(CONFIG_SBC_FITPC2_WATCHDOG) += sbc_fitpc2_wdt.o
@@ -114,6 +116,8 @@ obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
+obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
+octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
# PARISC Architecture
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index d62b9ce8f77..566343b3c13 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -526,7 +526,7 @@ static const struct file_operations cpwd_fops = {
.release = cpwd_release,
};
-static int __devinit cpwd_probe(struct of_device *op,
+static int __devinit cpwd_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device_node *options;
@@ -545,7 +545,7 @@ static int __devinit cpwd_probe(struct of_device *op,
goto out;
}
- p->irq = op->irqs[0];
+ p->irq = op->archdata.irqs[0];
spin_lock_init(&p->lock);
@@ -639,7 +639,7 @@ out_free:
goto out;
}
-static int __devexit cpwd_remove(struct of_device *op)
+static int __devexit cpwd_remove(struct platform_device *op)
{
struct cpwd *p = dev_get_drvdata(&op->dev);
int i;
@@ -688,12 +688,12 @@ static struct of_platform_driver cpwd_driver = {
static int __init cpwd_init(void)
{
- return of_register_driver(&cpwd_driver, &of_bus_type);
+ return of_register_platform_driver(&cpwd_driver);
}
static void __exit cpwd_exit(void)
{
- of_unregister_driver(&cpwd_driver);
+ of_unregister_platform_driver(&cpwd_driver);
}
module_init(cpwd_init);
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
new file mode 100644
index 00000000000..7e5c266cda4
--- /dev/null
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -0,0 +1,768 @@
+/***************************************************************************
+ * Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> *
+ * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> *
+ * Copyright (C) 2010 Giel van Schijndel <me@mortis.eu> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define DRVNAME "f71808e_wdt"
+
+#define SIO_F71808FG_LD_WDT 0x07 /* Watchdog timer logical device */
+#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
+#define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */
+
+#define SIO_REG_LDSEL 0x07 /* Logical device select */
+#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
+#define SIO_REG_DEVREV 0x22 /* Device revision */
+#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
+#define SIO_REG_ENABLE 0x30 /* Logical device enable */
+#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
+
+#define SIO_FINTEK_ID		0x1934	/* Manufacturer's ID */
+#define SIO_F71808_ID 0x0901 /* Chipset ID */
+#define SIO_F71858_ID 0x0507 /* Chipset ID */
+#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71882_ID 0x0541 /* Chipset ID */
+#define SIO_F71889_ID 0x0723 /* Chipset ID */
+
+#define F71882FG_REG_START 0x01
+
+#define F71808FG_REG_WDO_CONF 0xf0
+#define F71808FG_REG_WDT_CONF 0xf5
+#define F71808FG_REG_WD_TIME 0xf6
+
+#define F71808FG_FLAG_WDOUT_EN 7
+
+#define F71808FG_FLAG_WDTMOUT_STS 5
+#define F71808FG_FLAG_WD_EN 5
+#define F71808FG_FLAG_WD_PULSE 4
+#define F71808FG_FLAG_WD_UNIT 3
+
+/* Default values */
+#define WATCHDOG_TIMEOUT 60 /* 1 minute default timeout */
+#define WATCHDOG_MAX_TIMEOUT (60 * 255)
+#define WATCHDOG_PULSE_WIDTH 125 /* 125 ms, default pulse width for
+ watchdog signal */
+
+static unsigned short force_id;
+module_param(force_id, ushort, 0);
+MODULE_PARM_DESC(force_id, "Override the detected device ID");
+
+static const int max_timeout = WATCHDOG_MAX_TIMEOUT;
+static int timeout = 60; /* default timeout in seconds */
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. 1<= timeout <="
+ __MODULE_STRING(WATCHDOG_MAX_TIMEOUT) " (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH;
+module_param(pulse_width, uint, 0);
+MODULE_PARM_DESC(pulse_width,
+ "Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
+ " (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
+
+static unsigned int start_withtimeout;
+module_param(start_withtimeout, uint, 0);
+MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
+ " given initial timeout. Zero (default) disables this feature.");
+
+enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg };
+
+static const char *f71808e_names[] = {
+ "f71808fg",
+ "f71858fg",
+ "f71862fg",
+ "f71882fg",
+ "f71889fg",
+};
+
+/* Super-I/O Function prototypes */
+static inline int superio_inb(int base, int reg);
+static inline int superio_inw(int base, int reg);
+static inline void superio_outb(int base, int reg, u8 val);
+static inline void superio_set_bit(int base, int reg, int bit);
+static inline void superio_clear_bit(int base, int reg, int bit);
+static inline int superio_enter(int base);
+static inline void superio_select(int base, int ld);
+static inline void superio_exit(int base);
+
+struct watchdog_data {
+ unsigned short sioaddr;
+ enum chips type;
+ unsigned long opened;
+ struct mutex lock;
+ char expect_close;
+ struct watchdog_info ident;
+
+ unsigned short timeout;
+ u8 timer_val; /* content for the wd_time register */
+ char minutes_mode;
+ u8 pulse_val; /* pulse width flag */
+ char pulse_mode; /* enable pulse output mode? */
+ char caused_reboot; /* last reboot was by the watchdog */
+};
+
+static struct watchdog_data watchdog = {
+ .lock = __MUTEX_INITIALIZER(watchdog.lock),
+};
+
+/* Super I/O functions */
+static inline int superio_inb(int base, int reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static int superio_inw(int base, int reg)
+{
+ int val;
+ val = superio_inb(base, reg) << 8;
+ val |= superio_inb(base, reg + 1);
+ return val;
+}
+
+static inline void superio_outb(int base, int reg, u8 val)
+{
+ outb(reg, base);
+ outb(val, base + 1);
+}
+
+static inline void superio_set_bit(int base, int reg, int bit)
+{
+ unsigned long val = superio_inb(base, reg);
+ __set_bit(bit, &val);
+ superio_outb(base, reg, val);
+}
+
+static inline void superio_clear_bit(int base, int reg, int bit)
+{
+ unsigned long val = superio_inb(base, reg);
+ __clear_bit(bit, &val);
+ superio_outb(base, reg, val);
+}
+
+static inline int superio_enter(int base)
+{
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_muxed_region(base, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ (int)base);
+ return -EBUSY;
+ }
+
+	/* according to the datasheet the key must be sent twice! */
+ outb(SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
+
+ return 0;
+}
+
+static inline void superio_select(int base, int ld)
+{
+ outb(SIO_REG_LDSEL, base);
+ outb(ld, base + 1);
+}
+
+static inline void superio_exit(int base)
+{
+ outb(SIO_LOCK_KEY, base);
+ release_region(base, 2);
+}
+
+static int watchdog_set_timeout(int timeout)
+{
+ if (timeout <= 0
+ || timeout > max_timeout) {
+ printk(KERN_ERR DRVNAME ": watchdog timeout out of range\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&watchdog.lock);
+
+ watchdog.timeout = timeout;
+ if (timeout > 0xff) {
+ watchdog.timer_val = DIV_ROUND_UP(timeout, 60);
+ watchdog.minutes_mode = true;
+ } else {
+ watchdog.timer_val = timeout;
+ watchdog.minutes_mode = false;
+ }
+
+ mutex_unlock(&watchdog.lock);
+
+ return 0;
+}
+
+static int watchdog_set_pulse_width(unsigned int pw)
+{
+ int err = 0;
+
+ mutex_lock(&watchdog.lock);
+
+ if (pw <= 1) {
+ watchdog.pulse_val = 0;
+ } else if (pw <= 25) {
+ watchdog.pulse_val = 1;
+ } else if (pw <= 125) {
+ watchdog.pulse_val = 2;
+ } else if (pw <= 5000) {
+ watchdog.pulse_val = 3;
+ } else {
+ printk(KERN_ERR DRVNAME ": pulse width out of range\n");
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+
+ watchdog.pulse_mode = pw;
+
+exit_unlock:
+ mutex_unlock(&watchdog.lock);
+ return err;
+}
+
+static int watchdog_keepalive(void)
+{
+ int err = 0;
+
+ mutex_lock(&watchdog.lock);
+ err = superio_enter(watchdog.sioaddr);
+ if (err)
+ goto exit_unlock;
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+
+ if (watchdog.minutes_mode)
+ /* select minutes for timer units */
+ superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_UNIT);
+ else
+ /* select seconds for timer units */
+ superio_clear_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_UNIT);
+
+ /* Set timer value */
+ superio_outb(watchdog.sioaddr, F71808FG_REG_WD_TIME,
+ watchdog.timer_val);
+
+ superio_exit(watchdog.sioaddr);
+
+exit_unlock:
+ mutex_unlock(&watchdog.lock);
+ return err;
+}
+
+static int watchdog_start(void)
+{
+ /* Make sure we don't die as soon as the watchdog is enabled below */
+ int err = watchdog_keepalive();
+ if (err)
+ return err;
+
+ mutex_lock(&watchdog.lock);
+ err = superio_enter(watchdog.sioaddr);
+ if (err)
+ goto exit_unlock;
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+
+ /* Watchdog pin configuration */
+ switch (watchdog.type) {
+ case f71808fg:
+ /* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */
+ superio_clear_bit(watchdog.sioaddr, 0x2a, 3);
+ superio_clear_bit(watchdog.sioaddr, 0x2b, 3);
+ break;
+
+ case f71882fg:
+ /* Set pin 56 to WDTRST# */
+ superio_set_bit(watchdog.sioaddr, 0x29, 1);
+ break;
+
+ default:
+ /*
+ * 'default' label to shut up the compiler and catch
+ * programmer errors
+ */
+ err = -ENODEV;
+ goto exit_superio;
+ }
+
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+ superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0);
+ superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
+ F71808FG_FLAG_WDOUT_EN);
+
+ superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_EN);
+
+ if (watchdog.pulse_mode) {
+ /* Select "pulse" output mode with given duration */
+ u8 wdt_conf = superio_inb(watchdog.sioaddr,
+ F71808FG_REG_WDT_CONF);
+
+ /* Set WD_PSWIDTH bits (1:0) */
+ wdt_conf = (wdt_conf & 0xfc) | (watchdog.pulse_val & 0x03);
+ /* Set WD_PULSE to "pulse" mode */
+ wdt_conf |= BIT(F71808FG_FLAG_WD_PULSE);
+
+ superio_outb(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
+ wdt_conf);
+ } else {
+ /* Select "level" output mode */
+ superio_clear_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_PULSE);
+ }
+
+exit_superio:
+ superio_exit(watchdog.sioaddr);
+exit_unlock:
+ mutex_unlock(&watchdog.lock);
+
+ return err;
+}
+
+static int watchdog_stop(void)
+{
+ int err = 0;
+
+ mutex_lock(&watchdog.lock);
+ err = superio_enter(watchdog.sioaddr);
+ if (err)
+ goto exit_unlock;
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+
+ superio_clear_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_EN);
+
+ superio_exit(watchdog.sioaddr);
+
+exit_unlock:
+ mutex_unlock(&watchdog.lock);
+
+ return err;
+}
+
+static int watchdog_get_status(void)
+{
+ int status = 0;
+
+ mutex_lock(&watchdog.lock);
+ status = (watchdog.caused_reboot) ? WDIOF_CARDRESET : 0;
+ mutex_unlock(&watchdog.lock);
+
+ return status;
+}
+
+static bool watchdog_is_running(void)
+{
+ /*
+	 * If we fail to determine the watchdog's status, assume it is
+	 * running, to be on the safe side.
+ */
+ bool is_running = true;
+
+ mutex_lock(&watchdog.lock);
+ if (superio_enter(watchdog.sioaddr))
+ goto exit_unlock;
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+
+ is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
+ && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
+			& BIT(F71808FG_FLAG_WD_EN));
+
+ superio_exit(watchdog.sioaddr);
+
+exit_unlock:
+ mutex_unlock(&watchdog.lock);
+ return is_running;
+}
+
+/* /dev/watchdog api */
+
+static int watchdog_open(struct inode *inode, struct file *file)
+{
+ int err;
+
+ /* If the watchdog is alive we don't need to start it again */
+ if (test_and_set_bit(0, &watchdog.opened))
+ return -EBUSY;
+
+ err = watchdog_start();
+ if (err) {
+ clear_bit(0, &watchdog.opened);
+ return err;
+ }
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ watchdog.expect_close = 0;
+ return nonseekable_open(inode, file);
+}
+
+static int watchdog_release(struct inode *inode, struct file *file)
+{
+ clear_bit(0, &watchdog.opened);
+
+ if (!watchdog.expect_close) {
+ watchdog_keepalive();
+ printk(KERN_CRIT DRVNAME
+ ": Unexpected close, not stopping watchdog!\n");
+ } else if (!nowayout) {
+ watchdog_stop();
+ }
+ return 0;
+}
+
+/*
+ * watchdog_write:
+ * @file: file handle to the watchdog
+ * @buf: buffer to write
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ * write of data will do, as we don't define content meaning.
+ */
+
+static ssize_t watchdog_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (count) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ bool expect_close = false;
+
+ for (i = 0; i != count; i++) {
+ char c;
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ expect_close = (c == 'V');
+ }
+
+ /* Properly order writes across fork()ed processes */
+ mutex_lock(&watchdog.lock);
+ watchdog.expect_close = expect_close;
+ mutex_unlock(&watchdog.lock);
+ }
+
+ /* someone wrote to us, we should restart timer */
+ watchdog_keepalive();
+ }
+ return count;
+}
+
+/*
+ * watchdog_ioctl:
+ * @inode: inode of the device
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ */
+static long watchdog_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int status;
+ int new_options;
+ int new_timeout;
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+
+ uarg.i = (int __user *)arg;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(uarg.ident, &watchdog.ident,
+ sizeof(watchdog.ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ status = watchdog_get_status();
+ if (status < 0)
+ return status;
+ return put_user(status, uarg.i);
+
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, uarg.i);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, uarg.i))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD)
+ watchdog_stop();
+
+ if (new_options & WDIOS_ENABLECARD)
+ return watchdog_start();
+
+
+ case WDIOC_KEEPALIVE:
+ watchdog_keepalive();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, uarg.i))
+ return -EFAULT;
+
+ if (watchdog_set_timeout(new_timeout))
+ return -EINVAL;
+
+ watchdog_keepalive();
+		/* Fall through */
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(watchdog.timeout, uarg.i);
+
+ default:
+ return -ENOTTY;
+
+ }
+}
+
+static int watchdog_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ watchdog_stop();
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations watchdog_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = watchdog_open,
+ .release = watchdog_release,
+ .write = watchdog_write,
+ .unlocked_ioctl = watchdog_ioctl,
+};
+
+static struct miscdevice watchdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &watchdog_fops,
+};
+
+static struct notifier_block watchdog_notifier = {
+ .notifier_call = watchdog_notify_sys,
+};
+
+static int __init watchdog_init(int sioaddr)
+{
+ int wdt_conf, err = 0;
+
+ /* No need to lock watchdog.lock here because no entry points
+ * into the module have been registered yet.
+ */
+ watchdog.sioaddr = sioaddr;
+ watchdog.ident.options = WDIOC_SETTIMEOUT
+ | WDIOF_MAGICCLOSE
+ | WDIOF_KEEPALIVEPING;
+
+ snprintf(watchdog.ident.identity,
+ sizeof(watchdog.ident.identity), "%s watchdog",
+ f71808e_names[watchdog.type]);
+
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+
+ wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
+	watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
+
+ superio_exit(sioaddr);
+
+ err = watchdog_set_timeout(timeout);
+ if (err)
+ return err;
+ err = watchdog_set_pulse_width(pulse_width);
+ if (err)
+ return err;
+
+ err = register_reboot_notifier(&watchdog_notifier);
+ if (err)
+ return err;
+
+ err = misc_register(&watchdog_miscdev);
+ if (err) {
+ printk(KERN_ERR DRVNAME
+ ": cannot register miscdev on minor=%d\n",
+ watchdog_miscdev.minor);
+ goto exit_reboot;
+ }
+
+ if (start_withtimeout) {
+ if (start_withtimeout <= 0
+ || start_withtimeout > max_timeout) {
+ printk(KERN_ERR DRVNAME
+ ": starting timeout out of range\n");
+ err = -EINVAL;
+ goto exit_miscdev;
+ }
+
+ err = watchdog_start();
+ if (err) {
+ printk(KERN_ERR DRVNAME
+ ": cannot start watchdog timer\n");
+ goto exit_miscdev;
+ }
+
+ mutex_lock(&watchdog.lock);
+ err = superio_enter(sioaddr);
+ if (err)
+ goto exit_unlock;
+ superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
+
+ if (start_withtimeout > 0xff) {
+ /* select minutes for timer units */
+ superio_set_bit(sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_UNIT);
+ superio_outb(sioaddr, F71808FG_REG_WD_TIME,
+ DIV_ROUND_UP(start_withtimeout, 60));
+ } else {
+ /* select seconds for timer units */
+ superio_clear_bit(sioaddr, F71808FG_REG_WDT_CONF,
+ F71808FG_FLAG_WD_UNIT);
+ superio_outb(sioaddr, F71808FG_REG_WD_TIME,
+ start_withtimeout);
+ }
+
+ superio_exit(sioaddr);
+ mutex_unlock(&watchdog.lock);
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ printk(KERN_INFO DRVNAME
+ ": watchdog started with initial timeout of %u sec\n",
+ start_withtimeout);
+ }
+
+ return 0;
+
+exit_unlock:
+ mutex_unlock(&watchdog.lock);
+exit_miscdev:
+ misc_deregister(&watchdog_miscdev);
+exit_reboot:
+ unregister_reboot_notifier(&watchdog_notifier);
+
+ return err;
+}
+
+static int __init f71808e_find(int sioaddr)
+{
+ u16 devid;
+ int err = superio_enter(sioaddr);
+ if (err)
+ return err;
+
+ devid = superio_inw(sioaddr, SIO_REG_MANID);
+ if (devid != SIO_FINTEK_ID) {
+ pr_debug(DRVNAME ": Not a Fintek device\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
+ switch (devid) {
+ case SIO_F71808_ID:
+ watchdog.type = f71808fg;
+ break;
+ case SIO_F71882_ID:
+ watchdog.type = f71882fg;
+ break;
+ case SIO_F71862_ID:
+ case SIO_F71889_ID:
+ /* These have a watchdog, though it isn't implemented (yet). */
+ err = -ENOSYS;
+ goto exit;
+ case SIO_F71858_ID:
+ /* Confirmed (by datasheet) not to have a watchdog. */
+ err = -ENODEV;
+ goto exit;
+ default:
+ printk(KERN_INFO DRVNAME ": Unrecognized Fintek device: %04x\n",
+ (unsigned int)devid);
+ err = -ENODEV;
+ goto exit;
+ }
+
+ printk(KERN_INFO DRVNAME ": Found %s watchdog chip, revision %d\n",
+ f71808e_names[watchdog.type],
+ (int)superio_inb(sioaddr, SIO_REG_DEVREV));
+exit:
+ superio_exit(sioaddr);
+ return err;
+}
+
+static int __init f71808e_init(void)
+{
+ static const unsigned short addrs[] = { 0x2e, 0x4e };
+ int err = -ENODEV;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addrs); i++) {
+ err = f71808e_find(addrs[i]);
+ if (err == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(addrs))
+ return err;
+
+ return watchdog_init(addrs[i]);
+}
+
+static void __exit f71808e_exit(void)
+{
+ if (watchdog_is_running()) {
+ printk(KERN_WARNING DRVNAME
+ ": Watchdog timer still running, stopping it\n");
+ watchdog_stop();
+ }
+ misc_deregister(&watchdog_miscdev);
+ unregister_reboot_notifier(&watchdog_notifier);
+}
+
+MODULE_DESCRIPTION("F71808E Watchdog Driver");
+MODULE_AUTHOR("Giel van Schijndel <me@mortis.eu>");
+MODULE_LICENSE("GPL");
+
+module_init(f71808e_init);
+module_exit(f71808e_exit);
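
Note: watchdog_set_timeout() in the new driver folds timeouts above 255 seconds into minute units so the value still fits the 8-bit WD_TIME register, toggling the WD_UNIT flag accordingly. An illustrative standalone sketch of that mapping (the names below are made up for this note and are not driver API):

#include <stdbool.h>

#define WD_MAX_TIMEOUT	(60 * 255)	/* mirrors WATCHDOG_MAX_TIMEOUT */

struct wd_cfg {
	unsigned char timer_val;	/* value written to F71808FG_REG_WD_TIME */
	bool minutes_mode;		/* state of F71808FG_FLAG_WD_UNIT */
};

static int wd_pick_units(int timeout, struct wd_cfg *cfg)
{
	if (timeout <= 0 || timeout > WD_MAX_TIMEOUT)
		return -1;				/* out of range */

	if (timeout > 0xff) {
		cfg->timer_val = (timeout + 59) / 60;	/* DIV_ROUND_UP(timeout, 60) */
		cfg->minutes_mode = true;		/* count in minutes */
	} else {
		cfg->timer_val = timeout;
		cfg->minutes_mode = false;		/* count in seconds */
	}
	return 0;
}

On release the driver follows the usual /dev/watchdog convention: it only stops the timer if the last write contained the magic character 'V' and nowayout is unset; otherwise it keeps the watchdog running and logs the unexpected close.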
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 1df284f9c2a..9c21d19043a 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -260,7 +260,7 @@ static struct miscdevice gef_wdt_miscdev = {
};
-static int __devinit gef_wdt_probe(struct of_device *dev,
+static int __devinit gef_wdt_probe(struct platform_device *dev,
const struct of_device_id *match)
{
int timeout = 10;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 809e7167a62..3d77116e463 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -16,38 +16,55 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/nmi.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/kdebug.h>
#include <linux/moduleparam.h>
-#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <linux/reboot.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
+#ifdef CONFIG_HPWDT_NMI_DECODING
#include <linux/dmi.h>
-#include <linux/efi.h>
-#include <linux/string.h>
-#include <linux/bootmem.h>
-#include <asm/desc.h>
+#include <linux/spinlock.h>
+#include <linux/nmi.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
#include <asm/cacheflush.h>
+#endif /* CONFIG_HPWDT_NMI_DECODING */
+
+#define HPWDT_VERSION "1.2.0"
+#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
+#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
+#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
+#define DEFAULT_MARGIN 30
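The new conversion macros above treat each hpwdt timer tick as 128 ms. A quick user-space sketch of the same arithmetic, with the constants copied from the hunk above (nothing here is part of the patch):

/* Sanity check of the hpwdt tick conversions, assuming 128 ms per tick. */
#include <stdio.h>

#define SECS_TO_TICKS(secs)	((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks)	((ticks) * 128 / 1000)

int main(void)
{
	unsigned int reload = SECS_TO_TICKS(30);	/* DEFAULT_MARGIN -> 234 ticks */
	unsigned int max = TICKS_TO_SECS(65535);	/* HPWDT_MAX_TIMER -> 8388 s */

	printf("reload=%u ticks, max timeout=%u s\n", reload, max);
	return 0;
}

With these limits, hpwdt_change_timer() below accepts any margin from 1 second up to TICKS_TO_SECS(65535) = 8388 seconds instead of the old arbitrary 5-600 range.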
+
+static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
+static unsigned int reload; /* the computed soft_margin */
+static int nowayout = WATCHDOG_NOWAYOUT;
+static char expect_release;
+static unsigned long hpwdt_is_open;
+
+static void __iomem *pci_mem_addr; /* the PCI-memory address */
+static unsigned long __iomem *hpwdt_timer_reg;
+static unsigned long __iomem *hpwdt_timer_con;
+static struct pci_device_id hpwdt_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
+ { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
+ {0}, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, hpwdt_devices);
+
+#ifdef CONFIG_HPWDT_NMI_DECODING
#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
#define PCI_BIOS32_PARAGRAPH_LEN 16
#define PCI_ROM_BASE1 0x000F0000
#define ROM_SIZE 0x10000
-#define HPWDT_VERSION "1.1.1"
struct bios32_service_dir {
u32 signature;
@@ -112,37 +129,17 @@ struct cmn_registers {
u32 reflags;
} __attribute__((packed));
-#define DEFAULT_MARGIN 30
-static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
-static unsigned int reload; /* the computed soft_margin */
-static int nowayout = WATCHDOG_NOWAYOUT;
-static char expect_release;
-static unsigned long hpwdt_is_open;
+static unsigned int hpwdt_nmi_decoding;
static unsigned int allow_kdump;
-static unsigned int hpwdt_nmi_sourcing;
static unsigned int priority; /* hpwdt at end of die_notify list */
-
-static void __iomem *pci_mem_addr; /* the PCI-memory address */
-static unsigned long __iomem *hpwdt_timer_reg;
-static unsigned long __iomem *hpwdt_timer_con;
-
static DEFINE_SPINLOCK(rom_lock);
-
static void *cru_rom_addr;
-
static struct cmn_registers cmn_regs;
-static struct pci_device_id hpwdt_devices[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) },
- { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) },
- {0}, /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, hpwdt_devices);
-
extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
unsigned long *pRomEntry);
-#ifndef CONFIG_X86_64
+#ifdef CONFIG_X86_32
/* --32 Bit Bios------------------------------------------------------------ */
#define HPWDT_ARCH 32
@@ -246,8 +243,8 @@ static int __devinit cru_detect(unsigned long map_entry,
physical_bios_offset);
printk(KERN_DEBUG "hpwdt: CRU Length: 0x%lx\n",
cru_length);
- printk(KERN_DEBUG "hpwdt: CRU Mapped Address: 0x%x\n",
- (unsigned int)&cru_rom_addr);
+ printk(KERN_DEBUG "hpwdt: CRU Mapped Address: %p\n",
+ &cru_rom_addr);
}
iounmap(bios32_map);
return retval;
@@ -331,8 +328,9 @@ static int __devinit detect_cru_service(void)
iounmap(p);
return rc;
}
-
-#else
+/* ------------------------------------------------------------------------- */
+#endif /* CONFIG_X86_32 */
+#ifdef CONFIG_X86_64
/* --64 Bit Bios------------------------------------------------------------ */
#define HPWDT_ARCH 64
@@ -410,17 +408,16 @@ static int __devinit detect_cru_service(void)
/* if cru_rom_addr has been set then we found a CRU service */
return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
}
-
/* ------------------------------------------------------------------------- */
-
-#endif
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_HPWDT_NMI_DECODING */
/*
* Watchdog operations
*/
static void hpwdt_start(void)
{
- reload = (soft_margin * 1000) / 128;
+ reload = SECS_TO_TICKS(soft_margin);
iowrite16(reload, hpwdt_timer_reg);
iowrite16(0x85, hpwdt_timer_con);
}
@@ -441,8 +438,7 @@ static void hpwdt_ping(void)
static int hpwdt_change_timer(int new_margin)
{
- /* Arbitrary, can't find the card's limits */
- if (new_margin < 5 || new_margin > 600) {
+ if (new_margin < 1 || new_margin > HPWDT_MAX_TIMER) {
printk(KERN_WARNING
"hpwdt: New value passed in is invalid: %d seconds.\n",
new_margin);
@@ -453,11 +449,17 @@ static int hpwdt_change_timer(int new_margin)
printk(KERN_DEBUG
"hpwdt: New timer passed in is %d seconds.\n",
new_margin);
- reload = (soft_margin * 1000) / 128;
+ reload = SECS_TO_TICKS(soft_margin);
return 0;
}
+static int hpwdt_time_left(void)
+{
+ return TICKS_TO_SECS(ioread16(hpwdt_timer_reg));
+}
+
+#ifdef CONFIG_HPWDT_NMI_DECODING
/*
* NMI Handler
*/
@@ -468,26 +470,29 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
static int die_nmi_called;
if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
- return NOTIFY_OK;
-
- if (hpwdt_nmi_sourcing) {
- spin_lock_irqsave(&rom_lock, rom_pl);
- if (!die_nmi_called)
- asminline_call(&cmn_regs, cru_rom_addr);
- die_nmi_called = 1;
- spin_unlock_irqrestore(&rom_lock, rom_pl);
- if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
- "but unable to determine source.\n");
- } else {
- if (allow_kdump)
- hpwdt_stop();
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
- }
+ goto out;
+
+ if (!hpwdt_nmi_decoding)
+ goto out;
+
+ spin_lock_irqsave(&rom_lock, rom_pl);
+ if (!die_nmi_called)
+ asminline_call(&cmn_regs, cru_rom_addr);
+ die_nmi_called = 1;
+ spin_unlock_irqrestore(&rom_lock, rom_pl);
+ if (cmn_regs.u1.ral == 0) {
+ printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ "but unable to determine source.\n");
+ } else {
+ if (allow_kdump)
+ hpwdt_stop();
+ panic("An NMI occurred, please see the Integrated "
+ "Management Log for details.\n");
}
+out:
return NOTIFY_OK;
}
+#endif /* CONFIG_HPWDT_NMI_DECODING */
/*
* /dev/watchdog handling
@@ -557,7 +562,7 @@ static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
- .identity = "HP iLO2 HW Watchdog Timer",
+ .identity = "HP iLO2+ HW Watchdog Timer",
};
static long hpwdt_ioctl(struct file *file, unsigned int cmd,
@@ -599,6 +604,10 @@ static long hpwdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_GETTIMEOUT:
ret = put_user(soft_margin, p);
break;
+
+ case WDIOC_GETTIMELEFT:
+ ret = put_user(hpwdt_time_left(), p);
+ break;
}
return ret;
}
@@ -621,80 +630,45 @@ static struct miscdevice hpwdt_miscdev = {
.fops = &hpwdt_fops,
};
+#ifdef CONFIG_HPWDT_NMI_DECODING
static struct notifier_block die_notifier = {
.notifier_call = hpwdt_pretimeout,
.priority = 0,
};
+#endif /* CONFIG_HPWDT_NMI_DECODING */
/*
* Init & Exit
*/
+#ifdef CONFIG_HPWDT_NMI_DECODING
#ifdef ARCH_HAS_NMI_WATCHDOG
-static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
+static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
{
/*
* If nmi_watchdog is turned off then we can turn on
- * our nmi sourcing capability.
+ * our nmi decoding capability.
*/
if (!nmi_watchdog_active())
- hpwdt_nmi_sourcing = 1;
+ hpwdt_nmi_decoding = 1;
else
- dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this "
+ dev_warn(&dev->dev, "NMI decoding is disabled. To enable this "
"functionality you must reboot with nmi_watchdog=0 "
"and load the hpwdt driver with priority=1.\n");
}
#else
-static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
+static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
{
- dev_warn(&dev->dev, "NMI sourcing is disabled. "
+ dev_warn(&dev->dev, "NMI decoding is disabled. "
"Your kernel does not support a NMI Watchdog.\n");
}
-#endif
+#endif /* ARCH_HAS_NMI_WATCHDOG */
-static int __devinit hpwdt_init_one(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
int retval;
/*
- * Check if we can do NMI sourcing or not
- */
- hpwdt_check_nmi_sourcing(dev);
-
- /*
- * First let's find out if we are on an iLO2 server. We will
- * not run on a legacy ASM box.
- * So we only support the G5 ProLiant servers and higher.
- */
- if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
- dev_warn(&dev->dev,
- "This server does not have an iLO2 ASIC.\n");
- return -ENODEV;
- }
-
- if (pci_enable_device(dev)) {
- dev_warn(&dev->dev,
- "Not possible to enable PCI Device: 0x%x:0x%x.\n",
- ent->vendor, ent->device);
- return -ENODEV;
- }
-
- pci_mem_addr = pci_iomap(dev, 1, 0x80);
- if (!pci_mem_addr) {
- dev_warn(&dev->dev,
- "Unable to detect the iLO2 server memory.\n");
- retval = -ENOMEM;
- goto error_pci_iomap;
- }
- hpwdt_timer_reg = pci_mem_addr + 0x70;
- hpwdt_timer_con = pci_mem_addr + 0x72;
-
- /* Make sure that we have a valid soft_margin */
- if (hpwdt_change_timer(soft_margin))
- hpwdt_change_timer(DEFAULT_MARGIN);
-
- /*
* We need to map the ROM to get the CRU service.
* For 32 bit Operating Systems we need to go through the 32 Bit
* BIOS Service Directory
@@ -705,7 +679,7 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
dev_warn(&dev->dev,
"Unable to detect the %d Bit CRU Service.\n",
HPWDT_ARCH);
- goto error_get_cru;
+ return retval;
}
/*
@@ -728,9 +702,87 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
dev_warn(&dev->dev,
"Unable to register a die notifier (err=%d).\n",
retval);
- goto error_die_notifier;
+ if (cru_rom_addr)
+ iounmap(cru_rom_addr);
}
+ dev_info(&dev->dev,
+ "HP Watchdog Timer Driver: NMI decoding initialized"
+ ", allow kernel dump: %s (default = 0/OFF)"
+ ", priority: %s (default = 0/LAST).\n",
+ (allow_kdump == 0) ? "OFF" : "ON",
+ (priority == 0) ? "LAST" : "FIRST");
+ return 0;
+}
+
+static void __devexit hpwdt_exit_nmi_decoding(void)
+{
+ unregister_die_notifier(&die_notifier);
+ if (cru_rom_addr)
+ iounmap(cru_rom_addr);
+}
+#else /* !CONFIG_HPWDT_NMI_DECODING */
+static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
+{
+}
+
+static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static void __devexit hpwdt_exit_nmi_decoding(void)
+{
+}
+#endif /* CONFIG_HPWDT_NMI_DECODING */
+
+static int __devinit hpwdt_init_one(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ int retval;
+
+ /*
+ * Check if we can do NMI decoding or not
+ */
+ hpwdt_check_nmi_decoding(dev);
+
+ /*
+ * First let's find out if we are on an iLO2+ server. We will
+ * not run on a legacy ASM box.
+ * So we only support the G5 ProLiant servers and higher.
+ */
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
+ dev_warn(&dev->dev,
+ "This server does not have an iLO2+ ASIC.\n");
+ return -ENODEV;
+ }
+
+ if (pci_enable_device(dev)) {
+ dev_warn(&dev->dev,
+ "Not possible to enable PCI Device: 0x%x:0x%x.\n",
+ ent->vendor, ent->device);
+ return -ENODEV;
+ }
+
+ pci_mem_addr = pci_iomap(dev, 1, 0x80);
+ if (!pci_mem_addr) {
+ dev_warn(&dev->dev,
+ "Unable to detect the iLO2+ server memory.\n");
+ retval = -ENOMEM;
+ goto error_pci_iomap;
+ }
+ hpwdt_timer_reg = pci_mem_addr + 0x70;
+ hpwdt_timer_con = pci_mem_addr + 0x72;
+
+ /* Make sure that we have a valid soft_margin */
+ if (hpwdt_change_timer(soft_margin))
+ hpwdt_change_timer(DEFAULT_MARGIN);
+
+ /* Initialize NMI Decoding functionality */
+ retval = hpwdt_init_nmi_decoding(dev);
+ if (retval != 0)
+ goto error_init_nmi_decoding;
+
retval = misc_register(&hpwdt_miscdev);
if (retval < 0) {
dev_warn(&dev->dev,
@@ -739,23 +791,14 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
goto error_misc_register;
}
- printk(KERN_INFO
- "hp Watchdog Timer Driver: %s"
- ", timer margin: %d seconds (nowayout=%d)"
- ", allow kernel dump: %s (default = 0/OFF)"
- ", priority: %s (default = 0/LAST).\n",
- HPWDT_VERSION, soft_margin, nowayout,
- (allow_kdump == 0) ? "OFF" : "ON",
- (priority == 0) ? "LAST" : "FIRST");
-
+ dev_info(&dev->dev, "HP Watchdog Timer Driver: %s"
+ ", timer margin: %d seconds (nowayout=%d).\n",
+ HPWDT_VERSION, soft_margin, nowayout);
return 0;
error_misc_register:
- unregister_die_notifier(&die_notifier);
-error_die_notifier:
- if (cru_rom_addr)
- iounmap(cru_rom_addr);
-error_get_cru:
+ hpwdt_exit_nmi_decoding();
+error_init_nmi_decoding:
pci_iounmap(dev, pci_mem_addr);
error_pci_iomap:
pci_disable_device(dev);
@@ -768,10 +811,7 @@ static void __devexit hpwdt_exit(struct pci_dev *dev)
hpwdt_stop();
misc_deregister(&hpwdt_miscdev);
- unregister_die_notifier(&die_notifier);
-
- if (cru_rom_addr)
- iounmap(cru_rom_addr);
+ hpwdt_exit_nmi_decoding();
pci_iounmap(dev, pci_mem_addr);
pci_disable_device(dev);
}
@@ -802,16 +842,18 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
-module_param(allow_kdump, int, 0);
-MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
-
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+#ifdef CONFIG_HPWDT_NMI_DECODING
+module_param(allow_kdump, int, 0);
+MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
+
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "The hpwdt driver handles NMIs first or last"
" (default = 0/Last)\n");
+#endif /* !CONFIG_HPWDT_NMI_DECODING */
module_init(hpwdt_init);
module_exit(hpwdt_cleanup);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 4cda64dd309..8fa213cdb49 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -185,7 +185,7 @@ static struct miscdevice mpc8xxx_wdt_miscdev = {
.fops = &mpc8xxx_wdt_fops,
};
-static int __devinit mpc8xxx_wdt_probe(struct of_device *ofdev,
+static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
int ret;
@@ -238,7 +238,7 @@ err_unmap:
return ret;
}
-static int __devexit mpc8xxx_wdt_remove(struct of_device *ofdev)
+static int __devexit mpc8xxx_wdt_remove(struct platform_device *ofdev)
{
mpc8xxx_wdt_pr_warn("watchdog removed");
del_timer_sync(&wdt_timer);
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
new file mode 100644
index 00000000000..2a410170eca
--- /dev/null
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -0,0 +1,745 @@
+/*
+ * Octeon Watchdog driver
+ *
+ * Copyright (C) 2007, 2008, 2009, 2010 Cavium Networks
+ *
+ * Some parts derived from wdt.c
+ *
+ * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
+ * warranty for any of this software. This material is provided
+ * "AS-IS" and at no charge.
+ *
+ * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ *
+ * The OCTEON watchdog has a maximum timeout of 2^32 * io_clock.
+ * For most systems this is less than 10 seconds, so to allow for
+ * software to request longer watchdog heartbeats, we maintain software
+ * counters to count multiples of the base rate. If the system locks
+ * up in such a manner that we can not run the software counters, the
+ * only result is a watchdog reset sooner than was requested. But
+ * that is OK, because in this case userspace would likely not be able
+ * to do anything anyhow.
+ *
+ * We call the hardware watchdog interval the period. The OCTEON
+ * watchdog goes through several stages: after the first period an
+ * irq is asserted; if the watchdog is not reset, an NMI is asserted
+ * after the next period; and after one more period a chip-wide soft
+ * reset occurs. So for the software counters, we reset the watchdog
+ * after each period and decrement the counter. But for the last two periods we need to
+ * let the watchdog progress to the NMI stage so we disable the irq
+ * and let it proceed. Once in the NMI, we print the register state
+ * to the serial port and then wait for the reset.
+ *
+ * A watchdog is maintained for each CPU in the system, that way if
+ * one CPU suffers a lockup, we also get a register dump and reset.
+ * The userspace ping resets the watchdog on all CPUs.
+ *
+ * Before userspace opens the watchdog device, we still run the
+ * watchdogs to catch any lockups that may be kernel related.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/watchdog.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+
+#include <asm/mipsregs.h>
+#include <asm/uasm.h>
+
+#include <asm/octeon/octeon.h>
+
+/* The count needed to achieve timeout_sec. */
+static unsigned int timeout_cnt;
+
+/* The maximum period supported. */
+static unsigned int max_timeout_sec;
+
+/* The current period. */
+static unsigned int timeout_sec;
+
+/* Set to non-zero when userspace countdown mode is active */
+static int do_coundown;
+static unsigned int countdown_reset;
+static unsigned int per_cpu_countdown[NR_CPUS];
+
+static cpumask_t irq_enabled_cpus;
+
+#define WD_TIMO 60 /* Default heartbeat = 60 seconds */
+
+static int heartbeat = WD_TIMO;
+module_param(heartbeat, int, S_IRUGO);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat in seconds. (0 < heartbeat, default="
+ __MODULE_STRING(WD_TIMO) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, S_IRUGO);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned long octeon_wdt_is_open;
+static char expect_close;
+
+static u32 __initdata nmi_stage1_insns[64];
+/* We need one branch and therefore one relocation per target label. */
+static struct uasm_label __initdata labels[5];
+static struct uasm_reloc __initdata relocs[5];
+
+enum lable_id {
+ label_enter_bootloader = 1
+};
+
+/* Some CP0 registers */
+#define K0 26
+#define C0_CVMMEMCTL 11, 7
+#define C0_STATUS 12, 0
+#define C0_EBASE 15, 1
+#define C0_DESAVE 31, 0
+
+void octeon_wdt_nmi_stage2(void);
+
+static void __init octeon_wdt_build_stage1(void)
+{
+ int i;
+ int len;
+ u32 *p = nmi_stage1_insns;
+#ifdef CONFIG_HOTPLUG_CPU
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+#endif
+
+ /*
+ * For the next few instructions running the debugger may
+ * cause corruption of k0 in the saved registers. Since we're
+ * about to crash, nobody probably cares.
+ *
+ * Save K0 into the debug scratch register
+ */
+ uasm_i_dmtc0(&p, K0, C0_DESAVE);
+
+ uasm_i_mfc0(&p, K0, C0_STATUS);
+#ifdef CONFIG_HOTPLUG_CPU
+ uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI), label_enter_bootloader);
+#endif
+ /* Force 64-bit addressing enabled */
+ uasm_i_ori(&p, K0, K0, ST0_UX | ST0_SX | ST0_KX);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ uasm_i_mfc0(&p, K0, C0_EBASE);
+ /* Coreid number in K0 */
+ uasm_i_andi(&p, K0, K0, 0xf);
+ /* 8 * coreid in bits 16-31 */
+ uasm_i_dsll_safe(&p, K0, K0, 3 + 16);
+ uasm_i_ori(&p, K0, K0, 0x8001);
+ uasm_i_dsll_safe(&p, K0, K0, 16);
+ uasm_i_ori(&p, K0, K0, 0x0700);
+ uasm_i_drotr_safe(&p, K0, K0, 32);
+ /*
+ * Should result in: 0x8001,0700,0000,8*coreid which is
+ * CVMX_CIU_WDOGX(coreid) - 0x0500
+ *
+ * Now ld K0, CVMX_CIU_WDOGX(coreid)
+ */
+ uasm_i_ld(&p, K0, 0x500, K0);
+ /*
+ * If bit one is set, handle the NMI as a watchdog event;
+ * otherwise transfer control to the bootloader.
+ */
+ uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader);
+ uasm_i_nop(&p);
+#endif
+
+ /* Clear Dcache so cvmseg works right. */
+ uasm_i_cache(&p, 1, 0, 0);
+
+ /* Use K0 to do a read/modify/write of CVMMEMCTL */
+ uasm_i_dmfc0(&p, K0, C0_CVMMEMCTL);
+ /* Clear out the size of CVMSEG */
+ uasm_i_dins(&p, K0, 0, 0, 6);
+ /* Set CVMSEG to its largest value */
+ uasm_i_ori(&p, K0, K0, 0x1c0 | 54);
+ /* Store the CVMMEMCTL value */
+ uasm_i_dmtc0(&p, K0, C0_CVMMEMCTL);
+
+ /* Load the address of the second stage handler */
+ UASM_i_LA(&p, K0, (long)octeon_wdt_nmi_stage2);
+ uasm_i_jr(&p, K0);
+ uasm_i_dmfc0(&p, K0, C0_DESAVE);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ uasm_build_label(&l, p, label_enter_bootloader);
+ /* Jump to the bootloader and restore K0 */
+ UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr);
+ uasm_i_jr(&p, K0);
+ uasm_i_dmfc0(&p, K0, C0_DESAVE);
+#endif
+ uasm_resolve_relocs(relocs, labels);
+
+ len = (int)(p - nmi_stage1_insns);
+ pr_debug("Synthesized NMI stage 1 handler (%d instructions).\n", len);
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+ for (i = 0; i < len; i++)
+ pr_debug("\t.word 0x%08x\n", nmi_stage1_insns[i]);
+ pr_debug("\t.set pop\n");
+
+ if (len > 32)
+ panic("NMI stage 1 handler exceeds 32 instructions, was %d\n", len);
+}
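The in-line comment above claims the ori/shift/rotate sequence yields 0x8001,0700,0000,8*coreid. A minimal user-space sketch of the same bit manipulation; the coreid value is chosen arbitrarily for illustration:

/* Mirrors the uasm sequence above; coreid = 3 is just an example. */
#include <stdio.h>
#include <stdint.h>

static uint64_t rotr64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}

int main(void)
{
	uint64_t coreid = 3;
	uint64_t k0 = coreid & 0xf;	/* low bits of EBASE */

	k0 <<= 3 + 16;			/* 8 * coreid in bits 16-31 */
	k0 |= 0x8001;
	k0 <<= 16;
	k0 |= 0x0700;
	k0 = rotr64(k0, 32);		/* drotr 32 */

	/* Prints 0x8001070000000018, i.e. CVMX_CIU_WDOGX(3) - 0x500 */
	printf("0x%016llx\n", (unsigned long long)k0);
	return 0;
}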
+
+static int cpu2core(int cpu)
+{
+#ifdef CONFIG_SMP
+ return cpu_logical_map(cpu);
+#else
+ return cvmx_get_core_num();
+#endif
+}
+
+static int core2cpu(int coreid)
+{
+#ifdef CONFIG_SMP
+ return cpu_number_map(coreid);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * Poke the watchdog when an interrupt is received
+ *
+ * @cpl: Interrupt number of the per-core watchdog irq
+ * @dev_id: Cookie passed to request_irq() (unused here)
+ *
+ * Returns IRQ_HANDLED.
+ */
+static irqreturn_t octeon_wdt_poke_irq(int cpl, void *dev_id)
+{
+ unsigned int core = cvmx_get_core_num();
+ int cpu = core2cpu(core);
+
+ if (do_coundown) {
+ if (per_cpu_countdown[cpu] > 0) {
+ /* We're alive, poke the watchdog */
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
+ per_cpu_countdown[cpu]--;
+ } else {
+ /* Bad news, you are about to reboot. */
+ disable_irq_nosync(cpl);
+ cpumask_clear_cpu(cpu, &irq_enabled_cpus);
+ }
+ } else {
+ /* Not open, just ping away... */
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
+ }
+ return IRQ_HANDLED;
+}
+
+/* From setup.c */
+extern int prom_putchar(char c);
+
+/**
+ * Write a string to the uart
+ *
+ * @str: String to write
+ */
+static void octeon_wdt_write_string(const char *str)
+{
+ /* Just loop writing one byte at a time */
+ while (*str)
+ prom_putchar(*str++);
+}
+
+/**
+ * Write a hex number out to the uart
+ *
+ * @value: Number to display
+ * @digits: Number of digits to print (1 to 16)
+ */
+static void octeon_wdt_write_hex(u64 value, int digits)
+{
+ int d;
+ int v;
+ for (d = 0; d < digits; d++) {
+ v = (value >> ((digits - d - 1) * 4)) & 0xf;
+ if (v >= 10)
+ prom_putchar('a' + v - 10);
+ else
+ prom_putchar('0' + v);
+ }
+}
+
+const char *reg_name[] = {
+ "$0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra"
+};
+
+/**
+ * NMI stage 3 handler. NMIs are handled in the following manner:
+ * 1) The first NMI handler enables CVMSEG and transfers from
+ * the bootbus region into normal memory. It is careful to not
+ * destroy any registers.
+ * 2) The second stage handler uses CVMSEG to save the registers
+ * and create a stack for C code. It then calls the third level
+ * handler with one argument, a pointer to the register values.
+ * 3) The third, and final, level handler is the following C
+ * function that prints out some useful information.
+ *
+ * @reg: Pointer to register state before the NMI
+ */
+void octeon_wdt_nmi_stage3(u64 reg[32])
+{
+ u64 i;
+
+ unsigned int coreid = cvmx_get_core_num();
+ /*
+ * Save status and cause early to get them before any changes
+ * might happen.
+ */
+ u64 cp0_cause = read_c0_cause();
+ u64 cp0_status = read_c0_status();
+ u64 cp0_error_epc = read_c0_errorepc();
+ u64 cp0_epc = read_c0_epc();
+
+ /* Delay so output from all cores is not jumbled together. */
+ __delay(100000000ull * coreid);
+
+ octeon_wdt_write_string("\r\n*** NMI Watchdog interrupt on Core 0x");
+ octeon_wdt_write_hex(coreid, 1);
+ octeon_wdt_write_string(" ***\r\n");
+ for (i = 0; i < 32; i++) {
+ octeon_wdt_write_string("\t");
+ octeon_wdt_write_string(reg_name[i]);
+ octeon_wdt_write_string("\t0x");
+ octeon_wdt_write_hex(reg[i], 16);
+ if (i & 1)
+ octeon_wdt_write_string("\r\n");
+ }
+ octeon_wdt_write_string("\terr_epc\t0x");
+ octeon_wdt_write_hex(cp0_error_epc, 16);
+
+ octeon_wdt_write_string("\tepc\t0x");
+ octeon_wdt_write_hex(cp0_epc, 16);
+ octeon_wdt_write_string("\r\n");
+
+ octeon_wdt_write_string("\tstatus\t0x");
+ octeon_wdt_write_hex(cp0_status, 16);
+ octeon_wdt_write_string("\tcause\t0x");
+ octeon_wdt_write_hex(cp0_cause, 16);
+ octeon_wdt_write_string("\r\n");
+
+ octeon_wdt_write_string("\tsum0\t0x");
+ octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_SUM0(coreid * 2)), 16);
+ octeon_wdt_write_string("\ten0\t0x");
+ octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)), 16);
+ octeon_wdt_write_string("\r\n");
+
+ octeon_wdt_write_string("*** Chip soft reset soon ***\r\n");
+}
+
+static void octeon_wdt_disable_interrupt(int cpu)
+{
+ unsigned int core;
+ unsigned int irq;
+ union cvmx_ciu_wdogx ciu_wdog;
+
+ core = cpu2core(cpu);
+
+ irq = OCTEON_IRQ_WDOG0 + core;
+
+ /* Poke the watchdog to clear out its state */
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
+
+ /* Disable the hardware. */
+ ciu_wdog.u64 = 0;
+ cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
+
+ free_irq(irq, octeon_wdt_poke_irq);
+}
+
+static void octeon_wdt_setup_interrupt(int cpu)
+{
+ unsigned int core;
+ unsigned int irq;
+ union cvmx_ciu_wdogx ciu_wdog;
+
+ core = cpu2core(cpu);
+
+ /* Disable it before doing anything with the interrupts. */
+ ciu_wdog.u64 = 0;
+ cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
+
+ per_cpu_countdown[cpu] = countdown_reset;
+
+ irq = OCTEON_IRQ_WDOG0 + core;
+
+ if (request_irq(irq, octeon_wdt_poke_irq,
+ IRQF_DISABLED, "octeon_wdt", octeon_wdt_poke_irq))
+ panic("octeon_wdt: Couldn't obtain irq %d", irq);
+
+ cpumask_set_cpu(cpu, &irq_enabled_cpus);
+
+ /* Poke the watchdog to clear out its state */
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
+
+ /* Finally enable the watchdog now that all handlers are installed */
+ ciu_wdog.u64 = 0;
+ ciu_wdog.s.len = timeout_cnt;
+ ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */
+ cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
+}
+
+static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ octeon_wdt_disable_interrupt(cpu);
+ break;
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ octeon_wdt_setup_interrupt(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static void octeon_wdt_ping(void)
+{
+ int cpu;
+ int coreid;
+
+ for_each_online_cpu(cpu) {
+ coreid = cpu2core(cpu);
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
+ per_cpu_countdown[cpu] = countdown_reset;
+ if ((countdown_reset || !do_coundown) &&
+ !cpumask_test_cpu(cpu, &irq_enabled_cpus)) {
+ /* We have to enable the irq */
+ int irq = OCTEON_IRQ_WDOG0 + coreid;
+ enable_irq(irq);
+ cpumask_set_cpu(cpu, &irq_enabled_cpus);
+ }
+ }
+}
+
+static void octeon_wdt_calc_parameters(int t)
+{
+ unsigned int periods;
+
+ timeout_sec = max_timeout_sec;
+
+
+ /*
+ * Find the largest interrupt period that evenly divides
+ * the requested heartbeat time.
+ */
+ while ((t % timeout_sec) != 0)
+ timeout_sec--;
+
+ periods = t / timeout_sec;
+
+ /*
+ * The last two periods are spent with the irq disabled and then
+ * in the NMI stage, so we subtract them off.
+ */
+
+ countdown_reset = periods > 2 ? periods - 2 : 0;
+ heartbeat = t;
+ timeout_cnt = ((octeon_get_clock_rate() >> 8) * timeout_sec) >> 8;
+}
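To put numbers on the period/countdown bookkeeping described in the header comment, here is a stand-alone sketch of the octeon_wdt_calc_parameters() arithmetic; the 500 MHz io clock and the 5-second maximum period are assumptions for illustration, not values taken from the patch:

/* Worked example of the OCTEON watchdog parameter calculation. */
#include <stdio.h>

int main(void)
{
	unsigned long long clock_rate = 500000000ULL;	/* assumed io clock */
	unsigned int max_timeout_sec = 5;		/* assumed largest period */
	unsigned int heartbeat = 60;			/* WD_TIMO default */
	unsigned int timeout_sec = max_timeout_sec;
	unsigned int periods, countdown_reset, timeout_cnt;

	/* Largest period that evenly divides the requested heartbeat. */
	while (heartbeat % timeout_sec != 0)
		timeout_sec--;

	periods = heartbeat / timeout_sec;
	/* The final two periods are left to the irq-disabled/NMI stages. */
	countdown_reset = periods > 2 ? periods - 2 : 0;
	timeout_cnt = ((clock_rate >> 8) * timeout_sec) >> 8;

	/* Prints: period=5 s, pokes before NMI=10, LEN=38146 */
	printf("period=%u s, pokes before NMI=%u, LEN=%u\n",
	       timeout_sec, countdown_reset, timeout_cnt);
	return 0;
}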
+
+static int octeon_wdt_set_heartbeat(int t)
+{
+ int cpu;
+ int coreid;
+ union cvmx_ciu_wdogx ciu_wdog;
+
+ if (t <= 0)
+ return -1;
+
+ octeon_wdt_calc_parameters(t);
+
+ for_each_online_cpu(cpu) {
+ coreid = cpu2core(cpu);
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
+ ciu_wdog.u64 = 0;
+ ciu_wdog.s.len = timeout_cnt;
+ ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */
+ cvmx_write_csr(CVMX_CIU_WDOGX(coreid), ciu_wdog.u64);
+ cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
+ }
+ octeon_wdt_ping(); /* Get the irqs back on. */
+ return 0;
+}
+
+/**
+ * octeon_wdt_write:
+ * @file: file handle to the watchdog
+ * @buf: buffer to write (unused, as the data does not matter here)
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ * write of data will do, as we don't define content meaning.
+ */
+
+static ssize_t octeon_wdt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (count) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != count; i++) {
+ char c;
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 1;
+ }
+ }
+ octeon_wdt_ping();
+ }
+ return count;
+}
+
+/**
+ * octeon_wdt_ioctl:
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all
+ * watchdogs according to their available features. We only
+ * actually usefully support querying capabilities and setting
+ * the timeout.
+ */
+
+static long octeon_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_heartbeat;
+
+ static struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT|
+ WDIOF_MAGICCLOSE|
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = "OCTEON",
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_KEEPALIVE:
+ octeon_wdt_ping();
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_heartbeat, p))
+ return -EFAULT;
+ if (octeon_wdt_set_heartbeat(new_heartbeat))
+ return -EINVAL;
+ /* Fall through. */
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, p);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/**
+ * octeon_wdt_open:
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ * The watchdog device has been opened. The watchdog device is single
+ * open and on opening we do a ping to reset the counters.
+ */
+
+static int octeon_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &octeon_wdt_is_open))
+ return -EBUSY;
+ /*
+ * Activate
+ */
+ octeon_wdt_ping();
+ do_coundown = 1;
+ return nonseekable_open(inode, file);
+}
+
+/**
+ * octeon_wdt_release:
+ * @inode: inode to board
+ * @file: file handle to board
+ *
+ * The watchdog has a configurable API. There is a religious dispute
+ * between people who want their watchdog to be able to shut down and
+ * those who want to be sure if the watchdog manager dies the machine
+ * reboots. In the former case we disable the counters, in the latter
+ * case you have to open it again very soon.
+ */
+
+static int octeon_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_close) {
+ do_coundown = 0;
+ octeon_wdt_ping();
+ } else {
+ pr_crit("octeon_wdt: WDT device closed unexpectedly. WDT will not stop!\n");
+ }
+ clear_bit(0, &octeon_wdt_is_open);
+ expect_close = 0;
+ return 0;
+}
+
+static const struct file_operations octeon_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = octeon_wdt_write,
+ .unlocked_ioctl = octeon_wdt_ioctl,
+ .open = octeon_wdt_open,
+ .release = octeon_wdt_release,
+};
+
+static struct miscdevice octeon_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &octeon_wdt_fops,
+};
+
+static struct notifier_block octeon_wdt_cpu_notifier = {
+ .notifier_call = octeon_wdt_cpu_callback,
+};
+
+
+/**
+ * Module / driver initialization.
+ *
+ * Returns zero on success.
+ */
+static int __init octeon_wdt_init(void)
+{
+ int i;
+ int ret;
+ int cpu;
+ u64 *ptr;
+
+ /*
+ * Watchdog time expiration length = The 16 bits of LEN
+ * represent the most significant bits of a 24 bit decrementer
+ * that decrements every 256 cycles.
+ *
+ * Try for a timeout of 5 seconds; if that does not fit, fall back
+ * to a smaller whole number of seconds.
+ */
+ max_timeout_sec = 6;
+ do {
+ max_timeout_sec--;
+ timeout_cnt = ((octeon_get_clock_rate() >> 8) * max_timeout_sec) >> 8;
+ } while (timeout_cnt > 65535);
+
+ BUG_ON(timeout_cnt == 0);
+
+ octeon_wdt_calc_parameters(heartbeat);
+
+ pr_info("octeon_wdt: Initial granularity %d Sec.\n", timeout_sec);
+
+ ret = misc_register(&octeon_wdt_miscdev);
+ if (ret) {
+ pr_err("octeon_wdt: cannot register miscdev on minor=%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ goto out;
+ }
+
+ /* Build the NMI handler ... */
+ octeon_wdt_build_stage1();
+
+ /* ... and install it. */
+ ptr = (u64 *) nmi_stage1_insns;
+ for (i = 0; i < 16; i++) {
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8);
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, ptr[i]);
+ }
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000);
+
+ cpumask_clear(&irq_enabled_cpus);
+
+ for_each_online_cpu(cpu)
+ octeon_wdt_setup_interrupt(cpu);
+
+ register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+out:
+ return ret;
+}
+
+/**
+ * Module / driver shutdown
+ */
+static void __exit octeon_wdt_cleanup(void)
+{
+ int cpu;
+
+ misc_deregister(&octeon_wdt_miscdev);
+
+ unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
+
+ for_each_online_cpu(cpu) {
+ int core = cpu2core(cpu);
+ /* Disable the watchdog */
+ cvmx_write_csr(CVMX_CIU_WDOGX(core), 0);
+ /* Free the interrupt handler */
+ free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
+ }
+ /*
+ * Disable the boot-bus memory, the code it points to is soon
+ * to go missing.
+ */
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
+MODULE_DESCRIPTION("Cavium Networks Octeon Watchdog driver.");
+module_init(octeon_wdt_init);
+module_exit(octeon_wdt_cleanup);
diff --git a/drivers/watchdog/octeon-wdt-nmi.S b/drivers/watchdog/octeon-wdt-nmi.S
new file mode 100644
index 00000000000..8a900a5e323
--- /dev/null
+++ b/drivers/watchdog/octeon-wdt-nmi.S
@@ -0,0 +1,64 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Cavium Networks
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#define SAVE_REG(r) sd $r, -32768+6912-(32-r)*8($0)
+
+ NESTED(octeon_wdt_nmi_stage2, 0, sp)
+ .set push
+ .set noreorder
+ .set noat
+ /* Save all registers to the top of CVMSEG. This shouldn't
+ * corrupt any state used by the kernel. Also all registers
+ * should have the value right before the NMI. */
+ SAVE_REG(0)
+ SAVE_REG(1)
+ SAVE_REG(2)
+ SAVE_REG(3)
+ SAVE_REG(4)
+ SAVE_REG(5)
+ SAVE_REG(6)
+ SAVE_REG(7)
+ SAVE_REG(8)
+ SAVE_REG(9)
+ SAVE_REG(10)
+ SAVE_REG(11)
+ SAVE_REG(12)
+ SAVE_REG(13)
+ SAVE_REG(14)
+ SAVE_REG(15)
+ SAVE_REG(16)
+ SAVE_REG(17)
+ SAVE_REG(18)
+ SAVE_REG(19)
+ SAVE_REG(20)
+ SAVE_REG(21)
+ SAVE_REG(22)
+ SAVE_REG(23)
+ SAVE_REG(24)
+ SAVE_REG(25)
+ SAVE_REG(26)
+ SAVE_REG(27)
+ SAVE_REG(28)
+ SAVE_REG(29)
+ SAVE_REG(30)
+ SAVE_REG(31)
+ /* Set the stack to begin right below the registers */
+ li sp, -32768+6912-32*8
+ /* Load the address of the third stage handler */
+ dla a0, octeon_wdt_nmi_stage3
+ /* Call the third stage handler */
+ jal a0
+ /* a0 is the address of the saved registers */
+ move a0, sp
+ /* Loop forever if we get here. */
+1: b 1b
+ nop
+ .set pop
+ END(octeon_wdt_nmi_stage2)
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 5dceeddc885..3faee1ae64b 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -172,7 +172,7 @@ static struct miscdevice riowd_miscdev = {
.fops = &riowd_fops
};
-static int __devinit riowd_probe(struct of_device *op,
+static int __devinit riowd_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct riowd *p;
@@ -219,7 +219,7 @@ out:
return err;
}
-static int __devexit riowd_remove(struct of_device *op)
+static int __devexit riowd_remove(struct platform_device *op)
{
struct riowd *p = dev_get_drvdata(&op->dev);
@@ -250,12 +250,12 @@ static struct of_platform_driver riowd_driver = {
static int __init riowd_init(void)
{
- return of_register_driver(&riowd_driver, &of_bus_type);
+ return of_register_platform_driver(&riowd_driver);
}
static void __exit riowd_exit(void)
{
- of_unregister_driver(&riowd_driver);
+ of_unregister_platform_driver(&riowd_driver);
}
module_init(riowd_init);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 300932580de..ae53662c29b 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -532,21 +532,22 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
static int __devexit s3c2410wdt_remove(struct platform_device *dev)
{
- s3c2410wdt_cpufreq_deregister();
-
- release_resource(wdt_mem);
- kfree(wdt_mem);
- wdt_mem = NULL;
+ misc_deregister(&s3c2410wdt_miscdev);
- free_irq(wdt_irq->start, dev);
- wdt_irq = NULL;
+ s3c2410wdt_cpufreq_deregister();
clk_disable(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
+ free_irq(wdt_irq->start, dev);
+ wdt_irq = NULL;
+
iounmap(wdt_base);
- misc_deregister(&s3c2410wdt_miscdev);
+
+ release_resource(wdt_mem);
+ kfree(wdt_mem);
+ wdt_mem = NULL;
return 0;
}
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa5730..f31493e65b3 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
if (ret) {
printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
ident.identity, ret);
- return ret;
+ goto out;
}
ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
ident.identity,
timeout / 1000000, (timeout / 100000) % 10);
- } else
- free_irq(1, (void *)user_dog);
+ return 0;
+ }
+ free_irq(1, (void *)user_dog);
+out:
+ unregister_reboot_notifier(&sbwdog_notifier);
+
return ret;
}
static void __exit sbwdog_exit(void)
{
misc_deregister(&sbwdog_miscdev);
+ free_irq(1, (void *)user_dog);
+ unregister_reboot_notifier(&sbwdog_notifier);
}
module_init(sbwdog_init);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 9c40f48804f..0461858e07d 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -425,6 +425,8 @@ static int __devinit sch311x_wdt_probe(struct platform_device *pdev)
val = therm_trip ? 0x06 : 0x04;
outb(val, sch311x_wdt_data.runtime_reg + RESGEN);
+ sch311x_wdt_miscdev.parent = dev;
+
err = misc_register(&sch311x_wdt_miscdev);
if (err != 0) {
dev_err(dev, "cannot register miscdev on minor=%d (err=%d)\n",
@@ -432,8 +434,6 @@ static int __devinit sch311x_wdt_probe(struct platform_device *pdev)
goto exit_release_region3;
}
- sch311x_wdt_miscdev.parent = dev;
-
dev_info(dev,
"SMSC SCH311x WDT initialized. timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
new file mode 100644
index 00000000000..9127eda2145
--- /dev/null
+++ b/drivers/watchdog/sp805_wdt.c
@@ -0,0 +1,387 @@
+/*
+ * drivers/watchdog/sp805_wdt.c
+ *
+ * Watchdog driver for ARM SP805 watchdog module
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2 or later. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/resource.h>
+#include <linux/amba/bus.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+/* default timeout in seconds */
+#define DEFAULT_TIMEOUT 60
+
+#define MODULE_NAME "sp805-wdt"
+
+/* watchdog register offsets and masks */
+#define WDTLOAD 0x000
+ #define LOAD_MIN 0x00000001
+ #define LOAD_MAX 0xFFFFFFFF
+#define WDTVALUE 0x004
+#define WDTCONTROL 0x008
+ /* control register masks */
+ #define INT_ENABLE (1 << 0)
+ #define RESET_ENABLE (1 << 1)
+#define WDTINTCLR 0x00C
+#define WDTRIS 0x010
+#define WDTMIS 0x014
+ #define INT_MASK (1 << 0)
+#define WDTLOCK 0xC00
+ #define UNLOCK 0x1ACCE551
+ #define LOCK 0x00000001
+
+/**
+ * struct sp805_wdt: sp805 wdt device structure
+ *
+ * lock: spin lock protecting dev structure and io access
+ * base: base address of wdt
+ * clk: clock structure of wdt
+ * adev: amba device structure of wdt
+ * status: current status of wdt
+ * load_val: load value to be set for current timeout
+ * timeout: current programmed timeout
+ */
+struct sp805_wdt {
+ spinlock_t lock;
+ void __iomem *base;
+ struct clk *clk;
+ struct amba_device *adev;
+ unsigned long status;
+ #define WDT_BUSY 0
+ #define WDT_CAN_BE_CLOSED 1
+ unsigned int load_val;
+ unsigned int timeout;
+};
+
+/* local variables */
+static struct sp805_wdt *wdt;
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+/* This routine finds the load value that will reset the system in the required timeout */
+static void wdt_setload(unsigned int timeout)
+{
+ u64 load, rate;
+
+ rate = clk_get_rate(wdt->clk);
+
+ /*
+ * The sp805 runs the counter with the given value twice: at the end
+ * of the first count it raises an interrupt and then starts counting
+ * again. If the interrupt is still pending when the second count
+ * expires, it resets the system. This is why the load value is half
+ * of what would otherwise be required.
+ */
+ load = div_u64(rate, 2) * timeout - 1;
+
+ load = (load > LOAD_MAX) ? LOAD_MAX : load;
+ load = (load < LOAD_MIN) ? LOAD_MIN : load;
+
+ spin_lock(&wdt->lock);
+ wdt->load_val = load;
+ /* round up the timeout to the closest positive integer value */
+ wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
+ spin_unlock(&wdt->lock);
+}
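Because the counter runs twice before the reset fires, wdt_setload() programs half the requested interval and then rounds the effective timeout back to whole seconds. A small user-space sketch of that round trip, assuming a 32768 Hz watchdog clock (the real rate comes from clk_get_rate()):

/* sp805 load/timeout round trip, assuming rate = 32768 Hz. */
#include <stdio.h>
#include <stdint.h>

#define LOAD_MIN 0x00000001ULL
#define LOAD_MAX 0xFFFFFFFFULL

int main(void)
{
	uint64_t rate = 32768;		/* assumed clock rate */
	unsigned int timeout = 60;	/* DEFAULT_TIMEOUT */
	uint64_t load = rate / 2 * timeout - 1;

	if (load > LOAD_MAX)
		load = LOAD_MAX;
	if (load < LOAD_MIN)
		load = LOAD_MIN;

	/* Prints: load=983039, effective timeout=60 s */
	printf("load=%llu, effective timeout=%llu s\n",
	       (unsigned long long)load,
	       (unsigned long long)(((load + 1) * 2 + rate / 2) / rate));
	return 0;
}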
+
+/* returns number of seconds left for reset to occur */
+static u32 wdt_timeleft(void)
+{
+ u64 load, rate;
+
+ rate = clk_get_rate(wdt->clk);
+
+ spin_lock(&wdt->lock);
+ load = readl(wdt->base + WDTVALUE);
+
+ /* If the interrupt is inactive then time left is WDTValue + WDTLoad. */
+ if (!(readl(wdt->base + WDTRIS) & INT_MASK))
+ load += wdt->load_val + 1;
+ spin_unlock(&wdt->lock);
+
+ return div_u64(load, rate);
+}
+
+/* enables watchdog timers reset */
+static void wdt_enable(void)
+{
+ spin_lock(&wdt->lock);
+
+ writel(UNLOCK, wdt->base + WDTLOCK);
+ writel(wdt->load_val, wdt->base + WDTLOAD);
+ writel(INT_MASK, wdt->base + WDTINTCLR);
+ writel(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
+ writel(LOCK, wdt->base + WDTLOCK);
+
+ spin_unlock(&wdt->lock);
+}
+
+/* disables watchdog timers reset */
+static void wdt_disable(void)
+{
+ spin_lock(&wdt->lock);
+
+ writel(UNLOCK, wdt->base + WDTLOCK);
+ writel(0, wdt->base + WDTCONTROL);
+ writel(0, wdt->base + WDTLOAD);
+ writel(LOCK, wdt->base + WDTLOCK);
+
+ spin_unlock(&wdt->lock);
+}
+
+static ssize_t sp805_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+ /* Check for Magic Close character */
+ if (c == 'V') {
+ set_bit(WDT_CAN_BE_CLOSED,
+ &wdt->status);
+ break;
+ }
+ }
+ }
+ wdt_enable();
+ }
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = MODULE_NAME,
+};
+
+static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+ unsigned int timeout;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ wdt_enable();
+ ret = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(timeout, (unsigned int *)arg);
+ if (ret)
+ break;
+
+ wdt_setload(timeout);
+
+ wdt_enable();
+ /* Fall through */
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(wdt->timeout, (unsigned int *)arg);
+ break;
+ case WDIOC_GETTIMELEFT:
+ ret = put_user(wdt_timeleft(), (unsigned int *)arg);
+ break;
+ }
+ return ret;
+}
+
+static int sp805_wdt_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ if (test_and_set_bit(WDT_BUSY, &wdt->status))
+ return -EBUSY;
+
+ ret = clk_enable(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock enable fail");
+ goto err;
+ }
+
+ wdt_enable();
+
+ /* can not be closed, once enabled */
+ clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
+ return nonseekable_open(inode, file);
+
+err:
+ clear_bit(WDT_BUSY, &wdt->status);
+ return ret;
+}
+
+static int sp805_wdt_release(struct inode *inode, struct file *file)
+{
+ if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
+ clear_bit(WDT_BUSY, &wdt->status);
+ dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
+ return 0;
+ }
+
+ wdt_disable();
+ clk_disable(wdt->clk);
+ clear_bit(WDT_BUSY, &wdt->status);
+
+ return 0;
+}
+
+static const struct file_operations sp805_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = sp805_wdt_write,
+ .unlocked_ioctl = sp805_wdt_ioctl,
+ .open = sp805_wdt_open,
+ .release = sp805_wdt_release,
+};
+
+static struct miscdevice sp805_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &sp805_wdt_fops,
+};
+
+static int __devinit
+sp805_wdt_probe(struct amba_device *adev, struct amba_id *id)
+{
+ int ret = 0;
+
+ if (!request_mem_region(adev->res.start, resource_size(&adev->res),
+ "sp805_wdt")) {
+ dev_warn(&adev->dev, "Failed to get memory region resource\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
+ if (!wdt) {
+ dev_warn(&adev->dev, "Kzalloc failed\n");
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ wdt->clk = clk_get(&adev->dev, NULL);
+ if (IS_ERR(wdt->clk)) {
+ dev_warn(&adev->dev, "Clock not found\n");
+ ret = PTR_ERR(wdt->clk);
+ goto err_clk_get;
+ }
+
+ wdt->base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (!wdt->base) {
+ ret = -ENOMEM;
+ dev_warn(&adev->dev, "ioremap fail\n");
+ goto err_ioremap;
+ }
+
+ wdt->adev = adev;
+ spin_lock_init(&wdt->lock);
+ wdt_setload(DEFAULT_TIMEOUT);
+
+ ret = misc_register(&sp805_wdt_miscdev);
+ if (ret < 0) {
+ dev_warn(&adev->dev, "cannot register misc device\n");
+ goto err_misc_register;
+ }
+
+ dev_info(&adev->dev, "registration successful\n");
+ return 0;
+
+err_misc_register:
+ iounmap(wdt->base);
+err_ioremap:
+ clk_put(wdt->clk);
+err_clk_get:
+ kfree(wdt);
+ wdt = NULL;
+err_kzalloc:
+ release_mem_region(adev->res.start, resource_size(&adev->res));
+err:
+ dev_err(&adev->dev, "Probe Failed!!!\n");
+ return ret;
+}
+
+static int __devexit sp805_wdt_remove(struct amba_device *adev)
+{
+ misc_deregister(&sp805_wdt_miscdev);
+ iounmap(wdt->base);
+ clk_put(wdt->clk);
+ kfree(wdt);
+ release_mem_region(adev->res.start, resource_size(&adev->res));
+
+ return 0;
+}
+
+static struct amba_id sp805_wdt_ids[] __initdata = {
+ {
+ .id = 0x00141805,
+ .mask = 0x00ffffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver sp805_wdt_driver = {
+ .drv = {
+ .name = MODULE_NAME,
+ },
+ .id_table = sp805_wdt_ids,
+ .probe = sp805_wdt_probe,
+ .remove = __devexit_p(sp805_wdt_remove),
+};
+
+static int __init sp805_wdt_init(void)
+{
+ return amba_driver_register(&sp805_wdt_driver);
+}
+module_init(sp805_wdt_init);
+
+static void __exit sp805_wdt_exit(void)
+{
+ amba_driver_unregister(&sp805_wdt_driver);
+}
+module_exit(sp805_wdt_exit);
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Set to 1 to keep watchdog running after device release");
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499c122..18cdeb4c425 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
wdt->pdev = pdev;
mutex_init(&wdt->lock);
+ /* make sure that the watchdog is disabled */
+ ts72xx_wdt_stop(wdt);
+
error = misc_register(&ts72xx_wdt_miscdev);
if (error) {
dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 7b22e3cdbc8..6130c88fa5a 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -60,19 +60,6 @@
#define PFX "wdt_pci: "
-/*
- * Until Access I/O gets their application for a PCI vendor ID approved,
- * I don't think that it's appropriate to move these constants into the
- * regular pci_ids.h file. -- JPN 2000/01/18
- */
-
-#ifndef PCI_VENDOR_ID_ACCESSIO
-#define PCI_VENDOR_ID_ACCESSIO 0x494f
-#endif
-#ifndef PCI_DEVICE_ID_WDG_CSM
-#define PCI_DEVICE_ID_WDG_CSM 0x22c0
-#endif
-
/* We can only use 1 card due to the /dev/watchdog restriction */
static int dev_count;
@@ -743,7 +730,7 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
static struct pci_device_id wdtpci_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
- .device = PCI_DEVICE_ID_WDG_CSM,
+ .device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index fad3df2c127..60d71e9abe9 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -62,4 +62,18 @@ config XEN_SYS_HYPERVISOR
virtual environment, /sys/hypervisor will still be present,
but will have no xen contents.
+config XEN_PLATFORM_PCI
+ tristate "xen platform pci device driver"
+ depends on XEN_PVHVM
+ default m
+ help
+ Driver for the Xen PCI Platform device: it is responsible for
+ initializing xenbus and grant_table when running in a Xen HVM
+ domain. As a consequence this driver is required to run any Xen PV
+ frontend on Xen HVM.
+
+config SWIOTLB_XEN
+ def_bool y
+ depends on SWIOTLB
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 7c284342f30..fcaf838f54b 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -9,4 +9,6 @@ obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o
obj-$(CONFIG_XENFS) += xenfs/
-obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o \ No newline at end of file
+obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
+obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o
+obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 1a0d8c2a035..500290b150b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -85,13 +85,6 @@ static struct sys_device balloon_sysdev;
static int register_balloon(struct sys_device *sysdev);
-/*
- * Protects atomic reservation decrease/increase against concurrent increases.
- * Also protects non-atomic updates of current_pages and driver_pages, and
- * balloon lists.
- */
-static DEFINE_SPINLOCK(balloon_lock);
-
static struct balloon_stats balloon_stats;
/* We increase/decrease in batches which fit in a page */
@@ -210,7 +203,7 @@ static int increase_reservation(unsigned long nr_pages)
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
- spin_lock_irqsave(&balloon_lock, flags);
+ spin_lock_irqsave(&xen_reservation_lock, flags);
page = balloon_first_page();
for (i = 0; i < nr_pages; i++) {
@@ -254,7 +247,7 @@ static int increase_reservation(unsigned long nr_pages)
balloon_stats.current_pages += rc;
out:
- spin_unlock_irqrestore(&balloon_lock, flags);
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
return rc < 0 ? rc : rc != nr_pages;
}
@@ -299,7 +292,7 @@ static int decrease_reservation(unsigned long nr_pages)
kmap_flush_unused();
flush_tlb_all();
- spin_lock_irqsave(&balloon_lock, flags);
+ spin_lock_irqsave(&xen_reservation_lock, flags);
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
@@ -315,7 +308,7 @@ static int decrease_reservation(unsigned long nr_pages)
balloon_stats.current_pages -= nr_pages;
- spin_unlock_irqrestore(&balloon_lock, flags);
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
return need_sleep;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index db8f506817f..13365ba3521 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -29,6 +29,7 @@
#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
@@ -36,10 +37,14 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
+#include <xen/xen.h>
+#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
+#include <xen/interface/hvm/hvm_op.h>
+#include <xen/interface/hvm/params.h>
/*
* This lock protects updates to the following mapping and reference-count
@@ -107,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
#define VALID_EVTCHN(chn) ((chn) != 0)
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
@@ -335,9 +341,18 @@ static int find_unbound_irq(void)
int irq;
struct irq_desc *desc;
- for (irq = 0; irq < nr_irqs; irq++)
+ for (irq = 0; irq < nr_irqs; irq++) {
+ desc = irq_to_desc(irq);
+ /* only 0->15 have init'd desc; handle irq >= 16 */
+ if (desc == NULL)
+ break;
+ if (desc->chip == &no_irq_chip)
+ break;
+ if (desc->chip != &xen_dynamic_chip)
+ continue;
if (irq_info[irq].type == IRQT_UNBOUND)
break;
+ }
if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n");
@@ -346,7 +361,7 @@ static int find_unbound_irq(void)
if (WARN_ON(desc == NULL))
return -1;
- dynamic_irq_init(irq);
+ dynamic_irq_init_keep_chip_data(irq);
return irq;
}
@@ -363,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "event");
+ handle_edge_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -389,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
if (irq < 0)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "ipi");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
bind_ipi.vcpu = cpu;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -430,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = find_unbound_irq();
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "virq");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -536,6 +551,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
if (irq < 0)
return irq;
+ irqflags |= IRQF_NO_SUSPEND;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -617,17 +633,13 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
* a bitset of words which contain pending event bits. The second
* level is a bitset of pending events themselves.
*/
-void xen_evtchn_do_upcall(struct pt_regs *regs)
+static void __xen_evtchn_do_upcall(void)
{
int cpu = get_cpu();
- struct pt_regs *old_regs = set_irq_regs(regs);
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
unsigned count;
- exit_idle();
- irq_enter();
-
do {
unsigned long pending_words;
@@ -664,14 +676,31 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
count = __get_cpu_var(xed_nesting_count);
__get_cpu_var(xed_nesting_count) = 0;
- } while(count != 1);
+ } while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
+
+ put_cpu();
+}
+
+void xen_evtchn_do_upcall(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ exit_idle();
+ irq_enter();
+
+ __xen_evtchn_do_upcall();
+
irq_exit();
set_irq_regs(old_regs);
+}
- put_cpu();
+void xen_hvm_evtchn_do_upcall(void)
+{
+ __xen_evtchn_do_upcall();
}
+EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
@@ -708,7 +737,10 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
- if (!VALID_EVTCHN(evtchn))
+ /* events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 */
+ if (!VALID_EVTCHN(evtchn) ||
+ (xen_hvm_domain() && !xen_have_vector_callback))
return -1;
/* Send future instances of this interrupt to other vcpu. */
@@ -933,6 +965,54 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.retrigger = retrigger_dynirq,
};
+static struct irq_chip xen_percpu_chip __read_mostly = {
+ .name = "xen-percpu",
+
+ .disable = disable_dynirq,
+ .mask = disable_dynirq,
+ .unmask = enable_dynirq,
+
+ .ack = ack_dynirq,
+};
+
+int xen_set_callback_via(uint64_t via)
+{
+ struct xen_hvm_param a;
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+ a.value = via;
+ return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
+#ifdef CONFIG_XEN_PVHVM
+/* Vector callbacks are better than PCI interrupts for receiving event
+ * channel notifications because we can receive vector callbacks on any
+ * vcpu and we don't need PCI support or APIC interactions. */
+void xen_callback_vector(void)
+{
+ int rc;
+ uint64_t callback_via;
+ if (xen_have_vector_callback) {
+ callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
+ rc = xen_set_callback_via(callback_via);
+ if (rc) {
+ printk(KERN_ERR "Request for Xen HVM callback vector"
+ " failed.\n");
+ xen_have_vector_callback = 0;
+ return;
+ }
+ printk(KERN_INFO "Xen HVM callback vector for event delivery is "
+ "enabled\n");
+ /* in the restore case the vector has already been allocated */
+ if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
+ alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
+ }
+}
+#else
+void xen_callback_vector(void) {}
+#endif
+
void __init xen_init_IRQ(void)
{
int i;
@@ -947,5 +1027,10 @@ void __init xen_init_IRQ(void)
for (i = 0; i < NR_EVENT_CHANNELS; i++)
mask_evtchn(i);
- irq_ctx_init(smp_processor_id());
+ if (xen_hvm_domain()) {
+ xen_callback_vector();
+ native_init_IRQ();
+ } else {
+ irq_ctx_init(smp_processor_id());
+ }
}
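
The do/while loop above drains the two-level pending structure that the comment at the top of this hunk describes: a first-level bitset selects words in the shared-info page, and each selected word is itself a bitset of pending event channels. A minimal, self-contained user-space sketch of that scan follows; the names are illustrative and the real driver additionally honours the per-vcpu and per-channel masks, which this model omits.

/*
 * Simplified model of a two-level pending-event scan (GCC/Clang builtins).
 * All names here are illustrative, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64
#define NWORDS 4                        /* models 4 * 64 = 256 event channels */

static uint64_t pending_words;          /* level 1: which words have pending bits */
static uint64_t pending[NWORDS];        /* level 2: the pending events themselves */

static void scan_pending(void (*handler)(unsigned port))
{
    uint64_t l1 = __atomic_exchange_n(&pending_words, 0, __ATOMIC_ACQ_REL);

    while (l1) {
        unsigned word = __builtin_ctzll(l1);
        uint64_t l2;

        l1 &= l1 - 1;                   /* clear the lowest set level-1 bit */
        l2 = __atomic_exchange_n(&pending[word], 0, __ATOMIC_ACQ_REL);
        while (l2) {
            unsigned bit = __builtin_ctzll(l2);

            l2 &= l2 - 1;
            handler(word * BITS_PER_WORD + bit);
        }
    }
}

static void show(unsigned port)
{
    printf("event %u\n", port);
}

int main(void)
{
    pending[2] |= 1ull << 5;            /* mark port 133 pending */
    pending_words |= 1ull << 2;
    scan_pending(show);
    return 0;
}

The "while (count != 1 || vcpu_info->evtchn_upcall_pending)" condition added above repeats this kind of scan until no further upcall was flagged while it ran.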
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f66db3b91d6..6c453181649 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -37,11 +37,13 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
+#include <linux/io.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
+#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
#include <asm/pgtable.h>
@@ -59,6 +61,8 @@ static unsigned int boot_max_nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
+unsigned long xen_hvm_resume_frames;
+EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
static struct grant_entry *shared;
@@ -433,7 +437,7 @@ static unsigned int __max_nr_grant_frames(void)
return query.max_nr_frames;
}
-static inline unsigned int max_nr_grant_frames(void)
+unsigned int gnttab_max_grant_frames(void)
{
unsigned int xen_max = __max_nr_grant_frames();
@@ -441,6 +445,7 @@ static inline unsigned int max_nr_grant_frames(void)
return boot_max_nr_grant_frames;
return xen_max;
}
+EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
@@ -449,6 +454,30 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
unsigned int nr_gframes = end_idx + 1;
int rc;
+ if (xen_hvm_domain()) {
+ struct xen_add_to_physmap xatp;
+ unsigned int i = end_idx;
+ rc = 0;
+ /*
+ * Loop backwards, so that the first hypercall has the largest
+ * index, ensuring that the table will grow only once.
+ */
+ do {
+ xatp.domid = DOMID_SELF;
+ xatp.idx = i;
+ xatp.space = XENMAPSPACE_grant_table;
+ xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
+ rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
+ if (rc != 0) {
+ printk(KERN_WARNING
+ "grant table add_to_physmap failed, err=%d\n", rc);
+ break;
+ }
+ } while (i-- > start_idx);
+
+ return rc;
+ }
+
frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
if (!frames)
return -ENOMEM;
@@ -465,7 +494,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
BUG_ON(rc || setup.status);
- rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(),
+ rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
&shared);
BUG_ON(rc);
@@ -476,9 +505,27 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
int gnttab_resume(void)
{
- if (max_nr_grant_frames() < nr_grant_frames)
+ unsigned int max_nr_gframes;
+
+ max_nr_gframes = gnttab_max_grant_frames();
+ if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
- return gnttab_map(0, nr_grant_frames - 1);
+
+ if (xen_pv_domain())
+ return gnttab_map(0, nr_grant_frames - 1);
+
+ if (!shared) {
+ shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
+ if (shared == NULL) {
+ printk(KERN_WARNING
+ "Failed to ioremap gnttab share frames!");
+ return -ENOMEM;
+ }
+ }
+
+ gnttab_map(0, nr_grant_frames - 1);
+
+ return 0;
}
int gnttab_suspend(void)
@@ -495,7 +542,7 @@ static int gnttab_expand(unsigned int req_entries)
cur = nr_grant_frames;
extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
GREFS_PER_GRANT_FRAME);
- if (cur + extra > max_nr_grant_frames())
+ if (cur + extra > gnttab_max_grant_frames())
return -ENOSPC;
rc = gnttab_map(cur, cur + extra - 1);
@@ -505,15 +552,12 @@ static int gnttab_expand(unsigned int req_entries)
return rc;
}
-static int __devinit gnttab_init(void)
+int gnttab_init(void)
{
int i;
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
- if (!xen_domain())
- return -ENODEV;
-
nr_grant_frames = 1;
boot_max_nr_grant_frames = __max_nr_grant_frames();
@@ -556,5 +600,18 @@ static int __devinit gnttab_init(void)
kfree(gnttab_list);
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(gnttab_init);
+
+static int __devinit __gnttab_init(void)
+{
+ /* Delay grant-table initialization in the PV on HVM case */
+ if (xen_hvm_domain())
+ return 0;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+
+ return gnttab_init();
+}
-core_initcall(gnttab_init);
+core_initcall(__gnttab_init);
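
The HVM branch of gnttab_map() above maps grant-table frames via XENMEM_add_to_physmap and deliberately starts from the highest index so the hypervisor grows the table only once. A small stand-alone sketch of that loop shape, with map_frame() standing in for the hypercall, is:

/*
 * Backwards mapping loop: largest index first, then downwards.
 * map_frame() is an illustrative stand-in for the hypercall.
 */
#include <stdio.h>

static int map_frame(unsigned int idx)
{
    printf("mapping grant frame %u\n", idx);
    return 0;                           /* pretend the hypercall succeeded */
}

static int map_range(unsigned int start_idx, unsigned int end_idx)
{
    unsigned int i = end_idx;
    int rc = 0;

    do {                                /* first call carries the largest index */
        rc = map_frame(i);
        if (rc != 0)
            break;
    } while (i-- > start_idx);

    return rc;
}

int main(void)
{
    return map_range(0, 3);             /* prints 3, 2, 1, 0 */
}

The post-decrement in the loop condition means start_idx itself is still mapped before the loop terminates.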
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 07e857b0de1..ef9c7db5207 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -9,6 +9,7 @@
#include <linux/stop_machine.h>
#include <linux/freezer.h>
+#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
@@ -17,6 +18,7 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
+#include <asm/xen/hypervisor.h>
enum shutdown_state {
SHUTDOWN_INVALID = -1,
@@ -33,10 +35,30 @@ enum shutdown_state {
static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
#ifdef CONFIG_PM_SLEEP
-static int xen_suspend(void *data)
+static int xen_hvm_suspend(void *data)
{
+ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
int *cancelled = data;
+
+ BUG_ON(!irqs_disabled());
+
+ *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+
+ xen_hvm_post_suspend(*cancelled);
+ gnttab_resume();
+
+ if (!*cancelled) {
+ xen_irq_resume();
+ xen_timer_resume();
+ }
+
+ return 0;
+}
+
+static int xen_suspend(void *data)
+{
int err;
+ int *cancelled = data;
BUG_ON(!irqs_disabled());
@@ -106,7 +128,10 @@ static void do_suspend(void)
goto out_resume;
}
- err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
+ if (xen_hvm_domain())
+ err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0));
+ else
+ err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
dpm_resume_noirq(PMSG_RESUME);
@@ -212,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
goto again;
if (sysrq_key != '\0')
- handle_sysrq(sysrq_key, NULL);
+ handle_sysrq(sysrq_key);
}
static struct xenbus_watch sysrq_watch = {
@@ -255,7 +280,19 @@ static int shutdown_event(struct notifier_block *notifier,
return NOTIFY_DONE;
}
-static int __init setup_shutdown_event(void)
+static int __init __setup_shutdown_event(void)
+{
+ /* Delay initialization in the PV on HVM case */
+ if (xen_hvm_domain())
+ return 0;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+
+ return xen_setup_shutdown_event();
+}
+
+int xen_setup_shutdown_event(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = shutdown_event
@@ -264,5 +301,6 @@ static int __init setup_shutdown_event(void)
return 0;
}
+EXPORT_SYMBOL_GPL(xen_setup_shutdown_event);
-subsys_initcall(setup_shutdown_event);
+subsys_initcall(__setup_shutdown_event);
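
Both __gnttab_init() above and __setup_shutdown_event() here follow the same pattern: the boot-time initcall bails out early on a PV-on-HVM guest, and the exported setup routine is called later from the platform PCI probe once the virtual device exists. A rough user-space model of that deferred-initialization pattern (all names illustrative) is:

#include <stdbool.h>
#include <stdio.h>

static bool hvm_domain = true;          /* pretend we are a PV-on-HVM guest */
static bool pv_domain;

static int real_setup(void)
{
    printf("shutdown watch registered\n");
    return 0;
}

static int early_initcall(void)         /* runs at boot, like subsys_initcall */
{
    if (hvm_domain)
        return 0;                       /* defer: platform driver will call later */
    if (!pv_domain)
        return -1;                      /* not Xen at all */
    return real_setup();
}

static int platform_pci_probe(void)     /* runs when the Xen platform device binds */
{
    return real_setup();
}

int main(void)
{
    early_initcall();                   /* no-op on HVM */
    return platform_pci_probe();        /* the real setup happens here */
}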
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
new file mode 100644
index 00000000000..c01b5ddce52
--- /dev/null
+++ b/drivers/xen/platform-pci.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ * platform-pci.c
+ *
+ * Xen platform PCI device driver
+ * Copyright (c) 2005, Intel Corporation.
+ * Copyright (c) 2007, XenSource Inc.
+ * Copyright (c) 2010, Citrix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <xen/platform_pci.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/hvm.h>
+#include <xen/xen-ops.h>
+
+#define DRV_NAME "xen-platform-pci"
+
+MODULE_AUTHOR("ssmith@xensource.com and stefano.stabellini@eu.citrix.com");
+MODULE_DESCRIPTION("Xen platform PCI device");
+MODULE_LICENSE("GPL");
+
+static unsigned long platform_mmio;
+static unsigned long platform_mmio_alloc;
+static unsigned long platform_mmiolen;
+static uint64_t callback_via;
+
+unsigned long alloc_xen_mmio(unsigned long len)
+{
+ unsigned long addr;
+
+ addr = platform_mmio + platform_mmio_alloc;
+ platform_mmio_alloc += len;
+ BUG_ON(platform_mmio_alloc > platform_mmiolen);
+
+ return addr;
+}
+
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+ u8 pin;
+ int irq;
+
+ irq = pdev->irq;
+ if (irq < 16)
+ return irq; /* ISA IRQ */
+
+ pin = pdev->pin;
+
+ /* We don't know the GSI. Specify the PCI INTx line instead. */
+ return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
+ ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+ ((uint64_t)pdev->bus->number << 16) |
+ ((uint64_t)(pdev->devfn & 0xff) << 8) |
+ ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+ xen_hvm_evtchn_do_upcall();
+ return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+ return request_irq(pdev->irq, do_hvm_evtchn_intr,
+ IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+ int err;
+ if (xen_have_vector_callback)
+ return 0;
+ err = xen_set_callback_via(callback_via);
+ if (err) {
+ dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ return err;
+ }
+ return 0;
+}
+
+static int __devinit platform_pci_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int i, ret;
+ long ioaddr, iolen;
+ long mmio_addr, mmio_len;
+ unsigned int max_nr_gframes;
+
+ i = pci_enable_device(pdev);
+ if (i)
+ return i;
+
+ ioaddr = pci_resource_start(pdev, 0);
+ iolen = pci_resource_len(pdev, 0);
+
+ mmio_addr = pci_resource_start(pdev, 1);
+ mmio_len = pci_resource_len(pdev, 1);
+
+ if (mmio_addr == 0 || ioaddr == 0) {
+ dev_err(&pdev->dev, "no resources found\n");
+ ret = -ENOENT;
+ goto pci_out;
+ }
+
+ if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
+ dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n",
+ mmio_addr, mmio_len);
+ ret = -EBUSY;
+ goto pci_out;
+ }
+
+ if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
+ dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n",
+ iolen, ioaddr);
+ ret = -EBUSY;
+ goto mem_out;
+ }
+
+ platform_mmio = mmio_addr;
+ platform_mmiolen = mmio_len;
+
+ if (!xen_have_vector_callback) {
+ ret = xen_allocate_irq(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+ goto out;
+ }
+ callback_via = get_callback_via(pdev);
+ ret = xen_set_callback_via(callback_via);
+ if (ret) {
+ dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+ "err=%d\n", ret);
+ goto out;
+ }
+ }
+
+ max_nr_gframes = gnttab_max_grant_frames();
+ xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
+ ret = gnttab_init();
+ if (ret)
+ goto out;
+ xenbus_probe(NULL);
+ ret = xen_setup_shutdown_event();
+ if (ret)
+ goto out;
+ return 0;
+
+out:
+ release_region(ioaddr, iolen);
+mem_out:
+ release_mem_region(mmio_addr, mmio_len);
+pci_out:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static struct pci_device_id platform_pci_tbl[] __devinitdata = {
+ {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, platform_pci_tbl);
+
+static struct pci_driver platform_driver = {
+ .name = DRV_NAME,
+ .probe = platform_pci_init,
+ .id_table = platform_pci_tbl,
+#ifdef CONFIG_PM
+ .resume_early = platform_pci_resume,
+#endif
+};
+
+static int __init platform_pci_module_init(void)
+{
+ /* No unplug has been done and IGNORE has not been
+ * specified: just return now. */
+ if (!xen_platform_pci_unplug)
+ return -ENODEV;
+
+ return pci_register_driver(&platform_driver);
+}
+
+module_init(platform_pci_module_init);
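
get_callback_via() above packs the PCI INTx routing information into a single 64-bit value: a type tag in the top byte, then the PCI segment, bus, devfn and pin at the shifts shown in the driver. The decoder below only makes that layout explicit; the field widths used for the masks are assumptions made for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t via = ((uint64_t)0x01 << 56) |     /* type 1: PCI INTx */
                   ((uint64_t)0x0000 << 32) |   /* PCI domain (segment) */
                   ((uint64_t)0x03 << 16) |     /* bus number */
                   ((uint64_t)0x28 << 8) |      /* devfn: slot 5, function 0 */
                   ((uint64_t)(1 - 1) & 3);     /* pin - 1, i.e. INTA */

    printf("type   %llu\n", (unsigned long long)(via >> 56));
    printf("domain %llu\n", (unsigned long long)((via >> 32) & 0xffff));
    printf("bus    %llu\n", (unsigned long long)((via >> 16) & 0xff));
    printf("devfn  0x%llx\n", (unsigned long long)((via >> 8) & 0xff));
    printf("pin    INT%c\n", (char)('A' + (via & 3)));
    return 0;
}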
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
new file mode 100644
index 00000000000..54469c3eeac
--- /dev/null
+++ b/drivers/xen/swiotlb-xen.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2010
+ * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ *
+ * This code provides an IOMMU for Xen PV guests with PCI passthrough.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * PV guests under Xen run in a non-contiguous memory architecture.
+ *
+ * When PCI pass-through is utilized, this necessitates an IOMMU for
+ * translating bus (DMA) to virtual and vice-versa and also providing a
+ * mechanism to have contiguous pages for device drivers operations (say DMA
+ * operations).
+ *
+ * Specifically, under Xen the Linux idea of pages is an illusion. It
+ * assumes that pages start at zero and go up to the available memory. To
+ * help with that, the Linux Xen MMU provides a lookup mechanism to
+ * translate the page frame numbers (PFN) to machine frame numbers (MFN)
+ * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
+ * memory is not contiguous: the Xen hypervisor stitches memory for guests
+ * from different pools, which means there is no guarantee that PFN==MFN
+ * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
+ * allocated in descending order (high to low), meaning the guest might
+ * never get any MFNs under the 4GB mark.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+#include <xen/swiotlb-xen.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
+/*
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
+ */
+
+static char *xen_io_tlb_start, *xen_io_tlb_end;
+static unsigned long xen_io_tlb_nslabs;
+/*
+ * Quick lookup value of the bus address of the IOTLB.
+ */
+
+u64 start_dma_addr;
+
+static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+{
+ return phys_to_machine(XPADDR(paddr)).maddr;
+}
+
+static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+{
+ return machine_to_phys(XMADDR(baddr)).paddr;
+}
+
+static dma_addr_t xen_virt_to_bus(void *address)
+{
+ return xen_phys_to_bus(virt_to_phys(address));
+}
+
+static int check_pages_physically_contiguous(unsigned long pfn,
+ unsigned int offset,
+ size_t length)
+{
+ unsigned long next_mfn;
+ int i;
+ int nr_pages;
+
+ next_mfn = pfn_to_mfn(pfn);
+ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+ for (i = 1; i < nr_pages; i++) {
+ if (pfn_to_mfn(++pfn) != ++next_mfn)
+ return 0;
+ }
+ return 1;
+}
+
+static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+{
+ unsigned long pfn = PFN_DOWN(p);
+ unsigned int offset = p & ~PAGE_MASK;
+
+ if (offset + size <= PAGE_SIZE)
+ return 0;
+ if (check_pages_physically_contiguous(pfn, offset, size))
+ return 0;
+ return 1;
+}
+
+static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
+{
+ unsigned long mfn = PFN_DOWN(dma_addr);
+ unsigned long pfn = mfn_to_local_pfn(mfn);
+ phys_addr_t paddr;
+
+ /* If the address is outside our domain, it CAN
+ * have the same virtual address as another address
+ * in our domain. Therefore _only_ check addresses within our domain.
+ */
+ if (pfn_valid(pfn)) {
+ paddr = PFN_PHYS(pfn);
+ return paddr >= virt_to_phys(xen_io_tlb_start) &&
+ paddr < virt_to_phys(xen_io_tlb_end);
+ }
+ return 0;
+}
+
+static int max_dma_bits = 32;
+
+static int
+xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
+{
+ int i, rc;
+ int dma_bits;
+
+ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+
+ i = 0;
+ do {
+ int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
+
+ do {
+ rc = xen_create_contiguous_region(
+ (unsigned long)buf + (i << IO_TLB_SHIFT),
+ get_order(slabs << IO_TLB_SHIFT),
+ dma_bits);
+ } while (rc && dma_bits++ < max_dma_bits);
+ if (rc)
+ return rc;
+
+ i += slabs;
+ } while (i < nslabs);
+ return 0;
+}
+
+void __init xen_swiotlb_init(int verbose)
+{
+ unsigned long bytes;
+ int rc;
+
+ xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+ xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+
+ bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+
+ /*
+ * Get IO TLB memory from any location.
+ */
+ xen_io_tlb_start = alloc_bootmem(bytes);
+ if (!xen_io_tlb_start)
+ panic("Cannot allocate SWIOTLB buffer");
+
+ xen_io_tlb_end = xen_io_tlb_start + bytes;
+ /*
+ * And replace that memory with pages under 4GB.
+ */
+ rc = xen_swiotlb_fixup(xen_io_tlb_start,
+ bytes,
+ xen_io_tlb_nslabs);
+ if (rc)
+ goto error;
+
+ start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
+ swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+
+ return;
+error:
+ panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
+ "We either don't have the permission or you do not have enough"\
+ "free memory under 4GB!\n", rc);
+}
+
+void *
+xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ void *ret;
+ int order = get_order(size);
+ u64 dma_mask = DMA_BIT_MASK(32);
+ unsigned long vstart;
+
+ /*
+ * Ignore region specifiers - the kernel's ideas of
+ * pseudo-phys memory layout has nothing to do with the
+ * machine physical layout. We can't allocate highmem
+ * because we can't return a pointer to it.
+ */
+ flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
+ return ret;
+
+ vstart = __get_free_pages(flags, order);
+ ret = (void *)vstart;
+
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+
+ if (ret) {
+ if (xen_create_contiguous_region(vstart, order,
+ fls64(dma_mask)) != 0) {
+ free_pages(vstart, order);
+ return NULL;
+ }
+ memset(ret, 0, size);
+ *dma_handle = virt_to_machine(ret).maddr;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
+
+void
+xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dev_addr)
+{
+ int order = get_order(size);
+
+ if (dma_release_from_coherent(hwdev, order, vaddr))
+ return;
+
+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
+ free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
+
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode. The
+ * physical address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until
+ * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ */
+dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+ dma_addr_t dev_addr = xen_phys_to_bus(phys);
+ void *map;
+
+ BUG_ON(dir == DMA_NONE);
+ /*
+ * If the address happens to be in the device's DMA window,
+ * we can safely return the device addr and not worry about bounce
+ * buffering it.
+ */
+ if (dma_capable(dev, dev_addr, size) &&
+ !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+ return dev_addr;
+
+ /*
+ * Oh well, have to allocate and map a bounce buffer.
+ */
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+ if (!map)
+ return DMA_ERROR_CODE;
+
+ dev_addr = xen_virt_to_bus(map);
+
+ /*
+ * Ensure that the address returned is DMA'ble
+ */
+ if (!dma_capable(dev, dev_addr, size))
+ panic("map_single: bounce buffer is not DMA'ble");
+
+ return dev_addr;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
+
+/*
+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
+ * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+
+ BUG_ON(dir == DMA_NONE);
+
+ /* NOTE: We use dev_addr here, not paddr! */
+ if (is_xen_swiotlb_buffer(dev_addr)) {
+ swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+ return;
+ }
+
+ if (dir != DMA_FROM_DEVICE)
+ return;
+
+ /*
+ * phys_to_virt doesn't work with a highmem page, but we could
+ * call dma_mark_clean() with a highmem page here. However, we
+ * are fine since dma_mark_clean() is a no-op on POWERPC. We can
+ * make dma_mark_clean() take a physical address if necessary.
+ */
+ dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ xen_unmap_single(hwdev, dev_addr, size, dir);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so. At the next point you give the dma
+ * address back to the card, you must first perform a
+ * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
+ */
+static void
+xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ enum dma_sync_target target)
+{
+ phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+
+ BUG_ON(dir == DMA_NONE);
+
+ /* NOTE: We use dev_addr here, not paddr! */
+ if (is_xen_swiotlb_buffer(dev_addr)) {
+ swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+ target);
+ return;
+ }
+
+ if (dir != DMA_FROM_DEVICE)
+ return;
+
+ dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void
+xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);
+
+void
+xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above xen_swiotlb_map_page
+ * interface. Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
+ * same here.
+ */
+int
+xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(dir == DMA_NONE);
+
+ for_each_sg(sgl, sg, nelems, i) {
+ phys_addr_t paddr = sg_phys(sg);
+ dma_addr_t dev_addr = xen_phys_to_bus(paddr);
+
+ if (swiotlb_force ||
+ !dma_capable(hwdev, dev_addr, sg->length) ||
+ range_straddles_page_boundary(paddr, sg->length)) {
+ void *map = swiotlb_tbl_map_single(hwdev,
+ start_dma_addr,
+ sg_phys(sg),
+ sg->length, dir);
+ if (!map) {
+ /* Don't panic here, we expect map_sg users
+ to do proper error handling. */
+ xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+ attrs);
+ sgl[0].dma_length = 0;
+ return DMA_ERROR_CODE;
+ }
+ sg->dma_address = xen_virt_to_bus(map);
+ } else
+ sg->dma_address = dev_addr;
+ sg->dma_length = sg->length;
+ }
+ return nelems;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
+
+int
+xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);
+
+/*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+void
+xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(dir == DMA_NONE);
+
+ for_each_sg(sgl, sg, nelems, i)
+ xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
+
+void
+xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
+ *
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
+ */
+static void
+xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ enum dma_sync_target target)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ xen_swiotlb_sync_single(hwdev, sg->dma_address,
+ sg->dma_length, dir, target);
+}
+
+void
+xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);
+
+void
+xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
+
+int
+xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+ return !dma_addr;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
+ */
+int
+xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
+{
+ return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
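
check_pages_physically_contiguous() and range_straddles_page_boundary() above decide whether a buffer that is contiguous in guest pseudo-physical space is also contiguous in machine frames; if not, xen_swiotlb_map_page() falls back to a bounce buffer. The following stand-alone model, with a toy pfn_to_mfn() and an assumed 4K page size, shows just that check:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long pfn_to_mfn(unsigned long pfn)
{
    /* toy mapping: machine frames mostly track pfns, with a hole at pfn 10 */
    return (pfn < 10) ? pfn + 100 : pfn + 200;
}

static int pages_machine_contiguous(unsigned long pfn, unsigned int offset,
                                    size_t length)
{
    unsigned long next_mfn = pfn_to_mfn(pfn);
    int nr_pages = (offset + length + PAGE_SIZE - 1) >> PAGE_SHIFT;

    for (int i = 1; i < nr_pages; i++)
        if (pfn_to_mfn(++pfn) != ++next_mfn)
            return 0;                   /* discontiguity: needs a bounce buffer */
    return 1;
}

int main(void)
{
    printf("pfn 2, 3 pages: %d\n", pages_machine_contiguous(2, 0, 3 * PAGE_SIZE));
    printf("pfn 9, 2 pages: %d\n", pages_machine_contiguous(9, 0, 2 * PAGE_SIZE));
    return 0;
}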
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 7b3e973a1ae..7e49527189b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -133,17 +133,12 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
+static void xenbus_switch_fatal(struct xenbus_device *, int, int,
+ const char *, ...);
-/**
- * xenbus_switch_state
- * @dev: xenbus device
- * @state: new state
- *
- * Advertise in the store a change of the given driver to the given new_state.
- * Return 0 on success, or -errno on error. On error, the device will switch
- * to XenbusStateClosing, and the error will be saved in the store.
- */
-int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
+static int
+__xenbus_switch_state(struct xenbus_device *dev,
+ enum xenbus_state state, int depth)
{
/* We check whether the state is currently set to the given value, and
if not, then the state is set. We don't want to unconditionally
@@ -152,35 +147,65 @@ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
to it, as the device will be tearing down, and we don't want to
resurrect that directory.
- Note that, because of this cached value of our state, this function
- will not work inside a Xenstore transaction (something it was
- trying to in the past) because dev->state would not get reset if
- the transaction was aborted.
-
+ Note that, because of this cached value of our state, this
+ function will not take a caller's Xenstore transaction
+ (something it tried to do in the past) because dev->state
+ would not get reset if the transaction was aborted.
*/
+ struct xenbus_transaction xbt;
int current_state;
- int err;
+ int err, abort;
if (state == dev->state)
return 0;
- err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
- &current_state);
- if (err != 1)
+again:
+ abort = 1;
+
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_switch_fatal(dev, depth, err, "starting transaction");
return 0;
+ }
+
+ err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
+ if (err != 1)
+ goto abort;
- err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
+ err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
if (err) {
- if (state != XenbusStateClosing) /* Avoid looping */
- xenbus_dev_fatal(dev, err, "writing new state");
- return err;
+ xenbus_switch_fatal(dev, depth, err, "writing new state");
+ goto abort;
}
- dev->state = state;
+ abort = 0;
+abort:
+ err = xenbus_transaction_end(xbt, abort);
+ if (err) {
+ if (err == -EAGAIN && !abort)
+ goto again;
+ xenbus_switch_fatal(dev, depth, err, "ending transaction");
+ } else
+ dev->state = state;
return 0;
}
+
+/**
+ * xenbus_switch_state
+ * @dev: xenbus device
+ * @state: new state
+ *
+ * Advertise in the store a change of the given driver to the given new_state.
+ * Return 0 on success, or -errno on error. On error, the device will switch
+ * to XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
+{
+ return __xenbus_switch_state(dev, state, 0);
+}
+
EXPORT_SYMBOL_GPL(xenbus_switch_state);
int xenbus_frontend_closed(struct xenbus_device *dev)
@@ -284,6 +309,23 @@ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
/**
+ * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
+ * avoid recursion within xenbus_switch_state.
+ */
+static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xenbus_va_dev_error(dev, err, fmt, ap);
+ va_end(ap);
+
+ if (!depth)
+ __xenbus_switch_state(dev, XenbusStateClosing, 1);
+}
+
+/**
* xenbus_grant_ring
* @dev: xenbus device
* @ring_mfn: mfn of ring to grant
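
__xenbus_switch_state() now wraps the read-modify-write of the "state" node in its own Xenstore transaction and retries from the top when ending the transaction fails with -EAGAIN. A compact user-space model of that retry shape, with stub xs_* helpers in place of the real Xenstore calls, is:

#include <errno.h>
#include <stdio.h>

static int attempts;

static int xs_transaction_start(void) { return 0; }
static int xs_write_state(int state)  { (void)state; return 0; }
static int xs_transaction_end(int abort)
{
    /* pretend the first non-aborted attempt races with another writer */
    return (!abort && attempts++ == 0) ? -EAGAIN : 0;
}

static int switch_state(int state)
{
    int err, abort;

again:
    abort = 1;
    if ((err = xs_transaction_start()))
        return err;
    if ((err = xs_write_state(state)))
        goto end;
    abort = 0;
end:
    err = xs_transaction_end(abort);
    if (err == -EAGAIN && !abort)
        goto again;                     /* retry the whole transaction */
    return err;
}

int main(void)
{
    printf("switch_state -> %d after %d attempts\n", switch_state(3), attempts);
    return 0;
}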
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 3479332113e..d409495876f 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -56,6 +56,9 @@
#include <xen/events.h>
#include <xen/page.h>
+#include <xen/platform_pci.h>
+#include <xen/hvm.h>
+
#include "xenbus_comms.h"
#include "xenbus_probe.h"
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
void xenbus_probe(struct work_struct *unused)
{
- BUG_ON((xenstored_ready <= 0));
+ xenstored_ready = 1;
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
@@ -779,8 +782,23 @@ void xenbus_probe(struct work_struct *unused)
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
+EXPORT_SYMBOL_GPL(xenbus_probe);
-static int __init xenbus_probe_init(void)
+static int __init xenbus_probe_initcall(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (xen_initial_domain() || xen_hvm_domain())
+ return 0;
+
+ xenbus_probe(NULL);
+ return 0;
+}
+
+device_initcall(xenbus_probe_initcall);
+
+static int __init xenbus_init(void)
{
int err = 0;
@@ -805,11 +823,24 @@ static int __init xenbus_probe_init(void)
if (xen_initial_domain()) {
/* dom0 not yet supported */
} else {
- xenstored_ready = 1;
- xen_store_evtchn = xen_start_info->store_evtchn;
- xen_store_mfn = xen_start_info->store_mfn;
+ if (xen_hvm_domain()) {
+ uint64_t v = 0;
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (err)
+ goto out_error;
+ xen_store_evtchn = (int)v;
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err)
+ goto out_error;
+ xen_store_mfn = (unsigned long)v;
+ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+ } else {
+ xen_store_evtchn = xen_start_info->store_evtchn;
+ xen_store_mfn = xen_start_info->store_mfn;
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
+ xenstored_ready = 1;
+ }
}
- xen_store_interface = mfn_to_virt(xen_store_mfn);
/* Initialize the interface to xenstore. */
err = xs_init();
@@ -819,9 +850,6 @@ static int __init xenbus_probe_init(void)
goto out_unreg_back;
}
- if (!xen_initial_domain())
- xenbus_probe(NULL);
-
#ifdef CONFIG_XEN_COMPAT_XENFS
/*
* Create xenfs mountpoint in /proc for compatibility with
@@ -842,7 +870,7 @@ static int __init xenbus_probe_init(void)
return err;
}
-postcore_initcall(xenbus_probe_init);
+postcore_initcall(xenbus_init);
MODULE_LICENSE("GPL");
@@ -950,6 +978,9 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
+ if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ return -ENODEV;
+
ready_to_wait_for_devices = 1;
wait_for_devices(NULL);
return 0;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 7b547f53f65..5534690075a 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -76,6 +76,14 @@ struct xs_handle {
/*
* Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
* response_mutex is never taken simultaneously with the other three.
+ *
+ * transaction_mutex must be held before incrementing
+ * transaction_count. The mutex is held when a suspend is in
+ * progress to prevent new transactions from starting.
+ *
+ * When transaction_count drops to zero the wait queue must be
+ * woken up; the suspend code waits for the count to reach zero.
*/
/* One request at a time. */
@@ -85,7 +93,9 @@ struct xs_handle {
struct mutex response_mutex;
/* Protect transactions against save/restore. */
- struct rw_semaphore transaction_mutex;
+ struct mutex transaction_mutex;
+ atomic_t transaction_count;
+ wait_queue_head_t transaction_wq;
/* Protect watch (de)register against save/restore. */
struct rw_semaphore watch_mutex;
@@ -157,6 +167,31 @@ static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
return body;
}
+static void transaction_start(void)
+{
+ mutex_lock(&xs_state.transaction_mutex);
+ atomic_inc(&xs_state.transaction_count);
+ mutex_unlock(&xs_state.transaction_mutex);
+}
+
+static void transaction_end(void)
+{
+ if (atomic_dec_and_test(&xs_state.transaction_count))
+ wake_up(&xs_state.transaction_wq);
+}
+
+static void transaction_suspend(void)
+{
+ mutex_lock(&xs_state.transaction_mutex);
+ wait_event(xs_state.transaction_wq,
+ atomic_read(&xs_state.transaction_count) == 0);
+}
+
+static void transaction_resume(void)
+{
+ mutex_unlock(&xs_state.transaction_mutex);
+}
+
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
void *ret;
@@ -164,7 +199,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
int err;
if (req_msg.type == XS_TRANSACTION_START)
- down_read(&xs_state.transaction_mutex);
+ transaction_start();
mutex_lock(&xs_state.request_mutex);
@@ -180,7 +215,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
if ((msg->type == XS_TRANSACTION_END) ||
((req_msg.type == XS_TRANSACTION_START) &&
(msg->type == XS_ERROR)))
- up_read(&xs_state.transaction_mutex);
+ transaction_end();
return ret;
}
@@ -432,11 +467,11 @@ int xenbus_transaction_start(struct xenbus_transaction *t)
{
char *id_str;
- down_read(&xs_state.transaction_mutex);
+ transaction_start();
id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
if (IS_ERR(id_str)) {
- up_read(&xs_state.transaction_mutex);
+ transaction_end();
return PTR_ERR(id_str);
}
@@ -461,7 +496,7 @@ int xenbus_transaction_end(struct xenbus_transaction t, int abort)
err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
- up_read(&xs_state.transaction_mutex);
+ transaction_end();
return err;
}
@@ -662,7 +697,7 @@ EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
void xs_suspend(void)
{
- down_write(&xs_state.transaction_mutex);
+ transaction_suspend();
down_write(&xs_state.watch_mutex);
mutex_lock(&xs_state.request_mutex);
mutex_lock(&xs_state.response_mutex);
@@ -677,7 +712,7 @@ void xs_resume(void)
mutex_unlock(&xs_state.response_mutex);
mutex_unlock(&xs_state.request_mutex);
- up_write(&xs_state.transaction_mutex);
+ transaction_resume();
/* No need for watches_lock: the watch_mutex is sufficient. */
list_for_each_entry(watch, &watches, list) {
@@ -693,7 +728,7 @@ void xs_suspend_cancel(void)
mutex_unlock(&xs_state.response_mutex);
mutex_unlock(&xs_state.request_mutex);
up_write(&xs_state.watch_mutex);
- up_write(&xs_state.transaction_mutex);
+ mutex_unlock(&xs_state.transaction_mutex);
}
static int xenwatch_thread(void *unused)
@@ -843,8 +878,10 @@ int xs_init(void)
mutex_init(&xs_state.request_mutex);
mutex_init(&xs_state.response_mutex);
- init_rwsem(&xs_state.transaction_mutex);
+ mutex_init(&xs_state.transaction_mutex);
init_rwsem(&xs_state.watch_mutex);
+ atomic_set(&xs_state.transaction_count, 0);
+ init_waitqueue_head(&xs_state.transaction_wq);
/* Initialize the shared memory rings to talk to xenstored */
err = xb_init_comms();
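
The new transaction_count/transaction_wq scheme lets transactions start and end cheaply, while xs_suspend() holds transaction_mutex to block new transactions and waits for the in-flight ones to drain. A pthreads model of the same idea (illustrative, not the kernel code) could look like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t transaction_mutex = PTHREAD_MUTEX_INITIALIZER; /* gate */
static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;        /* for the condvar */
static pthread_cond_t  transaction_wq = PTHREAD_COND_INITIALIZER;
static atomic_int transaction_count;

static void transaction_start(void)
{
    pthread_mutex_lock(&transaction_mutex);     /* blocks while a suspend holds it */
    atomic_fetch_add(&transaction_count, 1);
    pthread_mutex_unlock(&transaction_mutex);
}

static void transaction_end(void)
{
    if (atomic_fetch_sub(&transaction_count, 1) == 1) {
        pthread_mutex_lock(&count_lock);        /* never takes the gate mutex */
        pthread_cond_broadcast(&transaction_wq);
        pthread_mutex_unlock(&count_lock);
    }
}

static void transaction_suspend(void)           /* returns with the gate held */
{
    pthread_mutex_lock(&transaction_mutex);
    pthread_mutex_lock(&count_lock);
    while (atomic_load(&transaction_count) != 0)
        pthread_cond_wait(&transaction_wq, &count_lock);
    pthread_mutex_unlock(&count_lock);
}

static void transaction_resume(void)
{
    pthread_mutex_unlock(&transaction_mutex);
}

int main(void)
{
    transaction_start();
    transaction_end();
    transaction_suspend();
    printf("quiesced; in-flight transactions: %d\n", atomic_load(&transaction_count));
    transaction_resume();
    return 0;
}

Build with -pthread. The property mirrored from the kernel change is that ending a transaction never needs the gate mutex, so transactions already in flight can always finish even while a suspender is holding it.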
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 8924d93136f..78bfab0700b 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -65,7 +65,7 @@ static struct file_system_type xenfs_type = {
static int __init xenfs_init(void)
{
- if (xen_pv_domain())
+ if (xen_domain())
return register_filesystem(&xenfs_type);
printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
@@ -74,7 +74,7 @@ static int __init xenfs_init(void)
static void __exit xenfs_exit(void)
{
- if (xen_pv_domain())
+ if (xen_domain())
unregister_filesystem(&xenfs_type);
}
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index f28ece39736..3b39c3752e2 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -124,6 +124,9 @@ static ssize_t xenbus_file_read(struct file *filp,
mutex_lock(&u->reply_mutex);
while (list_empty(&u->read_buffers)) {
mutex_unlock(&u->reply_mutex);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
ret = wait_event_interruptible(u->read_waitq,
!list_empty(&u->read_buffers));
if (ret)
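
With the O_NONBLOCK check added above, a userspace client of the xenbus device no longer has to sleep in read() when no reply is buffered. Assuming the usual xenfs mount point at /proc/xen/xenbus (treat the path as an assumption here), a non-blocking reader might look like:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[256];
    int fd = open("/proc/xen/xenbus", O_RDWR | O_NONBLOCK);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    ssize_t n = read(fd, buf, sizeof(buf));
    if (n < 0 && errno == EAGAIN)
        printf("no reply buffered yet; come back after poll()\n");
    else if (n >= 0)
        printf("read %zd bytes\n", n);

    close(fd);
    return 0;
}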
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 3c7046d7965..cafc5045429 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -22,8 +22,9 @@ static loff_t
proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
+ struct inode *inode = file->f_path.dentry->d_inode;
- lock_kernel();
+ mutex_lock(&inode->i_mutex);
switch (whence) {
case 0:
new = off;
@@ -35,12 +36,12 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
new = sizeof(struct ConfigDev) + off;
break;
}
- if (new < 0 || new > sizeof(struct ConfigDev)) {
- unlock_kernel();
- return -EINVAL;
- }
- unlock_kernel();
- return (file->f_pos = new);
+ if (new < 0 || new > sizeof(struct ConfigDev))
+ new = -EINVAL;
+ else
+ file->f_pos = new;
+ mutex_unlock(&inode->i_mutex);
+ return new;
}
static ssize_t
@@ -67,7 +68,7 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
cd.cd_BoardAddr = (void *)zorro_resource_start(z);
cd.cd_BoardSize = zorro_resource_len(z);
- if (copy_to_user(buf, &cd, nbytes))
+ if (copy_to_user(buf, (void *)&cd + pos, nbytes))
return -EFAULT;
*ppos += nbytes;